Diffstat (limited to 'drivers')
-rw-r--r--drivers/Kconfig2
-rw-r--r--drivers/Makefile1
-rw-r--r--drivers/acpi/Kconfig12
-rw-r--r--drivers/acpi/acpi_apd.c2
-rw-r--r--drivers/acpi/acpi_configfs.c6
-rw-r--r--drivers/acpi/acpi_lpit.c7
-rw-r--r--drivers/acpi/acpi_lpss.c111
-rw-r--r--drivers/acpi/acpi_pad.c1
-rw-r--r--drivers/acpi/acpica/acevents.h3
-rw-r--r--drivers/acpi/acpica/acglobal.h1
-rw-r--r--drivers/acpi/acpica/acnamesp.h2
-rw-r--r--drivers/acpi/acpica/dsinit.c2
-rw-r--r--drivers/acpi/acpica/evgpe.c8
-rw-r--r--drivers/acpi/acpica/evgpeblk.c2
-rw-r--r--drivers/acpi/acpica/evxface.c2
-rw-r--r--drivers/acpi/acpica/evxfgpe.c2
-rw-r--r--drivers/acpi/acpica/nsaccess.c54
-rw-r--r--drivers/acpi/acpica/nseval.c190
-rw-r--r--drivers/acpi/acpica/nsinit.c49
-rw-r--r--drivers/acpi/acpica/nsload.c12
-rw-r--r--drivers/acpi/acpica/nsutils.c12
-rw-r--r--drivers/acpi/acpica/tbdata.c13
-rw-r--r--drivers/acpi/acpica/tbxfload.c11
-rw-r--r--drivers/acpi/acpica/utinit.c1
-rw-r--r--drivers/acpi/acpica/utxfinit.c18
-rw-r--r--drivers/acpi/apei/ghes.c2
-rw-r--r--drivers/acpi/device_pm.c165
-rw-r--r--drivers/acpi/internal.h7
-rw-r--r--drivers/acpi/irq.c26
-rw-r--r--drivers/acpi/osl.c4
-rw-r--r--drivers/acpi/pmic/intel_pmic.c2
-rw-r--r--drivers/acpi/power.c135
-rw-r--r--drivers/acpi/pptt.c61
-rw-r--r--drivers/acpi/processor_idle.c1
-rw-r--r--drivers/acpi/property.c26
-rw-r--r--drivers/acpi/sleep.c22
-rw-r--r--drivers/acpi/tables.c21
-rw-r--r--drivers/ata/acard-ahci.c1
-rw-r--r--drivers/ata/ahci_sunxi.c47
-rw-r--r--drivers/ata/libahci.c1
-rw-r--r--drivers/ata/libata-core.c4
-rw-r--r--drivers/ata/libata-eh.c8
-rw-r--r--drivers/ata/pdc_adma.c1
-rw-r--r--drivers/ata/sata_nv.c2
-rw-r--r--drivers/ata/sata_qstor.c1
-rw-r--r--drivers/ata/sata_sil24.c1
-rw-r--r--drivers/auxdisplay/Kconfig2
-rw-r--r--drivers/base/arch_topology.c6
-rw-r--r--drivers/base/cacheinfo.c5
-rw-r--r--drivers/base/core.c28
-rw-r--r--drivers/base/devcon.c26
-rw-r--r--drivers/base/power/clock_ops.c6
-rw-r--r--drivers/base/power/main.c36
-rw-r--r--drivers/base/power/wakeup.c6
-rw-r--r--drivers/base/property.c24
-rw-r--r--drivers/base/regmap/Kconfig6
-rw-r--r--drivers/base/regmap/Makefile1
-rw-r--r--drivers/base/regmap/regcache-lzo.c8
-rw-r--r--drivers/base/regmap/regmap-debugfs.c2
-rw-r--r--drivers/base/regmap/regmap-i3c.c60
-rw-r--r--drivers/base/regmap/regmap.c2
-rw-r--r--drivers/base/swnode.c324
-rw-r--r--drivers/base/topology.c22
-rw-r--r--drivers/block/Kconfig2
-rw-r--r--drivers/block/drbd/drbd_debugfs.c64
-rw-r--r--drivers/block/drbd/drbd_debugfs.h4
-rw-r--r--drivers/block/drbd/drbd_int.h2
-rw-r--r--drivers/block/drbd/drbd_main.c5
-rw-r--r--drivers/block/drbd/drbd_nl.c2
-rw-r--r--drivers/block/floppy.c2
-rw-r--r--drivers/block/loop.c16
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c5
-rw-r--r--drivers/block/null_blk_main.c14
-rw-r--r--drivers/block/skd_main.c1
-rw-r--r--drivers/bluetooth/Kconfig12
-rw-r--r--drivers/bluetooth/bpa10x.c3
-rw-r--r--drivers/bluetooth/btbcm.c1
-rw-r--r--drivers/bluetooth/btmtkuart.c51
-rw-r--r--drivers/bluetooth/btqca.c47
-rw-r--r--drivers/bluetooth/btqca.h10
-rw-r--r--drivers/bluetooth/btrtl.c28
-rw-r--r--drivers/bluetooth/btrtl.h6
-rw-r--r--drivers/bluetooth/btsdio.c1
-rw-r--r--drivers/bluetooth/btusb.c584
-rw-r--r--drivers/bluetooth/hci_bcsp.c5
-rw-r--r--drivers/bluetooth/hci_ldisc.c8
-rw-r--r--drivers/bluetooth/hci_ll.c109
-rw-r--r--drivers/bluetooth/hci_mrvl.c72
-rw-r--r--drivers/bluetooth/hci_qca.c73
-rw-r--r--drivers/bluetooth/hci_uart.h1
-rw-r--r--drivers/cdrom/cdrom.c2
-rw-r--r--drivers/char/agp/generic.c3
-rw-r--r--drivers/char/hw_random/iproc-rng200.c1
-rw-r--r--drivers/char/hw_random/meson-rng.c52
-rw-r--r--drivers/char/tpm/eventlog/efi.c59
-rw-r--r--drivers/char/tpm/eventlog/tpm2.c47
-rw-r--r--drivers/char/tpm/tpm-chip.c6
-rw-r--r--drivers/char/tpm/tpm1-cmd.c7
-rw-r--r--drivers/char/tpm/tpm2-cmd.c7
-rw-r--r--drivers/clk/clk.c2
-rw-r--r--drivers/clk/meson/g12a.c4
-rw-r--r--drivers/clk/meson/g12a.h2
-rw-r--r--drivers/clk/meson/meson8b.c10
-rw-r--r--drivers/clk/socfpga/clk-s10.c4
-rw-r--r--drivers/clk/tegra/clk-tegra210.c2
-rw-r--r--drivers/clk/ti/clkctrl.c7
-rw-r--r--drivers/clocksource/Kconfig14
-rw-r--r--drivers/clocksource/Makefile5
-rw-r--r--drivers/clocksource/arc_timer.c3
-rw-r--r--drivers/clocksource/arm_arch_timer.c15
-rw-r--r--drivers/clocksource/exynos_mct.c4
-rw-r--r--drivers/clocksource/hyperv_timer.c339
-rw-r--r--drivers/clocksource/timer-davinci.c369
-rw-r--r--drivers/clocksource/timer-imx-sysctr.c145
-rw-r--r--drivers/clocksource/timer-ixp4xx.c16
-rw-r--r--drivers/clocksource/timer-meson6.c5
-rw-r--r--drivers/clocksource/timer-npcm7xx.c2
-rw-r--r--drivers/clocksource/timer-tegra.c416
-rw-r--r--drivers/clocksource/timer-tegra20.c379
-rw-r--r--drivers/cpufreq/Kconfig.arm17
-rw-r--r--drivers/cpufreq/Makefile2
-rw-r--r--drivers/cpufreq/armada-37xx-cpufreq.c4
-rw-r--r--drivers/cpufreq/brcmstb-avs-cpufreq.c12
-rw-r--r--drivers/cpufreq/cpufreq-dt-platdev.c5
-rw-r--r--drivers/cpufreq/cpufreq.c121
-rw-r--r--drivers/cpufreq/imx-cpufreq-dt.c97
-rw-r--r--drivers/cpufreq/pcc-cpufreq.c4
-rw-r--r--drivers/cpufreq/raspberrypi-cpufreq.c97
-rw-r--r--drivers/cpufreq/s5pv210-cpufreq.c2
-rw-r--r--drivers/crypto/Kconfig20
-rw-r--r--drivers/crypto/Makefile2
-rw-r--r--drivers/crypto/amcc/crypto4xx_alg.c36
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.c25
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.h10
-rw-r--r--drivers/crypto/atmel-ecc.c403
-rw-r--r--drivers/crypto/atmel-ecc.h116
-rw-r--r--drivers/crypto/atmel-i2c.c364
-rw-r--r--drivers/crypto/atmel-i2c.h197
-rw-r--r--drivers/crypto/atmel-sha204a.c171
-rw-r--r--drivers/crypto/bcm/cipher.c8
-rw-r--r--drivers/crypto/bcm/spu2.c10
-rw-r--r--drivers/crypto/caam/Kconfig46
-rw-r--r--drivers/crypto/caam/Makefile18
-rw-r--r--drivers/crypto/caam/caamalg.c338
-rw-r--r--drivers/crypto/caam/caamalg_desc.c147
-rw-r--r--drivers/crypto/caam/caamalg_desc.h4
-rw-r--r--drivers/crypto/caam/caamalg_qi.c267
-rw-r--r--drivers/crypto/caam/caamalg_qi2.c202
-rw-r--r--drivers/crypto/caam/caamhash.c329
-rw-r--r--drivers/crypto/caam/caampkc.c177
-rw-r--r--drivers/crypto/caam/caampkc.h9
-rw-r--r--drivers/crypto/caam/caamrng.c76
-rw-r--r--drivers/crypto/caam/ctrl.c56
-rw-r--r--drivers/crypto/caam/desc_constr.h11
-rw-r--r--drivers/crypto/caam/error.c8
-rw-r--r--drivers/crypto/caam/error.h2
-rw-r--r--drivers/crypto/caam/intern.h102
-rw-r--r--drivers/crypto/caam/jr.c43
-rw-r--r--drivers/crypto/caam/key_gen.c28
-rw-r--r--drivers/crypto/caam/qi.c52
-rw-r--r--drivers/crypto/caam/sg_sw_qm.h18
-rw-r--r--drivers/crypto/caam/sg_sw_qm2.h18
-rw-r--r--drivers/crypto/caam/sg_sw_sec4.h26
-rw-r--r--drivers/crypto/cavium/cpt/cptvf_algs.c1
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_debugfs.h2
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_mbx.h2
-rw-r--r--drivers/crypto/ccp/ccp-crypto-aes.c7
-rw-r--r--drivers/crypto/ccp/ccp-dev.c96
-rw-r--r--drivers/crypto/ccp/ccp-dev.h2
-rw-r--r--drivers/crypto/ccp/ccp-ops.c20
-rw-r--r--drivers/crypto/ccree/cc_driver.c70
-rw-r--r--drivers/crypto/ccree/cc_driver.h6
-rw-r--r--drivers/crypto/ccree/cc_host_regs.h20
-rw-r--r--drivers/crypto/ccree/cc_pm.c11
-rw-r--r--drivers/crypto/ccree/cc_pm.h7
-rw-r--r--drivers/crypto/hisilicon/sec/sec_drv.h2
-rw-r--r--drivers/crypto/inside-secure/safexcel.c13
-rw-r--r--drivers/crypto/inside-secure/safexcel.h17
-rw-r--r--drivers/crypto/inside-secure/safexcel_cipher.c116
-rw-r--r--drivers/crypto/inside-secure/safexcel_hash.c92
-rw-r--r--drivers/crypto/inside-secure/safexcel_ring.c3
-rw-r--r--drivers/crypto/ixp4xx_crypto.c15
-rw-r--r--drivers/crypto/mxs-dcp.c5
-rw-r--r--drivers/crypto/nx/nx-842-powernv.c8
-rw-r--r--drivers/crypto/nx/nx-842-pseries.c6
-rw-r--r--drivers/crypto/nx/nx.c4
-rw-r--r--drivers/crypto/nx/nx.h12
-rw-r--r--drivers/crypto/nx/nx_debugfs.c71
-rw-r--r--drivers/crypto/qat/qat_common/qat_algs.c294
-rw-r--r--drivers/crypto/qat/qat_common/qat_crypto.h2
-rw-r--r--drivers/crypto/sahara.c4
-rw-r--r--drivers/crypto/stm32/Makefile2
-rw-r--r--drivers/crypto/stm32/stm32-crc32.c (renamed from drivers/crypto/stm32/stm32_crc32.c)0
-rw-r--r--drivers/crypto/stm32/stm32-hash.c6
-rw-r--r--drivers/crypto/sunxi-ss/sun4i-ss-cipher.c47
-rw-r--r--drivers/crypto/talitos.c368
-rw-r--r--drivers/crypto/talitos.h73
-rw-r--r--drivers/crypto/vmx/aes_cbc.c183
-rw-r--r--drivers/crypto/vmx/aes_ctr.c165
-rw-r--r--drivers/crypto/vmx/aes_xts.c175
-rw-r--r--drivers/crypto/vmx/aesp8-ppc.h2
-rw-r--r--drivers/crypto/vmx/aesp8-ppc.pl22
-rw-r--r--drivers/crypto/vmx/vmx.c72
-rw-r--r--drivers/dma/dma-jz4780.c5
-rw-r--r--drivers/dma/imx-sdma.c52
-rw-r--r--drivers/dma/qcom/bam_dma.c3
-rw-r--r--drivers/edac/Kconfig6
-rw-r--r--drivers/edac/Makefile1
-rw-r--r--drivers/edac/altera_edac.c43
-rw-r--r--drivers/edac/aspeed_edac.c4
-rw-r--r--drivers/edac/debugfs.c12
-rw-r--r--drivers/edac/edac_mc_sysfs.c34
-rw-r--r--drivers/edac/edac_module.h20
-rw-r--r--drivers/edac/i10nm_base.c10
-rw-r--r--drivers/edac/ie31200_edac.c78
-rw-r--r--drivers/edac/sb_edac.c1
-rw-r--r--drivers/edac/sifive_edac.c119
-rw-r--r--drivers/edac/skx_base.c2
-rw-r--r--drivers/edac/skx_common.c4
-rw-r--r--drivers/edac/skx_common.h2
-rw-r--r--drivers/firmware/Kconfig2
-rw-r--r--drivers/firmware/efi/efi-bgrt.c5
-rw-r--r--drivers/firmware/efi/efi.c14
-rw-r--r--drivers/firmware/efi/efibc.c12
-rw-r--r--drivers/firmware/efi/libstub/efi-stub-helper.c15
-rw-r--r--drivers/firmware/efi/libstub/efistub.h2
-rw-r--r--drivers/firmware/efi/libstub/fdt.c27
-rw-r--r--drivers/firmware/efi/libstub/tpm.c80
-rw-r--r--drivers/firmware/efi/tpm.c63
-rw-r--r--drivers/fmc/Kconfig52
-rw-r--r--drivers/fmc/Makefile15
-rw-r--r--drivers/fmc/fmc-chardev.c199
-rw-r--r--drivers/fmc/fmc-core.c388
-rw-r--r--drivers/fmc/fmc-debug.c172
-rw-r--r--drivers/fmc/fmc-dump.c58
-rw-r--r--drivers/fmc/fmc-fakedev.c355
-rw-r--r--drivers/fmc/fmc-match.c113
-rw-r--r--drivers/fmc/fmc-private.h8
-rw-r--r--drivers/fmc/fmc-sdb.c219
-rw-r--r--drivers/fmc/fmc-trivial.c103
-rw-r--r--drivers/fmc/fmc-write-eeprom.c175
-rw-r--r--drivers/fmc/fru-parse.c80
-rw-r--r--drivers/gpio/Kconfig20
-rw-r--r--drivers/gpio/Makefile296
-rw-r--r--drivers/gpio/TODO40
-rw-r--r--drivers/gpio/gpio-altera.c65
-rw-r--r--drivers/gpio/gpio-amd-fch.c4
-rw-r--r--drivers/gpio/gpio-amdpt.c10
-rw-r--r--drivers/gpio/gpio-ath79.c66
-rw-r--r--drivers/gpio/gpio-davinci.c7
-rw-r--r--drivers/gpio/gpio-eic-sprd.c9
-rw-r--r--drivers/gpio/gpio-em.c34
-rw-r--r--drivers/gpio/gpio-ep93xx.c7
-rw-r--r--drivers/gpio/gpio-ftgpio010.c35
-rw-r--r--drivers/gpio/gpio-grgpio.c4
-rw-r--r--drivers/gpio/gpio-ixp4xx.c14
-rw-r--r--drivers/gpio/gpio-janz-ttl.c9
-rw-r--r--drivers/gpio/gpio-madera.c6
-rw-r--r--drivers/gpio/gpio-max732x.c45
-rw-r--r--drivers/gpio/gpio-mb86s7x.c51
-rw-r--r--drivers/gpio/gpio-mockup.c21
-rw-r--r--drivers/gpio/gpio-mvebu.c11
-rw-r--r--drivers/gpio/gpio-omap.c509
-rw-r--r--drivers/gpio/gpio-pca953x.c1
-rw-r--r--drivers/gpio/gpio-pl061.c30
-rw-r--r--drivers/gpio/gpio-rcar.c2
-rw-r--r--drivers/gpio/gpio-siox.c51
-rw-r--r--drivers/gpio/gpio-stp-xway.c33
-rw-r--r--drivers/gpio/gpio-tegra.c4
-rw-r--r--drivers/gpio/gpio-vf610.c14
-rw-r--r--drivers/gpio/gpio-vr41xx.c19
-rw-r--r--drivers/gpio/gpio-xilinx.c90
-rw-r--r--drivers/gpio/gpiolib-acpi.c6
-rw-r--r--drivers/gpio/gpiolib-of.c52
-rw-r--r--drivers/gpio/gpiolib.c94
-rw-r--r--drivers/gpio/gpiolib.h2
-rw-r--r--drivers/gpu/drm/Kconfig2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c19
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c4
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/hwmgr.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c4
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.c7
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c6
-rw-r--r--drivers/gpu/drm/imx/ipuv3-crtc.c6
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_drv.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c19
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vq.c2
-rw-r--r--drivers/gpu/vga/Kconfig1
-rw-r--r--drivers/gpu/vga/vga_switcheroo.c11
-rw-r--r--drivers/hid/hid-cp2112.c7
-rw-r--r--drivers/hid/hid-picolcd_fb.c4
-rw-r--r--drivers/hv/Kconfig4
-rw-r--r--drivers/hv/hv.c156
-rw-r--r--drivers/hv/hv_util.c1
-rw-r--r--drivers/hv/hyperv_vmbus.h3
-rw-r--r--drivers/hv/vmbus_drv.c42
-rw-r--r--drivers/hwmon/adm1029.c10
-rw-r--r--drivers/hwmon/asus_atk0110.c23
-rw-r--r--drivers/hwmon/coretemp.c36
-rw-r--r--drivers/hwmon/gpio-fan.c22
-rw-r--r--drivers/hwmon/hwmon.c6
-rw-r--r--drivers/hwmon/ina3221.c4
-rw-r--r--drivers/hwmon/lm90.c106
-rw-r--r--drivers/hwmon/max6650.c710
-rw-r--r--drivers/hwmon/nct7904.c81
-rw-r--r--drivers/hwmon/occ/common.c6
-rw-r--r--drivers/hwmon/pmbus/Kconfig18
-rw-r--r--drivers/hwmon/pmbus/Makefile2
-rw-r--r--drivers/hwmon/pmbus/adm1275.c105
-rw-r--r--drivers/hwmon/pmbus/irps5401.c67
-rw-r--r--drivers/hwmon/pmbus/pxe1610.c139
-rw-r--r--drivers/hwmon/pwm-fan.c10
-rw-r--r--drivers/hwmon/scpi-hwmon.c10
-rw-r--r--drivers/hwmon/smsc47m1.c2
-rw-r--r--drivers/i2c/i2c-core-acpi.c9
-rw-r--r--drivers/i3c/master.c82
-rw-r--r--drivers/i3c/master/dw-i3c-master.c7
-rw-r--r--drivers/i3c/master/i3c-master-cdns.c10
-rw-r--r--drivers/ide/Kconfig20
-rw-r--r--drivers/ide/ide-cd.c2
-rw-r--r--drivers/iio/humidity/dht11.c8
-rw-r--r--drivers/iio/industrialio-core.c4
-rw-r--r--drivers/infiniband/core/device.c8
-rw-r--r--drivers/infiniband/core/roce_gid_mgmt.c5
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c9
-rw-r--r--drivers/infiniband/hw/hfi1/affinity.c6
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.c3
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_cm.c7
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_main.c6
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_utils.c12
-rw-r--r--drivers/infiniband/hw/mlx4/alias_GUID.c6
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c13
-rw-r--r--drivers/infiniband/hw/mlx5/devx.c18
-rw-r--r--drivers/infiniband/hw/mlx5/flow.c13
-rw-r--r--drivers/infiniband/hw/mlx5/ib_rep.c39
-rw-r--r--drivers/infiniband/hw/mlx5/ib_rep.h4
-rw-r--r--drivers/infiniband/hw/mlx5/main.c79
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h3
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c10
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c33
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c2
-rw-r--r--drivers/infiniband/hw/nes/nes.c8
-rw-r--r--drivers/infiniband/hw/qedr/main.c25
-rw-r--r--drivers/infiniband/hw/qedr/qedr.h2
-rw-r--r--drivers/infiniband/hw/qib/qib_file_ops.c7
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_main.c15
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c1
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c21
-rw-r--r--drivers/input/serio/i8042.c2
-rw-r--r--drivers/input/touchscreen/sur40.c6
-rw-r--r--drivers/iommu/amd_iommu.c26
-rw-r--r--drivers/iommu/amd_iommu_init.c45
-rw-r--r--drivers/iommu/arm-smmu-v3.c69
-rw-r--r--drivers/iommu/arm-smmu.c4
-rw-r--r--drivers/iommu/dma-iommu.c458
-rw-r--r--drivers/iommu/intel-iommu-debugfs.c137
-rw-r--r--drivers/iommu/intel-iommu.c940
-rw-r--r--drivers/iommu/intel-pasid.c17
-rw-r--r--drivers/iommu/intel-pasid.h26
-rw-r--r--drivers/iommu/intel-svm.c15
-rw-r--r--drivers/iommu/intel_irq_remapping.c4
-rw-r--r--drivers/iommu/io-pgtable-arm-v7s.c17
-rw-r--r--drivers/iommu/io-pgtable-arm.c40
-rw-r--r--drivers/iommu/iommu.c298
-rw-r--r--drivers/iommu/ipmmu-vmsa.c186
-rw-r--r--drivers/iommu/omap-iommu-debug.c35
-rw-r--r--drivers/iommu/omap-iommu.c3
-rw-r--r--drivers/irqchip/Kconfig32
-rw-r--r--drivers/irqchip/Makefile2
-rw-r--r--drivers/irqchip/irq-al-fic.c278
-rw-r--r--drivers/irqchip/irq-csky-mpintc.c101
-rw-r--r--drivers/irqchip/irq-gic-v2m.c85
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c37
-rw-r--r--drivers/irqchip/irq-gic-v3.c10
-rw-r--r--drivers/irqchip/irq-mbigen.c3
-rw-r--r--drivers/irqchip/irq-meson-gpio.c1
-rw-r--r--drivers/irqchip/irq-mips-gic.c4
-rw-r--r--drivers/irqchip/irq-renesas-intc-irqpin.c3
-rw-r--r--drivers/irqchip/irq-renesas-irqc.c91
-rw-r--r--drivers/irqchip/irq-renesas-rza1.c284
-rw-r--r--drivers/irqchip/irq-sni-exiu.c142
-rw-r--r--drivers/irqchip/irq-ti-sci-inta.c4
-rw-r--r--drivers/irqchip/qcom-irq-combiner.c5
-rw-r--r--drivers/isdn/Kconfig51
-rw-r--r--drivers/isdn/Makefile6
-rw-r--r--drivers/isdn/capi/Kconfig29
-rw-r--r--drivers/isdn/capi/Makefile2
-rw-r--r--drivers/isdn/capi/capidrv.c2525
-rw-r--r--drivers/isdn/capi/capidrv.h140
-rw-r--r--drivers/isdn/divert/Makefile10
-rw-r--r--drivers/isdn/divert/divert_init.c82
-rw-r--r--drivers/isdn/divert/divert_procfs.c336
-rw-r--r--drivers/isdn/divert/isdn_divert.c846
-rw-r--r--drivers/isdn/divert/isdn_divert.h132
-rw-r--r--drivers/isdn/gigaset/i4l.c692
-rw-r--r--drivers/isdn/hardware/Kconfig8
-rw-r--r--drivers/isdn/hardware/Makefile1
-rw-r--r--drivers/isdn/hardware/mISDN/Kconfig7
-rw-r--r--drivers/isdn/hardware/mISDN/Makefile2
-rw-r--r--drivers/isdn/hardware/mISDN/isdnhdlc.c (renamed from drivers/isdn/i4l/isdnhdlc.c)2
-rw-r--r--drivers/isdn/hardware/mISDN/isdnhdlc.h69
-rw-r--r--drivers/isdn/hardware/mISDN/netjet.c2
-rw-r--r--drivers/isdn/hisax/Kconfig423
-rw-r--r--drivers/isdn/hisax/Makefile60
-rw-r--r--drivers/isdn/hisax/amd7930_fn.c794
-rw-r--r--drivers/isdn/hisax/amd7930_fn.h37
-rw-r--r--drivers/isdn/hisax/arcofi.c131
-rw-r--r--drivers/isdn/hisax/arcofi.h27
-rw-r--r--drivers/isdn/hisax/asuscom.c423
-rw-r--r--drivers/isdn/hisax/avm_a1.c307
-rw-r--r--drivers/isdn/hisax/avm_a1p.c267
-rw-r--r--drivers/isdn/hisax/avm_pci.c904
-rw-r--r--drivers/isdn/hisax/avma1_cs.c162
-rw-r--r--drivers/isdn/hisax/bkm_a4t.c358
-rw-r--r--drivers/isdn/hisax/bkm_a8.c433
-rw-r--r--drivers/isdn/hisax/bkm_ax.h119
-rw-r--r--drivers/isdn/hisax/callc.c1792
-rw-r--r--drivers/isdn/hisax/config.c1993
-rw-r--r--drivers/isdn/hisax/diva.c1282
-rw-r--r--drivers/isdn/hisax/elsa.c1245
-rw-r--r--drivers/isdn/hisax/elsa_cs.c218
-rw-r--r--drivers/isdn/hisax/elsa_ser.c659
-rw-r--r--drivers/isdn/hisax/enternow_pci.c420
-rw-r--r--drivers/isdn/hisax/fsm.c161
-rw-r--r--drivers/isdn/hisax/fsm.h61
-rw-r--r--drivers/isdn/hisax/gazel.c691
-rw-r--r--drivers/isdn/hisax/hfc4s8s_l1.c1584
-rw-r--r--drivers/isdn/hisax/hfc4s8s_l1.h89
-rw-r--r--drivers/isdn/hisax/hfc_2bds0.c1078
-rw-r--r--drivers/isdn/hisax/hfc_2bds0.h128
-rw-r--r--drivers/isdn/hisax/hfc_2bs0.c591
-rw-r--r--drivers/isdn/hisax/hfc_2bs0.h60
-rw-r--r--drivers/isdn/hisax/hfc_pci.c1755
-rw-r--r--drivers/isdn/hisax/hfc_pci.h235
-rw-r--r--drivers/isdn/hisax/hfc_sx.c1517
-rw-r--r--drivers/isdn/hisax/hfc_sx.h196
-rw-r--r--drivers/isdn/hisax/hfc_usb.c1594
-rw-r--r--drivers/isdn/hisax/hfc_usb.h208
-rw-r--r--drivers/isdn/hisax/hfcscard.c261
-rw-r--r--drivers/isdn/hisax/hisax.h1352
-rw-r--r--drivers/isdn/hisax/hisax_cfg.h66
-rw-r--r--drivers/isdn/hisax/hisax_debug.h80
-rw-r--r--drivers/isdn/hisax/hisax_fcpcipnp.c1024
-rw-r--r--drivers/isdn/hisax/hisax_fcpcipnp.h58
-rw-r--r--drivers/isdn/hisax/hisax_if.h66
-rw-r--r--drivers/isdn/hisax/hisax_isac.c895
-rw-r--r--drivers/isdn/hisax/hisax_isac.h46
-rw-r--r--drivers/isdn/hisax/hscx.c277
-rw-r--r--drivers/isdn/hisax/hscx.h41
-rw-r--r--drivers/isdn/hisax/hscx_irq.c294
-rw-r--r--drivers/isdn/hisax/icc.c680
-rw-r--r--drivers/isdn/hisax/icc.h72
-rw-r--r--drivers/isdn/hisax/ipac.h29
-rw-r--r--drivers/isdn/hisax/ipacx.c913
-rw-r--r--drivers/isdn/hisax/ipacx.h162
-rw-r--r--drivers/isdn/hisax/isac.c681
-rw-r--r--drivers/isdn/hisax/isac.h70
-rw-r--r--drivers/isdn/hisax/isar.c1910
-rw-r--r--drivers/isdn/hisax/isar.h222
-rw-r--r--drivers/isdn/hisax/isdnl1.c930
-rw-r--r--drivers/isdn/hisax/isdnl1.h32
-rw-r--r--drivers/isdn/hisax/isdnl2.c1839
-rw-r--r--drivers/isdn/hisax/isdnl2.h25
-rw-r--r--drivers/isdn/hisax/isdnl3.c594
-rw-r--r--drivers/isdn/hisax/isdnl3.h42
-rw-r--r--drivers/isdn/hisax/isurf.c305
-rw-r--r--drivers/isdn/hisax/ix1_micro.c316
-rw-r--r--drivers/isdn/hisax/jade.c305
-rw-r--r--drivers/isdn/hisax/jade.h134
-rw-r--r--drivers/isdn/hisax/jade_irq.c238
-rw-r--r--drivers/isdn/hisax/l3_1tr6.c932
-rw-r--r--drivers/isdn/hisax/l3_1tr6.h164
-rw-r--r--drivers/isdn/hisax/l3dss1.c3227
-rw-r--r--drivers/isdn/hisax/l3dss1.h124
-rw-r--r--drivers/isdn/hisax/l3ni1.c3182
-rw-r--r--drivers/isdn/hisax/l3ni1.h136
-rw-r--r--drivers/isdn/hisax/lmgr.c50
-rw-r--r--drivers/isdn/hisax/mic.c235
-rw-r--r--drivers/isdn/hisax/netjet.c985
-rw-r--r--drivers/isdn/hisax/netjet.h69
-rw-r--r--drivers/isdn/hisax/niccy.c380
-rw-r--r--drivers/isdn/hisax/nj_s.c294
-rw-r--r--drivers/isdn/hisax/nj_u.c258
-rw-r--r--drivers/isdn/hisax/q931.c1513
-rw-r--r--drivers/isdn/hisax/s0box.c260
-rw-r--r--drivers/isdn/hisax/saphir.c296
-rw-r--r--drivers/isdn/hisax/sedlbauer.c873
-rw-r--r--drivers/isdn/hisax/sedlbauer_cs.c209
-rw-r--r--drivers/isdn/hisax/sportster.c267
-rw-r--r--drivers/isdn/hisax/st5481.h529
-rw-r--r--drivers/isdn/hisax/st5481_b.c380
-rw-r--r--drivers/isdn/hisax/st5481_d.c780
-rw-r--r--drivers/isdn/hisax/st5481_init.c221
-rw-r--r--drivers/isdn/hisax/st5481_usb.c659
-rw-r--r--drivers/isdn/hisax/tei.c465
-rw-r--r--drivers/isdn/hisax/teleint.c334
-rw-r--r--drivers/isdn/hisax/teles0.c364
-rw-r--r--drivers/isdn/hisax/teles3.c498
-rw-r--r--drivers/isdn/hisax/teles_cs.c201
-rw-r--r--drivers/isdn/hisax/telespci.c349
-rw-r--r--drivers/isdn/hisax/w6692.c1085
-rw-r--r--drivers/isdn/hisax/w6692.h184
-rw-r--r--drivers/isdn/i4l/Kconfig129
-rw-r--r--drivers/isdn/i4l/Makefile20
-rw-r--r--drivers/isdn/i4l/isdn_audio.c711
-rw-r--r--drivers/isdn/i4l/isdn_audio.h44
-rw-r--r--drivers/isdn/i4l/isdn_bsdcomp.c930
-rw-r--r--drivers/isdn/i4l/isdn_common.c2368
-rw-r--r--drivers/isdn/i4l/isdn_common.h47
-rw-r--r--drivers/isdn/i4l/isdn_concap.c99
-rw-r--r--drivers/isdn/i4l/isdn_concap.h11
-rw-r--r--drivers/isdn/i4l/isdn_net.c3198
-rw-r--r--drivers/isdn/i4l/isdn_net.h151
-rw-r--r--drivers/isdn/i4l/isdn_ppp.c3046
-rw-r--r--drivers/isdn/i4l/isdn_ppp.h41
-rw-r--r--drivers/isdn/i4l/isdn_tty.c3756
-rw-r--r--drivers/isdn/i4l/isdn_tty.h120
-rw-r--r--drivers/isdn/i4l/isdn_ttyfax.c1123
-rw-r--r--drivers/isdn/i4l/isdn_ttyfax.h17
-rw-r--r--drivers/isdn/i4l/isdn_v110.c625
-rw-r--r--drivers/isdn/i4l/isdn_v110.h29
-rw-r--r--drivers/isdn/i4l/isdn_x25iface.c332
-rw-r--r--drivers/isdn/i4l/isdn_x25iface.h30
-rw-r--r--drivers/isdn/isdnloop/Makefile6
-rw-r--r--drivers/isdn/isdnloop/isdnloop.c1528
-rw-r--r--drivers/isdn/isdnloop/isdnloop.h112
-rw-r--r--drivers/isdn/mISDN/dsp_core.c2
-rw-r--r--drivers/leds/Kconfig35
-rw-r--r--drivers/leds/Makefile4
-rw-r--r--drivers/leds/leds-lm36274.c172
-rw-r--r--drivers/leds/leds-lm3697.c395
-rw-r--r--drivers/leds/leds-max77650.c2
-rw-r--r--drivers/leds/leds-pca955x.c2
-rw-r--r--drivers/leds/leds-pwm.c45
-rw-r--r--drivers/leds/leds-spi-byte.c161
-rw-r--r--drivers/leds/leds-tca6507.c2
-rw-r--r--drivers/leds/leds-ti-lmu-common.c156
-rw-r--r--drivers/leds/trigger/Kconfig2
-rw-r--r--drivers/leds/trigger/ledtrig-activity.c2
-rw-r--r--drivers/leds/trigger/ledtrig-transient.c2
-rw-r--r--drivers/lightnvm/core.c2
-rw-r--r--drivers/lightnvm/pblk-core.c18
-rw-r--r--drivers/md/Kconfig2
-rw-r--r--drivers/md/bcache/alloc.c9
-rw-r--r--drivers/md/bcache/bcache.h6
-rw-r--r--drivers/md/bcache/bset.c61
-rw-r--r--drivers/md/bcache/btree.c53
-rw-r--r--drivers/md/bcache/btree.h2
-rw-r--r--drivers/md/bcache/io.c12
-rw-r--r--drivers/md/bcache/journal.c141
-rw-r--r--drivers/md/bcache/journal.h4
-rw-r--r--drivers/md/bcache/super.c227
-rw-r--r--drivers/md/bcache/sysfs.c67
-rw-r--r--drivers/md/bcache/util.h2
-rw-r--r--drivers/md/bcache/writeback.c8
-rw-r--r--drivers/md/dm-init.c12
-rw-r--r--drivers/md/dm-log-writes.c23
-rw-r--r--drivers/md/dm-raid.c2
-rw-r--r--drivers/md/dm-table.c2
-rw-r--r--drivers/md/dm-verity-target.c4
-rw-r--r--drivers/md/md-bitmap.c20
-rw-r--r--drivers/md/md.c129
-rw-r--r--drivers/md/md.h23
-rw-r--r--drivers/md/raid1-10.c30
-rw-r--r--drivers/md/raid1.c119
-rw-r--r--drivers/md/raid10.c86
-rw-r--r--drivers/md/raid5.c12
-rw-r--r--drivers/media/Kconfig37
-rw-r--r--drivers/media/Makefile13
-rw-r--r--drivers/media/cec/cec-adap.c141
-rw-r--r--drivers/media/cec/cec-api.c8
-rw-r--r--drivers/media/cec/cec-core.c8
-rw-r--r--drivers/media/cec/cec-notifier.c112
-rw-r--r--drivers/media/cec/cec-priv.h5
-rw-r--r--drivers/media/common/saa7146/saa7146_fops.c9
-rw-r--r--drivers/media/common/saa7146/saa7146_video.c18
-rw-r--r--drivers/media/common/videobuf2/videobuf2-core.c5
-rw-r--r--drivers/media/common/videobuf2/videobuf2-dma-contig.c3
-rw-r--r--drivers/media/common/videobuf2/videobuf2-dma-sg.c5
-rw-r--r--drivers/media/common/videobuf2/videobuf2-memops.c9
-rw-r--r--drivers/media/common/videobuf2/videobuf2-v4l2.c10
-rw-r--r--drivers/media/common/videobuf2/videobuf2-vmalloc.c3
-rw-r--r--drivers/media/dvb-core/Kconfig3
-rw-r--r--drivers/media/dvb-core/dvb_frontend.c140
-rw-r--r--drivers/media/dvb-frontends/Kconfig3
-rw-r--r--drivers/media/dvb-frontends/rtl2832_sdr.c5
-rw-r--r--drivers/media/dvb-frontends/si2168.c7
-rw-r--r--drivers/media/dvb-frontends/stv0297.c2
-rw-r--r--drivers/media/dvb-frontends/stv090x.c197
-rw-r--r--drivers/media/dvb-frontends/stv090x.h3
-rw-r--r--drivers/media/dvb-frontends/stv090x_priv.h2
-rw-r--r--drivers/media/dvb-frontends/stv6110x.c135
-rw-r--r--drivers/media/dvb-frontends/stv6110x.h3
-rw-r--r--drivers/media/dvb-frontends/stv6110x_priv.h3
-rw-r--r--drivers/media/dvb-frontends/tua6100.c22
-rw-r--r--drivers/media/i2c/Kconfig5
-rw-r--r--drivers/media/i2c/Makefile2
-rw-r--r--drivers/media/i2c/adv7511-v4l2.c (renamed from drivers/media/i2c/adv7511.c)5
-rw-r--r--drivers/media/i2c/ak881x.c2
-rw-r--r--drivers/media/i2c/cx25840/cx25840-core.c1409
-rw-r--r--drivers/media/i2c/cx25840/cx25840-core.h30
-rw-r--r--drivers/media/i2c/cx25840/cx25840-vbi.c4
-rw-r--r--drivers/media/i2c/imx214.c2
-rw-r--r--drivers/media/i2c/mt9m001.c2
-rw-r--r--drivers/media/i2c/mt9m111.c40
-rw-r--r--drivers/media/i2c/mt9p031.c2
-rw-r--r--drivers/media/i2c/ov13858.c4
-rw-r--r--drivers/media/i2c/ov2640.c2
-rw-r--r--drivers/media/i2c/ov2685.c2
-rw-r--r--drivers/media/i2c/ov5695.c2
-rw-r--r--drivers/media/i2c/ov6650.c1
-rw-r--r--drivers/media/i2c/ov7740.c24
-rw-r--r--drivers/media/i2c/ov8856.c12
-rw-r--r--drivers/media/i2c/ov9640.c4
-rw-r--r--drivers/media/i2c/smiapp/smiapp-quirk.c2
-rw-r--r--drivers/media/i2c/st-mipid02.c60
-rw-r--r--drivers/media/i2c/tda7432.c3
-rw-r--r--drivers/media/i2c/tw9910.c3
-rw-r--r--drivers/media/i2c/video-i2c.c8
-rw-r--r--drivers/media/mc/Kconfig33
-rw-r--r--drivers/media/mc/Makefile10
-rw-r--r--drivers/media/mc/mc-dev-allocator.c (renamed from drivers/media/media-dev-allocator.c)0
-rw-r--r--drivers/media/mc/mc-device.c (renamed from drivers/media/media-device.c)10
-rw-r--r--drivers/media/mc/mc-devnode.c (renamed from drivers/media/media-devnode.c)0
-rw-r--r--drivers/media/mc/mc-entity.c (renamed from drivers/media/media-entity.c)0
-rw-r--r--drivers/media/mc/mc-request.c (renamed from drivers/media/media-request.c)0
-rw-r--r--drivers/media/pci/bt8xx/bttv-audio-hook.c2
-rw-r--r--drivers/media/pci/bt8xx/bttv-audio-hook.h2
-rw-r--r--drivers/media/pci/bt8xx/bttv-driver.c50
-rw-r--r--drivers/media/pci/cobalt/Kconfig2
-rw-r--r--drivers/media/pci/cobalt/cobalt-v4l2.c14
-rw-r--r--drivers/media/pci/cx18/cx18-ioctl.c5
-rw-r--r--drivers/media/pci/cx18/cx18-streams.c1
-rw-r--r--drivers/media/pci/cx23885/cx23885-417.c13
-rw-r--r--drivers/media/pci/cx23885/cx23885-dvb.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-video.c22
-rw-r--r--drivers/media/pci/cx25821/cx25821-video.c14
-rw-r--r--drivers/media/pci/cx88/cx88-alsa.c2
-rw-r--r--drivers/media/pci/cx88/cx88-blackbird.c6
-rw-r--r--drivers/media/pci/cx88/cx88-core.c2
-rw-r--r--drivers/media/pci/cx88/cx88-i2c.c1
-rw-r--r--drivers/media/pci/cx88/cx88-input.c4
-rw-r--r--drivers/media/pci/cx88/cx88-video.c34
-rw-r--r--drivers/media/pci/ddbridge/Kconfig1
-rw-r--r--drivers/media/pci/dt3155/Kconfig1
-rw-r--r--drivers/media/pci/dt3155/dt3155.c5
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-cio2.c2
-rw-r--r--drivers/media/pci/ivtv/Kconfig2
-rw-r--r--drivers/media/pci/ivtv/ivtv-cards.h3
-rw-r--r--drivers/media/pci/ivtv/ivtv-ioctl.c7
-rw-r--r--drivers/media/pci/ivtv/ivtv-streams.c14
-rw-r--r--drivers/media/pci/ivtv/ivtvfb.c16
-rw-r--r--drivers/media/pci/meye/Kconfig3
-rw-r--r--drivers/media/pci/meye/meye.c6
-rw-r--r--drivers/media/pci/saa7134/saa7134-core.c15
-rw-r--r--drivers/media/pci/saa7134/saa7134-empress.c4
-rw-r--r--drivers/media/pci/saa7134/saa7134-video.c46
-rw-r--r--drivers/media/pci/saa7164/saa7164-core.c33
-rw-r--r--drivers/media/pci/saa7164/saa7164-encoder.c15
-rw-r--r--drivers/media/pci/saa7164/saa7164-vbi.c15
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c5
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-v4l2.c5
-rw-r--r--drivers/media/pci/sta2x11/sta2x11_vip.c6
-rw-r--r--drivers/media/pci/ttpci/Kconfig3
-rw-r--r--drivers/media/pci/ttpci/av7110.c14
-rw-r--r--drivers/media/pci/ttpci/av7110.h21
-rw-r--r--drivers/media/pci/ttpci/av7110_ir.c423
-rw-r--r--drivers/media/pci/tw68/tw68-video.c8
-rw-r--r--drivers/media/pci/tw686x/tw686x-video.c5
-rw-r--r--drivers/media/platform/Kconfig12
-rw-r--r--drivers/media/platform/aspeed-video.c156
-rw-r--r--drivers/media/platform/atmel/Makefile4
-rw-r--r--drivers/media/platform/atmel/atmel-isc-base.c (renamed from drivers/media/platform/atmel/atmel-isc.c)783
-rw-r--r--drivers/media/platform/atmel/atmel-isc-regs.h6
-rw-r--r--drivers/media/platform/atmel/atmel-isc.h245
-rw-r--r--drivers/media/platform/atmel/atmel-sama5d2-isc.c348
-rw-r--r--drivers/media/platform/cec-gpio/cec-gpio.c28
-rw-r--r--drivers/media/platform/coda/Makefile5
-rw-r--r--drivers/media/platform/coda/coda-bit.c452
-rw-r--r--drivers/media/platform/coda/coda-common.c392
-rw-r--r--drivers/media/platform/coda/coda-h264.c3
-rw-r--r--drivers/media/platform/coda/coda-mpeg2.c87
-rw-r--r--drivers/media/platform/coda/coda-mpeg4.c87
-rw-r--r--drivers/media/platform/coda/coda.h47
-rw-r--r--drivers/media/platform/coda/coda_regs.h20
-rw-r--r--drivers/media/platform/coda/trace.h2
-rw-r--r--drivers/media/platform/davinci/vpif_capture.c16
-rw-r--r--drivers/media/platform/davinci/vpss.c7
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-core.c2
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-core.h2
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-m2m.c14
-rw-r--r--drivers/media/platform/exynos4-is/common.c5
-rw-r--r--drivers/media/platform/exynos4-is/common.h3
-rw-r--r--drivers/media/platform/exynos4-is/fimc-capture.c10
-rw-r--r--drivers/media/platform/exynos4-is/fimc-isp-video.c9
-rw-r--r--drivers/media/platform/exynos4-is/fimc-lite.c10
-rw-r--r--drivers/media/platform/exynos4-is/fimc-m2m.c12
-rw-r--r--drivers/media/platform/exynos4-is/media-dev.c6
-rw-r--r--drivers/media/platform/marvell-ccic/Kconfig2
-rw-r--r--drivers/media/platform/marvell-ccic/cafe-driver.c58
-rw-r--r--drivers/media/platform/marvell-ccic/mcam-core.c348
-rw-r--r--drivers/media/platform/marvell-ccic/mcam-core.h12
-rw-r--r--drivers/media/platform/marvell-ccic/mmp-driver.c238
-rw-r--r--drivers/media/platform/meson/ao-cec-g12a.c21
-rw-r--r--drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c6
-rw-r--r--drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c18
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c44
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.h2
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c2
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c4
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.h2
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h6
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c47
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.h2
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c2
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c2
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.h2
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.c2
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.h2
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c2
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_util.h2
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c23
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c23
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c25
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec_drv_base.h10
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec_drv_if.c22
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec_drv_if.h6
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec_ipi_msg.h2
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec_vpu_if.c2
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec_vpu_if.h2
-rw-r--r--drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c21
-rw-r--r--drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c21
-rw-r--r--drivers/media/platform/mtk-vcodec/venc_drv_base.h10
-rw-r--r--drivers/media/platform/mtk-vcodec/venc_drv_if.c15
-rw-r--r--drivers/media/platform/mtk-vcodec/venc_drv_if.h5
-rw-r--r--drivers/media/platform/mtk-vcodec/venc_ipi_msg.h2
-rw-r--r--drivers/media/platform/mtk-vcodec/venc_vpu_if.c2
-rw-r--r--drivers/media/platform/mtk-vcodec/venc_vpu_if.h2
-rw-r--r--drivers/media/platform/mtk-vpu/mtk_vpu.c2
-rw-r--r--drivers/media/platform/omap/Kconfig1
-rw-r--r--drivers/media/platform/omap3isp/isp.c18
-rw-r--r--drivers/media/platform/omap3isp/isph3a_aewb.c24
-rw-r--r--drivers/media/platform/omap3isp/isph3a_af.c24
-rw-r--r--drivers/media/platform/omap3isp/isphist.c11
-rw-r--r--drivers/media/platform/omap3isp/ispstat.c4
-rw-r--r--drivers/media/platform/omap3isp/ispvideo.c3
-rw-r--r--drivers/media/platform/pxa_camera.c2
-rw-r--r--drivers/media/platform/qcom/camss/camss-video.c2
-rw-r--r--drivers/media/platform/qcom/venus/core.c4
-rw-r--r--drivers/media/platform/qcom/venus/firmware.c6
-rw-r--r--drivers/media/platform/qcom/venus/helpers.c7
-rw-r--r--drivers/media/platform/qcom/venus/hfi_cmds.c2
-rw-r--r--drivers/media/platform/qcom/venus/vdec.c4
-rw-r--r--drivers/media/platform/qcom/venus/vdec_ctrls.c2
-rw-r--r--drivers/media/platform/qcom/venus/venc.c4
-rw-r--r--drivers/media/platform/qcom/venus/venc_ctrls.c23
-rw-r--r--drivers/media/platform/rcar-vin/rcar-csi2.c4
-rw-r--r--drivers/media/platform/rcar-vin/rcar-v4l2.c190
-rw-r--r--drivers/media/platform/rcar_fdp1.c12
-rw-r--r--drivers/media/platform/rcar_jpu.c10
-rw-r--r--drivers/media/platform/renesas-ceu.c2
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc.c5
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_dec.c19
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_enc.c21
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c4
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c8
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_pm.c5
-rw-r--r--drivers/media/platform/seco-cec/seco-cec.c2
-rw-r--r--drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.c4
-rw-r--r--drivers/media/platform/sti/hva/hva-v4l2.c4
-rw-r--r--drivers/media/platform/stm32/stm32-dcmi.c2
-rw-r--r--drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c1
-rw-r--r--drivers/media/platform/ti-vpe/vpe.c7
-rw-r--r--drivers/media/platform/vicodec/Kconfig1
-rw-r--r--drivers/media/platform/vicodec/vicodec-core.c313
-rw-r--r--drivers/media/platform/vim2m.c6
-rw-r--r--drivers/media/platform/vimc/Kconfig1
-rw-r--r--drivers/media/platform/vimc/Makefile12
-rw-r--r--drivers/media/platform/vimc/vimc-capture.c5
-rw-r--r--drivers/media/platform/vimc/vimc-common.c4
-rw-r--r--drivers/media/platform/vimc/vimc-core.c7
-rw-r--r--drivers/media/platform/vimc/vimc-debayer.c11
-rw-r--r--drivers/media/platform/vimc/vimc-scaler.c7
-rw-r--r--drivers/media/platform/vimc/vimc-sensor.c7
-rw-r--r--drivers/media/platform/vimc/vimc-streamer.c26
-rw-r--r--drivers/media/platform/vivid/Kconfig1
-rw-r--r--drivers/media/platform/vivid/vivid-core.c126
-rw-r--r--drivers/media/platform/vivid/vivid-core.h44
-rw-r--r--drivers/media/platform/vivid/vivid-ctrls.c108
-rw-r--r--drivers/media/platform/vivid/vivid-kthread-cap.c8
-rw-r--r--drivers/media/platform/vivid/vivid-osd.c2
-rw-r--r--drivers/media/platform/vivid/vivid-vbi-cap.c16
-rw-r--r--drivers/media/platform/vivid/vivid-vid-cap.c142
-rw-r--r--drivers/media/platform/vivid/vivid-vid-common.c28
-rw-r--r--drivers/media/platform/vivid/vivid-vid-common.h2
-rw-r--r--drivers/media/platform/vivid/vivid-vid-out.c6
-rw-r--r--drivers/media/radio/Kconfig1
-rw-r--r--drivers/media/radio/dsbr100.c3
-rw-r--r--drivers/media/radio/radio-cadet.c5
-rw-r--r--drivers/media/radio/radio-isa.c4
-rw-r--r--drivers/media/radio/radio-keene.c3
-rw-r--r--drivers/media/radio/radio-ma901.c3
-rw-r--r--drivers/media/radio/radio-miropcm20.c4
-rw-r--r--drivers/media/radio/radio-mr800.c5
-rw-r--r--drivers/media/radio/radio-raremono.c33
-rw-r--r--drivers/media/radio/radio-sf16fmi.c3
-rw-r--r--drivers/media/radio/radio-si476x.c21
-rw-r--r--drivers/media/radio/radio-tea5764.c3
-rw-r--r--drivers/media/radio/radio-tea5777.c5
-rw-r--r--drivers/media/radio/radio-timb.c3
-rw-r--r--drivers/media/radio/radio-wl1273.c12
-rw-r--r--drivers/media/radio/si470x/radio-si470x-i2c.c7
-rw-r--r--drivers/media/radio/si470x/radio-si470x-usb.c6
-rw-r--r--drivers/media/radio/si4713/radio-platform-si4713.c4
-rw-r--r--drivers/media/radio/si4713/radio-usb-si4713.c4
-rw-r--r--drivers/media/radio/tea575x.c7
-rw-r--r--drivers/media/radio/wl128x/fmdrv_v4l2.c13
-rw-r--r--drivers/media/rc/bpf-lirc.c30
-rw-r--r--drivers/media/rc/ir-spi.c1
-rw-r--r--drivers/media/rc/keymaps/rc-adstech-dvb-t-pci.c20
-rw-r--r--drivers/media/rc/keymaps/rc-alink-dtu-m.c20
-rw-r--r--drivers/media/rc/keymaps/rc-anysee.c20
-rw-r--r--drivers/media/rc/keymaps/rc-apac-viewcomp.c20
-rw-r--r--drivers/media/rc/keymaps/rc-astrometa-t2hybrid.c20
-rw-r--r--drivers/media/rc/keymaps/rc-asus-pc39.c20
-rw-r--r--drivers/media/rc/keymaps/rc-asus-ps3-100.c20
-rw-r--r--drivers/media/rc/keymaps/rc-ati-x10.c20
-rw-r--r--drivers/media/rc/keymaps/rc-avermedia-a16d.c20
-rw-r--r--drivers/media/rc/keymaps/rc-avermedia-cardbus.c20
-rw-r--r--drivers/media/rc/keymaps/rc-avermedia-dvbt.c20
-rw-r--r--drivers/media/rc/keymaps/rc-avermedia-m135a.c40
-rw-r--r--drivers/media/rc/keymaps/rc-avermedia-m733a-rm-k6.c20
-rw-r--r--drivers/media/rc/keymaps/rc-avermedia-rm-ks.c20
-rw-r--r--drivers/media/rc/keymaps/rc-avermedia.c20
-rw-r--r--drivers/media/rc/keymaps/rc-avertv-303.c20
-rw-r--r--drivers/media/rc/keymaps/rc-azurewave-ad-tu700.c20
-rw-r--r--drivers/media/rc/keymaps/rc-behold-columbus.c20
-rw-r--r--drivers/media/rc/keymaps/rc-behold.c20
-rw-r--r--drivers/media/rc/keymaps/rc-budget-ci-old.c20
-rw-r--r--drivers/media/rc/keymaps/rc-cinergy-1400.c20
-rw-r--r--drivers/media/rc/keymaps/rc-cinergy.c20
-rw-r--r--drivers/media/rc/keymaps/rc-d680-dmb.c20
-rw-r--r--drivers/media/rc/keymaps/rc-delock-61959.c20
-rw-r--r--drivers/media/rc/keymaps/rc-dib0700-nec.c40
-rw-r--r--drivers/media/rc/keymaps/rc-dib0700-rc5.c100
-rw-r--r--drivers/media/rc/keymaps/rc-digitalnow-tinytwin.c20
-rw-r--r--drivers/media/rc/keymaps/rc-digittrade.c20
-rw-r--r--drivers/media/rc/keymaps/rc-dm1105-nec.c20
-rw-r--r--drivers/media/rc/keymaps/rc-dntv-live-dvb-t.c20
-rw-r--r--drivers/media/rc/keymaps/rc-dntv-live-dvbt-pro.c20
-rw-r--r--drivers/media/rc/keymaps/rc-dtt200u.c20
-rw-r--r--drivers/media/rc/keymaps/rc-dvbsky.c20
-rw-r--r--drivers/media/rc/keymaps/rc-dvico-mce.c20
-rw-r--r--drivers/media/rc/keymaps/rc-dvico-portable.c20
-rw-r--r--drivers/media/rc/keymaps/rc-em-terratec.c20
-rw-r--r--drivers/media/rc/keymaps/rc-encore-enltv-fm53.c20
-rw-r--r--drivers/media/rc/keymaps/rc-encore-enltv.c20
-rw-r--r--drivers/media/rc/keymaps/rc-encore-enltv2.c20
-rw-r--r--drivers/media/rc/keymaps/rc-eztv.c20
-rw-r--r--drivers/media/rc/keymaps/rc-flydvb.c20
-rw-r--r--drivers/media/rc/keymaps/rc-flyvideo.c20
-rw-r--r--drivers/media/rc/keymaps/rc-fusionhdtv-mce.c20
-rw-r--r--drivers/media/rc/keymaps/rc-gadmei-rm008z.c20
-rw-r--r--drivers/media/rc/keymaps/rc-genius-tvgo-a11mce.c20
-rw-r--r--drivers/media/rc/keymaps/rc-gotview7135.c20
-rw-r--r--drivers/media/rc/keymaps/rc-hauppauge.c101
-rw-r--r--drivers/media/rc/keymaps/rc-hisi-poplar.c20
-rw-r--r--drivers/media/rc/keymaps/rc-hisi-tv-demo.c20
-rw-r--r--drivers/media/rc/keymaps/rc-iodata-bctv7e.c20
-rw-r--r--drivers/media/rc/keymaps/rc-it913x-v1.c40
-rw-r--r--drivers/media/rc/keymaps/rc-it913x-v2.c40
-rw-r--r--drivers/media/rc/keymaps/rc-kaiomy.c20
-rw-r--r--drivers/media/rc/keymaps/rc-kworld-315u.c20
-rw-r--r--drivers/media/rc/keymaps/rc-kworld-pc150u.c20
-rw-r--r--drivers/media/rc/keymaps/rc-kworld-plus-tv-analog.c24
-rw-r--r--drivers/media/rc/keymaps/rc-leadtek-y04g0051.c20
-rw-r--r--drivers/media/rc/keymaps/rc-lme2510.c60
-rw-r--r--drivers/media/rc/keymaps/rc-manli.c20
-rw-r--r--drivers/media/rc/keymaps/rc-medion-x10-digitainer.c20
-rw-r--r--drivers/media/rc/keymaps/rc-medion-x10-or2x.c20
-rw-r--r--drivers/media/rc/keymaps/rc-medion-x10.c20
-rw-r--r--drivers/media/rc/keymaps/rc-msi-digivox-ii.c20
-rw-r--r--drivers/media/rc/keymaps/rc-msi-digivox-iii.c20
-rw-r--r--drivers/media/rc/keymaps/rc-msi-tvanywhere-plus.c20
-rw-r--r--drivers/media/rc/keymaps/rc-msi-tvanywhere.c20
-rw-r--r--drivers/media/rc/keymaps/rc-nebula.c20
-rw-r--r--drivers/media/rc/keymaps/rc-nec-terratec-cinergy-xs.c40
-rw-r--r--drivers/media/rc/keymaps/rc-norwood.c20
-rw-r--r--drivers/media/rc/keymaps/rc-npgtech.c20
-rw-r--r--drivers/media/rc/keymaps/rc-pctv-sedna.c20
-rw-r--r--drivers/media/rc/keymaps/rc-pinnacle-color.c20
-rw-r--r--drivers/media/rc/keymaps/rc-pinnacle-grey.c20
-rw-r--r--drivers/media/rc/keymaps/rc-pinnacle-pctv-hd.c20
-rw-r--r--drivers/media/rc/keymaps/rc-pixelview-002t.c20
-rw-r--r--drivers/media/rc/keymaps/rc-pixelview-mk12.c20
-rw-r--r--drivers/media/rc/keymaps/rc-pixelview-new.c20
-rw-r--r--drivers/media/rc/keymaps/rc-pixelview.c20
-rw-r--r--drivers/media/rc/keymaps/rc-powercolor-real-angel.c20
-rw-r--r--drivers/media/rc/keymaps/rc-proteus-2309.c20
-rw-r--r--drivers/media/rc/keymaps/rc-purpletv.c20
-rw-r--r--drivers/media/rc/keymaps/rc-pv951.c20
-rw-r--r--drivers/media/rc/keymaps/rc-real-audio-220-32-keys.c20
-rw-r--r--drivers/media/rc/keymaps/rc-reddo.c20
-rw-r--r--drivers/media/rc/keymaps/rc-snapstream-firefly.c20
-rw-r--r--drivers/media/rc/keymaps/rc-su3000.c20
-rw-r--r--drivers/media/rc/keymaps/rc-tango.c20
-rw-r--r--drivers/media/rc/keymaps/rc-tbs-nec.c20
-rw-r--r--drivers/media/rc/keymaps/rc-technisat-ts35.c20
-rw-r--r--drivers/media/rc/keymaps/rc-technisat-usb2.c20
-rw-r--r--drivers/media/rc/keymaps/rc-terratec-cinergy-c-pci.c20
-rw-r--r--drivers/media/rc/keymaps/rc-terratec-cinergy-s2-hd.c20
-rw-r--r--drivers/media/rc/keymaps/rc-terratec-cinergy-xs.c20
-rw-r--r--drivers/media/rc/keymaps/rc-terratec-slim-2.c20
-rw-r--r--drivers/media/rc/keymaps/rc-terratec-slim.c20
-rw-r--r--drivers/media/rc/keymaps/rc-tevii-nec.c20
-rw-r--r--drivers/media/rc/keymaps/rc-total-media-in-hand-02.c20
-rw-r--r--drivers/media/rc/keymaps/rc-total-media-in-hand.c20
-rw-r--r--drivers/media/rc/keymaps/rc-trekstor.c20
-rw-r--r--drivers/media/rc/keymaps/rc-tt-1500.c20
-rw-r--r--drivers/media/rc/keymaps/rc-twinhan-dtv-cab-ci.c20
-rw-r--r--drivers/media/rc/keymaps/rc-twinhan1027.c20
-rw-r--r--drivers/media/rc/keymaps/rc-videomate-m1f.c20
-rw-r--r--drivers/media/rc/keymaps/rc-videomate-s350.c20
-rw-r--r--drivers/media/rc/keymaps/rc-videomate-tv-pvr.c20
-rw-r--r--drivers/media/rc/keymaps/rc-winfast-usbii-deluxe.c20
-rw-r--r--drivers/media/rc/keymaps/rc-winfast.c20
-rw-r--r--drivers/media/rc/keymaps/rc-xbox-dvd.c20
-rw-r--r--drivers/media/rc/keymaps/rc-zx-irdec.c20
-rw-r--r--drivers/media/rc/lirc_dev.c2
-rw-r--r--drivers/media/rc/mceusb.c4
-rw-r--r--drivers/media/rc/meson-ir.c6
-rw-r--r--drivers/media/rc/mtk-cir.c4
-rw-r--r--drivers/media/rc/rc-main.c6
-rw-r--r--drivers/media/rc/sunxi-cir.c1
-rw-r--r--drivers/media/spi/Kconfig2
-rw-r--r--drivers/media/tuners/Kconfig2
-rw-r--r--drivers/media/tuners/si2157.c6
-rw-r--r--drivers/media/tuners/si2157_priv.h3
-rw-r--r--drivers/media/usb/airspy/airspy.c6
-rw-r--r--drivers/media/usb/au0828/au0828-core.c12
-rw-r--r--drivers/media/usb/au0828/au0828-video.c21
-rw-r--r--drivers/media/usb/cpia2/cpia2_usb.c3
-rw-r--r--drivers/media/usb/cpia2/cpia2_v4l.c9
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-cards.c2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-dvb.c1
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-video.c28
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9035.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/anysee.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvb_usb_urb.c15
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvbsky.c11
-rw-r--r--drivers/media/usb/dvb-usb/Kconfig16
-rw-r--r--drivers/media/usb/dvb-usb/Makefile3
-rw-r--r--drivers/media/usb/dvb-usb/cxusb-analog.c1845
-rw-r--r--drivers/media/usb/dvb-usb/cxusb.c796
-rw-r--r--drivers/media/usb/dvb-usb/cxusb.h158
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb-dvb.c5
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb-init.c20
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb.h10
-rw-r--r--drivers/media/usb/em28xx/em28xx-input.c35
-rw-r--r--drivers/media/usb/em28xx/em28xx-video.c32
-rw-r--r--drivers/media/usb/go7007/go7007-v4l2.c15
-rw-r--r--drivers/media/usb/gspca/gspca.c6
-rw-r--r--drivers/media/usb/hackrf/hackrf.c14
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-video.c22
-rw-r--r--drivers/media/usb/msi2500/msi2500.c5
-rw-r--r--drivers/media/usb/pvrusb2/Kconfig2
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-cx2584x-v4l.c25
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-devattr.c212
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-devattr.h1
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-dvb.c88
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-dvb.h5
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-fx2-cmd.h4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-hdw.c40
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c6
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-std.c2
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-sysfs.c3
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-v4l2.c17
-rw-r--r--drivers/media/usb/pwc/pwc-if.c2
-rw-r--r--drivers/media/usb/pwc/pwc-v4l.c3
-rw-r--r--drivers/media/usb/pwc/pwc.h18
-rw-r--r--drivers/media/usb/s2255/Kconfig1
-rw-r--r--drivers/media/usb/s2255/s2255drv.c5
-rw-r--r--drivers/media/usb/stk1160/stk1160-v4l.c7
-rw-r--r--drivers/media/usb/stkwebcam/stk-webcam.c6
-rw-r--r--drivers/media/usb/tm6000/tm6000-video.c20
-rw-r--r--drivers/media/usb/usbtv/usbtv-video.c5
-rw-r--r--drivers/media/usb/usbvision/usbvision-video.c20
-rw-r--r--drivers/media/usb/uvc/uvc_ctrl.c4
-rw-r--r--drivers/media/usb/uvc/uvc_debugfs.c5
-rw-r--r--drivers/media/usb/zr364xx/zr364xx.c10
-rw-r--r--drivers/media/v4l2-core/Kconfig2
-rw-r--r--drivers/media/v4l2-core/v4l2-common.c32
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls.c126
-rw-r--r--drivers/media/v4l2-core/v4l2-dev.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-fwnode.c10
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c27
-rw-r--r--drivers/media/v4l2-core/v4l2-mem2mem.c29
-rw-r--r--drivers/media/v4l2-core/v4l2-subdev.c268
-rw-r--r--drivers/media/v4l2-core/videobuf-dma-contig.c4
-rw-r--r--drivers/media/v4l2-core/videobuf-vmalloc.c2
-rw-r--r--drivers/memory/omap-gpmc.c4
-rw-r--r--drivers/message/fusion/mptbase.c3
-rw-r--r--drivers/mfd/Kconfig5
-rw-r--r--drivers/mfd/ti-lmu.c23
-rw-r--r--drivers/misc/lkdtm/bugs.c2
-rw-r--r--drivers/misc/lkdtm/core.c2
-rw-r--r--drivers/mtd/devices/Kconfig2
-rw-r--r--drivers/mtd/nand/raw/ingenic/Kconfig2
-rw-r--r--drivers/mtd/nand/raw/ingenic/Makefile4
-rw-r--r--drivers/mtd/nand/raw/ingenic/ingenic_ecc.c9
-rw-r--r--drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c (renamed from drivers/mtd/nand/raw/ingenic/ingenic_nand.c)0
-rw-r--r--drivers/mtd/nand/raw/sunxi_nand.c40
-rw-r--r--drivers/mtd/nand/spi/gigadevice.c2
-rw-r--r--drivers/mtd/nand/spi/macronix.c4
-rw-r--r--drivers/net/bonding/bond_3ad.c222
-rw-r--r--drivers/net/bonding/bond_alb.c30
-rw-r--r--drivers/net/bonding/bond_main.c388
-rw-r--r--drivers/net/bonding/bond_netlink.c14
-rw-r--r--drivers/net/bonding/bond_options.c101
-rw-r--r--drivers/net/bonding/bond_procfs.c2
-rw-r--r--drivers/net/bonding/bond_sysfs.c13
-rw-r--r--drivers/net/can/softing/softing_main.c4
-rw-r--r--drivers/net/dsa/Kconfig24
-rw-r--r--drivers/net/dsa/Makefile4
-rw-r--r--drivers/net/dsa/b53/b53_common.c4
-rw-r--r--drivers/net/dsa/microchip/Kconfig1
-rw-r--r--drivers/net/dsa/microchip/ksz9477.c229
-rw-r--r--drivers/net/dsa/microchip/ksz9477_spi.c114
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c8
-rw-r--r--drivers/net/dsa/microchip/ksz_common.h169
-rw-r--r--drivers/net/dsa/microchip/ksz_priv.h25
-rw-r--r--drivers/net/dsa/microchip/ksz_spi.h69
-rw-r--r--drivers/net/dsa/mt7530.c46
-rw-r--r--drivers/net/dsa/mt7530.h4
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c269
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.h18
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1.c35
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1.h16
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1_atu.c11
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1_vtu.c64
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.c46
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.h14
-rw-r--r--drivers/net/dsa/mv88e6xxx/hwtstamp.c28
-rw-r--r--drivers/net/dsa/mv88e6xxx/phy.c4
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.c77
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.h14
-rw-r--r--drivers/net/dsa/mv88e6xxx/ptp.c32
-rw-r--r--drivers/net/dsa/mv88e6xxx/serdes.c24
-rw-r--r--drivers/net/dsa/mv88e6xxx/smi.c25
-rw-r--r--drivers/net/dsa/qca8k.c15
-rw-r--r--drivers/net/dsa/qca8k.h2
-rw-r--r--drivers/net/dsa/sja1105/Kconfig9
-rw-r--r--drivers/net/dsa/sja1105/Makefile4
-rw-r--r--drivers/net/dsa/sja1105/sja1105.h54
-rw-r--r--drivers/net/dsa/sja1105/sja1105_clocking.c100
-rw-r--r--drivers/net/dsa/sja1105/sja1105_dynamic_config.c296
-rw-r--r--drivers/net/dsa/sja1105/sja1105_dynamic_config.h11
-rw-r--r--drivers/net/dsa/sja1105/sja1105_main.c868
-rw-r--r--drivers/net/dsa/sja1105/sja1105_ptp.c393
-rw-r--r--drivers/net/dsa/sja1105/sja1105_ptp.h64
-rw-r--r--drivers/net/dsa/sja1105/sja1105_spi.c70
-rw-r--r--drivers/net/dsa/sja1105/sja1105_static_config.c88
-rw-r--r--drivers/net/dsa/sja1105/sja1105_static_config.h37
-rw-r--r--drivers/net/dsa/vitesse-vsc73xx-core.c (renamed from drivers/net/dsa/vitesse-vsc73xx.c)206
-rw-r--r--drivers/net/dsa/vitesse-vsc73xx-platform.c164
-rw-r--r--drivers/net/dsa/vitesse-vsc73xx-spi.c203
-rw-r--r--drivers/net/dsa/vitesse-vsc73xx.h29
-rw-r--r--drivers/net/ethernet/Kconfig1
-rw-r--r--drivers/net/ethernet/Makefile1
-rw-r--r--drivers/net/ethernet/allwinner/sun4i-emac.c5
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_admin_defs.h61
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.c145
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.h19
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_eth_com.c54
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_eth_com.h73
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_ethtool.c35
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c389
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.h42
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_cfg.h7
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.h2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_filters.c2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_filters.h2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_main.c34
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c28
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.h2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.c4
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.h9
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c62
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h7
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c16
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h5
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h18
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/ver.h5
-rw-r--r--drivers/net/ethernet/atheros/Kconfig10
-rw-r--r--drivers/net/ethernet/atheros/Makefile1
-rw-r--r--drivers/net/ethernet/atheros/ag71xx.c1898
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c2
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig2
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c1
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c20
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c7
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c33
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h3
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c125
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h21
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c9
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c8
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c18
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c29
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c144
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h7
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c18
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h4
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c2
-rw-r--r--drivers/net/ethernet/cadence/Kconfig10
-rw-r--r--drivers/net/ethernet/cadence/macb.h12
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c143
-rw-r--r--drivers/net/ethernet/cadence/macb_ptp.c7
-rw-r--r--drivers/net/ethernet/calxeda/xgmac.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/Makefile2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h62
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c49
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c240
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_mps.c241
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c22
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c21
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c79
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_regs.h4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h28
-rw-r--r--drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c47
-rw-r--r--drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.h7
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c2
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/Kconfig3
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c147
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h9
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c242
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h48
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dprtc.c191
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dprtc.h62
-rw-r--r--drivers/net/ethernet/freescale/enetc/Kconfig10
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.c216
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.h18
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ethtool.c31
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_hw.h25
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.c2
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ptp.c5
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_vf.c2
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c16
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c2
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_keygen.c3
-rw-r--r--drivers/net/ethernet/google/Kconfig27
-rw-r--r--drivers/net/ethernet/google/Makefile5
-rw-r--r--drivers/net/ethernet/google/gve/Makefile4
-rw-r--r--drivers/net/ethernet/google/gve/gve.h459
-rw-r--r--drivers/net/ethernet/google/gve/gve_adminq.c387
-rw-r--r--drivers/net/ethernet/google/gve/gve_adminq.h217
-rw-r--r--drivers/net/ethernet/google/gve/gve_desc.h113
-rw-r--r--drivers/net/ethernet/google/gve/gve_ethtool.c245
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c1232
-rw-r--r--drivers/net/ethernet/google/gve/gve_register.h27
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx.c446
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx.c584
-rw-r--r--drivers/net/ethernet/hisilicon/Kconfig10
-rw-r--r--drivers/net/ethernet/hisilicon/hip04_eth.c142
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c1
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.c26
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h27
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c12
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c455
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.h27
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c60
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c70
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h43
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c95
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c799
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h21
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c1348
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h62
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c32
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c15
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c170
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h3
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c59
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h14
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c286
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h9
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c3
-rw-r--r--drivers/net/ethernet/huawei/hinic/Makefile2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_dev.h28
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_ethtool.c762
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c12
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h56
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_io.c60
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_qp_ctxt.h5
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h53
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_main.c339
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_port.c638
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_port.h371
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_rx.c82
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_rx.h7
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_tx.c25
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_tx.h1
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c6
-rw-r--r--drivers/net/ethernet/intel/e1000e/80003es2lan.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/82571.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/defines.h3
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h5
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c14
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c20
-rw-r--r--drivers/net/ethernet/intel/e1000e/mac.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c111
-rw-r--r--drivers/net/ethernet/intel/e1000e/nvm.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h32
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c8
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c43
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c9
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c86
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c672
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ptp.c3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c118
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c13
-rw-r--r--drivers/net/ethernet/intel/iavf/Makefile2
-rw-r--r--drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h530
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf.h13
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_adminq.c (renamed from drivers/net/ethernet/intel/iavf/i40e_adminq.c)267
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_adminq.h (renamed from drivers/net/ethernet/intel/iavf/i40e_adminq.h)80
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_adminq_cmd.h528
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_alloc.h17
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_client.c127
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_client.h104
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_common.c499
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_ethtool.c16
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c868
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_osdep.h11
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_prototype.h58
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_status.h136
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_trace.h4
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_txrx.c41
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_type.h4
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_virtchnl.c77
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h63
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h49
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c250
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h11
-rw-r--r--drivers/net/ethernet/intel/ice/ice_controlq.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_controlq.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb.c35
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb.h12
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.c230
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.h5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c1027
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hw_autogen.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c477
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.h14
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c362
-rw-r--r--drivers/net/ethernet/intel/ice/ice_nvm.c35
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_status.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c9
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.h7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c16
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h35
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h13
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c301
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h33
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_regs.h2
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c75
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c47
-rw-r--r--drivers/net/ethernet/intel/igc/igc_base.c49
-rw-r--r--drivers/net/ethernet/intel/igc/igc_defines.h18
-rw-r--r--drivers/net/ethernet/intel/igc/igc_hw.h3
-rw-r--r--drivers/net/ethernet/intel/igc/igc_mac.c23
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c22
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h14
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c36
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c181
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h14
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c97
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c10
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.c5
-rw-r--r--drivers/net/ethernet/marvell/mvmdio.c11
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c38
-rw-r--r--drivers/net/ethernet/marvell/mvneta_bm.c4
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2.h39
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c400
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h43
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c244
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c3
-rw-r--r--drivers/net/ethernet/mediatek/Makefile3
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_path.c352
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c138
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h199
-rw-r--r--drivers/net/ethernet/mediatek/mtk_sgmii.c105
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig53
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c45
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h51
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cq.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/dev.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.c118
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/crdump.c115
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c139
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ecpf.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ecpf.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h285
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.c108
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.h118
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c293
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h43
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c335
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c95
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c151
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h208
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c231
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h37
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/Makefile1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c192
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c223
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.h25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c111
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c267
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.h31
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c93
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h97
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c460
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dim.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c66
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c845
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c323
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c132
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c143
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h44
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c139
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c105
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c54
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c507
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c233
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h114
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c786
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c277
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/events.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h75
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c76
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c237
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c569
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c33
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c72
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/geneve.c157
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/geneve.h33
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c33
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c316
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.h32
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c114
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mr.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c334
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/rdma.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sriov.c52
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c43
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wq.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlxfw/mlxfw.h11
-rw-r--r--drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c57
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Kconfig2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Makefile1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/cmd.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c57
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h30
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h22
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_env.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c143
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_thermal.c248
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/i2c.c76
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/minimal.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c49
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci_hw.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h522
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c584
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h35
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c80
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c1111
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h186
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c273
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/switchx2.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/trap.h6
-rw-r--r--drivers/net/ethernet/mscc/Makefile2
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c26
-rw-r--r--drivers/net/ethernet/mscc/ocelot.h11
-rw-r--r--drivers/net/ethernet/mscc/ocelot_ace.c782
-rw-r--r--drivers/net/ethernet/mscc/ocelot_ace.h232
-rw-r--r--drivers/net/ethernet/mscc/ocelot_board.c1
-rw-r--r--drivers/net/ethernet/mscc/ocelot_flower.c363
-rw-r--r--drivers/net/ethernet/mscc/ocelot_police.c227
-rw-r--r--drivers/net/ethernet/mscc/ocelot_police.h22
-rw-r--r--drivers/net/ethernet/mscc/ocelot_regs.c11
-rw-r--r--drivers/net/ethernet/mscc/ocelot_s2.h64
-rw-r--r--drivers/net/ethernet/mscc/ocelot_tc.c197
-rw-r--r--drivers/net/ethernet/mscc/ocelot_tc.h22
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vcap.h403
-rw-r--r--drivers/net/ethernet/netronome/Kconfig1
-rw-r--r--drivers/net/ethernet/netronome/nfp/Makefile6
-rw-r--r--drivers/net/ethernet/netronome/nfp/abm/cls.c22
-rw-r--r--drivers/net/ethernet/netronome/nfp/abm/main.h2
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/jit.c115
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.c30
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.h2
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/verifier.c12
-rw-r--r--drivers/net/ethernet/netronome/nfp/ccm.c3
-rw-r--r--drivers/net/ethernet/netronome/nfp/ccm.h60
-rw-r--r--drivers/net/ethernet/netronome/nfp/ccm_mbox.c743
-rw-r--r--drivers/net/ethernet/netronome/nfp/crypto/crypto.h27
-rw-r--r--drivers/net/ethernet/netronome/nfp/crypto/fw.h84
-rw-r--r--drivers/net/ethernet/netronome/nfp/crypto/tls.c522
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/action.c260
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.h57
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/lag_conf.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.h18
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/match.c149
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/metadata.c30
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c339
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c3
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net.h73
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c212
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c15
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h21
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c26
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c7
-rw-r--r--drivers/net/ethernet/ni/nixge.c2
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c2
-rw-r--r--drivers/net/ethernet/qlogic/Kconfig1
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c8
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h24
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.c5
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_debug.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c1276
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev_api.h113
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_fcoe.c26
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hsi.h16
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hw.c44
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_init_ops.c9
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.c8
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iscsi.c35
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iwarp.c67
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iwarp.h4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.c4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.c406
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c157
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c65
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.h16
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ptp.c11
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_rdma.c75
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_reg_addr.h6
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp_commands.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c3
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede.h4
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c1
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_filter.c2
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c42
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ptp.c37
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c5
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c2
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h25
-rw-r--r--drivers/net/ethernet/realtek/Makefile1
-rw-r--r--drivers/net/ethernet/realtek/r8169_firmware.c231
-rw-r--r--drivers/net/ethernet/realtek/r8169_firmware.h39
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c (renamed from drivers/net/ethernet/realtek/r8169.c)1212
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c4
-rw-r--r--drivers/net/ethernet/rocker/rocker_ofdpa.c25
-rw-r--r--drivers/net/ethernet/sfc/efx.c6
-rw-r--r--drivers/net/ethernet/sis/sis900.c24
-rw-r--r--drivers/net/ethernet/smsc/Kconfig6
-rw-r--r--drivers/net/ethernet/socionext/Kconfig1
-rw-r--r--drivers/net/ethernet/socionext/netsec.c577
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig16
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h20
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c118
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c42
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c22
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4.h7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c86
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h20
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c29
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c41
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.h25
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/mmc.h4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/mmc_core.c13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h41
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c96
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c816
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c104
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c26
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c850
-rw-r--r--drivers/net/ethernet/sun/niu.c2
-rw-r--r--drivers/net/ethernet/ti/Kconfig2
-rw-r--r--drivers/net/ethernet/ti/cpsw.c561
-rw-r--r--drivers/net/ethernet/ti/cpsw_ethtool.c97
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.h8
-rw-r--r--drivers/net/ethernet/ti/cpts.c88
-rw-r--r--drivers/net/ethernet/ti/cpts.h2
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.c187
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.h9
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c4
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c9
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.h2
-rw-r--r--drivers/net/ethernet/via/via-velocity.h2
-rw-r--r--drivers/net/ethernet/wiznet/w5100-spi.c24
-rw-r--r--drivers/net/ethernet/xilinx/Kconfig6
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac.h5
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c258
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_mdio.c20
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet.h35
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c678
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c111
-rw-r--r--drivers/net/fddi/skfp/drvfbi.c3
-rw-r--r--drivers/net/fddi/skfp/h/skfbi.h231
-rw-r--r--drivers/net/fjes/fjes_debugfs.c15
-rw-r--r--drivers/net/gtp.c37
-rw-r--r--drivers/net/loopback.c78
-rw-r--r--drivers/net/macsec.c6
-rw-r--r--drivers/net/macvlan.c2
-rw-r--r--drivers/net/netdevsim/dev.c44
-rw-r--r--drivers/net/netdevsim/netdev.c29
-rw-r--r--drivers/net/netdevsim/netdevsim.h1
-rw-r--r--drivers/net/phy/Kconfig6
-rw-r--r--drivers/net/phy/Makefile1
-rw-r--r--drivers/net/phy/aquantia_main.c8
-rw-r--r--drivers/net/phy/bcm87xx.c20
-rw-r--r--drivers/net/phy/broadcom.c2
-rw-r--r--drivers/net/phy/dp83867.c193
-rw-r--r--drivers/net/phy/lxt.c6
-rw-r--r--drivers/net/phy/nxp-tja11xx.c403
-rw-r--r--drivers/net/phy/phy-core.c4
-rw-r--r--drivers/net/phy/phy.c128
-rw-r--r--drivers/net/phy/phy_device.c109
-rw-r--r--drivers/net/phy/phylink.c288
-rw-r--r--drivers/net/phy/sfp-bus.c14
-rw-r--r--drivers/net/phy/sfp.c72
-rw-r--r--drivers/net/plip/plip.c4
-rw-r--r--drivers/net/ppp/Kconfig3
-rw-r--r--drivers/net/ppp/ppp_mppe.c97
-rw-r--r--drivers/net/tap.c5
-rw-r--r--drivers/net/team/team.c25
-rw-r--r--drivers/net/tun.c8
-rw-r--r--drivers/net/usb/asix_devices.c6
-rw-r--r--drivers/net/usb/r8152.c101
-rw-r--r--drivers/net/veth.c61
-rw-r--r--drivers/net/virtio_net.c2
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c20
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c10
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h7
-rw-r--r--drivers/net/vrf.c5
-rw-r--r--drivers/net/vxlan.c131
-rw-r--r--drivers/net/wan/hdlc_cisco.c11
-rw-r--r--drivers/net/wan/x25_asy.c4
-rw-r--r--drivers/net/wireless/ath/Kconfig2
-rw-r--r--drivers/net/wireless/ath/Makefile2
-rw-r--r--drivers/net/wireless/ath/ar5523/Kconfig2
-rw-r--r--drivers/net/wireless/ath/ar5523/Makefile2
-rw-r--r--drivers/net/wireless/ath/ath10k/Kconfig2
-rw-r--r--drivers/net/wireless/ath/ath10k/ahb.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c80
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h27
-rw-r--r--drivers/net/wireless/ath/ath10k/coredump.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c58
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.h25
-rw-r--r--drivers/net/wireless/ath/ath10k/debugfs_sta.c7
-rw-r--r--drivers/net/wireless/ath/ath10k/hif.h15
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.c1
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h76
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c401
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c38
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.c6
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h13
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c223
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c27
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi.c61
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/sdio.c35
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.c19
-rw-r--r--drivers/net/wireless/ath/ath10k/swap.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/testmode.c17
-rw-r--r--drivers/net/wireless/ath/ath10k/trace.c1
-rw-r--r--drivers/net/wireless/ath/ath10k/trace.h6
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.c3
-rw-r--r--drivers/net/wireless/ath/ath10k/usb.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.c61
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.h20
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c37
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h23
-rw-r--r--drivers/net/wireless/ath/ath5k/Kconfig2
-rw-r--r--drivers/net/wireless/ath/ath5k/Makefile2
-rw-r--r--drivers/net/wireless/ath/ath6kl/Kconfig2
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c4
-rw-r--r--drivers/net/wireless/ath/ath6kl/debug.c3
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_pipe.c3
-rw-r--r--drivers/net/wireless/ath/ath6kl/trace.h2
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c13
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig2
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c24
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_4k.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c40
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c18
-rw-r--r--drivers/net/wireless/ath/carl9170/mac.c2
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c9
-rw-r--r--drivers/net/wireless/ath/carl9170/rx.c2
-rw-r--r--drivers/net/wireless/ath/carl9170/usb.c39
-rw-r--r--drivers/net/wireless/ath/dfs_pattern_detector.c2
-rw-r--r--drivers/net/wireless/ath/regd.h1
-rw-r--r--drivers/net/wireless/ath/wcn36xx/Kconfig2
-rw-r--r--drivers/net/wireless/ath/wcn36xx/Makefile2
-rw-r--r--drivers/net/wireless/ath/wil6210/Kconfig2
-rw-r--r--drivers/net/wireless/ath/wil6210/Makefile2
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c26
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c238
-rw-r--r--drivers/net/wireless/ath/wil6210/fw.h11
-rw-r--r--drivers/net/wireless/ath/wil6210/fw_inc.c148
-rw-r--r--drivers/net/wireless/ath/wil6210/interrupt.c67
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c37
-rw-r--r--drivers/net/wireless/ath/wil6210/pcie_bus.c3
-rw-r--r--drivers/net/wireless/ath/wil6210/rx_reorder.c33
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c35
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx_edma.c26
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx_edma.h2
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h39
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c141
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.h47
-rw-r--r--drivers/net/wireless/broadcom/b43/dma.c69
-rw-r--r--drivers/net/wireless/broadcom/b43/main.c7
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/dma.c57
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/Kconfig52
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/Makefile14
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/Kconfig50
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile14
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.h13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.h13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.h13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c15
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h16
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/commonring.c16
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/commonring.h16
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c16
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.h16
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c15
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h14
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c16
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h16
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.h13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c16
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.h16
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.h13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.c13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.h13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.h13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.h13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_hal.h13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_int.h13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.h13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_qmath.c13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_qmath.h13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_radio.h13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phyreg_n.h13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_lcn.c13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_lcn.h13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_n.c13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_n.h13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmutil/Makefile13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/include/brcmu_d11.h13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/include/brcmu_utils.h13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/include/brcmu_wifi.h13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/include/chipcommon.h13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/include/defs.h13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/include/soc.h13
-rw-r--r--drivers/net/wireless/cisco/Kconfig2
-rw-r--r--drivers/net/wireless/cisco/airo.c57
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-rs.c17
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945.h3
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-rs.c35
-rw-r--r--drivers/net/wireless/intel/iwlegacy/Kconfig4
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Kconfig2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c144
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/lib.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rs.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.c28
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h22
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/location.h11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/power.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/scan.h15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c427
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.h133
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/error-dump.h111
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/init.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h28
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/smem.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-csr.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c33
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c35
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h75
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/constants.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c66
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c72
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c66
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/nvm.c9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c26
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c22
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c241
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c68
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c204
-rw-r--r--drivers/net/wireless/intersil/p54/main.c9
-rw-r--r--drivers/net/wireless/intersil/p54/p54usb.c43
-rw-r--r--drivers/net/wireless/intersil/p54/txrx.c11
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c4
-rw-r--r--drivers/net/wireless/marvell/libertas/if_usb.c2
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/if_usb.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n.c53
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n.h5
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n_aggr.c26
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n_aggr.h2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c125
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c37
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cmdevt.c103
-rw-r--r--drivers/net/wireless/marvell/mwifiex/fw.h12
-rw-r--r--drivers/net/wireless/marvell/mwifiex/init.c32
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c35
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.c5
-rw-r--r--drivers/net/wireless/marvell/mwifiex/scan.c76
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c5
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_event.c10
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_ioctl.c4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/tdls.c68
-rw-r--r--drivers/net/wireless/marvell/mwifiex/txrx.c5
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_txrx.c10
-rw-r--r--drivers/net/wireless/marvell/mwifiex/usb.c10
-rw-r--r--drivers/net/wireless/marvell/mwifiex/util.c15
-rw-r--r--drivers/net/wireless/marvell/mwifiex/wmm.c111
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c62
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h24
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/core.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c30
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/dma.c29
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/eeprom.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/init.c26
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mac.c191
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/main.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h15
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/regs.h6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/dma.c23
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c97
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h61
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/init.c77
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mac.c85
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mac.h5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/main.c52
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.c1265
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.h56
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h16
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/init.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/main.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/phy.c13
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02.h1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c10
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c18
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_dfs.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.c106
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c18
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_regs.h3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/init.c9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c16
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c23
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb.c66
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/dma.c54
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/tx.c4
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/commands.c5
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c96
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.h11
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800mmio.c31
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800mmio.h2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800pci.c3
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800soc.c3
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800usb.c11
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00.h10
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00debug.c35
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00dev.c10
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00link.c15
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00queue.h6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c35
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/efuse.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rc.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c695
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c253
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h708
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/wifi.h1
-rw-r--r--drivers/net/wireless/realtek/rtw88/hci.h2
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.c8
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac80211.c32
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.c36
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.h38
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.c10
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.c1265
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.h18
-rw-r--r--drivers/net/wireless/realtek/rtw88/regd.c69
-rw-r--r--drivers/net/wireless/realtek/rtw88/regd.h4
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.c436
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.h23
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c_table.c799
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.c2
-rw-r--r--drivers/net/wireless/ti/wl18xx/main.c38
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/rx.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.c2
-rw-r--r--drivers/net/wireless/virt_wifi.c2
-rw-r--r--drivers/net/xen-netback/interface.c2
-rw-r--r--drivers/nfc/st-nci/i2c.c2
-rw-r--r--drivers/nvme/host/core.c45
-rw-r--r--drivers/nvme/host/fabrics.c2
-rw-r--r--drivers/nvme/host/fault_inject.c41
-rw-r--r--drivers/nvme/host/fc.c13
-rw-r--r--drivers/nvme/host/lightnvm.c2
-rw-r--r--drivers/nvme/host/nvme.h42
-rw-r--r--drivers/nvme/host/pci.c143
-rw-r--r--drivers/nvme/host/rdma.c7
-rw-r--r--drivers/nvme/host/trace.c64
-rw-r--r--drivers/nvme/host/trace.h66
-rw-r--r--drivers/nvme/target/Makefile3
-rw-r--r--drivers/nvme/target/core.c12
-rw-r--r--drivers/nvme/target/discovery.c4
-rw-r--r--drivers/nvme/target/fabrics-cmd.c2
-rw-r--r--drivers/nvme/target/fc.c13
-rw-r--r--drivers/nvme/target/fcloop.c37
-rw-r--r--drivers/nvme/target/loop.c4
-rw-r--r--drivers/nvme/target/nvmet.h2
-rw-r--r--drivers/nvme/target/trace.c201
-rw-r--r--drivers/nvme/target/trace.h141
-rw-r--r--drivers/opp/core.c174
-rw-r--r--drivers/opp/of.c30
-rw-r--r--drivers/parport/Kconfig2
-rw-r--r--drivers/pci/pci-acpi.c14
-rw-r--r--drivers/pci/pci-driver.c82
-rw-r--r--drivers/pci/pci.c116
-rw-r--r--drivers/pci/pci.h8
-rw-r--r--drivers/pci/pcie/aspm.c20
-rw-r--r--drivers/pci/pcie/portdrv_core.c66
-rw-r--r--drivers/pcmcia/ds.c2
-rw-r--r--drivers/perf/Kconfig8
-rw-r--r--drivers/perf/Makefile1
-rw-r--r--drivers/perf/arm_pmu_acpi.c72
-rw-r--r--drivers/perf/arm_spe_pmu.c12
-rw-r--r--drivers/perf/fsl_imx8_ddr_perf.c554
-rw-r--r--drivers/pinctrl/mediatek/mtk-eint.c34
-rw-r--r--drivers/pinctrl/pinctrl-mcp23s08.c8
-rw-r--r--drivers/pinctrl/pinctrl-ocelot.c18
-rw-r--r--drivers/platform/x86/Kconfig3
-rw-r--r--drivers/platform/x86/intel_cht_int33fe.c291
-rw-r--r--drivers/power/avs/smartreflex.c41
-rw-r--r--drivers/powercap/intel_rapl.c75
-rw-r--r--drivers/ptp/Kconfig2
-rw-r--r--drivers/ptp/ptp_clock.c3
-rw-r--r--drivers/pwm/Kconfig11
-rw-r--r--drivers/pwm/Makefile1
-rw-r--r--drivers/pwm/core.c172
-rw-r--r--drivers/pwm/pwm-atmel-hlcdc.c1
-rw-r--r--drivers/pwm/pwm-bcm2835.c8
-rw-r--r--drivers/pwm/pwm-fsl-ftm.c383
-rw-r--r--drivers/pwm/pwm-jz4740.c49
-rw-r--r--drivers/pwm/pwm-meson.c386
-rw-r--r--drivers/pwm/pwm-rcar.c39
-rw-r--r--drivers/pwm/pwm-sifive.c339
-rw-r--r--drivers/pwm/pwm-stm32-lp.c25
-rw-r--r--drivers/pwm/pwm-stm32.c2
-rw-r--r--drivers/pwm/sysfs.c102
-rw-r--r--drivers/ras/cec.c132
-rw-r--r--drivers/regulator/88pm800-regulator.c (renamed from drivers/regulator/88pm800.c)0
-rw-r--r--drivers/regulator/Kconfig39
-rw-r--r--drivers/regulator/Makefile4
-rw-r--r--drivers/regulator/arizona-ldo1.c83
-rw-r--r--drivers/regulator/arizona-micsupp.c72
-rw-r--r--drivers/regulator/bd70528-regulator.c1
-rw-r--r--drivers/regulator/bd718x7-regulator.c1
-rw-r--r--drivers/regulator/core.c280
-rw-r--r--drivers/regulator/cpcap-regulator.c2
-rw-r--r--drivers/regulator/da9062-regulator.c40
-rw-r--r--drivers/regulator/da9063-regulator.c61
-rw-r--r--drivers/regulator/da9211-regulator.c2
-rw-r--r--drivers/regulator/helpers.c11
-rw-r--r--drivers/regulator/lm363x-regulator.c78
-rw-r--r--drivers/regulator/max77620-regulator.c28
-rw-r--r--drivers/regulator/max77650-regulator.c170
-rw-r--r--drivers/regulator/max77802-regulator.c2
-rw-r--r--drivers/regulator/max8952.c64
-rw-r--r--drivers/regulator/of_regulator.c63
-rw-r--r--drivers/regulator/qcom_spmi-regulator.c252
-rw-r--r--drivers/regulator/s2mps11.c255
-rw-r--r--drivers/regulator/s5m8767.c4
-rw-r--r--drivers/regulator/slg51000-regulator.c523
-rw-r--r--drivers/regulator/slg51000-regulator.h505
-rw-r--r--drivers/regulator/stm32-booster.c132
-rw-r--r--drivers/regulator/tps65090-regulator.c7
-rw-r--r--drivers/regulator/wm831x-dcdc.c29
-rw-r--r--drivers/s390/block/Kconfig2
-rw-r--r--drivers/s390/block/dasd_devmap.c2
-rw-r--r--drivers/s390/char/Kconfig22
-rw-r--r--drivers/s390/char/Makefile1
-rw-r--r--drivers/s390/char/sclp_async.c189
-rw-r--r--drivers/s390/char/zcore.c2
-rw-r--r--drivers/s390/cio/airq.c37
-rw-r--r--drivers/s390/cio/ccwreq.c9
-rw-r--r--drivers/s390/cio/chsc.c30
-rw-r--r--drivers/s390/cio/cio.h3
-rw-r--r--drivers/s390/cio/css.c187
-rw-r--r--drivers/s390/cio/device.c68
-rw-r--r--drivers/s390/cio/device_fsm.c49
-rw-r--r--drivers/s390/cio/device_id.c20
-rw-r--r--drivers/s390/cio/device_ops.c21
-rw-r--r--drivers/s390/cio/device_pgid.c22
-rw-r--r--drivers/s390/cio/device_status.c24
-rw-r--r--drivers/s390/cio/io_sch.h20
-rw-r--r--drivers/s390/cio/qdio_main.c1
-rw-r--r--drivers/s390/cio/qdio_setup.c2
-rw-r--r--drivers/s390/cio/qdio_thinint.c6
-rw-r--r--drivers/s390/cio/vfio_ccw_cp.c524
-rw-r--r--drivers/s390/cio/vfio_ccw_cp.h7
-rw-r--r--drivers/s390/cio/vfio_ccw_drv.c13
-rw-r--r--drivers/s390/crypto/pkey_api.c8
-rw-r--r--drivers/s390/crypto/vfio_ap_drv.c34
-rw-r--r--drivers/s390/crypto/vfio_ap_ops.c380
-rw-r--r--drivers/s390/crypto/vfio_ap_private.h15
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype6.c4
-rw-r--r--drivers/s390/net/Kconfig8
-rw-r--r--drivers/s390/net/qeth_core.h109
-rw-r--r--drivers/s390/net/qeth_core_main.c1013
-rw-r--r--drivers/s390/net/qeth_core_mpc.h51
-rw-r--r--drivers/s390/net/qeth_l2_main.c276
-rw-r--r--drivers/s390/net/qeth_l3_main.c249
-rw-r--r--drivers/s390/scsi/zfcp_fc.c4
-rw-r--r--drivers/s390/virtio/virtio_ccw.c246
-rw-r--r--drivers/scsi/Kconfig61
-rw-r--r--drivers/scsi/Makefile4
-rw-r--r--drivers/scsi/NCR5380.c59
-rw-r--r--drivers/scsi/NCR5380.h2
-rw-r--r--drivers/scsi/advansys.c2
-rw-r--r--drivers/scsi/aha152x.c46
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx.reg2
-rw-r--r--drivers/scsi/aic94xx/aic94xx_dev.c4
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc.h14
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_els.c60
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c3
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_io.c116
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_tgt.c10
-rw-r--r--drivers/scsi/cxgbi/cxgb3i/cxgb3i.c10
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c26
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c15
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.h9
-rw-r--r--drivers/scsi/esp_scsi.c20
-rw-r--r--drivers/scsi/esp_scsi.h2
-rw-r--r--drivers/scsi/fdomain.c597
-rw-r--r--drivers/scsi/fdomain.h114
-rw-r--r--drivers/scsi/fdomain_isa.c222
-rw-r--r--drivers/scsi/fdomain_pci.c68
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas.h8
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c16
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c50
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c50
-rw-r--r--drivers/scsi/hpsa.c284
-rw-r--r--drivers/scsi/hpsa.h6
-rw-r--r--drivers/scsi/hpsa_cmd.h2
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c77
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.h10
-rw-r--r--drivers/scsi/imm.c2
-rw-r--r--drivers/scsi/ipr.c29
-rw-r--r--drivers/scsi/isci/remote_device.c4
-rw-r--r--drivers/scsi/isci/remote_device.h5
-rw-r--r--drivers/scsi/isci/request.c8
-rw-r--r--drivers/scsi/isci/task.c2
-rw-r--r--drivers/scsi/libiscsi_tcp.c2
-rw-r--r--drivers/scsi/libsas/sas_discover.c23
-rw-r--r--drivers/scsi/libsas/sas_event.c18
-rw-r--r--drivers/scsi/libsas/sas_expander.c71
-rw-r--r--drivers/scsi/libsas/sas_init.c2
-rw-r--r--drivers/scsi/libsas/sas_internal.h2
-rw-r--r--drivers/scsi/libsas/sas_phy.c18
-rw-r--r--drivers/scsi/libsas/sas_port.c24
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c2
-rw-r--r--drivers/scsi/lpfc/lpfc.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c94
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h7
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c14
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c128
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c35
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c512
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c60
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.c352
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c16
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c77
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h11
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/mac_scsi.c421
-rw-r--r--drivers/scsi/megaraid/Kconfig.megaraid1
-rw-r--r--drivers/scsi/megaraid/Makefile2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h101
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c712
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_debugfs.c179
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fp.c82
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c551
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.h33
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h2
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c497
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h35
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_config.c73
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c234
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c52
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_transport.c8
-rw-r--r--drivers/scsi/mvsas/mv_sas.c2
-rw-r--r--drivers/scsi/mvsas/mv_sas.h3
-rw-r--r--drivers/scsi/mvumi.c11
-rw-r--r--drivers/scsi/osst.c6108
-rw-r--r--drivers/scsi/osst.h651
-rw-r--r--drivers/scsi/osst_detect.h7
-rw-r--r--drivers/scsi/osst_options.h107
-rw-r--r--drivers/scsi/pcmcia/Kconfig10
-rw-r--r--drivers/scsi/pcmcia/Makefile1
-rw-r--r--drivers/scsi/pcmcia/fdomain_cs.c95
-rw-r--r--drivers/scsi/pcmcia/nsp_cs.c4
-rw-r--r--drivers/scsi/pm8001/pm8001_ctl.c52
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c4
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c4
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.h1
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.c4
-rw-r--r--drivers/scsi/pmcraid.c14
-rw-r--r--drivers/scsi/ppa.c2
-rw-r--r--drivers/scsi/qedf/qedf_main.c39
-rw-r--r--drivers/scsi/qedi/qedi_main.c34
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h5
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c1
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.c236
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c1
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c16
-rw-r--r--drivers/scsi/scsi.c12
-rw-r--r--drivers/scsi/scsi_debugfs.h1
-rw-r--r--drivers/scsi/scsi_error.c26
-rw-r--r--drivers/scsi/scsi_lib.c39
-rw-r--r--drivers/scsi/scsi_pm.c6
-rw-r--r--drivers/scsi/scsi_priv.h1
-rw-r--r--drivers/scsi/scsi_sysfs.c7
-rw-r--r--drivers/scsi/scsi_transport_fc.c3
-rw-r--r--drivers/scsi/sd.c111
-rw-r--r--drivers/scsi/ses.c7
-rw-r--r--drivers/scsi/st.c6
-rw-r--r--drivers/scsi/storvsc_drv.c11
-rw-r--r--drivers/scsi/ufs/ufs-qcom.c23
-rw-r--r--drivers/scsi/ufs/ufs-sysfs.c6
-rw-r--r--drivers/scsi/ufs/ufs_bsg.c6
-rw-r--r--drivers/scsi/ufs/ufshcd-pci.c2
-rw-r--r--drivers/scsi/ufs/ufshcd.c35
-rw-r--r--drivers/scsi/ufs/ufshcd.h5
-rw-r--r--drivers/scsi/ufs/ufshci.h6
-rw-r--r--drivers/scsi/virtio_scsi.c3
-rw-r--r--drivers/scsi/vmw_pvscsi.c8
-rw-r--r--drivers/scsi/wd33c93.c2
-rw-r--r--drivers/scsi/wd719x.c42
-rw-r--r--drivers/soc/Makefile2
-rw-r--r--drivers/soc/imx/soc-imx8.c3
-rw-r--r--drivers/soc/ti/Kconfig4
-rw-r--r--drivers/spi/Kconfig14
-rw-r--r--drivers/spi/Makefile1
-rw-r--r--drivers/spi/atmel-quadspi.c21
-rw-r--r--drivers/spi/spi-at91-usart.c221
-rw-r--r--drivers/spi/spi-bcm2835.c328
-rw-r--r--drivers/spi/spi-bcm2835aux.c4
-rw-r--r--drivers/spi/spi-meson-spifc.c12
-rw-r--r--drivers/spi/spi-mt65xx.c15
-rw-r--r--drivers/spi/spi-pxa2xx.c14
-rw-r--r--drivers/spi/spi-qup.c55
-rw-r--r--drivers/spi/spi-rockchip.c4
-rw-r--r--drivers/spi/spi-sh-msiof.c2
-rw-r--r--drivers/spi/spi-stm32-qspi.c14
-rw-r--r--drivers/spi/spi-synquacer.c828
-rw-r--r--drivers/spi/spi-tegra114.c170
-rw-r--r--drivers/spi/spi-uniphier.c17
-rw-r--r--drivers/spi/spi.c230
-rw-r--r--drivers/spi/spidev.c2
-rw-r--r--drivers/ssb/driver_gpio.c6
-rw-r--r--drivers/staging/Kconfig2
-rw-r--r--drivers/staging/Makefile1
-rw-r--r--drivers/staging/fbtft/fbtft-core.c4
-rw-r--r--drivers/staging/fieldbus/Documentation/fieldbus_dev.txt4
-rw-r--r--drivers/staging/isdn/Kconfig12
-rw-r--r--drivers/staging/isdn/Makefile8
-rw-r--r--drivers/staging/isdn/TODO22
-rw-r--r--drivers/staging/isdn/avm/Kconfig (renamed from drivers/isdn/hardware/avm/Kconfig)0
-rw-r--r--drivers/staging/isdn/avm/Makefile (renamed from drivers/isdn/hardware/avm/Makefile)0
-rw-r--r--drivers/staging/isdn/avm/avm_cs.c (renamed from drivers/isdn/hardware/avm/avm_cs.c)0
-rw-r--r--drivers/staging/isdn/avm/avmcard.h (renamed from drivers/isdn/hardware/avm/avmcard.h)0
-rw-r--r--drivers/staging/isdn/avm/b1.c (renamed from drivers/isdn/hardware/avm/b1.c)0
-rw-r--r--drivers/staging/isdn/avm/b1dma.c (renamed from drivers/isdn/hardware/avm/b1dma.c)0
-rw-r--r--drivers/staging/isdn/avm/b1isa.c (renamed from drivers/isdn/hardware/avm/b1isa.c)0
-rw-r--r--drivers/staging/isdn/avm/b1pci.c (renamed from drivers/isdn/hardware/avm/b1pci.c)0
-rw-r--r--drivers/staging/isdn/avm/b1pcmcia.c (renamed from drivers/isdn/hardware/avm/b1pcmcia.c)0
-rw-r--r--drivers/staging/isdn/avm/c4.c (renamed from drivers/isdn/hardware/avm/c4.c)0
-rw-r--r--drivers/staging/isdn/avm/t1isa.c (renamed from drivers/isdn/hardware/avm/t1isa.c)0
-rw-r--r--drivers/staging/isdn/avm/t1pci.c (renamed from drivers/isdn/hardware/avm/t1pci.c)0
-rw-r--r--drivers/staging/isdn/gigaset/Kconfig (renamed from drivers/isdn/gigaset/Kconfig)9
-rw-r--r--drivers/staging/isdn/gigaset/Makefile (renamed from drivers/isdn/gigaset/Makefile)10
-rw-r--r--drivers/staging/isdn/gigaset/asyncdata.c (renamed from drivers/isdn/gigaset/asyncdata.c)0
-rw-r--r--drivers/staging/isdn/gigaset/bas-gigaset.c (renamed from drivers/isdn/gigaset/bas-gigaset.c)0
-rw-r--r--drivers/staging/isdn/gigaset/capi.c (renamed from drivers/isdn/gigaset/capi.c)0
-rw-r--r--drivers/staging/isdn/gigaset/common.c (renamed from drivers/isdn/gigaset/common.c)0
-rw-r--r--drivers/staging/isdn/gigaset/dummyll.c (renamed from drivers/isdn/gigaset/dummyll.c)0
-rw-r--r--drivers/staging/isdn/gigaset/ev-layer.c (renamed from drivers/isdn/gigaset/ev-layer.c)0
-rw-r--r--drivers/staging/isdn/gigaset/gigaset.h (renamed from drivers/isdn/gigaset/gigaset.h)0
-rw-r--r--drivers/staging/isdn/gigaset/interface.c (renamed from drivers/isdn/gigaset/interface.c)0
-rw-r--r--drivers/staging/isdn/gigaset/isocdata.c (renamed from drivers/isdn/gigaset/isocdata.c)0
-rw-r--r--drivers/staging/isdn/gigaset/proc.c (renamed from drivers/isdn/gigaset/proc.c)0
-rw-r--r--drivers/staging/isdn/gigaset/ser-gigaset.c (renamed from drivers/isdn/gigaset/ser-gigaset.c)0
-rw-r--r--drivers/staging/isdn/gigaset/usb-gigaset.c (renamed from drivers/isdn/gigaset/usb-gigaset.c)0
-rw-r--r--drivers/staging/isdn/hysdn/Kconfig (renamed from drivers/isdn/hysdn/Kconfig)0
-rw-r--r--drivers/staging/isdn/hysdn/Makefile (renamed from drivers/isdn/hysdn/Makefile)0
-rw-r--r--drivers/staging/isdn/hysdn/boardergo.c (renamed from drivers/isdn/hysdn/boardergo.c)0
-rw-r--r--drivers/staging/isdn/hysdn/boardergo.h (renamed from drivers/isdn/hysdn/boardergo.h)0
-rw-r--r--drivers/staging/isdn/hysdn/hycapi.c (renamed from drivers/isdn/hysdn/hycapi.c)0
-rw-r--r--drivers/staging/isdn/hysdn/hysdn_boot.c (renamed from drivers/isdn/hysdn/hysdn_boot.c)0
-rw-r--r--drivers/staging/isdn/hysdn/hysdn_defs.h (renamed from drivers/isdn/hysdn/hysdn_defs.h)0
-rw-r--r--drivers/staging/isdn/hysdn/hysdn_init.c (renamed from drivers/isdn/hysdn/hysdn_init.c)0
-rw-r--r--drivers/staging/isdn/hysdn/hysdn_net.c (renamed from drivers/isdn/hysdn/hysdn_net.c)6
-rw-r--r--drivers/staging/isdn/hysdn/hysdn_pof.h (renamed from drivers/isdn/hysdn/hysdn_pof.h)0
-rw-r--r--drivers/staging/isdn/hysdn/hysdn_procconf.c (renamed from drivers/isdn/hysdn/hysdn_procconf.c)0
-rw-r--r--drivers/staging/isdn/hysdn/hysdn_proclog.c (renamed from drivers/isdn/hysdn/hysdn_proclog.c)0
-rw-r--r--drivers/staging/isdn/hysdn/hysdn_sched.c (renamed from drivers/isdn/hysdn/hysdn_sched.c)0
-rw-r--r--drivers/staging/isdn/hysdn/ince1pc.h (renamed from drivers/isdn/hysdn/ince1pc.h)0
-rw-r--r--drivers/staging/media/Kconfig8
-rw-r--r--drivers/staging/media/Makefile4
-rw-r--r--drivers/staging/media/allegro-dvt/Kconfig16
-rw-r--r--drivers/staging/media/allegro-dvt/Makefile5
-rw-r--r--drivers/staging/media/allegro-dvt/TODO4
-rw-r--r--drivers/staging/media/allegro-dvt/allegro-core.c3014
-rw-r--r--drivers/staging/media/allegro-dvt/nal-h264.c1001
-rw-r--r--drivers/staging/media/allegro-dvt/nal-h264.h208
-rw-r--r--drivers/staging/media/bcm2048/radio-bcm2048.c7
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipe.c25
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_isif.c8
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c8
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_video.c12
-rw-r--r--drivers/staging/media/hantro/Kconfig23
-rw-r--r--drivers/staging/media/hantro/Makefile15
-rw-r--r--drivers/staging/media/hantro/TODO (renamed from drivers/staging/media/rockchip/vpu/TODO)0
-rw-r--r--drivers/staging/media/hantro/hantro.h351
-rw-r--r--drivers/staging/media/hantro/hantro_drv.c876
-rw-r--r--drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c260
-rw-r--r--drivers/staging/media/hantro/hantro_g1_regs.h301
-rw-r--r--drivers/staging/media/hantro/hantro_h1_jpeg_enc.c125
-rw-r--r--drivers/staging/media/hantro/hantro_h1_regs.h154
-rw-r--r--drivers/staging/media/hantro/hantro_hw.h102
-rw-r--r--drivers/staging/media/hantro/hantro_jpeg.c (renamed from drivers/staging/media/rockchip/vpu/rockchip_vpu_jpeg.c)41
-rw-r--r--drivers/staging/media/hantro/hantro_jpeg.h13
-rw-r--r--drivers/staging/media/hantro/hantro_mpeg2.c61
-rw-r--r--drivers/staging/media/hantro/hantro_v4l2.c686
-rw-r--r--drivers/staging/media/hantro/hantro_v4l2.h26
-rw-r--r--drivers/staging/media/hantro/rk3288_vpu_hw.c187
-rw-r--r--drivers/staging/media/hantro/rk3399_vpu_hw.c186
-rw-r--r--drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c (renamed from drivers/staging/media/rockchip/vpu/rk3399_vpu_hw_jpeg_enc.c)42
-rw-r--r--drivers/staging/media/hantro/rk3399_vpu_hw_mpeg2_dec.c266
-rw-r--r--drivers/staging/media/hantro/rk3399_vpu_regs.h (renamed from drivers/staging/media/rockchip/vpu/rk3399_vpu_regs.h)2
-rw-r--r--drivers/staging/media/imx/Makefile18
-rw-r--r--drivers/staging/media/imx/imx-ic-common.c68
-rw-r--r--drivers/staging/media/imx/imx-ic-prp.c36
-rw-r--r--drivers/staging/media/imx/imx-ic-prpencvf.c90
-rw-r--r--drivers/staging/media/imx/imx-ic.h6
-rw-r--r--drivers/staging/media/imx/imx-media-capture.c97
-rw-r--r--drivers/staging/media/imx/imx-media-csi.c51
-rw-r--r--drivers/staging/media/imx/imx-media-dev-common.c346
-rw-r--r--drivers/staging/media/imx/imx-media-dev.c449
-rw-r--r--drivers/staging/media/imx/imx-media-fim.c9
-rw-r--r--drivers/staging/media/imx/imx-media-internal-sd.c357
-rw-r--r--drivers/staging/media/imx/imx-media-of.c41
-rw-r--r--drivers/staging/media/imx/imx-media-utils.c170
-rw-r--r--drivers/staging/media/imx/imx-media-vdic.c84
-rw-r--r--drivers/staging/media/imx/imx-media.h116
-rw-r--r--drivers/staging/media/imx/imx7-media-csi.c169
-rw-r--r--drivers/staging/media/imx/imx7-mipi-csis.c41
-rw-r--r--drivers/staging/media/ipu3/include/intel-ipu3.h2
-rw-r--r--drivers/staging/media/ipu3/ipu3-css-fw.c6
-rw-r--r--drivers/staging/media/ipu3/ipu3-css.c14
-rw-r--r--drivers/staging/media/ipu3/ipu3-dmamap.c15
-rw-r--r--drivers/staging/media/ipu3/ipu3-mmu.c125
-rw-r--r--drivers/staging/media/ipu3/ipu3-mmu.h5
-rw-r--r--drivers/staging/media/ipu3/ipu3-v4l2.c4
-rw-r--r--drivers/staging/media/meson/vdec/Kconfig11
-rw-r--r--drivers/staging/media/meson/vdec/Makefile8
-rw-r--r--drivers/staging/media/meson/vdec/TODO8
-rw-r--r--drivers/staging/media/meson/vdec/codec_mpeg12.c210
-rw-r--r--drivers/staging/media/meson/vdec/codec_mpeg12.h14
-rw-r--r--drivers/staging/media/meson/vdec/dos_regs.h98
-rw-r--r--drivers/staging/media/meson/vdec/esparser.c324
-rw-r--r--drivers/staging/media/meson/vdec/esparser.h32
-rw-r--r--drivers/staging/media/meson/vdec/vdec.c1099
-rw-r--r--drivers/staging/media/meson/vdec/vdec.h267
-rw-r--r--drivers/staging/media/meson/vdec/vdec_1.c230
-rw-r--r--drivers/staging/media/meson/vdec/vdec_1.h14
-rw-r--r--drivers/staging/media/meson/vdec/vdec_helpers.c449
-rw-r--r--drivers/staging/media/meson/vdec/vdec_helpers.h83
-rw-r--r--drivers/staging/media/meson/vdec/vdec_platform.c101
-rw-r--r--drivers/staging/media/meson/vdec/vdec_platform.h30
-rw-r--r--drivers/staging/media/omap4iss/iss_video.c11
-rw-r--r--drivers/staging/media/rockchip/vpu/Kconfig13
-rw-r--r--drivers/staging/media/rockchip/vpu/Makefile11
-rw-r--r--drivers/staging/media/rockchip/vpu/rk3288_vpu_hw.c118
-rw-r--r--drivers/staging/media/rockchip/vpu/rk3288_vpu_hw_jpeg_enc.c125
-rw-r--r--drivers/staging/media/rockchip/vpu/rk3288_vpu_regs.h442
-rw-r--r--drivers/staging/media/rockchip/vpu/rk3399_vpu_hw.c118
-rw-r--r--drivers/staging/media/rockchip/vpu/rockchip_vpu.h232
-rw-r--r--drivers/staging/media/rockchip/vpu/rockchip_vpu_common.h29
-rw-r--r--drivers/staging/media/rockchip/vpu/rockchip_vpu_drv.c542
-rw-r--r--drivers/staging/media/rockchip/vpu/rockchip_vpu_enc.c671
-rw-r--r--drivers/staging/media/rockchip/vpu/rockchip_vpu_hw.h58
-rw-r--r--drivers/staging/media/rockchip/vpu/rockchip_vpu_jpeg.h14
-rw-r--r--drivers/staging/media/soc_camera/imx074.c2
-rw-r--r--drivers/staging/media/soc_camera/mt9t031.c2
-rw-r--r--drivers/staging/media/soc_camera/soc_mt9v022.c2
-rw-r--r--drivers/staging/media/soc_camera/soc_ov5642.c6
-rw-r--r--drivers/staging/media/sunxi/cedrus/Makefile3
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus.c42
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus.h39
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_dec.c13
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_h264.c576
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_hw.c6
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_hw.h2
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_regs.h91
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_video.c9
-rw-r--r--drivers/staging/media/tegra-vde/Kconfig1
-rw-r--r--drivers/staging/media/tegra-vde/Makefile1
-rw-r--r--drivers/staging/media/tegra-vde/dmabuf-cache.c226
-rw-r--r--drivers/staging/media/tegra-vde/iommu.c157
-rw-r--r--drivers/staging/media/tegra-vde/trace.h2
-rw-r--r--drivers/staging/media/tegra-vde/uapi.h48
-rw-r--r--drivers/staging/media/tegra-vde/vde.c (renamed from drivers/staging/media/tegra-vde/tegra-vde.c)212
-rw-r--r--drivers/staging/media/tegra-vde/vde.h107
-rw-r--r--drivers/staging/olpc_dcon/TODO7
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.c6
-rw-r--r--drivers/staging/sm750fb/Kconfig2
-rw-r--r--drivers/staging/unisys/visorhba/visorhba_main.c9
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c6
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_ddp.c6
-rw-r--r--drivers/target/iscsi/iscsi_target_auth.c16
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c15
-rw-r--r--drivers/target/target_core_iblock.c2
-rw-r--r--drivers/target/target_core_user.c16
-rw-r--r--drivers/thermal/intel/x86_pkg_temp_thermal.c142
-rw-r--r--drivers/tty/Kconfig2
-rw-r--r--drivers/tty/tty_ldisc.c8
-rw-r--r--drivers/tty/vt/vt.c18
-rw-r--r--drivers/usb/core/devio.c48
-rw-r--r--drivers/usb/gadget/function/f_uvc.c1
-rw-r--r--drivers/usb/gadget/function/uvc_v4l2.c4
-rw-r--r--drivers/usb/image/microtek.c20
-rw-r--r--drivers/usb/image/microtek.h2
-rw-r--r--drivers/usb/misc/Kconfig4
-rw-r--r--drivers/usb/roles/class.c2
-rw-r--r--drivers/usb/typec/bus.h15
-rw-r--r--drivers/usb/typec/class.c17
-rw-r--r--drivers/usb/typec/mux.c238
-rw-r--r--drivers/usb/typec/mux/pi3usb30532.c46
-rw-r--r--drivers/vhost/net.c2
-rw-r--r--drivers/vhost/vhost.c2
-rw-r--r--drivers/video/backlight/backlight.c2
-rw-r--r--drivers/video/backlight/lcd.c12
-rw-r--r--drivers/video/console/dummycon.c6
-rw-r--r--drivers/video/fbdev/Kconfig72
-rw-r--r--drivers/video/fbdev/Makefile1
-rw-r--r--drivers/video/fbdev/amifb.c4
-rw-r--r--drivers/video/fbdev/arkfb.c4
-rw-r--r--drivers/video/fbdev/atafb.c21
-rw-r--r--drivers/video/fbdev/atmel_lcdfb.c10
-rw-r--r--drivers/video/fbdev/aty/aty128fb.c69
-rw-r--r--drivers/video/fbdev/aty/atyfb_base.c13
-rw-r--r--drivers/video/fbdev/aty/radeon_base.c2
-rw-r--r--drivers/video/fbdev/au1200fb.c19
-rw-r--r--drivers/video/fbdev/chipsfb.c1
-rw-r--r--drivers/video/fbdev/cirrusfb.c5
-rw-r--r--drivers/video/fbdev/controlfb.c8
-rw-r--r--drivers/video/fbdev/core/fbcmap.c6
-rw-r--r--drivers/video/fbdev/core/fbcon.c314
-rw-r--r--drivers/video/fbdev/core/fbcon.h6
-rw-r--r--drivers/video/fbdev/core/fbmem.c399
-rw-r--r--drivers/video/fbdev/core/fbsysfs.c20
-rw-r--r--drivers/video/fbdev/cyber2000fb.c6
-rw-r--r--drivers/video/fbdev/da8xx-fb.c1
-rw-r--r--drivers/video/fbdev/efifb.c6
-rw-r--r--drivers/video/fbdev/gbefb.c19
-rw-r--r--drivers/video/fbdev/grvga.c4
-rw-r--r--drivers/video/fbdev/gxt4500.c5
-rw-r--r--drivers/video/fbdev/hyperv_fb.c4
-rw-r--r--drivers/video/fbdev/i740fb.c4
-rw-r--r--drivers/video/fbdev/imsttfb.c5
-rw-r--r--drivers/video/fbdev/imxfb.c11
-rw-r--r--drivers/video/fbdev/intelfb/intelfbdrv.c7
-rw-r--r--drivers/video/fbdev/jz4740_fb.c11
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_base.c2
-rw-r--r--drivers/video/fbdev/mb862xx/mb862xxfbdrv.c5
-rw-r--r--drivers/video/fbdev/mbx/mbxfb.c4
-rw-r--r--drivers/video/fbdev/mmp/hw/mmp_ctrl.c8
-rw-r--r--drivers/video/fbdev/mxsfb.c1028
-rw-r--r--drivers/video/fbdev/neofb.c9
-rw-r--r--drivers/video/fbdev/omap/omapfb_main.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/Kconfig12
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/Makefile1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/core.c6
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dss.h4
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/rfbi.c1067
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb-main.c6
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c21
-rw-r--r--drivers/video/fbdev/platinumfb.c5
-rw-r--r--drivers/video/fbdev/pmag-aa-fb.c4
-rw-r--r--drivers/video/fbdev/pmag-ba-fb.c4
-rw-r--r--drivers/video/fbdev/pmagb-b-fb.c4
-rw-r--r--drivers/video/fbdev/pvr2fb.c188
-rw-r--r--drivers/video/fbdev/pxafb.c2
-rw-r--r--drivers/video/fbdev/riva/fbdev.c1
-rw-r--r--drivers/video/fbdev/s3c-fb.c24
-rw-r--r--drivers/video/fbdev/s3fb.c4
-rw-r--r--drivers/video/fbdev/sa1100fb.c25
-rw-r--r--drivers/video/fbdev/savage/savagefb_driver.c9
-rw-r--r--drivers/video/fbdev/sh7760fb.c2
-rw-r--r--drivers/video/fbdev/sh_mobile_lcdcfb.c140
-rw-r--r--drivers/video/fbdev/sh_mobile_lcdcfb.h5
-rw-r--r--drivers/video/fbdev/sm501fb.c4
-rw-r--r--drivers/video/fbdev/sm712fb.c1
-rw-r--r--drivers/video/fbdev/smscufx.c4
-rw-r--r--drivers/video/fbdev/ssd1307fb.c4
-rw-r--r--drivers/video/fbdev/sunxvr1000.c1
-rw-r--r--drivers/video/fbdev/sunxvr2500.c1
-rw-r--r--drivers/video/fbdev/sunxvr500.c1
-rw-r--r--drivers/video/fbdev/tgafb.c4
-rw-r--r--drivers/video/fbdev/udlfb.c4
-rw-r--r--drivers/video/fbdev/via/viafbdev.c6
-rw-r--r--drivers/video/fbdev/vt8623fb.c4
-rw-r--r--drivers/watchdog/Kconfig6
-rw-r--r--drivers/watchdog/smsc37b787_wdt.c2
2491 files changed, 108157 insertions, 131261 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index e8231663f201..61cf4ea2c229 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -188,8 +188,6 @@ source "drivers/ipack/Kconfig"
source "drivers/reset/Kconfig"
-source "drivers/fmc/Kconfig"
-
source "drivers/phy/Kconfig"
source "drivers/powercap/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 28b030d7988d..6d37564e783c 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -168,7 +168,6 @@ obj-$(CONFIG_IIO) += iio/
obj-$(CONFIG_VME_BUS) += vme/
obj-$(CONFIG_IPACK_BUS) += ipack/
obj-$(CONFIG_NTB) += ntb/
-obj-$(CONFIG_FMC) += fmc/
obj-$(CONFIG_POWERCAP) += powercap/
obj-$(CONFIG_MCB) += mcb/
obj-$(CONFIG_PERF_EVENTS) += perf/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 283ee94224c6..5f6158973289 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -155,7 +155,6 @@ config ACPI_EC_DEBUGFS
config ACPI_AC
tristate "AC Adapter"
- depends on X86
select POWER_SUPPLY
default y
help
@@ -168,7 +167,6 @@ config ACPI_AC
config ACPI_BATTERY
tristate "Battery"
- depends on X86
select POWER_SUPPLY
default y
help
@@ -333,7 +331,7 @@ config ACPI_CUSTOM_DSDT_FILE
depends on !STANDALONE
help
This option supports a custom DSDT by linking it into the kernel.
- See Documentation/acpi/dsdt-override.txt
+ See Documentation/admin-guide/acpi/dsdt-override.rst
Enter the full path name to the file which includes the AmlCode
or dsdt_aml_code declaration.
@@ -355,7 +353,7 @@ config ACPI_TABLE_UPGRADE
This option provides functionality to upgrade arbitrary ACPI tables
via initrd. No functional change if no ACPI tables are passed via
initrd, therefore it's safe to say Y.
- See Documentation/acpi/initrd_table_override.txt for details
+ See Documentation/admin-guide/acpi/initrd_table_override.rst for details
config ACPI_TABLE_OVERRIDE_VIA_BUILTIN_INITRD
bool "Override ACPI tables from built-in initrd"
@@ -365,7 +363,7 @@ config ACPI_TABLE_OVERRIDE_VIA_BUILTIN_INITRD
This option provides functionality to override arbitrary ACPI tables
from built-in uncompressed initrd.
- See Documentation/acpi/initrd_table_override.txt for details
+ See Documentation/admin-guide/acpi/initrd_table_override.rst for details
config ACPI_DEBUG
bool "Debug Statements"
@@ -374,7 +372,7 @@ config ACPI_DEBUG
output and increases the kernel size by around 50K.
Use the acpi.debug_layer and acpi.debug_level kernel command-line
- parameters documented in Documentation/acpi/debug.txt and
+ parameters documented in Documentation/firmware-guide/acpi/debug.rst and
Documentation/admin-guide/kernel-parameters.rst to control the type and
amount of debug output.
@@ -445,7 +443,7 @@ config ACPI_CUSTOM_METHOD
help
This debug facility allows ACPI AML methods to be inserted and/or
replaced without rebooting the system. For details refer to:
- Documentation/acpi/method-customizing.txt.
+ Documentation/firmware-guide/acpi/method-customizing.rst.
NOTE: This option is security sensitive, because it allows arbitrary
kernel memory to be written to by root (uid=0) users, allowing them
diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c
index ff47317d8ef1..7cd0c9ac71ea 100644
--- a/drivers/acpi/acpi_apd.c
+++ b/drivers/acpi/acpi_apd.c
@@ -57,7 +57,7 @@ struct apd_private_data {
static int acpi_apd_setup(struct apd_private_data *pdata)
{
const struct apd_device_desc *dev_desc = pdata->dev_desc;
- struct clk *clk = ERR_PTR(-ENODEV);
+ struct clk *clk;
if (dev_desc->fixed_clk_rate) {
clk = clk_register_fixed_rate(&pdata->adev->dev,
diff --git a/drivers/acpi/acpi_configfs.c b/drivers/acpi/acpi_configfs.c
index 9c6ff0f5a25e..57d9d574d4dd 100644
--- a/drivers/acpi/acpi_configfs.c
+++ b/drivers/acpi/acpi_configfs.c
@@ -53,11 +53,7 @@ static ssize_t acpi_table_aml_write(struct config_item *cfg,
if (!table->header)
return -ENOMEM;
- ACPI_INFO(("Host-directed Dynamic ACPI Table Load:"));
- ret = acpi_tb_install_and_load_table(
- ACPI_PTR_TO_PHYSADDR(table->header),
- ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL, FALSE,
- &table->index);
+ ret = acpi_load_table(table->header);
if (ret) {
kfree(table->header);
table->header = NULL;
diff --git a/drivers/acpi/acpi_lpit.c b/drivers/acpi/acpi_lpit.c
index 6116b0fb86d4..433376e819bb 100644
--- a/drivers/acpi/acpi_lpit.c
+++ b/drivers/acpi/acpi_lpit.c
@@ -129,7 +129,7 @@ static void lpit_update_residency(struct lpit_residency_info *info,
static void lpit_process(u64 begin, u64 end)
{
- while (begin + sizeof(struct acpi_lpit_native) < end) {
+ while (begin + sizeof(struct acpi_lpit_native) <= end) {
struct acpi_lpit_native *lpit_native = (struct acpi_lpit_native *)begin;
if (!lpit_native->header.type && !lpit_native->header.flags) {
@@ -148,7 +148,6 @@ static void lpit_process(u64 begin, u64 end)
void acpi_init_lpit(void)
{
acpi_status status;
- u64 lpit_begin;
struct acpi_table_lpit *lpit;
status = acpi_get_table(ACPI_SIG_LPIT, 0, (struct acpi_table_header **)&lpit);
@@ -156,6 +155,6 @@ void acpi_init_lpit(void)
if (ACPI_FAILURE(status))
return;
- lpit_begin = (u64)lpit + sizeof(*lpit);
- lpit_process(lpit_begin, lpit_begin + lpit->header.length);
+ lpit_process((u64)lpit + sizeof(*lpit),
+ (u64)lpit + lpit->header.length);
}
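
Illustration only, not part of the patch: the relaxed bound in the hunk above matters when the LPIT holds exactly one native entry, because then begin + sizeof(struct acpi_lpit_native) equals end, so the strict "<" test skipped that final (here: only) entry while "<=" processes it. The same predicate written as a stand-alone helper (name is illustrative):

#include <linux/acpi.h>

/* Illustrative helper: true when one full native LPIT entry still fits
 * in [begin, end), including the case where it ends exactly at "end". */
static bool lpit_entry_fits(u64 begin, u64 end)
{
        return begin + sizeof(struct acpi_lpit_native) <= end;
}
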
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 23484aa877b6..398451839178 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -1061,6 +1061,13 @@ static int acpi_lpss_suspend_noirq(struct device *dev)
int ret;
if (pdata->dev_desc->resume_from_noirq) {
+ /*
+ * The driver's ->suspend_late callback will be invoked by
+ * acpi_lpss_do_suspend_late(), with the assumption that the
+ * driver really wanted to run that code in ->suspend_noirq, but
+ * it could not run after acpi_dev_suspend() and the driver
+ * expected the latter to be called in the "late" phase.
+ */
ret = acpi_lpss_do_suspend_late(dev);
if (ret)
return ret;
@@ -1091,16 +1098,99 @@ static int acpi_lpss_resume_noirq(struct device *dev)
struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
int ret;
- ret = acpi_subsys_resume_noirq(dev);
+ /* Follow acpi_subsys_resume_noirq(). */
+ if (dev_pm_may_skip_resume(dev))
+ return 0;
+
+ if (dev_pm_smart_suspend_and_suspended(dev))
+ pm_runtime_set_active(dev);
+
+ ret = pm_generic_resume_noirq(dev);
if (ret)
return ret;
- if (!dev_pm_may_skip_resume(dev) && pdata->dev_desc->resume_from_noirq)
- ret = acpi_lpss_do_resume_early(dev);
+ if (!pdata->dev_desc->resume_from_noirq)
+ return 0;
- return ret;
+ /*
+ * The driver's ->resume_early callback will be invoked by
+ * acpi_lpss_do_resume_early(), with the assumption that the driver
+ * really wanted to run that code in ->resume_noirq, but it could not
+ * run before acpi_dev_resume() and the driver expected the latter to be
+ * called in the "early" phase.
+ */
+ return acpi_lpss_do_resume_early(dev);
+}
+
+static int acpi_lpss_do_restore_early(struct device *dev)
+{
+ int ret = acpi_lpss_resume(dev);
+
+ return ret ? ret : pm_generic_restore_early(dev);
}
+static int acpi_lpss_restore_early(struct device *dev)
+{
+ struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
+
+ if (pdata->dev_desc->resume_from_noirq)
+ return 0;
+
+ return acpi_lpss_do_restore_early(dev);
+}
+
+static int acpi_lpss_restore_noirq(struct device *dev)
+{
+ struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
+ int ret;
+
+ ret = pm_generic_restore_noirq(dev);
+ if (ret)
+ return ret;
+
+ if (!pdata->dev_desc->resume_from_noirq)
+ return 0;
+
+ /* This is analogous to what happens in acpi_lpss_resume_noirq(). */
+ return acpi_lpss_do_restore_early(dev);
+}
+
+static int acpi_lpss_do_poweroff_late(struct device *dev)
+{
+ int ret = pm_generic_poweroff_late(dev);
+
+ return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev));
+}
+
+static int acpi_lpss_poweroff_late(struct device *dev)
+{
+ struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
+
+ if (dev_pm_smart_suspend_and_suspended(dev))
+ return 0;
+
+ if (pdata->dev_desc->resume_from_noirq)
+ return 0;
+
+ return acpi_lpss_do_poweroff_late(dev);
+}
+
+static int acpi_lpss_poweroff_noirq(struct device *dev)
+{
+ struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
+
+ if (dev_pm_smart_suspend_and_suspended(dev))
+ return 0;
+
+ if (pdata->dev_desc->resume_from_noirq) {
+ /* This is analogous to the acpi_lpss_suspend_noirq() case. */
+ int ret = acpi_lpss_do_poweroff_late(dev);
+ if (ret)
+ return ret;
+ }
+
+ return pm_generic_poweroff_noirq(dev);
+}
#endif /* CONFIG_PM_SLEEP */
static int acpi_lpss_runtime_suspend(struct device *dev)
@@ -1134,14 +1224,11 @@ static struct dev_pm_domain acpi_lpss_pm_domain = {
.resume_noirq = acpi_lpss_resume_noirq,
.resume_early = acpi_lpss_resume_early,
.freeze = acpi_subsys_freeze,
- .freeze_late = acpi_subsys_freeze_late,
- .freeze_noirq = acpi_subsys_freeze_noirq,
- .thaw_noirq = acpi_subsys_thaw_noirq,
- .poweroff = acpi_subsys_suspend,
- .poweroff_late = acpi_lpss_suspend_late,
- .poweroff_noirq = acpi_lpss_suspend_noirq,
- .restore_noirq = acpi_lpss_resume_noirq,
- .restore_early = acpi_lpss_resume_early,
+ .poweroff = acpi_subsys_poweroff,
+ .poweroff_late = acpi_lpss_poweroff_late,
+ .poweroff_noirq = acpi_lpss_poweroff_noirq,
+ .restore_noirq = acpi_lpss_restore_noirq,
+ .restore_early = acpi_lpss_restore_early,
#endif
.runtime_suspend = acpi_lpss_runtime_suspend,
.runtime_resume = acpi_lpss_runtime_resume,
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index 6b3f1217a237..e7dc0133f817 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -64,6 +64,7 @@ static void power_saving_mwait_init(void)
case X86_VENDOR_HYGON:
case X86_VENDOR_AMD:
case X86_VENDOR_INTEL:
+ case X86_VENDOR_ZHAOXIN:
/*
* AMD Fam10h TSC will tick in all
* C/P/S0/S1 states when this bit is set.
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index 831660179662..c8652f91054e 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -69,7 +69,8 @@ acpi_status
acpi_ev_mask_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 is_masked);
acpi_status
-acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info);
+acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info,
+ u8 clear_on_enable);
acpi_status
acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info);
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index d056a1845613..fd3beea93421 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -178,7 +178,6 @@ ACPI_GLOBAL(u8, acpi_gbl_verbose_leak_dump);
ACPI_GLOBAL(struct acpi_namespace_node, acpi_gbl_root_node_struct);
ACPI_GLOBAL(struct acpi_namespace_node *, acpi_gbl_root_node);
ACPI_GLOBAL(struct acpi_namespace_node *, acpi_gbl_fadt_gpe_device);
-ACPI_GLOBAL(union acpi_operand_object *, acpi_gbl_module_code_list);
extern const u8 acpi_gbl_ns_properties[ACPI_NUM_NS_TYPES];
extern const struct acpi_predefined_names
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index 39812fc4386a..7da1864798a0 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -207,8 +207,6 @@ acpi_ns_dump_object_paths(acpi_object_type type,
*/
acpi_status acpi_ns_evaluate(struct acpi_evaluate_info *info);
-void acpi_ns_exec_module_code_list(void);
-
/*
* nsarguments - Argument count/type checking for predefined/reserved names
*/
diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c
index 4ebd23700bbc..a1ffed29903b 100644
--- a/drivers/acpi/acpica/dsinit.c
+++ b/drivers/acpi/acpica/dsinit.c
@@ -202,7 +202,7 @@ acpi_ds_initialize_objects(u32 table_index,
if (ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_DSDT)) {
ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
- "\nInitializing Namespace objects:\n"));
+ "\nACPI table initialization:\n"));
}
/* Summary of objects initialized */
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index 62d3aa74277b..344feba29063 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -146,6 +146,7 @@ acpi_ev_mask_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 is_masked)
* FUNCTION: acpi_ev_add_gpe_reference
*
* PARAMETERS: gpe_event_info - Add a reference to this GPE
+ * clear_on_enable - Clear GPE status before enabling it
*
* RETURN: Status
*
@@ -155,7 +156,8 @@ acpi_ev_mask_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 is_masked)
******************************************************************************/
acpi_status
-acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
+acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info,
+ u8 clear_on_enable)
{
acpi_status status = AE_OK;
@@ -170,6 +172,10 @@ acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
/* Enable on first reference */
+ if (clear_on_enable) {
+ (void)acpi_hw_clear_gpe(gpe_event_info);
+ }
+
status = acpi_ev_update_gpe_enable_mask(gpe_event_info);
if (ACPI_SUCCESS(status)) {
status = acpi_ev_enable_gpe(gpe_event_info);
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index 328d1d6123ad..fb15e9e2373b 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -453,7 +453,7 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
continue;
}
- status = acpi_ev_add_gpe_reference(gpe_event_info);
+ status = acpi_ev_add_gpe_reference(gpe_event_info, FALSE);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"Could not enable GPE 0x%02X",
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index 3df00eb6621b..279ef0557aa3 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -971,7 +971,7 @@ acpi_remove_gpe_handler(acpi_handle gpe_device,
ACPI_GPE_DISPATCH_METHOD) ||
(ACPI_GPE_DISPATCH_TYPE(handler->original_flags) ==
ACPI_GPE_DISPATCH_NOTIFY)) && handler->originally_enabled) {
- (void)acpi_ev_add_gpe_reference(gpe_event_info);
+ (void)acpi_ev_add_gpe_reference(gpe_event_info, FALSE);
if (ACPI_GPE_IS_POLLING_NEEDED(gpe_event_info)) {
/* Poll edge triggered GPEs to handle existing events */
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index 30a083902f52..710488ec59e9 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -108,7 +108,7 @@ acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number)
if (gpe_event_info) {
if (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) !=
ACPI_GPE_DISPATCH_NONE) {
- status = acpi_ev_add_gpe_reference(gpe_event_info);
+ status = acpi_ev_add_gpe_reference(gpe_event_info, TRUE);
if (ACPI_SUCCESS(status) &&
ACPI_GPE_IS_POLLING_NEEDED(gpe_event_info)) {
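
A hypothetical consumer of the behavior change above (not part of the patch; the GPE number is made up): because acpi_enable_gpe() now passes clear_on_enable as TRUE, a status bit left over from before a handler existed is cleared on the first enable instead of producing a spurious event.

#include <linux/acpi.h>
#include <linux/printk.h>

/* Illustrative caller: enable a (made-up) GPE of the FADT GPE device. */
static void example_enable_gpe(void)
{
        acpi_status status = acpi_enable_gpe(NULL, 0x1c);

        if (ACPI_FAILURE(status))
                pr_warn("GPE 0x1c enable failed: %s\n",
                        acpi_format_exception(status));
}
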
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index 7b855603f81a..2566e2d4c780 100644
--- a/drivers/acpi/acpica/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -36,6 +36,7 @@ acpi_status acpi_ns_root_initialize(void)
acpi_status status;
const struct acpi_predefined_names *init_val = NULL;
struct acpi_namespace_node *new_node;
+ struct acpi_namespace_node *prev_node = NULL;
union acpi_operand_object *obj_desc;
acpi_string val = NULL;
@@ -61,12 +62,28 @@ acpi_status acpi_ns_root_initialize(void)
*/
acpi_gbl_root_node = &acpi_gbl_root_node_struct;
- /* Enter the pre-defined names in the name table */
+ /* Enter the predefined names in the name table */
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Entering predefined entries into namespace\n"));
+ /*
+ * Create the initial (default) namespace.
+ * This namespace looks like something similar to this:
+ *
+ * ACPI Namespace (from Namespace Root):
+ * 0 _GPE Scope 00203160 00
+ * 0 _PR_ Scope 002031D0 00
+ * 0 _SB_ Device 00203240 00 Notify Object: 0020ADD8
+ * 0 _SI_ Scope 002032B0 00
+ * 0 _TZ_ Device 00203320 00
+ * 0 _REV Integer 00203390 00 = 0000000000000002
+ * 0 _OS_ String 00203488 00 Len 14 "Microsoft Windows NT"
+ * 0 _GL_ Mutex 00203580 00 Object 002035F0
+ * 0 _OSI Method 00203678 00 Args 1 Len 0000 Aml 00000000
+ */
for (init_val = acpi_gbl_pre_defined_names; init_val->name; init_val++) {
+ status = AE_OK;
/* _OSI is optional for now, will be permanent later */
@@ -75,17 +92,32 @@ acpi_status acpi_ns_root_initialize(void)
continue;
}
- status =
- acpi_ns_lookup(NULL, ACPI_CAST_PTR(char, init_val->name),
- init_val->type, ACPI_IMODE_LOAD_PASS2,
- ACPI_NS_NO_UPSEARCH, NULL, &new_node);
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status,
- "Could not create predefined name %s",
- init_val->name));
- continue;
+ /*
+ * Create, init, and link the new predefined name
+ * Note: No need to use acpi_ns_lookup here because all the
+ * predefined names are at the root level. It is much easier to
+ * just create and link the new node(s) here.
+ */
+ new_node =
+ ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_namespace_node));
+ if (!new_node) {
+ status = AE_NO_MEMORY;
+ goto unlock_and_exit;
}
+ ACPI_COPY_NAMESEG(new_node->name.ascii, init_val->name);
+ new_node->descriptor_type = ACPI_DESC_TYPE_NAMED;
+ new_node->type = init_val->type;
+
+ if (!prev_node) {
+ acpi_gbl_root_node_struct.child = new_node;
+ } else {
+ prev_node->peer = new_node;
+ }
+
+ new_node->parent = &acpi_gbl_root_node_struct;
+ prev_node = new_node;
+
/*
* Name entered successfully. If entry in pre_defined_names[] specifies
* an initial value, create the initial value.
@@ -131,7 +163,7 @@ acpi_status acpi_ns_root_initialize(void)
new_node->value = obj_desc->method.param_count;
#else
- /* Mark this as a very SPECIAL method */
+ /* Mark this as a very SPECIAL method (_OSI) */
obj_desc->method.info_flags =
ACPI_METHOD_INTERNAL_ONLY;
diff --git a/drivers/acpi/acpica/nseval.c b/drivers/acpi/acpica/nseval.c
index 6390b7951ebf..63748ac699f7 100644
--- a/drivers/acpi/acpica/nseval.c
+++ b/drivers/acpi/acpica/nseval.c
@@ -14,11 +14,6 @@
#define _COMPONENT ACPI_NAMESPACE
ACPI_MODULE_NAME("nseval")
-/* Local prototypes */
-static void
-acpi_ns_exec_module_code(union acpi_operand_object *method_obj,
- struct acpi_evaluate_info *info);
-
/*******************************************************************************
*
* FUNCTION: acpi_ns_evaluate
@@ -44,7 +39,6 @@ acpi_ns_exec_module_code(union acpi_operand_object *method_obj,
* MUTEX: Locks interpreter
*
******************************************************************************/
-
acpi_status acpi_ns_evaluate(struct acpi_evaluate_info *info)
{
acpi_status status;
@@ -310,187 +304,3 @@ cleanup:
info->full_pathname = NULL;
return_ACPI_STATUS(status);
}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ns_exec_module_code_list
- *
- * PARAMETERS: None
- *
- * RETURN: None. Exceptions during method execution are ignored, since
- * we cannot abort a table load.
- *
- * DESCRIPTION: Execute all elements of the global module-level code list.
- * Each element is executed as a single control method.
- *
- * NOTE: With this option enabled, each block of detected executable AML
- * code that is outside of any control method is wrapped with a temporary
- * control method object and placed on a global list. The methods on this
- * list are executed below.
- *
- * This function executes the module-level code for all tables only after
- * all of the tables have been loaded. It is a legacy option and is
- * not compatible with other ACPI implementations. See acpi_ns_load_table.
- *
- * This function will be removed when the legacy option is removed.
- *
- ******************************************************************************/
-
-void acpi_ns_exec_module_code_list(void)
-{
- union acpi_operand_object *prev;
- union acpi_operand_object *next;
- struct acpi_evaluate_info *info;
- u32 method_count = 0;
-
- ACPI_FUNCTION_TRACE(ns_exec_module_code_list);
-
- /* Exit now if the list is empty */
-
- next = acpi_gbl_module_code_list;
- if (!next) {
- ACPI_DEBUG_PRINT((ACPI_DB_INIT_NAMES,
- "Legacy MLC block list is empty\n"));
-
- return_VOID;
- }
-
- /* Allocate the evaluation information block */
-
- info = ACPI_ALLOCATE(sizeof(struct acpi_evaluate_info));
- if (!info) {
- return_VOID;
- }
-
- /* Walk the list, executing each "method" */
-
- while (next) {
- prev = next;
- next = next->method.mutex;
-
- /* Clear the link field and execute the method */
-
- prev->method.mutex = NULL;
- acpi_ns_exec_module_code(prev, info);
- method_count++;
-
- /* Delete the (temporary) method object */
-
- acpi_ut_remove_reference(prev);
- }
-
- ACPI_INFO(("Executed %u blocks of module-level executable AML code",
- method_count));
-
- ACPI_FREE(info);
- acpi_gbl_module_code_list = NULL;
- return_VOID;
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ns_exec_module_code
- *
- * PARAMETERS: method_obj - Object container for the module-level code
- * info - Info block for method evaluation
- *
- * RETURN: None. Exceptions during method execution are ignored, since
- * we cannot abort a table load.
- *
- * DESCRIPTION: Execute a control method containing a block of module-level
- * executable AML code. The control method is temporarily
- * installed to the root node, then evaluated.
- *
- ******************************************************************************/
-
-static void
-acpi_ns_exec_module_code(union acpi_operand_object *method_obj,
- struct acpi_evaluate_info *info)
-{
- union acpi_operand_object *parent_obj;
- struct acpi_namespace_node *parent_node;
- acpi_object_type type;
- acpi_status status;
-
- ACPI_FUNCTION_TRACE(ns_exec_module_code);
-
- /*
- * Get the parent node. We cheat by using the next_object field
- * of the method object descriptor.
- */
- parent_node =
- ACPI_CAST_PTR(struct acpi_namespace_node,
- method_obj->method.next_object);
- type = acpi_ns_get_type(parent_node);
-
- /*
- * Get the region handler and save it in the method object. We may need
- * this if an operation region declaration causes a _REG method to be run.
- *
- * We can't do this in acpi_ps_link_module_code because
- * acpi_gbl_root_node->Object is NULL at PASS1.
- */
- if ((type == ACPI_TYPE_DEVICE) && parent_node->object) {
- method_obj->method.dispatch.handler =
- parent_node->object->device.handler;
- }
-
- /* Must clear next_object (acpi_ns_attach_object needs the field) */
-
- method_obj->method.next_object = NULL;
-
- /* Initialize the evaluation information block */
-
- memset(info, 0, sizeof(struct acpi_evaluate_info));
- info->prefix_node = parent_node;
-
- /*
- * Get the currently attached parent object. Add a reference,
- * because the ref count will be decreased when the method object
- * is installed to the parent node.
- */
- parent_obj = acpi_ns_get_attached_object(parent_node);
- if (parent_obj) {
- acpi_ut_add_reference(parent_obj);
- }
-
- /* Install the method (module-level code) in the parent node */
-
- status =
- acpi_ns_attach_object(parent_node, method_obj, ACPI_TYPE_METHOD);
- if (ACPI_FAILURE(status)) {
- goto exit;
- }
-
- /* Execute the parent node as a control method */
-
- status = acpi_ns_evaluate(info);
-
- ACPI_DEBUG_PRINT((ACPI_DB_INIT_NAMES,
- "Executed module-level code at %p\n",
- method_obj->method.aml_start));
-
- /* Delete a possible implicit return value (in slack mode) */
-
- if (info->return_object) {
- acpi_ut_remove_reference(info->return_object);
- }
-
- /* Detach the temporary method object */
-
- acpi_ns_detach_object(parent_node);
-
- /* Restore the original parent object */
-
- if (parent_obj) {
- status = acpi_ns_attach_object(parent_node, parent_obj, type);
- } else {
- parent_node->type = (u8)type;
- }
-
-exit:
- if (parent_obj) {
- acpi_ut_remove_reference(parent_obj);
- }
- return_VOID;
-}
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
index 53e5d00d3a5e..61e9dfc9fe8c 100644
--- a/drivers/acpi/acpica/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -55,14 +55,19 @@ acpi_status acpi_ns_initialize_objects(void)
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"**** Starting initialization of namespace objects ****\n"));
ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
- "Completing Region/Field/Buffer/Package initialization:\n"));
+ "Final data object initialization: "));
- /* Set all init info to zero */
+ /* Clear the info block */
memset(&info, 0, sizeof(struct acpi_init_walk_info));
/* Walk entire namespace from the supplied root */
+ /*
+ * TBD: will become ACPI_TYPE_PACKAGE as this type object
+ * is now the only one that supports deferred initialization
+ * (forward references).
+ */
status = acpi_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT,
ACPI_UINT32_MAX, acpi_ns_init_one_object,
NULL, &info, NULL);
@@ -71,13 +76,8 @@ acpi_status acpi_ns_initialize_objects(void)
}
ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
- " Initialized %u/%u Regions %u/%u Fields %u/%u "
- "Buffers %u/%u Packages (%u nodes)\n",
- info.op_region_init, info.op_region_count,
- info.field_init, info.field_count,
- info.buffer_init, info.buffer_count,
- info.package_init, info.package_count,
- info.object_count));
+ "Namespace contains %u (0x%X) objects\n",
+ info.object_count, info.object_count));
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"%u Control Methods found\n%u Op Regions found\n",
@@ -382,34 +382,18 @@ acpi_ns_init_one_object(acpi_handle obj_handle,
acpi_ex_enter_interpreter();
/*
- * Each of these types can contain executable AML code within the
- * declaration.
+ * Only initialization of Package objects can be deferred, in order
+ * to support forward references.
*/
switch (type) {
- case ACPI_TYPE_REGION:
-
- info->op_region_init++;
- status = acpi_ds_get_region_arguments(obj_desc);
- break;
-
- case ACPI_TYPE_BUFFER_FIELD:
-
- info->field_init++;
- status = acpi_ds_get_buffer_field_arguments(obj_desc);
- break;
-
case ACPI_TYPE_LOCAL_BANK_FIELD:
+ /* TBD: bank_fields do not require deferred init, remove this code */
+
info->field_init++;
status = acpi_ds_get_bank_field_arguments(obj_desc);
break;
- case ACPI_TYPE_BUFFER:
-
- info->buffer_init++;
- status = acpi_ds_get_buffer_arguments(obj_desc);
- break;
-
case ACPI_TYPE_PACKAGE:
/* Complete the initialization/resolution of the package object */
@@ -421,8 +405,13 @@ acpi_ns_init_one_object(acpi_handle obj_handle,
default:
- /* No other types can get here */
+ /* No other types should get here */
+ status = AE_TYPE;
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Opcode is not deferred [%4.4s] (%s)",
+ acpi_ut_get_node_name(node),
+ acpi_ut_get_type_name(type)));
break;
}
diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c
index 35fff5c75da1..d7c4d6e8e21e 100644
--- a/drivers/acpi/acpica/nsload.c
+++ b/drivers/acpi/acpica/nsload.c
@@ -109,18 +109,6 @@ unlock:
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"**** Completed Table Object Initialization\n"));
- /*
- * This case handles the legacy option that groups all module-level
- * code blocks together and defers execution until all of the tables
- * are loaded. Execute all of these blocks at this time.
- * Execute any module-level code that was detected during the table
- * load phase.
- *
- * Note: this option is deprecated and will be eliminated in the
- * future. Use of this option can cause problems with AML code that
- * depends upon in-order immediate execution of module-level code.
- */
- acpi_ns_exec_module_code_list();
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index 6bc90d46db5c..b8d007c84d32 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -560,21 +560,9 @@ struct acpi_namespace_node *acpi_ns_validate_handle(acpi_handle handle)
void acpi_ns_terminate(void)
{
acpi_status status;
- union acpi_operand_object *prev;
- union acpi_operand_object *next;
ACPI_FUNCTION_TRACE(ns_terminate);
- /* Delete any module-level code blocks */
-
- next = acpi_gbl_module_code_list;
- while (next) {
- prev = next;
- next = next->method.mutex;
- prev->method.mutex = NULL; /* Clear the Mutex (cheated) field */
- acpi_ut_remove_reference(prev);
- }
-
/*
* Free the entire namespace -- all nodes and all objects
* attached to the nodes
diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c
index 933f81316ad2..91a4b984f224 100644
--- a/drivers/acpi/acpica/tbdata.c
+++ b/drivers/acpi/acpica/tbdata.c
@@ -934,19 +934,6 @@ acpi_tb_load_table(u32 table_index, struct acpi_namespace_node *parent_node)
status = acpi_ns_load_table(table_index, parent_node);
/*
- * This case handles the legacy option that groups all module-level
- * code blocks together and defers execution until all of the tables
- * are loaded. Execute all of these blocks at this time.
- * Execute any module-level code that was detected during the table
- * load phase.
- *
- * Note: this option is deprecated and will be eliminated in the
- * future. Use of this option can cause problems with AML code that
- * depends upon in-order immediate execution of module-level code.
- */
- acpi_ns_exec_module_code_list();
-
- /*
* Update GPEs for any new _Lxx/_Exx methods. Ignore errors. The host is
* responsible for discovering any new wake GPEs by running _PRW methods
* that may have been loaded by this table.
diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
index 4f30f06a6f78..ef8f8a9f3c9c 100644
--- a/drivers/acpi/acpica/tbxfload.c
+++ b/drivers/acpi/acpica/tbxfload.c
@@ -297,6 +297,17 @@ acpi_status acpi_load_table(struct acpi_table_header *table)
status = acpi_tb_install_and_load_table(ACPI_PTR_TO_PHYSADDR(table),
ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL,
FALSE, &table_index);
+
+ if (ACPI_SUCCESS(status)) {
+ /* Complete the initialization/resolution of package objects */
+
+ status = acpi_ns_walk_namespace(ACPI_TYPE_PACKAGE,
+ ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX, 0,
+ acpi_ns_init_one_package,
+ NULL, NULL, NULL);
+ }
+
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c
index bc124591320e..6f33e7c72327 100644
--- a/drivers/acpi/acpica/utinit.c
+++ b/drivers/acpi/acpica/utinit.c
@@ -180,7 +180,6 @@ acpi_status acpi_ut_init_globals(void)
/* Namespace */
- acpi_gbl_module_code_list = NULL;
acpi_gbl_root_node = NULL;
acpi_gbl_root_node_struct.name.integer = ACPI_ROOT_NAME;
acpi_gbl_root_node_struct.descriptor_type = ACPI_DESC_TYPE_NAMED;
diff --git a/drivers/acpi/acpica/utxfinit.c b/drivers/acpi/acpica/utxfinit.c
index 9f3b1e3a09de..cf769e94fe0f 100644
--- a/drivers/acpi/acpica/utxfinit.c
+++ b/drivers/acpi/acpica/utxfinit.c
@@ -211,24 +211,17 @@ acpi_status ACPI_INIT_FUNCTION acpi_initialize_objects(u32 flags)
ACPI_FUNCTION_TRACE(acpi_initialize_objects);
+#ifdef ACPI_OBSOLETE_BEHAVIOR
/*
- * This case handles the legacy option that groups all module-level
- * code blocks together and defers execution until all of the tables
- * are loaded. Execute all of these blocks at this time.
- * Execute any module-level code that was detected during the table
- * load phase.
- *
- * Note: this option is deprecated and will be eliminated in the
- * future. Use of this option can cause problems with AML code that
- * depends upon in-order immediate execution of module-level code.
+ * 05/2019: Removed, initialization now happens at both object
+ * creation and table load time
*/
- acpi_ns_exec_module_code_list();
/*
* Initialize the objects that remain uninitialized. This
* runs the executable AML that may be part of the
- * declaration of these objects:
- * operation_regions, buffer_fields, Buffers, and Packages.
+ * declaration of these objects: operation_regions, buffer_fields,
+ * bank_fields, Buffers, and Packages.
*/
if (!(flags & ACPI_NO_OBJECT_INIT)) {
status = acpi_ns_initialize_objects();
@@ -236,6 +229,7 @@ acpi_status ACPI_INIT_FUNCTION acpi_initialize_objects(u32 flags)
return_ACPI_STATUS(status);
}
}
+#endif
/*
* Initialize all device/region objects in the namespace. This runs
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 993940d582f5..a66e00fe31fe 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -345,7 +345,7 @@ static int __ghes_peek_estatus(struct ghes *ghes,
return -ENOENT;
}
- return __ghes_check_estatus(ghes, estatus);
+ return 0;
}
static int __ghes_read_estatus(struct acpi_hest_generic_status *estatus,
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index e54956ae93d3..28cffaaf9d82 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -45,6 +45,19 @@ const char *acpi_power_state_string(int state)
}
}
+static int acpi_dev_pm_explicit_get(struct acpi_device *device, int *state)
+{
+ unsigned long long psc;
+ acpi_status status;
+
+ status = acpi_evaluate_integer(device->handle, "_PSC", NULL, &psc);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ *state = psc;
+ return 0;
+}
+
/**
* acpi_device_get_power - Get power state of an ACPI device.
* @device: Device to get the power state of.
@@ -53,10 +66,16 @@ const char *acpi_power_state_string(int state)
* This function does not update the device's power.state field, but it may
* update its parent's power.state field (when the parent's power state is
* unknown and the device's power state turns out to be D0).
+ *
+ * Also, it does not update power resource reference counters to ensure that
+ * the power state returned by it will be persistent and it may return a power
+ * state shallower than previously set by acpi_device_set_power() for @device
+ * (if that power state depends on any power resources).
*/
int acpi_device_get_power(struct acpi_device *device, int *state)
{
int result = ACPI_STATE_UNKNOWN;
+ int error;
if (!device || !state)
return -EINVAL;
@@ -73,18 +92,16 @@ int acpi_device_get_power(struct acpi_device *device, int *state)
* if available.
*/
if (device->power.flags.power_resources) {
- int error = acpi_power_get_inferred_state(device, &result);
+ error = acpi_power_get_inferred_state(device, &result);
if (error)
return error;
}
if (device->power.flags.explicit_get) {
- acpi_handle handle = device->handle;
- unsigned long long psc;
- acpi_status status;
+ int psc;
- status = acpi_evaluate_integer(handle, "_PSC", NULL, &psc);
- if (ACPI_FAILURE(status))
- return -ENODEV;
+ error = acpi_dev_pm_explicit_get(device, &psc);
+ if (error)
+ return error;
/*
* The power resources settings may indicate a power state
@@ -118,7 +135,6 @@ int acpi_device_get_power(struct acpi_device *device, int *state)
return 0;
}
-EXPORT_SYMBOL(acpi_device_get_power);
static int acpi_dev_pm_explicit_set(struct acpi_device *adev, int state)
{
@@ -152,7 +168,8 @@ int acpi_device_set_power(struct acpi_device *device, int state)
/* Make sure this is a valid target state */
- if (state == device->power.state) {
+ /* There is a special case for D0 addressed below. */
+ if (state > ACPI_STATE_D0 && state == device->power.state) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] already in %s\n",
device->pnp.bus_id,
acpi_power_state_string(state)));
@@ -202,9 +219,15 @@ int acpi_device_set_power(struct acpi_device *device, int state)
return -ENODEV;
}
- result = acpi_dev_pm_explicit_set(device, state);
- if (result)
- goto end;
+ /*
+ * If the device goes from D3hot to D3cold, _PS3 has been
+ * evaluated for it already, so skip it in that case.
+ */
+ if (device->power.state < ACPI_STATE_D3_HOT) {
+ result = acpi_dev_pm_explicit_set(device, state);
+ if (result)
+ goto end;
+ }
if (device->power.flags.power_resources)
result = acpi_power_transition(device, target_state);
@@ -214,6 +237,30 @@ int acpi_device_set_power(struct acpi_device *device, int state)
if (result)
goto end;
}
+
+ if (device->power.state == ACPI_STATE_D0) {
+ int psc;
+
+ /* Nothing to do here if _PSC is not present. */
+ if (!device->power.flags.explicit_get)
+ return 0;
+
+ /*
+ * The power state of the device was set to D0 last
+ * time, but that might have happened before a
+ * system-wide transition involving the platform
+ * firmware, so it may be necessary to evaluate _PS0
+ * for the device here. However, use extra care here
+ * and evaluate _PSC to check the device's current power
+ * state, and only invoke _PS0 if the evaluation of _PSC
+ * is successful and it returns a power state different
+ * from D0.
+ */
+ result = acpi_dev_pm_explicit_get(device, &psc);
+ if (result || psc == ACPI_STATE_D0)
+ return 0;
+ }
+
result = acpi_dev_pm_explicit_set(device, ACPI_STATE_D0);
}
@@ -1073,7 +1120,7 @@ EXPORT_SYMBOL_GPL(acpi_subsys_suspend_noirq);
* acpi_subsys_resume_noirq - Run the device driver's "noirq" resume callback.
* @dev: Device to handle.
*/
-int acpi_subsys_resume_noirq(struct device *dev)
+static int acpi_subsys_resume_noirq(struct device *dev)
{
if (dev_pm_may_skip_resume(dev))
return 0;
@@ -1088,7 +1135,6 @@ int acpi_subsys_resume_noirq(struct device *dev)
return pm_generic_resume_noirq(dev);
}
-EXPORT_SYMBOL_GPL(acpi_subsys_resume_noirq);
/**
* acpi_subsys_resume_early - Resume device using ACPI.
@@ -1098,12 +1144,11 @@ EXPORT_SYMBOL_GPL(acpi_subsys_resume_noirq);
* generic early resume procedure for it during system transition into the
* working state.
*/
-int acpi_subsys_resume_early(struct device *dev)
+static int acpi_subsys_resume_early(struct device *dev)
{
int ret = acpi_dev_resume(dev);
return ret ? ret : pm_generic_resume_early(dev);
}
-EXPORT_SYMBOL_GPL(acpi_subsys_resume_early);
/**
* acpi_subsys_freeze - Run the device driver's freeze callback.
@@ -1112,65 +1157,81 @@ EXPORT_SYMBOL_GPL(acpi_subsys_resume_early);
int acpi_subsys_freeze(struct device *dev)
{
/*
- * This used to be done in acpi_subsys_prepare() for all devices and
- * some drivers may depend on it, so do it here. Ideally, however,
- * runtime-suspended devices should not be touched during freeze/thaw
- * transitions.
+ * Resume all runtime-suspended devices before creating a snapshot
+ * image of system memory, because the restore kernel generally cannot
+ * be expected to always handle them consistently and they need to be
+ * put into the runtime-active metastate during system resume anyway,
+ * so it is better to ensure that the state saved in the image will be
+ * always consistent with that.
*/
- if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND))
- pm_runtime_resume(dev);
+ pm_runtime_resume(dev);
return pm_generic_freeze(dev);
}
EXPORT_SYMBOL_GPL(acpi_subsys_freeze);
/**
- * acpi_subsys_freeze_late - Run the device driver's "late" freeze callback.
- * @dev: Device to handle.
+ * acpi_subsys_restore_early - Restore device using ACPI.
+ * @dev: Device to restore.
*/
-int acpi_subsys_freeze_late(struct device *dev)
+int acpi_subsys_restore_early(struct device *dev)
{
+ int ret = acpi_dev_resume(dev);
+ return ret ? ret : pm_generic_restore_early(dev);
+}
+EXPORT_SYMBOL_GPL(acpi_subsys_restore_early);
- if (dev_pm_smart_suspend_and_suspended(dev))
- return 0;
+/**
+ * acpi_subsys_poweroff - Run the device driver's poweroff callback.
+ * @dev: Device to handle.
+ *
+ * Follow PCI and resume devices from runtime suspend before running their
+ * system poweroff callbacks, unless the driver can cope with runtime-suspended
+ * devices during system suspend and there are no ACPI-specific reasons for
+ * resuming them.
+ */
+int acpi_subsys_poweroff(struct device *dev)
+{
+ if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) ||
+ acpi_dev_needs_resume(dev, ACPI_COMPANION(dev)))
+ pm_runtime_resume(dev);
- return pm_generic_freeze_late(dev);
+ return pm_generic_poweroff(dev);
}
-EXPORT_SYMBOL_GPL(acpi_subsys_freeze_late);
+EXPORT_SYMBOL_GPL(acpi_subsys_poweroff);
/**
- * acpi_subsys_freeze_noirq - Run the device driver's "noirq" freeze callback.
+ * acpi_subsys_poweroff_late - Run the device driver's poweroff callback.
* @dev: Device to handle.
+ *
+ * Carry out the generic late poweroff procedure for @dev and use ACPI to put
+ * it into a low-power state during system transition into a sleep state.
*/
-int acpi_subsys_freeze_noirq(struct device *dev)
+static int acpi_subsys_poweroff_late(struct device *dev)
{
+ int ret;
if (dev_pm_smart_suspend_and_suspended(dev))
return 0;
- return pm_generic_freeze_noirq(dev);
+ ret = pm_generic_poweroff_late(dev);
+ if (ret)
+ return ret;
+
+ return acpi_dev_suspend(dev, device_may_wakeup(dev));
}
-EXPORT_SYMBOL_GPL(acpi_subsys_freeze_noirq);
/**
- * acpi_subsys_thaw_noirq - Run the device driver's "noirq" thaw callback.
- * @dev: Device to handle.
+ * acpi_subsys_poweroff_noirq - Run the driver's "noirq" poweroff callback.
+ * @dev: Device to suspend.
*/
-int acpi_subsys_thaw_noirq(struct device *dev)
+static int acpi_subsys_poweroff_noirq(struct device *dev)
{
- /*
- * If the device is in runtime suspend, the "thaw" code may not work
- * correctly with it, so skip the driver callback and make the PM core
- * skip all of the subsequent "thaw" callbacks for the device.
- */
- if (dev_pm_smart_suspend_and_suspended(dev)) {
- dev_pm_skip_next_resume_phases(dev);
+ if (dev_pm_smart_suspend_and_suspended(dev))
return 0;
- }
- return pm_generic_thaw_noirq(dev);
+ return pm_generic_poweroff_noirq(dev);
}
-EXPORT_SYMBOL_GPL(acpi_subsys_thaw_noirq);
#endif /* CONFIG_PM_SLEEP */
static struct dev_pm_domain acpi_general_pm_domain = {
@@ -1186,14 +1247,10 @@ static struct dev_pm_domain acpi_general_pm_domain = {
.resume_noirq = acpi_subsys_resume_noirq,
.resume_early = acpi_subsys_resume_early,
.freeze = acpi_subsys_freeze,
- .freeze_late = acpi_subsys_freeze_late,
- .freeze_noirq = acpi_subsys_freeze_noirq,
- .thaw_noirq = acpi_subsys_thaw_noirq,
- .poweroff = acpi_subsys_suspend,
- .poweroff_late = acpi_subsys_suspend_late,
- .poweroff_noirq = acpi_subsys_suspend_noirq,
- .restore_noirq = acpi_subsys_resume_noirq,
- .restore_early = acpi_subsys_resume_early,
+ .poweroff = acpi_subsys_poweroff,
+ .poweroff_late = acpi_subsys_poweroff_late,
+ .poweroff_noirq = acpi_subsys_poweroff_noirq,
+ .restore_early = acpi_subsys_restore_early,
#endif
},
};
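
For context on the reworked poweroff path above: dev_pm_smart_suspend_and_suspended() is only true for devices whose drivers have opted into "smart suspend" and that are already runtime suspended, so only those devices are skipped. A minimal, hypothetical opt-in from a driver (not part of this patch; the function name is illustrative):

#include <linux/device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

/* Illustrative probe fragment: with DPM_FLAG_SMART_SUSPEND set, the new
 * acpi_subsys_poweroff_late()/_noirq() callbacks leave this device alone
 * when it is already runtime suspended. */
static void example_opt_into_smart_suspend(struct device *dev)
{
        dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND);
}
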
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index f6157d4d637a..f4c2fe6be4f2 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -139,8 +139,15 @@ int acpi_power_get_inferred_state(struct acpi_device *device, int *state);
int acpi_power_on_resources(struct acpi_device *device, int state);
int acpi_power_transition(struct acpi_device *device, int state);
+/* --------------------------------------------------------------------------
+ Device Power Management
+ -------------------------------------------------------------------------- */
+int acpi_device_get_power(struct acpi_device *device, int *state);
int acpi_wakeup_device_init(void);
+/* --------------------------------------------------------------------------
+ Processor
+ -------------------------------------------------------------------------- */
#ifdef CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC
void acpi_early_processor_set_pdc(void);
#else
diff --git a/drivers/acpi/irq.c b/drivers/acpi/irq.c
index 89690a471360..e209081d644b 100644
--- a/drivers/acpi/irq.c
+++ b/drivers/acpi/irq.c
@@ -292,3 +292,29 @@ void __init acpi_set_irq_model(enum acpi_irq_model_id model,
acpi_irq_model = model;
acpi_gsi_domain_id = fwnode;
}
+
+/**
+ * acpi_irq_create_hierarchy - Create a hierarchical IRQ domain with the default
+ * GSI domain as its parent.
+ * @flags: Irq domain flags associated with the domain
 * (IRQ domain flags as accepted by irq_domain_create_hierarchy().)
+ * @size: Size of the domain.
+ * @fwnode: Optional fwnode of the interrupt controller
+ * @ops: Pointer to the interrupt domain callbacks
+ * @host_data: Controller private data pointer
+ */
+struct irq_domain *acpi_irq_create_hierarchy(unsigned int flags,
+ unsigned int size,
+ struct fwnode_handle *fwnode,
+ const struct irq_domain_ops *ops,
+ void *host_data)
+{
+ struct irq_domain *d = irq_find_matching_fwnode(acpi_gsi_domain_id,
+ DOMAIN_BUS_ANY);
+
+ if (!d)
+ return NULL;
+
+ return irq_domain_create_hierarchy(d, flags, size, fwnode, ops,
+ host_data);
+}
+EXPORT_SYMBOL_GPL(acpi_irq_create_hierarchy);
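[Illustrative sketch, not part of this series: how an interrupt controller driver might stack a child domain on the default ACPI GSI domain with the new helper. The prototype is assumed to come from the matching include/linux/acpi.h change; the driver, ops and domain size below are hypothetical.]

/* Hypothetical consumer sketch -- not from this patch. */
#include <linux/acpi.h>
#include <linux/irqdomain.h>
#include <linux/platform_device.h>
#include <linux/property.h>

static const struct irq_domain_ops foo_domain_ops = {
    /* .alloc, .free and .translate callbacks would go here */
};

static int foo_irqchip_probe(struct platform_device *pdev)
{
    struct irq_domain *domain;

    /* Stack a 32-interrupt child domain on the default ACPI GSI domain. */
    domain = acpi_irq_create_hierarchy(0, 32, dev_fwnode(&pdev->dev),
                                       &foo_domain_ops, pdev);
    return domain ? 0 : -ENODEV;
}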
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index cc7507091dec..9c0edf2fc0dd 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -301,8 +301,8 @@ static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
* During early init (when acpi_permanent_mmap has not been set yet) this
* routine simply calls __acpi_map_table() to get the job done.
*/
-void __iomem *__ref
-acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
+void __iomem __ref
+*acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
{
struct acpi_ioremap *map;
void __iomem *virt;
diff --git a/drivers/acpi/pmic/intel_pmic.c b/drivers/acpi/pmic/intel_pmic.c
index 1b722fd57d5e..452041398b34 100644
--- a/drivers/acpi/pmic/intel_pmic.c
+++ b/drivers/acpi/pmic/intel_pmic.c
@@ -284,8 +284,6 @@ int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle,
intel_pmic_thermal_handler,
NULL, opregion);
if (ACPI_FAILURE(status)) {
- acpi_remove_address_space_handler(handle, PMIC_POWER_OPREGION_ID,
- intel_pmic_power_handler);
ret = -ENODEV;
goto out_remove_power_handler;
}
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index a916417b9e70..fe1e7bc91a5e 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -42,6 +42,11 @@ ACPI_MODULE_NAME("power");
#define ACPI_POWER_RESOURCE_STATE_ON 0x01
#define ACPI_POWER_RESOURCE_STATE_UNKNOWN 0xFF
+struct acpi_power_dependent_device {
+ struct device *dev;
+ struct list_head node;
+};
+
struct acpi_power_resource {
struct acpi_device device;
struct list_head list_node;
@@ -51,6 +56,7 @@ struct acpi_power_resource {
unsigned int ref_count;
bool wakeup_enabled;
struct mutex resource_lock;
+ struct list_head dependents;
};
struct acpi_power_resource_entry {
@@ -232,8 +238,121 @@ static int acpi_power_get_list_state(struct list_head *list, int *state)
return 0;
}
+static int
+acpi_power_resource_add_dependent(struct acpi_power_resource *resource,
+ struct device *dev)
+{
+ struct acpi_power_dependent_device *dep;
+ int ret = 0;
+
+ mutex_lock(&resource->resource_lock);
+ list_for_each_entry(dep, &resource->dependents, node) {
+ /* Only add it once */
+ if (dep->dev == dev)
+ goto unlock;
+ }
+
+ dep = kzalloc(sizeof(*dep), GFP_KERNEL);
+ if (!dep) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ dep->dev = dev;
+ list_add_tail(&dep->node, &resource->dependents);
+ dev_dbg(dev, "added power dependency to [%s]\n", resource->name);
+
+unlock:
+ mutex_unlock(&resource->resource_lock);
+ return ret;
+}
+
+static void
+acpi_power_resource_remove_dependent(struct acpi_power_resource *resource,
+ struct device *dev)
+{
+ struct acpi_power_dependent_device *dep;
+
+ mutex_lock(&resource->resource_lock);
+ list_for_each_entry(dep, &resource->dependents, node) {
+ if (dep->dev == dev) {
+ list_del(&dep->node);
+ kfree(dep);
+ dev_dbg(dev, "removed power dependency to [%s]\n",
+ resource->name);
+ break;
+ }
+ }
+ mutex_unlock(&resource->resource_lock);
+}
+
+/**
+ * acpi_device_power_add_dependent - Add dependent device of this ACPI device
+ * @adev: ACPI device pointer
+ * @dev: Dependent device
+ *
+ * If @adev has a non-empty _PR0, @dev is added as a dependent device to all
+ * power resources returned by it. This means that whenever these power
+ * resources are turned _ON, the dependent devices get runtime resumed. This
+ * is needed for devices such as PCI devices so that their drivers can
+ * re-initialize the hardware after it went to D0uninitialized.
+ *
+ * If @adev does not have _PR0 this does nothing.
+ *
+ * Returns %0 in case of success and negative errno otherwise.
+ */
+int acpi_device_power_add_dependent(struct acpi_device *adev,
+ struct device *dev)
+{
+ struct acpi_power_resource_entry *entry;
+ struct list_head *resources;
+ int ret;
+
+ if (!adev->flags.power_manageable)
+ return 0;
+
+ resources = &adev->power.states[ACPI_STATE_D0].resources;
+ list_for_each_entry(entry, resources, node) {
+ ret = acpi_power_resource_add_dependent(entry->resource, dev);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ list_for_each_entry(entry, resources, node)
+ acpi_power_resource_remove_dependent(entry->resource, dev);
+
+ return ret;
+}
+
+/**
+ * acpi_device_power_remove_dependent - Remove dependent device
+ * @adev: ACPI device pointer
+ * @dev: Dependent device
+ *
+ * Does the opposite of acpi_device_power_add_dependent() and removes the
+ * dependent device if it is found. Can also be called for an @adev that
+ * does not have _PR0.
+ */
+void acpi_device_power_remove_dependent(struct acpi_device *adev,
+ struct device *dev)
+{
+ struct acpi_power_resource_entry *entry;
+ struct list_head *resources;
+
+ if (!adev->flags.power_manageable)
+ return;
+
+ resources = &adev->power.states[ACPI_STATE_D0].resources;
+ list_for_each_entry_reverse(entry, resources, node)
+ acpi_power_resource_remove_dependent(entry->resource, dev);
+}
+
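[Illustrative sketch, not part of this series: how a bus layer could tie a consumer device to the power resources of its ACPI companion using the two helpers above. The declarations are assumed to come from the matching acpi_bus.h change; the function names are made up.]

/* Hypothetical usage sketch -- not from this patch. */
#include <linux/acpi.h>

static int foo_bind_power(struct device *dev)
{
    struct acpi_device *adev = ACPI_COMPANION(dev);

    if (!adev)
        return 0;

    /* Runtime-resume @dev whenever a _PR0 resource of @adev is turned on. */
    return acpi_device_power_add_dependent(adev, dev);
}

static void foo_unbind_power(struct device *dev)
{
    struct acpi_device *adev = ACPI_COMPANION(dev);

    if (adev)
        acpi_device_power_remove_dependent(adev, dev);
}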
static int __acpi_power_on(struct acpi_power_resource *resource)
{
+ struct acpi_power_dependent_device *dep;
acpi_status status = AE_OK;
status = acpi_evaluate_object(resource->device.handle, "_ON", NULL, NULL);
@@ -243,6 +362,21 @@ static int __acpi_power_on(struct acpi_power_resource *resource)
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Power resource [%s] turned on\n",
resource->name));
+ /*
+ * If there are other dependents on this power resource we need to
+ * resume them now so that their drivers can re-initialize the
+ * hardware properly after it went back to D0.
+ */
+ if (list_empty(&resource->dependents) ||
+ list_is_singular(&resource->dependents))
+ return 0;
+
+ list_for_each_entry(dep, &resource->dependents, node) {
+ dev_dbg(dep->dev, "runtime resuming because [%s] turned on\n",
+ resource->name);
+ pm_request_resume(dep->dev);
+ }
+
return 0;
}
@@ -810,6 +944,7 @@ int acpi_add_power_resource(acpi_handle handle)
ACPI_STA_DEFAULT);
mutex_init(&resource->resource_lock);
INIT_LIST_HEAD(&resource->list_node);
+ INIT_LIST_HEAD(&resource->dependents);
resource->name = device->pnp.bus_id;
strcpy(acpi_device_name(device), ACPI_POWER_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_POWER_CLASS);
diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c
index b72e6afaa8fb..1e7ac0bd0d3a 100644
--- a/drivers/acpi/pptt.c
+++ b/drivers/acpi/pptt.c
@@ -432,17 +432,40 @@ static void cache_setup_acpi_cpu(struct acpi_table_header *table,
}
}
+static bool flag_identical(struct acpi_table_header *table_hdr,
+ struct acpi_pptt_processor *cpu)
+{
+ struct acpi_pptt_processor *next;
+
+ /* heterogeneous machines must use PPTT revision > 1 */
+ if (table_hdr->revision < 2)
+ return false;
+
+ /* Locate the last node in the tree with IDENTICAL set */
+ if (cpu->flags & ACPI_PPTT_ACPI_IDENTICAL) {
+ next = fetch_pptt_node(table_hdr, cpu->parent);
+ if (!(next && next->flags & ACPI_PPTT_ACPI_IDENTICAL))
+ return true;
+ }
+
+ return false;
+}
+
/* Passing level values greater than this will result in search termination */
#define PPTT_ABORT_PACKAGE 0xFF
-static struct acpi_pptt_processor *acpi_find_processor_package_id(struct acpi_table_header *table_hdr,
- struct acpi_pptt_processor *cpu,
- int level, int flag)
+static struct acpi_pptt_processor *acpi_find_processor_tag(struct acpi_table_header *table_hdr,
+ struct acpi_pptt_processor *cpu,
+ int level, int flag)
{
struct acpi_pptt_processor *prev_node;
while (cpu && level) {
- if (cpu->flags & flag)
+ /* special case the identical flag to find last identical */
+ if (flag == ACPI_PPTT_ACPI_IDENTICAL) {
+ if (flag_identical(table_hdr, cpu))
+ break;
+ } else if (cpu->flags & flag)
break;
pr_debug("level %d\n", level);
prev_node = fetch_pptt_node(table_hdr, cpu->parent);
@@ -480,8 +503,8 @@ static int topology_get_acpi_cpu_tag(struct acpi_table_header *table,
cpu_node = acpi_find_processor_node(table, acpi_cpu_id);
if (cpu_node) {
- cpu_node = acpi_find_processor_package_id(table, cpu_node,
- level, flag);
+ cpu_node = acpi_find_processor_tag(table, cpu_node,
+ level, flag);
/*
* As per specification if the processor structure represents
* an actual processor, then ACPI processor ID must be valid.
@@ -660,3 +683,29 @@ int find_acpi_cpu_topology_package(unsigned int cpu)
return find_acpi_cpu_topology_tag(cpu, PPTT_ABORT_PACKAGE,
ACPI_PPTT_PHYSICAL_PACKAGE);
}
+
+/**
+ * find_acpi_cpu_topology_hetero_id() - Get a core architecture tag
+ * @cpu: Kernel logical CPU number
+ *
+ * Determine a unique heterogeneous tag for the given CPU. CPUs with the same
+ * implementation should have matching tags.
+ *
+ * The returned tag can be used to group peers with identical implementation.
+ *
+ * The search terminates when a level is found with the identical implementation
+ * flag set or we reach a root node.
+ *
+ * Due to limitations in the PPTT data structure, there may be rare situations
+ * where two cores in a heterogeneous machine are identical but do not share
+ * the same tag.
+ *
+ * Return: -ENOENT if the PPTT doesn't exist, or the CPU cannot be found.
+ * Otherwise returns a value which represents a group of identical cores
+ * similar to this CPU.
+ */
+int find_acpi_cpu_topology_hetero_id(unsigned int cpu)
+{
+ return find_acpi_cpu_topology_tag(cpu, PPTT_ABORT_PACKAGE,
+ ACPI_PPTT_ACPI_IDENTICAL);
+}
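[Illustrative sketch, not part of this series: a caller grouping CPUs by the heterogeneity tag returned above. The helper name and the pr_info() format are made up for the example.]

/* Hypothetical caller sketch -- not from this patch. */
#include <linux/acpi.h>
#include <linux/cpumask.h>
#include <linux/printk.h>

static void foo_dump_hetero_tags(void)
{
    unsigned int cpu;
    int tag;

    for_each_possible_cpu(cpu) {
        tag = find_acpi_cpu_topology_hetero_id(cpu);
        if (tag < 0)
            continue;    /* no PPTT, or CPU not described in it */
        pr_info("CPU%u: identical-implementation tag %d\n", cpu, tag);
    }
}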
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index e387a258d649..ed56c6d20b08 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -196,6 +196,7 @@ static void tsc_check_state(int state)
case X86_VENDOR_AMD:
case X86_VENDOR_INTEL:
case X86_VENDOR_CENTAUR:
+ case X86_VENDOR_ZHAOXIN:
/*
* AMD Fam10h TSC will tick in all
* C/P/S0/S1 states when this bit is set.
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index da3ced297f19..ea3d700da3ca 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -600,15 +600,29 @@ static struct fwnode_handle *
acpi_fwnode_get_named_child_node(const struct fwnode_handle *fwnode,
const char *childname)
{
+ char name[ACPI_PATH_SEGMENT_LENGTH];
struct fwnode_handle *child;
+ struct acpi_buffer path;
+ acpi_status status;
- /*
- * Find first matching named child node of this fwnode.
- * For ACPI this will be a data only sub-node.
- */
- fwnode_for_each_child_node(fwnode, child)
- if (acpi_data_node_match(child, childname))
+ path.length = sizeof(name);
+ path.pointer = name;
+
+ fwnode_for_each_child_node(fwnode, child) {
+ if (is_acpi_data_node(child)) {
+ if (acpi_data_node_match(child, childname))
+ return child;
+ continue;
+ }
+
+ status = acpi_get_name(ACPI_HANDLE_FWNODE(child),
+ ACPI_SINGLE_NAME, &path);
+ if (ACPI_FAILURE(status))
+ break;
+
+ if (!strncmp(name, childname, ACPI_NAMESEG_SIZE))
return child;
+ }
return NULL;
}
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 8ff08e531443..f0fe7c15d657 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -77,7 +77,7 @@ static int acpi_sleep_prepare(u32 acpi_state)
return 0;
}
-static bool acpi_sleep_state_supported(u8 sleep_state)
+bool acpi_sleep_state_supported(u8 sleep_state)
{
acpi_status status;
u8 type_a, type_b;
@@ -452,14 +452,6 @@ static int acpi_pm_prepare(void)
return error;
}
-static int find_powerf_dev(struct device *dev, void *data)
-{
- struct acpi_device *device = to_acpi_device(dev);
- const char *hid = acpi_device_hid(device);
-
- return !strcmp(hid, ACPI_BUTTON_HID_POWERF);
-}
-
/**
* acpi_pm_finish - Instruct the platform to leave a sleep state.
*
@@ -468,7 +460,7 @@ static int find_powerf_dev(struct device *dev, void *data)
*/
static void acpi_pm_finish(void)
{
- struct device *pwr_btn_dev;
+ struct acpi_device *pwr_btn_adev;
u32 acpi_state = acpi_target_sleep_state;
acpi_ec_unblock_transactions();
@@ -499,11 +491,11 @@ static void acpi_pm_finish(void)
return;
pwr_btn_event_pending = false;
- pwr_btn_dev = bus_find_device(&acpi_bus_type, NULL, NULL,
- find_powerf_dev);
- if (pwr_btn_dev) {
- pm_wakeup_event(pwr_btn_dev, 0);
- put_device(pwr_btn_dev);
+ pwr_btn_adev = acpi_dev_get_first_match_dev(ACPI_BUTTON_HID_POWERF,
+ NULL, -1);
+ if (pwr_btn_adev) {
+ pm_wakeup_event(&pwr_btn_adev->dev, 0);
+ acpi_dev_put(pwr_btn_adev);
}
}
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index de974322a197..b32327759380 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -490,16 +490,17 @@ static u8 __init acpi_table_checksum(u8 *buffer, u32 length)
/* All but ACPI_SIG_RSDP and ACPI_SIG_FACS: */
static const char * const table_sigs[] = {
- ACPI_SIG_BERT, ACPI_SIG_CPEP, ACPI_SIG_ECDT, ACPI_SIG_EINJ,
- ACPI_SIG_ERST, ACPI_SIG_HEST, ACPI_SIG_MADT, ACPI_SIG_MSCT,
- ACPI_SIG_SBST, ACPI_SIG_SLIT, ACPI_SIG_SRAT, ACPI_SIG_ASF,
- ACPI_SIG_BOOT, ACPI_SIG_DBGP, ACPI_SIG_DMAR, ACPI_SIG_HPET,
- ACPI_SIG_IBFT, ACPI_SIG_IVRS, ACPI_SIG_MCFG, ACPI_SIG_MCHI,
- ACPI_SIG_SLIC, ACPI_SIG_SPCR, ACPI_SIG_SPMI, ACPI_SIG_TCPA,
- ACPI_SIG_UEFI, ACPI_SIG_WAET, ACPI_SIG_WDAT, ACPI_SIG_WDDT,
- ACPI_SIG_WDRT, ACPI_SIG_DSDT, ACPI_SIG_FADT, ACPI_SIG_PSDT,
- ACPI_SIG_RSDT, ACPI_SIG_XSDT, ACPI_SIG_SSDT, ACPI_SIG_IORT,
- ACPI_SIG_NFIT, ACPI_SIG_HMAT, ACPI_SIG_PPTT, NULL };
+ ACPI_SIG_BERT, ACPI_SIG_BGRT, ACPI_SIG_CPEP, ACPI_SIG_ECDT,
+ ACPI_SIG_EINJ, ACPI_SIG_ERST, ACPI_SIG_HEST, ACPI_SIG_MADT,
+ ACPI_SIG_MSCT, ACPI_SIG_SBST, ACPI_SIG_SLIT, ACPI_SIG_SRAT,
+ ACPI_SIG_ASF, ACPI_SIG_BOOT, ACPI_SIG_DBGP, ACPI_SIG_DMAR,
+ ACPI_SIG_HPET, ACPI_SIG_IBFT, ACPI_SIG_IVRS, ACPI_SIG_MCFG,
+ ACPI_SIG_MCHI, ACPI_SIG_SLIC, ACPI_SIG_SPCR, ACPI_SIG_SPMI,
+ ACPI_SIG_TCPA, ACPI_SIG_UEFI, ACPI_SIG_WAET, ACPI_SIG_WDAT,
+ ACPI_SIG_WDDT, ACPI_SIG_WDRT, ACPI_SIG_DSDT, ACPI_SIG_FADT,
+ ACPI_SIG_PSDT, ACPI_SIG_RSDT, ACPI_SIG_XSDT, ACPI_SIG_SSDT,
+ ACPI_SIG_IORT, ACPI_SIG_NFIT, ACPI_SIG_HMAT, ACPI_SIG_PPTT,
+ NULL };
#define ACPI_HEADER_SIZE sizeof(struct acpi_table_header)
diff --git a/drivers/ata/acard-ahci.c b/drivers/ata/acard-ahci.c
index b1b49dbd0b14..85357f27a66b 100644
--- a/drivers/ata/acard-ahci.c
+++ b/drivers/ata/acard-ahci.c
@@ -344,7 +344,6 @@ static int acard_ahci_port_start(struct ata_port *ap)
mem = dmam_alloc_coherent(dev, dma_sz, &mem_dma, GFP_KERNEL);
if (!mem)
return -ENOMEM;
- memset(mem, 0, dma_sz);
/*
* First item in chunk of DMA memory: 32-slot command table,
diff --git a/drivers/ata/ahci_sunxi.c b/drivers/ata/ahci_sunxi.c
index 4100e904376b..cb69b737cb49 100644
--- a/drivers/ata/ahci_sunxi.c
+++ b/drivers/ata/ahci_sunxi.c
@@ -149,8 +149,51 @@ static void ahci_sunxi_start_engine(struct ata_port *ap)
void __iomem *port_mmio = ahci_port_base(ap);
struct ahci_host_priv *hpriv = ap->host->private_data;
- /* Setup DMA before DMA start */
- sunxi_clrsetbits(hpriv->mmio + AHCI_P0DMACR, 0x0000ff00, 0x00004400);
+ /* Setup DMA before DMA start
+ *
+ * NOTE: A similar SoC with SATA/AHCI by Texas Instruments documents
+ * this Vendor Specific Port (P0DMACR, aka PxDMACR) in its
+ * User's Guide document (TMS320C674x/OMAP-L1x Processor
+ * Serial ATA (SATA) Controller, Literature Number: SPRUGJ8C,
+ * March 2011, Chapter 4.33 Port DMA Control Register (P0DMACR),
+ * p.68, https://www.ti.com/lit/ug/sprugj8c/sprugj8c.pdf)
+ * as equivalent to the following struct:
+ *
+ * struct AHCI_P0DMACR_t
+ * {
+ * unsigned TXTS : 4;
+ * unsigned RXTS : 4;
+ * unsigned TXABL : 4;
+ * unsigned RXABL : 4;
+ * unsigned Reserved : 16;
+ * };
+ *
+ * TXTS: Transmit Transaction Size (TX_TRANSACTION_SIZE).
+ * This field defines the DMA transaction size in DWORDs for
+ * transmit (system bus read, device write) operation. [...]
+ *
+ * RXTS: Receive Transaction Size (RX_TRANSACTION_SIZE).
+ * This field defines the Port DMA transaction size in DWORDs
+ * for receive (system bus write, device read) operation. [...]
+ *
+ * TXABL: Transmit Burst Limit.
+ * This field allows software to limit the VBUSP master read
+ * burst size. [...]
+ *
+ * RXABL: Receive Burst Limit.
+ * Allows software to limit the VBUSP master write burst
+ * size. [...]
+ *
+ * Reserved: Reserved.
+ *
+ *
+ * NOTE: According to the above document, the following alternative
+ * to the code below could perhaps be a better option
+ * (or preparation) for possible further improvements later:
+ * sunxi_clrsetbits(hpriv->mmio + AHCI_P0DMACR, 0x0000ffff,
+ * 0x00000033);
+ */
+ sunxi_clrsetbits(hpriv->mmio + AHCI_P0DMACR, 0x0000ffff, 0x00004433);
/* Start DMA */
sunxi_setbits(port_mmio + PORT_CMD, PORT_CMD_START);
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 0984c4b76d7e..e4c45d3cca79 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -2365,7 +2365,6 @@ static int ahci_port_start(struct ata_port *ap)
mem = dmam_alloc_coherent(dev, dma_sz, &mem_dma, GFP_KERNEL);
if (!mem)
return -ENOMEM;
- memset(mem, 0, dma_sz);
/*
* First item in chunk of DMA memory: 32-slot command table,
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 4a2dff303865..28c492be0a57 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4462,9 +4462,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
/* drives which fail FPDMA_AA activation (some may freeze afterwards)
the ST disks also have LPM issues */
- { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA |
- ATA_HORKAGE_NOLPM, },
- { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA |
+ { "ST1000LM024 HN-M101MBB", NULL, ATA_HORKAGE_BROKEN_FPDMA_AA |
ATA_HORKAGE_NOLPM, },
{ "VB0250EAVER", "HPG7", ATA_HORKAGE_BROKEN_FPDMA_AA },
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 9d687e1d4325..3bfd9da58473 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1469,7 +1469,7 @@ static int ata_eh_read_log_10h(struct ata_device *dev,
tf->hob_lbah = buf[10];
tf->nsect = buf[12];
tf->hob_nsect = buf[13];
- if (ata_id_has_ncq_autosense(dev->id))
+ if (dev->class == ATA_DEV_ZAC && ata_id_has_ncq_autosense(dev->id))
tf->auxiliary = buf[14] << 16 | buf[15] << 8 | buf[16];
return 0;
@@ -1716,7 +1716,8 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
memcpy(&qc->result_tf, &tf, sizeof(tf));
qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
- if ((qc->result_tf.command & ATA_SENSE) || qc->result_tf.auxiliary) {
+ if (dev->class == ATA_DEV_ZAC &&
+ ((qc->result_tf.command & ATA_SENSE) || qc->result_tf.auxiliary)) {
char sense_key, asc, ascq;
sense_key = (qc->result_tf.auxiliary >> 16) & 0xff;
@@ -1770,10 +1771,11 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
}
switch (qc->dev->class) {
- case ATA_DEV_ATA:
case ATA_DEV_ZAC:
if (stat & ATA_SENSE)
ata_eh_request_sense(qc, qc->scsicmd);
+ /* fall through */
+ case ATA_DEV_ATA:
if (err & ATA_ICRC)
qc->err_mask |= AC_ERR_ATA_BUS;
if (err & (ATA_UNC | ATA_AMNF))
diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
index 52fa8606a25f..c5bbb07aa7d9 100644
--- a/drivers/ata/pdc_adma.c
+++ b/drivers/ata/pdc_adma.c
@@ -550,7 +550,6 @@ static int adma_port_start(struct ata_port *ap)
(u32)pp->pkt_dma);
return -ENOMEM;
}
- memset(pp->pkt, 0, ADMA_PKT_BYTES);
ap->private_data = pp;
adma_reinit_engine(ap);
return 0;
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 54bfab15c74a..b44b4b64354c 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -1136,7 +1136,6 @@ static int nv_adma_port_start(struct ata_port *ap)
&mem_dma, GFP_KERNEL);
if (!mem)
return -ENOMEM;
- memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);
/*
* First item in chunk of DMA memory:
@@ -1946,7 +1945,6 @@ static int nv_swncq_port_start(struct ata_port *ap)
&pp->prd_dma, GFP_KERNEL);
if (!pp->prd)
return -ENOMEM;
- memset(pp->prd, 0, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE);
ap->private_data = pp;
pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
index 7ec0c216a6a6..865e5c58bd94 100644
--- a/drivers/ata/sata_qstor.c
+++ b/drivers/ata/sata_qstor.c
@@ -477,7 +477,6 @@ static int qs_port_start(struct ata_port *ap)
GFP_KERNEL);
if (!pp->pkt)
return -ENOMEM;
- memset(pp->pkt, 0, QS_PKT_BYTES);
ap->private_data = pp;
qs_enter_reg_mode(ap);
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index bfdf41912588..98aad8206921 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -1202,7 +1202,6 @@ static int sil24_port_start(struct ata_port *ap)
cb = dmam_alloc_coherent(dev, cb_size, &cb_dma, GFP_KERNEL);
if (!cb)
return -ENOMEM;
- memset(cb, 0, cb_size);
pp->cmd_block = cb;
pp->cmd_block_dma = cb_dma;
diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig
index c52c738e554a..dd61fdd400f0 100644
--- a/drivers/auxdisplay/Kconfig
+++ b/drivers/auxdisplay/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
#
# For a description of the syntax of this configuration file,
-# see Documentation/kbuild/kconfig-language.txt.
+# see Documentation/kbuild/kconfig-language.rst.
#
# Auxiliary display drivers configuration.
#
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 1739d7e1952a..9b09e31ae82f 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -43,7 +43,7 @@ static ssize_t cpu_capacity_show(struct device *dev,
{
struct cpu *cpu = container_of(dev, struct cpu, dev);
- return sprintf(buf, "%lu\n", topology_get_cpu_scale(NULL, cpu->dev.id));
+ return sprintf(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id));
}
static void update_topology_flags_workfn(struct work_struct *work);
@@ -116,7 +116,7 @@ void topology_normalize_cpu_scale(void)
/ capacity_scale;
topology_set_cpu_scale(cpu, capacity);
pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
- cpu, topology_get_cpu_scale(NULL, cpu));
+ cpu, topology_get_cpu_scale(cpu));
}
}
@@ -185,7 +185,7 @@ init_cpu_capacity_callback(struct notifier_block *nb,
cpumask_andnot(cpus_to_visit, cpus_to_visit, policy->related_cpus);
for_each_cpu(cpu, policy->related_cpus) {
- raw_capacity[cpu] = topology_get_cpu_scale(NULL, cpu) *
+ raw_capacity[cpu] = topology_get_cpu_scale(cpu) *
policy->cpuinfo.max_freq / 1000UL;
capacity_scale = max(raw_capacity[cpu], capacity_scale);
}
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index a7359535caf5..8827c60f51e2 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -213,6 +213,8 @@ int __weak cache_setup_acpi(unsigned int cpu)
return -ENOTSUPP;
}
+unsigned int coherency_max_size;
+
static int cache_shared_cpu_map_setup(unsigned int cpu)
{
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
@@ -251,6 +253,9 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
}
}
+ /* record the maximum cache line size */
+ if (this_leaf->coherency_line_size > coherency_max_size)
+ coherency_max_size = this_leaf->coherency_line_size;
}
return 0;
diff --git a/drivers/base/core.c b/drivers/base/core.c
index fd7511e04e62..b4c64528f13c 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -2474,6 +2474,34 @@ struct device *device_find_child(struct device *parent, void *data,
}
EXPORT_SYMBOL_GPL(device_find_child);
+/**
+ * device_find_child_by_name - device iterator for locating a child device.
+ * @parent: parent struct device
+ * @name: name of the child device
+ *
+ * This is similar to the device_find_child() function above, but it
+ * returns a reference to a device that has the name @name.
+ *
+ * NOTE: you will need to drop the reference with put_device() after use.
+ */
+struct device *device_find_child_by_name(struct device *parent,
+ const char *name)
+{
+ struct klist_iter i;
+ struct device *child;
+
+ if (!parent)
+ return NULL;
+
+ klist_iter_init(&parent->p->klist_children, &i);
+ while ((child = next_device(&i)))
+ if (!strcmp(dev_name(child), name) && get_device(child))
+ break;
+ klist_iter_exit(&i);
+ return child;
+}
+EXPORT_SYMBOL_GPL(device_find_child_by_name);
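[Illustrative sketch, not part of this series: looking up a named child and dropping the reference afterwards, as the NOTE above requires. The "port0" child name and helper are made up.]

/* Hypothetical usage sketch -- not from this patch. */
#include <linux/device.h>

static bool foo_has_port0(struct device *parent)
{
    struct device *child;

    child = device_find_child_by_name(parent, "port0");
    if (!child)
        return false;

    /* ... use the child here ... */
    put_device(child);    /* drop the reference taken by the lookup */
    return true;
}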
+
int __init devices_init(void)
{
devices_kset = kset_create_and_add("devices", &device_uevent_ops, NULL);
diff --git a/drivers/base/devcon.c b/drivers/base/devcon.c
index 04db9ae235e4..f7035fc12b92 100644
--- a/drivers/base/devcon.c
+++ b/drivers/base/devcon.c
@@ -38,6 +38,28 @@ fwnode_graph_devcon_match(struct fwnode_handle *fwnode, const char *con_id,
return NULL;
}
+static void *
+fwnode_devcon_match(struct fwnode_handle *fwnode, const char *con_id,
+ void *data, devcon_match_fn_t match)
+{
+ struct device_connection con = { };
+ void *ret;
+ int i;
+
+ for (i = 0; ; i++) {
+ con.fwnode = fwnode_find_reference(fwnode, con_id, i);
+ if (IS_ERR(con.fwnode))
+ break;
+
+ ret = match(&con, -1, data);
+ fwnode_handle_put(con.fwnode);
+ if (ret)
+ return ret;
+ }
+
+ return NULL;
+}
+
/**
* device_connection_find_match - Find physical connection to a device
* @dev: Device with the connection
@@ -65,6 +87,10 @@ void *device_connection_find_match(struct device *dev, const char *con_id,
ret = fwnode_graph_devcon_match(fwnode, con_id, data, match);
if (ret)
return ret;
+
+ ret = fwnode_devcon_match(fwnode, con_id, data, match);
+ if (ret)
+ return ret;
}
mutex_lock(&devcon_lock);
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index 59d19dd64928..ced6863a16a5 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -12,6 +12,7 @@
#include <linux/pm_clock.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
+#include <linux/of_clk.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/pm_domain.h>
@@ -92,8 +93,6 @@ static int __pm_clk_add(struct device *dev, const char *con_id,
if (con_id) {
ce->con_id = kstrdup(con_id, GFP_KERNEL);
if (!ce->con_id) {
- dev_err(dev,
- "Not enough memory for clock connection ID.\n");
kfree(ce);
return -ENOMEM;
}
@@ -195,8 +194,7 @@ int of_pm_clk_add_clks(struct device *dev)
if (!dev || !dev->of_node)
return -EINVAL;
- count = of_count_phandle_with_args(dev->of_node, "clocks",
- "#clock-cells");
+ count = of_clk_get_parent_count(dev->of_node);
if (count <= 0)
return -ENODEV;
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index dcfc0a36c8f7..7fb2c39bc725 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -530,21 +530,6 @@ static void dpm_watchdog_clear(struct dpm_watchdog *wd)
/*------------------------- Resume routines -------------------------*/
/**
- * dev_pm_skip_next_resume_phases - Skip next system resume phases for device.
- * @dev: Target device.
- *
- * Make the core skip the "early resume" and "resume" phases for @dev.
- *
- * This function can be called by middle-layer code during the "noirq" phase of
- * system resume if necessary, but not by device drivers.
- */
-void dev_pm_skip_next_resume_phases(struct device *dev)
-{
- dev->power.is_late_suspended = false;
- dev->power.is_suspended = false;
-}
-
-/**
* suspend_event - Return a "suspend" message for given "resume" one.
* @resume_msg: PM message representing a system-wide resume transition.
*/
@@ -681,6 +666,9 @@ Skip:
dev->power.is_noirq_suspended = false;
if (skip_resume) {
+ /* Make the next phases of resume skip the device. */
+ dev->power.is_late_suspended = false;
+ dev->power.is_suspended = false;
/*
* The device is going to be left in suspend, but it might not
* have been in runtime suspend before the system suspended, so
@@ -689,7 +677,6 @@ Skip:
* device again.
*/
pm_runtime_set_suspended(dev);
- dev_pm_skip_next_resume_phases(dev);
}
Out:
@@ -1631,17 +1618,20 @@ int dpm_suspend_late(pm_message_t state)
*/
int dpm_suspend_end(pm_message_t state)
{
- int error = dpm_suspend_late(state);
+ ktime_t starttime = ktime_get();
+ int error;
+
+ error = dpm_suspend_late(state);
if (error)
- return error;
+ goto out;
error = dpm_suspend_noirq(state);
- if (error) {
+ if (error)
dpm_resume_early(resume_event(state));
- return error;
- }
- return 0;
+out:
+ dpm_show_time(starttime, state, error, "end");
+ return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);
@@ -2034,6 +2024,7 @@ int dpm_prepare(pm_message_t state)
*/
int dpm_suspend_start(pm_message_t state)
{
+ ktime_t starttime = ktime_get();
int error;
error = dpm_prepare(state);
@@ -2042,6 +2033,7 @@ int dpm_suspend_start(pm_message_t state)
dpm_save_failed_step(SUSPEND_PREPARE);
} else
error = dpm_suspend(state);
+ dpm_show_time(starttime, state, error, "start");
return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 5b2b6a05a4f3..ee31d4f8d856 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -968,8 +968,6 @@ void pm_wakep_autosleep_enabled(bool set)
}
#endif /* CONFIG_PM_AUTOSLEEP */
-static struct dentry *wakeup_sources_stats_dentry;
-
/**
* print_wakeup_source_stats - Print wakeup source statistics information.
* @m: seq_file to print the statistics into.
@@ -1099,8 +1097,8 @@ static const struct file_operations wakeup_sources_stats_fops = {
static int __init wakeup_sources_debugfs_init(void)
{
- wakeup_sources_stats_dentry = debugfs_create_file("wakeup_sources",
- S_IRUGO, NULL, NULL, &wakeup_sources_stats_fops);
+ debugfs_create_file("wakeup_sources", S_IRUGO, NULL, NULL,
+ &wakeup_sources_stats_fops);
return 0;
}
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 348b37e64944..81bd01ed4042 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -485,6 +485,30 @@ int fwnode_property_get_reference_args(const struct fwnode_handle *fwnode,
EXPORT_SYMBOL_GPL(fwnode_property_get_reference_args);
/**
+ * fwnode_find_reference - Find named reference to a fwnode_handle
+ * @fwnode: Firmware node where to look for the reference
+ * @name: The name of the reference
+ * @index: Index of the reference
+ *
+ * @index can be used when the named reference holds a table of references.
+ *
+ * Returns a pointer to the referenced fwnode, or ERR_PTR() on error. The caller
+ * is responsible for calling fwnode_handle_put() on the returned fwnode pointer.
+ */
+struct fwnode_handle *fwnode_find_reference(const struct fwnode_handle *fwnode,
+ const char *name,
+ unsigned int index)
+{
+ struct fwnode_reference_args args;
+ int ret;
+
+ ret = fwnode_property_get_reference_args(fwnode, name, NULL, 0, index,
+ &args);
+ return ret ? ERR_PTR(ret) : args.fwnode;
+}
+EXPORT_SYMBOL_GPL(fwnode_find_reference);
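[Illustrative sketch, not part of this series: resolving a named reference from a consumer node and releasing it. The "companion-device" property name is an assumption for the example.]

/* Hypothetical usage sketch -- not from this patch. */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/property.h>

static int foo_get_companion(struct device *dev)
{
    struct fwnode_handle *ref;

    ref = fwnode_find_reference(dev_fwnode(dev), "companion-device", 0);
    if (IS_ERR(ref))
        return PTR_ERR(ref);

    /* ... look up and use the device behind @ref ... */
    fwnode_handle_put(ref);    /* the caller owns the reference */
    return 0;
}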
+
+/**
* device_remove_properties - Remove properties from a device object.
* @dev: Device whose properties to remove.
*
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
index 6ad5ef48b61e..a4984136c19d 100644
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -4,7 +4,7 @@
# subsystems should select the appropriate symbols.
config REGMAP
- default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_W1 || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ)
+ default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_W1 || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ || REGMAP_SCCB || REGMAP_I3C)
select IRQ_DOMAIN if REGMAP_IRQ
bool
@@ -49,3 +49,7 @@ config REGMAP_SOUNDWIRE
config REGMAP_SCCB
tristate
depends on I2C
+
+config REGMAP_I3C
+ tristate
+ depends on I3C
diff --git a/drivers/base/regmap/Makefile b/drivers/base/regmap/Makefile
index f5b4e8851d00..ff6c7d8ec1cd 100644
--- a/drivers/base/regmap/Makefile
+++ b/drivers/base/regmap/Makefile
@@ -16,3 +16,4 @@ obj-$(CONFIG_REGMAP_IRQ) += regmap-irq.o
obj-$(CONFIG_REGMAP_W1) += regmap-w1.o
obj-$(CONFIG_REGMAP_SOUNDWIRE) += regmap-sdw.o
obj-$(CONFIG_REGMAP_SCCB) += regmap-sccb.o
+obj-$(CONFIG_REGMAP_I3C) += regmap-i3c.o
diff --git a/drivers/base/regmap/regcache-lzo.c b/drivers/base/regmap/regcache-lzo.c
index fc14e8b9344f..7886303eb026 100644
--- a/drivers/base/regmap/regcache-lzo.c
+++ b/drivers/base/regmap/regcache-lzo.c
@@ -148,20 +148,18 @@ static int regcache_lzo_init(struct regmap *map)
* that register.
*/
bmp_size = map->num_reg_defaults_raw;
- sync_bmp = kmalloc_array(BITS_TO_LONGS(bmp_size), sizeof(long),
- GFP_KERNEL);
+ sync_bmp = bitmap_zalloc(bmp_size, GFP_KERNEL);
if (!sync_bmp) {
ret = -ENOMEM;
goto err;
}
- bitmap_zero(sync_bmp, bmp_size);
/* allocate the lzo blocks and initialize them */
for (i = 0; i < blkcount; i++) {
lzo_blocks[i] = kzalloc(sizeof **lzo_blocks,
GFP_KERNEL);
if (!lzo_blocks[i]) {
- kfree(sync_bmp);
+ bitmap_free(sync_bmp);
ret = -ENOMEM;
goto err;
}
@@ -213,7 +211,7 @@ static int regcache_lzo_exit(struct regmap *map)
* only once.
*/
if (lzo_blocks[0])
- kfree(lzo_blocks[0]->sync_bmp);
+ bitmap_free(lzo_blocks[0]->sync_bmp);
for (i = 0; i < blkcount; i++) {
if (lzo_blocks[i]) {
kfree(lzo_blocks[i]->wmem);
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 263f82516ff4..e5e1b3a01b1a 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -579,6 +579,8 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
}
if (!strcmp(name, "dummy")) {
+ kfree(map->debugfs_name);
+
map->debugfs_name = kasprintf(GFP_KERNEL, "dummy%d",
dummy_index);
name = map->debugfs_name;
diff --git a/drivers/base/regmap/regmap-i3c.c b/drivers/base/regmap/regmap-i3c.c
new file mode 100644
index 000000000000..1578fb506683
--- /dev/null
+++ b/drivers/base/regmap/regmap-i3c.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
+
+#include <linux/regmap.h>
+#include <linux/i3c/device.h>
+#include <linux/i3c/master.h>
+#include <linux/module.h>
+
+static int regmap_i3c_write(void *context, const void *data, size_t count)
+{
+ struct device *dev = context;
+ struct i3c_device *i3c = dev_to_i3cdev(dev);
+ struct i3c_priv_xfer xfers[] = {
+ {
+ .rnw = false,
+ .len = count,
+ .data.out = data,
+ },
+ };
+
+ return i3c_device_do_priv_xfers(i3c, xfers, 1);
+}
+
+static int regmap_i3c_read(void *context,
+ const void *reg, size_t reg_size,
+ void *val, size_t val_size)
+{
+ struct device *dev = context;
+ struct i3c_device *i3c = dev_to_i3cdev(dev);
+ struct i3c_priv_xfer xfers[2];
+
+ xfers[0].rnw = false;
+ xfers[0].len = reg_size;
+ xfers[0].data.out = reg;
+
+ xfers[1].rnw = true;
+ xfers[1].len = val_size;
+ xfers[1].data.in = val;
+
+ return i3c_device_do_priv_xfers(i3c, xfers, 2);
+}
+
+static struct regmap_bus regmap_i3c = {
+ .write = regmap_i3c_write,
+ .read = regmap_i3c_read,
+};
+
+struct regmap *__devm_regmap_init_i3c(struct i3c_device *i3c,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
+{
+ return __devm_regmap_init(&i3c->dev, &regmap_i3c, &i3c->dev, config,
+ lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__devm_regmap_init_i3c);
+
+MODULE_AUTHOR("Vitor Soares <vitor.soares@synopsys.com>");
+MODULE_DESCRIPTION("Regmap I3C Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index f1025452bb39..19f57ccfbe1d 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -1637,6 +1637,8 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
map->format.reg_bytes +
map->format.pad_bytes,
val, val_len);
+ else
+ ret = -ENOTSUPP;
/* If that didn't work fall back on linearising by hand. */
if (ret == -ENOTSUPP) {
diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
index 7fc5a18e02ad..e7b3aa3bd55a 100644
--- a/drivers/base/swnode.c
+++ b/drivers/base/swnode.c
@@ -11,25 +11,25 @@
#include <linux/property.h>
#include <linux/slab.h>
-struct software_node {
+struct swnode {
int id;
struct kobject kobj;
struct fwnode_handle fwnode;
+ const struct software_node *node;
/* hierarchy */
struct ida child_ids;
struct list_head entry;
struct list_head children;
- struct software_node *parent;
+ struct swnode *parent;
- /* properties */
- const struct property_entry *properties;
+ unsigned int allocated:1;
};
static DEFINE_IDA(swnode_root_ids);
static struct kset *swnode_kset;
-#define kobj_to_swnode(_kobj_) container_of(_kobj_, struct software_node, kobj)
+#define kobj_to_swnode(_kobj_) container_of(_kobj_, struct swnode, kobj)
static const struct fwnode_operations software_node_ops;
@@ -37,17 +37,56 @@ bool is_software_node(const struct fwnode_handle *fwnode)
{
return !IS_ERR_OR_NULL(fwnode) && fwnode->ops == &software_node_ops;
}
+EXPORT_SYMBOL_GPL(is_software_node);
-#define to_software_node(__fwnode) \
+#define to_swnode(__fwnode) \
({ \
- typeof(__fwnode) __to_software_node_fwnode = __fwnode; \
+ typeof(__fwnode) __to_swnode_fwnode = __fwnode; \
\
- is_software_node(__to_software_node_fwnode) ? \
- container_of(__to_software_node_fwnode, \
- struct software_node, fwnode) : \
- NULL; \
+ is_software_node(__to_swnode_fwnode) ? \
+ container_of(__to_swnode_fwnode, \
+ struct swnode, fwnode) : NULL; \
})
+static struct swnode *
+software_node_to_swnode(const struct software_node *node)
+{
+ struct swnode *swnode;
+ struct kobject *k;
+
+ if (!node)
+ return NULL;
+
+ spin_lock(&swnode_kset->list_lock);
+
+ list_for_each_entry(k, &swnode_kset->list, entry) {
+ swnode = kobj_to_swnode(k);
+ if (swnode->node == node)
+ break;
+ swnode = NULL;
+ }
+
+ spin_unlock(&swnode_kset->list_lock);
+
+ return swnode;
+}
+
+const struct software_node *to_software_node(struct fwnode_handle *fwnode)
+{
+ struct swnode *swnode = to_swnode(fwnode);
+
+ return swnode ? swnode->node : NULL;
+}
+EXPORT_SYMBOL_GPL(to_software_node);
+
+struct fwnode_handle *software_node_fwnode(const struct software_node *node)
+{
+ struct swnode *swnode = software_node_to_swnode(node);
+
+ return swnode ? &swnode->fwnode : NULL;
+}
+EXPORT_SYMBOL_GPL(software_node_fwnode);
+
/* -------------------------------------------------------------------------- */
/* property_entry processing */
@@ -383,6 +422,9 @@ property_entries_dup(const struct property_entry *properties)
int i, n = 0;
int ret;
+ if (!properties)
+ return NULL;
+
while (properties[n].name)
n++;
@@ -430,7 +472,7 @@ EXPORT_SYMBOL_GPL(property_entries_free);
static struct fwnode_handle *software_node_get(struct fwnode_handle *fwnode)
{
- struct software_node *swnode = to_software_node(fwnode);
+ struct swnode *swnode = to_swnode(fwnode);
kobject_get(&swnode->kobj);
@@ -439,7 +481,7 @@ static struct fwnode_handle *software_node_get(struct fwnode_handle *fwnode)
static void software_node_put(struct fwnode_handle *fwnode)
{
- struct software_node *swnode = to_software_node(fwnode);
+ struct swnode *swnode = to_swnode(fwnode);
kobject_put(&swnode->kobj);
}
@@ -447,8 +489,9 @@ static void software_node_put(struct fwnode_handle *fwnode)
static bool software_node_property_present(const struct fwnode_handle *fwnode,
const char *propname)
{
- return !!property_entry_get(to_software_node(fwnode)->properties,
- propname);
+ struct swnode *swnode = to_swnode(fwnode);
+
+ return !!property_entry_get(swnode->node->properties, propname);
}
static int software_node_read_int_array(const struct fwnode_handle *fwnode,
@@ -456,9 +499,9 @@ static int software_node_read_int_array(const struct fwnode_handle *fwnode,
unsigned int elem_size, void *val,
size_t nval)
{
- struct software_node *swnode = to_software_node(fwnode);
+ struct swnode *swnode = to_swnode(fwnode);
- return property_entry_read_int_array(swnode->properties, propname,
+ return property_entry_read_int_array(swnode->node->properties, propname,
elem_size, val, nval);
}
@@ -466,27 +509,26 @@ static int software_node_read_string_array(const struct fwnode_handle *fwnode,
const char *propname,
const char **val, size_t nval)
{
- struct software_node *swnode = to_software_node(fwnode);
+ struct swnode *swnode = to_swnode(fwnode);
- return property_entry_read_string_array(swnode->properties, propname,
- val, nval);
+ return property_entry_read_string_array(swnode->node->properties,
+ propname, val, nval);
}
static struct fwnode_handle *
software_node_get_parent(const struct fwnode_handle *fwnode)
{
- struct software_node *swnode = to_software_node(fwnode);
+ struct swnode *swnode = to_swnode(fwnode);
- return swnode ? (swnode->parent ? &swnode->parent->fwnode : NULL) :
- NULL;
+ return swnode ? (swnode->parent ? &swnode->parent->fwnode : NULL) : NULL;
}
static struct fwnode_handle *
software_node_get_next_child(const struct fwnode_handle *fwnode,
struct fwnode_handle *child)
{
- struct software_node *p = to_software_node(fwnode);
- struct software_node *c = to_software_node(child);
+ struct swnode *p = to_swnode(fwnode);
+ struct swnode *c = to_swnode(child);
if (!p || list_empty(&p->children) ||
(c && list_is_last(&c->entry, &p->children)))
@@ -495,7 +537,7 @@ software_node_get_next_child(const struct fwnode_handle *fwnode,
if (c)
c = list_next_entry(c, entry);
else
- c = list_first_entry(&p->children, struct software_node, entry);
+ c = list_first_entry(&p->children, struct swnode, entry);
return &c->fwnode;
}
@@ -503,18 +545,14 @@ static struct fwnode_handle *
software_node_get_named_child_node(const struct fwnode_handle *fwnode,
const char *childname)
{
- struct software_node *swnode = to_software_node(fwnode);
- const struct property_entry *prop;
- struct software_node *child;
+ struct swnode *swnode = to_swnode(fwnode);
+ struct swnode *child;
if (!swnode || list_empty(&swnode->children))
return NULL;
list_for_each_entry(child, &swnode->children, entry) {
- prop = property_entry_get(child->properties, "name");
- if (!prop)
- continue;
- if (!strcmp(childname, prop->value.str)) {
+ if (!strcmp(childname, kobject_name(&child->kobj))) {
kobject_get(&child->kobj);
return &child->fwnode;
}
@@ -522,6 +560,52 @@ software_node_get_named_child_node(const struct fwnode_handle *fwnode,
return NULL;
}
+static int
+software_node_get_reference_args(const struct fwnode_handle *fwnode,
+ const char *propname, const char *nargs_prop,
+ unsigned int nargs, unsigned int index,
+ struct fwnode_reference_args *args)
+{
+ struct swnode *swnode = to_swnode(fwnode);
+ const struct software_node_reference *ref;
+ const struct property_entry *prop;
+ struct fwnode_handle *refnode;
+ int i;
+
+ if (!swnode || !swnode->node->references)
+ return -ENOENT;
+
+ for (ref = swnode->node->references; ref->name; ref++)
+ if (!strcmp(ref->name, propname))
+ break;
+
+ if (!ref->name || index > (ref->nrefs - 1))
+ return -ENOENT;
+
+ refnode = software_node_fwnode(ref->refs[index].node);
+ if (!refnode)
+ return -ENOENT;
+
+ if (nargs_prop) {
+ prop = property_entry_get(swnode->node->properties, nargs_prop);
+ if (!prop)
+ return -EINVAL;
+
+ nargs = prop->value.u32_data;
+ }
+
+ if (nargs > NR_FWNODE_REFERENCE_ARGS)
+ return -EINVAL;
+
+ args->fwnode = software_node_get(refnode);
+ args->nargs = nargs;
+
+ for (i = 0; i < nargs; i++)
+ args->args[i] = ref->refs[index].args[i];
+
+ return 0;
+}
+
static const struct fwnode_operations software_node_ops = {
.get = software_node_get,
.put = software_node_put,
@@ -531,12 +615,13 @@ static const struct fwnode_operations software_node_ops = {
.get_parent = software_node_get_parent,
.get_next_child_node = software_node_get_next_child,
.get_named_child_node = software_node_get_named_child_node,
+ .get_reference_args = software_node_get_reference_args
};
/* -------------------------------------------------------------------------- */
static int
-software_node_register_properties(struct software_node *swnode,
+software_node_register_properties(struct software_node *node,
const struct property_entry *properties)
{
struct property_entry *props;
@@ -545,24 +630,20 @@ software_node_register_properties(struct software_node *swnode,
if (IS_ERR(props))
return PTR_ERR(props);
- swnode->properties = props;
+ node->properties = props;
return 0;
}
static void software_node_release(struct kobject *kobj)
{
- struct software_node *swnode = kobj_to_swnode(kobj);
+ struct swnode *swnode = kobj_to_swnode(kobj);
- if (swnode->parent) {
- ida_simple_remove(&swnode->parent->child_ids, swnode->id);
- list_del(&swnode->entry);
- } else {
- ida_simple_remove(&swnode_root_ids, swnode->id);
+ if (swnode->allocated) {
+ property_entries_free(swnode->node->properties);
+ kfree(swnode->node);
}
-
ida_destroy(&swnode->child_ids);
- property_entries_free(swnode->properties);
kfree(swnode);
}
@@ -571,70 +652,165 @@ static struct kobj_type software_node_type = {
.sysfs_ops = &kobj_sysfs_ops,
};
-struct fwnode_handle *
-fwnode_create_software_node(const struct property_entry *properties,
- const struct fwnode_handle *parent)
+static struct fwnode_handle *
+swnode_register(const struct software_node *node, struct swnode *parent,
+ unsigned int allocated)
{
- struct software_node *p = NULL;
- struct software_node *swnode;
+ struct swnode *swnode;
int ret;
- if (parent) {
- if (IS_ERR(parent))
- return ERR_CAST(parent);
- if (!is_software_node(parent))
- return ERR_PTR(-EINVAL);
- p = to_software_node(parent);
- }
-
swnode = kzalloc(sizeof(*swnode), GFP_KERNEL);
- if (!swnode)
- return ERR_PTR(-ENOMEM);
+ if (!swnode) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
- ret = ida_simple_get(p ? &p->child_ids : &swnode_root_ids, 0, 0,
- GFP_KERNEL);
+ ret = ida_simple_get(parent ? &parent->child_ids : &swnode_root_ids,
+ 0, 0, GFP_KERNEL);
if (ret < 0) {
kfree(swnode);
- return ERR_PTR(ret);
+ goto out_err;
}
swnode->id = ret;
+ swnode->node = node;
+ swnode->parent = parent;
+ swnode->allocated = allocated;
swnode->kobj.kset = swnode_kset;
swnode->fwnode.ops = &software_node_ops;
ida_init(&swnode->child_ids);
INIT_LIST_HEAD(&swnode->entry);
INIT_LIST_HEAD(&swnode->children);
- swnode->parent = p;
-
- if (p)
- list_add_tail(&swnode->entry, &p->children);
- ret = kobject_init_and_add(&swnode->kobj, &software_node_type,
- p ? &p->kobj : NULL, "node%d", swnode->id);
+ if (node->name)
+ ret = kobject_init_and_add(&swnode->kobj, &software_node_type,
+ parent ? &parent->kobj : NULL,
+ "%s", node->name);
+ else
+ ret = kobject_init_and_add(&swnode->kobj, &software_node_type,
+ parent ? &parent->kobj : NULL,
+ "node%d", swnode->id);
if (ret) {
kobject_put(&swnode->kobj);
return ERR_PTR(ret);
}
- ret = software_node_register_properties(swnode, properties);
+ if (parent)
+ list_add_tail(&swnode->entry, &parent->children);
+
+ kobject_uevent(&swnode->kobj, KOBJ_ADD);
+ return &swnode->fwnode;
+
+out_err:
+ if (allocated)
+ property_entries_free(node->properties);
+ return ERR_PTR(ret);
+}
+
+/**
+ * software_node_register_nodes - Register an array of software nodes
+ * @nodes: Zero terminated array of software nodes to be registered
+ *
+ * Register multiple software nodes at once.
+ */
+int software_node_register_nodes(const struct software_node *nodes)
+{
+ int ret;
+ int i;
+
+ for (i = 0; nodes[i].name; i++) {
+ ret = software_node_register(&nodes[i]);
+ if (ret) {
+ software_node_unregister_nodes(nodes);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(software_node_register_nodes);
+
+/**
+ * software_node_unregister_nodes - Unregister an array of software nodes
+ * @nodes: Zero terminated array of software nodes to be unregistered
+ *
+ * Unregister multiple software nodes at once.
+ */
+void software_node_unregister_nodes(const struct software_node *nodes)
+{
+ struct swnode *swnode;
+ int i;
+
+ for (i = 0; nodes[i].name; i++) {
+ swnode = software_node_to_swnode(&nodes[i]);
+ if (swnode)
+ fwnode_remove_software_node(&swnode->fwnode);
+ }
+}
+EXPORT_SYMBOL_GPL(software_node_unregister_nodes);
+
+/**
+ * software_node_register - Register static software node
+ * @node: The software node to be registered
+ */
+int software_node_register(const struct software_node *node)
+{
+ struct swnode *parent = software_node_to_swnode(node->parent);
+
+ if (software_node_to_swnode(node))
+ return -EEXIST;
+
+ return PTR_ERR_OR_ZERO(swnode_register(node, parent, 0));
+}
+EXPORT_SYMBOL_GPL(software_node_register);
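[Illustrative sketch, not part of this series: registering a small static node hierarchy with software_node_register_nodes(). The struct software_node definition is assumed to come from the matching include/linux/property.h change; the node and property names are made up.]

/* Hypothetical usage sketch -- not from this patch. */
#include <linux/property.h>

static const struct property_entry foo_port_props[] = {
    PROPERTY_ENTRY_U32("reg", 0),
    { }
};

static const struct software_node foo_nodes[] = {
    { .name = "foo" },
    { .name = "port0", .parent = &foo_nodes[0],
      .properties = foo_port_props },
    { }    /* zero-terminated */
};

static int foo_register_nodes(void)
{
    /* Unwound later with software_node_unregister_nodes(foo_nodes). */
    return software_node_register_nodes(foo_nodes);
}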
+
+struct fwnode_handle *
+fwnode_create_software_node(const struct property_entry *properties,
+ const struct fwnode_handle *parent)
+{
+ struct software_node *node;
+ struct swnode *p = NULL;
+ int ret;
+
+ if (parent) {
+ if (IS_ERR(parent))
+ return ERR_CAST(parent);
+ if (!is_software_node(parent))
+ return ERR_PTR(-EINVAL);
+ p = to_swnode(parent);
+ }
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return ERR_PTR(-ENOMEM);
+
+ ret = software_node_register_properties(node, properties);
if (ret) {
- kobject_put(&swnode->kobj);
+ kfree(node);
return ERR_PTR(ret);
}
- kobject_uevent(&swnode->kobj, KOBJ_ADD);
- return &swnode->fwnode;
+ node->parent = p ? p->node : NULL;
+
+ return swnode_register(node, p, 1);
}
EXPORT_SYMBOL_GPL(fwnode_create_software_node);
void fwnode_remove_software_node(struct fwnode_handle *fwnode)
{
- struct software_node *swnode = to_software_node(fwnode);
+ struct swnode *swnode = to_swnode(fwnode);
if (!swnode)
return;
+ if (swnode->parent) {
+ ida_simple_remove(&swnode->parent->child_ids, swnode->id);
+ list_del(&swnode->entry);
+ } else {
+ ida_simple_remove(&swnode_root_ids, swnode->id);
+ }
+
kobject_put(&swnode->kobj);
}
EXPORT_SYMBOL_GPL(fwnode_remove_software_node);
@@ -642,7 +818,7 @@ EXPORT_SYMBOL_GPL(fwnode_remove_software_node);
int software_node_notify(struct device *dev, unsigned long action)
{
struct fwnode_handle *fwnode = dev_fwnode(dev);
- struct software_node *swnode;
+ struct swnode *swnode;
int ret;
if (!fwnode)
@@ -653,7 +829,7 @@ int software_node_notify(struct device *dev, unsigned long action)
if (!is_software_node(fwnode))
return 0;
- swnode = to_software_node(fwnode);
+ swnode = to_swnode(fwnode);
switch (action) {
case KOBJ_ADD:
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index 5fd9f167ecc1..4e033d4cc0dc 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -43,6 +43,9 @@ static ssize_t name##_list_show(struct device *dev, \
define_id_show_func(physical_package_id);
static DEVICE_ATTR_RO(physical_package_id);
+define_id_show_func(die_id);
+static DEVICE_ATTR_RO(die_id);
+
define_id_show_func(core_id);
static DEVICE_ATTR_RO(core_id);
@@ -50,10 +53,22 @@ define_siblings_show_func(thread_siblings, sibling_cpumask);
static DEVICE_ATTR_RO(thread_siblings);
static DEVICE_ATTR_RO(thread_siblings_list);
+define_siblings_show_func(core_cpus, sibling_cpumask);
+static DEVICE_ATTR_RO(core_cpus);
+static DEVICE_ATTR_RO(core_cpus_list);
+
define_siblings_show_func(core_siblings, core_cpumask);
static DEVICE_ATTR_RO(core_siblings);
static DEVICE_ATTR_RO(core_siblings_list);
+define_siblings_show_func(die_cpus, die_cpumask);
+static DEVICE_ATTR_RO(die_cpus);
+static DEVICE_ATTR_RO(die_cpus_list);
+
+define_siblings_show_func(package_cpus, core_cpumask);
+static DEVICE_ATTR_RO(package_cpus);
+static DEVICE_ATTR_RO(package_cpus_list);
+
#ifdef CONFIG_SCHED_BOOK
define_id_show_func(book_id);
static DEVICE_ATTR_RO(book_id);
@@ -72,11 +87,18 @@ static DEVICE_ATTR_RO(drawer_siblings_list);
static struct attribute *default_attrs[] = {
&dev_attr_physical_package_id.attr,
+ &dev_attr_die_id.attr,
&dev_attr_core_id.attr,
&dev_attr_thread_siblings.attr,
&dev_attr_thread_siblings_list.attr,
+ &dev_attr_core_cpus.attr,
+ &dev_attr_core_cpus_list.attr,
&dev_attr_core_siblings.attr,
&dev_attr_core_siblings_list.attr,
+ &dev_attr_die_cpus.attr,
+ &dev_attr_die_cpus_list.attr,
+ &dev_attr_package_cpus.attr,
+ &dev_attr_package_cpus_list.attr,
#ifdef CONFIG_SCHED_BOOK
&dev_attr_book_id.attr,
&dev_attr_book_siblings.attr,
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 20bb4bfa4be6..96ec7e0fc1ea 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -347,7 +347,7 @@ config CDROM_PKTCDVD
is possible.
DVD-RW disks must be in restricted overwrite mode.
- See the file <file:Documentation/cdrom/packet-writing.txt>
+ See the file <file:Documentation/cdrom/packet-writing.rst>
for further information on the use of this driver.
To compile this driver as a module, choose M here: the
diff --git a/drivers/block/drbd/drbd_debugfs.c b/drivers/block/drbd/drbd_debugfs.c
index f13b48ff5f43..b3b9cd5628fd 100644
--- a/drivers/block/drbd/drbd_debugfs.c
+++ b/drivers/block/drbd/drbd_debugfs.c
@@ -465,35 +465,20 @@ static const struct file_operations in_flight_summary_fops = {
void drbd_debugfs_resource_add(struct drbd_resource *resource)
{
struct dentry *dentry;
- if (!drbd_debugfs_resources)
- return;
dentry = debugfs_create_dir(resource->name, drbd_debugfs_resources);
- if (IS_ERR_OR_NULL(dentry))
- goto fail;
resource->debugfs_res = dentry;
dentry = debugfs_create_dir("volumes", resource->debugfs_res);
- if (IS_ERR_OR_NULL(dentry))
- goto fail;
resource->debugfs_res_volumes = dentry;
dentry = debugfs_create_dir("connections", resource->debugfs_res);
- if (IS_ERR_OR_NULL(dentry))
- goto fail;
resource->debugfs_res_connections = dentry;
dentry = debugfs_create_file("in_flight_summary", 0440,
resource->debugfs_res, resource,
&in_flight_summary_fops);
- if (IS_ERR_OR_NULL(dentry))
- goto fail;
resource->debugfs_res_in_flight_summary = dentry;
- return;
-
-fail:
- drbd_debugfs_resource_cleanup(resource);
- drbd_err(resource, "failed to create debugfs dentry\n");
}
static void drbd_debugfs_remove(struct dentry **dp)
@@ -636,35 +621,22 @@ void drbd_debugfs_connection_add(struct drbd_connection *connection)
{
struct dentry *conns_dir = connection->resource->debugfs_res_connections;
struct dentry *dentry;
- if (!conns_dir)
- return;
/* Once we enable multiple peers,
* these connections will have descriptive names.
* For now, it is just the one connection to the (only) "peer". */
dentry = debugfs_create_dir("peer", conns_dir);
- if (IS_ERR_OR_NULL(dentry))
- goto fail;
connection->debugfs_conn = dentry;
dentry = debugfs_create_file("callback_history", 0440,
connection->debugfs_conn, connection,
&connection_callback_history_fops);
- if (IS_ERR_OR_NULL(dentry))
- goto fail;
connection->debugfs_conn_callback_history = dentry;
dentry = debugfs_create_file("oldest_requests", 0440,
connection->debugfs_conn, connection,
&connection_oldest_requests_fops);
- if (IS_ERR_OR_NULL(dentry))
- goto fail;
connection->debugfs_conn_oldest_requests = dentry;
- return;
-
-fail:
- drbd_debugfs_connection_cleanup(connection);
- drbd_err(connection, "failed to create debugfs dentry\n");
}
void drbd_debugfs_connection_cleanup(struct drbd_connection *connection)
@@ -809,8 +781,6 @@ void drbd_debugfs_device_add(struct drbd_device *device)
snprintf(vnr_buf, sizeof(vnr_buf), "%u", device->vnr);
dentry = debugfs_create_dir(vnr_buf, vols_dir);
- if (IS_ERR_OR_NULL(dentry))
- goto fail;
device->debugfs_vol = dentry;
snprintf(minor_buf, sizeof(minor_buf), "%u", device->minor);
@@ -819,18 +789,14 @@ void drbd_debugfs_device_add(struct drbd_device *device)
if (!slink_name)
goto fail;
dentry = debugfs_create_symlink(minor_buf, drbd_debugfs_minors, slink_name);
+ device->debugfs_minor = dentry;
kfree(slink_name);
slink_name = NULL;
- if (IS_ERR_OR_NULL(dentry))
- goto fail;
- device->debugfs_minor = dentry;
#define DCF(name) do { \
dentry = debugfs_create_file(#name, 0440, \
device->debugfs_vol, device, \
&device_ ## name ## _fops); \
- if (IS_ERR_OR_NULL(dentry)) \
- goto fail; \
device->debugfs_vol_ ## name = dentry; \
} while (0)
@@ -864,19 +830,9 @@ void drbd_debugfs_peer_device_add(struct drbd_peer_device *peer_device)
struct dentry *dentry;
char vnr_buf[8];
- if (!conn_dir)
- return;
-
snprintf(vnr_buf, sizeof(vnr_buf), "%u", peer_device->device->vnr);
dentry = debugfs_create_dir(vnr_buf, conn_dir);
- if (IS_ERR_OR_NULL(dentry))
- goto fail;
peer_device->debugfs_peer_dev = dentry;
- return;
-
-fail:
- drbd_debugfs_peer_device_cleanup(peer_device);
- drbd_err(peer_device, "failed to create debugfs entries\n");
}
void drbd_debugfs_peer_device_cleanup(struct drbd_peer_device *peer_device)
@@ -917,35 +873,19 @@ void drbd_debugfs_cleanup(void)
drbd_debugfs_remove(&drbd_debugfs_root);
}
-int __init drbd_debugfs_init(void)
+void __init drbd_debugfs_init(void)
{
struct dentry *dentry;
dentry = debugfs_create_dir("drbd", NULL);
- if (IS_ERR_OR_NULL(dentry))
- goto fail;
drbd_debugfs_root = dentry;
dentry = debugfs_create_file("version", 0444, drbd_debugfs_root, NULL, &drbd_version_fops);
- if (IS_ERR_OR_NULL(dentry))
- goto fail;
drbd_debugfs_version = dentry;
dentry = debugfs_create_dir("resources", drbd_debugfs_root);
- if (IS_ERR_OR_NULL(dentry))
- goto fail;
drbd_debugfs_resources = dentry;
dentry = debugfs_create_dir("minors", drbd_debugfs_root);
- if (IS_ERR_OR_NULL(dentry))
- goto fail;
drbd_debugfs_minors = dentry;
- return 0;
-
-fail:
- drbd_debugfs_cleanup();
- if (dentry)
- return PTR_ERR(dentry);
- else
- return -EINVAL;
}
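
The drbd hunks above follow the tree-wide cleanup of no longer checking debugfs_create_*() return values. A minimal sketch of the resulting pattern, using a made-up example_debugfs_init() and example_counter rather than code from the patch:

#include <linux/debugfs.h>

static struct dentry *example_root;
static u32 example_counter;

static void example_debugfs_init(void)
{
	/* No IS_ERR_OR_NULL() checks and no error unwinding: debugfs
	 * creation failures are not fatal, and the debugfs core copes
	 * with an ERR_PTR() parent on its own.
	 */
	example_root = debugfs_create_dir("example", NULL);
	debugfs_create_u32("counter", 0444, example_root, &example_counter);
}
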
diff --git a/drivers/block/drbd/drbd_debugfs.h b/drivers/block/drbd/drbd_debugfs.h
index 4ecfbb3358d7..58e31cef0844 100644
--- a/drivers/block/drbd/drbd_debugfs.h
+++ b/drivers/block/drbd/drbd_debugfs.h
@@ -6,7 +6,7 @@
#include "drbd_int.h"
#ifdef CONFIG_DEBUG_FS
-int __init drbd_debugfs_init(void);
+void __init drbd_debugfs_init(void);
void drbd_debugfs_cleanup(void);
void drbd_debugfs_resource_add(struct drbd_resource *resource);
@@ -22,7 +22,7 @@ void drbd_debugfs_peer_device_add(struct drbd_peer_device *peer_device);
void drbd_debugfs_peer_device_cleanup(struct drbd_peer_device *peer_device);
#else
-static inline int __init drbd_debugfs_init(void) { return -ENODEV; }
+static inline void __init drbd_debugfs_init(void) { }
static inline void drbd_debugfs_cleanup(void) { }
static inline void drbd_debugfs_resource_add(struct drbd_resource *resource) { }
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 31237f45247a..ddbf56014c51 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1960,7 +1960,7 @@ static inline void wake_ack_receiver(struct drbd_connection *connection)
{
struct task_struct *task = connection->ack_receiver.task;
if (task && get_t_state(&connection->ack_receiver) == RUNNING)
- force_sig(SIGXCPU, task);
+ send_sig(SIGXCPU, task, 1);
}
static inline void request_ping(struct drbd_connection *connection)
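
The force_sig() to send_sig() conversions here and in drbd_main.c just below follow the signal API rework that restricts force_sig() to the current task. A hedged sketch of the replacement pattern, with a placeholder helper name:

#include <linux/sched/signal.h>

/* Kick a kernel thread out of an interruptible sleep.  send_sig() with
 * priv == 1 delivers the signal even if the thread normally ignores it,
 * which is all these call sites need.
 */
static void example_kick_thread(struct task_struct *task)
{
	if (task)
		send_sig(SIGXCPU, task, 1);
}
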
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 541b31fa42b3..9bd4ddd12b25 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -465,7 +465,7 @@ void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
smp_mb();
init_completion(&thi->stop);
if (thi->task != current)
- force_sig(DRBD_SIGKILL, thi->task);
+ send_sig(DRBD_SIGKILL, thi->task, 1);
}
spin_unlock_irqrestore(&thi->t_lock, flags);
@@ -3009,8 +3009,7 @@ static int __init drbd_init(void)
spin_lock_init(&retry.lock);
INIT_LIST_HEAD(&retry.writes);
- if (drbd_debugfs_init())
- pr_notice("failed to initialize debugfs -- will not be available\n");
+ drbd_debugfs_init();
pr_info("initialized. "
"Version: " REL_VERSION " (api:%d/proto:%d-%d)\n",
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index cdd748b8116d..5d52a2d32155 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -599,7 +599,7 @@ void conn_try_outdate_peer_async(struct drbd_connection *connection)
struct task_struct *opa;
kref_get(&connection->kref);
- /* We may just have force_sig()'ed this thread
+ /* We may have just sent a signal to this thread
* to get it out of some blocking network function.
* Clear signals; otherwise kthread_run(), which internally uses
* wait_on_completion_killable(), will mistake our pending signal
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 9fb9b312ab6b..b933a7eea52b 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3900,7 +3900,7 @@ static void __init config_types(void)
if (!UDP->cmos)
UDP->cmos = FLOPPY0_TYPE;
drive = 1;
- if (!UDP->cmos && FLOPPY1_TYPE)
+ if (!UDP->cmos)
UDP->cmos = FLOPPY1_TYPE;
/* FIXME: additional physical CMOS drive detection should go here */
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index f11b7dc16e9d..44c9985f352a 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -264,20 +264,12 @@ lo_do_transfer(struct loop_device *lo, int cmd,
return ret;
}
-static inline void loop_iov_iter_bvec(struct iov_iter *i,
- unsigned int direction, const struct bio_vec *bvec,
- unsigned long nr_segs, size_t count)
-{
- iov_iter_bvec(i, direction, bvec, nr_segs, count);
- i->type |= ITER_BVEC_FLAG_NO_REF;
-}
-
static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)
{
struct iov_iter i;
ssize_t bw;
- loop_iov_iter_bvec(&i, WRITE, bvec, 1, bvec->bv_len);
+ iov_iter_bvec(&i, WRITE, bvec, 1, bvec->bv_len);
file_start_write(file);
bw = vfs_iter_write(file, &i, ppos, 0);
@@ -355,7 +347,7 @@ static int lo_read_simple(struct loop_device *lo, struct request *rq,
ssize_t len;
rq_for_each_segment(bvec, rq, iter) {
- loop_iov_iter_bvec(&i, READ, &bvec, 1, bvec.bv_len);
+ iov_iter_bvec(&i, READ, &bvec, 1, bvec.bv_len);
len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0);
if (len < 0)
return len;
@@ -396,7 +388,7 @@ static int lo_read_transfer(struct loop_device *lo, struct request *rq,
b.bv_offset = 0;
b.bv_len = bvec.bv_len;
- loop_iov_iter_bvec(&i, READ, &b, 1, b.bv_len);
+ iov_iter_bvec(&i, READ, &b, 1, b.bv_len);
len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0);
if (len < 0) {
ret = len;
@@ -563,7 +555,7 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
}
atomic_set(&cmd->ref, 2);
- loop_iov_iter_bvec(&iter, rw, bvec, nr_bvec, blk_rq_bytes(rq));
+ iov_iter_bvec(&iter, rw, bvec, nr_bvec, blk_rq_bytes(rq));
iter.iov_offset = offset;
cmd->iocb.ki_pos = pos;
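
With ITER_BVEC_FLAG_NO_REF gone, the loop driver can call iov_iter_bvec() directly. A small sketch of the resulting call pattern; example_write_bvec is a placeholder, not the driver's own helper:

#include <linux/fs.h>
#include <linux/uio.h>
#include <linux/bvec.h>

static ssize_t example_write_bvec(struct file *file, struct bio_vec *bvec,
				  loff_t *ppos)
{
	struct iov_iter i;

	/* One bvec segment; no extra iterator flag fiddling needed. */
	iov_iter_bvec(&i, WRITE, bvec, 1, bvec->bv_len);
	return vfs_iter_write(file, &i, ppos, 0);
}
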
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index a14b09ab3a41..964f78cfffa0 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -1577,7 +1577,6 @@ static int exec_drive_command(struct mtip_port *port, u8 *command,
ATA_SECT_SIZE * xfer_sz);
return -ENOMEM;
}
- memset(buf, 0, ATA_SECT_SIZE * xfer_sz);
}
/* Build the FIS. */
@@ -2776,7 +2775,6 @@ static int mtip_dma_alloc(struct driver_data *dd)
&port->block1_dma, GFP_KERNEL);
if (!port->block1)
return -ENOMEM;
- memset(port->block1, 0, BLOCK_DMA_ALLOC_SZ);
/* Allocate dma memory for command list */
port->command_list =
@@ -2789,7 +2787,6 @@ static int mtip_dma_alloc(struct driver_data *dd)
port->block1_dma = 0;
return -ENOMEM;
}
- memset(port->command_list, 0, AHCI_CMD_TBL_SZ);
/* Setup all pointers into first DMA region */
port->rxfis = port->block1 + AHCI_RX_FIS_OFFSET;
@@ -3529,8 +3526,6 @@ static int mtip_init_cmd(struct blk_mq_tag_set *set, struct request *rq,
if (!cmd->command)
return -ENOMEM;
- memset(cmd->command, 0, CMD_DMA_ALLOC_SZ);
-
sg_init_table(cmd->sg, MTIP_MAX_SG);
return 0;
}
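
The memset() removals in mtip32xx (and in skd below) rely on dma_alloc_coherent() returning zeroed memory. A hedged illustration with placeholder names:

#include <linux/dma-mapping.h>

static void *example_alloc_dma_block(struct device *dev, size_t size,
				     dma_addr_t *dma_handle)
{
	/* dma_alloc_coherent() hands back zero-initialized memory, so
	 * the old memset(buf, 0, size) calls were redundant.
	 */
	return dma_alloc_coherent(dev, size, dma_handle, GFP_KERNEL);
}
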
diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c
index 447d635c79a2..99328ded60d1 100644
--- a/drivers/block/null_blk_main.c
+++ b/drivers/block/null_blk_main.c
@@ -327,11 +327,12 @@ static ssize_t nullb_device_power_store(struct config_item *item,
set_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
dev->power = newp;
} else if (dev->power && !newp) {
- mutex_lock(&lock);
- dev->power = newp;
- null_del_dev(dev->nullb);
- mutex_unlock(&lock);
- clear_bit(NULLB_DEV_FL_UP, &dev->flags);
+ if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) {
+ mutex_lock(&lock);
+ dev->power = newp;
+ null_del_dev(dev->nullb);
+ mutex_unlock(&lock);
+ }
clear_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
}
@@ -1197,7 +1198,7 @@ static blk_status_t null_handle_cmd(struct nullb_cmd *cmd)
if (!cmd->error && dev->zoned) {
sector_t sector;
unsigned int nr_sectors;
- int op;
+ enum req_opf op;
if (dev->queue_mode == NULL_Q_BIO) {
op = bio_op(cmd->bio);
@@ -1488,7 +1489,6 @@ static int setup_queues(struct nullb *nullb)
if (!nullb->queues)
return -ENOMEM;
- nullb->nr_queues = 0;
nullb->queue_depth = nullb->dev->hw_queue_depth;
return 0;
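
The null_blk power-off path above now uses test_and_clear_bit() so that only one writer tears the device down. A minimal sketch of that guard, with illustrative names:

#include <linux/bitops.h>

static void example_power_off_once(unsigned long *flags, int up_bit)
{
	/* Atomic read-modify-write: of several racing callers, exactly
	 * one sees the bit set and performs the teardown.
	 */
	if (test_and_clear_bit(up_bit, flags)) {
		/* ... delete the device under the appropriate lock ... */
	}
}
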
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index c479235862e5..51569c199a6c 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -2694,7 +2694,6 @@ static int skd_cons_skmsg(struct skd_device *skdev)
(FIT_QCMD_ALIGN - 1),
"not aligned: msg_buf %p mb_dma_address %pad\n",
skmsg->msg_buf, &skmsg->mb_dma_address);
- memset(skmsg->msg_buf, 0, SKD_N_FITMSG_BYTES);
}
err_out:
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index b9c34ff9a0d3..aae665a3a254 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -52,6 +52,17 @@ config BT_HCIBTUSB_BCM
Say Y here to compile support for Broadcom protocol.
+config BT_HCIBTUSB_MTK
+ bool "MediaTek protocol support"
+ depends on BT_HCIBTUSB
+ default n
+ help
+	  The MediaTek protocol support enables firmware download
+	  and chip initialization for MediaTek Bluetooth USB
+	  controllers.
+
+ Say Y here to compile support for MediaTek protocol.
+
config BT_HCIBTUSB_RTL
bool "Realtek protocol support"
depends on BT_HCIBTUSB
@@ -237,6 +248,7 @@ config BT_HCIUART_AG6XX
config BT_HCIUART_MRVL
bool "Marvell protocol support"
depends on BT_HCIUART
+ depends on BT_HCIUART_SERDEV
select BT_HCIUART_H4
help
Marvell is a serial protocol for communication between Bluetooth
diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c
index a346ccb5450d..a0e84538cec8 100644
--- a/drivers/bluetooth/bpa10x.c
+++ b/drivers/bluetooth/bpa10x.c
@@ -359,7 +359,8 @@ static int bpa10x_set_diag(struct hci_dev *hdev, bool enable)
return 0;
}
-static int bpa10x_probe(struct usb_interface *intf, const struct usb_device_id *id)
+static int bpa10x_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
{
struct bpa10x_data *data;
struct hci_dev *hdev;
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index 3fe941539a1f..124ef0a3e1dd 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -335,6 +335,7 @@ static const struct bcm_subver_table bcm_uart_subver_table[] = {
{ 0x230f, "BCM4356A2" }, /* 001.003.015 */
{ 0x220e, "BCM20702A1" }, /* 001.002.014 */
{ 0x4217, "BCM4329B1" }, /* 002.002.023 */
+ { 0x6106, "BCM4359C0" }, /* 003.001.006 */
{ }
};
diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c
index f5dbeec8e274..e11169ad8247 100644
--- a/drivers/bluetooth/btmtkuart.c
+++ b/drivers/bluetooth/btmtkuart.c
@@ -115,10 +115,12 @@ struct btmtk_hci_wmt_params {
struct btmtkuart_dev {
struct hci_dev *hdev;
struct serdev_device *serdev;
- struct clk *clk;
+ struct clk *clk;
+ struct clk *osc;
struct regulator *vcc;
struct gpio_desc *reset;
+ struct gpio_desc *boot;
struct pinctrl *pinctrl;
struct pinctrl_state *pins_runtime;
struct pinctrl_state *pins_boot;
@@ -911,6 +913,19 @@ static int btmtkuart_parse_dt(struct serdev_device *serdev)
return err;
}
+ bdev->osc = devm_clk_get_optional(&serdev->dev, "osc");
+ if (IS_ERR(bdev->osc)) {
+ err = PTR_ERR(bdev->osc);
+ return err;
+ }
+
+ bdev->boot = devm_gpiod_get_optional(&serdev->dev, "boot",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(bdev->boot)) {
+ err = PTR_ERR(bdev->boot);
+ return err;
+ }
+
bdev->pinctrl = devm_pinctrl_get(&serdev->dev);
if (IS_ERR(bdev->pinctrl)) {
err = PTR_ERR(bdev->pinctrl);
@@ -919,8 +934,10 @@ static int btmtkuart_parse_dt(struct serdev_device *serdev)
bdev->pins_boot = pinctrl_lookup_state(bdev->pinctrl,
"default");
- if (IS_ERR(bdev->pins_boot)) {
+ if (IS_ERR(bdev->pins_boot) && !bdev->boot) {
err = PTR_ERR(bdev->pins_boot);
+ dev_err(&serdev->dev,
+ "Should assign RXD to LOW at boot stage\n");
return err;
}
@@ -996,13 +1013,25 @@ static int btmtkuart_probe(struct serdev_device *serdev)
set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
if (btmtkuart_is_standalone(bdev)) {
- /* Switch to the specific pin state for the booting requires */
- pinctrl_select_state(bdev->pinctrl, bdev->pins_boot);
+ err = clk_prepare_enable(bdev->osc);
+ if (err < 0)
+ return err;
+
+ if (bdev->boot) {
+ gpiod_set_value_cansleep(bdev->boot, 1);
+ } else {
+			/* Switch to the specific pin state required
+			 * at the boot stage.
+			 */
+ pinctrl_select_state(bdev->pinctrl, bdev->pins_boot);
+ }
/* Power on */
err = regulator_enable(bdev->vcc);
- if (err < 0)
+ if (err < 0) {
+ clk_disable_unprepare(bdev->osc);
return err;
+ }
/* Reset if a reset GPIO is available; otherwise the board-level
* design must guarantee the reset.
@@ -1017,6 +1046,10 @@ static int btmtkuart_probe(struct serdev_device *serdev)
* mode the device requires for UART transfers.
*/
msleep(50);
+
+ if (bdev->boot)
+ devm_gpiod_put(&serdev->dev, bdev->boot);
+
pinctrl_select_state(bdev->pinctrl, bdev->pins_runtime);
/* A standalone device doesn't depend on a power domain on the SoC,
@@ -1037,10 +1070,8 @@ static int btmtkuart_probe(struct serdev_device *serdev)
return 0;
err_regulator_disable:
- if (btmtkuart_is_standalone(bdev)) {
- pinctrl_select_state(bdev->pinctrl, bdev->pins_boot);
+ if (btmtkuart_is_standalone(bdev))
regulator_disable(bdev->vcc);
- }
return err;
}
@@ -1050,9 +1081,9 @@ static void btmtkuart_remove(struct serdev_device *serdev)
struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev);
struct hci_dev *hdev = bdev->hdev;
- if (btmtkuart_is_standalone(bdev)) {
- pinctrl_select_state(bdev->pinctrl, bdev->pins_boot);
+ if (btmtkuart_is_standalone(bdev)) {
regulator_disable(bdev->vcc);
+ clk_disable_unprepare(bdev->osc);
}
hci_unregister_dev(hdev);
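
The btmtkuart changes lean on the *_optional() resource helpers, which return NULL rather than an error when the DT node simply omits the clock or GPIO; NULL is then a safe no-op for clk_prepare_enable() and the gpiod setters. A sketch under that assumption, with a placeholder function name:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/gpio/consumer.h>

static int example_get_optional_resources(struct device *dev,
					  struct clk **osc,
					  struct gpio_desc **boot)
{
	*osc = devm_clk_get_optional(dev, "osc");
	if (IS_ERR(*osc))
		return PTR_ERR(*osc);

	*boot = devm_gpiod_get_optional(dev, "boot", GPIOD_OUT_LOW);
	if (IS_ERR(*boot))
		return PTR_ERR(*boot);

	/* Either pointer may be NULL here; later calls treat that as
	 * "resource not present" instead of an error.
	 */
	return 0;
}
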
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index aff1d22223bd..8b33128dccee 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -131,6 +131,7 @@ static void qca_tlv_check_data(struct rome_config *config,
* In case VSE is skipped, only the last segment is acked.
*/
config->dnld_mode = tlv_patch->download_mode;
+ config->dnld_type = config->dnld_mode;
BT_DBG("Total Length : %d bytes",
le32_to_cpu(tlv_patch->total_size));
@@ -251,6 +252,31 @@ out:
return err;
}
+static int qca_inject_cmd_complete_event(struct hci_dev *hdev)
+{
+ struct hci_event_hdr *hdr;
+ struct hci_ev_cmd_complete *evt;
+ struct sk_buff *skb;
+
+ skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ hdr = skb_put(skb, sizeof(*hdr));
+ hdr->evt = HCI_EV_CMD_COMPLETE;
+ hdr->plen = sizeof(*evt) + 1;
+
+ evt = skb_put(skb, sizeof(*evt));
+ evt->ncmd = 1;
+ evt->opcode = QCA_HCI_CC_OPCODE;
+
+ skb_put_u8(skb, QCA_HCI_CC_SUCCESS);
+
+ hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
+
+ return hci_recv_frame(hdev, skb);
+}
+
static int qca_download_firmware(struct hci_dev *hdev,
struct rome_config *config)
{
@@ -284,11 +310,22 @@ static int qca_download_firmware(struct hci_dev *hdev,
ret = qca_tlv_send_segment(hdev, segsize, segment,
config->dnld_mode);
if (ret)
- break;
+ goto out;
segment += segsize;
}
+	/* The latest Qualcomm chipsets do not send a command complete event
+	 * for every firmware packet sent. They only respond with a vendor
+	 * specific event for the last packet. This optimization in the chip
+	 * reduces the BT initialization time. Inject a command complete
+	 * event here to avoid a command timeout error message.
+	 */
+ if (config->dnld_type == ROME_SKIP_EVT_VSE_CC ||
+ config->dnld_type == ROME_SKIP_EVT_VSE)
+ return qca_inject_cmd_complete_event(hdev);
+
+out:
release_firmware(fw);
return ret;
@@ -319,7 +356,8 @@ int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr)
EXPORT_SYMBOL_GPL(qca_set_bdaddr_rome);
int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
- enum qca_btsoc_type soc_type, u32 soc_ver)
+ enum qca_btsoc_type soc_type, u32 soc_ver,
+ const char *firmware_name)
{
struct rome_config config;
int err;
@@ -352,7 +390,10 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
/* Download NVM configuration */
config.type = TLV_TYPE_NVM;
- if (qca_is_wcn399x(soc_type))
+ if (firmware_name)
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/%s", firmware_name);
+ else if (qca_is_wcn399x(soc_type))
snprintf(config.fwname, sizeof(config.fwname),
"qca/crnv%02x.bin", rom_ver);
else
diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h
index e9c999959603..6a291a7a5d96 100644
--- a/drivers/bluetooth/btqca.h
+++ b/drivers/bluetooth/btqca.h
@@ -28,6 +28,9 @@
#define QCA_WCN3990_POWERON_PULSE 0xFC
#define QCA_WCN3990_POWEROFF_PULSE 0xC0
+#define QCA_HCI_CC_OPCODE 0xFC00
+#define QCA_HCI_CC_SUCCESS 0x00
+
enum qca_baudrate {
QCA_BAUDRATE_115200 = 0,
QCA_BAUDRATE_57600,
@@ -69,6 +72,7 @@ struct rome_config {
char fwname[64];
uint8_t user_baud_rate;
enum rome_tlv_dnld_mode dnld_mode;
+ enum rome_tlv_dnld_mode dnld_type;
};
struct edl_event_hdr {
@@ -127,7 +131,8 @@ enum qca_btsoc_type {
int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr);
int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
- enum qca_btsoc_type soc_type, u32 soc_ver);
+ enum qca_btsoc_type soc_type, u32 soc_ver,
+ const char *firmware_name);
int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version);
int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr);
static inline bool qca_is_wcn399x(enum qca_btsoc_type soc_type)
@@ -142,7 +147,8 @@ static inline int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdad
}
static inline int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
- enum qca_btsoc_type soc_type, u32 soc_ver)
+ enum qca_btsoc_type soc_type, u32 soc_ver,
+ const char *firmware_name)
{
return -EOPNOTSUPP;
}
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
index 208feef63de4..4f75a9b61d09 100644
--- a/drivers/bluetooth/btrtl.c
+++ b/drivers/bluetooth/btrtl.c
@@ -21,6 +21,7 @@
#define RTL_ROM_LMP_3499 0x3499
#define RTL_ROM_LMP_8723A 0x1200
#define RTL_ROM_LMP_8723B 0x8723
+#define RTL_ROM_LMP_8723D 0x8873
#define RTL_ROM_LMP_8821A 0x8821
#define RTL_ROM_LMP_8761A 0x8761
#define RTL_ROM_LMP_8822B 0x8822
@@ -107,6 +108,13 @@ static const struct id_table ic_id_table[] = {
.fw_name = "rtl_bt/rtl8723ds_fw.bin",
.cfg_name = "rtl_bt/rtl8723ds_config" },
+ /* 8723DU */
+ { IC_INFO(RTL_ROM_LMP_8723D, 0x826C),
+ .config_needed = true,
+ .has_rom_version = true,
+ .fw_name = "rtl_bt/rtl8723d_fw.bin",
+ .cfg_name = "rtl_bt/rtl8723d_config" },
+
/* 8821A */
{ IC_INFO(RTL_ROM_LMP_8821A, 0xa),
.config_needed = false,
@@ -637,6 +645,26 @@ int btrtl_setup_realtek(struct hci_dev *hdev)
}
EXPORT_SYMBOL_GPL(btrtl_setup_realtek);
+int btrtl_shutdown_realtek(struct hci_dev *hdev)
+{
+ struct sk_buff *skb;
+ int ret;
+
+ /* According to the vendor driver, BT must be reset on close to avoid
+ * firmware crash.
+ */
+ skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ ret = PTR_ERR(skb);
+ bt_dev_err(hdev, "HCI reset during shutdown failed");
+ return ret;
+ }
+ kfree_skb(skb);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(btrtl_shutdown_realtek);
+
static unsigned int btrtl_convert_baudrate(u32 device_baudrate)
{
switch (device_baudrate) {
diff --git a/drivers/bluetooth/btrtl.h b/drivers/bluetooth/btrtl.h
index f1676144fce8..10ad40c3e42c 100644
--- a/drivers/bluetooth/btrtl.h
+++ b/drivers/bluetooth/btrtl.h
@@ -55,6 +55,7 @@ void btrtl_free(struct btrtl_device_info *btrtl_dev);
int btrtl_download_firmware(struct hci_dev *hdev,
struct btrtl_device_info *btrtl_dev);
int btrtl_setup_realtek(struct hci_dev *hdev);
+int btrtl_shutdown_realtek(struct hci_dev *hdev);
int btrtl_get_uart_settings(struct hci_dev *hdev,
struct btrtl_device_info *btrtl_dev,
unsigned int *controller_baudrate,
@@ -83,6 +84,11 @@ static inline int btrtl_setup_realtek(struct hci_dev *hdev)
return -EOPNOTSUPP;
}
+static inline int btrtl_shutdown_realtek(struct hci_dev *hdev)
+{
+ return -EOPNOTSUPP;
+}
+
static inline int btrtl_get_uart_settings(struct hci_dev *hdev,
struct btrtl_device_info *btrtl_dev,
unsigned int *controller_baudrate,
diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
index 83748b7b2033..fd9571d5fdac 100644
--- a/drivers/bluetooth/btsdio.c
+++ b/drivers/bluetooth/btsdio.c
@@ -286,6 +286,7 @@ static int btsdio_probe(struct sdio_func *func,
switch (func->device) {
case SDIO_DEVICE_ID_BROADCOM_43341:
case SDIO_DEVICE_ID_BROADCOM_43430:
+ case SDIO_DEVICE_ID_BROADCOM_4356:
return -ENODEV;
}
}
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 50aed5259c2b..3876fee6ad13 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -11,6 +11,7 @@
#include <linux/usb.h>
#include <linux/usb/quirks.h>
#include <linux/firmware.h>
+#include <linux/iopoll.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/suspend.h>
@@ -55,6 +56,7 @@ static struct usb_driver btusb_driver;
#define BTUSB_BCM2045 0x40000
#define BTUSB_IFNUM_2 0x80000
#define BTUSB_CW6622 0x100000
+#define BTUSB_MEDIATEK 0x200000
static const struct usb_device_id btusb_table[] = {
/* Generic Bluetooth USB device */
@@ -264,7 +266,9 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x04ca, 0x3015), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x04ca, 0x3016), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x04ca, 0x301a), .driver_info = BTUSB_QCA_ROME },
+ { USB_DEVICE(0x13d3, 0x3491), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x13d3, 0x3496), .driver_info = BTUSB_QCA_ROME },
+ { USB_DEVICE(0x13d3, 0x3501), .driver_info = BTUSB_QCA_ROME },
/* Broadcom BCM2035 */
{ USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 },
@@ -346,6 +350,10 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_VENDOR_AND_INTERFACE_INFO(0x0bda, 0xe0, 0x01, 0x01),
.driver_info = BTUSB_REALTEK },
+ /* MediaTek Bluetooth devices */
+ { USB_VENDOR_AND_INTERFACE_INFO(0x0e8d, 0xe0, 0x01, 0x01),
+ .driver_info = BTUSB_MEDIATEK },
+
/* Additional Realtek 8723AE Bluetooth devices */
{ USB_DEVICE(0x0930, 0x021d), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x13d3, 0x3394), .driver_info = BTUSB_REALTEK },
@@ -426,6 +434,7 @@ static const struct dmi_system_id btusb_needs_reset_resume_table[] = {
#define BTUSB_DIAG_RUNNING 10
#define BTUSB_OOB_WAKE_ENABLED 11
#define BTUSB_HW_RESET_ACTIVE 12
+#define BTUSB_TX_WAIT_VND_EVT 13
struct btusb_data {
struct hci_dev *hdev;
@@ -449,6 +458,7 @@ struct btusb_data {
struct usb_anchor bulk_anchor;
struct usb_anchor isoc_anchor;
struct usb_anchor diag_anchor;
+ struct usb_anchor ctrl_anchor;
spinlock_t rxlock;
struct sk_buff *evt_skb;
@@ -1202,6 +1212,7 @@ static void btusb_stop_traffic(struct btusb_data *data)
usb_kill_anchored_urbs(&data->bulk_anchor);
usb_kill_anchored_urbs(&data->isoc_anchor);
usb_kill_anchored_urbs(&data->diag_anchor);
+ usb_kill_anchored_urbs(&data->ctrl_anchor);
}
static int btusb_close(struct hci_dev *hdev)
@@ -2437,6 +2448,568 @@ static int btusb_shutdown_intel_new(struct hci_dev *hdev)
return 0;
}
+#ifdef CONFIG_BT_HCIBTUSB_MTK
+
+#define FIRMWARE_MT7663 "mediatek/mt7663pr2h.bin"
+#define FIRMWARE_MT7668 "mediatek/mt7668pr2h.bin"
+
+#define HCI_WMT_MAX_EVENT_SIZE 64
+
+enum {
+ BTMTK_WMT_PATCH_DWNLD = 0x1,
+ BTMTK_WMT_FUNC_CTRL = 0x6,
+ BTMTK_WMT_RST = 0x7,
+ BTMTK_WMT_SEMAPHORE = 0x17,
+};
+
+enum {
+ BTMTK_WMT_INVALID,
+ BTMTK_WMT_PATCH_UNDONE,
+ BTMTK_WMT_PATCH_DONE,
+ BTMTK_WMT_ON_UNDONE,
+ BTMTK_WMT_ON_DONE,
+ BTMTK_WMT_ON_PROGRESS,
+};
+
+struct btmtk_wmt_hdr {
+ u8 dir;
+ u8 op;
+ __le16 dlen;
+ u8 flag;
+} __packed;
+
+struct btmtk_hci_wmt_cmd {
+ struct btmtk_wmt_hdr hdr;
+ u8 data[256];
+} __packed;
+
+struct btmtk_hci_wmt_evt {
+ struct hci_event_hdr hhdr;
+ struct btmtk_wmt_hdr whdr;
+} __packed;
+
+struct btmtk_hci_wmt_evt_funcc {
+ struct btmtk_hci_wmt_evt hwhdr;
+ __be16 status;
+} __packed;
+
+struct btmtk_tci_sleep {
+ u8 mode;
+ __le16 duration;
+ __le16 host_duration;
+ u8 host_wakeup_pin;
+ u8 time_compensation;
+} __packed;
+
+struct btmtk_hci_wmt_params {
+ u8 op;
+ u8 flag;
+ u16 dlen;
+ const void *data;
+ u32 *status;
+};
+
+static void btusb_mtk_wmt_recv(struct urb *urb)
+{
+ struct hci_dev *hdev = urb->context;
+ struct btusb_data *data = hci_get_drvdata(hdev);
+ struct hci_event_hdr *hdr;
+ struct sk_buff *skb;
+ int err;
+
+ if (urb->status == 0 && urb->actual_length > 0) {
+ hdev->stat.byte_rx += urb->actual_length;
+
+ /* WMT event shouldn't be fragmented and the size should be
+ * less than HCI_WMT_MAX_EVENT_SIZE.
+ */
+ skb = bt_skb_alloc(HCI_WMT_MAX_EVENT_SIZE, GFP_ATOMIC);
+ if (!skb) {
+ hdev->stat.err_rx++;
+ goto err_out;
+ }
+
+ hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
+ skb_put_data(skb, urb->transfer_buffer, urb->actual_length);
+
+ hdr = (void *)skb->data;
+		/* Fix up the vendor event id with 0xff for vendor specific
+		 * instead of 0xe4 so that events sent via the monitoring
+		 * socket can be parsed properly.
+		 */
+ hdr->evt = 0xff;
+
+		/* When someone is waiting for the WMT event, the skb is
+		 * cloned here and the event is then processed from the clone.
+		 */
+ if (test_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags)) {
+ data->evt_skb = skb_clone(skb, GFP_KERNEL);
+ if (!data->evt_skb)
+ goto err_out;
+ }
+
+ err = hci_recv_frame(hdev, skb);
+ if (err < 0)
+ goto err_free_skb;
+
+ if (test_and_clear_bit(BTUSB_TX_WAIT_VND_EVT,
+ &data->flags)) {
+ /* Barrier to sync with other CPUs */
+ smp_mb__after_atomic();
+ wake_up_bit(&data->flags,
+ BTUSB_TX_WAIT_VND_EVT);
+ }
+err_out:
+ return;
+err_free_skb:
+ kfree_skb(data->evt_skb);
+ data->evt_skb = NULL;
+ return;
+ } else if (urb->status == -ENOENT) {
+ /* Avoid suspend failed when usb_kill_urb */
+ return;
+ }
+
+ usb_mark_last_busy(data->udev);
+
+	/* The URB complete handler is still called with urb->actual_length = 0
+	 * when the event is not available, so keep re-submitting the URB until
+	 * the WMT event returns. It is also necessary to wait some time between
+	 * two consecutive control URBs to give the target device a chance to
+	 * generate the event. Otherwise, the WMT event cannot be returned by
+	 * the device successfully.
+	 */
+ udelay(100);
+
+ usb_anchor_urb(urb, &data->ctrl_anchor);
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err < 0) {
+ /* -EPERM: urb is being killed;
+ * -ENODEV: device got disconnected
+ */
+ if (err != -EPERM && err != -ENODEV)
+ bt_dev_err(hdev, "urb %p failed to resubmit (%d)",
+ urb, -err);
+ usb_unanchor_urb(urb);
+ }
+}
+
+static int btusb_mtk_submit_wmt_recv_urb(struct hci_dev *hdev)
+{
+ struct btusb_data *data = hci_get_drvdata(hdev);
+ struct usb_ctrlrequest *dr;
+ unsigned char *buf;
+ int err, size = 64;
+ unsigned int pipe;
+ struct urb *urb;
+
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb)
+ return -ENOMEM;
+
+ dr = kmalloc(sizeof(*dr), GFP_KERNEL);
+ if (!dr) {
+ usb_free_urb(urb);
+ return -ENOMEM;
+ }
+
+ dr->bRequestType = USB_TYPE_VENDOR | USB_DIR_IN;
+ dr->bRequest = 1;
+ dr->wIndex = cpu_to_le16(0);
+ dr->wValue = cpu_to_le16(48);
+ dr->wLength = cpu_to_le16(size);
+
+ buf = kmalloc(size, GFP_KERNEL);
+ if (!buf) {
+ kfree(dr);
+ return -ENOMEM;
+ }
+
+ pipe = usb_rcvctrlpipe(data->udev, 0);
+
+ usb_fill_control_urb(urb, data->udev, pipe, (void *)dr,
+ buf, size, btusb_mtk_wmt_recv, hdev);
+
+ urb->transfer_flags |= URB_FREE_BUFFER;
+
+ usb_anchor_urb(urb, &data->ctrl_anchor);
+ err = usb_submit_urb(urb, GFP_KERNEL);
+ if (err < 0) {
+ if (err != -EPERM && err != -ENODEV)
+ bt_dev_err(hdev, "urb %p submission failed (%d)",
+ urb, -err);
+ usb_unanchor_urb(urb);
+ }
+
+ usb_free_urb(urb);
+
+ return err;
+}
+
+static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev,
+ struct btmtk_hci_wmt_params *wmt_params)
+{
+ struct btusb_data *data = hci_get_drvdata(hdev);
+ struct btmtk_hci_wmt_evt_funcc *wmt_evt_funcc;
+ u32 hlen, status = BTMTK_WMT_INVALID;
+ struct btmtk_hci_wmt_evt *wmt_evt;
+ struct btmtk_hci_wmt_cmd wc;
+ struct btmtk_wmt_hdr *hdr;
+ int err;
+
+ /* Submit control IN URB on demand to process the WMT event */
+ err = btusb_mtk_submit_wmt_recv_urb(hdev);
+ if (err < 0)
+ return err;
+
+ /* Send the WMT command and wait until the WMT event returns */
+ hlen = sizeof(*hdr) + wmt_params->dlen;
+ if (hlen > 255)
+ return -EINVAL;
+
+ hdr = (struct btmtk_wmt_hdr *)&wc;
+ hdr->dir = 1;
+ hdr->op = wmt_params->op;
+ hdr->dlen = cpu_to_le16(wmt_params->dlen + 1);
+ hdr->flag = wmt_params->flag;
+ memcpy(wc.data, wmt_params->data, wmt_params->dlen);
+
+ set_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags);
+
+ err = __hci_cmd_send(hdev, 0xfc6f, hlen, &wc);
+
+ if (err < 0) {
+ clear_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags);
+ return err;
+ }
+
+ /* The vendor specific WMT commands are all answered by a vendor
+ * specific event and will have the Command Status or Command
+ * Complete as with usual HCI command flow control.
+ *
+ * After sending the command, wait for BTUSB_TX_WAIT_VND_EVT
+ * state to be cleared. The driver specific event receive routine
+ * will clear that state and with that indicate completion of the
+ * WMT command.
+ */
+ err = wait_on_bit_timeout(&data->flags, BTUSB_TX_WAIT_VND_EVT,
+ TASK_INTERRUPTIBLE, HCI_INIT_TIMEOUT);
+ if (err == -EINTR) {
+ bt_dev_err(hdev, "Execution of wmt command interrupted");
+ clear_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags);
+ return err;
+ }
+
+ if (err) {
+ bt_dev_err(hdev, "Execution of wmt command timed out");
+ clear_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags);
+ return -ETIMEDOUT;
+ }
+
+ /* Parse and handle the return WMT event */
+ wmt_evt = (struct btmtk_hci_wmt_evt *)data->evt_skb->data;
+ if (wmt_evt->whdr.op != hdr->op) {
+ bt_dev_err(hdev, "Wrong op received %d expected %d",
+ wmt_evt->whdr.op, hdr->op);
+ err = -EIO;
+ goto err_free_skb;
+ }
+
+ switch (wmt_evt->whdr.op) {
+ case BTMTK_WMT_SEMAPHORE:
+ if (wmt_evt->whdr.flag == 2)
+ status = BTMTK_WMT_PATCH_UNDONE;
+ else
+ status = BTMTK_WMT_PATCH_DONE;
+ break;
+ case BTMTK_WMT_FUNC_CTRL:
+ wmt_evt_funcc = (struct btmtk_hci_wmt_evt_funcc *)wmt_evt;
+ if (be16_to_cpu(wmt_evt_funcc->status) == 0x404)
+ status = BTMTK_WMT_ON_DONE;
+ else if (be16_to_cpu(wmt_evt_funcc->status) == 0x420)
+ status = BTMTK_WMT_ON_PROGRESS;
+ else
+ status = BTMTK_WMT_ON_UNDONE;
+ break;
+ }
+
+ if (wmt_params->status)
+ *wmt_params->status = status;
+
+err_free_skb:
+ kfree_skb(data->evt_skb);
+ data->evt_skb = NULL;
+
+ return err;
+}
+
+static int btusb_mtk_setup_firmware(struct hci_dev *hdev, const char *fwname)
+{
+ struct btmtk_hci_wmt_params wmt_params;
+ const struct firmware *fw;
+ const u8 *fw_ptr;
+ size_t fw_size;
+ int err, dlen;
+ u8 flag;
+
+ err = request_firmware(&fw, fwname, &hdev->dev);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to load firmware file (%d)", err);
+ return err;
+ }
+
+ fw_ptr = fw->data;
+ fw_size = fw->size;
+
+	/* The patch header is 30 bytes and should be skipped */
+ if (fw_size < 30)
+ goto err_release_fw;
+
+ fw_size -= 30;
+ fw_ptr += 30;
+ flag = 1;
+
+ wmt_params.op = BTMTK_WMT_PATCH_DWNLD;
+ wmt_params.status = NULL;
+
+ while (fw_size > 0) {
+ dlen = min_t(int, 250, fw_size);
+
+		/* Tell the device the position in the sequence */
+ if (fw_size - dlen <= 0)
+ flag = 3;
+ else if (fw_size < fw->size - 30)
+ flag = 2;
+
+ wmt_params.flag = flag;
+ wmt_params.dlen = dlen;
+ wmt_params.data = fw_ptr;
+
+ err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to send wmt patch dwnld (%d)",
+ err);
+ goto err_release_fw;
+ }
+
+ fw_size -= dlen;
+ fw_ptr += dlen;
+ }
+
+ wmt_params.op = BTMTK_WMT_RST;
+ wmt_params.flag = 4;
+ wmt_params.dlen = 0;
+ wmt_params.data = NULL;
+ wmt_params.status = NULL;
+
+	/* Activate the function the firmware provides */
+ err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to send wmt rst (%d)", err);
+ return err;
+ }
+
+	/* Wait a moment for the firmware activation to complete */
+ usleep_range(10000, 12000);
+
+err_release_fw:
+ release_firmware(fw);
+
+ return err;
+}
+
+static int btusb_mtk_func_query(struct hci_dev *hdev)
+{
+ struct btmtk_hci_wmt_params wmt_params;
+ int status, err;
+ u8 param = 0;
+
+ /* Query whether the function is enabled */
+ wmt_params.op = BTMTK_WMT_FUNC_CTRL;
+ wmt_params.flag = 4;
+ wmt_params.dlen = sizeof(param);
+ wmt_params.data = &param;
+ wmt_params.status = &status;
+
+ err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to query function status (%d)", err);
+ return err;
+ }
+
+ return status;
+}
+
+static int btusb_mtk_reg_read(struct btusb_data *data, u32 reg, u32 *val)
+{
+ int pipe, err, size = sizeof(u32);
+ void *buf;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ pipe = usb_rcvctrlpipe(data->udev, 0);
+ err = usb_control_msg(data->udev, pipe, 0x63,
+ USB_TYPE_VENDOR | USB_DIR_IN,
+ reg >> 16, reg & 0xffff,
+ buf, size, USB_CTRL_SET_TIMEOUT);
+ if (err < 0)
+ goto err_free_buf;
+
+ *val = get_unaligned_le32(buf);
+
+err_free_buf:
+ kfree(buf);
+
+ return err;
+}
+
+static int btusb_mtk_id_get(struct btusb_data *data, u32 *id)
+{
+ return btusb_mtk_reg_read(data, 0x80000008, id);
+}
+
+static int btusb_mtk_setup(struct hci_dev *hdev)
+{
+ struct btusb_data *data = hci_get_drvdata(hdev);
+ struct btmtk_hci_wmt_params wmt_params;
+ ktime_t calltime, delta, rettime;
+ struct btmtk_tci_sleep tci_sleep;
+ unsigned long long duration;
+ struct sk_buff *skb;
+ const char *fwname;
+ int err, status;
+ u32 dev_id;
+ u8 param;
+
+ calltime = ktime_get();
+
+ err = btusb_mtk_id_get(data, &dev_id);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to get device id (%d)", err);
+ return err;
+ }
+
+ switch (dev_id) {
+ case 0x7663:
+ fwname = FIRMWARE_MT7663;
+ break;
+ case 0x7668:
+ fwname = FIRMWARE_MT7668;
+ break;
+ default:
+		bt_dev_err(hdev, "Unsupported hardware variant (%08x)",
+ dev_id);
+ return -ENODEV;
+ }
+
+	/* Query whether the firmware has already been downloaded */
+ wmt_params.op = BTMTK_WMT_SEMAPHORE;
+ wmt_params.flag = 1;
+ wmt_params.dlen = 0;
+ wmt_params.data = NULL;
+ wmt_params.status = &status;
+
+ err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to query firmware status (%d)", err);
+ return err;
+ }
+
+ if (status == BTMTK_WMT_PATCH_DONE) {
+ bt_dev_info(hdev, "firmware already downloaded");
+ goto ignore_setup_fw;
+ }
+
+	/* Set up the firmware that the device requires */
+ err = btusb_mtk_setup_firmware(hdev, fwname);
+ if (err < 0)
+ return err;
+
+ignore_setup_fw:
+ err = readx_poll_timeout(btusb_mtk_func_query, hdev, status,
+ status < 0 || status != BTMTK_WMT_ON_PROGRESS,
+ 2000, 5000000);
+ /* -ETIMEDOUT happens */
+ if (err < 0)
+ return err;
+
+ /* The other errors happen in btusb_mtk_func_query */
+ if (status < 0)
+ return status;
+
+ if (status == BTMTK_WMT_ON_DONE) {
+ bt_dev_info(hdev, "function already on");
+ goto ignore_func_on;
+ }
+
+ /* Enable Bluetooth protocol */
+ param = 1;
+ wmt_params.op = BTMTK_WMT_FUNC_CTRL;
+ wmt_params.flag = 0;
+ wmt_params.dlen = sizeof(param);
+ wmt_params.data = &param;
+ wmt_params.status = NULL;
+
+ err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
+ return err;
+ }
+
+ignore_func_on:
+ /* Apply the low power environment setup */
+ tci_sleep.mode = 0x5;
+ tci_sleep.duration = cpu_to_le16(0x640);
+ tci_sleep.host_duration = cpu_to_le16(0x640);
+ tci_sleep.host_wakeup_pin = 0;
+ tci_sleep.time_compensation = 0;
+
+ skb = __hci_cmd_sync(hdev, 0xfc7a, sizeof(tci_sleep), &tci_sleep,
+ HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ bt_dev_err(hdev, "Failed to apply low power setting (%d)", err);
+ return err;
+ }
+ kfree_skb(skb);
+
+ rettime = ktime_get();
+ delta = ktime_sub(rettime, calltime);
+ duration = (unsigned long long)ktime_to_ns(delta) >> 10;
+
+ bt_dev_info(hdev, "Device setup in %llu usecs", duration);
+
+ return 0;
+}
+
+static int btusb_mtk_shutdown(struct hci_dev *hdev)
+{
+ struct btmtk_hci_wmt_params wmt_params;
+ u8 param = 0;
+ int err;
+
+ /* Disable the device */
+ wmt_params.op = BTMTK_WMT_FUNC_CTRL;
+ wmt_params.flag = 0;
+ wmt_params.dlen = sizeof(param);
+ wmt_params.data = &param;
+ wmt_params.status = NULL;
+
+ err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
+ return err;
+ }
+
+ return 0;
+}
+
+MODULE_FIRMWARE(FIRMWARE_MT7663);
+MODULE_FIRMWARE(FIRMWARE_MT7668);
+#endif
+
#ifdef CONFIG_PM
/* Configure an out-of-band gpio as wake-up pin, if specified in device tree */
static int marvell_config_oob_wake(struct hci_dev *hdev)
@@ -3044,6 +3617,7 @@ static int btusb_probe(struct usb_interface *intf,
init_usb_anchor(&data->bulk_anchor);
init_usb_anchor(&data->isoc_anchor);
init_usb_anchor(&data->diag_anchor);
+ init_usb_anchor(&data->ctrl_anchor);
spin_lock_init(&data->rxlock);
if (id->driver_info & BTUSB_INTEL_NEW) {
@@ -3157,6 +3731,15 @@ static int btusb_probe(struct usb_interface *intf,
if (id->driver_info & BTUSB_MARVELL)
hdev->set_bdaddr = btusb_set_bdaddr_marvell;
+#ifdef CONFIG_BT_HCIBTUSB_MTK
+ if (id->driver_info & BTUSB_MEDIATEK) {
+ hdev->setup = btusb_mtk_setup;
+ hdev->shutdown = btusb_mtk_shutdown;
+ hdev->manufacturer = 70;
+ set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
+ }
+#endif
+
if (id->driver_info & BTUSB_SWAVE) {
set_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks);
set_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks);
@@ -3184,6 +3767,7 @@ static int btusb_probe(struct usb_interface *intf,
#ifdef CONFIG_BT_HCIBTUSB_RTL
if (id->driver_info & BTUSB_REALTEK) {
hdev->setup = btrtl_setup_realtek;
+ hdev->shutdown = btrtl_shutdown_realtek;
/* Realtek devices lose their updated firmware over suspend,
* but the USB hub doesn't notice any status change.
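
The new MediaTek WMT path above synchronizes the command sender with the URB completion handler through a flag bit. A minimal, hedged sketch of that handshake; EX_WAIT_EVT and the two helper names are made up for illustration:

#include <linux/bitops.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/wait_bit.h>

#define EX_WAIT_EVT	0	/* illustrative flag bit */

static int example_send_and_wait(unsigned long *flags)
{
	set_bit(EX_WAIT_EVT, flags);

	/* ... send the vendor command here ... */

	/* Sleep until the completion path clears the bit, or time out. */
	return wait_on_bit_timeout(flags, EX_WAIT_EVT, TASK_INTERRUPTIBLE,
				   msecs_to_jiffies(2000));
}

static void example_event_arrived(unsigned long *flags)
{
	if (test_and_clear_bit(EX_WAIT_EVT, flags)) {
		/* Barrier to sync with the waiter before waking it. */
		smp_mb__after_atomic();
		wake_up_bit(flags, EX_WAIT_EVT);
	}
}
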
diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
index 82b13faa9422..fe2e307009f4 100644
--- a/drivers/bluetooth/hci_bcsp.c
+++ b/drivers/bluetooth/hci_bcsp.c
@@ -744,6 +744,11 @@ static int bcsp_close(struct hci_uart *hu)
skb_queue_purge(&bcsp->rel);
skb_queue_purge(&bcsp->unrel);
+ if (bcsp->rx_skb) {
+ kfree_skb(bcsp->rx_skb);
+ bcsp->rx_skb = NULL;
+ }
+
kfree(bcsp);
return 0;
}
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index c84f985f348d..8950e07889fe 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -178,6 +178,7 @@ restart:
goto restart;
clear_bit(HCI_UART_SENDING, &hu->tx_state);
+ wake_up_bit(&hu->tx_state, HCI_UART_SENDING);
}
void hci_uart_init_work(struct work_struct *work)
@@ -213,6 +214,13 @@ int hci_uart_init_ready(struct hci_uart *hu)
return 0;
}
+int hci_uart_wait_until_sent(struct hci_uart *hu)
+{
+ return wait_on_bit_timeout(&hu->tx_state, HCI_UART_SENDING,
+ TASK_INTERRUPTIBLE,
+ msecs_to_jiffies(2000));
+}
+
/* ------- Interface to HCI layer ------ */
/* Reset device */
static int hci_uart_flush(struct hci_dev *hdev)
diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c
index c04f5f9e1ed0..285706618f8a 100644
--- a/drivers/bluetooth/hci_ll.c
+++ b/drivers/bluetooth/hci_ll.c
@@ -128,6 +128,7 @@ static int ll_open(struct hci_uart *hu)
if (hu->serdev) {
struct ll_device *lldev = serdev_device_get_drvdata(hu->serdev);
+
if (!IS_ERR(lldev->ext_clk))
clk_prepare_enable(lldev->ext_clk);
}
@@ -162,6 +163,7 @@ static int ll_close(struct hci_uart *hu)
if (hu->serdev) {
struct ll_device *lldev = serdev_device_get_drvdata(hu->serdev);
+
gpiod_set_value_cansleep(lldev->enable_gpio, 0);
clk_disable_unprepare(lldev->ext_clk);
@@ -227,7 +229,8 @@ static void ll_device_want_to_wakeup(struct hci_uart *hu)
break;
default:
/* any other state is illegal */
- BT_ERR("received HCILL_WAKE_UP_IND in state %ld", ll->hcill_state);
+ BT_ERR("received HCILL_WAKE_UP_IND in state %ld",
+ ll->hcill_state);
break;
}
@@ -256,7 +259,8 @@ static void ll_device_want_to_sleep(struct hci_uart *hu)
/* sanity check */
if (ll->hcill_state != HCILL_AWAKE)
- BT_ERR("ERR: HCILL_GO_TO_SLEEP_IND in state %ld", ll->hcill_state);
+ BT_ERR("ERR: HCILL_GO_TO_SLEEP_IND in state %ld",
+ ll->hcill_state);
/* acknowledge device sleep */
if (send_hcill_cmd(HCILL_GO_TO_SLEEP_ACK, hu) < 0) {
@@ -289,7 +293,8 @@ static void ll_device_woke_up(struct hci_uart *hu)
/* sanity check */
if (ll->hcill_state != HCILL_ASLEEP_TO_AWAKE)
- BT_ERR("received HCILL_WAKE_UP_ACK in state %ld", ll->hcill_state);
+ BT_ERR("received HCILL_WAKE_UP_ACK in state %ld",
+ ll->hcill_state);
/* send pending packets and change state to HCILL_AWAKE */
__ll_do_awake(ll);
@@ -338,7 +343,8 @@ static int ll_enqueue(struct hci_uart *hu, struct sk_buff *skb)
skb_queue_tail(&ll->tx_wait_q, skb);
break;
default:
- BT_ERR("illegal hcill state: %ld (losing packet)", ll->hcill_state);
+ BT_ERR("illegal hcill state: %ld (losing packet)",
+ ll->hcill_state);
kfree_skb(skb);
break;
}
@@ -438,6 +444,7 @@ static int ll_recv(struct hci_uart *hu, const void *data, int count)
static struct sk_buff *ll_dequeue(struct hci_uart *hu)
{
struct ll_struct *ll = hu->priv;
+
return skb_dequeue(&ll->txq);
}
@@ -449,7 +456,8 @@ static int read_local_version(struct hci_dev *hdev)
struct sk_buff *skb;
struct hci_rp_read_local_version *ver;
- skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL, HCI_INIT_TIMEOUT);
+ skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
+ HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
bt_dev_err(hdev, "Reading TI version information failed (%ld)",
PTR_ERR(skb));
@@ -469,11 +477,38 @@ static int read_local_version(struct hci_dev *hdev)
version = le16_to_cpu(ver->lmp_subver);
out:
- if (err) bt_dev_err(hdev, "Failed to read TI version info: %d", err);
+ if (err)
+ bt_dev_err(hdev, "Failed to read TI version info: %d", err);
kfree_skb(skb);
return err ? err : version;
}
+static int send_command_from_firmware(struct ll_device *lldev,
+ struct hci_command *cmd)
+{
+ struct sk_buff *skb;
+
+ if (cmd->opcode == HCI_VS_UPDATE_UART_HCI_BAUDRATE) {
+		/* Ignore the remote change baud rate
+		 * HCI VS command.
+		 */
+ bt_dev_warn(lldev->hu.hdev,
+ "change remote baud rate command in firmware");
+ return 0;
+ }
+ if (cmd->prefix != 1)
+ bt_dev_dbg(lldev->hu.hdev, "command type %d", cmd->prefix);
+
+ skb = __hci_cmd_sync(lldev->hu.hdev, cmd->opcode, cmd->plen,
+ &cmd->speed, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_err(lldev->hu.hdev, "send command failed");
+ return PTR_ERR(skb);
+ }
+ kfree_skb(skb);
+ return 0;
+}
+
/**
* download_firmware -
* internal function which parses through the .bts firmware
@@ -486,7 +521,6 @@ static int download_firmware(struct ll_device *lldev)
unsigned char *ptr, *action_ptr;
unsigned char bts_scr_name[40]; /* 40 char long bts scr name? */
const struct firmware *fw;
- struct sk_buff *skb;
struct hci_command *cmd;
version = read_local_version(lldev->hu.hdev);
@@ -528,23 +562,9 @@ static int download_firmware(struct ll_device *lldev)
case ACTION_SEND_COMMAND: /* action send */
bt_dev_dbg(lldev->hu.hdev, "S");
cmd = (struct hci_command *)action_ptr;
- if (cmd->opcode == HCI_VS_UPDATE_UART_HCI_BAUDRATE) {
- /* ignore remote change
- * baud rate HCI VS command
- */
- bt_dev_warn(lldev->hu.hdev, "change remote baud rate command in firmware");
- break;
- }
- if (cmd->prefix != 1)
- bt_dev_dbg(lldev->hu.hdev, "command type %d", cmd->prefix);
-
- skb = __hci_cmd_sync(lldev->hu.hdev, cmd->opcode, cmd->plen, &cmd->speed, HCI_INIT_TIMEOUT);
- if (IS_ERR(skb)) {
- bt_dev_err(lldev->hu.hdev, "send command failed");
- err = PTR_ERR(skb);
+ err = send_command_from_firmware(lldev, cmd);
+ if (err)
goto out_rel_fw;
- }
- kfree_skb(skb);
break;
case ACTION_WAIT_EVENT: /* wait */
/* no need to wait as command was synchronous */
@@ -601,6 +621,13 @@ static int ll_setup(struct hci_uart *hu)
serdev_device_set_flow_control(serdev, true);
+ if (hu->oper_speed)
+ speed = hu->oper_speed;
+ else if (hu->proto->oper_speed)
+ speed = hu->proto->oper_speed;
+ else
+ speed = 0;
+
do {
/* Reset the Bluetooth device */
gpiod_set_value_cansleep(lldev->enable_gpio, 0);
@@ -612,6 +639,20 @@ static int ll_setup(struct hci_uart *hu)
return err;
}
+ if (speed) {
+ __le32 speed_le = cpu_to_le32(speed);
+ struct sk_buff *skb;
+
+ skb = __hci_cmd_sync(hu->hdev,
+ HCI_VS_UPDATE_UART_HCI_BAUDRATE,
+ sizeof(speed_le), &speed_le,
+ HCI_INIT_TIMEOUT);
+ if (!IS_ERR(skb)) {
+ kfree_skb(skb);
+ serdev_device_set_baudrate(serdev, speed);
+ }
+ }
+
err = download_firmware(lldev);
if (!err)
break;
@@ -636,25 +677,7 @@ static int ll_setup(struct hci_uart *hu)
}
/* Operational speed if any */
- if (hu->oper_speed)
- speed = hu->oper_speed;
- else if (hu->proto->oper_speed)
- speed = hu->proto->oper_speed;
- else
- speed = 0;
-
- if (speed) {
- __le32 speed_le = cpu_to_le32(speed);
- struct sk_buff *skb;
- skb = __hci_cmd_sync(hu->hdev, HCI_VS_UPDATE_UART_HCI_BAUDRATE,
- sizeof(speed_le), &speed_le,
- HCI_INIT_TIMEOUT);
- if (!IS_ERR(skb)) {
- kfree_skb(skb);
- serdev_device_set_baudrate(serdev, speed);
- }
- }
return 0;
}
@@ -676,7 +699,9 @@ static int hci_ti_probe(struct serdev_device *serdev)
serdev_device_set_drvdata(serdev, lldev);
lldev->serdev = hu->serdev = serdev;
- lldev->enable_gpio = devm_gpiod_get_optional(&serdev->dev, "enable", GPIOD_OUT_LOW);
+ lldev->enable_gpio = devm_gpiod_get_optional(&serdev->dev,
+ "enable",
+ GPIOD_OUT_LOW);
if (IS_ERR(lldev->enable_gpio))
return PTR_ERR(lldev->enable_gpio);
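
ll_setup() now raises the operational speed before the firmware download rather than afterwards. A hedged sketch of the two-step sequence; example_set_oper_speed is a placeholder and the vendor baud-rate opcode is passed in rather than hard-coded:

#include <linux/err.h>
#include <linux/serdev.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static int example_set_oper_speed(struct hci_dev *hdev,
				  struct serdev_device *serdev,
				  u32 speed, u16 vs_baudrate_opcode)
{
	__le32 speed_le = cpu_to_le32(speed);
	struct sk_buff *skb;

	/* Step 1: tell the controller to switch its UART baud rate. */
	skb = __hci_cmd_sync(hdev, vs_baudrate_opcode, sizeof(speed_le),
			     &speed_le, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb))
		return PTR_ERR(skb);
	kfree_skb(skb);

	/* Step 2: only then retune the host UART to the same speed. */
	serdev_device_set_baudrate(serdev, speed);
	return 0;
}
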
diff --git a/drivers/bluetooth/hci_mrvl.c b/drivers/bluetooth/hci_mrvl.c
index 50212ac629e3..f98e5cc343b2 100644
--- a/drivers/bluetooth/hci_mrvl.c
+++ b/drivers/bluetooth/hci_mrvl.c
@@ -13,6 +13,8 @@
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/tty.h>
+#include <linux/of.h>
+#include <linux/serdev.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -40,6 +42,10 @@ struct mrvl_data {
u8 id, rev;
};
+struct mrvl_serdev {
+ struct hci_uart hu;
+};
+
struct hci_mrvl_pkt {
__le16 lhs;
__le16 rhs;
@@ -49,6 +55,7 @@ struct hci_mrvl_pkt {
static int mrvl_open(struct hci_uart *hu)
{
struct mrvl_data *mrvl;
+ int ret;
BT_DBG("hu %p", hu);
@@ -62,7 +69,18 @@ static int mrvl_open(struct hci_uart *hu)
set_bit(STATE_CHIP_VER_PENDING, &mrvl->flags);
hu->priv = mrvl;
+
+ if (hu->serdev) {
+ ret = serdev_device_open(hu->serdev);
+ if (ret)
+ goto err;
+ }
+
return 0;
+err:
+ kfree(mrvl);
+
+ return ret;
}
static int mrvl_close(struct hci_uart *hu)
@@ -71,6 +89,9 @@ static int mrvl_close(struct hci_uart *hu)
BT_DBG("hu %p", hu);
+ if (hu->serdev)
+ serdev_device_close(hu->serdev);
+
skb_queue_purge(&mrvl->txq);
skb_queue_purge(&mrvl->rawq);
kfree_skb(mrvl->rx_skb);
@@ -339,7 +360,14 @@ static int mrvl_setup(struct hci_uart *hu)
return -EINVAL;
}
- hci_uart_set_baudrate(hu, 3000000);
+ /* Let the final ack go out before switching the baudrate */
+ hci_uart_wait_until_sent(hu);
+
+ if (hu->serdev)
+ serdev_device_set_baudrate(hu->serdev, 3000000);
+ else
+ hci_uart_set_baudrate(hu, 3000000);
+
hci_uart_set_flow_control(hu, false);
err = mrvl_load_firmware(hu->hdev, "mrvl/uart8897_bt.bin");
@@ -362,12 +390,54 @@ static const struct hci_uart_proto mrvl_proto = {
.dequeue = mrvl_dequeue,
};
+static int mrvl_serdev_probe(struct serdev_device *serdev)
+{
+ struct mrvl_serdev *mrvldev;
+
+ mrvldev = devm_kzalloc(&serdev->dev, sizeof(*mrvldev), GFP_KERNEL);
+ if (!mrvldev)
+ return -ENOMEM;
+
+ mrvldev->hu.serdev = serdev;
+ serdev_device_set_drvdata(serdev, mrvldev);
+
+ return hci_uart_register_device(&mrvldev->hu, &mrvl_proto);
+}
+
+static void mrvl_serdev_remove(struct serdev_device *serdev)
+{
+ struct mrvl_serdev *mrvldev = serdev_device_get_drvdata(serdev);
+
+ hci_uart_unregister_device(&mrvldev->hu);
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id mrvl_bluetooth_of_match[] = {
+ { .compatible = "mrvl,88w8897" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, mrvl_bluetooth_of_match);
+#endif
+
+static struct serdev_device_driver mrvl_serdev_driver = {
+ .probe = mrvl_serdev_probe,
+ .remove = mrvl_serdev_remove,
+ .driver = {
+ .name = "hci_uart_mrvl",
+ .of_match_table = of_match_ptr(mrvl_bluetooth_of_match),
+ },
+};
+
int __init mrvl_init(void)
{
+ serdev_device_driver_register(&mrvl_serdev_driver);
+
return hci_uart_register_proto(&mrvl_proto);
}
int __exit mrvl_deinit(void)
{
+ serdev_device_driver_unregister(&mrvl_serdev_driver);
+
return hci_uart_unregister_proto(&mrvl_proto);
}
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 9d273cdde563..9a5c9c1f9484 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -17,6 +17,7 @@
#include <linux/kernel.h>
#include <linux/clk.h>
+#include <linux/completion.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
@@ -53,6 +54,7 @@
enum qca_flags {
QCA_IBS_ENABLED,
+ QCA_DROP_VENDOR_EVENT,
};
/* HCI_IBS transmit side sleep protocol states */
@@ -97,6 +99,7 @@ struct qca_data {
struct work_struct ws_rx_vote_off;
struct work_struct ws_tx_vote_off;
unsigned long flags;
+ struct completion drop_ev_comp;
/* For debugging purpose */
u64 ibs_sent_wacks;
@@ -156,6 +159,7 @@ struct qca_serdev {
struct qca_power *bt_power;
u32 init_speed;
u32 oper_speed;
+ const char *firmware_name;
};
static int qca_power_setup(struct hci_uart *hu, bool on);
@@ -177,6 +181,17 @@ static enum qca_btsoc_type qca_soc_type(struct hci_uart *hu)
return soc_type;
}
+static const char *qca_get_firmware_name(struct hci_uart *hu)
+{
+ if (hu->serdev) {
+ struct qca_serdev *qsd = serdev_device_get_drvdata(hu->serdev);
+
+ return qsd->firmware_name;
+ } else {
+ return NULL;
+ }
+}
+
static void __serial_clock_on(struct tty_struct *tty)
{
/* TODO: Some chipset requires to enable UART clock on client
@@ -478,6 +493,7 @@ static int qca_open(struct hci_uart *hu)
INIT_WORK(&qca->ws_tx_vote_off, qca_wq_serial_tx_clock_vote_off);
qca->hu = hu;
+ init_completion(&qca->drop_ev_comp);
/* Assume we start with both sides asleep -- extra wakes OK */
qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
@@ -872,6 +888,35 @@ static int qca_recv_acl_data(struct hci_dev *hdev, struct sk_buff *skb)
return hci_recv_frame(hdev, skb);
}
+static int qca_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+ struct qca_data *qca = hu->priv;
+
+ if (test_bit(QCA_DROP_VENDOR_EVENT, &qca->flags)) {
+ struct hci_event_hdr *hdr = (void *)skb->data;
+
+ /* For the WCN3990 the vendor command for a baudrate change
+		 * isn't sent as a synchronous HCI command, because the
+ * controller sends the corresponding vendor event with the
+ * new baudrate. The event is received and properly decoded
+ * after changing the baudrate of the host port. It needs to
+		 * be dropped, otherwise it can be misinterpreted as a
+		 * response to a later firmware download command (also a
+ * vendor command).
+ */
+
+ if (hdr->evt == HCI_EV_VENDOR)
+ complete(&qca->drop_ev_comp);
+
+ kfree(skb);
+
+ return 0;
+ }
+
+ return hci_recv_frame(hdev, skb);
+}
+
#define QCA_IBS_SLEEP_IND_EVENT \
.type = HCI_IBS_SLEEP_IND, \
.hlen = 0, \
@@ -896,7 +941,7 @@ static int qca_recv_acl_data(struct hci_dev *hdev, struct sk_buff *skb)
static const struct h4_recv_pkt qca_recv_pkts[] = {
{ H4_RECV_ACL, .recv = qca_recv_acl_data },
{ H4_RECV_SCO, .recv = hci_recv_frame },
- { H4_RECV_EVENT, .recv = hci_recv_frame },
+ { H4_RECV_EVENT, .recv = qca_recv_event },
{ QCA_IBS_WAKE_IND_EVENT, .recv = qca_ibs_wake_ind },
{ QCA_IBS_WAKE_ACK_EVENT, .recv = qca_ibs_wake_ack },
{ QCA_IBS_SLEEP_IND_EVENT, .recv = qca_ibs_sleep_ind },
@@ -1091,6 +1136,7 @@ static int qca_check_speeds(struct hci_uart *hu)
static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type)
{
unsigned int speed, qca_baudrate;
+ struct qca_data *qca = hu->priv;
int ret = 0;
if (speed_type == QCA_INIT_SPEED) {
@@ -1110,6 +1156,11 @@ static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type)
if (qca_is_wcn399x(soc_type))
hci_uart_set_flow_control(hu, true);
+ if (soc_type == QCA_WCN3990) {
+ reinit_completion(&qca->drop_ev_comp);
+ set_bit(QCA_DROP_VENDOR_EVENT, &qca->flags);
+ }
+
qca_baudrate = qca_get_baudrate_value(speed);
bt_dev_dbg(hu->hdev, "Set UART speed to %d", speed);
ret = qca_set_baudrate(hu->hdev, qca_baudrate);
@@ -1121,6 +1172,20 @@ static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type)
error:
if (qca_is_wcn399x(soc_type))
hci_uart_set_flow_control(hu, false);
+
+ if (soc_type == QCA_WCN3990) {
+ /* Wait for the controller to send the vendor event
+ * for the baudrate change command.
+ */
+ if (!wait_for_completion_timeout(&qca->drop_ev_comp,
+ msecs_to_jiffies(100))) {
+ bt_dev_err(hu->hdev,
+ "Failed to change controller baudrate\n");
+ ret = -ETIMEDOUT;
+ }
+
+ clear_bit(QCA_DROP_VENDOR_EVENT, &qca->flags);
+ }
}
return ret;
@@ -1182,6 +1247,7 @@ static int qca_setup(struct hci_uart *hu)
struct qca_data *qca = hu->priv;
unsigned int speed, qca_baudrate = QCA_BAUDRATE_115200;
enum qca_btsoc_type soc_type = qca_soc_type(hu);
+ const char *firmware_name = qca_get_firmware_name(hu);
int ret;
int soc_ver = 0;
@@ -1232,7 +1298,8 @@ static int qca_setup(struct hci_uart *hu)
bt_dev_info(hdev, "QCA controller version 0x%08x", soc_ver);
/* Setup patch / NVM configurations */
- ret = qca_uart_setup(hdev, qca_baudrate, soc_type, soc_ver);
+ ret = qca_uart_setup(hdev, qca_baudrate, soc_type, soc_ver,
+ firmware_name);
if (!ret) {
set_bit(QCA_IBS_ENABLED, &qca->flags);
qca_debugfs_init(hdev);
@@ -1426,6 +1493,8 @@ static int qca_serdev_probe(struct serdev_device *serdev)
qcadev->serdev_hu.serdev = serdev;
data = of_device_get_match_data(&serdev->dev);
serdev_device_set_drvdata(serdev, qcadev);
+ device_property_read_string(&serdev->dev, "firmware-name",
+ &qcadev->firmware_name);
if (data && qca_is_wcn399x(data->soc_type)) {
qcadev->btsoc_type = data->soc_type;
qcadev->bt_power = devm_kzalloc(&serdev->dev,
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index d8cf005e3c5d..f11af3912ce6 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -100,6 +100,7 @@ int hci_uart_register_device(struct hci_uart *hu, const struct hci_uart_proto *p
void hci_uart_unregister_device(struct hci_uart *hu);
int hci_uart_tx_wakeup(struct hci_uart *hu);
+int hci_uart_wait_until_sent(struct hci_uart *hu);
int hci_uart_init_ready(struct hci_uart *hu);
void hci_uart_init_work(struct work_struct *work);
void hci_uart_set_baudrate(struct hci_uart *hu, unsigned int speed);
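
hci_qca now honours an optional "firmware-name" property when picking the NVM file. A small sketch of reading such an optional string property; the function name is a placeholder:

#include <linux/device.h>
#include <linux/property.h>

static const char *example_get_firmware_name(struct device *dev)
{
	const char *name = NULL;

	/* If the property is absent the call fails, name stays NULL and
	 * the caller falls back to its built-in default file name.
	 */
	device_property_read_string(dev, "firmware-name", &name);
	return name;
}
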
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 933268b8d6a5..ac42ae4651ce 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -7,7 +7,7 @@
License. See linux/COPYING for more information.
Uniform CD-ROM driver for Linux.
- See Documentation/cdrom/cdrom-standard.tex for usage information.
+ See Documentation/cdrom/cdrom-standard.rst for usage information.
The routines in the file provide a uniform interface between the
software that uses CD-ROMs and the various low-level drivers that
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index 658664a5a5aa..df1edb5ec0ad 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -1311,8 +1311,7 @@ static void ipi_handler(void *null)
void global_cache_flush(void)
{
- if (on_each_cpu(ipi_handler, NULL, 1) != 0)
- panic(PFX "timed out waiting for the other CPUs!\n");
+ on_each_cpu(ipi_handler, NULL, 1);
}
EXPORT_SYMBOL(global_cache_flush);
diff --git a/drivers/char/hw_random/iproc-rng200.c b/drivers/char/hw_random/iproc-rng200.c
index 8b5a20b35293..92be1c0ab99f 100644
--- a/drivers/char/hw_random/iproc-rng200.c
+++ b/drivers/char/hw_random/iproc-rng200.c
@@ -220,6 +220,7 @@ static int iproc_rng200_probe(struct platform_device *pdev)
}
static const struct of_device_id iproc_rng200_of_match[] = {
+ { .compatible = "brcm,bcm7211-rng200", },
{ .compatible = "brcm,bcm7278-rng200", },
{ .compatible = "brcm,iproc-rng200", },
{},
diff --git a/drivers/char/hw_random/meson-rng.c b/drivers/char/hw_random/meson-rng.c
index 2e23be802a62..76e693da5dde 100644
--- a/drivers/char/hw_random/meson-rng.c
+++ b/drivers/char/hw_random/meson-rng.c
@@ -1,58 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
* Copyright (c) 2016 BayLibre, SAS.
* Author: Neil Armstrong <narmstrong@baylibre.com>
* Copyright (C) 2014 Amlogic, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- * The full GNU General Public License is included in this distribution
- * in the file called COPYING.
- *
- * BSD LICENSE
- *
- * Copyright (c) 2016 BayLibre, SAS.
- * Author: Neil Armstrong <narmstrong@baylibre.com>
- * Copyright (C) 2014 Amlogic, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/err.h>
#include <linux/module.h>
diff --git a/drivers/char/tpm/eventlog/efi.c b/drivers/char/tpm/eventlog/efi.c
index 3e44362e469c..6bb023de17f1 100644
--- a/drivers/char/tpm/eventlog/efi.c
+++ b/drivers/char/tpm/eventlog/efi.c
@@ -16,10 +16,13 @@
int tpm_read_log_efi(struct tpm_chip *chip)
{
+ struct efi_tcg2_final_events_table *final_tbl = NULL;
struct linux_efi_tpm_eventlog *log_tbl;
struct tpm_bios_log *log;
u32 log_size;
u8 tpm_log_version;
+ void *tmp;
+ int ret;
if (!(chip->flags & TPM_CHIP_FLAG_TPM2))
return -ENODEV;
@@ -47,15 +50,57 @@ int tpm_read_log_efi(struct tpm_chip *chip)
/* malloc EventLog space */
log->bios_event_log = kmemdup(log_tbl->log, log_size, GFP_KERNEL);
- if (!log->bios_event_log)
- goto err_memunmap;
- log->bios_event_log_end = log->bios_event_log + log_size;
+ if (!log->bios_event_log) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ log->bios_event_log_end = log->bios_event_log + log_size;
tpm_log_version = log_tbl->version;
- memunmap(log_tbl);
- return tpm_log_version;
-err_memunmap:
+ ret = tpm_log_version;
+
+ if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR ||
+ efi_tpm_final_log_size == 0 ||
+ tpm_log_version != EFI_TCG2_EVENT_LOG_FORMAT_TCG_2)
+ goto out;
+
+ final_tbl = memremap(efi.tpm_final_log,
+ sizeof(*final_tbl) + efi_tpm_final_log_size,
+ MEMREMAP_WB);
+ if (!final_tbl) {
+ pr_err("Could not map UEFI TPM final log\n");
+ kfree(log->bios_event_log);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ efi_tpm_final_log_size -= log_tbl->final_events_preboot_size;
+
+ tmp = krealloc(log->bios_event_log,
+ log_size + efi_tpm_final_log_size,
+ GFP_KERNEL);
+ if (!tmp) {
+ kfree(log->bios_event_log);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ log->bios_event_log = tmp;
+
+ /*
+ * Copy any of the final events log that didn't also end up in the
+ * main log. Events can be logged in both if events are generated
+ * between GetEventLog() and ExitBootServices().
+ */
+ memcpy((void *)log->bios_event_log + log_size,
+ final_tbl->events + log_tbl->final_events_preboot_size,
+ efi_tpm_final_log_size);
+ log->bios_event_log_end = log->bios_event_log +
+ log_size + efi_tpm_final_log_size;
+
+out:
+ memunmap(final_tbl);
memunmap(log_tbl);
- return -ENOMEM;
+ return ret;
}
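The hunk above grows the event-log buffer with krealloc() and appends the UEFI final-events entries behind the original log. A minimal standalone sketch of that grow-and-append pattern, assuming userspace realloc()/memcpy() in place of the kernel's krealloc()/memcpy(); append_final_events() and its parameters are hypothetical names, not part of the patch:

/*
 * Grow-and-append sketch: enlarge the existing buffer, then copy the
 * extra entries right after the original contents.
 */
#include <stdlib.h>
#include <string.h>

int append_final_events(unsigned char **log, size_t log_size,
			const unsigned char *final, size_t final_size)
{
	unsigned char *tmp;

	/* Grow the existing buffer; on failure the old buffer is left intact. */
	tmp = realloc(*log, log_size + final_size);
	if (!tmp)
		return -1;

	*log = tmp;
	/* Append the final-events entries right after the original log. */
	memcpy(*log + log_size, final, final_size);
	return 0;
}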
diff --git a/drivers/char/tpm/eventlog/tpm2.c b/drivers/char/tpm/eventlog/tpm2.c
index d506362e046f..b9aeda1cbcd7 100644
--- a/drivers/char/tpm/eventlog/tpm2.c
+++ b/drivers/char/tpm/eventlog/tpm2.c
@@ -36,52 +36,7 @@
static size_t calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
struct tcg_pcr_event *event_header)
{
- struct tcg_efi_specid_event_head *efispecid;
- struct tcg_event_field *event_field;
- void *marker;
- void *marker_start;
- u32 halg_size;
- size_t size;
- u16 halg;
- int i;
- int j;
-
- marker = event;
- marker_start = marker;
- marker = marker + sizeof(event->pcr_idx) + sizeof(event->event_type)
- + sizeof(event->count);
-
- efispecid = (struct tcg_efi_specid_event_head *)event_header->event;
-
- /* Check if event is malformed. */
- if (event->count > efispecid->num_algs)
- return 0;
-
- for (i = 0; i < event->count; i++) {
- halg_size = sizeof(event->digests[i].alg_id);
- memcpy(&halg, marker, halg_size);
- marker = marker + halg_size;
- for (j = 0; j < efispecid->num_algs; j++) {
- if (halg == efispecid->digest_sizes[j].alg_id) {
- marker +=
- efispecid->digest_sizes[j].digest_size;
- break;
- }
- }
- /* Algorithm without known length. Such event is unparseable. */
- if (j == efispecid->num_algs)
- return 0;
- }
-
- event_field = (struct tcg_event_field *)marker;
- marker = marker + sizeof(event_field->event_size)
- + event_field->event_size;
- size = marker - marker_start;
-
- if ((event->event_type == 0) && (event_field->event_size == 0))
- return 0;
-
- return size;
+ return __calc_tpm2_event_size(event, event_header, false);
}
static void *tpm2_bios_measurements_start(struct seq_file *m, loff_t *pos)
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index 90325e1749fb..d47ad10a35fe 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -289,15 +289,15 @@ static int tpm_class_shutdown(struct device *dev)
{
struct tpm_chip *chip = container_of(dev, struct tpm_chip, dev);
+ down_write(&chip->ops_sem);
if (chip->flags & TPM_CHIP_FLAG_TPM2) {
- down_write(&chip->ops_sem);
if (!tpm_chip_start(chip)) {
tpm2_shutdown(chip, TPM2_SU_CLEAR);
tpm_chip_stop(chip);
}
- chip->ops = NULL;
- up_write(&chip->ops_sem);
}
+ chip->ops = NULL;
+ up_write(&chip->ops_sem);
return 0;
}
diff --git a/drivers/char/tpm/tpm1-cmd.c b/drivers/char/tpm/tpm1-cmd.c
index 85dcf2654d11..faacbe1ffa1a 100644
--- a/drivers/char/tpm/tpm1-cmd.c
+++ b/drivers/char/tpm/tpm1-cmd.c
@@ -510,7 +510,7 @@ struct tpm1_get_random_out {
*
* Return:
* * number of bytes read
- * * -errno or a TPM return code otherwise
+ * * -errno (positive TPM return codes are masked to -EIO)
*/
int tpm1_get_random(struct tpm_chip *chip, u8 *dest, size_t max)
{
@@ -531,8 +531,11 @@ int tpm1_get_random(struct tpm_chip *chip, u8 *dest, size_t max)
rc = tpm_transmit_cmd(chip, &buf, sizeof(out->rng_data_len),
"attempting get random");
- if (rc)
+ if (rc) {
+ if (rc > 0)
+ rc = -EIO;
goto out;
+ }
out = (struct tpm1_get_random_out *)&buf.data[TPM_HEADER_SIZE];
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index 4de49924cfc4..d103545e4055 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -297,7 +297,7 @@ struct tpm2_get_random_out {
*
* Return:
* size of the buffer on success,
- * -errno otherwise
+ * -errno otherwise (positive TPM return codes are masked to -EIO)
*/
int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max)
{
@@ -324,8 +324,11 @@ int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max)
offsetof(struct tpm2_get_random_out,
buffer),
"attempting get random");
- if (err)
+ if (err) {
+ if (err > 0)
+ err = -EIO;
goto out;
+ }
out = (struct tpm2_get_random_out *)
&buf.data[TPM_HEADER_SIZE];
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index aa51756fd4d6..87b410d6e51d 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -368,7 +368,7 @@ static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index)
const char *dev_id = dev ? dev_name(dev) : NULL;
struct device_node *np = core->of_node;
- if (np && index >= 0)
+ if (np && (name || index >= 0))
hw = of_clk_get_hw(np, index, name);
/*
diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c
index 739f64fdf1e3..206fafd299ea 100644
--- a/drivers/clk/meson/g12a.c
+++ b/drivers/clk/meson/g12a.c
@@ -2734,8 +2734,8 @@ static struct clk_hw_onecell_data g12a_hw_onecell_data = {
[CLKID_MALI_1_DIV] = &g12a_mali_1_div.hw,
[CLKID_MALI_1] = &g12a_mali_1.hw,
[CLKID_MALI] = &g12a_mali.hw,
- [CLKID_MPLL_5OM_DIV] = &g12a_mpll_50m_div.hw,
- [CLKID_MPLL_5OM] = &g12a_mpll_50m.hw,
+ [CLKID_MPLL_50M_DIV] = &g12a_mpll_50m_div.hw,
+ [CLKID_MPLL_50M] = &g12a_mpll_50m.hw,
[CLKID_SYS_PLL_DIV16_EN] = &g12a_sys_pll_div16_en.hw,
[CLKID_SYS_PLL_DIV16] = &g12a_sys_pll_div16.hw,
[CLKID_CPU_CLK_DYN0_SEL] = &g12a_cpu_clk_premux0.hw,
diff --git a/drivers/clk/meson/g12a.h b/drivers/clk/meson/g12a.h
index 39c41af70804..bcc05cd9882f 100644
--- a/drivers/clk/meson/g12a.h
+++ b/drivers/clk/meson/g12a.h
@@ -166,7 +166,7 @@
#define CLKID_HDMI_DIV 167
#define CLKID_MALI_0_DIV 170
#define CLKID_MALI_1_DIV 173
-#define CLKID_MPLL_5OM_DIV 176
+#define CLKID_MPLL_50M_DIV 176
#define CLKID_SYS_PLL_DIV16_EN 178
#define CLKID_SYS_PLL_DIV16 179
#define CLKID_CPU_CLK_DYN0_SEL 180
diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c
index 37cf0f01bb5d..62cd3a7f1f65 100644
--- a/drivers/clk/meson/meson8b.c
+++ b/drivers/clk/meson/meson8b.c
@@ -1761,7 +1761,7 @@ static struct clk_regmap meson8m2_gp_pll = {
},
};
-static const char * const mmeson8b_vpu_0_1_parent_names[] = {
+static const char * const meson8b_vpu_0_1_parent_names[] = {
"fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7"
};
@@ -1778,8 +1778,8 @@ static struct clk_regmap meson8b_vpu_0_sel = {
.hw.init = &(struct clk_init_data){
.name = "vpu_0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_names = mmeson8b_vpu_0_1_parent_names,
- .num_parents = ARRAY_SIZE(mmeson8b_vpu_0_1_parent_names),
+ .parent_names = meson8b_vpu_0_1_parent_names,
+ .num_parents = ARRAY_SIZE(meson8b_vpu_0_1_parent_names),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1837,8 +1837,8 @@ static struct clk_regmap meson8b_vpu_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "vpu_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_names = mmeson8b_vpu_0_1_parent_names,
- .num_parents = ARRAY_SIZE(mmeson8b_vpu_0_1_parent_names),
+ .parent_names = meson8b_vpu_0_1_parent_names,
+ .num_parents = ARRAY_SIZE(meson8b_vpu_0_1_parent_names),
.flags = CLK_SET_RATE_PARENT,
},
};
diff --git a/drivers/clk/socfpga/clk-s10.c b/drivers/clk/socfpga/clk-s10.c
index 8281dfbf38c2..5bed36e12951 100644
--- a/drivers/clk/socfpga/clk-s10.c
+++ b/drivers/clk/socfpga/clk-s10.c
@@ -103,9 +103,9 @@ static const struct stratix10_perip_cnt_clock s10_main_perip_cnt_clks[] = {
{ STRATIX10_NOC_CLK, "noc_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux),
0, 0, 0, 0x3C, 1},
{ STRATIX10_EMAC_A_FREE_CLK, "emaca_free_clk", NULL, emaca_free_mux, ARRAY_SIZE(emaca_free_mux),
- 0, 0, 4, 0xB0, 0},
+ 0, 0, 2, 0xB0, 0},
{ STRATIX10_EMAC_B_FREE_CLK, "emacb_free_clk", NULL, emacb_free_mux, ARRAY_SIZE(emacb_free_mux),
- 0, 0, 4, 0xB0, 1},
+ 0, 0, 2, 0xB0, 1},
{ STRATIX10_EMAC_PTP_FREE_CLK, "emac_ptp_free_clk", NULL, emac_ptp_free_mux,
ARRAY_SIZE(emac_ptp_free_mux), 0, 0, 4, 0xB0, 2},
{ STRATIX10_GPIO_DB_FREE_CLK, "gpio_db_free_clk", NULL, gpio_db_free_mux,
diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c
index e1ba62d2b1a0..ac1d27a8c650 100644
--- a/drivers/clk/tegra/clk-tegra210.c
+++ b/drivers/clk/tegra/clk-tegra210.c
@@ -3366,6 +3366,8 @@ static struct tegra_clk_init_table init_table[] __initdata = {
{ TEGRA210_CLK_I2S3_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 },
{ TEGRA210_CLK_I2S4_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 },
{ TEGRA210_CLK_VIMCLK_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 },
+ { TEGRA210_CLK_HDA, TEGRA210_CLK_PLL_P, 51000000, 0 },
+ { TEGRA210_CLK_HDA2CODEC_2X, TEGRA210_CLK_PLL_P, 48000000, 0 },
/* This MUST be the last entry. */
{ TEGRA210_CLK_CLK_MAX, TEGRA210_CLK_CLK_MAX, 0, 0 },
};
diff --git a/drivers/clk/ti/clkctrl.c b/drivers/clk/ti/clkctrl.c
index 8e834317c97d..975995eea15c 100644
--- a/drivers/clk/ti/clkctrl.c
+++ b/drivers/clk/ti/clkctrl.c
@@ -229,6 +229,7 @@ static struct clk_hw *_ti_omap4_clkctrl_xlate(struct of_phandle_args *clkspec,
{
struct omap_clkctrl_provider *provider = data;
struct omap_clkctrl_clk *entry;
+ bool found = false;
if (clkspec->args_count != 2)
return ERR_PTR(-EINVAL);
@@ -238,11 +239,13 @@ static struct clk_hw *_ti_omap4_clkctrl_xlate(struct of_phandle_args *clkspec,
list_for_each_entry(entry, &provider->clocks, node) {
if (entry->reg_offset == clkspec->args[0] &&
- entry->bit_offset == clkspec->args[1])
+ entry->bit_offset == clkspec->args[1]) {
+ found = true;
break;
+ }
}
- if (!entry)
+ if (!found)
return ERR_PTR(-EINVAL);
return entry->clk;
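The clkctrl change above replaces a post-loop NULL check with a "found" flag, because a list_for_each_entry() cursor never ends up NULL when the list is exhausted; it points at the head-embedded dummy entry instead. A minimal sketch of the found-flag idiom, assuming a plain linked list with hypothetical struct entry/find_offset() names in place of the kernel list API:

#include <stdbool.h>
#include <stddef.h>

struct entry {
	unsigned int reg_offset;
	struct entry *next;
};

struct entry *find_offset(struct entry *head, unsigned int offset)
{
	struct entry *e;
	bool found = false;

	for (e = head; e; e = e->next) {
		if (e->reg_offset == offset) {
			found = true;
			break;
		}
	}

	/*
	 * With list_for_each_entry() the cursor is never NULL after the
	 * loop, so a post-loop NULL check is useless; an explicit flag
	 * (or a comparison against the list head) is needed instead.
	 */
	return found ? e : NULL;
}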
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 3300739edce4..5e9317dc3d39 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -43,6 +43,11 @@ config BCM_KONA_TIMER
help
Enables the support for the BCM Kona mobile timer driver.
+config DAVINCI_TIMER
+ bool "Texas Instruments DaVinci timer driver" if COMPILE_TEST
+ help
+ Enables the support for the TI DaVinci timer driver.
+
config DIGICOLOR_TIMER
bool "Digicolor timer driver" if COMPILE_TEST
select CLKSRC_MMIO
@@ -140,7 +145,7 @@ config TEGRA_TIMER
bool "Tegra timer driver" if COMPILE_TEST
select CLKSRC_MMIO
select TIMER_OF
- depends on ARM || ARM64
+ depends on ARCH_TEGRA || COMPILE_TEST
help
Enables support for the Tegra driver.
@@ -617,6 +622,13 @@ config CLKSRC_IMX_TPM
Enable this option to use IMX Timer/PWM Module (TPM) timer as
clocksource.
+config TIMER_IMX_SYS_CTR
+ bool "i.MX system counter timer" if COMPILE_TEST
+ select TIMER_OF
+ help
+ Enable this option to use i.MX system counter timer as a
+ clockevent.
+
config CLKSRC_ST_LPC
bool "Low power clocksource found in the LPC" if COMPILE_TEST
select TIMER_OF if OF
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 236858fa7fbf..2e7936e7833f 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_SH_TIMER_TMU) += sh_tmu.o
obj-$(CONFIG_EM_TIMER_STI) += em_sti.o
obj-$(CONFIG_CLKBLD_I8253) += i8253.o
obj-$(CONFIG_CLKSRC_MMIO) += mmio.o
+obj-$(CONFIG_DAVINCI_TIMER) += timer-davinci.o
obj-$(CONFIG_DIGICOLOR_TIMER) += timer-digicolor.o
obj-$(CONFIG_OMAP_DM_TIMER) += timer-ti-dm.o
obj-$(CONFIG_DW_APB_TIMER) += dw_apb_timer.o
@@ -36,7 +37,7 @@ obj-$(CONFIG_U300_TIMER) += timer-u300.o
obj-$(CONFIG_SUN4I_TIMER) += timer-sun4i.o
obj-$(CONFIG_SUN5I_HSTIMER) += timer-sun5i.o
obj-$(CONFIG_MESON6_TIMER) += timer-meson6.o
-obj-$(CONFIG_TEGRA_TIMER) += timer-tegra20.o
+obj-$(CONFIG_TEGRA_TIMER) += timer-tegra.o
obj-$(CONFIG_VT8500_TIMER) += timer-vt8500.o
obj-$(CONFIG_NSPIRE_TIMER) += timer-zevio.o
obj-$(CONFIG_BCM_KONA_TIMER) += bcm_kona_timer.o
@@ -74,6 +75,7 @@ obj-$(CONFIG_CLKSRC_MIPS_GIC) += mips-gic-timer.o
obj-$(CONFIG_CLKSRC_TANGO_XTAL) += timer-tango-xtal.o
obj-$(CONFIG_CLKSRC_IMX_GPT) += timer-imx-gpt.o
obj-$(CONFIG_CLKSRC_IMX_TPM) += timer-imx-tpm.o
+obj-$(CONFIG_TIMER_IMX_SYS_CTR) += timer-imx-sysctr.o
obj-$(CONFIG_ASM9260_TIMER) += asm9260_timer.o
obj-$(CONFIG_H8300_TMR8) += h8300_timer8.o
obj-$(CONFIG_H8300_TMR16) += h8300_timer16.o
@@ -84,3 +86,4 @@ obj-$(CONFIG_ATCPIT100_TIMER) += timer-atcpit100.o
obj-$(CONFIG_RISCV_TIMER) += timer-riscv.o
obj-$(CONFIG_CSKY_MP_TIMER) += timer-mp-csky.o
obj-$(CONFIG_GX6605S_TIMER) += timer-gx6605s.o
+obj-$(CONFIG_HYPERV_TIMER) += hyperv_timer.o
diff --git a/drivers/clocksource/arc_timer.c b/drivers/clocksource/arc_timer.c
index ebfbccefc7b3..b29b5a75333e 100644
--- a/drivers/clocksource/arc_timer.c
+++ b/drivers/clocksource/arc_timer.c
@@ -13,6 +13,7 @@
*/
#include <linux/interrupt.h>
+#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clocksource.h>
@@ -139,7 +140,7 @@ static u64 arc_read_rtc(struct clocksource *cs)
l = read_aux_reg(AUX_RTC_LOW);
h = read_aux_reg(AUX_RTC_HIGH);
status = read_aux_reg(AUX_RTC_CTRL);
- } while (!(status & _BITUL(31)));
+ } while (!(status & BIT(31)));
return (((u64)h) << 32) | l;
}
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 07e57a49d1e8..9a5464c625b4 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -801,14 +801,7 @@ static void arch_timer_evtstrm_enable(int divider)
cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
| ARCH_TIMER_VIRT_EVT_EN;
arch_timer_set_cntkctl(cntkctl);
-#ifdef CONFIG_ARM64
- cpu_set_named_feature(EVTSTRM);
-#else
- elf_hwcap |= HWCAP_EVTSTRM;
-#endif
-#ifdef CONFIG_COMPAT
- compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM;
-#endif
+ arch_timer_set_evtstrm_feature();
cpumask_set_cpu(smp_processor_id(), &evtstrm_available);
}
@@ -1037,11 +1030,7 @@ static int arch_timer_cpu_pm_notify(struct notifier_block *self,
} else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT) {
arch_timer_set_cntkctl(__this_cpu_read(saved_cntkctl));
-#ifdef CONFIG_ARM64
- if (cpu_have_named_feature(EVTSTRM))
-#else
- if (elf_hwcap & HWCAP_EVTSTRM)
-#endif
+ if (arch_timer_have_evtstrm_feature())
cpumask_set_cpu(smp_processor_id(), &evtstrm_available);
}
return NOTIFY_OK;
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index e8eab16b154b..74cb299f5089 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -206,7 +206,7 @@ static void exynos4_frc_resume(struct clocksource *cs)
static struct clocksource mct_frc = {
.name = "mct-frc",
- .rating = 400,
+ .rating = 450, /* use value higher than ARM arch timer */
.read = exynos4_frc_read,
.mask = CLOCKSOURCE_MASK(32),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
@@ -461,7 +461,7 @@ static int exynos4_mct_starting_cpu(unsigned int cpu)
evt->set_state_oneshot_stopped = set_state_shutdown;
evt->tick_resume = set_state_shutdown;
evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
- evt->rating = 450;
+ evt->rating = 500; /* use value higher than ARM arch timer */
exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET);
diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c
new file mode 100644
index 000000000000..ba2c79e6a0ee
--- /dev/null
+++ b/drivers/clocksource/hyperv_timer.c
@@ -0,0 +1,339 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Clocksource driver for the synthetic counter and timers
+ * provided by the Hyper-V hypervisor to guest VMs, as described
+ * in the Hyper-V Top Level Functional Spec (TLFS). This driver
+ * is instruction set architecture independent.
+ *
+ * Copyright (C) 2019, Microsoft, Inc.
+ *
+ * Author: Michael Kelley <mikelley@microsoft.com>
+ */
+
+#include <linux/percpu.h>
+#include <linux/cpumask.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/sched_clock.h>
+#include <linux/mm.h>
+#include <clocksource/hyperv_timer.h>
+#include <asm/hyperv-tlfs.h>
+#include <asm/mshyperv.h>
+
+static struct clock_event_device __percpu *hv_clock_event;
+
+/*
+ * If false, we're using the old mechanism for stimer0 interrupts
+ * where it sends a VMbus message when it expires. The old
+ * mechanism is used when running on older versions of Hyper-V
+ * that don't support Direct Mode. While Hyper-V provides
+ * four stimers per CPU, Linux uses only stimer0.
+ */
+static bool direct_mode_enabled;
+
+static int stimer0_irq;
+static int stimer0_vector;
+static int stimer0_message_sint;
+
+/*
+ * ISR for when stimer0 is operating in Direct Mode. Direct Mode
+ * does not use VMbus or any VMbus messages, so process here and not
+ * in the VMbus driver code.
+ */
+void hv_stimer0_isr(void)
+{
+ struct clock_event_device *ce;
+
+ ce = this_cpu_ptr(hv_clock_event);
+ ce->event_handler(ce);
+}
+EXPORT_SYMBOL_GPL(hv_stimer0_isr);
+
+static int hv_ce_set_next_event(unsigned long delta,
+ struct clock_event_device *evt)
+{
+ u64 current_tick;
+
+ current_tick = hyperv_cs->read(NULL);
+ current_tick += delta;
+ hv_init_timer(0, current_tick);
+ return 0;
+}
+
+static int hv_ce_shutdown(struct clock_event_device *evt)
+{
+ hv_init_timer(0, 0);
+ hv_init_timer_config(0, 0);
+ if (direct_mode_enabled)
+ hv_disable_stimer0_percpu_irq(stimer0_irq);
+
+ return 0;
+}
+
+static int hv_ce_set_oneshot(struct clock_event_device *evt)
+{
+ union hv_stimer_config timer_cfg;
+
+ timer_cfg.as_uint64 = 0;
+ timer_cfg.enable = 1;
+ timer_cfg.auto_enable = 1;
+ if (direct_mode_enabled) {
+ /*
+ * When it expires, the timer will directly interrupt
+ * on the specified hardware vector/IRQ.
+ */
+ timer_cfg.direct_mode = 1;
+ timer_cfg.apic_vector = stimer0_vector;
+ hv_enable_stimer0_percpu_irq(stimer0_irq);
+ } else {
+ /*
+ * When it expires, the timer will generate a VMbus message,
+ * to be handled by the normal VMbus interrupt handler.
+ */
+ timer_cfg.direct_mode = 0;
+ timer_cfg.sintx = stimer0_message_sint;
+ }
+ hv_init_timer_config(0, timer_cfg.as_uint64);
+ return 0;
+}
+
+/*
+ * hv_stimer_init - Per-cpu initialization of the clockevent
+ */
+void hv_stimer_init(unsigned int cpu)
+{
+ struct clock_event_device *ce;
+
+ /*
+ * Synthetic timers are always available except on old versions of
+ * Hyper-V on x86. In that case, just return as Linux will use a
+ * clocksource based on emulated PIT or LAPIC timer hardware.
+ */
+ if (!(ms_hyperv.features & HV_MSR_SYNTIMER_AVAILABLE))
+ return;
+
+ ce = per_cpu_ptr(hv_clock_event, cpu);
+ ce->name = "Hyper-V clockevent";
+ ce->features = CLOCK_EVT_FEAT_ONESHOT;
+ ce->cpumask = cpumask_of(cpu);
+ ce->rating = 1000;
+ ce->set_state_shutdown = hv_ce_shutdown;
+ ce->set_state_oneshot = hv_ce_set_oneshot;
+ ce->set_next_event = hv_ce_set_next_event;
+
+ clockevents_config_and_register(ce,
+ HV_CLOCK_HZ,
+ HV_MIN_DELTA_TICKS,
+ HV_MAX_MAX_DELTA_TICKS);
+}
+EXPORT_SYMBOL_GPL(hv_stimer_init);
+
+/*
+ * hv_stimer_cleanup - Per-cpu cleanup of the clockevent
+ */
+void hv_stimer_cleanup(unsigned int cpu)
+{
+ struct clock_event_device *ce;
+
+ /* Turn off clockevent device */
+ if (ms_hyperv.features & HV_MSR_SYNTIMER_AVAILABLE) {
+ ce = per_cpu_ptr(hv_clock_event, cpu);
+ hv_ce_shutdown(ce);
+ }
+}
+EXPORT_SYMBOL_GPL(hv_stimer_cleanup);
+
+/* hv_stimer_alloc - Global initialization of the clockevent and stimer0 */
+int hv_stimer_alloc(int sint)
+{
+ int ret;
+
+ hv_clock_event = alloc_percpu(struct clock_event_device);
+ if (!hv_clock_event)
+ return -ENOMEM;
+
+ direct_mode_enabled = ms_hyperv.misc_features &
+ HV_STIMER_DIRECT_MODE_AVAILABLE;
+ if (direct_mode_enabled) {
+ ret = hv_setup_stimer0_irq(&stimer0_irq, &stimer0_vector,
+ hv_stimer0_isr);
+ if (ret) {
+ free_percpu(hv_clock_event);
+ hv_clock_event = NULL;
+ return ret;
+ }
+ }
+
+ stimer0_message_sint = sint;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hv_stimer_alloc);
+
+/* hv_stimer_free - Free global resources allocated by hv_stimer_alloc() */
+void hv_stimer_free(void)
+{
+ if (direct_mode_enabled && (stimer0_irq != 0)) {
+ hv_remove_stimer0_irq(stimer0_irq);
+ stimer0_irq = 0;
+ }
+ free_percpu(hv_clock_event);
+ hv_clock_event = NULL;
+}
+EXPORT_SYMBOL_GPL(hv_stimer_free);
+
+/*
+ * Do a global cleanup of clockevents for the cases of kexec and
+ * vmbus exit
+ */
+void hv_stimer_global_cleanup(void)
+{
+ int cpu;
+ struct clock_event_device *ce;
+
+ if (ms_hyperv.features & HV_MSR_SYNTIMER_AVAILABLE) {
+ for_each_present_cpu(cpu) {
+ ce = per_cpu_ptr(hv_clock_event, cpu);
+ clockevents_unbind_device(ce, cpu);
+ }
+ }
+ hv_stimer_free();
+}
+EXPORT_SYMBOL_GPL(hv_stimer_global_cleanup);
+
+/*
+ * Code and definitions for the Hyper-V clocksources. Two
+ * clocksources are defined: one that reads the Hyper-V defined MSR, and
+ * the other that uses the TSC reference page feature as defined in the
+ * TLFS. The MSR version is for compatibility with old versions of
+ * Hyper-V and 32-bit x86. The TSC reference page version is preferred.
+ */
+
+struct clocksource *hyperv_cs;
+EXPORT_SYMBOL_GPL(hyperv_cs);
+
+#ifdef CONFIG_HYPERV_TSCPAGE
+
+static struct ms_hyperv_tsc_page *tsc_pg;
+
+struct ms_hyperv_tsc_page *hv_get_tsc_page(void)
+{
+ return tsc_pg;
+}
+EXPORT_SYMBOL_GPL(hv_get_tsc_page);
+
+static u64 notrace read_hv_sched_clock_tsc(void)
+{
+ u64 current_tick = hv_read_tsc_page(tsc_pg);
+
+ if (current_tick == U64_MAX)
+ hv_get_time_ref_count(current_tick);
+
+ return current_tick;
+}
+
+static u64 read_hv_clock_tsc(struct clocksource *arg)
+{
+ return read_hv_sched_clock_tsc();
+}
+
+static struct clocksource hyperv_cs_tsc = {
+ .name = "hyperv_clocksource_tsc_page",
+ .rating = 400,
+ .read = read_hv_clock_tsc,
+ .mask = CLOCKSOURCE_MASK(64),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+#endif
+
+static u64 notrace read_hv_sched_clock_msr(void)
+{
+ u64 current_tick;
+ /*
+ * Read the partition counter to get the current tick count. This count
+ * is set to 0 when the partition is created and is incremented in
+ * 100 nanosecond units.
+ */
+ hv_get_time_ref_count(current_tick);
+ return current_tick;
+}
+
+static u64 read_hv_clock_msr(struct clocksource *arg)
+{
+ return read_hv_sched_clock_msr();
+}
+
+static struct clocksource hyperv_cs_msr = {
+ .name = "hyperv_clocksource_msr",
+ .rating = 400,
+ .read = read_hv_clock_msr,
+ .mask = CLOCKSOURCE_MASK(64),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+#ifdef CONFIG_HYPERV_TSCPAGE
+static bool __init hv_init_tsc_clocksource(void)
+{
+ u64 tsc_msr;
+ phys_addr_t phys_addr;
+
+ if (!(ms_hyperv.features & HV_MSR_REFERENCE_TSC_AVAILABLE))
+ return false;
+
+ tsc_pg = vmalloc(PAGE_SIZE);
+ if (!tsc_pg)
+ return false;
+
+ hyperv_cs = &hyperv_cs_tsc;
+ phys_addr = page_to_phys(vmalloc_to_page(tsc_pg));
+
+ /*
+ * The Hyper-V TLFS specifies to preserve the value of reserved
+ * bits in registers. So read the existing value, preserve the
+ * low order 12 bits, and add in the guest physical address
+ * (which already has at least the low 12 bits set to zero since
+ * it is page aligned). Also set the "enable" bit, which is bit 0.
+ */
+ hv_get_reference_tsc(tsc_msr);
+ tsc_msr &= GENMASK_ULL(11, 0);
+ tsc_msr = tsc_msr | 0x1 | (u64)phys_addr;
+ hv_set_reference_tsc(tsc_msr);
+
+ hv_set_clocksource_vdso(hyperv_cs_tsc);
+ clocksource_register_hz(&hyperv_cs_tsc, NSEC_PER_SEC/100);
+
+ /* sched_clock_register is needed on ARM64 but is a no-op on x86 */
+ sched_clock_register(read_hv_sched_clock_tsc, 64, HV_CLOCK_HZ);
+ return true;
+}
+#else
+static bool __init hv_init_tsc_clocksource(void)
+{
+ return false;
+}
+#endif
+
+
+void __init hv_init_clocksource(void)
+{
+ /*
+ * Try to set up the TSC page clocksource. If it succeeds, we're
+ * done. Otherwise, set up the MSR clocksource. At least one of
+ * these will always be available except on very old versions of
+ * Hyper-V on x86. In that case we won't have a Hyper-V
+ * clocksource, but Linux will still run with a clocksource based
+ * on the emulated PIT or LAPIC timer.
+ */
+ if (hv_init_tsc_clocksource())
+ return;
+
+ if (!(ms_hyperv.features & HV_MSR_TIME_REF_COUNT_AVAILABLE))
+ return;
+
+ hyperv_cs = &hyperv_cs_msr;
+ clocksource_register_hz(&hyperv_cs_msr, NSEC_PER_SEC/100);
+
+ /* sched_clock_register is needed on ARM64 but is a no-op on x86 */
+ sched_clock_register(read_hv_sched_clock_msr, 64, HV_CLOCK_HZ);
+}
+EXPORT_SYMBOL_GPL(hv_init_clocksource);
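hv_init_tsc_clocksource() above composes the reference-TSC MSR value from the preserved low 12 bits, the page-aligned guest physical address, and the enable bit (bit 0). A standalone sketch of that bit arithmetic; the old MSR and address values below are made up for illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t old_msr   = 0x0000000000000abcULL; /* value read back from the MSR */
	uint64_t phys_addr = 0x0000000123456000ULL; /* page aligned: low 12 bits are zero */
	uint64_t new_msr;

	new_msr  = old_msr & 0xfffULL;  /* preserve reserved bits 11:0 */
	new_msr |= phys_addr;           /* guest physical address of the TSC page */
	new_msr |= 0x1;                 /* bit 0: enable */

	printf("new MSR value: 0x%016llx\n", (unsigned long long)new_msr);
	return 0;
}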
diff --git a/drivers/clocksource/timer-davinci.c b/drivers/clocksource/timer-davinci.c
new file mode 100644
index 000000000000..62745c962049
--- /dev/null
+++ b/drivers/clocksource/timer-davinci.c
@@ -0,0 +1,369 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * TI DaVinci clocksource driver
+ *
+ * Copyright (C) 2019 Texas Instruments
+ * Author: Bartosz Golaszewski <bgolaszewski@baylibre.com>
+ * (with tiny parts adopted from code by Kevin Hilman <khilman@baylibre.com>)
+ */
+
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/sched_clock.h>
+
+#include <clocksource/timer-davinci.h>
+
+#undef pr_fmt
+#define pr_fmt(fmt) "%s: " fmt "\n", __func__
+
+#define DAVINCI_TIMER_REG_TIM12 0x10
+#define DAVINCI_TIMER_REG_TIM34 0x14
+#define DAVINCI_TIMER_REG_PRD12 0x18
+#define DAVINCI_TIMER_REG_PRD34 0x1c
+#define DAVINCI_TIMER_REG_TCR 0x20
+#define DAVINCI_TIMER_REG_TGCR 0x24
+
+#define DAVINCI_TIMER_TIMMODE_MASK GENMASK(3, 2)
+#define DAVINCI_TIMER_RESET_MASK GENMASK(1, 0)
+#define DAVINCI_TIMER_TIMMODE_32BIT_UNCHAINED BIT(2)
+#define DAVINCI_TIMER_UNRESET GENMASK(1, 0)
+
+#define DAVINCI_TIMER_ENAMODE_MASK GENMASK(1, 0)
+#define DAVINCI_TIMER_ENAMODE_DISABLED 0x00
+#define DAVINCI_TIMER_ENAMODE_ONESHOT BIT(0)
+#define DAVINCI_TIMER_ENAMODE_PERIODIC BIT(1)
+
+#define DAVINCI_TIMER_ENAMODE_SHIFT_TIM12 6
+#define DAVINCI_TIMER_ENAMODE_SHIFT_TIM34 22
+
+#define DAVINCI_TIMER_MIN_DELTA 0x01
+#define DAVINCI_TIMER_MAX_DELTA 0xfffffffe
+
+#define DAVINCI_TIMER_CLKSRC_BITS 32
+
+#define DAVINCI_TIMER_TGCR_DEFAULT \
+ (DAVINCI_TIMER_TIMMODE_32BIT_UNCHAINED | DAVINCI_TIMER_UNRESET)
+
+struct davinci_clockevent {
+ struct clock_event_device dev;
+ void __iomem *base;
+ unsigned int cmp_off;
+};
+
+/*
+ * This must be globally accessible by davinci_timer_read_sched_clock(), so
+ * let's keep it here.
+ */
+static struct {
+ struct clocksource dev;
+ void __iomem *base;
+ unsigned int tim_off;
+} davinci_clocksource;
+
+static struct davinci_clockevent *
+to_davinci_clockevent(struct clock_event_device *clockevent)
+{
+ return container_of(clockevent, struct davinci_clockevent, dev);
+}
+
+static unsigned int
+davinci_clockevent_read(struct davinci_clockevent *clockevent,
+ unsigned int reg)
+{
+ return readl_relaxed(clockevent->base + reg);
+}
+
+static void davinci_clockevent_write(struct davinci_clockevent *clockevent,
+ unsigned int reg, unsigned int val)
+{
+ writel_relaxed(val, clockevent->base + reg);
+}
+
+static void davinci_tim12_shutdown(void __iomem *base)
+{
+ unsigned int tcr;
+
+ tcr = DAVINCI_TIMER_ENAMODE_DISABLED <<
+ DAVINCI_TIMER_ENAMODE_SHIFT_TIM12;
+ /*
+ * This function is only ever called if we're using both timer
+ * halves. In this case TIM34 runs in periodic mode and we must
+ * not modify it.
+ */
+ tcr |= DAVINCI_TIMER_ENAMODE_PERIODIC <<
+ DAVINCI_TIMER_ENAMODE_SHIFT_TIM34;
+
+ writel_relaxed(tcr, base + DAVINCI_TIMER_REG_TCR);
+}
+
+static void davinci_tim12_set_oneshot(void __iomem *base)
+{
+ unsigned int tcr;
+
+ tcr = DAVINCI_TIMER_ENAMODE_ONESHOT <<
+ DAVINCI_TIMER_ENAMODE_SHIFT_TIM12;
+ /* Same as above. */
+ tcr |= DAVINCI_TIMER_ENAMODE_PERIODIC <<
+ DAVINCI_TIMER_ENAMODE_SHIFT_TIM34;
+
+ writel_relaxed(tcr, base + DAVINCI_TIMER_REG_TCR);
+}
+
+static int davinci_clockevent_shutdown(struct clock_event_device *dev)
+{
+ struct davinci_clockevent *clockevent;
+
+ clockevent = to_davinci_clockevent(dev);
+
+ davinci_tim12_shutdown(clockevent->base);
+
+ return 0;
+}
+
+static int davinci_clockevent_set_oneshot(struct clock_event_device *dev)
+{
+ struct davinci_clockevent *clockevent = to_davinci_clockevent(dev);
+
+ davinci_clockevent_write(clockevent, DAVINCI_TIMER_REG_TIM12, 0x0);
+
+ davinci_tim12_set_oneshot(clockevent->base);
+
+ return 0;
+}
+
+static int
+davinci_clockevent_set_next_event_std(unsigned long cycles,
+ struct clock_event_device *dev)
+{
+ struct davinci_clockevent *clockevent = to_davinci_clockevent(dev);
+
+ davinci_clockevent_shutdown(dev);
+
+ davinci_clockevent_write(clockevent, DAVINCI_TIMER_REG_TIM12, 0x0);
+ davinci_clockevent_write(clockevent, DAVINCI_TIMER_REG_PRD12, cycles);
+
+ davinci_clockevent_set_oneshot(dev);
+
+ return 0;
+}
+
+static int
+davinci_clockevent_set_next_event_cmp(unsigned long cycles,
+ struct clock_event_device *dev)
+{
+ struct davinci_clockevent *clockevent = to_davinci_clockevent(dev);
+ unsigned int curr_time;
+
+ curr_time = davinci_clockevent_read(clockevent,
+ DAVINCI_TIMER_REG_TIM12);
+ davinci_clockevent_write(clockevent,
+ clockevent->cmp_off, curr_time + cycles);
+
+ return 0;
+}
+
+static irqreturn_t davinci_timer_irq_timer(int irq, void *data)
+{
+ struct davinci_clockevent *clockevent = data;
+
+ if (!clockevent_state_oneshot(&clockevent->dev))
+ davinci_tim12_shutdown(clockevent->base);
+
+ clockevent->dev.event_handler(&clockevent->dev);
+
+ return IRQ_HANDLED;
+}
+
+static u64 notrace davinci_timer_read_sched_clock(void)
+{
+ return readl_relaxed(davinci_clocksource.base +
+ davinci_clocksource.tim_off);
+}
+
+static u64 davinci_clocksource_read(struct clocksource *dev)
+{
+ return davinci_timer_read_sched_clock();
+}
+
+/*
+ * Standard use-case: we're using tim12 for clockevent and tim34 for
+ * clocksource. The default is making the former run in oneshot mode
+ * and the latter in periodic mode.
+ */
+static void davinci_clocksource_init_tim34(void __iomem *base)
+{
+ int tcr;
+
+ tcr = DAVINCI_TIMER_ENAMODE_PERIODIC <<
+ DAVINCI_TIMER_ENAMODE_SHIFT_TIM34;
+ tcr |= DAVINCI_TIMER_ENAMODE_ONESHOT <<
+ DAVINCI_TIMER_ENAMODE_SHIFT_TIM12;
+
+ writel_relaxed(0x0, base + DAVINCI_TIMER_REG_TIM34);
+ writel_relaxed(UINT_MAX, base + DAVINCI_TIMER_REG_PRD34);
+ writel_relaxed(tcr, base + DAVINCI_TIMER_REG_TCR);
+}
+
+/*
+ * Special use-case on da830: the DSP may use tim34. We're using tim12 for
+ * both clocksource and clockevent. We set tim12 to periodic and don't touch
+ * tim34.
+ */
+static void davinci_clocksource_init_tim12(void __iomem *base)
+{
+ unsigned int tcr;
+
+ tcr = DAVINCI_TIMER_ENAMODE_PERIODIC <<
+ DAVINCI_TIMER_ENAMODE_SHIFT_TIM12;
+
+ writel_relaxed(0x0, base + DAVINCI_TIMER_REG_TIM12);
+ writel_relaxed(UINT_MAX, base + DAVINCI_TIMER_REG_PRD12);
+ writel_relaxed(tcr, base + DAVINCI_TIMER_REG_TCR);
+}
+
+static void davinci_timer_init(void __iomem *base)
+{
+ /* Set clock to internal mode and disable it. */
+ writel_relaxed(0x0, base + DAVINCI_TIMER_REG_TCR);
+ /*
+ * Reset both 32-bit timers, set no prescaler for timer 34, set the
+ * timer to dual 32-bit unchained mode, unreset both 32-bit timers.
+ */
+ writel_relaxed(DAVINCI_TIMER_TGCR_DEFAULT,
+ base + DAVINCI_TIMER_REG_TGCR);
+ /* Init both counters to zero. */
+ writel_relaxed(0x0, base + DAVINCI_TIMER_REG_TIM12);
+ writel_relaxed(0x0, base + DAVINCI_TIMER_REG_TIM34);
+}
+
+int __init davinci_timer_register(struct clk *clk,
+ const struct davinci_timer_cfg *timer_cfg)
+{
+ struct davinci_clockevent *clockevent;
+ unsigned int tick_rate;
+ void __iomem *base;
+ int rv;
+
+ rv = clk_prepare_enable(clk);
+ if (rv) {
+ pr_err("Unable to prepare and enable the timer clock");
+ return rv;
+ }
+
+ if (!request_mem_region(timer_cfg->reg.start,
+ resource_size(&timer_cfg->reg),
+ "davinci-timer")) {
+ pr_err("Unable to request memory region");
+ return -EBUSY;
+ }
+
+ base = ioremap(timer_cfg->reg.start, resource_size(&timer_cfg->reg));
+ if (!base) {
+ pr_err("Unable to map the register range");
+ return -ENOMEM;
+ }
+
+ davinci_timer_init(base);
+ tick_rate = clk_get_rate(clk);
+
+ clockevent = kzalloc(sizeof(*clockevent), GFP_KERNEL | __GFP_NOFAIL);
+ if (!clockevent) {
+ pr_err("Error allocating memory for clockevent data");
+ return -ENOMEM;
+ }
+
+ clockevent->dev.name = "tim12";
+ clockevent->dev.features = CLOCK_EVT_FEAT_ONESHOT;
+ clockevent->dev.cpumask = cpumask_of(0);
+ clockevent->base = base;
+
+ if (timer_cfg->cmp_off) {
+ clockevent->cmp_off = timer_cfg->cmp_off;
+ clockevent->dev.set_next_event =
+ davinci_clockevent_set_next_event_cmp;
+ } else {
+ clockevent->dev.set_next_event =
+ davinci_clockevent_set_next_event_std;
+ clockevent->dev.set_state_oneshot =
+ davinci_clockevent_set_oneshot;
+ clockevent->dev.set_state_shutdown =
+ davinci_clockevent_shutdown;
+ }
+
+ rv = request_irq(timer_cfg->irq[DAVINCI_TIMER_CLOCKEVENT_IRQ].start,
+ davinci_timer_irq_timer, IRQF_TIMER,
+ "clockevent/tim12", clockevent);
+ if (rv) {
+ pr_err("Unable to request the clockevent interrupt");
+ return rv;
+ }
+
+ clockevents_config_and_register(&clockevent->dev, tick_rate,
+ DAVINCI_TIMER_MIN_DELTA,
+ DAVINCI_TIMER_MAX_DELTA);
+
+ davinci_clocksource.dev.rating = 300;
+ davinci_clocksource.dev.read = davinci_clocksource_read;
+ davinci_clocksource.dev.mask =
+ CLOCKSOURCE_MASK(DAVINCI_TIMER_CLKSRC_BITS);
+ davinci_clocksource.dev.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+ davinci_clocksource.base = base;
+
+ if (timer_cfg->cmp_off) {
+ davinci_clocksource.dev.name = "tim12";
+ davinci_clocksource.tim_off = DAVINCI_TIMER_REG_TIM12;
+ davinci_clocksource_init_tim12(base);
+ } else {
+ davinci_clocksource.dev.name = "tim34";
+ davinci_clocksource.tim_off = DAVINCI_TIMER_REG_TIM34;
+ davinci_clocksource_init_tim34(base);
+ }
+
+ rv = clocksource_register_hz(&davinci_clocksource.dev, tick_rate);
+ if (rv) {
+ pr_err("Unable to register clocksource");
+ return rv;
+ }
+
+ sched_clock_register(davinci_timer_read_sched_clock,
+ DAVINCI_TIMER_CLKSRC_BITS, tick_rate);
+
+ return 0;
+}
+
+static int __init of_davinci_timer_register(struct device_node *np)
+{
+ struct davinci_timer_cfg timer_cfg = { };
+ struct clk *clk;
+ int rv;
+
+ rv = of_address_to_resource(np, 0, &timer_cfg.reg);
+ if (rv) {
+ pr_err("Unable to get the register range for timer");
+ return rv;
+ }
+
+ rv = of_irq_to_resource_table(np, timer_cfg.irq,
+ DAVINCI_TIMER_NUM_IRQS);
+ if (rv != DAVINCI_TIMER_NUM_IRQS) {
+ pr_err("Unable to get the interrupts for timer");
+ return rv;
+ }
+
+ clk = of_clk_get(np, 0);
+ if (IS_ERR(clk)) {
+ pr_err("Unable to get the timer clock");
+ return PTR_ERR(clk);
+ }
+
+ rv = davinci_timer_register(clk, &timer_cfg);
+ if (rv)
+ clk_put(clk);
+
+ return rv;
+}
+TIMER_OF_DECLARE(davinci_timer, "ti,da830-timer", of_davinci_timer_register);
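davinci_clockevent_set_next_event_cmp() above programs the compare register to the current free-running count plus the requested number of cycles. A minimal sketch of that pattern, assuming two plain variables (tim12, cmp12) in place of the real memory-mapped TIM12 and compare registers:

#include <stdint.h>

volatile uint32_t tim12;	/* free-running counter */
volatile uint32_t cmp12;	/* compare register */

int set_next_event_cmp(unsigned long cycles)
{
	uint32_t now = tim12;

	/* Fire once the free-running counter reaches now + cycles. */
	cmp12 = now + (uint32_t)cycles;
	return 0;
}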
diff --git a/drivers/clocksource/timer-imx-sysctr.c b/drivers/clocksource/timer-imx-sysctr.c
new file mode 100644
index 000000000000..fd7d68066efb
--- /dev/null
+++ b/drivers/clocksource/timer-imx-sysctr.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright 2017-2019 NXP
+
+#include <linux/interrupt.h>
+#include <linux/clockchips.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include "timer-of.h"
+
+#define CMP_OFFSET 0x10000
+
+#define CNTCV_LO 0x8
+#define CNTCV_HI 0xc
+#define CMPCV_LO (CMP_OFFSET + 0x20)
+#define CMPCV_HI (CMP_OFFSET + 0x24)
+#define CMPCR (CMP_OFFSET + 0x2c)
+
+#define SYS_CTR_EN 0x1
+#define SYS_CTR_IRQ_MASK 0x2
+
+static void __iomem *sys_ctr_base;
+static u32 cmpcr;
+
+static void sysctr_timer_enable(bool enable)
+{
+ writel(enable ? cmpcr | SYS_CTR_EN : cmpcr, sys_ctr_base + CMPCR);
+}
+
+static void sysctr_irq_acknowledge(void)
+{
+ /*
+ * Clearing the enable bit (EN = 0) also clears the
+ * status bit (ISTAT = 0), which negates (acknowledges)
+ * the interrupt signal.
+ */
+ sysctr_timer_enable(false);
+}
+
+static inline u64 sysctr_read_counter(void)
+{
+ u32 cnt_hi, tmp_hi, cnt_lo;
+
+ do {
+ cnt_hi = readl_relaxed(sys_ctr_base + CNTCV_HI);
+ cnt_lo = readl_relaxed(sys_ctr_base + CNTCV_LO);
+ tmp_hi = readl_relaxed(sys_ctr_base + CNTCV_HI);
+ } while (tmp_hi != cnt_hi);
+
+ return ((u64) cnt_hi << 32) | cnt_lo;
+}
+
+static int sysctr_set_next_event(unsigned long delta,
+ struct clock_event_device *evt)
+{
+ u32 cmp_hi, cmp_lo;
+ u64 next;
+
+ sysctr_timer_enable(false);
+
+ next = sysctr_read_counter();
+
+ next += delta;
+
+ cmp_hi = (next >> 32) & 0x00fffff;
+ cmp_lo = next & 0xffffffff;
+
+ writel_relaxed(cmp_hi, sys_ctr_base + CMPCV_HI);
+ writel_relaxed(cmp_lo, sys_ctr_base + CMPCV_LO);
+
+ sysctr_timer_enable(true);
+
+ return 0;
+}
+
+static int sysctr_set_state_oneshot(struct clock_event_device *evt)
+{
+ return 0;
+}
+
+static int sysctr_set_state_shutdown(struct clock_event_device *evt)
+{
+ sysctr_timer_enable(false);
+
+ return 0;
+}
+
+static irqreturn_t sysctr_timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = dev_id;
+
+ sysctr_irq_acknowledge();
+
+ evt->event_handler(evt);
+
+ return IRQ_HANDLED;
+}
+
+static struct timer_of to_sysctr = {
+ .flags = TIMER_OF_IRQ | TIMER_OF_CLOCK | TIMER_OF_BASE,
+ .clkevt = {
+ .name = "i.MX system counter timer",
+ .features = CLOCK_EVT_FEAT_ONESHOT |
+ CLOCK_EVT_FEAT_DYNIRQ,
+ .set_state_oneshot = sysctr_set_state_oneshot,
+ .set_next_event = sysctr_set_next_event,
+ .set_state_shutdown = sysctr_set_state_shutdown,
+ .rating = 200,
+ },
+ .of_irq = {
+ .handler = sysctr_timer_interrupt,
+ .flags = IRQF_TIMER | IRQF_IRQPOLL,
+ },
+ .of_clk = {
+ .name = "per",
+ },
+};
+
+static void __init sysctr_clockevent_init(void)
+{
+ to_sysctr.clkevt.cpumask = cpumask_of(0);
+
+ clockevents_config_and_register(&to_sysctr.clkevt,
+ timer_of_rate(&to_sysctr),
+ 0xff, 0x7fffffff);
+}
+
+static int __init sysctr_timer_init(struct device_node *np)
+{
+ int ret = 0;
+
+ ret = timer_of_init(np, &to_sysctr);
+ if (ret)
+ return ret;
+
+ sys_ctr_base = timer_of_base(&to_sysctr);
+ cmpcr = readl(sys_ctr_base + CMPCR);
+ cmpcr &= ~SYS_CTR_EN;
+
+ sysctr_clockevent_init();
+
+ return 0;
+}
+TIMER_OF_DECLARE(sysctr_timer, "nxp,sysctr-timer", sysctr_timer_init);
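sysctr_read_counter() above uses the classic hi/lo/hi re-read sequence to obtain a coherent 64-bit value from two 32-bit counter halves that cannot be read atomically. A standalone sketch of the same pattern, assuming two plain variables (reg_hi, reg_lo) in place of the memory-mapped halves:

#include <stdint.h>

volatile uint32_t reg_hi, reg_lo;

uint64_t read_counter64(void)
{
	uint32_t hi, lo, tmp;

	do {
		hi  = reg_hi;	/* sample the high word */
		lo  = reg_lo;	/* sample the low word */
		tmp = reg_hi;	/* re-read: did the high word change? */
	} while (tmp != hi);	/* if so, retry until the pair is coherent */

	return ((uint64_t)hi << 32) | lo;
}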
diff --git a/drivers/clocksource/timer-ixp4xx.c b/drivers/clocksource/timer-ixp4xx.c
index 5c2190b654cd..9396745e1c17 100644
--- a/drivers/clocksource/timer-ixp4xx.c
+++ b/drivers/clocksource/timer-ixp4xx.c
@@ -75,14 +75,19 @@ to_ixp4xx_timer(struct clock_event_device *evt)
return container_of(evt, struct ixp4xx_timer, clkevt);
}
-static u64 notrace ixp4xx_read_sched_clock(void)
+static unsigned long ixp4xx_read_timer(void)
{
return __raw_readl(local_ixp4xx_timer->base + IXP4XX_OSTS_OFFSET);
}
+static u64 notrace ixp4xx_read_sched_clock(void)
+{
+ return ixp4xx_read_timer();
+}
+
static u64 ixp4xx_clocksource_read(struct clocksource *c)
{
- return __raw_readl(local_ixp4xx_timer->base + IXP4XX_OSTS_OFFSET);
+ return ixp4xx_read_timer();
}
static irqreturn_t ixp4xx_timer_interrupt(int irq, void *dev_id)
@@ -224,6 +229,13 @@ static __init int ixp4xx_timer_register(void __iomem *base,
sched_clock_register(ixp4xx_read_sched_clock, 32, timer_freq);
+#ifdef CONFIG_ARM
+ /* Also use this timer for delays */
+ tmr->delay_timer.read_current_timer = ixp4xx_read_timer;
+ tmr->delay_timer.freq = timer_freq;
+ register_current_timer_delay(&tmr->delay_timer);
+#endif
+
return 0;
}
diff --git a/drivers/clocksource/timer-meson6.c b/drivers/clocksource/timer-meson6.c
index 84bd9479c3f8..9e8b467c71da 100644
--- a/drivers/clocksource/timer-meson6.c
+++ b/drivers/clocksource/timer-meson6.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Amlogic Meson6 SoCs timer handling.
*
* Copyright (C) 2014 Carlo Caione <carlo@caione.org>
*
* Based on code from Amlogic, Inc
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/bitfield.h>
diff --git a/drivers/clocksource/timer-npcm7xx.c b/drivers/clocksource/timer-npcm7xx.c
index 7a9bb5532d99..8a30da7f083b 100644
--- a/drivers/clocksource/timer-npcm7xx.c
+++ b/drivers/clocksource/timer-npcm7xx.c
@@ -32,7 +32,7 @@
#define NPCM7XX_Tx_INTEN BIT(29)
#define NPCM7XX_Tx_COUNTEN BIT(30)
#define NPCM7XX_Tx_ONESHOT 0x0
-#define NPCM7XX_Tx_OPER GENMASK(3, 27)
+#define NPCM7XX_Tx_OPER GENMASK(27, 3)
#define NPCM7XX_Tx_MIN_PRESCALE 0x1
#define NPCM7XX_Tx_TDR_MASK_BITS 24
#define NPCM7XX_Tx_MAX_CNT 0xFFFFFF
diff --git a/drivers/clocksource/timer-tegra.c b/drivers/clocksource/timer-tegra.c
new file mode 100644
index 000000000000..e9635c25eef4
--- /dev/null
+++ b/drivers/clocksource/timer-tegra.c
@@ -0,0 +1,416 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Colin Cross <ccross@google.com>
+ */
+
+#define pr_fmt(fmt) "tegra-timer: " fmt
+
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/percpu.h>
+#include <linux/sched_clock.h>
+#include <linux/time.h>
+
+#include "timer-of.h"
+
+#define RTC_SECONDS 0x08
+#define RTC_SHADOW_SECONDS 0x0c
+#define RTC_MILLISECONDS 0x10
+
+#define TIMERUS_CNTR_1US 0x10
+#define TIMERUS_USEC_CFG 0x14
+#define TIMERUS_CNTR_FREEZE 0x4c
+
+#define TIMER_PTV 0x0
+#define TIMER_PTV_EN BIT(31)
+#define TIMER_PTV_PER BIT(30)
+#define TIMER_PCR 0x4
+#define TIMER_PCR_INTR_CLR BIT(30)
+
+#define TIMER1_BASE 0x00
+#define TIMER2_BASE 0x08
+#define TIMER3_BASE 0x50
+#define TIMER4_BASE 0x58
+#define TIMER10_BASE 0x90
+
+#define TIMER1_IRQ_IDX 0
+#define TIMER10_IRQ_IDX 10
+
+#define TIMER_1MHz 1000000
+
+static u32 usec_config;
+static void __iomem *timer_reg_base;
+
+static int tegra_timer_set_next_event(unsigned long cycles,
+ struct clock_event_device *evt)
+{
+ void __iomem *reg_base = timer_of_base(to_timer_of(evt));
+
+ /*
+ * Tegra's timer uses an n+1 scheme for the counter, i.e. the timer
+ * will fire after one tick if 0 is loaded.
+ *
+ * The minimum and maximum numbers of oneshot ticks are defined by
+ * the clockevents_config_and_register(1, 0x1fffffff + 1) invocation
+ * below in the code, hence the cycles (ticks) can't fall outside
+ * the range supported by the hardware.
+ */
+ writel_relaxed(TIMER_PTV_EN | (cycles - 1), reg_base + TIMER_PTV);
+
+ return 0;
+}
+
+static int tegra_timer_shutdown(struct clock_event_device *evt)
+{
+ void __iomem *reg_base = timer_of_base(to_timer_of(evt));
+
+ writel_relaxed(0, reg_base + TIMER_PTV);
+
+ return 0;
+}
+
+static int tegra_timer_set_periodic(struct clock_event_device *evt)
+{
+ void __iomem *reg_base = timer_of_base(to_timer_of(evt));
+ unsigned long period = timer_of_period(to_timer_of(evt));
+
+ writel_relaxed(TIMER_PTV_EN | TIMER_PTV_PER | (period - 1),
+ reg_base + TIMER_PTV);
+
+ return 0;
+}
+
+static irqreturn_t tegra_timer_isr(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = dev_id;
+ void __iomem *reg_base = timer_of_base(to_timer_of(evt));
+
+ writel_relaxed(TIMER_PCR_INTR_CLR, reg_base + TIMER_PCR);
+ evt->event_handler(evt);
+
+ return IRQ_HANDLED;
+}
+
+static void tegra_timer_suspend(struct clock_event_device *evt)
+{
+ void __iomem *reg_base = timer_of_base(to_timer_of(evt));
+
+ writel_relaxed(TIMER_PCR_INTR_CLR, reg_base + TIMER_PCR);
+}
+
+static void tegra_timer_resume(struct clock_event_device *evt)
+{
+ writel_relaxed(usec_config, timer_reg_base + TIMERUS_USEC_CFG);
+}
+
+static DEFINE_PER_CPU(struct timer_of, tegra_to) = {
+ .flags = TIMER_OF_CLOCK | TIMER_OF_BASE,
+
+ .clkevt = {
+ .name = "tegra_timer",
+ .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
+ .set_next_event = tegra_timer_set_next_event,
+ .set_state_shutdown = tegra_timer_shutdown,
+ .set_state_periodic = tegra_timer_set_periodic,
+ .set_state_oneshot = tegra_timer_shutdown,
+ .tick_resume = tegra_timer_shutdown,
+ .suspend = tegra_timer_suspend,
+ .resume = tegra_timer_resume,
+ },
+};
+
+static int tegra_timer_setup(unsigned int cpu)
+{
+ struct timer_of *to = per_cpu_ptr(&tegra_to, cpu);
+
+ writel_relaxed(0, timer_of_base(to) + TIMER_PTV);
+ writel_relaxed(TIMER_PCR_INTR_CLR, timer_of_base(to) + TIMER_PCR);
+
+ irq_force_affinity(to->clkevt.irq, cpumask_of(cpu));
+ enable_irq(to->clkevt.irq);
+
+ /*
+ * Tegra's timer uses an n+1 scheme for the counter, i.e. the timer
+ * will fire after one tick if 0 is loaded, and thus the minimum
+ * number of ticks is 1. As a result, both of the clockevent's tick
+ * limits are one higher than the minimum and maximum the hardware
+ * register can take; this is then taken into account by the
+ * set_next_event callback.
+ */
+ clockevents_config_and_register(&to->clkevt, timer_of_rate(to),
+ 1, /* min */
+ 0x1fffffff + 1); /* max 29 bits + 1 */
+
+ return 0;
+}
+
+static int tegra_timer_stop(unsigned int cpu)
+{
+ struct timer_of *to = per_cpu_ptr(&tegra_to, cpu);
+
+ to->clkevt.set_state_shutdown(&to->clkevt);
+ disable_irq_nosync(to->clkevt.irq);
+
+ return 0;
+}
+
+static u64 notrace tegra_read_sched_clock(void)
+{
+ return readl_relaxed(timer_reg_base + TIMERUS_CNTR_1US);
+}
+
+#ifdef CONFIG_ARM
+static unsigned long tegra_delay_timer_read_counter_long(void)
+{
+ return readl_relaxed(timer_reg_base + TIMERUS_CNTR_1US);
+}
+
+static struct delay_timer tegra_delay_timer = {
+ .read_current_timer = tegra_delay_timer_read_counter_long,
+ .freq = TIMER_1MHz,
+};
+#endif
+
+static struct timer_of suspend_rtc_to = {
+ .flags = TIMER_OF_BASE | TIMER_OF_CLOCK,
+};
+
+/*
+ * tegra_rtc_read - Reads the Tegra RTC registers
+ * Care must be taken that this function is not called while the
+ * tegra_rtc driver could be executing to avoid race conditions
+ * on the RTC shadow register
+ */
+static u64 tegra_rtc_read_ms(struct clocksource *cs)
+{
+ void __iomem *reg_base = timer_of_base(&suspend_rtc_to);
+
+ u32 ms = readl_relaxed(reg_base + RTC_MILLISECONDS);
+ u32 s = readl_relaxed(reg_base + RTC_SHADOW_SECONDS);
+
+ return (u64)s * MSEC_PER_SEC + ms;
+}
+
+static struct clocksource suspend_rtc_clocksource = {
+ .name = "tegra_suspend_timer",
+ .rating = 200,
+ .read = tegra_rtc_read_ms,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_SUSPEND_NONSTOP,
+};
+
+static inline unsigned int tegra_base_for_cpu(int cpu, bool tegra20)
+{
+ if (tegra20) {
+ switch (cpu) {
+ case 0:
+ return TIMER1_BASE;
+ case 1:
+ return TIMER2_BASE;
+ case 2:
+ return TIMER3_BASE;
+ default:
+ return TIMER4_BASE;
+ }
+ }
+
+ return TIMER10_BASE + cpu * 8;
+}
+
+static inline unsigned int tegra_irq_idx_for_cpu(int cpu, bool tegra20)
+{
+ if (tegra20)
+ return TIMER1_IRQ_IDX + cpu;
+
+ return TIMER10_IRQ_IDX + cpu;
+}
+
+static inline unsigned long tegra_rate_for_timer(struct timer_of *to,
+ bool tegra20)
+{
+ /*
+ * TIMER1-9 are fixed to 1MHz, TIMER10-13 are running off the
+ * parent clock.
+ */
+ if (tegra20)
+ return TIMER_1MHz;
+
+ return timer_of_rate(to);
+}
+
+static int __init tegra_init_timer(struct device_node *np, bool tegra20,
+ int rating)
+{
+ struct timer_of *to;
+ int cpu, ret;
+
+ to = this_cpu_ptr(&tegra_to);
+ ret = timer_of_init(np, to);
+ if (ret)
+ goto out;
+
+ timer_reg_base = timer_of_base(to);
+
+ /*
+ * Configure microsecond timers to have 1MHz clock
+ * Config register is 0xqqww, where qq is "dividend", ww is "divisor"
+ * Uses n+1 scheme
+ */
+ switch (timer_of_rate(to)) {
+ case 12000000:
+ usec_config = 0x000b; /* (11+1)/(0+1) */
+ break;
+ case 12800000:
+ usec_config = 0x043f; /* (63+1)/(4+1) */
+ break;
+ case 13000000:
+ usec_config = 0x000c; /* (12+1)/(0+1) */
+ break;
+ case 16800000:
+ usec_config = 0x0453; /* (83+1)/(4+1) */
+ break;
+ case 19200000:
+ usec_config = 0x045f; /* (95+1)/(4+1) */
+ break;
+ case 26000000:
+ usec_config = 0x0019; /* (25+1)/(0+1) */
+ break;
+ case 38400000:
+ usec_config = 0x04bf; /* (191+1)/(4+1) */
+ break;
+ case 48000000:
+ usec_config = 0x002f; /* (47+1)/(0+1) */
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+
+ writel_relaxed(usec_config, timer_reg_base + TIMERUS_USEC_CFG);
+
+ for_each_possible_cpu(cpu) {
+ struct timer_of *cpu_to = per_cpu_ptr(&tegra_to, cpu);
+ unsigned long flags = IRQF_TIMER | IRQF_NOBALANCING;
+ unsigned long rate = tegra_rate_for_timer(to, tegra20);
+ unsigned int base = tegra_base_for_cpu(cpu, tegra20);
+ unsigned int idx = tegra_irq_idx_for_cpu(cpu, tegra20);
+ unsigned int irq = irq_of_parse_and_map(np, idx);
+
+ if (!irq) {
+ pr_err("failed to map irq for cpu%d\n", cpu);
+ ret = -EINVAL;
+ goto out_irq;
+ }
+
+ cpu_to->clkevt.irq = irq;
+ cpu_to->clkevt.rating = rating;
+ cpu_to->clkevt.cpumask = cpumask_of(cpu);
+ cpu_to->of_base.base = timer_reg_base + base;
+ cpu_to->of_clk.period = rate / HZ;
+ cpu_to->of_clk.rate = rate;
+
+ irq_set_status_flags(cpu_to->clkevt.irq, IRQ_NOAUTOEN);
+
+ ret = request_irq(cpu_to->clkevt.irq, tegra_timer_isr, flags,
+ cpu_to->clkevt.name, &cpu_to->clkevt);
+ if (ret) {
+ pr_err("failed to set up irq for cpu%d: %d\n",
+ cpu, ret);
+ irq_dispose_mapping(cpu_to->clkevt.irq);
+ cpu_to->clkevt.irq = 0;
+ goto out_irq;
+ }
+ }
+
+ sched_clock_register(tegra_read_sched_clock, 32, TIMER_1MHz);
+
+ ret = clocksource_mmio_init(timer_reg_base + TIMERUS_CNTR_1US,
+ "timer_us", TIMER_1MHz, 300, 32,
+ clocksource_mmio_readl_up);
+ if (ret)
+ pr_err("failed to register clocksource: %d\n", ret);
+
+#ifdef CONFIG_ARM
+ register_current_timer_delay(&tegra_delay_timer);
+#endif
+
+ ret = cpuhp_setup_state(CPUHP_AP_TEGRA_TIMER_STARTING,
+ "AP_TEGRA_TIMER_STARTING", tegra_timer_setup,
+ tegra_timer_stop);
+ if (ret)
+ pr_err("failed to set up cpu hp state: %d\n", ret);
+
+ return ret;
+
+out_irq:
+ for_each_possible_cpu(cpu) {
+ struct timer_of *cpu_to;
+
+ cpu_to = per_cpu_ptr(&tegra_to, cpu);
+ if (cpu_to->clkevt.irq) {
+ free_irq(cpu_to->clkevt.irq, &cpu_to->clkevt);
+ irq_dispose_mapping(cpu_to->clkevt.irq);
+ }
+ }
+
+ to->of_base.base = timer_reg_base;
+out:
+ timer_of_cleanup(to);
+
+ return ret;
+}
+
+static int __init tegra210_init_timer(struct device_node *np)
+{
+ /*
+ * The arch-timer can't survive a power cycle of the CPU core or a
+ * CPUPORESET signal due to a system design shortcoming, hence the
+ * tegra-timer is preferable on Tegra210.
+ */
+ return tegra_init_timer(np, false, 460);
+}
+TIMER_OF_DECLARE(tegra210_timer, "nvidia,tegra210-timer", tegra210_init_timer);
+
+static int __init tegra20_init_timer(struct device_node *np)
+{
+ int rating;
+
+ /*
+ * Tegra20 and Tegra30 have a Cortex-A9 CPU with a TWD timer;
+ * that timer runs off the CPU clock and is hence subject to
+ * jitter caused by DVFS clock rate changes. The tegra-timer is
+ * preferable on older Tegras, while later SoC generations use
+ * the arch-timer as the main per-CPU timer, which is not
+ * affected by DVFS changes.
+ */
+ if (of_machine_is_compatible("nvidia,tegra20") ||
+ of_machine_is_compatible("nvidia,tegra30"))
+ rating = 460;
+ else
+ rating = 330;
+
+ return tegra_init_timer(np, true, rating);
+}
+TIMER_OF_DECLARE(tegra20_timer, "nvidia,tegra20-timer", tegra20_init_timer);
+
+static int __init tegra20_init_rtc(struct device_node *np)
+{
+ int ret;
+
+ ret = timer_of_init(np, &suspend_rtc_to);
+ if (ret)
+ return ret;
+
+ return clocksource_register_hz(&suspend_rtc_clocksource, 1000);
+}
+TIMER_OF_DECLARE(tegra20_rtc, "nvidia,tegra20-rtc", tegra20_init_rtc);
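The usec_config table in tegra_init_timer() above encodes an n+1 fractional divider as 0xqqww so that the microsecond counter ticks at 1 MHz for every supported input rate. A standalone check of that arithmetic, with the rate/config pairs copied from the hunk; the helper below is only an illustration of the encoding:

#include <stdio.h>

int main(void)
{
	static const struct { unsigned long rate; unsigned int cfg; } tbl[] = {
		{ 12000000, 0x000b }, { 12800000, 0x043f }, { 13000000, 0x000c },
		{ 16800000, 0x0453 }, { 19200000, 0x045f }, { 26000000, 0x0019 },
		{ 38400000, 0x04bf }, { 48000000, 0x002f },
	};

	for (unsigned int i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++) {
		unsigned int lo = (tbl[i].cfg & 0xff) + 1;        /* low byte + 1 */
		unsigned int hi = ((tbl[i].cfg >> 8) & 0xff) + 1; /* high byte + 1 */

		/* rate * hi / lo should come out to exactly 1000000 Hz */
		printf("%lu Hz input -> %lu Hz microsecond counter\n",
		       tbl[i].rate, tbl[i].rate * hi / lo);
	}
	return 0;
}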
diff --git a/drivers/clocksource/timer-tegra20.c b/drivers/clocksource/timer-tegra20.c
deleted file mode 100644
index 1e7ece279730..000000000000
--- a/drivers/clocksource/timer-tegra20.c
+++ /dev/null
@@ -1,379 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2010 Google, Inc.
- *
- * Author:
- * Colin Cross <ccross@google.com>
- */
-
-#include <linux/clk.h>
-#include <linux/clockchips.h>
-#include <linux/cpu.h>
-#include <linux/cpumask.h>
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/interrupt.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/percpu.h>
-#include <linux/sched_clock.h>
-#include <linux/time.h>
-
-#include "timer-of.h"
-
-#ifdef CONFIG_ARM
-#include <asm/mach/time.h>
-#endif
-
-#define RTC_SECONDS 0x08
-#define RTC_SHADOW_SECONDS 0x0c
-#define RTC_MILLISECONDS 0x10
-
-#define TIMERUS_CNTR_1US 0x10
-#define TIMERUS_USEC_CFG 0x14
-#define TIMERUS_CNTR_FREEZE 0x4c
-
-#define TIMER_PTV 0x0
-#define TIMER_PTV_EN BIT(31)
-#define TIMER_PTV_PER BIT(30)
-#define TIMER_PCR 0x4
-#define TIMER_PCR_INTR_CLR BIT(30)
-
-#ifdef CONFIG_ARM
-#define TIMER_CPU0 0x50 /* TIMER3 */
-#else
-#define TIMER_CPU0 0x90 /* TIMER10 */
-#define TIMER10_IRQ_IDX 10
-#define IRQ_IDX_FOR_CPU(cpu) (TIMER10_IRQ_IDX + cpu)
-#endif
-#define TIMER_BASE_FOR_CPU(cpu) (TIMER_CPU0 + (cpu) * 8)
-
-static u32 usec_config;
-static void __iomem *timer_reg_base;
-#ifdef CONFIG_ARM
-static struct delay_timer tegra_delay_timer;
-#endif
-
-static int tegra_timer_set_next_event(unsigned long cycles,
- struct clock_event_device *evt)
-{
- void __iomem *reg_base = timer_of_base(to_timer_of(evt));
-
- writel(TIMER_PTV_EN |
- ((cycles > 1) ? (cycles - 1) : 0), /* n+1 scheme */
- reg_base + TIMER_PTV);
-
- return 0;
-}
-
-static int tegra_timer_shutdown(struct clock_event_device *evt)
-{
- void __iomem *reg_base = timer_of_base(to_timer_of(evt));
-
- writel(0, reg_base + TIMER_PTV);
-
- return 0;
-}
-
-static int tegra_timer_set_periodic(struct clock_event_device *evt)
-{
- void __iomem *reg_base = timer_of_base(to_timer_of(evt));
-
- writel(TIMER_PTV_EN | TIMER_PTV_PER |
- ((timer_of_rate(to_timer_of(evt)) / HZ) - 1),
- reg_base + TIMER_PTV);
-
- return 0;
-}
-
-static irqreturn_t tegra_timer_isr(int irq, void *dev_id)
-{
- struct clock_event_device *evt = (struct clock_event_device *)dev_id;
- void __iomem *reg_base = timer_of_base(to_timer_of(evt));
-
- writel(TIMER_PCR_INTR_CLR, reg_base + TIMER_PCR);
- evt->event_handler(evt);
-
- return IRQ_HANDLED;
-}
-
-static void tegra_timer_suspend(struct clock_event_device *evt)
-{
- void __iomem *reg_base = timer_of_base(to_timer_of(evt));
-
- writel(TIMER_PCR_INTR_CLR, reg_base + TIMER_PCR);
-}
-
-static void tegra_timer_resume(struct clock_event_device *evt)
-{
- writel(usec_config, timer_reg_base + TIMERUS_USEC_CFG);
-}
-
-#ifdef CONFIG_ARM64
-static DEFINE_PER_CPU(struct timer_of, tegra_to) = {
- .flags = TIMER_OF_CLOCK | TIMER_OF_BASE,
-
- .clkevt = {
- .name = "tegra_timer",
- .rating = 460,
- .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
- .set_next_event = tegra_timer_set_next_event,
- .set_state_shutdown = tegra_timer_shutdown,
- .set_state_periodic = tegra_timer_set_periodic,
- .set_state_oneshot = tegra_timer_shutdown,
- .tick_resume = tegra_timer_shutdown,
- .suspend = tegra_timer_suspend,
- .resume = tegra_timer_resume,
- },
-};
-
-static int tegra_timer_setup(unsigned int cpu)
-{
- struct timer_of *to = per_cpu_ptr(&tegra_to, cpu);
-
- irq_force_affinity(to->clkevt.irq, cpumask_of(cpu));
- enable_irq(to->clkevt.irq);
-
- clockevents_config_and_register(&to->clkevt, timer_of_rate(to),
- 1, /* min */
- 0x1fffffff); /* 29 bits */
-
- return 0;
-}
-
-static int tegra_timer_stop(unsigned int cpu)
-{
- struct timer_of *to = per_cpu_ptr(&tegra_to, cpu);
-
- to->clkevt.set_state_shutdown(&to->clkevt);
- disable_irq_nosync(to->clkevt.irq);
-
- return 0;
-}
-#else /* CONFIG_ARM */
-static struct timer_of tegra_to = {
- .flags = TIMER_OF_CLOCK | TIMER_OF_BASE | TIMER_OF_IRQ,
-
- .clkevt = {
- .name = "tegra_timer",
- .rating = 300,
- .features = CLOCK_EVT_FEAT_ONESHOT |
- CLOCK_EVT_FEAT_PERIODIC |
- CLOCK_EVT_FEAT_DYNIRQ,
- .set_next_event = tegra_timer_set_next_event,
- .set_state_shutdown = tegra_timer_shutdown,
- .set_state_periodic = tegra_timer_set_periodic,
- .set_state_oneshot = tegra_timer_shutdown,
- .tick_resume = tegra_timer_shutdown,
- .suspend = tegra_timer_suspend,
- .resume = tegra_timer_resume,
- .cpumask = cpu_possible_mask,
- },
-
- .of_irq = {
- .index = 2,
- .flags = IRQF_TIMER | IRQF_TRIGGER_HIGH,
- .handler = tegra_timer_isr,
- },
-};
-
-static u64 notrace tegra_read_sched_clock(void)
-{
- return readl(timer_reg_base + TIMERUS_CNTR_1US);
-}
-
-static unsigned long tegra_delay_timer_read_counter_long(void)
-{
- return readl(timer_reg_base + TIMERUS_CNTR_1US);
-}
-
-static struct timer_of suspend_rtc_to = {
- .flags = TIMER_OF_BASE | TIMER_OF_CLOCK,
-};
-
-/*
- * tegra_rtc_read - Reads the Tegra RTC registers
- * Care must be taken that this function is not called while the
- * tegra_rtc driver could be executing to avoid race conditions
- * on the RTC shadow register
- */
-static u64 tegra_rtc_read_ms(struct clocksource *cs)
-{
- u32 ms = readl(timer_of_base(&suspend_rtc_to) + RTC_MILLISECONDS);
- u32 s = readl(timer_of_base(&suspend_rtc_to) + RTC_SHADOW_SECONDS);
- return (u64)s * MSEC_PER_SEC + ms;
-}
-
-static struct clocksource suspend_rtc_clocksource = {
- .name = "tegra_suspend_timer",
- .rating = 200,
- .read = tegra_rtc_read_ms,
- .mask = CLOCKSOURCE_MASK(32),
- .flags = CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_SUSPEND_NONSTOP,
-};
-#endif
-
-static int tegra_timer_common_init(struct device_node *np, struct timer_of *to)
-{
- int ret = 0;
-
- ret = timer_of_init(np, to);
- if (ret < 0)
- goto out;
-
- timer_reg_base = timer_of_base(to);
-
- /*
- * Configure microsecond timers to have 1MHz clock
- * Config register is 0xqqww, where qq is "dividend", ww is "divisor"
- * Uses n+1 scheme
- */
- switch (timer_of_rate(to)) {
- case 12000000:
- usec_config = 0x000b; /* (11+1)/(0+1) */
- break;
- case 12800000:
- usec_config = 0x043f; /* (63+1)/(4+1) */
- break;
- case 13000000:
- usec_config = 0x000c; /* (12+1)/(0+1) */
- break;
- case 16800000:
- usec_config = 0x0453; /* (83+1)/(4+1) */
- break;
- case 19200000:
- usec_config = 0x045f; /* (95+1)/(4+1) */
- break;
- case 26000000:
- usec_config = 0x0019; /* (25+1)/(0+1) */
- break;
- case 38400000:
- usec_config = 0x04bf; /* (191+1)/(4+1) */
- break;
- case 48000000:
- usec_config = 0x002f; /* (47+1)/(0+1) */
- break;
- default:
- ret = -EINVAL;
- goto out;
- }
-
- writel(usec_config, timer_of_base(to) + TIMERUS_USEC_CFG);
-
-out:
- return ret;
-}
-
-#ifdef CONFIG_ARM64
-static int __init tegra_init_timer(struct device_node *np)
-{
- int cpu, ret = 0;
- struct timer_of *to;
-
- to = this_cpu_ptr(&tegra_to);
- ret = tegra_timer_common_init(np, to);
- if (ret < 0)
- goto out;
-
- for_each_possible_cpu(cpu) {
- struct timer_of *cpu_to;
-
- cpu_to = per_cpu_ptr(&tegra_to, cpu);
- cpu_to->of_base.base = timer_reg_base + TIMER_BASE_FOR_CPU(cpu);
- cpu_to->of_clk.rate = timer_of_rate(to);
- cpu_to->clkevt.cpumask = cpumask_of(cpu);
- cpu_to->clkevt.irq =
- irq_of_parse_and_map(np, IRQ_IDX_FOR_CPU(cpu));
- if (!cpu_to->clkevt.irq) {
- pr_err("%s: can't map IRQ for CPU%d\n",
- __func__, cpu);
- ret = -EINVAL;
- goto out;
- }
-
- irq_set_status_flags(cpu_to->clkevt.irq, IRQ_NOAUTOEN);
- ret = request_irq(cpu_to->clkevt.irq, tegra_timer_isr,
- IRQF_TIMER | IRQF_NOBALANCING,
- cpu_to->clkevt.name, &cpu_to->clkevt);
- if (ret) {
- pr_err("%s: cannot setup irq %d for CPU%d\n",
- __func__, cpu_to->clkevt.irq, cpu);
- ret = -EINVAL;
- goto out_irq;
- }
- }
-
- cpuhp_setup_state(CPUHP_AP_TEGRA_TIMER_STARTING,
- "AP_TEGRA_TIMER_STARTING", tegra_timer_setup,
- tegra_timer_stop);
-
- return ret;
-out_irq:
- for_each_possible_cpu(cpu) {
- struct timer_of *cpu_to;
-
- cpu_to = per_cpu_ptr(&tegra_to, cpu);
- if (cpu_to->clkevt.irq) {
- free_irq(cpu_to->clkevt.irq, &cpu_to->clkevt);
- irq_dispose_mapping(cpu_to->clkevt.irq);
- }
- }
-out:
- timer_of_cleanup(to);
- return ret;
-}
-#else /* CONFIG_ARM */
-static int __init tegra_init_timer(struct device_node *np)
-{
- int ret = 0;
-
- ret = tegra_timer_common_init(np, &tegra_to);
- if (ret < 0)
- goto out;
-
- tegra_to.of_base.base = timer_reg_base + TIMER_BASE_FOR_CPU(0);
- tegra_to.of_clk.rate = 1000000; /* microsecond timer */
-
- sched_clock_register(tegra_read_sched_clock, 32,
- timer_of_rate(&tegra_to));
- ret = clocksource_mmio_init(timer_reg_base + TIMERUS_CNTR_1US,
- "timer_us", timer_of_rate(&tegra_to),
- 300, 32, clocksource_mmio_readl_up);
- if (ret) {
- pr_err("Failed to register clocksource\n");
- goto out;
- }
-
- tegra_delay_timer.read_current_timer =
- tegra_delay_timer_read_counter_long;
- tegra_delay_timer.freq = timer_of_rate(&tegra_to);
- register_current_timer_delay(&tegra_delay_timer);
-
- clockevents_config_and_register(&tegra_to.clkevt,
- timer_of_rate(&tegra_to),
- 0x1,
- 0x1fffffff);
-
- return ret;
-out:
- timer_of_cleanup(&tegra_to);
-
- return ret;
-}
-
-static int __init tegra20_init_rtc(struct device_node *np)
-{
- int ret;
-
- ret = timer_of_init(np, &suspend_rtc_to);
- if (ret)
- return ret;
-
- clocksource_register_hz(&suspend_rtc_clocksource, 1000);
-
- return 0;
-}
-TIMER_OF_DECLARE(tegra20_rtc, "nvidia,tegra20-rtc", tegra20_init_rtc);
-#endif
-TIMER_OF_DECLARE(tegra210_timer, "nvidia,tegra210-timer", tegra_init_timer);
-TIMER_OF_DECLARE(tegra20_timer, "nvidia,tegra20-timer", tegra_init_timer);
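For reference, the TIMERUS_USEC_CFG table removed above (and carried over into the new timer-tegra.c) encodes the parent clock rate as a pair of n+1 fields: the low byte holds the numerator minus one and the high byte the denominator minus one of rate / 1 MHz (the driver's comment calls these fields "divisor" and "dividend"). Below is a minimal, hypothetical userspace sketch that reproduces the table entries; tegra_usec_cfg() and gcd_u32() are illustration-only names, not driver functions.

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical helper (not part of the driver): derive the TIMERUS_USEC_CFG
 * value from the parent clock rate.  Bits 7:0 hold (numerator - 1) and
 * bits 15:8 hold (denominator - 1) of rate / 1 MHz -- the "n+1 scheme"
 * described in the removed comment.
 */
static uint32_t gcd_u32(uint32_t a, uint32_t b)
{
	while (b) {
		uint32_t t = a % b;

		a = b;
		b = t;
	}
	return a;
}

static int tegra_usec_cfg(uint32_t rate_hz, uint32_t *cfg)
{
	uint32_t num = rate_hz, den = 1000000;	/* rate expressed as num/den MHz */
	uint32_t g = gcd_u32(num, den);

	num /= g;
	den /= g;
	if (!num || num > 256 || den > 256)
		return -1;	/* does not fit the 8-bit n+1 fields */

	*cfg = ((den - 1) << 8) | (num - 1);
	return 0;
}

int main(void)
{
	uint32_t cfg;

	if (!tegra_usec_cfg(19200000, &cfg))
		printf("19.2 MHz -> %#06x\n", (unsigned)cfg);	/* 0x045f, as in the table above */
	if (!tegra_usec_cfg(12000000, &cfg))
		printf("12 MHz   -> %#06x\n", (unsigned)cfg);	/* 0x000b */
	return 0;
}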
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index f8129edc145e..56c31a78c692 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -93,6 +93,15 @@ config ARM_IMX6Q_CPUFREQ
If in doubt, say N.
+config ARM_IMX_CPUFREQ_DT
+ tristate "Freescale i.MX8M cpufreq support"
+ depends on ARCH_MXC && CPUFREQ_DT
+ help
+ This adds cpufreq driver support for Freescale i.MX8M series SoCs,
+ based on cpufreq-dt.
+
+ If in doubt, say N.
+
config ARM_KIRKWOOD_CPUFREQ
def_bool MACH_KIRKWOOD
help
@@ -133,6 +142,14 @@ config ARM_QCOM_CPUFREQ_HW
The driver implements the cpufreq interface for this HW engine.
Say Y if you want to support CPUFreq HW.
+config ARM_RASPBERRYPI_CPUFREQ
+ tristate "Raspberry Pi cpufreq support"
+ depends on CLK_RASPBERRYPI || COMPILE_TEST
+ help
+ This adds the CPUFreq driver for Raspberry Pi.
+
+ If in doubt, say N.
+
config ARM_S3C_CPUFREQ
bool
help
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 689b26c6f949..5a6c70d26c98 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -56,6 +56,7 @@ obj-$(CONFIG_ACPI_CPPC_CPUFREQ) += cppc_cpufreq.o
obj-$(CONFIG_ARCH_DAVINCI) += davinci-cpufreq.o
obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ) += highbank-cpufreq.o
obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o
+obj-$(CONFIG_ARM_IMX_CPUFREQ_DT) += imx-cpufreq-dt.o
obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ) += kirkwood-cpufreq.o
obj-$(CONFIG_ARM_MEDIATEK_CPUFREQ) += mediatek-cpufreq.o
obj-$(CONFIG_MACH_MVEBU_V7) += mvebu-cpufreq.o
@@ -64,6 +65,7 @@ obj-$(CONFIG_ARM_PXA2xx_CPUFREQ) += pxa2xx-cpufreq.o
obj-$(CONFIG_PXA3xx) += pxa3xx-cpufreq.o
obj-$(CONFIG_ARM_QCOM_CPUFREQ_HW) += qcom-cpufreq-hw.o
obj-$(CONFIG_ARM_QCOM_CPUFREQ_KRYO) += qcom-cpufreq-kryo.o
+obj-$(CONFIG_ARM_RASPBERRYPI_CPUFREQ) += raspberrypi-cpufreq.o
obj-$(CONFIG_ARM_S3C2410_CPUFREQ) += s3c2410-cpufreq.o
obj-$(CONFIG_ARM_S3C2412_CPUFREQ) += s3c2412-cpufreq.o
obj-$(CONFIG_ARM_S3C2416_CPUFREQ) += s3c2416-cpufreq.o
diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
index 0df16eb1eb3c..aa0f06dec959 100644
--- a/drivers/cpufreq/armada-37xx-cpufreq.c
+++ b/drivers/cpufreq/armada-37xx-cpufreq.c
@@ -257,7 +257,7 @@ static void __init armada37xx_cpufreq_avs_configure(struct regmap *base,
static void __init armada37xx_cpufreq_avs_setup(struct regmap *base,
struct armada_37xx_dvfs *dvfs)
{
- unsigned int avs_val = 0, freq;
+ unsigned int avs_val = 0;
int load_level = 0;
if (base == NULL)
@@ -275,8 +275,6 @@ static void __init armada37xx_cpufreq_avs_setup(struct regmap *base,
for (load_level = 1; load_level < LOAD_LEVEL_NR; load_level++) {
- freq = dvfs->cpu_freq_max / dvfs->divider[load_level];
-
avs_val = dvfs->avs[load_level];
regmap_update_bits(base, ARMADA_37XX_AVS_VSET(load_level-1),
ARMADA_37XX_AVS_VDD_MASK << ARMADA_37XX_AVS_HIGH_VDD_LIMIT |
diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
index e6f9cbe5835f..77b0e5d0fb13 100644
--- a/drivers/cpufreq/brcmstb-avs-cpufreq.c
+++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
@@ -384,12 +384,12 @@ static int brcm_avs_set_pstate(struct private_data *priv, unsigned int pstate)
return __issue_avs_command(priv, AVS_CMD_SET_PSTATE, true, args);
}
-static unsigned long brcm_avs_get_voltage(void __iomem *base)
+static u32 brcm_avs_get_voltage(void __iomem *base)
{
return readl(base + AVS_MBOX_VOLTAGE1);
}
-static unsigned long brcm_avs_get_frequency(void __iomem *base)
+static u32 brcm_avs_get_frequency(void __iomem *base)
{
return readl(base + AVS_MBOX_FREQUENCY) * 1000; /* in kHz */
}
@@ -446,8 +446,8 @@ static bool brcm_avs_is_firmware_loaded(struct private_data *priv)
rc = brcm_avs_get_pmap(priv, NULL);
magic = readl(priv->base + AVS_MBOX_MAGIC);
- return (magic == AVS_FIRMWARE_MAGIC) && (rc != -ENOTSUPP) &&
- (rc != -EINVAL);
+ return (magic == AVS_FIRMWARE_MAGIC) && ((rc != -ENOTSUPP) ||
+ (rc != -EINVAL));
}
static unsigned int brcm_avs_cpufreq_get(unsigned int cpu)
@@ -653,14 +653,14 @@ static ssize_t show_brcm_avs_voltage(struct cpufreq_policy *policy, char *buf)
{
struct private_data *priv = policy->driver_data;
- return sprintf(buf, "0x%08lx\n", brcm_avs_get_voltage(priv->base));
+ return sprintf(buf, "0x%08x\n", brcm_avs_get_voltage(priv->base));
}
static ssize_t show_brcm_avs_frequency(struct cpufreq_policy *policy, char *buf)
{
struct private_data *priv = policy->driver_data;
- return sprintf(buf, "0x%08lx\n", brcm_avs_get_frequency(priv->base));
+ return sprintf(buf, "0x%08x\n", brcm_avs_get_frequency(priv->base));
}
cpufreq_freq_attr_ro(brcm_avs_pstate);
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index 88e00683eaeb..03dc4244ab00 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -37,7 +37,6 @@ static const struct of_device_id whitelist[] __initconst = {
{ .compatible = "fsl,imx27", },
{ .compatible = "fsl,imx51", },
{ .compatible = "fsl,imx53", },
- { .compatible = "fsl,imx7d", },
{ .compatible = "marvell,berlin", },
{ .compatible = "marvell,pxa250", },
@@ -105,6 +104,10 @@ static const struct of_device_id blacklist[] __initconst = {
{ .compatible = "calxeda,highbank", },
{ .compatible = "calxeda,ecx-2000", },
+ { .compatible = "fsl,imx7d", },
+ { .compatible = "fsl,imx8mq", },
+ { .compatible = "fsl,imx8mm", },
+
{ .compatible = "marvell,armadaxp", },
{ .compatible = "mediatek,mt2701", },
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index e84bf0eb7239..0a9f675f2af4 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -356,12 +356,10 @@ static void cpufreq_notify_transition(struct cpufreq_policy *policy,
* which is not equal to what the cpufreq core thinks is
* "old frequency".
*/
- if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
- if (policy->cur && (policy->cur != freqs->old)) {
- pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
- freqs->old, policy->cur);
- freqs->old = policy->cur;
- }
+ if (policy->cur && policy->cur != freqs->old) {
+ pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
+ freqs->old, policy->cur);
+ freqs->old = policy->cur;
}
srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
@@ -631,7 +629,7 @@ static int cpufreq_parse_policy(char *str_governor,
}
/**
- * cpufreq_parse_governor - parse a governor string only for !setpolicy
+ * cpufreq_parse_governor - parse a governor string only for has_target()
*/
static int cpufreq_parse_governor(char *str_governor,
struct cpufreq_policy *policy)
@@ -1114,13 +1112,25 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cp
return ret;
}
+static void refresh_frequency_limits(struct cpufreq_policy *policy)
+{
+ struct cpufreq_policy new_policy = *policy;
+
+ pr_debug("updating policy for CPU %u\n", policy->cpu);
+
+ new_policy.min = policy->user_policy.min;
+ new_policy.max = policy->user_policy.max;
+
+ cpufreq_set_policy(policy, &new_policy);
+}
+
static void handle_update(struct work_struct *work)
{
struct cpufreq_policy *policy =
container_of(work, struct cpufreq_policy, update);
- unsigned int cpu = policy->cpu;
- pr_debug("handle_update for cpu %u called\n", cpu);
- cpufreq_update_policy(cpu);
+
+ pr_debug("handle_update for cpu %u called\n", policy->cpu);
+ refresh_frequency_limits(policy);
}
static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
@@ -1300,7 +1310,7 @@ static int cpufreq_online(unsigned int cpu)
policy->max = policy->user_policy.max;
}
- if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
+ if (cpufreq_driver->get && has_target()) {
policy->cur = cpufreq_driver->get(policy->cpu);
if (!policy->cur) {
pr_err("%s: ->get() failed\n", __func__);
@@ -1375,8 +1385,7 @@ static int cpufreq_online(unsigned int cpu)
if (cpufreq_driver->ready)
cpufreq_driver->ready(policy);
- if (IS_ENABLED(CONFIG_CPU_THERMAL) &&
- cpufreq_driver->flags & CPUFREQ_IS_COOLING_DEV)
+ if (cpufreq_thermal_control_enabled(cpufreq_driver))
policy->cdev = of_cpufreq_cooling_register(policy);
pr_debug("initialization complete\n");
@@ -1466,8 +1475,7 @@ static int cpufreq_offline(unsigned int cpu)
goto unlock;
}
- if (IS_ENABLED(CONFIG_CPU_THERMAL) &&
- cpufreq_driver->flags & CPUFREQ_IS_COOLING_DEV) {
+ if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
cpufreq_cooling_unregister(policy->cdev);
policy->cdev = NULL;
}
@@ -1546,6 +1554,30 @@ static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
cpufreq_freq_transition_end(policy, &freqs, 0);
}
+static unsigned int cpufreq_verify_current_freq(struct cpufreq_policy *policy, bool update)
+{
+ unsigned int new_freq;
+
+ new_freq = cpufreq_driver->get(policy->cpu);
+ if (!new_freq)
+ return 0;
+
+ /*
+ * If fast frequency switching is used with the given policy, the check
+ * against policy->cur is pointless, so skip it in that case.
+ */
+ if (policy->fast_switch_enabled || !has_target())
+ return new_freq;
+
+ if (policy->cur != new_freq) {
+ cpufreq_out_of_sync(policy, new_freq);
+ if (update)
+ schedule_work(&policy->update);
+ }
+
+ return new_freq;
+}
+
/**
* cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
* @cpu: CPU number
@@ -1601,31 +1633,10 @@ EXPORT_SYMBOL(cpufreq_quick_get_max);
static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
{
- unsigned int ret_freq = 0;
-
if (unlikely(policy_is_inactive(policy)))
- return ret_freq;
-
- ret_freq = cpufreq_driver->get(policy->cpu);
-
- /*
- * If fast frequency switching is used with the given policy, the check
- * against policy->cur is pointless, so skip it in that case too.
- */
- if (policy->fast_switch_enabled)
- return ret_freq;
-
- if (ret_freq && policy->cur &&
- !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
- /* verify no discrepancy between actual and
- saved value exists */
- if (unlikely(ret_freq != policy->cur)) {
- cpufreq_out_of_sync(policy, ret_freq);
- schedule_work(&policy->update);
- }
- }
+ return 0;
- return ret_freq;
+ return cpufreq_verify_current_freq(policy, true);
}
/**
@@ -1652,24 +1663,6 @@ unsigned int cpufreq_get(unsigned int cpu)
}
EXPORT_SYMBOL(cpufreq_get);
-static unsigned int cpufreq_update_current_freq(struct cpufreq_policy *policy)
-{
- unsigned int new_freq;
-
- new_freq = cpufreq_driver->get(policy->cpu);
- if (!new_freq)
- return 0;
-
- if (!policy->cur) {
- pr_debug("cpufreq: Driver did not initialize current freq\n");
- policy->cur = new_freq;
- } else if (policy->cur != new_freq && has_target()) {
- cpufreq_out_of_sync(policy, new_freq);
- }
-
- return new_freq;
-}
-
static struct subsys_interface cpufreq_interface = {
.name = "cpufreq",
.subsys = &cpu_subsys,
@@ -2150,8 +2143,8 @@ static int cpufreq_start_governor(struct cpufreq_policy *policy)
pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
- if (cpufreq_driver->get && !cpufreq_driver->setpolicy)
- cpufreq_update_current_freq(policy);
+ if (cpufreq_driver->get)
+ cpufreq_verify_current_freq(policy, false);
if (policy->governor->start) {
ret = policy->governor->start(policy);
@@ -2392,7 +2385,6 @@ int cpufreq_set_policy(struct cpufreq_policy *policy,
void cpufreq_update_policy(unsigned int cpu)
{
struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
- struct cpufreq_policy new_policy;
if (!policy)
return;
@@ -2401,16 +2393,11 @@ void cpufreq_update_policy(unsigned int cpu)
* BIOS might change freq behind our back
* -> ask driver for current freq and notify governors about a change
*/
- if (cpufreq_driver->get && !cpufreq_driver->setpolicy &&
- (cpufreq_suspended || WARN_ON(!cpufreq_update_current_freq(policy))))
+ if (cpufreq_driver->get && has_target() &&
+ (cpufreq_suspended || WARN_ON(!cpufreq_verify_current_freq(policy, false))))
goto unlock;
- pr_debug("updating policy for CPU %u\n", cpu);
- memcpy(&new_policy, policy, sizeof(*policy));
- new_policy.min = policy->user_policy.min;
- new_policy.max = policy->user_policy.max;
-
- cpufreq_set_policy(policy, &new_policy);
+ refresh_frequency_limits(policy);
unlock:
cpufreq_cpu_release(policy);
diff --git a/drivers/cpufreq/imx-cpufreq-dt.c b/drivers/cpufreq/imx-cpufreq-dt.c
new file mode 100644
index 000000000000..b54fd26ea7df
--- /dev/null
+++ b/drivers/cpufreq/imx-cpufreq-dt.c
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 NXP
+ */
+
+#include <linux/cpu.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/slab.h>
+
+#define OCOTP_CFG3_SPEED_GRADE_SHIFT 8
+#define OCOTP_CFG3_SPEED_GRADE_MASK (0x3 << 8)
+#define OCOTP_CFG3_MKT_SEGMENT_SHIFT 6
+#define OCOTP_CFG3_MKT_SEGMENT_MASK (0x3 << 6)
+
+/* cpufreq-dt device registered by imx-cpufreq-dt */
+static struct platform_device *cpufreq_dt_pdev;
+static struct opp_table *cpufreq_opp_table;
+
+static int imx_cpufreq_dt_probe(struct platform_device *pdev)
+{
+ struct device *cpu_dev = get_cpu_device(0);
+ u32 cell_value, supported_hw[2];
+ int speed_grade, mkt_segment;
+ int ret;
+
+ ret = nvmem_cell_read_u32(cpu_dev, "speed_grade", &cell_value);
+ if (ret)
+ return ret;
+
+ speed_grade = (cell_value & OCOTP_CFG3_SPEED_GRADE_MASK) >> OCOTP_CFG3_SPEED_GRADE_SHIFT;
+ mkt_segment = (cell_value & OCOTP_CFG3_MKT_SEGMENT_MASK) >> OCOTP_CFG3_MKT_SEGMENT_SHIFT;
+
+ /*
+ * Early samples without fuses written report "0 0" which means
+ * consumer segment and minimum speed grading.
+ *
+ * According to the datasheet, minimum speed grading is not supported for
+ * consumer parts, so clamp to 1 to avoid a warning for "no OPPs".
+ *
+ * Applies to 8mq and 8mm.
+ */
+ if (mkt_segment == 0 && speed_grade == 0 && (
+ of_machine_is_compatible("fsl,imx8mm") ||
+ of_machine_is_compatible("fsl,imx8mq")))
+ speed_grade = 1;
+
+ supported_hw[0] = BIT(speed_grade);
+ supported_hw[1] = BIT(mkt_segment);
+ dev_info(&pdev->dev, "cpu speed grade %d mkt segment %d supported-hw %#x %#x\n",
+ speed_grade, mkt_segment, supported_hw[0], supported_hw[1]);
+
+ cpufreq_opp_table = dev_pm_opp_set_supported_hw(cpu_dev, supported_hw, 2);
+ if (IS_ERR(cpufreq_opp_table)) {
+ ret = PTR_ERR(cpufreq_opp_table);
+ dev_err(&pdev->dev, "Failed to set supported opp: %d\n", ret);
+ return ret;
+ }
+
+ cpufreq_dt_pdev = platform_device_register_data(
+ &pdev->dev, "cpufreq-dt", -1, NULL, 0);
+ if (IS_ERR(cpufreq_dt_pdev)) {
+ dev_pm_opp_put_supported_hw(cpufreq_opp_table);
+ ret = PTR_ERR(cpufreq_dt_pdev);
+ dev_err(&pdev->dev, "Failed to register cpufreq-dt: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int imx_cpufreq_dt_remove(struct platform_device *pdev)
+{
+ platform_device_unregister(cpufreq_dt_pdev);
+ dev_pm_opp_put_supported_hw(cpufreq_opp_table);
+
+ return 0;
+}
+
+static struct platform_driver imx_cpufreq_dt_driver = {
+ .probe = imx_cpufreq_dt_probe,
+ .remove = imx_cpufreq_dt_remove,
+ .driver = {
+ .name = "imx-cpufreq-dt",
+ },
+};
+module_platform_driver(imx_cpufreq_dt_driver);
+
+MODULE_ALIAS("platform:imx-cpufreq-dt");
+MODULE_DESCRIPTION("Freescale i.MX cpufreq speed grading driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index 1e5e64643c3a..fdc767fdbe6a 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -582,10 +582,10 @@ static int __init pcc_cpufreq_init(void)
/* Skip initialization if another cpufreq driver is there. */
if (cpufreq_get_current_driver())
- return 0;
+ return -EEXIST;
if (acpi_disabled)
- return 0;
+ return -ENODEV;
ret = pcc_cpufreq_probe();
if (ret) {
diff --git a/drivers/cpufreq/raspberrypi-cpufreq.c b/drivers/cpufreq/raspberrypi-cpufreq.c
new file mode 100644
index 000000000000..2bc7d9734272
--- /dev/null
+++ b/drivers/cpufreq/raspberrypi-cpufreq.c
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Raspberry Pi cpufreq driver
+ *
+ * Copyright (C) 2019, Nicolas Saenz Julienne <nsaenzjulienne@suse.de>
+ */
+
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+
+#define RASPBERRYPI_FREQ_INTERVAL 100000000
+
+static struct platform_device *cpufreq_dt;
+
+static int raspberrypi_cpufreq_probe(struct platform_device *pdev)
+{
+ struct device *cpu_dev;
+ unsigned long min, max;
+ unsigned long rate;
+ struct clk *clk;
+ int ret;
+
+ cpu_dev = get_cpu_device(0);
+ if (!cpu_dev) {
+ pr_err("Cannot get CPU for cpufreq driver\n");
+ return -ENODEV;
+ }
+
+ clk = clk_get(cpu_dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(cpu_dev, "Cannot get clock for CPU0\n");
+ return PTR_ERR(clk);
+ }
+
+ /*
+ * The max and min frequencies are configurable in the Raspberry Pi
+ * firmware, so we query them at runtime.
+ */
+ min = roundup(clk_round_rate(clk, 0), RASPBERRYPI_FREQ_INTERVAL);
+ max = roundup(clk_round_rate(clk, ULONG_MAX), RASPBERRYPI_FREQ_INTERVAL);
+ clk_put(clk);
+
+ for (rate = min; rate <= max; rate += RASPBERRYPI_FREQ_INTERVAL) {
+ ret = dev_pm_opp_add(cpu_dev, rate, 0);
+ if (ret)
+ goto remove_opp;
+ }
+
+ cpufreq_dt = platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
+ ret = PTR_ERR_OR_ZERO(cpufreq_dt);
+ if (ret) {
+ dev_err(cpu_dev, "Failed to create platform device, %d\n", ret);
+ goto remove_opp;
+ }
+
+ return 0;
+
+remove_opp:
+ dev_pm_opp_remove_all_dynamic(cpu_dev);
+
+ return ret;
+}
+
+static int raspberrypi_cpufreq_remove(struct platform_device *pdev)
+{
+ struct device *cpu_dev;
+
+ cpu_dev = get_cpu_device(0);
+ if (cpu_dev)
+ dev_pm_opp_remove_all_dynamic(cpu_dev);
+
+ platform_device_unregister(cpufreq_dt);
+
+ return 0;
+}
+
+/*
+ * Since the driver depends on clk-raspberrypi, which may return EPROBE_DEFER,
+ * all the activity is performed in the probe, which may be deferred as well.
+ */
+static struct platform_driver raspberrypi_cpufreq_driver = {
+ .driver = {
+ .name = "raspberrypi-cpufreq",
+ },
+ .probe = raspberrypi_cpufreq_probe,
+ .remove = raspberrypi_cpufreq_remove,
+};
+module_platform_driver(raspberrypi_cpufreq_driver);
+
+MODULE_AUTHOR("Nicolas Saenz Julienne <nsaenzjulienne@suse.de");
+MODULE_DESCRIPTION("Raspberry Pi cpufreq driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:raspberrypi-cpufreq");
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c
index 57e5374592bd..e5cb17d4be7b 100644
--- a/drivers/cpufreq/s5pv210-cpufreq.c
+++ b/drivers/cpufreq/s5pv210-cpufreq.c
@@ -478,7 +478,7 @@ static int s5pv210_target(struct cpufreq_policy *policy, unsigned int index)
arm_volt, arm_volt_max);
}
- printk(KERN_DEBUG "Perf changed[L%d]\n", index);
+ pr_debug("Perf changed[L%d]\n", index);
exit:
mutex_unlock(&set_freq_lock);
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 0af08081e305..603413f28fa3 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -520,10 +520,13 @@ config CRYPTO_DEV_ATMEL_SHA
To compile this driver as a module, choose M here: the module
will be called atmel-sha.
+config CRYPTO_DEV_ATMEL_I2C
+ tristate
+
config CRYPTO_DEV_ATMEL_ECC
tristate "Support for Microchip / Atmel ECC hw accelerator"
- depends on ARCH_AT91 || COMPILE_TEST
depends on I2C
+ select CRYPTO_DEV_ATMEL_I2C
select CRYPTO_ECDH
select CRC16
help
@@ -534,6 +537,21 @@ config CRYPTO_DEV_ATMEL_ECC
To compile this driver as a module, choose M here: the module
will be called atmel-ecc.
+config CRYPTO_DEV_ATMEL_SHA204A
+ tristate "Support for Microchip / Atmel SHA accelerator and RNG"
+ depends on I2C
+ select CRYPTO_DEV_ATMEL_I2C
+ select HW_RANDOM
+ select CRC16
+ help
+ Microchip / Atmel SHA accelerator and RNG.
+ Select this if you want to use the Microchip / Atmel SHA204A
+ module as a random number generator. (Other functions of the
+ chip are currently not exposed by this driver)
+
+ To compile this driver as a module, choose M here: the module
+ will be called atmel-sha204a.
+
config CRYPTO_DEV_CCP
bool "Support for AMD Secure Processor"
depends on ((X86 && PCI) || (ARM64 && (OF_ADDRESS || ACPI))) && HAS_IOMEM
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index a23a7197fcd7..afc4753b5d28 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -2,7 +2,9 @@
obj-$(CONFIG_CRYPTO_DEV_ATMEL_AES) += atmel-aes.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += atmel-sha.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o
+obj-$(CONFIG_CRYPTO_DEV_ATMEL_I2C) += atmel-i2c.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_ECC) += atmel-ecc.o
+obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA204A) += atmel-sha204a.o
obj-$(CONFIG_CRYPTO_DEV_CAVIUM_ZIP) += cavium/
obj-$(CONFIG_CRYPTO_DEV_CCP) += ccp/
obj-$(CONFIG_CRYPTO_DEV_CCREE) += ccree/
diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c
index 49f3e0ce242c..cbfc607282f4 100644
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -67,12 +67,16 @@ static void set_dynamic_sa_command_1(struct dynamic_sa_ctl *sa, u32 cm,
}
static inline int crypto4xx_crypt(struct skcipher_request *req,
- const unsigned int ivlen, bool decrypt)
+ const unsigned int ivlen, bool decrypt,
+ bool check_blocksize)
{
struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher);
__le32 iv[AES_IV_SIZE];
+ if (check_blocksize && !IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE))
+ return -EINVAL;
+
if (ivlen)
crypto4xx_memcpy_to_le32(iv, req->iv, ivlen);
@@ -81,24 +85,34 @@ static inline int crypto4xx_crypt(struct skcipher_request *req,
ctx->sa_len, 0, NULL);
}
-int crypto4xx_encrypt_noiv(struct skcipher_request *req)
+int crypto4xx_encrypt_noiv_block(struct skcipher_request *req)
+{
+ return crypto4xx_crypt(req, 0, false, true);
+}
+
+int crypto4xx_encrypt_iv_stream(struct skcipher_request *req)
+{
+ return crypto4xx_crypt(req, AES_IV_SIZE, false, false);
+}
+
+int crypto4xx_decrypt_noiv_block(struct skcipher_request *req)
{
- return crypto4xx_crypt(req, 0, false);
+ return crypto4xx_crypt(req, 0, true, true);
}
-int crypto4xx_encrypt_iv(struct skcipher_request *req)
+int crypto4xx_decrypt_iv_stream(struct skcipher_request *req)
{
- return crypto4xx_crypt(req, AES_IV_SIZE, false);
+ return crypto4xx_crypt(req, AES_IV_SIZE, true, false);
}
-int crypto4xx_decrypt_noiv(struct skcipher_request *req)
+int crypto4xx_encrypt_iv_block(struct skcipher_request *req)
{
- return crypto4xx_crypt(req, 0, true);
+ return crypto4xx_crypt(req, AES_IV_SIZE, false, true);
}
-int crypto4xx_decrypt_iv(struct skcipher_request *req)
+int crypto4xx_decrypt_iv_block(struct skcipher_request *req)
{
- return crypto4xx_crypt(req, AES_IV_SIZE, true);
+ return crypto4xx_crypt(req, AES_IV_SIZE, true, true);
}
/**
@@ -269,8 +283,8 @@ crypto4xx_ctr_crypt(struct skcipher_request *req, bool encrypt)
return ret;
}
- return encrypt ? crypto4xx_encrypt_iv(req)
- : crypto4xx_decrypt_iv(req);
+ return encrypt ? crypto4xx_encrypt_iv_stream(req)
+ : crypto4xx_decrypt_iv_stream(req);
}
static int crypto4xx_sk_setup_fallback(struct crypto4xx_ctx *ctx,
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 16d911aaa508..de5e9352e920 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -182,7 +182,6 @@ static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
dev->pdr_pa);
return -ENOMEM;
}
- memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD);
dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device,
sizeof(union shadow_sa_buf) * PPC4XX_NUM_PD,
&dev->shadow_sa_pool_pa,
@@ -1210,8 +1209,8 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_IV_SIZE,
.setkey = crypto4xx_setkey_aes_cbc,
- .encrypt = crypto4xx_encrypt_iv,
- .decrypt = crypto4xx_decrypt_iv,
+ .encrypt = crypto4xx_encrypt_iv_block,
+ .decrypt = crypto4xx_decrypt_iv_block,
.init = crypto4xx_sk_init,
.exit = crypto4xx_sk_exit,
} },
@@ -1222,7 +1221,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_blocksize = 1,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
.cra_module = THIS_MODULE,
},
@@ -1230,8 +1229,8 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_IV_SIZE,
.setkey = crypto4xx_setkey_aes_cfb,
- .encrypt = crypto4xx_encrypt_iv,
- .decrypt = crypto4xx_decrypt_iv,
+ .encrypt = crypto4xx_encrypt_iv_stream,
+ .decrypt = crypto4xx_decrypt_iv_stream,
.init = crypto4xx_sk_init,
.exit = crypto4xx_sk_exit,
} },
@@ -1243,7 +1242,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_blocksize = 1,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
.cra_module = THIS_MODULE,
},
@@ -1263,7 +1262,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_blocksize = 1,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
.cra_module = THIS_MODULE,
},
@@ -1290,8 +1289,8 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = crypto4xx_setkey_aes_ecb,
- .encrypt = crypto4xx_encrypt_noiv,
- .decrypt = crypto4xx_decrypt_noiv,
+ .encrypt = crypto4xx_encrypt_noiv_block,
+ .decrypt = crypto4xx_decrypt_noiv_block,
.init = crypto4xx_sk_init,
.exit = crypto4xx_sk_exit,
} },
@@ -1302,7 +1301,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_blocksize = 1,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
.cra_module = THIS_MODULE,
},
@@ -1310,8 +1309,8 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_IV_SIZE,
.setkey = crypto4xx_setkey_aes_ofb,
- .encrypt = crypto4xx_encrypt_iv,
- .decrypt = crypto4xx_decrypt_iv,
+ .encrypt = crypto4xx_encrypt_iv_stream,
+ .decrypt = crypto4xx_decrypt_iv_stream,
.init = crypto4xx_sk_init,
.exit = crypto4xx_sk_exit,
} },
diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h
index ca1c25c40c23..6b6841359190 100644
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -173,10 +173,12 @@ int crypto4xx_setkey_rfc3686(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen);
int crypto4xx_encrypt_ctr(struct skcipher_request *req);
int crypto4xx_decrypt_ctr(struct skcipher_request *req);
-int crypto4xx_encrypt_iv(struct skcipher_request *req);
-int crypto4xx_decrypt_iv(struct skcipher_request *req);
-int crypto4xx_encrypt_noiv(struct skcipher_request *req);
-int crypto4xx_decrypt_noiv(struct skcipher_request *req);
+int crypto4xx_encrypt_iv_stream(struct skcipher_request *req);
+int crypto4xx_decrypt_iv_stream(struct skcipher_request *req);
+int crypto4xx_encrypt_iv_block(struct skcipher_request *req);
+int crypto4xx_decrypt_iv_block(struct skcipher_request *req);
+int crypto4xx_encrypt_noiv_block(struct skcipher_request *req);
+int crypto4xx_decrypt_noiv_block(struct skcipher_request *req);
int crypto4xx_rfc3686_encrypt(struct skcipher_request *req);
int crypto4xx_rfc3686_decrypt(struct skcipher_request *req);
int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm);
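The crypto4xx rework above splits the skcipher entry points into *_block helpers (ECB/CBC, where the request length must be a whole number of AES blocks) and *_stream helpers (CFB/OFB/CTR, now advertised with cra_blocksize = 1 and no length restriction). The following small sketch shows only the length check that the new check_blocksize flag gates; check_request_len() is an illustrative name and -22 simply stands in for -EINVAL.

#include <stdbool.h>
#include <stdio.h>

#define AES_BLOCK_SIZE	16
#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

/*
 * Mirrors the guard added to crypto4xx_crypt(): block modes reject requests
 * that are not a whole number of AES blocks, stream modes accept any length.
 */
static int check_request_len(unsigned int cryptlen, bool block_mode)
{
	if (block_mode && !IS_ALIGNED(cryptlen, AES_BLOCK_SIZE))
		return -22;	/* stands in for -EINVAL */
	return 0;
}

int main(void)
{
	printf("ecb/cbc,     31 bytes: %d\n", check_request_len(31, true));	/* -22 */
	printf("ecb/cbc,     32 bytes: %d\n", check_request_len(32, true));	/* 0 */
	printf("ctr/cfb/ofb, 31 bytes: %d\n", check_request_len(31, false));	/* 0 */
	return 0;
}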
diff --git a/drivers/crypto/atmel-ecc.c b/drivers/crypto/atmel-ecc.c
index ba00e4563ca0..ff02cc05affb 100644
--- a/drivers/crypto/atmel-ecc.c
+++ b/drivers/crypto/atmel-ecc.c
@@ -6,8 +6,6 @@
* Author: Tudor Ambarus <tudor.ambarus@microchip.com>
*/
-#include <linux/bitrev.h>
-#include <linux/crc16.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -23,42 +21,11 @@
#include <crypto/internal/kpp.h>
#include <crypto/ecdh.h>
#include <crypto/kpp.h>
-#include "atmel-ecc.h"
-
-/* Used for binding tfm objects to i2c clients. */
-struct atmel_ecc_driver_data {
- struct list_head i2c_client_list;
- spinlock_t i2c_list_lock;
-} ____cacheline_aligned;
+#include "atmel-i2c.h"
static struct atmel_ecc_driver_data driver_data;
/**
- * atmel_ecc_i2c_client_priv - i2c_client private data
- * @client : pointer to i2c client device
- * @i2c_client_list_node: part of i2c_client_list
- * @lock : lock for sending i2c commands
- * @wake_token : wake token array of zeros
- * @wake_token_sz : size in bytes of the wake_token
- * @tfm_count : number of active crypto transformations on i2c client
- *
- * Reads and writes from/to the i2c client are sequential. The first byte
- * transmitted to the device is treated as the byte size. Any attempt to send
- * more than this number of bytes will cause the device to not ACK those bytes.
- * After the host writes a single command byte to the input buffer, reads are
- * prohibited until after the device completes command execution. Use a mutex
- * when sending i2c commands.
- */
-struct atmel_ecc_i2c_client_priv {
- struct i2c_client *client;
- struct list_head i2c_client_list_node;
- struct mutex lock;
- u8 wake_token[WAKE_TOKEN_MAX_SIZE];
- size_t wake_token_sz;
- atomic_t tfm_count ____cacheline_aligned;
-};
-
-/**
* atmel_ecdh_ctx - transformation context
* @client : pointer to i2c client device
* @fallback : used for unsupported curves or when user wants to use its own
@@ -80,188 +47,12 @@ struct atmel_ecdh_ctx {
bool do_fallback;
};
-/**
- * atmel_ecc_work_data - data structure representing the work
- * @ctx : transformation context.
- * @cbk : pointer to a callback function to be invoked upon completion of this
- * request. This has the form:
- * callback(struct atmel_ecc_work_data *work_data, void *areq, u8 status)
- * where:
- * @work_data: data structure representing the work
- * @areq : optional pointer to an argument passed with the original
- * request.
- * @status : status returned from the i2c client device or i2c error.
- * @areq: optional pointer to a user argument for use at callback time.
- * @work: describes the task to be executed.
- * @cmd : structure used for communicating with the device.
- */
-struct atmel_ecc_work_data {
- struct atmel_ecdh_ctx *ctx;
- void (*cbk)(struct atmel_ecc_work_data *work_data, void *areq,
- int status);
- void *areq;
- struct work_struct work;
- struct atmel_ecc_cmd cmd;
-};
-
-static u16 atmel_ecc_crc16(u16 crc, const u8 *buffer, size_t len)
-{
- return cpu_to_le16(bitrev16(crc16(crc, buffer, len)));
-}
-
-/**
- * atmel_ecc_checksum() - Generate 16-bit CRC as required by ATMEL ECC.
- * CRC16 verification of the count, opcode, param1, param2 and data bytes.
- * The checksum is saved in little-endian format in the least significant
- * two bytes of the command. CRC polynomial is 0x8005 and the initial register
- * value should be zero.
- *
- * @cmd : structure used for communicating with the device.
- */
-static void atmel_ecc_checksum(struct atmel_ecc_cmd *cmd)
-{
- u8 *data = &cmd->count;
- size_t len = cmd->count - CRC_SIZE;
- u16 *crc16 = (u16 *)(data + len);
-
- *crc16 = atmel_ecc_crc16(0, data, len);
-}
-
-static void atmel_ecc_init_read_cmd(struct atmel_ecc_cmd *cmd)
-{
- cmd->word_addr = COMMAND;
- cmd->opcode = OPCODE_READ;
- /*
- * Read the word from Configuration zone that contains the lock bytes
- * (UserExtra, Selector, LockValue, LockConfig).
- */
- cmd->param1 = CONFIG_ZONE;
- cmd->param2 = DEVICE_LOCK_ADDR;
- cmd->count = READ_COUNT;
-
- atmel_ecc_checksum(cmd);
-
- cmd->msecs = MAX_EXEC_TIME_READ;
- cmd->rxsize = READ_RSP_SIZE;
-}
-
-static void atmel_ecc_init_genkey_cmd(struct atmel_ecc_cmd *cmd, u16 keyid)
-{
- cmd->word_addr = COMMAND;
- cmd->count = GENKEY_COUNT;
- cmd->opcode = OPCODE_GENKEY;
- cmd->param1 = GENKEY_MODE_PRIVATE;
- /* a random private key will be generated and stored in slot keyID */
- cmd->param2 = cpu_to_le16(keyid);
-
- atmel_ecc_checksum(cmd);
-
- cmd->msecs = MAX_EXEC_TIME_GENKEY;
- cmd->rxsize = GENKEY_RSP_SIZE;
-}
-
-static int atmel_ecc_init_ecdh_cmd(struct atmel_ecc_cmd *cmd,
- struct scatterlist *pubkey)
-{
- size_t copied;
-
- cmd->word_addr = COMMAND;
- cmd->count = ECDH_COUNT;
- cmd->opcode = OPCODE_ECDH;
- cmd->param1 = ECDH_PREFIX_MODE;
- /* private key slot */
- cmd->param2 = cpu_to_le16(DATA_SLOT_2);
-
- /*
- * The device only supports NIST P256 ECC keys. The public key size will
- * always be the same. Use a macro for the key size to avoid unnecessary
- * computations.
- */
- copied = sg_copy_to_buffer(pubkey,
- sg_nents_for_len(pubkey,
- ATMEL_ECC_PUBKEY_SIZE),
- cmd->data, ATMEL_ECC_PUBKEY_SIZE);
- if (copied != ATMEL_ECC_PUBKEY_SIZE)
- return -EINVAL;
-
- atmel_ecc_checksum(cmd);
-
- cmd->msecs = MAX_EXEC_TIME_ECDH;
- cmd->rxsize = ECDH_RSP_SIZE;
-
- return 0;
-}
-
-/*
- * After wake and after execution of a command, there will be error, status, or
- * result bytes in the device's output register that can be retrieved by the
- * system. When the length of that group is four bytes, the codes returned are
- * detailed in error_list.
- */
-static int atmel_ecc_status(struct device *dev, u8 *status)
-{
- size_t err_list_len = ARRAY_SIZE(error_list);
- int i;
- u8 err_id = status[1];
-
- if (*status != STATUS_SIZE)
- return 0;
-
- if (err_id == STATUS_WAKE_SUCCESSFUL || err_id == STATUS_NOERR)
- return 0;
-
- for (i = 0; i < err_list_len; i++)
- if (error_list[i].value == err_id)
- break;
-
- /* if err_id is not in the error_list then ignore it */
- if (i != err_list_len) {
- dev_err(dev, "%02x: %s:\n", err_id, error_list[i].error_text);
- return err_id;
- }
-
- return 0;
-}
-
-static int atmel_ecc_wakeup(struct i2c_client *client)
-{
- struct atmel_ecc_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
- u8 status[STATUS_RSP_SIZE];
- int ret;
-
- /*
- * The device ignores any levels or transitions on the SCL pin when the
- * device is idle, asleep or during waking up. Don't check for error
- * when waking up the device.
- */
- i2c_master_send(client, i2c_priv->wake_token, i2c_priv->wake_token_sz);
-
- /*
- * Wait to wake the device. Typical execution times for ecdh and genkey
- * are around tens of milliseconds. Delta is chosen to 50 microseconds.
- */
- usleep_range(TWHI_MIN, TWHI_MAX);
-
- ret = i2c_master_recv(client, status, STATUS_SIZE);
- if (ret < 0)
- return ret;
-
- return atmel_ecc_status(&client->dev, status);
-}
-
-static int atmel_ecc_sleep(struct i2c_client *client)
-{
- u8 sleep = SLEEP_TOKEN;
-
- return i2c_master_send(client, &sleep, 1);
-}
-
-static void atmel_ecdh_done(struct atmel_ecc_work_data *work_data, void *areq,
+static void atmel_ecdh_done(struct atmel_i2c_work_data *work_data, void *areq,
int status)
{
struct kpp_request *req = areq;
struct atmel_ecdh_ctx *ctx = work_data->ctx;
- struct atmel_ecc_cmd *cmd = &work_data->cmd;
+ struct atmel_i2c_cmd *cmd = &work_data->cmd;
size_t copied, n_sz;
if (status)
@@ -282,82 +73,6 @@ free_work_data:
kpp_request_complete(req, status);
}
-/*
- * atmel_ecc_send_receive() - send a command to the device and receive its
- * response.
- * @client: i2c client device
- * @cmd : structure used to communicate with the device
- *
- * After the device receives a Wake token, a watchdog counter starts within the
- * device. After the watchdog timer expires, the device enters sleep mode
- * regardless of whether some I/O transmission or command execution is in
- * progress. If a command is attempted when insufficient time remains prior to
- * watchdog timer execution, the device will return the watchdog timeout error
- * code without attempting to execute the command. There is no way to reset the
- * counter other than to put the device into sleep or idle mode and then
- * wake it up again.
- */
-static int atmel_ecc_send_receive(struct i2c_client *client,
- struct atmel_ecc_cmd *cmd)
-{
- struct atmel_ecc_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
- int ret;
-
- mutex_lock(&i2c_priv->lock);
-
- ret = atmel_ecc_wakeup(client);
- if (ret)
- goto err;
-
- /* send the command */
- ret = i2c_master_send(client, (u8 *)cmd, cmd->count + WORD_ADDR_SIZE);
- if (ret < 0)
- goto err;
-
- /* delay the appropriate amount of time for command to execute */
- msleep(cmd->msecs);
-
- /* receive the response */
- ret = i2c_master_recv(client, cmd->data, cmd->rxsize);
- if (ret < 0)
- goto err;
-
- /* put the device into low-power mode */
- ret = atmel_ecc_sleep(client);
- if (ret < 0)
- goto err;
-
- mutex_unlock(&i2c_priv->lock);
- return atmel_ecc_status(&client->dev, cmd->data);
-err:
- mutex_unlock(&i2c_priv->lock);
- return ret;
-}
-
-static void atmel_ecc_work_handler(struct work_struct *work)
-{
- struct atmel_ecc_work_data *work_data =
- container_of(work, struct atmel_ecc_work_data, work);
- struct atmel_ecc_cmd *cmd = &work_data->cmd;
- struct i2c_client *client = work_data->ctx->client;
- int status;
-
- status = atmel_ecc_send_receive(client, cmd);
- work_data->cbk(work_data, work_data->areq, status);
-}
-
-static void atmel_ecc_enqueue(struct atmel_ecc_work_data *work_data,
- void (*cbk)(struct atmel_ecc_work_data *work_data,
- void *areq, int status),
- void *areq)
-{
- work_data->cbk = (void *)cbk;
- work_data->areq = areq;
-
- INIT_WORK(&work_data->work, atmel_ecc_work_handler);
- schedule_work(&work_data->work);
-}
-
static unsigned int atmel_ecdh_supported_curve(unsigned int curve_id)
{
if (curve_id == ECC_CURVE_NIST_P256)
@@ -374,7 +89,7 @@ static int atmel_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
unsigned int len)
{
struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm);
- struct atmel_ecc_cmd *cmd;
+ struct atmel_i2c_cmd *cmd;
void *public_key;
struct ecdh params;
int ret = -ENOMEM;
@@ -412,9 +127,9 @@ static int atmel_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
ctx->do_fallback = false;
ctx->curve_id = params.curve_id;
- atmel_ecc_init_genkey_cmd(cmd, DATA_SLOT_2);
+ atmel_i2c_init_genkey_cmd(cmd, DATA_SLOT_2);
- ret = atmel_ecc_send_receive(ctx->client, cmd);
+ ret = atmel_i2c_send_receive(ctx->client, cmd);
if (ret)
goto free_public_key;
@@ -444,6 +159,9 @@ static int atmel_ecdh_generate_public_key(struct kpp_request *req)
return crypto_kpp_generate_public_key(req);
}
+ if (!ctx->public_key)
+ return -EINVAL;
+
/* might want less than we've got */
nbytes = min_t(size_t, ATMEL_ECC_PUBKEY_SIZE, req->dst_len);
@@ -461,7 +179,7 @@ static int atmel_ecdh_compute_shared_secret(struct kpp_request *req)
{
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm);
- struct atmel_ecc_work_data *work_data;
+ struct atmel_i2c_work_data *work_data;
gfp_t gfp;
int ret;
@@ -482,12 +200,13 @@ static int atmel_ecdh_compute_shared_secret(struct kpp_request *req)
return -ENOMEM;
work_data->ctx = ctx;
+ work_data->client = ctx->client;
- ret = atmel_ecc_init_ecdh_cmd(&work_data->cmd, req->src);
+ ret = atmel_i2c_init_ecdh_cmd(&work_data->cmd, req->src);
if (ret)
goto free_work_data;
- atmel_ecc_enqueue(work_data, atmel_ecdh_done, req);
+ atmel_i2c_enqueue(work_data, atmel_ecdh_done, req);
return -EINPROGRESS;
@@ -498,7 +217,7 @@ free_work_data:
static struct i2c_client *atmel_ecc_i2c_client_alloc(void)
{
- struct atmel_ecc_i2c_client_priv *i2c_priv, *min_i2c_priv = NULL;
+ struct atmel_i2c_client_priv *i2c_priv, *min_i2c_priv = NULL;
struct i2c_client *client = ERR_PTR(-ENODEV);
int min_tfm_cnt = INT_MAX;
int tfm_cnt;
@@ -533,7 +252,7 @@ static struct i2c_client *atmel_ecc_i2c_client_alloc(void)
static void atmel_ecc_i2c_client_free(struct i2c_client *client)
{
- struct atmel_ecc_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
+ struct atmel_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
atomic_dec(&i2c_priv->tfm_count);
}
@@ -604,96 +323,18 @@ static struct kpp_alg atmel_ecdh = {
},
};
-static inline size_t atmel_ecc_wake_token_sz(u32 bus_clk_rate)
-{
- u32 no_of_bits = DIV_ROUND_UP(TWLO_USEC * bus_clk_rate, USEC_PER_SEC);
-
- /* return the size of the wake_token in bytes */
- return DIV_ROUND_UP(no_of_bits, 8);
-}
-
-static int device_sanity_check(struct i2c_client *client)
-{
- struct atmel_ecc_cmd *cmd;
- int ret;
-
- cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
- if (!cmd)
- return -ENOMEM;
-
- atmel_ecc_init_read_cmd(cmd);
-
- ret = atmel_ecc_send_receive(client, cmd);
- if (ret)
- goto free_cmd;
-
- /*
- * It is vital that the Configuration, Data and OTP zones be locked
- * prior to release into the field of the system containing the device.
- * Failure to lock these zones may permit modification of any secret
- * keys and may lead to other security problems.
- */
- if (cmd->data[LOCK_CONFIG_IDX] || cmd->data[LOCK_VALUE_IDX]) {
- dev_err(&client->dev, "Configuration or Data and OTP zones are unlocked!\n");
- ret = -ENOTSUPP;
- }
-
- /* fall through */
-free_cmd:
- kfree(cmd);
- return ret;
-}
-
static int atmel_ecc_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- struct atmel_ecc_i2c_client_priv *i2c_priv;
- struct device *dev = &client->dev;
+ struct atmel_i2c_client_priv *i2c_priv;
int ret;
- u32 bus_clk_rate;
-
- if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
- dev_err(dev, "I2C_FUNC_I2C not supported\n");
- return -ENODEV;
- }
- ret = of_property_read_u32(client->adapter->dev.of_node,
- "clock-frequency", &bus_clk_rate);
- if (ret) {
- dev_err(dev, "of: failed to read clock-frequency property\n");
- return ret;
- }
-
- if (bus_clk_rate > 1000000L) {
- dev_err(dev, "%d exceeds maximum supported clock frequency (1MHz)\n",
- bus_clk_rate);
- return -EINVAL;
- }
-
- i2c_priv = devm_kmalloc(dev, sizeof(*i2c_priv), GFP_KERNEL);
- if (!i2c_priv)
- return -ENOMEM;
-
- i2c_priv->client = client;
- mutex_init(&i2c_priv->lock);
-
- /*
- * WAKE_TOKEN_MAX_SIZE was calculated for the maximum bus_clk_rate -
- * 1MHz. The previous bus_clk_rate check ensures us that wake_token_sz
- * will always be smaller than or equal to WAKE_TOKEN_MAX_SIZE.
- */
- i2c_priv->wake_token_sz = atmel_ecc_wake_token_sz(bus_clk_rate);
-
- memset(i2c_priv->wake_token, 0, sizeof(i2c_priv->wake_token));
-
- atomic_set(&i2c_priv->tfm_count, 0);
-
- i2c_set_clientdata(client, i2c_priv);
-
- ret = device_sanity_check(client);
+ ret = atmel_i2c_probe(client, id);
if (ret)
return ret;
+ i2c_priv = i2c_get_clientdata(client);
+
spin_lock(&driver_data.i2c_list_lock);
list_add_tail(&i2c_priv->i2c_client_list_node,
&driver_data.i2c_client_list);
@@ -705,10 +346,10 @@ static int atmel_ecc_probe(struct i2c_client *client,
list_del(&i2c_priv->i2c_client_list_node);
spin_unlock(&driver_data.i2c_list_lock);
- dev_err(dev, "%s alg registration failed\n",
+ dev_err(&client->dev, "%s alg registration failed\n",
atmel_ecdh.base.cra_driver_name);
} else {
- dev_info(dev, "atmel ecc algorithms registered in /proc/crypto\n");
+ dev_info(&client->dev, "atmel ecc algorithms registered in /proc/crypto\n");
}
return ret;
@@ -716,7 +357,7 @@ static int atmel_ecc_probe(struct i2c_client *client,
static int atmel_ecc_remove(struct i2c_client *client)
{
- struct atmel_ecc_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
+ struct atmel_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
/* Return EBUSY if i2c client already allocated. */
if (atomic_read(&i2c_priv->tfm_count)) {
diff --git a/drivers/crypto/atmel-ecc.h b/drivers/crypto/atmel-ecc.h
deleted file mode 100644
index 643a3b947338..000000000000
--- a/drivers/crypto/atmel-ecc.h
+++ /dev/null
@@ -1,116 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (c) 2017, Microchip Technology Inc.
- * Author: Tudor Ambarus <tudor.ambarus@microchip.com>
- */
-
-#ifndef __ATMEL_ECC_H__
-#define __ATMEL_ECC_H__
-
-#define ATMEL_ECC_PRIORITY 300
-
-#define COMMAND 0x03 /* packet function */
-#define SLEEP_TOKEN 0x01
-#define WAKE_TOKEN_MAX_SIZE 8
-
-/* Definitions of Data and Command sizes */
-#define WORD_ADDR_SIZE 1
-#define COUNT_SIZE 1
-#define CRC_SIZE 2
-#define CMD_OVERHEAD_SIZE (COUNT_SIZE + CRC_SIZE)
-
-/* size in bytes of the n prime */
-#define ATMEL_ECC_NIST_P256_N_SIZE 32
-#define ATMEL_ECC_PUBKEY_SIZE (2 * ATMEL_ECC_NIST_P256_N_SIZE)
-
-#define STATUS_RSP_SIZE 4
-#define ECDH_RSP_SIZE (32 + CMD_OVERHEAD_SIZE)
-#define GENKEY_RSP_SIZE (ATMEL_ECC_PUBKEY_SIZE + \
- CMD_OVERHEAD_SIZE)
-#define READ_RSP_SIZE (4 + CMD_OVERHEAD_SIZE)
-#define MAX_RSP_SIZE GENKEY_RSP_SIZE
-
-/**
- * atmel_ecc_cmd - structure used for communicating with the device.
- * @word_addr: indicates the function of the packet sent to the device. This
- * byte should have a value of COMMAND for normal operation.
- * @count : number of bytes to be transferred to (or from) the device.
- * @opcode : the command code.
- * @param1 : the first parameter; always present.
- * @param2 : the second parameter; always present.
- * @data : optional remaining input data. Includes a 2-byte CRC.
- * @rxsize : size of the data received from i2c client.
- * @msecs : command execution time in milliseconds
- */
-struct atmel_ecc_cmd {
- u8 word_addr;
- u8 count;
- u8 opcode;
- u8 param1;
- u16 param2;
- u8 data[MAX_RSP_SIZE];
- u8 msecs;
- u16 rxsize;
-} __packed;
-
-/* Status/Error codes */
-#define STATUS_SIZE 0x04
-#define STATUS_NOERR 0x00
-#define STATUS_WAKE_SUCCESSFUL 0x11
-
-static const struct {
- u8 value;
- const char *error_text;
-} error_list[] = {
- { 0x01, "CheckMac or Verify miscompare" },
- { 0x03, "Parse Error" },
- { 0x05, "ECC Fault" },
- { 0x0F, "Execution Error" },
- { 0xEE, "Watchdog about to expire" },
- { 0xFF, "CRC or other communication error" },
-};
-
-/* Definitions for eeprom organization */
-#define CONFIG_ZONE 0
-
-/* Definitions for Indexes common to all commands */
-#define RSP_DATA_IDX 1 /* buffer index of data in response */
-#define DATA_SLOT_2 2 /* used for ECDH private key */
-
-/* Definitions for the device lock state */
-#define DEVICE_LOCK_ADDR 0x15
-#define LOCK_VALUE_IDX (RSP_DATA_IDX + 2)
-#define LOCK_CONFIG_IDX (RSP_DATA_IDX + 3)
-
-/*
- * Wake High delay to data communication (microseconds). SDA should be stable
- * high for this entire duration.
- */
-#define TWHI_MIN 1500
-#define TWHI_MAX 1550
-
-/* Wake Low duration */
-#define TWLO_USEC 60
-
-/* Command execution time (milliseconds) */
-#define MAX_EXEC_TIME_ECDH 58
-#define MAX_EXEC_TIME_GENKEY 115
-#define MAX_EXEC_TIME_READ 1
-
-/* Command opcode */
-#define OPCODE_ECDH 0x43
-#define OPCODE_GENKEY 0x40
-#define OPCODE_READ 0x02
-
-/* Definitions for the READ Command */
-#define READ_COUNT 7
-
-/* Definitions for the GenKey Command */
-#define GENKEY_COUNT 7
-#define GENKEY_MODE_PRIVATE 0x04
-
-/* Definitions for the ECDH Command */
-#define ECDH_COUNT 71
-#define ECDH_PREFIX_MODE 0x00
-
-#endif /* __ATMEL_ECC_H__ */
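The wake-token sizing that used to live in atmel-ecc.c (atmel_ecc_wake_token_sz(), removed above) converts the 60 µs Wake Low period into whole bytes of zero bits at the I2C bus clock rate, which is why WAKE_TOKEN_MAX_SIZE is 8 for the 1 MHz maximum. Below is a standalone sketch of that arithmetic with two example bus rates; wake_token_sz() here is illustrative, not the shared helper's actual name.

#include <stdio.h>

#define TWLO_USEC		60	/* Wake Low duration */
#define USEC_PER_SEC		1000000UL
#define WAKE_TOKEN_MAX_SIZE	8	/* sized for the 1 MHz maximum bus clock */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static unsigned long wake_token_sz(unsigned long bus_clk_rate)
{
	unsigned long no_of_bits = DIV_ROUND_UP(TWLO_USEC * bus_clk_rate, USEC_PER_SEC);

	return DIV_ROUND_UP(no_of_bits, 8);	/* size of the wake token in bytes */
}

int main(void)
{
	printf("100 kHz -> %lu byte(s)\n", wake_token_sz(100000));	/* 1 */
	printf("1 MHz   -> %lu byte(s)\n", wake_token_sz(1000000));	/* 8, == WAKE_TOKEN_MAX_SIZE */
	return 0;
}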
diff --git a/drivers/crypto/atmel-i2c.c b/drivers/crypto/atmel-i2c.c
new file mode 100644
index 000000000000..dc876fab2882
--- /dev/null
+++ b/drivers/crypto/atmel-i2c.c
@@ -0,0 +1,364 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Microchip / Atmel ECC (I2C) driver.
+ *
+ * Copyright (c) 2017, Microchip Technology Inc.
+ * Author: Tudor Ambarus <tudor.ambarus@microchip.com>
+ */
+
+#include <linux/bitrev.h>
+#include <linux/crc16.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include "atmel-i2c.h"
+
+/**
+ * atmel_i2c_checksum() - Generate 16-bit CRC as required by ATMEL ECC.
+ * CRC16 verification of the count, opcode, param1, param2 and data bytes.
+ * The checksum is saved in little-endian format in the least significant
+ * two bytes of the command. CRC polynomial is 0x8005 and the initial register
+ * value should be zero.
+ *
+ * @cmd : structure used for communicating with the device.
+ */
+static void atmel_i2c_checksum(struct atmel_i2c_cmd *cmd)
+{
+ u8 *data = &cmd->count;
+ size_t len = cmd->count - CRC_SIZE;
+ __le16 *__crc16 = (__le16 *)(data + len);
+
+ *__crc16 = cpu_to_le16(bitrev16(crc16(0, data, len)));
+}
+
+void atmel_i2c_init_read_cmd(struct atmel_i2c_cmd *cmd)
+{
+ cmd->word_addr = COMMAND;
+ cmd->opcode = OPCODE_READ;
+ /*
+ * Read the word from Configuration zone that contains the lock bytes
+ * (UserExtra, Selector, LockValue, LockConfig).
+ */
+ cmd->param1 = CONFIG_ZONE;
+ cmd->param2 = cpu_to_le16(DEVICE_LOCK_ADDR);
+ cmd->count = READ_COUNT;
+
+ atmel_i2c_checksum(cmd);
+
+ cmd->msecs = MAX_EXEC_TIME_READ;
+ cmd->rxsize = READ_RSP_SIZE;
+}
+EXPORT_SYMBOL(atmel_i2c_init_read_cmd);
+
+void atmel_i2c_init_random_cmd(struct atmel_i2c_cmd *cmd)
+{
+ cmd->word_addr = COMMAND;
+ cmd->opcode = OPCODE_RANDOM;
+ cmd->param1 = 0;
+ cmd->param2 = 0;
+ cmd->count = RANDOM_COUNT;
+
+ atmel_i2c_checksum(cmd);
+
+ cmd->msecs = MAX_EXEC_TIME_RANDOM;
+ cmd->rxsize = RANDOM_RSP_SIZE;
+}
+EXPORT_SYMBOL(atmel_i2c_init_random_cmd);
+
+void atmel_i2c_init_genkey_cmd(struct atmel_i2c_cmd *cmd, u16 keyid)
+{
+ cmd->word_addr = COMMAND;
+ cmd->count = GENKEY_COUNT;
+ cmd->opcode = OPCODE_GENKEY;
+ cmd->param1 = GENKEY_MODE_PRIVATE;
+ /* a random private key will be generated and stored in slot keyID */
+ cmd->param2 = cpu_to_le16(keyid);
+
+ atmel_i2c_checksum(cmd);
+
+ cmd->msecs = MAX_EXEC_TIME_GENKEY;
+ cmd->rxsize = GENKEY_RSP_SIZE;
+}
+EXPORT_SYMBOL(atmel_i2c_init_genkey_cmd);
+
+int atmel_i2c_init_ecdh_cmd(struct atmel_i2c_cmd *cmd,
+ struct scatterlist *pubkey)
+{
+ size_t copied;
+
+ cmd->word_addr = COMMAND;
+ cmd->count = ECDH_COUNT;
+ cmd->opcode = OPCODE_ECDH;
+ cmd->param1 = ECDH_PREFIX_MODE;
+ /* private key slot */
+ cmd->param2 = cpu_to_le16(DATA_SLOT_2);
+
+ /*
+ * The device only supports NIST P256 ECC keys. The public key size will
+ * always be the same. Use a macro for the key size to avoid unnecessary
+ * computations.
+ */
+ copied = sg_copy_to_buffer(pubkey,
+ sg_nents_for_len(pubkey,
+ ATMEL_ECC_PUBKEY_SIZE),
+ cmd->data, ATMEL_ECC_PUBKEY_SIZE);
+ if (copied != ATMEL_ECC_PUBKEY_SIZE)
+ return -EINVAL;
+
+ atmel_i2c_checksum(cmd);
+
+ cmd->msecs = MAX_EXEC_TIME_ECDH;
+ cmd->rxsize = ECDH_RSP_SIZE;
+
+ return 0;
+}
+EXPORT_SYMBOL(atmel_i2c_init_ecdh_cmd);
+
+/*
+ * After wake and after execution of a command, there will be error, status, or
+ * result bytes in the device's output register that can be retrieved by the
+ * system. When the length of that group is four bytes, the codes returned are
+ * detailed in error_list.
+ */
+static int atmel_i2c_status(struct device *dev, u8 *status)
+{
+ size_t err_list_len = ARRAY_SIZE(error_list);
+ int i;
+ u8 err_id = status[1];
+
+ if (*status != STATUS_SIZE)
+ return 0;
+
+ if (err_id == STATUS_WAKE_SUCCESSFUL || err_id == STATUS_NOERR)
+ return 0;
+
+ for (i = 0; i < err_list_len; i++)
+ if (error_list[i].value == err_id)
+ break;
+
+ /* if err_id is in the error_list, report it; otherwise ignore it */
+ if (i != err_list_len) {
+ dev_err(dev, "%02x: %s:\n", err_id, error_list[i].error_text);
+ return err_id;
+ }
+
+ return 0;
+}
+
+static int atmel_i2c_wakeup(struct i2c_client *client)
+{
+ struct atmel_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
+ u8 status[STATUS_RSP_SIZE];
+ int ret;
+
+ /*
+ * The device ignores any levels or transitions on the SCL pin when the
+ * device is idle, asleep or during waking up. Don't check for error
+ * when waking up the device.
+ */
+ i2c_master_send(client, i2c_priv->wake_token, i2c_priv->wake_token_sz);
+
+ /*
+ * Wait for the device to wake up. Typical execution times for ecdh
+ * and genkey are around tens of milliseconds. The delta between
+ * TWHI_MIN and TWHI_MAX is chosen as 50 microseconds.
+ */
+ usleep_range(TWHI_MIN, TWHI_MAX);
+
+ ret = i2c_master_recv(client, status, STATUS_SIZE);
+ if (ret < 0)
+ return ret;
+
+ return atmel_i2c_status(&client->dev, status);
+}
+
+static int atmel_i2c_sleep(struct i2c_client *client)
+{
+ u8 sleep = SLEEP_TOKEN;
+
+ return i2c_master_send(client, &sleep, 1);
+}
+
+/*
+ * atmel_i2c_send_receive() - send a command to the device and receive its
+ * response.
+ * @client: i2c client device
+ * @cmd : structure used to communicate with the device
+ *
+ * After the device receives a Wake token, a watchdog counter starts within the
+ * device. After the watchdog timer expires, the device enters sleep mode
+ * regardless of whether some I/O transmission or command execution is in
+ * progress. If a command is attempted when insufficient time remains prior to
+ * watchdog timer expiration, the device will return the watchdog timeout error
+ * code without attempting to execute the command. There is no way to reset the
+ * counter other than to put the device into sleep or idle mode and then
+ * wake it up again.
+ */
+int atmel_i2c_send_receive(struct i2c_client *client, struct atmel_i2c_cmd *cmd)
+{
+ struct atmel_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
+ int ret;
+
+ mutex_lock(&i2c_priv->lock);
+
+ ret = atmel_i2c_wakeup(client);
+ if (ret)
+ goto err;
+
+ /* send the command */
+ ret = i2c_master_send(client, (u8 *)cmd, cmd->count + WORD_ADDR_SIZE);
+ if (ret < 0)
+ goto err;
+
+ /* delay the appropriate amount of time for command to execute */
+ msleep(cmd->msecs);
+
+ /* receive the response */
+ ret = i2c_master_recv(client, cmd->data, cmd->rxsize);
+ if (ret < 0)
+ goto err;
+
+ /* put the device into low-power mode */
+ ret = atmel_i2c_sleep(client);
+ if (ret < 0)
+ goto err;
+
+ mutex_unlock(&i2c_priv->lock);
+ return atmel_i2c_status(&client->dev, cmd->data);
+err:
+ mutex_unlock(&i2c_priv->lock);
+ return ret;
+}
+EXPORT_SYMBOL(atmel_i2c_send_receive);
+
+static void atmel_i2c_work_handler(struct work_struct *work)
+{
+ struct atmel_i2c_work_data *work_data =
+ container_of(work, struct atmel_i2c_work_data, work);
+ struct atmel_i2c_cmd *cmd = &work_data->cmd;
+ struct i2c_client *client = work_data->client;
+ int status;
+
+ status = atmel_i2c_send_receive(client, cmd);
+ work_data->cbk(work_data, work_data->areq, status);
+}
+
+void atmel_i2c_enqueue(struct atmel_i2c_work_data *work_data,
+ void (*cbk)(struct atmel_i2c_work_data *work_data,
+ void *areq, int status),
+ void *areq)
+{
+ work_data->cbk = (void *)cbk;
+ work_data->areq = areq;
+
+ INIT_WORK(&work_data->work, atmel_i2c_work_handler);
+ schedule_work(&work_data->work);
+}
+EXPORT_SYMBOL(atmel_i2c_enqueue);
+
+static inline size_t atmel_i2c_wake_token_sz(u32 bus_clk_rate)
+{
+ u32 no_of_bits = DIV_ROUND_UP(TWLO_USEC * bus_clk_rate, USEC_PER_SEC);
+
+ /* return the size of the wake_token in bytes */
+ return DIV_ROUND_UP(no_of_bits, 8);
+}
+
+static int device_sanity_check(struct i2c_client *client)
+{
+ struct atmel_i2c_cmd *cmd;
+ int ret;
+
+ cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ atmel_i2c_init_read_cmd(cmd);
+
+ ret = atmel_i2c_send_receive(client, cmd);
+ if (ret)
+ goto free_cmd;
+
+ /*
+ * It is vital that the Configuration, Data and OTP zones be locked
+ * prior to release into the field of the system containing the device.
+ * Failure to lock these zones may permit modification of any secret
+ * keys and may lead to other security problems.
+ */
+ if (cmd->data[LOCK_CONFIG_IDX] || cmd->data[LOCK_VALUE_IDX]) {
+ dev_err(&client->dev, "Configuration or Data and OTP zones are unlocked!\n");
+ ret = -ENOTSUPP;
+ }
+
+ /* fall through */
+free_cmd:
+ kfree(cmd);
+ return ret;
+}
+
+int atmel_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+ struct atmel_i2c_client_priv *i2c_priv;
+ struct device *dev = &client->dev;
+ int ret;
+ u32 bus_clk_rate;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(dev, "I2C_FUNC_I2C not supported\n");
+ return -ENODEV;
+ }
+
+ bus_clk_rate = i2c_acpi_find_bus_speed(&client->adapter->dev);
+ if (!bus_clk_rate) {
+ ret = device_property_read_u32(&client->adapter->dev,
+ "clock-frequency", &bus_clk_rate);
+ if (ret) {
+ dev_err(dev, "failed to read clock-frequency property\n");
+ return ret;
+ }
+ }
+
+ if (bus_clk_rate > 1000000L) {
+ dev_err(dev, "%d exceeds maximum supported clock frequency (1MHz)\n",
+ bus_clk_rate);
+ return -EINVAL;
+ }
+
+ i2c_priv = devm_kmalloc(dev, sizeof(*i2c_priv), GFP_KERNEL);
+ if (!i2c_priv)
+ return -ENOMEM;
+
+ i2c_priv->client = client;
+ mutex_init(&i2c_priv->lock);
+
+ /*
+ * WAKE_TOKEN_MAX_SIZE was calculated for the maximum bus_clk_rate -
+ * 1MHz. The previous bus_clk_rate check ensures that wake_token_sz
+ * will always be smaller than or equal to WAKE_TOKEN_MAX_SIZE.
+ */
+ i2c_priv->wake_token_sz = atmel_i2c_wake_token_sz(bus_clk_rate);
+
+ memset(i2c_priv->wake_token, 0, sizeof(i2c_priv->wake_token));
+
+ atomic_set(&i2c_priv->tfm_count, 0);
+
+ i2c_set_clientdata(client, i2c_priv);
+
+ ret = device_sanity_check(client);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(atmel_i2c_probe);
+
+MODULE_AUTHOR("Tudor Ambarus <tudor.ambarus@microchip.com>");
+MODULE_DESCRIPTION("Microchip / Atmel ECC (I2C) driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/crypto/atmel-i2c.h b/drivers/crypto/atmel-i2c.h
new file mode 100644
index 000000000000..21860b99c3e3
--- /dev/null
+++ b/drivers/crypto/atmel-i2c.h
@@ -0,0 +1,197 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017, Microchip Technology Inc.
+ * Author: Tudor Ambarus <tudor.ambarus@microchip.com>
+ */
+
+#ifndef __ATMEL_I2C_H__
+#define __ATMEL_I2C_H__
+
+#include <linux/hw_random.h>
+#include <linux/types.h>
+
+#define ATMEL_ECC_PRIORITY 300
+
+#define COMMAND 0x03 /* packet function */
+#define SLEEP_TOKEN 0x01
+#define WAKE_TOKEN_MAX_SIZE 8
+
+/* Definitions of Data and Command sizes */
+#define WORD_ADDR_SIZE 1
+#define COUNT_SIZE 1
+#define CRC_SIZE 2
+#define CMD_OVERHEAD_SIZE (COUNT_SIZE + CRC_SIZE)
+
+/* size in bytes of the n prime */
+#define ATMEL_ECC_NIST_P256_N_SIZE 32
+#define ATMEL_ECC_PUBKEY_SIZE (2 * ATMEL_ECC_NIST_P256_N_SIZE)
+
+#define STATUS_RSP_SIZE 4
+#define ECDH_RSP_SIZE (32 + CMD_OVERHEAD_SIZE)
+#define GENKEY_RSP_SIZE (ATMEL_ECC_PUBKEY_SIZE + \
+ CMD_OVERHEAD_SIZE)
+#define READ_RSP_SIZE (4 + CMD_OVERHEAD_SIZE)
+#define RANDOM_RSP_SIZE (32 + CMD_OVERHEAD_SIZE)
+#define MAX_RSP_SIZE GENKEY_RSP_SIZE
+
+/**
+ * atmel_i2c_cmd - structure used for communicating with the device.
+ * @word_addr: indicates the function of the packet sent to the device. This
+ * byte should have a value of COMMAND for normal operation.
+ * @count : number of bytes to be transferred to (or from) the device.
+ * @opcode : the command code.
+ * @param1 : the first parameter; always present.
+ * @param2 : the second parameter; always present.
+ * @data : optional remaining input data. Includes a 2-byte CRC.
+ * @rxsize : size of the data received from i2c client.
+ * @msecs : command execution time in milliseconds
+ */
+struct atmel_i2c_cmd {
+ u8 word_addr;
+ u8 count;
+ u8 opcode;
+ u8 param1;
+ __le16 param2;
+ u8 data[MAX_RSP_SIZE];
+ u8 msecs;
+ u16 rxsize;
+} __packed;
+
+/* Status/Error codes */
+#define STATUS_SIZE 0x04
+#define STATUS_NOERR 0x00
+#define STATUS_WAKE_SUCCESSFUL 0x11
+
+static const struct {
+ u8 value;
+ const char *error_text;
+} error_list[] = {
+ { 0x01, "CheckMac or Verify miscompare" },
+ { 0x03, "Parse Error" },
+ { 0x05, "ECC Fault" },
+ { 0x0F, "Execution Error" },
+ { 0xEE, "Watchdog about to expire" },
+ { 0xFF, "CRC or other communication error" },
+};
+
+/* Definitions for eeprom organization */
+#define CONFIG_ZONE 0
+
+/* Definitions for Indexes common to all commands */
+#define RSP_DATA_IDX 1 /* buffer index of data in response */
+#define DATA_SLOT_2 2 /* used for ECDH private key */
+
+/* Definitions for the device lock state */
+#define DEVICE_LOCK_ADDR 0x15
+#define LOCK_VALUE_IDX (RSP_DATA_IDX + 2)
+#define LOCK_CONFIG_IDX (RSP_DATA_IDX + 3)
+
+/*
+ * Wake High delay to data communication (microseconds). SDA should be stable
+ * high for this entire duration.
+ */
+#define TWHI_MIN 1500
+#define TWHI_MAX 1550
+
+/* Wake Low duration */
+#define TWLO_USEC 60
+
+/* Command execution time (milliseconds) */
+#define MAX_EXEC_TIME_ECDH 58
+#define MAX_EXEC_TIME_GENKEY 115
+#define MAX_EXEC_TIME_READ 1
+#define MAX_EXEC_TIME_RANDOM 50
+
+/* Command opcode */
+#define OPCODE_ECDH 0x43
+#define OPCODE_GENKEY 0x40
+#define OPCODE_READ 0x02
+#define OPCODE_RANDOM 0x1b
+
+/* Definitions for the READ Command */
+#define READ_COUNT 7
+
+/* Definitions for the RANDOM Command */
+#define RANDOM_COUNT 7
+
+/* Definitions for the GenKey Command */
+#define GENKEY_COUNT 7
+#define GENKEY_MODE_PRIVATE 0x04
+
+/* Definitions for the ECDH Command */
+#define ECDH_COUNT 71
+#define ECDH_PREFIX_MODE 0x00
+
+/* Used for binding tfm objects to i2c clients. */
+struct atmel_ecc_driver_data {
+ struct list_head i2c_client_list;
+ spinlock_t i2c_list_lock;
+} ____cacheline_aligned;
+
+/**
+ * atmel_i2c_client_priv - i2c_client private data
+ * @client : pointer to i2c client device
+ * @i2c_client_list_node: part of i2c_client_list
+ * @lock : lock for sending i2c commands
+ * @wake_token : wake token array of zeros
+ * @wake_token_sz : size in bytes of the wake_token
+ * @tfm_count : number of active crypto transformations on i2c client
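+ * @hwrng : hw_random interface backed by the device's RANDOM command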
+ *
+ * Reads and writes from/to the i2c client are sequential. The first byte
+ * transmitted to the device is treated as the byte size. Any attempt to send
+ * more than this number of bytes will cause the device to not ACK those bytes.
+ * After the host writes a single command byte to the input buffer, reads are
+ * prohibited until after the device completes command execution. Use a mutex
+ * when sending i2c commands.
+ */
+struct atmel_i2c_client_priv {
+ struct i2c_client *client;
+ struct list_head i2c_client_list_node;
+ struct mutex lock;
+ u8 wake_token[WAKE_TOKEN_MAX_SIZE];
+ size_t wake_token_sz;
+ atomic_t tfm_count ____cacheline_aligned;
+ struct hwrng hwrng;
+};
+
+/**
+ * atmel_i2c_work_data - data structure representing the work
+ * @ctx : transformation context.
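+ * @client : pointer to i2c client device.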
+ * @cbk : pointer to a callback function to be invoked upon completion of this
+ * request. This has the form:
+ * callback(struct atmel_i2c_work_data *work_data, void *areq, int status)
+ * where:
+ * @work_data: data structure representing the work
+ * @areq : optional pointer to an argument passed with the original
+ * request.
+ * @status : status returned from the i2c client device or i2c error.
+ * @areq: optional pointer to a user argument for use at callback time.
+ * @work: describes the task to be executed.
+ * @cmd : structure used for communicating with the device.
+ */
+struct atmel_i2c_work_data {
+ void *ctx;
+ struct i2c_client *client;
+ void (*cbk)(struct atmel_i2c_work_data *work_data, void *areq,
+ int status);
+ void *areq;
+ struct work_struct work;
+ struct atmel_i2c_cmd cmd;
+};
+
+int atmel_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id);
+
+void atmel_i2c_enqueue(struct atmel_i2c_work_data *work_data,
+ void (*cbk)(struct atmel_i2c_work_data *work_data,
+ void *areq, int status),
+ void *areq);
+
+int atmel_i2c_send_receive(struct i2c_client *client, struct atmel_i2c_cmd *cmd);
+
+void atmel_i2c_init_read_cmd(struct atmel_i2c_cmd *cmd);
+void atmel_i2c_init_random_cmd(struct atmel_i2c_cmd *cmd);
+void atmel_i2c_init_genkey_cmd(struct atmel_i2c_cmd *cmd, u16 keyid);
+int atmel_i2c_init_ecdh_cmd(struct atmel_i2c_cmd *cmd,
+ struct scatterlist *pubkey);
+
+#endif /* __ATMEL_I2C_H__ */
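
A minimal sketch of the wake-token sizing done by atmel_i2c_wake_token_sz(), using TWLO_USEC and the 1 MHz clock limit from the header above. The wake token is a string of zero bytes that keeps SDA low for at least TWLO_USEC; the arithmetic mirrors the driver's DIV_ROUND_UP() calls, and the bus rates in main() are only worked examples, not values the driver hard-codes.

#include <stdio.h>

#define DIV_ROUND_UP(a, b)	(((a) + (b) - 1) / (b))
#define TWLO_USEC		60	/* minimum Wake Low duration (us) */
#define USEC_PER_SEC		1000000UL

static unsigned int wake_token_sz(unsigned long bus_clk_rate)
{
	/* bit times needed to keep SDA low for at least TWLO_USEC */
	unsigned long no_of_bits = DIV_ROUND_UP(TWLO_USEC * bus_clk_rate,
						USEC_PER_SEC);

	/* the token is sent as that many zero bits, rounded up to bytes */
	return DIV_ROUND_UP(no_of_bits, 8);
}

int main(void)
{
	/*
	 * 100 kHz -> 1 byte, 400 kHz -> 3 bytes, 1 MHz -> 8 bytes, which is
	 * why WAKE_TOKEN_MAX_SIZE is 8 for the maximum supported clock.
	 */
	printf("%u %u %u\n", wake_token_sz(100000), wake_token_sz(400000),
	       wake_token_sz(1000000));
	return 0;
}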
diff --git a/drivers/crypto/atmel-sha204a.c b/drivers/crypto/atmel-sha204a.c
new file mode 100644
index 000000000000..ea0d2068ea4f
--- /dev/null
+++ b/drivers/crypto/atmel-sha204a.c
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Microchip / Atmel SHA204A (I2C) driver.
+ *
+ * Copyright (c) 2019 Linaro, Ltd. <ard.biesheuvel@linaro.org>
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include "atmel-i2c.h"
+
+static void atmel_sha204a_rng_done(struct atmel_i2c_work_data *work_data,
+ void *areq, int status)
+{
+ struct atmel_i2c_client_priv *i2c_priv = work_data->ctx;
+ struct hwrng *rng = areq;
+
+ if (status)
+ dev_warn_ratelimited(&i2c_priv->client->dev,
+ "i2c transaction failed (%d)\n",
+ status);
+
+ rng->priv = (unsigned long)work_data;
+ atomic_dec(&i2c_priv->tfm_count);
+}
+
+static int atmel_sha204a_rng_read_nonblocking(struct hwrng *rng, void *data,
+ size_t max)
+{
+ struct atmel_i2c_client_priv *i2c_priv;
+ struct atmel_i2c_work_data *work_data;
+
+ i2c_priv = container_of(rng, struct atmel_i2c_client_priv, hwrng);
+
+ /* keep at most one asynchronous read in flight at any time */
+ if (!atomic_add_unless(&i2c_priv->tfm_count, 1, 1))
+ return 0;
+
+ if (rng->priv) {
+ work_data = (struct atmel_i2c_work_data *)rng->priv;
+ max = min(sizeof(work_data->cmd.data), max);
+ memcpy(data, &work_data->cmd.data, max);
+ rng->priv = 0;
+ } else {
+ work_data = kmalloc(sizeof(*work_data), GFP_ATOMIC);
+ if (!work_data)
+ return -ENOMEM;
+
+ work_data->ctx = i2c_priv;
+ work_data->client = i2c_priv->client;
+
+ max = 0;
+ }
+
+ atmel_i2c_init_random_cmd(&work_data->cmd);
+ atmel_i2c_enqueue(work_data, atmel_sha204a_rng_done, rng);
+
+ return max;
+}
+
+static int atmel_sha204a_rng_read(struct hwrng *rng, void *data, size_t max,
+ bool wait)
+{
+ struct atmel_i2c_client_priv *i2c_priv;
+ struct atmel_i2c_cmd cmd;
+ int ret;
+
+ if (!wait)
+ return atmel_sha204a_rng_read_nonblocking(rng, data, max);
+
+ i2c_priv = container_of(rng, struct atmel_i2c_client_priv, hwrng);
+
+ atmel_i2c_init_random_cmd(&cmd);
+
+ ret = atmel_i2c_send_receive(i2c_priv->client, &cmd);
+ if (ret)
+ return ret;
+
+ max = min(sizeof(cmd.data), max);
+ memcpy(data, cmd.data, max);
+
+ return max;
+}
+
+static int atmel_sha204a_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct atmel_i2c_client_priv *i2c_priv;
+ int ret;
+
+ ret = atmel_i2c_probe(client, id);
+ if (ret)
+ return ret;
+
+ i2c_priv = i2c_get_clientdata(client);
+
+ memset(&i2c_priv->hwrng, 0, sizeof(i2c_priv->hwrng));
+
+ i2c_priv->hwrng.name = dev_name(&client->dev);
+ i2c_priv->hwrng.read = atmel_sha204a_rng_read;
+ i2c_priv->hwrng.quality = 1024;
+
+ ret = hwrng_register(&i2c_priv->hwrng);
+ if (ret)
+ dev_warn(&client->dev, "failed to register RNG (%d)\n", ret);
+
+ return ret;
+}
+
+static int atmel_sha204a_remove(struct i2c_client *client)
+{
+ struct atmel_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
+
+ if (atomic_read(&i2c_priv->tfm_count)) {
+ dev_err(&client->dev, "Device is busy\n");
+ return -EBUSY;
+ }
+
+ if (i2c_priv->hwrng.priv)
+ kfree((void *)i2c_priv->hwrng.priv);
+ hwrng_unregister(&i2c_priv->hwrng);
+
+ return 0;
+}
+
+static const struct of_device_id atmel_sha204a_dt_ids[] = {
+ { .compatible = "atmel,atsha204a", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, atmel_sha204a_dt_ids);
+
+static const struct i2c_device_id atmel_sha204a_id[] = {
+ { "atsha204a", 0 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(i2c, atmel_sha204a_id);
+
+static struct i2c_driver atmel_sha204a_driver = {
+ .probe = atmel_sha204a_probe,
+ .remove = atmel_sha204a_remove,
+ .id_table = atmel_sha204a_id,
+
+ .driver.name = "atmel-sha204a",
+ .driver.of_match_table = of_match_ptr(atmel_sha204a_dt_ids),
+};
+
+static int __init atmel_sha204a_init(void)
+{
+ return i2c_add_driver(&atmel_sha204a_driver);
+}
+
+static void __exit atmel_sha204a_exit(void)
+{
+ flush_scheduled_work();
+ i2c_del_driver(&atmel_sha204a_driver);
+}
+
+module_init(atmel_sha204a_init);
+module_exit(atmel_sha204a_exit);
+
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index 18410c9e7b29..869602fcfd96 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -85,7 +85,7 @@ MODULE_PARM_DESC(aead_pri, "Priority for AEAD algos");
* 0x70 - ring 2
* 0x78 - ring 3
*/
-char BCMHEADER[] = { 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x28 };
+static char BCMHEADER[] = { 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x28 };
/*
* Some SPU hw does not use BCM header on SPU messages. So BCM_HDR_LEN
* is set dynamically after reading SPU type from device tree.
@@ -2083,7 +2083,7 @@ static int __ahash_init(struct ahash_request *req)
* Return: true if incremental hashing is not supported
* false otherwise
*/
-bool spu_no_incr_hash(struct iproc_ctx_s *ctx)
+static bool spu_no_incr_hash(struct iproc_ctx_s *ctx)
{
struct spu_hw *spu = &iproc_priv.spu;
@@ -4809,7 +4809,7 @@ static int spu_dt_read(struct platform_device *pdev)
return 0;
}
-int bcm_spu_probe(struct platform_device *pdev)
+static int bcm_spu_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct spu_hw *spu = &iproc_priv.spu;
@@ -4853,7 +4853,7 @@ failure:
return err;
}
-int bcm_spu_remove(struct platform_device *pdev)
+static int bcm_spu_remove(struct platform_device *pdev)
{
int i;
struct device *dev = &pdev->dev;
diff --git a/drivers/crypto/bcm/spu2.c b/drivers/crypto/bcm/spu2.c
index cb477259a2e2..2add51024575 100644
--- a/drivers/crypto/bcm/spu2.c
+++ b/drivers/crypto/bcm/spu2.c
@@ -38,21 +38,21 @@ enum spu2_proto_sel {
SPU2_DTLS_AEAD = 10
};
-char *spu2_cipher_type_names[] = { "None", "AES128", "AES192", "AES256",
+static char *spu2_cipher_type_names[] = { "None", "AES128", "AES192", "AES256",
"DES", "3DES"
};
-char *spu2_cipher_mode_names[] = { "ECB", "CBC", "CTR", "CFB", "OFB", "XTS",
- "CCM", "GCM"
+static char *spu2_cipher_mode_names[] = { "ECB", "CBC", "CTR", "CFB", "OFB",
+ "XTS", "CCM", "GCM"
};
-char *spu2_hash_type_names[] = { "None", "AES128", "AES192", "AES256",
+static char *spu2_hash_type_names[] = { "None", "AES128", "AES192", "AES256",
"Reserved", "Reserved", "MD5", "SHA1", "SHA224", "SHA256", "SHA384",
"SHA512", "SHA512/224", "SHA512/256", "SHA3-224", "SHA3-256",
"SHA3-384", "SHA3-512"
};
-char *spu2_hash_mode_names[] = { "CMAC", "CBC-MAC", "XCBC-MAC", "HMAC",
+static char *spu2_hash_mode_names[] = { "CMAC", "CBC-MAC", "XCBC-MAC", "HMAC",
"Rabin", "CCM", "GCM", "Reserved"
};
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index 577c9844b322..3720ddabb507 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -2,6 +2,12 @@
config CRYPTO_DEV_FSL_CAAM_COMMON
tristate
+config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
+ tristate
+
+config CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
+ tristate
+
config CRYPTO_DEV_FSL_CAAM
tristate "Freescale CAAM-Multicore platform driver backend"
depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE
@@ -25,7 +31,7 @@ config CRYPTO_DEV_FSL_CAAM_DEBUG
Selecting this will enable printing of various debug
information in the CAAM driver.
-config CRYPTO_DEV_FSL_CAAM_JR
+menuconfig CRYPTO_DEV_FSL_CAAM_JR
tristate "Freescale CAAM Job Ring driver backend"
default y
help
@@ -86,8 +92,9 @@ config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD
threshold. Range is 1-65535.
config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
- tristate "Register algorithm implementations with the Crypto API"
+ bool "Register algorithm implementations with the Crypto API"
default y
+ select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
select CRYPTO_AEAD
select CRYPTO_AUTHENC
select CRYPTO_BLKCIPHER
@@ -97,13 +104,11 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
scatterlist crypto API (such as the linux native IPSec
stack) to the SEC4 via job ring.
- To compile this as a module, choose M here: the module
- will be called caamalg.
-
config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
- tristate "Queue Interface as Crypto API backend"
+ bool "Queue Interface as Crypto API backend"
depends on FSL_DPAA && NET
default y
+ select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
select CRYPTO_AUTHENC
select CRYPTO_BLKCIPHER
help
@@ -114,33 +119,26 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
assigned to the kernel should also be more than the number of
job rings.
- To compile this as a module, choose M here: the module
- will be called caamalg_qi.
-
config CRYPTO_DEV_FSL_CAAM_AHASH_API
- tristate "Register hash algorithm implementations with Crypto API"
+ bool "Register hash algorithm implementations with Crypto API"
default y
+ select CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
select CRYPTO_HASH
help
Selecting this will offload ahash for users of the
scatterlist crypto API to the SEC4 via job ring.
- To compile this as a module, choose M here: the module
- will be called caamhash.
-
config CRYPTO_DEV_FSL_CAAM_PKC_API
- tristate "Register public key cryptography implementations with Crypto API"
+ bool "Register public key cryptography implementations with Crypto API"
default y
select CRYPTO_RSA
help
Selecting this will allow SEC Public key support for RSA.
Supported cryptographic primitives: encryption, decryption,
signature and verification.
- To compile this as a module, choose M here: the module
- will be called caam_pkc.
config CRYPTO_DEV_FSL_CAAM_RNG_API
- tristate "Register caam device for hwrng API"
+ bool "Register caam device for hwrng API"
default y
select CRYPTO_RNG
select HW_RANDOM
@@ -148,9 +146,6 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API
Selecting this will register the SEC4 hardware rng to
 the hw_random API for supplying the kernel entropy pool.
- To compile this as a module, choose M here: the module
- will be called caamrng.
-
endif # CRYPTO_DEV_FSL_CAAM_JR
endif # CRYPTO_DEV_FSL_CAAM
@@ -160,6 +155,8 @@ config CRYPTO_DEV_FSL_DPAA2_CAAM
depends on FSL_MC_DPIO
depends on NETDEVICES
select CRYPTO_DEV_FSL_CAAM_COMMON
+ select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
+ select CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
select CRYPTO_BLKCIPHER
select CRYPTO_AUTHENC
select CRYPTO_AEAD
@@ -171,12 +168,3 @@ config CRYPTO_DEV_FSL_DPAA2_CAAM
To compile this as a module, choose M here: the module
will be called dpaa2_caam.
-
-config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
- def_tristate (CRYPTO_DEV_FSL_CAAM_CRYPTO_API || \
- CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI || \
- CRYPTO_DEV_FSL_DPAA2_CAAM)
-
-config CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
- def_tristate (CRYPTO_DEV_FSL_CAAM_AHASH_API || \
- CRYPTO_DEV_FSL_DPAA2_CAAM)
diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile
index 7bbfd06a11ff..9ab4e81ea21e 100644
--- a/drivers/crypto/caam/Makefile
+++ b/drivers/crypto/caam/Makefile
@@ -11,20 +11,20 @@ ccflags-y += -DVERSION=\"\"
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += error.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
-obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
-obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o
-obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC) += caamhash_desc.o
-obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
-obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o
-caam-objs := ctrl.o
-caam_jr-objs := jr.o key_gen.o
-caam_pkc-y := caampkc.o pkc_desc.o
+caam-y := ctrl.o
+caam_jr-y := jr.o key_gen.o
+caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
+caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o
+caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
+caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
+caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caampkc.o pkc_desc.o
+
+caam-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += qi.o
ifneq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI),)
ccflags-y += -DCONFIG_CAAM_QI
- caam-objs += qi.o
endif
obj-$(CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM) += dpaa2_caam.o
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index c0ece44f303b..43f18253e5b6 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -77,13 +77,6 @@
#define DESC_MAX_USED_BYTES (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
#define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
-#ifdef DEBUG
-/* for print_hex_dumps with line references */
-#define debug(format, arg...) printk(format, arg)
-#else
-#define debug(format, arg...)
-#endif
-
struct caam_alg_entry {
int class1_alg_type;
int class2_alg_type;
@@ -583,13 +576,11 @@ static int aead_setkey(struct crypto_aead *aead,
if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
goto badkey;
-#ifdef DEBUG
- printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
+ dev_dbg(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
keys.authkeylen + keys.enckeylen, keys.enckeylen,
keys.authkeylen);
- print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-#endif
+ print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
/*
* If DKP is supported, use it in the shared descriptor to generate
@@ -623,11 +614,10 @@ static int aead_setkey(struct crypto_aead *aead,
memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
keys.enckeylen, ctx->dir);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
- ctx->adata.keylen_pad + keys.enckeylen, 1);
-#endif
+
+ print_hex_dump_debug("ctx.key@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
+ ctx->adata.keylen_pad + keys.enckeylen, 1);
skip_split_key:
ctx->cdata.keylen = keys.enckeylen;
@@ -678,10 +668,8 @@ static int gcm_setkey(struct crypto_aead *aead,
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-#endif
+ print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
memcpy(ctx->key, key, keylen);
dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
@@ -699,10 +687,8 @@ static int rfc4106_setkey(struct crypto_aead *aead,
if (keylen < 4)
return -EINVAL;
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-#endif
+ print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
memcpy(ctx->key, key, keylen);
@@ -725,10 +711,8 @@ static int rfc4543_setkey(struct crypto_aead *aead,
if (keylen < 4)
return -EINVAL;
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-#endif
+ print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
memcpy(ctx->key, key, keylen);
@@ -757,10 +741,8 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
OP_ALG_AAI_CTR_MOD128);
const bool is_rfc3686 = alg->caam.rfc3686;
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-#endif
+ print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
/*
* AES-CTR needs to load IV in CONTEXT1 reg
* at an offset of 128bits (16bytes)
@@ -916,7 +898,7 @@ static void caam_unmap(struct device *dev, struct scatterlist *src,
}
if (iv_dma)
- dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
+ dma_unmap_single(dev, iv_dma, ivsize, DMA_BIDIRECTIONAL);
if (sec4_sg_bytes)
dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
DMA_TO_DEVICE);
@@ -949,9 +931,7 @@ static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
struct aead_request *req = context;
struct aead_edesc *edesc;
-#ifdef DEBUG
- dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-#endif
+ dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
edesc = container_of(desc, struct aead_edesc, hw_desc[0]);
@@ -971,9 +951,7 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
struct aead_request *req = context;
struct aead_edesc *edesc;
-#ifdef DEBUG
- dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-#endif
+ dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
edesc = container_of(desc, struct aead_edesc, hw_desc[0]);
@@ -1001,33 +979,32 @@ static void skcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
int ivsize = crypto_skcipher_ivsize(skcipher);
-#ifdef DEBUG
- dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-#endif
+ dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]);
if (err)
caam_jr_strstatus(jrdev, err);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
- edesc->src_nents > 1 ? 100 : ivsize, 1);
-#endif
- caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
- edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
-
skcipher_unmap(jrdev, edesc, req);
/*
* The crypto API expects us to set the IV (req->iv) to the last
- * ciphertext block. This is used e.g. by the CTS mode.
+ * ciphertext block (CBC mode) or last counter (CTR mode).
+ * This is used e.g. by the CTS mode.
*/
- if (ivsize)
- scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen -
- ivsize, ivsize, 0);
+ if (ivsize) {
+ memcpy(req->iv, (u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes,
+ ivsize);
+
+ print_hex_dump_debug("dstiv @"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
+ edesc->src_nents > 1 ? 100 : ivsize, 1);
+ }
+
+ caam_dump_sg("dst @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
+ edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
kfree(edesc);
@@ -1039,26 +1016,35 @@ static void skcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
{
struct skcipher_request *req = context;
struct skcipher_edesc *edesc;
-#ifdef DEBUG
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
int ivsize = crypto_skcipher_ivsize(skcipher);
- dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-#endif
+ dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]);
if (err)
caam_jr_strstatus(jrdev, err);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
-#endif
- caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
+ skcipher_unmap(jrdev, edesc, req);
+
+ /*
+ * The crypto API expects us to set the IV (req->iv) to the last
+ * ciphertext block (CBC mode) or last counter (CTR mode).
+ * This is used e.g. by the CTS mode.
+ */
+ if (ivsize) {
+ memcpy(req->iv, (u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes,
+ ivsize);
+
+ print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
+ ivsize, 1);
+ }
+
+ caam_dump_sg("dst @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
- skcipher_unmap(jrdev, edesc, req);
kfree(edesc);
skcipher_request_complete(req, err);
@@ -1106,6 +1092,7 @@ static void init_aead_job(struct aead_request *req,
if (unlikely(req->src != req->dst)) {
if (!edesc->mapped_dst_nents) {
dst_dma = 0;
+ out_options = 0;
} else if (edesc->mapped_dst_nents == 1) {
dst_dma = sg_dma_address(req->dst);
out_options = 0;
@@ -1249,6 +1236,7 @@ static void init_skcipher_job(struct skcipher_request *req,
{
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct device *jrdev = ctx->jrdev;
int ivsize = crypto_skcipher_ivsize(skcipher);
u32 *desc = edesc->hw_desc;
u32 *sh_desc;
@@ -1256,13 +1244,12 @@ static void init_skcipher_job(struct skcipher_request *req,
dma_addr_t src_dma, dst_dma, ptr;
int len, sec4_sg_index = 0;
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
- pr_err("asked=%d, cryptlen%d\n",
+ print_hex_dump_debug("presciv@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
+ dev_dbg(jrdev, "asked=%d, cryptlen%d\n",
(int)edesc->src_nents > 1 ? 100 : req->cryptlen, req->cryptlen);
-#endif
- caam_dump_sg(KERN_ERR, "src @" __stringify(__LINE__)": ",
+
+ caam_dump_sg("src @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->src,
edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
@@ -1285,7 +1272,7 @@ static void init_skcipher_job(struct skcipher_request *req,
if (likely(req->src == req->dst)) {
dst_dma = src_dma + !!ivsize * sizeof(struct sec4_sg_entry);
out_options = in_options;
- } else if (edesc->mapped_dst_nents == 1) {
+ } else if (!ivsize && edesc->mapped_dst_nents == 1) {
dst_dma = sg_dma_address(req->dst);
} else {
dst_dma = edesc->sec4_sg_dma + sec4_sg_index *
@@ -1293,7 +1280,7 @@ static void init_skcipher_job(struct skcipher_request *req,
out_options = LDST_SGF;
}
- append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options);
+ append_seq_out_ptr(desc, dst_dma, req->cryptlen + ivsize, out_options);
}
/*
@@ -1309,37 +1296,36 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+ int src_len, dst_len = 0;
struct aead_edesc *edesc;
int sec4_sg_index, sec4_sg_len, sec4_sg_bytes;
unsigned int authsize = ctx->authsize;
if (unlikely(req->dst != req->src)) {
- src_nents = sg_nents_for_len(req->src, req->assoclen +
- req->cryptlen);
+ src_len = req->assoclen + req->cryptlen;
+ dst_len = src_len + (encrypt ? authsize : (-authsize));
+
+ src_nents = sg_nents_for_len(req->src, src_len);
if (unlikely(src_nents < 0)) {
dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
- req->assoclen + req->cryptlen);
+ src_len);
return ERR_PTR(src_nents);
}
- dst_nents = sg_nents_for_len(req->dst, req->assoclen +
- req->cryptlen +
- (encrypt ? authsize :
- (-authsize)));
+ dst_nents = sg_nents_for_len(req->dst, dst_len);
if (unlikely(dst_nents < 0)) {
dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
- req->assoclen + req->cryptlen +
- (encrypt ? authsize : (-authsize)));
+ dst_len);
return ERR_PTR(dst_nents);
}
} else {
- src_nents = sg_nents_for_len(req->src, req->assoclen +
- req->cryptlen +
- (encrypt ? authsize : 0));
+ src_len = req->assoclen + req->cryptlen +
+ (encrypt ? authsize : 0);
+
+ src_nents = sg_nents_for_len(req->src, src_len);
if (unlikely(src_nents < 0)) {
dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
- req->assoclen + req->cryptlen +
- (encrypt ? authsize : 0));
+ src_len);
return ERR_PTR(src_nents);
}
}
@@ -1380,8 +1366,16 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
}
}
+ /*
+ * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
+ * the end of the table by allocating more S/G entries.
+ */
sec4_sg_len = mapped_src_nents > 1 ? mapped_src_nents : 0;
- sec4_sg_len += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
+ if (mapped_dst_nents > 1)
+ sec4_sg_len += pad_sg_nents(mapped_dst_nents);
+ else
+ sec4_sg_len = pad_sg_nents(sec4_sg_len);
+
sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
@@ -1403,12 +1397,12 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
sec4_sg_index = 0;
if (mapped_src_nents > 1) {
- sg_to_sec4_sg_last(req->src, mapped_src_nents,
+ sg_to_sec4_sg_last(req->src, src_len,
edesc->sec4_sg + sec4_sg_index, 0);
sec4_sg_index += mapped_src_nents;
}
if (mapped_dst_nents > 1) {
- sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
+ sg_to_sec4_sg_last(req->dst, dst_len,
edesc->sec4_sg + sec4_sg_index, 0);
}
@@ -1446,11 +1440,10 @@ static int gcm_encrypt(struct aead_request *req)
/* Create and submit job descriptor */
init_gcm_job(req, edesc, all_contig, true);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
- desc_bytes(edesc->hw_desc), 1);
-#endif
+
+ print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+ desc_bytes(edesc->hw_desc), 1);
desc = edesc->hw_desc;
ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
@@ -1556,11 +1549,10 @@ static int aead_encrypt(struct aead_request *req)
/* Create and submit job descriptor */
init_authenc_job(req, edesc, all_contig, true);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
- desc_bytes(edesc->hw_desc), 1);
-#endif
+
+ print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+ desc_bytes(edesc->hw_desc), 1);
desc = edesc->hw_desc;
ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
@@ -1591,11 +1583,10 @@ static int gcm_decrypt(struct aead_request *req)
/* Create and submit job descriptor*/
init_gcm_job(req, edesc, all_contig, false);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
- desc_bytes(edesc->hw_desc), 1);
-#endif
+
+ print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+ desc_bytes(edesc->hw_desc), 1);
desc = edesc->hw_desc;
ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
@@ -1627,7 +1618,7 @@ static int aead_decrypt(struct aead_request *req)
u32 *desc;
int ret = 0;
- caam_dump_sg(KERN_ERR, "dec src@" __stringify(__LINE__)": ",
+ caam_dump_sg("dec src@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->src,
req->assoclen + req->cryptlen, 1);
@@ -1639,11 +1630,10 @@ static int aead_decrypt(struct aead_request *req)
/* Create and submit job descriptor*/
init_authenc_job(req, edesc, all_contig, false);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
- desc_bytes(edesc->hw_desc), 1);
-#endif
+
+ print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+ desc_bytes(edesc->hw_desc), 1);
desc = edesc->hw_desc;
ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
@@ -1719,7 +1709,29 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
else
sec4_sg_ents = mapped_src_nents + !!ivsize;
dst_sg_idx = sec4_sg_ents;
- sec4_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
+
+ /*
+ * Input, output HW S/G tables: [IV, src][dst, IV]
+ * IV entries point to the same buffer
+ * If src == dst, S/G entries are reused (S/G tables overlap)
+ *
+ * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
+ * the end of the table by allocating more S/G entries. Logic:
+ * if (output S/G)
+ * pad output S/G, if needed
+ * else if (input S/G) ...
+ * pad input S/G, if needed
+ */
+ if (ivsize || mapped_dst_nents > 1) {
+ if (req->src == req->dst)
+ sec4_sg_ents = !!ivsize + pad_sg_nents(sec4_sg_ents);
+ else
+ sec4_sg_ents += pad_sg_nents(mapped_dst_nents +
+ !!ivsize);
+ } else {
+ sec4_sg_ents = pad_sg_nents(sec4_sg_ents);
+ }
+
sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
/*
@@ -1744,10 +1756,10 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
/* Make sure IV is located in a DMAable area */
if (ivsize) {
- iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
+ iv = (u8 *)edesc->sec4_sg + sec4_sg_bytes;
memcpy(iv, req->iv, ivsize);
- iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_TO_DEVICE);
+ iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_BIDIRECTIONAL);
if (dma_mapping_error(jrdev, iv_dma)) {
dev_err(jrdev, "unable to map IV\n");
caam_unmap(jrdev, req->src, req->dst, src_nents,
@@ -1759,13 +1771,20 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
}
if (dst_sg_idx)
- sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg +
- !!ivsize, 0);
+ sg_to_sec4_sg(req->src, req->cryptlen, edesc->sec4_sg +
+ !!ivsize, 0);
- if (mapped_dst_nents > 1) {
- sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
- edesc->sec4_sg + dst_sg_idx, 0);
- }
+ if (req->src != req->dst && (ivsize || mapped_dst_nents > 1))
+ sg_to_sec4_sg(req->dst, req->cryptlen, edesc->sec4_sg +
+ dst_sg_idx, 0);
+
+ if (ivsize)
+ dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx +
+ mapped_dst_nents, iv_dma, ivsize, 0);
+
+ if (ivsize || mapped_dst_nents > 1)
+ sg_to_sec4_set_last(edesc->sec4_sg + dst_sg_idx +
+ mapped_dst_nents);
if (sec4_sg_bytes) {
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
@@ -1782,11 +1801,9 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
edesc->iv_dma = iv_dma;
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "skcipher sec4_sg@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
- sec4_sg_bytes, 1);
-#endif
+ print_hex_dump_debug("skcipher sec4_sg@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
+ sec4_sg_bytes, 1);
return edesc;
}
@@ -1807,11 +1824,11 @@ static int skcipher_encrypt(struct skcipher_request *req)
/* Create and submit job descriptor*/
init_skcipher_job(req, edesc, true);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "skcipher jobdesc@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
- desc_bytes(edesc->hw_desc), 1);
-#endif
+
+ print_hex_dump_debug("skcipher jobdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+ desc_bytes(edesc->hw_desc), 1);
+
desc = edesc->hw_desc;
ret = caam_jr_enqueue(jrdev, desc, skcipher_encrypt_done, req);
@@ -1830,7 +1847,6 @@ static int skcipher_decrypt(struct skcipher_request *req)
struct skcipher_edesc *edesc;
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
- int ivsize = crypto_skcipher_ivsize(skcipher);
struct device *jrdev = ctx->jrdev;
u32 *desc;
int ret = 0;
@@ -1840,22 +1856,13 @@ static int skcipher_decrypt(struct skcipher_request *req)
if (IS_ERR(edesc))
return PTR_ERR(edesc);
- /*
- * The crypto API expects us to set the IV (req->iv) to the last
- * ciphertext block.
- */
- if (ivsize)
- scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen -
- ivsize, ivsize, 0);
-
/* Create and submit job descriptor*/
init_skcipher_job(req, edesc, false);
desc = edesc->hw_desc;
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "skcipher jobdesc@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
- desc_bytes(edesc->hw_desc), 1);
-#endif
+
+ print_hex_dump_debug("skcipher jobdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+ desc_bytes(edesc->hw_desc), 1);
ret = caam_jr_enqueue(jrdev, desc, skcipher_decrypt_done, req);
if (!ret) {
@@ -3444,7 +3451,7 @@ static void caam_aead_exit(struct crypto_aead *tfm)
caam_exit_common(crypto_aead_ctx(tfm));
}
-static void __exit caam_algapi_exit(void)
+void caam_algapi_exit(void)
{
int i;
@@ -3489,43 +3496,15 @@ static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
alg->exit = caam_aead_exit;
}
-static int __init caam_algapi_init(void)
+int caam_algapi_init(struct device *ctrldev)
{
- struct device_node *dev_node;
- struct platform_device *pdev;
- struct caam_drv_private *priv;
+ struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
int i = 0, err = 0;
u32 aes_vid, aes_inst, des_inst, md_vid, md_inst, ccha_inst, ptha_inst;
u32 arc4_inst;
unsigned int md_limit = SHA512_DIGEST_SIZE;
bool registered = false, gcm_support;
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
- if (!dev_node) {
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
- if (!dev_node)
- return -ENODEV;
- }
-
- pdev = of_find_device_by_node(dev_node);
- if (!pdev) {
- of_node_put(dev_node);
- return -ENODEV;
- }
-
- priv = dev_get_drvdata(&pdev->dev);
- of_node_put(dev_node);
-
- /*
- * If priv is NULL, it's probably because the caam driver wasn't
- * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
- */
- if (!priv) {
- err = -ENODEV;
- goto out_put_dev;
- }
-
-
/*
* Register crypto algorithms the device supports.
* First, detect presence and attributes of DES, AES, and MD blocks.
@@ -3668,14 +3647,5 @@ static int __init caam_algapi_init(void)
if (registered)
pr_info("caam algorithms registered in /proc/crypto\n");
-out_put_dev:
- put_device(&pdev->dev);
return err;
}
-
-module_init(caam_algapi_init);
-module_exit(caam_algapi_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("FSL CAAM support for crypto API");
-MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
index 1e1a376edc2f..72531837571e 100644
--- a/drivers/crypto/caam/caamalg_desc.c
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -33,12 +33,11 @@ static inline void append_dec_op1(u32 *desc, u32 type)
}
jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
- append_operation(desc, type | OP_ALG_AS_INITFINAL |
- OP_ALG_DECRYPT);
+ append_operation(desc, type | OP_ALG_AS_INIT | OP_ALG_DECRYPT);
uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
set_jump_tgt_here(desc, jump_cmd);
- append_operation(desc, type | OP_ALG_AS_INITFINAL |
- OP_ALG_DECRYPT | OP_ALG_AAI_DK);
+ append_operation(desc, type | OP_ALG_AS_INIT | OP_ALG_DECRYPT |
+ OP_ALG_AAI_DK);
set_jump_tgt_here(desc, uncond_jump_cmd);
}
@@ -115,11 +114,9 @@ void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
LDST_SRCDST_BYTE_CONTEXT);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "aead null enc shdesc@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("aead null enc shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
}
EXPORT_SYMBOL(cnstr_shdsc_aead_null_encap);
@@ -204,11 +201,9 @@ void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 |
FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "aead null dec shdesc@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("aead null dec shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
}
EXPORT_SYMBOL(cnstr_shdsc_aead_null_decap);
@@ -358,10 +353,9 @@ void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
LDST_SRCDST_BYTE_CONTEXT);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "aead enc shdesc@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("aead enc shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
}
EXPORT_SYMBOL(cnstr_shdsc_aead_encap);
@@ -475,10 +469,9 @@ void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 |
FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "aead dec shdesc@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("aead dec shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
}
EXPORT_SYMBOL(cnstr_shdsc_aead_decap);
@@ -613,11 +606,9 @@ copy_iv:
append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
LDST_SRCDST_BYTE_CONTEXT);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "aead givenc shdesc@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("aead givenc shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
}
EXPORT_SYMBOL(cnstr_shdsc_aead_givencap);
@@ -742,10 +733,9 @@ void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
LDST_SRCDST_BYTE_CONTEXT);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "gcm enc shdesc@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("gcm enc shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
}
EXPORT_SYMBOL(cnstr_shdsc_gcm_encap);
@@ -838,10 +828,9 @@ void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "gcm dec shdesc@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("gcm dec shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
}
EXPORT_SYMBOL(cnstr_shdsc_gcm_decap);
@@ -933,11 +922,9 @@ void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
LDST_SRCDST_BYTE_CONTEXT);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "rfc4106 enc shdesc@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("rfc4106 enc shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
}
EXPORT_SYMBOL(cnstr_shdsc_rfc4106_encap);
@@ -1030,11 +1017,9 @@ void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "rfc4106 dec shdesc@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("rfc4106 dec shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
}
EXPORT_SYMBOL(cnstr_shdsc_rfc4106_decap);
@@ -1115,11 +1100,9 @@ void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
LDST_SRCDST_BYTE_CONTEXT);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "rfc4543 enc shdesc@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("rfc4543 enc shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
}
EXPORT_SYMBOL(cnstr_shdsc_rfc4543_encap);
@@ -1205,11 +1188,9 @@ void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "rfc4543 dec shdesc@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("rfc4543 dec shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
}
EXPORT_SYMBOL(cnstr_shdsc_rfc4543_decap);
@@ -1410,17 +1391,21 @@ void cnstr_shdsc_skcipher_encap(u32 * const desc, struct alginfo *cdata,
LDST_OFFSET_SHIFT));
/* Load operation */
- append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INIT |
OP_ALG_ENCRYPT);
/* Perform operation */
skcipher_append_src_dst(desc);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "skcipher enc shdesc@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ /* Store IV */
+ if (ivsize)
+ append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB | (ctx1_iv_off <<
+ LDST_OFFSET_SHIFT));
+
+ print_hex_dump_debug("skcipher enc shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
}
EXPORT_SYMBOL(cnstr_shdsc_skcipher_encap);
@@ -1479,7 +1464,7 @@ void cnstr_shdsc_skcipher_decap(u32 * const desc, struct alginfo *cdata,
/* Choose operation */
if (ctx1_iv_off)
- append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INIT |
OP_ALG_DECRYPT);
else
append_dec_op1(desc, cdata->algtype);
@@ -1487,11 +1472,15 @@ void cnstr_shdsc_skcipher_decap(u32 * const desc, struct alginfo *cdata,
/* Perform operation */
skcipher_append_src_dst(desc);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "skcipher dec shdesc@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ /* Store IV */
+ if (ivsize)
+ append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB | (ctx1_iv_off <<
+ LDST_OFFSET_SHIFT));
+
+ print_hex_dump_debug("skcipher dec shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
}
EXPORT_SYMBOL(cnstr_shdsc_skcipher_decap);
@@ -1538,11 +1527,13 @@ void cnstr_shdsc_xts_skcipher_encap(u32 * const desc, struct alginfo *cdata)
/* Perform operation */
skcipher_append_src_dst(desc);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "xts skcipher enc shdesc@" __stringify(__LINE__) ": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ /* Store upper 8B of IV */
+ append_seq_store(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
+ (0x20 << LDST_OFFSET_SHIFT));
+
+ print_hex_dump_debug("xts skcipher enc shdesc@" __stringify(__LINE__)
+ ": ", DUMP_PREFIX_ADDRESS, 16, 4,
+ desc, desc_bytes(desc), 1);
}
EXPORT_SYMBOL(cnstr_shdsc_xts_skcipher_encap);
@@ -1588,11 +1579,13 @@ void cnstr_shdsc_xts_skcipher_decap(u32 * const desc, struct alginfo *cdata)
/* Perform operation */
skcipher_append_src_dst(desc);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "xts skcipher dec shdesc@" __stringify(__LINE__) ": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ /* Store upper 8B of IV */
+ append_seq_store(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
+ (0x20 << LDST_OFFSET_SHIFT));
+
+ print_hex_dump_debug("xts skcipher dec shdesc@" __stringify(__LINE__)
+ ": ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
}
EXPORT_SYMBOL(cnstr_shdsc_xts_skcipher_decap);
diff --git a/drivers/crypto/caam/caamalg_desc.h b/drivers/crypto/caam/caamalg_desc.h
index d5ca42ff961a..da4a4ee60c80 100644
--- a/drivers/crypto/caam/caamalg_desc.h
+++ b/drivers/crypto/caam/caamalg_desc.h
@@ -44,9 +44,9 @@
#define DESC_SKCIPHER_BASE (3 * CAAM_CMD_SZ)
#define DESC_SKCIPHER_ENC_LEN (DESC_SKCIPHER_BASE + \
- 20 * CAAM_CMD_SZ)
+ 21 * CAAM_CMD_SZ)
#define DESC_SKCIPHER_DEC_LEN (DESC_SKCIPHER_BASE + \
- 15 * CAAM_CMD_SZ)
+ 16 * CAAM_CMD_SZ)
void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
unsigned int icvsize, int era);
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index d290d6b41825..32f0f8a72067 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -4,7 +4,7 @@
* Based on caamalg.c
*
* Copyright 2013-2016 Freescale Semiconductor, Inc.
- * Copyright 2016-2018 NXP
+ * Copyright 2016-2019 NXP
*/
#include "compat.h"
@@ -214,13 +214,11 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key,
if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
goto badkey;
-#ifdef DEBUG
- dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
+ dev_dbg(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
keys.authkeylen + keys.enckeylen, keys.enckeylen,
keys.authkeylen);
- print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-#endif
+ print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
/*
* If DKP is supported, use it in the shared descriptor to generate
@@ -237,7 +235,7 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key,
memcpy(ctx->key, keys.authkey, keys.authkeylen);
memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
keys.enckeylen);
- dma_sync_single_for_device(jrdev, ctx->key_dma,
+ dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
ctx->adata.keylen_pad +
keys.enckeylen, ctx->dir);
goto skip_split_key;
@@ -251,8 +249,9 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key,
/* postpend encryption key to auth split key */
memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
- dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
- keys.enckeylen, ctx->dir);
+ dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
+ ctx->adata.keylen_pad + keys.enckeylen,
+ ctx->dir);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
@@ -386,13 +385,12 @@ static int gcm_setkey(struct crypto_aead *aead,
struct device *jrdev = ctx->jrdev;
int ret;
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-#endif
+ print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
memcpy(ctx->key, key, keylen);
- dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
+ dma_sync_single_for_device(jrdev->parent, ctx->key_dma, keylen,
+ ctx->dir);
ctx->cdata.keylen = keylen;
ret = gcm_set_sh_desc(aead);
@@ -485,10 +483,8 @@ static int rfc4106_setkey(struct crypto_aead *aead,
if (keylen < 4)
return -EINVAL;
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-#endif
+ print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
memcpy(ctx->key, key, keylen);
/*
@@ -496,8 +492,8 @@ static int rfc4106_setkey(struct crypto_aead *aead,
* in the nonce. Update the AES key length.
*/
ctx->cdata.keylen = keylen - 4;
- dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
- ctx->dir);
+ dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
+ ctx->cdata.keylen, ctx->dir);
ret = rfc4106_set_sh_desc(aead);
if (ret)
@@ -589,10 +585,8 @@ static int rfc4543_setkey(struct crypto_aead *aead,
if (keylen < 4)
return -EINVAL;
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-#endif
+ print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
memcpy(ctx->key, key, keylen);
/*
@@ -600,8 +594,8 @@ static int rfc4543_setkey(struct crypto_aead *aead,
* in the nonce. Update the AES key length.
*/
ctx->cdata.keylen = keylen - 4;
- dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
- ctx->dir);
+ dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
+ ctx->cdata.keylen, ctx->dir);
ret = rfc4543_set_sh_desc(aead);
if (ret)
@@ -644,10 +638,9 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
const bool is_rfc3686 = alg->caam.rfc3686;
int ret = 0;
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-#endif
+ print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+
/*
* AES-CTR needs to load IV in CONTEXT1 reg
* at an offset of 128bits (16bytes)
@@ -838,7 +831,8 @@ static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
static void caam_unmap(struct device *dev, struct scatterlist *src,
struct scatterlist *dst, int src_nents,
int dst_nents, dma_addr_t iv_dma, int ivsize,
- dma_addr_t qm_sg_dma, int qm_sg_bytes)
+ enum dma_data_direction iv_dir, dma_addr_t qm_sg_dma,
+ int qm_sg_bytes)
{
if (dst != src) {
if (src_nents)
@@ -850,7 +844,7 @@ static void caam_unmap(struct device *dev, struct scatterlist *src,
}
if (iv_dma)
- dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
+ dma_unmap_single(dev, iv_dma, ivsize, iv_dir);
if (qm_sg_bytes)
dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
}
@@ -863,7 +857,8 @@ static void aead_unmap(struct device *dev,
int ivsize = crypto_aead_ivsize(aead);
caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
- edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
+ edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma,
+ edesc->qm_sg_bytes);
dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
}
@@ -874,7 +869,8 @@ static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
int ivsize = crypto_skcipher_ivsize(skcipher);
caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
- edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
+ edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma,
+ edesc->qm_sg_bytes);
}
static void aead_done(struct caam_drv_req *drv_req, u32 status)
@@ -924,6 +920,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+ int src_len, dst_len = 0;
struct aead_edesc *edesc;
dma_addr_t qm_sg_dma, iv_dma = 0;
int ivsize = 0;
@@ -945,13 +942,13 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
}
if (likely(req->src == req->dst)) {
- src_nents = sg_nents_for_len(req->src, req->assoclen +
- req->cryptlen +
- (encrypt ? authsize : 0));
+ src_len = req->assoclen + req->cryptlen +
+ (encrypt ? authsize : 0);
+
+ src_nents = sg_nents_for_len(req->src, src_len);
if (unlikely(src_nents < 0)) {
dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
- req->assoclen + req->cryptlen +
- (encrypt ? authsize : 0));
+ src_len);
qi_cache_free(edesc);
return ERR_PTR(src_nents);
}
@@ -964,23 +961,21 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
return ERR_PTR(-ENOMEM);
}
} else {
- src_nents = sg_nents_for_len(req->src, req->assoclen +
- req->cryptlen);
+ src_len = req->assoclen + req->cryptlen;
+ dst_len = src_len + (encrypt ? authsize : (-authsize));
+
+ src_nents = sg_nents_for_len(req->src, src_len);
if (unlikely(src_nents < 0)) {
dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
- req->assoclen + req->cryptlen);
+ src_len);
qi_cache_free(edesc);
return ERR_PTR(src_nents);
}
- dst_nents = sg_nents_for_len(req->dst, req->assoclen +
- req->cryptlen +
- (encrypt ? authsize :
- (-authsize)));
+ dst_nents = sg_nents_for_len(req->dst, dst_len);
if (unlikely(dst_nents < 0)) {
dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
- req->assoclen + req->cryptlen +
- (encrypt ? authsize : (-authsize)));
+ dst_len);
qi_cache_free(edesc);
return ERR_PTR(dst_nents);
}
@@ -1019,9 +1014,24 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
/*
* Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
* Input is not contiguous.
+ * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
+ * the end of the table by allocating more S/G entries. Logic:
+ * if (src != dst && output S/G)
+ * pad output S/G, if needed
+ * else if (src == dst && S/G)
+ * overlapping S/Gs; pad one of them
+ * else if (input S/G) ...
+ * pad input S/G, if needed
*/
- qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
- (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
+ qm_sg_ents = 1 + !!ivsize + mapped_src_nents;
+ if (mapped_dst_nents > 1)
+ qm_sg_ents += pad_sg_nents(mapped_dst_nents);
+ else if ((req->src == req->dst) && (mapped_src_nents > 1))
+ qm_sg_ents = max(pad_sg_nents(qm_sg_ents),
+ 1 + !!ivsize + pad_sg_nents(mapped_src_nents));
+ else
+ qm_sg_ents = pad_sg_nents(qm_sg_ents);
+
sg_table = &edesc->sgt[0];
qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
@@ -1029,7 +1039,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
qm_sg_ents, ivsize);
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0);
+ 0, DMA_NONE, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1044,7 +1054,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
if (dma_mapping_error(qidev, iv_dma)) {
dev_err(qidev, "unable to map IV\n");
caam_unmap(qidev, req->src, req->dst, src_nents,
- dst_nents, 0, 0, 0, 0);
+ dst_nents, 0, 0, DMA_NONE, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1063,7 +1073,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
dev_err(qidev, "unable to map assoclen\n");
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, 0, 0);
+ iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1074,19 +1084,18 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
qm_sg_index++;
}
- sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
+ sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0);
qm_sg_index += mapped_src_nents;
if (mapped_dst_nents > 1)
- sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
- qm_sg_index, 0);
+ sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0);
qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
if (dma_mapping_error(qidev, qm_sg_dma)) {
dev_err(qidev, "unable to map S/G table\n");
dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, 0, 0);
+ iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1109,7 +1118,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
(1 + !!ivsize) * sizeof(*sg_table),
out_len, 0);
- } else if (mapped_dst_nents == 1) {
+ } else if (mapped_dst_nents <= 1) {
dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len,
0);
} else {
@@ -1182,33 +1191,28 @@ static void skcipher_done(struct caam_drv_req *drv_req, u32 status)
struct device *qidev = caam_ctx->qidev;
int ivsize = crypto_skcipher_ivsize(skcipher);
-#ifdef DEBUG
- dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
-#endif
+ dev_dbg(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
edesc = container_of(drv_req, typeof(*edesc), drv_req);
if (status)
caam_jr_strstatus(qidev, status);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
- edesc->src_nents > 1 ? 100 : ivsize, 1);
- caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
+ print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
+ edesc->src_nents > 1 ? 100 : ivsize, 1);
+ caam_dump_sg("dst @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
-#endif
skcipher_unmap(qidev, edesc, req);
/*
* The crypto API expects us to set the IV (req->iv) to the last
- * ciphertext block. This is used e.g. by the CTS mode.
+ * ciphertext block (CBC mode) or last counter (CTR mode).
+ * This is used e.g. by the CTS mode.
*/
- if (edesc->drv_req.drv_ctx->op_type == ENCRYPT)
- scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen -
- ivsize, ivsize, 0);
+ memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, ivsize);
qi_cache_free(edesc);
skcipher_request_complete(req, status);
@@ -1276,14 +1280,26 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
qm_sg_ents = 1 + mapped_src_nents;
dst_sg_idx = qm_sg_ents;
- qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
+ /*
+ * Input, output HW S/G tables: [IV, src][dst, IV]
+ * IV entries point to the same buffer
+ * If src == dst, S/G entries are reused (S/G tables overlap)
+ *
+ * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
+ * the end of the table by allocating more S/G entries.
+ */
+ if (req->src != req->dst)
+ qm_sg_ents += pad_sg_nents(mapped_dst_nents + 1);
+ else
+ qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents);
+
qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
ivsize > CAAM_QI_MEMCACHE_SIZE)) {
dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
qm_sg_ents, ivsize);
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0);
+ 0, DMA_NONE, 0, 0);
return ERR_PTR(-ENOMEM);
}
@@ -1292,7 +1308,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
if (unlikely(!edesc)) {
dev_err(qidev, "could not allocate extended descriptor\n");
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0);
+ 0, DMA_NONE, 0, 0);
return ERR_PTR(-ENOMEM);
}
@@ -1301,11 +1317,11 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
iv = (u8 *)(sg_table + qm_sg_ents);
memcpy(iv, req->iv, ivsize);
- iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
+ iv_dma = dma_map_single(qidev, iv, ivsize, DMA_BIDIRECTIONAL);
if (dma_mapping_error(qidev, iv_dma)) {
dev_err(qidev, "unable to map IV\n");
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0);
+ 0, DMA_NONE, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1319,18 +1335,20 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
edesc->drv_req.drv_ctx = drv_ctx;
dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
- sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
+ sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0);
- if (mapped_dst_nents > 1)
- sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
- dst_sg_idx, 0);
+ if (req->src != req->dst)
+ sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0);
+
+ dma_to_qm_sg_one(sg_table + dst_sg_idx + mapped_dst_nents, iv_dma,
+ ivsize, 0);
edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
DMA_TO_DEVICE);
if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
dev_err(qidev, "unable to map S/G table\n");
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, 0, 0);
+ iv_dma, ivsize, DMA_BIDIRECTIONAL, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1340,16 +1358,14 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
ivsize + req->cryptlen, 0);
- if (req->src == req->dst) {
+ if (req->src == req->dst)
dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
- sizeof(*sg_table), req->cryptlen, 0);
- } else if (mapped_dst_nents > 1) {
+ sizeof(*sg_table), req->cryptlen + ivsize,
+ 0);
+ else
dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
- sizeof(*sg_table), req->cryptlen, 0);
- } else {
- dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
- req->cryptlen, 0);
- }
+ sizeof(*sg_table), req->cryptlen + ivsize,
+ 0);
return edesc;
}
@@ -1359,7 +1375,6 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
struct skcipher_edesc *edesc;
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
- int ivsize = crypto_skcipher_ivsize(skcipher);
int ret;
if (unlikely(caam_congested))
@@ -1370,14 +1385,6 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
if (IS_ERR(edesc))
return PTR_ERR(edesc);
- /*
- * The crypto API expects us to set the IV (req->iv) to the last
- * ciphertext block.
- */
- if (!encrypt)
- scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen -
- ivsize, ivsize, 0);
-
ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
if (!ret) {
ret = -EINPROGRESS;
@@ -2382,6 +2389,7 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
bool uses_dkp)
{
struct caam_drv_private *priv;
+ struct device *dev;
/*
* distribute tfms across job rings to ensure in-order
@@ -2393,16 +2401,17 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
return PTR_ERR(ctx->jrdev);
}
- priv = dev_get_drvdata(ctx->jrdev->parent);
+ dev = ctx->jrdev->parent;
+ priv = dev_get_drvdata(dev);
if (priv->era >= 6 && uses_dkp)
ctx->dir = DMA_BIDIRECTIONAL;
else
ctx->dir = DMA_TO_DEVICE;
- ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key),
+ ctx->key_dma = dma_map_single(dev, ctx->key, sizeof(ctx->key),
ctx->dir);
- if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
- dev_err(ctx->jrdev, "unable to map key\n");
+ if (dma_mapping_error(dev, ctx->key_dma)) {
+ dev_err(dev, "unable to map key\n");
caam_jr_free(ctx->jrdev);
return -ENOMEM;
}
@@ -2411,7 +2420,7 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
- ctx->qidev = priv->qidev;
+ ctx->qidev = dev;
spin_lock_init(&ctx->lock);
ctx->drv_ctx[ENCRYPT] = NULL;
@@ -2445,7 +2454,8 @@ static void caam_exit_common(struct caam_ctx *ctx)
caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]);
caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
- dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key), ctx->dir);
+ dma_unmap_single(ctx->jrdev->parent, ctx->key_dma, sizeof(ctx->key),
+ ctx->dir);
caam_jr_free(ctx->jrdev);
}
@@ -2460,7 +2470,7 @@ static void caam_aead_exit(struct crypto_aead *tfm)
caam_exit_common(crypto_aead_ctx(tfm));
}
-static void __exit caam_qi_algapi_exit(void)
+void caam_qi_algapi_exit(void)
{
int i;
@@ -2505,45 +2515,17 @@ static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
alg->exit = caam_aead_exit;
}
-static int __init caam_qi_algapi_init(void)
+int caam_qi_algapi_init(struct device *ctrldev)
{
- struct device_node *dev_node;
- struct platform_device *pdev;
- struct device *ctrldev;
- struct caam_drv_private *priv;
+ struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
int i = 0, err = 0;
u32 aes_vid, aes_inst, des_inst, md_vid, md_inst;
unsigned int md_limit = SHA512_DIGEST_SIZE;
bool registered = false;
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
- if (!dev_node) {
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
- if (!dev_node)
- return -ENODEV;
- }
-
- pdev = of_find_device_by_node(dev_node);
- of_node_put(dev_node);
- if (!pdev)
- return -ENODEV;
-
- ctrldev = &pdev->dev;
- priv = dev_get_drvdata(ctrldev);
-
- /*
- * If priv is NULL, it's probably because the caam driver wasn't
- * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
- */
- if (!priv || !priv->qi_present) {
- err = -ENODEV;
- goto out_put_dev;
- }
-
if (caam_dpaa2) {
dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n");
- err = -ENODEV;
- goto out_put_dev;
+ return -ENODEV;
}
/*
@@ -2598,7 +2580,7 @@ static int __init caam_qi_algapi_init(void)
err = crypto_register_skcipher(&t_alg->skcipher);
if (err) {
- dev_warn(priv->qidev, "%s alg registration failed\n",
+ dev_warn(ctrldev, "%s alg registration failed\n",
t_alg->skcipher.base.cra_driver_name);
continue;
}
@@ -2654,16 +2636,7 @@ static int __init caam_qi_algapi_init(void)
}
if (registered)
- dev_info(priv->qidev, "algorithms registered in /proc/crypto\n");
+ dev_info(ctrldev, "algorithms registered in /proc/crypto\n");
-out_put_dev:
- put_device(ctrldev);
return err;
}
-
-module_init(caam_qi_algapi_init);
-module_exit(caam_qi_algapi_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Support for crypto API using CAAM-QI backend");
-MODULE_AUTHOR("Freescale Semiconductor");
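
Both the AEAD and skcipher allocation paths above size the HW S/G table with pad_sg_nents(), whose definition is not part of this hunk. The sketch below assumes it simply rounds the entry count up to a multiple of 4, matching the "4 S/G entries at a time" prefetch mentioned in the comments; the max() in the src == dst branch then allocates enough entries for whichever of the two overlapping table views is larger after padding.

#include <stdio.h>

#define SG_PREFETCH	4	/* assumed HW read burst, per the comments above */

/* assumed behaviour of pad_sg_nents(): round up to a full prefetch burst */
static int pad_sg_nents_sketch(int sg_nents)
{
	return (sg_nents + SG_PREFETCH - 1) & ~(SG_PREFETCH - 1);
}

int main(void)
{
	/* e.g. assoclen entry + IV entry + 5 src entries = 7 -> 8 allocated */
	int used = 1 + 1 + 5;

	printf("%d entries used, %d allocated\n", used, pad_sg_nents_sketch(used));
	return 0;
}
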
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
index 2b2980a8a9b9..06bf32c32cbd 100644
--- a/drivers/crypto/caam/caamalg_qi2.c
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* Copyright 2015-2016 Freescale Semiconductor Inc.
- * Copyright 2017-2018 NXP
+ * Copyright 2017-2019 NXP
*/
#include "compat.h"
@@ -140,7 +140,8 @@ static struct caam_request *to_caam_req(struct crypto_async_request *areq)
static void caam_unmap(struct device *dev, struct scatterlist *src,
struct scatterlist *dst, int src_nents,
int dst_nents, dma_addr_t iv_dma, int ivsize,
- dma_addr_t qm_sg_dma, int qm_sg_bytes)
+ enum dma_data_direction iv_dir, dma_addr_t qm_sg_dma,
+ int qm_sg_bytes)
{
if (dst != src) {
if (src_nents)
@@ -152,7 +153,7 @@ static void caam_unmap(struct device *dev, struct scatterlist *src,
}
if (iv_dma)
- dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
+ dma_unmap_single(dev, iv_dma, ivsize, iv_dir);
if (qm_sg_bytes)
dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
@@ -371,6 +372,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+ int src_len, dst_len = 0;
struct aead_edesc *edesc;
dma_addr_t qm_sg_dma, iv_dma = 0;
int ivsize = 0;
@@ -387,23 +389,21 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
}
if (unlikely(req->dst != req->src)) {
- src_nents = sg_nents_for_len(req->src, req->assoclen +
- req->cryptlen);
+ src_len = req->assoclen + req->cryptlen;
+ dst_len = src_len + (encrypt ? authsize : (-authsize));
+
+ src_nents = sg_nents_for_len(req->src, src_len);
if (unlikely(src_nents < 0)) {
dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
- req->assoclen + req->cryptlen);
+ src_len);
qi_cache_free(edesc);
return ERR_PTR(src_nents);
}
- dst_nents = sg_nents_for_len(req->dst, req->assoclen +
- req->cryptlen +
- (encrypt ? authsize :
- (-authsize)));
+ dst_nents = sg_nents_for_len(req->dst, dst_len);
if (unlikely(dst_nents < 0)) {
dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
- req->assoclen + req->cryptlen +
- (encrypt ? authsize : (-authsize)));
+ dst_len);
qi_cache_free(edesc);
return ERR_PTR(dst_nents);
}
@@ -434,13 +434,13 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
mapped_dst_nents = 0;
}
} else {
- src_nents = sg_nents_for_len(req->src, req->assoclen +
- req->cryptlen +
- (encrypt ? authsize : 0));
+ src_len = req->assoclen + req->cryptlen +
+ (encrypt ? authsize : 0);
+
+ src_nents = sg_nents_for_len(req->src, src_len);
if (unlikely(src_nents < 0)) {
dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
- req->assoclen + req->cryptlen +
- (encrypt ? authsize : 0));
+ src_len);
qi_cache_free(edesc);
return ERR_PTR(src_nents);
}
@@ -460,9 +460,25 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
/*
* Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
* Input is not contiguous.
+ * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
+ * the end of the table by allocating more S/G entries. Logic:
+ * if (src != dst && output S/G)
+ * pad output S/G, if needed
+ * else if (src == dst && S/G)
+ * overlapping S/Gs; pad one of them
+ * else if (input S/G) ...
+ * pad input S/G, if needed
*/
- qm_sg_nents = 1 + !!ivsize + mapped_src_nents +
- (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
+ qm_sg_nents = 1 + !!ivsize + mapped_src_nents;
+ if (mapped_dst_nents > 1)
+ qm_sg_nents += pad_sg_nents(mapped_dst_nents);
+ else if ((req->src == req->dst) && (mapped_src_nents > 1))
+ qm_sg_nents = max(pad_sg_nents(qm_sg_nents),
+ 1 + !!ivsize +
+ pad_sg_nents(mapped_src_nents));
+ else
+ qm_sg_nents = pad_sg_nents(qm_sg_nents);
+
sg_table = &edesc->sgt[0];
qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
@@ -470,7 +486,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
qm_sg_nents, ivsize);
caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0);
+ 0, DMA_NONE, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -485,7 +501,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
if (dma_mapping_error(dev, iv_dma)) {
dev_err(dev, "unable to map IV\n");
caam_unmap(dev, req->src, req->dst, src_nents,
- dst_nents, 0, 0, 0, 0);
+ dst_nents, 0, 0, DMA_NONE, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -509,7 +525,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
if (dma_mapping_error(dev, edesc->assoclen_dma)) {
dev_err(dev, "unable to map assoclen\n");
caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, 0, 0);
+ iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -520,19 +536,18 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
qm_sg_index++;
}
- sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
+ sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0);
qm_sg_index += mapped_src_nents;
if (mapped_dst_nents > 1)
- sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
- qm_sg_index, 0);
+ sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0);
qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
if (dma_mapping_error(dev, qm_sg_dma)) {
dev_err(dev, "unable to map S/G table\n");
dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, 0, 0);
+ iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -559,6 +574,14 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
dpaa2_fl_set_addr(out_fle, qm_sg_dma +
(1 + !!ivsize) * sizeof(*sg_table));
}
+ } else if (!mapped_dst_nents) {
+ /*
+ * crypto engine requires the output entry to be present when
+ * "frame list" FD is used.
+ * Since engine does not support FMT=2'b11 (unused entry type),
+ * leaving out_fle zeroized is the best option.
+ */
+ goto skip_out_fle;
} else if (mapped_dst_nents == 1) {
dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
@@ -570,6 +593,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
dpaa2_fl_set_len(out_fle, out_len);
+skip_out_fle:
return edesc;
}
@@ -1077,14 +1101,26 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
qm_sg_ents = 1 + mapped_src_nents;
dst_sg_idx = qm_sg_ents;
- qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
+ /*
+ * Input, output HW S/G tables: [IV, src][dst, IV]
+ * IV entries point to the same buffer
+ * If src == dst, S/G entries are reused (S/G tables overlap)
+ *
+ * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
+ * the end of the table by allocating more S/G entries.
+ */
+ if (req->src != req->dst)
+ qm_sg_ents += pad_sg_nents(mapped_dst_nents + 1);
+ else
+ qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents);
+
qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry);
if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
ivsize > CAAM_QI_MEMCACHE_SIZE)) {
dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
qm_sg_ents, ivsize);
caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0);
+ 0, DMA_NONE, 0, 0);
return ERR_PTR(-ENOMEM);
}
@@ -1093,7 +1129,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
if (unlikely(!edesc)) {
dev_err(dev, "could not allocate extended descriptor\n");
caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0);
+ 0, DMA_NONE, 0, 0);
return ERR_PTR(-ENOMEM);
}
@@ -1102,11 +1138,11 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
iv = (u8 *)(sg_table + qm_sg_ents);
memcpy(iv, req->iv, ivsize);
- iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
+ iv_dma = dma_map_single(dev, iv, ivsize, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, iv_dma)) {
dev_err(dev, "unable to map IV\n");
caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0);
+ 0, DMA_NONE, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1117,18 +1153,20 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
edesc->qm_sg_bytes = qm_sg_bytes;
dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
- sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
+ sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0);
- if (mapped_dst_nents > 1)
- sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
- dst_sg_idx, 0);
+ if (req->src != req->dst)
+ sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0);
+
+ dma_to_qm_sg_one(sg_table + dst_sg_idx + mapped_dst_nents, iv_dma,
+ ivsize, 0);
edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
dev_err(dev, "unable to map S/G table\n");
caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, 0, 0);
+ iv_dma, ivsize, DMA_BIDIRECTIONAL, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1136,23 +1174,19 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
dpaa2_fl_set_final(in_fle, true);
dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize);
- dpaa2_fl_set_len(out_fle, req->cryptlen);
+ dpaa2_fl_set_len(out_fle, req->cryptlen + ivsize);
dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
- if (req->src == req->dst) {
- dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
+
+ if (req->src == req->dst)
dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
sizeof(*sg_table));
- } else if (mapped_dst_nents > 1) {
- dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
+ else
dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
sizeof(*sg_table));
- } else {
- dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
- dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
- }
return edesc;
}
@@ -1164,7 +1198,8 @@ static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
int ivsize = crypto_aead_ivsize(aead);
caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
- edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
+ edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma,
+ edesc->qm_sg_bytes);
dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
}
@@ -1175,7 +1210,8 @@ static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
int ivsize = crypto_skcipher_ivsize(skcipher);
caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
- edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
+ edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma,
+ edesc->qm_sg_bytes);
}
static void aead_encrypt_done(void *cbk_ctx, u32 status)
@@ -1324,7 +1360,7 @@ static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
edesc->src_nents > 1 ? 100 : ivsize, 1);
- caam_dump_sg(KERN_DEBUG, "dst @" __stringify(__LINE__)": ",
+ caam_dump_sg("dst @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
@@ -1332,10 +1368,10 @@ static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
/*
* The crypto API expects us to set the IV (req->iv) to the last
- * ciphertext block. This is used e.g. by the CTS mode.
+ * ciphertext block (CBC mode) or last counter (CTR mode).
+ * This is used e.g. by the CTS mode.
*/
- scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - ivsize,
- ivsize, 0);
+ memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, ivsize);
qi_cache_free(edesc);
skcipher_request_complete(req, ecode);
@@ -1362,11 +1398,19 @@ static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
edesc->src_nents > 1 ? 100 : ivsize, 1);
- caam_dump_sg(KERN_DEBUG, "dst @" __stringify(__LINE__)": ",
+ caam_dump_sg("dst @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
skcipher_unmap(ctx->dev, edesc, req);
+
+ /*
+ * The crypto API expects us to set the IV (req->iv) to the last
+ * ciphertext block (CBC mode) or last counter (CTR mode).
+ * This is used e.g. by the CTS mode.
+ */
+ memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, ivsize);
+
qi_cache_free(edesc);
skcipher_request_complete(req, ecode);
}
@@ -1405,7 +1449,6 @@ static int skcipher_decrypt(struct skcipher_request *req)
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct caam_request *caam_req = skcipher_request_ctx(req);
- int ivsize = crypto_skcipher_ivsize(skcipher);
int ret;
/* allocate extended descriptor */
@@ -1413,13 +1456,6 @@ static int skcipher_decrypt(struct skcipher_request *req)
if (IS_ERR(edesc))
return PTR_ERR(edesc);
- /*
- * The crypto API expects us to set the IV (req->iv) to the last
- * ciphertext block.
- */
- scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen - ivsize,
- ivsize, 0);
-
caam_req->flc = &ctx->flc[DECRYPT];
caam_req->flc_dma = ctx->flc_dma[DECRYPT];
caam_req->cbk = skcipher_decrypt_done;
@@ -3380,9 +3416,9 @@ static int ahash_update_ctx(struct ahash_request *req)
if (to_hash) {
struct dpaa2_sg_entry *sg_table;
+ int src_len = req->nbytes - *next_buflen;
- src_nents = sg_nents_for_len(req->src,
- req->nbytes - (*next_buflen));
+ src_nents = sg_nents_for_len(req->src, src_len);
if (src_nents < 0) {
dev_err(ctx->dev, "Invalid number of src SG.\n");
return src_nents;
@@ -3409,7 +3445,7 @@ static int ahash_update_ctx(struct ahash_request *req)
edesc->src_nents = src_nents;
qm_sg_src_index = 1 + (*buflen ? 1 : 0);
- qm_sg_bytes = (qm_sg_src_index + mapped_nents) *
+ qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
sizeof(*sg_table);
sg_table = &edesc->sgt[0];
@@ -3423,7 +3459,7 @@ static int ahash_update_ctx(struct ahash_request *req)
goto unmap_ctx;
if (mapped_nents) {
- sg_to_qm_sg_last(req->src, mapped_nents,
+ sg_to_qm_sg_last(req->src, src_len,
sg_table + qm_sg_src_index, 0);
if (*next_buflen)
scatterwalk_map_and_copy(next_buf, req->src,
@@ -3494,7 +3530,7 @@ static int ahash_final_ctx(struct ahash_request *req)
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
int buflen = *current_buflen(state);
- int qm_sg_bytes, qm_sg_src_index;
+ int qm_sg_bytes;
int digestsize = crypto_ahash_digestsize(ahash);
struct ahash_edesc *edesc;
struct dpaa2_sg_entry *sg_table;
@@ -3505,8 +3541,7 @@ static int ahash_final_ctx(struct ahash_request *req)
if (!edesc)
return -ENOMEM;
- qm_sg_src_index = 1 + (buflen ? 1 : 0);
- qm_sg_bytes = qm_sg_src_index * sizeof(*sg_table);
+ qm_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) * sizeof(*sg_table);
sg_table = &edesc->sgt[0];
ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
@@ -3518,7 +3553,7 @@ static int ahash_final_ctx(struct ahash_request *req)
if (ret)
goto unmap_ctx;
- dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1, true);
+ dpaa2_sg_set_final(sg_table + (buflen ? 1 : 0), true);
edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
DMA_TO_DEVICE);
@@ -3599,7 +3634,8 @@ static int ahash_finup_ctx(struct ahash_request *req)
edesc->src_nents = src_nents;
qm_sg_src_index = 1 + (buflen ? 1 : 0);
- qm_sg_bytes = (qm_sg_src_index + mapped_nents) * sizeof(*sg_table);
+ qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
+ sizeof(*sg_table);
sg_table = &edesc->sgt[0];
ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
@@ -3611,7 +3647,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
if (ret)
goto unmap_ctx;
- sg_to_qm_sg_last(req->src, mapped_nents, sg_table + qm_sg_src_index, 0);
+ sg_to_qm_sg_last(req->src, req->nbytes, sg_table + qm_sg_src_index, 0);
edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
DMA_TO_DEVICE);
@@ -3696,8 +3732,8 @@ static int ahash_digest(struct ahash_request *req)
int qm_sg_bytes;
struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];
- qm_sg_bytes = mapped_nents * sizeof(*sg_table);
- sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
+ qm_sg_bytes = pad_sg_nents(mapped_nents) * sizeof(*sg_table);
+ sg_to_qm_sg_last(req->src, req->nbytes, sg_table, 0);
edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
qm_sg_bytes, DMA_TO_DEVICE);
if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
@@ -3840,9 +3876,9 @@ static int ahash_update_no_ctx(struct ahash_request *req)
if (to_hash) {
struct dpaa2_sg_entry *sg_table;
+ int src_len = req->nbytes - *next_buflen;
- src_nents = sg_nents_for_len(req->src,
- req->nbytes - *next_buflen);
+ src_nents = sg_nents_for_len(req->src, src_len);
if (src_nents < 0) {
dev_err(ctx->dev, "Invalid number of src SG.\n");
return src_nents;
@@ -3868,14 +3904,15 @@ static int ahash_update_no_ctx(struct ahash_request *req)
}
edesc->src_nents = src_nents;
- qm_sg_bytes = (1 + mapped_nents) * sizeof(*sg_table);
+ qm_sg_bytes = pad_sg_nents(1 + mapped_nents) *
+ sizeof(*sg_table);
sg_table = &edesc->sgt[0];
ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
if (ret)
goto unmap_ctx;
- sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
+ sg_to_qm_sg_last(req->src, src_len, sg_table + 1, 0);
if (*next_buflen)
scatterwalk_map_and_copy(next_buf, req->src,
@@ -3987,14 +4024,14 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
}
edesc->src_nents = src_nents;
- qm_sg_bytes = (2 + mapped_nents) * sizeof(*sg_table);
+ qm_sg_bytes = pad_sg_nents(2 + mapped_nents) * sizeof(*sg_table);
sg_table = &edesc->sgt[0];
ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
if (ret)
goto unmap;
- sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
+ sg_to_qm_sg_last(req->src, req->nbytes, sg_table + 1, 0);
edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
DMA_TO_DEVICE);
@@ -4064,9 +4101,9 @@ static int ahash_update_first(struct ahash_request *req)
if (to_hash) {
struct dpaa2_sg_entry *sg_table;
+ int src_len = req->nbytes - *next_buflen;
- src_nents = sg_nents_for_len(req->src,
- req->nbytes - (*next_buflen));
+ src_nents = sg_nents_for_len(req->src, src_len);
if (src_nents < 0) {
dev_err(ctx->dev, "Invalid number of src SG.\n");
return src_nents;
@@ -4101,8 +4138,9 @@ static int ahash_update_first(struct ahash_request *req)
if (mapped_nents > 1) {
int qm_sg_bytes;
- sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
- qm_sg_bytes = mapped_nents * sizeof(*sg_table);
+ sg_to_qm_sg_last(req->src, src_len, sg_table, 0);
+ qm_sg_bytes = pad_sg_nents(mapped_nents) *
+ sizeof(*sg_table);
edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
qm_sg_bytes,
DMA_TO_DEVICE);
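
The skcipher completion paths above (in both caamalg_qi.c and caamalg_qi2.c) now read the updated IV straight out of the extended descriptor, relying on the IV buffer being carved out immediately after the padded HW S/G table and pointed to by the last output S/G entry, which the engine DMA-writes on completion. A standalone sketch of that layout assumption (struct and field names are illustrative, not the driver's):

#include <string.h>
#include <stdint.h>

struct edesc_sketch {
	int	qm_sg_bytes;		/* bytes used by the HW S/G table */
	uint8_t	sgt[4 * 16 + 16];	/* padded S/G table followed by the IV */
};

/*
 * Mirrors memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, ivsize):
 * the updated IV lands in the slot right after the table, and the driver
 * copies it back to req->iv when the request completes.
 */
static void copy_back_iv(uint8_t *req_iv, const struct edesc_sketch *edesc,
			 int ivsize)
{
	memcpy(req_iv, edesc->sgt + edesc->qm_sg_bytes, ivsize);
}
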
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 7205d9f4029e..e4ac5d591ad6 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -82,14 +82,6 @@
#define HASH_MSG_LEN 8
#define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
-#ifdef DEBUG
-/* for print_hex_dumps with line references */
-#define debug(format, arg...) printk(format, arg)
-#else
-#define debug(format, arg...)
-#endif
-
-
static struct list_head hash_list;
/* ahash per-session context */
@@ -243,11 +235,10 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
ctx->ctx_len, true, ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
desc_bytes(desc), ctx->dir);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "ahash update shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+
+ print_hex_dump_debug("ahash update shdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
/* ahash_update_first shared descriptor */
desc = ctx->sh_desc_update_first;
@@ -255,11 +246,9 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
ctx->ctx_len, false, ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
desc_bytes(desc), ctx->dir);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "ahash update first shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("ahash update first shdesc@"__stringify(__LINE__)
+ ": ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
/* ahash_final shared descriptor */
desc = ctx->sh_desc_fin;
@@ -267,11 +256,10 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
ctx->ctx_len, true, ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
desc_bytes(desc), ctx->dir);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
+
+ print_hex_dump_debug("ahash final shdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
/* ahash_digest shared descriptor */
desc = ctx->sh_desc_digest;
@@ -279,12 +267,10 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
ctx->ctx_len, false, ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
desc_bytes(desc), ctx->dir);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "ahash digest shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
+
+ print_hex_dump_debug("ahash digest shdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
return 0;
}
@@ -328,9 +314,9 @@ static int axcbc_set_sh_desc(struct crypto_ahash *ahash)
ctx->ctx_len, ctx->key_dma);
dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
desc_bytes(desc), ctx->dir);
- print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__)" : ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
- 1);
+ print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__)
+ " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
/* shared descriptor for ahash_digest */
desc = ctx->sh_desc_digest;
@@ -377,8 +363,8 @@ static int acmac_set_sh_desc(struct crypto_ahash *ahash)
ctx->ctx_len, 0);
dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
desc_bytes(desc), ctx->dir);
- print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__)" : ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__)
+ " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
desc_bytes(desc), 1);
/* shared descriptor for ahash_digest */
@@ -429,12 +415,11 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
LDST_SRCDST_BYTE_CONTEXT);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
- print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("key_in@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
+ print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
result.err = 0;
init_completion(&result.completion);
@@ -444,11 +429,10 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
/* in progress */
wait_for_completion(&result.completion);
ret = result.err;
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "digested key@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key, digestsize, 1);
-#endif
+
+ print_hex_dump_debug("digested key@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key,
+ digestsize, 1);
}
dma_unmap_single(jrdev, key_dma, *keylen, DMA_BIDIRECTIONAL);
@@ -463,15 +447,14 @@ static int ahash_setkey(struct crypto_ahash *ahash,
const u8 *key, unsigned int keylen)
{
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct device *jrdev = ctx->jrdev;
int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
int digestsize = crypto_ahash_digestsize(ahash);
struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
int ret;
u8 *hashed_key = NULL;
-#ifdef DEBUG
- printk(KERN_ERR "keylen %d\n", keylen);
-#endif
+ dev_dbg(jrdev, "keylen %d\n", keylen);
if (keylen > blocksize) {
hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
@@ -600,11 +583,9 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
int digestsize = crypto_ahash_digestsize(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
-#ifdef DEBUG
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-#endif
+ dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
if (err)
@@ -614,11 +595,9 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
memcpy(req->result, state->caam_ctx, digestsize);
kfree(edesc);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
- ctx->ctx_len, 1);
-#endif
+ print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+ ctx->ctx_len, 1);
req->base.complete(&req->base, err);
}
@@ -631,11 +610,9 @@ static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
-#ifdef DEBUG
int digestsize = crypto_ahash_digestsize(ahash);
- dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-#endif
+ dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
if (err)
@@ -645,15 +622,13 @@ static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
switch_buf(state);
kfree(edesc);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
- ctx->ctx_len, 1);
+ print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+ ctx->ctx_len, 1);
if (req->result)
- print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->result,
- digestsize, 1);
-#endif
+ print_hex_dump_debug("result@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->result,
+ digestsize, 1);
req->base.complete(&req->base, err);
}
@@ -666,11 +641,9 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
int digestsize = crypto_ahash_digestsize(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
-#ifdef DEBUG
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-#endif
+ dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
if (err)
@@ -680,11 +653,9 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
memcpy(req->result, state->caam_ctx, digestsize);
kfree(edesc);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
- ctx->ctx_len, 1);
-#endif
+ print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+ ctx->ctx_len, 1);
req->base.complete(&req->base, err);
}
@@ -697,11 +668,9 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
-#ifdef DEBUG
int digestsize = crypto_ahash_digestsize(ahash);
- dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-#endif
+ dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
if (err)
@@ -711,15 +680,13 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
switch_buf(state);
kfree(edesc);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
- ctx->ctx_len, 1);
+ print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+ ctx->ctx_len, 1);
if (req->result)
- print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->result,
- digestsize, 1);
-#endif
+ print_hex_dump_debug("result@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->result,
+ digestsize, 1);
req->base.complete(&req->base, err);
}
@@ -759,9 +726,10 @@ static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
if (nents > 1 || first_sg) {
struct sec4_sg_entry *sg = edesc->sec4_sg;
- unsigned int sgsize = sizeof(*sg) * (first_sg + nents);
+ unsigned int sgsize = sizeof(*sg) *
+ pad_sg_nents(first_sg + nents);
- sg_to_sec4_sg_last(req->src, nents, sg + first_sg, 0);
+ sg_to_sec4_sg_last(req->src, to_hash, sg + first_sg, 0);
src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
if (dma_mapping_error(ctx->jrdev, src_dma)) {
@@ -819,8 +787,10 @@ static int ahash_update_ctx(struct ahash_request *req)
}
if (to_hash) {
- src_nents = sg_nents_for_len(req->src,
- req->nbytes - (*next_buflen));
+ int pad_nents;
+ int src_len = req->nbytes - *next_buflen;
+
+ src_nents = sg_nents_for_len(req->src, src_len);
if (src_nents < 0) {
dev_err(jrdev, "Invalid number of src SG.\n");
return src_nents;
@@ -838,15 +808,14 @@ static int ahash_update_ctx(struct ahash_request *req)
}
sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
- sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
- sizeof(struct sec4_sg_entry);
+ pad_nents = pad_sg_nents(sec4_sg_src_index + mapped_nents);
+ sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);
/*
* allocate space for base edesc and hw desc commands,
* link tables
*/
- edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
- ctx->sh_desc_update,
+ edesc = ahash_edesc_alloc(ctx, pad_nents, ctx->sh_desc_update,
ctx->sh_desc_update_dma, flags);
if (!edesc) {
dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
@@ -866,7 +835,7 @@ static int ahash_update_ctx(struct ahash_request *req)
goto unmap_ctx;
if (mapped_nents)
- sg_to_sec4_sg_last(req->src, mapped_nents,
+ sg_to_sec4_sg_last(req->src, src_len,
edesc->sec4_sg + sec4_sg_src_index,
0);
else
@@ -893,11 +862,9 @@ static int ahash_update_ctx(struct ahash_request *req)
append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
if (ret)
@@ -910,13 +877,12 @@ static int ahash_update_ctx(struct ahash_request *req)
*buflen = *next_buflen;
*next_buflen = last_buflen;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
- print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
- *next_buflen, 1);
-#endif
+
+ print_hex_dump_debug("buf@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
+ print_hex_dump_debug("next buf@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
+ *next_buflen, 1);
return ret;
unmap_ctx:
@@ -935,18 +901,17 @@ static int ahash_final_ctx(struct ahash_request *req)
GFP_KERNEL : GFP_ATOMIC;
int buflen = *current_buflen(state);
u32 *desc;
- int sec4_sg_bytes, sec4_sg_src_index;
+ int sec4_sg_bytes;
int digestsize = crypto_ahash_digestsize(ahash);
struct ahash_edesc *edesc;
int ret;
- sec4_sg_src_index = 1 + (buflen ? 1 : 0);
- sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);
+ sec4_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) *
+ sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index,
- ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
- flags);
+ edesc = ahash_edesc_alloc(ctx, 4, ctx->sh_desc_fin,
+ ctx->sh_desc_fin_dma, flags);
if (!edesc)
return -ENOMEM;
@@ -963,7 +928,7 @@ static int ahash_final_ctx(struct ahash_request *req)
if (ret)
goto unmap_ctx;
- sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - 1);
+ sg_to_sec4_set_last(edesc->sec4_sg + (buflen ? 1 : 0));
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
sec4_sg_bytes, DMA_TO_DEVICE);
@@ -977,10 +942,9 @@ static int ahash_final_ctx(struct ahash_request *req)
LDST_SGF);
append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
if (ret)
@@ -1058,10 +1022,9 @@ static int ahash_finup_ctx(struct ahash_request *req)
append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
if (ret)
@@ -1135,10 +1098,9 @@ static int ahash_digest(struct ahash_request *req)
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
if (!ret) {
@@ -1190,10 +1152,9 @@ static int ahash_final_no_ctx(struct ahash_request *req)
if (ret)
goto unmap;
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
if (!ret) {
@@ -1246,8 +1207,10 @@ static int ahash_update_no_ctx(struct ahash_request *req)
}
if (to_hash) {
- src_nents = sg_nents_for_len(req->src,
- req->nbytes - *next_buflen);
+ int pad_nents;
+ int src_len = req->nbytes - *next_buflen;
+
+ src_nents = sg_nents_for_len(req->src, src_len);
if (src_nents < 0) {
dev_err(jrdev, "Invalid number of src SG.\n");
return src_nents;
@@ -1264,14 +1227,14 @@ static int ahash_update_no_ctx(struct ahash_request *req)
mapped_nents = 0;
}
- sec4_sg_bytes = (1 + mapped_nents) *
- sizeof(struct sec4_sg_entry);
+ pad_nents = pad_sg_nents(1 + mapped_nents);
+ sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);
/*
* allocate space for base edesc and hw desc commands,
* link tables
*/
- edesc = ahash_edesc_alloc(ctx, 1 + mapped_nents,
+ edesc = ahash_edesc_alloc(ctx, pad_nents,
ctx->sh_desc_update_first,
ctx->sh_desc_update_first_dma,
flags);
@@ -1287,8 +1250,7 @@ static int ahash_update_no_ctx(struct ahash_request *req)
if (ret)
goto unmap_ctx;
- sg_to_sec4_sg_last(req->src, mapped_nents,
- edesc->sec4_sg + 1, 0);
+ sg_to_sec4_sg_last(req->src, src_len, edesc->sec4_sg + 1, 0);
if (*next_buflen) {
scatterwalk_map_and_copy(next_buf, req->src,
@@ -1313,11 +1275,9 @@ static int ahash_update_no_ctx(struct ahash_request *req)
if (ret)
goto unmap_ctx;
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
if (ret)
@@ -1333,13 +1293,12 @@ static int ahash_update_no_ctx(struct ahash_request *req)
*buflen = *next_buflen;
*next_buflen = 0;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
- print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
- *next_buflen, 1);
-#endif
+
+ print_hex_dump_debug("buf@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
+ print_hex_dump_debug("next buf@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
+ 1);
return ret;
unmap_ctx:
@@ -1414,10 +1373,9 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
if (ret)
goto unmap;
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
if (!ret) {
@@ -1517,11 +1475,9 @@ static int ahash_update_first(struct ahash_request *req)
if (ret)
goto unmap_ctx;
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
if (ret)
@@ -1539,11 +1495,10 @@ static int ahash_update_first(struct ahash_request *req)
req->nbytes, 0);
switch_buf(state);
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
- *next_buflen, 1);
-#endif
+
+ print_hex_dump_debug("next buf@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
+ 1);
return ret;
unmap_ctx:
@@ -1930,7 +1885,7 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm)
caam_jr_free(ctx->jrdev);
}
-static void __exit caam_algapi_hash_exit(void)
+void caam_algapi_hash_exit(void)
{
struct caam_hash_alg *t_alg, *n;
@@ -1988,40 +1943,13 @@ caam_hash_alloc(struct caam_hash_template *template,
return t_alg;
}
-static int __init caam_algapi_hash_init(void)
+int caam_algapi_hash_init(struct device *ctrldev)
{
- struct device_node *dev_node;
- struct platform_device *pdev;
int i = 0, err = 0;
- struct caam_drv_private *priv;
+ struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
unsigned int md_limit = SHA512_DIGEST_SIZE;
u32 md_inst, md_vid;
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
- if (!dev_node) {
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
- if (!dev_node)
- return -ENODEV;
- }
-
- pdev = of_find_device_by_node(dev_node);
- if (!pdev) {
- of_node_put(dev_node);
- return -ENODEV;
- }
-
- priv = dev_get_drvdata(&pdev->dev);
- of_node_put(dev_node);
-
- /*
- * If priv is NULL, it's probably because the caam driver wasn't
- * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
- */
- if (!priv) {
- err = -ENODEV;
- goto out_put_dev;
- }
-
/*
* Register crypto algorithms the device supports. First, identify
* presence and attributes of MD block.
@@ -2042,10 +1970,8 @@ static int __init caam_algapi_hash_init(void)
* Skip registration of any hashing algorithms if MD block
* is not present.
*/
- if (!md_inst) {
- err = -ENODEV;
- goto out_put_dev;
- }
+ if (!md_inst)
+ return -ENODEV;
/* Limit digest size based on LP256 */
if (md_vid == CHA_VER_VID_MD_LP256)
@@ -2102,14 +2028,5 @@ static int __init caam_algapi_hash_init(void)
list_add_tail(&t_alg->entry, &hash_list);
}
-out_put_dev:
- put_device(&pdev->dev);
return err;
}
-
-module_init(caam_algapi_hash_init);
-module_exit(caam_algapi_hash_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
-MODULE_AUTHOR("Freescale Semiconductor - NMG");
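The hunks above all apply the same conversion: compile-time #ifdef DEBUG hex dumps become print_hex_dump_debug() calls, which compile to nothing unless DEBUG is defined and can otherwise be enabled per call site through dynamic debug. A minimal sketch of the before/after shape, using a hypothetical dump_jobdesc_example() helper rather than the driver's own functions:

#include <linux/printk.h>

static void dump_jobdesc_example(const u32 *desc, size_t len)
{
	/*
	 * Old pattern, removed throughout this series:
	 *
	 *	#ifdef DEBUG
	 *		print_hex_dump(KERN_ERR, "jobdesc: ", DUMP_PREFIX_ADDRESS,
	 *			       16, 4, desc, len, 1);
	 *	#endif
	 *
	 * New pattern: a no-op unless DEBUG or dynamic debug enables it.
	 */
	print_hex_dump_debug("jobdesc: ", DUMP_PREFIX_ADDRESS, 16, 4,
			     desc, len, 1);
}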
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index fe24485274e1..80574106af29 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -3,7 +3,7 @@
* caam - Freescale FSL CAAM support for Public Key Cryptography
*
* Copyright 2016 Freescale Semiconductor, Inc.
- * Copyright 2018 NXP
+ * Copyright 2018-2019 NXP
*
* There is no Shared Descriptor for PKC so that the Job Descriptor must carry
* all the desired key parameters, input and output pointers.
@@ -24,12 +24,18 @@
sizeof(struct rsa_priv_f2_pdb))
#define DESC_RSA_PRIV_F3_LEN (2 * CAAM_CMD_SZ + \
sizeof(struct rsa_priv_f3_pdb))
+#define CAAM_RSA_MAX_INPUT_SIZE 512 /* for a 4096-bit modulus */
+
+/* buffer filled with zeros, used for padding */
+static u8 *zero_buffer;
static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
struct akcipher_request *req)
{
+ struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
+
dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE);
- dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
+ dma_unmap_sg(dev, req_ctx->fixup_src, edesc->src_nents, DMA_TO_DEVICE);
if (edesc->sec4_sg_bytes)
dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes,
@@ -168,6 +174,13 @@ static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err,
akcipher_request_complete(req, err);
}
+/**
+ * caam_rsa_count_leading_zeros - count the leading zero bytes that need to
+ *                                be stripped from a given scatterlist
+ *
+ * @sgl   : scatterlist to count zeros from
+ * @nbytes: maximum number of leading zeros, in bytes, to strip
+ * @flags : operation flags
+ */
static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
unsigned int nbytes,
unsigned int flags)
@@ -187,7 +200,8 @@ static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
lzeros = 0;
len = 0;
while (nbytes > 0) {
- while (len && !*buff) {
+ /* do not strip more than given bytes */
+ while (len && !*buff && lzeros < nbytes) {
lzeros++;
len--;
buff++;
@@ -218,6 +232,7 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
struct device *dev = ctx->dev;
struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
+ struct caam_rsa_key *key = &ctx->key;
struct rsa_edesc *edesc;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
@@ -225,22 +240,45 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
int sgc;
int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
int src_nents, dst_nents;
+ unsigned int diff_size = 0;
int lzeros;
- lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len, sg_flags);
- if (lzeros < 0)
- return ERR_PTR(lzeros);
-
- req->src_len -= lzeros;
- req->src = scatterwalk_ffwd(req_ctx->src, req->src, lzeros);
+ if (req->src_len > key->n_sz) {
+ /*
+ * strip leading zeros and
+ * return the number of zeros to skip
+ */
+ lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len -
+ key->n_sz, sg_flags);
+ if (lzeros < 0)
+ return ERR_PTR(lzeros);
+
+ req_ctx->fixup_src = scatterwalk_ffwd(req_ctx->src, req->src,
+ lzeros);
+ req_ctx->fixup_src_len = req->src_len - lzeros;
+ } else {
+ /*
+		 * input src is shorter than the key modulus (n),
+		 * so it will be zero padded
+ */
+ diff_size = key->n_sz - req->src_len;
+ req_ctx->fixup_src = req->src;
+ req_ctx->fixup_src_len = req->src_len;
+ }
- src_nents = sg_nents_for_len(req->src, req->src_len);
+ src_nents = sg_nents_for_len(req_ctx->fixup_src,
+ req_ctx->fixup_src_len);
dst_nents = sg_nents_for_len(req->dst, req->dst_len);
- if (src_nents > 1)
- sec4_sg_len = src_nents;
+ if (!diff_size && src_nents == 1)
+ sec4_sg_len = 0; /* no need for an input hw s/g table */
+ else
+ sec4_sg_len = src_nents + !!diff_size;
+ sec4_sg_index = sec4_sg_len;
if (dst_nents > 1)
- sec4_sg_len += dst_nents;
+ sec4_sg_len += pad_sg_nents(dst_nents);
+ else
+ sec4_sg_len = pad_sg_nents(sec4_sg_len);
sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
@@ -250,7 +288,7 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
if (!edesc)
return ERR_PTR(-ENOMEM);
- sgc = dma_map_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
+ sgc = dma_map_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE);
if (unlikely(!sgc)) {
dev_err(dev, "unable to map source\n");
goto src_fail;
@@ -263,14 +301,16 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
}
edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;
+ if (diff_size)
+ dma_to_sec4_sg_one(edesc->sec4_sg, ctx->padding_dma, diff_size,
+ 0);
+
+ if (sec4_sg_index)
+ sg_to_sec4_sg_last(req_ctx->fixup_src, req_ctx->fixup_src_len,
+ edesc->sec4_sg + !!diff_size, 0);
- sec4_sg_index = 0;
- if (src_nents > 1) {
- sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
- sec4_sg_index += src_nents;
- }
if (dst_nents > 1)
- sg_to_sec4_sg_last(req->dst, dst_nents,
+ sg_to_sec4_sg_last(req->dst, req->dst_len,
edesc->sec4_sg + sec4_sg_index, 0);
/* Save nents for later use in Job Descriptor */
@@ -289,12 +329,16 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
edesc->sec4_sg_bytes = sec4_sg_bytes;
+ print_hex_dump_debug("caampkc sec4_sg@" __stringify(__LINE__) ": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
+ edesc->sec4_sg_bytes, 1);
+
return edesc;
sec4_sg_fail:
dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
dst_fail:
- dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
+ dma_unmap_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE);
src_fail:
kfree(edesc);
return ERR_PTR(-ENOMEM);
@@ -304,6 +348,7 @@ static int set_rsa_pub_pdb(struct akcipher_request *req,
struct rsa_edesc *edesc)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
struct caam_rsa_key *key = &ctx->key;
struct device *dev = ctx->dev;
@@ -328,7 +373,7 @@ static int set_rsa_pub_pdb(struct akcipher_request *req,
pdb->f_dma = edesc->sec4_sg_dma;
sec4_sg_index += edesc->src_nents;
} else {
- pdb->f_dma = sg_dma_address(req->src);
+ pdb->f_dma = sg_dma_address(req_ctx->fixup_src);
}
if (edesc->dst_nents > 1) {
@@ -340,7 +385,7 @@ static int set_rsa_pub_pdb(struct akcipher_request *req,
}
pdb->sgf |= (key->e_sz << RSA_PDB_E_SHIFT) | key->n_sz;
- pdb->f_len = req->src_len;
+ pdb->f_len = req_ctx->fixup_src_len;
return 0;
}
@@ -373,7 +418,9 @@ static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
pdb->g_dma = edesc->sec4_sg_dma;
sec4_sg_index += edesc->src_nents;
} else {
- pdb->g_dma = sg_dma_address(req->src);
+ struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
+
+ pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
}
if (edesc->dst_nents > 1) {
@@ -436,7 +483,9 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
pdb->g_dma = edesc->sec4_sg_dma;
sec4_sg_index += edesc->src_nents;
} else {
- pdb->g_dma = sg_dma_address(req->src);
+ struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
+
+ pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
}
if (edesc->dst_nents > 1) {
@@ -523,7 +572,9 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
pdb->g_dma = edesc->sec4_sg_dma;
sec4_sg_index += edesc->src_nents;
} else {
- pdb->g_dma = sg_dma_address(req->src);
+ struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
+
+ pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
}
if (edesc->dst_nents > 1) {
@@ -978,6 +1029,15 @@ static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
return PTR_ERR(ctx->dev);
}
+ ctx->padding_dma = dma_map_single(ctx->dev, zero_buffer,
+ CAAM_RSA_MAX_INPUT_SIZE - 1,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->dev, ctx->padding_dma)) {
+ dev_err(ctx->dev, "unable to map padding\n");
+ caam_jr_free(ctx->dev);
+ return -ENOMEM;
+ }
+
return 0;
}
@@ -987,6 +1047,8 @@ static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
struct caam_rsa_key *key = &ctx->key;
+ dma_unmap_single(ctx->dev, ctx->padding_dma, CAAM_RSA_MAX_INPUT_SIZE -
+ 1, DMA_TO_DEVICE);
caam_rsa_free_key(key);
caam_jr_free(ctx->dev);
}
@@ -1010,41 +1072,12 @@ static struct akcipher_alg caam_rsa = {
};
/* Public Key Cryptography module initialization handler */
-static int __init caam_pkc_init(void)
+int caam_pkc_init(struct device *ctrldev)
{
- struct device_node *dev_node;
- struct platform_device *pdev;
- struct device *ctrldev;
- struct caam_drv_private *priv;
+ struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
u32 pk_inst;
int err;
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
- if (!dev_node) {
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
- if (!dev_node)
- return -ENODEV;
- }
-
- pdev = of_find_device_by_node(dev_node);
- if (!pdev) {
- of_node_put(dev_node);
- return -ENODEV;
- }
-
- ctrldev = &pdev->dev;
- priv = dev_get_drvdata(ctrldev);
- of_node_put(dev_node);
-
- /*
- * If priv is NULL, it's probably because the caam driver wasn't
- * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
- */
- if (!priv) {
- err = -ENODEV;
- goto out_put_dev;
- }
-
/* Determine public key hardware accelerator presence. */
if (priv->era < 10)
pk_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
@@ -1053,31 +1086,29 @@ static int __init caam_pkc_init(void)
pk_inst = rd_reg32(&priv->ctrl->vreg.pkha) & CHA_VER_NUM_MASK;
/* Do not register algorithms if PKHA is not present. */
- if (!pk_inst) {
- err = -ENODEV;
- goto out_put_dev;
- }
+ if (!pk_inst)
+ return 0;
+
+ /* allocate zero buffer, used for padding input */
+ zero_buffer = kzalloc(CAAM_RSA_MAX_INPUT_SIZE - 1, GFP_DMA |
+ GFP_KERNEL);
+ if (!zero_buffer)
+ return -ENOMEM;
err = crypto_register_akcipher(&caam_rsa);
- if (err)
+ if (err) {
+ kfree(zero_buffer);
dev_warn(ctrldev, "%s alg registration failed\n",
caam_rsa.base.cra_driver_name);
- else
+ } else {
dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n");
+ }
-out_put_dev:
- put_device(ctrldev);
return err;
}
-static void __exit caam_pkc_exit(void)
+void caam_pkc_exit(void)
{
+ kfree(zero_buffer);
crypto_unregister_akcipher(&caam_rsa);
}
-
-module_init(caam_pkc_init);
-module_exit(caam_pkc_exit);
-
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_DESCRIPTION("FSL CAAM support for PKC functions of crypto API");
-MODULE_AUTHOR("Freescale Semiconductor");
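For reference, the input fixup introduced above in rsa_edesc_alloc() normalizes the RSA input against the modulus size: oversized inputs have up to (src_len - n_sz) leading zero bytes stripped, while short inputs keep their length and get (n_sz - src_len) bytes of zero padding prepended through the shared zero_buffer S/G entry. A condensed sketch of that decision; rsa_input_fixup_example() is a hypothetical helper and assumes lzeros has already been counted:

static void rsa_input_fixup_example(unsigned int src_len, unsigned int n_sz,
				    unsigned int lzeros,
				    unsigned int *fixup_len,
				    unsigned int *pad_len)
{
	if (src_len > n_sz) {
		/* oversized input: drop the counted leading zeros */
		*fixup_len = src_len - lzeros;
		*pad_len = 0;
	} else {
		/* short input: prepend zeros up to the modulus size */
		*fixup_len = src_len;
		*pad_len = n_sz - src_len;
	}
}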
diff --git a/drivers/crypto/caam/caampkc.h b/drivers/crypto/caam/caampkc.h
index 82645bcf8b27..2c488c9a3812 100644
--- a/drivers/crypto/caam/caampkc.h
+++ b/drivers/crypto/caam/caampkc.h
@@ -89,18 +89,25 @@ struct caam_rsa_key {
* caam_rsa_ctx - per session context.
* @key : RSA key in DMA zone
* @dev : device structure
+ * @padding_dma : DMA address of the zero-padding buffer prepended to the input
*/
struct caam_rsa_ctx {
struct caam_rsa_key key;
struct device *dev;
+	dma_addr_t padding_dma;
};
/**
* caam_rsa_req_ctx - per request context.
- * @src: input scatterlist (stripped of leading zeros)
+ * @src : input scatterlist (stripped of leading zeros)
+ * @fixup_src : input scatterlist (possibly with leading zeros stripped)
+ * @fixup_src_len : length of the fixup_src input scatterlist
*/
struct caam_rsa_req_ctx {
struct scatterlist src[2];
+ struct scatterlist *fixup_src;
+ unsigned int fixup_src_len;
};
/**
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index 95eb5402c59f..561bcb535184 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -3,7 +3,7 @@
* caam - Freescale FSL CAAM support for hw_random
*
* Copyright 2011 Freescale Semiconductor, Inc.
- * Copyright 2018 NXP
+ * Copyright 2018-2019 NXP
*
* Based on caamalg.c crypto API driver.
*
@@ -113,10 +113,8 @@ static void rng_done(struct device *jrdev, u32 *desc, u32 err, void *context)
/* Buffer refilled, invalidate cache */
dma_sync_single_for_cpu(jrdev, bd->addr, RN_BUF_SIZE, DMA_FROM_DEVICE);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "rng refreshed buf@: ",
- DUMP_PREFIX_ADDRESS, 16, 4, bd->buf, RN_BUF_SIZE, 1);
-#endif
+ print_hex_dump_debug("rng refreshed buf@: ", DUMP_PREFIX_ADDRESS, 16, 4,
+ bd->buf, RN_BUF_SIZE, 1);
}
static inline int submit_job(struct caam_rng_ctx *ctx, int to_current)
@@ -209,10 +207,10 @@ static inline int rng_create_sh_desc(struct caam_rng_ctx *ctx)
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "rng shdesc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
- desc, desc_bytes(desc), 1);
-#endif
+
+ print_hex_dump_debug("rng shdesc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
+ desc, desc_bytes(desc), 1);
+
return 0;
}
@@ -233,10 +231,10 @@ static inline int rng_create_job_desc(struct caam_rng_ctx *ctx, int buf_id)
}
append_seq_out_ptr_intlen(desc, bd->addr, RN_BUF_SIZE, 0);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "rng job desc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
- desc, desc_bytes(desc), 1);
-#endif
+
+ print_hex_dump_debug("rng job desc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
+ desc, desc_bytes(desc), 1);
+
return 0;
}
@@ -296,47 +294,20 @@ static struct hwrng caam_rng = {
.read = caam_read,
};
-static void __exit caam_rng_exit(void)
+void caam_rng_exit(void)
{
caam_jr_free(rng_ctx->jrdev);
hwrng_unregister(&caam_rng);
kfree(rng_ctx);
}
-static int __init caam_rng_init(void)
+int caam_rng_init(struct device *ctrldev)
{
struct device *dev;
- struct device_node *dev_node;
- struct platform_device *pdev;
- struct caam_drv_private *priv;
u32 rng_inst;
+ struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
int err;
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
- if (!dev_node) {
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
- if (!dev_node)
- return -ENODEV;
- }
-
- pdev = of_find_device_by_node(dev_node);
- if (!pdev) {
- of_node_put(dev_node);
- return -ENODEV;
- }
-
- priv = dev_get_drvdata(&pdev->dev);
- of_node_put(dev_node);
-
- /*
- * If priv is NULL, it's probably because the caam driver wasn't
- * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
- */
- if (!priv) {
- err = -ENODEV;
- goto out_put_dev;
- }
-
/* Check for an instantiated RNG before registration */
if (priv->era < 10)
rng_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
@@ -344,16 +315,13 @@ static int __init caam_rng_init(void)
else
rng_inst = rd_reg32(&priv->ctrl->vreg.rng) & CHA_VER_NUM_MASK;
- if (!rng_inst) {
- err = -ENODEV;
- goto out_put_dev;
- }
+ if (!rng_inst)
+ return 0;
dev = caam_jr_alloc();
if (IS_ERR(dev)) {
pr_err("Job Ring Device allocation for transform failed\n");
- err = PTR_ERR(dev);
- goto out_put_dev;
+ return PTR_ERR(dev);
}
rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA | GFP_KERNEL);
if (!rng_ctx) {
@@ -364,7 +332,6 @@ static int __init caam_rng_init(void)
if (err)
goto free_rng_ctx;
- put_device(&pdev->dev);
dev_info(dev, "registering rng-caam\n");
return hwrng_register(&caam_rng);
@@ -372,14 +339,5 @@ free_rng_ctx:
kfree(rng_ctx);
free_caam_alloc:
caam_jr_free(dev);
-out_put_dev:
- put_device(&pdev->dev);
return err;
}
-
-module_init(caam_rng_init);
-module_exit(caam_rng_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("FSL CAAM support for hw_random API");
-MODULE_AUTHOR("Freescale Semiconductor - NMG");
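As in the other sub-drivers, caam_rng_init() now receives the controller device and reads its private data directly instead of walking the device tree. Whether the RNG block is instantiated is still probed in the Era-dependent way shown above; a sketch of that check, where rng_block_present() is a made-up wrapper and the CHA_ID_LS_RNG_* / CHA_VER_NUM_MASK macros are assumed to come from the driver's regs.h:

static bool rng_block_present(struct caam_drv_private *priv)
{
	u32 rng_inst;

	if (priv->era < 10)
		rng_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
			    CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
	else
		rng_inst = rd_reg32(&priv->ctrl->vreg.rng) & CHA_VER_NUM_MASK;

	return rng_inst != 0;
}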
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index fec39c35c877..4e43ca4d3656 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -3,7 +3,7 @@
* Controller-level driver, kernel property detection, initialization
*
* Copyright 2008-2012 Freescale Semiconductor, Inc.
- * Copyright 2018 NXP
+ * Copyright 2018-2019 NXP
*/
#include <linux/device.h>
@@ -323,8 +323,8 @@ static int caam_remove(struct platform_device *pdev)
of_platform_depopulate(ctrldev);
#ifdef CONFIG_CAAM_QI
- if (ctrlpriv->qidev)
- caam_qi_shutdown(ctrlpriv->qidev);
+ if (ctrlpriv->qi_init)
+ caam_qi_shutdown(ctrldev);
#endif
/*
@@ -540,7 +540,8 @@ static int caam_probe(struct platform_device *pdev)
ctrlpriv->caam_ipg = clk;
if (!of_machine_is_compatible("fsl,imx7d") &&
- !of_machine_is_compatible("fsl,imx7s")) {
+ !of_machine_is_compatible("fsl,imx7s") &&
+ !of_machine_is_compatible("fsl,imx7ulp")) {
clk = caam_drv_identify_clk(&pdev->dev, "mem");
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
@@ -562,7 +563,8 @@ static int caam_probe(struct platform_device *pdev)
if (!of_machine_is_compatible("fsl,imx6ul") &&
!of_machine_is_compatible("fsl,imx7d") &&
- !of_machine_is_compatible("fsl,imx7s")) {
+ !of_machine_is_compatible("fsl,imx7s") &&
+ !of_machine_is_compatible("fsl,imx7ulp")) {
clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
@@ -702,12 +704,7 @@ static int caam_probe(struct platform_device *pdev)
}
ctrlpriv->era = caam_get_era(ctrl);
-
- ret = of_platform_populate(nprop, caam_match, NULL, dev);
- if (ret) {
- dev_err(dev, "JR platform devices creation error\n");
- goto iounmap_ctrl;
- }
+ ctrlpriv->domain = iommu_get_domain_for_dev(dev);
#ifdef CONFIG_DEBUG_FS
/*
@@ -721,19 +718,6 @@ static int caam_probe(struct platform_device *pdev)
ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
#endif
- ring = 0;
- for_each_available_child_of_node(nprop, np)
- if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
- of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
- ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
- ((__force uint8_t *)ctrl +
- (ring + JR_BLOCK_NUMBER) *
- BLOCK_OFFSET
- );
- ctrlpriv->total_jobrs++;
- ring++;
- }
-
/* Check to see if (DPAA 1.x) QI present. If so, enable */
ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK);
if (ctrlpriv->qi_present && !caam_dpaa2) {
@@ -752,6 +736,25 @@ static int caam_probe(struct platform_device *pdev)
#endif
}
+ ret = of_platform_populate(nprop, caam_match, NULL, dev);
+ if (ret) {
+ dev_err(dev, "JR platform devices creation error\n");
+ goto shutdown_qi;
+ }
+
+ ring = 0;
+ for_each_available_child_of_node(nprop, np)
+ if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
+ of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
+ ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
+ ((__force uint8_t *)ctrl +
+ (ring + JR_BLOCK_NUMBER) *
+ BLOCK_OFFSET
+ );
+ ctrlpriv->total_jobrs++;
+ ring++;
+ }
+
/* If no QI and no rings specified, quit and go home */
if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) {
dev_err(dev, "no queues configured, terminating\n");
@@ -898,6 +901,11 @@ caam_remove:
caam_remove(pdev);
return ret;
+shutdown_qi:
+#ifdef CONFIG_CAAM_QI
+ if (ctrlpriv->qi_init)
+ caam_qi_shutdown(dev);
+#endif
iounmap_ctrl:
iounmap(ctrl);
disable_caam_emi_slow:
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
index 2980b8ef1fb1..5988a26a2441 100644
--- a/drivers/crypto/caam/desc_constr.h
+++ b/drivers/crypto/caam/desc_constr.h
@@ -3,6 +3,7 @@
* caam descriptor construction helper functions
*
* Copyright 2008-2012 Freescale Semiconductor, Inc.
+ * Copyright 2019 NXP
*/
#ifndef DESC_CONSTR_H
@@ -37,6 +38,16 @@
extern bool caam_little_end;
+/*
+ * HW fetches 4 S/G table entries at a time, irrespective of how many entries
+ * are in the table. It's SW's responsibility to make sure these accesses
+ * do not have side effects.
+ */
+static inline int pad_sg_nents(int sg_nents)
+{
+ return ALIGN(sg_nents, 4);
+}
+
static inline int desc_len(u32 * const desc)
{
return caam32_to_cpu(*desc) & HDR_DESCLEN_MASK;
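The comment above pad_sg_nents() is the rationale for the sec4_sg_bytes changes in caamhash.c and caampkc.c: because the CAAM prefetches S/G entries in groups of four, link tables are over-allocated to a multiple of four entries so the extra fetches always land inside the table. A short sketch of how a table size is derived with it; sec4_sg_table_size_example() is hypothetical and the values in the comment are just examples:

static int sec4_sg_table_size_example(int mapped_nents)
{
	/* pad_sg_nents(3) == 4, pad_sg_nents(4) == 4, pad_sg_nents(5) == 8 */
	int pad_nents = pad_sg_nents(1 + mapped_nents);

	return pad_nents * sizeof(struct sec4_sg_entry);
}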
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
index 4da844e4b61d..4f0d45865aa2 100644
--- a/drivers/crypto/caam/error.c
+++ b/drivers/crypto/caam/error.c
@@ -13,7 +13,7 @@
#ifdef DEBUG
#include <linux/highmem.h>
-void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
+void caam_dump_sg(const char *prefix_str, int prefix_type,
int rowsize, int groupsize, struct scatterlist *sg,
size_t tlen, bool ascii)
{
@@ -35,15 +35,15 @@ void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
buf = it_page + it->offset;
len = min_t(size_t, tlen, it->length);
- print_hex_dump(level, prefix_str, prefix_type, rowsize,
- groupsize, buf, len, ascii);
+ print_hex_dump_debug(prefix_str, prefix_type, rowsize,
+ groupsize, buf, len, ascii);
tlen -= len;
kunmap_atomic(it_page);
}
}
#else
-void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
+void caam_dump_sg(const char *prefix_str, int prefix_type,
int rowsize, int groupsize, struct scatterlist *sg,
size_t tlen, bool ascii)
{}
diff --git a/drivers/crypto/caam/error.h b/drivers/crypto/caam/error.h
index 8c6b83e02a70..d9726e66edbf 100644
--- a/drivers/crypto/caam/error.h
+++ b/drivers/crypto/caam/error.h
@@ -17,7 +17,7 @@ void caam_strstatus(struct device *dev, u32 status, bool qi_v2);
#define caam_jr_strstatus(jrdev, status) caam_strstatus(jrdev, status, false)
#define caam_qi2_strstatus(qidev, status) caam_strstatus(qidev, status, true)
-void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
+void caam_dump_sg(const char *prefix_str, int prefix_type,
int rowsize, int groupsize, struct scatterlist *sg,
size_t tlen, bool ascii);
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index 3392615dc91b..6af84bbc612c 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -4,7 +4,7 @@
* Private/internal definitions between modules
*
* Copyright 2008-2011 Freescale Semiconductor, Inc.
- *
+ * Copyright 2019 NXP
*/
#ifndef INTERN_H
@@ -63,10 +63,6 @@ struct caam_drv_private_jr {
* Driver-private storage for a single CAAM block instance
*/
struct caam_drv_private {
-#ifdef CONFIG_CAAM_QI
- struct device *qidev;
-#endif
-
/* Physical-presence section */
struct caam_ctrl __iomem *ctrl; /* controller region */
struct caam_deco __iomem *deco; /* DECO/CCB views */
@@ -74,12 +70,17 @@ struct caam_drv_private {
struct caam_queue_if __iomem *qi; /* QI control region */
struct caam_job_ring __iomem *jr[4]; /* JobR's register space */
+ struct iommu_domain *domain;
+
/*
* Detected geometry block. Filled in from device tree if powerpc,
* or from register-based version detection code
*/
u8 total_jobrs; /* Total Job Rings in device */
u8 qi_present; /* Nonzero if QI present in device */
+#ifdef CONFIG_CAAM_QI
+ u8 qi_init; /* Nonzero if QI has been initialized */
+#endif
u8 mc_en; /* Nonzero if MC f/w is active */
int secvio_irq; /* Security violation interrupt number */
int virt_en; /* Virtualization enabled in CAAM */
@@ -107,8 +108,95 @@ struct caam_drv_private {
#endif
};
-void caam_jr_algapi_init(struct device *dev);
-void caam_jr_algapi_remove(struct device *dev);
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API
+
+int caam_algapi_init(struct device *dev);
+void caam_algapi_exit(void);
+
+#else
+
+static inline int caam_algapi_init(struct device *dev)
+{
+ return 0;
+}
+
+static inline void caam_algapi_exit(void)
+{
+}
+
+#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API */
+
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API
+
+int caam_algapi_hash_init(struct device *dev);
+void caam_algapi_hash_exit(void);
+
+#else
+
+static inline int caam_algapi_hash_init(struct device *dev)
+{
+ return 0;
+}
+
+static inline void caam_algapi_hash_exit(void)
+{
+}
+
+#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API */
+
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API
+
+int caam_pkc_init(struct device *dev);
+void caam_pkc_exit(void);
+
+#else
+
+static inline int caam_pkc_init(struct device *dev)
+{
+ return 0;
+}
+
+static inline void caam_pkc_exit(void)
+{
+}
+
+#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API */
+
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API
+
+int caam_rng_init(struct device *dev);
+void caam_rng_exit(void);
+
+#else
+
+static inline int caam_rng_init(struct device *dev)
+{
+ return 0;
+}
+
+static inline void caam_rng_exit(void)
+{
+}
+
+#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API */
+
+#ifdef CONFIG_CAAM_QI
+
+int caam_qi_algapi_init(struct device *dev);
+void caam_qi_algapi_exit(void);
+
+#else
+
+static inline int caam_qi_algapi_init(struct device *dev)
+{
+ return 0;
+}
+
+static inline void caam_qi_algapi_exit(void)
+{
+}
+
+#endif /* CONFIG_CAAM_QI */
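The blocks above follow the usual kernel pattern for optional sub-modules: real prototypes when the corresponding Kconfig option is enabled, static inline no-op stubs otherwise, so callers such as jr.c need no #ifdefs of their own. A generic sketch of the pattern, with CONFIG_FOO, foo_init() and foo_exit() as placeholders:

#ifdef CONFIG_FOO

int foo_init(struct device *dev);
void foo_exit(void);

#else

static inline int foo_init(struct device *dev)
{
	return 0;
}

static inline void foo_exit(void)
{
}

#endif /* CONFIG_FOO */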
#ifdef CONFIG_DEBUG_FS
static int caam_debugfs_u64_get(void *data, u64 *val)
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 1de2562d0982..cea811fed320 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -4,6 +4,7 @@
* JobR backend functionality
*
* Copyright 2008-2012 Freescale Semiconductor, Inc.
+ * Copyright 2019 NXP
*/
#include <linux/of_irq.h>
@@ -23,6 +24,43 @@ struct jr_driver_data {
} ____cacheline_aligned;
static struct jr_driver_data driver_data;
+static DEFINE_MUTEX(algs_lock);
+static unsigned int active_devs;
+
+static void register_algs(struct device *dev)
+{
+ mutex_lock(&algs_lock);
+
+ if (++active_devs != 1)
+ goto algs_unlock;
+
+ caam_algapi_init(dev);
+ caam_algapi_hash_init(dev);
+ caam_pkc_init(dev);
+ caam_rng_init(dev);
+ caam_qi_algapi_init(dev);
+
+algs_unlock:
+ mutex_unlock(&algs_lock);
+}
+
+static void unregister_algs(void)
+{
+ mutex_lock(&algs_lock);
+
+ if (--active_devs != 0)
+ goto algs_unlock;
+
+ caam_qi_algapi_exit();
+
+ caam_rng_exit();
+ caam_pkc_exit();
+ caam_algapi_hash_exit();
+ caam_algapi_exit();
+
+algs_unlock:
+ mutex_unlock(&algs_lock);
+}
static int caam_reset_hw_jr(struct device *dev)
{
@@ -109,6 +147,9 @@ static int caam_jr_remove(struct platform_device *pdev)
return -EBUSY;
}
+ /* Unregister JR-based RNG & crypto algorithms */
+ unregister_algs();
+
/* Remove the node from Physical JobR list maintained by driver */
spin_lock(&driver_data.jr_alloc_lock);
list_del(&jrpriv->list_node);
@@ -541,6 +582,8 @@ static int caam_jr_probe(struct platform_device *pdev)
atomic_set(&jrpriv->tfm_count, 0);
+ register_algs(jrdev->parent);
+
return 0;
}
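register_algs()/unregister_algs() above implement a mutex-protected reference count: the first job ring probed registers every algorithm family, and the last one removed tears them all down. A stripped-down sketch of the same idiom, with example_* names made up for illustration:

#include <linux/device.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);
static unsigned int example_users;

static void example_register(struct device *dev)
{
	mutex_lock(&example_lock);
	if (++example_users == 1)
		dev_info(dev, "first device, registering algorithms\n");
	mutex_unlock(&example_lock);
}

static void example_unregister(struct device *dev)
{
	mutex_lock(&example_lock);
	if (--example_users == 0)
		dev_info(dev, "last device gone, unregistering algorithms\n");
	mutex_unlock(&example_lock);
}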
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
index 8d0713fae6ac..48dd3536060d 100644
--- a/drivers/crypto/caam/key_gen.c
+++ b/drivers/crypto/caam/key_gen.c
@@ -16,9 +16,7 @@ void split_key_done(struct device *dev, u32 *desc, u32 err,
{
struct split_key_result *res = context;
-#ifdef DEBUG
- dev_err(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-#endif
+ dev_dbg(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
if (err)
caam_jr_strstatus(dev, err);
@@ -55,12 +53,10 @@ int gen_split_key(struct device *jrdev, u8 *key_out,
adata->keylen_pad = split_key_pad_len(adata->algtype &
OP_ALG_ALGSEL_MASK);
-#ifdef DEBUG
- dev_err(jrdev, "split keylen %d split keylen padded %d\n",
+ dev_dbg(jrdev, "split keylen %d split keylen padded %d\n",
adata->keylen, adata->keylen_pad);
- print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1);
-#endif
+ print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1);
if (adata->keylen_pad > max_keylen)
return -EINVAL;
@@ -102,10 +98,9 @@ int gen_split_key(struct device *jrdev, u8 *key_out,
append_fifo_store(desc, dma_addr, adata->keylen,
LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
result.err = 0;
init_completion(&result.completion);
@@ -115,11 +110,10 @@ int gen_split_key(struct device *jrdev, u8 *key_out,
/* in progress */
wait_for_completion(&result.completion);
ret = result.err;
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key_out,
- adata->keylen_pad, 1);
-#endif
+
+ print_hex_dump_debug("ctx.key@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key_out,
+ adata->keylen_pad, 1);
}
dma_unmap_single(jrdev, dma_addr, adata->keylen_pad, DMA_BIDIRECTIONAL);
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
index 9f08f84cca59..0fe618e3804a 100644
--- a/drivers/crypto/caam/qi.c
+++ b/drivers/crypto/caam/qi.c
@@ -4,7 +4,7 @@
* Queue Interface backend functionality
*
* Copyright 2013-2016 Freescale Semiconductor, Inc.
- * Copyright 2016-2017 NXP
+ * Copyright 2016-2017, 2019 NXP
*/
#include <linux/cpumask.h>
@@ -18,6 +18,7 @@
#include "desc_constr.h"
#define PREHDR_RSLS_SHIFT 31
+#define PREHDR_ABS BIT(25)
/*
* Use a reasonable backlog of frames (per CPU) as congestion threshold,
@@ -58,11 +59,9 @@ static DEFINE_PER_CPU(int, last_cpu);
/*
* caam_qi_priv - CAAM QI backend private params
* @cgr: QMan congestion group
- * @qi_pdev: platform device for QI backend
*/
struct caam_qi_priv {
struct qman_cgr cgr;
- struct platform_device *qi_pdev;
};
static struct caam_qi_priv qipriv ____cacheline_aligned;
@@ -95,6 +94,16 @@ static u64 times_congested;
*/
static struct kmem_cache *qi_cache;
+static void *caam_iova_to_virt(struct iommu_domain *domain,
+ dma_addr_t iova_addr)
+{
+ phys_addr_t phys_addr;
+
+ phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;
+
+ return phys_to_virt(phys_addr);
+}
+
int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)
{
struct qm_fd fd;
@@ -135,6 +144,7 @@ static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
const struct qm_fd *fd;
struct caam_drv_req *drv_req;
struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
+ struct caam_drv_private *priv = dev_get_drvdata(qidev);
fd = &msg->ern.fd;
@@ -143,7 +153,7 @@ static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
return;
}
- drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd));
+ drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd));
if (!drv_req) {
dev_err(qidev,
"Can't find original request for CAAM response\n");
@@ -346,6 +356,7 @@ int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc)
*/
drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
num_words);
+ drv_ctx->prehdr[1] = cpu_to_caam32(PREHDR_ABS);
memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
dma_sync_single_for_device(qidev, drv_ctx->context_a,
sizeof(drv_ctx->sh_desc) +
@@ -401,6 +412,7 @@ struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev,
*/
drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
num_words);
+ drv_ctx->prehdr[1] = cpu_to_caam32(PREHDR_ABS);
memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
size = sizeof(drv_ctx->prehdr) + sizeof(drv_ctx->sh_desc);
hwdesc = dma_map_single(qidev, drv_ctx->prehdr, size,
@@ -488,7 +500,7 @@ EXPORT_SYMBOL(caam_drv_ctx_rel);
void caam_qi_shutdown(struct device *qidev)
{
int i;
- struct caam_qi_priv *priv = dev_get_drvdata(qidev);
+ struct caam_qi_priv *priv = &qipriv;
const cpumask_t *cpus = qman_affine_cpus();
for_each_cpu(i, cpus) {
@@ -506,8 +518,6 @@ void caam_qi_shutdown(struct device *qidev)
qman_release_cgrid(priv->cgr.cgrid);
kmem_cache_destroy(qi_cache);
-
- platform_device_unregister(priv->qi_pdev);
}
static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr, int congested)
@@ -550,6 +560,7 @@ static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
struct caam_drv_req *drv_req;
const struct qm_fd *fd;
struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
+ struct caam_drv_private *priv = dev_get_drvdata(qidev);
u32 status;
if (caam_qi_napi_schedule(p, caam_napi))
@@ -572,7 +583,7 @@ static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
return qman_cb_dqrr_consume;
}
- drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd));
+ drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd));
if (unlikely(!drv_req)) {
dev_err(qidev,
"Can't find original request for caam response\n");
@@ -692,33 +703,17 @@ static void free_rsp_fqs(void)
int caam_qi_init(struct platform_device *caam_pdev)
{
int err, i;
- struct platform_device *qi_pdev;
struct device *ctrldev = &caam_pdev->dev, *qidev;
struct caam_drv_private *ctrlpriv;
const cpumask_t *cpus = qman_affine_cpus();
- static struct platform_device_info qi_pdev_info = {
- .name = "caam_qi",
- .id = PLATFORM_DEVID_NONE
- };
-
- qi_pdev_info.parent = ctrldev;
- qi_pdev_info.dma_mask = dma_get_mask(ctrldev);
- qi_pdev = platform_device_register_full(&qi_pdev_info);
- if (IS_ERR(qi_pdev))
- return PTR_ERR(qi_pdev);
- set_dma_ops(&qi_pdev->dev, get_dma_ops(ctrldev));
ctrlpriv = dev_get_drvdata(ctrldev);
- qidev = &qi_pdev->dev;
-
- qipriv.qi_pdev = qi_pdev;
- dev_set_drvdata(qidev, &qipriv);
+ qidev = ctrldev;
/* Initialize the congestion detection */
err = init_cgr(qidev);
if (err) {
dev_err(qidev, "CGR initialization failed: %d\n", err);
- platform_device_unregister(qi_pdev);
return err;
}
@@ -727,7 +722,6 @@ int caam_qi_init(struct platform_device *caam_pdev)
if (err) {
dev_err(qidev, "Can't allocate CAAM response FQs: %d\n", err);
free_rsp_fqs();
- platform_device_unregister(qi_pdev);
return err;
}
@@ -750,15 +744,11 @@ int caam_qi_init(struct platform_device *caam_pdev)
napi_enable(irqtask);
}
- /* Hook up QI device to parent controlling caam device */
- ctrlpriv->qidev = qidev;
-
qi_cache = kmem_cache_create("caamqicache", CAAM_QI_MEMCACHE_SIZE, 0,
SLAB_CACHE_DMA, NULL);
if (!qi_cache) {
dev_err(qidev, "Can't allocate CAAM cache\n");
free_rsp_fqs();
- platform_device_unregister(qi_pdev);
return -ENOMEM;
}
@@ -766,6 +756,8 @@ int caam_qi_init(struct platform_device *caam_pdev)
debugfs_create_file("qi_congested", 0444, ctrlpriv->ctl,
&times_congested, &caam_fops_u64_ro);
#endif
+
+ ctrlpriv->qi_init = 1;
dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n");
return 0;
}
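The caam_iova_to_virt() helper added above exists because frame descriptors carry DMA addresses: when the device sits behind an IOMMU those are IOVAs, not physical addresses, and must be resolved through the device's IOMMU domain before phys_to_virt() is meaningful. Restated as a self-contained sketch, assuming the domain was obtained with iommu_get_domain_for_dev() as ctrl.c now does:

#include <linux/iommu.h>
#include <linux/io.h>

static void *fd_to_virt_example(struct iommu_domain *domain, dma_addr_t iova)
{
	/* no IOMMU (or passthrough): the IOVA already is the physical address */
	phys_addr_t phys = domain ? iommu_iova_to_phys(domain, iova) : iova;

	return phys_to_virt(phys);
}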
diff --git a/drivers/crypto/caam/sg_sw_qm.h b/drivers/crypto/caam/sg_sw_qm.h
index b3e1aaaeffea..d56cc7efbc13 100644
--- a/drivers/crypto/caam/sg_sw_qm.h
+++ b/drivers/crypto/caam/sg_sw_qm.h
@@ -54,15 +54,19 @@ static inline void dma_to_qm_sg_one_last_ext(struct qm_sg_entry *qm_sg_ptr,
* but does not have final bit; instead, returns last entry
*/
static inline struct qm_sg_entry *
-sg_to_qm_sg(struct scatterlist *sg, int sg_count,
+sg_to_qm_sg(struct scatterlist *sg, int len,
struct qm_sg_entry *qm_sg_ptr, u16 offset)
{
- while (sg_count && sg) {
- dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg),
- sg_dma_len(sg), offset);
+ int ent_len;
+
+ while (len) {
+ ent_len = min_t(int, sg_dma_len(sg), len);
+
+ dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg), ent_len,
+ offset);
qm_sg_ptr++;
sg = sg_next(sg);
- sg_count--;
+ len -= ent_len;
}
return qm_sg_ptr - 1;
}
@@ -71,10 +75,10 @@ sg_to_qm_sg(struct scatterlist *sg, int sg_count,
* convert scatterlist to h/w link table format
* scatterlist must have been previously dma mapped
*/
-static inline void sg_to_qm_sg_last(struct scatterlist *sg, int sg_count,
+static inline void sg_to_qm_sg_last(struct scatterlist *sg, int len,
struct qm_sg_entry *qm_sg_ptr, u16 offset)
{
- qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset);
+ qm_sg_ptr = sg_to_qm_sg(sg, len, qm_sg_ptr, offset);
qm_sg_entry_set_f(qm_sg_ptr, qm_sg_entry_get_len(qm_sg_ptr));
}
diff --git a/drivers/crypto/caam/sg_sw_qm2.h b/drivers/crypto/caam/sg_sw_qm2.h
index c9378402a5f8..b8b737d2b0ea 100644
--- a/drivers/crypto/caam/sg_sw_qm2.h
+++ b/drivers/crypto/caam/sg_sw_qm2.h
@@ -25,15 +25,19 @@ static inline void dma_to_qm_sg_one(struct dpaa2_sg_entry *qm_sg_ptr,
* but does not have final bit; instead, returns last entry
*/
static inline struct dpaa2_sg_entry *
-sg_to_qm_sg(struct scatterlist *sg, int sg_count,
+sg_to_qm_sg(struct scatterlist *sg, int len,
struct dpaa2_sg_entry *qm_sg_ptr, u16 offset)
{
- while (sg_count && sg) {
- dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg),
- sg_dma_len(sg), offset);
+ int ent_len;
+
+ while (len) {
+ ent_len = min_t(int, sg_dma_len(sg), len);
+
+ dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg), ent_len,
+ offset);
qm_sg_ptr++;
sg = sg_next(sg);
- sg_count--;
+ len -= ent_len;
}
return qm_sg_ptr - 1;
}
@@ -42,11 +46,11 @@ sg_to_qm_sg(struct scatterlist *sg, int sg_count,
* convert scatterlist to h/w link table format
* scatterlist must have been previously dma mapped
*/
-static inline void sg_to_qm_sg_last(struct scatterlist *sg, int sg_count,
+static inline void sg_to_qm_sg_last(struct scatterlist *sg, int len,
struct dpaa2_sg_entry *qm_sg_ptr,
u16 offset)
{
- qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset);
+ qm_sg_ptr = sg_to_qm_sg(sg, len, qm_sg_ptr, offset);
dpaa2_sg_set_final(qm_sg_ptr, true);
}
diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h
index dbfa9fce33e0..07e1ee99273b 100644
--- a/drivers/crypto/caam/sg_sw_sec4.h
+++ b/drivers/crypto/caam/sg_sw_sec4.h
@@ -35,11 +35,9 @@ static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr,
sec4_sg_ptr->bpid_offset = cpu_to_caam32(offset &
SEC4_SG_OFFSET_MASK);
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "sec4_sg_ptr@: ",
- DUMP_PREFIX_ADDRESS, 16, 4, sec4_sg_ptr,
- sizeof(struct sec4_sg_entry), 1);
-#endif
+
+ print_hex_dump_debug("sec4_sg_ptr@: ", DUMP_PREFIX_ADDRESS, 16, 4,
+ sec4_sg_ptr, sizeof(struct sec4_sg_entry), 1);
}
/*
@@ -47,15 +45,19 @@ static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr,
* but does not have final bit; instead, returns last entry
*/
static inline struct sec4_sg_entry *
-sg_to_sec4_sg(struct scatterlist *sg, int sg_count,
+sg_to_sec4_sg(struct scatterlist *sg, int len,
struct sec4_sg_entry *sec4_sg_ptr, u16 offset)
{
- while (sg_count) {
- dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg),
- sg_dma_len(sg), offset);
+ int ent_len;
+
+ while (len) {
+ ent_len = min_t(int, sg_dma_len(sg), len);
+
+ dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg), ent_len,
+ offset);
sec4_sg_ptr++;
sg = sg_next(sg);
- sg_count--;
+ len -= ent_len;
}
return sec4_sg_ptr - 1;
}
@@ -72,11 +74,11 @@ static inline void sg_to_sec4_set_last(struct sec4_sg_entry *sec4_sg_ptr)
* convert scatterlist to h/w link table format
* scatterlist must have been previously dma mapped
*/
-static inline void sg_to_sec4_sg_last(struct scatterlist *sg, int sg_count,
+static inline void sg_to_sec4_sg_last(struct scatterlist *sg, int len,
struct sec4_sg_entry *sec4_sg_ptr,
u16 offset)
{
- sec4_sg_ptr = sg_to_sec4_sg(sg, sg_count, sec4_sg_ptr, offset);
+ sec4_sg_ptr = sg_to_sec4_sg(sg, len, sec4_sg_ptr, offset);
sg_to_sec4_set_last(sec4_sg_ptr);
}
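All three sg_sw_*.h headers switch from walking a pre-computed entry count to consuming a byte length, clamping each link-table entry to what the current scatterlist element actually maps; that is what lets callers pass request lengths instead of nents. A small sketch of the same length-driven walk, here only counting how many entries would be emitted; count_mapped_ents_example() is hypothetical:

#include <linux/kernel.h>
#include <linux/scatterlist.h>

static int count_mapped_ents_example(struct scatterlist *sg, int len)
{
	int ents = 0;

	while (len > 0) {
		len -= min_t(int, sg_dma_len(sg), len);
		sg = sg_next(sg);
		ents++;
	}

	return ents;
}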
diff --git a/drivers/crypto/cavium/cpt/cptvf_algs.c b/drivers/crypto/cavium/cpt/cptvf_algs.c
index e9f4704494fb..ff3cb1f8f2b6 100644
--- a/drivers/crypto/cavium/cpt/cptvf_algs.c
+++ b/drivers/crypto/cavium/cpt/cptvf_algs.c
@@ -7,7 +7,6 @@
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
-#include <crypto/crypto_wq.h>
#include <crypto/des.h>
#include <crypto/xts.h>
#include <linux/crypto.h>
diff --git a/drivers/crypto/cavium/nitrox/nitrox_debugfs.h b/drivers/crypto/cavium/nitrox/nitrox_debugfs.h
index f177b79bbab0..09c4cf2513fb 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_debugfs.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_debugfs.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NITROX_DEBUGFS_H
#define __NITROX_DEBUGFS_H
diff --git a/drivers/crypto/cavium/nitrox/nitrox_mbx.h b/drivers/crypto/cavium/nitrox/nitrox_mbx.h
index 5008399775a9..7c93d0282174 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_mbx.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_mbx.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NITROX_MBX_H
#define __NITROX_MBX_H
diff --git a/drivers/crypto/ccp/ccp-crypto-aes.c b/drivers/crypto/ccp/ccp-crypto-aes.c
index ea3d6de55ff6..58c6dddfc5e1 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes.c
@@ -2,7 +2,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) AES crypto API support
*
- * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013-2019 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*/
@@ -76,8 +76,7 @@ static int ccp_aes_crypt(struct ablkcipher_request *req, bool encrypt)
return -EINVAL;
if (((ctx->u.aes.mode == CCP_AES_MODE_ECB) ||
- (ctx->u.aes.mode == CCP_AES_MODE_CBC) ||
- (ctx->u.aes.mode == CCP_AES_MODE_CFB)) &&
+ (ctx->u.aes.mode == CCP_AES_MODE_CBC)) &&
(req->nbytes & (AES_BLOCK_SIZE - 1)))
return -EINVAL;
@@ -288,7 +287,7 @@ static struct ccp_aes_def aes_algs[] = {
.version = CCP_VERSION(3, 0),
.name = "cfb(aes)",
.driver_name = "cfb-aes-ccp",
- .blocksize = AES_BLOCK_SIZE,
+ .blocksize = 1,
.ivsize = AES_BLOCK_SIZE,
.alg_defaults = &ccp_aes_defaults,
},
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index cc3e96c4f5fb..f79eede71c62 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -32,56 +32,62 @@ struct ccp_tasklet_data {
};
/* Human-readable error strings */
+#define CCP_MAX_ERROR_CODE 64
static char *ccp_error_codes[] = {
"",
- "ERR 01: ILLEGAL_ENGINE",
- "ERR 02: ILLEGAL_KEY_ID",
- "ERR 03: ILLEGAL_FUNCTION_TYPE",
- "ERR 04: ILLEGAL_FUNCTION_MODE",
- "ERR 05: ILLEGAL_FUNCTION_ENCRYPT",
- "ERR 06: ILLEGAL_FUNCTION_SIZE",
- "ERR 07: Zlib_MISSING_INIT_EOM",
- "ERR 08: ILLEGAL_FUNCTION_RSVD",
- "ERR 09: ILLEGAL_BUFFER_LENGTH",
- "ERR 10: VLSB_FAULT",
- "ERR 11: ILLEGAL_MEM_ADDR",
- "ERR 12: ILLEGAL_MEM_SEL",
- "ERR 13: ILLEGAL_CONTEXT_ID",
- "ERR 14: ILLEGAL_KEY_ADDR",
- "ERR 15: 0xF Reserved",
- "ERR 16: Zlib_ILLEGAL_MULTI_QUEUE",
- "ERR 17: Zlib_ILLEGAL_JOBID_CHANGE",
- "ERR 18: CMD_TIMEOUT",
- "ERR 19: IDMA0_AXI_SLVERR",
- "ERR 20: IDMA0_AXI_DECERR",
- "ERR 21: 0x15 Reserved",
- "ERR 22: IDMA1_AXI_SLAVE_FAULT",
- "ERR 23: IDMA1_AIXI_DECERR",
- "ERR 24: 0x18 Reserved",
- "ERR 25: ZLIBVHB_AXI_SLVERR",
- "ERR 26: ZLIBVHB_AXI_DECERR",
- "ERR 27: 0x1B Reserved",
- "ERR 27: ZLIB_UNEXPECTED_EOM",
- "ERR 27: ZLIB_EXTRA_DATA",
- "ERR 30: ZLIB_BTYPE",
- "ERR 31: ZLIB_UNDEFINED_SYMBOL",
- "ERR 32: ZLIB_UNDEFINED_DISTANCE_S",
- "ERR 33: ZLIB_CODE_LENGTH_SYMBOL",
- "ERR 34: ZLIB _VHB_ILLEGAL_FETCH",
- "ERR 35: ZLIB_UNCOMPRESSED_LEN",
- "ERR 36: ZLIB_LIMIT_REACHED",
- "ERR 37: ZLIB_CHECKSUM_MISMATCH0",
- "ERR 38: ODMA0_AXI_SLVERR",
- "ERR 39: ODMA0_AXI_DECERR",
- "ERR 40: 0x28 Reserved",
- "ERR 41: ODMA1_AXI_SLVERR",
- "ERR 42: ODMA1_AXI_DECERR",
- "ERR 43: LSB_PARITY_ERR",
+ "ILLEGAL_ENGINE",
+ "ILLEGAL_KEY_ID",
+ "ILLEGAL_FUNCTION_TYPE",
+ "ILLEGAL_FUNCTION_MODE",
+ "ILLEGAL_FUNCTION_ENCRYPT",
+ "ILLEGAL_FUNCTION_SIZE",
+ "Zlib_MISSING_INIT_EOM",
+ "ILLEGAL_FUNCTION_RSVD",
+ "ILLEGAL_BUFFER_LENGTH",
+ "VLSB_FAULT",
+ "ILLEGAL_MEM_ADDR",
+ "ILLEGAL_MEM_SEL",
+ "ILLEGAL_CONTEXT_ID",
+ "ILLEGAL_KEY_ADDR",
+ "0xF Reserved",
+ "Zlib_ILLEGAL_MULTI_QUEUE",
+ "Zlib_ILLEGAL_JOBID_CHANGE",
+ "CMD_TIMEOUT",
+ "IDMA0_AXI_SLVERR",
+ "IDMA0_AXI_DECERR",
+ "0x15 Reserved",
+ "IDMA1_AXI_SLAVE_FAULT",
+ "IDMA1_AIXI_DECERR",
+ "0x18 Reserved",
+ "ZLIBVHB_AXI_SLVERR",
+ "ZLIBVHB_AXI_DECERR",
+ "0x1B Reserved",
+ "ZLIB_UNEXPECTED_EOM",
+ "ZLIB_EXTRA_DATA",
+ "ZLIB_BTYPE",
+ "ZLIB_UNDEFINED_SYMBOL",
+ "ZLIB_UNDEFINED_DISTANCE_S",
+ "ZLIB_CODE_LENGTH_SYMBOL",
+ "ZLIB _VHB_ILLEGAL_FETCH",
+ "ZLIB_UNCOMPRESSED_LEN",
+ "ZLIB_LIMIT_REACHED",
+ "ZLIB_CHECKSUM_MISMATCH0",
+ "ODMA0_AXI_SLVERR",
+ "ODMA0_AXI_DECERR",
+ "0x28 Reserved",
+ "ODMA1_AXI_SLVERR",
+ "ODMA1_AXI_DECERR",
};
-void ccp_log_error(struct ccp_device *d, int e)
+void ccp_log_error(struct ccp_device *d, unsigned int e)
{
- dev_err(d->dev, "CCP error: %s (0x%x)\n", ccp_error_codes[e], e);
+ if (WARN_ON(e >= CCP_MAX_ERROR_CODE))
+ return;
+
+ if (e < ARRAY_SIZE(ccp_error_codes))
+ dev_err(d->dev, "CCP error %d: %s\n", e, ccp_error_codes[e]);
+ else
+ dev_err(d->dev, "CCP error %d: Unknown Error\n", e);
}
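The reworked ccp_log_error() drops the hand-numbered "ERR NN:" prefixes and, more importantly, bounds the lookup: codes beyond the table are reported numerically instead of indexing past the end of the array. The same defensive pattern in isolation, with log_error_example() as a made-up helper:

#include <linux/device.h>

static void log_error_example(struct device *dev, unsigned int e,
			      const char * const *table, size_t table_len)
{
	if (e < table_len)
		dev_err(dev, "error %u: %s\n", e, table[e]);
	else
		dev_err(dev, "error %u: Unknown Error\n", e);
}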
/* List of CCPs, CCP count, read-write access lock, and access functions
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index 90523a069bff..5e624920fd99 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -629,7 +629,7 @@ struct ccp5_desc {
void ccp_add_device(struct ccp_device *ccp);
void ccp_del_device(struct ccp_device *ccp);
-extern void ccp_log_error(struct ccp_device *, int);
+extern void ccp_log_error(struct ccp_device *, unsigned int);
struct ccp_device *ccp_alloc_struct(struct sp_device *sp);
bool ccp_queues_suspended(struct ccp_device *ccp);
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index db8de89d990f..866b2e05ca77 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -2,7 +2,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) driver
*
- * Copyright (C) 2013,2018 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013-2019 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
* Author: Gary R Hook <gary.hook@amd.com>
@@ -890,8 +890,7 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
return -EINVAL;
if (((aes->mode == CCP_AES_MODE_ECB) ||
- (aes->mode == CCP_AES_MODE_CBC) ||
- (aes->mode == CCP_AES_MODE_CFB)) &&
+ (aes->mode == CCP_AES_MODE_CBC)) &&
(aes->src_len & (AES_BLOCK_SIZE - 1)))
return -EINVAL;
@@ -1264,6 +1263,9 @@ static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
int ret;
/* Error checks */
+ if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0))
+ return -EINVAL;
+
if (!cmd_q->ccp->vdata->perform->des3)
return -EINVAL;
@@ -1346,8 +1348,6 @@ static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
* passthru option to convert from big endian to little endian.
*/
if (des3->mode != CCP_DES3_MODE_ECB) {
- u32 load_mode;
-
op.sb_ctx = cmd_q->sb_ctx;
ret = ccp_init_dm_workarea(&ctx, cmd_q,
@@ -1363,12 +1363,8 @@ static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
if (ret)
goto e_ctx;
- if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
- load_mode = CCP_PASSTHRU_BYTESWAP_NOOP;
- else
- load_mode = CCP_PASSTHRU_BYTESWAP_256BIT;
ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
- load_mode);
+ CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_ctx;
@@ -1430,10 +1426,6 @@ static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
}
/* ...but we only need the last DES3_EDE_BLOCK_SIZE bytes */
- if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
- dm_offset = CCP_SB_BYTES - des3->iv_len;
- else
- dm_offset = 0;
ccp_get_dm_area(&ctx, dm_offset, des3->iv, 0,
DES3_EDE_BLOCK_SIZE);
}
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
index 86ac7b443355..980aa04b655b 100644
--- a/drivers/crypto/ccree/cc_driver.c
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -48,6 +48,7 @@ struct cc_hw_data {
};
#define CC_NUM_IDRS 4
+#define CC_HW_RESET_LOOP_COUNT 10
/* Note: PIDR3 holds CMOD/Rev so ignored for HW identification purposes */
static const u32 pidr_0124_offsets[CC_NUM_IDRS] = {
@@ -133,6 +134,9 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
u32 imr;
/* STAT_OP_TYPE_GENERIC STAT_PHASE_0: Interrupt */
+	/* if the driver is suspended, return; probably a shared interrupt */
+ if (cc_pm_is_dev_suspended(dev))
+ return IRQ_NONE;
/* read the interrupt status */
irr = cc_ioread(drvdata, CC_REG(HOST_IRR));
@@ -188,6 +192,31 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
+bool cc_wait_for_reset_completion(struct cc_drvdata *drvdata)
+{
+ unsigned int val;
+ unsigned int i;
+
+ /* 712/710/63 has no reset completion indication, always return true */
+ if (drvdata->hw_rev <= CC_HW_REV_712)
+ return true;
+
+ for (i = 0; i < CC_HW_RESET_LOOP_COUNT; i++) {
+ /* in cc7x3 NVM_IS_IDLE indicates that CC reset is
+		 * completed and the device is fully functional
+ */
+ val = cc_ioread(drvdata, CC_REG(NVM_IS_IDLE));
+ if (val & CC_NVM_IS_IDLE_MASK) {
+ /* hw indicate reset completed */
+ return true;
+ }
+		/* allow other processes to be scheduled on this CPU */
+ schedule();
+ }
+ /* reset not completed */
+ return false;
+}
+
int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe)
{
unsigned int val, cache_params;
@@ -315,15 +344,6 @@ static int init_cc_resources(struct platform_device *plat_dev)
return new_drvdata->irq;
}
- rc = devm_request_irq(dev, new_drvdata->irq, cc_isr,
- IRQF_SHARED, "ccree", new_drvdata);
- if (rc) {
- dev_err(dev, "Could not register to interrupt %d\n",
- new_drvdata->irq);
- return rc;
- }
- dev_dbg(dev, "Registered to IRQ: %d\n", new_drvdata->irq);
-
init_completion(&new_drvdata->hw_queue_avail);
if (!plat_dev->dev.dma_mask)
@@ -352,6 +372,11 @@ static int init_cc_resources(struct platform_device *plat_dev)
new_drvdata->sec_disabled = cc_sec_disable;
+	/* wait for Cryptocell reset completion */
+	if (!cc_wait_for_reset_completion(new_drvdata))
+		dev_err(dev, "Cryptocell reset not completed\n");
+
if (hw_rev->rev <= CC_HW_REV_712) {
/* Verify correct mapping */
val = cc_ioread(new_drvdata, new_drvdata->sig_offset);
@@ -383,6 +408,24 @@ static int init_cc_resources(struct platform_device *plat_dev)
}
sig_cidr = val;
+ /* Check HW engine configuration */
+ val = cc_ioread(new_drvdata, CC_REG(HOST_REMOVE_INPUT_PINS));
+ switch (val) {
+ case CC_PINS_FULL:
+ /* This is fine */
+ break;
+ case CC_PINS_SLIM:
+ if (new_drvdata->std_bodies & CC_STD_NIST) {
+ dev_warn(dev, "703 mode forced due to HW configuration.\n");
+ new_drvdata->std_bodies = CC_STD_OSCCA;
+ }
+ break;
+ default:
+		dev_err(dev, "Unsupported engines configuration.\n");
+ rc = -EINVAL;
+ goto post_clk_err;
+ }
+
/* Check security disable state */
val = cc_ioread(new_drvdata, CC_REG(SECURITY_DISABLED));
val &= CC_SECURITY_DISABLED_MASK;
@@ -401,6 +444,15 @@ static int init_cc_resources(struct platform_device *plat_dev)
/* Display HW versions */
dev_info(dev, "ARM CryptoCell %s Driver: HW version 0x%08X/0x%8X, Driver version %s\n",
hw_rev->name, hw_rev_pidr, sig_cidr, DRV_MODULE_VERSION);
+ /* register the driver isr function */
+ rc = devm_request_irq(dev, new_drvdata->irq, cc_isr,
+ IRQF_SHARED, "ccree", new_drvdata);
+ if (rc) {
+ dev_err(dev, "Could not register to interrupt %d\n",
+ new_drvdata->irq);
+ goto post_clk_err;
+ }
+ dev_dbg(dev, "Registered to IRQ: %d\n", new_drvdata->irq);
rc = init_cc_regs(new_drvdata, true);
if (rc) {
diff --git a/drivers/crypto/ccree/cc_driver.h b/drivers/crypto/ccree/cc_driver.h
index b76181335c08..7cd99380bf1f 100644
--- a/drivers/crypto/ccree/cc_driver.h
+++ b/drivers/crypto/ccree/cc_driver.h
@@ -53,6 +53,9 @@ enum cc_std_body {
#define CC_COHERENT_CACHE_PARAMS 0xEEE
+#define CC_PINS_FULL 0x0
+#define CC_PINS_SLIM 0x9F
+
/* Maximum DMA mask supported by IP */
#define DMA_BIT_MASK_LEN 48
@@ -67,6 +70,8 @@ enum cc_std_body {
#define CC_SECURITY_DISABLED_MASK BIT(CC_SECURITY_DISABLED_VALUE_BIT_SHIFT)
+#define CC_NVM_IS_IDLE_MASK BIT(CC_NVM_IS_IDLE_VALUE_BIT_SHIFT)
+
#define AXIM_MON_COMP_VALUE GENMASK(CC_AXIM_MON_COMP_VALUE_BIT_SIZE + \
CC_AXIM_MON_COMP_VALUE_BIT_SHIFT, \
CC_AXIM_MON_COMP_VALUE_BIT_SHIFT)
@@ -216,6 +221,7 @@ static inline void dump_byte_array(const char *name, const u8 *the_array,
__dump_byte_array(name, the_array, size);
}
+bool cc_wait_for_reset_completion(struct cc_drvdata *drvdata);
int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe);
void fini_cc_regs(struct cc_drvdata *drvdata);
int cc_clk_on(struct cc_drvdata *drvdata);
diff --git a/drivers/crypto/ccree/cc_host_regs.h b/drivers/crypto/ccree/cc_host_regs.h
index d0764147573f..efe3e1d8b87b 100644
--- a/drivers/crypto/ccree/cc_host_regs.h
+++ b/drivers/crypto/ccree/cc_host_regs.h
@@ -114,6 +114,9 @@
#define CC_HOST_ICR_DSCRPTR_WATERMARK_QUEUE0_CLEAR_BIT_SIZE 0x1UL
#define CC_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SHIFT 0x17UL
#define CC_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SIZE 0x1UL
+#define CC_NVM_IS_IDLE_REG_OFFSET 0x0A10UL
+#define CC_NVM_IS_IDLE_VALUE_BIT_SHIFT 0x0UL
+#define CC_NVM_IS_IDLE_VALUE_BIT_SIZE 0x1UL
#define CC_SECURITY_DISABLED_REG_OFFSET 0x0A1CUL
#define CC_SECURITY_DISABLED_VALUE_BIT_SHIFT 0x0UL
#define CC_SECURITY_DISABLED_VALUE_BIT_SIZE 0x1UL
@@ -203,6 +206,23 @@
#define CC_HOST_POWER_DOWN_EN_REG_OFFSET 0xA78UL
#define CC_HOST_POWER_DOWN_EN_VALUE_BIT_SHIFT 0x0UL
#define CC_HOST_POWER_DOWN_EN_VALUE_BIT_SIZE 0x1UL
+#define CC_HOST_REMOVE_INPUT_PINS_REG_OFFSET 0x0A7CUL
+#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_AES_ENGINE_BIT_SHIFT 0x0UL
+#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_AES_ENGINE_BIT_SIZE 0x1UL
+#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_AES_MAC_ENGINE_BIT_SHIFT 0x1UL
+#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_AES_MAC_ENGINE_BIT_SIZE 0x1UL
+#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_GHASH_ENGINE_BIT_SHIFT 0x2UL
+#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_GHASH_ENGINE_BIT_SIZE 0x1UL
+#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_DES_ENGINE_BIT_SHIFT 0x3UL
+#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_DES_ENGINE_BIT_SIZE 0x1UL
+#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_HASH_ENGINE_BIT_SHIFT 0x4UL
+#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_HASH_ENGINE_BIT_SIZE 0x1UL
+#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_SM3_ENGINE_BIT_SHIFT 0x5UL
+#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_SM3_ENGINE_BIT_SIZE 0x1UL
+#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_SM4_ENGINE_BIT_SHIFT 0x6UL
+#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_SM4_ENGINE_BIT_SIZE 0x1UL
+#define CC_HOST_REMOVE_INPUT_PINS_OTP_DISCONNECTED_BIT_SHIFT 0x7UL
+#define CC_HOST_REMOVE_INPUT_PINS_OTP_DISCONNECTED_BIT_SIZE 0x1UL
// --------------------------------------
// BLOCK: ID_REGISTERS
// --------------------------------------
diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c
index 2dad9c9543c6..899a52f05b7a 100644
--- a/drivers/crypto/ccree/cc_pm.c
+++ b/drivers/crypto/ccree/cc_pm.c
@@ -49,6 +49,11 @@ int cc_pm_resume(struct device *dev)
dev_err(dev, "failed getting clock back on. We're toast.\n");
return rc;
}
+ /* wait for Cryptocell reset completion */
+ if (!cc_wait_for_reset_completion(drvdata)) {
+ dev_err(dev, "Cryptocell reset not completed\n");
+ return -EBUSY;
+ }
cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_DISABLE);
rc = init_cc_regs(drvdata, false);
@@ -101,6 +106,12 @@ int cc_pm_put_suspend(struct device *dev)
return rc;
}
+bool cc_pm_is_dev_suspended(struct device *dev)
+{
+ /* check device state using the runtime PM API */
+ return pm_runtime_suspended(dev);
+}
+
int cc_pm_init(struct cc_drvdata *drvdata)
{
struct device *dev = drvdata_to_dev(drvdata);
diff --git a/drivers/crypto/ccree/cc_pm.h b/drivers/crypto/ccree/cc_pm.h
index 6190cdba5dad..a7d98a5da2e1 100644
--- a/drivers/crypto/ccree/cc_pm.h
+++ b/drivers/crypto/ccree/cc_pm.h
@@ -22,6 +22,7 @@ int cc_pm_suspend(struct device *dev);
int cc_pm_resume(struct device *dev);
int cc_pm_get(struct device *dev);
int cc_pm_put_suspend(struct device *dev);
+bool cc_pm_is_dev_suspended(struct device *dev);
#else
@@ -54,6 +55,12 @@ static inline int cc_pm_put_suspend(struct device *dev)
return 0;
}
+static inline bool cc_pm_is_dev_suspended(struct device *dev)
+{
+ /* if PM is not supported the device is never suspended */
+ return false;
+}
+
#endif
#endif /*__POWER_MGR_H__*/
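
cc_pm_is_dev_suspended() is a thin wrapper around pm_runtime_suspended(), with a stub that always reports "not suspended" when runtime PM is compiled out. A usage sketch — the caller and error path below are invented for illustration, not taken from the patch:

/* Illustrative caller sketch, invented names. */
#include "cc_driver.h"
#include "cc_pm.h"

static int cc_sketch_queue_request(struct cc_drvdata *drvdata)
{
	struct device *dev = drvdata_to_dev(drvdata);

	/* Refuse to touch the registers while the device is powered down. */
	if (cc_pm_is_dev_suspended(dev))
		return -EAGAIN;	/* caller should resume the device first */

	/* ... build descriptors and ring the request queue ... */
	return 0;
}
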
diff --git a/drivers/crypto/hisilicon/sec/sec_drv.h b/drivers/crypto/hisilicon/sec/sec_drv.h
index 2d2f186674ba..4d9063a8b10b 100644
--- a/drivers/crypto/hisilicon/sec/sec_drv.h
+++ b/drivers/crypto/hisilicon/sec/sec_drv.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2016-2017 Hisilicon Limited. */
#ifndef _SEC_DRV_H_
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 86c699c14f84..df43a2c6933b 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -398,6 +398,12 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
/* Processing Engine configuration */
+ /* Token & context configuration */
+ val = EIP197_PE_EIP96_TOKEN_CTRL_CTX_UPDATES |
+ EIP197_PE_EIP96_TOKEN_CTRL_REUSE_CTX |
+ EIP197_PE_EIP96_TOKEN_CTRL_POST_REUSE_CTX;
+ writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_TOKEN_CTRL(pe));
+
/* H/W capabilities selection */
val = EIP197_FUNCTION_RSVD;
val |= EIP197_PROTOCOL_ENCRYPT_ONLY | EIP197_PROTOCOL_HASH_ONLY;
@@ -589,9 +595,9 @@ inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
if (rdesc->result_data.error_code & 0x407f) {
/* Fatal error (bits 0-7, 14) */
dev_err(priv->dev,
- "cipher: result: result descriptor error (%d)\n",
+ "cipher: result: result descriptor error (0x%x)\n",
rdesc->result_data.error_code);
- return -EIO;
+ return -EINVAL;
} else if (rdesc->result_data.error_code == BIT(9)) {
/* Authentication failed */
return -EBADMSG;
@@ -720,11 +726,10 @@ handle_results:
}
acknowledge:
- if (i) {
+ if (i)
writel(EIP197_xDR_PROC_xD_PKT(i) |
EIP197_xDR_PROC_xD_COUNT(tot_descs * priv->config.rd_offset),
EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
- }
/* If the number of requests overflowed the counter, try to proceed more
* requests.
diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h
index 65624a81f0fd..e0c202f33674 100644
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -118,6 +118,7 @@
#define EIP197_PE_ICE_SCRATCH_CTRL(n) (0x0d04 + (0x2000 * (n)))
#define EIP197_PE_ICE_FPP_CTRL(n) (0x0d80 + (0x2000 * (n)))
#define EIP197_PE_ICE_RAM_CTRL(n) (0x0ff0 + (0x2000 * (n)))
+#define EIP197_PE_EIP96_TOKEN_CTRL(n) (0x1000 + (0x2000 * (n)))
#define EIP197_PE_EIP96_FUNCTION_EN(n) (0x1004 + (0x2000 * (n)))
#define EIP197_PE_EIP96_CONTEXT_CTRL(n) (0x1008 + (0x2000 * (n)))
#define EIP197_PE_EIP96_CONTEXT_STAT(n) (0x100c + (0x2000 * (n)))
@@ -249,6 +250,11 @@
#define EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN BIT(0)
#define EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN BIT(1)
+/* EIP197_PE_EIP96_TOKEN_CTRL */
+#define EIP197_PE_EIP96_TOKEN_CTRL_CTX_UPDATES BIT(16)
+#define EIP197_PE_EIP96_TOKEN_CTRL_REUSE_CTX BIT(19)
+#define EIP197_PE_EIP96_TOKEN_CTRL_POST_REUSE_CTX BIT(20)
+
/* EIP197_PE_EIP96_FUNCTION_EN */
#define EIP197_FUNCTION_RSVD (BIT(6) | BIT(15) | BIT(20) | BIT(23))
#define EIP197_PROTOCOL_HASH_ONLY BIT(0)
@@ -333,6 +339,7 @@ struct safexcel_context_record {
#define CONTEXT_CONTROL_IV3 BIT(8)
#define CONTEXT_CONTROL_DIGEST_CNT BIT(9)
#define CONTEXT_CONTROL_COUNTER_MODE BIT(10)
+#define CONTEXT_CONTROL_CRYPTO_STORE BIT(12)
#define CONTEXT_CONTROL_HASH_STORE BIT(19)
/* The hash counter given to the engine in the context has a granularity of
@@ -425,6 +432,10 @@ struct safexcel_token {
#define EIP197_TOKEN_HASH_RESULT_VERIFY BIT(16)
+#define EIP197_TOKEN_CTX_OFFSET(x) (x)
+#define EIP197_TOKEN_DIRECTION_EXTERNAL BIT(11)
+#define EIP197_TOKEN_EXEC_IF_SUCCESSFUL (0x1 << 12)
+
#define EIP197_TOKEN_STAT_LAST_HASH BIT(0)
#define EIP197_TOKEN_STAT_LAST_PACKET BIT(1)
#define EIP197_TOKEN_OPCODE_DIRECTION 0x0
@@ -432,6 +443,7 @@ struct safexcel_token {
#define EIP197_TOKEN_OPCODE_NOOP EIP197_TOKEN_OPCODE_INSERT
#define EIP197_TOKEN_OPCODE_RETRIEVE 0x4
#define EIP197_TOKEN_OPCODE_VERIFY 0xd
+#define EIP197_TOKEN_OPCODE_CTX_ACCESS 0xe
#define EIP197_TOKEN_OPCODE_BYPASS GENMASK(3, 0)
static inline void eip197_noop_token(struct safexcel_token *token)
@@ -442,6 +454,8 @@ static inline void eip197_noop_token(struct safexcel_token *token)
/* Instructions */
#define EIP197_TOKEN_INS_INSERT_HASH_DIGEST 0x1c
+#define EIP197_TOKEN_INS_ORIGIN_IV0 0x14
+#define EIP197_TOKEN_INS_ORIGIN_LEN(x) ((x) << 5)
#define EIP197_TOKEN_INS_TYPE_OUTPUT BIT(5)
#define EIP197_TOKEN_INS_TYPE_HASH BIT(6)
#define EIP197_TOKEN_INS_TYPE_CRYTO BIT(7)
@@ -468,6 +482,7 @@ struct safexcel_control_data_desc {
#define EIP197_OPTION_MAGIC_VALUE BIT(0)
#define EIP197_OPTION_64BIT_CTX BIT(1)
+#define EIP197_OPTION_RC_AUTO (0x2 << 3)
#define EIP197_OPTION_CTX_CTRL_IN_CMD BIT(8)
#define EIP197_OPTION_2_TOKEN_IV_CMD GENMASK(11, 10)
#define EIP197_OPTION_4_TOKEN_IV_CMD GENMASK(11, 9)
@@ -629,7 +644,7 @@ struct safexcel_ahash_export_state {
u32 digest;
u32 state[SHA512_DIGEST_SIZE / sizeof(u32)];
- u8 cache[SHA512_BLOCK_SIZE];
+ u8 cache[SHA512_BLOCK_SIZE << 1];
};
/*
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index de4be10b172f..8cdbdbe35681 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -51,6 +51,8 @@ struct safexcel_cipher_ctx {
struct safexcel_cipher_req {
enum safexcel_cipher_direction direction;
+ /* Number of result descriptors associated with the request */
+ unsigned int rdescs;
bool needs_inv;
};
@@ -59,27 +61,26 @@ static void safexcel_skcipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
u32 length)
{
struct safexcel_token *token;
- unsigned offset = 0;
+ u32 offset = 0, block_sz = 0;
if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) {
switch (ctx->alg) {
case SAFEXCEL_DES:
- offset = DES_BLOCK_SIZE / sizeof(u32);
- memcpy(cdesc->control_data.token, iv, DES_BLOCK_SIZE);
+ block_sz = DES_BLOCK_SIZE;
cdesc->control_data.options |= EIP197_OPTION_2_TOKEN_IV_CMD;
break;
case SAFEXCEL_3DES:
- offset = DES3_EDE_BLOCK_SIZE / sizeof(u32);
- memcpy(cdesc->control_data.token, iv, DES3_EDE_BLOCK_SIZE);
+ block_sz = DES3_EDE_BLOCK_SIZE;
cdesc->control_data.options |= EIP197_OPTION_2_TOKEN_IV_CMD;
break;
-
case SAFEXCEL_AES:
- offset = AES_BLOCK_SIZE / sizeof(u32);
- memcpy(cdesc->control_data.token, iv, AES_BLOCK_SIZE);
+ block_sz = AES_BLOCK_SIZE;
cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
break;
}
+
+ offset = block_sz / sizeof(u32);
+ memcpy(cdesc->control_data.token, iv, block_sz);
}
token = (struct safexcel_token *)(cdesc->control_data.token + offset);
@@ -91,6 +92,25 @@ static void safexcel_skcipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
token[0].instructions = EIP197_TOKEN_INS_LAST |
EIP197_TOKEN_INS_TYPE_CRYTO |
EIP197_TOKEN_INS_TYPE_OUTPUT;
+
+ if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) {
+ u32 last = (EIP197_MAX_TOKENS - 1) - offset;
+
+ token[last].opcode = EIP197_TOKEN_OPCODE_CTX_ACCESS;
+ token[last].packet_length = EIP197_TOKEN_DIRECTION_EXTERNAL |
+ EIP197_TOKEN_EXEC_IF_SUCCESSFUL |
+ EIP197_TOKEN_CTX_OFFSET(0x2);
+ token[last].stat = EIP197_TOKEN_STAT_LAST_HASH |
+ EIP197_TOKEN_STAT_LAST_PACKET;
+ token[last].instructions =
+ EIP197_TOKEN_INS_ORIGIN_LEN(block_sz / sizeof(u32)) |
+ EIP197_TOKEN_INS_ORIGIN_IV0;
+
+ /* Store the updated IV values back in the internal context
+ * registers.
+ */
+ cdesc->control_data.control1 |= CONTEXT_CONTROL_CRYPTO_STORE;
+ }
}
static void safexcel_aead_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
@@ -333,7 +353,10 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
*ret = 0;
- do {
+ if (unlikely(!sreq->rdescs))
+ return 0;
+
+ while (sreq->rdescs--) {
rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
if (IS_ERR(rdesc)) {
dev_err(priv->dev,
@@ -346,21 +369,15 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
*ret = safexcel_rdesc_check_errors(priv, rdesc);
ndesc++;
- } while (!rdesc->last_seg);
+ }
safexcel_complete(priv, ring);
if (src == dst) {
- dma_unmap_sg(priv->dev, src,
- sg_nents_for_len(src, cryptlen),
- DMA_BIDIRECTIONAL);
+ dma_unmap_sg(priv->dev, src, sg_nents(src), DMA_BIDIRECTIONAL);
} else {
- dma_unmap_sg(priv->dev, src,
- sg_nents_for_len(src, cryptlen),
- DMA_TO_DEVICE);
- dma_unmap_sg(priv->dev, dst,
- sg_nents_for_len(dst, cryptlen),
- DMA_FROM_DEVICE);
+ dma_unmap_sg(priv->dev, src, sg_nents(src), DMA_TO_DEVICE);
+ dma_unmap_sg(priv->dev, dst, sg_nents(dst), DMA_FROM_DEVICE);
}
*should_complete = true;
@@ -385,26 +402,21 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
int i, ret = 0;
if (src == dst) {
- nr_src = dma_map_sg(priv->dev, src,
- sg_nents_for_len(src, totlen),
+ nr_src = dma_map_sg(priv->dev, src, sg_nents(src),
DMA_BIDIRECTIONAL);
nr_dst = nr_src;
if (!nr_src)
return -EINVAL;
} else {
- nr_src = dma_map_sg(priv->dev, src,
- sg_nents_for_len(src, totlen),
+ nr_src = dma_map_sg(priv->dev, src, sg_nents(src),
DMA_TO_DEVICE);
if (!nr_src)
return -EINVAL;
- nr_dst = dma_map_sg(priv->dev, dst,
- sg_nents_for_len(dst, totlen),
+ nr_dst = dma_map_sg(priv->dev, dst, sg_nents(dst),
DMA_FROM_DEVICE);
if (!nr_dst) {
- dma_unmap_sg(priv->dev, src,
- sg_nents_for_len(src, totlen),
- DMA_TO_DEVICE);
+ dma_unmap_sg(priv->dev, src, nr_src, DMA_TO_DEVICE);
return -EINVAL;
}
}
@@ -454,7 +466,7 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
/* result descriptors */
for_each_sg(dst, sg, nr_dst, i) {
- bool first = !i, last = (i == nr_dst - 1);
+ bool first = !i, last = sg_is_last(sg);
u32 len = sg_dma_len(sg);
rdesc = safexcel_add_rdesc(priv, ring, first, last,
@@ -483,16 +495,10 @@ cdesc_rollback:
safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
if (src == dst) {
- dma_unmap_sg(priv->dev, src,
- sg_nents_for_len(src, totlen),
- DMA_BIDIRECTIONAL);
+ dma_unmap_sg(priv->dev, src, nr_src, DMA_BIDIRECTIONAL);
} else {
- dma_unmap_sg(priv->dev, src,
- sg_nents_for_len(src, totlen),
- DMA_TO_DEVICE);
- dma_unmap_sg(priv->dev, dst,
- sg_nents_for_len(dst, totlen),
- DMA_FROM_DEVICE);
+ dma_unmap_sg(priv->dev, src, nr_src, DMA_TO_DEVICE);
+ dma_unmap_sg(priv->dev, dst, nr_dst, DMA_FROM_DEVICE);
}
return ret;
@@ -501,6 +507,7 @@ cdesc_rollback:
static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
int ring,
struct crypto_async_request *base,
+ struct safexcel_cipher_req *sreq,
bool *should_complete, int *ret)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
@@ -509,7 +516,10 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
*ret = 0;
- do {
+ if (unlikely(!sreq->rdescs))
+ return 0;
+
+ while (sreq->rdescs--) {
rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
if (IS_ERR(rdesc)) {
dev_err(priv->dev,
@@ -522,7 +532,7 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
*ret = safexcel_rdesc_check_errors(priv, rdesc);
ndesc++;
- } while (!rdesc->last_seg);
+ }
safexcel_complete(priv, ring);
@@ -560,16 +570,35 @@ static int safexcel_skcipher_handle_result(struct safexcel_crypto_priv *priv,
{
struct skcipher_request *req = skcipher_request_cast(async);
struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(async->tfm);
int err;
if (sreq->needs_inv) {
sreq->needs_inv = false;
- err = safexcel_handle_inv_result(priv, ring, async,
+ err = safexcel_handle_inv_result(priv, ring, async, sreq,
should_complete, ret);
} else {
err = safexcel_handle_req_result(priv, ring, async, req->src,
req->dst, req->cryptlen, sreq,
should_complete, ret);
+
+ if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) {
+ u32 block_sz = 0;
+
+ switch (ctx->alg) {
+ case SAFEXCEL_DES:
+ block_sz = DES_BLOCK_SIZE;
+ break;
+ case SAFEXCEL_3DES:
+ block_sz = DES3_EDE_BLOCK_SIZE;
+ break;
+ case SAFEXCEL_AES:
+ block_sz = AES_BLOCK_SIZE;
+ break;
+ }
+
+ memcpy(req->iv, ctx->base.ctxr->data, block_sz);
+ }
}
return err;
@@ -587,7 +616,7 @@ static int safexcel_aead_handle_result(struct safexcel_crypto_priv *priv,
if (sreq->needs_inv) {
sreq->needs_inv = false;
- err = safexcel_handle_inv_result(priv, ring, async,
+ err = safexcel_handle_inv_result(priv, ring, async, sreq,
should_complete, ret);
} else {
err = safexcel_handle_req_result(priv, ring, async, req->src,
@@ -633,6 +662,8 @@ static int safexcel_skcipher_send(struct crypto_async_request *async, int ring,
ret = safexcel_send_req(async, ring, sreq, req->src,
req->dst, req->cryptlen, 0, 0, req->iv,
commands, results);
+
+ sreq->rdescs = *results;
return ret;
}
@@ -655,6 +686,7 @@ static int safexcel_aead_send(struct crypto_async_request *async, int ring,
req->cryptlen, req->assoclen,
crypto_aead_authsize(tfm), req->iv,
commands, results);
+ sreq->rdescs = *results;
return ret;
}
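
The cipher path now records how many result descriptors each request produced (sreq->rdescs = *results) and the completion handlers consume exactly that many instead of walking until last_seg. A stand-alone sketch of that producer/consumer bookkeeping, with invented names (the real driver re-initializes the count on every send):

/* Stand-alone sketch of the rdescs bookkeeping, invented names. */
#include <stdio.h>

struct sketch_req {
	unsigned int rdescs;	/* result descriptors still to reap */
};

static void submit(struct sketch_req *req, unsigned int results_written)
{
	req->rdescs = results_written;	/* producer side: remember the count */
}

static int complete(struct sketch_req *req)
{
	int reaped = 0;

	if (!req->rdescs)	/* nothing queued for this request */
		return 0;

	while (req->rdescs--)	/* consumer side: pop exactly that many */
		reaped++;

	return reaped;
}

int main(void)
{
	struct sketch_req req;

	submit(&req, 3);
	printf("reaped %d descriptors\n", complete(&req));
	return 0;
}
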
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index ac9282c1a5ec..a80a5e757b1f 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -41,19 +41,21 @@ struct safexcel_ahash_req {
u64 len[2];
u64 processed[2];
- u8 cache[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
+ u8 cache[SHA512_BLOCK_SIZE << 1] __aligned(sizeof(u32));
dma_addr_t cache_dma;
unsigned int cache_sz;
- u8 cache_next[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
+ u8 cache_next[SHA512_BLOCK_SIZE << 1] __aligned(sizeof(u32));
};
static inline u64 safexcel_queued_len(struct safexcel_ahash_req *req)
{
- if (req->len[1] > req->processed[1])
- return 0xffffffff - (req->len[0] - req->processed[0]);
+ u64 len, processed;
- return req->len[0] - req->processed[0];
+ len = (0xffffffff * req->len[1]) + req->len[0];
+ processed = (0xffffffff * req->processed[1]) + req->processed[0];
+
+ return len - processed;
}
static void safexcel_hash_token(struct safexcel_command_desc *cdesc,
@@ -87,6 +89,9 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
cdesc->control_data.control0 |= ctx->alg;
cdesc->control_data.control0 |= req->digest;
+ if (!req->finish)
+ cdesc->control_data.control0 |= CONTEXT_CONTROL_NO_FINISH_HASH;
+
if (req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) {
if (req->processed[0] || req->processed[1]) {
if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_MD5)
@@ -105,9 +110,6 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
cdesc->control_data.control0 |= CONTEXT_CONTROL_RESTART_HASH;
}
- if (!req->finish)
- cdesc->control_data.control0 |= CONTEXT_CONTROL_NO_FINISH_HASH;
-
/*
* Copy the input digest if needed, and setup the context
* fields. Do this now as we need it to setup the first command
@@ -183,6 +185,7 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
dma_unmap_single(priv->dev, sreq->cache_dma, sreq->cache_sz,
DMA_TO_DEVICE);
sreq->cache_dma = 0;
+ sreq->cache_sz = 0;
}
if (sreq->finish)
@@ -209,11 +212,15 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
struct safexcel_result_desc *rdesc;
struct scatterlist *sg;
- int i, extra, n_cdesc = 0, ret = 0;
- u64 queued, len, cache_len;
+ int i, extra = 0, n_cdesc = 0, ret = 0;
+ u64 queued, len, cache_len, cache_max;
+
+ cache_max = crypto_ahash_blocksize(ahash);
+ if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC)
+ cache_max <<= 1;
queued = len = safexcel_queued_len(req);
- if (queued <= crypto_ahash_blocksize(ahash))
+ if (queued <= cache_max)
cache_len = queued;
else
cache_len = queued - areq->nbytes;
@@ -223,26 +230,23 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
* fit into full blocks, cache it for the next send() call.
*/
extra = queued & (crypto_ahash_blocksize(ahash) - 1);
+
+ if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC &&
+ extra < crypto_ahash_blocksize(ahash))
+ extra += crypto_ahash_blocksize(ahash);
+
+ /* If this is not the last request and the queued data
+ * is a multiple of a block, cache the last one for now.
+ */
if (!extra)
- /* If this is not the last request and the queued data
- * is a multiple of a block, cache the last one for now.
- */
extra = crypto_ahash_blocksize(ahash);
- if (extra) {
- sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
- req->cache_next, extra,
- areq->nbytes - extra);
-
- queued -= extra;
- len -= extra;
+ sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
+ req->cache_next, extra,
+ areq->nbytes - extra);
- if (!queued) {
- *commands = 0;
- *results = 0;
- return 0;
- }
- }
+ queued -= extra;
+ len -= extra;
}
/* Add a command descriptor for the cached data, if any */
@@ -269,8 +273,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
}
/* Now handle the current ahash request buffer(s) */
- req->nents = dma_map_sg(priv->dev, areq->src,
- sg_nents_for_len(areq->src, areq->nbytes),
+ req->nents = dma_map_sg(priv->dev, areq->src, sg_nents(areq->src),
DMA_TO_DEVICE);
if (!req->nents) {
ret = -ENOMEM;
@@ -345,6 +348,7 @@ unmap_cache:
if (req->cache_dma) {
dma_unmap_single(priv->dev, req->cache_dma, req->cache_sz,
DMA_TO_DEVICE);
+ req->cache_dma = 0;
req->cache_sz = 0;
}
@@ -486,7 +490,7 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
struct safexcel_inv_result result = {};
int ring = ctx->base.ring;
- memset(req, 0, sizeof(struct ahash_request));
+ memset(req, 0, EIP197_AHASH_REQ_SIZE);
/* create invalidation request */
init_completion(&result.completion);
@@ -519,10 +523,9 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
/* safexcel_ahash_cache: cache data until at least one request can be sent to
* the engine, aka. when there is at least 1 block size in the pipe.
*/
-static int safexcel_ahash_cache(struct ahash_request *areq)
+static int safexcel_ahash_cache(struct ahash_request *areq, u32 cache_max)
{
struct safexcel_ahash_req *req = ahash_request_ctx(areq);
- struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
u64 queued, cache_len;
/* queued: everything accepted by the driver which will be handled by
@@ -539,7 +542,7 @@ static int safexcel_ahash_cache(struct ahash_request *areq)
* In case there isn't enough bytes to proceed (less than a
* block size), cache the data until we have enough.
*/
- if (cache_len + areq->nbytes <= crypto_ahash_blocksize(ahash)) {
+ if (cache_len + areq->nbytes <= cache_max) {
sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
req->cache + cache_len,
areq->nbytes, 0);
@@ -599,6 +602,7 @@ static int safexcel_ahash_update(struct ahash_request *areq)
{
struct safexcel_ahash_req *req = ahash_request_ctx(areq);
struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
+ u32 cache_max;
/* If the request is 0 length, do nothing */
if (!areq->nbytes)
@@ -608,7 +612,11 @@ static int safexcel_ahash_update(struct ahash_request *areq)
if (req->len[0] < areq->nbytes)
req->len[1]++;
- safexcel_ahash_cache(areq);
+ cache_max = crypto_ahash_blocksize(ahash);
+ if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC)
+ cache_max <<= 1;
+
+ safexcel_ahash_cache(areq, cache_max);
/*
* We're not doing partial updates when performing an hmac request.
@@ -621,7 +629,7 @@ static int safexcel_ahash_update(struct ahash_request *areq)
return safexcel_ahash_enqueue(areq);
if (!req->last_req &&
- safexcel_queued_len(req) > crypto_ahash_blocksize(ahash))
+ safexcel_queued_len(req) > cache_max)
return safexcel_ahash_enqueue(areq);
return 0;
@@ -678,6 +686,11 @@ static int safexcel_ahash_export(struct ahash_request *areq, void *out)
struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
struct safexcel_ahash_req *req = ahash_request_ctx(areq);
struct safexcel_ahash_export_state *export = out;
+ u32 cache_sz;
+
+ cache_sz = crypto_ahash_blocksize(ahash);
+ if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC)
+ cache_sz <<= 1;
export->len[0] = req->len[0];
export->len[1] = req->len[1];
@@ -687,7 +700,7 @@ static int safexcel_ahash_export(struct ahash_request *areq, void *out)
export->digest = req->digest;
memcpy(export->state, req->state, req->state_sz);
- memcpy(export->cache, req->cache, crypto_ahash_blocksize(ahash));
+ memcpy(export->cache, req->cache, cache_sz);
return 0;
}
@@ -697,12 +710,17 @@ static int safexcel_ahash_import(struct ahash_request *areq, const void *in)
struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
struct safexcel_ahash_req *req = ahash_request_ctx(areq);
const struct safexcel_ahash_export_state *export = in;
+ u32 cache_sz;
int ret;
ret = crypto_ahash_init(areq);
if (ret)
return ret;
+ cache_sz = crypto_ahash_blocksize(ahash);
+ if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC)
+ cache_sz <<= 1;
+
req->len[0] = export->len[0];
req->len[1] = export->len[1];
req->processed[0] = export->processed[0];
@@ -710,7 +728,7 @@ static int safexcel_ahash_import(struct ahash_request *areq, const void *in)
req->digest = export->digest;
- memcpy(req->cache, export->cache, crypto_ahash_blocksize(ahash));
+ memcpy(req->cache, export->cache, cache_sz);
memcpy(req->state, export->state, req->state_sz);
return 0;
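
The reworked safexcel_queued_len() rebuilds full byte counts from the driver's split {low, high} counters before subtracting, rather than special-casing a high-word difference. A stand-alone sketch of the same arithmetic, keeping the patch's 0xffffffff weighting for the high word; the values in main() are arbitrary:

/* Stand-alone sketch of the queued-length arithmetic, invented names. */
#include <stdint.h>
#include <stdio.h>

static uint64_t queued_len(const uint64_t len[2], const uint64_t processed[2])
{
	/* High word is weighted by 0xffffffff, matching the driver's counters. */
	uint64_t total    = 0xffffffffULL * len[1] + len[0];
	uint64_t consumed = 0xffffffffULL * processed[1] + processed[0];

	return total - consumed;
}

int main(void)
{
	uint64_t len[2] = { 16, 1 };	/* high word wrapped once, plus 16 bytes */
	uint64_t processed[2] = { 4000000000ULL, 0 };

	printf("%llu bytes still queued\n",
	       (unsigned long long)queued_len(len, processed));
	return 0;
}
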
diff --git a/drivers/crypto/inside-secure/safexcel_ring.c b/drivers/crypto/inside-secure/safexcel_ring.c
index eb75fa684876..142bc3f5c45c 100644
--- a/drivers/crypto/inside-secure/safexcel_ring.c
+++ b/drivers/crypto/inside-secure/safexcel_ring.c
@@ -145,6 +145,9 @@ struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *pr
(lower_32_bits(context) & GENMASK(31, 2)) >> 2;
cdesc->control_data.context_hi = upper_32_bits(context);
+ if (priv->version == EIP197B || priv->version == EIP197D)
+ cdesc->control_data.options |= EIP197_OPTION_RC_AUTO;
+
/* TODO: large xform HMAC with SHA-384/512 uses refresh = 3 */
cdesc->control_data.refresh = 2;
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index e5cf3a59c420..acedafe3fa98 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -100,7 +100,7 @@ struct buffer_desc {
u16 pkt_len;
u16 buf_len;
#endif
- u32 phys_addr;
+ dma_addr_t phys_addr;
u32 __reserved[4];
struct buffer_desc *next;
enum dma_data_direction dir;
@@ -117,9 +117,9 @@ struct crypt_ctl {
u8 mode; /* NPE_OP_* operation mode */
#endif
u8 iv[MAX_IVLEN]; /* IV for CBC mode or CTR IV for CTR mode */
- u32 icv_rev_aes; /* icv or rev aes */
- u32 src_buf;
- u32 dst_buf;
+ dma_addr_t icv_rev_aes; /* icv or rev aes */
+ dma_addr_t src_buf;
+ dma_addr_t dst_buf;
#ifdef __ARMEB__
u16 auth_offs; /* Authentication start offset */
u16 auth_len; /* Authentication data length */
@@ -320,7 +320,8 @@ static struct crypt_ctl *get_crypt_desc_emerg(void)
}
}
-static void free_buf_chain(struct device *dev, struct buffer_desc *buf,u32 phys)
+static void free_buf_chain(struct device *dev, struct buffer_desc *buf,
+ dma_addr_t phys)
{
while (buf) {
struct buffer_desc *buf1;
@@ -602,7 +603,7 @@ static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
struct buffer_desc *buf;
int i;
u8 *pad;
- u32 pad_phys, buf_phys;
+ dma_addr_t pad_phys, buf_phys;
BUILD_BUG_ON(NPE_CTX_LEN < HMAC_PAD_BLOCKLEN);
pad = dma_pool_alloc(ctx_pool, GFP_KERNEL, &pad_phys);
@@ -787,7 +788,7 @@ static struct buffer_desc *chainup_buffers(struct device *dev,
for (; nbytes > 0; sg = sg_next(sg)) {
unsigned len = min(nbytes, sg->length);
struct buffer_desc *next_buf;
- u32 next_buf_phys;
+ dma_addr_t next_buf_phys;
void *ptr;
nbytes -= len;
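
The ixp4xx changes switch descriptor address fields and locals from u32 to dma_addr_t, which can be wider than 32 bits depending on the platform's DMA configuration, so storing a mapping handle in a u32 risks truncation. A minimal sketch of the pattern, with invented structure and field names:

/* Illustrative sketch; structure and field names are invented. */
#include <linux/types.h>

struct sketch_desc {
	dma_addr_t src_buf;	/* full-width bus address, not u32 */
	dma_addr_t dst_buf;
};

static void sketch_fill_desc(struct sketch_desc *desc,
			     dma_addr_t src, dma_addr_t dst)
{
	desc->src_buf = src;	/* no truncation on wide-DMA configurations */
	desc->dst_buf = dst;
}
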
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index bdc4c42d3ac8..f1fa637cb029 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -986,8 +986,6 @@ static int mxs_dcp_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct dcp *sdcp = NULL;
int i, ret;
-
- struct resource *iores;
int dcp_vmi_irq, dcp_irq;
if (global_sdcp) {
@@ -995,7 +993,6 @@ static int mxs_dcp_probe(struct platform_device *pdev)
return -ENODEV;
}
- iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dcp_vmi_irq = platform_get_irq(pdev, 0);
if (dcp_vmi_irq < 0) {
dev_err(dev, "Failed to get IRQ: (%d)!\n", dcp_vmi_irq);
@@ -1013,7 +1010,7 @@ static int mxs_dcp_probe(struct platform_device *pdev)
return -ENOMEM;
sdcp->dev = dev;
- sdcp->base = devm_ioremap_resource(dev, iores);
+ sdcp->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sdcp->base))
return PTR_ERR(sdcp->base);
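
devm_platform_ioremap_resource() folds platform_get_resource() plus devm_ioremap_resource() into a single call, which is the shape the mxs-dcp (and later sahara) probe now uses. A minimal probe sketch under that assumption; the function name is invented:

/* Minimal probe sketch, invented name. */
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/io.h>

static int sketch_probe(struct platform_device *pdev)
{
	void __iomem *base;

	/* One call replaces platform_get_resource() + devm_ioremap_resource(). */
	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* ... register clocks, IRQs, crypto algs ... */
	return 0;
}
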
diff --git a/drivers/crypto/nx/nx-842-powernv.c b/drivers/crypto/nx/nx-842-powernv.c
index 4acbc47973e9..e78ff5c65ed6 100644
--- a/drivers/crypto/nx/nx-842-powernv.c
+++ b/drivers/crypto/nx/nx-842-powernv.c
@@ -27,8 +27,6 @@ MODULE_ALIAS_CRYPTO("842-nx");
#define WORKMEM_ALIGN (CRB_ALIGN)
#define CSB_WAIT_MAX (5000) /* ms */
#define VAS_RETRIES (10)
-/* # of requests allowed per RxFIFO at a time. 0 for unlimited */
-#define MAX_CREDITS_PER_RXFIFO (1024)
struct nx842_workmem {
/* Below fields must be properly aligned */
@@ -812,7 +810,11 @@ static int __init vas_cfg_coproc_info(struct device_node *dn, int chip_id,
rxattr.lnotify_lpid = lpid;
rxattr.lnotify_pid = pid;
rxattr.lnotify_tid = tid;
- rxattr.wcreds_max = MAX_CREDITS_PER_RXFIFO;
+ /*
+ * Maximum RX window credits cannot exceed the number of CRBs in the
+ * RxFIFO; otherwise an RxFIFO overrun can trigger a checkstop.
+ */
+ rxattr.wcreds_max = fifo_size / CRB_SIZE;
/*
* Open a VAS receive window which is used to configure RxFIFO
diff --git a/drivers/crypto/nx/nx-842-pseries.c b/drivers/crypto/nx/nx-842-pseries.c
index 5c4aa606208c..2de5e3672e42 100644
--- a/drivers/crypto/nx/nx-842-pseries.c
+++ b/drivers/crypto/nx/nx-842-pseries.c
@@ -856,7 +856,7 @@ static ssize_t nx842_##_name##_show(struct device *dev, \
rcu_read_lock(); \
local_devdata = rcu_dereference(devdata); \
if (local_devdata) \
- p = snprintf(buf, PAGE_SIZE, "%ld\n", \
+ p = snprintf(buf, PAGE_SIZE, "%lld\n", \
atomic64_read(&local_devdata->counters->_name)); \
rcu_read_unlock(); \
return p; \
@@ -909,7 +909,7 @@ static ssize_t nx842_timehist_show(struct device *dev,
}
for (i = 0; i < (NX842_HIST_SLOTS - 2); i++) {
- bytes = snprintf(p, bytes_remain, "%u-%uus:\t%ld\n",
+ bytes = snprintf(p, bytes_remain, "%u-%uus:\t%lld\n",
i ? (2<<(i-1)) : 0, (2<<i)-1,
atomic64_read(&times[i]));
bytes_remain -= bytes;
@@ -917,7 +917,7 @@ static ssize_t nx842_timehist_show(struct device *dev,
}
/* The last bucket holds everything over
* 2<<(NX842_HIST_SLOTS - 2) us */
- bytes = snprintf(p, bytes_remain, "%uus - :\t%ld\n",
+ bytes = snprintf(p, bytes_remain, "%uus - :\t%lld\n",
2<<(NX842_HIST_SLOTS - 2),
atomic64_read(&times[(NX842_HIST_SLOTS - 1)]));
p += bytes;
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
index 428c273a1ab6..28817880c76d 100644
--- a/drivers/crypto/nx/nx.c
+++ b/drivers/crypto/nx/nx.c
@@ -569,9 +569,7 @@ static int nx_register_algs(void)
memset(&nx_driver.stats, 0, sizeof(struct nx_stats));
- rc = NX_DEBUGFS_INIT(&nx_driver);
- if (rc)
- goto out;
+ NX_DEBUGFS_INIT(&nx_driver);
nx_driver.of.status = NX_OKAY;
diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h
index c3e54af18645..c6b5a3be02be 100644
--- a/drivers/crypto/nx/nx.h
+++ b/drivers/crypto/nx/nx.h
@@ -76,20 +76,12 @@ struct nx_stats {
atomic_t last_error_pid;
};
-struct nx_debugfs {
- struct dentry *dfs_root;
- struct dentry *dfs_aes_ops, *dfs_aes_bytes;
- struct dentry *dfs_sha256_ops, *dfs_sha256_bytes;
- struct dentry *dfs_sha512_ops, *dfs_sha512_bytes;
- struct dentry *dfs_errors, *dfs_last_error, *dfs_last_error_pid;
-};
-
struct nx_crypto_driver {
struct nx_stats stats;
struct nx_of of;
struct vio_dev *viodev;
struct vio_driver viodriver;
- struct nx_debugfs dfs;
+ struct dentry *dfs_root;
};
#define NX_GCM4106_NONCE_LEN (4)
@@ -177,7 +169,7 @@ struct nx_sg *nx_walk_and_build(struct nx_sg *, unsigned int,
#define NX_DEBUGFS_INIT(drv) nx_debugfs_init(drv)
#define NX_DEBUGFS_FINI(drv) nx_debugfs_fini(drv)
-int nx_debugfs_init(struct nx_crypto_driver *);
+void nx_debugfs_init(struct nx_crypto_driver *);
void nx_debugfs_fini(struct nx_crypto_driver *);
#else
#define NX_DEBUGFS_INIT(drv) (0)
diff --git a/drivers/crypto/nx/nx_debugfs.c b/drivers/crypto/nx/nx_debugfs.c
index 03e4f0363c6a..e0d44a5512ab 100644
--- a/drivers/crypto/nx/nx_debugfs.c
+++ b/drivers/crypto/nx/nx_debugfs.c
@@ -30,62 +30,37 @@
* Documentation/ABI/testing/debugfs-pfo-nx-crypto
*/
-int nx_debugfs_init(struct nx_crypto_driver *drv)
+void nx_debugfs_init(struct nx_crypto_driver *drv)
{
- struct nx_debugfs *dfs = &drv->dfs;
+ struct dentry *root;
- dfs->dfs_root = debugfs_create_dir(NX_NAME, NULL);
+ root = debugfs_create_dir(NX_NAME, NULL);
+ drv->dfs_root = root;
- dfs->dfs_aes_ops =
- debugfs_create_u32("aes_ops",
- S_IRUSR | S_IRGRP | S_IROTH,
- dfs->dfs_root, (u32 *)&drv->stats.aes_ops);
- dfs->dfs_sha256_ops =
- debugfs_create_u32("sha256_ops",
- S_IRUSR | S_IRGRP | S_IROTH,
- dfs->dfs_root,
- (u32 *)&drv->stats.sha256_ops);
- dfs->dfs_sha512_ops =
- debugfs_create_u32("sha512_ops",
- S_IRUSR | S_IRGRP | S_IROTH,
- dfs->dfs_root,
- (u32 *)&drv->stats.sha512_ops);
- dfs->dfs_aes_bytes =
- debugfs_create_u64("aes_bytes",
- S_IRUSR | S_IRGRP | S_IROTH,
- dfs->dfs_root,
- (u64 *)&drv->stats.aes_bytes);
- dfs->dfs_sha256_bytes =
- debugfs_create_u64("sha256_bytes",
- S_IRUSR | S_IRGRP | S_IROTH,
- dfs->dfs_root,
- (u64 *)&drv->stats.sha256_bytes);
- dfs->dfs_sha512_bytes =
- debugfs_create_u64("sha512_bytes",
- S_IRUSR | S_IRGRP | S_IROTH,
- dfs->dfs_root,
- (u64 *)&drv->stats.sha512_bytes);
- dfs->dfs_errors =
- debugfs_create_u32("errors",
- S_IRUSR | S_IRGRP | S_IROTH,
- dfs->dfs_root, (u32 *)&drv->stats.errors);
- dfs->dfs_last_error =
- debugfs_create_u32("last_error",
- S_IRUSR | S_IRGRP | S_IROTH,
- dfs->dfs_root,
- (u32 *)&drv->stats.last_error);
- dfs->dfs_last_error_pid =
- debugfs_create_u32("last_error_pid",
- S_IRUSR | S_IRGRP | S_IROTH,
- dfs->dfs_root,
- (u32 *)&drv->stats.last_error_pid);
- return 0;
+ debugfs_create_u32("aes_ops", S_IRUSR | S_IRGRP | S_IROTH,
+ root, (u32 *)&drv->stats.aes_ops);
+ debugfs_create_u32("sha256_ops", S_IRUSR | S_IRGRP | S_IROTH,
+ root, (u32 *)&drv->stats.sha256_ops);
+ debugfs_create_u32("sha512_ops", S_IRUSR | S_IRGRP | S_IROTH,
+ root, (u32 *)&drv->stats.sha512_ops);
+ debugfs_create_u64("aes_bytes", S_IRUSR | S_IRGRP | S_IROTH,
+ root, (u64 *)&drv->stats.aes_bytes);
+ debugfs_create_u64("sha256_bytes", S_IRUSR | S_IRGRP | S_IROTH,
+ root, (u64 *)&drv->stats.sha256_bytes);
+ debugfs_create_u64("sha512_bytes", S_IRUSR | S_IRGRP | S_IROTH,
+ root, (u64 *)&drv->stats.sha512_bytes);
+ debugfs_create_u32("errors", S_IRUSR | S_IRGRP | S_IROTH,
+ root, (u32 *)&drv->stats.errors);
+ debugfs_create_u32("last_error", S_IRUSR | S_IRGRP | S_IROTH,
+ root, (u32 *)&drv->stats.last_error);
+ debugfs_create_u32("last_error_pid", S_IRUSR | S_IRGRP | S_IROTH,
+ root, (u32 *)&drv->stats.last_error_pid);
}
void
nx_debugfs_fini(struct nx_crypto_driver *drv)
{
- debugfs_remove_recursive(drv->dfs.dfs_root);
+ debugfs_remove_recursive(drv->dfs_root);
}
#endif
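
After the nx debugfs rework only the directory dentry is kept, because debugfs_remove_recursive() on the root tears down every file created under it; the per-file return values can simply be ignored. A sketch of that pattern with invented names:

/* Illustrative sketch of the "keep only the root dentry" pattern. */
#include <linux/debugfs.h>

struct sketch_stats {
	u32 ops;
	u64 bytes;
};

static struct dentry *sketch_root;

static void sketch_debugfs_init(struct sketch_stats *st)
{
	sketch_root = debugfs_create_dir("sketch", NULL);

	/* Per-file dentries are not saved; the recursive remove cleans them up. */
	debugfs_create_u32("ops", 0444, sketch_root, &st->ops);
	debugfs_create_u64("bytes", 0444, sketch_root, &st->bytes);
}

static void sketch_debugfs_fini(void)
{
	debugfs_remove_recursive(sketch_root);
}
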
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index c8d401646902..b50eb55f8f57 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -131,7 +131,6 @@ struct qat_alg_ablkcipher_ctx {
struct icp_qat_fw_la_bulk_req dec_fw_req;
struct qat_crypto_instance *inst;
struct crypto_tfm *tfm;
- spinlock_t lock; /* protects qat_alg_ablkcipher_ctx struct */
};
static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
@@ -223,6 +222,9 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
return -EFAULT;
offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
+ if (offset < 0)
+ return -EFAULT;
+
hash_state_out = (__be32 *)(hash->sha.state1 + offset);
hash512_state_out = (__be64 *)hash_state_out;
@@ -253,7 +255,24 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
return 0;
}
-static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
+static void qat_alg_init_hdr_iv_updt(struct icp_qat_fw_comn_req_hdr *header)
+{
+ ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
+ ICP_QAT_FW_CIPH_IV_64BIT_PTR);
+ ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_UPDATE_STATE);
+}
+
+static void qat_alg_init_hdr_no_iv_updt(struct icp_qat_fw_comn_req_hdr *header)
+{
+ ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
+ ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
+ ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_NO_UPDATE_STATE);
+}
+
+static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
+ int aead)
{
header->hdr_flags =
ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
@@ -263,12 +282,12 @@ static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
QAT_COMN_PTR_TYPE_SGL);
ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
ICP_QAT_FW_LA_PARTIAL_NONE);
- ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
- ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
+ if (aead)
+ qat_alg_init_hdr_no_iv_updt(header);
+ else
+ qat_alg_init_hdr_iv_updt(header);
ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
ICP_QAT_FW_LA_NO_PROTO);
- ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_NO_UPDATE_STATE);
}
static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
@@ -303,7 +322,7 @@ static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
return -EFAULT;
/* Request setup */
- qat_alg_init_common_hdr(header);
+ qat_alg_init_common_hdr(header, 1);
header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
@@ -390,7 +409,7 @@ static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
return -EFAULT;
/* Request setup */
- qat_alg_init_common_hdr(header);
+ qat_alg_init_common_hdr(header, 1);
header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
@@ -454,7 +473,7 @@ static void qat_alg_ablkcipher_init_com(struct qat_alg_ablkcipher_ctx *ctx,
struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;
memcpy(cd->aes.key, key, keylen);
- qat_alg_init_common_hdr(header);
+ qat_alg_init_common_hdr(header, 0);
header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
cd_pars->u.s.content_desc_params_sz =
sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
@@ -576,45 +595,52 @@ bad_key:
return -EINVAL;
}
-static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
+static int qat_alg_aead_rekey(struct crypto_aead *tfm, const uint8_t *key,
+ unsigned int keylen)
+{
+ struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
+
+ memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
+ memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
+ memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
+ memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
+
+ return qat_alg_aead_init_sessions(tfm, key, keylen,
+ ICP_QAT_HW_CIPHER_CBC_MODE);
+}
+
+static int qat_alg_aead_newkey(struct crypto_aead *tfm, const uint8_t *key,
unsigned int keylen)
{
struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct qat_crypto_instance *inst = NULL;
+ int node = get_current_node();
struct device *dev;
+ int ret;
- if (ctx->enc_cd) {
- /* rekeying */
- dev = &GET_DEV(ctx->inst->accel_dev);
- memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
- memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
- memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
- memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
- } else {
- /* new key */
- int node = get_current_node();
- struct qat_crypto_instance *inst =
- qat_crypto_get_instance_node(node);
- if (!inst) {
- return -EINVAL;
- }
-
- dev = &GET_DEV(inst->accel_dev);
- ctx->inst = inst;
- ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
- &ctx->enc_cd_paddr,
- GFP_ATOMIC);
- if (!ctx->enc_cd) {
- return -ENOMEM;
- }
- ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
- &ctx->dec_cd_paddr,
- GFP_ATOMIC);
- if (!ctx->dec_cd) {
- goto out_free_enc;
- }
+ inst = qat_crypto_get_instance_node(node);
+ if (!inst)
+ return -EINVAL;
+ dev = &GET_DEV(inst->accel_dev);
+ ctx->inst = inst;
+ ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
+ &ctx->enc_cd_paddr,
+ GFP_ATOMIC);
+ if (!ctx->enc_cd) {
+ ret = -ENOMEM;
+ goto out_free_inst;
+ }
+ ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
+ &ctx->dec_cd_paddr,
+ GFP_ATOMIC);
+ if (!ctx->dec_cd) {
+ ret = -ENOMEM;
+ goto out_free_enc;
}
- if (qat_alg_aead_init_sessions(tfm, key, keylen,
- ICP_QAT_HW_CIPHER_CBC_MODE))
+
+ ret = qat_alg_aead_init_sessions(tfm, key, keylen,
+ ICP_QAT_HW_CIPHER_CBC_MODE);
+ if (ret)
goto out_free_all;
return 0;
@@ -629,7 +655,21 @@ out_free_enc:
dma_free_coherent(dev, sizeof(struct qat_alg_cd),
ctx->enc_cd, ctx->enc_cd_paddr);
ctx->enc_cd = NULL;
- return -ENOMEM;
+out_free_inst:
+ ctx->inst = NULL;
+ qat_crypto_put_instance(inst);
+ return ret;
+}
+
+static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
+ unsigned int keylen)
+{
+ struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
+
+ if (ctx->enc_cd)
+ return qat_alg_aead_rekey(tfm, key, keylen);
+ else
+ return qat_alg_aead_newkey(tfm, key, keylen);
}
static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
@@ -677,8 +717,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
dma_addr_t blp;
dma_addr_t bloutp = 0;
struct scatterlist *sg;
- size_t sz_out, sz = sizeof(struct qat_alg_buf_list) +
- ((1 + n) * sizeof(struct qat_alg_buf));
+ size_t sz_out, sz = struct_size(bufl, bufers, n + 1);
if (unlikely(!n))
return -EINVAL;
@@ -715,8 +754,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
struct qat_alg_buf *bufers;
n = sg_nents(sglout);
- sz_out = sizeof(struct qat_alg_buf_list) +
- ((1 + n) * sizeof(struct qat_alg_buf));
+ sz_out = struct_size(buflout, bufers, n + 1);
sg_nctr = 0;
buflout = kzalloc_node(sz_out, GFP_ATOMIC,
dev_to_node(&GET_DEV(inst->accel_dev)));
@@ -801,11 +839,17 @@ static void qat_ablkcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
struct qat_crypto_instance *inst = ctx->inst;
struct ablkcipher_request *areq = qat_req->ablkcipher_req;
uint8_t stat_filed = qat_resp->comn_resp.comn_status;
+ struct device *dev = &GET_DEV(ctx->inst->accel_dev);
int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed);
qat_alg_free_bufl(inst, qat_req);
if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
res = -EINVAL;
+
+ memcpy(areq->info, qat_req->iv, AES_BLOCK_SIZE);
+ dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
+ qat_req->iv_paddr);
+
areq->base.complete(&areq->base, res);
}
@@ -905,50 +949,49 @@ static int qat_alg_aead_enc(struct aead_request *areq)
return -EINPROGRESS;
}
-static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
+static int qat_alg_ablkcipher_rekey(struct qat_alg_ablkcipher_ctx *ctx,
+ const u8 *key, unsigned int keylen,
+ int mode)
+{
+ memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
+ memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
+ memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
+ memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
+
+ return qat_alg_ablkcipher_init_sessions(ctx, key, keylen, mode);
+}
+
+static int qat_alg_ablkcipher_newkey(struct qat_alg_ablkcipher_ctx *ctx,
const u8 *key, unsigned int keylen,
int mode)
{
- struct qat_alg_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ struct qat_crypto_instance *inst = NULL;
struct device *dev;
+ int node = get_current_node();
+ int ret;
- spin_lock(&ctx->lock);
- if (ctx->enc_cd) {
- /* rekeying */
- dev = &GET_DEV(ctx->inst->accel_dev);
- memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
- memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
- memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
- memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
- } else {
- /* new key */
- int node = get_current_node();
- struct qat_crypto_instance *inst =
- qat_crypto_get_instance_node(node);
- if (!inst) {
- spin_unlock(&ctx->lock);
- return -EINVAL;
- }
-
- dev = &GET_DEV(inst->accel_dev);
- ctx->inst = inst;
- ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
- &ctx->enc_cd_paddr,
- GFP_ATOMIC);
- if (!ctx->enc_cd) {
- spin_unlock(&ctx->lock);
- return -ENOMEM;
- }
- ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
- &ctx->dec_cd_paddr,
- GFP_ATOMIC);
- if (!ctx->dec_cd) {
- spin_unlock(&ctx->lock);
- goto out_free_enc;
- }
+ inst = qat_crypto_get_instance_node(node);
+ if (!inst)
+ return -EINVAL;
+ dev = &GET_DEV(inst->accel_dev);
+ ctx->inst = inst;
+ ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
+ &ctx->enc_cd_paddr,
+ GFP_ATOMIC);
+ if (!ctx->enc_cd) {
+ ret = -ENOMEM;
+ goto out_free_instance;
+ }
+ ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
+ &ctx->dec_cd_paddr,
+ GFP_ATOMIC);
+ if (!ctx->dec_cd) {
+ ret = -ENOMEM;
+ goto out_free_enc;
}
- spin_unlock(&ctx->lock);
- if (qat_alg_ablkcipher_init_sessions(ctx, key, keylen, mode))
+
+ ret = qat_alg_ablkcipher_init_sessions(ctx, key, keylen, mode);
+ if (ret)
goto out_free_all;
return 0;
@@ -963,7 +1006,22 @@ out_free_enc:
dma_free_coherent(dev, sizeof(*ctx->enc_cd),
ctx->enc_cd, ctx->enc_cd_paddr);
ctx->enc_cd = NULL;
- return -ENOMEM;
+out_free_instance:
+ ctx->inst = NULL;
+ qat_crypto_put_instance(inst);
+ return ret;
+}
+
+static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
+ const u8 *key, unsigned int keylen,
+ int mode)
+{
+ struct qat_alg_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+
+ if (ctx->enc_cd)
+ return qat_alg_ablkcipher_rekey(ctx, key, keylen, mode);
+ else
+ return qat_alg_ablkcipher_newkey(ctx, key, keylen, mode);
}
static int qat_alg_ablkcipher_cbc_setkey(struct crypto_ablkcipher *tfm,
@@ -995,11 +1053,23 @@ static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
struct icp_qat_fw_la_cipher_req_params *cipher_param;
struct icp_qat_fw_la_bulk_req *msg;
+ struct device *dev = &GET_DEV(ctx->inst->accel_dev);
int ret, ctr = 0;
+ if (req->nbytes == 0)
+ return 0;
+
+ qat_req->iv = dma_alloc_coherent(dev, AES_BLOCK_SIZE,
+ &qat_req->iv_paddr, GFP_ATOMIC);
+ if (!qat_req->iv)
+ return -ENOMEM;
+
ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
- if (unlikely(ret))
+ if (unlikely(ret)) {
+ dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
+ qat_req->iv_paddr);
return ret;
+ }
msg = &qat_req->req;
*msg = ctx->enc_fw_req;
@@ -1012,18 +1082,29 @@ static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
cipher_param->cipher_length = req->nbytes;
cipher_param->cipher_offset = 0;
- memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
+ cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr;
+ memcpy(qat_req->iv, req->info, AES_BLOCK_SIZE);
do {
ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
} while (ret == -EAGAIN && ctr++ < 10);
if (ret == -EAGAIN) {
qat_alg_free_bufl(ctx->inst, qat_req);
+ dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
+ qat_req->iv_paddr);
return -EBUSY;
}
return -EINPROGRESS;
}
+static int qat_alg_ablkcipher_blk_encrypt(struct ablkcipher_request *req)
+{
+ if (req->nbytes % AES_BLOCK_SIZE != 0)
+ return -EINVAL;
+
+ return qat_alg_ablkcipher_encrypt(req);
+}
+
static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
@@ -1032,11 +1113,23 @@ static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
struct icp_qat_fw_la_cipher_req_params *cipher_param;
struct icp_qat_fw_la_bulk_req *msg;
+ struct device *dev = &GET_DEV(ctx->inst->accel_dev);
int ret, ctr = 0;
+ if (req->nbytes == 0)
+ return 0;
+
+ qat_req->iv = dma_alloc_coherent(dev, AES_BLOCK_SIZE,
+ &qat_req->iv_paddr, GFP_ATOMIC);
+ if (!qat_req->iv)
+ return -ENOMEM;
+
ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
- if (unlikely(ret))
+ if (unlikely(ret)) {
+ dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
+ qat_req->iv_paddr);
return ret;
+ }
msg = &qat_req->req;
*msg = ctx->dec_fw_req;
@@ -1049,18 +1142,28 @@ static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
cipher_param->cipher_length = req->nbytes;
cipher_param->cipher_offset = 0;
- memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
+ cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr;
+ memcpy(qat_req->iv, req->info, AES_BLOCK_SIZE);
do {
ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
} while (ret == -EAGAIN && ctr++ < 10);
if (ret == -EAGAIN) {
qat_alg_free_bufl(ctx->inst, qat_req);
+ dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
+ qat_req->iv_paddr);
return -EBUSY;
}
return -EINPROGRESS;
}
+static int qat_alg_ablkcipher_blk_decrypt(struct ablkcipher_request *req)
+{
+ if (req->nbytes % AES_BLOCK_SIZE != 0)
+ return -EINVAL;
+
+ return qat_alg_ablkcipher_decrypt(req);
+}
static int qat_alg_aead_init(struct crypto_aead *tfm,
enum icp_qat_hw_auth_algo hash,
const char *hash_name)
@@ -1119,7 +1222,6 @@ static int qat_alg_ablkcipher_init(struct crypto_tfm *tfm)
{
struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
- spin_lock_init(&ctx->lock);
tfm->crt_ablkcipher.reqsize = sizeof(struct qat_crypto_request);
ctx->tfm = tfm;
return 0;
@@ -1221,8 +1323,8 @@ static struct crypto_alg qat_algs[] = { {
.cra_u = {
.ablkcipher = {
.setkey = qat_alg_ablkcipher_cbc_setkey,
- .decrypt = qat_alg_ablkcipher_decrypt,
- .encrypt = qat_alg_ablkcipher_encrypt,
+ .decrypt = qat_alg_ablkcipher_blk_decrypt,
+ .encrypt = qat_alg_ablkcipher_blk_encrypt,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
@@ -1233,7 +1335,7 @@ static struct crypto_alg qat_algs[] = { {
.cra_driver_name = "qat_aes_ctr",
.cra_priority = 4001,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_blocksize = 1,
.cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
@@ -1265,8 +1367,8 @@ static struct crypto_alg qat_algs[] = { {
.cra_u = {
.ablkcipher = {
.setkey = qat_alg_ablkcipher_xts_setkey,
- .decrypt = qat_alg_ablkcipher_decrypt,
- .encrypt = qat_alg_ablkcipher_encrypt,
+ .decrypt = qat_alg_ablkcipher_blk_decrypt,
+ .encrypt = qat_alg_ablkcipher_blk_encrypt,
.min_keysize = 2 * AES_MIN_KEY_SIZE,
.max_keysize = 2 * AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
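
The buffer-list sizing in qat_algs.c now uses struct_size() instead of open-coded sizeof arithmetic, which also guards the multiplication against overflow. A sketch of the same idiom on an invented flexible-array structure:

/* Sketch of struct_size() for a flexible-array allocation, invented names. */
#include <linux/overflow.h>
#include <linux/slab.h>

struct sketch_buf {
	u64 addr;
	u32 len;
};

struct sketch_buf_list {
	u64 num_bufs;
	struct sketch_buf bufs[];	/* flexible array member */
};

static struct sketch_buf_list *sketch_alloc_list(int n)
{
	struct sketch_buf_list *bl;

	/* Overflow-checked equivalent of sizeof(*bl) + n * sizeof(bl->bufs[0]). */
	bl = kzalloc(struct_size(bl, bufs, n), GFP_KERNEL);
	if (!bl)
		return NULL;

	bl->num_bufs = n;
	return bl;
}
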
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.h b/drivers/crypto/qat/qat_common/qat_crypto.h
index dc0273fe3620..c77a80020cde 100644
--- a/drivers/crypto/qat/qat_common/qat_crypto.h
+++ b/drivers/crypto/qat/qat_common/qat_crypto.h
@@ -88,6 +88,8 @@ struct qat_crypto_request {
struct qat_crypto_request_buffs buf;
void (*cb)(struct icp_qat_fw_la_resp *resp,
struct qat_crypto_request *req);
+ void *iv;
+ dma_addr_t iv_paddr;
};
#endif
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 6b498a90181e..b0b8e3d48aef 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -1384,7 +1384,6 @@ MODULE_DEVICE_TABLE(of, sahara_dt_ids);
static int sahara_probe(struct platform_device *pdev)
{
struct sahara_dev *dev;
- struct resource *res;
u32 version;
int irq;
int err;
@@ -1398,8 +1397,7 @@ static int sahara_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dev);
/* Get the base address */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dev->regs_base = devm_ioremap_resource(&pdev->dev, res);
+ dev->regs_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dev->regs_base))
return PTR_ERR(dev->regs_base);
diff --git a/drivers/crypto/stm32/Makefile b/drivers/crypto/stm32/Makefile
index ce77e38c77e0..518e0e0b11a9 100644
--- a/drivers/crypto/stm32/Makefile
+++ b/drivers/crypto/stm32/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_CRYPTO_DEV_STM32_CRC) += stm32_crc32.o
+obj-$(CONFIG_CRYPTO_DEV_STM32_CRC) += stm32-crc32.o
obj-$(CONFIG_CRYPTO_DEV_STM32_HASH) += stm32-hash.o
obj-$(CONFIG_CRYPTO_DEV_STM32_CRYP) += stm32-cryp.o
diff --git a/drivers/crypto/stm32/stm32_crc32.c b/drivers/crypto/stm32/stm32-crc32.c
index 440c9f1bd006..440c9f1bd006 100644
--- a/drivers/crypto/stm32/stm32_crc32.c
+++ b/drivers/crypto/stm32/stm32-crc32.c
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index 29519d1c403f..23061f2bc74b 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -349,7 +349,7 @@ static int stm32_hash_xmit_cpu(struct stm32_hash_dev *hdev,
return -ETIMEDOUT;
if ((hdev->flags & HASH_FLAGS_HMAC) &&
- (hdev->flags & ~HASH_FLAGS_HMAC_KEY)) {
+ (!(hdev->flags & HASH_FLAGS_HMAC_KEY))) {
hdev->flags |= HASH_FLAGS_HMAC_KEY;
stm32_hash_write_key(hdev);
if (stm32_hash_wait_busy(hdev))
@@ -447,8 +447,8 @@ static int stm32_hash_xmit_dma(struct stm32_hash_dev *hdev,
dma_async_issue_pending(hdev->dma_lch);
- if (!wait_for_completion_interruptible_timeout(&hdev->dma_completion,
- msecs_to_jiffies(100)))
+ if (!wait_for_completion_timeout(&hdev->dma_completion,
+ msecs_to_jiffies(100)))
err = -ETIMEDOUT;
if (dma_async_is_tx_complete(hdev->dma_lch, cookie,
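
The stm32-hash DMA path now waits with wait_for_completion_timeout() rather than the interruptible variant, so a pending signal can no longer cut the wait short and be misreported as a timeout. A minimal sketch of that wait, with an invented helper name and the same 100 ms bound:

/* Illustrative sketch, invented helper name. */
#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static int sketch_wait_dma_done(struct completion *done)
{
	/* Not interruptible: only a real timeout produces -ETIMEDOUT. */
	if (!wait_for_completion_timeout(done, msecs_to_jiffies(100)))
		return -ETIMEDOUT;

	return 0;
}
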
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
index 7b0c42882830..4ab14d58e85b 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
@@ -12,7 +12,7 @@
*/
#include "sun4i-ss.h"
-static int sun4i_ss_opti_poll(struct skcipher_request *areq)
+static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
@@ -114,6 +114,29 @@ release_ss:
return err;
}
+
+static int noinline_for_stack sun4i_ss_cipher_poll_fallback(struct skcipher_request *areq)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, op->fallback_tfm);
+ int err;
+
+ skcipher_request_set_sync_tfm(subreq, op->fallback_tfm);
+ skcipher_request_set_callback(subreq, areq->base.flags, NULL,
+ NULL);
+ skcipher_request_set_crypt(subreq, areq->src, areq->dst,
+ areq->cryptlen, areq->iv);
+ if (ctx->mode & SS_DECRYPTION)
+ err = crypto_skcipher_decrypt(subreq);
+ else
+ err = crypto_skcipher_encrypt(subreq);
+ skcipher_request_zero(subreq);
+
+ return err;
+}
+
/* Generic function that support SG with size not multiple of 4 */
static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
{
@@ -140,8 +163,6 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
unsigned int todo;
struct sg_mapping_iter mi, mo;
unsigned int oi, oo; /* offset for in and out */
- char buf[4 * SS_RX_MAX];/* buffer for linearize SG src */
- char bufo[4 * SS_TX_MAX]; /* buffer for linearize SG dst */
unsigned int ob = 0; /* offset in buf */
unsigned int obo = 0; /* offset in bufo*/
unsigned int obl = 0; /* length of data in bufo */
@@ -178,20 +199,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
if (no_chunk == 1 && !need_fallback)
return sun4i_ss_opti_poll(areq);
- if (need_fallback) {
- SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, op->fallback_tfm);
- skcipher_request_set_sync_tfm(subreq, op->fallback_tfm);
- skcipher_request_set_callback(subreq, areq->base.flags, NULL,
- NULL);
- skcipher_request_set_crypt(subreq, areq->src, areq->dst,
- areq->cryptlen, areq->iv);
- if (ctx->mode & SS_DECRYPTION)
- err = crypto_skcipher_decrypt(subreq);
- else
- err = crypto_skcipher_encrypt(subreq);
- skcipher_request_zero(subreq);
- return err;
- }
+ if (need_fallback)
+ return sun4i_ss_cipher_poll_fallback(areq);
spin_lock_irqsave(&ss->slock, flags);
@@ -224,6 +233,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
while (oleft) {
if (ileft) {
+ char buf[4 * SS_RX_MAX];/* buffer for linearize SG src */
+
/*
* todo is the number of consecutive 4byte word that we
* can read from current SG
@@ -281,6 +292,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
oo = 0;
}
} else {
+ char bufo[4 * SS_TX_MAX]; /* buffer for linearize SG dst */
+
/*
* read obl bytes in bufo, we read at maximum for
* emptying the device
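
The sun4i-ss rework shrinks sun4i_ss_cipher_poll()'s stack frame in two ways: the fallback path moves into a separate noinline_for_stack helper, and the two large linearization buffers are scoped to the branches that actually use them, so their stack slots can overlap. A stand-alone sketch of the branch-scoping idea, with invented names and sizes:

/* Stand-alone sketch: large temporaries scoped to the branch that uses them. */
#include <stdio.h>
#include <string.h>

static int sketch_poll(int feeding_input)
{
	if (feeding_input) {
		char inbuf[256];	/* only live while feeding the engine */

		memset(inbuf, 0, sizeof(inbuf));
		/* ... linearize the source SG into inbuf ... */
	} else {
		char outbuf[256];	/* may reuse inbuf's stack slot */

		memset(outbuf, 0, sizeof(outbuf));
		/* ... drain the engine output into outbuf ... */
	}

	return 0;
}

int main(void)
{
	printf("%d %d\n", sketch_poll(1), sketch_poll(0));
	return 0;
}
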
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index fbc7bf9d7380..c9d686a0e805 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -265,11 +265,11 @@ static int init_device(struct device *dev)
* callback must check err and feedback in descriptor header
* for device processing status.
*/
-int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
- void (*callback)(struct device *dev,
- struct talitos_desc *desc,
- void *context, int error),
- void *context)
+static int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
+ void (*callback)(struct device *dev,
+ struct talitos_desc *desc,
+ void *context, int error),
+ void *context)
{
struct talitos_private *priv = dev_get_drvdata(dev);
struct talitos_request *request;
@@ -319,7 +319,21 @@ int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
return -EINPROGRESS;
}
-EXPORT_SYMBOL(talitos_submit);
+
+static __be32 get_request_hdr(struct talitos_request *request, bool is_sec1)
+{
+ struct talitos_edesc *edesc;
+
+ if (!is_sec1)
+ return request->desc->hdr;
+
+ if (!request->desc->next_desc)
+ return request->desc->hdr1;
+
+ edesc = container_of(request->desc, struct talitos_edesc, desc);
+
+ return ((struct talitos_desc *)(edesc->buf + edesc->dma_len))->hdr1;
+}
/*
* process what was done, notify callback of error if not
@@ -342,12 +356,7 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
/* descriptors with their done bits set don't get the error */
rmb();
- if (!is_sec1)
- hdr = request->desc->hdr;
- else if (request->desc->next_desc)
- hdr = (request->desc + 1)->hdr1;
- else
- hdr = request->desc->hdr1;
+ hdr = get_request_hdr(request, is_sec1);
if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
status = 0;
@@ -477,8 +486,14 @@ static u32 current_desc_hdr(struct device *dev, int ch)
}
}
- if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc)
- return (priv->chan[ch].fifo[iter].desc + 1)->hdr;
+ if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc) {
+ struct talitos_edesc *edesc;
+
+ edesc = container_of(priv->chan[ch].fifo[iter].desc,
+ struct talitos_edesc, desc);
+ return ((struct talitos_desc *)
+ (edesc->buf + edesc->dma_len))->hdr;
+ }
return priv->chan[ch].fifo[iter].desc->hdr;
}
@@ -824,7 +839,11 @@ static void talitos_unregister_rng(struct device *dev)
* HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP
*/
#define TALITOS_CRA_PRIORITY_AEAD_HSNA (TALITOS_CRA_PRIORITY - 1)
+#ifdef CONFIG_CRYPTO_DEV_TALITOS2
#define TALITOS_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
+#else
+#define TALITOS_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + SHA256_BLOCK_SIZE)
+#endif
#define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
struct talitos_ctx {
@@ -948,36 +967,6 @@ badkey:
goto out;
}
-/*
- * talitos_edesc - s/w-extended descriptor
- * @src_nents: number of segments in input scatterlist
- * @dst_nents: number of segments in output scatterlist
- * @icv_ool: whether ICV is out-of-line
- * @iv_dma: dma address of iv for checking continuity and link table
- * @dma_len: length of dma mapped link_tbl space
- * @dma_link_tbl: bus physical address of link_tbl/buf
- * @desc: h/w descriptor
- * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
- * @buf: input and output buffeur (if {src,dst}_nents > 1) (SEC1)
- *
- * if decrypting (with authcheck), or either one of src_nents or dst_nents
- * is greater than 1, an integrity check value is concatenated to the end
- * of link_tbl data
- */
-struct talitos_edesc {
- int src_nents;
- int dst_nents;
- bool icv_ool;
- dma_addr_t iv_dma;
- int dma_len;
- dma_addr_t dma_link_tbl;
- struct talitos_desc desc;
- union {
- struct talitos_ptr link_tbl[0];
- u8 buf[0];
- };
-};
-
static void talitos_sg_unmap(struct device *dev,
struct talitos_edesc *edesc,
struct scatterlist *src,
@@ -1008,11 +997,13 @@ static void talitos_sg_unmap(struct device *dev,
static void ipsec_esp_unmap(struct device *dev,
struct talitos_edesc *edesc,
- struct aead_request *areq)
+ struct aead_request *areq, bool encrypt)
{
struct crypto_aead *aead = crypto_aead_reqtfm(areq);
struct talitos_ctx *ctx = crypto_aead_ctx(aead);
unsigned int ivsize = crypto_aead_ivsize(aead);
+ unsigned int authsize = crypto_aead_authsize(aead);
+ unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
bool is_ipsec_esp = edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP;
struct talitos_ptr *civ_ptr = &edesc->desc.ptr[is_ipsec_esp ? 2 : 3];
@@ -1021,8 +1012,8 @@ static void ipsec_esp_unmap(struct device *dev,
DMA_FROM_DEVICE);
unmap_single_talitos_ptr(dev, civ_ptr, DMA_TO_DEVICE);
- talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen,
- areq->assoclen);
+ talitos_sg_unmap(dev, edesc, areq->src, areq->dst,
+ cryptlen + authsize, areq->assoclen);
if (edesc->dma_len)
dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
@@ -1032,7 +1023,7 @@ static void ipsec_esp_unmap(struct device *dev,
unsigned int dst_nents = edesc->dst_nents ? : 1;
sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
- areq->assoclen + areq->cryptlen - ivsize);
+ areq->assoclen + cryptlen - ivsize);
}
}
@@ -1043,31 +1034,14 @@ static void ipsec_esp_encrypt_done(struct device *dev,
struct talitos_desc *desc, void *context,
int err)
{
- struct talitos_private *priv = dev_get_drvdata(dev);
- bool is_sec1 = has_ftr_sec1(priv);
struct aead_request *areq = context;
struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
- unsigned int authsize = crypto_aead_authsize(authenc);
unsigned int ivsize = crypto_aead_ivsize(authenc);
struct talitos_edesc *edesc;
- struct scatterlist *sg;
- void *icvdata;
edesc = container_of(desc, struct talitos_edesc, desc);
- ipsec_esp_unmap(dev, edesc, areq);
-
- /* copy the generated ICV to dst */
- if (edesc->icv_ool) {
- if (is_sec1)
- icvdata = edesc->buf + areq->assoclen + areq->cryptlen;
- else
- icvdata = &edesc->link_tbl[edesc->src_nents +
- edesc->dst_nents + 2];
- sg = sg_last(areq->dst, edesc->dst_nents);
- memcpy((char *)sg_virt(sg) + sg->length - authsize,
- icvdata, authsize);
- }
+ ipsec_esp_unmap(dev, edesc, areq, true);
dma_unmap_single(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);
@@ -1084,32 +1058,16 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev,
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
unsigned int authsize = crypto_aead_authsize(authenc);
struct talitos_edesc *edesc;
- struct scatterlist *sg;
char *oicv, *icv;
- struct talitos_private *priv = dev_get_drvdata(dev);
- bool is_sec1 = has_ftr_sec1(priv);
edesc = container_of(desc, struct talitos_edesc, desc);
- ipsec_esp_unmap(dev, edesc, req);
+ ipsec_esp_unmap(dev, edesc, req, false);
if (!err) {
/* auth check */
- sg = sg_last(req->dst, edesc->dst_nents ? : 1);
- icv = (char *)sg_virt(sg) + sg->length - authsize;
-
- if (edesc->dma_len) {
- if (is_sec1)
- oicv = (char *)&edesc->dma_link_tbl +
- req->assoclen + req->cryptlen;
- else
- oicv = (char *)
- &edesc->link_tbl[edesc->src_nents +
- edesc->dst_nents + 2];
- if (edesc->icv_ool)
- icv = oicv + authsize;
- } else
- oicv = (char *)&edesc->link_tbl[0];
+ oicv = edesc->buf + edesc->dma_len;
+ icv = oicv - authsize;
err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
}
@@ -1128,7 +1086,7 @@ static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
edesc = container_of(desc, struct talitos_edesc, desc);
- ipsec_esp_unmap(dev, edesc, req);
+ ipsec_esp_unmap(dev, edesc, req, false);
/* check ICV auth status */
if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
@@ -1145,11 +1103,12 @@ static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
* stop at cryptlen bytes
*/
static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
- unsigned int offset, int cryptlen,
+ unsigned int offset, int datalen, int elen,
struct talitos_ptr *link_tbl_ptr)
{
- int n_sg = sg_count;
+ int n_sg = elen ? sg_count + 1 : sg_count;
int count = 0;
+ int cryptlen = datalen + elen;
while (cryptlen && sg && n_sg--) {
unsigned int len = sg_dma_len(sg);
@@ -1164,11 +1123,20 @@ static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
if (len > cryptlen)
len = cryptlen;
+ if (datalen > 0 && len > datalen) {
+ to_talitos_ptr(link_tbl_ptr + count,
+ sg_dma_address(sg) + offset, datalen, 0);
+ to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
+ count++;
+ len -= datalen;
+ offset += datalen;
+ }
to_talitos_ptr(link_tbl_ptr + count,
sg_dma_address(sg) + offset, len, 0);
to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
count++;
cryptlen -= len;
+ datalen -= len;
offset = 0;
next:
@@ -1178,7 +1146,7 @@ next:
/* tag end of link table */
if (count > 0)
to_talitos_ptr_ext_set(link_tbl_ptr + count - 1,
- DESC_PTR_LNKTBL_RETURN, 0);
+ DESC_PTR_LNKTBL_RET, 0);
return count;
}
@@ -1186,7 +1154,8 @@ next:
static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
unsigned int len, struct talitos_edesc *edesc,
struct talitos_ptr *ptr, int sg_count,
- unsigned int offset, int tbl_off, int elen)
+ unsigned int offset, int tbl_off, int elen,
+ bool force)
{
struct talitos_private *priv = dev_get_drvdata(dev);
bool is_sec1 = has_ftr_sec1(priv);
@@ -1196,7 +1165,7 @@ static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
return 1;
}
to_talitos_ptr_ext_set(ptr, elen, is_sec1);
- if (sg_count == 1) {
+ if (sg_count == 1 && !force) {
to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1);
return sg_count;
}
@@ -1204,9 +1173,9 @@ static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, len, is_sec1);
return sg_count;
}
- sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len + elen,
+ sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len, elen,
&edesc->link_tbl[tbl_off]);
- if (sg_count == 1) {
+ if (sg_count == 1 && !force) {
/* Only one segment now, so no link tbl needed*/
copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1);
return sg_count;
@@ -1224,13 +1193,14 @@ static int talitos_sg_map(struct device *dev, struct scatterlist *src,
unsigned int offset, int tbl_off)
{
return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset,
- tbl_off, 0);
+ tbl_off, 0, false);
}
/*
* fill in and submit ipsec_esp descriptor
*/
static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
+ bool encrypt,
void (*callback)(struct device *dev,
struct talitos_desc *desc,
void *context, int error))
@@ -1240,7 +1210,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
struct talitos_ctx *ctx = crypto_aead_ctx(aead);
struct device *dev = ctx->dev;
struct talitos_desc *desc = &edesc->desc;
- unsigned int cryptlen = areq->cryptlen;
+ unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
unsigned int ivsize = crypto_aead_ivsize(aead);
int tbl_off = 0;
int sg_count, ret;
@@ -1251,6 +1221,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
bool is_ipsec_esp = desc->hdr & DESC_HDR_TYPE_IPSEC_ESP;
struct talitos_ptr *civ_ptr = &desc->ptr[is_ipsec_esp ? 2 : 3];
struct talitos_ptr *ckey_ptr = &desc->ptr[is_ipsec_esp ? 3 : 2];
+ dma_addr_t dma_icv = edesc->dma_link_tbl + edesc->dma_len - authsize;
/* hmac key */
to_talitos_ptr(&desc->ptr[0], ctx->dma_key, ctx->authkeylen, is_sec1);
@@ -1290,7 +1261,8 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
elen = authsize;
ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
- sg_count, areq->assoclen, tbl_off, elen);
+ sg_count, areq->assoclen, tbl_off, elen,
+ false);
if (ret > 1) {
tbl_off += ret;
@@ -1304,55 +1276,32 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
}
- ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
- sg_count, areq->assoclen, tbl_off);
-
- if (is_ipsec_esp)
- to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);
-
- /* ICV data */
- if (ret > 1) {
- tbl_off += ret;
- edesc->icv_ool = true;
- sync_needed = true;
-
- if (is_ipsec_esp) {
- struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
- int offset = (edesc->src_nents + edesc->dst_nents + 2) *
- sizeof(struct talitos_ptr) + authsize;
-
- /* Add an entry to the link table for ICV data */
- to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
- to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RETURN,
- is_sec1);
+ if (is_ipsec_esp && encrypt)
+ elen = authsize;
+ else
+ elen = 0;
+ ret = talitos_sg_map_ext(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
+ sg_count, areq->assoclen, tbl_off, elen,
+ is_ipsec_esp && !encrypt);
+ tbl_off += ret;
- /* icv data follows link tables */
- to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + offset,
- authsize, is_sec1);
- } else {
- dma_addr_t addr = edesc->dma_link_tbl;
+ if (!encrypt && is_ipsec_esp) {
+ struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
- if (is_sec1)
- addr += areq->assoclen + cryptlen;
- else
- addr += sizeof(struct talitos_ptr) * tbl_off;
+ /* Add an entry to the link table for ICV data */
+ to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
+ to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RET, is_sec1);
- to_talitos_ptr(&desc->ptr[6], addr, authsize, is_sec1);
- }
+ /* icv data follows link tables */
+ to_talitos_ptr(tbl_ptr, dma_icv, authsize, is_sec1);
+ to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);
+ sync_needed = true;
+ } else if (!encrypt) {
+ to_talitos_ptr(&desc->ptr[6], dma_icv, authsize, is_sec1);
+ sync_needed = true;
} else if (!is_ipsec_esp) {
- ret = talitos_sg_map(dev, areq->dst, authsize, edesc,
- &desc->ptr[6], sg_count, areq->assoclen +
- cryptlen,
- tbl_off);
- if (ret > 1) {
- tbl_off += ret;
- edesc->icv_ool = true;
- sync_needed = true;
- } else {
- edesc->icv_ool = false;
- }
- } else {
- edesc->icv_ool = false;
+ talitos_sg_map(dev, areq->dst, authsize, edesc, &desc->ptr[6],
+ sg_count, areq->assoclen + cryptlen, tbl_off);
}
/* iv out */
@@ -1367,7 +1316,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
if (ret != -EINPROGRESS) {
- ipsec_esp_unmap(dev, edesc, areq);
+ ipsec_esp_unmap(dev, edesc, areq, encrypt);
kfree(edesc);
}
return ret;
@@ -1435,18 +1384,18 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
* and space for two sets of ICVs (stashed and generated)
*/
alloc_len = sizeof(struct talitos_edesc);
- if (src_nents || dst_nents) {
+ if (src_nents || dst_nents || !encrypt) {
if (is_sec1)
dma_len = (src_nents ? src_len : 0) +
- (dst_nents ? dst_len : 0);
+ (dst_nents ? dst_len : 0) + authsize;
else
dma_len = (src_nents + dst_nents + 2) *
- sizeof(struct talitos_ptr) + authsize * 2;
+ sizeof(struct talitos_ptr) + authsize;
alloc_len += dma_len;
} else {
dma_len = 0;
- alloc_len += icv_stashing ? authsize : 0;
}
+ alloc_len += icv_stashing ? authsize : 0;
/* if its a ahash, add space for a second desc next to the first one */
if (is_sec1 && !dst)
@@ -1466,15 +1415,11 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
edesc->dst_nents = dst_nents;
edesc->iv_dma = iv_dma;
edesc->dma_len = dma_len;
- if (dma_len) {
- void *addr = &edesc->link_tbl[0];
-
- if (is_sec1 && !dst)
- addr += sizeof(struct talitos_desc);
- edesc->dma_link_tbl = dma_map_single(dev, addr,
+ if (dma_len)
+ edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
edesc->dma_len,
DMA_BIDIRECTIONAL);
- }
+
return edesc;
}
@@ -1485,9 +1430,10 @@ static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
unsigned int authsize = crypto_aead_authsize(authenc);
struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
unsigned int ivsize = crypto_aead_ivsize(authenc);
+ unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
- iv, areq->assoclen, areq->cryptlen,
+ iv, areq->assoclen, cryptlen,
authsize, ivsize, icv_stashing,
areq->base.flags, encrypt);
}
@@ -1506,7 +1452,7 @@ static int aead_encrypt(struct aead_request *req)
/* set encrypt */
edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
- return ipsec_esp(edesc, req, ipsec_esp_encrypt_done);
+ return ipsec_esp(edesc, req, true, ipsec_esp_encrypt_done);
}
static int aead_decrypt(struct aead_request *req)
@@ -1516,17 +1462,15 @@ static int aead_decrypt(struct aead_request *req)
struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
struct talitos_private *priv = dev_get_drvdata(ctx->dev);
struct talitos_edesc *edesc;
- struct scatterlist *sg;
void *icvdata;
- req->cryptlen -= authsize;
-
/* allocate extended descriptor */
edesc = aead_edesc_alloc(req, req->iv, 1, false);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
- if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
+ if ((edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP) &&
+ (priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
((!edesc->src_nents && !edesc->dst_nents) ||
priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
@@ -1537,24 +1481,20 @@ static int aead_decrypt(struct aead_request *req)
/* reset integrity check result bits */
- return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done);
+ return ipsec_esp(edesc, req, false,
+ ipsec_esp_decrypt_hwauth_done);
}
/* Have to check the ICV with software */
edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
/* stash incoming ICV for later cmp with ICV generated by the h/w */
- if (edesc->dma_len)
- icvdata = (char *)&edesc->link_tbl[edesc->src_nents +
- edesc->dst_nents + 2];
- else
- icvdata = &edesc->link_tbl[0];
+ icvdata = edesc->buf + edesc->dma_len;
- sg = sg_last(req->src, edesc->src_nents ? : 1);
+ sg_pcopy_to_buffer(req->src, edesc->src_nents ? : 1, icvdata, authsize,
+ req->assoclen + req->cryptlen - authsize);
- memcpy(icvdata, (char *)sg_virt(sg) + sg->length - authsize, authsize);
-
- return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
+ return ipsec_esp(edesc, req, false, ipsec_esp_decrypt_swauth_done);
}
static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
@@ -1605,6 +1545,18 @@ static int ablkcipher_des3_setkey(struct crypto_ablkcipher *cipher,
return ablkcipher_setkey(cipher, key, keylen);
}
+static int ablkcipher_aes_setkey(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ if (keylen == AES_KEYSIZE_128 || keylen == AES_KEYSIZE_192 ||
+ keylen == AES_KEYSIZE_256)
+ return ablkcipher_setkey(cipher, key, keylen);
+
+ crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+
+ return -EINVAL;
+}
+
static void common_nonsnoop_unmap(struct device *dev,
struct talitos_edesc *edesc,
struct ablkcipher_request *areq)
@@ -1624,11 +1576,15 @@ static void ablkcipher_done(struct device *dev,
int err)
{
struct ablkcipher_request *areq = context;
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
struct talitos_edesc *edesc;
edesc = container_of(desc, struct talitos_edesc, desc);
common_nonsnoop_unmap(dev, edesc, areq);
+ memcpy(areq->info, ctx->iv, ivsize);
kfree(edesc);
@@ -1723,6 +1679,14 @@ static int ablkcipher_encrypt(struct ablkcipher_request *areq)
struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
struct talitos_edesc *edesc;
+ unsigned int blocksize =
+ crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));
+
+ if (!areq->nbytes)
+ return 0;
+
+ if (areq->nbytes % blocksize)
+ return -EINVAL;
/* allocate extended descriptor */
edesc = ablkcipher_edesc_alloc(areq, true);
@@ -1740,6 +1704,14 @@ static int ablkcipher_decrypt(struct ablkcipher_request *areq)
struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
struct talitos_edesc *edesc;
+ unsigned int blocksize =
+ crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));
+
+ if (!areq->nbytes)
+ return 0;
+
+ if (areq->nbytes % blocksize)
+ return -EINVAL;
/* allocate extended descriptor */
edesc = ablkcipher_edesc_alloc(areq, false);
@@ -1759,14 +1731,16 @@ static void common_nonsnoop_hash_unmap(struct device *dev,
struct talitos_private *priv = dev_get_drvdata(dev);
bool is_sec1 = has_ftr_sec1(priv);
struct talitos_desc *desc = &edesc->desc;
- struct talitos_desc *desc2 = desc + 1;
+ struct talitos_desc *desc2 = (struct talitos_desc *)
+ (edesc->buf + edesc->dma_len);
unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
if (desc->next_desc &&
desc->ptr[5].ptr != desc2->ptr[5].ptr)
unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE);
- talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);
+ if (req_ctx->psrc)
+ talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);
/* When using hashctx-in, must unmap it. */
if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
@@ -1833,7 +1807,6 @@ static void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
static int common_nonsnoop_hash(struct talitos_edesc *edesc,
struct ahash_request *areq, unsigned int length,
- unsigned int offset,
void (*callback) (struct device *dev,
struct talitos_desc *desc,
void *context, int error))
@@ -1872,9 +1845,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
sg_count = edesc->src_nents ?: 1;
if (is_sec1 && sg_count > 1)
- sg_pcopy_to_buffer(req_ctx->psrc, sg_count,
- edesc->buf + sizeof(struct talitos_desc),
- length, req_ctx->nbuf);
+ sg_copy_to_buffer(req_ctx->psrc, sg_count, edesc->buf, length);
else if (length)
sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
DMA_TO_DEVICE);
@@ -1887,7 +1858,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
DMA_TO_DEVICE);
} else {
sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
- &desc->ptr[3], sg_count, offset, 0);
+ &desc->ptr[3], sg_count, 0, 0);
if (sg_count > 1)
sync_needed = true;
}
@@ -1911,7 +1882,8 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);
if (is_sec1 && req_ctx->nbuf && length) {
- struct talitos_desc *desc2 = desc + 1;
+ struct talitos_desc *desc2 = (struct talitos_desc *)
+ (edesc->buf + edesc->dma_len);
dma_addr_t next_desc;
memset(desc2, 0, sizeof(*desc2));
@@ -1932,7 +1904,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
DMA_TO_DEVICE);
copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1);
sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
- &desc2->ptr[3], sg_count, offset, 0);
+ &desc2->ptr[3], sg_count, 0, 0);
if (sg_count > 1)
sync_needed = true;
copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1);
@@ -2043,7 +2015,6 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
struct device *dev = ctx->dev;
struct talitos_private *priv = dev_get_drvdata(dev);
bool is_sec1 = has_ftr_sec1(priv);
- int offset = 0;
u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx];
if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
@@ -2083,6 +2054,8 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
sg_chain(req_ctx->bufsl, 2, areq->src);
req_ctx->psrc = req_ctx->bufsl;
} else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) {
+ int offset;
+
if (nbytes_to_hash > blocksize)
offset = blocksize - req_ctx->nbuf;
else
@@ -2095,7 +2068,8 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
sg_copy_to_buffer(areq->src, nents,
ctx_buf + req_ctx->nbuf, offset);
req_ctx->nbuf += offset;
- req_ctx->psrc = areq->src;
+ req_ctx->psrc = scatterwalk_ffwd(req_ctx->bufsl, areq->src,
+ offset);
} else
req_ctx->psrc = areq->src;
@@ -2135,8 +2109,7 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
if (ctx->keylen && (req_ctx->first || req_ctx->last))
edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
- return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, offset,
- ahash_done);
+ return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, ahash_done);
}
static int ahash_update(struct ahash_request *areq)
@@ -2339,7 +2312,7 @@ static struct talitos_alg_template driver_algs[] = {
.base = {
.cra_name = "authenc(hmac(sha1),cbc(aes))",
.cra_driver_name = "authenc-hmac-sha1-"
- "cbc-aes-talitos",
+ "cbc-aes-talitos-hsna",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_ASYNC,
},
@@ -2384,7 +2357,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "authenc(hmac(sha1),"
"cbc(des3_ede))",
.cra_driver_name = "authenc-hmac-sha1-"
- "cbc-3des-talitos",
+ "cbc-3des-talitos-hsna",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_ASYNC,
},
@@ -2427,7 +2400,7 @@ static struct talitos_alg_template driver_algs[] = {
.base = {
.cra_name = "authenc(hmac(sha224),cbc(aes))",
.cra_driver_name = "authenc-hmac-sha224-"
- "cbc-aes-talitos",
+ "cbc-aes-talitos-hsna",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_ASYNC,
},
@@ -2472,7 +2445,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "authenc(hmac(sha224),"
"cbc(des3_ede))",
.cra_driver_name = "authenc-hmac-sha224-"
- "cbc-3des-talitos",
+ "cbc-3des-talitos-hsna",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_ASYNC,
},
@@ -2515,7 +2488,7 @@ static struct talitos_alg_template driver_algs[] = {
.base = {
.cra_name = "authenc(hmac(sha256),cbc(aes))",
.cra_driver_name = "authenc-hmac-sha256-"
- "cbc-aes-talitos",
+ "cbc-aes-talitos-hsna",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_ASYNC,
},
@@ -2560,7 +2533,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "authenc(hmac(sha256),"
"cbc(des3_ede))",
.cra_driver_name = "authenc-hmac-sha256-"
- "cbc-3des-talitos",
+ "cbc-3des-talitos-hsna",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_ASYNC,
},
@@ -2689,7 +2662,7 @@ static struct talitos_alg_template driver_algs[] = {
.base = {
.cra_name = "authenc(hmac(md5),cbc(aes))",
.cra_driver_name = "authenc-hmac-md5-"
- "cbc-aes-talitos",
+ "cbc-aes-talitos-hsna",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_ASYNC,
},
@@ -2732,7 +2705,7 @@ static struct talitos_alg_template driver_algs[] = {
.base = {
.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
.cra_driver_name = "authenc-hmac-md5-"
- "cbc-3des-talitos",
+ "cbc-3des-talitos-hsna",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_ASYNC,
},
@@ -2760,7 +2733,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
+ .setkey = ablkcipher_aes_setkey,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2777,6 +2750,7 @@ static struct talitos_alg_template driver_algs[] = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
+ .setkey = ablkcipher_aes_setkey,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2787,13 +2761,14 @@ static struct talitos_alg_template driver_algs[] = {
.alg.crypto = {
.cra_name = "ctr(aes)",
.cra_driver_name = "ctr-aes-talitos",
- .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_blocksize = 1,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
CRYPTO_ALG_ASYNC,
.cra_ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
+ .setkey = ablkcipher_aes_setkey,
}
},
.desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
@@ -2810,7 +2785,6 @@ static struct talitos_alg_template driver_algs[] = {
.cra_ablkcipher = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
.setkey = ablkcipher_des_setkey,
}
},
@@ -2845,7 +2819,6 @@ static struct talitos_alg_template driver_algs[] = {
.cra_ablkcipher = {
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES3_EDE_BLOCK_SIZE,
.setkey = ablkcipher_des3_setkey,
}
},
@@ -3270,7 +3243,10 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
alg->cra_priority = t_alg->algt.priority;
else
alg->cra_priority = TALITOS_CRA_PRIORITY;
- alg->cra_alignmask = 0;
+ if (has_ftr_sec1(priv))
+ alg->cra_alignmask = 3;
+ else
+ alg->cra_alignmask = 0;
alg->cra_ctxsize = sizeof(struct talitos_ctx);
alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
@@ -3418,7 +3394,7 @@ static int talitos_probe(struct platform_device *ofdev)
if (err)
goto err_out;
- if (of_device_is_compatible(np, "fsl,sec1.0")) {
+ if (has_ftr_sec1(priv)) {
if (priv->num_channels == 1)
tasklet_init(&priv->done_task[0], talitos1_done_ch0,
(unsigned long)dev);
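Taken together, the talitos.c hunks above drop the icv_ool bookkeeping and give both ICVs a fixed home at the tail of the DMA-mapped area. The layout below is only a reading aid, inferred from talitos_edesc_alloc(), ipsec_esp() and ipsec_esp_decrypt_swauth_done() in this patch (SEC2+ case shown; on SEC1 the link tables are replaced by the linearization buffer but the tail is laid out the same way):

	edesc->buf, mapped as edesc->dma_link_tbl, dma_len bytes
	+---------------------------------------+  offset 0
	| link tables                           |
	| (src_nents + dst_nents + 2) ptrs      |
	+---------------------------------------+  dma_len - authsize
	| ICV computed by the device            |  <- dma_icv
	+---------------------------------------+  dma_len (end of mapping)
	| ICV stashed from the incoming request |  <- only allocated on decrypt
	+---------------------------------------+  dma_len + authsize

With that placement, the software check in ipsec_esp_decrypt_swauth_done() becomes a fixed-offset compare: oicv = buf + dma_len holds the ICV stashed from the request, and icv = oicv - authsize is where the device wrote its result.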
diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h
index a65a63e0d6c1..1469b956948a 100644
--- a/drivers/crypto/talitos.h
+++ b/drivers/crypto/talitos.h
@@ -1,31 +1,8 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
/*
* Freescale SEC (talitos) device register and descriptor header defines
*
* Copyright (c) 2006-2011 Freescale Semiconductor, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#define TALITOS_TIMEOUT 100000
@@ -65,6 +42,34 @@ struct talitos_desc {
#define TALITOS_DESC_SIZE (sizeof(struct talitos_desc) - sizeof(__be32))
+/*
+ * talitos_edesc - s/w-extended descriptor
+ * @src_nents: number of segments in input scatterlist
+ * @dst_nents: number of segments in output scatterlist
+ * @iv_dma: dma address of iv for checking continuity and link table
+ * @dma_len: length of dma mapped link_tbl space
+ * @dma_link_tbl: bus physical address of link_tbl/buf
+ * @desc: h/w descriptor
+ * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
+ * @buf: input and output buffer (if {src,dst}_nents > 1) (SEC1)
+ *
+ * if decrypting (with authcheck), or if either src_nents or dst_nents
+ * is greater than 1, an integrity check value is concatenated to the
+ * end of the link_tbl data
+ */
+struct talitos_edesc {
+ int src_nents;
+ int dst_nents;
+ dma_addr_t iv_dma;
+ int dma_len;
+ dma_addr_t dma_link_tbl;
+ struct talitos_desc desc;
+ union {
+ struct talitos_ptr link_tbl[0];
+ u8 buf[0];
+ };
+};
+
/**
* talitos_request - descriptor submission request
* @desc: descriptor pointer (kernel virtual)
@@ -150,12 +155,6 @@ struct talitos_private {
bool rng_registered;
};
-extern int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
- void (*callback)(struct device *dev,
- struct talitos_desc *desc,
- void *context, int error),
- void *context);
-
/* .features flag */
#define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001
#define TALITOS_FTR_HW_AUTH_CHECK 0x00000002
@@ -170,13 +169,11 @@ extern int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
*/
static inline bool has_ftr_sec1(struct talitos_private *priv)
{
-#if defined(CONFIG_CRYPTO_DEV_TALITOS1) && defined(CONFIG_CRYPTO_DEV_TALITOS2)
- return priv->features & TALITOS_FTR_SEC1 ? true : false;
-#elif defined(CONFIG_CRYPTO_DEV_TALITOS1)
- return true;
-#else
- return false;
-#endif
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_TALITOS1) &&
+ IS_ENABLED(CONFIG_CRYPTO_DEV_TALITOS2))
+ return priv->features & TALITOS_FTR_SEC1;
+
+ return IS_ENABLED(CONFIG_CRYPTO_DEV_TALITOS1);
}
/*
@@ -412,5 +409,5 @@ static inline bool has_ftr_sec1(struct talitos_private *priv)
/* link table extent field bits */
#define DESC_PTR_LNKTBL_JUMP 0x80
-#define DESC_PTR_LNKTBL_RETURN 0x02
+#define DESC_PTR_LNKTBL_RET 0x02
#define DESC_PTR_LNKTBL_NEXT 0x01
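The IS_ENABLED() rewrite of has_ftr_sec1() above keeps both branches visible to the compiler and folds to a constant in single-driver builds; as a summary (mine, derived from the function body above):

	/* TALITOS1=y, TALITOS2=y : runtime test of priv->features & TALITOS_FTR_SEC1 */
	/* TALITOS1=y, TALITOS2=n : compile-time true  -> SEC2+-only paths discarded  */
	/* TALITOS1=n, TALITOS2=y : compile-time false -> SEC1-only paths discarded   */

Unlike the old #if/#elif version, code guarded by the constant branches is still parsed and type-checked before being optimised away.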
diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
index c7e515a1bc97..d88084447f1c 100644
--- a/drivers/crypto/vmx/aes_cbc.c
+++ b/drivers/crypto/vmx/aes_cbc.c
@@ -7,64 +7,52 @@
* Author: Marcelo Henrique Cerri <mhcerri@br.ibm.com>
*/
-#include <linux/types.h>
-#include <linux/err.h>
-#include <linux/crypto.h>
-#include <linux/delay.h>
#include <asm/simd.h>
#include <asm/switch_to.h>
#include <crypto/aes.h>
#include <crypto/internal/simd.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/skcipher.h>
+#include <crypto/internal/skcipher.h>
#include "aesp8-ppc.h"
struct p8_aes_cbc_ctx {
- struct crypto_sync_skcipher *fallback;
+ struct crypto_skcipher *fallback;
struct aes_key enc_key;
struct aes_key dec_key;
};
-static int p8_aes_cbc_init(struct crypto_tfm *tfm)
+static int p8_aes_cbc_init(struct crypto_skcipher *tfm)
{
- const char *alg = crypto_tfm_alg_name(tfm);
- struct crypto_sync_skcipher *fallback;
- struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
-
- fallback = crypto_alloc_sync_skcipher(alg, 0,
- CRYPTO_ALG_NEED_FALLBACK);
+ struct p8_aes_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct crypto_skcipher *fallback;
+ fallback = crypto_alloc_skcipher("cbc(aes)", 0,
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC);
if (IS_ERR(fallback)) {
- printk(KERN_ERR
- "Failed to allocate transformation for '%s': %ld\n",
- alg, PTR_ERR(fallback));
+ pr_err("Failed to allocate cbc(aes) fallback: %ld\n",
+ PTR_ERR(fallback));
return PTR_ERR(fallback);
}
- crypto_sync_skcipher_set_flags(
- fallback,
- crypto_skcipher_get_flags((struct crypto_skcipher *)tfm));
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
+ crypto_skcipher_reqsize(fallback));
ctx->fallback = fallback;
-
return 0;
}
-static void p8_aes_cbc_exit(struct crypto_tfm *tfm)
+static void p8_aes_cbc_exit(struct crypto_skcipher *tfm)
{
- struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct p8_aes_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
- if (ctx->fallback) {
- crypto_free_sync_skcipher(ctx->fallback);
- ctx->fallback = NULL;
- }
+ crypto_free_skcipher(ctx->fallback);
}
-static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key,
+static int p8_aes_cbc_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
+ struct p8_aes_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
int ret;
- struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
preempt_disable();
pagefault_disable();
@@ -75,108 +63,71 @@ static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key,
pagefault_enable();
preempt_enable();
- ret |= crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
+ ret |= crypto_skcipher_setkey(ctx->fallback, key, keylen);
return ret ? -EINVAL : 0;
}
-static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int p8_aes_cbc_crypt(struct skcipher_request *req, int enc)
{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const struct p8_aes_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
int ret;
- struct blkcipher_walk walk;
- struct p8_aes_cbc_ctx *ctx =
- crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
if (!crypto_simd_usable()) {
- SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
- skcipher_request_set_sync_tfm(req, ctx->fallback);
- skcipher_request_set_callback(req, desc->flags, NULL, NULL);
- skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
- ret = crypto_skcipher_encrypt(req);
- skcipher_request_zero(req);
- } else {
- blkcipher_walk_init(&walk, dst, src, nbytes);
- ret = blkcipher_walk_virt(desc, &walk);
- while ((nbytes = walk.nbytes)) {
- preempt_disable();
- pagefault_disable();
- enable_kernel_vsx();
- aes_p8_cbc_encrypt(walk.src.virt.addr,
- walk.dst.virt.addr,
- nbytes & AES_BLOCK_MASK,
- &ctx->enc_key, walk.iv, 1);
- disable_kernel_vsx();
- pagefault_enable();
- preempt_enable();
-
- nbytes &= AES_BLOCK_SIZE - 1;
- ret = blkcipher_walk_done(desc, &walk, nbytes);
- }
+ struct skcipher_request *subreq = skcipher_request_ctx(req);
+
+ *subreq = *req;
+ skcipher_request_set_tfm(subreq, ctx->fallback);
+ return enc ? crypto_skcipher_encrypt(subreq) :
+ crypto_skcipher_decrypt(subreq);
}
+ ret = skcipher_walk_virt(&walk, req, false);
+ while ((nbytes = walk.nbytes) != 0) {
+ preempt_disable();
+ pagefault_disable();
+ enable_kernel_vsx();
+ aes_p8_cbc_encrypt(walk.src.virt.addr,
+ walk.dst.virt.addr,
+ round_down(nbytes, AES_BLOCK_SIZE),
+ enc ? &ctx->enc_key : &ctx->dec_key,
+ walk.iv, enc);
+ disable_kernel_vsx();
+ pagefault_enable();
+ preempt_enable();
+
+ ret = skcipher_walk_done(&walk, nbytes % AES_BLOCK_SIZE);
+ }
return ret;
}
-static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int p8_aes_cbc_encrypt(struct skcipher_request *req)
{
- int ret;
- struct blkcipher_walk walk;
- struct p8_aes_cbc_ctx *ctx =
- crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
-
- if (!crypto_simd_usable()) {
- SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
- skcipher_request_set_sync_tfm(req, ctx->fallback);
- skcipher_request_set_callback(req, desc->flags, NULL, NULL);
- skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
- ret = crypto_skcipher_decrypt(req);
- skcipher_request_zero(req);
- } else {
- blkcipher_walk_init(&walk, dst, src, nbytes);
- ret = blkcipher_walk_virt(desc, &walk);
- while ((nbytes = walk.nbytes)) {
- preempt_disable();
- pagefault_disable();
- enable_kernel_vsx();
- aes_p8_cbc_encrypt(walk.src.virt.addr,
- walk.dst.virt.addr,
- nbytes & AES_BLOCK_MASK,
- &ctx->dec_key, walk.iv, 0);
- disable_kernel_vsx();
- pagefault_enable();
- preempt_enable();
-
- nbytes &= AES_BLOCK_SIZE - 1;
- ret = blkcipher_walk_done(desc, &walk, nbytes);
- }
- }
-
- return ret;
+ return p8_aes_cbc_crypt(req, 1);
}
+static int p8_aes_cbc_decrypt(struct skcipher_request *req)
+{
+ return p8_aes_cbc_crypt(req, 0);
+}
-struct crypto_alg p8_aes_cbc_alg = {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "p8_aes_cbc",
- .cra_module = THIS_MODULE,
- .cra_priority = 2000,
- .cra_type = &crypto_blkcipher_type,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK,
- .cra_alignmask = 0,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct p8_aes_cbc_ctx),
- .cra_init = p8_aes_cbc_init,
- .cra_exit = p8_aes_cbc_exit,
- .cra_blkcipher = {
- .ivsize = AES_BLOCK_SIZE,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = p8_aes_cbc_setkey,
- .encrypt = p8_aes_cbc_encrypt,
- .decrypt = p8_aes_cbc_decrypt,
- },
+struct skcipher_alg p8_aes_cbc_alg = {
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "p8_aes_cbc",
+ .base.cra_module = THIS_MODULE,
+ .base.cra_priority = 2000,
+ .base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct p8_aes_cbc_ctx),
+ .setkey = p8_aes_cbc_setkey,
+ .encrypt = p8_aes_cbc_encrypt,
+ .decrypt = p8_aes_cbc_decrypt,
+ .init = p8_aes_cbc_init,
+ .exit = p8_aes_cbc_exit,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
};
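As converted above, the fallback tfm lives in the transform context and the fallback sub-request is carved out of the request context sized via crypto_skcipher_set_reqsize(). For orientation, a minimal synchronous caller of the resulting "cbc(aes)" skcipher could look like the sketch below; the function, its error handling and the single-scatterlist setup are illustrative only, not taken from this driver:

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>

/* Illustrative one-shot CBC encryption of a contiguous buffer; len must be
 * a multiple of AES_BLOCK_SIZE and keylen a valid AES key size. Whichever
 * "cbc(aes)" implementation has the highest priority services the request. */
static int cbc_encrypt_buf(u8 *buf, unsigned int len, const u8 *key,
			   unsigned int keylen, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}

Whether p8_aes_cbc or another "cbc(aes)" provider ends up doing the work is decided by cra_priority; the VSX path only runs when crypto_simd_usable() is true, otherwise the driver itself bounces to its fallback as shown in p8_aes_cbc_crypt() above.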
diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
index dd017ef42fa9..79ba062ee1c1 100644
--- a/drivers/crypto/vmx/aes_ctr.c
+++ b/drivers/crypto/vmx/aes_ctr.c
@@ -7,62 +7,51 @@
* Author: Marcelo Henrique Cerri <mhcerri@br.ibm.com>
*/
-#include <linux/types.h>
-#include <linux/err.h>
-#include <linux/crypto.h>
-#include <linux/delay.h>
#include <asm/simd.h>
#include <asm/switch_to.h>
#include <crypto/aes.h>
#include <crypto/internal/simd.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/skcipher.h>
+#include <crypto/internal/skcipher.h>
#include "aesp8-ppc.h"
struct p8_aes_ctr_ctx {
- struct crypto_sync_skcipher *fallback;
+ struct crypto_skcipher *fallback;
struct aes_key enc_key;
};
-static int p8_aes_ctr_init(struct crypto_tfm *tfm)
+static int p8_aes_ctr_init(struct crypto_skcipher *tfm)
{
- const char *alg = crypto_tfm_alg_name(tfm);
- struct crypto_sync_skcipher *fallback;
- struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct p8_aes_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct crypto_skcipher *fallback;
- fallback = crypto_alloc_sync_skcipher(alg, 0,
- CRYPTO_ALG_NEED_FALLBACK);
+ fallback = crypto_alloc_skcipher("ctr(aes)", 0,
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC);
if (IS_ERR(fallback)) {
- printk(KERN_ERR
- "Failed to allocate transformation for '%s': %ld\n",
- alg, PTR_ERR(fallback));
+ pr_err("Failed to allocate ctr(aes) fallback: %ld\n",
+ PTR_ERR(fallback));
return PTR_ERR(fallback);
}
- crypto_sync_skcipher_set_flags(
- fallback,
- crypto_skcipher_get_flags((struct crypto_skcipher *)tfm));
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
+ crypto_skcipher_reqsize(fallback));
ctx->fallback = fallback;
-
return 0;
}
-static void p8_aes_ctr_exit(struct crypto_tfm *tfm)
+static void p8_aes_ctr_exit(struct crypto_skcipher *tfm)
{
- struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct p8_aes_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
- if (ctx->fallback) {
- crypto_free_sync_skcipher(ctx->fallback);
- ctx->fallback = NULL;
- }
+ crypto_free_skcipher(ctx->fallback);
}
-static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key,
+static int p8_aes_ctr_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
+ struct p8_aes_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
int ret;
- struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
preempt_disable();
pagefault_disable();
@@ -72,13 +61,13 @@ static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key,
pagefault_enable();
preempt_enable();
- ret |= crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
+ ret |= crypto_skcipher_setkey(ctx->fallback, key, keylen);
return ret ? -EINVAL : 0;
}
-static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx,
- struct blkcipher_walk *walk)
+static void p8_aes_ctr_final(const struct p8_aes_ctr_ctx *ctx,
+ struct skcipher_walk *walk)
{
u8 *ctrblk = walk->iv;
u8 keystream[AES_BLOCK_SIZE];
@@ -98,77 +87,63 @@ static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx,
crypto_inc(ctrblk, AES_BLOCK_SIZE);
}
-static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int p8_aes_ctr_crypt(struct skcipher_request *req)
{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const struct p8_aes_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
int ret;
- u64 inc;
- struct blkcipher_walk walk;
- struct p8_aes_ctr_ctx *ctx =
- crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
if (!crypto_simd_usable()) {
- SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
- skcipher_request_set_sync_tfm(req, ctx->fallback);
- skcipher_request_set_callback(req, desc->flags, NULL, NULL);
- skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
- ret = crypto_skcipher_encrypt(req);
- skcipher_request_zero(req);
- } else {
- blkcipher_walk_init(&walk, dst, src, nbytes);
- ret = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
- while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
- preempt_disable();
- pagefault_disable();
- enable_kernel_vsx();
- aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr,
- walk.dst.virt.addr,
- (nbytes &
- AES_BLOCK_MASK) /
- AES_BLOCK_SIZE,
- &ctx->enc_key,
- walk.iv);
- disable_kernel_vsx();
- pagefault_enable();
- preempt_enable();
-
- /* We need to update IV mostly for last bytes/round */
- inc = (nbytes & AES_BLOCK_MASK) / AES_BLOCK_SIZE;
- if (inc > 0)
- while (inc--)
- crypto_inc(walk.iv, AES_BLOCK_SIZE);
-
- nbytes &= AES_BLOCK_SIZE - 1;
- ret = blkcipher_walk_done(desc, &walk, nbytes);
- }
- if (walk.nbytes) {
- p8_aes_ctr_final(ctx, &walk);
- ret = blkcipher_walk_done(desc, &walk, 0);
- }
+ struct skcipher_request *subreq = skcipher_request_ctx(req);
+
+ *subreq = *req;
+ skcipher_request_set_tfm(subreq, ctx->fallback);
+ return crypto_skcipher_encrypt(subreq);
}
+ ret = skcipher_walk_virt(&walk, req, false);
+ while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
+ preempt_disable();
+ pagefault_disable();
+ enable_kernel_vsx();
+ aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr,
+ walk.dst.virt.addr,
+ nbytes / AES_BLOCK_SIZE,
+ &ctx->enc_key, walk.iv);
+ disable_kernel_vsx();
+ pagefault_enable();
+ preempt_enable();
+
+ do {
+ crypto_inc(walk.iv, AES_BLOCK_SIZE);
+ } while ((nbytes -= AES_BLOCK_SIZE) >= AES_BLOCK_SIZE);
+
+ ret = skcipher_walk_done(&walk, nbytes);
+ }
+ if (nbytes) {
+ p8_aes_ctr_final(ctx, &walk);
+ ret = skcipher_walk_done(&walk, 0);
+ }
return ret;
}
-struct crypto_alg p8_aes_ctr_alg = {
- .cra_name = "ctr(aes)",
- .cra_driver_name = "p8_aes_ctr",
- .cra_module = THIS_MODULE,
- .cra_priority = 2000,
- .cra_type = &crypto_blkcipher_type,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK,
- .cra_alignmask = 0,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct p8_aes_ctr_ctx),
- .cra_init = p8_aes_ctr_init,
- .cra_exit = p8_aes_ctr_exit,
- .cra_blkcipher = {
- .ivsize = AES_BLOCK_SIZE,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = p8_aes_ctr_setkey,
- .encrypt = p8_aes_ctr_crypt,
- .decrypt = p8_aes_ctr_crypt,
- },
+struct skcipher_alg p8_aes_ctr_alg = {
+ .base.cra_name = "ctr(aes)",
+ .base.cra_driver_name = "p8_aes_ctr",
+ .base.cra_module = THIS_MODULE,
+ .base.cra_priority = 2000,
+ .base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct p8_aes_ctr_ctx),
+ .setkey = p8_aes_ctr_setkey,
+ .encrypt = p8_aes_ctr_crypt,
+ .decrypt = p8_aes_ctr_crypt,
+ .init = p8_aes_ctr_init,
+ .exit = p8_aes_ctr_exit,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .chunksize = AES_BLOCK_SIZE,
};
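p8_aes_ctr_crypt() above bumps walk.iv once per full block handed to the assembler, and the trailing partial block is finished by p8_aes_ctr_final(). A rough model of that tail step (illustrative only; the real function additionally wraps the AES call in preempt/pagefault/VSX enable-disable):

#include <crypto/algapi.h>	/* crypto_inc(), crypto_xor_cpy() */
#include "aesp8-ppc.h"		/* aes_p8_encrypt(), struct aes_key */

static void ctr_final_model(const struct aes_key *key, u8 *ctr,
			    const u8 *src, u8 *dst, unsigned int nbytes)
{
	u8 keystream[AES_BLOCK_SIZE];

	aes_p8_encrypt(ctr, keystream, key);		/* keystream = E_k(counter) */
	crypto_xor_cpy(dst, src, keystream, nbytes);	/* only nbytes < 16 used    */
	crypto_inc(ctr, AES_BLOCK_SIZE);		/* big-endian 128-bit ++    */
}

Registering with .base.cra_blocksize = 1 and .chunksize = AES_BLOCK_SIZE is what lets ctr(aes) accept arbitrary request lengths while still telling the walk code the natural processing unit.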
diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c
index 536167e737a0..49f7258045fa 100644
--- a/drivers/crypto/vmx/aes_xts.c
+++ b/drivers/crypto/vmx/aes_xts.c
@@ -7,67 +7,56 @@
* Author: Leonidas S. Barbosa <leosilva@linux.vnet.ibm.com>
*/
-#include <linux/types.h>
-#include <linux/err.h>
-#include <linux/crypto.h>
-#include <linux/delay.h>
#include <asm/simd.h>
#include <asm/switch_to.h>
#include <crypto/aes.h>
#include <crypto/internal/simd.h>
-#include <crypto/scatterwalk.h>
+#include <crypto/internal/skcipher.h>
#include <crypto/xts.h>
-#include <crypto/skcipher.h>
#include "aesp8-ppc.h"
struct p8_aes_xts_ctx {
- struct crypto_sync_skcipher *fallback;
+ struct crypto_skcipher *fallback;
struct aes_key enc_key;
struct aes_key dec_key;
struct aes_key tweak_key;
};
-static int p8_aes_xts_init(struct crypto_tfm *tfm)
+static int p8_aes_xts_init(struct crypto_skcipher *tfm)
{
- const char *alg = crypto_tfm_alg_name(tfm);
- struct crypto_sync_skcipher *fallback;
- struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct p8_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct crypto_skcipher *fallback;
- fallback = crypto_alloc_sync_skcipher(alg, 0,
- CRYPTO_ALG_NEED_FALLBACK);
+ fallback = crypto_alloc_skcipher("xts(aes)", 0,
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC);
if (IS_ERR(fallback)) {
- printk(KERN_ERR
- "Failed to allocate transformation for '%s': %ld\n",
- alg, PTR_ERR(fallback));
+ pr_err("Failed to allocate xts(aes) fallback: %ld\n",
+ PTR_ERR(fallback));
return PTR_ERR(fallback);
}
- crypto_sync_skcipher_set_flags(
- fallback,
- crypto_skcipher_get_flags((struct crypto_skcipher *)tfm));
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
+ crypto_skcipher_reqsize(fallback));
ctx->fallback = fallback;
-
return 0;
}
-static void p8_aes_xts_exit(struct crypto_tfm *tfm)
+static void p8_aes_xts_exit(struct crypto_skcipher *tfm)
{
- struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct p8_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
- if (ctx->fallback) {
- crypto_free_sync_skcipher(ctx->fallback);
- ctx->fallback = NULL;
- }
+ crypto_free_skcipher(ctx->fallback);
}
-static int p8_aes_xts_setkey(struct crypto_tfm *tfm, const u8 *key,
+static int p8_aes_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
+ struct p8_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
int ret;
- struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
- ret = xts_check_key(tfm, key, keylen);
+ ret = xts_verify_key(tfm, key, keylen);
if (ret)
return ret;
@@ -81,100 +70,90 @@ static int p8_aes_xts_setkey(struct crypto_tfm *tfm, const u8 *key,
pagefault_enable();
preempt_enable();
- ret |= crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
+ ret |= crypto_skcipher_setkey(ctx->fallback, key, keylen);
return ret ? -EINVAL : 0;
}
-static int p8_aes_xts_crypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes, int enc)
+static int p8_aes_xts_crypt(struct skcipher_request *req, int enc)
{
- int ret;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const struct p8_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
u8 tweak[AES_BLOCK_SIZE];
- u8 *iv;
- struct blkcipher_walk walk;
- struct p8_aes_xts_ctx *ctx =
- crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
+ int ret;
if (!crypto_simd_usable()) {
- SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
- skcipher_request_set_sync_tfm(req, ctx->fallback);
- skcipher_request_set_callback(req, desc->flags, NULL, NULL);
- skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
- ret = enc? crypto_skcipher_encrypt(req) : crypto_skcipher_decrypt(req);
- skcipher_request_zero(req);
- } else {
- blkcipher_walk_init(&walk, dst, src, nbytes);
+ struct skcipher_request *subreq = skcipher_request_ctx(req);
+
+ *subreq = *req;
+ skcipher_request_set_tfm(subreq, ctx->fallback);
+ return enc ? crypto_skcipher_encrypt(subreq) :
+ crypto_skcipher_decrypt(subreq);
+ }
+
+ ret = skcipher_walk_virt(&walk, req, false);
+ if (ret)
+ return ret;
+
+ preempt_disable();
+ pagefault_disable();
+ enable_kernel_vsx();
- ret = blkcipher_walk_virt(desc, &walk);
+ aes_p8_encrypt(walk.iv, tweak, &ctx->tweak_key);
+
+ disable_kernel_vsx();
+ pagefault_enable();
+ preempt_enable();
+ while ((nbytes = walk.nbytes) != 0) {
preempt_disable();
pagefault_disable();
enable_kernel_vsx();
-
- iv = walk.iv;
- memset(tweak, 0, AES_BLOCK_SIZE);
- aes_p8_encrypt(iv, tweak, &ctx->tweak_key);
-
+ if (enc)
+ aes_p8_xts_encrypt(walk.src.virt.addr,
+ walk.dst.virt.addr,
+ round_down(nbytes, AES_BLOCK_SIZE),
+ &ctx->enc_key, NULL, tweak);
+ else
+ aes_p8_xts_decrypt(walk.src.virt.addr,
+ walk.dst.virt.addr,
+ round_down(nbytes, AES_BLOCK_SIZE),
+ &ctx->dec_key, NULL, tweak);
disable_kernel_vsx();
pagefault_enable();
preempt_enable();
- while ((nbytes = walk.nbytes)) {
- preempt_disable();
- pagefault_disable();
- enable_kernel_vsx();
- if (enc)
- aes_p8_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
- nbytes & AES_BLOCK_MASK, &ctx->enc_key, NULL, tweak);
- else
- aes_p8_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
- nbytes & AES_BLOCK_MASK, &ctx->dec_key, NULL, tweak);
- disable_kernel_vsx();
- pagefault_enable();
- preempt_enable();
-
- nbytes &= AES_BLOCK_SIZE - 1;
- ret = blkcipher_walk_done(desc, &walk, nbytes);
- }
+ ret = skcipher_walk_done(&walk, nbytes % AES_BLOCK_SIZE);
}
return ret;
}
-static int p8_aes_xts_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int p8_aes_xts_encrypt(struct skcipher_request *req)
{
- return p8_aes_xts_crypt(desc, dst, src, nbytes, 1);
+ return p8_aes_xts_crypt(req, 1);
}
-static int p8_aes_xts_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int p8_aes_xts_decrypt(struct skcipher_request *req)
{
- return p8_aes_xts_crypt(desc, dst, src, nbytes, 0);
+ return p8_aes_xts_crypt(req, 0);
}
-struct crypto_alg p8_aes_xts_alg = {
- .cra_name = "xts(aes)",
- .cra_driver_name = "p8_aes_xts",
- .cra_module = THIS_MODULE,
- .cra_priority = 2000,
- .cra_type = &crypto_blkcipher_type,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK,
- .cra_alignmask = 0,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct p8_aes_xts_ctx),
- .cra_init = p8_aes_xts_init,
- .cra_exit = p8_aes_xts_exit,
- .cra_blkcipher = {
- .ivsize = AES_BLOCK_SIZE,
- .min_keysize = 2 * AES_MIN_KEY_SIZE,
- .max_keysize = 2 * AES_MAX_KEY_SIZE,
- .setkey = p8_aes_xts_setkey,
- .encrypt = p8_aes_xts_encrypt,
- .decrypt = p8_aes_xts_decrypt,
- }
+struct skcipher_alg p8_aes_xts_alg = {
+ .base.cra_name = "xts(aes)",
+ .base.cra_driver_name = "p8_aes_xts",
+ .base.cra_module = THIS_MODULE,
+ .base.cra_priority = 2000,
+ .base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct p8_aes_xts_ctx),
+ .setkey = p8_aes_xts_setkey,
+ .encrypt = p8_aes_xts_encrypt,
+ .decrypt = p8_aes_xts_decrypt,
+ .init = p8_aes_xts_init,
+ .exit = p8_aes_xts_exit,
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
};
diff --git a/drivers/crypto/vmx/aesp8-ppc.h b/drivers/crypto/vmx/aesp8-ppc.h
index 349646b73754..01774a4d26a2 100644
--- a/drivers/crypto/vmx/aesp8-ppc.h
+++ b/drivers/crypto/vmx/aesp8-ppc.h
@@ -2,8 +2,6 @@
#include <linux/types.h>
#include <crypto/aes.h>
-#define AES_BLOCK_MASK (~(AES_BLOCK_SIZE-1))
-
struct aes_key {
u8 key[AES_MAX_KEYLENGTH];
int rounds;
diff --git a/drivers/crypto/vmx/aesp8-ppc.pl b/drivers/crypto/vmx/aesp8-ppc.pl
index 9c6b5c1d6a1a..db874367b602 100644
--- a/drivers/crypto/vmx/aesp8-ppc.pl
+++ b/drivers/crypto/vmx/aesp8-ppc.pl
@@ -1286,6 +1286,24 @@ ___
#########################################################################
{{{ # CTR procedure[s] #
+
+####################### WARNING: Here be dragons! #######################
+#
+# This code is written as 'ctr32', based on a 32-bit counter used
+# upstream. The kernel does *not* use a 32-bit counter. The kernel uses
+# a 128-bit counter.
+#
+# This leads to subtle changes from the upstream code: the counter
+# is incremented with vadduqm rather than vadduwm. This occurs in
+# both the bulk (8 blocks at a time) path, and in the individual block
+# path. Be aware of this when doing updates.
+#
+# See:
+# 1d4aa0b4c181 ("crypto: vmx - Fixing AES-CTR counter bug")
+# 009b30ac7444 ("crypto: vmx - CTR: always increment IV as quadword")
+# https://github.com/openssl/openssl/pull/8942
+#
+#########################################################################
my ($inp,$out,$len,$key,$ivp,$x10,$rounds,$idx)=map("r$_",(3..10));
my ($rndkey0,$rndkey1,$inout,$tmp)= map("v$_",(0..3));
my ($ivec,$inptail,$inpperm,$outhead,$outperm,$outmask,$keyperm,$one)=
@@ -1357,7 +1375,7 @@ Loop_ctr32_enc:
addi $idx,$idx,16
bdnz Loop_ctr32_enc
- vadduqm $ivec,$ivec,$one
+ vadduqm $ivec,$ivec,$one # Kernel change for 128-bit
vmr $dat,$inptail
lvx $inptail,0,$inp
addi $inp,$inp,16
@@ -1501,7 +1519,7 @@ Load_ctr32_enc_key:
$SHL $len,$len,4
vadduqm $out1,$ivec,$one # counter values ...
- vadduqm $out2,$ivec,$two
+ vadduqm $out2,$ivec,$two # (do all ctr adds as 128-bit)
vxor $out0,$ivec,$rndkey0 # ... xored with rndkey[0]
le?li $idx,8
vadduqm $out3,$out1,$two
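To make the warning block above concrete: the kernel's ctr(aes) convention treats the whole 16-byte IV as one big-endian counter, so the carry out of the low 32 bits must propagate. Example values (mine), starting from an IV whose low word is about to overflow:

	IV before:             00 01 02 03 04 05 06 07 08 09 0a 0b ff ff ff ff
	32-bit add (vadduwm):  00 01 02 03 04 05 06 07 08 09 0a 0b 00 00 00 00
	128-bit add (vadduqm): 00 01 02 03 04 05 06 07 08 09 0a 0c 00 00 00 00

crypto_inc(iv, AES_BLOCK_SIZE) in the generic code behaves like the 128-bit row, which is why both the bulk and single-block paths here must use vadduqm.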
diff --git a/drivers/crypto/vmx/vmx.c b/drivers/crypto/vmx/vmx.c
index 6c4c77f4e159..3e0335fb406c 100644
--- a/drivers/crypto/vmx/vmx.c
+++ b/drivers/crypto/vmx/vmx.c
@@ -15,54 +15,58 @@
#include <linux/crypto.h>
#include <asm/cputable.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
extern struct shash_alg p8_ghash_alg;
extern struct crypto_alg p8_aes_alg;
-extern struct crypto_alg p8_aes_cbc_alg;
-extern struct crypto_alg p8_aes_ctr_alg;
-extern struct crypto_alg p8_aes_xts_alg;
-static struct crypto_alg *algs[] = {
- &p8_aes_alg,
- &p8_aes_cbc_alg,
- &p8_aes_ctr_alg,
- &p8_aes_xts_alg,
- NULL,
-};
+extern struct skcipher_alg p8_aes_cbc_alg;
+extern struct skcipher_alg p8_aes_ctr_alg;
+extern struct skcipher_alg p8_aes_xts_alg;
static int __init p8_init(void)
{
- int ret = 0;
- struct crypto_alg **alg_it;
+ int ret;
- for (alg_it = algs; *alg_it; alg_it++) {
- ret = crypto_register_alg(*alg_it);
- printk(KERN_INFO "crypto_register_alg '%s' = %d\n",
- (*alg_it)->cra_name, ret);
- if (ret) {
- for (alg_it--; alg_it >= algs; alg_it--)
- crypto_unregister_alg(*alg_it);
- break;
- }
- }
+ ret = crypto_register_shash(&p8_ghash_alg);
if (ret)
- return ret;
+ goto err;
- ret = crypto_register_shash(&p8_ghash_alg);
- if (ret) {
- for (alg_it = algs; *alg_it; alg_it++)
- crypto_unregister_alg(*alg_it);
- }
+ ret = crypto_register_alg(&p8_aes_alg);
+ if (ret)
+ goto err_unregister_ghash;
+
+ ret = crypto_register_skcipher(&p8_aes_cbc_alg);
+ if (ret)
+ goto err_unregister_aes;
+
+ ret = crypto_register_skcipher(&p8_aes_ctr_alg);
+ if (ret)
+ goto err_unregister_aes_cbc;
+
+ ret = crypto_register_skcipher(&p8_aes_xts_alg);
+ if (ret)
+ goto err_unregister_aes_ctr;
+
+ return 0;
+
+err_unregister_aes_ctr:
+ crypto_unregister_skcipher(&p8_aes_ctr_alg);
+err_unregister_aes_cbc:
+ crypto_unregister_skcipher(&p8_aes_cbc_alg);
+err_unregister_aes:
+ crypto_unregister_alg(&p8_aes_alg);
+err_unregister_ghash:
+ crypto_unregister_shash(&p8_ghash_alg);
+err:
return ret;
}
static void __exit p8_exit(void)
{
- struct crypto_alg **alg_it;
-
- for (alg_it = algs; *alg_it; alg_it++) {
- printk(KERN_INFO "Removing '%s'\n", (*alg_it)->cra_name);
- crypto_unregister_alg(*alg_it);
- }
+ crypto_unregister_skcipher(&p8_aes_xts_alg);
+ crypto_unregister_skcipher(&p8_aes_ctr_alg);
+ crypto_unregister_skcipher(&p8_aes_cbc_alg);
+ crypto_unregister_alg(&p8_aes_alg);
crypto_unregister_shash(&p8_ghash_alg);
}
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index 263bee76ef0d..6b8c4c458e8a 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -718,12 +718,13 @@ static irqreturn_t jz4780_dma_irq_handler(int irq, void *data)
{
struct jz4780_dma_dev *jzdma = data;
unsigned int nb_channels = jzdma->soc_data->nb_channels;
- uint32_t pending, dmac;
+ unsigned long pending;
+ uint32_t dmac;
int i;
pending = jz4780_dma_ctrl_readl(jzdma, JZ_DMA_REG_DIRQP);
- for_each_set_bit(i, (unsigned long *)&pending, nb_channels) {
+ for_each_set_bit(i, &pending, nb_channels) {
if (jz4780_dma_chan_irq(jzdma, &jzdma->chan[i]))
pending &= ~BIT(i);
}
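The point of widening 'pending' above is that for_each_set_bit() operates on a const unsigned long *: feeding it a casted u32 makes the bit helpers read a full long (reaching past the variable on 64-bit) and, on 64-bit big-endian, look at the wrong half of the word. A minimal sketch of the safe pattern (illustrative, not from the driver):

#include <linux/bitops.h>
#include <linux/printk.h>

static void walk_hw_mask(u32 hw_status)
{
	unsigned long pending = hw_status;	/* widen by assignment, never by cast */
	unsigned int i;

	for_each_set_bit(i, &pending, 32)
		pr_info("channel %u raised an interrupt\n", i);
}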
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 99d9f431ae2c..4ec84a633bd3 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -703,7 +703,7 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
spin_lock_irqsave(&sdma->channel_0_lock, flags);
bd0->mode.command = C0_SETPM;
- bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
+ bd0->mode.status = BD_DONE | BD_WRAP | BD_EXTD;
bd0->mode.count = size / 2;
bd0->buffer_addr = buf_phys;
bd0->ext_buffer_addr = address;
@@ -1025,7 +1025,7 @@ static int sdma_load_context(struct sdma_channel *sdmac)
context->gReg[7] = sdmac->watermark_level;
bd0->mode.command = C0_SETDM;
- bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
+ bd0->mode.status = BD_DONE | BD_WRAP | BD_EXTD;
bd0->mode.count = sizeof(*context) / 4;
bd0->buffer_addr = sdma->context_phys;
bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel;
@@ -2096,27 +2096,6 @@ static int sdma_probe(struct platform_device *pdev)
if (pdata && pdata->script_addrs)
sdma_add_scripts(sdma, pdata->script_addrs);
- if (pdata) {
- ret = sdma_get_firmware(sdma, pdata->fw_name);
- if (ret)
- dev_warn(&pdev->dev, "failed to get firmware from platform data\n");
- } else {
- /*
- * Because that device tree does not encode ROM script address,
- * the RAM script in firmware is mandatory for device tree
- * probe, otherwise it fails.
- */
- ret = of_property_read_string(np, "fsl,sdma-ram-script-name",
- &fw_name);
- if (ret)
- dev_warn(&pdev->dev, "failed to get firmware name\n");
- else {
- ret = sdma_get_firmware(sdma, fw_name);
- if (ret)
- dev_warn(&pdev->dev, "failed to get firmware from device tree\n");
- }
- }
-
sdma->dma_device.dev = &pdev->dev;
sdma->dma_device.device_alloc_chan_resources = sdma_alloc_chan_resources;
@@ -2161,6 +2140,33 @@ static int sdma_probe(struct platform_device *pdev)
of_node_put(spba_bus);
}
+ /*
+ * Kick off firmware loading as the very last step:
+ * attempt to load firmware only if we're not on the error path, because
+ * the firmware callback requires a fully functional and allocated sdma
+ * instance.
+ */
+ if (pdata) {
+ ret = sdma_get_firmware(sdma, pdata->fw_name);
+ if (ret)
+ dev_warn(&pdev->dev, "failed to get firmware from platform data\n");
+ } else {
+ /*
+ * Because the device tree does not encode the ROM script address,
+ * the RAM script in the firmware is mandatory for a device tree
+ * probe; otherwise probing fails.
+ */
+ ret = of_property_read_string(np, "fsl,sdma-ram-script-name",
+ &fw_name);
+ if (ret) {
+ dev_warn(&pdev->dev, "failed to get firmware name\n");
+ } else {
+ ret = sdma_get_firmware(sdma, fw_name);
+ if (ret)
+ dev_warn(&pdev->dev, "failed to get firmware from device tree\n");
+ }
+ }
+
return 0;
err_register:
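
The sdma_probe() reshuffle moves firmware loading to the very end so that, by the time the asynchronous firmware callback can run, every structure it touches is fully set up and no later probe step can still fail underneath it. The ordering idea in isolation (setup_engine(), register_dma_device() and start_async_firmware_load() are invented stand-ins, not the driver's helpers):

#include <stdio.h>

static int setup_engine(void)               { return 0; }
static int register_dma_device(void)        { return 0; }
static void start_async_firmware_load(void) { puts("firmware load queued"); }

static int probe(void)
{
    int ret;

    ret = setup_engine();
    if (ret)
        return ret;              /* nothing asynchronous is in flight yet */

    ret = register_dma_device();
    if (ret)
        return ret;

    /* Last step: the async callback may now safely use the registered device. */
    start_async_firmware_load();
    return 0;
}

int main(void)
{
    return probe();
}
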
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index 4b43844f6af5..8e90a405939d 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -799,6 +799,9 @@ static u32 process_channel_irqs(struct bam_device *bdev)
/* Number of bytes available to read */
avail = CIRC_CNT(offset, bchan->head, MAX_DESCRIPTORS + 1);
+ if (offset < bchan->head)
+ avail--;
+
list_for_each_entry_safe(async_desc, tmp,
&bchan->desc_list, desc_node) {
/* Not enough data to read */
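
The added avail-- applies only when the hardware offset has wrapped behind the software head, trimming one descriptor from the computed count; the surrounding arithmetic is the standard power-of-two circular-buffer count. That count on its own, mirroring the kernel's CIRC_CNT() macro with made-up pointer values:

#include <stdio.h>

#define RING_SIZE 8                                  /* must be a power of two */
#define CIRC_CNT(head, tail, size) (((head) - (tail)) & ((size) - 1))

int main(void)
{
    unsigned int head = 2, tail = 6;  /* producer has wrapped past the consumer */

    /* (2 - 6) & 7 == 4: four entries are ready to be consumed. */
    printf("entries available: %u\n", CIRC_CNT(head, tail, RING_SIZE));
    return 0;
}
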
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 5e2e0348d460..200c04ce5b0e 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -460,6 +460,12 @@ config EDAC_ALTERA_SDMMC
Support for error detection and correction on the
Altera SDMMC FIFO Memory for Altera SoCs.
+config EDAC_SIFIVE
+ bool "Sifive platform EDAC driver"
+ depends on EDAC=y && RISCV
+ help
+ Support for error detection and correction on the SiFive SoCs.
+
config EDAC_SYNOPSYS
tristate "Synopsys DDR Memory Controller"
depends on ARCH_ZYNQ || ARCH_ZYNQMP
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index 89ad4a84a0f6..165ca65e1a3a 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -79,6 +79,7 @@ obj-$(CONFIG_EDAC_OCTEON_PCI) += octeon_edac-pci.o
obj-$(CONFIG_EDAC_THUNDERX) += thunderx_edac.o
obj-$(CONFIG_EDAC_ALTERA) += altera_edac.o
+obj-$(CONFIG_EDAC_SIFIVE) += sifive_edac.o
obj-$(CONFIG_EDAC_SYNOPSYS) += synopsys_edac.o
obj-$(CONFIG_EDAC_XGENE) += xgene_edac.o
obj-$(CONFIG_EDAC_TI) += ti_edac.o
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index 8816f74a22b4..c2e693e34d43 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -1223,8 +1223,31 @@ static const struct edac_device_prv_data ocramecc_data = {
.inject_fops = &altr_edac_device_inject_fops,
};
+static int __maybe_unused
+altr_check_ocram_deps_init(struct altr_edac_device_dev *device)
+{
+ void __iomem *base = device->base;
+ int ret;
+
+ ret = altr_check_ecc_deps(device);
+ if (ret)
+ return ret;
+
+ /* Verify OCRAM has been initialized */
+ if (!ecc_test_bits(ALTR_A10_ECC_INITCOMPLETEA,
+ (base + ALTR_A10_ECC_INITSTAT_OFST)))
+ return -ENODEV;
+
+ /* Enable IRQ on Single Bit Error */
+ writel(ALTR_A10_ECC_SERRINTEN, (base + ALTR_A10_ECC_ERRINTENS_OFST));
+ /* Ensure all writes complete */
+ wmb();
+
+ return 0;
+}
+
static const struct edac_device_prv_data a10_ocramecc_data = {
- .setup = altr_check_ecc_deps,
+ .setup = altr_check_ocram_deps_init,
.ce_clear_mask = ALTR_A10_ECC_SERRPENA,
.ue_clear_mask = ALTR_A10_ECC_DERRPENA,
.irq_status_mask = A10_SYSMGR_ECC_INTSTAT_OCRAM,
@@ -1234,7 +1257,7 @@ static const struct edac_device_prv_data a10_ocramecc_data = {
.ue_set_mask = ALTR_A10_ECC_TDERRA,
.set_err_ofst = ALTR_A10_ECC_INTTEST_OFST,
.ecc_irq_handler = altr_edac_a10_ecc_irq,
- .inject_fops = &altr_edac_a10_device_inject_fops,
+ .inject_fops = &altr_edac_a10_device_inject2_fops,
/*
* OCRAM panic on uncorrectable error because sleep/resume
* functions and FPGA contents are stored in OCRAM. Prefer
@@ -1560,8 +1583,12 @@ static int altr_portb_setup(struct altr_edac_device_dev *device)
dci->mod_name = ecc_name;
dci->dev_name = ecc_name;
- /* Update the IRQs for PortB */
+ /* Update the PortB IRQs - A10 has 4, S10 has 2; index accordingly */
+#ifdef CONFIG_ARCH_STRATIX10
+ altdev->sb_irq = irq_of_parse_and_map(np, 1);
+#else
altdev->sb_irq = irq_of_parse_and_map(np, 2);
+#endif
if (!altdev->sb_irq) {
edac_printk(KERN_ERR, EDAC_DEVICE, "Error PortB SBIRQ alloc\n");
rc = -ENODEV;
@@ -1576,6 +1603,15 @@ static int altr_portb_setup(struct altr_edac_device_dev *device)
goto err_release_group_1;
}
+#ifdef CONFIG_ARCH_STRATIX10
+ /* Use IRQ to determine SError origin instead of assigning IRQ */
+ rc = of_property_read_u32_index(np, "interrupts", 1, &altdev->db_irq);
+ if (rc) {
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "Error PortB DBIRQ alloc\n");
+ goto err_release_group_1;
+ }
+#else
altdev->db_irq = irq_of_parse_and_map(np, 3);
if (!altdev->db_irq) {
edac_printk(KERN_ERR, EDAC_DEVICE, "Error PortB DBIRQ alloc\n");
@@ -1590,6 +1626,7 @@ static int altr_portb_setup(struct altr_edac_device_dev *device)
edac_printk(KERN_ERR, EDAC_DEVICE, "PortB DBERR IRQ error\n");
goto err_release_group_1;
}
+#endif
rc = edac_device_add_device(dci);
if (rc) {
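
The new altr_check_ocram_deps_init() setup hook layers an extra gate on top of the generic dependency check: bail out unless the OCRAM init-complete status bit is set, then enable the single-bit-error interrupt, with a wmb() so the enable write is pushed out. Reduced to plain C with invented register names (real hardware access would go through readl()/writel() on device->base):

#include <stdint.h>
#include <stdio.h>

#define INIT_COMPLETE   (1u << 0)
#define SBE_IRQ_ENABLE  (1u << 1)

int main(void)
{
    /* Stand-ins for the memory-mapped status/enable registers. */
    uint32_t init_status = INIT_COMPLETE;   /* pretend init already finished */
    uint32_t irq_enable = 0;

    if (!(init_status & INIT_COMPLETE)) {
        fprintf(stderr, "OCRAM ECC not initialized, refusing to enable IRQ\n");
        return 1;
    }

    irq_enable |= SBE_IRQ_ENABLE;           /* arm the single-bit-error IRQ */
    printf("irq_enable = 0x%x\n", irq_enable);
    return 0;
}
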
diff --git a/drivers/edac/aspeed_edac.c b/drivers/edac/aspeed_edac.c
index 11833c0a5d07..5634437bb39d 100644
--- a/drivers/edac/aspeed_edac.c
+++ b/drivers/edac/aspeed_edac.c
@@ -281,15 +281,11 @@ static int aspeed_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct edac_mc_layer layers[2];
struct mem_ctl_info *mci;
- struct device_node *np;
struct resource *res;
void __iomem *regs;
u32 reg04;
int rc;
- /* setup regmap */
- np = dev->of_node;
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENOENT;
diff --git a/drivers/edac/debugfs.c b/drivers/edac/debugfs.c
index 6b8e484db851..1f943599a8ac 100644
--- a/drivers/edac/debugfs.c
+++ b/drivers/edac/debugfs.c
@@ -118,23 +118,23 @@ edac_debugfs_create_file(const char *name, umode_t mode, struct dentry *parent,
EXPORT_SYMBOL_GPL(edac_debugfs_create_file);
/* Wrapper for debugfs_create_x8() */
-struct dentry *edac_debugfs_create_x8(const char *name, umode_t mode,
- struct dentry *parent, u8 *value)
+void edac_debugfs_create_x8(const char *name, umode_t mode,
+ struct dentry *parent, u8 *value)
{
if (!parent)
parent = edac_debugfs;
- return debugfs_create_x8(name, mode, parent, value);
+ debugfs_create_x8(name, mode, parent, value);
}
EXPORT_SYMBOL_GPL(edac_debugfs_create_x8);
/* Wrapper for debugfs_create_x16() */
-struct dentry *edac_debugfs_create_x16(const char *name, umode_t mode,
- struct dentry *parent, u16 *value)
+void edac_debugfs_create_x16(const char *name, umode_t mode,
+ struct dentry *parent, u16 *value)
{
if (!parent)
parent = edac_debugfs;
- return debugfs_create_x16(name, mode, parent, value);
+ debugfs_create_x16(name, mode, parent, value);
}
EXPORT_SYMBOL_GPL(edac_debugfs_create_x16);
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 464174685589..4386ea4b9b5a 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -26,7 +26,7 @@
static int edac_mc_log_ue = 1;
static int edac_mc_log_ce = 1;
static int edac_mc_panic_on_ue;
-static int edac_mc_poll_msec = 1000;
+static unsigned int edac_mc_poll_msec = 1000;
/* Getter functions for above */
int edac_mc_get_log_ue(void)
@@ -45,30 +45,30 @@ int edac_mc_get_panic_on_ue(void)
}
/* this is temporary */
-int edac_mc_get_poll_msec(void)
+unsigned int edac_mc_get_poll_msec(void)
{
return edac_mc_poll_msec;
}
static int edac_set_poll_msec(const char *val, const struct kernel_param *kp)
{
- unsigned long l;
+ unsigned int i;
int ret;
if (!val)
return -EINVAL;
- ret = kstrtoul(val, 0, &l);
+ ret = kstrtouint(val, 0, &i);
if (ret)
return ret;
- if (l < 1000)
+ if (i < 1000)
return -EINVAL;
- *((unsigned long *)kp->arg) = l;
+ *((unsigned int *)kp->arg) = i;
/* notify edac_mc engine to reset the poll period */
- edac_mc_reset_delay_period(l);
+ edac_mc_reset_delay_period(i);
return 0;
}
@@ -82,7 +82,7 @@ MODULE_PARM_DESC(edac_mc_log_ue,
module_param(edac_mc_log_ce, int, 0644);
MODULE_PARM_DESC(edac_mc_log_ce,
"Log correctable error to console: 0=off 1=on");
-module_param_call(edac_mc_poll_msec, edac_set_poll_msec, param_get_int,
+module_param_call(edac_mc_poll_msec, edac_set_poll_msec, param_get_uint,
&edac_mc_poll_msec, 0644);
MODULE_PARM_DESC(edac_mc_poll_msec, "Polling period in milliseconds");
@@ -404,6 +404,8 @@ static inline int nr_pages_per_csrow(struct csrow_info *csrow)
static int edac_create_csrow_object(struct mem_ctl_info *mci,
struct csrow_info *csrow, int index)
{
+ int err;
+
csrow->dev.type = &csrow_attr_type;
csrow->dev.groups = csrow_dev_groups;
device_initialize(&csrow->dev);
@@ -415,7 +417,11 @@ static int edac_create_csrow_object(struct mem_ctl_info *mci,
edac_dbg(0, "creating (virtual) csrow node %s\n",
dev_name(&csrow->dev));
- return device_add(&csrow->dev);
+ err = device_add(&csrow->dev);
+ if (err)
+ put_device(&csrow->dev);
+
+ return err;
}
/* Create a CSROW object under specifed edac_mc_device */
@@ -443,7 +449,8 @@ error:
csrow = mci->csrows[i];
if (!nr_pages_per_csrow(csrow))
continue;
- put_device(&mci->csrows[i]->dev);
+
+ device_del(&mci->csrows[i]->dev);
}
return err;
@@ -645,9 +652,11 @@ static int edac_create_dimm_object(struct mem_ctl_info *mci,
dev_set_drvdata(&dimm->dev, dimm);
pm_runtime_forbid(&mci->dev);
- err = device_add(&dimm->dev);
+ err = device_add(&dimm->dev);
+ if (err)
+ put_device(&dimm->dev);
- edac_dbg(0, "creating rank/dimm device %s\n", dev_name(&dimm->dev));
+ edac_dbg(0, "created rank/dimm device %s\n", dev_name(&dimm->dev));
return err;
}
@@ -928,6 +937,7 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci,
err = device_add(&mci->dev);
if (err < 0) {
edac_dbg(1, "failure: create device %s\n", dev_name(&mci->dev));
+ put_device(&mci->dev);
goto out;
}
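
edac_set_poll_msec() keeps the usual module_param_call() setter shape, now on an unsigned int: parse the string, refuse anything below one second, store the value through kp->arg, then tell the engine to re-arm its timer. A userspace rendering of that flow (parse_uint() and apply_poll_interval() are stand-ins for kstrtouint() and edac_mc_reset_delay_period()):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static unsigned int poll_msec = 1000;

static int parse_uint(const char *s, unsigned int *out)
{
    char *end;
    unsigned long v = strtoul(s, &end, 0);

    if (*s == '\0' || *end != '\0')
        return -EINVAL;
    *out = (unsigned int)v;
    return 0;
}

static void apply_poll_interval(unsigned int ms)
{
    printf("re-arming poll timer for %u ms\n", ms);
}

static int set_poll_msec(const char *val)
{
    unsigned int v;
    int ret;

    if (!val)
        return -EINVAL;

    ret = parse_uint(val, &v);
    if (ret)
        return ret;
    if (v < 1000)                    /* refuse sub-second polling */
        return -EINVAL;

    poll_msec = v;                   /* publish only after validation */
    apply_poll_interval(v);
    return 0;
}

int main(void)
{
    if (set_poll_msec("2000"))
        return 1;
    printf("poll_msec is now %u\n", poll_msec);
    return 0;
}
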
diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h
index dd7d0b509aa3..b2f59ee76c22 100644
--- a/drivers/edac/edac_module.h
+++ b/drivers/edac/edac_module.h
@@ -36,7 +36,7 @@ extern int edac_mc_get_log_ue(void);
extern int edac_mc_get_log_ce(void);
extern int edac_mc_get_panic_on_ue(void);
extern int edac_get_poll_msec(void);
-extern int edac_mc_get_poll_msec(void);
+extern unsigned int edac_mc_get_poll_msec(void);
unsigned edac_dimm_info_location(struct dimm_info *dimm, char *buf,
unsigned len);
@@ -78,10 +78,10 @@ edac_debugfs_create_dir_at(const char *dirname, struct dentry *parent);
struct dentry *
edac_debugfs_create_file(const char *name, umode_t mode, struct dentry *parent,
void *data, const struct file_operations *fops);
-struct dentry *
-edac_debugfs_create_x8(const char *name, umode_t mode, struct dentry *parent, u8 *value);
-struct dentry *
-edac_debugfs_create_x16(const char *name, umode_t mode, struct dentry *parent, u16 *value);
+void edac_debugfs_create_x8(const char *name, umode_t mode,
+ struct dentry *parent, u8 *value);
+void edac_debugfs_create_x16(const char *name, umode_t mode,
+ struct dentry *parent, u16 *value);
#else
static inline void edac_debugfs_init(void) { }
static inline void edac_debugfs_exit(void) { }
@@ -92,12 +92,10 @@ edac_debugfs_create_dir_at(const char *dirname, struct dentry *parent) { return
static inline struct dentry *
edac_debugfs_create_file(const char *name, umode_t mode, struct dentry *parent,
void *data, const struct file_operations *fops) { return NULL; }
-static inline struct dentry *
-edac_debugfs_create_x8(const char *name, umode_t mode,
- struct dentry *parent, u8 *value) { return NULL; }
-static inline struct dentry *
-edac_debugfs_create_x16(const char *name, umode_t mode,
- struct dentry *parent, u16 *value) { return NULL; }
+static inline void edac_debugfs_create_x8(const char *name, umode_t mode,
+ struct dentry *parent, u8 *value) { }
+static inline void edac_debugfs_create_x16(const char *name, umode_t mode,
+ struct dentry *parent, u16 *value) { }
#endif
/*
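
The header keeps the familiar split between real prototypes when debug support is built in and empty static inline stubs when it is not, so callers never need #ifdefs of their own; the patch only switches both variants from returning a struct dentry pointer to void. The pattern in miniature, with a hypothetical CONFIG_FOO_DEBUGFS switch:

#include <stdio.h>

#define CONFIG_FOO_DEBUGFS 1    /* flip to 0 to build the no-op stub instead */

#if CONFIG_FOO_DEBUGFS
static void foo_debugfs_create_x8(const char *name)
{
    printf("debugfs node '%s' created\n", name);
}
#else
/* Stub: same signature, does nothing, callers stay #ifdef-free. */
static inline void foo_debugfs_create_x8(const char *name) { }
#endif

int main(void)
{
    foo_debugfs_create_x8("inject_threshold");
    return 0;
}
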
diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c
index 6f06aec4877c..83392f2841de 100644
--- a/drivers/edac/i10nm_base.c
+++ b/drivers/edac/i10nm_base.c
@@ -124,6 +124,8 @@ static int i10nm_get_all_munits(void)
static const struct x86_cpu_id i10nm_cpuids[] = {
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_TREMONT_X, 0, 0 },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ICELAKE_X, 0, 0 },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ICELAKE_XEON_D, 0, 0 },
{ }
};
MODULE_DEVICE_TABLE(x86cpu, i10nm_cpuids);
@@ -166,9 +168,9 @@ static int i10nm_get_dimm_config(struct mem_ctl_info *mci)
ndimms += skx_get_nvdimm_info(dimm, imc, i, j,
EDAC_MOD_STR);
}
- if (ndimms && !i10nm_check_ecc(imc, 0)) {
- i10nm_printk(KERN_ERR, "ECC is disabled on imc %d\n",
- imc->mc);
+ if (ndimms && !i10nm_check_ecc(imc, i)) {
+ i10nm_printk(KERN_ERR, "ECC is disabled on imc %d channel %d\n",
+ imc->mc, i);
return -ENODEV;
}
}
@@ -265,7 +267,7 @@ static int __init i10nm_init(void)
goto fail;
list_for_each_entry(d, i10nm_edac_list, list) {
- rc = skx_get_src_id(d, &src_id);
+ rc = skx_get_src_id(d, 0xf8, &src_id);
if (rc < 0)
goto fail;
diff --git a/drivers/edac/ie31200_edac.c b/drivers/edac/ie31200_edac.c
index adf60eb45bd4..d26300f9cb07 100644
--- a/drivers/edac/ie31200_edac.c
+++ b/drivers/edac/ie31200_edac.c
@@ -20,11 +20,13 @@
* 0c08: Xeon E3-1200 v3 Processor DRAM Controller
* 1918: Xeon E3-1200 v5 Skylake Host Bridge/DRAM Registers
* 5918: Xeon E3-1200 Xeon E3-1200 v6/7th Gen Core Processor Host Bridge/DRAM Registers
+ * 3e..: 8th/9th Gen Core Processor Host Bridge/DRAM Registers
*
* Based on Intel specification:
* http://www.intel.com/content/dam/www/public/us/en/documents/datasheets/xeon-e3-1200v3-vol-2-datasheet.pdf
* http://www.intel.com/content/www/us/en/processors/xeon/xeon-e3-1200-family-vol-2-datasheet.html
* http://www.intel.com/content/www/us/en/processors/core/7th-gen-core-family-mobile-h-processor-lines-datasheet-vol-2.html
+ * https://www.intel.com/content/www/us/en/products/docs/processors/core/8th-gen-core-family-datasheet-vol-2.html
*
* According to the above datasheet (p.16):
* "
@@ -61,6 +63,26 @@
#define PCI_DEVICE_ID_INTEL_IE31200_HB_8 0x1918
#define PCI_DEVICE_ID_INTEL_IE31200_HB_9 0x5918
+/* Coffee Lake-S */
+#define PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_MASK 0x3e00
+#define PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_1 0x3e0f
+#define PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_2 0x3e18
+#define PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_3 0x3e1f
+#define PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_4 0x3e30
+#define PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_5 0x3e31
+#define PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_6 0x3e32
+#define PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_7 0x3e33
+#define PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_8 0x3ec2
+#define PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_9 0x3ec6
+#define PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_10 0x3eca
+
+/* Test if HB is for Skylake or later. */
+#define DEVICE_ID_SKYLAKE_OR_LATER(did) \
+ (((did) == PCI_DEVICE_ID_INTEL_IE31200_HB_8) || \
+ ((did) == PCI_DEVICE_ID_INTEL_IE31200_HB_9) || \
+ (((did) & PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_MASK) == \
+ PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_MASK))
+
#define IE31200_DIMMS 4
#define IE31200_RANKS 8
#define IE31200_RANKS_PER_CHANNEL 4
@@ -381,10 +403,10 @@ static int ie31200_probe1(struct pci_dev *pdev, int dev_idx)
u32 addr_decode, mad_offset;
/*
- * Kaby Lake seems to work like Skylake. Please re-visit this logic
- * when adding new CPU support.
+ * Kaby Lake, Coffee Lake seem to work like Skylake. Please re-visit
+ * this logic when adding new CPU support.
*/
- bool skl = (pdev->device >= PCI_DEVICE_ID_INTEL_IE31200_HB_8);
+ bool skl = DEVICE_ID_SKYLAKE_OR_LATER(pdev->device);
edac_dbg(0, "MC:\n");
@@ -542,36 +564,26 @@ static void ie31200_remove_one(struct pci_dev *pdev)
}
static const struct pci_device_id ie31200_pci_tbl[] = {
- {
- PCI_VEND_DEV(INTEL, IE31200_HB_1), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- IE31200},
- {
- PCI_VEND_DEV(INTEL, IE31200_HB_2), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- IE31200},
- {
- PCI_VEND_DEV(INTEL, IE31200_HB_3), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- IE31200},
- {
- PCI_VEND_DEV(INTEL, IE31200_HB_4), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- IE31200},
- {
- PCI_VEND_DEV(INTEL, IE31200_HB_5), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- IE31200},
- {
- PCI_VEND_DEV(INTEL, IE31200_HB_6), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- IE31200},
- {
- PCI_VEND_DEV(INTEL, IE31200_HB_7), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- IE31200},
- {
- PCI_VEND_DEV(INTEL, IE31200_HB_8), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- IE31200},
- {
- PCI_VEND_DEV(INTEL, IE31200_HB_9), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- IE31200},
- {
- 0,
- } /* 0 terminated list. */
+ { PCI_VEND_DEV(INTEL, IE31200_HB_1), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
+ { PCI_VEND_DEV(INTEL, IE31200_HB_2), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
+ { PCI_VEND_DEV(INTEL, IE31200_HB_3), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
+ { PCI_VEND_DEV(INTEL, IE31200_HB_4), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
+ { PCI_VEND_DEV(INTEL, IE31200_HB_5), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
+ { PCI_VEND_DEV(INTEL, IE31200_HB_6), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
+ { PCI_VEND_DEV(INTEL, IE31200_HB_7), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
+ { PCI_VEND_DEV(INTEL, IE31200_HB_8), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
+ { PCI_VEND_DEV(INTEL, IE31200_HB_9), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
+ { PCI_VEND_DEV(INTEL, IE31200_HB_CFL_1), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
+ { PCI_VEND_DEV(INTEL, IE31200_HB_CFL_2), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
+ { PCI_VEND_DEV(INTEL, IE31200_HB_CFL_3), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
+ { PCI_VEND_DEV(INTEL, IE31200_HB_CFL_4), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
+ { PCI_VEND_DEV(INTEL, IE31200_HB_CFL_5), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
+ { PCI_VEND_DEV(INTEL, IE31200_HB_CFL_6), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
+ { PCI_VEND_DEV(INTEL, IE31200_HB_CFL_7), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
+ { PCI_VEND_DEV(INTEL, IE31200_HB_CFL_8), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
+ { PCI_VEND_DEV(INTEL, IE31200_HB_CFL_9), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
+ { PCI_VEND_DEV(INTEL, IE31200_HB_CFL_10), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
+ { 0, } /* 0 terminated list. */
};
MODULE_DEVICE_TABLE(pci, ie31200_pci_tbl);
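
DEVICE_ID_SKYLAKE_OR_LATER() avoids spelling out every Coffee Lake host-bridge ID in the probe logic: the two Skylake/Kaby Lake IDs are matched exactly, while the Coffee Lake IDs, all of the form 0x3exx, are caught with a mask test. The mask test on its own, with a couple of sample IDs:

#include <stdint.h>
#include <stdio.h>

#define HB_SKL      0x1918
#define HB_KBL      0x5918
#define HB_CFL_MASK 0x3e00

static int skylake_or_later(uint16_t did)
{
    return did == HB_SKL || did == HB_KBL ||
           (did & HB_CFL_MASK) == HB_CFL_MASK;
}

int main(void)
{
    printf("0x3e1f -> %d\n", skylake_or_later(0x3e1f));  /* Coffee Lake:  1 */
    printf("0x0c08 -> %d\n", skylake_or_later(0x0c08));  /* pre-Skylake: 0 */
    return 0;
}
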
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index fa700a170380..37746b045e18 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -1511,7 +1511,6 @@ static int knl_get_dimm_capacity(struct sbridge_pvt *pvt, u64 *mc_sizes)
sad_actual_size[mc] += tad_size;
}
}
- tad_base = tad_limit+1;
}
}
diff --git a/drivers/edac/sifive_edac.c b/drivers/edac/sifive_edac.c
new file mode 100644
index 000000000000..413cdb4a591d
--- /dev/null
+++ b/drivers/edac/sifive_edac.c
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SiFive Platform EDAC Driver
+ *
+ * Copyright (C) 2018-2019 SiFive, Inc.
+ *
+ * This driver is partially based on octeon_edac-pc.c
+ *
+ */
+#include <linux/edac.h>
+#include <linux/platform_device.h>
+#include "edac_module.h"
+#include <asm/sifive_l2_cache.h>
+
+#define DRVNAME "sifive_edac"
+
+struct sifive_edac_priv {
+ struct notifier_block notifier;
+ struct edac_device_ctl_info *dci;
+};
+
+/**
+ * ecc_err_event() - EDAC error notifier callback
+ * @this: notifier block this handler is registered with
+ * @event: error type, SIFIVE_L2_ERR_TYPE_UE if uncorrectable
+ * @ptr: message string describing the error
+ */
+static
+int ecc_err_event(struct notifier_block *this, unsigned long event, void *ptr)
+{
+ const char *msg = (char *)ptr;
+ struct sifive_edac_priv *p;
+
+ p = container_of(this, struct sifive_edac_priv, notifier);
+
+ if (event == SIFIVE_L2_ERR_TYPE_UE)
+ edac_device_handle_ue(p->dci, 0, 0, msg);
+ else if (event == SIFIVE_L2_ERR_TYPE_CE)
+ edac_device_handle_ce(p->dci, 0, 0, msg);
+
+ return NOTIFY_OK;
+}
+
+static int ecc_register(struct platform_device *pdev)
+{
+ struct sifive_edac_priv *p;
+
+ p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ p->notifier.notifier_call = ecc_err_event;
+ platform_set_drvdata(pdev, p);
+
+ p->dci = edac_device_alloc_ctl_info(0, "sifive_ecc", 1, "sifive_ecc",
+ 1, 1, NULL, 0,
+ edac_device_alloc_index());
+ if (IS_ERR(p->dci))
+ return PTR_ERR(p->dci);
+
+ p->dci->dev = &pdev->dev;
+ p->dci->mod_name = "SiFive ECC Manager";
+ p->dci->ctl_name = dev_name(&pdev->dev);
+ p->dci->dev_name = dev_name(&pdev->dev);
+
+ if (edac_device_add_device(p->dci)) {
+ dev_err(p->dci->dev, "failed to register with EDAC core\n");
+ goto err;
+ }
+
+ register_sifive_l2_error_notifier(&p->notifier);
+
+ return 0;
+
+err:
+ edac_device_free_ctl_info(p->dci);
+
+ return -ENXIO;
+}
+
+static int ecc_unregister(struct platform_device *pdev)
+{
+ struct sifive_edac_priv *p = platform_get_drvdata(pdev);
+
+ unregister_sifive_l2_error_notifier(&p->notifier);
+ edac_device_del_device(&pdev->dev);
+ edac_device_free_ctl_info(p->dci);
+
+ return 0;
+}
+
+static struct platform_device *sifive_pdev;
+
+static int __init sifive_edac_init(void)
+{
+ int ret;
+
+ sifive_pdev = platform_device_register_simple(DRVNAME, 0, NULL, 0);
+ if (IS_ERR(sifive_pdev))
+ return PTR_ERR(sifive_pdev);
+
+ ret = ecc_register(sifive_pdev);
+ if (ret)
+ platform_device_unregister(sifive_pdev);
+
+ return ret;
+}
+
+static void __exit sifive_edac_exit(void)
+{
+ ecc_unregister(sifive_pdev);
+ platform_device_unregister(sifive_pdev);
+}
+
+module_init(sifive_edac_init);
+module_exit(sifive_edac_exit);
+
+MODULE_AUTHOR("SiFive Inc.");
+MODULE_DESCRIPTION("SiFive platform EDAC driver");
+MODULE_LICENSE("GPL v2");
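
The new driver is thin glue: it allocates an EDAC control structure, registers it, and hooks a notifier into the SiFive L2 cache driver so that every reported error is forwarded to the corrected/uncorrected EDAC handlers. The dispatch step as a userspace analogue (the types and names below are illustrative, not the kernel notifier API):

#include <stdio.h>

enum err_type { ERR_CE, ERR_UE };   /* corrected / uncorrected */

static void handle_ce(const char *msg) { printf("corrected error: %s\n", msg); }
static void handle_ue(const char *msg) { printf("UNCORRECTED error: %s\n", msg); }

/* Callback invoked by the (hypothetical) cache driver for each error event. */
static int ecc_err_event(enum err_type event, const char *msg)
{
    if (event == ERR_UE)
        handle_ue(msg);
    else if (event == ERR_CE)
        handle_ce(msg);
    return 0;
}

int main(void)
{
    ecc_err_event(ERR_CE, "DataError @ 0x80001000");   /* made-up message */
    return 0;
}
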
diff --git a/drivers/edac/skx_base.c b/drivers/edac/skx_base.c
index a5c8fa3a249a..0fcf3785e8f3 100644
--- a/drivers/edac/skx_base.c
+++ b/drivers/edac/skx_base.c
@@ -639,7 +639,7 @@ static int __init skx_init(void)
}
list_for_each_entry(d, skx_edac_list, list) {
- rc = skx_get_src_id(d, &src_id);
+ rc = skx_get_src_id(d, 0xf0, &src_id);
if (rc < 0)
goto fail;
rc = skx_get_node_id(d, &node_id);
diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c
index b0dddcfa9baa..d8ff63d91b86 100644
--- a/drivers/edac/skx_common.c
+++ b/drivers/edac/skx_common.c
@@ -136,11 +136,11 @@ void skx_set_decode(skx_decode_f decode)
skx_decode = decode;
}
-int skx_get_src_id(struct skx_dev *d, u8 *id)
+int skx_get_src_id(struct skx_dev *d, int off, u8 *id)
{
u32 reg;
- if (pci_read_config_dword(d->util_all, 0xf0, &reg)) {
+ if (pci_read_config_dword(d->util_all, off, &reg)) {
skx_printk(KERN_ERR, "Failed to read src id\n");
return -ENODEV;
}
diff --git a/drivers/edac/skx_common.h b/drivers/edac/skx_common.h
index d18fa98669af..08cc971a50ea 100644
--- a/drivers/edac/skx_common.h
+++ b/drivers/edac/skx_common.h
@@ -118,7 +118,7 @@ int __init skx_adxl_get(void);
void __exit skx_adxl_put(void);
void skx_set_decode(skx_decode_f decode);
-int skx_get_src_id(struct skx_dev *d, u8 *id);
+int skx_get_src_id(struct skx_dev *d, int off, u8 *id);
int skx_get_node_id(struct skx_dev *d, u8 *id);
int skx_get_all_bus_mappings(unsigned int did, int off, enum type,
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index d40ccc3af9e2..53446e39a32c 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
#
# For a description of the syntax of this configuration file,
-# see Documentation/kbuild/kconfig-language.txt.
+# see Documentation/kbuild/kconfig-language.rst.
#
menu "Firmware Drivers"
diff --git a/drivers/firmware/efi/efi-bgrt.c b/drivers/firmware/efi/efi-bgrt.c
index a2384184a7de..b07c17643210 100644
--- a/drivers/firmware/efi/efi-bgrt.c
+++ b/drivers/firmware/efi/efi-bgrt.c
@@ -47,11 +47,6 @@ void __init efi_bgrt_init(struct acpi_table_header *table)
bgrt->version);
goto out;
}
- if (bgrt->status & 0xfe) {
- pr_notice("Ignoring BGRT: reserved status bits are non-zero %u\n",
- bgrt->status);
- goto out;
- }
if (bgrt->image_type != 0) {
pr_notice("Ignoring BGRT: invalid image type %u (expected 0)\n",
bgrt->image_type);
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 16b2137d117c..ad3b1f4866b3 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -52,6 +52,7 @@ struct efi __read_mostly efi = {
.mem_attr_table = EFI_INVALID_TABLE_ADDR,
.rng_seed = EFI_INVALID_TABLE_ADDR,
.tpm_log = EFI_INVALID_TABLE_ADDR,
+ .tpm_final_log = EFI_INVALID_TABLE_ADDR,
.mem_reserve = EFI_INVALID_TABLE_ADDR,
};
EXPORT_SYMBOL(efi);
@@ -484,6 +485,7 @@ static __initdata efi_config_table_type_t common_tables[] = {
{EFI_MEMORY_ATTRIBUTES_TABLE_GUID, "MEMATTR", &efi.mem_attr_table},
{LINUX_EFI_RANDOM_SEED_TABLE_GUID, "RNG", &efi.rng_seed},
{LINUX_EFI_TPM_EVENT_LOG_GUID, "TPMEventLog", &efi.tpm_log},
+ {LINUX_EFI_TPM_FINAL_LOG_GUID, "TPMFinalLog", &efi.tpm_final_log},
{LINUX_EFI_MEMRESERVE_TABLE_GUID, "MEMRESERVE", &efi.mem_reserve},
{NULL_GUID, NULL, NULL},
};
@@ -1009,14 +1011,16 @@ int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
/* first try to find a slot in an existing linked list entry */
for (prsv = efi_memreserve_root->next; prsv; prsv = rsv->next) {
- rsv = __va(prsv);
+ rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB);
index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
if (index < rsv->size) {
rsv->entry[index].base = addr;
rsv->entry[index].size = size;
+ memunmap(rsv);
return 0;
}
+ memunmap(rsv);
}
/* no slot found - allocate a new linked list entry */
@@ -1024,7 +1028,13 @@ int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
if (!rsv)
return -ENOMEM;
- rsv->size = EFI_MEMRESERVE_COUNT(PAGE_SIZE);
+ /*
+ * The memremap() call above assumes that a linux_efi_memreserve entry
+ * never crosses a page boundary, so let's ensure that this remains true
+ * even when kexec'ing a 4k pages kernel from a >4k pages kernel, by
+ * using SZ_4K explicitly in the size calculation below.
+ */
+ rsv->size = EFI_MEMRESERVE_COUNT(SZ_4K);
atomic_set(&rsv->count, 1);
rsv->entry[0].base = addr;
rsv->entry[0].size = size;
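
efi_mem_reserve_persistent() walks the existing linked-list entries looking for a free slot and only allocates a new entry when none is found; the change here is that each entry is now accessed through memremap()/memunmap() instead of __va(), and sized so it never crosses a 4 KiB page. The find-a-slot-or-append flow by itself, as a userspace analogue with invented field names:

#include <stdio.h>
#include <stdlib.h>

#define SLOTS_PER_ENTRY 4

struct resv_entry {
    struct resv_entry *next;
    int count;                               /* slots already used */
    unsigned long base[SLOTS_PER_ENTRY];
};

static struct resv_entry *head;

static int reserve(unsigned long addr)
{
    struct resv_entry *e;

    /* First try to find a free slot in an existing entry. */
    for (e = head; e; e = e->next) {
        if (e->count < SLOTS_PER_ENTRY) {
            e->base[e->count++] = addr;
            return 0;
        }
    }

    /* No slot found: prepend a freshly allocated entry. */
    e = calloc(1, sizeof(*e));
    if (!e)
        return -1;
    e->base[0] = addr;
    e->count = 1;
    e->next = head;
    head = e;
    return 0;
}

int main(void)
{
    if (reserve(0x80000000UL))
        return 1;
    printf("slots used in first entry: %d\n", head->count);
    return 0;
}
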
diff --git a/drivers/firmware/efi/efibc.c b/drivers/firmware/efi/efibc.c
index 61e099826cbb..35dccc88ac0a 100644
--- a/drivers/firmware/efi/efibc.c
+++ b/drivers/firmware/efi/efibc.c
@@ -43,11 +43,13 @@ static int efibc_set_variable(const char *name, const char *value)
efibc_str_to_str16(value, (efi_char16_t *)entry->var.Data);
memcpy(&entry->var.VendorGuid, &guid, sizeof(guid));
- ret = efivar_entry_set(entry,
- EFI_VARIABLE_NON_VOLATILE
- | EFI_VARIABLE_BOOTSERVICE_ACCESS
- | EFI_VARIABLE_RUNTIME_ACCESS,
- size, entry->var.Data, NULL);
+ ret = efivar_entry_set_safe(entry->var.VariableName,
+ entry->var.VendorGuid,
+ EFI_VARIABLE_NON_VOLATILE
+ | EFI_VARIABLE_BOOTSERVICE_ACCESS
+ | EFI_VARIABLE_RUNTIME_ACCESS,
+ false, size, entry->var.Data);
+
if (ret)
pr_err("failed to set %s EFI variable: 0x%x\n",
name, ret);
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index e4610e72b78f..1db780c0f07b 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -926,3 +926,18 @@ free_map:
fail:
return status;
}
+
+void *get_efi_config_table(efi_system_table_t *sys_table, efi_guid_t guid)
+{
+ efi_config_table_t *tables = (efi_config_table_t *)sys_table->tables;
+ int i;
+
+ for (i = 0; i < sys_table->nr_tables; i++) {
+ if (efi_guidcmp(tables[i].guid, guid) != 0)
+ continue;
+
+ return (void *)tables[i].table;
+ }
+
+ return NULL;
+}
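
get_efi_config_table() is a small linear scan over the firmware's configuration-table array, returning the pointer that accompanies a matching GUID; get_fdt() and the TPM event-log code below now call it instead of open-coding the loop. The same lookup shape as a userspace sketch, with strings standing in for GUIDs:

#include <stdio.h>
#include <string.h>

struct cfg_entry {
    const char *guid;    /* stand-in for efi_guid_t */
    void *table;
};

static void *get_config_table(const struct cfg_entry *tbl, int n, const char *guid)
{
    int i;

    for (i = 0; i < n; i++) {
        if (strcmp(tbl[i].guid, guid) != 0)
            continue;
        return tbl[i].table;
    }
    return NULL;
}

int main(void)
{
    static int fdt_blob;   /* pretend payload */
    struct cfg_entry tables[] = {
        { "SMBIOS",      NULL },
        { "DEVICE_TREE", &fdt_blob },
    };

    printf("DEVICE_TREE table at %p\n",
           get_config_table(tables, 2, "DEVICE_TREE"));
    return 0;
}
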
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index 1b1dfcaa6fb9..7f1556fd867d 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -65,6 +65,8 @@ efi_status_t check_platform_features(efi_system_table_t *sys_table_arg);
efi_status_t efi_random_get_seed(efi_system_table_t *sys_table_arg);
+void *get_efi_config_table(efi_system_table_t *sys_table, efi_guid_t guid);
+
/* Helper macros for the usual case of using simple C variables: */
#ifndef fdt_setprop_inplace_var
#define fdt_setprop_inplace_var(fdt, node_offset, name, var) \
diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
index 5440ba17a1c5..0bf0190917e0 100644
--- a/drivers/firmware/efi/libstub/fdt.c
+++ b/drivers/firmware/efi/libstub/fdt.c
@@ -363,26 +363,17 @@ fail:
void *get_fdt(efi_system_table_t *sys_table, unsigned long *fdt_size)
{
- efi_guid_t fdt_guid = DEVICE_TREE_GUID;
- efi_config_table_t *tables;
- int i;
+ void *fdt;
- tables = (efi_config_table_t *)sys_table->tables;
+ fdt = get_efi_config_table(sys_table, DEVICE_TREE_GUID);
- for (i = 0; i < sys_table->nr_tables; i++) {
- void *fdt;
+ if (!fdt)
+ return NULL;
- if (efi_guidcmp(tables[i].guid, fdt_guid) != 0)
- continue;
-
- fdt = (void *)tables[i].table;
- if (fdt_check_header(fdt) != 0) {
- pr_efi_err(sys_table, "Invalid header detected on UEFI supplied FDT, ignoring ...\n");
- return NULL;
- }
- *fdt_size = fdt_totalsize(fdt);
- return fdt;
+ if (fdt_check_header(fdt) != 0) {
+ pr_efi_err(sys_table, "Invalid header detected on UEFI supplied FDT, ignoring ...\n");
+ return NULL;
}
-
- return NULL;
+ *fdt_size = fdt_totalsize(fdt);
+ return fdt;
}
diff --git a/drivers/firmware/efi/libstub/tpm.c b/drivers/firmware/efi/libstub/tpm.c
index 5bd04f75d8d6..eb9af83e4d59 100644
--- a/drivers/firmware/efi/libstub/tpm.c
+++ b/drivers/firmware/efi/libstub/tpm.c
@@ -57,31 +57,40 @@ void efi_enable_reset_attack_mitigation(efi_system_table_t *sys_table_arg)
#endif
-static void efi_retrieve_tpm2_eventlog_1_2(efi_system_table_t *sys_table_arg)
+void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table_arg)
{
efi_guid_t tcg2_guid = EFI_TCG2_PROTOCOL_GUID;
efi_guid_t linux_eventlog_guid = LINUX_EFI_TPM_EVENT_LOG_GUID;
efi_status_t status;
efi_physical_addr_t log_location = 0, log_last_entry = 0;
struct linux_efi_tpm_eventlog *log_tbl = NULL;
+ struct efi_tcg2_final_events_table *final_events_table;
unsigned long first_entry_addr, last_entry_addr;
size_t log_size, last_entry_size;
efi_bool_t truncated;
+ int version = EFI_TCG2_EVENT_LOG_FORMAT_TCG_2;
void *tcg2_protocol = NULL;
+ int final_events_size = 0;
status = efi_call_early(locate_protocol, &tcg2_guid, NULL,
&tcg2_protocol);
if (status != EFI_SUCCESS)
return;
- status = efi_call_proto(efi_tcg2_protocol, get_event_log, tcg2_protocol,
- EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2,
- &log_location, &log_last_entry, &truncated);
- if (status != EFI_SUCCESS)
- return;
+ status = efi_call_proto(efi_tcg2_protocol, get_event_log,
+ tcg2_protocol, version, &log_location,
+ &log_last_entry, &truncated);
+
+ if (status != EFI_SUCCESS || !log_location) {
+ version = EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2;
+ status = efi_call_proto(efi_tcg2_protocol, get_event_log,
+ tcg2_protocol, version, &log_location,
+ &log_last_entry, &truncated);
+ if (status != EFI_SUCCESS || !log_location)
+ return;
+
+ }
- if (!log_location)
- return;
first_entry_addr = (unsigned long) log_location;
/*
@@ -96,8 +105,23 @@ static void efi_retrieve_tpm2_eventlog_1_2(efi_system_table_t *sys_table_arg)
* We need to calculate its size to deduce the full size of
* the logs.
*/
- last_entry_size = sizeof(struct tcpa_event) +
- ((struct tcpa_event *) last_entry_addr)->event_size;
+ if (version == EFI_TCG2_EVENT_LOG_FORMAT_TCG_2) {
+ /*
+ * The TCG2 log format has variable length entries,
+ * and the information to decode the hash algorithms
+ * back into a size is contained in the first entry -
+ * pass a pointer to the final entry (to calculate its
+ * size) and the first entry (so we know how long each
+ * digest is)
+ */
+ last_entry_size =
+ __calc_tpm2_event_size((void *)last_entry_addr,
+ (void *)(long)log_location,
+ false);
+ } else {
+ last_entry_size = sizeof(struct tcpa_event) +
+ ((struct tcpa_event *) last_entry_addr)->event_size;
+ }
log_size = log_last_entry - log_location + last_entry_size;
}
@@ -112,9 +136,37 @@ static void efi_retrieve_tpm2_eventlog_1_2(efi_system_table_t *sys_table_arg)
return;
}
+ /*
+ * Figure out whether any events have already been logged to the
+ * final events structure, and if so how much space they take up
+ */
+ final_events_table = get_efi_config_table(sys_table_arg,
+ LINUX_EFI_TPM_FINAL_LOG_GUID);
+ if (final_events_table && final_events_table->nr_events) {
+ struct tcg_pcr_event2_head *header;
+ int offset;
+ void *data;
+ int event_size;
+ int i = final_events_table->nr_events;
+
+ data = (void *)final_events_table;
+ offset = sizeof(final_events_table->version) +
+ sizeof(final_events_table->nr_events);
+
+ while (i > 0) {
+ header = data + offset + final_events_size;
+ event_size = __calc_tpm2_event_size(header,
+ (void *)(long)log_location,
+ false);
+ final_events_size += event_size;
+ i--;
+ }
+ }
+
memset(log_tbl, 0, sizeof(*log_tbl) + log_size);
log_tbl->size = log_size;
- log_tbl->version = EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2;
+ log_tbl->final_events_preboot_size = final_events_size;
+ log_tbl->version = version;
memcpy(log_tbl->log, (void *) first_entry_addr, log_size);
status = efi_call_early(install_configuration_table,
@@ -126,9 +178,3 @@ static void efi_retrieve_tpm2_eventlog_1_2(efi_system_table_t *sys_table_arg)
err_free:
efi_call_early(free_pool, log_tbl);
}
-
-void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table_arg)
-{
- /* Only try to retrieve the logs in 1.2 format. */
- efi_retrieve_tpm2_eventlog_1_2(sys_table_arg);
-}
diff --git a/drivers/firmware/efi/tpm.c b/drivers/firmware/efi/tpm.c
index 3a689b40ccc0..1d3f5ca3eaaf 100644
--- a/drivers/firmware/efi/tpm.c
+++ b/drivers/firmware/efi/tpm.c
@@ -4,11 +4,34 @@
* Thiebaud Weksteen <tweek@google.com>
*/
+#define TPM_MEMREMAP(start, size) early_memremap(start, size)
+#define TPM_MEMUNMAP(start, size) early_memunmap(start, size)
+
+#include <asm/early_ioremap.h>
#include <linux/efi.h>
#include <linux/init.h>
#include <linux/memblock.h>
+#include <linux/tpm_eventlog.h>
-#include <asm/early_ioremap.h>
+int efi_tpm_final_log_size;
+EXPORT_SYMBOL(efi_tpm_final_log_size);
+
+static int tpm2_calc_event_log_size(void *data, int count, void *size_info)
+{
+ struct tcg_pcr_event2_head *header;
+ int event_size, size = 0;
+
+ while (count > 0) {
+ header = data + size;
+ event_size = __calc_tpm2_event_size(header, size_info, true);
+ if (event_size == 0)
+ return -1;
+ size += event_size;
+ count--;
+ }
+
+ return size;
+}
/*
* Reserve the memory associated with the TPM Event Log configuration table.
@@ -16,22 +39,54 @@
int __init efi_tpm_eventlog_init(void)
{
struct linux_efi_tpm_eventlog *log_tbl;
+ struct efi_tcg2_final_events_table *final_tbl;
unsigned int tbl_size;
+ int ret = 0;
- if (efi.tpm_log == EFI_INVALID_TABLE_ADDR)
+ if (efi.tpm_log == EFI_INVALID_TABLE_ADDR) {
+ /*
+ * We can't calculate the size of the final events without the
+ * first entry in the TPM log, so bail here.
+ */
return 0;
+ }
log_tbl = early_memremap(efi.tpm_log, sizeof(*log_tbl));
if (!log_tbl) {
pr_err("Failed to map TPM Event Log table @ 0x%lx\n",
- efi.tpm_log);
+ efi.tpm_log);
efi.tpm_log = EFI_INVALID_TABLE_ADDR;
return -ENOMEM;
}
tbl_size = sizeof(*log_tbl) + log_tbl->size;
memblock_reserve(efi.tpm_log, tbl_size);
+
+ if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR)
+ goto out;
+
+ final_tbl = early_memremap(efi.tpm_final_log, sizeof(*final_tbl));
+
+ if (!final_tbl) {
+ pr_err("Failed to map TPM Final Event Log table @ 0x%lx\n",
+ efi.tpm_final_log);
+ efi.tpm_final_log = EFI_INVALID_TABLE_ADDR;
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ tbl_size = tpm2_calc_event_log_size((void *)efi.tpm_final_log
+ + sizeof(final_tbl->version)
+ + sizeof(final_tbl->nr_events),
+ final_tbl->nr_events,
+ log_tbl->log);
+ memblock_reserve((unsigned long)final_tbl,
+ tbl_size + sizeof(*final_tbl));
+ early_memunmap(final_tbl, sizeof(*final_tbl));
+ efi_tpm_final_log_size = tbl_size;
+
+out:
early_memunmap(log_tbl, sizeof(*log_tbl));
- return 0;
+ return ret;
}
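
tpm2_calc_event_log_size() sums the sizes of a run of variable-length TCG2 event records, where each record's size must be computed from its own header (via __calc_tpm2_event_size()) before the walker can step to the next one. The walking pattern in isolation, with an invented fixed-layout record header:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical record header: the first field is the record's total length. */
struct rec_head {
    uint32_t len;
};

static long total_size(const uint8_t *buf, int count)
{
    long off = 0;

    while (count-- > 0) {
        struct rec_head h;

        memcpy(&h, buf + off, sizeof(h));   /* avoids unaligned access */
        if (h.len == 0)
            return -1;                      /* malformed log, bail out */
        off += h.len;
    }
    return off;
}

int main(void)
{
    uint8_t log[32] = { 0 };
    uint32_t l0 = 12, l1 = 8;

    memcpy(log, &l0, sizeof(l0));           /* first record: 12 bytes */
    memcpy(log + 12, &l1, sizeof(l1));      /* second record: 8 bytes */
    printf("event log occupies %ld bytes\n", total_size(log, 2));
    return 0;
}
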
diff --git a/drivers/fmc/Kconfig b/drivers/fmc/Kconfig
deleted file mode 100644
index ae3d7f634932..000000000000
--- a/drivers/fmc/Kconfig
+++ /dev/null
@@ -1,52 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# FMC (ANSI-VITA 57.1) bus support
-#
-
-menuconfig FMC
- tristate "FMC support"
- help
-
- FMC (FPGA Mezzanine Carrier) is a mechanical and electrical
- standard for mezzanine cards that plug into a carrier board.
- This kernel subsystem supports the matching between carrier
- and mezzanine based on identifiers stored in the internal I2C
- EEPROM, as well as having carrier-independent drivers.
-
- The framework was born outside of the kernel and at this time
- the off-tree code base is more complete. Code and documentation
- is at git://ohwr.org/fmc-projects/fmc-bus.git .
-
-if FMC
-
-config FMC_FAKEDEV
- tristate "FMC fake device (software testing)"
- help
- This is a fake carrier, bringing a default EEPROM content
- that can be rewritten at run time and usef for matching
- mezzanines.
-
-config FMC_TRIVIAL
- tristate "FMC trivial mezzanine driver (software testing)"
- help
- This is a fake mezzanine driver, to show how FMC works and test it.
- The driver also handles interrupts (we used it with a real carrier
- before the mezzanines were produced)
-
-config FMC_WRITE_EEPROM
- tristate "FMC mezzanine driver to write I2C EEPROM"
- help
- This driver matches every mezzanine device and can write the
- internal EEPROM of the PCB, using the firmware loader to get
- its binary and the function carrier->reprogram to actually do it.
- It is useful when the mezzanines are produced.
-
-config FMC_CHARDEV
- tristate "FMC mezzanine driver that registers a char device"
- help
- This driver matches every mezzanine device and allows user
- space to read and write registers using a char device. It
- can be used to write user-space drivers, or just get
- acquainted with a mezzanine before writing its specific driver.
-
-endif # FMC
diff --git a/drivers/fmc/Makefile b/drivers/fmc/Makefile
deleted file mode 100644
index e3da6192cf39..000000000000
--- a/drivers/fmc/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-
-obj-$(CONFIG_FMC) += fmc.o
-
-fmc-y = fmc-core.o
-fmc-y += fmc-match.o
-fmc-y += fmc-sdb.o
-fmc-y += fru-parse.o
-fmc-y += fmc-dump.o
-fmc-y += fmc-debug.o
-
-obj-$(CONFIG_FMC_FAKEDEV) += fmc-fakedev.o
-obj-$(CONFIG_FMC_TRIVIAL) += fmc-trivial.o
-obj-$(CONFIG_FMC_WRITE_EEPROM) += fmc-write-eeprom.o
-obj-$(CONFIG_FMC_CHARDEV) += fmc-chardev.o
diff --git a/drivers/fmc/fmc-chardev.c b/drivers/fmc/fmc-chardev.c
deleted file mode 100644
index 7d2091b5e978..000000000000
--- a/drivers/fmc/fmc-chardev.c
+++ /dev/null
@@ -1,199 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2012 CERN (www.cern.ch)
- * Author: Alessandro Rubini <rubini@gnudd.com>
- *
- * This work is part of the White Rabbit project, a research effort led
- * by CERN, the European Institute for Nuclear Research.
- */
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/slab.h>
-#include <linux/fs.h>
-#include <linux/miscdevice.h>
-#include <linux/spinlock.h>
-#include <linux/fmc.h>
-#include <linux/uaccess.h>
-
-static LIST_HEAD(fc_devices);
-static DEFINE_SPINLOCK(fc_lock);
-
-struct fc_instance {
- struct list_head list;
- struct fmc_device *fmc;
- struct miscdevice misc;
-};
-
-/* at open time, we must identify our device */
-static int fc_open(struct inode *ino, struct file *f)
-{
- struct fmc_device *fmc;
- struct fc_instance *fc;
- int minor = iminor(ino);
-
- list_for_each_entry(fc, &fc_devices, list)
- if (fc->misc.minor == minor)
- break;
- if (fc->misc.minor != minor)
- return -ENODEV;
- fmc = fc->fmc;
- if (try_module_get(fmc->owner) == 0)
- return -ENODEV;
-
- f->private_data = fmc;
- return 0;
-}
-
-static int fc_release(struct inode *ino, struct file *f)
-{
- struct fmc_device *fmc = f->private_data;
- module_put(fmc->owner);
- return 0;
-}
-
-/* read and write are simple after the default llseek has been used */
-static ssize_t fc_read(struct file *f, char __user *buf, size_t count,
- loff_t *offp)
-{
- struct fmc_device *fmc = f->private_data;
- unsigned long addr;
- uint32_t val;
-
- if (count < sizeof(val))
- return -EINVAL;
- count = sizeof(val);
-
- addr = *offp;
- if (addr > fmc->memlen)
- return -ESPIPE; /* Illegal seek */
- val = fmc_readl(fmc, addr);
- if (copy_to_user(buf, &val, count))
- return -EFAULT;
- *offp += count;
- return count;
-}
-
-static ssize_t fc_write(struct file *f, const char __user *buf, size_t count,
- loff_t *offp)
-{
- struct fmc_device *fmc = f->private_data;
- unsigned long addr;
- uint32_t val;
-
- if (count < sizeof(val))
- return -EINVAL;
- count = sizeof(val);
-
- addr = *offp;
- if (addr > fmc->memlen)
- return -ESPIPE; /* Illegal seek */
- if (copy_from_user(&val, buf, count))
- return -EFAULT;
- fmc_writel(fmc, val, addr);
- *offp += count;
- return count;
-}
-
-static const struct file_operations fc_fops = {
- .owner = THIS_MODULE,
- .open = fc_open,
- .release = fc_release,
- .llseek = generic_file_llseek,
- .read = fc_read,
- .write = fc_write,
-};
-
-
-/* Device part .. */
-static int fc_probe(struct fmc_device *fmc);
-static int fc_remove(struct fmc_device *fmc);
-
-static struct fmc_driver fc_drv = {
- .version = FMC_VERSION,
- .driver.name = KBUILD_MODNAME,
- .probe = fc_probe,
- .remove = fc_remove,
- /* no table: we want to match everything */
-};
-
-/* We accept the generic busid parameter */
-FMC_PARAM_BUSID(fc_drv);
-
-/* probe and remove must allocate and release a misc device */
-static int fc_probe(struct fmc_device *fmc)
-{
- int ret;
- int index = 0;
-
- struct fc_instance *fc;
-
- index = fmc_validate(fmc, &fc_drv);
- if (index < 0)
- return -EINVAL; /* not our device: invalid */
-
- /* Create a char device: we want to create it anew */
- fc = kzalloc(sizeof(*fc), GFP_KERNEL);
- if (!fc)
- return -ENOMEM;
- fc->fmc = fmc;
- fc->misc.minor = MISC_DYNAMIC_MINOR;
- fc->misc.fops = &fc_fops;
- fc->misc.name = kstrdup(dev_name(&fmc->dev), GFP_KERNEL);
-
- ret = misc_register(&fc->misc);
- if (ret < 0)
- goto out;
- spin_lock(&fc_lock);
- list_add(&fc->list, &fc_devices);
- spin_unlock(&fc_lock);
- dev_info(&fc->fmc->dev, "Created misc device \"%s\"\n",
- fc->misc.name);
- return 0;
-
-out:
- kfree(fc->misc.name);
- kfree(fc);
- return ret;
-}
-
-static int fc_remove(struct fmc_device *fmc)
-{
- struct fc_instance *fc;
-
- list_for_each_entry(fc, &fc_devices, list)
- if (fc->fmc == fmc)
- break;
- if (fc->fmc != fmc) {
- dev_err(&fmc->dev, "remove called but not found\n");
- return -ENODEV;
- }
-
- spin_lock(&fc_lock);
- list_del(&fc->list);
- spin_unlock(&fc_lock);
- misc_deregister(&fc->misc);
- kfree(fc->misc.name);
- kfree(fc);
-
- return 0;
-}
-
-
-static int fc_init(void)
-{
- int ret;
-
- ret = fmc_driver_register(&fc_drv);
- return ret;
-}
-
-static void fc_exit(void)
-{
- fmc_driver_unregister(&fc_drv);
-}
-
-module_init(fc_init);
-module_exit(fc_exit);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/fmc/fmc-core.c b/drivers/fmc/fmc-core.c
deleted file mode 100644
index 573f5471f680..000000000000
--- a/drivers/fmc/fmc-core.c
+++ /dev/null
@@ -1,388 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2012 CERN (www.cern.ch)
- * Author: Alessandro Rubini <rubini@gnudd.com>
- *
- * This work is part of the White Rabbit project, a research effort led
- * by CERN, the European Institute for Nuclear Research.
- */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/device.h>
-#include <linux/fmc.h>
-#include <linux/fmc-sdb.h>
-
-#include "fmc-private.h"
-
-static int fmc_check_version(unsigned long version, const char *name)
-{
- if (__FMC_MAJOR(version) != FMC_MAJOR) {
- pr_err("%s: \"%s\" has wrong major (has %li, expected %i)\n",
- __func__, name, __FMC_MAJOR(version), FMC_MAJOR);
- return -EINVAL;
- }
-
- if (__FMC_MINOR(version) != FMC_MINOR)
- pr_info("%s: \"%s\" has wrong minor (has %li, expected %i)\n",
- __func__, name, __FMC_MINOR(version), FMC_MINOR);
- return 0;
-}
-
-static int fmc_uevent(struct device *dev, struct kobj_uevent_env *env)
-{
- /* struct fmc_device *fdev = to_fmc_device(dev); */
-
- /* FIXME: The MODALIAS */
- add_uevent_var(env, "MODALIAS=%s", "fmc");
- return 0;
-}
-
-static int fmc_probe(struct device *dev)
-{
- struct fmc_driver *fdrv = to_fmc_driver(dev->driver);
- struct fmc_device *fdev = to_fmc_device(dev);
-
- return fdrv->probe(fdev);
-}
-
-static int fmc_remove(struct device *dev)
-{
- struct fmc_driver *fdrv = to_fmc_driver(dev->driver);
- struct fmc_device *fdev = to_fmc_device(dev);
-
- return fdrv->remove(fdev);
-}
-
-static void fmc_shutdown(struct device *dev)
-{
- /* not implemented but mandatory */
-}
-
-static struct bus_type fmc_bus_type = {
- .name = "fmc",
- .match = fmc_match,
- .uevent = fmc_uevent,
- .probe = fmc_probe,
- .remove = fmc_remove,
- .shutdown = fmc_shutdown,
-};
-
-static void fmc_release(struct device *dev)
-{
- struct fmc_device *fmc = container_of(dev, struct fmc_device, dev);
-
- kfree(fmc);
-}
-
-/*
- * The eeprom is exported in sysfs, through a binary attribute
- */
-
-static ssize_t fmc_read_eeprom(struct file *file, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
-{
- struct device *dev;
- struct fmc_device *fmc;
- int eelen;
-
- dev = container_of(kobj, struct device, kobj);
- fmc = container_of(dev, struct fmc_device, dev);
- eelen = fmc->eeprom_len;
- if (off > eelen)
- return -ESPIPE;
- if (off == eelen)
- return 0; /* EOF */
- if (off + count > eelen)
- count = eelen - off;
- memcpy(buf, fmc->eeprom + off, count);
- return count;
-}
-
-static ssize_t fmc_write_eeprom(struct file *file, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
-{
- struct device *dev;
- struct fmc_device *fmc;
-
- dev = container_of(kobj, struct device, kobj);
- fmc = container_of(dev, struct fmc_device, dev);
- return fmc->op->write_ee(fmc, off, buf, count);
-}
-
-static struct bin_attribute fmc_eeprom_attr = {
- .attr = { .name = "eeprom", .mode = S_IRUGO | S_IWUSR, },
- .size = 8192, /* more or less standard */
- .read = fmc_read_eeprom,
- .write = fmc_write_eeprom,
-};
-
-int fmc_irq_request(struct fmc_device *fmc, irq_handler_t h,
- char *name, int flags)
-{
- if (fmc->op->irq_request)
- return fmc->op->irq_request(fmc, h, name, flags);
- return -EPERM;
-}
-EXPORT_SYMBOL(fmc_irq_request);
-
-void fmc_irq_free(struct fmc_device *fmc)
-{
- if (fmc->op->irq_free)
- fmc->op->irq_free(fmc);
-}
-EXPORT_SYMBOL(fmc_irq_free);
-
-void fmc_irq_ack(struct fmc_device *fmc)
-{
- if (likely(fmc->op->irq_ack))
- fmc->op->irq_ack(fmc);
-}
-EXPORT_SYMBOL(fmc_irq_ack);
-
-int fmc_validate(struct fmc_device *fmc, struct fmc_driver *drv)
-{
- if (fmc->op->validate)
- return fmc->op->validate(fmc, drv);
- return -EPERM;
-}
-EXPORT_SYMBOL(fmc_validate);
-
-int fmc_gpio_config(struct fmc_device *fmc, struct fmc_gpio *gpio, int ngpio)
-{
- if (fmc->op->gpio_config)
- return fmc->op->gpio_config(fmc, gpio, ngpio);
- return -EPERM;
-}
-EXPORT_SYMBOL(fmc_gpio_config);
-
-int fmc_read_ee(struct fmc_device *fmc, int pos, void *d, int l)
-{
- if (fmc->op->read_ee)
- return fmc->op->read_ee(fmc, pos, d, l);
- return -EPERM;
-}
-EXPORT_SYMBOL(fmc_read_ee);
-
-int fmc_write_ee(struct fmc_device *fmc, int pos, const void *d, int l)
-{
- if (fmc->op->write_ee)
- return fmc->op->write_ee(fmc, pos, d, l);
- return -EPERM;
-}
-EXPORT_SYMBOL(fmc_write_ee);
-
-/*
- * Functions for client modules follow
- */
-
-int fmc_driver_register(struct fmc_driver *drv)
-{
- if (fmc_check_version(drv->version, drv->driver.name))
- return -EINVAL;
- drv->driver.bus = &fmc_bus_type;
- return driver_register(&drv->driver);
-}
-EXPORT_SYMBOL(fmc_driver_register);
-
-void fmc_driver_unregister(struct fmc_driver *drv)
-{
- driver_unregister(&drv->driver);
-}
-EXPORT_SYMBOL(fmc_driver_unregister);
-
-/*
- * When a device set is registered, all eeproms must be read
- * and all FRUs must be parsed
- */
-int fmc_device_register_n_gw(struct fmc_device **devs, int n,
- struct fmc_gateware *gw)
-{
- struct fmc_device *fmc, **devarray;
- uint32_t device_id;
- int i, ret = 0;
-
- if (n < 1)
- return 0;
-
- /* Check the version of the first data structure (function prints) */
- if (fmc_check_version(devs[0]->version, devs[0]->carrier_name))
- return -EINVAL;
-
- devarray = kmemdup(devs, n * sizeof(*devs), GFP_KERNEL);
- if (!devarray)
- return -ENOMEM;
-
- /* Make all other checks before continuing, for all devices */
- for (i = 0; i < n; i++) {
- fmc = devarray[i];
- if (!fmc->hwdev) {
- pr_err("%s: device nr. %i has no hwdev pointer\n",
- __func__, i);
- ret = -EINVAL;
- break;
- }
- if (fmc->flags & FMC_DEVICE_NO_MEZZANINE) {
- dev_info(fmc->hwdev, "absent mezzanine in slot %d\n",
- fmc->slot_id);
- continue;
- }
- if (!fmc->eeprom) {
- dev_err(fmc->hwdev, "no eeprom provided for slot %i\n",
- fmc->slot_id);
- ret = -EINVAL;
- }
- if (!fmc->eeprom_addr) {
- dev_err(fmc->hwdev, "no eeprom_addr for slot %i\n",
- fmc->slot_id);
- ret = -EINVAL;
- }
- if (!fmc->carrier_name || !fmc->carrier_data ||
- !fmc->device_id) {
- dev_err(fmc->hwdev,
- "device nr %i: carrier name, "
- "data or dev_id not set\n", i);
- ret = -EINVAL;
- }
- if (ret)
- break;
-
- }
- if (ret) {
- kfree(devarray);
- return ret;
- }
-
- /* Validation is ok. Now init and register the devices */
- for (i = 0; i < n; i++) {
- fmc = devarray[i];
-
- fmc->nr_slots = n; /* each slot must know how many are there */
- fmc->devarray = devarray;
-
- device_initialize(&fmc->dev);
- fmc->dev.release = fmc_release;
- fmc->dev.parent = fmc->hwdev;
-
- /* Fill the identification stuff (may fail) */
- fmc_fill_id_info(fmc);
-
- fmc->dev.bus = &fmc_bus_type;
-
- /* Name from mezzanine info or carrier info. Or 0,1,2.. */
- device_id = fmc->device_id;
- if (!fmc->mezzanine_name)
- dev_set_name(&fmc->dev, "fmc-%04x", device_id);
- else
- dev_set_name(&fmc->dev, "%s-%04x", fmc->mezzanine_name,
- device_id);
-
- if (gw) {
- /*
- * The carrier already know the bitstream to load
- * for this set of FMC mezzanines.
- */
- ret = fmc->op->reprogram_raw(fmc, NULL,
- gw->bitstream, gw->len);
- if (ret) {
- dev_warn(fmc->hwdev,
- "Invalid gateware for FMC mezzanine\n");
- goto out;
- }
- }
-
- ret = device_add(&fmc->dev);
- if (ret < 0) {
- dev_err(fmc->hwdev, "Slot %i: Failed in registering "
- "\"%s\"\n", fmc->slot_id, fmc->dev.kobj.name);
- goto out;
- }
- ret = sysfs_create_bin_file(&fmc->dev.kobj, &fmc_eeprom_attr);
- if (ret < 0) {
- dev_err(&fmc->dev, "Failed in registering eeprom\n");
- goto out1;
- }
- /* This device went well, give information to the user */
- fmc_dump_eeprom(fmc);
- fmc_debug_init(fmc);
- }
- return 0;
-
-out1:
- device_del(&fmc->dev);
-out:
- kfree(devarray);
- for (i--; i >= 0; i--) {
- fmc_debug_exit(devs[i]);
- sysfs_remove_bin_file(&devs[i]->dev.kobj, &fmc_eeprom_attr);
- device_del(&devs[i]->dev);
- fmc_free_id_info(devs[i]);
- put_device(&devs[i]->dev);
- }
- return ret;
-
-}
-EXPORT_SYMBOL(fmc_device_register_n_gw);
-
-int fmc_device_register_n(struct fmc_device **devs, int n)
-{
- return fmc_device_register_n_gw(devs, n, NULL);
-}
-EXPORT_SYMBOL(fmc_device_register_n);
-
-int fmc_device_register_gw(struct fmc_device *fmc, struct fmc_gateware *gw)
-{
- return fmc_device_register_n_gw(&fmc, 1, gw);
-}
-EXPORT_SYMBOL(fmc_device_register_gw);
-
-int fmc_device_register(struct fmc_device *fmc)
-{
- return fmc_device_register_n(&fmc, 1);
-}
-EXPORT_SYMBOL(fmc_device_register);
-
-void fmc_device_unregister_n(struct fmc_device **devs, int n)
-{
- int i;
-
- if (n < 1)
- return;
-
- /* Free devarray first, not used by the later loop */
- kfree(devs[0]->devarray);
-
- for (i = 0; i < n; i++) {
- fmc_debug_exit(devs[i]);
- sysfs_remove_bin_file(&devs[i]->dev.kobj, &fmc_eeprom_attr);
- device_del(&devs[i]->dev);
- fmc_free_id_info(devs[i]);
- put_device(&devs[i]->dev);
- }
-}
-EXPORT_SYMBOL(fmc_device_unregister_n);
-
-void fmc_device_unregister(struct fmc_device *fmc)
-{
- fmc_device_unregister_n(&fmc, 1);
-}
-EXPORT_SYMBOL(fmc_device_unregister);
-
-/* Init and exit are trivial */
-static int fmc_init(void)
-{
- return bus_register(&fmc_bus_type);
-}
-
-static void fmc_exit(void)
-{
- bus_unregister(&fmc_bus_type);
-}
-
-module_init(fmc_init);
-module_exit(fmc_exit);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/fmc/fmc-debug.c b/drivers/fmc/fmc-debug.c
deleted file mode 100644
index 1734c7cf0e76..000000000000
--- a/drivers/fmc/fmc-debug.c
+++ /dev/null
@@ -1,172 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2015 CERN (www.cern.ch)
- * Author: Federico Vaga <federico.vaga@cern.ch>
- */
-
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/init.h>
-#include <linux/fs.h>
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <asm/byteorder.h>
-
-#include <linux/fmc.h>
-#include <linux/sdb.h>
-#include <linux/fmc-sdb.h>
-
-#define FMC_DBG_SDB_DUMP "dump_sdb"
-
-static char *__strip_trailing_space(char *buf, char *str, int len)
-{
- int i = len - 1;
-
- memcpy(buf, str, len);
- buf[len] = '\0';
- while (i >= 0 && buf[i] == ' ')
- buf[i--] = '\0';
- return buf;
-}
-
-#define __sdb_string(buf, field) ({ \
- BUILD_BUG_ON(sizeof(buf) < sizeof(field)); \
- __strip_trailing_space(buf, (void *)(field), sizeof(field)); \
- })
-
-/**
- * We do not check seq_printf() errors because we want to see things in any case
- */
-static void fmc_sdb_dump_recursive(struct fmc_device *fmc, struct seq_file *s,
- const struct sdb_array *arr)
-{
- unsigned long base = arr->baseaddr;
- int i, j, n = arr->len, level = arr->level;
- char tmp[64];
-
- for (i = 0; i < n; i++) {
- union sdb_record *r;
- struct sdb_product *p;
- struct sdb_component *c;
-
- r = &arr->record[i];
- c = &r->dev.sdb_component;
- p = &c->product;
-
- for (j = 0; j < level; j++)
- seq_printf(s, " ");
- switch (r->empty.record_type) {
- case sdb_type_interconnect:
- seq_printf(s, "%08llx:%08x %.19s\n",
- __be64_to_cpu(p->vendor_id),
- __be32_to_cpu(p->device_id),
- p->name);
- break;
- case sdb_type_device:
- seq_printf(s, "%08llx:%08x %.19s (%08llx-%08llx)\n",
- __be64_to_cpu(p->vendor_id),
- __be32_to_cpu(p->device_id),
- p->name,
- __be64_to_cpu(c->addr_first) + base,
- __be64_to_cpu(c->addr_last) + base);
- break;
- case sdb_type_bridge:
- seq_printf(s, "%08llx:%08x %.19s (bridge: %08llx)\n",
- __be64_to_cpu(p->vendor_id),
- __be32_to_cpu(p->device_id),
- p->name,
- __be64_to_cpu(c->addr_first) + base);
- if (IS_ERR(arr->subtree[i])) {
- seq_printf(s, "SDB: (bridge error %li)\n",
- PTR_ERR(arr->subtree[i]));
- break;
- }
- fmc_sdb_dump_recursive(fmc, s, arr->subtree[i]);
- break;
- case sdb_type_integration:
- seq_printf(s, "integration\n");
- break;
- case sdb_type_repo_url:
- seq_printf(s, "Synthesis repository: %s\n",
- __sdb_string(tmp, r->repo_url.repo_url));
- break;
- case sdb_type_synthesis:
- seq_printf(s, "Bitstream '%s' ",
- __sdb_string(tmp, r->synthesis.syn_name));
- seq_printf(s, "synthesized %08x by %s ",
- __be32_to_cpu(r->synthesis.date),
- __sdb_string(tmp, r->synthesis.user_name));
- seq_printf(s, "(%s version %x), ",
- __sdb_string(tmp, r->synthesis.tool_name),
- __be32_to_cpu(r->synthesis.tool_version));
- seq_printf(s, "commit %pm\n",
- r->synthesis.commit_id);
- break;
- case sdb_type_empty:
- seq_printf(s, "empty\n");
- break;
- default:
- seq_printf(s, "UNKNOWN TYPE 0x%02x\n",
- r->empty.record_type);
- break;
- }
- }
-}
-
-static int fmc_sdb_dump(struct seq_file *s, void *offset)
-{
- struct fmc_device *fmc = s->private;
-
- if (!fmc->sdb) {
- seq_printf(s, "no SDB information\n");
- return 0;
- }
-
- seq_printf(s, "FMC: %s (%s), slot %i, device %s\n", dev_name(fmc->hwdev),
- fmc->carrier_name, fmc->slot_id, dev_name(&fmc->dev));
- /* Dump SDB information */
- fmc_sdb_dump_recursive(fmc, s, fmc->sdb);
-
- return 0;
-}
-
-
-static int fmc_sdb_dump_open(struct inode *inode, struct file *file)
-{
- struct fmc_device *fmc = inode->i_private;
-
- return single_open(file, fmc_sdb_dump, fmc);
-}
-
-
-const struct file_operations fmc_dbgfs_sdb_dump = {
- .owner = THIS_MODULE,
- .open = fmc_sdb_dump_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-int fmc_debug_init(struct fmc_device *fmc)
-{
- fmc->dbg_dir = debugfs_create_dir(dev_name(&fmc->dev), NULL);
- if (IS_ERR_OR_NULL(fmc->dbg_dir)) {
- pr_err("FMC: Cannot create debugfs\n");
- return PTR_ERR(fmc->dbg_dir);
- }
-
- fmc->dbg_sdb_dump = debugfs_create_file(FMC_DBG_SDB_DUMP, 0444,
- fmc->dbg_dir, fmc,
- &fmc_dbgfs_sdb_dump);
- if (IS_ERR_OR_NULL(fmc->dbg_sdb_dump))
- pr_err("FMC: Cannot create debugfs file %s\n",
- FMC_DBG_SDB_DUMP);
-
- return 0;
-}
-
-void fmc_debug_exit(struct fmc_device *fmc)
-{
- if (fmc->dbg_dir)
- debugfs_remove_recursive(fmc->dbg_dir);
-}
diff --git a/drivers/fmc/fmc-dump.c b/drivers/fmc/fmc-dump.c
deleted file mode 100644
index 6c81dbde1d16..000000000000
--- a/drivers/fmc/fmc-dump.c
+++ /dev/null
@@ -1,58 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2013 CERN (www.cern.ch)
- * Author: Alessandro Rubini <rubini@gnudd.com>
- *
- * This work is part of the White Rabbit project, a research effort led
- * by CERN, the European Institute for Nuclear Research.
- */
-#include <linux/kernel.h>
-#include <linux/moduleparam.h>
-#include <linux/device.h>
-#include <linux/fmc.h>
-#include <linux/fmc-sdb.h>
-
-static int fmc_must_dump_eeprom;
-module_param_named(dump_eeprom, fmc_must_dump_eeprom, int, 0644);
-
-#define LINELEN 16
-
-/* Dumping 8k produces a lot of output: avoid duplicate lines */
-static const uint8_t *dump_line(int addr, const uint8_t *line,
- const uint8_t *prev)
-{
- int i;
-
- if (!prev || memcmp(line, prev, LINELEN)) {
- pr_info("%04x: ", addr);
- for (i = 0; i < LINELEN; ) {
- printk(KERN_CONT "%02x", line[i]);
- i++;
- printk(i & 3 ? " " : i & (LINELEN - 1) ? " " : "\n");
- }
- return line;
- }
- /* repeated line */
- if (line == prev + LINELEN)
- pr_info("[...]\n");
- return prev;
-}
-
-void fmc_dump_eeprom(const struct fmc_device *fmc)
-{
- const uint8_t *line, *prev;
- int i;
-
- if (!fmc_must_dump_eeprom)
- return;
-
- pr_info("FMC: %s (%s), slot %i, device %s\n", dev_name(fmc->hwdev),
- fmc->carrier_name, fmc->slot_id, dev_name(&fmc->dev));
- pr_info("FMC: dumping eeprom 0x%x (%i) bytes\n", fmc->eeprom_len,
- fmc->eeprom_len);
-
- line = fmc->eeprom;
- prev = NULL;
- for (i = 0; i < fmc->eeprom_len; i += LINELEN, line += LINELEN)
- prev = dump_line(i, line, prev);
-}
diff --git a/drivers/fmc/fmc-fakedev.c b/drivers/fmc/fmc-fakedev.c
deleted file mode 100644
index 941d0930969a..000000000000
--- a/drivers/fmc/fmc-fakedev.c
+++ /dev/null
@@ -1,355 +0,0 @@
-/*
- * Copyright (C) 2012 CERN (www.cern.ch)
- * Author: Alessandro Rubini <rubini@gnudd.com>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * The software is provided "as is"; the copyright holders disclaim
- * all warranties and liabilities, to the extent permitted by
- * applicable law.
- */
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/string.h>
-#include <linux/device.h>
-#include <linux/slab.h>
-#include <linux/firmware.h>
-#include <linux/workqueue.h>
-#include <linux/err.h>
-#include <linux/fmc.h>
-
-#define FF_EEPROM_SIZE 8192 /* The standard eeprom size */
-#define FF_MAX_MEZZANINES 4 /* Fakes a multi-mezzanine carrier */
-
-/* The user can pass up to 4 names of eeprom images to load */
-static char *ff_eeprom[FF_MAX_MEZZANINES];
-static int ff_nr_eeprom;
-module_param_array_named(eeprom, ff_eeprom, charp, &ff_nr_eeprom, 0444);
-
-/* The user can ask for a multi-mezzanine carrier, with the default eeprom */
-static int ff_nr_dev = 1;
-module_param_named(ndev, ff_nr_dev, int, 0444);
-
-
-/* Lazily, don't support the "standard" module parameters */
-
-/*
- * Eeprom built from these commands:
-
- ../fru-generator -v fake-vendor -n fake-design-for-testing \
- -s 01234 -p none > IPMI-FRU
-
- gensdbfs . ../fake-eeprom.bin
-*/
-static char ff_eeimg[FF_MAX_MEZZANINES][FF_EEPROM_SIZE] = {
- {
- 0x01, 0x00, 0x00, 0x01, 0x00, 0x0c, 0x00, 0xf2, 0x01, 0x0b, 0x00, 0xb2,
- 0x86, 0x87, 0xcb, 0x66, 0x61, 0x6b, 0x65, 0x2d, 0x76, 0x65, 0x6e, 0x64,
- 0x6f, 0x72, 0xd7, 0x66, 0x61, 0x6b, 0x65, 0x2d, 0x64, 0x65, 0x73, 0x69,
- 0x67, 0x6e, 0x2d, 0x66, 0x6f, 0x72, 0x2d, 0x74, 0x65, 0x73, 0x74, 0x69,
- 0x6e, 0x67, 0xc5, 0x30, 0x31, 0x32, 0x33, 0x34, 0xc4, 0x6e, 0x6f, 0x6e,
- 0x65, 0xda, 0x32, 0x30, 0x31, 0x32, 0x2d, 0x31, 0x31, 0x2d, 0x31, 0x39,
- 0x20, 0x32, 0x32, 0x3a, 0x34, 0x32, 0x3a, 0x33, 0x30, 0x2e, 0x30, 0x37,
- 0x34, 0x30, 0x35, 0x35, 0xc1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x87,
- 0x02, 0x02, 0x0d, 0xf7, 0xf8, 0x02, 0xb0, 0x04, 0x74, 0x04, 0xec, 0x04,
- 0x00, 0x00, 0x00, 0x00, 0xe8, 0x03, 0x02, 0x02, 0x0d, 0x5c, 0x93, 0x01,
- 0x4a, 0x01, 0x39, 0x01, 0x5a, 0x01, 0x00, 0x00, 0x00, 0x00, 0xb8, 0x0b,
- 0x02, 0x02, 0x0d, 0x63, 0x8c, 0x00, 0xfa, 0x00, 0xed, 0x00, 0x06, 0x01,
- 0x00, 0x00, 0x00, 0x00, 0xa0, 0x0f, 0x01, 0x02, 0x0d, 0xfb, 0xf5, 0x05,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x01, 0x02, 0x0d, 0xfc, 0xf4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x0d, 0xfd, 0xf3, 0x03,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0xfa, 0x82, 0x0b, 0xea, 0x8f, 0xa2, 0x12, 0x00, 0x00, 0x1e, 0x44, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x53, 0x44, 0x42, 0x2d, 0x00, 0x03, 0x01, 0x01,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x01, 0xc4, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x61, 0x74, 0x61,
- 0x2e, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
- 0x2e, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xc0,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xc4, 0x46, 0x69, 0x6c, 0x65,
- 0x44, 0x61, 0x74, 0x61, 0x6e, 0x61, 0x6d, 0x65, 0x00, 0x00, 0x00, 0x01,
- 0x00, 0x00, 0x00, 0x00, 0x6e, 0x61, 0x6d, 0x65, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x01,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xdf,
- 0x46, 0x69, 0x6c, 0x65, 0x44, 0x61, 0x74, 0x61, 0x49, 0x50, 0x4d, 0x49,
- 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x49, 0x50, 0x4d, 0x49,
- 0x2d, 0x46, 0x52, 0x55, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x01, 0x66, 0x61, 0x6b, 0x65, 0x0a,
- },
-};
-
-struct ff_dev {
- struct fmc_device *fmc[FF_MAX_MEZZANINES];
- struct device dev;
-};
-
-static struct ff_dev *ff_current_dev; /* We have 1 carrier, 1 slot */
-
-static int ff_reprogram(struct fmc_device *fmc, struct fmc_driver *drv,
- char *gw)
-{
- const struct firmware *fw;
- int ret;
-
- if (!gw) {
- /* program golden: success */
- fmc->flags &= ~FMC_DEVICE_HAS_CUSTOM;
- fmc->flags |= FMC_DEVICE_HAS_GOLDEN;
- return 0;
- }
-
- dev_info(&fmc->dev, "reprogramming with %s\n", gw);
- ret = request_firmware(&fw, gw, &fmc->dev);
- if (ret < 0) {
- dev_warn(&fmc->dev, "request firmware \"%s\": error %i\n",
- gw, ret);
- goto out;
- }
- fmc->flags &= ~FMC_DEVICE_HAS_GOLDEN;
- fmc->flags |= FMC_DEVICE_HAS_CUSTOM;
-
-out:
- release_firmware(fw);
- return ret;
-}
-
-static int ff_irq_request(struct fmc_device *fmc, irq_handler_t handler,
- char *name, int flags)
-{
- return -EOPNOTSUPP;
-}
-
-/* FIXME: should also have some fake FMC GPIO mapping */
-
-
-/*
- * This work function is called when we changed the eeprom. It removes the
- * current fmc device and registers a new one, with different identifiers.
- */
-static struct ff_dev *ff_dev_create(void); /* defined later */
-
-static void ff_work_fn(struct work_struct *work)
-{
- struct ff_dev *ff = ff_current_dev;
- int ret;
-
- fmc_device_unregister_n(ff->fmc, ff_nr_dev);
- device_unregister(&ff->dev);
- ff_current_dev = NULL;
-
- ff = ff_dev_create();
- if (IS_ERR(ff)) {
- pr_warning("%s: can't re-create FMC devices\n", __func__);
- return;
- }
- ret = fmc_device_register_n(ff->fmc, ff_nr_dev);
- if (ret < 0) {
- dev_warn(&ff->dev, "can't re-register FMC devices\n");
- device_unregister(&ff->dev);
- return;
- }
-
- ff_current_dev = ff;
-}
-
-static DECLARE_DELAYED_WORK(ff_work, ff_work_fn);
-
-
-/* low-level i2c */
-static int ff_eeprom_read(struct fmc_device *fmc, uint32_t offset,
- void *buf, size_t size)
-{
- if (offset > FF_EEPROM_SIZE)
- return -EINVAL;
- if (offset + size > FF_EEPROM_SIZE)
- size = FF_EEPROM_SIZE - offset;
- memcpy(buf, fmc->eeprom + offset, size);
- return size;
-}
-
-static int ff_eeprom_write(struct fmc_device *fmc, uint32_t offset,
- const void *buf, size_t size)
-{
- if (offset > FF_EEPROM_SIZE)
- return -EINVAL;
- if (offset + size > FF_EEPROM_SIZE)
- size = FF_EEPROM_SIZE - offset;
- dev_info(&fmc->dev, "write_eeprom: offset %i, size %zi\n",
- (int)offset, size);
- memcpy(fmc->eeprom + offset, buf, size);
- schedule_delayed_work(&ff_work, HZ * 2); /* remove, replug, in 2s */
- return size;
-}
-
-/* i2c operations for fmc */
-static int ff_read_ee(struct fmc_device *fmc, int pos, void *data, int len)
-{
- if (!(fmc->flags & FMC_DEVICE_HAS_GOLDEN))
- return -EOPNOTSUPP;
- return ff_eeprom_read(fmc, pos, data, len);
-}
-
-static int ff_write_ee(struct fmc_device *fmc, int pos,
- const void *data, int len)
-{
- if (!(fmc->flags & FMC_DEVICE_HAS_GOLDEN))
- return -EOPNOTSUPP;
- return ff_eeprom_write(fmc, pos, data, len);
-}
-
-/* readl and writel do not do anything. Don't waste RAM with "base" */
-static uint32_t ff_readl(struct fmc_device *fmc, int offset)
-{
- return 0;
-}
-
-static void ff_writel(struct fmc_device *fmc, uint32_t value, int offset)
-{
- return;
-}
-
-/* validate is useful so fmc-write-eeprom will not reprogram every 2 seconds */
-static int ff_validate(struct fmc_device *fmc, struct fmc_driver *drv)
-{
- int i;
-
- if (!drv->busid_n)
- return 0; /* everything is valid */
- for (i = 0; i < drv->busid_n; i++)
- if (drv->busid_val[i] == fmc->device_id)
- return i;
- return -ENOENT;
-}
-
-
-
-static struct fmc_operations ff_fmc_operations = {
- .read32 = ff_readl,
- .write32 = ff_writel,
- .reprogram = ff_reprogram,
- .irq_request = ff_irq_request,
- .read_ee = ff_read_ee,
- .write_ee = ff_write_ee,
- .validate = ff_validate,
-};
-
-/* This device is kmalloced: release it */
-static void ff_dev_release(struct device *dev)
-{
- struct ff_dev *ff = container_of(dev, struct ff_dev, dev);
- kfree(ff);
-}
-
-static struct fmc_device ff_template_fmc = {
- .version = FMC_VERSION,
- .owner = THIS_MODULE,
- .carrier_name = "fake-fmc-carrier",
- .device_id = 0xf001, /* fool */
- .eeprom_len = sizeof(ff_eeimg[0]),
- .memlen = 0x1000, /* 4k, to show something */
- .op = &ff_fmc_operations,
- .hwdev = NULL, /* filled at creation time */
- .flags = FMC_DEVICE_HAS_GOLDEN,
-};
-
-static struct ff_dev *ff_dev_create(void)
-{
- struct ff_dev *ff;
- struct fmc_device *fmc;
- int i, ret;
-
- ff = kzalloc(sizeof(*ff), GFP_KERNEL);
- if (!ff)
- return ERR_PTR(-ENOMEM);
- dev_set_name(&ff->dev, "fake-fmc-carrier");
- ff->dev.release = ff_dev_release;
-
- ret = device_register(&ff->dev);
- if (ret < 0) {
- put_device(&ff->dev);
- return ERR_PTR(ret);
- }
-
- /* Create fmc structures that refer to this new "hw" device */
- for (i = 0; i < ff_nr_dev; i++) {
- fmc = kmemdup(&ff_template_fmc, sizeof(ff_template_fmc),
- GFP_KERNEL);
- fmc->hwdev = &ff->dev;
- fmc->carrier_data = ff;
- fmc->nr_slots = ff_nr_dev;
- /* the following fields are different for each slot */
- fmc->eeprom = ff_eeimg[i];
- fmc->eeprom_addr = 0x50 + 2 * i;
- fmc->slot_id = i;
- ff->fmc[i] = fmc;
- /* increment the identifier, each must be different */
- ff_template_fmc.device_id++;
- }
- return ff;
-}
-
-/* init and exit */
-static int ff_init(void)
-{
- struct ff_dev *ff;
- const struct firmware *fw;
- int i, len, ret = 0;
-
- /* Replicate the default eeprom for the max number of mezzanines */
- for (i = 1; i < FF_MAX_MEZZANINES; i++)
- memcpy(ff_eeimg[i], ff_eeimg[0], sizeof(ff_eeimg[0]));
-
- if (ff_nr_eeprom > ff_nr_dev)
- ff_nr_dev = ff_nr_eeprom;
-
- ff = ff_dev_create();
- if (IS_ERR(ff))
- return PTR_ERR(ff);
-
- /* If the user passed "eeprom=" as a parameter, fetch them */
- for (i = 0; i < ff_nr_eeprom; i++) {
- if (!strlen(ff_eeprom[i]))
- continue;
- ret = request_firmware(&fw, ff_eeprom[i], &ff->dev);
- if (ret < 0) {
- dev_err(&ff->dev, "Mezzanine %i: can't load \"%s\" "
- "(error %i)\n", i, ff_eeprom[i], -ret);
- } else {
- len = min_t(size_t, fw->size, (size_t)FF_EEPROM_SIZE);
- memcpy(ff_eeimg[i], fw->data, len);
- release_firmware(fw);
- dev_info(&ff->dev, "Mezzanine %i: eeprom \"%s\"\n", i,
- ff_eeprom[i]);
- }
- }
-
- ret = fmc_device_register_n(ff->fmc, ff_nr_dev);
- if (ret) {
- device_unregister(&ff->dev);
- return ret;
- }
- ff_current_dev = ff;
- return ret;
-}
-
-static void ff_exit(void)
-{
- if (ff_current_dev) {
- fmc_device_unregister_n(ff_current_dev->fmc, ff_nr_dev);
- device_unregister(&ff_current_dev->dev);
- }
- cancel_delayed_work_sync(&ff_work);
-}
-
-module_init(ff_init);
-module_exit(ff_exit);
-
-MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/fmc/fmc-match.c b/drivers/fmc/fmc-match.c
deleted file mode 100644
index 995bd6041a67..000000000000
--- a/drivers/fmc/fmc-match.c
+++ /dev/null
@@ -1,113 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2012 CERN (www.cern.ch)
- * Author: Alessandro Rubini <rubini@gnudd.com>
- *
- * This work is part of the White Rabbit project, a research effort led
- * by CERN, the European Institute for Nuclear Research.
- */
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/fmc.h>
-#include <linux/ipmi-fru.h>
-
-/* The fru parser is both user and kernel capable: it needs alloc */
-void *fru_alloc(size_t size)
-{
- return kzalloc(size, GFP_KERNEL);
-}
-
-/* The actual match function */
-int fmc_match(struct device *dev, struct device_driver *drv)
-{
- struct fmc_driver *fdrv = to_fmc_driver(drv);
- struct fmc_device *fdev = to_fmc_device(dev);
- struct fmc_fru_id *fid;
- int i, matched = 0;
-
- /* This currently only matches the EEPROM (FRU id) */
- fid = fdrv->id_table.fru_id;
- if (!fid) {
- dev_warn(&fdev->dev, "Driver has no ID: matches all\n");
- matched = 1;
- } else {
- if (!fdev->id.manufacturer || !fdev->id.product_name)
- return 0; /* the device has no FRU information */
- for (i = 0; i < fdrv->id_table.fru_id_nr; i++, fid++) {
- if (fid->manufacturer &&
- strcmp(fid->manufacturer, fdev->id.manufacturer))
- continue;
- if (fid->product_name &&
- strcmp(fid->product_name, fdev->id.product_name))
- continue;
- matched = 1;
- break;
- }
- }
-
- /* FIXME: match SDB contents */
- return matched;
-}
-
-/* This function creates ID info for a newly registered device */
-int fmc_fill_id_info(struct fmc_device *fmc)
-{
- struct fru_common_header *h;
- struct fru_board_info_area *bia;
- int ret, allocated = 0;
-
- /* If we know the eeprom length, try to read it off the device */
- if (fmc->eeprom_len && !fmc->eeprom) {
- fmc->eeprom = kzalloc(fmc->eeprom_len, GFP_KERNEL);
- if (!fmc->eeprom)
- return -ENOMEM;
- allocated = 1;
- ret = fmc_read_ee(fmc, 0, fmc->eeprom, fmc->eeprom_len);
- if (ret < 0)
- goto out;
- }
-
- /* If no eeprom, continue with other matches */
- if (!fmc->eeprom)
- return 0;
-
- dev_info(fmc->hwdev, "mezzanine %i\n", fmc->slot_id); /* header */
-
- /* So we have the eeprom: parse the FRU part (if any) */
- h = (void *)fmc->eeprom;
- if (h->format != 1) {
- pr_info(" EEPROM has no FRU information\n");
- goto out;
- }
- if (!fru_header_cksum_ok(h)) {
- pr_info(" FRU: wrong header checksum\n");
- goto out;
- }
- bia = fru_get_board_area(h);
- if (!fru_bia_cksum_ok(bia)) {
- pr_info(" FRU: wrong board area checksum\n");
- goto out;
- }
- fmc->id.manufacturer = fru_get_board_manufacturer(h);
- fmc->id.product_name = fru_get_product_name(h);
- pr_info(" Manufacturer: %s\n", fmc->id.manufacturer);
- pr_info(" Product name: %s\n", fmc->id.product_name);
-
- /* Create the short name (FIXME: look in sdb as well) */
- fmc->mezzanine_name = kstrdup(fmc->id.product_name, GFP_KERNEL);
-
-out:
- if (allocated) {
- kfree(fmc->eeprom);
- fmc->eeprom = NULL;
- }
- return 0; /* no error: let other identification work */
-}
-
-/* Some ID data is allocated using fru_alloc() above, so release it */
-void fmc_free_id_info(struct fmc_device *fmc)
-{
- kfree(fmc->mezzanine_name);
- kfree(fmc->id.manufacturer);
- kfree(fmc->id.product_name);
-}
diff --git a/drivers/fmc/fmc-private.h b/drivers/fmc/fmc-private.h
deleted file mode 100644
index 93cb8030f764..000000000000
--- a/drivers/fmc/fmc-private.h
+++ /dev/null
@@ -1,8 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2015 CERN (www.cern.ch)
- * Author: Federico Vaga <federico.vaga@cern.ch>
- */
-
-extern int fmc_debug_init(struct fmc_device *fmc);
-extern void fmc_debug_exit(struct fmc_device *fmc);
diff --git a/drivers/fmc/fmc-sdb.c b/drivers/fmc/fmc-sdb.c
deleted file mode 100644
index 14758db1a5fb..000000000000
--- a/drivers/fmc/fmc-sdb.c
+++ /dev/null
@@ -1,219 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2012 CERN (www.cern.ch)
- * Author: Alessandro Rubini <rubini@gnudd.com>
- *
- * This work is part of the White Rabbit project, a research effort led
- * by CERN, the European Institute for Nuclear Research.
- */
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/fmc.h>
-#include <linux/sdb.h>
-#include <linux/err.h>
-#include <linux/fmc-sdb.h>
-#include <asm/byteorder.h>
-
-static uint32_t __sdb_rd(struct fmc_device *fmc, unsigned long address,
- int convert)
-{
- uint32_t res = fmc_readl(fmc, address);
- if (convert)
- return __be32_to_cpu(res);
- return res;
-}
-
-static struct sdb_array *__fmc_scan_sdb_tree(struct fmc_device *fmc,
- unsigned long sdb_addr,
- unsigned long reg_base, int level)
-{
- uint32_t onew;
- int i, j, n, convert = 0;
- struct sdb_array *arr, *sub;
-
- onew = fmc_readl(fmc, sdb_addr);
- if (onew == SDB_MAGIC) {
- /* Uh! If we are little-endian, we must convert */
- if (SDB_MAGIC != __be32_to_cpu(SDB_MAGIC))
- convert = 1;
- } else if (onew == __be32_to_cpu(SDB_MAGIC)) {
- /* ok, don't convert */
- } else {
- return ERR_PTR(-ENOENT);
- }
- /* So, the magic was there: get the count from offset 4 */
- onew = __sdb_rd(fmc, sdb_addr + 4, convert);
- n = __be16_to_cpu(*(uint16_t *)&onew);
- arr = kzalloc(sizeof(*arr), GFP_KERNEL);
- if (!arr)
- return ERR_PTR(-ENOMEM);
- arr->record = kcalloc(n, sizeof(arr->record[0]), GFP_KERNEL);
- arr->subtree = kcalloc(n, sizeof(arr->subtree[0]), GFP_KERNEL);
- if (!arr->record || !arr->subtree) {
- kfree(arr->record);
- kfree(arr->subtree);
- kfree(arr);
- return ERR_PTR(-ENOMEM);
- }
-
- arr->len = n;
- arr->level = level;
- arr->fmc = fmc;
- for (i = 0; i < n; i++) {
- union sdb_record *r;
-
- for (j = 0; j < sizeof(arr->record[0]); j += 4) {
- *(uint32_t *)((void *)(arr->record + i) + j) =
- __sdb_rd(fmc, sdb_addr + (i * 64) + j, convert);
- }
- r = &arr->record[i];
- arr->subtree[i] = ERR_PTR(-ENODEV);
- if (r->empty.record_type == sdb_type_bridge) {
- struct sdb_component *c = &r->bridge.sdb_component;
- uint64_t subaddr = __be64_to_cpu(r->bridge.sdb_child);
- uint64_t newbase = __be64_to_cpu(c->addr_first);
-
- subaddr += reg_base;
- newbase += reg_base;
- sub = __fmc_scan_sdb_tree(fmc, subaddr, newbase,
- level + 1);
- arr->subtree[i] = sub; /* may be error */
- if (IS_ERR(sub))
- continue;
- sub->parent = arr;
- sub->baseaddr = newbase;
- }
- }
- return arr;
-}
-
-int fmc_scan_sdb_tree(struct fmc_device *fmc, unsigned long address)
-{
- struct sdb_array *ret;
- if (fmc->sdb)
- return -EBUSY;
- ret = __fmc_scan_sdb_tree(fmc, address, 0 /* regs */, 0);
- if (IS_ERR(ret))
- return PTR_ERR(ret);
- fmc->sdb = ret;
- return 0;
-}
-EXPORT_SYMBOL(fmc_scan_sdb_tree);
-
-static void __fmc_sdb_free(struct sdb_array *arr)
-{
- int i, n;
-
- if (!arr)
- return;
- n = arr->len;
- for (i = 0; i < n; i++) {
- if (IS_ERR(arr->subtree[i]))
- continue;
- __fmc_sdb_free(arr->subtree[i]);
- }
- kfree(arr->record);
- kfree(arr->subtree);
- kfree(arr);
-}
-
-int fmc_free_sdb_tree(struct fmc_device *fmc)
-{
- __fmc_sdb_free(fmc->sdb);
- fmc->sdb = NULL;
- return 0;
-}
-EXPORT_SYMBOL(fmc_free_sdb_tree);
-
-/* This helper calls reprogram and initializes sdb as well */
-int fmc_reprogram_raw(struct fmc_device *fmc, struct fmc_driver *d,
- void *gw, unsigned long len, int sdb_entry)
-{
- int ret;
-
- ret = fmc->op->reprogram_raw(fmc, d, gw, len);
- if (ret < 0)
- return ret;
- if (sdb_entry < 0)
- return ret;
-
- /* We are required to find SDB at a given offset */
- ret = fmc_scan_sdb_tree(fmc, sdb_entry);
- if (ret < 0) {
- dev_err(&fmc->dev, "Can't find SDB at address 0x%x\n",
- sdb_entry);
- return -ENODEV;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(fmc_reprogram_raw);
-
-/* This helper calls reprogram and initializes sdb as well */
-int fmc_reprogram(struct fmc_device *fmc, struct fmc_driver *d, char *gw,
- int sdb_entry)
-{
- int ret;
-
- ret = fmc->op->reprogram(fmc, d, gw);
- if (ret < 0)
- return ret;
- if (sdb_entry < 0)
- return ret;
-
- /* We are required to find SDB at a given offset */
- ret = fmc_scan_sdb_tree(fmc, sdb_entry);
- if (ret < 0) {
- dev_err(&fmc->dev, "Can't find SDB at address 0x%x\n",
- sdb_entry);
- return -ENODEV;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(fmc_reprogram);
-
-void fmc_show_sdb_tree(const struct fmc_device *fmc)
-{
- pr_err("%s: not supported anymore, use debugfs to dump SDB\n",
- __func__);
-}
-EXPORT_SYMBOL(fmc_show_sdb_tree);
-
-signed long fmc_find_sdb_device(struct sdb_array *tree,
- uint64_t vid, uint32_t did, unsigned long *sz)
-{
- signed long res = -ENODEV;
- union sdb_record *r;
- struct sdb_product *p;
- struct sdb_component *c;
- int i, n = tree->len;
- uint64_t last, first;
-
- /* FIXME: what if the first interconnect is not at zero? */
- for (i = 0; i < n; i++) {
- r = &tree->record[i];
- c = &r->dev.sdb_component;
- p = &c->product;
-
- if (!IS_ERR(tree->subtree[i]))
- res = fmc_find_sdb_device(tree->subtree[i],
- vid, did, sz);
- if (res >= 0)
- return res + tree->baseaddr;
- if (r->empty.record_type != sdb_type_device)
- continue;
- if (__be64_to_cpu(p->vendor_id) != vid)
- continue;
- if (__be32_to_cpu(p->device_id) != did)
- continue;
- /* found */
- last = __be64_to_cpu(c->addr_last);
- first = __be64_to_cpu(c->addr_first);
- if (sz)
- *sz = (typeof(*sz))(last + 1 - first);
- return first + tree->baseaddr;
- }
- return res;
-}
-EXPORT_SYMBOL(fmc_find_sdb_device);
diff --git a/drivers/fmc/fmc-trivial.c b/drivers/fmc/fmc-trivial.c
deleted file mode 100644
index 8defdee3e3a3..000000000000
--- a/drivers/fmc/fmc-trivial.c
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * Copyright (C) 2012 CERN (www.cern.ch)
- * Author: Alessandro Rubini <rubini@gnudd.com>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * The software is provided "as is"; the copyright holders disclaim
- * all warranties and liabilities, to the extent permitted by
- * applicable law.
- */
-
-/* A trivial fmc driver that can load a gateware file and reports interrupts */
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/gpio.h>
-#include <linux/fmc.h>
-
-static struct fmc_driver t_drv; /* initialized later */
-
-static irqreturn_t t_handler(int irq, void *dev_id)
-{
- struct fmc_device *fmc = dev_id;
-
- fmc_irq_ack(fmc);
- dev_info(&fmc->dev, "received irq %i\n", irq);
- return IRQ_HANDLED;
-}
-
-static struct fmc_gpio t_gpio[] = {
- {
- .gpio = FMC_GPIO_IRQ(0),
- .mode = GPIOF_DIR_IN,
- .irqmode = IRQF_TRIGGER_RISING,
- }, {
- .gpio = FMC_GPIO_IRQ(1),
- .mode = GPIOF_DIR_IN,
- .irqmode = IRQF_TRIGGER_RISING,
- }
-};
-
-static int t_probe(struct fmc_device *fmc)
-{
- int ret;
- int index = 0;
-
- index = fmc_validate(fmc, &t_drv);
- if (index < 0)
- return -EINVAL; /* not our device: invalid */
-
- ret = fmc_irq_request(fmc, t_handler, "fmc-trivial", IRQF_SHARED);
- if (ret < 0)
- return ret;
- /* ignore error code of call below, we really don't care */
- fmc_gpio_config(fmc, t_gpio, ARRAY_SIZE(t_gpio));
-
- ret = fmc_reprogram(fmc, &t_drv, "", 0);
- if (ret == -EPERM) /* programming not supported */
- ret = 0;
- if (ret < 0)
- fmc_irq_free(fmc);
-
- /* FIXME: reprogram LM32 too */
- return ret;
-}
-
-static int t_remove(struct fmc_device *fmc)
-{
- fmc_irq_free(fmc);
- return 0;
-}
-
-static struct fmc_driver t_drv = {
- .version = FMC_VERSION,
- .driver.name = KBUILD_MODNAME,
- .probe = t_probe,
- .remove = t_remove,
- /* no table, as the current match just matches everything */
-};
-
- /* We accept the generic parameters */
-FMC_PARAM_BUSID(t_drv);
-FMC_PARAM_GATEWARE(t_drv);
-
-static int t_init(void)
-{
- int ret;
-
- ret = fmc_driver_register(&t_drv);
- return ret;
-}
-
-static void t_exit(void)
-{
- fmc_driver_unregister(&t_drv);
-}
-
-module_init(t_init);
-module_exit(t_exit);
-
-MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/fmc/fmc-write-eeprom.c b/drivers/fmc/fmc-write-eeprom.c
deleted file mode 100644
index 1c7826e3f526..000000000000
--- a/drivers/fmc/fmc-write-eeprom.c
+++ /dev/null
@@ -1,175 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2012 CERN (www.cern.ch)
- * Author: Alessandro Rubini <rubini@gnudd.com>
- *
- * This work is part of the White Rabbit project, a research effort led
- * by CERN, the European Institute for Nuclear Research.
- */
-#include <linux/module.h>
-#include <linux/string.h>
-#include <linux/firmware.h>
-#include <linux/init.h>
-#include <linux/fmc.h>
-#include <asm/unaligned.h>
-
-/*
- * This module uses the firmware loader to program the whole or part
- * of the FMC eeprom. The meat is in the _run functions. However, no
- * default file name is provided, to avoid accidental mishaps. Also,
- * you must pass the busid argument
- */
-static struct fmc_driver fwe_drv;
-
-FMC_PARAM_BUSID(fwe_drv);
-
-/* The "file=" is like the generic "gateware=" used elsewhere */
-static char *fwe_file[FMC_MAX_CARDS];
-static int fwe_file_n;
-module_param_array_named(file, fwe_file, charp, &fwe_file_n, 0444);
-
-static int fwe_run_tlv(struct fmc_device *fmc, const struct firmware *fw,
- int write)
-{
- const uint8_t *p = fw->data;
- int len = fw->size;
- uint16_t thislen, thisaddr;
- int err;
-
- /* format is: 'w' addr16 len16 data... */
- while (len > 5) {
- thisaddr = get_unaligned_le16(p+1);
- thislen = get_unaligned_le16(p+3);
- if (p[0] != 'w' || thislen + 5 > len) {
- dev_err(&fmc->dev, "invalid tlv at offset %ti\n",
- p - fw->data);
- return -EINVAL;
- }
- err = 0;
- if (write) {
- dev_info(&fmc->dev, "write %i bytes at 0x%04x\n",
- thislen, thisaddr);
- err = fmc_write_ee(fmc, thisaddr, p + 5, thislen);
- }
- if (err < 0) {
- dev_err(&fmc->dev, "write failure @0x%04x\n",
- thisaddr);
- return err;
- }
- p += 5 + thislen;
- len -= 5 + thislen;
- }
- if (write)
- dev_info(&fmc->dev, "write_eeprom: success\n");
- return 0;
-}
-
-static int fwe_run_bin(struct fmc_device *fmc, const struct firmware *fw)
-{
- int ret;
-
- dev_info(&fmc->dev, "programming %zi bytes\n", fw->size);
- ret = fmc_write_ee(fmc, 0, (void *)fw->data, fw->size);
- if (ret < 0) {
- dev_info(&fmc->dev, "write_eeprom: error %i\n", ret);
- return ret;
- }
- dev_info(&fmc->dev, "write_eeprom: success\n");
- return 0;
-}
-
-static int fwe_run(struct fmc_device *fmc, const struct firmware *fw, char *s)
-{
- char *last4 = s + strlen(s) - 4;
- int err;
-
- if (!strcmp(last4, ".bin"))
- return fwe_run_bin(fmc, fw);
- if (!strcmp(last4, ".tlv")) {
- err = fwe_run_tlv(fmc, fw, 0);
- if (!err)
- err = fwe_run_tlv(fmc, fw, 1);
- return err;
- }
- dev_err(&fmc->dev, "invalid file name \"%s\"\n", s);
- return -EINVAL;
-}
-
-/*
- * Programming is done at probe time. Moreover, only those cards listed
- * with busid= are programmed: even if more than one card is probed for,
- * only the matching ones are programmed. Unfortunately, it's
- * difficult to know in advance when probing the first card if others
- * are there.
- */
-static int fwe_probe(struct fmc_device *fmc)
-{
- int err, index = 0;
- const struct firmware *fw;
- struct device *dev = &fmc->dev;
- char *s;
-
- if (!fwe_drv.busid_n) {
- dev_err(dev, "%s: no busid passed, refusing all cards\n",
- KBUILD_MODNAME);
- return -ENODEV;
- }
-
- index = fmc_validate(fmc, &fwe_drv);
- if (index < 0) {
- pr_err("%s: refusing device \"%s\"\n", KBUILD_MODNAME,
- dev_name(dev));
- return -ENODEV;
- }
- if (index >= fwe_file_n) {
- pr_err("%s: no filename for device index %i\n",
- KBUILD_MODNAME, index);
- return -ENODEV;
- }
- s = fwe_file[index];
- if (!s) {
- pr_err("%s: no filename for \"%s\" not programming\n",
- KBUILD_MODNAME, dev_name(dev));
- return -ENOENT;
- }
- err = request_firmware(&fw, s, dev);
- if (err < 0) {
- dev_err(&fmc->dev, "request firmware \"%s\": error %i\n",
- s, err);
- return err;
- }
- fwe_run(fmc, fw, s);
- release_firmware(fw);
- return 0;
-}
-
-static int fwe_remove(struct fmc_device *fmc)
-{
- return 0;
-}
-
-static struct fmc_driver fwe_drv = {
- .version = FMC_VERSION,
- .driver.name = KBUILD_MODNAME,
- .probe = fwe_probe,
- .remove = fwe_remove,
- /* no table, as the current match just matches everything */
-};
-
-static int fwe_init(void)
-{
- int ret;
-
- ret = fmc_driver_register(&fwe_drv);
- return ret;
-}
-
-static void fwe_exit(void)
-{
- fmc_driver_unregister(&fwe_drv);
-}
-
-module_init(fwe_init);
-module_exit(fwe_exit);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/fmc/fru-parse.c b/drivers/fmc/fru-parse.c
deleted file mode 100644
index f551b81f4fd9..000000000000
--- a/drivers/fmc/fru-parse.c
+++ /dev/null
@@ -1,80 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2012 CERN (www.cern.ch)
- * Author: Alessandro Rubini <rubini@gnudd.com>
- *
- * This work is part of the White Rabbit project, a research effort led
- * by CERN, the European Institute for Nuclear Research.
- */
-#include <linux/ipmi-fru.h>
-
-/* Some internal helpers */
-static struct fru_type_length *
-__fru_get_board_tl(struct fru_common_header *header, int nr)
-{
- struct fru_board_info_area *bia;
- struct fru_type_length *tl;
-
- bia = fru_get_board_area(header);
- tl = bia->tl;
- while (nr > 0 && !fru_is_eof(tl)) {
- tl = fru_next_tl(tl);
- nr--;
- }
- if (fru_is_eof(tl))
- return NULL;
- return tl;
-}
-
-static char *__fru_alloc_get_tl(struct fru_common_header *header, int nr)
-{
- struct fru_type_length *tl;
- char *res;
-
- tl = __fru_get_board_tl(header, nr);
- if (!tl)
- return NULL;
-
- res = fru_alloc(fru_strlen(tl) + 1);
- if (!res)
- return NULL;
- return fru_strcpy(res, tl);
-}
-
-/* Public checksum verifiers */
-int fru_header_cksum_ok(struct fru_common_header *header)
-{
- uint8_t *ptr = (void *)header;
- int i, sum;
-
- for (i = sum = 0; i < sizeof(*header); i++)
- sum += ptr[i];
- return (sum & 0xff) == 0;
-}
-int fru_bia_cksum_ok(struct fru_board_info_area *bia)
-{
- uint8_t *ptr = (void *)bia;
- int i, sum;
-
- for (i = sum = 0; i < 8 * bia->area_len; i++)
- sum += ptr[i];
- return (sum & 0xff) == 0;
-}
-
-/* Get various stuff, trivial */
-char *fru_get_board_manufacturer(struct fru_common_header *header)
-{
- return __fru_alloc_get_tl(header, 0);
-}
-char *fru_get_product_name(struct fru_common_header *header)
-{
- return __fru_alloc_get_tl(header, 1);
-}
-char *fru_get_serial_number(struct fru_common_header *header)
-{
- return __fru_alloc_get_tl(header, 2);
-}
-char *fru_get_part_number(struct fru_common_header *header)
-{
- return __fru_alloc_get_tl(header, 3);
-}
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index acd40eb51c46..e4fee216d5a4 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -62,16 +62,12 @@ config GPIO_SYSFS
bool "/sys/class/gpio/... (sysfs interface)"
depends on SYSFS
help
- Say Y here to add a sysfs interface for GPIOs.
+ Say Y here to add the legacy sysfs interface for GPIOs.
- This is mostly useful to work around omissions in a system's
- kernel support. Those are common in custom and semicustom
- hardware assembled using standard kernels with a minimum of
- custom patches. In those cases, userspace code may import
- a given GPIO from the kernel, if no kernel driver requested it.
-
- Kernel drivers may also request that a particular GPIO be
- exported to userspace; this can be useful when debugging.
+ This ABI is deprecated. If you want to use GPIO from userspace,
+ use the character device /dev/gpiochipN with the appropriate
+ ioctl() operations instead. The character device is always
+ available.
config GPIO_GENERIC
depends on HAS_IOMEM # Only for IOMEM drivers
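
The new help text above points userspace at the GPIO character device. As a
rough illustration, not part of the patch itself, a line can be requested and
driven through the v1 line-handle ioctl()s from <linux/gpio.h>; the chip path,
line offset and consumer label below are placeholders:

    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/gpio.h>

    /* Request one line as output, drive it high, then release it. */
    int example_set_line(void)
    {
            struct gpiohandle_request req;
            struct gpiohandle_data data;
            int fd, ret;

            fd = open("/dev/gpiochip0", O_RDONLY); /* placeholder chip */
            if (fd < 0)
                    return -1;

            memset(&req, 0, sizeof(req));
            req.lineoffsets[0] = 4;                /* placeholder line offset */
            req.lines = 1;
            req.flags = GPIOHANDLE_REQUEST_OUTPUT;
            strcpy(req.consumer_label, "example");

            ret = ioctl(fd, GPIO_GET_LINEHANDLE_IOCTL, &req);
            close(fd);                      /* line handle has its own fd */
            if (ret < 0)
                    return -1;

            memset(&data, 0, sizeof(data));
            data.values[0] = 1;
            ioctl(req.fd, GPIOHANDLE_SET_LINE_VALUES_IOCTL, &data);

            close(req.fd);                  /* releases the line */
            return 0;
    }

libgpiod wraps the same ioctl()s and is usually the more convenient route.
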
@@ -178,7 +174,7 @@ config GPIO_CLPS711X
config GPIO_DAVINCI
bool "TI Davinci/Keystone GPIO support"
default y if ARCH_DAVINCI
- depends on ARM && (ARCH_DAVINCI || ARCH_KEYSTONE)
+ depends on (ARM || ARM64) && (ARCH_DAVINCI || ARCH_KEYSTONE || ARCH_K3)
help
Say yes here to enable GPIO support for TI Davinci/Keystone SoCs.
@@ -493,7 +489,8 @@ config GPIO_STA2X11
config GPIO_STP_XWAY
bool "XWAY STP GPIOs"
- depends on SOC_XWAY
+ depends on SOC_XWAY || COMPILE_TEST
+ depends on OF_GPIO
help
This enables support for the Serial To Parallel (STP) unit found on
XWAY SoC. The STP allows the SoC to drive a shift registers cascade,
@@ -602,7 +599,6 @@ config GPIO_XGENE_SB
config GPIO_XILINX
tristate "Xilinx GPIO support"
- depends on OF_GPIO
help
Say yes here to support the Xilinx FPGA GPIO device
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 6700eee860b7..9e400e34e300 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -17,154 +17,154 @@ obj-$(CONFIG_GPIO_GENERIC) += gpio-generic.o
# directly supported by gpio-generic
gpio-generic-$(CONFIG_GPIO_GENERIC) += gpio-mmio.o
-obj-$(CONFIG_GPIO_104_DIO_48E) += gpio-104-dio-48e.o
-obj-$(CONFIG_GPIO_104_IDIO_16) += gpio-104-idio-16.o
-obj-$(CONFIG_GPIO_104_IDI_48) += gpio-104-idi-48.o
-obj-$(CONFIG_GPIO_74X164) += gpio-74x164.o
-obj-$(CONFIG_GPIO_74XX_MMIO) += gpio-74xx-mmio.o
-obj-$(CONFIG_GPIO_ADNP) += gpio-adnp.o
-obj-$(CONFIG_GPIO_ADP5520) += gpio-adp5520.o
-obj-$(CONFIG_GPIO_ADP5588) += gpio-adp5588.o
-obj-$(CONFIG_GPIO_ALTERA) += gpio-altera.o
-obj-$(CONFIG_GPIO_ALTERA_A10SR) += gpio-altera-a10sr.o
-obj-$(CONFIG_GPIO_AMD_FCH) += gpio-amd-fch.o
-obj-$(CONFIG_GPIO_AMD8111) += gpio-amd8111.o
-obj-$(CONFIG_GPIO_AMDPT) += gpio-amdpt.o
-obj-$(CONFIG_GPIO_ARIZONA) += gpio-arizona.o
-obj-$(CONFIG_GPIO_ATH79) += gpio-ath79.o
-obj-$(CONFIG_GPIO_ASPEED) += gpio-aspeed.o
-obj-$(CONFIG_GPIO_RASPBERRYPI_EXP) += gpio-raspberrypi-exp.o
-obj-$(CONFIG_GPIO_BCM_KONA) += gpio-bcm-kona.o
-obj-$(CONFIG_GPIO_BD9571MWV) += gpio-bd9571mwv.o
-obj-$(CONFIG_GPIO_BRCMSTB) += gpio-brcmstb.o
-obj-$(CONFIG_GPIO_BT8XX) += gpio-bt8xx.o
-obj-$(CONFIG_GPIO_CADENCE) += gpio-cadence.o
-obj-$(CONFIG_GPIO_CLPS711X) += gpio-clps711x.o
-obj-$(CONFIG_GPIO_CS5535) += gpio-cs5535.o
-obj-$(CONFIG_GPIO_CRYSTAL_COVE) += gpio-crystalcove.o
-obj-$(CONFIG_GPIO_DA9052) += gpio-da9052.o
-obj-$(CONFIG_GPIO_DA9055) += gpio-da9055.o
-obj-$(CONFIG_GPIO_DAVINCI) += gpio-davinci.o
-obj-$(CONFIG_GPIO_DLN2) += gpio-dln2.o
-obj-$(CONFIG_GPIO_DWAPB) += gpio-dwapb.o
-obj-$(CONFIG_GPIO_EIC_SPRD) += gpio-eic-sprd.o
-obj-$(CONFIG_GPIO_EM) += gpio-em.o
-obj-$(CONFIG_GPIO_EP93XX) += gpio-ep93xx.o
-obj-$(CONFIG_GPIO_EXAR) += gpio-exar.o
-obj-$(CONFIG_GPIO_F7188X) += gpio-f7188x.o
-obj-$(CONFIG_GPIO_FTGPIO010) += gpio-ftgpio010.o
-obj-$(CONFIG_GPIO_GE_FPGA) += gpio-ge.o
-obj-$(CONFIG_GPIO_GPIO_MM) += gpio-gpio-mm.o
-obj-$(CONFIG_GPIO_GRGPIO) += gpio-grgpio.o
-obj-$(CONFIG_GPIO_GW_PLD) += gpio-gw-pld.o
-obj-$(CONFIG_GPIO_HLWD) += gpio-hlwd.o
-obj-$(CONFIG_HTC_EGPIO) += gpio-htc-egpio.o
-obj-$(CONFIG_GPIO_ICH) += gpio-ich.o
-obj-$(CONFIG_GPIO_IOP) += gpio-iop.o
-obj-$(CONFIG_GPIO_IXP4XX) += gpio-ixp4xx.o
-obj-$(CONFIG_GPIO_IT87) += gpio-it87.o
-obj-$(CONFIG_GPIO_JANZ_TTL) += gpio-janz-ttl.o
-obj-$(CONFIG_GPIO_KEMPLD) += gpio-kempld.o
-obj-$(CONFIG_ARCH_KS8695) += gpio-ks8695.o
-obj-$(CONFIG_GPIO_INTEL_MID) += gpio-intel-mid.o
-obj-$(CONFIG_GPIO_LOONGSON) += gpio-loongson.o
-obj-$(CONFIG_GPIO_LP3943) += gpio-lp3943.o
-obj-$(CONFIG_GPIO_LPC18XX) += gpio-lpc18xx.o
-obj-$(CONFIG_ARCH_LPC32XX) += gpio-lpc32xx.o
-obj-$(CONFIG_GPIO_LP873X) += gpio-lp873x.o
-obj-$(CONFIG_GPIO_LP87565) += gpio-lp87565.o
-obj-$(CONFIG_GPIO_LYNXPOINT) += gpio-lynxpoint.o
-obj-$(CONFIG_GPIO_MADERA) += gpio-madera.o
-obj-$(CONFIG_GPIO_MAX3191X) += gpio-max3191x.o
-obj-$(CONFIG_GPIO_MAX730X) += gpio-max730x.o
-obj-$(CONFIG_GPIO_MAX7300) += gpio-max7300.o
-obj-$(CONFIG_GPIO_MAX7301) += gpio-max7301.o
-obj-$(CONFIG_GPIO_MAX732X) += gpio-max732x.o
-obj-$(CONFIG_GPIO_MAX77620) += gpio-max77620.o
-obj-$(CONFIG_GPIO_MAX77650) += gpio-max77650.o
-obj-$(CONFIG_GPIO_MB86S7X) += gpio-mb86s7x.o
-obj-$(CONFIG_GPIO_MENZ127) += gpio-menz127.o
-obj-$(CONFIG_GPIO_MERRIFIELD) += gpio-merrifield.o
-obj-$(CONFIG_GPIO_MC33880) += gpio-mc33880.o
-obj-$(CONFIG_GPIO_MC9S08DZ60) += gpio-mc9s08dz60.o
-obj-$(CONFIG_GPIO_MLXBF) += gpio-mlxbf.o
-obj-$(CONFIG_GPIO_ML_IOH) += gpio-ml-ioh.o
-obj-$(CONFIG_GPIO_MM_LANTIQ) += gpio-mm-lantiq.o
-obj-$(CONFIG_GPIO_MOCKUP) += gpio-mockup.o
-obj-$(CONFIG_GPIO_MPC5200) += gpio-mpc5200.o
-obj-$(CONFIG_GPIO_MPC8XXX) += gpio-mpc8xxx.o
-obj-$(CONFIG_GPIO_MSIC) += gpio-msic.o
+obj-$(CONFIG_GPIO_104_DIO_48E) += gpio-104-dio-48e.o
+obj-$(CONFIG_GPIO_104_IDI_48) += gpio-104-idi-48.o
+obj-$(CONFIG_GPIO_104_IDIO_16) += gpio-104-idio-16.o
+obj-$(CONFIG_GPIO_74X164) += gpio-74x164.o
+obj-$(CONFIG_GPIO_74XX_MMIO) += gpio-74xx-mmio.o
+obj-$(CONFIG_GPIO_ADNP) += gpio-adnp.o
+obj-$(CONFIG_GPIO_ADP5520) += gpio-adp5520.o
+obj-$(CONFIG_GPIO_ADP5588) += gpio-adp5588.o
+obj-$(CONFIG_GPIO_ALTERA_A10SR) += gpio-altera-a10sr.o
+obj-$(CONFIG_GPIO_ALTERA) += gpio-altera.o
+obj-$(CONFIG_GPIO_AMD8111) += gpio-amd8111.o
+obj-$(CONFIG_GPIO_AMD_FCH) += gpio-amd-fch.o
+obj-$(CONFIG_GPIO_AMDPT) += gpio-amdpt.o
+obj-$(CONFIG_GPIO_ARIZONA) += gpio-arizona.o
+obj-$(CONFIG_GPIO_ASPEED) += gpio-aspeed.o
+obj-$(CONFIG_GPIO_ATH79) += gpio-ath79.o
+obj-$(CONFIG_GPIO_BCM_KONA) += gpio-bcm-kona.o
+obj-$(CONFIG_GPIO_BD9571MWV) += gpio-bd9571mwv.o
+obj-$(CONFIG_GPIO_BRCMSTB) += gpio-brcmstb.o
+obj-$(CONFIG_GPIO_BT8XX) += gpio-bt8xx.o
+obj-$(CONFIG_GPIO_CADENCE) += gpio-cadence.o
+obj-$(CONFIG_GPIO_CLPS711X) += gpio-clps711x.o
+obj-$(CONFIG_GPIO_SNPS_CREG) += gpio-creg-snps.o
+obj-$(CONFIG_GPIO_CRYSTAL_COVE) += gpio-crystalcove.o
+obj-$(CONFIG_GPIO_CS5535) += gpio-cs5535.o
+obj-$(CONFIG_GPIO_DA9052) += gpio-da9052.o
+obj-$(CONFIG_GPIO_DA9055) += gpio-da9055.o
+obj-$(CONFIG_GPIO_DAVINCI) += gpio-davinci.o
+obj-$(CONFIG_GPIO_DLN2) += gpio-dln2.o
+obj-$(CONFIG_GPIO_DWAPB) += gpio-dwapb.o
+obj-$(CONFIG_GPIO_EIC_SPRD) += gpio-eic-sprd.o
+obj-$(CONFIG_GPIO_EM) += gpio-em.o
+obj-$(CONFIG_GPIO_EP93XX) += gpio-ep93xx.o
+obj-$(CONFIG_GPIO_EXAR) += gpio-exar.o
+obj-$(CONFIG_GPIO_F7188X) += gpio-f7188x.o
+obj-$(CONFIG_GPIO_FTGPIO010) += gpio-ftgpio010.o
+obj-$(CONFIG_GPIO_GE_FPGA) += gpio-ge.o
+obj-$(CONFIG_GPIO_GPIO_MM) += gpio-gpio-mm.o
+obj-$(CONFIG_GPIO_GRGPIO) += gpio-grgpio.o
+obj-$(CONFIG_GPIO_GW_PLD) += gpio-gw-pld.o
+obj-$(CONFIG_GPIO_HLWD) += gpio-hlwd.o
+obj-$(CONFIG_HTC_EGPIO) += gpio-htc-egpio.o
+obj-$(CONFIG_GPIO_ICH) += gpio-ich.o
+obj-$(CONFIG_GPIO_INTEL_MID) += gpio-intel-mid.o
+obj-$(CONFIG_GPIO_IOP) += gpio-iop.o
+obj-$(CONFIG_GPIO_IT87) += gpio-it87.o
+obj-$(CONFIG_GPIO_IXP4XX) += gpio-ixp4xx.o
+obj-$(CONFIG_GPIO_JANZ_TTL) += gpio-janz-ttl.o
+obj-$(CONFIG_GPIO_KEMPLD) += gpio-kempld.o
+obj-$(CONFIG_ARCH_KS8695) += gpio-ks8695.o
+obj-$(CONFIG_GPIO_LOONGSON1) += gpio-loongson1.o
+obj-$(CONFIG_GPIO_LOONGSON) += gpio-loongson.o
+obj-$(CONFIG_GPIO_LP3943) += gpio-lp3943.o
+obj-$(CONFIG_GPIO_LP873X) += gpio-lp873x.o
+obj-$(CONFIG_GPIO_LP87565) += gpio-lp87565.o
+obj-$(CONFIG_GPIO_LPC18XX) += gpio-lpc18xx.o
+obj-$(CONFIG_ARCH_LPC32XX) += gpio-lpc32xx.o
+obj-$(CONFIG_GPIO_LYNXPOINT) += gpio-lynxpoint.o
+obj-$(CONFIG_GPIO_MADERA) += gpio-madera.o
+obj-$(CONFIG_GPIO_MAX3191X) += gpio-max3191x.o
+obj-$(CONFIG_GPIO_MAX7300) += gpio-max7300.o
+obj-$(CONFIG_GPIO_MAX7301) += gpio-max7301.o
+obj-$(CONFIG_GPIO_MAX730X) += gpio-max730x.o
+obj-$(CONFIG_GPIO_MAX732X) += gpio-max732x.o
+obj-$(CONFIG_GPIO_MAX77620) += gpio-max77620.o
+obj-$(CONFIG_GPIO_MAX77650) += gpio-max77650.o
+obj-$(CONFIG_GPIO_MB86S7X) += gpio-mb86s7x.o
+obj-$(CONFIG_GPIO_MC33880) += gpio-mc33880.o
+obj-$(CONFIG_GPIO_MC9S08DZ60) += gpio-mc9s08dz60.o
+obj-$(CONFIG_GPIO_MENZ127) += gpio-menz127.o
+obj-$(CONFIG_GPIO_MERRIFIELD) += gpio-merrifield.o
+obj-$(CONFIG_GPIO_ML_IOH) += gpio-ml-ioh.o
+obj-$(CONFIG_GPIO_MLXBF) += gpio-mlxbf.o
+obj-$(CONFIG_GPIO_MM_LANTIQ) += gpio-mm-lantiq.o
+obj-$(CONFIG_GPIO_MOCKUP) += gpio-mockup.o
+obj-$(CONFIG_GPIO_MPC5200) += gpio-mpc5200.o
+obj-$(CONFIG_GPIO_MPC8XXX) += gpio-mpc8xxx.o
+obj-$(CONFIG_GPIO_MSIC) += gpio-msic.o
obj-$(CONFIG_GPIO_MT7621) += gpio-mt7621.o
-obj-$(CONFIG_GPIO_MVEBU) += gpio-mvebu.o
-obj-$(CONFIG_GPIO_MXC) += gpio-mxc.o
-obj-$(CONFIG_GPIO_MXS) += gpio-mxs.o
-obj-$(CONFIG_GPIO_OCTEON) += gpio-octeon.o
-obj-$(CONFIG_GPIO_OMAP) += gpio-omap.o
-obj-$(CONFIG_GPIO_PCA953X) += gpio-pca953x.o
-obj-$(CONFIG_GPIO_PCF857X) += gpio-pcf857x.o
-obj-$(CONFIG_GPIO_PCH) += gpio-pch.o
-obj-$(CONFIG_GPIO_PCI_IDIO_16) += gpio-pci-idio-16.o
-obj-$(CONFIG_GPIO_PCIE_IDIO_24) += gpio-pcie-idio-24.o
-obj-$(CONFIG_GPIO_PISOSR) += gpio-pisosr.o
-obj-$(CONFIG_GPIO_PL061) += gpio-pl061.o
+obj-$(CONFIG_GPIO_MVEBU) += gpio-mvebu.o
+obj-$(CONFIG_GPIO_MXC) += gpio-mxc.o
+obj-$(CONFIG_GPIO_MXS) += gpio-mxs.o
+obj-$(CONFIG_GPIO_OCTEON) += gpio-octeon.o
+obj-$(CONFIG_GPIO_OMAP) += gpio-omap.o
+obj-$(CONFIG_GPIO_PALMAS) += gpio-palmas.o
+obj-$(CONFIG_GPIO_PCA953X) += gpio-pca953x.o
+obj-$(CONFIG_GPIO_PCF857X) += gpio-pcf857x.o
+obj-$(CONFIG_GPIO_PCH) += gpio-pch.o
+obj-$(CONFIG_GPIO_PCIE_IDIO_24) += gpio-pcie-idio-24.o
+obj-$(CONFIG_GPIO_PCI_IDIO_16) += gpio-pci-idio-16.o
+obj-$(CONFIG_GPIO_PISOSR) += gpio-pisosr.o
+obj-$(CONFIG_GPIO_PL061) += gpio-pl061.o
obj-$(CONFIG_GPIO_PMIC_EIC_SPRD) += gpio-pmic-eic-sprd.o
-obj-$(CONFIG_GPIO_PXA) += gpio-pxa.o
-obj-$(CONFIG_GPIO_RC5T583) += gpio-rc5t583.o
-obj-$(CONFIG_GPIO_RDC321X) += gpio-rdc321x.o
-obj-$(CONFIG_GPIO_RCAR) += gpio-rcar.o
-obj-$(CONFIG_GPIO_REG) += gpio-reg.o
-obj-$(CONFIG_ARCH_SA1100) += gpio-sa1100.o
+obj-$(CONFIG_GPIO_PXA) += gpio-pxa.o
+obj-$(CONFIG_GPIO_RASPBERRYPI_EXP) += gpio-raspberrypi-exp.o
+obj-$(CONFIG_GPIO_RC5T583) += gpio-rc5t583.o
+obj-$(CONFIG_GPIO_RCAR) += gpio-rcar.o
+obj-$(CONFIG_GPIO_RDC321X) += gpio-rdc321x.o
+obj-$(CONFIG_GPIO_REG) += gpio-reg.o
+obj-$(CONFIG_ARCH_SA1100) += gpio-sa1100.o
obj-$(CONFIG_GPIO_SAMA5D2_PIOBU) += gpio-sama5d2-piobu.o
-obj-$(CONFIG_GPIO_SCH) += gpio-sch.o
-obj-$(CONFIG_GPIO_SCH311X) += gpio-sch311x.o
-obj-$(CONFIG_GPIO_SNPS_CREG) += gpio-creg-snps.o
-obj-$(CONFIG_GPIO_SODAVILLE) += gpio-sodaville.o
-obj-$(CONFIG_GPIO_SPEAR_SPICS) += gpio-spear-spics.o
-obj-$(CONFIG_GPIO_SPRD) += gpio-sprd.o
-obj-$(CONFIG_GPIO_STA2X11) += gpio-sta2x11.o
-obj-$(CONFIG_GPIO_STMPE) += gpio-stmpe.o
-obj-$(CONFIG_GPIO_STP_XWAY) += gpio-stp-xway.o
-obj-$(CONFIG_GPIO_SYSCON) += gpio-syscon.o
-obj-$(CONFIG_GPIO_TB10X) += gpio-tb10x.o
-obj-$(CONFIG_GPIO_TC3589X) += gpio-tc3589x.o
-obj-$(CONFIG_GPIO_TEGRA) += gpio-tegra.o
-obj-$(CONFIG_GPIO_TEGRA186) += gpio-tegra186.o
-obj-$(CONFIG_GPIO_THUNDERX) += gpio-thunderx.o
-obj-$(CONFIG_GPIO_TIMBERDALE) += gpio-timberdale.o
-obj-$(CONFIG_GPIO_PALMAS) += gpio-palmas.o
-obj-$(CONFIG_GPIO_SIOX) += gpio-siox.o
-obj-$(CONFIG_GPIO_TPIC2810) += gpio-tpic2810.o
-obj-$(CONFIG_GPIO_TPS65086) += gpio-tps65086.o
-obj-$(CONFIG_GPIO_TPS65218) += gpio-tps65218.o
-obj-$(CONFIG_GPIO_TPS6586X) += gpio-tps6586x.o
-obj-$(CONFIG_GPIO_TPS65910) += gpio-tps65910.o
-obj-$(CONFIG_GPIO_TPS65912) += gpio-tps65912.o
-obj-$(CONFIG_GPIO_TPS68470) += gpio-tps68470.o
-obj-$(CONFIG_GPIO_TQMX86) += gpio-tqmx86.o
-obj-$(CONFIG_GPIO_TS4800) += gpio-ts4800.o
-obj-$(CONFIG_GPIO_TS4900) += gpio-ts4900.o
-obj-$(CONFIG_GPIO_TS5500) += gpio-ts5500.o
-obj-$(CONFIG_GPIO_TWL4030) += gpio-twl4030.o
-obj-$(CONFIG_GPIO_TWL6040) += gpio-twl6040.o
-obj-$(CONFIG_GPIO_UCB1400) += gpio-ucb1400.o
-obj-$(CONFIG_GPIO_UNIPHIER) += gpio-uniphier.o
-obj-$(CONFIG_GPIO_VF610) += gpio-vf610.o
-obj-$(CONFIG_GPIO_VIPERBOARD) += gpio-viperboard.o
-obj-$(CONFIG_GPIO_VR41XX) += gpio-vr41xx.o
-obj-$(CONFIG_GPIO_VX855) += gpio-vx855.o
-obj-$(CONFIG_GPIO_WHISKEY_COVE) += gpio-wcove.o
-obj-$(CONFIG_GPIO_WINBOND) += gpio-winbond.o
-obj-$(CONFIG_GPIO_WM831X) += gpio-wm831x.o
-obj-$(CONFIG_GPIO_WM8350) += gpio-wm8350.o
-obj-$(CONFIG_GPIO_WM8994) += gpio-wm8994.o
-obj-$(CONFIG_GPIO_WS16C48) += gpio-ws16c48.o
-obj-$(CONFIG_GPIO_XGENE) += gpio-xgene.o
-obj-$(CONFIG_GPIO_XGENE_SB) += gpio-xgene-sb.o
-obj-$(CONFIG_GPIO_XILINX) += gpio-xilinx.o
-obj-$(CONFIG_GPIO_XLP) += gpio-xlp.o
-obj-$(CONFIG_GPIO_XRA1403) += gpio-xra1403.o
-obj-$(CONFIG_GPIO_XTENSA) += gpio-xtensa.o
-obj-$(CONFIG_GPIO_ZEVIO) += gpio-zevio.o
-obj-$(CONFIG_GPIO_ZYNQ) += gpio-zynq.o
-obj-$(CONFIG_GPIO_ZX) += gpio-zx.o
-obj-$(CONFIG_GPIO_LOONGSON1) += gpio-loongson1.o
+obj-$(CONFIG_GPIO_SCH311X) += gpio-sch311x.o
+obj-$(CONFIG_GPIO_SCH) += gpio-sch.o
+obj-$(CONFIG_GPIO_SIOX) += gpio-siox.o
+obj-$(CONFIG_GPIO_SODAVILLE) += gpio-sodaville.o
+obj-$(CONFIG_GPIO_SPEAR_SPICS) += gpio-spear-spics.o
+obj-$(CONFIG_GPIO_SPRD) += gpio-sprd.o
+obj-$(CONFIG_GPIO_STA2X11) += gpio-sta2x11.o
+obj-$(CONFIG_GPIO_STMPE) += gpio-stmpe.o
+obj-$(CONFIG_GPIO_STP_XWAY) += gpio-stp-xway.o
+obj-$(CONFIG_GPIO_SYSCON) += gpio-syscon.o
+obj-$(CONFIG_GPIO_TB10X) += gpio-tb10x.o
+obj-$(CONFIG_GPIO_TC3589X) += gpio-tc3589x.o
+obj-$(CONFIG_GPIO_TEGRA186) += gpio-tegra186.o
+obj-$(CONFIG_GPIO_TEGRA) += gpio-tegra.o
+obj-$(CONFIG_GPIO_THUNDERX) += gpio-thunderx.o
+obj-$(CONFIG_GPIO_TIMBERDALE) += gpio-timberdale.o
+obj-$(CONFIG_GPIO_TPIC2810) += gpio-tpic2810.o
+obj-$(CONFIG_GPIO_TPS65086) += gpio-tps65086.o
+obj-$(CONFIG_GPIO_TPS65218) += gpio-tps65218.o
+obj-$(CONFIG_GPIO_TPS6586X) += gpio-tps6586x.o
+obj-$(CONFIG_GPIO_TPS65910) += gpio-tps65910.o
+obj-$(CONFIG_GPIO_TPS65912) += gpio-tps65912.o
+obj-$(CONFIG_GPIO_TPS68470) += gpio-tps68470.o
+obj-$(CONFIG_GPIO_TQMX86) += gpio-tqmx86.o
+obj-$(CONFIG_GPIO_TS4800) += gpio-ts4800.o
+obj-$(CONFIG_GPIO_TS4900) += gpio-ts4900.o
+obj-$(CONFIG_GPIO_TS5500) += gpio-ts5500.o
+obj-$(CONFIG_GPIO_TWL4030) += gpio-twl4030.o
+obj-$(CONFIG_GPIO_TWL6040) += gpio-twl6040.o
+obj-$(CONFIG_GPIO_UCB1400) += gpio-ucb1400.o
+obj-$(CONFIG_GPIO_UNIPHIER) += gpio-uniphier.o
+obj-$(CONFIG_GPIO_VF610) += gpio-vf610.o
+obj-$(CONFIG_GPIO_VIPERBOARD) += gpio-viperboard.o
+obj-$(CONFIG_GPIO_VR41XX) += gpio-vr41xx.o
+obj-$(CONFIG_GPIO_VX855) += gpio-vx855.o
+obj-$(CONFIG_GPIO_WHISKEY_COVE) += gpio-wcove.o
+obj-$(CONFIG_GPIO_WINBOND) += gpio-winbond.o
+obj-$(CONFIG_GPIO_WM831X) += gpio-wm831x.o
+obj-$(CONFIG_GPIO_WM8350) += gpio-wm8350.o
+obj-$(CONFIG_GPIO_WM8994) += gpio-wm8994.o
+obj-$(CONFIG_GPIO_WS16C48) += gpio-ws16c48.o
+obj-$(CONFIG_GPIO_XGENE) += gpio-xgene.o
+obj-$(CONFIG_GPIO_XGENE_SB) += gpio-xgene-sb.o
+obj-$(CONFIG_GPIO_XILINX) += gpio-xilinx.o
+obj-$(CONFIG_GPIO_XLP) += gpio-xlp.o
+obj-$(CONFIG_GPIO_XRA1403) += gpio-xra1403.o
+obj-$(CONFIG_GPIO_XTENSA) += gpio-xtensa.o
+obj-$(CONFIG_GPIO_ZEVIO) += gpio-zevio.o
+obj-$(CONFIG_GPIO_ZX) += gpio-zx.o
+obj-$(CONFIG_GPIO_ZYNQ) += gpio-zynq.o
diff --git a/drivers/gpio/TODO b/drivers/gpio/TODO
index 19d27c904916..9c048f10c9ad 100644
--- a/drivers/gpio/TODO
+++ b/drivers/gpio/TODO
@@ -90,6 +90,46 @@ GPIOLIB irqchip
The GPIOLIB irqchip is a helper irqchip for "simple cases" that should
try to cover any generic kind of irqchip cascaded from a GPIO.
+- Convert all the GPIOLIB_IRQCHIP users to pass an irqchip template,
+ parent and flags before calling [devm_]gpiochip_add[_data]().
+ Currently we set up the irqchip after setting up the gpiochip
+ using gpiochip_irqchip_add() and gpiochip_set_[chained|nested]_irqchip().
+ This is too complex, so convert all users over to just set up
+ the irqchip before registering the gpio_chip, typical example:
+
+ /* Typical state container with dynamic irqchip */
+ struct my_gpio {
+ struct gpio_chip gc;
+ struct irq_chip irq;
+ };
+
+ int irq; /* from platform etc */
+ struct my_gpio *g;
+ struct gpio_irq_chip *girq;
+
+ /* Set up the irqchip dynamically */
+ g->irq.name = "my_gpio_irq";
+ g->irq.irq_ack = my_gpio_ack_irq;
+ g->irq.irq_mask = my_gpio_mask_irq;
+ g->irq.irq_unmask = my_gpio_unmask_irq;
+ g->irq.irq_set_type = my_gpio_set_irq_type;
+
+ /* Get a pointer to the gpio_irq_chip */
+ girq = &g->gc.irq;
+ girq->chip = &g->irq;
+ girq->parent_handler = ftgpio_gpio_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(dev, 1, sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_bad_irq;
+ girq->parents[0] = irq;
+
+ When this is done, we will delete the old APIs for instantiating
+ GPIOLIB_IRQCHIP and simplify the code.
+
- Look over and identify any remaining easily converted drivers and
dry-code conversions to gpiolib irqchip for maintainers to test
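
The TODO template above stops short of registration. As a sketch only, still
using the hypothetical my_gpio names and with "dev" assumed to be the probing
struct device, the conversion is completed by registering the chip after
gc.irq has been filled in:

    int ret;

    /*
     * With gc.irq populated before registration, adding the chip also
     * instantiates the cascaded irqchip; no gpiochip_irqchip_add() or
     * gpiochip_set_chained_irqchip() calls are needed afterwards.
     */
    ret = devm_gpiochip_add_data(dev, &g->gc, g);
    if (ret)
            return ret;

The gpio-altera and gpio-ath79 conversions below follow exactly this shape.
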
diff --git a/drivers/gpio/gpio-altera.c b/drivers/gpio/gpio-altera.c
index e088b908c2c1..9f2e6b04c361 100644
--- a/drivers/gpio/gpio-altera.c
+++ b/drivers/gpio/gpio-altera.c
@@ -30,6 +30,7 @@ struct altera_gpio_chip {
raw_spinlock_t gpio_lock;
int interrupt_trigger;
int mapped_irq;
+ struct irq_chip irq_chip;
};
static void altera_gpio_irq_unmask(struct irq_data *d)
@@ -101,15 +102,6 @@ static unsigned int altera_gpio_irq_startup(struct irq_data *d)
return 0;
}
-static struct irq_chip altera_irq_chip = {
- .name = "altera-gpio",
- .irq_mask = altera_gpio_irq_mask,
- .irq_unmask = altera_gpio_irq_unmask,
- .irq_set_type = altera_gpio_irq_set_type,
- .irq_startup = altera_gpio_irq_startup,
- .irq_shutdown = altera_gpio_irq_mask,
-};
-
static int altera_gpio_get(struct gpio_chip *gc, unsigned offset)
{
struct of_mm_gpio_chip *mm_gc;
@@ -246,6 +238,7 @@ static int altera_gpio_probe(struct platform_device *pdev)
struct device_node *node = pdev->dev.of_node;
int reg, ret;
struct altera_gpio_chip *altera_gc;
+ struct gpio_irq_chip *girq;
altera_gc = devm_kzalloc(&pdev->dev, sizeof(*altera_gc), GFP_KERNEL);
if (!altera_gc)
@@ -273,50 +266,50 @@ static int altera_gpio_probe(struct platform_device *pdev)
altera_gc->mmchip.gc.owner = THIS_MODULE;
altera_gc->mmchip.gc.parent = &pdev->dev;
- ret = of_mm_gpiochip_add_data(node, &altera_gc->mmchip, altera_gc);
- if (ret) {
- dev_err(&pdev->dev, "Failed adding memory mapped gpiochip\n");
- return ret;
- }
-
- platform_set_drvdata(pdev, altera_gc);
-
altera_gc->mapped_irq = platform_get_irq(pdev, 0);
if (altera_gc->mapped_irq < 0)
goto skip_irq;
if (of_property_read_u32(node, "altr,interrupt-type", &reg)) {
- ret = -EINVAL;
dev_err(&pdev->dev,
"altr,interrupt-type value not set in device tree\n");
- goto teardown;
+ return -EINVAL;
}
altera_gc->interrupt_trigger = reg;
- ret = gpiochip_irqchip_add(&altera_gc->mmchip.gc, &altera_irq_chip, 0,
- handle_bad_irq, IRQ_TYPE_NONE);
+ altera_gc->irq_chip.name = "altera-gpio";
+ altera_gc->irq_chip.irq_mask = altera_gpio_irq_mask;
+ altera_gc->irq_chip.irq_unmask = altera_gpio_irq_unmask;
+ altera_gc->irq_chip.irq_set_type = altera_gpio_irq_set_type;
+ altera_gc->irq_chip.irq_startup = altera_gpio_irq_startup;
+ altera_gc->irq_chip.irq_shutdown = altera_gpio_irq_mask;
+
+ girq = &altera_gc->mmchip.gc.irq;
+ girq->chip = &altera_gc->irq_chip;
+ if (altera_gc->interrupt_trigger == IRQ_TYPE_LEVEL_HIGH)
+ girq->parent_handler = altera_gpio_irq_leveL_high_handler;
+ else
+ girq->parent_handler = altera_gpio_irq_edge_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(&pdev->dev, 1, sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_bad_irq;
+ girq->parents[0] = altera_gc->mapped_irq;
+skip_irq:
+ ret = of_mm_gpiochip_add_data(node, &altera_gc->mmchip, altera_gc);
if (ret) {
- dev_err(&pdev->dev, "could not add irqchip\n");
- goto teardown;
+ dev_err(&pdev->dev, "Failed adding memory mapped gpiochip\n");
+ return ret;
}
- gpiochip_set_chained_irqchip(&altera_gc->mmchip.gc,
- &altera_irq_chip,
- altera_gc->mapped_irq,
- altera_gc->interrupt_trigger == IRQ_TYPE_LEVEL_HIGH ?
- altera_gpio_irq_leveL_high_handler :
- altera_gpio_irq_edge_handler);
+ platform_set_drvdata(pdev, altera_gc);
-skip_irq:
return 0;
-teardown:
- of_mm_gpiochip_remove(&altera_gc->mmchip);
- pr_err("%pOF: registration failed with status %d\n",
- node, ret);
-
- return ret;
}
static int altera_gpio_remove(struct platform_device *pdev)
diff --git a/drivers/gpio/gpio-amd-fch.c b/drivers/gpio/gpio-amd-fch.c
index 38c3f4a3d4aa..181df1581df5 100644
--- a/drivers/gpio/gpio-amd-fch.c
+++ b/drivers/gpio/gpio-amd-fch.c
@@ -25,14 +25,13 @@
#define AMD_FCH_GPIO_FLAG_WRITE BIT(22)
#define AMD_FCH_GPIO_FLAG_READ BIT(16)
-static struct resource amd_fch_gpio_iores =
+static const struct resource amd_fch_gpio_iores =
DEFINE_RES_MEM_NAMED(
AMD_FCH_MMIO_BASE + AMD_FCH_GPIO_BANK0_BASE,
AMD_FCH_GPIO_SIZE,
"amd-fch-gpio-iomem");
struct amd_fch_gpio_priv {
- struct platform_device *pdev;
struct gpio_chip gc;
void __iomem *base;
struct amd_fch_gpio_pdata *pdata;
@@ -153,7 +152,6 @@ static int amd_fch_gpio_probe(struct platform_device *pdev)
return -ENOMEM;
priv->pdata = pdata;
- priv->pdev = pdev;
priv->gc.owner = THIS_MODULE;
priv->gc.parent = &pdev->dev;
diff --git a/drivers/gpio/gpio-amdpt.c b/drivers/gpio/gpio-amdpt.c
index ad255ba7ece9..44398992ae15 100644
--- a/drivers/gpio/gpio-amdpt.c
+++ b/drivers/gpio/gpio-amdpt.c
@@ -88,7 +88,7 @@ static int pt_gpio_probe(struct platform_device *pdev)
pt_gpio->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pt_gpio->reg_base)) {
- dev_err(&pdev->dev, "Failed to map MMIO resource for PT GPIO.\n");
+ dev_err(dev, "Failed to map MMIO resource for PT GPIO.\n");
return PTR_ERR(pt_gpio->reg_base);
}
@@ -98,7 +98,7 @@ static int pt_gpio_probe(struct platform_device *pdev)
pt_gpio->reg_base + PT_DIRECTION_REG, NULL,
BGPIOF_READ_OUTPUT_REG_SET);
if (ret) {
- dev_err(&pdev->dev, "bgpio_init failed\n");
+ dev_err(dev, "bgpio_init failed\n");
return ret;
}
@@ -107,11 +107,11 @@ static int pt_gpio_probe(struct platform_device *pdev)
pt_gpio->gc.free = pt_gpio_free;
pt_gpio->gc.ngpio = PT_TOTAL_GPIO;
#if defined(CONFIG_OF_GPIO)
- pt_gpio->gc.of_node = pdev->dev.of_node;
+ pt_gpio->gc.of_node = dev->of_node;
#endif
ret = gpiochip_add_data(&pt_gpio->gc, pt_gpio);
if (ret) {
- dev_err(&pdev->dev, "Failed to register GPIO lib\n");
+ dev_err(dev, "Failed to register GPIO lib\n");
return ret;
}
@@ -121,7 +121,7 @@ static int pt_gpio_probe(struct platform_device *pdev)
writel(0, pt_gpio->reg_base + PT_SYNC_REG);
writel(0, pt_gpio->reg_base + PT_CLOCKRATE_REG);
- dev_dbg(&pdev->dev, "PT GPIO driver loaded\n");
+ dev_dbg(dev, "PT GPIO driver loaded\n");
return ret;
}
diff --git a/drivers/gpio/gpio-ath79.c b/drivers/gpio/gpio-ath79.c
index 6c6dcda1100c..f1a5ea9b3de2 100644
--- a/drivers/gpio/gpio-ath79.c
+++ b/drivers/gpio/gpio-ath79.c
@@ -222,14 +222,16 @@ MODULE_DEVICE_TABLE(of, ath79_gpio_of_match);
static int ath79_gpio_probe(struct platform_device *pdev)
{
struct ath79_gpio_platform_data *pdata = dev_get_platdata(&pdev->dev);
- struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
struct ath79_gpio_ctrl *ctrl;
+ struct gpio_irq_chip *girq;
struct resource *res;
u32 ath79_gpio_count;
bool oe_inverted;
int err;
- ctrl = devm_kzalloc(&pdev->dev, sizeof(*ctrl), GFP_KERNEL);
+ ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
if (!ctrl)
return -ENOMEM;
platform_set_drvdata(pdev, ctrl);
@@ -237,7 +239,7 @@ static int ath79_gpio_probe(struct platform_device *pdev)
if (np) {
err = of_property_read_u32(np, "ngpios", &ath79_gpio_count);
if (err) {
- dev_err(&pdev->dev, "ngpios property is not valid\n");
+ dev_err(dev, "ngpios property is not valid\n");
return err;
}
oe_inverted = of_device_is_compatible(np, "qca,ar9340-gpio");
@@ -245,25 +247,24 @@ static int ath79_gpio_probe(struct platform_device *pdev)
ath79_gpio_count = pdata->ngpios;
oe_inverted = pdata->oe_inverted;
} else {
- dev_err(&pdev->dev, "No DT node or platform data found\n");
+ dev_err(dev, "No DT node or platform data found\n");
return -EINVAL;
}
if (ath79_gpio_count >= 32) {
- dev_err(&pdev->dev, "ngpios must be less than 32\n");
+ dev_err(dev, "ngpios must be less than 32\n");
return -EINVAL;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -EINVAL;
- ctrl->base = devm_ioremap_nocache(
- &pdev->dev, res->start, resource_size(res));
+ ctrl->base = devm_ioremap_nocache(dev, res->start, resource_size(res));
if (!ctrl->base)
return -ENOMEM;
raw_spin_lock_init(&ctrl->lock);
- err = bgpio_init(&ctrl->gc, &pdev->dev, 4,
+ err = bgpio_init(&ctrl->gc, dev, 4,
ctrl->base + AR71XX_GPIO_REG_IN,
ctrl->base + AR71XX_GPIO_REG_SET,
ctrl->base + AR71XX_GPIO_REG_CLEAR,
@@ -271,45 +272,33 @@ static int ath79_gpio_probe(struct platform_device *pdev)
oe_inverted ? ctrl->base + AR71XX_GPIO_REG_OE : NULL,
0);
if (err) {
- dev_err(&pdev->dev, "bgpio_init failed\n");
+ dev_err(dev, "bgpio_init failed\n");
return err;
}
/* Use base 0 to stay compatible with legacy platforms */
ctrl->gc.base = 0;
- err = gpiochip_add_data(&ctrl->gc, ctrl);
- if (err) {
- dev_err(&pdev->dev,
- "cannot add AR71xx GPIO chip, error=%d", err);
- return err;
+ /* Optional interrupt setup */
+ if (!np || of_property_read_bool(np, "interrupt-controller")) {
+ girq = &ctrl->gc.irq;
+ girq->chip = &ath79_gpio_irqchip;
+ girq->parent_handler = ath79_gpio_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(dev, 1, sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+ girq->parents[0] = platform_get_irq(pdev, 0);
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_simple_irq;
}
- if (np && !of_property_read_bool(np, "interrupt-controller"))
- return 0;
-
- err = gpiochip_irqchip_add(&ctrl->gc, &ath79_gpio_irqchip, 0,
- handle_simple_irq, IRQ_TYPE_NONE);
+ err = devm_gpiochip_add_data(dev, &ctrl->gc, ctrl);
if (err) {
- dev_err(&pdev->dev, "failed to add gpiochip_irqchip\n");
- goto gpiochip_remove;
+ dev_err(dev,
+ "cannot add AR71xx GPIO chip, error=%d", err);
+ return err;
}
-
- gpiochip_set_chained_irqchip(&ctrl->gc, &ath79_gpio_irqchip,
- platform_get_irq(pdev, 0),
- ath79_gpio_irq_handler);
-
- return 0;
-
-gpiochip_remove:
- gpiochip_remove(&ctrl->gc);
- return err;
-}
-
-static int ath79_gpio_remove(struct platform_device *pdev)
-{
- struct ath79_gpio_ctrl *ctrl = platform_get_drvdata(pdev);
-
- gpiochip_remove(&ctrl->gc);
return 0;
}
@@ -319,7 +308,6 @@ static struct platform_driver ath79_gpio_driver = {
.of_match_table = ath79_gpio_of_match,
},
.probe = ath79_gpio_probe,
- .remove = ath79_gpio_remove,
};
module_platform_driver(ath79_gpio_driver);
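
The ath79 change above illustrates the conversion pattern repeated by several drivers later in this series (ftgpio010, pl061, gpio-siox): the irqchip is described in the gpio_chip's struct gpio_irq_chip before registration, so devm_gpiochip_add_data() registers and tears down chip and irqchip together and the explicit .remove callback disappears. A minimal sketch of that shape; the foo_* names are hypothetical and not from this patch:

static int foo_gpio_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct foo_gpio *g;			/* assumed: wraps a struct gpio_chip gc */
	struct gpio_irq_chip *girq;
	int irq;

	g = devm_kzalloc(dev, sizeof(*g), GFP_KERNEL);
	if (!g)
		return -ENOMEM;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* ... fill in g->gc: label, ngpio, get/set/direction callbacks ... */

	/* Describe the irqchip up front instead of calling
	 * gpiochip_irqchip_add() + gpiochip_set_chained_irqchip() afterwards. */
	girq = &g->gc.irq;
	girq->chip = &foo_irqchip;		/* assumed struct irq_chip */
	girq->parent_handler = foo_irq_handler;	/* assumed chained handler */
	girq->num_parents = 1;
	girq->parents = devm_kcalloc(dev, 1, sizeof(*girq->parents), GFP_KERNEL);
	if (!girq->parents)
		return -ENOMEM;
	girq->parents[0] = irq;
	girq->default_type = IRQ_TYPE_NONE;
	girq->handler = handle_bad_irq;

	/* Registers chip and irqchip; devm unwinds both on failure or unbind. */
	return devm_gpiochip_add_data(dev, &g->gc, g);
}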
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index 3bbf5804bd11..fc494a84a29d 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -297,7 +297,7 @@ static int davinci_gpio_probe(struct platform_device *pdev)
static void gpio_irq_disable(struct irq_data *d)
{
struct davinci_gpio_regs __iomem *g = irq2regs(d);
- u32 mask = (u32) irq_data_get_irq_handler_data(d);
+ uintptr_t mask = (uintptr_t)irq_data_get_irq_handler_data(d);
writel_relaxed(mask, &g->clr_falling);
writel_relaxed(mask, &g->clr_rising);
@@ -306,7 +306,7 @@ static void gpio_irq_disable(struct irq_data *d)
static void gpio_irq_enable(struct irq_data *d)
{
struct davinci_gpio_regs __iomem *g = irq2regs(d);
- u32 mask = (u32) irq_data_get_irq_handler_data(d);
+ uintptr_t mask = (uintptr_t)irq_data_get_irq_handler_data(d);
unsigned status = irqd_get_trigger_type(d);
status &= IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING;
@@ -447,7 +447,7 @@ davinci_gpio_irq_map(struct irq_domain *d, unsigned int irq,
"davinci_gpio");
irq_set_irq_type(irq, IRQ_TYPE_NONE);
irq_set_chip_data(irq, (__force void *)g);
- irq_set_handler_data(irq, (void *)__gpio_mask(hw));
+ irq_set_handler_data(irq, (void *)(uintptr_t)__gpio_mask(hw));
return 0;
}
@@ -632,6 +632,7 @@ done:
static const struct of_device_id davinci_gpio_ids[] = {
{ .compatible = "ti,keystone-gpio", keystone_gpio_get_irq_chip},
+ { .compatible = "ti,am654-gpio", keystone_gpio_get_irq_chip},
{ .compatible = "ti,dm6441-gpio", davinci_gpio_get_irq_chip},
{ /* sentinel */ },
};
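
The davinci hunks above avoid pointer-size warnings by round-tripping the per-line mask through uintptr_t instead of u32. The idiom, shown in isolation (illustrative only, using the identifiers from the hunks):

	/* store a small integer in the handler-data pointer ... */
	irq_set_handler_data(irq, (void *)(uintptr_t)__gpio_mask(hw));

	/* ... and recover it without truncation on 64-bit builds */
	uintptr_t mask = (uintptr_t)irq_data_get_irq_handler_data(d);
	writel_relaxed(mask, &g->clr_falling);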
diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c
index 77092268ee95..7b9ac4a12c20 100644
--- a/drivers/gpio/gpio-eic-sprd.c
+++ b/drivers/gpio/gpio-eic-sprd.c
@@ -568,7 +568,6 @@ static int sprd_eic_probe(struct platform_device *pdev)
const struct sprd_eic_variant_data *pdata;
struct gpio_irq_chip *irq;
struct sprd_eic *sprd_eic;
- struct resource *res;
int ret, i;
pdata = of_device_get_match_data(&pdev->dev);
@@ -597,13 +596,9 @@ static int sprd_eic_probe(struct platform_device *pdev)
* have one bank EIC, thus base[1] and base[2] can be
* optional.
*/
- res = platform_get_resource(pdev, IORESOURCE_MEM, i);
- if (!res)
- continue;
-
- sprd_eic->base[i] = devm_ioremap_resource(&pdev->dev, res);
+ sprd_eic->base[i] = devm_platform_ioremap_resource(pdev, i);
if (IS_ERR(sprd_eic->base[i]))
- return PTR_ERR(sprd_eic->base[i]);
+ continue;
}
sprd_eic->chip.label = sprd_eic_label_name[sprd_eic->type];
diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
index 84a7375cee0a..b6af705a4e5f 100644
--- a/drivers/gpio/gpio-em.c
+++ b/drivers/gpio/gpio-em.c
@@ -270,10 +270,8 @@ static int em_gio_probe(struct platform_device *pdev)
int ret;
p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
- if (!p) {
- ret = -ENOMEM;
- goto err0;
- }
+ if (!p)
+ return -ENOMEM;
p->pdev = pdev;
platform_set_drvdata(pdev, p);
@@ -286,30 +284,22 @@ static int em_gio_probe(struct platform_device *pdev)
if (!io[0] || !io[1] || !irq[0] || !irq[1]) {
dev_err(&pdev->dev, "missing IRQ or IOMEM\n");
- ret = -EINVAL;
- goto err0;
+ return -EINVAL;
}
p->base0 = devm_ioremap_nocache(&pdev->dev, io[0]->start,
resource_size(io[0]));
- if (!p->base0) {
- dev_err(&pdev->dev, "failed to remap low I/O memory\n");
- ret = -ENXIO;
- goto err0;
- }
+ if (!p->base0)
+ return -ENOMEM;
p->base1 = devm_ioremap_nocache(&pdev->dev, io[1]->start,
resource_size(io[1]));
- if (!p->base1) {
- dev_err(&pdev->dev, "failed to remap high I/O memory\n");
- ret = -ENXIO;
- goto err0;
- }
+ if (!p->base1)
+ return -ENOMEM;
if (of_property_read_u32(pdev->dev.of_node, "ngpios", &ngpios)) {
dev_err(&pdev->dev, "Missing ngpios OF property\n");
- ret = -EINVAL;
- goto err0;
+ return -EINVAL;
}
gpio_chip = &p->gpio_chip;
@@ -339,9 +329,8 @@ static int em_gio_probe(struct platform_device *pdev)
p->irq_domain = irq_domain_add_simple(pdev->dev.of_node, ngpios, 0,
&em_gio_irq_domain_ops, p);
if (!p->irq_domain) {
- ret = -ENXIO;
dev_err(&pdev->dev, "cannot initialize irq domain\n");
- goto err0;
+ return -ENXIO;
}
if (devm_request_irq(&pdev->dev, irq[0]->start,
@@ -358,7 +347,7 @@ static int em_gio_probe(struct platform_device *pdev)
goto err1;
}
- ret = gpiochip_add_data(gpio_chip, p);
+ ret = devm_gpiochip_add_data(&pdev->dev, gpio_chip, p);
if (ret) {
dev_err(&pdev->dev, "failed to add GPIO controller\n");
goto err1;
@@ -368,7 +357,6 @@ static int em_gio_probe(struct platform_device *pdev)
err1:
irq_domain_remove(p->irq_domain);
-err0:
return ret;
}
@@ -376,8 +364,6 @@ static int em_gio_remove(struct platform_device *pdev)
{
struct em_gio_priv *p = platform_get_drvdata(pdev);
- gpiochip_remove(&p->gpio_chip);
-
irq_domain_remove(p->irq_domain);
return 0;
}
diff --git a/drivers/gpio/gpio-ep93xx.c b/drivers/gpio/gpio-ep93xx.c
index 71728d6e0bca..a90870a60c15 100644
--- a/drivers/gpio/gpio-ep93xx.c
+++ b/drivers/gpio/gpio-ep93xx.c
@@ -393,16 +393,13 @@ static int ep93xx_gpio_add_bank(struct gpio_chip *gc, struct device *dev,
static int ep93xx_gpio_probe(struct platform_device *pdev)
{
struct ep93xx_gpio *epg;
- struct resource *res;
int i;
- struct device *dev = &pdev->dev;
- epg = devm_kzalloc(dev, sizeof(*epg), GFP_KERNEL);
+ epg = devm_kzalloc(&pdev->dev, sizeof(*epg), GFP_KERNEL);
if (!epg)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- epg->base = devm_ioremap_resource(dev, res);
+ epg->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(epg->base))
return PTR_ERR(epg->base);
diff --git a/drivers/gpio/gpio-ftgpio010.c b/drivers/gpio/gpio-ftgpio010.c
index 8ff8ce2970d9..250e71f3e688 100644
--- a/drivers/gpio/gpio-ftgpio010.c
+++ b/drivers/gpio/gpio-ftgpio010.c
@@ -226,6 +226,7 @@ static int ftgpio_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ftgpio_gpio *g;
+ struct gpio_irq_chip *girq;
int irq;
int ret;
@@ -277,6 +278,24 @@ static int ftgpio_gpio_probe(struct platform_device *pdev)
if (!IS_ERR(g->clk))
g->gc.set_config = ftgpio_gpio_set_config;
+ g->irq.name = "FTGPIO010";
+ g->irq.irq_ack = ftgpio_gpio_ack_irq;
+ g->irq.irq_mask = ftgpio_gpio_mask_irq;
+ g->irq.irq_unmask = ftgpio_gpio_unmask_irq;
+ g->irq.irq_set_type = ftgpio_gpio_set_irq_type;
+
+ girq = &g->gc.irq;
+ girq->chip = &g->irq;
+ girq->parent_handler = ftgpio_gpio_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(dev, 1, sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_bad_irq;
+ girq->parents[0] = irq;
+
ret = devm_gpiochip_add_data(dev, &g->gc, g);
if (ret)
goto dis_clk;
@@ -289,22 +308,6 @@ static int ftgpio_gpio_probe(struct platform_device *pdev)
/* Clear any use of debounce */
writel(0x0, g->base + GPIO_DEBOUNCE_EN);
- g->irq.name = "FTGPIO010";
- g->irq.irq_ack = ftgpio_gpio_ack_irq;
- g->irq.irq_mask = ftgpio_gpio_mask_irq;
- g->irq.irq_unmask = ftgpio_gpio_unmask_irq;
- g->irq.irq_set_type = ftgpio_gpio_set_irq_type;
-
- ret = gpiochip_irqchip_add(&g->gc, &g->irq,
- 0, handle_bad_irq,
- IRQ_TYPE_NONE);
- if (ret) {
- dev_info(dev, "could not add irqchip\n");
- goto dis_clk;
- }
- gpiochip_set_chained_irqchip(&g->gc, &g->irq,
- irq, ftgpio_gpio_irq_handler);
-
platform_set_drvdata(pdev, g);
dev_info(dev, "FTGPIO010 @%p registered\n", g->base);
diff --git a/drivers/gpio/gpio-grgpio.c b/drivers/gpio/gpio-grgpio.c
index 7df48e76baea..0937b605e134 100644
--- a/drivers/gpio/gpio-grgpio.c
+++ b/drivers/gpio/gpio-grgpio.c
@@ -329,7 +329,6 @@ static int grgpio_probe(struct platform_device *ofdev)
void __iomem *regs;
struct gpio_chip *gc;
struct grgpio_priv *priv;
- struct resource *res;
int err;
u32 prop;
s32 *irqmap;
@@ -340,8 +339,7 @@ static int grgpio_probe(struct platform_device *ofdev)
if (!priv)
return -ENOMEM;
- res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
- regs = devm_ioremap_resource(&ofdev->dev, res);
+ regs = devm_platform_ioremap_resource(ofdev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);
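
Several hunks in this series (amdpt, eic-sprd, ep93xx, grgpio, vr41xx) are the same mechanical conversion: devm_platform_ioremap_resource() folds platform_get_resource() plus devm_ioremap_resource() into one call that returns an ERR_PTR on any failure. Sketch of the before/after shape:

	/* before */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	regs = devm_ioremap_resource(&pdev->dev, res);

	/* after */
	regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(regs))
		return PTR_ERR(regs);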
diff --git a/drivers/gpio/gpio-ixp4xx.c b/drivers/gpio/gpio-ixp4xx.c
index 4b1cf7ea858d..670c2a85a35b 100644
--- a/drivers/gpio/gpio-ixp4xx.c
+++ b/drivers/gpio/gpio-ixp4xx.c
@@ -205,20 +205,20 @@ static int ixp4xx_gpio_irq_domain_translate(struct irq_domain *domain,
unsigned long *hwirq,
unsigned int *type)
{
+ int ret;
/* We support standard DT translation */
if (is_of_node(fwspec->fwnode) && fwspec->param_count == 2) {
- *hwirq = fwspec->param[0];
- *type = fwspec->param[1];
- return 0;
+ return irq_domain_translate_twocell(domain, fwspec,
+ hwirq, type);
}
/* This goes away when we transition to DT */
if (is_fwnode_irqchip(fwspec->fwnode)) {
- if (fwspec->param_count != 2)
- return -EINVAL;
- *hwirq = fwspec->param[0];
- *type = fwspec->param[1];
+ ret = irq_domain_translate_twocell(domain, fwspec,
+ hwirq, type);
+ if (ret)
+ return ret;
WARN_ON(*type == IRQ_TYPE_NONE);
return 0;
}
diff --git a/drivers/gpio/gpio-janz-ttl.c b/drivers/gpio/gpio-janz-ttl.c
index 6b5b5a8b9173..cdf50e4ea165 100644
--- a/drivers/gpio/gpio-janz-ttl.c
+++ b/drivers/gpio/gpio-janz-ttl.c
@@ -140,18 +140,17 @@ static void ttl_setup_device(struct ttl_module *mod)
static int ttl_probe(struct platform_device *pdev)
{
struct janz_platform_data *pdata;
- struct device *dev = &pdev->dev;
struct ttl_module *mod;
struct gpio_chip *gpio;
int ret;
pdata = dev_get_platdata(&pdev->dev);
if (!pdata) {
- dev_err(dev, "no platform data\n");
+ dev_err(&pdev->dev, "no platform data\n");
return -ENXIO;
}
- mod = devm_kzalloc(dev, sizeof(*mod), GFP_KERNEL);
+ mod = devm_kzalloc(&pdev->dev, sizeof(*mod), GFP_KERNEL);
if (!mod)
return -ENOMEM;
@@ -177,9 +176,9 @@ static int ttl_probe(struct platform_device *pdev)
gpio->base = -1;
gpio->ngpio = 20;
- ret = devm_gpiochip_add_data(dev, gpio, NULL);
+ ret = devm_gpiochip_add_data(&pdev->dev, gpio, NULL);
if (ret) {
- dev_err(dev, "unable to add GPIO chip\n");
+ dev_err(&pdev->dev, "unable to add GPIO chip\n");
return ret;
}
diff --git a/drivers/gpio/gpio-madera.c b/drivers/gpio/gpio-madera.c
index c9dad0543672..4dbc837d1215 100644
--- a/drivers/gpio/gpio-madera.c
+++ b/drivers/gpio/gpio-madera.c
@@ -1,12 +1,8 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
/*
* GPIO support for Cirrus Logic Madera codecs
*
* Copyright (C) 2015-2018 Cirrus Logic
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by the
- * Free Software Foundation; version 2.
*/
#include <linux/gpio/driver.h>
diff --git a/drivers/gpio/gpio-max732x.c b/drivers/gpio/gpio-max732x.c
index 5e4102e7b1f9..5fb0bcf31142 100644
--- a/drivers/gpio/gpio-max732x.c
+++ b/drivers/gpio/gpio-max732x.c
@@ -649,12 +649,12 @@ static int max732x_probe(struct i2c_client *client,
case 0x60:
chip->client_group_a = client;
if (nr_port > 8) {
- c = i2c_new_dummy(client->adapter, addr_b);
- if (!c) {
+ c = devm_i2c_new_dummy_device(&client->dev,
+ client->adapter, addr_b);
+ if (IS_ERR(c)) {
dev_err(&client->dev,
"Failed to allocate I2C device\n");
- ret = -ENODEV;
- goto out_failed;
+ return PTR_ERR(c);
}
chip->client_group_b = chip->client_dummy = c;
}
@@ -662,12 +662,12 @@ static int max732x_probe(struct i2c_client *client,
case 0x50:
chip->client_group_b = client;
if (nr_port > 8) {
- c = i2c_new_dummy(client->adapter, addr_a);
- if (!c) {
+ c = devm_i2c_new_dummy_device(&client->dev,
+ client->adapter, addr_a);
+ if (IS_ERR(c)) {
dev_err(&client->dev,
"Failed to allocate I2C device\n");
- ret = -ENODEV;
- goto out_failed;
+ return PTR_ERR(c);
}
chip->client_group_a = chip->client_dummy = c;
}
@@ -675,37 +675,33 @@ static int max732x_probe(struct i2c_client *client,
default:
dev_err(&client->dev, "invalid I2C address specified %02x\n",
client->addr);
- ret = -EINVAL;
- goto out_failed;
+ return -EINVAL;
}
if (nr_port > 8 && !chip->client_dummy) {
dev_err(&client->dev,
"Failed to allocate second group I2C device\n");
- ret = -ENODEV;
- goto out_failed;
+ return -ENODEV;
}
mutex_init(&chip->lock);
ret = max732x_readb(chip, is_group_a(chip, 0), &chip->reg_out[0]);
if (ret)
- goto out_failed;
+ return ret;
if (nr_port > 8) {
ret = max732x_readb(chip, is_group_a(chip, 8), &chip->reg_out[1]);
if (ret)
- goto out_failed;
+ return ret;
}
- ret = gpiochip_add_data(&chip->gpio_chip, chip);
+ ret = devm_gpiochip_add_data(&client->dev, &chip->gpio_chip, chip);
if (ret)
- goto out_failed;
+ return ret;
ret = max732x_irq_setup(chip, id);
- if (ret) {
- gpiochip_remove(&chip->gpio_chip);
- goto out_failed;
- }
+ if (ret)
+ return ret;
if (pdata && pdata->setup) {
ret = pdata->setup(client, chip->gpio_chip.base,
@@ -716,10 +712,6 @@ static int max732x_probe(struct i2c_client *client,
i2c_set_clientdata(client, chip);
return 0;
-
-out_failed:
- i2c_unregister_device(chip->client_dummy);
- return ret;
}
static int max732x_remove(struct i2c_client *client)
@@ -739,11 +731,6 @@ static int max732x_remove(struct i2c_client *client)
}
}
- gpiochip_remove(&chip->gpio_chip);
-
- /* unregister any dummy i2c_client */
- i2c_unregister_device(chip->client_dummy);
-
return 0;
}
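
The max732x conversion works because devm_i2c_new_dummy_device() differs from the old i2c_new_dummy() in two ways: it reports failure with an ERR_PTR instead of NULL, and the dummy client is unregistered automatically, which is what lets the out_failed unwinding and the remove-path i2c_unregister_device() call go away. The resulting call pattern, sketched with addr_b as in the hunk above:

	c = devm_i2c_new_dummy_device(&client->dev, client->adapter, addr_b);
	if (IS_ERR(c))
		return PTR_ERR(c);	/* no manual i2c_unregister_device() later */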
diff --git a/drivers/gpio/gpio-mb86s7x.c b/drivers/gpio/gpio-mb86s7x.c
index 9bfff171f9fe..8f466993cd24 100644
--- a/drivers/gpio/gpio-mb86s7x.c
+++ b/drivers/gpio/gpio-mb86s7x.c
@@ -6,6 +6,7 @@
* Copyright (C) 2015 Linaro Ltd.
*/
+#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/clk.h>
@@ -19,6 +20,8 @@
#include <linux/spinlock.h>
#include <linux/slab.h>
+#include "gpiolib.h"
+
/*
* Only first 8bits of a register correspond to each pin,
* so there are 4 registers for 32 pins.
@@ -135,6 +138,20 @@ static void mb86s70_gpio_set(struct gpio_chip *gc, unsigned gpio, int value)
spin_unlock_irqrestore(&gchip->lock, flags);
}
+static int mb86s70_gpio_to_irq(struct gpio_chip *gc, unsigned int offset)
+{
+ int irq, index;
+
+ for (index = 0;; index++) {
+ irq = platform_get_irq(to_platform_device(gc->parent), index);
+ if (irq <= 0)
+ break;
+ if (irq_get_irq_data(irq)->hwirq == offset)
+ return irq;
+ }
+ return -EINVAL;
+}
+
static int mb86s70_gpio_probe(struct platform_device *pdev)
{
struct mb86s70_gpio_chip *gchip;
@@ -150,13 +167,15 @@ static int mb86s70_gpio_probe(struct platform_device *pdev)
if (IS_ERR(gchip->base))
return PTR_ERR(gchip->base);
- gchip->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(gchip->clk))
- return PTR_ERR(gchip->clk);
+ if (!has_acpi_companion(&pdev->dev)) {
+ gchip->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(gchip->clk))
+ return PTR_ERR(gchip->clk);
- ret = clk_prepare_enable(gchip->clk);
- if (ret)
- return ret;
+ ret = clk_prepare_enable(gchip->clk);
+ if (ret)
+ return ret;
+ }
spin_lock_init(&gchip->lock);
@@ -172,19 +191,28 @@ static int mb86s70_gpio_probe(struct platform_device *pdev)
gchip->gc.parent = &pdev->dev;
gchip->gc.base = -1;
+ if (has_acpi_companion(&pdev->dev))
+ gchip->gc.to_irq = mb86s70_gpio_to_irq;
+
ret = gpiochip_add_data(&gchip->gc, gchip);
if (ret) {
dev_err(&pdev->dev, "couldn't register gpio driver\n");
clk_disable_unprepare(gchip->clk);
+ return ret;
}
- return ret;
+ if (has_acpi_companion(&pdev->dev))
+ acpi_gpiochip_request_interrupts(&gchip->gc);
+
+ return 0;
}
static int mb86s70_gpio_remove(struct platform_device *pdev)
{
struct mb86s70_gpio_chip *gchip = platform_get_drvdata(pdev);
+ if (has_acpi_companion(&pdev->dev))
+ acpi_gpiochip_free_interrupts(&gchip->gc);
gpiochip_remove(&gchip->gc);
clk_disable_unprepare(gchip->clk);
@@ -197,10 +225,19 @@ static const struct of_device_id mb86s70_gpio_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, mb86s70_gpio_dt_ids);
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id mb86s70_gpio_acpi_ids[] = {
+ { "SCX0007" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(acpi, mb86s70_gpio_acpi_ids);
+#endif
+
static struct platform_driver mb86s70_gpio_driver = {
.driver = {
.name = "mb86s70-gpio",
.of_match_table = mb86s70_gpio_dt_ids,
+ .acpi_match_table = ACPI_PTR(mb86s70_gpio_acpi_ids),
},
.probe = mb86s70_gpio_probe,
.remove = mb86s70_gpio_remove,
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index b6a4efce7c92..f1a9c0544e3f 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -315,7 +315,6 @@ static void gpio_mockup_debugfs_setup(struct device *dev,
struct gpio_mockup_chip *chip)
{
struct gpio_mockup_dbgfs_private *priv;
- struct dentry *evfile;
struct gpio_chip *gc;
const char *devname;
char *name;
@@ -325,32 +324,25 @@ static void gpio_mockup_debugfs_setup(struct device *dev,
devname = dev_name(&gc->gpiodev->dev);
chip->dbg_dir = debugfs_create_dir(devname, gpio_mockup_dbg_dir);
- if (IS_ERR_OR_NULL(chip->dbg_dir))
- goto err;
for (i = 0; i < gc->ngpio; i++) {
name = devm_kasprintf(dev, GFP_KERNEL, "%d", i);
if (!name)
- goto err;
+ return;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
- goto err;
+ return;
priv->chip = chip;
priv->offset = i;
priv->desc = &gc->gpiodev->descs[i];
- evfile = debugfs_create_file(name, 0200, chip->dbg_dir, priv,
- &gpio_mockup_debugfs_ops);
- if (IS_ERR_OR_NULL(evfile))
- goto err;
+ debugfs_create_file(name, 0200, chip->dbg_dir, priv,
+ &gpio_mockup_debugfs_ops);
}
return;
-
-err:
- dev_err(dev, "error creating debugfs files\n");
}
static int gpio_mockup_name_lines(struct device *dev,
@@ -447,8 +439,7 @@ static int gpio_mockup_probe(struct platform_device *pdev)
if (rv)
return rv;
- if (!IS_ERR_OR_NULL(gpio_mockup_dbg_dir))
- gpio_mockup_debugfs_setup(dev, chip);
+ gpio_mockup_debugfs_setup(dev, chip);
return 0;
}
@@ -501,8 +492,6 @@ static int __init gpio_mockup_init(void)
}
gpio_mockup_dbg_dir = debugfs_create_dir("gpio-mockup", NULL);
- if (IS_ERR_OR_NULL(gpio_mockup_dbg_dir))
- gpio_mockup_err("error creating debugfs directory\n");
err = platform_driver_register(&gpio_mockup_driver);
if (err) {
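
The gpio-mockup cleanup leans on the debugfs convention that creation helpers may be called unconditionally: they accept an ERR_PTR parent and callers are not expected to check their return values, so the error paths above simply disappear. A sketch of the resulting style (the names are hypothetical):

	dir = debugfs_create_dir("foo", NULL);		/* no IS_ERR_OR_NULL() check */
	debugfs_create_file("events", 0200, dir, priv, &foo_fops);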
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 059094ac44cb..869d47f89599 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -38,6 +38,7 @@
#include <linux/err.h>
#include <linux/gpio/driver.h>
#include <linux/gpio/consumer.h>
+#include <linux/gpio/machine.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/irq.h>
@@ -618,18 +619,14 @@ static int mvebu_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
ret = -EBUSY;
} else {
desc = gpiochip_request_own_desc(&mvchip->chip,
- pwm->hwpwm, "mvebu-pwm", 0);
+ pwm->hwpwm, "mvebu-pwm",
+ GPIO_ACTIVE_HIGH,
+ GPIOD_OUT_LOW);
if (IS_ERR(desc)) {
ret = PTR_ERR(desc);
goto out;
}
- ret = gpiod_direction_output(desc, 0);
- if (ret) {
- gpiochip_free_own_desc(desc);
- goto out;
- }
-
mvpwm->gpiod = desc;
}
out:
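
The mvebu hunk relies on the extended gpiochip_request_own_desc() signature, which takes lookup flags and an initial gpiod_flags state, so the line can be requested already driven low and the separate gpiod_direction_output() call is no longer needed. Sketch of the call, with arguments as in the hunk above:

	desc = gpiochip_request_own_desc(&mvchip->chip, pwm->hwpwm, "mvebu-pwm",
					 GPIO_ACTIVE_HIGH, GPIOD_OUT_LOW);
	if (IS_ERR(desc))
		return PTR_ERR(desc);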
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 9276ef616430..d0f27084a942 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -44,8 +44,9 @@ struct gpio_regs {
};
struct gpio_bank {
- struct list_head node;
void __iomem *base;
+ const struct omap_gpio_reg_offs *regs;
+
int irq;
u32 non_wakeup_gpios;
u32 enabled_non_wakeup_gpios;
@@ -72,11 +73,7 @@ struct gpio_bank {
int context_loss_count;
void (*set_dataout)(struct gpio_bank *bank, unsigned gpio, int enable);
- void (*set_dataout_multiple)(struct gpio_bank *bank,
- unsigned long *mask, unsigned long *bits);
int (*get_context_loss_count)(struct device *dev);
-
- struct omap_gpio_reg_offs *regs;
};
#define GPIO_MOD_CTRL_BIT BIT(0)
@@ -92,20 +89,25 @@ static inline struct gpio_bank *omap_irq_data_get_bank(struct irq_data *d)
return gpiochip_get_data(chip);
}
-static void omap_set_gpio_direction(struct gpio_bank *bank, int gpio,
- int is_input)
+static inline u32 omap_gpio_rmw(void __iomem *reg, u32 mask, bool set)
{
- void __iomem *reg = bank->base;
- u32 l;
+ u32 val = readl_relaxed(reg);
- reg += bank->regs->direction;
- l = readl_relaxed(reg);
- if (is_input)
- l |= BIT(gpio);
+ if (set)
+ val |= mask;
else
- l &= ~(BIT(gpio));
- writel_relaxed(l, reg);
- bank->context.oe = l;
+ val &= ~mask;
+
+ writel_relaxed(val, reg);
+
+ return val;
+}
+
+static void omap_set_gpio_direction(struct gpio_bank *bank, int gpio,
+ int is_input)
+{
+ bank->context.oe = omap_gpio_rmw(bank->base + bank->regs->direction,
+ BIT(gpio), is_input);
}
@@ -131,88 +133,8 @@ static void omap_set_gpio_dataout_reg(struct gpio_bank *bank, unsigned offset,
static void omap_set_gpio_dataout_mask(struct gpio_bank *bank, unsigned offset,
int enable)
{
- void __iomem *reg = bank->base + bank->regs->dataout;
- u32 gpio_bit = BIT(offset);
- u32 l;
-
- l = readl_relaxed(reg);
- if (enable)
- l |= gpio_bit;
- else
- l &= ~gpio_bit;
- writel_relaxed(l, reg);
- bank->context.dataout = l;
-}
-
-static int omap_get_gpio_datain(struct gpio_bank *bank, int offset)
-{
- void __iomem *reg = bank->base + bank->regs->datain;
-
- return (readl_relaxed(reg) & (BIT(offset))) != 0;
-}
-
-static int omap_get_gpio_dataout(struct gpio_bank *bank, int offset)
-{
- void __iomem *reg = bank->base + bank->regs->dataout;
-
- return (readl_relaxed(reg) & (BIT(offset))) != 0;
-}
-
-/* set multiple data out values using dedicate set/clear register */
-static void omap_set_gpio_dataout_reg_multiple(struct gpio_bank *bank,
- unsigned long *mask,
- unsigned long *bits)
-{
- void __iomem *reg = bank->base;
- u32 l;
-
- l = *bits & *mask;
- writel_relaxed(l, reg + bank->regs->set_dataout);
- bank->context.dataout |= l;
-
- l = ~*bits & *mask;
- writel_relaxed(l, reg + bank->regs->clr_dataout);
- bank->context.dataout &= ~l;
-}
-
-/* set multiple data out values using mask register */
-static void omap_set_gpio_dataout_mask_multiple(struct gpio_bank *bank,
- unsigned long *mask,
- unsigned long *bits)
-{
- void __iomem *reg = bank->base + bank->regs->dataout;
- u32 l = (readl_relaxed(reg) & ~*mask) | (*bits & *mask);
-
- writel_relaxed(l, reg);
- bank->context.dataout = l;
-}
-
-static unsigned long omap_get_gpio_datain_multiple(struct gpio_bank *bank,
- unsigned long *mask)
-{
- void __iomem *reg = bank->base + bank->regs->datain;
-
- return readl_relaxed(reg) & *mask;
-}
-
-static unsigned long omap_get_gpio_dataout_multiple(struct gpio_bank *bank,
- unsigned long *mask)
-{
- void __iomem *reg = bank->base + bank->regs->dataout;
-
- return readl_relaxed(reg) & *mask;
-}
-
-static inline void omap_gpio_rmw(void __iomem *base, u32 reg, u32 mask, bool set)
-{
- int l = readl_relaxed(base + reg);
-
- if (set)
- l |= mask;
- else
- l &= ~mask;
-
- writel_relaxed(l, base + reg);
+ bank->context.dataout = omap_gpio_rmw(bank->base + bank->regs->dataout,
+ BIT(offset), enable);
}
static inline void omap_gpio_dbck_enable(struct gpio_bank *bank)
@@ -256,7 +178,6 @@ static inline void omap_gpio_dbck_disable(struct gpio_bank *bank)
static int omap2_set_gpio_debounce(struct gpio_bank *bank, unsigned offset,
unsigned debounce)
{
- void __iomem *reg;
u32 val;
u32 l;
bool enable = !!debounce;
@@ -273,19 +194,11 @@ static int omap2_set_gpio_debounce(struct gpio_bank *bank, unsigned offset,
l = BIT(offset);
clk_enable(bank->dbck);
- reg = bank->base + bank->regs->debounce;
- writel_relaxed(debounce, reg);
+ writel_relaxed(debounce, bank->base + bank->regs->debounce);
- reg = bank->base + bank->regs->debounce_en;
- val = readl_relaxed(reg);
-
- if (enable)
- val |= l;
- else
- val &= ~l;
+ val = omap_gpio_rmw(bank->base + bank->regs->debounce_en, l, enable);
bank->dbck_enable_mask = val;
- writel_relaxed(val, reg);
clk_disable(bank->dbck);
/*
* Enable debounce clock per module.
@@ -360,9 +273,9 @@ static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio,
void __iomem *base = bank->base;
u32 gpio_bit = BIT(gpio);
- omap_gpio_rmw(base, bank->regs->leveldetect0, gpio_bit,
+ omap_gpio_rmw(base + bank->regs->leveldetect0, gpio_bit,
trigger & IRQ_TYPE_LEVEL_LOW);
- omap_gpio_rmw(base, bank->regs->leveldetect1, gpio_bit,
+ omap_gpio_rmw(base + bank->regs->leveldetect1, gpio_bit,
trigger & IRQ_TYPE_LEVEL_HIGH);
/*
@@ -370,9 +283,9 @@ static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio,
* to be woken from idle state. Set the appropriate edge detection
* in addition to the level detection.
*/
- omap_gpio_rmw(base, bank->regs->risingdetect, gpio_bit,
+ omap_gpio_rmw(base + bank->regs->risingdetect, gpio_bit,
trigger & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_LEVEL_HIGH));
- omap_gpio_rmw(base, bank->regs->fallingdetect, gpio_bit,
+ omap_gpio_rmw(base + bank->regs->fallingdetect, gpio_bit,
trigger & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW));
bank->context.leveldetect0 =
@@ -384,11 +297,8 @@ static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio,
bank->context.fallingdetect =
readl_relaxed(bank->base + bank->regs->fallingdetect);
- if (likely(!(bank->non_wakeup_gpios & gpio_bit))) {
- omap_gpio_rmw(base, bank->regs->wkup_en, gpio_bit, trigger != 0);
- bank->context.wake_en =
- readl_relaxed(bank->base + bank->regs->wkup_en);
- }
+ bank->level_mask = bank->context.leveldetect0 |
+ bank->context.leveldetect1;
/* This part needs to be executed always for OMAP{34xx, 44xx} */
if (!bank->regs->irqctrl && !omap_gpio_is_off_wakeup_capable(bank, gpio)) {
@@ -403,44 +313,25 @@ static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio,
else
bank->enabled_non_wakeup_gpios &= ~gpio_bit;
}
-
- bank->level_mask =
- readl_relaxed(bank->base + bank->regs->leveldetect0) |
- readl_relaxed(bank->base + bank->regs->leveldetect1);
}
-#ifdef CONFIG_ARCH_OMAP1
/*
* This only applies to chips that can't do both rising and falling edge
* detection at once. For all other chips, this function is a noop.
*/
static void omap_toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio)
{
- void __iomem *reg = bank->base;
- u32 l = 0;
-
- if (!bank->regs->irqctrl)
- return;
+ if (IS_ENABLED(CONFIG_ARCH_OMAP1) && bank->regs->irqctrl) {
+ void __iomem *reg = bank->base + bank->regs->irqctrl;
- reg += bank->regs->irqctrl;
-
- l = readl_relaxed(reg);
- if ((l >> gpio) & 1)
- l &= ~(BIT(gpio));
- else
- l |= BIT(gpio);
-
- writel_relaxed(l, reg);
+ writel_relaxed(readl_relaxed(reg) ^ BIT(gpio), reg);
+ }
}
-#else
-static void omap_toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio) {}
-#endif
static int omap_set_gpio_triggering(struct gpio_bank *bank, int gpio,
unsigned trigger)
{
void __iomem *reg = bank->base;
- void __iomem *base = bank->base;
u32 l = 0;
if (bank->regs->leveldetect0 && bank->regs->wkup_en) {
@@ -472,11 +363,6 @@ static int omap_set_gpio_triggering(struct gpio_bank *bank, int gpio,
l |= 2 << (gpio << 1);
if (trigger & IRQ_TYPE_EDGE_FALLING)
l |= BIT(gpio << 1);
-
- /* Enable wake-up during idle for dynamic tick */
- omap_gpio_rmw(base, bank->regs->wkup_en, BIT(gpio), trigger);
- bank->context.wake_en =
- readl_relaxed(bank->base + bank->regs->wkup_en);
writel_relaxed(l, reg);
}
return 0;
@@ -505,17 +391,6 @@ static void omap_enable_gpio_module(struct gpio_bank *bank, unsigned offset)
static void omap_disable_gpio_module(struct gpio_bank *bank, unsigned offset)
{
- void __iomem *base = bank->base;
-
- if (bank->regs->wkup_en &&
- !LINE_USED(bank->mod_usage, offset) &&
- !LINE_USED(bank->irq_usage, offset)) {
- /* Disable wake-up during idle for dynamic tick */
- omap_gpio_rmw(base, bank->regs->wkup_en, BIT(offset), 0);
- bank->context.wake_en =
- readl_relaxed(bank->base + bank->regs->wkup_en);
- }
-
if (bank->regs->ctrl && !BANK_USED(bank)) {
void __iomem *reg = bank->base + bank->regs->ctrl;
u32 ctrl;
@@ -626,57 +501,39 @@ static u32 omap_get_gpio_irqbank_mask(struct gpio_bank *bank)
return l;
}
-static void omap_enable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
+static inline void omap_set_gpio_irqenable(struct gpio_bank *bank,
+ unsigned offset, int enable)
{
void __iomem *reg = bank->base;
- u32 l;
+ u32 gpio_mask = BIT(offset);
- if (bank->regs->set_irqenable) {
- reg += bank->regs->set_irqenable;
- l = gpio_mask;
- bank->context.irqenable1 |= gpio_mask;
+ if (bank->regs->set_irqenable && bank->regs->clr_irqenable) {
+ if (enable) {
+ reg += bank->regs->set_irqenable;
+ bank->context.irqenable1 |= gpio_mask;
+ } else {
+ reg += bank->regs->clr_irqenable;
+ bank->context.irqenable1 &= ~gpio_mask;
+ }
+ writel_relaxed(gpio_mask, reg);
} else {
- reg += bank->regs->irqenable;
- l = readl_relaxed(reg);
- if (bank->regs->irqenable_inv)
- l &= ~gpio_mask;
- else
- l |= gpio_mask;
- bank->context.irqenable1 = l;
+ bank->context.irqenable1 =
+ omap_gpio_rmw(reg + bank->regs->irqenable, gpio_mask,
+ enable ^ bank->regs->irqenable_inv);
}
- writel_relaxed(l, reg);
-}
-
-static void omap_disable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
-{
- void __iomem *reg = bank->base;
- u32 l;
-
- if (bank->regs->clr_irqenable) {
- reg += bank->regs->clr_irqenable;
- l = gpio_mask;
- bank->context.irqenable1 &= ~gpio_mask;
- } else {
- reg += bank->regs->irqenable;
- l = readl_relaxed(reg);
- if (bank->regs->irqenable_inv)
- l |= gpio_mask;
- else
- l &= ~gpio_mask;
- bank->context.irqenable1 = l;
+ /*
+ * Program GPIO wakeup along with IRQ enable to satisfy OMAP4430 TRM
+ * note requiring correlation between the IRQ enable registers and
+ * the wakeup registers. In any case, we want wakeup from idle
+ * enabled for the GPIOs which support this feature.
+ */
+ if (bank->regs->wkup_en &&
+ (bank->regs->edgectrl1 || !(bank->non_wakeup_gpios & gpio_mask))) {
+ bank->context.wake_en =
+ omap_gpio_rmw(bank->base + bank->regs->wkup_en,
+ gpio_mask, enable);
}
-
- writel_relaxed(l, reg);
-}
-
-static inline void omap_set_gpio_irqenable(struct gpio_bank *bank,
- unsigned offset, int enable)
-{
- if (enable)
- omap_enable_gpio_irqbank(bank, BIT(offset));
- else
- omap_disable_gpio_irqbank(bank, BIT(offset));
}
/* Use disable_irq_wake() and enable_irq_wake() functions from drivers */
@@ -687,38 +544,6 @@ static int omap_gpio_wake_enable(struct irq_data *d, unsigned int enable)
return irq_set_irq_wake(bank->irq, enable);
}
-static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
-{
- struct gpio_bank *bank = gpiochip_get_data(chip);
- unsigned long flags;
-
- pm_runtime_get_sync(chip->parent);
-
- raw_spin_lock_irqsave(&bank->lock, flags);
- omap_enable_gpio_module(bank, offset);
- bank->mod_usage |= BIT(offset);
- raw_spin_unlock_irqrestore(&bank->lock, flags);
-
- return 0;
-}
-
-static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
-{
- struct gpio_bank *bank = gpiochip_get_data(chip);
- unsigned long flags;
-
- raw_spin_lock_irqsave(&bank->lock, flags);
- bank->mod_usage &= ~(BIT(offset));
- if (!LINE_USED(bank->irq_usage, offset)) {
- omap_set_gpio_direction(bank, offset, 1);
- omap_clear_gpio_debounce(bank, offset);
- }
- omap_disable_gpio_module(bank, offset);
- raw_spin_unlock_irqrestore(&bank->lock, flags);
-
- pm_runtime_put(chip->parent);
-}
-
/*
* We need to unmask the GPIO bank interrupt as soon as possible to
* avoid missing GPIO interrupts for other lines in the bank.
@@ -731,7 +556,7 @@ static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank)
{
void __iomem *isr_reg = NULL;
- u32 enabled, isr, level_mask;
+ u32 enabled, isr, edge;
unsigned int bit;
struct gpio_bank *bank = gpiobank;
unsigned long wa_lock_flags;
@@ -751,16 +576,14 @@ static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank)
enabled = omap_get_gpio_irqbank_mask(bank);
isr = readl_relaxed(isr_reg) & enabled;
- if (bank->level_mask)
- level_mask = bank->level_mask & enabled;
- else
- level_mask = 0;
-
- /* clear edge sensitive interrupts before handler(s) are
- called so that we don't miss any interrupt occurred while
- executing them */
- if (isr & ~level_mask)
- omap_clear_gpio_irqbank(bank, isr & ~level_mask);
+ /*
+ * Clear edge sensitive interrupts before calling handler(s)
+ * so subsequent edge transitions are not missed while the
+ * handlers are running.
+ */
+ edge = isr & ~bank->level_mask;
+ if (edge)
+ omap_clear_gpio_irqbank(bank, edge);
raw_spin_unlock_irqrestore(&bank->lock, lock_flags);
@@ -807,8 +630,6 @@ static unsigned int omap_gpio_irq_startup(struct irq_data *d)
if (!LINE_USED(bank->mod_usage, offset))
omap_set_gpio_direction(bank, offset, 1);
- else if (!omap_gpio_is_input(bank, offset))
- goto err;
omap_enable_gpio_module(bank, offset);
bank->irq_usage |= BIT(offset);
@@ -816,9 +637,6 @@ static unsigned int omap_gpio_irq_startup(struct irq_data *d)
omap_gpio_unmask_irq(d);
return 0;
-err:
- raw_spin_unlock_irqrestore(&bank->lock, flags);
- return -EINVAL;
}
static void omap_gpio_irq_shutdown(struct irq_data *d)
@@ -829,9 +647,9 @@ static void omap_gpio_irq_shutdown(struct irq_data *d)
raw_spin_lock_irqsave(&bank->lock, flags);
bank->irq_usage &= ~(BIT(offset));
- omap_set_gpio_irqenable(bank, offset, 0);
- omap_clear_gpio_irqstatus(bank, offset);
omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
+ omap_clear_gpio_irqstatus(bank, offset);
+ omap_set_gpio_irqenable(bank, offset, 0);
if (!LINE_USED(bank->mod_usage, offset))
omap_clear_gpio_debounce(bank, offset);
omap_disable_gpio_module(bank, offset);
@@ -852,14 +670,6 @@ static void gpio_irq_bus_sync_unlock(struct irq_data *data)
pm_runtime_put(bank->chip.parent);
}
-static void omap_gpio_ack_irq(struct irq_data *d)
-{
- struct gpio_bank *bank = omap_irq_data_get_bank(d);
- unsigned offset = d->hwirq;
-
- omap_clear_gpio_irqstatus(bank, offset);
-}
-
static void omap_gpio_mask_irq(struct irq_data *d)
{
struct gpio_bank *bank = omap_irq_data_get_bank(d);
@@ -867,8 +677,8 @@ static void omap_gpio_mask_irq(struct irq_data *d)
unsigned long flags;
raw_spin_lock_irqsave(&bank->lock, flags);
- omap_set_gpio_irqenable(bank, offset, 0);
omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
+ omap_set_gpio_irqenable(bank, offset, 0);
raw_spin_unlock_irqrestore(&bank->lock, flags);
}
@@ -880,9 +690,6 @@ static void omap_gpio_unmask_irq(struct irq_data *d)
unsigned long flags;
raw_spin_lock_irqsave(&bank->lock, flags);
- if (trigger)
- omap_set_gpio_triggering(bank, offset, trigger);
-
omap_set_gpio_irqenable(bank, offset, 1);
/*
@@ -890,9 +697,13 @@ static void omap_gpio_unmask_irq(struct irq_data *d)
* is cleared, thus after the handler has run. OMAP4 needs this done
* after enabling the interrupt to clear the wakeup status.
*/
- if (bank->level_mask & BIT(offset))
+ if (bank->regs->leveldetect0 && bank->regs->wkup_en &&
+ trigger & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
omap_clear_gpio_irqstatus(bank, offset);
+ if (trigger)
+ omap_set_gpio_triggering(bank, offset, trigger);
+
raw_spin_unlock_irqrestore(&bank->lock, flags);
}
@@ -958,19 +769,44 @@ static inline void omap_mpuio_init(struct gpio_bank *bank)
/*---------------------------------------------------------------------*/
-static int omap_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
+static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
{
- struct gpio_bank *bank;
+ struct gpio_bank *bank = gpiochip_get_data(chip);
+ unsigned long flags;
+
+ pm_runtime_get_sync(chip->parent);
+
+ raw_spin_lock_irqsave(&bank->lock, flags);
+ omap_enable_gpio_module(bank, offset);
+ bank->mod_usage |= BIT(offset);
+ raw_spin_unlock_irqrestore(&bank->lock, flags);
+
+ return 0;
+}
+
+static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
+{
+ struct gpio_bank *bank = gpiochip_get_data(chip);
unsigned long flags;
- void __iomem *reg;
- int dir;
- bank = gpiochip_get_data(chip);
- reg = bank->base + bank->regs->direction;
raw_spin_lock_irqsave(&bank->lock, flags);
- dir = !!(readl_relaxed(reg) & BIT(offset));
+ bank->mod_usage &= ~(BIT(offset));
+ if (!LINE_USED(bank->irq_usage, offset)) {
+ omap_set_gpio_direction(bank, offset, 1);
+ omap_clear_gpio_debounce(bank, offset);
+ }
+ omap_disable_gpio_module(bank, offset);
raw_spin_unlock_irqrestore(&bank->lock, flags);
- return dir;
+
+ pm_runtime_put(chip->parent);
+}
+
+static int omap_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
+{
+ struct gpio_bank *bank = gpiochip_get_data(chip);
+
+ return !!(readl_relaxed(bank->base + bank->regs->direction) &
+ BIT(offset));
}
static int omap_gpio_input(struct gpio_chip *chip, unsigned offset)
@@ -987,14 +823,15 @@ static int omap_gpio_input(struct gpio_chip *chip, unsigned offset)
static int omap_gpio_get(struct gpio_chip *chip, unsigned offset)
{
- struct gpio_bank *bank;
-
- bank = gpiochip_get_data(chip);
+ struct gpio_bank *bank = gpiochip_get_data(chip);
+ void __iomem *reg;
if (omap_gpio_is_input(bank, offset))
- return omap_get_gpio_datain(bank, offset);
+ reg = bank->base + bank->regs->datain;
else
- return omap_get_gpio_dataout(bank, offset);
+ reg = bank->base + bank->regs->dataout;
+
+ return (readl_relaxed(reg) & BIT(offset)) != 0;
}
static int omap_gpio_output(struct gpio_chip *chip, unsigned offset, int value)
@@ -1014,18 +851,20 @@ static int omap_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask,
unsigned long *bits)
{
struct gpio_bank *bank = gpiochip_get_data(chip);
- void __iomem *reg = bank->base + bank->regs->direction;
- unsigned long in = readl_relaxed(reg), l;
+ void __iomem *base = bank->base;
+ u32 direction, m, val = 0;
- *bits = 0;
+ direction = readl_relaxed(base + bank->regs->direction);
- l = in & *mask;
- if (l)
- *bits |= omap_get_gpio_datain_multiple(bank, &l);
+ m = direction & *mask;
+ if (m)
+ val |= readl_relaxed(base + bank->regs->datain) & m;
- l = ~in & *mask;
- if (l)
- *bits |= omap_get_gpio_dataout_multiple(bank, &l);
+ m = ~direction & *mask;
+ if (m)
+ val |= readl_relaxed(base + bank->regs->dataout) & m;
+
+ *bits = val;
return 0;
}
@@ -1078,10 +917,14 @@ static void omap_gpio_set_multiple(struct gpio_chip *chip, unsigned long *mask,
unsigned long *bits)
{
struct gpio_bank *bank = gpiochip_get_data(chip);
+ void __iomem *reg = bank->base + bank->regs->dataout;
unsigned long flags;
+ u32 l;
raw_spin_lock_irqsave(&bank->lock, flags);
- bank->set_dataout_multiple(bank, mask, bits);
+ l = (readl_relaxed(reg) & ~*mask) | (*bits & *mask);
+ writel_relaxed(l, reg);
+ bank->context.dataout = l;
raw_spin_unlock_irqrestore(&bank->lock, flags);
}
@@ -1115,9 +958,9 @@ static void omap_gpio_mod_init(struct gpio_bank *bank)
return;
}
- omap_gpio_rmw(base, bank->regs->irqenable, l,
+ omap_gpio_rmw(base + bank->regs->irqenable, l,
bank->regs->irqenable_inv);
- omap_gpio_rmw(base, bank->regs->irqstatus, l,
+ omap_gpio_rmw(base + bank->regs->irqstatus, l,
!bank->regs->irqenable_inv);
if (bank->regs->debounce_en)
writel_relaxed(0, base + bank->regs->debounce_en);
@@ -1180,11 +1023,8 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
#endif
/* MPUIO is a bit different, reading IRQ status clears it */
- if (bank->is_mpuio) {
- irqc->irq_ack = dummy_irq_chip.irq_ack;
- if (!bank->regs->wkup_en)
- irqc->irq_set_wake = NULL;
- }
+ if (bank->is_mpuio && !bank->regs->wkup_en)
+ irqc->irq_set_wake = NULL;
irq = &bank->chip.irq;
irq->chip = irqc;
@@ -1215,7 +1055,7 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
static void omap_gpio_init_context(struct gpio_bank *p)
{
- struct omap_gpio_reg_offs *regs = p->regs;
+ const struct omap_gpio_reg_offs *regs = p->regs;
void __iomem *base = p->base;
p->context.ctrl = readl_relaxed(base + regs->ctrl);
@@ -1227,60 +1067,56 @@ static void omap_gpio_init_context(struct gpio_bank *p)
p->context.fallingdetect = readl_relaxed(base + regs->fallingdetect);
p->context.irqenable1 = readl_relaxed(base + regs->irqenable);
p->context.irqenable2 = readl_relaxed(base + regs->irqenable2);
-
- if (regs->set_dataout && p->regs->clr_dataout)
- p->context.dataout = readl_relaxed(base + regs->set_dataout);
- else
- p->context.dataout = readl_relaxed(base + regs->dataout);
+ p->context.dataout = readl_relaxed(base + regs->dataout);
p->context_valid = true;
}
static void omap_gpio_restore_context(struct gpio_bank *bank)
{
- writel_relaxed(bank->context.wake_en,
- bank->base + bank->regs->wkup_en);
- writel_relaxed(bank->context.ctrl, bank->base + bank->regs->ctrl);
- writel_relaxed(bank->context.leveldetect0,
- bank->base + bank->regs->leveldetect0);
- writel_relaxed(bank->context.leveldetect1,
- bank->base + bank->regs->leveldetect1);
- writel_relaxed(bank->context.risingdetect,
- bank->base + bank->regs->risingdetect);
- writel_relaxed(bank->context.fallingdetect,
- bank->base + bank->regs->fallingdetect);
- if (bank->regs->set_dataout && bank->regs->clr_dataout)
- writel_relaxed(bank->context.dataout,
- bank->base + bank->regs->set_dataout);
- else
- writel_relaxed(bank->context.dataout,
- bank->base + bank->regs->dataout);
- writel_relaxed(bank->context.oe, bank->base + bank->regs->direction);
+ const struct omap_gpio_reg_offs *regs = bank->regs;
+ void __iomem *base = bank->base;
+
+ writel_relaxed(bank->context.wake_en, base + regs->wkup_en);
+ writel_relaxed(bank->context.ctrl, base + regs->ctrl);
+ writel_relaxed(bank->context.leveldetect0, base + regs->leveldetect0);
+ writel_relaxed(bank->context.leveldetect1, base + regs->leveldetect1);
+ writel_relaxed(bank->context.risingdetect, base + regs->risingdetect);
+ writel_relaxed(bank->context.fallingdetect, base + regs->fallingdetect);
+ writel_relaxed(bank->context.dataout, base + regs->dataout);
+ writel_relaxed(bank->context.oe, base + regs->direction);
if (bank->dbck_enable_mask) {
- writel_relaxed(bank->context.debounce, bank->base +
- bank->regs->debounce);
+ writel_relaxed(bank->context.debounce, base + regs->debounce);
writel_relaxed(bank->context.debounce_en,
- bank->base + bank->regs->debounce_en);
+ base + regs->debounce_en);
}
- writel_relaxed(bank->context.irqenable1,
- bank->base + bank->regs->irqenable);
- writel_relaxed(bank->context.irqenable2,
- bank->base + bank->regs->irqenable2);
+ writel_relaxed(bank->context.irqenable1, base + regs->irqenable);
+ writel_relaxed(bank->context.irqenable2, base + regs->irqenable2);
}
static void omap_gpio_idle(struct gpio_bank *bank, bool may_lose_context)
{
struct device *dev = bank->chip.parent;
void __iomem *base = bank->base;
- u32 nowake;
+ u32 mask, nowake;
bank->saved_datain = readl_relaxed(base + bank->regs->datain);
if (!bank->enabled_non_wakeup_gpios)
goto update_gpio_context_count;
+ /* Check for pending EDGE_FALLING, ignore EDGE_BOTH */
+ mask = bank->enabled_non_wakeup_gpios & bank->context.fallingdetect;
+ mask &= ~bank->context.risingdetect;
+ bank->saved_datain |= mask;
+
+ /* Check for pending EDGE_RISING, ignore EDGE_BOTH */
+ mask = bank->enabled_non_wakeup_gpios & bank->context.risingdetect;
+ mask &= ~bank->context.fallingdetect;
+ bank->saved_datain &= ~mask;
+
if (!may_lose_context)
goto update_gpio_context_count;
@@ -1291,8 +1127,8 @@ static void omap_gpio_idle(struct gpio_bank *bank, bool may_lose_context)
*/
if (!bank->loses_context && bank->enabled_non_wakeup_gpios) {
nowake = bank->enabled_non_wakeup_gpios;
- omap_gpio_rmw(base, bank->regs->fallingdetect, nowake, ~nowake);
- omap_gpio_rmw(base, bank->regs->risingdetect, nowake, ~nowake);
+ omap_gpio_rmw(base + bank->regs->fallingdetect, nowake, ~nowake);
+ omap_gpio_rmw(base + bank->regs->risingdetect, nowake, ~nowake);
}
update_gpio_context_count:
@@ -1421,7 +1257,7 @@ static int gpio_omap_cpu_notifier(struct notifier_block *nb,
return NOTIFY_OK;
}
-static struct omap_gpio_reg_offs omap2_gpio_regs = {
+static const struct omap_gpio_reg_offs omap2_gpio_regs = {
.revision = OMAP24XX_GPIO_REVISION,
.direction = OMAP24XX_GPIO_OE,
.datain = OMAP24XX_GPIO_DATAIN,
@@ -1444,7 +1280,7 @@ static struct omap_gpio_reg_offs omap2_gpio_regs = {
.fallingdetect = OMAP24XX_GPIO_FALLINGDETECT,
};
-static struct omap_gpio_reg_offs omap4_gpio_regs = {
+static const struct omap_gpio_reg_offs omap4_gpio_regs = {
.revision = OMAP4_GPIO_REVISION,
.direction = OMAP4_GPIO_OE,
.datain = OMAP4_GPIO_DATAIN,
@@ -1453,6 +1289,8 @@ static struct omap_gpio_reg_offs omap4_gpio_regs = {
.clr_dataout = OMAP4_GPIO_CLEARDATAOUT,
.irqstatus = OMAP4_GPIO_IRQSTATUS0,
.irqstatus2 = OMAP4_GPIO_IRQSTATUS1,
+ .irqstatus_raw0 = OMAP4_GPIO_IRQSTATUSRAW0,
+ .irqstatus_raw1 = OMAP4_GPIO_IRQSTATUSRAW1,
.irqenable = OMAP4_GPIO_IRQSTATUSSET0,
.irqenable2 = OMAP4_GPIO_IRQSTATUSSET1,
.set_irqenable = OMAP4_GPIO_IRQSTATUSSET0,
@@ -1528,7 +1366,7 @@ static int omap_gpio_probe(struct platform_device *pdev)
irqc->irq_startup = omap_gpio_irq_startup,
irqc->irq_shutdown = omap_gpio_irq_shutdown,
- irqc->irq_ack = omap_gpio_ack_irq,
+ irqc->irq_ack = dummy_irq_chip.irq_ack,
irqc->irq_mask = omap_gpio_mask_irq,
irqc->irq_unmask = omap_gpio_unmask_irq,
irqc->irq_set_type = omap_gpio_irq_type,
@@ -1572,14 +1410,10 @@ static int omap_gpio_probe(struct platform_device *pdev)
pdata->get_context_loss_count;
}
- if (bank->regs->set_dataout && bank->regs->clr_dataout) {
+ if (bank->regs->set_dataout && bank->regs->clr_dataout)
bank->set_dataout = omap_set_gpio_dataout_reg;
- bank->set_dataout_multiple = omap_set_gpio_dataout_reg_multiple;
- } else {
+ else
bank->set_dataout = omap_set_gpio_dataout_mask;
- bank->set_dataout_multiple =
- omap_set_gpio_dataout_mask_multiple;
- }
raw_spin_lock_init(&bank->lock);
raw_spin_lock_init(&bank->wa_lock);
@@ -1635,7 +1469,6 @@ static int omap_gpio_remove(struct platform_device *pdev)
struct gpio_bank *bank = platform_get_drvdata(pdev);
cpu_pm_unregister_notifier(&bank->nb);
- list_del(&bank->node);
gpiochip_remove(&bank->chip);
pm_runtime_disable(&pdev->dev);
if (bank->dbck_flag)
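
Much of the gpio-omap diff above is one refactor applied repeatedly: omap_gpio_rmw() now takes the fully resolved register address, updates it, and returns the value written, so the shadow copies kept in bank->context can be refreshed in the same expression instead of with a separate readl_relaxed(). Typical use, as in the hunks above:

	bank->context.oe = omap_gpio_rmw(bank->base + bank->regs->direction,
					 BIT(offset), is_input);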
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index cfe827cefad8..378b206d2dc9 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -1178,6 +1178,7 @@ static const struct of_device_id pca953x_dt_ids[] = {
{ .compatible = "ti,tca6408", .data = OF_953X( 8, PCA_INT), },
{ .compatible = "ti,tca6416", .data = OF_953X(16, PCA_INT), },
{ .compatible = "ti,tca6424", .data = OF_953X(24, PCA_INT), },
+ { .compatible = "ti,tca9539", .data = OF_953X(16, PCA_INT), },
{ .compatible = "onnn,cat9554", .data = OF_953X( 8, PCA_INT), },
{ .compatible = "onnn,pca9654", .data = OF_953X( 8, PCA_INT), },
diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
index 9aad32206e84..722ce5cf861e 100644
--- a/drivers/gpio/gpio-pl061.c
+++ b/drivers/gpio/gpio-pl061.c
@@ -283,6 +283,7 @@ static int pl061_probe(struct amba_device *adev, const struct amba_id *id)
{
struct device *dev = &adev->dev;
struct pl061 *pl061;
+ struct gpio_irq_chip *girq;
int ret, irq;
pl061 = devm_kzalloc(dev, sizeof(*pl061), GFP_KERNEL);
@@ -310,10 +311,6 @@ static int pl061_probe(struct amba_device *adev, const struct amba_id *id)
pl061->gc.parent = dev;
pl061->gc.owner = THIS_MODULE;
- ret = gpiochip_add_data(&pl061->gc, pl061);
- if (ret)
- return ret;
-
/*
* irq_chip support
*/
@@ -332,19 +329,24 @@ static int pl061_probe(struct amba_device *adev, const struct amba_id *id)
}
pl061->parent_irq = irq;
- ret = gpiochip_irqchip_add(&pl061->gc, &pl061->irq_chip,
- 0, handle_bad_irq,
- IRQ_TYPE_NONE);
- if (ret) {
- dev_info(&adev->dev, "could not add irqchip\n");
+ girq = &pl061->gc.irq;
+ girq->chip = &pl061->irq_chip;
+ girq->parent_handler = pl061_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(dev, 1, sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+ girq->parents[0] = irq;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_bad_irq;
+
+ ret = devm_gpiochip_add_data(dev, &pl061->gc, pl061);
+ if (ret)
return ret;
- }
- gpiochip_set_chained_irqchip(&pl061->gc, &pl061->irq_chip,
- irq, pl061_irq_handler);
amba_set_drvdata(adev, pl061);
- dev_info(&adev->dev, "PL061 GPIO chip @%pa registered\n",
- &adev->res.start);
+ dev_info(dev, "PL061 GPIO chip registered\n");
return 0;
}
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index 70e95fc4779f..187984d26f47 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -489,7 +489,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
irq_chip->irq_unmask = gpio_rcar_irq_enable;
irq_chip->irq_set_type = gpio_rcar_irq_set_type;
irq_chip->irq_set_wake = gpio_rcar_irq_set_wake;
- irq_chip->flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_MASK_ON_SUSPEND;
+ irq_chip->flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_MASK_ON_SUSPEND;
ret = gpiochip_add_data(gpio_chip, p);
if (ret) {
diff --git a/drivers/gpio/gpio-siox.c b/drivers/gpio/gpio-siox.c
index 571b2a81c6de..006a7e6a75f2 100644
--- a/drivers/gpio/gpio-siox.c
+++ b/drivers/gpio/gpio-siox.c
@@ -211,20 +211,22 @@ static int gpio_siox_get_direction(struct gpio_chip *chip, unsigned int offset)
static int gpio_siox_probe(struct siox_device *sdevice)
{
struct gpio_siox_ddata *ddata;
+ struct gpio_irq_chip *girq;
+ struct device *dev = &sdevice->dev;
int ret;
- ddata = devm_kzalloc(&sdevice->dev, sizeof(*ddata), GFP_KERNEL);
+ ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
if (!ddata)
return -ENOMEM;
- dev_set_drvdata(&sdevice->dev, ddata);
+ dev_set_drvdata(dev, ddata);
mutex_init(&ddata->lock);
spin_lock_init(&ddata->irqlock);
ddata->gchip.base = -1;
ddata->gchip.can_sleep = 1;
- ddata->gchip.parent = &sdevice->dev;
+ ddata->gchip.parent = dev;
ddata->gchip.owner = THIS_MODULE;
ddata->gchip.get = gpio_siox_get;
ddata->gchip.set = gpio_siox_set;
@@ -239,54 +241,27 @@ static int gpio_siox_probe(struct siox_device *sdevice)
ddata->ichip.irq_unmask = gpio_siox_irq_unmask;
ddata->ichip.irq_set_type = gpio_siox_irq_set_type;
- ret = gpiochip_add(&ddata->gchip);
- if (ret) {
- dev_err(&sdevice->dev,
- "Failed to register gpio chip (%d)\n", ret);
- goto err_gpiochip;
- }
+ girq = &ddata->gchip.irq;
+ girq->chip = &ddata->ichip;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_level_irq;
- ret = gpiochip_irqchip_add(&ddata->gchip, &ddata->ichip,
- 0, handle_level_irq, IRQ_TYPE_EDGE_RISING);
- if (ret) {
- dev_err(&sdevice->dev,
- "Failed to register irq chip (%d)\n", ret);
-err_gpiochip:
- gpiochip_remove(&ddata->gchip);
- }
+ ret = devm_gpiochip_add_data(dev, &ddata->gchip, NULL);
+ if (ret)
+ dev_err(dev, "Failed to register gpio chip (%d)\n", ret);
return ret;
}
-static int gpio_siox_remove(struct siox_device *sdevice)
-{
- struct gpio_siox_ddata *ddata = dev_get_drvdata(&sdevice->dev);
-
- gpiochip_remove(&ddata->gchip);
- return 0;
-}
-
static struct siox_driver gpio_siox_driver = {
.probe = gpio_siox_probe,
- .remove = gpio_siox_remove,
.set_data = gpio_siox_set_data,
.get_data = gpio_siox_get_data,
.driver = {
.name = "gpio-siox",
},
};
-
-static int __init gpio_siox_init(void)
-{
- return siox_driver_register(&gpio_siox_driver);
-}
-module_init(gpio_siox_init);
-
-static void __exit gpio_siox_exit(void)
-{
- siox_driver_unregister(&gpio_siox_driver);
-}
-module_exit(gpio_siox_exit);
+module_siox_driver(gpio_siox_driver);
MODULE_AUTHOR("Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>");
MODULE_DESCRIPTION("SIOX gpio driver");
diff --git a/drivers/gpio/gpio-stp-xway.c b/drivers/gpio/gpio-stp-xway.c
index 24c478392394..9e23a5ae8108 100644
--- a/drivers/gpio/gpio-stp-xway.c
+++ b/drivers/gpio/gpio-stp-xway.c
@@ -15,8 +15,6 @@
#include <linux/clk.h>
#include <linux/err.h>
-#include <lantiq_soc.h>
-
/*
* The Serial To Parallel (STP) is found on MIPS based Lantiq socs. It is a
* peripheral controller used to drive external shift register cascades. At most
@@ -71,8 +69,7 @@
#define xway_stp_r32(m, reg) __raw_readl(m + reg)
#define xway_stp_w32(m, val, reg) __raw_writel(val, m + reg)
#define xway_stp_w32_mask(m, clear, set, reg) \
- ltq_w32((ltq_r32(m + reg) & ~(clear)) | (set), \
- m + reg)
+ xway_stp_w32(m, (xway_stp_r32(m, reg) & ~(clear)) | (set), reg)
struct xway_stp {
struct gpio_chip gc;
@@ -156,9 +153,9 @@ static int xway_stp_request(struct gpio_chip *gc, unsigned gpio)
/**
* xway_stp_hw_init() - Configure the STP unit and enable the clock gate
- * @virt: pointer to the remapped register range
+ * @chip: Pointer to the xway_stp chip structure
*/
-static int xway_stp_hw_init(struct xway_stp *chip)
+static void xway_stp_hw_init(struct xway_stp *chip)
{
/* sane defaults */
xway_stp_w32(chip->virt, 0, XWAY_STP_AR);
@@ -201,8 +198,6 @@ static int xway_stp_hw_init(struct xway_stp *chip)
if (chip->reserved)
xway_stp_w32_mask(chip->virt, XWAY_STP_UPD_MASK,
XWAY_STP_UPD_FPI, XWAY_STP_CON1);
-
- return 0;
}
static int xway_stp_probe(struct platform_device *pdev)
@@ -258,21 +253,27 @@ static int xway_stp_probe(struct platform_device *pdev)
if (!of_find_property(pdev->dev.of_node, "lantiq,rising", NULL))
chip->edge = XWAY_STP_FALLING;
- clk = clk_get(&pdev->dev, NULL);
+ clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(clk)) {
dev_err(&pdev->dev, "Failed to get clock\n");
return PTR_ERR(clk);
}
- clk_enable(clk);
- ret = xway_stp_hw_init(chip);
- if (!ret)
- ret = devm_gpiochip_add_data(&pdev->dev, &chip->gc, chip);
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
- if (!ret)
- dev_info(&pdev->dev, "Init done\n");
+ xway_stp_hw_init(chip);
- return ret;
+ ret = devm_gpiochip_add_data(&pdev->dev, &chip->gc, chip);
+ if (ret) {
+ clk_disable_unprepare(clk);
+ return ret;
+ }
+
+ dev_info(&pdev->dev, "Init done\n");
+
+ return 0;
}
static const struct of_device_id xway_stp_match[] = {
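
The xway-stp hunk swaps clk_get()/clk_enable() for devm_clk_get()/clk_prepare_enable(): the devm variant drops the clock reference automatically on probe failure or unbind, and clk_prepare_enable() also performs the (possibly sleeping) prepare step that plain clk_enable() skips. Sketch of the sequence:

	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;
	/* later failure paths must call clk_disable_unprepare(clk) */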
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index f57bfc07ae22..0f59161a4701 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -541,8 +541,8 @@ DEFINE_SHOW_ATTRIBUTE(tegra_dbg_gpio);
static void tegra_gpio_debuginit(struct tegra_gpio_info *tgi)
{
- (void) debugfs_create_file("tegra_gpio", 0444,
- NULL, tgi, &tegra_dbg_gpio_fops);
+ debugfs_create_file("tegra_gpio", 0444, NULL, tgi,
+ &tegra_dbg_gpio_fops);
}
#else
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
index 30aef41e3b7e..7ba668db171b 100644
--- a/drivers/gpio/gpio-vf610.c
+++ b/drivers/gpio/gpio-vf610.c
@@ -265,7 +265,8 @@ static int vf610_gpio_probe(struct platform_device *pdev)
return port->irq;
port->clk_port = devm_clk_get(dev, "port");
- if (!IS_ERR(port->clk_port)) {
+ ret = PTR_ERR_OR_ZERO(port->clk_port);
+ if (!ret) {
ret = clk_prepare_enable(port->clk_port);
if (ret)
return ret;
@@ -273,16 +274,17 @@ static int vf610_gpio_probe(struct platform_device *pdev)
port->clk_port);
if (ret)
return ret;
- } else if (port->clk_port == ERR_PTR(-EPROBE_DEFER)) {
+ } else if (ret == -EPROBE_DEFER) {
/*
* Percolate deferrals, for anything else,
* just live without the clocking.
*/
- return PTR_ERR(port->clk_port);
+ return ret;
}
port->clk_gpio = devm_clk_get(dev, "gpio");
- if (!IS_ERR(port->clk_gpio)) {
+ ret = PTR_ERR_OR_ZERO(port->clk_gpio);
+ if (!ret) {
ret = clk_prepare_enable(port->clk_gpio);
if (ret)
return ret;
@@ -290,8 +292,8 @@ static int vf610_gpio_probe(struct platform_device *pdev)
port->clk_gpio);
if (ret)
return ret;
- } else if (port->clk_gpio == ERR_PTR(-EPROBE_DEFER)) {
- return PTR_ERR(port->clk_gpio);
+ } else if (ret == -EPROBE_DEFER) {
+ return ret;
}
gc = &port->gc;
diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
index b13a49c89cc1..98cd715ccc33 100644
--- a/drivers/gpio/gpio-vr41xx.c
+++ b/drivers/gpio/gpio-vr41xx.c
@@ -467,10 +467,9 @@ static struct gpio_chip vr41xx_gpio_chip = {
static int giu_probe(struct platform_device *pdev)
{
- struct resource *res;
unsigned int trigger, i, pin;
struct irq_chip *chip;
- int irq, ret;
+ int irq;
switch (pdev->id) {
case GPIO_50PINS_PULLUPDOWN:
@@ -489,21 +488,14 @@ static int giu_probe(struct platform_device *pdev)
return -ENODEV;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -EBUSY;
-
- giu_base = ioremap(res->start, resource_size(res));
- if (!giu_base)
- return -ENOMEM;
+ giu_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(giu_base))
+ return PTR_ERR(giu_base);
vr41xx_gpio_chip.parent = &pdev->dev;
- ret = gpiochip_add_data(&vr41xx_gpio_chip, NULL);
- if (!ret) {
- iounmap(giu_base);
+ if (gpiochip_add_data(&vr41xx_gpio_chip, NULL))
return -ENODEV;
- }
giu_write(GIUINTENL, 0);
giu_write(GIUINTENH, 0);
@@ -534,7 +526,6 @@ static int giu_probe(struct platform_device *pdev)
static int giu_remove(struct platform_device *pdev)
{
if (giu_base) {
- iounmap(giu_base);
giu_base = NULL;
}
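devm_platform_ioremap_resource() bundles platform_get_resource() and devm_ioremap_resource() into one call and ties the mapping's lifetime to the device, which is why the explicit iounmap() in both the probe error path and giu_remove() can go away. A minimal sketch:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	void __iomem *base;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* ... program registers through base; no iounmap() needed ... */
	return 0;
}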
diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
index 32944eb886c1..a9748b5198e6 100644
--- a/drivers/gpio/gpio-xilinx.c
+++ b/drivers/gpio/gpio-xilinx.c
@@ -11,7 +11,6 @@
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
-#include <linux/of_gpio.h>
#include <linux/io.h>
#include <linux/gpio/driver.h>
#include <linux/slab.h>
@@ -33,14 +32,16 @@
/**
* struct xgpio_instance - Stores information about GPIO device
- * @mmchip: OF GPIO chip for memory mapped banks
+ * @gc: GPIO chip
+ * @regs: register block
* @gpio_width: GPIO width for every channel
* @gpio_state: GPIO state shadow register
* @gpio_dir: GPIO direction shadow register
* @gpio_lock: Lock used for synchronization
*/
struct xgpio_instance {
- struct of_mm_gpio_chip mmchip;
+ struct gpio_chip gc;
+ void __iomem *regs;
unsigned int gpio_width[2];
u32 gpio_state[2];
u32 gpio_dir[2];
@@ -84,11 +85,10 @@ static inline int xgpio_offset(struct xgpio_instance *chip, int gpio)
*/
static int xgpio_get(struct gpio_chip *gc, unsigned int gpio)
{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct xgpio_instance *chip = gpiochip_get_data(gc);
u32 val;
- val = xgpio_readreg(mm_gc->regs + XGPIO_DATA_OFFSET +
+ val = xgpio_readreg(chip->regs + XGPIO_DATA_OFFSET +
xgpio_regoffset(chip, gpio));
return !!(val & BIT(xgpio_offset(chip, gpio)));
@@ -106,7 +106,6 @@ static int xgpio_get(struct gpio_chip *gc, unsigned int gpio)
static void xgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
unsigned long flags;
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct xgpio_instance *chip = gpiochip_get_data(gc);
int index = xgpio_index(chip, gpio);
int offset = xgpio_offset(chip, gpio);
@@ -119,7 +118,7 @@ static void xgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
else
chip->gpio_state[index] &= ~BIT(offset);
- xgpio_writereg(mm_gc->regs + XGPIO_DATA_OFFSET +
+ xgpio_writereg(chip->regs + XGPIO_DATA_OFFSET +
xgpio_regoffset(chip, gpio), chip->gpio_state[index]);
spin_unlock_irqrestore(&chip->gpio_lock[index], flags);
@@ -138,7 +137,6 @@ static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
unsigned long *bits)
{
unsigned long flags;
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct xgpio_instance *chip = gpiochip_get_data(gc);
int index = xgpio_index(chip, 0);
int offset, i;
@@ -150,7 +148,7 @@ static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
if (*mask == 0)
break;
if (index != xgpio_index(chip, i)) {
- xgpio_writereg(mm_gc->regs + XGPIO_DATA_OFFSET +
+ xgpio_writereg(chip->regs + XGPIO_DATA_OFFSET +
xgpio_regoffset(chip, i),
chip->gpio_state[index]);
spin_unlock_irqrestore(&chip->gpio_lock[index], flags);
@@ -166,7 +164,7 @@ static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
}
}
- xgpio_writereg(mm_gc->regs + XGPIO_DATA_OFFSET +
+ xgpio_writereg(chip->regs + XGPIO_DATA_OFFSET +
xgpio_regoffset(chip, i), chip->gpio_state[index]);
spin_unlock_irqrestore(&chip->gpio_lock[index], flags);
@@ -184,7 +182,6 @@ static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
static int xgpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
{
unsigned long flags;
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct xgpio_instance *chip = gpiochip_get_data(gc);
int index = xgpio_index(chip, gpio);
int offset = xgpio_offset(chip, gpio);
@@ -193,7 +190,7 @@ static int xgpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
/* Set the GPIO bit in shadow register and set direction as input */
chip->gpio_dir[index] |= BIT(offset);
- xgpio_writereg(mm_gc->regs + XGPIO_TRI_OFFSET +
+ xgpio_writereg(chip->regs + XGPIO_TRI_OFFSET +
xgpio_regoffset(chip, gpio), chip->gpio_dir[index]);
spin_unlock_irqrestore(&chip->gpio_lock[index], flags);
@@ -216,7 +213,6 @@ static int xgpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
static int xgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
{
unsigned long flags;
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct xgpio_instance *chip = gpiochip_get_data(gc);
int index = xgpio_index(chip, gpio);
int offset = xgpio_offset(chip, gpio);
@@ -228,12 +224,12 @@ static int xgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
chip->gpio_state[index] |= BIT(offset);
else
chip->gpio_state[index] &= ~BIT(offset);
- xgpio_writereg(mm_gc->regs + XGPIO_DATA_OFFSET +
+ xgpio_writereg(chip->regs + XGPIO_DATA_OFFSET +
xgpio_regoffset(chip, gpio), chip->gpio_state[index]);
/* Clear the GPIO bit in shadow register and set direction as output */
chip->gpio_dir[index] &= ~BIT(offset);
- xgpio_writereg(mm_gc->regs + XGPIO_TRI_OFFSET +
+ xgpio_writereg(chip->regs + XGPIO_TRI_OFFSET +
xgpio_regoffset(chip, gpio), chip->gpio_dir[index]);
spin_unlock_irqrestore(&chip->gpio_lock[index], flags);
@@ -243,43 +239,23 @@ static int xgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
/**
* xgpio_save_regs - Set initial values of GPIO pins
- * @mm_gc: Pointer to memory mapped GPIO chip structure
+ * @chip: Pointer to GPIO instance
*/
-static void xgpio_save_regs(struct of_mm_gpio_chip *mm_gc)
+static void xgpio_save_regs(struct xgpio_instance *chip)
{
- struct xgpio_instance *chip =
- container_of(mm_gc, struct xgpio_instance, mmchip);
-
- xgpio_writereg(mm_gc->regs + XGPIO_DATA_OFFSET, chip->gpio_state[0]);
- xgpio_writereg(mm_gc->regs + XGPIO_TRI_OFFSET, chip->gpio_dir[0]);
+ xgpio_writereg(chip->regs + XGPIO_DATA_OFFSET, chip->gpio_state[0]);
+ xgpio_writereg(chip->regs + XGPIO_TRI_OFFSET, chip->gpio_dir[0]);
if (!chip->gpio_width[1])
return;
- xgpio_writereg(mm_gc->regs + XGPIO_DATA_OFFSET + XGPIO_CHANNEL_OFFSET,
+ xgpio_writereg(chip->regs + XGPIO_DATA_OFFSET + XGPIO_CHANNEL_OFFSET,
chip->gpio_state[1]);
- xgpio_writereg(mm_gc->regs + XGPIO_TRI_OFFSET + XGPIO_CHANNEL_OFFSET,
+ xgpio_writereg(chip->regs + XGPIO_TRI_OFFSET + XGPIO_CHANNEL_OFFSET,
chip->gpio_dir[1]);
}
/**
- * xgpio_remove - Remove method for the GPIO device.
- * @pdev: pointer to the platform device
- *
- * This function remove gpiochips and frees all the allocated resources.
- *
- * Return: 0 always
- */
-static int xgpio_remove(struct platform_device *pdev)
-{
- struct xgpio_instance *chip = platform_get_drvdata(pdev);
-
- of_mm_gpiochip_remove(&chip->mmchip);
-
- return 0;
-}
-
-/**
* xgpio_of_probe - Probe method for the GPIO device.
* @pdev: pointer to the platform device
*
@@ -340,21 +316,28 @@ static int xgpio_probe(struct platform_device *pdev)
spin_lock_init(&chip->gpio_lock[1]);
}
- chip->mmchip.gc.ngpio = chip->gpio_width[0] + chip->gpio_width[1];
- chip->mmchip.gc.parent = &pdev->dev;
- chip->mmchip.gc.direction_input = xgpio_dir_in;
- chip->mmchip.gc.direction_output = xgpio_dir_out;
- chip->mmchip.gc.get = xgpio_get;
- chip->mmchip.gc.set = xgpio_set;
- chip->mmchip.gc.set_multiple = xgpio_set_multiple;
+ chip->gc.base = -1;
+ chip->gc.ngpio = chip->gpio_width[0] + chip->gpio_width[1];
+ chip->gc.parent = &pdev->dev;
+ chip->gc.direction_input = xgpio_dir_in;
+ chip->gc.direction_output = xgpio_dir_out;
+ chip->gc.get = xgpio_get;
+ chip->gc.set = xgpio_set;
+ chip->gc.set_multiple = xgpio_set_multiple;
+
+ chip->gc.label = dev_name(&pdev->dev);
+
+ chip->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(chip->regs)) {
+ dev_err(&pdev->dev, "failed to ioremap memory resource\n");
+ return PTR_ERR(chip->regs);
+ }
- chip->mmchip.save_regs = xgpio_save_regs;
+ xgpio_save_regs(chip);
- /* Call the OF gpio helper to setup and register the GPIO device */
- status = of_mm_gpiochip_add_data(np, &chip->mmchip, chip);
+ status = devm_gpiochip_add_data(&pdev->dev, &chip->gc, chip);
if (status) {
- pr_err("%pOF: error in probe function with status %d\n",
- np, status);
+ dev_err(&pdev->dev, "failed to add GPIO chip\n");
return status;
}
@@ -370,7 +353,6 @@ MODULE_DEVICE_TABLE(of, xgpio_of_match);
static struct platform_driver xgpio_plat_driver = {
.probe = xgpio_probe,
- .remove = xgpio_remove,
.driver = {
.name = "gpio-xilinx",
.of_match_table = xgpio_of_match,
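With of_mm_gpio_chip gone, the register base lives in the driver's own state and every accessor retrieves it through gpiochip_get_data(); the shadow-register-under-spinlock pattern itself is unchanged. A compressed sketch of one such accessor (structure and names are illustrative, offsets omitted):

#include <linux/bits.h>
#include <linux/gpio/driver.h>
#include <linux/io.h>
#include <linux/spinlock.h>

struct example_instance {		/* illustrative, mirrors the driver */
	struct gpio_chip gc;
	void __iomem *regs;
	u32 state;			/* shadow of the data register */
	spinlock_t lock;
};

static void example_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
	struct example_instance *chip = gpiochip_get_data(gc);
	unsigned long flags;

	spin_lock_irqsave(&chip->lock, flags);
	if (val)
		chip->state |= BIT(gpio);
	else
		chip->state &= ~BIT(gpio);
	iowrite32(chip->state, chip->regs);	/* regs from chip, not mm_gc */
	spin_unlock_irqrestore(&chip->lock, flags);
}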
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index c9fc9e232aaf..39f2f9035c11 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -217,14 +217,13 @@ static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
if (!handler)
return AE_OK;
- desc = gpiochip_request_own_desc(chip, pin, "ACPI:Event", 0);
+ desc = gpiochip_request_own_desc(chip, pin, "ACPI:Event",
+ GPIO_ACTIVE_HIGH, GPIOD_IN);
if (IS_ERR(desc)) {
dev_err(chip->parent, "Failed to request GPIO\n");
return AE_ERROR;
}
- gpiod_direction_input(desc);
-
ret = gpiochip_lock_as_irq(chip, pin);
if (ret) {
dev_err(chip->parent, "Failed to lock GPIO as interrupt\n");
@@ -951,6 +950,7 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,
const char *label = "ACPI:OpRegion";
desc = gpiochip_request_own_desc(chip, pin, label,
+ GPIO_ACTIVE_HIGH,
flags);
if (IS_ERR(desc)) {
status = AE_ERROR;
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index aec7bd86ae7e..f974075ff00e 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -118,8 +118,15 @@ static void of_gpio_flags_quirks(struct device_node *np,
* Legacy handling of SPI active high chip select. If we have a
* property named "cs-gpios" we need to inspect the child node
* to determine if the flags should have inverted semantics.
+ *
+ * This does not apply to an SPI device named "spi-gpio", because
+ * these have traditionally obtained their own GPIOs by parsing
+ * the device tree directly and did not respect any "spi-cs-high"
+ * property on the SPI bus children.
*/
- if (IS_ENABLED(CONFIG_SPI_MASTER) && !strcmp(propname, "cs-gpios") &&
+ if (IS_ENABLED(CONFIG_SPI_MASTER) &&
+ !strcmp(propname, "cs-gpios") &&
+ !of_device_is_compatible(np, "spi-gpio") &&
of_property_read_bool(np, "cs-gpios")) {
struct device_node *child;
u32 cs;
@@ -158,6 +165,12 @@ static void of_gpio_flags_quirks(struct device_node *np,
}
}
}
+
+ /* Legacy handling of stmmac's active-low PHY reset line */
+ if (IS_ENABLED(CONFIG_STMMAC_ETH) &&
+ !strcmp(propname, "snps,reset-gpio") &&
+ of_property_read_bool(np, "snps,reset-active-low"))
+ *flags |= OF_GPIO_ACTIVE_LOW;
}
/**
@@ -255,6 +268,37 @@ static struct gpio_desc *of_find_spi_gpio(struct device *dev, const char *con_id
}
/*
+ * The old Freescale bindings use simply "gpios" as name for the chip select
+ * lines rather than "cs-gpios" like all other SPI hardware. Account for this
+ * with a special quirk.
+ */
+static struct gpio_desc *of_find_spi_cs_gpio(struct device *dev,
+ const char *con_id,
+ unsigned int idx,
+ unsigned long *flags)
+{
+ struct device_node *np = dev->of_node;
+
+ if (!IS_ENABLED(CONFIG_SPI_MASTER))
+ return ERR_PTR(-ENOENT);
+
+ /* Allow this specifically for Freescale devices */
+ if (!of_device_is_compatible(np, "fsl,spi") &&
+ !of_device_is_compatible(np, "aeroflexgaisler,spictrl"))
+ return ERR_PTR(-ENOENT);
+ /* Allow only if asking for "cs-gpios" */
+ if (!con_id || strcmp(con_id, "cs"))
+ return ERR_PTR(-ENOENT);
+
+ /*
+ * While all other SPI controllers use "cs-gpios" the Freescale
+ * uses just "gpios" so translate to that when "cs-gpios" is
+ * requested.
+ */
+ return of_find_gpio(dev, NULL, idx, flags);
+}
+
+/*
* Some regulator bindings happened before we managed to establish that GPIO
* properties should be named "foo-gpios" so we have this special kludge for
* them.
@@ -325,6 +369,12 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
/* Special handling for SPI GPIOs if used */
if (IS_ERR(desc))
desc = of_find_spi_gpio(dev, con_id, &of_flags);
+ if (IS_ERR(desc)) {
+ /* This quirk looks up flags and all */
+ desc = of_find_spi_cs_gpio(dev, con_id, idx, flags);
+ if (!IS_ERR(desc))
+ return desc;
+ }
/* Special handling for regulator GPIOs if used */
if (IS_ERR(desc) && PTR_ERR(desc) != -EPROBE_DEFER)
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index e013d417a936..3ee99d070608 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1644,39 +1644,47 @@ EXPORT_SYMBOL_GPL(gpiochip_irqchip_irq_valid);
/**
* gpiochip_set_cascaded_irqchip() - connects a cascaded irqchip to a gpiochip
- * @gpiochip: the gpiochip to set the irqchip chain to
+ * @gc: the gpiochip to set the irqchip chain to
* @parent_irq: the irq number corresponding to the parent IRQ for this
* chained irqchip
* @parent_handler: the parent interrupt handler for the accumulated IRQ
* coming out of the gpiochip. If the interrupt is nested rather than
* cascaded, pass NULL in this handler argument
*/
-static void gpiochip_set_cascaded_irqchip(struct gpio_chip *gpiochip,
+static void gpiochip_set_cascaded_irqchip(struct gpio_chip *gc,
unsigned int parent_irq,
irq_flow_handler_t parent_handler)
{
- if (!gpiochip->irq.domain) {
- chip_err(gpiochip, "called %s before setting up irqchip\n",
+ struct gpio_irq_chip *girq = &gc->irq;
+ struct device *dev = &gc->gpiodev->dev;
+
+ if (!girq->domain) {
+ chip_err(gc, "called %s before setting up irqchip\n",
__func__);
return;
}
if (parent_handler) {
- if (gpiochip->can_sleep) {
- chip_err(gpiochip,
+ if (gc->can_sleep) {
+ chip_err(gc,
"you cannot have chained interrupts on a chip that may sleep\n");
return;
}
+ girq->parents = devm_kcalloc(dev, 1,
+ sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents) {
+ chip_err(gc, "out of memory allocating parent IRQ\n");
+ return;
+ }
+ girq->parents[0] = parent_irq;
+ girq->num_parents = 1;
/*
* The parent irqchip is already using the chip_data for this
* irqchip, so our callbacks simply use the handler_data.
*/
irq_set_chained_handler_and_data(parent_irq, parent_handler,
- gpiochip);
-
- gpiochip->irq.parent_irq = parent_irq;
- gpiochip->irq.parents = &gpiochip->irq.parent_irq;
- gpiochip->irq.num_parents = 1;
+ gc);
}
}
@@ -2503,7 +2511,11 @@ EXPORT_SYMBOL_GPL(gpiochip_is_requested);
* @chip: GPIO chip
* @hwnum: hardware number of the GPIO for which to request the descriptor
* @label: label for the GPIO
- * @flags: flags for this GPIO or 0 if default
+ * @lflags: lookup flags for this GPIO or 0 if default, this can be used to
+ * specify things like line inversion semantics with the machine flags
+ * such as GPIO_OUT_LOW
+ * @dflags: descriptor request flags for this GPIO or 0 if default, this
+ * can be used to specify consumer semantics such as open drain
*
* Function allows GPIO chip drivers to request and use their own GPIO
* descriptors via gpiolib API. Difference to gpiod_request() is that this
@@ -2517,9 +2529,9 @@ EXPORT_SYMBOL_GPL(gpiochip_is_requested);
*/
struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *chip, u16 hwnum,
const char *label,
- enum gpiod_flags flags)
+ enum gpio_lookup_flags lflags,
+ enum gpiod_flags dflags)
{
- unsigned long lflags = GPIO_LOOKUP_FLAGS_DEFAULT;
struct gpio_desc *desc = gpiochip_get_desc(chip, hwnum);
int err;
@@ -2532,7 +2544,7 @@ struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *chip, u16 hwnum,
if (err < 0)
return ERR_PTR(err);
- err = gpiod_configure_flags(desc, label, lflags, flags);
+ err = gpiod_configure_flags(desc, label, lflags, dflags);
if (err) {
chip_err(chip, "setup of own GPIO %s failed\n", label);
gpiod_free_commit(desc);
@@ -3019,13 +3031,13 @@ int gpiod_get_array_value_complex(bool raw, bool can_sleep,
* Return the GPIO's raw value, i.e. the value of the physical line disregarding
* its ACTIVE_LOW status, or negative errno on failure.
*
- * This function should be called from contexts where we cannot sleep, and will
+ * This function can be called from contexts where we cannot sleep, and will
* complain if the GPIO chip functions potentially sleep.
*/
int gpiod_get_raw_value(const struct gpio_desc *desc)
{
VALIDATE_DESC(desc);
- /* Should be using gpio_get_value_cansleep() */
+ /* Should be using gpiod_get_raw_value_cansleep() */
WARN_ON(desc->gdev->chip->can_sleep);
return gpiod_get_raw_value_commit(desc);
}
@@ -3038,7 +3050,7 @@ EXPORT_SYMBOL_GPL(gpiod_get_raw_value);
* Return the GPIO's logical value, i.e. taking the ACTIVE_LOW status into
* account, or negative errno on failure.
*
- * This function should be called from contexts where we cannot sleep, and will
+ * This function can be called from contexts where we cannot sleep, and will
* complain if the GPIO chip functions potentially sleep.
*/
int gpiod_get_value(const struct gpio_desc *desc)
@@ -3046,7 +3058,7 @@ int gpiod_get_value(const struct gpio_desc *desc)
int value;
VALIDATE_DESC(desc);
- /* Should be using gpio_get_value_cansleep() */
+ /* Should be using gpiod_get_value_cansleep() */
WARN_ON(desc->gdev->chip->can_sleep);
value = gpiod_get_raw_value_commit(desc);
@@ -3071,7 +3083,7 @@ EXPORT_SYMBOL_GPL(gpiod_get_value);
* without regard for their ACTIVE_LOW status. Return 0 in case of success,
* else an error code.
*
- * This function should be called from contexts where we cannot sleep,
+ * This function can be called from contexts where we cannot sleep,
* and it will complain if the GPIO chip functions potentially sleep.
*/
int gpiod_get_raw_array_value(unsigned int array_size,
@@ -3097,7 +3109,7 @@ EXPORT_SYMBOL_GPL(gpiod_get_raw_array_value);
* Read the logical values of the GPIOs, i.e. taking their ACTIVE_LOW status
* into account. Return 0 in case of success, else an error code.
*
- * This function should be called from contexts where we cannot sleep,
+ * This function can be called from contexts where we cannot sleep,
* and it will complain if the GPIO chip functions potentially sleep.
*/
int gpiod_get_array_value(unsigned int array_size,
@@ -3311,13 +3323,13 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
* Set the raw value of the GPIO, i.e. the value of its physical line without
* regard for its ACTIVE_LOW status.
*
- * This function should be called from contexts where we cannot sleep, and will
+ * This function can be called from contexts where we cannot sleep, and will
* complain if the GPIO chip functions potentially sleep.
*/
void gpiod_set_raw_value(struct gpio_desc *desc, int value)
{
VALIDATE_DESC_VOID(desc);
- /* Should be using gpiod_set_value_cansleep() */
+ /* Should be using gpiod_set_raw_value_cansleep() */
WARN_ON(desc->gdev->chip->can_sleep);
gpiod_set_raw_value_commit(desc, value);
}
@@ -3352,12 +3364,13 @@ static void gpiod_set_value_nocheck(struct gpio_desc *desc, int value)
* Set the logical value of the GPIO, i.e. taking its ACTIVE_LOW,
* OPEN_DRAIN and OPEN_SOURCE flags into account.
*
- * This function should be called from contexts where we cannot sleep, and will
+ * This function can be called from contexts where we cannot sleep, and will
* complain if the GPIO chip functions potentially sleep.
*/
void gpiod_set_value(struct gpio_desc *desc, int value)
{
VALIDATE_DESC_VOID(desc);
+ /* Should be using gpiod_set_value_cansleep() */
WARN_ON(desc->gdev->chip->can_sleep);
gpiod_set_value_nocheck(desc, value);
}
@@ -3373,7 +3386,7 @@ EXPORT_SYMBOL_GPL(gpiod_set_value);
* Set the raw values of the GPIOs, i.e. the values of the physical lines
* without regard for their ACTIVE_LOW status.
*
- * This function should be called from contexts where we cannot sleep, and will
+ * This function can be called from contexts where we cannot sleep, and will
* complain if the GPIO chip functions potentially sleep.
*/
int gpiod_set_raw_array_value(unsigned int array_size,
@@ -3398,7 +3411,7 @@ EXPORT_SYMBOL_GPL(gpiod_set_raw_array_value);
* Set the logical values of the GPIOs, i.e. taking their ACTIVE_LOW status
* into account.
*
- * This function should be called from contexts where we cannot sleep, and will
+ * This function can be called from contexts where we cannot sleep, and will
* complain if the GPIO chip functions potentially sleep.
*/
int gpiod_set_array_value(unsigned int array_size,
@@ -4244,8 +4257,7 @@ EXPORT_SYMBOL_GPL(gpiod_get_index);
*
* Returns:
* On successful request the GPIO pin is configured in accordance with
- * provided @dflags. If the node does not have the requested GPIO
- * property, NULL is returned.
+ * provided @dflags.
*
* In case of error an ERR_PTR() is returned.
*/
@@ -4267,9 +4279,6 @@ struct gpio_desc *gpiod_get_from_of_node(struct device_node *node,
index, &flags);
if (!desc || IS_ERR(desc)) {
- /* If it is not there, just return NULL */
- if (PTR_ERR(desc) == -ENOENT)
- return NULL;
return desc;
}
@@ -4420,15 +4429,8 @@ int gpiod_hog(struct gpio_desc *desc, const char *name,
chip = gpiod_to_chip(desc);
hwnum = gpio_chip_hwgpio(desc);
- /*
- * FIXME: not very elegant that we call gpiod_configure_flags()
- * twice here (once inside gpiochip_request_own_desc() and
- * again here), but the gpiochip_request_own_desc() is external
- * and cannot really pass the lflags so this is the lesser evil
- * at the moment. Pass zero as dflags on this first call so we
- * don't screw anything up.
- */
- local_desc = gpiochip_request_own_desc(chip, hwnum, name, 0);
+ local_desc = gpiochip_request_own_desc(chip, hwnum, name,
+ lflags, dflags);
if (IS_ERR(local_desc)) {
status = PTR_ERR(local_desc);
pr_err("requesting hog GPIO %s (chip %s, offset %d) failed, %d\n",
@@ -4436,14 +4438,6 @@ int gpiod_hog(struct gpio_desc *desc, const char *name,
return status;
}
- status = gpiod_configure_flags(desc, name, lflags, dflags);
- if (status < 0) {
- pr_err("setup of hog GPIO %s (chip %s, offset %d) failed, %d\n",
- name, chip->label, hwnum, status);
- gpiochip_free_own_desc(desc);
- return status;
- }
-
/* Mark GPIO as hogged so it can be identified and removed later */
set_bit(FLAG_IS_HOGGED, &desc->flags);
@@ -4805,8 +4799,8 @@ static const struct file_operations gpiolib_operations = {
static int __init gpiolib_debugfs_init(void)
{
/* /sys/kernel/debug/gpio */
- (void) debugfs_create_file("gpio", S_IFREG | S_IRUGO,
- NULL, NULL, &gpiolib_operations);
+ debugfs_create_file("gpio", S_IFREG | S_IRUGO, NULL, NULL,
+ &gpiolib_operations);
return 0;
}
subsys_initcall(gpiolib_debugfs_init);
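Splitting the old single flags argument into lookup flags and descriptor flags is what allows gpiod_hog() above to drop its second gpiod_configure_flags() call, and it changes every caller of gpiochip_request_own_desc(). A caller sketch against the new prototype, with an illustrative label:

#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
#include <linux/gpio/machine.h>

static struct gpio_desc *example_claim_event_pin(struct gpio_chip *chip,
						 u16 hwnum)
{
	/* lookup flags (polarity etc.) and direction now go in one call */
	return gpiochip_request_own_desc(chip, hwnum, "example:event",
					 GPIO_ACTIVE_HIGH, GPIOD_IN);
}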
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index 7a65dad43932..7c52c2442173 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -210,7 +210,7 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
struct gpio_array *array_info,
unsigned long *value_bitmap);
-extern struct spinlock gpio_lock;
+extern spinlock_t gpio_lock;
extern struct list_head gpio_devices;
struct gpio_desc {
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 36f900d63979..e20e2956f620 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -141,7 +141,7 @@ config DRM_LOAD_EDID_FIRMWARE
monitor are unable to provide appropriate EDID data. Since this
feature is provided as a workaround for broken hardware, the
default case is N. Details and instructions how to build your own
- EDID data are given in Documentation/EDID/HOWTO.txt.
+ EDID data are given in Documentation/EDID/howto.rst.
config DRM_DP_CEC
bool "Enable DisplayPort CEC-Tunneling-over-AUX HDMI support"
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index b610e3b30d95..2f18c64d531f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1959,25 +1959,6 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
mutex_unlock(&adev->srbm_mutex);
gfx_v9_0_init_compute_vmid(adev);
-
- mutex_lock(&adev->grbm_idx_mutex);
- /*
- * making sure that the following register writes will be broadcasted
- * to all the shaders
- */
- gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
-
- WREG32_SOC15(GC, 0, mmPA_SC_FIFO_SIZE,
- (adev->gfx.config.sc_prim_fifo_size_frontend <<
- PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) |
- (adev->gfx.config.sc_prim_fifo_size_backend <<
- PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) |
- (adev->gfx.config.sc_hiz_tile_fifo_size <<
- PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) |
- (adev->gfx.config.sc_earlyz_tile_fifo_size <<
- PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT));
- mutex_unlock(&adev->grbm_idx_mutex);
-
}
static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 083bd8114db1..dd6b4b0b5f30 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -837,7 +837,7 @@ static int kfd_ioctl_get_clock_counters(struct file *filep,
/* No access to rdtsc. Using raw monotonic time */
args->cpu_clock_counter = ktime_get_raw_ns();
- args->system_clock_counter = ktime_get_boot_ns();
+ args->system_clock_counter = ktime_get_boottime_ns();
/* Since the counter is in nano-seconds we use 1GHz frequency */
args->system_clock_freq = 1000000000;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index f1d326caf69e..a7e8340baf90 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -326,7 +326,7 @@ int hwmgr_resume(struct pp_hwmgr *hwmgr)
if (ret)
return ret;
- ret = psm_adjust_power_state_dynamic(hwmgr, true, NULL);
+ ret = psm_adjust_power_state_dynamic(hwmgr, false, NULL);
return ret;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
index ae64ff7153d6..1cd5a8b5cdc1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
@@ -916,8 +916,10 @@ static int init_thermal_controller(
PHM_PlatformCaps_ThermalController
);
- if (0 == powerplay_table->usFanTableOffset)
+ if (0 == powerplay_table->usFanTableOffset) {
+ hwmgr->thermal_controller.use_hw_fan_control = 1;
return 0;
+ }
fan_table = (const PPTable_Generic_SubTable_Header *)
(((unsigned long)powerplay_table) +
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index c92999aac07c..eccb26fddbd0 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -694,6 +694,7 @@ struct pp_thermal_controller_info {
uint8_t ucType;
uint8_t ucI2cLine;
uint8_t ucI2cAddress;
+ uint8_t use_hw_fan_control;
struct pp_fan_info fanInfo;
struct pp_advance_fan_control_parameters advanceFanControlParameters;
};
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index 2d4cfe14f72e..29e641c6a5db 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -2092,6 +2092,10 @@ static int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
return 0;
}
+ /* use hardware fan control */
+ if (hwmgr->thermal_controller.use_hw_fan_control)
+ return 0;
+
tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.
usPWMMin * duty100;
do_div(tmp64, 10000);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 72d01e873160..5418a1a87b2c 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -760,7 +760,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
if (IS_ERR(gpu->cmdbuf_suballoc)) {
dev_err(gpu->dev, "Failed to create cmdbuf suballocator\n");
ret = PTR_ERR(gpu->cmdbuf_suballoc);
- goto fail;
+ goto destroy_iommu;
}
/* Create buffer: */
@@ -768,7 +768,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
PAGE_SIZE);
if (ret) {
dev_err(gpu->dev, "could not create command buffer\n");
- goto destroy_iommu;
+ goto destroy_suballoc;
}
if (gpu->mmu->version == ETNAVIV_IOMMU_V1 &&
@@ -800,6 +800,9 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
free_buffer:
etnaviv_cmdbuf_free(&gpu->buffer);
gpu->buffer.suballoc = NULL;
+destroy_suballoc:
+ etnaviv_cmdbuf_suballoc_destroy(gpu->cmdbuf_suballoc);
+ gpu->cmdbuf_suballoc = NULL;
destroy_iommu:
etnaviv_iommu_destroy(gpu->mmu);
gpu->mmu = NULL;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 029fd8ec1857..f0d45ccc1aac 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1888,12 +1888,12 @@ static int ring_request_alloc(struct i915_request *request)
*/
request->reserved_space += LEGACY_REQUEST_SIZE;
- ret = switch_context(request);
+ /* Unconditionally invalidate GPU caches and TLBs. */
+ ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
if (ret)
return ret;
- /* Unconditionally invalidate GPU caches and TLBs. */
- ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
+ ret = switch_context(request);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 9cc1d678674f..c436a28d50e4 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -91,14 +91,14 @@ static void ipu_crtc_atomic_disable(struct drm_crtc *crtc,
ipu_dc_disable(ipu);
ipu_prg_disable(ipu);
+ drm_crtc_vblank_off(crtc);
+
spin_lock_irq(&crtc->dev->event_lock);
- if (crtc->state->event) {
+ if (crtc->state->event && !crtc->state->active) {
drm_crtc_send_vblank_event(crtc, crtc->state->event);
crtc->state->event = NULL;
}
spin_unlock_irq(&crtc->dev->event_lock);
-
- drm_crtc_vblank_off(crtc);
}
static void imx_drm_crtc_reset(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index d11e2281dde6..7e43b25785f7 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -63,7 +63,7 @@ static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
return 0;
err_free:
- drm_gem_object_put_unlocked(&shmem->base);
+ drm_gem_handle_delete(file, args->handle);
return ret;
}
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 718b26276dbd..9f385979d1e6 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -58,6 +58,9 @@
struct vc4_hdmi_audio {
struct snd_soc_card card;
struct snd_soc_dai_link link;
+ struct snd_soc_dai_link_component cpu;
+ struct snd_soc_dai_link_component codec;
+ struct snd_soc_dai_link_component platform;
int samplerate;
int channels;
struct snd_dmaengine_dai_dma_data dma_data;
@@ -1085,12 +1088,20 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *hdmi)
return ret;
}
+ dai_link->cpus = &hdmi->audio.cpu;
+ dai_link->codecs = &hdmi->audio.codec;
+ dai_link->platforms = &hdmi->audio.platform;
+
+ dai_link->num_cpus = 1;
+ dai_link->num_codecs = 1;
+ dai_link->num_platforms = 1;
+
dai_link->name = "MAI";
dai_link->stream_name = "MAI PCM";
- dai_link->codec_dai_name = vc4_hdmi_audio_codec_dai_drv.name;
- dai_link->cpu_dai_name = dev_name(dev);
- dai_link->codec_name = dev_name(dev);
- dai_link->platform_name = dev_name(dev);
+ dai_link->codecs->dai_name = vc4_hdmi_audio_codec_dai_drv.name;
+ dai_link->cpus->dai_name = dev_name(dev);
+ dai_link->codecs->name = dev_name(dev);
+ dai_link->platforms->name = dev_name(dev);
card->dai_link = dai_link;
card->num_links = 1;
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index e62fe24b1a2e..5bb0f0a084e9 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -619,11 +619,11 @@ static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
output = vgdev->outputs + scanout;
new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);
+ drm_connector_update_edid_property(&output->conn, new_edid);
spin_lock(&vgdev->display_info_lock);
old_edid = output->edid;
output->edid = new_edid;
- drm_connector_update_edid_property(&output->conn, output->edid);
spin_unlock(&vgdev->display_info_lock);
kfree(old_edid);
diff --git a/drivers/gpu/vga/Kconfig b/drivers/gpu/vga/Kconfig
index 84ab482d0db6..c8c770b05ed9 100644
--- a/drivers/gpu/vga/Kconfig
+++ b/drivers/gpu/vga/Kconfig
@@ -23,6 +23,7 @@ config VGA_SWITCHEROO
depends on X86
depends on ACPI
depends on PCI
+ depends on (FRAMEBUFFER_CONSOLE=n || FB=y)
select VGA_ARB
help
Many laptops released in 2008/9/10 have two GPUs with a multiplexer
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index a132c37d7334..65d7541c413a 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -35,6 +35,7 @@
#include <linux/debugfs.h>
#include <linux/fb.h>
#include <linux/fs.h>
+#include <linux/fbcon.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_domain.h>
@@ -736,14 +737,8 @@ static int vga_switchto_stage2(struct vga_switcheroo_client *new_client)
if (!active->driver_power_control)
set_audio_state(active->id, VGA_SWITCHEROO_OFF);
- if (new_client->fb_info) {
- struct fb_event event;
-
- console_lock();
- event.info = new_client->fb_info;
- fb_notifier_call_chain(FB_EVENT_REMAP_ALL_CONSOLE, &event);
- console_unlock();
- }
+ if (new_client->fb_info)
+ fbcon_remap_all(new_client->fb_info);
mutex_lock(&vgasr_priv.mux_hw_lock);
ret = vgasr_priv.handler->switchto(new_client->id);
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index 8bbe3d0cbe5d..2310c96ccf4a 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -16,7 +16,8 @@
* https://www.silabs.com/documents/public/application-notes/an495-cp2112-interface-specification.pdf
*/
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/machine.h>
#include <linux/gpio/driver.h>
#include <linux/hid.h>
#include <linux/hidraw.h>
@@ -1195,7 +1196,9 @@ static int __maybe_unused cp2112_allocate_irq(struct cp2112_device *dev,
return -EINVAL;
dev->desc[pin] = gpiochip_request_own_desc(&dev->gc, pin,
- "HID/I2C:Event", 0);
+ "HID/I2C:Event",
+ GPIO_ACTIVE_HIGH,
+ GPIOD_IN);
if (IS_ERR(dev->desc[pin])) {
dev_err(dev->gc.parent, "Failed to request GPIO\n");
return PTR_ERR(dev->desc[pin]);
diff --git a/drivers/hid/hid-picolcd_fb.c b/drivers/hid/hid-picolcd_fb.c
index 6897e14e7cb7..e162a668fb7e 100644
--- a/drivers/hid/hid-picolcd_fb.c
+++ b/drivers/hid/hid-picolcd_fb.c
@@ -512,10 +512,8 @@ int picolcd_init_framebuffer(struct picolcd_data *data)
sizeof(struct fb_deferred_io) +
sizeof(struct picolcd_fb_data) +
PICOLCDFB_SIZE, dev);
- if (info == NULL) {
- dev_err(dev, "failed to allocate a framebuffer\n");
+ if (!info)
goto err_nomem;
- }
info->fbdefio = info->par;
*info->fbdefio = picolcd_fb_defio;
diff --git a/drivers/hv/Kconfig b/drivers/hv/Kconfig
index 1c1a2514d6f3..9a59957922d4 100644
--- a/drivers/hv/Kconfig
+++ b/drivers/hv/Kconfig
@@ -6,10 +6,14 @@ config HYPERV
tristate "Microsoft Hyper-V client drivers"
depends on X86 && ACPI && X86_LOCAL_APIC && HYPERVISOR_GUEST
select PARAVIRT
+ select X86_HV_CALLBACK_VECTOR
help
Select this option to run Linux as a Hyper-V client operating
system.
+config HYPERV_TIMER
+ def_bool HYPERV
+
config HYPERV_TSCPAGE
def_bool HYPERV && X86_64
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index a1ea482183e8..6188fb7dda42 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -16,6 +16,7 @@
#include <linux/version.h>
#include <linux/random.h>
#include <linux/clockchips.h>
+#include <clocksource/hyperv_timer.h>
#include <asm/mshyperv.h>
#include "hyperv_vmbus.h"
@@ -23,21 +24,6 @@
struct hv_context hv_context;
/*
- * If false, we're using the old mechanism for stimer0 interrupts
- * where it sends a VMbus message when it expires. The old
- * mechanism is used when running on older versions of Hyper-V
- * that don't support Direct Mode. While Hyper-V provides
- * four stimer's per CPU, Linux uses only stimer0.
- */
-static bool direct_mode_enabled;
-static int stimer0_irq;
-static int stimer0_vector;
-
-#define HV_TIMER_FREQUENCY (10 * 1000 * 1000) /* 100ns period */
-#define HV_MAX_MAX_DELTA_TICKS 0xffffffff
-#define HV_MIN_DELTA_TICKS 1
-
-/*
* hv_init - Main initialization routine.
*
* This routine must be called before any other routines in here are called
@@ -47,9 +33,6 @@ int hv_init(void)
hv_context.cpu_context = alloc_percpu(struct hv_per_cpu_context);
if (!hv_context.cpu_context)
return -ENOMEM;
-
- direct_mode_enabled = ms_hyperv.misc_features &
- HV_STIMER_DIRECT_MODE_AVAILABLE;
return 0;
}
@@ -88,89 +71,6 @@ int hv_post_message(union hv_connection_id connection_id,
return status & 0xFFFF;
}
-/*
- * ISR for when stimer0 is operating in Direct Mode. Direct Mode
- * does not use VMbus or any VMbus messages, so process here and not
- * in the VMbus driver code.
- */
-
-static void hv_stimer0_isr(void)
-{
- struct hv_per_cpu_context *hv_cpu;
-
- hv_cpu = this_cpu_ptr(hv_context.cpu_context);
- hv_cpu->clk_evt->event_handler(hv_cpu->clk_evt);
- add_interrupt_randomness(stimer0_vector, 0);
-}
-
-static int hv_ce_set_next_event(unsigned long delta,
- struct clock_event_device *evt)
-{
- u64 current_tick;
-
- WARN_ON(!clockevent_state_oneshot(evt));
-
- current_tick = hyperv_cs->read(NULL);
- current_tick += delta;
- hv_init_timer(0, current_tick);
- return 0;
-}
-
-static int hv_ce_shutdown(struct clock_event_device *evt)
-{
- hv_init_timer(0, 0);
- hv_init_timer_config(0, 0);
- if (direct_mode_enabled)
- hv_disable_stimer0_percpu_irq(stimer0_irq);
-
- return 0;
-}
-
-static int hv_ce_set_oneshot(struct clock_event_device *evt)
-{
- union hv_stimer_config timer_cfg;
-
- timer_cfg.as_uint64 = 0;
- timer_cfg.enable = 1;
- timer_cfg.auto_enable = 1;
- if (direct_mode_enabled) {
- /*
- * When it expires, the timer will directly interrupt
- * on the specified hardware vector/IRQ.
- */
- timer_cfg.direct_mode = 1;
- timer_cfg.apic_vector = stimer0_vector;
- hv_enable_stimer0_percpu_irq(stimer0_irq);
- } else {
- /*
- * When it expires, the timer will generate a VMbus message,
- * to be handled by the normal VMbus interrupt handler.
- */
- timer_cfg.direct_mode = 0;
- timer_cfg.sintx = VMBUS_MESSAGE_SINT;
- }
- hv_init_timer_config(0, timer_cfg.as_uint64);
- return 0;
-}
-
-static void hv_init_clockevent_device(struct clock_event_device *dev, int cpu)
-{
- dev->name = "Hyper-V clockevent";
- dev->features = CLOCK_EVT_FEAT_ONESHOT;
- dev->cpumask = cpumask_of(cpu);
- dev->rating = 1000;
- /*
- * Avoid settint dev->owner = THIS_MODULE deliberately as doing so will
- * result in clockevents_config_and_register() taking additional
- * references to the hv_vmbus module making it impossible to unload.
- */
-
- dev->set_state_shutdown = hv_ce_shutdown;
- dev->set_state_oneshot = hv_ce_set_oneshot;
- dev->set_next_event = hv_ce_set_next_event;
-}
-
-
int hv_synic_alloc(void)
{
int cpu;
@@ -199,14 +99,6 @@ int hv_synic_alloc(void)
tasklet_init(&hv_cpu->msg_dpc,
vmbus_on_msg_dpc, (unsigned long) hv_cpu);
- hv_cpu->clk_evt = kzalloc(sizeof(struct clock_event_device),
- GFP_KERNEL);
- if (hv_cpu->clk_evt == NULL) {
- pr_err("Unable to allocate clock event device\n");
- goto err;
- }
- hv_init_clockevent_device(hv_cpu->clk_evt, cpu);
-
hv_cpu->synic_message_page =
(void *)get_zeroed_page(GFP_ATOMIC);
if (hv_cpu->synic_message_page == NULL) {
@@ -229,11 +121,6 @@ int hv_synic_alloc(void)
INIT_LIST_HEAD(&hv_cpu->chan_list);
}
- if (direct_mode_enabled &&
- hv_setup_stimer0_irq(&stimer0_irq, &stimer0_vector,
- hv_stimer0_isr))
- goto err;
-
return 0;
err:
/*
@@ -252,7 +139,6 @@ void hv_synic_free(void)
struct hv_per_cpu_context *hv_cpu
= per_cpu_ptr(hv_context.cpu_context, cpu);
- kfree(hv_cpu->clk_evt);
free_page((unsigned long)hv_cpu->synic_event_page);
free_page((unsigned long)hv_cpu->synic_message_page);
free_page((unsigned long)hv_cpu->post_msg_page);
@@ -311,36 +197,9 @@ int hv_synic_init(unsigned int cpu)
hv_set_synic_state(sctrl.as_uint64);
- /*
- * Register the per-cpu clockevent source.
- */
- if (ms_hyperv.features & HV_MSR_SYNTIMER_AVAILABLE)
- clockevents_config_and_register(hv_cpu->clk_evt,
- HV_TIMER_FREQUENCY,
- HV_MIN_DELTA_TICKS,
- HV_MAX_MAX_DELTA_TICKS);
- return 0;
-}
-
-/*
- * hv_synic_clockevents_cleanup - Cleanup clockevent devices
- */
-void hv_synic_clockevents_cleanup(void)
-{
- int cpu;
+ hv_stimer_init(cpu);
- if (!(ms_hyperv.features & HV_MSR_SYNTIMER_AVAILABLE))
- return;
-
- if (direct_mode_enabled)
- hv_remove_stimer0_irq(stimer0_irq);
-
- for_each_present_cpu(cpu) {
- struct hv_per_cpu_context *hv_cpu
- = per_cpu_ptr(hv_context.cpu_context, cpu);
-
- clockevents_unbind_device(hv_cpu->clk_evt, cpu);
- }
+ return 0;
}
/*
@@ -388,14 +247,7 @@ int hv_synic_cleanup(unsigned int cpu)
if (channel_found && vmbus_connection.conn_state == CONNECTED)
return -EBUSY;
- /* Turn off clockevent device */
- if (ms_hyperv.features & HV_MSR_SYNTIMER_AVAILABLE) {
- struct hv_per_cpu_context *hv_cpu
- = this_cpu_ptr(hv_context.cpu_context);
-
- clockevents_unbind_device(hv_cpu->clk_evt, cpu);
- hv_ce_shutdown(hv_cpu->clk_evt);
- }
+ hv_stimer_cleanup(cpu);
hv_get_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index 7d3d31f099ea..e32681ee7b9f 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -17,6 +17,7 @@
#include <linux/hyperv.h>
#include <linux/clockchips.h>
#include <linux/ptp_clock_kernel.h>
+#include <clocksource/hyperv_timer.h>
#include <asm/mshyperv.h>
#include "hyperv_vmbus.h"
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index b8e1ff05f110..362e70e9d145 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -138,7 +138,6 @@ struct hv_per_cpu_context {
* per-cpu list of the channels based on their CPU affinity.
*/
struct list_head chan_list;
- struct clock_event_device *clk_evt;
};
struct hv_context {
@@ -176,8 +175,6 @@ extern int hv_synic_init(unsigned int cpu);
extern int hv_synic_cleanup(unsigned int cpu);
-extern void hv_synic_clockevents_cleanup(void);
-
/* Interface */
void hv_ringbuffer_pre_init(struct vmbus_channel *channel);
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 92b1874b3eb3..72d5a7cde7ea 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -30,6 +30,7 @@
#include <linux/kdebug.h>
#include <linux/efi.h>
#include <linux/random.h>
+#include <clocksource/hyperv_timer.h>
#include "hyperv_vmbus.h"
struct vmbus_dynid {
@@ -955,17 +956,6 @@ static void vmbus_onmessage_work(struct work_struct *work)
kfree(ctx);
}
-static void hv_process_timer_expiration(struct hv_message *msg,
- struct hv_per_cpu_context *hv_cpu)
-{
- struct clock_event_device *dev = hv_cpu->clk_evt;
-
- if (dev->event_handler)
- dev->event_handler(dev);
-
- vmbus_signal_eom(msg, HVMSG_TIMER_EXPIRED);
-}
-
void vmbus_on_msg_dpc(unsigned long data)
{
struct hv_per_cpu_context *hv_cpu = (void *)data;
@@ -1159,9 +1149,10 @@ static void vmbus_isr(void)
/* Check if there are actual msgs to be processed */
if (msg->header.message_type != HVMSG_NONE) {
- if (msg->header.message_type == HVMSG_TIMER_EXPIRED)
- hv_process_timer_expiration(msg, hv_cpu);
- else
+ if (msg->header.message_type == HVMSG_TIMER_EXPIRED) {
+ hv_stimer0_isr();
+ vmbus_signal_eom(msg, HVMSG_TIMER_EXPIRED);
+ } else
tasklet_schedule(&hv_cpu->msg_dpc);
}
@@ -1263,14 +1254,19 @@ static int vmbus_bus_init(void)
ret = hv_synic_alloc();
if (ret)
goto err_alloc;
+
+ ret = hv_stimer_alloc(VMBUS_MESSAGE_SINT);
+ if (ret < 0)
+ goto err_alloc;
+
/*
- * Initialize the per-cpu interrupt state and
- * connect to the host.
+ * Initialize the per-cpu interrupt state and stimer state.
+ * Then connect to the host.
*/
ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hyperv/vmbus:online",
hv_synic_init, hv_synic_cleanup);
if (ret < 0)
- goto err_alloc;
+ goto err_cpuhp;
hyperv_cpuhp_online = ret;
ret = vmbus_connect();
@@ -1318,6 +1314,8 @@ static int vmbus_bus_init(void)
err_connect:
cpuhp_remove_state(hyperv_cpuhp_online);
+err_cpuhp:
+ hv_stimer_free();
err_alloc:
hv_synic_free();
hv_remove_vmbus_irq();
@@ -2064,7 +2062,7 @@ static struct acpi_driver vmbus_acpi_driver = {
static void hv_kexec_handler(void)
{
- hv_synic_clockevents_cleanup();
+ hv_stimer_global_cleanup();
vmbus_initiate_unload(false);
vmbus_connection.conn_state = DISCONNECTED;
/* Make sure conn_state is set as hv_synic_cleanup checks for it */
@@ -2075,6 +2073,8 @@ static void hv_kexec_handler(void)
static void hv_crash_handler(struct pt_regs *regs)
{
+ int cpu;
+
vmbus_initiate_unload(true);
/*
* In crash handler we can't schedule synic cleanup for all CPUs,
@@ -2082,7 +2082,9 @@ static void hv_crash_handler(struct pt_regs *regs)
* for kdump.
*/
vmbus_connection.conn_state = DISCONNECTED;
- hv_synic_cleanup(smp_processor_id());
+ cpu = smp_processor_id();
+ hv_stimer_cleanup(cpu);
+ hv_synic_cleanup(cpu);
hyperv_cleanup();
};
@@ -2131,7 +2133,7 @@ static void __exit vmbus_exit(void)
hv_remove_kexec_handler();
hv_remove_crash_handler();
vmbus_connection.conn_state = DISCONNECTED;
- hv_synic_clockevents_cleanup();
+ hv_stimer_global_cleanup();
vmbus_disconnect();
hv_remove_vmbus_irq();
for_each_online_cpu(cpu) {
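After this series the stimer/clockevent code lives in the clocksource driver (clocksource/hyperv_timer.h) and the VMBus driver only calls into it: hv_stimer_alloc() once at bus init (undone by hv_stimer_free()), hv_stimer_init()/hv_stimer_cleanup() per CPU from the SynIC hooks, hv_stimer0_isr() from the message path, and hv_stimer_global_cleanup() on unload or kexec. A fragment sketching only the message-path hand-off shown above:

#include <clocksource/hyperv_timer.h>

/* struct hv_message, HVMSG_TIMER_EXPIRED and vmbus_signal_eom() come from
 * the Hyper-V/VMBus headers; this fragment only illustrates the hand-off. */
static void example_handle_timer_message(struct hv_message *msg)
{
	if (msg->header.message_type == HVMSG_TIMER_EXPIRED) {
		hv_stimer0_isr();	/* clockevent work is in hyperv_timer now */
		vmbus_signal_eom(msg, HVMSG_TIMER_EXPIRED);
	}
}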
diff --git a/drivers/hwmon/adm1029.c b/drivers/hwmon/adm1029.c
index 388060ff85e7..f7752a5bef31 100644
--- a/drivers/hwmon/adm1029.c
+++ b/drivers/hwmon/adm1029.c
@@ -10,16 +10,6 @@
* Very rare chip please let me know if you use it
*
* http://www.analog.com/UploadedFiles/Data_Sheets/ADM1029.pdf
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation version 2 of the License
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
index 8dd5b1b8db60..ff64a39d56de 100644
--- a/drivers/hwmon/asus_atk0110.c
+++ b/drivers/hwmon/asus_atk0110.c
@@ -789,33 +789,16 @@ static const struct file_operations atk_debugfs_ggrp_fops = {
static void atk_debugfs_init(struct atk_data *data)
{
struct dentry *d;
- struct dentry *f;
data->debugfs.id = 0;
d = debugfs_create_dir("asus_atk0110", NULL);
- if (!d || IS_ERR(d))
- return;
- f = debugfs_create_x32("id", 0600, d, &data->debugfs.id);
- if (!f || IS_ERR(f))
- goto cleanup;
-
- f = debugfs_create_file_unsafe("gitm", 0400, d, data,
- &atk_debugfs_gitm);
- if (!f || IS_ERR(f))
- goto cleanup;
-
- f = debugfs_create_file("ggrp", 0400, d, data,
- &atk_debugfs_ggrp_fops);
- if (!f || IS_ERR(f))
- goto cleanup;
+ debugfs_create_x32("id", 0600, d, &data->debugfs.id);
+ debugfs_create_file_unsafe("gitm", 0400, d, data, &atk_debugfs_gitm);
+ debugfs_create_file("ggrp", 0400, d, data, &atk_debugfs_ggrp_fops);
data->debugfs.root = d;
-
- return;
-cleanup:
- debugfs_remove_recursive(d);
}
static void atk_debugfs_cleanup(struct atk_data *data)
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 4d0d6c86c12f..fe6618e49dc4 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -96,10 +96,10 @@ struct platform_data {
struct device_attribute name_attr;
};
-/* Keep track of how many package pointers we allocated in init() */
-static int max_packages __read_mostly;
-/* Array of package pointers. Serialized by cpu hotplug lock */
-static struct platform_device **pkg_devices;
+/* Keep track of how many zone pointers we allocated in init() */
+static int max_zones __read_mostly;
+/* Array of zone pointers. Serialized by cpu hotplug lock */
+static struct platform_device **zone_devices;
static ssize_t show_label(struct device *dev,
struct device_attribute *devattr, char *buf)
@@ -422,10 +422,10 @@ static int chk_ucode_version(unsigned int cpu)
static struct platform_device *coretemp_get_pdev(unsigned int cpu)
{
- int pkgid = topology_logical_package_id(cpu);
+ int id = topology_logical_die_id(cpu);
- if (pkgid >= 0 && pkgid < max_packages)
- return pkg_devices[pkgid];
+ if (id >= 0 && id < max_zones)
+ return zone_devices[id];
return NULL;
}
@@ -531,7 +531,7 @@ static int coretemp_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct platform_data *pdata;
- /* Initialize the per-package data structures */
+ /* Initialize the per-zone data structures */
pdata = devm_kzalloc(dev, sizeof(struct platform_data), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
@@ -566,13 +566,13 @@ static struct platform_driver coretemp_driver = {
static struct platform_device *coretemp_device_add(unsigned int cpu)
{
- int err, pkgid = topology_logical_package_id(cpu);
+ int err, zoneid = topology_logical_die_id(cpu);
struct platform_device *pdev;
- if (pkgid < 0)
+ if (zoneid < 0)
return ERR_PTR(-ENOMEM);
- pdev = platform_device_alloc(DRVNAME, pkgid);
+ pdev = platform_device_alloc(DRVNAME, zoneid);
if (!pdev)
return ERR_PTR(-ENOMEM);
@@ -582,7 +582,7 @@ static struct platform_device *coretemp_device_add(unsigned int cpu)
return ERR_PTR(err);
}
- pkg_devices[pkgid] = pdev;
+ zone_devices[zoneid] = pdev;
return pdev;
}
@@ -690,7 +690,7 @@ static int coretemp_cpu_offline(unsigned int cpu)
* the rest.
*/
if (cpumask_empty(&pd->cpumask)) {
- pkg_devices[topology_logical_package_id(cpu)] = NULL;
+ zone_devices[topology_logical_die_id(cpu)] = NULL;
platform_device_unregister(pdev);
return 0;
}
@@ -728,10 +728,10 @@ static int __init coretemp_init(void)
if (!x86_match_cpu(coretemp_ids))
return -ENODEV;
- max_packages = topology_max_packages();
- pkg_devices = kcalloc(max_packages, sizeof(struct platform_device *),
+ max_zones = topology_max_packages() * topology_max_die_per_package();
+ zone_devices = kcalloc(max_zones, sizeof(struct platform_device *),
GFP_KERNEL);
- if (!pkg_devices)
+ if (!zone_devices)
return -ENOMEM;
err = platform_driver_register(&coretemp_driver);
@@ -747,7 +747,7 @@ static int __init coretemp_init(void)
outdrv:
platform_driver_unregister(&coretemp_driver);
- kfree(pkg_devices);
+ kfree(zone_devices);
return err;
}
module_init(coretemp_init)
@@ -756,7 +756,7 @@ static void __exit coretemp_exit(void)
{
cpuhp_remove_state(coretemp_hp_online);
platform_driver_unregister(&coretemp_driver);
- kfree(pkg_devices);
+ kfree(zone_devices);
}
module_exit(coretemp_exit)
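Multi-die packages mean one hwmon "zone" per die rather than per package, so the lookup array is sized by packages times dies-per-package and indexed by the logical die id. The three hunks above reduce to the following sketch:

#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/topology.h>

static struct platform_device **zone_devices;
static int max_zones;

static int example_alloc_zones(void)
{
	max_zones = topology_max_packages() * topology_max_die_per_package();
	zone_devices = kcalloc(max_zones, sizeof(*zone_devices), GFP_KERNEL);
	return zone_devices ? 0 : -ENOMEM;
}

static struct platform_device *example_get_pdev(unsigned int cpu)
{
	int id = topology_logical_die_id(cpu);

	return (id >= 0 && id < max_zones) ? zone_devices[id] : NULL;
}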
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index 84753680a4e8..3ea4021f267c 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -54,8 +54,8 @@ static void fan_alarm_notify(struct work_struct *ws)
struct gpio_fan_data *fan_data =
container_of(ws, struct gpio_fan_data, alarm_work);
- sysfs_notify(&fan_data->dev->kobj, NULL, "fan1_alarm");
- kobject_uevent(&fan_data->dev->kobj, KOBJ_CHANGE);
+ sysfs_notify(&fan_data->hwmon_dev->kobj, NULL, "fan1_alarm");
+ kobject_uevent(&fan_data->hwmon_dev->kobj, KOBJ_CHANGE);
}
static irqreturn_t fan_alarm_irq_handler(int irq, void *dev_id)
@@ -510,13 +510,6 @@ static int gpio_fan_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, fan_data);
mutex_init(&fan_data->lock);
- /* Configure alarm GPIO if available. */
- if (fan_data->alarm_gpio) {
- err = fan_alarm_init(fan_data);
- if (err)
- return err;
- }
-
/* Configure control GPIOs if available. */
if (fan_data->gpios && fan_data->num_gpios > 0) {
if (!fan_data->speed || fan_data->num_speed <= 1)
@@ -524,7 +517,9 @@ static int gpio_fan_probe(struct platform_device *pdev)
err = fan_ctrl_init(fan_data);
if (err)
return err;
- devm_add_action_or_reset(dev, gpio_fan_stop, fan_data);
+ err = devm_add_action_or_reset(dev, gpio_fan_stop, fan_data);
+ if (err)
+ return err;
}
/* Make this driver part of hwmon class. */
@@ -535,6 +530,13 @@ static int gpio_fan_probe(struct platform_device *pdev)
if (IS_ERR(fan_data->hwmon_dev))
return PTR_ERR(fan_data->hwmon_dev);
+ /* Configure alarm GPIO if available. */
+ if (fan_data->alarm_gpio) {
+ err = fan_alarm_init(fan_data);
+ if (err)
+ return err;
+ }
+
/* Optional cooling device register for Device tree platforms */
fan_data->cdev = devm_thermal_of_cooling_device_register(dev, np,
"gpio-fan", fan_data, &gpio_fan_cool_ops);
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index 05e120e01cb4..1f3b30b085b9 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -651,6 +651,12 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
hwdev, j);
if (err) {
device_unregister(hdev);
+ /*
+ * Don't worry about hwdev;
+ * hwmon_dev_release(), called
+ * from device_unregister(),
+ * will free it.
+ */
goto ida_remove;
}
}
diff --git a/drivers/hwmon/ina3221.c b/drivers/hwmon/ina3221.c
index 55943b4dcc7b..0037e2bdacd6 100644
--- a/drivers/hwmon/ina3221.c
+++ b/drivers/hwmon/ina3221.c
@@ -713,8 +713,10 @@ static int ina3221_probe_from_dt(struct device *dev, struct ina3221_data *ina)
for_each_child_of_node(np, child) {
ret = ina3221_probe_child_from_dt(dev, child, ina);
- if (ret)
+ if (ret) {
+ of_node_put(child);
return ret;
+ }
}
return 0;
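for_each_child_of_node() holds a reference on the child it hands to the loop body and drops it on the next iteration; leaving the loop early therefore has to drop that reference by hand, which is exactly what the added of_node_put() does. The general pattern, with a hypothetical per-child parser:

#include <linux/of.h>

static int example_parse_child(struct device_node *child);	/* hypothetical */

static int example_parse_children(struct device_node *np)
{
	struct device_node *child;
	int ret;

	for_each_child_of_node(np, child) {
		ret = example_parse_child(child);
		if (ret) {
			of_node_put(child);	/* balance the loop's reference */
			return ret;
		}
	}

	return 0;
}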
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index e562a578f20e..9b3c9f390ef8 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -174,6 +174,7 @@ enum chips { lm90, adm1032, lm99, lm86, max6657, max6659, adt7461, max6680,
#define LM90_HAVE_EMERGENCY_ALARM (1 << 5)/* emergency alarm */
#define LM90_HAVE_TEMP3 (1 << 6) /* 3rd temperature sensor */
#define LM90_HAVE_BROKEN_ALERT (1 << 7) /* Broken alert */
+#define LM90_PAUSE_FOR_CONFIG (1 << 8) /* Pause conversion for config */
/* LM90 status */
#define LM90_STATUS_LTHRM (1 << 0) /* local THERM limit tripped */
@@ -367,6 +368,7 @@ static const struct lm90_params lm90_params[] = {
.reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
},
[max6657] = {
+ .flags = LM90_PAUSE_FOR_CONFIG,
.alert_alarms = 0x7c,
.max_convrate = 8,
.reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
@@ -457,6 +459,7 @@ struct lm90_data {
unsigned int update_interval; /* in milliseconds */
+ u8 config; /* Current configuration register value */
u8 config_orig; /* Original configuration register value */
u8 convrate_orig; /* Original conversion rate register value */
u16 alert_alarms; /* Which alarm bits trigger ALERT# */
@@ -540,6 +543,21 @@ static int lm90_read16(struct i2c_client *client, u8 regh, u8 regl)
return (newh << 8) | l;
}
+static int lm90_update_confreg(struct lm90_data *data, u8 config)
+{
+ if (data->config != config) {
+ int err;
+
+ err = i2c_smbus_write_byte_data(data->client,
+ LM90_REG_W_CONFIG1,
+ config);
+ if (err)
+ return err;
+ data->config = config;
+ }
+ return 0;
+}
+
/*
* client->update_lock must be held when calling this function (unless we are
* in detection or initialization steps), and while a remote channel other
@@ -548,23 +566,39 @@ static int lm90_read16(struct i2c_client *client, u8 regh, u8 regl)
* various registers have different meanings as a result of selecting a
* non-default remote channel.
*/
-static inline int lm90_select_remote_channel(struct i2c_client *client,
- struct lm90_data *data,
- int channel)
+static int lm90_select_remote_channel(struct lm90_data *data, int channel)
{
- int config;
+ int err = 0;
if (data->kind == max6696) {
- config = lm90_read_reg(client, LM90_REG_R_CONFIG1);
- if (config < 0)
- return config;
- config &= ~0x08;
+ u8 config = data->config & ~0x08;
+
if (channel)
config |= 0x08;
- i2c_smbus_write_byte_data(client, LM90_REG_W_CONFIG1,
- config);
+ err = lm90_update_confreg(data, config);
}
- return 0;
+ return err;
+}
+
+static int lm90_write_convrate(struct lm90_data *data, int val)
+{
+ u8 config = data->config;
+ int err;
+
+ /* Save config and pause conversion */
+ if (data->flags & LM90_PAUSE_FOR_CONFIG) {
+ err = lm90_update_confreg(data, config | 0x40);
+ if (err < 0)
+ return err;
+ }
+
+ /* Set conv rate */
+ err = i2c_smbus_write_byte_data(data->client, LM90_REG_W_CONVRATE, val);
+
+ /* Revert change to config */
+ lm90_update_confreg(data, config);
+
+ return err;
}
/*
@@ -587,7 +621,7 @@ static int lm90_set_convrate(struct i2c_client *client, struct lm90_data *data,
if (interval >= update_interval * 3 / 4)
break;
- err = i2c_smbus_write_byte_data(client, LM90_REG_W_CONVRATE, i);
+ err = lm90_write_convrate(data, i);
data->update_interval = DIV_ROUND_CLOSEST(update_interval, 64);
return err;
}
@@ -658,7 +692,7 @@ static int lm90_update_limits(struct device *dev)
}
if (data->kind == max6696) {
- val = lm90_select_remote_channel(client, data, 1);
+ val = lm90_select_remote_channel(data, 1);
if (val < 0)
return val;
@@ -682,7 +716,7 @@ static int lm90_update_limits(struct device *dev)
return val;
data->temp11[REMOTE2_HIGH] = val << 8;
- lm90_select_remote_channel(client, data, 0);
+ lm90_select_remote_channel(data, 0);
}
return 0;
@@ -742,19 +776,19 @@ static int lm90_update_device(struct device *dev)
data->alarms = val; /* lower 8 bit of alarms */
if (data->kind == max6696) {
- val = lm90_select_remote_channel(client, data, 1);
+ val = lm90_select_remote_channel(data, 1);
if (val < 0)
return val;
val = lm90_read16(client, LM90_REG_R_REMOTE_TEMPH,
LM90_REG_R_REMOTE_TEMPL);
if (val < 0) {
- lm90_select_remote_channel(client, data, 0);
+ lm90_select_remote_channel(data, 0);
return val;
}
data->temp11[REMOTE2_TEMP] = val;
- lm90_select_remote_channel(client, data, 0);
+ lm90_select_remote_channel(data, 0);
val = lm90_read_reg(client, MAX6696_REG_R_STATUS2);
if (val < 0)
@@ -768,15 +802,9 @@ static int lm90_update_device(struct device *dev)
*/
if (!(data->config_orig & 0x80) &&
!(data->alarms & data->alert_alarms)) {
- val = lm90_read_reg(client, LM90_REG_R_CONFIG1);
- if (val < 0)
- return val;
-
- if (val & 0x80) {
+ if (data->config & 0x80) {
dev_dbg(&client->dev, "Re-enabling ALERT#\n");
- i2c_smbus_write_byte_data(client,
- LM90_REG_W_CONFIG1,
- val & ~0x80);
+ lm90_update_confreg(data, data->config & ~0x80);
}
}
@@ -994,7 +1022,7 @@ static int lm90_set_temp11(struct lm90_data *data, int index, long val)
else
data->temp11[index] = temp_to_s8(val) << 8;
- lm90_select_remote_channel(client, data, index >= 3);
+ lm90_select_remote_channel(data, index >= 3);
err = i2c_smbus_write_byte_data(client, regp->high,
data->temp11[index] >> 8);
if (err < 0)
@@ -1003,7 +1031,7 @@ static int lm90_set_temp11(struct lm90_data *data, int index, long val)
err = i2c_smbus_write_byte_data(client, regp->low,
data->temp11[index] & 0xff);
- lm90_select_remote_channel(client, data, 0);
+ lm90_select_remote_channel(data, 0);
return err;
}
@@ -1052,9 +1080,9 @@ static int lm90_set_temp8(struct lm90_data *data, int index, long val)
else
data->temp8[index] = temp_to_s8(val);
- lm90_select_remote_channel(client, data, index >= 6);
+ lm90_select_remote_channel(data, index >= 6);
err = i2c_smbus_write_byte_data(client, reg[index], data->temp8[index]);
- lm90_select_remote_channel(client, data, 0);
+ lm90_select_remote_channel(data, 0);
return err;
}
@@ -1593,8 +1621,7 @@ static void lm90_restore_conf(void *_data)
struct i2c_client *client = data->client;
/* Restore initial configuration */
- i2c_smbus_write_byte_data(client, LM90_REG_W_CONVRATE,
- data->convrate_orig);
+ lm90_write_convrate(data, data->convrate_orig);
i2c_smbus_write_byte_data(client, LM90_REG_W_CONFIG1,
data->config_orig);
}
@@ -1611,11 +1638,13 @@ static int lm90_init_client(struct i2c_client *client, struct lm90_data *data)
/*
* Start the conversions.
*/
- lm90_set_convrate(client, data, 500); /* 500ms; 2Hz conversion rate */
config = lm90_read_reg(client, LM90_REG_R_CONFIG1);
if (config < 0)
return config;
data->config_orig = config;
+ data->config = config;
+
+ lm90_set_convrate(client, data, 500); /* 500ms; 2Hz conversion rate */
/* Check Temperature Range Select */
if (data->kind == adt7461 || data->kind == tmp451) {
@@ -1638,8 +1667,7 @@ static int lm90_init_client(struct i2c_client *client, struct lm90_data *data)
config &= ~0x08;
config &= 0xBF; /* run */
- if (config != data->config_orig) /* Only write if changed */
- i2c_smbus_write_byte_data(client, LM90_REG_W_CONFIG1, config);
+ lm90_update_confreg(data, config);
return devm_add_action_or_reset(&client->dev, lm90_restore_conf, data);
}
@@ -1718,7 +1746,7 @@ static int lm90_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
- struct i2c_adapter *adapter = to_i2c_adapter(dev->parent);
+ struct i2c_adapter *adapter = client->adapter;
struct hwmon_channel_info *info;
struct regulator *regulator;
struct device *hwmon_dev;
@@ -1873,14 +1901,8 @@ static void lm90_alert(struct i2c_client *client, enum i2c_alert_protocol type,
if ((data->flags & LM90_HAVE_BROKEN_ALERT) &&
(alarms & data->alert_alarms)) {
- int config;
-
dev_dbg(&client->dev, "Disabling ALERT#\n");
- config = lm90_read_reg(client, LM90_REG_R_CONFIG1);
- if (config >= 0)
- i2c_smbus_write_byte_data(client,
- LM90_REG_W_CONFIG1,
- config | 0x80);
+ lm90_update_confreg(data, data->config | 0x80);
}
} else {
dev_info(&client->dev, "Everything OK\n");
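
The lm90 rework above keeps a shadow copy of CONFIG1 in data->config and routes every change through lm90_update_confreg(), so writes only reach the bus when the value really changes; on parts flagged LM90_PAUSE_FOR_CONFIG the conversion-rate update is bracketed by setting and clearing the 0x40 (stop-conversion) bit. A sketch of that bracketing written as a generic helper, not taken from the driver, using only fields and helpers visible in the hunks above:

/*
 * Sketch only: the "pause conversions around a register write" idea from
 * lm90_write_convrate(), generalized. Because data->config shadows the
 * hardware, the restore step becomes a no-op when nothing was paused.
 */
static int lm90_paused_write(struct lm90_data *data, u8 reg, u8 val)
{
	u8 config = data->config;
	int err;

	if (data->flags & LM90_PAUSE_FOR_CONFIG) {
		err = lm90_update_confreg(data, config | 0x40);	/* pause */
		if (err < 0)
			return err;
	}

	err = i2c_smbus_write_byte_data(data->client, reg, val);

	/* Restore the saved configuration even if the write above failed. */
	lm90_update_confreg(data, config);

	return err;
}
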
diff --git a/drivers/hwmon/max6650.c b/drivers/hwmon/max6650.c
index 6b9056f9483f..3d9d371c35b5 100644
--- a/drivers/hwmon/max6650.c
+++ b/drivers/hwmon/max6650.c
@@ -92,7 +92,8 @@ module_param(clock, int, 0444);
#define FAN_RPM_MIN 240
#define FAN_RPM_MAX 30000
-#define DIV_FROM_REG(reg) (1 << (reg & 7))
+#define DIV_FROM_REG(reg) (1 << ((reg) & 7))
+#define DAC_LIMIT(v12) ((v12) ? 180 : 76)
/*
* Client data (each client gets its own)
@@ -100,11 +101,9 @@ module_param(clock, int, 0444);
struct max6650_data {
struct i2c_client *client;
- const struct attribute_group *groups[3];
- struct thermal_cooling_device *cooling_dev;
- struct mutex update_lock;
+ struct mutex update_lock; /* protect alarm register updates */
int nr_fans;
- char valid; /* zero until following fields are valid */
+ bool valid; /* false until following fields are valid */
unsigned long last_updated; /* in jiffies */
/* register values */
@@ -114,6 +113,7 @@ struct max6650_data {
u8 count;
u8 dac;
u8 alarm;
+ u8 alarm_en;
unsigned long cooling_dev_state;
};
@@ -137,41 +137,60 @@ static const struct of_device_id __maybe_unused max6650_dt_match[] = {
};
MODULE_DEVICE_TABLE(of, max6650_dt_match);
+static int dac_to_pwm(int dac, bool v12)
+{
+ /*
+ * Useful range for dac is 0-180 for 12V fans and 0-76 for 5V fans.
+ * Lower DAC values mean higher speeds.
+ */
+ return clamp_val(255 - (255 * dac) / DAC_LIMIT(v12), 0, 255);
+}
+
+static u8 pwm_to_dac(unsigned int pwm, bool v12)
+{
+ int limit = DAC_LIMIT(v12);
+
+ return limit - (limit * pwm) / 255;
+}
+
static struct max6650_data *max6650_update_device(struct device *dev)
{
struct max6650_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
+ int reg, err = 0;
int i;
mutex_lock(&data->update_lock);
if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
- data->speed = i2c_smbus_read_byte_data(client,
- MAX6650_REG_SPEED);
- data->config = i2c_smbus_read_byte_data(client,
- MAX6650_REG_CONFIG);
for (i = 0; i < data->nr_fans; i++) {
- data->tach[i] = i2c_smbus_read_byte_data(client,
- tach_reg[i]);
+ reg = i2c_smbus_read_byte_data(client, tach_reg[i]);
+ if (reg < 0) {
+ err = reg;
+ goto error;
+ }
+ data->tach[i] = reg;
}
- data->count = i2c_smbus_read_byte_data(client,
- MAX6650_REG_COUNT);
- data->dac = i2c_smbus_read_byte_data(client, MAX6650_REG_DAC);
/*
* Alarms are cleared on read in case the condition that
* caused the alarm is removed. Keep the value latched here
* for providing the register through different alarm files.
*/
- data->alarm |= i2c_smbus_read_byte_data(client,
- MAX6650_REG_ALARM);
-
+ reg = i2c_smbus_read_byte_data(client, MAX6650_REG_ALARM);
+ if (reg < 0) {
+ err = reg;
+ goto error;
+ }
+ data->alarm |= reg;
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
+error:
mutex_unlock(&data->update_lock);
-
+ if (err)
+ data = ERR_PTR(err);
return data;
}
@@ -199,26 +218,6 @@ static int max6650_set_operating_mode(struct max6650_data *data, u8 mode)
return 0;
}
-static ssize_t fan_show(struct device *dev, struct device_attribute *devattr,
- char *buf)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct max6650_data *data = max6650_update_device(dev);
- int rpm;
-
- /*
- * Calculation details:
- *
- * Each tachometer counts over an interval given by the "count"
- * register (0.25, 0.5, 1 or 2 seconds). This module assumes
- * that the fans produce two pulses per revolution (this seems
- * to be the most common).
- */
-
- rpm = ((data->tach[attr->index] * 120) / DIV_FROM_REG(data->count));
- return sprintf(buf, "%d\n", rpm);
-}
-
/*
* Set the fan speed to the specified RPM (or read back the RPM setting).
* This works in closed loop mode only. Use pwm1 for open loop speed setting.
@@ -260,26 +259,6 @@ static ssize_t fan_show(struct device *dev, struct device_attribute *devattr,
* controlled.
*/
-static ssize_t fan1_target_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- struct max6650_data *data = max6650_update_device(dev);
- int kscale, ktach, rpm;
-
- /*
- * Use the datasheet equation:
- *
- * FanSpeed = KSCALE x fCLK / [256 x (KTACH + 1)]
- *
- * then multiply by 60 to give rpm.
- */
-
- kscale = DIV_FROM_REG(data->config);
- ktach = data->speed;
- rpm = 60 * kscale * clock / (256 * (ktach + 1));
- return sprintf(buf, "%d\n", rpm);
-}
-
static int max6650_set_target(struct max6650_data *data, unsigned long rpm)
{
int kscale, ktach;
@@ -308,197 +287,8 @@ static int max6650_set_target(struct max6650_data *data, unsigned long rpm)
data->speed);
}
-static ssize_t fan1_target_store(struct device *dev,
- struct device_attribute *devattr,
- const char *buf, size_t count)
-{
- struct max6650_data *data = dev_get_drvdata(dev);
- unsigned long rpm;
- int err;
-
- err = kstrtoul(buf, 10, &rpm);
- if (err)
- return err;
-
- mutex_lock(&data->update_lock);
-
- err = max6650_set_target(data, rpm);
-
- mutex_unlock(&data->update_lock);
-
- if (err < 0)
- return err;
-
- return count;
-}
-
-/*
- * Get/set the fan speed in open loop mode using pwm1 sysfs file.
- * Speed is given as a relative value from 0 to 255, where 255 is maximum
- * speed. Note that this is done by writing directly to the chip's DAC,
- * it won't change the closed loop speed set by fan1_target.
- * Also note that due to rounding errors it is possible that you don't read
- * back exactly the value you have set.
- */
-
-static ssize_t pwm1_show(struct device *dev, struct device_attribute *devattr,
- char *buf)
-{
- int pwm;
- struct max6650_data *data = max6650_update_device(dev);
-
- /*
- * Useful range for dac is 0-180 for 12V fans and 0-76 for 5V fans.
- * Lower DAC values mean higher speeds.
- */
- if (data->config & MAX6650_CFG_V12)
- pwm = 255 - (255 * (int)data->dac)/180;
- else
- pwm = 255 - (255 * (int)data->dac)/76;
-
- if (pwm < 0)
- pwm = 0;
-
- return sprintf(buf, "%d\n", pwm);
-}
-
-static ssize_t pwm1_store(struct device *dev,
- struct device_attribute *devattr, const char *buf,
- size_t count)
-{
- struct max6650_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
- unsigned long pwm;
- int err;
-
- err = kstrtoul(buf, 10, &pwm);
- if (err)
- return err;
-
- pwm = clamp_val(pwm, 0, 255);
-
- mutex_lock(&data->update_lock);
-
- if (data->config & MAX6650_CFG_V12)
- data->dac = 180 - (180 * pwm)/255;
- else
- data->dac = 76 - (76 * pwm)/255;
- err = i2c_smbus_write_byte_data(client, MAX6650_REG_DAC, data->dac);
-
- mutex_unlock(&data->update_lock);
-
- return err < 0 ? err : count;
-}
-
/*
- * Get/Set controller mode:
- * Possible values:
- * 0 = Fan always on
- * 1 = Open loop, Voltage is set according to speed, not regulated.
- * 2 = Closed loop, RPM for all fans regulated by fan1 tachometer
- * 3 = Fan off
- */
-static ssize_t pwm1_enable_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- struct max6650_data *data = max6650_update_device(dev);
- int mode = (data->config & MAX6650_CFG_MODE_MASK) >> 4;
- int sysfs_modes[4] = {0, 3, 2, 1};
-
- return sprintf(buf, "%d\n", sysfs_modes[mode]);
-}
-
-static ssize_t pwm1_enable_store(struct device *dev,
- struct device_attribute *devattr,
- const char *buf, size_t count)
-{
- struct max6650_data *data = dev_get_drvdata(dev);
- unsigned long mode;
- int err;
- const u8 max6650_modes[] = {
- MAX6650_CFG_MODE_ON,
- MAX6650_CFG_MODE_OPEN_LOOP,
- MAX6650_CFG_MODE_CLOSED_LOOP,
- MAX6650_CFG_MODE_OFF,
- };
-
- err = kstrtoul(buf, 10, &mode);
- if (err)
- return err;
-
- if (mode >= ARRAY_SIZE(max6650_modes))
- return -EINVAL;
-
- mutex_lock(&data->update_lock);
-
- max6650_set_operating_mode(data, max6650_modes[mode]);
-
- mutex_unlock(&data->update_lock);
-
- return count;
-}
-
-/*
- * Read/write functions for fan1_div sysfs file. The MAX6650 has no such
- * divider. We handle this by converting between divider and counttime:
- *
- * (counttime == k) <==> (divider == 2^k), k = 0, 1, 2, or 3
- *
- * Lower values of k allow to connect a faster fan without the risk of
- * counter overflow. The price is lower resolution. You can also set counttime
- * using the module parameter. Note that the module parameter "prescaler" also
- * influences the behaviour. Unfortunately, there's no sysfs attribute
- * defined for that. See the data sheet for details.
- */
-
-static ssize_t fan1_div_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- struct max6650_data *data = max6650_update_device(dev);
-
- return sprintf(buf, "%d\n", DIV_FROM_REG(data->count));
-}
-
-static ssize_t fan1_div_store(struct device *dev,
- struct device_attribute *devattr,
- const char *buf, size_t count)
-{
- struct max6650_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
- unsigned long div;
- int err;
-
- err = kstrtoul(buf, 10, &div);
- if (err)
- return err;
-
- mutex_lock(&data->update_lock);
- switch (div) {
- case 1:
- data->count = 0;
- break;
- case 2:
- data->count = 1;
- break;
- case 4:
- data->count = 2;
- break;
- case 8:
- data->count = 3;
- break;
- default:
- mutex_unlock(&data->update_lock);
- return -EINVAL;
- }
-
- i2c_smbus_write_byte_data(client, MAX6650_REG_COUNT, data->count);
- mutex_unlock(&data->update_lock);
-
- return count;
-}
-
-/*
- * Get alarm stati:
+ * Get gpio alarm status:
* Possible values:
* 0 = no alarm
* 1 = alarm
@@ -509,42 +299,30 @@ static ssize_t alarm_show(struct device *dev,
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct max6650_data *data = max6650_update_device(dev);
- struct i2c_client *client = data->client;
- int alarm = 0;
+ bool alarm;
- if (data->alarm & attr->index) {
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ alarm = data->alarm & attr->index;
+ if (alarm) {
mutex_lock(&data->update_lock);
- alarm = 1;
data->alarm &= ~attr->index;
- data->alarm |= i2c_smbus_read_byte_data(client,
- MAX6650_REG_ALARM);
+ data->valid = false;
mutex_unlock(&data->update_lock);
}
return sprintf(buf, "%d\n", alarm);
}
-static SENSOR_DEVICE_ATTR_RO(fan1_input, fan, 0);
-static SENSOR_DEVICE_ATTR_RO(fan2_input, fan, 1);
-static SENSOR_DEVICE_ATTR_RO(fan3_input, fan, 2);
-static SENSOR_DEVICE_ATTR_RO(fan4_input, fan, 3);
-static DEVICE_ATTR_RW(fan1_target);
-static DEVICE_ATTR_RW(fan1_div);
-static DEVICE_ATTR_RW(pwm1_enable);
-static DEVICE_ATTR_RW(pwm1);
-static SENSOR_DEVICE_ATTR_RO(fan1_max_alarm, alarm, MAX6650_ALRM_MAX);
-static SENSOR_DEVICE_ATTR_RO(fan1_min_alarm, alarm, MAX6650_ALRM_MIN);
-static SENSOR_DEVICE_ATTR_RO(fan1_fault, alarm, MAX6650_ALRM_TACH);
static SENSOR_DEVICE_ATTR_RO(gpio1_alarm, alarm, MAX6650_ALRM_GPIO1);
static SENSOR_DEVICE_ATTR_RO(gpio2_alarm, alarm, MAX6650_ALRM_GPIO2);
static umode_t max6650_attrs_visible(struct kobject *kobj, struct attribute *a,
- int n)
+ int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct max6650_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
- u8 alarm_en = i2c_smbus_read_byte_data(client, MAX6650_REG_ALARM_EN);
struct device_attribute *devattr;
/*
@@ -552,12 +330,9 @@ static umode_t max6650_attrs_visible(struct kobject *kobj, struct attribute *a,
*/
devattr = container_of(a, struct device_attribute, attr);
- if (devattr == &sensor_dev_attr_fan1_max_alarm.dev_attr
- || devattr == &sensor_dev_attr_fan1_min_alarm.dev_attr
- || devattr == &sensor_dev_attr_fan1_fault.dev_attr
- || devattr == &sensor_dev_attr_gpio1_alarm.dev_attr
- || devattr == &sensor_dev_attr_gpio2_alarm.dev_attr) {
- if (!(alarm_en & to_sensor_dev_attr(devattr)->index))
+ if (devattr == &sensor_dev_attr_gpio1_alarm.dev_attr ||
+ devattr == &sensor_dev_attr_gpio2_alarm.dev_attr) {
+ if (!(data->alarm_en & to_sensor_dev_attr(devattr)->index))
return 0;
}
@@ -565,14 +340,6 @@ static umode_t max6650_attrs_visible(struct kobject *kobj, struct attribute *a,
}
static struct attribute *max6650_attrs[] = {
- &sensor_dev_attr_fan1_input.dev_attr.attr,
- &dev_attr_fan1_target.attr,
- &dev_attr_fan1_div.attr,
- &dev_attr_pwm1_enable.attr,
- &dev_attr_pwm1.attr,
- &sensor_dev_attr_fan1_max_alarm.dev_attr.attr,
- &sensor_dev_attr_fan1_min_alarm.dev_attr.attr,
- &sensor_dev_attr_fan1_fault.dev_attr.attr,
&sensor_dev_attr_gpio1_alarm.dev_attr.attr,
&sensor_dev_attr_gpio2_alarm.dev_attr.attr,
NULL
@@ -583,27 +350,17 @@ static const struct attribute_group max6650_group = {
.is_visible = max6650_attrs_visible,
};
-static struct attribute *max6651_attrs[] = {
- &sensor_dev_attr_fan2_input.dev_attr.attr,
- &sensor_dev_attr_fan3_input.dev_attr.attr,
- &sensor_dev_attr_fan4_input.dev_attr.attr,
+static const struct attribute_group *max6650_groups[] = {
+ &max6650_group,
NULL
};
-static const struct attribute_group max6651_group = {
- .attrs = max6651_attrs,
-};
-
-/*
- * Real code
- */
-
static int max6650_init_client(struct max6650_data *data,
struct i2c_client *client)
{
struct device *dev = &client->dev;
- int config;
- int err = -EIO;
+ int reg;
+ int err;
u32 voltage;
u32 prescale;
u32 target_rpm;
@@ -617,21 +374,20 @@ static int max6650_init_client(struct max6650_data *data,
&prescale))
prescale = prescaler;
- config = i2c_smbus_read_byte_data(client, MAX6650_REG_CONFIG);
-
- if (config < 0) {
- dev_err(dev, "Error reading config, aborting.\n");
- return err;
+ reg = i2c_smbus_read_byte_data(client, MAX6650_REG_CONFIG);
+ if (reg < 0) {
+ dev_err(dev, "Error reading config register, aborting.\n");
+ return reg;
}
switch (voltage) {
case 0:
break;
case 5:
- config &= ~MAX6650_CFG_V12;
+ reg &= ~MAX6650_CFG_V12;
break;
case 12:
- config |= MAX6650_CFG_V12;
+ reg |= MAX6650_CFG_V12;
break;
default:
dev_err(dev, "illegal value for fan_voltage (%d)\n", voltage);
@@ -641,22 +397,22 @@ static int max6650_init_client(struct max6650_data *data,
case 0:
break;
case 1:
- config &= ~MAX6650_CFG_PRESCALER_MASK;
+ reg &= ~MAX6650_CFG_PRESCALER_MASK;
break;
case 2:
- config = (config & ~MAX6650_CFG_PRESCALER_MASK)
+ reg = (reg & ~MAX6650_CFG_PRESCALER_MASK)
| MAX6650_CFG_PRESCALER_2;
break;
case 4:
- config = (config & ~MAX6650_CFG_PRESCALER_MASK)
+ reg = (reg & ~MAX6650_CFG_PRESCALER_MASK)
| MAX6650_CFG_PRESCALER_4;
break;
case 8:
- config = (config & ~MAX6650_CFG_PRESCALER_MASK)
+ reg = (reg & ~MAX6650_CFG_PRESCALER_MASK)
| MAX6650_CFG_PRESCALER_8;
break;
case 16:
- config = (config & ~MAX6650_CFG_PRESCALER_MASK)
+ reg = (reg & ~MAX6650_CFG_PRESCALER_MASK)
| MAX6650_CFG_PRESCALER_16;
break;
default:
@@ -664,16 +420,43 @@ static int max6650_init_client(struct max6650_data *data,
}
dev_info(dev, "Fan voltage: %dV, prescaler: %d.\n",
- (config & MAX6650_CFG_V12) ? 12 : 5,
- 1 << (config & MAX6650_CFG_PRESCALER_MASK));
+ (reg & MAX6650_CFG_V12) ? 12 : 5,
+ 1 << (reg & MAX6650_CFG_PRESCALER_MASK));
- if (i2c_smbus_write_byte_data(client, MAX6650_REG_CONFIG, config)) {
+ err = i2c_smbus_write_byte_data(client, MAX6650_REG_CONFIG, reg);
+ if (err) {
dev_err(dev, "Config write error, aborting.\n");
return err;
}
+ data->config = reg;
- data->config = config;
- data->count = i2c_smbus_read_byte_data(client, MAX6650_REG_COUNT);
+ reg = i2c_smbus_read_byte_data(client, MAX6650_REG_SPEED);
+ if (reg < 0) {
+ dev_err(dev, "Failed to read speed register, aborting.\n");
+ return reg;
+ }
+ data->speed = reg;
+
+ reg = i2c_smbus_read_byte_data(client, MAX6650_REG_DAC);
+ if (reg < 0) {
+ dev_err(dev, "Failed to read DAC register, aborting.\n");
+ return reg;
+ }
+ data->dac = reg;
+
+ reg = i2c_smbus_read_byte_data(client, MAX6650_REG_COUNT);
+ if (reg < 0) {
+ dev_err(dev, "Failed to read count register, aborting.\n");
+ return reg;
+ }
+ data->count = reg;
+
+ reg = i2c_smbus_read_byte_data(client, MAX6650_REG_ALARM_EN);
+ if (reg < 0) {
+ dev_err(dev, "Failed to read alarm configuration, aborting.\n");
+ return reg;
+ }
+ data->alarm_en = reg;
if (!of_property_read_u32(client->dev.of_node, "maxim,fan-target-rpm",
&target_rpm)) {
@@ -684,8 +467,6 @@ static int max6650_init_client(struct max6650_data *data,
return 0;
}
-#if IS_ENABLED(CONFIG_THERMAL)
-
static int max6650_get_max_state(struct thermal_cooling_device *cdev,
unsigned long *state)
{
@@ -715,23 +496,18 @@ static int max6650_set_cur_state(struct thermal_cooling_device *cdev,
mutex_lock(&data->update_lock);
- if (data->config & MAX6650_CFG_V12)
- data->dac = 180 - (180 * state)/255;
- else
- data->dac = 76 - (76 * state)/255;
-
+ data->dac = pwm_to_dac(state, data->config & MAX6650_CFG_V12);
err = i2c_smbus_write_byte_data(client, MAX6650_REG_DAC, data->dac);
-
if (!err) {
max6650_set_operating_mode(data, state ?
- MAX6650_CFG_MODE_OPEN_LOOP :
- MAX6650_CFG_MODE_OFF);
+ MAX6650_CFG_MODE_OPEN_LOOP :
+ MAX6650_CFG_MODE_OFF);
data->cooling_dev_state = state;
}
mutex_unlock(&data->update_lock);
- return err < 0 ? err : 0;
+ return err;
}
static const struct thermal_cooling_device_ops max6650_cooling_ops = {
@@ -739,11 +515,252 @@ static const struct thermal_cooling_device_ops max6650_cooling_ops = {
.get_cur_state = max6650_get_cur_state,
.set_cur_state = max6650_set_cur_state,
};
-#endif
+
+static int max6650_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct max6650_data *data = max6650_update_device(dev);
+ int mode;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ switch (type) {
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_input:
+ *val = dac_to_pwm(data->dac,
+ data->config & MAX6650_CFG_V12);
+ break;
+ case hwmon_pwm_enable:
+ /*
+ * Possible values:
+ * 0 = Fan always on
+ * 1 = Open loop, Voltage is set according to speed,
+ * not regulated.
+ * 2 = Closed loop, RPM for all fans regulated by fan1
+ * tachometer
+ * 3 = Fan off
+ */
+ mode = (data->config & MAX6650_CFG_MODE_MASK) >> 4;
+ *val = (4 - mode) & 3; /* {0 1 2 3} -> {0 3 2 1} */
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ case hwmon_fan:
+ switch (attr) {
+ case hwmon_fan_input:
+ /*
+ * Calculation details:
+ *
+ * Each tachometer counts over an interval given by the
+ * "count" register (0.25, 0.5, 1 or 2 seconds).
+ * The driver assumes that the fans produce two pulses
+ * per revolution (this seems to be the most common).
+ */
+ *val = DIV_ROUND_CLOSEST(data->tach[channel] * 120,
+ DIV_FROM_REG(data->count));
+ break;
+ case hwmon_fan_div:
+ *val = DIV_FROM_REG(data->count);
+ break;
+ case hwmon_fan_target:
+ /*
+ * Use the datasheet equation:
+ * FanSpeed = KSCALE x fCLK / [256 x (KTACH + 1)]
+ * then multiply by 60 to give rpm.
+ */
+ *val = 60 * DIV_FROM_REG(data->config) * clock /
+ (256 * (data->speed + 1));
+ break;
+ case hwmon_fan_min_alarm:
+ *val = !!(data->alarm & MAX6650_ALRM_MIN);
+ data->alarm &= ~MAX6650_ALRM_MIN;
+ data->valid = false;
+ break;
+ case hwmon_fan_max_alarm:
+ *val = !!(data->alarm & MAX6650_ALRM_MAX);
+ data->alarm &= ~MAX6650_ALRM_MAX;
+ data->valid = false;
+ break;
+ case hwmon_fan_fault:
+ *val = !!(data->alarm & MAX6650_ALRM_TACH);
+ data->alarm &= ~MAX6650_ALRM_TACH;
+ data->valid = false;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static const u8 max6650_pwm_modes[] = {
+ MAX6650_CFG_MODE_ON,
+ MAX6650_CFG_MODE_OPEN_LOOP,
+ MAX6650_CFG_MODE_CLOSED_LOOP,
+ MAX6650_CFG_MODE_OFF,
+};
+
+static int max6650_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ struct max6650_data *data = dev_get_drvdata(dev);
+ int ret = 0;
+ u8 reg;
+
+ mutex_lock(&data->update_lock);
+
+ switch (type) {
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_input:
+ reg = pwm_to_dac(clamp_val(val, 0, 255),
+ data->config & MAX6650_CFG_V12);
+ ret = i2c_smbus_write_byte_data(data->client,
+ MAX6650_REG_DAC, reg);
+ if (ret)
+ break;
+ data->dac = reg;
+ break;
+ case hwmon_pwm_enable:
+ if (val < 0 || val >= ARRAY_SIZE(max6650_pwm_modes)) {
+ ret = -EINVAL;
+ break;
+ }
+ ret = max6650_set_operating_mode(data,
+ max6650_pwm_modes[val]);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+ break;
+ case hwmon_fan:
+ switch (attr) {
+ case hwmon_fan_div:
+ switch (val) {
+ case 1:
+ reg = 0;
+ break;
+ case 2:
+ reg = 1;
+ break;
+ case 4:
+ reg = 2;
+ break;
+ case 8:
+ reg = 3;
+ break;
+ default:
+ ret = -EINVAL;
+ goto error;
+ }
+ ret = i2c_smbus_write_byte_data(data->client,
+ MAX6650_REG_COUNT, reg);
+ if (ret)
+ break;
+ data->count = reg;
+ break;
+ case hwmon_fan_target:
+ if (val < 0) {
+ ret = -EINVAL;
+ break;
+ }
+ ret = max6650_set_target(data, val);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+error:
+ mutex_unlock(&data->update_lock);
+ return ret;
+}
+
+static umode_t max6650_is_visible(const void *_data,
+ enum hwmon_sensor_types type, u32 attr,
+ int channel)
+{
+ const struct max6650_data *data = _data;
+
+ if (channel && (channel >= data->nr_fans || type != hwmon_fan))
+ return 0;
+
+ switch (type) {
+ case hwmon_fan:
+ switch (attr) {
+ case hwmon_fan_input:
+ return 0444;
+ case hwmon_fan_target:
+ case hwmon_fan_div:
+ return 0644;
+ case hwmon_fan_min_alarm:
+ if (data->alarm_en & MAX6650_ALRM_MIN)
+ return 0444;
+ break;
+ case hwmon_fan_max_alarm:
+ if (data->alarm_en & MAX6650_ALRM_MAX)
+ return 0444;
+ break;
+ case hwmon_fan_fault:
+ if (data->alarm_en & MAX6650_ALRM_TACH)
+ return 0444;
+ break;
+ default:
+ break;
+ }
+ break;
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_input:
+ case hwmon_pwm_enable:
+ return 0644;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static const struct hwmon_channel_info *max6650_info[] = {
+ HWMON_CHANNEL_INFO(fan, HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_DIV |
+ HWMON_F_MIN_ALARM | HWMON_F_MAX_ALARM |
+ HWMON_F_FAULT,
+ HWMON_F_INPUT, HWMON_F_INPUT, HWMON_F_INPUT),
+ HWMON_CHANNEL_INFO(pwm, HWMON_PWM_INPUT | HWMON_PWM_ENABLE),
+ NULL
+};
+
+static const struct hwmon_ops max6650_hwmon_ops = {
+ .read = max6650_read,
+ .write = max6650_write,
+ .is_visible = max6650_is_visible,
+};
+
+static const struct hwmon_chip_info max6650_chip_info = {
+ .ops = &max6650_hwmon_ops,
+ .info = max6650_info,
+};
static int max6650_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ struct thermal_cooling_device *cooling_dev;
struct device *dev = &client->dev;
const struct of_device_id *of_id =
of_match_device(of_match_ptr(max6650_dt_match), dev);
@@ -767,37 +784,23 @@ static int max6650_probe(struct i2c_client *client,
if (err)
return err;
- data->groups[0] = &max6650_group;
- /* 3 additional fan inputs for the MAX6651 */
- if (data->nr_fans == 4)
- data->groups[1] = &max6651_group;
-
- hwmon_dev = devm_hwmon_device_register_with_groups(dev,
- client->name, data,
- data->groups);
+ hwmon_dev = devm_hwmon_device_register_with_info(dev,
+ client->name, data,
+ &max6650_chip_info,
+ max6650_groups);
err = PTR_ERR_OR_ZERO(hwmon_dev);
if (err)
return err;
-#if IS_ENABLED(CONFIG_THERMAL)
- data->cooling_dev =
- thermal_of_cooling_device_register(client->dev.of_node,
- client->name, data,
- &max6650_cooling_ops);
- if (IS_ERR(data->cooling_dev))
- dev_warn(&client->dev,
- "thermal cooling device register failed: %ld\n",
- PTR_ERR(data->cooling_dev));
-#endif
- return 0;
-}
-
-static int max6650_remove(struct i2c_client *client)
-{
- struct max6650_data *data = i2c_get_clientdata(client);
-
- if (!IS_ERR(data->cooling_dev))
- thermal_cooling_device_unregister(data->cooling_dev);
+ if (IS_ENABLED(CONFIG_THERMAL)) {
+ cooling_dev = devm_thermal_of_cooling_device_register(dev,
+ dev->of_node, client->name,
+ data, &max6650_cooling_ops);
+ if (IS_ERR(cooling_dev)) {
+ dev_warn(dev, "thermal cooling device register failed: %ld\n",
+ PTR_ERR(cooling_dev));
+ }
+ }
return 0;
}
@@ -815,7 +818,6 @@ static struct i2c_driver max6650_driver = {
.of_match_table = of_match_ptr(max6650_dt_match),
},
.probe = max6650_probe,
- .remove = max6650_remove,
.id_table = max6650_id,
};
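
The max6650 conversion to the with_info API concentrates the DAC/PWM and tachometer arithmetic in dac_to_pwm(), pwm_to_dac() and max6650_read(). A standalone, user-space check of those formulas, with the two macros copied from the hunks above (the kernel version additionally clamps with clamp_val()):

/* Standalone check of the max6650 arithmetic shown above; not driver code. */
#include <stdio.h>

#define DIV_FROM_REG(reg)	(1 << ((reg) & 7))
#define DAC_LIMIT(v12)		((v12) ? 180 : 76)

static int dac_to_pwm(int dac, int v12)
{
	int pwm = 255 - (255 * dac) / DAC_LIMIT(v12);

	return pwm < 0 ? 0 : pwm;	/* clamp_val(..., 0, 255) in-kernel */
}

static int pwm_to_dac(unsigned int pwm, int v12)
{
	int limit = DAC_LIMIT(v12);

	return limit - (limit * (int)pwm) / 255;
}

int main(void)
{
	/* 12 V fan, half-scale DAC: the conversion round-trips */
	printf("dac 90 -> pwm %d\n", dac_to_pwm(90, 1));	/* 128 */
	printf("pwm 128 -> dac %d\n", pwm_to_dac(128, 1));	/* 90 */

	/*
	 * fan_input: the tachometer counts over 0.25/0.5/1/2 s (count 0..3)
	 * and the driver assumes 2 pulses per revolution, so rpm = tach*120/div.
	 * 100 pulses in 1 s (count = 2, div = 4) -> 50 revs/s -> 3000 rpm.
	 */
	printf("rpm = %d\n", 100 * 120 / DIV_FROM_REG(2));	/* 3000 */
	return 0;
}

Because both directions use integer division, a PWM value written through pwm_to_dac() reads back within one LSB through dac_to_pwm().
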
diff --git a/drivers/hwmon/nct7904.c b/drivers/hwmon/nct7904.c
index 58a957445484..710c30562fc1 100644
--- a/drivers/hwmon/nct7904.c
+++ b/drivers/hwmon/nct7904.c
@@ -4,6 +4,9 @@
*
* Copyright (c) 2015 Kontron
* Author: Vadim V. Vlasov <vvlasov@dev.rtsoft.ru>
+ *
+ * Copyright (c) 2019 Advantech
+ * Author: Amy.Shih <amy.shih@advantech.com.tw>
*/
#include <linux/module.h>
@@ -50,6 +53,8 @@
#define T_CPU1_HV_REG 0xA0 /* Bank 0; 2 regs (HV/LV) per sensor */
#define PRTS_REG 0x03 /* Bank 2 */
+#define PFE_REG 0x00 /* Bank 2; PECI Function Enable */
+#define TSI_CTRL_REG 0x50 /* Bank 2; TSI Control Register */
#define FANCTL1_FMR_REG 0x00 /* Bank 3; 1 reg per channel */
#define FANCTL1_OUT_REG 0x10 /* Bank 3; 1 reg per channel */
@@ -65,6 +70,8 @@ struct nct7904_data {
u32 vsen_mask;
u32 tcpu_mask;
u8 fan_mode[FANCTL_MAX];
+ u8 enable_dts;
+ u8 has_dts;
};
/* Access functions */
@@ -229,11 +236,15 @@ static int nct7904_read_temp(struct device *dev, u32 attr, int channel,
switch (attr) {
case hwmon_temp_input:
- if (channel == 0)
+ if (channel == 4)
ret = nct7904_read_reg16(data, BANK_0, LTD_HV_REG);
+ else if (channel < 5)
+ ret = nct7904_read_reg16(data, BANK_0,
+ TEMP_CH1_HV_REG + channel * 4);
else
ret = nct7904_read_reg16(data, BANK_0,
- T_CPU1_HV_REG + (channel - 1) * 2);
+ T_CPU1_HV_REG + (channel - 5)
+ * 2);
if (ret < 0)
return ret;
temp = ((ret & 0xff00) >> 5) | (ret & 0x7);
@@ -249,11 +260,11 @@ static umode_t nct7904_temp_is_visible(const void *_data, u32 attr, int channel)
const struct nct7904_data *data = _data;
if (attr == hwmon_temp_input) {
- if (channel == 0) {
- if (data->vsen_mask & BIT(17))
+ if (channel < 5) {
+ if (data->tcpu_mask & BIT(channel))
return 0444;
} else {
- if (data->tcpu_mask & BIT(channel - 1))
+ if (data->has_dts & BIT(channel - 5))
return 0444;
}
}
@@ -460,6 +471,7 @@ static int nct7904_probe(struct i2c_client *client,
struct device *dev = &client->dev;
int ret, i;
u32 mask;
+ u8 val, bit;
data = devm_kzalloc(dev, sizeof(struct nct7904_data), GFP_KERNEL);
if (!data)
@@ -493,10 +505,65 @@ static int nct7904_probe(struct i2c_client *client,
data->vsen_mask = mask;
/* CPU_TEMP attributes */
- ret = nct7904_read_reg16(data, BANK_0, DTS_T_CTRL0_REG);
+ ret = nct7904_read_reg(data, BANK_0, VT_ADC_CTRL0_REG);
+ if (ret < 0)
+ return ret;
+
+ if ((ret & 0x6) == 0x6)
+ data->tcpu_mask |= 1; /* TR1 */
+ if ((ret & 0x18) == 0x18)
+ data->tcpu_mask |= 2; /* TR2 */
+ if ((ret & 0x20) == 0x20)
+ data->tcpu_mask |= 4; /* TR3 */
+ if ((ret & 0x80) == 0x80)
+ data->tcpu_mask |= 8; /* TR4 */
+
+ /* LTD */
+ ret = nct7904_read_reg(data, BANK_0, VT_ADC_CTRL2_REG);
+ if (ret < 0)
+ return ret;
+ if ((ret & 0x02) == 0x02)
+ data->tcpu_mask |= 0x10;
+
+ /* Multi-Function detecting for Volt and TR/TD */
+ ret = nct7904_read_reg(data, BANK_0, VT_ADC_MD_REG);
if (ret < 0)
return ret;
- data->tcpu_mask = ((ret >> 8) & 0xf) | ((ret & 0xf) << 4);
+
+ for (i = 0; i < 4; i++) {
+ val = (ret & (0x03 << i)) >> (i * 2);
+ bit = (1 << i);
+ if (val == 0)
+ data->tcpu_mask &= ~bit;
+ }
+
+ /* PECI */
+ ret = nct7904_read_reg(data, BANK_2, PFE_REG);
+ if (ret < 0)
+ return ret;
+ if (ret & 0x80) {
+ data->enable_dts = 1; /* Enable DTS & PECI */
+ } else {
+ ret = nct7904_read_reg(data, BANK_2, TSI_CTRL_REG);
+ if (ret < 0)
+ return ret;
+ if (ret & 0x80)
+ data->enable_dts = 0x3; /* Enable DTS & TSI */
+ }
+
+ /* Check DTS enable status */
+ if (data->enable_dts) {
+ ret = nct7904_read_reg(data, BANK_0, DTS_T_CTRL0_REG);
+ if (ret < 0)
+ return ret;
+ data->has_dts = ret & 0xF;
+ if (data->enable_dts & 0x2) {
+ ret = nct7904_read_reg(data, BANK_0, DTS_T_CTRL1_REG);
+ if (ret < 0)
+ return ret;
+ data->has_dts |= (ret & 0xF) << 4;
+ }
+ }
for (i = 0; i < FANCTL_MAX; i++) {
ret = nct7904_read_reg(data, BANK_3, FANCTL1_FMR_REG + i);
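
After the nct7904 changes above, the temperature channels are laid out as TR1..TR4 (channels 0..3), the local LTD sensor (channel 4) and the DTS/PECI or TSI CPU temperatures (channels 5 and up), with visibility gated by tcpu_mask for the first five and has_dts for the rest. A restatement of the register mapping from nct7904_read_temp(), pulled out as a sketch for readability (this helper does not exist in the driver):

/* Sketch of the channel-to-register mapping used in nct7904_read_temp(). */
static unsigned int nct7904_temp_reg(int channel)
{
	if (channel == 4)
		return LTD_HV_REG;			/* local temperature */
	if (channel < 5)
		return TEMP_CH1_HV_REG + channel * 4;	/* TR1..TR4 inputs */
	return T_CPU1_HV_REG + (channel - 5) * 2;	/* DTS/PECI CPU temps */
}
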
diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c
index 13a6290c8d25..d593517af5c2 100644
--- a/drivers/hwmon/occ/common.c
+++ b/drivers/hwmon/occ/common.c
@@ -241,6 +241,12 @@ static ssize_t occ_show_temp_1(struct device *dev,
val = get_unaligned_be16(&temp->sensor_id);
break;
case 1:
+ /*
+ * If a sensor reading has expired and couldn't be refreshed,
+ * OCC returns 0xFFFF for that sensor.
+ */
+ if (temp->value == 0xFFFF)
+ return -EREMOTEIO;
val = get_unaligned_be16(&temp->value) * 1000;
break;
default:
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index 30751eb9550a..b6588483fae1 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -64,6 +64,15 @@ config SENSORS_IR38064
This driver can also be built as a module. If so, the module will
be called ir38064.
+config SENSORS_IRPS5401
+ tristate "Infineon IRPS5401"
+ help
+ If you say yes here you get hardware monitoring support for the
+ Infineon IRPS5401 controller.
+
+ This driver can also be built as a module. If so, the module will
+ be called irps5401.
+
config SENSORS_ISL68137
tristate "Intersil ISL68137"
help
@@ -154,6 +163,15 @@ config SENSORS_MAX8688
This driver can also be built as a module. If so, the module will
be called max8688.
+config SENSORS_PXE1610
+ tristate "Infineon PXE1610"
+ help
+ If you say yes here you get hardware monitoring support for Infineon
+ PXE1610.
+
+ This driver can also be built as a module. If so, the module will
+ be called pxe1610.
+
config SENSORS_TPS40422
tristate "TI TPS40422"
help
diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile
index 2219b9300316..c950ea9a5d00 100644
--- a/drivers/hwmon/pmbus/Makefile
+++ b/drivers/hwmon/pmbus/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_SENSORS_ADM1275) += adm1275.o
obj-$(CONFIG_SENSORS_IBM_CFFPS) += ibm-cffps.o
obj-$(CONFIG_SENSORS_IR35221) += ir35221.o
obj-$(CONFIG_SENSORS_IR38064) += ir38064.o
+obj-$(CONFIG_SENSORS_IRPS5401) += irps5401.o
obj-$(CONFIG_SENSORS_ISL68137) += isl68137.o
obj-$(CONFIG_SENSORS_LM25066) += lm25066.o
obj-$(CONFIG_SENSORS_LTC2978) += ltc2978.o
@@ -18,6 +19,7 @@ obj-$(CONFIG_SENSORS_MAX20751) += max20751.o
obj-$(CONFIG_SENSORS_MAX31785) += max31785.o
obj-$(CONFIG_SENSORS_MAX34440) += max34440.o
obj-$(CONFIG_SENSORS_MAX8688) += max8688.o
+obj-$(CONFIG_SENSORS_PXE1610) += pxe1610.o
obj-$(CONFIG_SENSORS_TPS40422) += tps40422.o
obj-$(CONFIG_SENSORS_TPS53679) += tps53679.o
obj-$(CONFIG_SENSORS_UCD9000) += ucd9000.o
diff --git a/drivers/hwmon/pmbus/adm1275.c b/drivers/hwmon/pmbus/adm1275.c
index 82052b6611c9..5caa37fbfc18 100644
--- a/drivers/hwmon/pmbus/adm1275.c
+++ b/drivers/hwmon/pmbus/adm1275.c
@@ -14,6 +14,8 @@
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/bitops.h>
+#include <linux/bitfield.h>
+#include <linux/log2.h>
#include "pmbus.h"
enum chips { adm1075, adm1272, adm1275, adm1276, adm1278, adm1293, adm1294 };
@@ -69,6 +71,18 @@ enum chips { adm1075, adm1272, adm1275, adm1276, adm1278, adm1293, adm1294 };
#define ADM1075_VAUX_OV_WARN BIT(7)
#define ADM1075_VAUX_UV_WARN BIT(6)
+#define ADM1275_VI_AVG_SHIFT 0
+#define ADM1275_VI_AVG_MASK GENMASK(ADM1275_VI_AVG_SHIFT + 2, \
+ ADM1275_VI_AVG_SHIFT)
+#define ADM1275_SAMPLES_AVG_MAX 128
+
+#define ADM1278_PWR_AVG_SHIFT 11
+#define ADM1278_PWR_AVG_MASK GENMASK(ADM1278_PWR_AVG_SHIFT + 2, \
+ ADM1278_PWR_AVG_SHIFT)
+#define ADM1278_VI_AVG_SHIFT 8
+#define ADM1278_VI_AVG_MASK GENMASK(ADM1278_VI_AVG_SHIFT + 2, \
+ ADM1278_VI_AVG_SHIFT)
+
struct adm1275_data {
int id;
bool have_oc_fault;
@@ -80,6 +94,7 @@ struct adm1275_data {
bool have_pin_min;
bool have_pin_max;
bool have_temp_max;
+ bool have_power_sampling;
struct pmbus_driver_info info;
};
@@ -155,6 +170,62 @@ static const struct coefficients adm1293_coefficients[] = {
[18] = { 7658, 0, -3 }, /* power, 21V, irange200 */
};
+static int adm1275_read_pmon_config(const struct adm1275_data *data,
+ struct i2c_client *client, bool is_power)
+{
+ int shift, ret;
+ u16 mask;
+
+ /*
+ * The PMON configuration register is a 16-bit register only on chips
+ * supporting power average sampling. On other chips it is an 8-bit
+ * register.
+ */
+ if (data->have_power_sampling) {
+ ret = i2c_smbus_read_word_data(client, ADM1275_PMON_CONFIG);
+ mask = is_power ? ADM1278_PWR_AVG_MASK : ADM1278_VI_AVG_MASK;
+ shift = is_power ? ADM1278_PWR_AVG_SHIFT : ADM1278_VI_AVG_SHIFT;
+ } else {
+ ret = i2c_smbus_read_byte_data(client, ADM1275_PMON_CONFIG);
+ mask = ADM1275_VI_AVG_MASK;
+ shift = ADM1275_VI_AVG_SHIFT;
+ }
+ if (ret < 0)
+ return ret;
+
+ return (ret & mask) >> shift;
+}
+
+static int adm1275_write_pmon_config(const struct adm1275_data *data,
+ struct i2c_client *client,
+ bool is_power, u16 word)
+{
+ int shift, ret;
+ u16 mask;
+
+ if (data->have_power_sampling) {
+ ret = i2c_smbus_read_word_data(client, ADM1275_PMON_CONFIG);
+ mask = is_power ? ADM1278_PWR_AVG_MASK : ADM1278_VI_AVG_MASK;
+ shift = is_power ? ADM1278_PWR_AVG_SHIFT : ADM1278_VI_AVG_SHIFT;
+ } else {
+ ret = i2c_smbus_read_byte_data(client, ADM1275_PMON_CONFIG);
+ mask = ADM1275_VI_AVG_MASK;
+ shift = ADM1275_VI_AVG_SHIFT;
+ }
+ if (ret < 0)
+ return ret;
+
+ word = (ret & ~mask) | ((word << shift) & mask);
+ if (data->have_power_sampling)
+ ret = i2c_smbus_write_word_data(client, ADM1275_PMON_CONFIG,
+ word);
+ else
+ ret = i2c_smbus_write_byte_data(client, ADM1275_PMON_CONFIG,
+ word);
+
+ return ret;
+}
+
static int adm1275_read_word_data(struct i2c_client *client, int page, int reg)
{
const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
@@ -233,6 +304,21 @@ static int adm1275_read_word_data(struct i2c_client *client, int page, int reg)
if (!data->have_temp_max)
return -ENXIO;
break;
+ case PMBUS_VIRT_POWER_SAMPLES:
+ if (!data->have_power_sampling)
+ return -ENXIO;
+ ret = adm1275_read_pmon_config(data, client, true);
+ if (ret < 0)
+ break;
+ ret = BIT(ret);
+ break;
+ case PMBUS_VIRT_IN_SAMPLES:
+ case PMBUS_VIRT_CURR_SAMPLES:
+ ret = adm1275_read_pmon_config(data, client, false);
+ if (ret < 0)
+ break;
+ ret = BIT(ret);
+ break;
default:
ret = -ENODATA;
break;
@@ -277,6 +363,19 @@ static int adm1275_write_word_data(struct i2c_client *client, int page, int reg,
case PMBUS_VIRT_RESET_TEMP_HISTORY:
ret = pmbus_write_word_data(client, 0, ADM1278_PEAK_TEMP, 0);
break;
+ case PMBUS_VIRT_POWER_SAMPLES:
+ if (!data->have_power_sampling)
+ return -ENXIO;
+ word = clamp_val(word, 1, ADM1275_SAMPLES_AVG_MAX);
+ ret = adm1275_write_pmon_config(data, client, true,
+ ilog2(word));
+ break;
+ case PMBUS_VIRT_IN_SAMPLES:
+ case PMBUS_VIRT_CURR_SAMPLES:
+ word = clamp_val(word, 1, ADM1275_SAMPLES_AVG_MAX);
+ ret = adm1275_write_pmon_config(data, client, false,
+ ilog2(word));
+ break;
default:
ret = -ENODATA;
break;
@@ -430,7 +529,8 @@ static int adm1275_probe(struct i2c_client *client,
info->format[PSC_CURRENT_OUT] = direct;
info->format[PSC_POWER] = direct;
info->format[PSC_TEMPERATURE] = direct;
- info->func[0] = PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT;
+ info->func[0] = PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
+ PMBUS_HAVE_SAMPLES;
info->read_word_data = adm1275_read_word_data;
info->read_byte_data = adm1275_read_byte_data;
@@ -471,6 +571,7 @@ static int adm1275_probe(struct i2c_client *client,
data->have_vout = true;
data->have_pin_max = true;
data->have_temp_max = true;
+ data->have_power_sampling = true;
coefficients = adm1272_coefficients;
vindex = (config & ADM1275_VRANGE) ? 1 : 0;
@@ -556,6 +657,7 @@ static int adm1275_probe(struct i2c_client *client,
data->have_vout = true;
data->have_pin_max = true;
data->have_temp_max = true;
+ data->have_power_sampling = true;
coefficients = adm1278_coefficients;
vindex = 0;
@@ -591,6 +693,7 @@ static int adm1275_probe(struct i2c_client *client,
data->have_pin_min = true;
data->have_pin_max = true;
data->have_mfr_vaux_status = true;
+ data->have_power_sampling = true;
coefficients = adm1293_coefficients;
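
The adm1275 additions map the PMBUS_VIRT_*_SAMPLES attributes onto 3-bit fields in PMON_CONFIG that store log2 of the averaged sample count: reads return BIT(field) and writes clamp the request to 1..128 and store ilog2() of it. A standalone check of that encoding for the ADM1278 power-average field (plain C; the mask value matches GENMASK(13, 11) from the definitions above):

/* Standalone check of the ADM1278 power-average field encoding; not driver code. */
#include <stdio.h>

#define ADM1278_PWR_AVG_SHIFT	11
#define ADM1278_PWR_AVG_MASK	(0x7 << ADM1278_PWR_AVG_SHIFT)	/* GENMASK(13, 11) */

static unsigned int pwr_avg_samples(unsigned int pmon_config)
{
	/* field holds log2(samples), so samples = 1 << field */
	return 1u << ((pmon_config & ADM1278_PWR_AVG_MASK) >> ADM1278_PWR_AVG_SHIFT);
}

static unsigned int pwr_avg_encode(unsigned int pmon_config, unsigned int samples)
{
	unsigned int field = 0;

	while ((2u << field) <= samples && field < 7)	/* ilog2(), clamped to the field */
		field++;

	return (pmon_config & ~ADM1278_PWR_AVG_MASK) |
	       (field << ADM1278_PWR_AVG_SHIFT);
}

int main(void)
{
	unsigned int cfg = pwr_avg_encode(0, 128);

	printf("config 0x%04x -> %u samples\n", cfg, pwr_avg_samples(cfg));
	return 0;
}
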
diff --git a/drivers/hwmon/pmbus/irps5401.c b/drivers/hwmon/pmbus/irps5401.c
new file mode 100644
index 000000000000..d37daa001fb3
--- /dev/null
+++ b/drivers/hwmon/pmbus/irps5401.c
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Hardware monitoring driver for the Infineon IRPS5401M PMIC.
+ *
+ * Copyright (c) 2019 SED Systems, a division of Calian Ltd.
+ *
+ * The device supports VOUT_PEAK, IOUT_PEAK, and TEMPERATURE_PEAK; however,
+ * this driver does not currently support them.
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include "pmbus.h"
+
+#define IRPS5401_SW_FUNC (PMBUS_HAVE_VIN | PMBUS_HAVE_IIN | \
+ PMBUS_HAVE_STATUS_INPUT | \
+ PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT | \
+ PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT | \
+ PMBUS_HAVE_PIN | PMBUS_HAVE_POUT | \
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP)
+
+#define IRPS5401_LDO_FUNC (PMBUS_HAVE_VIN | \
+ PMBUS_HAVE_STATUS_INPUT | \
+ PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT | \
+ PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT | \
+ PMBUS_HAVE_PIN | PMBUS_HAVE_POUT | \
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP)
+
+static struct pmbus_driver_info irps5401_info = {
+ .pages = 5,
+ .func[0] = IRPS5401_SW_FUNC,
+ .func[1] = IRPS5401_SW_FUNC,
+ .func[2] = IRPS5401_SW_FUNC,
+ .func[3] = IRPS5401_SW_FUNC,
+ .func[4] = IRPS5401_LDO_FUNC,
+};
+
+static int irps5401_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ return pmbus_do_probe(client, id, &irps5401_info);
+}
+
+static const struct i2c_device_id irps5401_id[] = {
+ {"irps5401", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, irps5401_id);
+
+static struct i2c_driver irps5401_driver = {
+ .driver = {
+ .name = "irps5401",
+ },
+ .probe = irps5401_probe,
+ .remove = pmbus_do_remove,
+ .id_table = irps5401_id,
+};
+
+module_i2c_driver(irps5401_driver);
+
+MODULE_AUTHOR("Robert Hancock");
+MODULE_DESCRIPTION("PMBus driver for Infineon IRPS5401");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/pmbus/pxe1610.c b/drivers/hwmon/pmbus/pxe1610.c
new file mode 100644
index 000000000000..ebe3f023f840
--- /dev/null
+++ b/drivers/hwmon/pmbus/pxe1610.c
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Hardware monitoring driver for Infineon PXE1610
+ *
+ * Copyright (c) 2019 Facebook Inc
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include "pmbus.h"
+
+#define PXE1610_NUM_PAGES 3
+
+/* Identify chip parameters. */
+static int pxe1610_identify(struct i2c_client *client,
+ struct pmbus_driver_info *info)
+{
+ if (pmbus_check_byte_register(client, 0, PMBUS_VOUT_MODE)) {
+ u8 vout_mode;
+ int ret;
+
+		/* Read the register with the VOUT scaling value. */
+ ret = pmbus_read_byte_data(client, 0, PMBUS_VOUT_MODE);
+ if (ret < 0)
+ return ret;
+
+ vout_mode = ret & GENMASK(4, 0);
+
+ switch (vout_mode) {
+ case 1:
+ info->vrm_version = vr12;
+ break;
+ case 2:
+ info->vrm_version = vr13;
+ break;
+ default:
+ return -ENODEV;
+ }
+ }
+
+ return 0;
+}
+
+static struct pmbus_driver_info pxe1610_info = {
+ .pages = PXE1610_NUM_PAGES,
+ .format[PSC_VOLTAGE_IN] = linear,
+ .format[PSC_VOLTAGE_OUT] = vid,
+ .format[PSC_CURRENT_IN] = linear,
+ .format[PSC_CURRENT_OUT] = linear,
+ .format[PSC_TEMPERATURE] = linear,
+ .format[PSC_POWER] = linear,
+ .func[0] = PMBUS_HAVE_VIN
+ | PMBUS_HAVE_VOUT | PMBUS_HAVE_IIN
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_PIN
+ | PMBUS_HAVE_POUT | PMBUS_HAVE_TEMP
+ | PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_IOUT
+ | PMBUS_HAVE_STATUS_INPUT | PMBUS_HAVE_STATUS_TEMP,
+ .func[1] = PMBUS_HAVE_VIN
+ | PMBUS_HAVE_VOUT | PMBUS_HAVE_IIN
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_PIN
+ | PMBUS_HAVE_POUT | PMBUS_HAVE_TEMP
+ | PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_IOUT
+ | PMBUS_HAVE_STATUS_INPUT | PMBUS_HAVE_STATUS_TEMP,
+ .func[2] = PMBUS_HAVE_VIN
+ | PMBUS_HAVE_VOUT | PMBUS_HAVE_IIN
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_PIN
+ | PMBUS_HAVE_POUT | PMBUS_HAVE_TEMP
+ | PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_IOUT
+ | PMBUS_HAVE_STATUS_INPUT | PMBUS_HAVE_STATUS_TEMP,
+ .identify = pxe1610_identify,
+};
+
+static int pxe1610_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct pmbus_driver_info *info;
+ u8 buf[I2C_SMBUS_BLOCK_MAX];
+ int ret;
+
+ if (!i2c_check_functionality(
+ client->adapter,
+ I2C_FUNC_SMBUS_READ_BYTE_DATA
+ | I2C_FUNC_SMBUS_READ_WORD_DATA
+ | I2C_FUNC_SMBUS_READ_BLOCK_DATA))
+ return -ENODEV;
+
+ /*
+ * By default this device doesn't boot to page 0, so set page 0
+ * to access all pmbus registers.
+ */
+ i2c_smbus_write_byte_data(client, PMBUS_PAGE, 0);
+
+ /* Read Manufacturer id */
+ ret = i2c_smbus_read_block_data(client, PMBUS_MFR_ID, buf);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to read PMBUS_MFR_ID\n");
+ return ret;
+ }
+ if (ret != 2 || strncmp(buf, "XP", 2)) {
+ dev_err(&client->dev, "MFR_ID unrecognized\n");
+ return -ENODEV;
+ }
+
+ info = devm_kmemdup(&client->dev, &pxe1610_info,
+ sizeof(struct pmbus_driver_info),
+ GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ return pmbus_do_probe(client, id, info);
+}
+
+static const struct i2c_device_id pxe1610_id[] = {
+ {"pxe1610", 0},
+ {"pxe1110", 0},
+ {"pxm1310", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, pxe1610_id);
+
+static struct i2c_driver pxe1610_driver = {
+ .driver = {
+ .name = "pxe1610",
+ },
+ .probe = pxe1610_probe,
+ .remove = pmbus_do_remove,
+ .id_table = pxe1610_id,
+};
+
+module_i2c_driver(pxe1610_driver);
+
+MODULE_AUTHOR("Vijay Khemka <vijaykhemka@fb.com>");
+MODULE_DESCRIPTION("PMBus driver for Infineon PXE1610, PXE1110 and PXM1310");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c
index 08c9b9f1c16e..54c0ff00d67f 100644
--- a/drivers/hwmon/pwm-fan.c
+++ b/drivers/hwmon/pwm-fan.c
@@ -320,8 +320,10 @@ static int pwm_fan_probe(struct platform_device *pdev)
dev_err(dev, "Failed to enable fan supply: %d\n", ret);
return ret;
}
- devm_add_action_or_reset(dev, pwm_fan_regulator_disable,
- ctx->reg_en);
+ ret = devm_add_action_or_reset(dev, pwm_fan_regulator_disable,
+ ctx->reg_en);
+ if (ret)
+ return ret;
}
ctx->pwm_value = MAX_PWM;
@@ -337,7 +339,9 @@ static int pwm_fan_probe(struct platform_device *pdev)
return ret;
}
timer_setup(&ctx->rpm_timer, sample_timer, 0);
- devm_add_action_or_reset(dev, pwm_fan_pwm_disable, ctx);
+ ret = devm_add_action_or_reset(dev, pwm_fan_pwm_disable, ctx);
+ if (ret)
+ return ret;
of_property_read_u32(dev->of_node, "pulses-per-revolution", &ppr);
ctx->pulses_per_revolution = ppr;
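
Both pwm-fan hunks above make the same point: devm_add_action_or_reset() can itself fail, and when it does it runs the cleanup action immediately and returns the error, so the caller only needs to propagate the return value instead of unwinding by hand. A sketch of the pattern with hypothetical my_ctx/my_enable()/my_disable() helpers:

/* Sketch only: the devm_add_action_or_reset() error handling enforced above. */
static void my_disable_action(void *data)
{
	my_disable(data);	/* hypothetical cleanup helper */
}

static int my_claim_resource(struct device *dev, struct my_ctx *ctx)
{
	int ret;

	ret = my_enable(ctx);	/* hypothetical enable helper */
	if (ret)
		return ret;

	/*
	 * If the devres allocation fails, devm_add_action_or_reset() has
	 * already run my_disable_action() for us, so propagating the
	 * return value is all that is left to do.
	 */
	return devm_add_action_or_reset(dev, my_disable_action, ctx);
}
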
diff --git a/drivers/hwmon/scpi-hwmon.c b/drivers/hwmon/scpi-hwmon.c
index 9bfa228d0eb0..25aac40f2764 100644
--- a/drivers/hwmon/scpi-hwmon.c
+++ b/drivers/hwmon/scpi-hwmon.c
@@ -1,17 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* System Control and Power Interface(SCPI) based hwmon sensor driver
*
* Copyright (C) 2015 ARM Ltd.
* Punit Agrawal <punit.agrawal@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/hwmon.h>
diff --git a/drivers/hwmon/smsc47m1.c b/drivers/hwmon/smsc47m1.c
index cc6aca6e436c..b637836b58a1 100644
--- a/drivers/hwmon/smsc47m1.c
+++ b/drivers/hwmon/smsc47m1.c
@@ -351,6 +351,8 @@ static ssize_t fan_div_store(struct device *dev,
tmp |= data->fan_div[2] << 4;
smsc47m1_write_value(data, SMSC47M2_REG_FANDIV3, tmp);
break;
+ default:
+ BUG();
}
/* Preserve fan min */
diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
index d84095591e45..1969bfdfe6a4 100644
--- a/drivers/i2c/i2c-core-acpi.c
+++ b/drivers/i2c/i2c-core-acpi.c
@@ -111,8 +111,7 @@ static int i2c_acpi_do_lookup(struct acpi_device *adev,
struct list_head resource_list;
int ret;
- if (acpi_bus_get_status(adev) || !adev->status.present ||
- acpi_device_enumerated(adev))
+ if (acpi_bus_get_status(adev) || !adev->status.present)
return -EINVAL;
if (acpi_match_device_ids(adev, i2c_acpi_ignored_device_ids) == 0)
@@ -147,6 +146,9 @@ static int i2c_acpi_get_info(struct acpi_device *adev,
lookup.info = info;
lookup.index = -1;
+ if (acpi_device_enumerated(adev))
+ return -EINVAL;
+
ret = i2c_acpi_do_lookup(adev, &lookup);
if (ret)
return ret;
@@ -333,7 +335,7 @@ static int i2c_acpi_find_match_device(struct device *dev, void *data)
return ACPI_COMPANION(dev) == data;
}
-static struct i2c_adapter *i2c_acpi_find_adapter_by_handle(acpi_handle handle)
+struct i2c_adapter *i2c_acpi_find_adapter_by_handle(acpi_handle handle)
{
struct device *dev;
@@ -341,6 +343,7 @@ static struct i2c_adapter *i2c_acpi_find_adapter_by_handle(acpi_handle handle)
i2c_acpi_find_match_adapter);
return dev ? i2c_verify_adapter(dev) : NULL;
}
+EXPORT_SYMBOL_GPL(i2c_acpi_find_adapter_by_handle);
static struct i2c_client *i2c_acpi_find_client_by_adev(struct acpi_device *adev)
{
diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
index 5f4bd52121fe..d6f8b038a896 100644
--- a/drivers/i3c/master.c
+++ b/drivers/i3c/master.c
@@ -91,6 +91,12 @@ void i3c_bus_normaluse_unlock(struct i3c_bus *bus)
up_read(&bus->lock);
}
+static struct i3c_master_controller *
+i3c_bus_to_i3c_master(struct i3c_bus *i3cbus)
+{
+ return container_of(i3cbus, struct i3c_master_controller, bus);
+}
+
static struct i3c_master_controller *dev_to_i3cmaster(struct device *dev)
{
return container_of(dev, struct i3c_master_controller, dev);
@@ -464,6 +470,7 @@ static int i3c_bus_init(struct i3c_bus *i3cbus)
static const char * const i3c_bus_mode_strings[] = {
[I3C_BUS_MODE_PURE] = "pure",
[I3C_BUS_MODE_MIXED_FAST] = "mixed-fast",
+ [I3C_BUS_MODE_MIXED_LIMITED] = "mixed-limited",
[I3C_BUS_MODE_MIXED_SLOW] = "mixed-slow",
};
@@ -565,20 +572,39 @@ static const struct device_type i3c_masterdev_type = {
.groups = i3c_masterdev_groups,
};
-int i3c_bus_set_mode(struct i3c_bus *i3cbus, enum i3c_bus_mode mode)
+int i3c_bus_set_mode(struct i3c_bus *i3cbus, enum i3c_bus_mode mode,
+ unsigned long max_i2c_scl_rate)
{
- i3cbus->mode = mode;
+ struct i3c_master_controller *master = i3c_bus_to_i3c_master(i3cbus);
- if (!i3cbus->scl_rate.i3c)
- i3cbus->scl_rate.i3c = I3C_BUS_TYP_I3C_SCL_RATE;
+ i3cbus->mode = mode;
- if (!i3cbus->scl_rate.i2c) {
- if (i3cbus->mode == I3C_BUS_MODE_MIXED_SLOW)
- i3cbus->scl_rate.i2c = I3C_BUS_I2C_FM_SCL_RATE;
- else
- i3cbus->scl_rate.i2c = I3C_BUS_I2C_FM_PLUS_SCL_RATE;
+ switch (i3cbus->mode) {
+ case I3C_BUS_MODE_PURE:
+ if (!i3cbus->scl_rate.i3c)
+ i3cbus->scl_rate.i3c = I3C_BUS_TYP_I3C_SCL_RATE;
+ break;
+ case I3C_BUS_MODE_MIXED_FAST:
+ case I3C_BUS_MODE_MIXED_LIMITED:
+ if (!i3cbus->scl_rate.i3c)
+ i3cbus->scl_rate.i3c = I3C_BUS_TYP_I3C_SCL_RATE;
+ if (!i3cbus->scl_rate.i2c)
+ i3cbus->scl_rate.i2c = max_i2c_scl_rate;
+ break;
+ case I3C_BUS_MODE_MIXED_SLOW:
+ if (!i3cbus->scl_rate.i2c)
+ i3cbus->scl_rate.i2c = max_i2c_scl_rate;
+ if (!i3cbus->scl_rate.i3c ||
+ i3cbus->scl_rate.i3c > i3cbus->scl_rate.i2c)
+ i3cbus->scl_rate.i3c = i3cbus->scl_rate.i2c;
+ break;
+ default:
+ return -EINVAL;
}
+ dev_dbg(&master->dev, "i2c-scl = %ld Hz i3c-scl = %ld Hz\n",
+ i3cbus->scl_rate.i2c, i3cbus->scl_rate.i3c);
+
/*
* I3C/I2C frequency may have been overridden, check that user-provided
* values are not exceeding max possible frequency.
@@ -924,9 +950,8 @@ int i3c_master_defslvs_locked(struct i3c_master_controller *master)
ndevs++;
defslvs = i3c_ccc_cmd_dest_init(&dest, I3C_BROADCAST_ADDR,
- sizeof(*defslvs) +
- ((ndevs - 1) *
- sizeof(struct i3c_ccc_dev_desc)));
+ struct_size(defslvs, slaves,
+ ndevs - 1));
if (!defslvs)
return -ENOMEM;
@@ -1963,12 +1988,19 @@ of_i3c_master_add_i2c_boardinfo(struct i3c_master_controller *master,
if (ret)
return ret;
+ /*
+	 * The I3C Specification does not clearly say whether I2C devices with
+	 * 10-bit addresses are supported. These devices can't be passed
+	 * properly through the DEFSLVS command.
+ */
+ if (boardinfo->base.flags & I2C_CLIENT_TEN) {
+ dev_err(&master->dev, "I2C device with 10 bit address not supported.");
+ return -ENOTSUPP;
+ }
+
/* LVR is encoded in reg[2]. */
boardinfo->lvr = reg[2];
- if (boardinfo->lvr & I3C_LVR_I2C_FM_MODE)
- master->bus.scl_rate.i2c = I3C_BUS_I2C_FM_SCL_RATE;
-
list_add_tail(&boardinfo->node, &master->boardinfo.i2c);
of_node_get(node);
@@ -2111,16 +2143,14 @@ static int i3c_master_i2c_adapter_xfer(struct i2c_adapter *adap,
return ret ? ret : nxfers;
}
-static u32 i3c_master_i2c_functionalities(struct i2c_adapter *adap)
+static u32 i3c_master_i2c_funcs(struct i2c_adapter *adapter)
{
- struct i3c_master_controller *master = i2c_adapter_to_i3c_master(adap);
-
- return master->ops->i2c_funcs(master);
+ return I2C_FUNC_SMBUS_EMUL | I2C_FUNC_I2C;
}
static const struct i2c_algorithm i3c_master_i2c_algo = {
.master_xfer = i3c_master_i2c_adapter_xfer,
- .functionality = i3c_master_i2c_functionalities,
+ .functionality = i3c_master_i2c_funcs,
};
static int i3c_master_i2c_adapter_init(struct i3c_master_controller *master)
@@ -2379,8 +2409,7 @@ EXPORT_SYMBOL_GPL(i3c_generic_ibi_recycle_slot);
static int i3c_master_check_ops(const struct i3c_master_controller_ops *ops)
{
if (!ops || !ops->bus_init || !ops->priv_xfers ||
- !ops->send_ccc_cmd || !ops->do_daa || !ops->i2c_xfers ||
- !ops->i2c_funcs)
+ !ops->send_ccc_cmd || !ops->do_daa || !ops->i2c_xfers)
return -EINVAL;
if (ops->request_ibi &&
@@ -2417,6 +2446,7 @@ int i3c_master_register(struct i3c_master_controller *master,
const struct i3c_master_controller_ops *ops,
bool secondary)
{
+ unsigned long i2c_scl_rate = I3C_BUS_I2C_FM_PLUS_SCL_RATE;
struct i3c_bus *i3cbus = i3c_master_get_bus(master);
enum i3c_bus_mode mode = I3C_BUS_MODE_PURE;
struct i2c_dev_boardinfo *i2cbi;
@@ -2458,6 +2488,9 @@ int i3c_master_register(struct i3c_master_controller *master,
mode = I3C_BUS_MODE_MIXED_FAST;
break;
case I3C_LVR_I2C_INDEX(1):
+ if (mode < I3C_BUS_MODE_MIXED_LIMITED)
+ mode = I3C_BUS_MODE_MIXED_LIMITED;
+ break;
case I3C_LVR_I2C_INDEX(2):
if (mode < I3C_BUS_MODE_MIXED_SLOW)
mode = I3C_BUS_MODE_MIXED_SLOW;
@@ -2466,9 +2499,12 @@ int i3c_master_register(struct i3c_master_controller *master,
ret = -EINVAL;
goto err_put_dev;
}
+
+ if (i2cbi->lvr & I3C_LVR_I2C_FM_MODE)
+ i2c_scl_rate = I3C_BUS_I2C_FM_SCL_RATE;
}
- ret = i3c_bus_set_mode(i3cbus, mode);
+ ret = i3c_bus_set_mode(i3cbus, mode, i2c_scl_rate);
if (ret)
goto err_put_dev;
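
i3c_bus_set_mode() now fills in default SCL rates per bus mode: the typical I3C rate unless overridden, an I2C rate taken from the fastest speed tolerated by the attached legacy devices (FM+ by default, FM if any LVR requests it), and, for mixed-slow buses, an I3C rate capped at the I2C rate because those devices cannot filter out the I3C traffic. A condensed restatement of those defaulting rules, equivalent in effect for the modes handled above (the real function also rejects unknown modes with -EINVAL and logs the chosen rates):

/* Sketch only: the SCL rate defaulting rules from i3c_bus_set_mode(). */
static void i3c_bus_fill_default_rates(struct i3c_bus *bus,
				       unsigned long max_i2c_rate)
{
	if (!bus->scl_rate.i3c)
		bus->scl_rate.i3c = I3C_BUS_TYP_I3C_SCL_RATE;

	if (bus->mode != I3C_BUS_MODE_PURE && !bus->scl_rate.i2c)
		bus->scl_rate.i2c = max_i2c_rate;

	/*
	 * On a mixed-slow bus the legacy I2C devices see every transfer,
	 * so the I3C rate must not exceed what they can tolerate.
	 */
	if (bus->mode == I3C_BUS_MODE_MIXED_SLOW &&
	    bus->scl_rate.i3c > bus->scl_rate.i2c)
		bus->scl_rate.i3c = bus->scl_rate.i2c;
}
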
diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c
index 1d83c97431c7..09912d75c6d5 100644
--- a/drivers/i3c/master/dw-i3c-master.c
+++ b/drivers/i3c/master/dw-i3c-master.c
@@ -599,6 +599,7 @@ static int dw_i3c_master_bus_init(struct i3c_master_controller *m)
switch (bus->mode) {
case I3C_BUS_MODE_MIXED_FAST:
+ case I3C_BUS_MODE_MIXED_LIMITED:
ret = dw_i2c_clk_cfg(master);
if (ret)
return ret;
@@ -1060,11 +1061,6 @@ static void dw_i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev)
kfree(data);
}
-static u32 dw_i3c_master_i2c_funcs(struct i3c_master_controller *m)
-{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
-}
-
static irqreturn_t dw_i3c_master_irq_handler(int irq, void *dev_id)
{
struct dw_i3c_master *master = dev_id;
@@ -1099,7 +1095,6 @@ static const struct i3c_master_controller_ops dw_mipi_i3c_ops = {
.attach_i2c_dev = dw_i3c_master_attach_i2c_dev,
.detach_i2c_dev = dw_i3c_master_detach_i2c_dev,
.i2c_xfers = dw_i3c_master_i2c_xfers,
- .i2c_funcs = dw_i3c_master_i2c_funcs,
};
static int dw_i3c_probe(struct platform_device *pdev)
diff --git a/drivers/i3c/master/i3c-master-cdns.c b/drivers/i3c/master/i3c-master-cdns.c
index 8889a4fdb454..237f24adddc6 100644
--- a/drivers/i3c/master/i3c-master-cdns.c
+++ b/drivers/i3c/master/i3c-master-cdns.c
@@ -864,11 +864,6 @@ static int cdns_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
return ret;
}
-static u32 cdns_i3c_master_i2c_funcs(struct i3c_master_controller *m)
-{
- return I2C_FUNC_SMBUS_EMUL | I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR;
-}
-
struct cdns_i3c_i2c_dev_data {
u16 id;
s16 ibi;
@@ -1010,9 +1005,7 @@ static int cdns_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev)
master->free_rr_slots &= ~BIT(slot);
i2c_dev_set_master_data(dev, data);
- writel(prepare_rr0_dev_address(dev->boardinfo->base.addr) |
- (dev->boardinfo->base.flags & I2C_CLIENT_TEN ?
- DEV_ID_RR0_LVR_EXT_ADDR : 0),
+ writel(prepare_rr0_dev_address(dev->boardinfo->base.addr),
master->regs + DEV_ID_RR0(data->id));
writel(dev->boardinfo->lvr, master->regs + DEV_ID_RR2(data->id));
writel(readl(master->regs + DEVS_CTRL) |
@@ -1518,7 +1511,6 @@ static const struct i3c_master_controller_ops cdns_i3c_master_ops = {
.send_ccc_cmd = cdns_i3c_master_send_ccc_cmd,
.priv_xfers = cdns_i3c_master_priv_xfers,
.i2c_xfers = cdns_i3c_master_i2c_xfers,
- .i2c_funcs = cdns_i3c_master_i2c_funcs,
.enable_ibi = cdns_i3c_master_enable_ibi,
.disable_ibi = cdns_i3c_master_disable_ibi,
.request_ibi = cdns_i3c_master_request_ibi,
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index fdd2a62f9d52..9eada392df15 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -25,13 +25,13 @@ menuconfig IDE
To compile this driver as a module, choose M here: the
module will be called ide-core.
- For further information, please read <file:Documentation/ide/ide.txt>.
+ For further information, please read <file:Documentation/ide/ide.rst>.
If unsure, say N.
if IDE
-comment "Please see Documentation/ide/ide.txt for help/info on IDE drives"
+comment "Please see Documentation/ide/ide.rst for help/info on IDE drives"
config IDE_XFER_MODE
bool
@@ -163,7 +163,7 @@ config BLK_DEV_IDETAPE
along with other IDE devices, as "hdb" or "hdc", or something
similar, and will be mapped to a character device such as "ht0"
(check the boot messages with dmesg). Be sure to consult the
- <file:drivers/ide/ide-tape.c> and <file:Documentation/ide/ide.txt>
+ <file:drivers/ide/ide-tape.c> and <file:Documentation/ide/ide.rst>
files for usage information.
To compile this driver as a module, choose M here: the
@@ -251,7 +251,7 @@ config BLK_DEV_CMD640
The CMD640 chip is also used on add-in cards by Acculogic, and on
the "CSA-6400E PCI to IDE controller" that some people have. For
- details, read <file:Documentation/ide/ide.txt>.
+ details, read <file:Documentation/ide/ide.rst>.
config BLK_DEV_CMD640_ENHANCED
bool "CMD640 enhanced support"
@@ -259,7 +259,7 @@ config BLK_DEV_CMD640_ENHANCED
help
This option includes support for setting/autotuning PIO modes and
prefetch on CMD640 IDE interfaces. For details, read
- <file:Documentation/ide/ide.txt>. If you have a CMD640 IDE interface
+ <file:Documentation/ide/ide.rst>. If you have a CMD640 IDE interface
and your BIOS does not already do this for you, then say Y here.
Otherwise say N.
@@ -819,7 +819,7 @@ config BLK_DEV_ALI14XX
boot parameter. It enables support for the secondary IDE interface
of the ALI M1439/1443/1445/1487/1489 chipsets, and permits faster
I/O speeds to be set as well.
- See the files <file:Documentation/ide/ide.txt> and
+ See the files <file:Documentation/ide/ide.rst> and
<file:drivers/ide/ali14xx.c> for more info.
config BLK_DEV_DTC2278
@@ -830,7 +830,7 @@ config BLK_DEV_DTC2278
This driver is enabled at runtime using the "dtc2278.probe" kernel
boot parameter. It enables support for the secondary IDE interface
of the DTC-2278 card, and permits faster I/O speeds to be set as
- well. See the <file:Documentation/ide/ide.txt> and
+ well. See the <file:Documentation/ide/ide.rst> and
<file:drivers/ide/dtc2278.c> files for more info.
config BLK_DEV_HT6560B
@@ -841,7 +841,7 @@ config BLK_DEV_HT6560B
This driver is enabled at runtime using the "ht6560b.probe" kernel
boot parameter. It enables support for the secondary IDE interface
of the Holtek card, and permits faster I/O speeds to be set as well.
- See the <file:Documentation/ide/ide.txt> and
+ See the <file:Documentation/ide/ide.rst> and
<file:drivers/ide/ht6560b.c> files for more info.
config BLK_DEV_QD65XX
@@ -851,7 +851,7 @@ config BLK_DEV_QD65XX
help
This driver is enabled at runtime using the "qd65xx.probe" kernel
boot parameter. It permits faster I/O speeds to be set. See the
- <file:Documentation/ide/ide.txt> and <file:drivers/ide/qd65xx.c>
+ <file:Documentation/ide/ide.rst> and <file:drivers/ide/qd65xx.c>
for more info.
config BLK_DEV_UMC8672
@@ -862,7 +862,7 @@ config BLK_DEV_UMC8672
This driver is enabled at runtime using the "umc8672.probe" kernel
boot parameter. It enables support for the secondary IDE interface
of the UMC-8672, and permits faster I/O speeds to be set as well.
- See the files <file:Documentation/ide/ide.txt> and
+ See the files <file:Documentation/ide/ide.rst> and
<file:drivers/ide/umc8672.c> for more info.
endif
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 3b15adc6ce98..9d117936bee1 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -9,7 +9,7 @@
* May be copied or modified under the terms of the GNU General Public
* License. See linux/COPYING for more information.
*
- * See Documentation/cdrom/ide-cd for usage information.
+ * See Documentation/cdrom/ide-cd.rst for usage information.
*
* Suggestions are welcome. Patches that work are more welcome though. ;-)
*
diff --git a/drivers/iio/humidity/dht11.c b/drivers/iio/humidity/dht11.c
index c8159205c77d..4e22b3c3e488 100644
--- a/drivers/iio/humidity/dht11.c
+++ b/drivers/iio/humidity/dht11.c
@@ -149,7 +149,7 @@ static int dht11_decode(struct dht11 *dht11, int offset)
return -EIO;
}
- dht11->timestamp = ktime_get_boot_ns();
+ dht11->timestamp = ktime_get_boottime_ns();
if (hum_int < 4) { /* DHT22: 100000 = (3*256+232)*100 */
dht11->temperature = (((temp_int & 0x7f) << 8) + temp_dec) *
((temp_int & 0x80) ? -100 : 100);
@@ -177,7 +177,7 @@ static irqreturn_t dht11_handle_irq(int irq, void *data)
/* TODO: Consider making the handler safe for IRQ sharing */
if (dht11->num_edges < DHT11_EDGES_PER_READ && dht11->num_edges >= 0) {
- dht11->edges[dht11->num_edges].ts = ktime_get_boot_ns();
+ dht11->edges[dht11->num_edges].ts = ktime_get_boottime_ns();
dht11->edges[dht11->num_edges++].value =
gpio_get_value(dht11->gpio);
@@ -196,7 +196,7 @@ static int dht11_read_raw(struct iio_dev *iio_dev,
int ret, timeres, offset;
mutex_lock(&dht11->lock);
- if (dht11->timestamp + DHT11_DATA_VALID_TIME < ktime_get_boot_ns()) {
+ if (dht11->timestamp + DHT11_DATA_VALID_TIME < ktime_get_boottime_ns()) {
timeres = ktime_get_resolution_ns();
dev_dbg(dht11->dev, "current timeresolution: %dns\n", timeres);
if (timeres > DHT11_MIN_TIMERES) {
@@ -322,7 +322,7 @@ static int dht11_probe(struct platform_device *pdev)
return -EINVAL;
}
- dht11->timestamp = ktime_get_boot_ns() - DHT11_DATA_VALID_TIME - 1;
+ dht11->timestamp = ktime_get_boottime_ns() - DHT11_DATA_VALID_TIME - 1;
dht11->num_edges = -1;
platform_set_drvdata(pdev, iio);
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 245b5844028d..401d7ff99853 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -228,9 +228,9 @@ s64 iio_get_time_ns(const struct iio_dev *indio_dev)
ktime_get_coarse_ts64(&tp);
return timespec64_to_ns(&tp);
case CLOCK_BOOTTIME:
- return ktime_get_boot_ns();
+ return ktime_get_boottime_ns();
case CLOCK_TAI:
- return ktime_get_tai_ns();
+ return ktime_get_clocktai_ns();
default:
BUG();
}
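
The timestamp changes in dht11 and the IIO core are mechanical renames: ktime_get_boot_ns() becomes ktime_get_boottime_ns() and ktime_get_tai_ns() becomes ktime_get_clocktai_ns(). A minimal sketch of the new spellings (illustrative helper only):

#include <linux/timekeeping.h>

/* Return a suspend-aware (boottime) and a TAI timestamp, in nanoseconds. */
static void example_timestamps(u64 *boot_ns, u64 *tai_ns)
{
        *boot_ns = ktime_get_boottime_ns();
        *tai_ns = ktime_get_clocktai_ns();
}
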
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 29f7b15c81d9..3352a107b4a3 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -457,7 +457,7 @@ static int alloc_name(struct ib_device *ibdev, const char *name)
int rc;
int i;
- lockdep_assert_held_exclusive(&devices_rwsem);
+ lockdep_assert_held_write(&devices_rwsem);
ida_init(&inuse);
xa_for_each (&devices, index, device) {
char buf[IB_DEVICE_NAME_MAX];
@@ -2520,7 +2520,7 @@ static int __init ib_core_init(void)
goto err_mad;
}
- ret = register_lsm_notifier(&ibdev_lsm_nb);
+ ret = register_blocking_lsm_notifier(&ibdev_lsm_nb);
if (ret) {
pr_warn("Couldn't register LSM notifier. ret %d\n", ret);
goto err_sa;
@@ -2539,7 +2539,7 @@ static int __init ib_core_init(void)
return 0;
err_compat:
- unregister_lsm_notifier(&ibdev_lsm_nb);
+ unregister_blocking_lsm_notifier(&ibdev_lsm_nb);
err_sa:
ib_sa_cleanup();
err_mad:
@@ -2565,7 +2565,7 @@ static void __exit ib_core_cleanup(void)
nldev_exit();
rdma_nl_unregister(RDMA_NL_LS);
unregister_pernet_device(&rdma_dev_net_ops);
- unregister_lsm_notifier(&ibdev_lsm_nb);
+ unregister_blocking_lsm_notifier(&ibdev_lsm_nb);
ib_sa_cleanup();
ib_mad_cleanup();
addr_cleanup();
diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c
index 558de0b9895c..2860def84f4d 100644
--- a/drivers/infiniband/core/roce_gid_mgmt.c
+++ b/drivers/infiniband/core/roce_gid_mgmt.c
@@ -330,6 +330,7 @@ static void bond_delete_netdev_default_gids(struct ib_device *ib_dev,
static void enum_netdev_ipv4_ips(struct ib_device *ib_dev,
u8 port, struct net_device *ndev)
{
+ const struct in_ifaddr *ifa;
struct in_device *in_dev;
struct sin_list {
struct list_head list;
@@ -349,7 +350,7 @@ static void enum_netdev_ipv4_ips(struct ib_device *ib_dev,
return;
}
- for_ifa(in_dev) {
+ in_dev_for_each_ifa_rcu(ifa, in_dev) {
struct sin_list *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
if (!entry)
@@ -359,7 +360,7 @@ static void enum_netdev_ipv4_ips(struct ib_device *ib_dev,
entry->ip.sin_addr.s_addr = ifa->ifa_address;
list_add_tail(&entry->list, &sin_list);
}
- endfor_ifa(in_dev);
+
rcu_read_unlock();
list_for_each_entry_safe(sin_iter, sin_temp, &sin_list, list) {
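
The for_ifa()/endfor_ifa() macros are replaced throughout by explicit iterators. A minimal sketch of the RCU variant used here (hypothetical function, illustrative only):

#include <linux/inetdevice.h>

/* Walk every IPv4 address configured on @ndev under the RCU read lock. */
static void example_walk_ipv4_addrs(struct net_device *ndev)
{
        const struct in_ifaddr *ifa;
        struct in_device *in_dev;

        rcu_read_lock();
        in_dev = __in_dev_get_rcu(ndev);
        if (in_dev) {
                in_dev_for_each_ifa_rcu(ifa, in_dev)
                        pr_debug("addr %pI4\n", &ifa->ifa_address);
        }
        rcu_read_unlock();
}
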
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 0f3b1193d5f8..09fcfc9e052d 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -3230,17 +3230,22 @@ static int pick_local_ipaddrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id)
int found = 0;
struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
+ const struct in_ifaddr *ifa;
ind = in_dev_get(dev->rdev.lldi.ports[0]);
if (!ind)
return -EADDRNOTAVAIL;
- for_primary_ifa(ind) {
+ rcu_read_lock();
+ in_dev_for_each_ifa_rcu(ifa, ind) {
+ if (ifa->ifa_flags & IFA_F_SECONDARY)
+ continue;
laddr->sin_addr.s_addr = ifa->ifa_address;
raddr->sin_addr.s_addr = ifa->ifa_address;
found = 1;
break;
}
- endfor_ifa(ind);
+ rcu_read_unlock();
+
in_dev_put(ind);
return found ? 0 : -EADDRNOTAVAIL;
}
diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
index 4fe662c3bbc1..c142b23bb401 100644
--- a/drivers/infiniband/hw/hfi1/affinity.c
+++ b/drivers/infiniband/hw/hfi1/affinity.c
@@ -1038,7 +1038,7 @@ int hfi1_get_proc_affinity(int node)
struct hfi1_affinity_node *entry;
cpumask_var_t diff, hw_thread_mask, available_mask, intrs_mask;
const struct cpumask *node_mask,
- *proc_mask = &current->cpus_allowed;
+ *proc_mask = current->cpus_ptr;
struct hfi1_affinity_node_list *affinity = &node_affinity;
struct cpu_mask_set *set = &affinity->proc;
@@ -1046,7 +1046,7 @@ int hfi1_get_proc_affinity(int node)
* check whether process/context affinity has already
* been set
*/
- if (cpumask_weight(proc_mask) == 1) {
+ if (current->nr_cpus_allowed == 1) {
hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %*pbl",
current->pid, current->comm,
cpumask_pr_args(proc_mask));
@@ -1057,7 +1057,7 @@ int hfi1_get_proc_affinity(int node)
cpu = cpumask_first(proc_mask);
cpumask_set_cpu(cpu, &set->used);
goto done;
- } else if (cpumask_weight(proc_mask) < cpumask_weight(&set->mask)) {
+ } else if (current->nr_cpus_allowed < cpumask_weight(&set->mask)) {
hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %*pbl",
current->pid, current->comm,
cpumask_pr_args(proc_mask));
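
task_struct no longer exposes cpus_allowed directly; the mask is reached through cpus_ptr and its weight is cached in nr_cpus_allowed, which is what replaces the cpumask_weight() calls in these hunks. A minimal sketch (illustrative helpers):

#include <linux/sched.h>

/* True if the current task may run on exactly one CPU. */
static bool example_task_is_pinned(void)
{
        return current->nr_cpus_allowed == 1;
}

/* First CPU the current task is allowed to run on. */
static unsigned int example_task_first_cpu(void)
{
        return cpumask_first(current->cpus_ptr);
}
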
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 28b66bd70b74..2395fd4233a7 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -869,14 +869,13 @@ struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd,
{
struct sdma_rht_node *rht_node;
struct sdma_engine *sde = NULL;
- const struct cpumask *current_mask = &current->cpus_allowed;
unsigned long cpu_id;
/*
* To ensure that always the same sdma engine(s) will be
* selected make sure the process is pinned to this CPU only.
*/
- if (cpumask_weight(current_mask) != 1)
+ if (current->nr_cpus_allowed != 1)
goto out;
cpu_id = smp_processor_id();
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index 8233f5a4e623..700a5d06b60c 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -1773,8 +1773,11 @@ static enum i40iw_status_code i40iw_add_mqh_4(
if ((((rdma_vlan_dev_vlan_id(dev) < I40IW_NO_VLAN) &&
(rdma_vlan_dev_real_dev(dev) == iwdev->netdev)) ||
(dev == iwdev->netdev)) && (dev->flags & IFF_UP)) {
+ const struct in_ifaddr *ifa;
+
idev = in_dev_get(dev);
- for_ifa(idev) {
+
+ in_dev_for_each_ifa_rtnl(ifa, idev) {
i40iw_debug(&iwdev->sc_dev,
I40IW_DEBUG_CM,
"Allocating child CM Listener forIP=%pI4, vlan_id=%d, MAC=%pM\n",
@@ -1819,7 +1822,7 @@ static enum i40iw_status_code i40iw_add_mqh_4(
cm_parent_listen_node->cm_core->stats_listen_nodes_created--;
}
}
- endfor_ifa(idev);
+
in_dev_put(idev);
}
}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
index 10932baee279..d44cf33df81a 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
@@ -1222,8 +1222,10 @@ static void i40iw_add_ipv4_addr(struct i40iw_device *iwdev)
if ((((rdma_vlan_dev_vlan_id(dev) < 0xFFFF) &&
(rdma_vlan_dev_real_dev(dev) == iwdev->netdev)) ||
(dev == iwdev->netdev)) && (dev->flags & IFF_UP)) {
+ const struct in_ifaddr *ifa;
+
idev = in_dev_get(dev);
- for_ifa(idev) {
+ in_dev_for_each_ifa_rtnl(ifa, idev) {
i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM,
"IP=%pI4, vlan_id=%d, MAC=%pM\n", &ifa->ifa_address,
rdma_vlan_dev_vlan_id(dev), dev->dev_addr);
@@ -1235,7 +1237,7 @@ static void i40iw_add_ipv4_addr(struct i40iw_device *iwdev)
true,
I40IW_ARP_ADD);
}
- endfor_ifa(idev);
+
in_dev_put(idev);
}
}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
index 337410f40860..016524683e17 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
@@ -174,10 +174,14 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
rcu_read_lock();
in = __in_dev_get_rcu(upper_dev);
- if (!in->ifa_list)
- local_ipaddr = 0;
- else
- local_ipaddr = ntohl(in->ifa_list->ifa_address);
+ local_ipaddr = 0;
+ if (in) {
+ struct in_ifaddr *ifa;
+
+ ifa = rcu_dereference(in->ifa_list);
+ if (ifa)
+ local_ipaddr = ntohl(ifa->ifa_address);
+ }
rcu_read_unlock();
} else {
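
Several drivers above used to dereference in->ifa_list without checking for a missing in_device or an empty address list; ifa_list is also RCU-protected now. A minimal sketch of the tolerant pattern (hypothetical helper):

#include <linux/inetdevice.h>

/* Return the first IPv4 address on @ndev, or 0 if it has none. */
static __be32 example_first_ipv4_addr(struct net_device *ndev)
{
        const struct in_ifaddr *ifa;
        struct in_device *in;
        __be32 addr = 0;

        rcu_read_lock();
        in = __in_dev_get_rcu(ndev);
        if (in) {
                ifa = rcu_dereference(in->ifa_list);
                if (ifa)
                        addr = ifa->ifa_address;
        }
        rcu_read_unlock();
        return addr;
}
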
diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c
index 2a0b59a4b6eb..cca414ecfcd5 100644
--- a/drivers/infiniband/hw/mlx4/alias_GUID.c
+++ b/drivers/infiniband/hw/mlx4/alias_GUID.c
@@ -310,7 +310,7 @@ static void aliasguid_query_handler(int status,
if (status) {
pr_debug("(port: %d) failed: status = %d\n",
cb_ctx->port, status);
- rec->time_to_run = ktime_get_boot_ns() + 1 * NSEC_PER_SEC;
+ rec->time_to_run = ktime_get_boottime_ns() + 1 * NSEC_PER_SEC;
goto out;
}
@@ -416,7 +416,7 @@ next_entry:
be64_to_cpu((__force __be64)rec->guid_indexes),
be64_to_cpu((__force __be64)applied_guid_indexes),
be64_to_cpu((__force __be64)declined_guid_indexes));
- rec->time_to_run = ktime_get_boot_ns() +
+ rec->time_to_run = ktime_get_boottime_ns() +
resched_delay_sec * NSEC_PER_SEC;
} else {
rec->status = MLX4_GUID_INFO_STATUS_SET;
@@ -709,7 +709,7 @@ static int get_low_record_time_index(struct mlx4_ib_dev *dev, u8 port,
}
}
if (resched_delay_sec) {
- u64 curr_time = ktime_get_boot_ns();
+ u64 curr_time = ktime_get_boottime_ns();
*resched_delay_sec = (low_record_time < curr_time) ? 0 :
div_u64((low_record_time - curr_time), NSEC_PER_SEC);
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 2e2e65f00257..4efbbd2fce0c 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -37,7 +37,7 @@
#include "mlx5_ib.h"
#include "srq.h"
-static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq)
+static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe)
{
struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;
@@ -522,9 +522,9 @@ repoll:
case MLX5_CQE_SIG_ERR:
sig_err_cqe = (struct mlx5_sig_err_cqe *)cqe64;
- read_lock(&dev->mdev->priv.mkey_table.lock);
- mmkey = __mlx5_mr_lookup(dev->mdev,
- mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
+ xa_lock(&dev->mdev->priv.mkey_table);
+ mmkey = xa_load(&dev->mdev->priv.mkey_table,
+ mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
mr = to_mibmr(mmkey);
get_sig_err_item(sig_err_cqe, &mr->sig->err_item);
mr->sig->sig_err_exists = true;
@@ -537,7 +537,7 @@ repoll:
mr->sig->err_item.expected,
mr->sig->err_item.actual);
- read_unlock(&dev->mdev->priv.mkey_table.lock);
+ xa_unlock(&dev->mdev->priv.mkey_table);
goto repoll;
}
@@ -891,6 +891,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
int entries = attr->cqe;
int vector = attr->comp_vector;
struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ u32 out[MLX5_ST_SZ_DW(create_cq_out)];
struct mlx5_ib_cq *cq;
int uninitialized_var(index);
int uninitialized_var(inlen);
@@ -958,7 +959,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
if (cq->create_flags & IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN)
MLX5_SET(cqc, cqc, oi, 1);
- err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen);
+ err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen, out, sizeof(out));
if (err)
goto err_cqb;
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index 80b42d069328..931f587dfb8f 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -1043,13 +1043,10 @@ static int devx_handle_mkey_indirect(struct devx_obj *obj,
struct mlx5_ib_dev *dev,
void *in, void *out)
{
- struct mlx5_mkey_table *table = &dev->mdev->priv.mkey_table;
struct mlx5_ib_devx_mr *devx_mr = &obj->devx_mr;
- unsigned long flags;
struct mlx5_core_mkey *mkey;
void *mkc;
u8 key;
- int err;
mkey = &devx_mr->mmkey;
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
@@ -1062,11 +1059,8 @@ static int devx_handle_mkey_indirect(struct devx_obj *obj,
mkey->pd = MLX5_GET(mkc, mkc, pd);
devx_mr->ndescs = MLX5_GET(mkc, mkc, translations_octword_size);
- write_lock_irqsave(&table->lock, flags);
- err = radix_tree_insert(&table->tree, mlx5_base_mkey(mkey->key),
- mkey);
- write_unlock_irqrestore(&table->lock, flags);
- return err;
+ return xa_err(xa_store(&dev->mdev->priv.mkey_table,
+ mlx5_base_mkey(mkey->key), mkey, GFP_KERNEL));
}
static int devx_handle_mkey_create(struct mlx5_ib_dev *dev,
@@ -1117,12 +1111,8 @@ static void devx_free_indirect_mkey(struct rcu_head *rcu)
*/
static void devx_cleanup_mkey(struct devx_obj *obj)
{
- struct mlx5_mkey_table *table = &obj->mdev->priv.mkey_table;
- unsigned long flags;
-
- write_lock_irqsave(&table->lock, flags);
- radix_tree_delete(&table->tree, mlx5_base_mkey(obj->devx_mr.mmkey.key));
- write_unlock_irqrestore(&table->lock, flags);
+ xa_erase(&obj->mdev->priv.mkey_table,
+ mlx5_base_mkey(obj->devx_mr.mmkey.key));
}
static int devx_obj_cleanup(struct ib_uobject *uobject,
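
The mkey table conversion in cq.c, devx.c, mr.c and odp.c follows one pattern: the radix tree plus external rwlock becomes an xarray, which carries its own spinlock and allows lockless lookups. A generic sketch of the three operations (names are illustrative, not the mlx5 ones):

#include <linux/xarray.h>

static DEFINE_XARRAY(example_table);

static int example_insert(unsigned long index, void *entry)
{
        /* xa_store() returns the old entry or an xa_err()-encoded error. */
        return xa_err(xa_store(&example_table, index, entry, GFP_KERNEL));
}

static void *example_lookup(unsigned long index)
{
        return xa_load(&example_table, index);  /* RCU-safe, no lock needed */
}

static void example_remove(unsigned long index)
{
        xa_erase(&example_table, index);
}
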
diff --git a/drivers/infiniband/hw/mlx5/flow.c b/drivers/infiniband/hw/mlx5/flow.c
index 1fc302d41a53..b8841355fcd5 100644
--- a/drivers/infiniband/hw/mlx5/flow.c
+++ b/drivers/infiniband/hw/mlx5/flow.c
@@ -65,11 +65,12 @@ static const struct uverbs_attr_spec mlx5_ib_flow_type[] = {
static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
struct uverbs_attr_bundle *attrs)
{
- struct mlx5_flow_act flow_act = {.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG};
+ struct mlx5_flow_context flow_context = {.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG};
struct mlx5_ib_flow_handler *flow_handler;
struct mlx5_ib_flow_matcher *fs_matcher;
struct ib_uobject **arr_flow_actions;
struct ib_uflow_resources *uflow_res;
+ struct mlx5_flow_act flow_act = {};
void *devx_obj;
int dest_id, dest_type;
void *cmd_in;
@@ -172,17 +173,19 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
arr_flow_actions[i]->object);
}
- ret = uverbs_copy_from(&flow_act.flow_tag, attrs,
+ ret = uverbs_copy_from(&flow_context.flow_tag, attrs,
MLX5_IB_ATTR_CREATE_FLOW_TAG);
if (!ret) {
- if (flow_act.flow_tag >= BIT(24)) {
+ if (flow_context.flow_tag >= BIT(24)) {
ret = -EINVAL;
goto err_out;
}
- flow_act.flags |= FLOW_ACT_HAS_TAG;
+ flow_context.flags |= FLOW_CONTEXT_HAS_TAG;
}
- flow_handler = mlx5_ib_raw_fs_rule_add(dev, fs_matcher, &flow_act,
+ flow_handler = mlx5_ib_raw_fs_rule_add(dev, fs_matcher,
+ &flow_context,
+ &flow_act,
counter_id,
cmd_in, inlen,
dest_id, dest_type);
diff --git a/drivers/infiniband/hw/mlx5/ib_rep.c b/drivers/infiniband/hw/mlx5/ib_rep.c
index 269b24a3baa1..74ce9249e75a 100644
--- a/drivers/infiniband/hw/mlx5/ib_rep.c
+++ b/drivers/infiniband/hw/mlx5/ib_rep.c
@@ -14,9 +14,10 @@ mlx5_ib_set_vport_rep(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
int vport_index;
ibdev = mlx5_ib_get_uplink_ibdev(dev->priv.eswitch);
- vport_index = ibdev->free_port++;
+ vport_index = rep->vport_index;
ibdev->port[vport_index].rep = rep;
+ rep->rep_data[REP_IB].priv = ibdev;
write_lock(&ibdev->port[vport_index].roce.netdev_lock);
ibdev->port[vport_index].roce.netdev =
mlx5_ib_get_rep_netdev(dev->priv.eswitch, rep->vport);
@@ -28,7 +29,7 @@ mlx5_ib_set_vport_rep(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
static int
mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
- int num_ports = MLX5_TOTAL_VPORTS(dev);
+ int num_ports = mlx5_eswitch_get_total_vports(dev);
const struct mlx5_ib_profile *profile;
struct mlx5_ib_dev *ibdev;
int vport_index;
@@ -50,7 +51,7 @@ mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
}
ibdev->is_rep = true;
- vport_index = ibdev->free_port++;
+ vport_index = rep->vport_index;
ibdev->port[vport_index].rep = rep;
ibdev->port[vport_index].roce.netdev =
mlx5_ib_get_rep_netdev(dev->priv.eswitch, rep->vport);
@@ -60,7 +61,7 @@ mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
if (!__mlx5_ib_add(ibdev, profile))
return -EINVAL;
- rep->rep_if[REP_IB].priv = ibdev;
+ rep->rep_data[REP_IB].priv = ibdev;
return 0;
}
@@ -68,15 +69,18 @@ mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
static void
mlx5_ib_vport_rep_unload(struct mlx5_eswitch_rep *rep)
{
- struct mlx5_ib_dev *dev;
+ struct mlx5_ib_dev *dev = mlx5_ib_rep_to_dev(rep);
+ struct mlx5_ib_port *port;
- if (!rep->rep_if[REP_IB].priv ||
- rep->vport != MLX5_VPORT_UPLINK)
- return;
+ port = &dev->port[rep->vport_index];
+ write_lock(&port->roce.netdev_lock);
+ port->roce.netdev = NULL;
+ write_unlock(&port->roce.netdev_lock);
+ rep->rep_data[REP_IB].priv = NULL;
+ port->rep = NULL;
- dev = mlx5_ib_rep_to_dev(rep);
- __mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
- rep->rep_if[REP_IB].priv = NULL;
+ if (rep->vport == MLX5_VPORT_UPLINK)
+ __mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
}
static void *mlx5_ib_vport_get_proto_dev(struct mlx5_eswitch_rep *rep)
@@ -84,16 +88,17 @@ static void *mlx5_ib_vport_get_proto_dev(struct mlx5_eswitch_rep *rep)
return mlx5_ib_rep_to_dev(rep);
}
+static const struct mlx5_eswitch_rep_ops rep_ops = {
+ .load = mlx5_ib_vport_rep_load,
+ .unload = mlx5_ib_vport_rep_unload,
+ .get_proto_dev = mlx5_ib_vport_get_proto_dev,
+};
+
void mlx5_ib_register_vport_reps(struct mlx5_core_dev *mdev)
{
struct mlx5_eswitch *esw = mdev->priv.eswitch;
- struct mlx5_eswitch_rep_if rep_if = {};
-
- rep_if.load = mlx5_ib_vport_rep_load;
- rep_if.unload = mlx5_ib_vport_rep_unload;
- rep_if.get_proto_dev = mlx5_ib_vport_get_proto_dev;
- mlx5_eswitch_register_vport_reps(esw, &rep_if, REP_IB);
+ mlx5_eswitch_register_vport_reps(esw, &rep_ops, REP_IB);
}
void mlx5_ib_unregister_vport_reps(struct mlx5_core_dev *mdev)
diff --git a/drivers/infiniband/hw/mlx5/ib_rep.h b/drivers/infiniband/hw/mlx5/ib_rep.h
index 8336e0517a5c..de43b423bafc 100644
--- a/drivers/infiniband/hw/mlx5/ib_rep.h
+++ b/drivers/infiniband/hw/mlx5/ib_rep.h
@@ -28,7 +28,7 @@ struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw,
#else /* CONFIG_MLX5_ESWITCH */
static inline u8 mlx5_ib_eswitch_mode(struct mlx5_eswitch *esw)
{
- return SRIOV_NONE;
+ return MLX5_ESWITCH_NONE;
}
static inline
@@ -72,6 +72,6 @@ struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw,
static inline
struct mlx5_ib_dev *mlx5_ib_rep_to_dev(struct mlx5_eswitch_rep *rep)
{
- return (struct mlx5_ib_dev *)rep->rep_if[REP_IB].priv;
+ return rep->rep_data[REP_IB].priv;
}
#endif /* __MLX5_IB_REP_H__ */
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 340290b883fe..ba312bf59c7a 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -2666,11 +2666,15 @@ int parse_flow_flow_action(struct mlx5_ib_flow_action *maction,
}
}
-static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c,
- u32 *match_v, const union ib_flow_spec *ib_spec,
+static int parse_flow_attr(struct mlx5_core_dev *mdev,
+ struct mlx5_flow_spec *spec,
+ const union ib_flow_spec *ib_spec,
const struct ib_flow_attr *flow_attr,
struct mlx5_flow_act *action, u32 prev_type)
{
+ struct mlx5_flow_context *flow_context = &spec->flow_context;
+ u32 *match_c = spec->match_criteria;
+ u32 *match_v = spec->match_value;
void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c,
misc_parameters);
void *misc_params_v = MLX5_ADDR_OF(fte_match_param, match_v,
@@ -2989,8 +2993,8 @@ static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c,
if (ib_spec->flow_tag.tag_id >= BIT(24))
return -EINVAL;
- action->flow_tag = ib_spec->flow_tag.tag_id;
- action->flags |= FLOW_ACT_HAS_TAG;
+ flow_context->flow_tag = ib_spec->flow_tag.tag_id;
+ flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
break;
case IB_FLOW_SPEC_ACTION_DROP:
if (FIELDS_NOT_SUPPORTED(ib_spec->drop,
@@ -3084,7 +3088,8 @@ is_valid_esp_aes_gcm(struct mlx5_core_dev *mdev,
return VALID_SPEC_NA;
return is_crypto && is_ipsec &&
- (!egress || (!is_drop && !(flow_act->flags & FLOW_ACT_HAS_TAG))) ?
+ (!egress || (!is_drop &&
+ !(spec->flow_context.flags & FLOW_CONTEXT_HAS_TAG))) ?
VALID_SPEC_VALID : VALID_SPEC_INVALID;
}
@@ -3464,6 +3469,37 @@ free:
return ret;
}
+static void mlx5_ib_set_rule_source_port(struct mlx5_ib_dev *dev,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_eswitch_rep *rep)
+{
+ struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
+ void *misc;
+
+ if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ misc_parameters_2);
+
+ MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
+ mlx5_eswitch_get_vport_metadata_for_match(esw,
+ rep->vport));
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ misc_parameters_2);
+
+ MLX5_SET_TO_ONES(fte_match_set_misc2, misc, metadata_reg_c_0);
+ } else {
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ misc_parameters);
+
+ MLX5_SET(fte_match_set_misc, misc, source_port, rep->vport);
+
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ misc_parameters);
+
+ MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
+ }
+}
+
static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
struct mlx5_ib_flow_prio *ft_prio,
const struct ib_flow_attr *flow_attr,
@@ -3473,7 +3509,7 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
{
struct mlx5_flow_table *ft = ft_prio->flow_table;
struct mlx5_ib_flow_handler *handler;
- struct mlx5_flow_act flow_act = {.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG};
+ struct mlx5_flow_act flow_act = {};
struct mlx5_flow_spec *spec;
struct mlx5_flow_destination dest_arr[2] = {};
struct mlx5_flow_destination *rule_dst = dest_arr;
@@ -3504,8 +3540,7 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
}
for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
- err = parse_flow_attr(dev->mdev, spec->match_criteria,
- spec->match_value,
+ err = parse_flow_attr(dev->mdev, spec,
ib_flow, flow_attr, &flow_act,
prev_type);
if (err < 0)
@@ -3519,19 +3554,15 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
set_underlay_qp(dev, spec, underlay_qpn);
if (dev->is_rep) {
- void *misc;
+ struct mlx5_eswitch_rep *rep;
- if (!dev->port[flow_attr->port - 1].rep) {
+ rep = dev->port[flow_attr->port - 1].rep;
+ if (!rep) {
err = -EINVAL;
goto free;
}
- misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
- misc_parameters);
- MLX5_SET(fte_match_set_misc, misc, source_port,
- dev->port[flow_attr->port - 1].rep->vport);
- misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
- misc_parameters);
- MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
+
+ mlx5_ib_set_rule_source_port(dev, spec, rep);
}
spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria);
@@ -3572,11 +3603,11 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
}
- if ((flow_act.flags & FLOW_ACT_HAS_TAG) &&
+ if ((spec->flow_context.flags & FLOW_CONTEXT_HAS_TAG) &&
(flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) {
mlx5_ib_warn(dev, "Flow tag %u and attribute type %x isn't allowed in leftovers\n",
- flow_act.flow_tag, flow_attr->type);
+ spec->flow_context.flow_tag, flow_attr->type);
err = -EINVAL;
goto free;
}
@@ -3947,6 +3978,7 @@ _create_raw_flow_rule(struct mlx5_ib_dev *dev,
struct mlx5_ib_flow_prio *ft_prio,
struct mlx5_flow_destination *dst,
struct mlx5_ib_flow_matcher *fs_matcher,
+ struct mlx5_flow_context *flow_context,
struct mlx5_flow_act *flow_act,
void *cmd_in, int inlen,
int dst_num)
@@ -3969,6 +4001,7 @@ _create_raw_flow_rule(struct mlx5_ib_dev *dev,
memcpy(spec->match_criteria, fs_matcher->matcher_mask.match_params,
fs_matcher->mask_len);
spec->match_criteria_enable = fs_matcher->match_criteria_enable;
+ spec->flow_context = *flow_context;
handler->rule = mlx5_add_flow_rules(ft, spec,
flow_act, dst, dst_num);
@@ -4033,6 +4066,7 @@ static bool raw_fs_is_multicast(struct mlx5_ib_flow_matcher *fs_matcher,
struct mlx5_ib_flow_handler *
mlx5_ib_raw_fs_rule_add(struct mlx5_ib_dev *dev,
struct mlx5_ib_flow_matcher *fs_matcher,
+ struct mlx5_flow_context *flow_context,
struct mlx5_flow_act *flow_act,
u32 counter_id,
void *cmd_in, int inlen, int dest_id,
@@ -4085,7 +4119,8 @@ mlx5_ib_raw_fs_rule_add(struct mlx5_ib_dev *dev,
dst_num++;
}
- handler = _create_raw_flow_rule(dev, ft_prio, dst, fs_matcher, flow_act,
+ handler = _create_raw_flow_rule(dev, ft_prio, dst, fs_matcher,
+ flow_context, flow_act,
cmd_in, inlen, dst_num);
if (IS_ERR(handler)) {
@@ -4457,7 +4492,7 @@ static void mlx5_ib_handle_internal_error(struct mlx5_ib_dev *ibdev)
* lock/unlock above locks Now need to arm all involved CQs.
*/
list_for_each_entry(mcq, &cq_armed_list, reset_notify) {
- mcq->comp(mcq);
+ mcq->comp(mcq, NULL);
}
spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
}
@@ -6779,7 +6814,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
printk_once(KERN_INFO "%s", mlx5_version);
if (MLX5_ESWITCH_MANAGER(mdev) &&
- mlx5_ib_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) {
+ mlx5_ib_eswitch_mode(mdev->priv.eswitch) == MLX5_ESWITCH_OFFLOADS) {
if (!mlx5_core_mp_enabled(mdev))
mlx5_ib_register_vport_reps(mdev);
return mdev;
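
The flow tag moves from struct mlx5_flow_act into the mlx5_flow_context embedded in the flow spec, matching the parse_flow_attr() changes above. A minimal sketch using the fields introduced by this series (header path assumed):

#include <linux/mlx5/fs.h>

/* Attach a flow tag to a rule spec; mlx5_flow_act no longer carries it. */
static void example_set_flow_tag(struct mlx5_flow_spec *spec, u32 tag)
{
        spec->flow_context.flow_tag = tag;
        spec->flow_context.flags |= FLOW_CONTEXT_HAS_TAG;
}
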
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 40eb8be482e4..ee73dc122d28 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -920,6 +920,7 @@ struct mlx5_ib_lb_state {
};
struct mlx5_ib_pf_eq {
+ struct notifier_block irq_nb;
struct mlx5_ib_dev *dev;
struct mlx5_eq *core;
struct work_struct work;
@@ -977,7 +978,6 @@ struct mlx5_ib_dev {
u16 devx_whitelist_uid;
struct mlx5_srq_table srq_table;
struct mlx5_async_ctx async_ctx;
- int free_port;
};
static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
@@ -1316,6 +1316,7 @@ extern const struct uapi_definition mlx5_ib_devx_defs[];
extern const struct uapi_definition mlx5_ib_flow_defs[];
struct mlx5_ib_flow_handler *mlx5_ib_raw_fs_rule_add(
struct mlx5_ib_dev *dev, struct mlx5_ib_flow_matcher *fs_matcher,
+ struct mlx5_flow_context *flow_context,
struct mlx5_flow_act *flow_act, u32 counter_id,
void *cmd_in, int inlen, int dest_id, int dest_type);
bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type);
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 5f09699fab98..83b452d977d4 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -130,7 +130,7 @@ static void reg_mr_callback(int status, struct mlx5_async_work *context)
struct mlx5_cache_ent *ent = &cache->ent[c];
u8 key;
unsigned long flags;
- struct mlx5_mkey_table *table = &dev->mdev->priv.mkey_table;
+ struct xarray *mkeys = &dev->mdev->priv.mkey_table;
int err;
spin_lock_irqsave(&ent->lock, flags);
@@ -158,12 +158,12 @@ static void reg_mr_callback(int status, struct mlx5_async_work *context)
ent->size++;
spin_unlock_irqrestore(&ent->lock, flags);
- write_lock_irqsave(&table->lock, flags);
- err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->mmkey.key),
- &mr->mmkey);
+ xa_lock_irqsave(mkeys, flags);
+ err = xa_err(__xa_store(mkeys, mlx5_base_mkey(mr->mmkey.key),
+ &mr->mmkey, GFP_ATOMIC));
+ xa_unlock_irqrestore(mkeys, flags);
if (err)
pr_err("Error inserting to mkey tree. 0x%x\n", -err);
- write_unlock_irqrestore(&table->lock, flags);
if (!completion_done(&ent->compl))
complete(&ent->compl);
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 91507a2e9290..831c450b271a 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -768,7 +768,7 @@ static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
bcnt -= *bytes_committed;
next_mr:
- mmkey = __mlx5_mr_lookup(dev->mdev, mlx5_base_mkey(key));
+ mmkey = xa_load(&dev->mdev->priv.mkey_table, mlx5_base_mkey(key));
if (!mkey_is_eq(mmkey, key)) {
mlx5_ib_dbg(dev, "failed to find mkey %x\n", key);
ret = -EFAULT;
@@ -1488,9 +1488,11 @@ static void mlx5_ib_eq_pf_process(struct mlx5_ib_pf_eq *eq)
mlx5_eq_update_ci(eq->core, cc, 1);
}
-static irqreturn_t mlx5_ib_eq_pf_int(int irq, void *eq_ptr)
+static int mlx5_ib_eq_pf_int(struct notifier_block *nb, unsigned long type,
+ void *data)
{
- struct mlx5_ib_pf_eq *eq = eq_ptr;
+ struct mlx5_ib_pf_eq *eq =
+ container_of(nb, struct mlx5_ib_pf_eq, irq_nb);
unsigned long flags;
if (spin_trylock_irqsave(&eq->lock, flags)) {
@@ -1553,20 +1555,26 @@ mlx5_ib_create_pf_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
goto err_mempool;
}
+ eq->irq_nb.notifier_call = mlx5_ib_eq_pf_int;
param = (struct mlx5_eq_param) {
- .index = MLX5_EQ_PFAULT_IDX,
- .mask = 1 << MLX5_EVENT_TYPE_PAGE_FAULT,
+ .irq_index = 0,
.nent = MLX5_IB_NUM_PF_EQE,
- .context = eq,
- .handler = mlx5_ib_eq_pf_int
};
- eq->core = mlx5_eq_create_generic(dev->mdev, "mlx5_ib_page_fault_eq", &param);
+ param.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_FAULT;
+ eq->core = mlx5_eq_create_generic(dev->mdev, &param);
if (IS_ERR(eq->core)) {
err = PTR_ERR(eq->core);
goto err_wq;
}
+ err = mlx5_eq_enable(dev->mdev, eq->core, &eq->irq_nb);
+ if (err) {
+ mlx5_ib_err(dev, "failed to enable odp EQ %d\n", err);
+ goto err_eq;
+ }
return 0;
+err_eq:
+ mlx5_eq_destroy_generic(dev->mdev, eq->core);
err_wq:
destroy_workqueue(eq->wq);
err_mempool:
@@ -1579,6 +1587,7 @@ mlx5_ib_destroy_pf_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
{
int err;
+ mlx5_eq_disable(dev->mdev, eq->core, &eq->irq_nb);
err = mlx5_eq_destroy_generic(dev->mdev, eq->core);
cancel_work_sync(&eq->work);
destroy_workqueue(eq->wq);
@@ -1677,8 +1686,8 @@ static void num_pending_prefetch_dec(struct mlx5_ib_dev *dev,
struct mlx5_core_mkey *mmkey;
struct mlx5_ib_mr *mr;
- mmkey = __mlx5_mr_lookup(dev->mdev,
- mlx5_base_mkey(sg_list[i].lkey));
+ mmkey = xa_load(&dev->mdev->priv.mkey_table,
+ mlx5_base_mkey(sg_list[i].lkey));
mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
atomic_dec(&mr->num_pending_prefetch);
}
@@ -1697,8 +1706,8 @@ static bool num_pending_prefetch_inc(struct ib_pd *pd,
struct mlx5_core_mkey *mmkey;
struct mlx5_ib_mr *mr;
- mmkey = __mlx5_mr_lookup(dev->mdev,
- mlx5_base_mkey(sg_list[i].lkey));
+ mmkey = xa_load(&dev->mdev->priv.mkey_table,
+ mlx5_base_mkey(sg_list[i].lkey));
if (!mmkey || mmkey->key != sg_list[i].lkey) {
ret = false;
break;
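
The ODP page-fault EQ no longer passes a context pointer and handler to the EQ core; instead it embeds a notifier_block and recovers the owning structure with container_of() in the callback. A generic sketch of that pattern (names are illustrative, not the mlx5 ones):

#include <linux/kernel.h>
#include <linux/notifier.h>

struct example_eq {
        struct notifier_block irq_nb;
        /* ... driver state ... */
};

static int example_eq_int(struct notifier_block *nb, unsigned long type,
                          void *data)
{
        struct example_eq *eq = container_of(nb, struct example_eq, irq_nb);

        /* process events queued on eq ... */
        (void)eq;
        return NOTIFY_OK;
}
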
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index f6623c77443a..768c7e81f688 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -6297,7 +6297,7 @@ static void handle_drain_completion(struct ib_cq *cq,
/* Run the CQ handler - this makes sure that the drain WR will
* be processed if wasn't processed yet.
*/
- mcq->mcq.comp(&mcq->mcq);
+ mcq->mcq.comp(&mcq->mcq, NULL);
}
wait_for_completion(&sdrain->done);
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index e00add6d78ec..29b324726ea6 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -183,7 +183,13 @@ static int nes_inetaddr_event(struct notifier_block *notifier,
rcu_read_lock();
in = __in_dev_get_rcu(upper_dev);
- nesvnic->local_ipaddr = in->ifa_list->ifa_address;
+ if (in) {
+ struct in_ifaddr *ifa;
+
+ ifa = rcu_dereference(in->ifa_list);
+ if (ifa)
+ nesvnic->local_ipaddr = ifa->ifa_address;
+ }
rcu_read_unlock();
} else {
nesvnic->local_ipaddr = ifa->ifa_address;
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index 083c2c00a8e9..5ebf3c53b3fb 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -312,7 +312,8 @@ static void qedr_free_mem_sb(struct qedr_dev *dev,
struct qed_sb_info *sb_info, int sb_id)
{
if (sb_info->sb_virt) {
- dev->ops->common->sb_release(dev->cdev, sb_info, sb_id);
+ dev->ops->common->sb_release(dev->cdev, sb_info, sb_id,
+ QED_SB_TYPE_CNQ);
dma_free_coherent(&dev->pdev->dev, sizeof(*sb_info->sb_virt),
(void *)sb_info->sb_virt, sb_info->sb_phys);
}
@@ -504,11 +505,13 @@ static irqreturn_t qedr_irq_handler(int irq, void *handle)
static void qedr_sync_free_irqs(struct qedr_dev *dev)
{
u32 vector;
+ u16 idx;
int i;
for (i = 0; i < dev->int_info.used_cnt; i++) {
if (dev->int_info.msix_cnt) {
- vector = dev->int_info.msix[i * dev->num_hwfns].vector;
+ idx = i * dev->num_hwfns + dev->affin_hwfn_idx;
+ vector = dev->int_info.msix[idx].vector;
synchronize_irq(vector);
free_irq(vector, &dev->cnq_array[i]);
}
@@ -520,6 +523,7 @@ static void qedr_sync_free_irqs(struct qedr_dev *dev)
static int qedr_req_msix_irqs(struct qedr_dev *dev)
{
int i, rc = 0;
+ u16 idx;
if (dev->num_cnq > dev->int_info.msix_cnt) {
DP_ERR(dev,
@@ -529,7 +533,8 @@ static int qedr_req_msix_irqs(struct qedr_dev *dev)
}
for (i = 0; i < dev->num_cnq; i++) {
- rc = request_irq(dev->int_info.msix[i * dev->num_hwfns].vector,
+ idx = i * dev->num_hwfns + dev->affin_hwfn_idx;
+ rc = request_irq(dev->int_info.msix[idx].vector,
qedr_irq_handler, 0, dev->cnq_array[i].name,
&dev->cnq_array[i]);
if (rc) {
@@ -866,6 +871,16 @@ static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
dev->user_dpm_enabled = dev_info.user_dpm_enabled;
dev->rdma_type = dev_info.rdma_type;
dev->num_hwfns = dev_info.common.num_hwfns;
+
+ if (IS_IWARP(dev) && QEDR_IS_CMT(dev)) {
+ rc = dev->ops->iwarp_set_engine_affin(cdev, false);
+ if (rc) {
 DP_ERR(dev, "iWARP is disabled over a 100g device. Enabling it may impact L2 performance. To enable it run devlink dev param set <dev> name iwarp_cmt value true cmode runtime\n");

+ goto init_err;
+ }
+ }
+ dev->affin_hwfn_idx = dev->ops->common->get_affin_hwfn_idx(cdev);
+
dev->rdma_ctx = dev->ops->rdma_get_rdma_ctx(cdev);
dev->num_cnq = dev->ops->rdma_get_min_cnq_msix(cdev);
@@ -926,6 +941,10 @@ static void qedr_remove(struct qedr_dev *dev)
qedr_stop_hw(dev);
qedr_sync_free_irqs(dev);
qedr_free_resources(dev);
+
+ if (IS_IWARP(dev) && QEDR_IS_CMT(dev))
+ dev->ops->iwarp_set_engine_affin(dev->cdev, true);
+
ib_dealloc_device(&dev->ibdev);
}
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index 6175d1e98717..a92ca22e5de1 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -157,6 +157,8 @@ struct qedr_dev {
u32 dp_module;
u8 dp_level;
u8 num_hwfns;
+#define QEDR_IS_CMT(dev) ((dev)->num_hwfns > 1)
+ u8 affin_hwfn_idx;
u8 gsi_ll2_handle;
uint wq_multiplier;
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 78fa634de98a..27b6e664e59d 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -1142,7 +1142,7 @@ static __poll_t qib_poll(struct file *fp, struct poll_table_struct *pt)
static void assign_ctxt_affinity(struct file *fp, struct qib_devdata *dd)
{
struct qib_filedata *fd = fp->private_data;
- const unsigned int weight = cpumask_weight(&current->cpus_allowed);
+ const unsigned int weight = current->nr_cpus_allowed;
const struct cpumask *local_mask = cpumask_of_pcibus(dd->pcidev->bus);
int local_cpu;
@@ -1623,9 +1623,8 @@ static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo)
ret = find_free_ctxt(i_minor - 1, fp, uinfo);
else {
int unit;
- const unsigned int cpu = cpumask_first(&current->cpus_allowed);
- const unsigned int weight =
- cpumask_weight(&current->cpus_allowed);
+ const unsigned int cpu = cpumask_first(current->cpus_ptr);
+ const unsigned int weight = current->nr_cpus_allowed;
if (weight == 1 && !test_bit(cpu, qib_cpulist))
if (!find_hca(cpu, &unit) && unit >= 0)
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c
index d88d9f8a7f9a..34c1f9d6c915 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_main.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c
@@ -427,11 +427,16 @@ static void *usnic_ib_device_add(struct pci_dev *dev)
if (netif_carrier_ok(us_ibdev->netdev))
usnic_fwd_carrier_up(us_ibdev->ufdev);
- ind = in_dev_get(netdev);
- if (ind->ifa_list)
- usnic_fwd_add_ipaddr(us_ibdev->ufdev,
- ind->ifa_list->ifa_address);
- in_dev_put(ind);
+ rcu_read_lock();
+ ind = __in_dev_get_rcu(netdev);
+ if (ind) {
+ const struct in_ifaddr *ifa;
+
+ ifa = rcu_dereference(ind->ifa_list);
+ if (ifa)
+ usnic_fwd_add_ipaddr(us_ibdev->ufdev, ifa->ifa_address);
+ }
+ rcu_read_unlock();
usnic_mac_ip_to_gid(us_ibdev->netdev->perm_addr,
us_ibdev->ufdev->inaddr, &gid.raw[0]);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 9b5e11d3fb85..04ea7db08e87 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1998,6 +1998,7 @@ static int ipoib_get_vf_config(struct net_device *dev, int vf,
return err;
ivf->vf = vf;
+ memcpy(ivf->mac, dev->dev_addr, dev->addr_len);
return 0;
}
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 4305da2c9037..d5cbad2c61e4 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -2340,7 +2340,6 @@ static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
{
struct srp_target_port *target = host_to_target(shost);
- struct srp_rport *rport = target->rport;
struct srp_rdma_ch *ch;
struct srp_request *req;
struct srp_iu *iu;
@@ -2350,16 +2349,6 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
u32 tag;
u16 idx;
int len, ret;
- const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
-
- /*
- * The SCSI EH thread is the only context from which srp_queuecommand()
- * can get invoked for blocked devices (SDEV_BLOCK /
- * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
- * locking the rport mutex if invoked from inside the SCSI EH.
- */
- if (in_scsi_eh)
- mutex_lock(&rport->mutex);
scmnd->result = srp_chkready(target->rport);
if (unlikely(scmnd->result))
@@ -2428,13 +2417,7 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
goto err_unmap;
}
- ret = 0;
-
-unlock_rport:
- if (in_scsi_eh)
- mutex_unlock(&rport->mutex);
-
- return ret;
+ return 0;
err_unmap:
srp_unmap_data(scmnd, ch, req);
@@ -2456,7 +2439,7 @@ err:
ret = SCSI_MLQUEUE_HOST_BUSY;
}
- goto unlock_rport;
+ return ret;
}
/*
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index e4352741c467..b695094290ab 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -1406,7 +1406,7 @@ static void __init i8042_register_ports(void)
* behavior on many platforms using suspend-to-RAM (ACPI S3)
* by default.
*/
- if (pm_suspend_via_s2idle() && i == I8042_KBD_PORT_NO)
+ if (pm_suspend_default_s2idle() && i == I8042_KBD_PORT_NO)
device_set_wakeup_enable(&serio->dev, true);
}
}
diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c
index 8c8ac4dff0d5..00cb1ba2d364 100644
--- a/drivers/input/touchscreen/sur40.c
+++ b/drivers/input/touchscreen/sur40.c
@@ -929,10 +929,6 @@ static int sur40_vidioc_querycap(struct file *file, void *priv,
strlcpy(cap->driver, DRIVER_SHORT, sizeof(cap->driver));
strlcpy(cap->card, DRIVER_LONG, sizeof(cap->card));
usb_make_path(sur40->usbdev, cap->bus_info, sizeof(cap->bus_info));
- cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TOUCH |
- V4L2_CAP_READWRITE |
- V4L2_CAP_STREAMING;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -1162,6 +1158,8 @@ static const struct video_device sur40_video_device = {
.fops = &sur40_video_fops,
.ioctl_ops = &sur40_video_ioctl_ops,
.release = video_device_release_empty,
+ .device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TOUCH |
+ V4L2_CAP_READWRITE | V4L2_CAP_STREAMING,
};
/* USB-specific object needed to register this driver with the USB subsystem. */
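
sur40 now declares its capabilities once on the video_device instead of filling them in every vidioc_querycap() call. A minimal sketch of the new initializer style (field values are illustrative):

#include <linux/videodev2.h>
#include <media/v4l2-dev.h>

static const struct video_device example_vdev = {
        .name = "example-capture",
        .release = video_device_release_empty,
        .device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
                       V4L2_CAP_STREAMING,
};
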
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index dce1d8d2e8a4..73740b969e62 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -619,9 +619,9 @@ retry:
pasid = ((event[0] >> 16) & 0xFFFF)
| ((event[1] << 6) & 0xF0000);
tag = event[1] & 0x03FF;
- dev_err(dev, "Event logged [INVALID_PPR_REQUEST device=%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n",
+ dev_err(dev, "Event logged [INVALID_PPR_REQUEST device=%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x tag=0x%03x]\n",
PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
- pasid, address, flags);
+ pasid, address, flags, tag);
break;
default:
dev_err(dev, "Event logged [UNKNOWN event[0]=0x%08x event[1]=0x%08x event[2]=0x%08x event[3]=0x%08x\n",
@@ -1295,6 +1295,16 @@ static void domain_flush_complete(struct protection_domain *domain)
}
}
+/* Flush the not present cache if it exists */
+static void domain_flush_np_cache(struct protection_domain *domain,
+ dma_addr_t iova, size_t size)
+{
+ if (unlikely(amd_iommu_np_cache)) {
+ domain_flush_pages(domain, iova, size);
+ domain_flush_complete(domain);
+ }
+}
+
/*
* This function flushes the DTEs for all devices in domain
@@ -2377,10 +2387,7 @@ static dma_addr_t __map_single(struct device *dev,
}
address += offset;
- if (unlikely(amd_iommu_np_cache)) {
- domain_flush_pages(&dma_dom->domain, address, size);
- domain_flush_complete(&dma_dom->domain);
- }
+ domain_flush_np_cache(&dma_dom->domain, address, size);
out:
return address;
@@ -2559,6 +2566,9 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
s->dma_length = s->length;
}
+ if (s)
+ domain_flush_np_cache(domain, s->dma_address, s->dma_length);
+
return nelems;
out_unmap:
@@ -2597,7 +2607,7 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
struct protection_domain *domain;
struct dma_ops_domain *dma_dom;
unsigned long startaddr;
- int npages = 2;
+ int npages;
domain = get_domain(dev);
if (IS_ERR(domain))
@@ -3039,6 +3049,8 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
ret = iommu_map_page(domain, iova, paddr, page_size, prot, GFP_KERNEL);
mutex_unlock(&domain->api_lock);
+ domain_flush_np_cache(domain, iova, page_size);
+
return ret;
}
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 07d84dbab564..eb104c719629 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -406,6 +406,9 @@ static void iommu_enable(struct amd_iommu *iommu)
static void iommu_disable(struct amd_iommu *iommu)
{
+ if (!iommu->mmio_base)
+ return;
+
/* Disable command buffer */
iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
@@ -2325,15 +2328,6 @@ static void __init free_iommu_resources(void)
amd_iommu_dev_table = NULL;
free_iommu_all();
-
-#ifdef CONFIG_GART_IOMMU
- /*
- * We failed to initialize the AMD IOMMU - try fallback to GART
- * if possible.
- */
- gart_iommu_init();
-
-#endif
}
/* SB IOAPIC is always on this device in AMD systems */
@@ -2625,8 +2619,6 @@ static int __init state_next(void)
init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
if (init_state == IOMMU_ACPI_FINISHED && amd_iommu_disabled) {
pr_info("AMD IOMMU disabled on kernel command-line\n");
- free_dma_resources();
- free_iommu_resources();
init_state = IOMMU_CMDLINE_DISABLED;
ret = -EINVAL;
}
@@ -2667,6 +2659,19 @@ static int __init state_next(void)
BUG();
}
+ if (ret) {
+ free_dma_resources();
+ if (!irq_remapping_enabled) {
+ disable_iommus();
+ free_iommu_resources();
+ } else {
+ struct amd_iommu *iommu;
+
+ uninit_device_table_dma();
+ for_each_iommu(iommu)
+ iommu_flush_all_caches(iommu);
+ }
+ }
return ret;
}
@@ -2740,17 +2745,15 @@ static int __init amd_iommu_init(void)
int ret;
ret = iommu_go_to_state(IOMMU_INITIALIZED);
- if (ret) {
- free_dma_resources();
- if (!irq_remapping_enabled) {
- disable_iommus();
- free_iommu_resources();
- } else {
- uninit_device_table_dma();
- for_each_iommu(iommu)
- iommu_flush_all_caches(iommu);
- }
+#ifdef CONFIG_GART_IOMMU
+ if (ret && list_empty(&amd_iommu_list)) {
+ /*
+ * We failed to initialize the AMD IOMMU - try fallback
+ * to GART if possible.
+ */
+ gart_iommu_init();
}
+#endif
for_each_iommu(iommu)
amd_iommu_debugfs_setup(iommu);
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 4d5a694f02c2..2d96cf0023dd 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -192,6 +192,13 @@
#define Q_BASE_ADDR_MASK GENMASK_ULL(51, 5)
#define Q_BASE_LOG2SIZE GENMASK(4, 0)
+/* Ensure DMA allocations are naturally aligned */
+#ifdef CONFIG_CMA_ALIGNMENT
+#define Q_MAX_SZ_SHIFT (PAGE_SHIFT + CONFIG_CMA_ALIGNMENT)
+#else
+#define Q_MAX_SZ_SHIFT (PAGE_SHIFT + MAX_ORDER - 1)
+#endif
+
/*
* Stream table.
*
@@ -289,8 +296,9 @@
FIELD_GET(ARM64_TCR_##fld, tcr))
/* Command queue */
-#define CMDQ_ENT_DWORDS 2
-#define CMDQ_MAX_SZ_SHIFT 8
+#define CMDQ_ENT_SZ_SHIFT 4
+#define CMDQ_ENT_DWORDS ((1 << CMDQ_ENT_SZ_SHIFT) >> 3)
+#define CMDQ_MAX_SZ_SHIFT (Q_MAX_SZ_SHIFT - CMDQ_ENT_SZ_SHIFT)
#define CMDQ_CONS_ERR GENMASK(30, 24)
#define CMDQ_ERR_CERROR_NONE_IDX 0
@@ -336,14 +344,16 @@
#define CMDQ_SYNC_1_MSIADDR_MASK GENMASK_ULL(51, 2)
/* Event queue */
-#define EVTQ_ENT_DWORDS 4
-#define EVTQ_MAX_SZ_SHIFT 7
+#define EVTQ_ENT_SZ_SHIFT 5
+#define EVTQ_ENT_DWORDS ((1 << EVTQ_ENT_SZ_SHIFT) >> 3)
+#define EVTQ_MAX_SZ_SHIFT (Q_MAX_SZ_SHIFT - EVTQ_ENT_SZ_SHIFT)
#define EVTQ_0_ID GENMASK_ULL(7, 0)
/* PRI queue */
-#define PRIQ_ENT_DWORDS 2
-#define PRIQ_MAX_SZ_SHIFT 8
+#define PRIQ_ENT_SZ_SHIFT 4
+#define PRIQ_ENT_DWORDS ((1 << PRIQ_ENT_SZ_SHIFT) >> 3)
+#define PRIQ_MAX_SZ_SHIFT (Q_MAX_SZ_SHIFT - PRIQ_ENT_SZ_SHIFT)
#define PRIQ_0_SID GENMASK_ULL(31, 0)
#define PRIQ_0_SSID GENMASK_ULL(51, 32)
@@ -798,7 +808,7 @@ static int queue_remove_raw(struct arm_smmu_queue *q, u64 *ent)
/* High-level queue accessors */
static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
{
- memset(cmd, 0, CMDQ_ENT_DWORDS << 3);
+ memset(cmd, 0, 1 << CMDQ_ENT_SZ_SHIFT);
cmd[0] |= FIELD_PREP(CMDQ_0_OP, ent->opcode);
switch (ent->opcode) {
@@ -1785,13 +1795,11 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
.pgsize_bitmap = smmu->pgsize_bitmap,
.ias = ias,
.oas = oas,
+ .coherent_walk = smmu->features & ARM_SMMU_FEAT_COHERENCY,
.tlb = &arm_smmu_gather_ops,
.iommu_dev = smmu->dev,
};
- if (smmu->features & ARM_SMMU_FEAT_COHERENCY)
- pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_NO_DMA;
-
if (smmu_domain->non_strict)
pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
@@ -1884,9 +1892,13 @@ static int arm_smmu_enable_ats(struct arm_smmu_master *master)
static void arm_smmu_disable_ats(struct arm_smmu_master *master)
{
+ struct arm_smmu_cmdq_ent cmd;
+
if (!master->ats_enabled || !dev_is_pci(master->dev))
return;
+ arm_smmu_atc_inv_to_cmd(0, 0, 0, &cmd);
+ arm_smmu_atc_inv_master(master, &cmd);
pci_disable_ats(to_pci_dev(master->dev));
master->ats_enabled = false;
}
@@ -1906,7 +1918,6 @@ static void arm_smmu_detach_dev(struct arm_smmu_master *master)
master->domain = NULL;
arm_smmu_install_ste_for_dev(master);
- /* Disabling ATS invalidates all ATC entries */
arm_smmu_disable_ats(master);
}
@@ -2270,17 +2281,32 @@ static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
struct arm_smmu_queue *q,
unsigned long prod_off,
unsigned long cons_off,
- size_t dwords)
+ size_t dwords, const char *name)
{
- size_t qsz = ((1 << q->max_n_shift) * dwords) << 3;
+ size_t qsz;
+
+ do {
+ qsz = ((1 << q->max_n_shift) * dwords) << 3;
+ q->base = dmam_alloc_coherent(smmu->dev, qsz, &q->base_dma,
+ GFP_KERNEL);
+ if (q->base || qsz < PAGE_SIZE)
+ break;
+
+ q->max_n_shift--;
+ } while (1);
- q->base = dmam_alloc_coherent(smmu->dev, qsz, &q->base_dma, GFP_KERNEL);
if (!q->base) {
- dev_err(smmu->dev, "failed to allocate queue (0x%zx bytes)\n",
- qsz);
+ dev_err(smmu->dev,
+ "failed to allocate queue (0x%zx bytes) for %s\n",
+ qsz, name);
return -ENOMEM;
}
+ if (!WARN_ON(q->base_dma & (qsz - 1))) {
+ dev_info(smmu->dev, "allocated %u entries for %s\n",
+ 1 << q->max_n_shift, name);
+ }
+
q->prod_reg = arm_smmu_page1_fixup(prod_off, smmu);
q->cons_reg = arm_smmu_page1_fixup(cons_off, smmu);
q->ent_dwords = dwords;
@@ -2300,13 +2326,15 @@ static int arm_smmu_init_queues(struct arm_smmu_device *smmu)
/* cmdq */
spin_lock_init(&smmu->cmdq.lock);
ret = arm_smmu_init_one_queue(smmu, &smmu->cmdq.q, ARM_SMMU_CMDQ_PROD,
- ARM_SMMU_CMDQ_CONS, CMDQ_ENT_DWORDS);
+ ARM_SMMU_CMDQ_CONS, CMDQ_ENT_DWORDS,
+ "cmdq");
if (ret)
return ret;
/* evtq */
ret = arm_smmu_init_one_queue(smmu, &smmu->evtq.q, ARM_SMMU_EVTQ_PROD,
- ARM_SMMU_EVTQ_CONS, EVTQ_ENT_DWORDS);
+ ARM_SMMU_EVTQ_CONS, EVTQ_ENT_DWORDS,
+ "evtq");
if (ret)
return ret;
@@ -2315,7 +2343,8 @@ static int arm_smmu_init_queues(struct arm_smmu_device *smmu)
return 0;
return arm_smmu_init_one_queue(smmu, &smmu->priq.q, ARM_SMMU_PRIQ_PROD,
- ARM_SMMU_PRIQ_CONS, PRIQ_ENT_DWORDS);
+ ARM_SMMU_PRIQ_CONS, PRIQ_ENT_DWORDS,
+ "priq");
}
static int arm_smmu_init_l1_strtab(struct arm_smmu_device *smmu)
@@ -2879,7 +2908,7 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
return -ENXIO;
}
- /* Queue sizes, capped at 4k */
+ /* Queue sizes, capped to ensure natural alignment */
smmu->cmdq.q.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT,
FIELD_GET(IDR1_CMDQS, reg));
if (!smmu->cmdq.q.max_n_shift) {
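
The SMMUv3 queues are now allocated with a shrink-on-failure loop: if a naturally aligned buffer of the requested size cannot be obtained, the number of entries is halved until the allocation succeeds or drops below a page. A generic sketch of that strategy (hypothetical helper, simplified from the code above):

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

static void *example_alloc_queue(struct device *dev, unsigned int *n_shift,
                                 size_t ent_size, dma_addr_t *dma)
{
        size_t qsz;
        void *base;

        do {
                qsz = (1UL << *n_shift) * ent_size;
                base = dmam_alloc_coherent(dev, qsz, dma, GFP_KERNEL);
                if (base || qsz < PAGE_SIZE)
                        break;
                (*n_shift)--;
        } while (1);

        return base;
}
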
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 586dd5a46d9f..653b6b3dcafb 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -892,13 +892,11 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
.pgsize_bitmap = smmu->pgsize_bitmap,
.ias = ias,
.oas = oas,
+ .coherent_walk = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK,
.tlb = smmu_domain->tlb_ops,
.iommu_dev = smmu->dev,
};
- if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
- pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_NO_DMA;
-
if (smmu_domain->non_strict)
pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 379318266468..f802255219d3 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -10,7 +10,9 @@
#include <linux/acpi_iort.h>
#include <linux/device.h>
+#include <linux/dma-contiguous.h>
#include <linux/dma-iommu.h>
+#include <linux/dma-noncoherent.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
@@ -67,11 +69,6 @@ static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
return cookie;
}
-int iommu_dma_init(void)
-{
- return iova_cache_get();
-}
-
/**
* iommu_get_dma_cookie - Acquire DMA-API resources for a domain
* @domain: IOMMU domain to prepare for DMA-API usage
@@ -229,8 +226,8 @@ resv_iova:
start = window->res->end - window->offset + 1;
/* If window is last entry */
if (window->node.next == &bridge->dma_ranges &&
- end != ~(dma_addr_t)0) {
- end = ~(dma_addr_t)0;
+ end != ~(phys_addr_t)0) {
+ end = ~(phys_addr_t)0;
goto resv_iova;
}
}
@@ -302,7 +299,7 @@ static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
* to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
* any change which could make prior IOVAs invalid will fail.
*/
-int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
+static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
u64 size, struct device *dev)
{
struct iommu_dma_cookie *cookie = domain->iova_cookie;
@@ -353,7 +350,6 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
return iova_reserve_iommu_regions(dev, domain);
}
-EXPORT_SYMBOL(iommu_dma_init_domain);
/**
* dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
@@ -364,7 +360,7 @@ EXPORT_SYMBOL(iommu_dma_init_domain);
*
* Return: corresponding IOMMU API page protection flags
*/
-int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
+static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
unsigned long attrs)
{
int prot = coherent ? IOMMU_CACHE : 0;
@@ -441,9 +437,10 @@ static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
size >> iova_shift(iovad));
}
-static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr,
+static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
size_t size)
{
+ struct iommu_domain *domain = iommu_get_dma_domain(dev);
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iova_domain *iovad = &cookie->iovad;
size_t iova_off = iova_offset(iovad, dma_addr);
@@ -457,6 +454,30 @@ static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr,
iommu_dma_free_iova(cookie, dma_addr, size);
}
+static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
+ size_t size, int prot)
+{
+ struct iommu_domain *domain = iommu_get_dma_domain(dev);
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+ size_t iova_off = 0;
+ dma_addr_t iova;
+
+ if (cookie->type == IOMMU_DMA_IOVA_COOKIE) {
+ iova_off = iova_offset(&cookie->iovad, phys);
+ size = iova_align(&cookie->iovad, size + iova_off);
+ }
+
+ iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
+ if (!iova)
+ return DMA_MAPPING_ERROR;
+
+ if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
+ iommu_dma_free_iova(cookie, iova, size);
+ return DMA_MAPPING_ERROR;
+ }
+ return iova + iova_off;
+}
+
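A small standalone sketch of the offset/align arithmetic __iommu_dma_map() performs before asking the IOVA allocator for space; the 4 KiB granule, the addresses and the allocator result are invented for illustration:

#include <stdio.h>

#define GRANULE 4096UL

int main(void)
{
	unsigned long phys = 0x80001234UL;	/* hypothetical buffer */
	unsigned long size = 0x300UL;

	unsigned long iova_off = phys & (GRANULE - 1);		/* sub-granule offset */
	unsigned long map_size = (size + iova_off + GRANULE - 1) & ~(GRANULE - 1);
	unsigned long iova = 0xff000000UL;	/* pretend allocator result */

	/* map the granule-aligned region, hand the offset back to the caller */
	printf("map phys 0x%lx for 0x%lx bytes, return handle 0x%lx\n",
	       phys - iova_off, map_size, iova + iova_off);
	return 0;
}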
static void __iommu_dma_free_pages(struct page **pages, int count)
{
while (count--)
@@ -522,55 +543,45 @@ static struct page **__iommu_dma_alloc_pages(struct device *dev,
return pages;
}
-/**
- * iommu_dma_free - Free a buffer allocated by iommu_dma_alloc()
- * @dev: Device which owns this buffer
- * @pages: Array of buffer pages as returned by iommu_dma_alloc()
- * @size: Size of buffer in bytes
- * @handle: DMA address of buffer
- *
- * Frees both the pages associated with the buffer, and the array
- * describing them
- */
-void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
- dma_addr_t *handle)
+static struct page **__iommu_dma_get_pages(void *cpu_addr)
{
- __iommu_dma_unmap(iommu_get_dma_domain(dev), *handle, size);
- __iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
- *handle = DMA_MAPPING_ERROR;
+ struct vm_struct *area = find_vm_area(cpu_addr);
+
+ if (!area || !area->pages)
+ return NULL;
+ return area->pages;
}
/**
- * iommu_dma_alloc - Allocate and map a buffer contiguous in IOVA space
+ * iommu_dma_alloc_remap - Allocate and map a buffer contiguous in IOVA space
* @dev: Device to allocate memory for. Must be a real device
* attached to an iommu_dma_domain
* @size: Size of buffer in bytes
+ * @dma_handle: Out argument for allocated DMA handle
* @gfp: Allocation flags
* @attrs: DMA attributes for this allocation
- * @prot: IOMMU mapping flags
- * @handle: Out argument for allocated DMA handle
- * @flush_page: Arch callback which must ensure PAGE_SIZE bytes from the
- * given VA/PA are visible to the given non-coherent device.
*
* If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
* but an IOMMU which supports smaller pages might not map the whole thing.
*
- * Return: Array of struct page pointers describing the buffer,
- * or NULL on failure.
+ * Return: Mapped virtual address, or NULL on failure.
*/
-struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
- unsigned long attrs, int prot, dma_addr_t *handle,
- void (*flush_page)(struct device *, const void *, phys_addr_t))
+static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
struct iommu_domain *domain = iommu_get_dma_domain(dev);
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iova_domain *iovad = &cookie->iovad;
+ bool coherent = dev_is_dma_coherent(dev);
+ int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
+ pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
+ unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
struct page **pages;
struct sg_table sgt;
dma_addr_t iova;
- unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
+ void *vaddr;
- *handle = DMA_MAPPING_ERROR;
+ *dma_handle = DMA_MAPPING_ERROR;
min_size = alloc_sizes & -alloc_sizes;
if (min_size < PAGE_SIZE) {
@@ -596,26 +607,29 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
goto out_free_iova;
- if (!(prot & IOMMU_CACHE)) {
- struct sg_mapping_iter miter;
- /*
- * The CPU-centric flushing implied by SG_MITER_TO_SG isn't
- * sufficient here, so skip it by using the "wrong" direction.
- */
- sg_miter_start(&miter, sgt.sgl, sgt.orig_nents, SG_MITER_FROM_SG);
- while (sg_miter_next(&miter))
- flush_page(dev, miter.addr, page_to_phys(miter.page));
- sg_miter_stop(&miter);
+ if (!(ioprot & IOMMU_CACHE)) {
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sgt.sgl, sg, sgt.orig_nents, i)
+ arch_dma_prep_coherent(sg_page(sg), sg->length);
}
- if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, prot)
+ if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, ioprot)
< size)
goto out_free_sg;
- *handle = iova;
+ vaddr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
+ __builtin_return_address(0));
+ if (!vaddr)
+ goto out_unmap;
+
+ *dma_handle = iova;
sg_free_table(&sgt);
- return pages;
+ return vaddr;
+out_unmap:
+ __iommu_dma_unmap(dev, iova, size);
out_free_sg:
sg_free_table(&sgt);
out_free_iova:
@@ -626,54 +640,94 @@ out_free_pages:
}
/**
- * iommu_dma_mmap - Map a buffer into provided user VMA
- * @pages: Array representing buffer from iommu_dma_alloc()
+ * __iommu_dma_mmap - Map a buffer into provided user VMA
+ * @pages: Array representing buffer from __iommu_dma_alloc()
* @size: Size of buffer in bytes
* @vma: VMA describing requested userspace mapping
*
* Maps the pages of the buffer in @pages into @vma. The caller is responsible
* for verifying the correct size and protection of @vma beforehand.
*/
-
-int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
+static int __iommu_dma_mmap(struct page **pages, size_t size,
+ struct vm_area_struct *vma)
{
return vm_map_pages(vma, pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
}
-static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
- size_t size, int prot, struct iommu_domain *domain)
+static void iommu_dma_sync_single_for_cpu(struct device *dev,
+ dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
- struct iommu_dma_cookie *cookie = domain->iova_cookie;
- size_t iova_off = 0;
- dma_addr_t iova;
+ phys_addr_t phys;
- if (cookie->type == IOMMU_DMA_IOVA_COOKIE) {
- iova_off = iova_offset(&cookie->iovad, phys);
- size = iova_align(&cookie->iovad, size + iova_off);
- }
+ if (dev_is_dma_coherent(dev))
+ return;
- iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
- if (!iova)
- return DMA_MAPPING_ERROR;
+ phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
+ arch_sync_dma_for_cpu(dev, phys, size, dir);
+}
- if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
- iommu_dma_free_iova(cookie, iova, size);
- return DMA_MAPPING_ERROR;
- }
- return iova + iova_off;
+static void iommu_dma_sync_single_for_device(struct device *dev,
+ dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
+{
+ phys_addr_t phys;
+
+ if (dev_is_dma_coherent(dev))
+ return;
+
+ phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
+ arch_sync_dma_for_device(dev, phys, size, dir);
+}
+
+static void iommu_dma_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sgl, int nelems,
+ enum dma_data_direction dir)
+{
+ struct scatterlist *sg;
+ int i;
+
+ if (dev_is_dma_coherent(dev))
+ return;
+
+ for_each_sg(sgl, sg, nelems, i)
+ arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
+}
+
+static void iommu_dma_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sgl, int nelems,
+ enum dma_data_direction dir)
+{
+ struct scatterlist *sg;
+ int i;
+
+ if (dev_is_dma_coherent(dev))
+ return;
+
+ for_each_sg(sgl, sg, nelems, i)
+ arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
}
-dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, int prot)
+static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
{
- return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot,
- iommu_get_dma_domain(dev));
+ phys_addr_t phys = page_to_phys(page) + offset;
+ bool coherent = dev_is_dma_coherent(dev);
+ int prot = dma_info_to_prot(dir, coherent, attrs);
+ dma_addr_t dma_handle;
+
+ dma_handle = __iommu_dma_map(dev, phys, size, prot);
+ if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+ dma_handle != DMA_MAPPING_ERROR)
+ arch_sync_dma_for_device(dev, phys, size, dir);
+ return dma_handle;
}
-void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
- enum dma_data_direction dir, unsigned long attrs)
+static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
{
- __iommu_dma_unmap(iommu_get_dma_domain(dev), handle, size);
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ iommu_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
+ __iommu_dma_unmap(dev, dma_handle, size);
}
/*
@@ -758,18 +812,22 @@ static void __invalidate_sg(struct scatterlist *sg, int nents)
* impedance-matching, to be able to hand off a suitably-aligned list,
* but still preserve the original offsets and sizes for the caller.
*/
-int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, int prot)
+static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir, unsigned long attrs)
{
struct iommu_domain *domain = iommu_get_dma_domain(dev);
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iova_domain *iovad = &cookie->iovad;
struct scatterlist *s, *prev = NULL;
+ int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
dma_addr_t iova;
size_t iova_len = 0;
unsigned long mask = dma_get_seg_boundary(dev);
int i;
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ iommu_dma_sync_sg_for_device(dev, sg, nents, dir);
+
/*
* Work out how much IOVA space we need, and align the segments to
* IOVA granules for the IOMMU driver to handle. With some clever
@@ -829,12 +887,16 @@ out_restore_sg:
return 0;
}
-void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir, unsigned long attrs)
+static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir, unsigned long attrs)
{
dma_addr_t start, end;
struct scatterlist *tmp;
int i;
+
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);
+
/*
* The scatterlist segments are mapped into a single
* contiguous IOVA allocation, so this is incredibly easy.
@@ -846,21 +908,231 @@ void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
sg = tmp;
}
end = sg_dma_address(sg) + sg_dma_len(sg);
- __iommu_dma_unmap(iommu_get_dma_domain(dev), start, end - start);
+ __iommu_dma_unmap(dev, start, end - start);
}
-dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
+static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
return __iommu_dma_map(dev, phys, size,
- dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
- iommu_get_dma_domain(dev));
+ dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO);
}
-void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
+static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
- __iommu_dma_unmap(iommu_get_dma_domain(dev), handle, size);
+ __iommu_dma_unmap(dev, handle, size);
+}
+
+static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
+{
+ size_t alloc_size = PAGE_ALIGN(size);
+ int count = alloc_size >> PAGE_SHIFT;
+ struct page *page = NULL, **pages = NULL;
+
+ /* Non-coherent atomic allocation? Easy */
+ if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
+ dma_free_from_pool(cpu_addr, alloc_size))
+ return;
+
+ if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
+ /*
+ * If the address is remapped, then it's either non-coherent
+ * or highmem CMA, or an iommu_dma_alloc_remap() construction.
+ */
+ pages = __iommu_dma_get_pages(cpu_addr);
+ if (!pages)
+ page = vmalloc_to_page(cpu_addr);
+ dma_common_free_remap(cpu_addr, alloc_size, VM_USERMAP);
+ } else {
+ /* Lowmem means a coherent atomic or CMA allocation */
+ page = virt_to_page(cpu_addr);
+ }
+
+ if (pages)
+ __iommu_dma_free_pages(pages, count);
+ if (page && !dma_release_from_contiguous(dev, page, count))
+ __free_pages(page, get_order(alloc_size));
+}
+
+static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t handle, unsigned long attrs)
+{
+ __iommu_dma_unmap(dev, handle, size);
+ __iommu_dma_free(dev, size, cpu_addr);
+}
+
+static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
+ struct page **pagep, gfp_t gfp, unsigned long attrs)
+{
+ bool coherent = dev_is_dma_coherent(dev);
+ size_t alloc_size = PAGE_ALIGN(size);
+ struct page *page = NULL;
+ void *cpu_addr;
+
+ if (gfpflags_allow_blocking(gfp))
+ page = dma_alloc_from_contiguous(dev, alloc_size >> PAGE_SHIFT,
+ get_order(alloc_size),
+ gfp & __GFP_NOWARN);
+ if (!page)
+ page = alloc_pages(gfp, get_order(alloc_size));
+ if (!page)
+ return NULL;
+
+ if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) {
+ pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
+
+ cpu_addr = dma_common_contiguous_remap(page, alloc_size,
+ VM_USERMAP, prot, __builtin_return_address(0));
+ if (!cpu_addr)
+ goto out_free_pages;
+
+ if (!coherent)
+ arch_dma_prep_coherent(page, size);
+ } else {
+ cpu_addr = page_address(page);
+ }
+
+ *pagep = page;
+ memset(cpu_addr, 0, alloc_size);
+ return cpu_addr;
+out_free_pages:
+ if (!dma_release_from_contiguous(dev, page, alloc_size >> PAGE_SHIFT))
+ __free_pages(page, get_order(alloc_size));
+ return NULL;
+}
+
+static void *iommu_dma_alloc(struct device *dev, size_t size,
+ dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
+{
+ bool coherent = dev_is_dma_coherent(dev);
+ int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
+ struct page *page = NULL;
+ void *cpu_addr;
+
+ gfp |= __GFP_ZERO;
+
+ if (IS_ENABLED(CONFIG_DMA_REMAP) && gfpflags_allow_blocking(gfp) &&
+ !(attrs & DMA_ATTR_FORCE_CONTIGUOUS))
+ return iommu_dma_alloc_remap(dev, size, handle, gfp, attrs);
+
+ if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
+ !gfpflags_allow_blocking(gfp) && !coherent)
+ cpu_addr = dma_alloc_from_pool(PAGE_ALIGN(size), &page, gfp);
+ else
+ cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
+ if (!cpu_addr)
+ return NULL;
+
+ *handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot);
+ if (*handle == DMA_MAPPING_ERROR) {
+ __iommu_dma_free(dev, size, cpu_addr);
+ return NULL;
+ }
+
+ return cpu_addr;
+}
+
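The gfp/attrs checks in iommu_dma_alloc() above encode a simple allocation policy: blocking callers on DMA_REMAP kernels get the remapped page-array path unless DMA_ATTR_FORCE_CONTIGUOUS is set, non-blocking non-coherent callers fall back to the atomic pool, and everything else uses contiguous pages. A toy model of that decision, in plain C with invented flags rather than the kernel API:

#include <stdbool.h>
#include <stdio.h>

enum path { REMAP, ATOMIC_POOL, CONTIG_PAGES };

static enum path pick_path(bool can_block, bool coherent, bool force_contig,
			   bool have_dma_remap, bool have_direct_remap)
{
	if (have_dma_remap && can_block && !force_contig)
		return REMAP;
	if (have_direct_remap && !can_block && !coherent)
		return ATOMIC_POOL;
	return CONTIG_PAGES;
}

int main(void)
{
	static const char *names[] = { "remap", "atomic pool", "contiguous pages" };

	printf("blocking, non-coherent      -> %s\n",
	       names[pick_path(true, false, false, true, true)]);
	printf("non-blocking, non-coherent  -> %s\n",
	       names[pick_path(false, false, false, true, true)]);
	printf("blocking, forced contiguous -> %s\n",
	       names[pick_path(true, false, true, true, true)]);
	return 0;
}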
+static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs)
+{
+ unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ unsigned long pfn, off = vma->vm_pgoff;
+ int ret;
+
+ vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);
+
+ if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
+ return ret;
+
+ if (off >= nr_pages || vma_pages(vma) > nr_pages - off)
+ return -ENXIO;
+
+ if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
+ struct page **pages = __iommu_dma_get_pages(cpu_addr);
+
+ if (pages)
+ return __iommu_dma_mmap(pages, size, vma);
+ pfn = vmalloc_to_pfn(cpu_addr);
+ } else {
+ pfn = page_to_pfn(virt_to_page(cpu_addr));
+ }
+
+ return remap_pfn_range(vma, vma->vm_start, pfn + off,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+}
+
+static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs)
+{
+ struct page *page;
+ int ret;
+
+ if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
+ struct page **pages = __iommu_dma_get_pages(cpu_addr);
+
+ if (pages) {
+ return sg_alloc_table_from_pages(sgt, pages,
+ PAGE_ALIGN(size) >> PAGE_SHIFT,
+ 0, size, GFP_KERNEL);
+ }
+
+ page = vmalloc_to_page(cpu_addr);
+ } else {
+ page = virt_to_page(cpu_addr);
+ }
+
+ ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+ if (!ret)
+ sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
+ return ret;
+}
+
+static const struct dma_map_ops iommu_dma_ops = {
+ .alloc = iommu_dma_alloc,
+ .free = iommu_dma_free,
+ .mmap = iommu_dma_mmap,
+ .get_sgtable = iommu_dma_get_sgtable,
+ .map_page = iommu_dma_map_page,
+ .unmap_page = iommu_dma_unmap_page,
+ .map_sg = iommu_dma_map_sg,
+ .unmap_sg = iommu_dma_unmap_sg,
+ .sync_single_for_cpu = iommu_dma_sync_single_for_cpu,
+ .sync_single_for_device = iommu_dma_sync_single_for_device,
+ .sync_sg_for_cpu = iommu_dma_sync_sg_for_cpu,
+ .sync_sg_for_device = iommu_dma_sync_sg_for_device,
+ .map_resource = iommu_dma_map_resource,
+ .unmap_resource = iommu_dma_unmap_resource,
+};
+
+/*
+ * The IOMMU core code allocates the default DMA domain, which the underlying
+ * IOMMU driver needs to support via the dma-iommu layer.
+ */
+void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size)
+{
+ struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
+
+ if (!domain)
+ goto out_err;
+
+ /*
+ * The IOMMU core code allocates the default DMA domain, which the
+ * underlying IOMMU driver needs to support via the dma-iommu layer.
+ */
+ if (domain->type == IOMMU_DOMAIN_DMA) {
+ if (iommu_dma_init_domain(domain, dma_base, size, dev))
+ goto out_err;
+ dev->dma_ops = &iommu_dma_ops;
+ }
+
+ return;
+out_err:
+ pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
+ dev_name(dev));
}
static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
@@ -881,7 +1153,7 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
if (!msi_page)
return NULL;
- iova = __iommu_dma_map(dev, msi_addr, size, prot, domain);
+ iova = __iommu_dma_map(dev, msi_addr, size, prot);
if (iova == DMA_MAPPING_ERROR)
goto out_free_page;
@@ -943,3 +1215,9 @@ void iommu_dma_compose_msi_msg(struct msi_desc *desc,
msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1;
msg->address_lo += lower_32_bits(msi_page->iova);
}
+
+static int iommu_dma_init(void)
+{
+ return iova_cache_get();
+}
+arch_initcall(iommu_dma_init);
diff --git a/drivers/iommu/intel-iommu-debugfs.c b/drivers/iommu/intel-iommu-debugfs.c
index 7fabf9b1c2dc..73a552914455 100644
--- a/drivers/iommu/intel-iommu-debugfs.c
+++ b/drivers/iommu/intel-iommu-debugfs.c
@@ -14,6 +14,17 @@
#include <asm/irq_remapping.h>
+#include "intel-pasid.h"
+
+struct tbl_walk {
+ u16 bus;
+ u16 devfn;
+ u32 pasid;
+ struct root_entry *rt_entry;
+ struct context_entry *ctx_entry;
+ struct pasid_entry *pasid_tbl_entry;
+};
+
struct iommu_regset {
int offset;
const char *regs;
@@ -131,16 +142,86 @@ out:
}
DEFINE_SHOW_ATTRIBUTE(iommu_regset);
-static void ctx_tbl_entry_show(struct seq_file *m, struct intel_iommu *iommu,
- int bus)
+static inline void print_tbl_walk(struct seq_file *m)
{
- struct context_entry *context;
- int devfn;
+ struct tbl_walk *tbl_wlk = m->private;
+
+ seq_printf(m, "%02x:%02x.%x\t0x%016llx:0x%016llx\t0x%016llx:0x%016llx\t",
+ tbl_wlk->bus, PCI_SLOT(tbl_wlk->devfn),
+ PCI_FUNC(tbl_wlk->devfn), tbl_wlk->rt_entry->hi,
+ tbl_wlk->rt_entry->lo, tbl_wlk->ctx_entry->hi,
+ tbl_wlk->ctx_entry->lo);
+
+ /*
+ * A legacy mode DMAR doesn't support PASID, hence default it to -1
+ * indicating that it's invalid. Also, default all PASID related fields
+ * to 0.
+ */
+ if (!tbl_wlk->pasid_tbl_entry)
+ seq_printf(m, "%-6d\t0x%016llx:0x%016llx:0x%016llx\n", -1,
+ (u64)0, (u64)0, (u64)0);
+ else
+ seq_printf(m, "%-6d\t0x%016llx:0x%016llx:0x%016llx\n",
+ tbl_wlk->pasid, tbl_wlk->pasid_tbl_entry->val[0],
+ tbl_wlk->pasid_tbl_entry->val[1],
+ tbl_wlk->pasid_tbl_entry->val[2]);
+}
- seq_printf(m, " Context Table Entries for Bus: %d\n", bus);
- seq_puts(m, " Entry\tB:D.F\tHigh\tLow\n");
+static void pasid_tbl_walk(struct seq_file *m, struct pasid_entry *tbl_entry,
+ u16 dir_idx)
+{
+ struct tbl_walk *tbl_wlk = m->private;
+ u8 tbl_idx;
+
+ for (tbl_idx = 0; tbl_idx < PASID_TBL_ENTRIES; tbl_idx++) {
+ if (pasid_pte_is_present(tbl_entry)) {
+ tbl_wlk->pasid_tbl_entry = tbl_entry;
+ tbl_wlk->pasid = (dir_idx << PASID_PDE_SHIFT) + tbl_idx;
+ print_tbl_walk(m);
+ }
+
+ tbl_entry++;
+ }
+}
+
+static void pasid_dir_walk(struct seq_file *m, u64 pasid_dir_ptr,
+ u16 pasid_dir_size)
+{
+ struct pasid_dir_entry *dir_entry = phys_to_virt(pasid_dir_ptr);
+ struct pasid_entry *pasid_tbl;
+ u16 dir_idx;
+
+ for (dir_idx = 0; dir_idx < pasid_dir_size; dir_idx++) {
+ pasid_tbl = get_pasid_table_from_pde(dir_entry);
+ if (pasid_tbl)
+ pasid_tbl_walk(m, pasid_tbl, dir_idx);
+
+ dir_entry++;
+ }
+}
+
+static void ctx_tbl_walk(struct seq_file *m, struct intel_iommu *iommu, u16 bus)
+{
+ struct context_entry *context;
+ u16 devfn, pasid_dir_size;
+ u64 pasid_dir_ptr;
for (devfn = 0; devfn < 256; devfn++) {
+ struct tbl_walk tbl_wlk = {0};
+
+ /*
+ * In scalable mode, the root entry points to an upper and a lower
+ * scalable mode context table. Each scalable mode context table has
+ * 128 context entries, whereas a legacy mode context table has 256.
+ * So in scalable mode the context entries for the first 128 devices
+ * live in the lower scalable mode context table and those for the
+ * remaining 128 devices live in the upper one. When devfn > 127,
+ * iommu_context_addr() automatically refers to the upper scalable
+ * mode context table, so the caller doesn't have to worry about the
+ * difference between scalable and non-scalable mode.
+ */
context = iommu_context_addr(iommu, bus, devfn, 0);
if (!context)
return;
@@ -148,33 +229,41 @@ static void ctx_tbl_entry_show(struct seq_file *m, struct intel_iommu *iommu,
if (!context_present(context))
continue;
- seq_printf(m, " %-5d\t%02x:%02x.%x\t%-6llx\t%llx\n", devfn,
- bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
- context[0].hi, context[0].lo);
+ tbl_wlk.bus = bus;
+ tbl_wlk.devfn = devfn;
+ tbl_wlk.rt_entry = &iommu->root_entry[bus];
+ tbl_wlk.ctx_entry = context;
+ m->private = &tbl_wlk;
+
+ if (pasid_supported(iommu) && is_pasid_enabled(context)) {
+ pasid_dir_ptr = context->lo & VTD_PAGE_MASK;
+ pasid_dir_size = get_pasid_dir_size(context);
+ pasid_dir_walk(m, pasid_dir_ptr, pasid_dir_size);
+ continue;
+ }
+
+ print_tbl_walk(m);
}
}
-static void root_tbl_entry_show(struct seq_file *m, struct intel_iommu *iommu)
+static void root_tbl_walk(struct seq_file *m, struct intel_iommu *iommu)
{
unsigned long flags;
- int bus;
+ u16 bus;
spin_lock_irqsave(&iommu->lock, flags);
- seq_printf(m, "IOMMU %s: Root Table Address:%llx\n", iommu->name,
+ seq_printf(m, "IOMMU %s: Root Table Address: 0x%llx\n", iommu->name,
(u64)virt_to_phys(iommu->root_entry));
- seq_puts(m, "Root Table Entries:\n");
+ seq_puts(m, "B.D.F\tRoot_entry\t\t\t\tContext_entry\t\t\t\tPASID\tPASID_table_entry\n");
- for (bus = 0; bus < 256; bus++) {
- if (!(iommu->root_entry[bus].lo & 1))
- continue;
+ /*
+ * No need to check if the root entry is present or not because
+ * iommu_context_addr() performs the same check before returning
+ * context entry.
+ */
+ for (bus = 0; bus < 256; bus++)
+ ctx_tbl_walk(m, iommu, bus);
- seq_printf(m, " Bus: %d H: %llx L: %llx\n", bus,
- iommu->root_entry[bus].hi,
- iommu->root_entry[bus].lo);
-
- ctx_tbl_entry_show(m, iommu, bus);
- seq_putc(m, '\n');
- }
spin_unlock_irqrestore(&iommu->lock, flags);
}
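
A standalone illustration of the scalable-mode context-table split described in ctx_tbl_walk() above; no VT-d structures are involved and the devfn values are arbitrary:

#include <stdio.h>

int main(void)
{
	unsigned int devfns[] = { 0x00, 0x7f, 0x80, 0xff };

	for (unsigned int i = 0; i < 4; i++) {
		unsigned int devfn = devfns[i];

		/* 256 legacy entries become two 128-entry scalable tables */
		printf("devfn 0x%02x -> %s context table, index %u\n",
		       devfn,
		       devfn > 127 ? "upper" : "lower",
		       devfn & 0x7f);
	}
	return 0;
}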
@@ -185,7 +274,7 @@ static int dmar_translation_struct_show(struct seq_file *m, void *unused)
rcu_read_lock();
for_each_active_iommu(iommu, drhd) {
- root_tbl_entry_show(m, iommu);
+ root_tbl_walk(m, iommu);
seq_putc(m, '\n');
}
rcu_read_unlock();
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 162b3236e72c..ac4172c02244 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -294,14 +294,16 @@ static inline void context_clear_entry(struct context_entry *context)
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;
+/* si_domain contains multiple devices */
+#define DOMAIN_FLAG_STATIC_IDENTITY BIT(0)
+
/*
- * Domain represents a virtual machine, more than one devices
- * across iommus may be owned in one domain, e.g. kvm guest.
+ * This is a DMA domain allocated through the iommu domain allocation
+ * interface. But one or more devices belonging to this domain have
+ * been chosen to use a private domain. We should avoid using the
+ * map/unmap/iova_to_phys APIs on it.
*/
-#define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 0)
-
-/* si_domain contains mulitple devices */
-#define DOMAIN_FLAG_STATIC_IDENTITY (1 << 1)
+#define DOMAIN_FLAG_LOSE_CHILDREN BIT(1)
#define for_each_domain_iommu(idx, domain) \
for (idx = 0; idx < g_num_of_iommus; idx++) \
@@ -314,7 +316,6 @@ struct dmar_rmrr_unit {
u64 end_address; /* reserved end address */
struct dmar_dev_scope *devices; /* target devices */
int devices_cnt; /* target device count */
- struct iommu_resv_region *resv; /* reserved region handle */
};
struct dmar_atsr_unit {
@@ -342,6 +343,9 @@ static void domain_context_clear(struct intel_iommu *iommu,
struct device *dev);
static int domain_detach_iommu(struct dmar_domain *domain,
struct intel_iommu *iommu);
+static bool device_is_rmrr_locked(struct device *dev);
+static int intel_iommu_attach_device(struct iommu_domain *domain,
+ struct device *dev);
#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
@@ -349,6 +353,7 @@ int dmar_disabled = 0;
int dmar_disabled = 1;
#endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
+int intel_iommu_sm;
int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);
@@ -356,21 +361,17 @@ static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;
-static int intel_iommu_sm;
static int iommu_identity_mapping;
#define IDENTMAP_ALL 1
#define IDENTMAP_GFX 2
#define IDENTMAP_AZALIA 4
-#define sm_supported(iommu) (intel_iommu_sm && ecap_smts((iommu)->ecap))
-#define pasid_supported(iommu) (sm_supported(iommu) && \
- ecap_pasid((iommu)->ecap))
-
int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
+#define DEFER_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-2))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);
@@ -535,22 +536,11 @@ static inline void free_devinfo_mem(void *vaddr)
kmem_cache_free(iommu_devinfo_cache, vaddr);
}
-static inline int domain_type_is_vm(struct dmar_domain *domain)
-{
- return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
-}
-
static inline int domain_type_is_si(struct dmar_domain *domain)
{
return domain->flags & DOMAIN_FLAG_STATIC_IDENTITY;
}
-static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
-{
- return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
- DOMAIN_FLAG_STATIC_IDENTITY);
-}
-
static inline int domain_pfn_supported(struct dmar_domain *domain,
unsigned long pfn)
{
@@ -598,7 +588,9 @@ struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
int iommu_id;
/* si_domain and vm domain should not get here. */
- BUG_ON(domain_type_is_vm_or_si(domain));
+ if (WARN_ON(domain->domain.type != IOMMU_DOMAIN_DMA))
+ return NULL;
+
for_each_domain_iommu(iommu_id, domain)
break;
@@ -729,12 +721,39 @@ static int iommu_dummy(struct device *dev)
return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
}
+/**
+ * is_downstream_to_pci_bridge - test if a device belongs to the PCI
+ * sub-hierarchy of a candidate PCI-PCI bridge
+ * @dev: candidate PCI device belonging to @bridge PCI sub-hierarchy
+ * @bridge: the candidate PCI-PCI bridge
+ *
+ * Return: true if @dev belongs to @bridge PCI sub-hierarchy, else false.
+ */
+static bool
+is_downstream_to_pci_bridge(struct device *dev, struct device *bridge)
+{
+ struct pci_dev *pdev, *pbridge;
+
+ if (!dev_is_pci(dev) || !dev_is_pci(bridge))
+ return false;
+
+ pdev = to_pci_dev(dev);
+ pbridge = to_pci_dev(bridge);
+
+ if (pbridge->subordinate &&
+ pbridge->subordinate->number <= pdev->bus->number &&
+ pbridge->subordinate->busn_res.end >= pdev->bus->number)
+ return true;
+
+ return false;
+}
+
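A minimal sketch of the bus-range test performed by is_downstream_to_pci_bridge(), using made-up bus numbers in place of the PCI core's pci_bus fields:

#include <stdbool.h>
#include <stdio.h>

struct fake_bridge {
	unsigned int secondary;		/* first bus behind the bridge */
	unsigned int subordinate;	/* last bus behind the bridge */
};

static bool behind_bridge(const struct fake_bridge *br, unsigned int dev_bus)
{
	return dev_bus >= br->secondary && dev_bus <= br->subordinate;
}

int main(void)
{
	struct fake_bridge br = { .secondary = 4, .subordinate = 8 };

	printf("bus 5 behind bridge? %d\n", behind_bridge(&br, 5));	/* 1 */
	printf("bus 9 behind bridge? %d\n", behind_bridge(&br, 9));	/* 0 */
	return 0;
}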
static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
{
struct dmar_drhd_unit *drhd = NULL;
struct intel_iommu *iommu;
struct device *tmp;
- struct pci_dev *ptmp, *pdev = NULL;
+ struct pci_dev *pdev = NULL;
u16 segment = 0;
int i;
@@ -780,13 +799,7 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf
goto out;
}
- if (!pdev || !dev_is_pci(tmp))
- continue;
-
- ptmp = to_pci_dev(tmp);
- if (ptmp->subordinate &&
- ptmp->subordinate->number <= pdev->bus->number &&
- ptmp->subordinate->busn_res.end >= pdev->bus->number)
+ if (is_downstream_to_pci_bridge(dev, tmp))
goto got_pdev;
}
@@ -908,7 +921,6 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
return pte;
}
-
/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
unsigned long pfn,
@@ -1577,7 +1589,6 @@ static void iommu_disable_translation(struct intel_iommu *iommu)
raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
-
static int iommu_init_domains(struct intel_iommu *iommu)
{
u32 ndomains, nlongs;
@@ -1615,8 +1626,6 @@ static int iommu_init_domains(struct intel_iommu *iommu)
return -ENOMEM;
}
-
-
/*
* If Caching mode is set, then invalid translations are tagged
* with domain-id 0, hence we need to pre-allocate it. We also
@@ -1646,32 +1655,15 @@ static void disable_dmar_iommu(struct intel_iommu *iommu)
if (!iommu->domains || !iommu->domain_ids)
return;
-again:
spin_lock_irqsave(&device_domain_lock, flags);
list_for_each_entry_safe(info, tmp, &device_domain_list, global) {
- struct dmar_domain *domain;
-
if (info->iommu != iommu)
continue;
if (!info->dev || !info->domain)
continue;
- domain = info->domain;
-
__dmar_remove_one_dev_info(info);
-
- if (!domain_type_is_vm_or_si(domain)) {
- /*
- * The domain_exit() function can't be called under
- * device_domain_lock, as it takes this lock itself.
- * So release the lock here and re-run the loop
- * afterwards.
- */
- spin_unlock_irqrestore(&device_domain_lock, flags);
- domain_exit(domain);
- goto again;
- }
}
spin_unlock_irqrestore(&device_domain_lock, flags);
@@ -1841,71 +1833,12 @@ static inline int guestwidth_to_adjustwidth(int gaw)
return agaw;
}
-static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
- int guest_width)
-{
- int adjust_width, agaw;
- unsigned long sagaw;
- int err;
-
- init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
-
- err = init_iova_flush_queue(&domain->iovad,
- iommu_flush_iova, iova_entry_free);
- if (err)
- return err;
-
- domain_reserve_special_ranges(domain);
-
- /* calculate AGAW */
- if (guest_width > cap_mgaw(iommu->cap))
- guest_width = cap_mgaw(iommu->cap);
- domain->gaw = guest_width;
- adjust_width = guestwidth_to_adjustwidth(guest_width);
- agaw = width_to_agaw(adjust_width);
- sagaw = cap_sagaw(iommu->cap);
- if (!test_bit(agaw, &sagaw)) {
- /* hardware doesn't support it, choose a bigger one */
- pr_debug("Hardware doesn't support agaw %d\n", agaw);
- agaw = find_next_bit(&sagaw, 5, agaw);
- if (agaw >= 5)
- return -ENODEV;
- }
- domain->agaw = agaw;
-
- if (ecap_coherent(iommu->ecap))
- domain->iommu_coherency = 1;
- else
- domain->iommu_coherency = 0;
-
- if (ecap_sc_support(iommu->ecap))
- domain->iommu_snooping = 1;
- else
- domain->iommu_snooping = 0;
-
- if (intel_iommu_superpage)
- domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
- else
- domain->iommu_superpage = 0;
-
- domain->nid = iommu->node;
-
- /* always allocate the top pgd */
- domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
- if (!domain->pgd)
- return -ENOMEM;
- __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
- return 0;
-}
-
static void domain_exit(struct dmar_domain *domain)
{
struct page *freelist;
/* Remove associated devices and clear attached or cached domains */
- rcu_read_lock();
domain_remove_dev_info(domain);
- rcu_read_unlock();
/* destroy iovas */
put_iova_domain(&domain->iovad);
@@ -2336,7 +2269,7 @@ static int domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
struct scatterlist *sg, unsigned long phys_pfn,
unsigned long nr_pages, int prot)
{
- int ret;
+ int iommu_id, ret;
struct intel_iommu *iommu;
/* Do the real mapping first */
@@ -2344,18 +2277,8 @@ static int domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
if (ret)
return ret;
- /* Notify about the new mapping */
- if (domain_type_is_vm(domain)) {
- /* VM typed domains can have more than one IOMMUs */
- int iommu_id;
-
- for_each_domain_iommu(iommu_id, domain) {
- iommu = g_iommus[iommu_id];
- __mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
- }
- } else {
- /* General domains only have one IOMMU */
- iommu = domain_get_iommu(domain);
+ for_each_domain_iommu(iommu_id, domain) {
+ iommu = g_iommus[iommu_id];
__mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
}
@@ -2435,8 +2358,18 @@ static struct dmar_domain *find_domain(struct device *dev)
{
struct device_domain_info *info;
+ if (unlikely(dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO)) {
+ struct iommu_domain *domain;
+
+ dev->archdata.iommu = NULL;
+ domain = iommu_get_domain_for_dev(dev);
+ if (domain)
+ intel_iommu_attach_device(domain, dev);
+ }
+
/* No lock here, assumes no domain exit in normal case */
info = dev->archdata.iommu;
+
if (likely(info))
return info->domain;
return NULL;
@@ -2580,6 +2513,31 @@ static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
return 0;
}
+static int domain_init(struct dmar_domain *domain, int guest_width)
+{
+ int adjust_width;
+
+ init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
+ domain_reserve_special_ranges(domain);
+
+ /* calculate AGAW */
+ domain->gaw = guest_width;
+ adjust_width = guestwidth_to_adjustwidth(guest_width);
+ domain->agaw = width_to_agaw(adjust_width);
+
+ domain->iommu_coherency = 0;
+ domain->iommu_snooping = 0;
+ domain->iommu_superpage = 0;
+ domain->max_addr = 0;
+
+ /* always allocate the top pgd */
+ domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
+ if (!domain->pgd)
+ return -ENOMEM;
+ domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
+ return 0;
+}
+
static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
{
struct device_domain_info *info;
@@ -2617,13 +2575,20 @@ static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
domain = alloc_domain(0);
if (!domain)
return NULL;
- if (domain_init(domain, iommu, gaw)) {
+
+ if (domain_init(domain, gaw)) {
domain_exit(domain);
return NULL;
}
-out:
+ if (init_iova_flush_queue(&domain->iovad,
+ iommu_flush_iova,
+ iova_entry_free)) {
+ pr_warn("iova flush queue initialization failed\n");
+ intel_iommu_strict = 1;
+ }
+out:
return domain;
}
@@ -2663,29 +2628,6 @@ static struct dmar_domain *set_domain_for_dev(struct device *dev,
return domain;
}
-static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
-{
- struct dmar_domain *domain, *tmp;
-
- domain = find_domain(dev);
- if (domain)
- goto out;
-
- domain = find_or_alloc_domain(dev, gaw);
- if (!domain)
- goto out;
-
- tmp = set_domain_for_dev(dev, domain);
- if (!tmp || domain != tmp) {
- domain_exit(domain);
- domain = tmp;
- }
-
-out:
-
- return domain;
-}
-
static int iommu_domain_identity_map(struct dmar_domain *domain,
unsigned long long start,
unsigned long long end)
@@ -2750,75 +2692,21 @@ static int domain_prepare_identity_map(struct device *dev,
return iommu_domain_identity_map(domain, start, end);
}
-static int iommu_prepare_identity_map(struct device *dev,
- unsigned long long start,
- unsigned long long end)
-{
- struct dmar_domain *domain;
- int ret;
-
- domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
- if (!domain)
- return -ENOMEM;
-
- ret = domain_prepare_identity_map(dev, domain, start, end);
- if (ret)
- domain_exit(domain);
-
- return ret;
-}
-
-static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
- struct device *dev)
-{
- if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
- return 0;
- return iommu_prepare_identity_map(dev, rmrr->base_address,
- rmrr->end_address);
-}
-
-#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
-static inline void iommu_prepare_isa(void)
-{
- struct pci_dev *pdev;
- int ret;
-
- pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
- if (!pdev)
- return;
-
- pr_info("Prepare 0-16MiB unity mapping for LPC\n");
- ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);
-
- if (ret)
- pr_err("Failed to create 0-16MiB identity map - floppy might not work\n");
-
- pci_dev_put(pdev);
-}
-#else
-static inline void iommu_prepare_isa(void)
-{
- return;
-}
-#endif /* !CONFIG_INTEL_IOMMU_FLPY_WA */
-
-static int md_domain_init(struct dmar_domain *domain, int guest_width);
-
static int __init si_domain_init(int hw)
{
- int nid, ret;
+ struct dmar_rmrr_unit *rmrr;
+ struct device *dev;
+ int i, nid, ret;
si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
if (!si_domain)
return -EFAULT;
- if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
+ if (domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
domain_exit(si_domain);
return -EFAULT;
}
- pr_debug("Identity mapping domain allocated\n");
-
if (hw)
return 0;
@@ -2834,6 +2722,31 @@ static int __init si_domain_init(int hw)
}
}
+ /*
+ * Normally we use DMA domains for devices which have RMRRs. But we
+ * loosen this requirement for graphics and USB devices. Identity map
+ * the RMRRs of graphics and USB devices so that they can use the
+ * si_domain.
+ */
+ for_each_rmrr_units(rmrr) {
+ for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
+ i, dev) {
+ unsigned long long start = rmrr->base_address;
+ unsigned long long end = rmrr->end_address;
+
+ if (device_is_rmrr_locked(dev))
+ continue;
+
+ if (WARN_ON(end < start ||
+ end >> agaw_to_width(si_domain->agaw)))
+ continue;
+
+ ret = iommu_domain_identity_map(si_domain, start, end);
+ if (ret)
+ return ret;
+ }
+ }
+
return 0;
}
@@ -2841,9 +2754,6 @@ static int identity_mapping(struct device *dev)
{
struct device_domain_info *info;
- if (likely(!iommu_identity_mapping))
- return 0;
-
info = dev->archdata.iommu;
if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
return (info->domain == si_domain);
@@ -2882,7 +2792,8 @@ static bool device_has_rmrr(struct device *dev)
*/
for_each_active_dev_scope(rmrr->devices,
rmrr->devices_cnt, i, tmp)
- if (tmp == dev) {
+ if (tmp == dev ||
+ is_downstream_to_pci_bridge(dev, tmp)) {
rcu_read_unlock();
return true;
}
@@ -2891,6 +2802,35 @@ static bool device_has_rmrr(struct device *dev)
return false;
}
+/**
+ * device_rmrr_is_relaxable - Test whether the RMRR of this device
+ * is relaxable (i.e. may be left unenforced under some conditions)
+ * @dev: device handle
+ *
+ * We assume that PCI USB devices with RMRRs have them largely
+ * for historical reasons and that the RMRR space is not actively used post
+ * boot. This exclusion may change if vendors begin to abuse it.
+ *
+ * The same exception is made for graphics devices, with the requirement that
+ * any use of the RMRR regions will be torn down before assigning the device
+ * to a guest.
+ *
+ * Return: true if the RMRR is relaxable, false otherwise
+ */
+static bool device_rmrr_is_relaxable(struct device *dev)
+{
+ struct pci_dev *pdev;
+
+ if (!dev_is_pci(dev))
+ return false;
+
+ pdev = to_pci_dev(dev);
+ if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
+ return true;
+ else
+ return false;
+}
+
/*
* There are a couple cases where we need to restrict the functionality of
* devices associated with RMRRs. The first is when evaluating a device for
@@ -2905,52 +2845,51 @@ static bool device_has_rmrr(struct device *dev)
* We therefore prevent devices associated with an RMRR from participating in
* the IOMMU API, which eliminates them from device assignment.
*
- * In both cases we assume that PCI USB devices with RMRRs have them largely
- * for historical reasons and that the RMRR space is not actively used post
- * boot. This exclusion may change if vendors begin to abuse it.
- *
- * The same exception is made for graphics devices, with the requirement that
- * any use of the RMRR regions will be torn down before assigning the device
- * to a guest.
+ * In both cases, devices which have relaxable RMRRs are not concerned by this
+ * restriction. See device_rmrr_is_relaxable comment.
*/
static bool device_is_rmrr_locked(struct device *dev)
{
if (!device_has_rmrr(dev))
return false;
- if (dev_is_pci(dev)) {
- struct pci_dev *pdev = to_pci_dev(dev);
-
- if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
- return false;
- }
+ if (device_rmrr_is_relaxable(dev))
+ return false;
return true;
}
-static int iommu_should_identity_map(struct device *dev, int startup)
+/*
+ * Return the required default domain type for a specific device.
+ *
+ * @dev: the device being queried
+ *
+ * Returns:
+ * - IOMMU_DOMAIN_DMA: device requires a dynamic mapping domain
+ * - IOMMU_DOMAIN_IDENTITY: device requires an identity mapping domain
+ * - 0: both identity and dynamic domains work for this device
+ */
+static int device_def_domain_type(struct device *dev)
{
if (dev_is_pci(dev)) {
struct pci_dev *pdev = to_pci_dev(dev);
if (device_is_rmrr_locked(dev))
- return 0;
+ return IOMMU_DOMAIN_DMA;
/*
* Prevent any device marked as untrusted from getting
* placed into the statically identity mapping domain.
*/
if (pdev->untrusted)
- return 0;
+ return IOMMU_DOMAIN_DMA;
if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
- return 1;
+ return IOMMU_DOMAIN_IDENTITY;
if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
- return 1;
-
- if (!(iommu_identity_mapping & IDENTMAP_ALL))
- return 0;
+ return IOMMU_DOMAIN_IDENTITY;
/*
* We want to start off with all devices in the 1:1 domain, and
@@ -2971,94 +2910,18 @@ static int iommu_should_identity_map(struct device *dev, int startup)
*/
if (!pci_is_pcie(pdev)) {
if (!pci_is_root_bus(pdev->bus))
- return 0;
+ return IOMMU_DOMAIN_DMA;
if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
- return 0;
+ return IOMMU_DOMAIN_DMA;
} else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
- return 0;
+ return IOMMU_DOMAIN_DMA;
} else {
if (device_has_rmrr(dev))
- return 0;
+ return IOMMU_DOMAIN_DMA;
}
- /*
- * At boot time, we don't yet know if devices will be 64-bit capable.
- * Assume that they will — if they turn out not to be, then we can
- * take them out of the 1:1 domain later.
- */
- if (!startup) {
- /*
- * If the device's dma_mask is less than the system's memory
- * size then this is not a candidate for identity mapping.
- */
- u64 dma_mask = *dev->dma_mask;
-
- if (dev->coherent_dma_mask &&
- dev->coherent_dma_mask < dma_mask)
- dma_mask = dev->coherent_dma_mask;
-
- return dma_mask >= dma_get_required_mask(dev);
- }
-
- return 1;
-}
-
-static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
-{
- int ret;
-
- if (!iommu_should_identity_map(dev, 1))
- return 0;
-
- ret = domain_add_dev_info(si_domain, dev);
- if (!ret)
- dev_info(dev, "%s identity mapping\n",
- hw ? "Hardware" : "Software");
- else if (ret == -ENODEV)
- /* device not associated with an iommu */
- ret = 0;
-
- return ret;
-}
-
-
-static int __init iommu_prepare_static_identity_mapping(int hw)
-{
- struct pci_dev *pdev = NULL;
- struct dmar_drhd_unit *drhd;
- /* To avoid a -Wunused-but-set-variable warning. */
- struct intel_iommu *iommu __maybe_unused;
- struct device *dev;
- int i;
- int ret = 0;
-
- for_each_pci_dev(pdev) {
- ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
- if (ret)
- return ret;
- }
-
- for_each_active_iommu(iommu, drhd)
- for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
- struct acpi_device_physical_node *pn;
- struct acpi_device *adev;
-
- if (dev->bus != &acpi_bus_type)
- continue;
-
- adev= to_acpi_device(dev);
- mutex_lock(&adev->physical_node_lock);
- list_for_each_entry(pn, &adev->physical_node_list, node) {
- ret = dev_prepare_static_identity_mapping(pn->dev, hw);
- if (ret)
- break;
- }
- mutex_unlock(&adev->physical_node_lock);
- if (ret)
- return ret;
- }
-
- return 0;
+ return (iommu_identity_mapping & IDENTMAP_ALL) ?
+ IOMMU_DOMAIN_IDENTITY : 0;
}
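
A toy model of the policy device_def_domain_type() implements; the inputs are booleans standing in for the real checks, and the IDENTMAP_ALL handling is simplified for illustration:

#include <stdio.h>

#define TYPE_ANY	0	/* caller decides */
#define TYPE_DMA	1
#define TYPE_IDENTITY	2

static int def_domain_type(int rmrr_locked, int untrusted, int identmap_all)
{
	/* locked RMRRs and untrusted devices are forced into DMA domains */
	if (rmrr_locked || untrusted)
		return TYPE_DMA;
	/* IDENTMAP_ALL forces identity; otherwise leave it to the caller */
	return identmap_all ? TYPE_IDENTITY : TYPE_ANY;
}

int main(void)
{
	printf("untrusted device          -> %d (DMA)\n",
	       def_domain_type(0, 1, 1));
	printf("device, IDENTMAP_ALL set  -> %d (identity)\n",
	       def_domain_type(0, 0, 1));
	printf("device, default           -> %d (caller decides)\n",
	       def_domain_type(0, 0, 0));
	return 0;
}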
static void intel_iommu_init_qi(struct intel_iommu *iommu)
@@ -3283,11 +3146,8 @@ out_unmap:
static int __init init_dmars(void)
{
struct dmar_drhd_unit *drhd;
- struct dmar_rmrr_unit *rmrr;
- bool copied_tables = false;
- struct device *dev;
struct intel_iommu *iommu;
- int i, ret;
+ int ret;
/*
* for each drhd
@@ -3320,7 +3180,12 @@ static int __init init_dmars(void)
goto error;
}
- for_each_active_iommu(iommu, drhd) {
+ for_each_iommu(iommu, drhd) {
+ if (drhd->ignored) {
+ iommu_disable_translation(iommu);
+ continue;
+ }
+
/*
* Find the max pasid size of all IOMMU's in the system.
* We need to ensure the system pasid table is no bigger
@@ -3380,7 +3245,6 @@ static int __init init_dmars(void)
} else {
pr_info("Copied translation tables from previous kernel for %s\n",
iommu->name);
- copied_tables = true;
}
}
@@ -3416,62 +3280,9 @@ static int __init init_dmars(void)
check_tylersburg_isoch();
- if (iommu_identity_mapping) {
- ret = si_domain_init(hw_pass_through);
- if (ret)
- goto free_iommu;
- }
-
-
- /*
- * If we copied translations from a previous kernel in the kdump
- * case, we can not assign the devices to domains now, as that
- * would eliminate the old mappings. So skip this part and defer
- * the assignment to device driver initialization time.
- */
- if (copied_tables)
- goto domains_done;
-
- /*
- * If pass through is not set or not enabled, setup context entries for
- * identity mappings for rmrr, gfx, and isa and may fall back to static
- * identity mapping if iommu_identity_mapping is set.
- */
- if (iommu_identity_mapping) {
- ret = iommu_prepare_static_identity_mapping(hw_pass_through);
- if (ret) {
- pr_crit("Failed to setup IOMMU pass-through\n");
- goto free_iommu;
- }
- }
- /*
- * For each rmrr
- * for each dev attached to rmrr
- * do
- * locate drhd for dev, alloc domain for dev
- * allocate free domain
- * allocate page table entries for rmrr
- * if context not allocated for bus
- * allocate and init context
- * set present in root table for this bus
- * init context with domain, translation etc
- * endfor
- * endfor
- */
- pr_info("Setting RMRR:\n");
- for_each_rmrr_units(rmrr) {
- /* some BIOS lists non-exist devices in DMAR table. */
- for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
- i, dev) {
- ret = iommu_prepare_rmrr_dev(rmrr, dev);
- if (ret)
- pr_err("Mapping reserved region failed\n");
- }
- }
-
- iommu_prepare_isa();
-
-domains_done:
+ ret = si_domain_init(hw_pass_through);
+ if (ret)
+ goto free_iommu;
/*
* for each drhd
@@ -3509,11 +3320,6 @@ domains_done:
ret = dmar_set_interrupt(iommu);
if (ret)
goto free_iommu;
-
- if (!translation_pre_enabled(iommu))
- iommu_enable_translation(iommu);
-
- iommu_disable_protect_mem_regions(iommu);
}
return 0;
@@ -3563,16 +3369,17 @@ static unsigned long intel_alloc_iova(struct device *dev,
return iova_pfn;
}
-struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
+static struct dmar_domain *get_private_domain_for_dev(struct device *dev)
{
struct dmar_domain *domain, *tmp;
struct dmar_rmrr_unit *rmrr;
struct device *i_dev;
int i, ret;
+ /* The device shouldn't already be attached to any domain. */
domain = find_domain(dev);
if (domain)
- goto out;
+ return NULL;
domain = find_or_alloc_domain(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
if (!domain)
@@ -3602,10 +3409,10 @@ struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
}
out:
-
if (!domain)
dev_err(dev, "Allocating domain failed\n");
-
+ else
+ domain->domain.type = IOMMU_DOMAIN_DMA;
return domain;
}
@@ -3613,17 +3420,19 @@ out:
/* Check if the dev needs to go through non-identity map and unmap process.*/
static bool iommu_need_mapping(struct device *dev)
{
- int found;
+ int ret;
if (iommu_dummy(dev))
return false;
- if (!iommu_identity_mapping)
- return true;
+ ret = identity_mapping(dev);
+ if (ret) {
+ u64 dma_mask = *dev->dma_mask;
- found = identity_mapping(dev);
- if (found) {
- if (iommu_should_identity_map(dev, 0))
+ if (dev->coherent_dma_mask && dev->coherent_dma_mask < dma_mask)
+ dma_mask = dev->coherent_dma_mask;
+
+ if (dma_mask >= dma_get_required_mask(dev))
return false;
/*
@@ -3631,17 +3440,20 @@ static bool iommu_need_mapping(struct device *dev)
* non-identity mapping.
*/
dmar_remove_one_dev_info(dev);
- dev_info(dev, "32bit DMA uses non-identity mapping\n");
- } else {
- /*
- * In case of a detached 64 bit DMA device from vm, the device
- * is put into si_domain for identity mapping.
- */
- if (iommu_should_identity_map(dev, 0) &&
- !domain_add_dev_info(si_domain, dev)) {
- dev_info(dev, "64bit DMA uses identity mapping\n");
- return false;
+ ret = iommu_request_dma_domain_for_dev(dev);
+ if (ret) {
+ struct iommu_domain *domain;
+ struct dmar_domain *dmar_domain;
+
+ domain = iommu_get_domain_for_dev(dev);
+ if (domain) {
+ dmar_domain = to_dmar_domain(domain);
+ dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
+ }
+ get_private_domain_for_dev(dev);
}
+
+ dev_info(dev, "32bit DMA uses non-identity mapping\n");
}
return true;
@@ -3660,7 +3472,7 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
BUG_ON(dir == DMA_NONE);
- domain = get_valid_domain_for_dev(dev);
+ domain = find_domain(dev);
if (!domain)
return DMA_MAPPING_ERROR;
@@ -3875,7 +3687,7 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
if (!iommu_need_mapping(dev))
return dma_direct_map_sg(dev, sglist, nelems, dir, attrs);
- domain = get_valid_domain_for_dev(dev);
+ domain = find_domain(dev);
if (!domain)
return 0;
@@ -4194,13 +4006,10 @@ static void __init init_iommu_pm_ops(void)
static inline void init_iommu_pm_ops(void) {}
#endif /* CONFIG_PM */
-
int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
{
struct acpi_dmar_reserved_memory *rmrr;
- int prot = DMA_PTE_READ|DMA_PTE_WRITE;
struct dmar_rmrr_unit *rmrru;
- size_t length;
rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
if (!rmrru)
@@ -4211,23 +4020,15 @@ int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
rmrru->base_address = rmrr->base_address;
rmrru->end_address = rmrr->end_address;
- length = rmrr->end_address - rmrr->base_address + 1;
- rmrru->resv = iommu_alloc_resv_region(rmrr->base_address, length, prot,
- IOMMU_RESV_DIRECT);
- if (!rmrru->resv)
- goto free_rmrru;
-
rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
((void *)rmrr) + rmrr->header.length,
&rmrru->devices_cnt);
if (rmrru->devices_cnt && rmrru->devices == NULL)
- goto free_all;
+ goto free_rmrru;
list_add(&rmrru->list, &dmar_rmrr_units);
return 0;
-free_all:
- kfree(rmrru->resv);
free_rmrru:
kfree(rmrru);
out:
@@ -4445,7 +4246,6 @@ static void intel_iommu_free_dmars(void)
list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
list_del(&rmrru->list);
dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
- kfree(rmrru->resv);
kfree(rmrru);
}
@@ -4550,42 +4350,6 @@ int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
return 0;
}
-/*
- * Here we only respond to action of unbound device from driver.
- *
- * Added device is not attached to its DMAR domain here yet. That will happen
- * when mapping the device to iova.
- */
-static int device_notifier(struct notifier_block *nb,
- unsigned long action, void *data)
-{
- struct device *dev = data;
- struct dmar_domain *domain;
-
- if (iommu_dummy(dev))
- return 0;
-
- if (action == BUS_NOTIFY_REMOVED_DEVICE) {
- domain = find_domain(dev);
- if (!domain)
- return 0;
-
- dmar_remove_one_dev_info(dev);
- if (!domain_type_is_vm_or_si(domain) &&
- list_empty(&domain->devices))
- domain_exit(domain);
- } else if (action == BUS_NOTIFY_ADD_DEVICE) {
- if (iommu_should_identity_map(dev, 1))
- domain_add_dev_info(si_domain, dev);
- }
-
- return 0;
-}
-
-static struct notifier_block device_nb = {
- .notifier_call = device_notifier,
-};
-
static int intel_iommu_memory_notifier(struct notifier_block *nb,
unsigned long val, void *v)
{
@@ -4812,6 +4576,49 @@ static int __init platform_optin_force_iommu(void)
return 1;
}
+static int __init probe_acpi_namespace_devices(void)
+{
+ struct dmar_drhd_unit *drhd;
+ /* To avoid a -Wunused-but-set-variable warning. */
+ struct intel_iommu *iommu __maybe_unused;
+ struct device *dev;
+ int i, ret = 0;
+
+ for_each_active_iommu(iommu, drhd) {
+ for_each_active_dev_scope(drhd->devices,
+ drhd->devices_cnt, i, dev) {
+ struct acpi_device_physical_node *pn;
+ struct iommu_group *group;
+ struct acpi_device *adev;
+
+ if (dev->bus != &acpi_bus_type)
+ continue;
+
+ adev = to_acpi_device(dev);
+ mutex_lock(&adev->physical_node_lock);
+ list_for_each_entry(pn,
+ &adev->physical_node_list, node) {
+ group = iommu_group_get(pn->dev);
+ if (group) {
+ iommu_group_put(group);
+ continue;
+ }
+
+ pn->dev->bus->iommu_ops = &intel_iommu_ops;
+ ret = iommu_probe_device(pn->dev);
+ if (ret)
+ break;
+ }
+ mutex_unlock(&adev->physical_node_lock);
+
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
int __init intel_iommu_init(void)
{
int ret = -ENODEV;
@@ -4901,7 +4708,6 @@ int __init intel_iommu_init(void)
goto out_free_reserved_range;
}
up_write(&dmar_global_lock);
- pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
#if defined(CONFIG_X86) && defined(CONFIG_SWIOTLB)
swiotlb = 0;
@@ -4919,11 +4725,25 @@ int __init intel_iommu_init(void)
}
bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
- bus_register_notifier(&pci_bus_type, &device_nb);
if (si_domain && !hw_pass_through)
register_memory_notifier(&intel_iommu_memory_nb);
cpuhp_setup_state(CPUHP_IOMMU_INTEL_DEAD, "iommu/intel:dead", NULL,
intel_iommu_cpu_dead);
+
+ down_read(&dmar_global_lock);
+ if (probe_acpi_namespace_devices())
+ pr_warn("ACPI name space devices didn't probe correctly\n");
+ up_read(&dmar_global_lock);
+
+ /* Finally, we enable the DMA remapping hardware. */
+ for_each_iommu(iommu, drhd) {
+ if (!drhd->ignored && !translation_pre_enabled(iommu))
+ iommu_enable_translation(iommu);
+
+ iommu_disable_protect_mem_regions(iommu);
+ }
+ pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
+
intel_iommu_enabled = 1;
intel_iommu_debugfs_init();
@@ -4962,6 +4782,7 @@ static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
static void __dmar_remove_one_dev_info(struct device_domain_info *info)
{
+ struct dmar_domain *domain;
struct intel_iommu *iommu;
unsigned long flags;
@@ -4971,6 +4792,7 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info)
return;
iommu = info->iommu;
+ domain = info->domain;
if (info->dev) {
if (dev_is_pci(info->dev) && sm_supported(iommu))
@@ -4985,9 +4807,14 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info)
unlink_domain_info(info);
spin_lock_irqsave(&iommu->lock, flags);
- domain_detach_iommu(info->domain, iommu);
+ domain_detach_iommu(domain, iommu);
spin_unlock_irqrestore(&iommu->lock, flags);
+ /* free the private domain */
+ if (domain->flags & DOMAIN_FLAG_LOSE_CHILDREN &&
+ !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY))
+ domain_exit(info->domain);
+
free_devinfo_mem(info);
}
@@ -5002,62 +4829,55 @@ static void dmar_remove_one_dev_info(struct device *dev)
spin_unlock_irqrestore(&device_domain_lock, flags);
}
-static int md_domain_init(struct dmar_domain *domain, int guest_width)
-{
- int adjust_width;
-
- init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
- domain_reserve_special_ranges(domain);
-
- /* calculate AGAW */
- domain->gaw = guest_width;
- adjust_width = guestwidth_to_adjustwidth(guest_width);
- domain->agaw = width_to_agaw(adjust_width);
-
- domain->iommu_coherency = 0;
- domain->iommu_snooping = 0;
- domain->iommu_superpage = 0;
- domain->max_addr = 0;
-
- /* always allocate the top pgd */
- domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
- if (!domain->pgd)
- return -ENOMEM;
- domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
- return 0;
-}
-
static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
{
struct dmar_domain *dmar_domain;
struct iommu_domain *domain;
- if (type != IOMMU_DOMAIN_UNMANAGED)
- return NULL;
+ switch (type) {
+ case IOMMU_DOMAIN_DMA:
+ /* fallthrough */
+ case IOMMU_DOMAIN_UNMANAGED:
+ dmar_domain = alloc_domain(0);
+ if (!dmar_domain) {
+ pr_err("Can't allocate dmar_domain\n");
+ return NULL;
+ }
+ if (domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
+ pr_err("Domain initialization failed\n");
+ domain_exit(dmar_domain);
+ return NULL;
+ }
- dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
- if (!dmar_domain) {
- pr_err("Can't allocate dmar_domain\n");
- return NULL;
- }
- if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
- pr_err("Domain initialization failed\n");
- domain_exit(dmar_domain);
+ if (type == IOMMU_DOMAIN_DMA &&
+ init_iova_flush_queue(&dmar_domain->iovad,
+ iommu_flush_iova, iova_entry_free)) {
+ pr_warn("iova flush queue initialization failed\n");
+ intel_iommu_strict = 1;
+ }
+
+ domain_update_iommu_cap(dmar_domain);
+
+ domain = &dmar_domain->domain;
+ domain->geometry.aperture_start = 0;
+ domain->geometry.aperture_end =
+ __DOMAIN_MAX_ADDR(dmar_domain->gaw);
+ domain->geometry.force_aperture = true;
+
+ return domain;
+ case IOMMU_DOMAIN_IDENTITY:
+ return &si_domain->domain;
+ default:
return NULL;
}
- domain_update_iommu_cap(dmar_domain);
-
- domain = &dmar_domain->domain;
- domain->geometry.aperture_start = 0;
- domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
- domain->geometry.force_aperture = true;
- return domain;
+ return NULL;
}
static void intel_iommu_domain_free(struct iommu_domain *domain)
{
- domain_exit(to_dmar_domain(domain));
+ if (domain != &si_domain->domain)
+ domain_exit(to_dmar_domain(domain));
}
/*
@@ -5233,7 +5053,8 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
{
int ret;
- if (device_is_rmrr_locked(dev)) {
+ if (domain->type == IOMMU_DOMAIN_UNMANAGED &&
+ device_is_rmrr_locked(dev)) {
dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n");
return -EPERM;
}
@@ -5246,15 +5067,8 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
struct dmar_domain *old_domain;
old_domain = find_domain(dev);
- if (old_domain) {
- rcu_read_lock();
+ if (old_domain)
dmar_remove_one_dev_info(dev);
- rcu_read_unlock();
-
- if (!domain_type_is_vm_or_si(old_domain) &&
- list_empty(&old_domain->devices))
- domain_exit(old_domain);
- }
}
ret = prepare_domain_attach_device(domain, dev);
@@ -5300,6 +5114,9 @@ static int intel_iommu_map(struct iommu_domain *domain,
int prot = 0;
int ret;
+ if (dmar_domain->flags & DOMAIN_FLAG_LOSE_CHILDREN)
+ return -EINVAL;
+
if (iommu_prot & IOMMU_READ)
prot |= DMA_PTE_READ;
if (iommu_prot & IOMMU_WRITE)
@@ -5341,6 +5158,8 @@ static size_t intel_iommu_unmap(struct iommu_domain *domain,
/* Cope with horrid API which requires us to unmap more than the
size argument if it happens to be a large-page mapping. */
BUG_ON(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level));
+ if (dmar_domain->flags & DOMAIN_FLAG_LOSE_CHILDREN)
+ return 0;
if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
size = VTD_PAGE_SIZE << level_to_offset_bits(level);
@@ -5372,6 +5191,9 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
int level = 0;
u64 phys = 0;
+ if (dmar_domain->flags & DOMAIN_FLAG_LOSE_CHILDREN)
+ return 0;
+
pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
if (pte)
phys = dma_pte_addr(pte);
@@ -5427,9 +5249,12 @@ static bool intel_iommu_capable(enum iommu_cap cap)
static int intel_iommu_add_device(struct device *dev)
{
+ struct dmar_domain *dmar_domain;
+ struct iommu_domain *domain;
struct intel_iommu *iommu;
struct iommu_group *group;
u8 bus, devfn;
+ int ret;
iommu = device_to_iommu(dev, &bus, &devfn);
if (!iommu)
@@ -5437,12 +5262,45 @@ static int intel_iommu_add_device(struct device *dev)
iommu_device_link(&iommu->iommu, dev);
+ if (translation_pre_enabled(iommu))
+ dev->archdata.iommu = DEFER_DEVICE_DOMAIN_INFO;
+
group = iommu_group_get_for_dev(dev);
if (IS_ERR(group))
return PTR_ERR(group);
iommu_group_put(group);
+
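+	/*
+	 * If the group's default domain type differs from what this device
+	 * requires, try to switch the group to the required type. If that
+	 * fails (e.g. other devices in the group are already attached), fall
+	 * back to a private domain for this device and mark the group's
+	 * default domain as losing its children.
+	 */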
+ domain = iommu_get_domain_for_dev(dev);
+ dmar_domain = to_dmar_domain(domain);
+ if (domain->type == IOMMU_DOMAIN_DMA) {
+ if (device_def_domain_type(dev) == IOMMU_DOMAIN_IDENTITY) {
+ ret = iommu_request_dm_for_dev(dev);
+ if (ret) {
+ dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
+ domain_add_dev_info(si_domain, dev);
+ dev_info(dev,
+ "Device uses a private identity domain.\n");
+ }
+ }
+ } else {
+ if (device_def_domain_type(dev) == IOMMU_DOMAIN_DMA) {
+ ret = iommu_request_dma_domain_for_dev(dev);
+ if (ret) {
+ dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
+ if (!get_private_domain_for_dev(dev)) {
+ dev_warn(dev,
+ "Failed to get a private domain.\n");
+ return -ENOMEM;
+ }
+
+ dev_info(dev,
+ "Device uses a private dma domain.\n");
+ }
+ }
+ }
+
return 0;
}
@@ -5463,22 +5321,51 @@ static void intel_iommu_remove_device(struct device *dev)
static void intel_iommu_get_resv_regions(struct device *device,
struct list_head *head)
{
+ int prot = DMA_PTE_READ | DMA_PTE_WRITE;
struct iommu_resv_region *reg;
struct dmar_rmrr_unit *rmrr;
struct device *i_dev;
int i;
- rcu_read_lock();
+ down_read(&dmar_global_lock);
for_each_rmrr_units(rmrr) {
for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
i, i_dev) {
- if (i_dev != device)
+ struct iommu_resv_region *resv;
+ enum iommu_resv_type type;
+ size_t length;
+
+ if (i_dev != device &&
+ !is_downstream_to_pci_bridge(device, i_dev))
continue;
- list_add_tail(&rmrr->resv->list, head);
+ length = rmrr->end_address - rmrr->base_address + 1;
+
+ type = device_rmrr_is_relaxable(device) ?
+ IOMMU_RESV_DIRECT_RELAXABLE : IOMMU_RESV_DIRECT;
+
+ resv = iommu_alloc_resv_region(rmrr->base_address,
+ length, prot, type);
+ if (!resv)
+ break;
+
+ list_add_tail(&resv->list, head);
}
}
- rcu_read_unlock();
+ up_read(&dmar_global_lock);
+
+#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
+ if (dev_is_pci(device)) {
+ struct pci_dev *pdev = to_pci_dev(device);
+
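+		/*
+		 * Legacy floppy controllers behind an ISA bridge do DMA to the
+		 * low 16 MiB, so expose that range as a unity-mapped region.
+		 */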
+ if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA) {
+ reg = iommu_alloc_resv_region(0, 1UL << 24, 0,
+ IOMMU_RESV_DIRECT);
+ if (reg)
+ list_add_tail(&reg->list, head);
+ }
+ }
+#endif /* CONFIG_INTEL_IOMMU_FLOPPY_WA */
reg = iommu_alloc_resv_region(IOAPIC_RANGE_START,
IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1,
@@ -5493,10 +5380,8 @@ static void intel_iommu_put_resv_regions(struct device *dev,
{
struct iommu_resv_region *entry, *next;
- list_for_each_entry_safe(entry, next, head, list) {
- if (entry->type == IOMMU_RESV_MSI)
- kfree(entry);
- }
+ list_for_each_entry_safe(entry, next, head, list)
+ kfree(entry);
}
int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
@@ -5508,7 +5393,7 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
u64 ctx_lo;
int ret;
- domain = get_valid_domain_for_dev(dev);
+ domain = find_domain(dev);
if (!domain)
return -EINVAL;
@@ -5550,6 +5435,19 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
return ret;
}
+static void intel_iommu_apply_resv_region(struct device *dev,
+ struct iommu_domain *domain,
+ struct iommu_resv_region *region)
+{
+ struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+ unsigned long start, end;
+
+ start = IOVA_PFN(region->start);
+ end = IOVA_PFN(region->start + region->length - 1);
+
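+	/* Keep the reserved range out of this domain's IOVA allocator. */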
+ WARN_ON_ONCE(!reserve_iova(&dmar_domain->iovad, start, end));
+}
+
#ifdef CONFIG_INTEL_IOMMU_SVM
struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
{
@@ -5699,6 +5597,12 @@ intel_iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
dmar_domain->default_pasid : -EINVAL;
}
+static bool intel_iommu_is_attach_deferred(struct iommu_domain *domain,
+ struct device *dev)
+{
+ return dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO;
+}
+
const struct iommu_ops intel_iommu_ops = {
.capable = intel_iommu_capable,
.domain_alloc = intel_iommu_domain_alloc,
@@ -5715,11 +5619,13 @@ const struct iommu_ops intel_iommu_ops = {
.remove_device = intel_iommu_remove_device,
.get_resv_regions = intel_iommu_get_resv_regions,
.put_resv_regions = intel_iommu_put_resv_regions,
+ .apply_resv_region = intel_iommu_apply_resv_region,
.device_group = pci_device_group,
.dev_has_feat = intel_iommu_dev_has_feat,
.dev_feat_enabled = intel_iommu_dev_feat_enabled,
.dev_enable_feat = intel_iommu_dev_enable_feat,
.dev_disable_feat = intel_iommu_dev_disable_feat,
+ .is_attach_deferred = intel_iommu_is_attach_deferred,
.pgsize_bitmap = INTEL_IOMMU_PGSIZES,
};
diff --git a/drivers/iommu/intel-pasid.c b/drivers/iommu/intel-pasid.c
index fe51d8af457f..040a445be300 100644
--- a/drivers/iommu/intel-pasid.c
+++ b/drivers/iommu/intel-pasid.c
@@ -169,23 +169,6 @@ attach_out:
return 0;
}
-/* Get PRESENT bit of a PASID directory entry. */
-static inline bool
-pasid_pde_is_present(struct pasid_dir_entry *pde)
-{
- return READ_ONCE(pde->val) & PASID_PTE_PRESENT;
-}
-
-/* Get PASID table from a PASID directory entry. */
-static inline struct pasid_entry *
-get_pasid_table_from_pde(struct pasid_dir_entry *pde)
-{
- if (!pasid_pde_is_present(pde))
- return NULL;
-
- return phys_to_virt(READ_ONCE(pde->val) & PDE_PFN_MASK);
-}
-
void intel_pasid_free_table(struct device *dev)
{
struct device_domain_info *info;
diff --git a/drivers/iommu/intel-pasid.h b/drivers/iommu/intel-pasid.h
index 23537b3f34e3..fc8cd8f17de1 100644
--- a/drivers/iommu/intel-pasid.h
+++ b/drivers/iommu/intel-pasid.h
@@ -18,6 +18,10 @@
#define PDE_PFN_MASK PAGE_MASK
#define PASID_PDE_SHIFT 6
#define MAX_NR_PASID_BITS 20
+#define PASID_TBL_ENTRIES BIT(PASID_PDE_SHIFT)
+
+#define is_pasid_enabled(entry) (((entry)->lo >> 3) & 0x1)
+#define get_pasid_dir_size(entry) (1 << ((((entry)->lo >> 9) & 0x7) + 7))
/*
* Domain ID reserved for pasid entries programmed for first-level
@@ -49,6 +53,28 @@ struct pasid_table {
struct list_head dev; /* device list */
};
+/* Get PRESENT bit of a PASID directory entry. */
+static inline bool pasid_pde_is_present(struct pasid_dir_entry *pde)
+{
+ return READ_ONCE(pde->val) & PASID_PTE_PRESENT;
+}
+
+/* Get PASID table from a PASID directory entry. */
+static inline struct pasid_entry *
+get_pasid_table_from_pde(struct pasid_dir_entry *pde)
+{
+ if (!pasid_pde_is_present(pde))
+ return NULL;
+
+ return phys_to_virt(READ_ONCE(pde->val) & PDE_PFN_MASK);
+}
+
+/* Get PRESENT bit of a PASID table entry. */
+static inline bool pasid_pte_is_present(struct pasid_entry *pte)
+{
+ return READ_ONCE(pte->val[0]) & PASID_PTE_PRESENT;
+}
+
extern u32 intel_pasid_max_id;
int intel_pasid_alloc_id(void *ptr, int start, int end, gfp_t gfp);
void intel_pasid_free_id(int pasid);
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index eceaa7e968ae..780de0caafe8 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -366,6 +366,21 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
}
list_add_tail(&svm->list, &global_svm_list);
+ } else {
+ /*
+ * Binding a new device with an existing PASID: the PASID entry
+ * still needs to be set up.
+ */
+ spin_lock(&iommu->lock);
+ ret = intel_pasid_setup_first_level(iommu, dev,
+ mm ? mm->pgd : init_mm.pgd,
+ svm->pasid, FLPT_DEFAULT_DID,
+ mm ? 0 : PASID_FLAG_SUPERVISOR_MODE);
+ spin_unlock(&iommu->lock);
+ if (ret) {
+ kfree(sdev);
+ goto out;
+ }
}
list_add_rcu(&sdev->list, &svm->devs);
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index 4160aa9f3f80..4786ca061e31 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -101,7 +101,7 @@ static void init_ir_status(struct intel_iommu *iommu)
iommu->flags |= VTD_FLAG_IRQ_REMAP_PRE_ENABLED;
}
-static int alloc_irte(struct intel_iommu *iommu, int irq,
+static int alloc_irte(struct intel_iommu *iommu,
struct irq_2_iommu *irq_iommu, u16 count)
{
struct ir_table *table = iommu->ir_table;
@@ -1374,7 +1374,7 @@ static int intel_irq_remapping_alloc(struct irq_domain *domain,
goto out_free_parent;
down_read(&dmar_global_lock);
- index = alloc_irte(iommu, virq, &data->irq_2_iommu, nr_irqs);
+ index = alloc_irte(iommu, &data->irq_2_iommu, nr_irqs);
up_read(&dmar_global_lock);
if (index < 0) {
pr_warn("Failed to allocate IRTE\n");
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index aa7a3fa6dd09..0fc8dfab2abf 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -204,7 +204,7 @@ static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp,
dev_err(dev, "Page table does not fit in PTE: %pa", &phys);
goto out_free;
}
- if (table && !(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) {
+ if (table && !cfg->coherent_walk) {
dma = dma_map_single(dev, table, size, DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma))
goto out_free;
@@ -238,7 +238,7 @@ static void __arm_v7s_free_table(void *table, int lvl,
struct device *dev = cfg->iommu_dev;
size_t size = ARM_V7S_TABLE_SIZE(lvl);
- if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA))
+ if (!cfg->coherent_walk)
dma_unmap_single(dev, __arm_v7s_dma_addr(table), size,
DMA_TO_DEVICE);
if (lvl == 1)
@@ -250,7 +250,7 @@ static void __arm_v7s_free_table(void *table, int lvl,
static void __arm_v7s_pte_sync(arm_v7s_iopte *ptep, int num_entries,
struct io_pgtable_cfg *cfg)
{
- if (cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)
+ if (cfg->coherent_walk)
return;
dma_sync_single_for_device(cfg->iommu_dev, __arm_v7s_dma_addr(ptep),
@@ -716,7 +716,6 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
IO_PGTABLE_QUIRK_NO_PERMS |
IO_PGTABLE_QUIRK_TLBI_ON_MAP |
IO_PGTABLE_QUIRK_ARM_MTK_4GB |
- IO_PGTABLE_QUIRK_NO_DMA |
IO_PGTABLE_QUIRK_NON_STRICT))
return NULL;
@@ -779,8 +778,11 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
/* TTBRs */
cfg->arm_v7s_cfg.ttbr[0] = virt_to_phys(data->pgd) |
ARM_V7S_TTBR_S | ARM_V7S_TTBR_NOS |
- ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_WBWA) |
- ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_WBWA);
+ (cfg->coherent_walk ?
+ (ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_WBWA) |
+ ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_WBWA)) :
+ (ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_NC) |
+ ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_NC)));
cfg->arm_v7s_cfg.ttbr[1] = 0;
return &data->iop;
@@ -835,7 +837,8 @@ static int __init arm_v7s_do_selftests(void)
.tlb = &dummy_tlb_ops,
.oas = 32,
.ias = 32,
- .quirks = IO_PGTABLE_QUIRK_ARM_NS | IO_PGTABLE_QUIRK_NO_DMA,
+ .coherent_walk = true,
+ .quirks = IO_PGTABLE_QUIRK_ARM_NS,
.pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
};
unsigned int iova, size, iova_start;
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 4b6b2f3150a9..161a7d56264d 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -156,10 +156,12 @@
#define ARM_LPAE_MAIR_ATTR_MASK 0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE 0x04
#define ARM_LPAE_MAIR_ATTR_NC 0x44
+#define ARM_LPAE_MAIR_ATTR_INC_OWBRWA 0xf4
#define ARM_LPAE_MAIR_ATTR_WBRWA 0xff
#define ARM_LPAE_MAIR_ATTR_IDX_NC 0
#define ARM_LPAE_MAIR_ATTR_IDX_CACHE 1
#define ARM_LPAE_MAIR_ATTR_IDX_DEV 2
+#define ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE 3
#define ARM_MALI_LPAE_TTBR_ADRMODE_TABLE (3u << 0)
#define ARM_MALI_LPAE_TTBR_READ_INNER BIT(2)
@@ -239,7 +241,7 @@ static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
return NULL;
pages = page_address(p);
- if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) {
+ if (!cfg->coherent_walk) {
dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma))
goto out_free;
@@ -265,7 +267,7 @@ out_free:
static void __arm_lpae_free_pages(void *pages, size_t size,
struct io_pgtable_cfg *cfg)
{
- if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA))
+ if (!cfg->coherent_walk)
dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
size, DMA_TO_DEVICE);
free_pages((unsigned long)pages, get_order(size));
@@ -283,7 +285,7 @@ static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
{
*ptep = pte;
- if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA))
+ if (!cfg->coherent_walk)
__arm_lpae_sync_pte(ptep, cfg);
}
@@ -361,8 +363,7 @@ static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table,
old = cmpxchg64_relaxed(ptep, curr, new);
- if ((cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA) ||
- (old & ARM_LPAE_PTE_SW_SYNC))
+ if (cfg->coherent_walk || (old & ARM_LPAE_PTE_SW_SYNC))
return old;
/* Even if it's not ours, there's no point waiting; just kick it */
@@ -403,8 +404,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
pte = arm_lpae_install_table(cptep, ptep, 0, cfg);
if (pte)
__arm_lpae_free_pages(cptep, tblsz, cfg);
- } else if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA) &&
- !(pte & ARM_LPAE_PTE_SW_SYNC)) {
+ } else if (!cfg->coherent_walk && !(pte & ARM_LPAE_PTE_SW_SYNC)) {
__arm_lpae_sync_pte(ptep, cfg);
}
@@ -459,6 +459,9 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
else if (prot & IOMMU_CACHE)
pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
+ else if (prot & IOMMU_QCOM_SYS_CACHE)
+ pte |= (ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE
+ << ARM_LPAE_PTE_ATTRINDX_SHIFT);
}
if (prot & IOMMU_NOEXEC)
@@ -783,7 +786,7 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
u64 reg;
struct arm_lpae_io_pgtable *data;
- if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS | IO_PGTABLE_QUIRK_NO_DMA |
+ if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
IO_PGTABLE_QUIRK_NON_STRICT))
return NULL;
@@ -792,9 +795,15 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
return NULL;
/* TCR */
- reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
- (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
- (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
+ if (cfg->coherent_walk) {
+ reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
+ (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
+ (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
+ } else {
+ reg = (ARM_LPAE_TCR_SH_OS << ARM_LPAE_TCR_SH0_SHIFT) |
+ (ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_IRGN0_SHIFT) |
+ (ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_ORGN0_SHIFT);
+ }
switch (ARM_LPAE_GRANULE(data)) {
case SZ_4K:
@@ -846,7 +855,9 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
(ARM_LPAE_MAIR_ATTR_WBRWA
<< ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
(ARM_LPAE_MAIR_ATTR_DEVICE
- << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));
+ << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV)) |
+ (ARM_LPAE_MAIR_ATTR_INC_OWBRWA
+ << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE));
cfg->arm_lpae_s1_cfg.mair[0] = reg;
cfg->arm_lpae_s1_cfg.mair[1] = 0;
@@ -876,8 +887,7 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
struct arm_lpae_io_pgtable *data;
/* The NS quirk doesn't apply at stage 2 */
- if (cfg->quirks & ~(IO_PGTABLE_QUIRK_NO_DMA |
- IO_PGTABLE_QUIRK_NON_STRICT))
+ if (cfg->quirks & ~(IO_PGTABLE_QUIRK_NON_STRICT))
return NULL;
data = arm_lpae_alloc_pgtable(cfg);
@@ -1212,7 +1222,7 @@ static int __init arm_lpae_do_selftests(void)
struct io_pgtable_cfg cfg = {
.tlb = &dummy_tlb_ops,
.oas = 48,
- .quirks = IO_PGTABLE_QUIRK_NO_DMA,
+ .coherent_walk = true,
};
for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 9f0a2844371c..0c674d80c37f 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -61,10 +61,11 @@ struct iommu_group_attribute {
};
static const char * const iommu_group_resv_type_string[] = {
- [IOMMU_RESV_DIRECT] = "direct",
- [IOMMU_RESV_RESERVED] = "reserved",
- [IOMMU_RESV_MSI] = "msi",
- [IOMMU_RESV_SW_MSI] = "msi",
+ [IOMMU_RESV_DIRECT] = "direct",
+ [IOMMU_RESV_DIRECT_RELAXABLE] = "direct-relaxable",
+ [IOMMU_RESV_RESERVED] = "reserved",
+ [IOMMU_RESV_MSI] = "msi",
+ [IOMMU_RESV_SW_MSI] = "msi",
};
#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \
@@ -95,15 +96,43 @@ void iommu_device_unregister(struct iommu_device *iommu)
spin_unlock(&iommu_device_lock);
}
+static struct iommu_param *iommu_get_dev_param(struct device *dev)
+{
+ struct iommu_param *param = dev->iommu_param;
+
+ if (param)
+ return param;
+
+ param = kzalloc(sizeof(*param), GFP_KERNEL);
+ if (!param)
+ return NULL;
+
+ mutex_init(&param->lock);
+ dev->iommu_param = param;
+ return param;
+}
+
+static void iommu_free_dev_param(struct device *dev)
+{
+ kfree(dev->iommu_param);
+ dev->iommu_param = NULL;
+}
+
int iommu_probe_device(struct device *dev)
{
const struct iommu_ops *ops = dev->bus->iommu_ops;
- int ret = -EINVAL;
+ int ret;
WARN_ON(dev->iommu_group);
+ if (!ops)
+ return -EINVAL;
- if (ops)
- ret = ops->add_device(dev);
+ if (!iommu_get_dev_param(dev))
+ return -ENOMEM;
+
+ ret = ops->add_device(dev);
+ if (ret)
+ iommu_free_dev_param(dev);
return ret;
}
@@ -114,6 +143,8 @@ void iommu_release_device(struct device *dev)
if (dev->iommu_group)
ops->remove_device(dev);
+
+ iommu_free_dev_param(dev);
}
static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
@@ -225,18 +256,21 @@ static int iommu_insert_resv_region(struct iommu_resv_region *new,
pos = pos->next;
} else if ((start >= a) && (end <= b)) {
if (new->type == type)
- goto done;
+ return 0;
else
pos = pos->next;
} else {
if (new->type == type) {
phys_addr_t new_start = min(a, start);
phys_addr_t new_end = max(b, end);
+ int ret;
list_del(&entry->list);
entry->start = new_start;
entry->length = new_end - new_start + 1;
- iommu_insert_resv_region(entry, regions);
+ ret = iommu_insert_resv_region(entry, regions);
+ kfree(entry);
+ return ret;
} else {
pos = pos->next;
}
@@ -249,7 +283,6 @@ insert:
return -ENOMEM;
list_add_tail(&region->list, pos);
-done:
return 0;
}
@@ -561,7 +594,8 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group,
start = ALIGN(entry->start, pg_size);
end = ALIGN(entry->start + entry->length, pg_size);
- if (entry->type != IOMMU_RESV_DIRECT)
+ if (entry->type != IOMMU_RESV_DIRECT &&
+ entry->type != IOMMU_RESV_DIRECT_RELAXABLE)
continue;
for (addr = start; addr < end; addr += pg_size) {
@@ -843,6 +877,206 @@ int iommu_group_unregister_notifier(struct iommu_group *group,
EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);
/**
+ * iommu_register_device_fault_handler() - Register a device fault handler
+ * @dev: the device
+ * @handler: the fault handler
+ * @data: private data passed as argument to the handler
+ *
+ * When an IOMMU fault event is received, this handler gets called with the
+ * fault event and data as argument. The handler should return 0 on success. If
+ * the fault is recoverable (IOMMU_FAULT_PAGE_REQ), the consumer should also
+ * complete the fault by calling iommu_page_response() with one of the following
+ * response codes:
+ * - IOMMU_PAGE_RESP_SUCCESS: retry the translation
+ * - IOMMU_PAGE_RESP_INVALID: terminate the fault
+ * - IOMMU_PAGE_RESP_FAILURE: terminate the fault and stop reporting
+ * page faults if possible.
+ *
+ * Return 0 if the fault handler was installed successfully, or an error.
+ */
+int iommu_register_device_fault_handler(struct device *dev,
+ iommu_dev_fault_handler_t handler,
+ void *data)
+{
+ struct iommu_param *param = dev->iommu_param;
+ int ret = 0;
+
+ if (!param)
+ return -EINVAL;
+
+ mutex_lock(&param->lock);
+ /* Only allow one fault handler registered for each device */
+ if (param->fault_param) {
+ ret = -EBUSY;
+ goto done_unlock;
+ }
+
+ get_device(dev);
+ param->fault_param = kzalloc(sizeof(*param->fault_param), GFP_KERNEL);
+ if (!param->fault_param) {
+ put_device(dev);
+ ret = -ENOMEM;
+ goto done_unlock;
+ }
+ param->fault_param->handler = handler;
+ param->fault_param->data = data;
+ mutex_init(&param->fault_param->lock);
+ INIT_LIST_HEAD(&param->fault_param->faults);
+
+done_unlock:
+ mutex_unlock(&param->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_register_device_fault_handler);
+
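+/*
+ * Illustrative usage sketch (my_drv, struct my_drv and drv->dev are
+ * hypothetical): a consumer registers a handler at probe time and, for
+ * recoverable page requests, completes the fault with iommu_page_response().
+ *
+ *	static int my_drv_iommu_fault(struct iommu_fault *fault, void *data)
+ *	{
+ *		struct my_drv *drv = data;
+ *		struct iommu_page_response resp = {
+ *			.version = IOMMU_PAGE_RESP_VERSION_1,
+ *			.grpid   = fault->prm.grpid,
+ *			.pasid   = fault->prm.pasid,
+ *			.code    = IOMMU_PAGE_RESP_SUCCESS,
+ *		};
+ *
+ *		if (fault->type != IOMMU_FAULT_PAGE_REQ)
+ *			return -EOPNOTSUPP;
+ *
+ *		// ... resolve fault->prm.addr for fault->prm.pasid ...
+ *		return iommu_page_response(drv->dev, &resp);
+ *	}
+ *
+ *	ret = iommu_register_device_fault_handler(dev, my_drv_iommu_fault, drv);
+ */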
+/**
+ * iommu_unregister_device_fault_handler() - Unregister the device fault handler
+ * @dev: the device
+ *
+ * Remove the device fault handler installed with
+ * iommu_register_device_fault_handler().
+ *
+ * Return 0 on success, or an error.
+ */
+int iommu_unregister_device_fault_handler(struct device *dev)
+{
+ struct iommu_param *param = dev->iommu_param;
+ int ret = 0;
+
+ if (!param)
+ return -EINVAL;
+
+ mutex_lock(&param->lock);
+
+ if (!param->fault_param)
+ goto unlock;
+
+ /* we cannot unregister handler if there are pending faults */
+ if (!list_empty(&param->fault_param->faults)) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ kfree(param->fault_param);
+ param->fault_param = NULL;
+ put_device(dev);
+unlock:
+ mutex_unlock(&param->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_unregister_device_fault_handler);
+
+/**
+ * iommu_report_device_fault() - Report fault event to device driver
+ * @dev: the device
+ * @evt: fault event data
+ *
+ * Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ
+ * handler. When this function fails and the fault is recoverable, it is the
+ * caller's responsibility to complete the fault.
+ *
+ * Return 0 on success, or an error.
+ */
+int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
+{
+ struct iommu_param *param = dev->iommu_param;
+ struct iommu_fault_event *evt_pending = NULL;
+ struct iommu_fault_param *fparam;
+ int ret = 0;
+
+ if (!param || !evt)
+ return -EINVAL;
+
+ /* we only report device fault if there is a handler registered */
+ mutex_lock(&param->lock);
+ fparam = param->fault_param;
+ if (!fparam || !fparam->handler) {
+ ret = -EINVAL;
+ goto done_unlock;
+ }
+
+ if (evt->fault.type == IOMMU_FAULT_PAGE_REQ &&
+ (evt->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
+ evt_pending = kmemdup(evt, sizeof(struct iommu_fault_event),
+ GFP_KERNEL);
+ if (!evt_pending) {
+ ret = -ENOMEM;
+ goto done_unlock;
+ }
+ mutex_lock(&fparam->lock);
+ list_add_tail(&evt_pending->list, &fparam->faults);
+ mutex_unlock(&fparam->lock);
+ }
+
+ ret = fparam->handler(&evt->fault, fparam->data);
+ if (ret && evt_pending) {
+ mutex_lock(&fparam->lock);
+ list_del(&evt_pending->list);
+ mutex_unlock(&fparam->lock);
+ kfree(evt_pending);
+ }
+done_unlock:
+ mutex_unlock(&param->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_report_device_fault);
+
+int iommu_page_response(struct device *dev,
+ struct iommu_page_response *msg)
+{
+ bool pasid_valid;
+ int ret = -EINVAL;
+ struct iommu_fault_event *evt;
+ struct iommu_fault_page_request *prm;
+ struct iommu_param *param = dev->iommu_param;
+ struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
+
+ if (!domain || !domain->ops->page_response)
+ return -ENODEV;
+
+ if (!param || !param->fault_param)
+ return -EINVAL;
+
+ if (msg->version != IOMMU_PAGE_RESP_VERSION_1 ||
+ msg->flags & ~IOMMU_PAGE_RESP_PASID_VALID)
+ return -EINVAL;
+
+ /* Only send response if there is a fault report pending */
+ mutex_lock(&param->fault_param->lock);
+ if (list_empty(&param->fault_param->faults)) {
+ dev_warn_ratelimited(dev, "no pending PRQ, drop response\n");
+ goto done_unlock;
+ }
+ /*
+ * Check if we have a matching page request pending to respond,
+ * otherwise return -EINVAL
+ */
+ list_for_each_entry(evt, &param->fault_param->faults, list) {
+ prm = &evt->fault.prm;
+ pasid_valid = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
+
+ if ((pasid_valid && prm->pasid != msg->pasid) ||
+ prm->grpid != msg->grpid)
+ continue;
+
+ /* Sanitize the reply */
+ msg->flags = pasid_valid ? IOMMU_PAGE_RESP_PASID_VALID : 0;
+
+ ret = domain->ops->page_response(dev, evt, msg);
+ list_del(&evt->list);
+ kfree(evt);
+ break;
+ }
+
+done_unlock:
+ mutex_unlock(&param->fault_param->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_page_response);
+
+/**
* iommu_group_id - Return ID for a group
* @group: the group to ID
*
@@ -1895,24 +2129,23 @@ struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
return region;
}
-/* Request that a device is direct mapped by the IOMMU */
-int iommu_request_dm_for_dev(struct device *dev)
+static int
+request_default_domain_for_dev(struct device *dev, unsigned long type)
{
- struct iommu_domain *dm_domain;
+ struct iommu_domain *domain;
struct iommu_group *group;
int ret;
/* Device must already be in a group before calling this function */
- group = iommu_group_get_for_dev(dev);
- if (IS_ERR(group))
- return PTR_ERR(group);
+ group = iommu_group_get(dev);
+ if (!group)
+ return -EINVAL;
mutex_lock(&group->mutex);
/* Check if the default domain is already direct mapped */
ret = 0;
- if (group->default_domain &&
- group->default_domain->type == IOMMU_DOMAIN_IDENTITY)
+ if (group->default_domain && group->default_domain->type == type)
goto out;
/* Don't change mappings of existing devices */
@@ -1922,23 +2155,26 @@ int iommu_request_dm_for_dev(struct device *dev)
/* Allocate a direct mapped domain */
ret = -ENOMEM;
- dm_domain = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_IDENTITY);
- if (!dm_domain)
+ domain = __iommu_domain_alloc(dev->bus, type);
+ if (!domain)
goto out;
/* Attach the device to the domain */
- ret = __iommu_attach_group(dm_domain, group);
+ ret = __iommu_attach_group(domain, group);
if (ret) {
- iommu_domain_free(dm_domain);
+ iommu_domain_free(domain);
goto out;
}
+ iommu_group_create_direct_mappings(group, dev);
+
/* Make the direct mapped domain the default for this group */
if (group->default_domain)
iommu_domain_free(group->default_domain);
- group->default_domain = dm_domain;
+ group->default_domain = domain;
- dev_info(dev, "Using iommu direct mapping\n");
+ dev_info(dev, "Using iommu %s mapping\n",
+ type == IOMMU_DOMAIN_DMA ? "dma" : "direct");
ret = 0;
out:
@@ -1948,6 +2184,18 @@ out:
return ret;
}
+/* Request that a device is direct mapped by the IOMMU */
+int iommu_request_dm_for_dev(struct device *dev)
+{
+ return request_default_domain_for_dev(dev, IOMMU_DOMAIN_IDENTITY);
+}
+
+/* Request that a device can't be direct mapped by the IOMMU */
+int iommu_request_dma_domain_for_dev(struct device *dev)
+{
+ return request_default_domain_for_dev(dev, IOMMU_DOMAIN_DMA);
+}
+
const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
{
const struct iommu_ops *ops = NULL;
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 9a380c10655e..ad0098c0c87c 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -36,12 +36,16 @@
#define arm_iommu_detach_device(...) do {} while (0)
#endif
-#define IPMMU_CTX_MAX 8
+#define IPMMU_CTX_MAX 8U
+#define IPMMU_CTX_INVALID -1
+
+#define IPMMU_UTLB_MAX 48U
struct ipmmu_features {
bool use_ns_alias_offset;
bool has_cache_leaf_nodes;
unsigned int number_of_contexts;
+ unsigned int num_utlbs;
bool setup_imbuscr;
bool twobit_imttbcr_sl0;
bool reserved_context;
@@ -53,11 +57,11 @@ struct ipmmu_vmsa_device {
struct iommu_device iommu;
struct ipmmu_vmsa_device *root;
const struct ipmmu_features *features;
- unsigned int num_utlbs;
unsigned int num_ctx;
spinlock_t lock; /* Protects ctx and domains[] */
DECLARE_BITMAP(ctx, IPMMU_CTX_MAX);
struct ipmmu_vmsa_domain *domains[IPMMU_CTX_MAX];
+ s8 utlb_ctx[IPMMU_UTLB_MAX];
struct iommu_group *group;
struct dma_iommu_mapping *mapping;
@@ -186,7 +190,8 @@ static struct ipmmu_vmsa_device *to_ipmmu(struct device *dev)
#define IMMAIR_ATTR_IDX_WBRWA 1
#define IMMAIR_ATTR_IDX_DEV 2
-#define IMEAR 0x0030
+#define IMELAR 0x0030 /* IMEAR on R-Car Gen2 */
+#define IMEUAR 0x0034 /* R-Car Gen3 only */
#define IMPCTR 0x0200
#define IMPSTR 0x0208
@@ -334,6 +339,7 @@ static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain,
ipmmu_write(mmu, IMUCTR(utlb),
IMUCTR_TTSEL_MMU(domain->context_id) | IMUCTR_FLUSH |
IMUCTR_MMUEN);
+ mmu->utlb_ctx[utlb] = domain->context_id;
}
/*
@@ -345,6 +351,7 @@ static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain,
struct ipmmu_vmsa_device *mmu = domain->mmu;
ipmmu_write(mmu, IMUCTR(utlb), 0);
+ mmu->utlb_ctx[utlb] = IPMMU_CTX_INVALID;
}
static void ipmmu_tlb_flush_all(void *cookie)
@@ -403,52 +410,10 @@ static void ipmmu_domain_free_context(struct ipmmu_vmsa_device *mmu,
spin_unlock_irqrestore(&mmu->lock, flags);
}
-static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
+static void ipmmu_domain_setup_context(struct ipmmu_vmsa_domain *domain)
{
u64 ttbr;
u32 tmp;
- int ret;
-
- /*
- * Allocate the page table operations.
- *
- * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory
- * access, Long-descriptor format" that the NStable bit being set in a
- * table descriptor will result in the NStable and NS bits of all child
- * entries being ignored and considered as being set. The IPMMU seems
- * not to comply with this, as it generates a secure access page fault
- * if any of the NStable and NS bits isn't set when running in
- * non-secure mode.
- */
- domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS;
- domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
- domain->cfg.ias = 32;
- domain->cfg.oas = 40;
- domain->cfg.tlb = &ipmmu_gather_ops;
- domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
- domain->io_domain.geometry.force_aperture = true;
- /*
- * TODO: Add support for coherent walk through CCI with DVM and remove
- * cache handling. For now, delegate it to the io-pgtable code.
- */
- domain->cfg.iommu_dev = domain->mmu->root->dev;
-
- /*
- * Find an unused context.
- */
- ret = ipmmu_domain_allocate_context(domain->mmu->root, domain);
- if (ret < 0)
- return ret;
-
- domain->context_id = ret;
-
- domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
- domain);
- if (!domain->iop) {
- ipmmu_domain_free_context(domain->mmu->root,
- domain->context_id);
- return -EINVAL;
- }
/* TTBR0 */
ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr[0];
@@ -494,7 +459,55 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
*/
ipmmu_ctx_write_all(domain, IMCTR,
IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN);
+}
+
+static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
+{
+ int ret;
+
+ /*
+ * Allocate the page table operations.
+ *
+ * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory
+ * access, Long-descriptor format" that the NStable bit being set in a
+ * table descriptor will result in the NStable and NS bits of all child
+ * entries being ignored and considered as being set. The IPMMU seems
+ * not to comply with this, as it generates a secure access page fault
+ * if any of the NStable and NS bits isn't set when running in
+ * non-secure mode.
+ */
+ domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS;
+ domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
+ domain->cfg.ias = 32;
+ domain->cfg.oas = 40;
+ domain->cfg.tlb = &ipmmu_gather_ops;
+ domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
+ domain->io_domain.geometry.force_aperture = true;
+ /*
+ * TODO: Add support for coherent walk through CCI with DVM and remove
+ * cache handling. For now, delegate it to the io-pgtable code.
+ */
+ domain->cfg.coherent_walk = false;
+ domain->cfg.iommu_dev = domain->mmu->root->dev;
+
+ /*
+ * Find an unused context.
+ */
+ ret = ipmmu_domain_allocate_context(domain->mmu->root, domain);
+ if (ret < 0)
+ return ret;
+
+ domain->context_id = ret;
+
+ domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
+ domain);
+ if (!domain->iop) {
+ ipmmu_domain_free_context(domain->mmu->root,
+ domain->context_id);
+ return -EINVAL;
+ }
+ ipmmu_domain_setup_context(domain);
return 0;
}
@@ -522,14 +535,16 @@ static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
{
const u32 err_mask = IMSTR_MHIT | IMSTR_ABORT | IMSTR_PF | IMSTR_TF;
struct ipmmu_vmsa_device *mmu = domain->mmu;
+ unsigned long iova;
u32 status;
- u32 iova;
status = ipmmu_ctx_read_root(domain, IMSTR);
if (!(status & err_mask))
return IRQ_NONE;
- iova = ipmmu_ctx_read_root(domain, IMEAR);
+ iova = ipmmu_ctx_read_root(domain, IMELAR);
+ if (IS_ENABLED(CONFIG_64BIT))
+ iova |= (u64)ipmmu_ctx_read_root(domain, IMEUAR) << 32;
/*
* Clear the error status flags. Unlike traditional interrupt flag
@@ -541,10 +556,10 @@ static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
/* Log fatal errors. */
if (status & IMSTR_MHIT)
- dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%08x\n",
+ dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%lx\n",
iova);
if (status & IMSTR_ABORT)
- dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%08x\n",
+ dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%lx\n",
iova);
if (!(status & (IMSTR_PF | IMSTR_TF)))
@@ -560,7 +575,7 @@ static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
return IRQ_HANDLED;
dev_err_ratelimited(mmu->dev,
- "Unhandled fault: status 0x%08x iova 0x%08x\n",
+ "Unhandled fault: status 0x%08x iova 0x%lx\n",
status, iova);
return IRQ_HANDLED;
@@ -885,27 +900,37 @@ error:
static int ipmmu_add_device(struct device *dev)
{
+ struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
struct iommu_group *group;
+ int ret;
/*
* Only let through devices that have been verified in xlate()
*/
- if (!to_ipmmu(dev))
+ if (!mmu)
return -ENODEV;
- if (IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA))
- return ipmmu_init_arm_mapping(dev);
+ if (IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA)) {
+ ret = ipmmu_init_arm_mapping(dev);
+ if (ret)
+ return ret;
+ } else {
+ group = iommu_group_get_for_dev(dev);
+ if (IS_ERR(group))
+ return PTR_ERR(group);
- group = iommu_group_get_for_dev(dev);
- if (IS_ERR(group))
- return PTR_ERR(group);
+ iommu_group_put(group);
+ }
- iommu_group_put(group);
+ iommu_device_link(&mmu->iommu, dev);
return 0;
}
static void ipmmu_remove_device(struct device *dev)
{
+ struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
+
+ iommu_device_unlink(&mmu->iommu, dev);
arm_iommu_detach_device(dev);
iommu_group_remove_device(dev);
}
@@ -959,6 +984,7 @@ static const struct ipmmu_features ipmmu_features_default = {
.use_ns_alias_offset = true,
.has_cache_leaf_nodes = false,
.number_of_contexts = 1, /* software only tested with one context */
+ .num_utlbs = 32,
.setup_imbuscr = true,
.twobit_imttbcr_sl0 = false,
.reserved_context = false,
@@ -968,6 +994,7 @@ static const struct ipmmu_features ipmmu_features_rcar_gen3 = {
.use_ns_alias_offset = false,
.has_cache_leaf_nodes = true,
.number_of_contexts = 8,
+ .num_utlbs = 48,
.setup_imbuscr = false,
.twobit_imttbcr_sl0 = true,
.reserved_context = true,
@@ -1020,10 +1047,10 @@ static int ipmmu_probe(struct platform_device *pdev)
}
mmu->dev = &pdev->dev;
- mmu->num_utlbs = 48;
spin_lock_init(&mmu->lock);
bitmap_zero(mmu->ctx, IPMMU_CTX_MAX);
mmu->features = of_device_get_match_data(&pdev->dev);
+ memset(mmu->utlb_ctx, IPMMU_CTX_INVALID, mmu->features->num_utlbs);
dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
/* Map I/O memory and request IRQ. */
@@ -1047,8 +1074,7 @@ static int ipmmu_probe(struct platform_device *pdev)
if (mmu->features->use_ns_alias_offset)
mmu->base += IM_NS_ALIAS_OFFSET;
- mmu->num_ctx = min_t(unsigned int, IPMMU_CTX_MAX,
- mmu->features->number_of_contexts);
+ mmu->num_ctx = min(IPMMU_CTX_MAX, mmu->features->number_of_contexts);
irq = platform_get_irq(pdev, 0);
@@ -1140,10 +1166,48 @@ static int ipmmu_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int ipmmu_resume_noirq(struct device *dev)
+{
+ struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
+ unsigned int i;
+
+ /* Reset root MMU and restore contexts */
+ if (ipmmu_is_root(mmu)) {
+ ipmmu_device_reset(mmu);
+
+ for (i = 0; i < mmu->num_ctx; i++) {
+ if (!mmu->domains[i])
+ continue;
+
+ ipmmu_domain_setup_context(mmu->domains[i]);
+ }
+ }
+
+ /* Re-enable active micro-TLBs */
+ for (i = 0; i < mmu->features->num_utlbs; i++) {
+ if (mmu->utlb_ctx[i] == IPMMU_CTX_INVALID)
+ continue;
+
+ ipmmu_utlb_enable(mmu->root->domains[mmu->utlb_ctx[i]], i);
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops ipmmu_pm = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, ipmmu_resume_noirq)
+};
+#define DEV_PM_OPS &ipmmu_pm
+#else
+#define DEV_PM_OPS NULL
+#endif /* CONFIG_PM_SLEEP */
+
static struct platform_driver ipmmu_driver = {
.driver = {
.name = "ipmmu-vmsa",
.of_match_table = of_match_ptr(ipmmu_of_ids),
+ .pm = DEV_PM_OPS,
},
.probe = ipmmu_probe,
.remove = ipmmu_remove,
diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c
index ff31bddba60a..8e19bfa94121 100644
--- a/drivers/iommu/omap-iommu-debug.c
+++ b/drivers/iommu/omap-iommu-debug.c
@@ -236,17 +236,6 @@ DEBUG_FOPS_RO(regs);
DEFINE_SHOW_ATTRIBUTE(tlb);
DEFINE_SHOW_ATTRIBUTE(pagetable);
-#define __DEBUG_ADD_FILE(attr, mode) \
- { \
- struct dentry *dent; \
- dent = debugfs_create_file(#attr, mode, obj->debug_dir, \
- obj, &attr##_fops); \
- if (!dent) \
- goto err; \
- }
-
-#define DEBUG_ADD_FILE_RO(name) __DEBUG_ADD_FILE(name, 0400)
-
void omap_iommu_debugfs_add(struct omap_iommu *obj)
{
struct dentry *d;
@@ -254,23 +243,13 @@ void omap_iommu_debugfs_add(struct omap_iommu *obj)
if (!iommu_debug_root)
return;
- obj->debug_dir = debugfs_create_dir(obj->name, iommu_debug_root);
- if (!obj->debug_dir)
- return;
+ d = debugfs_create_dir(obj->name, iommu_debug_root);
+ obj->debug_dir = d;
- d = debugfs_create_u32("nr_tlb_entries", 0400, obj->debug_dir,
- &obj->nr_tlb_entries);
- if (!d)
- return;
-
- DEBUG_ADD_FILE_RO(regs);
- DEBUG_ADD_FILE_RO(tlb);
- DEBUG_ADD_FILE_RO(pagetable);
-
- return;
-
-err:
- debugfs_remove_recursive(obj->debug_dir);
+ debugfs_create_u32("nr_tlb_entries", 0400, d, &obj->nr_tlb_entries);
+ debugfs_create_file("regs", 0400, d, obj, &regs_fops);
+ debugfs_create_file("tlb", 0400, d, obj, &tlb_fops);
+ debugfs_create_file("pagetable", 0400, d, obj, &pagetable_fops);
}
void omap_iommu_debugfs_remove(struct omap_iommu *obj)
@@ -284,8 +263,6 @@ void omap_iommu_debugfs_remove(struct omap_iommu *obj)
void __init omap_iommu_debugfs_init(void)
{
iommu_debug_root = debugfs_create_dir("omap_iommu", NULL);
- if (!iommu_debug_root)
- pr_err("can't create debugfs dir\n");
}
void __exit omap_iommu_debugfs_exit(void)
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 62f9c61338a5..dfb961d8c21b 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -35,8 +35,7 @@
static const struct iommu_ops omap_iommu_ops;
-#define to_iommu(dev) \
- ((struct omap_iommu *)platform_get_drvdata(to_platform_device(dev)))
+#define to_iommu(dev) ((struct omap_iommu *)dev_get_drvdata(dev))
/* bitmap of the page sizes currently supported */
#define OMAP_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_1M | SZ_16M)
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 659c5e0fb835..80e10f4e213a 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -15,10 +15,10 @@ config ARM_GIC_PM
bool
depends on PM
select ARM_GIC
- select PM_CLK
config ARM_GIC_MAX_NR
int
+ depends on ARM_GIC
default 2 if ARCH_REALVIEW
default 1
@@ -87,6 +87,14 @@ config ALPINE_MSI
select PCI_MSI
select GENERIC_IRQ_CHIP
+config AL_FIC
+ bool "Amazon's Annapurna Labs Fabric Interrupt Controller"
+ depends on OF || COMPILE_TEST
+ select GENERIC_IRQ_CHIP
+ select IRQ_DOMAIN
+ help
+ Support Amazon's Annapurna Labs Fabric Interrupt Controller.
+
config ATMEL_AIC_IRQ
bool
select GENERIC_IRQ_CHIP
@@ -217,13 +225,26 @@ config RDA_INTC
select IRQ_DOMAIN
config RENESAS_INTC_IRQPIN
- bool
+ bool "Renesas INTC External IRQ Pin Support" if COMPILE_TEST
select IRQ_DOMAIN
+ help
+ Enable support for the Renesas Interrupt Controller for external
+ interrupt pins, as found on SH/R-Mobile and R-Car Gen1 SoCs.
config RENESAS_IRQC
- bool
+ bool "Renesas R-Mobile APE6 and R-Car IRQC support" if COMPILE_TEST
select GENERIC_IRQ_CHIP
select IRQ_DOMAIN
+ help
+ Enable support for the Renesas Interrupt Controller for external
+ devices, as found on R-Mobile APE6, R-Car Gen2, and R-Car Gen3 SoCs.
+
+config RENESAS_RZA1_IRQC
+ bool "Renesas RZ/A1 IRQC support" if COMPILE_TEST
+ select IRQ_DOMAIN_HIERARCHY
+ help
+ Enable support for the Renesas RZ/A1 Interrupt Controller, to use up
+ to 8 external interrupts with configurable sense select.
config ST_IRQCHIP
bool
@@ -299,8 +320,11 @@ config RENESAS_H8300H_INTC
select IRQ_DOMAIN
config RENESAS_H8S_INTC
- bool
+ bool "Renesas H8S Interrupt Controller Support" if COMPILE_TEST
select IRQ_DOMAIN
+ help
+ Enable support for the Renesas H8/300 Interrupt Controller, as found
+ on Renesas H8S SoCs.
config IMX_GPCV2
bool
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 606a003a0000..8d0fcec6ab23 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_IRQCHIP) += irqchip.o
+obj-$(CONFIG_AL_FIC) += irq-al-fic.o
obj-$(CONFIG_ALPINE_MSI) += irq-alpine-msi.o
obj-$(CONFIG_ATH79) += irq-ath79-cpu.o
obj-$(CONFIG_ATH79) += irq-ath79-misc.o
@@ -49,6 +50,7 @@ obj-$(CONFIG_JCORE_AIC) += irq-jcore-aic.o
obj-$(CONFIG_RDA_INTC) += irq-rda-intc.o
obj-$(CONFIG_RENESAS_INTC_IRQPIN) += irq-renesas-intc-irqpin.o
obj-$(CONFIG_RENESAS_IRQC) += irq-renesas-irqc.o
+obj-$(CONFIG_RENESAS_RZA1_IRQC) += irq-renesas-rza1.o
obj-$(CONFIG_VERSATILE_FPGA_IRQ) += irq-versatile-fpga.o
obj-$(CONFIG_ARCH_NSPIRE) += irq-zevio.o
obj-$(CONFIG_ARCH_VT8500) += irq-vt8500.o
diff --git a/drivers/irqchip/irq-al-fic.c b/drivers/irqchip/irq-al-fic.c
new file mode 100644
index 000000000000..1a57cee3efab
--- /dev/null
+++ b/drivers/irqchip/irq-al-fic.c
@@ -0,0 +1,278 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+/* FIC Registers */
+#define AL_FIC_CAUSE 0x00
+#define AL_FIC_MASK 0x10
+#define AL_FIC_CONTROL 0x28
+
+#define CONTROL_TRIGGER_RISING BIT(3)
+#define CONTROL_MASK_MSI_X BIT(5)
+
+#define NR_FIC_IRQS 32
+
+MODULE_AUTHOR("Talel Shenhar");
+MODULE_DESCRIPTION("Amazon's Annapurna Labs Interrupt Controller Driver");
+MODULE_LICENSE("GPL v2");
+
+enum al_fic_state {
+ AL_FIC_UNCONFIGURED = 0,
+ AL_FIC_CONFIGURED_LEVEL,
+ AL_FIC_CONFIGURED_RISING_EDGE,
+};
+
+struct al_fic {
+ void __iomem *base;
+ struct irq_domain *domain;
+ const char *name;
+ unsigned int parent_irq;
+ enum al_fic_state state;
+};
+
+static void al_fic_set_trigger(struct al_fic *fic,
+ struct irq_chip_generic *gc,
+ enum al_fic_state new_state)
+{
+ irq_flow_handler_t handler;
+ u32 control = readl_relaxed(fic->base + AL_FIC_CONTROL);
+
+ if (new_state == AL_FIC_CONFIGURED_LEVEL) {
+ handler = handle_level_irq;
+ control &= ~CONTROL_TRIGGER_RISING;
+ } else {
+ handler = handle_edge_irq;
+ control |= CONTROL_TRIGGER_RISING;
+ }
+ gc->chip_types->handler = handler;
+ fic->state = new_state;
+ writel_relaxed(control, fic->base + AL_FIC_CONTROL);
+}
+
+static int al_fic_irq_set_type(struct irq_data *data, unsigned int flow_type)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
+ struct al_fic *fic = gc->private;
+ enum al_fic_state new_state;
+ int ret = 0;
+
+ irq_gc_lock(gc);
+
+ if (((flow_type & IRQ_TYPE_SENSE_MASK) != IRQ_TYPE_LEVEL_HIGH) &&
+ ((flow_type & IRQ_TYPE_SENSE_MASK) != IRQ_TYPE_EDGE_RISING)) {
+ pr_debug("fic doesn't support flow type %d\n", flow_type);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ new_state = (flow_type & IRQ_TYPE_LEVEL_HIGH) ?
+ AL_FIC_CONFIGURED_LEVEL : AL_FIC_CONFIGURED_RISING_EDGE;
+
+ /*
+ * A given FIC instance can be either all level or all edge triggered.
+ * This is generally fixed depending on what pieces of HW it's wired up
+ * to.
+ *
+ * We configure it based on the sensitivity of the first source
+ * being set up, and reject any subsequent attempt at configuring it in a
+ * different way.
+ */
+ if (fic->state == AL_FIC_UNCONFIGURED) {
+ al_fic_set_trigger(fic, gc, new_state);
+ } else if (fic->state != new_state) {
+ pr_debug("fic %s state already configured to %d\n",
+ fic->name, fic->state);
+ ret = -EINVAL;
+ goto err;
+ }
+
+err:
+ irq_gc_unlock(gc);
+
+ return ret;
+}
+
+static void al_fic_irq_handler(struct irq_desc *desc)
+{
+ struct al_fic *fic = irq_desc_get_handler_data(desc);
+ struct irq_domain *domain = fic->domain;
+ struct irq_chip *irqchip = irq_desc_get_chip(desc);
+ struct irq_chip_generic *gc = irq_get_domain_generic_chip(domain, 0);
+ unsigned long pending;
+ unsigned int irq;
+ u32 hwirq;
+
+ chained_irq_enter(irqchip, desc);
+
+ pending = readl_relaxed(fic->base + AL_FIC_CAUSE);
+ pending &= ~gc->mask_cache;
+
+ for_each_set_bit(hwirq, &pending, NR_FIC_IRQS) {
+ irq = irq_find_mapping(domain, hwirq);
+ generic_handle_irq(irq);
+ }
+
+ chained_irq_exit(irqchip, desc);
+}
+
+static int al_fic_register(struct device_node *node,
+ struct al_fic *fic)
+{
+ struct irq_chip_generic *gc;
+ int ret;
+
+ fic->domain = irq_domain_add_linear(node,
+ NR_FIC_IRQS,
+ &irq_generic_chip_ops,
+ fic);
+ if (!fic->domain) {
+ pr_err("fail to add irq domain\n");
+ return -ENOMEM;
+ }
+
+ ret = irq_alloc_domain_generic_chips(fic->domain,
+ NR_FIC_IRQS,
+ 1, fic->name,
+ handle_level_irq,
+ 0, 0, IRQ_GC_INIT_MASK_CACHE);
+ if (ret) {
+ pr_err("fail to allocate generic chip (%d)\n", ret);
+ goto err_domain_remove;
+ }
+
+ gc = irq_get_domain_generic_chip(fic->domain, 0);
+ gc->reg_base = fic->base;
+ gc->chip_types->regs.mask = AL_FIC_MASK;
+ gc->chip_types->regs.ack = AL_FIC_CAUSE;
+ gc->chip_types->chip.irq_mask = irq_gc_mask_set_bit;
+ gc->chip_types->chip.irq_unmask = irq_gc_mask_clr_bit;
+ gc->chip_types->chip.irq_ack = irq_gc_ack_clr_bit;
+ gc->chip_types->chip.irq_set_type = al_fic_irq_set_type;
+ gc->chip_types->chip.flags = IRQCHIP_SKIP_SET_WAKE;
+ gc->private = fic;
+
+ irq_set_chained_handler_and_data(fic->parent_irq,
+ al_fic_irq_handler,
+ fic);
+ return 0;
+
+err_domain_remove:
+ irq_domain_remove(fic->domain);
+
+ return ret;
+}
+
+/*
+ * al_fic_wire_init() - initialize and configure fic in wire mode
+ * @node: optional pointer to the interrupt controller's device tree node
+ * @base: mmio base of the fic registers
+ * @name: name of the fic
+ * @parent_irq: interrupt of the parent controller
+ *
+ * This function configures the fic hardware to work in wire mode.
+ * In wire mode the fic generates a wired interrupt towards its parent.
+ * Whether that interrupt is level or rising-edge triggered depends on the
+ * hardware connected to this fic.
+ */
+static struct al_fic *al_fic_wire_init(struct device_node *node,
+ void __iomem *base,
+ const char *name,
+ unsigned int parent_irq)
+{
+ struct al_fic *fic;
+ int ret;
+ u32 control = CONTROL_MASK_MSI_X;
+
+ fic = kzalloc(sizeof(*fic), GFP_KERNEL);
+ if (!fic)
+ return ERR_PTR(-ENOMEM);
+
+ fic->base = base;
+ fic->parent_irq = parent_irq;
+ fic->name = name;
+
+ /* mask out all interrupts */
+ writel_relaxed(0xFFFFFFFF, fic->base + AL_FIC_MASK);
+
+ /* clear any pending interrupt */
+ writel_relaxed(0, fic->base + AL_FIC_CAUSE);
+
+ writel_relaxed(control, fic->base + AL_FIC_CONTROL);
+
+ ret = al_fic_register(node, fic);
+ if (ret) {
+ pr_err("fail to register irqchip\n");
+ goto err_free;
+ }
+
+ pr_debug("%s initialized successfully in Legacy mode (parent-irq=%u)\n",
+ fic->name, parent_irq);
+
+ return fic;
+
+err_free:
+ kfree(fic);
+ return ERR_PTR(ret);
+}
+
+static int __init al_fic_init_dt(struct device_node *node,
+ struct device_node *parent)
+{
+ int ret;
+ void __iomem *base;
+ unsigned int parent_irq;
+ struct al_fic *fic;
+
+ if (!parent) {
+ pr_err("%s: unsupported - device require a parent\n",
+ node->name);
+ return -EINVAL;
+ }
+
+ base = of_iomap(node, 0);
+ if (!base) {
+ pr_err("%s: fail to map memory\n", node->name);
+ return -ENOMEM;
+ }
+
+ parent_irq = irq_of_parse_and_map(node, 0);
+ if (!parent_irq) {
+ pr_err("%s: fail to map irq\n", node->name);
+ ret = -EINVAL;
+ goto err_unmap;
+ }
+
+ fic = al_fic_wire_init(node,
+ base,
+ node->name,
+ parent_irq);
+ if (IS_ERR(fic)) {
+ pr_err("%s: fail to initialize irqchip (%lu)\n",
+ node->name,
+ PTR_ERR(fic));
+ ret = PTR_ERR(fic);
+ goto err_irq_dispose;
+ }
+
+ return 0;
+
+err_irq_dispose:
+ irq_dispose_mapping(parent_irq);
+err_unmap:
+ iounmap(base);
+
+ return ret;
+}
+
+IRQCHIP_DECLARE(al_fic, "amazon,al-fic", al_fic_init_dt);
diff --git a/drivers/irqchip/irq-csky-mpintc.c b/drivers/irqchip/irq-csky-mpintc.c
index c67c961ab6cc..a1534edef7fa 100644
--- a/drivers/irqchip/irq-csky-mpintc.c
+++ b/drivers/irqchip/irq-csky-mpintc.c
@@ -32,8 +32,8 @@ static void __iomem *INTCL_base;
#define INTCG_CIDSTR 0x1000
#define INTCL_PICTLR 0x0
+#define INTCL_CFGR 0x14
#define INTCL_SIGR 0x60
-#define INTCL_HPPIR 0x68
#define INTCL_RDYIR 0x6c
#define INTCL_SENR 0xa0
#define INTCL_CENR 0xa4
@@ -41,21 +41,49 @@ static void __iomem *INTCL_base;
static DEFINE_PER_CPU(void __iomem *, intcl_reg);
+static unsigned long *__trigger;
+
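+/*
+ * Each interrupt has a 2-bit trigger-type field (see csky_mpintc_set_type:
+ * 0 level-high, 1 level-low, 2 edge-rising, 3 edge-falling), packed 16
+ * fields per 32-bit configuration register. TRIG_BYTE_OFFSET/TRIG_BIT_OFFSET
+ * locate the register and bit position for an irq, and TRIG_BASE selects the
+ * per-cpu local CFGR block for irqs below COMM_IRQ_BASE or the global CICFGR
+ * block for the common (shared) irqs.
+ */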
+#define IRQ_OFFSET(irq) ((irq < COMM_IRQ_BASE) ? irq : (irq - COMM_IRQ_BASE))
+
+#define TRIG_BYTE_OFFSET(i) ((((i) * 2) / 32) * 4)
+#define TRIG_BIT_OFFSET(i) (((i) * 2) % 32)
+
+#define TRIG_VAL(trigger, irq) (trigger << TRIG_BIT_OFFSET(IRQ_OFFSET(irq)))
+#define TRIG_VAL_MSK(irq) (~(3 << TRIG_BIT_OFFSET(IRQ_OFFSET(irq))))
+
+#define TRIG_BASE(irq) \
+ (TRIG_BYTE_OFFSET(IRQ_OFFSET(irq)) + ((irq < COMM_IRQ_BASE) ? \
+ (this_cpu_read(intcl_reg) + INTCL_CFGR) : (INTCG_base + INTCG_CICFGR)))
+
+static DEFINE_SPINLOCK(setup_lock);
+static void setup_trigger(unsigned long irq, unsigned long trigger)
+{
+ unsigned int tmp;
+
+ spin_lock(&setup_lock);
+
+ /* setup trigger */
+ tmp = readl_relaxed(TRIG_BASE(irq)) & TRIG_VAL_MSK(irq);
+
+ writel_relaxed(tmp | TRIG_VAL(trigger, irq), TRIG_BASE(irq));
+
+ spin_unlock(&setup_lock);
+}
+
static void csky_mpintc_handler(struct pt_regs *regs)
{
void __iomem *reg_base = this_cpu_read(intcl_reg);
- do {
- handle_domain_irq(root_domain,
- readl_relaxed(reg_base + INTCL_RDYIR),
- regs);
- } while (readl_relaxed(reg_base + INTCL_HPPIR) & BIT(31));
+ handle_domain_irq(root_domain,
+ readl_relaxed(reg_base + INTCL_RDYIR), regs);
}
static void csky_mpintc_enable(struct irq_data *d)
{
void __iomem *reg_base = this_cpu_read(intcl_reg);
+ setup_trigger(d->hwirq, __trigger[d->hwirq]);
+
writel_relaxed(d->hwirq, reg_base + INTCL_SENR);
}
@@ -73,6 +101,28 @@ static void csky_mpintc_eoi(struct irq_data *d)
writel_relaxed(d->hwirq, reg_base + INTCL_CACR);
}
+static int csky_mpintc_set_type(struct irq_data *d, unsigned int type)
+{
+ switch (type & IRQ_TYPE_SENSE_MASK) {
+ case IRQ_TYPE_LEVEL_HIGH:
+ __trigger[d->hwirq] = 0;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ __trigger[d->hwirq] = 1;
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ __trigger[d->hwirq] = 2;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ __trigger[d->hwirq] = 3;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
#ifdef CONFIG_SMP
static int csky_irq_set_affinity(struct irq_data *d,
const struct cpumask *mask_val,
@@ -89,8 +139,19 @@ static int csky_irq_set_affinity(struct irq_data *d,
if (cpu >= nr_cpu_ids)
return -EINVAL;
- /* Enable interrupt destination */
- cpu |= BIT(31);
+ /*
+ * The csky,mpintc supports automatic irq delivery, but it can
+ * only deliver an external irq either to a single cpu or to
+ * all cpus; it cannot deliver an external irq to an arbitrary
+ * group of cpus described by a cpu_mask.
+ *
+ * Therefore we only use auto delivery mode when the affinity
+ * mask_val is equal to cpu_present_mask.
+ */
+ if (cpumask_equal(mask_val, cpu_present_mask))
+ cpu = 0;
+ else
+ cpu |= BIT(31);
writel_relaxed(cpu, INTCG_base + INTCG_CIDSTR + offset);
@@ -105,6 +166,7 @@ static struct irq_chip csky_irq_chip = {
.irq_eoi = csky_mpintc_eoi,
.irq_enable = csky_mpintc_enable,
.irq_disable = csky_mpintc_disable,
+ .irq_set_type = csky_mpintc_set_type,
#ifdef CONFIG_SMP
.irq_set_affinity = csky_irq_set_affinity,
#endif
@@ -125,9 +187,26 @@ static int csky_irqdomain_map(struct irq_domain *d, unsigned int irq,
return 0;
}
+static int csky_irq_domain_xlate_cells(struct irq_domain *d,
+ struct device_node *ctrlr, const u32 *intspec,
+ unsigned int intsize, unsigned long *out_hwirq,
+ unsigned int *out_type)
+{
+ if (WARN_ON(intsize < 1))
+ return -EINVAL;
+
+ *out_hwirq = intspec[0];
+ if (intsize > 1)
+ *out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
+ else
+ *out_type = IRQ_TYPE_LEVEL_HIGH;
+
+ return 0;
+}
+
static const struct irq_domain_ops csky_irqdomain_ops = {
.map = csky_irqdomain_map,
- .xlate = irq_domain_xlate_onecell,
+ .xlate = csky_irq_domain_xlate_cells,
};
#ifdef CONFIG_SMP
@@ -161,6 +240,10 @@ csky_mpintc_init(struct device_node *node, struct device_node *parent)
if (ret < 0)
nr_irq = INTC_IRQS;
+ __trigger = kcalloc(nr_irq, sizeof(unsigned long), GFP_KERNEL);
+ if (__trigger == NULL)
+ return -ENXIO;
+
if (INTCG_base == NULL) {
INTCG_base = ioremap(mfcr("cr<31, 14>"),
INTCL_SIZE*nr_cpu_ids + INTCG_SIZE);
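
A note on the trigger setup added above: each hwirq owns a 2-bit field inside a 32-bit trigger register, and setup_trigger() performs a masked read-modify-write on that field (the kernel version additionally rebases the hwirq with IRQ_OFFSET(), which is omitted here). The following standalone sketch, with a made-up hwirq value, reproduces the offset arithmetic outside the kernel:

/* Illustrative sketch of the 2-bit-per-hwirq trigger packing used above. */
#include <stdio.h>
#include <stdint.h>

#define TRIG_BYTE_OFFSET(i)	((((i) * 2) / 32) * 4)	/* 32-bit word holding hwirq i */
#define TRIG_BIT_OFFSET(i)	(((i) * 2) % 32)	/* bit position inside that word */

/* Read-modify-write of the 2-bit trigger field, as setup_trigger() does. */
static uint32_t set_trigger(uint32_t word, unsigned int hwirq, uint32_t trigger)
{
	uint32_t mask = ~(3u << TRIG_BIT_OFFSET(hwirq));

	return (word & mask) | (trigger << TRIG_BIT_OFFSET(hwirq));
}

int main(void)
{
	unsigned int hwirq = 35;	/* hypothetical hwirq number */
	uint32_t word = 0;

	word = set_trigger(word, hwirq, 2);	/* 2 == edge rising in the table above */
	printf("hwirq %u -> byte offset %u, bit offset %u, word 0x%08x\n",
	       hwirq, TRIG_BYTE_OFFSET(hwirq), TRIG_BIT_OFFSET(hwirq), word);
	return 0;
}
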
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
index 875ac80f690b..7338f90b2f9e 100644
--- a/drivers/irqchip/irq-gic-v2m.c
+++ b/drivers/irqchip/irq-gic-v2m.c
@@ -53,6 +53,7 @@
/* List of flags for specific v2m implementation */
#define GICV2M_NEEDS_SPI_OFFSET 0x00000001
+#define GICV2M_GRAVITON_ADDRESS_ONLY 0x00000002
static LIST_HEAD(v2m_nodes);
static DEFINE_SPINLOCK(v2m_lock);
@@ -95,15 +96,26 @@ static struct msi_domain_info gicv2m_msi_domain_info = {
.chip = &gicv2m_msi_irq_chip,
};
+static phys_addr_t gicv2m_get_msi_addr(struct v2m_data *v2m, int hwirq)
+{
+ if (v2m->flags & GICV2M_GRAVITON_ADDRESS_ONLY)
+ return v2m->res.start | ((hwirq - 32) << 3);
+ else
+ return v2m->res.start + V2M_MSI_SETSPI_NS;
+}
+
static void gicv2m_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
struct v2m_data *v2m = irq_data_get_irq_chip_data(data);
- phys_addr_t addr = v2m->res.start + V2M_MSI_SETSPI_NS;
+ phys_addr_t addr = gicv2m_get_msi_addr(v2m, data->hwirq);
msg->address_hi = upper_32_bits(addr);
msg->address_lo = lower_32_bits(addr);
- msg->data = data->hwirq;
+ if (v2m->flags & GICV2M_GRAVITON_ADDRESS_ONLY)
+ msg->data = 0;
+ else
+ msg->data = data->hwirq;
if (v2m->flags & GICV2M_NEEDS_SPI_OFFSET)
msg->data -= v2m->spi_offset;
@@ -185,7 +197,7 @@ static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
hwirq = v2m->spi_start + offset;
err = iommu_dma_prepare_msi(info->desc,
- v2m->res.start + V2M_MSI_SETSPI_NS);
+ gicv2m_get_msi_addr(v2m, hwirq));
if (err)
return err;
@@ -304,7 +316,7 @@ static int gicv2m_allocate_domains(struct irq_domain *parent)
static int __init gicv2m_init_one(struct fwnode_handle *fwnode,
u32 spi_start, u32 nr_spis,
- struct resource *res)
+ struct resource *res, u32 flags)
{
int ret;
struct v2m_data *v2m;
@@ -317,6 +329,7 @@ static int __init gicv2m_init_one(struct fwnode_handle *fwnode,
INIT_LIST_HEAD(&v2m->entry);
v2m->fwnode = fwnode;
+ v2m->flags = flags;
memcpy(&v2m->res, res, sizeof(struct resource));
@@ -331,7 +344,14 @@ static int __init gicv2m_init_one(struct fwnode_handle *fwnode,
v2m->spi_start = spi_start;
v2m->nr_spis = nr_spis;
} else {
- u32 typer = readl_relaxed(v2m->base + V2M_MSI_TYPER);
+ u32 typer;
+
+ /* Graviton should always have explicit spi_start/nr_spis */
+ if (v2m->flags & GICV2M_GRAVITON_ADDRESS_ONLY) {
+ ret = -EINVAL;
+ goto err_iounmap;
+ }
+ typer = readl_relaxed(v2m->base + V2M_MSI_TYPER);
v2m->spi_start = V2M_MSI_TYPER_BASE_SPI(typer);
v2m->nr_spis = V2M_MSI_TYPER_NUM_SPI(typer);
@@ -352,18 +372,21 @@ static int __init gicv2m_init_one(struct fwnode_handle *fwnode,
*
 * Broadcom NS2 GICv2m implementation has an erratum where the MSI data
* is 'spi_number - 32'
+ *
+ * Reading that register fails on the Graviton implementation
*/
- switch (readl_relaxed(v2m->base + V2M_MSI_IIDR)) {
- case XGENE_GICV2M_MSI_IIDR:
- v2m->flags |= GICV2M_NEEDS_SPI_OFFSET;
- v2m->spi_offset = v2m->spi_start;
- break;
- case BCM_NS2_GICV2M_MSI_IIDR:
- v2m->flags |= GICV2M_NEEDS_SPI_OFFSET;
- v2m->spi_offset = 32;
- break;
+ if (!(v2m->flags & GICV2M_GRAVITON_ADDRESS_ONLY)) {
+ switch (readl_relaxed(v2m->base + V2M_MSI_IIDR)) {
+ case XGENE_GICV2M_MSI_IIDR:
+ v2m->flags |= GICV2M_NEEDS_SPI_OFFSET;
+ v2m->spi_offset = v2m->spi_start;
+ break;
+ case BCM_NS2_GICV2M_MSI_IIDR:
+ v2m->flags |= GICV2M_NEEDS_SPI_OFFSET;
+ v2m->spi_offset = 32;
+ break;
+ }
}
-
v2m->bm = kcalloc(BITS_TO_LONGS(v2m->nr_spis), sizeof(long),
GFP_KERNEL);
if (!v2m->bm) {
@@ -416,7 +439,8 @@ static int __init gicv2m_of_init(struct fwnode_handle *parent_handle,
pr_info("DT overriding V2M MSI_TYPER (base:%u, num:%u)\n",
spi_start, nr_spis);
- ret = gicv2m_init_one(&child->fwnode, spi_start, nr_spis, &res);
+ ret = gicv2m_init_one(&child->fwnode, spi_start, nr_spis,
+ &res, 0);
if (ret) {
of_node_put(child);
break;
@@ -448,6 +472,25 @@ static struct fwnode_handle *gicv2m_get_fwnode(struct device *dev)
return data->fwnode;
}
+static bool acpi_check_amazon_graviton_quirks(void)
+{
+ static struct acpi_table_madt *madt;
+ acpi_status status;
+ bool rc = false;
+
+#define ACPI_AMZN_OEM_ID "AMAZON"
+
+ status = acpi_get_table(ACPI_SIG_MADT, 0,
+ (struct acpi_table_header **)&madt);
+
+ if (ACPI_FAILURE(status) || !madt)
+ return rc;
+ rc = !memcmp(madt->header.oem_id, ACPI_AMZN_OEM_ID, ACPI_OEM_ID_SIZE);
+ acpi_put_table((struct acpi_table_header *)madt);
+
+ return rc;
+}
+
static int __init
acpi_parse_madt_msi(union acpi_subtable_headers *header,
const unsigned long end)
@@ -457,6 +500,7 @@ acpi_parse_madt_msi(union acpi_subtable_headers *header,
u32 spi_start = 0, nr_spis = 0;
struct acpi_madt_generic_msi_frame *m;
struct fwnode_handle *fwnode;
+ u32 flags = 0;
m = (struct acpi_madt_generic_msi_frame *)header;
if (BAD_MADT_ENTRY(m, end))
@@ -466,6 +510,13 @@ acpi_parse_madt_msi(union acpi_subtable_headers *header,
res.end = m->base_address + SZ_4K - 1;
res.flags = IORESOURCE_MEM;
+ if (acpi_check_amazon_graviton_quirks()) {
+ pr_info("applying Amazon Graviton quirk\n");
+ res.end = res.start + SZ_8K - 1;
+ flags |= GICV2M_GRAVITON_ADDRESS_ONLY;
+ gicv2m_msi_domain_info.flags &= ~MSI_FLAG_MULTI_PCI_MSI;
+ }
+
if (m->flags & ACPI_MADT_OVERRIDE_SPI_VALUES) {
spi_start = m->spi_base;
nr_spis = m->spi_count;
@@ -480,7 +531,7 @@ acpi_parse_madt_msi(union acpi_subtable_headers *header,
return -EINVAL;
}
- ret = gicv2m_init_one(fwnode, spi_start, nr_spis, &res);
+ ret = gicv2m_init_one(fwnode, spi_start, nr_spis, &res, flags);
if (ret)
irq_domain_free_fwnode(fwnode);
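
With the Graviton quirk above, the target SPI is encoded into the doorbell address itself and the MSI data is ignored, whereas a standard GICv2m frame has a single SETSPI register and carries the SPI number in the data word. A minimal userspace sketch of gicv2m_get_msi_addr()'s arithmetic follows; the frame base address is invented, and V2M_MSI_SETSPI_NS is the usual 0x040 register offset:

/* Illustrative only: how the doorbell address is chosen per platform. */
#include <stdio.h>
#include <stdint.h>

#define V2M_MSI_SETSPI_NS 0x040

static uint64_t msi_addr(uint64_t frame_base, unsigned int hwirq, int graviton)
{
	if (graviton)
		/* SPI number is carried in address bits [3+], MSI data is ignored */
		return frame_base | ((uint64_t)(hwirq - 32) << 3);

	/* Standard GICv2m: one doorbell register, SPI number goes in MSI data */
	return frame_base + V2M_MSI_SETSPI_NS;
}

int main(void)
{
	uint64_t base = 0x10080000;	/* hypothetical MSI frame base */

	printf("standard: %#llx\n", (unsigned long long)msi_addr(base, 96, 0));
	printf("graviton: %#llx\n", (unsigned long long)msi_addr(base, 96, 1));
	return 0;
}
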
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index d29b44b677e4..730fbe0e2a9d 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -185,7 +185,7 @@ static struct its_collection *dev_event_to_col(struct its_device *its_dev,
static struct its_collection *valid_col(struct its_collection *col)
{
- if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(0, 15)))
+ if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0)))
return NULL;
return col;
@@ -733,32 +733,43 @@ static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
}
static int its_wait_for_range_completion(struct its_node *its,
- struct its_cmd_block *from,
+ u64 prev_idx,
struct its_cmd_block *to)
{
- u64 rd_idx, from_idx, to_idx;
+ u64 rd_idx, to_idx, linear_idx;
u32 count = 1000000; /* 1s! */
- from_idx = its_cmd_ptr_to_offset(its, from);
+ /* Linearize to_idx if the command set has wrapped around */
to_idx = its_cmd_ptr_to_offset(its, to);
+ if (to_idx < prev_idx)
+ to_idx += ITS_CMD_QUEUE_SZ;
+
+ linear_idx = prev_idx;
while (1) {
+ s64 delta;
+
rd_idx = readl_relaxed(its->base + GITS_CREADR);
- /* Direct case */
- if (from_idx < to_idx && rd_idx >= to_idx)
- break;
+ /*
+ * Compute the read pointer progress, taking the
+ * potential wrap-around into account.
+ */
+ delta = rd_idx - prev_idx;
+ if (rd_idx < prev_idx)
+ delta += ITS_CMD_QUEUE_SZ;
- /* Wrapped case */
- if (from_idx >= to_idx && rd_idx >= to_idx && rd_idx < from_idx)
+ linear_idx += delta;
+ if (linear_idx >= to_idx)
break;
count--;
if (!count) {
- pr_err_ratelimited("ITS queue timeout (%llu %llu %llu)\n",
- from_idx, to_idx, rd_idx);
+ pr_err_ratelimited("ITS queue timeout (%llu %llu)\n",
+ to_idx, linear_idx);
return -1;
}
+ prev_idx = rd_idx;
cpu_relax();
udelay(1);
}
@@ -775,6 +786,7 @@ void name(struct its_node *its, \
struct its_cmd_block *cmd, *sync_cmd, *next_cmd; \
synctype *sync_obj; \
unsigned long flags; \
+ u64 rd_idx; \
\
raw_spin_lock_irqsave(&its->lock, flags); \
\
@@ -796,10 +808,11 @@ void name(struct its_node *its, \
} \
\
post: \
+ rd_idx = readl_relaxed(its->base + GITS_CREADR); \
next_cmd = its_post_commands(its); \
raw_spin_unlock_irqrestore(&its->lock, flags); \
\
- if (its_wait_for_range_completion(its, cmd, next_cmd)) \
+ if (its_wait_for_range_completion(its, rd_idx, next_cmd)) \
pr_err_ratelimited("ITS cmd %ps failed\n", builder); \
}
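
The its_wait_for_range_completion() rework above tracks the read pointer as a single linearized index: each poll computes the pointer's progress, adding the queue size when CREADR wraps, and completion is declared once the accumulated index reaches to_idx. A small userspace sketch of that accounting (queue size and indices are made up) follows:

/* Illustrative only: wrap-aware progress tracking on a small ring. */
#include <stdio.h>
#include <stdint.h>

#define QUEUE_SZ 64	/* stands in for ITS_CMD_QUEUE_SZ (in slots here) */

/* Distance the read pointer advanced, allowing for at most one wrap. */
static uint64_t progress(uint64_t prev_idx, uint64_t rd_idx)
{
	if (rd_idx >= prev_idx)
		return rd_idx - prev_idx;
	return rd_idx + QUEUE_SZ - prev_idx;
}

int main(void)
{
	uint64_t prev = 56, rd = 8;	/* the read pointer wrapped past slot 63 */
	uint64_t to_idx = 70;		/* completion target, already linearized */
	uint64_t linear = prev + progress(prev, rd);

	printf("advanced %llu slots, linear index %llu, done: %s\n",
	       (unsigned long long)progress(prev, rd),
	       (unsigned long long)linear,
	       linear >= to_idx ? "yes" : "not yet");
	return 0;
}
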
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 6377cb864f4c..9bca4896fa6f 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -461,8 +461,12 @@ static void gic_deactivate_unhandled(u32 irqnr)
static inline void gic_handle_nmi(u32 irqnr, struct pt_regs *regs)
{
+ bool irqs_enabled = interrupts_enabled(regs);
int err;
+ if (irqs_enabled)
+ nmi_enter();
+
if (static_branch_likely(&supports_deactivate_key))
gic_write_eoir(irqnr);
/*
@@ -474,6 +478,9 @@ static inline void gic_handle_nmi(u32 irqnr, struct pt_regs *regs)
err = handle_domain_nmi(gic_data.domain, irqnr, regs);
if (err)
gic_deactivate_unhandled(irqnr);
+
+ if (irqs_enabled)
+ nmi_exit();
}
static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
@@ -1332,6 +1339,9 @@ static int __init gic_init_bases(void __iomem *dist_base,
if (gic_dist_supports_lpis()) {
its_init(handle, &gic_data.rdists, gic_data.domain);
its_cpu_init();
+ } else {
+ if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
+ gicv2m_init(handle, gic_data.domain);
}
if (gic_prio_masking_enabled()) {
diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c
index a89c693d5b90..3dd28382d5f5 100644
--- a/drivers/irqchip/irq-mbigen.c
+++ b/drivers/irqchip/irq-mbigen.c
@@ -344,8 +344,7 @@ static int mbigen_device_probe(struct platform_device *pdev)
err = -EINVAL;
if (err) {
- dev_err(&pdev->dev, "Failed to create mbi-gen@%p irqdomain",
- mgn_chip->base);
+ dev_err(&pdev->dev, "Failed to create mbi-gen irqdomain\n");
return err;
}
diff --git a/drivers/irqchip/irq-meson-gpio.c b/drivers/irqchip/irq-meson-gpio.c
index 8eb92eb98f54..dcdc23b9dce6 100644
--- a/drivers/irqchip/irq-meson-gpio.c
+++ b/drivers/irqchip/irq-meson-gpio.c
@@ -60,6 +60,7 @@ static const struct of_device_id meson_irq_gpio_matches[] = {
{ .compatible = "amlogic,meson-gxbb-gpio-intc", .data = &gxbb_params },
{ .compatible = "amlogic,meson-gxl-gpio-intc", .data = &gxl_params },
{ .compatible = "amlogic,meson-axg-gpio-intc", .data = &axg_params },
+ { .compatible = "amlogic,meson-g12a-gpio-intc", .data = &axg_params },
{ }
};
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index d32268cc1174..f3985469c221 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -388,7 +388,7 @@ static void gic_all_vpes_irq_cpu_online(struct irq_data *d)
intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
cd = irq_data_get_irq_chip_data(d);
- write_gic_vl_map(intr, cd->map);
+ write_gic_vl_map(mips_gic_vx_map_reg(intr), cd->map);
if (cd->mask)
write_gic_vl_smask(BIT(intr));
}
@@ -517,7 +517,7 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
spin_lock_irqsave(&gic_lock, flags);
for_each_online_cpu(cpu) {
write_gic_vl_other(mips_cm_vp_id(cpu));
- write_gic_vo_map(intr, map);
+ write_gic_vo_map(mips_gic_vx_map_reg(intr), map);
}
spin_unlock_irqrestore(&gic_lock, flags);
diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
index 04c05a18600c..f82bc60a6793 100644
--- a/drivers/irqchip/irq-renesas-intc-irqpin.c
+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
@@ -508,7 +508,8 @@ static int intc_irqpin_probe(struct platform_device *pdev)
}
irq_chip = &p->irq_chip;
- irq_chip->name = name;
+ irq_chip->name = "intc-irqpin";
+ irq_chip->parent_device = dev;
irq_chip->irq_mask = disable_fn;
irq_chip->irq_unmask = enable_fn;
irq_chip->irq_set_type = intc_irqpin_irq_set_type;
diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c
index a449a7c839b3..11abc09ef76c 100644
--- a/drivers/irqchip/irq-renesas-irqc.c
+++ b/drivers/irqchip/irq-renesas-irqc.c
@@ -7,7 +7,6 @@
#include <linux/init.h>
#include <linux/platform_device.h>
-#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/io.h>
@@ -48,7 +47,7 @@ struct irqc_priv {
void __iomem *cpu_int_base;
struct irqc_irq irq[IRQC_IRQ_MAX];
unsigned int number_of_irqs;
- struct platform_device *pdev;
+ struct device *dev;
struct irq_chip_generic *gc;
struct irq_domain *irq_domain;
atomic_t wakeup_path;
@@ -61,8 +60,7 @@ static struct irqc_priv *irq_data_to_priv(struct irq_data *data)
static void irqc_dbg(struct irqc_irq *i, char *str)
{
- dev_dbg(&i->p->pdev->dev, "%s (%d:%d)\n",
- str, i->requested_irq, i->hw_irq);
+ dev_dbg(i->p->dev, "%s (%d:%d)\n", str, i->requested_irq, i->hw_irq);
}
static unsigned char irqc_sense[IRQ_TYPE_SENSE_MASK + 1] = {
@@ -125,33 +123,22 @@ static irqreturn_t irqc_irq_handler(int irq, void *dev_id)
static int irqc_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
+ const char *name = dev_name(dev);
struct irqc_priv *p;
- struct resource *io;
struct resource *irq;
- const char *name = dev_name(&pdev->dev);
int ret;
int k;
- p = kzalloc(sizeof(*p), GFP_KERNEL);
- if (!p) {
- dev_err(&pdev->dev, "failed to allocate driver data\n");
- ret = -ENOMEM;
- goto err0;
- }
+ p = devm_kzalloc(dev, sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
- p->pdev = pdev;
+ p->dev = dev;
platform_set_drvdata(pdev, p);
- pm_runtime_enable(&pdev->dev);
- pm_runtime_get_sync(&pdev->dev);
-
- /* get hold of manadatory IOMEM */
- io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!io) {
- dev_err(&pdev->dev, "not enough IOMEM resources\n");
- ret = -EINVAL;
- goto err1;
- }
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
/* allow any number of IRQs between 1 and IRQC_IRQ_MAX */
for (k = 0; k < IRQC_IRQ_MAX; k++) {
@@ -166,42 +153,41 @@ static int irqc_probe(struct platform_device *pdev)
p->number_of_irqs = k;
if (p->number_of_irqs < 1) {
- dev_err(&pdev->dev, "not enough IRQ resources\n");
+ dev_err(dev, "not enough IRQ resources\n");
ret = -EINVAL;
- goto err1;
+ goto err_runtime_pm_disable;
}
/* ioremap IOMEM and setup read/write callbacks */
- p->iomem = ioremap_nocache(io->start, resource_size(io));
- if (!p->iomem) {
- dev_err(&pdev->dev, "failed to remap IOMEM\n");
- ret = -ENXIO;
- goto err2;
+ p->iomem = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(p->iomem)) {
+ ret = PTR_ERR(p->iomem);
+ goto err_runtime_pm_disable;
}
p->cpu_int_base = p->iomem + IRQC_INT_CPU_BASE(0); /* SYS-SPI */
- p->irq_domain = irq_domain_add_linear(pdev->dev.of_node,
- p->number_of_irqs,
+ p->irq_domain = irq_domain_add_linear(dev->of_node, p->number_of_irqs,
&irq_generic_chip_ops, p);
if (!p->irq_domain) {
ret = -ENXIO;
- dev_err(&pdev->dev, "cannot initialize irq domain\n");
- goto err2;
+ dev_err(dev, "cannot initialize irq domain\n");
+ goto err_runtime_pm_disable;
}
ret = irq_alloc_domain_generic_chips(p->irq_domain, p->number_of_irqs,
- 1, name, handle_level_irq,
+ 1, "irqc", handle_level_irq,
0, 0, IRQ_GC_INIT_NESTED_LOCK);
if (ret) {
- dev_err(&pdev->dev, "cannot allocate generic chip\n");
- goto err3;
+ dev_err(dev, "cannot allocate generic chip\n");
+ goto err_remove_domain;
}
p->gc = irq_get_domain_generic_chip(p->irq_domain, 0);
p->gc->reg_base = p->cpu_int_base;
p->gc->chip_types[0].regs.enable = IRQC_EN_SET;
p->gc->chip_types[0].regs.disable = IRQC_EN_STS;
+ p->gc->chip_types[0].chip.parent_device = dev;
p->gc->chip_types[0].chip.irq_mask = irq_gc_mask_disable_reg;
p->gc->chip_types[0].chip.irq_unmask = irq_gc_unmask_enable_reg;
p->gc->chip_types[0].chip.irq_set_type = irqc_irq_set_type;
@@ -210,46 +196,33 @@ static int irqc_probe(struct platform_device *pdev)
/* request interrupts one by one */
for (k = 0; k < p->number_of_irqs; k++) {
- if (request_irq(p->irq[k].requested_irq, irqc_irq_handler,
- 0, name, &p->irq[k])) {
- dev_err(&pdev->dev, "failed to request IRQ\n");
+ if (devm_request_irq(dev, p->irq[k].requested_irq,
+ irqc_irq_handler, 0, name, &p->irq[k])) {
+ dev_err(dev, "failed to request IRQ\n");
ret = -ENOENT;
- goto err4;
+ goto err_remove_domain;
}
}
- dev_info(&pdev->dev, "driving %d irqs\n", p->number_of_irqs);
+ dev_info(dev, "driving %d irqs\n", p->number_of_irqs);
return 0;
-err4:
- while (--k >= 0)
- free_irq(p->irq[k].requested_irq, &p->irq[k]);
-err3:
+err_remove_domain:
irq_domain_remove(p->irq_domain);
-err2:
- iounmap(p->iomem);
-err1:
- pm_runtime_put(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
- kfree(p);
-err0:
+err_runtime_pm_disable:
+ pm_runtime_put(dev);
+ pm_runtime_disable(dev);
return ret;
}
static int irqc_remove(struct platform_device *pdev)
{
struct irqc_priv *p = platform_get_drvdata(pdev);
- int k;
-
- for (k = 0; k < p->number_of_irqs; k++)
- free_irq(p->irq[k].requested_irq, &p->irq[k]);
irq_domain_remove(p->irq_domain);
- iounmap(p->iomem);
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- kfree(p);
return 0;
}
diff --git a/drivers/irqchip/irq-renesas-rza1.c b/drivers/irqchip/irq-renesas-rza1.c
new file mode 100644
index 000000000000..b0d46ac42b89
--- /dev/null
+++ b/drivers/irqchip/irq-renesas-rza1.c
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas RZ/A1 IRQC Driver
+ *
+ * Copyright (C) 2019 Glider bvba
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#define IRQC_NUM_IRQ 8
+
+#define ICR0 0 /* Interrupt Control Register 0 */
+
+#define ICR0_NMIL BIT(15) /* NMI Input Level (0=low, 1=high) */
+#define ICR0_NMIE BIT(8) /* Edge Select (0=falling, 1=rising) */
+#define ICR0_NMIF BIT(1) /* NMI Interrupt Request */
+
+#define ICR1 2 /* Interrupt Control Register 1 */
+
+#define ICR1_IRQS(n, sense) ((sense) << ((n) * 2)) /* IRQ Sense Select */
+#define ICR1_IRQS_LEVEL_LOW 0
+#define ICR1_IRQS_EDGE_FALLING 1
+#define ICR1_IRQS_EDGE_RISING 2
+#define ICR1_IRQS_EDGE_BOTH 3
+#define ICR1_IRQS_MASK(n) ICR1_IRQS((n), 3)
+
+#define IRQRR 4 /* IRQ Interrupt Request Register */
+
+
+struct rza1_irqc_priv {
+ struct device *dev;
+ void __iomem *base;
+ struct irq_chip chip;
+ struct irq_domain *irq_domain;
+ struct of_phandle_args map[IRQC_NUM_IRQ];
+};
+
+static struct rza1_irqc_priv *irq_data_to_priv(struct irq_data *data)
+{
+ return data->domain->host_data;
+}
+
+static void rza1_irqc_eoi(struct irq_data *d)
+{
+ struct rza1_irqc_priv *priv = irq_data_to_priv(d);
+ u16 bit = BIT(irqd_to_hwirq(d));
+ u16 tmp;
+
+ tmp = readw_relaxed(priv->base + IRQRR);
+ if (tmp & bit)
+ writew_relaxed(GENMASK(IRQC_NUM_IRQ - 1, 0) & ~bit,
+ priv->base + IRQRR);
+
+ irq_chip_eoi_parent(d);
+}
+
+static int rza1_irqc_set_type(struct irq_data *d, unsigned int type)
+{
+ struct rza1_irqc_priv *priv = irq_data_to_priv(d);
+ unsigned int hw_irq = irqd_to_hwirq(d);
+ u16 sense, tmp;
+
+ switch (type & IRQ_TYPE_SENSE_MASK) {
+ case IRQ_TYPE_LEVEL_LOW:
+ sense = ICR1_IRQS_LEVEL_LOW;
+ break;
+
+ case IRQ_TYPE_EDGE_FALLING:
+ sense = ICR1_IRQS_EDGE_FALLING;
+ break;
+
+ case IRQ_TYPE_EDGE_RISING:
+ sense = ICR1_IRQS_EDGE_RISING;
+ break;
+
+ case IRQ_TYPE_EDGE_BOTH:
+ sense = ICR1_IRQS_EDGE_BOTH;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ tmp = readw_relaxed(priv->base + ICR1);
+ tmp &= ~ICR1_IRQS_MASK(hw_irq);
+ tmp |= ICR1_IRQS(hw_irq, sense);
+ writew_relaxed(tmp, priv->base + ICR1);
+ return 0;
+}
+
+static int rza1_irqc_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ struct rza1_irqc_priv *priv = domain->host_data;
+ struct irq_fwspec *fwspec = arg;
+ unsigned int hwirq = fwspec->param[0];
+ struct irq_fwspec spec;
+ unsigned int i;
+ int ret;
+
+ ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq, &priv->chip,
+ priv);
+ if (ret)
+ return ret;
+
+ spec.fwnode = &priv->dev->of_node->fwnode;
+ spec.param_count = priv->map[hwirq].args_count;
+ for (i = 0; i < spec.param_count; i++)
+ spec.param[i] = priv->map[hwirq].args[i];
+
+ return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &spec);
+}
+
+static int rza1_irqc_translate(struct irq_domain *domain,
+ struct irq_fwspec *fwspec, unsigned long *hwirq,
+ unsigned int *type)
+{
+ if (fwspec->param_count != 2 || fwspec->param[0] >= IRQC_NUM_IRQ)
+ return -EINVAL;
+
+ *hwirq = fwspec->param[0];
+ *type = fwspec->param[1];
+ return 0;
+}
+
+static const struct irq_domain_ops rza1_irqc_domain_ops = {
+ .alloc = rza1_irqc_alloc,
+ .translate = rza1_irqc_translate,
+};
+
+static int rza1_irqc_parse_map(struct rza1_irqc_priv *priv,
+ struct device_node *gic_node)
+{
+ unsigned int imaplen, i, j, ret;
+ struct device *dev = priv->dev;
+ struct device_node *ipar;
+ const __be32 *imap;
+ u32 intsize;
+
+ imap = of_get_property(dev->of_node, "interrupt-map", &imaplen);
+ if (!imap)
+ return -EINVAL;
+
+ for (i = 0; i < IRQC_NUM_IRQ; i++) {
+ if (imaplen < 3)
+ return -EINVAL;
+
+ /* Check interrupt number, ignore sense */
+ if (be32_to_cpup(imap) != i)
+ return -EINVAL;
+
+ ipar = of_find_node_by_phandle(be32_to_cpup(imap + 2));
+ if (ipar != gic_node) {
+ of_node_put(ipar);
+ return -EINVAL;
+ }
+
+ imap += 3;
+ imaplen -= 3;
+
+ ret = of_property_read_u32(ipar, "#interrupt-cells", &intsize);
+ of_node_put(ipar);
+ if (ret)
+ return ret;
+
+ if (imaplen < intsize)
+ return -EINVAL;
+
+ priv->map[i].args_count = intsize;
+ for (j = 0; j < intsize; j++)
+ priv->map[i].args[j] = be32_to_cpup(imap++);
+
+ imaplen -= intsize;
+ }
+
+ return 0;
+}
+
+static int rza1_irqc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct irq_domain *parent = NULL;
+ struct device_node *gic_node;
+ struct rza1_irqc_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, priv);
+ priv->dev = dev;
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ gic_node = of_irq_find_parent(np);
+ if (gic_node)
+ parent = irq_find_host(gic_node);
+
+ if (!parent) {
+ dev_err(dev, "cannot find parent domain\n");
+ ret = -ENODEV;
+ goto out_put_node;
+ }
+
+ ret = rza1_irqc_parse_map(priv, gic_node);
+ if (ret) {
+ dev_err(dev, "cannot parse %s: %d\n", "interrupt-map", ret);
+ goto out_put_node;
+ }
+
+ priv->chip.name = "rza1-irqc";
+ priv->chip.irq_mask = irq_chip_mask_parent;
+ priv->chip.irq_unmask = irq_chip_unmask_parent;
+ priv->chip.irq_eoi = rza1_irqc_eoi;
+ priv->chip.irq_retrigger = irq_chip_retrigger_hierarchy;
+ priv->chip.irq_set_type = rza1_irqc_set_type;
+ priv->chip.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE;
+
+ priv->irq_domain = irq_domain_add_hierarchy(parent, 0, IRQC_NUM_IRQ,
+ np, &rza1_irqc_domain_ops,
+ priv);
+ if (!priv->irq_domain) {
+ dev_err(dev, "cannot initialize irq domain\n");
+ ret = -ENOMEM;
+ }
+
+out_put_node:
+ of_node_put(gic_node);
+ return ret;
+}
+
+static int rza1_irqc_remove(struct platform_device *pdev)
+{
+ struct rza1_irqc_priv *priv = platform_get_drvdata(pdev);
+
+ irq_domain_remove(priv->irq_domain);
+ return 0;
+}
+
+static const struct of_device_id rza1_irqc_dt_ids[] = {
+ { .compatible = "renesas,rza1-irqc" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, rza1_irqc_dt_ids);
+
+static struct platform_driver rza1_irqc_device_driver = {
+ .probe = rza1_irqc_probe,
+ .remove = rza1_irqc_remove,
+ .driver = {
+ .name = "renesas_rza1_irqc",
+ .of_match_table = rza1_irqc_dt_ids,
+ }
+};
+
+static int __init rza1_irqc_init(void)
+{
+ return platform_driver_register(&rza1_irqc_device_driver);
+}
+postcore_initcall(rza1_irqc_init);
+
+static void __exit rza1_irqc_exit(void)
+{
+ platform_driver_unregister(&rza1_irqc_device_driver);
+}
+module_exit(rza1_irqc_exit);
+
+MODULE_AUTHOR("Geert Uytterhoeven <geert+renesas@glider.be>");
+MODULE_DESCRIPTION("Renesas RZ/A1 IRQC Driver");
+MODULE_LICENSE("GPL v2");
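
rza1_irqc_parse_map() above walks the raw interrupt-map property, where each entry is <child-irq child-flags parent-phandle parent-specifier...>: it checks the child irq number, skips three cells, then copies #interrupt-cells worth of parent cells. The sketch below mimics that walk on a flattened array; the phandle value, SPI numbers and 3-cell GIC parent are hypothetical:

/* Illustrative walk over a flattened interrupt-map, mirroring the parse above. */
#include <stdio.h>
#include <stdint.h>

#define NUM_IRQ		2
#define PARENT_CELLS	3	/* parent with #interrupt-cells = <3>, e.g. a GIC */

int main(void)
{
	/* <child-irq child-flags parent-phandle GIC_SPI spi flags> per entry */
	const uint32_t imap[] = {
		0, 0, 0x42, 0, 41, 4,	/* IRQ0 -> SPI 41, level high */
		1, 0, 0x42, 0, 42, 4,	/* IRQ1 -> SPI 42, level high */
	};
	uint32_t map[NUM_IRQ][PARENT_CELLS];
	const uint32_t *p = imap;
	unsigned int i, j;

	for (i = 0; i < NUM_IRQ; i++) {
		if (p[0] != i)		/* entries are expected in order */
			return 1;
		p += 3;			/* skip child irq, child flags, phandle */
		for (j = 0; j < PARENT_CELLS; j++)
			map[i][j] = *p++;
		printf("irq %u -> parent spec <%u %u %u>\n",
		       i, map[i][0], map[i][1], map[i][2]);
	}
	return 0;
}
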
diff --git a/drivers/irqchip/irq-sni-exiu.c b/drivers/irqchip/irq-sni-exiu.c
index 4e983bc6cf93..1d027623c776 100644
--- a/drivers/irqchip/irq-sni-exiu.c
+++ b/drivers/irqchip/irq-sni-exiu.c
@@ -2,7 +2,7 @@
/*
* Driver for Socionext External Interrupt Unit (EXIU)
*
- * Copyright (c) 2017 Linaro, Ltd. <ard.biesheuvel@linaro.org>
+ * Copyright (c) 2017-2019 Linaro, Ltd. <ard.biesheuvel@linaro.org>
*
* Based on irq-tegra.c:
* Copyright (C) 2011 Google, Inc.
@@ -17,6 +17,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
+#include <linux/platform_device.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
@@ -131,9 +132,13 @@ static int exiu_domain_translate(struct irq_domain *domain,
*hwirq = fwspec->param[1] - info->spi_base;
*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
- return 0;
+ } else {
+ if (fwspec->param_count != 2)
+ return -EINVAL;
+ *hwirq = fwspec->param[0];
+ *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
}
- return -EINVAL;
+ return 0;
}
static int exiu_domain_alloc(struct irq_domain *dom, unsigned int virq,
@@ -144,16 +149,21 @@ static int exiu_domain_alloc(struct irq_domain *dom, unsigned int virq,
struct exiu_irq_data *info = dom->host_data;
irq_hw_number_t hwirq;
- if (fwspec->param_count != 3)
- return -EINVAL; /* Not GIC compliant */
- if (fwspec->param[0] != GIC_SPI)
- return -EINVAL; /* No PPI should point to this domain */
+ parent_fwspec = *fwspec;
+ if (is_of_node(dom->parent->fwnode)) {
+ if (fwspec->param_count != 3)
+ return -EINVAL; /* Not GIC compliant */
+ if (fwspec->param[0] != GIC_SPI)
+ return -EINVAL; /* No PPI should point to this domain */
+ hwirq = fwspec->param[1] - info->spi_base;
+ } else {
+ hwirq = fwspec->param[0];
+ parent_fwspec.param[0] = hwirq + info->spi_base + 32;
+ }
WARN_ON(nr_irqs != 1);
- hwirq = fwspec->param[1] - info->spi_base;
irq_domain_set_hwirq_and_chip(dom, virq, hwirq, &exiu_irq_chip, info);
- parent_fwspec = *fwspec;
parent_fwspec.fwnode = dom->parent->fwnode;
return irq_domain_alloc_irqs_parent(dom, virq, nr_irqs, &parent_fwspec);
}
@@ -164,35 +174,23 @@ static const struct irq_domain_ops exiu_domain_ops = {
.free = irq_domain_free_irqs_common,
};
-static int __init exiu_init(struct device_node *node,
- struct device_node *parent)
+static struct exiu_irq_data *exiu_init(const struct fwnode_handle *fwnode,
+ struct resource *res)
{
- struct irq_domain *parent_domain, *domain;
struct exiu_irq_data *data;
int err;
- if (!parent) {
- pr_err("%pOF: no parent, giving up\n", node);
- return -ENODEV;
- }
-
- parent_domain = irq_find_host(parent);
- if (!parent_domain) {
- pr_err("%pOF: unable to obtain parent domain\n", node);
- return -ENXIO;
- }
-
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
- if (of_property_read_u32(node, "socionext,spi-base", &data->spi_base)) {
- pr_err("%pOF: failed to parse 'spi-base' property\n", node);
+ if (fwnode_property_read_u32_array(fwnode, "socionext,spi-base",
+ &data->spi_base, 1)) {
err = -ENODEV;
goto out_free;
}
- data->base = of_iomap(node, 0);
+ data->base = ioremap(res->start, resource_size(res));
if (!data->base) {
err = -ENODEV;
goto out_free;
@@ -202,11 +200,44 @@ static int __init exiu_init(struct device_node *node,
writel_relaxed(0xFFFFFFFF, data->base + EIREQCLR);
writel_relaxed(0xFFFFFFFF, data->base + EIMASK);
+ return data;
+
+out_free:
+ kfree(data);
+ return ERR_PTR(err);
+}
+
+static int __init exiu_dt_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct irq_domain *parent_domain, *domain;
+ struct exiu_irq_data *data;
+ struct resource res;
+
+ if (!parent) {
+ pr_err("%pOF: no parent, giving up\n", node);
+ return -ENODEV;
+ }
+
+ parent_domain = irq_find_host(parent);
+ if (!parent_domain) {
+ pr_err("%pOF: unable to obtain parent domain\n", node);
+ return -ENXIO;
+ }
+
+ if (of_address_to_resource(node, 0, &res)) {
+ pr_err("%pOF: failed to parse memory resource\n", node);
+ return -ENXIO;
+ }
+
+ data = exiu_init(of_node_to_fwnode(node), &res);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
domain = irq_domain_add_hierarchy(parent_domain, 0, NUM_IRQS, node,
&exiu_domain_ops, data);
if (!domain) {
pr_err("%pOF: failed to allocate domain\n", node);
- err = -ENOMEM;
goto out_unmap;
}
@@ -217,8 +248,57 @@ static int __init exiu_init(struct device_node *node,
out_unmap:
iounmap(data->base);
-out_free:
kfree(data);
- return err;
+ return -ENOMEM;
}
-IRQCHIP_DECLARE(exiu, "socionext,synquacer-exiu", exiu_init);
+IRQCHIP_DECLARE(exiu, "socionext,synquacer-exiu", exiu_dt_init);
+
+#ifdef CONFIG_ACPI
+static int exiu_acpi_probe(struct platform_device *pdev)
+{
+ struct irq_domain *domain;
+ struct exiu_irq_data *data;
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "failed to parse memory resource\n");
+ return -ENXIO;
+ }
+
+ data = exiu_init(dev_fwnode(&pdev->dev), res);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ domain = acpi_irq_create_hierarchy(0, NUM_IRQS, dev_fwnode(&pdev->dev),
+ &exiu_domain_ops, data);
+ if (!domain) {
+ dev_err(&pdev->dev, "failed to create IRQ domain\n");
+ goto out_unmap;
+ }
+
+ dev_info(&pdev->dev, "%d interrupts forwarded\n", NUM_IRQS);
+
+ return 0;
+
+out_unmap:
+ iounmap(data->base);
+ kfree(data);
+ return -ENOMEM;
+}
+
+static const struct acpi_device_id exiu_acpi_ids[] = {
+ { "SCX0008" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(acpi, exiu_acpi_ids);
+
+static struct platform_driver exiu_driver = {
+ .driver = {
+ .name = "exiu",
+ .acpi_match_table = exiu_acpi_ids,
+ },
+ .probe = exiu_acpi_probe,
+};
+builtin_platform_driver(exiu_driver);
+#endif
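
The EXIU changes above handle two encodings of the same interrupt line: the DT path gets a 3-cell GIC specifier and makes the hwirq SPI-relative by subtracting socionext,spi-base, while the ACPI path gets the EXIU line number directly and rebuilds the parent interrupt as hwirq + spi_base + 32. A small sketch of both conversions, with a hypothetical spi_base, follows:

/* Illustrative only: the two hwirq encodings handled above. */
#include <stdio.h>

#define SPI_BASE 112	/* hypothetical socionext,spi-base value */

int main(void)
{
	/* DT case: firmware hands us <GIC_SPI spi type>, hwirq is SPI-relative */
	unsigned int dt_spi = 115;
	unsigned int dt_hwirq = dt_spi - SPI_BASE;

	/* ACPI case: firmware hands us the EXIU line number directly */
	unsigned int acpi_hwirq = 3;
	unsigned int acpi_parent_intid = acpi_hwirq + SPI_BASE + 32;

	printf("DT: hwirq %u\n", dt_hwirq);
	printf("ACPI: hwirq %u maps to parent interrupt %u\n",
	       acpi_hwirq, acpi_parent_intid);
	return 0;
}
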
diff --git a/drivers/irqchip/irq-ti-sci-inta.c b/drivers/irqchip/irq-ti-sci-inta.c
index 011b60a49e3f..ef4d625d2d80 100644
--- a/drivers/irqchip/irq-ti-sci-inta.c
+++ b/drivers/irqchip/irq-ti-sci-inta.c
@@ -159,9 +159,9 @@ static struct ti_sci_inta_vint_desc *ti_sci_inta_alloc_parent_irq(struct irq_dom
parent_fwspec.param[1] = vint_desc->vint_id;
parent_virq = irq_create_fwspec_mapping(&parent_fwspec);
- if (parent_virq <= 0) {
+ if (parent_virq == 0) {
kfree(vint_desc);
- return ERR_PTR(parent_virq);
+ return ERR_PTR(-EINVAL);
}
vint_desc->parent_virq = parent_virq;
diff --git a/drivers/irqchip/qcom-irq-combiner.c b/drivers/irqchip/qcom-irq-combiner.c
index 067337ab3f20..d88e993aa66d 100644
--- a/drivers/irqchip/qcom-irq-combiner.c
+++ b/drivers/irqchip/qcom-irq-combiner.c
@@ -229,7 +229,6 @@ static int get_registers(struct platform_device *pdev, struct combiner *comb)
static int __init combiner_probe(struct platform_device *pdev)
{
struct combiner *combiner;
- size_t alloc_sz;
int nregs;
int err;
@@ -239,8 +238,8 @@ static int __init combiner_probe(struct platform_device *pdev)
return -EINVAL;
}
- alloc_sz = sizeof(*combiner) + sizeof(struct combiner_reg) * nregs;
- combiner = devm_kzalloc(&pdev->dev, alloc_sz, GFP_KERNEL);
+ combiner = devm_kzalloc(&pdev->dev, struct_size(combiner, regs, nregs),
+ GFP_KERNEL);
if (!combiner)
return -ENOMEM;
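
The combiner change above replaces the open-coded sizeof arithmetic with struct_size(), which computes the same size for a structure ending in a flexible array but saturates on overflow. The plain C sketch below shows the underlying layout; the field names are stand-ins and the kernel's overflow saturation is omitted:

/* Illustrative only: sizing an allocation with a trailing flexible array. */
#include <stdio.h>
#include <stdlib.h>

struct reg { void *addr; unsigned long mask; };

struct combiner {
	int nregs;
	struct reg regs[];	/* flexible array member */
};

int main(void)
{
	int nregs = 4;
	size_t sz = sizeof(struct combiner) + (size_t)nregs * sizeof(struct reg);
	struct combiner *c = calloc(1, sz);

	if (!c)
		return 1;
	c->nregs = nregs;
	printf("allocated %zu bytes for %d regs\n", sz, nregs);
	free(c);
	return 0;
}
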
diff --git a/drivers/isdn/Kconfig b/drivers/isdn/Kconfig
index 1ca4d70d198a..be8387c0eeef 100644
--- a/drivers/isdn/Kconfig
+++ b/drivers/isdn/Kconfig
@@ -21,59 +21,8 @@ menuconfig ISDN
if ISDN
-menuconfig ISDN_I4L
- tristate "Old ISDN4Linux (deprecated)"
- depends on TTY
- ---help---
- This driver allows you to use an ISDN adapter for networking
- connections and as dialin/out device. The isdn-tty's have a built
- in AT-compatible modem emulator. Network devices support autodial,
- channel-bundling, callback and caller-authentication without having
- a daemon running. A reduced T.70 protocol is supported with tty's
- suitable for German BTX. On D-Channel, the protocols EDSS1
- (Euro-ISDN) and 1TR6 (German style) are supported. See
- <file:Documentation/isdn/README> for more information.
-
- ISDN support in the linux kernel is moving towards a new API,
- called CAPI (Common ISDN Application Programming Interface).
- Therefore the old ISDN4Linux layer will eventually become obsolete.
- It is still available, though, for use with adapters that are not
- supported by the new CAPI subsystem yet.
-
-source "drivers/isdn/i4l/Kconfig"
-
-menuconfig ISDN_CAPI
- tristate "CAPI 2.0 subsystem"
- help
- This provides CAPI (the Common ISDN Application Programming
- Interface) Version 2.0, a standard making it easy for programs to
- access ISDN hardware in a device independent way. (For details see
- <http://www.capi.org/>.) CAPI supports making and accepting voice
- and data connections, controlling call options and protocols,
- as well as ISDN supplementary services like call forwarding or
- three-party conferences (if supported by the specific hardware
- driver).
-
- Select this option and the appropriate hardware driver below if
- you have an ISDN adapter supported by the CAPI subsystem.
-
-if ISDN_CAPI
-
source "drivers/isdn/capi/Kconfig"
-source "drivers/isdn/hardware/Kconfig"
-
-endif # ISDN_CAPI
-
-source "drivers/isdn/gigaset/Kconfig"
-
-source "drivers/isdn/hysdn/Kconfig"
-
source "drivers/isdn/mISDN/Kconfig"
-config ISDN_HDLC
- tristate
- select CRC_CCITT
- select BITREVERSE
-
endif # ISDN
diff --git a/drivers/isdn/Makefile b/drivers/isdn/Makefile
index e7d3d8f2ad5a..63baf27a2c79 100644
--- a/drivers/isdn/Makefile
+++ b/drivers/isdn/Makefile
@@ -3,12 +3,6 @@
# Object files in subdirectories
-obj-$(CONFIG_ISDN_I4L) += i4l/
obj-$(CONFIG_ISDN_CAPI) += capi/
obj-$(CONFIG_MISDN) += mISDN/
obj-$(CONFIG_ISDN) += hardware/
-obj-$(CONFIG_ISDN_DIVERSION) += divert/
-obj-$(CONFIG_ISDN_DRV_HISAX) += hisax/
-obj-$(CONFIG_ISDN_DRV_LOOP) += isdnloop/
-obj-$(CONFIG_HYSDN) += hysdn/
-obj-$(CONFIG_ISDN_DRV_GIGASET) += gigaset/
diff --git a/drivers/isdn/capi/Kconfig b/drivers/isdn/capi/Kconfig
index abaadce376c5..573fea5500ce 100644
--- a/drivers/isdn/capi/Kconfig
+++ b/drivers/isdn/capi/Kconfig
@@ -1,4 +1,22 @@
# SPDX-License-Identifier: GPL-2.0-only
+menuconfig ISDN_CAPI
+ tristate "CAPI 2.0 subsystem"
+ help
+ This provides CAPI (the Common ISDN Application Programming
+ Interface) Version 2.0, a standard making it easy for programs to
+ access ISDN hardware in a device independent way. (For details see
+ <http://www.capi.org/>.) CAPI supports making and accepting voice
+ and data connections, controlling call options and protocols,
+ as well as ISDN supplementary services like call forwarding or
+ three-party conferences (if supported by the specific hardware
+ driver).
+
+ This subsystem requires a hardware-specific driver.
+ See CONFIG_BT_CMTP for the last remaining regular driver
+ in the kernel that uses the CAPI subsystem.
+
+if ISDN_CAPI
+
config CAPI_TRACE
bool "CAPI trace support"
default y
@@ -27,15 +45,6 @@ config ISDN_CAPI_MIDDLEWARE
device. If you want to use pppd with pppdcapiplugin to dial up to
your ISP, say Y here.
-config ISDN_CAPI_CAPIDRV
- tristate "CAPI2.0 capidrv interface support"
- depends on ISDN_I4L
- help
- This option provides the glue code to hook up CAPI driven cards to
- the legacy isdn4linux link layer. If you have a card which is
- supported by a CAPI driver, but still want to use old features like
- ippp interfaces or ttyI emulation, say Y/M here.
-
config ISDN_CAPI_CAPIDRV_VERBOSE
bool "Verbose reason code reporting"
depends on ISDN_CAPI_CAPIDRV
@@ -43,3 +52,5 @@ config ISDN_CAPI_CAPIDRV_VERBOSE
If you say Y here, the capidrv interface will give verbose reasons
for disconnecting. This will increase the size of the kernel by 7 KB.
If unsure, say N.
+
+endif
diff --git a/drivers/isdn/capi/Makefile b/drivers/isdn/capi/Makefile
index 06da3ed2c40a..d299f3e75f89 100644
--- a/drivers/isdn/capi/Makefile
+++ b/drivers/isdn/capi/Makefile
@@ -13,3 +13,5 @@ obj-$(CONFIG_ISDN_CAPI_CAPIDRV) += capidrv.o
kernelcapi-y := kcapi.o capiutil.o capilib.o
kernelcapi-$(CONFIG_PROC_FS) += kcapi_proc.o
+
+ccflags-y += -I$(srctree)/$(src)/../include -I$(srctree)/$(src)/../include/uapi
diff --git a/drivers/isdn/capi/capidrv.c b/drivers/isdn/capi/capidrv.c
deleted file mode 100644
index e8949f3dcae1..000000000000
--- a/drivers/isdn/capi/capidrv.c
+++ /dev/null
@@ -1,2525 +0,0 @@
-/* $Id: capidrv.c,v 1.1.2.2 2004/01/12 23:17:24 keil Exp $
- *
- * ISDN4Linux Driver, using capi20 interface (kernelcapi)
- *
- * Copyright 1997 by Carsten Paeth <calle@calle.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/compiler.h>
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/major.h>
-#include <linux/slab.h>
-#include <linux/fcntl.h>
-#include <linux/fs.h>
-#include <linux/signal.h>
-#include <linux/mm.h>
-#include <linux/timer.h>
-#include <linux/wait.h>
-#include <linux/skbuff.h>
-#include <linux/isdn.h>
-#include <linux/isdnif.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/capi.h>
-#include <linux/kernelcapi.h>
-#include <linux/ctype.h>
-#include <linux/init.h>
-#include <linux/moduleparam.h>
-
-#include <linux/isdn/capiutil.h>
-#include <linux/isdn/capicmd.h>
-#include "capidrv.h"
-
-static int debugmode = 0;
-
-MODULE_DESCRIPTION("CAPI4Linux: Interface to ISDN4Linux");
-MODULE_AUTHOR("Carsten Paeth");
-MODULE_LICENSE("GPL");
-module_param(debugmode, uint, S_IRUGO | S_IWUSR);
-
-/* -------- type definitions ----------------------------------------- */
-
-
-struct capidrv_contr {
-
- struct capidrv_contr *next;
- struct module *owner;
- u32 contrnr;
- char name[20];
-
- /*
- * for isdn4linux
- */
- isdn_if interface;
- int myid;
-
- /*
- * LISTEN state
- */
- int state;
- u32 cipmask;
- u32 cipmask2;
- struct timer_list listentimer;
-
- /*
- * ID of capi message sent
- */
- u16 msgid;
-
- /*
- * B-Channels
- */
- int nbchan;
- struct capidrv_bchan {
- struct capidrv_contr *contr;
- u8 msn[ISDN_MSNLEN];
- int l2;
- int l3;
- u8 num[ISDN_MSNLEN];
- u8 mynum[ISDN_MSNLEN];
- int si1;
- int si2;
- int incoming;
- int disconnecting;
- struct capidrv_plci {
- struct capidrv_plci *next;
- u32 plci;
- u32 ncci; /* ncci for CONNECT_ACTIVE_IND */
- u16 msgid; /* to identfy CONNECT_CONF */
- int chan;
- int state;
- int leasedline;
- struct capidrv_ncci {
- struct capidrv_ncci *next;
- struct capidrv_plci *plcip;
- u32 ncci;
- u16 msgid; /* to identfy CONNECT_B3_CONF */
- int chan;
- int state;
- int oldstate;
- /* */
- u16 datahandle;
- struct ncci_datahandle_queue {
- struct ncci_datahandle_queue *next;
- u16 datahandle;
- int len;
- } *ackqueue;
- } *ncci_list;
- } *plcip;
- struct capidrv_ncci *nccip;
- } *bchans;
-
- struct capidrv_plci *plci_list;
-
- /* for q931 data */
- u8 q931_buf[4096];
- u8 *q931_read;
- u8 *q931_write;
- u8 *q931_end;
-};
-
-
-struct capidrv_data {
- struct capi20_appl ap;
- int ncontr;
- struct capidrv_contr *contr_list;
-};
-
-typedef struct capidrv_plci capidrv_plci;
-typedef struct capidrv_ncci capidrv_ncci;
-typedef struct capidrv_contr capidrv_contr;
-typedef struct capidrv_data capidrv_data;
-typedef struct capidrv_bchan capidrv_bchan;
-
-/* -------- data definitions ----------------------------------------- */
-
-static capidrv_data global;
-static DEFINE_SPINLOCK(global_lock);
-
-static void handle_dtrace_data(capidrv_contr *card,
- int send, int level2, u8 *data, u16 len);
-
-/* -------- convert functions ---------------------------------------- */
-
-static inline u32 b1prot(int l2, int l3)
-{
- switch (l2) {
- case ISDN_PROTO_L2_X75I:
- case ISDN_PROTO_L2_X75UI:
- case ISDN_PROTO_L2_X75BUI:
- return 0;
- case ISDN_PROTO_L2_HDLC:
- default:
- return 0;
- case ISDN_PROTO_L2_TRANS:
- return 1;
- case ISDN_PROTO_L2_V11096:
- case ISDN_PROTO_L2_V11019:
- case ISDN_PROTO_L2_V11038:
- return 2;
- case ISDN_PROTO_L2_FAX:
- return 4;
- case ISDN_PROTO_L2_MODEM:
- return 8;
- }
-}
-
-static inline u32 b2prot(int l2, int l3)
-{
- switch (l2) {
- case ISDN_PROTO_L2_X75I:
- case ISDN_PROTO_L2_X75UI:
- case ISDN_PROTO_L2_X75BUI:
- default:
- return 0;
- case ISDN_PROTO_L2_HDLC:
- case ISDN_PROTO_L2_TRANS:
- case ISDN_PROTO_L2_V11096:
- case ISDN_PROTO_L2_V11019:
- case ISDN_PROTO_L2_V11038:
- case ISDN_PROTO_L2_MODEM:
- return 1;
- case ISDN_PROTO_L2_FAX:
- return 4;
- }
-}
-
-static inline u32 b3prot(int l2, int l3)
-{
- switch (l2) {
- case ISDN_PROTO_L2_X75I:
- case ISDN_PROTO_L2_X75UI:
- case ISDN_PROTO_L2_X75BUI:
- case ISDN_PROTO_L2_HDLC:
- case ISDN_PROTO_L2_TRANS:
- case ISDN_PROTO_L2_V11096:
- case ISDN_PROTO_L2_V11019:
- case ISDN_PROTO_L2_V11038:
- case ISDN_PROTO_L2_MODEM:
- default:
- return 0;
- case ISDN_PROTO_L2_FAX:
- return 4;
- }
-}
-
-static _cstruct b1config_async_v110(u16 rate)
-{
- /* CAPI-Spec "B1 Configuration" */
- static unsigned char buf[9];
- buf[0] = 8; /* len */
- /* maximum bitrate */
- buf[1] = rate & 0xff; buf[2] = (rate >> 8) & 0xff;
- buf[3] = 8; buf[4] = 0; /* 8 bits per character */
- buf[5] = 0; buf[6] = 0; /* parity none */
- buf[7] = 0; buf[8] = 0; /* 1 stop bit */
- return buf;
-}
-
-static _cstruct b1config(int l2, int l3)
-{
- switch (l2) {
- case ISDN_PROTO_L2_X75I:
- case ISDN_PROTO_L2_X75UI:
- case ISDN_PROTO_L2_X75BUI:
- case ISDN_PROTO_L2_HDLC:
- case ISDN_PROTO_L2_TRANS:
- default:
- return NULL;
- case ISDN_PROTO_L2_V11096:
- return b1config_async_v110(9600);
- case ISDN_PROTO_L2_V11019:
- return b1config_async_v110(19200);
- case ISDN_PROTO_L2_V11038:
- return b1config_async_v110(38400);
- }
-}
-
-static inline u16 si2cip(u8 si1, u8 si2)
-{
- static const u8 cip[17][5] =
- {
- /* 0 1 2 3 4 */
- {0, 0, 0, 0, 0}, /*0 */
- {16, 16, 4, 26, 16}, /*1 */
- {17, 17, 17, 4, 4}, /*2 */
- {2, 2, 2, 2, 2}, /*3 */
- {18, 18, 18, 18, 18}, /*4 */
- {2, 2, 2, 2, 2}, /*5 */
- {0, 0, 0, 0, 0}, /*6 */
- {2, 2, 2, 2, 2}, /*7 */
- {2, 2, 2, 2, 2}, /*8 */
- {21, 21, 21, 21, 21}, /*9 */
- {19, 19, 19, 19, 19}, /*10 */
- {0, 0, 0, 0, 0}, /*11 */
- {0, 0, 0, 0, 0}, /*12 */
- {0, 0, 0, 0, 0}, /*13 */
- {0, 0, 0, 0, 0}, /*14 */
- {22, 22, 22, 22, 22}, /*15 */
- {27, 27, 27, 28, 27} /*16 */
- };
- if (si1 > 16)
- si1 = 0;
- if (si2 > 4)
- si2 = 0;
-
- return (u16) cip[si1][si2];
-}
-
-static inline u8 cip2si1(u16 cipval)
-{
- static const u8 si[32] =
- {7, 1, 7, 7, 1, 1, 7, 7, /*0-7 */
- 7, 1, 0, 0, 0, 0, 0, 0, /*8-15 */
- 1, 2, 4, 10, 9, 9, 15, 7, /*16-23 */
- 7, 7, 1, 16, 16, 0, 0, 0}; /*24-31 */
-
- if (cipval > 31)
- cipval = 0; /* .... */
- return si[cipval];
-}
-
-static inline u8 cip2si2(u16 cipval)
-{
- static const u8 si[32] =
- {0, 0, 0, 0, 2, 3, 0, 0, /*0-7 */
- 0, 3, 0, 0, 0, 0, 0, 0, /*8-15 */
- 1, 2, 0, 0, 9, 0, 0, 0, /*16-23 */
- 0, 0, 3, 2, 3, 0, 0, 0}; /*24-31 */
-
- if (cipval > 31)
- cipval = 0; /* .... */
- return si[cipval];
-}
-
-
-/* -------- controller management ------------------------------------- */
-
-static inline capidrv_contr *findcontrbydriverid(int driverid)
-{
- unsigned long flags;
- capidrv_contr *p;
-
- spin_lock_irqsave(&global_lock, flags);
- for (p = global.contr_list; p; p = p->next)
- if (p->myid == driverid)
- break;
- spin_unlock_irqrestore(&global_lock, flags);
- return p;
-}
-
-static capidrv_contr *findcontrbynumber(u32 contr)
-{
- unsigned long flags;
- capidrv_contr *p = global.contr_list;
-
- spin_lock_irqsave(&global_lock, flags);
- for (p = global.contr_list; p; p = p->next)
- if (p->contrnr == contr)
- break;
- spin_unlock_irqrestore(&global_lock, flags);
- return p;
-}
-
-
-/* -------- plci management ------------------------------------------ */
-
-static capidrv_plci *new_plci(capidrv_contr *card, int chan)
-{
- capidrv_plci *plcip;
-
- plcip = kzalloc(sizeof(capidrv_plci), GFP_ATOMIC);
-
- if (plcip == NULL)
- return NULL;
-
- plcip->state = ST_PLCI_NONE;
- plcip->plci = 0;
- plcip->msgid = 0;
- plcip->chan = chan;
- plcip->next = card->plci_list;
- card->plci_list = plcip;
- card->bchans[chan].plcip = plcip;
-
- return plcip;
-}
-
-static capidrv_plci *find_plci_by_plci(capidrv_contr *card, u32 plci)
-{
- capidrv_plci *p;
- for (p = card->plci_list; p; p = p->next)
- if (p->plci == plci)
- return p;
- return NULL;
-}
-
-static capidrv_plci *find_plci_by_msgid(capidrv_contr *card, u16 msgid)
-{
- capidrv_plci *p;
- for (p = card->plci_list; p; p = p->next)
- if (p->msgid == msgid)
- return p;
- return NULL;
-}
-
-static capidrv_plci *find_plci_by_ncci(capidrv_contr *card, u32 ncci)
-{
- capidrv_plci *p;
- for (p = card->plci_list; p; p = p->next)
- if (p->plci == (ncci & 0xffff))
- return p;
- return NULL;
-}
-
-static void free_plci(capidrv_contr *card, capidrv_plci *plcip)
-{
- capidrv_plci **pp;
-
- for (pp = &card->plci_list; *pp; pp = &(*pp)->next) {
- if (*pp == plcip) {
- *pp = (*pp)->next;
- card->bchans[plcip->chan].plcip = NULL;
- card->bchans[plcip->chan].disconnecting = 0;
- card->bchans[plcip->chan].incoming = 0;
- kfree(plcip);
- return;
- }
- }
- printk(KERN_ERR "capidrv-%d: free_plci %p (0x%x) not found, Huh?\n",
- card->contrnr, plcip, plcip->plci);
-}
-
-/* -------- ncci management ------------------------------------------ */
-
-static inline capidrv_ncci *new_ncci(capidrv_contr *card,
- capidrv_plci *plcip,
- u32 ncci)
-{
- capidrv_ncci *nccip;
-
- nccip = kzalloc(sizeof(capidrv_ncci), GFP_ATOMIC);
-
- if (nccip == NULL)
- return NULL;
-
- nccip->ncci = ncci;
- nccip->state = ST_NCCI_NONE;
- nccip->plcip = plcip;
- nccip->chan = plcip->chan;
- nccip->datahandle = 0;
-
- nccip->next = plcip->ncci_list;
- plcip->ncci_list = nccip;
-
- card->bchans[plcip->chan].nccip = nccip;
-
- return nccip;
-}
-
-static inline capidrv_ncci *find_ncci(capidrv_contr *card, u32 ncci)
-{
- capidrv_plci *plcip;
- capidrv_ncci *p;
-
- if ((plcip = find_plci_by_ncci(card, ncci)) == NULL)
- return NULL;
-
- for (p = plcip->ncci_list; p; p = p->next)
- if (p->ncci == ncci)
- return p;
- return NULL;
-}
-
-static inline capidrv_ncci *find_ncci_by_msgid(capidrv_contr *card,
- u32 ncci, u16 msgid)
-{
- capidrv_plci *plcip;
- capidrv_ncci *p;
-
- if ((plcip = find_plci_by_ncci(card, ncci)) == NULL)
- return NULL;
-
- for (p = plcip->ncci_list; p; p = p->next)
- if (p->msgid == msgid)
- return p;
- return NULL;
-}
-
-static void free_ncci(capidrv_contr *card, struct capidrv_ncci *nccip)
-{
- struct capidrv_ncci **pp;
-
- for (pp = &(nccip->plcip->ncci_list); *pp; pp = &(*pp)->next) {
- if (*pp == nccip) {
- *pp = (*pp)->next;
- break;
- }
- }
- card->bchans[nccip->chan].nccip = NULL;
- kfree(nccip);
-}
-
-static int capidrv_add_ack(struct capidrv_ncci *nccip,
- u16 datahandle, int len)
-{
- struct ncci_datahandle_queue *n, **pp;
-
- n = kmalloc(sizeof(struct ncci_datahandle_queue), GFP_ATOMIC);
- if (!n) {
- printk(KERN_ERR "capidrv: kmalloc ncci_datahandle failed\n");
- return -1;
- }
- n->next = NULL;
- n->datahandle = datahandle;
- n->len = len;
- for (pp = &nccip->ackqueue; *pp; pp = &(*pp)->next);
- *pp = n;
- return 0;
-}
-
-static int capidrv_del_ack(struct capidrv_ncci *nccip, u16 datahandle)
-{
- struct ncci_datahandle_queue **pp, *p;
- int len;
-
- for (pp = &nccip->ackqueue; *pp; pp = &(*pp)->next) {
- if ((*pp)->datahandle == datahandle) {
- p = *pp;
- len = p->len;
- *pp = (*pp)->next;
- kfree(p);
- return len;
- }
- }
- return -1;
-}
-
-/* -------- convert and send capi message ---------------------------- */
-
-static void send_message(capidrv_contr *card, _cmsg *cmsg)
-{
- struct sk_buff *skb;
- size_t len;
-
- if (capi_cmsg2message(cmsg, cmsg->buf)) {
- printk(KERN_ERR "capidrv::send_message: parser failure\n");
- return;
- }
- len = CAPIMSG_LEN(cmsg->buf);
- skb = alloc_skb(len, GFP_ATOMIC);
- if (!skb) {
- printk(KERN_ERR "capidrv::send_message: can't allocate mem\n");
- return;
- }
- skb_put_data(skb, cmsg->buf, len);
- if (capi20_put_message(&global.ap, skb) != CAPI_NOERROR)
- kfree_skb(skb);
-}
-
-/* -------- state machine -------------------------------------------- */
-
-struct listenstatechange {
- int actstate;
- int nextstate;
- int event;
-};
-
-static struct listenstatechange listentable[] =
-{
- {ST_LISTEN_NONE, ST_LISTEN_WAIT_CONF, EV_LISTEN_REQ},
- {ST_LISTEN_ACTIVE, ST_LISTEN_ACTIVE_WAIT_CONF, EV_LISTEN_REQ},
- {ST_LISTEN_WAIT_CONF, ST_LISTEN_NONE, EV_LISTEN_CONF_ERROR},
- {ST_LISTEN_ACTIVE_WAIT_CONF, ST_LISTEN_ACTIVE, EV_LISTEN_CONF_ERROR},
- {ST_LISTEN_WAIT_CONF, ST_LISTEN_NONE, EV_LISTEN_CONF_EMPTY},
- {ST_LISTEN_ACTIVE_WAIT_CONF, ST_LISTEN_NONE, EV_LISTEN_CONF_EMPTY},
- {ST_LISTEN_WAIT_CONF, ST_LISTEN_ACTIVE, EV_LISTEN_CONF_OK},
- {ST_LISTEN_ACTIVE_WAIT_CONF, ST_LISTEN_ACTIVE, EV_LISTEN_CONF_OK},
- {},
-};
-
-static void listen_change_state(capidrv_contr *card, int event)
-{
- struct listenstatechange *p = listentable;
- while (p->event) {
- if (card->state == p->actstate && p->event == event) {
- if (debugmode)
- printk(KERN_DEBUG "capidrv-%d: listen_change_state %d -> %d\n",
- card->contrnr, card->state, p->nextstate);
- card->state = p->nextstate;
- return;
- }
- p++;
- }
- printk(KERN_ERR "capidrv-%d: listen_change_state state=%d event=%d ????\n",
- card->contrnr, card->state, event);
-
-}
-
-/* ------------------------------------------------------------------ */
-
-static void p0(capidrv_contr *card, capidrv_plci *plci)
-{
- isdn_ctrl cmd;
-
- card->bchans[plci->chan].contr = NULL;
- cmd.command = ISDN_STAT_DHUP;
- cmd.driver = card->myid;
- cmd.arg = plci->chan;
- card->interface.statcallb(&cmd);
- free_plci(card, plci);
-}
-
-/* ------------------------------------------------------------------ */
-
-struct plcistatechange {
- int actstate;
- int nextstate;
- int event;
- void (*changefunc)(capidrv_contr *card, capidrv_plci *plci);
-};
-
-static struct plcistatechange plcitable[] =
-{
- /* P-0 */
- {ST_PLCI_NONE, ST_PLCI_OUTGOING, EV_PLCI_CONNECT_REQ, NULL},
- {ST_PLCI_NONE, ST_PLCI_ALLOCATED, EV_PLCI_FACILITY_IND_UP, NULL},
- {ST_PLCI_NONE, ST_PLCI_INCOMING, EV_PLCI_CONNECT_IND, NULL},
- {ST_PLCI_NONE, ST_PLCI_RESUMEING, EV_PLCI_RESUME_REQ, NULL},
- /* P-0.1 */
- {ST_PLCI_OUTGOING, ST_PLCI_NONE, EV_PLCI_CONNECT_CONF_ERROR, p0},
- {ST_PLCI_OUTGOING, ST_PLCI_ALLOCATED, EV_PLCI_CONNECT_CONF_OK, NULL},
- /* P-1 */
- {ST_PLCI_ALLOCATED, ST_PLCI_ACTIVE, EV_PLCI_CONNECT_ACTIVE_IND, NULL},
- {ST_PLCI_ALLOCATED, ST_PLCI_DISCONNECTING, EV_PLCI_DISCONNECT_REQ, NULL},
- {ST_PLCI_ALLOCATED, ST_PLCI_DISCONNECTING, EV_PLCI_FACILITY_IND_DOWN, NULL},
- {ST_PLCI_ALLOCATED, ST_PLCI_DISCONNECTED, EV_PLCI_DISCONNECT_IND, NULL},
- /* P-ACT */
- {ST_PLCI_ACTIVE, ST_PLCI_DISCONNECTING, EV_PLCI_DISCONNECT_REQ, NULL},
- {ST_PLCI_ACTIVE, ST_PLCI_DISCONNECTING, EV_PLCI_FACILITY_IND_DOWN, NULL},
- {ST_PLCI_ACTIVE, ST_PLCI_DISCONNECTED, EV_PLCI_DISCONNECT_IND, NULL},
- {ST_PLCI_ACTIVE, ST_PLCI_HELD, EV_PLCI_HOLD_IND, NULL},
- {ST_PLCI_ACTIVE, ST_PLCI_DISCONNECTING, EV_PLCI_SUSPEND_IND, NULL},
- /* P-2 */
- {ST_PLCI_INCOMING, ST_PLCI_DISCONNECTING, EV_PLCI_CONNECT_REJECT, NULL},
- {ST_PLCI_INCOMING, ST_PLCI_FACILITY_IND, EV_PLCI_FACILITY_IND_UP, NULL},
- {ST_PLCI_INCOMING, ST_PLCI_ACCEPTING, EV_PLCI_CONNECT_RESP, NULL},
- {ST_PLCI_INCOMING, ST_PLCI_DISCONNECTING, EV_PLCI_DISCONNECT_REQ, NULL},
- {ST_PLCI_INCOMING, ST_PLCI_DISCONNECTING, EV_PLCI_FACILITY_IND_DOWN, NULL},
- {ST_PLCI_INCOMING, ST_PLCI_DISCONNECTED, EV_PLCI_DISCONNECT_IND, NULL},
- {ST_PLCI_INCOMING, ST_PLCI_DISCONNECTING, EV_PLCI_CD_IND, NULL},
- /* P-3 */
- {ST_PLCI_FACILITY_IND, ST_PLCI_DISCONNECTING, EV_PLCI_CONNECT_REJECT, NULL},
- {ST_PLCI_FACILITY_IND, ST_PLCI_ACCEPTING, EV_PLCI_CONNECT_ACTIVE_IND, NULL},
- {ST_PLCI_FACILITY_IND, ST_PLCI_DISCONNECTING, EV_PLCI_DISCONNECT_REQ, NULL},
- {ST_PLCI_FACILITY_IND, ST_PLCI_DISCONNECTING, EV_PLCI_FACILITY_IND_DOWN, NULL},
- {ST_PLCI_FACILITY_IND, ST_PLCI_DISCONNECTED, EV_PLCI_DISCONNECT_IND, NULL},
- /* P-4 */
- {ST_PLCI_ACCEPTING, ST_PLCI_ACTIVE, EV_PLCI_CONNECT_ACTIVE_IND, NULL},
- {ST_PLCI_ACCEPTING, ST_PLCI_DISCONNECTING, EV_PLCI_DISCONNECT_REQ, NULL},
- {ST_PLCI_ACCEPTING, ST_PLCI_DISCONNECTING, EV_PLCI_FACILITY_IND_DOWN, NULL},
- {ST_PLCI_ACCEPTING, ST_PLCI_DISCONNECTED, EV_PLCI_DISCONNECT_IND, NULL},
- /* P-5 */
- {ST_PLCI_DISCONNECTING, ST_PLCI_DISCONNECTED, EV_PLCI_DISCONNECT_IND, NULL},
- /* P-6 */
- {ST_PLCI_DISCONNECTED, ST_PLCI_NONE, EV_PLCI_DISCONNECT_RESP, p0},
- /* P-0.Res */
- {ST_PLCI_RESUMEING, ST_PLCI_NONE, EV_PLCI_RESUME_CONF_ERROR, p0},
- {ST_PLCI_RESUMEING, ST_PLCI_RESUME, EV_PLCI_RESUME_CONF_OK, NULL},
- /* P-RES */
- {ST_PLCI_RESUME, ST_PLCI_ACTIVE, EV_PLCI_RESUME_IND, NULL},
- /* P-HELD */
- {ST_PLCI_HELD, ST_PLCI_ACTIVE, EV_PLCI_RETRIEVE_IND, NULL},
- {},
-};
-
-static void plci_change_state(capidrv_contr *card, capidrv_plci *plci, int event)
-{
- struct plcistatechange *p = plcitable;
- while (p->event) {
- if (plci->state == p->actstate && p->event == event) {
- if (debugmode)
- printk(KERN_DEBUG "capidrv-%d: plci_change_state:0x%x %d -> %d\n",
- card->contrnr, plci->plci, plci->state, p->nextstate);
- plci->state = p->nextstate;
- if (p->changefunc)
- p->changefunc(card, plci);
- return;
- }
- p++;
- }
- printk(KERN_ERR "capidrv-%d: plci_change_state:0x%x state=%d event=%d ????\n",
- card->contrnr, plci->plci, plci->state, event);
-}
-
-/* ------------------------------------------------------------------ */
-
-static _cmsg cmsg;
-
-static void n0(capidrv_contr *card, capidrv_ncci *ncci)
-{
- isdn_ctrl cmd;
-
- capi_fill_DISCONNECT_REQ(&cmsg,
- global.ap.applid,
- card->msgid++,
- ncci->plcip->plci,
- NULL, /* BChannelinformation */
- NULL, /* Keypadfacility */
- NULL, /* Useruserdata */ /* $$$$ */
- NULL /* Facilitydataarray */
- );
- plci_change_state(card, ncci->plcip, EV_PLCI_DISCONNECT_REQ);
- send_message(card, &cmsg);
-
- cmd.command = ISDN_STAT_BHUP;
- cmd.driver = card->myid;
- cmd.arg = ncci->chan;
- card->interface.statcallb(&cmd);
- free_ncci(card, ncci);
-}
-
-/* ------------------------------------------------------------------ */
-
-struct nccistatechange {
- int actstate;
- int nextstate;
- int event;
- void (*changefunc)(capidrv_contr *card, capidrv_ncci *ncci);
-};
-
-static struct nccistatechange nccitable[] =
-{
- /* N-0 */
- {ST_NCCI_NONE, ST_NCCI_OUTGOING, EV_NCCI_CONNECT_B3_REQ, NULL},
- {ST_NCCI_NONE, ST_NCCI_INCOMING, EV_NCCI_CONNECT_B3_IND, NULL},
- /* N-0.1 */
- {ST_NCCI_OUTGOING, ST_NCCI_ALLOCATED, EV_NCCI_CONNECT_B3_CONF_OK, NULL},
- {ST_NCCI_OUTGOING, ST_NCCI_NONE, EV_NCCI_CONNECT_B3_CONF_ERROR, n0},
- /* N-1 */
- {ST_NCCI_INCOMING, ST_NCCI_DISCONNECTING, EV_NCCI_CONNECT_B3_REJECT, NULL},
- {ST_NCCI_INCOMING, ST_NCCI_ALLOCATED, EV_NCCI_CONNECT_B3_RESP, NULL},
- {ST_NCCI_INCOMING, ST_NCCI_DISCONNECTED, EV_NCCI_DISCONNECT_B3_IND, NULL},
- {ST_NCCI_INCOMING, ST_NCCI_DISCONNECTING, EV_NCCI_DISCONNECT_B3_REQ, NULL},
- /* N-2 */
- {ST_NCCI_ALLOCATED, ST_NCCI_ACTIVE, EV_NCCI_CONNECT_B3_ACTIVE_IND, NULL},
- {ST_NCCI_ALLOCATED, ST_NCCI_DISCONNECTED, EV_NCCI_DISCONNECT_B3_IND, NULL},
- {ST_NCCI_ALLOCATED, ST_NCCI_DISCONNECTING, EV_NCCI_DISCONNECT_B3_REQ, NULL},
- /* N-ACT */
- {ST_NCCI_ACTIVE, ST_NCCI_ACTIVE, EV_NCCI_RESET_B3_IND, NULL},
- {ST_NCCI_ACTIVE, ST_NCCI_RESETING, EV_NCCI_RESET_B3_REQ, NULL},
- {ST_NCCI_ACTIVE, ST_NCCI_DISCONNECTED, EV_NCCI_DISCONNECT_B3_IND, NULL},
- {ST_NCCI_ACTIVE, ST_NCCI_DISCONNECTING, EV_NCCI_DISCONNECT_B3_REQ, NULL},
- /* N-3 */
- {ST_NCCI_RESETING, ST_NCCI_ACTIVE, EV_NCCI_RESET_B3_IND, NULL},
- {ST_NCCI_RESETING, ST_NCCI_DISCONNECTED, EV_NCCI_DISCONNECT_B3_IND, NULL},
- {ST_NCCI_RESETING, ST_NCCI_DISCONNECTING, EV_NCCI_DISCONNECT_B3_REQ, NULL},
- /* N-4 */
- {ST_NCCI_DISCONNECTING, ST_NCCI_DISCONNECTED, EV_NCCI_DISCONNECT_B3_IND, NULL},
- {ST_NCCI_DISCONNECTING, ST_NCCI_PREVIOUS, EV_NCCI_DISCONNECT_B3_CONF_ERROR, NULL},
- /* N-5 */
- {ST_NCCI_DISCONNECTED, ST_NCCI_NONE, EV_NCCI_DISCONNECT_B3_RESP, n0},
- {},
-};
-
-static void ncci_change_state(capidrv_contr *card, capidrv_ncci *ncci, int event)
-{
- struct nccistatechange *p = nccitable;
- while (p->event) {
- if (ncci->state == p->actstate && p->event == event) {
- if (debugmode)
- printk(KERN_DEBUG "capidrv-%d: ncci_change_state:0x%x %d -> %d\n",
- card->contrnr, ncci->ncci, ncci->state, p->nextstate);
- if (p->nextstate == ST_NCCI_PREVIOUS) {
- ncci->state = ncci->oldstate;
- ncci->oldstate = p->actstate;
- } else {
- ncci->oldstate = p->actstate;
- ncci->state = p->nextstate;
- }
- if (p->changefunc)
- p->changefunc(card, ncci);
- return;
- }
- p++;
- }
- printk(KERN_ERR "capidrv-%d: ncci_change_state:0x%x state=%d event=%d ????\n",
- card->contrnr, ncci->ncci, ncci->state, event);
-}
-
-/* ------------------------------------------------------------------- */
-
-static inline int new_bchan(capidrv_contr *card)
-{
- int i;
- for (i = 0; i < card->nbchan; i++) {
- if (card->bchans[i].plcip == NULL) {
- card->bchans[i].disconnecting = 0;
- return i;
- }
- }
- return -1;
-}
-
-/* ------------------------------------------------------------------- */
-static char *capi_info2str(u16 reason)
-{
-#ifndef CONFIG_ISDN_CAPI_CAPIDRV_VERBOSE
- return "..";
-#else
- switch (reason) {
-
-/*-- informative values (corresponding message was processed) -----*/
- case 0x0001:
- return "NCPI not supported by current protocol, NCPI ignored";
- case 0x0002:
- return "Flags not supported by current protocol, flags ignored";
- case 0x0003:
- return "Alert already sent by another application";
-
-/*-- error information concerning CAPI_REGISTER -----*/
- case 0x1001:
- return "Too many applications";
- case 0x1002:
- return "Logical block size too small, must be at least 128 Bytes";
- case 0x1003:
- return "Buffer exceeds 64 kByte";
- case 0x1004:
- return "Message buffer size too small, must be at least 1024 Bytes";
- case 0x1005:
- return "Max. number of logical connections not supported";
- case 0x1006:
- return "Reserved";
- case 0x1007:
- return "The message could not be accepted because of an internal busy condition";
- case 0x1008:
- return "OS resource error (no memory ?)";
- case 0x1009:
- return "CAPI not installed";
- case 0x100A:
- return "Controller does not support external equipment";
- case 0x100B:
- return "Controller does only support external equipment";
-
-/*-- error information concerning message exchange functions -----*/
- case 0x1101:
- return "Illegal application number";
- case 0x1102:
- return "Illegal command or subcommand or message length less than 12 bytes";
- case 0x1103:
- return "The message could not be accepted because of a queue full condition !! The error code does not imply that CAPI cannot receive messages directed to another controller, PLCI or NCCI";
- case 0x1104:
- return "Queue is empty";
- case 0x1105:
- return "Queue overflow, a message was lost !! This indicates a configuration error. The only recovery from this error is to perform a CAPI_RELEASE";
- case 0x1106:
- return "Unknown notification parameter";
- case 0x1107:
- return "The Message could not be accepted because of an internal busy condition";
- case 0x1108:
- return "OS Resource error (no memory ?)";
- case 0x1109:
- return "CAPI not installed";
- case 0x110A:
- return "Controller does not support external equipment";
- case 0x110B:
- return "Controller does only support external equipment";
-
-/*-- error information concerning resource / coding problems -----*/
- case 0x2001:
- return "Message not supported in current state";
- case 0x2002:
- return "Illegal Controller / PLCI / NCCI";
- case 0x2003:
- return "Out of PLCI";
- case 0x2004:
- return "Out of NCCI";
- case 0x2005:
- return "Out of LISTEN";
- case 0x2006:
- return "Out of FAX resources (protocol T.30)";
- case 0x2007:
- return "Illegal message parameter coding";
-
-/*-- error information concerning requested services -----*/
- case 0x3001:
- return "B1 protocol not supported";
- case 0x3002:
- return "B2 protocol not supported";
- case 0x3003:
- return "B3 protocol not supported";
- case 0x3004:
- return "B1 protocol parameter not supported";
- case 0x3005:
- return "B2 protocol parameter not supported";
- case 0x3006:
- return "B3 protocol parameter not supported";
- case 0x3007:
- return "B protocol combination not supported";
- case 0x3008:
- return "NCPI not supported";
- case 0x3009:
- return "CIP Value unknown";
- case 0x300A:
- return "Flags not supported (reserved bits)";
- case 0x300B:
- return "Facility not supported";
- case 0x300C:
- return "Data length not supported by current protocol";
- case 0x300D:
- return "Reset procedure not supported by current protocol";
-
-/*-- information about the clearing of a physical connection -----*/
- case 0x3301:
- return "Protocol error layer 1 (broken line or B-channel removed by signalling protocol)";
- case 0x3302:
- return "Protocol error layer 2";
- case 0x3303:
- return "Protocol error layer 3";
- case 0x3304:
- return "Another application got that call";
-/*-- T.30 specific reasons -----*/
- case 0x3311:
- return "Connecting not successful (remote station is no FAX G3 machine)";
- case 0x3312:
- return "Connecting not successful (training error)";
- case 0x3313:
- return "Disconnected before transfer (remote station does not support transfer mode, e.g. resolution)";
- case 0x3314:
- return "Disconnected during transfer (remote abort)";
- case 0x3315:
- return "Disconnected during transfer (remote procedure error, e.g. unsuccessful repetition of T.30 commands)";
- case 0x3316:
- return "Disconnected during transfer (local tx data underrun)";
- case 0x3317:
- return "Disconnected during transfer (local rx data overflow)";
- case 0x3318:
- return "Disconnected during transfer (local abort)";
- case 0x3319:
- return "Illegal parameter coding (e.g. SFF coding error)";
-
-/*-- disconnect causes from the network according to ETS 300 102-1/Q.931 -----*/
- case 0x3481: return "Unallocated (unassigned) number";
- case 0x3482: return "No route to specified transit network";
- case 0x3483: return "No route to destination";
- case 0x3486: return "Channel unacceptable";
- case 0x3487:
- return "Call awarded and being delivered in an established channel";
- case 0x3490: return "Normal call clearing";
- case 0x3491: return "User busy";
- case 0x3492: return "No user responding";
- case 0x3493: return "No answer from user (user alerted)";
- case 0x3495: return "Call rejected";
- case 0x3496: return "Number changed";
- case 0x349A: return "Non-selected user clearing";
- case 0x349B: return "Destination out of order";
- case 0x349C: return "Invalid number format";
- case 0x349D: return "Facility rejected";
- case 0x349E: return "Response to STATUS ENQUIRY";
- case 0x349F: return "Normal, unspecified";
- case 0x34A2: return "No circuit / channel available";
- case 0x34A6: return "Network out of order";
- case 0x34A9: return "Temporary failure";
- case 0x34AA: return "Switching equipment congestion";
- case 0x34AB: return "Access information discarded";
- case 0x34AC: return "Requested circuit / channel not available";
- case 0x34AF: return "Resources unavailable, unspecified";
- case 0x34B1: return "Quality of service unavailable";
- case 0x34B2: return "Requested facility not subscribed";
- case 0x34B9: return "Bearer capability not authorized";
- case 0x34BA: return "Bearer capability not presently available";
- case 0x34BF: return "Service or option not available, unspecified";
- case 0x34C1: return "Bearer capability not implemented";
- case 0x34C2: return "Channel type not implemented";
- case 0x34C5: return "Requested facility not implemented";
- case 0x34C6: return "Only restricted digital information bearer capability is available";
- case 0x34CF: return "Service or option not implemented, unspecified";
- case 0x34D1: return "Invalid call reference value";
- case 0x34D2: return "Identified channel does not exist";
- case 0x34D3: return "A suspended call exists, but this call identity does not";
- case 0x34D4: return "Call identity in use";
- case 0x34D5: return "No call suspended";
- case 0x34D6: return "Call having the requested call identity has been cleared";
- case 0x34D8: return "Incompatible destination";
- case 0x34DB: return "Invalid transit network selection";
- case 0x34DF: return "Invalid message, unspecified";
- case 0x34E0: return "Mandatory information element is missing";
- case 0x34E1: return "Message type non-existent or not implemented";
- case 0x34E2: return "Message not compatible with call state or message type non-existent or not implemented";
- case 0x34E3: return "Information element non-existent or not implemented";
- case 0x34E4: return "Invalid information element contents";
- case 0x34E5: return "Message not compatible with call state";
- case 0x34E6: return "Recovery on timer expiry";
- case 0x34EF: return "Protocol error, unspecified";
- case 0x34FF: return "Interworking, unspecified";
-
- default: return "No additional information";
- }
-#endif
-}
-
-static void handle_controller(_cmsg *cmsg)
-{
- capidrv_contr *card = findcontrbynumber(cmsg->adr.adrController & 0x7f);
-
- if (!card) {
- printk(KERN_ERR "capidrv: %s from unknown controller 0x%x\n",
- capi_cmd2str(cmsg->Command, cmsg->Subcommand),
- cmsg->adr.adrController & 0x7f);
- return;
- }
- switch (CAPICMD(cmsg->Command, cmsg->Subcommand)) {
-
- case CAPI_LISTEN_CONF: /* Controller */
- if (debugmode)
- printk(KERN_DEBUG "capidrv-%d: listenconf Info=0x%4x (%s) cipmask=0x%x\n",
- card->contrnr, cmsg->Info, capi_info2str(cmsg->Info), card->cipmask);
- if (cmsg->Info) {
- listen_change_state(card, EV_LISTEN_CONF_ERROR);
- } else if (card->cipmask == 0) {
- listen_change_state(card, EV_LISTEN_CONF_EMPTY);
- } else {
- listen_change_state(card, EV_LISTEN_CONF_OK);
- }
- break;
-
- case CAPI_MANUFACTURER_IND: /* Controller */
- if (cmsg->ManuID == 0x214D5641
- && cmsg->Class == 0
- && cmsg->Function == 1) {
- u8 *data = cmsg->ManuData + 3;
- u16 len = cmsg->ManuData[0];
- u16 layer;
- int direction;
- if (len == 255) {
- len = (cmsg->ManuData[1] | (cmsg->ManuData[2] << 8));
- data += 2;
- }
- len -= 2;
- layer = ((*(data - 1)) << 8) | *(data - 2);
- if (layer & 0x300)
- direction = (layer & 0x200) ? 0 : 1;
- else direction = (layer & 0x800) ? 0 : 1;
- if (layer & 0x0C00) {
- if ((layer & 0xff) == 0x80) {
- handle_dtrace_data(card, direction, 1, data, len);
- break;
- }
- } else if ((layer & 0xff) < 0x80) {
- handle_dtrace_data(card, direction, 0, data, len);
- break;
- }
- printk(KERN_INFO "capidrv-%d: %s from controller 0x%x layer 0x%x, ignored\n",
- card->contrnr,
- capi_cmd2str(cmsg->Command, cmsg->Subcommand),
- cmsg->adr.adrController, layer);
- break;
- }
- goto ignored;
- case CAPI_MANUFACTURER_CONF: /* Controller */
- if (cmsg->ManuID == 0x214D5641) {
- char *s = NULL;
- switch (cmsg->Class) {
- case 0: break;
- case 1: s = "unknown class"; break;
- case 2: s = "unknown function"; break;
- default: s = "unknown error"; break;
- }
- if (s)
- printk(KERN_INFO "capidrv-%d: %s from controller 0x%x function %d: %s\n",
- card->contrnr,
- capi_cmd2str(cmsg->Command, cmsg->Subcommand),
- cmsg->adr.adrController,
- cmsg->Function, s);
- break;
- }
- goto ignored;
- case CAPI_FACILITY_IND: /* Controller/plci/ncci */
- goto ignored;
- case CAPI_FACILITY_CONF: /* Controller/plci/ncci */
- goto ignored;
- case CAPI_INFO_IND: /* Controller/plci */
- goto ignored;
- case CAPI_INFO_CONF: /* Controller/plci */
- goto ignored;
-
- default:
- printk(KERN_ERR "capidrv-%d: got %s from controller 0x%x ???",
- card->contrnr,
- capi_cmd2str(cmsg->Command, cmsg->Subcommand),
- cmsg->adr.adrController);
- }
- return;
-
-ignored:
- printk(KERN_INFO "capidrv-%d: %s from controller 0x%x ignored\n",
- card->contrnr,
- capi_cmd2str(cmsg->Command, cmsg->Subcommand),
- cmsg->adr.adrController);
-}
-
-static void handle_incoming_call(capidrv_contr *card, _cmsg *cmsg)
-{
- capidrv_plci *plcip;
- capidrv_bchan *bchan;
- isdn_ctrl cmd;
- int chan;
-
- if ((chan = new_bchan(card)) == -1) {
- printk(KERN_ERR "capidrv-%d: incoming call on not existing bchan ?\n", card->contrnr);
- return;
- }
- bchan = &card->bchans[chan];
- if ((plcip = new_plci(card, chan)) == NULL) {
- printk(KERN_ERR "capidrv-%d: incoming call: no memory, sorry.\n", card->contrnr);
- return;
- }
- bchan->incoming = 1;
- plcip->plci = cmsg->adr.adrPLCI;
- plci_change_state(card, plcip, EV_PLCI_CONNECT_IND);
-
- cmd.command = ISDN_STAT_ICALL;
- cmd.driver = card->myid;
- cmd.arg = chan;
- memset(&cmd.parm.setup, 0, sizeof(cmd.parm.setup));
- strncpy(cmd.parm.setup.phone,
- cmsg->CallingPartyNumber + 3,
- cmsg->CallingPartyNumber[0] - 2);
- strncpy(cmd.parm.setup.eazmsn,
- cmsg->CalledPartyNumber + 2,
- cmsg->CalledPartyNumber[0] - 1);
- cmd.parm.setup.si1 = cip2si1(cmsg->CIPValue);
- cmd.parm.setup.si2 = cip2si2(cmsg->CIPValue);
- cmd.parm.setup.plan = cmsg->CallingPartyNumber[1];
- cmd.parm.setup.screen = cmsg->CallingPartyNumber[2];
-
- printk(KERN_INFO "capidrv-%d: incoming call %s,%d,%d,%s\n",
- card->contrnr,
- cmd.parm.setup.phone,
- cmd.parm.setup.si1,
- cmd.parm.setup.si2,
- cmd.parm.setup.eazmsn);
-
- if (cmd.parm.setup.si1 == 1 && cmd.parm.setup.si2 != 0) {
- printk(KERN_INFO "capidrv-%d: patching si2=%d to 0 for VBOX\n",
- card->contrnr,
- cmd.parm.setup.si2);
- cmd.parm.setup.si2 = 0;
- }
-
- switch (card->interface.statcallb(&cmd)) {
- case 0:
- case 3:
- /* No device matched this call,
- * and isdn_common.c has sent a HANGUP command,
- * which is ignored in state ST_PLCI_INCOMING,
- * so we send a RESP to ignore the call.
- */
- capi_cmsg_answer(cmsg);
- cmsg->Reject = 1; /* ignore */
- plci_change_state(card, plcip, EV_PLCI_CONNECT_REJECT);
- send_message(card, cmsg);
- printk(KERN_INFO "capidrv-%d: incoming call %s,%d,%d,%s ignored\n",
- card->contrnr,
- cmd.parm.setup.phone,
- cmd.parm.setup.si1,
- cmd.parm.setup.si2,
- cmd.parm.setup.eazmsn);
- break;
- case 1:
- /* At least one device matched this call (RING on ttyI);
- * the HL driver may send ALERTING on the D-channel in
- * this case.
- * It really means: RING on ttyI, or a net interface
- * has already accepted this call.
- *
- * If the call was accepted, the state has already changed,
- * and a CONNECT_RESP has already been sent.
- */
- if (plcip->state == ST_PLCI_INCOMING) {
- printk(KERN_INFO "capidrv-%d: incoming call %s,%d,%d,%s tty alerting\n",
- card->contrnr,
- cmd.parm.setup.phone,
- cmd.parm.setup.si1,
- cmd.parm.setup.si2,
- cmd.parm.setup.eazmsn);
- capi_fill_ALERT_REQ(cmsg,
- global.ap.applid,
- card->msgid++,
- plcip->plci, /* adr */
- NULL,/* BChannelinformation */
- NULL,/* Keypadfacility */
- NULL,/* Useruserdata */
- NULL /* Facilitydataarray */
- );
- plcip->msgid = cmsg->Messagenumber;
- send_message(card, cmsg);
- } else {
- printk(KERN_INFO "capidrv-%d: incoming call %s,%d,%d,%s on netdev\n",
- card->contrnr,
- cmd.parm.setup.phone,
- cmd.parm.setup.si1,
- cmd.parm.setup.si2,
- cmd.parm.setup.eazmsn);
- }
- break;
-
- case 2: /* Call will be rejected. */
- capi_cmsg_answer(cmsg);
- cmsg->Reject = 2; /* reject call, normal call clearing */
- plci_change_state(card, plcip, EV_PLCI_CONNECT_REJECT);
- send_message(card, cmsg);
- break;
-
- default:
- /* An error happened. (Invalid parameters for example.) */
- capi_cmsg_answer(cmsg);
- cmsg->Reject = 8; /* reject call,
- destination out of order */
- plci_change_state(card, plcip, EV_PLCI_CONNECT_REJECT);
- send_message(card, cmsg);
- break;
- }
- return;
-}
-
-static void handle_plci(_cmsg *cmsg)
-{
- capidrv_contr *card = findcontrbynumber(cmsg->adr.adrController & 0x7f);
- capidrv_plci *plcip;
- isdn_ctrl cmd;
- _cdebbuf *cdb;
-
- if (!card) {
- printk(KERN_ERR "capidrv: %s from unknown controller 0x%x\n",
- capi_cmd2str(cmsg->Command, cmsg->Subcommand),
- cmsg->adr.adrController & 0x7f);
- return;
- }
- switch (CAPICMD(cmsg->Command, cmsg->Subcommand)) {
-
- case CAPI_DISCONNECT_IND: /* plci */
- if (cmsg->Reason) {
- printk(KERN_INFO "capidrv-%d: %s reason 0x%x (%s) for plci 0x%x\n",
- card->contrnr,
- capi_cmd2str(cmsg->Command, cmsg->Subcommand),
- cmsg->Reason, capi_info2str(cmsg->Reason), cmsg->adr.adrPLCI);
- }
- if (!(plcip = find_plci_by_plci(card, cmsg->adr.adrPLCI))) {
- capi_cmsg_answer(cmsg);
- send_message(card, cmsg);
- goto notfound;
- }
- card->bchans[plcip->chan].disconnecting = 1;
- plci_change_state(card, plcip, EV_PLCI_DISCONNECT_IND);
- capi_cmsg_answer(cmsg);
- plci_change_state(card, plcip, EV_PLCI_DISCONNECT_RESP);
- send_message(card, cmsg);
- break;
-
- case CAPI_DISCONNECT_CONF: /* plci */
- if (cmsg->Info) {
- printk(KERN_INFO "capidrv-%d: %s info 0x%x (%s) for plci 0x%x\n",
- card->contrnr,
- capi_cmd2str(cmsg->Command, cmsg->Subcommand),
- cmsg->Info, capi_info2str(cmsg->Info),
- cmsg->adr.adrPLCI);
- }
- if (!(plcip = find_plci_by_plci(card, cmsg->adr.adrPLCI)))
- goto notfound;
-
- card->bchans[plcip->chan].disconnecting = 1;
- break;
-
- case CAPI_ALERT_CONF: /* plci */
- if (cmsg->Info) {
- printk(KERN_INFO "capidrv-%d: %s info 0x%x (%s) for plci 0x%x\n",
- card->contrnr,
- capi_cmd2str(cmsg->Command, cmsg->Subcommand),
- cmsg->Info, capi_info2str(cmsg->Info),
- cmsg->adr.adrPLCI);
- }
- break;
-
- case CAPI_CONNECT_IND: /* plci */
- handle_incoming_call(card, cmsg);
- break;
-
- case CAPI_CONNECT_CONF: /* plci */
- if (cmsg->Info) {
- printk(KERN_INFO "capidrv-%d: %s info 0x%x (%s) for plci 0x%x\n",
- card->contrnr,
- capi_cmd2str(cmsg->Command, cmsg->Subcommand),
- cmsg->Info, capi_info2str(cmsg->Info),
- cmsg->adr.adrPLCI);
- }
- if (!(plcip = find_plci_by_msgid(card, cmsg->Messagenumber)))
- goto notfound;
-
- plcip->plci = cmsg->adr.adrPLCI;
- if (cmsg->Info) {
- plci_change_state(card, plcip, EV_PLCI_CONNECT_CONF_ERROR);
- } else {
- plci_change_state(card, plcip, EV_PLCI_CONNECT_CONF_OK);
- }
- break;
-
- case CAPI_CONNECT_ACTIVE_IND: /* plci */
-
- if (!(plcip = find_plci_by_plci(card, cmsg->adr.adrPLCI)))
- goto notfound;
-
- if (card->bchans[plcip->chan].incoming) {
- capi_cmsg_answer(cmsg);
- plci_change_state(card, plcip, EV_PLCI_CONNECT_ACTIVE_IND);
- send_message(card, cmsg);
- } else {
- capidrv_ncci *nccip;
- capi_cmsg_answer(cmsg);
- send_message(card, cmsg);
-
- nccip = new_ncci(card, plcip, cmsg->adr.adrPLCI);
-
- if (!nccip) {
- printk(KERN_ERR "capidrv-%d: no mem for ncci, sorry\n", card->contrnr);
- break; /* $$$$ */
- }
- capi_fill_CONNECT_B3_REQ(cmsg,
- global.ap.applid,
- card->msgid++,
- plcip->plci, /* adr */
- NULL /* NCPI */
- );
- nccip->msgid = cmsg->Messagenumber;
- plci_change_state(card, plcip,
- EV_PLCI_CONNECT_ACTIVE_IND);
- ncci_change_state(card, nccip, EV_NCCI_CONNECT_B3_REQ);
- send_message(card, cmsg);
- cmd.command = ISDN_STAT_DCONN;
- cmd.driver = card->myid;
- cmd.arg = plcip->chan;
- card->interface.statcallb(&cmd);
- }
- break;
-
- case CAPI_INFO_IND: /* Controller/plci */
-
- if (!(plcip = find_plci_by_plci(card, cmsg->adr.adrPLCI)))
- goto notfound;
-
- if (cmsg->InfoNumber == 0x4000) {
- if (cmsg->InfoElement[0] == 4) {
- cmd.command = ISDN_STAT_CINF;
- cmd.driver = card->myid;
- cmd.arg = plcip->chan;
- sprintf(cmd.parm.num, "%lu",
- (unsigned long)
- ((u32) cmsg->InfoElement[1]
- | ((u32) (cmsg->InfoElement[2]) << 8)
- | ((u32) (cmsg->InfoElement[3]) << 16)
- | ((u32) (cmsg->InfoElement[4]) << 24)));
- card->interface.statcallb(&cmd);
- break;
- }
- }
- cdb = capi_cmsg2str(cmsg);
- if (cdb) {
- printk(KERN_WARNING "capidrv-%d: %s\n",
- card->contrnr, cdb->buf);
- cdebbuf_free(cdb);
- } else
- printk(KERN_WARNING "capidrv-%d: CAPI_INFO_IND InfoNumber %x not handled\n",
- card->contrnr, cmsg->InfoNumber);
-
- break;
-
- case CAPI_CONNECT_ACTIVE_CONF: /* plci */
- goto ignored;
- case CAPI_SELECT_B_PROTOCOL_CONF: /* plci */
- goto ignored;
- case CAPI_FACILITY_IND: /* Controller/plci/ncci */
- goto ignored;
- case CAPI_FACILITY_CONF: /* Controller/plci/ncci */
- goto ignored;
-
- case CAPI_INFO_CONF: /* Controller/plci */
- goto ignored;
-
- default:
- printk(KERN_ERR "capidrv-%d: got %s for plci 0x%x ???",
- card->contrnr,
- capi_cmd2str(cmsg->Command, cmsg->Subcommand),
- cmsg->adr.adrPLCI);
- }
- return;
-ignored:
- printk(KERN_INFO "capidrv-%d: %s for plci 0x%x ignored\n",
- card->contrnr,
- capi_cmd2str(cmsg->Command, cmsg->Subcommand),
- cmsg->adr.adrPLCI);
- return;
-notfound:
- printk(KERN_ERR "capidrv-%d: %s: plci 0x%x not found\n",
- card->contrnr,
- capi_cmd2str(cmsg->Command, cmsg->Subcommand),
- cmsg->adr.adrPLCI);
- return;
-}
-
-static void handle_ncci(_cmsg *cmsg)
-{
- capidrv_contr *card = findcontrbynumber(cmsg->adr.adrController & 0x7f);
- capidrv_plci *plcip;
- capidrv_ncci *nccip;
- isdn_ctrl cmd;
- int len;
-
- if (!card) {
- printk(KERN_ERR "capidrv: %s from unknown controller 0x%x\n",
- capi_cmd2str(cmsg->Command, cmsg->Subcommand),
- cmsg->adr.adrController & 0x7f);
- return;
- }
- switch (CAPICMD(cmsg->Command, cmsg->Subcommand)) {
-
- case CAPI_CONNECT_B3_ACTIVE_IND: /* ncci */
- if (!(nccip = find_ncci(card, cmsg->adr.adrNCCI)))
- goto notfound;
-
- capi_cmsg_answer(cmsg);
- ncci_change_state(card, nccip, EV_NCCI_CONNECT_B3_ACTIVE_IND);
- send_message(card, cmsg);
-
- cmd.command = ISDN_STAT_BCONN;
- cmd.driver = card->myid;
- cmd.arg = nccip->chan;
- card->interface.statcallb(&cmd);
-
- printk(KERN_INFO "capidrv-%d: chan %d up with ncci 0x%x\n",
- card->contrnr, nccip->chan, nccip->ncci);
- break;
-
- case CAPI_CONNECT_B3_ACTIVE_CONF: /* ncci */
- goto ignored;
-
- case CAPI_CONNECT_B3_IND: /* ncci */
-
- plcip = find_plci_by_ncci(card, cmsg->adr.adrNCCI);
- if (plcip) {
- nccip = new_ncci(card, plcip, cmsg->adr.adrNCCI);
- if (nccip) {
- ncci_change_state(card, nccip, EV_NCCI_CONNECT_B3_IND);
- capi_fill_CONNECT_B3_RESP(cmsg,
- global.ap.applid,
- card->msgid++,
- nccip->ncci, /* adr */
- 0, /* Reject */
- NULL /* NCPI */
- );
- ncci_change_state(card, nccip, EV_NCCI_CONNECT_B3_RESP);
- send_message(card, cmsg);
- break;
- }
- printk(KERN_ERR "capidrv-%d: no mem for ncci, sorry\n", card->contrnr);
- } else {
- printk(KERN_ERR "capidrv-%d: %s: plci for ncci 0x%x not found\n",
- card->contrnr,
- capi_cmd2str(cmsg->Command, cmsg->Subcommand),
- cmsg->adr.adrNCCI);
- }
- capi_fill_CONNECT_B3_RESP(cmsg,
- global.ap.applid,
- card->msgid++,
- cmsg->adr.adrNCCI,
- 2, /* Reject */
- NULL /* NCPI */
- );
- send_message(card, cmsg);
- break;
-
- case CAPI_CONNECT_B3_CONF: /* ncci */
-
- if (!(nccip = find_ncci_by_msgid(card,
- cmsg->adr.adrNCCI,
- cmsg->Messagenumber)))
- goto notfound;
-
- nccip->ncci = cmsg->adr.adrNCCI;
- if (cmsg->Info) {
- printk(KERN_INFO "capidrv-%d: %s info 0x%x (%s) for ncci 0x%x\n",
- card->contrnr,
- capi_cmd2str(cmsg->Command, cmsg->Subcommand),
- cmsg->Info, capi_info2str(cmsg->Info),
- cmsg->adr.adrNCCI);
- }
-
- if (cmsg->Info)
- ncci_change_state(card, nccip, EV_NCCI_CONNECT_B3_CONF_ERROR);
- else
- ncci_change_state(card, nccip, EV_NCCI_CONNECT_B3_CONF_OK);
- break;
-
- case CAPI_CONNECT_B3_T90_ACTIVE_IND: /* ncci */
- capi_cmsg_answer(cmsg);
- send_message(card, cmsg);
- break;
-
- case CAPI_DATA_B3_IND: /* ncci */
- /* handled in handle_data() */
- goto ignored;
-
- case CAPI_DATA_B3_CONF: /* ncci */
- if (cmsg->Info) {
- printk(KERN_WARNING "CAPI_DATA_B3_CONF: Info %x - %s\n",
- cmsg->Info, capi_info2str(cmsg->Info));
- }
- if (!(nccip = find_ncci(card, cmsg->adr.adrNCCI)))
- goto notfound;
-
- len = capidrv_del_ack(nccip, cmsg->DataHandle);
- if (len < 0)
- break;
- cmd.command = ISDN_STAT_BSENT;
- cmd.driver = card->myid;
- cmd.arg = nccip->chan;
- cmd.parm.length = len;
- card->interface.statcallb(&cmd);
- break;
-
- case CAPI_DISCONNECT_B3_IND: /* ncci */
- if (!(nccip = find_ncci(card, cmsg->adr.adrNCCI)))
- goto notfound;
-
- card->bchans[nccip->chan].disconnecting = 1;
- ncci_change_state(card, nccip, EV_NCCI_DISCONNECT_B3_IND);
- capi_cmsg_answer(cmsg);
- ncci_change_state(card, nccip, EV_NCCI_DISCONNECT_B3_RESP);
- send_message(card, cmsg);
- break;
-
- case CAPI_DISCONNECT_B3_CONF: /* ncci */
- if (!(nccip = find_ncci(card, cmsg->adr.adrNCCI)))
- goto notfound;
- if (cmsg->Info) {
- printk(KERN_INFO "capidrv-%d: %s info 0x%x (%s) for ncci 0x%x\n",
- card->contrnr,
- capi_cmd2str(cmsg->Command, cmsg->Subcommand),
- cmsg->Info, capi_info2str(cmsg->Info),
- cmsg->adr.adrNCCI);
- ncci_change_state(card, nccip, EV_NCCI_DISCONNECT_B3_CONF_ERROR);
- }
- break;
-
- case CAPI_RESET_B3_IND: /* ncci */
- if (!(nccip = find_ncci(card, cmsg->adr.adrNCCI)))
- goto notfound;
- ncci_change_state(card, nccip, EV_NCCI_RESET_B3_IND);
- capi_cmsg_answer(cmsg);
- send_message(card, cmsg);
- break;
-
- case CAPI_RESET_B3_CONF: /* ncci */
- goto ignored; /* $$$$ */
-
- case CAPI_FACILITY_IND: /* Controller/plci/ncci */
- goto ignored;
- case CAPI_FACILITY_CONF: /* Controller/plci/ncci */
- goto ignored;
-
- default:
- printk(KERN_ERR "capidrv-%d: got %s for ncci 0x%x ???",
- card->contrnr,
- capi_cmd2str(cmsg->Command, cmsg->Subcommand),
- cmsg->adr.adrNCCI);
- }
- return;
-ignored:
- printk(KERN_INFO "capidrv-%d: %s for ncci 0x%x ignored\n",
- card->contrnr,
- capi_cmd2str(cmsg->Command, cmsg->Subcommand),
- cmsg->adr.adrNCCI);
- return;
-notfound:
- printk(KERN_ERR "capidrv-%d: %s: ncci 0x%x not found\n",
- card->contrnr,
- capi_cmd2str(cmsg->Command, cmsg->Subcommand),
- cmsg->adr.adrNCCI);
-}
-
-
-static void handle_data(_cmsg *cmsg, struct sk_buff *skb)
-{
- capidrv_contr *card = findcontrbynumber(cmsg->adr.adrController & 0x7f);
- capidrv_ncci *nccip;
-
- if (!card) {
- printk(KERN_ERR "capidrv: %s from unknown controller 0x%x\n",
- capi_cmd2str(cmsg->Command, cmsg->Subcommand),
- cmsg->adr.adrController & 0x7f);
- kfree_skb(skb);
- return;
- }
- if (!(nccip = find_ncci(card, cmsg->adr.adrNCCI))) {
- printk(KERN_ERR "capidrv-%d: %s: ncci 0x%x not found\n",
- card->contrnr,
- capi_cmd2str(cmsg->Command, cmsg->Subcommand),
- cmsg->adr.adrNCCI);
- kfree_skb(skb);
- return;
- }
- (void) skb_pull(skb, CAPIMSG_LEN(skb->data));
- card->interface.rcvcallb_skb(card->myid, nccip->chan, skb);
- capi_cmsg_answer(cmsg);
- send_message(card, cmsg);
-}
-
-static _cmsg s_cmsg;
-
-static void capidrv_recv_message(struct capi20_appl *ap, struct sk_buff *skb)
-{
- if (capi_message2cmsg(&s_cmsg, skb->data)) {
- printk(KERN_ERR "capidrv: applid=%d: received invalid message\n",
- ap->applid);
- kfree_skb(skb);
- return;
- }
- if (debugmode > 3) {
- _cdebbuf *cdb = capi_cmsg2str(&s_cmsg);
-
- if (cdb) {
- printk(KERN_DEBUG "%s: applid=%d %s\n", __func__,
- ap->applid, cdb->buf);
- cdebbuf_free(cdb);
- } else
- printk(KERN_DEBUG "%s: applid=%d %s not traced\n",
- __func__, ap->applid,
- capi_cmd2str(s_cmsg.Command, s_cmsg.Subcommand));
- }
- if (s_cmsg.Command == CAPI_DATA_B3
- && s_cmsg.Subcommand == CAPI_IND) {
- handle_data(&s_cmsg, skb);
- return;
- }
- if ((s_cmsg.adr.adrController & 0xffffff00) == 0)
- handle_controller(&s_cmsg);
- else if ((s_cmsg.adr.adrPLCI & 0xffff0000) == 0)
- handle_plci(&s_cmsg);
- else
- handle_ncci(&s_cmsg);
- /*
- * data of skb used in s_cmsg,
- * free data when s_cmsg is not used again
- * thanks to Lars Heete <hel@admin.de>
- */
- kfree_skb(skb);
-}
-
-/* ------------------------------------------------------------------- */
-
-#define PUTBYTE_TO_STATUS(card, byte) \
- do { \
- *(card)->q931_write++ = (byte); \
- if ((card)->q931_write > (card)->q931_end) \
- (card)->q931_write = (card)->q931_buf; \
- } while (0)
-
-static void handle_dtrace_data(capidrv_contr *card,
- int send, int level2, u8 *data, u16 len)
-{
- u8 *p, *end;
- isdn_ctrl cmd;
-
- if (!len) {
- printk(KERN_DEBUG "capidrv-%d: avmb1_q931_data: len == %d\n",
- card->contrnr, len);
- return;
- }
-
- if (level2) {
- PUTBYTE_TO_STATUS(card, 'D');
- PUTBYTE_TO_STATUS(card, '2');
- PUTBYTE_TO_STATUS(card, send ? '>' : '<');
- PUTBYTE_TO_STATUS(card, ':');
- } else {
- PUTBYTE_TO_STATUS(card, 'D');
- PUTBYTE_TO_STATUS(card, '3');
- PUTBYTE_TO_STATUS(card, send ? '>' : '<');
- PUTBYTE_TO_STATUS(card, ':');
- }
-
- for (p = data, end = data + len; p < end; p++) {
- PUTBYTE_TO_STATUS(card, ' ');
- PUTBYTE_TO_STATUS(card, hex_asc_hi(*p));
- PUTBYTE_TO_STATUS(card, hex_asc_lo(*p));
- }
- PUTBYTE_TO_STATUS(card, '\n');
-
- cmd.command = ISDN_STAT_STAVAIL;
- cmd.driver = card->myid;
- cmd.arg = len * 3 + 5;
- card->interface.statcallb(&cmd);
-}
-
-/* ------------------------------------------------------------------- */
-
-static _cmsg cmdcmsg;
-
-static int capidrv_ioctl(isdn_ctrl *c, capidrv_contr *card)
-{
- switch (c->arg) {
- case 1:
- debugmode = (int)(*((unsigned int *)c->parm.num));
- printk(KERN_DEBUG "capidrv-%d: debugmode=%d\n",
- card->contrnr, debugmode);
- return 0;
- default:
- printk(KERN_DEBUG "capidrv-%d: capidrv_ioctl(%ld) called ??\n",
- card->contrnr, c->arg);
- return -EINVAL;
- }
- return -EINVAL;
-}
-
-/*
- * Handle leased lines (CAPI-Bundling)
- */
-
-struct internal_bchannelinfo {
- unsigned short channelalloc;
- unsigned short operation;
- unsigned char cmask[31];
-};
-
-static int decodeFVteln(char *teln, unsigned long *bmaskp, int *activep)
-{
- unsigned long bmask = 0;
- int active = !0;
- char *s;
- int i;
-
- if (strncmp(teln, "FV:", 3) != 0)
- return 1;
- s = teln + 3;
- while (*s && *s == ' ') s++;
- if (!*s) return -2;
- if (*s == 'p' || *s == 'P') {
- active = 0;
- s++;
- }
- if (*s == 'a' || *s == 'A') {
- active = !0;
- s++;
- }
- while (*s) {
- int digit1 = 0;
- int digit2 = 0;
- char *endp;
-
- digit1 = simple_strtoul(s, &endp, 10);
- if (s == endp)
- return -3;
- s = endp;
-
- if (digit1 <= 0 || digit1 > 30) return -4;
- if (*s == 0 || *s == ',' || *s == ' ') {
- bmask |= (1 << digit1);
- digit1 = 0;
- if (*s) s++;
- continue;
- }
- if (*s != '-') return -5;
- s++;
-
- digit2 = simple_strtoul(s, &endp, 10);
- if (s == endp)
- return -3;
- s = endp;
-
- if (digit2 <= 0 || digit2 > 30) return -4;
- if (*s == 0 || *s == ',' || *s == ' ') {
- if (digit1 > digit2)
- for (i = digit2; i <= digit1; i++)
- bmask |= (1 << i);
- else
- for (i = digit1; i <= digit2; i++)
- bmask |= (1 << i);
- digit1 = digit2 = 0;
- if (*s) s++;
- continue;
- }
- return -6;
- }
- if (activep) *activep = active;
- if (bmaskp) *bmaskp = bmask;
- return 0;
-}
-
-static int FVteln2capi20(char *teln, u8 AdditionalInfo[1 + 2 + 2 + 31])
-{
- unsigned long bmask;
- int active;
- int rc, i;
-
- rc = decodeFVteln(teln, &bmask, &active);
- if (rc) return rc;
- /* Length */
- AdditionalInfo[0] = 2 + 2 + 31;
- /* Channel: 3 => use channel allocation */
- AdditionalInfo[1] = 3; AdditionalInfo[2] = 0;
- /* Operation: 0 => DTE mode, 1 => DCE mode */
- if (active) {
- AdditionalInfo[3] = 0; AdditionalInfo[4] = 0;
- } else {
- AdditionalInfo[3] = 1; AdditionalInfo[4] = 0;
- }
- /* Channel mask array */
- AdditionalInfo[5] = 0; /* no D-Channel */
- for (i = 1; i <= 30; i++)
- AdditionalInfo[5 + i] = (bmask & (1 << i)) ? 0xff : 0;
- return 0;
-}
-
-static int capidrv_command(isdn_ctrl *c, capidrv_contr *card)
-{
- isdn_ctrl cmd;
- struct capidrv_bchan *bchan;
- struct capidrv_plci *plcip;
- u8 AdditionalInfo[1 + 2 + 2 + 31];
- int rc, isleasedline = 0;
-
- if (c->command == ISDN_CMD_IOCTL)
- return capidrv_ioctl(c, card);
-
- switch (c->command) {
- case ISDN_CMD_DIAL: {
- u8 calling[ISDN_MSNLEN + 3];
- u8 called[ISDN_MSNLEN + 2];
-
- if (debugmode)
- printk(KERN_DEBUG "capidrv-%d: ISDN_CMD_DIAL(ch=%ld,\"%s,%d,%d,%s\")\n",
- card->contrnr,
- c->arg,
- c->parm.setup.phone,
- c->parm.setup.si1,
- c->parm.setup.si2,
- c->parm.setup.eazmsn);
-
- bchan = &card->bchans[c->arg % card->nbchan];
-
- if (bchan->plcip) {
- printk(KERN_ERR "capidrv-%d: dail ch=%ld,\"%s,%d,%d,%s\" in use (plci=0x%x)\n",
- card->contrnr,
- c->arg,
- c->parm.setup.phone,
- c->parm.setup.si1,
- c->parm.setup.si2,
- c->parm.setup.eazmsn,
- bchan->plcip->plci);
- return 0;
- }
- bchan->si1 = c->parm.setup.si1;
- bchan->si2 = c->parm.setup.si2;
-
- strncpy(bchan->num, c->parm.setup.phone, sizeof(bchan->num));
- strncpy(bchan->mynum, c->parm.setup.eazmsn, sizeof(bchan->mynum));
- rc = FVteln2capi20(bchan->num, AdditionalInfo);
- isleasedline = (rc == 0);
- if (rc < 0)
- printk(KERN_ERR "capidrv-%d: WARNING: invalid leased linedefinition \"%s\"\n", card->contrnr, bchan->num);
-
- if (isleasedline) {
- calling[0] = 0;
- called[0] = 0;
- if (debugmode)
- printk(KERN_DEBUG "capidrv-%d: connecting leased line\n", card->contrnr);
- } else {
- calling[0] = strlen(bchan->mynum) + 2;
- calling[1] = 0;
- calling[2] = 0x80;
- strncpy(calling + 3, bchan->mynum, ISDN_MSNLEN);
- called[0] = strlen(bchan->num) + 1;
- called[1] = 0x80;
- strncpy(called + 2, bchan->num, ISDN_MSNLEN);
- }
-
- capi_fill_CONNECT_REQ(&cmdcmsg,
- global.ap.applid,
- card->msgid++,
- card->contrnr, /* adr */
- si2cip(bchan->si1, bchan->si2), /* cipvalue */
- called, /* CalledPartyNumber */
- calling, /* CallingPartyNumber */
- NULL, /* CalledPartySubaddress */
- NULL, /* CallingPartySubaddress */
- b1prot(bchan->l2, bchan->l3), /* B1protocol */
- b2prot(bchan->l2, bchan->l3), /* B2protocol */
- b3prot(bchan->l2, bchan->l3), /* B3protocol */
- b1config(bchan->l2, bchan->l3), /* B1configuration */
- NULL, /* B2configuration */
- NULL, /* B3configuration */
- NULL, /* BC */
- NULL, /* LLC */
- NULL, /* HLC */
- /* BChannelinformation */
- isleasedline ? AdditionalInfo : NULL,
- NULL, /* Keypadfacility */
- NULL, /* Useruserdata */
- NULL /* Facilitydataarray */
- );
- if ((plcip = new_plci(card, (c->arg % card->nbchan))) == NULL) {
- cmd.command = ISDN_STAT_DHUP;
- cmd.driver = card->myid;
- cmd.arg = (c->arg % card->nbchan);
- card->interface.statcallb(&cmd);
- return -1;
- }
- plcip->msgid = cmdcmsg.Messagenumber;
- plcip->leasedline = isleasedline;
- plci_change_state(card, plcip, EV_PLCI_CONNECT_REQ);
- send_message(card, &cmdcmsg);
- return 0;
- }
-
- case ISDN_CMD_ACCEPTD:
-
- bchan = &card->bchans[c->arg % card->nbchan];
- if (debugmode)
- printk(KERN_DEBUG "capidrv-%d: ISDN_CMD_ACCEPTD(ch=%ld) l2=%d l3=%d\n",
- card->contrnr,
- c->arg, bchan->l2, bchan->l3);
-
- capi_fill_CONNECT_RESP(&cmdcmsg,
- global.ap.applid,
- card->msgid++,
- bchan->plcip->plci, /* adr */
- 0, /* Reject */
- b1prot(bchan->l2, bchan->l3), /* B1protocol */
- b2prot(bchan->l2, bchan->l3), /* B2protocol */
- b3prot(bchan->l2, bchan->l3), /* B3protocol */
- b1config(bchan->l2, bchan->l3), /* B1configuration */
- NULL, /* B2configuration */
- NULL, /* B3configuration */
- NULL, /* ConnectedNumber */
- NULL, /* ConnectedSubaddress */
- NULL, /* LLC */
- NULL, /* BChannelinformation */
- NULL, /* Keypadfacility */
- NULL, /* Useruserdata */
- NULL /* Facilitydataarray */
- );
- if (capi_cmsg2message(&cmdcmsg, cmdcmsg.buf)) {
- printk(KERN_ERR "capidrv-%d: capidrv_command: parser failure\n",
- card->contrnr);
- return -EINVAL;
- }
- plci_change_state(card, bchan->plcip, EV_PLCI_CONNECT_RESP);
- send_message(card, &cmdcmsg);
- return 0;
-
- case ISDN_CMD_ACCEPTB:
- if (debugmode)
- printk(KERN_DEBUG "capidrv-%d: ISDN_CMD_ACCEPTB(ch=%ld)\n",
- card->contrnr,
- c->arg);
- return -ENOSYS;
-
- case ISDN_CMD_HANGUP:
- if (debugmode)
- printk(KERN_DEBUG "capidrv-%d: ISDN_CMD_HANGUP(ch=%ld)\n",
- card->contrnr,
- c->arg);
- bchan = &card->bchans[c->arg % card->nbchan];
-
- if (bchan->disconnecting) {
- if (debugmode)
- printk(KERN_DEBUG "capidrv-%d: chan %ld already disconnecting ...\n",
- card->contrnr,
- c->arg);
- return 0;
- }
- if (bchan->nccip) {
- bchan->disconnecting = 1;
- capi_fill_DISCONNECT_B3_REQ(&cmdcmsg,
- global.ap.applid,
- card->msgid++,
- bchan->nccip->ncci,
- NULL /* NCPI */
- );
- ncci_change_state(card, bchan->nccip, EV_NCCI_DISCONNECT_B3_REQ);
- send_message(card, &cmdcmsg);
- return 0;
- } else if (bchan->plcip) {
- if (bchan->plcip->state == ST_PLCI_INCOMING) {
- /*
- * just ignore; we are called from
- * isdn_status_callback(),
- * which will return 0 or 2; this is handled
- * by the CONNECT_IND handler
- */
- bchan->disconnecting = 1;
- return 0;
- } else if (bchan->plcip->plci) {
- bchan->disconnecting = 1;
- capi_fill_DISCONNECT_REQ(&cmdcmsg,
- global.ap.applid,
- card->msgid++,
- bchan->plcip->plci,
- NULL, /* BChannelinformation */
- NULL, /* Keypadfacility */
- NULL, /* Useruserdata */
- NULL /* Facilitydataarray */
- );
- plci_change_state(card, bchan->plcip, EV_PLCI_DISCONNECT_REQ);
- send_message(card, &cmdcmsg);
- return 0;
- } else {
- printk(KERN_ERR "capidrv-%d: chan %ld disconnect request while waiting for CONNECT_CONF\n",
- card->contrnr,
- c->arg);
- return -EINVAL;
- }
- }
- printk(KERN_ERR "capidrv-%d: chan %ld disconnect request on free channel\n",
- card->contrnr,
- c->arg);
- return -EINVAL;
-/* ready */
-
- case ISDN_CMD_SETL2:
- if (debugmode)
- printk(KERN_DEBUG "capidrv-%d: set L2 on chan %ld to %ld\n",
- card->contrnr,
- (c->arg & 0xff), (c->arg >> 8));
- bchan = &card->bchans[(c->arg & 0xff) % card->nbchan];
- bchan->l2 = (c->arg >> 8);
- return 0;
-
- case ISDN_CMD_SETL3:
- if (debugmode)
- printk(KERN_DEBUG "capidrv-%d: set L3 on chan %ld to %ld\n",
- card->contrnr,
- (c->arg & 0xff), (c->arg >> 8));
- bchan = &card->bchans[(c->arg & 0xff) % card->nbchan];
- bchan->l3 = (c->arg >> 8);
- return 0;
-
- case ISDN_CMD_SETEAZ:
- if (debugmode)
- printk(KERN_DEBUG "capidrv-%d: set EAZ \"%s\" on chan %ld\n",
- card->contrnr,
- c->parm.num, c->arg);
- bchan = &card->bchans[c->arg % card->nbchan];
- strncpy(bchan->msn, c->parm.num, ISDN_MSNLEN);
- return 0;
-
- case ISDN_CMD_CLREAZ:
- if (debugmode)
- printk(KERN_DEBUG "capidrv-%d: clearing EAZ on chan %ld\n",
- card->contrnr, c->arg);
- bchan = &card->bchans[c->arg % card->nbchan];
- bchan->msn[0] = 0;
- return 0;
-
- default:
- printk(KERN_ERR "capidrv-%d: ISDN_CMD_%d, Huh?\n",
- card->contrnr, c->command);
- return -EINVAL;
- }
- return 0;
-}
-
-static int if_command(isdn_ctrl *c)
-{
- capidrv_contr *card = findcontrbydriverid(c->driver);
-
- if (card)
- return capidrv_command(c, card);
-
- printk(KERN_ERR
- "capidrv: if_command %d called with invalid driverId %d!\n",
- c->command, c->driver);
- return -ENODEV;
-}
-
-static _cmsg sendcmsg;
-
-static int if_sendbuf(int id, int channel, int doack, struct sk_buff *skb)
-{
- capidrv_contr *card = findcontrbydriverid(id);
- capidrv_bchan *bchan;
- capidrv_ncci *nccip;
- int len = skb->len;
- int msglen;
- u16 errcode;
- u16 datahandle;
- u32 data;
-
- if (!card) {
- printk(KERN_ERR "capidrv: if_sendbuf called with invalid driverId %d!\n",
- id);
- return 0;
- }
- if (debugmode > 4)
- printk(KERN_DEBUG "capidrv-%d: sendbuf len=%d skb=%p doack=%d\n",
- card->contrnr, len, skb, doack);
- bchan = &card->bchans[channel % card->nbchan];
- nccip = bchan->nccip;
- if (!nccip || nccip->state != ST_NCCI_ACTIVE) {
- printk(KERN_ERR "capidrv-%d: if_sendbuf: %s:%d: chan not up!\n",
- card->contrnr, card->name, channel);
- return 0;
- }
- datahandle = nccip->datahandle;
-
- /*
- * Here we copy the pointer skb->data into the 32-bit 'Data' field.
- * The 'Data' field is not used in practice in the Linux kernel
- * (neither in 32 nor in 64 bit), but should have some value,
- * since a CAPI message trace will display it.
- *
- * The correct value in the 32-bit case is the address of the
- * data; in 64 bit it makes no sense, so we use 0 there.
- */
-
-#ifdef CONFIG_64BIT
- data = 0;
-#else
- data = (unsigned long) skb->data;
-#endif
-
- capi_fill_DATA_B3_REQ(&sendcmsg, global.ap.applid, card->msgid++,
- nccip->ncci, /* adr */
- data, /* Data */
- skb->len, /* DataLength */
- datahandle, /* DataHandle */
- 0 /* Flags */
- );
-
- if (capidrv_add_ack(nccip, datahandle, doack ? (int)skb->len : -1) < 0)
- return 0;
-
- if (capi_cmsg2message(&sendcmsg, sendcmsg.buf)) {
- printk(KERN_ERR "capidrv-%d: if_sendbuf: parser failure\n",
- card->contrnr);
- return -EINVAL;
- }
- msglen = CAPIMSG_LEN(sendcmsg.buf);
- if (skb_headroom(skb) < msglen) {
- struct sk_buff *nskb = skb_realloc_headroom(skb, msglen);
- if (!nskb) {
- printk(KERN_ERR "capidrv-%d: if_sendbuf: no memory\n",
- card->contrnr);
- (void)capidrv_del_ack(nccip, datahandle);
- return 0;
- }
- printk(KERN_DEBUG "capidrv-%d: only %d bytes headroom, need %d\n",
- card->contrnr, skb_headroom(skb), msglen);
- memcpy(skb_push(nskb, msglen), sendcmsg.buf, msglen);
- errcode = capi20_put_message(&global.ap, nskb);
- if (errcode == CAPI_NOERROR) {
- dev_kfree_skb(skb);
- nccip->datahandle++;
- return len;
- }
- if (debugmode > 3)
- printk(KERN_DEBUG "capidrv-%d: sendbuf putmsg ret(%x) - %s\n",
- card->contrnr, errcode, capi_info2str(errcode));
- (void)capidrv_del_ack(nccip, datahandle);
- dev_kfree_skb(nskb);
- return errcode == CAPI_SENDQUEUEFULL ? 0 : -1;
- } else {
- memcpy(skb_push(skb, msglen), sendcmsg.buf, msglen);
- errcode = capi20_put_message(&global.ap, skb);
- if (errcode == CAPI_NOERROR) {
- nccip->datahandle++;
- return len;
- }
- if (debugmode > 3)
- printk(KERN_DEBUG "capidrv-%d: sendbuf putmsg ret(%x) - %s\n",
- card->contrnr, errcode, capi_info2str(errcode));
- skb_pull(skb, msglen);
- (void)capidrv_del_ack(nccip, datahandle);
- return errcode == CAPI_SENDQUEUEFULL ? 0 : -1;
- }
-}
-
-static int if_readstat(u8 __user *buf, int len, int id, int channel)
-{
- capidrv_contr *card = findcontrbydriverid(id);
- int count;
- u8 __user *p;
-
- if (!card) {
- printk(KERN_ERR "capidrv: if_readstat called with invalid driverId %d!\n",
- id);
- return -ENODEV;
- }
-
- for (p = buf, count = 0; count < len; p++, count++) {
- if (put_user(*card->q931_read++, p))
- return -EFAULT;
- if (card->q931_read > card->q931_end)
- card->q931_read = card->q931_buf;
- }
- return count;
-
-}
-
-static void enable_dchannel_trace(capidrv_contr *card)
-{
- u8 manufacturer[CAPI_MANUFACTURER_LEN];
- capi_version version;
- u16 contr = card->contrnr;
- u16 errcode;
- u16 avmversion[3];
-
- errcode = capi20_get_manufacturer(contr, manufacturer);
- if (errcode != CAPI_NOERROR) {
- printk(KERN_ERR "%s: can't get manufacturer (0x%x)\n",
- card->name, errcode);
- return;
- }
- if (strstr(manufacturer, "AVM") == NULL) {
- printk(KERN_ERR "%s: not from AVM, no d-channel trace possible (%s)\n",
- card->name, manufacturer);
- return;
- }
- errcode = capi20_get_version(contr, &version);
- if (errcode != CAPI_NOERROR) {
- printk(KERN_ERR "%s: can't get version (0x%x)\n",
- card->name, errcode);
- return;
- }
- avmversion[0] = (version.majormanuversion >> 4) & 0x0f;
- avmversion[1] = (version.majormanuversion << 4) & 0xf0;
- avmversion[1] |= (version.minormanuversion >> 4) & 0x0f;
- avmversion[2] = version.minormanuversion & 0x0f;
-
- if (avmversion[0] > 3 || (avmversion[0] == 3 && avmversion[1] > 5)) {
- printk(KERN_INFO "%s: D2 trace enabled\n", card->name);
- capi_fill_MANUFACTURER_REQ(&cmdcmsg, global.ap.applid,
- card->msgid++,
- contr,
- 0x214D5641, /* ManuID */
- 0, /* Class */
- 1, /* Function */
- (_cstruct)"\004\200\014\000\000");
- } else {
- printk(KERN_INFO "%s: D3 trace enabled\n", card->name);
- capi_fill_MANUFACTURER_REQ(&cmdcmsg, global.ap.applid,
- card->msgid++,
- contr,
- 0x214D5641, /* ManuID */
- 0, /* Class */
- 1, /* Function */
- (_cstruct)"\004\002\003\000\000");
- }
- send_message(card, &cmdcmsg);
-}
-
-
-static void send_listen(capidrv_contr *card)
-{
- capi_fill_LISTEN_REQ(&cmdcmsg, global.ap.applid,
- card->msgid++,
- card->contrnr, /* controller */
- 1 << 6, /* Infomask */
- card->cipmask,
- card->cipmask2,
- NULL, NULL);
- listen_change_state(card, EV_LISTEN_REQ);
- send_message(card, &cmdcmsg);
-}
-
-static void listentimerfunc(struct timer_list *t)
-{
- capidrv_contr *card = from_timer(card, t, listentimer);
- if (card->state != ST_LISTEN_NONE && card->state != ST_LISTEN_ACTIVE)
- printk(KERN_ERR "%s: controller dead ??\n", card->name);
- send_listen(card);
- mod_timer(&card->listentimer, jiffies + 60 * HZ);
-}
-
-
-static int capidrv_addcontr(u16 contr, struct capi_profile *profp)
-{
- capidrv_contr *card;
- unsigned long flags;
- isdn_ctrl cmd;
- char id[20];
- int i;
-
- sprintf(id, "capidrv-%d", contr);
- if (!try_module_get(THIS_MODULE)) {
- printk(KERN_WARNING "capidrv: (%s) Could not reserve module\n", id);
- return -1;
- }
- if (!(card = kzalloc(sizeof(capidrv_contr), GFP_ATOMIC))) {
- printk(KERN_WARNING
- "capidrv: (%s) Could not allocate contr-struct.\n", id);
- return -1;
- }
- card->owner = THIS_MODULE;
- timer_setup(&card->listentimer, listentimerfunc, 0);
- strcpy(card->name, id);
- card->contrnr = contr;
- card->nbchan = profp->nbchannel;
- card->bchans = kmalloc_array(card->nbchan, sizeof(capidrv_bchan),
- GFP_ATOMIC);
- if (!card->bchans) {
- printk(KERN_WARNING
- "capidrv: (%s) Could not allocate bchan-structs.\n", id);
- module_put(card->owner);
- kfree(card);
- return -1;
- }
- card->interface.channels = profp->nbchannel;
- card->interface.maxbufsize = 2048;
- card->interface.command = if_command;
- card->interface.writebuf_skb = if_sendbuf;
- card->interface.writecmd = NULL;
- card->interface.readstat = if_readstat;
- card->interface.features =
- ISDN_FEATURE_L2_HDLC |
- ISDN_FEATURE_L2_TRANS |
- ISDN_FEATURE_L3_TRANS |
- ISDN_FEATURE_P_UNKNOWN |
- ISDN_FEATURE_L2_X75I |
- ISDN_FEATURE_L2_X75UI |
- ISDN_FEATURE_L2_X75BUI;
- if (profp->support1 & (1 << 2))
- card->interface.features |=
- ISDN_FEATURE_L2_V11096 |
- ISDN_FEATURE_L2_V11019 |
- ISDN_FEATURE_L2_V11038;
- if (profp->support1 & (1 << 8))
- card->interface.features |= ISDN_FEATURE_L2_MODEM;
- card->interface.hl_hdrlen = 22; /* len of DATA_B3_REQ */
- strncpy(card->interface.id, id, sizeof(card->interface.id) - 1);
-
-
- card->q931_read = card->q931_buf;
- card->q931_write = card->q931_buf;
- card->q931_end = card->q931_buf + sizeof(card->q931_buf) - 1;
-
- if (!register_isdn(&card->interface)) {
- printk(KERN_ERR "capidrv: Unable to register contr %s\n", id);
- kfree(card->bchans);
- module_put(card->owner);
- kfree(card);
- return -1;
- }
- card->myid = card->interface.channels;
- memset(card->bchans, 0, sizeof(capidrv_bchan) * card->nbchan);
- for (i = 0; i < card->nbchan; i++) {
- card->bchans[i].contr = card;
- }
-
- spin_lock_irqsave(&global_lock, flags);
- card->next = global.contr_list;
- global.contr_list = card;
- global.ncontr++;
- spin_unlock_irqrestore(&global_lock, flags);
-
- cmd.command = ISDN_STAT_RUN;
- cmd.driver = card->myid;
- card->interface.statcallb(&cmd);
-
- card->cipmask = 0x1FFF03FF; /* any */
- card->cipmask2 = 0;
-
- send_listen(card);
- mod_timer(&card->listentimer, jiffies + 60 * HZ);
-
- printk(KERN_INFO "%s: now up (%d B channels)\n",
- card->name, card->nbchan);
-
- enable_dchannel_trace(card);
-
- return 0;
-}
-
-static int capidrv_delcontr(u16 contr)
-{
- capidrv_contr **pp, *card;
- unsigned long flags;
- isdn_ctrl cmd;
-
- spin_lock_irqsave(&global_lock, flags);
- for (card = global.contr_list; card; card = card->next) {
- if (card->contrnr == contr)
- break;
- }
- if (!card) {
- spin_unlock_irqrestore(&global_lock, flags);
- printk(KERN_ERR "capidrv: delcontr: no contr %u\n", contr);
- return -1;
- }
-
- /* FIXME: possible race condition; the card should be removed
- * from the global list here. /kkeil
- */
- spin_unlock_irqrestore(&global_lock, flags);
-
- del_timer(&card->listentimer);
-
- if (debugmode)
- printk(KERN_DEBUG "capidrv-%d: id=%d unloading\n",
- card->contrnr, card->myid);
-
- cmd.command = ISDN_STAT_STOP;
- cmd.driver = card->myid;
- card->interface.statcallb(&cmd);
-
- while (card->nbchan) {
-
- cmd.command = ISDN_STAT_DISCH;
- cmd.driver = card->myid;
- cmd.arg = card->nbchan - 1;
- cmd.parm.num[0] = 0;
- if (debugmode)
- printk(KERN_DEBUG "capidrv-%d: id=%d disable chan=%ld\n",
- card->contrnr, card->myid, cmd.arg);
- card->interface.statcallb(&cmd);
-
- if (card->bchans[card->nbchan - 1].nccip)
- free_ncci(card, card->bchans[card->nbchan - 1].nccip);
- if (card->bchans[card->nbchan - 1].plcip)
- free_plci(card, card->bchans[card->nbchan - 1].plcip);
- if (card->plci_list)
- printk(KERN_ERR "capidrv: bug in free_plci()\n");
- card->nbchan--;
- }
- kfree(card->bchans);
- card->bchans = NULL;
-
- if (debugmode)
- printk(KERN_DEBUG "capidrv-%d: id=%d isdn unload\n",
- card->contrnr, card->myid);
-
- cmd.command = ISDN_STAT_UNLOAD;
- cmd.driver = card->myid;
- card->interface.statcallb(&cmd);
-
- if (debugmode)
- printk(KERN_DEBUG "capidrv-%d: id=%d remove contr from list\n",
- card->contrnr, card->myid);
-
- spin_lock_irqsave(&global_lock, flags);
- for (pp = &global.contr_list; *pp; pp = &(*pp)->next) {
- if (*pp == card) {
- *pp = (*pp)->next;
- card->next = NULL;
- global.ncontr--;
- break;
- }
- }
- spin_unlock_irqrestore(&global_lock, flags);
-
- module_put(card->owner);
- printk(KERN_INFO "%s: now down.\n", card->name);
- kfree(card);
- return 0;
-}
-
-
-static int
-lower_callback(struct notifier_block *nb, unsigned long val, void *v)
-{
- capi_profile profile;
- u32 contr = (long)v;
-
- switch (val) {
- case CAPICTR_UP:
- printk(KERN_INFO "capidrv: controller %hu up\n", contr);
- if (capi20_get_profile(contr, &profile) == CAPI_NOERROR)
- (void) capidrv_addcontr(contr, &profile);
- break;
- case CAPICTR_DOWN:
- printk(KERN_INFO "capidrv: controller %hu down\n", contr);
- (void) capidrv_delcontr(contr);
- break;
- }
- return NOTIFY_OK;
-}
-
-/*
- * /proc/capi/capidrv:
- * nrecvctlpkt nrecvdatapkt nsendctlpkt nsenddatapkt
- */
-static int __maybe_unused capidrv_proc_show(struct seq_file *m, void *v)
-{
- seq_printf(m, "%lu %lu %lu %lu\n",
- global.ap.nrecvctlpkt,
- global.ap.nrecvdatapkt,
- global.ap.nsentctlpkt,
- global.ap.nsentdatapkt);
- return 0;
-}
-
-static void __init proc_init(void)
-{
- proc_create_single("capi/capidrv", 0, NULL, capidrv_proc_show);
-}
-
-static void __exit proc_exit(void)
-{
- remove_proc_entry("capi/capidrv", NULL);
-}
-
-static struct notifier_block capictr_nb = {
- .notifier_call = lower_callback,
-};
-
-static int __init capidrv_init(void)
-{
- capi_profile profile;
- u32 ncontr, contr;
- u16 errcode;
-
- global.ap.rparam.level3cnt = -2; /* twice the number of B channels */
- global.ap.rparam.datablkcnt = 16;
- global.ap.rparam.datablklen = 2048;
-
- global.ap.recv_message = capidrv_recv_message;
- errcode = capi20_register(&global.ap);
- if (errcode) {
- return -EIO;
- }
-
- register_capictr_notifier(&capictr_nb);
-
- errcode = capi20_get_profile(0, &profile);
- if (errcode != CAPI_NOERROR) {
- unregister_capictr_notifier(&capictr_nb);
- capi20_release(&global.ap);
- return -EIO;
- }
-
- ncontr = profile.ncontroller;
- for (contr = 1; contr <= ncontr; contr++) {
- errcode = capi20_get_profile(contr, &profile);
- if (errcode != CAPI_NOERROR)
- continue;
- (void) capidrv_addcontr(contr, &profile);
- }
- proc_init();
-
- return 0;
-}
-
-static void __exit capidrv_exit(void)
-{
- unregister_capictr_notifier(&capictr_nb);
- capi20_release(&global.ap);
-
- proc_exit();
-}
-
-module_init(capidrv_init);
-module_exit(capidrv_exit);
diff --git a/drivers/isdn/capi/capidrv.h b/drivers/isdn/capi/capidrv.h
deleted file mode 100644
index 4466b2e0176d..000000000000
--- a/drivers/isdn/capi/capidrv.h
+++ /dev/null
@@ -1,140 +0,0 @@
-/* $Id: capidrv.h,v 1.2.8.2 2001/09/23 22:24:33 kai Exp $
- *
- * ISDN4Linux Driver, using capi20 interface (kernelcapi)
- *
- * Copyright 1997 by Carsten Paeth <calle@calle.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#ifndef __CAPIDRV_H__
-#define __CAPIDRV_H__
-
-/*
- * LISTEN state machine
- */
-#define ST_LISTEN_NONE 0 /* L-0 */
-#define ST_LISTEN_WAIT_CONF 1 /* L-0.1 */
-#define ST_LISTEN_ACTIVE 2 /* L-1 */
-#define ST_LISTEN_ACTIVE_WAIT_CONF 3 /* L-1.1 */
-
-
-#define EV_LISTEN_REQ 1 /* L-0 -> L-0.1
- L-1 -> L-1.1 */
-#define EV_LISTEN_CONF_ERROR 2 /* L-0.1 -> L-0
- L-1.1 -> L-1 */
-#define EV_LISTEN_CONF_EMPTY 3 /* L-0.1 -> L-0
- L-1.1 -> L-0 */
-#define EV_LISTEN_CONF_OK 4 /* L-0.1 -> L-1
- L-1.1 -> L-1 */
-
-/*
- * per plci state machine
- */
-#define ST_PLCI_NONE 0 /* P-0 */
-#define ST_PLCI_OUTGOING 1 /* P-0.1 */
-#define ST_PLCI_ALLOCATED 2 /* P-1 */
-#define ST_PLCI_ACTIVE 3 /* P-ACT */
-#define ST_PLCI_INCOMING 4 /* P-2 */
-#define ST_PLCI_FACILITY_IND 5 /* P-3 */
-#define ST_PLCI_ACCEPTING 6 /* P-4 */
-#define ST_PLCI_DISCONNECTING 7 /* P-5 */
-#define ST_PLCI_DISCONNECTED 8 /* P-6 */
-#define ST_PLCI_RESUMEING 9 /* P-0.Res */
-#define ST_PLCI_RESUME 10 /* P-Res */
-#define ST_PLCI_HELD 11 /* P-HELD */
-
-#define EV_PLCI_CONNECT_REQ 1 /* P-0 -> P-0.1
- */
-#define EV_PLCI_CONNECT_CONF_ERROR 2 /* P-0.1 -> P-0
- */
-#define EV_PLCI_CONNECT_CONF_OK 3 /* P-0.1 -> P-1
- */
-#define EV_PLCI_FACILITY_IND_UP 4 /* P-0 -> P-1
- */
-#define EV_PLCI_CONNECT_IND 5 /* P-0 -> P-2
- */
-#define EV_PLCI_CONNECT_ACTIVE_IND 6 /* P-1 -> P-ACT
- */
-#define EV_PLCI_CONNECT_REJECT 7 /* P-2 -> P-5
- P-3 -> P-5
- */
-#define EV_PLCI_DISCONNECT_REQ 8 /* P-1 -> P-5
- P-2 -> P-5
- P-3 -> P-5
- P-4 -> P-5
- P-ACT -> P-5
- P-Res -> P-5 (*)
- P-HELD -> P-5 (*)
- */
-#define EV_PLCI_DISCONNECT_IND 9 /* P-1 -> P-6
- P-2 -> P-6
- P-3 -> P-6
- P-4 -> P-6
- P-5 -> P-6
- P-ACT -> P-6
- P-Res -> P-6 (*)
- P-HELD -> P-6 (*)
- */
-#define EV_PLCI_FACILITY_IND_DOWN 10 /* P-0.1 -> P-5
- P-1 -> P-5
- P-ACT -> P-5
- P-2 -> P-5
- P-3 -> P-5
- P-4 -> P-5
- */
-#define EV_PLCI_DISCONNECT_RESP 11 /* P-6 -> P-0
- */
-#define EV_PLCI_CONNECT_RESP 12 /* P-6 -> P-0
- */
-
-#define EV_PLCI_RESUME_REQ 13 /* P-0 -> P-0.Res
- */
-#define EV_PLCI_RESUME_CONF_OK 14 /* P-0.Res -> P-Res
- */
-#define EV_PLCI_RESUME_CONF_ERROR 15 /* P-0.Res -> P-0
- */
-#define EV_PLCI_RESUME_IND 16 /* P-Res -> P-ACT
- */
-#define EV_PLCI_HOLD_IND 17 /* P-ACT -> P-HELD
- */
-#define EV_PLCI_RETRIEVE_IND 18 /* P-HELD -> P-ACT
- */
-#define EV_PLCI_SUSPEND_IND 19 /* P-ACT -> P-5
- */
-#define EV_PLCI_CD_IND 20 /* P-2 -> P-5
- */
-
-/*
- * per ncci state machine
- */
-#define ST_NCCI_PREVIOUS -1
-#define ST_NCCI_NONE 0 /* N-0 */
-#define ST_NCCI_OUTGOING 1 /* N-0.1 */
-#define ST_NCCI_INCOMING 2 /* N-1 */
-#define ST_NCCI_ALLOCATED 3 /* N-2 */
-#define ST_NCCI_ACTIVE 4 /* N-ACT */
-#define ST_NCCI_RESETING 5 /* N-3 */
-#define ST_NCCI_DISCONNECTING 6 /* N-4 */
-#define ST_NCCI_DISCONNECTED 7 /* N-5 */
-
-#define EV_NCCI_CONNECT_B3_REQ 1 /* N-0 -> N-0.1 */
-#define EV_NCCI_CONNECT_B3_IND 2 /* N-0 -> N-1 */
-#define EV_NCCI_CONNECT_B3_CONF_OK 3 /* N-0.1 -> N-2 */
-#define EV_NCCI_CONNECT_B3_CONF_ERROR 4 /* N-0.1 -> N-0 */
-#define EV_NCCI_CONNECT_B3_REJECT 5 /* N-1 -> N-4 */
-#define EV_NCCI_CONNECT_B3_RESP 6 /* N-1 -> N-2 */
-#define EV_NCCI_CONNECT_B3_ACTIVE_IND 7 /* N-2 -> N-ACT */
-#define EV_NCCI_RESET_B3_REQ 8 /* N-ACT -> N-3 */
-#define EV_NCCI_RESET_B3_IND 9 /* N-3 -> N-ACT */
-#define EV_NCCI_DISCONNECT_B3_IND 10 /* N-4 -> N-5 */
-#define EV_NCCI_DISCONNECT_B3_CONF_ERROR 11 /* N-4 -> previous */
-#define EV_NCCI_DISCONNECT_B3_REQ 12 /* N-1 -> N-4
- N-2 -> N-4
- N-3 -> N-4
- N-ACT -> N-4 */
-#define EV_NCCI_DISCONNECT_B3_RESP 13 /* N-5 -> N-0 */
-
-#endif /* __CAPIDRV_H__ */
diff --git a/drivers/isdn/divert/Makefile b/drivers/isdn/divert/Makefile
deleted file mode 100644
index 07684fe53537..000000000000
--- a/drivers/isdn/divert/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-# Makefile for the dss1_divert ISDN module
-
-# Each configuration option enables a list of files.
-
-obj-$(CONFIG_ISDN_DIVERSION) += dss1_divert.o
-
-# Multipart objects.
-
-dss1_divert-y := isdn_divert.o divert_procfs.o divert_init.o
diff --git a/drivers/isdn/divert/divert_init.c b/drivers/isdn/divert/divert_init.c
deleted file mode 100644
index 267dede13bfd..000000000000
--- a/drivers/isdn/divert/divert_init.c
+++ /dev/null
@@ -1,82 +0,0 @@
-/* $Id divert_init.c,v 1.5.6.2 2001/01/24 22:18:17 kai Exp $
- *
- * Module init for DSS1 diversion services for i4l.
- *
- * Copyright 1999 by Werner Cornelius (werner@isdn4linux.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-
-#include "isdn_divert.h"
-
-MODULE_DESCRIPTION("ISDN4Linux: Call diversion support");
-MODULE_AUTHOR("Werner Cornelius");
-MODULE_LICENSE("GPL");
-
-/****************************************/
-/* structure containing interface to hl */
-/****************************************/
-isdn_divert_if divert_if = {
- DIVERT_IF_MAGIC, /* magic value */
- DIVERT_CMD_REG, /* register cmd */
- ll_callback, /* callback routine from ll */
- NULL, /* command still not specified */
- NULL, /* drv_to_name */
- NULL, /* name_to_drv */
-};
-
-/*************************/
-/* Module interface code */
-/* no cmd line parms */
-/*************************/
-static int __init divert_init(void)
-{
- int i;
-
- if (divert_dev_init()) {
- printk(KERN_WARNING "dss1_divert: cannot install device, not loaded\n");
- return (-EIO);
- }
- if ((i = DIVERT_REG_NAME(&divert_if)) != DIVERT_NO_ERR) {
- divert_dev_deinit();
- printk(KERN_WARNING "dss1_divert: error %d registering module, not loaded\n", i);
- return (-EIO);
- }
- printk(KERN_INFO "dss1_divert module successfully installed\n");
- return (0);
-}
-
-/**********************/
-/* Module deinit code */
-/**********************/
-static void __exit divert_exit(void)
-{
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&divert_lock, flags);
- divert_if.cmd = DIVERT_CMD_REL; /* release */
- if ((i = DIVERT_REG_NAME(&divert_if)) != DIVERT_NO_ERR) {
- printk(KERN_WARNING "dss1_divert: error %d releasing module\n", i);
- spin_unlock_irqrestore(&divert_lock, flags);
- return;
- }
- if (divert_dev_deinit()) {
- printk(KERN_WARNING "dss1_divert: device busy, remove cancelled\n");
- spin_unlock_irqrestore(&divert_lock, flags);
- return;
- }
- spin_unlock_irqrestore(&divert_lock, flags);
- deleterule(-1); /* delete all rules and free mem */
- deleteprocs();
- printk(KERN_INFO "dss1_divert module successfully removed \n");
-}
-
-module_init(divert_init);
-module_exit(divert_exit);
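
divert_init() and divert_exit() above go through the same DIVERT_REG_NAME() call for both registration and release, with divert_if.cmd selecting the direction. A toy userspace model of that handshake, with hypothetical names and no kernel APIs:

#include <stdio.h>

/* Toy model of the register/release handshake used by the removed module:
 * the same call is made on load and on unload, with ->cmd selecting
 * "register" or "release". All names here are hypothetical. */
enum { CMD_REG = 1, CMD_REL = 2 };

struct divert_iface {
    int cmd;
    int (*ll_callback)(const char *event);
};

static struct divert_iface *registered;

static int toy_register(struct divert_iface *ifc)
{
    if (ifc->cmd == CMD_REG) {
        if (registered)
            return -1;          /* someone is already registered */
        registered = ifc;
        return 0;
    }
    if (ifc->cmd == CMD_REL) {
        if (registered != ifc)
            return -1;          /* releasing an interface we never took */
        registered = NULL;
        return 0;
    }
    return -1;
}

static int my_callback(const char *event)
{
    printf("ll event: %s\n", event);
    return 0;
}

int main(void)
{
    struct divert_iface ifc = { .cmd = CMD_REG, .ll_callback = my_callback };

    if (toy_register(&ifc) == 0)
        registered->ll_callback("ICALL");   /* pretend the LL calls back */

    ifc.cmd = CMD_REL;                      /* same entry point on unload */
    toy_register(&ifc);
    return 0;
}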
diff --git a/drivers/isdn/divert/divert_procfs.c b/drivers/isdn/divert/divert_procfs.c
deleted file mode 100644
index 342585e04fd3..000000000000
--- a/drivers/isdn/divert/divert_procfs.c
+++ /dev/null
@@ -1,336 +0,0 @@
-/* $Id: divert_procfs.c,v 1.11.6.2 2001/09/23 22:24:36 kai Exp $
- *
- * Filesystem handling for the diversion supplementary services.
- *
- * Copyright 1998 by Werner Cornelius (werner@isdn4linux.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/module.h>
-#include <linux/poll.h>
-#include <linux/slab.h>
-#ifdef CONFIG_PROC_FS
-#include <linux/proc_fs.h>
-#else
-#include <linux/fs.h>
-#endif
-#include <linux/sched.h>
-#include <linux/isdnif.h>
-#include <net/net_namespace.h>
-#include <linux/mutex.h>
-#include "isdn_divert.h"
-
-
-/*********************************/
-/* Variables for interface queue */
-/*********************************/
-ulong if_used = 0; /* number of interface users */
-static DEFINE_MUTEX(isdn_divert_mutex);
-static struct divert_info *divert_info_head = NULL; /* head of queue */
-static struct divert_info *divert_info_tail = NULL; /* pointer to last entry */
-static DEFINE_SPINLOCK(divert_info_lock);/* lock for queue */
-static wait_queue_head_t rd_queue;
-
-/*********************************/
-/* put an info buffer into queue */
-/*********************************/
-void
-put_info_buffer(char *cp)
-{
- struct divert_info *ib;
- unsigned long flags;
-
- if (if_used <= 0)
- return;
- if (!cp)
- return;
- if (!*cp)
- return;
- if (!(ib = kmalloc(sizeof(struct divert_info) + strlen(cp), GFP_ATOMIC)))
- return; /* no memory */
- strcpy(ib->info_start, cp); /* set output string */
- ib->next = NULL;
- spin_lock_irqsave(&divert_info_lock, flags);
- ib->usage_cnt = if_used;
- if (!divert_info_head)
- divert_info_head = ib; /* new head */
- else
- divert_info_tail->next = ib; /* follows existing messages */
- divert_info_tail = ib; /* new tail */
-
-	/* delete old entries */
- while (divert_info_head->next) {
- if ((divert_info_head->usage_cnt <= 0) &&
- (divert_info_head->next->usage_cnt <= 0)) {
- ib = divert_info_head;
- divert_info_head = divert_info_head->next;
- kfree(ib);
- } else
- break;
- } /* divert_info_head->next */
- spin_unlock_irqrestore(&divert_info_lock, flags);
- wake_up_interruptible(&(rd_queue));
-} /* put_info_buffer */
-
-#ifdef CONFIG_PROC_FS
-
-/**********************************/
-/* deflection device read routine */
-/**********************************/
-static ssize_t
-isdn_divert_read(struct file *file, char __user *buf, size_t count, loff_t *off)
-{
- struct divert_info *inf;
- int len;
-
- if (!(inf = *((struct divert_info **) file->private_data))) {
- if (file->f_flags & O_NONBLOCK)
- return -EAGAIN;
- wait_event_interruptible(rd_queue, (inf =
- *((struct divert_info **) file->private_data)));
- }
- if (!inf)
- return (0);
-
- inf->usage_cnt--; /* new usage count */
- file->private_data = &inf->next; /* next structure */
- if ((len = strlen(inf->info_start)) <= count) {
- if (copy_to_user(buf, inf->info_start, len))
- return -EFAULT;
- *off += len;
- return (len);
- }
- return (0);
-} /* isdn_divert_read */
-
-/**********************************/
-/* deflection device write routine */
-/**********************************/
-static ssize_t
-isdn_divert_write(struct file *file, const char __user *buf, size_t count, loff_t *off)
-{
- return (-ENODEV);
-} /* isdn_divert_write */
-
-
-/***************************************/
-/* select routines for various kernels */
-/***************************************/
-static __poll_t
-isdn_divert_poll(struct file *file, poll_table *wait)
-{
- __poll_t mask = 0;
-
- poll_wait(file, &(rd_queue), wait);
- /* mask = EPOLLOUT | EPOLLWRNORM; */
- if (*((struct divert_info **) file->private_data)) {
- mask |= EPOLLIN | EPOLLRDNORM;
- }
- return mask;
-} /* isdn_divert_poll */
-
-/****************/
-/* Open routine */
-/****************/
-static int
-isdn_divert_open(struct inode *ino, struct file *filep)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&divert_info_lock, flags);
- if_used++;
- if (divert_info_head)
- filep->private_data = &(divert_info_tail->next);
- else
- filep->private_data = &divert_info_head;
- spin_unlock_irqrestore(&divert_info_lock, flags);
- /* start_divert(); */
- return nonseekable_open(ino, filep);
-} /* isdn_divert_open */
-
-/*******************/
-/* close routine */
-/*******************/
-static int
-isdn_divert_close(struct inode *ino, struct file *filep)
-{
- struct divert_info *inf;
- unsigned long flags;
-
- spin_lock_irqsave(&divert_info_lock, flags);
- if_used--;
- inf = *((struct divert_info **) filep->private_data);
- while (inf) {
- inf->usage_cnt--;
- inf = inf->next;
- }
- if (if_used <= 0)
- while (divert_info_head) {
- inf = divert_info_head;
- divert_info_head = divert_info_head->next;
- kfree(inf);
- }
- spin_unlock_irqrestore(&divert_info_lock, flags);
- return (0);
-} /* isdn_divert_close */
-
-/*********/
-/* IOCTL */
-/*********/
-static int isdn_divert_ioctl_unlocked(struct file *file, uint cmd, ulong arg)
-{
- divert_ioctl dioctl;
- int i;
- unsigned long flags;
- divert_rule *rulep;
- char *cp;
-
- if (copy_from_user(&dioctl, (void __user *) arg, sizeof(dioctl)))
- return -EFAULT;
-
- switch (cmd) {
- case IIOCGETVER:
- dioctl.drv_version = DIVERT_IIOC_VERSION; /* set version */
- break;
-
- case IIOCGETDRV:
- if ((dioctl.getid.drvid = divert_if.name_to_drv(dioctl.getid.drvnam)) < 0)
- return (-EINVAL);
- break;
-
- case IIOCGETNAM:
- cp = divert_if.drv_to_name(dioctl.getid.drvid);
- if (!cp)
- return (-EINVAL);
- if (!*cp)
- return (-EINVAL);
- strcpy(dioctl.getid.drvnam, cp);
- break;
-
- case IIOCGETRULE:
- if (!(rulep = getruleptr(dioctl.getsetrule.ruleidx)))
- return (-EINVAL);
- dioctl.getsetrule.rule = *rulep; /* copy data */
- break;
-
- case IIOCMODRULE:
- if (!(rulep = getruleptr(dioctl.getsetrule.ruleidx)))
- return (-EINVAL);
- spin_lock_irqsave(&divert_lock, flags);
- *rulep = dioctl.getsetrule.rule; /* copy data */
- spin_unlock_irqrestore(&divert_lock, flags);
- return (0); /* no copy required */
- break;
-
- case IIOCINSRULE:
- return (insertrule(dioctl.getsetrule.ruleidx, &dioctl.getsetrule.rule));
- break;
-
- case IIOCDELRULE:
- return (deleterule(dioctl.getsetrule.ruleidx));
- break;
-
- case IIOCDODFACT:
- return (deflect_extern_action(dioctl.fwd_ctrl.subcmd,
- dioctl.fwd_ctrl.callid,
- dioctl.fwd_ctrl.to_nr));
-
- case IIOCDOCFACT:
- case IIOCDOCFDIS:
- case IIOCDOCFINT:
- if (!divert_if.drv_to_name(dioctl.cf_ctrl.drvid))
- return (-EINVAL); /* invalid driver */
- if (strnlen(dioctl.cf_ctrl.msn, sizeof(dioctl.cf_ctrl.msn)) ==
- sizeof(dioctl.cf_ctrl.msn))
- return -EINVAL;
- if (strnlen(dioctl.cf_ctrl.fwd_nr, sizeof(dioctl.cf_ctrl.fwd_nr)) ==
- sizeof(dioctl.cf_ctrl.fwd_nr))
- return -EINVAL;
- if ((i = cf_command(dioctl.cf_ctrl.drvid,
- (cmd == IIOCDOCFACT) ? 1 : (cmd == IIOCDOCFDIS) ? 0 : 2,
- dioctl.cf_ctrl.cfproc,
- dioctl.cf_ctrl.msn,
- dioctl.cf_ctrl.service,
- dioctl.cf_ctrl.fwd_nr,
- &dioctl.cf_ctrl.procid)))
- return (i);
- break;
-
- default:
- return (-EINVAL);
- } /* switch cmd */
- return copy_to_user((void __user *)arg, &dioctl, sizeof(dioctl)) ? -EFAULT : 0;
-} /* isdn_divert_ioctl */
-
-static long isdn_divert_ioctl(struct file *file, uint cmd, ulong arg)
-{
- long ret;
-
- mutex_lock(&isdn_divert_mutex);
- ret = isdn_divert_ioctl_unlocked(file, cmd, arg);
- mutex_unlock(&isdn_divert_mutex);
-
- return ret;
-}
-
-static const struct file_operations isdn_fops =
-{
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .read = isdn_divert_read,
- .write = isdn_divert_write,
- .poll = isdn_divert_poll,
- .unlocked_ioctl = isdn_divert_ioctl,
- .open = isdn_divert_open,
- .release = isdn_divert_close,
-};
-
-/****************************/
-/* isdn subdir in /proc/net */
-/****************************/
-static struct proc_dir_entry *isdn_proc_entry = NULL;
-static struct proc_dir_entry *isdn_divert_entry = NULL;
-#endif /* CONFIG_PROC_FS */
-
-/***************************************************************************/
-/* divert_dev_init must be called before the proc filesystem may be used */
-/***************************************************************************/
-int
-divert_dev_init(void)
-{
-
- init_waitqueue_head(&rd_queue);
-
-#ifdef CONFIG_PROC_FS
- isdn_proc_entry = proc_mkdir("isdn", init_net.proc_net);
- if (!isdn_proc_entry)
- return (-1);
- isdn_divert_entry = proc_create("divert", S_IFREG | S_IRUGO,
- isdn_proc_entry, &isdn_fops);
- if (!isdn_divert_entry) {
- remove_proc_entry("isdn", init_net.proc_net);
- return (-1);
- }
-#endif /* CONFIG_PROC_FS */
-
- return (0);
-} /* divert_dev_init */
-
-/***************************************************************************/
-/* divert_dev_deinit must be called before leaving isdn when included as */
-/* a module. */
-/***************************************************************************/
-int
-divert_dev_deinit(void)
-{
-
-#ifdef CONFIG_PROC_FS
- remove_proc_entry("divert", isdn_proc_entry);
- remove_proc_entry("isdn", init_net.proc_net);
-#endif /* CONFIG_PROC_FS */
-
- return (0);
-} /* divert_dev_deinit */
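
The proc code above is a small fan-out queue: put_info_buffer() stamps each message with the number of currently open readers, isdn_divert_read() decrements that count as each reader consumes the message, and fully consumed entries at the head are freed. A standalone sketch of just that usage-count bookkeeping (locking and the wait queue omitted):

#include <stdio.h>
#include <stdlib.h>

/* Simplified model of the removed info queue: every queued message starts
 * with a usage count equal to the number of open readers, each reader
 * decrements it while consuming, and fully consumed messages at the head
 * of the queue are freed. */
struct info_msg {
    struct info_msg *next;
    int usage_cnt;
    char text[64];
};

static struct info_msg *head, *tail;
static int readers = 2;                     /* pretend two readers are open */

static void put_info(const char *s)
{
    struct info_msg *m = calloc(1, sizeof(*m));

    if (!m)
        return;                             /* drop the message, as the driver did */
    snprintf(m->text, sizeof(m->text), "%s", s);
    m->usage_cnt = readers;
    if (tail)
        tail->next = m;
    else
        head = m;
    tail = m;
}

static void reap_consumed(void)
{
    while (head && head->usage_cnt <= 0) {
        struct info_msg *m = head;

        head = head->next;
        if (!head)
            tail = NULL;
        free(m);
    }
}

int main(void)
{
    struct info_msg *m;

    put_info("130 0x1 cause");
    for (m = head; m; m = m->next)          /* both readers consume it */
        m->usage_cnt -= readers;
    reap_consumed();
    printf("queue empty: %s\n", head ? "no" : "yes");
    return 0;
}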
diff --git a/drivers/isdn/divert/isdn_divert.c b/drivers/isdn/divert/isdn_divert.c
deleted file mode 100644
index 5620fd2c6009..000000000000
--- a/drivers/isdn/divert/isdn_divert.c
+++ /dev/null
@@ -1,846 +0,0 @@
-/* $Id: isdn_divert.c,v 1.6.6.3 2001/09/23 22:24:36 kai Exp $
- *
- * DSS1 main diversion supplementary handling for i4l.
- *
- * Copyright 1999 by Werner Cornelius (werner@isdn4linux.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/proc_fs.h>
-#include <linux/slab.h>
-#include <linux/timer.h>
-#include <linux/jiffies.h>
-
-#include "isdn_divert.h"
-
-/**********************************/
-/* structure keeping calling info */
-/**********************************/
-struct call_struc {
- isdn_ctrl ics; /* delivered setup + driver parameters */
- ulong divert_id; /* Id delivered to user */
- unsigned char akt_state; /* actual state */
- char deflect_dest[35]; /* deflection destination */
- struct timer_list timer; /* timer control structure */
- char info[90]; /* device info output */
- struct call_struc *next; /* pointer to next entry */
- struct call_struc *prev;
-};
-
-
-/********************************************/
-/* structure keeping deflection table entry */
-/********************************************/
-struct deflect_struc {
- struct deflect_struc *next, *prev;
- divert_rule rule; /* used rule */
-};
-
-
-/*****************************************/
-/* variables for main diversion services */
-/*****************************************/
-/* diversion/deflection processes */
-static struct call_struc *divert_head = NULL; /* head of remembered entries */
-static ulong next_id = 1; /* next info id */
-static struct deflect_struc *table_head = NULL;
-static struct deflect_struc *table_tail = NULL;
-static unsigned char extern_wait_max = 4; /* maximum wait in s for external process */
-
-DEFINE_SPINLOCK(divert_lock);
-
-/***************************/
-/* timer callback function */
-/***************************/
-static void deflect_timer_expire(struct timer_list *t)
-{
- unsigned long flags;
- struct call_struc *cs = from_timer(cs, t, timer);
-
- spin_lock_irqsave(&divert_lock, flags);
- del_timer(&cs->timer); /* delete active timer */
- spin_unlock_irqrestore(&divert_lock, flags);
-
- switch (cs->akt_state) {
- case DEFLECT_PROCEED:
- cs->ics.command = ISDN_CMD_HANGUP; /* cancel action */
- divert_if.ll_cmd(&cs->ics);
- spin_lock_irqsave(&divert_lock, flags);
- cs->akt_state = DEFLECT_AUTODEL; /* delete after timeout */
- cs->timer.expires = jiffies + (HZ * AUTODEL_TIME);
- add_timer(&cs->timer);
- spin_unlock_irqrestore(&divert_lock, flags);
- break;
-
- case DEFLECT_ALERT:
- cs->ics.command = ISDN_CMD_REDIR; /* protocol */
- strlcpy(cs->ics.parm.setup.phone, cs->deflect_dest, sizeof(cs->ics.parm.setup.phone));
- strcpy(cs->ics.parm.setup.eazmsn, "Testtext delayed");
- divert_if.ll_cmd(&cs->ics);
- spin_lock_irqsave(&divert_lock, flags);
- cs->akt_state = DEFLECT_AUTODEL; /* delete after timeout */
- cs->timer.expires = jiffies + (HZ * AUTODEL_TIME);
- add_timer(&cs->timer);
- spin_unlock_irqrestore(&divert_lock, flags);
- break;
-
- case DEFLECT_AUTODEL:
- default:
- spin_lock_irqsave(&divert_lock, flags);
- if (cs->prev)
- cs->prev->next = cs->next; /* forward link */
- else
- divert_head = cs->next;
- if (cs->next)
- cs->next->prev = cs->prev; /* back link */
- spin_unlock_irqrestore(&divert_lock, flags);
- kfree(cs);
- return;
-
- } /* switch */
-} /* deflect_timer_expire */
-
-
-/*****************************************/
-/* handle call forwarding de/activations */
-/* 0 = deact, 1 = act, 2 = interrogate */
-/*****************************************/
-int cf_command(int drvid, int mode,
- u_char proc, char *msn,
- u_char service, char *fwd_nr, ulong *procid)
-{
- unsigned long flags;
- int retval, msnlen;
- int fwd_len;
- char *p, *ielenp, tmp[60];
- struct call_struc *cs;
-
- if (strchr(msn, '.')) return (-EINVAL); /* subaddress not allowed in msn */
- if ((proc & 0x7F) > 2) return (-EINVAL);
- proc &= 3;
- p = tmp;
- *p++ = 0x30; /* enumeration */
- ielenp = p++; /* remember total length position */
- *p++ = 0xa; /* proc tag */
- *p++ = 1; /* length */
- *p++ = proc & 0x7F; /* procedure to de/activate/interrogate */
- *p++ = 0xa; /* service tag */
- *p++ = 1; /* length */
- *p++ = service; /* service to handle */
-
- if (mode == 1) {
- if (!*fwd_nr) return (-EINVAL); /* destination missing */
- if (strchr(fwd_nr, '.')) return (-EINVAL); /* subaddress not allowed */
- fwd_len = strlen(fwd_nr);
- *p++ = 0x30; /* number enumeration */
- *p++ = fwd_len + 2; /* complete forward to len */
- *p++ = 0x80; /* fwd to nr */
- *p++ = fwd_len; /* length of number */
- strcpy(p, fwd_nr); /* copy number */
- p += fwd_len; /* pointer beyond fwd */
- } /* activate */
-
- msnlen = strlen(msn);
- *p++ = 0x80; /* msn number */
- if (msnlen > 1) {
- *p++ = msnlen; /* length */
- strcpy(p, msn);
- p += msnlen;
- } else
- *p++ = 0;
-
- *ielenp = p - ielenp - 1; /* set total IE length */
-
- /* allocate mem for information struct */
- if (!(cs = kmalloc(sizeof(struct call_struc), GFP_ATOMIC)))
- return (-ENOMEM); /* no memory */
- timer_setup(&cs->timer, deflect_timer_expire, 0);
- cs->info[0] = '\0';
- cs->ics.driver = drvid;
- cs->ics.command = ISDN_CMD_PROT_IO; /* protocol specific io */
- cs->ics.arg = DSS1_CMD_INVOKE; /* invoke supplementary service */
- cs->ics.parm.dss1_io.proc = (mode == 1) ? 7 : (mode == 2) ? 11 : 8; /* operation */
- cs->ics.parm.dss1_io.timeout = 4000; /* from ETS 300 207-1 */
- cs->ics.parm.dss1_io.datalen = p - tmp; /* total len */
- cs->ics.parm.dss1_io.data = tmp; /* start of buffer */
-
- spin_lock_irqsave(&divert_lock, flags);
- cs->ics.parm.dss1_io.ll_id = next_id++; /* id for callback */
- spin_unlock_irqrestore(&divert_lock, flags);
- *procid = cs->ics.parm.dss1_io.ll_id;
-
- sprintf(cs->info, "%d 0x%lx %s%s 0 %s %02x %d%s%s\n",
- (!mode) ? DIVERT_DEACTIVATE : (mode == 1) ? DIVERT_ACTIVATE : DIVERT_REPORT,
- cs->ics.parm.dss1_io.ll_id,
- (mode != 2) ? "" : "0 ",
- divert_if.drv_to_name(cs->ics.driver),
- msn,
- service & 0xFF,
- proc,
- (mode != 1) ? "" : " 0 ",
- (mode != 1) ? "" : fwd_nr);
-
- retval = divert_if.ll_cmd(&cs->ics); /* execute command */
-
- if (!retval) {
- cs->prev = NULL;
- spin_lock_irqsave(&divert_lock, flags);
- cs->next = divert_head;
- divert_head = cs;
- spin_unlock_irqrestore(&divert_lock, flags);
- } else
- kfree(cs);
- return (retval);
-} /* cf_command */
-
-
-/****************************************/
-/* handle an external deflection command */
-/****************************************/
-int deflect_extern_action(u_char cmd, ulong callid, char *to_nr)
-{
- struct call_struc *cs;
- isdn_ctrl ic;
- unsigned long flags;
- int i;
-
- if ((cmd & 0x7F) > 2) return (-EINVAL); /* invalid command */
- cs = divert_head; /* start of parameter list */
- while (cs) {
- if (cs->divert_id == callid) break; /* found */
- cs = cs->next;
- } /* search entry */
- if (!cs) return (-EINVAL); /* invalid callid */
-
- ic.driver = cs->ics.driver;
- ic.arg = cs->ics.arg;
- i = -EINVAL;
- if (cs->akt_state == DEFLECT_AUTODEL) return (i); /* no valid call */
- switch (cmd & 0x7F) {
- case 0: /* hangup */
- del_timer(&cs->timer);
- ic.command = ISDN_CMD_HANGUP;
- i = divert_if.ll_cmd(&ic);
- spin_lock_irqsave(&divert_lock, flags);
- cs->akt_state = DEFLECT_AUTODEL; /* delete after timeout */
- cs->timer.expires = jiffies + (HZ * AUTODEL_TIME);
- add_timer(&cs->timer);
- spin_unlock_irqrestore(&divert_lock, flags);
- break;
-
- case 1: /* alert */
- if (cs->akt_state == DEFLECT_ALERT) return (0);
- cmd &= 0x7F; /* never wait */
- del_timer(&cs->timer);
- ic.command = ISDN_CMD_ALERT;
- if ((i = divert_if.ll_cmd(&ic))) {
- spin_lock_irqsave(&divert_lock, flags);
- cs->akt_state = DEFLECT_AUTODEL; /* delete after timeout */
- cs->timer.expires = jiffies + (HZ * AUTODEL_TIME);
- add_timer(&cs->timer);
- spin_unlock_irqrestore(&divert_lock, flags);
- } else
- cs->akt_state = DEFLECT_ALERT;
- break;
-
- case 2: /* redir */
- del_timer(&cs->timer);
- strlcpy(cs->ics.parm.setup.phone, to_nr, sizeof(cs->ics.parm.setup.phone));
- strcpy(cs->ics.parm.setup.eazmsn, "Testtext manual");
- ic.command = ISDN_CMD_REDIR;
- if ((i = divert_if.ll_cmd(&ic))) {
- spin_lock_irqsave(&divert_lock, flags);
- cs->akt_state = DEFLECT_AUTODEL; /* delete after timeout */
- cs->timer.expires = jiffies + (HZ * AUTODEL_TIME);
- add_timer(&cs->timer);
- spin_unlock_irqrestore(&divert_lock, flags);
- } else
- cs->akt_state = DEFLECT_ALERT;
- break;
-
- } /* switch */
- return (i);
-} /* deflect_extern_action */
-
-/********************************/
-/* insert a new rule before idx */
-/********************************/
-int insertrule(int idx, divert_rule *newrule)
-{
- struct deflect_struc *ds, *ds1 = NULL;
- unsigned long flags;
-
- if (!(ds = kmalloc(sizeof(struct deflect_struc), GFP_KERNEL)))
- return (-ENOMEM); /* no memory */
-
- ds->rule = *newrule; /* set rule */
-
- spin_lock_irqsave(&divert_lock, flags);
-
- if (idx >= 0) {
- ds1 = table_head;
- while ((ds1) && (idx > 0))
- { idx--;
- ds1 = ds1->next;
- }
- if (!ds1) idx = -1;
- }
-
- if (idx < 0) {
- ds->prev = table_tail; /* previous entry */
- ds->next = NULL; /* end of chain */
- if (ds->prev)
- ds->prev->next = ds; /* last forward */
- else
- table_head = ds; /* is first entry */
- table_tail = ds; /* end of queue */
- } else {
- ds->next = ds1; /* next entry */
- ds->prev = ds1->prev; /* prev entry */
- ds1->prev = ds; /* backward chain old element */
- if (!ds->prev)
- table_head = ds; /* first element */
- }
-
- spin_unlock_irqrestore(&divert_lock, flags);
- return (0);
-} /* insertrule */
-
-/***********************************/
-/* delete the rule at position idx */
-/***********************************/
-int deleterule(int idx)
-{
- struct deflect_struc *ds, *ds1;
- unsigned long flags;
-
- if (idx < 0) {
- spin_lock_irqsave(&divert_lock, flags);
- ds = table_head;
- table_head = NULL;
- table_tail = NULL;
- spin_unlock_irqrestore(&divert_lock, flags);
- while (ds) {
- ds1 = ds;
- ds = ds->next;
- kfree(ds1);
- }
- return (0);
- }
-
- spin_lock_irqsave(&divert_lock, flags);
- ds = table_head;
-
- while ((ds) && (idx > 0)) {
- idx--;
- ds = ds->next;
- }
-
- if (!ds) {
- spin_unlock_irqrestore(&divert_lock, flags);
- return (-EINVAL);
- }
-
- if (ds->next)
- ds->next->prev = ds->prev; /* backward chain */
- else
- table_tail = ds->prev; /* end of chain */
-
- if (ds->prev)
- ds->prev->next = ds->next; /* forward chain */
- else
- table_head = ds->next; /* start of chain */
-
- spin_unlock_irqrestore(&divert_lock, flags);
- kfree(ds);
- return (0);
-} /* deleterule */
-
-/*******************************************/
-/* get a pointer to a specific rule number */
-/*******************************************/
-divert_rule *getruleptr(int idx)
-{
- struct deflect_struc *ds = table_head;
-
- if (idx < 0) return (NULL);
- while ((ds) && (idx >= 0)) {
- if (!(idx--)) {
- return (&ds->rule);
- break;
- }
- ds = ds->next;
- }
- return (NULL);
-} /* getruleptr */
-
-/*************************************************/
-/* called from common module on an incoming call */
-/*************************************************/
-static int isdn_divert_icall(isdn_ctrl *ic)
-{
- int retval = 0;
- unsigned long flags;
- struct call_struc *cs = NULL;
- struct deflect_struc *dv;
- char *p, *p1;
- u_char accept;
-
- /* first check the internal deflection table */
- for (dv = table_head; dv; dv = dv->next) {
- /* scan table */
- if (((dv->rule.callopt == 1) && (ic->command == ISDN_STAT_ICALLW)) ||
- ((dv->rule.callopt == 2) && (ic->command == ISDN_STAT_ICALL)))
- continue; /* call option check */
- if (!(dv->rule.drvid & (1L << ic->driver)))
- continue; /* driver not matching */
- if ((dv->rule.si1) && (dv->rule.si1 != ic->parm.setup.si1))
- continue; /* si1 not matching */
- if ((dv->rule.si2) && (dv->rule.si2 != ic->parm.setup.si2))
- continue; /* si2 not matching */
-
- p = dv->rule.my_msn;
- p1 = ic->parm.setup.eazmsn;
- accept = 0;
- while (*p) {
- /* complete compare */
- if (*p == '-') {
- accept = 1; /* call accepted */
- break;
- }
- if (*p++ != *p1++)
- break; /* not accepted */
- if ((!*p) && (!*p1))
- accept = 1;
- } /* complete compare */
- if (!accept) continue; /* not accepted */
-
- if ((strcmp(dv->rule.caller, "0")) ||
- (ic->parm.setup.phone[0])) {
- p = dv->rule.caller;
- p1 = ic->parm.setup.phone;
- accept = 0;
- while (*p) {
- /* complete compare */
- if (*p == '-') {
- accept = 1; /* call accepted */
- break;
- }
- if (*p++ != *p1++)
- break; /* not accepted */
- if ((!*p) && (!*p1))
- accept = 1;
- } /* complete compare */
- if (!accept) continue; /* not accepted */
- }
-
- switch (dv->rule.action) {
- case DEFLECT_IGNORE:
- return 0;
-
- case DEFLECT_ALERT:
- case DEFLECT_PROCEED:
- case DEFLECT_REPORT:
- case DEFLECT_REJECT:
- if (dv->rule.action == DEFLECT_PROCEED)
- if ((!if_used) || ((!extern_wait_max) && (!dv->rule.waittime)))
- return (0); /* no external deflection needed */
- if (!(cs = kmalloc(sizeof(struct call_struc), GFP_ATOMIC)))
- return (0); /* no memory */
- timer_setup(&cs->timer, deflect_timer_expire, 0);
- cs->info[0] = '\0';
-
- cs->ics = *ic; /* copy incoming data */
- if (!cs->ics.parm.setup.phone[0]) strcpy(cs->ics.parm.setup.phone, "0");
- if (!cs->ics.parm.setup.eazmsn[0]) strcpy(cs->ics.parm.setup.eazmsn, "0");
- cs->ics.parm.setup.screen = dv->rule.screen;
- if (dv->rule.waittime)
- cs->timer.expires = jiffies + (HZ * dv->rule.waittime);
- else if (dv->rule.action == DEFLECT_PROCEED)
- cs->timer.expires = jiffies + (HZ * extern_wait_max);
- else
- cs->timer.expires = 0;
- cs->akt_state = dv->rule.action;
- spin_lock_irqsave(&divert_lock, flags);
- cs->divert_id = next_id++; /* new sequence number */
- spin_unlock_irqrestore(&divert_lock, flags);
- cs->prev = NULL;
- if (cs->akt_state == DEFLECT_ALERT) {
- strcpy(cs->deflect_dest, dv->rule.to_nr);
- if (!cs->timer.expires) {
- strcpy(ic->parm.setup.eazmsn,
- "Testtext direct");
- ic->parm.setup.screen = dv->rule.screen;
- strlcpy(ic->parm.setup.phone, dv->rule.to_nr, sizeof(ic->parm.setup.phone));
- cs->akt_state = DEFLECT_AUTODEL; /* delete after timeout */
- cs->timer.expires = jiffies + (HZ * AUTODEL_TIME);
- retval = 5;
- } else
- retval = 1; /* alerting */
- } else {
- cs->deflect_dest[0] = '\0';
- retval = 4; /* only proceed */
- }
- snprintf(cs->info, sizeof(cs->info),
- "%d 0x%lx %s %s %s %s 0x%x 0x%x %d %d %s\n",
- cs->akt_state,
- cs->divert_id,
- divert_if.drv_to_name(cs->ics.driver),
- (ic->command == ISDN_STAT_ICALLW) ? "1" : "0",
- cs->ics.parm.setup.phone,
- cs->ics.parm.setup.eazmsn,
- cs->ics.parm.setup.si1,
- cs->ics.parm.setup.si2,
- cs->ics.parm.setup.screen,
- dv->rule.waittime,
- cs->deflect_dest);
- if ((dv->rule.action == DEFLECT_REPORT) ||
- (dv->rule.action == DEFLECT_REJECT)) {
- put_info_buffer(cs->info);
- kfree(cs); /* remove */
- return ((dv->rule.action == DEFLECT_REPORT) ? 0 : 2); /* nothing to do */
- }
- break;
-
- default:
- return 0; /* ignore call */
- } /* switch action */
- break; /* will break the 'for' looping */
- } /* scan_table */
-
- if (cs) {
- cs->prev = NULL;
- spin_lock_irqsave(&divert_lock, flags);
- cs->next = divert_head;
- divert_head = cs;
- if (cs->timer.expires) add_timer(&cs->timer);
- spin_unlock_irqrestore(&divert_lock, flags);
-
- put_info_buffer(cs->info);
- return (retval);
- } else
- return (0);
-} /* isdn_divert_icall */
-
-
-void deleteprocs(void)
-{
- struct call_struc *cs, *cs1;
- unsigned long flags;
-
- spin_lock_irqsave(&divert_lock, flags);
- cs = divert_head;
- divert_head = NULL;
- while (cs) {
- del_timer(&cs->timer);
- cs1 = cs;
- cs = cs->next;
- kfree(cs1);
- }
- spin_unlock_irqrestore(&divert_lock, flags);
-} /* deleteprocs */
-
-/****************************************************/
-/* put a address including address type into buffer */
-/****************************************************/
-static int put_address(char *st, u_char *p, int len)
-{
- u_char retval = 0;
- u_char adr_typ = 0; /* network standard */
-
- if (len < 2) return (retval);
- if (*p == 0xA1) {
- retval = *(++p) + 2; /* total length */
- if (retval > len) return (0); /* too short */
- len = retval - 2; /* remaining length */
- if (len < 3) return (0);
- if ((*(++p) != 0x0A) || (*(++p) != 1)) return (0);
- adr_typ = *(++p);
- len -= 3;
- p++;
- if (len < 2) return (0);
- if (*p++ != 0x12) return (0);
- if (*p > len) return (0); /* check number length */
- len = *p++;
- } else if (*p == 0x80) {
- retval = *(++p) + 2; /* total length */
- if (retval > len) return (0);
- len = retval - 2;
- p++;
- } else
- return (0); /* invalid address information */
-
- sprintf(st, "%d ", adr_typ);
- st += strlen(st);
- if (!len)
- *st++ = '-';
- else
- while (len--)
- *st++ = *p++;
- *st = '\0';
- return (retval);
-} /* put_address */
-
-/*************************************/
-/* report a successful interrogation */
-/*************************************/
-static int interrogate_success(isdn_ctrl *ic, struct call_struc *cs)
-{
- char *src = ic->parm.dss1_io.data;
- int restlen = ic->parm.dss1_io.datalen;
- int cnt = 1;
- u_char n, n1;
- char st[90], *p, *stp;
-
- if (restlen < 2) return (-100); /* frame too short */
- if (*src++ != 0x30) return (-101);
- if ((n = *src++) > 0x81) return (-102); /* invalid length field */
- restlen -= 2; /* remaining bytes */
- if (n == 0x80) {
- if (restlen < 2) return (-103);
- if ((*(src + restlen - 1)) || (*(src + restlen - 2))) return (-104);
- restlen -= 2;
- } else if (n == 0x81) {
- n = *src++;
- restlen--;
- if (n > restlen) return (-105);
- restlen = n;
- } else if (n > restlen)
- return (-106);
- else
- restlen = n; /* standard format */
- if (restlen < 3) return (-107); /* no procedure */
- if ((*src++ != 2) || (*src++ != 1) || (*src++ != 0x0B)) return (-108);
- restlen -= 3;
- if (restlen < 2) return (-109); /* list missing */
- if (*src == 0x31) {
- src++;
- if ((n = *src++) > 0x81) return (-110); /* invalid length field */
- restlen -= 2; /* remaining bytes */
- if (n == 0x80) {
- if (restlen < 2) return (-111);
- if ((*(src + restlen - 1)) || (*(src + restlen - 2))) return (-112);
- restlen -= 2;
- } else if (n == 0x81) {
- n = *src++;
- restlen--;
- if (n > restlen) return (-113);
- restlen = n;
- } else if (n > restlen)
- return (-114);
- else
- restlen = n; /* standard format */
- } /* result list header */
-
- while (restlen >= 2) {
- stp = st;
- sprintf(stp, "%d 0x%lx %d %s ", DIVERT_REPORT, ic->parm.dss1_io.ll_id,
- cnt++, divert_if.drv_to_name(ic->driver));
- stp += strlen(stp);
- if (*src++ != 0x30) return (-115); /* invalid enum */
- n = *src++;
- restlen -= 2;
- if (n > restlen) return (-116); /* enum length wrong */
- restlen -= n;
- p = src; /* one entry */
- src += n;
- if (!(n1 = put_address(stp, p, n & 0xFF))) continue;
- stp += strlen(stp);
- p += n1;
- n -= n1;
- if (n < 6) continue; /* no service and proc */
- if ((*p++ != 0x0A) || (*p++ != 1)) continue;
- sprintf(stp, " 0x%02x ", (*p++) & 0xFF);
- stp += strlen(stp);
- if ((*p++ != 0x0A) || (*p++ != 1)) continue;
- sprintf(stp, "%d ", (*p++) & 0xFF);
- stp += strlen(stp);
- n -= 6;
- if (n > 2) {
- if (*p++ != 0x30) continue;
- if (*p > (n - 2)) continue;
- n = *p++;
- if (!(n1 = put_address(stp, p, n & 0xFF))) continue;
- stp += strlen(stp);
- }
- sprintf(stp, "\n");
- put_info_buffer(st);
- } /* while restlen */
- if (restlen) return (-117);
- return (0);
-} /* interrogate_success */
-
-/*********************************************/
-/* callback for protocol specific extensions */
-/*********************************************/
-static int prot_stat_callback(isdn_ctrl *ic)
-{
- struct call_struc *cs, *cs1;
- int i;
- unsigned long flags;
-
- cs = divert_head; /* start of list */
- cs1 = NULL;
- while (cs) {
- if (ic->driver == cs->ics.driver) {
- switch (cs->ics.arg) {
- case DSS1_CMD_INVOKE:
- if ((cs->ics.parm.dss1_io.ll_id == ic->parm.dss1_io.ll_id) &&
- (cs->ics.parm.dss1_io.hl_id == ic->parm.dss1_io.hl_id)) {
- switch (ic->arg) {
- case DSS1_STAT_INVOKE_ERR:
- sprintf(cs->info, "128 0x%lx 0x%x\n",
- ic->parm.dss1_io.ll_id,
- ic->parm.dss1_io.timeout);
- put_info_buffer(cs->info);
- break;
-
- case DSS1_STAT_INVOKE_RES:
- switch (cs->ics.parm.dss1_io.proc) {
- case 7:
- case 8:
- put_info_buffer(cs->info);
- break;
-
- case 11:
- i = interrogate_success(ic, cs);
- if (i)
- sprintf(cs->info, "%d 0x%lx %d\n", DIVERT_REPORT,
- ic->parm.dss1_io.ll_id, i);
- put_info_buffer(cs->info);
- break;
-
- default:
- printk(KERN_WARNING "dss1_divert: unknown proc %d\n", cs->ics.parm.dss1_io.proc);
- break;
- }
-
- break;
-
- default:
- printk(KERN_WARNING "dss1_divert unknown invoke answer %lx\n", ic->arg);
- break;
- }
- cs1 = cs; /* remember structure */
- cs = NULL;
- continue; /* abort search */
- } /* id found */
- break;
-
- case DSS1_CMD_INVOKE_ABORT:
- printk(KERN_WARNING "dss1_divert unhandled invoke abort\n");
- break;
-
- default:
- printk(KERN_WARNING "dss1_divert unknown cmd 0x%lx\n", cs->ics.arg);
- break;
- } /* switch ics.arg */
- cs = cs->next;
- } /* driver ok */
- }
-
- if (!cs1) {
- printk(KERN_WARNING "dss1_divert unhandled process\n");
- return (0);
- }
-
- if (cs1->ics.driver == -1) {
- spin_lock_irqsave(&divert_lock, flags);
- del_timer(&cs1->timer);
- if (cs1->prev)
- cs1->prev->next = cs1->next; /* forward link */
- else
- divert_head = cs1->next;
- if (cs1->next)
- cs1->next->prev = cs1->prev; /* back link */
- spin_unlock_irqrestore(&divert_lock, flags);
- kfree(cs1);
- }
-
- return (0);
-} /* prot_stat_callback */
-
-
-/***************************/
-/* status callback from HL */
-/***************************/
-static int isdn_divert_stat_callback(isdn_ctrl *ic)
-{
- struct call_struc *cs, *cs1;
- unsigned long flags;
- int retval;
-
- retval = -1;
- cs = divert_head; /* start of list */
- while (cs) {
- if ((ic->driver == cs->ics.driver) &&
- (ic->arg == cs->ics.arg)) {
- switch (ic->command) {
- case ISDN_STAT_DHUP:
- sprintf(cs->info, "129 0x%lx\n", cs->divert_id);
- del_timer(&cs->timer);
- cs->ics.driver = -1;
- break;
-
- case ISDN_STAT_CAUSE:
- sprintf(cs->info, "130 0x%lx %s\n", cs->divert_id, ic->parm.num);
- break;
-
- case ISDN_STAT_REDIR:
- sprintf(cs->info, "131 0x%lx\n", cs->divert_id);
- del_timer(&cs->timer);
- cs->ics.driver = -1;
- break;
-
- default:
- sprintf(cs->info, "999 0x%lx 0x%x\n", cs->divert_id, (int)(ic->command));
- break;
- }
- put_info_buffer(cs->info);
- retval = 0;
- }
- cs1 = cs;
- cs = cs->next;
- if (cs1->ics.driver == -1) {
- spin_lock_irqsave(&divert_lock, flags);
- if (cs1->prev)
- cs1->prev->next = cs1->next; /* forward link */
- else
- divert_head = cs1->next;
- if (cs1->next)
- cs1->next->prev = cs1->prev; /* back link */
- spin_unlock_irqrestore(&divert_lock, flags);
- kfree(cs1);
- }
- }
- return (retval); /* not found */
-} /* isdn_divert_stat_callback */
-
-
-/********************/
-/* callback from ll */
-/********************/
-int ll_callback(isdn_ctrl *ic)
-{
- switch (ic->command) {
- case ISDN_STAT_ICALL:
- case ISDN_STAT_ICALLW:
- return (isdn_divert_icall(ic));
- break;
-
- case ISDN_STAT_PROT:
- if ((ic->arg & 0xFF) == ISDN_PTYPE_EURO) {
- if (ic->arg != DSS1_STAT_INVOKE_BRD)
- return (prot_stat_callback(ic));
- else
- return (0); /* DSS1 invoke broadcast */
- } else
- return (-1); /* protocol not euro */
-
- default:
- return (isdn_divert_stat_callback(ic));
- }
-} /* ll_callback */
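
isdn_divert_icall() above applies the same matching loop to my_msn and to the caller pattern: exact match, or prefix match when the pattern ends in '-'. Pulled out into a standalone helper for clarity (illustrative, not the removed code itself):

#include <stdio.h>

/* Standalone version of the number-matching loop in the removed
 * isdn_divert_icall(): exact match, or prefix match when the pattern
 * ends in a '-' wildcard. */
static int msn_matches(const char *pattern, const char *number)
{
    const char *p = pattern, *n = number;

    while (*p) {
        if (*p == '-')
            return 1;                   /* wildcard: prefix already matched */
        if (*p++ != *n++)
            return 0;                   /* character mismatch */
        if (!*p && !*n)
            return 1;                   /* both strings ended together */
    }
    return 0;
}

int main(void)
{
    printf("%d\n", msn_matches("12345", "12345"));  /* 1: exact    */
    printf("%d\n", msn_matches("123-",  "123999")); /* 1: prefix   */
    printf("%d\n", msn_matches("124-",  "123999")); /* 0: no match */
    return 0;
}

Note that a pattern which is merely a prefix of the number is rejected unless it carries the '-' wildcard, matching the behaviour of the loop above.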
diff --git a/drivers/isdn/divert/isdn_divert.h b/drivers/isdn/divert/isdn_divert.h
deleted file mode 100644
index 55033dd872c0..000000000000
--- a/drivers/isdn/divert/isdn_divert.h
+++ /dev/null
@@ -1,132 +0,0 @@
-/* $Id: isdn_divert.h,v 1.5.6.1 2001/09/23 22:24:36 kai Exp $
- *
- * Header for the diversion supplementary ioctl interface.
- *
- * Copyright 1998 by Werner Cornelius (werner@ikt.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/ioctl.h>
-#include <linux/types.h>
-
-/******************************************/
-/* IOCTL codes for interface to user prog */
-/******************************************/
-#define DIVERT_IIOC_VERSION 0x01 /* actual version */
-#define IIOCGETVER _IO('I', 1) /* get version of interface */
-#define IIOCGETDRV _IO('I', 2) /* get driver number */
-#define IIOCGETNAM _IO('I', 3) /* get driver name */
-#define IIOCGETRULE _IO('I', 4) /* read one rule */
-#define IIOCMODRULE _IO('I', 5) /* modify/replace a rule */
-#define IIOCINSRULE _IO('I', 6) /* insert/append one rule */
-#define IIOCDELRULE _IO('I', 7) /* delete a rule */
-#define IIOCDODFACT _IO('I', 8) /* hangup/reject/alert/immediately deflect a call */
-#define IIOCDOCFACT _IO('I', 9) /* activate control forwarding in PBX */
-#define IIOCDOCFDIS _IO('I', 10) /* deactivate control forwarding in PBX */
-#define IIOCDOCFINT _IO('I', 11) /* interrogate control forwarding in PBX */
-
-/*************************************/
-/* states reported through interface */
-/*************************************/
-#define DEFLECT_IGNORE 0 /* ignore incoming call */
-#define DEFLECT_REPORT 1 /* only report */
-#define DEFLECT_PROCEED 2 /* deflect when externally triggered */
-#define DEFLECT_ALERT 3 /* alert and deflect after delay */
-#define DEFLECT_REJECT 4 /* reject immediately */
-#define DIVERT_ACTIVATE 5 /* diversion activate */
-#define DIVERT_DEACTIVATE 6 /* diversion deactivate */
-#define DIVERT_REPORT 7 /* interrogation result */
-#define DEFLECT_AUTODEL 255 /* only for internal use */
-
-#define DEFLECT_ALL_IDS 0xFFFFFFFF /* all drivers selected */
-
-typedef struct {
- ulong drvid; /* driver ids, bit mapped */
- char my_msn[35]; /* desired msn, subaddr allowed */
- char caller[35]; /* caller id, partial string with * + subaddr allowed */
- char to_nr[35]; /* deflected to number incl. subaddress */
- u_char si1, si2; /* service indicators, si1=bitmask, si1+2 0 = all */
-	u_char screen;   /* screening: 0 = no info, 1 = info, 2 = info with nr */
- u_char callopt; /* option for call handling:
- 0 = all calls
- 1 = only non waiting calls
- 2 = only waiting calls */
- u_char action; /* desired action:
- 0 = don't report call -> ignore
- 1 = report call, do not allow/proceed for deflection
- 2 = report call, send proceed, wait max waittime secs
- 3 = report call, alert and deflect after waittime
- 4 = report call, reject immediately
- actions 1-2 only take place if interface is opened
- */
- u_char waittime; /* maximum wait time for proceeding */
-} divert_rule;
-
-typedef union {
- int drv_version; /* return of driver version */
- struct {
- int drvid; /* id of driver */
- char drvnam[30]; /* name of driver */
- } getid;
- struct {
- int ruleidx; /* index of rule */
- divert_rule rule; /* rule parms */
- } getsetrule;
- struct {
- u_char subcmd; /* 0 = hangup/reject,
- 1 = alert,
- 2 = deflect */
- ulong callid; /* id of call delivered by ascii output */
- char to_nr[35]; /* destination when deflect,
- else uus1 string (maxlen 31),
- data from rule used if empty */
- } fwd_ctrl;
- struct {
- int drvid; /* id of driver */
- u_char cfproc; /* cfu = 0, cfb = 1, cfnr = 2 */
- ulong procid; /* process id returned when no error */
- u_char service; /* basically coded service, 0 = all */
- char msn[25]; /* desired msn, empty = all */
- char fwd_nr[35];/* forwarded to number + subaddress */
- } cf_ctrl;
-} divert_ioctl;
-
-#ifdef __KERNEL__
-
-#include <linux/isdnif.h>
-#include <linux/isdn_divertif.h>
-
-#define AUTODEL_TIME 30 /* timeout in s to delete internal entries */
-
-/**************************************************/
-/* structure keeping ascii info for device output */
-/**************************************************/
-struct divert_info {
- struct divert_info *next;
- ulong usage_cnt; /* number of files still to work */
- char info_start[2]; /* info string start */
-};
-
-
-/**************/
-/* Prototypes */
-/**************/
-extern spinlock_t divert_lock;
-
-extern ulong if_used; /* number of interface users */
-extern int divert_dev_deinit(void);
-extern int divert_dev_init(void);
-extern void put_info_buffer(char *);
-extern int ll_callback(isdn_ctrl *);
-extern isdn_divert_if divert_if;
-extern divert_rule *getruleptr(int);
-extern int insertrule(int, divert_rule *);
-extern int deleterule(int);
-extern void deleteprocs(void);
-extern int deflect_extern_action(u_char, ulong, char *);
-extern int cf_command(int, int, u_char, char *, u_char, char *, ulong *);
-
-#endif /* __KERNEL__ */
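
The header above fixes the rule format passed through IIOCINSRULE/IIOCGETRULE: drvid is a bit mask of driver ids, the number fields accept a trailing '-' as a prefix wildcard, and action/waittime choose the deflection behaviour. A hypothetical userspace snippet filling such a rule; the struct is an abridged re-declaration with only the fields used here, so it does not match the real ioctl ABI and is not suitable for passing to the (now removed) driver:

#include <stdio.h>
#include <string.h>

/* Abridged copy of the removed divert_rule layout, kept only to show the
 * conventions described in the header comments above. */
typedef struct {
    unsigned long drvid;        /* driver ids, bit mapped */
    char my_msn[35];            /* desired MSN, '-' = prefix wildcard */
    char caller[35];            /* caller pattern, '-' = prefix wildcard */
    char to_nr[35];             /* deflection target */
    unsigned char action;       /* 3 = alert and deflect after waittime */
    unsigned char waittime;
} divert_rule_abridged;

int main(void)
{
    divert_rule_abridged r;

    memset(&r, 0, sizeof(r));
    r.drvid = 1UL << 0;                              /* match driver id 0 only */
    snprintf(r.my_msn, sizeof(r.my_msn), "4030-");   /* any MSN starting 4030 */
    snprintf(r.caller, sizeof(r.caller), "040-");    /* any caller starting 040 */
    snprintf(r.to_nr, sizeof(r.to_nr), "017012345"); /* deflect here */
    r.action = 3;
    r.waittime = 10;

    printf("rule: msn=%s caller=%s -> %s after %us\n",
           r.my_msn, r.caller, r.to_nr, (unsigned)r.waittime);
    return 0;
}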
diff --git a/drivers/isdn/gigaset/i4l.c b/drivers/isdn/gigaset/i4l.c
deleted file mode 100644
index 335b8ce2bb06..000000000000
--- a/drivers/isdn/gigaset/i4l.c
+++ /dev/null
@@ -1,692 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Stuff used by all variants of the driver
- *
- * Copyright (c) 2001 by Stefan Eilers,
- * Hansjoerg Lipp <hjlipp@web.de>,
- * Tilman Schmidt <tilman@imap.cc>.
- *
- * =====================================================================
- * =====================================================================
- */
-
-#include "gigaset.h"
-#include <linux/isdnif.h>
-#include <linux/export.h>
-
-#define SBUFSIZE 4096 /* sk_buff payload size */
-#define TRANSBUFSIZE 768 /* bytes per skb for transparent receive */
-#define HW_HDR_LEN 2 /* Header size used to store ack info */
-#define MAX_BUF_SIZE (SBUFSIZE - HW_HDR_LEN) /* max data packet from LL */
-
-/* == Handling of I4L IO =====================================================*/
-
-/* writebuf_from_LL
- * called by LL to transmit data on an open channel
- * inserts the buffer data into the send queue and starts the transmission
- * Note that this operation must not sleep!
- * When the buffer is processed completely, gigaset_skb_sent() should be called.
- * parameters:
- * driverID driver ID as assigned by LL
- * channel channel number
- * ack if != 0 LL wants to be notified on completion via
- * statcallb(ISDN_STAT_BSENT)
- * skb skb containing data to send
- * return value:
- * number of accepted bytes
- * 0 if temporarily unable to accept data (out of buffer space)
- * <0 on error (eg. -EINVAL)
- */
-static int writebuf_from_LL(int driverID, int channel, int ack,
- struct sk_buff *skb)
-{
- struct cardstate *cs = gigaset_get_cs_by_id(driverID);
- struct bc_state *bcs;
- unsigned char *ack_header;
- unsigned len;
-
- if (!cs) {
- pr_err("%s: invalid driver ID (%d)\n", __func__, driverID);
- return -ENODEV;
- }
- if (channel < 0 || channel >= cs->channels) {
- dev_err(cs->dev, "%s: invalid channel ID (%d)\n",
- __func__, channel);
- return -ENODEV;
- }
- bcs = &cs->bcs[channel];
-
- /* can only handle linear sk_buffs */
- if (skb_linearize(skb) < 0) {
- dev_err(cs->dev, "%s: skb_linearize failed\n", __func__);
- return -ENOMEM;
- }
- len = skb->len;
-
- gig_dbg(DEBUG_LLDATA,
- "Receiving data from LL (id: %d, ch: %d, ack: %d, sz: %d)",
- driverID, channel, ack, len);
-
- if (!len) {
- if (ack)
- dev_notice(cs->dev, "%s: not ACKing empty packet\n",
- __func__);
- return 0;
- }
- if (len > MAX_BUF_SIZE) {
- dev_err(cs->dev, "%s: packet too large (%d bytes)\n",
- __func__, len);
- return -EINVAL;
- }
-
- /* set up acknowledgement header */
- if (skb_headroom(skb) < HW_HDR_LEN) {
- /* should never happen */
- dev_err(cs->dev, "%s: insufficient skb headroom\n", __func__);
- return -ENOMEM;
- }
- skb_set_mac_header(skb, -HW_HDR_LEN);
- skb->mac_len = HW_HDR_LEN;
- ack_header = skb_mac_header(skb);
- if (ack) {
- ack_header[0] = len & 0xff;
- ack_header[1] = len >> 8;
- } else {
- ack_header[0] = ack_header[1] = 0;
- }
- gig_dbg(DEBUG_MCMD, "skb: len=%u, ack=%d: %02x %02x",
- len, ack, ack_header[0], ack_header[1]);
-
- /* pass to device-specific module */
- return cs->ops->send_skb(bcs, skb);
-}
-
-/**
- * gigaset_skb_sent() - acknowledge sending an skb
- * @bcs: B channel descriptor structure.
- * @skb: sent data.
- *
- * Called by hardware module {bas,ser,usb}_gigaset when the data in a
- * skb has been successfully sent, for signalling completion to the LL.
- */
-void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb)
-{
- isdn_if *iif = bcs->cs->iif;
- unsigned char *ack_header = skb_mac_header(skb);
- unsigned len;
- isdn_ctrl response;
-
- ++bcs->trans_up;
-
- if (skb->len)
- dev_warn(bcs->cs->dev, "%s: skb->len==%d\n",
- __func__, skb->len);
-
- len = ack_header[0] + ((unsigned) ack_header[1] << 8);
- if (len) {
- gig_dbg(DEBUG_MCMD, "ACKing to LL (id: %d, ch: %d, sz: %u)",
- bcs->cs->myid, bcs->channel, len);
-
- response.driver = bcs->cs->myid;
- response.command = ISDN_STAT_BSENT;
- response.arg = bcs->channel;
- response.parm.length = len;
- iif->statcallb(&response);
- }
-}
-EXPORT_SYMBOL_GPL(gigaset_skb_sent);
-
-/**
- * gigaset_skb_rcvd() - pass received skb to LL
- * @bcs: B channel descriptor structure.
- * @skb: received data.
- *
- * Called by hardware module {bas,ser,usb}_gigaset when user data has
- * been successfully received, for passing to the LL.
- * Warning: skb must not be accessed anymore!
- */
-void gigaset_skb_rcvd(struct bc_state *bcs, struct sk_buff *skb)
-{
- isdn_if *iif = bcs->cs->iif;
-
- iif->rcvcallb_skb(bcs->cs->myid, bcs->channel, skb);
- bcs->trans_down++;
-}
-EXPORT_SYMBOL_GPL(gigaset_skb_rcvd);
-
-/**
- * gigaset_isdn_rcv_err() - signal receive error
- * @bcs: B channel descriptor structure.
- *
- * Called by hardware module {bas,ser,usb}_gigaset when a receive error
- * has occurred, for signalling to the LL.
- */
-void gigaset_isdn_rcv_err(struct bc_state *bcs)
-{
- isdn_if *iif = bcs->cs->iif;
- isdn_ctrl response;
-
- /* if currently ignoring packets, just count down */
- if (bcs->ignore) {
- bcs->ignore--;
- return;
- }
-
- /* update statistics */
- bcs->corrupted++;
-
- /* error -> LL */
- gig_dbg(DEBUG_CMD, "sending L1ERR");
- response.driver = bcs->cs->myid;
- response.command = ISDN_STAT_L1ERR;
- response.arg = bcs->channel;
- response.parm.errcode = ISDN_STAT_L1ERR_RECV;
- iif->statcallb(&response);
-}
-EXPORT_SYMBOL_GPL(gigaset_isdn_rcv_err);
-
-/* This function will be called by LL to send commands
- * NOTE: LL ignores the returned value, for commands other than ISDN_CMD_IOCTL,
- * so don't put too much effort into it.
- */
-static int command_from_LL(isdn_ctrl *cntrl)
-{
- struct cardstate *cs;
- struct bc_state *bcs;
- int retval = 0;
- char **commands;
- int ch;
- int i;
- size_t l;
-
- gig_dbg(DEBUG_CMD, "driver: %d, command: %d, arg: 0x%lx",
- cntrl->driver, cntrl->command, cntrl->arg);
-
- cs = gigaset_get_cs_by_id(cntrl->driver);
- if (cs == NULL) {
- pr_err("%s: invalid driver ID (%d)\n", __func__, cntrl->driver);
- return -ENODEV;
- }
- ch = cntrl->arg & 0xff;
-
- switch (cntrl->command) {
- case ISDN_CMD_IOCTL:
- dev_warn(cs->dev, "ISDN_CMD_IOCTL not supported\n");
- return -EINVAL;
-
- case ISDN_CMD_DIAL:
- gig_dbg(DEBUG_CMD,
- "ISDN_CMD_DIAL (phone: %s, msn: %s, si1: %d, si2: %d)",
- cntrl->parm.setup.phone, cntrl->parm.setup.eazmsn,
- cntrl->parm.setup.si1, cntrl->parm.setup.si2);
-
- if (ch >= cs->channels) {
- dev_err(cs->dev,
- "ISDN_CMD_DIAL: invalid channel (%d)\n", ch);
- return -EINVAL;
- }
- bcs = cs->bcs + ch;
- if (gigaset_get_channel(bcs) < 0) {
- dev_err(cs->dev, "ISDN_CMD_DIAL: channel not free\n");
- return -EBUSY;
- }
- switch (bcs->proto2) {
- case L2_HDLC:
- bcs->rx_bufsize = SBUFSIZE;
- break;
- default: /* assume transparent */
- bcs->rx_bufsize = TRANSBUFSIZE;
- }
- dev_kfree_skb(bcs->rx_skb);
- gigaset_new_rx_skb(bcs);
-
- commands = kcalloc(AT_NUM, sizeof(*commands), GFP_ATOMIC);
- if (!commands) {
- gigaset_free_channel(bcs);
- dev_err(cs->dev, "ISDN_CMD_DIAL: out of memory\n");
- return -ENOMEM;
- }
-
- l = 3 + strlen(cntrl->parm.setup.phone);
- commands[AT_DIAL] = kmalloc(l, GFP_ATOMIC);
- if (!commands[AT_DIAL])
- goto oom;
- if (cntrl->parm.setup.phone[0] == '*' &&
- cntrl->parm.setup.phone[1] == '*') {
- /* internal call: translate ** prefix to CTP value */
- commands[AT_TYPE] = kstrdup("^SCTP=0\r", GFP_ATOMIC);
- if (!commands[AT_TYPE])
- goto oom;
- snprintf(commands[AT_DIAL], l,
- "D%s\r", cntrl->parm.setup.phone + 2);
- } else {
- commands[AT_TYPE] = kstrdup("^SCTP=1\r", GFP_ATOMIC);
- if (!commands[AT_TYPE])
- goto oom;
- snprintf(commands[AT_DIAL], l,
- "D%s\r", cntrl->parm.setup.phone);
- }
-
- l = strlen(cntrl->parm.setup.eazmsn);
- if (l) {
- l += 8;
- commands[AT_MSN] = kmalloc(l, GFP_ATOMIC);
- if (!commands[AT_MSN])
- goto oom;
- snprintf(commands[AT_MSN], l, "^SMSN=%s\r",
- cntrl->parm.setup.eazmsn);
- }
-
- switch (cntrl->parm.setup.si1) {
- case 1: /* audio */
- /* BC = 9090A3: 3.1 kHz audio, A-law */
- commands[AT_BC] = kstrdup("^SBC=9090A3\r", GFP_ATOMIC);
- if (!commands[AT_BC])
- goto oom;
- break;
- case 7: /* data */
- default: /* hope the app knows what it is doing */
- /* BC = 8890: unrestricted digital information */
- commands[AT_BC] = kstrdup("^SBC=8890\r", GFP_ATOMIC);
- if (!commands[AT_BC])
- goto oom;
- }
- /* ToDo: other si1 values, inspect si2, set HLC/LLC */
-
- commands[AT_PROTO] = kmalloc(9, GFP_ATOMIC);
- if (!commands[AT_PROTO])
- goto oom;
- snprintf(commands[AT_PROTO], 9, "^SBPR=%u\r", bcs->proto2);
-
- commands[AT_ISO] = kmalloc(9, GFP_ATOMIC);
- if (!commands[AT_ISO])
- goto oom;
- snprintf(commands[AT_ISO], 9, "^SISO=%u\r",
- (unsigned) bcs->channel + 1);
-
- if (!gigaset_add_event(cs, &bcs->at_state, EV_DIAL, commands,
- bcs->at_state.seq_index, NULL)) {
- for (i = 0; i < AT_NUM; ++i)
- kfree(commands[i]);
- kfree(commands);
- gigaset_free_channel(bcs);
- return -ENOMEM;
- }
- gigaset_schedule_event(cs);
- break;
- case ISDN_CMD_ACCEPTD:
- gig_dbg(DEBUG_CMD, "ISDN_CMD_ACCEPTD");
- if (ch >= cs->channels) {
- dev_err(cs->dev,
- "ISDN_CMD_ACCEPTD: invalid channel (%d)\n", ch);
- return -EINVAL;
- }
- bcs = cs->bcs + ch;
- switch (bcs->proto2) {
- case L2_HDLC:
- bcs->rx_bufsize = SBUFSIZE;
- break;
- default: /* assume transparent */
- bcs->rx_bufsize = TRANSBUFSIZE;
- }
- dev_kfree_skb(bcs->rx_skb);
- gigaset_new_rx_skb(bcs);
- if (!gigaset_add_event(cs, &bcs->at_state,
- EV_ACCEPT, NULL, 0, NULL))
- return -ENOMEM;
- gigaset_schedule_event(cs);
-
- break;
- case ISDN_CMD_HANGUP:
- gig_dbg(DEBUG_CMD, "ISDN_CMD_HANGUP");
- if (ch >= cs->channels) {
- dev_err(cs->dev,
- "ISDN_CMD_HANGUP: invalid channel (%d)\n", ch);
- return -EINVAL;
- }
- bcs = cs->bcs + ch;
- if (!gigaset_add_event(cs, &bcs->at_state,
- EV_HUP, NULL, 0, NULL))
- return -ENOMEM;
- gigaset_schedule_event(cs);
-
- break;
- case ISDN_CMD_CLREAZ: /* Do not signal incoming signals */
- dev_info(cs->dev, "ignoring ISDN_CMD_CLREAZ\n");
- break;
- case ISDN_CMD_SETEAZ: /* Signal incoming calls for given MSN */
- dev_info(cs->dev, "ignoring ISDN_CMD_SETEAZ (%s)\n",
- cntrl->parm.num);
- break;
- case ISDN_CMD_SETL2: /* Set L2 to given protocol */
- if (ch >= cs->channels) {
- dev_err(cs->dev,
- "ISDN_CMD_SETL2: invalid channel (%d)\n", ch);
- return -EINVAL;
- }
- bcs = cs->bcs + ch;
- if (bcs->chstate & CHS_D_UP) {
- dev_err(cs->dev,
- "ISDN_CMD_SETL2: channel active (%d)\n", ch);
- return -EINVAL;
- }
- switch (cntrl->arg >> 8) {
- case ISDN_PROTO_L2_HDLC:
- gig_dbg(DEBUG_CMD, "ISDN_CMD_SETL2: setting L2_HDLC");
- bcs->proto2 = L2_HDLC;
- break;
- case ISDN_PROTO_L2_TRANS:
- gig_dbg(DEBUG_CMD, "ISDN_CMD_SETL2: setting L2_VOICE");
- bcs->proto2 = L2_VOICE;
- break;
- default:
- dev_err(cs->dev,
- "ISDN_CMD_SETL2: unsupported protocol (%lu)\n",
- cntrl->arg >> 8);
- return -EINVAL;
- }
- break;
- case ISDN_CMD_SETL3: /* Set L3 to given protocol */
- gig_dbg(DEBUG_CMD, "ISDN_CMD_SETL3");
- if (ch >= cs->channels) {
- dev_err(cs->dev,
- "ISDN_CMD_SETL3: invalid channel (%d)\n", ch);
- return -EINVAL;
- }
-
- if (cntrl->arg >> 8 != ISDN_PROTO_L3_TRANS) {
- dev_err(cs->dev,
- "ISDN_CMD_SETL3: unsupported protocol (%lu)\n",
- cntrl->arg >> 8);
- return -EINVAL;
- }
-
- break;
-
- default:
- gig_dbg(DEBUG_CMD, "unknown command %d from LL",
- cntrl->command);
- return -EINVAL;
- }
-
- return retval;
-
-oom:
- dev_err(bcs->cs->dev, "out of memory\n");
- for (i = 0; i < AT_NUM; ++i)
- kfree(commands[i]);
- kfree(commands);
- gigaset_free_channel(bcs);
- return -ENOMEM;
-}
-
-static void gigaset_i4l_cmd(struct cardstate *cs, int cmd)
-{
- isdn_if *iif = cs->iif;
- isdn_ctrl command;
-
- command.driver = cs->myid;
- command.command = cmd;
- command.arg = 0;
- iif->statcallb(&command);
-}
-
-static void gigaset_i4l_channel_cmd(struct bc_state *bcs, int cmd)
-{
- isdn_if *iif = bcs->cs->iif;
- isdn_ctrl command;
-
- command.driver = bcs->cs->myid;
- command.command = cmd;
- command.arg = bcs->channel;
- iif->statcallb(&command);
-}
-
-/**
- * gigaset_isdn_icall() - signal incoming call
- * @at_state: connection state structure.
- *
- * Called by main module to notify the LL that an incoming call has been
- * received. @at_state contains the parameters of the call.
- *
- * Return value: call disposition (ICALL_*)
- */
-int gigaset_isdn_icall(struct at_state_t *at_state)
-{
- struct cardstate *cs = at_state->cs;
- struct bc_state *bcs = at_state->bcs;
- isdn_if *iif = cs->iif;
- isdn_ctrl response;
- int retval;
-
- /* fill ICALL structure */
- response.parm.setup.si1 = 0; /* default: unknown */
- response.parm.setup.si2 = 0;
- response.parm.setup.screen = 0;
- response.parm.setup.plan = 0;
- if (!at_state->str_var[STR_ZBC]) {
- /* no BC (internal call): assume speech, A-law */
- response.parm.setup.si1 = 1;
- } else if (!strcmp(at_state->str_var[STR_ZBC], "8890")) {
- /* unrestricted digital information */
- response.parm.setup.si1 = 7;
- } else if (!strcmp(at_state->str_var[STR_ZBC], "8090A3")) {
- /* speech, A-law */
- response.parm.setup.si1 = 1;
- } else if (!strcmp(at_state->str_var[STR_ZBC], "9090A3")) {
- /* 3,1 kHz audio, A-law */
- response.parm.setup.si1 = 1;
- response.parm.setup.si2 = 2;
- } else {
- dev_warn(cs->dev, "RING ignored - unsupported BC %s\n",
- at_state->str_var[STR_ZBC]);
- return ICALL_IGNORE;
- }
- if (at_state->str_var[STR_NMBR]) {
- strlcpy(response.parm.setup.phone, at_state->str_var[STR_NMBR],
- sizeof response.parm.setup.phone);
- } else
- response.parm.setup.phone[0] = 0;
- if (at_state->str_var[STR_ZCPN]) {
- strlcpy(response.parm.setup.eazmsn, at_state->str_var[STR_ZCPN],
- sizeof response.parm.setup.eazmsn);
- } else
- response.parm.setup.eazmsn[0] = 0;
-
- if (!bcs) {
- dev_notice(cs->dev, "no channel for incoming call\n");
- response.command = ISDN_STAT_ICALLW;
- response.arg = 0;
- } else {
- gig_dbg(DEBUG_CMD, "Sending ICALL");
- response.command = ISDN_STAT_ICALL;
- response.arg = bcs->channel;
- }
- response.driver = cs->myid;
- retval = iif->statcallb(&response);
- gig_dbg(DEBUG_CMD, "Response: %d", retval);
- switch (retval) {
- case 0: /* no takers */
- return ICALL_IGNORE;
- case 1: /* alerting */
- bcs->chstate |= CHS_NOTIFY_LL;
- return ICALL_ACCEPT;
- case 2: /* reject */
- return ICALL_REJECT;
- case 3: /* incomplete */
- dev_warn(cs->dev,
- "LL requested unsupported feature: Incomplete Number\n");
- return ICALL_IGNORE;
- case 4: /* proceeding */
- /* Gigaset will send ALERTING anyway.
- * There doesn't seem to be a way to avoid this.
- */
- return ICALL_ACCEPT;
- case 5: /* deflect */
- dev_warn(cs->dev,
- "LL requested unsupported feature: Call Deflection\n");
- return ICALL_IGNORE;
- default:
- dev_err(cs->dev, "LL error %d on ICALL\n", retval);
- return ICALL_IGNORE;
- }
-}
-
-/**
- * gigaset_isdn_connD() - signal D channel connect
- * @bcs: B channel descriptor structure.
- *
- * Called by main module to notify the LL that the D channel connection has
- * been established.
- */
-void gigaset_isdn_connD(struct bc_state *bcs)
-{
- gig_dbg(DEBUG_CMD, "sending DCONN");
- gigaset_i4l_channel_cmd(bcs, ISDN_STAT_DCONN);
-}
-
-/**
- * gigaset_isdn_hupD() - signal D channel hangup
- * @bcs: B channel descriptor structure.
- *
- * Called by main module to notify the LL that the D channel connection has
- * been shut down.
- */
-void gigaset_isdn_hupD(struct bc_state *bcs)
-{
- gig_dbg(DEBUG_CMD, "sending DHUP");
- gigaset_i4l_channel_cmd(bcs, ISDN_STAT_DHUP);
-}
-
-/**
- * gigaset_isdn_connB() - signal B channel connect
- * @bcs: B channel descriptor structure.
- *
- * Called by main module to notify the LL that the B channel connection has
- * been established.
- */
-void gigaset_isdn_connB(struct bc_state *bcs)
-{
- gig_dbg(DEBUG_CMD, "sending BCONN");
- gigaset_i4l_channel_cmd(bcs, ISDN_STAT_BCONN);
-}
-
-/**
- * gigaset_isdn_hupB() - signal B channel hangup
- * @bcs: B channel descriptor structure.
- *
- * Called by main module to notify the LL that the B channel connection has
- * been shut down.
- */
-void gigaset_isdn_hupB(struct bc_state *bcs)
-{
- gig_dbg(DEBUG_CMD, "sending BHUP");
- gigaset_i4l_channel_cmd(bcs, ISDN_STAT_BHUP);
-}
-
-/**
- * gigaset_isdn_start() - signal device availability
- * @cs: device descriptor structure.
- *
- * Called by main module to notify the LL that the device is available for
- * use.
- */
-void gigaset_isdn_start(struct cardstate *cs)
-{
- gig_dbg(DEBUG_CMD, "sending RUN");
- gigaset_i4l_cmd(cs, ISDN_STAT_RUN);
-}
-
-/**
- * gigaset_isdn_stop() - signal device unavailability
- * @cs: device descriptor structure.
- *
- * Called by main module to notify the LL that the device is no longer
- * available for use.
- */
-void gigaset_isdn_stop(struct cardstate *cs)
-{
- gig_dbg(DEBUG_CMD, "sending STOP");
- gigaset_i4l_cmd(cs, ISDN_STAT_STOP);
-}
-
-/**
- * gigaset_isdn_regdev() - register to LL
- * @cs: device descriptor structure.
- * @isdnid: device name.
- *
- * Return value: 0 on success, error code < 0 on failure
- */
-int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid)
-{
- isdn_if *iif;
-
- iif = kmalloc(sizeof *iif, GFP_KERNEL);
- if (!iif) {
- pr_err("out of memory\n");
- return -ENOMEM;
- }
-
- if (snprintf(iif->id, sizeof iif->id, "%s_%u", isdnid, cs->minor_index)
- >= sizeof iif->id) {
- pr_err("ID too long: %s\n", isdnid);
- kfree(iif);
- return -EINVAL;
- }
-
- iif->owner = THIS_MODULE;
- iif->channels = cs->channels;
- iif->maxbufsize = MAX_BUF_SIZE;
- iif->features = ISDN_FEATURE_L2_TRANS |
- ISDN_FEATURE_L2_HDLC |
- ISDN_FEATURE_L2_X75I |
- ISDN_FEATURE_L3_TRANS |
- ISDN_FEATURE_P_EURO;
- iif->hl_hdrlen = HW_HDR_LEN; /* Area for storing ack */
- iif->command = command_from_LL;
- iif->writebuf_skb = writebuf_from_LL;
- iif->writecmd = NULL; /* Don't support isdnctrl */
- iif->readstat = NULL; /* Don't support isdnctrl */
- iif->rcvcallb_skb = NULL; /* Will be set by LL */
- iif->statcallb = NULL; /* Will be set by LL */
-
- if (!register_isdn(iif)) {
- pr_err("register_isdn failed\n");
- kfree(iif);
- return -EINVAL;
- }
-
- cs->iif = iif;
- cs->myid = iif->channels; /* Set my device id */
- cs->hw_hdr_len = HW_HDR_LEN;
- return 0;
-}
-
-/**
- * gigaset_isdn_unregdev() - unregister device from LL
- * @cs: device descriptor structure.
- */
-void gigaset_isdn_unregdev(struct cardstate *cs)
-{
- gig_dbg(DEBUG_CMD, "sending UNLOAD");
- gigaset_i4l_cmd(cs, ISDN_STAT_UNLOAD);
- kfree(cs->iif);
- cs->iif = NULL;
-}
-
-/**
- * gigaset_isdn_regdrv() - register driver to LL
- */
-void gigaset_isdn_regdrv(void)
-{
- pr_info("ISDN4Linux interface\n");
- /* nothing to do */
-}
-
-/**
- * gigaset_isdn_unregdrv() - unregister driver from LL
- */
-void gigaset_isdn_unregdrv(void)
-{
- /* nothing to do */
-}
diff --git a/drivers/isdn/hardware/Kconfig b/drivers/isdn/hardware/Kconfig
deleted file mode 100644
index 0d609b5fcf01..000000000000
--- a/drivers/isdn/hardware/Kconfig
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# ISDN hardware drivers
-#
-comment "CAPI hardware drivers"
-
-source "drivers/isdn/hardware/avm/Kconfig"
-
diff --git a/drivers/isdn/hardware/Makefile b/drivers/isdn/hardware/Makefile
index a43760a0a4f5..96f9eb2e46ba 100644
--- a/drivers/isdn/hardware/Makefile
+++ b/drivers/isdn/hardware/Makefile
@@ -3,5 +3,4 @@
# Object files in subdirectories
-obj-$(CONFIG_CAPI_AVM) += avm/
obj-$(CONFIG_MISDN) += mISDN/
diff --git a/drivers/isdn/hardware/mISDN/Kconfig b/drivers/isdn/hardware/mISDN/Kconfig
index a7a34a85b970..304f50c08da2 100644
--- a/drivers/isdn/hardware/mISDN/Kconfig
+++ b/drivers/isdn/hardware/mISDN/Kconfig
@@ -79,11 +79,14 @@ config MISDN_NETJET
depends on PCI
depends on TTY
select MISDN_IPAC
- select ISDN_HDLC
- select ISDN_I4L
+ select MISDN_HDLC
help
Enable support for Traverse Technologies NETJet PCI cards.
+config MISDN_HDLC
+ tristate
+ select CRC_CCITT
+ select BITREVERSE
config MISDN_IPAC
tristate
diff --git a/drivers/isdn/hardware/mISDN/Makefile b/drivers/isdn/hardware/mISDN/Makefile
index 422f9fd8ab9a..3f50f8c4753f 100644
--- a/drivers/isdn/hardware/mISDN/Makefile
+++ b/drivers/isdn/hardware/mISDN/Makefile
@@ -15,3 +15,5 @@ obj-$(CONFIG_MISDN_NETJET) += netjet.o
# chip modules
obj-$(CONFIG_MISDN_IPAC) += mISDNipac.o
obj-$(CONFIG_MISDN_ISAR) += mISDNisar.o
+
+obj-$(CONFIG_MISDN_HDLC) += isdnhdlc.o
diff --git a/drivers/isdn/i4l/isdnhdlc.c b/drivers/isdn/hardware/mISDN/isdnhdlc.c
index 382a6b24e6a3..9fea16ed3dd8 100644
--- a/drivers/isdn/i4l/isdnhdlc.c
+++ b/drivers/isdn/hardware/mISDN/isdnhdlc.c
@@ -12,8 +12,8 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/crc-ccitt.h>
-#include <linux/isdn/hdlc.h>
#include <linux/bitrev.h>
+#include "isdnhdlc.h"
/*-------------------------------------------------------------------*/
diff --git a/drivers/isdn/hardware/mISDN/isdnhdlc.h b/drivers/isdn/hardware/mISDN/isdnhdlc.h
new file mode 100644
index 000000000000..fe2c1279c139
--- /dev/null
+++ b/drivers/isdn/hardware/mISDN/isdnhdlc.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * hdlc.h -- General purpose ISDN HDLC decoder.
+ *
+ * Implementation of a HDLC decoder/encoder in software.
+ * Necessary because some ISDN devices don't have HDLC
+ * controllers.
+ *
+ * Copyright (C)
+ * 2009 Karsten Keil <keil@b1-systems.de>
+ * 2002 Wolfgang Mües <wolfgang@iksw-muees.de>
+ * 2001 Frode Isaksen <fisaksen@bewan.com>
+ * 2001 Kai Germaschewski <kai.germaschewski@gmx.de>
+ */
+
+#ifndef __ISDNHDLC_H__
+#define __ISDNHDLC_H__
+
+struct isdnhdlc_vars {
+ int bit_shift;
+ int hdlc_bits1;
+ int data_bits;
+ int ffbit_shift; /* encoding only */
+ int state;
+ int dstpos;
+
+ u16 crc;
+
+ u8 cbin;
+ u8 shift_reg;
+ u8 ffvalue;
+
+ /* set if transferring data */
+ u32 data_received:1;
+ /* set if D channel (send idle instead of flags) */
+ u32 dchannel:1;
+ /* set if 56K adaptation */
+ u32 do_adapt56:1;
+ /* set if in closing phase (need to send CRC + flag) */
+ u32 do_closing:1;
+	/* set if data is bit-reversed */
+ u32 do_bitreverse:1;
+};
+
+/* Feature Flags */
+#define HDLC_56KBIT 0x01
+#define HDLC_DCHANNEL 0x02
+#define HDLC_BITREVERSE 0x04
+
+/*
+ The return value from isdnhdlc_decode is
+ the frame length, 0 if no complete frame was decoded,
+ or a negative error number
+*/
+#define HDLC_FRAMING_ERROR 1
+#define HDLC_CRC_ERROR 2
+#define HDLC_LENGTH_ERROR 3
+
+extern void isdnhdlc_rcv_init(struct isdnhdlc_vars *hdlc, u32 features);
+
+extern int isdnhdlc_decode(struct isdnhdlc_vars *hdlc, const u8 *src,
+ int slen, int *count, u8 *dst, int dsize);
+
+extern void isdnhdlc_out_init(struct isdnhdlc_vars *hdlc, u32 features);
+
+extern int isdnhdlc_encode(struct isdnhdlc_vars *hdlc, const u8 *src,
+ u16 slen, int *count, u8 *dst, int dsize);
+
+#endif /* __ISDNHDLC_H__ */
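
The new isdnhdlc.h above only declares the soft-HDLC entry points; as a rough illustration of how they fit together, the sketch below drives the decoder over one buffer of received B-channel octets. It is a hedged sketch based solely on the declarations and comments in the header: the HDLC_BITREVERSE feature flag, the 2048-byte reassembly buffer, the soft_hdlc_rx_sketch() name and the assumption that *count reports how many source octets isdnhdlc_decode() consumed are illustrative choices, not something this patch defines.

#include <linux/types.h>
#include <linux/printk.h>
#include "isdnhdlc.h"

/*
 * Illustrative sketch: decode one buffer of raw, bit-reversed B-channel
 * octets with the software HDLC decoder declared above.  Assumes that
 * *count reports the number of source octets consumed per call.
 */
static void soft_hdlc_rx_sketch(const u8 *src, int slen)
{
	struct isdnhdlc_vars hdlc;
	u8 frame[2048];		/* arbitrary reassembly buffer */
	int count, len;

	isdnhdlc_rcv_init(&hdlc, HDLC_BITREVERSE);

	while (slen > 0) {
		count = 0;
		len = isdnhdlc_decode(&hdlc, src, slen, &count,
				      frame, sizeof(frame));
		src += count;
		slen -= count;

		if (len > 0)
			pr_debug("soft-HDLC: complete frame, %d octets\n", len);
		else if (len < 0)
			pr_debug("soft-HDLC: decode error %d\n", len);
		/* len == 0: frame not complete yet, keep feeding */
	}
}
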
diff --git a/drivers/isdn/hardware/mISDN/netjet.c b/drivers/isdn/hardware/mISDN/netjet.c
index 5c9e38ba52ea..4e30affd1a7c 100644
--- a/drivers/isdn/hardware/mISDN/netjet.c
+++ b/drivers/isdn/hardware/mISDN/netjet.c
@@ -16,7 +16,7 @@
#include "ipac.h"
#include "iohelper.h"
#include "netjet.h"
-#include <linux/isdn/hdlc.h>
+#include "isdnhdlc.h"
#define NETJET_REV "2.0"
diff --git a/drivers/isdn/hisax/Kconfig b/drivers/isdn/hisax/Kconfig
deleted file mode 100644
index 43d98ccf5ff6..000000000000
--- a/drivers/isdn/hisax/Kconfig
+++ /dev/null
@@ -1,423 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-
-menu "Passive cards"
-
-config ISDN_DRV_HISAX
- tristate "HiSax SiemensChipSet driver support"
- select CRC_CCITT
- ---help---
- This is a driver supporting the Siemens chipset on various
- ISDN-cards (like AVM A1, Elsa ISDN cards, Teles S0-16.0, Teles
- S0-16.3, Teles S0-8, Teles/Creatix PnP, ITK micro ix1 and many
- compatibles).
-
- HiSax is just the name of this driver, not the name of any hardware.
-
- If you have a card with such a chipset, you should say Y here and
- also to the configuration option of the driver for your particular
- card, below.
-
-if ISDN_DRV_HISAX
-
-comment "D-channel protocol features"
-
-config HISAX_EURO
- bool "HiSax Support for EURO/DSS1"
- help
- Say Y or N according to the D-channel protocol which your local
- telephone service company provides.
-
- The call control protocol E-DSS1 is used in most European countries.
- If unsure, say Y.
-
-config DE_AOC
- bool "Support for german chargeinfo"
- depends on HISAX_EURO
- help
-	  If you want the HiSax hardware driver to send messages to the
-	  upper level of the ISDN code on each AOCD (Advice Of Charge, During
-	  the call -- transmission of the fee information during a call) and
-	  on each AOCE (Advice Of Charge, at the End of the call --
-	  transmission of fee information at the end of the call), say Y here.
- This works only in Germany.
-
-config HISAX_NO_SENDCOMPLETE
- bool "Disable sending complete"
- depends on HISAX_EURO
- help
- If you have trouble with some ugly exchanges or you live in
- Australia select this option.
-
-config HISAX_NO_LLC
- bool "Disable sending low layer compatibility"
- depends on HISAX_EURO
- help
- If you have trouble with some ugly exchanges try to select this
- option.
-
-config HISAX_NO_KEYPAD
- bool "Disable keypad protocol option"
- depends on HISAX_EURO
- help
- If you like to send special dial strings including * or # without
- using the keypad protocol, select this option.
-
-config HISAX_1TR6
- bool "HiSax Support for german 1TR6"
- help
- Say Y or N according to the D-channel protocol which your local
- telephone service company provides.
-
- 1TR6 is an old call control protocol which was used in Germany
- before E-DSS1 was established. Nowadays, all new lines in Germany
- use E-DSS1.
-
-config HISAX_NI1
- bool "HiSax Support for US NI1"
- help
-	  Enable this if you want to use ISDN in the US on an NI1 basic rate
-	  interface.
-
-config HISAX_MAX_CARDS
- int "Maximum number of cards supported by HiSax"
- default "8"
- help
- This option allows you to specify the maximum number of cards which
- the HiSax driver will be able to handle.
-
-comment "HiSax supported cards"
-
-config HISAX_16_0
- bool "Teles 16.0/8.0"
- depends on ISA
- help
- This enables HiSax support for the Teles ISDN-cards S0-16.0, S0-8
- and many compatibles.
-
- See <file:Documentation/isdn/README.HiSax> on how to configure it
- using the different cards, a different D-channel protocol, or
- non-standard IRQ/port/shmem settings.
-
-config HISAX_16_3
- bool "Teles 16.3 or PNP or PCMCIA"
- help
- This enables HiSax support for the Teles ISDN-cards S0-16.3 the
- Teles/Creatix PnP and the Teles PCMCIA.
-
- See <file:Documentation/isdn/README.HiSax> on how to configure it
- using the different cards, a different D-channel protocol, or
- non-standard IRQ/port settings.
-
-config HISAX_TELESPCI
- bool "Teles PCI"
- depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || (XTENSA && !CPU_LITTLE_ENDIAN)))
- help
- This enables HiSax support for the Teles PCI.
- See <file:Documentation/isdn/README.HiSax> on how to configure it.
-
-config HISAX_S0BOX
- bool "Teles S0Box"
- help
- This enables HiSax support for the Teles/Creatix parallel port
- S0BOX. See <file:Documentation/isdn/README.HiSax> on how to
- configure it.
-
-config HISAX_AVM_A1
- bool "AVM A1 (Fritz)"
- depends on ISA
- help
- This enables HiSax support for the AVM A1 (aka "Fritz").
-
- See <file:Documentation/isdn/README.HiSax> on how to configure it
- using the different cards, a different D-channel protocol, or
- non-standard IRQ/port settings.
-
-config HISAX_FRITZPCI
- bool "AVM PnP/PCI (Fritz!PnP/PCI)"
- depends on BROKEN || !PPC64
- help
- This enables HiSax support for the AVM "Fritz!PnP" and "Fritz!PCI".
- See <file:Documentation/isdn/README.HiSax> on how to configure it.
-
-config HISAX_AVM_A1_PCMCIA
- bool "AVM A1 PCMCIA (Fritz)"
- help
-	  This enables HiSax support for the AVM A1 ("Fritz!PCMCIA").
- See <file:Documentation/isdn/README.HiSax> on how to configure it.
-
-config HISAX_ELSA
- bool "Elsa cards"
- help
-	  This enables HiSax support for the Elsa MicroLink ISA cards, for the
- Elsa Quickstep series cards and Elsa PCMCIA.
-
- See <file:Documentation/isdn/README.HiSax> on how to configure it
- using the different cards, a different D-channel protocol, or
- non-standard IRQ/port settings.
-
-config HISAX_IX1MICROR2
- bool "ITK ix1-micro Revision 2"
- depends on ISA
- help
- This enables HiSax support for the ITK ix1-micro Revision 2 card.
-
- See <file:Documentation/isdn/README.HiSax> on how to configure it
- using the different cards, a different D-channel protocol, or
- non-standard IRQ/port settings.
-
-config HISAX_DIEHLDIVA
- bool "Eicon.Diehl Diva cards"
- help
-	  This enables HiSax support for the non-PRO versions of the
-	  Eicon.Diehl Diva passive ISDN cards.
-
- See <file:Documentation/isdn/README.HiSax> on how to configure it
- using the different cards, a different D-channel protocol, or
- non-standard IRQ/port settings.
-
-config HISAX_ASUSCOM
- bool "ASUSCOM ISA cards"
- depends on ISA
- help
-	  This enables HiSax support for the AsusCom passive ISDN ISA cards
-	  and their OEM versions.
-
- See <file:Documentation/isdn/README.HiSax> on how to configure it
- using the different cards, a different D-channel protocol, or
- non-standard IRQ/port settings.
-
-config HISAX_TELEINT
- bool "TELEINT cards"
- depends on ISA
- help
-	  This enables HiSax support for the TELEINT SA1 semi-active ISDN card.
-
- See <file:Documentation/isdn/README.HiSax> on how to configure it
- using the different cards, a different D-channel protocol, or
- non-standard IRQ/port settings.
-
-config HISAX_HFCS
- bool "HFC-S based cards"
- depends on ISA
- help
- This enables HiSax support for the HFC-S 2BDS0 based cards, like
-	  the Teles 16.3c.
-
- See <file:Documentation/isdn/README.HiSax> on how to configure it
- using the different cards, a different D-channel protocol, or
- non-standard IRQ/port settings.
-
-config HISAX_SEDLBAUER
- bool "Sedlbauer cards"
- help
- This enables HiSax support for the Sedlbauer passive ISDN cards.
-
- See <file:Documentation/isdn/README.HiSax> on how to configure it
- using the different cards, a different D-channel protocol, or
- non-standard IRQ/port settings.
-
-config HISAX_SPORTSTER
- bool "USR Sportster internal TA"
- depends on ISA
- help
- This enables HiSax support for the USR Sportster internal TA card.
-
- See <file:Documentation/isdn/README.HiSax> on how to configure it
- using a different D-channel protocol, or non-standard IRQ/port
- settings.
-
-config HISAX_MIC
- bool "MIC card"
- depends on ISA
- help
- This enables HiSax support for the ITH MIC card.
-
- See <file:Documentation/isdn/README.HiSax> on how to configure it
- using a different D-channel protocol, or non-standard IRQ/port
- settings.
-
-config HISAX_NETJET
- bool "NETjet card"
- depends on PCI && (BROKEN || !(PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || (XTENSA && !CPU_LITTLE_ENDIAN) || MICROBLAZE))
- depends on VIRT_TO_BUS
- help
- This enables HiSax support for the NetJet from Traverse
- Technologies.
-
- See <file:Documentation/isdn/README.HiSax> on how to configure it
- using a different D-channel protocol, or non-standard IRQ/port
- settings.
-
-config HISAX_NETJET_U
- bool "NETspider U card"
- depends on PCI && (BROKEN || !(PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || (XTENSA && !CPU_LITTLE_ENDIAN) || MICROBLAZE))
- depends on VIRT_TO_BUS
- help
- This enables HiSax support for the Netspider U interface ISDN card
- from Traverse Technologies.
- See <file:Documentation/isdn/README.HiSax> on how to configure it
- using a different D-channel protocol, or non-standard IRQ/port
- settings.
-
-config HISAX_NICCY
- bool "Niccy PnP/PCI card"
- help
- This enables HiSax support for the Dr. Neuhaus Niccy PnP or PCI.
-
- See <file:Documentation/isdn/README.HiSax> on how to configure it
- using a different D-channel protocol, or non-standard IRQ/port
- settings.
-
-config HISAX_ISURF
- bool "Siemens I-Surf card"
- depends on ISA
- help
- This enables HiSax support for the Siemens I-Talk/I-Surf card with
- ISAR chip.
- See <file:Documentation/isdn/README.HiSax> on how to configure it
- using a different D-channel protocol, or non-standard IRQ/port
- settings.
-
-config HISAX_HSTSAPHIR
- bool "HST Saphir card"
- depends on ISA
- help
- This enables HiSax support for the HST Saphir card.
-
- See <file:Documentation/isdn/README.HiSax> on how to configure it
- using a different D-channel protocol, or non-standard IRQ/port
- settings.
-
-config HISAX_BKM_A4T
- bool "Telekom A4T card"
- depends on PCI
- help
- This enables HiSax support for the Telekom A4T card.
-
- See <file:Documentation/isdn/README.HiSax> on how to configure it
- using a different D-channel protocol, or non-standard IRQ/port
- settings.
-
-config HISAX_SCT_QUADRO
- bool "Scitel Quadro card"
- depends on PCI
- help
- This enables HiSax support for the Scitel Quadro card.
-
- See <file:Documentation/isdn/README.HiSax> on how to configure it
- using a different D-channel protocol, or non-standard IRQ/port
- settings.
-
-config HISAX_GAZEL
- bool "Gazel cards"
- help
- This enables HiSax support for the Gazel cards.
-
- See <file:Documentation/isdn/README.HiSax> on how to configure it
- using a different D-channel protocol, or non-standard IRQ/port
- settings.
-
-config HISAX_HFC_PCI
- bool "HFC PCI-Bus cards"
- depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || (XTENSA && !CPU_LITTLE_ENDIAN)))
- help
- This enables HiSax support for the HFC-S PCI 2BDS0 based cards.
-
- For more information see under
- <file:Documentation/isdn/README.hfc-pci>.
-
-config HISAX_W6692
- bool "Winbond W6692 based cards"
- depends on PCI
- help
- This enables HiSax support for Winbond W6692 based PCI ISDN cards.
-
- See <file:Documentation/isdn/README.HiSax> on how to configure it
- using a different D-channel protocol, or non-standard IRQ/port
- settings.
-
-config HISAX_HFC_SX
- bool "HFC-S+, HFC-SP, HFC-PCMCIA cards"
- help
- This enables HiSax support for the HFC-S+, HFC-SP and HFC-PCMCIA
- cards. This code is not finished yet.
-
-config HISAX_ENTERNOW_PCI
- bool "Formula-n enter:now PCI card"
- depends on HISAX_NETJET && PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || (XTENSA && !CPU_LITTLE_ENDIAN)))
- help
- This enables HiSax support for the Formula-n enter:now PCI
- ISDN card.
-
-config HISAX_DEBUG
- bool "HiSax debugging"
- help
- This enables debugging code in the new-style HiSax drivers, i.e.
- the ST5481 USB driver currently.
- If in doubt, say yes.
-
-comment "HiSax PCMCIA card service modules"
-
-config HISAX_SEDLBAUER_CS
- tristate "Sedlbauer PCMCIA cards"
- depends on PCMCIA && HISAX_SEDLBAUER
- help
- This enables the PCMCIA client driver for the Sedlbauer Speed Star
- and Speed Star II cards.
-
-config HISAX_ELSA_CS
- tristate "ELSA PCMCIA MicroLink cards"
- depends on PCMCIA && HISAX_ELSA
- help
- This enables the PCMCIA client driver for the Elsa PCMCIA MicroLink
- card.
-
-config HISAX_AVM_A1_CS
- tristate "AVM A1 PCMCIA cards"
- depends on PCMCIA && ISDN_DRV_HISAX
- help
- This enables the PCMCIA client driver for the AVM A1 / Fritz!Card
- PCMCIA cards.
-
-config HISAX_TELES_CS
- tristate "TELES PCMCIA cards"
- depends on PCMCIA && HISAX_16_3
- help
- This enables the PCMCIA client driver for the Teles PCMCIA cards.
-
-comment "HiSax sub driver modules"
-
-config HISAX_ST5481
- tristate "ST5481 USB ISDN modem"
- depends on USB
- select ISDN_HDLC
- select CRC_CCITT
- select BITREVERSE
- help
- This enables the driver for ST5481 based USB ISDN adapters,
- e.g. the BeWan Gazel 128 USB
-
-config HISAX_HFCUSB
- tristate "HFC USB based ISDN modems"
- depends on USB
- help
- This enables the driver for HFC USB based ISDN modems.
-
-config HISAX_HFC4S8S
- tristate "HFC-4S/8S based ISDN cards"
- help
- This enables the driver for HFC-4S/8S based ISDN cards.
-
-config HISAX_FRITZ_PCIPNP
- tristate "AVM Fritz!Card PCI/PCIv2/PnP support"
- depends on PCI
- help
- This enables the driver for the AVM Fritz!Card PCI,
- Fritz!Card PCI v2 and Fritz!Card PnP.
- (the latter also needs you to select "ISA Plug and Play support"
- from the menu "Plug and Play configuration")
-
-endif
-
-endmenu
-
diff --git a/drivers/isdn/hisax/Makefile b/drivers/isdn/hisax/Makefile
deleted file mode 100644
index 3eca9d23f1c2..000000000000
--- a/drivers/isdn/hisax/Makefile
+++ /dev/null
@@ -1,60 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-# Makefile for the hisax ISDN device driver
-
-# The target object and module list name.
-
-# Define maximum number of cards
-
-ccflags-y := -DHISAX_MAX_CARDS=$(CONFIG_HISAX_MAX_CARDS)
-
-obj-$(CONFIG_ISDN_DRV_HISAX) += hisax.o
-obj-$(CONFIG_HISAX_SEDLBAUER_CS) += sedlbauer_cs.o
-obj-$(CONFIG_HISAX_ELSA_CS) += elsa_cs.o
-obj-$(CONFIG_HISAX_AVM_A1_CS) += avma1_cs.o
-obj-$(CONFIG_HISAX_TELES_CS) += teles_cs.o
-obj-$(CONFIG_HISAX_ST5481) += hisax_st5481.o
-obj-$(CONFIG_HISAX_HFCUSB) += hfc_usb.o
-obj-$(CONFIG_HISAX_HFC4S8S) += hfc4s8s_l1.o
-obj-$(CONFIG_HISAX_FRITZ_PCIPNP) += hisax_isac.o hisax_fcpcipnp.o
-
-# Multipart objects.
-
-hisax_st5481-y := st5481_init.o st5481_usb.o st5481_d.o \
- st5481_b.o
-
-hisax-y := config.o isdnl1.o tei.o isdnl2.o isdnl3.o \
- lmgr.o q931.o callc.o fsm.o
-hisax-$(CONFIG_HISAX_EURO) += l3dss1.o
-hisax-$(CONFIG_HISAX_NI1) += l3ni1.o
-hisax-$(CONFIG_HISAX_1TR6) += l3_1tr6.o
-
-hisax-$(CONFIG_HISAX_16_0) += teles0.o isac.o arcofi.o hscx.o
-hisax-$(CONFIG_HISAX_16_3) += teles3.o isac.o arcofi.o hscx.o
-hisax-$(CONFIG_HISAX_TELESPCI) += telespci.o isac.o arcofi.o hscx.o
-hisax-$(CONFIG_HISAX_S0BOX) += s0box.o isac.o arcofi.o hscx.o
-hisax-$(CONFIG_HISAX_AVM_A1) += avm_a1.o isac.o arcofi.o hscx.o
-hisax-$(CONFIG_HISAX_AVM_A1_PCMCIA) += avm_a1p.o isac.o arcofi.o hscx.o
-hisax-$(CONFIG_HISAX_FRITZPCI) += avm_pci.o isac.o arcofi.o
-hisax-$(CONFIG_HISAX_ELSA) += elsa.o isac.o arcofi.o hscx.o
-hisax-$(CONFIG_HISAX_IX1MICROR2) += ix1_micro.o isac.o arcofi.o hscx.o
-hisax-$(CONFIG_HISAX_DIEHLDIVA) += diva.o isac.o arcofi.o hscx.o ipacx.o
-hisax-$(CONFIG_HISAX_ASUSCOM) += asuscom.o isac.o arcofi.o hscx.o
-hisax-$(CONFIG_HISAX_TELEINT) += teleint.o isac.o arcofi.o hfc_2bs0.o
-hisax-$(CONFIG_HISAX_SEDLBAUER) += sedlbauer.o isac.o arcofi.o hscx.o \
- isar.o
-hisax-$(CONFIG_HISAX_SPORTSTER) += sportster.o isac.o arcofi.o hscx.o
-hisax-$(CONFIG_HISAX_MIC) += mic.o isac.o arcofi.o hscx.o
-hisax-$(CONFIG_HISAX_NETJET) += nj_s.o netjet.o isac.o arcofi.o
-hisax-$(CONFIG_HISAX_NETJET_U) += nj_u.o netjet.o icc.o
-hisax-$(CONFIG_HISAX_HFCS) += hfcscard.o hfc_2bds0.o
-hisax-$(CONFIG_HISAX_HFC_PCI) += hfc_pci.o
-hisax-$(CONFIG_HISAX_HFC_SX) += hfc_sx.o
-hisax-$(CONFIG_HISAX_NICCY) += niccy.o isac.o arcofi.o hscx.o
-hisax-$(CONFIG_HISAX_ISURF) += isurf.o isac.o arcofi.o isar.o
-hisax-$(CONFIG_HISAX_HSTSAPHIR) += saphir.o isac.o arcofi.o hscx.o
-hisax-$(CONFIG_HISAX_BKM_A4T) += bkm_a4t.o isac.o arcofi.o jade.o
-hisax-$(CONFIG_HISAX_SCT_QUADRO) += bkm_a8.o isac.o arcofi.o hscx.o
-hisax-$(CONFIG_HISAX_GAZEL) += gazel.o isac.o arcofi.o hscx.o
-hisax-$(CONFIG_HISAX_W6692) += w6692.o
-hisax-$(CONFIG_HISAX_ENTERNOW_PCI) += enternow_pci.o amd7930_fn.o
-
diff --git a/drivers/isdn/hisax/amd7930_fn.c b/drivers/isdn/hisax/amd7930_fn.c
deleted file mode 100644
index 6c336366128c..000000000000
--- a/drivers/isdn/hisax/amd7930_fn.c
+++ /dev/null
@@ -1,794 +0,0 @@
-/* gerdes_amd7930.c,v 0.99 2001/10/02
- *
- * gerdes_amd7930.c Amd 79C30A and 79C32A specific routines
- * (based on HiSax driver by Karsten Keil)
- *
- * Author Christoph Ersfeld <info@formula-n.de>
- * Formula-n Europe AG (www.formula-n.com)
- * previously Gerdes AG
- *
- *
- * This file is (c) under GNU PUBLIC LICENSE
- *
- *
- * Notes:
- * Version 0.99 is the first release of this driver and there are
- * certainly a few bugs.
- *
- * Please don't report any malfunction to me without sending
- * (compressed) debug-logs.
- * It would be nearly impossible to retrace it.
- *
- * Log D-channel-processing as follows:
- *
- *    1. Load hisax with card-specific parameters; this example is for the
- *       Formula-n enter:now ISDN PCI and compatibles
- *       (e.g. Gerdes Power ISDN PCI)
- *
- * modprobe hisax type=41 protocol=2 id=gerdes
- *
- *       If you choose another value for id, you need to modify the
- *       code below, too.
- *
- * 2. set debug-level
- *
- * hisaxctrl gerdes 1 0x3ff
- * hisaxctrl gerdes 11 0x4f
- * cat /dev/isdnctrl >> ~/log &
- *
- *    Please also take a look at /var/log/messages if there is
- *    anything important concerning HiSax.
- *
- *
- * Credits:
- *    Programming of this driver for the Formula-n enter:now ISDN PCI and
- *    the Amd 7930 D-channel controller it uses was sponsored by
- *    Formula-n Europe AG.
- *    Thanks to Karsten Keil and Petr Novak, who supported me with
- *    HiSax-specific questions.
- *    Special thanks to Carl-Friedrich Braun, who had to answer a lot of
- *    questions about ISDN in general and about handling the Amd chip.
- *
- */
-
-
-#include "hisax.h"
-#include "isdnl1.h"
-#include "isac.h"
-#include "amd7930_fn.h"
-#include <linux/interrupt.h>
-#include <linux/init.h>
-#include <linux/gfp.h>
-
-static void Amd7930_new_ph(struct IsdnCardState *cs);
-
-static WORD initAMD[] = {
- 0x0100,
-
- 0x00A5, 3, 0x01, 0x40, 0x58, // LPR, LMR1, LMR2
- 0x0086, 1, 0x0B, // DMR1 (D-Buffer TH-Interrupts on)
- 0x0087, 1, 0xFF, // DMR2
- 0x0092, 1, 0x03, // EFCR (extended mode d-channel-fifo on)
- 0x0090, 4, 0xFE, 0xFF, 0x02, 0x0F, // FRAR4, SRAR4, DMR3, DMR4 (address recognition )
- 0x0084, 2, 0x80, 0x00, // DRLR
- 0x00C0, 1, 0x47, // PPCR1
- 0x00C8, 1, 0x01, // PPCR2
-
- 0x0102,
- 0x0107,
- 0x01A1, 1,
- 0x0121, 1,
- 0x0189, 2,
-
- 0x0045, 4, 0x61, 0x72, 0x00, 0x00, // MCR1, MCR2, MCR3, MCR4
- 0x0063, 2, 0x08, 0x08, // GX
- 0x0064, 2, 0x08, 0x08, // GR
- 0x0065, 2, 0x99, 0x00, // GER
- 0x0066, 2, 0x7C, 0x8B, // STG
- 0x0067, 2, 0x00, 0x00, // FTGR1, FTGR2
- 0x0068, 2, 0x20, 0x20, // ATGR1, ATGR2
- 0x0069, 1, 0x4F, // MMR1
- 0x006A, 1, 0x00, // MMR2
- 0x006C, 1, 0x40, // MMR3
- 0x0021, 1, 0x02, // INIT
- 0x00A3, 1, 0x40, // LMR1
-
- 0xFFFF
-};
-
-
-static void /* macro wWordAMD */
-WriteWordAmd7930(struct IsdnCardState *cs, BYTE reg, WORD val)
-{
- wByteAMD(cs, 0x00, reg);
- wByteAMD(cs, 0x01, LOBYTE(val));
- wByteAMD(cs, 0x01, HIBYTE(val));
-}
-
-static WORD /* macro rWordAMD */
-ReadWordAmd7930(struct IsdnCardState *cs, BYTE reg)
-{
- WORD res;
- /* direct access register */
- if (reg < 8) {
- res = rByteAMD(cs, reg);
- res += 256 * rByteAMD(cs, reg);
- }
- /* indirect access register */
- else {
- wByteAMD(cs, 0x00, reg);
- res = rByteAMD(cs, 0x01);
- res += 256 * rByteAMD(cs, 0x01);
- }
- return (res);
-}
-
-
-static void
-Amd7930_ph_command(struct IsdnCardState *cs, u_char command, char *s)
-{
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "AMD7930: %s: ph_command 0x%02X", s, command);
-
- cs->dc.amd7930.lmr1 = command;
- wByteAMD(cs, 0xA3, command);
-}
-
-
-
-static BYTE i430States[] = {
-// to reset F3 F4 F5 F6 F7 F8 AR from
- 0x01, 0x02, 0x00, 0x00, 0x00, 0x07, 0x05, 0x00, // init
- 0x01, 0x02, 0x00, 0x00, 0x00, 0x07, 0x05, 0x00, // reset
- 0x01, 0x02, 0x00, 0x00, 0x00, 0x09, 0x05, 0x04, // F3
- 0x01, 0x02, 0x00, 0x00, 0x1B, 0x00, 0x00, 0x00, // F4
- 0x01, 0x02, 0x00, 0x00, 0x1B, 0x00, 0x00, 0x00, // F5
- 0x01, 0x03, 0x00, 0x00, 0x00, 0x06, 0x05, 0x00, // F6
- 0x11, 0x13, 0x00, 0x00, 0x1B, 0x00, 0x15, 0x00, // F7
- 0x01, 0x03, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, // F8
- 0x01, 0x03, 0x00, 0x00, 0x00, 0x09, 0x00, 0x0A}; // AR
-
-
-/* Row init - reset F3 F4 F5 F6 F7 F8 AR */
-static BYTE stateHelper[] = { 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08 };
-
-
-
-
-static void
-Amd7930_get_state(struct IsdnCardState *cs) {
- BYTE lsr = rByteAMD(cs, 0xA1);
- cs->dc.amd7930.ph_state = (lsr & 0x7) + 2;
- Amd7930_new_ph(cs);
-}
-
-
-
-static void
-Amd7930_new_ph(struct IsdnCardState *cs)
-{
- u_char index = stateHelper[cs->dc.amd7930.old_state] * 8 + stateHelper[cs->dc.amd7930.ph_state] - 1;
- u_char message = i430States[index];
-
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "AMD7930: new_ph %d, old_ph %d, message %d, index %d",
- cs->dc.amd7930.ph_state, cs->dc.amd7930.old_state, message & 0x0f, index);
-
- cs->dc.amd7930.old_state = cs->dc.amd7930.ph_state;
-
-	/* abort transmit if necessary */
- if ((message & 0xf0) && (cs->tx_skb)) {
- wByteAMD(cs, 0x21, 0xC2);
- wByteAMD(cs, 0x21, 0x02);
- }
-
- switch (message & 0x0f) {
-
- case (1):
- l1_msg(cs, HW_RESET | INDICATION, NULL);
- Amd7930_get_state(cs);
- break;
- case (2): /* init, Card starts in F3 */
- l1_msg(cs, HW_DEACTIVATE | CONFIRM, NULL);
- break;
- case (3):
- l1_msg(cs, HW_DEACTIVATE | INDICATION, NULL);
- break;
- case (4):
- l1_msg(cs, HW_POWERUP | CONFIRM, NULL);
- Amd7930_ph_command(cs, 0x50, "HW_ENABLE REQUEST");
- break;
- case (5):
- l1_msg(cs, HW_RSYNC | INDICATION, NULL);
- break;
- case (6):
- l1_msg(cs, HW_INFO4_P8 | INDICATION, NULL);
- break;
- case (7): /* init, Card starts in F7 */
- l1_msg(cs, HW_RSYNC | INDICATION, NULL);
- l1_msg(cs, HW_INFO4_P8 | INDICATION, NULL);
- break;
- case (8):
- l1_msg(cs, HW_POWERUP | CONFIRM, NULL);
- /* fall through */
- case (9):
- Amd7930_ph_command(cs, 0x40, "HW_ENABLE REQ cleared if set");
- l1_msg(cs, HW_RSYNC | INDICATION, NULL);
- l1_msg(cs, HW_INFO2 | INDICATION, NULL);
- l1_msg(cs, HW_INFO4_P8 | INDICATION, NULL);
- break;
- case (10):
- Amd7930_ph_command(cs, 0x40, "T3 expired, HW_ENABLE REQ cleared");
- cs->dc.amd7930.old_state = 3;
- break;
- case (11):
- l1_msg(cs, HW_INFO2 | INDICATION, NULL);
- break;
- default:
- break;
- }
-}
-
-
-
-static void
-Amd7930_bh(struct work_struct *work)
-{
- struct IsdnCardState *cs =
- container_of(work, struct IsdnCardState, tqueue);
- struct PStack *stptr;
-
- if (test_and_clear_bit(D_CLEARBUSY, &cs->event)) {
- if (cs->debug)
- debugl1(cs, "Amd7930: bh, D-Channel Busy cleared");
- stptr = cs->stlist;
- while (stptr != NULL) {
- stptr->l1.l1l2(stptr, PH_PAUSE | CONFIRM, NULL);
- stptr = stptr->next;
- }
- }
- if (test_and_clear_bit(D_L1STATECHANGE, &cs->event)) {
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "AMD7930: bh, D_L1STATECHANGE");
- Amd7930_new_ph(cs);
- }
-
- if (test_and_clear_bit(D_RCVBUFREADY, &cs->event)) {
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "AMD7930: bh, D_RCVBUFREADY");
- DChannel_proc_rcv(cs);
- }
-
- if (test_and_clear_bit(D_XMTBUFREADY, &cs->event)) {
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "AMD7930: bh, D_XMTBUFREADY");
- DChannel_proc_xmt(cs);
- }
-}
-
-static void
-Amd7930_empty_Dfifo(struct IsdnCardState *cs, int flag)
-{
-
- BYTE stat, der;
- BYTE *ptr;
- struct sk_buff *skb;
-
-
- if ((cs->debug & L1_DEB_ISAC) && !(cs->debug & L1_DEB_ISAC_FIFO))
- debugl1(cs, "Amd7930: empty_Dfifo");
-
-
- ptr = cs->rcvbuf + cs->rcvidx;
-
- /* AMD interrupts off */
- AmdIrqOff(cs);
-
- /* read D-Channel-Fifo*/
- stat = rByteAMD(cs, 0x07); // DSR2
-
- /* while Data in Fifo ... */
- while ((stat & 2) && ((ptr-cs->rcvbuf) < MAX_DFRAME_LEN_L1)) {
- *ptr = rByteAMD(cs, 0x04); // DCRB
- ptr++;
- stat = rByteAMD(cs, 0x07); // DSR2
- cs->rcvidx = ptr - cs->rcvbuf;
-
-		/* Packet ready? */
- if (stat & 1) {
-
- der = rWordAMD(cs, 0x03);
-
- /* no errors, packet ok */
- if (!der && !flag) {
- rWordAMD(cs, 0x89); // clear DRCR
-
- if ((cs->rcvidx) > 0) {
- if (!(skb = alloc_skb(cs->rcvidx, GFP_ATOMIC)))
- printk(KERN_WARNING "HiSax: Amd7930: empty_Dfifo, D receive out of memory!\n");
- else {
- /* Debugging */
- if (cs->debug & L1_DEB_ISAC_FIFO) {
- char *t = cs->dlog;
-
- t += sprintf(t, "Amd7930: empty_Dfifo cnt: %d |", cs->rcvidx);
- QuickHex(t, cs->rcvbuf, cs->rcvidx);
- debugl1(cs, "%s", cs->dlog);
- }
- /* moves received data in sk-buffer */
- skb_put_data(skb, cs->rcvbuf,
- cs->rcvidx);
- skb_queue_tail(&cs->rq, skb);
- }
- }
-
- }
- /* throw damaged packets away, reset receive-buffer, indicate RX */
- ptr = cs->rcvbuf;
- cs->rcvidx = 0;
- schedule_event(cs, D_RCVBUFREADY);
- }
- }
-	/* Packet too long, overflow */
- if (cs->rcvidx >= MAX_DFRAME_LEN_L1) {
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "AMD7930: empty_Dfifo L2-Framelength overrun");
- cs->rcvidx = 0;
- return;
- }
- /* AMD interrupts on */
- AmdIrqOn(cs);
-}
-
-
-static void
-Amd7930_fill_Dfifo(struct IsdnCardState *cs)
-{
-
- WORD dtcrr, dtcrw, len, count;
- BYTE txstat, dmr3;
- BYTE *ptr, *deb_ptr;
-
- if ((cs->debug & L1_DEB_ISAC) && !(cs->debug & L1_DEB_ISAC_FIFO))
- debugl1(cs, "Amd7930: fill_Dfifo");
-
- if ((!cs->tx_skb) || (cs->tx_skb->len <= 0))
- return;
-
- dtcrw = 0;
- if (!cs->dc.amd7930.tx_xmtlen)
- /* new Frame */
- len = dtcrw = cs->tx_skb->len;
- /* continue frame */
- else len = cs->dc.amd7930.tx_xmtlen;
-
-
- /* AMD interrupts off */
- AmdIrqOff(cs);
-
- deb_ptr = ptr = cs->tx_skb->data;
-
-	/* while free space is available in the TX FIFO and data remains in the sk_buff */
- txstat = 0x10;
- while ((txstat & 0x10) && (cs->tx_cnt < len)) {
- wByteAMD(cs, 0x04, *ptr);
- ptr++;
- cs->tx_cnt++;
- txstat = rByteAMD(cs, 0x07);
- }
- count = ptr - cs->tx_skb->data;
- skb_pull(cs->tx_skb, count);
-
-
- dtcrr = rWordAMD(cs, 0x85); // DTCR
- dmr3 = rByteAMD(cs, 0x8E);
-
- if (cs->debug & L1_DEB_ISAC) {
- debugl1(cs, "Amd7930: fill_Dfifo, DMR3: 0x%02X, DTCR read: 0x%04X write: 0x%02X 0x%02X", dmr3, dtcrr, LOBYTE(dtcrw), HIBYTE(dtcrw));
- }
-
-	/* writing dtcrw starts the transmit */
- if (!cs->dc.amd7930.tx_xmtlen) {
- wWordAMD(cs, 0x85, dtcrw);
- cs->dc.amd7930.tx_xmtlen = dtcrw;
- }
-
- if (test_and_set_bit(FLG_DBUSY_TIMER, &cs->HW_Flags)) {
- debugl1(cs, "Amd7930: fill_Dfifo dbusytimer running");
- del_timer(&cs->dbusytimer);
- }
- cs->dbusytimer.expires = jiffies + ((DBUSY_TIMER_VALUE * HZ) / 1000);
- add_timer(&cs->dbusytimer);
-
- if (cs->debug & L1_DEB_ISAC_FIFO) {
- char *t = cs->dlog;
-
- t += sprintf(t, "Amd7930: fill_Dfifo cnt: %d |", count);
- QuickHex(t, deb_ptr, count);
- debugl1(cs, "%s", cs->dlog);
- }
- /* AMD interrupts on */
- AmdIrqOn(cs);
-}
-
-
-void Amd7930_interrupt(struct IsdnCardState *cs, BYTE irflags)
-{
- BYTE dsr1, dsr2, lsr;
- WORD der;
-
- while (irflags)
- {
-
- dsr1 = rByteAMD(cs, 0x02);
- der = rWordAMD(cs, 0x03);
- dsr2 = rByteAMD(cs, 0x07);
- lsr = rByteAMD(cs, 0xA1);
-
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "Amd7930: interrupt: flags: 0x%02X, DSR1: 0x%02X, DSR2: 0x%02X, LSR: 0x%02X, DER=0x%04X", irflags, dsr1, dsr2, lsr, der);
-
- /* D error -> read DER and DSR2 bit 2 */
- if (der || (dsr2 & 4)) {
-
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "Amd7930: interrupt: D error DER=0x%04X", der);
-
- /* RX, TX abort if collision detected */
- if (der & 2) {
- wByteAMD(cs, 0x21, 0xC2);
- wByteAMD(cs, 0x21, 0x02);
- if (test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags))
- del_timer(&cs->dbusytimer);
- if (test_and_clear_bit(FLG_L1_DBUSY, &cs->HW_Flags))
- schedule_event(cs, D_CLEARBUSY);
- /* restart frame */
- if (cs->tx_skb) {
- skb_push(cs->tx_skb, cs->tx_cnt);
- cs->tx_cnt = 0;
- cs->dc.amd7930.tx_xmtlen = 0;
- Amd7930_fill_Dfifo(cs);
- } else {
- printk(KERN_WARNING "HiSax: Amd7930 D-Collision, no skb\n");
- debugl1(cs, "Amd7930: interrupt: D-Collision, no skb");
- }
- }
- /* remove damaged data from fifo */
- Amd7930_empty_Dfifo(cs, 1);
-
- if (test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags))
- del_timer(&cs->dbusytimer);
- if (test_and_clear_bit(FLG_L1_DBUSY, &cs->HW_Flags))
- schedule_event(cs, D_CLEARBUSY);
- /* restart TX-Frame */
- if (cs->tx_skb) {
- skb_push(cs->tx_skb, cs->tx_cnt);
- cs->tx_cnt = 0;
- cs->dc.amd7930.tx_xmtlen = 0;
- Amd7930_fill_Dfifo(cs);
- }
- }
-
- /* D TX FIFO empty -> fill */
- if (irflags & 1) {
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "Amd7930: interrupt: clear Timer and fill D-TX-FIFO if data");
-
- /* AMD interrupts off */
- AmdIrqOff(cs);
-
- if (test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags))
- del_timer(&cs->dbusytimer);
- if (test_and_clear_bit(FLG_L1_DBUSY, &cs->HW_Flags))
- schedule_event(cs, D_CLEARBUSY);
- if (cs->tx_skb) {
- if (cs->tx_skb->len)
- Amd7930_fill_Dfifo(cs);
- }
- /* AMD interrupts on */
- AmdIrqOn(cs);
- }
-
-
- /* D RX FIFO full or tiny packet in Fifo -> empty */
- if ((irflags & 2) || (dsr1 & 2)) {
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "Amd7930: interrupt: empty D-FIFO");
- Amd7930_empty_Dfifo(cs, 0);
- }
-
-
- /* D-Frame transmit complete */
- if (dsr1 & 64) {
- if (cs->debug & L1_DEB_ISAC) {
- debugl1(cs, "Amd7930: interrupt: transmit packet ready");
- }
- /* AMD interrupts off */
- AmdIrqOff(cs);
-
- if (test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags))
- del_timer(&cs->dbusytimer);
- if (test_and_clear_bit(FLG_L1_DBUSY, &cs->HW_Flags))
- schedule_event(cs, D_CLEARBUSY);
-
- if (cs->tx_skb) {
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "Amd7930: interrupt: TX-Packet ready, freeing skb");
- dev_kfree_skb_irq(cs->tx_skb);
- cs->tx_cnt = 0;
- cs->dc.amd7930.tx_xmtlen = 0;
- cs->tx_skb = NULL;
- }
- if ((cs->tx_skb = skb_dequeue(&cs->sq))) {
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "Amd7930: interrupt: TX-Packet ready, next packet dequeued");
- cs->tx_cnt = 0;
- cs->dc.amd7930.tx_xmtlen = 0;
- Amd7930_fill_Dfifo(cs);
- }
- else
- schedule_event(cs, D_XMTBUFREADY);
- /* AMD interrupts on */
- AmdIrqOn(cs);
- }
-
- /* LIU status interrupt -> read LSR, check statechanges */
- if (lsr & 0x38) {
- /* AMD interrupts off */
- AmdIrqOff(cs);
-
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "Amd: interrupt: LSR=0x%02X, LIU is in state %d", lsr, ((lsr & 0x7) + 2));
-
- cs->dc.amd7930.ph_state = (lsr & 0x7) + 2;
-
- schedule_event(cs, D_L1STATECHANGE);
- /* AMD interrupts on */
- AmdIrqOn(cs);
- }
-
-		/* read the interrupt register again; if a new interrupt flag is set, restart the handler */
- irflags = rByteAMD(cs, 0x00);
- }
-
-}
-
-static void
-Amd7930_l1hw(struct PStack *st, int pr, void *arg)
-{
- struct IsdnCardState *cs = (struct IsdnCardState *) st->l1.hardware;
- struct sk_buff *skb = arg;
- u_long flags;
-
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "Amd7930: l1hw called, pr: 0x%04X", pr);
-
- switch (pr) {
- case (PH_DATA | REQUEST):
- if (cs->debug & DEB_DLOG_HEX)
- LogFrame(cs, skb->data, skb->len);
- if (cs->debug & DEB_DLOG_VERBOSE)
- dlogframe(cs, skb, 0);
- spin_lock_irqsave(&cs->lock, flags);
- if (cs->tx_skb) {
- skb_queue_tail(&cs->sq, skb);
-#ifdef L2FRAME_DEBUG /* psa */
- if (cs->debug & L1_DEB_LAPD)
- Logl2Frame(cs, skb, "Amd7930: l1hw: PH_DATA Queued", 0);
-#endif
- } else {
- cs->tx_skb = skb;
- cs->tx_cnt = 0;
- cs->dc.amd7930.tx_xmtlen = 0;
-#ifdef L2FRAME_DEBUG /* psa */
- if (cs->debug & L1_DEB_LAPD)
- Logl2Frame(cs, skb, "Amd7930: l1hw: PH_DATA", 0);
-#endif
- Amd7930_fill_Dfifo(cs);
- }
- spin_unlock_irqrestore(&cs->lock, flags);
- break;
- case (PH_PULL | INDICATION):
- spin_lock_irqsave(&cs->lock, flags);
- if (cs->tx_skb) {
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "Amd7930: l1hw: l2l1 tx_skb exist this shouldn't happen");
- skb_queue_tail(&cs->sq, skb);
- spin_unlock_irqrestore(&cs->lock, flags);
- break;
- }
- if (cs->debug & DEB_DLOG_HEX)
- LogFrame(cs, skb->data, skb->len);
- if (cs->debug & DEB_DLOG_VERBOSE)
- dlogframe(cs, skb, 0);
- cs->tx_skb = skb;
- cs->tx_cnt = 0;
- cs->dc.amd7930.tx_xmtlen = 0;
-#ifdef L2FRAME_DEBUG /* psa */
- if (cs->debug & L1_DEB_LAPD)
- Logl2Frame(cs, skb, "Amd7930: l1hw: PH_DATA_PULLED", 0);
-#endif
- Amd7930_fill_Dfifo(cs);
- spin_unlock_irqrestore(&cs->lock, flags);
- break;
- case (PH_PULL | REQUEST):
-#ifdef L2FRAME_DEBUG /* psa */
- if (cs->debug & L1_DEB_LAPD)
- debugl1(cs, "Amd7930: l1hw: -> PH_REQUEST_PULL, skb: %s", (cs->tx_skb) ? "yes" : "no");
-#endif
- if (!cs->tx_skb) {
- test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
- st->l1.l1l2(st, PH_PULL | CONFIRM, NULL);
- } else
- test_and_set_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
- break;
- case (HW_RESET | REQUEST):
- spin_lock_irqsave(&cs->lock, flags);
- if (cs->dc.amd7930.ph_state == 8) {
- /* b-channels off, PH-AR cleared
- * change to F3 */
- Amd7930_ph_command(cs, 0x20, "HW_RESET REQUEST"); //LMR1 bit 5
- spin_unlock_irqrestore(&cs->lock, flags);
- } else {
- Amd7930_ph_command(cs, 0x40, "HW_RESET REQUEST");
- cs->dc.amd7930.ph_state = 2;
- spin_unlock_irqrestore(&cs->lock, flags);
- Amd7930_new_ph(cs);
- }
- break;
- case (HW_ENABLE | REQUEST):
- cs->dc.amd7930.ph_state = 9;
- Amd7930_new_ph(cs);
- break;
- case (HW_INFO3 | REQUEST):
- // automatic
- break;
- case (HW_TESTLOOP | REQUEST):
- /* not implemented yet */
- break;
- case (HW_DEACTIVATE | RESPONSE):
- skb_queue_purge(&cs->rq);
- skb_queue_purge(&cs->sq);
- if (cs->tx_skb) {
- dev_kfree_skb(cs->tx_skb);
- cs->tx_skb = NULL;
- }
- if (test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags))
- del_timer(&cs->dbusytimer);
- if (test_and_clear_bit(FLG_L1_DBUSY, &cs->HW_Flags))
- schedule_event(cs, D_CLEARBUSY);
- break;
- default:
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "Amd7930: l1hw: unknown %04x", pr);
- break;
- }
-}
-
-static void
-setstack_Amd7930(struct PStack *st, struct IsdnCardState *cs)
-{
-
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "Amd7930: setstack called");
-
- st->l1.l1hw = Amd7930_l1hw;
-}
-
-
-static void
-DC_Close_Amd7930(struct IsdnCardState *cs) {
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "Amd7930: DC_Close called");
-}
-
-
-static void
-dbusy_timer_handler(struct timer_list *t)
-{
- struct IsdnCardState *cs = from_timer(cs, t, dbusytimer);
- u_long flags;
- struct PStack *stptr;
- WORD dtcr, der;
- BYTE dsr1, dsr2;
-
-
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "Amd7930: dbusy_timer expired!");
-
- if (test_bit(FLG_DBUSY_TIMER, &cs->HW_Flags)) {
- spin_lock_irqsave(&cs->lock, flags);
- /* D Transmit Byte Count Register:
- * Counts down packet's number of Bytes, 0 if packet ready */
- dtcr = rWordAMD(cs, 0x85);
- dsr1 = rByteAMD(cs, 0x02);
- dsr2 = rByteAMD(cs, 0x07);
- der = rWordAMD(cs, 0x03);
-
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "Amd7930: dbusy_timer_handler: DSR1=0x%02X, DSR2=0x%02X, DER=0x%04X, cs->tx_skb->len=%u, tx_stat=%u, dtcr=%u, cs->tx_cnt=%u", dsr1, dsr2, der, cs->tx_skb->len, cs->dc.amd7930.tx_xmtlen, dtcr, cs->tx_cnt);
-
- if ((cs->dc.amd7930.tx_xmtlen - dtcr) < cs->tx_cnt) { /* D-Channel Busy */
- test_and_set_bit(FLG_L1_DBUSY, &cs->HW_Flags);
- stptr = cs->stlist;
- spin_unlock_irqrestore(&cs->lock, flags);
- while (stptr != NULL) {
- stptr->l1.l1l2(stptr, PH_PAUSE | INDICATION, NULL);
- stptr = stptr->next;
- }
-
- } else {
- /* discard frame; reset transceiver */
- test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags);
- if (cs->tx_skb) {
- dev_kfree_skb_any(cs->tx_skb);
- cs->tx_cnt = 0;
- cs->tx_skb = NULL;
- cs->dc.amd7930.tx_xmtlen = 0;
- } else {
- printk(KERN_WARNING "HiSax: Amd7930: D-Channel Busy no skb\n");
- debugl1(cs, "Amd7930: D-Channel Busy no skb");
-
- }
- /* Transmitter reset, abort transmit */
- wByteAMD(cs, 0x21, 0x82);
- wByteAMD(cs, 0x21, 0x02);
- spin_unlock_irqrestore(&cs->lock, flags);
- cs->irq_func(cs->irq, cs);
-
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "Amd7930: dbusy_timer_handler: Transmitter reset");
- }
- }
-}
-
-
-
-void Amd7930_init(struct IsdnCardState *cs)
-{
- WORD *ptr;
- BYTE cmd, cnt;
-
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "Amd7930: initamd called");
-
- cs->dc.amd7930.tx_xmtlen = 0;
- cs->dc.amd7930.old_state = 0;
- cs->dc.amd7930.lmr1 = 0x40;
- cs->dc.amd7930.ph_command = Amd7930_ph_command;
- cs->setstack_d = setstack_Amd7930;
- cs->DC_Close = DC_Close_Amd7930;
-
- /* AMD Initialisation */
- for (ptr = initAMD; *ptr != 0xFFFF; ) {
- cmd = LOBYTE(*ptr);
-
- /* read */
- if (*ptr++ >= 0x100) {
- if (cmd < 8)
- /* reset register */
- rByteAMD(cs, cmd);
- else {
- wByteAMD(cs, 0x00, cmd);
- for (cnt = *ptr++; cnt > 0; cnt--)
- rByteAMD(cs, 0x01);
- }
- }
- /* write */
- else if (cmd < 8)
- wByteAMD(cs, cmd, LOBYTE(*ptr++));
-
- else {
- wByteAMD(cs, 0x00, cmd);
- for (cnt = *ptr++; cnt > 0; cnt--)
- wByteAMD(cs, 0x01, LOBYTE(*ptr++));
- }
- }
-}
-
-void setup_Amd7930(struct IsdnCardState *cs)
-{
- INIT_WORK(&cs->tqueue, Amd7930_bh);
- timer_setup(&cs->dbusytimer, dbusy_timer_handler, 0);
-}
diff --git a/drivers/isdn/hisax/amd7930_fn.h b/drivers/isdn/hisax/amd7930_fn.h
deleted file mode 100644
index 1f4d80c5e5a6..000000000000
--- a/drivers/isdn/hisax/amd7930_fn.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/* drivers/isdn/hisax/amd7930_fn.h
- *
- * gerdes_amd7930.h Header-file included by
- * gerdes_amd7930.c
- *
- * Author Christoph Ersfeld <info@formula-n.de>
- * Formula-n Europe AG (www.formula-n.com)
- * previously Gerdes AG
- *
- *
- * This file is (c) under GNU PUBLIC LICENSE
- */
-
-
-
-
-#define BYTE unsigned char
-#define WORD unsigned int
-#define rByteAMD(cs, reg) cs->readisac(cs, reg)
-#define wByteAMD(cs, reg, val) cs->writeisac(cs, reg, val)
-#define rWordAMD(cs, reg) ReadWordAmd7930(cs, reg)
-#define wWordAMD(cs, reg, val) WriteWordAmd7930(cs, reg, val)
-#define HIBYTE(w) ((unsigned char)((w & 0xff00) / 256))
-#define LOBYTE(w) ((unsigned char)(w & 0x00ff))
-
-#define AmdIrqOff(cs) cs->dc.amd7930.setIrqMask(cs, 0)
-#define AmdIrqOn(cs) cs->dc.amd7930.setIrqMask(cs, 1)
-
-#define AMD_CR 0x00
-#define AMD_DR 0x01
-
-
-#define DBUSY_TIMER_VALUE 80
-
-extern void Amd7930_interrupt(struct IsdnCardState *, unsigned char);
-extern void Amd7930_init(struct IsdnCardState *);
-extern void setup_Amd7930(struct IsdnCardState *);
diff --git a/drivers/isdn/hisax/arcofi.c b/drivers/isdn/hisax/arcofi.c
deleted file mode 100644
index 2f784f96d439..000000000000
--- a/drivers/isdn/hisax/arcofi.c
+++ /dev/null
@@ -1,131 +0,0 @@
-/* $Id: arcofi.c,v 1.14.2.3 2004/01/13 14:31:24 keil Exp $
- *
- * Control of the ARCOFI 2165
- *
- * Author Karsten Keil
- * Copyright by Karsten Keil <keil@isdn4linux.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/sched.h>
-#include "hisax.h"
-#include "isdnl1.h"
-#include "isac.h"
-#include "arcofi.h"
-
-#define ARCOFI_TIMER_VALUE 20
-
-static void
-add_arcofi_timer(struct IsdnCardState *cs) {
- if (test_and_set_bit(FLG_ARCOFI_TIMER, &cs->HW_Flags)) {
- del_timer(&cs->dc.isac.arcofitimer);
- }
- cs->dc.isac.arcofitimer.expires = jiffies + ((ARCOFI_TIMER_VALUE * HZ) / 1000);
- add_timer(&cs->dc.isac.arcofitimer);
-}
-
-static void
-send_arcofi(struct IsdnCardState *cs) {
- add_arcofi_timer(cs);
- cs->dc.isac.mon_txp = 0;
- cs->dc.isac.mon_txc = cs->dc.isac.arcofi_list->len;
- memcpy(cs->dc.isac.mon_tx, cs->dc.isac.arcofi_list->msg, cs->dc.isac.mon_txc);
- switch (cs->dc.isac.arcofi_bc) {
- case 0: break;
- case 1: cs->dc.isac.mon_tx[1] |= 0x40;
- break;
- default: break;
- }
- cs->dc.isac.mocr &= 0x0f;
- cs->dc.isac.mocr |= 0xa0;
- cs->writeisac(cs, ISAC_MOCR, cs->dc.isac.mocr);
- (void) cs->readisac(cs, ISAC_MOSR);
- cs->writeisac(cs, ISAC_MOX1, cs->dc.isac.mon_tx[cs->dc.isac.mon_txp++]);
- cs->dc.isac.mocr |= 0x10;
- cs->writeisac(cs, ISAC_MOCR, cs->dc.isac.mocr);
-}
-
-int
-arcofi_fsm(struct IsdnCardState *cs, int event, void *data) {
- if (cs->debug & L1_DEB_MONITOR) {
- debugl1(cs, "arcofi state %d event %d", cs->dc.isac.arcofi_state, event);
- }
- if (event == ARCOFI_TIMEOUT) {
- cs->dc.isac.arcofi_state = ARCOFI_NOP;
- test_and_set_bit(FLG_ARCOFI_ERROR, &cs->HW_Flags);
- wake_up(&cs->dc.isac.arcofi_wait);
- return (1);
- }
- switch (cs->dc.isac.arcofi_state) {
- case ARCOFI_NOP:
- if (event == ARCOFI_START) {
- cs->dc.isac.arcofi_list = data;
- cs->dc.isac.arcofi_state = ARCOFI_TRANSMIT;
- send_arcofi(cs);
- }
- break;
- case ARCOFI_TRANSMIT:
- if (event == ARCOFI_TX_END) {
- if (cs->dc.isac.arcofi_list->receive) {
- add_arcofi_timer(cs);
- cs->dc.isac.arcofi_state = ARCOFI_RECEIVE;
- } else {
- if (cs->dc.isac.arcofi_list->next) {
- cs->dc.isac.arcofi_list =
- cs->dc.isac.arcofi_list->next;
- send_arcofi(cs);
- } else {
- if (test_and_clear_bit(FLG_ARCOFI_TIMER, &cs->HW_Flags)) {
- del_timer(&cs->dc.isac.arcofitimer);
- }
- cs->dc.isac.arcofi_state = ARCOFI_NOP;
- wake_up(&cs->dc.isac.arcofi_wait);
- }
- }
- }
- break;
- case ARCOFI_RECEIVE:
- if (event == ARCOFI_RX_END) {
- if (cs->dc.isac.arcofi_list->next) {
- cs->dc.isac.arcofi_list =
- cs->dc.isac.arcofi_list->next;
- cs->dc.isac.arcofi_state = ARCOFI_TRANSMIT;
- send_arcofi(cs);
- } else {
- if (test_and_clear_bit(FLG_ARCOFI_TIMER, &cs->HW_Flags)) {
- del_timer(&cs->dc.isac.arcofitimer);
- }
- cs->dc.isac.arcofi_state = ARCOFI_NOP;
- wake_up(&cs->dc.isac.arcofi_wait);
- }
- }
- break;
- default:
- debugl1(cs, "Arcofi unknown state %x", cs->dc.isac.arcofi_state);
- return (2);
- }
- return (0);
-}
-
-static void
-arcofi_timer(struct timer_list *t) {
- struct IsdnCardState *cs = from_timer(cs, t, dc.isac.arcofitimer);
- arcofi_fsm(cs, ARCOFI_TIMEOUT, NULL);
-}
-
-void
-clear_arcofi(struct IsdnCardState *cs) {
- if (test_and_clear_bit(FLG_ARCOFI_TIMER, &cs->HW_Flags)) {
- del_timer(&cs->dc.isac.arcofitimer);
- }
-}
-
-void
-init_arcofi(struct IsdnCardState *cs) {
- timer_setup(&cs->dc.isac.arcofitimer, arcofi_timer, 0);
- init_waitqueue_head(&cs->dc.isac.arcofi_wait);
- test_and_set_bit(HW_ARCOFI, &cs->HW_Flags);
-}
diff --git a/drivers/isdn/hisax/arcofi.h b/drivers/isdn/hisax/arcofi.h
deleted file mode 100644
index b9c77529fabf..000000000000
--- a/drivers/isdn/hisax/arcofi.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* $Id: arcofi.h,v 1.6.6.2 2001/09/23 22:24:46 kai Exp $
- *
- * Control of the ARCOFI 2165
- *
- * Author Karsten Keil
- * Copyright by Karsten Keil <keil@isdn4linux.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#define ARCOFI_USE 1
-
-/* states */
-#define ARCOFI_NOP 0
-#define ARCOFI_TRANSMIT 1
-#define ARCOFI_RECEIVE 2
-/* events */
-#define ARCOFI_START 1
-#define ARCOFI_TX_END 2
-#define ARCOFI_RX_END 3
-#define ARCOFI_TIMEOUT 4
-
-extern int arcofi_fsm(struct IsdnCardState *cs, int event, void *data);
-extern void init_arcofi(struct IsdnCardState *cs);
-extern void clear_arcofi(struct IsdnCardState *cs);
diff --git a/drivers/isdn/hisax/asuscom.c b/drivers/isdn/hisax/asuscom.c
deleted file mode 100644
index 74c871495e81..000000000000
--- a/drivers/isdn/hisax/asuscom.c
+++ /dev/null
@@ -1,423 +0,0 @@
-/* $Id: asuscom.c,v 1.14.2.4 2004/01/13 23:48:39 keil Exp $
- *
- * low level stuff for ASUSCOM NETWORK INC. ISDNLink cards
- *
- * Author Karsten Keil
- * Copyright by Karsten Keil <keil@isdn4linux.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- * Thanks to ASUSCOM NETWORK INC. Taiwan and Dynalink NL for information
- *
- */
-
-#include <linux/init.h>
-#include <linux/isapnp.h>
-#include "hisax.h"
-#include "isac.h"
-#include "ipac.h"
-#include "hscx.h"
-#include "isdnl1.h"
-
-static const char *Asuscom_revision = "$Revision: 1.14.2.4 $";
-
-#define byteout(addr, val) outb(val, addr)
-#define bytein(addr) inb(addr)
-
-#define ASUS_ISAC 0
-#define ASUS_HSCX 1
-#define ASUS_ADR 2
-#define ASUS_CTRL_U7 3
-#define ASUS_CTRL_POTS 5
-
-#define ASUS_IPAC_ALE 0
-#define ASUS_IPAC_DATA 1
-
-#define ASUS_ISACHSCX 1
-#define ASUS_IPAC 2
-
-/* CARD_ADR (Write) */
-#define ASUS_RESET 0x80	/* bit 7: reset line */
-
-static inline u_char
-readreg(unsigned int ale, unsigned int adr, u_char off)
-{
- register u_char ret;
-
- byteout(ale, off);
- ret = bytein(adr);
- return (ret);
-}
-
-static inline void
-readfifo(unsigned int ale, unsigned int adr, u_char off, u_char *data, int size)
-{
- byteout(ale, off);
- insb(adr, data, size);
-}
-
-
-static inline void
-writereg(unsigned int ale, unsigned int adr, u_char off, u_char data)
-{
- byteout(ale, off);
- byteout(adr, data);
-}
-
-static inline void
-writefifo(unsigned int ale, unsigned int adr, u_char off, u_char *data, int size)
-{
- byteout(ale, off);
- outsb(adr, data, size);
-}
-
-/* Interface functions */
-
-static u_char
-ReadISAC(struct IsdnCardState *cs, u_char offset)
-{
- return (readreg(cs->hw.asus.adr, cs->hw.asus.isac, offset));
-}
-
-static void
-WriteISAC(struct IsdnCardState *cs, u_char offset, u_char value)
-{
- writereg(cs->hw.asus.adr, cs->hw.asus.isac, offset, value);
-}
-
-static void
-ReadISACfifo(struct IsdnCardState *cs, u_char *data, int size)
-{
- readfifo(cs->hw.asus.adr, cs->hw.asus.isac, 0, data, size);
-}
-
-static void
-WriteISACfifo(struct IsdnCardState *cs, u_char *data, int size)
-{
- writefifo(cs->hw.asus.adr, cs->hw.asus.isac, 0, data, size);
-}
-
-static u_char
-ReadISAC_IPAC(struct IsdnCardState *cs, u_char offset)
-{
- return (readreg(cs->hw.asus.adr, cs->hw.asus.isac, offset | 0x80));
-}
-
-static void
-WriteISAC_IPAC(struct IsdnCardState *cs, u_char offset, u_char value)
-{
- writereg(cs->hw.asus.adr, cs->hw.asus.isac, offset | 0x80, value);
-}
-
-static void
-ReadISACfifo_IPAC(struct IsdnCardState *cs, u_char *data, int size)
-{
- readfifo(cs->hw.asus.adr, cs->hw.asus.isac, 0x80, data, size);
-}
-
-static void
-WriteISACfifo_IPAC(struct IsdnCardState *cs, u_char *data, int size)
-{
- writefifo(cs->hw.asus.adr, cs->hw.asus.isac, 0x80, data, size);
-}
-
-static u_char
-ReadHSCX(struct IsdnCardState *cs, int hscx, u_char offset)
-{
- return (readreg(cs->hw.asus.adr,
- cs->hw.asus.hscx, offset + (hscx ? 0x40 : 0)));
-}
-
-static void
-WriteHSCX(struct IsdnCardState *cs, int hscx, u_char offset, u_char value)
-{
- writereg(cs->hw.asus.adr,
- cs->hw.asus.hscx, offset + (hscx ? 0x40 : 0), value);
-}
-
-/*
- * fast interrupt HSCX stuff goes here
- */
-
-#define READHSCX(cs, nr, reg) readreg(cs->hw.asus.adr, \
- cs->hw.asus.hscx, reg + (nr ? 0x40 : 0))
-#define WRITEHSCX(cs, nr, reg, data) writereg(cs->hw.asus.adr, \
- cs->hw.asus.hscx, reg + (nr ? 0x40 : 0), data)
-
-#define READHSCXFIFO(cs, nr, ptr, cnt) readfifo(cs->hw.asus.adr, \
- cs->hw.asus.hscx, (nr ? 0x40 : 0), ptr, cnt)
-
-#define WRITEHSCXFIFO(cs, nr, ptr, cnt) writefifo(cs->hw.asus.adr, \
- cs->hw.asus.hscx, (nr ? 0x40 : 0), ptr, cnt)
-
-#include "hscx_irq.c"
-
-static irqreturn_t
-asuscom_interrupt(int intno, void *dev_id)
-{
- struct IsdnCardState *cs = dev_id;
- u_char val;
- u_long flags;
-
- spin_lock_irqsave(&cs->lock, flags);
- val = readreg(cs->hw.asus.adr, cs->hw.asus.hscx, HSCX_ISTA + 0x40);
-Start_HSCX:
- if (val)
- hscx_int_main(cs, val);
- val = readreg(cs->hw.asus.adr, cs->hw.asus.isac, ISAC_ISTA);
-Start_ISAC:
- if (val)
- isac_interrupt(cs, val);
- val = readreg(cs->hw.asus.adr, cs->hw.asus.hscx, HSCX_ISTA + 0x40);
- if (val) {
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "HSCX IntStat after IntRoutine");
- goto Start_HSCX;
- }
- val = readreg(cs->hw.asus.adr, cs->hw.asus.isac, ISAC_ISTA);
- if (val) {
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "ISAC IntStat after IntRoutine");
- goto Start_ISAC;
- }
- writereg(cs->hw.asus.adr, cs->hw.asus.hscx, HSCX_MASK, 0xFF);
- writereg(cs->hw.asus.adr, cs->hw.asus.hscx, HSCX_MASK + 0x40, 0xFF);
- writereg(cs->hw.asus.adr, cs->hw.asus.isac, ISAC_MASK, 0xFF);
- writereg(cs->hw.asus.adr, cs->hw.asus.isac, ISAC_MASK, 0x0);
- writereg(cs->hw.asus.adr, cs->hw.asus.hscx, HSCX_MASK, 0x0);
- writereg(cs->hw.asus.adr, cs->hw.asus.hscx, HSCX_MASK + 0x40, 0x0);
- spin_unlock_irqrestore(&cs->lock, flags);
- return IRQ_HANDLED;
-}
-
-static irqreturn_t
-asuscom_interrupt_ipac(int intno, void *dev_id)
-{
- struct IsdnCardState *cs = dev_id;
- u_char ista, val, icnt = 5;
- u_long flags;
-
- spin_lock_irqsave(&cs->lock, flags);
- ista = readreg(cs->hw.asus.adr, cs->hw.asus.isac, IPAC_ISTA);
-Start_IPAC:
- if (cs->debug & L1_DEB_IPAC)
- debugl1(cs, "IPAC ISTA %02X", ista);
- if (ista & 0x0f) {
- val = readreg(cs->hw.asus.adr, cs->hw.asus.hscx, HSCX_ISTA + 0x40);
- if (ista & 0x01)
- val |= 0x01;
- if (ista & 0x04)
- val |= 0x02;
- if (ista & 0x08)
- val |= 0x04;
- if (val)
- hscx_int_main(cs, val);
- }
- if (ista & 0x20) {
- val = 0xfe & readreg(cs->hw.asus.adr, cs->hw.asus.isac, ISAC_ISTA | 0x80);
- if (val) {
- isac_interrupt(cs, val);
- }
- }
- if (ista & 0x10) {
- val = 0x01;
- isac_interrupt(cs, val);
- }
- ista = readreg(cs->hw.asus.adr, cs->hw.asus.isac, IPAC_ISTA);
- if ((ista & 0x3f) && icnt) {
- icnt--;
- goto Start_IPAC;
- }
- if (!icnt)
- printk(KERN_WARNING "ASUS IRQ LOOP\n");
- writereg(cs->hw.asus.adr, cs->hw.asus.isac, IPAC_MASK, 0xFF);
- writereg(cs->hw.asus.adr, cs->hw.asus.isac, IPAC_MASK, 0xC0);
- spin_unlock_irqrestore(&cs->lock, flags);
- return IRQ_HANDLED;
-}
-
-static void
-release_io_asuscom(struct IsdnCardState *cs)
-{
- int bytecnt = 8;
-
- if (cs->hw.asus.cfg_reg)
- release_region(cs->hw.asus.cfg_reg, bytecnt);
-}
-
-static void
-reset_asuscom(struct IsdnCardState *cs)
-{
- if (cs->subtyp == ASUS_IPAC)
- writereg(cs->hw.asus.adr, cs->hw.asus.isac, IPAC_POTA2, 0x20);
- else
- byteout(cs->hw.asus.adr, ASUS_RESET); /* Reset On */
- mdelay(10);
- if (cs->subtyp == ASUS_IPAC)
- writereg(cs->hw.asus.adr, cs->hw.asus.isac, IPAC_POTA2, 0x0);
- else
- byteout(cs->hw.asus.adr, 0); /* Reset Off */
- mdelay(10);
- if (cs->subtyp == ASUS_IPAC) {
- writereg(cs->hw.asus.adr, cs->hw.asus.isac, IPAC_CONF, 0x0);
- writereg(cs->hw.asus.adr, cs->hw.asus.isac, IPAC_ACFG, 0xff);
- writereg(cs->hw.asus.adr, cs->hw.asus.isac, IPAC_AOE, 0x0);
- writereg(cs->hw.asus.adr, cs->hw.asus.isac, IPAC_MASK, 0xc0);
- writereg(cs->hw.asus.adr, cs->hw.asus.isac, IPAC_PCFG, 0x12);
- }
-}
-
-static int
-Asus_card_msg(struct IsdnCardState *cs, int mt, void *arg)
-{
- u_long flags;
-
- switch (mt) {
- case CARD_RESET:
- spin_lock_irqsave(&cs->lock, flags);
- reset_asuscom(cs);
- spin_unlock_irqrestore(&cs->lock, flags);
- return (0);
- case CARD_RELEASE:
- release_io_asuscom(cs);
- return (0);
- case CARD_INIT:
- spin_lock_irqsave(&cs->lock, flags);
- cs->debug |= L1_DEB_IPAC;
- inithscxisac(cs, 3);
- spin_unlock_irqrestore(&cs->lock, flags);
- return (0);
- case CARD_TEST:
- return (0);
- }
- return (0);
-}
-
-#ifdef __ISAPNP__
-static struct isapnp_device_id asus_ids[] = {
- { ISAPNP_VENDOR('A', 'S', 'U'), ISAPNP_FUNCTION(0x1688),
- ISAPNP_VENDOR('A', 'S', 'U'), ISAPNP_FUNCTION(0x1688),
- (unsigned long) "Asus1688 PnP" },
- { ISAPNP_VENDOR('A', 'S', 'U'), ISAPNP_FUNCTION(0x1690),
- ISAPNP_VENDOR('A', 'S', 'U'), ISAPNP_FUNCTION(0x1690),
- (unsigned long) "Asus1690 PnP" },
- { ISAPNP_VENDOR('S', 'I', 'E'), ISAPNP_FUNCTION(0x0020),
- ISAPNP_VENDOR('S', 'I', 'E'), ISAPNP_FUNCTION(0x0020),
- (unsigned long) "Isurf2 PnP" },
- { ISAPNP_VENDOR('E', 'L', 'F'), ISAPNP_FUNCTION(0x0000),
- ISAPNP_VENDOR('E', 'L', 'F'), ISAPNP_FUNCTION(0x0000),
- (unsigned long) "Iscas TE320" },
- { 0, }
-};
-
-static struct isapnp_device_id *ipid = &asus_ids[0];
-static struct pnp_card *pnp_c = NULL;
-#endif
-
-int setup_asuscom(struct IsdnCard *card)
-{
- int bytecnt;
- struct IsdnCardState *cs = card->cs;
- u_char val;
- char tmp[64];
-
- strcpy(tmp, Asuscom_revision);
- printk(KERN_INFO "HiSax: Asuscom ISDNLink driver Rev. %s\n", HiSax_getrev(tmp));
- if (cs->typ != ISDN_CTYPE_ASUSCOM)
- return (0);
-#ifdef __ISAPNP__
- if (!card->para[1] && isapnp_present()) {
- struct pnp_dev *pnp_d;
- while (ipid->card_vendor) {
- if ((pnp_c = pnp_find_card(ipid->card_vendor,
- ipid->card_device, pnp_c))) {
- pnp_d = NULL;
- if ((pnp_d = pnp_find_dev(pnp_c,
- ipid->vendor, ipid->function, pnp_d))) {
- int err;
-
- printk(KERN_INFO "HiSax: %s detected\n",
- (char *)ipid->driver_data);
- pnp_disable_dev(pnp_d);
- err = pnp_activate_dev(pnp_d);
- if (err < 0) {
- printk(KERN_WARNING "%s: pnp_activate_dev ret(%d)\n",
- __func__, err);
- return (0);
- }
- card->para[1] = pnp_port_start(pnp_d, 0);
- card->para[0] = pnp_irq(pnp_d, 0);
- if (card->para[0] == -1 || !card->para[1]) {
-					printk(KERN_ERR "AsusPnP: some resources are missing %ld/%lx\n",
- card->para[0], card->para[1]);
- pnp_disable_dev(pnp_d);
- return (0);
- }
- break;
- } else {
- printk(KERN_ERR "AsusPnP: PnP error card found, no device\n");
- }
- }
- ipid++;
- pnp_c = NULL;
- }
- if (!ipid->card_vendor) {
- printk(KERN_INFO "AsusPnP: no ISAPnP card found\n");
- return (0);
- }
- }
-#endif
- bytecnt = 8;
- cs->hw.asus.cfg_reg = card->para[1];
- cs->irq = card->para[0];
- if (!request_region(cs->hw.asus.cfg_reg, bytecnt, "asuscom isdn")) {
- printk(KERN_WARNING
- "HiSax: ISDNLink config port %x-%x already in use\n",
- cs->hw.asus.cfg_reg,
- cs->hw.asus.cfg_reg + bytecnt);
- return (0);
- }
- printk(KERN_INFO "ISDNLink: defined at 0x%x IRQ %d\n",
- cs->hw.asus.cfg_reg, cs->irq);
- setup_isac(cs);
- cs->BC_Read_Reg = &ReadHSCX;
- cs->BC_Write_Reg = &WriteHSCX;
- cs->BC_Send_Data = &hscx_fill_fifo;
- cs->cardmsg = &Asus_card_msg;
- val = readreg(cs->hw.asus.cfg_reg + ASUS_IPAC_ALE,
- cs->hw.asus.cfg_reg + ASUS_IPAC_DATA, IPAC_ID);
- if ((val == 1) || (val == 2)) {
- cs->subtyp = ASUS_IPAC;
- cs->hw.asus.adr = cs->hw.asus.cfg_reg + ASUS_IPAC_ALE;
- cs->hw.asus.isac = cs->hw.asus.cfg_reg + ASUS_IPAC_DATA;
- cs->hw.asus.hscx = cs->hw.asus.cfg_reg + ASUS_IPAC_DATA;
- test_and_set_bit(HW_IPAC, &cs->HW_Flags);
- cs->readisac = &ReadISAC_IPAC;
- cs->writeisac = &WriteISAC_IPAC;
- cs->readisacfifo = &ReadISACfifo_IPAC;
- cs->writeisacfifo = &WriteISACfifo_IPAC;
- cs->irq_func = &asuscom_interrupt_ipac;
- printk(KERN_INFO "Asus: IPAC version %x\n", val);
- } else {
- cs->subtyp = ASUS_ISACHSCX;
- cs->hw.asus.adr = cs->hw.asus.cfg_reg + ASUS_ADR;
- cs->hw.asus.isac = cs->hw.asus.cfg_reg + ASUS_ISAC;
- cs->hw.asus.hscx = cs->hw.asus.cfg_reg + ASUS_HSCX;
- cs->hw.asus.u7 = cs->hw.asus.cfg_reg + ASUS_CTRL_U7;
- cs->hw.asus.pots = cs->hw.asus.cfg_reg + ASUS_CTRL_POTS;
- cs->readisac = &ReadISAC;
- cs->writeisac = &WriteISAC;
- cs->readisacfifo = &ReadISACfifo;
- cs->writeisacfifo = &WriteISACfifo;
- cs->irq_func = &asuscom_interrupt;
- ISACVersion(cs, "ISDNLink:");
- if (HscxVersion(cs, "ISDNLink:")) {
- printk(KERN_WARNING
- "ISDNLink: wrong HSCX versions check IO address\n");
- release_io_asuscom(cs);
- return (0);
- }
- }
- return (1);
-}
diff --git a/drivers/isdn/hisax/avm_a1.c b/drivers/isdn/hisax/avm_a1.c
deleted file mode 100644
index 7dd74087ad72..000000000000
--- a/drivers/isdn/hisax/avm_a1.c
+++ /dev/null
@@ -1,307 +0,0 @@
-/* $Id: avm_a1.c,v 2.15.2.4 2004/01/13 21:46:03 keil Exp $
- *
- * low level stuff for AVM A1 (Fritz) isdn cards
- *
- * Author Karsten Keil
- * Copyright by Karsten Keil <keil@isdn4linux.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/init.h>
-#include "hisax.h"
-#include "isac.h"
-#include "hscx.h"
-#include "isdnl1.h"
-
-static const char *avm_revision = "$Revision: 2.15.2.4 $";
-
-#define AVM_A1_STAT_ISAC 0x01
-#define AVM_A1_STAT_HSCX 0x02
-#define AVM_A1_STAT_TIMER 0x04
-
-#define byteout(addr, val) outb(val, addr)
-#define bytein(addr) inb(addr)
-
-static inline u_char
-readreg(unsigned int adr, u_char off)
-{
- return (bytein(adr + off));
-}
-
-static inline void
-writereg(unsigned int adr, u_char off, u_char data)
-{
- byteout(adr + off, data);
-}
-
-
-static inline void
-read_fifo(unsigned int adr, u_char *data, int size)
-{
- insb(adr, data, size);
-}
-
-static void
-write_fifo(unsigned int adr, u_char *data, int size)
-{
- outsb(adr, data, size);
-}
-
-/* Interface functions */
-
-static u_char
-ReadISAC(struct IsdnCardState *cs, u_char offset)
-{
- return (readreg(cs->hw.avm.isac, offset));
-}
-
-static void
-WriteISAC(struct IsdnCardState *cs, u_char offset, u_char value)
-{
- writereg(cs->hw.avm.isac, offset, value);
-}
-
-static void
-ReadISACfifo(struct IsdnCardState *cs, u_char *data, int size)
-{
- read_fifo(cs->hw.avm.isacfifo, data, size);
-}
-
-static void
-WriteISACfifo(struct IsdnCardState *cs, u_char *data, int size)
-{
- write_fifo(cs->hw.avm.isacfifo, data, size);
-}
-
-static u_char
-ReadHSCX(struct IsdnCardState *cs, int hscx, u_char offset)
-{
- return (readreg(cs->hw.avm.hscx[hscx], offset));
-}
-
-static void
-WriteHSCX(struct IsdnCardState *cs, int hscx, u_char offset, u_char value)
-{
- writereg(cs->hw.avm.hscx[hscx], offset, value);
-}
-
-/*
- * fast interrupt HSCX stuff goes here
- */
-
-#define READHSCX(cs, nr, reg) readreg(cs->hw.avm.hscx[nr], reg)
-#define WRITEHSCX(cs, nr, reg, data) writereg(cs->hw.avm.hscx[nr], reg, data)
-#define READHSCXFIFO(cs, nr, ptr, cnt) read_fifo(cs->hw.avm.hscxfifo[nr], ptr, cnt)
-#define WRITEHSCXFIFO(cs, nr, ptr, cnt) write_fifo(cs->hw.avm.hscxfifo[nr], ptr, cnt)
-
-#include "hscx_irq.c"
-
-static irqreturn_t
-avm_a1_interrupt(int intno, void *dev_id)
-{
- struct IsdnCardState *cs = dev_id;
- u_char val, sval;
- u_long flags;
-
- spin_lock_irqsave(&cs->lock, flags);
- while (((sval = bytein(cs->hw.avm.cfg_reg)) & 0xf) != 0x7) {
- if (!(sval & AVM_A1_STAT_TIMER)) {
- byteout(cs->hw.avm.cfg_reg, 0x1E);
- sval = bytein(cs->hw.avm.cfg_reg);
- } else if (cs->debug & L1_DEB_INTSTAT)
- debugl1(cs, "avm IntStatus %x", sval);
- if (!(sval & AVM_A1_STAT_HSCX)) {
- val = readreg(cs->hw.avm.hscx[1], HSCX_ISTA);
- if (val)
- hscx_int_main(cs, val);
- }
- if (!(sval & AVM_A1_STAT_ISAC)) {
- val = readreg(cs->hw.avm.isac, ISAC_ISTA);
- if (val)
- isac_interrupt(cs, val);
- }
- }
- writereg(cs->hw.avm.hscx[0], HSCX_MASK, 0xFF);
- writereg(cs->hw.avm.hscx[1], HSCX_MASK, 0xFF);
- writereg(cs->hw.avm.isac, ISAC_MASK, 0xFF);
- writereg(cs->hw.avm.isac, ISAC_MASK, 0x0);
- writereg(cs->hw.avm.hscx[0], HSCX_MASK, 0x0);
- writereg(cs->hw.avm.hscx[1], HSCX_MASK, 0x0);
- spin_unlock_irqrestore(&cs->lock, flags);
- return IRQ_HANDLED;
-}
-
-static inline void
-release_ioregs(struct IsdnCardState *cs, int mask)
-{
- release_region(cs->hw.avm.cfg_reg, 8);
- if (mask & 1)
- release_region(cs->hw.avm.isac + 32, 32);
- if (mask & 2)
- release_region(cs->hw.avm.isacfifo, 1);
- if (mask & 4)
- release_region(cs->hw.avm.hscx[0] + 32, 32);
- if (mask & 8)
- release_region(cs->hw.avm.hscxfifo[0], 1);
- if (mask & 0x10)
- release_region(cs->hw.avm.hscx[1] + 32, 32);
- if (mask & 0x20)
- release_region(cs->hw.avm.hscxfifo[1], 1);
-}
-
-static int
-AVM_card_msg(struct IsdnCardState *cs, int mt, void *arg)
-{
- u_long flags;
-
- switch (mt) {
- case CARD_RESET:
- return (0);
- case CARD_RELEASE:
- release_ioregs(cs, 0x3f);
- return (0);
- case CARD_INIT:
- spin_lock_irqsave(&cs->lock, flags);
- inithscxisac(cs, 1);
- byteout(cs->hw.avm.cfg_reg, 0x16);
- byteout(cs->hw.avm.cfg_reg, 0x1E);
- inithscxisac(cs, 2);
- spin_unlock_irqrestore(&cs->lock, flags);
- return (0);
- case CARD_TEST:
- return (0);
- }
- return (0);
-}
-
-int setup_avm_a1(struct IsdnCard *card)
-{
- u_char val;
- struct IsdnCardState *cs = card->cs;
- char tmp[64];
-
- strcpy(tmp, avm_revision);
- printk(KERN_INFO "HiSax: AVM driver Rev. %s\n", HiSax_getrev(tmp));
- if (cs->typ != ISDN_CTYPE_A1)
- return (0);
-
- cs->hw.avm.cfg_reg = card->para[1] + 0x1800;
- cs->hw.avm.isac = card->para[1] + 0x1400 - 0x20;
- cs->hw.avm.hscx[0] = card->para[1] + 0x400 - 0x20;
- cs->hw.avm.hscx[1] = card->para[1] + 0xc00 - 0x20;
- cs->hw.avm.isacfifo = card->para[1] + 0x1000;
- cs->hw.avm.hscxfifo[0] = card->para[1];
- cs->hw.avm.hscxfifo[1] = card->para[1] + 0x800;
- cs->irq = card->para[0];
- if (!request_region(cs->hw.avm.cfg_reg, 8, "avm cfg")) {
- printk(KERN_WARNING
- "HiSax: AVM A1 config port %x-%x already in use\n",
- cs->hw.avm.cfg_reg,
- cs->hw.avm.cfg_reg + 8);
- return (0);
- }
- if (!request_region(cs->hw.avm.isac + 32, 32, "HiSax isac")) {
- printk(KERN_WARNING
- "HiSax: AVM A1 isac ports %x-%x already in use\n",
- cs->hw.avm.isac + 32,
- cs->hw.avm.isac + 64);
- release_ioregs(cs, 0);
- return (0);
- }
- if (!request_region(cs->hw.avm.isacfifo, 1, "HiSax isac fifo")) {
- printk(KERN_WARNING
- "HiSax: AVM A1 isac fifo port %x already in use\n",
- cs->hw.avm.isacfifo);
- release_ioregs(cs, 1);
- return (0);
- }
- if (!request_region(cs->hw.avm.hscx[0] + 32, 32, "HiSax hscx A")) {
- printk(KERN_WARNING
- "HiSax: AVM A1 hscx A ports %x-%x already in use\n",
- cs->hw.avm.hscx[0] + 32,
- cs->hw.avm.hscx[0] + 64);
- release_ioregs(cs, 3);
- return (0);
- }
- if (!request_region(cs->hw.avm.hscxfifo[0], 1, "HiSax hscx A fifo")) {
- printk(KERN_WARNING
- "HiSax: AVM A1 hscx A fifo port %x already in use\n",
- cs->hw.avm.hscxfifo[0]);
- release_ioregs(cs, 7);
- return (0);
- }
- if (!request_region(cs->hw.avm.hscx[1] + 32, 32, "HiSax hscx B")) {
- printk(KERN_WARNING
- "HiSax: AVM A1 hscx B ports %x-%x already in use\n",
- cs->hw.avm.hscx[1] + 32,
- cs->hw.avm.hscx[1] + 64);
- release_ioregs(cs, 0xf);
- return (0);
- }
- if (!request_region(cs->hw.avm.hscxfifo[1], 1, "HiSax hscx B fifo")) {
- printk(KERN_WARNING
- "HiSax: AVM A1 hscx B fifo port %x already in use\n",
- cs->hw.avm.hscxfifo[1]);
- release_ioregs(cs, 0x1f);
- return (0);
- }
- byteout(cs->hw.avm.cfg_reg, 0x0);
- HZDELAY(HZ / 5 + 1);
- byteout(cs->hw.avm.cfg_reg, 0x1);
- HZDELAY(HZ / 5 + 1);
- byteout(cs->hw.avm.cfg_reg, 0x0);
- HZDELAY(HZ / 5 + 1);
- val = cs->irq;
- if (val == 9)
- val = 2;
- byteout(cs->hw.avm.cfg_reg + 1, val);
- HZDELAY(HZ / 5 + 1);
- byteout(cs->hw.avm.cfg_reg, 0x0);
- HZDELAY(HZ / 5 + 1);
-
- val = bytein(cs->hw.avm.cfg_reg);
- printk(KERN_INFO "AVM A1: Byte at %x is %x\n",
- cs->hw.avm.cfg_reg, val);
- val = bytein(cs->hw.avm.cfg_reg + 3);
- printk(KERN_INFO "AVM A1: Byte at %x is %x\n",
- cs->hw.avm.cfg_reg + 3, val);
- val = bytein(cs->hw.avm.cfg_reg + 2);
- printk(KERN_INFO "AVM A1: Byte at %x is %x\n",
- cs->hw.avm.cfg_reg + 2, val);
- val = bytein(cs->hw.avm.cfg_reg);
- printk(KERN_INFO "AVM A1: Byte at %x is %x\n",
- cs->hw.avm.cfg_reg, val);
-
- printk(KERN_INFO "HiSax: AVM A1 config irq:%d cfg:0x%X\n",
- cs->irq,
- cs->hw.avm.cfg_reg);
- printk(KERN_INFO
- "HiSax: isac:0x%X/0x%X\n",
- cs->hw.avm.isac + 32, cs->hw.avm.isacfifo);
- printk(KERN_INFO
- "HiSax: hscx A:0x%X/0x%X hscx B:0x%X/0x%X\n",
- cs->hw.avm.hscx[0] + 32, cs->hw.avm.hscxfifo[0],
- cs->hw.avm.hscx[1] + 32, cs->hw.avm.hscxfifo[1]);
-
- cs->readisac = &ReadISAC;
- cs->writeisac = &WriteISAC;
- cs->readisacfifo = &ReadISACfifo;
- cs->writeisacfifo = &WriteISACfifo;
- cs->BC_Read_Reg = &ReadHSCX;
- cs->BC_Write_Reg = &WriteHSCX;
- cs->BC_Send_Data = &hscx_fill_fifo;
- setup_isac(cs);
- cs->cardmsg = &AVM_card_msg;
- cs->irq_func = &avm_a1_interrupt;
- ISACVersion(cs, "AVM A1:");
- if (HscxVersion(cs, "AVM A1:")) {
- printk(KERN_WARNING
- "AVM A1: wrong HSCX versions check IO address\n");
- release_ioregs(cs, 0x3f);
- return (0);
- }
- return (1);
-}
diff --git a/drivers/isdn/hisax/avm_a1p.c b/drivers/isdn/hisax/avm_a1p.c
deleted file mode 100644
index bc52d54ff5e1..000000000000
--- a/drivers/isdn/hisax/avm_a1p.c
+++ /dev/null
@@ -1,267 +0,0 @@
-/* $Id: avm_a1p.c,v 2.9.2.5 2004/01/24 20:47:19 keil Exp $
- *
- * low level stuff for the following AVM cards:
- * A1 PCMCIA
- * FRITZ!Card PCMCIA
- * FRITZ!Card PCMCIA 2.0
- *
- * Author Carsten Paeth
- * Copyright by Carsten Paeth <calle@calle.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/init.h>
-#include "hisax.h"
-#include "isac.h"
-#include "hscx.h"
-#include "isdnl1.h"
-
-/* register offsets */
-#define ADDRREG_OFFSET 0x02
-#define DATAREG_OFFSET 0x03
-#define ASL0_OFFSET 0x04
-#define ASL1_OFFSET 0x05
-#define MODREG_OFFSET 0x06
-#define VERREG_OFFSET 0x07
-
-/* address offsets */
-#define ISAC_FIFO_OFFSET 0x00
-#define ISAC_REG_OFFSET 0x20
-#define HSCX_CH_DIFF 0x40
-#define HSCX_FIFO_OFFSET 0x80
-#define HSCX_REG_OFFSET 0xa0
-
-/* read bits ASL0 */
-#define ASL0_R_TIMER 0x10 /* active low */
-#define ASL0_R_ISAC 0x20 /* active low */
-#define ASL0_R_HSCX 0x40 /* active low */
-#define ASL0_R_TESTBIT 0x80
-#define ASL0_R_IRQPENDING (ASL0_R_ISAC | ASL0_R_HSCX | ASL0_R_TIMER)
-
-/* write bits ASL0 */
-#define ASL0_W_RESET 0x01
-#define ASL0_W_TDISABLE 0x02
-#define ASL0_W_TRESET 0x04
-#define ASL0_W_IRQENABLE 0x08
-#define ASL0_W_TESTBIT 0x80
-
-/* write bits ASL1 */
-#define ASL1_W_LED0 0x10
-#define ASL1_W_LED1 0x20
-#define ASL1_W_ENABLE_S0 0xC0
-
-#define byteout(addr, val) outb(val, addr)
-#define bytein(addr) inb(addr)
-
-static const char *avm_revision = "$Revision: 2.9.2.5 $";
-
-static inline u_char
-ReadISAC(struct IsdnCardState *cs, u_char offset)
-{
- u_char ret;
-
- offset -= 0x20;
- byteout(cs->hw.avm.cfg_reg + ADDRREG_OFFSET, ISAC_REG_OFFSET + offset);
- ret = bytein(cs->hw.avm.cfg_reg + DATAREG_OFFSET);
- return ret;
-}
-
-static inline void
-WriteISAC(struct IsdnCardState *cs, u_char offset, u_char value)
-{
- offset -= 0x20;
- byteout(cs->hw.avm.cfg_reg + ADDRREG_OFFSET, ISAC_REG_OFFSET + offset);
- byteout(cs->hw.avm.cfg_reg + DATAREG_OFFSET, value);
-}
-
-static inline void
-ReadISACfifo(struct IsdnCardState *cs, u_char *data, int size)
-{
- byteout(cs->hw.avm.cfg_reg + ADDRREG_OFFSET, ISAC_FIFO_OFFSET);
- insb(cs->hw.avm.cfg_reg + DATAREG_OFFSET, data, size);
-}
-
-static inline void
-WriteISACfifo(struct IsdnCardState *cs, u_char *data, int size)
-{
- byteout(cs->hw.avm.cfg_reg + ADDRREG_OFFSET, ISAC_FIFO_OFFSET);
- outsb(cs->hw.avm.cfg_reg + DATAREG_OFFSET, data, size);
-}
-
-static inline u_char
-ReadHSCX(struct IsdnCardState *cs, int hscx, u_char offset)
-{
- u_char ret;
-
- offset -= 0x20;
- byteout(cs->hw.avm.cfg_reg + ADDRREG_OFFSET,
- HSCX_REG_OFFSET + hscx * HSCX_CH_DIFF + offset);
- ret = bytein(cs->hw.avm.cfg_reg + DATAREG_OFFSET);
- return ret;
-}
-
-static inline void
-WriteHSCX(struct IsdnCardState *cs, int hscx, u_char offset, u_char value)
-{
- offset -= 0x20;
- byteout(cs->hw.avm.cfg_reg + ADDRREG_OFFSET,
- HSCX_REG_OFFSET + hscx * HSCX_CH_DIFF + offset);
- byteout(cs->hw.avm.cfg_reg + DATAREG_OFFSET, value);
-}
-
-static inline void
-ReadHSCXfifo(struct IsdnCardState *cs, int hscx, u_char *data, int size)
-{
- byteout(cs->hw.avm.cfg_reg + ADDRREG_OFFSET,
- HSCX_FIFO_OFFSET + hscx * HSCX_CH_DIFF);
- insb(cs->hw.avm.cfg_reg + DATAREG_OFFSET, data, size);
-}
-
-static inline void
-WriteHSCXfifo(struct IsdnCardState *cs, int hscx, u_char *data, int size)
-{
- byteout(cs->hw.avm.cfg_reg + ADDRREG_OFFSET,
- HSCX_FIFO_OFFSET + hscx * HSCX_CH_DIFF);
- outsb(cs->hw.avm.cfg_reg + DATAREG_OFFSET, data, size);
-}
-
-/*
- * fast interrupt HSCX stuff goes here
- */
-
-#define READHSCX(cs, nr, reg) ReadHSCX(cs, nr, reg)
-#define WRITEHSCX(cs, nr, reg, data) WriteHSCX(cs, nr, reg, data)
-#define READHSCXFIFO(cs, nr, ptr, cnt) ReadHSCXfifo(cs, nr, ptr, cnt)
-#define WRITEHSCXFIFO(cs, nr, ptr, cnt) WriteHSCXfifo(cs, nr, ptr, cnt)
-
-#include "hscx_irq.c"
-
-static irqreturn_t
-avm_a1p_interrupt(int intno, void *dev_id)
-{
- struct IsdnCardState *cs = dev_id;
- u_char val, sval;
- u_long flags;
-
- spin_lock_irqsave(&cs->lock, flags);
- while ((sval = (~bytein(cs->hw.avm.cfg_reg + ASL0_OFFSET) & ASL0_R_IRQPENDING))) {
- if (cs->debug & L1_DEB_INTSTAT)
- debugl1(cs, "avm IntStatus %x", sval);
- if (sval & ASL0_R_HSCX) {
- val = ReadHSCX(cs, 1, HSCX_ISTA);
- if (val)
- hscx_int_main(cs, val);
- }
- if (sval & ASL0_R_ISAC) {
- val = ReadISAC(cs, ISAC_ISTA);
- if (val)
- isac_interrupt(cs, val);
- }
- }
- WriteHSCX(cs, 0, HSCX_MASK, 0xff);
- WriteHSCX(cs, 1, HSCX_MASK, 0xff);
- WriteISAC(cs, ISAC_MASK, 0xff);
- WriteISAC(cs, ISAC_MASK, 0x00);
- WriteHSCX(cs, 0, HSCX_MASK, 0x00);
- WriteHSCX(cs, 1, HSCX_MASK, 0x00);
- spin_unlock_irqrestore(&cs->lock, flags);
- return IRQ_HANDLED;
-}
-
-static int
-AVM_card_msg(struct IsdnCardState *cs, int mt, void *arg)
-{
- u_long flags;
-
- switch (mt) {
- case CARD_RESET:
- spin_lock_irqsave(&cs->lock, flags);
- byteout(cs->hw.avm.cfg_reg + ASL0_OFFSET, 0x00);
- HZDELAY(HZ / 5 + 1);
- byteout(cs->hw.avm.cfg_reg + ASL0_OFFSET, ASL0_W_RESET);
- HZDELAY(HZ / 5 + 1);
- byteout(cs->hw.avm.cfg_reg + ASL0_OFFSET, 0x00);
- spin_unlock_irqrestore(&cs->lock, flags);
- return 0;
-
- case CARD_RELEASE:
- /* free_irq is done in HiSax_closecard(). */
- /* free_irq(cs->irq, cs); */
- return 0;
-
- case CARD_INIT:
- spin_lock_irqsave(&cs->lock, flags);
- byteout(cs->hw.avm.cfg_reg + ASL0_OFFSET, ASL0_W_TDISABLE | ASL0_W_TRESET | ASL0_W_IRQENABLE);
- clear_pending_isac_ints(cs);
- clear_pending_hscx_ints(cs);
- inithscxisac(cs, 1);
- inithscxisac(cs, 2);
- spin_unlock_irqrestore(&cs->lock, flags);
- return 0;
-
- case CARD_TEST:
- /* we really don't need it for the PCMCIA Version */
- return 0;
-
- default:
- /* all card drivers ignore others, so we do the same */
- return 0;
- }
- return 0;
-}
-
-int setup_avm_a1_pcmcia(struct IsdnCard *card)
-{
- u_char model, vers;
- struct IsdnCardState *cs = card->cs;
- char tmp[64];
-
-
- strcpy(tmp, avm_revision);
- printk(KERN_INFO "HiSax: AVM A1 PCMCIA driver Rev. %s\n",
- HiSax_getrev(tmp));
- if (cs->typ != ISDN_CTYPE_A1_PCMCIA)
- return (0);
-
- cs->hw.avm.cfg_reg = card->para[1];
- cs->irq = card->para[0];
-
-
- byteout(cs->hw.avm.cfg_reg + ASL1_OFFSET, ASL1_W_ENABLE_S0);
- byteout(cs->hw.avm.cfg_reg + ASL0_OFFSET, 0x00);
- HZDELAY(HZ / 5 + 1);
- byteout(cs->hw.avm.cfg_reg + ASL0_OFFSET, ASL0_W_RESET);
- HZDELAY(HZ / 5 + 1);
- byteout(cs->hw.avm.cfg_reg + ASL0_OFFSET, 0x00);
-
- byteout(cs->hw.avm.cfg_reg + ASL0_OFFSET, ASL0_W_TDISABLE | ASL0_W_TRESET);
-
- model = bytein(cs->hw.avm.cfg_reg + MODREG_OFFSET);
- vers = bytein(cs->hw.avm.cfg_reg + VERREG_OFFSET);
-
- printk(KERN_INFO "AVM A1 PCMCIA: io 0x%x irq %d model %d version %d\n",
- cs->hw.avm.cfg_reg, cs->irq, model, vers);
-
- setup_isac(cs);
- cs->readisac = &ReadISAC;
- cs->writeisac = &WriteISAC;
- cs->readisacfifo = &ReadISACfifo;
- cs->writeisacfifo = &WriteISACfifo;
- cs->BC_Read_Reg = &ReadHSCX;
- cs->BC_Write_Reg = &WriteHSCX;
- cs->BC_Send_Data = &hscx_fill_fifo;
- cs->cardmsg = &AVM_card_msg;
- cs->irq_flags = IRQF_SHARED;
- cs->irq_func = &avm_a1p_interrupt;
-
- ISACVersion(cs, "AVM A1 PCMCIA:");
- if (HscxVersion(cs, "AVM A1 PCMCIA:")) {
- printk(KERN_WARNING
- "AVM A1 PCMCIA: wrong HSCX versions check IO address\n");
- return (0);
- }
- return (1);
-}
diff --git a/drivers/isdn/hisax/avm_pci.c b/drivers/isdn/hisax/avm_pci.c
deleted file mode 100644
index b161456c942e..000000000000
--- a/drivers/isdn/hisax/avm_pci.c
+++ /dev/null
@@ -1,904 +0,0 @@
-/* $Id: avm_pci.c,v 1.29.2.4 2004/02/11 13:21:32 keil Exp $
- *
- * low level stuff for AVM Fritz!PCI and ISA PnP isdn cards
- *
- * Author Karsten Keil
- * Copyright by Karsten Keil <keil@isdn4linux.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- * Thanks to AVM, Berlin for information
- *
- */
-
-#include <linux/init.h>
-#include "hisax.h"
-#include "isac.h"
-#include "isdnl1.h"
-#include <linux/pci.h>
-#include <linux/slab.h>
-#include <linux/isapnp.h>
-#include <linux/interrupt.h>
-
-static const char *avm_pci_rev = "$Revision: 1.29.2.4 $";
-
-#define AVM_FRITZ_PCI 1
-#define AVM_FRITZ_PNP 2
-
-#define HDLC_FIFO 0x0
-#define HDLC_STATUS 0x4
-
-#define AVM_HDLC_1 0x00
-#define AVM_HDLC_2 0x01
-#define AVM_ISAC_FIFO 0x02
-#define AVM_ISAC_REG_LOW 0x04
-#define AVM_ISAC_REG_HIGH 0x06
-
-#define AVM_STATUS0_IRQ_ISAC 0x01
-#define AVM_STATUS0_IRQ_HDLC 0x02
-#define AVM_STATUS0_IRQ_TIMER 0x04
-#define AVM_STATUS0_IRQ_MASK 0x07
-
-#define AVM_STATUS0_RESET 0x01
-#define AVM_STATUS0_DIS_TIMER 0x02
-#define AVM_STATUS0_RES_TIMER 0x04
-#define AVM_STATUS0_ENA_IRQ 0x08
-#define AVM_STATUS0_TESTBIT 0x10
-
-#define AVM_STATUS1_INT_SEL 0x0f
-#define AVM_STATUS1_ENA_IOM 0x80
-
-#define HDLC_MODE_ITF_FLG 0x01
-#define HDLC_MODE_TRANS 0x02
-#define HDLC_MODE_CCR_7 0x04
-#define HDLC_MODE_CCR_16 0x08
-#define HDLC_MODE_TESTLOOP 0x80
-
-#define HDLC_INT_XPR 0x80
-#define HDLC_INT_XDU 0x40
-#define HDLC_INT_RPR 0x20
-#define HDLC_INT_MASK 0xE0
-
-#define HDLC_STAT_RME 0x01
-#define HDLC_STAT_RDO 0x10
-#define HDLC_STAT_CRCVFRRAB 0x0E
-#define HDLC_STAT_CRCVFR 0x06
-#define HDLC_STAT_RML_MASK 0x3f00
-
-#define HDLC_CMD_XRS 0x80
-#define HDLC_CMD_XME 0x01
-#define HDLC_CMD_RRS 0x20
-#define HDLC_CMD_XML_MASK 0x3f00
-
-
-/* Interface functions */
-
-static u_char
-ReadISAC(struct IsdnCardState *cs, u_char offset)
-{
- register u_char idx = (offset > 0x2f) ? AVM_ISAC_REG_HIGH : AVM_ISAC_REG_LOW;
- register u_char val;
-
- outb(idx, cs->hw.avm.cfg_reg + 4);
- val = inb(cs->hw.avm.isac + (offset & 0xf));
- return (val);
-}
-
-static void
-WriteISAC(struct IsdnCardState *cs, u_char offset, u_char value)
-{
- register u_char idx = (offset > 0x2f) ? AVM_ISAC_REG_HIGH : AVM_ISAC_REG_LOW;
-
- outb(idx, cs->hw.avm.cfg_reg + 4);
- outb(value, cs->hw.avm.isac + (offset & 0xf));
-}
-
-static void
-ReadISACfifo(struct IsdnCardState *cs, u_char *data, int size)
-{
- outb(AVM_ISAC_FIFO, cs->hw.avm.cfg_reg + 4);
- insb(cs->hw.avm.isac, data, size);
-}
-
-static void
-WriteISACfifo(struct IsdnCardState *cs, u_char *data, int size)
-{
- outb(AVM_ISAC_FIFO, cs->hw.avm.cfg_reg + 4);
- outsb(cs->hw.avm.isac, data, size);
-}
-
-static inline u_int
-ReadHDLCPCI(struct IsdnCardState *cs, int chan, u_char offset)
-{
- register u_int idx = chan ? AVM_HDLC_2 : AVM_HDLC_1;
- register u_int val;
-
- outl(idx, cs->hw.avm.cfg_reg + 4);
- val = inl(cs->hw.avm.isac + offset);
- return (val);
-}
-
-static inline void
-WriteHDLCPCI(struct IsdnCardState *cs, int chan, u_char offset, u_int value)
-{
- register u_int idx = chan ? AVM_HDLC_2 : AVM_HDLC_1;
-
- outl(idx, cs->hw.avm.cfg_reg + 4);
- outl(value, cs->hw.avm.isac + offset);
-}
-
-static inline u_char
-ReadHDLCPnP(struct IsdnCardState *cs, int chan, u_char offset)
-{
- register u_char idx = chan ? AVM_HDLC_2 : AVM_HDLC_1;
- register u_char val;
-
- outb(idx, cs->hw.avm.cfg_reg + 4);
- val = inb(cs->hw.avm.isac + offset);
- return (val);
-}
-
-static inline void
-WriteHDLCPnP(struct IsdnCardState *cs, int chan, u_char offset, u_char value)
-{
- register u_char idx = chan ? AVM_HDLC_2 : AVM_HDLC_1;
-
- outb(idx, cs->hw.avm.cfg_reg + 4);
- outb(value, cs->hw.avm.isac + offset);
-}
-
-static u_char
-ReadHDLC_s(struct IsdnCardState *cs, int chan, u_char offset)
-{
- return (0xff & ReadHDLCPCI(cs, chan, offset));
-}
-
-static void
-WriteHDLC_s(struct IsdnCardState *cs, int chan, u_char offset, u_char value)
-{
- WriteHDLCPCI(cs, chan, offset, value);
-}
-
-static inline
-struct BCState *Sel_BCS(struct IsdnCardState *cs, int channel)
-{
- if (cs->bcs[0].mode && (cs->bcs[0].channel == channel))
- return (&cs->bcs[0]);
- else if (cs->bcs[1].mode && (cs->bcs[1].channel == channel))
- return (&cs->bcs[1]);
- else
- return (NULL);
-}
-
-static void
-write_ctrl(struct BCState *bcs, int which) {
-
- if (bcs->cs->debug & L1_DEB_HSCX)
- debugl1(bcs->cs, "hdlc %c wr%x ctrl %x",
- 'A' + bcs->channel, which, bcs->hw.hdlc.ctrl.ctrl);
- if (bcs->cs->subtyp == AVM_FRITZ_PCI) {
- WriteHDLCPCI(bcs->cs, bcs->channel, HDLC_STATUS, bcs->hw.hdlc.ctrl.ctrl);
- } else {
- if (which & 4)
- WriteHDLCPnP(bcs->cs, bcs->channel, HDLC_STATUS + 2,
- bcs->hw.hdlc.ctrl.sr.mode);
- if (which & 2)
- WriteHDLCPnP(bcs->cs, bcs->channel, HDLC_STATUS + 1,
- bcs->hw.hdlc.ctrl.sr.xml);
- if (which & 1)
- WriteHDLCPnP(bcs->cs, bcs->channel, HDLC_STATUS,
- bcs->hw.hdlc.ctrl.sr.cmd);
- }
-}
-
-static void
-modehdlc(struct BCState *bcs, int mode, int bc)
-{
- struct IsdnCardState *cs = bcs->cs;
- int hdlc = bcs->channel;
-
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "hdlc %c mode %d --> %d ichan %d --> %d",
- 'A' + hdlc, bcs->mode, mode, hdlc, bc);
- bcs->hw.hdlc.ctrl.ctrl = 0;
- switch (mode) {
- case (-1): /* used for init */
- bcs->mode = 1;
- bcs->channel = bc;
- bc = 0;
- /* fall through */
- case (L1_MODE_NULL):
- if (bcs->mode == L1_MODE_NULL)
- return;
- bcs->hw.hdlc.ctrl.sr.cmd = HDLC_CMD_XRS | HDLC_CMD_RRS;
- bcs->hw.hdlc.ctrl.sr.mode = HDLC_MODE_TRANS;
- write_ctrl(bcs, 5);
- bcs->mode = L1_MODE_NULL;
- bcs->channel = bc;
- break;
- case (L1_MODE_TRANS):
- bcs->mode = mode;
- bcs->channel = bc;
- bcs->hw.hdlc.ctrl.sr.cmd = HDLC_CMD_XRS | HDLC_CMD_RRS;
- bcs->hw.hdlc.ctrl.sr.mode = HDLC_MODE_TRANS;
- write_ctrl(bcs, 5);
- bcs->hw.hdlc.ctrl.sr.cmd = HDLC_CMD_XRS;
- write_ctrl(bcs, 1);
- bcs->hw.hdlc.ctrl.sr.cmd = 0;
- schedule_event(bcs, B_XMTBUFREADY);
- break;
- case (L1_MODE_HDLC):
- bcs->mode = mode;
- bcs->channel = bc;
- bcs->hw.hdlc.ctrl.sr.cmd = HDLC_CMD_XRS | HDLC_CMD_RRS;
- bcs->hw.hdlc.ctrl.sr.mode = HDLC_MODE_ITF_FLG;
- write_ctrl(bcs, 5);
- bcs->hw.hdlc.ctrl.sr.cmd = HDLC_CMD_XRS;
- write_ctrl(bcs, 1);
- bcs->hw.hdlc.ctrl.sr.cmd = 0;
- schedule_event(bcs, B_XMTBUFREADY);
- break;
- }
-}
-
-static inline void
-hdlc_empty_fifo(struct BCState *bcs, int count)
-{
- register u_int *ptr;
- u_char *p;
- u_char idx = bcs->channel ? AVM_HDLC_2 : AVM_HDLC_1;
- int cnt = 0;
- struct IsdnCardState *cs = bcs->cs;
-
- if ((cs->debug & L1_DEB_HSCX) && !(cs->debug & L1_DEB_HSCX_FIFO))
- debugl1(cs, "hdlc_empty_fifo %d", count);
- if (bcs->hw.hdlc.rcvidx + count > HSCX_BUFMAX) {
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "hdlc_empty_fifo: incoming packet too large");
- return;
- }
- p = bcs->hw.hdlc.rcvbuf + bcs->hw.hdlc.rcvidx;
- ptr = (u_int *)p;
- bcs->hw.hdlc.rcvidx += count;
- if (cs->subtyp == AVM_FRITZ_PCI) {
- outl(idx, cs->hw.avm.cfg_reg + 4);
- while (cnt < count) {
-#ifdef __powerpc__
- *ptr++ = in_be32((unsigned *)(cs->hw.avm.isac + _IO_BASE));
-#else
- *ptr++ = inl(cs->hw.avm.isac);
-#endif /* __powerpc__ */
- cnt += 4;
- }
- } else {
- outb(idx, cs->hw.avm.cfg_reg + 4);
- while (cnt < count) {
- *p++ = inb(cs->hw.avm.isac);
- cnt++;
- }
- }
- if (cs->debug & L1_DEB_HSCX_FIFO) {
- char *t = bcs->blog;
-
- if (cs->subtyp == AVM_FRITZ_PNP)
- p = (u_char *) ptr;
- t += sprintf(t, "hdlc_empty_fifo %c cnt %d",
- bcs->channel ? 'B' : 'A', count);
- QuickHex(t, p, count);
- debugl1(cs, "%s", bcs->blog);
- }
-}
-
-static inline void
-hdlc_fill_fifo(struct BCState *bcs)
-{
- struct IsdnCardState *cs = bcs->cs;
- int count, cnt = 0;
- int fifo_size = 32;
- u_char *p;
- u_int *ptr;
-
- if ((cs->debug & L1_DEB_HSCX) && !(cs->debug & L1_DEB_HSCX_FIFO))
- debugl1(cs, "hdlc_fill_fifo");
- if (!bcs->tx_skb)
- return;
- if (bcs->tx_skb->len <= 0)
- return;
-
- bcs->hw.hdlc.ctrl.sr.cmd &= ~HDLC_CMD_XME;
- if (bcs->tx_skb->len > fifo_size) {
- count = fifo_size;
- } else {
- count = bcs->tx_skb->len;
- if (bcs->mode != L1_MODE_TRANS)
- bcs->hw.hdlc.ctrl.sr.cmd |= HDLC_CMD_XME;
- }
- if ((cs->debug & L1_DEB_HSCX) && !(cs->debug & L1_DEB_HSCX_FIFO))
- debugl1(cs, "hdlc_fill_fifo %d/%u", count, bcs->tx_skb->len);
- p = bcs->tx_skb->data;
- ptr = (u_int *)p;
- skb_pull(bcs->tx_skb, count);
- bcs->tx_cnt -= count;
- bcs->hw.hdlc.count += count;
- bcs->hw.hdlc.ctrl.sr.xml = ((count == fifo_size) ? 0 : count);
- write_ctrl(bcs, 3); /* sets the correct index too */
- if (cs->subtyp == AVM_FRITZ_PCI) {
- while (cnt < count) {
-#ifdef __powerpc__
- out_be32((unsigned *)(cs->hw.avm.isac + _IO_BASE), *ptr++);
-#else
- outl(*ptr++, cs->hw.avm.isac);
-#endif /* __powerpc__ */
- cnt += 4;
- }
- } else {
- while (cnt < count) {
- outb(*p++, cs->hw.avm.isac);
- cnt++;
- }
- }
- if (cs->debug & L1_DEB_HSCX_FIFO) {
- char *t = bcs->blog;
-
- if (cs->subtyp == AVM_FRITZ_PNP)
- p = (u_char *) ptr;
- t += sprintf(t, "hdlc_fill_fifo %c cnt %d",
- bcs->channel ? 'B' : 'A', count);
- QuickHex(t, p, count);
- debugl1(cs, "%s", bcs->blog);
- }
-}
-
-static void
-HDLC_irq(struct BCState *bcs, u_int stat) {
- int len;
- struct sk_buff *skb;
-
- if (bcs->cs->debug & L1_DEB_HSCX)
- debugl1(bcs->cs, "ch%d stat %#x", bcs->channel, stat);
- if (stat & HDLC_INT_RPR) {
- if (stat & HDLC_STAT_RDO) {
- if (bcs->cs->debug & L1_DEB_HSCX)
- debugl1(bcs->cs, "RDO");
- else
- debugl1(bcs->cs, "ch%d stat %#x", bcs->channel, stat);
- bcs->hw.hdlc.ctrl.sr.xml = 0;
- bcs->hw.hdlc.ctrl.sr.cmd |= HDLC_CMD_RRS;
- write_ctrl(bcs, 1);
- bcs->hw.hdlc.ctrl.sr.cmd &= ~HDLC_CMD_RRS;
- write_ctrl(bcs, 1);
- bcs->hw.hdlc.rcvidx = 0;
- } else {
- if (!(len = (stat & HDLC_STAT_RML_MASK) >> 8))
- len = 32;
- hdlc_empty_fifo(bcs, len);
- if ((stat & HDLC_STAT_RME) || (bcs->mode == L1_MODE_TRANS)) {
- if (((stat & HDLC_STAT_CRCVFRRAB) == HDLC_STAT_CRCVFR) ||
- (bcs->mode == L1_MODE_TRANS)) {
- if (!(skb = dev_alloc_skb(bcs->hw.hdlc.rcvidx)))
- printk(KERN_WARNING "HDLC: receive out of memory\n");
- else {
- skb_put_data(skb,
- bcs->hw.hdlc.rcvbuf,
- bcs->hw.hdlc.rcvidx);
- skb_queue_tail(&bcs->rqueue, skb);
- }
- bcs->hw.hdlc.rcvidx = 0;
- schedule_event(bcs, B_RCVBUFREADY);
- } else {
- if (bcs->cs->debug & L1_DEB_HSCX)
- debugl1(bcs->cs, "invalid frame");
- else
- debugl1(bcs->cs, "ch%d invalid frame %#x", bcs->channel, stat);
- bcs->hw.hdlc.rcvidx = 0;
- }
- }
- }
- }
- if (stat & HDLC_INT_XDU) {
-		/* Here we lost a TX interrupt, so
- * restart transmitting the whole frame.
- */
- if (bcs->tx_skb) {
- skb_push(bcs->tx_skb, bcs->hw.hdlc.count);
- bcs->tx_cnt += bcs->hw.hdlc.count;
- bcs->hw.hdlc.count = 0;
- if (bcs->cs->debug & L1_DEB_WARN)
- debugl1(bcs->cs, "ch%d XDU", bcs->channel);
- } else if (bcs->cs->debug & L1_DEB_WARN)
- debugl1(bcs->cs, "ch%d XDU without skb", bcs->channel);
- bcs->hw.hdlc.ctrl.sr.xml = 0;
- bcs->hw.hdlc.ctrl.sr.cmd |= HDLC_CMD_XRS;
- write_ctrl(bcs, 1);
- bcs->hw.hdlc.ctrl.sr.cmd &= ~HDLC_CMD_XRS;
- write_ctrl(bcs, 1);
- hdlc_fill_fifo(bcs);
- } else if (stat & HDLC_INT_XPR) {
- if (bcs->tx_skb) {
- if (bcs->tx_skb->len) {
- hdlc_fill_fifo(bcs);
- return;
- } else {
- if (test_bit(FLG_LLI_L1WAKEUP, &bcs->st->lli.flag) &&
- (PACKET_NOACK != bcs->tx_skb->pkt_type)) {
- u_long flags;
- spin_lock_irqsave(&bcs->aclock, flags);
- bcs->ackcnt += bcs->hw.hdlc.count;
- spin_unlock_irqrestore(&bcs->aclock, flags);
- schedule_event(bcs, B_ACKPENDING);
- }
- dev_kfree_skb_irq(bcs->tx_skb);
- bcs->hw.hdlc.count = 0;
- bcs->tx_skb = NULL;
- }
- }
- if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) {
- bcs->hw.hdlc.count = 0;
- test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
- hdlc_fill_fifo(bcs);
- } else {
- test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
- schedule_event(bcs, B_XMTBUFREADY);
- }
- }
-}
-
-static inline void
-HDLC_irq_main(struct IsdnCardState *cs)
-{
- u_int stat;
- struct BCState *bcs;
-
- if (cs->subtyp == AVM_FRITZ_PCI) {
- stat = ReadHDLCPCI(cs, 0, HDLC_STATUS);
- } else {
- stat = ReadHDLCPnP(cs, 0, HDLC_STATUS);
- if (stat & HDLC_INT_RPR)
- stat |= (ReadHDLCPnP(cs, 0, HDLC_STATUS + 1)) << 8;
- }
- if (stat & HDLC_INT_MASK) {
- if (!(bcs = Sel_BCS(cs, 0))) {
- if (cs->debug)
- debugl1(cs, "hdlc spurious channel 0 IRQ");
- } else
- HDLC_irq(bcs, stat);
- }
- if (cs->subtyp == AVM_FRITZ_PCI) {
- stat = ReadHDLCPCI(cs, 1, HDLC_STATUS);
- } else {
- stat = ReadHDLCPnP(cs, 1, HDLC_STATUS);
- if (stat & HDLC_INT_RPR)
- stat |= (ReadHDLCPnP(cs, 1, HDLC_STATUS + 1)) << 8;
- }
- if (stat & HDLC_INT_MASK) {
- if (!(bcs = Sel_BCS(cs, 1))) {
- if (cs->debug)
- debugl1(cs, "hdlc spurious channel 1 IRQ");
- } else
- HDLC_irq(bcs, stat);
- }
-}
-
-static void
-hdlc_l2l1(struct PStack *st, int pr, void *arg)
-{
- struct BCState *bcs = st->l1.bcs;
- struct sk_buff *skb = arg;
- u_long flags;
-
- switch (pr) {
- case (PH_DATA | REQUEST):
- spin_lock_irqsave(&bcs->cs->lock, flags);
- if (bcs->tx_skb) {
- skb_queue_tail(&bcs->squeue, skb);
- } else {
- bcs->tx_skb = skb;
- test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
- bcs->hw.hdlc.count = 0;
- bcs->cs->BC_Send_Data(bcs);
- }
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
- break;
- case (PH_PULL | INDICATION):
- spin_lock_irqsave(&bcs->cs->lock, flags);
- if (bcs->tx_skb) {
- printk(KERN_WARNING "hdlc_l2l1: this shouldn't happen\n");
- } else {
- test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
- bcs->tx_skb = skb;
- bcs->hw.hdlc.count = 0;
- bcs->cs->BC_Send_Data(bcs);
- }
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
- break;
- case (PH_PULL | REQUEST):
- if (!bcs->tx_skb) {
- test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
- st->l1.l1l2(st, PH_PULL | CONFIRM, NULL);
- } else
- test_and_set_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
- break;
- case (PH_ACTIVATE | REQUEST):
- spin_lock_irqsave(&bcs->cs->lock, flags);
- test_and_set_bit(BC_FLG_ACTIV, &bcs->Flag);
- modehdlc(bcs, st->l1.mode, st->l1.bc);
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
- l1_msg_b(st, pr, arg);
- break;
- case (PH_DEACTIVATE | REQUEST):
- l1_msg_b(st, pr, arg);
- break;
- case (PH_DEACTIVATE | CONFIRM):
- spin_lock_irqsave(&bcs->cs->lock, flags);
- test_and_clear_bit(BC_FLG_ACTIV, &bcs->Flag);
- test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
- modehdlc(bcs, 0, st->l1.bc);
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
- st->l1.l1l2(st, PH_DEACTIVATE | CONFIRM, NULL);
- break;
- }
-}
-
-static void
-close_hdlcstate(struct BCState *bcs)
-{
- modehdlc(bcs, 0, 0);
- if (test_and_clear_bit(BC_FLG_INIT, &bcs->Flag)) {
- kfree(bcs->hw.hdlc.rcvbuf);
- bcs->hw.hdlc.rcvbuf = NULL;
- kfree(bcs->blog);
- bcs->blog = NULL;
- skb_queue_purge(&bcs->rqueue);
- skb_queue_purge(&bcs->squeue);
- if (bcs->tx_skb) {
- dev_kfree_skb_any(bcs->tx_skb);
- bcs->tx_skb = NULL;
- test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
- }
- }
-}
-
-static int
-open_hdlcstate(struct IsdnCardState *cs, struct BCState *bcs)
-{
- if (!test_and_set_bit(BC_FLG_INIT, &bcs->Flag)) {
- if (!(bcs->hw.hdlc.rcvbuf = kmalloc(HSCX_BUFMAX, GFP_ATOMIC))) {
- printk(KERN_WARNING
- "HiSax: No memory for hdlc.rcvbuf\n");
- return (1);
- }
- if (!(bcs->blog = kmalloc(MAX_BLOG_SPACE, GFP_ATOMIC))) {
- printk(KERN_WARNING
- "HiSax: No memory for bcs->blog\n");
- test_and_clear_bit(BC_FLG_INIT, &bcs->Flag);
- kfree(bcs->hw.hdlc.rcvbuf);
- bcs->hw.hdlc.rcvbuf = NULL;
- return (2);
- }
- skb_queue_head_init(&bcs->rqueue);
- skb_queue_head_init(&bcs->squeue);
- }
- bcs->tx_skb = NULL;
- test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
- bcs->event = 0;
- bcs->hw.hdlc.rcvidx = 0;
- bcs->tx_cnt = 0;
- return (0);
-}
-
-static int
-setstack_hdlc(struct PStack *st, struct BCState *bcs)
-{
- bcs->channel = st->l1.bc;
- if (open_hdlcstate(st->l1.hardware, bcs))
- return (-1);
- st->l1.bcs = bcs;
- st->l2.l2l1 = hdlc_l2l1;
- setstack_manager(st);
- bcs->st = st;
- setstack_l1_B(st);
- return (0);
-}
-
-#if 0
-void __init
-clear_pending_hdlc_ints(struct IsdnCardState *cs)
-{
- u_int val;
-
- if (cs->subtyp == AVM_FRITZ_PCI) {
- val = ReadHDLCPCI(cs, 0, HDLC_STATUS);
- debugl1(cs, "HDLC 1 STA %x", val);
- val = ReadHDLCPCI(cs, 1, HDLC_STATUS);
- debugl1(cs, "HDLC 2 STA %x", val);
- } else {
- val = ReadHDLCPnP(cs, 0, HDLC_STATUS);
- debugl1(cs, "HDLC 1 STA %x", val);
- val = ReadHDLCPnP(cs, 0, HDLC_STATUS + 1);
- debugl1(cs, "HDLC 1 RML %x", val);
- val = ReadHDLCPnP(cs, 0, HDLC_STATUS + 2);
- debugl1(cs, "HDLC 1 MODE %x", val);
- val = ReadHDLCPnP(cs, 0, HDLC_STATUS + 3);
- debugl1(cs, "HDLC 1 VIN %x", val);
- val = ReadHDLCPnP(cs, 1, HDLC_STATUS);
- debugl1(cs, "HDLC 2 STA %x", val);
- val = ReadHDLCPnP(cs, 1, HDLC_STATUS + 1);
- debugl1(cs, "HDLC 2 RML %x", val);
- val = ReadHDLCPnP(cs, 1, HDLC_STATUS + 2);
- debugl1(cs, "HDLC 2 MODE %x", val);
- val = ReadHDLCPnP(cs, 1, HDLC_STATUS + 3);
- debugl1(cs, "HDLC 2 VIN %x", val);
- }
-}
-#endif /* 0 */
-
-static void
-inithdlc(struct IsdnCardState *cs)
-{
- cs->bcs[0].BC_SetStack = setstack_hdlc;
- cs->bcs[1].BC_SetStack = setstack_hdlc;
- cs->bcs[0].BC_Close = close_hdlcstate;
- cs->bcs[1].BC_Close = close_hdlcstate;
- modehdlc(cs->bcs, -1, 0);
- modehdlc(cs->bcs + 1, -1, 1);
-}
-
-static irqreturn_t
-avm_pcipnp_interrupt(int intno, void *dev_id)
-{
- struct IsdnCardState *cs = dev_id;
- u_long flags;
- u_char val;
- u_char sval;
-
- spin_lock_irqsave(&cs->lock, flags);
- sval = inb(cs->hw.avm.cfg_reg + 2);
- if ((sval & AVM_STATUS0_IRQ_MASK) == AVM_STATUS0_IRQ_MASK) {
-		/* possibly a shared IRQ request */
- spin_unlock_irqrestore(&cs->lock, flags);
- return IRQ_NONE;
- }
- if (!(sval & AVM_STATUS0_IRQ_ISAC)) {
- val = ReadISAC(cs, ISAC_ISTA);
- isac_interrupt(cs, val);
- }
- if (!(sval & AVM_STATUS0_IRQ_HDLC)) {
- HDLC_irq_main(cs);
- }
- WriteISAC(cs, ISAC_MASK, 0xFF);
- WriteISAC(cs, ISAC_MASK, 0x0);
- spin_unlock_irqrestore(&cs->lock, flags);
- return IRQ_HANDLED;
-}
-
-static void
-reset_avmpcipnp(struct IsdnCardState *cs)
-{
- printk(KERN_INFO "AVM PCI/PnP: reset\n");
- outb(AVM_STATUS0_RESET | AVM_STATUS0_DIS_TIMER, cs->hw.avm.cfg_reg + 2);
- mdelay(10);
- outb(AVM_STATUS0_DIS_TIMER | AVM_STATUS0_RES_TIMER | AVM_STATUS0_ENA_IRQ, cs->hw.avm.cfg_reg + 2);
- outb(AVM_STATUS1_ENA_IOM | cs->irq, cs->hw.avm.cfg_reg + 3);
- mdelay(10);
- printk(KERN_INFO "AVM PCI/PnP: S1 %x\n", inb(cs->hw.avm.cfg_reg + 3));
-}
-
-static int
-AVM_card_msg(struct IsdnCardState *cs, int mt, void *arg)
-{
- u_long flags;
-
- switch (mt) {
- case CARD_RESET:
- spin_lock_irqsave(&cs->lock, flags);
- reset_avmpcipnp(cs);
- spin_unlock_irqrestore(&cs->lock, flags);
- return (0);
- case CARD_RELEASE:
- outb(0, cs->hw.avm.cfg_reg + 2);
- release_region(cs->hw.avm.cfg_reg, 32);
- return (0);
- case CARD_INIT:
- spin_lock_irqsave(&cs->lock, flags);
- reset_avmpcipnp(cs);
- clear_pending_isac_ints(cs);
- initisac(cs);
- inithdlc(cs);
- outb(AVM_STATUS0_DIS_TIMER | AVM_STATUS0_RES_TIMER,
- cs->hw.avm.cfg_reg + 2);
- WriteISAC(cs, ISAC_MASK, 0);
- outb(AVM_STATUS0_DIS_TIMER | AVM_STATUS0_RES_TIMER |
- AVM_STATUS0_ENA_IRQ, cs->hw.avm.cfg_reg + 2);
- /* RESET Receiver and Transmitter */
- WriteISAC(cs, ISAC_CMDR, 0x41);
- spin_unlock_irqrestore(&cs->lock, flags);
- return (0);
- case CARD_TEST:
- return (0);
- }
- return (0);
-}
-
-static int avm_setup_rest(struct IsdnCardState *cs)
-{
- u_int val, ver;
-
- cs->hw.avm.isac = cs->hw.avm.cfg_reg + 0x10;
- if (!request_region(cs->hw.avm.cfg_reg, 32,
- (cs->subtyp == AVM_FRITZ_PCI) ? "avm PCI" : "avm PnP")) {
- printk(KERN_WARNING
- "HiSax: Fritz!PCI/PNP config port %x-%x already in use\n",
- cs->hw.avm.cfg_reg,
- cs->hw.avm.cfg_reg + 31);
- return (0);
- }
- switch (cs->subtyp) {
- case AVM_FRITZ_PCI:
- val = inl(cs->hw.avm.cfg_reg);
- printk(KERN_INFO "AVM PCI: stat %#x\n", val);
- printk(KERN_INFO "AVM PCI: Class %X Rev %d\n",
- val & 0xff, (val >> 8) & 0xff);
- cs->BC_Read_Reg = &ReadHDLC_s;
- cs->BC_Write_Reg = &WriteHDLC_s;
- break;
- case AVM_FRITZ_PNP:
- val = inb(cs->hw.avm.cfg_reg);
- ver = inb(cs->hw.avm.cfg_reg + 1);
- printk(KERN_INFO "AVM PnP: Class %X Rev %d\n", val, ver);
- cs->BC_Read_Reg = &ReadHDLCPnP;
- cs->BC_Write_Reg = &WriteHDLCPnP;
- break;
- default:
- printk(KERN_WARNING "AVM unknown subtype %d\n", cs->subtyp);
- return (0);
- }
- printk(KERN_INFO "HiSax: %s config irq:%d base:0x%X\n",
- (cs->subtyp == AVM_FRITZ_PCI) ? "AVM Fritz!PCI" : "AVM Fritz!PnP",
- cs->irq, cs->hw.avm.cfg_reg);
-
- setup_isac(cs);
- cs->readisac = &ReadISAC;
- cs->writeisac = &WriteISAC;
- cs->readisacfifo = &ReadISACfifo;
- cs->writeisacfifo = &WriteISACfifo;
- cs->BC_Send_Data = &hdlc_fill_fifo;
- cs->cardmsg = &AVM_card_msg;
- cs->irq_func = &avm_pcipnp_interrupt;
- cs->writeisac(cs, ISAC_MASK, 0xFF);
- ISACVersion(cs, (cs->subtyp == AVM_FRITZ_PCI) ? "AVM PCI:" : "AVM PnP:");
- return (1);
-}
-
-#ifndef __ISAPNP__
-
-static int avm_pnp_setup(struct IsdnCardState *cs)
-{
- return (1); /* no-op: success */
-}
-
-#else
-
-static struct pnp_card *pnp_avm_c = NULL;
-
-static int avm_pnp_setup(struct IsdnCardState *cs)
-{
- struct pnp_dev *pnp_avm_d = NULL;
-
- if (!isapnp_present())
- return (1); /* no-op: success */
-
- if ((pnp_avm_c = pnp_find_card(
- ISAPNP_VENDOR('A', 'V', 'M'),
- ISAPNP_FUNCTION(0x0900), pnp_avm_c))) {
- if ((pnp_avm_d = pnp_find_dev(pnp_avm_c,
- ISAPNP_VENDOR('A', 'V', 'M'),
- ISAPNP_FUNCTION(0x0900), pnp_avm_d))) {
- int err;
-
- pnp_disable_dev(pnp_avm_d);
- err = pnp_activate_dev(pnp_avm_d);
- if (err < 0) {
- printk(KERN_WARNING "%s: pnp_activate_dev ret(%d)\n",
- __func__, err);
- return (0);
- }
- cs->hw.avm.cfg_reg =
- pnp_port_start(pnp_avm_d, 0);
- cs->irq = pnp_irq(pnp_avm_d, 0);
- if (cs->irq == -1) {
- printk(KERN_ERR "FritzPnP:No IRQ\n");
- return (0);
- }
- if (!cs->hw.avm.cfg_reg) {
- printk(KERN_ERR "FritzPnP:No IO address\n");
- return (0);
- }
- cs->subtyp = AVM_FRITZ_PNP;
-
- return (2); /* goto 'ready' label */
- }
- }
-
- return (1);
-}
-
-#endif /* __ISAPNP__ */
-
-#ifndef CONFIG_PCI
-
-static int avm_pci_setup(struct IsdnCardState *cs)
-{
- return (1); /* no-op: success */
-}
-
-#else
-
-static struct pci_dev *dev_avm = NULL;
-
-static int avm_pci_setup(struct IsdnCardState *cs)
-{
- if ((dev_avm = hisax_find_pci_device(PCI_VENDOR_ID_AVM,
- PCI_DEVICE_ID_AVM_A1, dev_avm))) {
-
- if (pci_enable_device(dev_avm))
- return (0);
-
- cs->irq = dev_avm->irq;
- if (!cs->irq) {
- printk(KERN_ERR "FritzPCI: No IRQ for PCI card found\n");
- return (0);
- }
-
- cs->hw.avm.cfg_reg = pci_resource_start(dev_avm, 1);
- if (!cs->hw.avm.cfg_reg) {
- printk(KERN_ERR "FritzPCI: No IO-Adr for PCI card found\n");
- return (0);
- }
-
- cs->subtyp = AVM_FRITZ_PCI;
- } else {
- printk(KERN_WARNING "FritzPCI: No PCI card found\n");
- return (0);
- }
-
- cs->irq_flags |= IRQF_SHARED;
-
- return (1);
-}
-
-#endif /* CONFIG_PCI */
-
-int setup_avm_pcipnp(struct IsdnCard *card)
-{
- struct IsdnCardState *cs = card->cs;
- char tmp[64];
- int rc;
-
- strcpy(tmp, avm_pci_rev);
- printk(KERN_INFO "HiSax: AVM PCI driver Rev. %s\n", HiSax_getrev(tmp));
-
- if (cs->typ != ISDN_CTYPE_FRITZPCI)
- return (0);
-
- if (card->para[1]) {
- /* old manual method */
- cs->hw.avm.cfg_reg = card->para[1];
- cs->irq = card->para[0];
- cs->subtyp = AVM_FRITZ_PNP;
- goto ready;
- }
-
- rc = avm_pnp_setup(cs);
- if (rc < 1)
- return (0);
- if (rc == 2)
- goto ready;
-
- rc = avm_pci_setup(cs);
- if (rc < 1)
- return (0);
-
-ready:
- return avm_setup_rest(cs);
-}
diff --git a/drivers/isdn/hisax/avma1_cs.c b/drivers/isdn/hisax/avma1_cs.c
deleted file mode 100644
index baad94ec1f4a..000000000000
--- a/drivers/isdn/hisax/avma1_cs.c
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
- * PCMCIA client driver for AVM A1 / Fritz!PCMCIA
- *
- * Author Carsten Paeth
- * Copyright 1998-2001 by Carsten Paeth <calle@calle.in-berlin.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/module.h>
-
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/ptrace.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <asm/io.h>
-
-#include <pcmcia/cistpl.h>
-#include <pcmcia/ds.h>
-#include "hisax_cfg.h"
-
-MODULE_DESCRIPTION("ISDN4Linux: PCMCIA client driver for AVM A1/Fritz!PCMCIA cards");
-MODULE_AUTHOR("Carsten Paeth");
-MODULE_LICENSE("GPL");
-
-
-/*====================================================================*/
-
-/* Parameters that can be set with 'insmod' */
-
-static int isdnprot = 2;
-
-module_param(isdnprot, int, 0);
-
-/*====================================================================*/
-
-static int avma1cs_config(struct pcmcia_device *link);
-static void avma1cs_release(struct pcmcia_device *link);
-static void avma1cs_detach(struct pcmcia_device *p_dev);
-
-static int avma1cs_probe(struct pcmcia_device *p_dev)
-{
- dev_dbg(&p_dev->dev, "avma1cs_attach()\n");
-
- /* General socket configuration */
- p_dev->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;
- p_dev->config_index = 1;
- p_dev->config_regs = PRESENT_OPTION;
-
- return avma1cs_config(p_dev);
-} /* avma1cs_attach */
-
-static void avma1cs_detach(struct pcmcia_device *link)
-{
- dev_dbg(&link->dev, "avma1cs_detach(0x%p)\n", link);
- avma1cs_release(link);
- kfree(link->priv);
-} /* avma1cs_detach */
-
-static int avma1cs_configcheck(struct pcmcia_device *p_dev, void *priv_data)
-{
- p_dev->resource[0]->end = 16;
- p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
- p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
- p_dev->io_lines = 5;
-
- return pcmcia_request_io(p_dev);
-}
-
-
-static int avma1cs_config(struct pcmcia_device *link)
-{
- int i = -1;
- char devname[128];
- IsdnCard_t icard;
- int busy = 0;
-
- dev_dbg(&link->dev, "avma1cs_config(0x%p)\n", link);
-
- devname[0] = 0;
- if (link->prod_id[1])
- strlcpy(devname, link->prod_id[1], sizeof(devname));
-
- if (pcmcia_loop_config(link, avma1cs_configcheck, NULL))
- return -ENODEV;
-
- do {
- /*
- * allocate an interrupt line
- */
- if (!link->irq) {
- /* undo */
- pcmcia_disable_device(link);
- break;
- }
-
- /*
- * configure the PCMCIA socket
- */
- i = pcmcia_enable_device(link);
- if (i != 0) {
- pcmcia_disable_device(link);
- break;
- }
-
- } while (0);
-
- /* If any step failed, release any partially configured state */
- if (i != 0) {
- avma1cs_release(link);
- return -ENODEV;
- }
-
- icard.para[0] = link->irq;
- icard.para[1] = link->resource[0]->start;
- icard.protocol = isdnprot;
- icard.typ = ISDN_CTYPE_A1_PCMCIA;
-
- i = hisax_init_pcmcia(link, &busy, &icard);
- if (i < 0) {
- printk(KERN_ERR "avma1_cs: failed to initialize AVM A1 "
- "PCMCIA %d at i/o %#x\n", i,
- (unsigned int) link->resource[0]->start);
- avma1cs_release(link);
- return -ENODEV;
- }
- link->priv = (void *) (unsigned long) i;
-
- return 0;
-} /* avma1cs_config */
-
-static void avma1cs_release(struct pcmcia_device *link)
-{
- unsigned long minor = (unsigned long) link->priv;
-
- dev_dbg(&link->dev, "avma1cs_release(0x%p)\n", link);
-
- /* now unregister function with hisax */
- HiSax_closecard(minor);
-
- pcmcia_disable_device(link);
-} /* avma1cs_release */
-
-static const struct pcmcia_device_id avma1cs_ids[] = {
- PCMCIA_DEVICE_PROD_ID12("AVM", "ISDN A", 0x95d42008, 0xadc9d4bb),
- PCMCIA_DEVICE_PROD_ID12("ISDN", "CARD", 0x8d9761c8, 0x01c5aa7b),
- PCMCIA_DEVICE_NULL
-};
-MODULE_DEVICE_TABLE(pcmcia, avma1cs_ids);
-
-static struct pcmcia_driver avma1cs_driver = {
- .owner = THIS_MODULE,
- .name = "avma1_cs",
- .probe = avma1cs_probe,
- .remove = avma1cs_detach,
- .id_table = avma1cs_ids,
-};
-module_pcmcia_driver(avma1cs_driver);
diff --git a/drivers/isdn/hisax/bkm_a4t.c b/drivers/isdn/hisax/bkm_a4t.c
deleted file mode 100644
index c360164bde1b..000000000000
--- a/drivers/isdn/hisax/bkm_a4t.c
+++ /dev/null
@@ -1,358 +0,0 @@
-/* $Id: bkm_a4t.c,v 1.22.2.4 2004/01/14 16:04:48 keil Exp $
- *
- * low level stuff for T-Berkom A4T
- *
- * Author Roland Klabunde
- * Copyright by Roland Klabunde <R.Klabunde@Berkom.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-
-#include <linux/init.h>
-#include "hisax.h"
-#include "isac.h"
-#include "hscx.h"
-#include "jade.h"
-#include "isdnl1.h"
-#include <linux/pci.h>
-#include "bkm_ax.h"
-
-static const char *bkm_a4t_revision = "$Revision: 1.22.2.4 $";
-
-
-static inline u_char
-readreg(unsigned int ale, unsigned long adr, u_char off)
-{
- register u_int ret;
- unsigned int *po = (unsigned int *) adr; /* Postoffice */
-
- *po = (GCS_2 | PO_WRITE | off);
- __WAITI20__(po);
- *po = (ale | PO_READ);
- __WAITI20__(po);
- ret = *po;
- return ((unsigned char) ret);
-}
-
-
-static inline void
-readfifo(unsigned int ale, unsigned long adr, u_char off, u_char *data, int size)
-{
- int i;
- for (i = 0; i < size; i++)
- *data++ = readreg(ale, adr, off);
-}
-
-
-static inline void
-writereg(unsigned int ale, unsigned long adr, u_char off, u_char data)
-{
- unsigned int *po = (unsigned int *) adr; /* Postoffice */
- *po = (GCS_2 | PO_WRITE | off);
- __WAITI20__(po);
- *po = (ale | PO_WRITE | data);
- __WAITI20__(po);
-}
-
-
-static inline void
-writefifo(unsigned int ale, unsigned long adr, u_char off, u_char *data, int size)
-{
- int i;
-
- for (i = 0; i < size; i++)
- writereg(ale, adr, off, *data++);
-}
-
-
-/* Interface functions */
-
-static u_char
-ReadISAC(struct IsdnCardState *cs, u_char offset)
-{
- return (readreg(cs->hw.ax.isac_ale, cs->hw.ax.isac_adr, offset));
-}
-
-static void
-WriteISAC(struct IsdnCardState *cs, u_char offset, u_char value)
-{
- writereg(cs->hw.ax.isac_ale, cs->hw.ax.isac_adr, offset, value);
-}
-
-static void
-ReadISACfifo(struct IsdnCardState *cs, u_char *data, int size)
-{
- readfifo(cs->hw.ax.isac_ale, cs->hw.ax.isac_adr, 0, data, size);
-}
-
-static void
-WriteISACfifo(struct IsdnCardState *cs, u_char *data, int size)
-{
- writefifo(cs->hw.ax.isac_ale, cs->hw.ax.isac_adr, 0, data, size);
-}
-
-static u_char
-ReadJADE(struct IsdnCardState *cs, int jade, u_char offset)
-{
- return (readreg(cs->hw.ax.jade_ale, cs->hw.ax.jade_adr, offset + (jade == -1 ? 0 : (jade ? 0xC0 : 0x80))));
-}
-
-static void
-WriteJADE(struct IsdnCardState *cs, int jade, u_char offset, u_char value)
-{
- writereg(cs->hw.ax.jade_ale, cs->hw.ax.jade_adr, offset + (jade == -1 ? 0 : (jade ? 0xC0 : 0x80)), value);
-}
-
-/*
- * fast interrupt JADE stuff goes here
- */
-
-#define READJADE(cs, nr, reg) readreg(cs->hw.ax.jade_ale, \
- cs->hw.ax.jade_adr, reg + (nr == -1 ? 0 : (nr ? 0xC0 : 0x80)))
-#define WRITEJADE(cs, nr, reg, data) writereg(cs->hw.ax.jade_ale, \
- cs->hw.ax.jade_adr, reg + (nr == -1 ? 0 : (nr ? 0xC0 : 0x80)), data)
-
-#define READJADEFIFO(cs, nr, ptr, cnt) readfifo(cs->hw.ax.jade_ale, \
- cs->hw.ax.jade_adr, (nr == -1 ? 0 : (nr ? 0xC0 : 0x80)), ptr, cnt)
-#define WRITEJADEFIFO(cs, nr, ptr, cnt) writefifo(cs->hw.ax.jade_ale, \
- cs->hw.ax.jade_adr, (nr == -1 ? 0 : (nr ? 0xC0 : 0x80)), ptr, cnt)
-
-#include "jade_irq.c"
-
-static irqreturn_t
-bkm_interrupt(int intno, void *dev_id)
-{
- struct IsdnCardState *cs = dev_id;
- u_char val = 0;
- u_long flags;
- I20_REGISTER_FILE *pI20_Regs;
-
- spin_lock_irqsave(&cs->lock, flags);
- pI20_Regs = (I20_REGISTER_FILE *) (cs->hw.ax.base);
-
- /* ISDN interrupt pending? */
- if (pI20_Regs->i20IntStatus & intISDN) {
- /* Reset the ISDN interrupt */
- pI20_Regs->i20IntStatus = intISDN;
- /* Disable ISDN interrupt */
- pI20_Regs->i20IntCtrl &= ~intISDN;
- /* Channel A first */
- val = readreg(cs->hw.ax.jade_ale, cs->hw.ax.jade_adr, jade_HDLC_ISR + 0x80);
- if (val) {
- jade_int_main(cs, val, 0);
- }
- /* Channel B */
- val = readreg(cs->hw.ax.jade_ale, cs->hw.ax.jade_adr, jade_HDLC_ISR + 0xC0);
- if (val) {
- jade_int_main(cs, val, 1);
- }
- /* D-Channel */
- val = readreg(cs->hw.ax.isac_ale, cs->hw.ax.isac_adr, ISAC_ISTA);
- if (val) {
- isac_interrupt(cs, val);
- }
- /* Reenable ISDN interrupt */
- pI20_Regs->i20IntCtrl |= intISDN;
- spin_unlock_irqrestore(&cs->lock, flags);
- return IRQ_HANDLED;
- } else {
- spin_unlock_irqrestore(&cs->lock, flags);
- return IRQ_NONE;
- }
-}
-
-static void
-release_io_bkm(struct IsdnCardState *cs)
-{
- if (cs->hw.ax.base) {
- iounmap((void *) cs->hw.ax.base);
- cs->hw.ax.base = 0;
- }
-}
-
-static void
-enable_bkm_int(struct IsdnCardState *cs, unsigned bEnable)
-{
- if (cs->typ == ISDN_CTYPE_BKM_A4T) {
- I20_REGISTER_FILE *pI20_Regs = (I20_REGISTER_FILE *) (cs->hw.ax.base);
- if (bEnable)
- pI20_Regs->i20IntCtrl |= (intISDN | intPCI);
- else
- /* CAUTION: This disables the video capture driver too */
- pI20_Regs->i20IntCtrl &= ~(intISDN | intPCI);
- }
-}
-
-static void
-reset_bkm(struct IsdnCardState *cs)
-{
- if (cs->typ == ISDN_CTYPE_BKM_A4T) {
- I20_REGISTER_FILE *pI20_Regs = (I20_REGISTER_FILE *) (cs->hw.ax.base);
- /* Issue the I20 soft reset */
- pI20_Regs->i20SysControl = 0xFF; /* all in */
- mdelay(10);
- /* Remove the soft reset */
- pI20_Regs->i20SysControl = sysRESET | 0xFF;
- mdelay(10);
- /* Set our configuration */
- pI20_Regs->i20SysControl = sysRESET | sysCFG;
- /* Issue ISDN reset */
- pI20_Regs->i20GuestControl = guestWAIT_CFG |
- g_A4T_JADE_RES |
- g_A4T_ISAR_RES |
- g_A4T_ISAC_RES |
- g_A4T_JADE_BOOTR |
- g_A4T_ISAR_BOOTR;
- mdelay(10);
-
- /* Remove RESET state from ISDN */
- pI20_Regs->i20GuestControl &= ~(g_A4T_ISAC_RES |
- g_A4T_JADE_RES |
- g_A4T_ISAR_RES);
- mdelay(10);
- }
-}
-
-static int
-BKM_card_msg(struct IsdnCardState *cs, int mt, void *arg)
-{
- u_long flags;
-
- switch (mt) {
- case CARD_RESET:
- /* Disable ints */
- spin_lock_irqsave(&cs->lock, flags);
- enable_bkm_int(cs, 0);
- reset_bkm(cs);
- spin_unlock_irqrestore(&cs->lock, flags);
- return (0);
- case CARD_RELEASE:
- /* Sanity */
- spin_lock_irqsave(&cs->lock, flags);
- enable_bkm_int(cs, 0);
- reset_bkm(cs);
- spin_unlock_irqrestore(&cs->lock, flags);
- release_io_bkm(cs);
- return (0);
- case CARD_INIT:
- spin_lock_irqsave(&cs->lock, flags);
- clear_pending_isac_ints(cs);
- clear_pending_jade_ints(cs);
- initisac(cs);
- initjade(cs);
- /* Enable ints */
- enable_bkm_int(cs, 1);
- spin_unlock_irqrestore(&cs->lock, flags);
- return (0);
- case CARD_TEST:
- return (0);
- }
- return (0);
-}
-
-static int a4t_pci_probe(struct pci_dev *dev_a4t, struct IsdnCardState *cs,
- u_int *found, u_int *pci_memaddr)
-{
- u16 sub_sys;
- u16 sub_vendor;
-
- sub_vendor = dev_a4t->subsystem_vendor;
- sub_sys = dev_a4t->subsystem_device;
- if ((sub_sys == PCI_DEVICE_ID_BERKOM_A4T) && (sub_vendor == PCI_VENDOR_ID_BERKOM)) {
- if (pci_enable_device(dev_a4t))
- return (0); /* end loop & function */
- *found = 1;
- *pci_memaddr = pci_resource_start(dev_a4t, 0);
- cs->irq = dev_a4t->irq;
- return (1); /* end loop */
- }
-
- return (-1); /* continue looping */
-}
-
-static int a4t_cs_init(struct IsdnCard *card, struct IsdnCardState *cs,
- u_int pci_memaddr)
-{
- I20_REGISTER_FILE *pI20_Regs;
-
- if (!cs->irq) { /* IRQ range check ?? */
- printk(KERN_WARNING "HiSax: Telekom A4T: No IRQ\n");
- return (0);
- }
- cs->hw.ax.base = (long) ioremap(pci_memaddr, 4096);
-	/* Check suspicious address */
- pI20_Regs = (I20_REGISTER_FILE *) (cs->hw.ax.base);
- if ((pI20_Regs->i20IntStatus & 0x8EFFFFFF) != 0) {
- printk(KERN_WARNING "HiSax: Telekom A4T address "
- "%lx-%lx suspicious\n",
- cs->hw.ax.base, cs->hw.ax.base + 4096);
- iounmap((void *) cs->hw.ax.base);
- cs->hw.ax.base = 0;
- return (0);
- }
- cs->hw.ax.isac_adr = cs->hw.ax.base + PO_OFFSET;
- cs->hw.ax.jade_adr = cs->hw.ax.base + PO_OFFSET;
- cs->hw.ax.isac_ale = GCS_1;
- cs->hw.ax.jade_ale = GCS_3;
-
- printk(KERN_INFO "HiSax: Telekom A4T: Card configured at "
- "0x%lX IRQ %d\n",
- cs->hw.ax.base, cs->irq);
-
- setup_isac(cs);
- cs->readisac = &ReadISAC;
- cs->writeisac = &WriteISAC;
- cs->readisacfifo = &ReadISACfifo;
- cs->writeisacfifo = &WriteISACfifo;
- cs->BC_Read_Reg = &ReadJADE;
- cs->BC_Write_Reg = &WriteJADE;
- cs->BC_Send_Data = &jade_fill_fifo;
- cs->cardmsg = &BKM_card_msg;
- cs->irq_func = &bkm_interrupt;
- cs->irq_flags |= IRQF_SHARED;
- ISACVersion(cs, "Telekom A4T:");
- /* Jade version */
- JadeVersion(cs, "Telekom A4T:");
-
- return (1);
-}
-
-static struct pci_dev *dev_a4t = NULL;
-
-int setup_bkm_a4t(struct IsdnCard *card)
-{
- struct IsdnCardState *cs = card->cs;
- char tmp[64];
- u_int pci_memaddr = 0, found = 0;
- int ret;
-
- strcpy(tmp, bkm_a4t_revision);
- printk(KERN_INFO "HiSax: T-Berkom driver Rev. %s\n", HiSax_getrev(tmp));
- if (cs->typ == ISDN_CTYPE_BKM_A4T) {
- cs->subtyp = BKM_A4T;
- } else
- return (0);
-
- while ((dev_a4t = hisax_find_pci_device(PCI_VENDOR_ID_ZORAN,
- PCI_DEVICE_ID_ZORAN_36120, dev_a4t))) {
- ret = a4t_pci_probe(dev_a4t, cs, &found, &pci_memaddr);
- if (!ret)
- return (0);
- if (ret > 0)
- break;
- }
- if (!found) {
- printk(KERN_WARNING "HiSax: Telekom A4T: Card not found\n");
- return (0);
- }
- if (!pci_memaddr) {
- printk(KERN_WARNING "HiSax: Telekom A4T: "
- "No Memory base address\n");
- return (0);
- }
-
- return a4t_cs_init(card, cs, pci_memaddr);
-}
diff --git a/drivers/isdn/hisax/bkm_a8.c b/drivers/isdn/hisax/bkm_a8.c
deleted file mode 100644
index dd663ea57ec6..000000000000
--- a/drivers/isdn/hisax/bkm_a8.c
+++ /dev/null
@@ -1,433 +0,0 @@
-/* $Id: bkm_a8.c,v 1.22.2.4 2004/01/15 14:02:34 keil Exp $
- *
- * low level stuff for Scitel Quadro (4*S0, passive)
- *
- * Author Roland Klabunde
- * Copyright by Roland Klabunde <R.Klabunde@Berkom.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-
-#include <linux/init.h>
-#include "hisax.h"
-#include "isac.h"
-#include "ipac.h"
-#include "hscx.h"
-#include "isdnl1.h"
-#include <linux/pci.h>
-#include "bkm_ax.h"
-
-#define ATTEMPT_PCI_REMAPPING /* Required for PLX rev 1 */
-
-static const char sct_quadro_revision[] = "$Revision: 1.22.2.4 $";
-
-static const char *sct_quadro_subtypes[] =
-{
- "",
- "#1",
- "#2",
- "#3",
- "#4"
-};
-
-
-#define wordout(addr, val) outw(val, addr)
-#define wordin(addr) inw(addr)
-
-static inline u_char
-readreg(unsigned int ale, unsigned int adr, u_char off)
-{
- register u_char ret;
- wordout(ale, off);
- ret = wordin(adr) & 0xFF;
- return (ret);
-}
-
-static inline void
-readfifo(unsigned int ale, unsigned int adr, u_char off, u_char *data, int size)
-{
- int i;
- wordout(ale, off);
- for (i = 0; i < size; i++)
- data[i] = wordin(adr) & 0xFF;
-}
-
-
-static inline void
-writereg(unsigned int ale, unsigned int adr, u_char off, u_char data)
-{
- wordout(ale, off);
- wordout(adr, data);
-}
-
-static inline void
-writefifo(unsigned int ale, unsigned int adr, u_char off, u_char *data, int size)
-{
- int i;
- wordout(ale, off);
- for (i = 0; i < size; i++)
- wordout(adr, data[i]);
-}
-
-/* Interface functions */
-
-static u_char
-ReadISAC(struct IsdnCardState *cs, u_char offset)
-{
- return (readreg(cs->hw.ax.base, cs->hw.ax.data_adr, offset | 0x80));
-}
-
-static void
-WriteISAC(struct IsdnCardState *cs, u_char offset, u_char value)
-{
- writereg(cs->hw.ax.base, cs->hw.ax.data_adr, offset | 0x80, value);
-}
-
-static void
-ReadISACfifo(struct IsdnCardState *cs, u_char *data, int size)
-{
- readfifo(cs->hw.ax.base, cs->hw.ax.data_adr, 0x80, data, size);
-}
-
-static void
-WriteISACfifo(struct IsdnCardState *cs, u_char *data, int size)
-{
- writefifo(cs->hw.ax.base, cs->hw.ax.data_adr, 0x80, data, size);
-}
-
-
-static u_char
-ReadHSCX(struct IsdnCardState *cs, int hscx, u_char offset)
-{
- return (readreg(cs->hw.ax.base, cs->hw.ax.data_adr, offset + (hscx ? 0x40 : 0)));
-}
-
-static void
-WriteHSCX(struct IsdnCardState *cs, int hscx, u_char offset, u_char value)
-{
- writereg(cs->hw.ax.base, cs->hw.ax.data_adr, offset + (hscx ? 0x40 : 0), value);
-}
-
-/* Set the specific ipac to active */
-static void
-set_ipac_active(struct IsdnCardState *cs, u_int active)
-{
- /* set irq mask */
- writereg(cs->hw.ax.base, cs->hw.ax.data_adr, IPAC_MASK,
- active ? 0xc0 : 0xff);
-}
-
-/*
- * fast interrupt HSCX stuff goes here
- */
-
-#define READHSCX(cs, nr, reg) readreg(cs->hw.ax.base, \
- cs->hw.ax.data_adr, reg + (nr ? 0x40 : 0))
-#define WRITEHSCX(cs, nr, reg, data) writereg(cs->hw.ax.base, \
- cs->hw.ax.data_adr, reg + (nr ? 0x40 : 0), data)
-#define READHSCXFIFO(cs, nr, ptr, cnt) readfifo(cs->hw.ax.base, \
- cs->hw.ax.data_adr, (nr ? 0x40 : 0), ptr, cnt)
-#define WRITEHSCXFIFO(cs, nr, ptr, cnt) writefifo(cs->hw.ax.base, \
- cs->hw.ax.data_adr, (nr ? 0x40 : 0), ptr, cnt)
-
-#include "hscx_irq.c"
-
-static irqreturn_t
-bkm_interrupt_ipac(int intno, void *dev_id)
-{
- struct IsdnCardState *cs = dev_id;
- u_char ista, val, icnt = 5;
- u_long flags;
-
- spin_lock_irqsave(&cs->lock, flags);
- ista = readreg(cs->hw.ax.base, cs->hw.ax.data_adr, IPAC_ISTA);
- if (!(ista & 0x3f)) { /* not this IPAC */
- spin_unlock_irqrestore(&cs->lock, flags);
- return IRQ_NONE;
- }
-Start_IPAC:
- if (cs->debug & L1_DEB_IPAC)
- debugl1(cs, "IPAC ISTA %02X", ista);
- if (ista & 0x0f) {
- val = readreg(cs->hw.ax.base, cs->hw.ax.data_adr, HSCX_ISTA + 0x40);
- if (ista & 0x01)
- val |= 0x01;
- if (ista & 0x04)
- val |= 0x02;
- if (ista & 0x08)
- val |= 0x04;
- if (val) {
- hscx_int_main(cs, val);
- }
- }
- if (ista & 0x20) {
- val = 0xfe & readreg(cs->hw.ax.base, cs->hw.ax.data_adr, ISAC_ISTA | 0x80);
- if (val) {
- isac_interrupt(cs, val);
- }
- }
- if (ista & 0x10) {
- val = 0x01;
- isac_interrupt(cs, val);
- }
- ista = readreg(cs->hw.ax.base, cs->hw.ax.data_adr, IPAC_ISTA);
- if ((ista & 0x3f) && icnt) {
- icnt--;
- goto Start_IPAC;
- }
- if (!icnt)
- printk(KERN_WARNING "HiSax: Scitel Quadro (%s) IRQ LOOP\n",
- sct_quadro_subtypes[cs->subtyp]);
- writereg(cs->hw.ax.base, cs->hw.ax.data_adr, IPAC_MASK, 0xFF);
- writereg(cs->hw.ax.base, cs->hw.ax.data_adr, IPAC_MASK, 0xC0);
- spin_unlock_irqrestore(&cs->lock, flags);
- return IRQ_HANDLED;
-}
-
-static void
-release_io_sct_quadro(struct IsdnCardState *cs)
-{
- release_region(cs->hw.ax.base & 0xffffffc0, 128);
- if (cs->subtyp == SCT_1)
- release_region(cs->hw.ax.plx_adr, 64);
-}
-
-static void
-enable_bkm_int(struct IsdnCardState *cs, unsigned bEnable)
-{
- if (cs->typ == ISDN_CTYPE_SCT_QUADRO) {
- if (bEnable)
- wordout(cs->hw.ax.plx_adr + 0x4C, (wordin(cs->hw.ax.plx_adr + 0x4C) | 0x41));
- else
- wordout(cs->hw.ax.plx_adr + 0x4C, (wordin(cs->hw.ax.plx_adr + 0x4C) & ~0x41));
- }
-}
-
-static void
-reset_bkm(struct IsdnCardState *cs)
-{
- if (cs->subtyp == SCT_1) {
- wordout(cs->hw.ax.plx_adr + 0x50, (wordin(cs->hw.ax.plx_adr + 0x50) & ~4));
- mdelay(10);
- /* Remove the soft reset */
- wordout(cs->hw.ax.plx_adr + 0x50, (wordin(cs->hw.ax.plx_adr + 0x50) | 4));
- mdelay(10);
- }
-}
-
-static int
-BKM_card_msg(struct IsdnCardState *cs, int mt, void *arg)
-{
- u_long flags;
-
- switch (mt) {
- case CARD_RESET:
- spin_lock_irqsave(&cs->lock, flags);
- /* Disable ints */
- set_ipac_active(cs, 0);
- enable_bkm_int(cs, 0);
- reset_bkm(cs);
- spin_unlock_irqrestore(&cs->lock, flags);
- return (0);
- case CARD_RELEASE:
- /* Sanity */
- spin_lock_irqsave(&cs->lock, flags);
- set_ipac_active(cs, 0);
- enable_bkm_int(cs, 0);
- spin_unlock_irqrestore(&cs->lock, flags);
- release_io_sct_quadro(cs);
- return (0);
- case CARD_INIT:
- spin_lock_irqsave(&cs->lock, flags);
- cs->debug |= L1_DEB_IPAC;
- set_ipac_active(cs, 1);
- inithscxisac(cs, 3);
- /* Enable ints */
- enable_bkm_int(cs, 1);
- spin_unlock_irqrestore(&cs->lock, flags);
- return (0);
- case CARD_TEST:
- return (0);
- }
- return (0);
-}
-
-static int sct_alloc_io(u_int adr, u_int len)
-{
- if (!request_region(adr, len, "scitel")) {
- printk(KERN_WARNING
- "HiSax: Scitel port %#x-%#x already in use\n",
- adr, adr + len);
- return (1);
- }
- return (0);
-}
-
-static struct pci_dev *dev_a8 = NULL;
-static u16 sub_vendor_id = 0;
-static u16 sub_sys_id = 0;
-static u_char pci_bus = 0;
-static u_char pci_device_fn = 0;
-static u_char pci_irq = 0;
-
-int setup_sct_quadro(struct IsdnCard *card)
-{
- struct IsdnCardState *cs = card->cs;
- char tmp[64];
- u_int found = 0;
- u_int pci_ioaddr1, pci_ioaddr2, pci_ioaddr3, pci_ioaddr4, pci_ioaddr5;
-
- strcpy(tmp, sct_quadro_revision);
- printk(KERN_INFO "HiSax: T-Berkom driver Rev. %s\n", HiSax_getrev(tmp));
- if (cs->typ == ISDN_CTYPE_SCT_QUADRO) {
- cs->subtyp = SCT_1; /* Preset */
- } else
- return (0);
-
- /* Identify subtype by para[0] */
- if (card->para[0] >= SCT_1 && card->para[0] <= SCT_4)
- cs->subtyp = card->para[0];
- else {
- printk(KERN_WARNING "HiSax: Scitel Quadro: Invalid "
- "subcontroller in configuration, default to 1\n");
- return (0);
- }
- if ((cs->subtyp != SCT_1) && ((sub_sys_id != PCI_DEVICE_ID_BERKOM_SCITEL_QUADRO) ||
- (sub_vendor_id != PCI_VENDOR_ID_BERKOM)))
- return (0);
- if (cs->subtyp == SCT_1) {
- while ((dev_a8 = hisax_find_pci_device(PCI_VENDOR_ID_PLX,
- PCI_DEVICE_ID_PLX_9050, dev_a8))) {
-
- sub_vendor_id = dev_a8->subsystem_vendor;
- sub_sys_id = dev_a8->subsystem_device;
- if ((sub_sys_id == PCI_DEVICE_ID_BERKOM_SCITEL_QUADRO) &&
- (sub_vendor_id == PCI_VENDOR_ID_BERKOM)) {
- if (pci_enable_device(dev_a8))
- return (0);
- pci_ioaddr1 = pci_resource_start(dev_a8, 1);
- pci_irq = dev_a8->irq;
- pci_bus = dev_a8->bus->number;
- pci_device_fn = dev_a8->devfn;
- found = 1;
- break;
- }
- }
- if (!found) {
- printk(KERN_WARNING "HiSax: Scitel Quadro (%s): "
- "Card not found\n",
- sct_quadro_subtypes[cs->subtyp]);
- return (0);
- }
-#ifdef ATTEMPT_PCI_REMAPPING
-/* HACK: PLX revision 1 bug: PLX address bit 7 must not be set */
- if ((pci_ioaddr1 & 0x80) && (dev_a8->revision == 1)) {
- printk(KERN_WARNING "HiSax: Scitel Quadro (%s): "
- "PLX rev 1, remapping required!\n",
- sct_quadro_subtypes[cs->subtyp]);
- /* Restart PCI negotiation */
- pci_write_config_dword(dev_a8, PCI_BASE_ADDRESS_1, (u_int)-1);
- /* Move up by 0x80 byte */
- pci_ioaddr1 += 0x80;
- pci_ioaddr1 &= PCI_BASE_ADDRESS_IO_MASK;
- pci_write_config_dword(dev_a8, PCI_BASE_ADDRESS_1, pci_ioaddr1);
- dev_a8->resource[1].start = pci_ioaddr1;
- }
-#endif /* End HACK */
- }
- if (!pci_irq) { /* IRQ range check ?? */
- printk(KERN_WARNING "HiSax: Scitel Quadro (%s): No IRQ\n",
- sct_quadro_subtypes[cs->subtyp]);
- return (0);
- }
- pci_read_config_dword(dev_a8, PCI_BASE_ADDRESS_1, &pci_ioaddr1);
- pci_read_config_dword(dev_a8, PCI_BASE_ADDRESS_2, &pci_ioaddr2);
- pci_read_config_dword(dev_a8, PCI_BASE_ADDRESS_3, &pci_ioaddr3);
- pci_read_config_dword(dev_a8, PCI_BASE_ADDRESS_4, &pci_ioaddr4);
- pci_read_config_dword(dev_a8, PCI_BASE_ADDRESS_5, &pci_ioaddr5);
- if (!pci_ioaddr1 || !pci_ioaddr2 || !pci_ioaddr3 || !pci_ioaddr4 || !pci_ioaddr5) {
- printk(KERN_WARNING "HiSax: Scitel Quadro (%s): "
- "No IO base address(es)\n",
- sct_quadro_subtypes[cs->subtyp]);
- return (0);
- }
- pci_ioaddr1 &= PCI_BASE_ADDRESS_IO_MASK;
- pci_ioaddr2 &= PCI_BASE_ADDRESS_IO_MASK;
- pci_ioaddr3 &= PCI_BASE_ADDRESS_IO_MASK;
- pci_ioaddr4 &= PCI_BASE_ADDRESS_IO_MASK;
- pci_ioaddr5 &= PCI_BASE_ADDRESS_IO_MASK;
- /* Take over */
- cs->irq = pci_irq;
- cs->irq_flags |= IRQF_SHARED;
- /* pci_ioaddr1 is unique to all subdevices */
- /* pci_ioaddr2 is for the fourth subdevice only */
- /* pci_ioaddr3 is for the third subdevice only */
- /* pci_ioaddr4 is for the second subdevice only */
- /* pci_ioaddr5 is for the first subdevice only */
- cs->hw.ax.plx_adr = pci_ioaddr1;
- /* Enter all ipac_base addresses */
- switch (cs->subtyp) {
- case 1:
- cs->hw.ax.base = pci_ioaddr5 + 0x00;
- if (sct_alloc_io(pci_ioaddr1, 128))
- return (0);
- if (sct_alloc_io(pci_ioaddr5, 64))
- return (0);
- /* disable all IPAC */
- writereg(pci_ioaddr5, pci_ioaddr5 + 4,
- IPAC_MASK, 0xFF);
- writereg(pci_ioaddr4 + 0x08, pci_ioaddr4 + 0x0c,
- IPAC_MASK, 0xFF);
- writereg(pci_ioaddr3 + 0x10, pci_ioaddr3 + 0x14,
- IPAC_MASK, 0xFF);
- writereg(pci_ioaddr2 + 0x20, pci_ioaddr2 + 0x24,
- IPAC_MASK, 0xFF);
- break;
- case 2:
- cs->hw.ax.base = pci_ioaddr4 + 0x08;
- if (sct_alloc_io(pci_ioaddr4, 64))
- return (0);
- break;
- case 3:
- cs->hw.ax.base = pci_ioaddr3 + 0x10;
- if (sct_alloc_io(pci_ioaddr3, 64))
- return (0);
- break;
- case 4:
- cs->hw.ax.base = pci_ioaddr2 + 0x20;
- if (sct_alloc_io(pci_ioaddr2, 64))
- return (0);
- break;
- }
- /* For isac and hscx data path */
- cs->hw.ax.data_adr = cs->hw.ax.base + 4;
-
- printk(KERN_INFO "HiSax: Scitel Quadro (%s) configured at "
- "0x%.4lX, 0x%.4lX, 0x%.4lX and IRQ %d\n",
- sct_quadro_subtypes[cs->subtyp],
- cs->hw.ax.plx_adr,
- cs->hw.ax.base,
- cs->hw.ax.data_adr,
- cs->irq);
-
- test_and_set_bit(HW_IPAC, &cs->HW_Flags);
-
- cs->readisac = &ReadISAC;
- cs->writeisac = &WriteISAC;
- cs->readisacfifo = &ReadISACfifo;
- cs->writeisacfifo = &WriteISACfifo;
-
- cs->BC_Read_Reg = &ReadHSCX;
- cs->BC_Write_Reg = &WriteHSCX;
- cs->BC_Send_Data = &hscx_fill_fifo;
- cs->cardmsg = &BKM_card_msg;
- cs->irq_func = &bkm_interrupt_ipac;
-
- printk(KERN_INFO "HiSax: Scitel Quadro (%s): IPAC Version %d\n",
- sct_quadro_subtypes[cs->subtyp],
- readreg(cs->hw.ax.base, cs->hw.ax.data_adr, IPAC_ID));
- return (1);
-}
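The subtype switch in setup_sct_quadro() above encodes how the four Scitel Quadro subdevices share the PLX I/O windows: PCI_BASE_ADDRESS_5 down to PCI_BASE_ADDRESS_2 serve subdevices #1 to #4, each IPAC sits at a fixed offset within the shared decode, and the ISAC/HSCX data port is always base + 4. Condensed as a lookup for reference (the struct and array names below are illustrative, not from the original source):

/* Illustrative condensation of the switch in setup_sct_quadro() above. */
struct sct_ipac_loc {
	int bar;		/* PCI base address register holding the window */
	unsigned int offset;	/* offset added to that window's I/O base */
};

static const struct sct_ipac_loc sct_ipac_map[] = {
	[1] = { .bar = 5, .offset = 0x00 },	/* subdevice #1 */
	[2] = { .bar = 4, .offset = 0x08 },	/* subdevice #2 */
	[3] = { .bar = 3, .offset = 0x10 },	/* subdevice #3 */
	[4] = { .bar = 2, .offset = 0x20 },	/* subdevice #4 */
};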
diff --git a/drivers/isdn/hisax/bkm_ax.h b/drivers/isdn/hisax/bkm_ax.h
deleted file mode 100644
index 27ff8a88679b..000000000000
--- a/drivers/isdn/hisax/bkm_ax.h
+++ /dev/null
@@ -1,119 +0,0 @@
-/* $Id: bkm_ax.h,v 1.5.6.3 2001/09/23 22:24:46 kai Exp $
- *
- * low level decls for T-Berkom cards A4T and Scitel Quadro (4*S0, passive)
- *
- * Author Roland Klabunde
- * Copyright by Roland Klabunde <R.Klabunde@Berkom.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#ifndef __BKM_AX_H__
-#define __BKM_AX_H__
-
-/* Supported boards (subtypes) */
-#define SCT_1 1
-#define SCT_2 2
-#define SCT_3 3
-#define SCT_4 4
-#define BKM_A4T 5
-
-#define PLX_ADDR_PLX 0x14 /* Addr PLX configuration */
-#define PLX_ADDR_ISAC 0x18 /* Addr ISAC */
-#define PLX_ADDR_HSCX 0x1C /* Addr HSCX */
-#define PLX_ADDR_ALE 0x20 /* Addr ALE */
-#define PLX_ADDR_ALEPLUS 0x24 /* Next Addr behind ALE */
-
-#define PLX_SUBVEN 0x2C /* Offset SubVendor */
-#define PLX_SUBSYS 0x2E /* Offset SubSystem */
-
-
-/* Application specific registers I20 (Siemens SZB6120H) */
-typedef struct {
- /* Video front end horizontal configuration register */
- volatile u_int i20VFEHorzCfg; /* Offset 00 */
- /* Video front end vertical configuration register */
- volatile u_int i20VFEVertCfg; /* Offset 04 */
- /* Video front end scaler and pixel format register */
- volatile u_int i20VFEScaler; /* Offset 08 */
- /* Video display top register */
- volatile u_int i20VDispTop; /* Offset 0C */
- /* Video display bottom register */
- volatile u_int i20VDispBottom; /* Offset 10 */
- /* Video stride, status and frame grab register */
- volatile u_int i20VidFrameGrab;/* Offset 14 */
- /* Video display configuration register */
- volatile u_int i20VDispCfg; /* Offset 18 */
- /* Video masking map top */
- volatile u_int i20VMaskTop; /* Offset 1C */
- /* Video masking map bottom */
- volatile u_int i20VMaskBottom; /* Offset 20 */
- /* Overlay control register */
- volatile u_int i20OvlyControl; /* Offset 24 */
- /* System, PCI and general purpose pins control register */
- volatile u_int i20SysControl; /* Offset 28 */
-#define sysRESET 0x01000000 /* bit 24:Softreset (Low) */
- /* GPIO 4...0: Output fixed for our cfg! */
-#define sysCFG 0x000000E0 /* GPIO 7,6,5: Input */
- /* General purpose pins and guest bus control register */
- volatile u_int i20GuestControl;/* Offset 2C */
-#define guestWAIT_CFG 0x00005555 /* 4 PCI waits for all */
-#define guestISDN_INT_E 0x01000000 /* ISDN Int en (low) */
-#define guestVID_INT_E 0x02000000 /* Video interrupt en (low) */
-#define guestADI1_INT_R 0x04000000 /* ADI #1 int req (low) */
-#define guestADI2_INT_R 0x08000000 /* ADI #2 int req (low) */
-#define guestISDN_RES 0x10000000 /* ISDN reset bit (high) */
-#define guestADI1_INT_S 0x20000000 /* ADI #1 int pending (low) */
-#define guestADI2_INT_S 0x40000000 /* ADI #2 int pending (low) */
-#define guestISDN_INT_S 0x80000000 /* ISAC int pending (low) */
-
-#define g_A4T_JADE_RES 0x01000000 /* JADE Reset (High) */
-#define g_A4T_ISAR_RES 0x02000000 /* ISAR Reset (High) */
-#define g_A4T_ISAC_RES 0x04000000 /* ISAC Reset (High) */
-#define g_A4T_JADE_BOOTR 0x08000000 /* JADE enable boot SRAM (Low) NOT USED */
-#define g_A4T_ISAR_BOOTR 0x10000000 /* ISAR enable boot SRAM (Low) NOT USED */
-#define g_A4T_JADE_INT_S 0x20000000 /* JADE interrupt pnd (Low) */
-#define g_A4T_ISAR_INT_S 0x40000000 /* ISAR interrupt pnd (Low) */
-#define g_A4T_ISAC_INT_S 0x80000000 /* ISAC interrupt pnd (Low) */
-
- volatile u_int i20CodeSource; /* Offset 30 */
- volatile u_int i20CodeXferCtrl;/* Offset 34 */
- volatile u_int i20CodeMemPtr; /* Offset 38 */
-
- volatile u_int i20IntStatus; /* Offset 3C */
- volatile u_int i20IntCtrl; /* Offset 40 */
-#define intISDN 0x40000000 /* GIRQ1En (ISAC/ADI) (High) */
-#define intVID 0x20000000 /* GIRQ0En (VSYNC) (High) */
-#define intCOD 0x10000000 /* CodRepIrqEn (High) */
-#define intPCI 0x01000000 /* PCI IntA enable (High) */
-
- volatile u_int i20I2CCtrl; /* Offset 44 */
-} I20_REGISTER_FILE, *PI20_REGISTER_FILE;
-
-/*
- * Postoffice structure for A4T
- *
- */
-#define PO_OFFSET 0x00000200 /* Postoffice offset from base */
-
-#define GCS_0 0x00000000 /* Guest bus chip selects */
-#define GCS_1 0x00100000
-#define GCS_2 0x00200000
-#define GCS_3 0x00300000
-
-#define PO_READ 0x00000000 /* R/W from/to guest bus */
-#define PO_WRITE 0x00800000
-
-#define PO_PEND 0x02000000
-
-#define POSTOFFICE(postoffice) *(volatile unsigned int *)(postoffice)
-
-/* Wait without limit (don't worry) */
-#define __WAITI20__(postoffice) \
- do { \
- while ((POSTOFFICE(postoffice) & PO_PEND)) ; \
- } while (0)
-
-#endif /* __BKM_AX_H__ */
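The __WAITI20__() macro above busy-waits on PO_PEND with no upper bound. A bounded variant, shown only as a sketch and relying on the POSTOFFICE()/PO_PEND definitions from this header (the timeout handling is an assumption, not something the removed code provided):

/* Illustrative bounded variant of __WAITI20__ (not in the removed header):
 * give up after max_polls reads instead of spinning forever on PO_PEND.
 */
#define __WAITI20_BOUNDED__(postoffice, max_polls, timed_out)		\
	do {								\
		unsigned int __n = (max_polls);				\
		while (__n && (POSTOFFICE(postoffice) & PO_PEND))	\
			__n--;						\
		(timed_out) = ((POSTOFFICE(postoffice) & PO_PEND) != 0); \
	} while (0)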
diff --git a/drivers/isdn/hisax/callc.c b/drivers/isdn/hisax/callc.c
deleted file mode 100644
index 9ee06328784c..000000000000
--- a/drivers/isdn/hisax/callc.c
+++ /dev/null
@@ -1,1792 +0,0 @@
-/* $Id: callc.c,v 2.59.2.4 2004/02/11 13:21:32 keil Exp $
- *
- * Author Karsten Keil
- * Copyright by Karsten Keil <keil@isdn4linux.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- * For changes and modifications please read
- * Documentation/isdn/HiSax.cert
- *
- * based on the teles driver from Jan den Ouden
- *
- * Thanks to Jan den Ouden
- * Fritz Elfert
- *
- */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include "hisax.h"
-#include <linux/isdn/capicmd.h>
-
-const char *lli_revision = "$Revision: 2.59.2.4 $";
-
-extern struct IsdnCard cards[];
-
-static int init_b_st(struct Channel *chanp, int incoming);
-static void release_b_st(struct Channel *chanp);
-
-static struct Fsm callcfsm;
-static int chancount;
-
-/* experimental REJECT after ALERTING for CALLBACK to beat the 4s delay */
-#define ALERT_REJECT 0
-
-/* Value to delay the sending of the first B-channel packet after CONNECT.
- * There is no value given by ITU, but experience shows that 300 ms will
- * work on many networks; if you or the other side is behind local exchanges,
- * a greater value may be recommended. If the delay is too short, the first
- * packet will be lost and autodetection on many commercial routers goes wrong!
- * You can adjust this value at runtime with
- * hisaxctrl <id> 2 <value>
- * value is in milliseconds
- */
-#define DEFAULT_B_DELAY 300
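The delay described above ends up in st->l1.delay (set to DEFAULT_B_DELAY in init_chan() and retuned through the ISDN_CMD_IOCTL arg 2 handler further down). As a rough sketch of the idea only, with a hypothetical helper name and assuming a sleeping context is allowed:

#include <linux/delay.h>
#include <linux/skbuff.h>

/* Hypothetical illustration, not part of the driver: hold back the first
 * B-channel frame after CONNECT for a configurable number of milliseconds
 * before handing it to the transmit path.
 */
static void delay_first_b_frame(struct sk_buff *skb, unsigned int delay_ms,
				void (*xmit)(struct sk_buff *skb))
{
	msleep(delay_ms);	/* DEFAULT_B_DELAY (300 ms) unless retuned */
	xmit(skb);
}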
-
-/* Flags for remembering action done in lli */
-
-#define FLG_START_B 0
-
-/*
- * Find card with given driverId
- */
-static inline struct IsdnCardState *
-hisax_findcard(int driverid)
-{
- int i;
-
- for (i = 0; i < nrcards; i++)
- if (cards[i].cs)
- if (cards[i].cs->myid == driverid)
- return (cards[i].cs);
- return (struct IsdnCardState *) 0;
-}
-
-static __printf(3, 4) void
- link_debug(struct Channel *chanp, int direction, char *fmt, ...)
-{
- va_list args;
- char tmp[16];
-
- va_start(args, fmt);
- sprintf(tmp, "Ch%d %s ", chanp->chan,
- direction ? "LL->HL" : "HL->LL");
- VHiSax_putstatus(chanp->cs, tmp, fmt, args);
- va_end(args);
-}
-
-enum {
- ST_NULL, /* 0 inactive */
- ST_OUT_DIAL, /* 1 outgoing, SETUP sent; awaiting confirm */
- ST_IN_WAIT_LL, /* 2 incoming call received; wait for LL confirm */
- ST_IN_ALERT_SENT, /* 3 incoming call received; ALERT sent */
- ST_IN_WAIT_CONN_ACK, /* 4 incoming CONNECT sent; awaiting CONN_ACK */
- ST_WAIT_BCONN, /* 5 CONNECT/CONN_ACK received, awaiting b-channel prot. estbl. */
- ST_ACTIVE, /* 6 active, b channel prot. established */
- ST_WAIT_BRELEASE, /* 7 call clear. (initiator), awaiting b channel prot. rel. */
- ST_WAIT_BREL_DISC, /* 8 call clear. (receiver), DISCONNECT req. received */
- ST_WAIT_DCOMMAND, /* 9 call clear. (receiver), awaiting DCHANNEL message */
- ST_WAIT_DRELEASE, /* 10 DISCONNECT sent, awaiting RELEASE */
- ST_WAIT_D_REL_CNF, /* 11 RELEASE sent, awaiting RELEASE confirm */
- ST_IN_PROCEED_SEND, /* 12 incoming call, PROCEEDING sent */
-};
-
-
-#define STATE_COUNT (ST_IN_PROCEED_SEND + 1)
-
-static char *strState[] =
-{
- "ST_NULL",
- "ST_OUT_DIAL",
- "ST_IN_WAIT_LL",
- "ST_IN_ALERT_SENT",
- "ST_IN_WAIT_CONN_ACK",
- "ST_WAIT_BCONN",
- "ST_ACTIVE",
- "ST_WAIT_BRELEASE",
- "ST_WAIT_BREL_DISC",
- "ST_WAIT_DCOMMAND",
- "ST_WAIT_DRELEASE",
- "ST_WAIT_D_REL_CNF",
- "ST_IN_PROCEED_SEND",
-};
-
-enum {
- EV_DIAL, /* 0 */
- EV_SETUP_CNF, /* 1 */
- EV_ACCEPTB, /* 2 */
- EV_DISCONNECT_IND, /* 3 */
- EV_RELEASE, /* 4 */
- EV_LEASED, /* 5 */
- EV_LEASED_REL, /* 6 */
- EV_SETUP_IND, /* 7 */
- EV_ACCEPTD, /* 8 */
- EV_SETUP_CMPL_IND, /* 9 */
- EV_BC_EST, /* 10 */
- EV_WRITEBUF, /* 11 */
- EV_HANGUP, /* 12 */
- EV_BC_REL, /* 13 */
- EV_CINF, /* 14 */
- EV_SUSPEND, /* 15 */
- EV_RESUME, /* 16 */
- EV_NOSETUP_RSP, /* 17 */
- EV_SETUP_ERR, /* 18 */
- EV_CONNECT_ERR, /* 19 */
- EV_PROCEED, /* 20 */
- EV_ALERT, /* 21 */
- EV_REDIR, /* 22 */
-};
-
-#define EVENT_COUNT (EV_REDIR + 1)
-
-static char *strEvent[] =
-{
- "EV_DIAL",
- "EV_SETUP_CNF",
- "EV_ACCEPTB",
- "EV_DISCONNECT_IND",
- "EV_RELEASE",
- "EV_LEASED",
- "EV_LEASED_REL",
- "EV_SETUP_IND",
- "EV_ACCEPTD",
- "EV_SETUP_CMPL_IND",
- "EV_BC_EST",
- "EV_WRITEBUF",
- "EV_HANGUP",
- "EV_BC_REL",
- "EV_CINF",
- "EV_SUSPEND",
- "EV_RESUME",
- "EV_NOSETUP_RSP",
- "EV_SETUP_ERR",
- "EV_CONNECT_ERR",
- "EV_PROCEED",
- "EV_ALERT",
- "EV_REDIR",
-};
-
-
-static inline void
-HL_LL(struct Channel *chanp, int command)
-{
- isdn_ctrl ic;
-
- ic.driver = chanp->cs->myid;
- ic.command = command;
- ic.arg = chanp->chan;
- chanp->cs->iif.statcallb(&ic);
-}
-
-static inline void
-lli_deliver_cause(struct Channel *chanp)
-{
- isdn_ctrl ic;
-
- if (!chanp->proc)
- return;
- if (chanp->proc->para.cause == NO_CAUSE)
- return;
- ic.driver = chanp->cs->myid;
- ic.command = ISDN_STAT_CAUSE;
- ic.arg = chanp->chan;
- if (chanp->cs->protocol == ISDN_PTYPE_EURO)
- sprintf(ic.parm.num, "E%02X%02X", chanp->proc->para.loc & 0x7f,
- chanp->proc->para.cause & 0x7f);
- else
- sprintf(ic.parm.num, "%02X%02X", chanp->proc->para.loc & 0x7f,
- chanp->proc->para.cause & 0x7f);
- chanp->cs->iif.statcallb(&ic);
-}
-
-static inline void
-lli_close(struct FsmInst *fi)
-{
- struct Channel *chanp = fi->userdata;
-
- FsmChangeState(fi, ST_NULL);
- chanp->Flags = 0;
- chanp->cs->cardmsg(chanp->cs, MDL_INFO_REL, (void *) (long)chanp->chan);
-}
-
-static void
-lli_leased_in(struct FsmInst *fi, int event, void *arg)
-{
- struct Channel *chanp = fi->userdata;
- isdn_ctrl ic;
- int ret;
-
- if (!chanp->leased)
- return;
- chanp->cs->cardmsg(chanp->cs, MDL_INFO_SETUP, (void *) (long)chanp->chan);
- FsmChangeState(fi, ST_IN_WAIT_LL);
- if (chanp->debug & 1)
- link_debug(chanp, 0, "STAT_ICALL_LEASED");
- ic.driver = chanp->cs->myid;
- ic.command = ((chanp->chan < 2) ? ISDN_STAT_ICALL : ISDN_STAT_ICALLW);
- ic.arg = chanp->chan;
- ic.parm.setup.si1 = 7;
- ic.parm.setup.si2 = 0;
- ic.parm.setup.plan = 0;
- ic.parm.setup.screen = 0;
- sprintf(ic.parm.setup.eazmsn, "%d", chanp->chan + 1);
- sprintf(ic.parm.setup.phone, "LEASED%d", chanp->cs->myid);
- ret = chanp->cs->iif.statcallb(&ic);
- if (chanp->debug & 1)
- link_debug(chanp, 1, "statcallb ret=%d", ret);
- if (!ret) {
- chanp->cs->cardmsg(chanp->cs, MDL_INFO_REL, (void *) (long)chanp->chan);
- FsmChangeState(fi, ST_NULL);
- }
-}
-
-
-/*
- * Dial out
- */
-static void
-lli_init_bchan_out(struct FsmInst *fi, int event, void *arg)
-{
- struct Channel *chanp = fi->userdata;
-
- FsmChangeState(fi, ST_WAIT_BCONN);
- if (chanp->debug & 1)
- link_debug(chanp, 0, "STAT_DCONN");
- HL_LL(chanp, ISDN_STAT_DCONN);
- init_b_st(chanp, 0);
- chanp->b_st->lli.l4l3(chanp->b_st, DL_ESTABLISH | REQUEST, NULL);
-}
-
-static void
-lli_prep_dialout(struct FsmInst *fi, int event, void *arg)
-{
- struct Channel *chanp = fi->userdata;
-
- FsmDelTimer(&chanp->drel_timer, 60);
- FsmDelTimer(&chanp->dial_timer, 73);
- chanp->l2_active_protocol = chanp->l2_protocol;
- chanp->incoming = 0;
- chanp->cs->cardmsg(chanp->cs, MDL_INFO_SETUP, (void *) (long)chanp->chan);
- if (chanp->leased) {
- lli_init_bchan_out(fi, event, arg);
- } else {
- FsmChangeState(fi, ST_OUT_DIAL);
- chanp->d_st->lli.l4l3(chanp->d_st, CC_SETUP | REQUEST, chanp);
- }
-}
-
-static void
-lli_resume(struct FsmInst *fi, int event, void *arg)
-{
- struct Channel *chanp = fi->userdata;
-
- FsmDelTimer(&chanp->drel_timer, 60);
- FsmDelTimer(&chanp->dial_timer, 73);
- chanp->l2_active_protocol = chanp->l2_protocol;
- chanp->incoming = 0;
- chanp->cs->cardmsg(chanp->cs, MDL_INFO_SETUP, (void *) (long)chanp->chan);
- if (chanp->leased) {
- lli_init_bchan_out(fi, event, arg);
- } else {
- FsmChangeState(fi, ST_OUT_DIAL);
- chanp->d_st->lli.l4l3(chanp->d_st, CC_RESUME | REQUEST, chanp);
- }
-}
-
-static void
-lli_go_active(struct FsmInst *fi, int event, void *arg)
-{
- struct Channel *chanp = fi->userdata;
- isdn_ctrl ic;
-
-
- FsmChangeState(fi, ST_ACTIVE);
- chanp->data_open = !0;
- if (chanp->bcs->conmsg)
- strcpy(ic.parm.num, chanp->bcs->conmsg);
- else
- ic.parm.num[0] = 0;
- if (chanp->debug & 1)
- link_debug(chanp, 0, "STAT_BCONN %s", ic.parm.num);
- ic.driver = chanp->cs->myid;
- ic.command = ISDN_STAT_BCONN;
- ic.arg = chanp->chan;
- chanp->cs->iif.statcallb(&ic);
- chanp->cs->cardmsg(chanp->cs, MDL_INFO_CONN, (void *) (long)chanp->chan);
-}
-
-
-/*
- * RESUME
- */
-
-/* incoming call */
-
-static void
-lli_deliver_call(struct FsmInst *fi, int event, void *arg)
-{
- struct Channel *chanp = fi->userdata;
- isdn_ctrl ic;
- int ret;
-
- chanp->cs->cardmsg(chanp->cs, MDL_INFO_SETUP, (void *) (long)chanp->chan);
- /*
- * Report incoming calls only once to the linklevel; use CallFlags,
- * which is set to 3 with each broadcast message in isdnl1.c
- * and reset if an interface answered the STAT_ICALL.
- */
- if (1) { /* for only one TEI */
- FsmChangeState(fi, ST_IN_WAIT_LL);
- if (chanp->debug & 1)
- link_debug(chanp, 0, (chanp->chan < 2) ? "STAT_ICALL" : "STAT_ICALLW");
- ic.driver = chanp->cs->myid;
- ic.command = ((chanp->chan < 2) ? ISDN_STAT_ICALL : ISDN_STAT_ICALLW);
-
- ic.arg = chanp->chan;
- /*
- * No need to return "unknown" for calls without OAD,
- * because that's handled in the linklevel now (replaced by '0')
- */
- memcpy(&ic.parm.setup, &chanp->proc->para.setup, sizeof(setup_parm));
- ret = chanp->cs->iif.statcallb(&ic);
- if (chanp->debug & 1)
- link_debug(chanp, 1, "statcallb ret=%d", ret);
-
- switch (ret) {
- case 1: /* OK, someone likes this call */
- FsmDelTimer(&chanp->drel_timer, 61);
- FsmChangeState(fi, ST_IN_ALERT_SENT);
- chanp->d_st->lli.l4l3(chanp->d_st, CC_ALERTING | REQUEST, chanp->proc);
- break;
- case 5: /* direct redirect */
- case 4: /* Proceeding desired */
- FsmDelTimer(&chanp->drel_timer, 61);
- FsmChangeState(fi, ST_IN_PROCEED_SEND);
- chanp->d_st->lli.l4l3(chanp->d_st, CC_PROCEED_SEND | REQUEST, chanp->proc);
- if (ret == 5) {
- memcpy(&chanp->setup, &ic.parm.setup, sizeof(setup_parm));
- chanp->d_st->lli.l4l3(chanp->d_st, CC_REDIR | REQUEST, chanp->proc);
- }
- break;
- case 2: /* Rejecting Call */
- break;
- case 3: /* incomplete number */
- FsmDelTimer(&chanp->drel_timer, 61);
- chanp->d_st->lli.l4l3(chanp->d_st, CC_MORE_INFO | REQUEST, chanp->proc);
- break;
- case 0: /* OK, nobody likes this call */
- default: /* statcallb problems */
- chanp->d_st->lli.l4l3(chanp->d_st, CC_IGNORE | REQUEST, chanp->proc);
- chanp->cs->cardmsg(chanp->cs, MDL_INFO_REL, (void *) (long)chanp->chan);
- FsmChangeState(fi, ST_NULL);
- break;
- }
- } else {
- chanp->d_st->lli.l4l3(chanp->d_st, CC_IGNORE | REQUEST, chanp->proc);
- chanp->cs->cardmsg(chanp->cs, MDL_INFO_REL, (void *) (long)chanp->chan);
- }
-}
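The numeric statcallb() return codes dispatched in lli_deliver_call() above are documented only by the case comments; named here for readability (the enum is illustrative and not part of the original source):

/* Illustrative names (not in the original source) for the statcallb()
 * return values handled in lli_deliver_call() above.
 */
enum icall_response {
	ICALL_IGNORE	= 0,	/* nobody takes the call */
	ICALL_ALERT	= 1,	/* accepted: send ALERTING */
	ICALL_REJECT	= 2,	/* reject the call */
	ICALL_MORE_INFO	= 3,	/* incomplete number, collect more digits */
	ICALL_PROCEED	= 4,	/* send CALL PROCEEDING */
	ICALL_REDIRECT	= 5,	/* proceed, then redirect the call */
};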
-
-static void
-lli_send_dconnect(struct FsmInst *fi, int event, void *arg)
-{
- struct Channel *chanp = fi->userdata;
-
- FsmChangeState(fi, ST_IN_WAIT_CONN_ACK);
- chanp->d_st->lli.l4l3(chanp->d_st, CC_SETUP | RESPONSE, chanp->proc);
-}
-
-static void
-lli_send_alert(struct FsmInst *fi, int event, void *arg)
-{
- struct Channel *chanp = fi->userdata;
-
- FsmChangeState(fi, ST_IN_ALERT_SENT);
- chanp->d_st->lli.l4l3(chanp->d_st, CC_ALERTING | REQUEST, chanp->proc);
-}
-
-static void
-lli_send_redir(struct FsmInst *fi, int event, void *arg)
-{
- struct Channel *chanp = fi->userdata;
-
- chanp->d_st->lli.l4l3(chanp->d_st, CC_REDIR | REQUEST, chanp->proc);
-}
-
-static void
-lli_init_bchan_in(struct FsmInst *fi, int event, void *arg)
-{
- struct Channel *chanp = fi->userdata;
-
- FsmChangeState(fi, ST_WAIT_BCONN);
- if (chanp->debug & 1)
- link_debug(chanp, 0, "STAT_DCONN");
- HL_LL(chanp, ISDN_STAT_DCONN);
- chanp->l2_active_protocol = chanp->l2_protocol;
- chanp->incoming = !0;
- init_b_st(chanp, !0);
- chanp->b_st->lli.l4l3(chanp->b_st, DL_ESTABLISH | REQUEST, NULL);
-}
-
-static void
-lli_setup_rsp(struct FsmInst *fi, int event, void *arg)
-{
- struct Channel *chanp = fi->userdata;
-
- if (chanp->leased) {
- lli_init_bchan_in(fi, event, arg);
- } else {
- FsmChangeState(fi, ST_IN_WAIT_CONN_ACK);
-#ifdef WANT_ALERT
- chanp->d_st->lli.l4l3(chanp->d_st, CC_ALERTING | REQUEST, chanp->proc);
-#endif
- chanp->d_st->lli.l4l3(chanp->d_st, CC_SETUP | RESPONSE, chanp->proc);
- }
-}
-
-/* Call suspend */
-
-static void
-lli_suspend(struct FsmInst *fi, int event, void *arg)
-{
- struct Channel *chanp = fi->userdata;
-
- chanp->d_st->lli.l4l3(chanp->d_st, CC_SUSPEND | REQUEST, chanp->proc);
-}
-
-/* Call clearing */
-
-static void
-lli_leased_hup(struct FsmInst *fi, struct Channel *chanp)
-{
- isdn_ctrl ic;
-
- ic.driver = chanp->cs->myid;
- ic.command = ISDN_STAT_CAUSE;
- ic.arg = chanp->chan;
- sprintf(ic.parm.num, "L0010");
- chanp->cs->iif.statcallb(&ic);
- if (chanp->debug & 1)
- link_debug(chanp, 0, "STAT_DHUP");
- HL_LL(chanp, ISDN_STAT_DHUP);
- lli_close(fi);
-}
-
-static void
-lli_disconnect_req(struct FsmInst *fi, int event, void *arg)
-{
- struct Channel *chanp = fi->userdata;
-
- if (chanp->leased) {
- lli_leased_hup(fi, chanp);
- } else {
- FsmChangeState(fi, ST_WAIT_DRELEASE);
- if (chanp->proc)
- chanp->proc->para.cause = 0x10; /* Normal Call Clearing */
- chanp->d_st->lli.l4l3(chanp->d_st, CC_DISCONNECT | REQUEST,
- chanp->proc);
- }
-}
-
-static void
-lli_disconnect_reject(struct FsmInst *fi, int event, void *arg)
-{
- struct Channel *chanp = fi->userdata;
-
- if (chanp->leased) {
- lli_leased_hup(fi, chanp);
- } else {
- FsmChangeState(fi, ST_WAIT_DRELEASE);
- if (chanp->proc)
- chanp->proc->para.cause = 0x15; /* Call Rejected */
- chanp->d_st->lli.l4l3(chanp->d_st, CC_DISCONNECT | REQUEST,
- chanp->proc);
- }
-}
-
-static void
-lli_dhup_close(struct FsmInst *fi, int event, void *arg)
-{
- struct Channel *chanp = fi->userdata;
-
- if (chanp->leased) {
- lli_leased_hup(fi, chanp);
- } else {
- if (chanp->debug & 1)
- link_debug(chanp, 0, "STAT_DHUP");
- lli_deliver_cause(chanp);
- HL_LL(chanp, ISDN_STAT_DHUP);
- lli_close(fi);
- }
-}
-
-static void
-lli_reject_req(struct FsmInst *fi, int event, void *arg)
-{
- struct Channel *chanp = fi->userdata;
-
- if (chanp->leased) {
- lli_leased_hup(fi, chanp);
- return;
- }
-#ifndef ALERT_REJECT
- if (chanp->proc)
- chanp->proc->para.cause = 0x15; /* Call Rejected */
- chanp->d_st->lli.l4l3(chanp->d_st, CC_REJECT | REQUEST, chanp->proc);
- lli_dhup_close(fi, event, arg);
-#else
- FsmRestartTimer(&chanp->drel_timer, 40, EV_HANGUP, NULL, 63);
- FsmChangeState(fi, ST_IN_ALERT_SENT);
- chanp->d_st->lli.l4l3(chanp->d_st, CC_ALERTING | REQUEST, chanp->proc);
-#endif
-}
-
-static void
-lli_disconn_bchan(struct FsmInst *fi, int event, void *arg)
-{
- struct Channel *chanp = fi->userdata;
-
- chanp->data_open = 0;
- FsmChangeState(fi, ST_WAIT_BRELEASE);
- chanp->b_st->lli.l4l3(chanp->b_st, DL_RELEASE | REQUEST, NULL);
-}
-
-static void
-lli_start_disc(struct FsmInst *fi, int event, void *arg)
-{
- struct Channel *chanp = fi->userdata;
-
- if (chanp->leased) {
- lli_leased_hup(fi, chanp);
- } else {
- lli_disconnect_req(fi, event, arg);
- }
-}
-
-static void
-lli_rel_b_disc(struct FsmInst *fi, int event, void *arg)
-{
- struct Channel *chanp = fi->userdata;
-
- release_b_st(chanp);
- lli_start_disc(fi, event, arg);
-}
-
-static void
-lli_bhup_disc(struct FsmInst *fi, int event, void *arg)
-{
- struct Channel *chanp = fi->userdata;
-
- if (chanp->debug & 1)
- link_debug(chanp, 0, "STAT_BHUP");
- HL_LL(chanp, ISDN_STAT_BHUP);
- lli_rel_b_disc(fi, event, arg);
-}
-
-static void
-lli_bhup_rel_b(struct FsmInst *fi, int event, void *arg)
-{
- struct Channel *chanp = fi->userdata;
-
- FsmChangeState(fi, ST_WAIT_DCOMMAND);
- chanp->data_open = 0;
- if (chanp->debug & 1)
- link_debug(chanp, 0, "STAT_BHUP");
- HL_LL(chanp, ISDN_STAT_BHUP);
- release_b_st(chanp);
-}
-
-static void
-lli_release_bchan(struct FsmInst *fi, int event, void *arg)
-{
- struct Channel *chanp = fi->userdata;
-
- chanp->data_open = 0;
- FsmChangeState(fi, ST_WAIT_BREL_DISC);
- chanp->b_st->lli.l4l3(chanp->b_st, DL_RELEASE | REQUEST, NULL);
-}
-
-
-static void
-lli_rel_b_dhup(struct FsmInst *fi, int event, void *arg)
-{
- struct Channel *chanp = fi->userdata;
-
- release_b_st(chanp);
- lli_dhup_close(fi, event, arg);
-}
-
-static void
-lli_bhup_dhup(struct FsmInst *fi, int event, void *arg)
-{
- struct Channel *chanp = fi->userdata;
-
- if (chanp->debug & 1)
- link_debug(chanp, 0, "STAT_BHUP");
- HL_LL(chanp, ISDN_STAT_BHUP);
- lli_rel_b_dhup(fi, event, arg);
-}
-
-static void
-lli_abort(struct FsmInst *fi, int event, void *arg)
-{
- struct Channel *chanp = fi->userdata;
-
- chanp->data_open = 0;
- chanp->b_st->lli.l4l3(chanp->b_st, DL_RELEASE | REQUEST, NULL);
- lli_bhup_dhup(fi, event, arg);
-}
-
-static void
-lli_release_req(struct FsmInst *fi, int event, void *arg)
-{
- struct Channel *chanp = fi->userdata;
-
- if (chanp->leased) {
- lli_leased_hup(fi, chanp);
- } else {
- FsmChangeState(fi, ST_WAIT_D_REL_CNF);
- chanp->d_st->lli.l4l3(chanp->d_st, CC_RELEASE | REQUEST,
- chanp->proc);
- }
-}
-
-static void
-lli_rel_b_release_req(struct FsmInst *fi, int event, void *arg)
-{
- struct Channel *chanp = fi->userdata;
-
- release_b_st(chanp);
- lli_release_req(fi, event, arg);
-}
-
-static void
-lli_bhup_release_req(struct FsmInst *fi, int event, void *arg)
-{
- struct Channel *chanp = fi->userdata;
-
- if (chanp->debug & 1)
- link_debug(chanp, 0, "STAT_BHUP");
- HL_LL(chanp, ISDN_STAT_BHUP);
- lli_rel_b_release_req(fi, event, arg);
-}
-
-
-/* processing charge info */
-static void
-lli_charge_info(struct FsmInst *fi, int event, void *arg)
-{
- struct Channel *chanp = fi->userdata;
- isdn_ctrl ic;
-
- ic.driver = chanp->cs->myid;
- ic.command = ISDN_STAT_CINF;
- ic.arg = chanp->chan;
- sprintf(ic.parm.num, "%d", chanp->proc->para.chargeinfo);
- chanp->cs->iif.statcallb(&ic);
-}
-
-/* error procedures */
-
-static void
-lli_dchan_not_ready(struct FsmInst *fi, int event, void *arg)
-{
- struct Channel *chanp = fi->userdata;
-
- if (chanp->debug & 1)
- link_debug(chanp, 0, "STAT_DHUP");
- HL_LL(chanp, ISDN_STAT_DHUP);
-}
-
-static void
-lli_no_setup_rsp(struct FsmInst *fi, int event, void *arg)
-{
- struct Channel *chanp = fi->userdata;
-
- if (chanp->debug & 1)
- link_debug(chanp, 0, "STAT_DHUP");
- HL_LL(chanp, ISDN_STAT_DHUP);
- lli_close(fi);
-}
-
-static void
-lli_error(struct FsmInst *fi, int event, void *arg)
-{
- FsmChangeState(fi, ST_WAIT_DRELEASE);
-}
-
-static void
-lli_failure_l(struct FsmInst *fi, int event, void *arg)
-{
- struct Channel *chanp = fi->userdata;
- isdn_ctrl ic;
-
- FsmChangeState(fi, ST_NULL);
- ic.driver = chanp->cs->myid;
- ic.command = ISDN_STAT_CAUSE;
- ic.arg = chanp->chan;
- sprintf(ic.parm.num, "L%02X%02X", 0, 0x2f);
- chanp->cs->iif.statcallb(&ic);
- HL_LL(chanp, ISDN_STAT_DHUP);
- chanp->Flags = 0;
- chanp->cs->cardmsg(chanp->cs, MDL_INFO_REL, (void *) (long)chanp->chan);
-}
-
-static void
-lli_rel_b_fail(struct FsmInst *fi, int event, void *arg)
-{
- struct Channel *chanp = fi->userdata;
-
- release_b_st(chanp);
- lli_failure_l(fi, event, arg);
-}
-
-static void
-lli_bhup_fail(struct FsmInst *fi, int event, void *arg)
-{
- struct Channel *chanp = fi->userdata;
-
- if (chanp->debug & 1)
- link_debug(chanp, 0, "STAT_BHUP");
- HL_LL(chanp, ISDN_STAT_BHUP);
- lli_rel_b_fail(fi, event, arg);
-}
-
-static void
-lli_failure_a(struct FsmInst *fi, int event, void *arg)
-{
- struct Channel *chanp = fi->userdata;
-
- chanp->data_open = 0;
- chanp->b_st->lli.l4l3(chanp->b_st, DL_RELEASE | REQUEST, NULL);
- lli_bhup_fail(fi, event, arg);
-}
-
-/* *INDENT-OFF* */
-static struct FsmNode fnlist[] __initdata =
-{
- {ST_NULL, EV_DIAL, lli_prep_dialout},
- {ST_NULL, EV_RESUME, lli_resume},
- {ST_NULL, EV_SETUP_IND, lli_deliver_call},
- {ST_NULL, EV_LEASED, lli_leased_in},
- {ST_OUT_DIAL, EV_SETUP_CNF, lli_init_bchan_out},
- {ST_OUT_DIAL, EV_HANGUP, lli_disconnect_req},
- {ST_OUT_DIAL, EV_DISCONNECT_IND, lli_release_req},
- {ST_OUT_DIAL, EV_RELEASE, lli_dhup_close},
- {ST_OUT_DIAL, EV_NOSETUP_RSP, lli_no_setup_rsp},
- {ST_OUT_DIAL, EV_SETUP_ERR, lli_error},
- {ST_IN_WAIT_LL, EV_LEASED_REL, lli_failure_l},
- {ST_IN_WAIT_LL, EV_ACCEPTD, lli_setup_rsp},
- {ST_IN_WAIT_LL, EV_HANGUP, lli_reject_req},
- {ST_IN_WAIT_LL, EV_DISCONNECT_IND, lli_release_req},
- {ST_IN_WAIT_LL, EV_RELEASE, lli_dhup_close},
- {ST_IN_WAIT_LL, EV_SETUP_IND, lli_deliver_call},
- {ST_IN_WAIT_LL, EV_SETUP_ERR, lli_error},
- {ST_IN_ALERT_SENT, EV_SETUP_CMPL_IND, lli_init_bchan_in},
- {ST_IN_ALERT_SENT, EV_ACCEPTD, lli_send_dconnect},
- {ST_IN_ALERT_SENT, EV_HANGUP, lli_disconnect_reject},
- {ST_IN_ALERT_SENT, EV_DISCONNECT_IND, lli_release_req},
- {ST_IN_ALERT_SENT, EV_RELEASE, lli_dhup_close},
- {ST_IN_ALERT_SENT, EV_REDIR, lli_send_redir},
- {ST_IN_PROCEED_SEND, EV_REDIR, lli_send_redir},
- {ST_IN_PROCEED_SEND, EV_ALERT, lli_send_alert},
- {ST_IN_PROCEED_SEND, EV_ACCEPTD, lli_send_dconnect},
- {ST_IN_PROCEED_SEND, EV_HANGUP, lli_disconnect_reject},
- {ST_IN_PROCEED_SEND, EV_DISCONNECT_IND, lli_dhup_close},
- {ST_IN_ALERT_SENT, EV_RELEASE, lli_dhup_close},
- {ST_IN_WAIT_CONN_ACK, EV_SETUP_CMPL_IND, lli_init_bchan_in},
- {ST_IN_WAIT_CONN_ACK, EV_HANGUP, lli_disconnect_req},
- {ST_IN_WAIT_CONN_ACK, EV_DISCONNECT_IND, lli_release_req},
- {ST_IN_WAIT_CONN_ACK, EV_RELEASE, lli_dhup_close},
- {ST_IN_WAIT_CONN_ACK, EV_CONNECT_ERR, lli_error},
- {ST_WAIT_BCONN, EV_BC_EST, lli_go_active},
- {ST_WAIT_BCONN, EV_BC_REL, lli_rel_b_disc},
- {ST_WAIT_BCONN, EV_HANGUP, lli_rel_b_disc},
- {ST_WAIT_BCONN, EV_DISCONNECT_IND, lli_rel_b_release_req},
- {ST_WAIT_BCONN, EV_RELEASE, lli_rel_b_dhup},
- {ST_WAIT_BCONN, EV_LEASED_REL, lli_rel_b_fail},
- {ST_WAIT_BCONN, EV_CINF, lli_charge_info},
- {ST_ACTIVE, EV_CINF, lli_charge_info},
- {ST_ACTIVE, EV_BC_REL, lli_bhup_rel_b},
- {ST_ACTIVE, EV_SUSPEND, lli_suspend},
- {ST_ACTIVE, EV_HANGUP, lli_disconn_bchan},
- {ST_ACTIVE, EV_DISCONNECT_IND, lli_release_bchan},
- {ST_ACTIVE, EV_RELEASE, lli_abort},
- {ST_ACTIVE, EV_LEASED_REL, lli_failure_a},
- {ST_WAIT_BRELEASE, EV_BC_REL, lli_bhup_disc},
- {ST_WAIT_BRELEASE, EV_DISCONNECT_IND, lli_bhup_release_req},
- {ST_WAIT_BRELEASE, EV_RELEASE, lli_bhup_dhup},
- {ST_WAIT_BRELEASE, EV_LEASED_REL, lli_bhup_fail},
- {ST_WAIT_BREL_DISC, EV_BC_REL, lli_bhup_release_req},
- {ST_WAIT_BREL_DISC, EV_RELEASE, lli_bhup_dhup},
- {ST_WAIT_DCOMMAND, EV_HANGUP, lli_start_disc},
- {ST_WAIT_DCOMMAND, EV_DISCONNECT_IND, lli_release_req},
- {ST_WAIT_DCOMMAND, EV_RELEASE, lli_dhup_close},
- {ST_WAIT_DCOMMAND, EV_LEASED_REL, lli_failure_l},
- {ST_WAIT_DRELEASE, EV_RELEASE, lli_dhup_close},
- {ST_WAIT_DRELEASE, EV_DIAL, lli_dchan_not_ready},
- /* ETS 300-104 16.1 */
- {ST_WAIT_D_REL_CNF, EV_RELEASE, lli_dhup_close},
- {ST_WAIT_D_REL_CNF, EV_DIAL, lli_dchan_not_ready},
-};
-/* *INDENT-ON* */
-
-int __init
-CallcNew(void)
-{
- callcfsm.state_count = STATE_COUNT;
- callcfsm.event_count = EVENT_COUNT;
- callcfsm.strEvent = strEvent;
- callcfsm.strState = strState;
- return FsmNew(&callcfsm, fnlist, ARRAY_SIZE(fnlist));
-}
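CallcNew() hands the state and event counts, the name tables and fnlist[] to FsmNew(), and the lli_* handlers are later invoked through FsmEvent() lookups keyed by (state, event). A minimal standalone sketch of that table-driven pattern, with simplified types that are not the actual HiSax Fsm API:

#include <stdio.h>

/* Standalone sketch of a (state, event) jump table built from a node list,
 * in the spirit of FsmNew()/FsmEvent(); types and names are simplified.
 */
enum { S_NULL, S_DIAL, STATE_CNT };
enum { E_DIAL, E_HANGUP, EVENT_CNT };

typedef void (*handler_t)(void *userdata);

struct node { int state, event; handler_t fn; };

static handler_t jump[STATE_CNT][EVENT_CNT];

static void do_dial(void *userdata)
{
	(void)userdata;
	printf("dialing\n");
}

static void fsm_new(const struct node *list, int n)
{
	int i;

	for (i = 0; i < n; i++)
		jump[list[i].state][list[i].event] = list[i].fn;
}

static void fsm_event(int state, int event, void *userdata)
{
	if (jump[state][event])
		jump[state][event](userdata);	/* unhandled events are ignored */
}

int main(void)
{
	static const struct node list[] = { { S_NULL, E_DIAL, do_dial } };

	fsm_new(list, 1);
	fsm_event(S_NULL, E_DIAL, NULL);	/* prints "dialing" */
	return 0;
}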
-
-void
-CallcFree(void)
-{
- FsmFree(&callcfsm);
-}
-
-static void
-release_b_st(struct Channel *chanp)
-{
- struct PStack *st = chanp->b_st;
-
- if (test_and_clear_bit(FLG_START_B, &chanp->Flags)) {
- chanp->bcs->BC_Close(chanp->bcs);
- switch (chanp->l2_active_protocol) {
- case (ISDN_PROTO_L2_X75I):
- releasestack_isdnl2(st);
- break;
- case (ISDN_PROTO_L2_HDLC):
- case (ISDN_PROTO_L2_HDLC_56K):
- case (ISDN_PROTO_L2_TRANS):
- case (ISDN_PROTO_L2_MODEM):
- case (ISDN_PROTO_L2_FAX):
- releasestack_transl2(st);
- break;
- }
- }
-}
-
-static struct Channel
-*selectfreechannel(struct PStack *st, int bch)
-{
- struct IsdnCardState *cs = st->l1.hardware;
- struct Channel *chanp = st->lli.userdata;
- int i;
-
- if (test_bit(FLG_TWO_DCHAN, &cs->HW_Flags))
- i = 1;
- else
- i = 0;
-
- if (!bch) {
- i = 2; /* virtual channel */
- chanp += 2;
- }
-
- while (i < ((bch) ? cs->chanlimit : (2 + MAX_WAITING_CALLS))) {
- if (chanp->fi.state == ST_NULL)
- return (chanp);
- chanp++;
- i++;
- }
-
- if (bch) /* number of channels is limited */ {
- i = 2; /* virtual channel */
- chanp = st->lli.userdata;
- chanp += i;
- while (i < (2 + MAX_WAITING_CALLS)) {
- if (chanp->fi.state == ST_NULL)
- return (chanp);
- chanp++;
- i++;
- }
- }
- return (NULL);
-}
-
-static void stat_redir_result(struct IsdnCardState *cs, int chan, ulong result)
-{ isdn_ctrl ic;
-
- ic.driver = cs->myid;
- ic.command = ISDN_STAT_REDIR;
- ic.arg = chan;
- ic.parm.num[0] = result;
- cs->iif.statcallb(&ic);
-} /* stat_redir_result */
-
-static void
-dchan_l3l4(struct PStack *st, int pr, void *arg)
-{
- struct l3_process *pc = arg;
- struct IsdnCardState *cs = st->l1.hardware;
- struct Channel *chanp;
-
- if (!pc)
- return;
-
- if (pr == (CC_SETUP | INDICATION)) {
- if (!(chanp = selectfreechannel(pc->st, pc->para.bchannel))) {
- pc->para.cause = 0x11; /* User busy */
- pc->st->lli.l4l3(pc->st, CC_REJECT | REQUEST, pc);
- } else {
- chanp->proc = pc;
- pc->chan = chanp;
- FsmEvent(&chanp->fi, EV_SETUP_IND, NULL);
- }
- return;
- }
- if (!(chanp = pc->chan))
- return;
-
- switch (pr) {
- case (CC_MORE_INFO | INDICATION):
- FsmEvent(&chanp->fi, EV_SETUP_IND, NULL);
- break;
- case (CC_DISCONNECT | INDICATION):
- FsmEvent(&chanp->fi, EV_DISCONNECT_IND, NULL);
- break;
- case (CC_RELEASE | CONFIRM):
- FsmEvent(&chanp->fi, EV_RELEASE, NULL);
- break;
- case (CC_SUSPEND | CONFIRM):
- FsmEvent(&chanp->fi, EV_RELEASE, NULL);
- break;
- case (CC_RESUME | CONFIRM):
- FsmEvent(&chanp->fi, EV_SETUP_CNF, NULL);
- break;
- case (CC_RESUME_ERR):
- FsmEvent(&chanp->fi, EV_RELEASE, NULL);
- break;
- case (CC_RELEASE | INDICATION):
- FsmEvent(&chanp->fi, EV_RELEASE, NULL);
- break;
- case (CC_SETUP_COMPL | INDICATION):
- FsmEvent(&chanp->fi, EV_SETUP_CMPL_IND, NULL);
- break;
- case (CC_SETUP | CONFIRM):
- FsmEvent(&chanp->fi, EV_SETUP_CNF, NULL);
- break;
- case (CC_CHARGE | INDICATION):
- FsmEvent(&chanp->fi, EV_CINF, NULL);
- break;
- case (CC_NOSETUP_RSP):
- FsmEvent(&chanp->fi, EV_NOSETUP_RSP, NULL);
- break;
- case (CC_SETUP_ERR):
- FsmEvent(&chanp->fi, EV_SETUP_ERR, NULL);
- break;
- case (CC_CONNECT_ERR):
- FsmEvent(&chanp->fi, EV_CONNECT_ERR, NULL);
- break;
- case (CC_RELEASE_ERR):
- FsmEvent(&chanp->fi, EV_RELEASE, NULL);
- break;
- case (CC_PROCEED_SEND | INDICATION):
- case (CC_PROCEEDING | INDICATION):
- case (CC_ALERTING | INDICATION):
- case (CC_PROGRESS | INDICATION):
- case (CC_NOTIFY | INDICATION):
- break;
- case (CC_REDIR | INDICATION):
- stat_redir_result(cs, chanp->chan, pc->redir_result);
- break;
- default:
- if (chanp->debug & 0x800) {
- HiSax_putstatus(chanp->cs, "Ch",
- "%d L3->L4 unknown primitive %#x",
- chanp->chan, pr);
- }
- }
-}
-
-static void
-dummy_pstack(struct PStack *st, int pr, void *arg) {
- printk(KERN_WARNING"call to dummy_pstack pr=%04x arg %lx\n", pr, (long)arg);
-}
-
-static int
-init_PStack(struct PStack **stp) {
- *stp = kmalloc(sizeof(struct PStack), GFP_KERNEL);
- if (!*stp)
- return -ENOMEM;
- (*stp)->next = NULL;
- (*stp)->l1.l1l2 = dummy_pstack;
- (*stp)->l1.l1hw = dummy_pstack;
- (*stp)->l1.l1tei = dummy_pstack;
- (*stp)->l2.l2tei = dummy_pstack;
- (*stp)->l2.l2l1 = dummy_pstack;
- (*stp)->l2.l2l3 = dummy_pstack;
- (*stp)->l3.l3l2 = dummy_pstack;
- (*stp)->l3.l3ml3 = dummy_pstack;
- (*stp)->l3.l3l4 = dummy_pstack;
- (*stp)->lli.l4l3 = dummy_pstack;
- (*stp)->ma.layer = dummy_pstack;
- return 0;
-}
-
-static int
-init_d_st(struct Channel *chanp)
-{
- struct PStack *st;
- struct IsdnCardState *cs = chanp->cs;
- char tmp[16];
- int err;
-
- err = init_PStack(&chanp->d_st);
- if (err)
- return err;
- st = chanp->d_st;
- st->next = NULL;
- HiSax_addlist(cs, st);
- setstack_HiSax(st, cs);
- st->l2.sap = 0;
- st->l2.tei = -1;
- st->l2.flag = 0;
- test_and_set_bit(FLG_MOD128, &st->l2.flag);
- test_and_set_bit(FLG_LAPD, &st->l2.flag);
- test_and_set_bit(FLG_ORIG, &st->l2.flag);
- st->l2.maxlen = MAX_DFRAME_LEN;
- st->l2.window = 1;
- st->l2.T200 = 1000; /* 1000 milliseconds */
- st->l2.N200 = 3; /* try 3 times */
- st->l2.T203 = 10000; /* 10000 milliseconds */
- if (test_bit(FLG_TWO_DCHAN, &cs->HW_Flags))
- sprintf(tmp, "DCh%d Q.921 ", chanp->chan);
- else
- sprintf(tmp, "DCh Q.921 ");
- setstack_isdnl2(st, tmp);
- setstack_l3dc(st, chanp);
- st->lli.userdata = chanp;
- st->l3.l3l4 = dchan_l3l4;
-
- return 0;
-}
-
-static __printf(2, 3) void
- callc_debug(struct FsmInst *fi, char *fmt, ...)
-{
- va_list args;
- struct Channel *chanp = fi->userdata;
- char tmp[16];
-
- va_start(args, fmt);
- sprintf(tmp, "Ch%d callc ", chanp->chan);
- VHiSax_putstatus(chanp->cs, tmp, fmt, args);
- va_end(args);
-}
-
-static int
-init_chan(int chan, struct IsdnCardState *csta)
-{
- struct Channel *chanp = csta->channel + chan;
- int err;
-
- chanp->cs = csta;
- chanp->bcs = csta->bcs + chan;
- chanp->chan = chan;
- chanp->incoming = 0;
- chanp->debug = 0;
- chanp->Flags = 0;
- chanp->leased = 0;
- err = init_PStack(&chanp->b_st);
- if (err)
- return err;
- chanp->b_st->l1.delay = DEFAULT_B_DELAY;
- chanp->fi.fsm = &callcfsm;
- chanp->fi.state = ST_NULL;
- chanp->fi.debug = 0;
- chanp->fi.userdata = chanp;
- chanp->fi.printdebug = callc_debug;
- FsmInitTimer(&chanp->fi, &chanp->dial_timer);
- FsmInitTimer(&chanp->fi, &chanp->drel_timer);
- if (!chan || (test_bit(FLG_TWO_DCHAN, &csta->HW_Flags) && chan < 2)) {
- err = init_d_st(chanp);
- if (err)
- return err;
- } else {
- chanp->d_st = csta->channel->d_st;
- }
- chanp->data_open = 0;
- return 0;
-}
-
-int
-CallcNewChan(struct IsdnCardState *csta) {
- int i, err;
-
- chancount += 2;
- err = init_chan(0, csta);
- if (err)
- return err;
- err = init_chan(1, csta);
- if (err)
- return err;
- printk(KERN_INFO "HiSax: 2 channels added\n");
-
- for (i = 0; i < MAX_WAITING_CALLS; i++) {
- err = init_chan(i + 2, csta);
- if (err)
- return err;
- }
- printk(KERN_INFO "HiSax: MAX_WAITING_CALLS added\n");
- if (test_bit(FLG_PTP, &csta->channel->d_st->l2.flag)) {
- printk(KERN_INFO "LAYER2 WATCHING ESTABLISH\n");
- csta->channel->d_st->lli.l4l3(csta->channel->d_st,
- DL_ESTABLISH | REQUEST, NULL);
- }
- return (0);
-}
-
-static void
-release_d_st(struct Channel *chanp)
-{
- struct PStack *st = chanp->d_st;
-
- if (!st)
- return;
- releasestack_isdnl2(st);
- releasestack_isdnl3(st);
- HiSax_rmlist(st->l1.hardware, st);
- kfree(st);
- chanp->d_st = NULL;
-}
-
-void
-CallcFreeChan(struct IsdnCardState *csta)
-{
- int i;
-
- for (i = 0; i < 2; i++) {
- FsmDelTimer(&csta->channel[i].drel_timer, 74);
- FsmDelTimer(&csta->channel[i].dial_timer, 75);
- if (i || test_bit(FLG_TWO_DCHAN, &csta->HW_Flags))
- release_d_st(csta->channel + i);
- if (csta->channel[i].b_st) {
- release_b_st(csta->channel + i);
- kfree(csta->channel[i].b_st);
- csta->channel[i].b_st = NULL;
- } else
- printk(KERN_WARNING "CallcFreeChan b_st ch%d already freed\n", i);
- if (i || test_bit(FLG_TWO_DCHAN, &csta->HW_Flags)) {
- release_d_st(csta->channel + i);
- } else
- csta->channel[i].d_st = NULL;
- }
-}
-
-static void
-lldata_handler(struct PStack *st, int pr, void *arg)
-{
- struct Channel *chanp = (struct Channel *) st->lli.userdata;
- struct sk_buff *skb = arg;
-
- switch (pr) {
- case (DL_DATA | INDICATION):
- if (chanp->data_open) {
- if (chanp->debug & 0x800)
- link_debug(chanp, 0, "lldata: %d", skb->len);
- chanp->cs->iif.rcvcallb_skb(chanp->cs->myid, chanp->chan, skb);
- } else {
- link_debug(chanp, 0, "lldata: channel not open");
- dev_kfree_skb(skb);
- }
- break;
- case (DL_ESTABLISH | INDICATION):
- case (DL_ESTABLISH | CONFIRM):
- FsmEvent(&chanp->fi, EV_BC_EST, NULL);
- break;
- case (DL_RELEASE | INDICATION):
- case (DL_RELEASE | CONFIRM):
- FsmEvent(&chanp->fi, EV_BC_REL, NULL);
- break;
- default:
- printk(KERN_WARNING "lldata_handler unknown primitive %#x\n",
- pr);
- break;
- }
-}
-
-static void
-lltrans_handler(struct PStack *st, int pr, void *arg)
-{
- struct Channel *chanp = (struct Channel *) st->lli.userdata;
- struct sk_buff *skb = arg;
-
- switch (pr) {
- case (PH_DATA | INDICATION):
- if (chanp->data_open) {
- if (chanp->debug & 0x800)
- link_debug(chanp, 0, "lltrans: %d", skb->len);
- chanp->cs->iif.rcvcallb_skb(chanp->cs->myid, chanp->chan, skb);
- } else {
- link_debug(chanp, 0, "lltrans: channel not open");
- dev_kfree_skb(skb);
- }
- break;
- case (PH_ACTIVATE | INDICATION):
- case (PH_ACTIVATE | CONFIRM):
- FsmEvent(&chanp->fi, EV_BC_EST, NULL);
- break;
- case (PH_DEACTIVATE | INDICATION):
- case (PH_DEACTIVATE | CONFIRM):
- FsmEvent(&chanp->fi, EV_BC_REL, NULL);
- break;
- default:
- printk(KERN_WARNING "lltrans_handler unknown primitive %#x\n",
- pr);
- break;
- }
-}
-
-void
-lli_writewakeup(struct PStack *st, int len)
-{
- struct Channel *chanp = st->lli.userdata;
- isdn_ctrl ic;
-
- if (chanp->debug & 0x800)
- link_debug(chanp, 0, "llwakeup: %d", len);
- ic.driver = chanp->cs->myid;
- ic.command = ISDN_STAT_BSENT;
- ic.arg = chanp->chan;
- ic.parm.length = len;
- chanp->cs->iif.statcallb(&ic);
-}
-
-static int
-init_b_st(struct Channel *chanp, int incoming)
-{
- struct PStack *st = chanp->b_st;
- struct IsdnCardState *cs = chanp->cs;
- char tmp[16];
-
- st->l1.hardware = cs;
- if (chanp->leased)
- st->l1.bc = chanp->chan & 1;
- else
- st->l1.bc = chanp->proc->para.bchannel - 1;
- switch (chanp->l2_active_protocol) {
- case (ISDN_PROTO_L2_X75I):
- case (ISDN_PROTO_L2_HDLC):
- st->l1.mode = L1_MODE_HDLC;
- break;
- case (ISDN_PROTO_L2_HDLC_56K):
- st->l1.mode = L1_MODE_HDLC_56K;
- break;
- case (ISDN_PROTO_L2_TRANS):
- st->l1.mode = L1_MODE_TRANS;
- break;
- case (ISDN_PROTO_L2_MODEM):
- st->l1.mode = L1_MODE_V32;
- break;
- case (ISDN_PROTO_L2_FAX):
- st->l1.mode = L1_MODE_FAX;
- break;
- }
- chanp->bcs->conmsg = NULL;
- if (chanp->bcs->BC_SetStack(st, chanp->bcs))
- return (-1);
- st->l2.flag = 0;
- test_and_set_bit(FLG_LAPB, &st->l2.flag);
- st->l2.maxlen = MAX_DATA_SIZE;
- if (!incoming)
- test_and_set_bit(FLG_ORIG, &st->l2.flag);
- st->l2.T200 = 1000; /* 1000 milliseconds */
- st->l2.window = 7;
- st->l2.N200 = 4; /* try 4 times */
- st->l2.T203 = 5000; /* 5000 milliseconds */
- st->l3.debug = 0;
- switch (chanp->l2_active_protocol) {
- case (ISDN_PROTO_L2_X75I):
- sprintf(tmp, "Ch%d X.75", chanp->chan);
- setstack_isdnl2(st, tmp);
- setstack_l3bc(st, chanp);
- st->l2.l2l3 = lldata_handler;
- st->lli.userdata = chanp;
- test_and_clear_bit(FLG_LLI_L1WAKEUP, &st->lli.flag);
- test_and_set_bit(FLG_LLI_L2WAKEUP, &st->lli.flag);
- st->l2.l2m.debug = chanp->debug & 16;
- st->l2.debug = chanp->debug & 64;
- break;
- case (ISDN_PROTO_L2_HDLC):
- case (ISDN_PROTO_L2_HDLC_56K):
- case (ISDN_PROTO_L2_TRANS):
- case (ISDN_PROTO_L2_MODEM):
- case (ISDN_PROTO_L2_FAX):
- st->l1.l1l2 = lltrans_handler;
- st->lli.userdata = chanp;
- test_and_set_bit(FLG_LLI_L1WAKEUP, &st->lli.flag);
- test_and_clear_bit(FLG_LLI_L2WAKEUP, &st->lli.flag);
- setstack_transl2(st);
- setstack_l3bc(st, chanp);
- break;
- }
- test_and_set_bit(FLG_START_B, &chanp->Flags);
- return (0);
-}
-
-static void
-leased_l4l3(struct PStack *st, int pr, void *arg)
-{
- struct Channel *chanp = (struct Channel *) st->lli.userdata;
- struct sk_buff *skb = arg;
-
- switch (pr) {
- case (DL_DATA | REQUEST):
- link_debug(chanp, 0, "leased line d-channel DATA");
- dev_kfree_skb(skb);
- break;
- case (DL_ESTABLISH | REQUEST):
- st->l2.l2l1(st, PH_ACTIVATE | REQUEST, NULL);
- break;
- case (DL_RELEASE | REQUEST):
- break;
- default:
- printk(KERN_WARNING "transd_l4l3 unknown primitive %#x\n",
- pr);
- break;
- }
-}
-
-static void
-leased_l1l2(struct PStack *st, int pr, void *arg)
-{
- struct Channel *chanp = (struct Channel *) st->lli.userdata;
- struct sk_buff *skb = arg;
- int i, event = EV_LEASED_REL;
-
- switch (pr) {
- case (PH_DATA | INDICATION):
- link_debug(chanp, 0, "leased line d-channel DATA");
- dev_kfree_skb(skb);
- break;
- case (PH_ACTIVATE | INDICATION):
- case (PH_ACTIVATE | CONFIRM):
- event = EV_LEASED;
- /* fall through */
- case (PH_DEACTIVATE | INDICATION):
- case (PH_DEACTIVATE | CONFIRM):
- if (test_bit(FLG_TWO_DCHAN, &chanp->cs->HW_Flags))
- i = 1;
- else
- i = 0;
- while (i < 2) {
- FsmEvent(&chanp->fi, event, NULL);
- chanp++;
- i++;
- }
- break;
- default:
- printk(KERN_WARNING
- "transd_l1l2 unknown primitive %#x\n", pr);
- break;
- }
-}
-
-static void
-distr_debug(struct IsdnCardState *csta, int debugflags)
-{
- int i;
- struct Channel *chanp = csta->channel;
-
- for (i = 0; i < (2 + MAX_WAITING_CALLS); i++) {
- chanp[i].debug = debugflags;
- chanp[i].fi.debug = debugflags & 2;
- chanp[i].d_st->l2.l2m.debug = debugflags & 8;
- chanp[i].b_st->l2.l2m.debug = debugflags & 0x10;
- chanp[i].d_st->l2.debug = debugflags & 0x20;
- chanp[i].b_st->l2.debug = debugflags & 0x40;
- chanp[i].d_st->l3.l3m.debug = debugflags & 0x80;
- chanp[i].b_st->l3.l3m.debug = debugflags & 0x100;
- chanp[i].b_st->ma.tei_m.debug = debugflags & 0x200;
- chanp[i].b_st->ma.debug = debugflags & 0x200;
- chanp[i].d_st->l1.l1m.debug = debugflags & 0x1000;
- chanp[i].b_st->l1.l1m.debug = debugflags & 0x2000;
- }
- if (debugflags & 4)
- csta->debug |= DEB_DLOG_HEX;
- else
- csta->debug &= ~DEB_DLOG_HEX;
-}
-
-static char tmpbuf[256];
-
-static void
-capi_debug(struct Channel *chanp, capi_msg *cm)
-{
- char *t = tmpbuf;
-
- t += QuickHex(t, (u_char *)cm, (cm->Length > 50) ? 50 : cm->Length);
- t--;
- *t = 0;
- HiSax_putstatus(chanp->cs, "Ch", "%d CAPIMSG %s", chanp->chan, tmpbuf);
-}
-
-static void
-lli_got_fac_req(struct Channel *chanp, capi_msg *cm) {
- if ((cm->para[0] != 3) || (cm->para[1] != 0))
- return;
- if (cm->para[2] < 3)
- return;
- if (cm->para[4] != 0)
- return;
- switch (cm->para[3]) {
- case 4: /* Suspend */
- strncpy(chanp->setup.phone, &cm->para[5], cm->para[5] + 1);
- FsmEvent(&chanp->fi, EV_SUSPEND, cm);
- break;
- case 5: /* Resume */
- strncpy(chanp->setup.phone, &cm->para[5], cm->para[5] + 1);
- if (chanp->fi.state == ST_NULL) {
- FsmEvent(&chanp->fi, EV_RESUME, cm);
- } else {
- FsmDelTimer(&chanp->dial_timer, 72);
- FsmAddTimer(&chanp->dial_timer, 80, EV_RESUME, cm, 73);
- }
- break;
- }
-}
-
-static void
-lli_got_manufacturer(struct Channel *chanp, struct IsdnCardState *cs, capi_msg *cm) {
- if ((cs->typ == ISDN_CTYPE_ELSA) || (cs->typ == ISDN_CTYPE_ELSA_PNP) ||
- (cs->typ == ISDN_CTYPE_ELSA_PCI)) {
- if (cs->hw.elsa.MFlag) {
- cs->cardmsg(cs, CARD_AUX_IND, cm->para);
- }
- }
-}
-
-
-/***************************************************************/
-/* Limit the available number of channels for the current card */
-/***************************************************************/
-static int
-set_channel_limit(struct IsdnCardState *cs, int chanmax)
-{
- isdn_ctrl ic;
- int i, ii;
-
- if ((chanmax < 0) || (chanmax > 2))
- return (-EINVAL);
- cs->chanlimit = 0;
- for (ii = 0; ii < 2; ii++) {
- ic.driver = cs->myid;
- ic.command = ISDN_STAT_DISCH;
- ic.arg = ii;
- if (ii >= chanmax)
- ic.parm.num[0] = 0; /* disabled */
- else
- ic.parm.num[0] = 1; /* enabled */
- i = cs->iif.statcallb(&ic);
- if (i) return (-EINVAL);
- if (ii < chanmax)
- cs->chanlimit++;
- }
- return (0);
-} /* set_channel_limit */
-
-int
-HiSax_command(isdn_ctrl *ic)
-{
- struct IsdnCardState *csta = hisax_findcard(ic->driver);
- struct PStack *st;
- struct Channel *chanp;
- int i;
- u_int num;
-
- if (!csta) {
- printk(KERN_ERR
- "HiSax: if_command %d called with invalid driverId %d!\n",
- ic->command, ic->driver);
- return -ENODEV;
- }
- switch (ic->command) {
- case (ISDN_CMD_SETEAZ):
- chanp = csta->channel + ic->arg;
- break;
- case (ISDN_CMD_SETL2):
- chanp = csta->channel + (ic->arg & 0xff);
- if (chanp->debug & 1)
- link_debug(chanp, 1, "SETL2 card %d %ld",
- csta->cardnr + 1, ic->arg >> 8);
- chanp->l2_protocol = ic->arg >> 8;
- break;
- case (ISDN_CMD_SETL3):
- chanp = csta->channel + (ic->arg & 0xff);
- if (chanp->debug & 1)
- link_debug(chanp, 1, "SETL3 card %d %ld",
- csta->cardnr + 1, ic->arg >> 8);
- chanp->l3_protocol = ic->arg >> 8;
- break;
- case (ISDN_CMD_DIAL):
- chanp = csta->channel + (ic->arg & 0xff);
- if (chanp->debug & 1)
- link_debug(chanp, 1, "DIAL %s -> %s (%d,%d)",
- ic->parm.setup.eazmsn, ic->parm.setup.phone,
- ic->parm.setup.si1, ic->parm.setup.si2);
- memcpy(&chanp->setup, &ic->parm.setup, sizeof(setup_parm));
- if (!strcmp(chanp->setup.eazmsn, "0"))
- chanp->setup.eazmsn[0] = '\0';
- /* this solution is dirty and may change if
- * we make a call-reference-based call manager */
- if (chanp->fi.state == ST_NULL) {
- FsmEvent(&chanp->fi, EV_DIAL, NULL);
- } else {
- FsmDelTimer(&chanp->dial_timer, 70);
- FsmAddTimer(&chanp->dial_timer, 50, EV_DIAL, NULL, 71);
- }
- break;
- case (ISDN_CMD_ACCEPTB):
- chanp = csta->channel + ic->arg;
- if (chanp->debug & 1)
- link_debug(chanp, 1, "ACCEPTB");
- FsmEvent(&chanp->fi, EV_ACCEPTB, NULL);
- break;
- case (ISDN_CMD_ACCEPTD):
- chanp = csta->channel + ic->arg;
- memcpy(&chanp->setup, &ic->parm.setup, sizeof(setup_parm));
- if (chanp->debug & 1)
- link_debug(chanp, 1, "ACCEPTD");
- FsmEvent(&chanp->fi, EV_ACCEPTD, NULL);
- break;
- case (ISDN_CMD_HANGUP):
- chanp = csta->channel + ic->arg;
- if (chanp->debug & 1)
- link_debug(chanp, 1, "HANGUP");
- FsmEvent(&chanp->fi, EV_HANGUP, NULL);
- break;
- case (CAPI_PUT_MESSAGE):
- chanp = csta->channel + ic->arg;
- if (chanp->debug & 1)
- capi_debug(chanp, &ic->parm.cmsg);
- if (ic->parm.cmsg.Length < 8)
- break;
- switch (ic->parm.cmsg.Command) {
- case CAPI_FACILITY:
- if (ic->parm.cmsg.Subcommand == CAPI_REQ)
- lli_got_fac_req(chanp, &ic->parm.cmsg);
- break;
- case CAPI_MANUFACTURER:
- if (ic->parm.cmsg.Subcommand == CAPI_REQ)
- lli_got_manufacturer(chanp, csta, &ic->parm.cmsg);
- break;
- default:
- break;
- }
- break;
- case (ISDN_CMD_IOCTL):
- switch (ic->arg) {
- case (0):
- num = *(unsigned int *) ic->parm.num;
- HiSax_reportcard(csta->cardnr, num);
- break;
- case (1):
- num = *(unsigned int *) ic->parm.num;
- distr_debug(csta, num);
- printk(KERN_DEBUG "HiSax: debugging flags card %d set to %x\n",
- csta->cardnr + 1, num);
- HiSax_putstatus(csta, "debugging flags ",
- "card %d set to %x", csta->cardnr + 1, num);
- break;
- case (2):
- num = *(unsigned int *) ic->parm.num;
- csta->channel[0].b_st->l1.delay = num;
- csta->channel[1].b_st->l1.delay = num;
- HiSax_putstatus(csta, "delay ", "card %d set to %d ms",
- csta->cardnr + 1, num);
- printk(KERN_DEBUG "HiSax: delay card %d set to %d ms\n",
- csta->cardnr + 1, num);
- break;
- case (5): /* set card in leased mode */
- num = *(unsigned int *) ic->parm.num;
- if ((num < 1) || (num > 2)) {
- HiSax_putstatus(csta, "Set LEASED ",
- "wrong channel %d", num);
- printk(KERN_WARNING "HiSax: Set LEASED wrong channel %d\n",
- num);
- } else {
- num--;
- chanp = csta->channel + num;
- chanp->leased = 1;
- HiSax_putstatus(csta, "Card",
- "%d channel %d set leased mode\n",
- csta->cardnr + 1, num + 1);
- chanp->d_st->l1.l1l2 = leased_l1l2;
- chanp->d_st->lli.l4l3 = leased_l4l3;
- chanp->d_st->lli.l4l3(chanp->d_st,
- DL_ESTABLISH | REQUEST, NULL);
- }
- break;
- case (6): /* set B-channel test loop */
- num = *(unsigned int *) ic->parm.num;
- if (csta->stlist)
- csta->stlist->l2.l2l1(csta->stlist,
- PH_TESTLOOP | REQUEST, (void *) (long)num);
- break;
- case (7): /* set card in PTP mode */
- num = *(unsigned int *) ic->parm.num;
- if (test_bit(FLG_TWO_DCHAN, &csta->HW_Flags)) {
- printk(KERN_ERR "HiSax PTP mode only with one TEI possible\n");
- } else if (num) {
- test_and_set_bit(FLG_PTP, &csta->channel[0].d_st->l2.flag);
- test_and_set_bit(FLG_FIXED_TEI, &csta->channel[0].d_st->l2.flag);
- csta->channel[0].d_st->l2.tei = 0;
- HiSax_putstatus(csta, "set card ", "in PTP mode");
- printk(KERN_DEBUG "HiSax: set card in PTP mode\n");
- printk(KERN_INFO "LAYER2 WATCHING ESTABLISH\n");
- csta->channel[0].d_st->lli.l4l3(csta->channel[0].d_st,
- DL_ESTABLISH | REQUEST, NULL);
- } else {
- test_and_clear_bit(FLG_PTP, &csta->channel[0].d_st->l2.flag);
- test_and_clear_bit(FLG_FIXED_TEI, &csta->channel[0].d_st->l2.flag);
- HiSax_putstatus(csta, "set card ", "in PTMP mode");
- printk(KERN_DEBUG "HiSax: set card in PTMP mode\n");
- }
- break;
- case (8): /* set card in FIXED TEI mode */
- num = *(unsigned int *)ic->parm.num;
- chanp = csta->channel + (num & 1);
- num = num >> 1;
- if (num == 127) {
- test_and_clear_bit(FLG_FIXED_TEI, &chanp->d_st->l2.flag);
- chanp->d_st->l2.tei = -1;
- HiSax_putstatus(csta, "set card ", "in VAR TEI mode");
- printk(KERN_DEBUG "HiSax: set card in VAR TEI mode\n");
- } else {
- test_and_set_bit(FLG_FIXED_TEI, &chanp->d_st->l2.flag);
- chanp->d_st->l2.tei = num;
- HiSax_putstatus(csta, "set card ", "in FIXED TEI (%d) mode", num);
- printk(KERN_DEBUG "HiSax: set card in FIXED TEI (%d) mode\n",
- num);
- }
- chanp->d_st->lli.l4l3(chanp->d_st,
- DL_ESTABLISH | REQUEST, NULL);
- break;
- case (11):
- num = csta->debug & DEB_DLOG_HEX;
- csta->debug = *(unsigned int *) ic->parm.num;
- csta->debug |= num;
- HiSax_putstatus(cards[0].cs, "l1 debugging ",
- "flags card %d set to %x",
- csta->cardnr + 1, csta->debug);
- printk(KERN_DEBUG "HiSax: l1 debugging flags card %d set to %x\n",
- csta->cardnr + 1, csta->debug);
- break;
- case (13):
- csta->channel[0].d_st->l3.debug = *(unsigned int *) ic->parm.num;
- csta->channel[1].d_st->l3.debug = *(unsigned int *) ic->parm.num;
- HiSax_putstatus(cards[0].cs, "l3 debugging ",
- "flags card %d set to %x\n", csta->cardnr + 1,
- *(unsigned int *) ic->parm.num);
- printk(KERN_DEBUG "HiSax: l3 debugging flags card %d set to %x\n",
- csta->cardnr + 1, *(unsigned int *) ic->parm.num);
- break;
- case (10):
- i = *(unsigned int *) ic->parm.num;
- return (set_channel_limit(csta, i));
- default:
- if (csta->auxcmd)
- return (csta->auxcmd(csta, ic));
- printk(KERN_DEBUG "HiSax: invalid ioctl %d\n",
- (int) ic->arg);
- return (-EINVAL);
- }
- break;
-
- case (ISDN_CMD_PROCEED):
- chanp = csta->channel + ic->arg;
- if (chanp->debug & 1)
- link_debug(chanp, 1, "PROCEED");
- FsmEvent(&chanp->fi, EV_PROCEED, NULL);
- break;
-
- case (ISDN_CMD_ALERT):
- chanp = csta->channel + ic->arg;
- if (chanp->debug & 1)
- link_debug(chanp, 1, "ALERT");
- FsmEvent(&chanp->fi, EV_ALERT, NULL);
- break;
-
- case (ISDN_CMD_REDIR):
- chanp = csta->channel + ic->arg;
- if (chanp->debug & 1)
- link_debug(chanp, 1, "REDIR");
- memcpy(&chanp->setup, &ic->parm.setup, sizeof(setup_parm));
- FsmEvent(&chanp->fi, EV_REDIR, NULL);
- break;
-
- /* protocol specific io commands */
- case (ISDN_CMD_PROT_IO):
- for (st = csta->stlist; st; st = st->next)
- if (st->protocol == (ic->arg & 0xFF))
- return (st->lli.l4l3_proto(st, ic));
- return (-EINVAL);
- break;
- default:
- if (csta->auxcmd)
- return (csta->auxcmd(csta, ic));
- return (-EINVAL);
- }
- return (0);
-}
-
-int
-HiSax_writebuf_skb(int id, int chan, int ack, struct sk_buff *skb)
-{
- struct IsdnCardState *csta = hisax_findcard(id);
- struct Channel *chanp;
- struct PStack *st;
- int len = skb->len;
- struct sk_buff *nskb;
-
- if (!csta) {
- printk(KERN_ERR
- "HiSax: if_sendbuf called with invalid driverId!\n");
- return -ENODEV;
- }
- chanp = csta->channel + chan;
- st = chanp->b_st;
- if (!chanp->data_open) {
- link_debug(chanp, 1, "writebuf: channel not open");
- return -EIO;
- }
- if (len > MAX_DATA_SIZE) {
- link_debug(chanp, 1, "writebuf: packet too large (%d bytes)", len);
- printk(KERN_WARNING "HiSax_writebuf: packet too large (%d bytes) !\n",
- len);
- return -EINVAL;
- }
- if (len) {
- if ((len + chanp->bcs->tx_cnt) > MAX_DATA_MEM) {
- /* Must return 0 here, since this is not an error
- * but a temporary lack of resources.
- */
- if (chanp->debug & 0x800)
- link_debug(chanp, 1, "writebuf: no buffers for %d bytes", len);
- return 0;
- } else if (chanp->debug & 0x800)
- link_debug(chanp, 1, "writebuf %d/%d/%d", len, chanp->bcs->tx_cnt, MAX_DATA_MEM);
- nskb = skb_clone(skb, GFP_ATOMIC);
- if (nskb) {
- nskb->truesize = nskb->len;
- if (!ack)
- nskb->pkt_type = PACKET_NOACK;
- if (chanp->l2_active_protocol == ISDN_PROTO_L2_X75I)
- st->l3.l3l2(st, DL_DATA | REQUEST, nskb);
- else {
- chanp->bcs->tx_cnt += len;
- st->l2.l2l1(st, PH_DATA | REQUEST, nskb);
- }
- dev_kfree_skb(skb);
- } else
- len = 0;
- }
- return (len);
-}
diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c
deleted file mode 100644
index de965115a183..000000000000
--- a/drivers/isdn/hisax/config.c
+++ /dev/null
@@ -1,1993 +0,0 @@
-/* $Id: config.c,v 2.84.2.5 2004/02/11 13:21:33 keil Exp $
- *
- * Author Karsten Keil
- * Copyright by Karsten Keil <keil@isdn4linux.de>
- * by Kai Germaschewski <kai.germaschewski@gmx.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- * For changes and modifications please read
- * Documentation/isdn/HiSax.cert
- *
- * based on the teles driver from Jan den Ouden
- *
- */
-
-#include <linux/types.h>
-#include <linux/stddef.h>
-#include <linux/timer.h>
-#include <linux/init.h>
-#include "hisax.h"
-#include <linux/module.h>
-#include <linux/kernel_stat.h>
-#include <linux/workqueue.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-#define HISAX_STATUS_BUFSIZE 4096
-
-/*
- * This structure array contains one entry per card. An entry looks
- * like this:
- *
- * { type, protocol, p0, p1, p2, NULL }
- *
- * type
- * 1 Teles 16.0 p0=irq p1=membase p2=iobase
- * 2 Teles 8.0 p0=irq p1=membase
- * 3 Teles 16.3 p0=irq p1=iobase
- * 4 Creatix PNP p0=irq p1=IO0 (ISAC) p2=IO1 (HSCX)
- * 5 AVM A1 (Fritz) p0=irq p1=iobase
- * 6 ELSA PC [p0=iobase] or nothing (autodetect)
- * 7 ELSA Quickstep p0=irq p1=iobase
- * 8 Teles PCMCIA p0=irq p1=iobase
- * 9 ITK ix1-micro p0=irq p1=iobase
- * 10 ELSA PCMCIA p0=irq p1=iobase
- * 11 Eicon.Diehl Diva p0=irq p1=iobase
- * 12 Asuscom ISDNLink p0=irq p1=iobase
- * 13 Teleint p0=irq p1=iobase
- * 14 Teles 16.3c p0=irq p1=iobase
- * 15 Sedlbauer speed p0=irq p1=iobase
- * 15 Sedlbauer PC/104 p0=irq p1=iobase
- * 15 Sedlbauer speed pci no parameter
- * 16 USR Sportster internal p0=irq p1=iobase
- * 17 MIC card p0=irq p1=iobase
- * 18 ELSA Quickstep 1000PCI no parameter
- * 19 Compaq ISDN S0 ISA card p0=irq p1=IO0 (HSCX) p2=IO1 (ISAC) p3=IO2
- * 20 Travers Technologies NETjet-S PCI card
- * 21 TELES PCI no parameter
- * 22 Sedlbauer Speed Star p0=irq p1=iobase
- * 23 reserved
- * 24 Dr Neuhaus Niccy PnP/PCI card p0=irq p1=IO0 p2=IO1 (PnP only)
- * 25 Teles S0Box p0=irq p1=iobase (from isapnp setup)
- * 26 AVM A1 PCMCIA (Fritz) p0=irq p1=iobase
- * 27 AVM PnP/PCI p0=irq p1=iobase (PCI no parameter)
- * 28 Sedlbauer Speed Fax+ p0=irq p1=iobase (from isapnp setup)
- * 29 Siemens I-Surf p0=irq p1=iobase p2=memory (from isapnp setup)
- * 30 ACER P10 p0=irq p1=iobase (from isapnp setup)
- * 31 HST Saphir p0=irq p1=iobase
- * 32 Telekom A4T none
- * 33 Scitel Quadro p0=subcontroller (4*S0, subctrl 1...4)
- * 34 Gazel ISDN cards
- * 35 HFC 2BDS0 PCI none
- * 36 Winbond 6692 PCI none
- * 37 HFC 2BDS0 S+/SP p0=irq p1=iobase
- * 38 Travers Technologies NETspider-U PCI card
- * 39 HFC 2BDS0-SP PCMCIA p0=irq p1=iobase
- * 40 hotplug interface
- * 41 Formula-n enter:now ISDN PCI a/b none
- *
- * protocol can be either ISDN_PTYPE_EURO or ISDN_PTYPE_1TR6 or ISDN_PTYPE_NI1
- *
- *
- */
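[Editorial aside: the entry format documented above maps onto the IsdnCard slots
defined further down (FIRST_CARD / cards[]). A minimal sketch of one statically
configured entry -- using the field names as they are used throughout this file
and purely illustrative values (a Teles 16.3 on IRQ 5, I/O 0x580, EDSS1/EURO) --
would look like:

	/* Illustrative only: { type, protocol, p0, p1, p2, NULL } for a
	 * Teles 16.3 -- p0 = irq, p1 = iobase, cs stays NULL until setup. */
	static struct IsdnCard example_card = {
		.typ      = ISDN_CTYPE_16_3,
		.protocol = ISDN_PTYPE_EURO,
		.para     = { 5, 0x580 },
	};
]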
-
-const char *CardType[] = {
- "No Card", "Teles 16.0", "Teles 8.0", "Teles 16.3",
- "Creatix/Teles PnP", "AVM A1", "Elsa ML", "Elsa Quickstep",
- "Teles PCMCIA", "ITK ix1-micro Rev.2", "Elsa PCMCIA",
- "Eicon.Diehl Diva", "ISDNLink", "TeleInt", "Teles 16.3c",
- "Sedlbauer Speed Card", "USR Sportster", "ith mic Linux",
- "Elsa PCI", "Compaq ISA", "NETjet-S", "Teles PCI",
- "Sedlbauer Speed Star (PCMCIA)", "AMD 7930", "NICCY", "S0Box",
- "AVM A1 (PCMCIA)", "AVM Fritz PnP/PCI", "Sedlbauer Speed Fax +",
- "Siemens I-Surf", "Acer P10", "HST Saphir", "Telekom A4T",
- "Scitel Quadro", "Gazel", "HFC 2BDS0 PCI", "Winbond 6692",
- "HFC 2BDS0 SX", "NETspider-U", "HFC-2BDS0-SP PCMCIA",
- "Hotplug", "Formula-n enter:now PCI a/b",
-};
-
-#ifdef CONFIG_HISAX_ELSA
-#define DEFAULT_CARD ISDN_CTYPE_ELSA
-#define DEFAULT_CFG {0, 0, 0, 0}
-#endif
-
-#ifdef CONFIG_HISAX_AVM_A1
-#undef DEFAULT_CARD
-#undef DEFAULT_CFG
-#define DEFAULT_CARD ISDN_CTYPE_A1
-#define DEFAULT_CFG {10, 0x340, 0, 0}
-#endif
-
-#ifdef CONFIG_HISAX_AVM_A1_PCMCIA
-#undef DEFAULT_CARD
-#undef DEFAULT_CFG
-#define DEFAULT_CARD ISDN_CTYPE_A1_PCMCIA
-#define DEFAULT_CFG {11, 0x170, 0, 0}
-#endif
-
-#ifdef CONFIG_HISAX_FRITZPCI
-#undef DEFAULT_CARD
-#undef DEFAULT_CFG
-#define DEFAULT_CARD ISDN_CTYPE_FRITZPCI
-#define DEFAULT_CFG {0, 0, 0, 0}
-#endif
-
-#ifdef CONFIG_HISAX_16_3
-#undef DEFAULT_CARD
-#undef DEFAULT_CFG
-#define DEFAULT_CARD ISDN_CTYPE_16_3
-#define DEFAULT_CFG {15, 0x180, 0, 0}
-#endif
-
-#ifdef CONFIG_HISAX_S0BOX
-#undef DEFAULT_CARD
-#undef DEFAULT_CFG
-#define DEFAULT_CARD ISDN_CTYPE_S0BOX
-#define DEFAULT_CFG {7, 0x378, 0, 0}
-#endif
-
-#ifdef CONFIG_HISAX_16_0
-#undef DEFAULT_CARD
-#undef DEFAULT_CFG
-#define DEFAULT_CARD ISDN_CTYPE_16_0
-#define DEFAULT_CFG {15, 0xd0000, 0xd80, 0}
-#endif
-
-#ifdef CONFIG_HISAX_TELESPCI
-#undef DEFAULT_CARD
-#undef DEFAULT_CFG
-#define DEFAULT_CARD ISDN_CTYPE_TELESPCI
-#define DEFAULT_CFG {0, 0, 0, 0}
-#endif
-
-#ifdef CONFIG_HISAX_IX1MICROR2
-#undef DEFAULT_CARD
-#undef DEFAULT_CFG
-#define DEFAULT_CARD ISDN_CTYPE_IX1MICROR2
-#define DEFAULT_CFG {5, 0x390, 0, 0}
-#endif
-
-#ifdef CONFIG_HISAX_DIEHLDIVA
-#undef DEFAULT_CARD
-#undef DEFAULT_CFG
-#define DEFAULT_CARD ISDN_CTYPE_DIEHLDIVA
-#define DEFAULT_CFG {0, 0x0, 0, 0}
-#endif
-
-#ifdef CONFIG_HISAX_ASUSCOM
-#undef DEFAULT_CARD
-#undef DEFAULT_CFG
-#define DEFAULT_CARD ISDN_CTYPE_ASUSCOM
-#define DEFAULT_CFG {5, 0x200, 0, 0}
-#endif
-
-#ifdef CONFIG_HISAX_TELEINT
-#undef DEFAULT_CARD
-#undef DEFAULT_CFG
-#define DEFAULT_CARD ISDN_CTYPE_TELEINT
-#define DEFAULT_CFG {5, 0x300, 0, 0}
-#endif
-
-#ifdef CONFIG_HISAX_SEDLBAUER
-#undef DEFAULT_CARD
-#undef DEFAULT_CFG
-#define DEFAULT_CARD ISDN_CTYPE_SEDLBAUER
-#define DEFAULT_CFG {11, 0x270, 0, 0}
-#endif
-
-#ifdef CONFIG_HISAX_SPORTSTER
-#undef DEFAULT_CARD
-#undef DEFAULT_CFG
-#define DEFAULT_CARD ISDN_CTYPE_SPORTSTER
-#define DEFAULT_CFG {7, 0x268, 0, 0}
-#endif
-
-#ifdef CONFIG_HISAX_MIC
-#undef DEFAULT_CARD
-#undef DEFAULT_CFG
-#define DEFAULT_CARD ISDN_CTYPE_MIC
-#define DEFAULT_CFG {12, 0x3e0, 0, 0}
-#endif
-
-#ifdef CONFIG_HISAX_NETJET
-#undef DEFAULT_CARD
-#undef DEFAULT_CFG
-#define DEFAULT_CARD ISDN_CTYPE_NETJET_S
-#define DEFAULT_CFG {0, 0, 0, 0}
-#endif
-
-#ifdef CONFIG_HISAX_HFCS
-#undef DEFAULT_CARD
-#undef DEFAULT_CFG
-#define DEFAULT_CARD ISDN_CTYPE_TELES3C
-#define DEFAULT_CFG {5, 0x500, 0, 0}
-#endif
-
-#ifdef CONFIG_HISAX_HFC_PCI
-#undef DEFAULT_CARD
-#undef DEFAULT_CFG
-#define DEFAULT_CARD ISDN_CTYPE_HFC_PCI
-#define DEFAULT_CFG {0, 0, 0, 0}
-#endif
-
-#ifdef CONFIG_HISAX_HFC_SX
-#undef DEFAULT_CARD
-#undef DEFAULT_CFG
-#define DEFAULT_CARD ISDN_CTYPE_HFC_SX
-#define DEFAULT_CFG {5, 0x2E0, 0, 0}
-#endif
-
-#ifdef CONFIG_HISAX_NICCY
-#undef DEFAULT_CARD
-#undef DEFAULT_CFG
-#define DEFAULT_CARD ISDN_CTYPE_NICCY
-#define DEFAULT_CFG {0, 0x0, 0, 0}
-#endif
-
-#ifdef CONFIG_HISAX_ISURF
-#undef DEFAULT_CARD
-#undef DEFAULT_CFG
-#define DEFAULT_CARD ISDN_CTYPE_ISURF
-#define DEFAULT_CFG {5, 0x100, 0xc8000, 0}
-#endif
-
-#ifdef CONFIG_HISAX_HSTSAPHIR
-#undef DEFAULT_CARD
-#undef DEFAULT_CFG
-#define DEFAULT_CARD ISDN_CTYPE_HSTSAPHIR
-#define DEFAULT_CFG {5, 0x250, 0, 0}
-#endif
-
-#ifdef CONFIG_HISAX_BKM_A4T
-#undef DEFAULT_CARD
-#undef DEFAULT_CFG
-#define DEFAULT_CARD ISDN_CTYPE_BKM_A4T
-#define DEFAULT_CFG {0, 0x0, 0, 0}
-#endif
-
-#ifdef CONFIG_HISAX_SCT_QUADRO
-#undef DEFAULT_CARD
-#undef DEFAULT_CFG
-#define DEFAULT_CARD ISDN_CTYPE_SCT_QUADRO
-#define DEFAULT_CFG {1, 0x0, 0, 0}
-#endif
-
-#ifdef CONFIG_HISAX_GAZEL
-#undef DEFAULT_CARD
-#undef DEFAULT_CFG
-#define DEFAULT_CARD ISDN_CTYPE_GAZEL
-#define DEFAULT_CFG {15, 0x180, 0, 0}
-#endif
-
-#ifdef CONFIG_HISAX_W6692
-#undef DEFAULT_CARD
-#undef DEFAULT_CFG
-#define DEFAULT_CARD ISDN_CTYPE_W6692
-#define DEFAULT_CFG {0, 0, 0, 0}
-#endif
-
-#ifdef CONFIG_HISAX_NETJET_U
-#undef DEFAULT_CARD
-#undef DEFAULT_CFG
-#define DEFAULT_CARD ISDN_CTYPE_NETJET_U
-#define DEFAULT_CFG {0, 0, 0, 0}
-#endif
-
-#ifdef CONFIG_HISAX_1TR6
-#define DEFAULT_PROTO ISDN_PTYPE_1TR6
-#define DEFAULT_PROTO_NAME "1TR6"
-#endif
-#ifdef CONFIG_HISAX_NI1
-#undef DEFAULT_PROTO
-#define DEFAULT_PROTO ISDN_PTYPE_NI1
-#undef DEFAULT_PROTO_NAME
-#define DEFAULT_PROTO_NAME "NI1"
-#endif
-#ifdef CONFIG_HISAX_EURO
-#undef DEFAULT_PROTO
-#define DEFAULT_PROTO ISDN_PTYPE_EURO
-#undef DEFAULT_PROTO_NAME
-#define DEFAULT_PROTO_NAME "EURO"
-#endif
-#ifndef DEFAULT_PROTO
-#define DEFAULT_PROTO ISDN_PTYPE_UNKNOWN
-#define DEFAULT_PROTO_NAME "UNKNOWN"
-#endif
-#ifndef DEFAULT_CARD
-#define DEFAULT_CARD 0
-#define DEFAULT_CFG {0, 0, 0, 0}
-#endif
-
-#define FIRST_CARD { \
- DEFAULT_CARD, \
- DEFAULT_PROTO, \
- DEFAULT_CFG, \
- NULL, \
- }
-
-struct IsdnCard cards[HISAX_MAX_CARDS] = {
- FIRST_CARD,
-};
-
-#define HISAX_IDSIZE (HISAX_MAX_CARDS * 8)
-static char HiSaxID[HISAX_IDSIZE] = { 0, };
-
-static char *HiSax_id = HiSaxID;
-#ifdef MODULE
-/* Variables for insmod */
-static int type[HISAX_MAX_CARDS] = { 0, };
-static int protocol[HISAX_MAX_CARDS] = { 0, };
-static int io[HISAX_MAX_CARDS] = { 0, };
-#undef IO0_IO1
-#ifdef CONFIG_HISAX_16_3
-#define IO0_IO1
-#endif
-#ifdef CONFIG_HISAX_NICCY
-#undef IO0_IO1
-#define IO0_IO1
-#endif
-#ifdef IO0_IO1
-static int io0[HISAX_MAX_CARDS] = { 0, };
-static int io1[HISAX_MAX_CARDS] = { 0, };
-#endif
-static int irq[HISAX_MAX_CARDS] = { 0, };
-static int mem[HISAX_MAX_CARDS] = { 0, };
-static char *id = HiSaxID;
-
-MODULE_DESCRIPTION("ISDN4Linux: Driver for passive ISDN cards");
-MODULE_AUTHOR("Karsten Keil");
-MODULE_LICENSE("GPL");
-module_param_array(type, int, NULL, 0);
-module_param_array(protocol, int, NULL, 0);
-module_param_hw_array(io, int, ioport, NULL, 0);
-module_param_hw_array(irq, int, irq, NULL, 0);
-module_param_hw_array(mem, int, iomem, NULL, 0);
-module_param(id, charp, 0);
-#ifdef IO0_IO1
-module_param_hw_array(io0, int, ioport, NULL, 0);
-module_param_hw_array(io1, int, ioport, NULL, 0);
-#endif
-#endif /* MODULE */
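[Editorial aside: for orientation only, with the module parameters declared above
a hypothetical two-card load would pass comma-separated arrays, e.g.:

	modprobe hisax type=3,7 protocol=2,2 irq=5,10 io=0x580,0x300 id=HiSax

The type numbers come from the table at the top of this file (3 = Teles 16.3,
7 = ELSA Quickstep); the I/O and IRQ values are invented for illustration, and
protocol=2 selects EDSS1/EURO assuming the usual ISDN_PTYPE_EURO value of 2.]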
-
-int nrcards;
-
-char *HiSax_getrev(const char *revision)
-{
- char *rev;
- char *p;
-
- if ((p = strchr(revision, ':'))) {
- rev = p + 2;
- p = strchr(rev, '$');
- *--p = 0;
- } else
- rev = "???";
- return rev;
-}
-
-static void __init HiSaxVersion(void)
-{
- char tmp[64];
-
- printk(KERN_INFO "HiSax: Linux Driver for passive ISDN cards\n");
-#ifdef MODULE
- printk(KERN_INFO "HiSax: Version 3.5 (module)\n");
-#else
- printk(KERN_INFO "HiSax: Version 3.5 (kernel)\n");
-#endif
- strcpy(tmp, l1_revision);
- printk(KERN_INFO "HiSax: Layer1 Revision %s\n", HiSax_getrev(tmp));
- strcpy(tmp, l2_revision);
- printk(KERN_INFO "HiSax: Layer2 Revision %s\n", HiSax_getrev(tmp));
- strcpy(tmp, tei_revision);
- printk(KERN_INFO "HiSax: TeiMgr Revision %s\n", HiSax_getrev(tmp));
- strcpy(tmp, l3_revision);
- printk(KERN_INFO "HiSax: Layer3 Revision %s\n", HiSax_getrev(tmp));
- strcpy(tmp, lli_revision);
- printk(KERN_INFO "HiSax: LinkLayer Revision %s\n",
- HiSax_getrev(tmp));
-}
-
-#ifndef MODULE
-#define MAX_ARG (HISAX_MAX_CARDS * 5)
-static int __init HiSax_setup(char *line)
-{
- int i, j, argc;
- int ints[MAX_ARG + 1];
- char *str;
-
- str = get_options(line, MAX_ARG, ints);
- argc = ints[0];
- printk(KERN_DEBUG "HiSax_setup: argc(%d) str(%s)\n", argc, str);
- i = 0;
- j = 1;
- while (argc && (i < HISAX_MAX_CARDS)) {
- cards[i].protocol = DEFAULT_PROTO;
- if (argc) {
- cards[i].typ = ints[j];
- j++;
- argc--;
- }
- if (argc) {
- cards[i].protocol = ints[j];
- j++;
- argc--;
- }
- if (argc) {
- cards[i].para[0] = ints[j];
- j++;
- argc--;
- }
- if (argc) {
- cards[i].para[1] = ints[j];
- j++;
- argc--;
- }
- if (argc) {
- cards[i].para[2] = ints[j];
- j++;
- argc--;
- }
- i++;
- }
- if (str && *str) {
- if (strlen(str) < HISAX_IDSIZE)
- strcpy(HiSaxID, str);
- else
-			printk(KERN_WARNING "HiSax: ID too long!\n");
- } else
- strcpy(HiSaxID, "HiSax");
-
- HiSax_id = HiSaxID;
- return 1;
-}
-
-__setup("hisax=", HiSax_setup);
-#endif /* MODULE */
-
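[Editorial aside: for the built-in case, HiSax_setup() above consumes up to five
integers per card in the order typ, protocol, para[0], para[1], para[2], followed
by an optional driver-ID string. A hypothetical boot parameter for a single
Teles 16.3 (type 3) on IRQ 5, I/O 0x580 would therefore be:

	hisax=3,2,5,0x580,HiSax

again assuming ISDN_PTYPE_EURO == 2; the trailing "HiSax" ends up in HiSaxID.]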
-#if CARD_TELES0
-extern int setup_teles0(struct IsdnCard *card);
-#endif
-
-#if CARD_TELES3
-extern int setup_teles3(struct IsdnCard *card);
-#endif
-
-#if CARD_S0BOX
-extern int setup_s0box(struct IsdnCard *card);
-#endif
-
-#if CARD_TELESPCI
-extern int setup_telespci(struct IsdnCard *card);
-#endif
-
-#if CARD_AVM_A1
-extern int setup_avm_a1(struct IsdnCard *card);
-#endif
-
-#if CARD_AVM_A1_PCMCIA
-extern int setup_avm_a1_pcmcia(struct IsdnCard *card);
-#endif
-
-#if CARD_FRITZPCI
-extern int setup_avm_pcipnp(struct IsdnCard *card);
-#endif
-
-#if CARD_ELSA
-extern int setup_elsa(struct IsdnCard *card);
-#endif
-
-#if CARD_IX1MICROR2
-extern int setup_ix1micro(struct IsdnCard *card);
-#endif
-
-#if CARD_DIEHLDIVA
-extern int setup_diva(struct IsdnCard *card);
-#endif
-
-#if CARD_ASUSCOM
-extern int setup_asuscom(struct IsdnCard *card);
-#endif
-
-#if CARD_TELEINT
-extern int setup_TeleInt(struct IsdnCard *card);
-#endif
-
-#if CARD_SEDLBAUER
-extern int setup_sedlbauer(struct IsdnCard *card);
-#endif
-
-#if CARD_SPORTSTER
-extern int setup_sportster(struct IsdnCard *card);
-#endif
-
-#if CARD_MIC
-extern int setup_mic(struct IsdnCard *card);
-#endif
-
-#if CARD_NETJET_S
-extern int setup_netjet_s(struct IsdnCard *card);
-#endif
-
-#if CARD_HFCS
-extern int setup_hfcs(struct IsdnCard *card);
-#endif
-
-#if CARD_HFC_PCI
-extern int setup_hfcpci(struct IsdnCard *card);
-#endif
-
-#if CARD_HFC_SX
-extern int setup_hfcsx(struct IsdnCard *card);
-#endif
-
-#if CARD_NICCY
-extern int setup_niccy(struct IsdnCard *card);
-#endif
-
-#if CARD_ISURF
-extern int setup_isurf(struct IsdnCard *card);
-#endif
-
-#if CARD_HSTSAPHIR
-extern int setup_saphir(struct IsdnCard *card);
-#endif
-
-#if CARD_BKM_A4T
-extern int setup_bkm_a4t(struct IsdnCard *card);
-#endif
-
-#if CARD_SCT_QUADRO
-extern int setup_sct_quadro(struct IsdnCard *card);
-#endif
-
-#if CARD_GAZEL
-extern int setup_gazel(struct IsdnCard *card);
-#endif
-
-#if CARD_W6692
-extern int setup_w6692(struct IsdnCard *card);
-#endif
-
-#if CARD_NETJET_U
-extern int setup_netjet_u(struct IsdnCard *card);
-#endif
-
-#if CARD_FN_ENTERNOW_PCI
-extern int setup_enternow_pci(struct IsdnCard *card);
-#endif
-
-/*
- * Find card with given driverId
- */
-static inline struct IsdnCardState *hisax_findcard(int driverid)
-{
- int i;
-
- for (i = 0; i < nrcards; i++)
- if (cards[i].cs)
- if (cards[i].cs->myid == driverid)
- return cards[i].cs;
- return NULL;
-}
-
-/*
- * Find card with given card number
- */
-#if 0
-struct IsdnCardState *hisax_get_card(int cardnr)
-{
- if ((cardnr <= nrcards) && (cardnr > 0))
- if (cards[cardnr - 1].cs)
- return cards[cardnr - 1].cs;
- return NULL;
-}
-#endif /* 0 */
-
-static int HiSax_readstatus(u_char __user *buf, int len, int id, int channel)
-{
- int count, cnt;
- u_char __user *p = buf;
- struct IsdnCardState *cs = hisax_findcard(id);
-
- if (cs) {
- if (len > HISAX_STATUS_BUFSIZE) {
- printk(KERN_WARNING
- "HiSax: status overflow readstat %d/%d\n",
- len, HISAX_STATUS_BUFSIZE);
- }
- count = cs->status_end - cs->status_read + 1;
- if (count >= len)
- count = len;
- if (copy_to_user(p, cs->status_read, count))
- return -EFAULT;
- cs->status_read += count;
- if (cs->status_read > cs->status_end)
- cs->status_read = cs->status_buf;
- p += count;
- count = len - count;
- while (count) {
- if (count > HISAX_STATUS_BUFSIZE)
- cnt = HISAX_STATUS_BUFSIZE;
- else
- cnt = count;
- if (copy_to_user(p, cs->status_read, cnt))
- return -EFAULT;
- p += cnt;
- cs->status_read += cnt % HISAX_STATUS_BUFSIZE;
- count -= cnt;
- }
- return len;
- } else {
- printk(KERN_ERR
- "HiSax: if_readstatus called with invalid driverId!\n");
- return -ENODEV;
- }
-}
-
-int jiftime(char *s, long mark)
-{
- s += 8;
-
- *s-- = '\0';
- *s-- = mark % 10 + '0';
- mark /= 10;
- *s-- = mark % 10 + '0';
- mark /= 10;
- *s-- = '.';
- *s-- = mark % 10 + '0';
- mark /= 10;
- *s-- = mark % 6 + '0';
- mark /= 6;
- *s-- = ':';
- *s-- = mark % 10 + '0';
- mark /= 10;
- *s-- = mark % 10 + '0';
- return 8;
-}
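[Editorial aside: a worked example of jiftime(). It fills the 8-character buffer
backwards, so for mark = 123456 (centiseconds, i.e. jiffies with the historical
HZ = 100) it emits 6, 5, '.', 4, 123 % 6 = 3, ':', 0, 2 and produces "20:34.56",
that is 20 minutes 34.56 seconds. The tens-of-seconds digit is taken modulo 6 so
the carry into minutes comes out correctly.]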
-
-static u_char tmpbuf[HISAX_STATUS_BUFSIZE];
-
-void VHiSax_putstatus(struct IsdnCardState *cs, char *head, const char *fmt,
- va_list args)
-{
- /* if head == NULL the fmt contains the full info */
-
- u_long flags;
- int count, i;
- u_char *p;
- isdn_ctrl ic;
- int len;
- const u_char *data;
-
- if (!cs) {
-		printk(KERN_WARNING "HiSax: No CardStatus for message\n");
- return;
- }
- spin_lock_irqsave(&cs->statlock, flags);
- if (head) {
- p = tmpbuf;
- p += jiftime(p, jiffies);
- p += sprintf(p, " %s", head);
- p += vsprintf(p, fmt, args);
- *p++ = '\n';
- *p = 0;
- len = p - tmpbuf;
- data = tmpbuf;
- } else {
- data = fmt;
- len = strlen(fmt);
- }
- if (len > HISAX_STATUS_BUFSIZE) {
- spin_unlock_irqrestore(&cs->statlock, flags);
- printk(KERN_WARNING "HiSax: status overflow %d/%d\n",
- len, HISAX_STATUS_BUFSIZE);
- return;
- }
- count = len;
- i = cs->status_end - cs->status_write + 1;
- if (i >= len)
- i = len;
- len -= i;
- memcpy(cs->status_write, data, i);
- cs->status_write += i;
- if (cs->status_write > cs->status_end)
- cs->status_write = cs->status_buf;
- if (len) {
- memcpy(cs->status_write, data + i, len);
- cs->status_write += len;
- }
-#ifdef KERNELSTACK_DEBUG
- i = (ulong) & len - current->kernel_stack_page;
- sprintf(tmpbuf, "kstack %s %lx use %ld\n", current->comm,
- current->kernel_stack_page, i);
- len = strlen(tmpbuf);
- for (p = tmpbuf, i = len; i > 0; i--, p++) {
- *cs->status_write++ = *p;
- if (cs->status_write > cs->status_end)
- cs->status_write = cs->status_buf;
- count++;
- }
-#endif
- spin_unlock_irqrestore(&cs->statlock, flags);
- if (count) {
- ic.command = ISDN_STAT_STAVAIL;
- ic.driver = cs->myid;
- ic.arg = count;
- cs->iif.statcallb(&ic);
- }
-}
-
-void HiSax_putstatus(struct IsdnCardState *cs, char *head, const char *fmt, ...)
-{
- va_list args;
-
- va_start(args, fmt);
- VHiSax_putstatus(cs, head, fmt, args);
- va_end(args);
-}
-
-int ll_run(struct IsdnCardState *cs, int addfeatures)
-{
- isdn_ctrl ic;
-
- ic.driver = cs->myid;
- ic.command = ISDN_STAT_RUN;
- cs->iif.features |= addfeatures;
- cs->iif.statcallb(&ic);
- return 0;
-}
-
-static void ll_stop(struct IsdnCardState *cs)
-{
- isdn_ctrl ic;
-
- ic.command = ISDN_STAT_STOP;
- ic.driver = cs->myid;
- cs->iif.statcallb(&ic);
- // CallcFreeChan(cs);
-}
-
-static void ll_unload(struct IsdnCardState *cs)
-{
- isdn_ctrl ic;
-
- ic.command = ISDN_STAT_UNLOAD;
- ic.driver = cs->myid;
- cs->iif.statcallb(&ic);
- kfree(cs->status_buf);
- cs->status_read = NULL;
- cs->status_write = NULL;
- cs->status_end = NULL;
- kfree(cs->dlog);
- cs->dlog = NULL;
-}
-
-static void closecard(int cardnr)
-{
- struct IsdnCardState *csta = cards[cardnr].cs;
-
- if (csta->bcs->BC_Close != NULL) {
- csta->bcs->BC_Close(csta->bcs + 1);
- csta->bcs->BC_Close(csta->bcs);
- }
-
- skb_queue_purge(&csta->rq);
- skb_queue_purge(&csta->sq);
- kfree(csta->rcvbuf);
- csta->rcvbuf = NULL;
- if (csta->tx_skb) {
- dev_kfree_skb(csta->tx_skb);
- csta->tx_skb = NULL;
- }
- if (csta->DC_Close != NULL) {
- csta->DC_Close(csta);
- }
- if (csta->cardmsg)
- csta->cardmsg(csta, CARD_RELEASE, NULL);
- if (csta->dbusytimer.function != NULL) // FIXME?
- del_timer(&csta->dbusytimer);
- ll_unload(csta);
-}
-
-static irqreturn_t card_irq(int intno, void *dev_id)
-{
- struct IsdnCardState *cs = dev_id;
- irqreturn_t ret = cs->irq_func(intno, cs);
-
- if (ret == IRQ_HANDLED)
- cs->irq_cnt++;
- return ret;
-}
-
-static int init_card(struct IsdnCardState *cs)
-{
- int irq_cnt, cnt = 3, ret;
-
- if (!cs->irq) {
- ret = cs->cardmsg(cs, CARD_INIT, NULL);
- return (ret);
- }
- irq_cnt = cs->irq_cnt = 0;
- printk(KERN_INFO "%s: IRQ %d count %d\n", CardType[cs->typ],
- cs->irq, irq_cnt);
- if (request_irq(cs->irq, card_irq, cs->irq_flags, "HiSax", cs)) {
- printk(KERN_WARNING "HiSax: couldn't get interrupt %d\n",
- cs->irq);
- return 1;
- }
- while (cnt) {
- cs->cardmsg(cs, CARD_INIT, NULL);
- /* Timeout 10ms */
- msleep(10);
- printk(KERN_INFO "%s: IRQ %d count %d\n",
- CardType[cs->typ], cs->irq, cs->irq_cnt);
- if (cs->irq_cnt == irq_cnt) {
- printk(KERN_WARNING
- "%s: IRQ(%d) getting no interrupts during init %d\n",
- CardType[cs->typ], cs->irq, 4 - cnt);
- if (cnt == 1) {
- free_irq(cs->irq, cs);
- return 2;
- } else {
- cs->cardmsg(cs, CARD_RESET, NULL);
- cnt--;
- }
- } else {
- cs->cardmsg(cs, CARD_TEST, NULL);
- return 0;
- }
- }
- return 3;
-}
-
-static int hisax_cs_setup_card(struct IsdnCard *card)
-{
- int ret;
-
- switch (card->typ) {
-#if CARD_TELES0
- case ISDN_CTYPE_16_0:
- case ISDN_CTYPE_8_0:
- ret = setup_teles0(card);
- break;
-#endif
-#if CARD_TELES3
- case ISDN_CTYPE_16_3:
- case ISDN_CTYPE_PNP:
- case ISDN_CTYPE_TELESPCMCIA:
- case ISDN_CTYPE_COMPAQ_ISA:
- ret = setup_teles3(card);
- break;
-#endif
-#if CARD_S0BOX
- case ISDN_CTYPE_S0BOX:
- ret = setup_s0box(card);
- break;
-#endif
-#if CARD_TELESPCI
- case ISDN_CTYPE_TELESPCI:
- ret = setup_telespci(card);
- break;
-#endif
-#if CARD_AVM_A1
- case ISDN_CTYPE_A1:
- ret = setup_avm_a1(card);
- break;
-#endif
-#if CARD_AVM_A1_PCMCIA
- case ISDN_CTYPE_A1_PCMCIA:
- ret = setup_avm_a1_pcmcia(card);
- break;
-#endif
-#if CARD_FRITZPCI
- case ISDN_CTYPE_FRITZPCI:
- ret = setup_avm_pcipnp(card);
- break;
-#endif
-#if CARD_ELSA
- case ISDN_CTYPE_ELSA:
- case ISDN_CTYPE_ELSA_PNP:
- case ISDN_CTYPE_ELSA_PCMCIA:
- case ISDN_CTYPE_ELSA_PCI:
- ret = setup_elsa(card);
- break;
-#endif
-#if CARD_IX1MICROR2
- case ISDN_CTYPE_IX1MICROR2:
- ret = setup_ix1micro(card);
- break;
-#endif
-#if CARD_DIEHLDIVA
- case ISDN_CTYPE_DIEHLDIVA:
- ret = setup_diva(card);
- break;
-#endif
-#if CARD_ASUSCOM
- case ISDN_CTYPE_ASUSCOM:
- ret = setup_asuscom(card);
- break;
-#endif
-#if CARD_TELEINT
- case ISDN_CTYPE_TELEINT:
- ret = setup_TeleInt(card);
- break;
-#endif
-#if CARD_SEDLBAUER
- case ISDN_CTYPE_SEDLBAUER:
- case ISDN_CTYPE_SEDLBAUER_PCMCIA:
- case ISDN_CTYPE_SEDLBAUER_FAX:
- ret = setup_sedlbauer(card);
- break;
-#endif
-#if CARD_SPORTSTER
- case ISDN_CTYPE_SPORTSTER:
- ret = setup_sportster(card);
- break;
-#endif
-#if CARD_MIC
- case ISDN_CTYPE_MIC:
- ret = setup_mic(card);
- break;
-#endif
-#if CARD_NETJET_S
- case ISDN_CTYPE_NETJET_S:
- ret = setup_netjet_s(card);
- break;
-#endif
-#if CARD_HFCS
- case ISDN_CTYPE_TELES3C:
- case ISDN_CTYPE_ACERP10:
- ret = setup_hfcs(card);
- break;
-#endif
-#if CARD_HFC_PCI
- case ISDN_CTYPE_HFC_PCI:
- ret = setup_hfcpci(card);
- break;
-#endif
-#if CARD_HFC_SX
- case ISDN_CTYPE_HFC_SX:
- ret = setup_hfcsx(card);
- break;
-#endif
-#if CARD_NICCY
- case ISDN_CTYPE_NICCY:
- ret = setup_niccy(card);
- break;
-#endif
-#if CARD_ISURF
- case ISDN_CTYPE_ISURF:
- ret = setup_isurf(card);
- break;
-#endif
-#if CARD_HSTSAPHIR
- case ISDN_CTYPE_HSTSAPHIR:
- ret = setup_saphir(card);
- break;
-#endif
-#if CARD_BKM_A4T
- case ISDN_CTYPE_BKM_A4T:
- ret = setup_bkm_a4t(card);
- break;
-#endif
-#if CARD_SCT_QUADRO
- case ISDN_CTYPE_SCT_QUADRO:
- ret = setup_sct_quadro(card);
- break;
-#endif
-#if CARD_GAZEL
- case ISDN_CTYPE_GAZEL:
- ret = setup_gazel(card);
- break;
-#endif
-#if CARD_W6692
- case ISDN_CTYPE_W6692:
- ret = setup_w6692(card);
- break;
-#endif
-#if CARD_NETJET_U
- case ISDN_CTYPE_NETJET_U:
- ret = setup_netjet_u(card);
- break;
-#endif
-#if CARD_FN_ENTERNOW_PCI
- case ISDN_CTYPE_ENTERNOW:
- ret = setup_enternow_pci(card);
- break;
-#endif
- case ISDN_CTYPE_DYNAMIC:
- ret = 2;
- break;
- default:
- printk(KERN_WARNING
- "HiSax: Support for %s Card not selected\n",
- CardType[card->typ]);
- ret = 0;
- break;
- }
-
- return ret;
-}
-
-static int hisax_cs_new(int cardnr, char *id, struct IsdnCard *card,
- struct IsdnCardState **cs_out, int *busy_flag,
- struct module *lockowner)
-{
- struct IsdnCardState *cs;
-
- *cs_out = NULL;
-
- cs = kzalloc(sizeof(struct IsdnCardState), GFP_KERNEL);
- if (!cs) {
- printk(KERN_WARNING
- "HiSax: No memory for IsdnCardState(card %d)\n",
- cardnr + 1);
- goto out;
- }
- card->cs = cs;
- spin_lock_init(&cs->statlock);
- spin_lock_init(&cs->lock);
- cs->chanlimit = 2; /* maximum B-channel number */
- cs->logecho = 0; /* No echo logging */
- cs->cardnr = cardnr;
- cs->debug = L1_DEB_WARN;
- cs->HW_Flags = 0;
- cs->busy_flag = busy_flag;
- cs->irq_flags = I4L_IRQ_FLAG;
-#if TEI_PER_CARD
- if (card->protocol == ISDN_PTYPE_NI1)
- test_and_set_bit(FLG_TWO_DCHAN, &cs->HW_Flags);
-#else
- test_and_set_bit(FLG_TWO_DCHAN, &cs->HW_Flags);
-#endif
- cs->protocol = card->protocol;
-
- if (card->typ <= 0 || card->typ > ISDN_CTYPE_COUNT) {
- printk(KERN_WARNING
- "HiSax: Card Type %d out of range\n", card->typ);
- goto outf_cs;
- }
- if (!(cs->dlog = kmalloc(MAX_DLOG_SPACE, GFP_KERNEL))) {
- printk(KERN_WARNING
- "HiSax: No memory for dlog(card %d)\n", cardnr + 1);
- goto outf_cs;
- }
- if (!(cs->status_buf = kmalloc(HISAX_STATUS_BUFSIZE, GFP_KERNEL))) {
- printk(KERN_WARNING
- "HiSax: No memory for status_buf(card %d)\n",
- cardnr + 1);
- goto outf_dlog;
- }
- cs->stlist = NULL;
- cs->status_read = cs->status_buf;
- cs->status_write = cs->status_buf;
- cs->status_end = cs->status_buf + HISAX_STATUS_BUFSIZE - 1;
- cs->typ = card->typ;
-#ifdef MODULE
- cs->iif.owner = lockowner;
-#endif
- strcpy(cs->iif.id, id);
- cs->iif.channels = 2;
- cs->iif.maxbufsize = MAX_DATA_SIZE;
- cs->iif.hl_hdrlen = MAX_HEADER_LEN;
- cs->iif.features =
- ISDN_FEATURE_L2_X75I |
- ISDN_FEATURE_L2_HDLC |
- ISDN_FEATURE_L2_HDLC_56K |
- ISDN_FEATURE_L2_TRANS |
- ISDN_FEATURE_L3_TRANS |
-#ifdef CONFIG_HISAX_1TR6
- ISDN_FEATURE_P_1TR6 |
-#endif
-#ifdef CONFIG_HISAX_EURO
- ISDN_FEATURE_P_EURO |
-#endif
-#ifdef CONFIG_HISAX_NI1
- ISDN_FEATURE_P_NI1 |
-#endif
- 0;
-
- cs->iif.command = HiSax_command;
- cs->iif.writecmd = NULL;
- cs->iif.writebuf_skb = HiSax_writebuf_skb;
- cs->iif.readstat = HiSax_readstatus;
- register_isdn(&cs->iif);
- cs->myid = cs->iif.channels;
-
- *cs_out = cs;
- return 1; /* success */
-
-outf_dlog:
- kfree(cs->dlog);
-outf_cs:
- kfree(cs);
- card->cs = NULL;
-out:
- return 0; /* error */
-}
-
-static int hisax_cs_setup(int cardnr, struct IsdnCard *card,
- struct IsdnCardState *cs)
-{
- int ret;
-
- if (!(cs->rcvbuf = kmalloc(MAX_DFRAME_LEN_L1, GFP_KERNEL))) {
- printk(KERN_WARNING "HiSax: No memory for isac rcvbuf\n");
- ll_unload(cs);
- goto outf_cs;
- }
- cs->rcvidx = 0;
- cs->tx_skb = NULL;
- cs->tx_cnt = 0;
- cs->event = 0;
-
- skb_queue_head_init(&cs->rq);
- skb_queue_head_init(&cs->sq);
-
- init_bcstate(cs, 0);
- init_bcstate(cs, 1);
-
- /* init_card only handles interrupts which are not */
- /* used here for the loadable driver */
- switch (card->typ) {
- case ISDN_CTYPE_DYNAMIC:
- ret = 0;
- break;
- default:
- ret = init_card(cs);
- break;
- }
- if (ret) {
- closecard(cardnr);
- goto outf_cs;
- }
- init_tei(cs, cs->protocol);
- ret = CallcNewChan(cs);
- if (ret) {
- closecard(cardnr);
- goto outf_cs;
- }
- /* ISAR needs firmware download first */
- if (!test_bit(HW_ISAR, &cs->HW_Flags))
- ll_run(cs, 0);
-
- return 1;
-
-outf_cs:
- kfree(cs);
- card->cs = NULL;
- return 0;
-}
-
-static int checkcard(int cardnr, char *id, int *busy_flag,
- struct module *lockowner, hisax_setup_func_t card_setup)
-{
- int ret;
- struct IsdnCard *card = cards + cardnr;
- struct IsdnCardState *cs;
-
- ret = hisax_cs_new(cardnr, id, card, &cs, busy_flag, lockowner);
- if (!ret)
- return 0;
-
- printk(KERN_INFO
- "HiSax: Card %d Protocol %s Id=%s (%d)\n", cardnr + 1,
- (card->protocol == ISDN_PTYPE_1TR6) ? "1TR6" :
- (card->protocol == ISDN_PTYPE_EURO) ? "EDSS1" :
- (card->protocol == ISDN_PTYPE_LEASED) ? "LEASED" :
- (card->protocol == ISDN_PTYPE_NI1) ? "NI1" :
- "NONE", cs->iif.id, cs->myid);
-
- ret = card_setup(card);
- if (!ret) {
- ll_unload(cs);
- goto outf_cs;
- }
-
- ret = hisax_cs_setup(cardnr, card, cs);
- goto out;
-
-outf_cs:
- kfree(cs);
- card->cs = NULL;
-out:
- return ret;
-}
-
-static void HiSax_shiftcards(int idx)
-{
- int i;
-
- for (i = idx; i < (HISAX_MAX_CARDS - 1); i++)
- memcpy(&cards[i], &cards[i + 1], sizeof(cards[i]));
-}
-
-static int __init HiSax_inithardware(int *busy_flag)
-{
- int foundcards = 0;
- int i = 0;
- int t = ',';
- int flg = 0;
- char *id;
- char *next_id = HiSax_id;
- char ids[20];
-
- if (strchr(HiSax_id, ','))
- t = ',';
- else if (strchr(HiSax_id, '%'))
- t = '%';
-
- while (i < nrcards) {
- if (cards[i].typ < 1)
- break;
- id = next_id;
- if ((next_id = strchr(id, t))) {
- *next_id++ = 0;
- strcpy(ids, id);
- flg = i + 1;
- } else {
- next_id = id;
- if (flg >= i)
- strcpy(ids, id);
- else
- sprintf(ids, "%s%d", id, i);
- }
- if (checkcard(i, ids, busy_flag, THIS_MODULE,
- hisax_cs_setup_card)) {
- foundcards++;
- i++;
- } else {
- /* make sure we don't oops the module */
- if (cards[i].typ > 0 && cards[i].typ <= ISDN_CTYPE_COUNT) {
- printk(KERN_WARNING
- "HiSax: Card %s not installed !\n",
- CardType[cards[i].typ]);
- }
- HiSax_shiftcards(i);
- nrcards--;
- }
- }
- return foundcards;
-}
-
-void HiSax_closecard(int cardnr)
-{
- int i, last = nrcards - 1;
-
- if (cardnr > last || cardnr < 0)
- return;
- if (cards[cardnr].cs) {
- ll_stop(cards[cardnr].cs);
- release_tei(cards[cardnr].cs);
- CallcFreeChan(cards[cardnr].cs);
-
- closecard(cardnr);
- if (cards[cardnr].cs->irq)
- free_irq(cards[cardnr].cs->irq, cards[cardnr].cs);
- kfree((void *) cards[cardnr].cs);
- cards[cardnr].cs = NULL;
- }
- i = cardnr;
- while (i <= last) {
- cards[i] = cards[i + 1];
- i++;
- }
- nrcards--;
-}
-
-void HiSax_reportcard(int cardnr, int sel)
-{
- struct IsdnCardState *cs = cards[cardnr].cs;
-
- printk(KERN_DEBUG "HiSax: reportcard No %d\n", cardnr + 1);
- printk(KERN_DEBUG "HiSax: Type %s\n", CardType[cs->typ]);
- printk(KERN_DEBUG "HiSax: debuglevel %x\n", cs->debug);
- printk(KERN_DEBUG "HiSax: HiSax_reportcard address 0x%px\n",
- HiSax_reportcard);
- printk(KERN_DEBUG "HiSax: cs 0x%px\n", cs);
- printk(KERN_DEBUG "HiSax: HW_Flags %lx bc0 flg %lx bc1 flg %lx\n",
- cs->HW_Flags, cs->bcs[0].Flag, cs->bcs[1].Flag);
- printk(KERN_DEBUG "HiSax: bcs 0 mode %d ch%d\n",
- cs->bcs[0].mode, cs->bcs[0].channel);
- printk(KERN_DEBUG "HiSax: bcs 1 mode %d ch%d\n",
- cs->bcs[1].mode, cs->bcs[1].channel);
-#ifdef ERROR_STATISTIC
- printk(KERN_DEBUG "HiSax: dc errors(rx,crc,tx) %d,%d,%d\n",
- cs->err_rx, cs->err_crc, cs->err_tx);
- printk(KERN_DEBUG
- "HiSax: bc0 errors(inv,rdo,crc,tx) %d,%d,%d,%d\n",
- cs->bcs[0].err_inv, cs->bcs[0].err_rdo, cs->bcs[0].err_crc,
- cs->bcs[0].err_tx);
- printk(KERN_DEBUG
- "HiSax: bc1 errors(inv,rdo,crc,tx) %d,%d,%d,%d\n",
- cs->bcs[1].err_inv, cs->bcs[1].err_rdo, cs->bcs[1].err_crc,
- cs->bcs[1].err_tx);
- if (sel == 99) {
- cs->err_rx = 0;
- cs->err_crc = 0;
- cs->err_tx = 0;
- cs->bcs[0].err_inv = 0;
- cs->bcs[0].err_rdo = 0;
- cs->bcs[0].err_crc = 0;
- cs->bcs[0].err_tx = 0;
- cs->bcs[1].err_inv = 0;
- cs->bcs[1].err_rdo = 0;
- cs->bcs[1].err_crc = 0;
- cs->bcs[1].err_tx = 0;
- }
-#endif
-}
-
-static int __init HiSax_init(void)
-{
- int i, retval;
-#ifdef MODULE
- int j;
- int nzproto = 0;
-#endif
-
- HiSaxVersion();
- retval = CallcNew();
- if (retval)
- goto out;
- retval = Isdnl3New();
- if (retval)
- goto out_callc;
- retval = Isdnl2New();
- if (retval)
- goto out_isdnl3;
- retval = TeiNew();
- if (retval)
- goto out_isdnl2;
- retval = Isdnl1New();
- if (retval)
- goto out_tei;
-
-#ifdef MODULE
- if (!type[0]) {
-		/* We'll register drivers later, but initialize basic functions */
- for (i = 0; i < HISAX_MAX_CARDS; i++)
- cards[i].typ = 0;
- return 0;
- }
-#ifdef CONFIG_HISAX_ELSA
- if (type[0] == ISDN_CTYPE_ELSA_PCMCIA) {
-		/* we have to export and return in this case */
- return 0;
- }
-#endif
-#ifdef CONFIG_HISAX_SEDLBAUER
- if (type[0] == ISDN_CTYPE_SEDLBAUER_PCMCIA) {
- /* we have to export and return in this case */
- return 0;
- }
-#endif
-#ifdef CONFIG_HISAX_AVM_A1_PCMCIA
- if (type[0] == ISDN_CTYPE_A1_PCMCIA) {
- /* we have to export and return in this case */
- return 0;
- }
-#endif
-#ifdef CONFIG_HISAX_HFC_SX
- if (type[0] == ISDN_CTYPE_HFC_SP_PCMCIA) {
- /* we have to export and return in this case */
- return 0;
- }
-#endif
-#endif
- nrcards = 0;
-#ifdef MODULE
- if (id) /* If id= string used */
- HiSax_id = id;
- for (i = j = 0; j < HISAX_MAX_CARDS; i++) {
- cards[j].typ = type[i];
- if (protocol[i]) {
- cards[j].protocol = protocol[i];
- nzproto++;
- } else {
- cards[j].protocol = DEFAULT_PROTO;
- }
- switch (type[i]) {
- case ISDN_CTYPE_16_0:
- cards[j].para[0] = irq[i];
- cards[j].para[1] = mem[i];
- cards[j].para[2] = io[i];
- break;
-
- case ISDN_CTYPE_8_0:
- cards[j].para[0] = irq[i];
- cards[j].para[1] = mem[i];
- break;
-
-#ifdef IO0_IO1
- case ISDN_CTYPE_PNP:
- case ISDN_CTYPE_NICCY:
- cards[j].para[0] = irq[i];
- cards[j].para[1] = io0[i];
- cards[j].para[2] = io1[i];
- break;
- case ISDN_CTYPE_COMPAQ_ISA:
- cards[j].para[0] = irq[i];
- cards[j].para[1] = io0[i];
- cards[j].para[2] = io1[i];
- cards[j].para[3] = io[i];
- break;
-#endif
- case ISDN_CTYPE_ELSA:
- case ISDN_CTYPE_HFC_PCI:
- cards[j].para[0] = io[i];
- break;
- case ISDN_CTYPE_16_3:
- case ISDN_CTYPE_TELESPCMCIA:
- case ISDN_CTYPE_A1:
- case ISDN_CTYPE_A1_PCMCIA:
- case ISDN_CTYPE_ELSA_PNP:
- case ISDN_CTYPE_ELSA_PCMCIA:
- case ISDN_CTYPE_IX1MICROR2:
- case ISDN_CTYPE_DIEHLDIVA:
- case ISDN_CTYPE_ASUSCOM:
- case ISDN_CTYPE_TELEINT:
- case ISDN_CTYPE_SEDLBAUER:
- case ISDN_CTYPE_SEDLBAUER_PCMCIA:
- case ISDN_CTYPE_SEDLBAUER_FAX:
- case ISDN_CTYPE_SPORTSTER:
- case ISDN_CTYPE_MIC:
- case ISDN_CTYPE_TELES3C:
- case ISDN_CTYPE_ACERP10:
- case ISDN_CTYPE_S0BOX:
- case ISDN_CTYPE_FRITZPCI:
- case ISDN_CTYPE_HSTSAPHIR:
- case ISDN_CTYPE_GAZEL:
- case ISDN_CTYPE_HFC_SX:
- case ISDN_CTYPE_HFC_SP_PCMCIA:
- cards[j].para[0] = irq[i];
- cards[j].para[1] = io[i];
- break;
- case ISDN_CTYPE_ISURF:
- cards[j].para[0] = irq[i];
- cards[j].para[1] = io[i];
- cards[j].para[2] = mem[i];
- break;
- case ISDN_CTYPE_ELSA_PCI:
- case ISDN_CTYPE_NETJET_S:
- case ISDN_CTYPE_TELESPCI:
- case ISDN_CTYPE_W6692:
- case ISDN_CTYPE_NETJET_U:
- break;
- case ISDN_CTYPE_BKM_A4T:
- break;
- case ISDN_CTYPE_SCT_QUADRO:
- if (irq[i]) {
- cards[j].para[0] = irq[i];
- } else {
- /* QUADRO is a 4 BRI card */
- cards[j++].para[0] = 1;
- /* we need to check if further cards can be added */
- if (j < HISAX_MAX_CARDS) {
- cards[j].typ = ISDN_CTYPE_SCT_QUADRO;
- cards[j].protocol = protocol[i];
- cards[j++].para[0] = 2;
- }
- if (j < HISAX_MAX_CARDS) {
- cards[j].typ = ISDN_CTYPE_SCT_QUADRO;
- cards[j].protocol = protocol[i];
- cards[j++].para[0] = 3;
- }
- if (j < HISAX_MAX_CARDS) {
- cards[j].typ = ISDN_CTYPE_SCT_QUADRO;
- cards[j].protocol = protocol[i];
- cards[j].para[0] = 4;
- }
- }
- break;
- }
- j++;
- }
- if (!nzproto) {
- printk(KERN_WARNING
- "HiSax: Warning - no protocol specified\n");
- printk(KERN_WARNING "HiSax: using protocol %s\n",
- DEFAULT_PROTO_NAME);
- }
-#endif
- if (!HiSax_id)
- HiSax_id = HiSaxID;
- if (!HiSaxID[0])
- strcpy(HiSaxID, "HiSax");
- for (i = 0; i < HISAX_MAX_CARDS; i++)
- if (cards[i].typ > 0)
- nrcards++;
- printk(KERN_DEBUG "HiSax: Total %d card%s defined\n",
- nrcards, (nrcards > 1) ? "s" : "");
-
- /* Install only, if at least one card found */
- if (!HiSax_inithardware(NULL))
- return -ENODEV;
- return 0;
-
-out_tei:
- TeiFree();
-out_isdnl2:
- Isdnl2Free();
-out_isdnl3:
- Isdnl3Free();
-out_callc:
- CallcFree();
-out:
- return retval;
-}
-
-static void __exit HiSax_exit(void)
-{
- int cardnr = nrcards - 1;
-
- while (cardnr >= 0)
- HiSax_closecard(cardnr--);
- Isdnl1Free();
- TeiFree();
- Isdnl2Free();
- Isdnl3Free();
- CallcFree();
- printk(KERN_INFO "HiSax module removed\n");
-}
-
-int hisax_init_pcmcia(void *pcm_iob, int *busy_flag, struct IsdnCard *card)
-{
- u_char ids[16];
- int ret = -1;
-
- cards[nrcards] = *card;
- if (nrcards)
- sprintf(ids, "HiSax%d", nrcards);
- else
- sprintf(ids, "HiSax");
- if (!checkcard(nrcards, ids, busy_flag, THIS_MODULE,
- hisax_cs_setup_card))
- goto error;
-
- ret = nrcards;
- nrcards++;
-error:
- return ret;
-}
-EXPORT_SYMBOL(hisax_init_pcmcia);
-
-EXPORT_SYMBOL(HiSax_closecard);
-
-#include "hisax_if.h"
-
-EXPORT_SYMBOL(hisax_register);
-EXPORT_SYMBOL(hisax_unregister);
-
-static void hisax_d_l1l2(struct hisax_if *ifc, int pr, void *arg);
-static void hisax_b_l1l2(struct hisax_if *ifc, int pr, void *arg);
-static void hisax_d_l2l1(struct PStack *st, int pr, void *arg);
-static void hisax_b_l2l1(struct PStack *st, int pr, void *arg);
-static int hisax_cardmsg(struct IsdnCardState *cs, int mt, void *arg);
-static int hisax_bc_setstack(struct PStack *st, struct BCState *bcs);
-static void hisax_bc_close(struct BCState *bcs);
-static void hisax_bh(struct work_struct *work);
-static void EChannel_proc_rcv(struct hisax_d_if *d_if);
-
-static int hisax_setup_card_dynamic(struct IsdnCard *card)
-{
- return 2;
-}
-
-int hisax_register(struct hisax_d_if *hisax_d_if, struct hisax_b_if *b_if[],
- char *name, int protocol)
-{
- int i, retval;
- char id[20];
- struct IsdnCardState *cs;
-
- for (i = 0; i < HISAX_MAX_CARDS; i++) {
- if (!cards[i].typ)
- break;
- }
-
- if (i >= HISAX_MAX_CARDS)
- return -EBUSY;
-
- cards[i].typ = ISDN_CTYPE_DYNAMIC;
- cards[i].protocol = protocol;
- sprintf(id, "%s%d", name, i);
- nrcards++;
- retval = checkcard(i, id, NULL, hisax_d_if->owner,
- hisax_setup_card_dynamic);
- if (retval == 0) { // yuck
- cards[i].typ = 0;
- nrcards--;
- return -EINVAL;
- }
- cs = cards[i].cs;
- hisax_d_if->cs = cs;
- cs->hw.hisax_d_if = hisax_d_if;
- cs->cardmsg = hisax_cardmsg;
- INIT_WORK(&cs->tqueue, hisax_bh);
- cs->channel[0].d_st->l2.l2l1 = hisax_d_l2l1;
- for (i = 0; i < 2; i++) {
- cs->bcs[i].BC_SetStack = hisax_bc_setstack;
- cs->bcs[i].BC_Close = hisax_bc_close;
-
- b_if[i]->ifc.l1l2 = hisax_b_l1l2;
-
- hisax_d_if->b_if[i] = b_if[i];
- }
- hisax_d_if->ifc.l1l2 = hisax_d_l1l2;
- skb_queue_head_init(&hisax_d_if->erq);
- clear_bit(0, &hisax_d_if->ph_state);
-
- return 0;
-}
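[Editorial aside: to make the registration path concrete, here is a minimal,
hypothetical sketch of a dynamic low-level driver hooking in through
hisax_register(). The mycard_* names are invented; only the fields this file
actually touches (owner, ifc.l2l1, the b_if array) are filled in:

	#include <linux/module.h>
	#include <linux/isdnif.h>
	#include "hisax_if.h"

	/* Sketch only: a real driver forwards pr/arg to its hardware. */
	static void mycard_d_l2l1(struct hisax_if *ifc, int pr, void *arg)
	{
		/* D-channel PH_* requests coming down from HiSax */
	}

	static void mycard_b_l2l1(struct hisax_if *ifc, int pr, void *arg)
	{
		/* B-channel PH_* requests coming down from HiSax */
	}

	static struct hisax_d_if mycard_d_if;
	static struct hisax_b_if mycard_b_if[2];

	static int mycard_attach(void)
	{
		struct hisax_b_if *b_if[2] = { &mycard_b_if[0], &mycard_b_if[1] };
		int i;

		mycard_d_if.owner = THIS_MODULE;
		mycard_d_if.ifc.l2l1 = mycard_d_l2l1;
		for (i = 0; i < 2; i++)
			mycard_b_if[i].ifc.l2l1 = mycard_b_l2l1;

		/* hisax_register() claims a cards[] slot, runs checkcard()
		 * and fills in mycard_d_if.cs plus the l1l2 callbacks. */
		return hisax_register(&mycard_d_if, b_if, "mycard", ISDN_PTYPE_EURO);
	}

On success the driver can later detach with hisax_unregister(&mycard_d_if).]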
-
-void hisax_unregister(struct hisax_d_if *hisax_d_if)
-{
- cards[hisax_d_if->cs->cardnr].typ = 0;
- HiSax_closecard(hisax_d_if->cs->cardnr);
- skb_queue_purge(&hisax_d_if->erq);
-}
-
-#include "isdnl1.h"
-
-static void hisax_sched_event(struct IsdnCardState *cs, int event)
-{
- test_and_set_bit(event, &cs->event);
- schedule_work(&cs->tqueue);
-}
-
-static void hisax_bh(struct work_struct *work)
-{
- struct IsdnCardState *cs =
- container_of(work, struct IsdnCardState, tqueue);
- struct PStack *st;
- int pr;
-
- if (test_and_clear_bit(D_RCVBUFREADY, &cs->event))
- DChannel_proc_rcv(cs);
- if (test_and_clear_bit(E_RCVBUFREADY, &cs->event))
- EChannel_proc_rcv(cs->hw.hisax_d_if);
- if (test_and_clear_bit(D_L1STATECHANGE, &cs->event)) {
- if (test_bit(0, &cs->hw.hisax_d_if->ph_state))
- pr = PH_ACTIVATE | INDICATION;
- else
- pr = PH_DEACTIVATE | INDICATION;
- for (st = cs->stlist; st; st = st->next)
- st->l1.l1l2(st, pr, NULL);
-
- }
-}
-
-static void hisax_b_sched_event(struct BCState *bcs, int event)
-{
- test_and_set_bit(event, &bcs->event);
- schedule_work(&bcs->tqueue);
-}
-
-static inline void D_L2L1(struct hisax_d_if *d_if, int pr, void *arg)
-{
- struct hisax_if *ifc = (struct hisax_if *) d_if;
- ifc->l2l1(ifc, pr, arg);
-}
-
-static inline void B_L2L1(struct hisax_b_if *b_if, int pr, void *arg)
-{
- struct hisax_if *ifc = (struct hisax_if *) b_if;
- ifc->l2l1(ifc, pr, arg);
-}
-
-static void hisax_d_l1l2(struct hisax_if *ifc, int pr, void *arg)
-{
- struct hisax_d_if *d_if = (struct hisax_d_if *) ifc;
- struct IsdnCardState *cs = d_if->cs;
- struct PStack *st;
- struct sk_buff *skb;
-
- switch (pr) {
- case PH_ACTIVATE | INDICATION:
- set_bit(0, &d_if->ph_state);
- hisax_sched_event(cs, D_L1STATECHANGE);
- break;
- case PH_DEACTIVATE | INDICATION:
- clear_bit(0, &d_if->ph_state);
- hisax_sched_event(cs, D_L1STATECHANGE);
- break;
- case PH_DATA | INDICATION:
- skb_queue_tail(&cs->rq, arg);
- hisax_sched_event(cs, D_RCVBUFREADY);
- break;
- case PH_DATA | CONFIRM:
- skb = skb_dequeue(&cs->sq);
- if (skb) {
- D_L2L1(d_if, PH_DATA | REQUEST, skb);
- break;
- }
- clear_bit(FLG_L1_DBUSY, &cs->HW_Flags);
- for (st = cs->stlist; st; st = st->next) {
- if (test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags)) {
- st->l1.l1l2(st, PH_PULL | CONFIRM, NULL);
- break;
- }
- }
- break;
- case PH_DATA_E | INDICATION:
- skb_queue_tail(&d_if->erq, arg);
- hisax_sched_event(cs, E_RCVBUFREADY);
- break;
- default:
- printk("pr %#x\n", pr);
- break;
- }
-}
-
-static void hisax_b_l1l2(struct hisax_if *ifc, int pr, void *arg)
-{
- struct hisax_b_if *b_if = (struct hisax_b_if *) ifc;
- struct BCState *bcs = b_if->bcs;
- struct PStack *st = bcs->st;
- struct sk_buff *skb;
-
- // FIXME use isdnl1?
- switch (pr) {
- case PH_ACTIVATE | INDICATION:
- st->l1.l1l2(st, pr, NULL);
- break;
- case PH_DEACTIVATE | INDICATION:
- st->l1.l1l2(st, pr, NULL);
- clear_bit(BC_FLG_BUSY, &bcs->Flag);
- skb_queue_purge(&bcs->squeue);
- bcs->hw.b_if = NULL;
- break;
- case PH_DATA | INDICATION:
- skb_queue_tail(&bcs->rqueue, arg);
- hisax_b_sched_event(bcs, B_RCVBUFREADY);
- break;
- case PH_DATA | CONFIRM:
- bcs->tx_cnt -= (long)arg;
- if (test_bit(FLG_LLI_L1WAKEUP, &bcs->st->lli.flag)) {
- u_long flags;
- spin_lock_irqsave(&bcs->aclock, flags);
- bcs->ackcnt += (long)arg;
- spin_unlock_irqrestore(&bcs->aclock, flags);
- schedule_event(bcs, B_ACKPENDING);
- }
- skb = skb_dequeue(&bcs->squeue);
- if (skb) {
- B_L2L1(b_if, PH_DATA | REQUEST, skb);
- break;
- }
- clear_bit(BC_FLG_BUSY, &bcs->Flag);
- if (test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags)) {
- st->l1.l1l2(st, PH_PULL | CONFIRM, NULL);
- }
- break;
- default:
- printk("hisax_b_l1l2 pr %#x\n", pr);
- break;
- }
-}
-
-static void hisax_d_l2l1(struct PStack *st, int pr, void *arg)
-{
- struct IsdnCardState *cs = st->l1.hardware;
- struct hisax_d_if *hisax_d_if = cs->hw.hisax_d_if;
- struct sk_buff *skb = arg;
-
- switch (pr) {
- case PH_DATA | REQUEST:
- case PH_PULL | INDICATION:
- if (cs->debug & DEB_DLOG_HEX)
- LogFrame(cs, skb->data, skb->len);
- if (cs->debug & DEB_DLOG_VERBOSE)
- dlogframe(cs, skb, 0);
- Logl2Frame(cs, skb, "PH_DATA_REQ", 0);
- // FIXME lock?
- if (!test_and_set_bit(FLG_L1_DBUSY, &cs->HW_Flags))
- D_L2L1(hisax_d_if, PH_DATA | REQUEST, skb);
- else
- skb_queue_tail(&cs->sq, skb);
- break;
- case PH_PULL | REQUEST:
- if (!test_bit(FLG_L1_DBUSY, &cs->HW_Flags))
- st->l1.l1l2(st, PH_PULL | CONFIRM, NULL);
- else
- set_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
- break;
- default:
- D_L2L1(hisax_d_if, pr, arg);
- break;
- }
-}
-
-static int hisax_cardmsg(struct IsdnCardState *cs, int mt, void *arg)
-{
- return 0;
-}
-
-static void hisax_b_l2l1(struct PStack *st, int pr, void *arg)
-{
- struct BCState *bcs = st->l1.bcs;
- struct hisax_b_if *b_if = bcs->hw.b_if;
-
- switch (pr) {
- case PH_ACTIVATE | REQUEST:
- B_L2L1(b_if, pr, (void *)(unsigned long)st->l1.mode);
- break;
- case PH_DATA | REQUEST:
- case PH_PULL | INDICATION:
- // FIXME lock?
- if (!test_and_set_bit(BC_FLG_BUSY, &bcs->Flag)) {
- B_L2L1(b_if, PH_DATA | REQUEST, arg);
- } else {
- skb_queue_tail(&bcs->squeue, arg);
- }
- break;
- case PH_PULL | REQUEST:
- if (!test_bit(BC_FLG_BUSY, &bcs->Flag))
- st->l1.l1l2(st, PH_PULL | CONFIRM, NULL);
- else
- set_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
- break;
- case PH_DEACTIVATE | REQUEST:
- test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
- skb_queue_purge(&bcs->squeue);
- /* fall through */
- default:
- B_L2L1(b_if, pr, arg);
- break;
- }
-}
-
-static int hisax_bc_setstack(struct PStack *st, struct BCState *bcs)
-{
- struct IsdnCardState *cs = st->l1.hardware;
- struct hisax_d_if *hisax_d_if = cs->hw.hisax_d_if;
-
- bcs->channel = st->l1.bc;
-
- bcs->hw.b_if = hisax_d_if->b_if[st->l1.bc];
- hisax_d_if->b_if[st->l1.bc]->bcs = bcs;
-
- st->l1.bcs = bcs;
- st->l2.l2l1 = hisax_b_l2l1;
- setstack_manager(st);
- bcs->st = st;
- setstack_l1_B(st);
- skb_queue_head_init(&bcs->rqueue);
- skb_queue_head_init(&bcs->squeue);
- return 0;
-}
-
-static void hisax_bc_close(struct BCState *bcs)
-{
- struct hisax_b_if *b_if = bcs->hw.b_if;
-
- if (b_if)
- B_L2L1(b_if, PH_DEACTIVATE | REQUEST, NULL);
-}
-
-static void EChannel_proc_rcv(struct hisax_d_if *d_if)
-{
- struct IsdnCardState *cs = d_if->cs;
- u_char *ptr;
- struct sk_buff *skb;
-
- while ((skb = skb_dequeue(&d_if->erq)) != NULL) {
- if (cs->debug & DEB_DLOG_HEX) {
- ptr = cs->dlog;
- if ((skb->len) < MAX_DLOG_SPACE / 3 - 10) {
- *ptr++ = 'E';
- *ptr++ = 'C';
- *ptr++ = 'H';
- *ptr++ = 'O';
- *ptr++ = ':';
- ptr += QuickHex(ptr, skb->data, skb->len);
- ptr--;
- *ptr++ = '\n';
- *ptr = 0;
- HiSax_putstatus(cs, NULL, cs->dlog);
- } else
- HiSax_putstatus(cs, "LogEcho: ",
- "warning Frame too big (%d)",
- skb->len);
- }
- dev_kfree_skb_any(skb);
- }
-}
-
-#ifdef CONFIG_PCI
-#include <linux/pci.h>
-
-static const struct pci_device_id hisax_pci_tbl[] __used = {
-#ifdef CONFIG_HISAX_FRITZPCI
- {PCI_VDEVICE(AVM, PCI_DEVICE_ID_AVM_A1) },
-#endif
-#ifdef CONFIG_HISAX_DIEHLDIVA
- {PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_DIVA20) },
- {PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_DIVA20_U) },
- {PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_DIVA201) },
-	{PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_DIVA202) },
-#endif
-#ifdef CONFIG_HISAX_ELSA
- {PCI_VDEVICE(ELSA, PCI_DEVICE_ID_ELSA_MICROLINK) },
- {PCI_VDEVICE(ELSA, PCI_DEVICE_ID_ELSA_QS3000) },
-#endif
-#ifdef CONFIG_HISAX_GAZEL
- {PCI_VDEVICE(PLX, PCI_DEVICE_ID_PLX_R685) },
- {PCI_VDEVICE(PLX, PCI_DEVICE_ID_PLX_R753) },
- {PCI_VDEVICE(PLX, PCI_DEVICE_ID_PLX_DJINN_ITOO) },
- {PCI_VDEVICE(PLX, PCI_DEVICE_ID_PLX_OLITEC) },
-#endif
-#ifdef CONFIG_HISAX_SCT_QUADRO
- {PCI_VDEVICE(PLX, PCI_DEVICE_ID_PLX_9050) },
-#endif
-#ifdef CONFIG_HISAX_NICCY
- {PCI_VDEVICE(SATSAGEM, PCI_DEVICE_ID_SATSAGEM_NICCY) },
-#endif
-#ifdef CONFIG_HISAX_SEDLBAUER
- {PCI_VDEVICE(TIGERJET, PCI_DEVICE_ID_TIGERJET_100) },
-#endif
-#if defined(CONFIG_HISAX_NETJET) || defined(CONFIG_HISAX_NETJET_U)
- {PCI_VDEVICE(TIGERJET, PCI_DEVICE_ID_TIGERJET_300) },
-#endif
-#if defined(CONFIG_HISAX_TELESPCI) || defined(CONFIG_HISAX_SCT_QUADRO)
- {PCI_VDEVICE(ZORAN, PCI_DEVICE_ID_ZORAN_36120) },
-#endif
-#ifdef CONFIG_HISAX_W6692
- {PCI_VDEVICE(DYNALINK, PCI_DEVICE_ID_DYNALINK_IS64PH) },
- {PCI_VDEVICE(WINBOND2, PCI_DEVICE_ID_WINBOND2_6692) },
-#endif
-#ifdef CONFIG_HISAX_HFC_PCI
- {PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_2BD0) },
- {PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B000) },
- {PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B006) },
- {PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B007) },
- {PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B008) },
- {PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B009) },
- {PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B00A) },
- {PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B00B) },
- {PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B00C) },
- {PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B100) },
- {PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B700) },
- {PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B701) },
- {PCI_VDEVICE(ABOCOM, PCI_DEVICE_ID_ABOCOM_2BD1) },
- {PCI_VDEVICE(ASUSTEK, PCI_DEVICE_ID_ASUSTEK_0675) },
- {PCI_VDEVICE(BERKOM, PCI_DEVICE_ID_BERKOM_T_CONCEPT) },
- {PCI_VDEVICE(BERKOM, PCI_DEVICE_ID_BERKOM_A1T) },
- {PCI_VDEVICE(ANIGMA, PCI_DEVICE_ID_ANIGMA_MC145575) },
- {PCI_VDEVICE(ZOLTRIX, PCI_DEVICE_ID_ZOLTRIX_2BD0) },
- {PCI_VDEVICE(DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_E) },
- {PCI_VDEVICE(DIGI, PCI_DEVICE_ID_DIGI_DF_M_E) },
- {PCI_VDEVICE(DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_A) },
- {PCI_VDEVICE(DIGI, PCI_DEVICE_ID_DIGI_DF_M_A) },
-#endif
- { } /* Terminating entry */
-};
-
-MODULE_DEVICE_TABLE(pci, hisax_pci_tbl);
-#endif /* CONFIG_PCI */
-
-module_init(HiSax_init);
-module_exit(HiSax_exit);
-
-EXPORT_SYMBOL(FsmNew);
-EXPORT_SYMBOL(FsmFree);
-EXPORT_SYMBOL(FsmEvent);
-EXPORT_SYMBOL(FsmChangeState);
-EXPORT_SYMBOL(FsmInitTimer);
-EXPORT_SYMBOL(FsmDelTimer);
-EXPORT_SYMBOL(FsmRestartTimer);
diff --git a/drivers/isdn/hisax/diva.c b/drivers/isdn/hisax/diva.c
deleted file mode 100644
index d23df7a7784d..000000000000
--- a/drivers/isdn/hisax/diva.c
+++ /dev/null
@@ -1,1282 +0,0 @@
-/* $Id: diva.c,v 1.33.2.6 2004/02/11 13:21:33 keil Exp $
- *
- * low level stuff for Eicon.Diehl Diva Family ISDN cards
- *
- * Author Karsten Keil
- * Copyright by Karsten Keil <keil@isdn4linux.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- * For changes and modifications please read
- * Documentation/isdn/HiSax.cert
- *
- * Thanks to Eicon Technology for documents and information
- *
- */
-
-#include <linux/init.h>
-#include "hisax.h"
-#include "isac.h"
-#include "hscx.h"
-#include "ipac.h"
-#include "ipacx.h"
-#include "isdnl1.h"
-#include <linux/pci.h>
-#include <linux/isapnp.h>
-
-static const char *Diva_revision = "$Revision: 1.33.2.6 $";
-
-#define byteout(addr, val) outb(val, addr)
-#define bytein(addr) inb(addr)
-
-#define DIVA_HSCX_DATA 0
-#define DIVA_HSCX_ADR 4
-#define DIVA_ISA_ISAC_DATA 2
-#define DIVA_ISA_ISAC_ADR 6
-#define DIVA_ISA_CTRL 7
-#define DIVA_IPAC_ADR 0
-#define DIVA_IPAC_DATA 1
-
-#define DIVA_PCI_ISAC_DATA 8
-#define DIVA_PCI_ISAC_ADR 0xc
-#define DIVA_PCI_CTRL 0x10
-
-/* SUB Types */
-#define DIVA_ISA 1
-#define DIVA_PCI 2
-#define DIVA_IPAC_ISA 3
-#define DIVA_IPAC_PCI 4
-#define DIVA_IPACX_PCI 5
-
-/* CTRL (Read) */
-#define DIVA_IRQ_STAT 0x01
-#define DIVA_EEPROM_SDA 0x02
-
-/* CTRL (Write) */
-#define DIVA_IRQ_REQ 0x01
-#define DIVA_RESET 0x08
-#define DIVA_EEPROM_CLK 0x40
-#define DIVA_PCI_LED_A 0x10
-#define DIVA_PCI_LED_B 0x20
-#define DIVA_ISA_LED_A 0x20
-#define DIVA_ISA_LED_B 0x40
-#define DIVA_IRQ_CLR 0x80
-
-/* Siemens PITA */
-#define PITA_MISC_REG 0x1c
-#ifdef __BIG_ENDIAN
-#define PITA_PARA_SOFTRESET 0x00000001
-#define PITA_SER_SOFTRESET 0x00000002
-#define PITA_PARA_MPX_MODE 0x00000004
-#define PITA_INT0_ENABLE 0x00000200
-#else
-#define PITA_PARA_SOFTRESET 0x01000000
-#define PITA_SER_SOFTRESET 0x02000000
-#define PITA_PARA_MPX_MODE 0x04000000
-#define PITA_INT0_ENABLE 0x00020000
-#endif
-#define PITA_INT0_STATUS 0x02
-
-static inline u_char
-readreg(unsigned int ale, unsigned int adr, u_char off)
-{
- register u_char ret;
-
- byteout(ale, off);
- ret = bytein(adr);
- return (ret);
-}
-
-static inline void
-readfifo(unsigned int ale, unsigned int adr, u_char off, u_char *data, int size)
-{
- byteout(ale, off);
- insb(adr, data, size);
-}
-
-
-static inline void
-writereg(unsigned int ale, unsigned int adr, u_char off, u_char data)
-{
- byteout(ale, off);
- byteout(adr, data);
-}
-
-static inline void
-writefifo(unsigned int ale, unsigned int adr, u_char off, u_char *data, int size)
-{
- byteout(ale, off);
- outsb(adr, data, size);
-}
-
-static inline u_char
-memreadreg(unsigned long adr, u_char off)
-{
- return (*((unsigned char *)
- (((unsigned int *)adr) + off)));
-}
-
-static inline void
-memwritereg(unsigned long adr, u_char off, u_char data)
-{
- register u_char *p;
-
- p = (unsigned char *)(((unsigned int *)adr) + off);
- *p = data;
-}
-
-/* Interface functions */
-
-static u_char
-ReadISAC(struct IsdnCardState *cs, u_char offset)
-{
- return (readreg(cs->hw.diva.isac_adr, cs->hw.diva.isac, offset));
-}
-
-static void
-WriteISAC(struct IsdnCardState *cs, u_char offset, u_char value)
-{
- writereg(cs->hw.diva.isac_adr, cs->hw.diva.isac, offset, value);
-}
-
-static void
-ReadISACfifo(struct IsdnCardState *cs, u_char *data, int size)
-{
- readfifo(cs->hw.diva.isac_adr, cs->hw.diva.isac, 0, data, size);
-}
-
-static void
-WriteISACfifo(struct IsdnCardState *cs, u_char *data, int size)
-{
- writefifo(cs->hw.diva.isac_adr, cs->hw.diva.isac, 0, data, size);
-}
-
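-/*
- * IPAC-based cards reuse the helpers above: the IPAC's ISAC register bank
- * is reached at offset 0x80, while the two HSCX banks are selected with
- * the +0x40 offset used in the HSCX helpers further down.
- */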
-static u_char
-ReadISAC_IPAC(struct IsdnCardState *cs, u_char offset)
-{
- return (readreg(cs->hw.diva.isac_adr, cs->hw.diva.isac, offset + 0x80));
-}
-
-static void
-WriteISAC_IPAC(struct IsdnCardState *cs, u_char offset, u_char value)
-{
- writereg(cs->hw.diva.isac_adr, cs->hw.diva.isac, offset | 0x80, value);
-}
-
-static void
-ReadISACfifo_IPAC(struct IsdnCardState *cs, u_char *data, int size)
-{
- readfifo(cs->hw.diva.isac_adr, cs->hw.diva.isac, 0x80, data, size);
-}
-
-static void
-WriteISACfifo_IPAC(struct IsdnCardState *cs, u_char *data, int size)
-{
- writefifo(cs->hw.diva.isac_adr, cs->hw.diva.isac, 0x80, data, size);
-}
-
-static u_char
-ReadHSCX(struct IsdnCardState *cs, int hscx, u_char offset)
-{
- return (readreg(cs->hw.diva.hscx_adr,
- cs->hw.diva.hscx, offset + (hscx ? 0x40 : 0)));
-}
-
-static void
-WriteHSCX(struct IsdnCardState *cs, int hscx, u_char offset, u_char value)
-{
- writereg(cs->hw.diva.hscx_adr,
- cs->hw.diva.hscx, offset + (hscx ? 0x40 : 0), value);
-}
-
-static u_char
-MemReadISAC_IPAC(struct IsdnCardState *cs, u_char offset)
-{
- return (memreadreg(cs->hw.diva.cfg_reg, offset + 0x80));
-}
-
-static void
-MemWriteISAC_IPAC(struct IsdnCardState *cs, u_char offset, u_char value)
-{
- memwritereg(cs->hw.diva.cfg_reg, offset | 0x80, value);
-}
-
-static void
-MemReadISACfifo_IPAC(struct IsdnCardState *cs, u_char *data, int size)
-{
- while (size--)
- *data++ = memreadreg(cs->hw.diva.cfg_reg, 0x80);
-}
-
-static void
-MemWriteISACfifo_IPAC(struct IsdnCardState *cs, u_char *data, int size)
-{
- while (size--)
- memwritereg(cs->hw.diva.cfg_reg, 0x80, *data++);
-}
-
-static u_char
-MemReadHSCX(struct IsdnCardState *cs, int hscx, u_char offset)
-{
- return (memreadreg(cs->hw.diva.cfg_reg, offset + (hscx ? 0x40 : 0)));
-}
-
-static void
-MemWriteHSCX(struct IsdnCardState *cs, int hscx, u_char offset, u_char value)
-{
- memwritereg(cs->hw.diva.cfg_reg, offset + (hscx ? 0x40 : 0), value);
-}
-
-/* IO-Functions for IPACX type cards */
-static u_char
-MemReadISAC_IPACX(struct IsdnCardState *cs, u_char offset)
-{
- return (memreadreg(cs->hw.diva.cfg_reg, offset));
-}
-
-static void
-MemWriteISAC_IPACX(struct IsdnCardState *cs, u_char offset, u_char value)
-{
- memwritereg(cs->hw.diva.cfg_reg, offset, value);
-}
-
-static void
-MemReadISACfifo_IPACX(struct IsdnCardState *cs, u_char *data, int size)
-{
- while (size--)
- *data++ = memreadreg(cs->hw.diva.cfg_reg, 0);
-}
-
-static void
-MemWriteISACfifo_IPACX(struct IsdnCardState *cs, u_char *data, int size)
-{
- while (size--)
- memwritereg(cs->hw.diva.cfg_reg, 0, *data++);
-}
-
-static u_char
-MemReadHSCX_IPACX(struct IsdnCardState *cs, int hscx, u_char offset)
-{
- return (memreadreg(cs->hw.diva.cfg_reg, offset +
- (hscx ? IPACX_OFF_B2 : IPACX_OFF_B1)));
-}
-
-static void
-MemWriteHSCX_IPACX(struct IsdnCardState *cs, int hscx, u_char offset, u_char value)
-{
- memwritereg(cs->hw.diva.cfg_reg, offset +
- (hscx ? IPACX_OFF_B2 : IPACX_OFF_B1), value);
-}
-
-/*
- * fast interrupt HSCX stuff goes here
- */
-
-#define READHSCX(cs, nr, reg) readreg(cs->hw.diva.hscx_adr, \
- cs->hw.diva.hscx, reg + (nr ? 0x40 : 0))
-#define WRITEHSCX(cs, nr, reg, data) writereg(cs->hw.diva.hscx_adr, \
- cs->hw.diva.hscx, reg + (nr ? 0x40 : 0), data)
-
-#define READHSCXFIFO(cs, nr, ptr, cnt) readfifo(cs->hw.diva.hscx_adr, \
- cs->hw.diva.hscx, (nr ? 0x40 : 0), ptr, cnt)
-
-#define WRITEHSCXFIFO(cs, nr, ptr, cnt) writefifo(cs->hw.diva.hscx_adr, \
- cs->hw.diva.hscx, (nr ? 0x40 : 0), ptr, cnt)
-
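-/* hscx_irq.c is compiled into this file and uses the macros above as its
- * register and FIFO accessors. */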
-#include "hscx_irq.c"
-
-static irqreturn_t
-diva_interrupt(int intno, void *dev_id)
-{
- struct IsdnCardState *cs = dev_id;
- u_char val, sval;
- u_long flags;
- int cnt = 5;
-
- spin_lock_irqsave(&cs->lock, flags);
- while (((sval = bytein(cs->hw.diva.ctrl)) & DIVA_IRQ_REQ) && cnt) {
- val = readreg(cs->hw.diva.hscx_adr, cs->hw.diva.hscx, HSCX_ISTA + 0x40);
- if (val)
- hscx_int_main(cs, val);
- val = readreg(cs->hw.diva.isac_adr, cs->hw.diva.isac, ISAC_ISTA);
- if (val)
- isac_interrupt(cs, val);
- cnt--;
- }
- if (!cnt)
- printk(KERN_WARNING "Diva: IRQ LOOP\n");
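-	/* Mask all HSCX/ISAC interrupt sources and then unmask them again
-	 * before leaving the handler. */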
- writereg(cs->hw.diva.hscx_adr, cs->hw.diva.hscx, HSCX_MASK, 0xFF);
- writereg(cs->hw.diva.hscx_adr, cs->hw.diva.hscx, HSCX_MASK + 0x40, 0xFF);
- writereg(cs->hw.diva.isac_adr, cs->hw.diva.isac, ISAC_MASK, 0xFF);
- writereg(cs->hw.diva.isac_adr, cs->hw.diva.isac, ISAC_MASK, 0x0);
- writereg(cs->hw.diva.hscx_adr, cs->hw.diva.hscx, HSCX_MASK, 0x0);
- writereg(cs->hw.diva.hscx_adr, cs->hw.diva.hscx, HSCX_MASK + 0x40, 0x0);
- spin_unlock_irqrestore(&cs->lock, flags);
- return IRQ_HANDLED;
-}
-
-static irqreturn_t
-diva_irq_ipac_isa(int intno, void *dev_id)
-{
- struct IsdnCardState *cs = dev_id;
- u_char ista, val;
- u_long flags;
- int icnt = 5;
-
- spin_lock_irqsave(&cs->lock, flags);
- ista = readreg(cs->hw.diva.isac_adr, cs->hw.diva.isac, IPAC_ISTA);
-Start_IPACISA:
- if (cs->debug & L1_DEB_IPAC)
- debugl1(cs, "IPAC ISTA %02X", ista);
- if (ista & 0x0f) {
- val = readreg(cs->hw.diva.isac_adr, cs->hw.diva.isac, HSCX_ISTA + 0x40);
- if (ista & 0x01)
- val |= 0x01;
- if (ista & 0x04)
- val |= 0x02;
- if (ista & 0x08)
- val |= 0x04;
- if (val)
- hscx_int_main(cs, val);
- }
- if (ista & 0x20) {
- val = 0xfe & readreg(cs->hw.diva.isac_adr, cs->hw.diva.isac, ISAC_ISTA + 0x80);
- if (val) {
- isac_interrupt(cs, val);
- }
- }
- if (ista & 0x10) {
- val = 0x01;
- isac_interrupt(cs, val);
- }
- ista = readreg(cs->hw.diva.isac_adr, cs->hw.diva.isac, IPAC_ISTA);
- if ((ista & 0x3f) && icnt) {
- icnt--;
- goto Start_IPACISA;
- }
- if (!icnt)
- printk(KERN_WARNING "DIVA IPAC IRQ LOOP\n");
- writereg(cs->hw.diva.isac_adr, cs->hw.diva.isac, IPAC_MASK, 0xFF);
- writereg(cs->hw.diva.isac_adr, cs->hw.diva.isac, IPAC_MASK, 0xC0);
- spin_unlock_irqrestore(&cs->lock, flags);
- return IRQ_HANDLED;
-}
-
-static inline void
-MemwaitforCEC(struct IsdnCardState *cs, int hscx)
-{
- int to = 50;
-
- while ((MemReadHSCX(cs, hscx, HSCX_STAR) & 0x04) && to) {
- udelay(1);
- to--;
- }
- if (!to)
- printk(KERN_WARNING "HiSax: waitforCEC timeout\n");
-}
-
-
-static inline void
-MemwaitforXFW(struct IsdnCardState *cs, int hscx)
-{
- int to = 50;
-
- while (((MemReadHSCX(cs, hscx, HSCX_STAR) & 0x44) != 0x40) && to) {
- udelay(1);
- to--;
- }
- if (!to)
- printk(KERN_WARNING "HiSax: waitforXFW timeout\n");
-}
-
-static inline void
-MemWriteHSCXCMDR(struct IsdnCardState *cs, int hscx, u_char data)
-{
- MemwaitforCEC(cs, hscx);
- MemWriteHSCX(cs, hscx, HSCX_CMDR, data);
-}
-
-static void
-Memhscx_empty_fifo(struct BCState *bcs, int count)
-{
- u_char *ptr;
- struct IsdnCardState *cs = bcs->cs;
- int cnt;
-
- if ((cs->debug & L1_DEB_HSCX) && !(cs->debug & L1_DEB_HSCX_FIFO))
- debugl1(cs, "hscx_empty_fifo");
-
- if (bcs->hw.hscx.rcvidx + count > HSCX_BUFMAX) {
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "hscx_empty_fifo: incoming packet too large");
- MemWriteHSCXCMDR(cs, bcs->hw.hscx.hscx, 0x80);
- bcs->hw.hscx.rcvidx = 0;
- return;
- }
- ptr = bcs->hw.hscx.rcvbuf + bcs->hw.hscx.rcvidx;
- cnt = count;
- while (cnt--)
- *ptr++ = memreadreg(cs->hw.diva.cfg_reg, bcs->hw.hscx.hscx ? 0x40 : 0);
- MemWriteHSCXCMDR(cs, bcs->hw.hscx.hscx, 0x80);
- ptr = bcs->hw.hscx.rcvbuf + bcs->hw.hscx.rcvidx;
- bcs->hw.hscx.rcvidx += count;
- if (cs->debug & L1_DEB_HSCX_FIFO) {
- char *t = bcs->blog;
-
- t += sprintf(t, "hscx_empty_fifo %c cnt %d",
- bcs->hw.hscx.hscx ? 'B' : 'A', count);
- QuickHex(t, ptr, count);
- debugl1(cs, "%s", bcs->blog);
- }
-}
-
-static void
-Memhscx_fill_fifo(struct BCState *bcs)
-{
- struct IsdnCardState *cs = bcs->cs;
- int more, count, cnt;
- int fifo_size = test_bit(HW_IPAC, &cs->HW_Flags) ? 64 : 32;
- u_char *ptr, *p;
-
- if ((cs->debug & L1_DEB_HSCX) && !(cs->debug & L1_DEB_HSCX_FIFO))
- debugl1(cs, "hscx_fill_fifo");
-
- if (!bcs->tx_skb)
- return;
- if (bcs->tx_skb->len <= 0)
- return;
-
- more = (bcs->mode == L1_MODE_TRANS) ? 1 : 0;
- if (bcs->tx_skb->len > fifo_size) {
- more = !0;
- count = fifo_size;
- } else
- count = bcs->tx_skb->len;
- cnt = count;
- MemwaitforXFW(cs, bcs->hw.hscx.hscx);
- p = ptr = bcs->tx_skb->data;
- skb_pull(bcs->tx_skb, count);
- bcs->tx_cnt -= count;
- bcs->hw.hscx.count += count;
- while (cnt--)
- memwritereg(cs->hw.diva.cfg_reg, bcs->hw.hscx.hscx ? 0x40 : 0,
- *p++);
- MemWriteHSCXCMDR(cs, bcs->hw.hscx.hscx, more ? 0x8 : 0xa);
- if (cs->debug & L1_DEB_HSCX_FIFO) {
- char *t = bcs->blog;
-
- t += sprintf(t, "hscx_fill_fifo %c cnt %d",
- bcs->hw.hscx.hscx ? 'B' : 'A', count);
- QuickHex(t, ptr, count);
- debugl1(cs, "%s", bcs->blog);
- }
-}
-
-static void
-Memhscx_interrupt(struct IsdnCardState *cs, u_char val, u_char hscx)
-{
- u_char r;
- struct BCState *bcs = cs->bcs + hscx;
- struct sk_buff *skb;
- int fifo_size = test_bit(HW_IPAC, &cs->HW_Flags) ? 64 : 32;
- int count;
-
- if (!test_bit(BC_FLG_INIT, &bcs->Flag))
- return;
-
- if (val & 0x80) { /* RME */
- r = MemReadHSCX(cs, hscx, HSCX_RSTA);
- if ((r & 0xf0) != 0xa0) {
- if (!(r & 0x80))
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "HSCX invalid frame");
- if ((r & 0x40) && bcs->mode)
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "HSCX RDO mode=%d",
- bcs->mode);
- if (!(r & 0x20))
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "HSCX CRC error");
- MemWriteHSCXCMDR(cs, hscx, 0x80);
- } else {
- count = MemReadHSCX(cs, hscx, HSCX_RBCL) & (
- test_bit(HW_IPAC, &cs->HW_Flags) ? 0x3f : 0x1f);
- if (count == 0)
- count = fifo_size;
- Memhscx_empty_fifo(bcs, count);
- if ((count = bcs->hw.hscx.rcvidx - 1) > 0) {
- if (cs->debug & L1_DEB_HSCX_FIFO)
- debugl1(cs, "HX Frame %d", count);
- if (!(skb = dev_alloc_skb(count)))
- printk(KERN_WARNING "HSCX: receive out of memory\n");
- else {
- skb_put_data(skb, bcs->hw.hscx.rcvbuf,
- count);
- skb_queue_tail(&bcs->rqueue, skb);
- }
- }
- }
- bcs->hw.hscx.rcvidx = 0;
- schedule_event(bcs, B_RCVBUFREADY);
- }
- if (val & 0x40) { /* RPF */
- Memhscx_empty_fifo(bcs, fifo_size);
- if (bcs->mode == L1_MODE_TRANS) {
- /* receive audio data */
- if (!(skb = dev_alloc_skb(fifo_size)))
- printk(KERN_WARNING "HiSax: receive out of memory\n");
- else {
- skb_put_data(skb, bcs->hw.hscx.rcvbuf,
- fifo_size);
- skb_queue_tail(&bcs->rqueue, skb);
- }
- bcs->hw.hscx.rcvidx = 0;
- schedule_event(bcs, B_RCVBUFREADY);
- }
- }
- if (val & 0x10) { /* XPR */
- if (bcs->tx_skb) {
- if (bcs->tx_skb->len) {
- Memhscx_fill_fifo(bcs);
- return;
- } else {
- if (test_bit(FLG_LLI_L1WAKEUP, &bcs->st->lli.flag) &&
- (PACKET_NOACK != bcs->tx_skb->pkt_type)) {
- u_long flags;
- spin_lock_irqsave(&bcs->aclock, flags);
- bcs->ackcnt += bcs->hw.hscx.count;
- spin_unlock_irqrestore(&bcs->aclock, flags);
- schedule_event(bcs, B_ACKPENDING);
- }
- dev_kfree_skb_irq(bcs->tx_skb);
- bcs->hw.hscx.count = 0;
- bcs->tx_skb = NULL;
- }
- }
- if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) {
- bcs->hw.hscx.count = 0;
- test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
- Memhscx_fill_fifo(bcs);
- } else {
- test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
- schedule_event(bcs, B_XMTBUFREADY);
- }
- }
-}
-
-static inline void
-Memhscx_int_main(struct IsdnCardState *cs, u_char val)
-{
-
- u_char exval;
- struct BCState *bcs;
-
- if (val & 0x01) { // EXB
- bcs = cs->bcs + 1;
- exval = MemReadHSCX(cs, 1, HSCX_EXIR);
- if (exval & 0x40) {
- if (bcs->mode == 1)
- Memhscx_fill_fifo(bcs);
- else {
-					/* Here we lost a TX interrupt, so
- * restart transmitting the whole frame.
- */
- if (bcs->tx_skb) {
- skb_push(bcs->tx_skb, bcs->hw.hscx.count);
- bcs->tx_cnt += bcs->hw.hscx.count;
- bcs->hw.hscx.count = 0;
- }
- MemWriteHSCXCMDR(cs, bcs->hw.hscx.hscx, 0x01);
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "HSCX B EXIR %x Lost TX", exval);
- }
- } else if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "HSCX B EXIR %x", exval);
- }
- if (val & 0xf8) {
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "HSCX B interrupt %x", val);
- Memhscx_interrupt(cs, val, 1);
- }
- if (val & 0x02) { // EXA
- bcs = cs->bcs;
- exval = MemReadHSCX(cs, 0, HSCX_EXIR);
- if (exval & 0x40) {
- if (bcs->mode == L1_MODE_TRANS)
- Memhscx_fill_fifo(bcs);
- else {
-					/* Here we lost a TX interrupt, so
- * restart transmitting the whole frame.
- */
- if (bcs->tx_skb) {
- skb_push(bcs->tx_skb, bcs->hw.hscx.count);
- bcs->tx_cnt += bcs->hw.hscx.count;
- bcs->hw.hscx.count = 0;
- }
- MemWriteHSCXCMDR(cs, bcs->hw.hscx.hscx, 0x01);
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "HSCX A EXIR %x Lost TX", exval);
- }
- } else if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "HSCX A EXIR %x", exval);
- }
- if (val & 0x04) { // ICA
- exval = MemReadHSCX(cs, 0, HSCX_ISTA);
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "HSCX A interrupt %x", exval);
- Memhscx_interrupt(cs, exval, 0);
- }
-}
-
-static irqreturn_t
-diva_irq_ipac_pci(int intno, void *dev_id)
-{
- struct IsdnCardState *cs = dev_id;
- u_char ista, val;
- int icnt = 5;
- u_char *cfg;
- u_long flags;
-
- spin_lock_irqsave(&cs->lock, flags);
- cfg = (u_char *) cs->hw.diva.pci_cfg;
- val = *cfg;
- if (!(val & PITA_INT0_STATUS)) {
- spin_unlock_irqrestore(&cs->lock, flags);
- return IRQ_NONE; /* other shared IRQ */
- }
- *cfg = PITA_INT0_STATUS; /* Reset pending INT0 */
- ista = memreadreg(cs->hw.diva.cfg_reg, IPAC_ISTA);
-Start_IPACPCI:
- if (cs->debug & L1_DEB_IPAC)
- debugl1(cs, "IPAC ISTA %02X", ista);
- if (ista & 0x0f) {
- val = memreadreg(cs->hw.diva.cfg_reg, HSCX_ISTA + 0x40);
- if (ista & 0x01)
- val |= 0x01;
- if (ista & 0x04)
- val |= 0x02;
- if (ista & 0x08)
- val |= 0x04;
- if (val)
- Memhscx_int_main(cs, val);
- }
- if (ista & 0x20) {
- val = 0xfe & memreadreg(cs->hw.diva.cfg_reg, ISAC_ISTA + 0x80);
- if (val) {
- isac_interrupt(cs, val);
- }
- }
- if (ista & 0x10) {
- val = 0x01;
- isac_interrupt(cs, val);
- }
- ista = memreadreg(cs->hw.diva.cfg_reg, IPAC_ISTA);
- if ((ista & 0x3f) && icnt) {
- icnt--;
- goto Start_IPACPCI;
- }
- if (!icnt)
- printk(KERN_WARNING "DIVA IPAC PCI IRQ LOOP\n");
- memwritereg(cs->hw.diva.cfg_reg, IPAC_MASK, 0xFF);
- memwritereg(cs->hw.diva.cfg_reg, IPAC_MASK, 0xC0);
- spin_unlock_irqrestore(&cs->lock, flags);
- return IRQ_HANDLED;
-}
-
-static irqreturn_t
-diva_irq_ipacx_pci(int intno, void *dev_id)
-{
- struct IsdnCardState *cs = dev_id;
- u_char val;
- u_char *cfg;
- u_long flags;
-
- spin_lock_irqsave(&cs->lock, flags);
- cfg = (u_char *) cs->hw.diva.pci_cfg;
- val = *cfg;
- if (!(val & PITA_INT0_STATUS)) {
- spin_unlock_irqrestore(&cs->lock, flags);
- return IRQ_NONE; // other shared IRQ
- }
- interrupt_ipacx(cs); // handler for chip
- *cfg = PITA_INT0_STATUS; // Reset PLX interrupt
- spin_unlock_irqrestore(&cs->lock, flags);
- return IRQ_HANDLED;
-}
-
-static void
-release_io_diva(struct IsdnCardState *cs)
-{
- int bytecnt;
-
- if ((cs->subtyp == DIVA_IPAC_PCI) ||
- (cs->subtyp == DIVA_IPACX_PCI)) {
- u_int *cfg = (unsigned int *)cs->hw.diva.pci_cfg;
-
- *cfg = 0; /* disable INT0/1 */
- *cfg = 2; /* reset pending INT0 */
- if (cs->hw.diva.cfg_reg)
- iounmap((void *)cs->hw.diva.cfg_reg);
- if (cs->hw.diva.pci_cfg)
- iounmap((void *)cs->hw.diva.pci_cfg);
- return;
- } else if (cs->subtyp != DIVA_IPAC_ISA) {
- del_timer(&cs->hw.diva.tl);
- if (cs->hw.diva.cfg_reg)
- byteout(cs->hw.diva.ctrl, 0); /* LED off, Reset */
- }
- if ((cs->subtyp == DIVA_ISA) || (cs->subtyp == DIVA_IPAC_ISA))
- bytecnt = 8;
- else
- bytecnt = 32;
- if (cs->hw.diva.cfg_reg) {
- release_region(cs->hw.diva.cfg_reg, bytecnt);
- }
-}
-
-static void
-iounmap_diva(struct IsdnCardState *cs)
-{
- if ((cs->subtyp == DIVA_IPAC_PCI) || (cs->subtyp == DIVA_IPACX_PCI)) {
- if (cs->hw.diva.cfg_reg) {
- iounmap((void *)cs->hw.diva.cfg_reg);
- cs->hw.diva.cfg_reg = 0;
- }
- if (cs->hw.diva.pci_cfg) {
- iounmap((void *)cs->hw.diva.pci_cfg);
- cs->hw.diva.pci_cfg = 0;
- }
- }
-
- return;
-}
-
-static void
-reset_diva(struct IsdnCardState *cs)
-{
- if (cs->subtyp == DIVA_IPAC_ISA) {
- writereg(cs->hw.diva.isac_adr, cs->hw.diva.isac, IPAC_POTA2, 0x20);
- mdelay(10);
- writereg(cs->hw.diva.isac_adr, cs->hw.diva.isac, IPAC_POTA2, 0x00);
- mdelay(10);
- writereg(cs->hw.diva.isac_adr, cs->hw.diva.isac, IPAC_MASK, 0xc0);
- } else if (cs->subtyp == DIVA_IPAC_PCI) {
- unsigned int *ireg = (unsigned int *)(cs->hw.diva.pci_cfg +
- PITA_MISC_REG);
- *ireg = PITA_PARA_SOFTRESET | PITA_PARA_MPX_MODE;
- mdelay(10);
- *ireg = PITA_PARA_MPX_MODE;
- mdelay(10);
- memwritereg(cs->hw.diva.cfg_reg, IPAC_MASK, 0xc0);
- } else if (cs->subtyp == DIVA_IPACX_PCI) {
- unsigned int *ireg = (unsigned int *)(cs->hw.diva.pci_cfg +
- PITA_MISC_REG);
- *ireg = PITA_PARA_SOFTRESET | PITA_PARA_MPX_MODE;
- mdelay(10);
- *ireg = PITA_PARA_MPX_MODE | PITA_SER_SOFTRESET;
- mdelay(10);
- MemWriteISAC_IPACX(cs, IPACX_MASK, 0xff); // Interrupts off
- } else { /* DIVA 2.0 */
- cs->hw.diva.ctrl_reg = 0; /* Reset On */
- byteout(cs->hw.diva.ctrl, cs->hw.diva.ctrl_reg);
- mdelay(10);
- cs->hw.diva.ctrl_reg |= DIVA_RESET; /* Reset Off */
- byteout(cs->hw.diva.ctrl, cs->hw.diva.ctrl_reg);
- mdelay(10);
- if (cs->subtyp == DIVA_ISA)
- cs->hw.diva.ctrl_reg |= DIVA_ISA_LED_A;
- else {
- /* Workaround PCI9060 */
- byteout(cs->hw.diva.pci_cfg + 0x69, 9);
- cs->hw.diva.ctrl_reg |= DIVA_PCI_LED_A;
- }
- byteout(cs->hw.diva.ctrl, cs->hw.diva.ctrl_reg);
- }
-}
-
-#define DIVA_ASSIGN 1
-
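-/*
- * LED handling: LED A is lit while the card is assigned and blinks
- * otherwise; LED B follows the status nibbles set in Diva_card_msg
- * (solid while a connection is up, blinking during call setup).
- */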
-static void
-diva_led_handler(struct timer_list *t)
-{
- struct IsdnCardState *cs = from_timer(cs, t, hw.diva.tl);
- int blink = 0;
-
- if ((cs->subtyp == DIVA_IPAC_ISA) ||
- (cs->subtyp == DIVA_IPAC_PCI) ||
- (cs->subtyp == DIVA_IPACX_PCI))
- return;
- del_timer(&cs->hw.diva.tl);
- if (cs->hw.diva.status & DIVA_ASSIGN)
- cs->hw.diva.ctrl_reg |= (DIVA_ISA == cs->subtyp) ?
- DIVA_ISA_LED_A : DIVA_PCI_LED_A;
- else {
- cs->hw.diva.ctrl_reg ^= (DIVA_ISA == cs->subtyp) ?
- DIVA_ISA_LED_A : DIVA_PCI_LED_A;
- blink = 250;
- }
- if (cs->hw.diva.status & 0xf000)
- cs->hw.diva.ctrl_reg |= (DIVA_ISA == cs->subtyp) ?
- DIVA_ISA_LED_B : DIVA_PCI_LED_B;
- else if (cs->hw.diva.status & 0x0f00) {
- cs->hw.diva.ctrl_reg ^= (DIVA_ISA == cs->subtyp) ?
- DIVA_ISA_LED_B : DIVA_PCI_LED_B;
- blink = 500;
- } else
- cs->hw.diva.ctrl_reg &= ~((DIVA_ISA == cs->subtyp) ?
- DIVA_ISA_LED_B : DIVA_PCI_LED_B);
-
- byteout(cs->hw.diva.ctrl, cs->hw.diva.ctrl_reg);
- if (blink) {
- cs->hw.diva.tl.expires = jiffies + ((blink * HZ) / 1000);
- add_timer(&cs->hw.diva.tl);
- }
-}
-
-static int
-Diva_card_msg(struct IsdnCardState *cs, int mt, void *arg)
-{
- u_int *ireg;
- u_long flags;
-
- switch (mt) {
- case CARD_RESET:
- spin_lock_irqsave(&cs->lock, flags);
- reset_diva(cs);
- spin_unlock_irqrestore(&cs->lock, flags);
- return (0);
- case CARD_RELEASE:
- release_io_diva(cs);
- return (0);
- case CARD_INIT:
- spin_lock_irqsave(&cs->lock, flags);
- reset_diva(cs);
- if (cs->subtyp == DIVA_IPACX_PCI) {
- ireg = (unsigned int *)cs->hw.diva.pci_cfg;
- *ireg = PITA_INT0_ENABLE;
- init_ipacx(cs, 3); // init chip and enable interrupts
- spin_unlock_irqrestore(&cs->lock, flags);
- return (0);
- }
- if (cs->subtyp == DIVA_IPAC_PCI) {
- ireg = (unsigned int *)cs->hw.diva.pci_cfg;
- *ireg = PITA_INT0_ENABLE;
- }
- inithscxisac(cs, 3);
- spin_unlock_irqrestore(&cs->lock, flags);
- return (0);
- case CARD_TEST:
- return (0);
- case (MDL_REMOVE | REQUEST):
- cs->hw.diva.status = 0;
- break;
- case (MDL_ASSIGN | REQUEST):
- cs->hw.diva.status |= DIVA_ASSIGN;
- break;
- case MDL_INFO_SETUP:
- if ((long)arg)
- cs->hw.diva.status |= 0x0200;
- else
- cs->hw.diva.status |= 0x0100;
- break;
- case MDL_INFO_CONN:
- if ((long)arg)
- cs->hw.diva.status |= 0x2000;
- else
- cs->hw.diva.status |= 0x1000;
- break;
- case MDL_INFO_REL:
- if ((long)arg) {
- cs->hw.diva.status &= ~0x2000;
- cs->hw.diva.status &= ~0x0200;
- } else {
- cs->hw.diva.status &= ~0x1000;
- cs->hw.diva.status &= ~0x0100;
- }
- break;
- }
- if ((cs->subtyp != DIVA_IPAC_ISA) &&
- (cs->subtyp != DIVA_IPAC_PCI) &&
- (cs->subtyp != DIVA_IPACX_PCI)) {
- spin_lock_irqsave(&cs->lock, flags);
- diva_led_handler(&cs->hw.diva.tl);
- spin_unlock_irqrestore(&cs->lock, flags);
- }
- return (0);
-}
-
-static int setup_diva_common(struct IsdnCardState *cs)
-{
- int bytecnt;
- u_char val;
-
- if ((cs->subtyp == DIVA_ISA) || (cs->subtyp == DIVA_IPAC_ISA))
- bytecnt = 8;
- else
- bytecnt = 32;
-
- printk(KERN_INFO
- "Diva: %s card configured at %#lx IRQ %d\n",
- (cs->subtyp == DIVA_PCI) ? "PCI" :
- (cs->subtyp == DIVA_ISA) ? "ISA" :
- (cs->subtyp == DIVA_IPAC_ISA) ? "IPAC ISA" :
- (cs->subtyp == DIVA_IPAC_PCI) ? "IPAC PCI" : "IPACX PCI",
- cs->hw.diva.cfg_reg, cs->irq);
- if ((cs->subtyp == DIVA_IPAC_PCI) ||
- (cs->subtyp == DIVA_IPACX_PCI) ||
- (cs->subtyp == DIVA_PCI))
- printk(KERN_INFO "Diva: %s space at %#lx\n",
- (cs->subtyp == DIVA_PCI) ? "PCI" :
- (cs->subtyp == DIVA_IPAC_PCI) ? "IPAC PCI" : "IPACX PCI",
- cs->hw.diva.pci_cfg);
- if ((cs->subtyp != DIVA_IPAC_PCI) &&
- (cs->subtyp != DIVA_IPACX_PCI)) {
- if (!request_region(cs->hw.diva.cfg_reg, bytecnt, "diva isdn")) {
- printk(KERN_WARNING
- "HiSax: %s config port %lx-%lx already in use\n",
- "diva",
- cs->hw.diva.cfg_reg,
- cs->hw.diva.cfg_reg + bytecnt);
- iounmap_diva(cs);
- return (0);
- }
- }
- cs->BC_Read_Reg = &ReadHSCX;
- cs->BC_Write_Reg = &WriteHSCX;
- cs->BC_Send_Data = &hscx_fill_fifo;
- cs->cardmsg = &Diva_card_msg;
- setup_isac(cs);
- if (cs->subtyp == DIVA_IPAC_ISA) {
- cs->readisac = &ReadISAC_IPAC;
- cs->writeisac = &WriteISAC_IPAC;
- cs->readisacfifo = &ReadISACfifo_IPAC;
- cs->writeisacfifo = &WriteISACfifo_IPAC;
- cs->irq_func = &diva_irq_ipac_isa;
- val = readreg(cs->hw.diva.isac_adr, cs->hw.diva.isac, IPAC_ID);
- printk(KERN_INFO "Diva: IPAC version %x\n", val);
- } else if (cs->subtyp == DIVA_IPAC_PCI) {
- cs->readisac = &MemReadISAC_IPAC;
- cs->writeisac = &MemWriteISAC_IPAC;
- cs->readisacfifo = &MemReadISACfifo_IPAC;
- cs->writeisacfifo = &MemWriteISACfifo_IPAC;
- cs->BC_Read_Reg = &MemReadHSCX;
- cs->BC_Write_Reg = &MemWriteHSCX;
- cs->BC_Send_Data = &Memhscx_fill_fifo;
- cs->irq_func = &diva_irq_ipac_pci;
- val = memreadreg(cs->hw.diva.cfg_reg, IPAC_ID);
- printk(KERN_INFO "Diva: IPAC version %x\n", val);
- } else if (cs->subtyp == DIVA_IPACX_PCI) {
- cs->readisac = &MemReadISAC_IPACX;
- cs->writeisac = &MemWriteISAC_IPACX;
- cs->readisacfifo = &MemReadISACfifo_IPACX;
- cs->writeisacfifo = &MemWriteISACfifo_IPACX;
- cs->BC_Read_Reg = &MemReadHSCX_IPACX;
- cs->BC_Write_Reg = &MemWriteHSCX_IPACX;
- cs->BC_Send_Data = NULL; // function located in ipacx module
- cs->irq_func = &diva_irq_ipacx_pci;
- printk(KERN_INFO "Diva: IPACX Design Id: %x\n",
- MemReadISAC_IPACX(cs, IPACX_ID) & 0x3F);
- } else { /* DIVA 2.0 */
- timer_setup(&cs->hw.diva.tl, diva_led_handler, 0);
- cs->readisac = &ReadISAC;
- cs->writeisac = &WriteISAC;
- cs->readisacfifo = &ReadISACfifo;
- cs->writeisacfifo = &WriteISACfifo;
- cs->irq_func = &diva_interrupt;
- ISACVersion(cs, "Diva:");
- if (HscxVersion(cs, "Diva:")) {
- printk(KERN_WARNING
-			       "Diva: wrong HSCX versions, check IO address\n");
- release_io_diva(cs);
- return (0);
- }
- }
- return (1);
-}
-
-#ifdef CONFIG_ISA
-
-static int setup_diva_isa(struct IsdnCard *card)
-{
- struct IsdnCardState *cs = card->cs;
- u_char val;
-
- if (!card->para[1])
- return (-1); /* card not found; continue search */
-
- cs->hw.diva.ctrl_reg = 0;
- cs->hw.diva.cfg_reg = card->para[1];
- val = readreg(cs->hw.diva.cfg_reg + DIVA_IPAC_ADR,
- cs->hw.diva.cfg_reg + DIVA_IPAC_DATA, IPAC_ID);
- printk(KERN_INFO "Diva: IPAC version %x\n", val);
- if ((val == 1) || (val == 2)) {
- cs->subtyp = DIVA_IPAC_ISA;
- cs->hw.diva.ctrl = 0;
- cs->hw.diva.isac = card->para[1] + DIVA_IPAC_DATA;
- cs->hw.diva.hscx = card->para[1] + DIVA_IPAC_DATA;
- cs->hw.diva.isac_adr = card->para[1] + DIVA_IPAC_ADR;
- cs->hw.diva.hscx_adr = card->para[1] + DIVA_IPAC_ADR;
- test_and_set_bit(HW_IPAC, &cs->HW_Flags);
- } else {
- cs->subtyp = DIVA_ISA;
- cs->hw.diva.ctrl = card->para[1] + DIVA_ISA_CTRL;
- cs->hw.diva.isac = card->para[1] + DIVA_ISA_ISAC_DATA;
- cs->hw.diva.hscx = card->para[1] + DIVA_HSCX_DATA;
- cs->hw.diva.isac_adr = card->para[1] + DIVA_ISA_ISAC_ADR;
- cs->hw.diva.hscx_adr = card->para[1] + DIVA_HSCX_ADR;
- }
- cs->irq = card->para[0];
-
- return (1); /* card found */
-}
-
-#else /* if !CONFIG_ISA */
-
-static int setup_diva_isa(struct IsdnCard *card)
-{
- return (-1); /* card not found; continue search */
-}
-
-#endif /* CONFIG_ISA */
-
-#ifdef __ISAPNP__
-static struct isapnp_device_id diva_ids[] = {
- { ISAPNP_VENDOR('G', 'D', 'I'), ISAPNP_FUNCTION(0x51),
- ISAPNP_VENDOR('G', 'D', 'I'), ISAPNP_FUNCTION(0x51),
- (unsigned long) "Diva picola" },
- { ISAPNP_VENDOR('G', 'D', 'I'), ISAPNP_FUNCTION(0x51),
- ISAPNP_VENDOR('E', 'I', 'C'), ISAPNP_FUNCTION(0x51),
- (unsigned long) "Diva picola" },
- { ISAPNP_VENDOR('G', 'D', 'I'), ISAPNP_FUNCTION(0x71),
- ISAPNP_VENDOR('G', 'D', 'I'), ISAPNP_FUNCTION(0x71),
- (unsigned long) "Diva 2.0" },
- { ISAPNP_VENDOR('G', 'D', 'I'), ISAPNP_FUNCTION(0x71),
- ISAPNP_VENDOR('E', 'I', 'C'), ISAPNP_FUNCTION(0x71),
- (unsigned long) "Diva 2.0" },
- { ISAPNP_VENDOR('G', 'D', 'I'), ISAPNP_FUNCTION(0xA1),
- ISAPNP_VENDOR('G', 'D', 'I'), ISAPNP_FUNCTION(0xA1),
- (unsigned long) "Diva 2.01" },
- { ISAPNP_VENDOR('G', 'D', 'I'), ISAPNP_FUNCTION(0xA1),
- ISAPNP_VENDOR('E', 'I', 'C'), ISAPNP_FUNCTION(0xA1),
- (unsigned long) "Diva 2.01" },
- { 0, }
-};
-
-static struct isapnp_device_id *ipid = &diva_ids[0];
-static struct pnp_card *pnp_c = NULL;
-
-static int setup_diva_isapnp(struct IsdnCard *card)
-{
- struct IsdnCardState *cs = card->cs;
- struct pnp_dev *pnp_d;
-
- if (!isapnp_present())
- return (-1); /* card not found; continue search */
-
- while (ipid->card_vendor) {
- if ((pnp_c = pnp_find_card(ipid->card_vendor,
- ipid->card_device, pnp_c))) {
- pnp_d = NULL;
- if ((pnp_d = pnp_find_dev(pnp_c,
- ipid->vendor, ipid->function, pnp_d))) {
- int err;
-
- printk(KERN_INFO "HiSax: %s detected\n",
- (char *)ipid->driver_data);
- pnp_disable_dev(pnp_d);
- err = pnp_activate_dev(pnp_d);
- if (err < 0) {
- printk(KERN_WARNING "%s: pnp_activate_dev ret(%d)\n",
- __func__, err);
- return (0);
- }
- card->para[1] = pnp_port_start(pnp_d, 0);
- card->para[0] = pnp_irq(pnp_d, 0);
- if (card->para[0] == -1 || !card->para[1]) {
-					printk(KERN_ERR "Diva PnP: some resources are missing %ld/%lx\n",
- card->para[0], card->para[1]);
- pnp_disable_dev(pnp_d);
- return (0);
- }
- cs->hw.diva.cfg_reg = card->para[1];
- cs->irq = card->para[0];
- if (ipid->function == ISAPNP_FUNCTION(0xA1)) {
- cs->subtyp = DIVA_IPAC_ISA;
- cs->hw.diva.ctrl = 0;
- cs->hw.diva.isac =
- card->para[1] + DIVA_IPAC_DATA;
- cs->hw.diva.hscx =
- card->para[1] + DIVA_IPAC_DATA;
- cs->hw.diva.isac_adr =
- card->para[1] + DIVA_IPAC_ADR;
- cs->hw.diva.hscx_adr =
- card->para[1] + DIVA_IPAC_ADR;
- test_and_set_bit(HW_IPAC, &cs->HW_Flags);
- } else {
- cs->subtyp = DIVA_ISA;
- cs->hw.diva.ctrl =
- card->para[1] + DIVA_ISA_CTRL;
- cs->hw.diva.isac =
- card->para[1] + DIVA_ISA_ISAC_DATA;
- cs->hw.diva.hscx =
- card->para[1] + DIVA_HSCX_DATA;
- cs->hw.diva.isac_adr =
- card->para[1] + DIVA_ISA_ISAC_ADR;
- cs->hw.diva.hscx_adr =
- card->para[1] + DIVA_HSCX_ADR;
- }
- return (1); /* card found */
- } else {
-				printk(KERN_ERR "Diva PnP: PnP error, card found but no device\n");
- return (0);
- }
- }
- ipid++;
- pnp_c = NULL;
- }
-
- return (-1); /* card not found; continue search */
-}
-
-#else /* if !ISAPNP */
-
-static int setup_diva_isapnp(struct IsdnCard *card)
-{
- return (-1); /* card not found; continue search */
-}
-
-#endif /* ISAPNP */
-
-#ifdef CONFIG_PCI
-static struct pci_dev *dev_diva = NULL;
-static struct pci_dev *dev_diva_u = NULL;
-static struct pci_dev *dev_diva201 = NULL;
-static struct pci_dev *dev_diva202 = NULL;
-
-static int setup_diva_pci(struct IsdnCard *card)
-{
- struct IsdnCardState *cs = card->cs;
-
- cs->subtyp = 0;
- if ((dev_diva = hisax_find_pci_device(PCI_VENDOR_ID_EICON,
- PCI_DEVICE_ID_EICON_DIVA20, dev_diva))) {
- if (pci_enable_device(dev_diva))
- return (0);
- cs->subtyp = DIVA_PCI;
- cs->irq = dev_diva->irq;
- cs->hw.diva.cfg_reg = pci_resource_start(dev_diva, 2);
- } else if ((dev_diva_u = hisax_find_pci_device(PCI_VENDOR_ID_EICON,
- PCI_DEVICE_ID_EICON_DIVA20_U, dev_diva_u))) {
- if (pci_enable_device(dev_diva_u))
- return (0);
- cs->subtyp = DIVA_PCI;
- cs->irq = dev_diva_u->irq;
- cs->hw.diva.cfg_reg = pci_resource_start(dev_diva_u, 2);
- } else if ((dev_diva201 = hisax_find_pci_device(PCI_VENDOR_ID_EICON,
- PCI_DEVICE_ID_EICON_DIVA201, dev_diva201))) {
- if (pci_enable_device(dev_diva201))
- return (0);
- cs->subtyp = DIVA_IPAC_PCI;
- cs->irq = dev_diva201->irq;
- cs->hw.diva.pci_cfg =
- (ulong) ioremap(pci_resource_start(dev_diva201, 0), 4096);
- cs->hw.diva.cfg_reg =
- (ulong) ioremap(pci_resource_start(dev_diva201, 1), 4096);
- } else if ((dev_diva202 = hisax_find_pci_device(PCI_VENDOR_ID_EICON,
- PCI_DEVICE_ID_EICON_DIVA202, dev_diva202))) {
- if (pci_enable_device(dev_diva202))
- return (0);
- cs->subtyp = DIVA_IPACX_PCI;
- cs->irq = dev_diva202->irq;
- cs->hw.diva.pci_cfg =
- (ulong) ioremap(pci_resource_start(dev_diva202, 0), 4096);
- cs->hw.diva.cfg_reg =
- (ulong) ioremap(pci_resource_start(dev_diva202, 1), 4096);
- } else {
- return (-1); /* card not found; continue search */
- }
-
- if (!cs->irq) {
- printk(KERN_WARNING "Diva: No IRQ for PCI card found\n");
- iounmap_diva(cs);
- return (0);
- }
-
- if (!cs->hw.diva.cfg_reg) {
-		printk(KERN_WARNING "Diva: No I/O address for PCI card found\n");
- iounmap_diva(cs);
- return (0);
- }
- cs->irq_flags |= IRQF_SHARED;
-
- if ((cs->subtyp == DIVA_IPAC_PCI) ||
- (cs->subtyp == DIVA_IPACX_PCI)) {
- cs->hw.diva.ctrl = 0;
- cs->hw.diva.isac = 0;
- cs->hw.diva.hscx = 0;
- cs->hw.diva.isac_adr = 0;
- cs->hw.diva.hscx_adr = 0;
- test_and_set_bit(HW_IPAC, &cs->HW_Flags);
- } else {
- cs->hw.diva.ctrl = cs->hw.diva.cfg_reg + DIVA_PCI_CTRL;
- cs->hw.diva.isac = cs->hw.diva.cfg_reg + DIVA_PCI_ISAC_DATA;
- cs->hw.diva.hscx = cs->hw.diva.cfg_reg + DIVA_HSCX_DATA;
- cs->hw.diva.isac_adr = cs->hw.diva.cfg_reg + DIVA_PCI_ISAC_ADR;
- cs->hw.diva.hscx_adr = cs->hw.diva.cfg_reg + DIVA_HSCX_ADR;
- }
-
- return (1); /* card found */
-}
-
-#else /* if !CONFIG_PCI */
-
-static int setup_diva_pci(struct IsdnCard *card)
-{
- return (-1); /* card not found; continue search */
-}
-
-#endif /* CONFIG_PCI */
-
-int setup_diva(struct IsdnCard *card)
-{
- int rc, have_card = 0;
- struct IsdnCardState *cs = card->cs;
- char tmp[64];
-
- strcpy(tmp, Diva_revision);
- printk(KERN_INFO "HiSax: Eicon.Diehl Diva driver Rev. %s\n", HiSax_getrev(tmp));
- if (cs->typ != ISDN_CTYPE_DIEHLDIVA)
- return (0);
- cs->hw.diva.status = 0;
-
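-	/* The probe helpers share a return convention: 1 = card found,
-	 * 0 = fatal setup error (abort), -1 = no such card, try the next
-	 * bus type. */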
- rc = setup_diva_isa(card);
- if (!rc)
- return rc;
- if (rc > 0) {
- have_card = 1;
- goto ready;
- }
-
- rc = setup_diva_isapnp(card);
- if (!rc)
- return rc;
- if (rc > 0) {
- have_card = 1;
- goto ready;
- }
-
- rc = setup_diva_pci(card);
- if (!rc)
- return rc;
- if (rc > 0)
- have_card = 1;
-
-ready:
- if (!have_card) {
- printk(KERN_WARNING "Diva: No ISA, ISAPNP or PCI card found\n");
- return (0);
- }
-
- return setup_diva_common(card->cs);
-}
diff --git a/drivers/isdn/hisax/elsa.c b/drivers/isdn/hisax/elsa.c
deleted file mode 100644
index 0754c0743790..000000000000
--- a/drivers/isdn/hisax/elsa.c
+++ /dev/null
@@ -1,1245 +0,0 @@
-/* $Id: elsa.c,v 2.32.2.4 2004/01/24 20:47:21 keil Exp $
- *
- * low level stuff for Elsa isdn cards
- *
- * Author Karsten Keil
- * Copyright by Karsten Keil <keil@isdn4linux.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- * For changes and modifications please read
- * Documentation/isdn/HiSax.cert
- *
- * Thanks to Elsa GmbH for documents and information
- *
- * Klaus Lichtenwalder (Klaus.Lichtenwalder@WebForum.DE)
- * for ELSA PCMCIA support
- *
- */
-
-#include <linux/init.h>
-#include <linux/slab.h>
-#include "hisax.h"
-#include "arcofi.h"
-#include "isac.h"
-#include "ipac.h"
-#include "hscx.h"
-#include "isdnl1.h"
-#include <linux/pci.h>
-#include <linux/isapnp.h>
-#include <linux/serial.h>
-#include <linux/serial_reg.h>
-
-static const char *Elsa_revision = "$Revision: 2.32.2.4 $";
-static const char *Elsa_Types[] =
-{"None", "PC", "PCC-8", "PCC-16", "PCF", "PCF-Pro",
- "PCMCIA", "QS 1000", "QS 3000", "Microlink PCI", "QS 3000 PCI",
- "PCMCIA-IPAC" };
-
-static const char *ITACVer[] =
-{"?0?", "?1?", "?2?", "?3?", "?4?", "V2.2",
- "B1", "A1"};
-
-#define byteout(addr, val) outb(val, addr)
-#define bytein(addr) inb(addr)
-
-#define ELSA_ISAC 0
-#define ELSA_ISAC_PCM 1
-#define ELSA_ITAC 1
-#define ELSA_HSCX 2
-#define ELSA_ALE 3
-#define ELSA_ALE_PCM 4
-#define ELSA_CONTROL 4
-#define ELSA_CONFIG 5
-#define ELSA_START_TIMER 6
-#define ELSA_TRIG_IRQ 7
-
-#define ELSA_PC 1
-#define ELSA_PCC8 2
-#define ELSA_PCC16 3
-#define ELSA_PCF 4
-#define ELSA_PCFPRO 5
-#define ELSA_PCMCIA 6
-#define ELSA_QS1000 7
-#define ELSA_QS3000 8
-#define ELSA_QS1000PCI 9
-#define ELSA_QS3000PCI 10
-#define ELSA_PCMCIA_IPAC 11
-
-/* PCI stuff */
-#define ELSA_PCI_IRQ_MASK 0x04
-
-/* ITAC register addresses (Microlink PC only) */
-#define ITAC_SYS 0x34
-#define ITAC_ISEN 0x48
-#define ITAC_RFIE 0x4A
-#define ITAC_XFIE 0x4C
-#define ITAC_SCIE 0x4E
-#define ITAC_STIE 0x46
-
-/*** ***
- *** Macros used as commands for the card registers ***
- *** (several commands are combined by bitwise OR) ***
- *** ***/
-
-/* Config register (read) */
-#define ELIRQF_TIMER_RUN 0x02 /* bit 1 of the config register */
-#define ELIRQF_TIMER_RUN_PCC8 0x01 /* bit 0 of the config register on PCC */
-#define ELSA_IRQ_IDX 0x38 /* bits 3,4,5 of the config register */
-#define ELSA_IRQ_IDX_PCC8 0x30 /* bits 4,5 of the config register */
-#define ELSA_IRQ_IDX_PC 0x0c /* bits 2,3 of the config register */
-
-/* Control register (write) */
-#define ELSA_LINE_LED 0x02 /* bit 1 yellow LED */
-#define ELSA_STAT_LED 0x08 /* bit 3 green LED */
-#define ELSA_ISDN_RESET 0x20 /* bit 5 reset line */
-#define ELSA_ENA_TIMER_INT 0x80 /* bit 7 enable timer interrupt */
-
-/* ALE register (read) */
-#define ELSA_HW_RELEASE 0x07 /* bits 0-2 hardware identification */
-#define ELSA_S0_POWER_BAD 0x08 /* bit 3 S0 bus power missing */
-
-/* Status Flags */
-#define ELIRQF_TIMER_AKTIV 1
-#define ELSA_BAD_PWR 2
-#define ELSA_ASSIGN 4
-
-#define RS_ISR_PASS_LIMIT 256
-#define FLG_MODEM_ACTIVE 1
-/* IPAC AUX */
-#define ELSA_IPAC_LINE_LED 0x40 /* bit 6 yellow LED */
-#define ELSA_IPAC_STAT_LED 0x80 /* bit 7 green LED */
-
-#if ARCOFI_USE
-static struct arcofi_msg ARCOFI_XOP_F =
-{NULL,0,2,{0xa1,0x3f,0,0,0,0,0,0,0,0}}; /* Normal OP */
-static struct arcofi_msg ARCOFI_XOP_1 =
-{&ARCOFI_XOP_F,0,2,{0xa1,0x31,0,0,0,0,0,0,0,0}}; /* PWR UP */
-static struct arcofi_msg ARCOFI_SOP_F =
-{&ARCOFI_XOP_1,0,10,{0xa1,0x1f,0x00,0x50,0x10,0x00,0x00,0x80,0x02,0x12}};
-static struct arcofi_msg ARCOFI_COP_9 =
-{&ARCOFI_SOP_F,0,10,{0xa1,0x29,0x80,0xcb,0xe9,0x88,0x00,0xc8,0xd8,0x80}}; /* RX */
-static struct arcofi_msg ARCOFI_COP_8 =
-{&ARCOFI_COP_9,0,10,{0xa1,0x28,0x49,0x31,0x8,0x13,0x6e,0x88,0x2a,0x61}}; /* TX */
-static struct arcofi_msg ARCOFI_COP_7 =
-{&ARCOFI_COP_8,0,4,{0xa1,0x27,0x80,0x80,0,0,0,0,0,0}}; /* GZ */
-static struct arcofi_msg ARCOFI_COP_6 =
-{&ARCOFI_COP_7,0,6,{0xa1,0x26,0,0,0x82,0x7c,0,0,0,0}}; /* GRL GRH */
-static struct arcofi_msg ARCOFI_COP_5 =
-{&ARCOFI_COP_6,0,4,{0xa1,0x25,0xbb,0x4a,0,0,0,0,0,0}}; /* GTX */
-static struct arcofi_msg ARCOFI_VERSION =
-{NULL,1,2,{0xa0,0,0,0,0,0,0,0,0,0}};
-static struct arcofi_msg ARCOFI_XOP_0 =
-{NULL,0,2,{0xa1,0x30,0,0,0,0,0,0,0,0}}; /* PWR Down */
-
-static void set_arcofi(struct IsdnCardState *cs, int bc);
-
-#include "elsa_ser.c"
-#endif /* ARCOFI_USE */
-
-static inline u_char
-readreg(unsigned int ale, unsigned int adr, u_char off)
-{
- register u_char ret;
-
- byteout(ale, off);
- ret = bytein(adr);
- return (ret);
-}
-
-static inline void
-readfifo(unsigned int ale, unsigned int adr, u_char off, u_char *data, int size)
-{
- byteout(ale, off);
- insb(adr, data, size);
-}
-
-
-static inline void
-writereg(unsigned int ale, unsigned int adr, u_char off, u_char data)
-{
- byteout(ale, off);
- byteout(adr, data);
-}
-
-static inline void
-writefifo(unsigned int ale, unsigned int adr, u_char off, u_char *data, int size)
-{
- byteout(ale, off);
- outsb(adr, data, size);
-}
-
-/* Interface functions */
-
-static u_char
-ReadISAC(struct IsdnCardState *cs, u_char offset)
-{
- return (readreg(cs->hw.elsa.ale, cs->hw.elsa.isac, offset));
-}
-
-static void
-WriteISAC(struct IsdnCardState *cs, u_char offset, u_char value)
-{
- writereg(cs->hw.elsa.ale, cs->hw.elsa.isac, offset, value);
-}
-
-static void
-ReadISACfifo(struct IsdnCardState *cs, u_char *data, int size)
-{
- readfifo(cs->hw.elsa.ale, cs->hw.elsa.isac, 0, data, size);
-}
-
-static void
-WriteISACfifo(struct IsdnCardState *cs, u_char *data, int size)
-{
- writefifo(cs->hw.elsa.ale, cs->hw.elsa.isac, 0, data, size);
-}
-
-static u_char
-ReadISAC_IPAC(struct IsdnCardState *cs, u_char offset)
-{
- return (readreg(cs->hw.elsa.ale, cs->hw.elsa.isac, offset + 0x80));
-}
-
-static void
-WriteISAC_IPAC(struct IsdnCardState *cs, u_char offset, u_char value)
-{
- writereg(cs->hw.elsa.ale, cs->hw.elsa.isac, offset | 0x80, value);
-}
-
-static void
-ReadISACfifo_IPAC(struct IsdnCardState *cs, u_char *data, int size)
-{
- readfifo(cs->hw.elsa.ale, cs->hw.elsa.isac, 0x80, data, size);
-}
-
-static void
-WriteISACfifo_IPAC(struct IsdnCardState *cs, u_char *data, int size)
-{
- writefifo(cs->hw.elsa.ale, cs->hw.elsa.isac, 0x80, data, size);
-}
-
-static u_char
-ReadHSCX(struct IsdnCardState *cs, int hscx, u_char offset)
-{
- return (readreg(cs->hw.elsa.ale,
- cs->hw.elsa.hscx, offset + (hscx ? 0x40 : 0)));
-}
-
-static void
-WriteHSCX(struct IsdnCardState *cs, int hscx, u_char offset, u_char value)
-{
- writereg(cs->hw.elsa.ale,
- cs->hw.elsa.hscx, offset + (hscx ? 0x40 : 0), value);
-}
-
-static inline u_char
-readitac(struct IsdnCardState *cs, u_char off)
-{
- register u_char ret;
-
- byteout(cs->hw.elsa.ale, off);
- ret = bytein(cs->hw.elsa.itac);
- return (ret);
-}
-
-static inline void
-writeitac(struct IsdnCardState *cs, u_char off, u_char data)
-{
- byteout(cs->hw.elsa.ale, off);
- byteout(cs->hw.elsa.itac, data);
-}
-
-static inline int
-TimerRun(struct IsdnCardState *cs)
-{
- register u_char v;
-
- v = bytein(cs->hw.elsa.cfg);
- if ((cs->subtyp == ELSA_QS1000) || (cs->subtyp == ELSA_QS3000))
- return (0 == (v & ELIRQF_TIMER_RUN));
- else if (cs->subtyp == ELSA_PCC8)
- return (v & ELIRQF_TIMER_RUN_PCC8);
- return (v & ELIRQF_TIMER_RUN);
-}
-/*
- * fast interrupt HSCX stuff goes here
- */
-
-#define READHSCX(cs, nr, reg) readreg(cs->hw.elsa.ale, \
- cs->hw.elsa.hscx, reg + (nr ? 0x40 : 0))
-#define WRITEHSCX(cs, nr, reg, data) writereg(cs->hw.elsa.ale, \
- cs->hw.elsa.hscx, reg + (nr ? 0x40 : 0), data)
-
-#define READHSCXFIFO(cs, nr, ptr, cnt) readfifo(cs->hw.elsa.ale, \
- cs->hw.elsa.hscx, (nr ? 0x40 : 0), ptr, cnt)
-
-#define WRITEHSCXFIFO(cs, nr, ptr, cnt) writefifo(cs->hw.elsa.ale, \
- cs->hw.elsa.hscx, (nr ? 0x40 : 0), ptr, cnt)
-
-#include "hscx_irq.c"
-
-static irqreturn_t
-elsa_interrupt(int intno, void *dev_id)
-{
- struct IsdnCardState *cs = dev_id;
- u_long flags;
- u_char val;
- int icnt = 5;
-
- if ((cs->typ == ISDN_CTYPE_ELSA_PCMCIA) && (*cs->busy_flag == 1)) {
-		/* The card tends to generate interrupts while being removed,
-		   which would just crash the kernel. Bad. */
- printk(KERN_WARNING "Elsa: card not available!\n");
- return IRQ_NONE;
- }
- spin_lock_irqsave(&cs->lock, flags);
-#if ARCOFI_USE
- if (cs->hw.elsa.MFlag) {
- val = serial_inp(cs, UART_IIR);
- if (!(val & UART_IIR_NO_INT)) {
- debugl1(cs, "IIR %02x", val);
- rs_interrupt_elsa(cs);
- }
- }
-#endif
- val = readreg(cs->hw.elsa.ale, cs->hw.elsa.hscx, HSCX_ISTA + 0x40);
-Start_HSCX:
- if (val) {
- hscx_int_main(cs, val);
- }
- val = readreg(cs->hw.elsa.ale, cs->hw.elsa.isac, ISAC_ISTA);
-Start_ISAC:
- if (val) {
- isac_interrupt(cs, val);
- }
- val = readreg(cs->hw.elsa.ale, cs->hw.elsa.hscx, HSCX_ISTA + 0x40);
- if (val && icnt) {
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "HSCX IntStat after IntRoutine");
- icnt--;
- goto Start_HSCX;
- }
- val = readreg(cs->hw.elsa.ale, cs->hw.elsa.isac, ISAC_ISTA);
- if (val && icnt) {
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "ISAC IntStat after IntRoutine");
- icnt--;
- goto Start_ISAC;
- }
- if (!icnt)
- printk(KERN_WARNING"ELSA IRQ LOOP\n");
- writereg(cs->hw.elsa.ale, cs->hw.elsa.hscx, HSCX_MASK, 0xFF);
- writereg(cs->hw.elsa.ale, cs->hw.elsa.hscx, HSCX_MASK + 0x40, 0xFF);
- writereg(cs->hw.elsa.ale, cs->hw.elsa.isac, ISAC_MASK, 0xFF);
- if (cs->hw.elsa.status & ELIRQF_TIMER_AKTIV) {
- if (!TimerRun(cs)) {
- /* Timer Restart */
- byteout(cs->hw.elsa.timer, 0);
- cs->hw.elsa.counter++;
- }
- }
-#if ARCOFI_USE
- if (cs->hw.elsa.MFlag) {
- val = serial_inp(cs, UART_MCR);
- val ^= 0x8;
- serial_outp(cs, UART_MCR, val);
- val = serial_inp(cs, UART_MCR);
- val ^= 0x8;
- serial_outp(cs, UART_MCR, val);
- }
-#endif
- if (cs->hw.elsa.trig)
- byteout(cs->hw.elsa.trig, 0x00);
- writereg(cs->hw.elsa.ale, cs->hw.elsa.hscx, HSCX_MASK, 0x0);
- writereg(cs->hw.elsa.ale, cs->hw.elsa.hscx, HSCX_MASK + 0x40, 0x0);
- writereg(cs->hw.elsa.ale, cs->hw.elsa.isac, ISAC_MASK, 0x0);
- spin_unlock_irqrestore(&cs->lock, flags);
- return IRQ_HANDLED;
-}
-
-static irqreturn_t
-elsa_interrupt_ipac(int intno, void *dev_id)
-{
- struct IsdnCardState *cs = dev_id;
- u_long flags;
- u_char ista, val;
- int icnt = 5;
-
- spin_lock_irqsave(&cs->lock, flags);
- if (cs->subtyp == ELSA_QS1000PCI || cs->subtyp == ELSA_QS3000PCI) {
- val = bytein(cs->hw.elsa.cfg + 0x4c); /* PCI IRQ */
- if (!(val & ELSA_PCI_IRQ_MASK)) {
- spin_unlock_irqrestore(&cs->lock, flags);
- return IRQ_NONE;
- }
- }
-#if ARCOFI_USE
- if (cs->hw.elsa.MFlag) {
- val = serial_inp(cs, UART_IIR);
- if (!(val & UART_IIR_NO_INT)) {
- debugl1(cs, "IIR %02x", val);
- rs_interrupt_elsa(cs);
- }
- }
-#endif
- ista = readreg(cs->hw.elsa.ale, cs->hw.elsa.isac, IPAC_ISTA);
-Start_IPAC:
- if (cs->debug & L1_DEB_IPAC)
- debugl1(cs, "IPAC ISTA %02X", ista);
- if (ista & 0x0f) {
- val = readreg(cs->hw.elsa.ale, cs->hw.elsa.hscx, HSCX_ISTA + 0x40);
- if (ista & 0x01)
- val |= 0x01;
- if (ista & 0x04)
- val |= 0x02;
- if (ista & 0x08)
- val |= 0x04;
- if (val)
- hscx_int_main(cs, val);
- }
- if (ista & 0x20) {
- val = 0xfe & readreg(cs->hw.elsa.ale, cs->hw.elsa.isac, ISAC_ISTA + 0x80);
- if (val) {
- isac_interrupt(cs, val);
- }
- }
- if (ista & 0x10) {
- val = 0x01;
- isac_interrupt(cs, val);
- }
- ista = readreg(cs->hw.elsa.ale, cs->hw.elsa.isac, IPAC_ISTA);
- if ((ista & 0x3f) && icnt) {
- icnt--;
- goto Start_IPAC;
- }
- if (!icnt)
- printk(KERN_WARNING "ELSA IRQ LOOP\n");
- writereg(cs->hw.elsa.ale, cs->hw.elsa.isac, IPAC_MASK, 0xFF);
- writereg(cs->hw.elsa.ale, cs->hw.elsa.isac, IPAC_MASK, 0xC0);
- spin_unlock_irqrestore(&cs->lock, flags);
- return IRQ_HANDLED;
-}
-
-static void
-release_io_elsa(struct IsdnCardState *cs)
-{
- int bytecnt = 8;
-
- del_timer(&cs->hw.elsa.tl);
-#if ARCOFI_USE
- clear_arcofi(cs);
-#endif
- if (cs->hw.elsa.ctrl)
- byteout(cs->hw.elsa.ctrl, 0); /* LEDs Out */
- if (cs->subtyp == ELSA_QS1000PCI) {
- byteout(cs->hw.elsa.cfg + 0x4c, 0x01); /* disable IRQ */
- writereg(cs->hw.elsa.ale, cs->hw.elsa.isac, IPAC_ATX, 0xff);
- bytecnt = 2;
- release_region(cs->hw.elsa.cfg, 0x80);
- }
- if (cs->subtyp == ELSA_QS3000PCI) {
- byteout(cs->hw.elsa.cfg + 0x4c, 0x03); /* disable ELSA PCI IRQ */
- writereg(cs->hw.elsa.ale, cs->hw.elsa.isac, IPAC_ATX, 0xff);
- release_region(cs->hw.elsa.cfg, 0x80);
- }
- if (cs->subtyp == ELSA_PCMCIA_IPAC) {
- writereg(cs->hw.elsa.ale, cs->hw.elsa.isac, IPAC_ATX, 0xff);
- }
- if ((cs->subtyp == ELSA_PCFPRO) ||
- (cs->subtyp == ELSA_QS3000) ||
- (cs->subtyp == ELSA_PCF) ||
- (cs->subtyp == ELSA_QS3000PCI)) {
- bytecnt = 16;
-#if ARCOFI_USE
- release_modem(cs);
-#endif
- }
- if (cs->hw.elsa.base)
- release_region(cs->hw.elsa.base, bytecnt);
-}
-
-static void
-reset_elsa(struct IsdnCardState *cs)
-{
- if (cs->hw.elsa.timer) {
- /* Wait 1 Timer */
- byteout(cs->hw.elsa.timer, 0);
- while (TimerRun(cs));
- cs->hw.elsa.ctrl_reg |= 0x50;
- cs->hw.elsa.ctrl_reg &= ~ELSA_ISDN_RESET; /* Reset On */
- byteout(cs->hw.elsa.ctrl, cs->hw.elsa.ctrl_reg);
- /* Wait 1 Timer */
- byteout(cs->hw.elsa.timer, 0);
- while (TimerRun(cs));
- cs->hw.elsa.ctrl_reg |= ELSA_ISDN_RESET; /* Reset Off */
- byteout(cs->hw.elsa.ctrl, cs->hw.elsa.ctrl_reg);
- /* Wait 1 Timer */
- byteout(cs->hw.elsa.timer, 0);
- while (TimerRun(cs));
- if (cs->hw.elsa.trig)
- byteout(cs->hw.elsa.trig, 0xff);
- }
- if ((cs->subtyp == ELSA_QS1000PCI) || (cs->subtyp == ELSA_QS3000PCI) || (cs->subtyp == ELSA_PCMCIA_IPAC)) {
- writereg(cs->hw.elsa.ale, cs->hw.elsa.isac, IPAC_POTA2, 0x20);
- mdelay(10);
- writereg(cs->hw.elsa.ale, cs->hw.elsa.isac, IPAC_POTA2, 0x00);
- writereg(cs->hw.elsa.ale, cs->hw.elsa.isac, IPAC_MASK, 0xc0);
- mdelay(10);
- if (cs->subtyp != ELSA_PCMCIA_IPAC) {
- writereg(cs->hw.elsa.ale, cs->hw.elsa.isac, IPAC_ACFG, 0x0);
- writereg(cs->hw.elsa.ale, cs->hw.elsa.isac, IPAC_AOE, 0x3c);
- } else {
- writereg(cs->hw.elsa.ale, cs->hw.elsa.isac, IPAC_PCFG, 0x10);
- writereg(cs->hw.elsa.ale, cs->hw.elsa.isac, IPAC_ACFG, 0x4);
- writereg(cs->hw.elsa.ale, cs->hw.elsa.isac, IPAC_AOE, 0xf8);
- }
- writereg(cs->hw.elsa.ale, cs->hw.elsa.isac, IPAC_ATX, 0xff);
- if (cs->subtyp == ELSA_QS1000PCI)
- byteout(cs->hw.elsa.cfg + 0x4c, 0x41); /* enable ELSA PCI IRQ */
- else if (cs->subtyp == ELSA_QS3000PCI)
- byteout(cs->hw.elsa.cfg + 0x4c, 0x43); /* enable ELSA PCI IRQ */
- }
-}
-
-#if ARCOFI_USE
-
-static void
-set_arcofi(struct IsdnCardState *cs, int bc) {
- cs->dc.isac.arcofi_bc = bc;
- arcofi_fsm(cs, ARCOFI_START, &ARCOFI_COP_5);
- wait_event_interruptible(cs->dc.isac.arcofi_wait,
- cs->dc.isac.arcofi_state == ARCOFI_NOP);
-}
-
-static int
-check_arcofi(struct IsdnCardState *cs)
-{
- int arcofi_present = 0;
- char tmp[40];
- char *t;
- u_char *p;
-
- if (!cs->dc.isac.mon_tx)
- if (!(cs->dc.isac.mon_tx = kmalloc(MAX_MON_FRAME, GFP_ATOMIC))) {
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "ISAC MON TX out of buffers!");
- return (0);
- }
- cs->dc.isac.arcofi_bc = 0;
- arcofi_fsm(cs, ARCOFI_START, &ARCOFI_VERSION);
- wait_event_interruptible(cs->dc.isac.arcofi_wait,
- cs->dc.isac.arcofi_state == ARCOFI_NOP);
- if (!test_and_clear_bit(FLG_ARCOFI_ERROR, &cs->HW_Flags)) {
- debugl1(cs, "Arcofi response received %d bytes", cs->dc.isac.mon_rxp);
- p = cs->dc.isac.mon_rx;
- t = tmp;
- t += sprintf(tmp, "Arcofi data");
- QuickHex(t, p, cs->dc.isac.mon_rxp);
- debugl1(cs, "%s", tmp);
- if ((cs->dc.isac.mon_rxp == 2) && (cs->dc.isac.mon_rx[0] == 0xa0)) {
- switch (cs->dc.isac.mon_rx[1]) {
- case 0x80:
- debugl1(cs, "Arcofi 2160 detected");
- arcofi_present = 1;
- break;
- case 0x82:
- debugl1(cs, "Arcofi 2165 detected");
- arcofi_present = 2;
- break;
- case 0x84:
- debugl1(cs, "Arcofi 2163 detected");
- arcofi_present = 3;
- break;
- default:
- debugl1(cs, "unknown Arcofi response");
- break;
- }
- } else
- debugl1(cs, "undefined Monitor response");
- cs->dc.isac.mon_rxp = 0;
- } else if (cs->dc.isac.mon_tx) {
- debugl1(cs, "Arcofi not detected");
- }
- if (arcofi_present) {
- if (cs->subtyp == ELSA_QS1000) {
- cs->subtyp = ELSA_QS3000;
- printk(KERN_INFO
- "Elsa: %s detected modem at 0x%lx\n",
- Elsa_Types[cs->subtyp],
- cs->hw.elsa.base + 8);
- release_region(cs->hw.elsa.base, 8);
- if (!request_region(cs->hw.elsa.base, 16, "elsa isdn modem")) {
- printk(KERN_WARNING
- "HiSax: %s config port %lx-%lx already in use\n",
- Elsa_Types[cs->subtyp],
- cs->hw.elsa.base + 8,
- cs->hw.elsa.base + 16);
- }
- } else if (cs->subtyp == ELSA_PCC16) {
- cs->subtyp = ELSA_PCF;
- printk(KERN_INFO
- "Elsa: %s detected modem at 0x%lx\n",
- Elsa_Types[cs->subtyp],
- cs->hw.elsa.base + 8);
- release_region(cs->hw.elsa.base, 8);
- if (!request_region(cs->hw.elsa.base, 16, "elsa isdn modem")) {
- printk(KERN_WARNING
- "HiSax: %s config port %lx-%lx already in use\n",
- Elsa_Types[cs->subtyp],
- cs->hw.elsa.base + 8,
- cs->hw.elsa.base + 16);
- }
- } else
- printk(KERN_INFO
- "Elsa: %s detected modem at 0x%lx\n",
- Elsa_Types[cs->subtyp],
- cs->hw.elsa.base + 8);
- arcofi_fsm(cs, ARCOFI_START, &ARCOFI_XOP_0);
- wait_event_interruptible(cs->dc.isac.arcofi_wait,
- cs->dc.isac.arcofi_state == ARCOFI_NOP);
- return (1);
- }
- return (0);
-}
-#endif /* ARCOFI_USE */
-
-static void
-elsa_led_handler(struct timer_list *t)
-{
- struct IsdnCardState *cs = from_timer(cs, t, hw.elsa.tl);
- int blink = 0;
-
- if (cs->subtyp == ELSA_PCMCIA || cs->subtyp == ELSA_PCMCIA_IPAC)
- return;
- del_timer(&cs->hw.elsa.tl);
- if (cs->hw.elsa.status & ELSA_ASSIGN)
- cs->hw.elsa.ctrl_reg |= ELSA_STAT_LED;
- else if (cs->hw.elsa.status & ELSA_BAD_PWR)
- cs->hw.elsa.ctrl_reg &= ~ELSA_STAT_LED;
- else {
- cs->hw.elsa.ctrl_reg ^= ELSA_STAT_LED;
- blink = 250;
- }
- if (cs->hw.elsa.status & 0xf000)
- cs->hw.elsa.ctrl_reg |= ELSA_LINE_LED;
- else if (cs->hw.elsa.status & 0x0f00) {
- cs->hw.elsa.ctrl_reg ^= ELSA_LINE_LED;
- blink = 500;
- } else
- cs->hw.elsa.ctrl_reg &= ~ELSA_LINE_LED;
-
- if ((cs->subtyp == ELSA_QS1000PCI) ||
- (cs->subtyp == ELSA_QS3000PCI)) {
- u_char led = 0xff;
- if (cs->hw.elsa.ctrl_reg & ELSA_LINE_LED)
- led ^= ELSA_IPAC_LINE_LED;
- if (cs->hw.elsa.ctrl_reg & ELSA_STAT_LED)
- led ^= ELSA_IPAC_STAT_LED;
- writereg(cs->hw.elsa.ale, cs->hw.elsa.isac, IPAC_ATX, led);
- } else
- byteout(cs->hw.elsa.ctrl, cs->hw.elsa.ctrl_reg);
- if (blink) {
- cs->hw.elsa.tl.expires = jiffies + ((blink * HZ) / 1000);
- add_timer(&cs->hw.elsa.tl);
- }
-}
-
-static int
-Elsa_card_msg(struct IsdnCardState *cs, int mt, void *arg)
-{
- int ret = 0;
- u_long flags;
-
- switch (mt) {
- case CARD_RESET:
- spin_lock_irqsave(&cs->lock, flags);
- reset_elsa(cs);
- spin_unlock_irqrestore(&cs->lock, flags);
- return (0);
- case CARD_RELEASE:
- release_io_elsa(cs);
- return (0);
- case CARD_INIT:
- spin_lock_irqsave(&cs->lock, flags);
- cs->debug |= L1_DEB_IPAC;
- reset_elsa(cs);
- inithscxisac(cs, 1);
- if ((cs->subtyp == ELSA_QS1000) ||
- (cs->subtyp == ELSA_QS3000))
- {
- byteout(cs->hw.elsa.timer, 0);
- }
- if (cs->hw.elsa.trig)
- byteout(cs->hw.elsa.trig, 0xff);
- inithscxisac(cs, 2);
- spin_unlock_irqrestore(&cs->lock, flags);
- return (0);
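-		/* CARD_TEST starts the on-board timer and counts its
-		 * expirations in the interrupt handler for 110 ms, expecting
-		 * roughly 12; a wrong count usually points to an IRQ conflict. */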
- case CARD_TEST:
- if ((cs->subtyp == ELSA_PCMCIA) ||
- (cs->subtyp == ELSA_PCMCIA_IPAC) ||
- (cs->subtyp == ELSA_QS1000PCI)) {
- return (0);
- } else if (cs->subtyp == ELSA_QS3000PCI) {
- ret = 0;
- } else {
- spin_lock_irqsave(&cs->lock, flags);
- cs->hw.elsa.counter = 0;
- cs->hw.elsa.ctrl_reg |= ELSA_ENA_TIMER_INT;
- cs->hw.elsa.status |= ELIRQF_TIMER_AKTIV;
- byteout(cs->hw.elsa.ctrl, cs->hw.elsa.ctrl_reg);
- byteout(cs->hw.elsa.timer, 0);
- spin_unlock_irqrestore(&cs->lock, flags);
- msleep(110);
- spin_lock_irqsave(&cs->lock, flags);
- cs->hw.elsa.ctrl_reg &= ~ELSA_ENA_TIMER_INT;
- byteout(cs->hw.elsa.ctrl, cs->hw.elsa.ctrl_reg);
- cs->hw.elsa.status &= ~ELIRQF_TIMER_AKTIV;
- spin_unlock_irqrestore(&cs->lock, flags);
-			printk(KERN_INFO "Elsa: %d timer ticks in 110 ms\n",
- cs->hw.elsa.counter);
- if ((cs->hw.elsa.counter > 10) &&
- (cs->hw.elsa.counter < 16)) {
- printk(KERN_INFO "Elsa: timer and irq OK\n");
- ret = 0;
- } else {
- printk(KERN_WARNING
-			       "Elsa: timer tick problem (%d/12), maybe an IRQ(%d) conflict\n",
- cs->hw.elsa.counter, cs->irq);
- ret = 1;
- }
- }
-#if ARCOFI_USE
- if (check_arcofi(cs)) {
- init_modem(cs);
- }
-#endif
- elsa_led_handler(&cs->hw.elsa.tl);
- return (ret);
- case (MDL_REMOVE | REQUEST):
- cs->hw.elsa.status &= 0;
- break;
- case (MDL_ASSIGN | REQUEST):
- cs->hw.elsa.status |= ELSA_ASSIGN;
- break;
- case MDL_INFO_SETUP:
- if ((long) arg)
- cs->hw.elsa.status |= 0x0200;
- else
- cs->hw.elsa.status |= 0x0100;
- break;
- case MDL_INFO_CONN:
- if ((long) arg)
- cs->hw.elsa.status |= 0x2000;
- else
- cs->hw.elsa.status |= 0x1000;
- break;
- case MDL_INFO_REL:
- if ((long) arg) {
- cs->hw.elsa.status &= ~0x2000;
- cs->hw.elsa.status &= ~0x0200;
- } else {
- cs->hw.elsa.status &= ~0x1000;
- cs->hw.elsa.status &= ~0x0100;
- }
- break;
-#if ARCOFI_USE
- case CARD_AUX_IND:
- if (cs->hw.elsa.MFlag) {
- int len;
- u_char *msg;
-
- if (!arg)
- return (0);
- msg = arg;
- len = *msg;
- msg++;
- modem_write_cmd(cs, msg, len);
- }
- break;
-#endif
- }
- if (cs->typ == ISDN_CTYPE_ELSA) {
- int pwr = bytein(cs->hw.elsa.ale);
- if (pwr & 0x08)
- cs->hw.elsa.status |= ELSA_BAD_PWR;
- else
- cs->hw.elsa.status &= ~ELSA_BAD_PWR;
- }
- elsa_led_handler(&cs->hw.elsa.tl);
- return (ret);
-}
-
-static unsigned char
-probe_elsa_adr(unsigned int adr, int typ)
-{
- int i, in1, in2, p16_1 = 0, p16_2 = 0, p8_1 = 0, p8_2 = 0, pc_1 = 0,
- pc_2 = 0, pfp_1 = 0, pfp_2 = 0;
-
- /* In case of the elsa pcmcia card, this region is in use,
- reserved for us by the card manager. So we do not check it
- here, it would fail. */
- if (typ != ISDN_CTYPE_ELSA_PCMCIA) {
- if (request_region(adr, 8, "elsa card")) {
- release_region(adr, 8);
- } else {
- printk(KERN_WARNING
- "Elsa: Probing Port 0x%x: already in use\n", adr);
- return (0);
- }
- }
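-	/* Each card model has one config-register bit that toggles on every
-	 * read; after 16 read pairs one counter of each pair stays 0, so the
-	 * products tested below (65, 1025, 33, 17) identify a PCC-16/PCF,
-	 * PCF-Pro, PCC8 or PC respectively. */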
- for (i = 0; i < 16; i++) {
-		in1 = inb(adr + ELSA_CONFIG); /* toggles on */
-		in2 = inb(adr + ELSA_CONFIG); /* every access */
- p16_1 += 0x04 & in1;
- p16_2 += 0x04 & in2;
- p8_1 += 0x02 & in1;
- p8_2 += 0x02 & in2;
- pc_1 += 0x01 & in1;
- pc_2 += 0x01 & in2;
- pfp_1 += 0x40 & in1;
- pfp_2 += 0x40 & in2;
- }
- printk(KERN_INFO "Elsa: Probing IO 0x%x", adr);
- if (65 == ++p16_1 * ++p16_2) {
- printk(" PCC-16/PCF found\n");
- return (ELSA_PCC16);
- } else if (1025 == ++pfp_1 * ++pfp_2) {
- printk(" PCF-Pro found\n");
- return (ELSA_PCFPRO);
- } else if (33 == ++p8_1 * ++p8_2) {
- printk(" PCC8 found\n");
- return (ELSA_PCC8);
- } else if (17 == ++pc_1 * ++pc_2) {
- printk(" PC found\n");
- return (ELSA_PC);
- } else {
- printk(" failed\n");
- return (0);
- }
-}
-
-static unsigned int
-probe_elsa(struct IsdnCardState *cs)
-{
- int i;
- unsigned int CARD_portlist[] =
- {0x160, 0x170, 0x260, 0x360, 0};
-
- for (i = 0; CARD_portlist[i]; i++) {
- if ((cs->subtyp = probe_elsa_adr(CARD_portlist[i], cs->typ)))
- break;
- }
- return (CARD_portlist[i]);
-}
-
-static int setup_elsa_isa(struct IsdnCard *card)
-{
- struct IsdnCardState *cs = card->cs;
- u_char val;
-
- cs->hw.elsa.base = card->para[0];
- printk(KERN_INFO "Elsa: Microlink IO probing\n");
- if (cs->hw.elsa.base) {
- if (!(cs->subtyp = probe_elsa_adr(cs->hw.elsa.base,
- cs->typ))) {
- printk(KERN_WARNING
- "Elsa: no Elsa Microlink at %#lx\n",
- cs->hw.elsa.base);
- return (0);
- }
- } else
- cs->hw.elsa.base = probe_elsa(cs);
-
- if (!cs->hw.elsa.base) {
- printk(KERN_WARNING
- "No Elsa Microlink found\n");
- return (0);
- }
-
- cs->hw.elsa.cfg = cs->hw.elsa.base + ELSA_CONFIG;
- cs->hw.elsa.ctrl = cs->hw.elsa.base + ELSA_CONTROL;
- cs->hw.elsa.ale = cs->hw.elsa.base + ELSA_ALE;
- cs->hw.elsa.isac = cs->hw.elsa.base + ELSA_ISAC;
- cs->hw.elsa.itac = cs->hw.elsa.base + ELSA_ITAC;
- cs->hw.elsa.hscx = cs->hw.elsa.base + ELSA_HSCX;
- cs->hw.elsa.trig = cs->hw.elsa.base + ELSA_TRIG_IRQ;
- cs->hw.elsa.timer = cs->hw.elsa.base + ELSA_START_TIMER;
- val = bytein(cs->hw.elsa.cfg);
- if (cs->subtyp == ELSA_PC) {
- const u_char CARD_IrqTab[8] =
- {7, 3, 5, 9, 0, 0, 0, 0};
- cs->irq = CARD_IrqTab[(val & ELSA_IRQ_IDX_PC) >> 2];
- } else if (cs->subtyp == ELSA_PCC8) {
- const u_char CARD_IrqTab[8] =
- {7, 3, 5, 9, 0, 0, 0, 0};
- cs->irq = CARD_IrqTab[(val & ELSA_IRQ_IDX_PCC8) >> 4];
- } else {
- const u_char CARD_IrqTab[8] =
- {15, 10, 15, 3, 11, 5, 11, 9};
- cs->irq = CARD_IrqTab[(val & ELSA_IRQ_IDX) >> 3];
- }
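-	/* Decode the 3-bit hardware release field from the ALE register into
-	 * the revision letter reported below. */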
- val = bytein(cs->hw.elsa.ale) & ELSA_HW_RELEASE;
- if (val < 3)
- val |= 8;
- val += 'A' - 3;
- if (val == 'B' || val == 'C')
- val ^= 1;
- if ((cs->subtyp == ELSA_PCFPRO) && (val == 'G'))
- val = 'C';
- printk(KERN_INFO
- "Elsa: %s found at %#lx Rev.:%c IRQ %d\n",
- Elsa_Types[cs->subtyp],
- cs->hw.elsa.base,
- val, cs->irq);
- val = bytein(cs->hw.elsa.ale) & ELSA_S0_POWER_BAD;
- if (val) {
- printk(KERN_WARNING
- "Elsa: Microlink S0 bus power bad\n");
- cs->hw.elsa.status |= ELSA_BAD_PWR;
- }
-
- return (1);
-}
-
-#ifdef __ISAPNP__
-static struct isapnp_device_id elsa_ids[] = {
- { ISAPNP_VENDOR('E', 'L', 'S'), ISAPNP_FUNCTION(0x0133),
- ISAPNP_VENDOR('E', 'L', 'S'), ISAPNP_FUNCTION(0x0133),
- (unsigned long) "Elsa QS1000" },
- { ISAPNP_VENDOR('E', 'L', 'S'), ISAPNP_FUNCTION(0x0134),
- ISAPNP_VENDOR('E', 'L', 'S'), ISAPNP_FUNCTION(0x0134),
- (unsigned long) "Elsa QS3000" },
- { 0, }
-};
-
-static struct isapnp_device_id *ipid = &elsa_ids[0];
-static struct pnp_card *pnp_c = NULL;
-#endif /* __ISAPNP__ */
-
-static int setup_elsa_isapnp(struct IsdnCard *card)
-{
- struct IsdnCardState *cs = card->cs;
-
-#ifdef __ISAPNP__
- if (!card->para[1] && isapnp_present()) {
- struct pnp_dev *pnp_d;
- while (ipid->card_vendor) {
- if ((pnp_c = pnp_find_card(ipid->card_vendor,
- ipid->card_device, pnp_c))) {
- pnp_d = NULL;
- if ((pnp_d = pnp_find_dev(pnp_c,
- ipid->vendor, ipid->function, pnp_d))) {
- int err;
-
- printk(KERN_INFO "HiSax: %s detected\n",
- (char *)ipid->driver_data);
- pnp_disable_dev(pnp_d);
- err = pnp_activate_dev(pnp_d);
- if (err < 0) {
- printk(KERN_WARNING "%s: pnp_activate_dev ret(%d)\n",
- __func__, err);
- return (0);
- }
- card->para[1] = pnp_port_start(pnp_d, 0);
- card->para[0] = pnp_irq(pnp_d, 0);
-
- if (card->para[0] == -1 || !card->para[1]) {
- printk(KERN_ERR "Elsa PnP:some resources are missing %ld/%lx\n",
- card->para[0], card->para[1]);
- pnp_disable_dev(pnp_d);
- return (0);
- }
- if (ipid->function == ISAPNP_FUNCTION(0x133))
- cs->subtyp = ELSA_QS1000;
- else
- cs->subtyp = ELSA_QS3000;
- break;
- } else {
- printk(KERN_ERR "Elsa PnP: PnP error card found, no device\n");
- return (0);
- }
- }
- ipid++;
- pnp_c = NULL;
- }
- if (!ipid->card_vendor) {
- printk(KERN_INFO "Elsa PnP: no ISAPnP card found\n");
- return (0);
- }
- }
-#endif /* __ISAPNP__ */
-
- if (card->para[1] && card->para[0]) {
- cs->hw.elsa.base = card->para[1];
- cs->irq = card->para[0];
- if (!cs->subtyp)
- cs->subtyp = ELSA_QS1000;
- } else {
- printk(KERN_ERR "Elsa PnP: no parameter\n");
- }
- cs->hw.elsa.cfg = cs->hw.elsa.base + ELSA_CONFIG;
- cs->hw.elsa.ale = cs->hw.elsa.base + ELSA_ALE;
- cs->hw.elsa.isac = cs->hw.elsa.base + ELSA_ISAC;
- cs->hw.elsa.hscx = cs->hw.elsa.base + ELSA_HSCX;
- cs->hw.elsa.trig = cs->hw.elsa.base + ELSA_TRIG_IRQ;
- cs->hw.elsa.timer = cs->hw.elsa.base + ELSA_START_TIMER;
- cs->hw.elsa.ctrl = cs->hw.elsa.base + ELSA_CONTROL;
- printk(KERN_INFO
- "Elsa: %s defined at %#lx IRQ %d\n",
- Elsa_Types[cs->subtyp],
- cs->hw.elsa.base,
- cs->irq);
-
- return (1);
-}
-
-static void setup_elsa_pcmcia(struct IsdnCard *card)
-{
- struct IsdnCardState *cs = card->cs;
- u_char val;
-
- cs->hw.elsa.base = card->para[1];
- cs->irq = card->para[0];
- val = readreg(cs->hw.elsa.base + 0, cs->hw.elsa.base + 2, IPAC_ID);
- if ((val == 1) || (val == 2)) { /* IPAC version 1.1/1.2 */
- cs->subtyp = ELSA_PCMCIA_IPAC;
- cs->hw.elsa.ale = cs->hw.elsa.base + 0;
- cs->hw.elsa.isac = cs->hw.elsa.base + 2;
- cs->hw.elsa.hscx = cs->hw.elsa.base + 2;
- test_and_set_bit(HW_IPAC, &cs->HW_Flags);
- } else {
- cs->subtyp = ELSA_PCMCIA;
- cs->hw.elsa.ale = cs->hw.elsa.base + ELSA_ALE_PCM;
- cs->hw.elsa.isac = cs->hw.elsa.base + ELSA_ISAC_PCM;
- cs->hw.elsa.hscx = cs->hw.elsa.base + ELSA_HSCX;
- }
- cs->hw.elsa.timer = 0;
- cs->hw.elsa.trig = 0;
- cs->hw.elsa.ctrl = 0;
- cs->irq_flags |= IRQF_SHARED;
- printk(KERN_INFO
- "Elsa: %s defined at %#lx IRQ %d\n",
- Elsa_Types[cs->subtyp],
- cs->hw.elsa.base,
- cs->irq);
-}
-
-#ifdef CONFIG_PCI
-static struct pci_dev *dev_qs1000 = NULL;
-static struct pci_dev *dev_qs3000 = NULL;
-
-static int setup_elsa_pci(struct IsdnCard *card)
-{
- struct IsdnCardState *cs = card->cs;
-
- cs->subtyp = 0;
- if ((dev_qs1000 = hisax_find_pci_device(PCI_VENDOR_ID_ELSA,
- PCI_DEVICE_ID_ELSA_MICROLINK, dev_qs1000))) {
- if (pci_enable_device(dev_qs1000))
- return (0);
- cs->subtyp = ELSA_QS1000PCI;
- cs->irq = dev_qs1000->irq;
- cs->hw.elsa.cfg = pci_resource_start(dev_qs1000, 1);
- cs->hw.elsa.base = pci_resource_start(dev_qs1000, 3);
- } else if ((dev_qs3000 = hisax_find_pci_device(PCI_VENDOR_ID_ELSA,
- PCI_DEVICE_ID_ELSA_QS3000, dev_qs3000))) {
- if (pci_enable_device(dev_qs3000))
- return (0);
- cs->subtyp = ELSA_QS3000PCI;
- cs->irq = dev_qs3000->irq;
- cs->hw.elsa.cfg = pci_resource_start(dev_qs3000, 1);
- cs->hw.elsa.base = pci_resource_start(dev_qs3000, 3);
- } else {
- printk(KERN_WARNING "Elsa: No PCI card found\n");
- return (0);
- }
- if (!cs->irq) {
- printk(KERN_WARNING "Elsa: No IRQ for PCI card found\n");
- return (0);
- }
-
- if (!(cs->hw.elsa.base && cs->hw.elsa.cfg)) {
- printk(KERN_WARNING "Elsa: No IO-Adr for PCI card found\n");
- return (0);
- }
- if ((cs->hw.elsa.cfg & 0xff) || (cs->hw.elsa.base & 0xf)) {
- printk(KERN_WARNING "Elsa: You may have a wrong PCI bios\n");
- printk(KERN_WARNING "Elsa: If your system hangs now, read\n");
- printk(KERN_WARNING "Elsa: Documentation/isdn/README.HiSax\n");
- }
- cs->hw.elsa.ale = cs->hw.elsa.base;
- cs->hw.elsa.isac = cs->hw.elsa.base + 1;
- cs->hw.elsa.hscx = cs->hw.elsa.base + 1;
- test_and_set_bit(HW_IPAC, &cs->HW_Flags);
- cs->hw.elsa.timer = 0;
- cs->hw.elsa.trig = 0;
- cs->irq_flags |= IRQF_SHARED;
- printk(KERN_INFO
- "Elsa: %s defined at %#lx/0x%x IRQ %d\n",
- Elsa_Types[cs->subtyp],
- cs->hw.elsa.base,
- cs->hw.elsa.cfg,
- cs->irq);
-
- return (1);
-}
-
-#else
-
-static int setup_elsa_pci(struct IsdnCard *card)
-{
- return (1);
-}
-#endif /* CONFIG_PCI */
-
-static int setup_elsa_common(struct IsdnCard *card)
-{
- struct IsdnCardState *cs = card->cs;
- u_char val;
- int bytecnt;
-
- switch (cs->subtyp) {
- case ELSA_PC:
- case ELSA_PCC8:
- case ELSA_PCC16:
- case ELSA_QS1000:
- case ELSA_PCMCIA:
- case ELSA_PCMCIA_IPAC:
- bytecnt = 8;
- break;
- case ELSA_PCFPRO:
- case ELSA_PCF:
- case ELSA_QS3000:
- case ELSA_QS3000PCI:
- bytecnt = 16;
- break;
- case ELSA_QS1000PCI:
- bytecnt = 2;
- break;
- default:
- printk(KERN_WARNING
- "Unknown ELSA subtype %d\n", cs->subtyp);
- return (0);
- }
- /* In case of the Elsa PCMCIA card, this region is already in use,
- reserved for us by the card manager, so we do not check it
- here; the check would fail. */
- if (cs->typ != ISDN_CTYPE_ELSA_PCMCIA && !request_region(cs->hw.elsa.base, bytecnt, "elsa isdn")) {
- printk(KERN_WARNING
- "HiSax: ELSA config port %#lx-%#lx already in use\n",
- cs->hw.elsa.base,
- cs->hw.elsa.base + bytecnt);
- return (0);
- }
- if ((cs->subtyp == ELSA_QS1000PCI) || (cs->subtyp == ELSA_QS3000PCI)) {
- if (!request_region(cs->hw.elsa.cfg, 0x80, "elsa isdn pci")) {
- printk(KERN_WARNING
- "HiSax: ELSA pci port %x-%x already in use\n",
- cs->hw.elsa.cfg,
- cs->hw.elsa.cfg + 0x80);
- release_region(cs->hw.elsa.base, bytecnt);
- return (0);
- }
- }
-#if ARCOFI_USE
- init_arcofi(cs);
-#endif
- setup_isac(cs);
- timer_setup(&cs->hw.elsa.tl, elsa_led_handler, 0);
- /* test the timer */
- if (cs->hw.elsa.timer) {
- byteout(cs->hw.elsa.trig, 0xff);
- byteout(cs->hw.elsa.timer, 0);
- if (!TimerRun(cs)) {
- byteout(cs->hw.elsa.timer, 0); /* second attempt */
- if (!TimerRun(cs)) {
- printk(KERN_WARNING
- "Elsa: timer do not start\n");
- release_io_elsa(cs);
- return (0);
- }
- }
- HZDELAY((HZ / 100) + 1); /* wait >=10 ms */
- if (TimerRun(cs)) {
- printk(KERN_WARNING "Elsa: timer do not run down\n");
- release_io_elsa(cs);
- return (0);
- }
- printk(KERN_INFO "Elsa: timer OK; resetting card\n");
- }
- cs->BC_Read_Reg = &ReadHSCX;
- cs->BC_Write_Reg = &WriteHSCX;
- cs->BC_Send_Data = &hscx_fill_fifo;
- cs->cardmsg = &Elsa_card_msg;
- if ((cs->subtyp == ELSA_QS1000PCI) || (cs->subtyp == ELSA_QS3000PCI) || (cs->subtyp == ELSA_PCMCIA_IPAC)) {
- cs->readisac = &ReadISAC_IPAC;
- cs->writeisac = &WriteISAC_IPAC;
- cs->readisacfifo = &ReadISACfifo_IPAC;
- cs->writeisacfifo = &WriteISACfifo_IPAC;
- cs->irq_func = &elsa_interrupt_ipac;
- val = readreg(cs->hw.elsa.ale, cs->hw.elsa.isac, IPAC_ID);
- printk(KERN_INFO "Elsa: IPAC version %x\n", val);
- } else {
- cs->readisac = &ReadISAC;
- cs->writeisac = &WriteISAC;
- cs->readisacfifo = &ReadISACfifo;
- cs->writeisacfifo = &WriteISACfifo;
- cs->irq_func = &elsa_interrupt;
- ISACVersion(cs, "Elsa:");
- if (HscxVersion(cs, "Elsa:")) {
- printk(KERN_WARNING
- "Elsa: wrong HSCX versions check IO address\n");
- release_io_elsa(cs);
- return (0);
- }
- }
- if (cs->subtyp == ELSA_PC) {
- val = readitac(cs, ITAC_SYS);
- printk(KERN_INFO "Elsa: ITAC version %s\n", ITACVer[val & 7]);
- writeitac(cs, ITAC_ISEN, 0);
- writeitac(cs, ITAC_RFIE, 0);
- writeitac(cs, ITAC_XFIE, 0);
- writeitac(cs, ITAC_SCIE, 0);
- writeitac(cs, ITAC_STIE, 0);
- }
- return (1);
-}
-
-int setup_elsa(struct IsdnCard *card)
-{
- int rc;
- struct IsdnCardState *cs = card->cs;
- char tmp[64];
-
- strcpy(tmp, Elsa_revision);
- printk(KERN_INFO "HiSax: Elsa driver Rev. %s\n", HiSax_getrev(tmp));
- cs->hw.elsa.ctrl_reg = 0;
- cs->hw.elsa.status = 0;
- cs->hw.elsa.MFlag = 0;
- cs->subtyp = 0;
-
- if (cs->typ == ISDN_CTYPE_ELSA) {
- rc = setup_elsa_isa(card);
- if (!rc)
- return (0);
-
- } else if (cs->typ == ISDN_CTYPE_ELSA_PNP) {
- rc = setup_elsa_isapnp(card);
- if (!rc)
- return (0);
-
- } else if (cs->typ == ISDN_CTYPE_ELSA_PCMCIA)
- setup_elsa_pcmcia(card);
-
- else if (cs->typ == ISDN_CTYPE_ELSA_PCI) {
- rc = setup_elsa_pci(card);
- if (!rc)
- return (0);
-
- } else
- return (0);
-
- return setup_elsa_common(card);
-}
diff --git a/drivers/isdn/hisax/elsa_cs.c b/drivers/isdn/hisax/elsa_cs.c
deleted file mode 100644
index 40f6fad79de3..000000000000
--- a/drivers/isdn/hisax/elsa_cs.c
+++ /dev/null
@@ -1,218 +0,0 @@
-/*======================================================================
-
- An elsa_cs PCMCIA client driver
-
- This driver is for the Elsa PCM ISDN Cards, i.e. the MicroLink
-
-
- The contents of this file are subject to the Mozilla Public
- License Version 1.1 (the "License"); you may not use this file
- except in compliance with the License. You may obtain a copy of
- the License at http://www.mozilla.org/MPL/
-
- Software distributed under the License is distributed on an "AS
- IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
- implied. See the License for the specific language governing
- rights and limitations under the License.
-
- The initial developer of the original code is David A. Hinds
- <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
- are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
-
- Modifications from dummy_cs.c are Copyright (C) 1999-2001 Klaus
- Lichtenwalder <Lichtenwalder@ACM.org>. All Rights Reserved.
-
- Alternatively, the contents of this file may be used under the
- terms of the GNU General Public License version 2 (the "GPL"), in
- which case the provisions of the GPL are applicable instead of the
- above. If you wish to allow the use of your version of this file
- only under the terms of the GPL and not to allow others to use
- your version of this file under the MPL, indicate your decision
- by deleting the provisions above and replace them with the notice
- and other provisions required by the GPL. If you do not delete
- the provisions above, a recipient may use your version of this
- file under either the MPL or the GPL.
-
- ======================================================================*/
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/ptrace.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/ioport.h>
-#include <asm/io.h>
-
-#include <pcmcia/cistpl.h>
-#include <pcmcia/cisreg.h>
-#include <pcmcia/ds.h>
-#include "hisax_cfg.h"
-
-MODULE_DESCRIPTION("ISDN4Linux: PCMCIA client driver for Elsa PCM cards");
-MODULE_AUTHOR("Klaus Lichtenwalder");
-MODULE_LICENSE("Dual MPL/GPL");
-
-
-/*====================================================================*/
-
-/* Parameters that can be set with 'insmod' */
-
-static int protocol = 2; /* EURO-ISDN Default */
-module_param(protocol, int, 0);
-
-static int elsa_cs_config(struct pcmcia_device *link);
-static void elsa_cs_release(struct pcmcia_device *link);
-static void elsa_cs_detach(struct pcmcia_device *p_dev);
-
-typedef struct local_info_t {
- struct pcmcia_device *p_dev;
- int busy;
- int cardnr;
-} local_info_t;
-
-static int elsa_cs_probe(struct pcmcia_device *link)
-{
- local_info_t *local;
-
- dev_dbg(&link->dev, "elsa_cs_attach()\n");
-
- /* Allocate space for private device-specific data */
- local = kzalloc(sizeof(local_info_t), GFP_KERNEL);
- if (!local) return -ENOMEM;
-
- local->p_dev = link;
- link->priv = local;
-
- local->cardnr = -1;
-
- return elsa_cs_config(link);
-} /* elsa_cs_attach */
-
-static void elsa_cs_detach(struct pcmcia_device *link)
-{
- local_info_t *info = link->priv;
-
- dev_dbg(&link->dev, "elsa_cs_detach(0x%p)\n", link);
-
- info->busy = 1;
- elsa_cs_release(link);
-
- kfree(info);
-} /* elsa_cs_detach */
-
-static int elsa_cs_configcheck(struct pcmcia_device *p_dev, void *priv_data)
-{
- int j;
-
- p_dev->io_lines = 3;
- p_dev->resource[0]->end = 8;
- p_dev->resource[0]->flags &= IO_DATA_PATH_WIDTH;
- p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
-
- if ((p_dev->resource[0]->end) && p_dev->resource[0]->start) {
- printk(KERN_INFO "(elsa_cs: looks like the 96 model)\n");
- if (!pcmcia_request_io(p_dev))
- return 0;
- } else {
- printk(KERN_INFO "(elsa_cs: looks like the 97 model)\n");
- for (j = 0x2f0; j > 0x100; j -= 0x10) {
- p_dev->resource[0]->start = j;
- if (!pcmcia_request_io(p_dev))
- return 0;
- }
- }
- return -ENODEV;
-}
-
-static int elsa_cs_config(struct pcmcia_device *link)
-{
- int i;
- IsdnCard_t icard;
-
- dev_dbg(&link->dev, "elsa_config(0x%p)\n", link);
-
- link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;
-
- i = pcmcia_loop_config(link, elsa_cs_configcheck, NULL);
- if (i != 0)
- goto failed;
-
- if (!link->irq)
- goto failed;
-
- i = pcmcia_enable_device(link);
- if (i != 0)
- goto failed;
-
- icard.para[0] = link->irq;
- icard.para[1] = link->resource[0]->start;
- icard.protocol = protocol;
- icard.typ = ISDN_CTYPE_ELSA_PCMCIA;
-
- i = hisax_init_pcmcia(link, &(((local_info_t *)link->priv)->busy), &icard);
- if (i < 0) {
- printk(KERN_ERR "elsa_cs: failed to initialize Elsa "
- "PCMCIA %d with %pR\n", i, link->resource[0]);
- elsa_cs_release(link);
- } else
- ((local_info_t *)link->priv)->cardnr = i;
-
- return 0;
-failed:
- elsa_cs_release(link);
- return -ENODEV;
-} /* elsa_cs_config */
-
-static void elsa_cs_release(struct pcmcia_device *link)
-{
- local_info_t *local = link->priv;
-
- dev_dbg(&link->dev, "elsa_cs_release(0x%p)\n", link);
-
- if (local) {
- if (local->cardnr >= 0) {
- /* no unregister function with hisax */
- HiSax_closecard(local->cardnr);
- }
- }
-
- pcmcia_disable_device(link);
-} /* elsa_cs_release */
-
-static int elsa_suspend(struct pcmcia_device *link)
-{
- local_info_t *dev = link->priv;
-
- dev->busy = 1;
-
- return 0;
-}
-
-static int elsa_resume(struct pcmcia_device *link)
-{
- local_info_t *dev = link->priv;
-
- dev->busy = 0;
-
- return 0;
-}
-
-static const struct pcmcia_device_id elsa_ids[] = {
- PCMCIA_DEVICE_PROD_ID12("ELSA AG (Aachen, Germany)", "MicroLink ISDN/MC ", 0x983de2c4, 0x333ba257),
- PCMCIA_DEVICE_PROD_ID12("ELSA GmbH, Aachen", "MicroLink ISDN/MC ", 0x639e5718, 0x333ba257),
- PCMCIA_DEVICE_NULL
-};
-MODULE_DEVICE_TABLE(pcmcia, elsa_ids);
-
-static struct pcmcia_driver elsa_cs_driver = {
- .owner = THIS_MODULE,
- .name = "elsa_cs",
- .probe = elsa_cs_probe,
- .remove = elsa_cs_detach,
- .id_table = elsa_ids,
- .suspend = elsa_suspend,
- .resume = elsa_resume,
-};
-module_pcmcia_driver(elsa_cs_driver);
diff --git a/drivers/isdn/hisax/elsa_ser.c b/drivers/isdn/hisax/elsa_ser.c
deleted file mode 100644
index 999effd7a276..000000000000
--- a/drivers/isdn/hisax/elsa_ser.c
+++ /dev/null
@@ -1,659 +0,0 @@
-/* $Id: elsa_ser.c,v 2.14.2.3 2004/02/11 13:21:33 keil Exp $
- *
- * stuff for the serial modem on ELSA cards
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/serial.h>
-#include <linux/serial_reg.h>
-#include <linux/slab.h>
-
-#define MAX_MODEM_BUF 256
-#define WAKEUP_CHARS (MAX_MODEM_BUF / 2)
-#define RS_ISR_PASS_LIMIT 256
-#define BASE_BAUD (1843200 / 16)
-
-//#define SERIAL_DEBUG_OPEN 1
-//#define SERIAL_DEBUG_INTR 1
-//#define SERIAL_DEBUG_FLOW 1
-#undef SERIAL_DEBUG_OPEN
-#undef SERIAL_DEBUG_INTR
-#undef SERIAL_DEBUG_FLOW
-#undef SERIAL_DEBUG_REG
-//#define SERIAL_DEBUG_REG 1
-
-#ifdef SERIAL_DEBUG_REG
-static u_char deb[32];
-const char *ModemIn[] = {"RBR", "IER", "IIR", "LCR", "MCR", "LSR", "MSR", "SCR"};
-const char *ModemOut[] = {"THR", "IER", "FCR", "LCR", "MCR", "LSR", "MSR", "SCR"};
-#endif
-
-static char *MInit_1 = "AT&F&C1E0&D2\r\0";
-static char *MInit_2 = "ATL2M1S64=13\r\0";
-static char *MInit_3 = "AT+FCLASS=0\r\0";
-static char *MInit_4 = "ATV1S2=128X1\r\0";
-static char *MInit_5 = "AT\\V8\\N3\r\0";
-static char *MInit_6 = "ATL0M0&G0%E1\r\0";
-static char *MInit_7 = "AT%L1%M0%C3\r\0";
-
-static char *MInit_speed28800 = "AT%G0%B28800\r\0";
-
-static char *MInit_dialout = "ATs7=60 x1 d\r\0";
-static char *MInit_dialin = "ATs7=60 x1 a\r\0";
-
-
-static inline unsigned int serial_in(struct IsdnCardState *cs, int offset)
-{
-#ifdef SERIAL_DEBUG_REG
- u_int val = inb(cs->hw.elsa.base + 8 + offset);
- debugl1(cs, "in %s %02x", ModemIn[offset], val);
- return (val);
-#else
- return inb(cs->hw.elsa.base + 8 + offset);
-#endif
-}
-
-static inline unsigned int serial_inp(struct IsdnCardState *cs, int offset)
-{
-#ifdef SERIAL_DEBUG_REG
-#ifdef ELSA_SERIAL_NOPAUSE_IO
- u_int val = inb(cs->hw.elsa.base + 8 + offset);
- debugl1(cs, "inp %s %02x", ModemIn[offset], val);
-#else
- u_int val = inb_p(cs->hw.elsa.base + 8 + offset);
- debugl1(cs, "inP %s %02x", ModemIn[offset], val);
-#endif
- return (val);
-#else
-#ifdef ELSA_SERIAL_NOPAUSE_IO
- return inb(cs->hw.elsa.base + 8 + offset);
-#else
- return inb_p(cs->hw.elsa.base + 8 + offset);
-#endif
-#endif
-}
-
-static inline void serial_out(struct IsdnCardState *cs, int offset, int value)
-{
-#ifdef SERIAL_DEBUG_REG
- debugl1(cs, "out %s %02x", ModemOut[offset], value);
-#endif
- outb(value, cs->hw.elsa.base + 8 + offset);
-}
-
-static inline void serial_outp(struct IsdnCardState *cs, int offset,
- int value)
-{
-#ifdef SERIAL_DEBUG_REG
-#ifdef ELSA_SERIAL_NOPAUSE_IO
- debugl1(cs, "outp %s %02x", ModemOut[offset], value);
-#else
- debugl1(cs, "outP %s %02x", ModemOut[offset], value);
-#endif
-#endif
-#ifdef ELSA_SERIAL_NOPAUSE_IO
- outb(value, cs->hw.elsa.base + 8 + offset);
-#else
- outb_p(value, cs->hw.elsa.base + 8 + offset);
-#endif
-}
-
-/*
- * This routine is called to set the UART divisor registers to match
- * the specified baud rate for a serial port.
- */
-static void change_speed(struct IsdnCardState *cs, int baud)
-{
- int quot = 0, baud_base;
- unsigned cval, fcr = 0;
-
-
- /* byte size and parity */
- cval = 0x03;
- /* Determine divisor based on baud rate */
- baud_base = BASE_BAUD;
- quot = baud_base / baud;
- /* If the quotient is ever zero, default to 9600 bps */
- if (!quot)
- quot = baud_base / 9600;
-
- /* Set up FIFO's */
- if ((baud_base / quot) < 2400)
- fcr = UART_FCR_ENABLE_FIFO | UART_FCR_TRIGGER_1;
- else
- fcr = UART_FCR_ENABLE_FIFO | UART_FCR_TRIGGER_8;
- serial_outp(cs, UART_FCR, fcr);
- /* CTS flow control flag and modem status interrupts */
- cs->hw.elsa.IER &= ~UART_IER_MSI;
- cs->hw.elsa.IER |= UART_IER_MSI;
- serial_outp(cs, UART_IER, cs->hw.elsa.IER);
-
- debugl1(cs, "modem quot=0x%x", quot);
- serial_outp(cs, UART_LCR, cval | UART_LCR_DLAB);/* set DLAB */
- serial_outp(cs, UART_DLL, quot & 0xff); /* LS of divisor */
- serial_outp(cs, UART_DLM, quot >> 8); /* MS of divisor */
- serial_outp(cs, UART_LCR, cval); /* reset DLAB */
- serial_inp(cs, UART_RX);
-}
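A minimal sketch of the divisor arithmetic that change_speed() above writes into UART_DLL/UART_DLM, assuming the 1843200/16 = 115200 base baud defined at the top of this file; the standalone helper below is hypothetical and not part of the driver:

/* e.g. 28800 bps -> quot = 4, so DLL = 0x04 and DLM = 0x00 */
static unsigned int uart_divisor(unsigned int baud)
{
	unsigned int quot = (1843200 / 16) / baud;	/* BASE_BAUD / baud */

	if (!quot)					/* guard: fall back to 9600 bps */
		quot = (1843200 / 16) / 9600;
	return quot;
}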
-
-static int mstartup(struct IsdnCardState *cs)
-{
- int retval = 0;
-
- /*
- * Clear the FIFO buffers and disable them
- * (they will be reenabled in change_speed())
- */
- serial_outp(cs, UART_FCR, (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT));
-
- /*
- * At this point there's no way the LSR could still be 0xFF;
- * if it is, then bail out, because there's likely no UART
- * here.
- */
- if (serial_inp(cs, UART_LSR) == 0xff) {
- retval = -ENODEV;
- goto errout;
- }
-
- /*
- * Clear the interrupt registers.
- */
- (void) serial_inp(cs, UART_RX);
- (void) serial_inp(cs, UART_IIR);
- (void) serial_inp(cs, UART_MSR);
-
- /*
- * Now, initialize the UART
- */
- serial_outp(cs, UART_LCR, UART_LCR_WLEN8); /* reset DLAB */
-
- cs->hw.elsa.MCR = 0;
- cs->hw.elsa.MCR = UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2;
- serial_outp(cs, UART_MCR, cs->hw.elsa.MCR);
-
- /*
- * Finally, enable interrupts
- */
- cs->hw.elsa.IER = UART_IER_MSI | UART_IER_RLSI | UART_IER_RDI;
- serial_outp(cs, UART_IER, cs->hw.elsa.IER); /* enable interrupts */
-
- /*
- * And clear the interrupt registers again for luck.
- */
- (void)serial_inp(cs, UART_LSR);
- (void)serial_inp(cs, UART_RX);
- (void)serial_inp(cs, UART_IIR);
- (void)serial_inp(cs, UART_MSR);
-
- cs->hw.elsa.transcnt = cs->hw.elsa.transp = 0;
- cs->hw.elsa.rcvcnt = cs->hw.elsa.rcvp = 0;
-
- /*
- * and set the speed of the serial port
- */
- change_speed(cs, BASE_BAUD);
- cs->hw.elsa.MFlag = 1;
-errout:
- return retval;
-}
-
-/*
- * This routine will shutdown a serial port; interrupts are disabled, and
- * DTR is dropped if the hangup on close termio flag is on.
- */
-static void mshutdown(struct IsdnCardState *cs)
-{
-
-#ifdef SERIAL_DEBUG_OPEN
- printk(KERN_DEBUG"Shutting down serial ....");
-#endif
-
- /*
- * clear delta_msr_wait queue to avoid mem leaks: we may free the irq
- * here so the queue might never be woken up
- */
-
- cs->hw.elsa.IER = 0;
- serial_outp(cs, UART_IER, 0x00); /* disable all intrs */
- cs->hw.elsa.MCR &= ~UART_MCR_OUT2;
-
- /* disable break condition */
- serial_outp(cs, UART_LCR, serial_inp(cs, UART_LCR) & ~UART_LCR_SBC);
-
- cs->hw.elsa.MCR &= ~(UART_MCR_DTR | UART_MCR_RTS);
- serial_outp(cs, UART_MCR, cs->hw.elsa.MCR);
-
- /* disable FIFO's */
- serial_outp(cs, UART_FCR, (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT));
- serial_inp(cs, UART_RX); /* read data port to reset things */
-
-#ifdef SERIAL_DEBUG_OPEN
- printk(" done\n");
-#endif
-}
-
-static inline int
-write_modem(struct BCState *bcs) {
- int ret = 0;
- struct IsdnCardState *cs = bcs->cs;
- int count, len, fp;
-
- if (!bcs->tx_skb)
- return 0;
- if (bcs->tx_skb->len <= 0)
- return 0;
- len = bcs->tx_skb->len;
- if (len > MAX_MODEM_BUF - cs->hw.elsa.transcnt)
- len = MAX_MODEM_BUF - cs->hw.elsa.transcnt;
- fp = cs->hw.elsa.transcnt + cs->hw.elsa.transp;
- fp &= (MAX_MODEM_BUF - 1);
- count = len;
- if (count > MAX_MODEM_BUF - fp) {
- count = MAX_MODEM_BUF - fp;
- skb_copy_from_linear_data(bcs->tx_skb,
- cs->hw.elsa.transbuf + fp, count);
- skb_pull(bcs->tx_skb, count);
- cs->hw.elsa.transcnt += count;
- ret = count;
- count = len - count;
- fp = 0;
- }
- skb_copy_from_linear_data(bcs->tx_skb,
- cs->hw.elsa.transbuf + fp, count);
- skb_pull(bcs->tx_skb, count);
- cs->hw.elsa.transcnt += count;
- ret += count;
-
- if (cs->hw.elsa.transcnt &&
- !(cs->hw.elsa.IER & UART_IER_THRI)) {
- cs->hw.elsa.IER |= UART_IER_THRI;
- serial_outp(cs, UART_IER, cs->hw.elsa.IER);
- }
- return (ret);
-}
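A minimal sketch of the wrap-around copy that write_modem() above and modem_write_cmd() further down both perform on the transmit ring; the helper below is hypothetical, assumes MAX_MODEM_BUF is a power of two (it is 256 here) and that the caller has already checked that len fits into the free space:

/* rdp: read pointer, cnt: current fill level of the ring */
static void ring_put(unsigned char *ring, int rdp, int *cnt,
		     const unsigned char *src, int len)
{
	int fp = (rdp + *cnt) & (MAX_MODEM_BUF - 1);	/* first free slot */
	int first = len;

	if (first > MAX_MODEM_BUF - fp)			/* copy is split at the buffer end */
		first = MAX_MODEM_BUF - fp;
	memcpy(ring + fp, src, first);
	memcpy(ring, src + first, len - first);		/* wrapped remainder, may be 0 bytes */
	*cnt += len;
}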
-
-static inline void
-modem_fill(struct BCState *bcs) {
-
- if (bcs->tx_skb) {
- if (bcs->tx_skb->len) {
- write_modem(bcs);
- return;
- } else {
- if (test_bit(FLG_LLI_L1WAKEUP, &bcs->st->lli.flag) &&
- (PACKET_NOACK != bcs->tx_skb->pkt_type)) {
- u_long flags;
- spin_lock_irqsave(&bcs->aclock, flags);
- bcs->ackcnt += bcs->hw.hscx.count;
- spin_unlock_irqrestore(&bcs->aclock, flags);
- schedule_event(bcs, B_ACKPENDING);
- }
- dev_kfree_skb_any(bcs->tx_skb);
- bcs->tx_skb = NULL;
- }
- }
- if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) {
- bcs->hw.hscx.count = 0;
- test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
- write_modem(bcs);
- } else {
- test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
- schedule_event(bcs, B_XMTBUFREADY);
- }
-}
-
-static inline void receive_chars(struct IsdnCardState *cs,
- int *status)
-{
- unsigned char ch;
- struct sk_buff *skb;
-
- do {
- ch = serial_in(cs, UART_RX);
- if (cs->hw.elsa.rcvcnt >= MAX_MODEM_BUF)
- break;
- cs->hw.elsa.rcvbuf[cs->hw.elsa.rcvcnt++] = ch;
-#ifdef SERIAL_DEBUG_INTR
- printk("DR%02x:%02x...", ch, *status);
-#endif
- if (*status & (UART_LSR_BI | UART_LSR_PE |
- UART_LSR_FE | UART_LSR_OE)) {
-
-#ifdef SERIAL_DEBUG_INTR
- printk("handling exept....");
-#endif
- }
- *status = serial_inp(cs, UART_LSR);
- } while (*status & UART_LSR_DR);
- if (cs->hw.elsa.MFlag == 2) {
- if (!(skb = dev_alloc_skb(cs->hw.elsa.rcvcnt)))
- printk(KERN_WARNING "ElsaSER: receive out of memory\n");
- else {
- skb_put_data(skb, cs->hw.elsa.rcvbuf,
- cs->hw.elsa.rcvcnt);
- skb_queue_tail(&cs->hw.elsa.bcs->rqueue, skb);
- }
- schedule_event(cs->hw.elsa.bcs, B_RCVBUFREADY);
- } else {
- char tmp[128];
- char *t = tmp;
-
- t += sprintf(t, "modem read cnt %d", cs->hw.elsa.rcvcnt);
- QuickHex(t, cs->hw.elsa.rcvbuf, cs->hw.elsa.rcvcnt);
- debugl1(cs, "%s", tmp);
- }
- cs->hw.elsa.rcvcnt = 0;
-}
-
-static inline void transmit_chars(struct IsdnCardState *cs, int *intr_done)
-{
- int count;
-
- debugl1(cs, "transmit_chars: p(%x) cnt(%x)", cs->hw.elsa.transp,
- cs->hw.elsa.transcnt);
-
- if (cs->hw.elsa.transcnt <= 0) {
- cs->hw.elsa.IER &= ~UART_IER_THRI;
- serial_out(cs, UART_IER, cs->hw.elsa.IER);
- return;
- }
- count = 16;
- do {
- serial_outp(cs, UART_TX, cs->hw.elsa.transbuf[cs->hw.elsa.transp++]);
- if (cs->hw.elsa.transp >= MAX_MODEM_BUF)
- cs->hw.elsa.transp = 0;
- if (--cs->hw.elsa.transcnt <= 0)
- break;
- } while (--count > 0);
- if ((cs->hw.elsa.transcnt < WAKEUP_CHARS) && (cs->hw.elsa.MFlag == 2))
- modem_fill(cs->hw.elsa.bcs);
-
-#ifdef SERIAL_DEBUG_INTR
- printk("THRE...");
-#endif
- if (intr_done)
- *intr_done = 0;
- if (cs->hw.elsa.transcnt <= 0) {
- cs->hw.elsa.IER &= ~UART_IER_THRI;
- serial_outp(cs, UART_IER, cs->hw.elsa.IER);
- }
-}
-
-
-static void rs_interrupt_elsa(struct IsdnCardState *cs)
-{
- int status, iir, msr;
- int pass_counter = 0;
-
-#ifdef SERIAL_DEBUG_INTR
- printk(KERN_DEBUG "rs_interrupt_single(%d)...", cs->irq);
-#endif
-
- do {
- status = serial_inp(cs, UART_LSR);
- debugl1(cs, "rs LSR %02x", status);
-#ifdef SERIAL_DEBUG_INTR
- printk("status = %x...", status);
-#endif
- if (status & UART_LSR_DR)
- receive_chars(cs, &status);
- if (status & UART_LSR_THRE)
- transmit_chars(cs, NULL);
- if (pass_counter++ > RS_ISR_PASS_LIMIT) {
- printk("rs_single loop break.\n");
- break;
- }
- iir = serial_inp(cs, UART_IIR);
- debugl1(cs, "rs IIR %02x", iir);
- if ((iir & 0xf) == 0) {
- msr = serial_inp(cs, UART_MSR);
- debugl1(cs, "rs MSR %02x", msr);
- }
- } while (!(iir & UART_IIR_NO_INT));
-#ifdef SERIAL_DEBUG_INTR
- printk("end.\n");
-#endif
-}
-
-extern int open_hscxstate(struct IsdnCardState *cs, struct BCState *bcs);
-extern void modehscx(struct BCState *bcs, int mode, int bc);
-extern void hscx_l2l1(struct PStack *st, int pr, void *arg);
-
-static void
-close_elsastate(struct BCState *bcs)
-{
- modehscx(bcs, 0, bcs->channel);
- if (test_and_clear_bit(BC_FLG_INIT, &bcs->Flag)) {
- if (bcs->hw.hscx.rcvbuf) {
- if (bcs->mode != L1_MODE_MODEM)
- kfree(bcs->hw.hscx.rcvbuf);
- bcs->hw.hscx.rcvbuf = NULL;
- }
- skb_queue_purge(&bcs->rqueue);
- skb_queue_purge(&bcs->squeue);
- if (bcs->tx_skb) {
- dev_kfree_skb_any(bcs->tx_skb);
- bcs->tx_skb = NULL;
- test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
- }
- }
-}
-
-static void
-modem_write_cmd(struct IsdnCardState *cs, u_char *buf, int len) {
- int count, fp;
- u_char *msg = buf;
-
- if (!len)
- return;
- if (len > (MAX_MODEM_BUF - cs->hw.elsa.transcnt)) {
- return;
- }
- fp = cs->hw.elsa.transcnt + cs->hw.elsa.transp;
- fp &= (MAX_MODEM_BUF - 1);
- count = len;
- if (count > MAX_MODEM_BUF - fp) {
- count = MAX_MODEM_BUF - fp;
- memcpy(cs->hw.elsa.transbuf + fp, msg, count);
- cs->hw.elsa.transcnt += count;
- msg += count;
- count = len - count;
- fp = 0;
- }
- memcpy(cs->hw.elsa.transbuf + fp, msg, count);
- cs->hw.elsa.transcnt += count;
- if (cs->hw.elsa.transcnt &&
- !(cs->hw.elsa.IER & UART_IER_THRI)) {
- cs->hw.elsa.IER |= UART_IER_THRI;
- serial_outp(cs, UART_IER, cs->hw.elsa.IER);
- }
-}
-
-static void
-modem_set_init(struct IsdnCardState *cs) {
- int timeout;
-
-#define RCV_DELAY 20
- modem_write_cmd(cs, MInit_1, strlen(MInit_1));
- timeout = 1000;
- while (timeout-- && cs->hw.elsa.transcnt)
- udelay(1000);
- debugl1(cs, "msi tout=%d", timeout);
- mdelay(RCV_DELAY);
- modem_write_cmd(cs, MInit_2, strlen(MInit_2));
- timeout = 1000;
- while (timeout-- && cs->hw.elsa.transcnt)
- udelay(1000);
- debugl1(cs, "msi tout=%d", timeout);
- mdelay(RCV_DELAY);
- modem_write_cmd(cs, MInit_3, strlen(MInit_3));
- timeout = 1000;
- while (timeout-- && cs->hw.elsa.transcnt)
- udelay(1000);
- debugl1(cs, "msi tout=%d", timeout);
- mdelay(RCV_DELAY);
- modem_write_cmd(cs, MInit_4, strlen(MInit_4));
- timeout = 1000;
- while (timeout-- && cs->hw.elsa.transcnt)
- udelay(1000);
- debugl1(cs, "msi tout=%d", timeout);
- mdelay(RCV_DELAY);
- modem_write_cmd(cs, MInit_5, strlen(MInit_5));
- timeout = 1000;
- while (timeout-- && cs->hw.elsa.transcnt)
- udelay(1000);
- debugl1(cs, "msi tout=%d", timeout);
- mdelay(RCV_DELAY);
- modem_write_cmd(cs, MInit_6, strlen(MInit_6));
- timeout = 1000;
- while (timeout-- && cs->hw.elsa.transcnt)
- udelay(1000);
- debugl1(cs, "msi tout=%d", timeout);
- mdelay(RCV_DELAY);
- modem_write_cmd(cs, MInit_7, strlen(MInit_7));
- timeout = 1000;
- while (timeout-- && cs->hw.elsa.transcnt)
- udelay(1000);
- debugl1(cs, "msi tout=%d", timeout);
- mdelay(RCV_DELAY);
-}
-
-static void
-modem_set_dial(struct IsdnCardState *cs, int outgoing) {
- int timeout;
-#define RCV_DELAY 20
-
- modem_write_cmd(cs, MInit_speed28800, strlen(MInit_speed28800));
- timeout = 1000;
- while (timeout-- && cs->hw.elsa.transcnt)
- udelay(1000);
- debugl1(cs, "msi tout=%d", timeout);
- mdelay(RCV_DELAY);
- if (outgoing)
- modem_write_cmd(cs, MInit_dialout, strlen(MInit_dialout));
- else
- modem_write_cmd(cs, MInit_dialin, strlen(MInit_dialin));
- timeout = 1000;
- while (timeout-- && cs->hw.elsa.transcnt)
- udelay(1000);
- debugl1(cs, "msi tout=%d", timeout);
- mdelay(RCV_DELAY);
-}
-
-static void
-modem_l2l1(struct PStack *st, int pr, void *arg)
-{
- struct BCState *bcs = st->l1.bcs;
- struct sk_buff *skb = arg;
- u_long flags;
-
- if (pr == (PH_DATA | REQUEST)) {
- spin_lock_irqsave(&bcs->cs->lock, flags);
- if (bcs->tx_skb) {
- skb_queue_tail(&bcs->squeue, skb);
- } else {
- bcs->tx_skb = skb;
- test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
- bcs->hw.hscx.count = 0;
- write_modem(bcs);
- }
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
- } else if (pr == (PH_ACTIVATE | REQUEST)) {
- test_and_set_bit(BC_FLG_ACTIV, &bcs->Flag);
- st->l1.l1l2(st, PH_ACTIVATE | CONFIRM, NULL);
- set_arcofi(bcs->cs, st->l1.bc);
- mstartup(bcs->cs);
- modem_set_dial(bcs->cs, test_bit(FLG_ORIG, &st->l2.flag));
- bcs->cs->hw.elsa.MFlag = 2;
- } else if (pr == (PH_DEACTIVATE | REQUEST)) {
- test_and_clear_bit(BC_FLG_ACTIV, &bcs->Flag);
- bcs->cs->dc.isac.arcofi_bc = st->l1.bc;
- arcofi_fsm(bcs->cs, ARCOFI_START, &ARCOFI_XOP_0);
- wait_event_interruptible(bcs->cs->dc.isac.arcofi_wait,
- bcs->cs->dc.isac.arcofi_state == ARCOFI_NOP);
- bcs->cs->hw.elsa.MFlag = 1;
- } else {
- printk(KERN_WARNING "ElsaSer: unknown pr %x\n", pr);
- }
-}
-
-static int
-setstack_elsa(struct PStack *st, struct BCState *bcs)
-{
-
- bcs->channel = st->l1.bc;
- switch (st->l1.mode) {
- case L1_MODE_HDLC:
- case L1_MODE_TRANS:
- if (open_hscxstate(st->l1.hardware, bcs))
- return (-1);
- st->l2.l2l1 = hscx_l2l1;
- break;
- case L1_MODE_MODEM:
- bcs->mode = L1_MODE_MODEM;
- if (!test_and_set_bit(BC_FLG_INIT, &bcs->Flag)) {
- bcs->hw.hscx.rcvbuf = bcs->cs->hw.elsa.rcvbuf;
- skb_queue_head_init(&bcs->rqueue);
- skb_queue_head_init(&bcs->squeue);
- }
- bcs->tx_skb = NULL;
- test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
- bcs->event = 0;
- bcs->hw.hscx.rcvidx = 0;
- bcs->tx_cnt = 0;
- bcs->cs->hw.elsa.bcs = bcs;
- st->l2.l2l1 = modem_l2l1;
- break;
- }
- st->l1.bcs = bcs;
- setstack_manager(st);
- bcs->st = st;
- setstack_l1_B(st);
- return (0);
-}
-
-static void
-init_modem(struct IsdnCardState *cs) {
-
- cs->bcs[0].BC_SetStack = setstack_elsa;
- cs->bcs[1].BC_SetStack = setstack_elsa;
- cs->bcs[0].BC_Close = close_elsastate;
- cs->bcs[1].BC_Close = close_elsastate;
- if (!(cs->hw.elsa.rcvbuf = kmalloc(MAX_MODEM_BUF,
- GFP_ATOMIC))) {
- printk(KERN_WARNING
- "Elsa: No modem mem hw.elsa.rcvbuf\n");
- return;
- }
- if (!(cs->hw.elsa.transbuf = kmalloc(MAX_MODEM_BUF,
- GFP_ATOMIC))) {
- printk(KERN_WARNING
- "Elsa: No modem mem hw.elsa.transbuf\n");
- kfree(cs->hw.elsa.rcvbuf);
- cs->hw.elsa.rcvbuf = NULL;
- return;
- }
- if (mstartup(cs)) {
- printk(KERN_WARNING "Elsa: problem startup modem\n");
- }
- modem_set_init(cs);
-}
-
-static void
-release_modem(struct IsdnCardState *cs) {
-
- cs->hw.elsa.MFlag = 0;
- if (cs->hw.elsa.transbuf) {
- if (cs->hw.elsa.rcvbuf) {
- mshutdown(cs);
- kfree(cs->hw.elsa.rcvbuf);
- cs->hw.elsa.rcvbuf = NULL;
- }
- kfree(cs->hw.elsa.transbuf);
- cs->hw.elsa.transbuf = NULL;
- }
-}
diff --git a/drivers/isdn/hisax/enternow_pci.c b/drivers/isdn/hisax/enternow_pci.c
deleted file mode 100644
index e8d431a8302d..000000000000
--- a/drivers/isdn/hisax/enternow_pci.c
+++ /dev/null
@@ -1,420 +0,0 @@
-/* enternow_pci.c,v 0.99 2001/10/02
- *
- * enternow_pci.c Card-specific routines for
- * Formula-n enter:now ISDN PCI ab
- * Gerdes AG Power ISDN PCI
- * Woerltronic SA 16 PCI
- * (based on HiSax driver by Karsten Keil)
- *
- * Author Christoph Ersfeld <info@formula-n.de>
- * Formula-n Europe AG (www.formula-n.com)
- * previously Gerdes AG
- *
- *
- * This file is (c) under GNU PUBLIC LICENSE
- *
- * Notes:
- * This driver interfaces to netjet.c which performs B-channel
- * processing.
- *
- * Version 0.99 is the first release of this driver and there are
- * certainly a few bugs.
- * It isn't tested on Linux 2.4 yet, so consider this code to be
- * beta.
- *
- * Please don't report any malfunction to me without sending
- * (compressed) debug logs;
- * it would be nearly impossible to trace it otherwise.
- *
- * Log D-channel-processing as follows:
- *
- * 1. Load hisax with card-specific parameters; this example is for
- * the Formula-n enter:now ISDN PCI and compatible cards
- * (e.g. Gerdes Power ISDN PCI):
- *
- * modprobe hisax type=41 protocol=2 id=gerdes
- *
- * If you choose another value for id, you need to modify the
- * code below, too.
- *
- * 2. set debug-level
- *
- * hisaxctrl gerdes 1 0x3ff
- * hisaxctrl gerdes 11 0x4f
- * cat /dev/isdnctrl >> ~/log &
- *
- * Please also take a look at /var/log/messages if there is
- * anything important concerning HISAX.
- *
- *
- * Credits:
- * Programming the driver for the Formula-n enter:now ISDN PCI and
- * the necessary driver for the Amd 7930 D-channel controller
- * was sponsored by Formula-n Europe AG.
- * Thanks to Karsten Keil and Petr Novak, who gave me support on
- * Hisax-specific questions.
- * I want to say special thanks to Carl-Friedrich Braun, who had to
- * answer a lot of questions about ISDN in general and about handling
- * the Amd chip.
- *
- */
-
-
-#include "hisax.h"
-#include "isac.h"
-#include "isdnl1.h"
-#include "amd7930_fn.h"
-#include <linux/interrupt.h>
-#include <linux/ppp_defs.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include "netjet.h"
-
-
-
-static const char *enternow_pci_rev = "$Revision: 1.1.4.5 $";
-
-
-/* for PowerISDN PCI */
-#define TJ_AMD_IRQ 0x20
-#define TJ_LED1 0x40
-#define TJ_LED2 0x80
-
-
-/* The window to the AMD chip:
- * from address hw.njet.base + TJ_AMD_PORT onwards, the AMD's 8-bit
- * registers are mapped into the TigerJet I/O space
- * -> register 0x01 of the AMD is at hw.njet.base + 0xC4 */
-#define TJ_AMD_PORT 0xC0
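For illustration only, the mapping described above works out as below; the helper is hypothetical, and it matches what ReadByteAmd7930()/WriteByteAmd7930() compute via cs->hw.njet.isac + 4 * offset, since cs->hw.njet.isac is later set to hw.njet.base + TJ_AMD_PORT:

/* I/O port of a direct AMD 7930 register (offset 0..7) in the TigerJet window;
 * register 0x01 -> njet_base + 0xC4, as noted in the comment above */
static inline unsigned long amd_direct_port(unsigned long njet_base,
					    unsigned int amd_reg)
{
	return njet_base + TJ_AMD_PORT + 4 * amd_reg;
}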
-
-
-
-/* *************************** I/O-Interface functions ************************************* */
-
-
-/* cs->readisac, macro rByteAMD */
-static unsigned char
-ReadByteAmd7930(struct IsdnCardState *cs, unsigned char offset)
-{
- /* direct register */
- if (offset < 8)
- return (inb(cs->hw.njet.isac + 4 * offset));
-
- /* indirect register */
- else {
- outb(offset, cs->hw.njet.isac + 4 * AMD_CR);
- return (inb(cs->hw.njet.isac + 4 * AMD_DR));
- }
-}
-
-/* cs->writeisac, macro wByteAMD */
-static void
-WriteByteAmd7930(struct IsdnCardState *cs, unsigned char offset, unsigned char value)
-{
- /* direct register */
- if (offset < 8)
- outb(value, cs->hw.njet.isac + 4 * offset);
-
- /* indirect register */
- else {
- outb(offset, cs->hw.njet.isac + 4 * AMD_CR);
- outb(value, cs->hw.njet.isac + 4 * AMD_DR);
- }
-}
-
-
-static void
-enpci_setIrqMask(struct IsdnCardState *cs, unsigned char val) {
- if (!val)
- outb(0x00, cs->hw.njet.base + NETJET_IRQMASK1);
- else
- outb(TJ_AMD_IRQ, cs->hw.njet.base + NETJET_IRQMASK1);
-}
-
-
-static unsigned char dummyrr(struct IsdnCardState *cs, int chan, unsigned char off)
-{
- return (5);
-}
-
-static void dummywr(struct IsdnCardState *cs, int chan, unsigned char off, unsigned char value)
-{
-
-}
-
-
-/* ******************************************************************************** */
-
-
-static void
-reset_enpci(struct IsdnCardState *cs)
-{
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "enter:now PCI: reset");
-
- /* Reset on, (also for AMD) */
- cs->hw.njet.ctrl_reg = 0x07;
- outb(cs->hw.njet.ctrl_reg, cs->hw.njet.base + NETJET_CTRL);
- mdelay(20);
- /* Reset off */
- cs->hw.njet.ctrl_reg = 0x30;
- outb(cs->hw.njet.ctrl_reg, cs->hw.njet.base + NETJET_CTRL);
- /* 20ms delay */
- mdelay(20);
- cs->hw.njet.auxd = 0; // LED-status
- cs->hw.njet.dmactrl = 0;
- outb(~TJ_AMD_IRQ, cs->hw.njet.base + NETJET_AUXCTRL);
- outb(TJ_AMD_IRQ, cs->hw.njet.base + NETJET_IRQMASK1);
- outb(cs->hw.njet.auxd, cs->hw.njet.auxa); // LED off
-}
-
-
-static int
-enpci_card_msg(struct IsdnCardState *cs, int mt, void *arg)
-{
- u_long flags;
- unsigned char *chan;
-
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "enter:now PCI: card_msg: 0x%04X", mt);
-
- switch (mt) {
- case CARD_RESET:
- spin_lock_irqsave(&cs->lock, flags);
- reset_enpci(cs);
- Amd7930_init(cs);
- spin_unlock_irqrestore(&cs->lock, flags);
- break;
- case CARD_RELEASE:
- release_io_netjet(cs);
- break;
- case CARD_INIT:
- reset_enpci(cs);
- inittiger(cs);
- /* irq must be on here */
- Amd7930_init(cs);
- break;
- case CARD_TEST:
- break;
- case MDL_ASSIGN:
- /* TEI assigned, LED1 on */
- cs->hw.njet.auxd = TJ_AMD_IRQ << 1;
- outb(cs->hw.njet.auxd, cs->hw.njet.base + NETJET_AUXDATA);
- break;
- case MDL_REMOVE:
- /* TEI removed, LEDs off */
- cs->hw.njet.auxd = 0;
- outb(0x00, cs->hw.njet.base + NETJET_AUXDATA);
- break;
- case MDL_BC_ASSIGN:
- /* activate B-channel */
- chan = (unsigned char *)arg;
-
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "enter:now PCI: assign phys. BC %d in AMD LMR1", *chan);
-
- cs->dc.amd7930.ph_command(cs, (cs->dc.amd7930.lmr1 | (*chan + 1)), "MDL_BC_ASSIGN");
- /* at least one b-channel in use, LED 2 on */
- cs->hw.njet.auxd |= TJ_AMD_IRQ << 2;
- outb(cs->hw.njet.auxd, cs->hw.njet.base + NETJET_AUXDATA);
- break;
- case MDL_BC_RELEASE:
- /* deactivate B-channel */
- chan = (unsigned char *)arg;
-
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "enter:now PCI: release phys. BC %d in Amd LMR1", *chan);
-
- cs->dc.amd7930.ph_command(cs, (cs->dc.amd7930.lmr1 & ~(*chan + 1)), "MDL_BC_RELEASE");
- /* no b-channel active -> LED2 off */
- if (!(cs->dc.amd7930.lmr1 & 3)) {
- cs->hw.njet.auxd &= ~(TJ_AMD_IRQ << 2);
- outb(cs->hw.njet.auxd, cs->hw.njet.base + NETJET_AUXDATA);
- }
- break;
- default:
- break;
-
- }
- return (0);
-}
-
-static irqreturn_t
-enpci_interrupt(int intno, void *dev_id)
-{
- struct IsdnCardState *cs = dev_id;
- unsigned char s0val, s1val, ir;
- u_long flags;
-
- spin_lock_irqsave(&cs->lock, flags);
- s1val = inb(cs->hw.njet.base + NETJET_IRQSTAT1);
-
- /* AMD threw an interrupt */
- if (!(s1val & TJ_AMD_IRQ)) {
- /* read and clear interrupt-register */
- ir = ReadByteAmd7930(cs, 0x00);
- Amd7930_interrupt(cs, ir);
- s1val = 1;
- } else
- s1val = 0;
- s0val = inb(cs->hw.njet.base + NETJET_IRQSTAT0);
- if ((s0val | s1val) == 0) { // shared IRQ
- spin_unlock_irqrestore(&cs->lock, flags);
- return IRQ_NONE;
- }
- if (s0val)
- outb(s0val, cs->hw.njet.base + NETJET_IRQSTAT0);
-
- /* DMA-Interrupt: B-channel-stuff */
- /* set bits in sval to indicate which page is free */
- if (inl(cs->hw.njet.base + NETJET_DMA_WRITE_ADR) <
- inl(cs->hw.njet.base + NETJET_DMA_WRITE_IRQ))
- /* the 2nd write page is free */
- s0val = 0x08;
- else /* the 1st write page is free */
- s0val = 0x04;
- if (inl(cs->hw.njet.base + NETJET_DMA_READ_ADR) <
- inl(cs->hw.njet.base + NETJET_DMA_READ_IRQ))
- /* the 2nd read page is free */
- s0val = s0val | 0x02;
- else /* the 1st read page is free */
- s0val = s0val | 0x01;
- if (s0val != cs->hw.njet.last_is0) /* we have a DMA interrupt */
- {
- if (test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
- spin_unlock_irqrestore(&cs->lock, flags);
- return IRQ_HANDLED;
- }
- cs->hw.njet.irqstat0 = s0val;
- if ((cs->hw.njet.irqstat0 & NETJET_IRQM0_READ) !=
- (cs->hw.njet.last_is0 & NETJET_IRQM0_READ))
- /* we have a read dma int */
- read_tiger(cs);
- if ((cs->hw.njet.irqstat0 & NETJET_IRQM0_WRITE) !=
- (cs->hw.njet.last_is0 & NETJET_IRQM0_WRITE))
- /* we have a write dma int */
- write_tiger(cs);
- test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
- }
- spin_unlock_irqrestore(&cs->lock, flags);
- return IRQ_HANDLED;
-}
-
-static int en_pci_probe(struct pci_dev *dev_netjet, struct IsdnCardState *cs)
-{
- if (pci_enable_device(dev_netjet))
- return (0);
- cs->irq = dev_netjet->irq;
- if (!cs->irq) {
- printk(KERN_WARNING "enter:now PCI: No IRQ for PCI card found\n");
- return (0);
- }
- cs->hw.njet.base = pci_resource_start(dev_netjet, 0);
- if (!cs->hw.njet.base) {
- printk(KERN_WARNING "enter:now PCI: No IO-Adr for PCI card found\n");
- return (0);
- }
- /* check the subsystem vendor ID; the system crashes with a Traverse card */
- if ((dev_netjet->subsystem_vendor != 0x55) ||
- (dev_netjet->subsystem_device != 0x02)) {
- printk(KERN_WARNING "enter:now: You tried to load this driver with an incompatible TigerJet-card\n");
- printk(KERN_WARNING "Use type=20 for Traverse NetJet PCI Card.\n");
- return (0);
- }
-
- return (1);
-}
-
-static void en_cs_init(struct IsdnCard *card, struct IsdnCardState *cs)
-{
- cs->hw.njet.auxa = cs->hw.njet.base + NETJET_AUXDATA;
- cs->hw.njet.isac = cs->hw.njet.base + 0xC0; // window to the AMD
-
- /* reset on */
- cs->hw.njet.ctrl_reg = 0x07; // changed from 0xff
- outb(cs->hw.njet.ctrl_reg, cs->hw.njet.base + NETJET_CTRL);
- /* 20 ms delay */
- mdelay(20);
-
- cs->hw.njet.ctrl_reg = 0x30; /* Reset Off and status read clear */
- outb(cs->hw.njet.ctrl_reg, cs->hw.njet.base + NETJET_CTRL);
- mdelay(10);
-
- cs->hw.njet.auxd = 0x00; // was 0xc0
- cs->hw.njet.dmactrl = 0;
-
- outb(~TJ_AMD_IRQ, cs->hw.njet.base + NETJET_AUXCTRL);
- outb(TJ_AMD_IRQ, cs->hw.njet.base + NETJET_IRQMASK1);
- outb(cs->hw.njet.auxd, cs->hw.njet.auxa);
-}
-
-static int en_cs_init_rest(struct IsdnCard *card, struct IsdnCardState *cs)
-{
- const int bytecnt = 256;
-
- printk(KERN_INFO
- "enter:now PCI: PCI card configured at 0x%lx IRQ %d\n",
- cs->hw.njet.base, cs->irq);
- if (!request_region(cs->hw.njet.base, bytecnt, "Fn_ISDN")) {
- printk(KERN_WARNING
- "HiSax: enter:now config port %lx-%lx already in use\n",
- cs->hw.njet.base,
- cs->hw.njet.base + bytecnt);
- return (0);
- }
-
- setup_Amd7930(cs);
- cs->hw.njet.last_is0 = 0;
- /* macro rByteAMD */
- cs->readisac = &ReadByteAmd7930;
- /* macro wByteAMD */
- cs->writeisac = &WriteByteAmd7930;
- cs->dc.amd7930.setIrqMask = &enpci_setIrqMask;
-
- cs->BC_Read_Reg = &dummyrr;
- cs->BC_Write_Reg = &dummywr;
- cs->BC_Send_Data = &netjet_fill_dma;
- cs->cardmsg = &enpci_card_msg;
- cs->irq_func = &enpci_interrupt;
- cs->irq_flags |= IRQF_SHARED;
-
- return (1);
-}
-
-static struct pci_dev *dev_netjet = NULL;
-
-/* called by config.c */
-int setup_enternow_pci(struct IsdnCard *card)
-{
- int ret;
- struct IsdnCardState *cs = card->cs;
- char tmp[64];
-
-#ifdef __BIG_ENDIAN
-#error "not running on big endian machines now"
-#endif
-
- strcpy(tmp, enternow_pci_rev);
- printk(KERN_INFO "HiSax: Formula-n Europe AG enter:now ISDN PCI driver Rev. %s\n", HiSax_getrev(tmp));
- if (cs->typ != ISDN_CTYPE_ENTERNOW)
- return (0);
- test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
-
- for (;;)
- {
- if ((dev_netjet = hisax_find_pci_device(PCI_VENDOR_ID_TIGERJET,
- PCI_DEVICE_ID_TIGERJET_300, dev_netjet))) {
- ret = en_pci_probe(dev_netjet, cs);
- if (!ret)
- return (0);
- } else {
- printk(KERN_WARNING "enter:now PCI: No PCI card found\n");
- return (0);
- }
-
- en_cs_init(card, cs);
- break;
- }
-
- return en_cs_init_rest(card, cs);
-}
diff --git a/drivers/isdn/hisax/fsm.c b/drivers/isdn/hisax/fsm.c
deleted file mode 100644
index 80ba82f77c63..000000000000
--- a/drivers/isdn/hisax/fsm.c
+++ /dev/null
@@ -1,161 +0,0 @@
-/* $Id: fsm.c,v 1.14.6.4 2001/09/23 22:24:47 kai Exp $
- *
- * Finite state machine
- *
- * Author Karsten Keil
- * Copyright by Karsten Keil <keil@isdn4linux.de>
- * by Kai Germaschewski <kai.germaschewski@gmx.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- * Thanks to Jan den Ouden
- * Fritz Elfert
- *
- */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include "hisax.h"
-
-#define FSM_TIMER_DEBUG 0
-
-int
-FsmNew(struct Fsm *fsm, struct FsmNode *fnlist, int fncount)
-{
- int i;
-
- fsm->jumpmatrix =
- kzalloc(array3_size(sizeof(FSMFNPTR), fsm->state_count,
- fsm->event_count),
- GFP_KERNEL);
- if (!fsm->jumpmatrix)
- return -ENOMEM;
-
- for (i = 0; i < fncount; i++)
- if ((fnlist[i].state >= fsm->state_count) || (fnlist[i].event >= fsm->event_count)) {
- printk(KERN_ERR "FsmNew Error line %d st(%ld/%ld) ev(%ld/%ld)\n",
- i, (long)fnlist[i].state, (long)fsm->state_count,
- (long)fnlist[i].event, (long)fsm->event_count);
- } else
- fsm->jumpmatrix[fsm->state_count * fnlist[i].event +
- fnlist[i].state] = (FSMFNPTR)fnlist[i].routine;
- return 0;
-}
-
-void
-FsmFree(struct Fsm *fsm)
-{
- kfree((void *) fsm->jumpmatrix);
-}
-
-int
-FsmEvent(struct FsmInst *fi, int event, void *arg)
-{
- FSMFNPTR r;
-
- if ((fi->state >= fi->fsm->state_count) || (event >= fi->fsm->event_count)) {
- printk(KERN_ERR "FsmEvent Error st(%ld/%ld) ev(%d/%ld)\n",
- (long)fi->state, (long)fi->fsm->state_count, event, (long)fi->fsm->event_count);
- return (1);
- }
- r = fi->fsm->jumpmatrix[fi->fsm->state_count * event + fi->state];
- if (r) {
- if (fi->debug)
- fi->printdebug(fi, "State %s Event %s",
- fi->fsm->strState[fi->state],
- fi->fsm->strEvent[event]);
- r(fi, event, arg);
- return (0);
- } else {
- if (fi->debug)
- fi->printdebug(fi, "State %s Event %s no routine",
- fi->fsm->strState[fi->state],
- fi->fsm->strEvent[event]);
- return (!0);
- }
-}
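A minimal sketch of the flat jump-matrix layout that FsmNew() fills and FsmEvent() reads above: one handler pointer per (state, event) pair, stored with the event selecting the row and the state the column; the lookup helper below is hypothetical:

/* returns NULL when no FsmNode was registered for this (state, event) pair */
static inline FSMFNPTR fsm_lookup(const struct Fsm *fsm, int state, int event)
{
	return fsm->jumpmatrix[fsm->state_count * event + state];
}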
-
-void
-FsmChangeState(struct FsmInst *fi, int newstate)
-{
- fi->state = newstate;
- if (fi->debug)
- fi->printdebug(fi, "ChangeState %s",
- fi->fsm->strState[newstate]);
-}
-
-static void
-FsmExpireTimer(struct timer_list *t)
-{
- struct FsmTimer *ft = from_timer(ft, t, tl);
-#if FSM_TIMER_DEBUG
- if (ft->fi->debug)
- ft->fi->printdebug(ft->fi, "FsmExpireTimer %lx", (long) ft);
-#endif
- FsmEvent(ft->fi, ft->event, ft->arg);
-}
-
-void
-FsmInitTimer(struct FsmInst *fi, struct FsmTimer *ft)
-{
- ft->fi = fi;
-#if FSM_TIMER_DEBUG
- if (ft->fi->debug)
- ft->fi->printdebug(ft->fi, "FsmInitTimer %lx", (long) ft);
-#endif
- timer_setup(&ft->tl, FsmExpireTimer, 0);
-}
-
-void
-FsmDelTimer(struct FsmTimer *ft, int where)
-{
-#if FSM_TIMER_DEBUG
- if (ft->fi->debug)
- ft->fi->printdebug(ft->fi, "FsmDelTimer %lx %d", (long) ft, where);
-#endif
- del_timer(&ft->tl);
-}
-
-int
-FsmAddTimer(struct FsmTimer *ft,
- int millisec, int event, void *arg, int where)
-{
-
-#if FSM_TIMER_DEBUG
- if (ft->fi->debug)
- ft->fi->printdebug(ft->fi, "FsmAddTimer %lx %d %d",
- (long) ft, millisec, where);
-#endif
-
- if (timer_pending(&ft->tl)) {
- printk(KERN_WARNING "FsmAddTimer: timer already active!\n");
- ft->fi->printdebug(ft->fi, "FsmAddTimer already active!");
- return -1;
- }
- ft->event = event;
- ft->arg = arg;
- ft->tl.expires = jiffies + (millisec * HZ) / 1000;
- add_timer(&ft->tl);
- return 0;
-}
-
-void
-FsmRestartTimer(struct FsmTimer *ft,
- int millisec, int event, void *arg, int where)
-{
-
-#if FSM_TIMER_DEBUG
- if (ft->fi->debug)
- ft->fi->printdebug(ft->fi, "FsmRestartTimer %lx %d %d",
- (long) ft, millisec, where);
-#endif
-
- if (timer_pending(&ft->tl))
- del_timer(&ft->tl);
- ft->event = event;
- ft->arg = arg;
- ft->tl.expires = jiffies + (millisec * HZ) / 1000;
- add_timer(&ft->tl);
-}
diff --git a/drivers/isdn/hisax/fsm.h b/drivers/isdn/hisax/fsm.h
deleted file mode 100644
index 8c7385619a46..000000000000
--- a/drivers/isdn/hisax/fsm.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/* $Id: fsm.h,v 1.3.2.2 2001/09/23 22:24:47 kai Exp $
- *
- * Finite state machine
- *
- * Author Karsten Keil
- * Copyright by Karsten Keil <keil@isdn4linux.de>
- * by Kai Germaschewski <kai.germaschewski@gmx.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#ifndef __FSM_H__
-#define __FSM_H__
-
-#include <linux/timer.h>
-
-struct FsmInst;
-
-typedef void (*FSMFNPTR)(struct FsmInst *, int, void *);
-
-struct Fsm {
- FSMFNPTR *jumpmatrix;
- int state_count, event_count;
- char **strEvent, **strState;
-};
-
-struct FsmInst {
- struct Fsm *fsm;
- int state;
- int debug;
- void *userdata;
- int userint;
- void (*printdebug) (struct FsmInst *, char *, ...);
-};
-
-struct FsmNode {
- int state, event;
- void (*routine) (struct FsmInst *, int, void *);
-};
-
-struct FsmTimer {
- struct FsmInst *fi;
- struct timer_list tl;
- int event;
- void *arg;
-};
-
-int FsmNew(struct Fsm *fsm, struct FsmNode *fnlist, int fncount);
-void FsmFree(struct Fsm *fsm);
-int FsmEvent(struct FsmInst *fi, int event, void *arg);
-void FsmChangeState(struct FsmInst *fi, int newstate);
-void FsmInitTimer(struct FsmInst *fi, struct FsmTimer *ft);
-int FsmAddTimer(struct FsmTimer *ft, int millisec, int event,
- void *arg, int where);
-void FsmRestartTimer(struct FsmTimer *ft, int millisec, int event,
- void *arg, int where);
-void FsmDelTimer(struct FsmTimer *ft, int where);
-
-#endif
diff --git a/drivers/isdn/hisax/gazel.c b/drivers/isdn/hisax/gazel.c
deleted file mode 100644
index a6d8af02354a..000000000000
--- a/drivers/isdn/hisax/gazel.c
+++ /dev/null
@@ -1,691 +0,0 @@
-/* $Id: gazel.c,v 2.19.2.4 2004/01/14 16:04:48 keil Exp $
- *
- * low level stuff for Gazel isdn cards
- *
- * Author BeWan Systems
- * based on source code from Karsten Keil
- * Copyright by BeWan Systems
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/init.h>
-#include "hisax.h"
-#include "isac.h"
-#include "hscx.h"
-#include "isdnl1.h"
-#include "ipac.h"
-#include <linux/pci.h>
-
-static const char *gazel_revision = "$Revision: 2.19.2.4 $";
-
-#define R647 1
-#define R685 2
-#define R753 3
-#define R742 4
-
-#define PLX_CNTRL 0x50 /* PLX control register */
-#define RESET_GAZEL 0x4
-#define RESET_9050 0x40000000
-#define PLX_INCSR 0x4C /* 9050 interrupt register */
-#define INT_ISAC_EN 0x8 /* 1 = enable ISAC interrupt */
-#define INT_ISAC 0x20 /* 1 = ISAC interrupt pending */
-#define INT_HSCX_EN 0x1 /* 1 = enable HSCX interrupt */
-#define INT_HSCX 0x4 /* 1 = HSCX interrupt pending */
-#define INT_PCI_EN 0x40 /* 1 = enable PCI interrupt */
-#define INT_IPAC_EN 0x3 /* enable IPAC interrupt */
-
-
-#define byteout(addr, val) outb(val, addr)
-#define bytein(addr) inb(addr)
-
-static inline u_char
-readreg(unsigned int adr, u_short off)
-{
- return bytein(adr + off);
-}
-
-static inline void
-writereg(unsigned int adr, u_short off, u_char data)
-{
- byteout(adr + off, data);
-}
-
-
-static inline void
-read_fifo(unsigned int adr, u_char *data, int size)
-{
- insb(adr, data, size);
-}
-
-static void
-write_fifo(unsigned int adr, u_char *data, int size)
-{
- outsb(adr, data, size);
-}
-
-static inline u_char
-readreg_ipac(unsigned int adr, u_short off)
-{
- register u_char ret;
-
- byteout(adr, off);
- ret = bytein(adr + 4);
- return ret;
-}
-
-static inline void
-writereg_ipac(unsigned int adr, u_short off, u_char data)
-{
- byteout(adr, off);
- byteout(adr + 4, data);
-}
-
-
-static inline void
-read_fifo_ipac(unsigned int adr, u_short off, u_char *data, int size)
-{
- byteout(adr, off);
- insb(adr + 4, data, size);
-}
-
-static void
-write_fifo_ipac(unsigned int adr, u_short off, u_char *data, int size)
-{
- byteout(adr, off);
- outsb(adr + 4, data, size);
-}
-
-/* Interface functions */
-
-static u_char
-ReadISAC(struct IsdnCardState *cs, u_char offset)
-{
- u_short off2 = offset;
-
- switch (cs->subtyp) {
- case R647:
- off2 = ((off2 << 8 & 0xf000) | (off2 & 0xf));
- /* fall through */
- case R685:
- return (readreg(cs->hw.gazel.isac, off2));
- case R753:
- case R742:
- return (readreg_ipac(cs->hw.gazel.ipac, 0x80 + off2));
- }
- return 0;
-}
-
-static void
-WriteISAC(struct IsdnCardState *cs, u_char offset, u_char value)
-{
- u_short off2 = offset;
-
- switch (cs->subtyp) {
- case R647:
- off2 = ((off2 << 8 & 0xf000) | (off2 & 0xf));
- /* fall through */
- case R685:
- writereg(cs->hw.gazel.isac, off2, value);
- break;
- case R753:
- case R742:
- writereg_ipac(cs->hw.gazel.ipac, 0x80 + off2, value);
- break;
- }
-}
-
-static void
-ReadISACfifo(struct IsdnCardState *cs, u_char *data, int size)
-{
- switch (cs->subtyp) {
- case R647:
- case R685:
- read_fifo(cs->hw.gazel.isacfifo, data, size);
- break;
- case R753:
- case R742:
- read_fifo_ipac(cs->hw.gazel.ipac, 0x80, data, size);
- break;
- }
-}
-
-static void
-WriteISACfifo(struct IsdnCardState *cs, u_char *data, int size)
-{
- switch (cs->subtyp) {
- case R647:
- case R685:
- write_fifo(cs->hw.gazel.isacfifo, data, size);
- break;
- case R753:
- case R742:
- write_fifo_ipac(cs->hw.gazel.ipac, 0x80, data, size);
- break;
- }
-}
-
-static void
-ReadHSCXfifo(struct IsdnCardState *cs, int hscx, u_char *data, int size)
-{
- switch (cs->subtyp) {
- case R647:
- case R685:
- read_fifo(cs->hw.gazel.hscxfifo[hscx], data, size);
- break;
- case R753:
- case R742:
- read_fifo_ipac(cs->hw.gazel.ipac, hscx * 0x40, data, size);
- break;
- }
-}
-
-static void
-WriteHSCXfifo(struct IsdnCardState *cs, int hscx, u_char *data, int size)
-{
- switch (cs->subtyp) {
- case R647:
- case R685:
- write_fifo(cs->hw.gazel.hscxfifo[hscx], data, size);
- break;
- case R753:
- case R742:
- write_fifo_ipac(cs->hw.gazel.ipac, hscx * 0x40, data, size);
- break;
- }
-}
-
-static u_char
-ReadHSCX(struct IsdnCardState *cs, int hscx, u_char offset)
-{
- u_short off2 = offset;
-
- switch (cs->subtyp) {
- case R647:
- off2 = ((off2 << 8 & 0xf000) | (off2 & 0xf));
- /* fall through */
- case R685:
- return (readreg(cs->hw.gazel.hscx[hscx], off2));
- case R753:
- case R742:
- return (readreg_ipac(cs->hw.gazel.ipac, hscx * 0x40 + off2));
- }
- return 0;
-}
-
-static void
-WriteHSCX(struct IsdnCardState *cs, int hscx, u_char offset, u_char value)
-{
- u_short off2 = offset;
-
- switch (cs->subtyp) {
- case R647:
- off2 = ((off2 << 8 & 0xf000) | (off2 & 0xf));
- /* fall through */
- case R685:
- writereg(cs->hw.gazel.hscx[hscx], off2, value);
- break;
- case R753:
- case R742:
- writereg_ipac(cs->hw.gazel.ipac, hscx * 0x40 + off2, value);
- break;
- }
-}
-
-/*
- * fast interrupt HSCX stuff goes here
- */
-
-#define READHSCX(cs, nr, reg) ReadHSCX(cs, nr, reg)
-#define WRITEHSCX(cs, nr, reg, data) WriteHSCX(cs, nr, reg, data)
-#define READHSCXFIFO(cs, nr, ptr, cnt) ReadHSCXfifo(cs, nr, ptr, cnt)
-#define WRITEHSCXFIFO(cs, nr, ptr, cnt) WriteHSCXfifo(cs, nr, ptr, cnt)
-
-#include "hscx_irq.c"
-
-static irqreturn_t
-gazel_interrupt(int intno, void *dev_id)
-{
-#define MAXCOUNT 5
- struct IsdnCardState *cs = dev_id;
- u_char valisac, valhscx;
- int count = 0;
- u_long flags;
-
- spin_lock_irqsave(&cs->lock, flags);
- do {
- valhscx = ReadHSCX(cs, 1, HSCX_ISTA);
- if (valhscx)
- hscx_int_main(cs, valhscx);
- valisac = ReadISAC(cs, ISAC_ISTA);
- if (valisac)
- isac_interrupt(cs, valisac);
- count++;
- } while ((valhscx || valisac) && (count < MAXCOUNT));
-
- WriteHSCX(cs, 0, HSCX_MASK, 0xFF);
- WriteHSCX(cs, 1, HSCX_MASK, 0xFF);
- WriteISAC(cs, ISAC_MASK, 0xFF);
- WriteISAC(cs, ISAC_MASK, 0x0);
- WriteHSCX(cs, 0, HSCX_MASK, 0x0);
- WriteHSCX(cs, 1, HSCX_MASK, 0x0);
- spin_unlock_irqrestore(&cs->lock, flags);
- return IRQ_HANDLED;
-}
-
-
-static irqreturn_t
-gazel_interrupt_ipac(int intno, void *dev_id)
-{
- struct IsdnCardState *cs = dev_id;
- u_char ista, val;
- int count = 0;
- u_long flags;
-
- spin_lock_irqsave(&cs->lock, flags);
- ista = ReadISAC(cs, IPAC_ISTA - 0x80);
- do {
- if (ista & 0x0f) {
- val = ReadHSCX(cs, 1, HSCX_ISTA);
- if (ista & 0x01)
- val |= 0x01;
- if (ista & 0x04)
- val |= 0x02;
- if (ista & 0x08)
- val |= 0x04;
- if (val) {
- hscx_int_main(cs, val);
- }
- }
- if (ista & 0x20) {
- val = 0xfe & ReadISAC(cs, ISAC_ISTA);
- if (val) {
- isac_interrupt(cs, val);
- }
- }
- if (ista & 0x10) {
- val = 0x01;
- isac_interrupt(cs, val);
- }
- ista = ReadISAC(cs, IPAC_ISTA - 0x80);
- count++;
- }
- while ((ista & 0x3f) && (count < MAXCOUNT));
-
- WriteISAC(cs, IPAC_MASK - 0x80, 0xFF);
- WriteISAC(cs, IPAC_MASK - 0x80, 0xC0);
- spin_unlock_irqrestore(&cs->lock, flags);
- return IRQ_HANDLED;
-}
-
-static void
-release_io_gazel(struct IsdnCardState *cs)
-{
- unsigned int i;
-
- switch (cs->subtyp) {
- case R647:
- for (i = 0x0000; i < 0xC000; i += 0x1000)
- release_region(i + cs->hw.gazel.hscx[0], 16);
- release_region(0xC000 + cs->hw.gazel.hscx[0], 1);
- break;
-
- case R685:
- release_region(cs->hw.gazel.hscx[0], 0x100);
- release_region(cs->hw.gazel.cfg_reg, 0x80);
- break;
-
- case R753:
- release_region(cs->hw.gazel.ipac, 0x8);
- release_region(cs->hw.gazel.cfg_reg, 0x80);
- break;
-
- case R742:
- release_region(cs->hw.gazel.ipac, 8);
- break;
- }
-}
-
-static int
-reset_gazel(struct IsdnCardState *cs)
-{
- unsigned long plxcntrl, addr = cs->hw.gazel.cfg_reg;
-
- switch (cs->subtyp) {
- case R647:
- writereg(addr, 0, 0);
- HZDELAY(10);
- writereg(addr, 0, 1);
- HZDELAY(2);
- break;
- case R685:
- plxcntrl = inl(addr + PLX_CNTRL);
- plxcntrl |= (RESET_9050 + RESET_GAZEL);
- outl(plxcntrl, addr + PLX_CNTRL);
- plxcntrl &= ~(RESET_9050 + RESET_GAZEL);
- HZDELAY(4);
- outl(plxcntrl, addr + PLX_CNTRL);
- HZDELAY(10);
- outb(INT_ISAC_EN + INT_HSCX_EN + INT_PCI_EN, addr + PLX_INCSR);
- break;
- case R753:
- plxcntrl = inl(addr + PLX_CNTRL);
- plxcntrl |= (RESET_9050 + RESET_GAZEL);
- outl(plxcntrl, addr + PLX_CNTRL);
- plxcntrl &= ~(RESET_9050 + RESET_GAZEL);
- WriteISAC(cs, IPAC_POTA2 - 0x80, 0x20);
- HZDELAY(4);
- outl(plxcntrl, addr + PLX_CNTRL);
- HZDELAY(10);
- WriteISAC(cs, IPAC_POTA2 - 0x80, 0x00);
- WriteISAC(cs, IPAC_ACFG - 0x80, 0xff);
- WriteISAC(cs, IPAC_AOE - 0x80, 0x0);
- WriteISAC(cs, IPAC_MASK - 0x80, 0xff);
- WriteISAC(cs, IPAC_CONF - 0x80, 0x1);
- outb(INT_IPAC_EN + INT_PCI_EN, addr + PLX_INCSR);
- WriteISAC(cs, IPAC_MASK - 0x80, 0xc0);
- break;
- case R742:
- WriteISAC(cs, IPAC_POTA2 - 0x80, 0x20);
- HZDELAY(4);
- WriteISAC(cs, IPAC_POTA2 - 0x80, 0x00);
- WriteISAC(cs, IPAC_ACFG - 0x80, 0xff);
- WriteISAC(cs, IPAC_AOE - 0x80, 0x0);
- WriteISAC(cs, IPAC_MASK - 0x80, 0xff);
- WriteISAC(cs, IPAC_CONF - 0x80, 0x1);
- WriteISAC(cs, IPAC_MASK - 0x80, 0xc0);
- break;
- }
- return (0);
-}
-
-static int
-Gazel_card_msg(struct IsdnCardState *cs, int mt, void *arg)
-{
- u_long flags;
-
- switch (mt) {
- case CARD_RESET:
- spin_lock_irqsave(&cs->lock, flags);
- reset_gazel(cs);
- spin_unlock_irqrestore(&cs->lock, flags);
- return (0);
- case CARD_RELEASE:
- release_io_gazel(cs);
- return (0);
- case CARD_INIT:
- spin_lock_irqsave(&cs->lock, flags);
- inithscxisac(cs, 1);
- if ((cs->subtyp == R647) || (cs->subtyp == R685)) {
- int i;
- for (i = 0; i < (2 + MAX_WAITING_CALLS); i++) {
- cs->bcs[i].hw.hscx.tsaxr0 = 0x1f;
- cs->bcs[i].hw.hscx.tsaxr1 = 0x23;
- }
- }
- spin_unlock_irqrestore(&cs->lock, flags);
- return (0);
- case CARD_TEST:
- return (0);
- }
- return (0);
-}
-
-static int
-reserve_regions(struct IsdnCard *card, struct IsdnCardState *cs)
-{
- unsigned int i, j, base = 0, adr = 0, len = 0;
-
- switch (cs->subtyp) {
- case R647:
- base = cs->hw.gazel.hscx[0];
- if (!request_region(adr = (0xC000 + base), len = 1, "gazel"))
- goto error;
- for (i = 0x0000; i < 0xC000; i += 0x1000) {
- if (!request_region(adr = (i + base), len = 16, "gazel"))
- goto error;
- }
- if (i != 0xC000) {
- for (j = 0; j < i; j += 0x1000)
- release_region(j + base, 16);
- release_region(0xC000 + base, 1);
- goto error;
- }
- break;
-
- case R685:
- if (!request_region(adr = cs->hw.gazel.hscx[0], len = 0x100, "gazel"))
- goto error;
- if (!request_region(adr = cs->hw.gazel.cfg_reg, len = 0x80, "gazel")) {
- release_region(cs->hw.gazel.hscx[0], 0x100);
- goto error;
- }
- break;
-
- case R753:
- if (!request_region(adr = cs->hw.gazel.ipac, len = 0x8, "gazel"))
- goto error;
- if (!request_region(adr = cs->hw.gazel.cfg_reg, len = 0x80, "gazel")) {
- release_region(cs->hw.gazel.ipac, 8);
- goto error;
- }
- break;
-
- case R742:
- if (!request_region(adr = cs->hw.gazel.ipac, len = 0x8, "gazel"))
- goto error;
- break;
- }
-
- return 0;
-
-error:
- printk(KERN_WARNING "Gazel: io ports 0x%x-0x%x already in use\n",
- adr, adr + len);
- return 1;
-}
-
-static int setup_gazelisa(struct IsdnCard *card, struct IsdnCardState *cs)
-{
- printk(KERN_INFO "Gazel: ISA PnP card automatic recognition\n");
- // we got an irq parameter, assume it is an ISA card
- // R742 decodes its address even if not started...
- // R647 returns FF if not present or not started
- // eventually needs improvement
- if (readreg_ipac(card->para[1], IPAC_ID) == 1)
- cs->subtyp = R742;
- else
- cs->subtyp = R647;
-
- setup_isac(cs);
- cs->hw.gazel.cfg_reg = card->para[1] + 0xC000;
- cs->hw.gazel.ipac = card->para[1];
- cs->hw.gazel.isac = card->para[1] + 0x8000;
- cs->hw.gazel.hscx[0] = card->para[1];
- cs->hw.gazel.hscx[1] = card->para[1] + 0x4000;
- cs->irq = card->para[0];
- cs->hw.gazel.isacfifo = cs->hw.gazel.isac;
- cs->hw.gazel.hscxfifo[0] = cs->hw.gazel.hscx[0];
- cs->hw.gazel.hscxfifo[1] = cs->hw.gazel.hscx[1];
-
- switch (cs->subtyp) {
- case R647:
- printk(KERN_INFO "Gazel: Card ISA R647/R648 found\n");
- cs->dc.isac.adf2 = 0x87;
- printk(KERN_INFO
- "Gazel: config irq:%d isac:0x%X cfg:0x%X\n",
- cs->irq, cs->hw.gazel.isac, cs->hw.gazel.cfg_reg);
- printk(KERN_INFO
- "Gazel: hscx A:0x%X hscx B:0x%X\n",
- cs->hw.gazel.hscx[0], cs->hw.gazel.hscx[1]);
-
- break;
- case R742:
- printk(KERN_INFO "Gazel: Card ISA R742 found\n");
- test_and_set_bit(HW_IPAC, &cs->HW_Flags);
- printk(KERN_INFO
- "Gazel: config irq:%d ipac:0x%X\n",
- cs->irq, cs->hw.gazel.ipac);
- break;
- }
-
- return (0);
-}
-
-#ifdef CONFIG_PCI
-static struct pci_dev *dev_tel = NULL;
-
-static int setup_gazelpci(struct IsdnCardState *cs)
-{
- u_int pci_ioaddr0 = 0, pci_ioaddr1 = 0;
- u_char pci_irq = 0, found;
- u_int nbseek, seekcard;
-
- printk(KERN_WARNING "Gazel: PCI card automatic recognition\n");
-
- found = 0;
- seekcard = PCI_DEVICE_ID_PLX_R685;
- for (nbseek = 0; nbseek < 4; nbseek++) {
- if ((dev_tel = hisax_find_pci_device(PCI_VENDOR_ID_PLX,
- seekcard, dev_tel))) {
- if (pci_enable_device(dev_tel))
- return 1;
- pci_irq = dev_tel->irq;
- pci_ioaddr0 = pci_resource_start(dev_tel, 1);
- pci_ioaddr1 = pci_resource_start(dev_tel, 2);
- found = 1;
- }
- if (found)
- break;
- else {
- switch (seekcard) {
- case PCI_DEVICE_ID_PLX_R685:
- seekcard = PCI_DEVICE_ID_PLX_R753;
- break;
- case PCI_DEVICE_ID_PLX_R753:
- seekcard = PCI_DEVICE_ID_PLX_DJINN_ITOO;
- break;
- case PCI_DEVICE_ID_PLX_DJINN_ITOO:
- seekcard = PCI_DEVICE_ID_PLX_OLITEC;
- break;
- }
- }
- }
- if (!found) {
- printk(KERN_WARNING "Gazel: No PCI card found\n");
- return (1);
- }
- if (!pci_irq) {
- printk(KERN_WARNING "Gazel: No IRQ for PCI card found\n");
- return 1;
- }
- cs->hw.gazel.pciaddr[0] = pci_ioaddr0;
- cs->hw.gazel.pciaddr[1] = pci_ioaddr1;
- setup_isac(cs);
- pci_ioaddr1 &= 0xfffe;
- cs->hw.gazel.cfg_reg = pci_ioaddr0 & 0xfffe;
- cs->hw.gazel.ipac = pci_ioaddr1;
- cs->hw.gazel.isac = pci_ioaddr1 + 0x80;
- cs->hw.gazel.hscx[0] = pci_ioaddr1;
- cs->hw.gazel.hscx[1] = pci_ioaddr1 + 0x40;
- cs->hw.gazel.isacfifo = cs->hw.gazel.isac;
- cs->hw.gazel.hscxfifo[0] = cs->hw.gazel.hscx[0];
- cs->hw.gazel.hscxfifo[1] = cs->hw.gazel.hscx[1];
- cs->irq = pci_irq;
- cs->irq_flags |= IRQF_SHARED;
-
- switch (seekcard) {
- case PCI_DEVICE_ID_PLX_R685:
- printk(KERN_INFO "Gazel: Card PCI R685 found\n");
- cs->subtyp = R685;
- cs->dc.isac.adf2 = 0x87;
- printk(KERN_INFO
- "Gazel: config irq:%d isac:0x%X cfg:0x%X\n",
- cs->irq, cs->hw.gazel.isac, cs->hw.gazel.cfg_reg);
- printk(KERN_INFO
- "Gazel: hscx A:0x%X hscx B:0x%X\n",
- cs->hw.gazel.hscx[0], cs->hw.gazel.hscx[1]);
- break;
- case PCI_DEVICE_ID_PLX_R753:
- case PCI_DEVICE_ID_PLX_DJINN_ITOO:
- case PCI_DEVICE_ID_PLX_OLITEC:
- printk(KERN_INFO "Gazel: Card PCI R753 found\n");
- cs->subtyp = R753;
- test_and_set_bit(HW_IPAC, &cs->HW_Flags);
- printk(KERN_INFO
- "Gazel: config irq:%d ipac:0x%X cfg:0x%X\n",
- cs->irq, cs->hw.gazel.ipac, cs->hw.gazel.cfg_reg);
- break;
- }
-
- return (0);
-}
-#endif /* CONFIG_PCI */
-
-int setup_gazel(struct IsdnCard *card)
-{
- struct IsdnCardState *cs = card->cs;
- char tmp[64];
- u_char val;
-
- strcpy(tmp, gazel_revision);
- printk(KERN_INFO "Gazel: Driver Revision %s\n", HiSax_getrev(tmp));
-
- if (cs->typ != ISDN_CTYPE_GAZEL)
- return (0);
-
- if (card->para[0]) {
- if (setup_gazelisa(card, cs))
- return (0);
- } else {
-
-#ifdef CONFIG_PCI
- if (setup_gazelpci(cs))
- return (0);
-#else
- printk(KERN_WARNING "Gazel: Card PCI requested and NO_PCI_BIOS, unable to config\n");
- return (0);
-#endif /* CONFIG_PCI */
- }
-
- if (reserve_regions(card, cs)) {
- return (0);
- }
- if (reset_gazel(cs)) {
- printk(KERN_WARNING "Gazel: wrong IRQ\n");
- release_io_gazel(cs);
- return (0);
- }
- cs->readisac = &ReadISAC;
- cs->writeisac = &WriteISAC;
- cs->readisacfifo = &ReadISACfifo;
- cs->writeisacfifo = &WriteISACfifo;
- cs->BC_Read_Reg = &ReadHSCX;
- cs->BC_Write_Reg = &WriteHSCX;
- cs->BC_Send_Data = &hscx_fill_fifo;
- cs->cardmsg = &Gazel_card_msg;
-
- switch (cs->subtyp) {
- case R647:
- case R685:
- cs->irq_func = &gazel_interrupt;
- ISACVersion(cs, "Gazel:");
- if (HscxVersion(cs, "Gazel:")) {
- printk(KERN_WARNING
- "Gazel: wrong HSCX versions check IO address\n");
- release_io_gazel(cs);
- return (0);
- }
- break;
- case R742:
- case R753:
- cs->irq_func = &gazel_interrupt_ipac;
- val = ReadISAC(cs, IPAC_ID - 0x80);
- printk(KERN_INFO "Gazel: IPAC version %x\n", val);
- break;
- }
-
- return (1);
-}
diff --git a/drivers/isdn/hisax/hfc4s8s_l1.c b/drivers/isdn/hisax/hfc4s8s_l1.c
deleted file mode 100644
index e9bb8fb67ad0..000000000000
--- a/drivers/isdn/hisax/hfc4s8s_l1.c
+++ /dev/null
@@ -1,1584 +0,0 @@
-/*************************************************************************/
-/* $Id: hfc4s8s_l1.c,v 1.10 2005/02/09 16:31:09 martinb1 Exp $ */
-/* HFC-4S/8S low layer interface for Cologne Chip HFC-4S/8S isdn chips */
-/* The low layer (L1) is implemented as a loadable module for usage with */
-/* the HiSax isdn driver for passive cards. */
-/* */
-/* Author: Werner Cornelius */
-/* (C) 2003 Cornelius Consult (werner@cornelius-consult.de) */
-/* */
-/* Driver maintained by Cologne Chip */
-/* - Martin Bachem, support@colognechip.com */
-/* */
-/* This driver only works with chip revisions >= 1, older revision 0 */
-/* engineering samples (only first manufacturer sample cards) will not */
-/* work and are rejected by the driver. */
-/* */
-/* This file distributed under the GNU GPL. */
-/* */
-/* See Version History at the end of this file */
-/* */
-/*************************************************************************/
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/timer.h>
-#include <linux/skbuff.h>
-#include <linux/wait.h>
-#include <asm/io.h>
-#include "hisax_if.h"
-#include "hfc4s8s_l1.h"
-
-static const char hfc4s8s_rev[] = "Revision: 1.10";
-
-/***************************************************************/
-/* adjustable transparent mode fifo threshold */
-/* The value defines the used fifo threshold with the equation */
-/* */
-/* notify number of bytes = 2 * 2 ^ TRANS_FIFO_THRES */
-/* */
-/* The default value is 5 which results in a buffer size of 64 */
-/* and an interrupt rate of 8ms. */
-/* The maximum value is 7 due to fifo size restrictions. */
-/* Values below 3-4 are not recommended due to high interrupt */
-/* load of the processor. For non-critical applications the */
-/* value should be raised to 7 to reduce any interrupt overhead */
-/***************************************************************/
-#define TRANS_FIFO_THRES 5
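
As a quick check of the equation in the comment above: a transparent B channel moves 8000 bytes per second, so the notify size fixes the interrupt period directly, and THRES = 5 indeed gives 64 bytes and roughly 8 ms. A small standalone calculation, illustrative only and not driver code:

#include <stdio.h>

int main(void)
{
        int thres;

        for (thres = 3; thres <= 7; thres++) {
                int bytes = 2 << thres;                   /* 2 * 2^thres */
                double period_ms = bytes / 8000.0 * 1000.0;

                printf("THRES=%d -> %3d bytes, ~%.0f ms between IRQs\n",
                       thres, bytes, period_ms);
        }
        return 0;
}
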
-
-/*************/
-/* constants */
-/*************/
-#define CLOCKMODE_0 0 /* ext. 24.576 MHz clk freq, int. single clock mode */
-#define CLOCKMODE_1 1 /* ext. 49.576 MHz clk freq, int. single clock mode */
-#define CHIP_ID_SHIFT 4
-#define HFC_MAX_ST 8
-#define MAX_D_FRAME_SIZE 270
-#define MAX_B_FRAME_SIZE 1536
-#define TRANS_TIMER_MODE (TRANS_FIFO_THRES & 0xf)
-#define TRANS_FIFO_BYTES (2 << TRANS_FIFO_THRES)
-#define MAX_F_CNT 0x0f
-
-#define CLKDEL_NT 0x6c
-#define CLKDEL_TE 0xf
-#define CTRL0_NT 4
-#define CTRL0_TE 0
-
-#define L1_TIMER_T4 2 /* minimum in jiffies */
-#define L1_TIMER_T3 (7 * HZ) /* activation timeout */
-#define L1_TIMER_T1 ((120 * HZ) / 1000) /* NT mode deactivation timeout */
-
-
-/******************/
-/* types and vars */
-/******************/
-static int card_cnt;
-
-/* private driver_data */
-typedef struct {
- int chip_id;
- int clock_mode;
- int max_st_ports;
- char *device_name;
-} hfc4s8s_param;
-
-static const struct pci_device_id hfc4s8s_ids[] = {
- {.vendor = PCI_VENDOR_ID_CCD,
- .device = PCI_DEVICE_ID_4S,
- .subvendor = 0x1397,
- .subdevice = 0x08b4,
- .driver_data =
- (unsigned long) &((hfc4s8s_param) {CHIP_ID_4S, CLOCKMODE_0, 4,
- "HFC-4S Evaluation Board"}),
- },
- {.vendor = PCI_VENDOR_ID_CCD,
- .device = PCI_DEVICE_ID_8S,
- .subvendor = 0x1397,
- .subdevice = 0x16b8,
- .driver_data =
- (unsigned long) &((hfc4s8s_param) {CHIP_ID_8S, CLOCKMODE_0, 8,
- "HFC-8S Evaluation Board"}),
- },
- {.vendor = PCI_VENDOR_ID_CCD,
- .device = PCI_DEVICE_ID_4S,
- .subvendor = 0x1397,
- .subdevice = 0xb520,
- .driver_data =
- (unsigned long) &((hfc4s8s_param) {CHIP_ID_4S, CLOCKMODE_1, 4,
- "IOB4ST"}),
- },
- {.vendor = PCI_VENDOR_ID_CCD,
- .device = PCI_DEVICE_ID_8S,
- .subvendor = 0x1397,
- .subdevice = 0xb522,
- .driver_data =
- (unsigned long) &((hfc4s8s_param) {CHIP_ID_8S, CLOCKMODE_1, 8,
- "IOB8ST"}),
- },
- {}
-};
-
-MODULE_DEVICE_TABLE(pci, hfc4s8s_ids);
-
-MODULE_AUTHOR("Werner Cornelius, werner@cornelius-consult.de");
-MODULE_DESCRIPTION("ISDN layer 1 for Cologne Chip HFC-4S/8S chips");
-MODULE_LICENSE("GPL");
-
-/***********/
-/* layer 1 */
-/***********/
-struct hfc4s8s_btype {
- spinlock_t lock;
- struct hisax_b_if b_if;
- struct hfc4s8s_l1 *l1p;
- struct sk_buff_head tx_queue;
- struct sk_buff *tx_skb;
- struct sk_buff *rx_skb;
- __u8 *rx_ptr;
- int tx_cnt;
- int bchan;
- int mode;
-};
-
-struct _hfc4s8s_hw;
-
-struct hfc4s8s_l1 {
- spinlock_t lock;
- struct _hfc4s8s_hw *hw; /* pointer to hardware area */
- int l1_state; /* actual l1 state */
- struct timer_list l1_timer; /* layer 1 timer structure */
- int nt_mode; /* set to nt mode */
- int st_num; /* own index */
- int enabled; /* interface is enabled */
- struct sk_buff_head d_tx_queue; /* send queue */
- int tx_cnt; /* bytes to send */
- struct hisax_d_if d_if; /* D-channel interface */
- struct hfc4s8s_btype b_ch[2]; /* B-channel data */
- struct hisax_b_if *b_table[2];
-};
-
-/**********************/
-/* hardware structure */
-/**********************/
-typedef struct _hfc4s8s_hw {
- spinlock_t lock;
-
- int cardnum;
- int ifnum;
- int iobase;
- int nt_mode;
- u_char *membase;
- u_char *hw_membase;
- void *pdev;
- int max_fifo;
- hfc4s8s_param driver_data;
- int irq;
- int fifo_sched_cnt;
- struct work_struct tqueue;
- struct hfc4s8s_l1 l1[HFC_MAX_ST];
- char card_name[60];
- struct {
- u_char r_irq_ctrl;
- u_char r_ctrl0;
- volatile u_char r_irq_statech; /* active isdn l1 status */
- u_char r_irqmsk_statchg; /* enabled isdn status ints */
- u_char r_irq_fifo_blx[8]; /* fifo status registers */
- u_char fifo_rx_trans_enables[8]; /* mask for enabled transparent rx fifos */
- u_char fifo_slow_timer_service[8]; /* mask for fifos needing slower timer service */
- volatile u_char r_irq_oview; /* contents of overview register */
- volatile u_char timer_irq;
- int timer_usg_cnt; /* number of channels using timer */
- } mr;
-} hfc4s8s_hw;
-
-
-
-/* inline functions io mapped */
-static inline void
-SetRegAddr(hfc4s8s_hw *a, u_char b)
-{
- outb(b, (a->iobase) + 4);
-}
-
-static inline u_char
-GetRegAddr(hfc4s8s_hw *a)
-{
- return (inb((volatile u_int) (a->iobase + 4)));
-}
-
-
-static inline void
-Write_hfc8(hfc4s8s_hw *a, u_char b, u_char c)
-{
- SetRegAddr(a, b);
- outb(c, a->iobase);
-}
-
-static inline void
-fWrite_hfc8(hfc4s8s_hw *a, u_char c)
-{
- outb(c, a->iobase);
-}
-
-static inline void
-fWrite_hfc32(hfc4s8s_hw *a, u_long c)
-{
- outl(c, a->iobase);
-}
-
-static inline u_char
-Read_hfc8(hfc4s8s_hw *a, u_char b)
-{
- SetRegAddr(a, b);
- return (inb((volatile u_int) a->iobase));
-}
-
-static inline u_char
-fRead_hfc8(hfc4s8s_hw *a)
-{
- return (inb((volatile u_int) a->iobase));
-}
-
-
-static inline u_short
-Read_hfc16(hfc4s8s_hw *a, u_char b)
-{
- SetRegAddr(a, b);
- return (inw((volatile u_int) a->iobase));
-}
-
-static inline u_long
-fRead_hfc32(hfc4s8s_hw *a)
-{
- return (inl((volatile u_int) a->iobase));
-}
-
-static inline void
-wait_busy(hfc4s8s_hw *a)
-{
- SetRegAddr(a, R_STATUS);
- while (inb((volatile u_int) a->iobase) & M_BUSY);
-}
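
The helpers above implement the usual indexed-register idiom: iobase + 4 selects a register number and iobase itself carries the data, so every access is a two-step sequence. The standalone sketch below simulates that window with a plain array and builds a read-modify-write on top of it; all names (set_reg_addr, update_bits, the register number 0x13) are made up for illustration, and the driver itself guards comparable sequences with its spinlocks:

#include <stdio.h>

/* Simulated two-port register window (not driver code). */
static unsigned char regfile[256];
static unsigned char selected;

static void set_reg_addr(unsigned char reg)  { selected = reg; }
static void write_data(unsigned char val)    { regfile[selected] = val; }
static unsigned char read_data(void)         { return regfile[selected]; }

/* Read-modify-write through the select/data pair. */
static void update_bits(unsigned char reg, unsigned char mask,
                        unsigned char bits)
{
        unsigned char v;

        set_reg_addr(reg);
        v = (read_data() & ~mask) | (bits & mask);
        write_data(v);
}

int main(void)
{
        update_bits(0x13, 0x08, 0x08);  /* set one enable bit */
        set_reg_addr(0x13);
        printf("reg 0x13 = 0x%02x\n", read_data());
        return 0;
}
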
-
-#define PCI_ENA_REGIO 0x01
-
-/******************************************************/
-/* function to read critical counter registers that */
-/* may be updated by the chip during read */
-/******************************************************/
-static u_char
-Read_hfc8_stable(hfc4s8s_hw *hw, int reg)
-{
- u_char ref8;
- u_char in8;
- ref8 = Read_hfc8(hw, reg);
- while (((in8 = Read_hfc8(hw, reg)) != ref8)) {
- ref8 = in8;
- }
- return in8;
-}
-
-static int
-Read_hfc16_stable(hfc4s8s_hw *hw, int reg)
-{
- int ref16;
- int in16;
-
- ref16 = Read_hfc16(hw, reg);
- while (((in16 = Read_hfc16(hw, reg)) != ref16)) {
- ref16 = in16;
- }
- return in16;
-}
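
The two helpers above re-read a register until two consecutive samples agree, which is the standard way to capture a counter the hardware may change mid-access. A condensed standalone version of the same idiom; stable_read and the fake counter are illustrative only:

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for a hardware counter that may change between reads. */
static int flaky_counter(void)
{
        return rand() % 4 ? 42 : 41;
}

/* Loop until two consecutive samples match, as in
 * Read_hfc8_stable()/Read_hfc16_stable() above. */
static int stable_read(int (*rd)(void))
{
        int ref = rd(), cur;

        while ((cur = rd()) != ref)
                ref = cur;
        return cur;
}

int main(void)
{
        printf("stable value: %d\n", stable_read(flaky_counter));
        return 0;
}
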
-
-/*****************************/
-/* D-channel call from HiSax */
-/*****************************/
-static void
-dch_l2l1(struct hisax_d_if *iface, int pr, void *arg)
-{
- struct hfc4s8s_l1 *l1 = iface->ifc.priv;
- struct sk_buff *skb = (struct sk_buff *) arg;
- u_long flags;
-
- switch (pr) {
-
- case (PH_DATA | REQUEST):
- if (!l1->enabled) {
- dev_kfree_skb(skb);
- break;
- }
- spin_lock_irqsave(&l1->lock, flags);
- skb_queue_tail(&l1->d_tx_queue, skb);
- if ((skb_queue_len(&l1->d_tx_queue) == 1) &&
- (l1->tx_cnt <= 0)) {
- l1->hw->mr.r_irq_fifo_blx[l1->st_num] |=
- 0x10;
- spin_unlock_irqrestore(&l1->lock, flags);
- schedule_work(&l1->hw->tqueue);
- } else
- spin_unlock_irqrestore(&l1->lock, flags);
- break;
-
- case (PH_ACTIVATE | REQUEST):
- if (!l1->enabled)
- break;
- if (!l1->nt_mode) {
- if (l1->l1_state < 6) {
- spin_lock_irqsave(&l1->lock,
- flags);
-
- Write_hfc8(l1->hw, R_ST_SEL,
- l1->st_num);
- Write_hfc8(l1->hw, A_ST_WR_STA,
- 0x60);
- mod_timer(&l1->l1_timer,
- jiffies + L1_TIMER_T3);
- spin_unlock_irqrestore(&l1->lock,
- flags);
- } else if (l1->l1_state == 7)
- l1->d_if.ifc.l1l2(&l1->d_if.ifc,
- PH_ACTIVATE |
- INDICATION,
- NULL);
- } else {
- if (l1->l1_state != 3) {
- spin_lock_irqsave(&l1->lock,
- flags);
- Write_hfc8(l1->hw, R_ST_SEL,
- l1->st_num);
- Write_hfc8(l1->hw, A_ST_WR_STA,
- 0x60);
- spin_unlock_irqrestore(&l1->lock,
- flags);
- } else if (l1->l1_state == 3)
- l1->d_if.ifc.l1l2(&l1->d_if.ifc,
- PH_ACTIVATE |
- INDICATION,
- NULL);
- }
- break;
-
- default:
- printk(KERN_INFO
- "HFC-4S/8S: Unknown D-chan cmd 0x%x received, ignored\n",
- pr);
- break;
- }
- if (!l1->enabled)
- l1->d_if.ifc.l1l2(&l1->d_if.ifc,
- PH_DEACTIVATE | INDICATION, NULL);
-} /* dch_l2l1 */
-
-/*****************************/
-/* B-channel call from HiSax */
-/*****************************/
-static void
-bch_l2l1(struct hisax_if *ifc, int pr, void *arg)
-{
- struct hfc4s8s_btype *bch = ifc->priv;
- struct hfc4s8s_l1 *l1 = bch->l1p;
- struct sk_buff *skb = (struct sk_buff *) arg;
- long mode = (long) arg;
- u_long flags;
-
- switch (pr) {
-
- case (PH_DATA | REQUEST):
- if (!l1->enabled || (bch->mode == L1_MODE_NULL)) {
- dev_kfree_skb(skb);
- break;
- }
- spin_lock_irqsave(&l1->lock, flags);
- skb_queue_tail(&bch->tx_queue, skb);
- if (!bch->tx_skb && (bch->tx_cnt <= 0)) {
- l1->hw->mr.r_irq_fifo_blx[l1->st_num] |=
- ((bch->bchan == 1) ? 1 : 4);
- spin_unlock_irqrestore(&l1->lock, flags);
- schedule_work(&l1->hw->tqueue);
- } else
- spin_unlock_irqrestore(&l1->lock, flags);
- break;
-
- case (PH_ACTIVATE | REQUEST):
- case (PH_DEACTIVATE | REQUEST):
- if (!l1->enabled)
- break;
- if (pr == (PH_DEACTIVATE | REQUEST))
- mode = L1_MODE_NULL;
-
- switch (mode) {
- case L1_MODE_HDLC:
- spin_lock_irqsave(&l1->lock,
- flags);
- l1->hw->mr.timer_usg_cnt++;
- l1->hw->mr.
- fifo_slow_timer_service[l1->
- st_num]
- |=
- ((bch->bchan ==
- 1) ? 0x2 : 0x8);
- Write_hfc8(l1->hw, R_FIFO,
- (l1->st_num * 8 +
- ((bch->bchan ==
- 1) ? 0 : 2)));
- wait_busy(l1->hw);
- Write_hfc8(l1->hw, A_CON_HDLC, 0xc); /* HDLC mode, flag fill, connect ST */
- Write_hfc8(l1->hw, A_SUBCH_CFG, 0); /* 8 bits */
- Write_hfc8(l1->hw, A_IRQ_MSK, 1); /* enable TX interrupts for hdlc */
- Write_hfc8(l1->hw, A_INC_RES_FIFO, 2); /* reset fifo */
- wait_busy(l1->hw);
-
- Write_hfc8(l1->hw, R_FIFO,
- (l1->st_num * 8 +
- ((bch->bchan ==
- 1) ? 1 : 3)));
- wait_busy(l1->hw);
- Write_hfc8(l1->hw, A_CON_HDLC, 0xc); /* HDLC mode, flag fill, connect ST */
- Write_hfc8(l1->hw, A_SUBCH_CFG, 0); /* 8 bits */
- Write_hfc8(l1->hw, A_IRQ_MSK, 1); /* enable RX interrupts for hdlc */
- Write_hfc8(l1->hw, A_INC_RES_FIFO, 2); /* reset fifo */
-
- Write_hfc8(l1->hw, R_ST_SEL,
- l1->st_num);
- l1->hw->mr.r_ctrl0 |=
- (bch->bchan & 3);
- Write_hfc8(l1->hw, A_ST_CTRL0,
- l1->hw->mr.r_ctrl0);
- bch->mode = L1_MODE_HDLC;
- spin_unlock_irqrestore(&l1->lock,
- flags);
-
- bch->b_if.ifc.l1l2(&bch->b_if.ifc,
- PH_ACTIVATE |
- INDICATION,
- NULL);
- break;
-
- case L1_MODE_TRANS:
- spin_lock_irqsave(&l1->lock,
- flags);
- l1->hw->mr.
- fifo_rx_trans_enables[l1->
- st_num]
- |=
- ((bch->bchan ==
- 1) ? 0x2 : 0x8);
- l1->hw->mr.timer_usg_cnt++;
- Write_hfc8(l1->hw, R_FIFO,
- (l1->st_num * 8 +
- ((bch->bchan ==
- 1) ? 0 : 2)));
- wait_busy(l1->hw);
- Write_hfc8(l1->hw, A_CON_HDLC, 0xf); /* Transparent mode, 1 fill, connect ST */
- Write_hfc8(l1->hw, A_SUBCH_CFG, 0); /* 8 bits */
- Write_hfc8(l1->hw, A_IRQ_MSK, 0); /* disable TX interrupts */
- Write_hfc8(l1->hw, A_INC_RES_FIFO, 2); /* reset fifo */
- wait_busy(l1->hw);
-
- Write_hfc8(l1->hw, R_FIFO,
- (l1->st_num * 8 +
- ((bch->bchan ==
- 1) ? 1 : 3)));
- wait_busy(l1->hw);
- Write_hfc8(l1->hw, A_CON_HDLC, 0xf); /* Transparent mode, 1 fill, connect ST */
- Write_hfc8(l1->hw, A_SUBCH_CFG, 0); /* 8 bits */
- Write_hfc8(l1->hw, A_IRQ_MSK, 0); /* disable RX interrupts */
- Write_hfc8(l1->hw, A_INC_RES_FIFO, 2); /* reset fifo */
-
- Write_hfc8(l1->hw, R_ST_SEL,
- l1->st_num);
- l1->hw->mr.r_ctrl0 |=
- (bch->bchan & 3);
- Write_hfc8(l1->hw, A_ST_CTRL0,
- l1->hw->mr.r_ctrl0);
- bch->mode = L1_MODE_TRANS;
- spin_unlock_irqrestore(&l1->lock,
- flags);
-
- bch->b_if.ifc.l1l2(&bch->b_if.ifc,
- PH_ACTIVATE |
- INDICATION,
- NULL);
- break;
-
- default:
- if (bch->mode == L1_MODE_NULL)
- break;
- spin_lock_irqsave(&l1->lock,
- flags);
- l1->hw->mr.
- fifo_slow_timer_service[l1->
- st_num]
- &=
- ~((bch->bchan ==
- 1) ? 0x3 : 0xc);
- l1->hw->mr.
- fifo_rx_trans_enables[l1->
- st_num]
- &=
- ~((bch->bchan ==
- 1) ? 0x3 : 0xc);
- l1->hw->mr.timer_usg_cnt--;
- Write_hfc8(l1->hw, R_FIFO,
- (l1->st_num * 8 +
- ((bch->bchan ==
- 1) ? 0 : 2)));
- wait_busy(l1->hw);
- Write_hfc8(l1->hw, A_IRQ_MSK, 0); /* disable TX interrupts */
- wait_busy(l1->hw);
- Write_hfc8(l1->hw, R_FIFO,
- (l1->st_num * 8 +
- ((bch->bchan ==
- 1) ? 1 : 3)));
- wait_busy(l1->hw);
- Write_hfc8(l1->hw, A_IRQ_MSK, 0); /* disable RX interrupts */
- Write_hfc8(l1->hw, R_ST_SEL,
- l1->st_num);
- l1->hw->mr.r_ctrl0 &=
- ~(bch->bchan & 3);
- Write_hfc8(l1->hw, A_ST_CTRL0,
- l1->hw->mr.r_ctrl0);
- spin_unlock_irqrestore(&l1->lock,
- flags);
-
- bch->mode = L1_MODE_NULL;
- bch->b_if.ifc.l1l2(&bch->b_if.ifc,
- PH_DEACTIVATE |
- INDICATION,
- NULL);
- if (bch->tx_skb) {
- dev_kfree_skb(bch->tx_skb);
- bch->tx_skb = NULL;
- }
- if (bch->rx_skb) {
- dev_kfree_skb(bch->rx_skb);
- bch->rx_skb = NULL;
- }
- skb_queue_purge(&bch->tx_queue);
- bch->tx_cnt = 0;
- bch->rx_ptr = NULL;
- break;
- }
-
- /* timer is only used when at least one b channel */
- /* is set up in transparent mode */
- if (l1->hw->mr.timer_usg_cnt) {
- Write_hfc8(l1->hw, R_IRQMSK_MISC,
- M_TI_IRQMSK);
- } else {
- Write_hfc8(l1->hw, R_IRQMSK_MISC, 0);
- }
-
- break;
-
- default:
- printk(KERN_INFO
- "HFC-4S/8S: Unknown B-chan cmd 0x%x received, ignored\n",
- pr);
- break;
- }
- if (!l1->enabled)
- bch->b_if.ifc.l1l2(&bch->b_if.ifc,
- PH_DEACTIVATE | INDICATION, NULL);
-} /* bch_l2l1 */
-
-/**************************/
-/* layer 1 timer function */
-/**************************/
-static void
-hfc_l1_timer(struct timer_list *t)
-{
- struct hfc4s8s_l1 *l1 = from_timer(l1, t, l1_timer);
- u_long flags;
-
- if (!l1->enabled)
- return;
-
- spin_lock_irqsave(&l1->lock, flags);
- if (l1->nt_mode) {
- l1->l1_state = 1;
- Write_hfc8(l1->hw, R_ST_SEL, l1->st_num);
- Write_hfc8(l1->hw, A_ST_WR_STA, 0x11);
- spin_unlock_irqrestore(&l1->lock, flags);
- l1->d_if.ifc.l1l2(&l1->d_if.ifc,
- PH_DEACTIVATE | INDICATION, NULL);
- spin_lock_irqsave(&l1->lock, flags);
- l1->l1_state = 1;
- Write_hfc8(l1->hw, A_ST_WR_STA, 0x1);
- spin_unlock_irqrestore(&l1->lock, flags);
- } else {
- /* activation timed out */
- Write_hfc8(l1->hw, R_ST_SEL, l1->st_num);
- Write_hfc8(l1->hw, A_ST_WR_STA, 0x13);
- spin_unlock_irqrestore(&l1->lock, flags);
- l1->d_if.ifc.l1l2(&l1->d_if.ifc,
- PH_DEACTIVATE | INDICATION, NULL);
- spin_lock_irqsave(&l1->lock, flags);
- Write_hfc8(l1->hw, R_ST_SEL, l1->st_num);
- Write_hfc8(l1->hw, A_ST_WR_STA, 0x3);
- spin_unlock_irqrestore(&l1->lock, flags);
- }
-} /* hfc_l1_timer */
-
-/****************************************/
-/* a complete D-frame has been received */
-/****************************************/
-static void
-rx_d_frame(struct hfc4s8s_l1 *l1p, int ech)
-{
- int z1, z2;
- u_char f1, f2, df;
- struct sk_buff *skb;
- u_char *cp;
-
-
- if (!l1p->enabled)
- return;
- do {
- /* E/D RX fifo */
- Write_hfc8(l1p->hw, R_FIFO,
- (l1p->st_num * 8 + ((ech) ? 7 : 5)));
- wait_busy(l1p->hw);
-
- f1 = Read_hfc8_stable(l1p->hw, A_F1);
- f2 = Read_hfc8(l1p->hw, A_F2);
-
- if (f1 < f2)
- df = MAX_F_CNT + 1 + f1 - f2;
- else
- df = f1 - f2;
-
- if (!df)
- return; /* no complete frame in fifo */
-
- z1 = Read_hfc16_stable(l1p->hw, A_Z1);
- z2 = Read_hfc16(l1p->hw, A_Z2);
-
- z1 = z1 - z2 + 1;
- if (z1 < 0)
- z1 += 384;
-
- if (!(skb = dev_alloc_skb(MAX_D_FRAME_SIZE))) {
- printk(KERN_INFO
- "HFC-4S/8S: Could not allocate D/E "
- "channel receive buffer");
- Write_hfc8(l1p->hw, A_INC_RES_FIFO, 2);
- wait_busy(l1p->hw);
- return;
- }
-
- if (((z1 < 4) || (z1 > MAX_D_FRAME_SIZE))) {
- if (skb)
- dev_kfree_skb(skb);
- /* remove erroneous D frame */
- if (df == 1) {
- /* reset fifo */
- Write_hfc8(l1p->hw, A_INC_RES_FIFO, 2);
- wait_busy(l1p->hw);
- return;
- } else {
- /* read erroneous D frame */
- SetRegAddr(l1p->hw, A_FIFO_DATA0);
-
- while (z1 >= 4) {
- fRead_hfc32(l1p->hw);
- z1 -= 4;
- }
-
- while (z1--)
- fRead_hfc8(l1p->hw);
-
- Write_hfc8(l1p->hw, A_INC_RES_FIFO, 1);
- wait_busy(l1p->hw);
- return;
- }
- }
-
- cp = skb->data;
-
- SetRegAddr(l1p->hw, A_FIFO_DATA0);
-
- while (z1 >= 4) {
- *((unsigned long *) cp) = fRead_hfc32(l1p->hw);
- cp += 4;
- z1 -= 4;
- }
-
- while (z1--)
- *cp++ = fRead_hfc8(l1p->hw);
-
- Write_hfc8(l1p->hw, A_INC_RES_FIFO, 1); /* increment f counter */
- wait_busy(l1p->hw);
-
- if (*(--cp)) {
- dev_kfree_skb(skb);
- } else {
- skb->len = (cp - skb->data) - 2;
- if (ech)
- l1p->d_if.ifc.l1l2(&l1p->d_if.ifc,
- PH_DATA_E | INDICATION,
- skb);
- else
- l1p->d_if.ifc.l1l2(&l1p->d_if.ifc,
- PH_DATA | INDICATION,
- skb);
- }
- } while (1);
-} /* rx_d_frame */
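
rx_d_frame() above recovers the number of pending frames from the chip's wrapping F counters (modulo 16) and the frame length from the Z counters (modulo 384, counting both endpoints). A standalone sketch of that arithmetic with two worked values; both helper names are invented for illustration:

#include <stdio.h>

/* Equivalent to the f1/f2 wrap handling above for 4-bit F counters. */
static int frames_pending(int f1, int f2)
{
        return (f1 - f2) & 0x0f;
}

/* Inclusive byte count between the Z pointers, wrapping at 384. */
static int frame_bytes(int z1, int z2)
{
        int len = z1 - z2 + 1;

        if (len < 0)
                len += 384;
        return len;
}

int main(void)
{
        printf("f1=2,  f2=14  -> %d frames\n", frames_pending(2, 14)); /* 4  */
        printf("z1=10, z2=380 -> %d bytes\n", frame_bytes(10, 380));   /* 15 */
        return 0;
}
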
-
-/*************************************************************/
-/* a B-frame has been received (perhaps not fully completed) */
-/*************************************************************/
-static void
-rx_b_frame(struct hfc4s8s_btype *bch)
-{
- int z1, z2, hdlc_complete;
- u_char f1, f2;
- struct hfc4s8s_l1 *l1 = bch->l1p;
- struct sk_buff *skb;
-
- if (!l1->enabled || (bch->mode == L1_MODE_NULL))
- return;
-
- do {
- /* RX Fifo */
- Write_hfc8(l1->hw, R_FIFO,
- (l1->st_num * 8 + ((bch->bchan == 1) ? 1 : 3)));
- wait_busy(l1->hw);
-
- if (bch->mode == L1_MODE_HDLC) {
- f1 = Read_hfc8_stable(l1->hw, A_F1);
- f2 = Read_hfc8(l1->hw, A_F2);
- hdlc_complete = ((f1 ^ f2) & MAX_F_CNT);
- } else
- hdlc_complete = 0;
- z1 = Read_hfc16_stable(l1->hw, A_Z1);
- z2 = Read_hfc16(l1->hw, A_Z2);
- z1 = (z1 - z2);
- if (hdlc_complete)
- z1++;
- if (z1 < 0)
- z1 += 384;
-
- if (!z1)
- break;
-
- if (!(skb = bch->rx_skb)) {
- if (!
- (skb =
- dev_alloc_skb((bch->mode ==
- L1_MODE_TRANS) ? z1
- : (MAX_B_FRAME_SIZE + 3)))) {
- printk(KERN_ERR
- "HFC-4S/8S: Could not allocate B "
- "channel receive buffer");
- return;
- }
- bch->rx_ptr = skb->data;
- bch->rx_skb = skb;
- }
-
- skb->len = (bch->rx_ptr - skb->data) + z1;
-
- /* HDLC length check */
- if ((bch->mode == L1_MODE_HDLC) &&
- ((hdlc_complete && (skb->len < 4)) ||
- (skb->len > (MAX_B_FRAME_SIZE + 3)))) {
-
- skb->len = 0;
- bch->rx_ptr = skb->data;
- Write_hfc8(l1->hw, A_INC_RES_FIFO, 2); /* reset fifo */
- wait_busy(l1->hw);
- return;
- }
- SetRegAddr(l1->hw, A_FIFO_DATA0);
-
- while (z1 >= 4) {
- *((unsigned long *) bch->rx_ptr) =
- fRead_hfc32(l1->hw);
- bch->rx_ptr += 4;
- z1 -= 4;
- }
-
- while (z1--)
- *(bch->rx_ptr++) = fRead_hfc8(l1->hw);
-
- if (hdlc_complete) {
- /* increment f counter */
- Write_hfc8(l1->hw, A_INC_RES_FIFO, 1);
- wait_busy(l1->hw);
-
- /* hdlc crc check */
- bch->rx_ptr--;
- if (*bch->rx_ptr) {
- skb->len = 0;
- bch->rx_ptr = skb->data;
- continue;
- }
- skb->len -= 3;
- }
- if (hdlc_complete || (bch->mode == L1_MODE_TRANS)) {
- bch->rx_skb = NULL;
- bch->rx_ptr = NULL;
- bch->b_if.ifc.l1l2(&bch->b_if.ifc,
- PH_DATA | INDICATION, skb);
- }
-
- } while (1);
-} /* rx_b_frame */
-
-/********************************************/
-/* a D-frame has been/should be transmitted */
-/********************************************/
-static void
-tx_d_frame(struct hfc4s8s_l1 *l1p)
-{
- struct sk_buff *skb;
- u_char f1, f2;
- u_char *cp;
- long cnt;
-
- if (l1p->l1_state != 7)
- return;
-
- /* TX fifo */
- Write_hfc8(l1p->hw, R_FIFO, (l1p->st_num * 8 + 4));
- wait_busy(l1p->hw);
-
- f1 = Read_hfc8(l1p->hw, A_F1);
- f2 = Read_hfc8_stable(l1p->hw, A_F2);
-
- if ((f1 ^ f2) & MAX_F_CNT)
- return; /* fifo is still filled */
-
- if (l1p->tx_cnt > 0) {
- cnt = l1p->tx_cnt;
- l1p->tx_cnt = 0;
- l1p->d_if.ifc.l1l2(&l1p->d_if.ifc, PH_DATA | CONFIRM,
- (void *) cnt);
- }
-
- if ((skb = skb_dequeue(&l1p->d_tx_queue))) {
- cp = skb->data;
- cnt = skb->len;
- SetRegAddr(l1p->hw, A_FIFO_DATA0);
-
- while (cnt >= 4) {
- SetRegAddr(l1p->hw, A_FIFO_DATA0);
- fWrite_hfc32(l1p->hw, *(unsigned long *) cp);
- cp += 4;
- cnt -= 4;
- }
-
- while (cnt--)
- fWrite_hfc8(l1p->hw, *cp++);
-
- l1p->tx_cnt = skb->truesize;
- Write_hfc8(l1p->hw, A_INC_RES_FIFO, 1); /* increment f counter */
- wait_busy(l1p->hw);
-
- dev_kfree_skb(skb);
- }
-} /* tx_d_frame */
-
-/******************************************************/
-/* a B-frame may be transmitted (or is not completed) */
-/******************************************************/
-static void
-tx_b_frame(struct hfc4s8s_btype *bch)
-{
- struct sk_buff *skb;
- struct hfc4s8s_l1 *l1 = bch->l1p;
- u_char *cp;
- int cnt, max, hdlc_num;
- long ack_len = 0;
-
- if (!l1->enabled || (bch->mode == L1_MODE_NULL))
- return;
-
- /* TX fifo */
- Write_hfc8(l1->hw, R_FIFO,
- (l1->st_num * 8 + ((bch->bchan == 1) ? 0 : 2)));
- wait_busy(l1->hw);
- do {
-
- if (bch->mode == L1_MODE_HDLC) {
- hdlc_num = Read_hfc8(l1->hw, A_F1) & MAX_F_CNT;
- hdlc_num -=
- (Read_hfc8_stable(l1->hw, A_F2) & MAX_F_CNT);
- if (hdlc_num < 0)
- hdlc_num += 16;
- if (hdlc_num >= 15)
- break; /* fifo still filled up with hdlc frames */
- } else
- hdlc_num = 0;
-
- if (!(skb = bch->tx_skb)) {
- if (!(skb = skb_dequeue(&bch->tx_queue))) {
- l1->hw->mr.fifo_slow_timer_service[l1->
- st_num]
- &= ~((bch->bchan == 1) ? 1 : 4);
- break; /* list empty */
- }
- bch->tx_skb = skb;
- bch->tx_cnt = 0;
- }
-
- if (!hdlc_num)
- l1->hw->mr.fifo_slow_timer_service[l1->st_num] |=
- ((bch->bchan == 1) ? 1 : 4);
- else
- l1->hw->mr.fifo_slow_timer_service[l1->st_num] &=
- ~((bch->bchan == 1) ? 1 : 4);
-
- max = Read_hfc16_stable(l1->hw, A_Z2);
- max -= Read_hfc16(l1->hw, A_Z1);
- if (max <= 0)
- max += 384;
- max--;
-
- if (max < 16)
- break; /* don't write too small amounts of bytes */
-
- cnt = skb->len - bch->tx_cnt;
- if (cnt > max)
- cnt = max;
- cp = skb->data + bch->tx_cnt;
- bch->tx_cnt += cnt;
-
- SetRegAddr(l1->hw, A_FIFO_DATA0);
- while (cnt >= 4) {
- fWrite_hfc32(l1->hw, *(unsigned long *) cp);
- cp += 4;
- cnt -= 4;
- }
-
- while (cnt--)
- fWrite_hfc8(l1->hw, *cp++);
-
- if (bch->tx_cnt >= skb->len) {
- if (bch->mode == L1_MODE_HDLC) {
- /* increment f counter */
- Write_hfc8(l1->hw, A_INC_RES_FIFO, 1);
- }
- ack_len += skb->truesize;
- bch->tx_skb = NULL;
- bch->tx_cnt = 0;
- dev_kfree_skb(skb);
- } else
- /* Re-Select */
- Write_hfc8(l1->hw, R_FIFO,
- (l1->st_num * 8 +
- ((bch->bchan == 1) ? 0 : 2)));
- wait_busy(l1->hw);
- } while (1);
-
- if (ack_len)
- bch->b_if.ifc.l1l2((struct hisax_if *) &bch->b_if,
- PH_DATA | CONFIRM, (void *) ack_len);
-} /* tx_b_frame */
-
-/*************************************/
-/* bottom half handler for interrupt */
-/*************************************/
-static void
-hfc4s8s_bh(struct work_struct *work)
-{
- hfc4s8s_hw *hw = container_of(work, hfc4s8s_hw, tqueue);
- u_char b;
- struct hfc4s8s_l1 *l1p;
- volatile u_char *fifo_stat;
- int idx;
-
- /* handle layer 1 state changes */
- b = 1;
- l1p = hw->l1;
- while (b) {
- if ((b & hw->mr.r_irq_statech)) {
- /* reset l1 event */
- hw->mr.r_irq_statech &= ~b;
- if (l1p->enabled) {
- if (l1p->nt_mode) {
- u_char oldstate = l1p->l1_state;
-
- Write_hfc8(l1p->hw, R_ST_SEL,
- l1p->st_num);
- l1p->l1_state =
- Read_hfc8(l1p->hw,
- A_ST_RD_STA) & 0xf;
-
- if ((oldstate == 3)
- && (l1p->l1_state != 3))
- l1p->d_if.ifc.l1l2(&l1p->
- d_if.
- ifc,
- PH_DEACTIVATE
- |
- INDICATION,
- NULL);
-
- if (l1p->l1_state != 2) {
- del_timer(&l1p->l1_timer);
- if (l1p->l1_state == 3) {
- l1p->d_if.ifc.
- l1l2(&l1p->
- d_if.ifc,
- PH_ACTIVATE
- |
- INDICATION,
- NULL);
- }
- } else {
- /* allow transition */
- Write_hfc8(hw, A_ST_WR_STA,
- M_SET_G2_G3);
- mod_timer(&l1p->l1_timer,
- jiffies +
- L1_TIMER_T1);
- }
- printk(KERN_INFO
- "HFC-4S/8S: NT ch %d l1 state %d -> %d\n",
- l1p->st_num, oldstate,
- l1p->l1_state);
- } else {
- u_char oldstate = l1p->l1_state;
-
- Write_hfc8(l1p->hw, R_ST_SEL,
- l1p->st_num);
- l1p->l1_state =
- Read_hfc8(l1p->hw,
- A_ST_RD_STA) & 0xf;
-
- if (((l1p->l1_state == 3) &&
- ((oldstate == 7) ||
- (oldstate == 8))) ||
- ((timer_pending
- (&l1p->l1_timer))
- && (l1p->l1_state == 8))) {
- mod_timer(&l1p->l1_timer,
- L1_TIMER_T4 +
- jiffies);
- } else {
- if (l1p->l1_state == 7) {
- del_timer(&l1p->
- l1_timer);
- l1p->d_if.ifc.
- l1l2(&l1p->
- d_if.ifc,
- PH_ACTIVATE
- |
- INDICATION,
- NULL);
- tx_d_frame(l1p);
- }
- if (l1p->l1_state == 3) {
- if (oldstate != 3)
- l1p->d_if.
- ifc.
- l1l2
- (&l1p->
- d_if.
- ifc,
- PH_DEACTIVATE
- |
- INDICATION,
- NULL);
- }
- }
- printk(KERN_INFO
- "HFC-4S/8S: TE %d ch %d l1 state %d -> %d\n",
- l1p->hw->cardnum,
- l1p->st_num, oldstate,
- l1p->l1_state);
- }
- }
- }
- b <<= 1;
- l1p++;
- }
-
- /* now handle the fifos */
- idx = 0;
- fifo_stat = hw->mr.r_irq_fifo_blx;
- l1p = hw->l1;
- while (idx < hw->driver_data.max_st_ports) {
-
- if (hw->mr.timer_irq) {
- *fifo_stat |= hw->mr.fifo_rx_trans_enables[idx];
- if (hw->fifo_sched_cnt <= 0) {
- *fifo_stat |=
- hw->mr.fifo_slow_timer_service[l1p->
- st_num];
- }
- }
- /* ignore fifo 6 (TX E fifo) */
- *fifo_stat &= 0xff - 0x40;
-
- while (*fifo_stat) {
-
- if (!l1p->nt_mode) {
- /* RX Fifo has data to read */
- if ((*fifo_stat & 0x20)) {
- *fifo_stat &= ~0x20;
- rx_d_frame(l1p, 0);
- }
- /* E Fifo has data to read */
- if ((*fifo_stat & 0x80)) {
- *fifo_stat &= ~0x80;
- rx_d_frame(l1p, 1);
- }
- /* TX Fifo completed send */
- if ((*fifo_stat & 0x10)) {
- *fifo_stat &= ~0x10;
- tx_d_frame(l1p);
- }
- }
- /* B1 RX Fifo has data to read */
- if ((*fifo_stat & 0x2)) {
- *fifo_stat &= ~0x2;
- rx_b_frame(l1p->b_ch);
- }
- /* B1 TX Fifo has send completed */
- if ((*fifo_stat & 0x1)) {
- *fifo_stat &= ~0x1;
- tx_b_frame(l1p->b_ch);
- }
- /* B2 RX Fifo has data to read */
- if ((*fifo_stat & 0x8)) {
- *fifo_stat &= ~0x8;
- rx_b_frame(l1p->b_ch + 1);
- }
- /* B2 TX Fifo has send completed */
- if ((*fifo_stat & 0x4)) {
- *fifo_stat &= ~0x4;
- tx_b_frame(l1p->b_ch + 1);
- }
- }
- fifo_stat++;
- l1p++;
- idx++;
- }
-
- if (hw->fifo_sched_cnt <= 0)
- hw->fifo_sched_cnt += (1 << (7 - TRANS_TIMER_MODE));
- hw->mr.timer_irq = 0; /* clear requested timer irq */
-} /* hfc4s8s_bh */
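
For orientation, the bits polled above in each r_irq_fifo_blx byte follow the per-port fifo numbering used with R_FIFO (st_num * 8 + fifo). The macro names below are illustrative only and are not defined in hfc4s8s_l1.h:

/* Illustrative names - the driver tests the raw bits above. */
#define FIFO_IRQ_B1_TX  0x01    /* fifo 0: B1 transmit complete   */
#define FIFO_IRQ_B1_RX  0x02    /* fifo 1: B1 receive data ready  */
#define FIFO_IRQ_B2_TX  0x04    /* fifo 2: B2 transmit complete   */
#define FIFO_IRQ_B2_RX  0x08    /* fifo 3: B2 receive data ready  */
#define FIFO_IRQ_D_TX   0x10    /* fifo 4: D transmit complete    */
#define FIFO_IRQ_D_RX   0x20    /* fifo 5: D receive data ready   */
#define FIFO_IRQ_E_TX   0x40    /* fifo 6: unused, masked out     */
#define FIFO_IRQ_E_RX   0x80    /* fifo 7: E (echo) receive       */
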
-
-/*********************/
-/* interrupt handler */
-/*********************/
-static irqreturn_t
-hfc4s8s_interrupt(int intno, void *dev_id)
-{
- hfc4s8s_hw *hw = dev_id;
- u_char b, ovr;
- volatile u_char *ovp;
- int idx;
- u_char old_ioreg;
-
- if (!hw || !(hw->mr.r_irq_ctrl & M_GLOB_IRQ_EN))
- return IRQ_NONE;
-
- /* read the currently selected register */
- old_ioreg = GetRegAddr(hw);
-
- /* Layer 1 State change */
- hw->mr.r_irq_statech |=
- (Read_hfc8(hw, R_SCI) & hw->mr.r_irqmsk_statchg);
- if (!
- (b = (Read_hfc8(hw, R_STATUS) & (M_MISC_IRQSTA | M_FR_IRQSTA)))
- && !hw->mr.r_irq_statech) {
- SetRegAddr(hw, old_ioreg);
- return IRQ_NONE;
- }
-
- /* timer event */
- if (Read_hfc8(hw, R_IRQ_MISC) & M_TI_IRQ) {
- hw->mr.timer_irq = 1;
- hw->fifo_sched_cnt--;
- }
-
- /* FIFO event */
- if ((ovr = Read_hfc8(hw, R_IRQ_OVIEW))) {
- hw->mr.r_irq_oview |= ovr;
- idx = R_IRQ_FIFO_BL0;
- ovp = hw->mr.r_irq_fifo_blx;
- while (ovr) {
- if ((ovr & 1)) {
- *ovp |= Read_hfc8(hw, idx);
- }
- ovp++;
- idx++;
- ovr >>= 1;
- }
- }
-
- /* queue the request to allow other cards to interrupt */
- schedule_work(&hw->tqueue);
-
- SetRegAddr(hw, old_ioreg);
- return IRQ_HANDLED;
-} /* hfc4s8s_interrupt */
-
-/***********************************************************************/
-/* reset the complete chip, don't release the chips irq but disable it */
-/***********************************************************************/
-static void
-chipreset(hfc4s8s_hw *hw)
-{
- u_long flags;
-
- spin_lock_irqsave(&hw->lock, flags);
- Write_hfc8(hw, R_CTRL, 0); /* use internal RAM */
- Write_hfc8(hw, R_RAM_MISC, 0); /* 32k*8 RAM */
- Write_hfc8(hw, R_FIFO_MD, 0); /* fifo mode 386 byte/fifo simple mode */
- Write_hfc8(hw, R_CIRM, M_SRES); /* reset chip */
- hw->mr.r_irq_ctrl = 0; /* interrupt is inactive */
- spin_unlock_irqrestore(&hw->lock, flags);
-
- udelay(3);
- Write_hfc8(hw, R_CIRM, 0); /* disable reset */
- wait_busy(hw);
-
- Write_hfc8(hw, R_PCM_MD0, M_PCM_MD); /* master mode */
- Write_hfc8(hw, R_RAM_MISC, M_FZ_MD); /* transmit fifo option */
- if (hw->driver_data.clock_mode == 1)
- Write_hfc8(hw, R_BRG_PCM_CFG, M_PCM_CLK); /* PCM clk / 2 */
- Write_hfc8(hw, R_TI_WD, TRANS_TIMER_MODE); /* timer interval */
-
- memset(&hw->mr, 0, sizeof(hw->mr));
-} /* chipreset */
-
-/********************************************/
-/* disable/enable hardware in nt or te mode */
-/********************************************/
-static void
-hfc_hardware_enable(hfc4s8s_hw *hw, int enable, int nt_mode)
-{
- u_long flags;
- char if_name[40];
- int i;
-
- if (enable) {
- /* save system vars */
- hw->nt_mode = nt_mode;
-
- /* enable fifo and state irqs, but not global irq enable */
- hw->mr.r_irq_ctrl = M_FIFO_IRQ;
- Write_hfc8(hw, R_IRQ_CTRL, hw->mr.r_irq_ctrl);
- hw->mr.r_irqmsk_statchg = 0;
- Write_hfc8(hw, R_SCI_MSK, hw->mr.r_irqmsk_statchg);
- Write_hfc8(hw, R_PWM_MD, 0x80);
- Write_hfc8(hw, R_PWM1, 26);
- if (!nt_mode)
- Write_hfc8(hw, R_ST_SYNC, M_AUTO_SYNC);
-
- /* enable the line interfaces and fifos */
- for (i = 0; i < hw->driver_data.max_st_ports; i++) {
- hw->mr.r_irqmsk_statchg |= (1 << i);
- Write_hfc8(hw, R_SCI_MSK, hw->mr.r_irqmsk_statchg);
- Write_hfc8(hw, R_ST_SEL, i);
- Write_hfc8(hw, A_ST_CLK_DLY,
- ((nt_mode) ? CLKDEL_NT : CLKDEL_TE));
- hw->mr.r_ctrl0 = ((nt_mode) ? CTRL0_NT : CTRL0_TE);
- Write_hfc8(hw, A_ST_CTRL0, hw->mr.r_ctrl0);
- Write_hfc8(hw, A_ST_CTRL2, 3);
- Write_hfc8(hw, A_ST_WR_STA, 0); /* enable state machine */
-
- hw->l1[i].enabled = 1;
- hw->l1[i].nt_mode = nt_mode;
-
- if (!nt_mode) {
- /* setup E-fifo */
- Write_hfc8(hw, R_FIFO, i * 8 + 7); /* E fifo */
- wait_busy(hw);
- Write_hfc8(hw, A_CON_HDLC, 0x11); /* HDLC mode, 1 fill, connect ST */
- Write_hfc8(hw, A_SUBCH_CFG, 2); /* only 2 bits */
- Write_hfc8(hw, A_IRQ_MSK, 1); /* enable interrupt */
- Write_hfc8(hw, A_INC_RES_FIFO, 2); /* reset fifo */
- wait_busy(hw);
-
- /* setup D RX-fifo */
- Write_hfc8(hw, R_FIFO, i * 8 + 5); /* RX fifo */
- wait_busy(hw);
- Write_hfc8(hw, A_CON_HDLC, 0x11); /* HDLC mode, 1 fill, connect ST */
- Write_hfc8(hw, A_SUBCH_CFG, 2); /* only 2 bits */
- Write_hfc8(hw, A_IRQ_MSK, 1); /* enable interrupt */
- Write_hfc8(hw, A_INC_RES_FIFO, 2); /* reset fifo */
- wait_busy(hw);
-
- /* setup D TX-fifo */
- Write_hfc8(hw, R_FIFO, i * 8 + 4); /* TX fifo */
- wait_busy(hw);
- Write_hfc8(hw, A_CON_HDLC, 0x11); /* HDLC mode, 1 fill, connect ST */
- Write_hfc8(hw, A_SUBCH_CFG, 2); /* only 2 bits */
- Write_hfc8(hw, A_IRQ_MSK, 1); /* enable interrupt */
- Write_hfc8(hw, A_INC_RES_FIFO, 2); /* reset fifo */
- wait_busy(hw);
- }
-
- sprintf(if_name, "hfc4s8s_%d%d_", hw->cardnum, i);
-
- if (hisax_register
- (&hw->l1[i].d_if, hw->l1[i].b_table, if_name,
- ((nt_mode) ? 3 : 2))) {
-
- hw->l1[i].enabled = 0;
- hw->mr.r_irqmsk_statchg &= ~(1 << i);
- Write_hfc8(hw, R_SCI_MSK,
- hw->mr.r_irqmsk_statchg);
- printk(KERN_INFO
- "HFC-4S/8S: Unable to register S/T device %s, break\n",
- if_name);
- break;
- }
- }
- spin_lock_irqsave(&hw->lock, flags);
- hw->mr.r_irq_ctrl |= M_GLOB_IRQ_EN;
- Write_hfc8(hw, R_IRQ_CTRL, hw->mr.r_irq_ctrl);
- spin_unlock_irqrestore(&hw->lock, flags);
- } else {
- /* disable hardware */
- spin_lock_irqsave(&hw->lock, flags);
- hw->mr.r_irq_ctrl &= ~M_GLOB_IRQ_EN;
- Write_hfc8(hw, R_IRQ_CTRL, hw->mr.r_irq_ctrl);
- spin_unlock_irqrestore(&hw->lock, flags);
-
- for (i = hw->driver_data.max_st_ports - 1; i >= 0; i--) {
- hw->l1[i].enabled = 0;
- hisax_unregister(&hw->l1[i].d_if);
- del_timer(&hw->l1[i].l1_timer);
- skb_queue_purge(&hw->l1[i].d_tx_queue);
- skb_queue_purge(&hw->l1[i].b_ch[0].tx_queue);
- skb_queue_purge(&hw->l1[i].b_ch[1].tx_queue);
- }
- chipreset(hw);
- }
-} /* hfc_hardware_enable */
-
-/******************************************/
-/* disable memory mapped ports / io ports */
-/******************************************/
-static void
-release_pci_ports(hfc4s8s_hw *hw)
-{
- pci_write_config_word(hw->pdev, PCI_COMMAND, 0);
- if (hw->iobase)
- release_region(hw->iobase, 8);
-}
-
-/*****************************************/
-/* enable memory mapped ports / io ports */
-/*****************************************/
-static void
-enable_pci_ports(hfc4s8s_hw *hw)
-{
- pci_write_config_word(hw->pdev, PCI_COMMAND, PCI_ENA_REGIO);
-}
-
-/*************************************/
-/* initialise the HFC-4s/8s hardware */
-/* return 0 on success. */
-/*************************************/
-static int
-setup_instance(hfc4s8s_hw *hw)
-{
- int err = -EIO;
- int i;
-
- for (i = 0; i < HFC_MAX_ST; i++) {
- struct hfc4s8s_l1 *l1p;
-
- l1p = hw->l1 + i;
- spin_lock_init(&l1p->lock);
- l1p->hw = hw;
- timer_setup(&l1p->l1_timer, hfc_l1_timer, 0);
- l1p->st_num = i;
- skb_queue_head_init(&l1p->d_tx_queue);
- l1p->d_if.ifc.priv = hw->l1 + i;
- l1p->d_if.ifc.l2l1 = (void *) dch_l2l1;
-
- spin_lock_init(&l1p->b_ch[0].lock);
- l1p->b_ch[0].b_if.ifc.l2l1 = (void *) bch_l2l1;
- l1p->b_ch[0].b_if.ifc.priv = (void *) &l1p->b_ch[0];
- l1p->b_ch[0].l1p = hw->l1 + i;
- l1p->b_ch[0].bchan = 1;
- l1p->b_table[0] = &l1p->b_ch[0].b_if;
- skb_queue_head_init(&l1p->b_ch[0].tx_queue);
-
- spin_lock_init(&l1p->b_ch[1].lock);
- l1p->b_ch[1].b_if.ifc.l2l1 = (void *) bch_l2l1;
- l1p->b_ch[1].b_if.ifc.priv = (void *) &l1p->b_ch[1];
- l1p->b_ch[1].l1p = hw->l1 + i;
- l1p->b_ch[1].bchan = 2;
- l1p->b_table[1] = &l1p->b_ch[1].b_if;
- skb_queue_head_init(&l1p->b_ch[1].tx_queue);
- }
-
- enable_pci_ports(hw);
- chipreset(hw);
-
- i = Read_hfc8(hw, R_CHIP_ID) >> CHIP_ID_SHIFT;
- if (i != hw->driver_data.chip_id) {
- printk(KERN_INFO
- "HFC-4S/8S: invalid chip id 0x%x instead of 0x%x, card ignored\n",
- i, hw->driver_data.chip_id);
- goto out;
- }
-
- i = Read_hfc8(hw, R_CHIP_RV) & 0xf;
- if (!i) {
- printk(KERN_INFO
- "HFC-4S/8S: chip revision 0 not supported, card ignored\n");
- goto out;
- }
-
- INIT_WORK(&hw->tqueue, hfc4s8s_bh);
-
- if (request_irq
- (hw->irq, hfc4s8s_interrupt, IRQF_SHARED, hw->card_name, hw)) {
- printk(KERN_INFO
- "HFC-4S/8S: unable to alloc irq %d, card ignored\n",
- hw->irq);
- goto out;
- }
- printk(KERN_INFO
- "HFC-4S/8S: found PCI card at iobase 0x%x, irq %d\n",
- hw->iobase, hw->irq);
-
- hfc_hardware_enable(hw, 1, 0);
-
- return (0);
-
-out:
- hw->irq = 0;
- release_pci_ports(hw);
- kfree(hw);
- return (err);
-}
-
-/*****************************************/
-/* PCI hotplug interface: probe new card */
-/*****************************************/
-static int
-hfc4s8s_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- int err = -ENOMEM;
- hfc4s8s_param *driver_data = (hfc4s8s_param *) ent->driver_data;
- hfc4s8s_hw *hw;
-
- if (!(hw = kzalloc(sizeof(hfc4s8s_hw), GFP_ATOMIC))) {
- printk(KERN_ERR "No kmem for HFC-4S/8S card\n");
- return (err);
- }
-
- hw->pdev = pdev;
- err = pci_enable_device(pdev);
-
- if (err)
- goto out;
-
- hw->cardnum = card_cnt;
- sprintf(hw->card_name, "hfc4s8s_%d", hw->cardnum);
- printk(KERN_INFO "HFC-4S/8S: found adapter %s (%s) at %s\n",
- driver_data->device_name, hw->card_name, pci_name(pdev));
-
- spin_lock_init(&hw->lock);
-
- hw->driver_data = *driver_data;
- hw->irq = pdev->irq;
- hw->iobase = pci_resource_start(pdev, 0);
-
- if (!request_region(hw->iobase, 8, hw->card_name)) {
- printk(KERN_INFO
- "HFC-4S/8S: failed to request address space at 0x%04x\n",
- hw->iobase);
- err = -EBUSY;
- goto out;
- }
-
- pci_set_drvdata(pdev, hw);
- err = setup_instance(hw);
- if (!err)
- card_cnt++;
- return (err);
-
-out:
- kfree(hw);
- return (err);
-}
-
-/**************************************/
-/* PCI hotplug interface: remove card */
-/**************************************/
-static void
-hfc4s8s_remove(struct pci_dev *pdev)
-{
- hfc4s8s_hw *hw = pci_get_drvdata(pdev);
-
- printk(KERN_INFO "HFC-4S/8S: removing card %d\n", hw->cardnum);
- hfc_hardware_enable(hw, 0, 0);
-
- if (hw->irq)
- free_irq(hw->irq, hw);
- hw->irq = 0;
- release_pci_ports(hw);
-
- card_cnt--;
- pci_disable_device(pdev);
- kfree(hw);
- return;
-}
-
-static struct pci_driver hfc4s8s_driver = {
- .name = "hfc4s8s_l1",
- .probe = hfc4s8s_probe,
- .remove = hfc4s8s_remove,
- .id_table = hfc4s8s_ids,
-};
-
-/**********************/
-/* driver Module init */
-/**********************/
-static int __init
-hfc4s8s_module_init(void)
-{
- int err;
-
- printk(KERN_INFO
- "HFC-4S/8S: Layer 1 driver module for HFC-4S/8S isdn chips, %s\n",
- hfc4s8s_rev);
- printk(KERN_INFO
- "HFC-4S/8S: (C) 2003 Cornelius Consult, www.cornelius-consult.de\n");
-
- card_cnt = 0;
-
- err = pci_register_driver(&hfc4s8s_driver);
- if (err < 0) {
- goto out;
- }
- printk(KERN_INFO "HFC-4S/8S: found %d cards\n", card_cnt);
-
- return 0;
-out:
- return (err);
-} /* hfc4s8s_module_init */
-
-/*************************************/
-/* driver module exit : */
-/* release the HFC-4s/8s hardware */
-/*************************************/
-static void __exit
-hfc4s8s_module_exit(void)
-{
- pci_unregister_driver(&hfc4s8s_driver);
- printk(KERN_INFO "HFC-4S/8S: module removed\n");
-} /* hfc4s8s_module_exit */
-
-module_init(hfc4s8s_module_init);
-module_exit(hfc4s8s_module_exit);
diff --git a/drivers/isdn/hisax/hfc4s8s_l1.h b/drivers/isdn/hisax/hfc4s8s_l1.h
deleted file mode 100644
index 4665b9d5df16..000000000000
--- a/drivers/isdn/hisax/hfc4s8s_l1.h
+++ /dev/null
@@ -1,89 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/***************************************************************/
-/* $Id: hfc4s8s_l1.h,v 1.1 2005/02/02 17:28:55 martinb1 Exp $ */
-/* */
-/* This file is a minimal required extraction of hfc48scu.h */
-/* (Genero 3.2, HFC XML 1.7a for HFC-E1, HFC-4S and HFC-8S) */
-/* */
-/* To get this complete register description contact */
-/* Cologne Chip AG : */
-/* Internet: http://www.colognechip.com/ */
-/* E-Mail: info@colognechip.com */
-/***************************************************************/
-
-#ifndef _HFC4S8S_L1_H_
-#define _HFC4S8S_L1_H_
-
-
-/*
- * include Genero generated HFC-4S/8S header file hfc48scu.h
- * for complete register description. This will define _HFC48SCU_H_
- * to prevent redefinitions
- */
-
-// #include "hfc48scu.h"
-
-#ifndef _HFC48SCU_H_
-#define _HFC48SCU_H_
-
-#ifndef PCI_VENDOR_ID_CCD
-#define PCI_VENDOR_ID_CCD 0x1397
-#endif
-
-#define CHIP_ID_4S 0x0C
-#define CHIP_ID_8S 0x08
-#define PCI_DEVICE_ID_4S 0x08B4
-#define PCI_DEVICE_ID_8S 0x16B8
-
-#define R_IRQ_MISC 0x11
-#define M_TI_IRQ 0x02
-#define A_ST_RD_STA 0x30
-#define A_ST_WR_STA 0x30
-#define M_SET_G2_G3 0x80
-#define A_ST_CTRL0 0x31
-#define A_ST_CTRL2 0x33
-#define A_ST_CLK_DLY 0x37
-#define A_Z1 0x04
-#define A_Z2 0x06
-#define R_CIRM 0x00
-#define M_SRES 0x08
-#define R_CTRL 0x01
-#define R_BRG_PCM_CFG 0x02
-#define M_PCM_CLK 0x20
-#define R_RAM_MISC 0x0C
-#define M_FZ_MD 0x80
-#define R_FIFO_MD 0x0D
-#define A_INC_RES_FIFO 0x0E
-#define R_FIFO 0x0F
-#define A_F1 0x0C
-#define A_F2 0x0D
-#define R_IRQ_OVIEW 0x10
-#define R_CHIP_ID 0x16
-#define R_STATUS 0x1C
-#define M_BUSY 0x01
-#define M_MISC_IRQSTA 0x40
-#define M_FR_IRQSTA 0x80
-#define R_CHIP_RV 0x1F
-#define R_IRQ_CTRL 0x13
-#define M_FIFO_IRQ 0x01
-#define M_GLOB_IRQ_EN 0x08
-#define R_PCM_MD0 0x14
-#define M_PCM_MD 0x01
-#define A_FIFO_DATA0 0x80
-#define R_TI_WD 0x1A
-#define R_PWM1 0x39
-#define R_PWM_MD 0x46
-#define R_IRQ_FIFO_BL0 0xC8
-#define A_CON_HDLC 0xFA
-#define A_SUBCH_CFG 0xFB
-#define A_IRQ_MSK 0xFF
-#define R_SCI_MSK 0x12
-#define R_ST_SEL 0x16
-#define R_ST_SYNC 0x17
-#define M_AUTO_SYNC 0x08
-#define R_SCI 0x12
-#define R_IRQMSK_MISC 0x11
-#define M_TI_IRQMSK 0x02
-
-#endif /* _HFC4S8S_L1_H_ */
-#endif /* _HFC48SCU_H_ */
diff --git a/drivers/isdn/hisax/hfc_2bds0.c b/drivers/isdn/hisax/hfc_2bds0.c
deleted file mode 100644
index 3715fa0343db..000000000000
--- a/drivers/isdn/hisax/hfc_2bds0.c
+++ /dev/null
@@ -1,1078 +0,0 @@
-/* $Id: hfc_2bds0.c,v 1.18.2.6 2004/02/11 13:21:33 keil Exp $
- *
- * specific routines for CCD's HFC 2BDS0
- *
- * Author Karsten Keil
- * Copyright by Karsten Keil <keil@isdn4linux.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include "hisax.h"
-#include "hfc_2bds0.h"
-#include "isdnl1.h"
-#include <linux/interrupt.h>
-/*
- #define KDEBUG_DEF
- #include "kdebug.h"
-*/
-
-#define byteout(addr, val) outb(val, addr)
-#define bytein(addr) inb(addr)
-
-static void
-dummyf(struct IsdnCardState *cs, u_char *data, int size)
-{
- printk(KERN_WARNING "HiSax: hfcd dummy fifo called\n");
-}
-
-static inline u_char
-ReadReg(struct IsdnCardState *cs, int data, u_char reg)
-{
- register u_char ret;
-
- if (data) {
- if (cs->hw.hfcD.cip != reg) {
- cs->hw.hfcD.cip = reg;
- byteout(cs->hw.hfcD.addr | 1, reg);
- }
- ret = bytein(cs->hw.hfcD.addr);
-#ifdef HFC_REG_DEBUG
- if (cs->debug & L1_DEB_HSCX_FIFO && (data != 2))
- debugl1(cs, "t3c RD %02x %02x", reg, ret);
-#endif
- } else
- ret = bytein(cs->hw.hfcD.addr | 1);
- return (ret);
-}
-
-static inline void
-WriteReg(struct IsdnCardState *cs, int data, u_char reg, u_char value)
-{
- if (cs->hw.hfcD.cip != reg) {
- cs->hw.hfcD.cip = reg;
- byteout(cs->hw.hfcD.addr | 1, reg);
- }
- if (data)
- byteout(cs->hw.hfcD.addr, value);
-#ifdef HFC_REG_DEBUG
- if (cs->debug & L1_DEB_HSCX_FIFO && (data != HFCD_DATA_NODEB))
- debugl1(cs, "t3c W%c %02x %02x", data ? 'D' : 'C', reg, value);
-#endif
-}
-
-/* Interface functions */
-
-static u_char
-readreghfcd(struct IsdnCardState *cs, u_char offset)
-{
- return (ReadReg(cs, HFCD_DATA, offset));
-}
-
-static void
-writereghfcd(struct IsdnCardState *cs, u_char offset, u_char value)
-{
- WriteReg(cs, HFCD_DATA, offset, value);
-}
-
-static inline int
-WaitForBusy(struct IsdnCardState *cs)
-{
- int to = 130;
-
- while (!(ReadReg(cs, HFCD_DATA, HFCD_STAT) & HFCD_BUSY) && to) {
- udelay(1);
- to--;
- }
- if (!to)
- printk(KERN_WARNING "HiSax: WaitForBusy timeout\n");
- return (to);
-}
-
-static inline int
-WaitNoBusy(struct IsdnCardState *cs)
-{
- int to = 130;
-
- while ((ReadReg(cs, HFCD_STATUS, HFCD_STATUS) & HFCD_BUSY) && to) {
- udelay(1);
- to--;
- }
- if (!to)
- printk(KERN_WARNING "HiSax: WaitNoBusy timeout\n");
- return (to);
-}
-
-static int
-SelFiFo(struct IsdnCardState *cs, u_char FiFo)
-{
- u_char cip;
-
- if (cs->hw.hfcD.fifo == FiFo)
- return (1);
- switch (FiFo) {
- case 0: cip = HFCB_FIFO | HFCB_Z1 | HFCB_SEND | HFCB_B1;
- break;
- case 1: cip = HFCB_FIFO | HFCB_Z1 | HFCB_REC | HFCB_B1;
- break;
- case 2: cip = HFCB_FIFO | HFCB_Z1 | HFCB_SEND | HFCB_B2;
- break;
- case 3: cip = HFCB_FIFO | HFCB_Z1 | HFCB_REC | HFCB_B2;
- break;
- case 4: cip = HFCD_FIFO | HFCD_Z1 | HFCD_SEND;
- break;
- case 5: cip = HFCD_FIFO | HFCD_Z1 | HFCD_REC;
- break;
- default:
- debugl1(cs, "SelFiFo Error");
- return (0);
- }
- cs->hw.hfcD.fifo = FiFo;
- WaitNoBusy(cs);
- cs->BC_Write_Reg(cs, HFCD_DATA, cip, 0);
- WaitForBusy(cs);
- return (2);
-}
-
-static int
-GetFreeFifoBytes_B(struct BCState *bcs)
-{
- int s;
-
- if (bcs->hw.hfc.f1 == bcs->hw.hfc.f2)
- return (bcs->cs->hw.hfcD.bfifosize);
- s = bcs->hw.hfc.send[bcs->hw.hfc.f1] - bcs->hw.hfc.send[bcs->hw.hfc.f2];
- if (s <= 0)
- s += bcs->cs->hw.hfcD.bfifosize;
- s = bcs->cs->hw.hfcD.bfifosize - s;
- return (s);
-}
-
-static int
-GetFreeFifoBytes_D(struct IsdnCardState *cs)
-{
- int s;
-
- if (cs->hw.hfcD.f1 == cs->hw.hfcD.f2)
- return (cs->hw.hfcD.dfifosize);
- s = cs->hw.hfcD.send[cs->hw.hfcD.f1] - cs->hw.hfcD.send[cs->hw.hfcD.f2];
- if (s <= 0)
- s += cs->hw.hfcD.dfifosize;
- s = cs->hw.hfcD.dfifosize - s;
- return (s);
-}
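
GetFreeFifoBytes_B() and GetFreeFifoBytes_D() above derive the free space of a circular fifo from the Z values recorded at the F1 and F2 positions: the difference, wrapped by the fifo size, is the occupied part, and the rest is free. A standalone sketch with two worked cases; the function name and the 2048-byte size are made up, the real sizes being the driver's bfifosize/dfifosize:

#include <stdio.h>

/* The driver short-circuits the empty case (f1 == f2) before this. */
static int free_fifo_bytes(int z_f1, int z_f2, int fifosize)
{
        int used = z_f1 - z_f2;

        if (used <= 0)
                used += fifosize;       /* write pointer wrapped */
        return fifosize - used;
}

int main(void)
{
        printf("%d\n", free_fifo_bytes(100, 600, 2048));  /* 2048 - 1548 = 500  */
        printf("%d\n", free_fifo_bytes(600, 100, 2048));  /* 2048 - 500  = 1548 */
        return 0;
}
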
-
-static int
-ReadZReg(struct IsdnCardState *cs, u_char reg)
-{
- int val;
-
- WaitNoBusy(cs);
- val = 256 * ReadReg(cs, HFCD_DATA, reg | HFCB_Z_HIGH);
- WaitNoBusy(cs);
- val += ReadReg(cs, HFCD_DATA, reg | HFCB_Z_LOW);
- return (val);
-}
-
-static struct sk_buff
-*hfc_empty_fifo(struct BCState *bcs, int count)
-{
- u_char *ptr;
- struct sk_buff *skb;
- struct IsdnCardState *cs = bcs->cs;
- int idx;
- int chksum;
- u_char stat, cip;
-
- if ((cs->debug & L1_DEB_HSCX) && !(cs->debug & L1_DEB_HSCX_FIFO))
- debugl1(cs, "hfc_empty_fifo");
- idx = 0;
- if (count > HSCX_BUFMAX + 3) {
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "hfc_empty_fifo: incoming packet too large");
- cip = HFCB_FIFO | HFCB_FIFO_OUT | HFCB_REC | HFCB_CHANNEL(bcs->channel);
- while (idx++ < count) {
- WaitNoBusy(cs);
- ReadReg(cs, HFCD_DATA_NODEB, cip);
- }
- skb = NULL;
- } else if (count < 4) {
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "hfc_empty_fifo: incoming packet too small");
- cip = HFCB_FIFO | HFCB_FIFO_OUT | HFCB_REC | HFCB_CHANNEL(bcs->channel);
-#ifdef ERROR_STATISTIC
- bcs->err_inv++;
-#endif
- while ((idx++ < count) && WaitNoBusy(cs))
- ReadReg(cs, HFCD_DATA_NODEB, cip);
- skb = NULL;
- } else if (!(skb = dev_alloc_skb(count - 3)))
- printk(KERN_WARNING "HFC: receive out of memory\n");
- else {
- ptr = skb_put(skb, count - 3);
- idx = 0;
- cip = HFCB_FIFO | HFCB_FIFO_OUT | HFCB_REC | HFCB_CHANNEL(bcs->channel);
- while (idx < (count - 3)) {
- if (!WaitNoBusy(cs))
- break;
- *ptr = ReadReg(cs, HFCD_DATA_NODEB, cip);
- ptr++;
- idx++;
- }
- if (idx != count - 3) {
- debugl1(cs, "RFIFO BUSY error");
- printk(KERN_WARNING "HFC FIFO channel %d BUSY Error\n", bcs->channel);
- dev_kfree_skb_irq(skb);
- skb = NULL;
- } else {
- WaitNoBusy(cs);
- chksum = (ReadReg(cs, HFCD_DATA, cip) << 8);
- WaitNoBusy(cs);
- chksum += ReadReg(cs, HFCD_DATA, cip);
- WaitNoBusy(cs);
- stat = ReadReg(cs, HFCD_DATA, cip);
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "hfc_empty_fifo %d chksum %x stat %x",
- bcs->channel, chksum, stat);
- if (stat) {
- debugl1(cs, "FIFO CRC error");
- dev_kfree_skb_irq(skb);
- skb = NULL;
-#ifdef ERROR_STATISTIC
- bcs->err_crc++;
-#endif
- }
- }
- }
- WaitForBusy(cs);
- WaitNoBusy(cs);
- stat = ReadReg(cs, HFCD_DATA, HFCB_FIFO | HFCB_F2_INC |
- HFCB_REC | HFCB_CHANNEL(bcs->channel));
- WaitForBusy(cs);
- return (skb);
-}
-
-static void
-hfc_fill_fifo(struct BCState *bcs)
-{
- struct IsdnCardState *cs = bcs->cs;
- int idx, fcnt;
- int count;
- u_char cip;
-
- if (!bcs->tx_skb)
- return;
- if (bcs->tx_skb->len <= 0)
- return;
- SelFiFo(cs, HFCB_SEND | HFCB_CHANNEL(bcs->channel));
- cip = HFCB_FIFO | HFCB_F1 | HFCB_SEND | HFCB_CHANNEL(bcs->channel);
- WaitNoBusy(cs);
- bcs->hw.hfc.f1 = ReadReg(cs, HFCD_DATA, cip);
- WaitNoBusy(cs);
- cip = HFCB_FIFO | HFCB_F2 | HFCB_SEND | HFCB_CHANNEL(bcs->channel);
- WaitNoBusy(cs);
- bcs->hw.hfc.f2 = ReadReg(cs, HFCD_DATA, cip);
- bcs->hw.hfc.send[bcs->hw.hfc.f1] = ReadZReg(cs, HFCB_FIFO | HFCB_Z1 | HFCB_SEND | HFCB_CHANNEL(bcs->channel));
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "hfc_fill_fifo %d f1(%d) f2(%d) z1(%x)",
- bcs->channel, bcs->hw.hfc.f1, bcs->hw.hfc.f2,
- bcs->hw.hfc.send[bcs->hw.hfc.f1]);
- fcnt = bcs->hw.hfc.f1 - bcs->hw.hfc.f2;
- if (fcnt < 0)
- fcnt += 32;
- if (fcnt > 30) {
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "hfc_fill_fifo more as 30 frames");
- return;
- }
- count = GetFreeFifoBytes_B(bcs);
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "hfc_fill_fifo %d count(%u/%d),%lx",
- bcs->channel, bcs->tx_skb->len,
- count, current->state);
- if (count < bcs->tx_skb->len) {
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "hfc_fill_fifo no fifo mem");
- return;
- }
- cip = HFCB_FIFO | HFCB_FIFO_IN | HFCB_SEND | HFCB_CHANNEL(bcs->channel);
- idx = 0;
- WaitForBusy(cs);
- WaitNoBusy(cs);
- WriteReg(cs, HFCD_DATA_NODEB, cip, bcs->tx_skb->data[idx++]);
- while (idx < bcs->tx_skb->len) {
- if (!WaitNoBusy(cs))
- break;
- WriteReg(cs, HFCD_DATA_NODEB, cip, bcs->tx_skb->data[idx]);
- idx++;
- }
- if (idx != bcs->tx_skb->len) {
- debugl1(cs, "FIFO Send BUSY error");
- printk(KERN_WARNING "HFC S FIFO channel %d BUSY Error\n", bcs->channel);
- } else {
- bcs->tx_cnt -= bcs->tx_skb->len;
- if (test_bit(FLG_LLI_L1WAKEUP, &bcs->st->lli.flag) &&
- (PACKET_NOACK != bcs->tx_skb->pkt_type)) {
- u_long flags;
- spin_lock_irqsave(&bcs->aclock, flags);
- bcs->ackcnt += bcs->tx_skb->len;
- spin_unlock_irqrestore(&bcs->aclock, flags);
- schedule_event(bcs, B_ACKPENDING);
- }
- dev_kfree_skb_any(bcs->tx_skb);
- bcs->tx_skb = NULL;
- }
- WaitForBusy(cs);
- WaitNoBusy(cs);
- ReadReg(cs, HFCD_DATA, HFCB_FIFO | HFCB_F1_INC | HFCB_SEND | HFCB_CHANNEL(bcs->channel));
- WaitForBusy(cs);
- test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
- return;
-}
-
-static void
-hfc_send_data(struct BCState *bcs)
-{
- struct IsdnCardState *cs = bcs->cs;
-
- if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
- hfc_fill_fifo(bcs);
- test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
- } else
- debugl1(cs, "send_data %d blocked", bcs->channel);
-}
-
-static void
-main_rec_2bds0(struct BCState *bcs)
-{
- struct IsdnCardState *cs = bcs->cs;
- int z1, z2, rcnt;
- u_char f1, f2, cip;
- int receive, count = 5;
- struct sk_buff *skb;
-
-Begin:
- count--;
- if (test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
- debugl1(cs, "rec_data %d blocked", bcs->channel);
- return;
- }
- SelFiFo(cs, HFCB_REC | HFCB_CHANNEL(bcs->channel));
- cip = HFCB_FIFO | HFCB_F1 | HFCB_REC | HFCB_CHANNEL(bcs->channel);
- WaitNoBusy(cs);
- f1 = ReadReg(cs, HFCD_DATA, cip);
- cip = HFCB_FIFO | HFCB_F2 | HFCB_REC | HFCB_CHANNEL(bcs->channel);
- WaitNoBusy(cs);
- f2 = ReadReg(cs, HFCD_DATA, cip);
- if (f1 != f2) {
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "hfc rec %d f1(%d) f2(%d)",
- bcs->channel, f1, f2);
- z1 = ReadZReg(cs, HFCB_FIFO | HFCB_Z1 | HFCB_REC | HFCB_CHANNEL(bcs->channel));
- z2 = ReadZReg(cs, HFCB_FIFO | HFCB_Z2 | HFCB_REC | HFCB_CHANNEL(bcs->channel));
- rcnt = z1 - z2;
- if (rcnt < 0)
- rcnt += cs->hw.hfcD.bfifosize;
- rcnt++;
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "hfc rec %d z1(%x) z2(%x) cnt(%d)",
- bcs->channel, z1, z2, rcnt);
- if ((skb = hfc_empty_fifo(bcs, rcnt))) {
- skb_queue_tail(&bcs->rqueue, skb);
- schedule_event(bcs, B_RCVBUFREADY);
- }
- rcnt = f1 - f2;
- if (rcnt < 0)
- rcnt += 32;
- if (rcnt > 1)
- receive = 1;
- else
- receive = 0;
- } else
- receive = 0;
- test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
- if (count && receive)
- goto Begin;
- return;
-}
-
-static void
-mode_2bs0(struct BCState *bcs, int mode, int bc)
-{
- struct IsdnCardState *cs = bcs->cs;
-
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "HFCD bchannel mode %d bchan %d/%d",
- mode, bc, bcs->channel);
- bcs->mode = mode;
- bcs->channel = bc;
- switch (mode) {
- case (L1_MODE_NULL):
- if (bc) {
- cs->hw.hfcD.conn |= 0x18;
- cs->hw.hfcD.sctrl &= ~SCTRL_B2_ENA;
- } else {
- cs->hw.hfcD.conn |= 0x3;
- cs->hw.hfcD.sctrl &= ~SCTRL_B1_ENA;
- }
- break;
- case (L1_MODE_TRANS):
- if (bc) {
- cs->hw.hfcD.ctmt |= 2;
- cs->hw.hfcD.conn &= ~0x18;
- cs->hw.hfcD.sctrl |= SCTRL_B2_ENA;
- } else {
- cs->hw.hfcD.ctmt |= 1;
- cs->hw.hfcD.conn &= ~0x3;
- cs->hw.hfcD.sctrl |= SCTRL_B1_ENA;
- }
- break;
- case (L1_MODE_HDLC):
- if (bc) {
- cs->hw.hfcD.ctmt &= ~2;
- cs->hw.hfcD.conn &= ~0x18;
- cs->hw.hfcD.sctrl |= SCTRL_B2_ENA;
- } else {
- cs->hw.hfcD.ctmt &= ~1;
- cs->hw.hfcD.conn &= ~0x3;
- cs->hw.hfcD.sctrl |= SCTRL_B1_ENA;
- }
- break;
- }
- WriteReg(cs, HFCD_DATA, HFCD_SCTRL, cs->hw.hfcD.sctrl);
- WriteReg(cs, HFCD_DATA, HFCD_CTMT, cs->hw.hfcD.ctmt);
- WriteReg(cs, HFCD_DATA, HFCD_CONN, cs->hw.hfcD.conn);
-}
-
-static void
-hfc_l2l1(struct PStack *st, int pr, void *arg)
-{
- struct BCState *bcs = st->l1.bcs;
- struct sk_buff *skb = arg;
- u_long flags;
-
- switch (pr) {
- case (PH_DATA | REQUEST):
- spin_lock_irqsave(&bcs->cs->lock, flags);
- if (bcs->tx_skb) {
- skb_queue_tail(&bcs->squeue, skb);
- } else {
- bcs->tx_skb = skb;
-// test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
- bcs->cs->BC_Send_Data(bcs);
- }
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
- break;
- case (PH_PULL | INDICATION):
- spin_lock_irqsave(&bcs->cs->lock, flags);
- if (bcs->tx_skb) {
- printk(KERN_WARNING "hfc_l2l1: this shouldn't happen\n");
- } else {
-// test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
- bcs->tx_skb = skb;
- bcs->cs->BC_Send_Data(bcs);
- }
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
- break;
- case (PH_PULL | REQUEST):
- if (!bcs->tx_skb) {
- test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
- st->l1.l1l2(st, PH_PULL | CONFIRM, NULL);
- } else
- test_and_set_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
- break;
- case (PH_ACTIVATE | REQUEST):
- spin_lock_irqsave(&bcs->cs->lock, flags);
- test_and_set_bit(BC_FLG_ACTIV, &bcs->Flag);
- mode_2bs0(bcs, st->l1.mode, st->l1.bc);
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
- l1_msg_b(st, pr, arg);
- break;
- case (PH_DEACTIVATE | REQUEST):
- l1_msg_b(st, pr, arg);
- break;
- case (PH_DEACTIVATE | CONFIRM):
- spin_lock_irqsave(&bcs->cs->lock, flags);
- test_and_clear_bit(BC_FLG_ACTIV, &bcs->Flag);
- test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
- mode_2bs0(bcs, 0, st->l1.bc);
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
- st->l1.l1l2(st, PH_DEACTIVATE | CONFIRM, NULL);
- break;
- }
-}
-
-static void
-close_2bs0(struct BCState *bcs)
-{
- mode_2bs0(bcs, 0, bcs->channel);
- if (test_and_clear_bit(BC_FLG_INIT, &bcs->Flag)) {
- skb_queue_purge(&bcs->rqueue);
- skb_queue_purge(&bcs->squeue);
- if (bcs->tx_skb) {
- dev_kfree_skb_any(bcs->tx_skb);
- bcs->tx_skb = NULL;
- test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
- }
- }
-}
-
-static int
-open_hfcstate(struct IsdnCardState *cs, struct BCState *bcs)
-{
- if (!test_and_set_bit(BC_FLG_INIT, &bcs->Flag)) {
- skb_queue_head_init(&bcs->rqueue);
- skb_queue_head_init(&bcs->squeue);
- }
- bcs->tx_skb = NULL;
- test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
- bcs->event = 0;
- bcs->tx_cnt = 0;
- return (0);
-}
-
-static int
-setstack_2b(struct PStack *st, struct BCState *bcs)
-{
- bcs->channel = st->l1.bc;
- if (open_hfcstate(st->l1.hardware, bcs))
- return (-1);
- st->l1.bcs = bcs;
- st->l2.l2l1 = hfc_l2l1;
- setstack_manager(st);
- bcs->st = st;
- setstack_l1_B(st);
- return (0);
-}
-
-static void
-hfcd_bh(struct work_struct *work)
-{
- struct IsdnCardState *cs =
- container_of(work, struct IsdnCardState, tqueue);
-
- if (test_and_clear_bit(D_L1STATECHANGE, &cs->event)) {
- switch (cs->dc.hfcd.ph_state) {
- case (0):
- l1_msg(cs, HW_RESET | INDICATION, NULL);
- break;
- case (3):
- l1_msg(cs, HW_DEACTIVATE | INDICATION, NULL);
- break;
- case (8):
- l1_msg(cs, HW_RSYNC | INDICATION, NULL);
- break;
- case (6):
- l1_msg(cs, HW_INFO2 | INDICATION, NULL);
- break;
- case (7):
- l1_msg(cs, HW_INFO4_P8 | INDICATION, NULL);
- break;
- default:
- break;
- }
- }
- if (test_and_clear_bit(D_RCVBUFREADY, &cs->event))
- DChannel_proc_rcv(cs);
- if (test_and_clear_bit(D_XMTBUFREADY, &cs->event))
- DChannel_proc_xmt(cs);
-}
-
-static
-int receive_dmsg(struct IsdnCardState *cs)
-{
- struct sk_buff *skb;
- int idx;
- int rcnt, z1, z2;
- u_char stat, cip, f1, f2;
- int chksum;
- int count = 5;
- u_char *ptr;
-
- if (test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
- debugl1(cs, "rec_dmsg blocked");
- return (1);
- }
- SelFiFo(cs, 4 | HFCD_REC);
- cip = HFCD_FIFO | HFCD_F1 | HFCD_REC;
- WaitNoBusy(cs);
- f1 = cs->readisac(cs, cip) & 0xf;
- cip = HFCD_FIFO | HFCD_F2 | HFCD_REC;
- WaitNoBusy(cs);
- f2 = cs->readisac(cs, cip) & 0xf;
- while ((f1 != f2) && count--) {
- z1 = ReadZReg(cs, HFCD_FIFO | HFCD_Z1 | HFCD_REC);
- z2 = ReadZReg(cs, HFCD_FIFO | HFCD_Z2 | HFCD_REC);
- rcnt = z1 - z2;
- if (rcnt < 0)
- rcnt += cs->hw.hfcD.dfifosize;
- rcnt++;
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "hfcd recd f1(%d) f2(%d) z1(%x) z2(%x) cnt(%d)",
- f1, f2, z1, z2, rcnt);
- idx = 0;
- cip = HFCD_FIFO | HFCD_FIFO_OUT | HFCD_REC;
- if (rcnt > MAX_DFRAME_LEN + 3) {
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "empty_fifo d: incoming packet too large");
- while (idx < rcnt) {
- if (!(WaitNoBusy(cs)))
- break;
- ReadReg(cs, HFCD_DATA_NODEB, cip);
- idx++;
- }
- } else if (rcnt < 4) {
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "empty_fifo d: incoming packet too small");
- while ((idx++ < rcnt) && WaitNoBusy(cs))
- ReadReg(cs, HFCD_DATA_NODEB, cip);
- } else if ((skb = dev_alloc_skb(rcnt - 3))) {
- ptr = skb_put(skb, rcnt - 3);
- while (idx < (rcnt - 3)) {
- if (!(WaitNoBusy(cs)))
- break;
- *ptr = ReadReg(cs, HFCD_DATA_NODEB, cip);
- idx++;
- ptr++;
- }
- if (idx != (rcnt - 3)) {
- debugl1(cs, "RFIFO D BUSY error");
- printk(KERN_WARNING "HFC DFIFO channel BUSY Error\n");
- dev_kfree_skb_irq(skb);
- skb = NULL;
-#ifdef ERROR_STATISTIC
- cs->err_rx++;
-#endif
- } else {
- WaitNoBusy(cs);
- chksum = (ReadReg(cs, HFCD_DATA, cip) << 8);
- WaitNoBusy(cs);
- chksum += ReadReg(cs, HFCD_DATA, cip);
- WaitNoBusy(cs);
- stat = ReadReg(cs, HFCD_DATA, cip);
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "empty_dfifo chksum %x stat %x",
- chksum, stat);
- if (stat) {
- debugl1(cs, "FIFO CRC error");
- dev_kfree_skb_irq(skb);
- skb = NULL;
-#ifdef ERROR_STATISTIC
- cs->err_crc++;
-#endif
- } else {
- skb_queue_tail(&cs->rq, skb);
- schedule_event(cs, D_RCVBUFREADY);
- }
- }
- } else
- printk(KERN_WARNING "HFC: D receive out of memory\n");
- WaitForBusy(cs);
- cip = HFCD_FIFO | HFCD_F2_INC | HFCD_REC;
- WaitNoBusy(cs);
- stat = ReadReg(cs, HFCD_DATA, cip);
- WaitForBusy(cs);
- cip = HFCD_FIFO | HFCD_F2 | HFCD_REC;
- WaitNoBusy(cs);
- f2 = cs->readisac(cs, cip) & 0xf;
- }
- test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
- return (1);
-}
-
-static void
-hfc_fill_dfifo(struct IsdnCardState *cs)
-{
- int idx, fcnt;
- int count;
- u_char cip;
-
- if (!cs->tx_skb)
- return;
- if (cs->tx_skb->len <= 0)
- return;
-
- SelFiFo(cs, 4 | HFCD_SEND);
- cip = HFCD_FIFO | HFCD_F1 | HFCD_SEND;
- WaitNoBusy(cs);
- cs->hw.hfcD.f1 = ReadReg(cs, HFCD_DATA, cip) & 0xf;
- WaitNoBusy(cs);
- cip = HFCD_FIFO | HFCD_F2 | HFCD_SEND;
- cs->hw.hfcD.f2 = ReadReg(cs, HFCD_DATA, cip) & 0xf;
- cs->hw.hfcD.send[cs->hw.hfcD.f1] = ReadZReg(cs, HFCD_FIFO | HFCD_Z1 | HFCD_SEND);
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "hfc_fill_Dfifo f1(%d) f2(%d) z1(%x)",
- cs->hw.hfcD.f1, cs->hw.hfcD.f2,
- cs->hw.hfcD.send[cs->hw.hfcD.f1]);
- fcnt = cs->hw.hfcD.f1 - cs->hw.hfcD.f2;
- if (fcnt < 0)
- fcnt += 16;
- if (fcnt > 14) {
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "hfc_fill_Dfifo more as 14 frames");
- return;
- }
- count = GetFreeFifoBytes_D(cs);
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "hfc_fill_Dfifo count(%u/%d)",
- cs->tx_skb->len, count);
- if (count < cs->tx_skb->len) {
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "hfc_fill_Dfifo no fifo mem");
- return;
- }
- cip = HFCD_FIFO | HFCD_FIFO_IN | HFCD_SEND;
- idx = 0;
- WaitForBusy(cs);
- WaitNoBusy(cs);
- WriteReg(cs, HFCD_DATA_NODEB, cip, cs->tx_skb->data[idx++]);
- while (idx < cs->tx_skb->len) {
- if (!(WaitNoBusy(cs)))
- break;
- WriteReg(cs, HFCD_DATA_NODEB, cip, cs->tx_skb->data[idx]);
- idx++;
- }
- if (idx != cs->tx_skb->len) {
- debugl1(cs, "DFIFO Send BUSY error");
- printk(KERN_WARNING "HFC S DFIFO channel BUSY Error\n");
- }
- WaitForBusy(cs);
- WaitNoBusy(cs);
- ReadReg(cs, HFCD_DATA, HFCD_FIFO | HFCD_F1_INC | HFCD_SEND);
- dev_kfree_skb_any(cs->tx_skb);
- cs->tx_skb = NULL;
- WaitForBusy(cs);
- return;
-}
-
-static
-struct BCState *Sel_BCS(struct IsdnCardState *cs, int channel)
-{
- if (cs->bcs[0].mode && (cs->bcs[0].channel == channel))
- return (&cs->bcs[0]);
- else if (cs->bcs[1].mode && (cs->bcs[1].channel == channel))
- return (&cs->bcs[1]);
- else
- return (NULL);
-}
-
-void
-hfc2bds0_interrupt(struct IsdnCardState *cs, u_char val)
-{
- u_char exval;
- struct BCState *bcs;
- int count = 15;
-
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "HFCD irq %x %s", val,
- test_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags) ?
- "locked" : "unlocked");
- val &= cs->hw.hfcD.int_m1;
- if (val & 0x40) { /* TE state machine irq */
- exval = cs->readisac(cs, HFCD_STATES) & 0xf;
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "ph_state chg %d->%d", cs->dc.hfcd.ph_state,
- exval);
- cs->dc.hfcd.ph_state = exval;
- schedule_event(cs, D_L1STATECHANGE);
- val &= ~0x40;
- }
- while (val) {
- if (test_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
- cs->hw.hfcD.int_s1 |= val;
- return;
- }
- if (cs->hw.hfcD.int_s1 & 0x18) {
- exval = val;
- val = cs->hw.hfcD.int_s1;
- cs->hw.hfcD.int_s1 = exval;
- }
- if (val & 0x08) {
- if (!(bcs = Sel_BCS(cs, 0))) {
- if (cs->debug)
- debugl1(cs, "hfcd spurious 0x08 IRQ");
- } else
- main_rec_2bds0(bcs);
- }
- if (val & 0x10) {
- if (!(bcs = Sel_BCS(cs, 1))) {
- if (cs->debug)
- debugl1(cs, "hfcd spurious 0x10 IRQ");
- } else
- main_rec_2bds0(bcs);
- }
- if (val & 0x01) {
- if (!(bcs = Sel_BCS(cs, 0))) {
- if (cs->debug)
- debugl1(cs, "hfcd spurious 0x01 IRQ");
- } else {
- if (bcs->tx_skb) {
- if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
- hfc_fill_fifo(bcs);
- test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
- } else
- debugl1(cs, "fill_data %d blocked", bcs->channel);
- } else {
- if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) {
- if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
- hfc_fill_fifo(bcs);
- test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
- } else
- debugl1(cs, "fill_data %d blocked", bcs->channel);
- } else {
- schedule_event(bcs, B_XMTBUFREADY);
- }
- }
- }
- }
- if (val & 0x02) {
- if (!(bcs = Sel_BCS(cs, 1))) {
- if (cs->debug)
- debugl1(cs, "hfcd spurious 0x02 IRQ");
- } else {
- if (bcs->tx_skb) {
- if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
- hfc_fill_fifo(bcs);
- test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
- } else
- debugl1(cs, "fill_data %d blocked", bcs->channel);
- } else {
- if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) {
- if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
- hfc_fill_fifo(bcs);
- test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
- } else
- debugl1(cs, "fill_data %d blocked", bcs->channel);
- } else {
- schedule_event(bcs, B_XMTBUFREADY);
- }
- }
- }
- }
- if (val & 0x20) { /* receive dframe */
- receive_dmsg(cs);
- }
- if (val & 0x04) { /* dframe transmitted */
- if (test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags))
- del_timer(&cs->dbusytimer);
- if (test_and_clear_bit(FLG_L1_DBUSY, &cs->HW_Flags))
- schedule_event(cs, D_CLEARBUSY);
- if (cs->tx_skb) {
- if (cs->tx_skb->len) {
- if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
- hfc_fill_dfifo(cs);
- test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
- } else {
- debugl1(cs, "hfc_fill_dfifo irq blocked");
- }
- goto afterXPR;
- } else {
- dev_kfree_skb_irq(cs->tx_skb);
- cs->tx_cnt = 0;
- cs->tx_skb = NULL;
- }
- }
- if ((cs->tx_skb = skb_dequeue(&cs->sq))) {
- cs->tx_cnt = 0;
- if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
- hfc_fill_dfifo(cs);
- test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
- } else {
- debugl1(cs, "hfc_fill_dfifo irq blocked");
- }
- } else
- schedule_event(cs, D_XMTBUFREADY);
- }
- afterXPR:
- if (cs->hw.hfcD.int_s1 && count--) {
- val = cs->hw.hfcD.int_s1;
- cs->hw.hfcD.int_s1 = 0;
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "HFCD irq %x loop %d", val, 15-count);
- } else
- val = 0;
- }
-}
-
-static void
-HFCD_l1hw(struct PStack *st, int pr, void *arg)
-{
- struct IsdnCardState *cs = (struct IsdnCardState *) st->l1.hardware;
- struct sk_buff *skb = arg;
- u_long flags;
-
- switch (pr) {
- case (PH_DATA | REQUEST):
- if (cs->debug & DEB_DLOG_HEX)
- LogFrame(cs, skb->data, skb->len);
- if (cs->debug & DEB_DLOG_VERBOSE)
- dlogframe(cs, skb, 0);
- spin_lock_irqsave(&cs->lock, flags);
- if (cs->tx_skb) {
- skb_queue_tail(&cs->sq, skb);
-#ifdef L2FRAME_DEBUG /* psa */
- if (cs->debug & L1_DEB_LAPD)
- Logl2Frame(cs, skb, "PH_DATA Queued", 0);
-#endif
- } else {
- cs->tx_skb = skb;
- cs->tx_cnt = 0;
-#ifdef L2FRAME_DEBUG /* psa */
- if (cs->debug & L1_DEB_LAPD)
- Logl2Frame(cs, skb, "PH_DATA", 0);
-#endif
- if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
- hfc_fill_dfifo(cs);
- test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
- } else
- debugl1(cs, "hfc_fill_dfifo blocked");
-
- }
- spin_unlock_irqrestore(&cs->lock, flags);
- break;
- case (PH_PULL | INDICATION):
- spin_lock_irqsave(&cs->lock, flags);
- if (cs->tx_skb) {
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, " l2l1 tx_skb exist this shouldn't happen");
- skb_queue_tail(&cs->sq, skb);
- spin_unlock_irqrestore(&cs->lock, flags);
- break;
- }
- if (cs->debug & DEB_DLOG_HEX)
- LogFrame(cs, skb->data, skb->len);
- if (cs->debug & DEB_DLOG_VERBOSE)
- dlogframe(cs, skb, 0);
- cs->tx_skb = skb;
- cs->tx_cnt = 0;
-#ifdef L2FRAME_DEBUG /* psa */
- if (cs->debug & L1_DEB_LAPD)
- Logl2Frame(cs, skb, "PH_DATA_PULLED", 0);
-#endif
- if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
- hfc_fill_dfifo(cs);
- test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
- } else
- debugl1(cs, "hfc_fill_dfifo blocked");
- spin_unlock_irqrestore(&cs->lock, flags);
- break;
- case (PH_PULL | REQUEST):
-#ifdef L2FRAME_DEBUG /* psa */
- if (cs->debug & L1_DEB_LAPD)
- debugl1(cs, "-> PH_REQUEST_PULL");
-#endif
- if (!cs->tx_skb) {
- test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
- st->l1.l1l2(st, PH_PULL | CONFIRM, NULL);
- } else
- test_and_set_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
- break;
- case (HW_RESET | REQUEST):
- spin_lock_irqsave(&cs->lock, flags);
- cs->writeisac(cs, HFCD_STATES, HFCD_LOAD_STATE | 3); /* HFC ST 3 */
- udelay(6);
- cs->writeisac(cs, HFCD_STATES, 3); /* HFC ST 2 */
- cs->hw.hfcD.mst_m |= HFCD_MASTER;
- cs->writeisac(cs, HFCD_MST_MODE, cs->hw.hfcD.mst_m);
- cs->writeisac(cs, HFCD_STATES, HFCD_ACTIVATE | HFCD_DO_ACTION);
- spin_unlock_irqrestore(&cs->lock, flags);
- l1_msg(cs, HW_POWERUP | CONFIRM, NULL);
- break;
- case (HW_ENABLE | REQUEST):
- spin_lock_irqsave(&cs->lock, flags);
- cs->writeisac(cs, HFCD_STATES, HFCD_ACTIVATE | HFCD_DO_ACTION);
- spin_unlock_irqrestore(&cs->lock, flags);
- break;
- case (HW_DEACTIVATE | REQUEST):
- spin_lock_irqsave(&cs->lock, flags);
- cs->hw.hfcD.mst_m &= ~HFCD_MASTER;
- cs->writeisac(cs, HFCD_MST_MODE, cs->hw.hfcD.mst_m);
- spin_unlock_irqrestore(&cs->lock, flags);
- break;
- case (HW_INFO3 | REQUEST):
- spin_lock_irqsave(&cs->lock, flags);
- cs->hw.hfcD.mst_m |= HFCD_MASTER;
- cs->writeisac(cs, HFCD_MST_MODE, cs->hw.hfcD.mst_m);
- spin_unlock_irqrestore(&cs->lock, flags);
- break;
- default:
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "hfcd_l1hw unknown pr %4x", pr);
- break;
- }
-}
-
-static void
-setstack_hfcd(struct PStack *st, struct IsdnCardState *cs)
-{
- st->l1.l1hw = HFCD_l1hw;
-}
-
-static void
-hfc_dbusy_timer(struct timer_list *t)
-{
-}
-
-static unsigned int
-*init_send_hfcd(int cnt)
-{
- int i;
- unsigned *send;
-
- if (!(send = kmalloc_array(cnt, sizeof(unsigned int), GFP_ATOMIC))) {
- printk(KERN_WARNING
- "HiSax: No memory for hfcd.send\n");
- return (NULL);
- }
- for (i = 0; i < cnt; i++)
- send[i] = 0x1fff;
- return (send);
-}
-
-void
-init2bds0(struct IsdnCardState *cs)
-{
- cs->setstack_d = setstack_hfcd;
- if (!cs->hw.hfcD.send)
- cs->hw.hfcD.send = init_send_hfcd(16);
- if (!cs->bcs[0].hw.hfc.send)
- cs->bcs[0].hw.hfc.send = init_send_hfcd(32);
- if (!cs->bcs[1].hw.hfc.send)
- cs->bcs[1].hw.hfc.send = init_send_hfcd(32);
- cs->BC_Send_Data = &hfc_send_data;
- cs->bcs[0].BC_SetStack = setstack_2b;
- cs->bcs[1].BC_SetStack = setstack_2b;
- cs->bcs[0].BC_Close = close_2bs0;
- cs->bcs[1].BC_Close = close_2bs0;
- mode_2bs0(cs->bcs, 0, 0);
- mode_2bs0(cs->bcs + 1, 0, 1);
-}
-
-void
-release2bds0(struct IsdnCardState *cs)
-{
- kfree(cs->bcs[0].hw.hfc.send);
- cs->bcs[0].hw.hfc.send = NULL;
- kfree(cs->bcs[1].hw.hfc.send);
- cs->bcs[1].hw.hfc.send = NULL;
- kfree(cs->hw.hfcD.send);
- cs->hw.hfcD.send = NULL;
-}
-
-void
-set_cs_func(struct IsdnCardState *cs)
-{
- cs->readisac = &readreghfcd;
- cs->writeisac = &writereghfcd;
- cs->readisacfifo = &dummyf;
- cs->writeisacfifo = &dummyf;
- cs->BC_Read_Reg = &ReadReg;
- cs->BC_Write_Reg = &WriteReg;
- timer_setup(&cs->dbusytimer, hfc_dbusy_timer, 0);
- INIT_WORK(&cs->tqueue, hfcd_bh);
-}
diff --git a/drivers/isdn/hisax/hfc_2bds0.h b/drivers/isdn/hisax/hfc_2bds0.h
deleted file mode 100644
index 8c7582a3c51e..000000000000
--- a/drivers/isdn/hisax/hfc_2bds0.h
+++ /dev/null
@@ -1,128 +0,0 @@
-/* $Id: hfc_2bds0.h,v 1.6.2.2 2004/01/12 22:52:26 keil Exp $
- *
- * specific defines for CCD's HFC 2BDS0
- *
- * Author Karsten Keil
- * Copyright by Karsten Keil <keil@isdn4linux.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#define HFCD_CIRM 0x18
-#define HFCD_CTMT 0x19
-#define HFCD_INT_M1 0x1A
-#define HFCD_INT_M2 0x1B
-#define HFCD_INT_S1 0x1E
-#define HFCD_STAT 0x1C
-#define HFCD_STAT_DISB 0x1D
-#define HFCD_STATES 0x30
-#define HFCD_SCTRL 0x31
-#define HFCD_TEST 0x32
-#define HFCD_SQ 0x34
-#define HFCD_CLKDEL 0x37
-#define HFCD_MST_MODE 0x2E
-#define HFCD_CONN 0x2F
-
-#define HFCD_FIFO 0x80
-#define HFCD_Z1 0x10
-#define HFCD_Z2 0x18
-#define HFCD_Z_LOW 0x00
-#define HFCD_Z_HIGH 0x04
-#define HFCD_F1_INC 0x12
-#define HFCD_FIFO_IN 0x16
-#define HFCD_F1 0x1a
-#define HFCD_F2 0x1e
-#define HFCD_F2_INC 0x22
-#define HFCD_FIFO_OUT 0x26
-#define HFCD_REC 0x01
-#define HFCD_SEND 0x00
-
-#define HFCB_FIFO 0x80
-#define HFCB_Z1 0x00
-#define HFCB_Z2 0x08
-#define HFCB_Z_LOW 0x00
-#define HFCB_Z_HIGH 0x04
-#define HFCB_F1_INC 0x28
-#define HFCB_FIFO_IN 0x2c
-#define HFCB_F1 0x30
-#define HFCB_F2 0x34
-#define HFCB_F2_INC 0x38
-#define HFCB_FIFO_OUT 0x3c
-#define HFCB_REC 0x01
-#define HFCB_SEND 0x00
-#define HFCB_B1 0x00
-#define HFCB_B2 0x02
-#define HFCB_CHANNEL(ch) (ch ? HFCB_B2 : HFCB_B1)
-
-#define HFCD_STATUS 0
-#define HFCD_DATA 1
-#define HFCD_DATA_NODEB 2
-
-/* Status (READ) */
-#define HFCD_BUSY 0x01
-#define HFCD_BUSY_NBUSY 0x04
-#define HFCD_TIMER_ELAP 0x10
-#define HFCD_STATINT 0x20
-#define HFCD_FRAMEINT 0x40
-#define HFCD_ANYINT 0x80
-
-/* CTMT (Write) */
-#define HFCD_CLTIMER 0x80
-#define HFCD_TIM25 0x00
-#define HFCD_TIM50 0x08
-#define HFCD_TIM400 0x10
-#define HFCD_TIM800 0x18
-#define HFCD_AUTO_TIMER 0x20
-#define HFCD_TRANSB2 0x02
-#define HFCD_TRANSB1 0x01
-
-/* CIRM (Write) */
-#define HFCD_RESET 0x08
-#define HFCD_MEM8K 0x10
-#define HFCD_INTA 0x01
-#define HFCD_INTB 0x02
-#define HFCD_INTC 0x03
-#define HFCD_INTD 0x04
-#define HFCD_INTE 0x05
-#define HFCD_INTF 0x06
-
-/* INT_M1;INT_S1 */
-#define HFCD_INTS_B1TRANS 0x01
-#define HFCD_INTS_B2TRANS 0x02
-#define HFCD_INTS_DTRANS 0x04
-#define HFCD_INTS_B1REC 0x08
-#define HFCD_INTS_B2REC 0x10
-#define HFCD_INTS_DREC 0x20
-#define HFCD_INTS_L1STATE 0x40
-#define HFCD_INTS_TIMER 0x80
-
-/* INT_M2 */
-#define HFCD_IRQ_ENABLE 0x08
-
-/* STATES */
-#define HFCD_LOAD_STATE 0x10
-#define HFCD_ACTIVATE 0x20
-#define HFCD_DO_ACTION 0x40
-
-/* HFCD_MST_MODE */
-#define HFCD_MASTER 0x01
-
-/* HFCD_SCTRL */
-#define SCTRL_B1_ENA 0x01
-#define SCTRL_B2_ENA 0x02
-#define SCTRL_LOW_PRIO 0x08
-#define SCTRL_SQ_ENA 0x10
-#define SCTRL_TEST 0x20
-#define SCTRL_NONE_CAP 0x40
-#define SCTRL_PWR_DOWN 0x80
-
-/* HFCD_TEST */
-#define HFCD_AUTO_AWAKE 0x01
-
-extern void main_irq_2bds0(struct BCState *bcs);
-extern void init2bds0(struct IsdnCardState *cs);
-extern void release2bds0(struct IsdnCardState *cs);
-extern void hfc2bds0_interrupt(struct IsdnCardState *cs, u_char val);
-extern void set_cs_func(struct IsdnCardState *cs);
diff --git a/drivers/isdn/hisax/hfc_2bs0.c b/drivers/isdn/hisax/hfc_2bs0.c
deleted file mode 100644
index 34d59992839a..000000000000
--- a/drivers/isdn/hisax/hfc_2bs0.c
+++ /dev/null
@@ -1,591 +0,0 @@
-/* $Id: hfc_2bs0.c,v 1.20.2.6 2004/02/11 13:21:33 keil Exp $
- *
- * specific routines for CCD's HFC 2BS0
- *
- * Author Karsten Keil
- * Copyright by Karsten Keil <keil@isdn4linux.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/init.h>
-#include "hisax.h"
-#include "hfc_2bs0.h"
-#include "isac.h"
-#include "isdnl1.h"
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-
-static inline int
-WaitForBusy(struct IsdnCardState *cs)
-{
- int to = 130;
- u_char val;
-
- while (!(cs->BC_Read_Reg(cs, HFC_STATUS, 0) & HFC_BUSY) && to) {
- val = cs->BC_Read_Reg(cs, HFC_DATA, HFC_CIP | HFC_F2 |
- (cs->hw.hfc.cip & 3));
- udelay(1);
- to--;
- }
- if (!to) {
- printk(KERN_WARNING "HiSax: %s timeout\n", __func__);
- return (0);
- } else
- return (to);
-}
-
-static inline int
-WaitNoBusy(struct IsdnCardState *cs)
-{
- int to = 125;
-
- while ((cs->BC_Read_Reg(cs, HFC_STATUS, 0) & HFC_BUSY) && to) {
- udelay(1);
- to--;
- }
- if (!to) {
- printk(KERN_WARNING "HiSax: waitforBusy timeout\n");
- return (0);
- } else
- return (to);
-}
-
-static int
-GetFreeFifoBytes(struct BCState *bcs)
-{
- int s;
-
- if (bcs->hw.hfc.f1 == bcs->hw.hfc.f2)
- return (bcs->cs->hw.hfc.fifosize);
- s = bcs->hw.hfc.send[bcs->hw.hfc.f1] - bcs->hw.hfc.send[bcs->hw.hfc.f2];
- if (s <= 0)
- s += bcs->cs->hw.hfc.fifosize;
- s = bcs->cs->hw.hfc.fifosize - s;
- return (s);
-}
-
-static int
-ReadZReg(struct BCState *bcs, u_char reg)
-{
- int val;
-
- WaitNoBusy(bcs->cs);
- val = 256 * bcs->cs->BC_Read_Reg(bcs->cs, HFC_DATA, reg | HFC_CIP | HFC_Z_HIGH);
- WaitNoBusy(bcs->cs);
- val += bcs->cs->BC_Read_Reg(bcs->cs, HFC_DATA, reg | HFC_CIP | HFC_Z_LOW);
- return (val);
-}
-
-static void
-hfc_clear_fifo(struct BCState *bcs)
-{
- struct IsdnCardState *cs = bcs->cs;
- int idx, cnt;
- int rcnt, z1, z2;
- u_char cip, f1, f2;
-
- if ((cs->debug & L1_DEB_HSCX) && !(cs->debug & L1_DEB_HSCX_FIFO))
- debugl1(cs, "hfc_clear_fifo");
- cip = HFC_CIP | HFC_F1 | HFC_REC | HFC_CHANNEL(bcs->channel);
- if ((cip & 0xc3) != (cs->hw.hfc.cip & 0xc3)) {
- cs->BC_Write_Reg(cs, HFC_STATUS, cip, cip);
- WaitForBusy(cs);
- }
- WaitNoBusy(cs);
- f1 = cs->BC_Read_Reg(cs, HFC_DATA, cip);
- cip = HFC_CIP | HFC_F2 | HFC_REC | HFC_CHANNEL(bcs->channel);
- WaitNoBusy(cs);
- f2 = cs->BC_Read_Reg(cs, HFC_DATA, cip);
- z1 = ReadZReg(bcs, HFC_Z1 | HFC_REC | HFC_CHANNEL(bcs->channel));
- z2 = ReadZReg(bcs, HFC_Z2 | HFC_REC | HFC_CHANNEL(bcs->channel));
- cnt = 32;
- while (((f1 != f2) || (z1 != z2)) && cnt--) {
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "hfc clear %d f1(%d) f2(%d)",
- bcs->channel, f1, f2);
- rcnt = z1 - z2;
- if (rcnt < 0)
- rcnt += cs->hw.hfc.fifosize;
- if (rcnt)
- rcnt++;
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "hfc clear %d z1(%x) z2(%x) cnt(%d)",
- bcs->channel, z1, z2, rcnt);
- cip = HFC_CIP | HFC_FIFO_OUT | HFC_REC | HFC_CHANNEL(bcs->channel);
- idx = 0;
- while ((idx < rcnt) && WaitNoBusy(cs)) {
- cs->BC_Read_Reg(cs, HFC_DATA_NODEB, cip);
- idx++;
- }
- if (f1 != f2) {
- WaitNoBusy(cs);
- cs->BC_Read_Reg(cs, HFC_DATA, HFC_CIP | HFC_F2_INC | HFC_REC |
- HFC_CHANNEL(bcs->channel));
- WaitForBusy(cs);
- }
- cip = HFC_CIP | HFC_F1 | HFC_REC | HFC_CHANNEL(bcs->channel);
- WaitNoBusy(cs);
- f1 = cs->BC_Read_Reg(cs, HFC_DATA, cip);
- cip = HFC_CIP | HFC_F2 | HFC_REC | HFC_CHANNEL(bcs->channel);
- WaitNoBusy(cs);
- f2 = cs->BC_Read_Reg(cs, HFC_DATA, cip);
- z1 = ReadZReg(bcs, HFC_Z1 | HFC_REC | HFC_CHANNEL(bcs->channel));
- z2 = ReadZReg(bcs, HFC_Z2 | HFC_REC | HFC_CHANNEL(bcs->channel));
- }
- return;
-}
-
-
-static struct sk_buff
-*
-hfc_empty_fifo(struct BCState *bcs, int count)
-{
- u_char *ptr;
- struct sk_buff *skb;
- struct IsdnCardState *cs = bcs->cs;
- int idx;
- int chksum;
- u_char stat, cip;
-
- if ((cs->debug & L1_DEB_HSCX) && !(cs->debug & L1_DEB_HSCX_FIFO))
- debugl1(cs, "hfc_empty_fifo");
- idx = 0;
- if (count > HSCX_BUFMAX + 3) {
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "hfc_empty_fifo: incoming packet too large");
- cip = HFC_CIP | HFC_FIFO_OUT | HFC_REC | HFC_CHANNEL(bcs->channel);
- while ((idx++ < count) && WaitNoBusy(cs))
- cs->BC_Read_Reg(cs, HFC_DATA_NODEB, cip);
- WaitNoBusy(cs);
- stat = cs->BC_Read_Reg(cs, HFC_DATA, HFC_CIP | HFC_F2_INC | HFC_REC |
- HFC_CHANNEL(bcs->channel));
- WaitForBusy(cs);
- return (NULL);
- }
- if ((count < 4) && (bcs->mode != L1_MODE_TRANS)) {
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "hfc_empty_fifo: incoming packet too small");
- cip = HFC_CIP | HFC_FIFO_OUT | HFC_REC | HFC_CHANNEL(bcs->channel);
- while ((idx++ < count) && WaitNoBusy(cs))
- cs->BC_Read_Reg(cs, HFC_DATA_NODEB, cip);
- WaitNoBusy(cs);
- stat = cs->BC_Read_Reg(cs, HFC_DATA, HFC_CIP | HFC_F2_INC | HFC_REC |
- HFC_CHANNEL(bcs->channel));
- WaitForBusy(cs);
-#ifdef ERROR_STATISTIC
- bcs->err_inv++;
-#endif
- return (NULL);
- }
- if (bcs->mode == L1_MODE_TRANS)
- count -= 1;
- else
- count -= 3;
- if (!(skb = dev_alloc_skb(count)))
- printk(KERN_WARNING "HFC: receive out of memory\n");
- else {
- ptr = skb_put(skb, count);
- idx = 0;
- cip = HFC_CIP | HFC_FIFO_OUT | HFC_REC | HFC_CHANNEL(bcs->channel);
- while ((idx < count) && WaitNoBusy(cs)) {
- *ptr++ = cs->BC_Read_Reg(cs, HFC_DATA_NODEB, cip);
- idx++;
- }
- if (idx != count) {
- debugl1(cs, "RFIFO BUSY error");
- printk(KERN_WARNING "HFC FIFO channel %d BUSY Error\n", bcs->channel);
- dev_kfree_skb_any(skb);
- if (bcs->mode != L1_MODE_TRANS) {
- WaitNoBusy(cs);
- stat = cs->BC_Read_Reg(cs, HFC_DATA, HFC_CIP | HFC_F2_INC | HFC_REC |
- HFC_CHANNEL(bcs->channel));
- WaitForBusy(cs);
- }
- return (NULL);
- }
- if (bcs->mode != L1_MODE_TRANS) {
- WaitNoBusy(cs);
- chksum = (cs->BC_Read_Reg(cs, HFC_DATA, cip) << 8);
- WaitNoBusy(cs);
- chksum += cs->BC_Read_Reg(cs, HFC_DATA, cip);
- WaitNoBusy(cs);
- stat = cs->BC_Read_Reg(cs, HFC_DATA, cip);
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "hfc_empty_fifo %d chksum %x stat %x",
- bcs->channel, chksum, stat);
- if (stat) {
- debugl1(cs, "FIFO CRC error");
- dev_kfree_skb_any(skb);
- skb = NULL;
-#ifdef ERROR_STATISTIC
- bcs->err_crc++;
-#endif
- }
- WaitNoBusy(cs);
- stat = cs->BC_Read_Reg(cs, HFC_DATA, HFC_CIP | HFC_F2_INC | HFC_REC |
- HFC_CHANNEL(bcs->channel));
- WaitForBusy(cs);
- }
- }
- return (skb);
-}
-
-static void
-hfc_fill_fifo(struct BCState *bcs)
-{
- struct IsdnCardState *cs = bcs->cs;
- int idx, fcnt;
- int count;
- int z1, z2;
- u_char cip;
-
- if (!bcs->tx_skb)
- return;
- if (bcs->tx_skb->len <= 0)
- return;
-
- cip = HFC_CIP | HFC_F1 | HFC_SEND | HFC_CHANNEL(bcs->channel);
- if ((cip & 0xc3) != (cs->hw.hfc.cip & 0xc3)) {
- cs->BC_Write_Reg(cs, HFC_STATUS, cip, cip);
- WaitForBusy(cs);
- }
- WaitNoBusy(cs);
- if (bcs->mode != L1_MODE_TRANS) {
- bcs->hw.hfc.f1 = cs->BC_Read_Reg(cs, HFC_DATA, cip);
- cip = HFC_CIP | HFC_F2 | HFC_SEND | HFC_CHANNEL(bcs->channel);
- WaitNoBusy(cs);
- bcs->hw.hfc.f2 = cs->BC_Read_Reg(cs, HFC_DATA, cip);
- bcs->hw.hfc.send[bcs->hw.hfc.f1] = ReadZReg(bcs, HFC_Z1 | HFC_SEND | HFC_CHANNEL(bcs->channel));
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "hfc_fill_fifo %d f1(%d) f2(%d) z1(%x)",
- bcs->channel, bcs->hw.hfc.f1, bcs->hw.hfc.f2,
- bcs->hw.hfc.send[bcs->hw.hfc.f1]);
- fcnt = bcs->hw.hfc.f1 - bcs->hw.hfc.f2;
- if (fcnt < 0)
- fcnt += 32;
- if (fcnt > 30) {
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "hfc_fill_fifo more as 30 frames");
- return;
- }
- count = GetFreeFifoBytes(bcs);
- }
- else {
- WaitForBusy(cs);
- z1 = ReadZReg(bcs, HFC_Z1 | HFC_REC | HFC_CHANNEL(bcs->channel));
- z2 = ReadZReg(bcs, HFC_Z2 | HFC_REC | HFC_CHANNEL(bcs->channel));
- count = z1 - z2;
- if (count < 0)
- count += cs->hw.hfc.fifosize;
- } /* L1_MODE_TRANS */
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "hfc_fill_fifo %d count(%u/%d)",
- bcs->channel, bcs->tx_skb->len,
- count);
- if (count < bcs->tx_skb->len) {
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "hfc_fill_fifo no fifo mem");
- return;
- }
- cip = HFC_CIP | HFC_FIFO_IN | HFC_SEND | HFC_CHANNEL(bcs->channel);
- idx = 0;
- while ((idx < bcs->tx_skb->len) && WaitNoBusy(cs))
- cs->BC_Write_Reg(cs, HFC_DATA_NODEB, cip, bcs->tx_skb->data[idx++]);
- if (idx != bcs->tx_skb->len) {
- debugl1(cs, "FIFO Send BUSY error");
- printk(KERN_WARNING "HFC S FIFO channel %d BUSY Error\n", bcs->channel);
- } else {
- count = bcs->tx_skb->len;
- bcs->tx_cnt -= count;
- if (PACKET_NOACK == bcs->tx_skb->pkt_type)
- count = -1;
- dev_kfree_skb_any(bcs->tx_skb);
- bcs->tx_skb = NULL;
- if (bcs->mode != L1_MODE_TRANS) {
- WaitForBusy(cs);
- WaitNoBusy(cs);
- cs->BC_Read_Reg(cs, HFC_DATA, HFC_CIP | HFC_F1_INC | HFC_SEND | HFC_CHANNEL(bcs->channel));
- }
- if (test_bit(FLG_LLI_L1WAKEUP, &bcs->st->lli.flag) &&
- (count >= 0)) {
- u_long flags;
- spin_lock_irqsave(&bcs->aclock, flags);
- bcs->ackcnt += count;
- spin_unlock_irqrestore(&bcs->aclock, flags);
- schedule_event(bcs, B_ACKPENDING);
- }
- test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
- }
- return;
-}
-
-void
-main_irq_hfc(struct BCState *bcs)
-{
- struct IsdnCardState *cs = bcs->cs;
- int z1, z2, rcnt;
- u_char f1, f2, cip;
- int receive, transmit, count = 5;
- struct sk_buff *skb;
-
-Begin:
- count--;
- cip = HFC_CIP | HFC_F1 | HFC_REC | HFC_CHANNEL(bcs->channel);
- if ((cip & 0xc3) != (cs->hw.hfc.cip & 0xc3)) {
- cs->BC_Write_Reg(cs, HFC_STATUS, cip, cip);
- WaitForBusy(cs);
- }
- WaitNoBusy(cs);
- receive = 0;
- if (bcs->mode == L1_MODE_HDLC) {
- f1 = cs->BC_Read_Reg(cs, HFC_DATA, cip);
- cip = HFC_CIP | HFC_F2 | HFC_REC | HFC_CHANNEL(bcs->channel);
- WaitNoBusy(cs);
- f2 = cs->BC_Read_Reg(cs, HFC_DATA, cip);
- if (f1 != f2) {
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "hfc rec %d f1(%d) f2(%d)",
- bcs->channel, f1, f2);
- receive = 1;
- }
- }
- if (receive || (bcs->mode == L1_MODE_TRANS)) {
- WaitForBusy(cs);
- z1 = ReadZReg(bcs, HFC_Z1 | HFC_REC | HFC_CHANNEL(bcs->channel));
- z2 = ReadZReg(bcs, HFC_Z2 | HFC_REC | HFC_CHANNEL(bcs->channel));
- rcnt = z1 - z2;
- if (rcnt < 0)
- rcnt += cs->hw.hfc.fifosize;
- if ((bcs->mode == L1_MODE_HDLC) || (rcnt)) {
- rcnt++;
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "hfc rec %d z1(%x) z2(%x) cnt(%d)",
- bcs->channel, z1, z2, rcnt);
- /* sti(); */
- if ((skb = hfc_empty_fifo(bcs, rcnt))) {
- skb_queue_tail(&bcs->rqueue, skb);
- schedule_event(bcs, B_RCVBUFREADY);
- }
- }
- receive = 1;
- }
- if (bcs->tx_skb) {
- transmit = 1;
- test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
- hfc_fill_fifo(bcs);
- if (test_bit(BC_FLG_BUSY, &bcs->Flag))
- transmit = 0;
- } else {
- if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) {
- transmit = 1;
- test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
- hfc_fill_fifo(bcs);
- if (test_bit(BC_FLG_BUSY, &bcs->Flag))
- transmit = 0;
- } else {
- transmit = 0;
- schedule_event(bcs, B_XMTBUFREADY);
- }
- }
- if ((receive || transmit) && count)
- goto Begin;
- return;
-}
-
-static void
-mode_hfc(struct BCState *bcs, int mode, int bc)
-{
- struct IsdnCardState *cs = bcs->cs;
-
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "HFC 2BS0 mode %d bchan %d/%d",
- mode, bc, bcs->channel);
- bcs->mode = mode;
- bcs->channel = bc;
-
- switch (mode) {
- case (L1_MODE_NULL):
- if (bc) {
- cs->hw.hfc.ctmt &= ~1;
- cs->hw.hfc.isac_spcr &= ~0x03;
- }
- else {
- cs->hw.hfc.ctmt &= ~2;
- cs->hw.hfc.isac_spcr &= ~0x0c;
- }
- break;
- case (L1_MODE_TRANS):
- cs->hw.hfc.ctmt &= ~(1 << bc); /* set HDLC mode */
- cs->BC_Write_Reg(cs, HFC_STATUS, cs->hw.hfc.ctmt, cs->hw.hfc.ctmt);
- hfc_clear_fifo(bcs); /* complete fifo clear */
- if (bc) {
- cs->hw.hfc.ctmt |= 1;
- cs->hw.hfc.isac_spcr &= ~0x03;
- cs->hw.hfc.isac_spcr |= 0x02;
- } else {
- cs->hw.hfc.ctmt |= 2;
- cs->hw.hfc.isac_spcr &= ~0x0c;
- cs->hw.hfc.isac_spcr |= 0x08;
- }
- break;
- case (L1_MODE_HDLC):
- if (bc) {
- cs->hw.hfc.ctmt &= ~1;
- cs->hw.hfc.isac_spcr &= ~0x03;
- cs->hw.hfc.isac_spcr |= 0x02;
- } else {
- cs->hw.hfc.ctmt &= ~2;
- cs->hw.hfc.isac_spcr &= ~0x0c;
- cs->hw.hfc.isac_spcr |= 0x08;
- }
- break;
- }
- cs->BC_Write_Reg(cs, HFC_STATUS, cs->hw.hfc.ctmt, cs->hw.hfc.ctmt);
- cs->writeisac(cs, ISAC_SPCR, cs->hw.hfc.isac_spcr);
- if (mode == L1_MODE_HDLC)
- hfc_clear_fifo(bcs);
-}
-
-static void
-hfc_l2l1(struct PStack *st, int pr, void *arg)
-{
- struct BCState *bcs = st->l1.bcs;
- struct sk_buff *skb = arg;
- u_long flags;
-
- switch (pr) {
- case (PH_DATA | REQUEST):
- spin_lock_irqsave(&bcs->cs->lock, flags);
- if (bcs->tx_skb) {
- skb_queue_tail(&bcs->squeue, skb);
- } else {
- bcs->tx_skb = skb;
- test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
- bcs->cs->BC_Send_Data(bcs);
- }
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
- break;
- case (PH_PULL | INDICATION):
- spin_lock_irqsave(&bcs->cs->lock, flags);
- if (bcs->tx_skb) {
- printk(KERN_WARNING "hfc_l2l1: this shouldn't happen\n");
- } else {
- test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
- bcs->tx_skb = skb;
- bcs->cs->BC_Send_Data(bcs);
- }
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
- break;
- case (PH_PULL | REQUEST):
- if (!bcs->tx_skb) {
- test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
- st->l1.l1l2(st, PH_PULL | CONFIRM, NULL);
- } else
- test_and_set_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
- break;
- case (PH_ACTIVATE | REQUEST):
- spin_lock_irqsave(&bcs->cs->lock, flags);
- test_and_set_bit(BC_FLG_ACTIV, &bcs->Flag);
- mode_hfc(bcs, st->l1.mode, st->l1.bc);
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
- l1_msg_b(st, pr, arg);
- break;
- case (PH_DEACTIVATE | REQUEST):
- l1_msg_b(st, pr, arg);
- break;
- case (PH_DEACTIVATE | CONFIRM):
- spin_lock_irqsave(&bcs->cs->lock, flags);
- test_and_clear_bit(BC_FLG_ACTIV, &bcs->Flag);
- test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
- mode_hfc(bcs, 0, st->l1.bc);
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
- st->l1.l1l2(st, PH_DEACTIVATE | CONFIRM, NULL);
- break;
- }
-}
-
-
-static void
-close_hfcstate(struct BCState *bcs)
-{
- mode_hfc(bcs, 0, bcs->channel);
- if (test_bit(BC_FLG_INIT, &bcs->Flag)) {
- skb_queue_purge(&bcs->rqueue);
- skb_queue_purge(&bcs->squeue);
- if (bcs->tx_skb) {
- dev_kfree_skb_any(bcs->tx_skb);
- bcs->tx_skb = NULL;
- test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
- }
- }
- test_and_clear_bit(BC_FLG_INIT, &bcs->Flag);
-}
-
-static int
-open_hfcstate(struct IsdnCardState *cs, struct BCState *bcs)
-{
- if (!test_and_set_bit(BC_FLG_INIT, &bcs->Flag)) {
- skb_queue_head_init(&bcs->rqueue);
- skb_queue_head_init(&bcs->squeue);
- }
- bcs->tx_skb = NULL;
- test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
- bcs->event = 0;
- bcs->tx_cnt = 0;
- return (0);
-}
-
-static int
-setstack_hfc(struct PStack *st, struct BCState *bcs)
-{
- bcs->channel = st->l1.bc;
- if (open_hfcstate(st->l1.hardware, bcs))
- return (-1);
- st->l1.bcs = bcs;
- st->l2.l2l1 = hfc_l2l1;
- setstack_manager(st);
- bcs->st = st;
- setstack_l1_B(st);
- return (0);
-}
-
-static void
-init_send(struct BCState *bcs)
-{
- int i;
-
- bcs->hw.hfc.send = kmalloc_array(32, sizeof(unsigned int), GFP_ATOMIC);
- if (!bcs->hw.hfc.send) {
- printk(KERN_WARNING
- "HiSax: No memory for hfc.send\n");
- return;
- }
- for (i = 0; i < 32; i++)
- bcs->hw.hfc.send[i] = 0x1fff;
-}
-
-void
-inithfc(struct IsdnCardState *cs)
-{
- init_send(&cs->bcs[0]);
- init_send(&cs->bcs[1]);
- cs->BC_Send_Data = &hfc_fill_fifo;
- cs->bcs[0].BC_SetStack = setstack_hfc;
- cs->bcs[1].BC_SetStack = setstack_hfc;
- cs->bcs[0].BC_Close = close_hfcstate;
- cs->bcs[1].BC_Close = close_hfcstate;
- mode_hfc(cs->bcs, 0, 0);
- mode_hfc(cs->bcs + 1, 0, 0);
-}
-
-void
-releasehfc(struct IsdnCardState *cs)
-{
- kfree(cs->bcs[0].hw.hfc.send);
- cs->bcs[0].hw.hfc.send = NULL;
- kfree(cs->bcs[1].hw.hfc.send);
- cs->bcs[1].hw.hfc.send = NULL;
-}
diff --git a/drivers/isdn/hisax/hfc_2bs0.h b/drivers/isdn/hisax/hfc_2bs0.h
deleted file mode 100644
index 1510096363dc..000000000000
--- a/drivers/isdn/hisax/hfc_2bs0.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/* $Id: hfc_2bs0.h,v 1.5.2.2 2004/01/12 22:52:26 keil Exp $
- *
- * specific defines for CCD's HFC 2BS0
- *
- * Author Karsten Keil
- * Copyright by Karsten Keil <keil@isdn4linux.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#define HFC_CTMT 0xe0
-#define HFC_CIRM 0xc0
-#define HFC_CIP 0x80
-#define HFC_Z1 0x00
-#define HFC_Z2 0x08
-#define HFC_Z_LOW 0x00
-#define HFC_Z_HIGH 0x04
-#define HFC_F1_INC 0x28
-#define HFC_FIFO_IN 0x2c
-#define HFC_F1 0x30
-#define HFC_F2 0x34
-#define HFC_F2_INC 0x38
-#define HFC_FIFO_OUT 0x3c
-#define HFC_B1 0x00
-#define HFC_B2 0x02
-#define HFC_REC 0x01
-#define HFC_SEND 0x00
-#define HFC_CHANNEL(ch) (ch ? HFC_B2 : HFC_B1)
-
-#define HFC_STATUS 0
-#define HFC_DATA 1
-#define HFC_DATA_NODEB 2
-
-/* Status (READ) */
-#define HFC_BUSY 0x01
-#define HFC_TIMINT 0x02
-#define HFC_EXTINT 0x04
-
-/* CTMT (Write) */
-#define HFC_CLTIMER 0x10
-#define HFC_TIM50MS 0x08
-#define HFC_TIMIRQE 0x04
-#define HFC_TRANSB2 0x02
-#define HFC_TRANSB1 0x01
-
-/* CIRM (Write) */
-#define HFC_RESET 0x08
-#define HFC_MEM8K 0x10
-#define HFC_INTA 0x01
-#define HFC_INTB 0x02
-#define HFC_INTC 0x03
-#define HFC_INTD 0x04
-#define HFC_INTE 0x05
-#define HFC_INTF 0x06
-
-extern void main_irq_hfc(struct BCState *bcs);
-extern void inithfc(struct IsdnCardState *cs);
-extern void releasehfc(struct IsdnCardState *cs);
diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c
deleted file mode 100644
index 71a8312592d6..000000000000
--- a/drivers/isdn/hisax/hfc_pci.c
+++ /dev/null
@@ -1,1755 +0,0 @@
-/* $Id: hfc_pci.c,v 1.48.2.4 2004/02/11 13:21:33 keil Exp $
- *
- * low level driver for CCD's hfc-pci based cards
- *
- * Author Werner Cornelius
- * based on existing driver for CCD hfc ISA cards
- * Copyright by Werner Cornelius <werner@isdn4linux.de>
- * by Karsten Keil <keil@isdn4linux.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- * For changes and modifications please read
- * Documentation/isdn/HiSax.cert
- *
- */
-
-#include <linux/init.h>
-#include "hisax.h"
-#include "hfc_pci.h"
-#include "isdnl1.h"
-#include <linux/pci.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-
-static const char *hfcpci_revision = "$Revision: 1.48.2.4 $";
-
-/* table entry in the PCI devices list */
-typedef struct {
- int vendor_id;
- int device_id;
- char *vendor_name;
- char *card_name;
-} PCI_ENTRY;
-
-#define NT_T1_COUNT 20 /* number of 3.125ms interrupts for G2 timeout */
-#define CLKDEL_TE 0x0e /* CLKDEL in TE mode */
-#define CLKDEL_NT 0x6c /* CLKDEL in NT mode */
-
-static const PCI_ENTRY id_list[] =
-{
- {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_2BD0, "CCD/Billion/Asuscom", "2BD0"},
- {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B000, "Billion", "B000"},
- {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B006, "Billion", "B006"},
- {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B007, "Billion", "B007"},
- {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B008, "Billion", "B008"},
- {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B009, "Billion", "B009"},
- {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B00A, "Billion", "B00A"},
- {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B00B, "Billion", "B00B"},
- {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B00C, "Billion", "B00C"},
- {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B100, "Seyeon", "B100"},
- {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B700, "Primux II S0", "B700"},
- {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B701, "Primux II S0 NT", "B701"},
- {PCI_VENDOR_ID_ABOCOM, PCI_DEVICE_ID_ABOCOM_2BD1, "Abocom/Magitek", "2BD1"},
- {PCI_VENDOR_ID_ASUSTEK, PCI_DEVICE_ID_ASUSTEK_0675, "Asuscom/Askey", "675"},
- {PCI_VENDOR_ID_BERKOM, PCI_DEVICE_ID_BERKOM_T_CONCEPT, "German telekom", "T-Concept"},
- {PCI_VENDOR_ID_BERKOM, PCI_DEVICE_ID_BERKOM_A1T, "German telekom", "A1T"},
- {PCI_VENDOR_ID_ANIGMA, PCI_DEVICE_ID_ANIGMA_MC145575, "Motorola MC145575", "MC145575"},
- {PCI_VENDOR_ID_ZOLTRIX, PCI_DEVICE_ID_ZOLTRIX_2BD0, "Zoltrix", "2BD0"},
- {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_E, "Digi International", "Digi DataFire Micro V IOM2 (Europe)"},
- {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_E, "Digi International", "Digi DataFire Micro V (Europe)"},
- {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_A, "Digi International", "Digi DataFire Micro V IOM2 (North America)"},
- {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_A, "Digi International", "Digi DataFire Micro V (North America)"},
- {PCI_VENDOR_ID_SITECOM, PCI_DEVICE_ID_SITECOM_DC105V2, "Sitecom Europe", "DC-105 ISDN PCI"},
- {0, 0, NULL, NULL},
-};
-
-
-/******************************************/
-/* free hardware resources used by driver */
-/******************************************/
-static void
-release_io_hfcpci(struct IsdnCardState *cs)
-{
- printk(KERN_INFO "HiSax: release hfcpci at %p\n",
- cs->hw.hfcpci.pci_io);
- cs->hw.hfcpci.int_m2 = 0; /* interrupt output off ! */
- Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2);
- Write_hfc(cs, HFCPCI_CIRM, HFCPCI_RESET); /* Reset On */
- mdelay(10);
- Write_hfc(cs, HFCPCI_CIRM, 0); /* Reset Off */
- mdelay(10);
- Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2);
- pci_write_config_word(cs->hw.hfcpci.dev, PCI_COMMAND, 0); /* disable memory mapped ports + busmaster */
- del_timer(&cs->hw.hfcpci.timer);
- pci_free_consistent(cs->hw.hfcpci.dev, 0x8000,
- cs->hw.hfcpci.fifos, cs->hw.hfcpci.dma);
- cs->hw.hfcpci.fifos = NULL;
- iounmap(cs->hw.hfcpci.pci_io);
-}
-
-/********************************************************************************/
-/* function called to reset the HFC PCI chip. A complete software reset of chip */
-/* and fifos is done. */
-/********************************************************************************/
-static void
-reset_hfcpci(struct IsdnCardState *cs)
-{
- pci_write_config_word(cs->hw.hfcpci.dev, PCI_COMMAND, PCI_ENA_MEMIO); /* enable memory mapped ports, disable busmaster */
- cs->hw.hfcpci.int_m2 = 0; /* interrupt output off ! */
- Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2);
-
- printk(KERN_INFO "HFC_PCI: resetting card\n");
- pci_write_config_word(cs->hw.hfcpci.dev, PCI_COMMAND, PCI_ENA_MEMIO + PCI_ENA_MASTER); /* enable memory ports + busmaster */
- Write_hfc(cs, HFCPCI_CIRM, HFCPCI_RESET); /* Reset On */
- mdelay(10);
- Write_hfc(cs, HFCPCI_CIRM, 0); /* Reset Off */
- mdelay(10);
- if (Read_hfc(cs, HFCPCI_STATUS) & 2)
- printk(KERN_WARNING "HFC-PCI init bit busy\n");
-
- cs->hw.hfcpci.fifo_en = 0x30; /* only D fifos enabled */
- Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
-
- cs->hw.hfcpci.trm = 0 + HFCPCI_BTRANS_THRESMASK; /* no echo connect , threshold */
- Write_hfc(cs, HFCPCI_TRM, cs->hw.hfcpci.trm);
-
- Write_hfc(cs, HFCPCI_CLKDEL, CLKDEL_TE); /* ST-Bit delay for TE-Mode */
- cs->hw.hfcpci.sctrl_e = HFCPCI_AUTO_AWAKE;
- Write_hfc(cs, HFCPCI_SCTRL_E, cs->hw.hfcpci.sctrl_e); /* S/T Auto awake */
- cs->hw.hfcpci.bswapped = 0; /* no exchange */
- cs->hw.hfcpci.nt_mode = 0; /* we are in TE mode */
- cs->hw.hfcpci.ctmt = HFCPCI_TIM3_125 | HFCPCI_AUTO_TIMER;
- Write_hfc(cs, HFCPCI_CTMT, cs->hw.hfcpci.ctmt);
-
- cs->hw.hfcpci.int_m1 = HFCPCI_INTS_DTRANS | HFCPCI_INTS_DREC |
- HFCPCI_INTS_L1STATE | HFCPCI_INTS_TIMER;
- Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
-
- /* Clear already pending ints */
- Read_hfc(cs, HFCPCI_INT_S1);
-
- Write_hfc(cs, HFCPCI_STATES, HFCPCI_LOAD_STATE | 2); /* HFC ST 2 */
- udelay(10);
- Write_hfc(cs, HFCPCI_STATES, 2); /* HFC ST 2 */
- cs->hw.hfcpci.mst_m = HFCPCI_MASTER; /* HFC Master Mode */
-
- Write_hfc(cs, HFCPCI_MST_MODE, cs->hw.hfcpci.mst_m);
- cs->hw.hfcpci.sctrl = 0x40; /* set tx_lo mode, error in datasheet ! */
- Write_hfc(cs, HFCPCI_SCTRL, cs->hw.hfcpci.sctrl);
- cs->hw.hfcpci.sctrl_r = 0;
- Write_hfc(cs, HFCPCI_SCTRL_R, cs->hw.hfcpci.sctrl_r);
-
- /* Init GCI/IOM2 in master mode */
- /* Slots 0 and 1 are set for B-chan 1 and 2 */
- /* D- and monitor/CI channel are not enabled */
- /* STIO1 is used as output for data, B1+B2 from ST->IOM+HFC */
- /* STIO2 is used as data input, B1+B2 from IOM->ST */
- /* ST B-channel send disabled -> continuous 1s */
- /* The IOM slots are always enabled */
- cs->hw.hfcpci.conn = 0x36; /* set data flow directions */
- Write_hfc(cs, HFCPCI_CONNECT, cs->hw.hfcpci.conn);
- Write_hfc(cs, HFCPCI_B1_SSL, 0x80); /* B1-Slot 0 STIO1 out enabled */
- Write_hfc(cs, HFCPCI_B2_SSL, 0x81); /* B2-Slot 1 STIO1 out enabled */
- Write_hfc(cs, HFCPCI_B1_RSL, 0x80); /* B1-Slot 0 STIO2 in enabled */
- Write_hfc(cs, HFCPCI_B2_RSL, 0x81); /* B2-Slot 1 STIO2 in enabled */
-
- /* Finally enable IRQ output */
- cs->hw.hfcpci.int_m2 = HFCPCI_IRQ_ENABLE;
- Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2);
- Read_hfc(cs, HFCPCI_INT_S1);
-}
-
-/***************************************************/
-/* Timer function called when kernel timer expires */
-/***************************************************/
-static void
-hfcpci_Timer(struct timer_list *t)
-{
- struct IsdnCardState *cs = from_timer(cs, t, hw.hfcpci.timer);
- cs->hw.hfcpci.timer.expires = jiffies + 75;
- /* WD RESET */
-/* WriteReg(cs, HFCD_DATA, HFCD_CTMT, cs->hw.hfcpci.ctmt | 0x80);
- add_timer(&cs->hw.hfcpci.timer);
-*/
-}
-
-
-/*********************************/
-/* schedule a new D-channel task */
-/*********************************/
-static void
-sched_event_D_pci(struct IsdnCardState *cs, int event)
-{
- test_and_set_bit(event, &cs->event);
- schedule_work(&cs->tqueue);
-}
-
-/*********************************/
-/* schedule a new b_channel task */
-/*********************************/
-static void
-hfcpci_sched_event(struct BCState *bcs, int event)
-{
- test_and_set_bit(event, &bcs->event);
- schedule_work(&bcs->tqueue);
-}
-
-/************************************************/
-/* select a b-channel entry matching and active */
-/************************************************/
-static
-struct BCState *
-Sel_BCS(struct IsdnCardState *cs, int channel)
-{
- if (cs->bcs[0].mode && (cs->bcs[0].channel == channel))
- return (&cs->bcs[0]);
- else if (cs->bcs[1].mode && (cs->bcs[1].channel == channel))
- return (&cs->bcs[1]);
- else
- return (NULL);
-}
-
-/***************************************/
-/* clear the desired B-channel rx fifo */
-/***************************************/
-static void hfcpci_clear_fifo_rx(struct IsdnCardState *cs, int fifo)
-{ u_char fifo_state;
- bzfifo_type *bzr;
-
- if (fifo) {
- bzr = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxbz_b2;
- fifo_state = cs->hw.hfcpci.fifo_en & HFCPCI_FIFOEN_B2RX;
- } else {
- bzr = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxbz_b1;
- fifo_state = cs->hw.hfcpci.fifo_en & HFCPCI_FIFOEN_B1RX;
- }
- if (fifo_state)
- cs->hw.hfcpci.fifo_en ^= fifo_state;
- Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
- cs->hw.hfcpci.last_bfifo_cnt[fifo] = 0;
- bzr->za[MAX_B_FRAMES].z1 = B_FIFO_SIZE + B_SUB_VAL - 1;
- bzr->za[MAX_B_FRAMES].z2 = bzr->za[MAX_B_FRAMES].z1;
- bzr->f1 = MAX_B_FRAMES;
- bzr->f2 = bzr->f1; /* init F pointers to remain constant */
- if (fifo_state)
- cs->hw.hfcpci.fifo_en |= fifo_state;
- Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
-}
-
-/***************************************/
-/* clear the desired B-channel tx fifo */
-/***************************************/
-static void hfcpci_clear_fifo_tx(struct IsdnCardState *cs, int fifo)
-{ u_char fifo_state;
- bzfifo_type *bzt;
-
- if (fifo) {
- bzt = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.txbz_b2;
- fifo_state = cs->hw.hfcpci.fifo_en & HFCPCI_FIFOEN_B2TX;
- } else {
- bzt = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.txbz_b1;
- fifo_state = cs->hw.hfcpci.fifo_en & HFCPCI_FIFOEN_B1TX;
- }
- if (fifo_state)
- cs->hw.hfcpci.fifo_en ^= fifo_state;
- Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
- bzt->za[MAX_B_FRAMES].z1 = B_FIFO_SIZE + B_SUB_VAL - 1;
- bzt->za[MAX_B_FRAMES].z2 = bzt->za[MAX_B_FRAMES].z1;
- bzt->f1 = MAX_B_FRAMES;
- bzt->f2 = bzt->f1; /* init F pointers to remain constant */
- if (fifo_state)
- cs->hw.hfcpci.fifo_en |= fifo_state;
- Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
-}
-
-/*********************************************/
-/* read a complete B-frame out of the buffer */
-/*********************************************/
-static struct sk_buff
-*
-hfcpci_empty_fifo(struct BCState *bcs, bzfifo_type *bz, u_char *bdata, int count)
-{
- u_char *ptr, *ptr1, new_f2;
- struct sk_buff *skb;
- struct IsdnCardState *cs = bcs->cs;
- int maxlen, new_z2;
- z_type *zp;
-
- if ((cs->debug & L1_DEB_HSCX) && !(cs->debug & L1_DEB_HSCX_FIFO))
- debugl1(cs, "hfcpci_empty_fifo");
- zp = &bz->za[bz->f2]; /* point to Z-Regs */
- new_z2 = zp->z2 + count; /* new position in fifo */
- if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL))
- new_z2 -= B_FIFO_SIZE; /* buffer wrap */
- new_f2 = (bz->f2 + 1) & MAX_B_FRAMES;
- if ((count > HSCX_BUFMAX + 3) || (count < 4) ||
- (*(bdata + (zp->z1 - B_SUB_VAL)))) {
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "hfcpci_empty_fifo: incoming packet invalid length %d or crc", count);
-#ifdef ERROR_STATISTIC
- bcs->err_inv++;
-#endif
- bz->za[new_f2].z2 = new_z2;
- bz->f2 = new_f2; /* next buffer */
- skb = NULL;
- } else if (!(skb = dev_alloc_skb(count - 3)))
- printk(KERN_WARNING "HFCPCI: receive out of memory\n");
- else {
- count -= 3;
- ptr = skb_put(skb, count);
-
- if (zp->z2 + count <= B_FIFO_SIZE + B_SUB_VAL)
- maxlen = count; /* complete transfer */
- else
- maxlen = B_FIFO_SIZE + B_SUB_VAL - zp->z2; /* maximum */
-
- ptr1 = bdata + (zp->z2 - B_SUB_VAL); /* start of data */
- memcpy(ptr, ptr1, maxlen); /* copy data */
- count -= maxlen;
-
- if (count) { /* rest remaining */
- ptr += maxlen;
- ptr1 = bdata; /* start of buffer */
- memcpy(ptr, ptr1, count); /* rest */
- }
- bz->za[new_f2].z2 = new_z2;
- bz->f2 = new_f2; /* next buffer */
-
- }
- return (skb);
-}
-
-/*******************************/
-/* D-channel receive procedure */
-/*******************************/
-static
-int
-receive_dmsg(struct IsdnCardState *cs)
-{
- struct sk_buff *skb;
- int maxlen;
- int rcnt, total;
- int count = 5;
- u_char *ptr, *ptr1;
- dfifo_type *df;
- z_type *zp;
-
- df = &((fifo_area *) (cs->hw.hfcpci.fifos))->d_chan.d_rx;
- if (test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
- debugl1(cs, "rec_dmsg blocked");
- return (1);
- }
- while (((df->f1 & D_FREG_MASK) != (df->f2 & D_FREG_MASK)) && count--) {
- zp = &df->za[df->f2 & D_FREG_MASK];
- rcnt = zp->z1 - zp->z2;
- if (rcnt < 0)
- rcnt += D_FIFO_SIZE;
- rcnt++;
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "hfcpci recd f1(%d) f2(%d) z1(%x) z2(%x) cnt(%d)",
- df->f1, df->f2, zp->z1, zp->z2, rcnt);
-
- if ((rcnt > MAX_DFRAME_LEN + 3) || (rcnt < 4) ||
- (df->data[zp->z1])) {
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "empty_fifo hfcpci packet inv. len %d or crc %d", rcnt, df->data[zp->z1]);
-#ifdef ERROR_STATISTIC
- cs->err_rx++;
-#endif
- df->f2 = ((df->f2 + 1) & MAX_D_FRAMES) | (MAX_D_FRAMES + 1); /* next buffer */
- df->za[df->f2 & D_FREG_MASK].z2 = (zp->z2 + rcnt) & (D_FIFO_SIZE - 1);
- } else if ((skb = dev_alloc_skb(rcnt - 3))) {
- total = rcnt;
- rcnt -= 3;
- ptr = skb_put(skb, rcnt);
-
- if (zp->z2 + rcnt <= D_FIFO_SIZE)
- maxlen = rcnt; /* complete transfer */
- else
- maxlen = D_FIFO_SIZE - zp->z2; /* maximum */
-
- ptr1 = df->data + zp->z2; /* start of data */
- memcpy(ptr, ptr1, maxlen); /* copy data */
- rcnt -= maxlen;
-
- if (rcnt) { /* rest remaining */
- ptr += maxlen;
- ptr1 = df->data; /* start of buffer */
- memcpy(ptr, ptr1, rcnt); /* rest */
- }
- df->f2 = ((df->f2 + 1) & MAX_D_FRAMES) | (MAX_D_FRAMES + 1); /* next buffer */
- df->za[df->f2 & D_FREG_MASK].z2 = (zp->z2 + total) & (D_FIFO_SIZE - 1);
-
- skb_queue_tail(&cs->rq, skb);
- sched_event_D_pci(cs, D_RCVBUFREADY);
- } else
- printk(KERN_WARNING "HFC-PCI: D receive out of memory\n");
- }
- test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
- return (1);
-}
-
-/*******************************************************************************/
-/* check for transparent receive data and read max one threshold size if avail */
-/*******************************************************************************/
-static int
-hfcpci_empty_fifo_trans(struct BCState *bcs, bzfifo_type *bz, u_char *bdata)
-{
- unsigned short *z1r, *z2r;
- int new_z2, fcnt, maxlen;
- struct sk_buff *skb;
- u_char *ptr, *ptr1;
-
- z1r = &bz->za[MAX_B_FRAMES].z1; /* pointer to z reg */
- z2r = z1r + 1;
-
- if (!(fcnt = *z1r - *z2r))
- return (0); /* no data avail */
-
- if (fcnt <= 0)
- fcnt += B_FIFO_SIZE; /* bytes actually buffered */
- if (fcnt > HFCPCI_BTRANS_THRESHOLD)
- fcnt = HFCPCI_BTRANS_THRESHOLD; /* limit size */
-
- new_z2 = *z2r + fcnt; /* new position in fifo */
- if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL))
- new_z2 -= B_FIFO_SIZE; /* buffer wrap */
-
- if (!(skb = dev_alloc_skb(fcnt)))
- printk(KERN_WARNING "HFCPCI: receive out of memory\n");
- else {
- ptr = skb_put(skb, fcnt);
- if (*z2r + fcnt <= B_FIFO_SIZE + B_SUB_VAL)
- maxlen = fcnt; /* complete transfer */
- else
- maxlen = B_FIFO_SIZE + B_SUB_VAL - *z2r; /* maximum */
-
- ptr1 = bdata + (*z2r - B_SUB_VAL); /* start of data */
- memcpy(ptr, ptr1, maxlen); /* copy data */
- fcnt -= maxlen;
-
- if (fcnt) { /* rest remaining */
- ptr += maxlen;
- ptr1 = bdata; /* start of buffer */
- memcpy(ptr, ptr1, fcnt); /* rest */
- }
- skb_queue_tail(&bcs->rqueue, skb);
- hfcpci_sched_event(bcs, B_RCVBUFREADY);
- }
-
- *z2r = new_z2; /* new position */
- return (1);
-} /* hfcpci_empty_fifo_trans */
-
-/**********************************/
-/* B-channel main receive routine */
-/**********************************/
-static void
-main_rec_hfcpci(struct BCState *bcs)
-{
- struct IsdnCardState *cs = bcs->cs;
- int rcnt, real_fifo;
- int receive, count = 5;
- struct sk_buff *skb;
- bzfifo_type *bz;
- u_char *bdata;
- z_type *zp;
-
-
- if ((bcs->channel) && (!cs->hw.hfcpci.bswapped)) {
- bz = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxbz_b2;
- bdata = ((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxdat_b2;
- real_fifo = 1;
- } else {
- bz = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxbz_b1;
- bdata = ((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxdat_b1;
- real_fifo = 0;
- }
-Begin:
- count--;
- if (test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
- debugl1(cs, "rec_data %d blocked", bcs->channel);
- return;
- }
- if (bz->f1 != bz->f2) {
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "hfcpci rec %d f1(%d) f2(%d)",
- bcs->channel, bz->f1, bz->f2);
- zp = &bz->za[bz->f2];
-
- rcnt = zp->z1 - zp->z2;
- if (rcnt < 0)
- rcnt += B_FIFO_SIZE;
- rcnt++;
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "hfcpci rec %d z1(%x) z2(%x) cnt(%d)",
- bcs->channel, zp->z1, zp->z2, rcnt);
- if ((skb = hfcpci_empty_fifo(bcs, bz, bdata, rcnt))) {
- skb_queue_tail(&bcs->rqueue, skb);
- hfcpci_sched_event(bcs, B_RCVBUFREADY);
- }
- rcnt = bz->f1 - bz->f2;
- if (rcnt < 0)
- rcnt += MAX_B_FRAMES + 1;
- if (cs->hw.hfcpci.last_bfifo_cnt[real_fifo] > rcnt + 1) {
- rcnt = 0;
- hfcpci_clear_fifo_rx(cs, real_fifo);
- }
- cs->hw.hfcpci.last_bfifo_cnt[real_fifo] = rcnt;
- if (rcnt > 1)
- receive = 1;
- else
- receive = 0;
- } else if (bcs->mode == L1_MODE_TRANS)
- receive = hfcpci_empty_fifo_trans(bcs, bz, bdata);
- else
- receive = 0;
- test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
- if (count && receive)
- goto Begin;
-}
-
-/**************************/
-/* D-channel send routine */
-/**************************/
-static void
-hfcpci_fill_dfifo(struct IsdnCardState *cs)
-{
- int fcnt;
- int count, new_z1, maxlen;
- dfifo_type *df;
- u_char *src, *dst, new_f1;
-
- if (!cs->tx_skb)
- return;
- if (cs->tx_skb->len <= 0)
- return;
-
- df = &((fifo_area *) (cs->hw.hfcpci.fifos))->d_chan.d_tx;
-
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "hfcpci_fill_Dfifo f1(%d) f2(%d) z1(f1)(%x)",
- df->f1, df->f2,
- df->za[df->f1 & D_FREG_MASK].z1);
- fcnt = df->f1 - df->f2; /* frame count actually buffered */
- if (fcnt < 0)
- fcnt += (MAX_D_FRAMES + 1); /* if wrap around */
- if (fcnt > (MAX_D_FRAMES - 1)) {
- if (cs->debug & L1_DEB_ISAC)
-			debugl1(cs, "hfcpci_fill_Dfifo more than 14 frames");
-#ifdef ERROR_STATISTIC
- cs->err_tx++;
-#endif
- return;
- }
- /* now determine free bytes in FIFO buffer */
- count = df->za[df->f2 & D_FREG_MASK].z2 - df->za[df->f1 & D_FREG_MASK].z1 - 1;
- if (count <= 0)
- count += D_FIFO_SIZE; /* count now contains available bytes */
-
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "hfcpci_fill_Dfifo count(%u/%d)",
- cs->tx_skb->len, count);
- if (count < cs->tx_skb->len) {
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "hfcpci_fill_Dfifo no fifo mem");
- return;
- }
- count = cs->tx_skb->len; /* get frame len */
- new_z1 = (df->za[df->f1 & D_FREG_MASK].z1 + count) & (D_FIFO_SIZE - 1);
- new_f1 = ((df->f1 + 1) & D_FREG_MASK) | (D_FREG_MASK + 1);
- src = cs->tx_skb->data; /* source pointer */
- dst = df->data + df->za[df->f1 & D_FREG_MASK].z1;
- maxlen = D_FIFO_SIZE - df->za[df->f1 & D_FREG_MASK].z1; /* end fifo */
- if (maxlen > count)
- maxlen = count; /* limit size */
- memcpy(dst, src, maxlen); /* first copy */
-
- count -= maxlen; /* remaining bytes */
- if (count) {
- dst = df->data; /* start of buffer */
- src += maxlen; /* new position */
- memcpy(dst, src, count);
- }
- df->za[new_f1 & D_FREG_MASK].z1 = new_z1; /* for next buffer */
- df->za[df->f1 & D_FREG_MASK].z1 = new_z1; /* new pos actual buffer */
- df->f1 = new_f1; /* next frame */
-
- dev_kfree_skb_any(cs->tx_skb);
- cs->tx_skb = NULL;
-}
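The free-space test above is the usual circular-buffer rule: the writer may advance Z1 only up to one byte short of the reader's Z2, so a full FIFO can be told apart from an empty one. A sketch of that computation, mirroring the expression used in the function (names are illustrative):

/* Bytes the transmitter may still write into a circular FIFO of
 * "fifo_size" bytes, given writer position z1 and reader position z2. */
static int fifo_free_bytes(int z1, int z2, int fifo_size)
{
	int room = z2 - z1 - 1;		/* keep one byte unused */

	if (room <= 0)
		room += fifo_size;
	return room;
}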
-
-/**************************/
-/* B-channel send routine */
-/**************************/
-static void
-hfcpci_fill_fifo(struct BCState *bcs)
-{
- struct IsdnCardState *cs = bcs->cs;
- int maxlen, fcnt;
- int count, new_z1;
- bzfifo_type *bz;
- u_char *bdata;
- u_char new_f1, *src, *dst;
- unsigned short *z1t, *z2t;
-
- if (!bcs->tx_skb)
- return;
- if (bcs->tx_skb->len <= 0)
- return;
-
- if ((bcs->channel) && (!cs->hw.hfcpci.bswapped)) {
- bz = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.txbz_b2;
- bdata = ((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.txdat_b2;
- } else {
- bz = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.txbz_b1;
- bdata = ((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.txdat_b1;
- }
-
- if (bcs->mode == L1_MODE_TRANS) {
- z1t = &bz->za[MAX_B_FRAMES].z1;
- z2t = z1t + 1;
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "hfcpci_fill_fifo_trans %d z1(%x) z2(%x)",
- bcs->channel, *z1t, *z2t);
- fcnt = *z2t - *z1t;
- if (fcnt <= 0)
- fcnt += B_FIFO_SIZE; /* fcnt contains available bytes in fifo */
- fcnt = B_FIFO_SIZE - fcnt; /* remaining bytes to send */
-
- while ((fcnt < 2 * HFCPCI_BTRANS_THRESHOLD) && (bcs->tx_skb)) {
- if (bcs->tx_skb->len < B_FIFO_SIZE - fcnt) {
- /* data is suitable for fifo */
- count = bcs->tx_skb->len;
-
- new_z1 = *z1t + count; /* new buffer Position */
- if (new_z1 >= (B_FIFO_SIZE + B_SUB_VAL))
- new_z1 -= B_FIFO_SIZE; /* buffer wrap */
- src = bcs->tx_skb->data; /* source pointer */
- dst = bdata + (*z1t - B_SUB_VAL);
- maxlen = (B_FIFO_SIZE + B_SUB_VAL) - *z1t; /* end of fifo */
- if (maxlen > count)
- maxlen = count; /* limit size */
- memcpy(dst, src, maxlen); /* first copy */
-
- count -= maxlen; /* remaining bytes */
- if (count) {
- dst = bdata; /* start of buffer */
- src += maxlen; /* new position */
- memcpy(dst, src, count);
- }
- bcs->tx_cnt -= bcs->tx_skb->len;
- fcnt += bcs->tx_skb->len;
- *z1t = new_z1; /* now send data */
- } else if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "hfcpci_fill_fifo_trans %d frame length %d discarded",
- bcs->channel, bcs->tx_skb->len);
-
- if (test_bit(FLG_LLI_L1WAKEUP, &bcs->st->lli.flag) &&
- (PACKET_NOACK != bcs->tx_skb->pkt_type)) {
- u_long flags;
- spin_lock_irqsave(&bcs->aclock, flags);
- bcs->ackcnt += bcs->tx_skb->len;
- spin_unlock_irqrestore(&bcs->aclock, flags);
- schedule_event(bcs, B_ACKPENDING);
- }
-
- dev_consume_skb_any(bcs->tx_skb);
- bcs->tx_skb = skb_dequeue(&bcs->squeue); /* fetch next data */
- }
- test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
- return;
- }
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "hfcpci_fill_fifo_hdlc %d f1(%d) f2(%d) z1(f1)(%x)",
- bcs->channel, bz->f1, bz->f2,
- bz->za[bz->f1].z1);
-
- fcnt = bz->f1 - bz->f2; /* frame count actually buffered */
- if (fcnt < 0)
- fcnt += (MAX_B_FRAMES + 1); /* if wrap around */
- if (fcnt > (MAX_B_FRAMES - 1)) {
- if (cs->debug & L1_DEB_HSCX)
-			debugl1(cs, "hfcpci_fill_Bfifo more than 14 frames");
- return;
- }
- /* now determine free bytes in FIFO buffer */
- count = bz->za[bz->f2].z2 - bz->za[bz->f1].z1 - 1;
- if (count <= 0)
- count += B_FIFO_SIZE; /* count now contains available bytes */
-
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "hfcpci_fill_fifo %d count(%u/%d),%lx",
- bcs->channel, bcs->tx_skb->len,
- count, current->state);
-
- if (count < bcs->tx_skb->len) {
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "hfcpci_fill_fifo no fifo mem");
- return;
- }
- count = bcs->tx_skb->len; /* get frame len */
- new_z1 = bz->za[bz->f1].z1 + count; /* new buffer Position */
- if (new_z1 >= (B_FIFO_SIZE + B_SUB_VAL))
- new_z1 -= B_FIFO_SIZE; /* buffer wrap */
-
- new_f1 = ((bz->f1 + 1) & MAX_B_FRAMES);
- src = bcs->tx_skb->data; /* source pointer */
- dst = bdata + (bz->za[bz->f1].z1 - B_SUB_VAL);
- maxlen = (B_FIFO_SIZE + B_SUB_VAL) - bz->za[bz->f1].z1; /* end fifo */
- if (maxlen > count)
- maxlen = count; /* limit size */
- memcpy(dst, src, maxlen); /* first copy */
-
- count -= maxlen; /* remaining bytes */
- if (count) {
- dst = bdata; /* start of buffer */
- src += maxlen; /* new position */
- memcpy(dst, src, count);
- }
- bcs->tx_cnt -= bcs->tx_skb->len;
- if (test_bit(FLG_LLI_L1WAKEUP, &bcs->st->lli.flag) &&
- (PACKET_NOACK != bcs->tx_skb->pkt_type)) {
- u_long flags;
- spin_lock_irqsave(&bcs->aclock, flags);
- bcs->ackcnt += bcs->tx_skb->len;
- spin_unlock_irqrestore(&bcs->aclock, flags);
- schedule_event(bcs, B_ACKPENDING);
- }
-
- bz->za[new_f1].z1 = new_z1; /* for next buffer */
- bz->f1 = new_f1; /* next frame */
-
- dev_kfree_skb_any(bcs->tx_skb);
- bcs->tx_skb = NULL;
- test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
-}
-
-/**********************************************/
-/* D-channel l1 state call for leased NT-mode */
-/**********************************************/
-static void
-dch_nt_l2l1(struct PStack *st, int pr, void *arg)
-{
- struct IsdnCardState *cs = (struct IsdnCardState *) st->l1.hardware;
-
- switch (pr) {
- case (PH_DATA | REQUEST):
- case (PH_PULL | REQUEST):
- case (PH_PULL | INDICATION):
- st->l1.l1hw(st, pr, arg);
- break;
- case (PH_ACTIVATE | REQUEST):
- st->l1.l1l2(st, PH_ACTIVATE | CONFIRM, NULL);
- break;
- case (PH_TESTLOOP | REQUEST):
- if (1 & (long) arg)
- debugl1(cs, "PH_TEST_LOOP B1");
- if (2 & (long) arg)
- debugl1(cs, "PH_TEST_LOOP B2");
- if (!(3 & (long) arg))
- debugl1(cs, "PH_TEST_LOOP DISABLED");
- st->l1.l1hw(st, HW_TESTLOOP | REQUEST, arg);
- break;
- default:
- if (cs->debug)
- debugl1(cs, "dch_nt_l2l1 msg %04X unhandled", pr);
- break;
- }
-}
-
-
-
-/***********************/
-/* set/reset echo mode */
-/***********************/
-static int
-hfcpci_auxcmd(struct IsdnCardState *cs, isdn_ctrl *ic)
-{
- u_long flags;
- int i = *(unsigned int *) ic->parm.num;
-
- if ((ic->arg == 98) &&
- (!(cs->hw.hfcpci.int_m1 & (HFCPCI_INTS_B2TRANS + HFCPCI_INTS_B2REC + HFCPCI_INTS_B1TRANS + HFCPCI_INTS_B1REC)))) {
- spin_lock_irqsave(&cs->lock, flags);
- Write_hfc(cs, HFCPCI_CLKDEL, CLKDEL_NT); /* ST-Bit delay for NT-Mode */
- Write_hfc(cs, HFCPCI_STATES, HFCPCI_LOAD_STATE | 0); /* HFC ST G0 */
- udelay(10);
- cs->hw.hfcpci.sctrl |= SCTRL_MODE_NT;
- Write_hfc(cs, HFCPCI_SCTRL, cs->hw.hfcpci.sctrl); /* set NT-mode */
- udelay(10);
- Write_hfc(cs, HFCPCI_STATES, HFCPCI_LOAD_STATE | 1); /* HFC ST G1 */
- udelay(10);
- Write_hfc(cs, HFCPCI_STATES, 1 | HFCPCI_ACTIVATE | HFCPCI_DO_ACTION);
- cs->dc.hfcpci.ph_state = 1;
- cs->hw.hfcpci.nt_mode = 1;
- cs->hw.hfcpci.nt_timer = 0;
- cs->stlist->l2.l2l1 = dch_nt_l2l1;
- spin_unlock_irqrestore(&cs->lock, flags);
- debugl1(cs, "NT mode activated");
- return (0);
- }
- if ((cs->chanlimit > 1) || (cs->hw.hfcpci.bswapped) ||
- (cs->hw.hfcpci.nt_mode) || (ic->arg != 12))
- return (-EINVAL);
-
- spin_lock_irqsave(&cs->lock, flags);
- if (i) {
- cs->logecho = 1;
- cs->hw.hfcpci.trm |= 0x20; /* enable echo chan */
- cs->hw.hfcpci.int_m1 |= HFCPCI_INTS_B2REC;
- cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B2RX;
- } else {
- cs->logecho = 0;
- cs->hw.hfcpci.trm &= ~0x20; /* disable echo chan */
- cs->hw.hfcpci.int_m1 &= ~HFCPCI_INTS_B2REC;
- cs->hw.hfcpci.fifo_en &= ~HFCPCI_FIFOEN_B2RX;
- }
- cs->hw.hfcpci.sctrl_r &= ~SCTRL_B2_ENA;
- cs->hw.hfcpci.sctrl &= ~SCTRL_B2_ENA;
- cs->hw.hfcpci.conn |= 0x10; /* B2-IOM -> B2-ST */
- cs->hw.hfcpci.ctmt &= ~2;
- Write_hfc(cs, HFCPCI_CTMT, cs->hw.hfcpci.ctmt);
- Write_hfc(cs, HFCPCI_SCTRL_R, cs->hw.hfcpci.sctrl_r);
- Write_hfc(cs, HFCPCI_SCTRL, cs->hw.hfcpci.sctrl);
- Write_hfc(cs, HFCPCI_CONNECT, cs->hw.hfcpci.conn);
- Write_hfc(cs, HFCPCI_TRM, cs->hw.hfcpci.trm);
- Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
- Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
- spin_unlock_irqrestore(&cs->lock, flags);
- return (0);
-} /* hfcpci_auxcmd */
-
-/*****************************/
-/* E-channel receive routine */
-/*****************************/
-static void
-receive_emsg(struct IsdnCardState *cs)
-{
- int rcnt;
- int receive, count = 5;
- bzfifo_type *bz;
- u_char *bdata;
- z_type *zp;
- u_char *ptr, *ptr1, new_f2;
- int total, maxlen, new_z2;
- u_char e_buffer[256];
-
- bz = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxbz_b2;
- bdata = ((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxdat_b2;
-Begin:
- count--;
- if (test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
- debugl1(cs, "echo_rec_data blocked");
- return;
- }
- if (bz->f1 != bz->f2) {
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "hfcpci e_rec f1(%d) f2(%d)",
- bz->f1, bz->f2);
- zp = &bz->za[bz->f2];
-
- rcnt = zp->z1 - zp->z2;
- if (rcnt < 0)
- rcnt += B_FIFO_SIZE;
- rcnt++;
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "hfcpci e_rec z1(%x) z2(%x) cnt(%d)",
- zp->z1, zp->z2, rcnt);
- new_z2 = zp->z2 + rcnt; /* new position in fifo */
- if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL))
- new_z2 -= B_FIFO_SIZE; /* buffer wrap */
- new_f2 = (bz->f2 + 1) & MAX_B_FRAMES;
-		if ((rcnt > 256 + 3) || (rcnt < 4) ||
- (*(bdata + (zp->z1 - B_SUB_VAL)))) {
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "hfcpci_empty_echan: incoming packet invalid length %d or crc", rcnt);
- bz->za[new_f2].z2 = new_z2;
- bz->f2 = new_f2; /* next buffer */
- } else {
- total = rcnt;
- rcnt -= 3;
- ptr = e_buffer;
-
- if (zp->z2 <= B_FIFO_SIZE + B_SUB_VAL)
- maxlen = rcnt; /* complete transfer */
- else
- maxlen = B_FIFO_SIZE + B_SUB_VAL - zp->z2; /* maximum */
-
- ptr1 = bdata + (zp->z2 - B_SUB_VAL); /* start of data */
- memcpy(ptr, ptr1, maxlen); /* copy data */
- rcnt -= maxlen;
-
- if (rcnt) { /* rest remaining */
- ptr += maxlen;
- ptr1 = bdata; /* start of buffer */
- memcpy(ptr, ptr1, rcnt); /* rest */
- }
- bz->za[new_f2].z2 = new_z2;
- bz->f2 = new_f2; /* next buffer */
- if (cs->debug & DEB_DLOG_HEX) {
- ptr = cs->dlog;
- if ((total - 3) < MAX_DLOG_SPACE / 3 - 10) {
- *ptr++ = 'E';
- *ptr++ = 'C';
- *ptr++ = 'H';
- *ptr++ = 'O';
- *ptr++ = ':';
- ptr += QuickHex(ptr, e_buffer, total - 3);
- ptr--;
- *ptr++ = '\n';
- *ptr = 0;
- HiSax_putstatus(cs, NULL, cs->dlog);
- } else
- HiSax_putstatus(cs, "LogEcho: ", "warning Frame too big (%d)", total - 3);
- }
- }
-
- rcnt = bz->f1 - bz->f2;
- if (rcnt < 0)
- rcnt += MAX_B_FRAMES + 1;
- if (rcnt > 1)
- receive = 1;
- else
- receive = 0;
- } else
- receive = 0;
- test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
- if (count && receive)
- goto Begin;
-} /* receive_emsg */
-
-/*********************/
-/* Interrupt handler */
-/*********************/
-static irqreturn_t
-hfcpci_interrupt(int intno, void *dev_id)
-{
- u_long flags;
- struct IsdnCardState *cs = dev_id;
- u_char exval;
- struct BCState *bcs;
- int count = 15;
- u_char val, stat;
-
- if (!(cs->hw.hfcpci.int_m2 & 0x08)) {
- debugl1(cs, "HFC-PCI: int_m2 %x not initialised", cs->hw.hfcpci.int_m2);
- return IRQ_NONE; /* not initialised */
- }
- spin_lock_irqsave(&cs->lock, flags);
- if (HFCPCI_ANYINT & (stat = Read_hfc(cs, HFCPCI_STATUS))) {
- val = Read_hfc(cs, HFCPCI_INT_S1);
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "HFC-PCI: stat(%02x) s1(%02x)", stat, val);
- } else {
- spin_unlock_irqrestore(&cs->lock, flags);
- return IRQ_NONE;
- }
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "HFC-PCI irq %x %s", val,
- test_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags) ?
- "locked" : "unlocked");
- val &= cs->hw.hfcpci.int_m1;
- if (val & 0x40) { /* state machine irq */
- exval = Read_hfc(cs, HFCPCI_STATES) & 0xf;
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "ph_state chg %d->%d", cs->dc.hfcpci.ph_state,
- exval);
- cs->dc.hfcpci.ph_state = exval;
- sched_event_D_pci(cs, D_L1STATECHANGE);
- val &= ~0x40;
- }
- if (val & 0x80) { /* timer irq */
- if (cs->hw.hfcpci.nt_mode) {
- if ((--cs->hw.hfcpci.nt_timer) < 0)
- sched_event_D_pci(cs, D_L1STATECHANGE);
- }
- val &= ~0x80;
- Write_hfc(cs, HFCPCI_CTMT, cs->hw.hfcpci.ctmt | HFCPCI_CLTIMER);
- }
- while (val) {
- if (test_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
- cs->hw.hfcpci.int_s1 |= val;
- spin_unlock_irqrestore(&cs->lock, flags);
- return IRQ_HANDLED;
- }
- if (cs->hw.hfcpci.int_s1 & 0x18) {
- exval = val;
- val = cs->hw.hfcpci.int_s1;
- cs->hw.hfcpci.int_s1 = exval;
- }
- if (val & 0x08) {
- if (!(bcs = Sel_BCS(cs, cs->hw.hfcpci.bswapped ? 1 : 0))) {
- if (cs->debug)
- debugl1(cs, "hfcpci spurious 0x08 IRQ");
- } else
- main_rec_hfcpci(bcs);
- }
- if (val & 0x10) {
- if (cs->logecho)
- receive_emsg(cs);
- else if (!(bcs = Sel_BCS(cs, 1))) {
- if (cs->debug)
- debugl1(cs, "hfcpci spurious 0x10 IRQ");
- } else
- main_rec_hfcpci(bcs);
- }
- if (val & 0x01) {
- if (!(bcs = Sel_BCS(cs, cs->hw.hfcpci.bswapped ? 1 : 0))) {
- if (cs->debug)
- debugl1(cs, "hfcpci spurious 0x01 IRQ");
- } else {
- if (bcs->tx_skb) {
- if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
- hfcpci_fill_fifo(bcs);
- test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
- } else
- debugl1(cs, "fill_data %d blocked", bcs->channel);
- } else {
- if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) {
- if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
- hfcpci_fill_fifo(bcs);
- test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
- } else
- debugl1(cs, "fill_data %d blocked", bcs->channel);
- } else {
- hfcpci_sched_event(bcs, B_XMTBUFREADY);
- }
- }
- }
- }
- if (val & 0x02) {
- if (!(bcs = Sel_BCS(cs, 1))) {
- if (cs->debug)
- debugl1(cs, "hfcpci spurious 0x02 IRQ");
- } else {
- if (bcs->tx_skb) {
- if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
- hfcpci_fill_fifo(bcs);
- test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
- } else
- debugl1(cs, "fill_data %d blocked", bcs->channel);
- } else {
- if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) {
- if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
- hfcpci_fill_fifo(bcs);
- test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
- } else
- debugl1(cs, "fill_data %d blocked", bcs->channel);
- } else {
- hfcpci_sched_event(bcs, B_XMTBUFREADY);
- }
- }
- }
- }
- if (val & 0x20) { /* receive dframe */
- receive_dmsg(cs);
- }
- if (val & 0x04) { /* dframe transmitted */
- if (test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags))
- del_timer(&cs->dbusytimer);
- if (test_and_clear_bit(FLG_L1_DBUSY, &cs->HW_Flags))
- sched_event_D_pci(cs, D_CLEARBUSY);
- if (cs->tx_skb) {
- if (cs->tx_skb->len) {
- if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
- hfcpci_fill_dfifo(cs);
- test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
- } else {
- debugl1(cs, "hfcpci_fill_dfifo irq blocked");
- }
- goto afterXPR;
- } else {
- dev_kfree_skb_irq(cs->tx_skb);
- cs->tx_cnt = 0;
- cs->tx_skb = NULL;
- }
- }
- if ((cs->tx_skb = skb_dequeue(&cs->sq))) {
- cs->tx_cnt = 0;
- if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
- hfcpci_fill_dfifo(cs);
- test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
- } else {
- debugl1(cs, "hfcpci_fill_dfifo irq blocked");
- }
- } else
- sched_event_D_pci(cs, D_XMTBUFREADY);
- }
- afterXPR:
- if (cs->hw.hfcpci.int_s1 && count--) {
- val = cs->hw.hfcpci.int_s1;
- cs->hw.hfcpci.int_s1 = 0;
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "HFC-PCI irq %x loop %d", val, 15 - count);
- } else
- val = 0;
- }
- spin_unlock_irqrestore(&cs->lock, flags);
- return IRQ_HANDLED;
-}
-
-/********************************************************************/
-/* timer callback for D-chan busy resolution. Currently no function */
-/********************************************************************/
-static void
-hfcpci_dbusy_timer(struct timer_list *t)
-{
-}
-
-/*************************************/
-/* Layer 1 D-channel hardware access */
-/*************************************/
-static void
-HFCPCI_l1hw(struct PStack *st, int pr, void *arg)
-{
- u_long flags;
- struct IsdnCardState *cs = (struct IsdnCardState *) st->l1.hardware;
- struct sk_buff *skb = arg;
-
- switch (pr) {
- case (PH_DATA | REQUEST):
- if (cs->debug & DEB_DLOG_HEX)
- LogFrame(cs, skb->data, skb->len);
- if (cs->debug & DEB_DLOG_VERBOSE)
- dlogframe(cs, skb, 0);
- spin_lock_irqsave(&cs->lock, flags);
- if (cs->tx_skb) {
- skb_queue_tail(&cs->sq, skb);
-#ifdef L2FRAME_DEBUG /* psa */
- if (cs->debug & L1_DEB_LAPD)
- Logl2Frame(cs, skb, "PH_DATA Queued", 0);
-#endif
- } else {
- cs->tx_skb = skb;
- cs->tx_cnt = 0;
-#ifdef L2FRAME_DEBUG /* psa */
- if (cs->debug & L1_DEB_LAPD)
- Logl2Frame(cs, skb, "PH_DATA", 0);
-#endif
- if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
- hfcpci_fill_dfifo(cs);
- test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
- } else
- debugl1(cs, "hfcpci_fill_dfifo blocked");
-
- }
- spin_unlock_irqrestore(&cs->lock, flags);
- break;
- case (PH_PULL | INDICATION):
- spin_lock_irqsave(&cs->lock, flags);
- if (cs->tx_skb) {
- if (cs->debug & L1_DEB_WARN)
-				debugl1(cs, " l2l1 tx_skb exists, this shouldn't happen");
- skb_queue_tail(&cs->sq, skb);
- spin_unlock_irqrestore(&cs->lock, flags);
- break;
- }
- if (cs->debug & DEB_DLOG_HEX)
- LogFrame(cs, skb->data, skb->len);
- if (cs->debug & DEB_DLOG_VERBOSE)
- dlogframe(cs, skb, 0);
- cs->tx_skb = skb;
- cs->tx_cnt = 0;
-#ifdef L2FRAME_DEBUG /* psa */
- if (cs->debug & L1_DEB_LAPD)
- Logl2Frame(cs, skb, "PH_DATA_PULLED", 0);
-#endif
- if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
- hfcpci_fill_dfifo(cs);
- test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
- } else
- debugl1(cs, "hfcpci_fill_dfifo blocked");
- spin_unlock_irqrestore(&cs->lock, flags);
- break;
- case (PH_PULL | REQUEST):
-#ifdef L2FRAME_DEBUG /* psa */
- if (cs->debug & L1_DEB_LAPD)
- debugl1(cs, "-> PH_REQUEST_PULL");
-#endif
- spin_lock_irqsave(&cs->lock, flags);
- if (!cs->tx_skb) {
- test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
- st->l1.l1l2(st, PH_PULL | CONFIRM, NULL);
- } else
- test_and_set_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
- spin_unlock_irqrestore(&cs->lock, flags);
- break;
- case (HW_RESET | REQUEST):
- spin_lock_irqsave(&cs->lock, flags);
- Write_hfc(cs, HFCPCI_STATES, HFCPCI_LOAD_STATE | 3); /* HFC ST 3 */
- udelay(6);
-		Write_hfc(cs, HFCPCI_STATES, 3);	/* HFC ST 3 */
- cs->hw.hfcpci.mst_m |= HFCPCI_MASTER;
- Write_hfc(cs, HFCPCI_MST_MODE, cs->hw.hfcpci.mst_m);
- Write_hfc(cs, HFCPCI_STATES, HFCPCI_ACTIVATE | HFCPCI_DO_ACTION);
- spin_unlock_irqrestore(&cs->lock, flags);
- l1_msg(cs, HW_POWERUP | CONFIRM, NULL);
- break;
- case (HW_ENABLE | REQUEST):
- spin_lock_irqsave(&cs->lock, flags);
- Write_hfc(cs, HFCPCI_STATES, HFCPCI_DO_ACTION);
- spin_unlock_irqrestore(&cs->lock, flags);
- break;
- case (HW_DEACTIVATE | REQUEST):
- spin_lock_irqsave(&cs->lock, flags);
- cs->hw.hfcpci.mst_m &= ~HFCPCI_MASTER;
- Write_hfc(cs, HFCPCI_MST_MODE, cs->hw.hfcpci.mst_m);
- spin_unlock_irqrestore(&cs->lock, flags);
- break;
- case (HW_INFO3 | REQUEST):
- spin_lock_irqsave(&cs->lock, flags);
- cs->hw.hfcpci.mst_m |= HFCPCI_MASTER;
- Write_hfc(cs, HFCPCI_MST_MODE, cs->hw.hfcpci.mst_m);
- spin_unlock_irqrestore(&cs->lock, flags);
- break;
- case (HW_TESTLOOP | REQUEST):
- spin_lock_irqsave(&cs->lock, flags);
- switch ((long) arg) {
- case (1):
- Write_hfc(cs, HFCPCI_B1_SSL, 0x80); /* tx slot */
- Write_hfc(cs, HFCPCI_B1_RSL, 0x80); /* rx slot */
- cs->hw.hfcpci.conn = (cs->hw.hfcpci.conn & ~7) | 1;
- Write_hfc(cs, HFCPCI_CONNECT, cs->hw.hfcpci.conn);
- break;
-
- case (2):
- Write_hfc(cs, HFCPCI_B2_SSL, 0x81); /* tx slot */
- Write_hfc(cs, HFCPCI_B2_RSL, 0x81); /* rx slot */
- cs->hw.hfcpci.conn = (cs->hw.hfcpci.conn & ~0x38) | 0x08;
- Write_hfc(cs, HFCPCI_CONNECT, cs->hw.hfcpci.conn);
- break;
-
- default:
- spin_unlock_irqrestore(&cs->lock, flags);
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "hfcpci_l1hw loop invalid %4lx", (long) arg);
- return;
- }
- cs->hw.hfcpci.trm |= 0x80; /* enable IOM-loop */
- Write_hfc(cs, HFCPCI_TRM, cs->hw.hfcpci.trm);
- spin_unlock_irqrestore(&cs->lock, flags);
- break;
- default:
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "hfcpci_l1hw unknown pr %4x", pr);
- break;
- }
-}
-
-/***********************************************/
-/* called during init setting l1 stack pointer */
-/***********************************************/
-static void
-setstack_hfcpci(struct PStack *st, struct IsdnCardState *cs)
-{
- st->l1.l1hw = HFCPCI_l1hw;
-}
-
-/**************************************/
-/* send B-channel data if not blocked */
-/**************************************/
-static void
-hfcpci_send_data(struct BCState *bcs)
-{
- struct IsdnCardState *cs = bcs->cs;
-
- if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
- hfcpci_fill_fifo(bcs);
- test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
- } else
- debugl1(cs, "send_data %d blocked", bcs->channel);
-}
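hfcpci_send_data() shows the locking convention used throughout the driver: FLG_LOCK_ATOMIC in cs->HW_Flags acts as a software try-lock around FIFO accesses, and work that finds it taken is deferred (the interrupt handler keeps the pending bits in int_s1) rather than spun on. A generic sketch of the pattern (the helper name is hypothetical):

/* Run "fn" only if the per-card FIFO lock can be taken; return 0 when the
 * FIFO is busy so the caller can retry from the next interrupt/event. */
static int hfc_trylock_and_run(unsigned long *hw_flags,
			       void (*fn)(void *), void *arg)
{
	if (test_and_set_bit(FLG_LOCK_ATOMIC, hw_flags))
		return 0;			/* busy, defer the work */
	fn(arg);
	test_and_clear_bit(FLG_LOCK_ATOMIC, hw_flags);
	return 1;
}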
-
-/***************************************************************/
-/* activate/deactivate hardware for selected channels and mode */
-/***************************************************************/
-static void
-mode_hfcpci(struct BCState *bcs, int mode, int bc)
-{
- struct IsdnCardState *cs = bcs->cs;
- int fifo2;
-
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "HFCPCI bchannel mode %d bchan %d/%d",
- mode, bc, bcs->channel);
- bcs->mode = mode;
- bcs->channel = bc;
- fifo2 = bc;
- if (cs->chanlimit > 1) {
- cs->hw.hfcpci.bswapped = 0; /* B1 and B2 normal mode */
- cs->hw.hfcpci.sctrl_e &= ~0x80;
- } else {
- if (bc) {
- if (mode != L1_MODE_NULL) {
- cs->hw.hfcpci.bswapped = 1; /* B1 and B2 exchanged */
- cs->hw.hfcpci.sctrl_e |= 0x80;
- } else {
- cs->hw.hfcpci.bswapped = 0; /* B1 and B2 normal mode */
- cs->hw.hfcpci.sctrl_e &= ~0x80;
- }
- fifo2 = 0;
- } else {
- cs->hw.hfcpci.bswapped = 0; /* B1 and B2 normal mode */
- cs->hw.hfcpci.sctrl_e &= ~0x80;
- }
- }
- switch (mode) {
- case (L1_MODE_NULL):
- if (bc) {
- cs->hw.hfcpci.sctrl &= ~SCTRL_B2_ENA;
- cs->hw.hfcpci.sctrl_r &= ~SCTRL_B2_ENA;
- } else {
- cs->hw.hfcpci.sctrl &= ~SCTRL_B1_ENA;
- cs->hw.hfcpci.sctrl_r &= ~SCTRL_B1_ENA;
- }
- if (fifo2) {
- cs->hw.hfcpci.fifo_en &= ~HFCPCI_FIFOEN_B2;
- cs->hw.hfcpci.int_m1 &= ~(HFCPCI_INTS_B2TRANS + HFCPCI_INTS_B2REC);
- } else {
- cs->hw.hfcpci.fifo_en &= ~HFCPCI_FIFOEN_B1;
- cs->hw.hfcpci.int_m1 &= ~(HFCPCI_INTS_B1TRANS + HFCPCI_INTS_B1REC);
- }
- break;
- case (L1_MODE_TRANS):
- hfcpci_clear_fifo_rx(cs, fifo2);
- hfcpci_clear_fifo_tx(cs, fifo2);
- if (bc) {
- cs->hw.hfcpci.sctrl |= SCTRL_B2_ENA;
- cs->hw.hfcpci.sctrl_r |= SCTRL_B2_ENA;
- } else {
- cs->hw.hfcpci.sctrl |= SCTRL_B1_ENA;
- cs->hw.hfcpci.sctrl_r |= SCTRL_B1_ENA;
- }
- if (fifo2) {
- cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B2;
- cs->hw.hfcpci.int_m1 |= (HFCPCI_INTS_B2TRANS + HFCPCI_INTS_B2REC);
- cs->hw.hfcpci.ctmt |= 2;
- cs->hw.hfcpci.conn &= ~0x18;
- } else {
- cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B1;
- cs->hw.hfcpci.int_m1 |= (HFCPCI_INTS_B1TRANS + HFCPCI_INTS_B1REC);
- cs->hw.hfcpci.ctmt |= 1;
- cs->hw.hfcpci.conn &= ~0x03;
- }
- break;
- case (L1_MODE_HDLC):
- hfcpci_clear_fifo_rx(cs, fifo2);
- hfcpci_clear_fifo_tx(cs, fifo2);
- if (bc) {
- cs->hw.hfcpci.sctrl |= SCTRL_B2_ENA;
- cs->hw.hfcpci.sctrl_r |= SCTRL_B2_ENA;
- } else {
- cs->hw.hfcpci.sctrl |= SCTRL_B1_ENA;
- cs->hw.hfcpci.sctrl_r |= SCTRL_B1_ENA;
- }
- if (fifo2) {
- cs->hw.hfcpci.last_bfifo_cnt[1] = 0;
- cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B2;
- cs->hw.hfcpci.int_m1 |= (HFCPCI_INTS_B2TRANS + HFCPCI_INTS_B2REC);
- cs->hw.hfcpci.ctmt &= ~2;
- cs->hw.hfcpci.conn &= ~0x18;
- } else {
- cs->hw.hfcpci.last_bfifo_cnt[0] = 0;
- cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B1;
- cs->hw.hfcpci.int_m1 |= (HFCPCI_INTS_B1TRANS + HFCPCI_INTS_B1REC);
- cs->hw.hfcpci.ctmt &= ~1;
- cs->hw.hfcpci.conn &= ~0x03;
- }
- break;
- case (L1_MODE_EXTRN):
- if (bc) {
- cs->hw.hfcpci.conn |= 0x10;
- cs->hw.hfcpci.sctrl |= SCTRL_B2_ENA;
- cs->hw.hfcpci.sctrl_r |= SCTRL_B2_ENA;
- cs->hw.hfcpci.fifo_en &= ~HFCPCI_FIFOEN_B2;
- cs->hw.hfcpci.int_m1 &= ~(HFCPCI_INTS_B2TRANS + HFCPCI_INTS_B2REC);
- } else {
- cs->hw.hfcpci.conn |= 0x02;
- cs->hw.hfcpci.sctrl |= SCTRL_B1_ENA;
- cs->hw.hfcpci.sctrl_r |= SCTRL_B1_ENA;
- cs->hw.hfcpci.fifo_en &= ~HFCPCI_FIFOEN_B1;
- cs->hw.hfcpci.int_m1 &= ~(HFCPCI_INTS_B1TRANS + HFCPCI_INTS_B1REC);
- }
- break;
- }
- Write_hfc(cs, HFCPCI_SCTRL_E, cs->hw.hfcpci.sctrl_e);
- Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
- Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
- Write_hfc(cs, HFCPCI_SCTRL, cs->hw.hfcpci.sctrl);
- Write_hfc(cs, HFCPCI_SCTRL_R, cs->hw.hfcpci.sctrl_r);
- Write_hfc(cs, HFCPCI_CTMT, cs->hw.hfcpci.ctmt);
- Write_hfc(cs, HFCPCI_CONNECT, cs->hw.hfcpci.conn);
-}
-
-/******************************/
-/* Layer2 -> Layer 1 Transfer */
-/******************************/
-static void
-hfcpci_l2l1(struct PStack *st, int pr, void *arg)
-{
- struct BCState *bcs = st->l1.bcs;
- u_long flags;
- struct sk_buff *skb = arg;
-
- switch (pr) {
- case (PH_DATA | REQUEST):
- spin_lock_irqsave(&bcs->cs->lock, flags);
- if (bcs->tx_skb) {
- skb_queue_tail(&bcs->squeue, skb);
- } else {
- bcs->tx_skb = skb;
-// test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
- bcs->cs->BC_Send_Data(bcs);
- }
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
- break;
- case (PH_PULL | INDICATION):
- spin_lock_irqsave(&bcs->cs->lock, flags);
- if (bcs->tx_skb) {
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
- printk(KERN_WARNING "hfc_l2l1: this shouldn't happen\n");
- break;
- }
-// test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
- bcs->tx_skb = skb;
- bcs->cs->BC_Send_Data(bcs);
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
- break;
- case (PH_PULL | REQUEST):
- if (!bcs->tx_skb) {
- test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
- st->l1.l1l2(st, PH_PULL | CONFIRM, NULL);
- } else
- test_and_set_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
- break;
- case (PH_ACTIVATE | REQUEST):
- spin_lock_irqsave(&bcs->cs->lock, flags);
- test_and_set_bit(BC_FLG_ACTIV, &bcs->Flag);
- mode_hfcpci(bcs, st->l1.mode, st->l1.bc);
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
- l1_msg_b(st, pr, arg);
- break;
- case (PH_DEACTIVATE | REQUEST):
- l1_msg_b(st, pr, arg);
- break;
- case (PH_DEACTIVATE | CONFIRM):
- spin_lock_irqsave(&bcs->cs->lock, flags);
- test_and_clear_bit(BC_FLG_ACTIV, &bcs->Flag);
- test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
- mode_hfcpci(bcs, 0, st->l1.bc);
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
- st->l1.l1l2(st, PH_DEACTIVATE | CONFIRM, NULL);
- break;
- }
-}
-
-/******************************************/
-/* deactivate B-channel access and queues */
-/******************************************/
-static void
-close_hfcpci(struct BCState *bcs)
-{
- mode_hfcpci(bcs, 0, bcs->channel);
- if (test_and_clear_bit(BC_FLG_INIT, &bcs->Flag)) {
- skb_queue_purge(&bcs->rqueue);
- skb_queue_purge(&bcs->squeue);
- if (bcs->tx_skb) {
- dev_kfree_skb_any(bcs->tx_skb);
- bcs->tx_skb = NULL;
- test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
- }
- }
-}
-
-/*************************************/
-/* init B-channel queues and control */
-/*************************************/
-static int
-open_hfcpcistate(struct IsdnCardState *cs, struct BCState *bcs)
-{
- if (!test_and_set_bit(BC_FLG_INIT, &bcs->Flag)) {
- skb_queue_head_init(&bcs->rqueue);
- skb_queue_head_init(&bcs->squeue);
- }
- bcs->tx_skb = NULL;
- test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
- bcs->event = 0;
- bcs->tx_cnt = 0;
- return (0);
-}
-
-/*********************************/
-/* inits the stack for B-channel */
-/*********************************/
-static int
-setstack_2b(struct PStack *st, struct BCState *bcs)
-{
- bcs->channel = st->l1.bc;
- if (open_hfcpcistate(st->l1.hardware, bcs))
- return (-1);
- st->l1.bcs = bcs;
- st->l2.l2l1 = hfcpci_l2l1;
- setstack_manager(st);
- bcs->st = st;
- setstack_l1_B(st);
- return (0);
-}
-
-/***************************/
-/* handle L1 state changes */
-/***************************/
-static void
-hfcpci_bh(struct work_struct *work)
-{
- struct IsdnCardState *cs =
- container_of(work, struct IsdnCardState, tqueue);
- u_long flags;
-// struct PStack *stptr;
-
- if (test_and_clear_bit(D_L1STATECHANGE, &cs->event)) {
- if (!cs->hw.hfcpci.nt_mode)
- switch (cs->dc.hfcpci.ph_state) {
- case (0):
- l1_msg(cs, HW_RESET | INDICATION, NULL);
- break;
- case (3):
- l1_msg(cs, HW_DEACTIVATE | INDICATION, NULL);
- break;
- case (8):
- l1_msg(cs, HW_RSYNC | INDICATION, NULL);
- break;
- case (6):
- l1_msg(cs, HW_INFO2 | INDICATION, NULL);
- break;
- case (7):
- l1_msg(cs, HW_INFO4_P8 | INDICATION, NULL);
- break;
- default:
- break;
- } else {
- spin_lock_irqsave(&cs->lock, flags);
- switch (cs->dc.hfcpci.ph_state) {
- case (2):
- if (cs->hw.hfcpci.nt_timer < 0) {
- cs->hw.hfcpci.nt_timer = 0;
- cs->hw.hfcpci.int_m1 &= ~HFCPCI_INTS_TIMER;
- Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
- /* Clear already pending ints */
- Read_hfc(cs, HFCPCI_INT_S1);
- Write_hfc(cs, HFCPCI_STATES, 4 | HFCPCI_LOAD_STATE);
- udelay(10);
- Write_hfc(cs, HFCPCI_STATES, 4);
- cs->dc.hfcpci.ph_state = 4;
- } else {
- cs->hw.hfcpci.int_m1 |= HFCPCI_INTS_TIMER;
- Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
- cs->hw.hfcpci.ctmt &= ~HFCPCI_AUTO_TIMER;
- cs->hw.hfcpci.ctmt |= HFCPCI_TIM3_125;
- Write_hfc(cs, HFCPCI_CTMT, cs->hw.hfcpci.ctmt | HFCPCI_CLTIMER);
- Write_hfc(cs, HFCPCI_CTMT, cs->hw.hfcpci.ctmt | HFCPCI_CLTIMER);
- cs->hw.hfcpci.nt_timer = NT_T1_COUNT;
- Write_hfc(cs, HFCPCI_STATES, 2 | HFCPCI_NT_G2_G3); /* allow G2 -> G3 transition */
- }
- break;
- case (1):
- case (3):
- case (4):
- cs->hw.hfcpci.nt_timer = 0;
- cs->hw.hfcpci.int_m1 &= ~HFCPCI_INTS_TIMER;
- Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
- break;
- default:
- break;
- }
- spin_unlock_irqrestore(&cs->lock, flags);
- }
- }
- if (test_and_clear_bit(D_RCVBUFREADY, &cs->event))
- DChannel_proc_rcv(cs);
- if (test_and_clear_bit(D_XMTBUFREADY, &cs->event))
- DChannel_proc_xmt(cs);
-}
-
-
-/********************************/
-/* called for card init message */
-/********************************/
-static void
-inithfcpci(struct IsdnCardState *cs)
-{
- cs->bcs[0].BC_SetStack = setstack_2b;
- cs->bcs[1].BC_SetStack = setstack_2b;
- cs->bcs[0].BC_Close = close_hfcpci;
- cs->bcs[1].BC_Close = close_hfcpci;
- timer_setup(&cs->dbusytimer, hfcpci_dbusy_timer, 0);
- mode_hfcpci(cs->bcs, 0, 0);
- mode_hfcpci(cs->bcs + 1, 0, 1);
-}
-
-
-
-/*******************************************/
-/* handle card messages from control layer */
-/*******************************************/
-static int
-hfcpci_card_msg(struct IsdnCardState *cs, int mt, void *arg)
-{
- u_long flags;
-
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "HFCPCI: card_msg %x", mt);
- switch (mt) {
- case CARD_RESET:
- spin_lock_irqsave(&cs->lock, flags);
- reset_hfcpci(cs);
- spin_unlock_irqrestore(&cs->lock, flags);
- return (0);
- case CARD_RELEASE:
- release_io_hfcpci(cs);
- return (0);
- case CARD_INIT:
- spin_lock_irqsave(&cs->lock, flags);
- inithfcpci(cs);
- reset_hfcpci(cs);
- spin_unlock_irqrestore(&cs->lock, flags);
- msleep(80); /* Timeout 80ms */
- /* now switch timer interrupt off */
- spin_lock_irqsave(&cs->lock, flags);
- cs->hw.hfcpci.int_m1 &= ~HFCPCI_INTS_TIMER;
- Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
- /* reinit mode reg */
- Write_hfc(cs, HFCPCI_MST_MODE, cs->hw.hfcpci.mst_m);
- spin_unlock_irqrestore(&cs->lock, flags);
- return (0);
- case CARD_TEST:
- return (0);
- }
- return (0);
-}
-
-
-/* this variable is used as card index when more than one card is present */
-static struct pci_dev *dev_hfcpci = NULL;
-
-int
-setup_hfcpci(struct IsdnCard *card)
-{
- u_long flags;
- struct IsdnCardState *cs = card->cs;
- char tmp[64];
- int i;
- struct pci_dev *tmp_hfcpci = NULL;
-
- strcpy(tmp, hfcpci_revision);
- printk(KERN_INFO "HiSax: HFC-PCI driver Rev. %s\n", HiSax_getrev(tmp));
-
- cs->hw.hfcpci.int_s1 = 0;
- cs->dc.hfcpci.ph_state = 0;
- cs->hw.hfcpci.fifo = 255;
- if (cs->typ != ISDN_CTYPE_HFC_PCI)
- return (0);
-
- i = 0;
- while (id_list[i].vendor_id) {
- tmp_hfcpci = hisax_find_pci_device(id_list[i].vendor_id,
- id_list[i].device_id,
- dev_hfcpci);
- i++;
- if (tmp_hfcpci) {
- dma_addr_t dma_mask = DMA_BIT_MASK(32) & ~0x7fffUL;
- if (pci_enable_device(tmp_hfcpci))
- continue;
- if (pci_set_dma_mask(tmp_hfcpci, dma_mask)) {
- printk(KERN_WARNING
- "HiSax hfc_pci: No suitable DMA available.\n");
- continue;
- }
- if (pci_set_consistent_dma_mask(tmp_hfcpci, dma_mask)) {
- printk(KERN_WARNING
- "HiSax hfc_pci: No suitable consistent DMA available.\n");
- continue;
- }
- pci_set_master(tmp_hfcpci);
- if ((card->para[0]) && (card->para[0] != (tmp_hfcpci->resource[0].start & PCI_BASE_ADDRESS_IO_MASK)))
- continue;
- else
- break;
- }
- }
-
- if (!tmp_hfcpci) {
- printk(KERN_WARNING "HFC-PCI: No PCI card found\n");
- return (0);
- }
-
- i--;
- dev_hfcpci = tmp_hfcpci; /* old device */
- cs->hw.hfcpci.dev = dev_hfcpci;
- cs->irq = dev_hfcpci->irq;
- if (!cs->irq) {
- printk(KERN_WARNING "HFC-PCI: No IRQ for PCI card found\n");
- return (0);
- }
- cs->hw.hfcpci.pci_io = ioremap(dev_hfcpci->resource[1].start, 256);
- printk(KERN_INFO "HiSax: HFC-PCI card manufacturer: %s card name: %s\n", id_list[i].vendor_name, id_list[i].card_name);
-
- if (!cs->hw.hfcpci.pci_io) {
- printk(KERN_WARNING "HFC-PCI: No IO-Mem for PCI card found\n");
- return (0);
- }
-
- /* Allocate memory for FIFOS */
- cs->hw.hfcpci.fifos = pci_alloc_consistent(cs->hw.hfcpci.dev,
- 0x8000, &cs->hw.hfcpci.dma);
- if (!cs->hw.hfcpci.fifos) {
- printk(KERN_WARNING "HFC-PCI: Error allocating FIFO memory!\n");
- return 0;
- }
- if (cs->hw.hfcpci.dma & 0x7fff) {
- printk(KERN_WARNING
- "HFC-PCI: Error DMA memory not on 32K boundary (%lx)\n",
- (u_long)cs->hw.hfcpci.dma);
- pci_free_consistent(cs->hw.hfcpci.dev, 0x8000,
- cs->hw.hfcpci.fifos, cs->hw.hfcpci.dma);
- return 0;
- }
- pci_write_config_dword(cs->hw.hfcpci.dev, 0x80, (u32)cs->hw.hfcpci.dma);
- printk(KERN_INFO
- "HFC-PCI: defined at mem %p fifo %p(%lx) IRQ %d HZ %d\n",
- cs->hw.hfcpci.pci_io,
- cs->hw.hfcpci.fifos,
- (u_long)cs->hw.hfcpci.dma,
- cs->irq, HZ);
-
- spin_lock_irqsave(&cs->lock, flags);
-
- pci_write_config_word(cs->hw.hfcpci.dev, PCI_COMMAND, PCI_ENA_MEMIO); /* enable memory mapped ports, disable busmaster */
-	cs->hw.hfcpci.int_m2 = 0;	/* disable all interrupts */
- cs->hw.hfcpci.int_m1 = 0;
- Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
- Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2);
- /* At this point the needed PCI config is done */
- /* fifos are still not enabled */
-
- INIT_WORK(&cs->tqueue, hfcpci_bh);
- cs->setstack_d = setstack_hfcpci;
- cs->BC_Send_Data = &hfcpci_send_data;
- cs->readisac = NULL;
- cs->writeisac = NULL;
- cs->readisacfifo = NULL;
- cs->writeisacfifo = NULL;
- cs->BC_Read_Reg = NULL;
- cs->BC_Write_Reg = NULL;
- cs->irq_func = &hfcpci_interrupt;
- cs->irq_flags |= IRQF_SHARED;
- timer_setup(&cs->hw.hfcpci.timer, hfcpci_Timer, 0);
- cs->cardmsg = &hfcpci_card_msg;
- cs->auxcmd = &hfcpci_auxcmd;
-
- spin_unlock_irqrestore(&cs->lock, flags);
-
- return (1);
-}
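setup_hfcpci() obtains a 32 KiB coherent DMA block with pci_alloc_consistent() and rejects it if it does not sit on a 32 KiB boundary. For reference, a sketch of the same allocation with the generic DMA API that later replaced the pci_* wrappers (assumes a struct pci_dev *pdev; the helper name is illustrative):

static void *alloc_hfcpci_fifos(struct pci_dev *pdev, dma_addr_t *dma)
{
	/* 32 KiB of coherent memory shared between driver and chip */
	void *fifos = dma_alloc_coherent(&pdev->dev, 0x8000, dma, GFP_KERNEL);

	if (!fifos)
		return NULL;
	if (*dma & 0x7fff) {
		/* chip requires a 32K-aligned block */
		dma_free_coherent(&pdev->dev, 0x8000, fifos, *dma);
		return NULL;
	}
	return fifos;
}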
diff --git a/drivers/isdn/hisax/hfc_pci.h b/drivers/isdn/hisax/hfc_pci.h
deleted file mode 100644
index 4c3b3ba35726..000000000000
--- a/drivers/isdn/hisax/hfc_pci.h
+++ /dev/null
@@ -1,235 +0,0 @@
-/* $Id: hfc_pci.h,v 1.10.2.2 2004/01/12 22:52:26 keil Exp $
- *
- * specific defines for CCD's HFC 2BDS0 PCI chips
- *
- * Author Werner Cornelius
- * Copyright by Werner Cornelius <werner@isdn4linux.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-/*********************************************/
-/* thresholds for transparent B-channel mode */
-/* change mask and threshold simultaneously */
-/*********************************************/
-#define HFCPCI_BTRANS_THRESHOLD 128
-#define HFCPCI_BTRANS_THRESMASK 0x00
-
-
-
-/* defines for PCI config */
-
-#define PCI_ENA_MEMIO 0x02
-#define PCI_ENA_MASTER 0x04
-
-
-/* GCI/IOM bus monitor registers */
-
-#define HCFPCI_C_I 0x08
-#define HFCPCI_TRxR 0x0C
-#define HFCPCI_MON1_D 0x28
-#define HFCPCI_MON2_D 0x2C
-
-
-/* GCI/IOM bus timeslot registers */
-
-#define HFCPCI_B1_SSL 0x80
-#define HFCPCI_B2_SSL 0x84
-#define HFCPCI_AUX1_SSL 0x88
-#define HFCPCI_AUX2_SSL 0x8C
-#define HFCPCI_B1_RSL 0x90
-#define HFCPCI_B2_RSL 0x94
-#define HFCPCI_AUX1_RSL 0x98
-#define HFCPCI_AUX2_RSL 0x9C
-
-/* GCI/IOM bus data registers */
-
-#define HFCPCI_B1_D 0xA0
-#define HFCPCI_B2_D 0xA4
-#define HFCPCI_AUX1_D 0xA8
-#define HFCPCI_AUX2_D 0xAC
-
-/* GCI/IOM bus configuration registers */
-
-#define HFCPCI_MST_EMOD 0xB4
-#define HFCPCI_MST_MODE 0xB8
-#define HFCPCI_CONNECT 0xBC
-
-
-/* Interrupt and status registers */
-
-#define HFCPCI_FIFO_EN 0x44
-#define HFCPCI_TRM 0x48
-#define HFCPCI_B_MODE 0x4C
-#define HFCPCI_CHIP_ID 0x58
-#define HFCPCI_CIRM 0x60
-#define HFCPCI_CTMT 0x64
-#define HFCPCI_INT_M1 0x68
-#define HFCPCI_INT_M2 0x6C
-#define HFCPCI_INT_S1 0x78
-#define HFCPCI_INT_S2 0x7C
-#define HFCPCI_STATUS 0x70
-
-/* S/T section registers */
-
-#define HFCPCI_STATES 0xC0
-#define HFCPCI_SCTRL 0xC4
-#define HFCPCI_SCTRL_E 0xC8
-#define HFCPCI_SCTRL_R 0xCC
-#define HFCPCI_SQ 0xD0
-#define HFCPCI_CLKDEL 0xDC
-#define HFCPCI_B1_REC 0xF0
-#define HFCPCI_B1_SEND 0xF0
-#define HFCPCI_B2_REC 0xF4
-#define HFCPCI_B2_SEND 0xF4
-#define HFCPCI_D_REC 0xF8
-#define HFCPCI_D_SEND 0xF8
-#define HFCPCI_E_REC 0xFC
-
-
-/* bits in status register (READ) */
-#define HFCPCI_PCI_PROC 0x02
-#define HFCPCI_NBUSY 0x04
-#define HFCPCI_TIMER_ELAP 0x10
-#define HFCPCI_STATINT 0x20
-#define HFCPCI_FRAMEINT 0x40
-#define HFCPCI_ANYINT 0x80
-
-/* bits in CTMT (Write) */
-#define HFCPCI_CLTIMER 0x80
-#define HFCPCI_TIM3_125 0x04
-#define HFCPCI_TIM25 0x10
-#define HFCPCI_TIM50 0x14
-#define HFCPCI_TIM400 0x18
-#define HFCPCI_TIM800 0x1C
-#define HFCPCI_AUTO_TIMER 0x20
-#define HFCPCI_TRANSB2 0x02
-#define HFCPCI_TRANSB1 0x01
-
-/* bits in CIRM (Write) */
-#define HFCPCI_AUX_MSK 0x07
-#define HFCPCI_RESET 0x08
-#define HFCPCI_B1_REV 0x40
-#define HFCPCI_B2_REV 0x80
-
-/* bits in INT_M1 and INT_S1 */
-#define HFCPCI_INTS_B1TRANS 0x01
-#define HFCPCI_INTS_B2TRANS 0x02
-#define HFCPCI_INTS_DTRANS 0x04
-#define HFCPCI_INTS_B1REC 0x08
-#define HFCPCI_INTS_B2REC 0x10
-#define HFCPCI_INTS_DREC 0x20
-#define HFCPCI_INTS_L1STATE 0x40
-#define HFCPCI_INTS_TIMER 0x80
-
-/* bits in INT_M2 */
-#define HFCPCI_PROC_TRANS 0x01
-#define HFCPCI_GCI_I_CHG 0x02
-#define HFCPCI_GCI_MON_REC 0x04
-#define HFCPCI_IRQ_ENABLE 0x08
-#define HFCPCI_PMESEL 0x80
-
-/* bits in STATES */
-#define HFCPCI_STATE_MSK 0x0F
-#define HFCPCI_LOAD_STATE 0x10
-#define HFCPCI_ACTIVATE 0x20
-#define HFCPCI_DO_ACTION 0x40
-#define HFCPCI_NT_G2_G3 0x80
-
-/* bits in HFCD_MST_MODE */
-#define HFCPCI_MASTER 0x01
-#define HFCPCI_SLAVE 0x00
-/* remaining bits are for codecs control */
-
-/* bits in HFCD_SCTRL */
-#define SCTRL_B1_ENA 0x01
-#define SCTRL_B2_ENA 0x02
-#define SCTRL_MODE_TE 0x00
-#define SCTRL_MODE_NT 0x04
-#define SCTRL_LOW_PRIO 0x08
-#define SCTRL_SQ_ENA 0x10
-#define SCTRL_TEST 0x20
-#define SCTRL_NONE_CAP 0x40
-#define SCTRL_PWR_DOWN 0x80
-
-/* bits in SCTRL_E */
-#define HFCPCI_AUTO_AWAKE 0x01
-#define HFCPCI_DBIT_1 0x04
-#define HFCPCI_IGNORE_COL 0x08
-#define HFCPCI_CHG_B1_B2 0x80
-
-/****************************/
-/* bits in FIFO_EN register */
-/****************************/
-#define HFCPCI_FIFOEN_B1 0x03
-#define HFCPCI_FIFOEN_B2 0x0C
-#define HFCPCI_FIFOEN_DTX 0x10
-#define HFCPCI_FIFOEN_B1TX 0x01
-#define HFCPCI_FIFOEN_B1RX 0x02
-#define HFCPCI_FIFOEN_B2TX 0x04
-#define HFCPCI_FIFOEN_B2RX 0x08
-
-
-/***********************************/
-/* definitions of fifo memory area */
-/***********************************/
-#define MAX_D_FRAMES 15
-#define MAX_B_FRAMES 31
-#define B_SUB_VAL 0x200
-#define B_FIFO_SIZE (0x2000 - B_SUB_VAL)
-#define D_FIFO_SIZE 512
-#define D_FREG_MASK 0xF
-
-typedef struct {
- unsigned short z1; /* Z1 pointer 16 Bit */
- unsigned short z2; /* Z2 pointer 16 Bit */
-} z_type;
-
-typedef struct {
- u_char data[D_FIFO_SIZE]; /* FIFO data space */
- u_char fill1[0x20A0 - D_FIFO_SIZE]; /* reserved, do not use */
- u_char f1, f2; /* f pointers */
- u_char fill2[0x20C0 - 0x20A2]; /* reserved, do not use */
- z_type za[MAX_D_FRAMES + 1]; /* mask index with D_FREG_MASK for access */
- u_char fill3[0x4000 - 0x2100]; /* align 16K */
-} dfifo_type;
-
-typedef struct {
- z_type za[MAX_B_FRAMES + 1]; /* only range 0x0..0x1F allowed */
- u_char f1, f2; /* f pointers */
- u_char fill[0x2100 - 0x2082]; /* alignment */
-} bzfifo_type;
-
-
-typedef union {
- struct {
- dfifo_type d_tx; /* D-send channel */
- dfifo_type d_rx; /* D-receive channel */
- } d_chan;
- struct {
- u_char fill1[0x200];
- u_char txdat_b1[B_FIFO_SIZE];
- bzfifo_type txbz_b1;
-
- bzfifo_type txbz_b2;
- u_char txdat_b2[B_FIFO_SIZE];
-
- u_char fill2[D_FIFO_SIZE];
-
- u_char rxdat_b1[B_FIFO_SIZE];
- bzfifo_type rxbz_b1;
-
- bzfifo_type rxbz_b2;
- u_char rxdat_b2[B_FIFO_SIZE];
- } b_chans;
- u_char fill[32768];
-} fifo_area;
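The union above is overlaid byte-for-byte on the 32 KiB DMA block allocated in hfc_pci.c, so every fill member exists purely to pin the following field to the offset the chip expects. A build-time check along these lines would make that contract explicit (a sketch; the original header carries no such assertion):

/* fifo_area must describe exactly the 0x8000-byte block handed to the chip */
_Static_assert(sizeof(fifo_area) == 0x8000,
	       "fifo_area layout does not match the 32K FIFO block");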
-
-
-#define Write_hfc(a, b, c) (writeb(c, (a->hw.hfcpci.pci_io) + b))
-#define Read_hfc(a, b) (readb((a->hw.hfcpci.pci_io) + b))
-
-extern void main_irq_hcpci(struct BCState *bcs);
-extern void releasehfcpci(struct IsdnCardState *cs);
diff --git a/drivers/isdn/hisax/hfc_sx.c b/drivers/isdn/hisax/hfc_sx.c
deleted file mode 100644
index 12af628d9b2c..000000000000
--- a/drivers/isdn/hisax/hfc_sx.c
+++ /dev/null
@@ -1,1517 +0,0 @@
-/* $Id: hfc_sx.c,v 1.12.2.5 2004/02/11 13:21:33 keil Exp $
- *
- * level driver for Cologne Chip Designs hfc-s+/sp based cards
- *
- * Author Werner Cornelius
- * based on existing driver for CCD HFC PCI cards
- * Copyright by Werner Cornelius <werner@isdn4linux.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/init.h>
-#include "hisax.h"
-#include "hfc_sx.h"
-#include "isdnl1.h"
-#include <linux/interrupt.h>
-#include <linux/isapnp.h>
-#include <linux/slab.h>
-
-static const char *hfcsx_revision = "$Revision: 1.12.2.5 $";
-
-/***************************************/
-/* IRQ-table for CCDs demo board */
-/* IRQs 6,5,10,11,12,15 are supported */
-/***************************************/
-
-/* Teles 16.3c Vendor Id TAG2620, Version 1.0, Vendor version 2.1
- *
- * Thanks to Uwe Wisniewski
- *
- * ISA-SLOT Signal PIN
- * B25 IRQ3 92 IRQ_G
- * B23 IRQ5 94 IRQ_A
- * B4 IRQ2/9 95 IRQ_B
- * D3 IRQ10 96 IRQ_C
- * D4 IRQ11 97 IRQ_D
- * D5 IRQ12 98 IRQ_E
- * D6 IRQ15 99 IRQ_F
- */
-
-#undef CCD_DEMO_BOARD
-#ifdef CCD_DEMO_BOARD
-static u_char ccd_sp_irqtab[16] = {
- 0, 0, 0, 0, 0, 2, 1, 0, 0, 0, 3, 4, 5, 0, 0, 6
-};
-#else /* Teles 16.3c */
-static u_char ccd_sp_irqtab[16] = {
- 0, 0, 0, 7, 0, 1, 0, 0, 0, 2, 3, 4, 5, 0, 0, 6
-};
-#endif
-#define NT_T1_COUNT 20 /* number of 3.125ms interrupts for G2 timeout */
-
-#define byteout(addr, val) outb(val, addr)
-#define bytein(addr) inb(addr)
-
-/******************************/
-/* In/Out access to registers */
-/******************************/
-static inline void
-Write_hfc(struct IsdnCardState *cs, u_char regnum, u_char val)
-{
- byteout(cs->hw.hfcsx.base + 1, regnum);
- byteout(cs->hw.hfcsx.base, val);
-}
-
-static inline u_char
-Read_hfc(struct IsdnCardState *cs, u_char regnum)
-{
- u_char ret;
-
- byteout(cs->hw.hfcsx.base + 1, regnum);
- ret = bytein(cs->hw.hfcsx.base);
- return (ret);
-}
-
-
-/**************************************************/
-/* select a fifo and remember which one for reuse */
-/**************************************************/
-static void
-fifo_select(struct IsdnCardState *cs, u_char fifo)
-{
- if (fifo == cs->hw.hfcsx.last_fifo)
- return; /* still valid */
-
- byteout(cs->hw.hfcsx.base + 1, HFCSX_FIF_SEL);
- byteout(cs->hw.hfcsx.base, fifo);
- while (bytein(cs->hw.hfcsx.base + 1) & 1); /* wait for busy */
- udelay(4);
- byteout(cs->hw.hfcsx.base, fifo);
- while (bytein(cs->hw.hfcsx.base + 1) & 1); /* wait for busy */
-}
-
-/******************************************/
-/* reset the specified fifo to defaults. */
-/* If it's a send fifo, init needed markers */
-/******************************************/
-static void
-reset_fifo(struct IsdnCardState *cs, u_char fifo)
-{
- fifo_select(cs, fifo); /* first select the fifo */
- byteout(cs->hw.hfcsx.base + 1, HFCSX_CIRM);
- byteout(cs->hw.hfcsx.base, cs->hw.hfcsx.cirm | 0x80); /* reset cmd */
- udelay(1);
- while (bytein(cs->hw.hfcsx.base + 1) & 1); /* wait for busy */
-}
-
-
-/*************************************************************/
-/* write_fifo writes the skb contents to the desired fifo */
-/* if no space is available or an error occurs 0 is returned */
-/* the skb is not released in any way. */
-/*************************************************************/
-static int
-write_fifo(struct IsdnCardState *cs, struct sk_buff *skb, u_char fifo, int trans_max)
-{
- unsigned short *msp;
- int fifo_size, count, z1, z2;
- u_char f_msk, f1, f2, *src;
-
- if (skb->len <= 0) return (0);
- if (fifo & 1) return (0); /* no write fifo */
-
- fifo_select(cs, fifo);
- if (fifo & 4) {
- fifo_size = D_FIFO_SIZE; /* D-channel */
- f_msk = MAX_D_FRAMES;
- if (trans_max) return (0); /* only HDLC */
- }
- else {
- fifo_size = cs->hw.hfcsx.b_fifo_size; /* B-channel */
- f_msk = MAX_B_FRAMES;
- }
-
- z1 = Read_hfc(cs, HFCSX_FIF_Z1H);
- z1 = ((z1 << 8) | Read_hfc(cs, HFCSX_FIF_Z1L));
-
- /* Check for transparent mode */
- if (trans_max) {
- z2 = Read_hfc(cs, HFCSX_FIF_Z2H);
- z2 = ((z2 << 8) | Read_hfc(cs, HFCSX_FIF_Z2L));
- count = z2 - z1;
- if (count <= 0)
- count += fifo_size; /* free bytes */
- if (count < skb->len + 1) return (0); /* no room */
-		count = fifo_size - count;	/* bytes still not sent */
-		if (count > 2 * trans_max) return (0); /* delay too long */
- count = skb->len;
- src = skb->data;
- while (count--)
- Write_hfc(cs, HFCSX_FIF_DWR, *src++);
- return (1); /* success */
- }
-
- msp = ((struct hfcsx_extra *)(cs->hw.hfcsx.extra))->marker;
- msp += (((fifo >> 1) & 3) * (MAX_B_FRAMES + 1));
- f1 = Read_hfc(cs, HFCSX_FIF_F1) & f_msk;
- f2 = Read_hfc(cs, HFCSX_FIF_F2) & f_msk;
-
- count = f1 - f2; /* frame count actually buffered */
- if (count < 0)
- count += (f_msk + 1); /* if wrap around */
- if (count > f_msk - 1) {
- if (cs->debug & L1_DEB_ISAC_FIFO)
-			debugl1(cs, "hfcsx_write_fifo %d more than %d frames", fifo, f_msk - 1);
- return (0);
- }
-
- *(msp + f1) = z1; /* remember marker */
-
- if (cs->debug & L1_DEB_ISAC_FIFO)
- debugl1(cs, "hfcsx_write_fifo %d f1(%x) f2(%x) z1(f1)(%x)",
- fifo, f1, f2, z1);
- /* now determine free bytes in FIFO buffer */
- count = *(msp + f2) - z1;
- if (count <= 0)
- count += fifo_size; /* count now contains available bytes */
-
- if (cs->debug & L1_DEB_ISAC_FIFO)
- debugl1(cs, "hfcsx_write_fifo %d count(%u/%d)",
- fifo, skb->len, count);
- if (count < skb->len) {
- if (cs->debug & L1_DEB_ISAC_FIFO)
- debugl1(cs, "hfcsx_write_fifo %d no fifo mem", fifo);
- return (0);
- }
-
- count = skb->len; /* get frame len */
- src = skb->data; /* source pointer */
- while (count--)
- Write_hfc(cs, HFCSX_FIF_DWR, *src++);
-
- Read_hfc(cs, HFCSX_FIF_INCF1); /* increment F1 */
- udelay(1);
- while (bytein(cs->hw.hfcsx.base + 1) & 1); /* wait for busy */
- return (1);
-}
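For transparent (non-HDLC) fifos, write_fifo() above admits a frame only if it fits into the free space and if the data already queued stays below twice the transparent threshold, which bounds the buffering delay. The same check, pulled out into a standalone sketch (names are illustrative):

/* Decide whether a transparent frame of "frame_len" bytes may be queued. */
static int trans_tx_room(int z1, int z2, int fifo_size,
			 int frame_len, int trans_max)
{
	int free = z2 - z1;

	if (free <= 0)
		free += fifo_size;		/* free bytes in the ring */
	if (free < frame_len + 1)
		return 0;			/* frame would not fit */
	if (fifo_size - free > 2 * trans_max)
		return 0;			/* too much still queued */
	return 1;
}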
-
-/***************************************************************/
-/* read_fifo reads data to an skb from the desired fifo */
-/* if no data is available or an error occurs NULL is returned */
-/* the skb is not released in any way. */
-/***************************************************************/
-static struct sk_buff *
-read_fifo(struct IsdnCardState *cs, u_char fifo, int trans_max)
-{
-	int fifo_size, count, z1, z2;
- u_char f_msk, f1, f2, *dst;
- struct sk_buff *skb;
-
- if (!(fifo & 1)) return (NULL); /* no read fifo */
- fifo_select(cs, fifo);
- if (fifo & 4) {
- fifo_size = D_FIFO_SIZE; /* D-channel */
- f_msk = MAX_D_FRAMES;
- if (trans_max) return (NULL); /* only hdlc */
- }
- else {
- fifo_size = cs->hw.hfcsx.b_fifo_size; /* B-channel */
- f_msk = MAX_B_FRAMES;
- }
-
- /* transparent mode */
- if (trans_max) {
- z1 = Read_hfc(cs, HFCSX_FIF_Z1H);
- z1 = ((z1 << 8) | Read_hfc(cs, HFCSX_FIF_Z1L));
- z2 = Read_hfc(cs, HFCSX_FIF_Z2H);
- z2 = ((z2 << 8) | Read_hfc(cs, HFCSX_FIF_Z2L));
- /* now determine bytes in actual FIFO buffer */
- count = z1 - z2;
- if (count <= 0)
- count += fifo_size; /* count now contains buffered bytes */
- count++;
- if (count > trans_max)
- count = trans_max; /* limit length */
- skb = dev_alloc_skb(count);
- if (skb) {
- dst = skb_put(skb, count);
- while (count--)
- *dst++ = Read_hfc(cs, HFCSX_FIF_DRD);
- return skb;
- } else
- return NULL; /* no memory */
- }
-
- do {
- f1 = Read_hfc(cs, HFCSX_FIF_F1) & f_msk;
- f2 = Read_hfc(cs, HFCSX_FIF_F2) & f_msk;
-
- if (f1 == f2) return (NULL); /* no frame available */
-
- z1 = Read_hfc(cs, HFCSX_FIF_Z1H);
- z1 = ((z1 << 8) | Read_hfc(cs, HFCSX_FIF_Z1L));
- z2 = Read_hfc(cs, HFCSX_FIF_Z2H);
- z2 = ((z2 << 8) | Read_hfc(cs, HFCSX_FIF_Z2L));
-
- if (cs->debug & L1_DEB_ISAC_FIFO)
- debugl1(cs, "hfcsx_read_fifo %d f1(%x) f2(%x) z1(f2)(%x) z2(f2)(%x)",
- fifo, f1, f2, z1, z2);
- /* now determine bytes in actual FIFO buffer */
- count = z1 - z2;
- if (count <= 0)
- count += fifo_size; /* count now contains buffered bytes */
- count++;
-
- if (cs->debug & L1_DEB_ISAC_FIFO)
-			debugl1(cs, "hfcsx_read_fifo %d count(%u)",
- fifo, count);
-
- if ((count > fifo_size) || (count < 4)) {
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "hfcsx_read_fifo %d packet inv. len %d ", fifo , count);
- while (count) {
- count--; /* empty fifo */
- Read_hfc(cs, HFCSX_FIF_DRD);
- }
- skb = NULL;
- } else
- if ((skb = dev_alloc_skb(count - 3))) {
- count -= 3;
- dst = skb_put(skb, count);
-
- while (count--)
- *dst++ = Read_hfc(cs, HFCSX_FIF_DRD);
-
- Read_hfc(cs, HFCSX_FIF_DRD); /* CRC 1 */
- Read_hfc(cs, HFCSX_FIF_DRD); /* CRC 2 */
- if (Read_hfc(cs, HFCSX_FIF_DRD)) {
- dev_kfree_skb_irq(skb);
- if (cs->debug & L1_DEB_ISAC_FIFO)
- debugl1(cs, "hfcsx_read_fifo %d crc error", fifo);
- skb = NULL;
- }
- } else {
- printk(KERN_WARNING "HFC-SX: receive out of memory\n");
- return (NULL);
- }
-
- Read_hfc(cs, HFCSX_FIF_INCF2); /* increment F2 */
- udelay(1);
- while (bytein(cs->hw.hfcsx.base + 1) & 1); /* wait for busy */
- udelay(1);
- } while (!skb); /* retry in case of crc error */
- return (skb);
-}
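Both the PCI and the SX receive paths treat the last three bytes of a stored HDLC frame as non-payload: two CRC bytes appended by the chip and a status byte that is zero only for a good CRC. A short sketch of that trailer handling (a hypothetical helper that mirrors the checks above):

/* Return the payload length of a received HDLC frame, or -1 if the frame
 * is too short or its status byte reports a CRC error. */
static int hdlc_payload_len(const unsigned char *frame, int len)
{
	if (len < 4)
		return -1;		/* shorter than CRC + status */
	if (frame[len - 1] != 0)
		return -1;		/* status byte: CRC error */
	return len - 3;			/* strip 2 CRC bytes + status */
}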
-
-/******************************************/
-/* free hardware resources used by driver */
-/******************************************/
-static void
-release_io_hfcsx(struct IsdnCardState *cs)
-{
- cs->hw.hfcsx.int_m2 = 0; /* interrupt output off ! */
- Write_hfc(cs, HFCSX_INT_M2, cs->hw.hfcsx.int_m2);
- Write_hfc(cs, HFCSX_CIRM, HFCSX_RESET); /* Reset On */
- msleep(30); /* Timeout 30ms */
- Write_hfc(cs, HFCSX_CIRM, 0); /* Reset Off */
- del_timer(&cs->hw.hfcsx.timer);
- release_region(cs->hw.hfcsx.base, 2); /* release IO-Block */
- kfree(cs->hw.hfcsx.extra);
- cs->hw.hfcsx.extra = NULL;
-}
-
-/**********************************************************/
-/* set_fifo_size determines the size of the RAM and FIFOs */
-/* returning 0 -> need to reset the chip again. */
-/**********************************************************/
-static int set_fifo_size(struct IsdnCardState *cs)
-{
-
- if (cs->hw.hfcsx.b_fifo_size) return (1); /* already determined */
-
- if ((cs->hw.hfcsx.chip >> 4) == 9) {
- cs->hw.hfcsx.b_fifo_size = B_FIFO_SIZE_32K;
- return (1);
- }
-
- cs->hw.hfcsx.b_fifo_size = B_FIFO_SIZE_8K;
- cs->hw.hfcsx.cirm |= 0x10; /* only 8K of ram */
- return (0);
-
-}
-
-/********************************************************************************/
-/* function called to reset the HFC SX chip. A complete software reset of chip */
-/* and fifos is done. */
-/********************************************************************************/
-static void
-reset_hfcsx(struct IsdnCardState *cs)
-{
- cs->hw.hfcsx.int_m2 = 0; /* interrupt output off ! */
- Write_hfc(cs, HFCSX_INT_M2, cs->hw.hfcsx.int_m2);
-
- printk(KERN_INFO "HFC_SX: resetting card\n");
- while (1) {
- Write_hfc(cs, HFCSX_CIRM, HFCSX_RESET | cs->hw.hfcsx.cirm); /* Reset */
- mdelay(30);
- Write_hfc(cs, HFCSX_CIRM, cs->hw.hfcsx.cirm); /* Reset Off */
- mdelay(20);
- if (Read_hfc(cs, HFCSX_STATUS) & 2)
- printk(KERN_WARNING "HFC-SX init bit busy\n");
- cs->hw.hfcsx.last_fifo = 0xff; /* invalidate */
- if (!set_fifo_size(cs)) continue;
- break;
- }
-
- cs->hw.hfcsx.trm = 0 + HFCSX_BTRANS_THRESMASK; /* no echo connect , threshold */
- Write_hfc(cs, HFCSX_TRM, cs->hw.hfcsx.trm);
-
- Write_hfc(cs, HFCSX_CLKDEL, 0x0e); /* ST-Bit delay for TE-Mode */
- cs->hw.hfcsx.sctrl_e = HFCSX_AUTO_AWAKE;
- Write_hfc(cs, HFCSX_SCTRL_E, cs->hw.hfcsx.sctrl_e); /* S/T Auto awake */
- cs->hw.hfcsx.bswapped = 0; /* no exchange */
- cs->hw.hfcsx.nt_mode = 0; /* we are in TE mode */
- cs->hw.hfcsx.ctmt = HFCSX_TIM3_125 | HFCSX_AUTO_TIMER;
- Write_hfc(cs, HFCSX_CTMT, cs->hw.hfcsx.ctmt);
-
- cs->hw.hfcsx.int_m1 = HFCSX_INTS_DTRANS | HFCSX_INTS_DREC |
- HFCSX_INTS_L1STATE | HFCSX_INTS_TIMER;
- Write_hfc(cs, HFCSX_INT_M1, cs->hw.hfcsx.int_m1);
-
- /* Clear already pending ints */
- Read_hfc(cs, HFCSX_INT_S1);
-
- Write_hfc(cs, HFCSX_STATES, HFCSX_LOAD_STATE | 2); /* HFC ST 2 */
- udelay(10);
- Write_hfc(cs, HFCSX_STATES, 2); /* HFC ST 2 */
- cs->hw.hfcsx.mst_m = HFCSX_MASTER; /* HFC Master Mode */
-
- Write_hfc(cs, HFCSX_MST_MODE, cs->hw.hfcsx.mst_m);
- cs->hw.hfcsx.sctrl = 0x40; /* set tx_lo mode, error in datasheet ! */
- Write_hfc(cs, HFCSX_SCTRL, cs->hw.hfcsx.sctrl);
- cs->hw.hfcsx.sctrl_r = 0;
- Write_hfc(cs, HFCSX_SCTRL_R, cs->hw.hfcsx.sctrl_r);
-
- /* Init GCI/IOM2 in master mode */
- /* Slots 0 and 1 are set for B-chan 1 and 2 */
- /* D- and monitor/CI channel are not enabled */
- /* STIO1 is used as output for data, B1+B2 from ST->IOM+HFC */
- /* STIO2 is used as data input, B1+B2 from IOM->ST */
- /* ST B-channel send disabled -> continuous 1s */
- /* The IOM slots are always enabled */
- cs->hw.hfcsx.conn = 0x36; /* set data flow directions */
- Write_hfc(cs, HFCSX_CONNECT, cs->hw.hfcsx.conn);
- Write_hfc(cs, HFCSX_B1_SSL, 0x80); /* B1-Slot 0 STIO1 out enabled */
- Write_hfc(cs, HFCSX_B2_SSL, 0x81); /* B2-Slot 1 STIO1 out enabled */
- Write_hfc(cs, HFCSX_B1_RSL, 0x80); /* B1-Slot 0 STIO2 in enabled */
- Write_hfc(cs, HFCSX_B2_RSL, 0x81); /* B2-Slot 1 STIO2 in enabled */
-
- /* Finally enable IRQ output */
- cs->hw.hfcsx.int_m2 = HFCSX_IRQ_ENABLE;
- Write_hfc(cs, HFCSX_INT_M2, cs->hw.hfcsx.int_m2);
- Read_hfc(cs, HFCSX_INT_S2);
-}
-
-/***************************************************/
-/* Timer function called when kernel timer expires */
-/***************************************************/
-static void
-hfcsx_Timer(struct timer_list *t)
-{
- struct IsdnCardState *cs = from_timer(cs, t, hw.hfcsx.timer);
- cs->hw.hfcsx.timer.expires = jiffies + 75;
- /* WD RESET */
-/* WriteReg(cs, HFCD_DATA, HFCD_CTMT, cs->hw.hfcsx.ctmt | 0x80);
- add_timer(&cs->hw.hfcsx.timer);
-*/
-}
-
-/************************************************/
-/* select a b-channel entry matching and active */
-/************************************************/
-static
-struct BCState *
-Sel_BCS(struct IsdnCardState *cs, int channel)
-{
- if (cs->bcs[0].mode && (cs->bcs[0].channel == channel))
- return (&cs->bcs[0]);
- else if (cs->bcs[1].mode && (cs->bcs[1].channel == channel))
- return (&cs->bcs[1]);
- else
- return (NULL);
-}
-
-/*******************************/
-/* D-channel receive procedure */
-/*******************************/
-static
-int
-receive_dmsg(struct IsdnCardState *cs)
-{
- struct sk_buff *skb;
- int count = 5;
-
- if (test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
- debugl1(cs, "rec_dmsg blocked");
- return (1);
- }
-
- do {
- skb = read_fifo(cs, HFCSX_SEL_D_RX, 0);
- if (skb) {
- skb_queue_tail(&cs->rq, skb);
- schedule_event(cs, D_RCVBUFREADY);
- }
- } while (--count && skb);
-
- test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
- return (1);
-}
-
-/**********************************/
-/* B-channel main receive routine */
-/**********************************/
-static void
-main_rec_hfcsx(struct BCState *bcs)
-{
- struct IsdnCardState *cs = bcs->cs;
- int count = 5;
- struct sk_buff *skb;
-
-Begin:
- count--;
- if (test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
- debugl1(cs, "rec_data %d blocked", bcs->channel);
- return;
- }
- skb = read_fifo(cs, ((bcs->channel) && (!cs->hw.hfcsx.bswapped)) ?
- HFCSX_SEL_B2_RX : HFCSX_SEL_B1_RX,
- (bcs->mode == L1_MODE_TRANS) ?
- HFCSX_BTRANS_THRESHOLD : 0);
-
- if (skb) {
- skb_queue_tail(&bcs->rqueue, skb);
- schedule_event(bcs, B_RCVBUFREADY);
- }
-
- test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
- if (count && skb)
- goto Begin;
- return;
-}
-
-/**************************/
-/* D-channel send routine */
-/**************************/
-static void
-hfcsx_fill_dfifo(struct IsdnCardState *cs)
-{
- if (!cs->tx_skb)
- return;
- if (cs->tx_skb->len <= 0)
- return;
-
- if (write_fifo(cs, cs->tx_skb, HFCSX_SEL_D_TX, 0)) {
- dev_kfree_skb_any(cs->tx_skb);
- cs->tx_skb = NULL;
- }
- return;
-}
-
-/**************************/
-/* B-channel send routine */
-/**************************/
-static void
-hfcsx_fill_fifo(struct BCState *bcs)
-{
- struct IsdnCardState *cs = bcs->cs;
-
- if (!bcs->tx_skb)
- return;
- if (bcs->tx_skb->len <= 0)
- return;
-
- if (write_fifo(cs, bcs->tx_skb,
- ((bcs->channel) && (!cs->hw.hfcsx.bswapped)) ?
- HFCSX_SEL_B2_TX : HFCSX_SEL_B1_TX,
- (bcs->mode == L1_MODE_TRANS) ?
- HFCSX_BTRANS_THRESHOLD : 0)) {
-
- bcs->tx_cnt -= bcs->tx_skb->len;
- if (test_bit(FLG_LLI_L1WAKEUP, &bcs->st->lli.flag) &&
- (PACKET_NOACK != bcs->tx_skb->pkt_type)) {
- u_long flags;
- spin_lock_irqsave(&bcs->aclock, flags);
- bcs->ackcnt += bcs->tx_skb->len;
- spin_unlock_irqrestore(&bcs->aclock, flags);
- schedule_event(bcs, B_ACKPENDING);
- }
- dev_kfree_skb_any(bcs->tx_skb);
- bcs->tx_skb = NULL;
- test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
- }
-}
-
-/**********************************************/
-/* D-channel l1 state call for leased NT-mode */
-/**********************************************/
-static void
-dch_nt_l2l1(struct PStack *st, int pr, void *arg)
-{
- struct IsdnCardState *cs = (struct IsdnCardState *) st->l1.hardware;
-
- switch (pr) {
- case (PH_DATA | REQUEST):
- case (PH_PULL | REQUEST):
- case (PH_PULL | INDICATION):
- st->l1.l1hw(st, pr, arg);
- break;
- case (PH_ACTIVATE | REQUEST):
- st->l1.l1l2(st, PH_ACTIVATE | CONFIRM, NULL);
- break;
- case (PH_TESTLOOP | REQUEST):
- if (1 & (long) arg)
- debugl1(cs, "PH_TEST_LOOP B1");
- if (2 & (long) arg)
- debugl1(cs, "PH_TEST_LOOP B2");
- if (!(3 & (long) arg))
- debugl1(cs, "PH_TEST_LOOP DISABLED");
- st->l1.l1hw(st, HW_TESTLOOP | REQUEST, arg);
- break;
- default:
- if (cs->debug)
- debugl1(cs, "dch_nt_l2l1 msg %04X unhandled", pr);
- break;
- }
-}
-
-
-
-/***********************/
-/* set/reset echo mode */
-/***********************/
-static int
-hfcsx_auxcmd(struct IsdnCardState *cs, isdn_ctrl *ic)
-{
- unsigned long flags;
- int i = *(unsigned int *) ic->parm.num;
-
- if ((ic->arg == 98) &&
- (!(cs->hw.hfcsx.int_m1 & (HFCSX_INTS_B2TRANS + HFCSX_INTS_B2REC + HFCSX_INTS_B1TRANS + HFCSX_INTS_B1REC)))) {
- spin_lock_irqsave(&cs->lock, flags);
- Write_hfc(cs, HFCSX_STATES, HFCSX_LOAD_STATE | 0); /* HFC ST G0 */
- udelay(10);
- cs->hw.hfcsx.sctrl |= SCTRL_MODE_NT;
- Write_hfc(cs, HFCSX_SCTRL, cs->hw.hfcsx.sctrl); /* set NT-mode */
- udelay(10);
- Write_hfc(cs, HFCSX_STATES, HFCSX_LOAD_STATE | 1); /* HFC ST G1 */
- udelay(10);
- Write_hfc(cs, HFCSX_STATES, 1 | HFCSX_ACTIVATE | HFCSX_DO_ACTION);
- cs->dc.hfcsx.ph_state = 1;
- cs->hw.hfcsx.nt_mode = 1;
- cs->hw.hfcsx.nt_timer = 0;
- spin_unlock_irqrestore(&cs->lock, flags);
- cs->stlist->l2.l2l1 = dch_nt_l2l1;
- debugl1(cs, "NT mode activated");
- return (0);
- }
- if ((cs->chanlimit > 1) || (cs->hw.hfcsx.bswapped) ||
- (cs->hw.hfcsx.nt_mode) || (ic->arg != 12))
- return (-EINVAL);
-
- if (i) {
- cs->logecho = 1;
- cs->hw.hfcsx.trm |= 0x20; /* enable echo chan */
- cs->hw.hfcsx.int_m1 |= HFCSX_INTS_B2REC;
- /* reset Channel !!!!! */
- } else {
- cs->logecho = 0;
- cs->hw.hfcsx.trm &= ~0x20; /* disable echo chan */
- cs->hw.hfcsx.int_m1 &= ~HFCSX_INTS_B2REC;
- }
- cs->hw.hfcsx.sctrl_r &= ~SCTRL_B2_ENA;
- cs->hw.hfcsx.sctrl &= ~SCTRL_B2_ENA;
- cs->hw.hfcsx.conn |= 0x10; /* B2-IOM -> B2-ST */
- cs->hw.hfcsx.ctmt &= ~2;
- spin_lock_irqsave(&cs->lock, flags);
- Write_hfc(cs, HFCSX_CTMT, cs->hw.hfcsx.ctmt);
- Write_hfc(cs, HFCSX_SCTRL_R, cs->hw.hfcsx.sctrl_r);
- Write_hfc(cs, HFCSX_SCTRL, cs->hw.hfcsx.sctrl);
- Write_hfc(cs, HFCSX_CONNECT, cs->hw.hfcsx.conn);
- Write_hfc(cs, HFCSX_TRM, cs->hw.hfcsx.trm);
- Write_hfc(cs, HFCSX_INT_M1, cs->hw.hfcsx.int_m1);
- spin_unlock_irqrestore(&cs->lock, flags);
- return (0);
-} /* hfcsx_auxcmd */
-
-/*****************************/
-/* E-channel receive routine */
-/*****************************/
-static void
-receive_emsg(struct IsdnCardState *cs)
-{
- int count = 5;
- u_char *ptr;
- struct sk_buff *skb;
-
- if (test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
- debugl1(cs, "echo_rec_data blocked");
- return;
- }
- do {
- skb = read_fifo(cs, HFCSX_SEL_B2_RX, 0);
- if (skb) {
- if (cs->debug & DEB_DLOG_HEX) {
- ptr = cs->dlog;
- if ((skb->len) < MAX_DLOG_SPACE / 3 - 10) {
- *ptr++ = 'E';
- *ptr++ = 'C';
- *ptr++ = 'H';
- *ptr++ = 'O';
- *ptr++ = ':';
- ptr += QuickHex(ptr, skb->data, skb->len);
- ptr--;
- *ptr++ = '\n';
- *ptr = 0;
- HiSax_putstatus(cs, NULL, cs->dlog);
- } else
- HiSax_putstatus(cs, "LogEcho: ", "warning Frame too big (%d)", skb->len);
- }
- dev_kfree_skb_any(skb);
- }
- } while (--count && skb);
-
- test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
- return;
-} /* receive_emsg */
-
-
-/*********************/
-/* Interrupt handler */
-/*********************/
-static irqreturn_t
-hfcsx_interrupt(int intno, void *dev_id)
-{
- struct IsdnCardState *cs = dev_id;
- u_char exval;
- struct BCState *bcs;
- int count = 15;
- u_long flags;
- u_char val, stat;
-
- if (!(cs->hw.hfcsx.int_m2 & 0x08))
- return IRQ_NONE; /* not initialised */
-
- spin_lock_irqsave(&cs->lock, flags);
- if (HFCSX_ANYINT & (stat = Read_hfc(cs, HFCSX_STATUS))) {
- val = Read_hfc(cs, HFCSX_INT_S1);
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "HFC-SX: stat(%02x) s1(%02x)", stat, val);
- } else {
- spin_unlock_irqrestore(&cs->lock, flags);
- return IRQ_NONE;
- }
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "HFC-SX irq %x %s", val,
- test_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags) ?
- "locked" : "unlocked");
- val &= cs->hw.hfcsx.int_m1;
- if (val & 0x40) { /* state machine irq */
- exval = Read_hfc(cs, HFCSX_STATES) & 0xf;
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "ph_state chg %d->%d", cs->dc.hfcsx.ph_state,
- exval);
- cs->dc.hfcsx.ph_state = exval;
- schedule_event(cs, D_L1STATECHANGE);
- val &= ~0x40;
- }
- if (val & 0x80) { /* timer irq */
- if (cs->hw.hfcsx.nt_mode) {
- if ((--cs->hw.hfcsx.nt_timer) < 0)
- schedule_event(cs, D_L1STATECHANGE);
- }
- val &= ~0x80;
- Write_hfc(cs, HFCSX_CTMT, cs->hw.hfcsx.ctmt | HFCSX_CLTIMER);
- }
- while (val) {
- if (test_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
- cs->hw.hfcsx.int_s1 |= val;
- spin_unlock_irqrestore(&cs->lock, flags);
- return IRQ_HANDLED;
- }
- if (cs->hw.hfcsx.int_s1 & 0x18) {
- exval = val;
- val = cs->hw.hfcsx.int_s1;
- cs->hw.hfcsx.int_s1 = exval;
- }
- if (val & 0x08) {
- if (!(bcs = Sel_BCS(cs, cs->hw.hfcsx.bswapped ? 1 : 0))) {
- if (cs->debug)
- debugl1(cs, "hfcsx spurious 0x08 IRQ");
- } else
- main_rec_hfcsx(bcs);
- }
- if (val & 0x10) {
- if (cs->logecho)
- receive_emsg(cs);
- else if (!(bcs = Sel_BCS(cs, 1))) {
- if (cs->debug)
- debugl1(cs, "hfcsx spurious 0x10 IRQ");
- } else
- main_rec_hfcsx(bcs);
- }
- if (val & 0x01) {
- if (!(bcs = Sel_BCS(cs, cs->hw.hfcsx.bswapped ? 1 : 0))) {
- if (cs->debug)
- debugl1(cs, "hfcsx spurious 0x01 IRQ");
- } else {
- if (bcs->tx_skb) {
- if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
- hfcsx_fill_fifo(bcs);
- test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
- } else
- debugl1(cs, "fill_data %d blocked", bcs->channel);
- } else {
- if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) {
- if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
- hfcsx_fill_fifo(bcs);
- test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
- } else
- debugl1(cs, "fill_data %d blocked", bcs->channel);
- } else {
- schedule_event(bcs, B_XMTBUFREADY);
- }
- }
- }
- }
- if (val & 0x02) {
- if (!(bcs = Sel_BCS(cs, 1))) {
- if (cs->debug)
- debugl1(cs, "hfcsx spurious 0x02 IRQ");
- } else {
- if (bcs->tx_skb) {
- if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
- hfcsx_fill_fifo(bcs);
- test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
- } else
- debugl1(cs, "fill_data %d blocked", bcs->channel);
- } else {
- if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) {
- if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
- hfcsx_fill_fifo(bcs);
- test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
- } else
- debugl1(cs, "fill_data %d blocked", bcs->channel);
- } else {
- schedule_event(bcs, B_XMTBUFREADY);
- }
- }
- }
- }
- if (val & 0x20) { /* receive dframe */
- receive_dmsg(cs);
- }
- if (val & 0x04) { /* dframe transmitted */
- if (test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags))
- del_timer(&cs->dbusytimer);
- if (test_and_clear_bit(FLG_L1_DBUSY, &cs->HW_Flags))
- schedule_event(cs, D_CLEARBUSY);
- if (cs->tx_skb) {
- if (cs->tx_skb->len) {
- if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
- hfcsx_fill_dfifo(cs);
- test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
- } else {
- debugl1(cs, "hfcsx_fill_dfifo irq blocked");
- }
- goto afterXPR;
- } else {
- dev_kfree_skb_irq(cs->tx_skb);
- cs->tx_cnt = 0;
- cs->tx_skb = NULL;
- }
- }
- if ((cs->tx_skb = skb_dequeue(&cs->sq))) {
- cs->tx_cnt = 0;
- if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
- hfcsx_fill_dfifo(cs);
- test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
- } else {
- debugl1(cs, "hfcsx_fill_dfifo irq blocked");
- }
- } else
- schedule_event(cs, D_XMTBUFREADY);
- }
- afterXPR:
- if (cs->hw.hfcsx.int_s1 && count--) {
- val = cs->hw.hfcsx.int_s1;
- cs->hw.hfcsx.int_s1 = 0;
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "HFC-SX irq %x loop %d", val, 15 - count);
- } else
- val = 0;
- }
- spin_unlock_irqrestore(&cs->lock, flags);
- return IRQ_HANDLED;
-}
-
-/********************************************************************/
-/* timer callback for D-chan busy resolution. Currently no function */
-/********************************************************************/
-static void
-hfcsx_dbusy_timer(struct timer_list *t)
-{
-}
-
-/*************************************/
-/* Layer 1 D-channel hardware access */
-/*************************************/
-static void
-HFCSX_l1hw(struct PStack *st, int pr, void *arg)
-{
- struct IsdnCardState *cs = (struct IsdnCardState *) st->l1.hardware;
- struct sk_buff *skb = arg;
- u_long flags;
-
- switch (pr) {
- case (PH_DATA | REQUEST):
- if (cs->debug & DEB_DLOG_HEX)
- LogFrame(cs, skb->data, skb->len);
- if (cs->debug & DEB_DLOG_VERBOSE)
- dlogframe(cs, skb, 0);
- spin_lock_irqsave(&cs->lock, flags);
- if (cs->tx_skb) {
- skb_queue_tail(&cs->sq, skb);
-#ifdef L2FRAME_DEBUG /* psa */
- if (cs->debug & L1_DEB_LAPD)
- Logl2Frame(cs, skb, "PH_DATA Queued", 0);
-#endif
- } else {
- cs->tx_skb = skb;
- cs->tx_cnt = 0;
-#ifdef L2FRAME_DEBUG /* psa */
- if (cs->debug & L1_DEB_LAPD)
- Logl2Frame(cs, skb, "PH_DATA", 0);
-#endif
- if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
- hfcsx_fill_dfifo(cs);
- test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
- } else
- debugl1(cs, "hfcsx_fill_dfifo blocked");
-
- }
- spin_unlock_irqrestore(&cs->lock, flags);
- break;
- case (PH_PULL | INDICATION):
- spin_lock_irqsave(&cs->lock, flags);
- if (cs->tx_skb) {
- if (cs->debug & L1_DEB_WARN)
-				debugl1(cs, " l2l1 tx_skb exists, this shouldn't happen");
- skb_queue_tail(&cs->sq, skb);
- spin_unlock_irqrestore(&cs->lock, flags);
- break;
- }
- if (cs->debug & DEB_DLOG_HEX)
- LogFrame(cs, skb->data, skb->len);
- if (cs->debug & DEB_DLOG_VERBOSE)
- dlogframe(cs, skb, 0);
- cs->tx_skb = skb;
- cs->tx_cnt = 0;
-#ifdef L2FRAME_DEBUG /* psa */
- if (cs->debug & L1_DEB_LAPD)
- Logl2Frame(cs, skb, "PH_DATA_PULLED", 0);
-#endif
- if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
- hfcsx_fill_dfifo(cs);
- test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
- } else
- debugl1(cs, "hfcsx_fill_dfifo blocked");
- spin_unlock_irqrestore(&cs->lock, flags);
- break;
- case (PH_PULL | REQUEST):
-#ifdef L2FRAME_DEBUG /* psa */
- if (cs->debug & L1_DEB_LAPD)
- debugl1(cs, "-> PH_REQUEST_PULL");
-#endif
- if (!cs->tx_skb) {
- test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
- st->l1.l1l2(st, PH_PULL | CONFIRM, NULL);
- } else
- test_and_set_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
- break;
- case (HW_RESET | REQUEST):
- spin_lock_irqsave(&cs->lock, flags);
- Write_hfc(cs, HFCSX_STATES, HFCSX_LOAD_STATE | 3); /* HFC ST 3 */
- udelay(6);
-		Write_hfc(cs, HFCSX_STATES, 3);	/* HFC ST 3 */
- cs->hw.hfcsx.mst_m |= HFCSX_MASTER;
- Write_hfc(cs, HFCSX_MST_MODE, cs->hw.hfcsx.mst_m);
- Write_hfc(cs, HFCSX_STATES, HFCSX_ACTIVATE | HFCSX_DO_ACTION);
- spin_unlock_irqrestore(&cs->lock, flags);
- l1_msg(cs, HW_POWERUP | CONFIRM, NULL);
- break;
- case (HW_ENABLE | REQUEST):
- spin_lock_irqsave(&cs->lock, flags);
- Write_hfc(cs, HFCSX_STATES, HFCSX_ACTIVATE | HFCSX_DO_ACTION);
- spin_unlock_irqrestore(&cs->lock, flags);
- break;
- case (HW_DEACTIVATE | REQUEST):
- spin_lock_irqsave(&cs->lock, flags);
- cs->hw.hfcsx.mst_m &= ~HFCSX_MASTER;
- Write_hfc(cs, HFCSX_MST_MODE, cs->hw.hfcsx.mst_m);
- spin_unlock_irqrestore(&cs->lock, flags);
- break;
- case (HW_INFO3 | REQUEST):
- spin_lock_irqsave(&cs->lock, flags);
- cs->hw.hfcsx.mst_m |= HFCSX_MASTER;
- Write_hfc(cs, HFCSX_MST_MODE, cs->hw.hfcsx.mst_m);
- spin_unlock_irqrestore(&cs->lock, flags);
- break;
- case (HW_TESTLOOP | REQUEST):
- spin_lock_irqsave(&cs->lock, flags);
- switch ((long) arg) {
- case (1):
- Write_hfc(cs, HFCSX_B1_SSL, 0x80); /* tx slot */
- Write_hfc(cs, HFCSX_B1_RSL, 0x80); /* rx slot */
- cs->hw.hfcsx.conn = (cs->hw.hfcsx.conn & ~7) | 1;
- Write_hfc(cs, HFCSX_CONNECT, cs->hw.hfcsx.conn);
- break;
- case (2):
- Write_hfc(cs, HFCSX_B2_SSL, 0x81); /* tx slot */
- Write_hfc(cs, HFCSX_B2_RSL, 0x81); /* rx slot */
- cs->hw.hfcsx.conn = (cs->hw.hfcsx.conn & ~0x38) | 0x08;
- Write_hfc(cs, HFCSX_CONNECT, cs->hw.hfcsx.conn);
- break;
- default:
- spin_unlock_irqrestore(&cs->lock, flags);
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "hfcsx_l1hw loop invalid %4lx", (unsigned long)arg);
- return;
- }
- cs->hw.hfcsx.trm |= 0x80; /* enable IOM-loop */
- Write_hfc(cs, HFCSX_TRM, cs->hw.hfcsx.trm);
- spin_unlock_irqrestore(&cs->lock, flags);
- break;
- default:
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "hfcsx_l1hw unknown pr %4x", pr);
- break;
- }
-}
-
-/***********************************************/
-/* called during init setting l1 stack pointer */
-/***********************************************/
-static void
-setstack_hfcsx(struct PStack *st, struct IsdnCardState *cs)
-{
- st->l1.l1hw = HFCSX_l1hw;
-}
-
-/**************************************/
-/* send B-channel data if not blocked */
-/**************************************/
-static void
-hfcsx_send_data(struct BCState *bcs)
-{
- struct IsdnCardState *cs = bcs->cs;
-
- if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
- hfcsx_fill_fifo(bcs);
- test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
- } else
- debugl1(cs, "send_data %d blocked", bcs->channel);
-}
-
-/***************************************************************/
-/* activate/deactivate hardware for selected channels and mode */
-/***************************************************************/
-static void
-mode_hfcsx(struct BCState *bcs, int mode, int bc)
-{
- struct IsdnCardState *cs = bcs->cs;
- int fifo2;
-
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "HFCSX bchannel mode %d bchan %d/%d",
- mode, bc, bcs->channel);
- bcs->mode = mode;
- bcs->channel = bc;
- fifo2 = bc;
- if (cs->chanlimit > 1) {
- cs->hw.hfcsx.bswapped = 0; /* B1 and B2 normal mode */
- cs->hw.hfcsx.sctrl_e &= ~0x80;
- } else {
- if (bc) {
- if (mode != L1_MODE_NULL) {
- cs->hw.hfcsx.bswapped = 1; /* B1 and B2 exchanged */
- cs->hw.hfcsx.sctrl_e |= 0x80;
- } else {
- cs->hw.hfcsx.bswapped = 0; /* B1 and B2 normal mode */
- cs->hw.hfcsx.sctrl_e &= ~0x80;
- }
- fifo2 = 0;
- } else {
- cs->hw.hfcsx.bswapped = 0; /* B1 and B2 normal mode */
- cs->hw.hfcsx.sctrl_e &= ~0x80;
- }
- }
- switch (mode) {
- case (L1_MODE_NULL):
- if (bc) {
- cs->hw.hfcsx.sctrl &= ~SCTRL_B2_ENA;
- cs->hw.hfcsx.sctrl_r &= ~SCTRL_B2_ENA;
- } else {
- cs->hw.hfcsx.sctrl &= ~SCTRL_B1_ENA;
- cs->hw.hfcsx.sctrl_r &= ~SCTRL_B1_ENA;
- }
- if (fifo2) {
- cs->hw.hfcsx.int_m1 &= ~(HFCSX_INTS_B2TRANS + HFCSX_INTS_B2REC);
- } else {
- cs->hw.hfcsx.int_m1 &= ~(HFCSX_INTS_B1TRANS + HFCSX_INTS_B1REC);
- }
- break;
- case (L1_MODE_TRANS):
- if (bc) {
- cs->hw.hfcsx.sctrl |= SCTRL_B2_ENA;
- cs->hw.hfcsx.sctrl_r |= SCTRL_B2_ENA;
- } else {
- cs->hw.hfcsx.sctrl |= SCTRL_B1_ENA;
- cs->hw.hfcsx.sctrl_r |= SCTRL_B1_ENA;
- }
- if (fifo2) {
- cs->hw.hfcsx.int_m1 |= (HFCSX_INTS_B2TRANS + HFCSX_INTS_B2REC);
- cs->hw.hfcsx.ctmt |= 2;
- cs->hw.hfcsx.conn &= ~0x18;
- } else {
- cs->hw.hfcsx.int_m1 |= (HFCSX_INTS_B1TRANS + HFCSX_INTS_B1REC);
- cs->hw.hfcsx.ctmt |= 1;
- cs->hw.hfcsx.conn &= ~0x03;
- }
- break;
- case (L1_MODE_HDLC):
- if (bc) {
- cs->hw.hfcsx.sctrl |= SCTRL_B2_ENA;
- cs->hw.hfcsx.sctrl_r |= SCTRL_B2_ENA;
- } else {
- cs->hw.hfcsx.sctrl |= SCTRL_B1_ENA;
- cs->hw.hfcsx.sctrl_r |= SCTRL_B1_ENA;
- }
- if (fifo2) {
- cs->hw.hfcsx.int_m1 |= (HFCSX_INTS_B2TRANS + HFCSX_INTS_B2REC);
- cs->hw.hfcsx.ctmt &= ~2;
- cs->hw.hfcsx.conn &= ~0x18;
- } else {
- cs->hw.hfcsx.int_m1 |= (HFCSX_INTS_B1TRANS + HFCSX_INTS_B1REC);
- cs->hw.hfcsx.ctmt &= ~1;
- cs->hw.hfcsx.conn &= ~0x03;
- }
- break;
- case (L1_MODE_EXTRN):
- if (bc) {
- cs->hw.hfcsx.conn |= 0x10;
- cs->hw.hfcsx.sctrl |= SCTRL_B2_ENA;
- cs->hw.hfcsx.sctrl_r |= SCTRL_B2_ENA;
- cs->hw.hfcsx.int_m1 &= ~(HFCSX_INTS_B2TRANS + HFCSX_INTS_B2REC);
- } else {
- cs->hw.hfcsx.conn |= 0x02;
- cs->hw.hfcsx.sctrl |= SCTRL_B1_ENA;
- cs->hw.hfcsx.sctrl_r |= SCTRL_B1_ENA;
- cs->hw.hfcsx.int_m1 &= ~(HFCSX_INTS_B1TRANS + HFCSX_INTS_B1REC);
- }
- break;
- }
- Write_hfc(cs, HFCSX_SCTRL_E, cs->hw.hfcsx.sctrl_e);
- Write_hfc(cs, HFCSX_INT_M1, cs->hw.hfcsx.int_m1);
- Write_hfc(cs, HFCSX_SCTRL, cs->hw.hfcsx.sctrl);
- Write_hfc(cs, HFCSX_SCTRL_R, cs->hw.hfcsx.sctrl_r);
- Write_hfc(cs, HFCSX_CTMT, cs->hw.hfcsx.ctmt);
- Write_hfc(cs, HFCSX_CONNECT, cs->hw.hfcsx.conn);
- if (mode != L1_MODE_EXTRN) {
- reset_fifo(cs, fifo2 ? HFCSX_SEL_B2_RX : HFCSX_SEL_B1_RX);
- reset_fifo(cs, fifo2 ? HFCSX_SEL_B2_TX : HFCSX_SEL_B1_TX);
- }
-}
-
-/******************************/
-/* Layer2 -> Layer 1 Transfer */
-/******************************/
-static void
-hfcsx_l2l1(struct PStack *st, int pr, void *arg)
-{
- struct BCState *bcs = st->l1.bcs;
- struct sk_buff *skb = arg;
- u_long flags;
-
- switch (pr) {
- case (PH_DATA | REQUEST):
- spin_lock_irqsave(&bcs->cs->lock, flags);
- if (bcs->tx_skb) {
- skb_queue_tail(&bcs->squeue, skb);
- } else {
- bcs->tx_skb = skb;
-// test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
- bcs->cs->BC_Send_Data(bcs);
- }
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
- break;
- case (PH_PULL | INDICATION):
- spin_lock_irqsave(&bcs->cs->lock, flags);
- if (bcs->tx_skb) {
- printk(KERN_WARNING "%s: this shouldn't happen\n",
- __func__);
- } else {
-// test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
- bcs->tx_skb = skb;
- bcs->cs->BC_Send_Data(bcs);
- }
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
- break;
- case (PH_PULL | REQUEST):
- if (!bcs->tx_skb) {
- test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
- st->l1.l1l2(st, PH_PULL | CONFIRM, NULL);
- } else
- test_and_set_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
- break;
- case (PH_ACTIVATE | REQUEST):
- spin_lock_irqsave(&bcs->cs->lock, flags);
- test_and_set_bit(BC_FLG_ACTIV, &bcs->Flag);
- mode_hfcsx(bcs, st->l1.mode, st->l1.bc);
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
- l1_msg_b(st, pr, arg);
- break;
- case (PH_DEACTIVATE | REQUEST):
- l1_msg_b(st, pr, arg);
- break;
- case (PH_DEACTIVATE | CONFIRM):
- spin_lock_irqsave(&bcs->cs->lock, flags);
- test_and_clear_bit(BC_FLG_ACTIV, &bcs->Flag);
- test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
- mode_hfcsx(bcs, 0, st->l1.bc);
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
- st->l1.l1l2(st, PH_DEACTIVATE | CONFIRM, NULL);
- break;
- }
-}
-
-/******************************************/
-/* deactivate B-channel access and queues */
-/******************************************/
-static void
-close_hfcsx(struct BCState *bcs)
-{
- mode_hfcsx(bcs, 0, bcs->channel);
- if (test_and_clear_bit(BC_FLG_INIT, &bcs->Flag)) {
- skb_queue_purge(&bcs->rqueue);
- skb_queue_purge(&bcs->squeue);
- if (bcs->tx_skb) {
- dev_kfree_skb_any(bcs->tx_skb);
- bcs->tx_skb = NULL;
- test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
- }
- }
-}
-
-/*************************************/
-/* init B-channel queues and control */
-/*************************************/
-static int
-open_hfcsxstate(struct IsdnCardState *cs, struct BCState *bcs)
-{
- if (!test_and_set_bit(BC_FLG_INIT, &bcs->Flag)) {
- skb_queue_head_init(&bcs->rqueue);
- skb_queue_head_init(&bcs->squeue);
- }
- bcs->tx_skb = NULL;
- test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
- bcs->event = 0;
- bcs->tx_cnt = 0;
- return (0);
-}
-
-/*********************************/
-/* inits the stack for B-channel */
-/*********************************/
-static int
-setstack_2b(struct PStack *st, struct BCState *bcs)
-{
- bcs->channel = st->l1.bc;
- if (open_hfcsxstate(st->l1.hardware, bcs))
- return (-1);
- st->l1.bcs = bcs;
- st->l2.l2l1 = hfcsx_l2l1;
- setstack_manager(st);
- bcs->st = st;
- setstack_l1_B(st);
- return (0);
-}
-
-/***************************/
-/* handle L1 state changes */
-/***************************/
-static void
-hfcsx_bh(struct work_struct *work)
-{
- struct IsdnCardState *cs =
- container_of(work, struct IsdnCardState, tqueue);
- u_long flags;
-
- if (test_and_clear_bit(D_L1STATECHANGE, &cs->event)) {
- if (!cs->hw.hfcsx.nt_mode)
- switch (cs->dc.hfcsx.ph_state) {
- case (0):
- l1_msg(cs, HW_RESET | INDICATION, NULL);
- break;
- case (3):
- l1_msg(cs, HW_DEACTIVATE | INDICATION, NULL);
- break;
- case (8):
- l1_msg(cs, HW_RSYNC | INDICATION, NULL);
- break;
- case (6):
- l1_msg(cs, HW_INFO2 | INDICATION, NULL);
- break;
- case (7):
- l1_msg(cs, HW_INFO4_P8 | INDICATION, NULL);
- break;
- default:
- break;
- } else {
- switch (cs->dc.hfcsx.ph_state) {
- case (2):
- spin_lock_irqsave(&cs->lock, flags);
- if (cs->hw.hfcsx.nt_timer < 0) {
- cs->hw.hfcsx.nt_timer = 0;
- cs->hw.hfcsx.int_m1 &= ~HFCSX_INTS_TIMER;
- Write_hfc(cs, HFCSX_INT_M1, cs->hw.hfcsx.int_m1);
- /* Clear already pending ints */
- Read_hfc(cs, HFCSX_INT_S1);
-
- Write_hfc(cs, HFCSX_STATES, 4 | HFCSX_LOAD_STATE);
- udelay(10);
- Write_hfc(cs, HFCSX_STATES, 4);
- cs->dc.hfcsx.ph_state = 4;
- } else {
- cs->hw.hfcsx.int_m1 |= HFCSX_INTS_TIMER;
- Write_hfc(cs, HFCSX_INT_M1, cs->hw.hfcsx.int_m1);
- cs->hw.hfcsx.ctmt &= ~HFCSX_AUTO_TIMER;
- cs->hw.hfcsx.ctmt |= HFCSX_TIM3_125;
- Write_hfc(cs, HFCSX_CTMT, cs->hw.hfcsx.ctmt | HFCSX_CLTIMER);
- Write_hfc(cs, HFCSX_CTMT, cs->hw.hfcsx.ctmt | HFCSX_CLTIMER);
- cs->hw.hfcsx.nt_timer = NT_T1_COUNT;
- Write_hfc(cs, HFCSX_STATES, 2 | HFCSX_NT_G2_G3); /* allow G2 -> G3 transition */
- }
- spin_unlock_irqrestore(&cs->lock, flags);
- break;
- case (1):
- case (3):
- case (4):
- spin_lock_irqsave(&cs->lock, flags);
- cs->hw.hfcsx.nt_timer = 0;
- cs->hw.hfcsx.int_m1 &= ~HFCSX_INTS_TIMER;
- Write_hfc(cs, HFCSX_INT_M1, cs->hw.hfcsx.int_m1);
- spin_unlock_irqrestore(&cs->lock, flags);
- break;
- default:
- break;
- }
- }
- }
- if (test_and_clear_bit(D_RCVBUFREADY, &cs->event))
- DChannel_proc_rcv(cs);
- if (test_and_clear_bit(D_XMTBUFREADY, &cs->event))
- DChannel_proc_xmt(cs);
-}
-
-
-/********************************/
-/* called for card init message */
-/********************************/
-static void inithfcsx(struct IsdnCardState *cs)
-{
- cs->setstack_d = setstack_hfcsx;
- cs->BC_Send_Data = &hfcsx_send_data;
- cs->bcs[0].BC_SetStack = setstack_2b;
- cs->bcs[1].BC_SetStack = setstack_2b;
- cs->bcs[0].BC_Close = close_hfcsx;
- cs->bcs[1].BC_Close = close_hfcsx;
- mode_hfcsx(cs->bcs, 0, 0);
- mode_hfcsx(cs->bcs + 1, 0, 1);
-}
-
-
-
-/*******************************************/
-/* handle card messages from control layer */
-/*******************************************/
-static int
-hfcsx_card_msg(struct IsdnCardState *cs, int mt, void *arg)
-{
- u_long flags;
-
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "HFCSX: card_msg %x", mt);
- switch (mt) {
- case CARD_RESET:
- spin_lock_irqsave(&cs->lock, flags);
- reset_hfcsx(cs);
- spin_unlock_irqrestore(&cs->lock, flags);
- return (0);
- case CARD_RELEASE:
- release_io_hfcsx(cs);
- return (0);
- case CARD_INIT:
- spin_lock_irqsave(&cs->lock, flags);
- inithfcsx(cs);
- spin_unlock_irqrestore(&cs->lock, flags);
- msleep(80); /* Timeout 80ms */
- /* now switch timer interrupt off */
- spin_lock_irqsave(&cs->lock, flags);
- cs->hw.hfcsx.int_m1 &= ~HFCSX_INTS_TIMER;
- Write_hfc(cs, HFCSX_INT_M1, cs->hw.hfcsx.int_m1);
- /* reinit mode reg */
- Write_hfc(cs, HFCSX_MST_MODE, cs->hw.hfcsx.mst_m);
- spin_unlock_irqrestore(&cs->lock, flags);
- return (0);
- case CARD_TEST:
- return (0);
- }
- return (0);
-}
-
-#ifdef __ISAPNP__
-static struct isapnp_device_id hfc_ids[] = {
- { ISAPNP_VENDOR('T', 'A', 'G'), ISAPNP_FUNCTION(0x2620),
- ISAPNP_VENDOR('T', 'A', 'G'), ISAPNP_FUNCTION(0x2620),
- (unsigned long) "Teles 16.3c2" },
- { 0, }
-};
-
-static struct isapnp_device_id *ipid = &hfc_ids[0];
-static struct pnp_card *pnp_c = NULL;
-#endif
-
-int setup_hfcsx(struct IsdnCard *card)
-{
- struct IsdnCardState *cs = card->cs;
- char tmp[64];
-
- strcpy(tmp, hfcsx_revision);
- printk(KERN_INFO "HiSax: HFC-SX driver Rev. %s\n", HiSax_getrev(tmp));
-#ifdef __ISAPNP__
- if (!card->para[1] && isapnp_present()) {
- struct pnp_dev *pnp_d;
- while (ipid->card_vendor) {
- if ((pnp_c = pnp_find_card(ipid->card_vendor,
- ipid->card_device, pnp_c))) {
- pnp_d = NULL;
- if ((pnp_d = pnp_find_dev(pnp_c,
- ipid->vendor, ipid->function, pnp_d))) {
- int err;
-
- printk(KERN_INFO "HiSax: %s detected\n",
- (char *)ipid->driver_data);
- pnp_disable_dev(pnp_d);
- err = pnp_activate_dev(pnp_d);
- if (err < 0) {
- printk(KERN_WARNING "%s: pnp_activate_dev ret(%d)\n",
- __func__, err);
- return (0);
- }
- card->para[1] = pnp_port_start(pnp_d, 0);
- card->para[0] = pnp_irq(pnp_d, 0);
- if (card->para[0] == -1 || !card->para[1]) {
-						printk(KERN_ERR "HFC PnP: some resources are missing %ld/%lx\n",
- card->para[0], card->para[1]);
- pnp_disable_dev(pnp_d);
- return (0);
- }
- break;
- } else {
-					printk(KERN_ERR "HFC PnP: PnP error: card found, but no device\n");
- }
- }
- ipid++;
- pnp_c = NULL;
- }
- if (!ipid->card_vendor) {
- printk(KERN_INFO "HFC PnP: no ISAPnP card found\n");
- return (0);
- }
- }
-#endif
- cs->hw.hfcsx.base = card->para[1] & 0xfffe;
- cs->irq = card->para[0];
- cs->hw.hfcsx.int_s1 = 0;
- cs->dc.hfcsx.ph_state = 0;
- cs->hw.hfcsx.fifo = 255;
- if ((cs->typ == ISDN_CTYPE_HFC_SX) ||
- (cs->typ == ISDN_CTYPE_HFC_SP_PCMCIA)) {
- if ((!cs->hw.hfcsx.base) || !request_region(cs->hw.hfcsx.base, 2, "HFCSX isdn")) {
- printk(KERN_WARNING
- "HiSax: HFC-SX io-base %#lx already in use\n",
- cs->hw.hfcsx.base);
- return (0);
- }
- byteout(cs->hw.hfcsx.base, cs->hw.hfcsx.base & 0xFF);
- byteout(cs->hw.hfcsx.base + 1,
- ((cs->hw.hfcsx.base >> 8) & 3) | 0x54);
- udelay(10);
- cs->hw.hfcsx.chip = Read_hfc(cs, HFCSX_CHIP_ID);
- switch (cs->hw.hfcsx.chip >> 4) {
- case 1:
- tmp[0] = '+';
- break;
- case 9:
- tmp[0] = 'P';
- break;
- default:
- printk(KERN_WARNING
- "HFC-SX: invalid chip id 0x%x\n",
- cs->hw.hfcsx.chip >> 4);
- release_region(cs->hw.hfcsx.base, 2);
- return (0);
- }
- if (!ccd_sp_irqtab[cs->irq & 0xF]) {
- printk(KERN_WARNING
- "HFC_SX: invalid irq %d specified\n", cs->irq & 0xF);
- release_region(cs->hw.hfcsx.base, 2);
- return (0);
- }
- if (!(cs->hw.hfcsx.extra =
- kmalloc(sizeof(struct hfcsx_extra), GFP_ATOMIC))) {
- release_region(cs->hw.hfcsx.base, 2);
- printk(KERN_WARNING "HFC-SX: unable to allocate memory\n");
- return (0);
- }
- printk(KERN_INFO "HFC-S%c chip detected at base 0x%x IRQ %d HZ %d\n",
- tmp[0], (u_int) cs->hw.hfcsx.base, cs->irq, HZ);
-		cs->hw.hfcsx.int_m2 = 0;	/* disable all interrupts */
- cs->hw.hfcsx.int_m1 = 0;
- Write_hfc(cs, HFCSX_INT_M1, cs->hw.hfcsx.int_m1);
- Write_hfc(cs, HFCSX_INT_M2, cs->hw.hfcsx.int_m2);
- } else
- return (0); /* no valid card type */
-
- timer_setup(&cs->dbusytimer, hfcsx_dbusy_timer, 0);
- INIT_WORK(&cs->tqueue, hfcsx_bh);
- cs->readisac = NULL;
- cs->writeisac = NULL;
- cs->readisacfifo = NULL;
- cs->writeisacfifo = NULL;
- cs->BC_Read_Reg = NULL;
- cs->BC_Write_Reg = NULL;
- cs->irq_func = &hfcsx_interrupt;
-
- cs->hw.hfcsx.b_fifo_size = 0; /* fifo size still unknown */
- cs->hw.hfcsx.cirm = ccd_sp_irqtab[cs->irq & 0xF]; /* RAM not evaluated */
- timer_setup(&cs->hw.hfcsx.timer, hfcsx_Timer, 0);
-
- reset_hfcsx(cs);
- cs->cardmsg = &hfcsx_card_msg;
- cs->auxcmd = &hfcsx_auxcmd;
- return (1);
-}
diff --git a/drivers/isdn/hisax/hfc_sx.h b/drivers/isdn/hisax/hfc_sx.h
deleted file mode 100644
index eee85dbb0883..000000000000
--- a/drivers/isdn/hisax/hfc_sx.h
+++ /dev/null
@@ -1,196 +0,0 @@
-/* $Id: hfc_sx.h,v 1.2.6.1 2001/09/23 22:24:48 kai Exp $
- *
- * specific defines for CCD's HFC 2BDS0 S+,SP chips
- *
- * Author Werner Cornelius
- * based on existing driver for CCD HFC PCI cards
- * Copyright by Werner Cornelius <werner@isdn4linux.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-/*********************************************/
-/* thresholds for transparent B-channel mode */
-/* change mask and threshold simultaneously */
-/*********************************************/
-#define HFCSX_BTRANS_THRESHOLD 128
-#define HFCSX_BTRANS_THRESMASK 0x00
-
-/* GCI/IOM bus monitor registers */
-
-#define HFCSX_C_I 0x02
-#define HFCSX_TRxR 0x03
-#define HFCSX_MON1_D 0x0A
-#define HFCSX_MON2_D 0x0B
-
-
-/* GCI/IOM bus timeslot registers */
-
-#define HFCSX_B1_SSL 0x20
-#define HFCSX_B2_SSL 0x21
-#define HFCSX_AUX1_SSL 0x22
-#define HFCSX_AUX2_SSL 0x23
-#define HFCSX_B1_RSL 0x24
-#define HFCSX_B2_RSL 0x25
-#define HFCSX_AUX1_RSL 0x26
-#define HFCSX_AUX2_RSL 0x27
-
-/* GCI/IOM bus data registers */
-
-#define HFCSX_B1_D 0x28
-#define HFCSX_B2_D 0x29
-#define HFCSX_AUX1_D 0x2A
-#define HFCSX_AUX2_D 0x2B
-
-/* GCI/IOM bus configuration registers */
-
-#define HFCSX_MST_EMOD 0x2D
-#define HFCSX_MST_MODE 0x2E
-#define HFCSX_CONNECT 0x2F
-
-
-/* Interrupt and status registers */
-
-#define HFCSX_TRM 0x12
-#define HFCSX_B_MODE 0x13
-#define HFCSX_CHIP_ID 0x16
-#define HFCSX_CIRM 0x18
-#define HFCSX_CTMT 0x19
-#define HFCSX_INT_M1 0x1A
-#define HFCSX_INT_M2 0x1B
-#define HFCSX_INT_S1 0x1E
-#define HFCSX_INT_S2 0x1F
-#define HFCSX_STATUS 0x1C
-
-/* S/T section registers */
-
-#define HFCSX_STATES 0x30
-#define HFCSX_SCTRL 0x31
-#define HFCSX_SCTRL_E 0x32
-#define HFCSX_SCTRL_R 0x33
-#define HFCSX_SQ 0x34
-#define HFCSX_CLKDEL 0x37
-#define HFCSX_B1_REC 0x3C
-#define HFCSX_B1_SEND 0x3C
-#define HFCSX_B2_REC 0x3D
-#define HFCSX_B2_SEND 0x3D
-#define HFCSX_D_REC 0x3E
-#define HFCSX_D_SEND 0x3E
-#define HFCSX_E_REC 0x3F
-
-/****************/
-/* FIFO section */
-/****************/
-#define HFCSX_FIF_SEL 0x10
-#define HFCSX_FIF_Z1L 0x80
-#define HFCSX_FIF_Z1H 0x84
-#define HFCSX_FIF_Z2L 0x88
-#define HFCSX_FIF_Z2H 0x8C
-#define HFCSX_FIF_INCF1 0xA8
-#define HFCSX_FIF_DWR 0xAC
-#define HFCSX_FIF_F1 0xB0
-#define HFCSX_FIF_F2 0xB4
-#define HFCSX_FIF_INCF2 0xB8
-#define HFCSX_FIF_DRD 0xBC
-
-/* bits in status register (READ) */
-#define HFCSX_SX_PROC 0x02
-#define HFCSX_NBUSY 0x04
-#define HFCSX_TIMER_ELAP 0x10
-#define HFCSX_STATINT 0x20
-#define HFCSX_FRAMEINT 0x40
-#define HFCSX_ANYINT 0x80
-
-/* bits in CTMT (Write) */
-#define HFCSX_CLTIMER 0x80
-#define HFCSX_TIM3_125 0x04
-#define HFCSX_TIM25 0x10
-#define HFCSX_TIM50 0x14
-#define HFCSX_TIM400 0x18
-#define HFCSX_TIM800 0x1C
-#define HFCSX_AUTO_TIMER 0x20
-#define HFCSX_TRANSB2 0x02
-#define HFCSX_TRANSB1 0x01
-
-/* bits in CIRM (Write) */
-#define HFCSX_IRQ_SELMSK 0x07
-#define HFCSX_IRQ_SELDIS 0x00
-#define HFCSX_RESET 0x08
-#define HFCSX_FIFO_RESET 0x80
-
-
-/* bits in INT_M1 and INT_S1 */
-#define HFCSX_INTS_B1TRANS 0x01
-#define HFCSX_INTS_B2TRANS 0x02
-#define HFCSX_INTS_DTRANS 0x04
-#define HFCSX_INTS_B1REC 0x08
-#define HFCSX_INTS_B2REC 0x10
-#define HFCSX_INTS_DREC 0x20
-#define HFCSX_INTS_L1STATE 0x40
-#define HFCSX_INTS_TIMER 0x80
-
-/* bits in INT_M2 */
-#define HFCSX_PROC_TRANS 0x01
-#define HFCSX_GCI_I_CHG 0x02
-#define HFCSX_GCI_MON_REC 0x04
-#define HFCSX_IRQ_ENABLE 0x08
-
-/* bits in STATES */
-#define HFCSX_STATE_MSK 0x0F
-#define HFCSX_LOAD_STATE 0x10
-#define HFCSX_ACTIVATE 0x20
-#define HFCSX_DO_ACTION 0x40
-#define HFCSX_NT_G2_G3 0x80
-
-/* bits in HFCD_MST_MODE */
-#define HFCSX_MASTER 0x01
-#define HFCSX_SLAVE 0x00
-/* remaining bits are for codecs control */
-
-/* bits in HFCD_SCTRL */
-#define SCTRL_B1_ENA 0x01
-#define SCTRL_B2_ENA 0x02
-#define SCTRL_MODE_TE 0x00
-#define SCTRL_MODE_NT 0x04
-#define SCTRL_LOW_PRIO 0x08
-#define SCTRL_SQ_ENA 0x10
-#define SCTRL_TEST 0x20
-#define SCTRL_NONE_CAP 0x40
-#define SCTRL_PWR_DOWN 0x80
-
-/* bits in SCTRL_E */
-#define HFCSX_AUTO_AWAKE 0x01
-#define HFCSX_DBIT_1 0x04
-#define HFCSX_IGNORE_COL 0x08
-#define HFCSX_CHG_B1_B2 0x80
-
-/**********************************/
-/* definitions for FIFO selection */
-/**********************************/
-#define HFCSX_SEL_D_RX 5
-#define HFCSX_SEL_D_TX 4
-#define HFCSX_SEL_B1_RX 1
-#define HFCSX_SEL_B1_TX 0
-#define HFCSX_SEL_B2_RX 3
-#define HFCSX_SEL_B2_TX 2
-
-#define MAX_D_FRAMES 15
-#define MAX_B_FRAMES 31
-#define B_SUB_VAL_32K 0x0200
-#define B_FIFO_SIZE_32K (0x2000 - B_SUB_VAL_32K)
-#define B_SUB_VAL_8K 0x1A00
-#define B_FIFO_SIZE_8K (0x2000 - B_SUB_VAL_8K)
-#define D_FIFO_SIZE 512
-#define D_FREG_MASK 0xF
-
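
Worked out, the values above give the following usable FIFO sizes (pure arithmetic on the defines, nothing assumed):

	/*
	 * B_FIFO_SIZE_32K = 0x2000 - 0x0200 = 0x1E00 (7680 bytes)
	 * B_FIFO_SIZE_8K  = 0x2000 - 0x1A00 = 0x0600 (1536 bytes)
	 * D_FIFO_SIZE     = 512 bytes, with the F-counters masked to 4 bits by D_FREG_MASK
	 */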
-/************************************************************/
-/* structure holding additional dynamic data -> send marker */
-/************************************************************/
-struct hfcsx_extra {
- unsigned short marker[2 * (MAX_B_FRAMES + 1) + (MAX_D_FRAMES + 1)];
-};
-
-extern void main_irq_hfcsx(struct BCState *bcs);
-extern void releasehfcsx(struct IsdnCardState *cs);
diff --git a/drivers/isdn/hisax/hfc_usb.c b/drivers/isdn/hisax/hfc_usb.c
deleted file mode 100644
index b6e58c11c288..000000000000
--- a/drivers/isdn/hisax/hfc_usb.c
+++ /dev/null
@@ -1,1594 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * hfc_usb.c
- *
- * $Id: hfc_usb.c,v 2.3.2.24 2007/10/14 08:40:29 mbachem Exp $
- *
- * modular HiSax ISDN driver for Colognechip HFC-S USB chip
- *
- * Authors : Peter Sprenger (sprenger@moving-bytes.de)
- * Martin Bachem (m.bachem@gmx.de, info@colognechip.com)
- *
- * based on the first hfc_usb driver of
- * Werner Cornelius (werner@isdn-development.de)
- *
- * See Version History at the bottom of this file
- */
-
-#include <linux/types.h>
-#include <linux/stddef.h>
-#include <linux/timer.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/kernel_stat.h>
-#include <linux/usb.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/moduleparam.h>
-#include <linux/slab.h>
-#include "hisax.h"
-#include "hisax_if.h"
-#include "hfc_usb.h"
-
-static const char *hfcusb_revision =
- "$Revision: 2.3.2.24 $ $Date: 2007/10/14 08:40:29 $ ";
-
-/* Hisax debug support
- * debug flags defined in hfc_usb.h as HFCUSB_DBG_[*]
- */
-#define __debug_variable hfc_debug
-#include "hisax_debug.h"
-static u_int debug;
-module_param(debug, uint, 0);
-static int hfc_debug;
-
-
-/* private vendor specific data */
-typedef struct {
- __u8 led_scheme; // led display scheme
- signed short led_bits[8]; // array of 8 possible LED bitmask settings
- char *vend_name; // device name
-} hfcsusb_vdata;
-
-/* VID/PID device list */
-static const struct usb_device_id hfcusb_idtab[] = {
- {
- USB_DEVICE(0x0959, 0x2bd0),
- .driver_info = (unsigned long) &((hfcsusb_vdata)
- {LED_OFF, {4, 0, 2, 1},
- "ISDN USB TA (Cologne Chip HFC-S USB based)"}),
- },
- {
- USB_DEVICE(0x0675, 0x1688),
- .driver_info = (unsigned long) &((hfcsusb_vdata)
- {LED_SCHEME1, {1, 2, 0, 0},
- "DrayTek miniVigor 128 USB ISDN TA"}),
- },
- {
- USB_DEVICE(0x07b0, 0x0007),
- .driver_info = (unsigned long) &((hfcsusb_vdata)
- {LED_SCHEME1, {0x80, -64, -32, -16},
- "Billion tiny USB ISDN TA 128"}),
- },
- {
- USB_DEVICE(0x0742, 0x2008),
- .driver_info = (unsigned long) &((hfcsusb_vdata)
- {LED_SCHEME1, {4, 0, 2, 1},
- "Stollmann USB TA"}),
- },
- {
- USB_DEVICE(0x0742, 0x2009),
- .driver_info = (unsigned long) &((hfcsusb_vdata)
- {LED_SCHEME1, {4, 0, 2, 1},
- "Aceex USB ISDN TA"}),
- },
- {
- USB_DEVICE(0x0742, 0x200A),
- .driver_info = (unsigned long) &((hfcsusb_vdata)
- {LED_SCHEME1, {4, 0, 2, 1},
- "OEM USB ISDN TA"}),
- },
- {
- USB_DEVICE(0x08e3, 0x0301),
- .driver_info = (unsigned long) &((hfcsusb_vdata)
- {LED_SCHEME1, {2, 0, 1, 4},
- "Olitec USB RNIS"}),
- },
- {
- USB_DEVICE(0x07fa, 0x0846),
- .driver_info = (unsigned long) &((hfcsusb_vdata)
- {LED_SCHEME1, {0x80, -64, -32, -16},
- "Bewan Modem RNIS USB"}),
- },
- {
- USB_DEVICE(0x07fa, 0x0847),
- .driver_info = (unsigned long) &((hfcsusb_vdata)
- {LED_SCHEME1, {0x80, -64, -32, -16},
- "Djinn Numeris USB"}),
- },
- {
- USB_DEVICE(0x07b0, 0x0006),
- .driver_info = (unsigned long) &((hfcsusb_vdata)
- {LED_SCHEME1, {0x80, -64, -32, -16},
- "Twister ISDN TA"}),
- },
- {
- USB_DEVICE(0x071d, 0x1005),
- .driver_info = (unsigned long) &((hfcsusb_vdata)
- {LED_SCHEME1, {0x02, 0, 0x01, 0x04},
- "Eicon DIVA USB 4.0"}),
- },
- { }
-};
-
-/* structure defining input+output fifos (interrupt/bulk mode) */
-struct usb_fifo; /* forward definition */
-typedef struct iso_urb_struct {
- struct urb *purb;
- __u8 buffer[ISO_BUFFER_SIZE]; /* buffer incoming/outgoing data */
- struct usb_fifo *owner_fifo; /* pointer to owner fifo */
-} iso_urb_struct;
-
-struct hfcusb_data; /* forward definition */
-
-typedef struct usb_fifo {
- int fifonum; /* fifo index attached to this structure */
- int active; /* fifo is currently active */
- struct hfcusb_data *hfc; /* pointer to main structure */
- int pipe; /* address of endpoint */
- __u8 usb_packet_maxlen; /* maximum length for usb transfer */
- unsigned int max_size; /* maximum size of receive/send packet */
- __u8 intervall; /* interrupt interval */
-	struct sk_buff *skbuff;	/* buffer currently in use */
- struct urb *urb; /* transfer structure for usb routines */
- __u8 buffer[128]; /* buffer incoming/outgoing data */
-	int bit_line;		/* how many bits are in the fifo? */
-
- volatile __u8 usb_transfer_mode; /* switched between ISO and INT */
-	iso_urb_struct iso[2];	/* need two URBs so that one is always pending */
- struct hisax_if *hif; /* hisax interface */
- int delete_flg; /* only delete skbuff once */
- int last_urblen; /* remember length of last packet */
-} usb_fifo;
-
-/* structure holding all data for one device */
-typedef struct hfcusb_data {
- /* HiSax Interface for loadable Layer1 drivers */
- struct hisax_d_if d_if; /* see hisax_if.h */
- struct hisax_b_if b_if[2]; /* see hisax_if.h */
- int protocol;
-
- struct usb_device *dev; /* our device */
- int if_used; /* used interface number */
- int alt_used; /* used alternate config */
- int ctrl_paksize; /* control pipe packet size */
- int ctrl_in_pipe, /* handles for control pipe */
- ctrl_out_pipe;
- int cfg_used; /* configuration index used */
- int vend_idx; /* vendor found */
- int b_mode[2]; /* B-channel mode */
- int l1_activated; /* layer 1 activated */
-	int disc_flag;		/* TRUE if device was disconnected, to avoid further USB actions */
- int packet_size, iso_packet_size;
-
- /* control pipe background handling */
- ctrl_buft ctrl_buff[HFC_CTRL_BUFSIZE]; /* buffer holding queued data */
- volatile int ctrl_in_idx, ctrl_out_idx, ctrl_cnt; /* input/output pointer + count */
- struct urb *ctrl_urb; /* transfer structure for control channel */
-
- struct usb_ctrlrequest ctrl_write; /* buffer for control write request */
- struct usb_ctrlrequest ctrl_read; /* same for read request */
-
- __u8 old_led_state, led_state;
-
-	volatile __u8 threshold_mask;	/* threshold mask most recently reported */
-	volatile __u8 bch_enables;	/* OR mask for sctrl_r and sctrl register values */
-
- usb_fifo fifos[HFCUSB_NUM_FIFOS]; /* structure holding all fifo data */
-
-	volatile __u8 l1_state;	/* current L1 state */
- struct timer_list t3_timer; /* timer 3 for activation/deactivation */
- struct timer_list t4_timer; /* timer 4 for activation/deactivation */
-} hfcusb_data;
-
-
-static void collect_rx_frame(usb_fifo *fifo, __u8 *data, int len,
- int finish);
-
-static inline const char *
-symbolic(struct hfcusb_symbolic_list list[], const int num)
-{
- int i;
- for (i = 0; list[i].name != NULL; i++)
- if (list[i].num == num)
- return (list[i].name);
- return "<unknown ERROR>";
-}
-
-static void
-ctrl_start_transfer(hfcusb_data *hfc)
-{
- if (hfc->ctrl_cnt) {
- hfc->ctrl_urb->pipe = hfc->ctrl_out_pipe;
- hfc->ctrl_urb->setup_packet = (u_char *)&hfc->ctrl_write;
- hfc->ctrl_urb->transfer_buffer = NULL;
- hfc->ctrl_urb->transfer_buffer_length = 0;
- hfc->ctrl_write.wIndex =
- cpu_to_le16(hfc->ctrl_buff[hfc->ctrl_out_idx].hfc_reg);
- hfc->ctrl_write.wValue =
- cpu_to_le16(hfc->ctrl_buff[hfc->ctrl_out_idx].reg_val);
-
- usb_submit_urb(hfc->ctrl_urb, GFP_ATOMIC); /* start transfer */
- }
-} /* ctrl_start_transfer */
-
-static int
-queue_control_request(hfcusb_data *hfc, __u8 reg, __u8 val, int action)
-{
- ctrl_buft *buf;
-
- if (hfc->ctrl_cnt >= HFC_CTRL_BUFSIZE)
- return (1); /* no space left */
- buf = &hfc->ctrl_buff[hfc->ctrl_in_idx]; /* pointer to new index */
- buf->hfc_reg = reg;
- buf->reg_val = val;
- buf->action = action;
- if (++hfc->ctrl_in_idx >= HFC_CTRL_BUFSIZE)
- hfc->ctrl_in_idx = 0; /* pointer wrap */
- if (++hfc->ctrl_cnt == 1)
- ctrl_start_transfer(hfc);
- return (0);
-}
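
queue_control_request() and ctrl_complete() together run a small fixed-size ring over ctrl_buff[]: ctrl_in_idx is the producer index, ctrl_out_idx the consumer index and ctrl_cnt the fill level, and a control transfer is only kicked off when the count goes from 0 to 1, each completion then chaining the next queued entry. The stand-alone sketch below shows the same index/count discipline in isolation; the names and the ring size are illustrative and not taken from the driver:

	#define RING_SIZE 8

	struct ring {
		unsigned char buf[RING_SIZE];
		int in, out, cnt;
	};

	static int ring_put(struct ring *r, unsigned char v)
	{
		if (r->cnt >= RING_SIZE)
			return 1;		/* no space left */
		r->buf[r->in] = v;
		if (++r->in >= RING_SIZE)
			r->in = 0;		/* pointer wrap */
		r->cnt++;
		return 0;
	}

	static int ring_get(struct ring *r, unsigned char *v)
	{
		if (!r->cnt)
			return 1;		/* ring empty */
		*v = r->buf[r->out];
		if (++r->out >= RING_SIZE)
			r->out = 0;		/* pointer wrap */
		r->cnt--;
		return 0;
	}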
-
-static void
-ctrl_complete(struct urb *urb)
-{
- hfcusb_data *hfc = (hfcusb_data *) urb->context;
-
- urb->dev = hfc->dev;
- if (hfc->ctrl_cnt) {
- hfc->ctrl_cnt--; /* decrement actual count */
- if (++hfc->ctrl_out_idx >= HFC_CTRL_BUFSIZE)
- hfc->ctrl_out_idx = 0; /* pointer wrap */
-
- ctrl_start_transfer(hfc); /* start next transfer */
- }
-}
-
-/* write led data to auxport & invert if necessary */
-static void
-write_led(hfcusb_data *hfc, __u8 led_state)
-{
- if (led_state != hfc->old_led_state) {
- hfc->old_led_state = led_state;
- queue_control_request(hfc, HFCUSB_P_DATA, led_state, 1);
- }
-}
-
-static void
-set_led_bit(hfcusb_data *hfc, signed short led_bits, int on)
-{
- if (on) {
- if (led_bits < 0)
- hfc->led_state &= ~abs(led_bits);
- else
- hfc->led_state |= led_bits;
- } else {
- if (led_bits < 0)
- hfc->led_state |= abs(led_bits);
- else
- hfc->led_state &= ~led_bits;
- }
-}
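
The sign of each led_bits[] entry encodes the LED polarity: a positive value is an active-high bit that set_led_bit() sets to switch the LED on, while a negative value is an active-low bit that it clears instead. For example, with led_bits[1] = -64 from the "Billion tiny USB ISDN TA 128" entry in the device table above, switching the S0 LED on clears bit 0x40 in led_state, and switching it off sets that bit again.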
-
-/* handle LED requests */
-static void
-handle_led(hfcusb_data *hfc, int event)
-{
- hfcsusb_vdata *driver_info =
- (hfcsusb_vdata *) hfcusb_idtab[hfc->vend_idx].driver_info;
-
- /* if no scheme -> no LED action */
- if (driver_info->led_scheme == LED_OFF)
- return;
-
- switch (event) {
- case LED_POWER_ON:
- set_led_bit(hfc, driver_info->led_bits[0], 1);
- set_led_bit(hfc, driver_info->led_bits[1], 0);
- set_led_bit(hfc, driver_info->led_bits[2], 0);
- set_led_bit(hfc, driver_info->led_bits[3], 0);
- break;
- case LED_POWER_OFF:
- set_led_bit(hfc, driver_info->led_bits[0], 0);
- set_led_bit(hfc, driver_info->led_bits[1], 0);
- set_led_bit(hfc, driver_info->led_bits[2], 0);
- set_led_bit(hfc, driver_info->led_bits[3], 0);
- break;
- case LED_S0_ON:
- set_led_bit(hfc, driver_info->led_bits[1], 1);
- break;
- case LED_S0_OFF:
- set_led_bit(hfc, driver_info->led_bits[1], 0);
- break;
- case LED_B1_ON:
- set_led_bit(hfc, driver_info->led_bits[2], 1);
- break;
- case LED_B1_OFF:
- set_led_bit(hfc, driver_info->led_bits[2], 0);
- break;
- case LED_B2_ON:
- set_led_bit(hfc, driver_info->led_bits[3], 1);
- break;
- case LED_B2_OFF:
- set_led_bit(hfc, driver_info->led_bits[3], 0);
- break;
- }
- write_led(hfc, hfc->led_state);
-}
-
-/* ISDN l1 timer T3 expires */
-static void
-l1_timer_expire_t3(struct timer_list *t)
-{
- hfcusb_data *hfc = from_timer(hfc, t, t3_timer);
- hfc->d_if.ifc.l1l2(&hfc->d_if.ifc, PH_DEACTIVATE | INDICATION,
- NULL);
-
- DBG(HFCUSB_DBG_STATES,
- "HFC-S USB: PH_DEACTIVATE | INDICATION sent (T3 expire)");
-
- hfc->l1_activated = 0;
- handle_led(hfc, LED_S0_OFF);
- /* deactivate : */
- queue_control_request(hfc, HFCUSB_STATES, 0x10, 1);
- queue_control_request(hfc, HFCUSB_STATES, 3, 1);
-}
-
-/* ISDN l1 timer T4 expires */
-static void
-l1_timer_expire_t4(struct timer_list *t)
-{
- hfcusb_data *hfc = from_timer(hfc, t, t4_timer);
- hfc->d_if.ifc.l1l2(&hfc->d_if.ifc, PH_DEACTIVATE | INDICATION,
- NULL);
-
- DBG(HFCUSB_DBG_STATES,
- "HFC-S USB: PH_DEACTIVATE | INDICATION sent (T4 expire)");
-
- hfc->l1_activated = 0;
- handle_led(hfc, LED_S0_OFF);
-}
-
-/* S0 state changed */
-static void
-s0_state_handler(hfcusb_data *hfc, __u8 state)
-{
- __u8 old_state;
-
- old_state = hfc->l1_state;
- if (state == old_state || state < 1 || state > 8)
- return;
-
- DBG(HFCUSB_DBG_STATES, "HFC-S USB: S0 statechange(%d -> %d)",
- old_state, state);
-
- if (state < 4 || state == 7 || state == 8) {
- if (timer_pending(&hfc->t3_timer))
- del_timer(&hfc->t3_timer);
- DBG(HFCUSB_DBG_STATES, "HFC-S USB: T3 deactivated");
- }
- if (state >= 7) {
- if (timer_pending(&hfc->t4_timer))
- del_timer(&hfc->t4_timer);
- DBG(HFCUSB_DBG_STATES, "HFC-S USB: T4 deactivated");
- }
-
- if (state == 7 && !hfc->l1_activated) {
- hfc->d_if.ifc.l1l2(&hfc->d_if.ifc,
- PH_ACTIVATE | INDICATION, NULL);
- DBG(HFCUSB_DBG_STATES, "HFC-S USB: PH_ACTIVATE | INDICATION sent");
- hfc->l1_activated = 1;
- handle_led(hfc, LED_S0_ON);
- } else if (state <= 3 /* && activated */) {
- if (old_state == 7 || old_state == 8) {
- DBG(HFCUSB_DBG_STATES, "HFC-S USB: T4 activated");
- if (!timer_pending(&hfc->t4_timer)) {
- hfc->t4_timer.expires =
- jiffies + (HFC_TIMER_T4 * HZ) / 1000;
- add_timer(&hfc->t4_timer);
- }
- } else {
- hfc->d_if.ifc.l1l2(&hfc->d_if.ifc,
- PH_DEACTIVATE | INDICATION,
- NULL);
- DBG(HFCUSB_DBG_STATES,
- "HFC-S USB: PH_DEACTIVATE | INDICATION sent");
- hfc->l1_activated = 0;
- handle_led(hfc, LED_S0_OFF);
- }
- }
- hfc->l1_state = state;
-}
-
-static void
-fill_isoc_urb(struct urb *urb, struct usb_device *dev, unsigned int pipe,
- void *buf, int num_packets, int packet_size, int interval,
- usb_complete_t complete, void *context)
-{
- int k;
-
- usb_fill_int_urb(urb, dev, pipe, buf, packet_size * num_packets,
- complete, context, interval);
-
- urb->number_of_packets = num_packets;
- urb->transfer_flags = URB_ISO_ASAP;
- urb->actual_length = 0;
- for (k = 0; k < num_packets; k++) {
- urb->iso_frame_desc[k].offset = packet_size * k;
- urb->iso_frame_desc[k].length = packet_size;
- urb->iso_frame_desc[k].actual_length = 0;
- }
-}
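
fill_isoc_urb() lays the ISO frames out back to back in a single buffer: descriptor k starts at offset packet_size * k and is packet_size bytes long, and the URB's total transfer length is packet_size * num_packets. With, say, num_packets = 4 and packet_size = 16 (figures chosen only for illustration; the callers pass the per-FIFO packet counts and fifo->usb_packet_maxlen), the four descriptors cover offsets 0, 16, 32 and 48 and the URB moves 64 bytes per round.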
-
-/* allocates URBs and starts the isochronous transfer with two pending
- * URBs to avoid gaps in the transfer chain
- */
-static int
-start_isoc_chain(usb_fifo *fifo, int num_packets_per_urb,
- usb_complete_t complete, int packet_size)
-{
- int i, k, errcode;
-
- DBG(HFCUSB_DBG_INIT, "HFC-S USB: starting ISO-URBs for fifo:%d\n",
- fifo->fifonum);
-
-	/* allocate memory for the ISO URBs */
- for (i = 0; i < 2; i++) {
- if (!(fifo->iso[i].purb)) {
- fifo->iso[i].purb =
- usb_alloc_urb(num_packets_per_urb, GFP_KERNEL);
- if (!(fifo->iso[i].purb)) {
- printk(KERN_INFO
- "alloc urb for fifo %i failed!!!",
- fifo->fifonum);
- }
- fifo->iso[i].owner_fifo = (struct usb_fifo *) fifo;
-
- /* Init the first iso */
- if (ISO_BUFFER_SIZE >=
- (fifo->usb_packet_maxlen *
- num_packets_per_urb)) {
- fill_isoc_urb(fifo->iso[i].purb,
- fifo->hfc->dev, fifo->pipe,
- fifo->iso[i].buffer,
- num_packets_per_urb,
- fifo->usb_packet_maxlen,
- fifo->intervall, complete,
- &fifo->iso[i]);
- memset(fifo->iso[i].buffer, 0,
- sizeof(fifo->iso[i].buffer));
-				/* defining packet delimiters in fifo->buffer */
- for (k = 0; k < num_packets_per_urb; k++) {
- fifo->iso[i].purb->
- iso_frame_desc[k].offset =
- k * packet_size;
- fifo->iso[i].purb->
- iso_frame_desc[k].length =
- packet_size;
- }
- } else {
- printk(KERN_INFO
-				       "HFC-S USB: ISO Buffer size too small!\n");
- }
- }
- fifo->bit_line = BITLINE_INF;
-
- errcode = usb_submit_urb(fifo->iso[i].purb, GFP_KERNEL);
- fifo->active = (errcode >= 0) ? 1 : 0;
- if (errcode < 0)
- printk(KERN_INFO "HFC-S USB: usb_submit_urb URB nr:%d, error(%i): '%s'\n",
- i, errcode, symbolic(urb_errlist, errcode));
- }
- return (fifo->active);
-}
-
-/* stops running iso chain and frees their pending urbs */
-static void
-stop_isoc_chain(usb_fifo *fifo)
-{
- int i;
-
- for (i = 0; i < 2; i++) {
- if (fifo->iso[i].purb) {
- DBG(HFCUSB_DBG_INIT,
- "HFC-S USB: Stopping iso chain for fifo %i.%i",
- fifo->fifonum, i);
- usb_kill_urb(fifo->iso[i].purb);
- usb_free_urb(fifo->iso[i].purb);
- fifo->iso[i].purb = NULL;
- }
- }
-
- usb_kill_urb(fifo->urb);
- usb_free_urb(fifo->urb);
- fifo->urb = NULL;
- fifo->active = 0;
-}
-
-/* defines how many ISO packets are handled in one URB */
-static int iso_packets[8] =
-{ ISOC_PACKETS_B, ISOC_PACKETS_B, ISOC_PACKETS_B, ISOC_PACKETS_B,
- ISOC_PACKETS_D, ISOC_PACKETS_D, ISOC_PACKETS_D, ISOC_PACKETS_D
-};
-
-static void
-tx_iso_complete(struct urb *urb)
-{
- iso_urb_struct *context_iso_urb = (iso_urb_struct *) urb->context;
- usb_fifo *fifo = context_iso_urb->owner_fifo;
- hfcusb_data *hfc = fifo->hfc;
- int k, tx_offset, num_isoc_packets, sink, len, current_len,
- errcode;
- int frame_complete, transp_mode, fifon, status;
- __u8 threshbit;
-
- fifon = fifo->fifonum;
- status = urb->status;
-
- tx_offset = 0;
-
- /* ISO transfer only partially completed,
- look at individual frame status for details */
- if (status == -EXDEV) {
- DBG(HFCUSB_DBG_VERBOSE_USB, "HFC-S USB: tx_iso_complete with -EXDEV"
- ", urb->status %d, fifonum %d\n",
- status, fifon);
-
- for (k = 0; k < iso_packets[fifon]; ++k) {
- errcode = urb->iso_frame_desc[k].status;
- if (errcode)
- DBG(HFCUSB_DBG_VERBOSE_USB, "HFC-S USB: tx_iso_complete "
- "packet %i, status: %i\n",
- k, errcode);
- }
-
- // clear status, so go on with ISO transfers
- status = 0;
- }
-
- if (fifo->active && !status) {
- transp_mode = 0;
- if (fifon < 4 && hfc->b_mode[fifon / 2] == L1_MODE_TRANS)
- transp_mode = 1;
-
- /* is FifoFull-threshold set for our channel? */
- threshbit = (hfc->threshold_mask & (1 << fifon));
- num_isoc_packets = iso_packets[fifon];
-
- /* predict dataflow to avoid fifo overflow */
- if (fifon >= HFCUSB_D_TX) {
- sink = (threshbit) ? SINK_DMIN : SINK_DMAX;
- } else {
- sink = (threshbit) ? SINK_MIN : SINK_MAX;
- }
- fill_isoc_urb(urb, fifo->hfc->dev, fifo->pipe,
- context_iso_urb->buffer, num_isoc_packets,
- fifo->usb_packet_maxlen, fifo->intervall,
- tx_iso_complete, urb->context);
- memset(context_iso_urb->buffer, 0,
- sizeof(context_iso_urb->buffer));
- frame_complete = 0;
-
- /* Generate next ISO Packets */
- for (k = 0; k < num_isoc_packets; ++k) {
- if (fifo->skbuff) {
- len = fifo->skbuff->len;
- /* we lower data margin every msec */
- fifo->bit_line -= sink;
- current_len = (0 - fifo->bit_line) / 8;
-				/* a maximum of 15 bytes for every ISO packet makes our life easier */
- if (current_len > 14)
- current_len = 14;
- current_len =
- (len <=
- current_len) ? len : current_len;
-				/* how many bits do we put on the line? */
- fifo->bit_line += current_len * 8;
-
- context_iso_urb->buffer[tx_offset] = 0;
- if (current_len == len) {
- if (!transp_mode) {
- /* here frame completion */
- context_iso_urb->
- buffer[tx_offset] = 1;
-						/* add 2 bytes of flags and the 16-bit CRC at the end of the ISDN frame */
- fifo->bit_line += 32;
- }
- frame_complete = 1;
- }
-
- memcpy(context_iso_urb->buffer +
- tx_offset + 1, fifo->skbuff->data,
- current_len);
- skb_pull(fifo->skbuff, current_len);
-
-				/* define packet delimiters within the URB buffer */
- urb->iso_frame_desc[k].offset = tx_offset;
- urb->iso_frame_desc[k].length =
- current_len + 1;
-
- tx_offset += (current_len + 1);
- } else {
- urb->iso_frame_desc[k].offset =
- tx_offset++;
-
- urb->iso_frame_desc[k].length = 1;
- fifo->bit_line -= sink; /* we lower data margin every msec */
-
- if (fifo->bit_line < BITLINE_INF) {
- fifo->bit_line = BITLINE_INF;
- }
- }
-
- if (frame_complete) {
- fifo->delete_flg = 1;
- fifo->hif->l1l2(fifo->hif,
- PH_DATA | CONFIRM,
- (void *) (unsigned long) fifo->skbuff->
- truesize);
- if (fifo->skbuff && fifo->delete_flg) {
- dev_kfree_skb_any(fifo->skbuff);
- fifo->skbuff = NULL;
- fifo->delete_flg = 0;
- }
- frame_complete = 0;
- }
- }
- errcode = usb_submit_urb(urb, GFP_ATOMIC);
- if (errcode < 0) {
- printk(KERN_INFO
- "HFC-S USB: error submitting ISO URB: %d\n",
- errcode);
- }
- } else {
- if (status && !hfc->disc_flag) {
- printk(KERN_INFO
- "HFC-S USB: tx_iso_complete: error(%i): '%s', fifonum=%d\n",
- status, symbolic(urb_errlist, status), fifon);
- }
- }
-}
-
-static void
-rx_iso_complete(struct urb *urb)
-{
- iso_urb_struct *context_iso_urb = (iso_urb_struct *) urb->context;
- usb_fifo *fifo = context_iso_urb->owner_fifo;
- hfcusb_data *hfc = fifo->hfc;
- int k, len, errcode, offset, num_isoc_packets, fifon, maxlen,
- status;
- unsigned int iso_status;
- __u8 *buf;
- static __u8 eof[8];
-
- fifon = fifo->fifonum;
- status = urb->status;
-
- if (urb->status == -EOVERFLOW) {
- DBG(HFCUSB_DBG_VERBOSE_USB,
- "HFC-USB: ignoring USB DATAOVERRUN fifo(%i)", fifon);
- status = 0;
- }
-
- /* ISO transfer only partially completed,
- look at individual frame status for details */
- if (status == -EXDEV) {
- DBG(HFCUSB_DBG_VERBOSE_USB, "HFC-S USB: rx_iso_complete with -EXDEV "
- "urb->status %d, fifonum %d\n",
- status, fifon);
- status = 0;
- }
-
- if (fifo->active && !status) {
- num_isoc_packets = iso_packets[fifon];
- maxlen = fifo->usb_packet_maxlen;
- for (k = 0; k < num_isoc_packets; ++k) {
- len = urb->iso_frame_desc[k].actual_length;
- offset = urb->iso_frame_desc[k].offset;
- buf = context_iso_urb->buffer + offset;
- iso_status = urb->iso_frame_desc[k].status;
-
- if (iso_status && !hfc->disc_flag)
- DBG(HFCUSB_DBG_VERBOSE_USB,
- "HFC-S USB: rx_iso_complete "
- "ISO packet %i, status: %i\n",
- k, iso_status);
-
- if (fifon == HFCUSB_D_RX) {
- DBG(HFCUSB_DBG_VERBOSE_USB,
- "HFC-S USB: ISO-D-RX lst_urblen:%2d "
- "act_urblen:%2d max-urblen:%2d EOF:0x%0x",
- fifo->last_urblen, len, maxlen,
- eof[5]);
-
- DBG_PACKET(HFCUSB_DBG_VERBOSE_USB, buf, len);
- }
-
- if (fifo->last_urblen != maxlen) {
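- /* a packet following a short one starts a new chunk and carries
-  * the 2-byte status header; after a full-length packet the data
-  * simply continues without header bytes */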
- /* the threshold mask is in the 2nd status byte */
- hfc->threshold_mask = buf[1];
- /* care for L1 state only for D-Channel
- to avoid overlapped iso completions */
- if (fifon == HFCUSB_D_RX) {
- /* the S0 state is in the upper half
- of the 1st status byte */
- s0_state_handler(hfc, buf[0] >> 4);
- }
- eof[fifon] = buf[0] & 1;
- if (len > 2)
- collect_rx_frame(fifo, buf + 2,
- len - 2,
- (len < maxlen) ?
- eof[fifon] : 0);
- } else {
- collect_rx_frame(fifo, buf, len,
- (len <
- maxlen) ? eof[fifon] :
- 0);
- }
- fifo->last_urblen = len;
- }
-
- fill_isoc_urb(urb, fifo->hfc->dev, fifo->pipe,
- context_iso_urb->buffer, num_isoc_packets,
- fifo->usb_packet_maxlen, fifo->intervall,
- rx_iso_complete, urb->context);
- errcode = usb_submit_urb(urb, GFP_ATOMIC);
- if (errcode < 0) {
- printk(KERN_ERR
- "HFC-S USB: error submitting ISO URB: %d\n",
- errcode);
- }
- } else {
- if (status && !hfc->disc_flag) {
- printk(KERN_ERR
- "HFC-S USB: rx_iso_complete : "
- "urb->status %d, fifonum %d\n",
- status, fifon);
- }
- }
-}
-
-/* collect rx data from INT- and ISO-URBs */
-static void
-collect_rx_frame(usb_fifo *fifo, __u8 *data, int len, int finish)
-{
- hfcusb_data *hfc = fifo->hfc;
- int transp_mode, fifon;
-
- fifon = fifo->fifonum;
- transp_mode = 0;
- if (fifon < 4 && hfc->b_mode[fifon / 2] == L1_MODE_TRANS)
- transp_mode = 1;
-
- if (!fifo->skbuff) {
- fifo->skbuff = dev_alloc_skb(fifo->max_size + 3);
- if (!fifo->skbuff) {
- printk(KERN_ERR
- "HFC-S USB: cannot allocate buffer for fifo(%d)\n",
- fifon);
- return;
- }
- }
- if (len) {
- if (fifo->skbuff->len + len < fifo->max_size) {
- skb_put_data(fifo->skbuff, data, len);
- } else {
- DBG(HFCUSB_DBG_FIFO_ERR,
- "HCF-USB: got frame exceeded fifo->max_size(%d) fifo(%d)",
- fifo->max_size, fifon);
- DBG_SKB(HFCUSB_DBG_VERBOSE_USB, fifo->skbuff);
- skb_trim(fifo->skbuff, 0);
- }
- }
- if (transp_mode && fifo->skbuff->len >= 128) {
- fifo->hif->l1l2(fifo->hif, PH_DATA | INDICATION,
- fifo->skbuff);
- fifo->skbuff = NULL;
- return;
- }
- /* we have a complete hdlc packet */
- if (finish) {
- if (fifo->skbuff->len > 3 &&
- !fifo->skbuff->data[fifo->skbuff->len - 1]) {
-
- if (fifon == HFCUSB_D_RX) {
- DBG(HFCUSB_DBG_DCHANNEL,
- "HFC-S USB: D-RX len(%d)", fifo->skbuff->len);
- DBG_SKB(HFCUSB_DBG_DCHANNEL, fifo->skbuff);
- }
-
- /* remove CRC & status */
- skb_trim(fifo->skbuff, fifo->skbuff->len - 3);
- if (fifon == HFCUSB_PCM_RX) {
- fifo->hif->l1l2(fifo->hif,
- PH_DATA_E | INDICATION,
- fifo->skbuff);
- } else
- fifo->hif->l1l2(fifo->hif,
- PH_DATA | INDICATION,
- fifo->skbuff);
- fifo->skbuff = NULL; /* buffer was freed from upper layer */
- } else {
- DBG(HFCUSB_DBG_FIFO_ERR,
- "HFC-S USB: ERROR frame len(%d) fifo(%d)",
- fifo->skbuff->len, fifon);
- DBG_SKB(HFCUSB_DBG_VERBOSE_USB, fifo->skbuff);
- skb_trim(fifo->skbuff, 0);
- }
- }
-}
-
-static void
-rx_int_complete(struct urb *urb)
-{
- int len;
- int status;
- __u8 *buf, maxlen, fifon;
- usb_fifo *fifo = (usb_fifo *) urb->context;
- hfcusb_data *hfc = fifo->hfc;
- static __u8 eof[8];
-
- urb->dev = hfc->dev; /* security init */
-
- fifon = fifo->fifonum;
- if ((!fifo->active) || (urb->status)) {
- DBG(HFCUSB_DBG_INIT, "HFC-S USB: RX-Fifo %i is going down (%i)",
- fifon, urb->status);
-
- fifo->urb->interval = 0; /* cancel automatic rescheduling */
- if (fifo->skbuff) {
- dev_kfree_skb_any(fifo->skbuff);
- fifo->skbuff = NULL;
- }
- return;
- }
- len = urb->actual_length;
- buf = fifo->buffer;
- maxlen = fifo->usb_packet_maxlen;
-
- if (fifon == HFCUSB_D_RX) {
- DBG(HFCUSB_DBG_VERBOSE_USB,
- "HFC-S USB: INT-D-RX lst_urblen:%2d "
- "act_urblen:%2d max-urblen:%2d EOF:0x%0x",
- fifo->last_urblen, len, maxlen,
- eof[5]);
- DBG_PACKET(HFCUSB_DBG_VERBOSE_USB, buf, len);
- }
-
- if (fifo->last_urblen != fifo->usb_packet_maxlen) {
- /* the threshold mask is in the 2nd status byte */
- hfc->threshold_mask = buf[1];
- /* the S0 state is in the upper half of the 1st status byte */
- s0_state_handler(hfc, buf[0] >> 4);
- eof[fifon] = buf[0] & 1;
- /* if we have more than the 2 status bytes -> collect data */
- if (len > 2)
- collect_rx_frame(fifo, buf + 2,
- urb->actual_length - 2,
- (len < maxlen) ? eof[fifon] : 0);
- } else {
- collect_rx_frame(fifo, buf, urb->actual_length,
- (len < maxlen) ? eof[fifon] : 0);
- }
- fifo->last_urblen = urb->actual_length;
- status = usb_submit_urb(urb, GFP_ATOMIC);
- if (status) {
- printk(KERN_INFO
- "HFC-S USB: %s error resubmitting URB fifo(%d)\n",
- __func__, fifon);
- }
-}
-
-/* start initial INT-URB for certain fifo */
-static void
-start_int_fifo(usb_fifo *fifo)
-{
- int errcode;
-
- DBG(HFCUSB_DBG_INIT, "HFC-S USB: starting RX INT-URB for fifo:%d\n",
- fifo->fifonum);
-
- if (!fifo->urb) {
- fifo->urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!fifo->urb)
- return;
- }
- usb_fill_int_urb(fifo->urb, fifo->hfc->dev, fifo->pipe,
- fifo->buffer, fifo->usb_packet_maxlen,
- rx_int_complete, fifo, fifo->intervall);
- fifo->active = 1; /* must be marked active */
- errcode = usb_submit_urb(fifo->urb, GFP_KERNEL);
- if (errcode) {
- printk(KERN_ERR "HFC-S USB: submit URB error(%s): status:%i\n",
- __func__, errcode);
- fifo->active = 0;
- fifo->skbuff = NULL;
- }
-}
-
-static void
-setup_bchannel(hfcusb_data *hfc, int channel, int mode)
-{
- __u8 val, idx_table[2] = { 0, 2 };
-
- if (hfc->disc_flag) {
- return;
- }
- DBG(HFCUSB_DBG_STATES, "HFC-S USB: setting channel %d to mode %d",
- channel, mode);
- hfc->b_mode[channel] = mode;
-
- /* setup CON_HDLC */
- val = 0;
- if (mode != L1_MODE_NULL)
- val = 8; /* enable fifo? */
- if (mode == L1_MODE_TRANS)
- val |= 2; /* set transparent bit */
-
- /* set FIFO to transmit register */
- queue_control_request(hfc, HFCUSB_FIFO, idx_table[channel], 1);
- queue_control_request(hfc, HFCUSB_CON_HDLC, val, 1);
- /* reset fifo */
- queue_control_request(hfc, HFCUSB_INC_RES_F, 2, 1);
- /* set FIFO to receive register */
- queue_control_request(hfc, HFCUSB_FIFO, idx_table[channel] + 1, 1);
- queue_control_request(hfc, HFCUSB_CON_HDLC, val, 1);
- /* reset fifo */
- queue_control_request(hfc, HFCUSB_INC_RES_F, 2, 1);
-
- val = 0x40;
- if (hfc->b_mode[0])
- val |= 1;
- if (hfc->b_mode[1])
- val |= 2;
- queue_control_request(hfc, HFCUSB_SCTRL, val, 1);
-
- val = 0;
- if (hfc->b_mode[0])
- val |= 1;
- if (hfc->b_mode[1])
- val |= 2;
- queue_control_request(hfc, HFCUSB_SCTRL_R, val, 1);
-
- if (mode == L1_MODE_NULL) {
- if (channel)
- handle_led(hfc, LED_B2_OFF);
- else
- handle_led(hfc, LED_B1_OFF);
- } else {
- if (channel)
- handle_led(hfc, LED_B2_ON);
- else
- handle_led(hfc, LED_B1_ON);
- }
-}
-
-static void
-hfc_usb_l2l1(struct hisax_if *my_hisax_if, int pr, void *arg)
-{
- usb_fifo *fifo = my_hisax_if->priv;
- hfcusb_data *hfc = fifo->hfc;
-
- switch (pr) {
- case PH_ACTIVATE | REQUEST:
- if (fifo->fifonum == HFCUSB_D_TX) {
- DBG(HFCUSB_DBG_STATES,
- "HFC_USB: hfc_usb_d_l2l1 D-chan: PH_ACTIVATE | REQUEST");
-
- if (hfc->l1_state != 3
- && hfc->l1_state != 7) {
- hfc->d_if.ifc.l1l2(&hfc->d_if.ifc,
- PH_DEACTIVATE |
- INDICATION,
- NULL);
- DBG(HFCUSB_DBG_STATES,
- "HFC-S USB: PH_DEACTIVATE | INDICATION sent (not state 3 or 7)");
- } else {
- if (hfc->l1_state == 7) { /* l1 already active */
- hfc->d_if.ifc.l1l2(&hfc->d_if.ifc, PH_ACTIVATE | INDICATION, NULL);
- DBG(HFCUSB_DBG_STATES,
- "HFC-S USB: PH_ACTIVATE | INDICATION sent again ;)");
- } else {
- /* force sending INFO1 */
- queue_control_request(hfc, HFCUSB_STATES, 0x14, 1);
- mdelay(1);
- /* start l1 activation */
- queue_control_request(hfc, HFCUSB_STATES, 0x04, 1);
- if (!timer_pending(&hfc->t3_timer)) {
- hfc->t3_timer.expires = jiffies + (HFC_TIMER_T3 * HZ) / 1000;
- add_timer(&hfc->t3_timer);
- }
- }
- }
- } else {
- DBG(HFCUSB_DBG_STATES,
- "HFC_USB: hfc_usb_d_l2l1 B-chan: PH_ACTIVATE | REQUEST");
- setup_bchannel(hfc,
- (fifo->fifonum ==
- HFCUSB_B1_TX) ? 0 : 1,
- (long) arg);
- fifo->hif->l1l2(fifo->hif,
- PH_ACTIVATE | INDICATION,
- NULL);
- }
- break;
- case PH_DEACTIVATE | REQUEST:
- if (fifo->fifonum == HFCUSB_D_TX) {
- DBG(HFCUSB_DBG_STATES,
- "HFC_USB: hfc_usb_d_l2l1 D-chan: PH_DEACTIVATE | REQUEST");
- } else {
- DBG(HFCUSB_DBG_STATES,
- "HFC_USB: hfc_usb_d_l2l1 Bx-chan: PH_DEACTIVATE | REQUEST");
- setup_bchannel(hfc,
- (fifo->fifonum ==
- HFCUSB_B1_TX) ? 0 : 1,
- (int) L1_MODE_NULL);
- fifo->hif->l1l2(fifo->hif,
- PH_DEACTIVATE | INDICATION,
- NULL);
- }
- break;
- case PH_DATA | REQUEST:
- if (fifo->skbuff && fifo->delete_flg) {
- dev_kfree_skb_any(fifo->skbuff);
- fifo->skbuff = NULL;
- fifo->delete_flg = 0;
- }
- fifo->skbuff = arg; /* we have a new buffer */
- break;
- default:
- DBG(HFCUSB_DBG_STATES,
- "HFC_USB: hfc_usb_d_l2l1: unknown state : %#x", pr);
- break;
- }
-}
-
-/* initialize the HFC-S USB chip registers, the HiSax interface and the USB URBs */
-static int
-hfc_usb_init(hfcusb_data *hfc)
-{
- usb_fifo *fifo;
- int i;
- u_char b;
- struct hisax_b_if *p_b_if[2];
-
- /* check the chip id */
- if (read_usb(hfc, HFCUSB_CHIP_ID, &b) != 1) {
- printk(KERN_INFO "HFC-USB: cannot read chip id\n");
- return (1);
- }
- if (b != HFCUSB_CHIPID) {
- printk(KERN_INFO "HFC-S USB: Invalid chip id 0x%02x\n", b);
- return (1);
- }
-
- /* first set the needed config, interface and alternate */
- usb_set_interface(hfc->dev, hfc->if_used, hfc->alt_used);
-
- /* do Chip reset */
- write_usb(hfc, HFCUSB_CIRM, 8);
- /* aux = output, reset off */
- write_usb(hfc, HFCUSB_CIRM, 0x10);
-
- /* set USB_SIZE to match wMaxPacketSize for INT or BULK transfers */
- write_usb(hfc, HFCUSB_USB_SIZE,
- (hfc->packet_size / 8) | ((hfc->packet_size / 8) << 4));
-
- /* set USB_SIZE_I to match wMaxPacketSize for ISO transfers */
- write_usb(hfc, HFCUSB_USB_SIZE_I, hfc->iso_packet_size);
-
- /* enable PCM/GCI master mode */
- write_usb(hfc, HFCUSB_MST_MODE1, 0); /* set default values */
- write_usb(hfc, HFCUSB_MST_MODE0, 1); /* enable master mode */
-
- /* init the fifos */
- write_usb(hfc, HFCUSB_F_THRES,
- (HFCUSB_TX_THRESHOLD /
- 8) | ((HFCUSB_RX_THRESHOLD / 8) << 4));
-
- fifo = hfc->fifos;
- for (i = 0; i < HFCUSB_NUM_FIFOS; i++) {
- write_usb(hfc, HFCUSB_FIFO, i); /* select the desired fifo */
- fifo[i].skbuff = NULL; /* init buffer pointer */
- fifo[i].max_size =
- (i <= HFCUSB_B2_RX) ? MAX_BCH_SIZE : MAX_DFRAME_LEN;
- fifo[i].last_urblen = 0;
- /* set 2 bits for D- & E-channel */
- write_usb(hfc, HFCUSB_HDLC_PAR,
- ((i <= HFCUSB_B2_RX) ? 0 : 2));
- /* rx hdlc, enable IFF for D-channel */
- write_usb(hfc, HFCUSB_CON_HDLC,
- ((i == HFCUSB_D_TX) ? 0x09 : 0x08));
- write_usb(hfc, HFCUSB_INC_RES_F, 2); /* reset the fifo */
- }
-
- write_usb(hfc, HFCUSB_CLKDEL, 0x0f); /* clock delay value */
- write_usb(hfc, HFCUSB_STATES, 3 | 0x10); /* set deactivated mode */
- write_usb(hfc, HFCUSB_STATES, 3); /* enable state machine */
-
- write_usb(hfc, HFCUSB_SCTRL_R, 0); /* disable both B receivers */
- write_usb(hfc, HFCUSB_SCTRL, 0x40); /* disable B transmitters + capacitive mode */
-
- /* set both B-channel to not connected */
- hfc->b_mode[0] = L1_MODE_NULL;
- hfc->b_mode[1] = L1_MODE_NULL;
-
- hfc->l1_activated = 0;
- hfc->disc_flag = 0;
- hfc->led_state = 0;
- hfc->old_led_state = 0;
-
- /* init the t3 timer */
- timer_setup(&hfc->t3_timer, l1_timer_expire_t3, 0);
-
- /* init the t4 timer */
- timer_setup(&hfc->t4_timer, l1_timer_expire_t4, 0);
-
- /* init the background machinery for control requests */
- hfc->ctrl_read.bRequestType = 0xc0;
- hfc->ctrl_read.bRequest = 1;
- hfc->ctrl_read.wLength = cpu_to_le16(1);
- hfc->ctrl_write.bRequestType = 0x40;
- hfc->ctrl_write.bRequest = 0;
- hfc->ctrl_write.wLength = 0;
- usb_fill_control_urb(hfc->ctrl_urb,
- hfc->dev,
- hfc->ctrl_out_pipe,
- (u_char *)&hfc->ctrl_write,
- NULL, 0, ctrl_complete, hfc);
- /* Init All Fifos */
- for (i = 0; i < HFCUSB_NUM_FIFOS; i++) {
- hfc->fifos[i].iso[0].purb = NULL;
- hfc->fifos[i].iso[1].purb = NULL;
- hfc->fifos[i].active = 0;
- }
- /* register the module with the upper HiSax layers */
- hfc->d_if.owner = THIS_MODULE;
- hfc->d_if.ifc.priv = &hfc->fifos[HFCUSB_D_TX];
- hfc->d_if.ifc.l2l1 = hfc_usb_l2l1;
- for (i = 0; i < 2; i++) {
- hfc->b_if[i].ifc.priv = &hfc->fifos[HFCUSB_B1_TX + i * 2];
- hfc->b_if[i].ifc.l2l1 = hfc_usb_l2l1;
- p_b_if[i] = &hfc->b_if[i];
- }
- /* default Prot: EURO ISDN, should be a module_param */
- hfc->protocol = 2;
- i = hisax_register(&hfc->d_if, p_b_if, "hfc_usb", hfc->protocol);
- if (i) {
- printk(KERN_INFO "HFC-S USB: hisax_register -> %d\n", i);
- return i;
- }
-
-#ifdef CONFIG_HISAX_DEBUG
- hfc_debug = debug;
-#endif
-
- for (i = 0; i < 4; i++)
- hfc->fifos[i].hif = &p_b_if[i / 2]->ifc;
- for (i = 4; i < 8; i++)
- hfc->fifos[i].hif = &hfc->d_if.ifc;
-
- /* 3 (+1) INT IN + 3 ISO OUT */
- if (hfc->cfg_used == CNF_3INT3ISO || hfc->cfg_used == CNF_4INT3ISO) {
- start_int_fifo(hfc->fifos + HFCUSB_D_RX);
- if (hfc->fifos[HFCUSB_PCM_RX].pipe)
- start_int_fifo(hfc->fifos + HFCUSB_PCM_RX);
- start_int_fifo(hfc->fifos + HFCUSB_B1_RX);
- start_int_fifo(hfc->fifos + HFCUSB_B2_RX);
- }
- /* 3 (+1) ISO IN + 3 ISO OUT */
- if (hfc->cfg_used == CNF_3ISO3ISO || hfc->cfg_used == CNF_4ISO3ISO) {
- start_isoc_chain(hfc->fifos + HFCUSB_D_RX, ISOC_PACKETS_D,
- rx_iso_complete, 16);
- if (hfc->fifos[HFCUSB_PCM_RX].pipe)
- start_isoc_chain(hfc->fifos + HFCUSB_PCM_RX,
- ISOC_PACKETS_D, rx_iso_complete,
- 16);
- start_isoc_chain(hfc->fifos + HFCUSB_B1_RX, ISOC_PACKETS_B,
- rx_iso_complete, 16);
- start_isoc_chain(hfc->fifos + HFCUSB_B2_RX, ISOC_PACKETS_B,
- rx_iso_complete, 16);
- }
-
- start_isoc_chain(hfc->fifos + HFCUSB_D_TX, ISOC_PACKETS_D,
- tx_iso_complete, 1);
- start_isoc_chain(hfc->fifos + HFCUSB_B1_TX, ISOC_PACKETS_B,
- tx_iso_complete, 1);
- start_isoc_chain(hfc->fifos + HFCUSB_B2_TX, ISOC_PACKETS_B,
- tx_iso_complete, 1);
-
- handle_led(hfc, LED_POWER_ON);
-
- return (0);
-}
-
-/* probe callback, called for each newly plugged-in USB device */
-static int
-hfc_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
-{
- struct usb_device *dev = interface_to_usbdev(intf);
- hfcusb_data *context;
- struct usb_host_interface *iface = intf->cur_altsetting;
- struct usb_host_interface *iface_used = NULL;
- struct usb_host_endpoint *ep;
- int ifnum = iface->desc.bInterfaceNumber;
- int i, idx, alt_idx, probe_alt_setting, vend_idx, cfg_used, *vcf,
- attr, cfg_found, cidx, ep_addr;
- int cmptbl[16], small_match, iso_packet_size, packet_size,
- alt_used = 0;
- hfcsusb_vdata *driver_info;
-
- vend_idx = 0xffff;
- for (i = 0; hfcusb_idtab[i].idVendor; i++) {
- if ((le16_to_cpu(dev->descriptor.idVendor) == hfcusb_idtab[i].idVendor)
- && (le16_to_cpu(dev->descriptor.idProduct) == hfcusb_idtab[i].idProduct)) {
- vend_idx = i;
- continue;
- }
- }
-
- printk(KERN_INFO
- "HFC-S USB: probing interface(%d) actalt(%d) minor(%d)\n",
- ifnum, iface->desc.bAlternateSetting, intf->minor);
-
- if (vend_idx != 0xffff) {
- /* if vendor and product IDs are OK, start probing alternate settings */
- alt_idx = 0;
- small_match = 0xffff;
-
- /* default settings */
- iso_packet_size = 16;
- packet_size = 64;
-
- while (alt_idx < intf->num_altsetting) {
- iface = intf->altsetting + alt_idx;
- probe_alt_setting = iface->desc.bAlternateSetting;
- cfg_used = 0;
-
- /* check for config EOL element */
- while (validconf[cfg_used][0]) {
- cfg_found = 1;
- vcf = validconf[cfg_used];
- /* first endpoint descriptor */
- ep = iface->endpoint;
-
- memcpy(cmptbl, vcf, 16 * sizeof(int));
-
- /* check for all endpoints in this alternate setting */
- for (i = 0; i < iface->desc.bNumEndpoints;
- i++) {
- ep_addr =
- ep->desc.bEndpointAddress;
- /* get endpoint base */
- idx = ((ep_addr & 0x7f) - 1) * 2;
- if (ep_addr & 0x80)
- idx++;
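- /* endpoints map to table slots: (endpoint number - 1) * 2, plus 1
-  * for IN endpoints, i.e. two slots per endpoint number in the
-  * 16-entry cmptbl/validconf layout */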
- attr = ep->desc.bmAttributes;
- if (cmptbl[idx] == EP_NUL) {
- cfg_found = 0;
- }
- if (attr == USB_ENDPOINT_XFER_INT
- && cmptbl[idx] == EP_INT)
- cmptbl[idx] = EP_NUL;
- if (attr == USB_ENDPOINT_XFER_BULK
- && cmptbl[idx] == EP_BLK)
- cmptbl[idx] = EP_NUL;
- if (attr == USB_ENDPOINT_XFER_ISOC
- && cmptbl[idx] == EP_ISO)
- cmptbl[idx] = EP_NUL;
-
- /* check if all INT endpoints match minimum interval */
- if ((attr == USB_ENDPOINT_XFER_INT)
- && (ep->desc.bInterval < vcf[17])) {
- cfg_found = 0;
- }
- ep++;
- }
- for (i = 0; i < 16; i++) {
- /* all entries must be EP_NOP or EP_NUL for a valid config */
- if (cmptbl[i] != EP_NOP
- && cmptbl[i] != EP_NUL)
- cfg_found = 0;
- }
- if (cfg_found) {
- if (cfg_used < small_match) {
- small_match = cfg_used;
- alt_used =
- probe_alt_setting;
- iface_used = iface;
- }
- }
- cfg_used++;
- }
- alt_idx++;
- } /* (alt_idx < intf->num_altsetting) */
-
- /* found a valid USB TA endpoint config */
- if (small_match != 0xffff) {
- iface = iface_used;
- if (!(context = kzalloc(sizeof(hfcusb_data), GFP_KERNEL)))
- return (-ENOMEM); /* got no mem */
-
- ep = iface->endpoint;
- vcf = validconf[small_match];
-
- for (i = 0; i < iface->desc.bNumEndpoints; i++) {
- ep_addr = ep->desc.bEndpointAddress;
- /* get endpoint base */
- idx = ((ep_addr & 0x7f) - 1) * 2;
- if (ep_addr & 0x80)
- idx++;
- cidx = idx & 7;
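- /* fold the 16 possible endpoint slots onto the eight HFCUSB_* fifo indexes */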
- attr = ep->desc.bmAttributes;
-
- /* init Endpoints */
- if (vcf[idx] != EP_NOP
- && vcf[idx] != EP_NUL) {
- switch (attr) {
- case USB_ENDPOINT_XFER_INT:
- context->fifos[cidx].pipe =
- usb_rcvintpipe(dev, ep->desc.bEndpointAddress);
- context->fifos[cidx].usb_transfer_mode = USB_INT;
- packet_size = le16_to_cpu(ep->desc.wMaxPacketSize);
- break;
- case USB_ENDPOINT_XFER_BULK:
- if (ep_addr & 0x80)
- context->fifos[cidx].pipe =
- usb_rcvbulkpipe(dev, ep->desc.bEndpointAddress);
- else
- context->fifos[cidx].pipe =
- usb_sndbulkpipe(dev, ep->desc.bEndpointAddress);
- context->fifos[cidx].usb_transfer_mode = USB_BULK;
- packet_size = le16_to_cpu(ep->desc.wMaxPacketSize);
- break;
- case USB_ENDPOINT_XFER_ISOC:
- if (ep_addr & 0x80)
- context->fifos[cidx].pipe =
- usb_rcvisocpipe(dev, ep->desc.bEndpointAddress);
- else
- context->fifos[cidx].pipe =
- usb_sndisocpipe(dev, ep->desc.bEndpointAddress);
- context->fifos[cidx].usb_transfer_mode = USB_ISOC;
- iso_packet_size = le16_to_cpu(ep->desc.wMaxPacketSize);
- break;
- default:
- context->fifos[cidx].pipe = 0;
- } /* switch attribute */
-
- if (context->fifos[cidx].pipe) {
- context->fifos[cidx].fifonum = cidx;
- context->fifos[cidx].hfc = context;
- context->fifos[cidx].usb_packet_maxlen =
- le16_to_cpu(ep->desc.wMaxPacketSize);
- context->fifos[cidx].intervall = ep->desc.bInterval;
- context->fifos[cidx].skbuff = NULL;
- }
- }
- ep++;
- }
- context->dev = dev; /* save device */
- context->if_used = ifnum; /* save used interface */
- context->alt_used = alt_used; /* and alternate config */
- context->ctrl_paksize = dev->descriptor.bMaxPacketSize0; /* control size */
- context->cfg_used = vcf[16]; /* store used config */
- context->vend_idx = vend_idx; /* store found vendor */
- context->packet_size = packet_size;
- context->iso_packet_size = iso_packet_size;
-
- /* create the control pipes needed for register access */
- context->ctrl_in_pipe =
- usb_rcvctrlpipe(context->dev, 0);
- context->ctrl_out_pipe =
- usb_sndctrlpipe(context->dev, 0);
-
- driver_info = (hfcsusb_vdata *)
- hfcusb_idtab[vend_idx].driver_info;
-
- context->ctrl_urb = usb_alloc_urb(0, GFP_KERNEL);
-
- if (!context->ctrl_urb) {
- pr_warn("%s: No memory for control urb\n",
- driver_info->vend_name);
- kfree(context);
- return -ENOMEM;
- }
-
- pr_info("HFC-S USB: detected \"%s\"\n",
- driver_info->vend_name);
-
- DBG(HFCUSB_DBG_INIT,
- "HFC-S USB: Endpoint-Config: %s (if=%d alt=%d), E-Channel(%d)",
- conf_str[small_match], context->if_used,
- context->alt_used,
- validconf[small_match][18]);
-
- /* init the chip and register the driver */
- if (hfc_usb_init(context)) {
- usb_kill_urb(context->ctrl_urb);
- usb_free_urb(context->ctrl_urb);
- context->ctrl_urb = NULL;
- kfree(context);
- return (-EIO);
- }
- usb_set_intfdata(intf, context);
- return (0);
- }
- } else {
- printk(KERN_INFO
- "HFC-S USB: no valid vendor found in USB descriptor\n");
- }
- return (-EIO);
-}
-
-/* callback for unplugged USB device */
-static void
-hfc_usb_disconnect(struct usb_interface *intf)
-{
- hfcusb_data *context = usb_get_intfdata(intf);
- int i;
-
- handle_led(context, LED_POWER_OFF);
- schedule_timeout(HZ / 100);
-
- printk(KERN_INFO "HFC-S USB: device disconnect\n");
- context->disc_flag = 1;
- usb_set_intfdata(intf, NULL);
-
- if (timer_pending(&context->t3_timer))
- del_timer(&context->t3_timer);
- if (timer_pending(&context->t4_timer))
- del_timer(&context->t4_timer);
-
- /* tell all fifos to terminate */
- for (i = 0; i < HFCUSB_NUM_FIFOS; i++) {
- if (context->fifos[i].usb_transfer_mode == USB_ISOC) {
- if (context->fifos[i].active > 0) {
- stop_isoc_chain(&context->fifos[i]);
- DBG(HFCUSB_DBG_INIT,
- "HFC-S USB: %s stopping ISOC chain Fifo(%i)",
- __func__, i);
- }
- } else {
- if (context->fifos[i].active > 0) {
- context->fifos[i].active = 0;
- DBG(HFCUSB_DBG_INIT,
- "HFC-S USB: %s unlinking URB for Fifo(%i)",
- __func__, i);
- }
- usb_kill_urb(context->fifos[i].urb);
- usb_free_urb(context->fifos[i].urb);
- context->fifos[i].urb = NULL;
- }
- context->fifos[i].active = 0;
- }
- usb_kill_urb(context->ctrl_urb);
- usb_free_urb(context->ctrl_urb);
- context->ctrl_urb = NULL;
- hisax_unregister(&context->d_if);
- kfree(context); /* free our structure again */
-}
-
-static struct usb_driver hfc_drv = {
- .name = "hfc_usb",
- .id_table = hfcusb_idtab,
- .probe = hfc_usb_probe,
- .disconnect = hfc_usb_disconnect,
- .disable_hub_initiated_lpm = 1,
-};
-
-static void __exit
-hfc_usb_mod_exit(void)
-{
- usb_deregister(&hfc_drv); /* release our driver */
- printk(KERN_INFO "HFC-S USB: module removed\n");
-}
-
-static int __init
-hfc_usb_mod_init(void)
-{
- char revstr[30], datestr[30], dummy[30];
-#ifndef CONFIG_HISAX_DEBUG
- hfc_debug = debug;
-#endif
- sscanf(hfcusb_revision,
- "%s %s $ %s %s %s $ ", dummy, revstr,
- dummy, datestr, dummy);
- printk(KERN_INFO
- "HFC-S USB: driver module revision %s date %s loaded, (debug=%i)\n",
- revstr, datestr, debug);
- if (usb_register(&hfc_drv)) {
- printk(KERN_INFO
- "HFC-S USB: Unable to register HFC-S USB module at usb stack\n");
- return (-1); /* unable to register */
- }
- return (0);
-}
-
-module_init(hfc_usb_mod_init);
-module_exit(hfc_usb_mod_exit);
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
-MODULE_DEVICE_TABLE(usb, hfcusb_idtab);
diff --git a/drivers/isdn/hisax/hfc_usb.h b/drivers/isdn/hisax/hfc_usb.h
deleted file mode 100644
index 9a212330e8a8..000000000000
--- a/drivers/isdn/hisax/hfc_usb.h
+++ /dev/null
@@ -1,208 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * hfc_usb.h
- *
- * $Id: hfc_usb.h,v 1.1.2.5 2007/08/20 14:36:03 mbachem Exp $
- */
-
-#ifndef __HFC_USB_H__
-#define __HFC_USB_H__
-
-#define DRIVER_AUTHOR "Peter Sprenger (sprenger@moving-byters.de)"
-#define DRIVER_DESC "HFC-S USB based HiSAX ISDN driver"
-
-
-#define HFC_CTRL_TIMEOUT 20 /* 5ms timeout writing/reading regs */
-#define HFC_TIMER_T3 8000 /* timeout for l1 activation timer */
-#define HFC_TIMER_T4 500 /* time for state change interval */
-
-#define HFCUSB_L1_STATECHANGE 0 /* L1 state changed */
-#define HFCUSB_L1_DRX 1 /* D-frame received */
-#define HFCUSB_L1_ERX 2 /* E-frame received */
-#define HFCUSB_L1_DTX 4 /* D-frames completed */
-
-#define MAX_BCH_SIZE 2048 /* allowed B-channel packet size */
-
-#define HFCUSB_RX_THRESHOLD 64 /* threshold for fifo report bit rx */
-#define HFCUSB_TX_THRESHOLD 64 /* threshold for fifo report bit tx */
-
-#define HFCUSB_CHIP_ID 0x16 /* Chip ID register index */
-#define HFCUSB_CIRM 0x00 /* cirm register index */
-#define HFCUSB_USB_SIZE 0x07 /* int length register */
-#define HFCUSB_USB_SIZE_I 0x06 /* iso length register */
-#define HFCUSB_F_CROSS 0x0b /* bit order register */
-#define HFCUSB_CLKDEL 0x37 /* bit delay register */
-#define HFCUSB_CON_HDLC 0xfa /* channel connect register */
-#define HFCUSB_HDLC_PAR 0xfb
-#define HFCUSB_SCTRL 0x31 /* S-bus control register (tx) */
-#define HFCUSB_SCTRL_E 0x32 /* same for E and special funcs */
-#define HFCUSB_SCTRL_R 0x33 /* S-bus control register (rx) */
-#define HFCUSB_F_THRES 0x0c /* threshold register */
-#define HFCUSB_FIFO 0x0f /* fifo select register */
-#define HFCUSB_F_USAGE 0x1a /* fifo usage register */
-#define HFCUSB_MST_MODE0 0x14
-#define HFCUSB_MST_MODE1 0x15
-#define HFCUSB_P_DATA 0x1f
-#define HFCUSB_INC_RES_F 0x0e
-#define HFCUSB_STATES 0x30
-
-#define HFCUSB_CHIPID 0x40 /* ID value of HFC-S USB */
-
-
-/* fifo registers */
-#define HFCUSB_NUM_FIFOS 8 /* maximum number of fifos */
-#define HFCUSB_B1_TX 0 /* index for B1 transmit bulk/int */
-#define HFCUSB_B1_RX 1 /* index for B1 receive bulk/int */
-#define HFCUSB_B2_TX 2
-#define HFCUSB_B2_RX 3
-#define HFCUSB_D_TX 4
-#define HFCUSB_D_RX 5
-#define HFCUSB_PCM_TX 6
-#define HFCUSB_PCM_RX 7
-
-/*
- * used to switch snd_transfer_mode for different TA modes; e.g. the Billion USB TA
- * only supports ISO out, while the Cologne Chip EVAL TA only supports BULK out
- */
-#define USB_INT 0
-#define USB_BULK 1
-#define USB_ISOC 2
-
-#define ISOC_PACKETS_D 8
-#define ISOC_PACKETS_B 8
-#define ISO_BUFFER_SIZE 128
-
-/* Fifo flow Control for TX ISO */
-#define SINK_MAX 68
-#define SINK_MIN 48
-#define SINK_DMIN 12
-#define SINK_DMAX 18
-#define BITLINE_INF (-64 * 8)
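-/* bit_line (see tx_iso_complete) is drained by one of the SINK_* values per
- * ISO packet; the D-channel uses the smaller SINK_D* rates, and BITLINE_INF
- * limits how much transmit credit may accumulate while a fifo is idle */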
-
-/* HFC-S USB register access by control URBs */
-#define write_usb(a, b, c) usb_control_msg((a)->dev, (a)->ctrl_out_pipe, 0, 0x40, (c), (b), NULL, 0, HFC_CTRL_TIMEOUT)
-#define read_usb(a, b, c) usb_control_msg((a)->dev, (a)->ctrl_in_pipe, 1, 0xC0, 0, (b), (c), 1, HFC_CTRL_TIMEOUT)
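-/* the register number travels in wIndex; writes put the value in wValue with
- * no data stage, reads return a single byte into the supplied buffer */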
-#define HFC_CTRL_BUFSIZE 32
-
-/* entry and size of output/input control buffer */
-typedef struct {
- __u8 hfc_reg; /* register number */
- __u8 reg_val; /* value to be written (or read) */
- int action; /* data for action handler */
-} ctrl_buft;
-
-/* Debugging Flags */
-#define HFCUSB_DBG_INIT 0x0001
-#define HFCUSB_DBG_STATES 0x0002
-#define HFCUSB_DBG_DCHANNEL 0x0080
-#define HFCUSB_DBG_FIFO_ERR 0x4000
-#define HFCUSB_DBG_VERBOSE_USB 0x8000
-
-/*
- * URB error codes:
- * Used to represent a list of values and their respective symbolic names
- */
-struct hfcusb_symbolic_list {
- const int num;
- const char *name;
-};
-
-static struct hfcusb_symbolic_list urb_errlist[] = {
- {-ENOMEM, "No memory for allocation of internal structures"},
- {-ENOSPC, "The host controller's bandwidth is already consumed"},
- {-ENOENT, "URB was canceled by unlink_urb"},
- {-EXDEV, "ISO transfer only partially completed"},
- {-EAGAIN, "Too much scheduled for the future"},
- {-ENXIO, "URB already queued"},
- {-EFBIG, "Too many ISO frames requested"},
- {-ENOSR, "Buffer error (overrun)"},
- {-EPIPE, "Specified endpoint is stalled (device not responding)"},
- {-EOVERFLOW, "Babble (bad cable?)"},
- {-EPROTO, "Bit-stuff error (bad cable?)"},
- {-EILSEQ, "CRC/Timeout"},
- {-ETIMEDOUT, "NAK (device does not respond)"},
- {-ESHUTDOWN, "Device unplugged"},
- {-1, NULL}
-};
-
-
-/*
- * device dependent information to support different
- * ISDN TAs using the HFC-S USB chip
- */
-
-/* The USB descriptor needs to contain one of the following endpoint combinations: */
-#define CNF_4INT3ISO 1 // 4 INT IN, 3 ISO OUT
-#define CNF_3INT3ISO 2 // 3 INT IN, 3 ISO OUT
-#define CNF_4ISO3ISO 3 // 4 ISO IN, 3 ISO OUT
-#define CNF_3ISO3ISO 4 // 3 ISO IN, 3 ISO OUT
-
-#define EP_NUL 1 // Endpoint at this position not allowed
-#define EP_NOP 2 // all type of endpoints allowed at this position
-#define EP_ISO 3 // Isochronous endpoint mandatory at this position
-#define EP_BLK 4 // Bulk endpoint mandatory at this position
-#define EP_INT 5 // Interrupt endpoint mandatory at this position
-
-/*
- * List of all supported endpoint configuration sets, used to find the
- * best matching endpoint configuration within a device's USB descriptor.
- * We need at least 3 RX endpoints and 3 TX endpoints, either
- * INT-in and ISO-out, or ISO-in and ISO-out;
- * with 4 RX endpoints even E-Channel logging is possible
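- * Each row: 16 endpoint-slot codes (EP_*), then the matching CNF_*
- * constant, the minimum allowed bInterval for interrupt endpoints,
- * and a flag telling whether E-channel logging is available.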
- */
-static int validconf[][19] = {
- // INT in, ISO out config
- {EP_NUL, EP_INT, EP_NUL, EP_INT, EP_NUL, EP_INT, EP_NOP, EP_INT,
- EP_ISO, EP_NUL, EP_ISO, EP_NUL, EP_ISO, EP_NUL, EP_NUL, EP_NUL,
- CNF_4INT3ISO, 2, 1},
- {EP_NUL, EP_INT, EP_NUL, EP_INT, EP_NUL, EP_INT, EP_NUL, EP_NUL,
- EP_ISO, EP_NUL, EP_ISO, EP_NUL, EP_ISO, EP_NUL, EP_NUL, EP_NUL,
- CNF_3INT3ISO, 2, 0},
- // ISO in, ISO out config
- {EP_NUL, EP_NUL, EP_NUL, EP_NUL, EP_NUL, EP_NUL, EP_NUL, EP_NUL,
- EP_ISO, EP_ISO, EP_ISO, EP_ISO, EP_ISO, EP_ISO, EP_NOP, EP_ISO,
- CNF_4ISO3ISO, 2, 1},
- {EP_NUL, EP_NUL, EP_NUL, EP_NUL, EP_NUL, EP_NUL, EP_NUL, EP_NUL,
- EP_ISO, EP_ISO, EP_ISO, EP_ISO, EP_ISO, EP_ISO, EP_NUL, EP_NUL,
- CNF_3ISO3ISO, 2, 0},
- {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} // EOL element
-};
-
-#ifdef CONFIG_HISAX_DEBUG
-// string description of chosen config
-static char *conf_str[] = {
- "4 Interrupt IN + 3 Isochron OUT",
- "3 Interrupt IN + 3 Isochron OUT",
- "4 Isochron IN + 3 Isochron OUT",
- "3 Isochron IN + 3 Isochron OUT"
-};
-#endif
-
-typedef struct {
- int vendor; // vendor id
- int prod_id; // product id
- char *vend_name; // vendor string
- __u8 led_scheme; // led display scheme
- signed short led_bits[8]; // array of 8 possible LED bitmask settings
-} vendor_data;
-
-#define LED_OFF 0 // no LED support
-#define LED_SCHEME1 1 // LED standard scheme
-#define LED_SCHEME2 2 // not used yet...
-
-#define LED_POWER_ON 1
-#define LED_POWER_OFF 2
-#define LED_S0_ON 3
-#define LED_S0_OFF 4
-#define LED_B1_ON 5
-#define LED_B1_OFF 6
-#define LED_B1_DATA 7
-#define LED_B2_ON 8
-#define LED_B2_OFF 9
-#define LED_B2_DATA 10
-
-#define LED_NORMAL 0 // LEDs are normal
-#define LED_INVERTED 1 // LEDs are inverted
-
-
-#endif // __HFC_USB_H__
diff --git a/drivers/isdn/hisax/hfcscard.c b/drivers/isdn/hisax/hfcscard.c
deleted file mode 100644
index 91b5219499ca..000000000000
--- a/drivers/isdn/hisax/hfcscard.c
+++ /dev/null
@@ -1,261 +0,0 @@
-/* $Id: hfcscard.c,v 1.10.2.4 2004/01/14 16:04:48 keil Exp $
- *
- * low level stuff for hfcs based cards (Teles3c, ACER P10)
- *
- * Author Karsten Keil
- * Copyright by Karsten Keil <keil@isdn4linux.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/init.h>
-#include <linux/isapnp.h>
-#include "hisax.h"
-#include "hfc_2bds0.h"
-#include "isdnl1.h"
-
-static const char *hfcs_revision = "$Revision: 1.10.2.4 $";
-
-static irqreturn_t
-hfcs_interrupt(int intno, void *dev_id)
-{
- struct IsdnCardState *cs = dev_id;
- u_char val, stat;
- u_long flags;
-
- spin_lock_irqsave(&cs->lock, flags);
- if ((HFCD_ANYINT | HFCD_BUSY_NBUSY) &
- (stat = cs->BC_Read_Reg(cs, HFCD_DATA, HFCD_STAT))) {
- val = cs->BC_Read_Reg(cs, HFCD_DATA, HFCD_INT_S1);
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "HFCS: stat(%02x) s1(%02x)", stat, val);
- hfc2bds0_interrupt(cs, val);
- } else {
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "HFCS: irq_no_irq stat(%02x)", stat);
- }
- spin_unlock_irqrestore(&cs->lock, flags);
- return IRQ_HANDLED;
-}
-
-static void
-hfcs_Timer(struct timer_list *t)
-{
- struct IsdnCardState *cs = from_timer(cs, t, hw.hfcD.timer);
- cs->hw.hfcD.timer.expires = jiffies + 75;
- /* WD RESET */
-/* WriteReg(cs, HFCD_DATA, HFCD_CTMT, cs->hw.hfcD.ctmt | 0x80);
- add_timer(&cs->hw.hfcD.timer);
-*/
-}
-
-static void
-release_io_hfcs(struct IsdnCardState *cs)
-{
- release2bds0(cs);
- del_timer(&cs->hw.hfcD.timer);
- if (cs->hw.hfcD.addr)
- release_region(cs->hw.hfcD.addr, 2);
-}
-
-static void
-reset_hfcs(struct IsdnCardState *cs)
-{
- printk(KERN_INFO "HFCS: resetting card\n");
- cs->hw.hfcD.cirm = HFCD_RESET;
- if (cs->typ == ISDN_CTYPE_TELES3C)
- cs->hw.hfcD.cirm |= HFCD_MEM8K;
- cs->BC_Write_Reg(cs, HFCD_DATA, HFCD_CIRM, cs->hw.hfcD.cirm); /* Reset On */
- mdelay(10);
- cs->hw.hfcD.cirm = 0;
- if (cs->typ == ISDN_CTYPE_TELES3C)
- cs->hw.hfcD.cirm |= HFCD_MEM8K;
- cs->BC_Write_Reg(cs, HFCD_DATA, HFCD_CIRM, cs->hw.hfcD.cirm); /* Reset Off */
- mdelay(10);
- if (cs->typ == ISDN_CTYPE_TELES3C)
- cs->hw.hfcD.cirm |= HFCD_INTB;
- else if (cs->typ == ISDN_CTYPE_ACERP10)
- cs->hw.hfcD.cirm |= HFCD_INTA;
- cs->BC_Write_Reg(cs, HFCD_DATA, HFCD_CIRM, cs->hw.hfcD.cirm);
- cs->BC_Write_Reg(cs, HFCD_DATA, HFCD_CLKDEL, 0x0e);
- cs->BC_Write_Reg(cs, HFCD_DATA, HFCD_TEST, HFCD_AUTO_AWAKE); /* S/T Auto awake */
- cs->hw.hfcD.ctmt = HFCD_TIM25 | HFCD_AUTO_TIMER;
- cs->BC_Write_Reg(cs, HFCD_DATA, HFCD_CTMT, cs->hw.hfcD.ctmt);
- cs->hw.hfcD.int_m2 = HFCD_IRQ_ENABLE;
- cs->hw.hfcD.int_m1 = HFCD_INTS_B1TRANS | HFCD_INTS_B2TRANS |
- HFCD_INTS_DTRANS | HFCD_INTS_B1REC | HFCD_INTS_B2REC |
- HFCD_INTS_DREC | HFCD_INTS_L1STATE;
- cs->BC_Write_Reg(cs, HFCD_DATA, HFCD_INT_M1, cs->hw.hfcD.int_m1);
- cs->BC_Write_Reg(cs, HFCD_DATA, HFCD_INT_M2, cs->hw.hfcD.int_m2);
- cs->BC_Write_Reg(cs, HFCD_DATA, HFCD_STATES, HFCD_LOAD_STATE | 2); /* HFC ST 2 */
- udelay(10);
- cs->BC_Write_Reg(cs, HFCD_DATA, HFCD_STATES, 2); /* HFC ST 2 */
- cs->hw.hfcD.mst_m = HFCD_MASTER;
- cs->BC_Write_Reg(cs, HFCD_DATA, HFCD_MST_MODE, cs->hw.hfcD.mst_m); /* HFC Master */
- cs->hw.hfcD.sctrl = 0;
- cs->BC_Write_Reg(cs, HFCD_DATA, HFCD_SCTRL, cs->hw.hfcD.sctrl);
-}
-
-static int
-hfcs_card_msg(struct IsdnCardState *cs, int mt, void *arg)
-{
- u_long flags;
- int delay;
-
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "HFCS: card_msg %x", mt);
- switch (mt) {
- case CARD_RESET:
- spin_lock_irqsave(&cs->lock, flags);
- reset_hfcs(cs);
- spin_unlock_irqrestore(&cs->lock, flags);
- return (0);
- case CARD_RELEASE:
- release_io_hfcs(cs);
- return (0);
- case CARD_INIT:
- delay = (75 * HZ) / 100 + 1;
- mod_timer(&cs->hw.hfcD.timer, jiffies + delay);
- spin_lock_irqsave(&cs->lock, flags);
- reset_hfcs(cs);
- init2bds0(cs);
- spin_unlock_irqrestore(&cs->lock, flags);
- delay = (80 * HZ) / 1000 + 1;
- msleep(80);
- spin_lock_irqsave(&cs->lock, flags);
- cs->hw.hfcD.ctmt |= HFCD_TIM800;
- cs->BC_Write_Reg(cs, HFCD_DATA, HFCD_CTMT, cs->hw.hfcD.ctmt);
- cs->BC_Write_Reg(cs, HFCD_DATA, HFCD_MST_MODE, cs->hw.hfcD.mst_m);
- spin_unlock_irqrestore(&cs->lock, flags);
- return (0);
- case CARD_TEST:
- return (0);
- }
- return (0);
-}
-
-#ifdef __ISAPNP__
-static struct isapnp_device_id hfc_ids[] = {
- { ISAPNP_VENDOR('A', 'N', 'X'), ISAPNP_FUNCTION(0x1114),
- ISAPNP_VENDOR('A', 'N', 'X'), ISAPNP_FUNCTION(0x1114),
- (unsigned long) "Acer P10" },
- { ISAPNP_VENDOR('B', 'I', 'L'), ISAPNP_FUNCTION(0x0002),
- ISAPNP_VENDOR('B', 'I', 'L'), ISAPNP_FUNCTION(0x0002),
- (unsigned long) "Billion 2" },
- { ISAPNP_VENDOR('B', 'I', 'L'), ISAPNP_FUNCTION(0x0001),
- ISAPNP_VENDOR('B', 'I', 'L'), ISAPNP_FUNCTION(0x0001),
- (unsigned long) "Billion 1" },
- { ISAPNP_VENDOR('T', 'A', 'G'), ISAPNP_FUNCTION(0x7410),
- ISAPNP_VENDOR('T', 'A', 'G'), ISAPNP_FUNCTION(0x7410),
- (unsigned long) "IStar PnP" },
- { ISAPNP_VENDOR('T', 'A', 'G'), ISAPNP_FUNCTION(0x2610),
- ISAPNP_VENDOR('T', 'A', 'G'), ISAPNP_FUNCTION(0x2610),
- (unsigned long) "Teles 16.3c" },
- { ISAPNP_VENDOR('S', 'F', 'M'), ISAPNP_FUNCTION(0x0001),
- ISAPNP_VENDOR('S', 'F', 'M'), ISAPNP_FUNCTION(0x0001),
- (unsigned long) "Tornado Tipa C" },
- { ISAPNP_VENDOR('K', 'Y', 'E'), ISAPNP_FUNCTION(0x0001),
- ISAPNP_VENDOR('K', 'Y', 'E'), ISAPNP_FUNCTION(0x0001),
- (unsigned long) "Genius Speed Surfer" },
- { 0, }
-};
-
-static struct isapnp_device_id *ipid = &hfc_ids[0];
-static struct pnp_card *pnp_c = NULL;
-#endif
-
-int setup_hfcs(struct IsdnCard *card)
-{
- struct IsdnCardState *cs = card->cs;
- char tmp[64];
-
- strcpy(tmp, hfcs_revision);
- printk(KERN_INFO "HiSax: HFC-S driver Rev. %s\n", HiSax_getrev(tmp));
-
-#ifdef __ISAPNP__
- if (!card->para[1] && isapnp_present()) {
- struct pnp_dev *pnp_d;
- while (ipid->card_vendor) {
- if ((pnp_c = pnp_find_card(ipid->card_vendor,
- ipid->card_device, pnp_c))) {
- pnp_d = NULL;
- if ((pnp_d = pnp_find_dev(pnp_c,
- ipid->vendor, ipid->function, pnp_d))) {
- int err;
-
- printk(KERN_INFO "HiSax: %s detected\n",
- (char *)ipid->driver_data);
- pnp_disable_dev(pnp_d);
- err = pnp_activate_dev(pnp_d);
- if (err < 0) {
- printk(KERN_WARNING "%s: pnp_activate_dev ret(%d)\n",
- __func__, err);
- return (0);
- }
- card->para[1] = pnp_port_start(pnp_d, 0);
- card->para[0] = pnp_irq(pnp_d, 0);
- if (card->para[0] == -1 || !card->para[1]) {
- printk(KERN_ERR "HFC PnP:some resources are missing %ld/%lx\n",
- card->para[0], card->para[1]);
- pnp_disable_dev(pnp_d);
- return (0);
- }
- break;
- } else {
- printk(KERN_ERR "HFC PnP: PnP error card found, no device\n");
- }
- }
- ipid++;
- pnp_c = NULL;
- }
- if (!ipid->card_vendor) {
- printk(KERN_INFO "HFC PnP: no ISAPnP card found\n");
- return (0);
- }
- }
-#endif
- cs->hw.hfcD.addr = card->para[1] & 0xfffe;
- cs->irq = card->para[0];
- cs->hw.hfcD.cip = 0;
- cs->hw.hfcD.int_s1 = 0;
- cs->hw.hfcD.send = NULL;
- cs->bcs[0].hw.hfc.send = NULL;
- cs->bcs[1].hw.hfc.send = NULL;
- cs->hw.hfcD.dfifosize = 512;
- cs->dc.hfcd.ph_state = 0;
- cs->hw.hfcD.fifo = 255;
- if (cs->typ == ISDN_CTYPE_TELES3C) {
- cs->hw.hfcD.bfifosize = 1024 + 512;
- } else if (cs->typ == ISDN_CTYPE_ACERP10) {
- cs->hw.hfcD.bfifosize = 7 * 1024 + 512;
- } else
- return (0);
- if (!request_region(cs->hw.hfcD.addr, 2, "HFCS isdn")) {
- printk(KERN_WARNING
- "HiSax: %s config port %x-%x already in use\n",
- CardType[card->typ],
- cs->hw.hfcD.addr,
- cs->hw.hfcD.addr + 2);
- return (0);
- }
- printk(KERN_INFO
- "HFCS: defined at 0x%x IRQ %d HZ %d\n",
- cs->hw.hfcD.addr,
- cs->irq, HZ);
- if (cs->typ == ISDN_CTYPE_TELES3C) {
- /* Teles 16.3c IO ADR is 0x200 | YY0U (YY Bit 15/14 address) */
- outb(0x00, cs->hw.hfcD.addr);
- outb(0x56, cs->hw.hfcD.addr | 1);
- } else if (cs->typ == ISDN_CTYPE_ACERP10) {
- /* Acer P10 IO ADR is 0x300 */
- outb(0x00, cs->hw.hfcD.addr);
- outb(0x57, cs->hw.hfcD.addr | 1);
- }
- set_cs_func(cs);
- timer_setup(&cs->hw.hfcD.timer, hfcs_Timer, 0);
- cs->cardmsg = &hfcs_card_msg;
- cs->irq_func = &hfcs_interrupt;
- return (1);
-}
diff --git a/drivers/isdn/hisax/hisax.h b/drivers/isdn/hisax/hisax.h
deleted file mode 100644
index 40080e06421c..000000000000
--- a/drivers/isdn/hisax/hisax.h
+++ /dev/null
@@ -1,1352 +0,0 @@
-/* $Id: hisax.h,v 2.64.2.4 2004/02/11 13:21:33 keil Exp $
- *
- * Basic declarations, defines and prototypes
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/major.h>
-#include <asm/io.h>
-#include <linux/delay.h>
-#include <linux/kernel.h>
-#include <linux/signal.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/mman.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/timer.h>
-#include <linux/wait.h>
-#include <linux/isdnif.h>
-#include <linux/tty.h>
-#include <linux/serial_reg.h>
-#include <linux/netdevice.h>
-
-#define ERROR_STATISTIC
-
-#define REQUEST 0
-#define CONFIRM 1
-#define INDICATION 2
-#define RESPONSE 3
-
-#define HW_ENABLE 0x0000
-#define HW_RESET 0x0004
-#define HW_POWERUP 0x0008
-#define HW_ACTIVATE 0x0010
-#define HW_DEACTIVATE 0x0018
-
-#define HW_INFO1 0x0010
-#define HW_INFO2 0x0020
-#define HW_INFO3 0x0030
-#define HW_INFO4 0x0040
-#define HW_INFO4_P8 0x0040
-#define HW_INFO4_P10 0x0048
-#define HW_RSYNC 0x0060
-#define HW_TESTLOOP 0x0070
-#define CARD_RESET 0x00F0
-#define CARD_INIT 0x00F2
-#define CARD_RELEASE 0x00F3
-#define CARD_TEST 0x00F4
-#define CARD_AUX_IND 0x00F5
-
-#define PH_ACTIVATE 0x0100
-#define PH_DEACTIVATE 0x0110
-#define PH_DATA 0x0120
-#define PH_PULL 0x0130
-#define PH_TESTLOOP 0x0140
-#define PH_PAUSE 0x0150
-#define MPH_ACTIVATE 0x0180
-#define MPH_DEACTIVATE 0x0190
-#define MPH_INFORMATION 0x01A0
-
-#define DL_ESTABLISH 0x0200
-#define DL_RELEASE 0x0210
-#define DL_DATA 0x0220
-#define DL_FLUSH 0x0224
-#define DL_UNIT_DATA 0x0230
-
-#define MDL_BC_RELEASE 0x0278 // Formula-n enter:now
-#define MDL_BC_ASSIGN 0x027C // Formula-n enter:now
-#define MDL_ASSIGN 0x0280
-#define MDL_REMOVE 0x0284
-#define MDL_ERROR 0x0288
-#define MDL_INFO_SETUP 0x02E0
-#define MDL_INFO_CONN 0x02E4
-#define MDL_INFO_REL 0x02E8
-
-#define CC_SETUP 0x0300
-#define CC_RESUME 0x0304
-#define CC_MORE_INFO 0x0310
-#define CC_IGNORE 0x0320
-#define CC_REJECT 0x0324
-#define CC_SETUP_COMPL 0x0330
-#define CC_PROCEEDING 0x0340
-#define CC_ALERTING 0x0344
-#define CC_PROGRESS 0x0348
-#define CC_CONNECT 0x0350
-#define CC_CHARGE 0x0354
-#define CC_NOTIFY 0x0358
-#define CC_DISCONNECT 0x0360
-#define CC_RELEASE 0x0368
-#define CC_SUSPEND 0x0370
-#define CC_PROCEED_SEND 0x0374
-#define CC_REDIR 0x0378
-#define CC_T302 0x0382
-#define CC_T303 0x0383
-#define CC_T304 0x0384
-#define CC_T305 0x0385
-#define CC_T308_1 0x0388
-#define CC_T308_2 0x038A
-#define CC_T309 0x0309
-#define CC_T310 0x0390
-#define CC_T313 0x0393
-#define CC_T318 0x0398
-#define CC_T319 0x0399
-#define CC_TSPID 0x03A0
-#define CC_NOSETUP_RSP 0x03E0
-#define CC_SETUP_ERR 0x03E1
-#define CC_SUSPEND_ERR 0x03E2
-#define CC_RESUME_ERR 0x03E3
-#define CC_CONNECT_ERR 0x03E4
-#define CC_RELEASE_ERR 0x03E5
-#define CC_RESTART 0x03F4
-#define CC_TDSS1_IO 0x13F4 /* DSS1 IO user timer */
-#define CC_TNI1_IO 0x13F5 /* NI1 IO user timer */
-
-/* define maximum number of possible waiting incoming calls */
-#define MAX_WAITING_CALLS 2
-
-
-#ifdef __KERNEL__
-
-extern const char *CardType[];
-extern int nrcards;
-
-extern const char *l1_revision;
-extern const char *l2_revision;
-extern const char *l3_revision;
-extern const char *lli_revision;
-extern const char *tei_revision;
-
-/* include l3dss1 & ni1 specific process structures, but no other defines */
-#ifdef CONFIG_HISAX_EURO
-#define l3dss1_process
-#include "l3dss1.h"
-#undef l3dss1_process
-#endif /* CONFIG_HISAX_EURO */
-
-#ifdef CONFIG_HISAX_NI1
-#define l3ni1_process
-#include "l3ni1.h"
-#undef l3ni1_process
-#endif /* CONFIG_HISAX_NI1 */
-
-#define MAX_DFRAME_LEN 260
-#define MAX_DFRAME_LEN_L1 300
-#define HSCX_BUFMAX 4096
-#define MAX_DATA_SIZE (HSCX_BUFMAX - 4)
-#define MAX_DATA_MEM (HSCX_BUFMAX + 64)
-#define RAW_BUFMAX (((HSCX_BUFMAX * 6) / 5) + 5)
-#define MAX_HEADER_LEN 4
-#define MAX_WINDOW 8
-#define MAX_MON_FRAME 32
-#define MAX_DLOG_SPACE 2048
-#define MAX_BLOG_SPACE 256
-
-/* #define I4L_IRQ_FLAG SA_INTERRUPT */
-#define I4L_IRQ_FLAG 0
-
-/*
- * Statemachine
- */
-
-struct FsmInst;
-
-typedef void (*FSMFNPTR)(struct FsmInst *, int, void *);
-
-struct Fsm {
- FSMFNPTR *jumpmatrix;
- int state_count, event_count;
- char **strEvent, **strState;
-};
-
-struct FsmInst {
- struct Fsm *fsm;
- int state;
- int debug;
- void *userdata;
- int userint;
- void (*printdebug) (struct FsmInst *, char *, ...);
-};
-
-struct FsmNode {
- int state, event;
- void (*routine) (struct FsmInst *, int, void *);
-};
-
-struct FsmTimer {
- struct FsmInst *fi;
- struct timer_list tl;
- int event;
- void *arg;
-};
-
-struct L3Timer {
- struct l3_process *pc;
- struct timer_list tl;
- int event;
-};
-
-#define FLG_L1_ACTIVATING 1
-#define FLG_L1_ACTIVATED 2
-#define FLG_L1_DEACTTIMER 3
-#define FLG_L1_ACTTIMER 4
-#define FLG_L1_T3RUN 5
-#define FLG_L1_PULL_REQ 6
-#define FLG_L1_UINT 7
-
-struct Layer1 {
- void *hardware;
- struct BCState *bcs;
- struct PStack **stlistp;
- unsigned long Flags;
- struct FsmInst l1m;
- struct FsmTimer timer;
- void (*l1l2) (struct PStack *, int, void *);
- void (*l1hw) (struct PStack *, int, void *);
- void (*l1tei) (struct PStack *, int, void *);
- int mode, bc;
- int delay;
-};
-
-#define GROUP_TEI 127
-#define TEI_SAPI 63
-#define CTRL_SAPI 0
-#define PACKET_NOACK 7
-
-/* Layer2 Flags */
-
-#define FLG_LAPB 0
-#define FLG_LAPD 1
-#define FLG_ORIG 2
-#define FLG_MOD128 3
-#define FLG_PEND_REL 4
-#define FLG_L3_INIT 5
-#define FLG_T200_RUN 6
-#define FLG_ACK_PEND 7
-#define FLG_REJEXC 8
-#define FLG_OWN_BUSY 9
-#define FLG_PEER_BUSY 10
-#define FLG_DCHAN_BUSY 11
-#define FLG_L1_ACTIV 12
-#define FLG_ESTAB_PEND 13
-#define FLG_PTP 14
-#define FLG_FIXED_TEI 15
-#define FLG_L2BLOCK 16
-
-struct Layer2 {
- int tei;
- int sap;
- int maxlen;
- u_long flag;
- spinlock_t lock;
- u_int vs, va, vr;
- int rc;
- unsigned int window;
- unsigned int sow;
- struct sk_buff *windowar[MAX_WINDOW];
- struct sk_buff_head i_queue;
- struct sk_buff_head ui_queue;
- void (*l2l1) (struct PStack *, int, void *);
- void (*l2l3) (struct PStack *, int, void *);
- void (*l2tei) (struct PStack *, int, void *);
- struct FsmInst l2m;
- struct FsmTimer t200, t203;
- int T200, N200, T203;
- int debug;
- char debug_id[16];
-};
-
-struct Layer3 {
- void (*l3l4) (struct PStack *, int, void *);
- void (*l3ml3) (struct PStack *, int, void *);
- void (*l3l2) (struct PStack *, int, void *);
- struct FsmInst l3m;
- struct FsmTimer l3m_timer;
- struct sk_buff_head squeue;
- struct l3_process *proc;
- struct l3_process *global;
- int N303;
- int debug;
- char debug_id[8];
-};
-
-struct LLInterface {
- void (*l4l3) (struct PStack *, int, void *);
- int (*l4l3_proto) (struct PStack *, isdn_ctrl *);
- void *userdata;
- u_long flag;
-};
-
-#define FLG_LLI_L1WAKEUP 1
-#define FLG_LLI_L2WAKEUP 2
-
-struct Management {
- int ri;
- struct FsmInst tei_m;
- struct FsmTimer t202;
- int T202, N202, debug;
- void (*layer) (struct PStack *, int, void *);
-};
-
-#define NO_CAUSE 254
-
-struct Param {
- u_char cause;
- u_char loc;
- u_char diag[6];
- int bchannel;
- int chargeinfo;
- int spv; /* SPV Flag */
- setup_parm setup; /* numbers and service indicator from isdnif.h */
- u_char moderate; /* transfer mode and rate (bearer octet 4) */
-};
-
-
-struct PStack {
- struct PStack *next;
- struct Layer1 l1;
- struct Layer2 l2;
- struct Layer3 l3;
- struct LLInterface lli;
- struct Management ma;
- int protocol; /* EDSS1, 1TR6 or NI1 */
-
- /* protocol specific data fields */
- union
- { u_char uuuu; /* only as dummy */
-#ifdef CONFIG_HISAX_EURO
- dss1_stk_priv dss1; /* private dss1 data */
-#endif /* CONFIG_HISAX_EURO */
-#ifdef CONFIG_HISAX_NI1
- ni1_stk_priv ni1; /* private ni1 data */
-#endif /* CONFIG_HISAX_NI1 */
- } prot;
-};
-
-struct l3_process {
- int callref;
- int state;
- struct L3Timer timer;
- int N303;
- int debug;
- struct Param para;
- struct Channel *chan;
- struct PStack *st;
- struct l3_process *next;
- ulong redir_result;
-
- /* protocol specific data fields */
- union
- { u_char uuuu; /* only when euro not defined, avoiding empty union */
-#ifdef CONFIG_HISAX_EURO
- dss1_proc_priv dss1; /* private dss1 data */
-#endif /* CONFIG_HISAX_EURO */
-#ifdef CONFIG_HISAX_NI1
- ni1_proc_priv ni1; /* private ni1 data */
-#endif /* CONFIG_HISAX_NI1 */
- } prot;
-};
-
-struct hscx_hw {
- int hscx;
- int rcvidx;
- int count; /* Current skb sent count */
- u_char *rcvbuf; /* B-Channel receive Buffer */
- u_char tsaxr0;
- u_char tsaxr1;
-};
-
-struct w6692B_hw {
- int bchan;
- int rcvidx;
- int count; /* Current skb sent count */
- u_char *rcvbuf; /* B-Channel receive Buffer */
-};
-
-struct isar_reg {
- unsigned long Flags;
- volatile u_char bstat;
- volatile u_char iis;
- volatile u_char cmsb;
- volatile u_char clsb;
- volatile u_char par[8];
-};
-
-struct isar_hw {
- int dpath;
- int rcvidx;
- int txcnt;
- int mml;
- u_char state;
- u_char cmd;
- u_char mod;
- u_char newcmd;
- u_char newmod;
- char try_mod;
- struct timer_list ftimer;
- u_char *rcvbuf; /* B-Channel receive Buffer */
- u_char conmsg[16];
- struct isar_reg *reg;
-};
-
-struct hdlc_stat_reg {
-#ifdef __BIG_ENDIAN
- u_char fill;
- u_char mode;
- u_char xml;
- u_char cmd;
-#else
- u_char cmd;
- u_char xml;
- u_char mode;
- u_char fill;
-#endif
-} __attribute__((packed));
-
-struct hdlc_hw {
- union {
- u_int ctrl;
- struct hdlc_stat_reg sr;
- } ctrl;
- u_int stat;
- int rcvidx;
- int count; /* Current skb sent count */
- u_char *rcvbuf; /* B-Channel receive Buffer */
-};
-
-struct hfcB_hw {
- unsigned int *send;
- int f1;
- int f2;
-};
-
-struct tiger_hw {
- u_int *send;
- u_int *s_irq;
- u_int *s_end;
- u_int *sendp;
- u_int *rec;
- int free;
- u_char *rcvbuf;
- u_char *sendbuf;
- u_char *sp;
- int sendcnt;
- u_int s_tot;
- u_int r_bitcnt;
- u_int r_tot;
- u_int r_err;
- u_int r_fcs;
- u_char r_state;
- u_char r_one;
- u_char r_val;
- u_char s_state;
-};
-
-struct amd7930_hw {
- u_char *tx_buff;
- u_char *rv_buff;
- int rv_buff_in;
- int rv_buff_out;
- struct sk_buff *rv_skb;
- struct hdlc_state *hdlc_state;
- struct work_struct tq_rcv;
- struct work_struct tq_xmt;
-};
-
-#define BC_FLG_INIT 1
-#define BC_FLG_ACTIV 2
-#define BC_FLG_BUSY 3
-#define BC_FLG_NOFRAME 4
-#define BC_FLG_HALF 5
-#define BC_FLG_EMPTY 6
-#define BC_FLG_ORIG 7
-#define BC_FLG_DLEETX 8
-#define BC_FLG_LASTDLE 9
-#define BC_FLG_FIRST 10
-#define BC_FLG_LASTDATA 11
-#define BC_FLG_NMD_DATA 12
-#define BC_FLG_FTI_RUN 13
-#define BC_FLG_LL_OK 14
-#define BC_FLG_LL_CONN 15
-#define BC_FLG_FTI_FTS 16
-#define BC_FLG_FRH_WAIT 17
-
-#define L1_MODE_NULL 0
-#define L1_MODE_TRANS 1
-#define L1_MODE_HDLC 2
-#define L1_MODE_EXTRN 3
-#define L1_MODE_HDLC_56K 4
-#define L1_MODE_MODEM 7
-#define L1_MODE_V32 8
-#define L1_MODE_FAX 9
-
-struct BCState {
- int channel;
- int mode;
- u_long Flag;
- struct IsdnCardState *cs;
- int tx_cnt; /* B-Channel transmit counter */
- struct sk_buff *tx_skb; /* B-Channel transmit Buffer */
- struct sk_buff_head rqueue; /* B-Channel receive Queue */
- struct sk_buff_head squeue; /* B-Channel send Queue */
- int ackcnt;
- spinlock_t aclock;
- struct PStack *st;
- u_char *blog;
- u_char *conmsg;
- struct timer_list transbusy;
- struct work_struct tqueue;
- u_long event;
- int (*BC_SetStack) (struct PStack *, struct BCState *);
- void (*BC_Close) (struct BCState *);
-#ifdef ERROR_STATISTIC
- int err_crc;
- int err_tx;
- int err_rdo;
- int err_inv;
-#endif
- union {
- struct hscx_hw hscx;
- struct hdlc_hw hdlc;
- struct isar_hw isar;
- struct hfcB_hw hfc;
- struct tiger_hw tiger;
- struct amd7930_hw amd7930;
- struct w6692B_hw w6692;
- struct hisax_b_if *b_if;
- } hw;
-};
-
-struct Channel {
- struct PStack *b_st, *d_st;
- struct IsdnCardState *cs;
- struct BCState *bcs;
- int chan;
- int incoming;
- struct FsmInst fi;
- struct FsmTimer drel_timer, dial_timer;
- int debug;
- int l2_protocol, l2_active_protocol;
- int l3_protocol;
- int data_open;
- struct l3_process *proc;
- setup_parm setup; /* numbers and service indicator from isdnif.h */
- u_long Flags; /* for remembering action done in l4 */
- int leased;
-};
-
-struct elsa_hw {
- struct pci_dev *dev;
- unsigned long base;
- unsigned int cfg;
- unsigned int ctrl;
- unsigned int ale;
- unsigned int isac;
- unsigned int itac;
- unsigned int hscx;
- unsigned int trig;
- unsigned int timer;
- unsigned int counter;
- unsigned int status;
- struct timer_list tl;
- unsigned int MFlag;
- struct BCState *bcs;
- u_char *transbuf;
- u_char *rcvbuf;
- unsigned int transp;
- unsigned int rcvp;
- unsigned int transcnt;
- unsigned int rcvcnt;
- u_char IER;
- u_char FCR;
- u_char LCR;
- u_char MCR;
- u_char ctrl_reg;
-};
-
-struct teles3_hw {
- unsigned int cfg_reg;
- signed int isac;
- signed int hscx[2];
- signed int isacfifo;
- signed int hscxfifo[2];
-};
-
-struct teles0_hw {
- unsigned int cfg_reg;
- void __iomem *membase;
- unsigned long phymem;
-};
-
-struct avm_hw {
- unsigned int cfg_reg;
- unsigned int isac;
- unsigned int hscx[2];
- unsigned int isacfifo;
- unsigned int hscxfifo[2];
- unsigned int counter;
- struct pci_dev *dev;
-};
-
-struct ix1_hw {
- unsigned int cfg_reg;
- unsigned int isac_ale;
- unsigned int isac;
- unsigned int hscx_ale;
- unsigned int hscx;
-};
-
-struct diva_hw {
- unsigned long cfg_reg;
- unsigned long pci_cfg;
- unsigned int ctrl;
- unsigned long isac_adr;
- unsigned int isac;
- unsigned long hscx_adr;
- unsigned int hscx;
- unsigned int status;
- struct timer_list tl;
- u_char ctrl_reg;
- struct pci_dev *dev;
-};
-
-struct asus_hw {
- unsigned int cfg_reg;
- unsigned int adr;
- unsigned int isac;
- unsigned int hscx;
- unsigned int u7;
- unsigned int pots;
-};
-
-
-struct hfc_hw {
- unsigned int addr;
- unsigned int fifosize;
- unsigned char cirm;
- unsigned char ctmt;
- unsigned char cip;
- u_char isac_spcr;
- struct timer_list timer;
-};
-
-struct sedl_hw {
- unsigned int cfg_reg;
- unsigned int adr;
- unsigned int isac;
- unsigned int hscx;
- unsigned int reset_on;
- unsigned int reset_off;
- struct isar_reg isar;
- unsigned int chip;
- unsigned int bus;
- struct pci_dev *dev;
-};
-
-struct spt_hw {
- unsigned int cfg_reg;
- unsigned int isac;
- unsigned int hscx[2];
- unsigned char res_irq;
-};
-
-struct mic_hw {
- unsigned int cfg_reg;
- unsigned int adr;
- unsigned int isac;
- unsigned int hscx;
-};
-
-struct njet_hw {
- unsigned long base;
- unsigned int isac;
- unsigned int auxa;
- unsigned char auxd;
- unsigned char dmactrl;
- unsigned char ctrl_reg;
- unsigned char irqmask0;
- unsigned char irqstat0;
- unsigned char last_is0;
- struct pci_dev *dev;
-};
-
-struct hfcPCI_hw {
- unsigned char cirm;
- unsigned char ctmt;
- unsigned char conn;
- unsigned char mst_m;
- unsigned char int_m1;
- unsigned char int_m2;
- unsigned char int_s1;
- unsigned char sctrl;
- unsigned char sctrl_r;
- unsigned char sctrl_e;
- unsigned char trm;
- unsigned char stat;
- unsigned char fifo;
- unsigned char fifo_en;
- unsigned char bswapped;
- unsigned char nt_mode;
- int nt_timer;
- struct pci_dev *dev;
- void __iomem *pci_io; /* start of PCI IO memory */
- dma_addr_t dma; /* dma handle for Fifos */
- void *fifos; /* FIFO memory */
- int last_bfifo_cnt[2]; /* marker saving last b-fifo frame count */
- struct timer_list timer;
-};
-
-struct hfcSX_hw {
- unsigned long base;
- unsigned char cirm;
- unsigned char ctmt;
- unsigned char conn;
- unsigned char mst_m;
- unsigned char int_m1;
- unsigned char int_m2;
- unsigned char int_s1;
- unsigned char sctrl;
- unsigned char sctrl_r;
- unsigned char sctrl_e;
- unsigned char trm;
- unsigned char stat;
- unsigned char fifo;
- unsigned char bswapped;
- unsigned char nt_mode;
- unsigned char chip;
- int b_fifo_size;
- unsigned char last_fifo;
- void *extra;
- int nt_timer;
- struct timer_list timer;
-};
-
-struct hfcD_hw {
- unsigned int addr;
- unsigned int bfifosize;
- unsigned int dfifosize;
- unsigned char cirm;
- unsigned char ctmt;
- unsigned char cip;
- unsigned char conn;
- unsigned char mst_m;
- unsigned char int_m1;
- unsigned char int_m2;
- unsigned char int_s1;
- unsigned char sctrl;
- unsigned char stat;
- unsigned char fifo;
- unsigned char f1;
- unsigned char f2;
- unsigned int *send;
- struct timer_list timer;
-};
-
-struct isurf_hw {
- unsigned int reset;
- unsigned long phymem;
- void __iomem *isac;
- void __iomem *isar;
- struct isar_reg isar_r;
-};
-
-struct saphir_hw {
- struct pci_dev *dev;
- unsigned int cfg_reg;
- unsigned int ale;
- unsigned int isac;
- unsigned int hscx;
- struct timer_list timer;
-};
-
-struct bkm_hw {
- struct pci_dev *dev;
- unsigned long base;
- /* A4T stuff */
- unsigned long isac_adr;
- unsigned int isac_ale;
- unsigned long jade_adr;
- unsigned int jade_ale;
- /* Scitel Quadro stuff */
- unsigned long plx_adr;
- unsigned long data_adr;
-};
-
-struct gazel_hw {
- struct pci_dev *dev;
- unsigned int cfg_reg;
- unsigned int pciaddr[2];
- signed int ipac;
- signed int isac;
- signed int hscx[2];
- signed int isacfifo;
- signed int hscxfifo[2];
- unsigned char timeslot;
- unsigned char iom2;
-};
-
-struct w6692_hw {
- struct pci_dev *dev;
- unsigned int iobase;
- struct timer_list timer;
-};
-
-struct arcofi_msg {
- struct arcofi_msg *next;
- u_char receive;
- u_char len;
- u_char msg[10];
-};
-
-struct isac_chip {
- int ph_state;
- u_char *mon_tx;
- u_char *mon_rx;
- int mon_txp;
- int mon_txc;
- int mon_rxp;
- struct arcofi_msg *arcofi_list;
- struct timer_list arcofitimer;
- wait_queue_head_t arcofi_wait;
- u_char arcofi_bc;
- u_char arcofi_state;
- u_char mocr;
- u_char adf2;
-};
-
-struct hfcd_chip {
- int ph_state;
-};
-
-struct hfcpci_chip {
- int ph_state;
-};
-
-struct hfcsx_chip {
- int ph_state;
-};
-
-struct w6692_chip {
- int ph_state;
-};
-
-struct amd7930_chip {
- u_char lmr1;
- u_char ph_state;
- u_char old_state;
- u_char flg_t3;
- unsigned int tx_xmtlen;
- struct timer_list timer3;
- void (*ph_command) (struct IsdnCardState *, u_char, char *);
- void (*setIrqMask) (struct IsdnCardState *, u_char);
-};
-
-struct icc_chip {
- int ph_state;
- u_char *mon_tx;
- u_char *mon_rx;
- int mon_txp;
- int mon_txc;
- int mon_rxp;
- struct arcofi_msg *arcofi_list;
- struct timer_list arcofitimer;
- wait_queue_head_t arcofi_wait;
- u_char arcofi_bc;
- u_char arcofi_state;
- u_char mocr;
- u_char adf2;
-};
-
-#define HW_IOM1 0
-#define HW_IPAC 1
-#define HW_ISAR 2
-#define HW_ARCOFI 3
-#define FLG_TWO_DCHAN 4
-#define FLG_L1_DBUSY 5
-#define FLG_DBUSY_TIMER 6
-#define FLG_LOCK_ATOMIC 7
-#define FLG_ARCOFI_TIMER 8
-#define FLG_ARCOFI_ERROR 9
-#define FLG_HW_L1_UINT 10
-
-struct IsdnCardState {
- spinlock_t lock;
- u_char typ;
- u_char subtyp;
- int protocol;
- u_int irq;
- u_long irq_flags;
- u_long HW_Flags;
- int *busy_flag;
- int chanlimit; /* limited number of B-chans to use */
- int logecho; /* log echo if supported by card */
- union {
- struct elsa_hw elsa;
- struct teles0_hw teles0;
- struct teles3_hw teles3;
- struct avm_hw avm;
- struct ix1_hw ix1;
- struct diva_hw diva;
- struct asus_hw asus;
- struct hfc_hw hfc;
- struct sedl_hw sedl;
- struct spt_hw spt;
- struct mic_hw mic;
- struct njet_hw njet;
- struct hfcD_hw hfcD;
- struct hfcPCI_hw hfcpci;
- struct hfcSX_hw hfcsx;
- struct ix1_hw niccy;
- struct isurf_hw isurf;
- struct saphir_hw saphir;
- struct bkm_hw ax;
- struct gazel_hw gazel;
- struct w6692_hw w6692;
- struct hisax_d_if *hisax_d_if;
- } hw;
- int myid;
- isdn_if iif;
- spinlock_t statlock;
- u_char *status_buf;
- u_char *status_read;
- u_char *status_write;
- u_char *status_end;
- u_char (*readisac) (struct IsdnCardState *, u_char);
- void (*writeisac) (struct IsdnCardState *, u_char, u_char);
- void (*readisacfifo) (struct IsdnCardState *, u_char *, int);
- void (*writeisacfifo) (struct IsdnCardState *, u_char *, int);
- u_char (*BC_Read_Reg) (struct IsdnCardState *, int, u_char);
- void (*BC_Write_Reg) (struct IsdnCardState *, int, u_char, u_char);
- void (*BC_Send_Data) (struct BCState *);
- int (*cardmsg) (struct IsdnCardState *, int, void *);
- void (*setstack_d) (struct PStack *, struct IsdnCardState *);
- void (*DC_Close) (struct IsdnCardState *);
- irq_handler_t irq_func;
- int (*auxcmd) (struct IsdnCardState *, isdn_ctrl *);
- struct Channel channel[2 + MAX_WAITING_CALLS];
- struct BCState bcs[2 + MAX_WAITING_CALLS];
- struct PStack *stlist;
- struct sk_buff_head rq, sq; /* D-channel queues */
- int cardnr;
- char *dlog;
- int debug;
- union {
- struct isac_chip isac;
- struct hfcd_chip hfcd;
- struct hfcpci_chip hfcpci;
- struct hfcsx_chip hfcsx;
- struct w6692_chip w6692;
- struct amd7930_chip amd7930;
- struct icc_chip icc;
- } dc;
- u_char *rcvbuf;
- int rcvidx;
- struct sk_buff *tx_skb;
- int tx_cnt;
- u_long event;
- struct work_struct tqueue;
- struct timer_list dbusytimer;
- unsigned int irq_cnt;
-#ifdef ERROR_STATISTIC
- int err_crc;
- int err_tx;
- int err_rx;
-#endif
-};
-
-
-#define schedule_event(s, ev) do { test_and_set_bit(ev, &s->event); schedule_work(&s->tqueue); } while (0)
-
-#define MON0_RX 1
-#define MON1_RX 2
-#define MON0_TX 4
-#define MON1_TX 8
-
-
-#ifdef ISDN_CHIP_ISAC
-#undef ISDN_CHIP_ISAC
-#endif
-
-#ifdef CONFIG_HISAX_16_0
-#define CARD_TELES0 1
-#ifndef ISDN_CHIP_ISAC
-#define ISDN_CHIP_ISAC 1
-#endif
-#else
-#define CARD_TELES0 0
-#endif
-
-#ifdef CONFIG_HISAX_16_3
-#define CARD_TELES3 1
-#ifndef ISDN_CHIP_ISAC
-#define ISDN_CHIP_ISAC 1
-#endif
-#else
-#define CARD_TELES3 0
-#endif
-
-#ifdef CONFIG_HISAX_TELESPCI
-#define CARD_TELESPCI 1
-#ifndef ISDN_CHIP_ISAC
-#define ISDN_CHIP_ISAC 1
-#endif
-#else
-#define CARD_TELESPCI 0
-#endif
-
-#ifdef CONFIG_HISAX_AVM_A1
-#define CARD_AVM_A1 1
-#ifndef ISDN_CHIP_ISAC
-#define ISDN_CHIP_ISAC 1
-#endif
-#else
-#define CARD_AVM_A1 0
-#endif
-
-#ifdef CONFIG_HISAX_AVM_A1_PCMCIA
-#define CARD_AVM_A1_PCMCIA 1
-#ifndef ISDN_CHIP_ISAC
-#define ISDN_CHIP_ISAC 1
-#endif
-#else
-#define CARD_AVM_A1_PCMCIA 0
-#endif
-
-#ifdef CONFIG_HISAX_FRITZPCI
-#define CARD_FRITZPCI 1
-#ifndef ISDN_CHIP_ISAC
-#define ISDN_CHIP_ISAC 1
-#endif
-#else
-#define CARD_FRITZPCI 0
-#endif
-
-#ifdef CONFIG_HISAX_ELSA
-#define CARD_ELSA 1
-#ifndef ISDN_CHIP_ISAC
-#define ISDN_CHIP_ISAC 1
-#endif
-#else
-#define CARD_ELSA 0
-#endif
-
-#ifdef CONFIG_HISAX_IX1MICROR2
-#define CARD_IX1MICROR2 1
-#ifndef ISDN_CHIP_ISAC
-#define ISDN_CHIP_ISAC 1
-#endif
-#else
-#define CARD_IX1MICROR2 0
-#endif
-
-#ifdef CONFIG_HISAX_DIEHLDIVA
-#define CARD_DIEHLDIVA 1
-#ifndef ISDN_CHIP_ISAC
-#define ISDN_CHIP_ISAC 1
-#endif
-#else
-#define CARD_DIEHLDIVA 0
-#endif
-
-#ifdef CONFIG_HISAX_ASUSCOM
-#define CARD_ASUSCOM 1
-#ifndef ISDN_CHIP_ISAC
-#define ISDN_CHIP_ISAC 1
-#endif
-#else
-#define CARD_ASUSCOM 0
-#endif
-
-#ifdef CONFIG_HISAX_TELEINT
-#define CARD_TELEINT 1
-#ifndef ISDN_CHIP_ISAC
-#define ISDN_CHIP_ISAC 1
-#endif
-#else
-#define CARD_TELEINT 0
-#endif
-
-#ifdef CONFIG_HISAX_SEDLBAUER
-#define CARD_SEDLBAUER 1
-#ifndef ISDN_CHIP_ISAC
-#define ISDN_CHIP_ISAC 1
-#endif
-#else
-#define CARD_SEDLBAUER 0
-#endif
-
-#ifdef CONFIG_HISAX_SPORTSTER
-#define CARD_SPORTSTER 1
-#ifndef ISDN_CHIP_ISAC
-#define ISDN_CHIP_ISAC 1
-#endif
-#else
-#define CARD_SPORTSTER 0
-#endif
-
-#ifdef CONFIG_HISAX_MIC
-#define CARD_MIC 1
-#ifndef ISDN_CHIP_ISAC
-#define ISDN_CHIP_ISAC 1
-#endif
-#else
-#define CARD_MIC 0
-#endif
-
-#ifdef CONFIG_HISAX_NETJET
-#define CARD_NETJET_S 1
-#ifndef ISDN_CHIP_ISAC
-#define ISDN_CHIP_ISAC 1
-#endif
-#else
-#define CARD_NETJET_S 0
-#endif
-
-#ifdef CONFIG_HISAX_HFCS
-#define CARD_HFCS 1
-#else
-#define CARD_HFCS 0
-#endif
-
-#ifdef CONFIG_HISAX_HFC_PCI
-#define CARD_HFC_PCI 1
-#else
-#define CARD_HFC_PCI 0
-#endif
-
-#ifdef CONFIG_HISAX_HFC_SX
-#define CARD_HFC_SX 1
-#else
-#define CARD_HFC_SX 0
-#endif
-
-#ifdef CONFIG_HISAX_NICCY
-#define CARD_NICCY 1
-#ifndef ISDN_CHIP_ISAC
-#define ISDN_CHIP_ISAC 1
-#endif
-#else
-#define CARD_NICCY 0
-#endif
-
-#ifdef CONFIG_HISAX_ISURF
-#define CARD_ISURF 1
-#ifndef ISDN_CHIP_ISAC
-#define ISDN_CHIP_ISAC 1
-#endif
-#else
-#define CARD_ISURF 0
-#endif
-
-#ifdef CONFIG_HISAX_S0BOX
-#define CARD_S0BOX 1
-#ifndef ISDN_CHIP_ISAC
-#define ISDN_CHIP_ISAC 1
-#endif
-#else
-#define CARD_S0BOX 0
-#endif
-
-#ifdef CONFIG_HISAX_HSTSAPHIR
-#define CARD_HSTSAPHIR 1
-#ifndef ISDN_CHIP_ISAC
-#define ISDN_CHIP_ISAC 1
-#endif
-#else
-#define CARD_HSTSAPHIR 0
-#endif
-
-#ifdef CONFIG_HISAX_BKM_A4T
-#define CARD_BKM_A4T 1
-#ifndef ISDN_CHIP_ISAC
-#define ISDN_CHIP_ISAC 1
-#endif
-#else
-#define CARD_BKM_A4T 0
-#endif
-
-#ifdef CONFIG_HISAX_SCT_QUADRO
-#define CARD_SCT_QUADRO 1
-#ifndef ISDN_CHIP_ISAC
-#define ISDN_CHIP_ISAC 1
-#endif
-#else
-#define CARD_SCT_QUADRO 0
-#endif
-
-#ifdef CONFIG_HISAX_GAZEL
-#define CARD_GAZEL 1
-#ifndef ISDN_CHIP_ISAC
-#define ISDN_CHIP_ISAC 1
-#endif
-#else
-#define CARD_GAZEL 0
-#endif
-
-#ifdef CONFIG_HISAX_W6692
-#define CARD_W6692 1
-#ifndef ISDN_CHIP_W6692
-#define ISDN_CHIP_W6692 1
-#endif
-#else
-#define CARD_W6692 0
-#endif
-
-#ifdef CONFIG_HISAX_NETJET_U
-#define CARD_NETJET_U 1
-#ifndef ISDN_CHIP_ICC
-#define ISDN_CHIP_ICC 1
-#endif
-#ifndef HISAX_UINTERFACE
-#define HISAX_UINTERFACE 1
-#endif
-#else
-#define CARD_NETJET_U 0
-#endif
-
-#ifdef CONFIG_HISAX_ENTERNOW_PCI
-#define CARD_FN_ENTERNOW_PCI 1
-#else
-#define CARD_FN_ENTERNOW_PCI 0
-#endif
-
-#define TEI_PER_CARD 1
-
-/* L1 Debug */
-#define L1_DEB_WARN 0x01
-#define L1_DEB_INTSTAT 0x02
-#define L1_DEB_ISAC 0x04
-#define L1_DEB_ISAC_FIFO 0x08
-#define L1_DEB_HSCX 0x10
-#define L1_DEB_HSCX_FIFO 0x20
-#define L1_DEB_LAPD 0x40
-#define L1_DEB_IPAC 0x80
-#define L1_DEB_RECEIVE_FRAME 0x100
-#define L1_DEB_MONITOR 0x200
-#define DEB_DLOG_HEX 0x400
-#define DEB_DLOG_VERBOSE 0x800
-
-#define L2FRAME_DEBUG
-
-#ifdef L2FRAME_DEBUG
-extern void Logl2Frame(struct IsdnCardState *cs, struct sk_buff *skb, char *buf, int dir);
-#endif
-
-#include "hisax_cfg.h"
-
-void init_bcstate(struct IsdnCardState *cs, int bc);
-
-void setstack_HiSax(struct PStack *st, struct IsdnCardState *cs);
-void HiSax_addlist(struct IsdnCardState *sp, struct PStack *st);
-void HiSax_rmlist(struct IsdnCardState *sp, struct PStack *st);
-
-void setstack_l1_B(struct PStack *st);
-
-void setstack_tei(struct PStack *st);
-void setstack_manager(struct PStack *st);
-
-void setstack_isdnl2(struct PStack *st, char *debug_id);
-void releasestack_isdnl2(struct PStack *st);
-void setstack_transl2(struct PStack *st);
-void releasestack_transl2(struct PStack *st);
-void lli_writewakeup(struct PStack *st, int len);
-
-void setstack_l3dc(struct PStack *st, struct Channel *chanp);
-void setstack_l3bc(struct PStack *st, struct Channel *chanp);
-void releasestack_isdnl3(struct PStack *st);
-
-u_char *findie(u_char *p, int size, u_char ie, int wanted_set);
-int getcallref(u_char *p);
-int newcallref(void);
-
-int FsmNew(struct Fsm *fsm, struct FsmNode *fnlist, int fncount);
-void FsmFree(struct Fsm *fsm);
-int FsmEvent(struct FsmInst *fi, int event, void *arg);
-void FsmChangeState(struct FsmInst *fi, int newstate);
-void FsmInitTimer(struct FsmInst *fi, struct FsmTimer *ft);
-int FsmAddTimer(struct FsmTimer *ft, int millisec, int event,
- void *arg, int where);
-void FsmRestartTimer(struct FsmTimer *ft, int millisec, int event,
- void *arg, int where);
-void FsmDelTimer(struct FsmTimer *ft, int where);
-int jiftime(char *s, long mark);
-
-int HiSax_command(isdn_ctrl *ic);
-int HiSax_writebuf_skb(int id, int chan, int ack, struct sk_buff *skb);
-__printf(3, 4)
-void HiSax_putstatus(struct IsdnCardState *cs, char *head, const char *fmt, ...);
-__printf(3, 0)
-void VHiSax_putstatus(struct IsdnCardState *cs, char *head, const char *fmt, va_list args);
-void HiSax_reportcard(int cardnr, int sel);
-int QuickHex(char *txt, u_char *p, int cnt);
-void LogFrame(struct IsdnCardState *cs, u_char *p, int size);
-void dlogframe(struct IsdnCardState *cs, struct sk_buff *skb, int dir);
-void iecpy(u_char *dest, u_char *iestart, int ieoffset);
-#endif /* __KERNEL__ */
-
-/*
- * Busywait delay for `jiffs' jiffies
- */
-#define HZDELAY(jiffs) do { \
- int tout = jiffs; \
- \
- while (tout--) { \
- int loops = USEC_PER_SEC / HZ; \
- while (loops--) \
- udelay(1); \
- } \
- } while (0)
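As a rough usage sketch (not code from this driver), a setup path could pulse an assumed reset port and use HZDELAY() to busy-wait a few jiffies while the card settles; the port below is a placeholder:

#include <linux/delay.h>
#include <asm/io.h>

/* Sketch only: "reset_port" is an assumed I/O address, not a register
 * of any real HiSax card. HZDELAY(3) burns roughly three jiffies in
 * udelay(1) steps, so it is only suitable for early card setup paths.
 */
static void example_reset_pulse(unsigned int reset_port)
{
	outb(0x00, reset_port);		/* assert reset */
	HZDELAY(3);			/* let the hardware settle */
	outb(0x01, reset_port);		/* release reset */
}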
-
-int ll_run(struct IsdnCardState *cs, int addfeatures);
-int CallcNew(void);
-void CallcFree(void);
-int CallcNewChan(struct IsdnCardState *cs);
-void CallcFreeChan(struct IsdnCardState *cs);
-int Isdnl1New(void);
-void Isdnl1Free(void);
-int Isdnl2New(void);
-void Isdnl2Free(void);
-int Isdnl3New(void);
-void Isdnl3Free(void);
-void init_tei(struct IsdnCardState *cs, int protocol);
-void release_tei(struct IsdnCardState *cs);
-char *HiSax_getrev(const char *revision);
-int TeiNew(void);
-void TeiFree(void);
-
-#ifdef CONFIG_PCI
-
-#include <linux/pci.h>
-
-/* adaptation wrapper for old usage
- * WARNING! This is unfit for use in a PCI hotplug environment,
- * as the returned PCI device can disappear at any moment in time.
- * Callers should be converted to use pci_get_device() instead.
- */
-static inline struct pci_dev *hisax_find_pci_device(unsigned int vendor,
- unsigned int device,
- struct pci_dev *from)
-{
- struct pci_dev *pdev;
-
- pci_dev_get(from);
- pdev = pci_get_subsys(vendor, device, PCI_ANY_ID, PCI_ANY_ID, from);
- pci_dev_put(pdev);
- return pdev;
-}
-
-#endif
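The hisax_find_pci_device() wrapper above drops the reference that pci_get_subsys() took, which is exactly why its comment warns against hotplug use. A minimal sketch of the recommended pattern, plain pci_get_device() with the caller owning the reference, might look like this (the vendor/device IDs and probe step are placeholders):

#include <linux/pci.h>

/* Sketch: pci_get_device() returns a referenced device and drops the
 * reference on the previous one at each iteration. Keep the reference
 * for as long as the device is in use and pci_dev_put() it when done.
 */
static struct pci_dev *example_find_card(unsigned int vendor,
					 unsigned int device)
{
	struct pci_dev *pdev = NULL;

	while ((pdev = pci_get_device(vendor, device, pdev)) != NULL) {
		if (pci_enable_device(pdev) == 0)
			return pdev;	/* caller must pci_dev_put() later */
	}
	return NULL;			/* loop exit already dropped the ref */
}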
diff --git a/drivers/isdn/hisax/hisax_cfg.h b/drivers/isdn/hisax/hisax_cfg.h
deleted file mode 100644
index 487dcfe9e718..000000000000
--- a/drivers/isdn/hisax/hisax_cfg.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/* $Id: hisax_cfg.h,v 1.1.2.1 2004/01/24 20:47:23 keil Exp $
- * definitions of the basic HiSax configuration structures
- * and pcmcia interface
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#define ISDN_CTYPE_16_0 1
-#define ISDN_CTYPE_8_0 2
-#define ISDN_CTYPE_16_3 3
-#define ISDN_CTYPE_PNP 4
-#define ISDN_CTYPE_A1 5
-#define ISDN_CTYPE_ELSA 6
-#define ISDN_CTYPE_ELSA_PNP 7
-#define ISDN_CTYPE_TELESPCMCIA 8
-#define ISDN_CTYPE_IX1MICROR2 9
-#define ISDN_CTYPE_ELSA_PCMCIA 10
-#define ISDN_CTYPE_DIEHLDIVA 11
-#define ISDN_CTYPE_ASUSCOM 12
-#define ISDN_CTYPE_TELEINT 13
-#define ISDN_CTYPE_TELES3C 14
-#define ISDN_CTYPE_SEDLBAUER 15
-#define ISDN_CTYPE_SPORTSTER 16
-#define ISDN_CTYPE_MIC 17
-#define ISDN_CTYPE_ELSA_PCI 18
-#define ISDN_CTYPE_COMPAQ_ISA 19
-#define ISDN_CTYPE_NETJET_S 20
-#define ISDN_CTYPE_TELESPCI 21
-#define ISDN_CTYPE_SEDLBAUER_PCMCIA 22
-#define ISDN_CTYPE_AMD7930 23
-#define ISDN_CTYPE_NICCY 24
-#define ISDN_CTYPE_S0BOX 25
-#define ISDN_CTYPE_A1_PCMCIA 26
-#define ISDN_CTYPE_FRITZPCI 27
-#define ISDN_CTYPE_SEDLBAUER_FAX 28
-#define ISDN_CTYPE_ISURF 29
-#define ISDN_CTYPE_ACERP10 30
-#define ISDN_CTYPE_HSTSAPHIR 31
-#define ISDN_CTYPE_BKM_A4T 32
-#define ISDN_CTYPE_SCT_QUADRO 33
-#define ISDN_CTYPE_GAZEL 34
-#define ISDN_CTYPE_HFC_PCI 35
-#define ISDN_CTYPE_W6692 36
-#define ISDN_CTYPE_HFC_SX 37
-#define ISDN_CTYPE_NETJET_U 38
-#define ISDN_CTYPE_HFC_SP_PCMCIA 39
-#define ISDN_CTYPE_DYNAMIC 40
-#define ISDN_CTYPE_ENTERNOW 41
-#define ISDN_CTYPE_COUNT 41
-
-typedef struct IsdnCardState IsdnCardState_t;
-typedef struct IsdnCard IsdnCard_t;
-
-struct IsdnCard {
- int typ;
- int protocol; /* EDSS1, 1TR6 or NI1 */
- unsigned long para[4];
- IsdnCardState_t *cs;
-};
-
-typedef int (*hisax_setup_func_t)(struct IsdnCard *card);
-
-extern void HiSax_closecard(int);
-extern int hisax_init_pcmcia(void *, int *, IsdnCard_t *);
diff --git a/drivers/isdn/hisax/hisax_debug.h b/drivers/isdn/hisax/hisax_debug.h
deleted file mode 100644
index 7b3093d0856a..000000000000
--- a/drivers/isdn/hisax/hisax_debug.h
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Common debugging macros for use with the hisax driver
- *
- * Author Frode Isaksen
- * Copyright 2001 by Frode Isaksen <fisaksen@bewan.com>
- * 2001 by Kai Germaschewski <kai.germaschewski@gmx.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- * How to use:
- *
- * Before including this file, you need to
- * #define __debug_variable my_debug
- * where my_debug is a variable in your code which
- * determines the debug bitmask.
- *
- * If CONFIG_HISAX_DEBUG is not set, all macros evaluate to nothing
- *
- */
-
-#ifndef __HISAX_DEBUG_H__
-#define __HISAX_DEBUG_H__
-
-
-#ifdef CONFIG_HISAX_DEBUG
-
-#define DBG(level, format, arg...) do { \
- if (level & __debug_variable) \
- printk(KERN_DEBUG "%s: " format "\n" , __func__ , ## arg); \
- } while (0)
-
-#define DBG_PACKET(level, data, count) \
- if (level & __debug_variable) dump_packet(__func__, data, count)
-
-#define DBG_SKB(level, skb) \
- if ((level & __debug_variable) && skb) dump_packet(__func__, skb->data, skb->len)
-
-
-static void __attribute__((unused))
-dump_packet(const char *name, const u_char *data, int pkt_len)
-{
-#define DUMP_HDR_SIZE 20
-#define DUMP_TLR_SIZE 8
- if (pkt_len) {
- int i, len1, len2;
-
- printk(KERN_DEBUG "%s: length=%d,data=", name, pkt_len);
-
- if (pkt_len > DUMP_HDR_SIZE + DUMP_TLR_SIZE) {
- len1 = DUMP_HDR_SIZE;
- len2 = DUMP_TLR_SIZE;
- } else {
- len1 = pkt_len > DUMP_HDR_SIZE ? DUMP_HDR_SIZE : pkt_len;
- len2 = 0;
- }
- for (i = 0; i < len1; ++i) {
- printk("%.2x", data[i]);
- }
- if (len2) {
- printk("..");
- for (i = pkt_len-DUMP_TLR_SIZE; i < pkt_len; ++i) {
- printk("%.2x", data[i]);
- }
- }
- printk("\n");
- }
-#undef DUMP_HDR_SIZE
-#undef DUMP_TLR_SIZE
-}
-
-#else
-
-#define DBG(level, format, arg...) do {} while (0)
-#define DBG_PACKET(level, data, count) do {} while (0)
-#define DBG_SKB(level, skb) do {} while (0)
-
-#endif
-
-#endif
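The "How to use" note at the top of hisax_debug.h is easiest to see in a concrete, hypothetical user: the including file defines __debug_variable before pulling the header in, and DBG()/DBG_SKB() then filter on that bitmask (all names below are invented for the example):

#include <linux/module.h>
#include <linux/skbuff.h>

static int mydrv_debug;			/* bitmask tested by DBG() */
module_param(mydrv_debug, int, 0);

#define __debug_variable mydrv_debug	/* must precede the include */
#include "hisax_debug.h"

static void mydrv_rx(struct sk_buff *skb)
{
	DBG(0x10, "received %d bytes", skb->len);
	DBG_SKB(0x200, skb);		/* hexdump when bit 0x200 is set */
}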
diff --git a/drivers/isdn/hisax/hisax_fcpcipnp.c b/drivers/isdn/hisax/hisax_fcpcipnp.c
deleted file mode 100644
index 7a7137d8664b..000000000000
--- a/drivers/isdn/hisax/hisax_fcpcipnp.c
+++ /dev/null
@@ -1,1024 +0,0 @@
-/*
- * Driver for AVM Fritz!PCI, Fritz!PCI v2, Fritz!PnP ISDN cards
- *
- * Author Kai Germaschewski
- * Copyright 2001 by Kai Germaschewski <kai.germaschewski@gmx.de>
- * 2001 by Karsten Keil <keil@isdn4linux.de>
- *
- * based upon Karsten Keil's original avm_pci.c driver
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- * Thanks to Wizard Computersysteme GmbH, Bremervoerde and
- * SoHaNet Technology GmbH, Berlin
- * for supporting the development of this driver
- */
-
-
-/* TODO:
- *
- * o POWER PC
- * o clean up debugging
- * o tx_skb at PH_DEACTIVATE time
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/isapnp.h>
-#include <linux/kmod.h>
-#include <linux/slab.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/delay.h>
-
-#include <asm/io.h>
-
-#include "hisax_fcpcipnp.h"
-
-// debugging cruft
-#define __debug_variable debug
-#include "hisax_debug.h"
-
-#ifdef CONFIG_HISAX_DEBUG
-static int debug = 0;
-/* static int hdlcfifosize = 32; */
-module_param(debug, int, 0);
-/* module_param(hdlcfifosize, int, 0); */
-#endif
-
-MODULE_AUTHOR("Kai Germaschewski <kai.germaschewski@gmx.de>/Karsten Keil <kkeil@suse.de>");
-MODULE_DESCRIPTION("AVM Fritz!PCI/PnP ISDN driver");
-
-static const struct pci_device_id fcpci_ids[] = {
- { .vendor = PCI_VENDOR_ID_AVM,
- .device = PCI_DEVICE_ID_AVM_A1,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .driver_data = (unsigned long) "Fritz!Card PCI",
- },
- { .vendor = PCI_VENDOR_ID_AVM,
- .device = PCI_DEVICE_ID_AVM_A1_V2,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .driver_data = (unsigned long) "Fritz!Card PCI v2" },
- {}
-};
-
-MODULE_DEVICE_TABLE(pci, fcpci_ids);
-
-#ifdef CONFIG_PNP
-static struct pnp_device_id fcpnp_ids[] = {
- {
- .id = "AVM0900",
- .driver_data = (unsigned long) "Fritz!Card PnP",
- },
- { .id = "" }
-};
-
-MODULE_DEVICE_TABLE(pnp, fcpnp_ids);
-#endif
-
-static int protocol = 2; /* EURO-ISDN Default */
-module_param(protocol, int, 0);
-MODULE_LICENSE("GPL");
-
-// ----------------------------------------------------------------------
-
-#define AVM_INDEX 0x04
-#define AVM_DATA 0x10
-
-#define AVM_IDX_HDLC_1 0x00
-#define AVM_IDX_HDLC_2 0x01
-#define AVM_IDX_ISAC_FIFO 0x02
-#define AVM_IDX_ISAC_REG_LOW 0x04
-#define AVM_IDX_ISAC_REG_HIGH 0x06
-
-#define AVM_STATUS0 0x02
-
-#define AVM_STATUS0_IRQ_ISAC 0x01
-#define AVM_STATUS0_IRQ_HDLC 0x02
-#define AVM_STATUS0_IRQ_TIMER 0x04
-#define AVM_STATUS0_IRQ_MASK 0x07
-
-#define AVM_STATUS0_RESET 0x01
-#define AVM_STATUS0_DIS_TIMER 0x02
-#define AVM_STATUS0_RES_TIMER 0x04
-#define AVM_STATUS0_ENA_IRQ 0x08
-#define AVM_STATUS0_TESTBIT 0x10
-
-#define AVM_STATUS1 0x03
-#define AVM_STATUS1_ENA_IOM 0x80
-
-#define HDLC_FIFO 0x0
-#define HDLC_STATUS 0x4
-#define HDLC_CTRL 0x4
-
-#define HDLC_MODE_ITF_FLG 0x01
-#define HDLC_MODE_TRANS 0x02
-#define HDLC_MODE_CCR_7 0x04
-#define HDLC_MODE_CCR_16 0x08
-#define HDLC_MODE_TESTLOOP 0x80
-
-#define HDLC_INT_XPR 0x80
-#define HDLC_INT_XDU 0x40
-#define HDLC_INT_RPR 0x20
-#define HDLC_INT_MASK 0xE0
-
-#define HDLC_STAT_RME 0x01
-#define HDLC_STAT_RDO 0x10
-#define HDLC_STAT_CRCVFRRAB 0x0E
-#define HDLC_STAT_CRCVFR 0x06
-#define HDLC_STAT_RML_MASK 0xff00
-
-#define HDLC_CMD_XRS 0x80
-#define HDLC_CMD_XME 0x01
-#define HDLC_CMD_RRS 0x20
-#define HDLC_CMD_XML_MASK 0xff00
-
-#define AVM_HDLC_FIFO_1 0x10
-#define AVM_HDLC_FIFO_2 0x18
-
-#define AVM_HDLC_STATUS_1 0x14
-#define AVM_HDLC_STATUS_2 0x1c
-
-#define AVM_ISACSX_INDEX 0x04
-#define AVM_ISACSX_DATA 0x08
-
-// ----------------------------------------------------------------------
-// Fritz!PCI
-
-static unsigned char fcpci_read_isac(struct isac *isac, unsigned char offset)
-{
- struct fritz_adapter *adapter = isac->priv;
- unsigned char idx = (offset > 0x2f) ?
- AVM_IDX_ISAC_REG_HIGH : AVM_IDX_ISAC_REG_LOW;
- unsigned char val;
- unsigned long flags;
-
- spin_lock_irqsave(&adapter->hw_lock, flags);
- outb(idx, adapter->io + AVM_INDEX);
- val = inb(adapter->io + AVM_DATA + (offset & 0xf));
- spin_unlock_irqrestore(&adapter->hw_lock, flags);
- DBG(0x1000, " port %#x, value %#x",
- offset, val);
- return val;
-}
-
-static void fcpci_write_isac(struct isac *isac, unsigned char offset,
- unsigned char value)
-{
- struct fritz_adapter *adapter = isac->priv;
- unsigned char idx = (offset > 0x2f) ?
- AVM_IDX_ISAC_REG_HIGH : AVM_IDX_ISAC_REG_LOW;
- unsigned long flags;
-
- DBG(0x1000, " port %#x, value %#x",
- offset, value);
- spin_lock_irqsave(&adapter->hw_lock, flags);
- outb(idx, adapter->io + AVM_INDEX);
- outb(value, adapter->io + AVM_DATA + (offset & 0xf));
- spin_unlock_irqrestore(&adapter->hw_lock, flags);
-}
-
-static void fcpci_read_isac_fifo(struct isac *isac, unsigned char *data,
- int size)
-{
- struct fritz_adapter *adapter = isac->priv;
- unsigned long flags;
-
- spin_lock_irqsave(&adapter->hw_lock, flags);
- outb(AVM_IDX_ISAC_FIFO, adapter->io + AVM_INDEX);
- insb(adapter->io + AVM_DATA, data, size);
- spin_unlock_irqrestore(&adapter->hw_lock, flags);
-}
-
-static void fcpci_write_isac_fifo(struct isac *isac, unsigned char *data,
- int size)
-{
- struct fritz_adapter *adapter = isac->priv;
- unsigned long flags;
-
- spin_lock_irqsave(&adapter->hw_lock, flags);
- outb(AVM_IDX_ISAC_FIFO, adapter->io + AVM_INDEX);
- outsb(adapter->io + AVM_DATA, data, size);
- spin_unlock_irqrestore(&adapter->hw_lock, flags);
-}
-
-static u32 fcpci_read_hdlc_status(struct fritz_adapter *adapter, int nr)
-{
- u32 val;
- int idx = nr ? AVM_IDX_HDLC_2 : AVM_IDX_HDLC_1;
- unsigned long flags;
-
- spin_lock_irqsave(&adapter->hw_lock, flags);
- outl(idx, adapter->io + AVM_INDEX);
- val = inl(adapter->io + AVM_DATA + HDLC_STATUS);
- spin_unlock_irqrestore(&adapter->hw_lock, flags);
- return val;
-}
-
-static void __fcpci_write_ctrl(struct fritz_bcs *bcs, int which)
-{
- struct fritz_adapter *adapter = bcs->adapter;
- int idx = bcs->channel ? AVM_IDX_HDLC_2 : AVM_IDX_HDLC_1;
-
- DBG(0x40, "hdlc %c wr%x ctrl %x",
- 'A' + bcs->channel, which, bcs->ctrl.ctrl);
-
- outl(idx, adapter->io + AVM_INDEX);
- outl(bcs->ctrl.ctrl, adapter->io + AVM_DATA + HDLC_CTRL);
-}
-
-static void fcpci_write_ctrl(struct fritz_bcs *bcs, int which)
-{
- struct fritz_adapter *adapter = bcs->adapter;
- unsigned long flags;
-
- spin_lock_irqsave(&adapter->hw_lock, flags);
- __fcpci_write_ctrl(bcs, which);
- spin_unlock_irqrestore(&adapter->hw_lock, flags);
-}
-
-// ----------------------------------------------------------------------
-// Fritz!PCI v2
-
-static unsigned char fcpci2_read_isac(struct isac *isac, unsigned char offset)
-{
- struct fritz_adapter *adapter = isac->priv;
- unsigned char val;
- unsigned long flags;
-
- spin_lock_irqsave(&adapter->hw_lock, flags);
- outl(offset, adapter->io + AVM_ISACSX_INDEX);
- val = inl(adapter->io + AVM_ISACSX_DATA);
- spin_unlock_irqrestore(&adapter->hw_lock, flags);
- DBG(0x1000, " port %#x, value %#x",
- offset, val);
-
- return val;
-}
-
-static void fcpci2_write_isac(struct isac *isac, unsigned char offset,
- unsigned char value)
-{
- struct fritz_adapter *adapter = isac->priv;
- unsigned long flags;
-
- DBG(0x1000, " port %#x, value %#x",
- offset, value);
- spin_lock_irqsave(&adapter->hw_lock, flags);
- outl(offset, adapter->io + AVM_ISACSX_INDEX);
- outl(value, adapter->io + AVM_ISACSX_DATA);
- spin_unlock_irqrestore(&adapter->hw_lock, flags);
-}
-
-static void fcpci2_read_isac_fifo(struct isac *isac, unsigned char *data,
- int size)
-{
- struct fritz_adapter *adapter = isac->priv;
- int i;
- unsigned long flags;
-
- spin_lock_irqsave(&adapter->hw_lock, flags);
- outl(0, adapter->io + AVM_ISACSX_INDEX);
- for (i = 0; i < size; i++)
- data[i] = inl(adapter->io + AVM_ISACSX_DATA);
- spin_unlock_irqrestore(&adapter->hw_lock, flags);
-}
-
-static void fcpci2_write_isac_fifo(struct isac *isac, unsigned char *data,
- int size)
-{
- struct fritz_adapter *adapter = isac->priv;
- int i;
- unsigned long flags;
-
- spin_lock_irqsave(&adapter->hw_lock, flags);
- outl(0, adapter->io + AVM_ISACSX_INDEX);
- for (i = 0; i < size; i++)
- outl(data[i], adapter->io + AVM_ISACSX_DATA);
- spin_unlock_irqrestore(&adapter->hw_lock, flags);
-}
-
-static u32 fcpci2_read_hdlc_status(struct fritz_adapter *adapter, int nr)
-{
- int offset = nr ? AVM_HDLC_STATUS_2 : AVM_HDLC_STATUS_1;
-
- return inl(adapter->io + offset);
-}
-
-static void fcpci2_write_ctrl(struct fritz_bcs *bcs, int which)
-{
- struct fritz_adapter *adapter = bcs->adapter;
- int offset = bcs->channel ? AVM_HDLC_STATUS_2 : AVM_HDLC_STATUS_1;
-
- DBG(0x40, "hdlc %c wr%x ctrl %x",
- 'A' + bcs->channel, which, bcs->ctrl.ctrl);
-
- outl(bcs->ctrl.ctrl, adapter->io + offset);
-}
-
-// ----------------------------------------------------------------------
-// Fritz!PnP (ISAC access as for Fritz!PCI)
-
-static u32 fcpnp_read_hdlc_status(struct fritz_adapter *adapter, int nr)
-{
- unsigned char idx = nr ? AVM_IDX_HDLC_2 : AVM_IDX_HDLC_1;
- u32 val;
- unsigned long flags;
-
- spin_lock_irqsave(&adapter->hw_lock, flags);
- outb(idx, adapter->io + AVM_INDEX);
- val = inb(adapter->io + AVM_DATA + HDLC_STATUS);
- if (val & HDLC_INT_RPR)
- val |= inb(adapter->io + AVM_DATA + HDLC_STATUS + 1) << 8;
- spin_unlock_irqrestore(&adapter->hw_lock, flags);
- return val;
-}
-
-static void __fcpnp_write_ctrl(struct fritz_bcs *bcs, int which)
-{
- struct fritz_adapter *adapter = bcs->adapter;
- unsigned char idx = bcs->channel ? AVM_IDX_HDLC_2 : AVM_IDX_HDLC_1;
-
- DBG(0x40, "hdlc %c wr%x ctrl %x",
- 'A' + bcs->channel, which, bcs->ctrl.ctrl);
-
- outb(idx, adapter->io + AVM_INDEX);
- if (which & 4)
- outb(bcs->ctrl.sr.mode,
- adapter->io + AVM_DATA + HDLC_STATUS + 2);
- if (which & 2)
- outb(bcs->ctrl.sr.xml,
- adapter->io + AVM_DATA + HDLC_STATUS + 1);
- if (which & 1)
- outb(bcs->ctrl.sr.cmd,
- adapter->io + AVM_DATA + HDLC_STATUS + 0);
-}
-
-static void fcpnp_write_ctrl(struct fritz_bcs *bcs, int which)
-{
- struct fritz_adapter *adapter = bcs->adapter;
- unsigned long flags;
-
- spin_lock_irqsave(&adapter->hw_lock, flags);
- __fcpnp_write_ctrl(bcs, which);
- spin_unlock_irqrestore(&adapter->hw_lock, flags);
-}
-
-// ----------------------------------------------------------------------
-
-static inline void B_L1L2(struct fritz_bcs *bcs, int pr, void *arg)
-{
- struct hisax_if *ifc = (struct hisax_if *) &bcs->b_if;
-
- DBG(2, "pr %#x", pr);
- ifc->l1l2(ifc, pr, arg);
-}
-
-static void hdlc_fill_fifo(struct fritz_bcs *bcs)
-{
- struct fritz_adapter *adapter = bcs->adapter;
- struct sk_buff *skb = bcs->tx_skb;
- int count;
- unsigned long flags;
- unsigned char *p;
-
- DBG(0x40, "hdlc_fill_fifo");
-
- BUG_ON(skb->len == 0);
-
- bcs->ctrl.sr.cmd &= ~HDLC_CMD_XME;
- if (bcs->tx_skb->len > bcs->fifo_size) {
- count = bcs->fifo_size;
- } else {
- count = bcs->tx_skb->len;
- if (bcs->mode != L1_MODE_TRANS)
- bcs->ctrl.sr.cmd |= HDLC_CMD_XME;
- }
- DBG(0x40, "hdlc_fill_fifo %d/%d", count, bcs->tx_skb->len);
- p = bcs->tx_skb->data;
- skb_pull(bcs->tx_skb, count);
- bcs->tx_cnt += count;
- bcs->ctrl.sr.xml = ((count == bcs->fifo_size) ? 0 : count);
-
- switch (adapter->type) {
- case AVM_FRITZ_PCI:
- spin_lock_irqsave(&adapter->hw_lock, flags);
- // sets the correct AVM_INDEX, too
- __fcpci_write_ctrl(bcs, 3);
- outsl(adapter->io + AVM_DATA + HDLC_FIFO,
- p, (count + 3) / 4);
- spin_unlock_irqrestore(&adapter->hw_lock, flags);
- break;
- case AVM_FRITZ_PCIV2:
- fcpci2_write_ctrl(bcs, 3);
- outsl(adapter->io +
- (bcs->channel ? AVM_HDLC_FIFO_2 : AVM_HDLC_FIFO_1),
- p, (count + 3) / 4);
- break;
- case AVM_FRITZ_PNP:
- spin_lock_irqsave(&adapter->hw_lock, flags);
- // sets the correct AVM_INDEX, too
- __fcpnp_write_ctrl(bcs, 3);
- outsb(adapter->io + AVM_DATA, p, count);
- spin_unlock_irqrestore(&adapter->hw_lock, flags);
- break;
- }
-}
-
-static inline void hdlc_empty_fifo(struct fritz_bcs *bcs, int count)
-{
- struct fritz_adapter *adapter = bcs->adapter;
- unsigned char *p;
- unsigned char idx = bcs->channel ? AVM_IDX_HDLC_2 : AVM_IDX_HDLC_1;
-
- DBG(0x10, "hdlc_empty_fifo %d", count);
- if (bcs->rcvidx + count > HSCX_BUFMAX) {
- DBG(0x10, "hdlc_empty_fifo: incoming packet too large");
- return;
- }
- p = bcs->rcvbuf + bcs->rcvidx;
- bcs->rcvidx += count;
- switch (adapter->type) {
- case AVM_FRITZ_PCI:
- spin_lock(&adapter->hw_lock);
- outl(idx, adapter->io + AVM_INDEX);
- insl(adapter->io + AVM_DATA + HDLC_FIFO,
- p, (count + 3) / 4);
- spin_unlock(&adapter->hw_lock);
- break;
- case AVM_FRITZ_PCIV2:
- insl(adapter->io +
- (bcs->channel ? AVM_HDLC_FIFO_2 : AVM_HDLC_FIFO_1),
- p, (count + 3) / 4);
- break;
- case AVM_FRITZ_PNP:
- spin_lock(&adapter->hw_lock);
- outb(idx, adapter->io + AVM_INDEX);
- insb(adapter->io + AVM_DATA, p, count);
- spin_unlock(&adapter->hw_lock);
- break;
- }
-}
-
-static inline void hdlc_rpr_irq(struct fritz_bcs *bcs, u32 stat)
-{
- struct fritz_adapter *adapter = bcs->adapter;
- struct sk_buff *skb;
- int len;
-
- if (stat & HDLC_STAT_RDO) {
- DBG(0x10, "RDO");
- bcs->ctrl.sr.xml = 0;
- bcs->ctrl.sr.cmd |= HDLC_CMD_RRS;
- adapter->write_ctrl(bcs, 1);
- bcs->ctrl.sr.cmd &= ~HDLC_CMD_RRS;
- adapter->write_ctrl(bcs, 1);
- bcs->rcvidx = 0;
- return;
- }
-
- len = (stat & HDLC_STAT_RML_MASK) >> 8;
- if (len == 0)
- len = bcs->fifo_size;
-
- hdlc_empty_fifo(bcs, len);
-
- if ((stat & HDLC_STAT_RME) || (bcs->mode == L1_MODE_TRANS)) {
- if (((stat & HDLC_STAT_CRCVFRRAB) == HDLC_STAT_CRCVFR) ||
- (bcs->mode == L1_MODE_TRANS)) {
- skb = dev_alloc_skb(bcs->rcvidx);
- if (!skb) {
- printk(KERN_WARNING "HDLC: receive out of memory\n");
- } else {
- skb_put_data(skb, bcs->rcvbuf, bcs->rcvidx);
- DBG_SKB(1, skb);
- B_L1L2(bcs, PH_DATA | INDICATION, skb);
- }
- bcs->rcvidx = 0;
- } else {
- DBG(0x10, "ch%d invalid frame %#x",
- bcs->channel, stat);
- bcs->rcvidx = 0;
- }
- }
-}
-
-static inline void hdlc_xdu_irq(struct fritz_bcs *bcs)
-{
- struct fritz_adapter *adapter = bcs->adapter;
-
-
-	/* Here we lost a TX interrupt, so
- * restart transmitting the whole frame.
- */
- bcs->ctrl.sr.xml = 0;
- bcs->ctrl.sr.cmd |= HDLC_CMD_XRS;
- adapter->write_ctrl(bcs, 1);
- bcs->ctrl.sr.cmd &= ~HDLC_CMD_XRS;
-
- if (!bcs->tx_skb) {
- DBG(0x10, "XDU without skb");
- adapter->write_ctrl(bcs, 1);
- return;
- }
- /* only hdlc restarts the frame, transparent mode must continue */
- if (bcs->mode == L1_MODE_HDLC) {
- skb_push(bcs->tx_skb, bcs->tx_cnt);
- bcs->tx_cnt = 0;
- }
-}
-
-static inline void hdlc_xpr_irq(struct fritz_bcs *bcs)
-{
- struct sk_buff *skb;
-
- skb = bcs->tx_skb;
- if (!skb)
- return;
-
- if (skb->len) {
- hdlc_fill_fifo(bcs);
- return;
- }
- bcs->tx_cnt = 0;
- bcs->tx_skb = NULL;
- B_L1L2(bcs, PH_DATA | CONFIRM, (void *)(unsigned long)skb->truesize);
- dev_kfree_skb_irq(skb);
-}
-
-static void hdlc_irq_one(struct fritz_bcs *bcs, u32 stat)
-{
- DBG(0x10, "ch%d stat %#x", bcs->channel, stat);
- if (stat & HDLC_INT_RPR) {
- DBG(0x10, "RPR");
- hdlc_rpr_irq(bcs, stat);
- }
- if (stat & HDLC_INT_XDU) {
- DBG(0x10, "XDU");
- hdlc_xdu_irq(bcs);
- hdlc_xpr_irq(bcs);
- return;
- }
- if (stat & HDLC_INT_XPR) {
- DBG(0x10, "XPR");
- hdlc_xpr_irq(bcs);
- }
-}
-
-static inline void hdlc_irq(struct fritz_adapter *adapter)
-{
- int nr;
- u32 stat;
-
- for (nr = 0; nr < 2; nr++) {
- stat = adapter->read_hdlc_status(adapter, nr);
- DBG(0x10, "HDLC %c stat %#x", 'A' + nr, stat);
- if (stat & HDLC_INT_MASK)
- hdlc_irq_one(&adapter->bcs[nr], stat);
- }
-}
-
-static void modehdlc(struct fritz_bcs *bcs, int mode)
-{
- struct fritz_adapter *adapter = bcs->adapter;
-
- DBG(0x40, "hdlc %c mode %d --> %d",
- 'A' + bcs->channel, bcs->mode, mode);
-
- if (bcs->mode == mode)
- return;
-
- bcs->fifo_size = 32;
- bcs->ctrl.ctrl = 0;
- bcs->ctrl.sr.cmd = HDLC_CMD_XRS | HDLC_CMD_RRS;
- switch (mode) {
- case L1_MODE_NULL:
- bcs->ctrl.sr.mode = HDLC_MODE_TRANS;
- adapter->write_ctrl(bcs, 5);
- break;
- case L1_MODE_TRANS:
- case L1_MODE_HDLC:
- bcs->rcvidx = 0;
- bcs->tx_cnt = 0;
- bcs->tx_skb = NULL;
- if (mode == L1_MODE_TRANS) {
- bcs->ctrl.sr.mode = HDLC_MODE_TRANS;
- } else {
- bcs->ctrl.sr.mode = HDLC_MODE_ITF_FLG;
- }
- adapter->write_ctrl(bcs, 5);
- bcs->ctrl.sr.cmd = HDLC_CMD_XRS;
- adapter->write_ctrl(bcs, 1);
- bcs->ctrl.sr.cmd = 0;
- break;
- }
- bcs->mode = mode;
-}
-
-static void fritz_b_l2l1(struct hisax_if *ifc, int pr, void *arg)
-{
- struct fritz_bcs *bcs = ifc->priv;
- struct sk_buff *skb = arg;
- int mode;
-
- DBG(0x10, "pr %#x", pr);
-
- switch (pr) {
- case PH_DATA | REQUEST:
- BUG_ON(bcs->tx_skb);
- bcs->tx_skb = skb;
- DBG_SKB(1, skb);
- hdlc_fill_fifo(bcs);
- break;
- case PH_ACTIVATE | REQUEST:
- mode = (long) arg;
- DBG(4, "B%d,PH_ACTIVATE_REQUEST %d", bcs->channel + 1, mode);
- modehdlc(bcs, mode);
- B_L1L2(bcs, PH_ACTIVATE | INDICATION, NULL);
- break;
- case PH_DEACTIVATE | REQUEST:
- DBG(4, "B%d,PH_DEACTIVATE_REQUEST", bcs->channel + 1);
- modehdlc(bcs, L1_MODE_NULL);
- B_L1L2(bcs, PH_DEACTIVATE | INDICATION, NULL);
- break;
- }
-}
-
-// ----------------------------------------------------------------------
-
-static irqreturn_t
-fcpci2_irq(int intno, void *dev)
-{
- struct fritz_adapter *adapter = dev;
- unsigned char val;
-
- val = inb(adapter->io + AVM_STATUS0);
- if (!(val & AVM_STATUS0_IRQ_MASK))
-		/* hopefully a shared IRQ request */
- return IRQ_NONE;
- DBG(2, "STATUS0 %#x", val);
- if (val & AVM_STATUS0_IRQ_ISAC)
- isacsx_irq(&adapter->isac);
- if (val & AVM_STATUS0_IRQ_HDLC)
- hdlc_irq(adapter);
- if (val & AVM_STATUS0_IRQ_ISAC)
- isacsx_irq(&adapter->isac);
- return IRQ_HANDLED;
-}
-
-static irqreturn_t
-fcpci_irq(int intno, void *dev)
-{
- struct fritz_adapter *adapter = dev;
- unsigned char sval;
-
- sval = inb(adapter->io + 2);
- if ((sval & AVM_STATUS0_IRQ_MASK) == AVM_STATUS0_IRQ_MASK)
-		/* possibly a shared IRQ request */
- return IRQ_NONE;
- DBG(2, "sval %#x", sval);
- if (!(sval & AVM_STATUS0_IRQ_ISAC))
- isac_irq(&adapter->isac);
-
- if (!(sval & AVM_STATUS0_IRQ_HDLC))
- hdlc_irq(adapter);
- return IRQ_HANDLED;
-}
-
-// ----------------------------------------------------------------------
-
-static inline void fcpci2_init(struct fritz_adapter *adapter)
-{
- outb(AVM_STATUS0_RES_TIMER, adapter->io + AVM_STATUS0);
- outb(AVM_STATUS0_ENA_IRQ, adapter->io + AVM_STATUS0);
-
-}
-
-static inline void fcpci_init(struct fritz_adapter *adapter)
-{
- outb(AVM_STATUS0_DIS_TIMER | AVM_STATUS0_RES_TIMER |
- AVM_STATUS0_ENA_IRQ, adapter->io + AVM_STATUS0);
-
- outb(AVM_STATUS1_ENA_IOM | adapter->irq,
- adapter->io + AVM_STATUS1);
- mdelay(10);
-}
-
-// ----------------------------------------------------------------------
-
-static int fcpcipnp_setup(struct fritz_adapter *adapter)
-{
- u32 val = 0;
- int retval;
-
- DBG(1, "");
-
- isac_init(&adapter->isac); // FIXME is this okay now
-
- retval = -EBUSY;
- if (!request_region(adapter->io, 32, "fcpcipnp"))
- goto err;
-
- switch (adapter->type) {
- case AVM_FRITZ_PCIV2:
- case AVM_FRITZ_PCI:
- val = inl(adapter->io);
- break;
- case AVM_FRITZ_PNP:
- val = inb(adapter->io);
- val |= inb(adapter->io + 1) << 8;
- break;
- }
-
- DBG(1, "stat %#x Class %X Rev %d",
- val, val & 0xff, (val >> 8) & 0xff);
-
- spin_lock_init(&adapter->hw_lock);
- adapter->isac.priv = adapter;
- switch (adapter->type) {
- case AVM_FRITZ_PCIV2:
- adapter->isac.read_isac = &fcpci2_read_isac;
- adapter->isac.write_isac = &fcpci2_write_isac;
- adapter->isac.read_isac_fifo = &fcpci2_read_isac_fifo;
- adapter->isac.write_isac_fifo = &fcpci2_write_isac_fifo;
-
- adapter->read_hdlc_status = &fcpci2_read_hdlc_status;
- adapter->write_ctrl = &fcpci2_write_ctrl;
- break;
- case AVM_FRITZ_PCI:
- adapter->isac.read_isac = &fcpci_read_isac;
- adapter->isac.write_isac = &fcpci_write_isac;
- adapter->isac.read_isac_fifo = &fcpci_read_isac_fifo;
- adapter->isac.write_isac_fifo = &fcpci_write_isac_fifo;
-
- adapter->read_hdlc_status = &fcpci_read_hdlc_status;
- adapter->write_ctrl = &fcpci_write_ctrl;
- break;
- case AVM_FRITZ_PNP:
- adapter->isac.read_isac = &fcpci_read_isac;
- adapter->isac.write_isac = &fcpci_write_isac;
- adapter->isac.read_isac_fifo = &fcpci_read_isac_fifo;
- adapter->isac.write_isac_fifo = &fcpci_write_isac_fifo;
-
- adapter->read_hdlc_status = &fcpnp_read_hdlc_status;
- adapter->write_ctrl = &fcpnp_write_ctrl;
- break;
- }
-
- // Reset
- outb(0, adapter->io + AVM_STATUS0);
- mdelay(10);
- outb(AVM_STATUS0_RESET, adapter->io + AVM_STATUS0);
- mdelay(10);
- outb(0, adapter->io + AVM_STATUS0);
- mdelay(10);
-
- switch (adapter->type) {
- case AVM_FRITZ_PCIV2:
- retval = request_irq(adapter->irq, fcpci2_irq, IRQF_SHARED,
- "fcpcipnp", adapter);
- break;
- case AVM_FRITZ_PCI:
- retval = request_irq(adapter->irq, fcpci_irq, IRQF_SHARED,
- "fcpcipnp", adapter);
- break;
- case AVM_FRITZ_PNP:
- retval = request_irq(adapter->irq, fcpci_irq, 0,
- "fcpcipnp", adapter);
- break;
- }
- if (retval)
- goto err_region;
-
- switch (adapter->type) {
- case AVM_FRITZ_PCIV2:
- fcpci2_init(adapter);
- isacsx_setup(&adapter->isac);
- break;
- case AVM_FRITZ_PCI:
- case AVM_FRITZ_PNP:
- fcpci_init(adapter);
- isac_setup(&adapter->isac);
- break;
- }
- val = adapter->read_hdlc_status(adapter, 0);
- DBG(0x20, "HDLC A STA %x", val);
- val = adapter->read_hdlc_status(adapter, 1);
- DBG(0x20, "HDLC B STA %x", val);
-
- adapter->bcs[0].mode = -1;
- adapter->bcs[1].mode = -1;
- modehdlc(&adapter->bcs[0], L1_MODE_NULL);
- modehdlc(&adapter->bcs[1], L1_MODE_NULL);
-
- return 0;
-
-err_region:
- release_region(adapter->io, 32);
-err:
- return retval;
-}
-
-static void fcpcipnp_release(struct fritz_adapter *adapter)
-{
- DBG(1, "");
-
- outb(0, adapter->io + AVM_STATUS0);
- free_irq(adapter->irq, adapter);
- release_region(adapter->io, 32);
-}
-
-// ----------------------------------------------------------------------
-
-static struct fritz_adapter *new_adapter(void)
-{
- struct fritz_adapter *adapter;
- struct hisax_b_if *b_if[2];
- int i;
-
- adapter = kzalloc(sizeof(struct fritz_adapter), GFP_KERNEL);
- if (!adapter)
- return NULL;
-
- adapter->isac.hisax_d_if.owner = THIS_MODULE;
- adapter->isac.hisax_d_if.ifc.priv = &adapter->isac;
- adapter->isac.hisax_d_if.ifc.l2l1 = isac_d_l2l1;
-
- for (i = 0; i < 2; i++) {
- adapter->bcs[i].adapter = adapter;
- adapter->bcs[i].channel = i;
- adapter->bcs[i].b_if.ifc.priv = &adapter->bcs[i];
- adapter->bcs[i].b_if.ifc.l2l1 = fritz_b_l2l1;
- }
-
- for (i = 0; i < 2; i++)
- b_if[i] = &adapter->bcs[i].b_if;
-
- if (hisax_register(&adapter->isac.hisax_d_if, b_if, "fcpcipnp",
- protocol) != 0) {
- kfree(adapter);
- adapter = NULL;
- }
-
- return adapter;
-}
-
-static void delete_adapter(struct fritz_adapter *adapter)
-{
- hisax_unregister(&adapter->isac.hisax_d_if);
- kfree(adapter);
-}
-
-static int fcpci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- struct fritz_adapter *adapter;
- int retval;
-
- retval = -ENOMEM;
- adapter = new_adapter();
- if (!adapter)
- goto err;
-
- pci_set_drvdata(pdev, adapter);
-
- if (pdev->device == PCI_DEVICE_ID_AVM_A1_V2)
- adapter->type = AVM_FRITZ_PCIV2;
- else
- adapter->type = AVM_FRITZ_PCI;
-
- retval = pci_enable_device(pdev);
- if (retval)
- goto err_free;
-
- adapter->io = pci_resource_start(pdev, 1);
- adapter->irq = pdev->irq;
-
- printk(KERN_INFO "hisax_fcpcipnp: found adapter %s at %s\n",
- (char *) ent->driver_data, pci_name(pdev));
-
- retval = fcpcipnp_setup(adapter);
- if (retval)
- goto err_free;
-
- return 0;
-
-err_free:
- delete_adapter(adapter);
-err:
- return retval;
-}
-
-#ifdef CONFIG_PNP
-static int fcpnp_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
-{
- struct fritz_adapter *adapter;
- int retval;
-
- if (!pdev)
- return (-ENODEV);
-
- retval = -ENOMEM;
- adapter = new_adapter();
- if (!adapter)
- goto err;
-
- pnp_set_drvdata(pdev, adapter);
-
- adapter->type = AVM_FRITZ_PNP;
-
- pnp_disable_dev(pdev);
- retval = pnp_activate_dev(pdev);
- if (retval < 0) {
- printk(KERN_WARNING "%s: pnp_activate_dev(%s) ret(%d)\n", __func__,
- (char *)dev_id->driver_data, retval);
- goto err_free;
- }
- adapter->io = pnp_port_start(pdev, 0);
- adapter->irq = pnp_irq(pdev, 0);
- if (!adapter->io || adapter->irq == -1)
- goto err_free;
-
- printk(KERN_INFO "hisax_fcpcipnp: found adapter %s at IO %#x irq %d\n",
- (char *) dev_id->driver_data, adapter->io, adapter->irq);
-
- retval = fcpcipnp_setup(adapter);
- if (retval)
- goto err_free;
-
- return 0;
-
-err_free:
- delete_adapter(adapter);
-err:
- return retval;
-}
-
-static void fcpnp_remove(struct pnp_dev *pdev)
-{
- struct fritz_adapter *adapter = pnp_get_drvdata(pdev);
-
- if (adapter) {
- fcpcipnp_release(adapter);
- delete_adapter(adapter);
- }
- pnp_disable_dev(pdev);
-}
-
-static struct pnp_driver fcpnp_driver = {
- .name = "fcpnp",
- .probe = fcpnp_probe,
- .remove = fcpnp_remove,
- .id_table = fcpnp_ids,
-};
-#endif
-
-static void fcpci_remove(struct pci_dev *pdev)
-{
- struct fritz_adapter *adapter = pci_get_drvdata(pdev);
-
- fcpcipnp_release(adapter);
- pci_disable_device(pdev);
- delete_adapter(adapter);
-}
-
-static struct pci_driver fcpci_driver = {
- .name = "fcpci",
- .probe = fcpci_probe,
- .remove = fcpci_remove,
- .id_table = fcpci_ids,
-};
-
-static int __init hisax_fcpcipnp_init(void)
-{
- int retval;
-
- printk(KERN_INFO "hisax_fcpcipnp: Fritz!Card PCI/PCIv2/PnP ISDN driver v0.0.1\n");
-
- retval = pci_register_driver(&fcpci_driver);
- if (retval)
- return retval;
-#ifdef CONFIG_PNP
- retval = pnp_register_driver(&fcpnp_driver);
- if (retval < 0) {
- pci_unregister_driver(&fcpci_driver);
- return retval;
- }
-#endif
- return 0;
-}
-
-static void __exit hisax_fcpcipnp_exit(void)
-{
-#ifdef CONFIG_PNP
- pnp_unregister_driver(&fcpnp_driver);
-#endif
- pci_unregister_driver(&fcpci_driver);
-}
-
-module_init(hisax_fcpcipnp_init);
-module_exit(hisax_fcpcipnp_exit);
diff --git a/drivers/isdn/hisax/hisax_fcpcipnp.h b/drivers/isdn/hisax/hisax_fcpcipnp.h
deleted file mode 100644
index 1f64e9937aa1..000000000000
--- a/drivers/isdn/hisax/hisax_fcpcipnp.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#include "hisax_if.h"
-#include "hisax_isac.h"
-#include <linux/pci.h>
-
-#define HSCX_BUFMAX 4096
-
-enum {
- AVM_FRITZ_PCI,
- AVM_FRITZ_PNP,
- AVM_FRITZ_PCIV2,
-};
-
-struct hdlc_stat_reg {
-#ifdef __BIG_ENDIAN
- u_char fill;
- u_char mode;
- u_char xml;
- u_char cmd;
-#else
- u_char cmd;
- u_char xml;
- u_char mode;
- u_char fill;
-#endif
-} __attribute__((packed));
-
-struct fritz_bcs {
- struct hisax_b_if b_if;
- struct fritz_adapter *adapter;
- int mode;
- int channel;
-
- union {
- u_int ctrl;
- struct hdlc_stat_reg sr;
- } ctrl;
- u_int stat;
- int rcvidx;
- int fifo_size;
- u_char rcvbuf[HSCX_BUFMAX]; /* B-Channel receive Buffer */
-
- int tx_cnt; /* B-Channel transmit counter */
- struct sk_buff *tx_skb; /* B-Channel transmit Buffer */
-};
-
-struct fritz_adapter {
- int type;
- spinlock_t hw_lock;
- unsigned int io;
- unsigned int irq;
- struct isac isac;
-
- struct fritz_bcs bcs[2];
-
- u32 (*read_hdlc_status) (struct fritz_adapter *adapter, int nr);
- void (*write_ctrl) (struct fritz_bcs *bcs, int which);
-};
diff --git a/drivers/isdn/hisax/hisax_if.h b/drivers/isdn/hisax/hisax_if.h
deleted file mode 100644
index 7098d6bd5ff2..000000000000
--- a/drivers/isdn/hisax/hisax_if.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Interface between low level (hardware) drivers and
- * HiSax protocol stack
- *
- * Author Kai Germaschewski
- * Copyright 2001 by Kai Germaschewski <kai.germaschewski@gmx.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#ifndef __HISAX_IF_H__
-#define __HISAX_IF_H__
-
-#include <linux/skbuff.h>
-
-#define REQUEST 0
-#define CONFIRM 1
-#define INDICATION 2
-#define RESPONSE 3
-
-#define PH_ACTIVATE 0x0100
-#define PH_DEACTIVATE 0x0110
-#define PH_DATA 0x0120
-#define PH_PULL 0x0130
-#define PH_DATA_E 0x0140
-
-#define L1_MODE_NULL 0
-#define L1_MODE_TRANS 1
-#define L1_MODE_HDLC 2
-#define L1_MODE_EXTRN 3
-#define L1_MODE_HDLC_56K 4
-#define L1_MODE_MODEM 7
-#define L1_MODE_V32 8
-#define L1_MODE_FAX 9
-
-struct hisax_if {
- void *priv; // private to driver
- void (*l1l2)(struct hisax_if *, int pr, void *arg);
- void (*l2l1)(struct hisax_if *, int pr, void *arg);
-};
-
-struct hisax_b_if {
- struct hisax_if ifc;
-
- // private to hisax
- struct BCState *bcs;
-};
-
-struct hisax_d_if {
- struct hisax_if ifc;
-
- // private to hisax
- struct module *owner;
- struct IsdnCardState *cs;
- struct hisax_b_if *b_if[2];
- struct sk_buff_head erq;
- unsigned long ph_state;
-};
-
-int hisax_register(struct hisax_d_if *hisax_if, struct hisax_b_if *b_if[],
- char *name, int protocol);
-void hisax_unregister(struct hisax_d_if *hisax_if);
-
-#endif
diff --git a/drivers/isdn/hisax/hisax_isac.c b/drivers/isdn/hisax/hisax_isac.c
deleted file mode 100644
index 0f36375478c5..000000000000
--- a/drivers/isdn/hisax/hisax_isac.c
+++ /dev/null
@@ -1,895 +0,0 @@
-/*
- * Driver for ISAC-S and ISAC-SX
- * ISDN Subscriber Access Controller for Terminals
- *
- * Author Kai Germaschewski
- * Copyright 2001 by Kai Germaschewski <kai.germaschewski@gmx.de>
- * 2001 by Karsten Keil <keil@isdn4linux.de>
- *
- * based upon Karsten Keil's original isac.c driver
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- * Thanks to Wizard Computersysteme GmbH, Bremervoerde and
- * SoHaNet Technology GmbH, Berlin
- * for supporting the development of this driver
- */
-
-/* TODO:
- * specifically handle level vs edge triggered?
- */
-
-#include <linux/module.h>
-#include <linux/gfp.h>
-#include <linux/init.h>
-#include <linux/netdevice.h>
-#include "hisax_isac.h"
-
-// debugging cruft
-
-#define __debug_variable debug
-#include "hisax_debug.h"
-
-#ifdef CONFIG_HISAX_DEBUG
-static int debug = 1;
-module_param(debug, int, 0);
-
-static char *ISACVer[] = {
- "2086/2186 V1.1",
- "2085 B1",
- "2085 B2",
- "2085 V2.3"
-};
-#endif
-
-MODULE_AUTHOR("Kai Germaschewski <kai.germaschewski@gmx.de>/Karsten Keil <kkeil@suse.de>");
-MODULE_DESCRIPTION("ISAC/ISAC-SX driver");
-MODULE_LICENSE("GPL");
-
-#define DBG_WARN 0x0001
-#define DBG_IRQ 0x0002
-#define DBG_L1M 0x0004
-#define DBG_PR 0x0008
-#define DBG_RFIFO 0x0100
-#define DBG_RPACKET 0x0200
-#define DBG_XFIFO 0x1000
-#define DBG_XPACKET 0x2000
-
-// we need to distinguish ISAC-S and ISAC-SX
-#define TYPE_ISAC 0x00
-#define TYPE_ISACSX 0x01
-
-// registers etc.
-#define ISAC_MASK 0x20
-#define ISAC_ISTA 0x20
-#define ISAC_ISTA_EXI 0x01
-#define ISAC_ISTA_SIN 0x02
-#define ISAC_ISTA_CISQ 0x04
-#define ISAC_ISTA_XPR 0x10
-#define ISAC_ISTA_RSC 0x20
-#define ISAC_ISTA_RPF 0x40
-#define ISAC_ISTA_RME 0x80
-
-#define ISAC_STAR 0x21
-#define ISAC_CMDR 0x21
-#define ISAC_CMDR_XRES 0x01
-#define ISAC_CMDR_XME 0x02
-#define ISAC_CMDR_XTF 0x08
-#define ISAC_CMDR_RRES 0x40
-#define ISAC_CMDR_RMC 0x80
-
-#define ISAC_EXIR 0x24
-#define ISAC_EXIR_MOS 0x04
-#define ISAC_EXIR_XDU 0x40
-#define ISAC_EXIR_XMR 0x80
-
-#define ISAC_ADF2 0x39
-#define ISAC_SPCR 0x30
-#define ISAC_ADF1 0x38
-
-#define ISAC_CIR0 0x31
-#define ISAC_CIX0 0x31
-#define ISAC_CIR0_CIC0 0x02
-#define ISAC_CIR0_CIC1 0x01
-
-#define ISAC_CIR1 0x33
-#define ISAC_CIX1 0x33
-#define ISAC_STCR 0x37
-#define ISAC_MODE 0x22
-
-#define ISAC_RSTA 0x27
-#define ISAC_RSTA_RDO 0x40
-#define ISAC_RSTA_CRC 0x20
-#define ISAC_RSTA_RAB 0x10
-
-#define ISAC_RBCL 0x25
-#define ISAC_RBCH 0x2A
-#define ISAC_TIMR 0x23
-#define ISAC_SQXR 0x3b
-#define ISAC_MOSR 0x3a
-#define ISAC_MOCR 0x3a
-#define ISAC_MOR0 0x32
-#define ISAC_MOX0 0x32
-#define ISAC_MOR1 0x34
-#define ISAC_MOX1 0x34
-
-#define ISAC_RBCH_XAC 0x80
-
-#define ISAC_CMD_TIM 0x0
-#define ISAC_CMD_RES 0x1
-#define ISAC_CMD_SSP 0x2
-#define ISAC_CMD_SCP 0x3
-#define ISAC_CMD_AR8 0x8
-#define ISAC_CMD_AR10 0x9
-#define ISAC_CMD_ARL 0xa
-#define ISAC_CMD_DI 0xf
-
-#define ISACSX_MASK 0x60
-#define ISACSX_ISTA 0x60
-#define ISACSX_ISTA_ICD 0x01
-#define ISACSX_ISTA_CIC 0x10
-
-#define ISACSX_MASKD 0x20
-#define ISACSX_ISTAD 0x20
-#define ISACSX_ISTAD_XDU 0x04
-#define ISACSX_ISTAD_XMR 0x08
-#define ISACSX_ISTAD_XPR 0x10
-#define ISACSX_ISTAD_RFO 0x20
-#define ISACSX_ISTAD_RPF 0x40
-#define ISACSX_ISTAD_RME 0x80
-
-#define ISACSX_CMDRD 0x21
-#define ISACSX_CMDRD_XRES 0x01
-#define ISACSX_CMDRD_XME 0x02
-#define ISACSX_CMDRD_XTF 0x08
-#define ISACSX_CMDRD_RRES 0x40
-#define ISACSX_CMDRD_RMC 0x80
-
-#define ISACSX_MODED 0x22
-
-#define ISACSX_RBCLD 0x26
-
-#define ISACSX_RSTAD 0x28
-#define ISACSX_RSTAD_RAB 0x10
-#define ISACSX_RSTAD_CRC 0x20
-#define ISACSX_RSTAD_RDO 0x40
-#define ISACSX_RSTAD_VFR 0x80
-
-#define ISACSX_CIR0 0x2e
-#define ISACSX_CIR0_CIC0 0x08
-#define ISACSX_CIX0 0x2e
-
-#define ISACSX_TR_CONF0 0x30
-
-#define ISACSX_TR_CONF2 0x32
-
-static struct Fsm l1fsm;
-
-enum {
- ST_L1_RESET,
- ST_L1_F3_PDOWN,
- ST_L1_F3_PUP,
- ST_L1_F3_PEND_DEACT,
- ST_L1_F4,
- ST_L1_F5,
- ST_L1_F6,
- ST_L1_F7,
- ST_L1_F8,
-};
-
-#define L1_STATE_COUNT (ST_L1_F8 + 1)
-
-static char *strL1State[] =
-{
- "ST_L1_RESET",
- "ST_L1_F3_PDOWN",
- "ST_L1_F3_PUP",
- "ST_L1_F3_PEND_DEACT",
- "ST_L1_F4",
- "ST_L1_F5",
- "ST_L1_F6",
- "ST_L1_F7",
- "ST_L1_F8",
-};
-
-enum {
- EV_PH_DR, // 0000
- EV_PH_RES, // 0001
- EV_PH_TMA, // 0010
- EV_PH_SLD, // 0011
- EV_PH_RSY, // 0100
- EV_PH_DR6, // 0101
- EV_PH_EI, // 0110
- EV_PH_PU, // 0111
- EV_PH_AR, // 1000
- EV_PH_9, // 1001
- EV_PH_ARL, // 1010
- EV_PH_CVR, // 1011
- EV_PH_AI8, // 1100
- EV_PH_AI10, // 1101
- EV_PH_AIL, // 1110
- EV_PH_DC, // 1111
- EV_PH_ACTIVATE_REQ,
- EV_PH_DEACTIVATE_REQ,
- EV_TIMER3,
-};
-
-#define L1_EVENT_COUNT (EV_TIMER3 + 1)
-
-static char *strL1Event[] =
-{
- "EV_PH_DR", // 0000
- "EV_PH_RES", // 0001
- "EV_PH_TMA", // 0010
- "EV_PH_SLD", // 0011
- "EV_PH_RSY", // 0100
- "EV_PH_DR6", // 0101
- "EV_PH_EI", // 0110
- "EV_PH_PU", // 0111
- "EV_PH_AR", // 1000
- "EV_PH_9", // 1001
- "EV_PH_ARL", // 1010
- "EV_PH_CVR", // 1011
- "EV_PH_AI8", // 1100
- "EV_PH_AI10", // 1101
- "EV_PH_AIL", // 1110
- "EV_PH_DC", // 1111
- "EV_PH_ACTIVATE_REQ",
- "EV_PH_DEACTIVATE_REQ",
- "EV_TIMER3",
-};
-
-static inline void D_L1L2(struct isac *isac, int pr, void *arg)
-{
- struct hisax_if *ifc = (struct hisax_if *) &isac->hisax_d_if;
-
- DBG(DBG_PR, "pr %#x", pr);
- ifc->l1l2(ifc, pr, arg);
-}
-
-static void ph_command(struct isac *isac, unsigned int command)
-{
- DBG(DBG_L1M, "ph_command %#x", command);
- switch (isac->type) {
- case TYPE_ISAC:
- isac->write_isac(isac, ISAC_CIX0, (command << 2) | 3);
- break;
- case TYPE_ISACSX:
- isac->write_isac(isac, ISACSX_CIX0, (command << 4) | (7 << 1));
- break;
- }
-}
-
-// ----------------------------------------------------------------------
-
-static void l1_di(struct FsmInst *fi, int event, void *arg)
-{
- struct isac *isac = fi->userdata;
-
- FsmChangeState(fi, ST_L1_RESET);
- ph_command(isac, ISAC_CMD_DI);
-}
-
-static void l1_di_deact_ind(struct FsmInst *fi, int event, void *arg)
-{
- struct isac *isac = fi->userdata;
-
- FsmChangeState(fi, ST_L1_RESET);
- D_L1L2(isac, PH_DEACTIVATE | INDICATION, NULL);
- ph_command(isac, ISAC_CMD_DI);
-}
-
-static void l1_go_f3pdown(struct FsmInst *fi, int event, void *arg)
-{
- FsmChangeState(fi, ST_L1_F3_PDOWN);
-}
-
-static void l1_go_f3pend_deact_ind(struct FsmInst *fi, int event, void *arg)
-{
- struct isac *isac = fi->userdata;
-
- FsmChangeState(fi, ST_L1_F3_PEND_DEACT);
- D_L1L2(isac, PH_DEACTIVATE | INDICATION, NULL);
- ph_command(isac, ISAC_CMD_DI);
-}
-
-static void l1_go_f3pend(struct FsmInst *fi, int event, void *arg)
-{
- struct isac *isac = fi->userdata;
-
- FsmChangeState(fi, ST_L1_F3_PEND_DEACT);
- ph_command(isac, ISAC_CMD_DI);
-}
-
-static void l1_go_f4(struct FsmInst *fi, int event, void *arg)
-{
- FsmChangeState(fi, ST_L1_F4);
-}
-
-static void l1_go_f5(struct FsmInst *fi, int event, void *arg)
-{
- FsmChangeState(fi, ST_L1_F5);
-}
-
-static void l1_go_f6(struct FsmInst *fi, int event, void *arg)
-{
- FsmChangeState(fi, ST_L1_F6);
-}
-
-static void l1_go_f6_deact_ind(struct FsmInst *fi, int event, void *arg)
-{
- struct isac *isac = fi->userdata;
-
- FsmChangeState(fi, ST_L1_F6);
- D_L1L2(isac, PH_DEACTIVATE | INDICATION, NULL);
-}
-
-static void l1_go_f7_act_ind(struct FsmInst *fi, int event, void *arg)
-{
- struct isac *isac = fi->userdata;
-
- FsmDelTimer(&isac->timer, 0);
- FsmChangeState(fi, ST_L1_F7);
- ph_command(isac, ISAC_CMD_AR8);
- D_L1L2(isac, PH_ACTIVATE | INDICATION, NULL);
-}
-
-static void l1_go_f8(struct FsmInst *fi, int event, void *arg)
-{
- FsmChangeState(fi, ST_L1_F8);
-}
-
-static void l1_go_f8_deact_ind(struct FsmInst *fi, int event, void *arg)
-{
- struct isac *isac = fi->userdata;
-
- FsmChangeState(fi, ST_L1_F8);
- D_L1L2(isac, PH_DEACTIVATE | INDICATION, NULL);
-}
-
-static void l1_ar8(struct FsmInst *fi, int event, void *arg)
-{
- struct isac *isac = fi->userdata;
-
- FsmRestartTimer(&isac->timer, TIMER3_VALUE, EV_TIMER3, NULL, 2);
- ph_command(isac, ISAC_CMD_AR8);
-}
-
-static void l1_timer3(struct FsmInst *fi, int event, void *arg)
-{
- struct isac *isac = fi->userdata;
-
- ph_command(isac, ISAC_CMD_DI);
- D_L1L2(isac, PH_DEACTIVATE | INDICATION, NULL);
-}
-
-// state machines according to data sheet PSB 2186 / 3186
-
-static struct FsmNode L1FnList[] __initdata =
-{
- {ST_L1_RESET, EV_PH_RES, l1_di},
- {ST_L1_RESET, EV_PH_EI, l1_di},
- {ST_L1_RESET, EV_PH_DC, l1_go_f3pdown},
- {ST_L1_RESET, EV_PH_AR, l1_go_f6},
- {ST_L1_RESET, EV_PH_AI8, l1_go_f7_act_ind},
-
- {ST_L1_F3_PDOWN, EV_PH_RES, l1_di},
- {ST_L1_F3_PDOWN, EV_PH_EI, l1_di},
- {ST_L1_F3_PDOWN, EV_PH_AR, l1_go_f6},
- {ST_L1_F3_PDOWN, EV_PH_RSY, l1_go_f5},
- {ST_L1_F3_PDOWN, EV_PH_PU, l1_go_f4},
- {ST_L1_F3_PDOWN, EV_PH_AI8, l1_go_f7_act_ind},
- {ST_L1_F3_PDOWN, EV_PH_ACTIVATE_REQ, l1_ar8},
- {ST_L1_F3_PDOWN, EV_TIMER3, l1_timer3},
-
- {ST_L1_F3_PEND_DEACT, EV_PH_RES, l1_di},
- {ST_L1_F3_PEND_DEACT, EV_PH_EI, l1_di},
- {ST_L1_F3_PEND_DEACT, EV_PH_DC, l1_go_f3pdown},
- {ST_L1_F3_PEND_DEACT, EV_PH_RSY, l1_go_f5},
- {ST_L1_F3_PEND_DEACT, EV_PH_AR, l1_go_f6},
- {ST_L1_F3_PEND_DEACT, EV_PH_AI8, l1_go_f7_act_ind},
-
- {ST_L1_F4, EV_PH_RES, l1_di},
- {ST_L1_F4, EV_PH_EI, l1_di},
- {ST_L1_F4, EV_PH_RSY, l1_go_f5},
- {ST_L1_F4, EV_PH_AI8, l1_go_f7_act_ind},
- {ST_L1_F4, EV_TIMER3, l1_timer3},
- {ST_L1_F4, EV_PH_DC, l1_go_f3pdown},
-
- {ST_L1_F5, EV_PH_RES, l1_di},
- {ST_L1_F5, EV_PH_EI, l1_di},
- {ST_L1_F5, EV_PH_AR, l1_go_f6},
- {ST_L1_F5, EV_PH_AI8, l1_go_f7_act_ind},
- {ST_L1_F5, EV_TIMER3, l1_timer3},
- {ST_L1_F5, EV_PH_DR, l1_go_f3pend},
- {ST_L1_F5, EV_PH_DC, l1_go_f3pdown},
-
- {ST_L1_F6, EV_PH_RES, l1_di},
- {ST_L1_F6, EV_PH_EI, l1_di},
- {ST_L1_F6, EV_PH_RSY, l1_go_f8},
- {ST_L1_F6, EV_PH_AI8, l1_go_f7_act_ind},
- {ST_L1_F6, EV_PH_DR6, l1_go_f3pend},
- {ST_L1_F6, EV_TIMER3, l1_timer3},
- {ST_L1_F6, EV_PH_DC, l1_go_f3pdown},
-
- {ST_L1_F7, EV_PH_RES, l1_di_deact_ind},
- {ST_L1_F7, EV_PH_EI, l1_di_deact_ind},
- {ST_L1_F7, EV_PH_AR, l1_go_f6_deact_ind},
- {ST_L1_F7, EV_PH_RSY, l1_go_f8_deact_ind},
- {ST_L1_F7, EV_PH_DR, l1_go_f3pend_deact_ind},
-
- {ST_L1_F8, EV_PH_RES, l1_di},
- {ST_L1_F8, EV_PH_EI, l1_di},
- {ST_L1_F8, EV_PH_AR, l1_go_f6},
- {ST_L1_F8, EV_PH_DR, l1_go_f3pend},
- {ST_L1_F8, EV_PH_AI8, l1_go_f7_act_ind},
- {ST_L1_F8, EV_TIMER3, l1_timer3},
- {ST_L1_F8, EV_PH_DC, l1_go_f3pdown},
-};
-
-static void l1m_debug(struct FsmInst *fi, char *fmt, ...)
-{
- va_list args;
- char buf[256];
-
- va_start(args, fmt);
- vsnprintf(buf, sizeof(buf), fmt, args);
- DBG(DBG_L1M, "%s", buf);
- va_end(args);
-}
-
-static void isac_version(struct isac *cs)
-{
- int val;
-
- val = cs->read_isac(cs, ISAC_RBCH);
- DBG(1, "ISAC version (%x): %s", val, ISACVer[(val >> 5) & 3]);
-}
-
-static void isac_empty_fifo(struct isac *isac, int count)
-{
- // this also works for isacsx, since
- // CMDR(D) register works the same
- u_char *ptr;
-
- DBG(DBG_IRQ, "count %d", count);
-
- if ((isac->rcvidx + count) >= MAX_DFRAME_LEN_L1) {
- DBG(DBG_WARN, "overrun %d", isac->rcvidx + count);
- isac->write_isac(isac, ISAC_CMDR, ISAC_CMDR_RMC);
- isac->rcvidx = 0;
- return;
- }
- ptr = isac->rcvbuf + isac->rcvidx;
- isac->rcvidx += count;
- isac->read_isac_fifo(isac, ptr, count);
- isac->write_isac(isac, ISAC_CMDR, ISAC_CMDR_RMC);
- DBG_PACKET(DBG_RFIFO, ptr, count);
-}
-
-static void isac_fill_fifo(struct isac *isac)
-{
- // this also works for isacsx, since
- // CMDR(D) register works the same
-
- int count;
- unsigned char cmd;
- u_char *ptr;
-
- BUG_ON(!isac->tx_skb);
-
- count = isac->tx_skb->len;
- BUG_ON(count <= 0);
-
- DBG(DBG_IRQ, "count %d", count);
-
- if (count > 0x20) {
- count = 0x20;
- cmd = ISAC_CMDR_XTF;
- } else {
- cmd = ISAC_CMDR_XTF | ISAC_CMDR_XME;
- }
-
- ptr = isac->tx_skb->data;
- skb_pull(isac->tx_skb, count);
- isac->tx_cnt += count;
- DBG_PACKET(DBG_XFIFO, ptr, count);
- isac->write_isac_fifo(isac, ptr, count);
- isac->write_isac(isac, ISAC_CMDR, cmd);
-}
-
-static void isac_retransmit(struct isac *isac)
-{
- if (!isac->tx_skb) {
- DBG(DBG_WARN, "no skb");
- return;
- }
- skb_push(isac->tx_skb, isac->tx_cnt);
- isac->tx_cnt = 0;
-}
-
-
-static inline void isac_cisq_interrupt(struct isac *isac)
-{
- unsigned char val;
-
- val = isac->read_isac(isac, ISAC_CIR0);
- DBG(DBG_IRQ, "CIR0 %#x", val);
- if (val & ISAC_CIR0_CIC0) {
- DBG(DBG_IRQ, "CODR0 %#x", (val >> 2) & 0xf);
- FsmEvent(&isac->l1m, (val >> 2) & 0xf, NULL);
- }
- if (val & ISAC_CIR0_CIC1) {
- val = isac->read_isac(isac, ISAC_CIR1);
- DBG(DBG_WARN, "ISAC CIR1 %#x", val);
- }
-}
-
-static inline void isac_rme_interrupt(struct isac *isac)
-{
- unsigned char val;
- int count;
- struct sk_buff *skb;
-
- val = isac->read_isac(isac, ISAC_RSTA);
- if ((val & (ISAC_RSTA_RDO | ISAC_RSTA_CRC | ISAC_RSTA_RAB))
- != ISAC_RSTA_CRC) {
- DBG(DBG_WARN, "RSTA %#x, dropped", val);
- isac->write_isac(isac, ISAC_CMDR, ISAC_CMDR_RMC);
- goto out;
- }
-
- count = isac->read_isac(isac, ISAC_RBCL) & 0x1f;
- DBG(DBG_IRQ, "RBCL %#x", count);
- if (count == 0)
- count = 0x20;
-
- isac_empty_fifo(isac, count);
- count = isac->rcvidx;
- if (count < 1) {
- DBG(DBG_WARN, "count %d < 1", count);
- goto out;
- }
-
- skb = alloc_skb(count, GFP_ATOMIC);
- if (!skb) {
- DBG(DBG_WARN, "no memory, dropping\n");
- goto out;
- }
- skb_put_data(skb, isac->rcvbuf, count);
- DBG_SKB(DBG_RPACKET, skb);
- D_L1L2(isac, PH_DATA | INDICATION, skb);
-out:
- isac->rcvidx = 0;
-}
-
-static inline void isac_xpr_interrupt(struct isac *isac)
-{
- if (!isac->tx_skb)
- return;
-
- if (isac->tx_skb->len > 0) {
- isac_fill_fifo(isac);
- return;
- }
- dev_kfree_skb_irq(isac->tx_skb);
- isac->tx_cnt = 0;
- isac->tx_skb = NULL;
- D_L1L2(isac, PH_DATA | CONFIRM, NULL);
-}
-
-static inline void isac_exi_interrupt(struct isac *isac)
-{
- unsigned char val;
-
- val = isac->read_isac(isac, ISAC_EXIR);
- DBG(2, "EXIR %#x", val);
-
- if (val & ISAC_EXIR_XMR) {
- DBG(DBG_WARN, "ISAC XMR");
- isac_retransmit(isac);
- }
- if (val & ISAC_EXIR_XDU) {
- DBG(DBG_WARN, "ISAC XDU");
- isac_retransmit(isac);
- }
- if (val & ISAC_EXIR_MOS) { /* MOS */
- DBG(DBG_WARN, "MOS");
- val = isac->read_isac(isac, ISAC_MOSR);
- DBG(2, "ISAC MOSR %#x", val);
- }
-}
-
-void isac_irq(struct isac *isac)
-{
- unsigned char val;
-
- val = isac->read_isac(isac, ISAC_ISTA);
- DBG(DBG_IRQ, "ISTA %#x", val);
-
- if (val & ISAC_ISTA_EXI) {
- DBG(DBG_IRQ, "EXI");
- isac_exi_interrupt(isac);
- }
- if (val & ISAC_ISTA_XPR) {
- DBG(DBG_IRQ, "XPR");
- isac_xpr_interrupt(isac);
- }
- if (val & ISAC_ISTA_RME) {
- DBG(DBG_IRQ, "RME");
- isac_rme_interrupt(isac);
- }
- if (val & ISAC_ISTA_RPF) {
- DBG(DBG_IRQ, "RPF");
- isac_empty_fifo(isac, 0x20);
- }
- if (val & ISAC_ISTA_CISQ) {
- DBG(DBG_IRQ, "CISQ");
- isac_cisq_interrupt(isac);
- }
- if (val & ISAC_ISTA_RSC) {
- DBG(DBG_WARN, "RSC");
- }
- if (val & ISAC_ISTA_SIN) {
- DBG(DBG_WARN, "SIN");
- }
- isac->write_isac(isac, ISAC_MASK, 0xff);
- isac->write_isac(isac, ISAC_MASK, 0x00);
-}
-
-// ======================================================================
-
-static inline void isacsx_cic_interrupt(struct isac *isac)
-{
- unsigned char val;
-
- val = isac->read_isac(isac, ISACSX_CIR0);
- DBG(DBG_IRQ, "CIR0 %#x", val);
- if (val & ISACSX_CIR0_CIC0) {
- DBG(DBG_IRQ, "CODR0 %#x", val >> 4);
- FsmEvent(&isac->l1m, val >> 4, NULL);
- }
-}
-
-static inline void isacsx_rme_interrupt(struct isac *isac)
-{
- int count;
- struct sk_buff *skb;
- unsigned char val;
-
- val = isac->read_isac(isac, ISACSX_RSTAD);
- if ((val & (ISACSX_RSTAD_VFR |
- ISACSX_RSTAD_RDO |
- ISACSX_RSTAD_CRC |
- ISACSX_RSTAD_RAB))
- != (ISACSX_RSTAD_VFR | ISACSX_RSTAD_CRC)) {
- DBG(DBG_WARN, "RSTAD %#x, dropped", val);
- isac->write_isac(isac, ISACSX_CMDRD, ISACSX_CMDRD_RMC);
- goto out;
- }
-
- count = isac->read_isac(isac, ISACSX_RBCLD) & 0x1f;
- DBG(DBG_IRQ, "RBCLD %#x", count);
- if (count == 0)
- count = 0x20;
-
- isac_empty_fifo(isac, count);
- // strip trailing status byte
- count = isac->rcvidx - 1;
- if (count < 1) {
- DBG(DBG_WARN, "count %d < 1", count);
- goto out;
- }
-
- skb = dev_alloc_skb(count);
- if (!skb) {
- DBG(DBG_WARN, "no memory, dropping");
- goto out;
- }
- skb_put_data(skb, isac->rcvbuf, count);
- DBG_SKB(DBG_RPACKET, skb);
- D_L1L2(isac, PH_DATA | INDICATION, skb);
-out:
- isac->rcvidx = 0;
-}
-
-static inline void isacsx_xpr_interrupt(struct isac *isac)
-{
- if (!isac->tx_skb)
- return;
-
- if (isac->tx_skb->len > 0) {
- isac_fill_fifo(isac);
- return;
- }
- dev_kfree_skb_irq(isac->tx_skb);
- isac->tx_skb = NULL;
- isac->tx_cnt = 0;
- D_L1L2(isac, PH_DATA | CONFIRM, NULL);
-}
-
-static inline void isacsx_icd_interrupt(struct isac *isac)
-{
- unsigned char val;
-
- val = isac->read_isac(isac, ISACSX_ISTAD);
- DBG(DBG_IRQ, "ISTAD %#x", val);
- if (val & ISACSX_ISTAD_XDU) {
- DBG(DBG_WARN, "ISTAD XDU");
- isac_retransmit(isac);
- }
- if (val & ISACSX_ISTAD_XMR) {
- DBG(DBG_WARN, "ISTAD XMR");
- isac_retransmit(isac);
- }
- if (val & ISACSX_ISTAD_XPR) {
- DBG(DBG_IRQ, "ISTAD XPR");
- isacsx_xpr_interrupt(isac);
- }
- if (val & ISACSX_ISTAD_RFO) {
- DBG(DBG_WARN, "ISTAD RFO");
- isac->write_isac(isac, ISACSX_CMDRD, ISACSX_CMDRD_RMC);
- }
- if (val & ISACSX_ISTAD_RME) {
- DBG(DBG_IRQ, "ISTAD RME");
- isacsx_rme_interrupt(isac);
- }
- if (val & ISACSX_ISTAD_RPF) {
- DBG(DBG_IRQ, "ISTAD RPF");
- isac_empty_fifo(isac, 0x20);
- }
-}
-
-void isacsx_irq(struct isac *isac)
-{
- unsigned char val;
-
- val = isac->read_isac(isac, ISACSX_ISTA);
- DBG(DBG_IRQ, "ISTA %#x", val);
-
- if (val & ISACSX_ISTA_ICD)
- isacsx_icd_interrupt(isac);
- if (val & ISACSX_ISTA_CIC)
- isacsx_cic_interrupt(isac);
-}
-
-void isac_init(struct isac *isac)
-{
- isac->tx_skb = NULL;
- isac->l1m.fsm = &l1fsm;
- isac->l1m.state = ST_L1_RESET;
-#ifdef CONFIG_HISAX_DEBUG
- isac->l1m.debug = 1;
-#else
- isac->l1m.debug = 0;
-#endif
- isac->l1m.userdata = isac;
- isac->l1m.printdebug = l1m_debug;
- FsmInitTimer(&isac->l1m, &isac->timer);
-}
-
-void isac_setup(struct isac *isac)
-{
- int val, eval;
-
- isac->type = TYPE_ISAC;
- isac_version(isac);
-
- ph_command(isac, ISAC_CMD_RES);
-
- isac->write_isac(isac, ISAC_MASK, 0xff);
- isac->mocr = 0xaa;
- if (test_bit(ISAC_IOM1, &isac->flags)) {
- /* IOM 1 Mode */
- isac->write_isac(isac, ISAC_ADF2, 0x0);
- isac->write_isac(isac, ISAC_SPCR, 0xa);
- isac->write_isac(isac, ISAC_ADF1, 0x2);
- isac->write_isac(isac, ISAC_STCR, 0x70);
- isac->write_isac(isac, ISAC_MODE, 0xc9);
- } else {
- /* IOM 2 Mode */
- if (!isac->adf2)
- isac->adf2 = 0x80;
- isac->write_isac(isac, ISAC_ADF2, isac->adf2);
- isac->write_isac(isac, ISAC_SQXR, 0x2f);
- isac->write_isac(isac, ISAC_SPCR, 0x00);
- isac->write_isac(isac, ISAC_STCR, 0x70);
- isac->write_isac(isac, ISAC_MODE, 0xc9);
- isac->write_isac(isac, ISAC_TIMR, 0x00);
- isac->write_isac(isac, ISAC_ADF1, 0x00);
- }
- val = isac->read_isac(isac, ISAC_STAR);
- DBG(2, "ISAC STAR %x", val);
- val = isac->read_isac(isac, ISAC_MODE);
- DBG(2, "ISAC MODE %x", val);
- val = isac->read_isac(isac, ISAC_ADF2);
- DBG(2, "ISAC ADF2 %x", val);
- val = isac->read_isac(isac, ISAC_ISTA);
- DBG(2, "ISAC ISTA %x", val);
- if (val & 0x01) {
- eval = isac->read_isac(isac, ISAC_EXIR);
- DBG(2, "ISAC EXIR %x", eval);
- }
- val = isac->read_isac(isac, ISAC_CIR0);
- DBG(2, "ISAC CIR0 %x", val);
- FsmEvent(&isac->l1m, (val >> 2) & 0xf, NULL);
-
- isac->write_isac(isac, ISAC_MASK, 0x0);
- // RESET Receiver and Transmitter
- isac->write_isac(isac, ISAC_CMDR, ISAC_CMDR_XRES | ISAC_CMDR_RRES);
-}
-
-void isacsx_setup(struct isac *isac)
-{
- isac->type = TYPE_ISACSX;
- // clear LDD
- isac->write_isac(isac, ISACSX_TR_CONF0, 0x00);
- // enable transmitter
- isac->write_isac(isac, ISACSX_TR_CONF2, 0x00);
- // transparent mode 0, RAC, stop/go
- isac->write_isac(isac, ISACSX_MODED, 0xc9);
- // all HDLC IRQ unmasked
- isac->write_isac(isac, ISACSX_MASKD, 0x03);
- // unmask ICD, CID IRQs
- isac->write_isac(isac, ISACSX_MASK,
- ~(ISACSX_ISTA_ICD | ISACSX_ISTA_CIC));
-}
-
-void isac_d_l2l1(struct hisax_if *hisax_d_if, int pr, void *arg)
-{
- struct isac *isac = hisax_d_if->priv;
- struct sk_buff *skb = arg;
-
- DBG(DBG_PR, "pr %#x", pr);
-
- switch (pr) {
- case PH_ACTIVATE | REQUEST:
- FsmEvent(&isac->l1m, EV_PH_ACTIVATE_REQ, NULL);
- break;
- case PH_DEACTIVATE | REQUEST:
- FsmEvent(&isac->l1m, EV_PH_DEACTIVATE_REQ, NULL);
- break;
- case PH_DATA | REQUEST:
- DBG(DBG_PR, "PH_DATA REQUEST len %d", skb->len);
- DBG_SKB(DBG_XPACKET, skb);
- if (isac->l1m.state != ST_L1_F7) {
- DBG(1, "L1 wrong state %d\n", isac->l1m.state);
- dev_kfree_skb(skb);
- break;
- }
- BUG_ON(isac->tx_skb);
-
- isac->tx_skb = skb;
- isac_fill_fifo(isac);
- break;
- }
-}
-
-static int __init hisax_isac_init(void)
-{
- printk(KERN_INFO "hisax_isac: ISAC-S/ISAC-SX ISDN driver v0.1.0\n");
-
- l1fsm.state_count = L1_STATE_COUNT;
- l1fsm.event_count = L1_EVENT_COUNT;
- l1fsm.strState = strL1State;
- l1fsm.strEvent = strL1Event;
- return FsmNew(&l1fsm, L1FnList, ARRAY_SIZE(L1FnList));
-}
-
-static void __exit hisax_isac_exit(void)
-{
- FsmFree(&l1fsm);
-}
-
-EXPORT_SYMBOL(isac_init);
-EXPORT_SYMBOL(isac_d_l2l1);
-
-EXPORT_SYMBOL(isacsx_setup);
-EXPORT_SYMBOL(isacsx_irq);
-
-EXPORT_SYMBOL(isac_setup);
-EXPORT_SYMBOL(isac_irq);
-
-module_init(hisax_isac_init);
-module_exit(hisax_isac_exit);
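The L1FnList table deleted above is the core of the removed layer-1 handling: every FsmNode row ties a (state, event) pair to a handler, hisax_isac_init() hands the table to FsmNew(), and FsmEvent() later pushes the C/I indications read from CIR0 through it. The fsm helpers themselves are not part of this hunk, so the sketch below only illustrates the table-driven dispatch idea with invented stand-ins (fsm_node, fsm_run, a three-state toy machine), not the driver's real FsmInst API.

/* Illustrative sketch only: (state, event) table dispatch in the style of
 * L1FnList above. The real code used FsmNode/FsmNew/FsmEvent from fsm.c;
 * everything here is simplified stand-in naming.
 */
#include <stdio.h>

enum { ST_RESET, ST_DEACTIVATED, ST_ACTIVATED };
enum { EV_ACTIVATE_IND, EV_DEACTIVATE_IND };

struct fsm_node {
	int state;
	int event;
	void (*handler)(int *state);
};

static void go_activated(int *state)   { *state = ST_ACTIVATED; }
static void go_deactivated(int *state) { *state = ST_DEACTIVATED; }

static const struct fsm_node fn_list[] = {
	{ ST_RESET,       EV_ACTIVATE_IND,   go_activated   },
	{ ST_DEACTIVATED, EV_ACTIVATE_IND,   go_activated   },
	{ ST_ACTIVATED,   EV_DEACTIVATE_IND, go_deactivated },
};

/* The real FsmNew()/FsmEvent() pair did the equivalent with the table
 * registered at init time; a linear scan is enough to show the idea.
 * Events with no row for the current state are simply dropped here.
 */
static void fsm_run(int *state, int event)
{
	unsigned i;

	for (i = 0; i < sizeof(fn_list) / sizeof(fn_list[0]); i++) {
		if (fn_list[i].state == *state && fn_list[i].event == event) {
			fn_list[i].handler(state);
			return;
		}
	}
}

int main(void)
{
	int state = ST_RESET;

	fsm_run(&state, EV_ACTIVATE_IND);	/* RESET -> ACTIVATED */
	fsm_run(&state, EV_DEACTIVATE_IND);	/* ACTIVATED -> DEACTIVATED */
	printf("final state: %d\n", state);
	return 0;
}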
diff --git a/drivers/isdn/hisax/hisax_isac.h b/drivers/isdn/hisax/hisax_isac.h
deleted file mode 100644
index d7301da97991..000000000000
--- a/drivers/isdn/hisax/hisax_isac.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __HISAX_ISAC_H__
-#define __HISAX_ISAC_H__
-
-#include <linux/kernel.h>
-#include "fsm.h"
-#include "hisax_if.h"
-
-#define TIMER3_VALUE 7000
-#define MAX_DFRAME_LEN_L1 300
-
-#define ISAC_IOM1 0
-
-struct isac {
- void *priv;
-
- u_long flags;
- struct hisax_d_if hisax_d_if;
- struct FsmInst l1m;
- struct FsmTimer timer;
- u_char mocr;
- u_char adf2;
- int type;
-
- u_char rcvbuf[MAX_DFRAME_LEN_L1];
- int rcvidx;
-
- struct sk_buff *tx_skb;
- int tx_cnt;
-
- u_char (*read_isac) (struct isac *, u_char);
- void (*write_isac) (struct isac *, u_char, u_char);
- void (*read_isac_fifo) (struct isac *, u_char *, int);
- void (*write_isac_fifo)(struct isac *, u_char *, int);
-};
-
-void isac_init(struct isac *isac);
-void isac_d_l2l1(struct hisax_if *hisax_d_if, int pr, void *arg);
-
-void isac_setup(struct isac *isac);
-void isac_irq(struct isac *isac);
-
-void isacsx_setup(struct isac *isac);
-void isacsx_irq(struct isac *isac);
-
-#endif
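Note that struct isac above carries no bus knowledge of its own: every register and FIFO access goes through the four read_isac/write_isac(_fifo) hooks, which is what let card drivers with different register access methods reuse the same chip logic. A minimal stand-alone sketch of that indirection follows; struct chip, mem_read/mem_write and dump_version are invented names, and register 0x2A is used only because the RBCH version register lives there in icc.h further down - the version decode itself mirrors isac_version() above.

/* Sketch of the access indirection used by struct isac: shared chip code
 * calls only the embedded hooks, and each card supplies its own backend.
 * All names here are stand-ins, not the driver's real API.
 */
#include <stdio.h>

struct chip {
	void *priv;	/* card-specific state for the hooks */
	unsigned char (*read_reg)(struct chip *, unsigned char);
	void (*write_reg)(struct chip *, unsigned char, unsigned char);
};

/* One possible backend: a fake register file kept in priv. */
static unsigned char mem_read(struct chip *c, unsigned char reg)
{
	return ((unsigned char *)c->priv)[reg];
}

static void mem_write(struct chip *c, unsigned char reg, unsigned char val)
{
	((unsigned char *)c->priv)[reg] = val;
}

/* Shared chip code, written only against the hooks (cf. isac_version()). */
static void dump_version(struct chip *c)
{
	unsigned char rbch = c->read_reg(c, 0x2A);

	printf("chip version field: %u\n", (rbch >> 5) & 3);
}

int main(void)
{
	unsigned char regs[256] = { 0 };
	struct chip c = { .priv = regs, .read_reg = mem_read, .write_reg = mem_write };

	c.write_reg(&c, 0x2A, 0x40);	/* pretend the hardware reports field 2 */
	dump_version(&c);
	return 0;
}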
diff --git a/drivers/isdn/hisax/hscx.c b/drivers/isdn/hisax/hscx.c
deleted file mode 100644
index 3e305fec0ed9..000000000000
--- a/drivers/isdn/hisax/hscx.c
+++ /dev/null
@@ -1,277 +0,0 @@
-/* $Id: hscx.c,v 1.24.2.4 2004/01/24 20:47:23 keil Exp $
- *
- * HSCX specific routines
- *
- * Author Karsten Keil
- * Copyright by Karsten Keil <keil@isdn4linux.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/init.h>
-#include "hisax.h"
-#include "hscx.h"
-#include "isac.h"
-#include "isdnl1.h"
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-
-static char *HSCXVer[] =
-{"A1", "?1", "A2", "?3", "A3", "V2.1", "?6", "?7",
- "?8", "?9", "?10", "?11", "?12", "?13", "?14", "???"};
-
-int
-HscxVersion(struct IsdnCardState *cs, char *s)
-{
- int verA, verB;
-
- verA = cs->BC_Read_Reg(cs, 0, HSCX_VSTR) & 0xf;
- verB = cs->BC_Read_Reg(cs, 1, HSCX_VSTR) & 0xf;
- printk(KERN_INFO "%s HSCX version A: %s B: %s\n", s,
- HSCXVer[verA], HSCXVer[verB]);
- if ((verA == 0) | (verA == 0xf) | (verB == 0) | (verB == 0xf))
- return (1);
- else
- return (0);
-}
-
-void
-modehscx(struct BCState *bcs, int mode, int bc)
-{
- struct IsdnCardState *cs = bcs->cs;
- int hscx = bcs->hw.hscx.hscx;
-
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "hscx %c mode %d ichan %d",
- 'A' + hscx, mode, bc);
- bcs->mode = mode;
- bcs->channel = bc;
- cs->BC_Write_Reg(cs, hscx, HSCX_XAD1, 0xFF);
- cs->BC_Write_Reg(cs, hscx, HSCX_XAD2, 0xFF);
- cs->BC_Write_Reg(cs, hscx, HSCX_RAH2, 0xFF);
- cs->BC_Write_Reg(cs, hscx, HSCX_XBCH, 0x0);
- cs->BC_Write_Reg(cs, hscx, HSCX_RLCR, 0x0);
- cs->BC_Write_Reg(cs, hscx, HSCX_CCR1,
- test_bit(HW_IPAC, &cs->HW_Flags) ? 0x82 : 0x85);
- cs->BC_Write_Reg(cs, hscx, HSCX_CCR2, 0x30);
- cs->BC_Write_Reg(cs, hscx, HSCX_XCCR, 7);
- cs->BC_Write_Reg(cs, hscx, HSCX_RCCR, 7);
-
- /* Switch IOM 1 SSI */
- if (test_bit(HW_IOM1, &cs->HW_Flags) && (hscx == 0))
- bc = 1 - bc;
-
- if (bc == 0) {
- cs->BC_Write_Reg(cs, hscx, HSCX_TSAX,
- test_bit(HW_IOM1, &cs->HW_Flags) ? 0x7 : bcs->hw.hscx.tsaxr0);
- cs->BC_Write_Reg(cs, hscx, HSCX_TSAR,
- test_bit(HW_IOM1, &cs->HW_Flags) ? 0x7 : bcs->hw.hscx.tsaxr0);
- } else {
- cs->BC_Write_Reg(cs, hscx, HSCX_TSAX, bcs->hw.hscx.tsaxr1);
- cs->BC_Write_Reg(cs, hscx, HSCX_TSAR, bcs->hw.hscx.tsaxr1);
- }
- switch (mode) {
- case (L1_MODE_NULL):
- cs->BC_Write_Reg(cs, hscx, HSCX_TSAX, 0x1f);
- cs->BC_Write_Reg(cs, hscx, HSCX_TSAR, 0x1f);
- cs->BC_Write_Reg(cs, hscx, HSCX_MODE, 0x84);
- break;
- case (L1_MODE_TRANS):
- cs->BC_Write_Reg(cs, hscx, HSCX_MODE, 0xe4);
- break;
- case (L1_MODE_HDLC):
- cs->BC_Write_Reg(cs, hscx, HSCX_CCR1,
- test_bit(HW_IPAC, &cs->HW_Flags) ? 0x8a : 0x8d);
- cs->BC_Write_Reg(cs, hscx, HSCX_MODE, 0x8c);
- break;
- }
- if (mode)
- cs->BC_Write_Reg(cs, hscx, HSCX_CMDR, 0x41);
- cs->BC_Write_Reg(cs, hscx, HSCX_ISTA, 0x00);
-}
-
-void
-hscx_l2l1(struct PStack *st, int pr, void *arg)
-{
- struct BCState *bcs = st->l1.bcs;
- u_long flags;
- struct sk_buff *skb = arg;
-
- switch (pr) {
- case (PH_DATA | REQUEST):
- spin_lock_irqsave(&bcs->cs->lock, flags);
- if (bcs->tx_skb) {
- skb_queue_tail(&bcs->squeue, skb);
- } else {
- bcs->tx_skb = skb;
- test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
- bcs->hw.hscx.count = 0;
- bcs->cs->BC_Send_Data(bcs);
- }
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
- break;
- case (PH_PULL | INDICATION):
- spin_lock_irqsave(&bcs->cs->lock, flags);
- if (bcs->tx_skb) {
- printk(KERN_WARNING "hscx_l2l1: this shouldn't happen\n");
- } else {
- test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
- bcs->tx_skb = skb;
- bcs->hw.hscx.count = 0;
- bcs->cs->BC_Send_Data(bcs);
- }
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
- break;
- case (PH_PULL | REQUEST):
- if (!bcs->tx_skb) {
- test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
- st->l1.l1l2(st, PH_PULL | CONFIRM, NULL);
- } else
- test_and_set_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
- break;
- case (PH_ACTIVATE | REQUEST):
- spin_lock_irqsave(&bcs->cs->lock, flags);
- test_and_set_bit(BC_FLG_ACTIV, &bcs->Flag);
- modehscx(bcs, st->l1.mode, st->l1.bc);
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
- l1_msg_b(st, pr, arg);
- break;
- case (PH_DEACTIVATE | REQUEST):
- l1_msg_b(st, pr, arg);
- break;
- case (PH_DEACTIVATE | CONFIRM):
- spin_lock_irqsave(&bcs->cs->lock, flags);
- test_and_clear_bit(BC_FLG_ACTIV, &bcs->Flag);
- test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
- modehscx(bcs, 0, st->l1.bc);
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
- st->l1.l1l2(st, PH_DEACTIVATE | CONFIRM, NULL);
- break;
- }
-}
-
-static void
-close_hscxstate(struct BCState *bcs)
-{
- modehscx(bcs, 0, bcs->channel);
- if (test_and_clear_bit(BC_FLG_INIT, &bcs->Flag)) {
- kfree(bcs->hw.hscx.rcvbuf);
- bcs->hw.hscx.rcvbuf = NULL;
- kfree(bcs->blog);
- bcs->blog = NULL;
- skb_queue_purge(&bcs->rqueue);
- skb_queue_purge(&bcs->squeue);
- if (bcs->tx_skb) {
- dev_kfree_skb_any(bcs->tx_skb);
- bcs->tx_skb = NULL;
- test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
- }
- }
-}
-
-int
-open_hscxstate(struct IsdnCardState *cs, struct BCState *bcs)
-{
- if (!test_and_set_bit(BC_FLG_INIT, &bcs->Flag)) {
- if (!(bcs->hw.hscx.rcvbuf = kmalloc(HSCX_BUFMAX, GFP_ATOMIC))) {
- printk(KERN_WARNING
- "HiSax: No memory for hscx.rcvbuf\n");
- test_and_clear_bit(BC_FLG_INIT, &bcs->Flag);
- return (1);
- }
- if (!(bcs->blog = kmalloc(MAX_BLOG_SPACE, GFP_ATOMIC))) {
- printk(KERN_WARNING
- "HiSax: No memory for bcs->blog\n");
- test_and_clear_bit(BC_FLG_INIT, &bcs->Flag);
- kfree(bcs->hw.hscx.rcvbuf);
- bcs->hw.hscx.rcvbuf = NULL;
- return (2);
- }
- skb_queue_head_init(&bcs->rqueue);
- skb_queue_head_init(&bcs->squeue);
- }
- bcs->tx_skb = NULL;
- test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
- bcs->event = 0;
- bcs->hw.hscx.rcvidx = 0;
- bcs->tx_cnt = 0;
- return (0);
-}
-
-static int
-setstack_hscx(struct PStack *st, struct BCState *bcs)
-{
- bcs->channel = st->l1.bc;
- if (open_hscxstate(st->l1.hardware, bcs))
- return (-1);
- st->l1.bcs = bcs;
- st->l2.l2l1 = hscx_l2l1;
- setstack_manager(st);
- bcs->st = st;
- setstack_l1_B(st);
- return (0);
-}
-
-void
-clear_pending_hscx_ints(struct IsdnCardState *cs)
-{
- int val, eval;
-
- val = cs->BC_Read_Reg(cs, 1, HSCX_ISTA);
- debugl1(cs, "HSCX B ISTA %x", val);
- if (val & 0x01) {
- eval = cs->BC_Read_Reg(cs, 1, HSCX_EXIR);
- debugl1(cs, "HSCX B EXIR %x", eval);
- }
- if (val & 0x02) {
- eval = cs->BC_Read_Reg(cs, 0, HSCX_EXIR);
- debugl1(cs, "HSCX A EXIR %x", eval);
- }
- val = cs->BC_Read_Reg(cs, 0, HSCX_ISTA);
- debugl1(cs, "HSCX A ISTA %x", val);
- val = cs->BC_Read_Reg(cs, 1, HSCX_STAR);
- debugl1(cs, "HSCX B STAR %x", val);
- val = cs->BC_Read_Reg(cs, 0, HSCX_STAR);
- debugl1(cs, "HSCX A STAR %x", val);
- /* disable all IRQ */
- cs->BC_Write_Reg(cs, 0, HSCX_MASK, 0xFF);
- cs->BC_Write_Reg(cs, 1, HSCX_MASK, 0xFF);
-}
-
-void
-inithscx(struct IsdnCardState *cs)
-{
- cs->bcs[0].BC_SetStack = setstack_hscx;
- cs->bcs[1].BC_SetStack = setstack_hscx;
- cs->bcs[0].BC_Close = close_hscxstate;
- cs->bcs[1].BC_Close = close_hscxstate;
- cs->bcs[0].hw.hscx.hscx = 0;
- cs->bcs[1].hw.hscx.hscx = 1;
- cs->bcs[0].hw.hscx.tsaxr0 = 0x2f;
- cs->bcs[0].hw.hscx.tsaxr1 = 3;
- cs->bcs[1].hw.hscx.tsaxr0 = 0x2f;
- cs->bcs[1].hw.hscx.tsaxr1 = 3;
- modehscx(cs->bcs, 0, 0);
- modehscx(cs->bcs + 1, 0, 0);
-}
-
-void
-inithscxisac(struct IsdnCardState *cs, int part)
-{
- if (part & 1) {
- clear_pending_isac_ints(cs);
- clear_pending_hscx_ints(cs);
- initisac(cs);
- inithscx(cs);
- }
- if (part & 2) {
- /* Reenable all IRQ */
- cs->writeisac(cs, ISAC_MASK, 0);
- cs->BC_Write_Reg(cs, 0, HSCX_MASK, 0);
- cs->BC_Write_Reg(cs, 1, HSCX_MASK, 0);
- /* RESET Receiver and Transmitter */
- cs->writeisac(cs, ISAC_CMDR, 0x41);
- }
-}
diff --git a/drivers/isdn/hisax/hscx.h b/drivers/isdn/hisax/hscx.h
deleted file mode 100644
index 1148b4bbe711..000000000000
--- a/drivers/isdn/hisax/hscx.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/* $Id: hscx.h,v 1.8.2.2 2004/01/12 22:52:26 keil Exp $
- *
- * HSCX specific defines
- *
- * Author Karsten Keil
- * Copyright by Karsten Keil <keil@isdn4linux.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-/* All Registers original Siemens Spec */
-
-#define HSCX_ISTA 0x20
-#define HSCX_CCR1 0x2f
-#define HSCX_CCR2 0x2c
-#define HSCX_TSAR 0x31
-#define HSCX_TSAX 0x30
-#define HSCX_XCCR 0x32
-#define HSCX_RCCR 0x33
-#define HSCX_MODE 0x22
-#define HSCX_CMDR 0x21
-#define HSCX_EXIR 0x24
-#define HSCX_XAD1 0x24
-#define HSCX_XAD2 0x25
-#define HSCX_RAH2 0x27
-#define HSCX_RSTA 0x27
-#define HSCX_TIMR 0x23
-#define HSCX_STAR 0x21
-#define HSCX_RBCL 0x25
-#define HSCX_XBCH 0x2d
-#define HSCX_VSTR 0x2e
-#define HSCX_RLCR 0x2e
-#define HSCX_MASK 0x20
-
-extern int HscxVersion(struct IsdnCardState *cs, char *s);
-extern void modehscx(struct BCState *bcs, int mode, int bc);
-extern void clear_pending_hscx_ints(struct IsdnCardState *cs);
-extern void inithscx(struct IsdnCardState *cs);
-extern void inithscxisac(struct IsdnCardState *cs, int part);
diff --git a/drivers/isdn/hisax/hscx_irq.c b/drivers/isdn/hisax/hscx_irq.c
deleted file mode 100644
index 0d7e783c8bef..000000000000
--- a/drivers/isdn/hisax/hscx_irq.c
+++ /dev/null
@@ -1,294 +0,0 @@
-/* $Id: hscx_irq.c,v 1.18.2.3 2004/02/11 13:21:34 keil Exp $
- *
- * low level b-channel stuff for Siemens HSCX
- *
- * Author Karsten Keil
- * Copyright by Karsten Keil <keil@isdn4linux.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- * This is an include file for fast inline IRQ stuff
- *
- */
-
-
-static inline void
-waitforCEC(struct IsdnCardState *cs, int hscx)
-{
- int to = 50;
-
- while ((READHSCX(cs, hscx, HSCX_STAR) & 0x04) && to) {
- udelay(1);
- to--;
- }
- if (!to)
- printk(KERN_WARNING "HiSax: waitforCEC timeout\n");
-}
-
-
-static inline void
-waitforXFW(struct IsdnCardState *cs, int hscx)
-{
- int to = 50;
-
- while (((READHSCX(cs, hscx, HSCX_STAR) & 0x44) != 0x40) && to) {
- udelay(1);
- to--;
- }
- if (!to)
- printk(KERN_WARNING "HiSax: waitforXFW timeout\n");
-}
-
-static inline void
-WriteHSCXCMDR(struct IsdnCardState *cs, int hscx, u_char data)
-{
- waitforCEC(cs, hscx);
- WRITEHSCX(cs, hscx, HSCX_CMDR, data);
-}
-
-
-
-static void
-hscx_empty_fifo(struct BCState *bcs, int count)
-{
- u_char *ptr;
- struct IsdnCardState *cs = bcs->cs;
-
- if ((cs->debug & L1_DEB_HSCX) && !(cs->debug & L1_DEB_HSCX_FIFO))
- debugl1(cs, "hscx_empty_fifo");
-
- if (bcs->hw.hscx.rcvidx + count > HSCX_BUFMAX) {
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "hscx_empty_fifo: incoming packet too large");
- WriteHSCXCMDR(cs, bcs->hw.hscx.hscx, 0x80);
- bcs->hw.hscx.rcvidx = 0;
- return;
- }
- ptr = bcs->hw.hscx.rcvbuf + bcs->hw.hscx.rcvidx;
- bcs->hw.hscx.rcvidx += count;
- READHSCXFIFO(cs, bcs->hw.hscx.hscx, ptr, count);
- WriteHSCXCMDR(cs, bcs->hw.hscx.hscx, 0x80);
- if (cs->debug & L1_DEB_HSCX_FIFO) {
- char *t = bcs->blog;
-
- t += sprintf(t, "hscx_empty_fifo %c cnt %d",
- bcs->hw.hscx.hscx ? 'B' : 'A', count);
- QuickHex(t, ptr, count);
- debugl1(cs, "%s", bcs->blog);
- }
-}
-
-static void
-hscx_fill_fifo(struct BCState *bcs)
-{
- struct IsdnCardState *cs = bcs->cs;
- int more, count;
- int fifo_size = test_bit(HW_IPAC, &cs->HW_Flags) ? 64 : 32;
- u_char *ptr;
-
- if ((cs->debug & L1_DEB_HSCX) && !(cs->debug & L1_DEB_HSCX_FIFO))
- debugl1(cs, "hscx_fill_fifo");
-
- if (!bcs->tx_skb)
- return;
- if (bcs->tx_skb->len <= 0)
- return;
-
- more = (bcs->mode == L1_MODE_TRANS) ? 1 : 0;
- if (bcs->tx_skb->len > fifo_size) {
- more = !0;
- count = fifo_size;
- } else
- count = bcs->tx_skb->len;
-
- waitforXFW(cs, bcs->hw.hscx.hscx);
- ptr = bcs->tx_skb->data;
- skb_pull(bcs->tx_skb, count);
- bcs->tx_cnt -= count;
- bcs->hw.hscx.count += count;
- WRITEHSCXFIFO(cs, bcs->hw.hscx.hscx, ptr, count);
- WriteHSCXCMDR(cs, bcs->hw.hscx.hscx, more ? 0x8 : 0xa);
- if (cs->debug & L1_DEB_HSCX_FIFO) {
- char *t = bcs->blog;
-
- t += sprintf(t, "hscx_fill_fifo %c cnt %d",
- bcs->hw.hscx.hscx ? 'B' : 'A', count);
- QuickHex(t, ptr, count);
- debugl1(cs, "%s", bcs->blog);
- }
-}
-
-static void
-hscx_interrupt(struct IsdnCardState *cs, u_char val, u_char hscx)
-{
- u_char r;
- struct BCState *bcs = cs->bcs + hscx;
- struct sk_buff *skb;
- int fifo_size = test_bit(HW_IPAC, &cs->HW_Flags) ? 64 : 32;
- int count;
-
- if (!test_bit(BC_FLG_INIT, &bcs->Flag))
- return;
-
- if (val & 0x80) { /* RME */
- r = READHSCX(cs, hscx, HSCX_RSTA);
- if ((r & 0xf0) != 0xa0) {
- if (!(r & 0x80)) {
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "HSCX invalid frame");
-#ifdef ERROR_STATISTIC
- bcs->err_inv++;
-#endif
- }
- if ((r & 0x40) && bcs->mode) {
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "HSCX RDO mode=%d",
- bcs->mode);
-#ifdef ERROR_STATISTIC
- bcs->err_rdo++;
-#endif
- }
- if (!(r & 0x20)) {
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "HSCX CRC error");
-#ifdef ERROR_STATISTIC
- bcs->err_crc++;
-#endif
- }
- WriteHSCXCMDR(cs, hscx, 0x80);
- } else {
- count = READHSCX(cs, hscx, HSCX_RBCL) & (
- test_bit(HW_IPAC, &cs->HW_Flags) ? 0x3f : 0x1f);
- if (count == 0)
- count = fifo_size;
- hscx_empty_fifo(bcs, count);
- if ((count = bcs->hw.hscx.rcvidx - 1) > 0) {
- if (cs->debug & L1_DEB_HSCX_FIFO)
- debugl1(cs, "HX Frame %d", count);
- if (!(skb = dev_alloc_skb(count)))
- printk(KERN_WARNING "HSCX: receive out of memory\n");
- else {
- skb_put_data(skb, bcs->hw.hscx.rcvbuf,
- count);
- skb_queue_tail(&bcs->rqueue, skb);
- }
- }
- }
- bcs->hw.hscx.rcvidx = 0;
- schedule_event(bcs, B_RCVBUFREADY);
- }
- if (val & 0x40) { /* RPF */
- hscx_empty_fifo(bcs, fifo_size);
- if (bcs->mode == L1_MODE_TRANS) {
- /* receive audio data */
- if (!(skb = dev_alloc_skb(fifo_size)))
- printk(KERN_WARNING "HiSax: receive out of memory\n");
- else {
- skb_put_data(skb, bcs->hw.hscx.rcvbuf,
- fifo_size);
- skb_queue_tail(&bcs->rqueue, skb);
- }
- bcs->hw.hscx.rcvidx = 0;
- schedule_event(bcs, B_RCVBUFREADY);
- }
- }
- if (val & 0x10) { /* XPR */
- if (bcs->tx_skb) {
- if (bcs->tx_skb->len) {
- hscx_fill_fifo(bcs);
- return;
- } else {
- if (test_bit(FLG_LLI_L1WAKEUP, &bcs->st->lli.flag) &&
- (PACKET_NOACK != bcs->tx_skb->pkt_type)) {
- u_long flags;
- spin_lock_irqsave(&bcs->aclock, flags);
- bcs->ackcnt += bcs->hw.hscx.count;
- spin_unlock_irqrestore(&bcs->aclock, flags);
- schedule_event(bcs, B_ACKPENDING);
- }
- dev_kfree_skb_irq(bcs->tx_skb);
- bcs->hw.hscx.count = 0;
- bcs->tx_skb = NULL;
- }
- }
- if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) {
- bcs->hw.hscx.count = 0;
- test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
- hscx_fill_fifo(bcs);
- } else {
- test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
- schedule_event(bcs, B_XMTBUFREADY);
- }
- }
-}
-
-static void
-hscx_int_main(struct IsdnCardState *cs, u_char val)
-{
-
- u_char exval;
- struct BCState *bcs;
-
- if (val & 0x01) {
- bcs = cs->bcs + 1;
- exval = READHSCX(cs, 1, HSCX_EXIR);
- if (exval & 0x40) {
- if (bcs->mode == 1)
- hscx_fill_fifo(bcs);
- else {
-#ifdef ERROR_STATISTIC
- bcs->err_tx++;
-#endif
- /* Here we lost a TX interrupt, so
- * restart transmitting the whole frame.
- */
- if (bcs->tx_skb) {
- skb_push(bcs->tx_skb, bcs->hw.hscx.count);
- bcs->tx_cnt += bcs->hw.hscx.count;
- bcs->hw.hscx.count = 0;
- }
- WriteHSCXCMDR(cs, bcs->hw.hscx.hscx, 0x01);
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "HSCX B EXIR %x Lost TX", exval);
- }
- } else if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "HSCX B EXIR %x", exval);
- }
- if (val & 0xf8) {
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "HSCX B interrupt %x", val);
- hscx_interrupt(cs, val, 1);
- }
- if (val & 0x02) {
- bcs = cs->bcs;
- exval = READHSCX(cs, 0, HSCX_EXIR);
- if (exval & 0x40) {
- if (bcs->mode == L1_MODE_TRANS)
- hscx_fill_fifo(bcs);
- else {
- /* Here we lost a TX interrupt, so
- * restart transmitting the whole frame.
- */
-#ifdef ERROR_STATISTIC
- bcs->err_tx++;
-#endif
- if (bcs->tx_skb) {
- skb_push(bcs->tx_skb, bcs->hw.hscx.count);
- bcs->tx_cnt += bcs->hw.hscx.count;
- bcs->hw.hscx.count = 0;
- }
- WriteHSCXCMDR(cs, bcs->hw.hscx.hscx, 0x01);
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "HSCX A EXIR %x Lost TX", exval);
- }
- } else if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "HSCX A EXIR %x", exval);
- }
- if (val & 0x04) {
- exval = READHSCX(cs, 0, HSCX_ISTA);
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "HSCX A interrupt %x", exval);
- hscx_interrupt(cs, exval, 0);
- }
-}
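The hscx_fill_fifo() removed just above and the isac_fill_fifo() earlier in this diff (icc_fill_fifo() below follows suit) all implement the same transmit rule: copy at most one FIFO's worth of the pending skb into the chip, then issue XTF, setting XME only when the frame's last byte went out. The stand-alone sketch below shows just that chunking decision; write_fifo(), issue_cmd(), struct tx_state and the command values are stand-ins, and the real code of course operated on sk_buffs and CMDR register writes rather than a plain buffer.

/* Sketch of the XTF/XME chunking rule shared by the removed *_fill_fifo()
 * routines: at most one FIFO of data per pass, "message end" only on the
 * final chunk. All names and values below are illustrative stand-ins.
 */
#include <stddef.h>
#include <stdio.h>

#define FIFO_SIZE 32
#define CMD_XTF   0x08		/* transmit frame */
#define CMD_XME   0x02		/* ...and this chunk ends the frame */

struct tx_state {
	const unsigned char *data;
	size_t len;		/* bytes still to send */
};

static void write_fifo(const unsigned char *p, size_t n) { (void)p; (void)n; }
static void issue_cmd(unsigned char cmd) { (void)cmd; }

/* The driver called this once per "transmit pool ready" (XPR) interrupt. */
static void fill_chunk(struct tx_state *tx)
{
	size_t count = tx->len;
	unsigned char cmd = CMD_XTF | CMD_XME;

	if (count > FIFO_SIZE) {
		count = FIFO_SIZE;
		cmd = CMD_XTF;	/* more chunks will follow */
	}
	write_fifo(tx->data, count);
	tx->data += count;
	tx->len -= count;
	issue_cmd(cmd);
	printf("wrote %zu byte chunk%s\n", count, (cmd & CMD_XME) ? " (last)" : "");
}

int main(void)
{
	static const unsigned char frame[70];	/* a 70 byte dummy frame */
	struct tx_state tx = { frame, sizeof(frame) };

	while (tx.len)		/* the real driver waits for XPR between chunks */
		fill_chunk(&tx);
	return 0;
}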
diff --git a/drivers/isdn/hisax/icc.c b/drivers/isdn/hisax/icc.c
deleted file mode 100644
index 831dd1bb81ef..000000000000
--- a/drivers/isdn/hisax/icc.c
+++ /dev/null
@@ -1,680 +0,0 @@
-/* $Id: icc.c,v 1.8.2.3 2004/01/13 14:31:25 keil Exp $
- *
- * ICC specific routines
- *
- * Author Matt Henderson & Guy Ellis
- * Copyright by Traverse Technologies Pty Ltd, www.travers.com.au
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- * 1999.6.25 Initial implementation of routines for Siemens ISDN
- * Communication Controller PEB 2070 based on the ISAC routines
- * written by Karsten Keil.
- *
- */
-
-#include <linux/init.h>
-#include "hisax.h"
-#include "icc.h"
-// #include "arcofi.h"
-#include "isdnl1.h"
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-
-#define DBUSY_TIMER_VALUE 80
-#define ARCOFI_USE 0
-
-static char *ICCVer[] =
-{"2070 A1/A3", "2070 B1", "2070 B2/B3", "2070 V2.4"};
-
-void
-ICCVersion(struct IsdnCardState *cs, char *s)
-{
- int val;
-
- val = cs->readisac(cs, ICC_RBCH);
- printk(KERN_INFO "%s ICC version (%x): %s\n", s, val, ICCVer[(val >> 5) & 3]);
-}
-
-static void
-ph_command(struct IsdnCardState *cs, unsigned int command)
-{
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "ph_command %x", command);
- cs->writeisac(cs, ICC_CIX0, (command << 2) | 3);
-}
-
-
-static void
-icc_new_ph(struct IsdnCardState *cs)
-{
- switch (cs->dc.icc.ph_state) {
- case (ICC_IND_EI1):
- ph_command(cs, ICC_CMD_DI);
- l1_msg(cs, HW_RESET | INDICATION, NULL);
- break;
- case (ICC_IND_DC):
- l1_msg(cs, HW_DEACTIVATE | CONFIRM, NULL);
- break;
- case (ICC_IND_DR):
- l1_msg(cs, HW_DEACTIVATE | INDICATION, NULL);
- break;
- case (ICC_IND_PU):
- l1_msg(cs, HW_POWERUP | CONFIRM, NULL);
- break;
- case (ICC_IND_FJ):
- l1_msg(cs, HW_RSYNC | INDICATION, NULL);
- break;
- case (ICC_IND_AR):
- l1_msg(cs, HW_INFO2 | INDICATION, NULL);
- break;
- case (ICC_IND_AI):
- l1_msg(cs, HW_INFO4 | INDICATION, NULL);
- break;
- default:
- break;
- }
-}
-
-static void
-icc_bh(struct work_struct *work)
-{
- struct IsdnCardState *cs =
- container_of(work, struct IsdnCardState, tqueue);
- struct PStack *stptr;
-
- if (test_and_clear_bit(D_CLEARBUSY, &cs->event)) {
- if (cs->debug)
- debugl1(cs, "D-Channel Busy cleared");
- stptr = cs->stlist;
- while (stptr != NULL) {
- stptr->l1.l1l2(stptr, PH_PAUSE | CONFIRM, NULL);
- stptr = stptr->next;
- }
- }
- if (test_and_clear_bit(D_L1STATECHANGE, &cs->event))
- icc_new_ph(cs);
- if (test_and_clear_bit(D_RCVBUFREADY, &cs->event))
- DChannel_proc_rcv(cs);
- if (test_and_clear_bit(D_XMTBUFREADY, &cs->event))
- DChannel_proc_xmt(cs);
-#if ARCOFI_USE
- if (!test_bit(HW_ARCOFI, &cs->HW_Flags))
- return;
- if (test_and_clear_bit(D_RX_MON1, &cs->event))
- arcofi_fsm(cs, ARCOFI_RX_END, NULL);
- if (test_and_clear_bit(D_TX_MON1, &cs->event))
- arcofi_fsm(cs, ARCOFI_TX_END, NULL);
-#endif
-}
-
-static void
-icc_empty_fifo(struct IsdnCardState *cs, int count)
-{
- u_char *ptr;
-
- if ((cs->debug & L1_DEB_ISAC) && !(cs->debug & L1_DEB_ISAC_FIFO))
- debugl1(cs, "icc_empty_fifo");
-
- if ((cs->rcvidx + count) >= MAX_DFRAME_LEN_L1) {
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "icc_empty_fifo overrun %d",
- cs->rcvidx + count);
- cs->writeisac(cs, ICC_CMDR, 0x80);
- cs->rcvidx = 0;
- return;
- }
- ptr = cs->rcvbuf + cs->rcvidx;
- cs->rcvidx += count;
- cs->readisacfifo(cs, ptr, count);
- cs->writeisac(cs, ICC_CMDR, 0x80);
- if (cs->debug & L1_DEB_ISAC_FIFO) {
- char *t = cs->dlog;
-
- t += sprintf(t, "icc_empty_fifo cnt %d", count);
- QuickHex(t, ptr, count);
- debugl1(cs, "%s", cs->dlog);
- }
-}
-
-static void
-icc_fill_fifo(struct IsdnCardState *cs)
-{
- int count, more;
- u_char *ptr;
-
- if ((cs->debug & L1_DEB_ISAC) && !(cs->debug & L1_DEB_ISAC_FIFO))
- debugl1(cs, "icc_fill_fifo");
-
- if (!cs->tx_skb)
- return;
-
- count = cs->tx_skb->len;
- if (count <= 0)
- return;
-
- more = 0;
- if (count > 32) {
- more = !0;
- count = 32;
- }
- ptr = cs->tx_skb->data;
- skb_pull(cs->tx_skb, count);
- cs->tx_cnt += count;
- cs->writeisacfifo(cs, ptr, count);
- cs->writeisac(cs, ICC_CMDR, more ? 0x8 : 0xa);
- if (test_and_set_bit(FLG_DBUSY_TIMER, &cs->HW_Flags)) {
- debugl1(cs, "icc_fill_fifo dbusytimer running");
- del_timer(&cs->dbusytimer);
- }
- cs->dbusytimer.expires = jiffies + ((DBUSY_TIMER_VALUE * HZ)/1000);
- add_timer(&cs->dbusytimer);
- if (cs->debug & L1_DEB_ISAC_FIFO) {
- char *t = cs->dlog;
-
- t += sprintf(t, "icc_fill_fifo cnt %d", count);
- QuickHex(t, ptr, count);
- debugl1(cs, "%s", cs->dlog);
- }
-}
-
-void
-icc_interrupt(struct IsdnCardState *cs, u_char val)
-{
- u_char exval, v1;
- struct sk_buff *skb;
- unsigned int count;
-
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "ICC interrupt %x", val);
- if (val & 0x80) { /* RME */
- exval = cs->readisac(cs, ICC_RSTA);
- if ((exval & 0x70) != 0x20) {
- if (exval & 0x40) {
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "ICC RDO");
-#ifdef ERROR_STATISTIC
- cs->err_rx++;
-#endif
- }
- if (!(exval & 0x20)) {
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "ICC CRC error");
-#ifdef ERROR_STATISTIC
- cs->err_crc++;
-#endif
- }
- cs->writeisac(cs, ICC_CMDR, 0x80);
- } else {
- count = cs->readisac(cs, ICC_RBCL) & 0x1f;
- if (count == 0)
- count = 32;
- icc_empty_fifo(cs, count);
- if ((count = cs->rcvidx) > 0) {
- cs->rcvidx = 0;
- if (!(skb = alloc_skb(count, GFP_ATOMIC)))
- printk(KERN_WARNING "HiSax: D receive out of memory\n");
- else {
- skb_put_data(skb, cs->rcvbuf, count);
- skb_queue_tail(&cs->rq, skb);
- }
- }
- }
- cs->rcvidx = 0;
- schedule_event(cs, D_RCVBUFREADY);
- }
- if (val & 0x40) { /* RPF */
- icc_empty_fifo(cs, 32);
- }
- if (val & 0x20) { /* RSC */
- /* never */
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "ICC RSC interrupt");
- }
- if (val & 0x10) { /* XPR */
- if (test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags))
- del_timer(&cs->dbusytimer);
- if (test_and_clear_bit(FLG_L1_DBUSY, &cs->HW_Flags))
- schedule_event(cs, D_CLEARBUSY);
- if (cs->tx_skb) {
- if (cs->tx_skb->len) {
- icc_fill_fifo(cs);
- goto afterXPR;
- } else {
- dev_kfree_skb_irq(cs->tx_skb);
- cs->tx_cnt = 0;
- cs->tx_skb = NULL;
- }
- }
- if ((cs->tx_skb = skb_dequeue(&cs->sq))) {
- cs->tx_cnt = 0;
- icc_fill_fifo(cs);
- } else
- schedule_event(cs, D_XMTBUFREADY);
- }
-afterXPR:
- if (val & 0x04) { /* CISQ */
- exval = cs->readisac(cs, ICC_CIR0);
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "ICC CIR0 %02X", exval);
- if (exval & 2) {
- cs->dc.icc.ph_state = (exval >> 2) & 0xf;
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "ph_state change %x", cs->dc.icc.ph_state);
- schedule_event(cs, D_L1STATECHANGE);
- }
- if (exval & 1) {
- exval = cs->readisac(cs, ICC_CIR1);
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "ICC CIR1 %02X", exval);
- }
- }
- if (val & 0x02) { /* SIN */
- /* never */
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "ICC SIN interrupt");
- }
- if (val & 0x01) { /* EXI */
- exval = cs->readisac(cs, ICC_EXIR);
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "ICC EXIR %02x", exval);
- if (exval & 0x80) { /* XMR */
- debugl1(cs, "ICC XMR");
- printk(KERN_WARNING "HiSax: ICC XMR\n");
- }
- if (exval & 0x40) { /* XDU */
- debugl1(cs, "ICC XDU");
- printk(KERN_WARNING "HiSax: ICC XDU\n");
-#ifdef ERROR_STATISTIC
- cs->err_tx++;
-#endif
- if (test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags))
- del_timer(&cs->dbusytimer);
- if (test_and_clear_bit(FLG_L1_DBUSY, &cs->HW_Flags))
- schedule_event(cs, D_CLEARBUSY);
- if (cs->tx_skb) { /* Restart frame */
- skb_push(cs->tx_skb, cs->tx_cnt);
- cs->tx_cnt = 0;
- icc_fill_fifo(cs);
- } else {
- printk(KERN_WARNING "HiSax: ICC XDU no skb\n");
- debugl1(cs, "ICC XDU no skb");
- }
- }
- if (exval & 0x04) { /* MOS */
- v1 = cs->readisac(cs, ICC_MOSR);
- if (cs->debug & L1_DEB_MONITOR)
- debugl1(cs, "ICC MOSR %02x", v1);
-#if ARCOFI_USE
- if (v1 & 0x08) {
- if (!cs->dc.icc.mon_rx) {
- if (!(cs->dc.icc.mon_rx = kmalloc(MAX_MON_FRAME, GFP_ATOMIC))) {
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "ICC MON RX out of memory!");
- cs->dc.icc.mocr &= 0xf0;
- cs->dc.icc.mocr |= 0x0a;
- cs->writeisac(cs, ICC_MOCR, cs->dc.icc.mocr);
- goto afterMONR0;
- } else
- cs->dc.icc.mon_rxp = 0;
- }
- if (cs->dc.icc.mon_rxp >= MAX_MON_FRAME) {
- cs->dc.icc.mocr &= 0xf0;
- cs->dc.icc.mocr |= 0x0a;
- cs->writeisac(cs, ICC_MOCR, cs->dc.icc.mocr);
- cs->dc.icc.mon_rxp = 0;
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "ICC MON RX overflow!");
- goto afterMONR0;
- }
- cs->dc.icc.mon_rx[cs->dc.icc.mon_rxp++] = cs->readisac(cs, ICC_MOR0);
- if (cs->debug & L1_DEB_MONITOR)
- debugl1(cs, "ICC MOR0 %02x", cs->dc.icc.mon_rx[cs->dc.icc.mon_rxp - 1]);
- if (cs->dc.icc.mon_rxp == 1) {
- cs->dc.icc.mocr |= 0x04;
- cs->writeisac(cs, ICC_MOCR, cs->dc.icc.mocr);
- }
- }
- afterMONR0:
- if (v1 & 0x80) {
- if (!cs->dc.icc.mon_rx) {
- if (!(cs->dc.icc.mon_rx = kmalloc(MAX_MON_FRAME, GFP_ATOMIC))) {
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "ICC MON RX out of memory!");
- cs->dc.icc.mocr &= 0x0f;
- cs->dc.icc.mocr |= 0xa0;
- cs->writeisac(cs, ICC_MOCR, cs->dc.icc.mocr);
- goto afterMONR1;
- } else
- cs->dc.icc.mon_rxp = 0;
- }
- if (cs->dc.icc.mon_rxp >= MAX_MON_FRAME) {
- cs->dc.icc.mocr &= 0x0f;
- cs->dc.icc.mocr |= 0xa0;
- cs->writeisac(cs, ICC_MOCR, cs->dc.icc.mocr);
- cs->dc.icc.mon_rxp = 0;
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "ICC MON RX overflow!");
- goto afterMONR1;
- }
- cs->dc.icc.mon_rx[cs->dc.icc.mon_rxp++] = cs->readisac(cs, ICC_MOR1);
- if (cs->debug & L1_DEB_MONITOR)
- debugl1(cs, "ICC MOR1 %02x", cs->dc.icc.mon_rx[cs->dc.icc.mon_rxp - 1]);
- cs->dc.icc.mocr |= 0x40;
- cs->writeisac(cs, ICC_MOCR, cs->dc.icc.mocr);
- }
- afterMONR1:
- if (v1 & 0x04) {
- cs->dc.icc.mocr &= 0xf0;
- cs->writeisac(cs, ICC_MOCR, cs->dc.icc.mocr);
- cs->dc.icc.mocr |= 0x0a;
- cs->writeisac(cs, ICC_MOCR, cs->dc.icc.mocr);
- schedule_event(cs, D_RX_MON0);
- }
- if (v1 & 0x40) {
- cs->dc.icc.mocr &= 0x0f;
- cs->writeisac(cs, ICC_MOCR, cs->dc.icc.mocr);
- cs->dc.icc.mocr |= 0xa0;
- cs->writeisac(cs, ICC_MOCR, cs->dc.icc.mocr);
- schedule_event(cs, D_RX_MON1);
- }
- if (v1 & 0x02) {
- if ((!cs->dc.icc.mon_tx) || (cs->dc.icc.mon_txc &&
- (cs->dc.icc.mon_txp >= cs->dc.icc.mon_txc) &&
- !(v1 & 0x08))) {
- cs->dc.icc.mocr &= 0xf0;
- cs->writeisac(cs, ICC_MOCR, cs->dc.icc.mocr);
- cs->dc.icc.mocr |= 0x0a;
- cs->writeisac(cs, ICC_MOCR, cs->dc.icc.mocr);
- if (cs->dc.icc.mon_txc &&
- (cs->dc.icc.mon_txp >= cs->dc.icc.mon_txc))
- schedule_event(cs, D_TX_MON0);
- goto AfterMOX0;
- }
- if (cs->dc.icc.mon_txc && (cs->dc.icc.mon_txp >= cs->dc.icc.mon_txc)) {
- schedule_event(cs, D_TX_MON0);
- goto AfterMOX0;
- }
- cs->writeisac(cs, ICC_MOX0,
- cs->dc.icc.mon_tx[cs->dc.icc.mon_txp++]);
- if (cs->debug & L1_DEB_MONITOR)
- debugl1(cs, "ICC %02x -> MOX0", cs->dc.icc.mon_tx[cs->dc.icc.mon_txp - 1]);
- }
- AfterMOX0:
- if (v1 & 0x20) {
- if ((!cs->dc.icc.mon_tx) || (cs->dc.icc.mon_txc &&
- (cs->dc.icc.mon_txp >= cs->dc.icc.mon_txc) &&
- !(v1 & 0x80))) {
- cs->dc.icc.mocr &= 0x0f;
- cs->writeisac(cs, ICC_MOCR, cs->dc.icc.mocr);
- cs->dc.icc.mocr |= 0xa0;
- cs->writeisac(cs, ICC_MOCR, cs->dc.icc.mocr);
- if (cs->dc.icc.mon_txc &&
- (cs->dc.icc.mon_txp >= cs->dc.icc.mon_txc))
- schedule_event(cs, D_TX_MON1);
- goto AfterMOX1;
- }
- if (cs->dc.icc.mon_txc && (cs->dc.icc.mon_txp >= cs->dc.icc.mon_txc)) {
- schedule_event(cs, D_TX_MON1);
- goto AfterMOX1;
- }
- cs->writeisac(cs, ICC_MOX1,
- cs->dc.icc.mon_tx[cs->dc.icc.mon_txp++]);
- if (cs->debug & L1_DEB_MONITOR)
- debugl1(cs, "ICC %02x -> MOX1", cs->dc.icc.mon_tx[cs->dc.icc.mon_txp - 1]);
- }
- AfterMOX1: ;
-#endif
- }
- }
-}
-
-static void
-ICC_l1hw(struct PStack *st, int pr, void *arg)
-{
- struct IsdnCardState *cs = (struct IsdnCardState *) st->l1.hardware;
- struct sk_buff *skb = arg;
- u_long flags;
- int val;
-
- switch (pr) {
- case (PH_DATA | REQUEST):
- if (cs->debug & DEB_DLOG_HEX)
- LogFrame(cs, skb->data, skb->len);
- if (cs->debug & DEB_DLOG_VERBOSE)
- dlogframe(cs, skb, 0);
- spin_lock_irqsave(&cs->lock, flags);
- if (cs->tx_skb) {
- skb_queue_tail(&cs->sq, skb);
-#ifdef L2FRAME_DEBUG /* psa */
- if (cs->debug & L1_DEB_LAPD)
- Logl2Frame(cs, skb, "PH_DATA Queued", 0);
-#endif
- } else {
- cs->tx_skb = skb;
- cs->tx_cnt = 0;
-#ifdef L2FRAME_DEBUG /* psa */
- if (cs->debug & L1_DEB_LAPD)
- Logl2Frame(cs, skb, "PH_DATA", 0);
-#endif
- icc_fill_fifo(cs);
- }
- spin_unlock_irqrestore(&cs->lock, flags);
- break;
- case (PH_PULL | INDICATION):
- spin_lock_irqsave(&cs->lock, flags);
- if (cs->tx_skb) {
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, " l2l1 tx_skb exist this shouldn't happen");
- skb_queue_tail(&cs->sq, skb);
- spin_unlock_irqrestore(&cs->lock, flags);
- break;
- }
- if (cs->debug & DEB_DLOG_HEX)
- LogFrame(cs, skb->data, skb->len);
- if (cs->debug & DEB_DLOG_VERBOSE)
- dlogframe(cs, skb, 0);
- cs->tx_skb = skb;
- cs->tx_cnt = 0;
-#ifdef L2FRAME_DEBUG /* psa */
- if (cs->debug & L1_DEB_LAPD)
- Logl2Frame(cs, skb, "PH_DATA_PULLED", 0);
-#endif
- icc_fill_fifo(cs);
- spin_unlock_irqrestore(&cs->lock, flags);
- break;
- case (PH_PULL | REQUEST):
-#ifdef L2FRAME_DEBUG /* psa */
- if (cs->debug & L1_DEB_LAPD)
- debugl1(cs, "-> PH_REQUEST_PULL");
-#endif
- if (!cs->tx_skb) {
- test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
- st->l1.l1l2(st, PH_PULL | CONFIRM, NULL);
- } else
- test_and_set_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
- break;
- case (HW_RESET | REQUEST):
- spin_lock_irqsave(&cs->lock, flags);
- if ((cs->dc.icc.ph_state == ICC_IND_EI1) ||
- (cs->dc.icc.ph_state == ICC_IND_DR))
- ph_command(cs, ICC_CMD_DI);
- else
- ph_command(cs, ICC_CMD_RES);
- spin_unlock_irqrestore(&cs->lock, flags);
- break;
- case (HW_ENABLE | REQUEST):
- spin_lock_irqsave(&cs->lock, flags);
- ph_command(cs, ICC_CMD_DI);
- spin_unlock_irqrestore(&cs->lock, flags);
- break;
- case (HW_INFO1 | REQUEST):
- spin_lock_irqsave(&cs->lock, flags);
- ph_command(cs, ICC_CMD_AR);
- spin_unlock_irqrestore(&cs->lock, flags);
- break;
- case (HW_INFO3 | REQUEST):
- spin_lock_irqsave(&cs->lock, flags);
- ph_command(cs, ICC_CMD_AI);
- spin_unlock_irqrestore(&cs->lock, flags);
- break;
- case (HW_TESTLOOP | REQUEST):
- spin_lock_irqsave(&cs->lock, flags);
- val = 0;
- if (1 & (long) arg)
- val |= 0x0c;
- if (2 & (long) arg)
- val |= 0x3;
- if (test_bit(HW_IOM1, &cs->HW_Flags)) {
- /* IOM 1 Mode */
- if (!val) {
- cs->writeisac(cs, ICC_SPCR, 0xa);
- cs->writeisac(cs, ICC_ADF1, 0x2);
- } else {
- cs->writeisac(cs, ICC_SPCR, val);
- cs->writeisac(cs, ICC_ADF1, 0xa);
- }
- } else {
- /* IOM 2 Mode */
- cs->writeisac(cs, ICC_SPCR, val);
- if (val)
- cs->writeisac(cs, ICC_ADF1, 0x8);
- else
- cs->writeisac(cs, ICC_ADF1, 0x0);
- }
- spin_unlock_irqrestore(&cs->lock, flags);
- break;
- case (HW_DEACTIVATE | RESPONSE):
- skb_queue_purge(&cs->rq);
- skb_queue_purge(&cs->sq);
- if (cs->tx_skb) {
- dev_kfree_skb_any(cs->tx_skb);
- cs->tx_skb = NULL;
- }
- if (test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags))
- del_timer(&cs->dbusytimer);
- if (test_and_clear_bit(FLG_L1_DBUSY, &cs->HW_Flags))
- schedule_event(cs, D_CLEARBUSY);
- break;
- default:
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "icc_l1hw unknown %04x", pr);
- break;
- }
-}
-
-static void
-setstack_icc(struct PStack *st, struct IsdnCardState *cs)
-{
- st->l1.l1hw = ICC_l1hw;
-}
-
-static void
-DC_Close_icc(struct IsdnCardState *cs) {
- kfree(cs->dc.icc.mon_rx);
- cs->dc.icc.mon_rx = NULL;
- kfree(cs->dc.icc.mon_tx);
- cs->dc.icc.mon_tx = NULL;
-}
-
-static void
-dbusy_timer_handler(struct timer_list *t)
-{
- struct IsdnCardState *cs = from_timer(cs, t, dbusytimer);
- struct PStack *stptr;
- int rbch, star;
-
- if (test_bit(FLG_DBUSY_TIMER, &cs->HW_Flags)) {
- rbch = cs->readisac(cs, ICC_RBCH);
- star = cs->readisac(cs, ICC_STAR);
- if (cs->debug)
- debugl1(cs, "D-Channel Busy RBCH %02x STAR %02x",
- rbch, star);
- if (rbch & ICC_RBCH_XAC) { /* D-Channel Busy */
- test_and_set_bit(FLG_L1_DBUSY, &cs->HW_Flags);
- stptr = cs->stlist;
- while (stptr != NULL) {
- stptr->l1.l1l2(stptr, PH_PAUSE | INDICATION, NULL);
- stptr = stptr->next;
- }
- } else {
- /* discard frame; reset transceiver */
- test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags);
- if (cs->tx_skb) {
- dev_kfree_skb_any(cs->tx_skb);
- cs->tx_cnt = 0;
- cs->tx_skb = NULL;
- } else {
- printk(KERN_WARNING "HiSax: ICC D-Channel Busy no skb\n");
- debugl1(cs, "D-Channel Busy no skb");
- }
- cs->writeisac(cs, ICC_CMDR, 0x01); /* Transmitter reset */
- cs->irq_func(cs->irq, cs);
- }
- }
-}
-
-void
-initicc(struct IsdnCardState *cs)
-{
- cs->setstack_d = setstack_icc;
- cs->DC_Close = DC_Close_icc;
- cs->dc.icc.mon_tx = NULL;
- cs->dc.icc.mon_rx = NULL;
- cs->writeisac(cs, ICC_MASK, 0xff);
- cs->dc.icc.mocr = 0xaa;
- if (test_bit(HW_IOM1, &cs->HW_Flags)) {
- /* IOM 1 Mode */
- cs->writeisac(cs, ICC_ADF2, 0x0);
- cs->writeisac(cs, ICC_SPCR, 0xa);
- cs->writeisac(cs, ICC_ADF1, 0x2);
- cs->writeisac(cs, ICC_STCR, 0x70);
- cs->writeisac(cs, ICC_MODE, 0xc9);
- } else {
- /* IOM 2 Mode */
- if (!cs->dc.icc.adf2)
- cs->dc.icc.adf2 = 0x80;
- cs->writeisac(cs, ICC_ADF2, cs->dc.icc.adf2);
- cs->writeisac(cs, ICC_SQXR, 0xa0);
- cs->writeisac(cs, ICC_SPCR, 0x20);
- cs->writeisac(cs, ICC_STCR, 0x70);
- cs->writeisac(cs, ICC_MODE, 0xca);
- cs->writeisac(cs, ICC_TIMR, 0x00);
- cs->writeisac(cs, ICC_ADF1, 0x20);
- }
- ph_command(cs, ICC_CMD_RES);
- cs->writeisac(cs, ICC_MASK, 0x0);
- ph_command(cs, ICC_CMD_DI);
-}
-
-void
-clear_pending_icc_ints(struct IsdnCardState *cs)
-{
- int val, eval;
-
- val = cs->readisac(cs, ICC_STAR);
- debugl1(cs, "ICC STAR %x", val);
- val = cs->readisac(cs, ICC_MODE);
- debugl1(cs, "ICC MODE %x", val);
- val = cs->readisac(cs, ICC_ADF2);
- debugl1(cs, "ICC ADF2 %x", val);
- val = cs->readisac(cs, ICC_ISTA);
- debugl1(cs, "ICC ISTA %x", val);
- if (val & 0x01) {
- eval = cs->readisac(cs, ICC_EXIR);
- debugl1(cs, "ICC EXIR %x", eval);
- }
- val = cs->readisac(cs, ICC_CIR0);
- debugl1(cs, "ICC CIR0 %x", val);
- cs->dc.icc.ph_state = (val >> 2) & 0xf;
- schedule_event(cs, D_L1STATECHANGE);
- /* Disable all IRQ */
- cs->writeisac(cs, ICC_MASK, 0xFF);
-}
-
-void setup_icc(struct IsdnCardState *cs)
-{
- INIT_WORK(&cs->tqueue, icc_bh);
- timer_setup(&cs->dbusytimer, dbusy_timer_handler, 0);
-}
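ph_command() and the CISQ branch of icc_interrupt() removed above are the two halves of the chip's 4-bit command/indication channel: outgoing codes are written to CIX0 as (code << 2) | 3, and incoming indications are read back from bits 2..5 of CIR0. The toy round-trip below just demonstrates that packing; encode_ci()/decode_ci() are invented helpers, and feeding the encoded byte straight back into the decoder is purely for illustration.

/* Toy illustration of the C/I code packing used by the removed ph_command()
 * and the CIR0 handling in icc_interrupt(). encode_ci/decode_ci are invented
 * names; the example value mirrors ICC_CMD_AR from icc.h below.
 */
#include <assert.h>
#include <stdio.h>

/* Command goes into CIX0 as bits 2..5, low two bits set (cf. ph_command). */
static unsigned char encode_ci(unsigned cmd)
{
	return (unsigned char)((cmd << 2) | 3);
}

/* Indication is read back from CIR0 bits 2..5 (cf. the CISQ handler). */
static unsigned decode_ci(unsigned char cir0)
{
	return (cir0 >> 2) & 0xf;
}

int main(void)
{
	unsigned cmd = 0x8;		/* ICC_CMD_AR, activation request */
	unsigned char wire = encode_ci(cmd);

	assert(decode_ci(wire) == cmd);	/* the packing round-trips */
	printf("CIX0 byte for command %#x: %#x\n", cmd, wire);
	return 0;
}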
diff --git a/drivers/isdn/hisax/icc.h b/drivers/isdn/hisax/icc.h
deleted file mode 100644
index f367df5d3669..000000000000
--- a/drivers/isdn/hisax/icc.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/* $Id: icc.h,v 1.4.2.2 2004/01/12 22:52:26 keil Exp $
- *
- * ICC specific routines
- *
- * Author Matt Henderson & Guy Ellis
- * Copyright by Traverse Technologies Pty Ltd, www.travers.com.au
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- * 1999.7.14 Initial implementation of routines for Siemens ISDN
- * Communication Controller PEB 2070 based on the ISAC routines
- * written by Karsten Keil.
- */
-
-/* All Registers original Siemens Spec */
-
-#define ICC_MASK 0x20
-#define ICC_ISTA 0x20
-#define ICC_STAR 0x21
-#define ICC_CMDR 0x21
-#define ICC_EXIR 0x24
-#define ICC_ADF2 0x39
-#define ICC_SPCR 0x30
-#define ICC_ADF1 0x38
-#define ICC_CIR0 0x31
-#define ICC_CIX0 0x31
-#define ICC_CIR1 0x33
-#define ICC_CIX1 0x33
-#define ICC_STCR 0x37
-#define ICC_MODE 0x22
-#define ICC_RSTA 0x27
-#define ICC_RBCL 0x25
-#define ICC_RBCH 0x2A
-#define ICC_TIMR 0x23
-#define ICC_SQXR 0x3b
-#define ICC_MOSR 0x3a
-#define ICC_MOCR 0x3a
-#define ICC_MOR0 0x32
-#define ICC_MOX0 0x32
-#define ICC_MOR1 0x34
-#define ICC_MOX1 0x34
-
-#define ICC_RBCH_XAC 0x80
-
-#define ICC_CMD_TIM 0x0
-#define ICC_CMD_RES 0x1
-#define ICC_CMD_DU 0x3
-#define ICC_CMD_EI1 0x4
-#define ICC_CMD_SSP 0x5
-#define ICC_CMD_DT 0x6
-#define ICC_CMD_AR 0x8
-#define ICC_CMD_ARL 0xA
-#define ICC_CMD_AI 0xC
-#define ICC_CMD_DI 0xF
-
-#define ICC_IND_DR 0x0
-#define ICC_IND_FJ 0x2
-#define ICC_IND_EI1 0x4
-#define ICC_IND_INT 0x6
-#define ICC_IND_PU 0x7
-#define ICC_IND_AR 0x8
-#define ICC_IND_ARL 0xA
-#define ICC_IND_AI 0xC
-#define ICC_IND_AIL 0xE
-#define ICC_IND_DC 0xF
-
-extern void ICCVersion(struct IsdnCardState *cs, char *s);
-extern void initicc(struct IsdnCardState *cs);
-extern void icc_interrupt(struct IsdnCardState *cs, u_char val);
-extern void clear_pending_icc_ints(struct IsdnCardState *cs);
-extern void setup_icc(struct IsdnCardState *);
diff --git a/drivers/isdn/hisax/ipac.h b/drivers/isdn/hisax/ipac.h
deleted file mode 100644
index 4f937f02ee34..000000000000
--- a/drivers/isdn/hisax/ipac.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* $Id: ipac.h,v 1.7.2.2 2004/01/12 22:52:26 keil Exp $
- *
- * IPAC specific defines
- *
- * Author Karsten Keil
- * Copyright by Karsten Keil <keil@isdn4linux.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-/* All Registers original Siemens Spec */
-
-#define IPAC_CONF 0xC0
-#define IPAC_MASK 0xC1
-#define IPAC_ISTA 0xC1
-#define IPAC_ID 0xC2
-#define IPAC_ACFG 0xC3
-#define IPAC_AOE 0xC4
-#define IPAC_ARX 0xC5
-#define IPAC_ATX 0xC5
-#define IPAC_PITA1 0xC6
-#define IPAC_PITA2 0xC7
-#define IPAC_POTA1 0xC8
-#define IPAC_POTA2 0xC9
-#define IPAC_PCFG 0xCA
-#define IPAC_SCFG 0xCB
-#define IPAC_TIMR2 0xCC
diff --git a/drivers/isdn/hisax/ipacx.c b/drivers/isdn/hisax/ipacx.c
deleted file mode 100644
index c7086c1534bd..000000000000
--- a/drivers/isdn/hisax/ipacx.c
+++ /dev/null
@@ -1,913 +0,0 @@
-/*
- *
- * IPACX specific routines
- *
- * Author Joerg Petersohn
- * Derived from hisax_isac.c, isac.c, hscx.c and others
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include "hisax_if.h"
-#include "hisax.h"
-#include "isdnl1.h"
-#include "ipacx.h"
-
-#define DBUSY_TIMER_VALUE 80
-#define TIMER3_VALUE 7000
-#define MAX_DFRAME_LEN_L1 300
-#define B_FIFO_SIZE 64
-#define D_FIFO_SIZE 32
-
-
-// ipacx interrupt mask values
-#define _MASK_IMASK 0x2E // global mask
-#define _MASKB_IMASK 0x0B
-#define _MASKD_IMASK 0x03 // all on
-
-//----------------------------------------------------------
-// local function declarations
-//----------------------------------------------------------
-static void ph_command(struct IsdnCardState *cs, unsigned int command);
-static inline void cic_int(struct IsdnCardState *cs);
-static void dch_l2l1(struct PStack *st, int pr, void *arg);
-static void dbusy_timer_handler(struct timer_list *t);
-static void dch_empty_fifo(struct IsdnCardState *cs, int count);
-static void dch_fill_fifo(struct IsdnCardState *cs);
-static inline void dch_int(struct IsdnCardState *cs);
-static void dch_setstack(struct PStack *st, struct IsdnCardState *cs);
-static void dch_init(struct IsdnCardState *cs);
-static void bch_l2l1(struct PStack *st, int pr, void *arg);
-static void bch_empty_fifo(struct BCState *bcs, int count);
-static void bch_fill_fifo(struct BCState *bcs);
-static void bch_int(struct IsdnCardState *cs, u_char hscx);
-static void bch_mode(struct BCState *bcs, int mode, int bc);
-static void bch_close_state(struct BCState *bcs);
-static int bch_open_state(struct IsdnCardState *cs, struct BCState *bcs);
-static int bch_setstack(struct PStack *st, struct BCState *bcs);
-static void bch_init(struct IsdnCardState *cs, int hscx);
-static void clear_pending_ints(struct IsdnCardState *cs);
-
-//----------------------------------------------------------
-// Issue Layer 1 command to chip
-//----------------------------------------------------------
-static void
-ph_command(struct IsdnCardState *cs, unsigned int command)
-{
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "ph_command (%#x) in (%#x)", command,
- cs->dc.isac.ph_state);
-//###################################
-// printk(KERN_INFO "ph_command (%#x)\n", command);
-//###################################
- cs->writeisac(cs, IPACX_CIX0, (command << 4) | 0x0E);
-}
-
-//----------------------------------------------------------
-// Transceiver interrupt handler
-//----------------------------------------------------------
-static inline void
-cic_int(struct IsdnCardState *cs)
-{
- u_char event;
-
- event = cs->readisac(cs, IPACX_CIR0) >> 4;
- if (cs->debug & L1_DEB_ISAC) debugl1(cs, "cic_int(event=%#x)", event);
-//#########################################
-// printk(KERN_INFO "cic_int(%x)\n", event);
-//#########################################
- cs->dc.isac.ph_state = event;
- schedule_event(cs, D_L1STATECHANGE);
-}
-
-//==========================================================
-// D channel functions
-//==========================================================
-
-//----------------------------------------------------------
-// Command entry point
-//----------------------------------------------------------
-static void
-dch_l2l1(struct PStack *st, int pr, void *arg)
-{
- struct IsdnCardState *cs = (struct IsdnCardState *) st->l1.hardware;
- struct sk_buff *skb = arg;
- u_char cda1_cr;
-
- switch (pr) {
- case (PH_DATA | REQUEST):
- if (cs->debug & DEB_DLOG_HEX) LogFrame(cs, skb->data, skb->len);
- if (cs->debug & DEB_DLOG_VERBOSE) dlogframe(cs, skb, 0);
- if (cs->tx_skb) {
- skb_queue_tail(&cs->sq, skb);
-#ifdef L2FRAME_DEBUG
- if (cs->debug & L1_DEB_LAPD) Logl2Frame(cs, skb, "PH_DATA Queued", 0);
-#endif
- } else {
- cs->tx_skb = skb;
- cs->tx_cnt = 0;
-#ifdef L2FRAME_DEBUG
- if (cs->debug & L1_DEB_LAPD) Logl2Frame(cs, skb, "PH_DATA", 0);
-#endif
- dch_fill_fifo(cs);
- }
- break;
-
- case (PH_PULL | INDICATION):
- if (cs->tx_skb) {
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, " l2l1 tx_skb exist this shouldn't happen");
- skb_queue_tail(&cs->sq, skb);
- break;
- }
- if (cs->debug & DEB_DLOG_HEX) LogFrame(cs, skb->data, skb->len);
- if (cs->debug & DEB_DLOG_VERBOSE) dlogframe(cs, skb, 0);
- cs->tx_skb = skb;
- cs->tx_cnt = 0;
-#ifdef L2FRAME_DEBUG
- if (cs->debug & L1_DEB_LAPD) Logl2Frame(cs, skb, "PH_DATA_PULLED", 0);
-#endif
- dch_fill_fifo(cs);
- break;
-
- case (PH_PULL | REQUEST):
-#ifdef L2FRAME_DEBUG
- if (cs->debug & L1_DEB_LAPD) debugl1(cs, "-> PH_REQUEST_PULL");
-#endif
- if (!cs->tx_skb) {
- clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
- st->l1.l1l2(st, PH_PULL | CONFIRM, NULL);
- } else
- set_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
- break;
-
- case (HW_RESET | REQUEST):
- case (HW_ENABLE | REQUEST):
- if ((cs->dc.isac.ph_state == IPACX_IND_RES) ||
- (cs->dc.isac.ph_state == IPACX_IND_DR) ||
- (cs->dc.isac.ph_state == IPACX_IND_DC))
- ph_command(cs, IPACX_CMD_TIM);
- else
- ph_command(cs, IPACX_CMD_RES);
- break;
-
- case (HW_INFO3 | REQUEST):
- ph_command(cs, IPACX_CMD_AR8);
- break;
-
- case (HW_TESTLOOP | REQUEST):
- cs->writeisac(cs, IPACX_CDA_TSDP10, 0x80); // Timeslot 0 is B1
- cs->writeisac(cs, IPACX_CDA_TSDP11, 0x81); // Timeslot 0 is B1
- cda1_cr = cs->readisac(cs, IPACX_CDA1_CR);
- (void) cs->readisac(cs, IPACX_CDA2_CR);
- if ((long)arg & 1) { // loop B1
- cs->writeisac(cs, IPACX_CDA1_CR, cda1_cr | 0x0a);
- }
- else { // B1 off
- cs->writeisac(cs, IPACX_CDA1_CR, cda1_cr & ~0x0a);
- }
- if ((long)arg & 2) { // loop B2
- cs->writeisac(cs, IPACX_CDA1_CR, cda1_cr | 0x14);
- }
- else { // B2 off
- cs->writeisac(cs, IPACX_CDA1_CR, cda1_cr & ~0x14);
- }
- break;
-
- case (HW_DEACTIVATE | RESPONSE):
- skb_queue_purge(&cs->rq);
- skb_queue_purge(&cs->sq);
- if (cs->tx_skb) {
- dev_kfree_skb_any(cs->tx_skb);
- cs->tx_skb = NULL;
- }
- if (test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags))
- del_timer(&cs->dbusytimer);
- break;
-
- default:
- if (cs->debug & L1_DEB_WARN) debugl1(cs, "dch_l2l1 unknown %04x", pr);
- break;
- }
-}
-
-//----------------------------------------------------------
-//----------------------------------------------------------
-static void
-dbusy_timer_handler(struct timer_list *t)
-{
- struct IsdnCardState *cs = from_timer(cs, t, dbusytimer);
- struct PStack *st;
- int rbchd, stard;
-
- if (test_bit(FLG_DBUSY_TIMER, &cs->HW_Flags)) {
- rbchd = cs->readisac(cs, IPACX_RBCHD);
- stard = cs->readisac(cs, IPACX_STARD);
- if (cs->debug)
- debugl1(cs, "D-Channel Busy RBCHD %02x STARD %02x", rbchd, stard);
- if (!(stard & 0x40)) { // D-Channel Busy
- set_bit(FLG_L1_DBUSY, &cs->HW_Flags);
- for (st = cs->stlist; st; st = st->next) {
- st->l1.l1l2(st, PH_PAUSE | INDICATION, NULL); // flow control on
- }
- } else {
- // seems we lost an interrupt; reset transceiver
- clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags);
- if (cs->tx_skb) {
- dev_kfree_skb_any(cs->tx_skb);
- cs->tx_cnt = 0;
- cs->tx_skb = NULL;
- } else {
- printk(KERN_WARNING "HiSax: ISAC D-Channel Busy no skb\n");
- debugl1(cs, "D-Channel Busy no skb");
- }
- cs->writeisac(cs, IPACX_CMDRD, 0x01); // Tx reset, generates XPR
- }
- }
-}
-
-//----------------------------------------------------------
-// Fill buffer from receive FIFO
-//----------------------------------------------------------
-static void
-dch_empty_fifo(struct IsdnCardState *cs, int count)
-{
- u_char *ptr;
-
- if ((cs->debug & L1_DEB_ISAC) && !(cs->debug & L1_DEB_ISAC_FIFO))
- debugl1(cs, "dch_empty_fifo()");
-
- // message too large, remove
- if ((cs->rcvidx + count) >= MAX_DFRAME_LEN_L1) {
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "dch_empty_fifo() incoming message too large");
- cs->writeisac(cs, IPACX_CMDRD, 0x80); // RMC
- cs->rcvidx = 0;
- return;
- }
-
- ptr = cs->rcvbuf + cs->rcvidx;
- cs->rcvidx += count;
-
- cs->readisacfifo(cs, ptr, count);
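-	// RMC (receive message complete) acknowledges the block just read and frees the FIFO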
- cs->writeisac(cs, IPACX_CMDRD, 0x80); // RMC
-
- if (cs->debug & L1_DEB_ISAC_FIFO) {
- char *t = cs->dlog;
-
- t += sprintf(t, "dch_empty_fifo() cnt %d", count);
- QuickHex(t, ptr, count);
- debugl1(cs, "%s", cs->dlog);
- }
-}
-
-//----------------------------------------------------------
-// Fill transmit FIFO
-//----------------------------------------------------------
-static void
-dch_fill_fifo(struct IsdnCardState *cs)
-{
- int count;
- u_char cmd, *ptr;
-
- if ((cs->debug & L1_DEB_ISAC) && !(cs->debug & L1_DEB_ISAC_FIFO))
- debugl1(cs, "dch_fill_fifo()");
-
- if (!cs->tx_skb) return;
- count = cs->tx_skb->len;
- if (count <= 0) return;
-
- if (count > D_FIFO_SIZE) {
- count = D_FIFO_SIZE;
- cmd = 0x08; // XTF
- } else {
- cmd = 0x0A; // XTF | XME
- }
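-	// XTF transmits the FIFO contents, XME additionally marks the end of the frame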
-
- ptr = cs->tx_skb->data;
- skb_pull(cs->tx_skb, count);
- cs->tx_cnt += count;
- cs->writeisacfifo(cs, ptr, count);
- cs->writeisac(cs, IPACX_CMDRD, cmd);
-
-	// set timeout for transmission control
- if (test_and_set_bit(FLG_DBUSY_TIMER, &cs->HW_Flags)) {
- debugl1(cs, "dch_fill_fifo dbusytimer running");
- del_timer(&cs->dbusytimer);
- }
- cs->dbusytimer.expires = jiffies + ((DBUSY_TIMER_VALUE * HZ)/1000);
- add_timer(&cs->dbusytimer);
-
- if (cs->debug & L1_DEB_ISAC_FIFO) {
- char *t = cs->dlog;
-
- t += sprintf(t, "dch_fill_fifo() cnt %d", count);
- QuickHex(t, ptr, count);
- debugl1(cs, "%s", cs->dlog);
- }
-}
-
-//----------------------------------------------------------
-// D channel interrupt handler
-//----------------------------------------------------------
-static inline void
-dch_int(struct IsdnCardState *cs)
-{
- struct sk_buff *skb;
- u_char istad, rstad;
- int count;
-
- istad = cs->readisac(cs, IPACX_ISTAD);
-//##############################################
-// printk(KERN_WARNING "dch_int(istad=%02x)\n", istad);
-//##############################################
-
- if (istad & 0x80) { // RME
- rstad = cs->readisac(cs, IPACX_RSTAD);
- if ((rstad & 0xf0) != 0xa0) { // !(VFR && !RDO && CRC && !RAB)
- if (!(rstad & 0x80))
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "dch_int(): invalid frame");
- if ((rstad & 0x40))
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "dch_int(): RDO");
- if (!(rstad & 0x20))
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "dch_int(): CRC error");
- cs->writeisac(cs, IPACX_CMDRD, 0x80); // RMC
- } else { // received frame ok
- count = cs->readisac(cs, IPACX_RBCLD);
-			if (count) count--; // RSTAD is last byte
- count &= D_FIFO_SIZE - 1;
- if (count == 0) count = D_FIFO_SIZE;
- dch_empty_fifo(cs, count);
- if ((count = cs->rcvidx) > 0) {
- cs->rcvidx = 0;
- if (!(skb = dev_alloc_skb(count)))
- printk(KERN_WARNING "HiSax dch_int(): receive out of memory\n");
- else {
- skb_put_data(skb, cs->rcvbuf, count);
- skb_queue_tail(&cs->rq, skb);
- }
- }
- }
- cs->rcvidx = 0;
- schedule_event(cs, D_RCVBUFREADY);
- }
-
- if (istad & 0x40) { // RPF
- dch_empty_fifo(cs, D_FIFO_SIZE);
- }
-
- if (istad & 0x20) { // RFO
- if (cs->debug & L1_DEB_WARN) debugl1(cs, "dch_int(): RFO");
- cs->writeisac(cs, IPACX_CMDRD, 0x40); //RRES
- }
-
- if (istad & 0x10) { // XPR
- if (test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags))
- del_timer(&cs->dbusytimer);
- if (test_and_clear_bit(FLG_L1_DBUSY, &cs->HW_Flags))
- schedule_event(cs, D_CLEARBUSY);
- if (cs->tx_skb) {
- if (cs->tx_skb->len) {
- dch_fill_fifo(cs);
- goto afterXPR;
- }
- else {
- dev_kfree_skb_irq(cs->tx_skb);
- cs->tx_skb = NULL;
- cs->tx_cnt = 0;
- }
- }
- if ((cs->tx_skb = skb_dequeue(&cs->sq))) {
- cs->tx_cnt = 0;
- dch_fill_fifo(cs);
- }
- else {
- schedule_event(cs, D_XMTBUFREADY);
- }
- }
-afterXPR:
-
- if (istad & 0x0C) { // XDU or XMR
- if (cs->debug & L1_DEB_WARN) debugl1(cs, "dch_int(): XDU");
- if (cs->tx_skb) {
- skb_push(cs->tx_skb, cs->tx_cnt); // retransmit
- cs->tx_cnt = 0;
- dch_fill_fifo(cs);
- } else {
- printk(KERN_WARNING "HiSax: ISAC XDU no skb\n");
- debugl1(cs, "ISAC XDU no skb");
- }
- }
-}
-
-//----------------------------------------------------------
-//----------------------------------------------------------
-static void
-dch_setstack(struct PStack *st, struct IsdnCardState *cs)
-{
- st->l1.l1hw = dch_l2l1;
-}
-
-//----------------------------------------------------------
-//----------------------------------------------------------
-static void
-dch_init(struct IsdnCardState *cs)
-{
- printk(KERN_INFO "HiSax: IPACX ISDN driver v0.1.0\n");
-
- cs->setstack_d = dch_setstack;
-
- timer_setup(&cs->dbusytimer, dbusy_timer_handler, 0);
-
- cs->writeisac(cs, IPACX_TR_CONF0, 0x00); // clear LDD
- cs->writeisac(cs, IPACX_TR_CONF2, 0x00); // enable transmitter
- cs->writeisac(cs, IPACX_MODED, 0xC9); // transparent mode 0, RAC, stop/go
- cs->writeisac(cs, IPACX_MON_CR, 0x00); // disable monitor channel
-}
-
-
-//==========================================================
-// B channel functions
-//==========================================================
-
-//----------------------------------------------------------
-// Entry point for commands
-//----------------------------------------------------------
-static void
-bch_l2l1(struct PStack *st, int pr, void *arg)
-{
- struct BCState *bcs = st->l1.bcs;
- struct sk_buff *skb = arg;
- u_long flags;
-
- switch (pr) {
- case (PH_DATA | REQUEST):
- spin_lock_irqsave(&bcs->cs->lock, flags);
- if (bcs->tx_skb) {
- skb_queue_tail(&bcs->squeue, skb);
- } else {
- bcs->tx_skb = skb;
- set_bit(BC_FLG_BUSY, &bcs->Flag);
- bcs->hw.hscx.count = 0;
- bch_fill_fifo(bcs);
- }
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
- break;
- case (PH_PULL | INDICATION):
- spin_lock_irqsave(&bcs->cs->lock, flags);
- if (bcs->tx_skb) {
- printk(KERN_WARNING "HiSax bch_l2l1(): this shouldn't happen\n");
- } else {
- set_bit(BC_FLG_BUSY, &bcs->Flag);
- bcs->tx_skb = skb;
- bcs->hw.hscx.count = 0;
- bch_fill_fifo(bcs);
- }
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
- break;
- case (PH_PULL | REQUEST):
- if (!bcs->tx_skb) {
- clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
- st->l1.l1l2(st, PH_PULL | CONFIRM, NULL);
- } else
- set_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
- break;
- case (PH_ACTIVATE | REQUEST):
- spin_lock_irqsave(&bcs->cs->lock, flags);
- set_bit(BC_FLG_ACTIV, &bcs->Flag);
- bch_mode(bcs, st->l1.mode, st->l1.bc);
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
- l1_msg_b(st, pr, arg);
- break;
- case (PH_DEACTIVATE | REQUEST):
- l1_msg_b(st, pr, arg);
- break;
- case (PH_DEACTIVATE | CONFIRM):
- spin_lock_irqsave(&bcs->cs->lock, flags);
- clear_bit(BC_FLG_ACTIV, &bcs->Flag);
- clear_bit(BC_FLG_BUSY, &bcs->Flag);
- bch_mode(bcs, 0, st->l1.bc);
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
- st->l1.l1l2(st, PH_DEACTIVATE | CONFIRM, NULL);
- break;
- }
-}
-
-//----------------------------------------------------------
-// Read B channel fifo to receive buffer
-//----------------------------------------------------------
-static void
-bch_empty_fifo(struct BCState *bcs, int count)
-{
- u_char *ptr, hscx;
- struct IsdnCardState *cs;
- int cnt;
-
- cs = bcs->cs;
- hscx = bcs->hw.hscx.hscx;
- if ((cs->debug & L1_DEB_HSCX) && !(cs->debug & L1_DEB_HSCX_FIFO))
- debugl1(cs, "bch_empty_fifo()");
-
- // message too large, remove
- if (bcs->hw.hscx.rcvidx + count > HSCX_BUFMAX) {
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "bch_empty_fifo() incoming packet too large");
- cs->BC_Write_Reg(cs, hscx, IPACX_CMDRB, 0x80); // RMC
- bcs->hw.hscx.rcvidx = 0;
- return;
- }
-
- ptr = bcs->hw.hscx.rcvbuf + bcs->hw.hscx.rcvidx;
- cnt = count;
- while (cnt--) *ptr++ = cs->BC_Read_Reg(cs, hscx, IPACX_RFIFOB);
- cs->BC_Write_Reg(cs, hscx, IPACX_CMDRB, 0x80); // RMC
-
- ptr = bcs->hw.hscx.rcvbuf + bcs->hw.hscx.rcvidx;
- bcs->hw.hscx.rcvidx += count;
-
- if (cs->debug & L1_DEB_HSCX_FIFO) {
- char *t = bcs->blog;
-
- t += sprintf(t, "bch_empty_fifo() B-%d cnt %d", hscx, count);
- QuickHex(t, ptr, count);
- debugl1(cs, "%s", bcs->blog);
- }
-}
-
-//----------------------------------------------------------
-// Fill transmit FIFO from the send buffer
-//----------------------------------------------------------
-static void
-bch_fill_fifo(struct BCState *bcs)
-{
- struct IsdnCardState *cs;
- int more, count, cnt;
- u_char *ptr, *p, hscx;
-
- cs = bcs->cs;
- if ((cs->debug & L1_DEB_HSCX) && !(cs->debug & L1_DEB_HSCX_FIFO))
- debugl1(cs, "bch_fill_fifo()");
-
- if (!bcs->tx_skb) return;
- if (bcs->tx_skb->len <= 0) return;
-
- hscx = bcs->hw.hscx.hscx;
- more = (bcs->mode == L1_MODE_TRANS) ? 1 : 0;
- if (bcs->tx_skb->len > B_FIFO_SIZE) {
- more = 1;
- count = B_FIFO_SIZE;
- } else {
- count = bcs->tx_skb->len;
- }
- cnt = count;
-
- p = ptr = bcs->tx_skb->data;
- skb_pull(bcs->tx_skb, count);
- bcs->tx_cnt -= count;
- bcs->hw.hscx.count += count;
- while (cnt--) cs->BC_Write_Reg(cs, hscx, IPACX_XFIFOB, *p++);
- cs->BC_Write_Reg(cs, hscx, IPACX_CMDRB, (more ? 0x08 : 0x0a));
-
- if (cs->debug & L1_DEB_HSCX_FIFO) {
- char *t = bcs->blog;
-
- t += sprintf(t, "%s() B-%d cnt %d", __func__, hscx, count);
- QuickHex(t, ptr, count);
- debugl1(cs, "%s", bcs->blog);
- }
-}
-
-//----------------------------------------------------------
-// B channel interrupt handler
-//----------------------------------------------------------
-static void
-bch_int(struct IsdnCardState *cs, u_char hscx)
-{
- u_char istab;
- struct BCState *bcs;
- struct sk_buff *skb;
- int count;
- u_char rstab;
-
- bcs = cs->bcs + hscx;
- istab = cs->BC_Read_Reg(cs, hscx, IPACX_ISTAB);
-//##############################################
-// printk(KERN_WARNING "bch_int(istab=%02x)\n", istab);
-//##############################################
- if (!test_bit(BC_FLG_INIT, &bcs->Flag)) return;
-
- if (istab & 0x80) { // RME
- rstab = cs->BC_Read_Reg(cs, hscx, IPACX_RSTAB);
- if ((rstab & 0xf0) != 0xa0) { // !(VFR && !RDO && CRC && !RAB)
- if (!(rstab & 0x80))
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "bch_int() B-%d: invalid frame", hscx);
- if ((rstab & 0x40) && (bcs->mode != L1_MODE_NULL))
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "bch_int() B-%d: RDO mode=%d", hscx, bcs->mode);
- if (!(rstab & 0x20))
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "bch_int() B-%d: CRC error", hscx);
- cs->BC_Write_Reg(cs, hscx, IPACX_CMDRB, 0x80); // RMC
- }
- else { // received frame ok
- count = cs->BC_Read_Reg(cs, hscx, IPACX_RBCLB) & (B_FIFO_SIZE - 1);
- if (count == 0) count = B_FIFO_SIZE;
- bch_empty_fifo(bcs, count);
- if ((count = bcs->hw.hscx.rcvidx - 1) > 0) {
- if (cs->debug & L1_DEB_HSCX_FIFO)
- debugl1(cs, "bch_int Frame %d", count);
- if (!(skb = dev_alloc_skb(count)))
- printk(KERN_WARNING "HiSax bch_int(): receive frame out of memory\n");
- else {
- skb_put_data(skb, bcs->hw.hscx.rcvbuf,
- count);
- skb_queue_tail(&bcs->rqueue, skb);
- }
- }
- }
- bcs->hw.hscx.rcvidx = 0;
- schedule_event(bcs, B_RCVBUFREADY);
- }
-
- if (istab & 0x40) { // RPF
- bch_empty_fifo(bcs, B_FIFO_SIZE);
-
- if (bcs->mode == L1_MODE_TRANS) { // queue every chunk
- // receive transparent audio data
- if (!(skb = dev_alloc_skb(B_FIFO_SIZE)))
- printk(KERN_WARNING "HiSax bch_int(): receive transparent out of memory\n");
- else {
- skb_put_data(skb, bcs->hw.hscx.rcvbuf,
- B_FIFO_SIZE);
- skb_queue_tail(&bcs->rqueue, skb);
- }
- bcs->hw.hscx.rcvidx = 0;
- schedule_event(bcs, B_RCVBUFREADY);
- }
- }
-
- if (istab & 0x20) { // RFO
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "bch_int() B-%d: RFO error", hscx);
- cs->BC_Write_Reg(cs, hscx, IPACX_CMDRB, 0x40); // RRES
- }
-
- if (istab & 0x10) { // XPR
- if (bcs->tx_skb) {
- if (bcs->tx_skb->len) {
- bch_fill_fifo(bcs);
- goto afterXPR;
- } else {
- if (test_bit(FLG_LLI_L1WAKEUP, &bcs->st->lli.flag) &&
- (PACKET_NOACK != bcs->tx_skb->pkt_type)) {
- u_long flags;
- spin_lock_irqsave(&bcs->aclock, flags);
- bcs->ackcnt += bcs->hw.hscx.count;
- spin_unlock_irqrestore(&bcs->aclock, flags);
- schedule_event(bcs, B_ACKPENDING);
- }
- }
- dev_kfree_skb_irq(bcs->tx_skb);
- bcs->hw.hscx.count = 0;
- bcs->tx_skb = NULL;
- }
- if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) {
- bcs->hw.hscx.count = 0;
- set_bit(BC_FLG_BUSY, &bcs->Flag);
- bch_fill_fifo(bcs);
- } else {
- clear_bit(BC_FLG_BUSY, &bcs->Flag);
- schedule_event(bcs, B_XMTBUFREADY);
- }
- }
-afterXPR:
-
- if (istab & 0x04) { // XDU
- if (bcs->mode == L1_MODE_TRANS) {
- bch_fill_fifo(bcs);
- }
- else {
- if (bcs->tx_skb) { // restart transmitting the whole frame
- skb_push(bcs->tx_skb, bcs->hw.hscx.count);
- bcs->tx_cnt += bcs->hw.hscx.count;
- bcs->hw.hscx.count = 0;
- }
- cs->BC_Write_Reg(cs, hscx, IPACX_CMDRB, 0x01); // XRES
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "bch_int() B-%d XDU error", hscx);
- }
- }
-}
-
-//----------------------------------------------------------
-//----------------------------------------------------------
-static void
-bch_mode(struct BCState *bcs, int mode, int bc)
-{
- struct IsdnCardState *cs = bcs->cs;
- int hscx = bcs->hw.hscx.hscx;
-
- bc = bc ? 1 : 0; // in case bc is greater than 1
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "mode_bch() switch B-%d mode %d chan %d", hscx, mode, bc);
- bcs->mode = mode;
- bcs->channel = bc;
-
-	// map controller to the corresponding timeslot
- if (!hscx)
- {
- cs->writeisac(cs, IPACX_BCHA_TSDP_BC1, 0x80 | bc);
- cs->writeisac(cs, IPACX_BCHA_CR, 0x88);
- }
- else
- {
- cs->writeisac(cs, IPACX_BCHB_TSDP_BC1, 0x80 | bc);
- cs->writeisac(cs, IPACX_BCHB_CR, 0x88);
- }
-
- switch (mode) {
- case (L1_MODE_NULL):
- cs->BC_Write_Reg(cs, hscx, IPACX_MODEB, 0xC0); // rec off
- cs->BC_Write_Reg(cs, hscx, IPACX_EXMB, 0x30); // std adj.
- cs->BC_Write_Reg(cs, hscx, IPACX_MASKB, 0xFF); // ints off
- cs->BC_Write_Reg(cs, hscx, IPACX_CMDRB, 0x41); // validate adjustments
- break;
- case (L1_MODE_TRANS):
- cs->BC_Write_Reg(cs, hscx, IPACX_MODEB, 0x88); // ext transp mode
- cs->BC_Write_Reg(cs, hscx, IPACX_EXMB, 0x00); // xxx00000
- cs->BC_Write_Reg(cs, hscx, IPACX_CMDRB, 0x41); // validate adjustments
- cs->BC_Write_Reg(cs, hscx, IPACX_MASKB, _MASKB_IMASK);
- break;
- case (L1_MODE_HDLC):
- cs->BC_Write_Reg(cs, hscx, IPACX_MODEB, 0xC8); // transp mode 0
- cs->BC_Write_Reg(cs, hscx, IPACX_EXMB, 0x01); // idle=hdlc flags crc enabled
- cs->BC_Write_Reg(cs, hscx, IPACX_CMDRB, 0x41); // validate adjustments
- cs->BC_Write_Reg(cs, hscx, IPACX_MASKB, _MASKB_IMASK);
- break;
- }
-}
-
-//----------------------------------------------------------
-//----------------------------------------------------------
-static void
-bch_close_state(struct BCState *bcs)
-{
- bch_mode(bcs, 0, bcs->channel);
- if (test_and_clear_bit(BC_FLG_INIT, &bcs->Flag)) {
- kfree(bcs->hw.hscx.rcvbuf);
- bcs->hw.hscx.rcvbuf = NULL;
- kfree(bcs->blog);
- bcs->blog = NULL;
- skb_queue_purge(&bcs->rqueue);
- skb_queue_purge(&bcs->squeue);
- if (bcs->tx_skb) {
- dev_kfree_skb_any(bcs->tx_skb);
- bcs->tx_skb = NULL;
- clear_bit(BC_FLG_BUSY, &bcs->Flag);
- }
- }
-}
-
-//----------------------------------------------------------
-//----------------------------------------------------------
-static int
-bch_open_state(struct IsdnCardState *cs, struct BCState *bcs)
-{
- if (!test_and_set_bit(BC_FLG_INIT, &bcs->Flag)) {
- if (!(bcs->hw.hscx.rcvbuf = kmalloc(HSCX_BUFMAX, GFP_ATOMIC))) {
- printk(KERN_WARNING
- "HiSax open_bchstate(): No memory for hscx.rcvbuf\n");
- clear_bit(BC_FLG_INIT, &bcs->Flag);
- return (1);
- }
- if (!(bcs->blog = kmalloc(MAX_BLOG_SPACE, GFP_ATOMIC))) {
- printk(KERN_WARNING
- "HiSax open_bchstate: No memory for bcs->blog\n");
- clear_bit(BC_FLG_INIT, &bcs->Flag);
- kfree(bcs->hw.hscx.rcvbuf);
- bcs->hw.hscx.rcvbuf = NULL;
- return (2);
- }
- skb_queue_head_init(&bcs->rqueue);
- skb_queue_head_init(&bcs->squeue);
- }
- bcs->tx_skb = NULL;
- clear_bit(BC_FLG_BUSY, &bcs->Flag);
- bcs->event = 0;
- bcs->hw.hscx.rcvidx = 0;
- bcs->tx_cnt = 0;
- return (0);
-}
-
-//----------------------------------------------------------
-//----------------------------------------------------------
-static int
-bch_setstack(struct PStack *st, struct BCState *bcs)
-{
- bcs->channel = st->l1.bc;
- if (bch_open_state(st->l1.hardware, bcs)) return (-1);
- st->l1.bcs = bcs;
- st->l2.l2l1 = bch_l2l1;
- setstack_manager(st);
- bcs->st = st;
- setstack_l1_B(st);
- return (0);
-}
-
-//----------------------------------------------------------
-//----------------------------------------------------------
-static void
-bch_init(struct IsdnCardState *cs, int hscx)
-{
- cs->bcs[hscx].BC_SetStack = bch_setstack;
- cs->bcs[hscx].BC_Close = bch_close_state;
- cs->bcs[hscx].hw.hscx.hscx = hscx;
- cs->bcs[hscx].cs = cs;
- bch_mode(cs->bcs + hscx, 0, hscx);
-}
-
-
-//==========================================================
-// Shared functions
-//==========================================================
-
-//----------------------------------------------------------
-// Main interrupt handler
-//----------------------------------------------------------
-void
-interrupt_ipacx(struct IsdnCardState *cs)
-{
- u_char ista;
-
- while ((ista = cs->readisac(cs, IPACX_ISTA))) {
-//#################################################
-// printk(KERN_WARNING "interrupt_ipacx(ista=%02x)\n", ista);
-//#################################################
- if (ista & 0x80) bch_int(cs, 0); // B channel interrupts
- if (ista & 0x40) bch_int(cs, 1);
-
- if (ista & 0x01) dch_int(cs); // D channel
- if (ista & 0x10) cic_int(cs); // Layer 1 state
- }
-}
-
-//----------------------------------------------------------
-// Clears chip interrupt status
-//----------------------------------------------------------
-static void
-clear_pending_ints(struct IsdnCardState *cs)
-{
- int ista;
-
- // all interrupts off
- cs->writeisac(cs, IPACX_MASK, 0xff);
- cs->writeisac(cs, IPACX_MASKD, 0xff);
- cs->BC_Write_Reg(cs, 0, IPACX_MASKB, 0xff);
- cs->BC_Write_Reg(cs, 1, IPACX_MASKB, 0xff);
-
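-	// reading the status registers acknowledges any interrupts still pending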
- ista = cs->readisac(cs, IPACX_ISTA);
- if (ista & 0x80) cs->BC_Read_Reg(cs, 0, IPACX_ISTAB);
- if (ista & 0x40) cs->BC_Read_Reg(cs, 1, IPACX_ISTAB);
- if (ista & 0x10) cs->readisac(cs, IPACX_CIR0);
- if (ista & 0x01) cs->readisac(cs, IPACX_ISTAD);
-}
-
-//----------------------------------------------------------
-// Performs chip configuration
-// The work to do depends on the bit mask in 'part'
-//----------------------------------------------------------
-void
-init_ipacx(struct IsdnCardState *cs, int part)
-{
- if (part & 1) { // initialise chip
-//##################################################
-// printk(KERN_INFO "init_ipacx(%x)\n", part);
-//##################################################
- clear_pending_ints(cs);
- bch_init(cs, 0);
- bch_init(cs, 1);
- dch_init(cs);
- }
- if (part & 2) { // reenable all interrupts and start chip
- cs->BC_Write_Reg(cs, 0, IPACX_MASKB, _MASKB_IMASK);
- cs->BC_Write_Reg(cs, 1, IPACX_MASKB, _MASKB_IMASK);
- cs->writeisac(cs, IPACX_MASKD, _MASKD_IMASK);
- cs->writeisac(cs, IPACX_MASK, _MASK_IMASK); // global mask register
-
- // reset HDLC Transmitters/receivers
- cs->writeisac(cs, IPACX_CMDRD, 0x41);
- cs->BC_Write_Reg(cs, 0, IPACX_CMDRB, 0x41);
- cs->BC_Write_Reg(cs, 1, IPACX_CMDRB, 0x41);
- ph_command(cs, IPACX_CMD_RES);
- }
-}
-
-//----------------- end of file -----------------------
diff --git a/drivers/isdn/hisax/ipacx.h b/drivers/isdn/hisax/ipacx.h
deleted file mode 100644
index e8a22e8f34b6..000000000000
--- a/drivers/isdn/hisax/ipacx.h
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
- *
- * IPACX specific defines
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-/* All registers according to the original Siemens spec */
-
-#ifndef INCLUDE_IPACX_H
-#define INCLUDE_IPACX_H
-
-/* D-channel registers */
-#define IPACX_RFIFOD 0x00 /* RD */
-#define IPACX_XFIFOD 0x00 /* WR */
-#define IPACX_ISTAD 0x20 /* RD */
-#define IPACX_MASKD 0x20 /* WR */
-#define IPACX_STARD 0x21 /* RD */
-#define IPACX_CMDRD 0x21 /* WR */
-#define IPACX_MODED 0x22 /* RD/WR */
-#define IPACX_EXMD1 0x23 /* RD/WR */
-#define IPACX_TIMR1 0x24 /* RD/WR */
-#define IPACX_SAP1 0x25 /* WR */
-#define IPACX_SAP2 0x26 /* WR */
-#define IPACX_RBCLD 0x26 /* RD */
-#define IPACX_RBCHD 0x27 /* RD */
-#define IPACX_TEI1 0x27 /* WR */
-#define IPACX_TEI2 0x28 /* WR */
-#define IPACX_RSTAD 0x28 /* RD */
-#define IPACX_TMD 0x29 /* RD/WR */
-#define IPACX_CIR0 0x2E /* RD */
-#define IPACX_CIX0 0x2E /* WR */
-#define IPACX_CIR1 0x2F /* RD */
-#define IPACX_CIX1 0x2F /* WR */
-
-/* Transceiver registers */
-#define IPACX_TR_CONF0 0x30 /* RD/WR */
-#define IPACX_TR_CONF1 0x31 /* RD/WR */
-#define IPACX_TR_CONF2 0x32 /* RD/WR */
-#define IPACX_TR_STA 0x33 /* RD */
-#define IPACX_TR_CMD 0x34 /* RD/WR */
-#define IPACX_SQRR1 0x35 /* RD */
-#define IPACX_SQXR1 0x35 /* WR */
-#define IPACX_SQRR2 0x36 /* RD */
-#define IPACX_SQXR2 0x36 /* WR */
-#define IPACX_SQRR3 0x37 /* RD */
-#define IPACX_SQXR3 0x37 /* WR */
-#define IPACX_ISTATR 0x38 /* RD */
-#define IPACX_MASKTR 0x39 /* RD/WR */
-#define IPACX_TR_MODE 0x3A /* RD/WR */
-#define IPACX_ACFG1 0x3C /* RD/WR */
-#define IPACX_ACFG2 0x3D /* RD/WR */
-#define IPACX_AOE 0x3E /* RD/WR */
-#define IPACX_ARX 0x3F /* RD */
-#define IPACX_ATX 0x3F /* WR */
-
-/* IOM: Timeslot, DPS, CDA */
-#define IPACX_CDA10 0x40 /* RD/WR */
-#define IPACX_CDA11 0x41 /* RD/WR */
-#define IPACX_CDA20 0x42 /* RD/WR */
-#define IPACX_CDA21 0x43 /* RD/WR */
-#define IPACX_CDA_TSDP10 0x44 /* RD/WR */
-#define IPACX_CDA_TSDP11 0x45 /* RD/WR */
-#define IPACX_CDA_TSDP20 0x46 /* RD/WR */
-#define IPACX_CDA_TSDP21 0x47 /* RD/WR */
-#define IPACX_BCHA_TSDP_BC1 0x48 /* RD/WR */
-#define IPACX_BCHA_TSDP_BC2 0x49 /* RD/WR */
-#define IPACX_BCHB_TSDP_BC1 0x4A /* RD/WR */
-#define IPACX_BCHB_TSDP_BC2 0x4B /* RD/WR */
-#define IPACX_TR_TSDP_BC1 0x4C /* RD/WR */
-#define IPACX_TR_TSDP_BC2 0x4D /* RD/WR */
-#define IPACX_CDA1_CR 0x4E /* RD/WR */
-#define IPACX_CDA2_CR 0x4F /* RD/WR */
-
-/* IOM: Control, Sync transfer, Monitor */
-#define IPACX_TR_CR 0x50 /* RD/WR */
-#define IPACX_TRC_CR 0x50 /* RD/WR */
-#define IPACX_BCHA_CR 0x51 /* RD/WR */
-#define IPACX_BCHB_CR 0x52 /* RD/WR */
-#define IPACX_DCI_CR 0x53 /* RD/WR */
-#define IPACX_DCIC_CR 0x53 /* RD/WR */
-#define IPACX_MON_CR 0x54 /* RD/WR */
-#define IPACX_SDS1_CR 0x55 /* RD/WR */
-#define IPACX_SDS2_CR 0x56 /* RD/WR */
-#define IPACX_IOM_CR 0x57 /* RD/WR */
-#define IPACX_STI 0x58 /* RD */
-#define IPACX_ASTI 0x58 /* WR */
-#define IPACX_MSTI 0x59 /* RD/WR */
-#define IPACX_SDS_CONF 0x5A /* RD/WR */
-#define IPACX_MCDA 0x5B /* RD */
-#define IPACX_MOR 0x5C /* RD */
-#define IPACX_MOX 0x5C /* WR */
-#define IPACX_MOSR 0x5D /* RD */
-#define IPACX_MOCR 0x5E /* RD/WR */
-#define IPACX_MSTA 0x5F /* RD */
-#define IPACX_MCONF 0x5F /* WR */
-
-/* Interrupt and general registers */
-#define IPACX_ISTA 0x60 /* RD */
-#define IPACX_MASK 0x60 /* WR */
-#define IPACX_AUXI 0x61 /* RD */
-#define IPACX_AUXM 0x61 /* WR */
-#define IPACX_MODE1 0x62 /* RD/WR */
-#define IPACX_MODE2 0x63 /* RD/WR */
-#define IPACX_ID 0x64 /* RD */
-#define IPACX_SRES 0x64 /* WR */
-#define IPACX_TIMR2 0x65 /* RD/WR */
-
-/* B-channel registers */
-#define IPACX_OFF_B1 0x70
-#define IPACX_OFF_B2 0x80
-
-#define IPACX_ISTAB 0x00 /* RD */
-#define IPACX_MASKB 0x00 /* WR */
-#define IPACX_STARB 0x01 /* RD */
-#define IPACX_CMDRB 0x01 /* WR */
-#define IPACX_MODEB 0x02 /* RD/WR */
-#define IPACX_EXMB 0x03 /* RD/WR */
-#define IPACX_RAH1 0x05 /* WR */
-#define IPACX_RAH2 0x06 /* WR */
-#define IPACX_RBCLB 0x06 /* RD */
-#define IPACX_RBCHB 0x07 /* RD */
-#define IPACX_RAL1 0x07 /* WR */
-#define IPACX_RAL2 0x08 /* WR */
-#define IPACX_RSTAB 0x08 /* RD */
-#define IPACX_TMB 0x09 /* RD/WR */
-#define IPACX_RFIFOB            0x0A	/* RD */
-#define IPACX_XFIFOB            0x0A	/* WR */
-
-/* Layer 1 Commands */
-#define IPACX_CMD_TIM 0x0
-#define IPACX_CMD_RES 0x1
-#define IPACX_CMD_SSP 0x2
-#define IPACX_CMD_SCP 0x3
-#define IPACX_CMD_AR8 0x8
-#define IPACX_CMD_AR10 0x9
-#define IPACX_CMD_ARL 0xa
-#define IPACX_CMD_DI 0xf
-
-/* Layer 1 Indications */
-#define IPACX_IND_DR 0x0
-#define IPACX_IND_RES 0x1
-#define IPACX_IND_TMA 0x2
-#define IPACX_IND_SLD 0x3
-#define IPACX_IND_RSY 0x4
-#define IPACX_IND_DR6 0x5
-#define IPACX_IND_PU 0x7
-#define IPACX_IND_AR 0x8
-#define IPACX_IND_ARL 0xa
-#define IPACX_IND_CVR 0xb
-#define IPACX_IND_AI8 0xc
-#define IPACX_IND_AI10 0xd
-#define IPACX_IND_AIL 0xe
-#define IPACX_IND_DC 0xf
-
-extern void init_ipacx(struct IsdnCardState *, int);
-extern void interrupt_ipacx(struct IsdnCardState *);
-extern void setup_isac(struct IsdnCardState *);
-
-#endif
diff --git a/drivers/isdn/hisax/isac.c b/drivers/isdn/hisax/isac.c
deleted file mode 100644
index bd40e0671ded..000000000000
--- a/drivers/isdn/hisax/isac.c
+++ /dev/null
@@ -1,681 +0,0 @@
-/* $Id: isac.c,v 1.31.2.3 2004/01/13 14:31:25 keil Exp $
- *
- * ISAC specific routines
- *
- * Author Karsten Keil
- * Copyright by Karsten Keil <keil@isdn4linux.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- * For changes and modifications please read
- * Documentation/isdn/HiSax.cert
- *
- */
-
-#include "hisax.h"
-#include "isac.h"
-#include "arcofi.h"
-#include "isdnl1.h"
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-
-#define DBUSY_TIMER_VALUE 80
-#define ARCOFI_USE 1
-
-static char *ISACVer[] =
-{"2086/2186 V1.1", "2085 B1", "2085 B2",
- "2085 V2.3"};
-
-void ISACVersion(struct IsdnCardState *cs, char *s)
-{
- int val;
-
- val = cs->readisac(cs, ISAC_RBCH);
- printk(KERN_INFO "%s ISAC version (%x): %s\n", s, val, ISACVer[(val >> 5) & 3]);
-}
-
-static void
-ph_command(struct IsdnCardState *cs, unsigned int command)
-{
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "ph_command %x", command);
- cs->writeisac(cs, ISAC_CIX0, (command << 2) | 3);
-}
-
-
-static void
-isac_new_ph(struct IsdnCardState *cs)
-{
- switch (cs->dc.isac.ph_state) {
- case (ISAC_IND_RS):
- case (ISAC_IND_EI):
- ph_command(cs, ISAC_CMD_DUI);
- l1_msg(cs, HW_RESET | INDICATION, NULL);
- break;
- case (ISAC_IND_DID):
- l1_msg(cs, HW_DEACTIVATE | CONFIRM, NULL);
- break;
- case (ISAC_IND_DR):
- l1_msg(cs, HW_DEACTIVATE | INDICATION, NULL);
- break;
- case (ISAC_IND_PU):
- l1_msg(cs, HW_POWERUP | CONFIRM, NULL);
- break;
- case (ISAC_IND_RSY):
- l1_msg(cs, HW_RSYNC | INDICATION, NULL);
- break;
- case (ISAC_IND_ARD):
- l1_msg(cs, HW_INFO2 | INDICATION, NULL);
- break;
- case (ISAC_IND_AI8):
- l1_msg(cs, HW_INFO4_P8 | INDICATION, NULL);
- break;
- case (ISAC_IND_AI10):
- l1_msg(cs, HW_INFO4_P10 | INDICATION, NULL);
- break;
- default:
- break;
- }
-}
-
-static void
-isac_bh(struct work_struct *work)
-{
- struct IsdnCardState *cs =
- container_of(work, struct IsdnCardState, tqueue);
- struct PStack *stptr;
-
- if (test_and_clear_bit(D_CLEARBUSY, &cs->event)) {
- if (cs->debug)
- debugl1(cs, "D-Channel Busy cleared");
- stptr = cs->stlist;
- while (stptr != NULL) {
- stptr->l1.l1l2(stptr, PH_PAUSE | CONFIRM, NULL);
- stptr = stptr->next;
- }
- }
- if (test_and_clear_bit(D_L1STATECHANGE, &cs->event))
- isac_new_ph(cs);
- if (test_and_clear_bit(D_RCVBUFREADY, &cs->event))
- DChannel_proc_rcv(cs);
- if (test_and_clear_bit(D_XMTBUFREADY, &cs->event))
- DChannel_proc_xmt(cs);
-#if ARCOFI_USE
- if (!test_bit(HW_ARCOFI, &cs->HW_Flags))
- return;
- if (test_and_clear_bit(D_RX_MON1, &cs->event))
- arcofi_fsm(cs, ARCOFI_RX_END, NULL);
- if (test_and_clear_bit(D_TX_MON1, &cs->event))
- arcofi_fsm(cs, ARCOFI_TX_END, NULL);
-#endif
-}
-
-static void
-isac_empty_fifo(struct IsdnCardState *cs, int count)
-{
- u_char *ptr;
-
- if ((cs->debug & L1_DEB_ISAC) && !(cs->debug & L1_DEB_ISAC_FIFO))
- debugl1(cs, "isac_empty_fifo");
-
- if ((cs->rcvidx + count) >= MAX_DFRAME_LEN_L1) {
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "isac_empty_fifo overrun %d",
- cs->rcvidx + count);
- cs->writeisac(cs, ISAC_CMDR, 0x80);
- cs->rcvidx = 0;
- return;
- }
- ptr = cs->rcvbuf + cs->rcvidx;
- cs->rcvidx += count;
- cs->readisacfifo(cs, ptr, count);
- cs->writeisac(cs, ISAC_CMDR, 0x80);
- if (cs->debug & L1_DEB_ISAC_FIFO) {
- char *t = cs->dlog;
-
- t += sprintf(t, "isac_empty_fifo cnt %d", count);
- QuickHex(t, ptr, count);
- debugl1(cs, "%s", cs->dlog);
- }
-}
-
-static void
-isac_fill_fifo(struct IsdnCardState *cs)
-{
- int count, more;
- u_char *ptr;
-
- if ((cs->debug & L1_DEB_ISAC) && !(cs->debug & L1_DEB_ISAC_FIFO))
- debugl1(cs, "isac_fill_fifo");
-
- if (!cs->tx_skb)
- return;
-
- count = cs->tx_skb->len;
- if (count <= 0)
- return;
-
- more = 0;
- if (count > 32) {
- more = !0;
- count = 32;
- }
- ptr = cs->tx_skb->data;
- skb_pull(cs->tx_skb, count);
- cs->tx_cnt += count;
- cs->writeisacfifo(cs, ptr, count);
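-	/* CMDR 0x08 = XTF (more data follows), 0x0a = XTF | XME (last chunk of the frame) */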
- cs->writeisac(cs, ISAC_CMDR, more ? 0x8 : 0xa);
- if (test_and_set_bit(FLG_DBUSY_TIMER, &cs->HW_Flags)) {
- debugl1(cs, "isac_fill_fifo dbusytimer running");
- del_timer(&cs->dbusytimer);
- }
- cs->dbusytimer.expires = jiffies + ((DBUSY_TIMER_VALUE * HZ)/1000);
- add_timer(&cs->dbusytimer);
- if (cs->debug & L1_DEB_ISAC_FIFO) {
- char *t = cs->dlog;
-
- t += sprintf(t, "isac_fill_fifo cnt %d", count);
- QuickHex(t, ptr, count);
- debugl1(cs, "%s", cs->dlog);
- }
-}
-
-void
-isac_interrupt(struct IsdnCardState *cs, u_char val)
-{
- u_char exval, v1;
- struct sk_buff *skb;
- unsigned int count;
-
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "ISAC interrupt %x", val);
- if (val & 0x80) { /* RME */
- exval = cs->readisac(cs, ISAC_RSTA);
- if ((exval & 0x70) != 0x20) {
- if (exval & 0x40) {
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "ISAC RDO");
-#ifdef ERROR_STATISTIC
- cs->err_rx++;
-#endif
- }
- if (!(exval & 0x20)) {
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "ISAC CRC error");
-#ifdef ERROR_STATISTIC
- cs->err_crc++;
-#endif
- }
- cs->writeisac(cs, ISAC_CMDR, 0x80);
- } else {
- count = cs->readisac(cs, ISAC_RBCL) & 0x1f;
- if (count == 0)
- count = 32;
- isac_empty_fifo(cs, count);
- count = cs->rcvidx;
- if (count > 0) {
- cs->rcvidx = 0;
- skb = alloc_skb(count, GFP_ATOMIC);
- if (!skb)
- printk(KERN_WARNING "HiSax: D receive out of memory\n");
- else {
- skb_put_data(skb, cs->rcvbuf, count);
- skb_queue_tail(&cs->rq, skb);
- }
- }
- }
- cs->rcvidx = 0;
- schedule_event(cs, D_RCVBUFREADY);
- }
- if (val & 0x40) { /* RPF */
- isac_empty_fifo(cs, 32);
- }
- if (val & 0x20) { /* RSC */
- /* never */
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "ISAC RSC interrupt");
- }
- if (val & 0x10) { /* XPR */
- if (test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags))
- del_timer(&cs->dbusytimer);
- if (test_and_clear_bit(FLG_L1_DBUSY, &cs->HW_Flags))
- schedule_event(cs, D_CLEARBUSY);
- if (cs->tx_skb) {
- if (cs->tx_skb->len) {
- isac_fill_fifo(cs);
- goto afterXPR;
- } else {
- dev_kfree_skb_irq(cs->tx_skb);
- cs->tx_cnt = 0;
- cs->tx_skb = NULL;
- }
- }
- cs->tx_skb = skb_dequeue(&cs->sq);
- if (cs->tx_skb) {
- cs->tx_cnt = 0;
- isac_fill_fifo(cs);
- } else
- schedule_event(cs, D_XMTBUFREADY);
- }
-afterXPR:
- if (val & 0x04) { /* CISQ */
- exval = cs->readisac(cs, ISAC_CIR0);
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "ISAC CIR0 %02X", exval);
- if (exval & 2) {
- cs->dc.isac.ph_state = (exval >> 2) & 0xf;
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "ph_state change %x", cs->dc.isac.ph_state);
- schedule_event(cs, D_L1STATECHANGE);
- }
- if (exval & 1) {
- exval = cs->readisac(cs, ISAC_CIR1);
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "ISAC CIR1 %02X", exval);
- }
- }
- if (val & 0x02) { /* SIN */
- /* never */
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "ISAC SIN interrupt");
- }
- if (val & 0x01) { /* EXI */
- exval = cs->readisac(cs, ISAC_EXIR);
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "ISAC EXIR %02x", exval);
- if (exval & 0x80) { /* XMR */
- debugl1(cs, "ISAC XMR");
- printk(KERN_WARNING "HiSax: ISAC XMR\n");
- }
- if (exval & 0x40) { /* XDU */
- debugl1(cs, "ISAC XDU");
- printk(KERN_WARNING "HiSax: ISAC XDU\n");
-#ifdef ERROR_STATISTIC
- cs->err_tx++;
-#endif
- if (test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags))
- del_timer(&cs->dbusytimer);
- if (test_and_clear_bit(FLG_L1_DBUSY, &cs->HW_Flags))
- schedule_event(cs, D_CLEARBUSY);
- if (cs->tx_skb) { /* Restart frame */
- skb_push(cs->tx_skb, cs->tx_cnt);
- cs->tx_cnt = 0;
- isac_fill_fifo(cs);
- } else {
- printk(KERN_WARNING "HiSax: ISAC XDU no skb\n");
- debugl1(cs, "ISAC XDU no skb");
- }
- }
- if (exval & 0x04) { /* MOS */
- v1 = cs->readisac(cs, ISAC_MOSR);
- if (cs->debug & L1_DEB_MONITOR)
- debugl1(cs, "ISAC MOSR %02x", v1);
-#if ARCOFI_USE
- if (v1 & 0x08) {
- if (!cs->dc.isac.mon_rx) {
- cs->dc.isac.mon_rx = kmalloc(MAX_MON_FRAME, GFP_ATOMIC);
- if (!cs->dc.isac.mon_rx) {
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "ISAC MON RX out of memory!");
- cs->dc.isac.mocr &= 0xf0;
- cs->dc.isac.mocr |= 0x0a;
- cs->writeisac(cs, ISAC_MOCR, cs->dc.isac.mocr);
- goto afterMONR0;
- } else
- cs->dc.isac.mon_rxp = 0;
- }
- if (cs->dc.isac.mon_rxp >= MAX_MON_FRAME) {
- cs->dc.isac.mocr &= 0xf0;
- cs->dc.isac.mocr |= 0x0a;
- cs->writeisac(cs, ISAC_MOCR, cs->dc.isac.mocr);
- cs->dc.isac.mon_rxp = 0;
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "ISAC MON RX overflow!");
- goto afterMONR0;
- }
- cs->dc.isac.mon_rx[cs->dc.isac.mon_rxp++] = cs->readisac(cs, ISAC_MOR0);
- if (cs->debug & L1_DEB_MONITOR)
- debugl1(cs, "ISAC MOR0 %02x", cs->dc.isac.mon_rx[cs->dc.isac.mon_rxp - 1]);
- if (cs->dc.isac.mon_rxp == 1) {
- cs->dc.isac.mocr |= 0x04;
- cs->writeisac(cs, ISAC_MOCR, cs->dc.isac.mocr);
- }
- }
- afterMONR0:
- if (v1 & 0x80) {
- if (!cs->dc.isac.mon_rx) {
- cs->dc.isac.mon_rx = kmalloc(MAX_MON_FRAME, GFP_ATOMIC);
- if (!cs->dc.isac.mon_rx) {
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "ISAC MON RX out of memory!");
- cs->dc.isac.mocr &= 0x0f;
- cs->dc.isac.mocr |= 0xa0;
- cs->writeisac(cs, ISAC_MOCR, cs->dc.isac.mocr);
- goto afterMONR1;
- } else
- cs->dc.isac.mon_rxp = 0;
- }
- if (cs->dc.isac.mon_rxp >= MAX_MON_FRAME) {
- cs->dc.isac.mocr &= 0x0f;
- cs->dc.isac.mocr |= 0xa0;
- cs->writeisac(cs, ISAC_MOCR, cs->dc.isac.mocr);
- cs->dc.isac.mon_rxp = 0;
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "ISAC MON RX overflow!");
- goto afterMONR1;
- }
- cs->dc.isac.mon_rx[cs->dc.isac.mon_rxp++] = cs->readisac(cs, ISAC_MOR1);
- if (cs->debug & L1_DEB_MONITOR)
- debugl1(cs, "ISAC MOR1 %02x", cs->dc.isac.mon_rx[cs->dc.isac.mon_rxp - 1]);
- cs->dc.isac.mocr |= 0x40;
- cs->writeisac(cs, ISAC_MOCR, cs->dc.isac.mocr);
- }
- afterMONR1:
- if (v1 & 0x04) {
- cs->dc.isac.mocr &= 0xf0;
- cs->writeisac(cs, ISAC_MOCR, cs->dc.isac.mocr);
- cs->dc.isac.mocr |= 0x0a;
- cs->writeisac(cs, ISAC_MOCR, cs->dc.isac.mocr);
- schedule_event(cs, D_RX_MON0);
- }
- if (v1 & 0x40) {
- cs->dc.isac.mocr &= 0x0f;
- cs->writeisac(cs, ISAC_MOCR, cs->dc.isac.mocr);
- cs->dc.isac.mocr |= 0xa0;
- cs->writeisac(cs, ISAC_MOCR, cs->dc.isac.mocr);
- schedule_event(cs, D_RX_MON1);
- }
- if (v1 & 0x02) {
- if ((!cs->dc.isac.mon_tx) || (cs->dc.isac.mon_txc &&
- (cs->dc.isac.mon_txp >= cs->dc.isac.mon_txc) &&
- !(v1 & 0x08))) {
- cs->dc.isac.mocr &= 0xf0;
- cs->writeisac(cs, ISAC_MOCR, cs->dc.isac.mocr);
- cs->dc.isac.mocr |= 0x0a;
- cs->writeisac(cs, ISAC_MOCR, cs->dc.isac.mocr);
- if (cs->dc.isac.mon_txc &&
- (cs->dc.isac.mon_txp >= cs->dc.isac.mon_txc))
- schedule_event(cs, D_TX_MON0);
- goto AfterMOX0;
- }
- if (cs->dc.isac.mon_txc && (cs->dc.isac.mon_txp >= cs->dc.isac.mon_txc)) {
- schedule_event(cs, D_TX_MON0);
- goto AfterMOX0;
- }
- cs->writeisac(cs, ISAC_MOX0,
- cs->dc.isac.mon_tx[cs->dc.isac.mon_txp++]);
- if (cs->debug & L1_DEB_MONITOR)
- debugl1(cs, "ISAC %02x -> MOX0", cs->dc.isac.mon_tx[cs->dc.isac.mon_txp - 1]);
- }
- AfterMOX0:
- if (v1 & 0x20) {
- if ((!cs->dc.isac.mon_tx) || (cs->dc.isac.mon_txc &&
- (cs->dc.isac.mon_txp >= cs->dc.isac.mon_txc) &&
- !(v1 & 0x80))) {
- cs->dc.isac.mocr &= 0x0f;
- cs->writeisac(cs, ISAC_MOCR, cs->dc.isac.mocr);
- cs->dc.isac.mocr |= 0xa0;
- cs->writeisac(cs, ISAC_MOCR, cs->dc.isac.mocr);
- if (cs->dc.isac.mon_txc &&
- (cs->dc.isac.mon_txp >= cs->dc.isac.mon_txc))
- schedule_event(cs, D_TX_MON1);
- goto AfterMOX1;
- }
- if (cs->dc.isac.mon_txc && (cs->dc.isac.mon_txp >= cs->dc.isac.mon_txc)) {
- schedule_event(cs, D_TX_MON1);
- goto AfterMOX1;
- }
- cs->writeisac(cs, ISAC_MOX1,
- cs->dc.isac.mon_tx[cs->dc.isac.mon_txp++]);
- if (cs->debug & L1_DEB_MONITOR)
- debugl1(cs, "ISAC %02x -> MOX1", cs->dc.isac.mon_tx[cs->dc.isac.mon_txp - 1]);
- }
- AfterMOX1:;
-#endif
- }
- }
-}
-
-static void
-ISAC_l1hw(struct PStack *st, int pr, void *arg)
-{
- struct IsdnCardState *cs = (struct IsdnCardState *) st->l1.hardware;
- struct sk_buff *skb = arg;
- u_long flags;
- int val;
-
- switch (pr) {
- case (PH_DATA | REQUEST):
- if (cs->debug & DEB_DLOG_HEX)
- LogFrame(cs, skb->data, skb->len);
- if (cs->debug & DEB_DLOG_VERBOSE)
- dlogframe(cs, skb, 0);
- spin_lock_irqsave(&cs->lock, flags);
- if (cs->tx_skb) {
- skb_queue_tail(&cs->sq, skb);
-#ifdef L2FRAME_DEBUG /* psa */
- if (cs->debug & L1_DEB_LAPD)
- Logl2Frame(cs, skb, "PH_DATA Queued", 0);
-#endif
- } else {
- cs->tx_skb = skb;
- cs->tx_cnt = 0;
-#ifdef L2FRAME_DEBUG /* psa */
- if (cs->debug & L1_DEB_LAPD)
- Logl2Frame(cs, skb, "PH_DATA", 0);
-#endif
- isac_fill_fifo(cs);
- }
- spin_unlock_irqrestore(&cs->lock, flags);
- break;
- case (PH_PULL | INDICATION):
- spin_lock_irqsave(&cs->lock, flags);
- if (cs->tx_skb) {
- if (cs->debug & L1_DEB_WARN)
-				debugl1(cs, " l2l1 tx_skb already exists, this shouldn't happen");
- skb_queue_tail(&cs->sq, skb);
- } else {
- if (cs->debug & DEB_DLOG_HEX)
- LogFrame(cs, skb->data, skb->len);
- if (cs->debug & DEB_DLOG_VERBOSE)
- dlogframe(cs, skb, 0);
- cs->tx_skb = skb;
- cs->tx_cnt = 0;
-#ifdef L2FRAME_DEBUG /* psa */
- if (cs->debug & L1_DEB_LAPD)
- Logl2Frame(cs, skb, "PH_DATA_PULLED", 0);
-#endif
- isac_fill_fifo(cs);
- }
- spin_unlock_irqrestore(&cs->lock, flags);
- break;
- case (PH_PULL | REQUEST):
-#ifdef L2FRAME_DEBUG /* psa */
- if (cs->debug & L1_DEB_LAPD)
- debugl1(cs, "-> PH_REQUEST_PULL");
-#endif
- if (!cs->tx_skb) {
- test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
- st->l1.l1l2(st, PH_PULL | CONFIRM, NULL);
- } else
- test_and_set_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
- break;
- case (HW_RESET | REQUEST):
- spin_lock_irqsave(&cs->lock, flags);
- if ((cs->dc.isac.ph_state == ISAC_IND_EI) ||
- (cs->dc.isac.ph_state == ISAC_IND_DR) ||
- (cs->dc.isac.ph_state == ISAC_IND_RS))
- ph_command(cs, ISAC_CMD_TIM);
- else
- ph_command(cs, ISAC_CMD_RS);
- spin_unlock_irqrestore(&cs->lock, flags);
- break;
- case (HW_ENABLE | REQUEST):
- spin_lock_irqsave(&cs->lock, flags);
- ph_command(cs, ISAC_CMD_TIM);
- spin_unlock_irqrestore(&cs->lock, flags);
- break;
- case (HW_INFO3 | REQUEST):
- spin_lock_irqsave(&cs->lock, flags);
- ph_command(cs, ISAC_CMD_AR8);
- spin_unlock_irqrestore(&cs->lock, flags);
- break;
- case (HW_TESTLOOP | REQUEST):
- spin_lock_irqsave(&cs->lock, flags);
- val = 0;
- if (1 & (long) arg)
- val |= 0x0c;
- if (2 & (long) arg)
- val |= 0x3;
- if (test_bit(HW_IOM1, &cs->HW_Flags)) {
- /* IOM 1 Mode */
- if (!val) {
- cs->writeisac(cs, ISAC_SPCR, 0xa);
- cs->writeisac(cs, ISAC_ADF1, 0x2);
- } else {
- cs->writeisac(cs, ISAC_SPCR, val);
- cs->writeisac(cs, ISAC_ADF1, 0xa);
- }
- } else {
- /* IOM 2 Mode */
- cs->writeisac(cs, ISAC_SPCR, val);
- if (val)
- cs->writeisac(cs, ISAC_ADF1, 0x8);
- else
- cs->writeisac(cs, ISAC_ADF1, 0x0);
- }
- spin_unlock_irqrestore(&cs->lock, flags);
- break;
- case (HW_DEACTIVATE | RESPONSE):
- skb_queue_purge(&cs->rq);
- skb_queue_purge(&cs->sq);
- if (cs->tx_skb) {
- dev_kfree_skb_any(cs->tx_skb);
- cs->tx_skb = NULL;
- }
- if (test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags))
- del_timer(&cs->dbusytimer);
- if (test_and_clear_bit(FLG_L1_DBUSY, &cs->HW_Flags))
- schedule_event(cs, D_CLEARBUSY);
- break;
- default:
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "isac_l1hw unknown %04x", pr);
- break;
- }
-}
-
-static void
-setstack_isac(struct PStack *st, struct IsdnCardState *cs)
-{
- st->l1.l1hw = ISAC_l1hw;
-}
-
-static void
-DC_Close_isac(struct IsdnCardState *cs)
-{
- kfree(cs->dc.isac.mon_rx);
- cs->dc.isac.mon_rx = NULL;
- kfree(cs->dc.isac.mon_tx);
- cs->dc.isac.mon_tx = NULL;
-}
-
-static void
-dbusy_timer_handler(struct timer_list *t)
-{
- struct IsdnCardState *cs = from_timer(cs, t, dbusytimer);
- struct PStack *stptr;
- int rbch, star;
-
- if (test_bit(FLG_DBUSY_TIMER, &cs->HW_Flags)) {
- rbch = cs->readisac(cs, ISAC_RBCH);
- star = cs->readisac(cs, ISAC_STAR);
- if (cs->debug)
- debugl1(cs, "D-Channel Busy RBCH %02x STAR %02x",
- rbch, star);
- if (rbch & ISAC_RBCH_XAC) { /* D-Channel Busy */
- test_and_set_bit(FLG_L1_DBUSY, &cs->HW_Flags);
- stptr = cs->stlist;
- while (stptr != NULL) {
- stptr->l1.l1l2(stptr, PH_PAUSE | INDICATION, NULL);
- stptr = stptr->next;
- }
- } else {
- /* discard frame; reset transceiver */
- test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags);
- if (cs->tx_skb) {
- dev_kfree_skb_any(cs->tx_skb);
- cs->tx_cnt = 0;
- cs->tx_skb = NULL;
- } else {
- printk(KERN_WARNING "HiSax: ISAC D-Channel Busy no skb\n");
- debugl1(cs, "D-Channel Busy no skb");
- }
- cs->writeisac(cs, ISAC_CMDR, 0x01); /* Transmitter reset */
- cs->irq_func(cs->irq, cs);
- }
- }
-}
-
-void initisac(struct IsdnCardState *cs)
-{
- cs->setstack_d = setstack_isac;
- cs->DC_Close = DC_Close_isac;
- cs->dc.isac.mon_tx = NULL;
- cs->dc.isac.mon_rx = NULL;
- cs->writeisac(cs, ISAC_MASK, 0xff);
- cs->dc.isac.mocr = 0xaa;
- if (test_bit(HW_IOM1, &cs->HW_Flags)) {
- /* IOM 1 Mode */
- cs->writeisac(cs, ISAC_ADF2, 0x0);
- cs->writeisac(cs, ISAC_SPCR, 0xa);
- cs->writeisac(cs, ISAC_ADF1, 0x2);
- cs->writeisac(cs, ISAC_STCR, 0x70);
- cs->writeisac(cs, ISAC_MODE, 0xc9);
- } else {
- /* IOM 2 Mode */
- if (!cs->dc.isac.adf2)
- cs->dc.isac.adf2 = 0x80;
- cs->writeisac(cs, ISAC_ADF2, cs->dc.isac.adf2);
- cs->writeisac(cs, ISAC_SQXR, 0x2f);
- cs->writeisac(cs, ISAC_SPCR, 0x00);
- cs->writeisac(cs, ISAC_STCR, 0x70);
- cs->writeisac(cs, ISAC_MODE, 0xc9);
- cs->writeisac(cs, ISAC_TIMR, 0x00);
- cs->writeisac(cs, ISAC_ADF1, 0x00);
- }
- ph_command(cs, ISAC_CMD_RS);
- cs->writeisac(cs, ISAC_MASK, 0x0);
-}
-
-void clear_pending_isac_ints(struct IsdnCardState *cs)
-{
- int val, eval;
-
- val = cs->readisac(cs, ISAC_STAR);
- debugl1(cs, "ISAC STAR %x", val);
- val = cs->readisac(cs, ISAC_MODE);
- debugl1(cs, "ISAC MODE %x", val);
- val = cs->readisac(cs, ISAC_ADF2);
- debugl1(cs, "ISAC ADF2 %x", val);
- val = cs->readisac(cs, ISAC_ISTA);
- debugl1(cs, "ISAC ISTA %x", val);
- if (val & 0x01) {
- eval = cs->readisac(cs, ISAC_EXIR);
- debugl1(cs, "ISAC EXIR %x", eval);
- }
- val = cs->readisac(cs, ISAC_CIR0);
- debugl1(cs, "ISAC CIR0 %x", val);
- cs->dc.isac.ph_state = (val >> 2) & 0xf;
- schedule_event(cs, D_L1STATECHANGE);
- /* Disable all IRQ */
- cs->writeisac(cs, ISAC_MASK, 0xFF);
-}
-
-void setup_isac(struct IsdnCardState *cs)
-{
- INIT_WORK(&cs->tqueue, isac_bh);
- timer_setup(&cs->dbusytimer, dbusy_timer_handler, 0);
-}
diff --git a/drivers/isdn/hisax/isac.h b/drivers/isdn/hisax/isac.h
deleted file mode 100644
index 04f16b91b822..000000000000
--- a/drivers/isdn/hisax/isac.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/* $Id: isac.h,v 1.9.2.2 2004/01/12 22:52:27 keil Exp $
- *
- * ISAC specific defines
- *
- * Author Karsten Keil
- * Copyright by Karsten Keil <keil@isdn4linux.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-/* All registers according to the original Siemens spec */
-
-#define ISAC_MASK 0x20
-#define ISAC_ISTA 0x20
-#define ISAC_STAR 0x21
-#define ISAC_CMDR 0x21
-#define ISAC_EXIR 0x24
-#define ISAC_ADF2 0x39
-#define ISAC_SPCR 0x30
-#define ISAC_ADF1 0x38
-#define ISAC_CIR0 0x31
-#define ISAC_CIX0 0x31
-#define ISAC_CIR1 0x33
-#define ISAC_CIX1 0x33
-#define ISAC_STCR 0x37
-#define ISAC_MODE 0x22
-#define ISAC_RSTA 0x27
-#define ISAC_RBCL 0x25
-#define ISAC_RBCH 0x2A
-#define ISAC_TIMR 0x23
-#define ISAC_SQXR 0x3b
-#define ISAC_MOSR 0x3a
-#define ISAC_MOCR 0x3a
-#define ISAC_MOR0 0x32
-#define ISAC_MOX0 0x32
-#define ISAC_MOR1 0x34
-#define ISAC_MOX1 0x34
-
-#define ISAC_RBCH_XAC 0x80
-
-#define ISAC_CMD_TIM 0x0
-#define ISAC_CMD_RS 0x1
-#define ISAC_CMD_SCZ 0x4
-#define ISAC_CMD_SSZ 0x2
-#define ISAC_CMD_AR8 0x8
-#define ISAC_CMD_AR10 0x9
-#define ISAC_CMD_ARL 0xA
-#define ISAC_CMD_DUI 0xF
-
-#define ISAC_IND_RS 0x1
-#define ISAC_IND_PU 0x7
-#define ISAC_IND_DR 0x0
-#define ISAC_IND_SD 0x2
-#define ISAC_IND_DIS 0x3
-#define ISAC_IND_EI 0x6
-#define ISAC_IND_RSY 0x4
-#define ISAC_IND_ARD 0x8
-#define ISAC_IND_TI 0xA
-#define ISAC_IND_ATI 0xB
-#define ISAC_IND_AI8 0xC
-#define ISAC_IND_AI10 0xD
-#define ISAC_IND_DID 0xF
-
-extern void ISACVersion(struct IsdnCardState *, char *);
-extern void setup_isac(struct IsdnCardState *);
-extern void initisac(struct IsdnCardState *);
-extern void isac_interrupt(struct IsdnCardState *, u_char);
-extern void clear_pending_isac_ints(struct IsdnCardState *);
diff --git a/drivers/isdn/hisax/isar.c b/drivers/isdn/hisax/isar.c
deleted file mode 100644
index 82c1879f5664..000000000000
--- a/drivers/isdn/hisax/isar.c
+++ /dev/null
@@ -1,1910 +0,0 @@
-/* $Id: isar.c,v 1.22.2.6 2004/02/11 13:21:34 keil Exp $
- *
- * isar.c ISAR (Siemens PSB 7110) specific routines
- *
- * Author Karsten Keil (keil@isdn4linux.de)
- *
- * This file is (c) under GNU General Public License
- *
- */
-
-#include <linux/init.h>
-#include "hisax.h"
-#include "isar.h"
-#include "isdnl1.h"
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-
-#define DBG_LOADFIRM 0
-#define DUMP_MBOXFRAME 2
-
-#define DLE 0x10
-#define ETX 0x03
-
-#define FAXMODCNT 13
-static const u_char faxmodulation[] = {3, 24, 48, 72, 73, 74, 96, 97, 98, 121, 122, 145, 146};
-static u_int modmask = 0x1fff;
-static int frm_extra_delay = 2;
-static int para_TOA = 6;
-static const u_char *FC1_CMD[] = {"FAE", "FTS", "FRS", "FTM", "FRM", "FTH", "FRH", "CTRL"};
-
-static void isar_setup(struct IsdnCardState *cs);
-static void isar_pump_cmd(struct BCState *bcs, u_char cmd, u_char para);
-static void ll_deliver_faxstat(struct BCState *bcs, u_char status);
-
-static inline int
-waitforHIA(struct IsdnCardState *cs, int timeout)
-{
-
- while ((cs->BC_Read_Reg(cs, 0, ISAR_HIA) & 1) && timeout) {
- udelay(1);
- timeout--;
- }
- if (!timeout)
- printk(KERN_WARNING "HiSax: ISAR waitforHIA timeout\n");
- return (timeout);
-}
-
-
-static int
-sendmsg(struct IsdnCardState *cs, u_char his, u_char creg, u_char len,
- u_char *msg)
-{
- int i;
-
- if (!waitforHIA(cs, 4000))
- return (0);
-#if DUMP_MBOXFRAME
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "sendmsg(%02x,%02x,%d)", his, creg, len);
-#endif
- cs->BC_Write_Reg(cs, 0, ISAR_CTRL_H, creg);
- cs->BC_Write_Reg(cs, 0, ISAR_CTRL_L, len);
- cs->BC_Write_Reg(cs, 0, ISAR_WADR, 0);
- if (msg && len) {
- cs->BC_Write_Reg(cs, 1, ISAR_MBOX, msg[0]);
- for (i = 1; i < len; i++)
- cs->BC_Write_Reg(cs, 2, ISAR_MBOX, msg[i]);
-#if DUMP_MBOXFRAME > 1
- if (cs->debug & L1_DEB_HSCX_FIFO) {
- char tmp[256], *t;
-
- i = len;
- while (i > 0) {
- t = tmp;
- t += sprintf(t, "sendmbox cnt %d", len);
- QuickHex(t, &msg[len-i], (i > 64) ? 64 : i);
- debugl1(cs, "%s", tmp);
- i -= 64;
- }
- }
-#endif
- }
- cs->BC_Write_Reg(cs, 1, ISAR_HIS, his);
- waitforHIA(cs, 10000);
- return (1);
-}
-
-/* Call only with IRQ disabled !!! */
-static inline void
-rcv_mbox(struct IsdnCardState *cs, struct isar_reg *ireg, u_char *msg)
-{
- int i;
-
- cs->BC_Write_Reg(cs, 1, ISAR_RADR, 0);
- if (msg && ireg->clsb) {
- msg[0] = cs->BC_Read_Reg(cs, 1, ISAR_MBOX);
- for (i = 1; i < ireg->clsb; i++)
- msg[i] = cs->BC_Read_Reg(cs, 2, ISAR_MBOX);
-#if DUMP_MBOXFRAME > 1
- if (cs->debug & L1_DEB_HSCX_FIFO) {
- char tmp[256], *t;
-
- i = ireg->clsb;
- while (i > 0) {
- t = tmp;
- t += sprintf(t, "rcv_mbox cnt %d", ireg->clsb);
- QuickHex(t, &msg[ireg->clsb - i], (i > 64) ? 64 : i);
- debugl1(cs, "%s", tmp);
- i -= 64;
- }
- }
-#endif
- }
- cs->BC_Write_Reg(cs, 1, ISAR_IIA, 0);
-}
-
-/* Call only with IRQ disabled !!! */
-static inline void
-get_irq_infos(struct IsdnCardState *cs, struct isar_reg *ireg)
-{
- ireg->iis = cs->BC_Read_Reg(cs, 1, ISAR_IIS);
- ireg->cmsb = cs->BC_Read_Reg(cs, 1, ISAR_CTRL_H);
- ireg->clsb = cs->BC_Read_Reg(cs, 1, ISAR_CTRL_L);
-#if DUMP_MBOXFRAME
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "irq_stat(%02x,%02x,%d)", ireg->iis, ireg->cmsb,
- ireg->clsb);
-#endif
-}
-
-static int
-waitrecmsg(struct IsdnCardState *cs, u_char *len,
- u_char *msg, int maxdelay)
-{
- int timeout = 0;
- struct isar_reg *ir = cs->bcs[0].hw.isar.reg;
-
-
- while ((!(cs->BC_Read_Reg(cs, 0, ISAR_IRQBIT) & ISAR_IRQSTA)) &&
- (timeout++ < maxdelay))
- udelay(1);
- if (timeout > maxdelay) {
- printk(KERN_WARNING"isar recmsg IRQSTA timeout\n");
- return (0);
- }
- get_irq_infos(cs, ir);
- rcv_mbox(cs, ir, msg);
- *len = ir->clsb;
- return (1);
-}
-
-int
-ISARVersion(struct IsdnCardState *cs, char *s)
-{
- int ver;
- u_char msg[] = ISAR_MSG_HWVER;
- u_char tmp[64];
- u_char len;
- u_long flags;
- int debug;
-
- cs->cardmsg(cs, CARD_RESET, NULL);
- spin_lock_irqsave(&cs->lock, flags);
- /* disable ISAR IRQ */
- cs->BC_Write_Reg(cs, 0, ISAR_IRQBIT, 0);
- debug = cs->debug;
- cs->debug &= ~(L1_DEB_HSCX | L1_DEB_HSCX_FIFO);
- if (!sendmsg(cs, ISAR_HIS_VNR, 0, 3, msg)) {
- spin_unlock_irqrestore(&cs->lock, flags);
- return (-1);
- }
- if (!waitrecmsg(cs, &len, tmp, 100000)) {
- spin_unlock_irqrestore(&cs->lock, flags);
- return (-2);
- }
- cs->debug = debug;
- if (cs->bcs[0].hw.isar.reg->iis == ISAR_IIS_VNR) {
- if (len == 1) {
- ver = tmp[0] & 0xf;
- printk(KERN_INFO "%s ISAR version %d\n", s, ver);
- } else
- ver = -3;
- } else
- ver = -4;
- spin_unlock_irqrestore(&cs->lock, flags);
- return (ver);
-}
-
-static int
-isar_load_firmware(struct IsdnCardState *cs, u_char __user *buf)
-{
- int cfu_ret, ret, size, cnt, debug;
- u_char len, nom, noc;
- u_short sadr, left, *sp;
- u_char __user *p = buf;
- u_char *msg, *tmpmsg, *mp, tmp[64];
- u_long flags;
- struct isar_reg *ireg = cs->bcs[0].hw.isar.reg;
-
- struct {u_short sadr;
- u_short len;
- u_short d_key;
- } blk_head;
-
-#define BLK_HEAD_SIZE 6
- if (1 != (ret = ISARVersion(cs, "Testing"))) {
- printk(KERN_ERR"isar_load_firmware wrong isar version %d\n", ret);
- return (1);
- }
- debug = cs->debug;
-#if DBG_LOADFIRM < 2
- cs->debug &= ~(L1_DEB_HSCX | L1_DEB_HSCX_FIFO);
-#endif
-
- cfu_ret = copy_from_user(&size, p, sizeof(int));
- if (cfu_ret) {
- printk(KERN_ERR "isar_load_firmware copy_from_user ret %d\n", cfu_ret);
- return -EFAULT;
- }
- p += sizeof(int);
- printk(KERN_DEBUG"isar_load_firmware size: %d\n", size);
- cnt = 0;
- /* disable ISAR IRQ */
- cs->BC_Write_Reg(cs, 0, ISAR_IRQBIT, 0);
- if (!(msg = kmalloc(256, GFP_KERNEL))) {
- printk(KERN_ERR"isar_load_firmware no buffer\n");
- return (1);
- }
- if (!(tmpmsg = kmalloc(256, GFP_KERNEL))) {
- printk(KERN_ERR"isar_load_firmware no tmp buffer\n");
- kfree(msg);
- return (1);
- }
- spin_lock_irqsave(&cs->lock, flags);
- /* disable ISAR IRQ */
- cs->BC_Write_Reg(cs, 0, ISAR_IRQBIT, 0);
- spin_unlock_irqrestore(&cs->lock, flags);
- while (cnt < size) {
- if ((ret = copy_from_user(&blk_head, p, BLK_HEAD_SIZE))) {
- printk(KERN_ERR"isar_load_firmware copy_from_user ret %d\n", ret);
- goto reterror;
- }
-#ifdef __BIG_ENDIAN
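-		/* the firmware image stores these fields little-endian; byte-swap for big-endian hosts */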
- sadr = (blk_head.sadr & 0xff) * 256 + blk_head.sadr / 256;
- blk_head.sadr = sadr;
- sadr = (blk_head.len & 0xff) * 256 + blk_head.len / 256;
- blk_head.len = sadr;
- sadr = (blk_head.d_key & 0xff) * 256 + blk_head.d_key / 256;
- blk_head.d_key = sadr;
-#endif /* __BIG_ENDIAN */
- cnt += BLK_HEAD_SIZE;
- p += BLK_HEAD_SIZE;
- printk(KERN_DEBUG"isar firmware block (%#x,%5d,%#x)\n",
- blk_head.sadr, blk_head.len, blk_head.d_key & 0xff);
- sadr = blk_head.sadr;
- left = blk_head.len;
- spin_lock_irqsave(&cs->lock, flags);
- if (!sendmsg(cs, ISAR_HIS_DKEY, blk_head.d_key & 0xff, 0, NULL)) {
- printk(KERN_ERR"isar sendmsg dkey failed\n");
- ret = 1; goto reterr_unlock;
- }
- if (!waitrecmsg(cs, &len, tmp, 100000)) {
- printk(KERN_ERR"isar waitrecmsg dkey failed\n");
- ret = 1; goto reterr_unlock;
- }
- if ((ireg->iis != ISAR_IIS_DKEY) || ireg->cmsb || len) {
- printk(KERN_ERR"isar wrong dkey response (%x,%x,%x)\n",
- ireg->iis, ireg->cmsb, len);
- ret = 1; goto reterr_unlock;
- }
- spin_unlock_irqrestore(&cs->lock, flags);
- while (left > 0) {
- if (left > 126)
- noc = 126;
- else
- noc = left;
- nom = 2 * noc;
- mp = msg;
- *mp++ = sadr / 256;
- *mp++ = sadr % 256;
- left -= noc;
- *mp++ = noc;
- if ((ret = copy_from_user(tmpmsg, p, nom))) {
- printk(KERN_ERR"isar_load_firmware copy_from_user ret %d\n", ret);
- goto reterror;
- }
- p += nom;
- cnt += nom;
- nom += 3;
- sp = (u_short *)tmpmsg;
-#if DBG_LOADFIRM
- printk(KERN_DEBUG"isar: load %3d words at %04x left %d\n",
- noc, sadr, left);
-#endif
- sadr += noc;
- while (noc) {
-#ifdef __BIG_ENDIAN
- *mp++ = *sp % 256;
- *mp++ = *sp / 256;
-#else
- *mp++ = *sp / 256;
- *mp++ = *sp % 256;
-#endif /* __BIG_ENDIAN */
- sp++;
- noc--;
- }
- spin_lock_irqsave(&cs->lock, flags);
- if (!sendmsg(cs, ISAR_HIS_FIRM, 0, nom, msg)) {
- printk(KERN_ERR"isar sendmsg prog failed\n");
- ret = 1; goto reterr_unlock;
- }
- if (!waitrecmsg(cs, &len, tmp, 100000)) {
- printk(KERN_ERR"isar waitrecmsg prog failed\n");
- ret = 1; goto reterr_unlock;
- }
- if ((ireg->iis != ISAR_IIS_FIRM) || ireg->cmsb || len) {
- printk(KERN_ERR"isar wrong prog response (%x,%x,%x)\n",
- ireg->iis, ireg->cmsb, len);
- ret = 1; goto reterr_unlock;
- }
- spin_unlock_irqrestore(&cs->lock, flags);
- }
- printk(KERN_DEBUG"isar firmware block %5d words loaded\n",
- blk_head.len);
- }
- /* 10ms delay */
- cnt = 10;
- while (cnt--)
- udelay(1000);
- msg[0] = 0xff;
- msg[1] = 0xfe;
- ireg->bstat = 0;
- spin_lock_irqsave(&cs->lock, flags);
- if (!sendmsg(cs, ISAR_HIS_STDSP, 0, 2, msg)) {
- printk(KERN_ERR"isar sendmsg start dsp failed\n");
- ret = 1; goto reterr_unlock;
- }
- if (!waitrecmsg(cs, &len, tmp, 100000)) {
- printk(KERN_ERR"isar waitrecmsg start dsp failed\n");
- ret = 1; goto reterr_unlock;
- }
- if ((ireg->iis != ISAR_IIS_STDSP) || ireg->cmsb || len) {
- printk(KERN_ERR"isar wrong start dsp response (%x,%x,%x)\n",
- ireg->iis, ireg->cmsb, len);
- ret = 1; goto reterr_unlock;
- } else
- printk(KERN_DEBUG"isar start dsp success\n");
- /* NORMAL mode entered */
- /* Enable IRQs of ISAR */
- cs->BC_Write_Reg(cs, 0, ISAR_IRQBIT, ISAR_IRQSTA);
- spin_unlock_irqrestore(&cs->lock, flags);
- cnt = 1000; /* max 1s */
- while ((!ireg->bstat) && cnt) {
- udelay(1000);
- cnt--;
- }
- if (!cnt) {
- printk(KERN_ERR"isar no general status event received\n");
- ret = 1; goto reterror;
- } else {
- printk(KERN_DEBUG"isar general status event %x\n",
- ireg->bstat);
- }
- /* 10ms delay */
- cnt = 10;
- while (cnt--)
- udelay(1000);
- spin_lock_irqsave(&cs->lock, flags);
- ireg->iis = 0;
- if (!sendmsg(cs, ISAR_HIS_DIAG, ISAR_CTRL_STST, 0, NULL)) {
- printk(KERN_ERR"isar sendmsg self tst failed\n");
- ret = 1; goto reterr_unlock;
- }
- cnt = 10000; /* max 100 ms */
- spin_unlock_irqrestore(&cs->lock, flags);
- while ((ireg->iis != ISAR_IIS_DIAG) && cnt) {
- udelay(10);
- cnt--;
- }
- udelay(1000);
- if (!cnt) {
- printk(KERN_ERR"isar no self tst response\n");
- ret = 1; goto reterror;
- }
- if ((ireg->cmsb == ISAR_CTRL_STST) && (ireg->clsb == 1)
- && (ireg->par[0] == 0)) {
- printk(KERN_DEBUG"isar selftest OK\n");
- } else {
- printk(KERN_DEBUG"isar selftest not OK %x/%x/%x\n",
- ireg->cmsb, ireg->clsb, ireg->par[0]);
- ret = 1; goto reterror;
- }
- spin_lock_irqsave(&cs->lock, flags);
- ireg->iis = 0;
- if (!sendmsg(cs, ISAR_HIS_DIAG, ISAR_CTRL_SWVER, 0, NULL)) {
- printk(KERN_ERR"isar RQST SVN failed\n");
- ret = 1; goto reterr_unlock;
- }
- spin_unlock_irqrestore(&cs->lock, flags);
- cnt = 30000; /* max 300 ms */
- while ((ireg->iis != ISAR_IIS_DIAG) && cnt) {
- udelay(10);
- cnt--;
- }
- udelay(1000);
- if (!cnt) {
- printk(KERN_ERR"isar no SVN response\n");
- ret = 1; goto reterror;
- } else {
- if ((ireg->cmsb == ISAR_CTRL_SWVER) && (ireg->clsb == 1))
- printk(KERN_DEBUG"isar software version %#x\n",
- ireg->par[0]);
- else {
- printk(KERN_ERR"isar wrong swver response (%x,%x) cnt(%d)\n",
- ireg->cmsb, ireg->clsb, cnt);
- ret = 1; goto reterror;
- }
- }
- spin_lock_irqsave(&cs->lock, flags);
- cs->debug = debug;
- isar_setup(cs);
-
- ret = 0;
-reterr_unlock:
- spin_unlock_irqrestore(&cs->lock, flags);
-reterror:
- cs->debug = debug;
- if (ret)
- /* disable ISAR IRQ */
- cs->BC_Write_Reg(cs, 0, ISAR_IRQBIT, 0);
- kfree(msg);
- kfree(tmpmsg);
- return (ret);
-}
-
-#define B_LL_NOCARRIER 8
-#define B_LL_CONNECT 9
-#define B_LL_OK 10
-
-static void
-isar_bh(struct work_struct *work)
-{
- struct BCState *bcs = container_of(work, struct BCState, tqueue);
-
- BChannel_bh(work);
- if (test_and_clear_bit(B_LL_NOCARRIER, &bcs->event))
- ll_deliver_faxstat(bcs, ISDN_FAX_CLASS1_NOCARR);
- if (test_and_clear_bit(B_LL_CONNECT, &bcs->event))
- ll_deliver_faxstat(bcs, ISDN_FAX_CLASS1_CONNECT);
- if (test_and_clear_bit(B_LL_OK, &bcs->event))
- ll_deliver_faxstat(bcs, ISDN_FAX_CLASS1_OK);
-}
-
-static void
-send_DLE_ETX(struct BCState *bcs)
-{
- u_char dleetx[2] = {DLE, ETX};
- struct sk_buff *skb;
-
- if ((skb = dev_alloc_skb(2))) {
- skb_put_data(skb, dleetx, 2);
- skb_queue_tail(&bcs->rqueue, skb);
- schedule_event(bcs, B_RCVBUFREADY);
- } else {
- printk(KERN_WARNING "HiSax: skb out of memory\n");
- }
-}
-
-static inline int
-dle_count(unsigned char *buf, int len)
-{
- int count = 0;
-
- while (len--)
- if (*buf++ == DLE)
- count++;
- return count;
-}
-
-static inline void
-insert_dle(unsigned char *dest, unsigned char *src, int count) {
- /* a <DLE> in the input stream has to be escaped as <DLE><DLE> */
- while (count--) {
- *dest++ = *src;
- if (*src++ == DLE)
- *dest++ = DLE;
- }
-}
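For context, dle_count() and insert_dle() above implement the <DLE> byte stuffing used on the Class 1 fax data path: every DLE byte in the payload is doubled so that the literal <DLE><ETX> terminator emitted by send_DLE_ETX() stays unambiguous, and isar_rcv_frame() sizes its skb as the raw payload length plus dle_count() extra bytes. What follows is a minimal standalone sketch of that escaping, not part of the removed driver; the ASCII value 0x10 for DLE and the *_demo names are assumptions for the example only.

/* standalone sketch, not part of the driver above */
#include <stdio.h>

#define DLE 0x10	/* assumed ASCII DLE value */

/* count DLE bytes in buf, mirroring dle_count() */
static int dle_count_demo(const unsigned char *buf, int len)
{
	int count = 0;

	while (len--)
		if (*buf++ == DLE)
			count++;
	return count;
}

/* copy src to dest, doubling every DLE, mirroring insert_dle() */
static void insert_dle_demo(unsigned char *dest, const unsigned char *src,
			    int count)
{
	while (count--) {
		*dest++ = *src;
		if (*src++ == DLE)
			*dest++ = DLE;
	}
}

int main(void)
{
	unsigned char in[] = { 0x01, DLE, 0x02, DLE, 0x03 };
	unsigned char out[2 * sizeof(in)];
	int extra = dle_count_demo(in, sizeof(in));

	insert_dle_demo(out, in, sizeof(in));
	/* 5 input bytes containing 2 DLEs become 7 output bytes */
	printf("in %zu bytes, out %zu bytes\n",
	       sizeof(in), sizeof(in) + (unsigned int)extra);
	return 0;
}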
-
-static void
-isar_rcv_frame(struct IsdnCardState *cs, struct BCState *bcs)
-{
- u_char *ptr;
- struct sk_buff *skb;
- struct isar_reg *ireg = bcs->hw.isar.reg;
-
- if (!ireg->clsb) {
- debugl1(cs, "isar zero len frame");
- cs->BC_Write_Reg(cs, 1, ISAR_IIA, 0);
- return;
- }
- switch (bcs->mode) {
- case L1_MODE_NULL:
- debugl1(cs, "isar mode 0 spurious IIS_RDATA %x/%x/%x",
- ireg->iis, ireg->cmsb, ireg->clsb);
- printk(KERN_WARNING"isar mode 0 spurious IIS_RDATA %x/%x/%x\n",
- ireg->iis, ireg->cmsb, ireg->clsb);
- cs->BC_Write_Reg(cs, 1, ISAR_IIA, 0);
- break;
- case L1_MODE_TRANS:
- case L1_MODE_V32:
- if ((skb = dev_alloc_skb(ireg->clsb))) {
- rcv_mbox(cs, ireg, (u_char *)skb_put(skb, ireg->clsb));
- skb_queue_tail(&bcs->rqueue, skb);
- schedule_event(bcs, B_RCVBUFREADY);
- } else {
- printk(KERN_WARNING "HiSax: skb out of memory\n");
- cs->BC_Write_Reg(cs, 1, ISAR_IIA, 0);
- }
- break;
- case L1_MODE_HDLC:
- if ((bcs->hw.isar.rcvidx + ireg->clsb) > HSCX_BUFMAX) {
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "isar_rcv_frame: incoming packet too large");
- cs->BC_Write_Reg(cs, 1, ISAR_IIA, 0);
- bcs->hw.isar.rcvidx = 0;
- } else if (ireg->cmsb & HDLC_ERROR) {
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "isar frame error %x len %d",
- ireg->cmsb, ireg->clsb);
-#ifdef ERROR_STATISTIC
- if (ireg->cmsb & HDLC_ERR_RER)
- bcs->err_inv++;
- if (ireg->cmsb & HDLC_ERR_CER)
- bcs->err_crc++;
-#endif
- bcs->hw.isar.rcvidx = 0;
- cs->BC_Write_Reg(cs, 1, ISAR_IIA, 0);
- } else {
- if (ireg->cmsb & HDLC_FSD)
- bcs->hw.isar.rcvidx = 0;
- ptr = bcs->hw.isar.rcvbuf + bcs->hw.isar.rcvidx;
- bcs->hw.isar.rcvidx += ireg->clsb;
- rcv_mbox(cs, ireg, ptr);
- if (ireg->cmsb & HDLC_FED) {
- if (bcs->hw.isar.rcvidx < 3) { /* last 2 bytes are the FCS */
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "isar frame to short %d",
- bcs->hw.isar.rcvidx);
- } else if (!(skb = dev_alloc_skb(bcs->hw.isar.rcvidx - 2))) {
- printk(KERN_WARNING "ISAR: receive out of memory\n");
- } else {
- skb_put_data(skb, bcs->hw.isar.rcvbuf,
- bcs->hw.isar.rcvidx - 2);
- skb_queue_tail(&bcs->rqueue, skb);
- schedule_event(bcs, B_RCVBUFREADY);
- }
- bcs->hw.isar.rcvidx = 0;
- }
- }
- break;
- case L1_MODE_FAX:
- if (bcs->hw.isar.state != STFAX_ACTIV) {
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "isar_rcv_frame: not ACTIV");
- cs->BC_Write_Reg(cs, 1, ISAR_IIA, 0);
- bcs->hw.isar.rcvidx = 0;
- break;
- }
- if (bcs->hw.isar.cmd == PCTRL_CMD_FRM) {
- rcv_mbox(cs, ireg, bcs->hw.isar.rcvbuf);
- bcs->hw.isar.rcvidx = ireg->clsb +
- dle_count(bcs->hw.isar.rcvbuf, ireg->clsb);
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "isar_rcv_frame: raw(%d) dle(%d)",
- ireg->clsb, bcs->hw.isar.rcvidx);
- if ((skb = dev_alloc_skb(bcs->hw.isar.rcvidx))) {
- insert_dle((u_char *)skb_put(skb, bcs->hw.isar.rcvidx),
- bcs->hw.isar.rcvbuf, ireg->clsb);
- skb_queue_tail(&bcs->rqueue, skb);
- schedule_event(bcs, B_RCVBUFREADY);
- if (ireg->cmsb & SART_NMD) { /* ABORT */
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "isar_rcv_frame: no more data");
- bcs->hw.isar.rcvidx = 0;
- send_DLE_ETX(bcs);
- sendmsg(cs, SET_DPS(bcs->hw.isar.dpath) |
- ISAR_HIS_PUMPCTRL, PCTRL_CMD_ESC,
- 0, NULL);
- bcs->hw.isar.state = STFAX_ESCAPE;
- schedule_event(bcs, B_LL_NOCARRIER);
- }
- } else {
- printk(KERN_WARNING "HiSax: skb out of memory\n");
- }
- break;
- }
- if (bcs->hw.isar.cmd != PCTRL_CMD_FRH) {
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "isar_rcv_frame: unknown fax mode %x",
- bcs->hw.isar.cmd);
- cs->BC_Write_Reg(cs, 1, ISAR_IIA, 0);
- bcs->hw.isar.rcvidx = 0;
- break;
- }
- /* PCTRL_CMD_FRH */
- if ((bcs->hw.isar.rcvidx + ireg->clsb) > HSCX_BUFMAX) {
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "isar_rcv_frame: incoming packet too large");
- cs->BC_Write_Reg(cs, 1, ISAR_IIA, 0);
- bcs->hw.isar.rcvidx = 0;
- } else if (ireg->cmsb & HDLC_ERROR) {
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "isar frame error %x len %d",
- ireg->cmsb, ireg->clsb);
- bcs->hw.isar.rcvidx = 0;
- cs->BC_Write_Reg(cs, 1, ISAR_IIA, 0);
- } else {
- if (ireg->cmsb & HDLC_FSD) {
- bcs->hw.isar.rcvidx = 0;
- }
- ptr = bcs->hw.isar.rcvbuf + bcs->hw.isar.rcvidx;
- bcs->hw.isar.rcvidx += ireg->clsb;
- rcv_mbox(cs, ireg, ptr);
- if (ireg->cmsb & HDLC_FED) {
- int len = bcs->hw.isar.rcvidx +
- dle_count(bcs->hw.isar.rcvbuf, bcs->hw.isar.rcvidx);
- if (bcs->hw.isar.rcvidx < 3) { /* last 2 bytes are the FCS */
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "isar frame to short %d",
- bcs->hw.isar.rcvidx);
- printk(KERN_WARNING "ISAR: frame to short %d\n",
- bcs->hw.isar.rcvidx);
- } else if (!(skb = dev_alloc_skb(len))) {
- printk(KERN_WARNING "ISAR: receive out of memory\n");
- } else {
- insert_dle((u_char *)skb_put(skb, len),
- bcs->hw.isar.rcvbuf,
- bcs->hw.isar.rcvidx);
- skb_queue_tail(&bcs->rqueue, skb);
- schedule_event(bcs, B_RCVBUFREADY);
- send_DLE_ETX(bcs);
- schedule_event(bcs, B_LL_OK);
- test_and_clear_bit(BC_FLG_FRH_WAIT, &bcs->Flag);
- }
- bcs->hw.isar.rcvidx = 0;
- }
- }
- if (ireg->cmsb & SART_NMD) { /* ABORT */
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "isar_rcv_frame: no more data");
- bcs->hw.isar.rcvidx = 0;
- sendmsg(cs, SET_DPS(bcs->hw.isar.dpath) |
- ISAR_HIS_PUMPCTRL, PCTRL_CMD_ESC, 0, NULL);
- bcs->hw.isar.state = STFAX_ESCAPE;
- if (test_and_clear_bit(BC_FLG_FRH_WAIT, &bcs->Flag)) {
- send_DLE_ETX(bcs);
- schedule_event(bcs, B_LL_NOCARRIER);
- }
- }
- break;
- default:
- printk(KERN_ERR"isar_rcv_frame mode (%x)error\n", bcs->mode);
- cs->BC_Write_Reg(cs, 1, ISAR_IIA, 0);
- break;
- }
-}
-
-void
-isar_fill_fifo(struct BCState *bcs)
-{
- struct IsdnCardState *cs = bcs->cs;
- int count;
- u_char msb;
- u_char *ptr;
-
- if ((cs->debug & L1_DEB_HSCX) && !(cs->debug & L1_DEB_HSCX_FIFO))
- debugl1(cs, "isar_fill_fifo");
- if (!bcs->tx_skb)
- return;
- if (bcs->tx_skb->len <= 0)
- return;
- if (!(bcs->hw.isar.reg->bstat &
- (bcs->hw.isar.dpath == 1 ? BSTAT_RDM1 : BSTAT_RDM2)))
- return;
- if (bcs->tx_skb->len > bcs->hw.isar.mml) {
- msb = 0;
- count = bcs->hw.isar.mml;
- } else {
- count = bcs->tx_skb->len;
- msb = HDLC_FED;
- }
- ptr = bcs->tx_skb->data;
- if (!bcs->hw.isar.txcnt) {
- msb |= HDLC_FST;
- if ((bcs->mode == L1_MODE_FAX) &&
- (bcs->hw.isar.cmd == PCTRL_CMD_FTH)) {
- if (bcs->tx_skb->len > 1) {
- if ((ptr[0] == 0xff) && (ptr[1] == 0x13))
- /* last frame */
- test_and_set_bit(BC_FLG_LASTDATA,
- &bcs->Flag);
- }
- }
- }
- skb_pull(bcs->tx_skb, count);
- bcs->tx_cnt -= count;
- bcs->hw.isar.txcnt += count;
- switch (bcs->mode) {
- case L1_MODE_NULL:
- printk(KERN_ERR"isar_fill_fifo wrong mode 0\n");
- break;
- case L1_MODE_TRANS:
- case L1_MODE_V32:
- sendmsg(cs, SET_DPS(bcs->hw.isar.dpath) | ISAR_HIS_SDATA,
- 0, count, ptr);
- break;
- case L1_MODE_HDLC:
- sendmsg(cs, SET_DPS(bcs->hw.isar.dpath) | ISAR_HIS_SDATA,
- msb, count, ptr);
- break;
- case L1_MODE_FAX:
- if (bcs->hw.isar.state != STFAX_ACTIV) {
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "isar_fill_fifo: not ACTIV");
- } else if (bcs->hw.isar.cmd == PCTRL_CMD_FTH) {
- sendmsg(cs, SET_DPS(bcs->hw.isar.dpath) | ISAR_HIS_SDATA,
- msb, count, ptr);
- } else if (bcs->hw.isar.cmd == PCTRL_CMD_FTM) {
- sendmsg(cs, SET_DPS(bcs->hw.isar.dpath) | ISAR_HIS_SDATA,
- 0, count, ptr);
- } else {
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "isar_fill_fifo: not FTH/FTM");
- }
- break;
- default:
- if (cs->debug)
- debugl1(cs, "isar_fill_fifo mode(%x) error", bcs->mode);
- printk(KERN_ERR"isar_fill_fifo mode(%x) error\n", bcs->mode);
- break;
- }
-}
-
-static inline
-struct BCState *sel_bcs_isar(struct IsdnCardState *cs, u_char dpath)
-{
- if ((!dpath) || (dpath == 3))
- return (NULL);
- if (cs->bcs[0].hw.isar.dpath == dpath)
- return (&cs->bcs[0]);
- if (cs->bcs[1].hw.isar.dpath == dpath)
- return (&cs->bcs[1]);
- return (NULL);
-}
-
-static void
-send_frames(struct BCState *bcs)
-{
- if (bcs->tx_skb) {
- if (bcs->tx_skb->len) {
- isar_fill_fifo(bcs);
- return;
- } else {
- if (test_bit(FLG_LLI_L1WAKEUP, &bcs->st->lli.flag) &&
- (PACKET_NOACK != bcs->tx_skb->pkt_type)) {
- u_long flags;
- spin_lock_irqsave(&bcs->aclock, flags);
- bcs->ackcnt += bcs->hw.isar.txcnt;
- spin_unlock_irqrestore(&bcs->aclock, flags);
- schedule_event(bcs, B_ACKPENDING);
- }
- if (bcs->mode == L1_MODE_FAX) {
- if (bcs->hw.isar.cmd == PCTRL_CMD_FTH) {
- if (test_bit(BC_FLG_LASTDATA, &bcs->Flag)) {
- test_and_set_bit(BC_FLG_NMD_DATA, &bcs->Flag);
- }
- } else if (bcs->hw.isar.cmd == PCTRL_CMD_FTM) {
- if (test_bit(BC_FLG_DLEETX, &bcs->Flag)) {
- test_and_set_bit(BC_FLG_LASTDATA, &bcs->Flag);
- test_and_set_bit(BC_FLG_NMD_DATA, &bcs->Flag);
- }
- }
- }
- dev_kfree_skb_any(bcs->tx_skb);
- bcs->hw.isar.txcnt = 0;
- bcs->tx_skb = NULL;
- }
- }
- if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) {
- bcs->hw.isar.txcnt = 0;
- test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
- isar_fill_fifo(bcs);
- } else {
- if (test_and_clear_bit(BC_FLG_DLEETX, &bcs->Flag)) {
- if (test_and_clear_bit(BC_FLG_LASTDATA, &bcs->Flag)) {
- if (test_and_clear_bit(BC_FLG_NMD_DATA, &bcs->Flag)) {
- u_char dummy = 0;
- sendmsg(bcs->cs, SET_DPS(bcs->hw.isar.dpath) |
- ISAR_HIS_SDATA, 0x01, 1, &dummy);
- }
- test_and_set_bit(BC_FLG_LL_OK, &bcs->Flag);
- } else {
- schedule_event(bcs, B_LL_CONNECT);
- }
- }
- test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
- schedule_event(bcs, B_XMTBUFREADY);
- }
-}
-
-static inline void
-check_send(struct IsdnCardState *cs, u_char rdm)
-{
- struct BCState *bcs;
-
- if (rdm & BSTAT_RDM1) {
- if ((bcs = sel_bcs_isar(cs, 1))) {
- if (bcs->mode) {
- send_frames(bcs);
- }
- }
- }
- if (rdm & BSTAT_RDM2) {
- if ((bcs = sel_bcs_isar(cs, 2))) {
- if (bcs->mode) {
- send_frames(bcs);
- }
- }
- }
-
-}
-
-static const char *dmril[] = {"NO SPEED", "1200/75", "NODEF2", "75/1200",
- "NODEF4", "300", "600", "1200", "2400",
- "4800", "7200", "9600nt", "9600t", "12000",
- "14400", "WRONG"};
-static const char *dmrim[] = {"NO MOD", "NO DEF", "V32/V32b", "V22", "V21",
- "Bell103", "V23", "Bell202", "V17", "V29",
- "V27ter"};
-
-static void
-isar_pump_status_rsp(struct BCState *bcs, struct isar_reg *ireg) {
- struct IsdnCardState *cs = bcs->cs;
- u_char ril = ireg->par[0];
- u_char rim;
-
- if (!test_and_clear_bit(ISAR_RATE_REQ, &bcs->hw.isar.reg->Flags))
- return;
- if (ril > 14) {
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "wrong pstrsp ril=%d", ril);
- ril = 15;
- }
- switch (ireg->par[1]) {
- case 0:
- rim = 0;
- break;
- case 0x20:
- rim = 2;
- break;
- case 0x40:
- rim = 3;
- break;
- case 0x41:
- rim = 4;
- break;
- case 0x51:
- rim = 5;
- break;
- case 0x61:
- rim = 6;
- break;
- case 0x71:
- rim = 7;
- break;
- case 0x82:
- rim = 8;
- break;
- case 0x92:
- rim = 9;
- break;
- case 0xa2:
- rim = 10;
- break;
- default:
- rim = 1;
- break;
- }
- sprintf(bcs->hw.isar.conmsg, "%s %s", dmril[ril], dmrim[rim]);
- bcs->conmsg = bcs->hw.isar.conmsg;
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "pump strsp %s", bcs->conmsg);
-}
-
-static void
-isar_pump_statev_modem(struct BCState *bcs, u_char devt) {
- struct IsdnCardState *cs = bcs->cs;
- u_char dps = SET_DPS(bcs->hw.isar.dpath);
-
- switch (devt) {
- case PSEV_10MS_TIMER:
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "pump stev TIMER");
- break;
- case PSEV_CON_ON:
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "pump stev CONNECT");
- l1_msg_b(bcs->st, PH_ACTIVATE | REQUEST, NULL);
- break;
- case PSEV_CON_OFF:
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "pump stev NO CONNECT");
- sendmsg(cs, dps | ISAR_HIS_PSTREQ, 0, 0, NULL);
- l1_msg_b(bcs->st, PH_DEACTIVATE | REQUEST, NULL);
- break;
- case PSEV_V24_OFF:
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "pump stev V24 OFF");
- break;
- case PSEV_CTS_ON:
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "pump stev CTS ON");
- break;
- case PSEV_CTS_OFF:
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "pump stev CTS OFF");
- break;
- case PSEV_DCD_ON:
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "pump stev CARRIER ON");
- test_and_set_bit(ISAR_RATE_REQ, &bcs->hw.isar.reg->Flags);
- sendmsg(cs, dps | ISAR_HIS_PSTREQ, 0, 0, NULL);
- break;
- case PSEV_DCD_OFF:
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "pump stev CARRIER OFF");
- break;
- case PSEV_DSR_ON:
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "pump stev DSR ON");
- break;
- case PSEV_DSR_OFF:
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "pump stev DSR_OFF");
- break;
- case PSEV_REM_RET:
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "pump stev REMOTE RETRAIN");
- break;
- case PSEV_REM_REN:
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "pump stev REMOTE RENEGOTIATE");
- break;
- case PSEV_GSTN_CLR:
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "pump stev GSTN CLEAR");
- break;
- default:
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "unknown pump stev %x", devt);
- break;
- }
-}
-
-static void
-ll_deliver_faxstat(struct BCState *bcs, u_char status)
-{
- isdn_ctrl ic;
- struct Channel *chanp = (struct Channel *) bcs->st->lli.userdata;
-
- if (bcs->cs->debug & L1_DEB_HSCX)
- debugl1(bcs->cs, "HL->LL FAXIND %x", status);
- ic.driver = bcs->cs->myid;
- ic.command = ISDN_STAT_FAXIND;
- ic.arg = chanp->chan;
- ic.parm.aux.cmd = status;
- bcs->cs->iif.statcallb(&ic);
-}
-
-static void
-isar_pump_statev_fax(struct BCState *bcs, u_char devt) {
- struct IsdnCardState *cs = bcs->cs;
- u_char dps = SET_DPS(bcs->hw.isar.dpath);
- u_char p1;
-
- switch (devt) {
- case PSEV_10MS_TIMER:
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "pump stev TIMER");
- break;
- case PSEV_RSP_READY:
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "pump stev RSP_READY");
- bcs->hw.isar.state = STFAX_READY;
- l1_msg_b(bcs->st, PH_ACTIVATE | REQUEST, NULL);
- if (test_bit(BC_FLG_ORIG, &bcs->Flag)) {
- isar_pump_cmd(bcs, ISDN_FAX_CLASS1_FRH, 3);
- } else {
- isar_pump_cmd(bcs, ISDN_FAX_CLASS1_FTH, 3);
- }
- break;
- case PSEV_LINE_TX_H:
- if (bcs->hw.isar.state == STFAX_LINE) {
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "pump stev LINE_TX_H");
- bcs->hw.isar.state = STFAX_CONT;
- sendmsg(cs, dps | ISAR_HIS_PUMPCTRL, PCTRL_CMD_CONT, 0, NULL);
- } else {
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "pump stev LINE_TX_H wrong st %x",
- bcs->hw.isar.state);
- }
- break;
- case PSEV_LINE_RX_H:
- if (bcs->hw.isar.state == STFAX_LINE) {
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "pump stev LINE_RX_H");
- bcs->hw.isar.state = STFAX_CONT;
- sendmsg(cs, dps | ISAR_HIS_PUMPCTRL, PCTRL_CMD_CONT, 0, NULL);
- } else {
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "pump stev LINE_RX_H wrong st %x",
- bcs->hw.isar.state);
- }
- break;
- case PSEV_LINE_TX_B:
- if (bcs->hw.isar.state == STFAX_LINE) {
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "pump stev LINE_TX_B");
- bcs->hw.isar.state = STFAX_CONT;
- sendmsg(cs, dps | ISAR_HIS_PUMPCTRL, PCTRL_CMD_CONT, 0, NULL);
- } else {
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "pump stev LINE_TX_B wrong st %x",
- bcs->hw.isar.state);
- }
- break;
- case PSEV_LINE_RX_B:
- if (bcs->hw.isar.state == STFAX_LINE) {
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "pump stev LINE_RX_B");
- bcs->hw.isar.state = STFAX_CONT;
- sendmsg(cs, dps | ISAR_HIS_PUMPCTRL, PCTRL_CMD_CONT, 0, NULL);
- } else {
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "pump stev LINE_RX_B wrong st %x",
- bcs->hw.isar.state);
- }
- break;
- case PSEV_RSP_CONN:
- if (bcs->hw.isar.state == STFAX_CONT) {
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "pump stev RSP_CONN");
- bcs->hw.isar.state = STFAX_ACTIV;
- test_and_set_bit(ISAR_RATE_REQ, &bcs->hw.isar.reg->Flags);
- sendmsg(cs, dps | ISAR_HIS_PSTREQ, 0, 0, NULL);
- if (bcs->hw.isar.cmd == PCTRL_CMD_FTH) {
- /* 1s Flags before data */
- if (test_and_set_bit(BC_FLG_FTI_RUN, &bcs->Flag))
- del_timer(&bcs->hw.isar.ftimer);
- /* 1000 ms */
- bcs->hw.isar.ftimer.expires =
- jiffies + ((1000 * HZ) / 1000);
- test_and_set_bit(BC_FLG_LL_CONN,
- &bcs->Flag);
- add_timer(&bcs->hw.isar.ftimer);
- } else {
- schedule_event(bcs, B_LL_CONNECT);
- }
- } else {
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "pump stev RSP_CONN wrong st %x",
- bcs->hw.isar.state);
- }
- break;
- case PSEV_FLAGS_DET:
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "pump stev FLAGS_DET");
- break;
- case PSEV_RSP_DISC:
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "pump stev RSP_DISC");
- if (bcs->hw.isar.state == STFAX_ESCAPE) {
- p1 = 5;
- switch (bcs->hw.isar.newcmd) {
- case 0:
- bcs->hw.isar.state = STFAX_READY;
- break;
- case PCTRL_CMD_FTM:
- p1 = 2;
- /* fall through */
- case PCTRL_CMD_FTH:
- sendmsg(cs, dps | ISAR_HIS_PUMPCTRL,
- PCTRL_CMD_SILON, 1, &p1);
- bcs->hw.isar.state = STFAX_SILDET;
- break;
- case PCTRL_CMD_FRM:
- if (frm_extra_delay)
- mdelay(frm_extra_delay);
- /* fall through */
- case PCTRL_CMD_FRH:
- p1 = bcs->hw.isar.mod = bcs->hw.isar.newmod;
- bcs->hw.isar.newmod = 0;
- bcs->hw.isar.cmd = bcs->hw.isar.newcmd;
- bcs->hw.isar.newcmd = 0;
- sendmsg(cs, dps | ISAR_HIS_PUMPCTRL,
- bcs->hw.isar.cmd, 1, &p1);
- bcs->hw.isar.state = STFAX_LINE;
- bcs->hw.isar.try_mod = 3;
- break;
- default:
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "RSP_DISC unknown newcmd %x", bcs->hw.isar.newcmd);
- break;
- }
- } else if (bcs->hw.isar.state == STFAX_ACTIV) {
- if (test_and_clear_bit(BC_FLG_LL_OK, &bcs->Flag)) {
- schedule_event(bcs, B_LL_OK);
- } else if (bcs->hw.isar.cmd == PCTRL_CMD_FRM) {
- send_DLE_ETX(bcs);
- schedule_event(bcs, B_LL_NOCARRIER);
- } else {
- ll_deliver_faxstat(bcs, ISDN_FAX_CLASS1_FCERROR);
- }
- bcs->hw.isar.state = STFAX_READY;
- } else {
- bcs->hw.isar.state = STFAX_READY;
- ll_deliver_faxstat(bcs, ISDN_FAX_CLASS1_FCERROR);
- }
- break;
- case PSEV_RSP_SILDET:
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "pump stev RSP_SILDET");
- if (bcs->hw.isar.state == STFAX_SILDET) {
- p1 = bcs->hw.isar.mod = bcs->hw.isar.newmod;
- bcs->hw.isar.newmod = 0;
- bcs->hw.isar.cmd = bcs->hw.isar.newcmd;
- bcs->hw.isar.newcmd = 0;
- sendmsg(cs, dps | ISAR_HIS_PUMPCTRL,
- bcs->hw.isar.cmd, 1, &p1);
- bcs->hw.isar.state = STFAX_LINE;
- bcs->hw.isar.try_mod = 3;
- }
- break;
- case PSEV_RSP_SILOFF:
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "pump stev RSP_SILOFF");
- break;
- case PSEV_RSP_FCERR:
- if (bcs->hw.isar.state == STFAX_LINE) {
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "pump stev RSP_FCERR try %d",
- bcs->hw.isar.try_mod);
- if (bcs->hw.isar.try_mod--) {
- sendmsg(cs, dps | ISAR_HIS_PUMPCTRL,
- bcs->hw.isar.cmd, 1,
- &bcs->hw.isar.mod);
- break;
- }
- }
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "pump stev RSP_FCERR");
- bcs->hw.isar.state = STFAX_ESCAPE;
- sendmsg(cs, dps | ISAR_HIS_PUMPCTRL, PCTRL_CMD_ESC, 0, NULL);
- ll_deliver_faxstat(bcs, ISDN_FAX_CLASS1_FCERROR);
- break;
- default:
- break;
- }
-}
-
-static char debbuf[128];
-
-void
-isar_int_main(struct IsdnCardState *cs)
-{
- struct isar_reg *ireg = cs->bcs[0].hw.isar.reg;
- struct BCState *bcs;
-
- get_irq_infos(cs, ireg);
- switch (ireg->iis & ISAR_IIS_MSCMSD) {
- case ISAR_IIS_RDATA:
- if ((bcs = sel_bcs_isar(cs, ireg->iis >> 6))) {
- isar_rcv_frame(cs, bcs);
- } else {
- debugl1(cs, "isar spurious IIS_RDATA %x/%x/%x",
- ireg->iis, ireg->cmsb, ireg->clsb);
- cs->BC_Write_Reg(cs, 1, ISAR_IIA, 0);
- }
- break;
- case ISAR_IIS_GSTEV:
- cs->BC_Write_Reg(cs, 1, ISAR_IIA, 0);
- ireg->bstat |= ireg->cmsb;
- check_send(cs, ireg->cmsb);
- break;
- case ISAR_IIS_BSTEV:
-#ifdef ERROR_STATISTIC
- if ((bcs = sel_bcs_isar(cs, ireg->iis >> 6))) {
- if (ireg->cmsb == BSTEV_TBO)
- bcs->err_tx++;
- if (ireg->cmsb == BSTEV_RBO)
- bcs->err_rdo++;
- }
-#endif
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "Buffer STEV dpath%d msb(%x)",
- ireg->iis >> 6, ireg->cmsb);
- cs->BC_Write_Reg(cs, 1, ISAR_IIA, 0);
- break;
- case ISAR_IIS_PSTEV:
- if ((bcs = sel_bcs_isar(cs, ireg->iis >> 6))) {
- rcv_mbox(cs, ireg, (u_char *)ireg->par);
- if (bcs->mode == L1_MODE_V32) {
- isar_pump_statev_modem(bcs, ireg->cmsb);
- } else if (bcs->mode == L1_MODE_FAX) {
- isar_pump_statev_fax(bcs, ireg->cmsb);
- } else if (ireg->cmsb == PSEV_10MS_TIMER) {
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "pump stev TIMER");
- } else {
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "isar IIS_PSTEV pmode %d stat %x",
- bcs->mode, ireg->cmsb);
- }
- } else {
- debugl1(cs, "isar spurious IIS_PSTEV %x/%x/%x",
- ireg->iis, ireg->cmsb, ireg->clsb);
- cs->BC_Write_Reg(cs, 1, ISAR_IIA, 0);
- }
- break;
- case ISAR_IIS_PSTRSP:
- if ((bcs = sel_bcs_isar(cs, ireg->iis >> 6))) {
- rcv_mbox(cs, ireg, (u_char *)ireg->par);
- isar_pump_status_rsp(bcs, ireg);
- } else {
- debugl1(cs, "isar spurious IIS_PSTRSP %x/%x/%x",
- ireg->iis, ireg->cmsb, ireg->clsb);
- cs->BC_Write_Reg(cs, 1, ISAR_IIA, 0);
- }
- break;
- case ISAR_IIS_DIAG:
- case ISAR_IIS_BSTRSP:
- case ISAR_IIS_IOM2RSP:
- rcv_mbox(cs, ireg, (u_char *)ireg->par);
- if ((cs->debug & (L1_DEB_HSCX | L1_DEB_HSCX_FIFO))
- == L1_DEB_HSCX) {
- u_char *tp = debbuf;
-
- tp += sprintf(debbuf, "msg iis(%x) msb(%x)",
- ireg->iis, ireg->cmsb);
- QuickHex(tp, (u_char *)ireg->par, ireg->clsb);
- debugl1(cs, "%s", debbuf);
- }
- break;
- case ISAR_IIS_INVMSG:
- rcv_mbox(cs, ireg, debbuf);
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "invalid msg his:%x",
- ireg->cmsb);
- break;
- default:
- rcv_mbox(cs, ireg, debbuf);
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "unhandled msg iis(%x) ctrl(%x/%x)",
- ireg->iis, ireg->cmsb, ireg->clsb);
- break;
- }
-}
-
-static void
-ftimer_handler(struct timer_list *t) {
- struct BCState *bcs = from_timer(bcs, t, hw.isar.ftimer);
- if (bcs->cs->debug)
- debugl1(bcs->cs, "ftimer flags %04lx",
- bcs->Flag);
- test_and_clear_bit(BC_FLG_FTI_RUN, &bcs->Flag);
- if (test_and_clear_bit(BC_FLG_LL_CONN, &bcs->Flag)) {
- schedule_event(bcs, B_LL_CONNECT);
- }
- if (test_and_clear_bit(BC_FLG_FTI_FTS, &bcs->Flag)) {
- schedule_event(bcs, B_LL_OK);
- }
-}
-
-static void
-setup_pump(struct BCState *bcs) {
- struct IsdnCardState *cs = bcs->cs;
- u_char dps = SET_DPS(bcs->hw.isar.dpath);
- u_char ctrl, param[6];
-
- switch (bcs->mode) {
- case L1_MODE_NULL:
- case L1_MODE_TRANS:
- case L1_MODE_HDLC:
- sendmsg(cs, dps | ISAR_HIS_PUMPCFG, PMOD_BYPASS, 0, NULL);
- break;
- case L1_MODE_V32:
- ctrl = PMOD_DATAMODEM;
- if (test_bit(BC_FLG_ORIG, &bcs->Flag)) {
- ctrl |= PCTRL_ORIG;
- param[5] = PV32P6_CTN;
- } else {
- param[5] = PV32P6_ATN;
- }
- param[0] = para_TOA; /* 6 db */
- param[1] = PV32P2_V23R | PV32P2_V22A | PV32P2_V22B |
- PV32P2_V22C | PV32P2_V21 | PV32P2_BEL;
- param[2] = PV32P3_AMOD | PV32P3_V32B | PV32P3_V23B;
- param[3] = PV32P4_UT144;
- param[4] = PV32P5_UT144;
- sendmsg(cs, dps | ISAR_HIS_PUMPCFG, ctrl, 6, param);
- break;
- case L1_MODE_FAX:
- ctrl = PMOD_FAX;
- if (test_bit(BC_FLG_ORIG, &bcs->Flag)) {
- ctrl |= PCTRL_ORIG;
- param[1] = PFAXP2_CTN;
- } else {
- param[1] = PFAXP2_ATN;
- }
- param[0] = para_TOA; /* 6 db */
- sendmsg(cs, dps | ISAR_HIS_PUMPCFG, ctrl, 2, param);
- bcs->hw.isar.state = STFAX_NULL;
- bcs->hw.isar.newcmd = 0;
- bcs->hw.isar.newmod = 0;
- test_and_set_bit(BC_FLG_FTI_RUN, &bcs->Flag);
- break;
- }
- udelay(1000);
- sendmsg(cs, dps | ISAR_HIS_PSTREQ, 0, 0, NULL);
- udelay(1000);
-}
-
-static void
-setup_sart(struct BCState *bcs) {
- struct IsdnCardState *cs = bcs->cs;
- u_char dps = SET_DPS(bcs->hw.isar.dpath);
- u_char ctrl, param[2];
-
- switch (bcs->mode) {
- case L1_MODE_NULL:
- sendmsg(cs, dps | ISAR_HIS_SARTCFG, SMODE_DISABLE, 0,
- NULL);
- break;
- case L1_MODE_TRANS:
- sendmsg(cs, dps | ISAR_HIS_SARTCFG, SMODE_BINARY, 2,
- "\0\0");
- break;
- case L1_MODE_HDLC:
- param[0] = 0;
- sendmsg(cs, dps | ISAR_HIS_SARTCFG, SMODE_HDLC, 1,
- param);
- break;
- case L1_MODE_V32:
- ctrl = SMODE_V14 | SCTRL_HDMC_BOTH;
- param[0] = S_P1_CHS_8;
- param[1] = S_P2_BFT_DEF;
- sendmsg(cs, dps | ISAR_HIS_SARTCFG, ctrl, 2,
- param);
- break;
- case L1_MODE_FAX:
- /* SART must not be configured in FAX mode */
- break;
- }
- udelay(1000);
- sendmsg(cs, dps | ISAR_HIS_BSTREQ, 0, 0, NULL);
- udelay(1000);
-}
-
-static void
-setup_iom2(struct BCState *bcs) {
- struct IsdnCardState *cs = bcs->cs;
- u_char dps = SET_DPS(bcs->hw.isar.dpath);
- u_char cmsb = IOM_CTRL_ENA, msg[5] = {IOM_P1_TXD, 0, 0, 0, 0};
-
- if (bcs->channel)
- msg[1] = msg[3] = 1;
- switch (bcs->mode) {
- case L1_MODE_NULL:
- cmsb = 0;
- /* dummy slot */
- msg[1] = msg[3] = bcs->hw.isar.dpath + 2;
- break;
- case L1_MODE_TRANS:
- case L1_MODE_HDLC:
- break;
- case L1_MODE_V32:
- case L1_MODE_FAX:
- cmsb |= IOM_CTRL_ALAW | IOM_CTRL_RCV;
- break;
- }
- sendmsg(cs, dps | ISAR_HIS_IOM2CFG, cmsb, 5, msg);
- udelay(1000);
- sendmsg(cs, dps | ISAR_HIS_IOM2REQ, 0, 0, NULL);
- udelay(1000);
-}
-
-static int
-modeisar(struct BCState *bcs, int mode, int bc)
-{
- struct IsdnCardState *cs = bcs->cs;
-
- /* Here we select the best datapath for the requested mode */
- if (bcs->mode == L1_MODE_NULL) { /* New Setup */
- bcs->channel = bc;
- switch (mode) {
- case L1_MODE_NULL: /* init */
- if (!bcs->hw.isar.dpath)
- /* no init for dpath 0 */
- return (0);
- break;
- case L1_MODE_TRANS:
- case L1_MODE_HDLC:
- /* best is datapath 2 */
- if (!test_and_set_bit(ISAR_DP2_USE,
- &bcs->hw.isar.reg->Flags))
- bcs->hw.isar.dpath = 2;
- else if (!test_and_set_bit(ISAR_DP1_USE,
- &bcs->hw.isar.reg->Flags))
- bcs->hw.isar.dpath = 1;
- else {
- printk(KERN_WARNING"isar modeisar both paths in use\n");
- return (1);
- }
- break;
- case L1_MODE_V32:
- case L1_MODE_FAX:
- /* only datapath 1 */
- if (!test_and_set_bit(ISAR_DP1_USE,
- &bcs->hw.isar.reg->Flags))
- bcs->hw.isar.dpath = 1;
- else {
- printk(KERN_WARNING"isar modeisar analog functions only with DP1\n");
- debugl1(cs, "isar modeisar analog functions only with DP1");
- return (1);
- }
- break;
- }
- }
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "isar dp%d mode %d->%d ichan %d",
- bcs->hw.isar.dpath, bcs->mode, mode, bc);
- bcs->mode = mode;
- setup_pump(bcs);
- setup_iom2(bcs);
- setup_sart(bcs);
- if (bcs->mode == L1_MODE_NULL) {
- /* Clear resources */
- if (bcs->hw.isar.dpath == 1)
- test_and_clear_bit(ISAR_DP1_USE, &bcs->hw.isar.reg->Flags);
- else if (bcs->hw.isar.dpath == 2)
- test_and_clear_bit(ISAR_DP2_USE, &bcs->hw.isar.reg->Flags);
- bcs->hw.isar.dpath = 0;
- }
- return (0);
-}
-
-static void
-isar_pump_cmd(struct BCState *bcs, u_char cmd, u_char para)
-{
- struct IsdnCardState *cs = bcs->cs;
- u_char dps = SET_DPS(bcs->hw.isar.dpath);
- u_char ctrl = 0, nom = 0, p1 = 0;
-
- switch (cmd) {
- case ISDN_FAX_CLASS1_FTM:
- test_and_clear_bit(BC_FLG_FRH_WAIT, &bcs->Flag);
- if (bcs->hw.isar.state == STFAX_READY) {
- p1 = para;
- ctrl = PCTRL_CMD_FTM;
- nom = 1;
- bcs->hw.isar.state = STFAX_LINE;
- bcs->hw.isar.cmd = ctrl;
- bcs->hw.isar.mod = para;
- bcs->hw.isar.newmod = 0;
- bcs->hw.isar.newcmd = 0;
- bcs->hw.isar.try_mod = 3;
- } else if ((bcs->hw.isar.state == STFAX_ACTIV) &&
- (bcs->hw.isar.cmd == PCTRL_CMD_FTM) &&
- (bcs->hw.isar.mod == para)) {
- ll_deliver_faxstat(bcs, ISDN_FAX_CLASS1_CONNECT);
- } else {
- bcs->hw.isar.newmod = para;
- bcs->hw.isar.newcmd = PCTRL_CMD_FTM;
- nom = 0;
- ctrl = PCTRL_CMD_ESC;
- bcs->hw.isar.state = STFAX_ESCAPE;
- }
- break;
- case ISDN_FAX_CLASS1_FTH:
- test_and_clear_bit(BC_FLG_FRH_WAIT, &bcs->Flag);
- if (bcs->hw.isar.state == STFAX_READY) {
- p1 = para;
- ctrl = PCTRL_CMD_FTH;
- nom = 1;
- bcs->hw.isar.state = STFAX_LINE;
- bcs->hw.isar.cmd = ctrl;
- bcs->hw.isar.mod = para;
- bcs->hw.isar.newmod = 0;
- bcs->hw.isar.newcmd = 0;
- bcs->hw.isar.try_mod = 3;
- } else if ((bcs->hw.isar.state == STFAX_ACTIV) &&
- (bcs->hw.isar.cmd == PCTRL_CMD_FTH) &&
- (bcs->hw.isar.mod == para)) {
- ll_deliver_faxstat(bcs, ISDN_FAX_CLASS1_CONNECT);
- } else {
- bcs->hw.isar.newmod = para;
- bcs->hw.isar.newcmd = PCTRL_CMD_FTH;
- nom = 0;
- ctrl = PCTRL_CMD_ESC;
- bcs->hw.isar.state = STFAX_ESCAPE;
- }
- break;
- case ISDN_FAX_CLASS1_FRM:
- test_and_clear_bit(BC_FLG_FRH_WAIT, &bcs->Flag);
- if (bcs->hw.isar.state == STFAX_READY) {
- p1 = para;
- ctrl = PCTRL_CMD_FRM;
- nom = 1;
- bcs->hw.isar.state = STFAX_LINE;
- bcs->hw.isar.cmd = ctrl;
- bcs->hw.isar.mod = para;
- bcs->hw.isar.newmod = 0;
- bcs->hw.isar.newcmd = 0;
- bcs->hw.isar.try_mod = 3;
- } else if ((bcs->hw.isar.state == STFAX_ACTIV) &&
- (bcs->hw.isar.cmd == PCTRL_CMD_FRM) &&
- (bcs->hw.isar.mod == para)) {
- ll_deliver_faxstat(bcs, ISDN_FAX_CLASS1_CONNECT);
- } else {
- bcs->hw.isar.newmod = para;
- bcs->hw.isar.newcmd = PCTRL_CMD_FRM;
- nom = 0;
- ctrl = PCTRL_CMD_ESC;
- bcs->hw.isar.state = STFAX_ESCAPE;
- }
- break;
- case ISDN_FAX_CLASS1_FRH:
- test_and_set_bit(BC_FLG_FRH_WAIT, &bcs->Flag);
- if (bcs->hw.isar.state == STFAX_READY) {
- p1 = para;
- ctrl = PCTRL_CMD_FRH;
- nom = 1;
- bcs->hw.isar.state = STFAX_LINE;
- bcs->hw.isar.cmd = ctrl;
- bcs->hw.isar.mod = para;
- bcs->hw.isar.newmod = 0;
- bcs->hw.isar.newcmd = 0;
- bcs->hw.isar.try_mod = 3;
- } else if ((bcs->hw.isar.state == STFAX_ACTIV) &&
- (bcs->hw.isar.cmd == PCTRL_CMD_FRH) &&
- (bcs->hw.isar.mod == para)) {
- ll_deliver_faxstat(bcs, ISDN_FAX_CLASS1_CONNECT);
- } else {
- bcs->hw.isar.newmod = para;
- bcs->hw.isar.newcmd = PCTRL_CMD_FRH;
- nom = 0;
- ctrl = PCTRL_CMD_ESC;
- bcs->hw.isar.state = STFAX_ESCAPE;
- }
- break;
- case ISDN_FAXPUMP_HALT:
- bcs->hw.isar.state = STFAX_NULL;
- nom = 0;
- ctrl = PCTRL_CMD_HALT;
- break;
- }
- if (ctrl)
- sendmsg(cs, dps | ISAR_HIS_PUMPCTRL, ctrl, nom, &p1);
-}
-
-static void
-isar_setup(struct IsdnCardState *cs)
-{
- u_char msg;
- int i;
-
- /* Dpath 1, 2 */
- msg = 61;
- for (i = 0; i < 2; i++) {
- /* Buffer Config */
- sendmsg(cs, (i ? ISAR_HIS_DPS2 : ISAR_HIS_DPS1) |
- ISAR_HIS_P12CFG, 4, 1, &msg);
- cs->bcs[i].hw.isar.mml = msg;
- cs->bcs[i].mode = 0;
- cs->bcs[i].hw.isar.dpath = i + 1;
- modeisar(&cs->bcs[i], 0, 0);
- INIT_WORK(&cs->bcs[i].tqueue, isar_bh);
- }
-}
-
-static void
-isar_l2l1(struct PStack *st, int pr, void *arg)
-{
- struct BCState *bcs = st->l1.bcs;
- struct sk_buff *skb = arg;
- int ret;
- u_long flags;
-
- switch (pr) {
- case (PH_DATA | REQUEST):
- spin_lock_irqsave(&bcs->cs->lock, flags);
- if (bcs->tx_skb) {
- skb_queue_tail(&bcs->squeue, skb);
- } else {
- bcs->tx_skb = skb;
- test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
- if (bcs->cs->debug & L1_DEB_HSCX)
- debugl1(bcs->cs, "DRQ set BC_FLG_BUSY");
- bcs->hw.isar.txcnt = 0;
- bcs->cs->BC_Send_Data(bcs);
- }
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
- break;
- case (PH_PULL | INDICATION):
- spin_lock_irqsave(&bcs->cs->lock, flags);
- if (bcs->tx_skb) {
- printk(KERN_WARNING "isar_l2l1: this shouldn't happen\n");
- } else {
- test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
- if (bcs->cs->debug & L1_DEB_HSCX)
- debugl1(bcs->cs, "PUI set BC_FLG_BUSY");
- bcs->tx_skb = skb;
- bcs->hw.isar.txcnt = 0;
- bcs->cs->BC_Send_Data(bcs);
- }
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
- break;
- case (PH_PULL | REQUEST):
- if (!bcs->tx_skb) {
- test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
- st->l1.l1l2(st, PH_PULL | CONFIRM, NULL);
- } else
- test_and_set_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
- break;
- case (PH_ACTIVATE | REQUEST):
- spin_lock_irqsave(&bcs->cs->lock, flags);
- test_and_set_bit(BC_FLG_ACTIV, &bcs->Flag);
- bcs->hw.isar.conmsg[0] = 0;
- if (test_bit(FLG_ORIG, &st->l2.flag))
- test_and_set_bit(BC_FLG_ORIG, &bcs->Flag);
- else
- test_and_clear_bit(BC_FLG_ORIG, &bcs->Flag);
- switch (st->l1.mode) {
- case L1_MODE_TRANS:
- case L1_MODE_HDLC:
- ret = modeisar(bcs, st->l1.mode, st->l1.bc);
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
- if (ret)
- l1_msg_b(st, PH_DEACTIVATE | REQUEST, arg);
- else
- l1_msg_b(st, PH_ACTIVATE | REQUEST, arg);
- break;
- case L1_MODE_V32:
- case L1_MODE_FAX:
- ret = modeisar(bcs, st->l1.mode, st->l1.bc);
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
- if (ret)
- l1_msg_b(st, PH_DEACTIVATE | REQUEST, arg);
- break;
- default:
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
- break;
- }
- break;
- case (PH_DEACTIVATE | REQUEST):
- l1_msg_b(st, pr, arg);
- break;
- case (PH_DEACTIVATE | CONFIRM):
- spin_lock_irqsave(&bcs->cs->lock, flags);
- switch (st->l1.mode) {
- case L1_MODE_TRANS:
- case L1_MODE_HDLC:
- case L1_MODE_V32:
- break;
- case L1_MODE_FAX:
- isar_pump_cmd(bcs, ISDN_FAXPUMP_HALT, 0);
- break;
- }
- test_and_clear_bit(BC_FLG_ACTIV, &bcs->Flag);
- test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
- if (bcs->cs->debug & L1_DEB_HSCX)
- debugl1(bcs->cs, "PDAC clear BC_FLG_BUSY");
- modeisar(bcs, 0, st->l1.bc);
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
- st->l1.l1l2(st, PH_DEACTIVATE | CONFIRM, NULL);
- break;
- }
-}
-
-static void
-close_isarstate(struct BCState *bcs)
-{
- modeisar(bcs, 0, bcs->channel);
- if (test_and_clear_bit(BC_FLG_INIT, &bcs->Flag)) {
- kfree(bcs->hw.isar.rcvbuf);
- bcs->hw.isar.rcvbuf = NULL;
- skb_queue_purge(&bcs->rqueue);
- skb_queue_purge(&bcs->squeue);
- if (bcs->tx_skb) {
- dev_kfree_skb_any(bcs->tx_skb);
- bcs->tx_skb = NULL;
- test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
- if (bcs->cs->debug & L1_DEB_HSCX)
- debugl1(bcs->cs, "closeisar clear BC_FLG_BUSY");
- }
- }
- del_timer(&bcs->hw.isar.ftimer);
-}
-
-static int
-open_isarstate(struct IsdnCardState *cs, struct BCState *bcs)
-{
- if (!test_and_set_bit(BC_FLG_INIT, &bcs->Flag)) {
- if (!(bcs->hw.isar.rcvbuf = kmalloc(HSCX_BUFMAX, GFP_ATOMIC))) {
- printk(KERN_WARNING
- "HiSax: No memory for isar.rcvbuf\n");
- return (1);
- }
- skb_queue_head_init(&bcs->rqueue);
- skb_queue_head_init(&bcs->squeue);
- }
- bcs->tx_skb = NULL;
- test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "openisar clear BC_FLG_BUSY");
- bcs->event = 0;
- bcs->hw.isar.rcvidx = 0;
- bcs->tx_cnt = 0;
- return (0);
-}
-
-static int
-setstack_isar(struct PStack *st, struct BCState *bcs)
-{
- bcs->channel = st->l1.bc;
- if (open_isarstate(st->l1.hardware, bcs))
- return (-1);
- st->l1.bcs = bcs;
- st->l2.l2l1 = isar_l2l1;
- setstack_manager(st);
- bcs->st = st;
- setstack_l1_B(st);
- return (0);
-}
-
-int
-isar_auxcmd(struct IsdnCardState *cs, isdn_ctrl *ic) {
- u_long adr;
- int features, i;
- struct BCState *bcs;
-
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "isar_auxcmd cmd/ch %x/%ld", ic->command, ic->arg);
- switch (ic->command) {
- case (ISDN_CMD_FAXCMD):
- bcs = cs->channel[ic->arg].bcs;
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "isar_auxcmd cmd/subcmd %d/%d",
- ic->parm.aux.cmd, ic->parm.aux.subcmd);
- switch (ic->parm.aux.cmd) {
- case ISDN_FAX_CLASS1_CTRL:
- if (ic->parm.aux.subcmd == ETX)
- test_and_set_bit(BC_FLG_DLEETX,
- &bcs->Flag);
- break;
- case ISDN_FAX_CLASS1_FTS:
- if (ic->parm.aux.subcmd == AT_QUERY) {
- ic->command = ISDN_STAT_FAXIND;
- ic->parm.aux.cmd = ISDN_FAX_CLASS1_OK;
- cs->iif.statcallb(ic);
- return (0);
- } else if (ic->parm.aux.subcmd == AT_EQ_QUERY) {
- strcpy(ic->parm.aux.para, "0-255");
- ic->command = ISDN_STAT_FAXIND;
- ic->parm.aux.cmd = ISDN_FAX_CLASS1_QUERY;
- cs->iif.statcallb(ic);
- return (0);
- } else if (ic->parm.aux.subcmd == AT_EQ_VALUE) {
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "isar_auxcmd %s=%d",
- FC1_CMD[ic->parm.aux.cmd], ic->parm.aux.para[0]);
- if (bcs->hw.isar.state == STFAX_READY) {
- if (!ic->parm.aux.para[0]) {
- ic->command = ISDN_STAT_FAXIND;
- ic->parm.aux.cmd = ISDN_FAX_CLASS1_OK;
- cs->iif.statcallb(ic);
- return (0);
- }
- if (!test_and_set_bit(BC_FLG_FTI_RUN, &bcs->Flag)) {
- /* n*10 ms */
- bcs->hw.isar.ftimer.expires =
- jiffies + ((ic->parm.aux.para[0] * 10 * HZ) / 1000);
- test_and_set_bit(BC_FLG_FTI_FTS, &bcs->Flag);
- add_timer(&bcs->hw.isar.ftimer);
- return (0);
- } else {
- if (cs->debug)
- debugl1(cs, "isar FTS=%d and FTI busy",
- ic->parm.aux.para[0]);
- }
- } else {
- if (cs->debug)
- debugl1(cs, "isar FTS=%d and isar.state not ready(%x)",
- ic->parm.aux.para[0], bcs->hw.isar.state);
- }
- ic->command = ISDN_STAT_FAXIND;
- ic->parm.aux.cmd = ISDN_FAX_CLASS1_ERROR;
- cs->iif.statcallb(ic);
- }
- break;
- case ISDN_FAX_CLASS1_FRM:
- case ISDN_FAX_CLASS1_FRH:
- case ISDN_FAX_CLASS1_FTM:
- case ISDN_FAX_CLASS1_FTH:
- if (ic->parm.aux.subcmd == AT_QUERY) {
- sprintf(ic->parm.aux.para,
- "%d", bcs->hw.isar.mod);
- ic->command = ISDN_STAT_FAXIND;
- ic->parm.aux.cmd = ISDN_FAX_CLASS1_QUERY;
- cs->iif.statcallb(ic);
- return (0);
- } else if (ic->parm.aux.subcmd == AT_EQ_QUERY) {
- char *p = ic->parm.aux.para;
- for (i = 0; i < FAXMODCNT; i++)
- if ((1 << i) & modmask)
- p += sprintf(p, "%d,", faxmodulation[i]);
- p--;
- *p = 0;
- ic->command = ISDN_STAT_FAXIND;
- ic->parm.aux.cmd = ISDN_FAX_CLASS1_QUERY;
- cs->iif.statcallb(ic);
- return (0);
- } else if (ic->parm.aux.subcmd == AT_EQ_VALUE) {
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "isar_auxcmd %s=%d",
- FC1_CMD[ic->parm.aux.cmd], ic->parm.aux.para[0]);
- for (i = 0; i < FAXMODCNT; i++)
- if (faxmodulation[i] == ic->parm.aux.para[0])
- break;
- if ((i < FAXMODCNT) && ((1 << i) & modmask) &&
- test_bit(BC_FLG_INIT, &bcs->Flag)) {
- isar_pump_cmd(bcs,
- ic->parm.aux.cmd,
- ic->parm.aux.para[0]);
- return (0);
- }
- }
- /* wrong modulation or not active */
- /* fall through */
- default:
- ic->command = ISDN_STAT_FAXIND;
- ic->parm.aux.cmd = ISDN_FAX_CLASS1_ERROR;
- cs->iif.statcallb(ic);
- }
- break;
- case (ISDN_CMD_IOCTL):
- switch (ic->arg) {
- case 9: /* load firmware */
- features = ISDN_FEATURE_L2_MODEM |
- ISDN_FEATURE_L2_FAX |
- ISDN_FEATURE_L3_FCLASS1;
- memcpy(&adr, ic->parm.num, sizeof(ulong));
- if (isar_load_firmware(cs, (u_char __user *)adr))
- return (1);
- else
- ll_run(cs, features);
- break;
- case 20:
- features = *(unsigned int *) ic->parm.num;
- printk(KERN_DEBUG "HiSax: max modulation old(%04x) new(%04x)\n",
- modmask, features);
- modmask = features;
- break;
- case 21:
- features = *(unsigned int *) ic->parm.num;
- printk(KERN_DEBUG "HiSax: FRM extra delay old(%d) new(%d) ms\n",
- frm_extra_delay, features);
- if (features >= 0)
- frm_extra_delay = features;
- break;
- case 22:
- features = *(unsigned int *) ic->parm.num;
- printk(KERN_DEBUG "HiSax: TOA old(%d) new(%d) db\n",
- para_TOA, features);
- if (features >= 0 && features < 32)
- para_TOA = features;
- break;
- default:
- printk(KERN_DEBUG "HiSax: invalid ioctl %d\n",
- (int) ic->arg);
- return (-EINVAL);
- }
- break;
- default:
- return (-EINVAL);
- }
- return (0);
-}
-
-void initisar(struct IsdnCardState *cs)
-{
- cs->bcs[0].BC_SetStack = setstack_isar;
- cs->bcs[1].BC_SetStack = setstack_isar;
- cs->bcs[0].BC_Close = close_isarstate;
- cs->bcs[1].BC_Close = close_isarstate;
- timer_setup(&cs->bcs[0].hw.isar.ftimer, ftimer_handler, 0);
- timer_setup(&cs->bcs[1].hw.isar.ftimer, ftimer_handler, 0);
-}
diff --git a/drivers/isdn/hisax/isar.h b/drivers/isdn/hisax/isar.h
deleted file mode 100644
index 0f4d101faf37..000000000000
--- a/drivers/isdn/hisax/isar.h
+++ /dev/null
@@ -1,222 +0,0 @@
-/* $Id: isar.h,v 1.11.2.2 2004/01/12 22:52:27 keil Exp $
- *
- * ISAR (Siemens PSB 7110) specific defines
- *
- * Author Karsten Keil
- * Copyright by Karsten Keil <keil@isdn4linux.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#define ISAR_IRQMSK 0x04
-#define ISAR_IRQSTA 0x04
-#define ISAR_IRQBIT 0x75
-#define ISAR_CTRL_H 0x61
-#define ISAR_CTRL_L 0x60
-#define ISAR_IIS 0x58
-#define ISAR_IIA 0x58
-#define ISAR_HIS 0x50
-#define ISAR_HIA 0x50
-#define ISAR_MBOX 0x4c
-#define ISAR_WADR 0x4a
-#define ISAR_RADR 0x48
-
-#define ISAR_HIS_VNR 0x14
-#define ISAR_HIS_DKEY 0x02
-#define ISAR_HIS_FIRM 0x1e
-#define ISAR_HIS_STDSP 0x08
-#define ISAR_HIS_DIAG 0x05
-#define ISAR_HIS_WAITSTATE 0x27
-#define ISAR_HIS_TIMERIRQ 0x25
-#define ISAR_HIS_P0CFG 0x3c
-#define ISAR_HIS_P12CFG 0x24
-#define ISAR_HIS_SARTCFG 0x25
-#define ISAR_HIS_PUMPCFG 0x26
-#define ISAR_HIS_PUMPCTRL 0x2a
-#define ISAR_HIS_IOM2CFG 0x27
-#define ISAR_HIS_IOM2REQ 0x07
-#define ISAR_HIS_IOM2CTRL 0x2b
-#define ISAR_HIS_BSTREQ 0x0c
-#define ISAR_HIS_PSTREQ 0x0e
-#define ISAR_HIS_SDATA 0x20
-#define ISAR_HIS_DPS1 0x40
-#define ISAR_HIS_DPS2 0x80
-#define SET_DPS(x) ((x << 6) & 0xc0)
-
-#define ISAR_CMD_TIMERIRQ_OFF 0x20
-#define ISAR_CMD_TIMERIRQ_ON 0x21
-
-
-#define ISAR_IIS_MSCMSD 0x3f
-#define ISAR_IIS_VNR 0x15
-#define ISAR_IIS_DKEY 0x03
-#define ISAR_IIS_FIRM 0x1f
-#define ISAR_IIS_STDSP 0x09
-#define ISAR_IIS_DIAG 0x25
-#define ISAR_IIS_GSTEV 0x00
-#define ISAR_IIS_BSTEV 0x28
-#define ISAR_IIS_BSTRSP 0x2c
-#define ISAR_IIS_PSTRSP 0x2e
-#define ISAR_IIS_PSTEV 0x2a
-#define ISAR_IIS_IOM2RSP 0x27
-#define ISAR_IIS_RDATA 0x20
-#define ISAR_IIS_INVMSG 0x3f
-
-#define ISAR_CTRL_SWVER 0x10
-#define ISAR_CTRL_STST 0x40
-
-#define ISAR_MSG_HWVER {0x20, 0, 1}
-
-#define ISAR_DP1_USE 1
-#define ISAR_DP2_USE 2
-#define ISAR_RATE_REQ 3
-
-#define PMOD_DISABLE 0
-#define PMOD_FAX 1
-#define PMOD_DATAMODEM 2
-#define PMOD_HALFDUPLEX 3
-#define PMOD_V110 4
-#define PMOD_DTMF 5
-#define PMOD_DTMF_TRANS 6
-#define PMOD_BYPASS 7
-
-#define PCTRL_ORIG 0x80
-#define PV32P2_V23R 0x40
-#define PV32P2_V22A 0x20
-#define PV32P2_V22B 0x10
-#define PV32P2_V22C 0x08
-#define PV32P2_V21 0x02
-#define PV32P2_BEL 0x01
-
-// LSB MSB in ISAR doc wrong !!! Arghhh
-#define PV32P3_AMOD 0x80
-#define PV32P3_V32B 0x02
-#define PV32P3_V23B 0x01
-#define PV32P4_48 0x11
-#define PV32P5_48 0x05
-#define PV32P4_UT48 0x11
-#define PV32P5_UT48 0x0d
-#define PV32P4_96 0x11
-#define PV32P5_96 0x03
-#define PV32P4_UT96 0x11
-#define PV32P5_UT96 0x0f
-#define PV32P4_B96 0x91
-#define PV32P5_B96 0x0b
-#define PV32P4_UTB96 0xd1
-#define PV32P5_UTB96 0x0f
-#define PV32P4_120 0xb1
-#define PV32P5_120 0x09
-#define PV32P4_UT120 0xf1
-#define PV32P5_UT120 0x0f
-#define PV32P4_144 0x99
-#define PV32P5_144 0x09
-#define PV32P4_UT144 0xf9
-#define PV32P5_UT144 0x0f
-#define PV32P6_CTN 0x01
-#define PV32P6_ATN 0x02
-
-#define PFAXP2_CTN 0x01
-#define PFAXP2_ATN 0x04
-
-#define PSEV_10MS_TIMER 0x02
-#define PSEV_CON_ON 0x18
-#define PSEV_CON_OFF 0x19
-#define PSEV_V24_OFF 0x20
-#define PSEV_CTS_ON 0x21
-#define PSEV_CTS_OFF 0x22
-#define PSEV_DCD_ON 0x23
-#define PSEV_DCD_OFF 0x24
-#define PSEV_DSR_ON 0x25
-#define PSEV_DSR_OFF 0x26
-#define PSEV_REM_RET 0xcc
-#define PSEV_REM_REN 0xcd
-#define PSEV_GSTN_CLR 0xd4
-
-#define PSEV_RSP_READY 0xbc
-#define PSEV_LINE_TX_H 0xb3
-#define PSEV_LINE_TX_B 0xb2
-#define PSEV_LINE_RX_H 0xb1
-#define PSEV_LINE_RX_B 0xb0
-#define PSEV_RSP_CONN 0xb5
-#define PSEV_RSP_DISC 0xb7
-#define PSEV_RSP_FCERR 0xb9
-#define PSEV_RSP_SILDET 0xbe
-#define PSEV_RSP_SILOFF 0xab
-#define PSEV_FLAGS_DET 0xba
-
-#define PCTRL_CMD_FTH 0xa7
-#define PCTRL_CMD_FRH 0xa5
-#define PCTRL_CMD_FTM 0xa8
-#define PCTRL_CMD_FRM 0xa6
-#define PCTRL_CMD_SILON 0xac
-#define PCTRL_CMD_CONT 0xa2
-#define PCTRL_CMD_ESC 0xa4
-#define PCTRL_CMD_SILOFF 0xab
-#define PCTRL_CMD_HALT 0xa9
-
-#define PCTRL_LOC_RET 0xcf
-#define PCTRL_LOC_REN 0xce
-
-#define SMODE_DISABLE 0
-#define SMODE_V14 2
-#define SMODE_HDLC 3
-#define SMODE_BINARY 4
-#define SMODE_FSK_V14 5
-
-#define SCTRL_HDMC_BOTH 0x00
-#define SCTRL_HDMC_DTX 0x80
-#define SCTRL_HDMC_DRX 0x40
-#define S_P1_OVSP 0x40
-#define S_P1_SNP 0x20
-#define S_P1_EOP 0x10
-#define S_P1_EDP 0x08
-#define S_P1_NSB 0x04
-#define S_P1_CHS_8 0x03
-#define S_P1_CHS_7 0x02
-#define S_P1_CHS_6 0x01
-#define S_P1_CHS_5 0x00
-
-#define S_P2_BFT_DEF 0x10
-
-#define IOM_CTRL_ENA 0x80
-#define IOM_CTRL_NOPCM 0x00
-#define IOM_CTRL_ALAW 0x02
-#define IOM_CTRL_ULAW 0x04
-#define IOM_CTRL_RCV 0x01
-
-#define IOM_P1_TXD 0x10
-
-#define HDLC_FED 0x40
-#define HDLC_FSD 0x20
-#define HDLC_FST 0x20
-#define HDLC_ERROR 0x1c
-#define HDLC_ERR_FAD 0x10
-#define HDLC_ERR_RER 0x08
-#define HDLC_ERR_CER 0x04
-#define SART_NMD 0x01
-
-#define BSTAT_RDM0 0x1
-#define BSTAT_RDM1 0x2
-#define BSTAT_RDM2 0x4
-#define BSTAT_RDM3 0x8
-#define BSTEV_TBO 0x1f
-#define BSTEV_RBO 0x2f
-
-/* FAX State Machine */
-#define STFAX_NULL 0
-#define STFAX_READY 1
-#define STFAX_LINE 2
-#define STFAX_CONT 3
-#define STFAX_ACTIV 4
-#define STFAX_ESCAPE 5
-#define STFAX_SILDET 6
-
-#define ISDN_FAXPUMP_HALT 100
-
-extern int ISARVersion(struct IsdnCardState *cs, char *s);
-extern void isar_int_main(struct IsdnCardState *cs);
-extern void initisar(struct IsdnCardState *cs);
-extern void isar_fill_fifo(struct BCState *bcs);
-extern int isar_auxcmd(struct IsdnCardState *cs, isdn_ctrl *ic);
diff --git a/drivers/isdn/hisax/isdnl1.c b/drivers/isdn/hisax/isdnl1.c
deleted file mode 100644
index a560842c0e48..000000000000
--- a/drivers/isdn/hisax/isdnl1.c
+++ /dev/null
@@ -1,930 +0,0 @@
-/* $Id: isdnl1.c,v 2.46.2.5 2004/02/11 13:21:34 keil Exp $
- *
- * common low level code for Siemens chipset-based ISDN cards
- *
- * Author Karsten Keil
- * based on the teles driver from Jan den Ouden
- * Copyright by Karsten Keil <keil@isdn4linux.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- * For changes and modifications please read
- * Documentation/isdn/HiSax.cert
- *
- * Thanks to Jan den Ouden
- * Fritz Elfert
- * Beat Doebeli
- *
- */
-
-#include <linux/init.h>
-#include <linux/gfp.h>
-#include "hisax.h"
-#include "isdnl1.h"
-
-const char *l1_revision = "$Revision: 2.46.2.5 $";
-
-#define TIMER3_VALUE 7000
-
-static struct Fsm l1fsm_b;
-static struct Fsm l1fsm_s;
-
-enum {
- ST_L1_F2,
- ST_L1_F3,
- ST_L1_F4,
- ST_L1_F5,
- ST_L1_F6,
- ST_L1_F7,
- ST_L1_F8,
-};
-
-#define L1S_STATE_COUNT (ST_L1_F8 + 1)
-
-static char *strL1SState[] =
-{
- "ST_L1_F2",
- "ST_L1_F3",
- "ST_L1_F4",
- "ST_L1_F5",
- "ST_L1_F6",
- "ST_L1_F7",
- "ST_L1_F8",
-};
-
-#ifdef HISAX_UINTERFACE
-static
-struct Fsm l1fsm_u =
-{NULL, 0, 0, NULL, NULL};
-
-enum {
- ST_L1_RESET,
- ST_L1_DEACT,
- ST_L1_SYNC2,
- ST_L1_TRANS,
-};
-
-#define L1U_STATE_COUNT (ST_L1_TRANS + 1)
-
-static char *strL1UState[] =
-{
- "ST_L1_RESET",
- "ST_L1_DEACT",
- "ST_L1_SYNC2",
- "ST_L1_TRANS",
-};
-#endif
-
-enum {
- ST_L1_NULL,
- ST_L1_WAIT_ACT,
- ST_L1_WAIT_DEACT,
- ST_L1_ACTIV,
-};
-
-#define L1B_STATE_COUNT (ST_L1_ACTIV + 1)
-
-static char *strL1BState[] =
-{
- "ST_L1_NULL",
- "ST_L1_WAIT_ACT",
- "ST_L1_WAIT_DEACT",
- "ST_L1_ACTIV",
-};
-
-enum {
- EV_PH_ACTIVATE,
- EV_PH_DEACTIVATE,
- EV_RESET_IND,
- EV_DEACT_CNF,
- EV_DEACT_IND,
- EV_POWER_UP,
- EV_RSYNC_IND,
- EV_INFO2_IND,
- EV_INFO4_IND,
- EV_TIMER_DEACT,
- EV_TIMER_ACT,
- EV_TIMER3,
-};
-
-#define L1_EVENT_COUNT (EV_TIMER3 + 1)
-
-static char *strL1Event[] =
-{
- "EV_PH_ACTIVATE",
- "EV_PH_DEACTIVATE",
- "EV_RESET_IND",
- "EV_DEACT_CNF",
- "EV_DEACT_IND",
- "EV_POWER_UP",
- "EV_RSYNC_IND",
- "EV_INFO2_IND",
- "EV_INFO4_IND",
- "EV_TIMER_DEACT",
- "EV_TIMER_ACT",
- "EV_TIMER3",
-};
-
-void
-debugl1(struct IsdnCardState *cs, char *fmt, ...)
-{
- va_list args;
- char tmp[8];
-
- va_start(args, fmt);
- sprintf(tmp, "Card%d ", cs->cardnr + 1);
- VHiSax_putstatus(cs, tmp, fmt, args);
- va_end(args);
-}
-
-static void
-l1m_debug(struct FsmInst *fi, char *fmt, ...)
-{
- va_list args;
- struct PStack *st = fi->userdata;
- struct IsdnCardState *cs = st->l1.hardware;
- char tmp[8];
-
- va_start(args, fmt);
- sprintf(tmp, "Card%d ", cs->cardnr + 1);
- VHiSax_putstatus(cs, tmp, fmt, args);
- va_end(args);
-}
-
-static void
-L1activated(struct IsdnCardState *cs)
-{
- struct PStack *st;
-
- st = cs->stlist;
- while (st) {
- if (test_and_clear_bit(FLG_L1_ACTIVATING, &st->l1.Flags))
- st->l1.l1l2(st, PH_ACTIVATE | CONFIRM, NULL);
- else
- st->l1.l1l2(st, PH_ACTIVATE | INDICATION, NULL);
- st = st->next;
- }
-}
-
-static void
-L1deactivated(struct IsdnCardState *cs)
-{
- struct PStack *st;
-
- st = cs->stlist;
- while (st) {
- if (test_bit(FLG_L1_DBUSY, &cs->HW_Flags))
- st->l1.l1l2(st, PH_PAUSE | CONFIRM, NULL);
- st->l1.l1l2(st, PH_DEACTIVATE | INDICATION, NULL);
- st = st->next;
- }
- test_and_clear_bit(FLG_L1_DBUSY, &cs->HW_Flags);
-}
-
-void
-DChannel_proc_xmt(struct IsdnCardState *cs)
-{
- struct PStack *stptr;
-
- if (cs->tx_skb)
- return;
-
- stptr = cs->stlist;
- while (stptr != NULL) {
- if (test_and_clear_bit(FLG_L1_PULL_REQ, &stptr->l1.Flags)) {
- stptr->l1.l1l2(stptr, PH_PULL | CONFIRM, NULL);
- break;
- } else
- stptr = stptr->next;
- }
-}
-
-void
-DChannel_proc_rcv(struct IsdnCardState *cs)
-{
- struct sk_buff *skb, *nskb;
- struct PStack *stptr = cs->stlist;
- int found, tei, sapi;
-
- if (stptr)
- if (test_bit(FLG_L1_ACTTIMER, &stptr->l1.Flags))
- FsmEvent(&stptr->l1.l1m, EV_TIMER_ACT, NULL);
- while ((skb = skb_dequeue(&cs->rq))) {
-#ifdef L2FRAME_DEBUG /* psa */
- if (cs->debug & L1_DEB_LAPD)
- Logl2Frame(cs, skb, "PH_DATA", 1);
-#endif
- stptr = cs->stlist;
- if (skb->len < 3) {
- debugl1(cs, "D-channel frame too short(%d)", skb->len);
- dev_kfree_skb(skb);
- return;
- }
- if ((skb->data[0] & 1) || !(skb->data[1] & 1)) {
- debugl1(cs, "D-channel frame wrong EA0/EA1");
- dev_kfree_skb(skb);
- return;
- }
- sapi = skb->data[0] >> 2;
- tei = skb->data[1] >> 1;
- if (cs->debug & DEB_DLOG_HEX)
- LogFrame(cs, skb->data, skb->len);
- if (cs->debug & DEB_DLOG_VERBOSE)
- dlogframe(cs, skb, 1);
- if (tei == GROUP_TEI) {
- if (sapi == CTRL_SAPI) { /* sapi 0 */
- while (stptr != NULL) {
- if ((nskb = skb_clone(skb, GFP_ATOMIC)))
- stptr->l1.l1l2(stptr, PH_DATA | INDICATION, nskb);
- else
- printk(KERN_WARNING "HiSax: isdn broadcast buffer shortage\n");
- stptr = stptr->next;
- }
- } else if (sapi == TEI_SAPI) {
- while (stptr != NULL) {
- if ((nskb = skb_clone(skb, GFP_ATOMIC)))
- stptr->l1.l1tei(stptr, PH_DATA | INDICATION, nskb);
- else
- printk(KERN_WARNING "HiSax: tei broadcast buffer shortage\n");
- stptr = stptr->next;
- }
- }
- dev_kfree_skb(skb);
- } else if (sapi == CTRL_SAPI) { /* sapi 0 */
- found = 0;
- while (stptr != NULL)
- if (tei == stptr->l2.tei) {
- stptr->l1.l1l2(stptr, PH_DATA | INDICATION, skb);
- found = !0;
- break;
- } else
- stptr = stptr->next;
- if (!found)
- dev_kfree_skb(skb);
- } else
- dev_kfree_skb(skb);
- }
-}
-
-static void
-BChannel_proc_xmt(struct BCState *bcs)
-{
- struct PStack *st = bcs->st;
-
- if (test_bit(BC_FLG_BUSY, &bcs->Flag)) {
- debugl1(bcs->cs, "BC_BUSY Error");
- return;
- }
-
- if (test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags))
- st->l1.l1l2(st, PH_PULL | CONFIRM, NULL);
- if (!test_bit(BC_FLG_ACTIV, &bcs->Flag)) {
- if (!test_bit(BC_FLG_BUSY, &bcs->Flag) &&
- skb_queue_empty(&bcs->squeue)) {
- st->l2.l2l1(st, PH_DEACTIVATE | CONFIRM, NULL);
- }
- }
-}
-
-static void
-BChannel_proc_rcv(struct BCState *bcs)
-{
- struct sk_buff *skb;
-
- if (bcs->st->l1.l1m.state == ST_L1_WAIT_ACT) {
- FsmDelTimer(&bcs->st->l1.timer, 4);
- FsmEvent(&bcs->st->l1.l1m, EV_TIMER_ACT, NULL);
- }
- while ((skb = skb_dequeue(&bcs->rqueue))) {
- bcs->st->l1.l1l2(bcs->st, PH_DATA | INDICATION, skb);
- }
-}
-
-static void
-BChannel_proc_ack(struct BCState *bcs)
-{
- u_long flags;
- int ack;
-
- spin_lock_irqsave(&bcs->aclock, flags);
- ack = bcs->ackcnt;
- bcs->ackcnt = 0;
- spin_unlock_irqrestore(&bcs->aclock, flags);
- if (ack)
- lli_writewakeup(bcs->st, ack);
-}
-
-void
-BChannel_bh(struct work_struct *work)
-{
- struct BCState *bcs = container_of(work, struct BCState, tqueue);
-
- if (!bcs)
- return;
- if (test_and_clear_bit(B_RCVBUFREADY, &bcs->event))
- BChannel_proc_rcv(bcs);
- if (test_and_clear_bit(B_XMTBUFREADY, &bcs->event))
- BChannel_proc_xmt(bcs);
- if (test_and_clear_bit(B_ACKPENDING, &bcs->event))
- BChannel_proc_ack(bcs);
-}
-
-void
-HiSax_addlist(struct IsdnCardState *cs,
- struct PStack *st)
-{
- st->next = cs->stlist;
- cs->stlist = st;
-}
-
-void
-HiSax_rmlist(struct IsdnCardState *cs,
- struct PStack *st)
-{
- struct PStack *p;
-
- FsmDelTimer(&st->l1.timer, 0);
- if (cs->stlist == st)
- cs->stlist = st->next;
- else {
- p = cs->stlist;
- while (p)
- if (p->next == st) {
- p->next = st->next;
- return;
- } else
- p = p->next;
- }
-}
-
-void
-init_bcstate(struct IsdnCardState *cs, int bc)
-{
- struct BCState *bcs = cs->bcs + bc;
-
- bcs->cs = cs;
- bcs->channel = bc;
- INIT_WORK(&bcs->tqueue, BChannel_bh);
- spin_lock_init(&bcs->aclock);
- bcs->BC_SetStack = NULL;
- bcs->BC_Close = NULL;
- bcs->Flag = 0;
-}
-
-#ifdef L2FRAME_DEBUG /* psa */
-
-static char *
-l2cmd(u_char cmd)
-{
- switch (cmd & ~0x10) {
- case 1:
- return "RR";
- case 5:
- return "RNR";
- case 9:
- return "REJ";
- case 0x6f:
- return "SABME";
- case 0x0f:
- return "DM";
- case 3:
- return "UI";
- case 0x43:
- return "DISC";
- case 0x63:
- return "UA";
- case 0x87:
- return "FRMR";
- case 0xaf:
- return "XID";
- default:
- if (!(cmd & 1))
- return "I";
- else
- return "invalid command";
- }
-}
-
-static char tmpdeb[32];
-
-static char *
-l2frames(u_char *ptr)
-{
- switch (ptr[2] & ~0x10) {
- case 1:
- case 5:
- case 9:
- sprintf(tmpdeb, "%s[%d](nr %d)", l2cmd(ptr[2]), ptr[3] & 1, ptr[3] >> 1);
- break;
- case 0x6f:
- case 0x0f:
- case 3:
- case 0x43:
- case 0x63:
- case 0x87:
- case 0xaf:
- sprintf(tmpdeb, "%s[%d]", l2cmd(ptr[2]), (ptr[2] & 0x10) >> 4);
- break;
- default:
- if (!(ptr[2] & 1)) {
- sprintf(tmpdeb, "I[%d](ns %d, nr %d)", ptr[3] & 1, ptr[2] >> 1, ptr[3] >> 1);
- break;
- } else
- return "invalid command";
- }
-
-
- return tmpdeb;
-}
-
-void
-Logl2Frame(struct IsdnCardState *cs, struct sk_buff *skb, char *buf, int dir)
-{
- u_char *ptr;
-
- ptr = skb->data;
-
- if (ptr[0] & 1 || !(ptr[1] & 1))
- debugl1(cs, "Address not LAPD");
- else
- debugl1(cs, "%s %s: %s%c (sapi %d, tei %d)",
- (dir ? "<-" : "->"), buf, l2frames(ptr),
- ((ptr[0] & 2) >> 1) == dir ? 'C' : 'R', ptr[0] >> 2, ptr[1] >> 1);
-}
-#endif
-
-static void
-l1_reset(struct FsmInst *fi, int event, void *arg)
-{
- FsmChangeState(fi, ST_L1_F3);
-}
-
-static void
-l1_deact_cnf(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
-
- FsmChangeState(fi, ST_L1_F3);
- if (test_bit(FLG_L1_ACTIVATING, &st->l1.Flags))
- st->l1.l1hw(st, HW_ENABLE | REQUEST, NULL);
-}
-
-static void
-l1_deact_req_s(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
-
- FsmChangeState(fi, ST_L1_F3);
- FsmRestartTimer(&st->l1.timer, 550, EV_TIMER_DEACT, NULL, 2);
- test_and_set_bit(FLG_L1_DEACTTIMER, &st->l1.Flags);
-}
-
-static void
-l1_power_up_s(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
-
- if (test_bit(FLG_L1_ACTIVATING, &st->l1.Flags)) {
- FsmChangeState(fi, ST_L1_F4);
- st->l1.l1hw(st, HW_INFO3 | REQUEST, NULL);
- FsmRestartTimer(&st->l1.timer, TIMER3_VALUE, EV_TIMER3, NULL, 2);
- test_and_set_bit(FLG_L1_T3RUN, &st->l1.Flags);
- } else
- FsmChangeState(fi, ST_L1_F3);
-}
-
-static void
-l1_go_F5(struct FsmInst *fi, int event, void *arg)
-{
- FsmChangeState(fi, ST_L1_F5);
-}
-
-static void
-l1_go_F8(struct FsmInst *fi, int event, void *arg)
-{
- FsmChangeState(fi, ST_L1_F8);
-}
-
-static void
-l1_info2_ind(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
-
-#ifdef HISAX_UINTERFACE
- if (test_bit(FLG_L1_UINT, &st->l1.Flags))
- FsmChangeState(fi, ST_L1_SYNC2);
- else
-#endif
- FsmChangeState(fi, ST_L1_F6);
- st->l1.l1hw(st, HW_INFO3 | REQUEST, NULL);
-}
-
-static void
-l1_info4_ind(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
-
-#ifdef HISAX_UINTERFACE
- if (test_bit(FLG_L1_UINT, &st->l1.Flags))
- FsmChangeState(fi, ST_L1_TRANS);
- else
-#endif
- FsmChangeState(fi, ST_L1_F7);
- st->l1.l1hw(st, HW_INFO3 | REQUEST, NULL);
- if (test_and_clear_bit(FLG_L1_DEACTTIMER, &st->l1.Flags))
- FsmDelTimer(&st->l1.timer, 4);
- if (!test_bit(FLG_L1_ACTIVATED, &st->l1.Flags)) {
- if (test_and_clear_bit(FLG_L1_T3RUN, &st->l1.Flags))
- FsmDelTimer(&st->l1.timer, 3);
- FsmRestartTimer(&st->l1.timer, 110, EV_TIMER_ACT, NULL, 2);
- test_and_set_bit(FLG_L1_ACTTIMER, &st->l1.Flags);
- }
-}
-
-static void
-l1_timer3(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
-
- test_and_clear_bit(FLG_L1_T3RUN, &st->l1.Flags);
- if (test_and_clear_bit(FLG_L1_ACTIVATING, &st->l1.Flags))
- L1deactivated(st->l1.hardware);
-
-#ifdef HISAX_UINTERFACE
- if (!test_bit(FLG_L1_UINT, &st->l1.Flags))
-#endif
- if (st->l1.l1m.state != ST_L1_F6) {
- FsmChangeState(fi, ST_L1_F3);
- st->l1.l1hw(st, HW_ENABLE | REQUEST, NULL);
- }
-}
-
-static void
-l1_timer_act(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
-
- test_and_clear_bit(FLG_L1_ACTTIMER, &st->l1.Flags);
- test_and_set_bit(FLG_L1_ACTIVATED, &st->l1.Flags);
- L1activated(st->l1.hardware);
-}
-
-static void
-l1_timer_deact(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
-
- test_and_clear_bit(FLG_L1_DEACTTIMER, &st->l1.Flags);
- test_and_clear_bit(FLG_L1_ACTIVATED, &st->l1.Flags);
- L1deactivated(st->l1.hardware);
- st->l1.l1hw(st, HW_DEACTIVATE | RESPONSE, NULL);
-}
-
-static void
-l1_activate_s(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
-
- st->l1.l1hw(st, HW_RESET | REQUEST, NULL);
-}
-
-static void
-l1_activate_no(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
-
- if ((!test_bit(FLG_L1_DEACTTIMER, &st->l1.Flags)) && (!test_bit(FLG_L1_T3RUN, &st->l1.Flags))) {
- test_and_clear_bit(FLG_L1_ACTIVATING, &st->l1.Flags);
- L1deactivated(st->l1.hardware);
- }
-}
-
-static struct FsmNode L1SFnList[] __initdata =
-{
- {ST_L1_F3, EV_PH_ACTIVATE, l1_activate_s},
- {ST_L1_F6, EV_PH_ACTIVATE, l1_activate_no},
- {ST_L1_F8, EV_PH_ACTIVATE, l1_activate_no},
- {ST_L1_F3, EV_RESET_IND, l1_reset},
- {ST_L1_F4, EV_RESET_IND, l1_reset},
- {ST_L1_F5, EV_RESET_IND, l1_reset},
- {ST_L1_F6, EV_RESET_IND, l1_reset},
- {ST_L1_F7, EV_RESET_IND, l1_reset},
- {ST_L1_F8, EV_RESET_IND, l1_reset},
- {ST_L1_F3, EV_DEACT_CNF, l1_deact_cnf},
- {ST_L1_F4, EV_DEACT_CNF, l1_deact_cnf},
- {ST_L1_F5, EV_DEACT_CNF, l1_deact_cnf},
- {ST_L1_F6, EV_DEACT_CNF, l1_deact_cnf},
- {ST_L1_F7, EV_DEACT_CNF, l1_deact_cnf},
- {ST_L1_F8, EV_DEACT_CNF, l1_deact_cnf},
- {ST_L1_F6, EV_DEACT_IND, l1_deact_req_s},
- {ST_L1_F7, EV_DEACT_IND, l1_deact_req_s},
- {ST_L1_F8, EV_DEACT_IND, l1_deact_req_s},
- {ST_L1_F3, EV_POWER_UP, l1_power_up_s},
- {ST_L1_F4, EV_RSYNC_IND, l1_go_F5},
- {ST_L1_F6, EV_RSYNC_IND, l1_go_F8},
- {ST_L1_F7, EV_RSYNC_IND, l1_go_F8},
- {ST_L1_F3, EV_INFO2_IND, l1_info2_ind},
- {ST_L1_F4, EV_INFO2_IND, l1_info2_ind},
- {ST_L1_F5, EV_INFO2_IND, l1_info2_ind},
- {ST_L1_F7, EV_INFO2_IND, l1_info2_ind},
- {ST_L1_F8, EV_INFO2_IND, l1_info2_ind},
- {ST_L1_F3, EV_INFO4_IND, l1_info4_ind},
- {ST_L1_F4, EV_INFO4_IND, l1_info4_ind},
- {ST_L1_F5, EV_INFO4_IND, l1_info4_ind},
- {ST_L1_F6, EV_INFO4_IND, l1_info4_ind},
- {ST_L1_F8, EV_INFO4_IND, l1_info4_ind},
- {ST_L1_F3, EV_TIMER3, l1_timer3},
- {ST_L1_F4, EV_TIMER3, l1_timer3},
- {ST_L1_F5, EV_TIMER3, l1_timer3},
- {ST_L1_F6, EV_TIMER3, l1_timer3},
- {ST_L1_F8, EV_TIMER3, l1_timer3},
- {ST_L1_F7, EV_TIMER_ACT, l1_timer_act},
- {ST_L1_F3, EV_TIMER_DEACT, l1_timer_deact},
- {ST_L1_F4, EV_TIMER_DEACT, l1_timer_deact},
- {ST_L1_F5, EV_TIMER_DEACT, l1_timer_deact},
- {ST_L1_F6, EV_TIMER_DEACT, l1_timer_deact},
- {ST_L1_F7, EV_TIMER_DEACT, l1_timer_deact},
- {ST_L1_F8, EV_TIMER_DEACT, l1_timer_deact},
-};
-
-#ifdef HISAX_UINTERFACE
-static void
-l1_deact_req_u(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
-
- FsmChangeState(fi, ST_L1_RESET);
- FsmRestartTimer(&st->l1.timer, 550, EV_TIMER_DEACT, NULL, 2);
- test_and_set_bit(FLG_L1_DEACTTIMER, &st->l1.Flags);
- st->l1.l1hw(st, HW_ENABLE | REQUEST, NULL);
-}
-
-static void
-l1_power_up_u(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
-
- FsmRestartTimer(&st->l1.timer, TIMER3_VALUE, EV_TIMER3, NULL, 2);
- test_and_set_bit(FLG_L1_T3RUN, &st->l1.Flags);
-}
-
-static void
-l1_info0_ind(struct FsmInst *fi, int event, void *arg)
-{
- FsmChangeState(fi, ST_L1_DEACT);
-}
-
-static void
-l1_activate_u(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
-
- st->l1.l1hw(st, HW_INFO1 | REQUEST, NULL);
-}
-
-static struct FsmNode L1UFnList[] __initdata =
-{
- {ST_L1_RESET, EV_DEACT_IND, l1_deact_req_u},
- {ST_L1_DEACT, EV_DEACT_IND, l1_deact_req_u},
- {ST_L1_SYNC2, EV_DEACT_IND, l1_deact_req_u},
- {ST_L1_TRANS, EV_DEACT_IND, l1_deact_req_u},
- {ST_L1_DEACT, EV_PH_ACTIVATE, l1_activate_u},
- {ST_L1_DEACT, EV_POWER_UP, l1_power_up_u},
- {ST_L1_DEACT, EV_INFO2_IND, l1_info2_ind},
- {ST_L1_TRANS, EV_INFO2_IND, l1_info2_ind},
- {ST_L1_RESET, EV_DEACT_CNF, l1_info0_ind},
- {ST_L1_DEACT, EV_INFO4_IND, l1_info4_ind},
- {ST_L1_SYNC2, EV_INFO4_IND, l1_info4_ind},
- {ST_L1_RESET, EV_INFO4_IND, l1_info4_ind},
- {ST_L1_DEACT, EV_TIMER3, l1_timer3},
- {ST_L1_SYNC2, EV_TIMER3, l1_timer3},
- {ST_L1_TRANS, EV_TIMER_ACT, l1_timer_act},
- {ST_L1_DEACT, EV_TIMER_DEACT, l1_timer_deact},
- {ST_L1_SYNC2, EV_TIMER_DEACT, l1_timer_deact},
- {ST_L1_RESET, EV_TIMER_DEACT, l1_timer_deact},
-};
-
-#endif
-
-static void
-l1b_activate(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
-
- FsmChangeState(fi, ST_L1_WAIT_ACT);
- FsmRestartTimer(&st->l1.timer, st->l1.delay, EV_TIMER_ACT, NULL, 2);
-}
-
-static void
-l1b_deactivate(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
-
- FsmChangeState(fi, ST_L1_WAIT_DEACT);
- FsmRestartTimer(&st->l1.timer, 10, EV_TIMER_DEACT, NULL, 2);
-}
-
-static void
-l1b_timer_act(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
-
- FsmChangeState(fi, ST_L1_ACTIV);
- st->l1.l1l2(st, PH_ACTIVATE | CONFIRM, NULL);
-}
-
-static void
-l1b_timer_deact(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
-
- FsmChangeState(fi, ST_L1_NULL);
- st->l2.l2l1(st, PH_DEACTIVATE | CONFIRM, NULL);
-}
-
-static struct FsmNode L1BFnList[] __initdata =
-{
- {ST_L1_NULL, EV_PH_ACTIVATE, l1b_activate},
- {ST_L1_WAIT_ACT, EV_TIMER_ACT, l1b_timer_act},
- {ST_L1_ACTIV, EV_PH_DEACTIVATE, l1b_deactivate},
- {ST_L1_WAIT_DEACT, EV_TIMER_DEACT, l1b_timer_deact},
-};
-
-int __init
-Isdnl1New(void)
-{
- int retval;
-
- l1fsm_s.state_count = L1S_STATE_COUNT;
- l1fsm_s.event_count = L1_EVENT_COUNT;
- l1fsm_s.strEvent = strL1Event;
- l1fsm_s.strState = strL1SState;
- retval = FsmNew(&l1fsm_s, L1SFnList, ARRAY_SIZE(L1SFnList));
- if (retval)
- return retval;
-
- l1fsm_b.state_count = L1B_STATE_COUNT;
- l1fsm_b.event_count = L1_EVENT_COUNT;
- l1fsm_b.strEvent = strL1Event;
- l1fsm_b.strState = strL1BState;
- retval = FsmNew(&l1fsm_b, L1BFnList, ARRAY_SIZE(L1BFnList));
- if (retval) {
- FsmFree(&l1fsm_s);
- return retval;
- }
-#ifdef HISAX_UINTERFACE
- l1fsm_u.state_count = L1U_STATE_COUNT;
- l1fsm_u.event_count = L1_EVENT_COUNT;
- l1fsm_u.strEvent = strL1Event;
- l1fsm_u.strState = strL1UState;
- retval = FsmNew(&l1fsm_u, L1UFnList, ARRAY_SIZE(L1UFnList));
- if (retval) {
- FsmFree(&l1fsm_s);
- FsmFree(&l1fsm_b);
- return retval;
- }
-#endif
- return 0;
-}
-
-void Isdnl1Free(void)
-{
-#ifdef HISAX_UINTERFACE
- FsmFree(&l1fsm_u);
-#endif
- FsmFree(&l1fsm_s);
- FsmFree(&l1fsm_b);
-}
-
-static void
-dch_l2l1(struct PStack *st, int pr, void *arg)
-{
- struct IsdnCardState *cs = (struct IsdnCardState *) st->l1.hardware;
-
- switch (pr) {
- case (PH_DATA | REQUEST):
- case (PH_PULL | REQUEST):
- case (PH_PULL | INDICATION):
- st->l1.l1hw(st, pr, arg);
- break;
- case (PH_ACTIVATE | REQUEST):
- if (cs->debug)
- debugl1(cs, "PH_ACTIVATE_REQ %s",
- st->l1.l1m.fsm->strState[st->l1.l1m.state]);
- if (test_bit(FLG_L1_ACTIVATED, &st->l1.Flags))
- st->l1.l1l2(st, PH_ACTIVATE | CONFIRM, NULL);
- else {
- test_and_set_bit(FLG_L1_ACTIVATING, &st->l1.Flags);
- FsmEvent(&st->l1.l1m, EV_PH_ACTIVATE, arg);
- }
- break;
- case (PH_TESTLOOP | REQUEST):
- if (1 & (long) arg)
- debugl1(cs, "PH_TEST_LOOP B1");
- if (2 & (long) arg)
- debugl1(cs, "PH_TEST_LOOP B2");
- if (!(3 & (long) arg))
- debugl1(cs, "PH_TEST_LOOP DISABLED");
- st->l1.l1hw(st, HW_TESTLOOP | REQUEST, arg);
- break;
- default:
- if (cs->debug)
- debugl1(cs, "dch_l2l1 msg %04X unhandled", pr);
- break;
- }
-}
-
-void
-l1_msg(struct IsdnCardState *cs, int pr, void *arg) {
- struct PStack *st;
-
- st = cs->stlist;
-
- while (st) {
- switch (pr) {
- case (HW_RESET | INDICATION):
- FsmEvent(&st->l1.l1m, EV_RESET_IND, arg);
- break;
- case (HW_DEACTIVATE | CONFIRM):
- FsmEvent(&st->l1.l1m, EV_DEACT_CNF, arg);
- break;
- case (HW_DEACTIVATE | INDICATION):
- FsmEvent(&st->l1.l1m, EV_DEACT_IND, arg);
- break;
- case (HW_POWERUP | CONFIRM):
- FsmEvent(&st->l1.l1m, EV_POWER_UP, arg);
- break;
- case (HW_RSYNC | INDICATION):
- FsmEvent(&st->l1.l1m, EV_RSYNC_IND, arg);
- break;
- case (HW_INFO2 | INDICATION):
- FsmEvent(&st->l1.l1m, EV_INFO2_IND, arg);
- break;
- case (HW_INFO4_P8 | INDICATION):
- case (HW_INFO4_P10 | INDICATION):
- FsmEvent(&st->l1.l1m, EV_INFO4_IND, arg);
- break;
- default:
- if (cs->debug)
- debugl1(cs, "%s %04X unhandled", __func__, pr);
- break;
- }
- st = st->next;
- }
-}
-
-void
-l1_msg_b(struct PStack *st, int pr, void *arg) {
- switch (pr) {
- case (PH_ACTIVATE | REQUEST):
- FsmEvent(&st->l1.l1m, EV_PH_ACTIVATE, NULL);
- break;
- case (PH_DEACTIVATE | REQUEST):
- FsmEvent(&st->l1.l1m, EV_PH_DEACTIVATE, NULL);
- break;
- }
-}
-
-void
-setstack_HiSax(struct PStack *st, struct IsdnCardState *cs)
-{
- st->l1.hardware = cs;
- st->protocol = cs->protocol;
- st->l1.l1m.fsm = &l1fsm_s;
- st->l1.l1m.state = ST_L1_F3;
- st->l1.Flags = 0;
-#ifdef HISAX_UINTERFACE
- if (test_bit(FLG_HW_L1_UINT, &cs->HW_Flags)) {
- st->l1.l1m.fsm = &l1fsm_u;
- st->l1.l1m.state = ST_L1_RESET;
- st->l1.Flags = FLG_L1_UINT;
- }
-#endif
- st->l1.l1m.debug = cs->debug;
- st->l1.l1m.userdata = st;
- st->l1.l1m.userint = 0;
- st->l1.l1m.printdebug = l1m_debug;
- FsmInitTimer(&st->l1.l1m, &st->l1.timer);
- setstack_tei(st);
- setstack_manager(st);
- st->l1.stlistp = &(cs->stlist);
- st->l2.l2l1 = dch_l2l1;
- if (cs->setstack_d)
- cs->setstack_d(st, cs);
-}
-
-void
-setstack_l1_B(struct PStack *st)
-{
- struct IsdnCardState *cs = st->l1.hardware;
-
- st->l1.l1m.fsm = &l1fsm_b;
- st->l1.l1m.state = ST_L1_NULL;
- st->l1.l1m.debug = cs->debug;
- st->l1.l1m.userdata = st;
- st->l1.l1m.userint = 0;
- st->l1.l1m.printdebug = l1m_debug;
- st->l1.Flags = 0;
- FsmInitTimer(&st->l1.l1m, &st->l1.timer);
-}
diff --git a/drivers/isdn/hisax/isdnl1.h b/drivers/isdn/hisax/isdnl1.h
deleted file mode 100644
index 66ddcab19bba..000000000000
--- a/drivers/isdn/hisax/isdnl1.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* $Id: isdnl1.h,v 2.12.2.3 2004/02/11 13:21:34 keil Exp $
- *
- * Layer 1 defines
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#define D_RCVBUFREADY 0
-#define D_XMTBUFREADY 1
-#define D_L1STATECHANGE 2
-#define D_CLEARBUSY 3
-#define D_RX_MON0 4
-#define D_RX_MON1 5
-#define D_TX_MON0 6
-#define D_TX_MON1 7
-#define E_RCVBUFREADY 8
-
-#define B_RCVBUFREADY 0
-#define B_XMTBUFREADY 1
-#define B_ACKPENDING 2
-
-__printf(2, 3)
-void debugl1(struct IsdnCardState *cs, char *fmt, ...);
-void DChannel_proc_xmt(struct IsdnCardState *cs);
-void DChannel_proc_rcv(struct IsdnCardState *cs);
-void l1_msg(struct IsdnCardState *cs, int pr, void *arg);
-void l1_msg_b(struct PStack *st, int pr, void *arg);
-void Logl2Frame(struct IsdnCardState *cs, struct sk_buff *skb, char *buf,
- int dir);
-void BChannel_bh(struct work_struct *work);
diff --git a/drivers/isdn/hisax/isdnl2.c b/drivers/isdn/hisax/isdnl2.c
deleted file mode 100644
index 1a40ed04cb52..000000000000
--- a/drivers/isdn/hisax/isdnl2.c
+++ /dev/null
@@ -1,1839 +0,0 @@
-/* $Id: isdnl2.c,v 2.30.2.4 2004/02/11 13:21:34 keil Exp $
- *
- * Author Karsten Keil
- * based on the teles driver from Jan den Ouden
- * Copyright by Karsten Keil <keil@isdn4linux.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- * For changes and modifications please read
- * Documentation/isdn/HiSax.cert
- *
- * Thanks to Jan den Ouden
- * Fritz Elfert
- *
- */
-
-#include <linux/init.h>
-#include <linux/gfp.h>
-#include "hisax.h"
-#include "isdnl2.h"
-
-const char *l2_revision = "$Revision: 2.30.2.4 $";
-
-static void l2m_debug(struct FsmInst *fi, char *fmt, ...);
-
-static struct Fsm l2fsm;
-
-enum {
- ST_L2_1,
- ST_L2_2,
- ST_L2_3,
- ST_L2_4,
- ST_L2_5,
- ST_L2_6,
- ST_L2_7,
- ST_L2_8,
-};
-
-#define L2_STATE_COUNT (ST_L2_8 + 1)
-
-static char *strL2State[] =
-{
- "ST_L2_1",
- "ST_L2_2",
- "ST_L2_3",
- "ST_L2_4",
- "ST_L2_5",
- "ST_L2_6",
- "ST_L2_7",
- "ST_L2_8",
-};
-
-enum {
- EV_L2_UI,
- EV_L2_SABME,
- EV_L2_DISC,
- EV_L2_DM,
- EV_L2_UA,
- EV_L2_FRMR,
- EV_L2_SUPER,
- EV_L2_I,
- EV_L2_DL_DATA,
- EV_L2_ACK_PULL,
- EV_L2_DL_UNIT_DATA,
- EV_L2_DL_ESTABLISH_REQ,
- EV_L2_DL_RELEASE_REQ,
- EV_L2_MDL_ASSIGN,
- EV_L2_MDL_REMOVE,
- EV_L2_MDL_ERROR,
- EV_L1_DEACTIVATE,
- EV_L2_T200,
- EV_L2_T203,
- EV_L2_SET_OWN_BUSY,
- EV_L2_CLEAR_OWN_BUSY,
- EV_L2_FRAME_ERROR,
-};
-
-#define L2_EVENT_COUNT (EV_L2_FRAME_ERROR + 1)
-
-static char *strL2Event[] =
-{
- "EV_L2_UI",
- "EV_L2_SABME",
- "EV_L2_DISC",
- "EV_L2_DM",
- "EV_L2_UA",
- "EV_L2_FRMR",
- "EV_L2_SUPER",
- "EV_L2_I",
- "EV_L2_DL_DATA",
- "EV_L2_ACK_PULL",
- "EV_L2_DL_UNIT_DATA",
- "EV_L2_DL_ESTABLISH_REQ",
- "EV_L2_DL_RELEASE_REQ",
- "EV_L2_MDL_ASSIGN",
- "EV_L2_MDL_REMOVE",
- "EV_L2_MDL_ERROR",
- "EV_L1_DEACTIVATE",
- "EV_L2_T200",
- "EV_L2_T203",
- "EV_L2_SET_OWN_BUSY",
- "EV_L2_CLEAR_OWN_BUSY",
- "EV_L2_FRAME_ERROR",
-};
-
-static int l2addrsize(struct Layer2 *l2);
-
-static void
-set_peer_busy(struct Layer2 *l2) {
- test_and_set_bit(FLG_PEER_BUSY, &l2->flag);
- if (!skb_queue_empty(&l2->i_queue) ||
- !skb_queue_empty(&l2->ui_queue))
- test_and_set_bit(FLG_L2BLOCK, &l2->flag);
-}
-
-static void
-clear_peer_busy(struct Layer2 *l2) {
- if (test_and_clear_bit(FLG_PEER_BUSY, &l2->flag))
- test_and_clear_bit(FLG_L2BLOCK, &l2->flag);
-}
-
-static void
-InitWin(struct Layer2 *l2)
-{
- int i;
-
- for (i = 0; i < MAX_WINDOW; i++)
- l2->windowar[i] = NULL;
-}
-
-static int
-freewin1(struct Layer2 *l2)
-{
- int i, cnt = 0;
-
- for (i = 0; i < MAX_WINDOW; i++) {
- if (l2->windowar[i]) {
- cnt++;
- dev_kfree_skb(l2->windowar[i]);
- l2->windowar[i] = NULL;
- }
- }
- return cnt;
-}
-
-static inline void
-freewin(struct PStack *st)
-{
- freewin1(&st->l2);
-}
-
-static void
-ReleaseWin(struct Layer2 *l2)
-{
- int cnt;
-
- if ((cnt = freewin1(l2)))
- printk(KERN_WARNING "isdl2 freed %d skbuffs in release\n", cnt);
-}
-
-static inline unsigned int
-cansend(struct PStack *st)
-{
- unsigned int p1;
-
- if (test_bit(FLG_MOD128, &st->l2.flag))
- p1 = (st->l2.vs - st->l2.va) % 128;
- else
- p1 = (st->l2.vs - st->l2.va) % 8;
- return ((p1 < st->l2.window) && !test_bit(FLG_PEER_BUSY, &st->l2.flag));
-}
-
-static inline void
-clear_exception(struct Layer2 *l2)
-{
- test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
- test_and_clear_bit(FLG_REJEXC, &l2->flag);
- test_and_clear_bit(FLG_OWN_BUSY, &l2->flag);
- clear_peer_busy(l2);
-}
-
-static inline int
-l2headersize(struct Layer2 *l2, int ui)
-{
- return (((test_bit(FLG_MOD128, &l2->flag) && (!ui)) ? 2 : 1) +
- (test_bit(FLG_LAPD, &l2->flag) ? 2 : 1));
-}
-
-inline int
-l2addrsize(struct Layer2 *l2)
-{
- return (test_bit(FLG_LAPD, &l2->flag) ? 2 : 1);
-}
-
-static int
-sethdraddr(struct Layer2 *l2, u_char *header, int rsp)
-{
- u_char *ptr = header;
- int crbit = rsp;
-
- if (test_bit(FLG_LAPD, &l2->flag)) {
- *ptr++ = (l2->sap << 2) | (rsp ? 2 : 0);
- *ptr++ = (l2->tei << 1) | 1;
- return (2);
- } else {
- if (test_bit(FLG_ORIG, &l2->flag))
- crbit = !crbit;
- if (crbit)
- *ptr++ = 1;
- else
- *ptr++ = 3;
- return (1);
- }
-}
-
-static inline void
-enqueue_super(struct PStack *st,
- struct sk_buff *skb)
-{
- if (test_bit(FLG_LAPB, &st->l2.flag))
- st->l1.bcs->tx_cnt += skb->len;
- st->l2.l2l1(st, PH_DATA | REQUEST, skb);
-}
-
-#define enqueue_ui(a, b) enqueue_super(a, b)
-
-static inline int
-IsUI(u_char *data)
-{
- return ((data[0] & 0xef) == UI);
-}
-
-static inline int
-IsUA(u_char *data)
-{
- return ((data[0] & 0xef) == UA);
-}
-
-static inline int
-IsDM(u_char *data)
-{
- return ((data[0] & 0xef) == DM);
-}
-
-static inline int
-IsDISC(u_char *data)
-{
- return ((data[0] & 0xef) == DISC);
-}
-
-static inline int
-IsSFrame(u_char *data, struct PStack *st)
-{
- register u_char d = *data;
-
- if (!test_bit(FLG_MOD128, &st->l2.flag))
- d &= 0xf;
- return (((d & 0xf3) == 1) && ((d & 0x0c) != 0x0c));
-}
-
-static inline int
-IsSABME(u_char *data, struct PStack *st)
-{
- u_char d = data[0] & ~0x10;
-
- return (test_bit(FLG_MOD128, &st->l2.flag) ? d == SABME : d == SABM);
-}
-
-static inline int
-IsREJ(u_char *data, struct PStack *st)
-{
- return (test_bit(FLG_MOD128, &st->l2.flag) ? data[0] == REJ : (data[0] & 0xf) == REJ);
-}
-
-static inline int
-IsFRMR(u_char *data)
-{
- return ((data[0] & 0xef) == FRMR);
-}
-
-static inline int
-IsRNR(u_char *data, struct PStack *st)
-{
- return (test_bit(FLG_MOD128, &st->l2.flag) ? data[0] == RNR : (data[0] & 0xf) == RNR);
-}
-
-static int
-iframe_error(struct PStack *st, struct sk_buff *skb)
-{
- int i = l2addrsize(&st->l2) + (test_bit(FLG_MOD128, &st->l2.flag) ? 2 : 1);
- int rsp = *skb->data & 0x2;
-
- if (test_bit(FLG_ORIG, &st->l2.flag))
- rsp = !rsp;
-
- if (rsp)
- return 'L';
-
-
- if (skb->len < i)
- return 'N';
-
- if ((skb->len - i) > st->l2.maxlen)
- return 'O';
-
-
- return 0;
-}
-
-static int
-super_error(struct PStack *st, struct sk_buff *skb)
-{
- if (skb->len != l2addrsize(&st->l2) +
- (test_bit(FLG_MOD128, &st->l2.flag) ? 2 : 1))
- return 'N';
-
- return 0;
-}
-
-static int
-unnum_error(struct PStack *st, struct sk_buff *skb, int wantrsp)
-{
- int rsp = (*skb->data & 0x2) >> 1;
- if (test_bit(FLG_ORIG, &st->l2.flag))
- rsp = !rsp;
-
- if (rsp != wantrsp)
- return 'L';
-
- if (skb->len != l2addrsize(&st->l2) + 1)
- return 'N';
-
- return 0;
-}
-
-static int
-UI_error(struct PStack *st, struct sk_buff *skb)
-{
- int rsp = *skb->data & 0x2;
- if (test_bit(FLG_ORIG, &st->l2.flag))
- rsp = !rsp;
-
- if (rsp)
- return 'L';
-
- if (skb->len > st->l2.maxlen + l2addrsize(&st->l2) + 1)
- return 'O';
-
- return 0;
-}
-
-static int
-FRMR_error(struct PStack *st, struct sk_buff *skb)
-{
- int headers = l2addrsize(&st->l2) + 1;
- u_char *datap = skb->data + headers;
- int rsp = *skb->data & 0x2;
-
- if (test_bit(FLG_ORIG, &st->l2.flag))
- rsp = !rsp;
-
- if (!rsp)
- return 'L';
-
- if (test_bit(FLG_MOD128, &st->l2.flag)) {
- if (skb->len < headers + 5)
- return 'N';
- else
- l2m_debug(&st->l2.l2m, "FRMR information %2x %2x %2x %2x %2x",
- datap[0], datap[1], datap[2],
- datap[3], datap[4]);
- } else {
- if (skb->len < headers + 3)
- return 'N';
- else
- l2m_debug(&st->l2.l2m, "FRMR information %2x %2x %2x",
- datap[0], datap[1], datap[2]);
- }
-
- return 0;
-}
-
-static unsigned int
-legalnr(struct PStack *st, unsigned int nr)
-{
- struct Layer2 *l2 = &st->l2;
-
- if (test_bit(FLG_MOD128, &l2->flag))
- return ((nr - l2->va) % 128) <= ((l2->vs - l2->va) % 128);
- else
- return ((nr - l2->va) % 8) <= ((l2->vs - l2->va) % 8);
-}
-
-static void
-setva(struct PStack *st, unsigned int nr)
-{
- struct Layer2 *l2 = &st->l2;
- int len;
- u_long flags;
-
- spin_lock_irqsave(&l2->lock, flags);
- while (l2->va != nr) {
- (l2->va)++;
- if (test_bit(FLG_MOD128, &l2->flag))
- l2->va %= 128;
- else
- l2->va %= 8;
- len = l2->windowar[l2->sow]->len;
- if (PACKET_NOACK == l2->windowar[l2->sow]->pkt_type)
- len = -1;
- dev_kfree_skb(l2->windowar[l2->sow]);
- l2->windowar[l2->sow] = NULL;
- l2->sow = (l2->sow + 1) % l2->window;
- spin_unlock_irqrestore(&l2->lock, flags);
- if (test_bit(FLG_LLI_L2WAKEUP, &st->lli.flag) && (len >= 0))
- lli_writewakeup(st, len);
- spin_lock_irqsave(&l2->lock, flags);
- }
- spin_unlock_irqrestore(&l2->lock, flags);
-}
-
-static void
-send_uframe(struct PStack *st, u_char cmd, u_char cr)
-{
- struct sk_buff *skb;
- u_char tmp[MAX_HEADER_LEN];
- int i;
-
- i = sethdraddr(&st->l2, tmp, cr);
- tmp[i++] = cmd;
- if (!(skb = alloc_skb(i, GFP_ATOMIC))) {
- printk(KERN_WARNING "isdl2 can't alloc sbbuff for send_uframe\n");
- return;
- }
- skb_put_data(skb, tmp, i);
- enqueue_super(st, skb);
-}
-
-static inline u_char
-get_PollFlag(struct PStack *st, struct sk_buff *skb)
-{
- return (skb->data[l2addrsize(&(st->l2))] & 0x10);
-}
-
-static inline u_char
-get_PollFlagFree(struct PStack *st, struct sk_buff *skb)
-{
- u_char PF;
-
- PF = get_PollFlag(st, skb);
- dev_kfree_skb(skb);
- return (PF);
-}
-
-static inline void
-start_t200(struct PStack *st, int i)
-{
- FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, i);
- test_and_set_bit(FLG_T200_RUN, &st->l2.flag);
-}
-
-static inline void
-restart_t200(struct PStack *st, int i)
-{
- FsmRestartTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, i);
- test_and_set_bit(FLG_T200_RUN, &st->l2.flag);
-}
-
-static inline void
-stop_t200(struct PStack *st, int i)
-{
- if (test_and_clear_bit(FLG_T200_RUN, &st->l2.flag))
- FsmDelTimer(&st->l2.t200, i);
-}
-
-static inline void
-st5_dl_release_l2l3(struct PStack *st)
-{
- int pr;
-
- if (test_and_clear_bit(FLG_PEND_REL, &st->l2.flag))
- pr = DL_RELEASE | CONFIRM;
- else
- pr = DL_RELEASE | INDICATION;
-
- st->l2.l2l3(st, pr, NULL);
-}
-
-static inline void
-lapb_dl_release_l2l3(struct PStack *st, int f)
-{
- if (test_bit(FLG_LAPB, &st->l2.flag))
- st->l2.l2l1(st, PH_DEACTIVATE | REQUEST, NULL);
- st->l2.l2l3(st, DL_RELEASE | f, NULL);
-}
-
-static void
-establishlink(struct FsmInst *fi)
-{
- struct PStack *st = fi->userdata;
- u_char cmd;
-
- clear_exception(&st->l2);
- st->l2.rc = 0;
- cmd = (test_bit(FLG_MOD128, &st->l2.flag) ? SABME : SABM) | 0x10;
- send_uframe(st, cmd, CMD);
- FsmDelTimer(&st->l2.t203, 1);
- restart_t200(st, 1);
- test_and_clear_bit(FLG_PEND_REL, &st->l2.flag);
- freewin(st);
- FsmChangeState(fi, ST_L2_5);
-}
-
-static void
-l2_mdl_error_ua(struct FsmInst *fi, int event, void *arg)
-{
- struct sk_buff *skb = arg;
- struct PStack *st = fi->userdata;
-
- if (get_PollFlagFree(st, skb))
- st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'C');
- else
- st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'D');
-}
-
-static void
-l2_mdl_error_dm(struct FsmInst *fi, int event, void *arg)
-{
- struct sk_buff *skb = arg;
- struct PStack *st = fi->userdata;
-
- if (get_PollFlagFree(st, skb))
- st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'B');
- else {
- st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'E');
- establishlink(fi);
- test_and_clear_bit(FLG_L3_INIT, &st->l2.flag);
- }
-}
-
-static void
-l2_st8_mdl_error_dm(struct FsmInst *fi, int event, void *arg)
-{
- struct sk_buff *skb = arg;
- struct PStack *st = fi->userdata;
-
- if (get_PollFlagFree(st, skb))
- st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'B');
- else {
- st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'E');
- }
- establishlink(fi);
- test_and_clear_bit(FLG_L3_INIT, &st->l2.flag);
-}
-
-static void
-l2_go_st3(struct FsmInst *fi, int event, void *arg)
-{
- FsmChangeState(fi, ST_L2_3);
-}
-
-static void
-l2_mdl_assign(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
-
- FsmChangeState(fi, ST_L2_3);
- st->l2.l2tei(st, MDL_ASSIGN | INDICATION, NULL);
-}
-
-static void
-l2_queue_ui_assign(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
- struct sk_buff *skb = arg;
-
- skb_queue_tail(&st->l2.ui_queue, skb);
- FsmChangeState(fi, ST_L2_2);
- st->l2.l2tei(st, MDL_ASSIGN | INDICATION, NULL);
-}
-
-static void
-l2_queue_ui(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
- struct sk_buff *skb = arg;
-
- skb_queue_tail(&st->l2.ui_queue, skb);
-}
-
-static void
-tx_ui(struct PStack *st)
-{
- struct sk_buff *skb;
- u_char header[MAX_HEADER_LEN];
- int i;
-
- i = sethdraddr(&(st->l2), header, CMD);
- header[i++] = UI;
- while ((skb = skb_dequeue(&st->l2.ui_queue))) {
- memcpy(skb_push(skb, i), header, i);
- enqueue_ui(st, skb);
- }
-}
-
-static void
-l2_send_ui(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
- struct sk_buff *skb = arg;
-
- skb_queue_tail(&st->l2.ui_queue, skb);
- tx_ui(st);
-}
-
-static void
-l2_got_ui(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
- struct sk_buff *skb = arg;
-
- skb_pull(skb, l2headersize(&st->l2, 1));
- st->l2.l2l3(st, DL_UNIT_DATA | INDICATION, skb);
-/* ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- * in states 1-3 for broadcast
- */
-
-
-}
-
-static void
-l2_establish(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
-
- establishlink(fi);
- test_and_set_bit(FLG_L3_INIT, &st->l2.flag);
-}
-
-static void
-l2_discard_i_setl3(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
-
- skb_queue_purge(&st->l2.i_queue);
- test_and_set_bit(FLG_L3_INIT, &st->l2.flag);
- test_and_clear_bit(FLG_PEND_REL, &st->l2.flag);
-}
-
-static void
-l2_l3_reestablish(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
-
- skb_queue_purge(&st->l2.i_queue);
- establishlink(fi);
- test_and_set_bit(FLG_L3_INIT, &st->l2.flag);
-}
-
-static void
-l2_release(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
-
- st->l2.l2l3(st, DL_RELEASE | CONFIRM, NULL);
-}
-
-static void
-l2_pend_rel(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
-
- test_and_set_bit(FLG_PEND_REL, &st->l2.flag);
-}
-
-static void
-l2_disconnect(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
-
- skb_queue_purge(&st->l2.i_queue);
- freewin(st);
- FsmChangeState(fi, ST_L2_6);
- st->l2.rc = 0;
- send_uframe(st, DISC | 0x10, CMD);
- FsmDelTimer(&st->l2.t203, 1);
- restart_t200(st, 2);
-}
-
-static void
-l2_start_multi(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
- struct sk_buff *skb = arg;
-
- send_uframe(st, UA | get_PollFlagFree(st, skb), RSP);
-
- clear_exception(&st->l2);
- st->l2.vs = 0;
- st->l2.va = 0;
- st->l2.vr = 0;
- st->l2.sow = 0;
- FsmChangeState(fi, ST_L2_7);
- FsmAddTimer(&st->l2.t203, st->l2.T203, EV_L2_T203, NULL, 3);
-
- st->l2.l2l3(st, DL_ESTABLISH | INDICATION, NULL);
-}
-
-static void
-l2_send_UA(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
- struct sk_buff *skb = arg;
-
- send_uframe(st, UA | get_PollFlagFree(st, skb), RSP);
-}
-
-static void
-l2_send_DM(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
- struct sk_buff *skb = arg;
-
- send_uframe(st, DM | get_PollFlagFree(st, skb), RSP);
-}
-
-static void
-l2_restart_multi(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
- struct sk_buff *skb = arg;
- int est = 0, state;
-
- state = fi->state;
-
- send_uframe(st, UA | get_PollFlagFree(st, skb), RSP);
-
- st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'F');
-
- if (st->l2.vs != st->l2.va) {
- skb_queue_purge(&st->l2.i_queue);
- est = 1;
- }
-
- clear_exception(&st->l2);
- st->l2.vs = 0;
- st->l2.va = 0;
- st->l2.vr = 0;
- st->l2.sow = 0;
- FsmChangeState(fi, ST_L2_7);
- stop_t200(st, 3);
- FsmRestartTimer(&st->l2.t203, st->l2.T203, EV_L2_T203, NULL, 3);
-
- if (est)
- st->l2.l2l3(st, DL_ESTABLISH | INDICATION, NULL);
-
- if ((ST_L2_7 == state) || (ST_L2_8 == state))
- if (!skb_queue_empty(&st->l2.i_queue) && cansend(st))
- st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
-}
-
-static void
-l2_stop_multi(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
- struct sk_buff *skb = arg;
-
- FsmChangeState(fi, ST_L2_4);
- FsmDelTimer(&st->l2.t203, 3);
- stop_t200(st, 4);
-
- send_uframe(st, UA | get_PollFlagFree(st, skb), RSP);
-
- skb_queue_purge(&st->l2.i_queue);
- freewin(st);
- lapb_dl_release_l2l3(st, INDICATION);
-}
-
-static void
-l2_connected(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
- struct sk_buff *skb = arg;
- int pr = -1;
-
- if (!get_PollFlag(st, skb)) {
- l2_mdl_error_ua(fi, event, arg);
- return;
- }
- dev_kfree_skb(skb);
-
- if (test_and_clear_bit(FLG_PEND_REL, &st->l2.flag))
- l2_disconnect(fi, event, arg);
-
- if (test_and_clear_bit(FLG_L3_INIT, &st->l2.flag)) {
- pr = DL_ESTABLISH | CONFIRM;
- } else if (st->l2.vs != st->l2.va) {
- skb_queue_purge(&st->l2.i_queue);
- pr = DL_ESTABLISH | INDICATION;
- }
-
- stop_t200(st, 5);
-
- st->l2.vr = 0;
- st->l2.vs = 0;
- st->l2.va = 0;
- st->l2.sow = 0;
- FsmChangeState(fi, ST_L2_7);
- FsmAddTimer(&st->l2.t203, st->l2.T203, EV_L2_T203, NULL, 4);
-
- if (pr != -1)
- st->l2.l2l3(st, pr, NULL);
-
- if (!skb_queue_empty(&st->l2.i_queue) && cansend(st))
- st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
-}
-
-static void
-l2_released(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
- struct sk_buff *skb = arg;
-
- if (!get_PollFlag(st, skb)) {
- l2_mdl_error_ua(fi, event, arg);
- return;
- }
- dev_kfree_skb(skb);
-
- stop_t200(st, 6);
- lapb_dl_release_l2l3(st, CONFIRM);
- FsmChangeState(fi, ST_L2_4);
-}
-
-static void
-l2_reestablish(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
- struct sk_buff *skb = arg;
-
- if (!get_PollFlagFree(st, skb)) {
- establishlink(fi);
- test_and_set_bit(FLG_L3_INIT, &st->l2.flag);
- }
-}
-
-static void
-l2_st5_dm_release(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
- struct sk_buff *skb = arg;
-
- if (get_PollFlagFree(st, skb)) {
- stop_t200(st, 7);
- if (!test_bit(FLG_L3_INIT, &st->l2.flag))
- skb_queue_purge(&st->l2.i_queue);
- if (test_bit(FLG_LAPB, &st->l2.flag))
- st->l2.l2l1(st, PH_DEACTIVATE | REQUEST, NULL);
- st5_dl_release_l2l3(st);
- FsmChangeState(fi, ST_L2_4);
- }
-}
-
-static void
-l2_st6_dm_release(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
- struct sk_buff *skb = arg;
-
- if (get_PollFlagFree(st, skb)) {
- stop_t200(st, 8);
- lapb_dl_release_l2l3(st, CONFIRM);
- FsmChangeState(fi, ST_L2_4);
- }
-}
-
-static inline void
-enquiry_cr(struct PStack *st, u_char typ, u_char cr, u_char pf)
-{
- struct sk_buff *skb;
- struct Layer2 *l2;
- u_char tmp[MAX_HEADER_LEN];
- int i;
-
- l2 = &st->l2;
- i = sethdraddr(l2, tmp, cr);
- if (test_bit(FLG_MOD128, &l2->flag)) {
- tmp[i++] = typ;
- tmp[i++] = (l2->vr << 1) | (pf ? 1 : 0);
- } else
- tmp[i++] = (l2->vr << 5) | typ | (pf ? 0x10 : 0);
- if (!(skb = alloc_skb(i, GFP_ATOMIC))) {
- printk(KERN_WARNING "isdl2 can't alloc sbbuff for enquiry_cr\n");
- return;
- }
- skb_put_data(skb, tmp, i);
- enqueue_super(st, skb);
-}
-
-static inline void
-enquiry_response(struct PStack *st)
-{
- if (test_bit(FLG_OWN_BUSY, &st->l2.flag))
- enquiry_cr(st, RNR, RSP, 1);
- else
- enquiry_cr(st, RR, RSP, 1);
- test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag);
-}
-
-static inline void
-transmit_enquiry(struct PStack *st)
-{
- if (test_bit(FLG_OWN_BUSY, &st->l2.flag))
- enquiry_cr(st, RNR, CMD, 1);
- else
- enquiry_cr(st, RR, CMD, 1);
- test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag);
- start_t200(st, 9);
-}
-
-
-static void
-nrerrorrecovery(struct FsmInst *fi)
-{
- struct PStack *st = fi->userdata;
-
- st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'J');
- establishlink(fi);
- test_and_clear_bit(FLG_L3_INIT, &st->l2.flag);
-}
-
-static void
-invoke_retransmission(struct PStack *st, unsigned int nr)
-{
- struct Layer2 *l2 = &st->l2;
- u_int p1;
- u_long flags;
-
- spin_lock_irqsave(&l2->lock, flags);
- if (l2->vs != nr) {
- while (l2->vs != nr) {
- (l2->vs)--;
- if (test_bit(FLG_MOD128, &l2->flag)) {
- l2->vs %= 128;
- p1 = (l2->vs - l2->va) % 128;
- } else {
- l2->vs %= 8;
- p1 = (l2->vs - l2->va) % 8;
- }
- p1 = (p1 + l2->sow) % l2->window;
- if (test_bit(FLG_LAPB, &l2->flag))
- st->l1.bcs->tx_cnt += l2->windowar[p1]->len + l2headersize(l2, 0);
- skb_queue_head(&l2->i_queue, l2->windowar[p1]);
- l2->windowar[p1] = NULL;
- }
- spin_unlock_irqrestore(&l2->lock, flags);
- st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
- return;
- }
- spin_unlock_irqrestore(&l2->lock, flags);
-}
-
-static void
-l2_st7_got_super(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
- struct sk_buff *skb = arg;
- int PollFlag, rsp, typ = RR;
- unsigned int nr;
- struct Layer2 *l2 = &st->l2;
-
- rsp = *skb->data & 0x2;
- if (test_bit(FLG_ORIG, &l2->flag))
- rsp = !rsp;
-
- skb_pull(skb, l2addrsize(l2));
- if (IsRNR(skb->data, st)) {
- set_peer_busy(l2);
- typ = RNR;
- } else
- clear_peer_busy(l2);
- if (IsREJ(skb->data, st))
- typ = REJ;
-
- if (test_bit(FLG_MOD128, &l2->flag)) {
- PollFlag = (skb->data[1] & 0x1) == 0x1;
- nr = skb->data[1] >> 1;
- } else {
- PollFlag = (skb->data[0] & 0x10);
- nr = (skb->data[0] >> 5) & 0x7;
- }
- dev_kfree_skb(skb);
-
- if (PollFlag) {
- if (rsp)
- st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'A');
- else
- enquiry_response(st);
- }
- if (legalnr(st, nr)) {
- if (typ == REJ) {
- setva(st, nr);
- invoke_retransmission(st, nr);
- stop_t200(st, 10);
- if (FsmAddTimer(&st->l2.t203, st->l2.T203,
- EV_L2_T203, NULL, 6))
- l2m_debug(&st->l2.l2m, "Restart T203 ST7 REJ");
- } else if ((nr == l2->vs) && (typ == RR)) {
- setva(st, nr);
- stop_t200(st, 11);
- FsmRestartTimer(&st->l2.t203, st->l2.T203,
- EV_L2_T203, NULL, 7);
- } else if ((l2->va != nr) || (typ == RNR)) {
- setva(st, nr);
- if (typ != RR) FsmDelTimer(&st->l2.t203, 9);
- restart_t200(st, 12);
- }
- if (!skb_queue_empty(&st->l2.i_queue) && (typ == RR))
- st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
- } else
- nrerrorrecovery(fi);
-}
-
-static void
-l2_feed_i_if_reest(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
- struct sk_buff *skb = arg;
-
- if (test_bit(FLG_LAPB, &st->l2.flag))
- st->l1.bcs->tx_cnt += skb->len + l2headersize(&st->l2, 0);
- if (!test_bit(FLG_L3_INIT, &st->l2.flag))
- skb_queue_tail(&st->l2.i_queue, skb);
- else
- dev_kfree_skb(skb);
-}
-
-static void
-l2_feed_i_pull(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
- struct sk_buff *skb = arg;
-
- if (test_bit(FLG_LAPB, &st->l2.flag))
- st->l1.bcs->tx_cnt += skb->len + l2headersize(&st->l2, 0);
- skb_queue_tail(&st->l2.i_queue, skb);
- st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
-}
-
-static void
-l2_feed_iqueue(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
- struct sk_buff *skb = arg;
-
- if (test_bit(FLG_LAPB, &st->l2.flag))
- st->l1.bcs->tx_cnt += skb->len + l2headersize(&st->l2, 0);
- skb_queue_tail(&st->l2.i_queue, skb);
-}
-
-static void
-l2_got_iframe(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
- struct sk_buff *skb = arg;
- struct Layer2 *l2 = &(st->l2);
- int PollFlag, ns, i;
- unsigned int nr;
-
- i = l2addrsize(l2);
- if (test_bit(FLG_MOD128, &l2->flag)) {
- PollFlag = ((skb->data[i + 1] & 0x1) == 0x1);
- ns = skb->data[i] >> 1;
- nr = (skb->data[i + 1] >> 1) & 0x7f;
- } else {
- PollFlag = (skb->data[i] & 0x10);
- ns = (skb->data[i] >> 1) & 0x7;
- nr = (skb->data[i] >> 5) & 0x7;
- }
- if (test_bit(FLG_OWN_BUSY, &l2->flag)) {
- dev_kfree_skb(skb);
- if (PollFlag) enquiry_response(st);
- } else if (l2->vr == ns) {
- (l2->vr)++;
- if (test_bit(FLG_MOD128, &l2->flag))
- l2->vr %= 128;
- else
- l2->vr %= 8;
- test_and_clear_bit(FLG_REJEXC, &l2->flag);
-
- if (PollFlag)
- enquiry_response(st);
- else
- test_and_set_bit(FLG_ACK_PEND, &l2->flag);
- skb_pull(skb, l2headersize(l2, 0));
- st->l2.l2l3(st, DL_DATA | INDICATION, skb);
- } else {
- /* n(s)!=v(r) */
- dev_kfree_skb(skb);
- if (test_and_set_bit(FLG_REJEXC, &l2->flag)) {
- if (PollFlag)
- enquiry_response(st);
- } else {
- enquiry_cr(st, REJ, RSP, PollFlag);
- test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
- }
- }
-
- if (legalnr(st, nr)) {
- if (!test_bit(FLG_PEER_BUSY, &st->l2.flag) && (fi->state == ST_L2_7)) {
- if (nr == st->l2.vs) {
- stop_t200(st, 13);
- FsmRestartTimer(&st->l2.t203, st->l2.T203,
- EV_L2_T203, NULL, 7);
- } else if (nr != st->l2.va)
- restart_t200(st, 14);
- }
- setva(st, nr);
- } else {
- nrerrorrecovery(fi);
- return;
- }
-
- if (!skb_queue_empty(&st->l2.i_queue) && (fi->state == ST_L2_7))
- st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
- if (test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag))
- enquiry_cr(st, RR, RSP, 0);
-}
-
-static void
-l2_got_tei(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
-
- st->l2.tei = (long) arg;
-
- if (fi->state == ST_L2_3) {
- establishlink(fi);
- test_and_set_bit(FLG_L3_INIT, &st->l2.flag);
- } else
- FsmChangeState(fi, ST_L2_4);
- if (!skb_queue_empty(&st->l2.ui_queue))
- tx_ui(st);
-}
-
-static void
-l2_st5_tout_200(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
-
- if (test_bit(FLG_LAPD, &st->l2.flag) &&
- test_bit(FLG_DCHAN_BUSY, &st->l2.flag)) {
- FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, 9);
- } else if (st->l2.rc == st->l2.N200) {
- FsmChangeState(fi, ST_L2_4);
- test_and_clear_bit(FLG_T200_RUN, &st->l2.flag);
- skb_queue_purge(&st->l2.i_queue);
- st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'G');
- if (test_bit(FLG_LAPB, &st->l2.flag))
- st->l2.l2l1(st, PH_DEACTIVATE | REQUEST, NULL);
- st5_dl_release_l2l3(st);
- } else {
- st->l2.rc++;
- FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, 9);
- send_uframe(st, (test_bit(FLG_MOD128, &st->l2.flag) ? SABME : SABM)
- | 0x10, CMD);
- }
-}
-
-static void
-l2_st6_tout_200(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
-
- if (test_bit(FLG_LAPD, &st->l2.flag) &&
- test_bit(FLG_DCHAN_BUSY, &st->l2.flag)) {
- FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, 9);
- } else if (st->l2.rc == st->l2.N200) {
- FsmChangeState(fi, ST_L2_4);
- test_and_clear_bit(FLG_T200_RUN, &st->l2.flag);
- st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'H');
- lapb_dl_release_l2l3(st, CONFIRM);
- } else {
- st->l2.rc++;
- FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200,
- NULL, 9);
- send_uframe(st, DISC | 0x10, CMD);
- }
-}
-
-static void
-l2_st7_tout_200(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
-
- if (test_bit(FLG_LAPD, &st->l2.flag) &&
- test_bit(FLG_DCHAN_BUSY, &st->l2.flag)) {
- FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, 9);
- return;
- }
- test_and_clear_bit(FLG_T200_RUN, &st->l2.flag);
- st->l2.rc = 0;
- FsmChangeState(fi, ST_L2_8);
-
- transmit_enquiry(st);
- st->l2.rc++;
-}
-
-static void
-l2_st8_tout_200(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
-
- if (test_bit(FLG_LAPD, &st->l2.flag) &&
- test_bit(FLG_DCHAN_BUSY, &st->l2.flag)) {
- FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, 9);
- return;
- }
- test_and_clear_bit(FLG_T200_RUN, &st->l2.flag);
- if (st->l2.rc == st->l2.N200) {
- st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'I');
- establishlink(fi);
- test_and_clear_bit(FLG_L3_INIT, &st->l2.flag);
- } else {
- transmit_enquiry(st);
- st->l2.rc++;
- }
-}
-
-static void
-l2_st7_tout_203(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
-
- if (test_bit(FLG_LAPD, &st->l2.flag) &&
- test_bit(FLG_DCHAN_BUSY, &st->l2.flag)) {
- FsmAddTimer(&st->l2.t203, st->l2.T203, EV_L2_T203, NULL, 9);
- return;
- }
- FsmChangeState(fi, ST_L2_8);
- transmit_enquiry(st);
- st->l2.rc = 0;
-}
-
-static void
-l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
- struct sk_buff *skb, *nskb;
- struct Layer2 *l2 = &st->l2;
- u_char header[MAX_HEADER_LEN];
- int i, hdr_space_needed;
- int unsigned p1;
- u_long flags;
-
- if (!cansend(st))
- return;
-
- skb = skb_dequeue(&l2->i_queue);
- if (!skb)
- return;
-
- hdr_space_needed = l2headersize(l2, 0);
- nskb = skb_realloc_headroom(skb, hdr_space_needed);
- if (!nskb) {
- skb_queue_head(&l2->i_queue, skb);
- return;
- }
- spin_lock_irqsave(&l2->lock, flags);
- if (test_bit(FLG_MOD128, &l2->flag))
- p1 = (l2->vs - l2->va) % 128;
- else
- p1 = (l2->vs - l2->va) % 8;
- p1 = (p1 + l2->sow) % l2->window;
- if (l2->windowar[p1]) {
- printk(KERN_WARNING "isdnl2 try overwrite ack queue entry %d\n",
- p1);
- dev_kfree_skb(l2->windowar[p1]);
- }
- l2->windowar[p1] = skb;
-
- i = sethdraddr(&st->l2, header, CMD);
-
- if (test_bit(FLG_MOD128, &l2->flag)) {
- header[i++] = l2->vs << 1;
- header[i++] = l2->vr << 1;
- l2->vs = (l2->vs + 1) % 128;
- } else {
- header[i++] = (l2->vr << 5) | (l2->vs << 1);
- l2->vs = (l2->vs + 1) % 8;
- }
- spin_unlock_irqrestore(&l2->lock, flags);
- memcpy(skb_push(nskb, i), header, i);
- st->l2.l2l1(st, PH_PULL | INDICATION, nskb);
- test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag);
- if (!test_and_set_bit(FLG_T200_RUN, &st->l2.flag)) {
- FsmDelTimer(&st->l2.t203, 13);
- FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, 11);
- }
- if (!skb_queue_empty(&l2->i_queue) && cansend(st))
- st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
-}
-
-static void
-l2_st8_got_super(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
- struct sk_buff *skb = arg;
- int PollFlag, rsp, rnr = 0;
- unsigned int nr;
- struct Layer2 *l2 = &st->l2;
-
- rsp = *skb->data & 0x2;
- if (test_bit(FLG_ORIG, &l2->flag))
- rsp = !rsp;
-
- skb_pull(skb, l2addrsize(l2));
-
- if (IsRNR(skb->data, st)) {
- set_peer_busy(l2);
- rnr = 1;
- } else
- clear_peer_busy(l2);
-
- if (test_bit(FLG_MOD128, &l2->flag)) {
- PollFlag = (skb->data[1] & 0x1) == 0x1;
- nr = skb->data[1] >> 1;
- } else {
- PollFlag = (skb->data[0] & 0x10);
- nr = (skb->data[0] >> 5) & 0x7;
- }
- dev_kfree_skb(skb);
-
- if (rsp && PollFlag) {
- if (legalnr(st, nr)) {
- if (rnr) {
- restart_t200(st, 15);
- } else {
- stop_t200(st, 16);
- FsmAddTimer(&l2->t203, l2->T203,
- EV_L2_T203, NULL, 5);
- setva(st, nr);
- }
- invoke_retransmission(st, nr);
- FsmChangeState(fi, ST_L2_7);
- if (!skb_queue_empty(&l2->i_queue) && cansend(st))
- st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
- } else
- nrerrorrecovery(fi);
- } else {
- if (!rsp && PollFlag)
- enquiry_response(st);
- if (legalnr(st, nr)) {
- setva(st, nr);
- } else
- nrerrorrecovery(fi);
- }
-}
-
-static void
-l2_got_FRMR(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
- struct sk_buff *skb = arg;
-
- skb_pull(skb, l2addrsize(&st->l2) + 1);
-
- if (!(skb->data[0] & 1) || ((skb->data[0] & 3) == 1) || /* I or S */
- (IsUA(skb->data) && (fi->state == ST_L2_7))) {
- st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'K');
- establishlink(fi);
- test_and_clear_bit(FLG_L3_INIT, &st->l2.flag);
- }
- dev_kfree_skb(skb);
-}
-
-static void
-l2_st24_tei_remove(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
-
- skb_queue_purge(&st->l2.ui_queue);
- st->l2.tei = -1;
- FsmChangeState(fi, ST_L2_1);
-}
-
-static void
-l2_st3_tei_remove(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
-
- skb_queue_purge(&st->l2.ui_queue);
- st->l2.tei = -1;
- st->l2.l2l3(st, DL_RELEASE | INDICATION, NULL);
- FsmChangeState(fi, ST_L2_1);
-}
-
-static void
-l2_st5_tei_remove(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
-
- skb_queue_purge(&st->l2.i_queue);
- skb_queue_purge(&st->l2.ui_queue);
- freewin(st);
- st->l2.tei = -1;
- stop_t200(st, 17);
- st5_dl_release_l2l3(st);
- FsmChangeState(fi, ST_L2_1);
-}
-
-static void
-l2_st6_tei_remove(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
-
- skb_queue_purge(&st->l2.ui_queue);
- st->l2.tei = -1;
- stop_t200(st, 18);
- st->l2.l2l3(st, DL_RELEASE | CONFIRM, NULL);
- FsmChangeState(fi, ST_L2_1);
-}
-
-static void
-l2_tei_remove(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
-
- skb_queue_purge(&st->l2.i_queue);
- skb_queue_purge(&st->l2.ui_queue);
- freewin(st);
- st->l2.tei = -1;
- stop_t200(st, 17);
- FsmDelTimer(&st->l2.t203, 19);
- st->l2.l2l3(st, DL_RELEASE | INDICATION, NULL);
- FsmChangeState(fi, ST_L2_1);
-}
-
-static void
-l2_st14_persistent_da(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
-
- skb_queue_purge(&st->l2.i_queue);
- skb_queue_purge(&st->l2.ui_queue);
- if (test_and_clear_bit(FLG_ESTAB_PEND, &st->l2.flag))
- st->l2.l2l3(st, DL_RELEASE | INDICATION, NULL);
-}
-
-static void
-l2_st5_persistent_da(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
-
- skb_queue_purge(&st->l2.i_queue);
- skb_queue_purge(&st->l2.ui_queue);
- freewin(st);
- stop_t200(st, 19);
- st5_dl_release_l2l3(st);
- FsmChangeState(fi, ST_L2_4);
-}
-
-static void
-l2_st6_persistent_da(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
-
- skb_queue_purge(&st->l2.ui_queue);
- stop_t200(st, 20);
- st->l2.l2l3(st, DL_RELEASE | CONFIRM, NULL);
- FsmChangeState(fi, ST_L2_4);
-}
-
-static void
-l2_persistent_da(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
-
- skb_queue_purge(&st->l2.i_queue);
- skb_queue_purge(&st->l2.ui_queue);
- freewin(st);
- stop_t200(st, 19);
- FsmDelTimer(&st->l2.t203, 19);
- st->l2.l2l3(st, DL_RELEASE | INDICATION, NULL);
- FsmChangeState(fi, ST_L2_4);
-}
-
-static void
-l2_set_own_busy(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
-
- if (!test_and_set_bit(FLG_OWN_BUSY, &st->l2.flag)) {
- enquiry_cr(st, RNR, RSP, 0);
- test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag);
- }
-}
-
-static void
-l2_clear_own_busy(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
-
- if (!test_and_clear_bit(FLG_OWN_BUSY, &st->l2.flag)) {
- enquiry_cr(st, RR, RSP, 0);
- test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag);
- }
-}
-
-static void
-l2_frame_error(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
-
- st->ma.layer(st, MDL_ERROR | INDICATION, arg);
-}
-
-static void
-l2_frame_error_reest(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
-
- st->ma.layer(st, MDL_ERROR | INDICATION, arg);
- establishlink(fi);
- test_and_clear_bit(FLG_L3_INIT, &st->l2.flag);
-}
-
-static struct FsmNode L2FnList[] __initdata =
-{
- {ST_L2_1, EV_L2_DL_ESTABLISH_REQ, l2_mdl_assign},
- {ST_L2_2, EV_L2_DL_ESTABLISH_REQ, l2_go_st3},
- {ST_L2_4, EV_L2_DL_ESTABLISH_REQ, l2_establish},
- {ST_L2_5, EV_L2_DL_ESTABLISH_REQ, l2_discard_i_setl3},
- {ST_L2_7, EV_L2_DL_ESTABLISH_REQ, l2_l3_reestablish},
- {ST_L2_8, EV_L2_DL_ESTABLISH_REQ, l2_l3_reestablish},
- {ST_L2_4, EV_L2_DL_RELEASE_REQ, l2_release},
- {ST_L2_5, EV_L2_DL_RELEASE_REQ, l2_pend_rel},
- {ST_L2_7, EV_L2_DL_RELEASE_REQ, l2_disconnect},
- {ST_L2_8, EV_L2_DL_RELEASE_REQ, l2_disconnect},
- {ST_L2_5, EV_L2_DL_DATA, l2_feed_i_if_reest},
- {ST_L2_7, EV_L2_DL_DATA, l2_feed_i_pull},
- {ST_L2_8, EV_L2_DL_DATA, l2_feed_iqueue},
- {ST_L2_1, EV_L2_DL_UNIT_DATA, l2_queue_ui_assign},
- {ST_L2_2, EV_L2_DL_UNIT_DATA, l2_queue_ui},
- {ST_L2_3, EV_L2_DL_UNIT_DATA, l2_queue_ui},
- {ST_L2_4, EV_L2_DL_UNIT_DATA, l2_send_ui},
- {ST_L2_5, EV_L2_DL_UNIT_DATA, l2_send_ui},
- {ST_L2_6, EV_L2_DL_UNIT_DATA, l2_send_ui},
- {ST_L2_7, EV_L2_DL_UNIT_DATA, l2_send_ui},
- {ST_L2_8, EV_L2_DL_UNIT_DATA, l2_send_ui},
- {ST_L2_1, EV_L2_MDL_ASSIGN, l2_got_tei},
- {ST_L2_2, EV_L2_MDL_ASSIGN, l2_got_tei},
- {ST_L2_3, EV_L2_MDL_ASSIGN, l2_got_tei},
- {ST_L2_2, EV_L2_MDL_ERROR, l2_st24_tei_remove},
- {ST_L2_3, EV_L2_MDL_ERROR, l2_st3_tei_remove},
- {ST_L2_4, EV_L2_MDL_REMOVE, l2_st24_tei_remove},
- {ST_L2_5, EV_L2_MDL_REMOVE, l2_st5_tei_remove},
- {ST_L2_6, EV_L2_MDL_REMOVE, l2_st6_tei_remove},
- {ST_L2_7, EV_L2_MDL_REMOVE, l2_tei_remove},
- {ST_L2_8, EV_L2_MDL_REMOVE, l2_tei_remove},
- {ST_L2_4, EV_L2_SABME, l2_start_multi},
- {ST_L2_5, EV_L2_SABME, l2_send_UA},
- {ST_L2_6, EV_L2_SABME, l2_send_DM},
- {ST_L2_7, EV_L2_SABME, l2_restart_multi},
- {ST_L2_8, EV_L2_SABME, l2_restart_multi},
- {ST_L2_4, EV_L2_DISC, l2_send_DM},
- {ST_L2_5, EV_L2_DISC, l2_send_DM},
- {ST_L2_6, EV_L2_DISC, l2_send_UA},
- {ST_L2_7, EV_L2_DISC, l2_stop_multi},
- {ST_L2_8, EV_L2_DISC, l2_stop_multi},
- {ST_L2_4, EV_L2_UA, l2_mdl_error_ua},
- {ST_L2_5, EV_L2_UA, l2_connected},
- {ST_L2_6, EV_L2_UA, l2_released},
- {ST_L2_7, EV_L2_UA, l2_mdl_error_ua},
- {ST_L2_8, EV_L2_UA, l2_mdl_error_ua},
- {ST_L2_4, EV_L2_DM, l2_reestablish},
- {ST_L2_5, EV_L2_DM, l2_st5_dm_release},
- {ST_L2_6, EV_L2_DM, l2_st6_dm_release},
- {ST_L2_7, EV_L2_DM, l2_mdl_error_dm},
- {ST_L2_8, EV_L2_DM, l2_st8_mdl_error_dm},
- {ST_L2_1, EV_L2_UI, l2_got_ui},
- {ST_L2_2, EV_L2_UI, l2_got_ui},
- {ST_L2_3, EV_L2_UI, l2_got_ui},
- {ST_L2_4, EV_L2_UI, l2_got_ui},
- {ST_L2_5, EV_L2_UI, l2_got_ui},
- {ST_L2_6, EV_L2_UI, l2_got_ui},
- {ST_L2_7, EV_L2_UI, l2_got_ui},
- {ST_L2_8, EV_L2_UI, l2_got_ui},
- {ST_L2_7, EV_L2_FRMR, l2_got_FRMR},
- {ST_L2_8, EV_L2_FRMR, l2_got_FRMR},
- {ST_L2_7, EV_L2_SUPER, l2_st7_got_super},
- {ST_L2_8, EV_L2_SUPER, l2_st8_got_super},
- {ST_L2_7, EV_L2_I, l2_got_iframe},
- {ST_L2_8, EV_L2_I, l2_got_iframe},
- {ST_L2_5, EV_L2_T200, l2_st5_tout_200},
- {ST_L2_6, EV_L2_T200, l2_st6_tout_200},
- {ST_L2_7, EV_L2_T200, l2_st7_tout_200},
- {ST_L2_8, EV_L2_T200, l2_st8_tout_200},
- {ST_L2_7, EV_L2_T203, l2_st7_tout_203},
- {ST_L2_7, EV_L2_ACK_PULL, l2_pull_iqueue},
- {ST_L2_7, EV_L2_SET_OWN_BUSY, l2_set_own_busy},
- {ST_L2_8, EV_L2_SET_OWN_BUSY, l2_set_own_busy},
- {ST_L2_7, EV_L2_CLEAR_OWN_BUSY, l2_clear_own_busy},
- {ST_L2_8, EV_L2_CLEAR_OWN_BUSY, l2_clear_own_busy},
- {ST_L2_4, EV_L2_FRAME_ERROR, l2_frame_error},
- {ST_L2_5, EV_L2_FRAME_ERROR, l2_frame_error},
- {ST_L2_6, EV_L2_FRAME_ERROR, l2_frame_error},
- {ST_L2_7, EV_L2_FRAME_ERROR, l2_frame_error_reest},
- {ST_L2_8, EV_L2_FRAME_ERROR, l2_frame_error_reest},
- {ST_L2_1, EV_L1_DEACTIVATE, l2_st14_persistent_da},
- {ST_L2_2, EV_L1_DEACTIVATE, l2_st24_tei_remove},
- {ST_L2_3, EV_L1_DEACTIVATE, l2_st3_tei_remove},
- {ST_L2_4, EV_L1_DEACTIVATE, l2_st14_persistent_da},
- {ST_L2_5, EV_L1_DEACTIVATE, l2_st5_persistent_da},
- {ST_L2_6, EV_L1_DEACTIVATE, l2_st6_persistent_da},
- {ST_L2_7, EV_L1_DEACTIVATE, l2_persistent_da},
- {ST_L2_8, EV_L1_DEACTIVATE, l2_persistent_da},
-};
-
-static void
-isdnl2_l1l2(struct PStack *st, int pr, void *arg)
-{
- struct sk_buff *skb = arg;
- u_char *datap;
- int ret = 1, len;
- int c = 0;
-
- switch (pr) {
- case (PH_DATA | INDICATION):
- datap = skb->data;
- len = l2addrsize(&st->l2);
- if (skb->len > len)
- datap += len;
- else {
- FsmEvent(&st->l2.l2m, EV_L2_FRAME_ERROR, (void *) 'N');
- dev_kfree_skb(skb);
- return;
- }
- if (!(*datap & 1)) { /* I-Frame */
- if (!(c = iframe_error(st, skb)))
- ret = FsmEvent(&st->l2.l2m, EV_L2_I, skb);
- } else if (IsSFrame(datap, st)) { /* S-Frame */
- if (!(c = super_error(st, skb)))
- ret = FsmEvent(&st->l2.l2m, EV_L2_SUPER, skb);
- } else if (IsUI(datap)) {
- if (!(c = UI_error(st, skb)))
- ret = FsmEvent(&st->l2.l2m, EV_L2_UI, skb);
- } else if (IsSABME(datap, st)) {
- if (!(c = unnum_error(st, skb, CMD)))
- ret = FsmEvent(&st->l2.l2m, EV_L2_SABME, skb);
- } else if (IsUA(datap)) {
- if (!(c = unnum_error(st, skb, RSP)))
- ret = FsmEvent(&st->l2.l2m, EV_L2_UA, skb);
- } else if (IsDISC(datap)) {
- if (!(c = unnum_error(st, skb, CMD)))
- ret = FsmEvent(&st->l2.l2m, EV_L2_DISC, skb);
- } else if (IsDM(datap)) {
- if (!(c = unnum_error(st, skb, RSP)))
- ret = FsmEvent(&st->l2.l2m, EV_L2_DM, skb);
- } else if (IsFRMR(datap)) {
- if (!(c = FRMR_error(st, skb)))
- ret = FsmEvent(&st->l2.l2m, EV_L2_FRMR, skb);
- } else {
- FsmEvent(&st->l2.l2m, EV_L2_FRAME_ERROR, (void *) 'L');
- dev_kfree_skb(skb);
- ret = 0;
- }
- if (c) {
- dev_kfree_skb(skb);
- FsmEvent(&st->l2.l2m, EV_L2_FRAME_ERROR, (void *)(long)c);
- ret = 0;
- }
- if (ret)
- dev_kfree_skb(skb);
- break;
- case (PH_PULL | CONFIRM):
- FsmEvent(&st->l2.l2m, EV_L2_ACK_PULL, arg);
- break;
- case (PH_PAUSE | INDICATION):
- test_and_set_bit(FLG_DCHAN_BUSY, &st->l2.flag);
- break;
- case (PH_PAUSE | CONFIRM):
- test_and_clear_bit(FLG_DCHAN_BUSY, &st->l2.flag);
- break;
- case (PH_ACTIVATE | CONFIRM):
- case (PH_ACTIVATE | INDICATION):
- test_and_set_bit(FLG_L1_ACTIV, &st->l2.flag);
- if (test_and_clear_bit(FLG_ESTAB_PEND, &st->l2.flag))
- FsmEvent(&st->l2.l2m, EV_L2_DL_ESTABLISH_REQ, arg);
- break;
- case (PH_DEACTIVATE | INDICATION):
- case (PH_DEACTIVATE | CONFIRM):
- test_and_clear_bit(FLG_L1_ACTIV, &st->l2.flag);
- FsmEvent(&st->l2.l2m, EV_L1_DEACTIVATE, arg);
- break;
- default:
- l2m_debug(&st->l2.l2m, "l2 unknown pr %04x", pr);
- break;
- }
-}
-
-static void
-isdnl2_l3l2(struct PStack *st, int pr, void *arg)
-{
- switch (pr) {
- case (DL_DATA | REQUEST):
- if (FsmEvent(&st->l2.l2m, EV_L2_DL_DATA, arg)) {
- dev_kfree_skb((struct sk_buff *) arg);
- }
- break;
- case (DL_UNIT_DATA | REQUEST):
- if (FsmEvent(&st->l2.l2m, EV_L2_DL_UNIT_DATA, arg)) {
- dev_kfree_skb((struct sk_buff *) arg);
- }
- break;
- case (DL_ESTABLISH | REQUEST):
- if (test_bit(FLG_L1_ACTIV, &st->l2.flag)) {
- if (test_bit(FLG_LAPD, &st->l2.flag) ||
- test_bit(FLG_ORIG, &st->l2.flag)) {
- FsmEvent(&st->l2.l2m, EV_L2_DL_ESTABLISH_REQ, arg);
- }
- } else {
- if (test_bit(FLG_LAPD, &st->l2.flag) ||
- test_bit(FLG_ORIG, &st->l2.flag)) {
- test_and_set_bit(FLG_ESTAB_PEND, &st->l2.flag);
- }
- st->l2.l2l1(st, PH_ACTIVATE, NULL);
- }
- break;
- case (DL_RELEASE | REQUEST):
- if (test_bit(FLG_LAPB, &st->l2.flag)) {
- st->l2.l2l1(st, PH_DEACTIVATE, NULL);
- }
- FsmEvent(&st->l2.l2m, EV_L2_DL_RELEASE_REQ, arg);
- break;
- case (MDL_ASSIGN | REQUEST):
- FsmEvent(&st->l2.l2m, EV_L2_MDL_ASSIGN, arg);
- break;
- case (MDL_REMOVE | REQUEST):
- FsmEvent(&st->l2.l2m, EV_L2_MDL_REMOVE, arg);
- break;
- case (MDL_ERROR | RESPONSE):
- FsmEvent(&st->l2.l2m, EV_L2_MDL_ERROR, arg);
- break;
- }
-}
-
-void
-releasestack_isdnl2(struct PStack *st)
-{
- FsmDelTimer(&st->l2.t200, 21);
- FsmDelTimer(&st->l2.t203, 16);
- skb_queue_purge(&st->l2.i_queue);
- skb_queue_purge(&st->l2.ui_queue);
- ReleaseWin(&st->l2);
-}
-
-static void
-l2m_debug(struct FsmInst *fi, char *fmt, ...)
-{
- va_list args;
- struct PStack *st = fi->userdata;
-
- va_start(args, fmt);
- VHiSax_putstatus(st->l1.hardware, st->l2.debug_id, fmt, args);
- va_end(args);
-}
-
-void
-setstack_isdnl2(struct PStack *st, char *debug_id)
-{
- spin_lock_init(&st->l2.lock);
- st->l1.l1l2 = isdnl2_l1l2;
- st->l3.l3l2 = isdnl2_l3l2;
-
- skb_queue_head_init(&st->l2.i_queue);
- skb_queue_head_init(&st->l2.ui_queue);
- InitWin(&st->l2);
- st->l2.debug = 0;
-
- st->l2.l2m.fsm = &l2fsm;
- if (test_bit(FLG_LAPB, &st->l2.flag))
- st->l2.l2m.state = ST_L2_4;
- else
- st->l2.l2m.state = ST_L2_1;
- st->l2.l2m.debug = 0;
- st->l2.l2m.userdata = st;
- st->l2.l2m.userint = 0;
- st->l2.l2m.printdebug = l2m_debug;
- strcpy(st->l2.debug_id, debug_id);
-
- FsmInitTimer(&st->l2.l2m, &st->l2.t200);
- FsmInitTimer(&st->l2.l2m, &st->l2.t203);
-}
-
-static void
-transl2_l3l2(struct PStack *st, int pr, void *arg)
-{
- switch (pr) {
- case (DL_DATA | REQUEST):
- case (DL_UNIT_DATA | REQUEST):
- st->l2.l2l1(st, PH_DATA | REQUEST, arg);
- break;
- case (DL_ESTABLISH | REQUEST):
- st->l2.l2l1(st, PH_ACTIVATE | REQUEST, NULL);
- break;
- case (DL_RELEASE | REQUEST):
- st->l2.l2l1(st, PH_DEACTIVATE | REQUEST, NULL);
- break;
- }
-}
-
-void
-setstack_transl2(struct PStack *st)
-{
- st->l3.l3l2 = transl2_l3l2;
-}
-
-void
-releasestack_transl2(struct PStack *st)
-{
-}
-
-int __init
-Isdnl2New(void)
-{
- l2fsm.state_count = L2_STATE_COUNT;
- l2fsm.event_count = L2_EVENT_COUNT;
- l2fsm.strEvent = strL2Event;
- l2fsm.strState = strL2State;
- return FsmNew(&l2fsm, L2FnList, ARRAY_SIZE(L2FnList));
-}
-
-void
-Isdnl2Free(void)
-{
- FsmFree(&l2fsm);
-}
diff --git a/drivers/isdn/hisax/isdnl2.h b/drivers/isdn/hisax/isdnl2.h
deleted file mode 100644
index 7e447fb8ed1d..000000000000
--- a/drivers/isdn/hisax/isdnl2.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/* $Id: isdnl2.h,v 1.3.6.2 2001/09/23 22:24:49 kai Exp $
- *
- * Layer 2 defines
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#define RR 0x01
-#define RNR 0x05
-#define REJ 0x09
-#define SABME 0x6f
-#define SABM 0x2f
-#define DM 0x0f
-#define UI 0x03
-#define DISC 0x43
-#define UA 0x63
-#define FRMR 0x87
-#define XID 0xaf
-
-#define CMD 0
-#define RSP 1
-
-#define LC_FLUSH_WAIT 1
diff --git a/drivers/isdn/hisax/isdnl3.c b/drivers/isdn/hisax/isdnl3.c
deleted file mode 100644
index bb3f9ec62749..000000000000
--- a/drivers/isdn/hisax/isdnl3.c
+++ /dev/null
@@ -1,594 +0,0 @@
-/* $Id: isdnl3.c,v 2.22.2.3 2004/01/13 14:31:25 keil Exp $
- *
- * Author Karsten Keil
- * based on the teles driver from Jan den Ouden
- * Copyright by Karsten Keil <keil@isdn4linux.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- * For changes and modifications please read
- * Documentation/isdn/HiSax.cert
- *
- * Thanks to Jan den Ouden
- * Fritz Elfert
- *
- */
-
-#include <linux/init.h>
-#include <linux/slab.h>
-#include "hisax.h"
-#include "isdnl3.h"
-
-const char *l3_revision = "$Revision: 2.22.2.3 $";
-
-static struct Fsm l3fsm;
-
-enum {
- ST_L3_LC_REL,
- ST_L3_LC_ESTAB_WAIT,
- ST_L3_LC_REL_DELAY,
- ST_L3_LC_REL_WAIT,
- ST_L3_LC_ESTAB,
-};
-
-#define L3_STATE_COUNT (ST_L3_LC_ESTAB + 1)
-
-static char *strL3State[] =
-{
- "ST_L3_LC_REL",
- "ST_L3_LC_ESTAB_WAIT",
- "ST_L3_LC_REL_DELAY",
- "ST_L3_LC_REL_WAIT",
- "ST_L3_LC_ESTAB",
-};
-
-enum {
- EV_ESTABLISH_REQ,
- EV_ESTABLISH_IND,
- EV_ESTABLISH_CNF,
- EV_RELEASE_REQ,
- EV_RELEASE_CNF,
- EV_RELEASE_IND,
- EV_TIMEOUT,
-};
-
-#define L3_EVENT_COUNT (EV_TIMEOUT + 1)
-
-static char *strL3Event[] =
-{
- "EV_ESTABLISH_REQ",
- "EV_ESTABLISH_IND",
- "EV_ESTABLISH_CNF",
- "EV_RELEASE_REQ",
- "EV_RELEASE_CNF",
- "EV_RELEASE_IND",
- "EV_TIMEOUT",
-};
-
-static __printf(2, 3) void
- l3m_debug(struct FsmInst *fi, char *fmt, ...)
-{
- va_list args;
- struct PStack *st = fi->userdata;
-
- va_start(args, fmt);
- VHiSax_putstatus(st->l1.hardware, st->l3.debug_id, fmt, args);
- va_end(args);
-}
-
-u_char *
-findie(u_char *p, int size, u_char ie, int wanted_set)
-{
- int l, codeset, maincodeset;
- u_char *pend = p + size;
-
- /* skip protocol discriminator, callref and message type */
- p++;
- l = (*p++) & 0xf;
- p += l;
- p++;
- codeset = 0;
- maincodeset = 0;
- /* while there are bytes left... */
- while (p < pend) {
- if ((*p & 0xf0) == 0x90) {
- codeset = *p & 0x07;
- if (!(*p & 0x08))
- maincodeset = codeset;
- }
- if (*p & 0x80)
- p++;
- else {
- if (codeset == wanted_set) {
- if (*p == ie)
- { /* improved length check (Werner Cornelius) */
- if ((pend - p) < 2)
- return (NULL);
- if (*(p + 1) > (pend - (p + 2)))
- return (NULL);
- return (p);
- }
-
- if (*p > ie)
- return (NULL);
- }
- p++;
- l = *p++;
- p += l;
- codeset = maincodeset;
- }
- }
- return (NULL);
-}
-
-int
-getcallref(u_char *p)
-{
- int l, cr = 0;
-
- p++; /* prot discr */
- if (*p & 0xfe) /* wrong callref BRI only 1 octet*/
- return (-2);
- l = 0xf & *p++; /* callref length */
- if (!l) /* dummy CallRef */
- return (-1);
- cr = *p++;
- return (cr);
-}
-
-static int OrigCallRef = 0;
-
-int
-newcallref(void)
-{
- if (OrigCallRef == 127)
- OrigCallRef = 1;
- else
- OrigCallRef++;
- return (OrigCallRef);
-}
-
-void
-newl3state(struct l3_process *pc, int state)
-{
- if (pc->debug & L3_DEB_STATE)
- l3_debug(pc->st, "%s cr %d %d --> %d", __func__,
- pc->callref & 0x7F,
- pc->state, state);
- pc->state = state;
-}
-
-static void
-L3ExpireTimer(struct timer_list *timer)
-{
- struct L3Timer *t = from_timer(t, timer, tl);
- t->pc->st->lli.l4l3(t->pc->st, t->event, t->pc);
-}
-
-void
-L3InitTimer(struct l3_process *pc, struct L3Timer *t)
-{
- t->pc = pc;
- timer_setup(&t->tl, L3ExpireTimer, 0);
-}
-
-void
-L3DelTimer(struct L3Timer *t)
-{
- del_timer(&t->tl);
-}
-
-int
-L3AddTimer(struct L3Timer *t,
- int millisec, int event)
-{
- if (timer_pending(&t->tl)) {
- printk(KERN_WARNING "L3AddTimer: timer already active!\n");
- return -1;
- }
- t->event = event;
- t->tl.expires = jiffies + (millisec * HZ) / 1000;
- add_timer(&t->tl);
- return 0;
-}
-
-void
-StopAllL3Timer(struct l3_process *pc)
-{
- L3DelTimer(&pc->timer);
-}
-
-struct sk_buff *
-l3_alloc_skb(int len)
-{
- struct sk_buff *skb;
-
- if (!(skb = alloc_skb(len + MAX_HEADER_LEN, GFP_ATOMIC))) {
- printk(KERN_WARNING "HiSax: No skb for D-channel\n");
- return (NULL);
- }
- skb_reserve(skb, MAX_HEADER_LEN);
- return (skb);
-}
-
-static void
-no_l3_proto(struct PStack *st, int pr, void *arg)
-{
- struct sk_buff *skb = arg;
-
- HiSax_putstatus(st->l1.hardware, "L3", "no D protocol");
- if (skb) {
- dev_kfree_skb(skb);
- }
-}
-
-static int
-no_l3_proto_spec(struct PStack *st, isdn_ctrl *ic)
-{
- printk(KERN_WARNING "HiSax: no specific protocol handler for proto %lu\n", ic->arg & 0xFF);
- return (-1);
-}
-
-struct l3_process
-*getl3proc(struct PStack *st, int cr)
-{
- struct l3_process *p = st->l3.proc;
-
- while (p)
- if (p->callref == cr)
- return (p);
- else
- p = p->next;
- return (NULL);
-}
-
-struct l3_process
-*new_l3_process(struct PStack *st, int cr)
-{
- struct l3_process *p, *np;
-
- if (!(p = kmalloc(sizeof(struct l3_process), GFP_ATOMIC))) {
- printk(KERN_ERR "HiSax can't get memory for cr %d\n", cr);
- return (NULL);
- }
- if (!st->l3.proc)
- st->l3.proc = p;
- else {
- np = st->l3.proc;
- while (np->next)
- np = np->next;
- np->next = p;
- }
- p->next = NULL;
- p->debug = st->l3.debug;
- p->callref = cr;
- p->state = 0;
- p->chan = NULL;
- p->st = st;
- p->N303 = st->l3.N303;
- L3InitTimer(p, &p->timer);
- return (p);
-};
-
-void
-release_l3_process(struct l3_process *p)
-{
- struct l3_process *np, *pp = NULL;
-
- if (!p)
- return;
- np = p->st->l3.proc;
- while (np) {
- if (np == p) {
- StopAllL3Timer(p);
- if (pp)
- pp->next = np->next;
- else if (!(p->st->l3.proc = np->next) &&
- !test_bit(FLG_PTP, &p->st->l2.flag)) {
- if (p->debug)
- l3_debug(p->st, "release_l3_process: last process");
- if (skb_queue_empty(&p->st->l3.squeue)) {
- if (p->debug)
- l3_debug(p->st, "release_l3_process: release link");
- if (p->st->protocol != ISDN_PTYPE_NI1)
- FsmEvent(&p->st->l3.l3m, EV_RELEASE_REQ, NULL);
- else
- FsmEvent(&p->st->l3.l3m, EV_RELEASE_IND, NULL);
- } else {
- if (p->debug)
- l3_debug(p->st, "release_l3_process: not release link");
- }
- }
- kfree(p);
- return;
- }
- pp = np;
- np = np->next;
- }
- printk(KERN_ERR "HiSax internal L3 error CR(%d) not in list\n", p->callref);
- l3_debug(p->st, "HiSax internal L3 error CR(%d) not in list", p->callref);
-};
-
-static void
-l3ml3p(struct PStack *st, int pr)
-{
- struct l3_process *p = st->l3.proc;
- struct l3_process *np;
-
- while (p) {
- /* p might be kfreed under us, so we need to save where we want to go on */
- np = p->next;
- st->l3.l3ml3(st, pr, p);
- p = np;
- }
-}
-
-void
-setstack_l3dc(struct PStack *st, struct Channel *chanp)
-{
- char tmp[64];
-
- st->l3.proc = NULL;
- st->l3.global = NULL;
- skb_queue_head_init(&st->l3.squeue);
- st->l3.l3m.fsm = &l3fsm;
- st->l3.l3m.state = ST_L3_LC_REL;
- st->l3.l3m.debug = 1;
- st->l3.l3m.userdata = st;
- st->l3.l3m.userint = 0;
- st->l3.l3m.printdebug = l3m_debug;
- FsmInitTimer(&st->l3.l3m, &st->l3.l3m_timer);
- strcpy(st->l3.debug_id, "L3DC ");
- st->lli.l4l3_proto = no_l3_proto_spec;
-
-#ifdef CONFIG_HISAX_EURO
- if (st->protocol == ISDN_PTYPE_EURO) {
- setstack_dss1(st);
- } else
-#endif
-#ifdef CONFIG_HISAX_NI1
- if (st->protocol == ISDN_PTYPE_NI1) {
- setstack_ni1(st);
- } else
-#endif
-#ifdef CONFIG_HISAX_1TR6
- if (st->protocol == ISDN_PTYPE_1TR6) {
- setstack_1tr6(st);
- } else
-#endif
- if (st->protocol == ISDN_PTYPE_LEASED) {
- st->lli.l4l3 = no_l3_proto;
- st->l2.l2l3 = no_l3_proto;
- st->l3.l3ml3 = no_l3_proto;
- printk(KERN_INFO "HiSax: Leased line mode\n");
- } else {
- st->lli.l4l3 = no_l3_proto;
- st->l2.l2l3 = no_l3_proto;
- st->l3.l3ml3 = no_l3_proto;
- sprintf(tmp, "protocol %s not supported",
- (st->protocol == ISDN_PTYPE_1TR6) ? "1tr6" :
- (st->protocol == ISDN_PTYPE_EURO) ? "euro" :
- (st->protocol == ISDN_PTYPE_NI1) ? "ni1" :
- "unknown");
- printk(KERN_WARNING "HiSax: %s\n", tmp);
- st->protocol = -1;
- }
-}
-
-static void
-isdnl3_trans(struct PStack *st, int pr, void *arg) {
- st->l3.l3l2(st, pr, arg);
-}
-
-void
-releasestack_isdnl3(struct PStack *st)
-{
- while (st->l3.proc)
- release_l3_process(st->l3.proc);
- if (st->l3.global) {
- StopAllL3Timer(st->l3.global);
- kfree(st->l3.global);
- st->l3.global = NULL;
- }
- FsmDelTimer(&st->l3.l3m_timer, 54);
- skb_queue_purge(&st->l3.squeue);
-}
-
-void
-setstack_l3bc(struct PStack *st, struct Channel *chanp)
-{
-
- st->l3.proc = NULL;
- st->l3.global = NULL;
- skb_queue_head_init(&st->l3.squeue);
- st->l3.l3m.fsm = &l3fsm;
- st->l3.l3m.state = ST_L3_LC_REL;
- st->l3.l3m.debug = 1;
- st->l3.l3m.userdata = st;
- st->l3.l3m.userint = 0;
- st->l3.l3m.printdebug = l3m_debug;
- strcpy(st->l3.debug_id, "L3BC ");
- st->lli.l4l3 = isdnl3_trans;
-}
-
-#define DREL_TIMER_VALUE 40000
-
-static void
-lc_activate(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
-
- FsmChangeState(fi, ST_L3_LC_ESTAB_WAIT);
- st->l3.l3l2(st, DL_ESTABLISH | REQUEST, NULL);
-}
-
-static void
-lc_connect(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
- struct sk_buff *skb = arg;
- int dequeued = 0;
-
- FsmChangeState(fi, ST_L3_LC_ESTAB);
- while ((skb = skb_dequeue(&st->l3.squeue))) {
- st->l3.l3l2(st, DL_DATA | REQUEST, skb);
- dequeued++;
- }
- if ((!st->l3.proc) && dequeued) {
- if (st->l3.debug)
- l3_debug(st, "lc_connect: release link");
- FsmEvent(&st->l3.l3m, EV_RELEASE_REQ, NULL);
- } else
- l3ml3p(st, DL_ESTABLISH | INDICATION);
-}
-
-static void
-lc_connected(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
- struct sk_buff *skb = arg;
- int dequeued = 0;
-
- FsmDelTimer(&st->l3.l3m_timer, 51);
- FsmChangeState(fi, ST_L3_LC_ESTAB);
- while ((skb = skb_dequeue(&st->l3.squeue))) {
- st->l3.l3l2(st, DL_DATA | REQUEST, skb);
- dequeued++;
- }
- if ((!st->l3.proc) && dequeued) {
- if (st->l3.debug)
- l3_debug(st, "lc_connected: release link");
- FsmEvent(&st->l3.l3m, EV_RELEASE_REQ, NULL);
- } else
- l3ml3p(st, DL_ESTABLISH | CONFIRM);
-}
-
-static void
-lc_start_delay(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
-
- FsmChangeState(fi, ST_L3_LC_REL_DELAY);
- FsmAddTimer(&st->l3.l3m_timer, DREL_TIMER_VALUE, EV_TIMEOUT, NULL, 50);
-}
-
-static void
-lc_start_delay_check(struct FsmInst *fi, int event, void *arg)
-/* 20/09/00 - GE timer not user for NI-1 as layer 2 should stay up */
-{
- struct PStack *st = fi->userdata;
-
- FsmChangeState(fi, ST_L3_LC_REL_DELAY);
- /* 19/09/00 - GE timer not user for NI-1 */
- if (st->protocol != ISDN_PTYPE_NI1)
- FsmAddTimer(&st->l3.l3m_timer, DREL_TIMER_VALUE, EV_TIMEOUT, NULL, 50);
-}
-
-static void
-lc_release_req(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
-
- if (test_bit(FLG_L2BLOCK, &st->l2.flag)) {
- if (st->l3.debug)
- l3_debug(st, "lc_release_req: l2 blocked");
- /* restart release timer */
- FsmAddTimer(&st->l3.l3m_timer, DREL_TIMER_VALUE, EV_TIMEOUT, NULL, 51);
- } else {
- FsmChangeState(fi, ST_L3_LC_REL_WAIT);
- st->l3.l3l2(st, DL_RELEASE | REQUEST, NULL);
- }
-}
-
-static void
-lc_release_ind(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
-
- FsmDelTimer(&st->l3.l3m_timer, 52);
- FsmChangeState(fi, ST_L3_LC_REL);
- skb_queue_purge(&st->l3.squeue);
- l3ml3p(st, DL_RELEASE | INDICATION);
-}
-
-static void
-lc_release_cnf(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
-
- FsmChangeState(fi, ST_L3_LC_REL);
- skb_queue_purge(&st->l3.squeue);
- l3ml3p(st, DL_RELEASE | CONFIRM);
-}
-
-
-/* *INDENT-OFF* */
-static struct FsmNode L3FnList[] __initdata =
-{
- {ST_L3_LC_REL, EV_ESTABLISH_REQ, lc_activate},
- {ST_L3_LC_REL, EV_ESTABLISH_IND, lc_connect},
- {ST_L3_LC_REL, EV_ESTABLISH_CNF, lc_connect},
- {ST_L3_LC_ESTAB_WAIT, EV_ESTABLISH_CNF, lc_connected},
- {ST_L3_LC_ESTAB_WAIT, EV_RELEASE_REQ, lc_start_delay},
- {ST_L3_LC_ESTAB_WAIT, EV_RELEASE_IND, lc_release_ind},
- {ST_L3_LC_ESTAB, EV_RELEASE_IND, lc_release_ind},
- {ST_L3_LC_ESTAB, EV_RELEASE_REQ, lc_start_delay_check},
- {ST_L3_LC_REL_DELAY, EV_RELEASE_IND, lc_release_ind},
- {ST_L3_LC_REL_DELAY, EV_ESTABLISH_REQ, lc_connected},
- {ST_L3_LC_REL_DELAY, EV_TIMEOUT, lc_release_req},
- {ST_L3_LC_REL_WAIT, EV_RELEASE_CNF, lc_release_cnf},
- {ST_L3_LC_REL_WAIT, EV_ESTABLISH_REQ, lc_activate},
-};
-/* *INDENT-ON* */
-
-void
-l3_msg(struct PStack *st, int pr, void *arg)
-{
- switch (pr) {
- case (DL_DATA | REQUEST):
- if (st->l3.l3m.state == ST_L3_LC_ESTAB) {
- st->l3.l3l2(st, pr, arg);
- } else {
- struct sk_buff *skb = arg;
-
- skb_queue_tail(&st->l3.squeue, skb);
- FsmEvent(&st->l3.l3m, EV_ESTABLISH_REQ, NULL);
- }
- break;
- case (DL_ESTABLISH | REQUEST):
- FsmEvent(&st->l3.l3m, EV_ESTABLISH_REQ, NULL);
- break;
- case (DL_ESTABLISH | CONFIRM):
- FsmEvent(&st->l3.l3m, EV_ESTABLISH_CNF, NULL);
- break;
- case (DL_ESTABLISH | INDICATION):
- FsmEvent(&st->l3.l3m, EV_ESTABLISH_IND, NULL);
- break;
- case (DL_RELEASE | INDICATION):
- FsmEvent(&st->l3.l3m, EV_RELEASE_IND, NULL);
- break;
- case (DL_RELEASE | CONFIRM):
- FsmEvent(&st->l3.l3m, EV_RELEASE_CNF, NULL);
- break;
- case (DL_RELEASE | REQUEST):
- FsmEvent(&st->l3.l3m, EV_RELEASE_REQ, NULL);
- break;
- }
-}
-
-int __init
-Isdnl3New(void)
-{
- l3fsm.state_count = L3_STATE_COUNT;
- l3fsm.event_count = L3_EVENT_COUNT;
- l3fsm.strEvent = strL3Event;
- l3fsm.strState = strL3State;
- return FsmNew(&l3fsm, L3FnList, ARRAY_SIZE(L3FnList));
-}
-
-void
-Isdnl3Free(void)
-{
- FsmFree(&l3fsm);
-}
diff --git a/drivers/isdn/hisax/isdnl3.h b/drivers/isdn/hisax/isdnl3.h
deleted file mode 100644
index 0edc99d40dc2..000000000000
--- a/drivers/isdn/hisax/isdnl3.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/* $Id: isdnl3.h,v 2.6.6.2 2001/09/23 22:24:49 kai Exp $
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#define SBIT(state) (1 << state)
-#define ALL_STATES 0x03ffffff
-
-#define PROTO_DIS_EURO 0x08
-
-#define L3_DEB_WARN 0x01
-#define L3_DEB_PROTERR 0x02
-#define L3_DEB_STATE 0x04
-#define L3_DEB_CHARGE 0x08
-#define L3_DEB_CHECK 0x10
-#define L3_DEB_SI 0x20
-
-struct stateentry {
- int state;
- int primitive;
- void (*rout) (struct l3_process *, u8, void *);
-};
-
-#define l3_debug(st, fmt, args...) HiSax_putstatus(st->l1.hardware, "l3 ", fmt, ## args)
-
-struct PStack;
-
-void newl3state(struct l3_process *pc, int state);
-void L3InitTimer(struct l3_process *pc, struct L3Timer *t);
-void L3DelTimer(struct L3Timer *t);
-int L3AddTimer(struct L3Timer *t, int millisec, int event);
-void StopAllL3Timer(struct l3_process *pc);
-struct sk_buff *l3_alloc_skb(int len);
-struct l3_process *new_l3_process(struct PStack *st, int cr);
-void release_l3_process(struct l3_process *p);
-struct l3_process *getl3proc(struct PStack *st, int cr);
-void l3_msg(struct PStack *st, int pr, void *arg);
-void setstack_dss1(struct PStack *st);
-void setstack_ni1(struct PStack *st);
-void setstack_1tr6(struct PStack *st);
diff --git a/drivers/isdn/hisax/isurf.c b/drivers/isdn/hisax/isurf.c
deleted file mode 100644
index 53e299be4304..000000000000
--- a/drivers/isdn/hisax/isurf.c
+++ /dev/null
@@ -1,305 +0,0 @@
-/* $Id: isurf.c,v 1.12.2.4 2004/01/13 21:46:03 keil Exp $
- *
- * low level stuff for Siemens I-Surf/I-Talk cards
- *
- * Author Karsten Keil
- * Copyright by Karsten Keil <keil@isdn4linux.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/init.h>
-#include "hisax.h"
-#include "isac.h"
-#include "isar.h"
-#include "isdnl1.h"
-#include <linux/isapnp.h>
-
-static const char *ISurf_revision = "$Revision: 1.12.2.4 $";
-
-#define byteout(addr, val) outb(val, addr)
-#define bytein(addr) inb(addr)
-
-#define ISURF_ISAR_RESET 1
-#define ISURF_ISAC_RESET 2
-#define ISURF_ISAR_EA 4
-#define ISURF_ARCOFI_RESET 8
-#define ISURF_RESET (ISURF_ISAR_RESET | ISURF_ISAC_RESET | ISURF_ARCOFI_RESET)
-
-#define ISURF_ISAR_OFFSET 0
-#define ISURF_ISAC_OFFSET 0x100
-#define ISURF_IOMEM_SIZE 0x400
-/* Interface functions */
-
-static u_char
-ReadISAC(struct IsdnCardState *cs, u_char offset)
-{
- return (readb(cs->hw.isurf.isac + offset));
-}
-
-static void
-WriteISAC(struct IsdnCardState *cs, u_char offset, u_char value)
-{
- writeb(value, cs->hw.isurf.isac + offset); mb();
-}
-
-static void
-ReadISACfifo(struct IsdnCardState *cs, u_char *data, int size)
-{
- register int i;
- for (i = 0; i < size; i++)
- data[i] = readb(cs->hw.isurf.isac);
-}
-
-static void
-WriteISACfifo(struct IsdnCardState *cs, u_char *data, int size)
-{
- register int i;
- for (i = 0; i < size; i++) {
- writeb(data[i], cs->hw.isurf.isac); mb();
- }
-}
-
-/* ISAR access routines
- * mode = 0 access with IRQ on
- * mode = 1 access with IRQ off
- * mode = 2 access with IRQ off and using last offset
- */
-
-static u_char
-ReadISAR(struct IsdnCardState *cs, int mode, u_char offset)
-{
- return (readb(cs->hw.isurf.isar + offset));
-}
-
-static void
-WriteISAR(struct IsdnCardState *cs, int mode, u_char offset, u_char value)
-{
- writeb(value, cs->hw.isurf.isar + offset); mb();
-}
-
-static irqreturn_t
-isurf_interrupt(int intno, void *dev_id)
-{
- struct IsdnCardState *cs = dev_id;
- u_char val;
- int cnt = 5;
- u_long flags;
-
- spin_lock_irqsave(&cs->lock, flags);
- val = readb(cs->hw.isurf.isar + ISAR_IRQBIT);
-Start_ISAR:
- if (val & ISAR_IRQSTA)
- isar_int_main(cs);
- val = readb(cs->hw.isurf.isac + ISAC_ISTA);
-Start_ISAC:
- if (val)
- isac_interrupt(cs, val);
- val = readb(cs->hw.isurf.isar + ISAR_IRQBIT);
- if ((val & ISAR_IRQSTA) && --cnt) {
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "ISAR IntStat after IntRoutine");
- goto Start_ISAR;
- }
- val = readb(cs->hw.isurf.isac + ISAC_ISTA);
- if (val && --cnt) {
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "ISAC IntStat after IntRoutine");
- goto Start_ISAC;
- }
- if (!cnt)
- printk(KERN_WARNING "ISurf IRQ LOOP\n");
-
- writeb(0, cs->hw.isurf.isar + ISAR_IRQBIT); mb();
- writeb(0xFF, cs->hw.isurf.isac + ISAC_MASK); mb();
- writeb(0, cs->hw.isurf.isac + ISAC_MASK); mb();
- writeb(ISAR_IRQMSK, cs->hw.isurf.isar + ISAR_IRQBIT); mb();
- spin_unlock_irqrestore(&cs->lock, flags);
- return IRQ_HANDLED;
-}
-
-static void
-release_io_isurf(struct IsdnCardState *cs)
-{
- release_region(cs->hw.isurf.reset, 1);
- iounmap(cs->hw.isurf.isar);
- release_mem_region(cs->hw.isurf.phymem, ISURF_IOMEM_SIZE);
-}
-
-static void
-reset_isurf(struct IsdnCardState *cs, u_char chips)
-{
- printk(KERN_INFO "ISurf: resetting card\n");
-
- byteout(cs->hw.isurf.reset, chips); /* Reset On */
- mdelay(10);
- byteout(cs->hw.isurf.reset, ISURF_ISAR_EA); /* Reset Off */
- mdelay(10);
-}
-
-static int
-ISurf_card_msg(struct IsdnCardState *cs, int mt, void *arg)
-{
- u_long flags;
-
- switch (mt) {
- case CARD_RESET:
- spin_lock_irqsave(&cs->lock, flags);
- reset_isurf(cs, ISURF_RESET);
- spin_unlock_irqrestore(&cs->lock, flags);
- return (0);
- case CARD_RELEASE:
- release_io_isurf(cs);
- return (0);
- case CARD_INIT:
- spin_lock_irqsave(&cs->lock, flags);
- reset_isurf(cs, ISURF_RESET);
- clear_pending_isac_ints(cs);
- writeb(0, cs->hw.isurf.isar + ISAR_IRQBIT); mb();
- initisac(cs);
- initisar(cs);
- /* Reenable ISAC IRQ */
- cs->writeisac(cs, ISAC_MASK, 0);
- /* RESET Receiver and Transmitter */
- cs->writeisac(cs, ISAC_CMDR, 0x41);
- spin_unlock_irqrestore(&cs->lock, flags);
- return (0);
- case CARD_TEST:
- return (0);
- }
- return (0);
-}
-
-static int
-isurf_auxcmd(struct IsdnCardState *cs, isdn_ctrl *ic) {
- int ret;
- u_long flags;
-
- if ((ic->command == ISDN_CMD_IOCTL) && (ic->arg == 9)) {
- ret = isar_auxcmd(cs, ic);
- spin_lock_irqsave(&cs->lock, flags);
- if (!ret) {
- reset_isurf(cs, ISURF_ISAR_EA | ISURF_ISAC_RESET |
- ISURF_ARCOFI_RESET);
- initisac(cs);
- cs->writeisac(cs, ISAC_MASK, 0);
- cs->writeisac(cs, ISAC_CMDR, 0x41);
- }
- spin_unlock_irqrestore(&cs->lock, flags);
- return (ret);
- }
- return (isar_auxcmd(cs, ic));
-}
-
-#ifdef __ISAPNP__
-static struct pnp_card *pnp_c = NULL;
-#endif
-
-int setup_isurf(struct IsdnCard *card)
-{
- int ver;
- struct IsdnCardState *cs = card->cs;
- char tmp[64];
-
- strcpy(tmp, ISurf_revision);
- printk(KERN_INFO "HiSax: ISurf driver Rev. %s\n", HiSax_getrev(tmp));
-
- if (cs->typ != ISDN_CTYPE_ISURF)
- return (0);
- if (card->para[1] && card->para[2]) {
- cs->hw.isurf.reset = card->para[1];
- cs->hw.isurf.phymem = card->para[2];
- cs->irq = card->para[0];
- } else {
-#ifdef __ISAPNP__
- if (isapnp_present()) {
- struct pnp_dev *pnp_d = NULL;
- int err;
-
- cs->subtyp = 0;
- if ((pnp_c = pnp_find_card(
- ISAPNP_VENDOR('S', 'I', 'E'),
- ISAPNP_FUNCTION(0x0010), pnp_c))) {
- if (!(pnp_d = pnp_find_dev(pnp_c,
- ISAPNP_VENDOR('S', 'I', 'E'),
- ISAPNP_FUNCTION(0x0010), pnp_d))) {
- printk(KERN_ERR "ISurfPnP: PnP error card found, no device\n");
- return (0);
- }
- pnp_disable_dev(pnp_d);
- err = pnp_activate_dev(pnp_d);
- if (err < 0) {
- pr_warn("%s: pnp_activate_dev ret=%d\n",
- __func__, err);
- return 0;
- }
- cs->hw.isurf.reset = pnp_port_start(pnp_d, 0);
- cs->hw.isurf.phymem = pnp_mem_start(pnp_d, 1);
- cs->irq = pnp_irq(pnp_d, 0);
- if (cs->irq == -1 || !cs->hw.isurf.reset || !cs->hw.isurf.phymem) {
- printk(KERN_ERR "ISurfPnP:some resources are missing %d/%x/%lx\n",
- cs->irq, cs->hw.isurf.reset, cs->hw.isurf.phymem);
- pnp_disable_dev(pnp_d);
- return (0);
- }
- } else {
- printk(KERN_INFO "ISurfPnP: no ISAPnP card found\n");
- return (0);
- }
- } else {
- printk(KERN_INFO "ISurfPnP: no ISAPnP bus found\n");
- return (0);
- }
-#else
- printk(KERN_WARNING "HiSax: Siemens I-Surf port/mem not set\n");
- return (0);
-#endif
- }
- if (!request_region(cs->hw.isurf.reset, 1, "isurf isdn")) {
- printk(KERN_WARNING
- "HiSax: Siemens I-Surf config port %x already in use\n",
- cs->hw.isurf.reset);
- return (0);
- }
- if (!request_region(cs->hw.isurf.phymem, ISURF_IOMEM_SIZE, "isurf iomem")) {
- printk(KERN_WARNING "HiSax: Siemens I-Surf memory region "
- "%lx-%lx already in use\n",
- cs->hw.isurf.phymem,
- cs->hw.isurf.phymem + ISURF_IOMEM_SIZE);
- release_region(cs->hw.isurf.reset, 1);
- return (0);
- }
- cs->hw.isurf.isar = ioremap(cs->hw.isurf.phymem, ISURF_IOMEM_SIZE);
- cs->hw.isurf.isac = cs->hw.isurf.isar + ISURF_ISAC_OFFSET;
- printk(KERN_INFO
- "ISurf: defined at 0x%x 0x%lx IRQ %d\n",
- cs->hw.isurf.reset,
- cs->hw.isurf.phymem,
- cs->irq);
-
- setup_isac(cs);
- cs->cardmsg = &ISurf_card_msg;
- cs->irq_func = &isurf_interrupt;
- cs->auxcmd = &isurf_auxcmd;
- cs->readisac = &ReadISAC;
- cs->writeisac = &WriteISAC;
- cs->readisacfifo = &ReadISACfifo;
- cs->writeisacfifo = &WriteISACfifo;
- cs->bcs[0].hw.isar.reg = &cs->hw.isurf.isar_r;
- cs->bcs[1].hw.isar.reg = &cs->hw.isurf.isar_r;
- test_and_set_bit(HW_ISAR, &cs->HW_Flags);
- ISACVersion(cs, "ISurf:");
- cs->BC_Read_Reg = &ReadISAR;
- cs->BC_Write_Reg = &WriteISAR;
- cs->BC_Send_Data = &isar_fill_fifo;
- ver = ISARVersion(cs, "ISurf:");
- if (ver < 0) {
- printk(KERN_WARNING
- "ISurf: wrong ISAR version (ret = %d)\n", ver);
- release_io_isurf(cs);
- return (0);
- }
- return (1);
-}
diff --git a/drivers/isdn/hisax/ix1_micro.c b/drivers/isdn/hisax/ix1_micro.c
deleted file mode 100644
index bfb79f3f0a49..000000000000
--- a/drivers/isdn/hisax/ix1_micro.c
+++ /dev/null
@@ -1,316 +0,0 @@
-/* $Id: ix1_micro.c,v 2.12.2.4 2004/01/13 23:48:39 keil Exp $
- *
- * low level stuff for ITK ix1-micro Rev.2 isdn cards
- * derived from the original file teles3.c from Karsten Keil
- *
- * Author Klaus-Peter Nischke
- * Copyright by Klaus-Peter Nischke, ITK AG
- * <klaus@nischke.do.eunet.de>
- * by Karsten Keil <keil@isdn4linux.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- * Klaus-Peter Nischke
- * Deusener Str. 287
- * 44369 Dortmund
- * Germany
- */
-
-#include <linux/init.h>
-#include <linux/isapnp.h>
-#include "hisax.h"
-#include "isac.h"
-#include "hscx.h"
-#include "isdnl1.h"
-
-static const char *ix1_revision = "$Revision: 2.12.2.4 $";
-
-#define byteout(addr, val) outb(val, addr)
-#define bytein(addr) inb(addr)
-
-#define SPECIAL_PORT_OFFSET 3
-
-#define ISAC_COMMAND_OFFSET 2
-#define ISAC_DATA_OFFSET 0
-#define HSCX_COMMAND_OFFSET 2
-#define HSCX_DATA_OFFSET 1
-
-#define TIMEOUT 50
-
-static inline u_char
-readreg(unsigned int ale, unsigned int adr, u_char off)
-{
- register u_char ret;
-
- byteout(ale, off);
- ret = bytein(adr);
- return (ret);
-}
-
-static inline void
-readfifo(unsigned int ale, unsigned int adr, u_char off, u_char *data, int size)
-{
- byteout(ale, off);
- insb(adr, data, size);
-}
-
-
-static inline void
-writereg(unsigned int ale, unsigned int adr, u_char off, u_char data)
-{
- byteout(ale, off);
- byteout(adr, data);
-}
-
-static inline void
-writefifo(unsigned int ale, unsigned int adr, u_char off, u_char *data, int size)
-{
- byteout(ale, off);
- outsb(adr, data, size);
-}
-
-/* Interface functions */
-
-static u_char
-ReadISAC(struct IsdnCardState *cs, u_char offset)
-{
- return (readreg(cs->hw.ix1.isac_ale, cs->hw.ix1.isac, offset));
-}
-
-static void
-WriteISAC(struct IsdnCardState *cs, u_char offset, u_char value)
-{
- writereg(cs->hw.ix1.isac_ale, cs->hw.ix1.isac, offset, value);
-}
-
-static void
-ReadISACfifo(struct IsdnCardState *cs, u_char *data, int size)
-{
- readfifo(cs->hw.ix1.isac_ale, cs->hw.ix1.isac, 0, data, size);
-}
-
-static void
-WriteISACfifo(struct IsdnCardState *cs, u_char *data, int size)
-{
- writefifo(cs->hw.ix1.isac_ale, cs->hw.ix1.isac, 0, data, size);
-}
-
-static u_char
-ReadHSCX(struct IsdnCardState *cs, int hscx, u_char offset)
-{
- return (readreg(cs->hw.ix1.hscx_ale,
- cs->hw.ix1.hscx, offset + (hscx ? 0x40 : 0)));
-}
-
-static void
-WriteHSCX(struct IsdnCardState *cs, int hscx, u_char offset, u_char value)
-{
- writereg(cs->hw.ix1.hscx_ale,
- cs->hw.ix1.hscx, offset + (hscx ? 0x40 : 0), value);
-}
-
-#define READHSCX(cs, nr, reg) readreg(cs->hw.ix1.hscx_ale, \
- cs->hw.ix1.hscx, reg + (nr ? 0x40 : 0))
-#define WRITEHSCX(cs, nr, reg, data) writereg(cs->hw.ix1.hscx_ale, \
- cs->hw.ix1.hscx, reg + (nr ? 0x40 : 0), data)
-
-#define READHSCXFIFO(cs, nr, ptr, cnt) readfifo(cs->hw.ix1.hscx_ale, \
- cs->hw.ix1.hscx, (nr ? 0x40 : 0), ptr, cnt)
-
-#define WRITEHSCXFIFO(cs, nr, ptr, cnt) writefifo(cs->hw.ix1.hscx_ale, \
- cs->hw.ix1.hscx, (nr ? 0x40 : 0), ptr, cnt)
-
-#include "hscx_irq.c"
-
-static irqreturn_t
-ix1micro_interrupt(int intno, void *dev_id)
-{
- struct IsdnCardState *cs = dev_id;
- u_char val;
- u_long flags;
-
- spin_lock_irqsave(&cs->lock, flags);
- val = readreg(cs->hw.ix1.hscx_ale, cs->hw.ix1.hscx, HSCX_ISTA + 0x40);
-Start_HSCX:
- if (val)
- hscx_int_main(cs, val);
- val = readreg(cs->hw.ix1.isac_ale, cs->hw.ix1.isac, ISAC_ISTA);
-Start_ISAC:
- if (val)
- isac_interrupt(cs, val);
- val = readreg(cs->hw.ix1.hscx_ale, cs->hw.ix1.hscx, HSCX_ISTA + 0x40);
- if (val) {
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "HSCX IntStat after IntRoutine");
- goto Start_HSCX;
- }
- val = readreg(cs->hw.ix1.isac_ale, cs->hw.ix1.isac, ISAC_ISTA);
- if (val) {
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "ISAC IntStat after IntRoutine");
- goto Start_ISAC;
- }
- writereg(cs->hw.ix1.hscx_ale, cs->hw.ix1.hscx, HSCX_MASK, 0xFF);
- writereg(cs->hw.ix1.hscx_ale, cs->hw.ix1.hscx, HSCX_MASK + 0x40, 0xFF);
- writereg(cs->hw.ix1.isac_ale, cs->hw.ix1.isac, ISAC_MASK, 0xFF);
- writereg(cs->hw.ix1.isac_ale, cs->hw.ix1.isac, ISAC_MASK, 0);
- writereg(cs->hw.ix1.hscx_ale, cs->hw.ix1.hscx, HSCX_MASK, 0);
- writereg(cs->hw.ix1.hscx_ale, cs->hw.ix1.hscx, HSCX_MASK + 0x40, 0);
- spin_unlock_irqrestore(&cs->lock, flags);
- return IRQ_HANDLED;
-}
-
-static void
-release_io_ix1micro(struct IsdnCardState *cs)
-{
- if (cs->hw.ix1.cfg_reg)
- release_region(cs->hw.ix1.cfg_reg, 4);
-}
-
-static void
-ix1_reset(struct IsdnCardState *cs)
-{
- int cnt;
-
- /* reset isac */
- cnt = 3 * (HZ / 10) + 1;
- while (cnt--) {
- byteout(cs->hw.ix1.cfg_reg + SPECIAL_PORT_OFFSET, 1);
- HZDELAY(1); /* wait >=10 ms */
- }
- byteout(cs->hw.ix1.cfg_reg + SPECIAL_PORT_OFFSET, 0);
-}
-
-static int
-ix1_card_msg(struct IsdnCardState *cs, int mt, void *arg)
-{
- u_long flags;
-
- switch (mt) {
- case CARD_RESET:
- spin_lock_irqsave(&cs->lock, flags);
- ix1_reset(cs);
- spin_unlock_irqrestore(&cs->lock, flags);
- return (0);
- case CARD_RELEASE:
- release_io_ix1micro(cs);
- return (0);
- case CARD_INIT:
- spin_lock_irqsave(&cs->lock, flags);
- ix1_reset(cs);
- inithscxisac(cs, 3);
- spin_unlock_irqrestore(&cs->lock, flags);
- return (0);
- case CARD_TEST:
- return (0);
- }
- return (0);
-}
-
-#ifdef __ISAPNP__
-static struct isapnp_device_id itk_ids[] = {
- { ISAPNP_VENDOR('I', 'T', 'K'), ISAPNP_FUNCTION(0x25),
- ISAPNP_VENDOR('I', 'T', 'K'), ISAPNP_FUNCTION(0x25),
- (unsigned long) "ITK micro 2" },
- { ISAPNP_VENDOR('I', 'T', 'K'), ISAPNP_FUNCTION(0x29),
- ISAPNP_VENDOR('I', 'T', 'K'), ISAPNP_FUNCTION(0x29),
- (unsigned long) "ITK micro 2." },
- { 0, }
-};
-
-static struct isapnp_device_id *ipid = &itk_ids[0];
-static struct pnp_card *pnp_c = NULL;
-#endif
-
-
-int setup_ix1micro(struct IsdnCard *card)
-{
- struct IsdnCardState *cs = card->cs;
- char tmp[64];
-
- strcpy(tmp, ix1_revision);
- printk(KERN_INFO "HiSax: ITK IX1 driver Rev. %s\n", HiSax_getrev(tmp));
- if (cs->typ != ISDN_CTYPE_IX1MICROR2)
- return (0);
-
-#ifdef __ISAPNP__
- if (!card->para[1] && isapnp_present()) {
- struct pnp_dev *pnp_d;
- while (ipid->card_vendor) {
- if ((pnp_c = pnp_find_card(ipid->card_vendor,
- ipid->card_device, pnp_c))) {
- pnp_d = NULL;
- if ((pnp_d = pnp_find_dev(pnp_c,
- ipid->vendor, ipid->function, pnp_d))) {
- int err;
-
- printk(KERN_INFO "HiSax: %s detected\n",
- (char *)ipid->driver_data);
- pnp_disable_dev(pnp_d);
- err = pnp_activate_dev(pnp_d);
- if (err < 0) {
- printk(KERN_WARNING "%s: pnp_activate_dev ret(%d)\n",
- __func__, err);
- return (0);
- }
- card->para[1] = pnp_port_start(pnp_d, 0);
- card->para[0] = pnp_irq(pnp_d, 0);
- if (card->para[0] == -1 || !card->para[1]) {
- printk(KERN_ERR "ITK PnP:some resources are missing %ld/%lx\n",
- card->para[0], card->para[1]);
- pnp_disable_dev(pnp_d);
- return (0);
- }
- break;
- } else {
- printk(KERN_ERR "ITK PnP: PnP error card found, no device\n");
- }
- }
- ipid++;
- pnp_c = NULL;
- }
- if (!ipid->card_vendor) {
- printk(KERN_INFO "ITK PnP: no ISAPnP card found\n");
- return (0);
- }
- }
-#endif
- /* IO-Ports */
- cs->hw.ix1.isac_ale = card->para[1] + ISAC_COMMAND_OFFSET;
- cs->hw.ix1.hscx_ale = card->para[1] + HSCX_COMMAND_OFFSET;
- cs->hw.ix1.isac = card->para[1] + ISAC_DATA_OFFSET;
- cs->hw.ix1.hscx = card->para[1] + HSCX_DATA_OFFSET;
- cs->hw.ix1.cfg_reg = card->para[1];
- cs->irq = card->para[0];
- if (cs->hw.ix1.cfg_reg) {
- if (!request_region(cs->hw.ix1.cfg_reg, 4, "ix1micro cfg")) {
- printk(KERN_WARNING
- "HiSax: ITK ix1-micro Rev.2 config port "
- "%x-%x already in use\n",
- cs->hw.ix1.cfg_reg,
- cs->hw.ix1.cfg_reg + 4);
- return (0);
- }
- }
- printk(KERN_INFO "HiSax: ITK ix1-micro Rev.2 config irq:%d io:0x%X\n",
- cs->irq, cs->hw.ix1.cfg_reg);
- setup_isac(cs);
- cs->readisac = &ReadISAC;
- cs->writeisac = &WriteISAC;
- cs->readisacfifo = &ReadISACfifo;
- cs->writeisacfifo = &WriteISACfifo;
- cs->BC_Read_Reg = &ReadHSCX;
- cs->BC_Write_Reg = &WriteHSCX;
- cs->BC_Send_Data = &hscx_fill_fifo;
- cs->cardmsg = &ix1_card_msg;
- cs->irq_func = &ix1micro_interrupt;
- ISACVersion(cs, "ix1-Micro:");
- if (HscxVersion(cs, "ix1-Micro:")) {
- printk(KERN_WARNING
- "ix1-Micro: wrong HSCX versions check IO address\n");
- release_io_ix1micro(cs);
- return (0);
- }
- return (1);
-}
diff --git a/drivers/isdn/hisax/jade.c b/drivers/isdn/hisax/jade.c
deleted file mode 100644
index e2ae7871a209..000000000000
--- a/drivers/isdn/hisax/jade.c
+++ /dev/null
@@ -1,305 +0,0 @@
-/* $Id: jade.c,v 1.9.2.4 2004/01/14 16:04:48 keil Exp $
- *
- * JADE stuff (derived from original hscx.c)
- *
- * Author Roland Klabunde
- * Copyright by Roland Klabunde <R.Klabunde@Berkom.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-
-#include <linux/init.h>
-#include "hisax.h"
-#include "hscx.h"
-#include "jade.h"
-#include "isdnl1.h"
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-
-
-int
-JadeVersion(struct IsdnCardState *cs, char *s)
-{
- int ver;
- int to = 50;
- cs->BC_Write_Reg(cs, -1, 0x50, 0x19);
- while (to) {
- udelay(1);
- ver = cs->BC_Read_Reg(cs, -1, 0x60);
- to--;
- if (ver)
- break;
- if (!to) {
- printk(KERN_INFO "%s JADE version not obtainable\n", s);
- return (0);
- }
- }
- /* Wait for the JADE */
- udelay(10);
- /* Read version */
- ver = cs->BC_Read_Reg(cs, -1, 0x60);
- printk(KERN_INFO "%s JADE version: %d\n", s, ver);
- return (1);
-}
-
-/* Write to indirect accessible jade register set */
-static void
-jade_write_indirect(struct IsdnCardState *cs, u_char reg, u_char value)
-{
- int to = 50;
- u_char ret;
-
- /* Write the data */
- cs->BC_Write_Reg(cs, -1, COMM_JADE + 1, value);
- /* Say JADE we wanna write indirect reg 'reg' */
- cs->BC_Write_Reg(cs, -1, COMM_JADE, reg);
- to = 50;
- /* Wait for RDY goes high */
- while (to) {
- udelay(1);
- ret = cs->BC_Read_Reg(cs, -1, COMM_JADE);
- to--;
- if (ret & 1)
- /* Got acknowledge */
- break;
- if (!to) {
- printk(KERN_INFO "Can not see ready bit from JADE DSP (reg=0x%X, value=0x%X)\n", reg, value);
- return;
- }
- }
-}
-
-
-
-static void
-modejade(struct BCState *bcs, int mode, int bc)
-{
- struct IsdnCardState *cs = bcs->cs;
- int jade = bcs->hw.hscx.hscx;
-
- if (cs->debug & L1_DEB_HSCX) {
- debugl1(cs, "jade %c mode %d ichan %d", 'A' + jade, mode, bc);
- }
- bcs->mode = mode;
- bcs->channel = bc;
-
- cs->BC_Write_Reg(cs, jade, jade_HDLC_MODE, (mode == L1_MODE_TRANS ? jadeMODE_TMO : 0x00));
- cs->BC_Write_Reg(cs, jade, jade_HDLC_CCR0, (jadeCCR0_PU | jadeCCR0_ITF));
- cs->BC_Write_Reg(cs, jade, jade_HDLC_CCR1, 0x00);
-
- jade_write_indirect(cs, jade_HDLC1SERRXPATH, 0x08);
- jade_write_indirect(cs, jade_HDLC2SERRXPATH, 0x08);
- jade_write_indirect(cs, jade_HDLC1SERTXPATH, 0x00);
- jade_write_indirect(cs, jade_HDLC2SERTXPATH, 0x00);
-
- cs->BC_Write_Reg(cs, jade, jade_HDLC_XCCR, 0x07);
- cs->BC_Write_Reg(cs, jade, jade_HDLC_RCCR, 0x07);
-
- if (bc == 0) {
- cs->BC_Write_Reg(cs, jade, jade_HDLC_TSAX, 0x00);
- cs->BC_Write_Reg(cs, jade, jade_HDLC_TSAR, 0x00);
- } else {
- cs->BC_Write_Reg(cs, jade, jade_HDLC_TSAX, 0x04);
- cs->BC_Write_Reg(cs, jade, jade_HDLC_TSAR, 0x04);
- }
- switch (mode) {
- case (L1_MODE_NULL):
- cs->BC_Write_Reg(cs, jade, jade_HDLC_MODE, jadeMODE_TMO);
- break;
- case (L1_MODE_TRANS):
- cs->BC_Write_Reg(cs, jade, jade_HDLC_MODE, (jadeMODE_TMO | jadeMODE_RAC | jadeMODE_XAC));
- break;
- case (L1_MODE_HDLC):
- cs->BC_Write_Reg(cs, jade, jade_HDLC_MODE, (jadeMODE_RAC | jadeMODE_XAC));
- break;
- }
- if (mode) {
- cs->BC_Write_Reg(cs, jade, jade_HDLC_RCMD, (jadeRCMD_RRES | jadeRCMD_RMC));
- cs->BC_Write_Reg(cs, jade, jade_HDLC_XCMD, jadeXCMD_XRES);
- /* Unmask ints */
- cs->BC_Write_Reg(cs, jade, jade_HDLC_IMR, 0xF8);
- }
- else
- /* Mask ints */
- cs->BC_Write_Reg(cs, jade, jade_HDLC_IMR, 0x00);
-}
-
-static void
-jade_l2l1(struct PStack *st, int pr, void *arg)
-{
- struct BCState *bcs = st->l1.bcs;
- struct sk_buff *skb = arg;
- u_long flags;
-
- switch (pr) {
- case (PH_DATA | REQUEST):
- spin_lock_irqsave(&bcs->cs->lock, flags);
- if (bcs->tx_skb) {
- skb_queue_tail(&bcs->squeue, skb);
- } else {
- bcs->tx_skb = skb;
- test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
- bcs->hw.hscx.count = 0;
- bcs->cs->BC_Send_Data(bcs);
- }
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
- break;
- case (PH_PULL | INDICATION):
- spin_lock_irqsave(&bcs->cs->lock, flags);
- if (bcs->tx_skb) {
- printk(KERN_WARNING "jade_l2l1: this shouldn't happen\n");
- } else {
- test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
- bcs->tx_skb = skb;
- bcs->hw.hscx.count = 0;
- bcs->cs->BC_Send_Data(bcs);
- }
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
- break;
- case (PH_PULL | REQUEST):
- if (!bcs->tx_skb) {
- test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
- st->l1.l1l2(st, PH_PULL | CONFIRM, NULL);
- } else
- test_and_set_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
- break;
- case (PH_ACTIVATE | REQUEST):
- spin_lock_irqsave(&bcs->cs->lock, flags);
- test_and_set_bit(BC_FLG_ACTIV, &bcs->Flag);
- modejade(bcs, st->l1.mode, st->l1.bc);
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
- l1_msg_b(st, pr, arg);
- break;
- case (PH_DEACTIVATE | REQUEST):
- l1_msg_b(st, pr, arg);
- break;
- case (PH_DEACTIVATE | CONFIRM):
- spin_lock_irqsave(&bcs->cs->lock, flags);
- test_and_clear_bit(BC_FLG_ACTIV, &bcs->Flag);
- test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
- modejade(bcs, 0, st->l1.bc);
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
- st->l1.l1l2(st, PH_DEACTIVATE | CONFIRM, NULL);
- break;
- }
-}
-
-static void
-close_jadestate(struct BCState *bcs)
-{
- modejade(bcs, 0, bcs->channel);
- if (test_and_clear_bit(BC_FLG_INIT, &bcs->Flag)) {
- kfree(bcs->hw.hscx.rcvbuf);
- bcs->hw.hscx.rcvbuf = NULL;
- kfree(bcs->blog);
- bcs->blog = NULL;
- skb_queue_purge(&bcs->rqueue);
- skb_queue_purge(&bcs->squeue);
- if (bcs->tx_skb) {
- dev_kfree_skb_any(bcs->tx_skb);
- bcs->tx_skb = NULL;
- test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
- }
- }
-}
-
-static int
-open_jadestate(struct IsdnCardState *cs, struct BCState *bcs)
-{
- if (!test_and_set_bit(BC_FLG_INIT, &bcs->Flag)) {
- if (!(bcs->hw.hscx.rcvbuf = kmalloc(HSCX_BUFMAX, GFP_ATOMIC))) {
- printk(KERN_WARNING
- "HiSax: No memory for hscx.rcvbuf\n");
- test_and_clear_bit(BC_FLG_INIT, &bcs->Flag);
- return (1);
- }
- if (!(bcs->blog = kmalloc(MAX_BLOG_SPACE, GFP_ATOMIC))) {
- printk(KERN_WARNING
- "HiSax: No memory for bcs->blog\n");
- test_and_clear_bit(BC_FLG_INIT, &bcs->Flag);
- kfree(bcs->hw.hscx.rcvbuf);
- bcs->hw.hscx.rcvbuf = NULL;
- return (2);
- }
- skb_queue_head_init(&bcs->rqueue);
- skb_queue_head_init(&bcs->squeue);
- }
- bcs->tx_skb = NULL;
- test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
- bcs->event = 0;
- bcs->hw.hscx.rcvidx = 0;
- bcs->tx_cnt = 0;
- return (0);
-}
-
-
-static int
-setstack_jade(struct PStack *st, struct BCState *bcs)
-{
- bcs->channel = st->l1.bc;
- if (open_jadestate(st->l1.hardware, bcs))
- return (-1);
- st->l1.bcs = bcs;
- st->l2.l2l1 = jade_l2l1;
- setstack_manager(st);
- bcs->st = st;
- setstack_l1_B(st);
- return (0);
-}
-
-void
-clear_pending_jade_ints(struct IsdnCardState *cs)
-{
- int val;
-
- cs->BC_Write_Reg(cs, 0, jade_HDLC_IMR, 0x00);
- cs->BC_Write_Reg(cs, 1, jade_HDLC_IMR, 0x00);
-
- val = cs->BC_Read_Reg(cs, 1, jade_HDLC_ISR);
- debugl1(cs, "jade B ISTA %x", val);
- val = cs->BC_Read_Reg(cs, 0, jade_HDLC_ISR);
- debugl1(cs, "jade A ISTA %x", val);
- val = cs->BC_Read_Reg(cs, 1, jade_HDLC_STAR);
- debugl1(cs, "jade B STAR %x", val);
- val = cs->BC_Read_Reg(cs, 0, jade_HDLC_STAR);
- debugl1(cs, "jade A STAR %x", val);
- /* Unmask ints */
- cs->BC_Write_Reg(cs, 0, jade_HDLC_IMR, 0xF8);
- cs->BC_Write_Reg(cs, 1, jade_HDLC_IMR, 0xF8);
-}
-
-void
-initjade(struct IsdnCardState *cs)
-{
- cs->bcs[0].BC_SetStack = setstack_jade;
- cs->bcs[1].BC_SetStack = setstack_jade;
- cs->bcs[0].BC_Close = close_jadestate;
- cs->bcs[1].BC_Close = close_jadestate;
- cs->bcs[0].hw.hscx.hscx = 0;
- cs->bcs[1].hw.hscx.hscx = 1;
-
- /* Stop DSP audio tx/rx */
- jade_write_indirect(cs, 0x11, 0x0f);
- jade_write_indirect(cs, 0x17, 0x2f);
-
- /* Transparent Mode, RxTx inactive, No Test, No RFS/TFS */
- cs->BC_Write_Reg(cs, 0, jade_HDLC_MODE, jadeMODE_TMO);
- cs->BC_Write_Reg(cs, 1, jade_HDLC_MODE, jadeMODE_TMO);
- /* Power down, 1-Idle, RxTx least significant bit first */
- cs->BC_Write_Reg(cs, 0, jade_HDLC_CCR0, 0x00);
- cs->BC_Write_Reg(cs, 1, jade_HDLC_CCR0, 0x00);
- /* Mask all interrupts */
- cs->BC_Write_Reg(cs, 0, jade_HDLC_IMR, 0x00);
- cs->BC_Write_Reg(cs, 1, jade_HDLC_IMR, 0x00);
- /* Setup host access to hdlc controller */
- jade_write_indirect(cs, jade_HDLCCNTRACCESS, (jadeINDIRECT_HAH1 | jadeINDIRECT_HAH2));
- /* Unmask HDLC int (don't forget DSP int later on)*/
- cs->BC_Write_Reg(cs, -1, jade_INT, (jadeINT_HDLC1 | jadeINT_HDLC2));
-
- /* once again TRANSPARENT */
- modejade(cs->bcs, 0, 0);
- modejade(cs->bcs + 1, 0, 0);
-}
diff --git a/drivers/isdn/hisax/jade.h b/drivers/isdn/hisax/jade.h
deleted file mode 100644
index 4b98096a5858..000000000000
--- a/drivers/isdn/hisax/jade.h
+++ /dev/null
@@ -1,134 +0,0 @@
-/* $Id: jade.h,v 1.5.2.3 2004/01/14 16:04:48 keil Exp $
- *
- * JADE specific defines
- *
- * Author Roland Klabunde
- * Copyright by Roland Klabunde <R.Klabunde@Berkom.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-/* All Registers original Siemens Spec */
-#ifndef __JADE_H__
-#define __JADE_H__
-
-/* Special registers for access to indirect accessible JADE regs */
-#define DIRECT_IO_JADE 0x0000 /* Jade direct io access area */
-#define COMM_JADE 0x0040 /* Jade communication area */
-
-/********************************************************************/
-/* JADE-HDLC registers */
-/********************************************************************/
-#define jade_HDLC_RFIFO 0x00 /* R */
-#define jade_HDLC_XFIFO 0x00 /* W */
-
-#define jade_HDLC_STAR 0x20 /* R */
-#define jadeSTAR_XDOV 0x80
-#define jadeSTAR_XFW 0x40 /* Does not work*/
-#define jadeSTAR_XCEC 0x20
-#define jadeSTAR_RCEC 0x10
-#define jadeSTAR_BSY 0x08
-#define jadeSTAR_RNA 0x04
-#define jadeSTAR_STR 0x02
-#define jadeSTAR_STX 0x01
-
-#define jade_HDLC_XCMD 0x20 /* W */
-#define jadeXCMD_XF 0x80
-#define jadeXCMD_XME 0x40
-#define jadeXCMD_XRES 0x20
-#define jadeXCMD_STX 0x01
-
-#define jade_HDLC_RSTA 0x21 /* R */
-#define jadeRSTA_VFR 0x80
-#define jadeRSTA_RDO 0x40
-#define jadeRSTA_CRC 0x20
-#define jadeRSTA_RAB 0x10
-#define jadeRSTA_MASK 0xF0
-
-#define jade_HDLC_MODE 0x22 /* RW*/
-#define jadeMODE_TMO 0x80
-#define jadeMODE_RAC 0x40
-#define jadeMODE_XAC 0x20
-#define jadeMODE_TLP 0x10
-#define jadeMODE_ERFS 0x02
-#define jadeMODE_ETFS 0x01
-
-#define jade_HDLC_RBCH 0x24 /* R */
-
-#define jade_HDLC_RBCL 0x25 /* R */
-#define jade_HDLC_RCMD 0x25 /* W */
-#define jadeRCMD_RMC 0x80
-#define jadeRCMD_RRES 0x40
-#define jadeRCMD_RMD 0x20
-#define jadeRCMD_STR 0x02
-
-#define jade_HDLC_CCR0 0x26 /* RW*/
-#define jadeCCR0_PU 0x80
-#define jadeCCR0_ITF 0x40
-#define jadeCCR0_C32 0x20
-#define jadeCCR0_CRL 0x10
-#define jadeCCR0_RCRC 0x08
-#define jadeCCR0_XCRC 0x04
-#define jadeCCR0_RMSB 0x02
-#define jadeCCR0_XMSB 0x01
-
-#define jade_HDLC_CCR1 0x27 /* RW*/
-#define jadeCCR1_RCS0 0x80
-#define jadeCCR1_RCONT 0x40
-#define jadeCCR1_RFDIS 0x20
-#define jadeCCR1_XCS0 0x10
-#define jadeCCR1_XCONT 0x08
-#define jadeCCR1_XFDIS 0x04
-
-#define jade_HDLC_TSAR 0x28 /* RW*/
-#define jade_HDLC_TSAX 0x29 /* RW*/
-#define jade_HDLC_RCCR 0x2A /* RW*/
-#define jade_HDLC_XCCR 0x2B /* RW*/
-
-#define jade_HDLC_ISR 0x2C /* R */
-#define jade_HDLC_IMR 0x2C /* W */
-#define jadeISR_RME 0x80
-#define jadeISR_RPF 0x40
-#define jadeISR_RFO 0x20
-#define jadeISR_XPR 0x10
-#define jadeISR_XDU 0x08
-#define jadeISR_ALLS 0x04
-
-#define jade_INT 0x75
-#define jadeINT_HDLC1 0x02
-#define jadeINT_HDLC2 0x01
-#define jadeINT_DSP 0x04
-#define jade_INTR 0x70
-
-/********************************************************************/
-/* Indirect accessible JADE registers of common interest */
-/********************************************************************/
-#define jade_CHIPVERSIONNR 0x00 /* Does not work*/
-
-#define jade_HDLCCNTRACCESS 0x10
-#define jadeINDIRECT_HAH1 0x02
-#define jadeINDIRECT_HAH2 0x01
-
-#define jade_HDLC1SERRXPATH 0x1D
-#define jade_HDLC1SERTXPATH 0x1E
-#define jade_HDLC2SERRXPATH 0x1F
-#define jade_HDLC2SERTXPATH 0x20
-#define jadeINDIRECT_SLIN1 0x10
-#define jadeINDIRECT_SLIN0 0x08
-#define jadeINDIRECT_LMOD1 0x04
-#define jadeINDIRECT_LMOD0 0x02
-#define jadeINDIRECT_HHR 0x01
-#define jadeINDIRECT_HHX 0x01
-
-#define jade_RXAUDIOCH1CFG 0x11
-#define jade_RXAUDIOCH2CFG 0x14
-#define jade_TXAUDIOCH1CFG 0x17
-#define jade_TXAUDIOCH2CFG 0x1A
-
-extern int JadeVersion(struct IsdnCardState *cs, char *s);
-extern void clear_pending_jade_ints(struct IsdnCardState *cs);
-extern void initjade(struct IsdnCardState *cs);
-
-#endif /* __JADE_H__ */
diff --git a/drivers/isdn/hisax/jade_irq.c b/drivers/isdn/hisax/jade_irq.c
deleted file mode 100644
index a89e2df911c5..000000000000
--- a/drivers/isdn/hisax/jade_irq.c
+++ /dev/null
@@ -1,238 +0,0 @@
-/* $Id: jade_irq.c,v 1.7.2.4 2004/02/11 13:21:34 keil Exp $
- *
- * Low level JADE IRQ stuff (derived from original hscx_irq.c)
- *
- * Author Roland Klabunde
- * Copyright by Roland Klabunde <R.Klabunde@Berkom.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-static inline void
-waitforCEC(struct IsdnCardState *cs, int jade, int reg)
-{
- int to = 50;
- int mask = (reg == jade_HDLC_XCMD ? jadeSTAR_XCEC : jadeSTAR_RCEC);
- while ((READJADE(cs, jade, jade_HDLC_STAR) & mask) && to) {
- udelay(1);
- to--;
- }
- if (!to)
- printk(KERN_WARNING "HiSax: waitforCEC (jade) timeout\n");
-}
-
-
-static inline void
-waitforXFW(struct IsdnCardState *cs, int jade)
-{
- /* Does not work on older jade versions, don't care */
-}
-
-static inline void
-WriteJADECMDR(struct IsdnCardState *cs, int jade, int reg, u_char data)
-{
- waitforCEC(cs, jade, reg);
- WRITEJADE(cs, jade, reg, data);
-}
-
-
-
-static void
-jade_empty_fifo(struct BCState *bcs, int count)
-{
- u_char *ptr;
- struct IsdnCardState *cs = bcs->cs;
-
- if ((cs->debug & L1_DEB_HSCX) && !(cs->debug & L1_DEB_HSCX_FIFO))
- debugl1(cs, "jade_empty_fifo");
-
- if (bcs->hw.hscx.rcvidx + count > HSCX_BUFMAX) {
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "jade_empty_fifo: incoming packet too large");
- WriteJADECMDR(cs, bcs->hw.hscx.hscx, jade_HDLC_RCMD, jadeRCMD_RMC);
- bcs->hw.hscx.rcvidx = 0;
- return;
- }
- ptr = bcs->hw.hscx.rcvbuf + bcs->hw.hscx.rcvidx;
- bcs->hw.hscx.rcvidx += count;
- READJADEFIFO(cs, bcs->hw.hscx.hscx, ptr, count);
- WriteJADECMDR(cs, bcs->hw.hscx.hscx, jade_HDLC_RCMD, jadeRCMD_RMC);
- if (cs->debug & L1_DEB_HSCX_FIFO) {
- char *t = bcs->blog;
-
- t += sprintf(t, "jade_empty_fifo %c cnt %d",
- bcs->hw.hscx.hscx ? 'B' : 'A', count);
- QuickHex(t, ptr, count);
- debugl1(cs, "%s", bcs->blog);
- }
-}
-
-static void
-jade_fill_fifo(struct BCState *bcs)
-{
- struct IsdnCardState *cs = bcs->cs;
- int more, count;
- int fifo_size = 32;
- u_char *ptr;
-
- if ((cs->debug & L1_DEB_HSCX) && !(cs->debug & L1_DEB_HSCX_FIFO))
- debugl1(cs, "jade_fill_fifo");
-
- if (!bcs->tx_skb)
- return;
- if (bcs->tx_skb->len <= 0)
- return;
-
- more = (bcs->mode == L1_MODE_TRANS) ? 1 : 0;
- if (bcs->tx_skb->len > fifo_size) {
- more = !0;
- count = fifo_size;
- } else
- count = bcs->tx_skb->len;
-
- waitforXFW(cs, bcs->hw.hscx.hscx);
- ptr = bcs->tx_skb->data;
- skb_pull(bcs->tx_skb, count);
- bcs->tx_cnt -= count;
- bcs->hw.hscx.count += count;
- WRITEJADEFIFO(cs, bcs->hw.hscx.hscx, ptr, count);
- WriteJADECMDR(cs, bcs->hw.hscx.hscx, jade_HDLC_XCMD, more ? jadeXCMD_XF : (jadeXCMD_XF | jadeXCMD_XME));
- if (cs->debug & L1_DEB_HSCX_FIFO) {
- char *t = bcs->blog;
-
- t += sprintf(t, "jade_fill_fifo %c cnt %d",
- bcs->hw.hscx.hscx ? 'B' : 'A', count);
- QuickHex(t, ptr, count);
- debugl1(cs, "%s", bcs->blog);
- }
-}
-
-
-static void
-jade_interrupt(struct IsdnCardState *cs, u_char val, u_char jade)
-{
- u_char r;
- struct BCState *bcs = cs->bcs + jade;
- struct sk_buff *skb;
- int fifo_size = 32;
- int count;
- int i_jade = (int) jade; /* To satisfy the compiler */
-
- if (!test_bit(BC_FLG_INIT, &bcs->Flag))
- return;
-
- if (val & 0x80) { /* RME */
- r = READJADE(cs, i_jade, jade_HDLC_RSTA);
- if ((r & 0xf0) != 0xa0) {
- if (!(r & 0x80))
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "JADE %s invalid frame", (jade ? "B" : "A"));
- if ((r & 0x40) && bcs->mode)
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "JADE %c RDO mode=%d", 'A' + jade, bcs->mode);
- if (!(r & 0x20))
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "JADE %c CRC error", 'A' + jade);
- WriteJADECMDR(cs, jade, jade_HDLC_RCMD, jadeRCMD_RMC);
- } else {
- count = READJADE(cs, i_jade, jade_HDLC_RBCL) & 0x1F;
- if (count == 0)
- count = fifo_size;
- jade_empty_fifo(bcs, count);
- if ((count = bcs->hw.hscx.rcvidx - 1) > 0) {
- if (cs->debug & L1_DEB_HSCX_FIFO)
- debugl1(cs, "HX Frame %d", count);
- if (!(skb = dev_alloc_skb(count)))
- printk(KERN_WARNING "JADE %s receive out of memory\n", (jade ? "B" : "A"));
- else {
- skb_put_data(skb, bcs->hw.hscx.rcvbuf,
- count);
- skb_queue_tail(&bcs->rqueue, skb);
- }
- }
- }
- bcs->hw.hscx.rcvidx = 0;
- schedule_event(bcs, B_RCVBUFREADY);
- }
- if (val & 0x40) { /* RPF */
- jade_empty_fifo(bcs, fifo_size);
- if (bcs->mode == L1_MODE_TRANS) {
- /* receive audio data */
- if (!(skb = dev_alloc_skb(fifo_size)))
- printk(KERN_WARNING "HiSax: receive out of memory\n");
- else {
- skb_put_data(skb, bcs->hw.hscx.rcvbuf,
- fifo_size);
- skb_queue_tail(&bcs->rqueue, skb);
- }
- bcs->hw.hscx.rcvidx = 0;
- schedule_event(bcs, B_RCVBUFREADY);
- }
- }
- if (val & 0x10) { /* XPR */
- if (bcs->tx_skb) {
- if (bcs->tx_skb->len) {
- jade_fill_fifo(bcs);
- return;
- } else {
- if (test_bit(FLG_LLI_L1WAKEUP, &bcs->st->lli.flag) &&
- (PACKET_NOACK != bcs->tx_skb->pkt_type)) {
- u_long flags;
- spin_lock_irqsave(&bcs->aclock, flags);
- bcs->ackcnt += bcs->hw.hscx.count;
- spin_unlock_irqrestore(&bcs->aclock, flags);
- schedule_event(bcs, B_ACKPENDING);
- }
- dev_kfree_skb_irq(bcs->tx_skb);
- bcs->hw.hscx.count = 0;
- bcs->tx_skb = NULL;
- }
- }
- if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) {
- bcs->hw.hscx.count = 0;
- test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
- jade_fill_fifo(bcs);
- } else {
- test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
- schedule_event(bcs, B_XMTBUFREADY);
- }
- }
-}
-
-static inline void
-jade_int_main(struct IsdnCardState *cs, u_char val, int jade)
-{
- struct BCState *bcs;
- bcs = cs->bcs + jade;
-
- if (val & jadeISR_RFO) {
- /* handled with RDO */
- val &= ~jadeISR_RFO;
- }
- if (val & jadeISR_XDU) {
- /* relevant in HDLC mode only */
- /* don't reset XPR here */
- if (bcs->mode == 1)
- jade_fill_fifo(bcs);
- else {
- /* Here we lost an TX interrupt, so
- * restart transmitting the whole frame.
- */
- if (bcs->tx_skb) {
- skb_push(bcs->tx_skb, bcs->hw.hscx.count);
- bcs->tx_cnt += bcs->hw.hscx.count;
- bcs->hw.hscx.count = 0;
- }
- WriteJADECMDR(cs, bcs->hw.hscx.hscx, jade_HDLC_XCMD, jadeXCMD_XRES);
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "JADE %c EXIR %x Lost TX", 'A' + jade, val);
- }
- }
- if (val & (jadeISR_RME | jadeISR_RPF | jadeISR_XPR)) {
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "JADE %c interrupt %x", 'A' + jade, val);
- jade_interrupt(cs, val, jade);
- }
-}
diff --git a/drivers/isdn/hisax/l3_1tr6.c b/drivers/isdn/hisax/l3_1tr6.c
deleted file mode 100644
index 98f60d1523f4..000000000000
--- a/drivers/isdn/hisax/l3_1tr6.c
+++ /dev/null
@@ -1,932 +0,0 @@
-/* $Id: l3_1tr6.c,v 2.15.2.3 2004/01/13 14:31:25 keil Exp $
- *
- * German 1TR6 D-channel protocol
- *
- * Author Karsten Keil
- * Copyright by Karsten Keil <keil@isdn4linux.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- * For changes and modifications please read
- * Documentation/isdn/HiSax.cert
- *
- */
-
-#include "hisax.h"
-#include "l3_1tr6.h"
-#include "isdnl3.h"
-#include <linux/ctype.h>
-
-extern char *HiSax_getrev(const char *revision);
-static const char *l3_1tr6_revision = "$Revision: 2.15.2.3 $";
-
-#define MsgHead(ptr, cref, mty, dis) \
- *ptr++ = dis; \
- *ptr++ = 0x1; \
- *ptr++ = cref ^ 0x80; \
- *ptr++ = mty
-
-static void
-l3_1TR6_message(struct l3_process *pc, u_char mt, u_char pd)
-{
- struct sk_buff *skb;
- u_char *p;
-
- if (!(skb = l3_alloc_skb(4)))
- return;
- p = skb_put(skb, 4);
- MsgHead(p, pc->callref, mt, pd);
- l3_msg(pc->st, DL_DATA | REQUEST, skb);
-}
-
-static void
-l3_1tr6_release_req(struct l3_process *pc, u_char pr, void *arg)
-{
- StopAllL3Timer(pc);
- newl3state(pc, 19);
- l3_1TR6_message(pc, MT_N1_REL, PROTO_DIS_N1);
- L3AddTimer(&pc->timer, T308, CC_T308_1);
-}
-
-static void
-l3_1tr6_invalid(struct l3_process *pc, u_char pr, void *arg)
-{
- struct sk_buff *skb = arg;
-
- dev_kfree_skb(skb);
- l3_1tr6_release_req(pc, 0, NULL);
-}
-
-static void
-l3_1tr6_error(struct l3_process *pc, u_char *msg, struct sk_buff *skb)
-{
- dev_kfree_skb(skb);
- if (pc->st->l3.debug & L3_DEB_WARN)
- l3_debug(pc->st, "%s", msg);
- l3_1tr6_release_req(pc, 0, NULL);
-}
-
-static void
-l3_1tr6_setup_req(struct l3_process *pc, u_char pr, void *arg)
-{
- struct sk_buff *skb;
- u_char tmp[128];
- u_char *p = tmp;
- u_char *teln;
- u_char *eaz;
- u_char channel = 0;
- int l;
-
- MsgHead(p, pc->callref, MT_N1_SETUP, PROTO_DIS_N1);
- teln = pc->para.setup.phone;
- pc->para.spv = 0;
- if (!isdigit(*teln)) {
- switch (0x5f & *teln) {
- case 'S':
- pc->para.spv = 1;
- break;
- case 'C':
- channel = 0x08;
- /* fall through */
- case 'P':
- channel |= 0x80;
- teln++;
- if (*teln == '1')
- channel |= 0x01;
- else
- channel |= 0x02;
- break;
- default:
- if (pc->st->l3.debug & L3_DEB_WARN)
- l3_debug(pc->st, "Wrong MSN Code");
- break;
- }
- teln++;
- }
- if (channel) {
- *p++ = 0x18; /* channel indicator */
- *p++ = 1;
- *p++ = channel;
- }
- if (pc->para.spv) { /* SPV ? */
- /* NSF SPV */
- *p++ = WE0_netSpecFac;
- *p++ = 4; /* Laenge */
- *p++ = 0;
- *p++ = FAC_SPV; /* SPV */
- *p++ = pc->para.setup.si1; /* 0 for all Services */
- *p++ = pc->para.setup.si2; /* 0 for all Services */
- *p++ = WE0_netSpecFac;
- *p++ = 4; /* Laenge */
- *p++ = 0;
- *p++ = FAC_Activate; /* aktiviere SPV (default) */
- *p++ = pc->para.setup.si1; /* 0 for all Services */
- *p++ = pc->para.setup.si2; /* 0 for all Services */
- }
- eaz = pc->para.setup.eazmsn;
- if (*eaz) {
- *p++ = WE0_origAddr;
- *p++ = strlen(eaz) + 1;
- /* Classify as AnyPref. */
- *p++ = 0x81; /* Ext = '1'B, Type = '000'B, Plan = '0001'B. */
- while (*eaz)
- *p++ = *eaz++ & 0x7f;
- }
- *p++ = WE0_destAddr;
- *p++ = strlen(teln) + 1;
- /* Classify as AnyPref. */
- *p++ = 0x81; /* Ext = '1'B, Type = '000'B, Plan = '0001'B. */
- while (*teln)
- *p++ = *teln++ & 0x7f;
-
- *p++ = WE_Shift_F6;
- /* Codesatz 6 fuer Service */
- *p++ = WE6_serviceInd;
- *p++ = 2; /* len=2 info,info2 */
- *p++ = pc->para.setup.si1;
- *p++ = pc->para.setup.si2;
-
- l = p - tmp;
- if (!(skb = l3_alloc_skb(l)))
- return;
- skb_put_data(skb, tmp, l);
- L3DelTimer(&pc->timer);
- L3AddTimer(&pc->timer, T303, CC_T303);
- newl3state(pc, 1);
- l3_msg(pc->st, DL_DATA | REQUEST, skb);
-}
-
-static void
-l3_1tr6_setup(struct l3_process *pc, u_char pr, void *arg)
-{
- u_char *p;
- int bcfound = 0;
- struct sk_buff *skb = arg;
-
- /* Channel Identification */
- p = findie(skb->data, skb->len, WE0_chanID, 0);
- if (p) {
- if (p[1] != 1) {
- l3_1tr6_error(pc, "setup wrong chanID len", skb);
- return;
- }
- if ((p[2] & 0xf4) != 0x80) {
- l3_1tr6_error(pc, "setup wrong WE0_chanID", skb);
- return;
- }
- if ((pc->para.bchannel = p[2] & 0x3))
- bcfound++;
- } else {
- l3_1tr6_error(pc, "missing setup chanID", skb);
- return;
- }
-
- p = skb->data;
- if ((p = findie(p, skb->len, WE6_serviceInd, 6))) {
- pc->para.setup.si1 = p[2];
- pc->para.setup.si2 = p[3];
- } else {
- l3_1tr6_error(pc, "missing setup SI", skb);
- return;
- }
-
- p = skb->data;
- if ((p = findie(p, skb->len, WE0_destAddr, 0)))
- iecpy(pc->para.setup.eazmsn, p, 1);
- else
- pc->para.setup.eazmsn[0] = 0;
-
- p = skb->data;
- if ((p = findie(p, skb->len, WE0_origAddr, 0))) {
- iecpy(pc->para.setup.phone, p, 1);
- } else
- pc->para.setup.phone[0] = 0;
-
- p = skb->data;
- pc->para.spv = 0;
- if ((p = findie(p, skb->len, WE0_netSpecFac, 0))) {
- if ((FAC_SPV == p[3]) || (FAC_Activate == p[3]))
- pc->para.spv = 1;
- }
- dev_kfree_skb(skb);
-
- /* Signal all services, linklevel takes care of Service-Indicator */
- if (bcfound) {
- if ((pc->para.setup.si1 != 7) && (pc->st->l3.debug & L3_DEB_WARN)) {
- l3_debug(pc->st, "non-digital call: %s -> %s",
- pc->para.setup.phone,
- pc->para.setup.eazmsn);
- }
- newl3state(pc, 6);
- pc->st->l3.l3l4(pc->st, CC_SETUP | INDICATION, pc);
- } else
- release_l3_process(pc);
-}
-
-static void
-l3_1tr6_setup_ack(struct l3_process *pc, u_char pr, void *arg)
-{
- u_char *p;
- struct sk_buff *skb = arg;
-
- L3DelTimer(&pc->timer);
- p = skb->data;
- newl3state(pc, 2);
- if ((p = findie(p, skb->len, WE0_chanID, 0))) {
- if (p[1] != 1) {
- l3_1tr6_error(pc, "setup_ack wrong chanID len", skb);
- return;
- }
- if ((p[2] & 0xf4) != 0x80) {
- l3_1tr6_error(pc, "setup_ack wrong WE0_chanID", skb);
- return;
- }
- pc->para.bchannel = p[2] & 0x3;
- } else {
- l3_1tr6_error(pc, "missing setup_ack WE0_chanID", skb);
- return;
- }
- dev_kfree_skb(skb);
- L3AddTimer(&pc->timer, T304, CC_T304);
- pc->st->l3.l3l4(pc->st, CC_MORE_INFO | INDICATION, pc);
-}
-
-static void
-l3_1tr6_call_sent(struct l3_process *pc, u_char pr, void *arg)
-{
- u_char *p;
- struct sk_buff *skb = arg;
-
- L3DelTimer(&pc->timer);
- p = skb->data;
- if ((p = findie(p, skb->len, WE0_chanID, 0))) {
- if (p[1] != 1) {
- l3_1tr6_error(pc, "call sent wrong chanID len", skb);
- return;
- }
- if ((p[2] & 0xf4) != 0x80) {
- l3_1tr6_error(pc, "call sent wrong WE0_chanID", skb);
- return;
- }
- if ((pc->state == 2) && (pc->para.bchannel != (p[2] & 0x3))) {
- l3_1tr6_error(pc, "call sent wrong chanID value", skb);
- return;
- }
- pc->para.bchannel = p[2] & 0x3;
- } else {
- l3_1tr6_error(pc, "missing call sent WE0_chanID", skb);
- return;
- }
- dev_kfree_skb(skb);
- L3AddTimer(&pc->timer, T310, CC_T310);
- newl3state(pc, 3);
- pc->st->l3.l3l4(pc->st, CC_PROCEEDING | INDICATION, pc);
-}
-
-static void
-l3_1tr6_alert(struct l3_process *pc, u_char pr, void *arg)
-{
- struct sk_buff *skb = arg;
-
- dev_kfree_skb(skb);
- L3DelTimer(&pc->timer); /* T304 */
- newl3state(pc, 4);
- pc->st->l3.l3l4(pc->st, CC_ALERTING | INDICATION, pc);
-}
-
-static void
-l3_1tr6_info(struct l3_process *pc, u_char pr, void *arg)
-{
- u_char *p;
- int i, tmpcharge = 0;
- char a_charge[8];
- struct sk_buff *skb = arg;
-
- p = skb->data;
- if ((p = findie(p, skb->len, WE6_chargingInfo, 6))) {
- iecpy(a_charge, p, 1);
- for (i = 0; i < strlen(a_charge); i++) {
- tmpcharge *= 10;
- tmpcharge += a_charge[i] & 0xf;
- }
- if (tmpcharge > pc->para.chargeinfo) {
- pc->para.chargeinfo = tmpcharge;
- pc->st->l3.l3l4(pc->st, CC_CHARGE | INDICATION, pc);
- }
- if (pc->st->l3.debug & L3_DEB_CHARGE) {
- l3_debug(pc->st, "charging info %d",
- pc->para.chargeinfo);
- }
- } else if (pc->st->l3.debug & L3_DEB_CHARGE)
- l3_debug(pc->st, "charging info not found");
- dev_kfree_skb(skb);
-
-}
-
-static void
-l3_1tr6_info_s2(struct l3_process *pc, u_char pr, void *arg)
-{
- struct sk_buff *skb = arg;
-
- dev_kfree_skb(skb);
-}
-
-static void
-l3_1tr6_connect(struct l3_process *pc, u_char pr, void *arg)
-{
- struct sk_buff *skb = arg;
-
- L3DelTimer(&pc->timer); /* T310 */
- if (!findie(skb->data, skb->len, WE6_date, 6)) {
- l3_1tr6_error(pc, "missing connect date", skb);
- return;
- }
- newl3state(pc, 10);
- dev_kfree_skb(skb);
- pc->para.chargeinfo = 0;
- pc->st->l3.l3l4(pc->st, CC_SETUP | CONFIRM, pc);
-}
-
-static void
-l3_1tr6_rel(struct l3_process *pc, u_char pr, void *arg)
-{
- struct sk_buff *skb = arg;
- u_char *p;
-
- p = skb->data;
- if ((p = findie(p, skb->len, WE0_cause, 0))) {
- if (p[1] > 0) {
- pc->para.cause = p[2];
- if (p[1] > 1)
- pc->para.loc = p[3];
- else
- pc->para.loc = 0;
- } else {
- pc->para.cause = 0;
- pc->para.loc = 0;
- }
- } else {
- pc->para.cause = NO_CAUSE;
- l3_1tr6_error(pc, "missing REL cause", skb);
- return;
- }
- dev_kfree_skb(skb);
- StopAllL3Timer(pc);
- newl3state(pc, 0);
- l3_1TR6_message(pc, MT_N1_REL_ACK, PROTO_DIS_N1);
- pc->st->l3.l3l4(pc->st, CC_RELEASE | INDICATION, pc);
- release_l3_process(pc);
-}
-
-static void
-l3_1tr6_rel_ack(struct l3_process *pc, u_char pr, void *arg)
-{
- struct sk_buff *skb = arg;
-
- dev_kfree_skb(skb);
- StopAllL3Timer(pc);
- newl3state(pc, 0);
- pc->para.cause = NO_CAUSE;
- pc->st->l3.l3l4(pc->st, CC_RELEASE | CONFIRM, pc);
- release_l3_process(pc);
-}
-
-static void
-l3_1tr6_disc(struct l3_process *pc, u_char pr, void *arg)
-{
- struct sk_buff *skb = arg;
- u_char *p;
- int i, tmpcharge = 0;
- char a_charge[8];
-
- StopAllL3Timer(pc);
- p = skb->data;
- if ((p = findie(p, skb->len, WE6_chargingInfo, 6))) {
- iecpy(a_charge, p, 1);
- for (i = 0; i < strlen(a_charge); i++) {
- tmpcharge *= 10;
- tmpcharge += a_charge[i] & 0xf;
- }
- if (tmpcharge > pc->para.chargeinfo) {
- pc->para.chargeinfo = tmpcharge;
- pc->st->l3.l3l4(pc->st, CC_CHARGE | INDICATION, pc);
- }
- if (pc->st->l3.debug & L3_DEB_CHARGE) {
- l3_debug(pc->st, "charging info %d",
- pc->para.chargeinfo);
- }
- } else if (pc->st->l3.debug & L3_DEB_CHARGE)
- l3_debug(pc->st, "charging info not found");
-
-
- p = skb->data;
- if ((p = findie(p, skb->len, WE0_cause, 0))) {
- if (p[1] > 0) {
- pc->para.cause = p[2];
- if (p[1] > 1)
- pc->para.loc = p[3];
- else
- pc->para.loc = 0;
- } else {
- pc->para.cause = 0;
- pc->para.loc = 0;
- }
- } else {
- if (pc->st->l3.debug & L3_DEB_WARN)
- l3_debug(pc->st, "cause not found");
- pc->para.cause = NO_CAUSE;
- }
- if (!findie(skb->data, skb->len, WE6_date, 6)) {
- l3_1tr6_error(pc, "missing connack date", skb);
- return;
- }
- dev_kfree_skb(skb);
- newl3state(pc, 12);
- pc->st->l3.l3l4(pc->st, CC_DISCONNECT | INDICATION, pc);
-}
-
-
-static void
-l3_1tr6_connect_ack(struct l3_process *pc, u_char pr, void *arg)
-{
- struct sk_buff *skb = arg;
-
- if (!findie(skb->data, skb->len, WE6_date, 6)) {
- l3_1tr6_error(pc, "missing connack date", skb);
- return;
- }
- dev_kfree_skb(skb);
- newl3state(pc, 10);
- pc->para.chargeinfo = 0;
- L3DelTimer(&pc->timer);
- pc->st->l3.l3l4(pc->st, CC_SETUP_COMPL | INDICATION, pc);
-}
-
-static void
-l3_1tr6_alert_req(struct l3_process *pc, u_char pr, void *arg)
-{
- newl3state(pc, 7);
- l3_1TR6_message(pc, MT_N1_ALERT, PROTO_DIS_N1);
-}
-
-static void
-l3_1tr6_setup_rsp(struct l3_process *pc, u_char pr, void *arg)
-{
- struct sk_buff *skb;
- u_char tmp[24];
- u_char *p = tmp;
- int l;
-
- MsgHead(p, pc->callref, MT_N1_CONN, PROTO_DIS_N1);
- if (pc->para.spv) { /* SPV ? */
- /* NSF SPV */
- *p++ = WE0_netSpecFac;
-		*p++ = 4;		/* length */
-		*p++ = 0;
-		*p++ = FAC_SPV;	/* SPV */
-		*p++ = pc->para.setup.si1;
-		*p++ = pc->para.setup.si2;
-		*p++ = WE0_netSpecFac;
-		*p++ = 4;		/* length */
-		*p++ = 0;
-		*p++ = FAC_Activate;	/* activate SPV */
- *p++ = pc->para.setup.si1;
- *p++ = pc->para.setup.si2;
- }
- newl3state(pc, 8);
- l = p - tmp;
- if (!(skb = l3_alloc_skb(l)))
- return;
- skb_put_data(skb, tmp, l);
- l3_msg(pc->st, DL_DATA | REQUEST, skb);
- L3DelTimer(&pc->timer);
- L3AddTimer(&pc->timer, T313, CC_T313);
-}
-
-static void
-l3_1tr6_reset(struct l3_process *pc, u_char pr, void *arg)
-{
- release_l3_process(pc);
-}
-
-static void
-l3_1tr6_disconnect_req(struct l3_process *pc, u_char pr, void *arg)
-{
- struct sk_buff *skb;
- u_char tmp[16];
- u_char *p = tmp;
- int l;
- u_char cause = 0x10;
- u_char clen = 1;
-
- if (pc->para.cause > 0)
- cause = pc->para.cause;
- /* Map DSS1 causes */
- switch (cause & 0x7f) {
- case 0x10:
- clen = 0;
- break;
- case 0x11:
- cause = CAUSE_UserBusy;
- break;
- case 0x15:
- cause = CAUSE_CallRejected;
- break;
- }
- StopAllL3Timer(pc);
- MsgHead(p, pc->callref, MT_N1_DISC, PROTO_DIS_N1);
- *p++ = WE0_cause;
-	*p++ = clen;		/* length */
- if (clen)
- *p++ = cause | 0x80;
- newl3state(pc, 11);
- l = p - tmp;
- if (!(skb = l3_alloc_skb(l)))
- return;
- skb_put_data(skb, tmp, l);
- l3_msg(pc->st, DL_DATA | REQUEST, skb);
- L3AddTimer(&pc->timer, T305, CC_T305);
-}
-
-static void
-l3_1tr6_t303(struct l3_process *pc, u_char pr, void *arg)
-{
- if (pc->N303 > 0) {
- pc->N303--;
- L3DelTimer(&pc->timer);
- l3_1tr6_setup_req(pc, pr, arg);
- } else {
- L3DelTimer(&pc->timer);
- pc->para.cause = 0;
- l3_1tr6_disconnect_req(pc, 0, NULL);
- }
-}
-
-static void
-l3_1tr6_t304(struct l3_process *pc, u_char pr, void *arg)
-{
- L3DelTimer(&pc->timer);
- pc->para.cause = 0xE6;
- l3_1tr6_disconnect_req(pc, pr, NULL);
- pc->st->l3.l3l4(pc->st, CC_SETUP_ERR, pc);
-}
-
-static void
-l3_1tr6_t305(struct l3_process *pc, u_char pr, void *arg)
-{
- struct sk_buff *skb;
- u_char tmp[16];
- u_char *p = tmp;
- int l;
- u_char cause = 0x90;
- u_char clen = 1;
-
- L3DelTimer(&pc->timer);
- if (pc->para.cause != NO_CAUSE)
- cause = pc->para.cause;
- /* Map DSS1 causes */
- switch (cause & 0x7f) {
- case 0x10:
- clen = 0;
- break;
- case 0x15:
- cause = CAUSE_CallRejected;
- break;
- }
- MsgHead(p, pc->callref, MT_N1_REL, PROTO_DIS_N1);
- *p++ = WE0_cause;
-	*p++ = clen;		/* length */
- if (clen)
- *p++ = cause;
- newl3state(pc, 19);
- l = p - tmp;
- if (!(skb = l3_alloc_skb(l)))
- return;
- skb_put_data(skb, tmp, l);
- l3_msg(pc->st, DL_DATA | REQUEST, skb);
- L3AddTimer(&pc->timer, T308, CC_T308_1);
-}
-
-static void
-l3_1tr6_t310(struct l3_process *pc, u_char pr, void *arg)
-{
- L3DelTimer(&pc->timer);
- pc->para.cause = 0xE6;
- l3_1tr6_disconnect_req(pc, pr, NULL);
- pc->st->l3.l3l4(pc->st, CC_SETUP_ERR, pc);
-}
-
-static void
-l3_1tr6_t313(struct l3_process *pc, u_char pr, void *arg)
-{
- L3DelTimer(&pc->timer);
- pc->para.cause = 0xE6;
- l3_1tr6_disconnect_req(pc, pr, NULL);
- pc->st->l3.l3l4(pc->st, CC_CONNECT_ERR, pc);
-}
-
-static void
-l3_1tr6_t308_1(struct l3_process *pc, u_char pr, void *arg)
-{
- L3DelTimer(&pc->timer);
- l3_1TR6_message(pc, MT_N1_REL, PROTO_DIS_N1);
- L3AddTimer(&pc->timer, T308, CC_T308_2);
- newl3state(pc, 19);
-}
-
-static void
-l3_1tr6_t308_2(struct l3_process *pc, u_char pr, void *arg)
-{
- L3DelTimer(&pc->timer);
- pc->st->l3.l3l4(pc->st, CC_RELEASE_ERR, pc);
- release_l3_process(pc);
-}
-
-static void
-l3_1tr6_dl_reset(struct l3_process *pc, u_char pr, void *arg)
-{
- pc->para.cause = CAUSE_LocalProcErr;
- l3_1tr6_disconnect_req(pc, pr, NULL);
- pc->st->l3.l3l4(pc->st, CC_SETUP_ERR, pc);
-}
-
-static void
-l3_1tr6_dl_release(struct l3_process *pc, u_char pr, void *arg)
-{
- newl3state(pc, 0);
- pc->para.cause = 0x1b; /* Destination out of order */
- pc->para.loc = 0;
- pc->st->l3.l3l4(pc->st, CC_RELEASE | INDICATION, pc);
- release_l3_process(pc);
-}
-
-/* *INDENT-OFF* */
-static struct stateentry downstl[] =
-{
- {SBIT(0),
- CC_SETUP | REQUEST, l3_1tr6_setup_req},
- {SBIT(1) | SBIT(2) | SBIT(3) | SBIT(4) | SBIT(6) | SBIT(7) | SBIT(8) |
- SBIT(10),
- CC_DISCONNECT | REQUEST, l3_1tr6_disconnect_req},
- {SBIT(12),
- CC_RELEASE | REQUEST, l3_1tr6_release_req},
- {SBIT(6),
- CC_IGNORE | REQUEST, l3_1tr6_reset},
- {SBIT(6),
- CC_REJECT | REQUEST, l3_1tr6_disconnect_req},
- {SBIT(6),
- CC_ALERTING | REQUEST, l3_1tr6_alert_req},
- {SBIT(6) | SBIT(7),
- CC_SETUP | RESPONSE, l3_1tr6_setup_rsp},
- {SBIT(1),
- CC_T303, l3_1tr6_t303},
- {SBIT(2),
- CC_T304, l3_1tr6_t304},
- {SBIT(3),
- CC_T310, l3_1tr6_t310},
- {SBIT(8),
- CC_T313, l3_1tr6_t313},
- {SBIT(11),
- CC_T305, l3_1tr6_t305},
- {SBIT(19),
- CC_T308_1, l3_1tr6_t308_1},
- {SBIT(19),
- CC_T308_2, l3_1tr6_t308_2},
-};
-
-static struct stateentry datastln1[] =
-{
- {SBIT(0),
- MT_N1_INVALID, l3_1tr6_invalid},
- {SBIT(0),
- MT_N1_SETUP, l3_1tr6_setup},
- {SBIT(1),
- MT_N1_SETUP_ACK, l3_1tr6_setup_ack},
- {SBIT(1) | SBIT(2),
- MT_N1_CALL_SENT, l3_1tr6_call_sent},
- {SBIT(1) | SBIT(2) | SBIT(3) | SBIT(4) | SBIT(7) | SBIT(8) | SBIT(10),
- MT_N1_DISC, l3_1tr6_disc},
- {SBIT(2) | SBIT(3) | SBIT(4),
- MT_N1_ALERT, l3_1tr6_alert},
- {SBIT(2) | SBIT(3) | SBIT(4),
- MT_N1_CONN, l3_1tr6_connect},
- {SBIT(2),
- MT_N1_INFO, l3_1tr6_info_s2},
- {SBIT(8),
- MT_N1_CONN_ACK, l3_1tr6_connect_ack},
- {SBIT(10),
- MT_N1_INFO, l3_1tr6_info},
- {SBIT(0) | SBIT(1) | SBIT(2) | SBIT(3) | SBIT(4) | SBIT(7) | SBIT(8) |
- SBIT(10) | SBIT(11) | SBIT(12) | SBIT(15) | SBIT(17),
- MT_N1_REL, l3_1tr6_rel},
- {SBIT(19),
- MT_N1_REL, l3_1tr6_rel_ack},
- {SBIT(0) | SBIT(1) | SBIT(2) | SBIT(3) | SBIT(4) | SBIT(7) | SBIT(8) |
- SBIT(10) | SBIT(11) | SBIT(12) | SBIT(15) | SBIT(17),
- MT_N1_REL_ACK, l3_1tr6_invalid},
- {SBIT(19),
- MT_N1_REL_ACK, l3_1tr6_rel_ack}
-};
-
-static struct stateentry manstatelist[] =
-{
- {SBIT(2),
- DL_ESTABLISH | INDICATION, l3_1tr6_dl_reset},
- {ALL_STATES,
- DL_RELEASE | INDICATION, l3_1tr6_dl_release},
-};
-
-/* *INDENT-ON* */
-
-static void
-up1tr6(struct PStack *st, int pr, void *arg)
-{
- int i, mt, cr;
- struct l3_process *proc;
- struct sk_buff *skb = arg;
-
- switch (pr) {
- case (DL_DATA | INDICATION):
- case (DL_UNIT_DATA | INDICATION):
- break;
- case (DL_ESTABLISH | CONFIRM):
- case (DL_ESTABLISH | INDICATION):
- case (DL_RELEASE | INDICATION):
- case (DL_RELEASE | CONFIRM):
- l3_msg(st, pr, arg);
- return;
- break;
- }
- if (skb->len < 4) {
- if (st->l3.debug & L3_DEB_PROTERR) {
- l3_debug(st, "up1tr6 len only %d", skb->len);
- }
- dev_kfree_skb(skb);
- return;
- }
- if ((skb->data[0] & 0xfe) != PROTO_DIS_N0) {
- if (st->l3.debug & L3_DEB_PROTERR) {
- l3_debug(st, "up1tr6%sunexpected discriminator %x message len %d",
- (pr == (DL_DATA | INDICATION)) ? " " : "(broadcast) ",
- skb->data[0], skb->len);
- }
- dev_kfree_skb(skb);
- return;
- }
- if (skb->data[1] != 1) {
- if (st->l3.debug & L3_DEB_PROTERR) {
- l3_debug(st, "up1tr6 CR len not 1");
- }
- dev_kfree_skb(skb);
- return;
- }
- cr = skb->data[2];
- mt = skb->data[3];
- if (skb->data[0] == PROTO_DIS_N0) {
- dev_kfree_skb(skb);
- if (st->l3.debug & L3_DEB_STATE) {
- l3_debug(st, "up1tr6%s N0 mt %x unhandled",
- (pr == (DL_DATA | INDICATION)) ? " " : "(broadcast) ", mt);
- }
- } else if (skb->data[0] == PROTO_DIS_N1) {
- if (!(proc = getl3proc(st, cr))) {
- if (mt == MT_N1_SETUP) {
- if (cr < 128) {
- if (!(proc = new_l3_process(st, cr))) {
- if (st->l3.debug & L3_DEB_PROTERR) {
- l3_debug(st, "up1tr6 no roc mem");
- }
- dev_kfree_skb(skb);
- return;
- }
- } else {
- dev_kfree_skb(skb);
- return;
- }
- } else if ((mt == MT_N1_REL) || (mt == MT_N1_REL_ACK) ||
- (mt == MT_N1_CANC_ACK) || (mt == MT_N1_CANC_REJ) ||
- (mt == MT_N1_REG_ACK) || (mt == MT_N1_REG_REJ) ||
- (mt == MT_N1_SUSP_ACK) || (mt == MT_N1_RES_REJ) ||
- (mt == MT_N1_INFO)) {
- dev_kfree_skb(skb);
- return;
- } else {
- if (!(proc = new_l3_process(st, cr))) {
- if (st->l3.debug & L3_DEB_PROTERR) {
- l3_debug(st, "up1tr6 no roc mem");
- }
- dev_kfree_skb(skb);
- return;
- }
- mt = MT_N1_INVALID;
- }
- }
- for (i = 0; i < ARRAY_SIZE(datastln1); i++)
- if ((mt == datastln1[i].primitive) &&
- ((1 << proc->state) & datastln1[i].state))
- break;
- if (i == ARRAY_SIZE(datastln1)) {
- dev_kfree_skb(skb);
- if (st->l3.debug & L3_DEB_STATE) {
- l3_debug(st, "up1tr6%sstate %d mt %x unhandled",
- (pr == (DL_DATA | INDICATION)) ? " " : "(broadcast) ",
- proc->state, mt);
- }
- return;
- } else {
- if (st->l3.debug & L3_DEB_STATE) {
- l3_debug(st, "up1tr6%sstate %d mt %x",
- (pr == (DL_DATA | INDICATION)) ? " " : "(broadcast) ",
- proc->state, mt);
- }
- datastln1[i].rout(proc, pr, skb);
- }
- }
-}
-
-static void
-down1tr6(struct PStack *st, int pr, void *arg)
-{
- int i, cr;
- struct l3_process *proc;
- struct Channel *chan;
-
- if ((DL_ESTABLISH | REQUEST) == pr) {
- l3_msg(st, pr, NULL);
- return;
- } else if ((CC_SETUP | REQUEST) == pr) {
- chan = arg;
- cr = newcallref();
- cr |= 0x80;
- if (!(proc = new_l3_process(st, cr))) {
- return;
- } else {
- proc->chan = chan;
- chan->proc = proc;
- memcpy(&proc->para.setup, &chan->setup, sizeof(setup_parm));
- proc->callref = cr;
- }
- } else {
- proc = arg;
- }
-
- for (i = 0; i < ARRAY_SIZE(downstl); i++)
- if ((pr == downstl[i].primitive) &&
- ((1 << proc->state) & downstl[i].state))
- break;
- if (i == ARRAY_SIZE(downstl)) {
- if (st->l3.debug & L3_DEB_STATE) {
- l3_debug(st, "down1tr6 state %d prim %d unhandled",
- proc->state, pr);
- }
- } else {
- if (st->l3.debug & L3_DEB_STATE) {
- l3_debug(st, "down1tr6 state %d prim %d",
- proc->state, pr);
- }
- downstl[i].rout(proc, pr, arg);
- }
-}
-
-static void
-man1tr6(struct PStack *st, int pr, void *arg)
-{
- int i;
- struct l3_process *proc = arg;
-
- if (!proc) {
- printk(KERN_ERR "HiSax man1tr6 without proc pr=%04x\n", pr);
- return;
- }
- for (i = 0; i < ARRAY_SIZE(manstatelist); i++)
- if ((pr == manstatelist[i].primitive) &&
- ((1 << proc->state) & manstatelist[i].state))
- break;
- if (i == ARRAY_SIZE(manstatelist)) {
- if (st->l3.debug & L3_DEB_STATE) {
- l3_debug(st, "cr %d man1tr6 state %d prim %d unhandled",
- proc->callref & 0x7f, proc->state, pr);
- }
- } else {
- if (st->l3.debug & L3_DEB_STATE) {
- l3_debug(st, "cr %d man1tr6 state %d prim %d",
- proc->callref & 0x7f, proc->state, pr);
- }
- manstatelist[i].rout(proc, pr, arg);
- }
-}
-
-void
-setstack_1tr6(struct PStack *st)
-{
- char tmp[64];
-
- st->lli.l4l3 = down1tr6;
- st->l2.l2l3 = up1tr6;
- st->l3.l3ml3 = man1tr6;
- st->l3.N303 = 0;
-
- strcpy(tmp, l3_1tr6_revision);
- printk(KERN_INFO "HiSax: 1TR6 Rev. %s\n", HiSax_getrev(tmp));
-}
diff --git a/drivers/isdn/hisax/l3_1tr6.h b/drivers/isdn/hisax/l3_1tr6.h
deleted file mode 100644
index 43215c00cada..000000000000
--- a/drivers/isdn/hisax/l3_1tr6.h
+++ /dev/null
@@ -1,164 +0,0 @@
-/* $Id: l3_1tr6.h,v 2.2.6.2 2001/09/23 22:24:49 kai Exp $
- *
- * German 1TR6 D-channel protocol defines
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#ifndef l3_1tr6
-#define l3_1tr6
-
-#define PROTO_DIS_N0 0x40
-#define PROTO_DIS_N1 0x41
-
-/*
- * MsgType N0
- */
-#define MT_N0_REG_IND 0x61
-#define MT_N0_CANC_IND 0x62
-#define MT_N0_FAC_STA 0x63
-#define MT_N0_STA_ACK 0x64
-#define MT_N0_STA_REJ 0x65
-#define MT_N0_FAC_INF 0x66
-#define MT_N0_INF_ACK 0x67
-#define MT_N0_INF_REJ 0x68
-#define MT_N0_CLOSE 0x75
-#define MT_N0_CLO_ACK 0x77
-
-/*
- * MsgType N1
- */
-
-#define MT_N1_ESC 0x00
-#define MT_N1_ALERT 0x01
-#define MT_N1_CALL_SENT 0x02
-#define MT_N1_CONN 0x07
-#define MT_N1_CONN_ACK 0x0F
-#define MT_N1_SETUP 0x05
-#define MT_N1_SETUP_ACK 0x0D
-#define MT_N1_RES 0x26
-#define MT_N1_RES_ACK 0x2E
-#define MT_N1_RES_REJ 0x22
-#define MT_N1_SUSP 0x25
-#define MT_N1_SUSP_ACK 0x2D
-#define MT_N1_SUSP_REJ 0x21
-#define MT_N1_USER_INFO 0x20
-#define MT_N1_DET 0x40
-#define MT_N1_DISC 0x45
-#define MT_N1_REL 0x4D
-#define MT_N1_REL_ACK 0x5A
-#define MT_N1_CANC_ACK 0x6E
-#define MT_N1_CANC_REJ 0x67
-#define MT_N1_CON_CON 0x69
-#define MT_N1_FAC 0x60
-#define MT_N1_FAC_ACK 0x68
-#define MT_N1_FAC_CAN 0x66
-#define MT_N1_FAC_REG 0x64
-#define MT_N1_FAC_REJ 0x65
-#define MT_N1_INFO 0x6D
-#define MT_N1_REG_ACK 0x6C
-#define MT_N1_REG_REJ 0x6F
-#define MT_N1_STAT 0x63
-#define MT_N1_INVALID 0
-
-/*
- * W elements
- */
-
-#define WE_Shift_F0 0x90
-#define WE_Shift_F6 0x96
-#define WE_Shift_OF0 0x98
-#define WE_Shift_OF6 0x9E
-
-#define WE0_cause 0x08
-#define WE0_connAddr 0x0C
-#define WE0_callID 0x10
-#define WE0_chanID 0x18
-#define WE0_netSpecFac 0x20
-#define WE0_display 0x28
-#define WE0_keypad 0x2C
-#define WE0_origAddr 0x6C
-#define WE0_destAddr 0x70
-#define WE0_userInfo 0x7E
-
-#define WE0_moreData 0xA0
-#define WE0_congestLevel 0xB0
-
-#define WE6_serviceInd 0x01
-#define WE6_chargingInfo 0x02
-#define WE6_date 0x03
-#define WE6_facSelect 0x05
-#define WE6_facStatus 0x06
-#define WE6_statusCalled 0x07
-#define WE6_addTransAttr 0x08
-
-/*
- * FacCodes
- */
-#define FAC_Sperre 0x01
-#define FAC_Sperre_All 0x02
-#define FAC_Sperre_Fern 0x03
-#define FAC_Sperre_Intl 0x04
-#define FAC_Sperre_Interk 0x05
-
-#define FAC_Forward1 0x02
-#define FAC_Forward2 0x03
-#define FAC_Konferenz 0x06
-#define FAC_GrabBchan 0x0F
-#define FAC_Reactivate 0x10
-#define FAC_Konferenz3 0x11
-#define FAC_Dienstwechsel1 0x12
-#define FAC_Dienstwechsel2 0x13
-#define FAC_NummernIdent 0x14
-#define FAC_GBG 0x15
-#define FAC_DisplayUebergeben 0x17
-#define FAC_DisplayUmgeleitet 0x1A
-#define FAC_Unterdruecke 0x1B
-#define FAC_Deactivate 0x1E
-#define FAC_Activate 0x1D
-#define FAC_SPV 0x1F
-#define FAC_Rueckwechsel 0x23
-#define FAC_Umleitung 0x24
-
-/*
- * Cause codes
- */
-#define CAUSE_InvCRef 0x01
-#define CAUSE_BearerNotImpl 0x03
-#define CAUSE_CIDunknown 0x07
-#define CAUSE_CIDinUse 0x08
-#define CAUSE_NoChans 0x0A
-#define CAUSE_FacNotImpl 0x10
-#define CAUSE_FacNotSubscr 0x11
-#define CAUSE_OutgoingBarred 0x20
-#define CAUSE_UserAccessBusy 0x21
-#define CAUSE_NegativeGBG 0x22
-#define CAUSE_UnknownGBG 0x23
-#define CAUSE_NoSPVknown 0x25
-#define CAUSE_DestNotObtain 0x35
-#define CAUSE_NumberChanged 0x38
-#define CAUSE_OutOfOrder 0x39
-#define CAUSE_NoUserResponse 0x3A
-#define CAUSE_UserBusy 0x3B
-#define CAUSE_IncomingBarred 0x3D
-#define CAUSE_CallRejected 0x3E
-#define CAUSE_NetworkCongestion 0x59
-#define CAUSE_RemoteUser 0x5A
-#define CAUSE_LocalProcErr 0x70
-#define CAUSE_RemoteProcErr 0x71
-#define CAUSE_RemoteUserSuspend 0x72
-#define CAUSE_RemoteUserResumed 0x73
-#define CAUSE_UserInfoDiscarded 0x7F
-
-#define T303 4000
-#define T304 20000
-#define T305 4000
-#define T308 4000
-#define T310 120000
-#define T313 4000
-#define T318 4000
-#define T319 4000
-
-#endif
diff --git a/drivers/isdn/hisax/l3dss1.c b/drivers/isdn/hisax/l3dss1.c
deleted file mode 100644
index 368d152a8f1d..000000000000
--- a/drivers/isdn/hisax/l3dss1.c
+++ /dev/null
@@ -1,3227 +0,0 @@
-/* $Id: l3dss1.c,v 2.32.2.3 2004/01/13 14:31:25 keil Exp $
- *
- * EURO/DSS1 D-channel protocol
- *
- * German 1TR6 D-channel protocol
- *
- * Author Karsten Keil
- * based on the teles driver from Jan den Ouden
- * Copyright by Karsten Keil <keil@isdn4linux.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- * For changes and modifications please read
- * Documentation/isdn/HiSax.cert
- *
- * Thanks to Jan den Ouden
- * Fritz Elfert
- *
- */
-
-#include "hisax.h"
-#include "isdnl3.h"
-#include "l3dss1.h"
-#include <linux/ctype.h>
-#include <linux/slab.h>
-
-extern char *HiSax_getrev(const char *revision);
-static const char *dss1_revision = "$Revision: 2.32.2.3 $";
-
-#define EXT_BEARER_CAPS 1
-
-#define MsgHead(ptr, cref, mty) \
- *ptr++ = 0x8; \
- if (cref == -1) { \
- *ptr++ = 0x0; \
- } else { \
- *ptr++ = 0x1; \
- *ptr++ = cref^0x80; \
- } \
- *ptr++ = mty
-
-
-/**********************************************/
-/* get a new invoke id for remote operations. */
-/* Only a return value != 0 is valid */
-/**********************************************/
-static unsigned char new_invoke_id(struct PStack *p)
-{
- unsigned char retval;
- int i;
-
- i = 32; /* maximum search depth */
-
- retval = p->prot.dss1.last_invoke_id + 1; /* try new id */
- while ((i) && (p->prot.dss1.invoke_used[retval >> 3] == 0xFF)) {
- p->prot.dss1.last_invoke_id = (retval & 0xF8) + 8;
- i--;
- }
- if (i) {
- while (p->prot.dss1.invoke_used[retval >> 3] & (1 << (retval & 7)))
- retval++;
- } else
- retval = 0;
- p->prot.dss1.last_invoke_id = retval;
- p->prot.dss1.invoke_used[retval >> 3] |= (1 << (retval & 7));
- return (retval);
-} /* new_invoke_id */
-
-/*************************/
-/* free a used invoke id */
-/*************************/
-static void free_invoke_id(struct PStack *p, unsigned char id)
-{
-
- if (!id) return; /* 0 = invalid value */
-
- p->prot.dss1.invoke_used[id >> 3] &= ~(1 << (id & 7));
-} /* free_invoke_id */
-
-
-/**********************************************************/
-/* create a new l3 process and fill in dss1 specific data */
-/**********************************************************/
-static struct l3_process
-*dss1_new_l3_process(struct PStack *st, int cr)
-{ struct l3_process *proc;
-
- if (!(proc = new_l3_process(st, cr)))
- return (NULL);
-
- proc->prot.dss1.invoke_id = 0;
- proc->prot.dss1.remote_operation = 0;
- proc->prot.dss1.uus1_data[0] = '\0';
-
- return (proc);
-} /* dss1_new_l3_process */
-
-/************************************************/
-/* free a l3 process and all dss1 specific data */
-/************************************************/
-static void
-dss1_release_l3_process(struct l3_process *p)
-{
- free_invoke_id(p->st, p->prot.dss1.invoke_id);
- release_l3_process(p);
-} /* dss1_release_l3_process */
-
-/********************************************************/
-/* search a process with invoke id id and dummy callref */
-/********************************************************/
-static struct l3_process *
-l3dss1_search_dummy_proc(struct PStack *st, int id)
-{ struct l3_process *pc = st->l3.proc; /* start of processes */
-
- if (!id) return (NULL);
-
- while (pc)
- { if ((pc->callref == -1) && (pc->prot.dss1.invoke_id == id))
- return (pc);
- pc = pc->next;
- }
- return (NULL);
-} /* l3dss1_search_dummy_proc */
-
-/*******************************************************************/
-/* called when a facility message with a dummy callref is received */
-/* and a return result is delivered. id specifies the invoke id. */
-/*******************************************************************/
-static void
-l3dss1_dummy_return_result(struct PStack *st, int id, u_char *p, u_char nlen)
-{ isdn_ctrl ic;
- struct IsdnCardState *cs;
- struct l3_process *pc = NULL;
-
- if ((pc = l3dss1_search_dummy_proc(st, id)))
- { L3DelTimer(&pc->timer); /* remove timer */
-
- cs = pc->st->l1.hardware;
- ic.driver = cs->myid;
- ic.command = ISDN_STAT_PROT;
- ic.arg = DSS1_STAT_INVOKE_RES;
- ic.parm.dss1_io.hl_id = pc->prot.dss1.invoke_id;
- ic.parm.dss1_io.ll_id = pc->prot.dss1.ll_id;
- ic.parm.dss1_io.proc = pc->prot.dss1.proc;
- ic.parm.dss1_io.timeout = 0;
- ic.parm.dss1_io.datalen = nlen;
- ic.parm.dss1_io.data = p;
- free_invoke_id(pc->st, pc->prot.dss1.invoke_id);
- pc->prot.dss1.invoke_id = 0; /* reset id */
-
- cs->iif.statcallb(&ic);
- dss1_release_l3_process(pc);
- }
- else
- l3_debug(st, "dummy return result id=0x%x result len=%d", id, nlen);
-} /* l3dss1_dummy_return_result */
-
-/*******************************************************************/
-/* called when a facility message with a dummy callref is received */
-/* and a return error is delivered. id specifies the invoke id. */
-/*******************************************************************/
-static void
-l3dss1_dummy_error_return(struct PStack *st, int id, ulong error)
-{ isdn_ctrl ic;
- struct IsdnCardState *cs;
- struct l3_process *pc = NULL;
-
- if ((pc = l3dss1_search_dummy_proc(st, id)))
- { L3DelTimer(&pc->timer); /* remove timer */
-
- cs = pc->st->l1.hardware;
- ic.driver = cs->myid;
- ic.command = ISDN_STAT_PROT;
- ic.arg = DSS1_STAT_INVOKE_ERR;
- ic.parm.dss1_io.hl_id = pc->prot.dss1.invoke_id;
- ic.parm.dss1_io.ll_id = pc->prot.dss1.ll_id;
- ic.parm.dss1_io.proc = pc->prot.dss1.proc;
- ic.parm.dss1_io.timeout = error;
- ic.parm.dss1_io.datalen = 0;
- ic.parm.dss1_io.data = NULL;
- free_invoke_id(pc->st, pc->prot.dss1.invoke_id);
- pc->prot.dss1.invoke_id = 0; /* reset id */
-
- cs->iif.statcallb(&ic);
- dss1_release_l3_process(pc);
- }
- else
- l3_debug(st, "dummy return error id=0x%x error=0x%lx", id, error);
-} /* l3dss1_error_return */
-
-/*******************************************************************/
-/* called when a facility message with a dummy callref is received */
-/* and a invoke is delivered. id specifies the invoke id. */
-/*******************************************************************/
-static void
-l3dss1_dummy_invoke(struct PStack *st, int cr, int id,
- int ident, u_char *p, u_char nlen)
-{ isdn_ctrl ic;
- struct IsdnCardState *cs;
-
- l3_debug(st, "dummy invoke %s id=0x%x ident=0x%x datalen=%d",
- (cr == -1) ? "local" : "broadcast", id, ident, nlen);
- if (cr >= -1) return; /* ignore local data */
-
- cs = st->l1.hardware;
- ic.driver = cs->myid;
- ic.command = ISDN_STAT_PROT;
- ic.arg = DSS1_STAT_INVOKE_BRD;
- ic.parm.dss1_io.hl_id = id;
- ic.parm.dss1_io.ll_id = 0;
- ic.parm.dss1_io.proc = ident;
- ic.parm.dss1_io.timeout = 0;
- ic.parm.dss1_io.datalen = nlen;
- ic.parm.dss1_io.data = p;
-
- cs->iif.statcallb(&ic);
-} /* l3dss1_dummy_invoke */
-
-static void
-l3dss1_parse_facility(struct PStack *st, struct l3_process *pc,
- int cr, u_char *p)
-{
- int qd_len = 0;
- unsigned char nlen = 0, ilen, cp_tag;
- int ident, id;
- ulong err_ret;
-
- if (pc)
- st = pc->st; /* valid Stack */
- else
- if ((!st) || (cr >= 0)) return; /* neither pc nor st specified */
-
- p++;
- qd_len = *p++;
- if (qd_len == 0) {
- l3_debug(st, "qd_len == 0");
- return;
- }
- if ((*p & 0x1F) != 0x11) { /* Service discriminator, supplementary service */
- l3_debug(st, "supplementary service != 0x11");
- return;
- }
- while (qd_len > 0 && !(*p & 0x80)) { /* extension ? */
- p++;
- qd_len--;
- }
- if (qd_len < 2) {
- l3_debug(st, "qd_len < 2");
- return;
- }
- p++;
- qd_len--;
- if ((*p & 0xE0) != 0xA0) { /* class and form */
- l3_debug(st, "class and form != 0xA0");
- return;
- }
-
- cp_tag = *p & 0x1F; /* remember tag value */
-
- p++;
- qd_len--;
- if (qd_len < 1)
- { l3_debug(st, "qd_len < 1");
- return;
- }
- if (*p & 0x80)
- { /* length format indefinite or limited */
- nlen = *p++ & 0x7F; /* number of len bytes or indefinite */
- if ((qd_len-- < ((!nlen) ? 3 : (1 + nlen))) ||
- (nlen > 1))
- { l3_debug(st, "length format error or not implemented");
- return;
- }
- if (nlen == 1)
- { nlen = *p++; /* complete length */
- qd_len--;
- }
- else
- { qd_len -= 2; /* trailing null bytes */
- if ((*(p + qd_len)) || (*(p + qd_len + 1)))
- { l3_debug(st, "length format indefinite error");
- return;
- }
- nlen = qd_len;
- }
- }
- else
- { nlen = *p++;
- qd_len--;
- }
- if (qd_len < nlen)
- { l3_debug(st, "qd_len < nlen");
- return;
- }
- qd_len -= nlen;
-
- if (nlen < 2)
- { l3_debug(st, "nlen < 2");
- return;
- }
- if (*p != 0x02)
- { /* invoke identifier tag */
- l3_debug(st, "invoke identifier tag !=0x02");
- return;
- }
- p++;
- nlen--;
- if (*p & 0x80)
- { /* length format */
- l3_debug(st, "invoke id length format 2");
- return;
- }
- ilen = *p++;
- nlen--;
- if (ilen > nlen || ilen == 0)
- { l3_debug(st, "ilen > nlen || ilen == 0");
- return;
- }
- nlen -= ilen;
- id = 0;
- while (ilen > 0)
- { id = (id << 8) | (*p++ & 0xFF); /* invoke identifier */
- ilen--;
- }
-
- switch (cp_tag) { /* component tag */
- case 1: /* invoke */
- if (nlen < 2) {
- l3_debug(st, "nlen < 2 22");
- return;
- }
- if (*p != 0x02) { /* operation value */
- l3_debug(st, "operation value !=0x02");
- return;
- }
- p++;
- nlen--;
- ilen = *p++;
- nlen--;
- if (ilen > nlen || ilen == 0) {
- l3_debug(st, "ilen > nlen || ilen == 0 22");
- return;
- }
- nlen -= ilen;
- ident = 0;
- while (ilen > 0) {
- ident = (ident << 8) | (*p++ & 0xFF);
- ilen--;
- }
-
- if (!pc)
- { l3dss1_dummy_invoke(st, cr, id, ident, p, nlen);
- return;
- }
-#ifdef CONFIG_DE_AOC
- {
-
-#define FOO1(s, a, b) \
- while (nlen > 1) { \
- int ilen = p[1]; \
- if (nlen < ilen + 2) { \
- l3_debug(st, "FOO1 nlen < ilen+2"); \
- return; \
- } \
- nlen -= ilen + 2; \
- if ((*p & 0xFF) == (a)) { \
- int nlen = ilen; \
- p += 2; \
- b; \
- } else { \
- p += ilen + 2; \
- } \
- }
-
- switch (ident) {
- case 0x22: /* during */
- FOO1("1A", 0x30, FOO1("1C", 0xA1, FOO1("1D", 0x30, FOO1("1E", 0x02, ( {
- ident = 0;
- nlen = (nlen) ? nlen : 0; /* Make gcc happy */
- while (ilen > 0) {
- ident = (ident << 8) | *p++;
- ilen--;
- }
- if (ident > pc->para.chargeinfo) {
- pc->para.chargeinfo = ident;
- st->l3.l3l4(st, CC_CHARGE | INDICATION, pc);
- }
- if (st->l3.debug & L3_DEB_CHARGE) {
- if (*(p + 2) == 0) {
- l3_debug(st, "charging info during %d", pc->para.chargeinfo);
- }
- else {
- l3_debug(st, "charging info final %d", pc->para.chargeinfo);
- }
- }
- }
- )))))
- break;
- case 0x24: /* final */
- FOO1("2A", 0x30, FOO1("2B", 0x30, FOO1("2C", 0xA1, FOO1("2D", 0x30, FOO1("2E", 0x02, ( {
- ident = 0;
- nlen = (nlen) ? nlen : 0; /* Make gcc happy */
- while (ilen > 0) {
- ident = (ident << 8) | *p++;
- ilen--;
- }
- if (ident > pc->para.chargeinfo) {
- pc->para.chargeinfo = ident;
- st->l3.l3l4(st, CC_CHARGE | INDICATION, pc);
- }
- if (st->l3.debug & L3_DEB_CHARGE) {
- l3_debug(st, "charging info final %d", pc->para.chargeinfo);
- }
- }
- ))))))
- break;
- default:
- l3_debug(st, "invoke break invalid ident %02x", ident);
- break;
- }
-#undef FOO1
-
- }
-#else /* not CONFIG_DE_AOC */
- l3_debug(st, "invoke break");
-#endif /* not CONFIG_DE_AOC */
- break;
- case 2: /* return result */
- /* if no process available handle separately */
- if (!pc)
- { if (cr == -1)
- l3dss1_dummy_return_result(st, id, p, nlen);
- return;
- }
- if ((pc->prot.dss1.invoke_id) && (pc->prot.dss1.invoke_id == id))
- { /* Diversion successful */
- free_invoke_id(st, pc->prot.dss1.invoke_id);
- pc->prot.dss1.remote_result = 0; /* success */
- pc->prot.dss1.invoke_id = 0;
- pc->redir_result = pc->prot.dss1.remote_result;
- st->l3.l3l4(st, CC_REDIR | INDICATION, pc); } /* Diversion successful */
- else
- l3_debug(st, "return error unknown identifier");
- break;
- case 3: /* return error */
- err_ret = 0;
- if (nlen < 2)
- { l3_debug(st, "return error nlen < 2");
- return;
- }
- if (*p != 0x02)
- { /* result tag */
- l3_debug(st, "invoke error tag !=0x02");
- return;
- }
- p++;
- nlen--;
- if (*p > 4)
- { /* length format */
- l3_debug(st, "invoke return errlen > 4 ");
- return;
- }
- ilen = *p++;
- nlen--;
- if (ilen > nlen || ilen == 0)
- { l3_debug(st, "error return ilen > nlen || ilen == 0");
- return;
- }
- nlen -= ilen;
- while (ilen > 0)
- { err_ret = (err_ret << 8) | (*p++ & 0xFF); /* error value */
- ilen--;
- }
- /* if no process available handle separately */
- if (!pc)
- { if (cr == -1)
- l3dss1_dummy_error_return(st, id, err_ret);
- return;
- }
- if ((pc->prot.dss1.invoke_id) && (pc->prot.dss1.invoke_id == id))
- { /* Deflection error */
- free_invoke_id(st, pc->prot.dss1.invoke_id);
- pc->prot.dss1.remote_result = err_ret; /* result */
- pc->prot.dss1.invoke_id = 0;
- pc->redir_result = pc->prot.dss1.remote_result;
- st->l3.l3l4(st, CC_REDIR | INDICATION, pc);
- } /* Deflection error */
- else
- l3_debug(st, "return result unknown identifier");
- break;
- default:
- l3_debug(st, "facility default break tag=0x%02x", cp_tag);
- break;
- }
-}
-
-static void
-l3dss1_message(struct l3_process *pc, u_char mt)
-{
- struct sk_buff *skb;
- u_char *p;
-
- if (!(skb = l3_alloc_skb(4)))
- return;
- p = skb_put(skb, 4);
- MsgHead(p, pc->callref, mt);
- l3_msg(pc->st, DL_DATA | REQUEST, skb);
-}
-
-static void
-l3dss1_message_cause(struct l3_process *pc, u_char mt, u_char cause)
-{
- struct sk_buff *skb;
- u_char tmp[16];
- u_char *p = tmp;
- int l;
-
- MsgHead(p, pc->callref, mt);
- *p++ = IE_CAUSE;
- *p++ = 0x2;
- *p++ = 0x80;
- *p++ = cause | 0x80;
-
- l = p - tmp;
- if (!(skb = l3_alloc_skb(l)))
- return;
- skb_put_data(skb, tmp, l);
- l3_msg(pc->st, DL_DATA | REQUEST, skb);
-}
-
-static void
-l3dss1_status_send(struct l3_process *pc, u_char pr, void *arg)
-{
- u_char tmp[16];
- u_char *p = tmp;
- int l;
- struct sk_buff *skb;
-
- MsgHead(p, pc->callref, MT_STATUS);
-
- *p++ = IE_CAUSE;
- *p++ = 0x2;
- *p++ = 0x80;
- *p++ = pc->para.cause | 0x80;
-
- *p++ = IE_CALL_STATE;
- *p++ = 0x1;
- *p++ = pc->state & 0x3f;
-
- l = p - tmp;
- if (!(skb = l3_alloc_skb(l)))
- return;
- skb_put_data(skb, tmp, l);
- l3_msg(pc->st, DL_DATA | REQUEST, skb);
-}
-
-static void
-l3dss1_msg_without_setup(struct l3_process *pc, u_char pr, void *arg)
-{
-	/* This routine is called if there was no SETUP made (checks in dss1up and in
-	 * l3dss1_setup) and a RELEASE_COMPLETE has to be sent with an error code.
-	 * MT_STATUS_ENQUIRY in the NULL state is handled here too.
-	 */
- u_char tmp[16];
- u_char *p = tmp;
- int l;
- struct sk_buff *skb;
-
- switch (pc->para.cause) {
- case 81: /* invalid callreference */
- case 88: /* incomp destination */
-	case 96:        /* mandatory IE missing */
- case 100: /* invalid IE contents */
- case 101: /* incompatible Callstate */
- MsgHead(p, pc->callref, MT_RELEASE_COMPLETE);
- *p++ = IE_CAUSE;
- *p++ = 0x2;
- *p++ = 0x80;
- *p++ = pc->para.cause | 0x80;
- break;
- default:
- printk(KERN_ERR "HiSax l3dss1_msg_without_setup wrong cause %d\n",
- pc->para.cause);
- return;
- }
- l = p - tmp;
- if (!(skb = l3_alloc_skb(l)))
- return;
- skb_put_data(skb, tmp, l);
- l3_msg(pc->st, DL_DATA | REQUEST, skb);
- dss1_release_l3_process(pc);
-}
-
-static int ie_ALERTING[] = {IE_BEARER, IE_CHANNEL_ID | IE_MANDATORY_1,
- IE_FACILITY, IE_PROGRESS, IE_DISPLAY, IE_SIGNAL, IE_HLC,
- IE_USER_USER, -1};
-static int ie_CALL_PROCEEDING[] = {IE_BEARER, IE_CHANNEL_ID | IE_MANDATORY_1,
- IE_FACILITY, IE_PROGRESS, IE_DISPLAY, IE_HLC, -1};
-static int ie_CONNECT[] = {IE_BEARER, IE_CHANNEL_ID | IE_MANDATORY_1,
- IE_FACILITY, IE_PROGRESS, IE_DISPLAY, IE_DATE, IE_SIGNAL,
- IE_CONNECT_PN, IE_CONNECT_SUB, IE_LLC, IE_HLC, IE_USER_USER, -1};
-static int ie_CONNECT_ACKNOWLEDGE[] = {IE_CHANNEL_ID, IE_DISPLAY, IE_SIGNAL, -1};
-static int ie_DISCONNECT[] = {IE_CAUSE | IE_MANDATORY, IE_FACILITY,
- IE_PROGRESS, IE_DISPLAY, IE_SIGNAL, IE_USER_USER, -1};
-static int ie_INFORMATION[] = {IE_COMPLETE, IE_DISPLAY, IE_KEYPAD, IE_SIGNAL,
- IE_CALLED_PN, -1};
-static int ie_NOTIFY[] = {IE_BEARER, IE_NOTIFY | IE_MANDATORY, IE_DISPLAY, -1};
-static int ie_PROGRESS[] = {IE_BEARER, IE_CAUSE, IE_FACILITY, IE_PROGRESS |
- IE_MANDATORY, IE_DISPLAY, IE_HLC, IE_USER_USER, -1};
-static int ie_RELEASE[] = {IE_CAUSE | IE_MANDATORY_1, IE_FACILITY, IE_DISPLAY,
- IE_SIGNAL, IE_USER_USER, -1};
-/* a RELEASE_COMPLETE with errors doesn't require special actions
- static int ie_RELEASE_COMPLETE[] = {IE_CAUSE | IE_MANDATORY_1, IE_DISPLAY, IE_SIGNAL, IE_USER_USER, -1};
-*/
-static int ie_RESUME_ACKNOWLEDGE[] = {IE_CHANNEL_ID | IE_MANDATORY, IE_FACILITY,
- IE_DISPLAY, -1};
-static int ie_RESUME_REJECT[] = {IE_CAUSE | IE_MANDATORY, IE_DISPLAY, -1};
-static int ie_SETUP[] = {IE_COMPLETE, IE_BEARER | IE_MANDATORY,
- IE_CHANNEL_ID | IE_MANDATORY, IE_FACILITY, IE_PROGRESS,
- IE_NET_FAC, IE_DISPLAY, IE_KEYPAD, IE_SIGNAL, IE_CALLING_PN,
- IE_CALLING_SUB, IE_CALLED_PN, IE_CALLED_SUB, IE_REDIR_NR,
- IE_LLC, IE_HLC, IE_USER_USER, -1};
-static int ie_SETUP_ACKNOWLEDGE[] = {IE_CHANNEL_ID | IE_MANDATORY, IE_FACILITY,
- IE_PROGRESS, IE_DISPLAY, IE_SIGNAL, -1};
-static int ie_STATUS[] = {IE_CAUSE | IE_MANDATORY, IE_CALL_STATE |
- IE_MANDATORY, IE_DISPLAY, -1};
-static int ie_STATUS_ENQUIRY[] = {IE_DISPLAY, -1};
-static int ie_SUSPEND_ACKNOWLEDGE[] = {IE_DISPLAY, IE_FACILITY, -1};
-static int ie_SUSPEND_REJECT[] = {IE_CAUSE | IE_MANDATORY, IE_DISPLAY, -1};
-/* not used
- * static int ie_CONGESTION_CONTROL[] = {IE_CONGESTION | IE_MANDATORY,
- * IE_CAUSE | IE_MANDATORY, IE_DISPLAY, -1};
- * static int ie_USER_INFORMATION[] = {IE_MORE_DATA, IE_USER_USER | IE_MANDATORY, -1};
- * static int ie_RESTART[] = {IE_CHANNEL_ID, IE_DISPLAY, IE_RESTART_IND |
- * IE_MANDATORY, -1};
- */
-static int ie_FACILITY[] = {IE_FACILITY | IE_MANDATORY, IE_DISPLAY, -1};
-static int comp_required[] = {1, 2, 3, 5, 6, 7, 9, 10, 11, 14, 15, -1};
-static int l3_valid_states[] = {0, 1, 2, 3, 4, 6, 7, 8, 9, 10, 11, 12, 15, 17, 19, 25, -1};
-
-struct ie_len {
- int ie;
- int len;
-};
-
-static
-struct ie_len max_ie_len[] = {
- {IE_SEGMENT, 4},
- {IE_BEARER, 12},
- {IE_CAUSE, 32},
- {IE_CALL_ID, 10},
- {IE_CALL_STATE, 3},
- {IE_CHANNEL_ID, 34},
- {IE_FACILITY, 255},
- {IE_PROGRESS, 4},
- {IE_NET_FAC, 255},
- {IE_NOTIFY, 3},
- {IE_DISPLAY, 82},
- {IE_DATE, 8},
- {IE_KEYPAD, 34},
- {IE_SIGNAL, 3},
- {IE_INFORATE, 6},
- {IE_E2E_TDELAY, 11},
- {IE_TDELAY_SEL, 5},
- {IE_PACK_BINPARA, 3},
- {IE_PACK_WINSIZE, 4},
- {IE_PACK_SIZE, 4},
- {IE_CUG, 7},
- {IE_REV_CHARGE, 3},
- {IE_CALLING_PN, 24},
- {IE_CALLING_SUB, 23},
- {IE_CALLED_PN, 24},
- {IE_CALLED_SUB, 23},
- {IE_REDIR_NR, 255},
- {IE_TRANS_SEL, 255},
- {IE_RESTART_IND, 3},
- {IE_LLC, 18},
- {IE_HLC, 5},
- {IE_USER_USER, 131},
- {-1, 0},
-};
-
-static int
-getmax_ie_len(u_char ie) {
- int i = 0;
- while (max_ie_len[i].ie != -1) {
- if (max_ie_len[i].ie == ie)
- return (max_ie_len[i].len);
- i++;
- }
- return (255);
-}
-
-static int
-ie_in_set(struct l3_process *pc, u_char ie, int *checklist) {
- int ret = 1;
-
- while (*checklist != -1) {
- if ((*checklist & 0xff) == ie) {
- if (ie & 0x80)
- return (-ret);
- else
- return (ret);
- }
- ret++;
- checklist++;
- }
- return (0);
-}
-
-static int
-check_infoelements(struct l3_process *pc, struct sk_buff *skb, int *checklist)
-{
- int *cl = checklist;
- u_char mt;
- u_char *p, ie;
- int l, newpos, oldpos;
- int err_seq = 0, err_len = 0, err_compr = 0, err_ureg = 0;
- u_char codeset = 0;
- u_char old_codeset = 0;
- u_char codelock = 1;
-
- p = skb->data;
- /* skip cr */
- p++;
- l = (*p++) & 0xf;
- p += l;
- mt = *p++;
- oldpos = 0;
- while ((p - skb->data) < skb->len) {
- if ((*p & 0xf0) == 0x90) { /* shift codeset */
- old_codeset = codeset;
- codeset = *p & 7;
- if (*p & 0x08)
- codelock = 0;
- else
- codelock = 1;
- if (pc->debug & L3_DEB_CHECK)
- l3_debug(pc->st, "check IE shift%scodeset %d->%d",
- codelock ? " locking " : " ", old_codeset, codeset);
- p++;
- continue;
- }
- if (!codeset) { /* only codeset 0 */
- if ((newpos = ie_in_set(pc, *p, cl))) {
- if (newpos > 0) {
- if (newpos < oldpos)
- err_seq++;
- else
- oldpos = newpos;
- }
- } else {
- if (ie_in_set(pc, *p, comp_required))
- err_compr++;
- else
- err_ureg++;
- }
- }
- ie = *p++;
- if (ie & 0x80) {
- l = 1;
- } else {
- l = *p++;
- p += l;
- l += 2;
- }
- if (!codeset && (l > getmax_ie_len(ie)))
- err_len++;
- if (!codelock) {
- if (pc->debug & L3_DEB_CHECK)
- l3_debug(pc->st, "check IE shift back codeset %d->%d",
- codeset, old_codeset);
- codeset = old_codeset;
- codelock = 1;
- }
- }
- if (err_compr | err_ureg | err_len | err_seq) {
- if (pc->debug & L3_DEB_CHECK)
- l3_debug(pc->st, "check IE MT(%x) %d/%d/%d/%d",
- mt, err_compr, err_ureg, err_len, err_seq);
- if (err_compr)
- return (ERR_IE_COMPREHENSION);
- if (err_ureg)
- return (ERR_IE_UNRECOGNIZED);
- if (err_len)
- return (ERR_IE_LENGTH);
- if (err_seq)
- return (ERR_IE_SEQUENCE);
- }
- return (0);
-}
-
-/* verify that a message type exists and contains no IE errors */
-static int
-l3dss1_check_messagetype_validity(struct l3_process *pc, int mt, void *arg)
-{
- switch (mt) {
- case MT_ALERTING:
- case MT_CALL_PROCEEDING:
- case MT_CONNECT:
- case MT_CONNECT_ACKNOWLEDGE:
- case MT_DISCONNECT:
- case MT_INFORMATION:
- case MT_FACILITY:
- case MT_NOTIFY:
- case MT_PROGRESS:
- case MT_RELEASE:
- case MT_RELEASE_COMPLETE:
- case MT_SETUP:
- case MT_SETUP_ACKNOWLEDGE:
- case MT_RESUME_ACKNOWLEDGE:
- case MT_RESUME_REJECT:
- case MT_SUSPEND_ACKNOWLEDGE:
- case MT_SUSPEND_REJECT:
- case MT_USER_INFORMATION:
- case MT_RESTART:
- case MT_RESTART_ACKNOWLEDGE:
- case MT_CONGESTION_CONTROL:
- case MT_STATUS:
- case MT_STATUS_ENQUIRY:
- if (pc->debug & L3_DEB_CHECK)
- l3_debug(pc->st, "l3dss1_check_messagetype_validity mt(%x) OK", mt);
- break;
- case MT_RESUME: /* RESUME only in user->net */
- case MT_SUSPEND: /* SUSPEND only in user->net */
- default:
- if (pc->debug & (L3_DEB_CHECK | L3_DEB_WARN))
- l3_debug(pc->st, "l3dss1_check_messagetype_validity mt(%x) fail", mt);
- pc->para.cause = 97;
- l3dss1_status_send(pc, 0, NULL);
- return (1);
- }
- return (0);
-}
-
-static void
-l3dss1_std_ie_err(struct l3_process *pc, int ret) {
-
- if (pc->debug & L3_DEB_CHECK)
- l3_debug(pc->st, "check_infoelements ret %d", ret);
- switch (ret) {
- case 0:
- break;
- case ERR_IE_COMPREHENSION:
- pc->para.cause = 96;
- l3dss1_status_send(pc, 0, NULL);
- break;
- case ERR_IE_UNRECOGNIZED:
- pc->para.cause = 99;
- l3dss1_status_send(pc, 0, NULL);
- break;
- case ERR_IE_LENGTH:
- pc->para.cause = 100;
- l3dss1_status_send(pc, 0, NULL);
- break;
- case ERR_IE_SEQUENCE:
- default:
- break;
- }
-}
-
-static int
-l3dss1_get_channel_id(struct l3_process *pc, struct sk_buff *skb) {
- u_char *p;
-
- p = skb->data;
- if ((p = findie(p, skb->len, IE_CHANNEL_ID, 0))) {
- p++;
- if (*p != 1) { /* len for BRI = 1 */
- if (pc->debug & L3_DEB_WARN)
- l3_debug(pc->st, "wrong chid len %d", *p);
- return (-2);
- }
- p++;
- if (*p & 0x60) { /* only base rate interface */
- if (pc->debug & L3_DEB_WARN)
- l3_debug(pc->st, "wrong chid %x", *p);
- return (-3);
- }
- return (*p & 0x3);
- } else
- return (-1);
-}
-
-static int
-l3dss1_get_cause(struct l3_process *pc, struct sk_buff *skb) {
- u_char l, i = 0;
- u_char *p;
-
- p = skb->data;
- pc->para.cause = 31;
- pc->para.loc = 0;
- if ((p = findie(p, skb->len, IE_CAUSE, 0))) {
- p++;
- l = *p++;
- if (l > 30)
- return (1);
- if (l) {
- pc->para.loc = *p++;
- l--;
- } else {
- return (2);
- }
- if (l && !(pc->para.loc & 0x80)) {
- l--;
- p++; /* skip recommendation */
- }
- if (l) {
- pc->para.cause = *p++;
- l--;
- if (!(pc->para.cause & 0x80))
- return (3);
- } else
- return (4);
- while (l && (i < 6)) {
- pc->para.diag[i++] = *p++;
- l--;
- }
- } else
- return (-1);
- return (0);
-}
-
-static void
-l3dss1_msg_with_uus(struct l3_process *pc, u_char cmd)
-{
- struct sk_buff *skb;
- u_char tmp[16 + 40];
- u_char *p = tmp;
- int l;
-
- MsgHead(p, pc->callref, cmd);
-
- if (pc->prot.dss1.uus1_data[0])
- { *p++ = IE_USER_USER; /* UUS info element */
- *p++ = strlen(pc->prot.dss1.uus1_data) + 1;
- *p++ = 0x04; /* IA5 chars */
- strcpy(p, pc->prot.dss1.uus1_data);
- p += strlen(pc->prot.dss1.uus1_data);
- pc->prot.dss1.uus1_data[0] = '\0';
- }
-
- l = p - tmp;
- if (!(skb = l3_alloc_skb(l)))
- return;
- skb_put_data(skb, tmp, l);
- l3_msg(pc->st, DL_DATA | REQUEST, skb);
-} /* l3dss1_msg_with_uus */
-
-static void
-l3dss1_release_req(struct l3_process *pc, u_char pr, void *arg)
-{
- StopAllL3Timer(pc);
- newl3state(pc, 19);
- if (!pc->prot.dss1.uus1_data[0])
- l3dss1_message(pc, MT_RELEASE);
- else
- l3dss1_msg_with_uus(pc, MT_RELEASE);
- L3AddTimer(&pc->timer, T308, CC_T308_1);
-}
-
-static void
-l3dss1_release_cmpl(struct l3_process *pc, u_char pr, void *arg)
-{
- struct sk_buff *skb = arg;
- int ret;
-
- if ((ret = l3dss1_get_cause(pc, skb)) > 0) {
- if (pc->debug & L3_DEB_WARN)
- l3_debug(pc->st, "RELCMPL get_cause ret(%d)", ret);
- } else if (ret < 0)
- pc->para.cause = NO_CAUSE;
- StopAllL3Timer(pc);
- newl3state(pc, 0);
- pc->st->l3.l3l4(pc->st, CC_RELEASE | CONFIRM, pc);
- dss1_release_l3_process(pc);
-}
-
-#ifdef EXT_BEARER_CAPS
-
-static u_char *
-EncodeASyncParams(u_char *p, u_char si2)
-{ // 7c 06 88 90 21 42 00 bb
-
- p[0] = 0;
- p[1] = 0x40; // Intermediate rate: 16 kbit/s jj 2000.02.19
- p[2] = 0x80;
- if (si2 & 32) // 7 data bits
-
- p[2] += 16;
- else // 8 data bits
-
- p[2] += 24;
-
- if (si2 & 16) // 2 stop bits
-
- p[2] += 96;
- else // 1 stop bit
-
- p[2] += 32;
-
- if (si2 & 8) // even parity
-
- p[2] += 2;
- else // no parity
-
- p[2] += 3;
-
- switch (si2 & 0x07) {
- case 0:
- p[0] = 66; // 1200 bit/s
-
- break;
- case 1:
- p[0] = 88; // 1200/75 bit/s
-
- break;
- case 2:
- p[0] = 87; // 75/1200 bit/s
-
- break;
- case 3:
- p[0] = 67; // 2400 bit/s
-
- break;
- case 4:
- p[0] = 69; // 4800 bit/s
-
- break;
- case 5:
- p[0] = 72; // 9600 bit/s
-
- break;
- case 6:
- p[0] = 73; // 14400 bit/s
-
- break;
- case 7:
- p[0] = 75; // 19200 bit/s
-
- break;
- }
- return p + 3;
-}
-
-static u_char
-EncodeSyncParams(u_char si2, u_char ai)
-{
-
- switch (si2) {
- case 0:
- return ai + 2; // 1200 bit/s
-
- case 1:
- return ai + 24; // 1200/75 bit/s
-
- case 2:
- return ai + 23; // 75/1200 bit/s
-
- case 3:
- return ai + 3; // 2400 bit/s
-
- case 4:
- return ai + 5; // 4800 bit/s
-
- case 5:
- return ai + 8; // 9600 bit/s
-
- case 6:
- return ai + 9; // 14400 bit/s
-
- case 7:
- return ai + 11; // 19200 bit/s
-
- case 8:
- return ai + 14; // 48000 bit/s
-
- case 9:
- return ai + 15; // 56000 bit/s
-
- case 15:
- return ai + 40; // negotiate bit/s
-
- default:
- break;
- }
- return ai;
-}
-
-
-static u_char
-DecodeASyncParams(u_char si2, u_char *p)
-{
- u_char info;
-
- switch (p[5]) {
- case 66: // 1200 bit/s
-
- break; // si2 don't change
-
- case 88: // 1200/75 bit/s
-
- si2 += 1;
- break;
- case 87: // 75/1200 bit/s
-
- si2 += 2;
- break;
- case 67: // 2400 bit/s
-
- si2 += 3;
- break;
- case 69: // 4800 bit/s
-
- si2 += 4;
- break;
- case 72: // 9600 bit/s
-
- si2 += 5;
- break;
- case 73: // 14400 bit/s
-
- si2 += 6;
- break;
- case 75: // 19200 bit/s
-
- si2 += 7;
- break;
- }
-
- info = p[7] & 0x7f;
- if ((info & 16) && (!(info & 8))) // 7 data bits
-
- si2 += 32; // else 8 data bits
-
- if ((info & 96) == 96) // 2 stop bits
-
- si2 += 16; // else 1 stop bit
-
- if ((info & 2) && (!(info & 1))) // even parity
-
- si2 += 8; // else no parity
-
- return si2;
-}
-
-
-static u_char
-DecodeSyncParams(u_char si2, u_char info)
-{
- info &= 0x7f;
- switch (info) {
- case 40: // bit/s negotiation failed ai := 165 not 175!
-
- return si2 + 15;
- case 15: // 56000 bit/s failed, ai := 0 not 169 !
-
- return si2 + 9;
- case 14: // 48000 bit/s
-
- return si2 + 8;
- case 11: // 19200 bit/s
-
- return si2 + 7;
- case 9: // 14400 bit/s
-
- return si2 + 6;
- case 8: // 9600 bit/s
-
- return si2 + 5;
- case 5: // 4800 bit/s
-
- return si2 + 4;
- case 3: // 2400 bit/s
-
- return si2 + 3;
- case 23: // 75/1200 bit/s
-
- return si2 + 2;
- case 24: // 1200/75 bit/s
-
- return si2 + 1;
- default: // 1200 bit/s
-
- return si2;
- }
-}
-
-static u_char
-DecodeSI2(struct sk_buff *skb)
-{
- u_char *p; //, *pend=skb->data + skb->len;
-
- if ((p = findie(skb->data, skb->len, 0x7c, 0))) {
- switch (p[4] & 0x0f) {
- case 0x01:
-			if (p[1] == 0x04) // sync. bit-rate adaption
-
-				return DecodeSyncParams(160, p[5]); // V.110/X.30
-
-			else if (p[1] == 0x06) // async. bit-rate adaption
-
- return DecodeASyncParams(192, p); // V.110/X.30
-
- break;
-		case 0x08: // if (p[5] == 0x02) // sync. bit-rate adaption
- if (p[1] > 3)
- return DecodeSyncParams(176, p[5]); // V.120
- break;
- }
- }
- return 0;
-}
-
-#endif
-
-
-static void
-l3dss1_setup_req(struct l3_process *pc, u_char pr,
- void *arg)
-{
- struct sk_buff *skb;
- u_char tmp[128];
- u_char *p = tmp;
- u_char channel = 0;
-
- u_char send_keypad;
- u_char screen = 0x80;
- u_char *teln;
- u_char *msn;
- u_char *sub;
- u_char *sp;
- int l;
-
- MsgHead(p, pc->callref, MT_SETUP);
-
- teln = pc->para.setup.phone;
-#ifndef CONFIG_HISAX_NO_KEYPAD
- send_keypad = (strchr(teln, '*') || strchr(teln, '#')) ? 1 : 0;
-#else
- send_keypad = 0;
-#endif
-#ifndef CONFIG_HISAX_NO_SENDCOMPLETE
- if (!send_keypad)
- *p++ = 0xa1; /* complete indicator */
-#endif
- /*
- * Set Bearer Capability, Map info from 1TR6-convention to EDSS1
- */
- switch (pc->para.setup.si1) {
- case 1: /* Telephony */
- *p++ = IE_BEARER;
- *p++ = 0x3; /* Length */
- *p++ = 0x90; /* Coding Std. CCITT, 3.1 kHz audio */
- *p++ = 0x90; /* Circuit-Mode 64kbps */
- *p++ = 0xa3; /* A-Law Audio */
- break;
- case 5: /* Datatransmission 64k, BTX */
- case 7: /* Datatransmission 64k */
- default:
- *p++ = IE_BEARER;
- *p++ = 0x2; /* Length */
- *p++ = 0x88; /* Coding Std. CCITT, unrestr. dig. Inform. */
- *p++ = 0x90; /* Circuit-Mode 64kbps */
- break;
- }
-
- if (send_keypad) {
- *p++ = IE_KEYPAD;
- *p++ = strlen(teln);
- while (*teln)
- *p++ = (*teln++) & 0x7F;
- }
-
- /*
- * What about info2? Mapping to High-Layer-Compatibility?
- */
- if ((*teln) && (!send_keypad)) {
- /* parse number for special things */
- if (!isdigit(*teln)) {
- switch (0x5f & *teln) {
- case 'C':
- channel = 0x08;
- /* fall through */
- case 'P':
- channel |= 0x80;
- teln++;
- if (*teln == '1')
- channel |= 0x01;
- else
- channel |= 0x02;
- break;
- case 'R':
- screen = 0xA0;
- break;
- case 'D':
- screen = 0x80;
- break;
-
- default:
- if (pc->debug & L3_DEB_WARN)
- l3_debug(pc->st, "Wrong MSN Code");
- break;
- }
- teln++;
- }
- }
- if (channel) {
- *p++ = IE_CHANNEL_ID;
- *p++ = 1;
- *p++ = channel;
- }
- msn = pc->para.setup.eazmsn;
- sub = NULL;
- sp = msn;
- while (*sp) {
- if ('.' == *sp) {
- sub = sp;
- *sp = 0;
- } else
- sp++;
- }
- if (*msn) {
- *p++ = IE_CALLING_PN;
- *p++ = strlen(msn) + (screen ? 2 : 1);
- /* Classify as AnyPref. */
- if (screen) {
- *p++ = 0x01; /* Ext = '0'B, Type = '000'B, Plan = '0001'B. */
- *p++ = screen;
- } else
- *p++ = 0x81; /* Ext = '1'B, Type = '000'B, Plan = '0001'B. */
- while (*msn)
- *p++ = *msn++ & 0x7f;
- }
- if (sub) {
- *sub++ = '.';
- *p++ = IE_CALLING_SUB;
- *p++ = strlen(sub) + 2;
- *p++ = 0x80; /* NSAP coded */
- *p++ = 0x50; /* local IDI format */
- while (*sub)
- *p++ = *sub++ & 0x7f;
- }
- sub = NULL;
- sp = teln;
- while (*sp) {
- if ('.' == *sp) {
- sub = sp;
- *sp = 0;
- } else
- sp++;
- }
-
- if (!send_keypad) {
- *p++ = IE_CALLED_PN;
- *p++ = strlen(teln) + 1;
- /* Classify as AnyPref. */
- *p++ = 0x81; /* Ext = '1'B, Type = '000'B, Plan = '0001'B. */
- while (*teln)
- *p++ = *teln++ & 0x7f;
-
- if (sub) {
- *sub++ = '.';
- *p++ = IE_CALLED_SUB;
- *p++ = strlen(sub) + 2;
- *p++ = 0x80; /* NSAP coded */
- *p++ = 0x50; /* local IDI format */
- while (*sub)
- *p++ = *sub++ & 0x7f;
- }
- }
-#ifdef EXT_BEARER_CAPS
-	if ((pc->para.setup.si2 >= 160) && (pc->para.setup.si2 <= 175)) { // sync. bit-rate adaption, V.110/X.30
-
- *p++ = IE_LLC;
- *p++ = 0x04;
- *p++ = 0x88;
- *p++ = 0x90;
- *p++ = 0x21;
- *p++ = EncodeSyncParams(pc->para.setup.si2 - 160, 0x80);
-	} else if ((pc->para.setup.si2 >= 176) && (pc->para.setup.si2 <= 191)) { // sync. bit-rate adaption, V.120
-
- *p++ = IE_LLC;
- *p++ = 0x05;
- *p++ = 0x88;
- *p++ = 0x90;
- *p++ = 0x28;
- *p++ = EncodeSyncParams(pc->para.setup.si2 - 176, 0);
- *p++ = 0x82;
-	} else if (pc->para.setup.si2 >= 192) { // async. bit-rate adaption, V.110/X.30
-
- *p++ = IE_LLC;
- *p++ = 0x06;
- *p++ = 0x88;
- *p++ = 0x90;
- *p++ = 0x21;
- p = EncodeASyncParams(p, pc->para.setup.si2 - 192);
-#ifndef CONFIG_HISAX_NO_LLC
- } else {
- switch (pc->para.setup.si1) {
- case 1: /* Telephony */
- *p++ = IE_LLC;
- *p++ = 0x3; /* Length */
- *p++ = 0x90; /* Coding Std. CCITT, 3.1 kHz audio */
- *p++ = 0x90; /* Circuit-Mode 64kbps */
- *p++ = 0xa3; /* A-Law Audio */
- break;
- case 5: /* Datatransmission 64k, BTX */
- case 7: /* Datatransmission 64k */
- default:
- *p++ = IE_LLC;
- *p++ = 0x2; /* Length */
- *p++ = 0x88; /* Coding Std. CCITT, unrestr. dig. Inform. */
- *p++ = 0x90; /* Circuit-Mode 64kbps */
- break;
- }
-#endif
- }
-#endif
- l = p - tmp;
- if (!(skb = l3_alloc_skb(l)))
- return;
- skb_put_data(skb, tmp, l);
- L3DelTimer(&pc->timer);
- L3AddTimer(&pc->timer, T303, CC_T303);
- newl3state(pc, 1);
- l3_msg(pc->st, DL_DATA | REQUEST, skb);
-}
-
-static void
-l3dss1_call_proc(struct l3_process *pc, u_char pr, void *arg)
-{
- struct sk_buff *skb = arg;
- int id, ret;
-
- if ((id = l3dss1_get_channel_id(pc, skb)) >= 0) {
- if ((0 == id) || ((3 == id) && (0x10 == pc->para.moderate))) {
- if (pc->debug & L3_DEB_WARN)
- l3_debug(pc->st, "setup answer with wrong chid %x", id);
- pc->para.cause = 100;
- l3dss1_status_send(pc, pr, NULL);
- return;
- }
- pc->para.bchannel = id;
- } else if (1 == pc->state) {
- if (pc->debug & L3_DEB_WARN)
- l3_debug(pc->st, "setup answer wrong chid (ret %d)", id);
- if (id == -1)
- pc->para.cause = 96;
- else
- pc->para.cause = 100;
- l3dss1_status_send(pc, pr, NULL);
- return;
- }
-	/* Now we are on non-mandatory IEs */
- ret = check_infoelements(pc, skb, ie_CALL_PROCEEDING);
- if (ERR_IE_COMPREHENSION == ret) {
- l3dss1_std_ie_err(pc, ret);
- return;
- }
- L3DelTimer(&pc->timer);
- newl3state(pc, 3);
- L3AddTimer(&pc->timer, T310, CC_T310);
-	if (ret) /* STATUS for non-mandatory IE errors after actions are taken */
- l3dss1_std_ie_err(pc, ret);
- pc->st->l3.l3l4(pc->st, CC_PROCEEDING | INDICATION, pc);
-}
-
-static void
-l3dss1_setup_ack(struct l3_process *pc, u_char pr, void *arg)
-{
- struct sk_buff *skb = arg;
- int id, ret;
-
- if ((id = l3dss1_get_channel_id(pc, skb)) >= 0) {
- if ((0 == id) || ((3 == id) && (0x10 == pc->para.moderate))) {
- if (pc->debug & L3_DEB_WARN)
- l3_debug(pc->st, "setup answer with wrong chid %x", id);
- pc->para.cause = 100;
- l3dss1_status_send(pc, pr, NULL);
- return;
- }
- pc->para.bchannel = id;
- } else {
- if (pc->debug & L3_DEB_WARN)
- l3_debug(pc->st, "setup answer wrong chid (ret %d)", id);
- if (id == -1)
- pc->para.cause = 96;
- else
- pc->para.cause = 100;
- l3dss1_status_send(pc, pr, NULL);
- return;
- }
-	/* Now we are on non-mandatory IEs */
- ret = check_infoelements(pc, skb, ie_SETUP_ACKNOWLEDGE);
- if (ERR_IE_COMPREHENSION == ret) {
- l3dss1_std_ie_err(pc, ret);
- return;
- }
- L3DelTimer(&pc->timer);
- newl3state(pc, 2);
- L3AddTimer(&pc->timer, T304, CC_T304);
-	if (ret) /* STATUS for non-mandatory IE errors after actions are taken */
- l3dss1_std_ie_err(pc, ret);
- pc->st->l3.l3l4(pc->st, CC_MORE_INFO | INDICATION, pc);
-}
-
-static void
-l3dss1_disconnect(struct l3_process *pc, u_char pr, void *arg)
-{
- struct sk_buff *skb = arg;
- u_char *p;
- int ret;
- u_char cause = 0;
-
- StopAllL3Timer(pc);
- if ((ret = l3dss1_get_cause(pc, skb))) {
- if (pc->debug & L3_DEB_WARN)
- l3_debug(pc->st, "DISC get_cause ret(%d)", ret);
- if (ret < 0)
- cause = 96;
- else if (ret > 0)
- cause = 100;
- }
- if ((p = findie(skb->data, skb->len, IE_FACILITY, 0)))
- l3dss1_parse_facility(pc->st, pc, pc->callref, p);
- ret = check_infoelements(pc, skb, ie_DISCONNECT);
- if (ERR_IE_COMPREHENSION == ret)
- cause = 96;
- else if ((!cause) && (ERR_IE_UNRECOGNIZED == ret))
- cause = 99;
- ret = pc->state;
- newl3state(pc, 12);
- if (cause)
- newl3state(pc, 19);
- if (11 != ret)
- pc->st->l3.l3l4(pc->st, CC_DISCONNECT | INDICATION, pc);
- else if (!cause)
- l3dss1_release_req(pc, pr, NULL);
- if (cause) {
- l3dss1_message_cause(pc, MT_RELEASE, cause);
- L3AddTimer(&pc->timer, T308, CC_T308_1);
- }
-}
-
-static void
-l3dss1_connect(struct l3_process *pc, u_char pr, void *arg)
-{
- struct sk_buff *skb = arg;
- int ret;
-
- ret = check_infoelements(pc, skb, ie_CONNECT);
- if (ERR_IE_COMPREHENSION == ret) {
- l3dss1_std_ie_err(pc, ret);
- return;
- }
- L3DelTimer(&pc->timer); /* T310 */
- newl3state(pc, 10);
- pc->para.chargeinfo = 0;
-	/* COLP handling should be inserted here KKe */
- if (ret)
- l3dss1_std_ie_err(pc, ret);
- pc->st->l3.l3l4(pc->st, CC_SETUP | CONFIRM, pc);
-}
-
-static void
-l3dss1_alerting(struct l3_process *pc, u_char pr, void *arg)
-{
- struct sk_buff *skb = arg;
- int ret;
-
- ret = check_infoelements(pc, skb, ie_ALERTING);
- if (ERR_IE_COMPREHENSION == ret) {
- l3dss1_std_ie_err(pc, ret);
- return;
- }
- L3DelTimer(&pc->timer); /* T304 */
- newl3state(pc, 4);
- if (ret)
- l3dss1_std_ie_err(pc, ret);
- pc->st->l3.l3l4(pc->st, CC_ALERTING | INDICATION, pc);
-}
-
-static void
-l3dss1_setup(struct l3_process *pc, u_char pr, void *arg)
-{
- u_char *p;
- int bcfound = 0;
- char tmp[80];
- struct sk_buff *skb = arg;
- int id;
- int err = 0;
-
- /*
- * Bearer Capabilities
- */
- p = skb->data;
-	/* only the first occurrence will be detected! */
- if ((p = findie(p, skb->len, 0x04, 0))) {
- if ((p[1] < 2) || (p[1] > 11))
- err = 1;
- else {
- pc->para.setup.si2 = 0;
- switch (p[2] & 0x7f) {
- case 0x00: /* Speech */
- case 0x10: /* 3.1 Khz audio */
- pc->para.setup.si1 = 1;
- break;
- case 0x08: /* Unrestricted digital information */
- pc->para.setup.si1 = 7;
-/* JIM, 05.11.97 I wanna set service indicator 2 */
-#ifdef EXT_BEARER_CAPS
- pc->para.setup.si2 = DecodeSI2(skb);
-#endif
- break;
- case 0x09: /* Restricted digital information */
- pc->para.setup.si1 = 2;
- break;
- case 0x11:
- /* Unrestr. digital information with
-				 * tones/announcements (or 7 kHz audio)
- */
- pc->para.setup.si1 = 3;
- break;
- case 0x18: /* Video */
- pc->para.setup.si1 = 4;
- break;
- default:
- err = 2;
- break;
- }
- switch (p[3] & 0x7f) {
-			case 0x40:	/* packet mode */
- pc->para.setup.si1 = 8;
- break;
- case 0x10: /* 64 kbit */
- case 0x11: /* 2*64 kbit */
- case 0x13: /* 384 kbit */
- case 0x15: /* 1536 kbit */
- case 0x17: /* 1920 kbit */
- pc->para.moderate = p[3] & 0x7f;
- break;
- default:
- err = 3;
- break;
- }
- }
- if (pc->debug & L3_DEB_SI)
- l3_debug(pc->st, "SI=%d, AI=%d",
- pc->para.setup.si1, pc->para.setup.si2);
- if (err) {
- if (pc->debug & L3_DEB_WARN)
- l3_debug(pc->st, "setup with wrong bearer(l=%d:%x,%x)",
- p[1], p[2], p[3]);
- pc->para.cause = 100;
- l3dss1_msg_without_setup(pc, pr, NULL);
- return;
- }
- } else {
- if (pc->debug & L3_DEB_WARN)
- l3_debug(pc->st, "setup without bearer capabilities");
- /* ETS 300-104 1.3.3 */
- pc->para.cause = 96;
- l3dss1_msg_without_setup(pc, pr, NULL);
- return;
- }
- /*
- * Channel Identification
- */
- if ((id = l3dss1_get_channel_id(pc, skb)) >= 0) {
- if ((pc->para.bchannel = id)) {
- if ((3 == id) && (0x10 == pc->para.moderate)) {
- if (pc->debug & L3_DEB_WARN)
- l3_debug(pc->st, "setup with wrong chid %x",
- id);
- pc->para.cause = 100;
- l3dss1_msg_without_setup(pc, pr, NULL);
- return;
- }
- bcfound++;
- } else
- { if (pc->debug & L3_DEB_WARN)
- l3_debug(pc->st, "setup without bchannel, call waiting");
- bcfound++;
- }
- } else {
- if (pc->debug & L3_DEB_WARN)
- l3_debug(pc->st, "setup with wrong chid ret %d", id);
- if (id == -1)
- pc->para.cause = 96;
- else
- pc->para.cause = 100;
- l3dss1_msg_without_setup(pc, pr, NULL);
- return;
- }
-	/* Now we are on non-mandatory IEs */
- err = check_infoelements(pc, skb, ie_SETUP);
- if (ERR_IE_COMPREHENSION == err) {
- pc->para.cause = 96;
- l3dss1_msg_without_setup(pc, pr, NULL);
- return;
- }
- p = skb->data;
- if ((p = findie(p, skb->len, 0x70, 0)))
- iecpy(pc->para.setup.eazmsn, p, 1);
- else
- pc->para.setup.eazmsn[0] = 0;
-
- p = skb->data;
- if ((p = findie(p, skb->len, 0x71, 0))) {
- /* Called party subaddress */
- if ((p[1] >= 2) && (p[2] == 0x80) && (p[3] == 0x50)) {
- tmp[0] = '.';
- iecpy(&tmp[1], p, 2);
- strcat(pc->para.setup.eazmsn, tmp);
- } else if (pc->debug & L3_DEB_WARN)
- l3_debug(pc->st, "wrong called subaddress");
- }
- p = skb->data;
- if ((p = findie(p, skb->len, 0x6c, 0))) {
- pc->para.setup.plan = p[2];
- if (p[2] & 0x80) {
- iecpy(pc->para.setup.phone, p, 1);
- pc->para.setup.screen = 0;
- } else {
- iecpy(pc->para.setup.phone, p, 2);
- pc->para.setup.screen = p[3];
- }
- } else {
- pc->para.setup.phone[0] = 0;
- pc->para.setup.plan = 0;
- pc->para.setup.screen = 0;
- }
- p = skb->data;
- if ((p = findie(p, skb->len, 0x6d, 0))) {
- /* Calling party subaddress */
- if ((p[1] >= 2) && (p[2] == 0x80) && (p[3] == 0x50)) {
- tmp[0] = '.';
- iecpy(&tmp[1], p, 2);
- strcat(pc->para.setup.phone, tmp);
- } else if (pc->debug & L3_DEB_WARN)
- l3_debug(pc->st, "wrong calling subaddress");
- }
- newl3state(pc, 6);
-	if (err) /* STATUS for non-mandatory IE errors after actions are taken */
- l3dss1_std_ie_err(pc, err);
- pc->st->l3.l3l4(pc->st, CC_SETUP | INDICATION, pc);
-}
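A minimal worked example of the bearer-capability decoding above (an illustration, not part of the deleted driver): octet 3 of the IE selects the service indicator si1, octet 4 selects the transfer rate kept in para.moderate. The byte values below follow the two switch statements in l3dss1_setup().

/* Sketch only: a Bearer Capability IE for 3.1 kHz audio at 64 kbit/s,
 * as l3dss1_setup() would interpret it. */
u_char bc[] = { 0x04, 0x02, 0x90, 0x90 };	/* IE id, length, octets 3 and 4 */
/* bc[2] & 0x7f == 0x10  ->  pc->para.setup.si1 = 1    (3.1 kHz audio) */
/* bc[3] & 0x7f == 0x10  ->  pc->para.moderate  = 0x10 (64 kbit/s)     */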
-
-static void
-l3dss1_reset(struct l3_process *pc, u_char pr, void *arg)
-{
- dss1_release_l3_process(pc);
-}
-
-static void
-l3dss1_disconnect_req(struct l3_process *pc, u_char pr, void *arg)
-{
- struct sk_buff *skb;
- u_char tmp[16 + 40];
- u_char *p = tmp;
- int l;
- u_char cause = 16;
-
- if (pc->para.cause != NO_CAUSE)
- cause = pc->para.cause;
-
- StopAllL3Timer(pc);
-
- MsgHead(p, pc->callref, MT_DISCONNECT);
-
- *p++ = IE_CAUSE;
- *p++ = 0x2;
- *p++ = 0x80;
- *p++ = cause | 0x80;
-
- if (pc->prot.dss1.uus1_data[0])
- { *p++ = IE_USER_USER; /* UUS info element */
- *p++ = strlen(pc->prot.dss1.uus1_data) + 1;
- *p++ = 0x04; /* IA5 chars */
- strcpy(p, pc->prot.dss1.uus1_data);
- p += strlen(pc->prot.dss1.uus1_data);
- pc->prot.dss1.uus1_data[0] = '\0';
- }
-
- l = p - tmp;
- if (!(skb = l3_alloc_skb(l)))
- return;
- skb_put_data(skb, tmp, l);
- newl3state(pc, 11);
- l3_msg(pc->st, DL_DATA | REQUEST, skb);
- L3AddTimer(&pc->timer, T305, CC_T305);
-}
-
-static void
-l3dss1_setup_rsp(struct l3_process *pc, u_char pr,
- void *arg)
-{
- if (!pc->para.bchannel)
- { if (pc->debug & L3_DEB_WARN)
- l3_debug(pc->st, "D-chan connect for waiting call");
- l3dss1_disconnect_req(pc, pr, arg);
- return;
- }
- newl3state(pc, 8);
- l3dss1_message(pc, MT_CONNECT);
- L3DelTimer(&pc->timer);
- L3AddTimer(&pc->timer, T313, CC_T313);
-}
-
-static void
-l3dss1_connect_ack(struct l3_process *pc, u_char pr, void *arg)
-{
- struct sk_buff *skb = arg;
- int ret;
-
- ret = check_infoelements(pc, skb, ie_CONNECT_ACKNOWLEDGE);
- if (ERR_IE_COMPREHENSION == ret) {
- l3dss1_std_ie_err(pc, ret);
- return;
- }
- newl3state(pc, 10);
- L3DelTimer(&pc->timer);
- if (ret)
- l3dss1_std_ie_err(pc, ret);
- pc->st->l3.l3l4(pc->st, CC_SETUP_COMPL | INDICATION, pc);
-}
-
-static void
-l3dss1_reject_req(struct l3_process *pc, u_char pr, void *arg)
-{
- struct sk_buff *skb;
- u_char tmp[16];
- u_char *p = tmp;
- int l;
- u_char cause = 21;
-
- if (pc->para.cause != NO_CAUSE)
- cause = pc->para.cause;
-
- MsgHead(p, pc->callref, MT_RELEASE_COMPLETE);
-
- *p++ = IE_CAUSE;
- *p++ = 0x2;
- *p++ = 0x80;
- *p++ = cause | 0x80;
-
- l = p - tmp;
- if (!(skb = l3_alloc_skb(l)))
- return;
- skb_put_data(skb, tmp, l);
- l3_msg(pc->st, DL_DATA | REQUEST, skb);
- pc->st->l3.l3l4(pc->st, CC_RELEASE | INDICATION, pc);
- newl3state(pc, 0);
- dss1_release_l3_process(pc);
-}
-
-static void
-l3dss1_release(struct l3_process *pc, u_char pr, void *arg)
-{
- struct sk_buff *skb = arg;
- u_char *p;
- int ret, cause = 0;
-
- StopAllL3Timer(pc);
- if ((ret = l3dss1_get_cause(pc, skb)) > 0) {
- if (pc->debug & L3_DEB_WARN)
- l3_debug(pc->st, "REL get_cause ret(%d)", ret);
- } else if (ret < 0)
- pc->para.cause = NO_CAUSE;
- if ((p = findie(skb->data, skb->len, IE_FACILITY, 0))) {
- l3dss1_parse_facility(pc->st, pc, pc->callref, p);
- }
- if ((ret < 0) && (pc->state != 11))
- cause = 96;
- else if (ret > 0)
- cause = 100;
- ret = check_infoelements(pc, skb, ie_RELEASE);
- if (ERR_IE_COMPREHENSION == ret)
- cause = 96;
- else if ((ERR_IE_UNRECOGNIZED == ret) && (!cause))
- cause = 99;
- if (cause)
- l3dss1_message_cause(pc, MT_RELEASE_COMPLETE, cause);
- else
- l3dss1_message(pc, MT_RELEASE_COMPLETE);
- pc->st->l3.l3l4(pc->st, CC_RELEASE | INDICATION, pc);
- newl3state(pc, 0);
- dss1_release_l3_process(pc);
-}
-
-static void
-l3dss1_alert_req(struct l3_process *pc, u_char pr,
- void *arg)
-{
- newl3state(pc, 7);
- if (!pc->prot.dss1.uus1_data[0])
- l3dss1_message(pc, MT_ALERTING);
- else
- l3dss1_msg_with_uus(pc, MT_ALERTING);
-}
-
-static void
-l3dss1_proceed_req(struct l3_process *pc, u_char pr,
- void *arg)
-{
- newl3state(pc, 9);
- l3dss1_message(pc, MT_CALL_PROCEEDING);
- pc->st->l3.l3l4(pc->st, CC_PROCEED_SEND | INDICATION, pc);
-}
-
-static void
-l3dss1_setup_ack_req(struct l3_process *pc, u_char pr,
- void *arg)
-{
- newl3state(pc, 25);
- L3DelTimer(&pc->timer);
- L3AddTimer(&pc->timer, T302, CC_T302);
- l3dss1_message(pc, MT_SETUP_ACKNOWLEDGE);
-}
-
-/********************************************/
-/* deliver an incoming display message to HL */
-/********************************************/
-static void
-l3dss1_deliver_display(struct l3_process *pc, int pr, u_char *infp)
-{ u_char len;
- isdn_ctrl ic;
- struct IsdnCardState *cs;
- char *p;
-
- if (*infp++ != IE_DISPLAY) return;
- if ((len = *infp++) > 80) return; /* total length <= 82 */
- if (!pc->chan) return;
-
- p = ic.parm.display;
- while (len--)
- *p++ = *infp++;
- *p = '\0';
- ic.command = ISDN_STAT_DISPLAY;
- cs = pc->st->l1.hardware;
- ic.driver = cs->myid;
- ic.arg = pc->chan->chan;
- cs->iif.statcallb(&ic);
-} /* l3dss1_deliver_display */
-
-
-static void
-l3dss1_progress(struct l3_process *pc, u_char pr, void *arg)
-{
- struct sk_buff *skb = arg;
- int err = 0;
- u_char *p;
-
- if ((p = findie(skb->data, skb->len, IE_PROGRESS, 0))) {
- if (p[1] != 2) {
- err = 1;
- pc->para.cause = 100;
- } else if (!(p[2] & 0x70)) {
- switch (p[2]) {
- case 0x80:
- case 0x81:
- case 0x82:
- case 0x84:
- case 0x85:
- case 0x87:
- case 0x8a:
- switch (p[3]) {
- case 0x81:
- case 0x82:
- case 0x83:
- case 0x84:
- case 0x88:
- break;
- default:
- err = 2;
- pc->para.cause = 100;
- break;
- }
- break;
- default:
- err = 3;
- pc->para.cause = 100;
- break;
- }
- }
- } else {
- pc->para.cause = 96;
- err = 4;
- }
- if (err) {
- if (pc->debug & L3_DEB_WARN)
- l3_debug(pc->st, "progress error %d", err);
- l3dss1_status_send(pc, pr, NULL);
- return;
- }
-	/* Now we are on non-mandatory IEs */
- err = check_infoelements(pc, skb, ie_PROGRESS);
- if (err)
- l3dss1_std_ie_err(pc, err);
- if (ERR_IE_COMPREHENSION != err)
- pc->st->l3.l3l4(pc->st, CC_PROGRESS | INDICATION, pc);
-}
-
-static void
-l3dss1_notify(struct l3_process *pc, u_char pr, void *arg)
-{
- struct sk_buff *skb = arg;
- int err = 0;
- u_char *p;
-
- if ((p = findie(skb->data, skb->len, IE_NOTIFY, 0))) {
- if (p[1] != 1) {
- err = 1;
- pc->para.cause = 100;
- } else {
- switch (p[2]) {
- case 0x80:
- case 0x81:
- case 0x82:
- break;
- default:
- pc->para.cause = 100;
- err = 2;
- break;
- }
- }
- } else {
- pc->para.cause = 96;
- err = 3;
- }
- if (err) {
- if (pc->debug & L3_DEB_WARN)
- l3_debug(pc->st, "notify error %d", err);
- l3dss1_status_send(pc, pr, NULL);
- return;
- }
-	/* Now we are on non-mandatory IEs */
- err = check_infoelements(pc, skb, ie_NOTIFY);
- if (err)
- l3dss1_std_ie_err(pc, err);
- if (ERR_IE_COMPREHENSION != err)
- pc->st->l3.l3l4(pc->st, CC_NOTIFY | INDICATION, pc);
-}
-
-static void
-l3dss1_status_enq(struct l3_process *pc, u_char pr, void *arg)
-{
- int ret;
- struct sk_buff *skb = arg;
-
- ret = check_infoelements(pc, skb, ie_STATUS_ENQUIRY);
- l3dss1_std_ie_err(pc, ret);
- pc->para.cause = 30; /* response to STATUS_ENQUIRY */
- l3dss1_status_send(pc, pr, NULL);
-}
-
-static void
-l3dss1_information(struct l3_process *pc, u_char pr, void *arg)
-{
- int ret;
- struct sk_buff *skb = arg;
- u_char *p;
- char tmp[32];
-
- ret = check_infoelements(pc, skb, ie_INFORMATION);
- if (ret)
- l3dss1_std_ie_err(pc, ret);
- if (pc->state == 25) { /* overlap receiving */
- L3DelTimer(&pc->timer);
- p = skb->data;
- if ((p = findie(p, skb->len, 0x70, 0))) {
- iecpy(tmp, p, 1);
- strcat(pc->para.setup.eazmsn, tmp);
- pc->st->l3.l3l4(pc->st, CC_MORE_INFO | INDICATION, pc);
- }
- L3AddTimer(&pc->timer, T302, CC_T302);
- }
-}
-
-/******************************/
-/* handle deflection requests */
-/******************************/
-static void l3dss1_redir_req(struct l3_process *pc, u_char pr, void *arg)
-{
- struct sk_buff *skb;
- u_char tmp[128];
- u_char *p = tmp;
- u_char *subp;
- u_char len_phone = 0;
- u_char len_sub = 0;
- int l;
-
-
- strcpy(pc->prot.dss1.uus1_data, pc->chan->setup.eazmsn); /* copy uus element if available */
- if (!pc->chan->setup.phone[0])
- { pc->para.cause = -1;
- l3dss1_disconnect_req(pc, pr, arg); /* disconnect immediately */
- return;
- } /* only uus */
-
- if (pc->prot.dss1.invoke_id)
- free_invoke_id(pc->st, pc->prot.dss1.invoke_id);
-
- if (!(pc->prot.dss1.invoke_id = new_invoke_id(pc->st)))
- return;
-
- MsgHead(p, pc->callref, MT_FACILITY);
-
- for (subp = pc->chan->setup.phone; (*subp) && (*subp != '.'); subp++) len_phone++; /* len of phone number */
- if (*subp++ == '.') len_sub = strlen(subp) + 2; /* length including info subaddress element */
-
- *p++ = 0x1c; /* Facility info element */
- *p++ = len_phone + len_sub + 2 + 2 + 8 + 3 + 3; /* length of element */
- *p++ = 0x91; /* remote operations protocol */
- *p++ = 0xa1; /* invoke component */
-
- *p++ = len_phone + len_sub + 2 + 2 + 8 + 3; /* length of data */
- *p++ = 0x02; /* invoke id tag, integer */
- *p++ = 0x01; /* length */
- *p++ = pc->prot.dss1.invoke_id; /* invoke id */
- *p++ = 0x02; /* operation value tag, integer */
- *p++ = 0x01; /* length */
- *p++ = 0x0D; /* Call Deflect */
-
- *p++ = 0x30; /* sequence phone number */
- *p++ = len_phone + 2 + 2 + 3 + len_sub; /* length */
-
- *p++ = 0x30; /* Deflected to UserNumber */
- *p++ = len_phone + 2 + len_sub; /* length */
- *p++ = 0x80; /* NumberDigits */
- *p++ = len_phone; /* length */
- for (l = 0; l < len_phone; l++)
- *p++ = pc->chan->setup.phone[l];
-
- if (len_sub)
- { *p++ = 0x04; /* called party subaddress */
- *p++ = len_sub - 2;
- while (*subp) *p++ = *subp++;
- }
-
- *p++ = 0x01; /* screening identifier */
- *p++ = 0x01;
- *p++ = pc->chan->setup.screen;
-
- l = p - tmp;
- if (!(skb = l3_alloc_skb(l))) return;
- skb_put_data(skb, tmp, l);
-
- l3_msg(pc->st, DL_DATA | REQUEST, skb);
-} /* l3dss1_redir_req */
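For readers tracing the encoding, a hedged annotation (not original source) of the bytes l3dss1_redir_req() produces for a deflection target of "1234" with no subaddress, screening value 0 and invoke id 1:

/*   1c 16  91  a1 13  02 01 01  02 01 0d
 *   30 0b  30 06  80 04 31 32 33 34  01 01 00
 *
 * Facility IE (length 22), remote operations protocol, invoke component
 * (length 19) with invoke id 1 and operation 0x0d (Call Deflect), a
 * sequence carrying the Deflected-to-UserNumber digits "1234", and the
 * screening identifier.
 */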
-
-/********************************************/
-/* handle deflection request in early state */
-/********************************************/
-static void l3dss1_redir_req_early(struct l3_process *pc, u_char pr, void *arg)
-{
- l3dss1_proceed_req(pc, pr, arg);
- l3dss1_redir_req(pc, pr, arg);
-} /* l3dss1_redir_req_early */
-
-/***********************************************/
-/* handle special commands for this protocol. */
-/* Examples are call independent services like */
-/* remote operations with dummy callref. */
-/***********************************************/
-static int l3dss1_cmd_global(struct PStack *st, isdn_ctrl *ic)
-{ u_char id;
- u_char temp[265];
- u_char *p = temp;
- int i, l, proc_len;
- struct sk_buff *skb;
- struct l3_process *pc = NULL;
-
- switch (ic->arg)
- { case DSS1_CMD_INVOKE:
- if (ic->parm.dss1_io.datalen < 0) return (-2); /* invalid parameter */
-
- for (proc_len = 1, i = ic->parm.dss1_io.proc >> 8; i; i++)
- i = i >> 8; /* add one byte */
- l = ic->parm.dss1_io.datalen + proc_len + 8; /* length excluding ie header */
- if (l > 255)
- return (-2); /* too long */
-
- if (!(id = new_invoke_id(st)))
-			return (0); /* first get an invoke id -> return if none available */
-
- i = -1;
- MsgHead(p, i, MT_FACILITY); /* build message head */
- *p++ = 0x1C; /* Facility IE */
- *p++ = l; /* length of ie */
- *p++ = 0x91; /* remote operations */
- *p++ = 0xA1; /* invoke */
- *p++ = l - 3; /* length of invoke */
- *p++ = 0x02; /* invoke id tag */
- *p++ = 0x01; /* length is 1 */
- *p++ = id; /* invoke id */
- *p++ = 0x02; /* operation */
- *p++ = proc_len; /* length of operation */
-
- for (i = proc_len; i; i--)
- *p++ = (ic->parm.dss1_io.proc >> (i - 1)) & 0xFF;
- memcpy(p, ic->parm.dss1_io.data, ic->parm.dss1_io.datalen); /* copy data */
- l = (p - temp) + ic->parm.dss1_io.datalen; /* total length */
-
- if (ic->parm.dss1_io.timeout > 0)
- if (!(pc = dss1_new_l3_process(st, -1)))
- { free_invoke_id(st, id);
- return (-2);
- }
- pc->prot.dss1.ll_id = ic->parm.dss1_io.ll_id; /* remember id */
- pc->prot.dss1.proc = ic->parm.dss1_io.proc; /* and procedure */
-
- if (!(skb = l3_alloc_skb(l)))
- { free_invoke_id(st, id);
- if (pc) dss1_release_l3_process(pc);
- return (-2);
- }
- skb_put_data(skb, temp, l);
-
- if (pc)
- { pc->prot.dss1.invoke_id = id; /* remember id */
- L3AddTimer(&pc->timer, ic->parm.dss1_io.timeout, CC_TDSS1_IO | REQUEST);
- }
-
- l3_msg(st, DL_DATA | REQUEST, skb);
- ic->parm.dss1_io.hl_id = id; /* return id */
- return (0);
-
- case DSS1_CMD_INVOKE_ABORT:
- if ((pc = l3dss1_search_dummy_proc(st, ic->parm.dss1_io.hl_id)))
- { L3DelTimer(&pc->timer); /* remove timer */
- dss1_release_l3_process(pc);
- return (0);
- }
- else
- { l3_debug(st, "l3dss1_cmd_global abort unknown id");
- return (-2);
- }
- break;
-
- default:
- l3_debug(st, "l3dss1_cmd_global unknown cmd 0x%lx", ic->arg);
- return (-1);
- } /* switch ic-> arg */
- return (-1);
-} /* l3dss1_cmd_global */
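A caller-side sketch for DSS1_CMD_INVOKE (hypothetical values; st is the stack pointer assumed in scope, and the higher layer would normally reach this handler through st->lli.l4l3_proto as installed in setstack_dss1() below):

u_char data[2] = { 0x00, 0x00 };	/* placeholder invoke arguments */
u_char hl_id;
isdn_ctrl ic;

ic.arg = DSS1_CMD_INVOKE;
ic.parm.dss1_io.proc = 0x0d;		/* operation value (example)          */
ic.parm.dss1_io.ll_id = 1;		/* caller's own reference             */
ic.parm.dss1_io.timeout = 4000;		/* same units as the T3xx timers      */
ic.parm.dss1_io.data = data;
ic.parm.dss1_io.datalen = sizeof(data);
if (l3dss1_cmd_global(st, &ic) == 0)	/* return codes as in the switch above */
	hl_id = ic.parm.dss1_io.hl_id;	/* handle for a later DSS1_CMD_INVOKE_ABORT */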
-
-static void
-l3dss1_io_timer(struct l3_process *pc)
-{ isdn_ctrl ic;
- struct IsdnCardState *cs = pc->st->l1.hardware;
-
- L3DelTimer(&pc->timer); /* remove timer */
-
- ic.driver = cs->myid;
- ic.command = ISDN_STAT_PROT;
- ic.arg = DSS1_STAT_INVOKE_ERR;
- ic.parm.dss1_io.hl_id = pc->prot.dss1.invoke_id;
- ic.parm.dss1_io.ll_id = pc->prot.dss1.ll_id;
- ic.parm.dss1_io.proc = pc->prot.dss1.proc;
- ic.parm.dss1_io.timeout = -1;
- ic.parm.dss1_io.datalen = 0;
- ic.parm.dss1_io.data = NULL;
- free_invoke_id(pc->st, pc->prot.dss1.invoke_id);
- pc->prot.dss1.invoke_id = 0; /* reset id */
-
- cs->iif.statcallb(&ic);
-
- dss1_release_l3_process(pc);
-} /* l3dss1_io_timer */
-
-static void
-l3dss1_release_ind(struct l3_process *pc, u_char pr, void *arg)
-{
- u_char *p;
- struct sk_buff *skb = arg;
- int callState = 0;
- p = skb->data;
-
- if ((p = findie(p, skb->len, IE_CALL_STATE, 0))) {
- p++;
- if (1 == *p++)
- callState = *p;
- }
- if (callState == 0) {
- /* ETS 300-104 7.6.1, 8.6.1, 10.6.1... and 16.1
- * set down layer 3 without sending any message
- */
- pc->st->l3.l3l4(pc->st, CC_RELEASE | INDICATION, pc);
- newl3state(pc, 0);
- dss1_release_l3_process(pc);
- } else {
- pc->st->l3.l3l4(pc->st, CC_IGNORE | INDICATION, pc);
- }
-}
-
-static void
-l3dss1_dummy(struct l3_process *pc, u_char pr, void *arg)
-{
-}
-
-static void
-l3dss1_t302(struct l3_process *pc, u_char pr, void *arg)
-{
- L3DelTimer(&pc->timer);
- pc->para.loc = 0;
- pc->para.cause = 28; /* invalid number */
- l3dss1_disconnect_req(pc, pr, NULL);
- pc->st->l3.l3l4(pc->st, CC_SETUP_ERR, pc);
-}
-
-static void
-l3dss1_t303(struct l3_process *pc, u_char pr, void *arg)
-{
- if (pc->N303 > 0) {
- pc->N303--;
- L3DelTimer(&pc->timer);
- l3dss1_setup_req(pc, pr, arg);
- } else {
- L3DelTimer(&pc->timer);
- l3dss1_message_cause(pc, MT_RELEASE_COMPLETE, 102);
- pc->st->l3.l3l4(pc->st, CC_NOSETUP_RSP, pc);
- dss1_release_l3_process(pc);
- }
-}
-
-static void
-l3dss1_t304(struct l3_process *pc, u_char pr, void *arg)
-{
- L3DelTimer(&pc->timer);
- pc->para.loc = 0;
- pc->para.cause = 102;
- l3dss1_disconnect_req(pc, pr, NULL);
- pc->st->l3.l3l4(pc->st, CC_SETUP_ERR, pc);
-
-}
-
-static void
-l3dss1_t305(struct l3_process *pc, u_char pr, void *arg)
-{
- u_char tmp[16];
- u_char *p = tmp;
- int l;
- struct sk_buff *skb;
- u_char cause = 16;
-
- L3DelTimer(&pc->timer);
- if (pc->para.cause != NO_CAUSE)
- cause = pc->para.cause;
-
- MsgHead(p, pc->callref, MT_RELEASE);
-
- *p++ = IE_CAUSE;
- *p++ = 0x2;
- *p++ = 0x80;
- *p++ = cause | 0x80;
-
- l = p - tmp;
- if (!(skb = l3_alloc_skb(l)))
- return;
- skb_put_data(skb, tmp, l);
- newl3state(pc, 19);
- l3_msg(pc->st, DL_DATA | REQUEST, skb);
- L3AddTimer(&pc->timer, T308, CC_T308_1);
-}
-
-static void
-l3dss1_t310(struct l3_process *pc, u_char pr, void *arg)
-{
- L3DelTimer(&pc->timer);
- pc->para.loc = 0;
- pc->para.cause = 102;
- l3dss1_disconnect_req(pc, pr, NULL);
- pc->st->l3.l3l4(pc->st, CC_SETUP_ERR, pc);
-}
-
-static void
-l3dss1_t313(struct l3_process *pc, u_char pr, void *arg)
-{
- L3DelTimer(&pc->timer);
- pc->para.loc = 0;
- pc->para.cause = 102;
- l3dss1_disconnect_req(pc, pr, NULL);
- pc->st->l3.l3l4(pc->st, CC_CONNECT_ERR, pc);
-}
-
-static void
-l3dss1_t308_1(struct l3_process *pc, u_char pr, void *arg)
-{
- newl3state(pc, 19);
- L3DelTimer(&pc->timer);
- l3dss1_message(pc, MT_RELEASE);
- L3AddTimer(&pc->timer, T308, CC_T308_2);
-}
-
-static void
-l3dss1_t308_2(struct l3_process *pc, u_char pr, void *arg)
-{
- L3DelTimer(&pc->timer);
- pc->st->l3.l3l4(pc->st, CC_RELEASE_ERR, pc);
- dss1_release_l3_process(pc);
-}
-
-static void
-l3dss1_t318(struct l3_process *pc, u_char pr, void *arg)
-{
- L3DelTimer(&pc->timer);
- pc->para.cause = 102; /* Timer expiry */
- pc->para.loc = 0; /* local */
- pc->st->l3.l3l4(pc->st, CC_RESUME_ERR, pc);
- newl3state(pc, 19);
- l3dss1_message(pc, MT_RELEASE);
- L3AddTimer(&pc->timer, T308, CC_T308_1);
-}
-
-static void
-l3dss1_t319(struct l3_process *pc, u_char pr, void *arg)
-{
- L3DelTimer(&pc->timer);
- pc->para.cause = 102; /* Timer expiry */
- pc->para.loc = 0; /* local */
- pc->st->l3.l3l4(pc->st, CC_SUSPEND_ERR, pc);
- newl3state(pc, 10);
-}
-
-static void
-l3dss1_restart(struct l3_process *pc, u_char pr, void *arg)
-{
- L3DelTimer(&pc->timer);
- pc->st->l3.l3l4(pc->st, CC_RELEASE | INDICATION, pc);
- dss1_release_l3_process(pc);
-}
-
-static void
-l3dss1_status(struct l3_process *pc, u_char pr, void *arg)
-{
- u_char *p;
- struct sk_buff *skb = arg;
- int ret;
- u_char cause = 0, callState = 0;
-
- if ((ret = l3dss1_get_cause(pc, skb))) {
- if (pc->debug & L3_DEB_WARN)
- l3_debug(pc->st, "STATUS get_cause ret(%d)", ret);
- if (ret < 0)
- cause = 96;
- else if (ret > 0)
- cause = 100;
- }
- if ((p = findie(skb->data, skb->len, IE_CALL_STATE, 0))) {
- p++;
- if (1 == *p++) {
- callState = *p;
- if (!ie_in_set(pc, *p, l3_valid_states))
- cause = 100;
- } else
- cause = 100;
- } else
- cause = 96;
- if (!cause) { /* no error before */
- ret = check_infoelements(pc, skb, ie_STATUS);
- if (ERR_IE_COMPREHENSION == ret)
- cause = 96;
- else if (ERR_IE_UNRECOGNIZED == ret)
- cause = 99;
- }
- if (cause) {
- u_char tmp;
-
- if (pc->debug & L3_DEB_WARN)
- l3_debug(pc->st, "STATUS error(%d/%d)", ret, cause);
- tmp = pc->para.cause;
- pc->para.cause = cause;
- l3dss1_status_send(pc, 0, NULL);
- if (cause == 99)
- pc->para.cause = tmp;
- else
- return;
- }
- cause = pc->para.cause;
- if (((cause & 0x7f) == 111) && (callState == 0)) {
- /* ETS 300-104 7.6.1, 8.6.1, 10.6.1...
- * if received MT_STATUS with cause == 111 and call
- * state == 0, then we must set down layer 3
- */
- pc->st->l3.l3l4(pc->st, CC_RELEASE | INDICATION, pc);
- newl3state(pc, 0);
- dss1_release_l3_process(pc);
- }
-}
-
-static void
-l3dss1_facility(struct l3_process *pc, u_char pr, void *arg)
-{
- struct sk_buff *skb = arg;
- int ret;
-
- ret = check_infoelements(pc, skb, ie_FACILITY);
- l3dss1_std_ie_err(pc, ret);
- {
- u_char *p;
- if ((p = findie(skb->data, skb->len, IE_FACILITY, 0)))
- l3dss1_parse_facility(pc->st, pc, pc->callref, p);
- }
-}
-
-static void
-l3dss1_suspend_req(struct l3_process *pc, u_char pr, void *arg)
-{
- struct sk_buff *skb;
- u_char tmp[32];
- u_char *p = tmp;
- u_char i, l;
- u_char *msg = pc->chan->setup.phone;
-
- MsgHead(p, pc->callref, MT_SUSPEND);
- l = *msg++;
- if (l && (l <= 10)) { /* Max length 10 octets */
- *p++ = IE_CALL_ID;
- *p++ = l;
- for (i = 0; i < l; i++)
- *p++ = *msg++;
- } else if (l) {
- l3_debug(pc->st, "SUS wrong CALL_ID len %d", l);
- return;
- }
- l = p - tmp;
- if (!(skb = l3_alloc_skb(l)))
- return;
- skb_put_data(skb, tmp, l);
- l3_msg(pc->st, DL_DATA | REQUEST, skb);
- newl3state(pc, 15);
- L3AddTimer(&pc->timer, T319, CC_T319);
-}
-
-static void
-l3dss1_suspend_ack(struct l3_process *pc, u_char pr, void *arg)
-{
- struct sk_buff *skb = arg;
- int ret;
-
- L3DelTimer(&pc->timer);
- newl3state(pc, 0);
- pc->para.cause = NO_CAUSE;
- pc->st->l3.l3l4(pc->st, CC_SUSPEND | CONFIRM, pc);
- /* We don't handle suspend_ack for IE errors now */
- if ((ret = check_infoelements(pc, skb, ie_SUSPEND_ACKNOWLEDGE)))
- if (pc->debug & L3_DEB_WARN)
- l3_debug(pc->st, "SUSPACK check ie(%d)", ret);
- dss1_release_l3_process(pc);
-}
-
-static void
-l3dss1_suspend_rej(struct l3_process *pc, u_char pr, void *arg)
-{
- struct sk_buff *skb = arg;
- int ret;
-
- if ((ret = l3dss1_get_cause(pc, skb))) {
- if (pc->debug & L3_DEB_WARN)
- l3_debug(pc->st, "SUSP_REJ get_cause ret(%d)", ret);
- if (ret < 0)
- pc->para.cause = 96;
- else
- pc->para.cause = 100;
- l3dss1_status_send(pc, pr, NULL);
- return;
- }
- ret = check_infoelements(pc, skb, ie_SUSPEND_REJECT);
- if (ERR_IE_COMPREHENSION == ret) {
- l3dss1_std_ie_err(pc, ret);
- return;
- }
- L3DelTimer(&pc->timer);
- pc->st->l3.l3l4(pc->st, CC_SUSPEND_ERR, pc);
- newl3state(pc, 10);
-	if (ret) /* STATUS for non-mandatory IE errors after actions are taken */
- l3dss1_std_ie_err(pc, ret);
-}
-
-static void
-l3dss1_resume_req(struct l3_process *pc, u_char pr, void *arg)
-{
- struct sk_buff *skb;
- u_char tmp[32];
- u_char *p = tmp;
- u_char i, l;
- u_char *msg = pc->para.setup.phone;
-
- MsgHead(p, pc->callref, MT_RESUME);
-
- l = *msg++;
- if (l && (l <= 10)) { /* Max length 10 octets */
- *p++ = IE_CALL_ID;
- *p++ = l;
- for (i = 0; i < l; i++)
- *p++ = *msg++;
- } else if (l) {
- l3_debug(pc->st, "RES wrong CALL_ID len %d", l);
- return;
- }
- l = p - tmp;
- if (!(skb = l3_alloc_skb(l)))
- return;
- skb_put_data(skb, tmp, l);
- l3_msg(pc->st, DL_DATA | REQUEST, skb);
- newl3state(pc, 17);
- L3AddTimer(&pc->timer, T318, CC_T318);
-}
-
-static void
-l3dss1_resume_ack(struct l3_process *pc, u_char pr, void *arg)
-{
- struct sk_buff *skb = arg;
- int id, ret;
-
- if ((id = l3dss1_get_channel_id(pc, skb)) > 0) {
- if ((0 == id) || ((3 == id) && (0x10 == pc->para.moderate))) {
- if (pc->debug & L3_DEB_WARN)
- l3_debug(pc->st, "resume ack with wrong chid %x", id);
- pc->para.cause = 100;
- l3dss1_status_send(pc, pr, NULL);
- return;
- }
- pc->para.bchannel = id;
- } else if (1 == pc->state) {
- if (pc->debug & L3_DEB_WARN)
- l3_debug(pc->st, "resume ack without chid (ret %d)", id);
- pc->para.cause = 96;
- l3dss1_status_send(pc, pr, NULL);
- return;
- }
- ret = check_infoelements(pc, skb, ie_RESUME_ACKNOWLEDGE);
- if (ERR_IE_COMPREHENSION == ret) {
- l3dss1_std_ie_err(pc, ret);
- return;
- }
- L3DelTimer(&pc->timer);
- pc->st->l3.l3l4(pc->st, CC_RESUME | CONFIRM, pc);
- newl3state(pc, 10);
-	if (ret) /* STATUS for non-mandatory IE errors after actions are taken */
- l3dss1_std_ie_err(pc, ret);
-}
-
-static void
-l3dss1_resume_rej(struct l3_process *pc, u_char pr, void *arg)
-{
- struct sk_buff *skb = arg;
- int ret;
-
- if ((ret = l3dss1_get_cause(pc, skb))) {
- if (pc->debug & L3_DEB_WARN)
- l3_debug(pc->st, "RES_REJ get_cause ret(%d)", ret);
- if (ret < 0)
- pc->para.cause = 96;
- else
- pc->para.cause = 100;
- l3dss1_status_send(pc, pr, NULL);
- return;
- }
- ret = check_infoelements(pc, skb, ie_RESUME_REJECT);
- if (ERR_IE_COMPREHENSION == ret) {
- l3dss1_std_ie_err(pc, ret);
- return;
- }
- L3DelTimer(&pc->timer);
- pc->st->l3.l3l4(pc->st, CC_RESUME_ERR, pc);
- newl3state(pc, 0);
-	if (ret) /* STATUS for non-mandatory IE errors after actions are taken */
- l3dss1_std_ie_err(pc, ret);
- dss1_release_l3_process(pc);
-}
-
-static void
-l3dss1_global_restart(struct l3_process *pc, u_char pr, void *arg)
-{
- u_char tmp[32];
- u_char *p;
- u_char ri, ch = 0, chan = 0;
- int l;
- struct sk_buff *skb = arg;
- struct l3_process *up;
-
- newl3state(pc, 2);
- L3DelTimer(&pc->timer);
- p = skb->data;
- if ((p = findie(p, skb->len, IE_RESTART_IND, 0))) {
- ri = p[2];
- l3_debug(pc->st, "Restart %x", ri);
- } else {
- l3_debug(pc->st, "Restart without restart IE");
- ri = 0x86;
- }
- p = skb->data;
- if ((p = findie(p, skb->len, IE_CHANNEL_ID, 0))) {
- chan = p[2] & 3;
- ch = p[2];
- if (pc->st->l3.debug)
- l3_debug(pc->st, "Restart for channel %d", chan);
- }
- newl3state(pc, 2);
- up = pc->st->l3.proc;
- while (up) {
- if ((ri & 7) == 7)
- up->st->lli.l4l3(up->st, CC_RESTART | REQUEST, up);
- else if (up->para.bchannel == chan)
- up->st->lli.l4l3(up->st, CC_RESTART | REQUEST, up);
- up = up->next;
- }
- p = tmp;
- MsgHead(p, pc->callref, MT_RESTART_ACKNOWLEDGE);
- if (chan) {
- *p++ = IE_CHANNEL_ID;
- *p++ = 1;
- *p++ = ch | 0x80;
- }
- *p++ = 0x79; /* RESTART Ind */
- *p++ = 1;
- *p++ = ri;
- l = p - tmp;
- if (!(skb = l3_alloc_skb(l)))
- return;
- skb_put_data(skb, tmp, l);
- newl3state(pc, 0);
- l3_msg(pc->st, DL_DATA | REQUEST, skb);
-}
-
-static void
-l3dss1_dl_reset(struct l3_process *pc, u_char pr, void *arg)
-{
- pc->para.cause = 0x29; /* Temporary failure */
- pc->para.loc = 0;
- l3dss1_disconnect_req(pc, pr, NULL);
- pc->st->l3.l3l4(pc->st, CC_SETUP_ERR, pc);
-}
-
-static void
-l3dss1_dl_release(struct l3_process *pc, u_char pr, void *arg)
-{
- newl3state(pc, 0);
- pc->para.cause = 0x1b; /* Destination out of order */
- pc->para.loc = 0;
- pc->st->l3.l3l4(pc->st, CC_RELEASE | INDICATION, pc);
- release_l3_process(pc);
-}
-
-static void
-l3dss1_dl_reestablish(struct l3_process *pc, u_char pr, void *arg)
-{
- L3DelTimer(&pc->timer);
- L3AddTimer(&pc->timer, T309, CC_T309);
- l3_msg(pc->st, DL_ESTABLISH | REQUEST, NULL);
-}
-
-static void
-l3dss1_dl_reest_status(struct l3_process *pc, u_char pr, void *arg)
-{
- L3DelTimer(&pc->timer);
-
- pc->para.cause = 0x1F; /* normal, unspecified */
- l3dss1_status_send(pc, 0, NULL);
-}
-
-/* *INDENT-OFF* */
-static struct stateentry downstatelist[] =
-{
- {SBIT(0),
- CC_SETUP | REQUEST, l3dss1_setup_req},
- {SBIT(0),
- CC_RESUME | REQUEST, l3dss1_resume_req},
- {SBIT(1) | SBIT(2) | SBIT(3) | SBIT(4) | SBIT(6) | SBIT(7) | SBIT(8) | SBIT(9) | SBIT(10) | SBIT(25),
- CC_DISCONNECT | REQUEST, l3dss1_disconnect_req},
- {SBIT(12),
- CC_RELEASE | REQUEST, l3dss1_release_req},
- {ALL_STATES,
- CC_RESTART | REQUEST, l3dss1_restart},
- {SBIT(6) | SBIT(25),
- CC_IGNORE | REQUEST, l3dss1_reset},
- {SBIT(6) | SBIT(25),
- CC_REJECT | REQUEST, l3dss1_reject_req},
- {SBIT(6) | SBIT(25),
- CC_PROCEED_SEND | REQUEST, l3dss1_proceed_req},
- {SBIT(6),
- CC_MORE_INFO | REQUEST, l3dss1_setup_ack_req},
- {SBIT(25),
- CC_MORE_INFO | REQUEST, l3dss1_dummy},
- {SBIT(6) | SBIT(9) | SBIT(25),
- CC_ALERTING | REQUEST, l3dss1_alert_req},
- {SBIT(6) | SBIT(7) | SBIT(9) | SBIT(25),
- CC_SETUP | RESPONSE, l3dss1_setup_rsp},
- {SBIT(10),
- CC_SUSPEND | REQUEST, l3dss1_suspend_req},
- {SBIT(7) | SBIT(9) | SBIT(25),
- CC_REDIR | REQUEST, l3dss1_redir_req},
- {SBIT(6),
- CC_REDIR | REQUEST, l3dss1_redir_req_early},
- {SBIT(9) | SBIT(25),
- CC_DISCONNECT | REQUEST, l3dss1_disconnect_req},
- {SBIT(25),
- CC_T302, l3dss1_t302},
- {SBIT(1),
- CC_T303, l3dss1_t303},
- {SBIT(2),
- CC_T304, l3dss1_t304},
- {SBIT(3),
- CC_T310, l3dss1_t310},
- {SBIT(8),
- CC_T313, l3dss1_t313},
- {SBIT(11),
- CC_T305, l3dss1_t305},
- {SBIT(15),
- CC_T319, l3dss1_t319},
- {SBIT(17),
- CC_T318, l3dss1_t318},
- {SBIT(19),
- CC_T308_1, l3dss1_t308_1},
- {SBIT(19),
- CC_T308_2, l3dss1_t308_2},
- {SBIT(10),
- CC_T309, l3dss1_dl_release},
-};
-
-static struct stateentry datastatelist[] =
-{
- {ALL_STATES,
- MT_STATUS_ENQUIRY, l3dss1_status_enq},
- {ALL_STATES,
- MT_FACILITY, l3dss1_facility},
- {SBIT(19),
- MT_STATUS, l3dss1_release_ind},
- {ALL_STATES,
- MT_STATUS, l3dss1_status},
- {SBIT(0),
- MT_SETUP, l3dss1_setup},
- {SBIT(6) | SBIT(7) | SBIT(8) | SBIT(9) | SBIT(10) | SBIT(11) | SBIT(12) |
- SBIT(15) | SBIT(17) | SBIT(19) | SBIT(25),
- MT_SETUP, l3dss1_dummy},
- {SBIT(1) | SBIT(2),
- MT_CALL_PROCEEDING, l3dss1_call_proc},
- {SBIT(1),
- MT_SETUP_ACKNOWLEDGE, l3dss1_setup_ack},
- {SBIT(2) | SBIT(3),
- MT_ALERTING, l3dss1_alerting},
- {SBIT(2) | SBIT(3),
- MT_PROGRESS, l3dss1_progress},
- {SBIT(2) | SBIT(3) | SBIT(4) | SBIT(7) | SBIT(8) | SBIT(9) | SBIT(10) |
- SBIT(11) | SBIT(12) | SBIT(15) | SBIT(17) | SBIT(19) | SBIT(25),
- MT_INFORMATION, l3dss1_information},
- {SBIT(10) | SBIT(11) | SBIT(15),
- MT_NOTIFY, l3dss1_notify},
- {SBIT(0) | SBIT(1) | SBIT(2) | SBIT(3) | SBIT(4) | SBIT(7) | SBIT(8) | SBIT(10) |
- SBIT(11) | SBIT(12) | SBIT(15) | SBIT(17) | SBIT(19) | SBIT(25),
- MT_RELEASE_COMPLETE, l3dss1_release_cmpl},
- {SBIT(1) | SBIT(2) | SBIT(3) | SBIT(4) | SBIT(7) | SBIT(8) | SBIT(9) | SBIT(10) | SBIT(11) | SBIT(12) | SBIT(15) | SBIT(17) | SBIT(25),
- MT_RELEASE, l3dss1_release},
- {SBIT(19), MT_RELEASE, l3dss1_release_ind},
- {SBIT(1) | SBIT(2) | SBIT(3) | SBIT(4) | SBIT(7) | SBIT(8) | SBIT(9) | SBIT(10) | SBIT(11) | SBIT(15) | SBIT(17) | SBIT(25),
- MT_DISCONNECT, l3dss1_disconnect},
- {SBIT(19),
- MT_DISCONNECT, l3dss1_dummy},
- {SBIT(1) | SBIT(2) | SBIT(3) | SBIT(4),
- MT_CONNECT, l3dss1_connect},
- {SBIT(8),
- MT_CONNECT_ACKNOWLEDGE, l3dss1_connect_ack},
- {SBIT(15),
- MT_SUSPEND_ACKNOWLEDGE, l3dss1_suspend_ack},
- {SBIT(15),
- MT_SUSPEND_REJECT, l3dss1_suspend_rej},
- {SBIT(17),
- MT_RESUME_ACKNOWLEDGE, l3dss1_resume_ack},
- {SBIT(17),
- MT_RESUME_REJECT, l3dss1_resume_rej},
-};
-
-static struct stateentry globalmes_list[] =
-{
- {ALL_STATES,
- MT_STATUS, l3dss1_status},
- {SBIT(0),
- MT_RESTART, l3dss1_global_restart},
-/* {SBIT(1),
- MT_RESTART_ACKNOWLEDGE, l3dss1_restart_ack},
-*/
-};
-
-static struct stateentry manstatelist[] =
-{
- {SBIT(2),
- DL_ESTABLISH | INDICATION, l3dss1_dl_reset},
- {SBIT(10),
- DL_ESTABLISH | CONFIRM, l3dss1_dl_reest_status},
- {SBIT(10),
- DL_RELEASE | INDICATION, l3dss1_dl_reestablish},
- {ALL_STATES,
- DL_RELEASE | INDICATION, l3dss1_dl_release},
-};
-
-/* *INDENT-ON* */
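A note on how the tables above are encoded (an annotation, not original source; SBIT(s) is assumed to expand to the usual HiSax one-bit state mask, i.e. (1 << (s))):

/* Example reading of one datastatelist entry, under that assumption:
 *   {SBIT(1) | SBIT(2), MT_CALL_PROCEEDING, l3dss1_call_proc}
 * means: while the call is in state 1 or 2, an incoming CALL PROCEEDING
 * message is routed to l3dss1_call_proc(); in any other state it falls
 * through to the "unhandled" branch of dss1up() below.
 */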
-
-
-static void
-global_handler(struct PStack *st, int mt, struct sk_buff *skb)
-{
- u_char tmp[16];
- u_char *p = tmp;
- int l;
- int i;
- struct l3_process *proc = st->l3.global;
-
- proc->callref = skb->data[2]; /* cr flag */
- for (i = 0; i < ARRAY_SIZE(globalmes_list); i++)
- if ((mt == globalmes_list[i].primitive) &&
- ((1 << proc->state) & globalmes_list[i].state))
- break;
- if (i == ARRAY_SIZE(globalmes_list)) {
- if (st->l3.debug & L3_DEB_STATE) {
- l3_debug(st, "dss1 global state %d mt %x unhandled",
- proc->state, mt);
- }
- MsgHead(p, proc->callref, MT_STATUS);
- *p++ = IE_CAUSE;
- *p++ = 0x2;
- *p++ = 0x80;
- *p++ = 81 | 0x80; /* invalid cr */
- *p++ = 0x14; /* CallState */
- *p++ = 0x1;
- *p++ = proc->state & 0x3f;
- l = p - tmp;
- if (!(skb = l3_alloc_skb(l)))
- return;
- skb_put_data(skb, tmp, l);
- l3_msg(proc->st, DL_DATA | REQUEST, skb);
- } else {
- if (st->l3.debug & L3_DEB_STATE) {
- l3_debug(st, "dss1 global %d mt %x",
- proc->state, mt);
- }
- globalmes_list[i].rout(proc, mt, skb);
- }
-}
-
-static void
-dss1up(struct PStack *st, int pr, void *arg)
-{
- int i, mt, cr, callState;
- char *ptr;
- u_char *p;
- struct sk_buff *skb = arg;
- struct l3_process *proc;
-
- switch (pr) {
- case (DL_DATA | INDICATION):
- case (DL_UNIT_DATA | INDICATION):
- break;
- case (DL_ESTABLISH | CONFIRM):
- case (DL_ESTABLISH | INDICATION):
- case (DL_RELEASE | INDICATION):
- case (DL_RELEASE | CONFIRM):
- l3_msg(st, pr, arg);
- return;
- break;
- default:
- printk(KERN_ERR "HiSax dss1up unknown pr=%04x\n", pr);
- return;
- }
- if (skb->len < 3) {
- l3_debug(st, "dss1up frame too short(%d)", skb->len);
- dev_kfree_skb(skb);
- return;
- }
-
- if (skb->data[0] != PROTO_DIS_EURO) {
- if (st->l3.debug & L3_DEB_PROTERR) {
- l3_debug(st, "dss1up%sunexpected discriminator %x message len %d",
- (pr == (DL_DATA | INDICATION)) ? " " : "(broadcast) ",
- skb->data[0], skb->len);
- }
- dev_kfree_skb(skb);
- return;
- }
- cr = getcallref(skb->data);
- if (skb->len < ((skb->data[1] & 0x0f) + 3)) {
- l3_debug(st, "dss1up frame too short(%d)", skb->len);
- dev_kfree_skb(skb);
- return;
- }
- mt = skb->data[skb->data[1] + 2];
- if (st->l3.debug & L3_DEB_STATE)
- l3_debug(st, "dss1up cr %d", cr);
- if (cr == -2) { /* wrong Callref */
- if (st->l3.debug & L3_DEB_WARN)
- l3_debug(st, "dss1up wrong Callref");
- dev_kfree_skb(skb);
- return;
- } else if (cr == -1) { /* Dummy Callref */
- if (mt == MT_FACILITY)
- if ((p = findie(skb->data, skb->len, IE_FACILITY, 0))) {
- l3dss1_parse_facility(st, NULL,
- (pr == (DL_DATA | INDICATION)) ? -1 : -2, p);
- dev_kfree_skb(skb);
- return;
- }
- if (st->l3.debug & L3_DEB_WARN)
- l3_debug(st, "dss1up dummy Callref (no facility msg or ie)");
- dev_kfree_skb(skb);
- return;
- } else if ((((skb->data[1] & 0x0f) == 1) && (0 == (cr & 0x7f))) ||
- (((skb->data[1] & 0x0f) == 2) && (0 == (cr & 0x7fff)))) { /* Global CallRef */
- if (st->l3.debug & L3_DEB_STATE)
- l3_debug(st, "dss1up Global CallRef");
- global_handler(st, mt, skb);
- dev_kfree_skb(skb);
- return;
- } else if (!(proc = getl3proc(st, cr))) {
-		/* No transaction process exists, that means no call with
- * this callreference is active
- */
- if (mt == MT_SETUP) {
- /* Setup creates a new transaction process */
- if (skb->data[2] & 0x80) {
- /* Setup with wrong CREF flag */
- if (st->l3.debug & L3_DEB_STATE)
- l3_debug(st, "dss1up wrong CRef flag");
- dev_kfree_skb(skb);
- return;
- }
- if (!(proc = dss1_new_l3_process(st, cr))) {
-				/* Maybe we should answer with RELEASE_COMPLETE and
-				 * CAUSE 0x2f "Resource unavailable", but this
-				 * needs a new_l3_process too ... arghh
- */
- dev_kfree_skb(skb);
- return;
- }
- } else if (mt == MT_STATUS) {
- if ((ptr = findie(skb->data, skb->len, IE_CAUSE, 0)) != NULL) {
- ptr++;
- if (*ptr++ == 2)
- ptr++;
- }
- callState = 0;
- if ((ptr = findie(skb->data, skb->len, IE_CALL_STATE, 0)) != NULL) {
- ptr++;
- if (*ptr++ == 2)
- ptr++;
- callState = *ptr;
- }
- /* ETS 300-104 part 2.4.1
- * if setup has not been made and a message type
- * MT_STATUS is received with call state == 0,
- * we must send nothing
- */
- if (callState != 0) {
- /* ETS 300-104 part 2.4.2
- * if setup has not been made and a message type
- * MT_STATUS is received with call state != 0,
- * we must send MT_RELEASE_COMPLETE cause 101
- */
- if ((proc = dss1_new_l3_process(st, cr))) {
- proc->para.cause = 101;
- l3dss1_msg_without_setup(proc, 0, NULL);
- }
- }
- dev_kfree_skb(skb);
- return;
- } else if (mt == MT_RELEASE_COMPLETE) {
- dev_kfree_skb(skb);
- return;
- } else {
- /* ETS 300-104 part 2
- * if setup has not been made and a message type
- * (except MT_SETUP and RELEASE_COMPLETE) is received,
- * we must send MT_RELEASE_COMPLETE cause 81 */
- dev_kfree_skb(skb);
- if ((proc = dss1_new_l3_process(st, cr))) {
- proc->para.cause = 81;
- l3dss1_msg_without_setup(proc, 0, NULL);
- }
- return;
- }
- }
- if (l3dss1_check_messagetype_validity(proc, mt, skb)) {
- dev_kfree_skb(skb);
- return;
- }
- if ((p = findie(skb->data, skb->len, IE_DISPLAY, 0)) != NULL)
- l3dss1_deliver_display(proc, pr, p); /* Display IE included */
- for (i = 0; i < ARRAY_SIZE(datastatelist); i++)
- if ((mt == datastatelist[i].primitive) &&
- ((1 << proc->state) & datastatelist[i].state))
- break;
- if (i == ARRAY_SIZE(datastatelist)) {
- if (st->l3.debug & L3_DEB_STATE) {
- l3_debug(st, "dss1up%sstate %d mt %#x unhandled",
- (pr == (DL_DATA | INDICATION)) ? " " : "(broadcast) ",
- proc->state, mt);
- }
- if ((MT_RELEASE_COMPLETE != mt) && (MT_RELEASE != mt)) {
- proc->para.cause = 101;
- l3dss1_status_send(proc, pr, skb);
- }
- } else {
- if (st->l3.debug & L3_DEB_STATE) {
- l3_debug(st, "dss1up%sstate %d mt %x",
- (pr == (DL_DATA | INDICATION)) ? " " : "(broadcast) ",
- proc->state, mt);
- }
- datastatelist[i].rout(proc, pr, skb);
- }
- dev_kfree_skb(skb);
- return;
-}
-
-static void
-dss1down(struct PStack *st, int pr, void *arg)
-{
- int i, cr;
- struct l3_process *proc;
- struct Channel *chan;
-
- if ((DL_ESTABLISH | REQUEST) == pr) {
- l3_msg(st, pr, NULL);
- return;
- } else if (((CC_SETUP | REQUEST) == pr) || ((CC_RESUME | REQUEST) == pr)) {
- chan = arg;
- cr = newcallref();
- cr |= 0x80;
- if ((proc = dss1_new_l3_process(st, cr))) {
- proc->chan = chan;
- chan->proc = proc;
- memcpy(&proc->para.setup, &chan->setup, sizeof(setup_parm));
- proc->callref = cr;
- }
- } else {
- proc = arg;
- }
- if (!proc) {
- printk(KERN_ERR "HiSax dss1down without proc pr=%04x\n", pr);
- return;
- }
-
- if (pr == (CC_TDSS1_IO | REQUEST)) {
- l3dss1_io_timer(proc); /* timer expires */
- return;
- }
-
- for (i = 0; i < ARRAY_SIZE(downstatelist); i++)
- if ((pr == downstatelist[i].primitive) &&
- ((1 << proc->state) & downstatelist[i].state))
- break;
- if (i == ARRAY_SIZE(downstatelist)) {
- if (st->l3.debug & L3_DEB_STATE) {
- l3_debug(st, "dss1down state %d prim %#x unhandled",
- proc->state, pr);
- }
- } else {
- if (st->l3.debug & L3_DEB_STATE) {
- l3_debug(st, "dss1down state %d prim %#x",
- proc->state, pr);
- }
- downstatelist[i].rout(proc, pr, arg);
- }
-}
-
-static void
-dss1man(struct PStack *st, int pr, void *arg)
-{
- int i;
- struct l3_process *proc = arg;
-
- if (!proc) {
- printk(KERN_ERR "HiSax dss1man without proc pr=%04x\n", pr);
- return;
- }
- for (i = 0; i < ARRAY_SIZE(manstatelist); i++)
- if ((pr == manstatelist[i].primitive) &&
- ((1 << proc->state) & manstatelist[i].state))
- break;
- if (i == ARRAY_SIZE(manstatelist)) {
- if (st->l3.debug & L3_DEB_STATE) {
- l3_debug(st, "cr %d dss1man state %d prim %#x unhandled",
- proc->callref & 0x7f, proc->state, pr);
- }
- } else {
- if (st->l3.debug & L3_DEB_STATE) {
- l3_debug(st, "cr %d dss1man state %d prim %#x",
- proc->callref & 0x7f, proc->state, pr);
- }
- manstatelist[i].rout(proc, pr, arg);
- }
-}
-
-void
-setstack_dss1(struct PStack *st)
-{
- char tmp[64];
- int i;
-
- st->lli.l4l3 = dss1down;
- st->lli.l4l3_proto = l3dss1_cmd_global;
- st->l2.l2l3 = dss1up;
- st->l3.l3ml3 = dss1man;
- st->l3.N303 = 1;
- st->prot.dss1.last_invoke_id = 0;
- st->prot.dss1.invoke_used[0] = 1; /* Bit 0 must always be set to 1 */
- i = 1;
- while (i < 32)
- st->prot.dss1.invoke_used[i++] = 0;
-
- if (!(st->l3.global = kmalloc(sizeof(struct l3_process), GFP_ATOMIC))) {
- printk(KERN_ERR "HiSax can't get memory for dss1 global CR\n");
- } else {
- st->l3.global->state = 0;
- st->l3.global->callref = 0;
- st->l3.global->next = NULL;
- st->l3.global->debug = L3_DEB_WARN;
- st->l3.global->st = st;
- st->l3.global->N303 = 1;
- st->l3.global->prot.dss1.invoke_id = 0;
-
- L3InitTimer(st->l3.global, &st->l3.global->timer);
- }
- strcpy(tmp, dss1_revision);
- printk(KERN_INFO "HiSax: DSS1 Rev. %s\n", HiSax_getrev(tmp));
-}
diff --git a/drivers/isdn/hisax/l3dss1.h b/drivers/isdn/hisax/l3dss1.h
deleted file mode 100644
index a7807e8a94f1..000000000000
--- a/drivers/isdn/hisax/l3dss1.h
+++ /dev/null
@@ -1,124 +0,0 @@
-/* $Id: l3dss1.h,v 1.10.6.2 2001/09/23 22:24:50 kai Exp $
- *
- * DSS1 (Euro) D-channel protocol defines
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#ifndef l3dss1_process
-
-#define T302 15000
-#define T303 4000
-#define T304 30000
-#define T305 30000
-#define T308 4000
-/* for layer 1 certification T309 < layer1 T3 (e.g. 4000) */
-/* This makes some tests easier and quicker */
-#define T309 40000
-#define T310 30000
-#define T313 4000
-#define T318 4000
-#define T319 4000
-
-/*
- * Message-Types
- */
-
-#define MT_ALERTING 0x01
-#define MT_CALL_PROCEEDING 0x02
-#define MT_CONNECT 0x07
-#define MT_CONNECT_ACKNOWLEDGE 0x0f
-#define MT_PROGRESS 0x03
-#define MT_SETUP 0x05
-#define MT_SETUP_ACKNOWLEDGE 0x0d
-#define MT_RESUME 0x26
-#define MT_RESUME_ACKNOWLEDGE 0x2e
-#define MT_RESUME_REJECT 0x22
-#define MT_SUSPEND 0x25
-#define MT_SUSPEND_ACKNOWLEDGE 0x2d
-#define MT_SUSPEND_REJECT 0x21
-#define MT_USER_INFORMATION 0x20
-#define MT_DISCONNECT 0x45
-#define MT_RELEASE 0x4d
-#define MT_RELEASE_COMPLETE 0x5a
-#define MT_RESTART 0x46
-#define MT_RESTART_ACKNOWLEDGE 0x4e
-#define MT_SEGMENT 0x60
-#define MT_CONGESTION_CONTROL 0x79
-#define MT_INFORMATION 0x7b
-#define MT_FACILITY 0x62
-#define MT_NOTIFY 0x6e
-#define MT_STATUS 0x7d
-#define MT_STATUS_ENQUIRY 0x75
-
-#define IE_SEGMENT 0x00
-#define IE_BEARER 0x04
-#define IE_CAUSE 0x08
-#define IE_CALL_ID 0x10
-#define IE_CALL_STATE 0x14
-#define IE_CHANNEL_ID 0x18
-#define IE_FACILITY 0x1c
-#define IE_PROGRESS 0x1e
-#define IE_NET_FAC 0x20
-#define IE_NOTIFY 0x27
-#define IE_DISPLAY 0x28
-#define IE_DATE 0x29
-#define IE_KEYPAD 0x2c
-#define IE_SIGNAL 0x34
-#define IE_INFORATE 0x40
-#define IE_E2E_TDELAY 0x42
-#define IE_TDELAY_SEL 0x43
-#define IE_PACK_BINPARA 0x44
-#define IE_PACK_WINSIZE 0x45
-#define IE_PACK_SIZE 0x46
-#define IE_CUG 0x47
-#define IE_REV_CHARGE 0x4a
-#define IE_CONNECT_PN 0x4c
-#define IE_CONNECT_SUB 0x4d
-#define IE_CALLING_PN 0x6c
-#define IE_CALLING_SUB 0x6d
-#define IE_CALLED_PN 0x70
-#define IE_CALLED_SUB 0x71
-#define IE_REDIR_NR 0x74
-#define IE_TRANS_SEL 0x78
-#define IE_RESTART_IND 0x79
-#define IE_LLC 0x7c
-#define IE_HLC 0x7d
-#define IE_USER_USER 0x7e
-#define IE_ESCAPE 0x7f
-#define IE_SHIFT 0x90
-#define IE_MORE_DATA 0xa0
-#define IE_COMPLETE 0xa1
-#define IE_CONGESTION 0xb0
-#define IE_REPEAT 0xd0
-
-#define IE_MANDATORY 0x0100
-/* mandatory not in every case */
-#define IE_MANDATORY_1 0x0200
-
-#define ERR_IE_COMPREHENSION 1
-#define ERR_IE_UNRECOGNIZED -1
-#define ERR_IE_LENGTH -2
-#define ERR_IE_SEQUENCE -3
-
-#else /* only l3dss1_process */
-
-/* l3dss1 specific data in l3 process */
-typedef struct
-{ unsigned char invoke_id; /* used invoke id in remote ops, 0 = not active */
-  ulong ll_id; /* remembered ll id */
- u8 remote_operation; /* handled remote operation, 0 = not active */
-  int proc; /* remembered procedure */
- ulong remote_result; /* result of remote operation for statcallb */
-  char uus1_data[35]; /* data sent during alerting or disconnect */
-} dss1_proc_priv;
-
-/* l3dss1 specific data in protocol stack */
-typedef struct
-{ unsigned char last_invoke_id; /* last used value for invoking */
- unsigned char invoke_used[32]; /* 256 bits for 256 values */
-} dss1_stk_priv;
-
-#endif /* only l3dss1_process */
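Worth noting for readers of the ie_* check lists (e.g. ie_SETUP in the NI1 code below): each entry packs the IE identifier into the low byte and the IE_MANDATORY flags into the high byte. A sketch, not from the deleted source; how check_infoelements() splits the entries internally is an assumption here:

int entry    = IE_CHANNEL_ID | IE_MANDATORY;	/* == 0x0118                         */
int ie_id    = entry & 0xff;			/* 0x18, the identifier on the wire  */
int required = entry & IE_MANDATORY;		/* non-zero: the IE must be present  */
/* check_infoelements() (defined elsewhere in the driver) is assumed to
 * separate identifier and flags roughly like this when it scans a message. */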
diff --git a/drivers/isdn/hisax/l3ni1.c b/drivers/isdn/hisax/l3ni1.c
deleted file mode 100644
index ea311e7df48e..000000000000
--- a/drivers/isdn/hisax/l3ni1.c
+++ /dev/null
@@ -1,3182 +0,0 @@
-/* $Id: l3ni1.c,v 2.8.2.3 2004/01/13 14:31:25 keil Exp $
- *
- * NI1 D-channel protocol
- *
- * Author Matt Henderson & Guy Ellis
- * Copyright by Traverse Technologies Pty Ltd, www.travers.com.au
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- * 2000.6.6 Initial implementation of routines for US NI1
- * Layer 3 protocol based on the EURO/DSS1 D-channel protocol
- * driver written by Karsten Keil et al.
- * NI-1 Hall of Fame - Thanks to....
- * Ragnar Paulson - for some handy code fragments
- * Will Scales - beta tester extraordinaire
- * Brett Whittacre - beta tester and remote devel system in Vegas
- *
- */
-
-#include "hisax.h"
-#include "isdnl3.h"
-#include "l3ni1.h"
-#include <linux/ctype.h>
-#include <linux/slab.h>
-
-extern char *HiSax_getrev(const char *revision);
-static const char *ni1_revision = "$Revision: 2.8.2.3 $";
-
-#define EXT_BEARER_CAPS 1
-
-#define MsgHead(ptr, cref, mty) \
- *ptr++ = 0x8; \
- if (cref == -1) { \
- *ptr++ = 0x0; \
- } else { \
- *ptr++ = 0x1; \
- *ptr++ = cref^0x80; \
- } \
- *ptr++ = mty
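The MsgHead macro writes the standard Q.931 message header. A hedged illustration of its output (the message-type value is taken from the DSS1 header above; the NI1 defines are assumed to match):

u_char buf[8], *p = buf;

MsgHead(p, 0x81, MT_ALERTING);
/* buf now holds 08 01 01 01: protocol discriminator 0x08, call reference
 * length 1, call reference 0x81 ^ 0x80 == 0x01, message type 0x01.
 * With cref == -1 (the dummy call reference) only 08 00 <mt> is written. */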
-
-
-/**********************************************/
-/* get a new invoke id for remote operations. */
-/* Only a return value != 0 is valid */
-/**********************************************/
-static unsigned char new_invoke_id(struct PStack *p)
-{
- unsigned char retval;
- int i;
-
- i = 32; /* maximum search depth */
-
- retval = p->prot.ni1.last_invoke_id + 1; /* try new id */
- while ((i) && (p->prot.ni1.invoke_used[retval >> 3] == 0xFF)) {
- p->prot.ni1.last_invoke_id = (retval & 0xF8) + 8;
- i--;
- }
- if (i) {
- while (p->prot.ni1.invoke_used[retval >> 3] & (1 << (retval & 7)))
- retval++;
- } else
- retval = 0;
- p->prot.ni1.last_invoke_id = retval;
- p->prot.ni1.invoke_used[retval >> 3] |= (1 << (retval & 7));
- return (retval);
-} /* new_invoke_id */
-
-/*************************/
-/* free a used invoke id */
-/*************************/
-static void free_invoke_id(struct PStack *p, unsigned char id)
-{
-
- if (!id) return; /* 0 = invalid value */
-
- p->prot.ni1.invoke_used[id >> 3] &= ~(1 << (id & 7));
-} /* free_invoke_id */
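The two helpers above manage a 256-bit pool (invoke_used[32]). A minimal usage sketch (st is the struct PStack pointer assumed in scope; bit 0 is reserved so that 0 can mean "not active" -- the DSS1 stack setup earlier sets it explicitly, and the NI1 setup is assumed to do the same):

unsigned char id;

if ((id = new_invoke_id(st))) {		/* 0 means the pool is exhausted       */
	/* ... build and send the invoke component carrying 'id' ...          */
	free_invoke_id(st, id);		/* release it when the operation ends  */
}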
-
-
-/**********************************************************/
-/* create a new l3 process and fill in ni1 specific data */
-/**********************************************************/
-static struct l3_process
-*ni1_new_l3_process(struct PStack *st, int cr)
-{ struct l3_process *proc;
-
- if (!(proc = new_l3_process(st, cr)))
- return (NULL);
-
- proc->prot.ni1.invoke_id = 0;
- proc->prot.ni1.remote_operation = 0;
- proc->prot.ni1.uus1_data[0] = '\0';
-
- return (proc);
-} /* ni1_new_l3_process */
-
-/************************************************/
-/* free an l3 process and all ni1 specific data */
-/************************************************/
-static void
-ni1_release_l3_process(struct l3_process *p)
-{
- free_invoke_id(p->st, p->prot.ni1.invoke_id);
- release_l3_process(p);
-} /* ni1_release_l3_process */
-
-/********************************************************/
-/* search a process with invoke id id and dummy callref */
-/********************************************************/
-static struct l3_process *
-l3ni1_search_dummy_proc(struct PStack *st, int id)
-{ struct l3_process *pc = st->l3.proc; /* start of processes */
-
- if (!id) return (NULL);
-
- while (pc)
- { if ((pc->callref == -1) && (pc->prot.ni1.invoke_id == id))
- return (pc);
- pc = pc->next;
- }
- return (NULL);
-} /* l3ni1_search_dummy_proc */
-
-/*******************************************************************/
-/* called when a facility message with a dummy callref is received */
-/* and a return result is delivered. id specifies the invoke id. */
-/*******************************************************************/
-static void
-l3ni1_dummy_return_result(struct PStack *st, int id, u_char *p, u_char nlen)
-{ isdn_ctrl ic;
- struct IsdnCardState *cs;
- struct l3_process *pc = NULL;
-
- if ((pc = l3ni1_search_dummy_proc(st, id)))
- { L3DelTimer(&pc->timer); /* remove timer */
-
- cs = pc->st->l1.hardware;
- ic.driver = cs->myid;
- ic.command = ISDN_STAT_PROT;
- ic.arg = NI1_STAT_INVOKE_RES;
- ic.parm.ni1_io.hl_id = pc->prot.ni1.invoke_id;
- ic.parm.ni1_io.ll_id = pc->prot.ni1.ll_id;
- ic.parm.ni1_io.proc = pc->prot.ni1.proc;
- ic.parm.ni1_io.timeout = 0;
- ic.parm.ni1_io.datalen = nlen;
- ic.parm.ni1_io.data = p;
- free_invoke_id(pc->st, pc->prot.ni1.invoke_id);
- pc->prot.ni1.invoke_id = 0; /* reset id */
-
- cs->iif.statcallb(&ic);
- ni1_release_l3_process(pc);
- }
- else
- l3_debug(st, "dummy return result id=0x%x result len=%d", id, nlen);
-} /* l3ni1_dummy_return_result */
-
-/*******************************************************************/
-/* called when a facility message with a dummy callref is received */
-/* and a return error is delivered. id specifies the invoke id. */
-/*******************************************************************/
-static void
-l3ni1_dummy_error_return(struct PStack *st, int id, ulong error)
-{ isdn_ctrl ic;
- struct IsdnCardState *cs;
- struct l3_process *pc = NULL;
-
- if ((pc = l3ni1_search_dummy_proc(st, id)))
- { L3DelTimer(&pc->timer); /* remove timer */
-
- cs = pc->st->l1.hardware;
- ic.driver = cs->myid;
- ic.command = ISDN_STAT_PROT;
- ic.arg = NI1_STAT_INVOKE_ERR;
- ic.parm.ni1_io.hl_id = pc->prot.ni1.invoke_id;
- ic.parm.ni1_io.ll_id = pc->prot.ni1.ll_id;
- ic.parm.ni1_io.proc = pc->prot.ni1.proc;
- ic.parm.ni1_io.timeout = error;
- ic.parm.ni1_io.datalen = 0;
- ic.parm.ni1_io.data = NULL;
- free_invoke_id(pc->st, pc->prot.ni1.invoke_id);
- pc->prot.ni1.invoke_id = 0; /* reset id */
-
- cs->iif.statcallb(&ic);
- ni1_release_l3_process(pc);
- }
- else
- l3_debug(st, "dummy return error id=0x%x error=0x%lx", id, error);
-} /* l3ni1_error_return */
-
-/*******************************************************************/
-/* called when a facility message with a dummy callref is received */
-/* and an invoke is delivered. id specifies the invoke id. */
-/*******************************************************************/
-static void
-l3ni1_dummy_invoke(struct PStack *st, int cr, int id,
- int ident, u_char *p, u_char nlen)
-{ isdn_ctrl ic;
- struct IsdnCardState *cs;
-
- l3_debug(st, "dummy invoke %s id=0x%x ident=0x%x datalen=%d",
- (cr == -1) ? "local" : "broadcast", id, ident, nlen);
- if (cr >= -1) return; /* ignore local data */
-
- cs = st->l1.hardware;
- ic.driver = cs->myid;
- ic.command = ISDN_STAT_PROT;
- ic.arg = NI1_STAT_INVOKE_BRD;
- ic.parm.ni1_io.hl_id = id;
- ic.parm.ni1_io.ll_id = 0;
- ic.parm.ni1_io.proc = ident;
- ic.parm.ni1_io.timeout = 0;
- ic.parm.ni1_io.datalen = nlen;
- ic.parm.ni1_io.data = p;
-
- cs->iif.statcallb(&ic);
-} /* l3ni1_dummy_invoke */
-
-static void
-l3ni1_parse_facility(struct PStack *st, struct l3_process *pc,
- int cr, u_char *p)
-{
- int qd_len = 0;
- unsigned char nlen = 0, ilen, cp_tag;
- int ident, id;
- ulong err_ret;
-
- if (pc)
- st = pc->st; /* valid Stack */
- else
- if ((!st) || (cr >= 0)) return; /* neither pc nor st specified */
-
- p++;
- qd_len = *p++;
- if (qd_len == 0) {
- l3_debug(st, "qd_len == 0");
- return;
- }
- if ((*p & 0x1F) != 0x11) { /* Service discriminator, supplementary service */
- l3_debug(st, "supplementary service != 0x11");
- return;
- }
- while (qd_len > 0 && !(*p & 0x80)) { /* extension ? */
- p++;
- qd_len--;
- }
- if (qd_len < 2) {
- l3_debug(st, "qd_len < 2");
- return;
- }
- p++;
- qd_len--;
- if ((*p & 0xE0) != 0xA0) { /* class and form */
- l3_debug(st, "class and form != 0xA0");
- return;
- }
-
- cp_tag = *p & 0x1F; /* remember tag value */
-
- p++;
- qd_len--;
- if (qd_len < 1)
- { l3_debug(st, "qd_len < 1");
- return;
- }
- if (*p & 0x80)
- { /* length format indefinite or limited */
- nlen = *p++ & 0x7F; /* number of len bytes or indefinite */
- if ((qd_len-- < ((!nlen) ? 3 : (1 + nlen))) ||
- (nlen > 1))
- { l3_debug(st, "length format error or not implemented");
- return;
- }
- if (nlen == 1)
- { nlen = *p++; /* complete length */
- qd_len--;
- }
- else
- { qd_len -= 2; /* trailing null bytes */
- if ((*(p + qd_len)) || (*(p + qd_len + 1)))
- { l3_debug(st, "length format indefinite error");
- return;
- }
- nlen = qd_len;
- }
- }
- else
- { nlen = *p++;
- qd_len--;
- }
- if (qd_len < nlen)
- { l3_debug(st, "qd_len < nlen");
- return;
- }
- qd_len -= nlen;
-
- if (nlen < 2)
- { l3_debug(st, "nlen < 2");
- return;
- }
- if (*p != 0x02)
- { /* invoke identifier tag */
- l3_debug(st, "invoke identifier tag !=0x02");
- return;
- }
- p++;
- nlen--;
- if (*p & 0x80)
- { /* length format */
- l3_debug(st, "invoke id length format 2");
- return;
- }
- ilen = *p++;
- nlen--;
- if (ilen > nlen || ilen == 0)
- { l3_debug(st, "ilen > nlen || ilen == 0");
- return;
- }
- nlen -= ilen;
- id = 0;
- while (ilen > 0)
- { id = (id << 8) | (*p++ & 0xFF); /* invoke identifier */
- ilen--;
- }
-
- switch (cp_tag) { /* component tag */
- case 1: /* invoke */
- if (nlen < 2) {
- l3_debug(st, "nlen < 2 22");
- return;
- }
- if (*p != 0x02) { /* operation value */
- l3_debug(st, "operation value !=0x02");
- return;
- }
- p++;
- nlen--;
- ilen = *p++;
- nlen--;
- if (ilen > nlen || ilen == 0) {
- l3_debug(st, "ilen > nlen || ilen == 0 22");
- return;
- }
- nlen -= ilen;
- ident = 0;
- while (ilen > 0) {
- ident = (ident << 8) | (*p++ & 0xFF);
- ilen--;
- }
-
- if (!pc)
- {
- l3ni1_dummy_invoke(st, cr, id, ident, p, nlen);
- return;
- }
- l3_debug(st, "invoke break");
- break;
- case 2: /* return result */
- /* if no process available handle separately */
- if (!pc)
- { if (cr == -1)
- l3ni1_dummy_return_result(st, id, p, nlen);
- return;
- }
- if ((pc->prot.ni1.invoke_id) && (pc->prot.ni1.invoke_id == id))
- { /* Diversion successful */
- free_invoke_id(st, pc->prot.ni1.invoke_id);
- pc->prot.ni1.remote_result = 0; /* success */
- pc->prot.ni1.invoke_id = 0;
- pc->redir_result = pc->prot.ni1.remote_result;
- st->l3.l3l4(st, CC_REDIR | INDICATION, pc); } /* Diversion successful */
- else
- l3_debug(st, "return error unknown identifier");
- break;
- case 3: /* return error */
- err_ret = 0;
- if (nlen < 2)
- { l3_debug(st, "return error nlen < 2");
- return;
- }
- if (*p != 0x02)
- { /* result tag */
- l3_debug(st, "invoke error tag !=0x02");
- return;
- }
- p++;
- nlen--;
- if (*p > 4)
- { /* length format */
- l3_debug(st, "invoke return errlen > 4 ");
- return;
- }
- ilen = *p++;
- nlen--;
- if (ilen > nlen || ilen == 0)
- { l3_debug(st, "error return ilen > nlen || ilen == 0");
- return;
- }
- nlen -= ilen;
- while (ilen > 0)
- { err_ret = (err_ret << 8) | (*p++ & 0xFF); /* error value */
- ilen--;
- }
- /* if no process available handle separately */
- if (!pc)
- { if (cr == -1)
- l3ni1_dummy_error_return(st, id, err_ret);
- return;
- }
- if ((pc->prot.ni1.invoke_id) && (pc->prot.ni1.invoke_id == id))
- { /* Deflection error */
- free_invoke_id(st, pc->prot.ni1.invoke_id);
- pc->prot.ni1.remote_result = err_ret; /* result */
- pc->prot.ni1.invoke_id = 0;
- pc->redir_result = pc->prot.ni1.remote_result;
- st->l3.l3l4(st, CC_REDIR | INDICATION, pc);
- } /* Deflection error */
- else
- l3_debug(st, "return result unknown identifier");
- break;
- default:
- l3_debug(st, "facility default break tag=0x%02x", cp_tag);
- break;
- }
-}
-
-static void
-l3ni1_message(struct l3_process *pc, u_char mt)
-{
- struct sk_buff *skb;
- u_char *p;
-
- if (!(skb = l3_alloc_skb(4)))
- return;
- p = skb_put(skb, 4);
- MsgHead(p, pc->callref, mt);
- l3_msg(pc->st, DL_DATA | REQUEST, skb);
-}
-
-static void
-l3ni1_message_plus_chid(struct l3_process *pc, u_char mt)
-/* sends an l3 message plus channel id - added GE 05/09/00 */
-{
- struct sk_buff *skb;
- u_char tmp[16];
- u_char *p = tmp;
- u_char chid;
-
- chid = (u_char)(pc->para.bchannel & 0x03) | 0x88;
- MsgHead(p, pc->callref, mt);
- *p++ = IE_CHANNEL_ID;
- *p++ = 0x01;
- *p++ = chid;
-
- if (!(skb = l3_alloc_skb(7)))
- return;
- skb_put_data(skb, tmp, 7);
- l3_msg(pc->st, DL_DATA | REQUEST, skb);
-}
-
-static void
-l3ni1_message_cause(struct l3_process *pc, u_char mt, u_char cause)
-{
- struct sk_buff *skb;
- u_char tmp[16];
- u_char *p = tmp;
- int l;
-
- MsgHead(p, pc->callref, mt);
- *p++ = IE_CAUSE;
- *p++ = 0x2;
- *p++ = 0x80;
- *p++ = cause | 0x80;
-
- l = p - tmp;
- if (!(skb = l3_alloc_skb(l)))
- return;
- skb_put_data(skb, tmp, l);
- l3_msg(pc->st, DL_DATA | REQUEST, skb);
-}
-
-static void
-l3ni1_status_send(struct l3_process *pc, u_char pr, void *arg)
-{
- u_char tmp[16];
- u_char *p = tmp;
- int l;
- struct sk_buff *skb;
-
- MsgHead(p, pc->callref, MT_STATUS);
-
- *p++ = IE_CAUSE;
- *p++ = 0x2;
- *p++ = 0x80;
- *p++ = pc->para.cause | 0x80;
-
- *p++ = IE_CALL_STATE;
- *p++ = 0x1;
- *p++ = pc->state & 0x3f;
-
- l = p - tmp;
- if (!(skb = l3_alloc_skb(l)))
- return;
- skb_put_data(skb, tmp, l);
- l3_msg(pc->st, DL_DATA | REQUEST, skb);
-}
-
-static void
-l3ni1_msg_without_setup(struct l3_process *pc, u_char pr, void *arg)
-{
-	/* This routine is called if there was no SETUP (checks in ni1up and in
-	 * l3ni1_setup) and a RELEASE_COMPLETE has to be sent with an error code.
-	 * MT_STATUS_ENQUIRY in the NULL state is handled here too.
- */
- u_char tmp[16];
- u_char *p = tmp;
- int l;
- struct sk_buff *skb;
-
- switch (pc->para.cause) {
- case 81: /* invalid callreference */
- case 88: /* incomp destination */
-	case 96: /* mandatory IE missing */
- case 100: /* invalid IE contents */
- case 101: /* incompatible Callstate */
- MsgHead(p, pc->callref, MT_RELEASE_COMPLETE);
- *p++ = IE_CAUSE;
- *p++ = 0x2;
- *p++ = 0x80;
- *p++ = pc->para.cause | 0x80;
- break;
- default:
- printk(KERN_ERR "HiSax l3ni1_msg_without_setup wrong cause %d\n",
- pc->para.cause);
- return;
- }
- l = p - tmp;
- if (!(skb = l3_alloc_skb(l)))
- return;
- skb_put_data(skb, tmp, l);
- l3_msg(pc->st, DL_DATA | REQUEST, skb);
- ni1_release_l3_process(pc);
-}
-
-static int ie_ALERTING[] = {IE_BEARER, IE_CHANNEL_ID | IE_MANDATORY_1,
- IE_FACILITY, IE_PROGRESS, IE_DISPLAY, IE_SIGNAL, IE_HLC,
- IE_USER_USER, -1};
-static int ie_CALL_PROCEEDING[] = {IE_BEARER, IE_CHANNEL_ID | IE_MANDATORY_1,
- IE_FACILITY, IE_PROGRESS, IE_DISPLAY, IE_HLC, -1};
-static int ie_CONNECT[] = {IE_BEARER, IE_CHANNEL_ID | IE_MANDATORY_1,
- IE_FACILITY, IE_PROGRESS, IE_DISPLAY, IE_DATE, IE_SIGNAL,
- IE_CONNECT_PN, IE_CONNECT_SUB, IE_LLC, IE_HLC, IE_USER_USER, -1};
-static int ie_CONNECT_ACKNOWLEDGE[] = {IE_CHANNEL_ID, IE_DISPLAY, IE_SIGNAL, -1};
-static int ie_DISCONNECT[] = {IE_CAUSE | IE_MANDATORY, IE_FACILITY,
- IE_PROGRESS, IE_DISPLAY, IE_SIGNAL, IE_USER_USER, -1};
-static int ie_INFORMATION[] = {IE_COMPLETE, IE_DISPLAY, IE_KEYPAD, IE_SIGNAL,
- IE_CALLED_PN, -1};
-static int ie_NOTIFY[] = {IE_BEARER, IE_NOTIFY | IE_MANDATORY, IE_DISPLAY, -1};
-static int ie_PROGRESS[] = {IE_BEARER, IE_CAUSE, IE_FACILITY, IE_PROGRESS |
- IE_MANDATORY, IE_DISPLAY, IE_HLC, IE_USER_USER, -1};
-static int ie_RELEASE[] = {IE_CAUSE | IE_MANDATORY_1, IE_FACILITY, IE_DISPLAY,
- IE_SIGNAL, IE_USER_USER, -1};
-/* a RELEASE_COMPLETE with errors doesn't require special actions
- static int ie_RELEASE_COMPLETE[] = {IE_CAUSE | IE_MANDATORY_1, IE_DISPLAY, IE_SIGNAL, IE_USER_USER, -1};
-*/
-static int ie_RESUME_ACKNOWLEDGE[] = {IE_CHANNEL_ID | IE_MANDATORY, IE_FACILITY,
- IE_DISPLAY, -1};
-static int ie_RESUME_REJECT[] = {IE_CAUSE | IE_MANDATORY, IE_DISPLAY, -1};
-static int ie_SETUP[] = {IE_COMPLETE, IE_BEARER | IE_MANDATORY,
- IE_CHANNEL_ID | IE_MANDATORY, IE_FACILITY, IE_PROGRESS,
- IE_NET_FAC, IE_DISPLAY, IE_KEYPAD, IE_SIGNAL, IE_CALLING_PN,
- IE_CALLING_SUB, IE_CALLED_PN, IE_CALLED_SUB, IE_REDIR_NR,
- IE_LLC, IE_HLC, IE_USER_USER, -1};
-static int ie_SETUP_ACKNOWLEDGE[] = {IE_CHANNEL_ID | IE_MANDATORY, IE_FACILITY,
- IE_PROGRESS, IE_DISPLAY, IE_SIGNAL, -1};
-static int ie_STATUS[] = {IE_CAUSE | IE_MANDATORY, IE_CALL_STATE |
- IE_MANDATORY, IE_DISPLAY, -1};
-static int ie_STATUS_ENQUIRY[] = {IE_DISPLAY, -1};
-static int ie_SUSPEND_ACKNOWLEDGE[] = {IE_DISPLAY, IE_FACILITY, -1};
-static int ie_SUSPEND_REJECT[] = {IE_CAUSE | IE_MANDATORY, IE_DISPLAY, -1};
-/* not used
- * static int ie_CONGESTION_CONTROL[] = {IE_CONGESTION | IE_MANDATORY,
- * IE_CAUSE | IE_MANDATORY, IE_DISPLAY, -1};
- * static int ie_USER_INFORMATION[] = {IE_MORE_DATA, IE_USER_USER | IE_MANDATORY, -1};
- * static int ie_RESTART[] = {IE_CHANNEL_ID, IE_DISPLAY, IE_RESTART_IND |
- * IE_MANDATORY, -1};
- */
-static int ie_FACILITY[] = {IE_FACILITY | IE_MANDATORY, IE_DISPLAY, -1};
-static int comp_required[] = {1, 2, 3, 5, 6, 7, 9, 10, 11, 14, 15, -1};
-static int l3_valid_states[] = {0, 1, 2, 3, 4, 6, 7, 8, 9, 10, 11, 12, 15, 17, 19, 25, -1};
-
-struct ie_len {
- int ie;
- int len;
-};
-
-static
-struct ie_len max_ie_len[] = {
- {IE_SEGMENT, 4},
- {IE_BEARER, 12},
- {IE_CAUSE, 32},
- {IE_CALL_ID, 10},
- {IE_CALL_STATE, 3},
- {IE_CHANNEL_ID, 34},
- {IE_FACILITY, 255},
- {IE_PROGRESS, 4},
- {IE_NET_FAC, 255},
- {IE_NOTIFY, 3},
- {IE_DISPLAY, 82},
- {IE_DATE, 8},
- {IE_KEYPAD, 34},
- {IE_SIGNAL, 3},
- {IE_INFORATE, 6},
- {IE_E2E_TDELAY, 11},
- {IE_TDELAY_SEL, 5},
- {IE_PACK_BINPARA, 3},
- {IE_PACK_WINSIZE, 4},
- {IE_PACK_SIZE, 4},
- {IE_CUG, 7},
- {IE_REV_CHARGE, 3},
- {IE_CALLING_PN, 24},
- {IE_CALLING_SUB, 23},
- {IE_CALLED_PN, 24},
- {IE_CALLED_SUB, 23},
- {IE_REDIR_NR, 255},
- {IE_TRANS_SEL, 255},
- {IE_RESTART_IND, 3},
- {IE_LLC, 18},
- {IE_HLC, 5},
- {IE_USER_USER, 131},
- {-1, 0},
-};
-
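-/* look up the maximum allowed length of an IE in max_ie_len[];
-   unknown IEs default to 255 */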
-static int
-getmax_ie_len(u_char ie) {
- int i = 0;
- while (max_ie_len[i].ie != -1) {
- if (max_ie_len[i].ie == ie)
- return (max_ie_len[i].len);
- i++;
- }
- return (255);
-}
-
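-/* return the 1-based position of an IE in the checklist, negated for
-   single octet IEs (codes with bit 0x80 set); 0 if the IE is not listed */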
-static int
-ie_in_set(struct l3_process *pc, u_char ie, int *checklist) {
- int ret = 1;
-
- while (*checklist != -1) {
- if ((*checklist & 0xff) == ie) {
- if (ie & 0x80)
- return (-ret);
- else
- return (ret);
- }
- ret++;
- checklist++;
- }
- return (0);
-}
-
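-/* scan the IEs of a received message against the checklist for its message
-   type: flags unexpected comprehension-required IEs, unrecognized IEs,
-   oversized IEs and out-of-sequence IEs; returns an ERR_IE_* code or 0 */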
-static int
-check_infoelements(struct l3_process *pc, struct sk_buff *skb, int *checklist)
-{
- int *cl = checklist;
- u_char mt;
- u_char *p, ie;
- int l, newpos, oldpos;
- int err_seq = 0, err_len = 0, err_compr = 0, err_ureg = 0;
- u_char codeset = 0;
- u_char old_codeset = 0;
- u_char codelock = 1;
-
- p = skb->data;
- /* skip cr */
- p++;
- l = (*p++) & 0xf;
- p += l;
- mt = *p++;
- oldpos = 0;
- while ((p - skb->data) < skb->len) {
- if ((*p & 0xf0) == 0x90) { /* shift codeset */
- old_codeset = codeset;
- codeset = *p & 7;
- if (*p & 0x08)
- codelock = 0;
- else
- codelock = 1;
- if (pc->debug & L3_DEB_CHECK)
- l3_debug(pc->st, "check IE shift%scodeset %d->%d",
- codelock ? " locking " : " ", old_codeset, codeset);
- p++;
- continue;
- }
- if (!codeset) { /* only codeset 0 */
- if ((newpos = ie_in_set(pc, *p, cl))) {
- if (newpos > 0) {
- if (newpos < oldpos)
- err_seq++;
- else
- oldpos = newpos;
- }
- } else {
- if (ie_in_set(pc, *p, comp_required))
- err_compr++;
- else
- err_ureg++;
- }
- }
- ie = *p++;
- if (ie & 0x80) {
- l = 1;
- } else {
- l = *p++;
- p += l;
- l += 2;
- }
- if (!codeset && (l > getmax_ie_len(ie)))
- err_len++;
- if (!codelock) {
- if (pc->debug & L3_DEB_CHECK)
- l3_debug(pc->st, "check IE shift back codeset %d->%d",
- codeset, old_codeset);
- codeset = old_codeset;
- codelock = 1;
- }
- }
- if (err_compr | err_ureg | err_len | err_seq) {
- if (pc->debug & L3_DEB_CHECK)
- l3_debug(pc->st, "check IE MT(%x) %d/%d/%d/%d",
- mt, err_compr, err_ureg, err_len, err_seq);
- if (err_compr)
- return (ERR_IE_COMPREHENSION);
- if (err_ureg)
- return (ERR_IE_UNRECOGNIZED);
- if (err_len)
- return (ERR_IE_LENGTH);
- if (err_seq)
- return (ERR_IE_SEQUENCE);
- }
- return (0);
-}
-
-/* verify that a message type exists and contains no IE errors */
-static int
-l3ni1_check_messagetype_validity(struct l3_process *pc, int mt, void *arg)
-{
- switch (mt) {
- case MT_ALERTING:
- case MT_CALL_PROCEEDING:
- case MT_CONNECT:
- case MT_CONNECT_ACKNOWLEDGE:
- case MT_DISCONNECT:
- case MT_INFORMATION:
- case MT_FACILITY:
- case MT_NOTIFY:
- case MT_PROGRESS:
- case MT_RELEASE:
- case MT_RELEASE_COMPLETE:
- case MT_SETUP:
- case MT_SETUP_ACKNOWLEDGE:
- case MT_RESUME_ACKNOWLEDGE:
- case MT_RESUME_REJECT:
- case MT_SUSPEND_ACKNOWLEDGE:
- case MT_SUSPEND_REJECT:
- case MT_USER_INFORMATION:
- case MT_RESTART:
- case MT_RESTART_ACKNOWLEDGE:
- case MT_CONGESTION_CONTROL:
- case MT_STATUS:
- case MT_STATUS_ENQUIRY:
- if (pc->debug & L3_DEB_CHECK)
- l3_debug(pc->st, "l3ni1_check_messagetype_validity mt(%x) OK", mt);
- break;
- case MT_RESUME: /* RESUME only in user->net */
- case MT_SUSPEND: /* SUSPEND only in user->net */
- default:
- if (pc->debug & (L3_DEB_CHECK | L3_DEB_WARN))
- l3_debug(pc->st, "l3ni1_check_messagetype_validity mt(%x) fail", mt);
- pc->para.cause = 97;
- l3ni1_status_send(pc, 0, NULL);
- return (1);
- }
- return (0);
-}
-
-static void
-l3ni1_std_ie_err(struct l3_process *pc, int ret) {
-
- if (pc->debug & L3_DEB_CHECK)
- l3_debug(pc->st, "check_infoelements ret %d", ret);
- switch (ret) {
- case 0:
- break;
- case ERR_IE_COMPREHENSION:
- pc->para.cause = 96;
- l3ni1_status_send(pc, 0, NULL);
- break;
- case ERR_IE_UNRECOGNIZED:
- pc->para.cause = 99;
- l3ni1_status_send(pc, 0, NULL);
- break;
- case ERR_IE_LENGTH:
- pc->para.cause = 100;
- l3ni1_status_send(pc, 0, NULL);
- break;
- case ERR_IE_SEQUENCE:
- default:
- break;
- }
-}
-
-static int
-l3ni1_get_channel_id(struct l3_process *pc, struct sk_buff *skb) {
- u_char *p;
-
- p = skb->data;
- if ((p = findie(p, skb->len, IE_CHANNEL_ID, 0))) {
- p++;
- if (*p != 1) { /* len for BRI = 1 */
- if (pc->debug & L3_DEB_WARN)
- l3_debug(pc->st, "wrong chid len %d", *p);
- return (-2);
- }
- p++;
- if (*p & 0x60) { /* only base rate interface */
- if (pc->debug & L3_DEB_WARN)
- l3_debug(pc->st, "wrong chid %x", *p);
- return (-3);
- }
- return (*p & 0x3);
- } else
- return (-1);
-}
-
-static int
-l3ni1_get_cause(struct l3_process *pc, struct sk_buff *skb) {
- u_char l, i = 0;
- u_char *p;
-
- p = skb->data;
- pc->para.cause = 31;
- pc->para.loc = 0;
- if ((p = findie(p, skb->len, IE_CAUSE, 0))) {
- p++;
- l = *p++;
- if (l > 30)
- return (1);
- if (l) {
- pc->para.loc = *p++;
- l--;
- } else {
- return (2);
- }
- if (l && !(pc->para.loc & 0x80)) {
- l--;
- p++; /* skip recommendation */
- }
- if (l) {
- pc->para.cause = *p++;
- l--;
- if (!(pc->para.cause & 0x80))
- return (3);
- } else
- return (4);
- while (l && (i < 6)) {
- pc->para.diag[i++] = *p++;
- l--;
- }
- } else
- return (-1);
- return (0);
-}
-
-static void
-l3ni1_msg_with_uus(struct l3_process *pc, u_char cmd)
-{
- struct sk_buff *skb;
- u_char tmp[16 + 40];
- u_char *p = tmp;
- int l;
-
- MsgHead(p, pc->callref, cmd);
-
- if (pc->prot.ni1.uus1_data[0])
- { *p++ = IE_USER_USER; /* UUS info element */
- *p++ = strlen(pc->prot.ni1.uus1_data) + 1;
- *p++ = 0x04; /* IA5 chars */
- strcpy(p, pc->prot.ni1.uus1_data);
- p += strlen(pc->prot.ni1.uus1_data);
- pc->prot.ni1.uus1_data[0] = '\0';
- }
-
- l = p - tmp;
- if (!(skb = l3_alloc_skb(l)))
- return;
- skb_put_data(skb, tmp, l);
- l3_msg(pc->st, DL_DATA | REQUEST, skb);
-} /* l3ni1_msg_with_uus */
-
-static void
-l3ni1_release_req(struct l3_process *pc, u_char pr, void *arg)
-{
- StopAllL3Timer(pc);
- newl3state(pc, 19);
- if (!pc->prot.ni1.uus1_data[0])
- l3ni1_message(pc, MT_RELEASE);
- else
- l3ni1_msg_with_uus(pc, MT_RELEASE);
- L3AddTimer(&pc->timer, T308, CC_T308_1);
-}
-
-static void
-l3ni1_release_cmpl(struct l3_process *pc, u_char pr, void *arg)
-{
- struct sk_buff *skb = arg;
- int ret;
-
- if ((ret = l3ni1_get_cause(pc, skb)) > 0) {
- if (pc->debug & L3_DEB_WARN)
- l3_debug(pc->st, "RELCMPL get_cause ret(%d)", ret);
- } else if (ret < 0)
- pc->para.cause = NO_CAUSE;
- StopAllL3Timer(pc);
- newl3state(pc, 0);
- pc->st->l3.l3l4(pc->st, CC_RELEASE | CONFIRM, pc);
- ni1_release_l3_process(pc);
-}
-
-#if EXT_BEARER_CAPS
-
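-/* build the three V.110/X.30 parameter octets (user rate, intermediate rate,
-   data/stop/parity bits) of an LLC IE from the si2 service indicator */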
-static u_char *
-EncodeASyncParams(u_char *p, u_char si2)
-{ // 7c 06 88 90 21 42 00 bb
-	p[0] = 0;
-	p[1] = 0x40; // Intermediate rate: 16 kbit/s jj 2000.02.19
-	p[2] = 0x80;
-	if (si2 & 32) // 7 data bits
-		p[2] += 16;
-	else // 8 data bits
-		p[2] += 24;
-
-	if (si2 & 16) // 2 stop bits
-		p[2] += 96;
-	else // 1 stop bit
-		p[2] += 32;
-
-	if (si2 & 8) // even parity
-		p[2] += 2;
-	else // no parity
-		p[2] += 3;
-
-	switch (si2 & 0x07) {
-	case 0:
-		p[0] = 66; // 1200 bit/s
-		break;
-	case 1:
-		p[0] = 88; // 1200/75 bit/s
-		break;
-	case 2:
-		p[0] = 87; // 75/1200 bit/s
-		break;
-	case 3:
-		p[0] = 67; // 2400 bit/s
-		break;
-	case 4:
-		p[0] = 69; // 4800 bit/s
-		break;
-	case 5:
-		p[0] = 72; // 9600 bit/s
-		break;
-	case 6:
-		p[0] = 73; // 14400 bit/s
-		break;
-	case 7:
-		p[0] = 75; // 19200 bit/s
-		break;
-	}
-	return p + 3;
-}
-
-static u_char
-EncodeSyncParams(u_char si2, u_char ai)
-{
-	switch (si2) {
-	case 0:
-		return ai + 2; // 1200 bit/s
-	case 1:
-		return ai + 24; // 1200/75 bit/s
-	case 2:
-		return ai + 23; // 75/1200 bit/s
-	case 3:
-		return ai + 3; // 2400 bit/s
-	case 4:
-		return ai + 5; // 4800 bit/s
-	case 5:
-		return ai + 8; // 9600 bit/s
-	case 6:
-		return ai + 9; // 14400 bit/s
-	case 7:
-		return ai + 11; // 19200 bit/s
-	case 8:
-		return ai + 14; // 48000 bit/s
-	case 9:
-		return ai + 15; // 56000 bit/s
-	case 15:
-		return ai + 40; // negotiate bit/s
-	default:
-		break;
-	}
- return ai;
-}
-
-
-static u_char
-DecodeASyncParams(u_char si2, u_char *p)
-{
- u_char info;
-
-	switch (p[5]) {
-	case 66: // 1200 bit/s
-		break; // si2 doesn't change
-	case 88: // 1200/75 bit/s
-		si2 += 1;
-		break;
-	case 87: // 75/1200 bit/s
-		si2 += 2;
-		break;
-	case 67: // 2400 bit/s
-		si2 += 3;
-		break;
-	case 69: // 4800 bit/s
-		si2 += 4;
-		break;
-	case 72: // 9600 bit/s
-		si2 += 5;
-		break;
-	case 73: // 14400 bit/s
-		si2 += 6;
-		break;
-	case 75: // 19200 bit/s
-		si2 += 7;
-		break;
-	}
-
-	info = p[7] & 0x7f;
-	if ((info & 16) && (!(info & 8))) // 7 data bits
-		si2 += 32; // else 8 data bits
-	if ((info & 96) == 96) // 2 stop bits
-		si2 += 16; // else 1 stop bit
-	if ((info & 2) && (!(info & 1))) // even parity
-		si2 += 8; // else no parity
-
- return si2;
-}
-
-
-static u_char
-DecodeSyncParams(u_char si2, u_char info)
-{
- info &= 0x7f;
-	switch (info) {
-	case 40: // bit/s negotiation failed ai := 165 not 175!
-		return si2 + 15;
-	case 15: // 56000 bit/s failed, ai := 0 not 169 !
-		return si2 + 9;
-	case 14: // 48000 bit/s
-		return si2 + 8;
-	case 11: // 19200 bit/s
-		return si2 + 7;
-	case 9: // 14400 bit/s
-		return si2 + 6;
-	case 8: // 9600 bit/s
-		return si2 + 5;
-	case 5: // 4800 bit/s
-		return si2 + 4;
-	case 3: // 2400 bit/s
-		return si2 + 3;
-	case 23: // 75/1200 bit/s
-		return si2 + 2;
-	case 24: // 1200/75 bit/s
-		return si2 + 1;
-	default: // 1200 bit/s
-		return si2;
-	}
-}
-
-static u_char
-DecodeSI2(struct sk_buff *skb)
-{
- u_char *p; //, *pend=skb->data + skb->len;
-
- if ((p = findie(skb->data, skb->len, 0x7c, 0))) {
-		switch (p[4] & 0x0f) {
-		case 0x01:
-			if (p[1] == 0x04) // sync. bit rate adaption
-				return DecodeSyncParams(160, p[5]); // V.110/X.30
-			else if (p[1] == 0x06) // async. bit rate adaption
-				return DecodeASyncParams(192, p); // V.110/X.30
-			break;
-		case 0x08: // if (p[5] == 0x02) // sync. bit rate adaption
-			if (p[1] > 3)
-				return DecodeSyncParams(176, p[5]); // V.120
-			break;
-		}
- }
- return 0;
-}
-
-#endif
-
-
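-/* build and send a SETUP: sending complete indicator, bearer capability
-   mapped from the 1TR6 style si1/si2 values, the dialled digits as keypad IE
-   and (with EXT_BEARER_CAPS) a matching LLC IE; starts T303, enters state 1 */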
-static void
-l3ni1_setup_req(struct l3_process *pc, u_char pr,
- void *arg)
-{
- struct sk_buff *skb;
- u_char tmp[128];
- u_char *p = tmp;
-
- u_char *teln;
- u_char *sub;
- u_char *sp;
- int l;
-
- MsgHead(p, pc->callref, MT_SETUP);
-
- teln = pc->para.setup.phone;
-
- *p++ = 0xa1; /* complete indicator */
- /*
- * Set Bearer Capability, Map info from 1TR6-convention to NI1
- */
- switch (pc->para.setup.si1) {
- case 1: /* Telephony */
- *p++ = IE_BEARER;
- *p++ = 0x3; /* Length */
- *p++ = 0x90; /* 3.1khz Audio */
- *p++ = 0x90; /* Circuit-Mode 64kbps */
- *p++ = 0xa2; /* u-Law Audio */
- break;
-	case 5: /* Data transmission 64k, BTX */
-	case 7: /* Data transmission 64k */
- default:
- *p++ = IE_BEARER;
- *p++ = 0x2; /* Length */
- *p++ = 0x88; /* Coding Std. CCITT, unrestr. dig. Inform. */
- *p++ = 0x90; /* Circuit-Mode 64kbps */
- break;
- }
-
- sub = NULL;
- sp = teln;
- while (*sp) {
- if ('.' == *sp) {
- sub = sp;
- *sp = 0;
- } else
- sp++;
- }
-
- *p++ = IE_KEYPAD;
- *p++ = strlen(teln);
- while (*teln)
- *p++ = (*teln++) & 0x7F;
-
- if (sub)
- *sub++ = '.';
-
-#if EXT_BEARER_CAPS
-	if ((pc->para.setup.si2 >= 160) && (pc->para.setup.si2 <= 175)) { // sync. bit rate adaption, V.110/X.30
- *p++ = IE_LLC;
- *p++ = 0x04;
- *p++ = 0x88;
- *p++ = 0x90;
- *p++ = 0x21;
- *p++ = EncodeSyncParams(pc->para.setup.si2 - 160, 0x80);
-	} else if ((pc->para.setup.si2 >= 176) && (pc->para.setup.si2 <= 191)) { // sync. bit rate adaption, V.120
- *p++ = IE_LLC;
- *p++ = 0x05;
- *p++ = 0x88;
- *p++ = 0x90;
- *p++ = 0x28;
- *p++ = EncodeSyncParams(pc->para.setup.si2 - 176, 0);
- *p++ = 0x82;
-	} else if (pc->para.setup.si2 >= 192) { // async. bit rate adaption, V.110/X.30
- *p++ = IE_LLC;
- *p++ = 0x06;
- *p++ = 0x88;
- *p++ = 0x90;
- *p++ = 0x21;
- p = EncodeASyncParams(p, pc->para.setup.si2 - 192);
- } else {
- switch (pc->para.setup.si1) {
- case 1: /* Telephony */
- *p++ = IE_LLC;
- *p++ = 0x3; /* Length */
- *p++ = 0x90; /* Coding Std. CCITT, 3.1 kHz audio */
- *p++ = 0x90; /* Circuit-Mode 64kbps */
- *p++ = 0xa2; /* u-Law Audio */
- break;
-		case 5: /* Data transmission 64k, BTX */
-		case 7: /* Data transmission 64k */
- default:
- *p++ = IE_LLC;
- *p++ = 0x2; /* Length */
- *p++ = 0x88; /* Coding Std. CCITT, unrestr. dig. Inform. */
- *p++ = 0x90; /* Circuit-Mode 64kbps */
- break;
- }
- }
-#endif
- l = p - tmp;
- if (!(skb = l3_alloc_skb(l)))
- {
- return;
- }
- skb_put_data(skb, tmp, l);
- L3DelTimer(&pc->timer);
- L3AddTimer(&pc->timer, T303, CC_T303);
- newl3state(pc, 1);
- l3_msg(pc->st, DL_DATA | REQUEST, skb);
-}
-
-static void
-l3ni1_call_proc(struct l3_process *pc, u_char pr, void *arg)
-{
- struct sk_buff *skb = arg;
- int id, ret;
-
- if ((id = l3ni1_get_channel_id(pc, skb)) >= 0) {
- if ((0 == id) || ((3 == id) && (0x10 == pc->para.moderate))) {
- if (pc->debug & L3_DEB_WARN)
- l3_debug(pc->st, "setup answer with wrong chid %x", id);
- pc->para.cause = 100;
- l3ni1_status_send(pc, pr, NULL);
- return;
- }
- pc->para.bchannel = id;
- } else if (1 == pc->state) {
- if (pc->debug & L3_DEB_WARN)
- l3_debug(pc->st, "setup answer wrong chid (ret %d)", id);
- if (id == -1)
- pc->para.cause = 96;
- else
- pc->para.cause = 100;
- l3ni1_status_send(pc, pr, NULL);
- return;
- }
-	/* Now we are at the non-mandatory IEs */
- ret = check_infoelements(pc, skb, ie_CALL_PROCEEDING);
- if (ERR_IE_COMPREHENSION == ret) {
- l3ni1_std_ie_err(pc, ret);
- return;
- }
- L3DelTimer(&pc->timer);
- newl3state(pc, 3);
- L3AddTimer(&pc->timer, T310, CC_T310);
-	if (ret) /* STATUS for non-mandatory IE errors after actions are taken */
- l3ni1_std_ie_err(pc, ret);
- pc->st->l3.l3l4(pc->st, CC_PROCEEDING | INDICATION, pc);
-}
-
-static void
-l3ni1_setup_ack(struct l3_process *pc, u_char pr, void *arg)
-{
- struct sk_buff *skb = arg;
- int id, ret;
-
- if ((id = l3ni1_get_channel_id(pc, skb)) >= 0) {
- if ((0 == id) || ((3 == id) && (0x10 == pc->para.moderate))) {
- if (pc->debug & L3_DEB_WARN)
- l3_debug(pc->st, "setup answer with wrong chid %x", id);
- pc->para.cause = 100;
- l3ni1_status_send(pc, pr, NULL);
- return;
- }
- pc->para.bchannel = id;
- } else {
- if (pc->debug & L3_DEB_WARN)
- l3_debug(pc->st, "setup answer wrong chid (ret %d)", id);
- if (id == -1)
- pc->para.cause = 96;
- else
- pc->para.cause = 100;
- l3ni1_status_send(pc, pr, NULL);
- return;
- }
-	/* Now we are at the non-mandatory IEs */
- ret = check_infoelements(pc, skb, ie_SETUP_ACKNOWLEDGE);
- if (ERR_IE_COMPREHENSION == ret) {
- l3ni1_std_ie_err(pc, ret);
- return;
- }
- L3DelTimer(&pc->timer);
- newl3state(pc, 2);
- L3AddTimer(&pc->timer, T304, CC_T304);
-	if (ret) /* STATUS for non-mandatory IE errors after actions are taken */
- l3ni1_std_ie_err(pc, ret);
- pc->st->l3.l3l4(pc->st, CC_MORE_INFO | INDICATION, pc);
-}
-
-static void
-l3ni1_disconnect(struct l3_process *pc, u_char pr, void *arg)
-{
- struct sk_buff *skb = arg;
- u_char *p;
- int ret;
- u_char cause = 0;
-
- StopAllL3Timer(pc);
- if ((ret = l3ni1_get_cause(pc, skb))) {
- if (pc->debug & L3_DEB_WARN)
- l3_debug(pc->st, "DISC get_cause ret(%d)", ret);
- if (ret < 0)
- cause = 96;
- else if (ret > 0)
- cause = 100;
- }
- if ((p = findie(skb->data, skb->len, IE_FACILITY, 0)))
- l3ni1_parse_facility(pc->st, pc, pc->callref, p);
- ret = check_infoelements(pc, skb, ie_DISCONNECT);
- if (ERR_IE_COMPREHENSION == ret)
- cause = 96;
- else if ((!cause) && (ERR_IE_UNRECOGNIZED == ret))
- cause = 99;
- ret = pc->state;
- newl3state(pc, 12);
- if (cause)
- newl3state(pc, 19);
- if (11 != ret)
- pc->st->l3.l3l4(pc->st, CC_DISCONNECT | INDICATION, pc);
- else if (!cause)
- l3ni1_release_req(pc, pr, NULL);
- if (cause) {
- l3ni1_message_cause(pc, MT_RELEASE, cause);
- L3AddTimer(&pc->timer, T308, CC_T308_1);
- }
-}
-
-static void
-l3ni1_connect(struct l3_process *pc, u_char pr, void *arg)
-{
- struct sk_buff *skb = arg;
- int ret;
-
- ret = check_infoelements(pc, skb, ie_CONNECT);
- if (ERR_IE_COMPREHENSION == ret) {
- l3ni1_std_ie_err(pc, ret);
- return;
- }
- L3DelTimer(&pc->timer); /* T310 */
- newl3state(pc, 10);
- pc->para.chargeinfo = 0;
-	/* COLP handling should be inserted here KKe */
- if (ret)
- l3ni1_std_ie_err(pc, ret);
- pc->st->l3.l3l4(pc->st, CC_SETUP | CONFIRM, pc);
-}
-
-static void
-l3ni1_alerting(struct l3_process *pc, u_char pr, void *arg)
-{
- struct sk_buff *skb = arg;
- int ret;
-
- ret = check_infoelements(pc, skb, ie_ALERTING);
- if (ERR_IE_COMPREHENSION == ret) {
- l3ni1_std_ie_err(pc, ret);
- return;
- }
- L3DelTimer(&pc->timer); /* T304 */
- newl3state(pc, 4);
- if (ret)
- l3ni1_std_ie_err(pc, ret);
- pc->st->l3.l3l4(pc->st, CC_ALERTING | INDICATION, pc);
-}
-
-static void
-l3ni1_setup(struct l3_process *pc, u_char pr, void *arg)
-{
- u_char *p;
- int bcfound = 0;
- char tmp[80];
- struct sk_buff *skb = arg;
- int id;
- int err = 0;
-
- /*
- * Bearer Capabilities
- */
- p = skb->data;
-	/* only the first occurrence will be detected! */
- if ((p = findie(p, skb->len, 0x04, 0))) {
- if ((p[1] < 2) || (p[1] > 11))
- err = 1;
- else {
- pc->para.setup.si2 = 0;
- switch (p[2] & 0x7f) {
- case 0x00: /* Speech */
- case 0x10: /* 3.1 Khz audio */
- pc->para.setup.si1 = 1;
- break;
- case 0x08: /* Unrestricted digital information */
- pc->para.setup.si1 = 7;
-/* JIM, 05.11.97 I wanna set service indicator 2 */
-#if EXT_BEARER_CAPS
- pc->para.setup.si2 = DecodeSI2(skb);
-#endif
- break;
- case 0x09: /* Restricted digital information */
- pc->para.setup.si1 = 2;
- break;
- case 0x11:
-				/* Unrestricted digital information with
-				 * tones/announcements (or 7 kHz audio)
-				 */
- pc->para.setup.si1 = 3;
- break;
- case 0x18: /* Video */
- pc->para.setup.si1 = 4;
- break;
- default:
- err = 2;
- break;
- }
- switch (p[3] & 0x7f) {
-			case 0x40: /* packet mode */
- pc->para.setup.si1 = 8;
- break;
- case 0x10: /* 64 kbit */
- case 0x11: /* 2*64 kbit */
- case 0x13: /* 384 kbit */
- case 0x15: /* 1536 kbit */
- case 0x17: /* 1920 kbit */
- pc->para.moderate = p[3] & 0x7f;
- break;
- default:
- err = 3;
- break;
- }
- }
- if (pc->debug & L3_DEB_SI)
- l3_debug(pc->st, "SI=%d, AI=%d",
- pc->para.setup.si1, pc->para.setup.si2);
- if (err) {
- if (pc->debug & L3_DEB_WARN)
- l3_debug(pc->st, "setup with wrong bearer(l=%d:%x,%x)",
- p[1], p[2], p[3]);
- pc->para.cause = 100;
- l3ni1_msg_without_setup(pc, pr, NULL);
- return;
- }
- } else {
- if (pc->debug & L3_DEB_WARN)
- l3_debug(pc->st, "setup without bearer capabilities");
- /* ETS 300-104 1.3.3 */
- pc->para.cause = 96;
- l3ni1_msg_without_setup(pc, pr, NULL);
- return;
- }
- /*
- * Channel Identification
- */
- if ((id = l3ni1_get_channel_id(pc, skb)) >= 0) {
- if ((pc->para.bchannel = id)) {
- if ((3 == id) && (0x10 == pc->para.moderate)) {
- if (pc->debug & L3_DEB_WARN)
- l3_debug(pc->st, "setup with wrong chid %x",
- id);
- pc->para.cause = 100;
- l3ni1_msg_without_setup(pc, pr, NULL);
- return;
- }
- bcfound++;
- } else
- { if (pc->debug & L3_DEB_WARN)
- l3_debug(pc->st, "setup without bchannel, call waiting");
- bcfound++;
- }
- } else {
- if (pc->debug & L3_DEB_WARN)
- l3_debug(pc->st, "setup with wrong chid ret %d", id);
- if (id == -1)
- pc->para.cause = 96;
- else
- pc->para.cause = 100;
- l3ni1_msg_without_setup(pc, pr, NULL);
- return;
- }
-	/* Now we are at the non-mandatory IEs */
- err = check_infoelements(pc, skb, ie_SETUP);
- if (ERR_IE_COMPREHENSION == err) {
- pc->para.cause = 96;
- l3ni1_msg_without_setup(pc, pr, NULL);
- return;
- }
- p = skb->data;
- if ((p = findie(p, skb->len, 0x70, 0)))
- iecpy(pc->para.setup.eazmsn, p, 1);
- else
- pc->para.setup.eazmsn[0] = 0;
-
- p = skb->data;
- if ((p = findie(p, skb->len, 0x71, 0))) {
- /* Called party subaddress */
- if ((p[1] >= 2) && (p[2] == 0x80) && (p[3] == 0x50)) {
- tmp[0] = '.';
- iecpy(&tmp[1], p, 2);
- strcat(pc->para.setup.eazmsn, tmp);
- } else if (pc->debug & L3_DEB_WARN)
- l3_debug(pc->st, "wrong called subaddress");
- }
- p = skb->data;
- if ((p = findie(p, skb->len, 0x6c, 0))) {
- pc->para.setup.plan = p[2];
- if (p[2] & 0x80) {
- iecpy(pc->para.setup.phone, p, 1);
- pc->para.setup.screen = 0;
- } else {
- iecpy(pc->para.setup.phone, p, 2);
- pc->para.setup.screen = p[3];
- }
- } else {
- pc->para.setup.phone[0] = 0;
- pc->para.setup.plan = 0;
- pc->para.setup.screen = 0;
- }
- p = skb->data;
- if ((p = findie(p, skb->len, 0x6d, 0))) {
- /* Calling party subaddress */
- if ((p[1] >= 2) && (p[2] == 0x80) && (p[3] == 0x50)) {
- tmp[0] = '.';
- iecpy(&tmp[1], p, 2);
- strcat(pc->para.setup.phone, tmp);
- } else if (pc->debug & L3_DEB_WARN)
- l3_debug(pc->st, "wrong calling subaddress");
- }
- newl3state(pc, 6);
-	if (err) /* STATUS for non-mandatory IE errors after actions are taken */
- l3ni1_std_ie_err(pc, err);
- pc->st->l3.l3l4(pc->st, CC_SETUP | INDICATION, pc);
-}
-
-static void
-l3ni1_reset(struct l3_process *pc, u_char pr, void *arg)
-{
- ni1_release_l3_process(pc);
-}
-
-static void
-l3ni1_disconnect_req(struct l3_process *pc, u_char pr, void *arg)
-{
- struct sk_buff *skb;
- u_char tmp[16 + 40];
- u_char *p = tmp;
- int l;
- u_char cause = 16;
-
- if (pc->para.cause != NO_CAUSE)
- cause = pc->para.cause;
-
- StopAllL3Timer(pc);
-
- MsgHead(p, pc->callref, MT_DISCONNECT);
-
- *p++ = IE_CAUSE;
- *p++ = 0x2;
- *p++ = 0x80;
- *p++ = cause | 0x80;
-
- if (pc->prot.ni1.uus1_data[0])
- { *p++ = IE_USER_USER; /* UUS info element */
- *p++ = strlen(pc->prot.ni1.uus1_data) + 1;
- *p++ = 0x04; /* IA5 chars */
- strcpy(p, pc->prot.ni1.uus1_data);
- p += strlen(pc->prot.ni1.uus1_data);
- pc->prot.ni1.uus1_data[0] = '\0';
- }
-
- l = p - tmp;
- if (!(skb = l3_alloc_skb(l)))
- return;
- skb_put_data(skb, tmp, l);
- newl3state(pc, 11);
- l3_msg(pc->st, DL_DATA | REQUEST, skb);
- L3AddTimer(&pc->timer, T305, CC_T305);
-}
-
-static void
-l3ni1_setup_rsp(struct l3_process *pc, u_char pr,
- void *arg)
-{
- if (!pc->para.bchannel)
- { if (pc->debug & L3_DEB_WARN)
- l3_debug(pc->st, "D-chan connect for waiting call");
- l3ni1_disconnect_req(pc, pr, arg);
- return;
- }
- newl3state(pc, 8);
- if (pc->debug & L3_DEB_WARN)
- l3_debug(pc->st, "D-chan connect for waiting call");
- l3ni1_message_plus_chid(pc, MT_CONNECT); /* GE 05/09/00 */
- L3DelTimer(&pc->timer);
- L3AddTimer(&pc->timer, T313, CC_T313);
-}
-
-static void
-l3ni1_connect_ack(struct l3_process *pc, u_char pr, void *arg)
-{
- struct sk_buff *skb = arg;
- int ret;
-
- ret = check_infoelements(pc, skb, ie_CONNECT_ACKNOWLEDGE);
- if (ERR_IE_COMPREHENSION == ret) {
- l3ni1_std_ie_err(pc, ret);
- return;
- }
- newl3state(pc, 10);
- L3DelTimer(&pc->timer);
- if (ret)
- l3ni1_std_ie_err(pc, ret);
- pc->st->l3.l3l4(pc->st, CC_SETUP_COMPL | INDICATION, pc);
-}
-
-static void
-l3ni1_reject_req(struct l3_process *pc, u_char pr, void *arg)
-{
- struct sk_buff *skb;
- u_char tmp[16];
- u_char *p = tmp;
- int l;
- u_char cause = 21;
-
- if (pc->para.cause != NO_CAUSE)
- cause = pc->para.cause;
-
- MsgHead(p, pc->callref, MT_RELEASE_COMPLETE);
-
- *p++ = IE_CAUSE;
- *p++ = 0x2;
- *p++ = 0x80;
- *p++ = cause | 0x80;
-
- l = p - tmp;
- if (!(skb = l3_alloc_skb(l)))
- return;
- skb_put_data(skb, tmp, l);
- l3_msg(pc->st, DL_DATA | REQUEST, skb);
- pc->st->l3.l3l4(pc->st, CC_RELEASE | INDICATION, pc);
- newl3state(pc, 0);
- ni1_release_l3_process(pc);
-}
-
-static void
-l3ni1_release(struct l3_process *pc, u_char pr, void *arg)
-{
- struct sk_buff *skb = arg;
- u_char *p;
- int ret, cause = 0;
-
- StopAllL3Timer(pc);
- if ((ret = l3ni1_get_cause(pc, skb)) > 0) {
- if (pc->debug & L3_DEB_WARN)
- l3_debug(pc->st, "REL get_cause ret(%d)", ret);
- } else if (ret < 0)
- pc->para.cause = NO_CAUSE;
- if ((p = findie(skb->data, skb->len, IE_FACILITY, 0))) {
- l3ni1_parse_facility(pc->st, pc, pc->callref, p);
- }
- if ((ret < 0) && (pc->state != 11))
- cause = 96;
- else if (ret > 0)
- cause = 100;
- ret = check_infoelements(pc, skb, ie_RELEASE);
- if (ERR_IE_COMPREHENSION == ret)
- cause = 96;
- else if ((ERR_IE_UNRECOGNIZED == ret) && (!cause))
- cause = 99;
- if (cause)
- l3ni1_message_cause(pc, MT_RELEASE_COMPLETE, cause);
- else
- l3ni1_message(pc, MT_RELEASE_COMPLETE);
- pc->st->l3.l3l4(pc->st, CC_RELEASE | INDICATION, pc);
- newl3state(pc, 0);
- ni1_release_l3_process(pc);
-}
-
-static void
-l3ni1_alert_req(struct l3_process *pc, u_char pr,
- void *arg)
-{
- newl3state(pc, 7);
- if (!pc->prot.ni1.uus1_data[0])
- l3ni1_message(pc, MT_ALERTING);
- else
- l3ni1_msg_with_uus(pc, MT_ALERTING);
-}
-
-static void
-l3ni1_proceed_req(struct l3_process *pc, u_char pr,
- void *arg)
-{
- newl3state(pc, 9);
- l3ni1_message(pc, MT_CALL_PROCEEDING);
- pc->st->l3.l3l4(pc->st, CC_PROCEED_SEND | INDICATION, pc);
-}
-
-static void
-l3ni1_setup_ack_req(struct l3_process *pc, u_char pr,
- void *arg)
-{
- newl3state(pc, 25);
- L3DelTimer(&pc->timer);
- L3AddTimer(&pc->timer, T302, CC_T302);
- l3ni1_message(pc, MT_SETUP_ACKNOWLEDGE);
-}
-
-/********************************************/
-/* deliver an incoming display message to HL */
-/********************************************/
-static void
-l3ni1_deliver_display(struct l3_process *pc, int pr, u_char *infp)
-{ u_char len;
- isdn_ctrl ic;
- struct IsdnCardState *cs;
- char *p;
-
- if (*infp++ != IE_DISPLAY) return;
- if ((len = *infp++) > 80) return; /* total length <= 82 */
- if (!pc->chan) return;
-
- p = ic.parm.display;
- while (len--)
- *p++ = *infp++;
- *p = '\0';
- ic.command = ISDN_STAT_DISPLAY;
- cs = pc->st->l1.hardware;
- ic.driver = cs->myid;
- ic.arg = pc->chan->chan;
- cs->iif.statcallb(&ic);
-} /* l3ni1_deliver_display */
-
-
-static void
-l3ni1_progress(struct l3_process *pc, u_char pr, void *arg)
-{
- struct sk_buff *skb = arg;
- int err = 0;
- u_char *p;
-
- if ((p = findie(skb->data, skb->len, IE_PROGRESS, 0))) {
- if (p[1] != 2) {
- err = 1;
- pc->para.cause = 100;
- } else if (!(p[2] & 0x70)) {
- switch (p[2]) {
- case 0x80:
- case 0x81:
- case 0x82:
- case 0x84:
- case 0x85:
- case 0x87:
- case 0x8a:
- switch (p[3]) {
- case 0x81:
- case 0x82:
- case 0x83:
- case 0x84:
- case 0x88:
- break;
- default:
- err = 2;
- pc->para.cause = 100;
- break;
- }
- break;
- default:
- err = 3;
- pc->para.cause = 100;
- break;
- }
- }
- } else {
- pc->para.cause = 96;
- err = 4;
- }
- if (err) {
- if (pc->debug & L3_DEB_WARN)
- l3_debug(pc->st, "progress error %d", err);
- l3ni1_status_send(pc, pr, NULL);
- return;
- }
-	/* Now we are at the non-mandatory IEs */
- err = check_infoelements(pc, skb, ie_PROGRESS);
- if (err)
- l3ni1_std_ie_err(pc, err);
- if (ERR_IE_COMPREHENSION != err)
- pc->st->l3.l3l4(pc->st, CC_PROGRESS | INDICATION, pc);
-}
-
-static void
-l3ni1_notify(struct l3_process *pc, u_char pr, void *arg)
-{
- struct sk_buff *skb = arg;
- int err = 0;
- u_char *p;
-
- if ((p = findie(skb->data, skb->len, IE_NOTIFY, 0))) {
- if (p[1] != 1) {
- err = 1;
- pc->para.cause = 100;
- } else {
- switch (p[2]) {
- case 0x80:
- case 0x81:
- case 0x82:
- break;
- default:
- pc->para.cause = 100;
- err = 2;
- break;
- }
- }
- } else {
- pc->para.cause = 96;
- err = 3;
- }
- if (err) {
- if (pc->debug & L3_DEB_WARN)
- l3_debug(pc->st, "notify error %d", err);
- l3ni1_status_send(pc, pr, NULL);
- return;
- }
-	/* Now we are at the non-mandatory IEs */
- err = check_infoelements(pc, skb, ie_NOTIFY);
- if (err)
- l3ni1_std_ie_err(pc, err);
- if (ERR_IE_COMPREHENSION != err)
- pc->st->l3.l3l4(pc->st, CC_NOTIFY | INDICATION, pc);
-}
-
-static void
-l3ni1_status_enq(struct l3_process *pc, u_char pr, void *arg)
-{
- int ret;
- struct sk_buff *skb = arg;
-
- ret = check_infoelements(pc, skb, ie_STATUS_ENQUIRY);
- l3ni1_std_ie_err(pc, ret);
- pc->para.cause = 30; /* response to STATUS_ENQUIRY */
- l3ni1_status_send(pc, pr, NULL);
-}
-
-static void
-l3ni1_information(struct l3_process *pc, u_char pr, void *arg)
-{
- int ret;
- struct sk_buff *skb = arg;
- u_char *p;
- char tmp[32];
-
- ret = check_infoelements(pc, skb, ie_INFORMATION);
- if (ret)
- l3ni1_std_ie_err(pc, ret);
- if (pc->state == 25) { /* overlap receiving */
- L3DelTimer(&pc->timer);
- p = skb->data;
- if ((p = findie(p, skb->len, 0x70, 0))) {
- iecpy(tmp, p, 1);
- strcat(pc->para.setup.eazmsn, tmp);
- pc->st->l3.l3l4(pc->st, CC_MORE_INFO | INDICATION, pc);
- }
- L3AddTimer(&pc->timer, T302, CC_T302);
- }
-}
-
-/******************************/
-/* handle deflection requests */
-/******************************/
-static void l3ni1_redir_req(struct l3_process *pc, u_char pr, void *arg)
-{
- struct sk_buff *skb;
- u_char tmp[128];
- u_char *p = tmp;
- u_char *subp;
- u_char len_phone = 0;
- u_char len_sub = 0;
- int l;
-
- strcpy(pc->prot.ni1.uus1_data, pc->chan->setup.eazmsn); /* copy uus element if available */
- if (!pc->chan->setup.phone[0])
- { pc->para.cause = -1;
- l3ni1_disconnect_req(pc, pr, arg); /* disconnect immediately */
- return;
- } /* only uus */
-
- if (pc->prot.ni1.invoke_id)
- free_invoke_id(pc->st, pc->prot.ni1.invoke_id);
-
- if (!(pc->prot.ni1.invoke_id = new_invoke_id(pc->st)))
- return;
-
- MsgHead(p, pc->callref, MT_FACILITY);
-
- for (subp = pc->chan->setup.phone; (*subp) && (*subp != '.'); subp++) len_phone++; /* len of phone number */
- if (*subp++ == '.') len_sub = strlen(subp) + 2; /* length including info subaddress element */
-
- *p++ = 0x1c; /* Facility info element */
- *p++ = len_phone + len_sub + 2 + 2 + 8 + 3 + 3; /* length of element */
- *p++ = 0x91; /* remote operations protocol */
- *p++ = 0xa1; /* invoke component */
-
- *p++ = len_phone + len_sub + 2 + 2 + 8 + 3; /* length of data */
- *p++ = 0x02; /* invoke id tag, integer */
- *p++ = 0x01; /* length */
- *p++ = pc->prot.ni1.invoke_id; /* invoke id */
- *p++ = 0x02; /* operation value tag, integer */
- *p++ = 0x01; /* length */
- *p++ = 0x0D; /* Call Deflect */
-
- *p++ = 0x30; /* sequence phone number */
- *p++ = len_phone + 2 + 2 + 3 + len_sub; /* length */
-
- *p++ = 0x30; /* Deflected to UserNumber */
- *p++ = len_phone + 2 + len_sub; /* length */
- *p++ = 0x80; /* NumberDigits */
- *p++ = len_phone; /* length */
- for (l = 0; l < len_phone; l++)
- *p++ = pc->chan->setup.phone[l];
-
- if (len_sub)
- { *p++ = 0x04; /* called party subaddress */
- *p++ = len_sub - 2;
- while (*subp) *p++ = *subp++;
- }
-
- *p++ = 0x01; /* screening identifier */
- *p++ = 0x01;
- *p++ = pc->chan->setup.screen;
-
- l = p - tmp;
- if (!(skb = l3_alloc_skb(l))) return;
- skb_put_data(skb, tmp, l);
-
- l3_msg(pc->st, DL_DATA | REQUEST, skb);
-} /* l3ni1_redir_req */
-
-/********************************************/
-/* handle deflection request in early state */
-/********************************************/
-static void l3ni1_redir_req_early(struct l3_process *pc, u_char pr, void *arg)
-{
- l3ni1_proceed_req(pc, pr, arg);
- l3ni1_redir_req(pc, pr, arg);
-} /* l3ni1_redir_req_early */
-
-/***********************************************/
-/* handle special commands for this protocol. */
-/* Examples are call independent services like */
-/* remote operations with dummy callref. */
-/***********************************************/
-static int l3ni1_cmd_global(struct PStack *st, isdn_ctrl *ic)
-{ u_char id;
- u_char temp[265];
- u_char *p = temp;
- int i, l, proc_len;
- struct sk_buff *skb;
- struct l3_process *pc = NULL;
-
- switch (ic->arg)
- { case NI1_CMD_INVOKE:
- if (ic->parm.ni1_io.datalen < 0) return (-2); /* invalid parameter */
-
- for (proc_len = 1, i = ic->parm.ni1_io.proc >> 8; i; i++)
- i = i >> 8; /* add one byte */
- l = ic->parm.ni1_io.datalen + proc_len + 8; /* length excluding ie header */
- if (l > 255)
- return (-2); /* too long */
-
- if (!(id = new_invoke_id(st)))
-			return (0); /* first get an invoke id -> return if none is available */
-
- i = -1;
- MsgHead(p, i, MT_FACILITY); /* build message head */
- *p++ = 0x1C; /* Facility IE */
- *p++ = l; /* length of ie */
- *p++ = 0x91; /* remote operations */
- *p++ = 0xA1; /* invoke */
- *p++ = l - 3; /* length of invoke */
- *p++ = 0x02; /* invoke id tag */
- *p++ = 0x01; /* length is 1 */
- *p++ = id; /* invoke id */
- *p++ = 0x02; /* operation */
- *p++ = proc_len; /* length of operation */
-
- for (i = proc_len; i; i--)
- *p++ = (ic->parm.ni1_io.proc >> (i - 1)) & 0xFF;
- memcpy(p, ic->parm.ni1_io.data, ic->parm.ni1_io.datalen); /* copy data */
- l = (p - temp) + ic->parm.ni1_io.datalen; /* total length */
-
- if (ic->parm.ni1_io.timeout > 0) {
- pc = ni1_new_l3_process(st, -1);
- if (!pc) {
- free_invoke_id(st, id);
- return (-2);
- }
- /* remember id */
- pc->prot.ni1.ll_id = ic->parm.ni1_io.ll_id;
- /* and procedure */
- pc->prot.ni1.proc = ic->parm.ni1_io.proc;
- }
-
- if (!(skb = l3_alloc_skb(l)))
- { free_invoke_id(st, id);
- if (pc) ni1_release_l3_process(pc);
- return (-2);
- }
- skb_put_data(skb, temp, l);
-
- if (pc)
- { pc->prot.ni1.invoke_id = id; /* remember id */
- L3AddTimer(&pc->timer, ic->parm.ni1_io.timeout, CC_TNI1_IO | REQUEST);
- }
-
- l3_msg(st, DL_DATA | REQUEST, skb);
- ic->parm.ni1_io.hl_id = id; /* return id */
- return (0);
-
- case NI1_CMD_INVOKE_ABORT:
- if ((pc = l3ni1_search_dummy_proc(st, ic->parm.ni1_io.hl_id)))
- { L3DelTimer(&pc->timer); /* remove timer */
- ni1_release_l3_process(pc);
- return (0);
- }
- else
- { l3_debug(st, "l3ni1_cmd_global abort unknown id");
- return (-2);
- }
- break;
-
- default:
- l3_debug(st, "l3ni1_cmd_global unknown cmd 0x%lx", ic->arg);
- return (-1);
- } /* switch ic-> arg */
- return (-1);
-} /* l3ni1_cmd_global */
-
-static void
-l3ni1_io_timer(struct l3_process *pc)
-{ isdn_ctrl ic;
- struct IsdnCardState *cs = pc->st->l1.hardware;
-
- L3DelTimer(&pc->timer); /* remove timer */
-
- ic.driver = cs->myid;
- ic.command = ISDN_STAT_PROT;
- ic.arg = NI1_STAT_INVOKE_ERR;
- ic.parm.ni1_io.hl_id = pc->prot.ni1.invoke_id;
- ic.parm.ni1_io.ll_id = pc->prot.ni1.ll_id;
- ic.parm.ni1_io.proc = pc->prot.ni1.proc;
- ic.parm.ni1_io.timeout = -1;
- ic.parm.ni1_io.datalen = 0;
- ic.parm.ni1_io.data = NULL;
- free_invoke_id(pc->st, pc->prot.ni1.invoke_id);
- pc->prot.ni1.invoke_id = 0; /* reset id */
-
- cs->iif.statcallb(&ic);
-
- ni1_release_l3_process(pc);
-} /* l3ni1_io_timer */
-
-static void
-l3ni1_release_ind(struct l3_process *pc, u_char pr, void *arg)
-{
- u_char *p;
- struct sk_buff *skb = arg;
- int callState = 0;
- p = skb->data;
-
- if ((p = findie(p, skb->len, IE_CALL_STATE, 0))) {
- p++;
- if (1 == *p++)
- callState = *p;
- }
- if (callState == 0) {
- /* ETS 300-104 7.6.1, 8.6.1, 10.6.1... and 16.1
- * set down layer 3 without sending any message
- */
- pc->st->l3.l3l4(pc->st, CC_RELEASE | INDICATION, pc);
- newl3state(pc, 0);
- ni1_release_l3_process(pc);
- } else {
- pc->st->l3.l3l4(pc->st, CC_IGNORE | INDICATION, pc);
- }
-}
-
-static void
-l3ni1_dummy(struct l3_process *pc, u_char pr, void *arg)
-{
-}
-
-static void
-l3ni1_t302(struct l3_process *pc, u_char pr, void *arg)
-{
- L3DelTimer(&pc->timer);
- pc->para.loc = 0;
- pc->para.cause = 28; /* invalid number */
- l3ni1_disconnect_req(pc, pr, NULL);
- pc->st->l3.l3l4(pc->st, CC_SETUP_ERR, pc);
-}
-
-static void
-l3ni1_t303(struct l3_process *pc, u_char pr, void *arg)
-{
- if (pc->N303 > 0) {
- pc->N303--;
- L3DelTimer(&pc->timer);
- l3ni1_setup_req(pc, pr, arg);
- } else {
- L3DelTimer(&pc->timer);
- l3ni1_message_cause(pc, MT_RELEASE_COMPLETE, 102);
- pc->st->l3.l3l4(pc->st, CC_NOSETUP_RSP, pc);
- ni1_release_l3_process(pc);
- }
-}
-
-static void
-l3ni1_t304(struct l3_process *pc, u_char pr, void *arg)
-{
- L3DelTimer(&pc->timer);
- pc->para.loc = 0;
- pc->para.cause = 102;
- l3ni1_disconnect_req(pc, pr, NULL);
- pc->st->l3.l3l4(pc->st, CC_SETUP_ERR, pc);
-
-}
-
-static void
-l3ni1_t305(struct l3_process *pc, u_char pr, void *arg)
-{
- u_char tmp[16];
- u_char *p = tmp;
- int l;
- struct sk_buff *skb;
- u_char cause = 16;
-
- L3DelTimer(&pc->timer);
- if (pc->para.cause != NO_CAUSE)
- cause = pc->para.cause;
-
- MsgHead(p, pc->callref, MT_RELEASE);
-
- *p++ = IE_CAUSE;
- *p++ = 0x2;
- *p++ = 0x80;
- *p++ = cause | 0x80;
-
- l = p - tmp;
- if (!(skb = l3_alloc_skb(l)))
- return;
- skb_put_data(skb, tmp, l);
- newl3state(pc, 19);
- l3_msg(pc->st, DL_DATA | REQUEST, skb);
- L3AddTimer(&pc->timer, T308, CC_T308_1);
-}
-
-static void
-l3ni1_t310(struct l3_process *pc, u_char pr, void *arg)
-{
- L3DelTimer(&pc->timer);
- pc->para.loc = 0;
- pc->para.cause = 102;
- l3ni1_disconnect_req(pc, pr, NULL);
- pc->st->l3.l3l4(pc->st, CC_SETUP_ERR, pc);
-}
-
-static void
-l3ni1_t313(struct l3_process *pc, u_char pr, void *arg)
-{
- L3DelTimer(&pc->timer);
- pc->para.loc = 0;
- pc->para.cause = 102;
- l3ni1_disconnect_req(pc, pr, NULL);
- pc->st->l3.l3l4(pc->st, CC_CONNECT_ERR, pc);
-}
-
-static void
-l3ni1_t308_1(struct l3_process *pc, u_char pr, void *arg)
-{
- newl3state(pc, 19);
- L3DelTimer(&pc->timer);
- l3ni1_message(pc, MT_RELEASE);
- L3AddTimer(&pc->timer, T308, CC_T308_2);
-}
-
-static void
-l3ni1_t308_2(struct l3_process *pc, u_char pr, void *arg)
-{
- L3DelTimer(&pc->timer);
- pc->st->l3.l3l4(pc->st, CC_RELEASE_ERR, pc);
- ni1_release_l3_process(pc);
-}
-
-static void
-l3ni1_t318(struct l3_process *pc, u_char pr, void *arg)
-{
- L3DelTimer(&pc->timer);
- pc->para.cause = 102; /* Timer expiry */
- pc->para.loc = 0; /* local */
- pc->st->l3.l3l4(pc->st, CC_RESUME_ERR, pc);
- newl3state(pc, 19);
- l3ni1_message(pc, MT_RELEASE);
- L3AddTimer(&pc->timer, T308, CC_T308_1);
-}
-
-static void
-l3ni1_t319(struct l3_process *pc, u_char pr, void *arg)
-{
- L3DelTimer(&pc->timer);
- pc->para.cause = 102; /* Timer expiry */
- pc->para.loc = 0; /* local */
- pc->st->l3.l3l4(pc->st, CC_SUSPEND_ERR, pc);
- newl3state(pc, 10);
-}
-
-static void
-l3ni1_restart(struct l3_process *pc, u_char pr, void *arg)
-{
- L3DelTimer(&pc->timer);
- pc->st->l3.l3l4(pc->st, CC_RELEASE | INDICATION, pc);
- ni1_release_l3_process(pc);
-}
-
-static void
-l3ni1_status(struct l3_process *pc, u_char pr, void *arg)
-{
- u_char *p;
- struct sk_buff *skb = arg;
- int ret;
- u_char cause = 0, callState = 0;
-
- if ((ret = l3ni1_get_cause(pc, skb))) {
- if (pc->debug & L3_DEB_WARN)
- l3_debug(pc->st, "STATUS get_cause ret(%d)", ret);
- if (ret < 0)
- cause = 96;
- else if (ret > 0)
- cause = 100;
- }
- if ((p = findie(skb->data, skb->len, IE_CALL_STATE, 0))) {
- p++;
- if (1 == *p++) {
- callState = *p;
- if (!ie_in_set(pc, *p, l3_valid_states))
- cause = 100;
- } else
- cause = 100;
- } else
- cause = 96;
- if (!cause) { /* no error before */
- ret = check_infoelements(pc, skb, ie_STATUS);
- if (ERR_IE_COMPREHENSION == ret)
- cause = 96;
- else if (ERR_IE_UNRECOGNIZED == ret)
- cause = 99;
- }
- if (cause) {
- u_char tmp;
-
- if (pc->debug & L3_DEB_WARN)
- l3_debug(pc->st, "STATUS error(%d/%d)", ret, cause);
- tmp = pc->para.cause;
- pc->para.cause = cause;
- l3ni1_status_send(pc, 0, NULL);
- if (cause == 99)
- pc->para.cause = tmp;
- else
- return;
- }
- cause = pc->para.cause;
- if (((cause & 0x7f) == 111) && (callState == 0)) {
- /* ETS 300-104 7.6.1, 8.6.1, 10.6.1...
- * if received MT_STATUS with cause == 111 and call
- * state == 0, then we must set down layer 3
- */
- pc->st->l3.l3l4(pc->st, CC_RELEASE | INDICATION, pc);
- newl3state(pc, 0);
- ni1_release_l3_process(pc);
- }
-}
-
-static void
-l3ni1_facility(struct l3_process *pc, u_char pr, void *arg)
-{
- struct sk_buff *skb = arg;
- int ret;
-
- ret = check_infoelements(pc, skb, ie_FACILITY);
- l3ni1_std_ie_err(pc, ret);
- {
- u_char *p;
- if ((p = findie(skb->data, skb->len, IE_FACILITY, 0)))
- l3ni1_parse_facility(pc->st, pc, pc->callref, p);
- }
-}
-
-static void
-l3ni1_suspend_req(struct l3_process *pc, u_char pr, void *arg)
-{
- struct sk_buff *skb;
- u_char tmp[32];
- u_char *p = tmp;
- u_char i, l;
- u_char *msg = pc->chan->setup.phone;
-
- MsgHead(p, pc->callref, MT_SUSPEND);
- l = *msg++;
- if (l && (l <= 10)) { /* Max length 10 octets */
- *p++ = IE_CALL_ID;
- *p++ = l;
- for (i = 0; i < l; i++)
- *p++ = *msg++;
- } else if (l) {
- l3_debug(pc->st, "SUS wrong CALL_ID len %d", l);
- return;
- }
- l = p - tmp;
- if (!(skb = l3_alloc_skb(l)))
- return;
- skb_put_data(skb, tmp, l);
- l3_msg(pc->st, DL_DATA | REQUEST, skb);
- newl3state(pc, 15);
- L3AddTimer(&pc->timer, T319, CC_T319);
-}
-
-static void
-l3ni1_suspend_ack(struct l3_process *pc, u_char pr, void *arg)
-{
- struct sk_buff *skb = arg;
- int ret;
-
- L3DelTimer(&pc->timer);
- newl3state(pc, 0);
- pc->para.cause = NO_CAUSE;
- pc->st->l3.l3l4(pc->st, CC_SUSPEND | CONFIRM, pc);
- /* We don't handle suspend_ack for IE errors now */
- if ((ret = check_infoelements(pc, skb, ie_SUSPEND_ACKNOWLEDGE)))
- if (pc->debug & L3_DEB_WARN)
- l3_debug(pc->st, "SUSPACK check ie(%d)", ret);
- ni1_release_l3_process(pc);
-}
-
-static void
-l3ni1_suspend_rej(struct l3_process *pc, u_char pr, void *arg)
-{
- struct sk_buff *skb = arg;
- int ret;
-
- if ((ret = l3ni1_get_cause(pc, skb))) {
- if (pc->debug & L3_DEB_WARN)
- l3_debug(pc->st, "SUSP_REJ get_cause ret(%d)", ret);
- if (ret < 0)
- pc->para.cause = 96;
- else
- pc->para.cause = 100;
- l3ni1_status_send(pc, pr, NULL);
- return;
- }
- ret = check_infoelements(pc, skb, ie_SUSPEND_REJECT);
- if (ERR_IE_COMPREHENSION == ret) {
- l3ni1_std_ie_err(pc, ret);
- return;
- }
- L3DelTimer(&pc->timer);
- pc->st->l3.l3l4(pc->st, CC_SUSPEND_ERR, pc);
- newl3state(pc, 10);
-	if (ret) /* STATUS for non-mandatory IE errors after actions are taken */
- l3ni1_std_ie_err(pc, ret);
-}
-
-static void
-l3ni1_resume_req(struct l3_process *pc, u_char pr, void *arg)
-{
- struct sk_buff *skb;
- u_char tmp[32];
- u_char *p = tmp;
- u_char i, l;
- u_char *msg = pc->para.setup.phone;
-
- MsgHead(p, pc->callref, MT_RESUME);
-
- l = *msg++;
- if (l && (l <= 10)) { /* Max length 10 octets */
- *p++ = IE_CALL_ID;
- *p++ = l;
- for (i = 0; i < l; i++)
- *p++ = *msg++;
- } else if (l) {
- l3_debug(pc->st, "RES wrong CALL_ID len %d", l);
- return;
- }
- l = p - tmp;
- if (!(skb = l3_alloc_skb(l)))
- return;
- skb_put_data(skb, tmp, l);
- l3_msg(pc->st, DL_DATA | REQUEST, skb);
- newl3state(pc, 17);
- L3AddTimer(&pc->timer, T318, CC_T318);
-}
-
-static void
-l3ni1_resume_ack(struct l3_process *pc, u_char pr, void *arg)
-{
- struct sk_buff *skb = arg;
- int id, ret;
-
- if ((id = l3ni1_get_channel_id(pc, skb)) > 0) {
- if ((0 == id) || ((3 == id) && (0x10 == pc->para.moderate))) {
- if (pc->debug & L3_DEB_WARN)
- l3_debug(pc->st, "resume ack with wrong chid %x", id);
- pc->para.cause = 100;
- l3ni1_status_send(pc, pr, NULL);
- return;
- }
- pc->para.bchannel = id;
- } else if (1 == pc->state) {
- if (pc->debug & L3_DEB_WARN)
- l3_debug(pc->st, "resume ack without chid (ret %d)", id);
- pc->para.cause = 96;
- l3ni1_status_send(pc, pr, NULL);
- return;
- }
- ret = check_infoelements(pc, skb, ie_RESUME_ACKNOWLEDGE);
- if (ERR_IE_COMPREHENSION == ret) {
- l3ni1_std_ie_err(pc, ret);
- return;
- }
- L3DelTimer(&pc->timer);
- pc->st->l3.l3l4(pc->st, CC_RESUME | CONFIRM, pc);
- newl3state(pc, 10);
-	if (ret) /* STATUS for non-mandatory IE errors after actions are taken */
- l3ni1_std_ie_err(pc, ret);
-}
-
-static void
-l3ni1_resume_rej(struct l3_process *pc, u_char pr, void *arg)
-{
- struct sk_buff *skb = arg;
- int ret;
-
- if ((ret = l3ni1_get_cause(pc, skb))) {
- if (pc->debug & L3_DEB_WARN)
- l3_debug(pc->st, "RES_REJ get_cause ret(%d)", ret);
- if (ret < 0)
- pc->para.cause = 96;
- else
- pc->para.cause = 100;
- l3ni1_status_send(pc, pr, NULL);
- return;
- }
- ret = check_infoelements(pc, skb, ie_RESUME_REJECT);
- if (ERR_IE_COMPREHENSION == ret) {
- l3ni1_std_ie_err(pc, ret);
- return;
- }
- L3DelTimer(&pc->timer);
- pc->st->l3.l3l4(pc->st, CC_RESUME_ERR, pc);
- newl3state(pc, 0);
-	if (ret) /* STATUS for non-mandatory IE errors after actions are taken */
- l3ni1_std_ie_err(pc, ret);
- ni1_release_l3_process(pc);
-}
-
-static void
-l3ni1_global_restart(struct l3_process *pc, u_char pr, void *arg)
-{
- u_char tmp[32];
- u_char *p;
- u_char ri, ch = 0, chan = 0;
- int l;
- struct sk_buff *skb = arg;
- struct l3_process *up;
-
- newl3state(pc, 2);
- L3DelTimer(&pc->timer);
- p = skb->data;
- if ((p = findie(p, skb->len, IE_RESTART_IND, 0))) {
- ri = p[2];
- l3_debug(pc->st, "Restart %x", ri);
- } else {
- l3_debug(pc->st, "Restart without restart IE");
- ri = 0x86;
- }
- p = skb->data;
- if ((p = findie(p, skb->len, IE_CHANNEL_ID, 0))) {
- chan = p[2] & 3;
- ch = p[2];
- if (pc->st->l3.debug)
- l3_debug(pc->st, "Restart for channel %d", chan);
- }
- newl3state(pc, 2);
- up = pc->st->l3.proc;
- while (up) {
- if ((ri & 7) == 7)
- up->st->lli.l4l3(up->st, CC_RESTART | REQUEST, up);
- else if (up->para.bchannel == chan)
- up->st->lli.l4l3(up->st, CC_RESTART | REQUEST, up);
-
- up = up->next;
- }
- p = tmp;
- MsgHead(p, pc->callref, MT_RESTART_ACKNOWLEDGE);
- if (chan) {
- *p++ = IE_CHANNEL_ID;
- *p++ = 1;
- *p++ = ch | 0x80;
- }
- *p++ = 0x79; /* RESTART Ind */
- *p++ = 1;
- *p++ = ri;
- l = p - tmp;
- if (!(skb = l3_alloc_skb(l)))
- return;
- skb_put_data(skb, tmp, l);
- newl3state(pc, 0);
- l3_msg(pc->st, DL_DATA | REQUEST, skb);
-}
-
-static void
-l3ni1_dl_reset(struct l3_process *pc, u_char pr, void *arg)
-{
- pc->para.cause = 0x29; /* Temporary failure */
- pc->para.loc = 0;
- l3ni1_disconnect_req(pc, pr, NULL);
- pc->st->l3.l3l4(pc->st, CC_SETUP_ERR, pc);
-}
-
-static void
-l3ni1_dl_release(struct l3_process *pc, u_char pr, void *arg)
-{
- newl3state(pc, 0);
- pc->para.cause = 0x1b; /* Destination out of order */
- pc->para.loc = 0;
- pc->st->l3.l3l4(pc->st, CC_RELEASE | INDICATION, pc);
- release_l3_process(pc);
-}
-
-static void
-l3ni1_dl_reestablish(struct l3_process *pc, u_char pr, void *arg)
-{
- L3DelTimer(&pc->timer);
- L3AddTimer(&pc->timer, T309, CC_T309);
- l3_msg(pc->st, DL_ESTABLISH | REQUEST, NULL);
-}
-
-static void
-l3ni1_dl_reest_status(struct l3_process *pc, u_char pr, void *arg)
-{
- L3DelTimer(&pc->timer);
-
- pc->para.cause = 0x1F; /* normal, unspecified */
- l3ni1_status_send(pc, 0, NULL);
-}
-
-static void l3ni1_SendSpid(struct l3_process *pc, u_char pr, struct sk_buff *skb, int iNewState)
-{
- u_char *p;
- char *pSPID;
- struct Channel *pChan = pc->st->lli.userdata;
- int l;
-
- if (skb)
- dev_kfree_skb(skb);
-
- if (!(pSPID = strchr(pChan->setup.eazmsn, ':')))
- {
- printk(KERN_ERR "SPID not supplied in EAZMSN %s\n", pChan->setup.eazmsn);
- newl3state(pc, 0);
- pc->st->l3.l3l2(pc->st, DL_RELEASE | REQUEST, NULL);
- return;
- }
-
- l = strlen(++pSPID);
- if (!(skb = l3_alloc_skb(5 + l)))
- {
- printk(KERN_ERR "HiSax can't get memory to send SPID\n");
- return;
- }
-
- p = skb_put(skb, 5);
- *p++ = PROTO_DIS_EURO;
- *p++ = 0;
- *p++ = MT_INFORMATION;
- *p++ = IE_SPID;
- *p++ = l;
-
- skb_put_data(skb, pSPID, l);
-
- newl3state(pc, iNewState);
-
- L3DelTimer(&pc->timer);
- L3AddTimer(&pc->timer, TSPID, CC_TSPID);
-
- pc->st->l3.l3l2(pc->st, DL_DATA | REQUEST, skb);
-}
-
-static void l3ni1_spid_send(struct l3_process *pc, u_char pr, void *arg)
-{
- l3ni1_SendSpid(pc, pr, arg, 20);
-}
-
-static void l3ni1_spid_epid(struct l3_process *pc, u_char pr, void *arg)
-{
- struct sk_buff *skb = arg;
-
- if (skb->data[1] == 0)
- if (skb->data[3] == IE_ENDPOINT_ID)
- {
- L3DelTimer(&pc->timer);
- newl3state(pc, 0);
- l3_msg(pc->st, DL_ESTABLISH | CONFIRM, NULL);
- }
- dev_kfree_skb(skb);
-}
-
-static void l3ni1_spid_tout(struct l3_process *pc, u_char pr, void *arg)
-{
- if (pc->state < 22)
- l3ni1_SendSpid(pc, pr, arg, pc->state + 1);
- else
- {
- L3DelTimer(&pc->timer);
- dev_kfree_skb(arg);
-
- printk(KERN_ERR "SPID not accepted\n");
- newl3state(pc, 0);
- pc->st->l3.l3l2(pc->st, DL_RELEASE | REQUEST, NULL);
- }
-}
-
-/* *INDENT-OFF* */
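-/* state tables: primitives coming down from layer 4 (downstatelist), messages
-   received from the network (datastatelist), messages on the global call
-   reference (globalmes_list) and management primitives (manstatelist) */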
-static struct stateentry downstatelist[] =
-{
- {SBIT(0),
- CC_SETUP | REQUEST, l3ni1_setup_req},
- {SBIT(0),
- CC_RESUME | REQUEST, l3ni1_resume_req},
- {SBIT(1) | SBIT(2) | SBIT(3) | SBIT(4) | SBIT(6) | SBIT(7) | SBIT(8) | SBIT(9) | SBIT(10) | SBIT(25),
- CC_DISCONNECT | REQUEST, l3ni1_disconnect_req},
- {SBIT(12),
- CC_RELEASE | REQUEST, l3ni1_release_req},
- {ALL_STATES,
- CC_RESTART | REQUEST, l3ni1_restart},
- {SBIT(6) | SBIT(25),
- CC_IGNORE | REQUEST, l3ni1_reset},
- {SBIT(6) | SBIT(25),
- CC_REJECT | REQUEST, l3ni1_reject_req},
- {SBIT(6) | SBIT(25),
- CC_PROCEED_SEND | REQUEST, l3ni1_proceed_req},
- {SBIT(6),
- CC_MORE_INFO | REQUEST, l3ni1_setup_ack_req},
- {SBIT(25),
- CC_MORE_INFO | REQUEST, l3ni1_dummy},
- {SBIT(6) | SBIT(9) | SBIT(25),
- CC_ALERTING | REQUEST, l3ni1_alert_req},
- {SBIT(6) | SBIT(7) | SBIT(9) | SBIT(25),
- CC_SETUP | RESPONSE, l3ni1_setup_rsp},
- {SBIT(10),
- CC_SUSPEND | REQUEST, l3ni1_suspend_req},
- {SBIT(7) | SBIT(9) | SBIT(25),
- CC_REDIR | REQUEST, l3ni1_redir_req},
- {SBIT(6),
- CC_REDIR | REQUEST, l3ni1_redir_req_early},
- {SBIT(9) | SBIT(25),
- CC_DISCONNECT | REQUEST, l3ni1_disconnect_req},
- {SBIT(25),
- CC_T302, l3ni1_t302},
- {SBIT(1),
- CC_T303, l3ni1_t303},
- {SBIT(2),
- CC_T304, l3ni1_t304},
- {SBIT(3),
- CC_T310, l3ni1_t310},
- {SBIT(8),
- CC_T313, l3ni1_t313},
- {SBIT(11),
- CC_T305, l3ni1_t305},
- {SBIT(15),
- CC_T319, l3ni1_t319},
- {SBIT(17),
- CC_T318, l3ni1_t318},
- {SBIT(19),
- CC_T308_1, l3ni1_t308_1},
- {SBIT(19),
- CC_T308_2, l3ni1_t308_2},
- {SBIT(10),
- CC_T309, l3ni1_dl_release},
- { SBIT(20) | SBIT(21) | SBIT(22),
- CC_TSPID, l3ni1_spid_tout },
-};
-
-static struct stateentry datastatelist[] =
-{
- {ALL_STATES,
- MT_STATUS_ENQUIRY, l3ni1_status_enq},
- {ALL_STATES,
- MT_FACILITY, l3ni1_facility},
- {SBIT(19),
- MT_STATUS, l3ni1_release_ind},
- {ALL_STATES,
- MT_STATUS, l3ni1_status},
- {SBIT(0),
- MT_SETUP, l3ni1_setup},
- {SBIT(6) | SBIT(7) | SBIT(8) | SBIT(9) | SBIT(10) | SBIT(11) | SBIT(12) |
- SBIT(15) | SBIT(17) | SBIT(19) | SBIT(25),
- MT_SETUP, l3ni1_dummy},
- {SBIT(1) | SBIT(2),
- MT_CALL_PROCEEDING, l3ni1_call_proc},
- {SBIT(1),
- MT_SETUP_ACKNOWLEDGE, l3ni1_setup_ack},
- {SBIT(2) | SBIT(3),
- MT_ALERTING, l3ni1_alerting},
- {SBIT(2) | SBIT(3),
- MT_PROGRESS, l3ni1_progress},
- {SBIT(2) | SBIT(3) | SBIT(4) | SBIT(7) | SBIT(8) | SBIT(9) | SBIT(10) |
- SBIT(11) | SBIT(12) | SBIT(15) | SBIT(17) | SBIT(19) | SBIT(25),
- MT_INFORMATION, l3ni1_information},
- {SBIT(10) | SBIT(11) | SBIT(15),
- MT_NOTIFY, l3ni1_notify},
- {SBIT(0) | SBIT(1) | SBIT(2) | SBIT(3) | SBIT(4) | SBIT(7) | SBIT(8) | SBIT(10) |
- SBIT(11) | SBIT(12) | SBIT(15) | SBIT(17) | SBIT(19) | SBIT(25),
- MT_RELEASE_COMPLETE, l3ni1_release_cmpl},
- {SBIT(1) | SBIT(2) | SBIT(3) | SBIT(4) | SBIT(7) | SBIT(8) | SBIT(9) | SBIT(10) | SBIT(11) | SBIT(12) | SBIT(15) | SBIT(17) | SBIT(25),
- MT_RELEASE, l3ni1_release},
- {SBIT(19), MT_RELEASE, l3ni1_release_ind},
- {SBIT(1) | SBIT(2) | SBIT(3) | SBIT(4) | SBIT(7) | SBIT(8) | SBIT(9) | SBIT(10) | SBIT(11) | SBIT(15) | SBIT(17) | SBIT(25),
- MT_DISCONNECT, l3ni1_disconnect},
- {SBIT(19),
- MT_DISCONNECT, l3ni1_dummy},
- {SBIT(1) | SBIT(2) | SBIT(3) | SBIT(4),
- MT_CONNECT, l3ni1_connect},
- {SBIT(8),
- MT_CONNECT_ACKNOWLEDGE, l3ni1_connect_ack},
- {SBIT(15),
- MT_SUSPEND_ACKNOWLEDGE, l3ni1_suspend_ack},
- {SBIT(15),
- MT_SUSPEND_REJECT, l3ni1_suspend_rej},
- {SBIT(17),
- MT_RESUME_ACKNOWLEDGE, l3ni1_resume_ack},
- {SBIT(17),
- MT_RESUME_REJECT, l3ni1_resume_rej},
-};
-
-static struct stateentry globalmes_list[] =
-{
- {ALL_STATES,
- MT_STATUS, l3ni1_status},
- {SBIT(0),
- MT_RESTART, l3ni1_global_restart},
-/* {SBIT(1),
- MT_RESTART_ACKNOWLEDGE, l3ni1_restart_ack},
-*/
- { SBIT(0), MT_DL_ESTABLISHED, l3ni1_spid_send },
- { SBIT(20) | SBIT(21) | SBIT(22), MT_INFORMATION, l3ni1_spid_epid },
-};
-
-static struct stateentry manstatelist[] =
-{
- {SBIT(2),
- DL_ESTABLISH | INDICATION, l3ni1_dl_reset},
- {SBIT(10),
- DL_ESTABLISH | CONFIRM, l3ni1_dl_reest_status},
- {SBIT(10),
- DL_RELEASE | INDICATION, l3ni1_dl_reestablish},
- {ALL_STATES,
- DL_RELEASE | INDICATION, l3ni1_dl_release},
-};
-
-/* *INDENT-ON* */
-
-
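-/* dispatch a message received on the global call reference via globalmes_list;
-   unhandled state/message combinations are answered with STATUS,
-   cause 81 "invalid call reference" */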
-static void
-global_handler(struct PStack *st, int mt, struct sk_buff *skb)
-{
- u_char tmp[16];
- u_char *p = tmp;
- int l;
- int i;
- struct l3_process *proc = st->l3.global;
-
- if (skb)
- proc->callref = skb->data[2]; /* cr flag */
- else
- proc->callref = 0;
- for (i = 0; i < ARRAY_SIZE(globalmes_list); i++)
- if ((mt == globalmes_list[i].primitive) &&
- ((1 << proc->state) & globalmes_list[i].state))
- break;
- if (i == ARRAY_SIZE(globalmes_list)) {
- if (st->l3.debug & L3_DEB_STATE) {
- l3_debug(st, "ni1 global state %d mt %x unhandled",
- proc->state, mt);
- }
- MsgHead(p, proc->callref, MT_STATUS);
- *p++ = IE_CAUSE;
- *p++ = 0x2;
- *p++ = 0x80;
- *p++ = 81 | 0x80; /* invalid cr */
- *p++ = 0x14; /* CallState */
- *p++ = 0x1;
- *p++ = proc->state & 0x3f;
- l = p - tmp;
- if (!(skb = l3_alloc_skb(l)))
- return;
- skb_put_data(skb, tmp, l);
- l3_msg(proc->st, DL_DATA | REQUEST, skb);
- } else {
- if (st->l3.debug & L3_DEB_STATE) {
- l3_debug(st, "ni1 global %d mt %x",
- proc->state, mt);
- }
- globalmes_list[i].rout(proc, mt, skb);
- }
-}
-
-static void
-ni1up(struct PStack *st, int pr, void *arg)
-{
- int i, mt, cr, callState;
- char *ptr;
- u_char *p;
- struct sk_buff *skb = arg;
- struct l3_process *proc;
-
- switch (pr) {
- case (DL_DATA | INDICATION):
- case (DL_UNIT_DATA | INDICATION):
- break;
- case (DL_ESTABLISH | INDICATION):
- case (DL_RELEASE | INDICATION):
- case (DL_RELEASE | CONFIRM):
- l3_msg(st, pr, arg);
- return;
- break;
-
- case (DL_ESTABLISH | CONFIRM):
- global_handler(st, MT_DL_ESTABLISHED, NULL);
- return;
-
- default:
- printk(KERN_ERR "HiSax ni1up unknown pr=%04x\n", pr);
- return;
- }
- if (skb->len < 3) {
- l3_debug(st, "ni1up frame too short(%d)", skb->len);
- dev_kfree_skb(skb);
- return;
- }
-
- if (skb->data[0] != PROTO_DIS_EURO) {
- if (st->l3.debug & L3_DEB_PROTERR) {
- l3_debug(st, "ni1up%sunexpected discriminator %x message len %d",
- (pr == (DL_DATA | INDICATION)) ? " " : "(broadcast) ",
- skb->data[0], skb->len);
- }
- dev_kfree_skb(skb);
- return;
- }
- cr = getcallref(skb->data);
- if (skb->len < ((skb->data[1] & 0x0f) + 3)) {
- l3_debug(st, "ni1up frame too short(%d)", skb->len);
- dev_kfree_skb(skb);
- return;
- }
- mt = skb->data[skb->data[1] + 2];
- if (st->l3.debug & L3_DEB_STATE)
- l3_debug(st, "ni1up cr %d", cr);
- if (cr == -2) { /* wrong Callref */
- if (st->l3.debug & L3_DEB_WARN)
- l3_debug(st, "ni1up wrong Callref");
- dev_kfree_skb(skb);
- return;
- } else if (cr == -1) { /* Dummy Callref */
- if (mt == MT_FACILITY)
- {
- if ((p = findie(skb->data, skb->len, IE_FACILITY, 0))) {
- l3ni1_parse_facility(st, NULL,
- (pr == (DL_DATA | INDICATION)) ? -1 : -2, p);
- dev_kfree_skb(skb);
- return;
- }
- }
- else
- {
- global_handler(st, mt, skb);
- return;
- }
-
- if (st->l3.debug & L3_DEB_WARN)
- l3_debug(st, "ni1up dummy Callref (no facility msg or ie)");
- dev_kfree_skb(skb);
- return;
- } else if ((((skb->data[1] & 0x0f) == 1) && (0 == (cr & 0x7f))) ||
- (((skb->data[1] & 0x0f) == 2) && (0 == (cr & 0x7fff)))) { /* Global CallRef */
- if (st->l3.debug & L3_DEB_STATE)
- l3_debug(st, "ni1up Global CallRef");
- global_handler(st, mt, skb);
- dev_kfree_skb(skb);
- return;
- } else if (!(proc = getl3proc(st, cr))) {
-		/* No transaction process exists, which means no call with
-		 * this call reference is active
- */
- if (mt == MT_SETUP) {
- /* Setup creates a new transaction process */
- if (skb->data[2] & 0x80) {
- /* Setup with wrong CREF flag */
- if (st->l3.debug & L3_DEB_STATE)
- l3_debug(st, "ni1up wrong CRef flag");
- dev_kfree_skb(skb);
- return;
- }
- if (!(proc = ni1_new_l3_process(st, cr))) {
-				/* Maybe we should answer with RELEASE_COMPLETE and
-				 * CAUSE 0x2f "Resource unavailable", but this
-				 * needs a new_l3_process too ... arghh
- */
- dev_kfree_skb(skb);
- return;
- }
- } else if (mt == MT_STATUS) {
- if ((ptr = findie(skb->data, skb->len, IE_CAUSE, 0)) != NULL) {
- ptr++;
- if (*ptr++ == 2)
- ptr++;
- }
- callState = 0;
- if ((ptr = findie(skb->data, skb->len, IE_CALL_STATE, 0)) != NULL) {
- ptr++;
- if (*ptr++ == 2)
- ptr++;
- callState = *ptr;
- }
- /* ETS 300-104 part 2.4.1
- * if setup has not been made and a message type
- * MT_STATUS is received with call state == 0,
- * we must send nothing
- */
- if (callState != 0) {
- /* ETS 300-104 part 2.4.2
- * if setup has not been made and a message type
- * MT_STATUS is received with call state != 0,
- * we must send MT_RELEASE_COMPLETE cause 101
- */
- if ((proc = ni1_new_l3_process(st, cr))) {
- proc->para.cause = 101;
- l3ni1_msg_without_setup(proc, 0, NULL);
- }
- }
- dev_kfree_skb(skb);
- return;
- } else if (mt == MT_RELEASE_COMPLETE) {
- dev_kfree_skb(skb);
- return;
- } else {
- /* ETS 300-104 part 2
- * if setup has not been made and a message type
- * (except MT_SETUP and RELEASE_COMPLETE) is received,
- * we must send MT_RELEASE_COMPLETE cause 81 */
- dev_kfree_skb(skb);
- if ((proc = ni1_new_l3_process(st, cr))) {
- proc->para.cause = 81;
- l3ni1_msg_without_setup(proc, 0, NULL);
- }
- return;
- }
- }
- if (l3ni1_check_messagetype_validity(proc, mt, skb)) {
- dev_kfree_skb(skb);
- return;
- }
- if ((p = findie(skb->data, skb->len, IE_DISPLAY, 0)) != NULL)
- l3ni1_deliver_display(proc, pr, p); /* Display IE included */
- for (i = 0; i < ARRAY_SIZE(datastatelist); i++)
- if ((mt == datastatelist[i].primitive) &&
- ((1 << proc->state) & datastatelist[i].state))
- break;
- if (i == ARRAY_SIZE(datastatelist)) {
- if (st->l3.debug & L3_DEB_STATE) {
- l3_debug(st, "ni1up%sstate %d mt %#x unhandled",
- (pr == (DL_DATA | INDICATION)) ? " " : "(broadcast) ",
- proc->state, mt);
- }
- if ((MT_RELEASE_COMPLETE != mt) && (MT_RELEASE != mt)) {
- proc->para.cause = 101;
- l3ni1_status_send(proc, pr, skb);
- }
- } else {
- if (st->l3.debug & L3_DEB_STATE) {
- l3_debug(st, "ni1up%sstate %d mt %x",
- (pr == (DL_DATA | INDICATION)) ? " " : "(broadcast) ",
- proc->state, mt);
- }
- datastatelist[i].rout(proc, pr, skb);
- }
- dev_kfree_skb(skb);
- return;
-}
-
-static void
-ni1down(struct PStack *st, int pr, void *arg)
-{
- int i, cr;
- struct l3_process *proc;
- struct Channel *chan;
-
- if ((DL_ESTABLISH | REQUEST) == pr) {
- l3_msg(st, pr, NULL);
- return;
- } else if (((CC_SETUP | REQUEST) == pr) || ((CC_RESUME | REQUEST) == pr)) {
- chan = arg;
- cr = newcallref();
- cr |= 0x80;
- if ((proc = ni1_new_l3_process(st, cr))) {
- proc->chan = chan;
- chan->proc = proc;
- memcpy(&proc->para.setup, &chan->setup, sizeof(setup_parm));
- proc->callref = cr;
- }
- } else {
- proc = arg;
- }
- if (!proc) {
- printk(KERN_ERR "HiSax ni1down without proc pr=%04x\n", pr);
- return;
- }
-
- if (pr == (CC_TNI1_IO | REQUEST)) {
- l3ni1_io_timer(proc); /* timer expires */
- return;
- }
-
- for (i = 0; i < ARRAY_SIZE(downstatelist); i++)
- if ((pr == downstatelist[i].primitive) &&
- ((1 << proc->state) & downstatelist[i].state))
- break;
- if (i == ARRAY_SIZE(downstatelist)) {
- if (st->l3.debug & L3_DEB_STATE) {
- l3_debug(st, "ni1down state %d prim %#x unhandled",
- proc->state, pr);
- }
- } else {
- if (st->l3.debug & L3_DEB_STATE) {
- l3_debug(st, "ni1down state %d prim %#x",
- proc->state, pr);
- }
- downstatelist[i].rout(proc, pr, arg);
- }
-}
-
-static void
-ni1man(struct PStack *st, int pr, void *arg)
-{
- int i;
- struct l3_process *proc = arg;
-
- if (!proc) {
- printk(KERN_ERR "HiSax ni1man without proc pr=%04x\n", pr);
- return;
- }
- for (i = 0; i < ARRAY_SIZE(manstatelist); i++)
- if ((pr == manstatelist[i].primitive) &&
- ((1 << proc->state) & manstatelist[i].state))
- break;
- if (i == ARRAY_SIZE(manstatelist)) {
- if (st->l3.debug & L3_DEB_STATE) {
- l3_debug(st, "cr %d ni1man state %d prim %#x unhandled",
- proc->callref & 0x7f, proc->state, pr);
- }
- } else {
- if (st->l3.debug & L3_DEB_STATE) {
- l3_debug(st, "cr %d ni1man state %d prim %#x",
- proc->callref & 0x7f, proc->state, pr);
- }
- manstatelist[i].rout(proc, pr, arg);
- }
-}
-
-void
-setstack_ni1(struct PStack *st)
-{
- char tmp[64];
- int i;
-
- st->lli.l4l3 = ni1down;
- st->lli.l4l3_proto = l3ni1_cmd_global;
- st->l2.l2l3 = ni1up;
- st->l3.l3ml3 = ni1man;
- st->l3.N303 = 1;
- st->prot.ni1.last_invoke_id = 0;
- st->prot.ni1.invoke_used[0] = 1; /* Bit 0 must always be set to 1 */
- i = 1;
- while (i < 32)
- st->prot.ni1.invoke_used[i++] = 0;
-
- if (!(st->l3.global = kmalloc(sizeof(struct l3_process), GFP_ATOMIC))) {
- printk(KERN_ERR "HiSax can't get memory for ni1 global CR\n");
- } else {
- st->l3.global->state = 0;
- st->l3.global->callref = 0;
- st->l3.global->next = NULL;
- st->l3.global->debug = L3_DEB_WARN;
- st->l3.global->st = st;
- st->l3.global->N303 = 1;
- st->l3.global->prot.ni1.invoke_id = 0;
-
- L3InitTimer(st->l3.global, &st->l3.global->timer);
- }
- strcpy(tmp, ni1_revision);
- printk(KERN_INFO "HiSax: National ISDN-1 Rev. %s\n", HiSax_getrev(tmp));
-}
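
For reference, the l3ni1.c code removed above drives call handling through the state tables (downstatelist, datastatelist, globalmes_list, manstatelist): each entry pairs a bitmask of call states with a primitive code and a handler, and ni1up()/ni1down()/ni1man() scan for the first entry whose mask contains the current state. A minimal stand-alone sketch of that dispatch pattern follows; all names and mask values here are illustrative and not taken from HiSax.

#include <stddef.h>
#include <stdio.h>

#define SBIT(state)  (1 << (state))
#define ALL_STATES   0x03ffffff              /* illustrative "any state" mask */

struct call { int state; };

struct stateentry {
	int state;                            /* bitmask of accepting states  */
	int primitive;                        /* message type or primitive    */
	void (*rout)(struct call *, int, void *);
};

static void handle_setup(struct call *c, int pr, void *arg)
{
	c->state = 1;                         /* move to "call initiated"     */
}

static void handle_status(struct call *c, int pr, void *arg)
{
	printf("status enquiry answered in state %d\n", c->state);
}

static struct stateentry demo_list[] = {
	{ SBIT(0),    0x05, handle_setup  },  /* SETUP only in the idle state */
	{ ALL_STATES, 0x75, handle_status },  /* STATUS ENQUIRY in any state  */
};

static void dispatch(struct call *c, int primitive, void *arg)
{
	size_t i;

	for (i = 0; i < sizeof(demo_list) / sizeof(demo_list[0]); i++)
		if (primitive == demo_list[i].primitive &&
		    ((1 << c->state) & demo_list[i].state))
			break;
	if (i == sizeof(demo_list) / sizeof(demo_list[0]))
		printf("state %d primitive %#x unhandled\n", c->state, primitive);
	else
		demo_list[i].rout(c, primitive, arg);
}

int main(void)
{
	struct call c = { 0 };

	dispatch(&c, 0x05, NULL);             /* accepted: state 0 allows SETUP */
	dispatch(&c, 0x05, NULL);             /* unhandled: state 1 does not    */
	dispatch(&c, 0x75, NULL);             /* STATUS ENQUIRY always handled  */
	return 0;
}
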
diff --git a/drivers/isdn/hisax/l3ni1.h b/drivers/isdn/hisax/l3ni1.h
deleted file mode 100644
index 99d37d2cea4f..000000000000
--- a/drivers/isdn/hisax/l3ni1.h
+++ /dev/null
@@ -1,136 +0,0 @@
-/* $Id: l3ni1.h,v 2.3.6.2 2001/09/23 22:24:50 kai Exp $
- *
- * NI1 D-channel protocol
- *
- * Author Matt Henderson & Guy Ellis
- * Copyright by Traverse Technologies Pty Ltd, www.travers.com.au
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- * 2000.6.6 Initial implementation of routines for US NI1
- * Layer 3 protocol based on the EURO/DSS1 D-channel protocol
- * driver written by Karsten Keil et al. Thanks also for the
- * code provided by Ragnar Paulson.
- *
- */
-
-#ifndef l3ni1_process
-
-#define T302 15000
-#define T303 4000
-#define T304 30000
-#define T305 30000
-#define T308 4000
-/* for layer 1 certification T309 < layer1 T3 (e.g. 4000) */
-/* This makes some tests easier and quicker */
-#define T309 40000
-#define T310 30000
-#define T313 4000
-#define T318 4000
-#define T319 4000
-#define TSPID 5000 /* was 2000 - Guy Ellis */
-
-/*
- * Message-Types
- */
-
-#define MT_ALERTING 0x01
-#define MT_CALL_PROCEEDING 0x02
-#define MT_CONNECT 0x07
-#define MT_CONNECT_ACKNOWLEDGE 0x0f
-#define MT_PROGRESS 0x03
-#define MT_SETUP 0x05
-#define MT_SETUP_ACKNOWLEDGE 0x0d
-#define MT_RESUME 0x26
-#define MT_RESUME_ACKNOWLEDGE 0x2e
-#define MT_RESUME_REJECT 0x22
-#define MT_SUSPEND 0x25
-#define MT_SUSPEND_ACKNOWLEDGE 0x2d
-#define MT_SUSPEND_REJECT 0x21
-#define MT_USER_INFORMATION 0x20
-#define MT_DISCONNECT 0x45
-#define MT_RELEASE 0x4d
-#define MT_RELEASE_COMPLETE 0x5a
-#define MT_RESTART 0x46
-#define MT_RESTART_ACKNOWLEDGE 0x4e
-#define MT_SEGMENT 0x60
-#define MT_CONGESTION_CONTROL 0x79
-#define MT_INFORMATION 0x7b
-#define MT_FACILITY 0x62
-#define MT_NOTIFY 0x6e
-#define MT_STATUS 0x7d
-#define MT_STATUS_ENQUIRY 0x75
-#define MT_DL_ESTABLISHED 0xfe
-
-#define IE_SEGMENT 0x00
-#define IE_BEARER 0x04
-#define IE_CAUSE 0x08
-#define IE_CALL_ID 0x10
-#define IE_CALL_STATE 0x14
-#define IE_CHANNEL_ID 0x18
-#define IE_FACILITY 0x1c
-#define IE_PROGRESS 0x1e
-#define IE_NET_FAC 0x20
-#define IE_NOTIFY 0x27
-#define IE_DISPLAY 0x28
-#define IE_DATE 0x29
-#define IE_KEYPAD 0x2c
-#define IE_SIGNAL 0x34
-#define IE_SPID 0x3a
-#define IE_ENDPOINT_ID 0x3b
-#define IE_INFORATE 0x40
-#define IE_E2E_TDELAY 0x42
-#define IE_TDELAY_SEL 0x43
-#define IE_PACK_BINPARA 0x44
-#define IE_PACK_WINSIZE 0x45
-#define IE_PACK_SIZE 0x46
-#define IE_CUG 0x47
-#define IE_REV_CHARGE 0x4a
-#define IE_CONNECT_PN 0x4c
-#define IE_CONNECT_SUB 0x4d
-#define IE_CALLING_PN 0x6c
-#define IE_CALLING_SUB 0x6d
-#define IE_CALLED_PN 0x70
-#define IE_CALLED_SUB 0x71
-#define IE_REDIR_NR 0x74
-#define IE_TRANS_SEL 0x78
-#define IE_RESTART_IND 0x79
-#define IE_LLC 0x7c
-#define IE_HLC 0x7d
-#define IE_USER_USER 0x7e
-#define IE_ESCAPE 0x7f
-#define IE_SHIFT 0x90
-#define IE_MORE_DATA 0xa0
-#define IE_COMPLETE 0xa1
-#define IE_CONGESTION 0xb0
-#define IE_REPEAT 0xd0
-
-#define IE_MANDATORY 0x0100
-/* mandatory not in every case */
-#define IE_MANDATORY_1 0x0200
-
-#define ERR_IE_COMPREHENSION 1
-#define ERR_IE_UNRECOGNIZED -1
-#define ERR_IE_LENGTH -2
-#define ERR_IE_SEQUENCE -3
-
-#else /* only l3ni1_process */
-
-/* l3ni1 specific data in l3 process */
-typedef struct
-{ unsigned char invoke_id; /* used invoke id in remote ops, 0 = not active */
-	ulong ll_id; /* remembered ll id */
- u8 remote_operation; /* handled remote operation, 0 = not active */
-	int proc; /* remembered procedure */
- ulong remote_result; /* result of remote operation for statcallb */
- char uus1_data[35]; /* data send during alerting or disconnect */
-} ni1_proc_priv;
-
-/* l3ni1 specific data in protocol stack */
-typedef struct
-{ unsigned char last_invoke_id; /* last used value for invoking */
- unsigned char invoke_used[32]; /* 256 bits for 256 values */
-} ni1_stk_priv;
-
-#endif /* only l3ni1_process */
diff --git a/drivers/isdn/hisax/lmgr.c b/drivers/isdn/hisax/lmgr.c
deleted file mode 100644
index 5b63eb6601aa..000000000000
--- a/drivers/isdn/hisax/lmgr.c
+++ /dev/null
@@ -1,50 +0,0 @@
-/* $Id: lmgr.c,v 1.7.6.2 2001/09/23 22:24:50 kai Exp $
- *
- * Layermanagement module
- *
- * Author Karsten Keil
- * Copyright by Karsten Keil <keil@isdn4linux.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include "hisax.h"
-
-static void
-error_handling_dchan(struct PStack *st, int Error)
-{
- switch (Error) {
- case 'C':
- case 'D':
- case 'G':
- case 'H':
- st->l2.l2tei(st, MDL_ERROR | REQUEST, NULL);
- break;
- }
-}
-
-static void
-hisax_manager(struct PStack *st, int pr, void *arg)
-{
- long Code;
-
- switch (pr) {
- case (MDL_ERROR | INDICATION):
- Code = (long) arg;
- HiSax_putstatus(st->l1.hardware, "manager: MDL_ERROR",
- " %c %s", (char)Code,
- test_bit(FLG_LAPD, &st->l2.flag) ?
- "D-channel" : "B-channel");
- if (test_bit(FLG_LAPD, &st->l2.flag))
- error_handling_dchan(st, Code);
- break;
- }
-}
-
-void
-setstack_manager(struct PStack *st)
-{
- st->ma.layer = hisax_manager;
-}
diff --git a/drivers/isdn/hisax/mic.c b/drivers/isdn/hisax/mic.c
deleted file mode 100644
index 93398676f78f..000000000000
--- a/drivers/isdn/hisax/mic.c
+++ /dev/null
@@ -1,235 +0,0 @@
-/* $Id: mic.c,v 1.12.2.4 2004/01/13 23:48:39 keil Exp $
- *
- * low level stuff for mic cards
- *
- * Author Stephan von Krawczynski
- * Copyright by Stephan von Krawczynski <skraw@ithnet.com>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/init.h>
-#include "hisax.h"
-#include "isac.h"
-#include "hscx.h"
-#include "isdnl1.h"
-
-static const char *mic_revision = "$Revision: 1.12.2.4 $";
-
-#define byteout(addr, val) outb(val, addr)
-#define bytein(addr) inb(addr)
-
-#define MIC_ISAC 2
-#define MIC_HSCX 1
-#define MIC_ADR 7
-
-/* CARD_ADR (Write) */
-#define MIC_RESET 0x3 /* same as DOS driver */
-
-static inline u_char
-readreg(unsigned int ale, unsigned int adr, u_char off)
-{
- register u_char ret;
-
- byteout(ale, off);
- ret = bytein(adr);
- return (ret);
-}
-
-static inline void
-readfifo(unsigned int ale, unsigned int adr, u_char off, u_char *data, int size)
-{
- byteout(ale, off);
- insb(adr, data, size);
-}
-
-
-static inline void
-writereg(unsigned int ale, unsigned int adr, u_char off, u_char data)
-{
- byteout(ale, off);
- byteout(adr, data);
-}
-
-static inline void
-writefifo(unsigned int ale, unsigned int adr, u_char off, u_char *data, int size)
-{
- byteout(ale, off);
- outsb(adr, data, size);
-}
-
-/* Interface functions */
-
-static u_char
-ReadISAC(struct IsdnCardState *cs, u_char offset)
-{
- return (readreg(cs->hw.mic.adr, cs->hw.mic.isac, offset));
-}
-
-static void
-WriteISAC(struct IsdnCardState *cs, u_char offset, u_char value)
-{
- writereg(cs->hw.mic.adr, cs->hw.mic.isac, offset, value);
-}
-
-static void
-ReadISACfifo(struct IsdnCardState *cs, u_char *data, int size)
-{
- readfifo(cs->hw.mic.adr, cs->hw.mic.isac, 0, data, size);
-}
-
-static void
-WriteISACfifo(struct IsdnCardState *cs, u_char *data, int size)
-{
- writefifo(cs->hw.mic.adr, cs->hw.mic.isac, 0, data, size);
-}
-
-static u_char
-ReadHSCX(struct IsdnCardState *cs, int hscx, u_char offset)
-{
- return (readreg(cs->hw.mic.adr,
- cs->hw.mic.hscx, offset + (hscx ? 0x40 : 0)));
-}
-
-static void
-WriteHSCX(struct IsdnCardState *cs, int hscx, u_char offset, u_char value)
-{
- writereg(cs->hw.mic.adr,
- cs->hw.mic.hscx, offset + (hscx ? 0x40 : 0), value);
-}
-
-/*
- * fast interrupt HSCX stuff goes here
- */
-
-#define READHSCX(cs, nr, reg) readreg(cs->hw.mic.adr, \
- cs->hw.mic.hscx, reg + (nr ? 0x40 : 0))
-#define WRITEHSCX(cs, nr, reg, data) writereg(cs->hw.mic.adr, \
- cs->hw.mic.hscx, reg + (nr ? 0x40 : 0), data)
-
-#define READHSCXFIFO(cs, nr, ptr, cnt) readfifo(cs->hw.mic.adr, \
- cs->hw.mic.hscx, (nr ? 0x40 : 0), ptr, cnt)
-
-#define WRITEHSCXFIFO(cs, nr, ptr, cnt) writefifo(cs->hw.mic.adr, \
- cs->hw.mic.hscx, (nr ? 0x40 : 0), ptr, cnt)
-
-#include "hscx_irq.c"
-
-static irqreturn_t
-mic_interrupt(int intno, void *dev_id)
-{
- struct IsdnCardState *cs = dev_id;
- u_char val;
- u_long flags;
-
- spin_lock_irqsave(&cs->lock, flags);
- val = readreg(cs->hw.mic.adr, cs->hw.mic.hscx, HSCX_ISTA + 0x40);
-Start_HSCX:
- if (val)
- hscx_int_main(cs, val);
- val = readreg(cs->hw.mic.adr, cs->hw.mic.isac, ISAC_ISTA);
-Start_ISAC:
- if (val)
- isac_interrupt(cs, val);
- val = readreg(cs->hw.mic.adr, cs->hw.mic.hscx, HSCX_ISTA + 0x40);
- if (val) {
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "HSCX IntStat after IntRoutine");
- goto Start_HSCX;
- }
- val = readreg(cs->hw.mic.adr, cs->hw.mic.isac, ISAC_ISTA);
- if (val) {
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "ISAC IntStat after IntRoutine");
- goto Start_ISAC;
- }
- writereg(cs->hw.mic.adr, cs->hw.mic.hscx, HSCX_MASK, 0xFF);
- writereg(cs->hw.mic.adr, cs->hw.mic.hscx, HSCX_MASK + 0x40, 0xFF);
- writereg(cs->hw.mic.adr, cs->hw.mic.isac, ISAC_MASK, 0xFF);
- writereg(cs->hw.mic.adr, cs->hw.mic.isac, ISAC_MASK, 0x0);
- writereg(cs->hw.mic.adr, cs->hw.mic.hscx, HSCX_MASK, 0x0);
- writereg(cs->hw.mic.adr, cs->hw.mic.hscx, HSCX_MASK + 0x40, 0x0);
- spin_unlock_irqrestore(&cs->lock, flags);
- return IRQ_HANDLED;
-}
-
-static void
-release_io_mic(struct IsdnCardState *cs)
-{
- int bytecnt = 8;
-
- if (cs->hw.mic.cfg_reg)
- release_region(cs->hw.mic.cfg_reg, bytecnt);
-}
-
-static int
-mic_card_msg(struct IsdnCardState *cs, int mt, void *arg)
-{
- u_long flags;
-
- switch (mt) {
- case CARD_RESET:
- return (0);
- case CARD_RELEASE:
- release_io_mic(cs);
- return (0);
- case CARD_INIT:
- spin_lock_irqsave(&cs->lock, flags);
- inithscx(cs); /* /RTSA := ISAC RST */
- inithscxisac(cs, 3);
- spin_unlock_irqrestore(&cs->lock, flags);
- return (0);
- case CARD_TEST:
- return (0);
- }
- return (0);
-}
-
-int setup_mic(struct IsdnCard *card)
-{
- int bytecnt;
- struct IsdnCardState *cs = card->cs;
- char tmp[64];
-
- strcpy(tmp, mic_revision);
- printk(KERN_INFO "HiSax: mic driver Rev. %s\n", HiSax_getrev(tmp));
- if (cs->typ != ISDN_CTYPE_MIC)
- return (0);
-
- bytecnt = 8;
- cs->hw.mic.cfg_reg = card->para[1];
- cs->irq = card->para[0];
- cs->hw.mic.adr = cs->hw.mic.cfg_reg + MIC_ADR;
- cs->hw.mic.isac = cs->hw.mic.cfg_reg + MIC_ISAC;
- cs->hw.mic.hscx = cs->hw.mic.cfg_reg + MIC_HSCX;
-
- if (!request_region(cs->hw.mic.cfg_reg, bytecnt, "mic isdn")) {
- printk(KERN_WARNING
- "HiSax: ith mic config port %x-%x already in use\n",
- cs->hw.mic.cfg_reg,
- cs->hw.mic.cfg_reg + bytecnt);
- return (0);
- }
- printk(KERN_INFO "mic: defined at 0x%x IRQ %d\n",
- cs->hw.mic.cfg_reg, cs->irq);
- setup_isac(cs);
- cs->readisac = &ReadISAC;
- cs->writeisac = &WriteISAC;
- cs->readisacfifo = &ReadISACfifo;
- cs->writeisacfifo = &WriteISACfifo;
- cs->BC_Read_Reg = &ReadHSCX;
- cs->BC_Write_Reg = &WriteHSCX;
- cs->BC_Send_Data = &hscx_fill_fifo;
- cs->cardmsg = &mic_card_msg;
- cs->irq_func = &mic_interrupt;
- ISACVersion(cs, "mic:");
- if (HscxVersion(cs, "mic:")) {
- printk(KERN_WARNING
- "mic: wrong HSCX versions check IO address\n");
- release_io_mic(cs);
- return (0);
- }
- return (1);
-}
diff --git a/drivers/isdn/hisax/netjet.c b/drivers/isdn/hisax/netjet.c
deleted file mode 100644
index d7b011c8d692..000000000000
--- a/drivers/isdn/hisax/netjet.c
+++ /dev/null
@@ -1,985 +0,0 @@
-/* $Id: netjet.c,v 1.29.2.4 2004/02/11 13:21:34 keil Exp $
- *
- * low level stuff for Traverse Technologies NETJet ISDN cards
- *
- * Author Karsten Keil
- * Copyright by Karsten Keil <keil@isdn4linux.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- * Thanks to Traverse Technologies Australia for documents and information
- *
- * 16-Apr-2002 - led code added - Guy Ellis (guy@traverse.com.au)
- *
- */
-
-#include <linux/init.h>
-#include "hisax.h"
-#include "isac.h"
-#include "hscx.h"
-#include "isdnl1.h"
-#include <linux/interrupt.h>
-#include <linux/ppp_defs.h>
-#include <linux/slab.h>
-#include <asm/io.h>
-#include "netjet.h"
-
-/* Interface functions */
-
-u_char
-NETjet_ReadIC(struct IsdnCardState *cs, u_char offset)
-{
- u_char ret;
-
- cs->hw.njet.auxd &= 0xfc;
- cs->hw.njet.auxd |= (offset >> 4) & 3;
- byteout(cs->hw.njet.auxa, cs->hw.njet.auxd);
- ret = bytein(cs->hw.njet.isac + ((offset & 0xf) << 2));
- return (ret);
-}
-
-void
-NETjet_WriteIC(struct IsdnCardState *cs, u_char offset, u_char value)
-{
- cs->hw.njet.auxd &= 0xfc;
- cs->hw.njet.auxd |= (offset >> 4) & 3;
- byteout(cs->hw.njet.auxa, cs->hw.njet.auxd);
- byteout(cs->hw.njet.isac + ((offset & 0xf) << 2), value);
-}
-
-void
-NETjet_ReadICfifo(struct IsdnCardState *cs, u_char *data, int size)
-{
- cs->hw.njet.auxd &= 0xfc;
- byteout(cs->hw.njet.auxa, cs->hw.njet.auxd);
- insb(cs->hw.njet.isac, data, size);
-}
-
-void
-NETjet_WriteICfifo(struct IsdnCardState *cs, u_char *data, int size)
-{
- cs->hw.njet.auxd &= 0xfc;
- byteout(cs->hw.njet.auxa, cs->hw.njet.auxd);
- outsb(cs->hw.njet.isac, data, size);
-}
-
-static void fill_mem(struct BCState *bcs, u_int *pos, u_int cnt, int chan, u_char fill)
-{
- u_int mask = 0x000000ff, val = 0, *p = pos;
- u_int i;
-
- val |= fill;
- if (chan) {
- val <<= 8;
- mask <<= 8;
- }
- mask ^= 0xffffffff;
- for (i = 0; i < cnt; i++) {
- *p &= mask;
- *p++ |= val;
- if (p > bcs->hw.tiger.s_end)
- p = bcs->hw.tiger.send;
- }
-}
-
-static void
-mode_tiger(struct BCState *bcs, int mode, int bc)
-{
- struct IsdnCardState *cs = bcs->cs;
- u_char led;
-
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "Tiger mode %d bchan %d/%d",
- mode, bc, bcs->channel);
- bcs->mode = mode;
- bcs->channel = bc;
- switch (mode) {
- case (L1_MODE_NULL):
- fill_mem(bcs, bcs->hw.tiger.send,
- NETJET_DMA_TXSIZE, bc, 0xff);
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "Tiger stat rec %d/%d send %d",
- bcs->hw.tiger.r_tot, bcs->hw.tiger.r_err,
- bcs->hw.tiger.s_tot);
- if ((cs->bcs[0].mode == L1_MODE_NULL) &&
- (cs->bcs[1].mode == L1_MODE_NULL)) {
- cs->hw.njet.dmactrl = 0;
- byteout(cs->hw.njet.base + NETJET_DMACTRL,
- cs->hw.njet.dmactrl);
- byteout(cs->hw.njet.base + NETJET_IRQMASK0, 0);
- }
- if (cs->typ == ISDN_CTYPE_NETJET_S)
- {
- // led off
- led = bc & 0x01;
- led = 0x01 << (6 + led); // convert to mask
- led = ~led;
- cs->hw.njet.auxd &= led;
- byteout(cs->hw.njet.auxa, cs->hw.njet.auxd);
- }
- break;
- case (L1_MODE_TRANS):
- break;
- case (L1_MODE_HDLC_56K):
- case (L1_MODE_HDLC):
- fill_mem(bcs, bcs->hw.tiger.send,
- NETJET_DMA_TXSIZE, bc, 0xff);
- bcs->hw.tiger.r_state = HDLC_ZERO_SEARCH;
- bcs->hw.tiger.r_tot = 0;
- bcs->hw.tiger.r_bitcnt = 0;
- bcs->hw.tiger.r_one = 0;
- bcs->hw.tiger.r_err = 0;
- bcs->hw.tiger.s_tot = 0;
- if (!cs->hw.njet.dmactrl) {
- fill_mem(bcs, bcs->hw.tiger.send,
- NETJET_DMA_TXSIZE, !bc, 0xff);
- cs->hw.njet.dmactrl = 1;
- byteout(cs->hw.njet.base + NETJET_DMACTRL,
- cs->hw.njet.dmactrl);
- byteout(cs->hw.njet.base + NETJET_IRQMASK0, 0x0f);
- /* was 0x3f now 0x0f for TJ300 and TJ320 GE 13/07/00 */
- }
- bcs->hw.tiger.sendp = bcs->hw.tiger.send;
- bcs->hw.tiger.free = NETJET_DMA_TXSIZE;
- test_and_set_bit(BC_FLG_EMPTY, &bcs->Flag);
- if (cs->typ == ISDN_CTYPE_NETJET_S)
- {
- // led on
- led = bc & 0x01;
- led = 0x01 << (6 + led); // convert to mask
- cs->hw.njet.auxd |= led;
- byteout(cs->hw.njet.auxa, cs->hw.njet.auxd);
- }
- break;
- }
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "tiger: set %x %x %x %x/%x pulse=%d",
- bytein(cs->hw.njet.base + NETJET_DMACTRL),
- bytein(cs->hw.njet.base + NETJET_IRQMASK0),
- bytein(cs->hw.njet.base + NETJET_IRQSTAT0),
- inl(cs->hw.njet.base + NETJET_DMA_READ_ADR),
- inl(cs->hw.njet.base + NETJET_DMA_WRITE_ADR),
- bytein(cs->hw.njet.base + NETJET_PULSE_CNT));
-}
-
-static void printframe(struct IsdnCardState *cs, u_char *buf, int count, char *s) {
- char tmp[128];
- char *t = tmp;
- int i = count, j;
- u_char *p = buf;
-
- t += sprintf(t, "tiger %s(%4d)", s, count);
- while (i > 0) {
- if (i > 16)
- j = 16;
- else
- j = i;
- QuickHex(t, p, j);
- debugl1(cs, "%s", tmp);
- p += j;
- i -= j;
- t = tmp;
- t += sprintf(t, "tiger %s ", s);
- }
-}
-
-// macro for 64k
-
-#define MAKE_RAW_BYTE for (j = 0; j < 8; j++) { \
- bitcnt++; \
- s_val >>= 1; \
- if (val & 1) { \
- s_one++; \
- s_val |= 0x80; \
- } else { \
- s_one = 0; \
- s_val &= 0x7f; \
- } \
- if (bitcnt == 8) { \
- bcs->hw.tiger.sendbuf[s_cnt++] = s_val; \
- bitcnt = 0; \
- } \
- if (s_one == 5) { \
- s_val >>= 1; \
- s_val &= 0x7f; \
- bitcnt++; \
- s_one = 0; \
- } \
- if (bitcnt == 8) { \
- bcs->hw.tiger.sendbuf[s_cnt++] = s_val; \
- bitcnt = 0; \
- } \
- val >>= 1; \
- }
-
-static int make_raw_data(struct BCState *bcs) {
-// this make_raw is for 64k
- register u_int i, s_cnt = 0;
- register u_char j;
- register u_char val;
- register u_char s_one = 0;
- register u_char s_val = 0;
- register u_char bitcnt = 0;
- u_int fcs;
-
- if (!bcs->tx_skb) {
- debugl1(bcs->cs, "tiger make_raw: NULL skb");
- return (1);
- }
- bcs->hw.tiger.sendbuf[s_cnt++] = HDLC_FLAG_VALUE;
- fcs = PPP_INITFCS;
- for (i = 0; i < bcs->tx_skb->len; i++) {
- val = bcs->tx_skb->data[i];
- fcs = PPP_FCS(fcs, val);
- MAKE_RAW_BYTE;
- }
- fcs ^= 0xffff;
- val = fcs & 0xff;
- MAKE_RAW_BYTE;
- val = (fcs >> 8) & 0xff;
- MAKE_RAW_BYTE;
- val = HDLC_FLAG_VALUE;
- for (j = 0; j < 8; j++) {
- bitcnt++;
- s_val >>= 1;
- if (val & 1)
- s_val |= 0x80;
- else
- s_val &= 0x7f;
- if (bitcnt == 8) {
- bcs->hw.tiger.sendbuf[s_cnt++] = s_val;
- bitcnt = 0;
- }
- val >>= 1;
- }
- if (bcs->cs->debug & L1_DEB_HSCX)
- debugl1(bcs->cs, "tiger make_raw: in %u out %d.%d",
- bcs->tx_skb->len, s_cnt, bitcnt);
- if (bitcnt) {
- while (8 > bitcnt++) {
- s_val >>= 1;
- s_val |= 0x80;
- }
- bcs->hw.tiger.sendbuf[s_cnt++] = s_val;
-		bcs->hw.tiger.sendbuf[s_cnt++] = 0xff; // NJ<->NJ throughput bug fix
- }
- bcs->hw.tiger.sendcnt = s_cnt;
- bcs->tx_cnt -= bcs->tx_skb->len;
- bcs->hw.tiger.sp = bcs->hw.tiger.sendbuf;
- return (0);
-}
-
-// macro for 56k
-
-#define MAKE_RAW_BYTE_56K for (j = 0; j < 8; j++) { \
- bitcnt++; \
- s_val >>= 1; \
- if (val & 1) { \
- s_one++; \
- s_val |= 0x80; \
- } else { \
- s_one = 0; \
- s_val &= 0x7f; \
- } \
- if (bitcnt == 7) { \
- s_val >>= 1; \
- s_val |= 0x80; \
- bcs->hw.tiger.sendbuf[s_cnt++] = s_val; \
- bitcnt = 0; \
- } \
- if (s_one == 5) { \
- s_val >>= 1; \
- s_val &= 0x7f; \
- bitcnt++; \
- s_one = 0; \
- } \
- if (bitcnt == 7) { \
- s_val >>= 1; \
- s_val |= 0x80; \
- bcs->hw.tiger.sendbuf[s_cnt++] = s_val; \
- bitcnt = 0; \
- } \
- val >>= 1; \
- }
-
-static int make_raw_data_56k(struct BCState *bcs) {
-// this make_raw is for 56k
- register u_int i, s_cnt = 0;
- register u_char j;
- register u_char val;
- register u_char s_one = 0;
- register u_char s_val = 0;
- register u_char bitcnt = 0;
- u_int fcs;
-
- if (!bcs->tx_skb) {
- debugl1(bcs->cs, "tiger make_raw_56k: NULL skb");
- return (1);
- }
- val = HDLC_FLAG_VALUE;
- for (j = 0; j < 8; j++) {
- bitcnt++;
- s_val >>= 1;
- if (val & 1)
- s_val |= 0x80;
- else
- s_val &= 0x7f;
- if (bitcnt == 7) {
- s_val >>= 1;
- s_val |= 0x80;
- bcs->hw.tiger.sendbuf[s_cnt++] = s_val;
- bitcnt = 0;
- }
- val >>= 1;
- }
- fcs = PPP_INITFCS;
- for (i = 0; i < bcs->tx_skb->len; i++) {
- val = bcs->tx_skb->data[i];
- fcs = PPP_FCS(fcs, val);
- MAKE_RAW_BYTE_56K;
- }
- fcs ^= 0xffff;
- val = fcs & 0xff;
- MAKE_RAW_BYTE_56K;
- val = (fcs >> 8) & 0xff;
- MAKE_RAW_BYTE_56K;
- val = HDLC_FLAG_VALUE;
- for (j = 0; j < 8; j++) {
- bitcnt++;
- s_val >>= 1;
- if (val & 1)
- s_val |= 0x80;
- else
- s_val &= 0x7f;
- if (bitcnt == 7) {
- s_val >>= 1;
- s_val |= 0x80;
- bcs->hw.tiger.sendbuf[s_cnt++] = s_val;
- bitcnt = 0;
- }
- val >>= 1;
- }
- if (bcs->cs->debug & L1_DEB_HSCX)
- debugl1(bcs->cs, "tiger make_raw_56k: in %u out %d.%d",
- bcs->tx_skb->len, s_cnt, bitcnt);
- if (bitcnt) {
- while (8 > bitcnt++) {
- s_val >>= 1;
- s_val |= 0x80;
- }
- bcs->hw.tiger.sendbuf[s_cnt++] = s_val;
-		bcs->hw.tiger.sendbuf[s_cnt++] = 0xff; // NJ<->NJ throughput bug fix
- }
- bcs->hw.tiger.sendcnt = s_cnt;
- bcs->tx_cnt -= bcs->tx_skb->len;
- bcs->hw.tiger.sp = bcs->hw.tiger.sendbuf;
- return (0);
-}
-
-static void got_frame(struct BCState *bcs, int count) {
- struct sk_buff *skb;
-
- if (!(skb = dev_alloc_skb(count)))
- printk(KERN_WARNING "TIGER: receive out of memory\n");
- else {
- skb_put_data(skb, bcs->hw.tiger.rcvbuf, count);
- skb_queue_tail(&bcs->rqueue, skb);
- }
- test_and_set_bit(B_RCVBUFREADY, &bcs->event);
- schedule_work(&bcs->tqueue);
-
- if (bcs->cs->debug & L1_DEB_RECEIVE_FRAME)
- printframe(bcs->cs, bcs->hw.tiger.rcvbuf, count, "rec");
-}
-
-
-
-static void read_raw(struct BCState *bcs, u_int *buf, int cnt) {
- int i;
- register u_char j;
- register u_char val;
- u_int *pend = bcs->hw.tiger.rec + NETJET_DMA_RXSIZE - 1;
- register u_char state = bcs->hw.tiger.r_state;
- register u_char r_one = bcs->hw.tiger.r_one;
- register u_char r_val = bcs->hw.tiger.r_val;
- register u_int bitcnt = bcs->hw.tiger.r_bitcnt;
- u_int *p = buf;
- int bits;
- u_char mask;
-
- if (bcs->mode == L1_MODE_HDLC) { // it's 64k
- mask = 0xff;
- bits = 8;
- }
- else { // it's 56K
- mask = 0x7f;
- bits = 7;
- }
- for (i = 0; i < cnt; i++) {
- val = bcs->channel ? ((*p >> 8) & 0xff) : (*p & 0xff);
- p++;
- if (p > pend)
- p = bcs->hw.tiger.rec;
- if ((val & mask) == mask) {
- state = HDLC_ZERO_SEARCH;
- bcs->hw.tiger.r_tot++;
- bitcnt = 0;
- r_one = 0;
- continue;
- }
- for (j = 0; j < bits; j++) {
- if (state == HDLC_ZERO_SEARCH) {
- if (val & 1) {
- r_one++;
- } else {
- r_one = 0;
- state = HDLC_FLAG_SEARCH;
- if (bcs->cs->debug & L1_DEB_HSCX)
- debugl1(bcs->cs, "tiger read_raw: zBit(%d,%d,%d) %x",
- bcs->hw.tiger.r_tot, i, j, val);
- }
- } else if (state == HDLC_FLAG_SEARCH) {
- if (val & 1) {
- r_one++;
- if (r_one > 6) {
- state = HDLC_ZERO_SEARCH;
- }
- } else {
- if (r_one == 6) {
- bitcnt = 0;
- r_val = 0;
- state = HDLC_FLAG_FOUND;
- if (bcs->cs->debug & L1_DEB_HSCX)
- debugl1(bcs->cs, "tiger read_raw: flag(%d,%d,%d) %x",
- bcs->hw.tiger.r_tot, i, j, val);
- }
- r_one = 0;
- }
- } else if (state == HDLC_FLAG_FOUND) {
- if (val & 1) {
- r_one++;
- if (r_one > 6) {
- state = HDLC_ZERO_SEARCH;
- } else {
- r_val >>= 1;
- r_val |= 0x80;
- bitcnt++;
- }
- } else {
- if (r_one == 6) {
- bitcnt = 0;
- r_val = 0;
- r_one = 0;
- val >>= 1;
- continue;
- } else if (r_one != 5) {
- r_val >>= 1;
- r_val &= 0x7f;
- bitcnt++;
- }
- r_one = 0;
- }
- if ((state != HDLC_ZERO_SEARCH) &&
- !(bitcnt & 7)) {
- state = HDLC_FRAME_FOUND;
- bcs->hw.tiger.r_fcs = PPP_INITFCS;
- bcs->hw.tiger.rcvbuf[0] = r_val;
- bcs->hw.tiger.r_fcs = PPP_FCS(bcs->hw.tiger.r_fcs, r_val);
- if (bcs->cs->debug & L1_DEB_HSCX)
- debugl1(bcs->cs, "tiger read_raw: byte1(%d,%d,%d) rval %x val %x i %x",
- bcs->hw.tiger.r_tot, i, j, r_val, val,
- bcs->cs->hw.njet.irqstat0);
- }
- } else if (state == HDLC_FRAME_FOUND) {
- if (val & 1) {
- r_one++;
- if (r_one > 6) {
- state = HDLC_ZERO_SEARCH;
- bitcnt = 0;
- } else {
- r_val >>= 1;
- r_val |= 0x80;
- bitcnt++;
- }
- } else {
- if (r_one == 6) {
- r_val = 0;
- r_one = 0;
- bitcnt++;
- if (bitcnt & 7) {
- debugl1(bcs->cs, "tiger: frame not byte aligned");
- state = HDLC_FLAG_SEARCH;
- bcs->hw.tiger.r_err++;
-#ifdef ERROR_STATISTIC
- bcs->err_inv++;
-#endif
- } else {
- if (bcs->cs->debug & L1_DEB_HSCX)
- debugl1(bcs->cs, "tiger frame end(%d,%d): fcs(%x) i %x",
- i, j, bcs->hw.tiger.r_fcs, bcs->cs->hw.njet.irqstat0);
- if (bcs->hw.tiger.r_fcs == PPP_GOODFCS) {
- got_frame(bcs, (bitcnt >> 3) - 3);
- } else {
- if (bcs->cs->debug) {
- debugl1(bcs->cs, "tiger FCS error");
- printframe(bcs->cs, bcs->hw.tiger.rcvbuf,
- (bitcnt >> 3) - 1, "rec");
- bcs->hw.tiger.r_err++;
- }
-#ifdef ERROR_STATISTIC
- bcs->err_crc++;
-#endif
- }
- state = HDLC_FLAG_FOUND;
- }
- bitcnt = 0;
- } else if (r_one == 5) {
- val >>= 1;
- r_one = 0;
- continue;
- } else {
- r_val >>= 1;
- r_val &= 0x7f;
- bitcnt++;
- }
- r_one = 0;
- }
- if ((state == HDLC_FRAME_FOUND) &&
- !(bitcnt & 7)) {
- if ((bitcnt >> 3) >= HSCX_BUFMAX) {
- debugl1(bcs->cs, "tiger: frame too big");
- r_val = 0;
- state = HDLC_FLAG_SEARCH;
- bcs->hw.tiger.r_err++;
-#ifdef ERROR_STATISTIC
- bcs->err_inv++;
-#endif
- } else {
- bcs->hw.tiger.rcvbuf[(bitcnt >> 3) - 1] = r_val;
- bcs->hw.tiger.r_fcs =
- PPP_FCS(bcs->hw.tiger.r_fcs, r_val);
- }
- }
- }
- val >>= 1;
- }
- bcs->hw.tiger.r_tot++;
- }
- bcs->hw.tiger.r_state = state;
- bcs->hw.tiger.r_one = r_one;
- bcs->hw.tiger.r_val = r_val;
- bcs->hw.tiger.r_bitcnt = bitcnt;
-}
-
-void read_tiger(struct IsdnCardState *cs) {
- u_int *p;
- int cnt = NETJET_DMA_RXSIZE / 2;
-
- if ((cs->hw.njet.irqstat0 & cs->hw.njet.last_is0) & NETJET_IRQM0_READ) {
- debugl1(cs, "tiger warn read double dma %x/%x",
- cs->hw.njet.irqstat0, cs->hw.njet.last_is0);
-#ifdef ERROR_STATISTIC
- if (cs->bcs[0].mode)
- cs->bcs[0].err_rdo++;
- if (cs->bcs[1].mode)
- cs->bcs[1].err_rdo++;
-#endif
- return;
- } else {
- cs->hw.njet.last_is0 &= ~NETJET_IRQM0_READ;
- cs->hw.njet.last_is0 |= (cs->hw.njet.irqstat0 & NETJET_IRQM0_READ);
- }
- if (cs->hw.njet.irqstat0 & NETJET_IRQM0_READ_1)
- p = cs->bcs[0].hw.tiger.rec + NETJET_DMA_RXSIZE - 1;
- else
- p = cs->bcs[0].hw.tiger.rec + cnt - 1;
- if ((cs->bcs[0].mode == L1_MODE_HDLC) || (cs->bcs[0].mode == L1_MODE_HDLC_56K))
- read_raw(cs->bcs, p, cnt);
-
- if ((cs->bcs[1].mode == L1_MODE_HDLC) || (cs->bcs[1].mode == L1_MODE_HDLC_56K))
- read_raw(cs->bcs + 1, p, cnt);
- cs->hw.njet.irqstat0 &= ~NETJET_IRQM0_READ;
-}
-
-static void write_raw(struct BCState *bcs, u_int *buf, int cnt);
-
-void netjet_fill_dma(struct BCState *bcs)
-{
- register u_int *p, *sp;
- register int cnt;
-
- if (!bcs->tx_skb)
- return;
- if (bcs->cs->debug & L1_DEB_HSCX)
- debugl1(bcs->cs, "tiger fill_dma1: c%d %4lx", bcs->channel,
- bcs->Flag);
- if (test_and_set_bit(BC_FLG_BUSY, &bcs->Flag))
- return;
- if (bcs->mode == L1_MODE_HDLC) { // it's 64k
- if (make_raw_data(bcs))
- return;
- }
- else { // it's 56k
- if (make_raw_data_56k(bcs))
- return;
- }
- if (bcs->cs->debug & L1_DEB_HSCX)
- debugl1(bcs->cs, "tiger fill_dma2: c%d %4lx", bcs->channel,
- bcs->Flag);
- if (test_and_clear_bit(BC_FLG_NOFRAME, &bcs->Flag)) {
- write_raw(bcs, bcs->hw.tiger.sendp, bcs->hw.tiger.free);
- } else if (test_and_clear_bit(BC_FLG_HALF, &bcs->Flag)) {
- p = bus_to_virt(inl(bcs->cs->hw.njet.base + NETJET_DMA_READ_ADR));
- sp = bcs->hw.tiger.sendp;
- if (p == bcs->hw.tiger.s_end)
- p = bcs->hw.tiger.send - 1;
- if (sp == bcs->hw.tiger.s_end)
- sp = bcs->hw.tiger.send - 1;
- cnt = p - sp;
- if (cnt < 0) {
- write_raw(bcs, bcs->hw.tiger.sendp, bcs->hw.tiger.free);
- } else {
- p++;
- cnt++;
- if (p > bcs->hw.tiger.s_end)
- p = bcs->hw.tiger.send;
- p++;
- cnt++;
- if (p > bcs->hw.tiger.s_end)
- p = bcs->hw.tiger.send;
- write_raw(bcs, p, bcs->hw.tiger.free - cnt);
- }
- } else if (test_and_clear_bit(BC_FLG_EMPTY, &bcs->Flag)) {
- p = bus_to_virt(inl(bcs->cs->hw.njet.base + NETJET_DMA_READ_ADR));
- cnt = bcs->hw.tiger.s_end - p;
- if (cnt < 2) {
- p = bcs->hw.tiger.send + 1;
- cnt = NETJET_DMA_TXSIZE / 2 - 2;
- } else {
- p++;
- p++;
- if (cnt <= (NETJET_DMA_TXSIZE / 2))
- cnt += NETJET_DMA_TXSIZE / 2;
- cnt--;
- cnt--;
- }
- write_raw(bcs, p, cnt);
- }
- if (bcs->cs->debug & L1_DEB_HSCX)
- debugl1(bcs->cs, "tiger fill_dma3: c%d %4lx", bcs->channel,
- bcs->Flag);
-}
-
-static void write_raw(struct BCState *bcs, u_int *buf, int cnt) {
- u_int mask, val, *p = buf;
- u_int i, s_cnt;
-
- if (cnt <= 0)
- return;
- if (test_bit(BC_FLG_BUSY, &bcs->Flag)) {
- if (bcs->hw.tiger.sendcnt > cnt) {
- s_cnt = cnt;
- bcs->hw.tiger.sendcnt -= cnt;
- } else {
- s_cnt = bcs->hw.tiger.sendcnt;
- bcs->hw.tiger.sendcnt = 0;
- }
- if (bcs->channel)
- mask = 0xffff00ff;
- else
- mask = 0xffffff00;
- for (i = 0; i < s_cnt; i++) {
- val = bcs->channel ? ((bcs->hw.tiger.sp[i] << 8) & 0xff00) :
- (bcs->hw.tiger.sp[i]);
- *p &= mask;
- *p++ |= val;
- if (p > bcs->hw.tiger.s_end)
- p = bcs->hw.tiger.send;
- }
- bcs->hw.tiger.s_tot += s_cnt;
- if (bcs->cs->debug & L1_DEB_HSCX)
- debugl1(bcs->cs, "tiger write_raw: c%d %p-%p %d/%d %d %x", bcs->channel,
- buf, p, s_cnt, cnt,
- bcs->hw.tiger.sendcnt, bcs->cs->hw.njet.irqstat0);
- if (bcs->cs->debug & L1_DEB_HSCX_FIFO)
- printframe(bcs->cs, bcs->hw.tiger.sp, s_cnt, "snd");
- bcs->hw.tiger.sp += s_cnt;
- bcs->hw.tiger.sendp = p;
- if (!bcs->hw.tiger.sendcnt) {
- if (!bcs->tx_skb) {
- debugl1(bcs->cs, "tiger write_raw: NULL skb s_cnt %d", s_cnt);
- } else {
- if (test_bit(FLG_LLI_L1WAKEUP, &bcs->st->lli.flag) &&
- (PACKET_NOACK != bcs->tx_skb->pkt_type)) {
- u_long flags;
- spin_lock_irqsave(&bcs->aclock, flags);
- bcs->ackcnt += bcs->tx_skb->len;
- spin_unlock_irqrestore(&bcs->aclock, flags);
- schedule_event(bcs, B_ACKPENDING);
- }
- dev_kfree_skb_any(bcs->tx_skb);
- bcs->tx_skb = NULL;
- }
- test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
- bcs->hw.tiger.free = cnt - s_cnt;
- if (bcs->hw.tiger.free > (NETJET_DMA_TXSIZE / 2))
- test_and_set_bit(BC_FLG_HALF, &bcs->Flag);
- else {
- test_and_clear_bit(BC_FLG_HALF, &bcs->Flag);
- test_and_set_bit(BC_FLG_NOFRAME, &bcs->Flag);
- }
- if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) {
- netjet_fill_dma(bcs);
- } else {
- mask ^= 0xffffffff;
- if (s_cnt < cnt) {
- for (i = s_cnt; i < cnt; i++) {
- *p++ |= mask;
- if (p > bcs->hw.tiger.s_end)
- p = bcs->hw.tiger.send;
- }
- if (bcs->cs->debug & L1_DEB_HSCX)
- debugl1(bcs->cs, "tiger write_raw: fill rest %d",
- cnt - s_cnt);
- }
- test_and_set_bit(B_XMTBUFREADY, &bcs->event);
- schedule_work(&bcs->tqueue);
- }
- }
- } else if (test_and_clear_bit(BC_FLG_NOFRAME, &bcs->Flag)) {
- test_and_set_bit(BC_FLG_HALF, &bcs->Flag);
- fill_mem(bcs, buf, cnt, bcs->channel, 0xff);
- bcs->hw.tiger.free += cnt;
- if (bcs->cs->debug & L1_DEB_HSCX)
- debugl1(bcs->cs, "tiger write_raw: fill half");
- } else if (test_and_clear_bit(BC_FLG_HALF, &bcs->Flag)) {
- test_and_set_bit(BC_FLG_EMPTY, &bcs->Flag);
- fill_mem(bcs, buf, cnt, bcs->channel, 0xff);
- if (bcs->cs->debug & L1_DEB_HSCX)
- debugl1(bcs->cs, "tiger write_raw: fill full");
- }
-}
-
-void write_tiger(struct IsdnCardState *cs) {
- u_int *p, cnt = NETJET_DMA_TXSIZE / 2;
-
- if ((cs->hw.njet.irqstat0 & cs->hw.njet.last_is0) & NETJET_IRQM0_WRITE) {
- debugl1(cs, "tiger warn write double dma %x/%x",
- cs->hw.njet.irqstat0, cs->hw.njet.last_is0);
-#ifdef ERROR_STATISTIC
- if (cs->bcs[0].mode)
- cs->bcs[0].err_tx++;
- if (cs->bcs[1].mode)
- cs->bcs[1].err_tx++;
-#endif
- return;
- } else {
- cs->hw.njet.last_is0 &= ~NETJET_IRQM0_WRITE;
- cs->hw.njet.last_is0 |= (cs->hw.njet.irqstat0 & NETJET_IRQM0_WRITE);
- }
- if (cs->hw.njet.irqstat0 & NETJET_IRQM0_WRITE_1)
- p = cs->bcs[0].hw.tiger.send + NETJET_DMA_TXSIZE - 1;
- else
- p = cs->bcs[0].hw.tiger.send + cnt - 1;
- if ((cs->bcs[0].mode == L1_MODE_HDLC) || (cs->bcs[0].mode == L1_MODE_HDLC_56K))
- write_raw(cs->bcs, p, cnt);
- if ((cs->bcs[1].mode == L1_MODE_HDLC) || (cs->bcs[1].mode == L1_MODE_HDLC_56K))
- write_raw(cs->bcs + 1, p, cnt);
- cs->hw.njet.irqstat0 &= ~NETJET_IRQM0_WRITE;
-}
-
-static void
-tiger_l2l1(struct PStack *st, int pr, void *arg)
-{
- struct BCState *bcs = st->l1.bcs;
- struct sk_buff *skb = arg;
- u_long flags;
-
- switch (pr) {
- case (PH_DATA | REQUEST):
- spin_lock_irqsave(&bcs->cs->lock, flags);
- if (bcs->tx_skb) {
- skb_queue_tail(&bcs->squeue, skb);
- } else {
- bcs->tx_skb = skb;
- bcs->cs->BC_Send_Data(bcs);
- }
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
- break;
- case (PH_PULL | INDICATION):
- spin_lock_irqsave(&bcs->cs->lock, flags);
- if (bcs->tx_skb) {
- printk(KERN_WARNING "tiger_l2l1: this shouldn't happen\n");
- } else {
- bcs->tx_skb = skb;
- bcs->cs->BC_Send_Data(bcs);
- }
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
- break;
- case (PH_PULL | REQUEST):
- if (!bcs->tx_skb) {
- test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
- st->l1.l1l2(st, PH_PULL | CONFIRM, NULL);
- } else
- test_and_set_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
- break;
- case (PH_ACTIVATE | REQUEST):
- spin_lock_irqsave(&bcs->cs->lock, flags);
- test_and_set_bit(BC_FLG_ACTIV, &bcs->Flag);
- mode_tiger(bcs, st->l1.mode, st->l1.bc);
- /* 2001/10/04 Christoph Ersfeld, Formula-n Europe AG */
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
- bcs->cs->cardmsg(bcs->cs, MDL_BC_ASSIGN, (void *)(&st->l1.bc));
- l1_msg_b(st, pr, arg);
- break;
- case (PH_DEACTIVATE | REQUEST):
- /* 2001/10/04 Christoph Ersfeld, Formula-n Europe AG */
- bcs->cs->cardmsg(bcs->cs, MDL_BC_RELEASE, (void *)(&st->l1.bc));
- l1_msg_b(st, pr, arg);
- break;
- case (PH_DEACTIVATE | CONFIRM):
- spin_lock_irqsave(&bcs->cs->lock, flags);
- test_and_clear_bit(BC_FLG_ACTIV, &bcs->Flag);
- test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
- mode_tiger(bcs, 0, st->l1.bc);
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
- st->l1.l1l2(st, PH_DEACTIVATE | CONFIRM, NULL);
- break;
- }
-}
-
-
-static void
-close_tigerstate(struct BCState *bcs)
-{
- mode_tiger(bcs, 0, bcs->channel);
- if (test_and_clear_bit(BC_FLG_INIT, &bcs->Flag)) {
- kfree(bcs->hw.tiger.rcvbuf);
- bcs->hw.tiger.rcvbuf = NULL;
- kfree(bcs->hw.tiger.sendbuf);
- bcs->hw.tiger.sendbuf = NULL;
- skb_queue_purge(&bcs->rqueue);
- skb_queue_purge(&bcs->squeue);
- if (bcs->tx_skb) {
- dev_kfree_skb_any(bcs->tx_skb);
- bcs->tx_skb = NULL;
- test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
- }
- }
-}
-
-static int
-open_tigerstate(struct IsdnCardState *cs, struct BCState *bcs)
-{
- if (!test_and_set_bit(BC_FLG_INIT, &bcs->Flag)) {
- if (!(bcs->hw.tiger.rcvbuf = kmalloc(HSCX_BUFMAX, GFP_ATOMIC))) {
- printk(KERN_WARNING
- "HiSax: No memory for tiger.rcvbuf\n");
- return (1);
- }
- if (!(bcs->hw.tiger.sendbuf = kmalloc(RAW_BUFMAX, GFP_ATOMIC))) {
- printk(KERN_WARNING
- "HiSax: No memory for tiger.sendbuf\n");
- return (1);
- }
- skb_queue_head_init(&bcs->rqueue);
- skb_queue_head_init(&bcs->squeue);
- }
- bcs->tx_skb = NULL;
- bcs->hw.tiger.sendcnt = 0;
- test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
- bcs->event = 0;
- bcs->tx_cnt = 0;
- return (0);
-}
-
-static int
-setstack_tiger(struct PStack *st, struct BCState *bcs)
-{
- bcs->channel = st->l1.bc;
- if (open_tigerstate(st->l1.hardware, bcs))
- return (-1);
- st->l1.bcs = bcs;
- st->l2.l2l1 = tiger_l2l1;
- setstack_manager(st);
- bcs->st = st;
- setstack_l1_B(st);
- return (0);
-}
-
-
-void
-inittiger(struct IsdnCardState *cs)
-{
- cs->bcs[0].hw.tiger.send = kmalloc_array(NETJET_DMA_TXSIZE,
- sizeof(unsigned int),
- GFP_KERNEL | GFP_DMA);
- if (!cs->bcs[0].hw.tiger.send) {
- printk(KERN_WARNING
- "HiSax: No memory for tiger.send\n");
- return;
- }
- cs->bcs[0].hw.tiger.s_irq = cs->bcs[0].hw.tiger.send + NETJET_DMA_TXSIZE / 2 - 1;
- cs->bcs[0].hw.tiger.s_end = cs->bcs[0].hw.tiger.send + NETJET_DMA_TXSIZE - 1;
- cs->bcs[1].hw.tiger.send = cs->bcs[0].hw.tiger.send;
- cs->bcs[1].hw.tiger.s_irq = cs->bcs[0].hw.tiger.s_irq;
- cs->bcs[1].hw.tiger.s_end = cs->bcs[0].hw.tiger.s_end;
-
- memset(cs->bcs[0].hw.tiger.send, 0xff, NETJET_DMA_TXSIZE * sizeof(unsigned int));
- debugl1(cs, "tiger: send buf %p - %p", cs->bcs[0].hw.tiger.send,
- cs->bcs[0].hw.tiger.send + NETJET_DMA_TXSIZE - 1);
- outl(virt_to_bus(cs->bcs[0].hw.tiger.send),
- cs->hw.njet.base + NETJET_DMA_READ_START);
- outl(virt_to_bus(cs->bcs[0].hw.tiger.s_irq),
- cs->hw.njet.base + NETJET_DMA_READ_IRQ);
- outl(virt_to_bus(cs->bcs[0].hw.tiger.s_end),
- cs->hw.njet.base + NETJET_DMA_READ_END);
- cs->bcs[0].hw.tiger.rec = kmalloc_array(NETJET_DMA_RXSIZE,
- sizeof(unsigned int),
- GFP_KERNEL | GFP_DMA);
- if (!cs->bcs[0].hw.tiger.rec) {
- printk(KERN_WARNING
- "HiSax: No memory for tiger.rec\n");
- return;
- }
- debugl1(cs, "tiger: rec buf %p - %p", cs->bcs[0].hw.tiger.rec,
- cs->bcs[0].hw.tiger.rec + NETJET_DMA_RXSIZE - 1);
- cs->bcs[1].hw.tiger.rec = cs->bcs[0].hw.tiger.rec;
- memset(cs->bcs[0].hw.tiger.rec, 0xff, NETJET_DMA_RXSIZE * sizeof(unsigned int));
- outl(virt_to_bus(cs->bcs[0].hw.tiger.rec),
- cs->hw.njet.base + NETJET_DMA_WRITE_START);
- outl(virt_to_bus(cs->bcs[0].hw.tiger.rec + NETJET_DMA_RXSIZE / 2 - 1),
- cs->hw.njet.base + NETJET_DMA_WRITE_IRQ);
- outl(virt_to_bus(cs->bcs[0].hw.tiger.rec + NETJET_DMA_RXSIZE - 1),
- cs->hw.njet.base + NETJET_DMA_WRITE_END);
- debugl1(cs, "tiger: dmacfg %x/%x pulse=%d",
- inl(cs->hw.njet.base + NETJET_DMA_WRITE_ADR),
- inl(cs->hw.njet.base + NETJET_DMA_READ_ADR),
- bytein(cs->hw.njet.base + NETJET_PULSE_CNT));
- cs->hw.njet.last_is0 = 0;
- cs->bcs[0].BC_SetStack = setstack_tiger;
- cs->bcs[1].BC_SetStack = setstack_tiger;
- cs->bcs[0].BC_Close = close_tigerstate;
- cs->bcs[1].BC_Close = close_tigerstate;
-}
-
-static void
-releasetiger(struct IsdnCardState *cs)
-{
- kfree(cs->bcs[0].hw.tiger.send);
- cs->bcs[0].hw.tiger.send = NULL;
- cs->bcs[1].hw.tiger.send = NULL;
- kfree(cs->bcs[0].hw.tiger.rec);
- cs->bcs[0].hw.tiger.rec = NULL;
- cs->bcs[1].hw.tiger.rec = NULL;
-}
-
-void
-release_io_netjet(struct IsdnCardState *cs)
-{
- byteout(cs->hw.njet.base + NETJET_IRQMASK0, 0);
- byteout(cs->hw.njet.base + NETJET_IRQMASK1, 0);
- releasetiger(cs);
- release_region(cs->hw.njet.base, 256);
-}
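
The MAKE_RAW_BYTE/MAKE_RAW_BYTE_56K macros and read_raw() removed above implement HDLC framing in software for the NETjet's raw DMA stream: bits go out LSB first, a 0 bit is inserted after five consecutive 1 bits so payload data can never imitate the 0x7e flag, and the FCS plus a closing flag are appended. The sketch below shows only the zero-insertion (bit-stuffing) step under those assumptions; it omits flags, FCS and the driver's idle padding, and all identifiers are illustrative.

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* Insert a 0 bit after every run of five 1 bits, LSB first as on the wire. */
static size_t hdlc_stuff(const uint8_t *in, size_t len, uint8_t *out, size_t outlen)
{
	unsigned int acc = 0, nbits = 0, ones = 0;
	size_t i, nout = 0;
	int b, bit;

	for (i = 0; i < len; i++) {
		for (b = 0; b < 8; b++) {
			bit = (in[i] >> b) & 1;
			acc |= (unsigned int)bit << nbits++;
			ones = bit ? ones + 1 : 0;
			if (ones == 5) {              /* stuffed bit is a 0 */
				nbits++;
				ones = 0;
			}
			while (nbits >= 8) {
				if (nout == outlen)
					return 0;     /* output buffer too small */
				out[nout++] = acc & 0xff;
				acc >>= 8;
				nbits -= 8;
			}
		}
	}
	if (nbits) {                                  /* flush the partial last byte */
		if (nout == outlen)
			return 0;
		out[nout++] = acc & 0xff;
	}
	return nout;                                  /* stuffed length in bytes */
}

int main(void)
{
	uint8_t in[2] = { 0xff, 0xff };               /* 16 one-bits -> 3 stuffed zeros */
	uint8_t out[8] = { 0 };
	size_t n = hdlc_stuff(in, sizeof(in), out, sizeof(out));

	printf("%zu bytes: %02x %02x %02x\n", n, out[0], out[1], out[2]);
	return 0;
}
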
diff --git a/drivers/isdn/hisax/netjet.h b/drivers/isdn/hisax/netjet.h
deleted file mode 100644
index 70590d5d5e64..000000000000
--- a/drivers/isdn/hisax/netjet.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/* $Id: netjet.h,v 2.8.2.2 2004/01/12 22:52:28 keil Exp $
- *
- * NETjet common header file
- *
- * Author Karsten Keil
- * Copyright by Karsten Keil <keil@isdn4linux.de>
- * by Matt Henderson,
- * Traverse Technologies P/L www.traverse.com.au
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#define byteout(addr, val) outb(val, addr)
-#define bytein(addr) inb(addr)
-
-#define NETJET_CTRL 0x00
-#define NETJET_DMACTRL 0x01
-#define NETJET_AUXCTRL 0x02
-#define NETJET_AUXDATA 0x03
-#define NETJET_IRQMASK0 0x04
-#define NETJET_IRQMASK1 0x05
-#define NETJET_IRQSTAT0 0x06
-#define NETJET_IRQSTAT1 0x07
-#define NETJET_DMA_READ_START 0x08
-#define NETJET_DMA_READ_IRQ 0x0c
-#define NETJET_DMA_READ_END 0x10
-#define NETJET_DMA_READ_ADR 0x14
-#define NETJET_DMA_WRITE_START 0x18
-#define NETJET_DMA_WRITE_IRQ 0x1c
-#define NETJET_DMA_WRITE_END 0x20
-#define NETJET_DMA_WRITE_ADR 0x24
-#define NETJET_PULSE_CNT 0x28
-
-#define NETJET_ISAC_OFF 0xc0
-#define NETJET_ISACIRQ 0x10
-#define NETJET_IRQM0_READ 0x0c
-#define NETJET_IRQM0_READ_1 0x04
-#define NETJET_IRQM0_READ_2 0x08
-#define NETJET_IRQM0_WRITE 0x03
-#define NETJET_IRQM0_WRITE_1 0x01
-#define NETJET_IRQM0_WRITE_2 0x02
-
-#define NETJET_DMA_TXSIZE 512
-#define NETJET_DMA_RXSIZE 128
-
-#define HDLC_ZERO_SEARCH 0
-#define HDLC_FLAG_SEARCH 1
-#define HDLC_FLAG_FOUND 2
-#define HDLC_FRAME_FOUND 3
-#define HDLC_NULL 4
-#define HDLC_PART 5
-#define HDLC_FULL 6
-
-#define HDLC_FLAG_VALUE 0x7e
-
-u_char NETjet_ReadIC(struct IsdnCardState *cs, u_char offset);
-void NETjet_WriteIC(struct IsdnCardState *cs, u_char offset, u_char value);
-void NETjet_ReadICfifo(struct IsdnCardState *cs, u_char *data, int size);
-void NETjet_WriteICfifo(struct IsdnCardState *cs, u_char *data, int size);
-
-void read_tiger(struct IsdnCardState *cs);
-void write_tiger(struct IsdnCardState *cs);
-
-void netjet_fill_dma(struct BCState *bcs);
-void netjet_interrupt(int intno, void *dev_id);
-void inittiger(struct IsdnCardState *cs);
-void release_io_netjet(struct IsdnCardState *cs);
diff --git a/drivers/isdn/hisax/niccy.c b/drivers/isdn/hisax/niccy.c
deleted file mode 100644
index dfbcd2eaa81a..000000000000
--- a/drivers/isdn/hisax/niccy.c
+++ /dev/null
@@ -1,380 +0,0 @@
-/* $Id: niccy.c,v 1.21.2.4 2004/01/13 23:48:39 keil Exp $
- *
- * low level stuff for Dr. Neuhaus NICCY PnP and NICCY PCI and
- * compatible (SAGEM cybermodem)
- *
- * Author Karsten Keil
- * Copyright by Karsten Keil <keil@isdn4linux.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- * Thanks to Dr. Neuhaus and SAGEM for information
- *
- */
-
-#include <linux/init.h>
-#include "hisax.h"
-#include "isac.h"
-#include "hscx.h"
-#include "isdnl1.h"
-#include <linux/pci.h>
-#include <linux/isapnp.h>
-
-static const char *niccy_revision = "$Revision: 1.21.2.4 $";
-
-#define byteout(addr, val) outb(val, addr)
-#define bytein(addr) inb(addr)
-
-#define ISAC_PCI_DATA 0
-#define HSCX_PCI_DATA 1
-#define ISAC_PCI_ADDR 2
-#define HSCX_PCI_ADDR 3
-#define ISAC_PNP 0
-#define HSCX_PNP 1
-
-/* SUB Types */
-#define NICCY_PNP 1
-#define NICCY_PCI 2
-
-/* PCI stuff */
-#define PCI_IRQ_CTRL_REG 0x38
-#define PCI_IRQ_ENABLE 0x1f00
-#define PCI_IRQ_DISABLE 0xff0000
-#define PCI_IRQ_ASSERT 0x800000
-
-static inline u_char readreg(unsigned int ale, unsigned int adr, u_char off)
-{
- register u_char ret;
-
- byteout(ale, off);
- ret = bytein(adr);
- return ret;
-}
-
-static inline void readfifo(unsigned int ale, unsigned int adr, u_char off,
- u_char *data, int size)
-{
- byteout(ale, off);
- insb(adr, data, size);
-}
-
-static inline void writereg(unsigned int ale, unsigned int adr, u_char off,
- u_char data)
-{
- byteout(ale, off);
- byteout(adr, data);
-}
-
-static inline void writefifo(unsigned int ale, unsigned int adr, u_char off,
- u_char *data, int size)
-{
- byteout(ale, off);
- outsb(adr, data, size);
-}
-
-/* Interface functions */
-
-static u_char ReadISAC(struct IsdnCardState *cs, u_char offset)
-{
- return readreg(cs->hw.niccy.isac_ale, cs->hw.niccy.isac, offset);
-}
-
-static void WriteISAC(struct IsdnCardState *cs, u_char offset, u_char value)
-{
- writereg(cs->hw.niccy.isac_ale, cs->hw.niccy.isac, offset, value);
-}
-
-static void ReadISACfifo(struct IsdnCardState *cs, u_char *data, int size)
-{
- readfifo(cs->hw.niccy.isac_ale, cs->hw.niccy.isac, 0, data, size);
-}
-
-static void WriteISACfifo(struct IsdnCardState *cs, u_char *data, int size)
-{
- writefifo(cs->hw.niccy.isac_ale, cs->hw.niccy.isac, 0, data, size);
-}
-
-static u_char ReadHSCX(struct IsdnCardState *cs, int hscx, u_char offset)
-{
- return readreg(cs->hw.niccy.hscx_ale,
- cs->hw.niccy.hscx, offset + (hscx ? 0x40 : 0));
-}
-
-static void WriteHSCX(struct IsdnCardState *cs, int hscx, u_char offset,
- u_char value)
-{
- writereg(cs->hw.niccy.hscx_ale,
- cs->hw.niccy.hscx, offset + (hscx ? 0x40 : 0), value);
-}
-
-#define READHSCX(cs, nr, reg) readreg(cs->hw.niccy.hscx_ale, \
- cs->hw.niccy.hscx, reg + (nr ? 0x40 : 0))
-#define WRITEHSCX(cs, nr, reg, data) writereg(cs->hw.niccy.hscx_ale, \
- cs->hw.niccy.hscx, reg + (nr ? 0x40 : 0), data)
-
-#define READHSCXFIFO(cs, nr, ptr, cnt) readfifo(cs->hw.niccy.hscx_ale, \
- cs->hw.niccy.hscx, (nr ? 0x40 : 0), ptr, cnt)
-
-#define WRITEHSCXFIFO(cs, nr, ptr, cnt) writefifo(cs->hw.niccy.hscx_ale, \
- cs->hw.niccy.hscx, (nr ? 0x40 : 0), ptr, cnt)
-
-#include "hscx_irq.c"
-
-static irqreturn_t niccy_interrupt(int intno, void *dev_id)
-{
- struct IsdnCardState *cs = dev_id;
- u_char val;
- u_long flags;
-
- spin_lock_irqsave(&cs->lock, flags);
- if (cs->subtyp == NICCY_PCI) {
- int ival;
- ival = inl(cs->hw.niccy.cfg_reg + PCI_IRQ_CTRL_REG);
- if (!(ival & PCI_IRQ_ASSERT)) { /* IRQ not for us (shared) */
- spin_unlock_irqrestore(&cs->lock, flags);
- return IRQ_NONE;
- }
- outl(ival, cs->hw.niccy.cfg_reg + PCI_IRQ_CTRL_REG);
- }
- val = readreg(cs->hw.niccy.hscx_ale, cs->hw.niccy.hscx,
- HSCX_ISTA + 0x40);
-Start_HSCX:
- if (val)
- hscx_int_main(cs, val);
- val = readreg(cs->hw.niccy.isac_ale, cs->hw.niccy.isac, ISAC_ISTA);
-Start_ISAC:
- if (val)
- isac_interrupt(cs, val);
- val = readreg(cs->hw.niccy.hscx_ale, cs->hw.niccy.hscx,
- HSCX_ISTA + 0x40);
- if (val) {
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "HSCX IntStat after IntRoutine");
- goto Start_HSCX;
- }
- val = readreg(cs->hw.niccy.isac_ale, cs->hw.niccy.isac, ISAC_ISTA);
- if (val) {
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "ISAC IntStat after IntRoutine");
- goto Start_ISAC;
- }
- writereg(cs->hw.niccy.hscx_ale, cs->hw.niccy.hscx, HSCX_MASK, 0xFF);
- writereg(cs->hw.niccy.hscx_ale, cs->hw.niccy.hscx, HSCX_MASK + 0x40,
- 0xFF);
- writereg(cs->hw.niccy.isac_ale, cs->hw.niccy.isac, ISAC_MASK, 0xFF);
- writereg(cs->hw.niccy.isac_ale, cs->hw.niccy.isac, ISAC_MASK, 0);
- writereg(cs->hw.niccy.hscx_ale, cs->hw.niccy.hscx, HSCX_MASK, 0);
- writereg(cs->hw.niccy.hscx_ale, cs->hw.niccy.hscx, HSCX_MASK + 0x40, 0);
- spin_unlock_irqrestore(&cs->lock, flags);
- return IRQ_HANDLED;
-}
-
-static void release_io_niccy(struct IsdnCardState *cs)
-{
- if (cs->subtyp == NICCY_PCI) {
- int val;
-
- val = inl(cs->hw.niccy.cfg_reg + PCI_IRQ_CTRL_REG);
- val &= PCI_IRQ_DISABLE;
- outl(val, cs->hw.niccy.cfg_reg + PCI_IRQ_CTRL_REG);
- release_region(cs->hw.niccy.cfg_reg, 0x40);
- release_region(cs->hw.niccy.isac, 4);
- } else {
- release_region(cs->hw.niccy.isac, 2);
- release_region(cs->hw.niccy.isac_ale, 2);
- }
-}
-
-static void niccy_reset(struct IsdnCardState *cs)
-{
- if (cs->subtyp == NICCY_PCI) {
- int val;
-
- val = inl(cs->hw.niccy.cfg_reg + PCI_IRQ_CTRL_REG);
- val |= PCI_IRQ_ENABLE;
- outl(val, cs->hw.niccy.cfg_reg + PCI_IRQ_CTRL_REG);
- }
- inithscxisac(cs, 3);
-}
-
-static int niccy_card_msg(struct IsdnCardState *cs, int mt, void *arg)
-{
- u_long flags;
-
- switch (mt) {
- case CARD_RESET:
- spin_lock_irqsave(&cs->lock, flags);
- niccy_reset(cs);
- spin_unlock_irqrestore(&cs->lock, flags);
- return 0;
- case CARD_RELEASE:
- release_io_niccy(cs);
- return 0;
- case CARD_INIT:
- spin_lock_irqsave(&cs->lock, flags);
- niccy_reset(cs);
- spin_unlock_irqrestore(&cs->lock, flags);
- return 0;
- case CARD_TEST:
- return 0;
- }
- return 0;
-}
-
-#ifdef __ISAPNP__
-static struct pnp_card *pnp_c = NULL;
-#endif
-
-int setup_niccy(struct IsdnCard *card)
-{
- struct IsdnCardState *cs = card->cs;
- char tmp[64];
-
- strcpy(tmp, niccy_revision);
- printk(KERN_INFO "HiSax: Niccy driver Rev. %s\n", HiSax_getrev(tmp));
- if (cs->typ != ISDN_CTYPE_NICCY)
- return 0;
-#ifdef __ISAPNP__
- if (!card->para[1] && isapnp_present()) {
- struct pnp_dev *pnp_d = NULL;
- int err;
-
- pnp_c = pnp_find_card(ISAPNP_VENDOR('S', 'D', 'A'),
- ISAPNP_FUNCTION(0x0150), pnp_c);
- if (pnp_c) {
- pnp_d = pnp_find_dev(pnp_c,
- ISAPNP_VENDOR('S', 'D', 'A'),
- ISAPNP_FUNCTION(0x0150), pnp_d);
- if (!pnp_d) {
- printk(KERN_ERR "NiccyPnP: PnP error card "
- "found, no device\n");
- return 0;
- }
- pnp_disable_dev(pnp_d);
- err = pnp_activate_dev(pnp_d);
- if (err < 0) {
- printk(KERN_WARNING "%s: pnp_activate_dev "
- "ret(%d)\n", __func__, err);
- return 0;
- }
- card->para[1] = pnp_port_start(pnp_d, 0);
- card->para[2] = pnp_port_start(pnp_d, 1);
- card->para[0] = pnp_irq(pnp_d, 0);
- if (card->para[0] == -1 || !card->para[1] ||
- !card->para[2]) {
-				printk(KERN_ERR "NiccyPnP: some resources are "
- "missing %ld/%lx/%lx\n",
- card->para[0], card->para[1],
- card->para[2]);
- pnp_disable_dev(pnp_d);
- return 0;
- }
- } else
- printk(KERN_INFO "NiccyPnP: no ISAPnP card found\n");
- }
-#endif
- if (card->para[1]) {
- cs->hw.niccy.isac = card->para[1] + ISAC_PNP;
- cs->hw.niccy.hscx = card->para[1] + HSCX_PNP;
- cs->hw.niccy.isac_ale = card->para[2] + ISAC_PNP;
- cs->hw.niccy.hscx_ale = card->para[2] + HSCX_PNP;
- cs->hw.niccy.cfg_reg = 0;
- cs->subtyp = NICCY_PNP;
- cs->irq = card->para[0];
- if (!request_region(cs->hw.niccy.isac, 2, "niccy data")) {
- printk(KERN_WARNING "HiSax: NICCY data port %x-%x "
- "already in use\n",
- cs->hw.niccy.isac, cs->hw.niccy.isac + 1);
- return 0;
- }
- if (!request_region(cs->hw.niccy.isac_ale, 2, "niccy addr")) {
- printk(KERN_WARNING "HiSax: NICCY address port %x-%x "
- "already in use\n",
- cs->hw.niccy.isac_ale,
- cs->hw.niccy.isac_ale + 1);
- release_region(cs->hw.niccy.isac, 2);
- return 0;
- }
- } else {
-#ifdef CONFIG_PCI
- static struct pci_dev *niccy_dev;
-
- u_int pci_ioaddr;
- cs->subtyp = 0;
- if ((niccy_dev = hisax_find_pci_device(PCI_VENDOR_ID_SATSAGEM,
- PCI_DEVICE_ID_SATSAGEM_NICCY,
- niccy_dev))) {
- if (pci_enable_device(niccy_dev))
- return 0;
- /* get IRQ */
- if (!niccy_dev->irq) {
- printk(KERN_WARNING
- "Niccy: No IRQ for PCI card found\n");
- return 0;
- }
- cs->irq = niccy_dev->irq;
- cs->hw.niccy.cfg_reg = pci_resource_start(niccy_dev, 0);
- if (!cs->hw.niccy.cfg_reg) {
- printk(KERN_WARNING
- "Niccy: No IO-Adr for PCI cfg found\n");
- return 0;
- }
- pci_ioaddr = pci_resource_start(niccy_dev, 1);
- if (!pci_ioaddr) {
- printk(KERN_WARNING
- "Niccy: No IO-Adr for PCI card found\n");
- return 0;
- }
- cs->subtyp = NICCY_PCI;
- } else {
- printk(KERN_WARNING "Niccy: No PCI card found\n");
- return 0;
- }
- cs->irq_flags |= IRQF_SHARED;
- cs->hw.niccy.isac = pci_ioaddr + ISAC_PCI_DATA;
- cs->hw.niccy.isac_ale = pci_ioaddr + ISAC_PCI_ADDR;
- cs->hw.niccy.hscx = pci_ioaddr + HSCX_PCI_DATA;
- cs->hw.niccy.hscx_ale = pci_ioaddr + HSCX_PCI_ADDR;
- if (!request_region(cs->hw.niccy.isac, 4, "niccy")) {
- printk(KERN_WARNING
- "HiSax: NICCY data port %x-%x already in use\n",
- cs->hw.niccy.isac, cs->hw.niccy.isac + 4);
- return 0;
- }
- if (!request_region(cs->hw.niccy.cfg_reg, 0x40, "niccy pci")) {
- printk(KERN_WARNING
- "HiSax: NICCY pci port %x-%x already in use\n",
- cs->hw.niccy.cfg_reg,
- cs->hw.niccy.cfg_reg + 0x40);
- release_region(cs->hw.niccy.isac, 4);
- return 0;
- }
-#else
- printk(KERN_WARNING "Niccy: io0 0 and NO_PCI_BIOS\n");
- printk(KERN_WARNING "Niccy: unable to config NICCY PCI\n");
- return 0;
-#endif /* CONFIG_PCI */
- }
- printk(KERN_INFO "HiSax: NICCY %s config irq:%d data:0x%X ale:0x%X\n",
- (cs->subtyp == 1) ? "PnP" : "PCI",
- cs->irq, cs->hw.niccy.isac, cs->hw.niccy.isac_ale);
- setup_isac(cs);
- cs->readisac = &ReadISAC;
- cs->writeisac = &WriteISAC;
- cs->readisacfifo = &ReadISACfifo;
- cs->writeisacfifo = &WriteISACfifo;
- cs->BC_Read_Reg = &ReadHSCX;
- cs->BC_Write_Reg = &WriteHSCX;
- cs->BC_Send_Data = &hscx_fill_fifo;
- cs->cardmsg = &niccy_card_msg;
- cs->irq_func = &niccy_interrupt;
- ISACVersion(cs, "Niccy:");
- if (HscxVersion(cs, "Niccy:")) {
-		printk(KERN_WARNING "Niccy: wrong HSCX versions, check IO "
-		       "address\n");
- release_io_niccy(cs);
- return 0;
- }
- return 1;
-}
diff --git a/drivers/isdn/hisax/nj_s.c b/drivers/isdn/hisax/nj_s.c
deleted file mode 100644
index 32b4bbd18eb9..000000000000
--- a/drivers/isdn/hisax/nj_s.c
+++ /dev/null
@@ -1,294 +0,0 @@
-/* $Id: nj_s.c,v 2.13.2.4 2004/01/16 01:53:48 keil Exp $
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/init.h>
-#include "hisax.h"
-#include "isac.h"
-#include "isdnl1.h"
-#include <linux/pci.h>
-#include <linux/interrupt.h>
-#include <linux/ppp_defs.h>
-#include "netjet.h"
-
-static const char *NETjet_S_revision = "$Revision: 2.13.2.4 $";
-
-static u_char dummyrr(struct IsdnCardState *cs, int chan, u_char off)
-{
- return (5);
-}
-
-static void dummywr(struct IsdnCardState *cs, int chan, u_char off, u_char value)
-{
-}
-
-static irqreturn_t
-netjet_s_interrupt(int intno, void *dev_id)
-{
- struct IsdnCardState *cs = dev_id;
- u_char val, s1val, s0val;
- u_long flags;
-
- spin_lock_irqsave(&cs->lock, flags);
- s1val = bytein(cs->hw.njet.base + NETJET_IRQSTAT1);
- if (!(s1val & NETJET_ISACIRQ)) {
- val = NETjet_ReadIC(cs, ISAC_ISTA);
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "tiger: i1 %x %x", s1val, val);
- if (val) {
- isac_interrupt(cs, val);
- NETjet_WriteIC(cs, ISAC_MASK, 0xFF);
- NETjet_WriteIC(cs, ISAC_MASK, 0x0);
- }
- s1val = 1;
- } else
- s1val = 0;
- /*
-	 * Reading/writing stat0 is preferred because it gives a lower IRQ
-	 * rate.  Note that the IRQ stays asserted for 125 us if a condition
-	 * matches; that is a long time on a modern CPU, so the IRQ is
-	 * re-entered all the time.
- */
- s0val = bytein(cs->hw.njet.base + NETJET_IRQSTAT0);
- if ((s0val | s1val) == 0) { // shared IRQ
- spin_unlock_irqrestore(&cs->lock, flags);
- return IRQ_NONE;
- }
- if (s0val)
- byteout(cs->hw.njet.base + NETJET_IRQSTAT0, s0val);
- /* start new code 13/07/00 GE */
- /* set bits in sval to indicate which page is free */
- if (inl(cs->hw.njet.base + NETJET_DMA_WRITE_ADR) <
- inl(cs->hw.njet.base + NETJET_DMA_WRITE_IRQ))
- /* the 2nd write page is free */
- s0val = 0x08;
- else /* the 1st write page is free */
- s0val = 0x04;
- if (inl(cs->hw.njet.base + NETJET_DMA_READ_ADR) <
- inl(cs->hw.njet.base + NETJET_DMA_READ_IRQ))
- /* the 2nd read page is free */
- s0val |= 0x02;
- else /* the 1st read page is free */
- s0val |= 0x01;
- if (s0val != cs->hw.njet.last_is0) /* we have a DMA interrupt */
- {
- if (test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
- printk(KERN_WARNING "nj LOCK_ATOMIC s0val %x->%x\n",
- cs->hw.njet.last_is0, s0val);
- spin_unlock_irqrestore(&cs->lock, flags);
- return IRQ_HANDLED;
- }
- cs->hw.njet.irqstat0 = s0val;
- if ((cs->hw.njet.irqstat0 & NETJET_IRQM0_READ) !=
- (cs->hw.njet.last_is0 & NETJET_IRQM0_READ))
- /* we have a read dma int */
- read_tiger(cs);
- if ((cs->hw.njet.irqstat0 & NETJET_IRQM0_WRITE) !=
- (cs->hw.njet.last_is0 & NETJET_IRQM0_WRITE))
- /* we have a write dma int */
- write_tiger(cs);
- /* end new code 13/07/00 GE */
- test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
- }
- spin_unlock_irqrestore(&cs->lock, flags);
- return IRQ_HANDLED;
-}
-
-static void
-reset_netjet_s(struct IsdnCardState *cs)
-{
- cs->hw.njet.ctrl_reg = 0xff; /* Reset On */
- byteout(cs->hw.njet.base + NETJET_CTRL, cs->hw.njet.ctrl_reg);
- mdelay(10);
- /* now edge triggered for TJ320 GE 13/07/00 */
- /* see comment in IRQ function */
- if (cs->subtyp) /* TJ320 */
- cs->hw.njet.ctrl_reg = 0x40; /* Reset Off and status read clear */
- else
- cs->hw.njet.ctrl_reg = 0x00; /* Reset Off and status read clear */
- byteout(cs->hw.njet.base + NETJET_CTRL, cs->hw.njet.ctrl_reg);
- mdelay(10);
- cs->hw.njet.auxd = 0;
- cs->hw.njet.dmactrl = 0;
- byteout(cs->hw.njet.base + NETJET_AUXCTRL, ~NETJET_ISACIRQ);
- byteout(cs->hw.njet.base + NETJET_IRQMASK1, NETJET_ISACIRQ);
- byteout(cs->hw.njet.auxa, cs->hw.njet.auxd);
-}
-
-static int
-NETjet_S_card_msg(struct IsdnCardState *cs, int mt, void *arg)
-{
- u_long flags;
-
- switch (mt) {
- case CARD_RESET:
- spin_lock_irqsave(&cs->lock, flags);
- reset_netjet_s(cs);
- spin_unlock_irqrestore(&cs->lock, flags);
- return (0);
- case CARD_RELEASE:
- release_io_netjet(cs);
- return (0);
- case CARD_INIT:
- reset_netjet_s(cs);
- inittiger(cs);
- spin_lock_irqsave(&cs->lock, flags);
- clear_pending_isac_ints(cs);
- initisac(cs);
- /* Reenable all IRQ */
- cs->writeisac(cs, ISAC_MASK, 0);
- spin_unlock_irqrestore(&cs->lock, flags);
- return (0);
- case CARD_TEST:
- return (0);
- }
- return (0);
-}
-
-static int njs_pci_probe(struct pci_dev *dev_netjet, struct IsdnCardState *cs)
-{
- u32 cfg;
-
- if (pci_enable_device(dev_netjet))
- return (0);
- pci_set_master(dev_netjet);
- cs->irq = dev_netjet->irq;
- if (!cs->irq) {
- printk(KERN_WARNING "NETjet-S: No IRQ for PCI card found\n");
- return (0);
- }
- cs->hw.njet.base = pci_resource_start(dev_netjet, 0);
- if (!cs->hw.njet.base) {
- printk(KERN_WARNING "NETjet-S: No IO-Adr for PCI card found\n");
- return (0);
- }
-	/* The TJ300 and TJ320 must be told apart because their IRQ handling
-	 * differs; unfortunately both chips use the same device ID, but the
-	 * TJ320 has bit 20 set in the PCI status/config dword at offset 0x04.
- */
- pci_read_config_dword(dev_netjet, 0x04, &cfg);
- if (cfg & 0x00100000)
- cs->subtyp = 1; /* TJ320 */
- else
- cs->subtyp = 0; /* TJ300 */
- /* 2001/10/04 Christoph Ersfeld, Formula-n Europe AG www.formula-n.com */
- if ((dev_netjet->subsystem_vendor == 0x55) &&
- (dev_netjet->subsystem_device == 0x02)) {
- printk(KERN_WARNING "Netjet: You tried to load this driver with an incompatible TigerJet-card\n");
- printk(KERN_WARNING "Use type=41 for Formula-n enter:now ISDN PCI and compatible\n");
- return (0);
- }
- /* end new code */
-
- return (1);
-}
-
-static int njs_cs_init(struct IsdnCard *card, struct IsdnCardState *cs)
-{
-
- cs->hw.njet.auxa = cs->hw.njet.base + NETJET_AUXDATA;
- cs->hw.njet.isac = cs->hw.njet.base | NETJET_ISAC_OFF;
-
- cs->hw.njet.ctrl_reg = 0xff; /* Reset On */
- byteout(cs->hw.njet.base + NETJET_CTRL, cs->hw.njet.ctrl_reg);
- mdelay(10);
-
- cs->hw.njet.ctrl_reg = 0x00; /* Reset Off and status read clear */
- byteout(cs->hw.njet.base + NETJET_CTRL, cs->hw.njet.ctrl_reg);
- mdelay(10);
-
- cs->hw.njet.auxd = 0xC0;
- cs->hw.njet.dmactrl = 0;
-
- byteout(cs->hw.njet.base + NETJET_AUXCTRL, ~NETJET_ISACIRQ);
- byteout(cs->hw.njet.base + NETJET_IRQMASK1, NETJET_ISACIRQ);
- byteout(cs->hw.njet.auxa, cs->hw.njet.auxd);
-
- switch (((NETjet_ReadIC(cs, ISAC_RBCH) >> 5) & 3))
- {
- case 0:
- return 1; /* end loop */
-
- case 3:
- printk(KERN_WARNING "NETjet-S: NETspider-U PCI card found\n");
- return -1; /* continue looping */
-
- default:
- printk(KERN_WARNING "NETjet-S: No PCI card found\n");
- return 0; /* end loop & function */
- }
- return 1; /* end loop */
-}
-
-static int njs_cs_init_rest(struct IsdnCard *card, struct IsdnCardState *cs)
-{
- const int bytecnt = 256;
-
- printk(KERN_INFO
- "NETjet-S: %s card configured at %#lx IRQ %d\n",
- cs->subtyp ? "TJ320" : "TJ300", cs->hw.njet.base, cs->irq);
- if (!request_region(cs->hw.njet.base, bytecnt, "netjet-s isdn")) {
- printk(KERN_WARNING
- "HiSax: NETjet-S config port %#lx-%#lx already in use\n",
- cs->hw.njet.base,
- cs->hw.njet.base + bytecnt);
- return (0);
- }
- cs->readisac = &NETjet_ReadIC;
- cs->writeisac = &NETjet_WriteIC;
- cs->readisacfifo = &NETjet_ReadICfifo;
- cs->writeisacfifo = &NETjet_WriteICfifo;
- cs->BC_Read_Reg = &dummyrr;
- cs->BC_Write_Reg = &dummywr;
- cs->BC_Send_Data = &netjet_fill_dma;
- setup_isac(cs);
- cs->cardmsg = &NETjet_S_card_msg;
- cs->irq_func = &netjet_s_interrupt;
- cs->irq_flags |= IRQF_SHARED;
- ISACVersion(cs, "NETjet-S:");
-
- return (1);
-}
-
-static struct pci_dev *dev_netjet = NULL;
-
-int setup_netjet_s(struct IsdnCard *card)
-{
- int ret;
- struct IsdnCardState *cs = card->cs;
- char tmp[64];
-
-#ifdef __BIG_ENDIAN
-#error "big endian machines are not supported"
-#endif
- strcpy(tmp, NETjet_S_revision);
- printk(KERN_INFO "HiSax: Traverse Tech. NETjet-S driver Rev. %s\n", HiSax_getrev(tmp));
- if (cs->typ != ISDN_CTYPE_NETJET_S)
- return (0);
- test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
-
- for (;;)
- {
- if ((dev_netjet = hisax_find_pci_device(PCI_VENDOR_ID_TIGERJET,
- PCI_DEVICE_ID_TIGERJET_300, dev_netjet))) {
- ret = njs_pci_probe(dev_netjet, cs);
- if (!ret)
- return (0);
- } else {
- printk(KERN_WARNING "NETjet-S: No PCI card found\n");
- return (0);
- }
-
- ret = njs_cs_init(card, cs);
- if (!ret)
- return (0);
- if (ret > 0)
- break;
- /* otherwise, ret < 0, continue looping */
- }
-
- return njs_cs_init_rest(card, cs);
-}
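
The free-page bookkeeping in netjet_s_interrupt() above reduces to two comparisons: while a ring's DMA address pointer is still below that ring's IRQ threshold register, the driver marks the second half of the ring free, otherwise the first half. A minimal standalone sketch of the resulting bit encoding, using plain integers in place of the inl() reads and a hypothetical helper name; illustrative only, not the removed driver code:

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative only: mirrors the bit layout used by the NETjet interrupt
 * handlers -- 0x08/0x04 mean the 2nd/1st write page is free, 0x02/0x01
 * mean the 2nd/1st read page is free.
 */
static uint8_t netjet_free_pages(uint32_t wr_adr, uint32_t wr_irq,
				 uint32_t rd_adr, uint32_t rd_irq)
{
	uint8_t val;

	val = (wr_adr < wr_irq) ? 0x08 : 0x04;	/* write ring */
	val |= (rd_adr < rd_irq) ? 0x02 : 0x01;	/* read ring */
	return val;
}

int main(void)
{
	/* DMA still in the first half of both rings -> 0x08 | 0x02 = 0xa */
	printf("%#x\n", (unsigned int)netjet_free_pages(0x100, 0x200,
							0x100, 0x200));
	return 0;
}
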
diff --git a/drivers/isdn/hisax/nj_u.c b/drivers/isdn/hisax/nj_u.c
deleted file mode 100644
index 4e8adbede361..000000000000
--- a/drivers/isdn/hisax/nj_u.c
+++ /dev/null
@@ -1,258 +0,0 @@
-/* $Id: nj_u.c,v 2.14.2.3 2004/01/13 14:31:26 keil Exp $
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/init.h>
-#include "hisax.h"
-#include "icc.h"
-#include "isdnl1.h"
-#include <linux/pci.h>
-#include <linux/interrupt.h>
-#include <linux/ppp_defs.h>
-#include "netjet.h"
-
-static const char *NETjet_U_revision = "$Revision: 2.14.2.3 $";
-
-static u_char dummyrr(struct IsdnCardState *cs, int chan, u_char off)
-{
- return (5);
-}
-
-static void dummywr(struct IsdnCardState *cs, int chan, u_char off, u_char value)
-{
-}
-
-static irqreturn_t
-netjet_u_interrupt(int intno, void *dev_id)
-{
- struct IsdnCardState *cs = dev_id;
- u_char val, sval;
- u_long flags;
-
- spin_lock_irqsave(&cs->lock, flags);
- if (!((sval = bytein(cs->hw.njet.base + NETJET_IRQSTAT1)) &
- NETJET_ISACIRQ)) {
- val = NETjet_ReadIC(cs, ICC_ISTA);
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "tiger: i1 %x %x", sval, val);
- if (val) {
- icc_interrupt(cs, val);
- NETjet_WriteIC(cs, ICC_MASK, 0xFF);
- NETjet_WriteIC(cs, ICC_MASK, 0x0);
- }
- }
- /* start new code 13/07/00 GE */
- /* set bits in sval to indicate which page is free */
- if (inl(cs->hw.njet.base + NETJET_DMA_WRITE_ADR) <
- inl(cs->hw.njet.base + NETJET_DMA_WRITE_IRQ))
- /* the 2nd write page is free */
- sval = 0x08;
- else /* the 1st write page is free */
- sval = 0x04;
- if (inl(cs->hw.njet.base + NETJET_DMA_READ_ADR) <
- inl(cs->hw.njet.base + NETJET_DMA_READ_IRQ))
- /* the 2nd read page is free */
- sval = sval | 0x02;
- else /* the 1st read page is free */
- sval = sval | 0x01;
- if (sval != cs->hw.njet.last_is0) /* we have a DMA interrupt */
- {
- if (test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
- spin_unlock_irqrestore(&cs->lock, flags);
- return IRQ_HANDLED;
- }
- cs->hw.njet.irqstat0 = sval;
- if ((cs->hw.njet.irqstat0 & NETJET_IRQM0_READ) !=
- (cs->hw.njet.last_is0 & NETJET_IRQM0_READ))
- /* we have a read dma int */
- read_tiger(cs);
- if ((cs->hw.njet.irqstat0 & NETJET_IRQM0_WRITE) !=
- (cs->hw.njet.last_is0 & NETJET_IRQM0_WRITE))
- /* we have a write dma int */
- write_tiger(cs);
- /* end new code 13/07/00 GE */
- test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
- }
- spin_unlock_irqrestore(&cs->lock, flags);
- return IRQ_HANDLED;
-}
-
-static void
-reset_netjet_u(struct IsdnCardState *cs)
-{
- cs->hw.njet.ctrl_reg = 0xff; /* Reset On */
- byteout(cs->hw.njet.base + NETJET_CTRL, cs->hw.njet.ctrl_reg);
- mdelay(10);
- cs->hw.njet.ctrl_reg = 0x40; /* Reset Off and status read clear */
- /* now edge triggered for TJ320 GE 13/07/00 */
- byteout(cs->hw.njet.base + NETJET_CTRL, cs->hw.njet.ctrl_reg);
- mdelay(10);
- cs->hw.njet.auxd = 0xC0;
- cs->hw.njet.dmactrl = 0;
- byteout(cs->hw.njet.auxa, 0);
- byteout(cs->hw.njet.base + NETJET_AUXCTRL, ~NETJET_ISACIRQ);
- byteout(cs->hw.njet.base + NETJET_IRQMASK1, NETJET_ISACIRQ);
- byteout(cs->hw.njet.auxa, cs->hw.njet.auxd);
-}
-
-static int
-NETjet_U_card_msg(struct IsdnCardState *cs, int mt, void *arg)
-{
- u_long flags;
-
- switch (mt) {
- case CARD_RESET:
- spin_lock_irqsave(&cs->lock, flags);
- reset_netjet_u(cs);
- spin_unlock_irqrestore(&cs->lock, flags);
- return (0);
- case CARD_RELEASE:
- release_io_netjet(cs);
- return (0);
- case CARD_INIT:
- spin_lock_irqsave(&cs->lock, flags);
- inittiger(cs);
- reset_netjet_u(cs);
- clear_pending_icc_ints(cs);
- initicc(cs);
- /* Reenable all IRQ */
- cs->writeisac(cs, ICC_MASK, 0);
- spin_unlock_irqrestore(&cs->lock, flags);
- return (0);
- case CARD_TEST:
- return (0);
- }
- return (0);
-}
-
-static int nju_pci_probe(struct pci_dev *dev_netjet, struct IsdnCardState *cs)
-{
- if (pci_enable_device(dev_netjet))
- return (0);
- pci_set_master(dev_netjet);
- cs->irq = dev_netjet->irq;
- if (!cs->irq) {
- printk(KERN_WARNING "NETspider-U: No IRQ for PCI card found\n");
- return (0);
- }
- cs->hw.njet.base = pci_resource_start(dev_netjet, 0);
- if (!cs->hw.njet.base) {
- printk(KERN_WARNING "NETspider-U: No IO-Adr for PCI card found\n");
- return (0);
- }
-
- return (1);
-}
-
-static int nju_cs_init(struct IsdnCard *card, struct IsdnCardState *cs)
-{
- cs->hw.njet.auxa = cs->hw.njet.base + NETJET_AUXDATA;
- cs->hw.njet.isac = cs->hw.njet.base | NETJET_ISAC_OFF;
- mdelay(10);
-
- cs->hw.njet.ctrl_reg = 0xff; /* Reset On */
- byteout(cs->hw.njet.base + NETJET_CTRL, cs->hw.njet.ctrl_reg);
- mdelay(10);
-
- cs->hw.njet.ctrl_reg = 0x00; /* Reset Off and status read clear */
- byteout(cs->hw.njet.base + NETJET_CTRL, cs->hw.njet.ctrl_reg);
- mdelay(10);
-
- cs->hw.njet.auxd = 0xC0;
- cs->hw.njet.dmactrl = 0;
-
- byteout(cs->hw.njet.auxa, 0);
- byteout(cs->hw.njet.base + NETJET_AUXCTRL, ~NETJET_ISACIRQ);
- byteout(cs->hw.njet.base + NETJET_IRQMASK1, NETJET_ISACIRQ);
- byteout(cs->hw.njet.auxa, cs->hw.njet.auxd);
-
- switch (((NETjet_ReadIC(cs, ICC_RBCH) >> 5) & 3))
- {
- case 3:
- return 1; /* end loop */
-
- case 0:
- printk(KERN_WARNING "NETspider-U: NETjet-S PCI card found\n");
- return -1; /* continue looping */
-
- default:
- printk(KERN_WARNING "NETspider-U: No PCI card found\n");
- return 0; /* end loop & function */
- }
- return 1; /* end loop */
-}
-
-static int nju_cs_init_rest(struct IsdnCard *card, struct IsdnCardState *cs)
-{
- const int bytecnt = 256;
-
- printk(KERN_INFO
- "NETspider-U: PCI card configured at %#lx IRQ %d\n",
- cs->hw.njet.base, cs->irq);
- if (!request_region(cs->hw.njet.base, bytecnt, "netspider-u isdn")) {
- printk(KERN_WARNING
- "HiSax: NETspider-U config port %#lx-%#lx "
- "already in use\n",
- cs->hw.njet.base,
- cs->hw.njet.base + bytecnt);
- return (0);
- }
- setup_icc(cs);
- cs->readisac = &NETjet_ReadIC;
- cs->writeisac = &NETjet_WriteIC;
- cs->readisacfifo = &NETjet_ReadICfifo;
- cs->writeisacfifo = &NETjet_WriteICfifo;
- cs->BC_Read_Reg = &dummyrr;
- cs->BC_Write_Reg = &dummywr;
- cs->BC_Send_Data = &netjet_fill_dma;
- cs->cardmsg = &NETjet_U_card_msg;
- cs->irq_func = &netjet_u_interrupt;
- cs->irq_flags |= IRQF_SHARED;
- ICCVersion(cs, "NETspider-U:");
-
- return (1);
-}
-
-static struct pci_dev *dev_netjet = NULL;
-
-int setup_netjet_u(struct IsdnCard *card)
-{
- int ret;
- struct IsdnCardState *cs = card->cs;
- char tmp[64];
-
-#ifdef __BIG_ENDIAN
-#error "big endian machines are not supported"
-#endif
-
- strcpy(tmp, NETjet_U_revision);
- printk(KERN_INFO "HiSax: Traverse Tech. NETspider-U driver Rev. %s\n", HiSax_getrev(tmp));
- if (cs->typ != ISDN_CTYPE_NETJET_U)
- return (0);
- test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
-
- for (;;)
- {
- if ((dev_netjet = hisax_find_pci_device(PCI_VENDOR_ID_TIGERJET,
- PCI_DEVICE_ID_TIGERJET_300, dev_netjet))) {
- ret = nju_pci_probe(dev_netjet, cs);
- if (!ret)
- return (0);
- } else {
- printk(KERN_WARNING "NETspider-U: No PCI card found\n");
- return (0);
- }
-
- ret = nju_cs_init(card, cs);
- if (!ret)
- return (0);
- if (ret > 0)
- break;
- /* ret < 0 == continue looping */
- }
-
- return nju_cs_init_rest(card, cs);
-}
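
Both probe paths above hinge on the same test: bits 6 and 5 of the D-channel controller's RBCH register tell njs_cs_init() and nju_cs_init() whether an ISAC (NETjet-S) or an ICC (NETspider-U) sits behind the TigerJet bridge, and each driver keeps looping when it finds the other chip. A sketch of that decision with the register value passed in directly rather than read through NETjet_ReadIC(); the enum names are made up for illustration:

#include <stdio.h>

enum netjet_chip { CHIP_ISAC, CHIP_ICC, CHIP_UNKNOWN };

/* rbch is the raw RBCH register value of the D-channel controller */
static enum netjet_chip netjet_identify(unsigned char rbch)
{
	switch ((rbch >> 5) & 3) {
	case 0:
		return CHIP_ISAC;	/* NETjet-S */
	case 3:
		return CHIP_ICC;	/* NETspider-U */
	default:
		return CHIP_UNKNOWN;
	}
}

int main(void)
{
	/* bits 6..5 == 3 -> CHIP_ICC, prints 1 */
	printf("%d\n", netjet_identify(0x60));
	return 0;
}
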
diff --git a/drivers/isdn/hisax/q931.c b/drivers/isdn/hisax/q931.c
deleted file mode 100644
index 6b8c3fbe3965..000000000000
--- a/drivers/isdn/hisax/q931.c
+++ /dev/null
@@ -1,1513 +0,0 @@
-/* $Id: q931.c,v 1.12.2.3 2004/01/13 14:31:26 keil Exp $
- *
- * code to decode ITU Q.931 call control messages
- *
- * Author Jan den Ouden
- * Copyright by Jan den Ouden
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- * Changelog:
- *
- * Pauline Middelink general improvements
- * Beat Doebeli cause texts, display information element
- * Karsten Keil cause texts, display information element for 1TR6
- *
- */
-
-
-#include "hisax.h"
-#include "l3_1tr6.h"
-
-void
-iecpy(u_char *dest, u_char *iestart, int ieoffset)
-{
- u_char *p;
- int l;
-
- p = iestart + ieoffset + 2;
- l = iestart[1] - ieoffset;
- while (l--)
- *dest++ = *p++;
- *dest++ = '\0';
-}
-
-/*
- * According to Table 4-2/Q.931
- */
-static
-struct MessageType {
- u_char nr;
- char *descr;
-} mtlist[] = {
-
- {
- 0x1, "ALERTING"
- },
- {
- 0x2, "CALL PROCEEDING"
- },
- {
- 0x7, "CONNECT"
- },
- {
- 0xf, "CONNECT ACKNOWLEDGE"
- },
- {
- 0x3, "PROGRESS"
- },
- {
- 0x5, "SETUP"
- },
- {
- 0xd, "SETUP ACKNOWLEDGE"
- },
- {
- 0x24, "HOLD"
- },
- {
- 0x28, "HOLD ACKNOWLEDGE"
- },
- {
- 0x30, "HOLD REJECT"
- },
- {
- 0x31, "RETRIEVE"
- },
- {
- 0x33, "RETRIEVE ACKNOWLEDGE"
- },
- {
- 0x37, "RETRIEVE REJECT"
- },
- {
- 0x26, "RESUME"
- },
- {
- 0x2e, "RESUME ACKNOWLEDGE"
- },
- {
- 0x22, "RESUME REJECT"
- },
- {
- 0x25, "SUSPEND"
- },
- {
- 0x2d, "SUSPEND ACKNOWLEDGE"
- },
- {
- 0x21, "SUSPEND REJECT"
- },
- {
- 0x20, "USER INFORMATION"
- },
- {
- 0x45, "DISCONNECT"
- },
- {
- 0x4d, "RELEASE"
- },
- {
- 0x5a, "RELEASE COMPLETE"
- },
- {
- 0x46, "RESTART"
- },
- {
- 0x4e, "RESTART ACKNOWLEDGE"
- },
- {
- 0x60, "SEGMENT"
- },
- {
- 0x79, "CONGESTION CONTROL"
- },
- {
- 0x7b, "INFORMATION"
- },
- {
- 0x62, "FACILITY"
- },
- {
- 0x6e, "NOTIFY"
- },
- {
- 0x7d, "STATUS"
- },
- {
- 0x75, "STATUS ENQUIRY"
- }
-};
-
-#define MTSIZE ARRAY_SIZE(mtlist)
-
-static
-struct MessageType mt_n0[] =
-{
- {MT_N0_REG_IND, "REGister INDication"},
- {MT_N0_CANC_IND, "CANCel INDication"},
- {MT_N0_FAC_STA, "FACility STAtus"},
- {MT_N0_STA_ACK, "STAtus ACKnowledge"},
- {MT_N0_STA_REJ, "STAtus REJect"},
- {MT_N0_FAC_INF, "FACility INFormation"},
- {MT_N0_INF_ACK, "INFormation ACKnowledge"},
- {MT_N0_INF_REJ, "INFormation REJect"},
- {MT_N0_CLOSE, "CLOSE"},
- {MT_N0_CLO_ACK, "CLOse ACKnowledge"}
-};
-
-#define MT_N0_LEN ARRAY_SIZE(mt_n0)
-
-static
-struct MessageType mt_n1[] =
-{
- {MT_N1_ESC, "ESCape"},
- {MT_N1_ALERT, "ALERT"},
- {MT_N1_CALL_SENT, "CALL SENT"},
- {MT_N1_CONN, "CONNect"},
- {MT_N1_CONN_ACK, "CONNect ACKnowledge"},
- {MT_N1_SETUP, "SETUP"},
- {MT_N1_SETUP_ACK, "SETUP ACKnowledge"},
- {MT_N1_RES, "RESume"},
- {MT_N1_RES_ACK, "RESume ACKnowledge"},
- {MT_N1_RES_REJ, "RESume REJect"},
- {MT_N1_SUSP, "SUSPend"},
- {MT_N1_SUSP_ACK, "SUSPend ACKnowledge"},
- {MT_N1_SUSP_REJ, "SUSPend REJect"},
- {MT_N1_USER_INFO, "USER INFO"},
- {MT_N1_DET, "DETach"},
- {MT_N1_DISC, "DISConnect"},
- {MT_N1_REL, "RELease"},
- {MT_N1_REL_ACK, "RELease ACKnowledge"},
- {MT_N1_CANC_ACK, "CANCel ACKnowledge"},
- {MT_N1_CANC_REJ, "CANCel REJect"},
- {MT_N1_CON_CON, "CONgestion CONtrol"},
- {MT_N1_FAC, "FACility"},
- {MT_N1_FAC_ACK, "FACility ACKnowledge"},
- {MT_N1_FAC_CAN, "FACility CANcel"},
- {MT_N1_FAC_REG, "FACility REGister"},
- {MT_N1_FAC_REJ, "FACility REJect"},
- {MT_N1_INFO, "INFOrmation"},
- {MT_N1_REG_ACK, "REGister ACKnowledge"},
- {MT_N1_REG_REJ, "REGister REJect"},
- {MT_N1_STAT, "STATus"}
-};
-
-#define MT_N1_LEN ARRAY_SIZE(mt_n1)
-
-
-static int
-prbits(char *dest, u_char b, int start, int len)
-{
- char *dp = dest;
-
- b = b << (8 - start);
- while (len--) {
- if (b & 0x80)
- *dp++ = '1';
- else
- *dp++ = '0';
- b = b << 1;
- }
- return (dp - dest);
-}
-
-static
-u_char *
-skipext(u_char *p)
-{
- while (!(*p++ & 0x80));
- return (p);
-}
-
-/*
- * Cause Values According to Q.850
- * edescr: English description
- * ddescr: German description used by Swissnet II (Swiss Telecom),
- *         not yet written...
- */
-
-static
-struct CauseValue {
- u_char nr;
- char *edescr;
- char *ddescr;
-} cvlist[] = {
-
- {
- 0x01, "Unallocated (unassigned) number", "Nummer nicht zugeteilt"
- },
- {
- 0x02, "No route to specified transit network", ""
- },
- {
- 0x03, "No route to destination", ""
- },
- {
- 0x04, "Send special information tone", ""
- },
- {
- 0x05, "Misdialled trunk prefix", ""
- },
- {
- 0x06, "Channel unacceptable", "Kanal nicht akzeptierbar"
- },
- {
- 0x07, "Channel awarded and being delivered in an established channel", ""
- },
- {
- 0x08, "Preemption", ""
- },
- {
- 0x09, "Preemption - circuit reserved for reuse", ""
- },
- {
- 0x10, "Normal call clearing", "Normale Ausloesung"
- },
- {
- 0x11, "User busy", "TNB besetzt"
- },
- {
- 0x12, "No user responding", ""
- },
- {
- 0x13, "No answer from user (user alerted)", ""
- },
- {
- 0x14, "Subscriber absent", ""
- },
- {
- 0x15, "Call rejected", ""
- },
- {
- 0x16, "Number changed", ""
- },
- {
- 0x1a, "non-selected user clearing", ""
- },
- {
- 0x1b, "Destination out of order", ""
- },
- {
- 0x1c, "Invalid number format (address incomplete)", ""
- },
- {
- 0x1d, "Facility rejected", ""
- },
- {
- 0x1e, "Response to Status enquiry", ""
- },
- {
- 0x1f, "Normal, unspecified", ""
- },
- {
- 0x22, "No circuit/channel available", ""
- },
- {
- 0x26, "Network out of order", ""
- },
- {
- 0x27, "Permanent frame mode connection out-of-service", ""
- },
- {
- 0x28, "Permanent frame mode connection operational", ""
- },
- {
- 0x29, "Temporary failure", ""
- },
- {
- 0x2a, "Switching equipment congestion", ""
- },
- {
- 0x2b, "Access information discarded", ""
- },
- {
- 0x2c, "Requested circuit/channel not available", ""
- },
- {
- 0x2e, "Precedence call blocked", ""
- },
- {
- 0x2f, "Resource unavailable, unspecified", ""
- },
- {
- 0x31, "Quality of service unavailable", ""
- },
- {
- 0x32, "Requested facility not subscribed", ""
- },
- {
- 0x35, "Outgoing calls barred within CUG", ""
- },
- {
- 0x37, "Incoming calls barred within CUG", ""
- },
- {
- 0x39, "Bearer capability not authorized", ""
- },
- {
- 0x3a, "Bearer capability not presently available", ""
- },
- {
- 0x3e, "Inconsistency in designated outgoing access information and subscriber class ", " "
- },
- {
- 0x3f, "Service or option not available, unspecified", ""
- },
- {
- 0x41, "Bearer capability not implemented", ""
- },
- {
- 0x42, "Channel type not implemented", ""
- },
- {
- 0x43, "Requested facility not implemented", ""
- },
- {
- 0x44, "Only restricted digital information bearer capability is available", ""
- },
- {
- 0x4f, "Service or option not implemented", ""
- },
- {
- 0x51, "Invalid call reference value", ""
- },
- {
- 0x52, "Identified channel does not exist", ""
- },
- {
- 0x53, "A suspended call exists, but this call identity does not", ""
- },
- {
- 0x54, "Call identity in use", ""
- },
- {
- 0x55, "No call suspended", ""
- },
- {
- 0x56, "Call having the requested call identity has been cleared", ""
- },
- {
- 0x57, "User not member of CUG", ""
- },
- {
- 0x58, "Incompatible destination", ""
- },
- {
- 0x5a, "Non-existent CUG", ""
- },
- {
- 0x5b, "Invalid transit network selection", ""
- },
- {
- 0x5f, "Invalid message, unspecified", ""
- },
- {
- 0x60, "Mandatory information element is missing", ""
- },
- {
- 0x61, "Message type non-existent or not implemented", ""
- },
- {
- 0x62, "Message not compatible with call state or message type non-existent or not implemented ", " "
- },
- {
- 0x63, "Information element/parameter non-existent or not implemented", ""
- },
- {
- 0x64, "Invalid information element contents", ""
- },
- {
- 0x65, "Message not compatible with call state", ""
- },
- {
- 0x66, "Recovery on timer expiry", ""
- },
- {
- 0x67, "Parameter non-existent or not implemented - passed on", ""
- },
- {
- 0x6e, "Message with unrecognized parameter discarded", ""
- },
- {
- 0x6f, "Protocol error, unspecified", ""
- },
- {
- 0x7f, "Interworking, unspecified", ""
- },
-};
-
-#define CVSIZE ARRAY_SIZE(cvlist)
-
-static
-int
-prcause(char *dest, u_char *p)
-{
- u_char *end;
- char *dp = dest;
- int i, cause;
-
- end = p + p[1] + 1;
- p += 2;
- dp += sprintf(dp, " coding ");
- dp += prbits(dp, *p, 7, 2);
- dp += sprintf(dp, " location ");
- dp += prbits(dp, *p, 4, 4);
- *dp++ = '\n';
- p = skipext(p);
-
- cause = 0x7f & *p++;
-
- /* locate cause value */
- for (i = 0; i < CVSIZE; i++)
- if (cvlist[i].nr == cause)
- break;
-
- /* display cause value if it exists */
- if (i == CVSIZE)
- dp += sprintf(dp, "Unknown cause type %x!\n", cause);
- else
- dp += sprintf(dp, " cause value %x : %s \n", cause, cvlist[i].edescr);
-
- while (!0) {
- if (p > end)
- break;
- dp += sprintf(dp, " diag attribute %d ", *p++ & 0x7f);
- dp += sprintf(dp, " rej %d ", *p & 0x7f);
- if (*p & 0x80) {
- *dp++ = '\n';
- break;
- } else
- dp += sprintf(dp, " av %d\n", (*++p) & 0x7f);
- }
- return (dp - dest);
-
-}
-
-static
-struct MessageType cause_1tr6[] =
-{
- {CAUSE_InvCRef, "Invalid Call Reference"},
- {CAUSE_BearerNotImpl, "Bearer Service Not Implemented"},
- {CAUSE_CIDunknown, "Caller Identity unknown"},
- {CAUSE_CIDinUse, "Caller Identity in Use"},
- {CAUSE_NoChans, "No Channels available"},
- {CAUSE_FacNotImpl, "Facility Not Implemented"},
- {CAUSE_FacNotSubscr, "Facility Not Subscribed"},
- {CAUSE_OutgoingBarred, "Outgoing calls barred"},
- {CAUSE_UserAccessBusy, "User Access Busy"},
- {CAUSE_NegativeGBG, "Negative GBG"},
- {CAUSE_UnknownGBG, "Unknown GBG"},
- {CAUSE_NoSPVknown, "No SPV known"},
- {CAUSE_DestNotObtain, "Destination not obtainable"},
- {CAUSE_NumberChanged, "Number changed"},
- {CAUSE_OutOfOrder, "Out Of Order"},
- {CAUSE_NoUserResponse, "No User Response"},
- {CAUSE_UserBusy, "User Busy"},
- {CAUSE_IncomingBarred, "Incoming Barred"},
- {CAUSE_CallRejected, "Call Rejected"},
- {CAUSE_NetworkCongestion, "Network Congestion"},
- {CAUSE_RemoteUser, "Remote User initiated"},
- {CAUSE_LocalProcErr, "Local Procedure Error"},
- {CAUSE_RemoteProcErr, "Remote Procedure Error"},
- {CAUSE_RemoteUserSuspend, "Remote User Suspend"},
- {CAUSE_RemoteUserResumed, "Remote User Resumed"},
- {CAUSE_UserInfoDiscarded, "User Info Discarded"}
-};
-
-static int cause_1tr6_len = ARRAY_SIZE(cause_1tr6);
-
-static int
-prcause_1tr6(char *dest, u_char *p)
-{
- char *dp = dest;
- int i, cause;
-
- p++;
- if (0 == *p) {
- dp += sprintf(dp, " OK (cause length=0)\n");
- return (dp - dest);
- } else if (*p > 1) {
- dp += sprintf(dp, " coding ");
- dp += prbits(dp, p[2], 7, 2);
- dp += sprintf(dp, " location ");
- dp += prbits(dp, p[2], 4, 4);
- *dp++ = '\n';
- }
- p++;
- cause = 0x7f & *p;
-
- /* locate cause value */
- for (i = 0; i < cause_1tr6_len; i++)
- if (cause_1tr6[i].nr == cause)
- break;
-
- /* display cause value if it exists */
- if (i == cause_1tr6_len)
- dp += sprintf(dp, "Unknown cause type %x!\n", cause);
- else
- dp += sprintf(dp, " cause value %x : %s \n", cause, cause_1tr6[i].descr);
-
- return (dp - dest);
-
-}
-
-static int
-prchident(char *dest, u_char *p)
-{
- char *dp = dest;
-
- p += 2;
- dp += sprintf(dp, " octet 3 ");
- dp += prbits(dp, *p, 8, 8);
- *dp++ = '\n';
- return (dp - dest);
-}
-
-static int
-prcalled(char *dest, u_char *p)
-{
- int l;
- char *dp = dest;
-
- p++;
- l = *p++ - 1;
- dp += sprintf(dp, " octet 3 ");
- dp += prbits(dp, *p++, 8, 8);
- *dp++ = '\n';
- dp += sprintf(dp, " number digits ");
- while (l--)
- *dp++ = *p++;
- *dp++ = '\n';
- return (dp - dest);
-}
-static int
-prcalling(char *dest, u_char *p)
-{
- int l;
- char *dp = dest;
-
- p++;
- l = *p++ - 1;
- dp += sprintf(dp, " octet 3 ");
- dp += prbits(dp, *p, 8, 8);
- *dp++ = '\n';
- if (!(*p & 0x80)) {
- dp += sprintf(dp, " octet 3a ");
- dp += prbits(dp, *++p, 8, 8);
- *dp++ = '\n';
- l--;
- }
- p++;
-
- dp += sprintf(dp, " number digits ");
- while (l--)
- *dp++ = *p++;
- *dp++ = '\n';
- return (dp - dest);
-}
-
-static
-int
-prbearer(char *dest, u_char *p)
-{
- char *dp = dest, ch;
-
- p += 2;
- dp += sprintf(dp, " octet 3 ");
- dp += prbits(dp, *p++, 8, 8);
- *dp++ = '\n';
- dp += sprintf(dp, " octet 4 ");
- dp += prbits(dp, *p, 8, 8);
- *dp++ = '\n';
- if ((*p++ & 0x1f) == 0x18) {
- dp += sprintf(dp, " octet 4.1 ");
- dp += prbits(dp, *p++, 8, 8);
- *dp++ = '\n';
- }
- /* check for user information layer 1 */
- if ((*p & 0x60) == 0x20) {
- ch = ' ';
- do {
- dp += sprintf(dp, " octet 5%c ", ch);
- dp += prbits(dp, *p, 8, 8);
- *dp++ = '\n';
- if (ch == ' ')
- ch = 'a';
- else
- ch++;
- }
- while (!(*p++ & 0x80));
- }
- /* check for user information layer 2 */
- if ((*p & 0x60) == 0x40) {
- dp += sprintf(dp, " octet 6 ");
- dp += prbits(dp, *p++, 8, 8);
- *dp++ = '\n';
- }
- /* check for user information layer 3 */
- if ((*p & 0x60) == 0x60) {
- dp += sprintf(dp, " octet 7 ");
- dp += prbits(dp, *p++, 8, 8);
- *dp++ = '\n';
- }
- return (dp - dest);
-}
-
-
-static
-int
-prbearer_ni1(char *dest, u_char *p)
-{
- char *dp = dest;
- u_char len;
-
- p++;
- len = *p++;
- dp += sprintf(dp, " octet 3 ");
- dp += prbits(dp, *p, 8, 8);
- switch (*p++) {
- case 0x80:
- dp += sprintf(dp, " Speech");
- break;
- case 0x88:
- dp += sprintf(dp, " Unrestricted digital information");
- break;
- case 0x90:
- dp += sprintf(dp, " 3.1 kHz audio");
- break;
- default:
- dp += sprintf(dp, " Unknown information-transfer capability");
- }
- *dp++ = '\n';
- dp += sprintf(dp, " octet 4 ");
- dp += prbits(dp, *p, 8, 8);
- switch (*p++) {
- case 0x90:
- dp += sprintf(dp, " 64 kbps, circuit mode");
- break;
- case 0xc0:
- dp += sprintf(dp, " Packet mode");
- break;
- default:
- dp += sprintf(dp, " Unknown transfer mode");
- }
- *dp++ = '\n';
- if (len > 2) {
- dp += sprintf(dp, " octet 5 ");
- dp += prbits(dp, *p, 8, 8);
- switch (*p++) {
- case 0x21:
- dp += sprintf(dp, " Rate adaption\n");
- dp += sprintf(dp, " octet 5a ");
- dp += prbits(dp, *p, 8, 8);
- break;
- case 0xa2:
- dp += sprintf(dp, " u-law");
- break;
- default:
- dp += sprintf(dp, " Unknown UI layer 1 protocol");
- }
- *dp++ = '\n';
- }
- return (dp - dest);
-}
-
-static int
-general(char *dest, u_char *p)
-{
- char *dp = dest;
- char ch = ' ';
- int l, octet = 3;
-
- p++;
- l = *p++;
- /* Iterate over all octets in the information element */
- while (l--) {
- dp += sprintf(dp, " octet %d%c ", octet, ch);
- dp += prbits(dp, *p++, 8, 8);
- *dp++ = '\n';
-
- /* last octet in group? */
- if (*p & 0x80) {
- octet++;
- ch = ' ';
- } else if (ch == ' ')
- ch = 'a';
- else
- ch++;
- }
- return (dp - dest);
-}
-
-static int
-general_ni1(char *dest, u_char *p)
-{
- char *dp = dest;
- char ch = ' ';
- int l, octet = 3;
-
- p++;
- l = *p++;
- /* Iterate over all octets in the information element */
- while (l--) {
- dp += sprintf(dp, " octet %d%c ", octet, ch);
- dp += prbits(dp, *p, 8, 8);
- *dp++ = '\n';
-
- /* last octet in group? */
- if (*p++ & 0x80) {
- octet++;
- ch = ' ';
- } else if (ch == ' ')
- ch = 'a';
- else
- ch++;
- }
- return (dp - dest);
-}
-
-static int
-prcharge(char *dest, u_char *p)
-{
- char *dp = dest;
- int l;
-
- p++;
- l = *p++ - 1;
- dp += sprintf(dp, " GEA ");
- dp += prbits(dp, *p++, 8, 8);
- dp += sprintf(dp, " Anzahl: ");
-	/* Iterate over all octets in the information element */
- while (l--)
- *dp++ = *p++;
- *dp++ = '\n';
- return (dp - dest);
-}
-static int
-prtext(char *dest, u_char *p)
-{
- char *dp = dest;
- int l;
-
- p++;
- l = *p++;
- dp += sprintf(dp, " ");
-	/* Iterate over all octets in the information element */
- while (l--)
- *dp++ = *p++;
- *dp++ = '\n';
- return (dp - dest);
-}
-
-static int
-prfeatureind(char *dest, u_char *p)
-{
- char *dp = dest;
-
- p += 2; /* skip id, len */
- dp += sprintf(dp, " octet 3 ");
- dp += prbits(dp, *p, 8, 8);
- *dp++ = '\n';
- if (!(*p++ & 0x80)) {
- dp += sprintf(dp, " octet 4 ");
- dp += prbits(dp, *p++, 8, 8);
- *dp++ = '\n';
- }
- dp += sprintf(dp, " Status: ");
- switch (*p) {
- case 0:
- dp += sprintf(dp, "Idle");
- break;
- case 1:
- dp += sprintf(dp, "Active");
- break;
- case 2:
- dp += sprintf(dp, "Prompt");
- break;
- case 3:
- dp += sprintf(dp, "Pending");
- break;
- default:
- dp += sprintf(dp, "(Reserved)");
- break;
- }
- *dp++ = '\n';
- return (dp - dest);
-}
-
-static
-struct DTag { /* Display tags */
- u_char nr;
- char *descr;
-} dtaglist[] = {
- { 0x82, "Continuation" },
- { 0x83, "Called address" },
- { 0x84, "Cause" },
- { 0x85, "Progress indicator" },
- { 0x86, "Notification indicator" },
- { 0x87, "Prompt" },
-	{ 0x88, "Accumulated digits" },
- { 0x89, "Status" },
- { 0x8a, "Inband" },
- { 0x8b, "Calling address" },
- { 0x8c, "Reason" },
- { 0x8d, "Calling party name" },
- { 0x8e, "Called party name" },
- { 0x8f, "Original called name" },
- { 0x90, "Redirecting name" },
- { 0x91, "Connected name" },
- { 0x92, "Originating restrictions" },
- { 0x93, "Date & time of day" },
- { 0x94, "Call Appearance ID" },
- { 0x95, "Feature address" },
- { 0x96, "Redirection name" },
- { 0x9e, "Text" },
-};
-#define DTAGSIZE ARRAY_SIZE(dtaglist)
-
-static int
-disptext_ni1(char *dest, u_char *p)
-{
- char *dp = dest;
- int l, tag, len, i;
-
- p++;
- l = *p++ - 1;
- if (*p++ != 0x80) {
- dp += sprintf(dp, " Unknown display type\n");
- return (dp - dest);
- }
- /* Iterate over all tag,length,text fields */
- while (l > 0) {
- tag = *p++;
- len = *p++;
- l -= len + 2;
- /* Don't space or skip */
- if ((tag == 0x80) || (tag == 0x81)) p++;
- else {
- for (i = 0; i < DTAGSIZE; i++)
- if (tag == dtaglist[i].nr)
- break;
-
-			/* Print the tag name when known, otherwise mark it as unknown */
- if (i != DTAGSIZE) {
- dp += sprintf(dp, " %s: ", dtaglist[i].descr);
- while (len--)
- *dp++ = *p++;
- } else {
- dp += sprintf(dp, " (unknown display tag %2x): ", tag);
- while (len--)
- *dp++ = *p++;
- }
- dp += sprintf(dp, "\n");
- }
- }
- return (dp - dest);
-}
-static int
-display(char *dest, u_char *p)
-{
- char *dp = dest;
- char ch = ' ';
- int l, octet = 3;
-
- p++;
- l = *p++;
-	/* Iterate over all octets in the display-information element */
- dp += sprintf(dp, " \"");
- while (l--) {
- dp += sprintf(dp, "%c", *p++);
-
- /* last octet in group? */
- if (*p & 0x80) {
- octet++;
- ch = ' ';
- } else if (ch == ' ')
- ch = 'a';
-
- else
- ch++;
- }
- *dp++ = '\"';
- *dp++ = '\n';
- return (dp - dest);
-}
-
-static int
-prfacility(char *dest, u_char *p)
-{
- char *dp = dest;
- int l, l2;
-
- p++;
- l = *p++;
- dp += sprintf(dp, " octet 3 ");
- dp += prbits(dp, *p++, 8, 8);
- dp += sprintf(dp, "\n");
- l -= 1;
-
- while (l > 0) {
- dp += sprintf(dp, " octet 4 ");
- dp += prbits(dp, *p++, 8, 8);
- dp += sprintf(dp, "\n");
- dp += sprintf(dp, " octet 5 %d\n", l2 = *p++ & 0x7f);
- l -= 2;
- dp += sprintf(dp, " contents ");
- while (l2--) {
- dp += sprintf(dp, "%2x ", *p++);
- l--;
- }
- dp += sprintf(dp, "\n");
- }
-
- return (dp - dest);
-}
-
-static
-struct InformationElement {
- u_char nr;
- char *descr;
- int (*f) (char *, u_char *);
-} ielist[] = {
-
- {
- 0x00, "Segmented message", general
- },
- {
- 0x04, "Bearer capability", prbearer
- },
- {
- 0x08, "Cause", prcause
- },
- {
- 0x10, "Call identity", general
- },
- {
- 0x14, "Call state", general
- },
- {
- 0x18, "Channel identification", prchident
- },
- {
- 0x1c, "Facility", prfacility
- },
- {
- 0x1e, "Progress indicator", general
- },
- {
- 0x20, "Network-specific facilities", general
- },
- {
- 0x27, "Notification indicator", general
- },
- {
- 0x28, "Display", display
- },
- {
- 0x29, "Date/Time", general
- },
- {
- 0x2c, "Keypad facility", general
- },
- {
- 0x34, "Signal", general
- },
- {
- 0x40, "Information rate", general
- },
- {
- 0x42, "End-to-end delay", general
- },
- {
- 0x43, "Transit delay selection and indication", general
- },
- {
- 0x44, "Packet layer binary parameters", general
- },
- {
- 0x45, "Packet layer window size", general
- },
- {
- 0x46, "Packet size", general
- },
- {
- 0x47, "Closed user group", general
- },
- {
- 0x4a, "Reverse charge indication", general
- },
- {
- 0x6c, "Calling party number", prcalling
- },
- {
- 0x6d, "Calling party subaddress", general
- },
- {
- 0x70, "Called party number", prcalled
- },
- {
- 0x71, "Called party subaddress", general
- },
- {
- 0x74, "Redirecting number", general
- },
- {
- 0x78, "Transit network selection", general
- },
- {
- 0x79, "Restart indicator", general
- },
- {
- 0x7c, "Low layer compatibility", general
- },
- {
- 0x7d, "High layer compatibility", general
- },
- {
- 0x7e, "User-user", general
- },
- {
- 0x7f, "Escape for extension", general
- },
-};
-
-
-#define IESIZE ARRAY_SIZE(ielist)
-
-static
-struct InformationElement ielist_ni1[] = {
- { 0x04, "Bearer Capability", prbearer_ni1 },
- { 0x08, "Cause", prcause },
- { 0x14, "Call State", general_ni1 },
- { 0x18, "Channel Identification", prchident },
- { 0x1e, "Progress Indicator", general_ni1 },
- { 0x27, "Notification Indicator", general_ni1 },
- { 0x2c, "Keypad Facility", prtext },
- { 0x32, "Information Request", general_ni1 },
- { 0x34, "Signal", general_ni1 },
- { 0x38, "Feature Activation", general_ni1 },
- { 0x39, "Feature Indication", prfeatureind },
- { 0x3a, "Service Profile Identification (SPID)", prtext },
- { 0x3b, "Endpoint Identifier", general_ni1 },
- { 0x6c, "Calling Party Number", prcalling },
- { 0x6d, "Calling Party Subaddress", general_ni1 },
- { 0x70, "Called Party Number", prcalled },
- { 0x71, "Called Party Subaddress", general_ni1 },
- { 0x74, "Redirecting Number", general_ni1 },
- { 0x78, "Transit Network Selection", general_ni1 },
- { 0x7c, "Low Layer Compatibility", general_ni1 },
- { 0x7d, "High Layer Compatibility", general_ni1 },
-};
-
-
-#define IESIZE_NI1 ARRAY_SIZE(ielist_ni1)
-
-static
-struct InformationElement ielist_ni1_cs5[] = {
- { 0x1d, "Operator system access", general_ni1 },
- { 0x2a, "Display text", disptext_ni1 },
-};
-
-#define IESIZE_NI1_CS5 ARRAY_SIZE(ielist_ni1_cs5)
-
-static
-struct InformationElement ielist_ni1_cs6[] = {
- { 0x7b, "Call appearance", general_ni1 },
-};
-
-#define IESIZE_NI1_CS6 ARRAY_SIZE(ielist_ni1_cs6)
-
-static struct InformationElement we_0[] =
-{
- {WE0_cause, "Cause", prcause_1tr6},
- {WE0_connAddr, "Connecting Address", prcalled},
- {WE0_callID, "Call IDentity", general},
- {WE0_chanID, "Channel IDentity", general},
- {WE0_netSpecFac, "Network Specific Facility", general},
- {WE0_display, "Display", general},
- {WE0_keypad, "Keypad", general},
- {WE0_origAddr, "Origination Address", prcalled},
- {WE0_destAddr, "Destination Address", prcalled},
- {WE0_userInfo, "User Info", general}
-};
-
-#define WE_0_LEN ARRAY_SIZE(we_0)
-
-static struct InformationElement we_6[] =
-{
- {WE6_serviceInd, "Service Indicator", general},
- {WE6_chargingInfo, "Charging Information", prcharge},
- {WE6_date, "Date", prtext},
- {WE6_facSelect, "Facility Select", general},
- {WE6_facStatus, "Facility Status", general},
- {WE6_statusCalled, "Status Called", general},
- {WE6_addTransAttr, "Additional Transmission Attributes", general}
-};
-#define WE_6_LEN ARRAY_SIZE(we_6)
-
-int
-QuickHex(char *txt, u_char *p, int cnt)
-{
- register int i;
- register char *t = txt;
-
- for (i = 0; i < cnt; i++) {
- *t++ = ' ';
- *t++ = hex_asc_hi(p[i]);
- *t++ = hex_asc_lo(p[i]);
- }
- *t++ = 0;
- return (t - txt);
-}
-
-void
-LogFrame(struct IsdnCardState *cs, u_char *buf, int size)
-{
- char *dp;
-
- if (size < 1)
- return;
- dp = cs->dlog;
- if (size < MAX_DLOG_SPACE / 3 - 10) {
- *dp++ = 'H';
- *dp++ = 'E';
- *dp++ = 'X';
- *dp++ = ':';
- dp += QuickHex(dp, buf, size);
- dp--;
- *dp++ = '\n';
- *dp = 0;
- HiSax_putstatus(cs, NULL, cs->dlog);
- } else
- HiSax_putstatus(cs, "LogFrame: ", "warning Frame too big (%d)", size);
-}
-
-void
-dlogframe(struct IsdnCardState *cs, struct sk_buff *skb, int dir)
-{
- u_char *bend, *buf;
- char *dp;
- unsigned char pd, cr_l, cr, mt;
- unsigned char sapi, tei, ftyp;
- int i, cset = 0, cs_old = 0, cs_fest = 0;
- int size, finish = 0;
-
- if (skb->len < 3)
- return;
- /* display header */
- dp = cs->dlog;
- dp += jiftime(dp, jiffies);
- *dp++ = ' ';
- sapi = skb->data[0] >> 2;
- tei = skb->data[1] >> 1;
- ftyp = skb->data[2];
- buf = skb->data;
- dp += sprintf(dp, "frame %s ", dir ? "network->user" : "user->network");
- size = skb->len;
-
- if (tei == GROUP_TEI) {
- if (sapi == CTRL_SAPI) { /* sapi 0 */
- if (ftyp == 3) {
- dp += sprintf(dp, "broadcast\n");
- buf += 3;
- size -= 3;
- } else {
- dp += sprintf(dp, "no UI broadcast\n");
- finish = 1;
- }
- } else if (sapi == TEI_SAPI) {
- dp += sprintf(dp, "tei management\n");
- finish = 1;
- } else {
- dp += sprintf(dp, "unknown sapi %d broadcast\n", sapi);
- finish = 1;
- }
- } else {
- if (sapi == CTRL_SAPI) {
- if (!(ftyp & 1)) { /* IFrame */
- dp += sprintf(dp, "with tei %d\n", tei);
- buf += 4;
- size -= 4;
- } else {
- dp += sprintf(dp, "SFrame with tei %d\n", tei);
- finish = 1;
- }
- } else {
- dp += sprintf(dp, "unknown sapi %d tei %d\n", sapi, tei);
- finish = 1;
- }
- }
- bend = skb->data + skb->len;
- if (buf >= bend) {
- dp += sprintf(dp, "frame too short\n");
- finish = 1;
- }
- if (finish) {
- *dp = 0;
- HiSax_putstatus(cs, NULL, cs->dlog);
- return;
- }
- if ((0xfe & buf[0]) == PROTO_DIS_N0) { /* 1TR6 */
- /* locate message type */
- pd = *buf++;
- cr_l = *buf++;
- if (cr_l)
- cr = *buf++;
- else
- cr = 0;
- mt = *buf++;
- if (pd == PROTO_DIS_N0) { /* N0 */
- for (i = 0; i < MT_N0_LEN; i++)
- if (mt_n0[i].nr == mt)
- break;
- /* display message type if it exists */
- if (i == MT_N0_LEN)
- dp += sprintf(dp, "callref %d %s size %d unknown message type N0 %x!\n",
- cr & 0x7f, (cr & 0x80) ? "called" : "caller",
- size, mt);
- else
- dp += sprintf(dp, "callref %d %s size %d message type %s\n",
- cr & 0x7f, (cr & 0x80) ? "called" : "caller",
- size, mt_n0[i].descr);
- } else { /* N1 */
- for (i = 0; i < MT_N1_LEN; i++)
- if (mt_n1[i].nr == mt)
- break;
- /* display message type if it exists */
- if (i == MT_N1_LEN)
- dp += sprintf(dp, "callref %d %s size %d unknown message type N1 %x!\n",
- cr & 0x7f, (cr & 0x80) ? "called" : "caller",
- size, mt);
- else
- dp += sprintf(dp, "callref %d %s size %d message type %s\n",
- cr & 0x7f, (cr & 0x80) ? "called" : "caller",
- size, mt_n1[i].descr);
- }
-
- /* display each information element */
- while (buf < bend) {
- /* Is it a single octet information element? */
- if (*buf & 0x80) {
- switch ((*buf >> 4) & 7) {
- case 1:
- dp += sprintf(dp, " Shift %x\n", *buf & 0xf);
- cs_old = cset;
- cset = *buf & 7;
- cs_fest = *buf & 8;
- break;
- case 3:
- dp += sprintf(dp, " Congestion level %x\n", *buf & 0xf);
- break;
- case 2:
- if (*buf == 0xa0) {
- dp += sprintf(dp, " More data\n");
- break;
- }
- if (*buf == 0xa1) {
- dp += sprintf(dp, " Sending complete\n");
- }
- break;
- default:
- dp += sprintf(dp, " Reserved %x\n", *buf);
- break;
- }
- buf++;
- continue;
- }
- /* No, locate it in the table */
- if (cset == 0) {
- for (i = 0; i < WE_0_LEN; i++)
- if (*buf == we_0[i].nr)
- break;
-
- /* When found, give appropriate msg */
- if (i != WE_0_LEN) {
- dp += sprintf(dp, " %s\n", we_0[i].descr);
- dp += we_0[i].f(dp, buf);
- } else
- dp += sprintf(dp, " Codeset %d attribute %x attribute size %d\n", cset, *buf, buf[1]);
- } else if (cset == 6) {
- for (i = 0; i < WE_6_LEN; i++)
- if (*buf == we_6[i].nr)
- break;
-
- /* When found, give appropriate msg */
- if (i != WE_6_LEN) {
- dp += sprintf(dp, " %s\n", we_6[i].descr);
- dp += we_6[i].f(dp, buf);
- } else
- dp += sprintf(dp, " Codeset %d attribute %x attribute size %d\n", cset, *buf, buf[1]);
- } else
- dp += sprintf(dp, " Unknown Codeset %d attribute %x attribute size %d\n", cset, *buf, buf[1]);
- /* Skip to next element */
- if (cs_fest == 8) {
- cset = cs_old;
- cs_old = 0;
- cs_fest = 0;
- }
- buf += buf[1] + 2;
- }
- } else if ((buf[0] == 8) && (cs->protocol == ISDN_PTYPE_NI1)) { /* NI-1 */
- /* locate message type */
- buf++;
- cr_l = *buf++;
- if (cr_l)
- cr = *buf++;
- else
- cr = 0;
- mt = *buf++;
- for (i = 0; i < MTSIZE; i++)
- if (mtlist[i].nr == mt)
- break;
-
- /* display message type if it exists */
- if (i == MTSIZE)
- dp += sprintf(dp, "callref %d %s size %d unknown message type %x!\n",
- cr & 0x7f, (cr & 0x80) ? "called" : "caller",
- size, mt);
- else
- dp += sprintf(dp, "callref %d %s size %d message type %s\n",
- cr & 0x7f, (cr & 0x80) ? "called" : "caller",
- size, mtlist[i].descr);
-
- /* display each information element */
- while (buf < bend) {
- /* Is it a single octet information element? */
- if (*buf & 0x80) {
- switch ((*buf >> 4) & 7) {
- case 1:
- dp += sprintf(dp, " Shift %x\n", *buf & 0xf);
- cs_old = cset;
- cset = *buf & 7;
- cs_fest = *buf & 8;
- break;
- default:
- dp += sprintf(dp, " Unknown single-octet IE %x\n", *buf);
- break;
- }
- buf++;
- continue;
- }
- /* No, locate it in the table */
- if (cset == 0) {
- for (i = 0; i < IESIZE_NI1; i++)
- if (*buf == ielist_ni1[i].nr)
- break;
-
-				/* Print the IE by name when known, otherwise dump it generically */
- if (i != IESIZE_NI1) {
- dp += sprintf(dp, " %s\n", ielist_ni1[i].descr);
- dp += ielist_ni1[i].f(dp, buf);
- } else
- dp += sprintf(dp, " attribute %x attribute size %d\n", *buf, buf[1]);
- } else if (cset == 5) {
- for (i = 0; i < IESIZE_NI1_CS5; i++)
- if (*buf == ielist_ni1_cs5[i].nr)
- break;
-
-				/* Print the IE by name when known, otherwise dump it generically */
- if (i != IESIZE_NI1_CS5) {
- dp += sprintf(dp, " %s\n", ielist_ni1_cs5[i].descr);
- dp += ielist_ni1_cs5[i].f(dp, buf);
- } else
- dp += sprintf(dp, " attribute %x attribute size %d\n", *buf, buf[1]);
- } else if (cset == 6) {
- for (i = 0; i < IESIZE_NI1_CS6; i++)
- if (*buf == ielist_ni1_cs6[i].nr)
- break;
-
-				/* Print the IE by name when known, otherwise dump it generically */
- if (i != IESIZE_NI1_CS6) {
- dp += sprintf(dp, " %s\n", ielist_ni1_cs6[i].descr);
- dp += ielist_ni1_cs6[i].f(dp, buf);
- } else
- dp += sprintf(dp, " attribute %x attribute size %d\n", *buf, buf[1]);
- } else
- dp += sprintf(dp, " Unknown Codeset %d attribute %x attribute size %d\n", cset, *buf, buf[1]);
-
- /* Skip to next element */
- if (cs_fest == 8) {
- cset = cs_old;
- cs_old = 0;
- cs_fest = 0;
- }
- buf += buf[1] + 2;
- }
- } else if ((buf[0] == 8) && (cs->protocol == ISDN_PTYPE_EURO)) { /* EURO */
- /* locate message type */
- buf++;
- cr_l = *buf++;
- if (cr_l)
- cr = *buf++;
- else
- cr = 0;
- mt = *buf++;
- for (i = 0; i < MTSIZE; i++)
- if (mtlist[i].nr == mt)
- break;
-
- /* display message type if it exists */
- if (i == MTSIZE)
- dp += sprintf(dp, "callref %d %s size %d unknown message type %x!\n",
- cr & 0x7f, (cr & 0x80) ? "called" : "caller",
- size, mt);
- else
- dp += sprintf(dp, "callref %d %s size %d message type %s\n",
- cr & 0x7f, (cr & 0x80) ? "called" : "caller",
- size, mtlist[i].descr);
-
- /* display each information element */
- while (buf < bend) {
- /* Is it a single octet information element? */
- if (*buf & 0x80) {
- switch ((*buf >> 4) & 7) {
- case 1:
- dp += sprintf(dp, " Shift %x\n", *buf & 0xf);
- break;
- case 3:
- dp += sprintf(dp, " Congestion level %x\n", *buf & 0xf);
- break;
- case 5:
- dp += sprintf(dp, " Repeat indicator %x\n", *buf & 0xf);
- break;
- case 2:
- if (*buf == 0xa0) {
- dp += sprintf(dp, " More data\n");
- break;
- }
- if (*buf == 0xa1) {
- dp += sprintf(dp, " Sending complete\n");
- }
- break;
- default:
- dp += sprintf(dp, " Reserved %x\n", *buf);
- break;
- }
- buf++;
- continue;
- }
- /* No, locate it in the table */
- for (i = 0; i < IESIZE; i++)
- if (*buf == ielist[i].nr)
- break;
-
-			/* Print the IE by name when known, otherwise dump it generically */
- if (i != IESIZE) {
- dp += sprintf(dp, " %s\n", ielist[i].descr);
- dp += ielist[i].f(dp, buf);
- } else
- dp += sprintf(dp, " attribute %x attribute size %d\n", *buf, buf[1]);
-
- /* Skip to next element */
- buf += buf[1] + 2;
- }
- } else {
- dp += sprintf(dp, "Unknown protocol %x!", buf[0]);
- }
- *dp = 0;
- HiSax_putstatus(cs, NULL, cs->dlog);
-}
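
Most octet dumps in the decoder above go through prbits(): `start' is the Q.931-style bit number (8 = MSB, 1 = LSB) at which printing begins and `len' is how many bits to emit, which is why prcause() prints the coding standard with prbits(dp, *p, 7, 2) (bits 7..6) and the location with prbits(dp, *p, 4, 4) (bits 4..1). A self-contained sketch of the same routine, with a terminating NUL added so the buffer can be printed directly; illustrative, not the removed kernel code itself:

#include <stdio.h>

/* Print `len' bits of octet `b', starting at bit `start' (8..1, 8 = MSB). */
static int prbits(char *dest, unsigned char b, int start, int len)
{
	char *dp = dest;

	b <<= 8 - start;		/* move the first wanted bit to the MSB */
	while (len--) {
		*dp++ = (b & 0x80) ? '1' : '0';
		b <<= 1;
	}
	*dp = '\0';			/* added for standalone use */
	return dp - dest;
}

int main(void)
{
	char buf[16];

	prbits(buf, 0x9c, 8, 8);	/* full octet, MSB first */
	printf("%s\n", buf);		/* prints 10011100 */
	return 0;
}
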
diff --git a/drivers/isdn/hisax/s0box.c b/drivers/isdn/hisax/s0box.c
deleted file mode 100644
index 4e7d0aa227ad..000000000000
--- a/drivers/isdn/hisax/s0box.c
+++ /dev/null
@@ -1,260 +0,0 @@
-/* $Id: s0box.c,v 2.6.2.4 2004/01/13 23:48:39 keil Exp $
- *
- * low level stuff for Creatix S0BOX
- *
- * Author Enrik Berkhan
- * Copyright by Enrik Berkhan <enrik@starfleet.inka.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/init.h>
-#include "hisax.h"
-#include "isac.h"
-#include "hscx.h"
-#include "isdnl1.h"
-
-static const char *s0box_revision = "$Revision: 2.6.2.4 $";
-
-static inline void
-writereg(unsigned int padr, signed int addr, u_char off, u_char val) {
- outb_p(0x1c, padr + 2);
- outb_p(0x14, padr + 2);
- outb_p((addr + off) & 0x7f, padr);
- outb_p(0x16, padr + 2);
- outb_p(val, padr);
- outb_p(0x17, padr + 2);
- outb_p(0x14, padr + 2);
- outb_p(0x1c, padr + 2);
-}
-
-static u_char nibtab[] = { 1, 9, 5, 0xd, 3, 0xb, 7, 0xf,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 8, 4, 0xc, 2, 0xa, 6, 0xe };
-
-static inline u_char
-readreg(unsigned int padr, signed int addr, u_char off) {
- register u_char n1, n2;
-
- outb_p(0x1c, padr + 2);
- outb_p(0x14, padr + 2);
- outb_p((addr + off) | 0x80, padr);
- outb_p(0x16, padr + 2);
- outb_p(0x17, padr + 2);
- n1 = (inb_p(padr + 1) >> 3) & 0x17;
- outb_p(0x16, padr + 2);
- n2 = (inb_p(padr + 1) >> 3) & 0x17;
- outb_p(0x14, padr + 2);
- outb_p(0x1c, padr + 2);
- return nibtab[n1] | (nibtab[n2] << 4);
-}
-
-static inline void
-read_fifo(unsigned int padr, signed int adr, u_char *data, int size)
-{
- int i;
- register u_char n1, n2;
-
- outb_p(0x1c, padr + 2);
- outb_p(0x14, padr + 2);
- outb_p(adr | 0x80, padr);
- outb_p(0x16, padr + 2);
- for (i = 0; i < size; i++) {
- outb_p(0x17, padr + 2);
- n1 = (inb_p(padr + 1) >> 3) & 0x17;
- outb_p(0x16, padr + 2);
- n2 = (inb_p(padr + 1) >> 3) & 0x17;
- *(data++) = nibtab[n1] | (nibtab[n2] << 4);
- }
- outb_p(0x14, padr + 2);
- outb_p(0x1c, padr + 2);
- return;
-}
-
-static inline void
-write_fifo(unsigned int padr, signed int adr, u_char *data, int size)
-{
- int i;
- outb_p(0x1c, padr + 2);
- outb_p(0x14, padr + 2);
- outb_p(adr & 0x7f, padr);
- for (i = 0; i < size; i++) {
- outb_p(0x16, padr + 2);
- outb_p(*(data++), padr);
- outb_p(0x17, padr + 2);
- }
- outb_p(0x14, padr + 2);
- outb_p(0x1c, padr + 2);
- return;
-}
-
-/* Interface functions */
-
-static u_char
-ReadISAC(struct IsdnCardState *cs, u_char offset)
-{
- return (readreg(cs->hw.teles3.cfg_reg, cs->hw.teles3.isac, offset));
-}
-
-static void
-WriteISAC(struct IsdnCardState *cs, u_char offset, u_char value)
-{
- writereg(cs->hw.teles3.cfg_reg, cs->hw.teles3.isac, offset, value);
-}
-
-static void
-ReadISACfifo(struct IsdnCardState *cs, u_char *data, int size)
-{
- read_fifo(cs->hw.teles3.cfg_reg, cs->hw.teles3.isacfifo, data, size);
-}
-
-static void
-WriteISACfifo(struct IsdnCardState *cs, u_char *data, int size)
-{
- write_fifo(cs->hw.teles3.cfg_reg, cs->hw.teles3.isacfifo, data, size);
-}
-
-static u_char
-ReadHSCX(struct IsdnCardState *cs, int hscx, u_char offset)
-{
- return (readreg(cs->hw.teles3.cfg_reg, cs->hw.teles3.hscx[hscx], offset));
-}
-
-static void
-WriteHSCX(struct IsdnCardState *cs, int hscx, u_char offset, u_char value)
-{
- writereg(cs->hw.teles3.cfg_reg, cs->hw.teles3.hscx[hscx], offset, value);
-}
-
-/*
- * fast interrupt HSCX stuff goes here
- */
-
-#define READHSCX(cs, nr, reg) readreg(cs->hw.teles3.cfg_reg, cs->hw.teles3.hscx[nr], reg)
-#define WRITEHSCX(cs, nr, reg, data) writereg(cs->hw.teles3.cfg_reg, cs->hw.teles3.hscx[nr], reg, data)
-#define READHSCXFIFO(cs, nr, ptr, cnt) read_fifo(cs->hw.teles3.cfg_reg, cs->hw.teles3.hscxfifo[nr], ptr, cnt)
-#define WRITEHSCXFIFO(cs, nr, ptr, cnt) write_fifo(cs->hw.teles3.cfg_reg, cs->hw.teles3.hscxfifo[nr], ptr, cnt)
-
-#include "hscx_irq.c"
-
-static irqreturn_t
-s0box_interrupt(int intno, void *dev_id)
-{
-#define MAXCOUNT 5
- struct IsdnCardState *cs = dev_id;
- u_char val;
- u_long flags;
- int count = 0;
-
- spin_lock_irqsave(&cs->lock, flags);
- val = readreg(cs->hw.teles3.cfg_reg, cs->hw.teles3.hscx[1], HSCX_ISTA);
-Start_HSCX:
- if (val)
- hscx_int_main(cs, val);
- val = readreg(cs->hw.teles3.cfg_reg, cs->hw.teles3.isac, ISAC_ISTA);
-Start_ISAC:
- if (val)
- isac_interrupt(cs, val);
- count++;
- val = readreg(cs->hw.teles3.cfg_reg, cs->hw.teles3.hscx[1], HSCX_ISTA);
- if (val && count < MAXCOUNT) {
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "HSCX IntStat after IntRoutine");
- goto Start_HSCX;
- }
- val = readreg(cs->hw.teles3.cfg_reg, cs->hw.teles3.isac, ISAC_ISTA);
- if (val && count < MAXCOUNT) {
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "ISAC IntStat after IntRoutine");
- goto Start_ISAC;
- }
- if (count >= MAXCOUNT)
- printk(KERN_WARNING "S0Box: more than %d loops in s0box_interrupt\n", count);
- writereg(cs->hw.teles3.cfg_reg, cs->hw.teles3.hscx[0], HSCX_MASK, 0xFF);
- writereg(cs->hw.teles3.cfg_reg, cs->hw.teles3.hscx[1], HSCX_MASK, 0xFF);
- writereg(cs->hw.teles3.cfg_reg, cs->hw.teles3.isac, ISAC_MASK, 0xFF);
- writereg(cs->hw.teles3.cfg_reg, cs->hw.teles3.isac, ISAC_MASK, 0x0);
- writereg(cs->hw.teles3.cfg_reg, cs->hw.teles3.hscx[0], HSCX_MASK, 0x0);
- writereg(cs->hw.teles3.cfg_reg, cs->hw.teles3.hscx[1], HSCX_MASK, 0x0);
- spin_unlock_irqrestore(&cs->lock, flags);
- return IRQ_HANDLED;
-}
-
-static void
-release_io_s0box(struct IsdnCardState *cs)
-{
- release_region(cs->hw.teles3.cfg_reg, 8);
-}
-
-static int
-S0Box_card_msg(struct IsdnCardState *cs, int mt, void *arg)
-{
- u_long flags;
-
- switch (mt) {
- case CARD_RESET:
- break;
- case CARD_RELEASE:
- release_io_s0box(cs);
- break;
- case CARD_INIT:
- spin_lock_irqsave(&cs->lock, flags);
- inithscxisac(cs, 3);
- spin_unlock_irqrestore(&cs->lock, flags);
- break;
- case CARD_TEST:
- break;
- }
- return (0);
-}
-
-int setup_s0box(struct IsdnCard *card)
-{
- struct IsdnCardState *cs = card->cs;
- char tmp[64];
-
- strcpy(tmp, s0box_revision);
- printk(KERN_INFO "HiSax: S0Box IO driver Rev. %s\n", HiSax_getrev(tmp));
- if (cs->typ != ISDN_CTYPE_S0BOX)
- return (0);
-
- cs->hw.teles3.cfg_reg = card->para[1];
- cs->hw.teles3.hscx[0] = -0x20;
- cs->hw.teles3.hscx[1] = 0x0;
- cs->hw.teles3.isac = 0x20;
- cs->hw.teles3.isacfifo = cs->hw.teles3.isac + 0x3e;
- cs->hw.teles3.hscxfifo[0] = cs->hw.teles3.hscx[0] + 0x3e;
- cs->hw.teles3.hscxfifo[1] = cs->hw.teles3.hscx[1] + 0x3e;
- cs->irq = card->para[0];
- if (!request_region(cs->hw.teles3.cfg_reg, 8, "S0Box parallel I/O")) {
- printk(KERN_WARNING "HiSax: S0Box ports %x-%x already in use\n",
- cs->hw.teles3.cfg_reg,
- cs->hw.teles3.cfg_reg + 7);
- return 0;
- }
- printk(KERN_INFO "HiSax: S0Box config irq:%d isac:0x%x cfg:0x%x\n",
- cs->irq,
- cs->hw.teles3.isac, cs->hw.teles3.cfg_reg);
- printk(KERN_INFO "HiSax: hscx A:0x%x hscx B:0x%x\n",
- cs->hw.teles3.hscx[0], cs->hw.teles3.hscx[1]);
- setup_isac(cs);
- cs->readisac = &ReadISAC;
- cs->writeisac = &WriteISAC;
- cs->readisacfifo = &ReadISACfifo;
- cs->writeisacfifo = &WriteISACfifo;
- cs->BC_Read_Reg = &ReadHSCX;
- cs->BC_Write_Reg = &WriteHSCX;
- cs->BC_Send_Data = &hscx_fill_fifo;
- cs->cardmsg = &S0Box_card_msg;
- cs->irq_func = &s0box_interrupt;
- ISACVersion(cs, "S0Box:");
- if (HscxVersion(cs, "S0Box:")) {
- printk(KERN_WARNING
-		       "S0Box: wrong HSCX versions, check IO address\n");
- release_io_s0box(cs);
- return (0);
- }
- return (1);
-}
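
readreg() and read_fifo() above reassemble each data byte from two parallel-port status reads: the raw value is masked with (inb_p(padr + 1) >> 3) & 0x17, nibtab[] undoes the scrambling of the four data bits across that pattern, and the two decoded nibbles are recombined. A standalone sketch of just the decode step, taking the two already-masked status values as parameters (hypothetical function name):

#include <stdio.h>

/* Same table as the driver: maps the masked status pattern to a nibble. */
static const unsigned char nibtab[] = {
	1, 9, 5, 0xd, 3, 0xb, 7, 0xf,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 8, 4, 0xc, 2, 0xa, 6, 0xe
};

/* n1/n2 are values already masked with (status >> 3) & 0x17 */
static unsigned char s0box_decode(unsigned char n1, unsigned char n2)
{
	return nibtab[n1] | (nibtab[n2] << 4);
}

int main(void)
{
	/* low-nibble pattern 0x17 -> 0xe, high-nibble pattern 0x10 -> 0x0 */
	printf("%#x\n", s0box_decode(0x17, 0x10));	/* prints 0xe */
	return 0;
}
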
diff --git a/drivers/isdn/hisax/saphir.c b/drivers/isdn/hisax/saphir.c
deleted file mode 100644
index db906cb37a3f..000000000000
--- a/drivers/isdn/hisax/saphir.c
+++ /dev/null
@@ -1,296 +0,0 @@
-/* $Id: saphir.c,v 1.10.2.4 2004/01/13 23:48:39 keil Exp $
- *
- * low level stuff for HST Saphir 1
- *
- * Author Karsten Keil
- * Copyright by Karsten Keil <keil@isdn4linux.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- * Thanks to HST High Soft Tech GmbH
- *
- */
-
-#include <linux/init.h>
-#include "hisax.h"
-#include "isac.h"
-#include "hscx.h"
-#include "isdnl1.h"
-
-static char *saphir_rev = "$Revision: 1.10.2.4 $";
-
-#define byteout(addr, val) outb(val, addr)
-#define bytein(addr) inb(addr)
-
-#define ISAC_DATA 0
-#define HSCX_DATA 1
-#define ADDRESS_REG 2
-#define IRQ_REG 3
-#define SPARE_REG 4
-#define RESET_REG 5
-
-static inline u_char
-readreg(unsigned int ale, unsigned int adr, u_char off)
-{
- register u_char ret;
-
- byteout(ale, off);
- ret = bytein(adr);
- return (ret);
-}
-
-static inline void
-readfifo(unsigned int ale, unsigned int adr, u_char off, u_char *data, int size)
-{
- byteout(ale, off);
- insb(adr, data, size);
-}
-
-
-static inline void
-writereg(unsigned int ale, unsigned int adr, u_char off, u_char data)
-{
- byteout(ale, off);
- byteout(adr, data);
-}
-
-static inline void
-writefifo(unsigned int ale, unsigned int adr, u_char off, u_char *data, int size)
-{
- byteout(ale, off);
- outsb(adr, data, size);
-}
-
-/* Interface functions */
-
-static u_char
-ReadISAC(struct IsdnCardState *cs, u_char offset)
-{
- return (readreg(cs->hw.saphir.ale, cs->hw.saphir.isac, offset));
-}
-
-static void
-WriteISAC(struct IsdnCardState *cs, u_char offset, u_char value)
-{
- writereg(cs->hw.saphir.ale, cs->hw.saphir.isac, offset, value);
-}
-
-static void
-ReadISACfifo(struct IsdnCardState *cs, u_char *data, int size)
-{
- readfifo(cs->hw.saphir.ale, cs->hw.saphir.isac, 0, data, size);
-}
-
-static void
-WriteISACfifo(struct IsdnCardState *cs, u_char *data, int size)
-{
- writefifo(cs->hw.saphir.ale, cs->hw.saphir.isac, 0, data, size);
-}
-
-static u_char
-ReadHSCX(struct IsdnCardState *cs, int hscx, u_char offset)
-{
- return (readreg(cs->hw.saphir.ale, cs->hw.saphir.hscx,
- offset + (hscx ? 0x40 : 0)));
-}
-
-static void
-WriteHSCX(struct IsdnCardState *cs, int hscx, u_char offset, u_char value)
-{
- writereg(cs->hw.saphir.ale, cs->hw.saphir.hscx,
- offset + (hscx ? 0x40 : 0), value);
-}
-
-#define READHSCX(cs, nr, reg) readreg(cs->hw.saphir.ale, \
- cs->hw.saphir.hscx, reg + (nr ? 0x40 : 0))
-#define WRITEHSCX(cs, nr, reg, data) writereg(cs->hw.saphir.ale, \
- cs->hw.saphir.hscx, reg + (nr ? 0x40 : 0), data)
-
-#define READHSCXFIFO(cs, nr, ptr, cnt) readfifo(cs->hw.saphir.ale, \
- cs->hw.saphir.hscx, (nr ? 0x40 : 0), ptr, cnt)
-
-#define WRITEHSCXFIFO(cs, nr, ptr, cnt) writefifo(cs->hw.saphir.ale, \
- cs->hw.saphir.hscx, (nr ? 0x40 : 0), ptr, cnt)
-
-#include "hscx_irq.c"
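The #include above is a recurring HiSax idiom (it appears again in sedlbauer.c and sportster.c below): each card driver first defines READHSCX/WRITEHSCX/READHSCXFIFO/WRITEHSCXFIFO for its own register layout and then textually includes hscx_irq.c, so the shared HSCX B-channel interrupt handling is compiled into every card driver with the card-specific accessors expanded inline.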
-
-static irqreturn_t
-saphir_interrupt(int intno, void *dev_id)
-{
- struct IsdnCardState *cs = dev_id;
- u_char val;
- u_long flags;
-
- spin_lock_irqsave(&cs->lock, flags);
- val = readreg(cs->hw.saphir.ale, cs->hw.saphir.hscx, HSCX_ISTA + 0x40);
-Start_HSCX:
- if (val)
- hscx_int_main(cs, val);
- val = readreg(cs->hw.saphir.ale, cs->hw.saphir.isac, ISAC_ISTA);
-Start_ISAC:
- if (val)
- isac_interrupt(cs, val);
- val = readreg(cs->hw.saphir.ale, cs->hw.saphir.hscx, HSCX_ISTA + 0x40);
- if (val) {
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "HSCX IntStat after IntRoutine");
- goto Start_HSCX;
- }
- val = readreg(cs->hw.saphir.ale, cs->hw.saphir.isac, ISAC_ISTA);
- if (val) {
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "ISAC IntStat after IntRoutine");
- goto Start_ISAC;
- }
- /* Watchdog */
- if (cs->hw.saphir.timer.function)
- mod_timer(&cs->hw.saphir.timer, jiffies + 1 * HZ);
- else
- printk(KERN_WARNING "saphir: Spurious timer!\n");
- writereg(cs->hw.saphir.ale, cs->hw.saphir.hscx, HSCX_MASK, 0xFF);
- writereg(cs->hw.saphir.ale, cs->hw.saphir.hscx, HSCX_MASK + 0x40, 0xFF);
- writereg(cs->hw.saphir.ale, cs->hw.saphir.isac, ISAC_MASK, 0xFF);
- writereg(cs->hw.saphir.ale, cs->hw.saphir.isac, ISAC_MASK, 0);
- writereg(cs->hw.saphir.ale, cs->hw.saphir.hscx, HSCX_MASK, 0);
- writereg(cs->hw.saphir.ale, cs->hw.saphir.hscx, HSCX_MASK + 0x40, 0);
- spin_unlock_irqrestore(&cs->lock, flags);
- return IRQ_HANDLED;
-}
-
-static void
-SaphirWatchDog(struct timer_list *t)
-{
- struct IsdnCardState *cs = from_timer(cs, t, hw.saphir.timer);
- u_long flags;
-
- spin_lock_irqsave(&cs->lock, flags);
- /* 5 sec WatchDog, so read at least every 4 sec */
- cs->readisac(cs, ISAC_RBCH);
- spin_unlock_irqrestore(&cs->lock, flags);
- mod_timer(&cs->hw.saphir.timer, jiffies + 1 * HZ);
-}
-
-static void
-release_io_saphir(struct IsdnCardState *cs)
-{
- byteout(cs->hw.saphir.cfg_reg + IRQ_REG, 0xff);
- del_timer(&cs->hw.saphir.timer);
- cs->hw.saphir.timer.function = NULL;
- if (cs->hw.saphir.cfg_reg)
- release_region(cs->hw.saphir.cfg_reg, 6);
-}
-
-static int
-saphir_reset(struct IsdnCardState *cs)
-{
- u_char irq_val;
-
- switch (cs->irq) {
- case 5: irq_val = 0;
- break;
- case 3: irq_val = 1;
- break;
- case 11:
- irq_val = 2;
- break;
- case 12:
- irq_val = 3;
- break;
- case 15:
- irq_val = 4;
- break;
- default:
- printk(KERN_WARNING "HiSax: saphir wrong IRQ %d\n",
- cs->irq);
- return (1);
- }
- byteout(cs->hw.saphir.cfg_reg + IRQ_REG, irq_val);
- byteout(cs->hw.saphir.cfg_reg + RESET_REG, 1);
- mdelay(10);
- byteout(cs->hw.saphir.cfg_reg + RESET_REG, 0);
- mdelay(10);
- byteout(cs->hw.saphir.cfg_reg + IRQ_REG, irq_val);
- byteout(cs->hw.saphir.cfg_reg + SPARE_REG, 0x02);
- return (0);
-}
-
-static int
-saphir_card_msg(struct IsdnCardState *cs, int mt, void *arg)
-{
- u_long flags;
-
- switch (mt) {
- case CARD_RESET:
- spin_lock_irqsave(&cs->lock, flags);
- saphir_reset(cs);
- spin_unlock_irqrestore(&cs->lock, flags);
- return (0);
- case CARD_RELEASE:
- release_io_saphir(cs);
- return (0);
- case CARD_INIT:
- spin_lock_irqsave(&cs->lock, flags);
- inithscxisac(cs, 3);
- spin_unlock_irqrestore(&cs->lock, flags);
- return (0);
- case CARD_TEST:
- return (0);
- }
- return (0);
-}
-
-
-int setup_saphir(struct IsdnCard *card)
-{
- struct IsdnCardState *cs = card->cs;
- char tmp[64];
-
- strcpy(tmp, saphir_rev);
- printk(KERN_INFO "HiSax: HST Saphir driver Rev. %s\n", HiSax_getrev(tmp));
- if (cs->typ != ISDN_CTYPE_HSTSAPHIR)
- return (0);
-
- /* IO-Ports */
- cs->hw.saphir.cfg_reg = card->para[1];
- cs->hw.saphir.isac = card->para[1] + ISAC_DATA;
- cs->hw.saphir.hscx = card->para[1] + HSCX_DATA;
- cs->hw.saphir.ale = card->para[1] + ADDRESS_REG;
- cs->irq = card->para[0];
- if (!request_region(cs->hw.saphir.cfg_reg, 6, "saphir")) {
- printk(KERN_WARNING
- "HiSax: HST Saphir config port %x-%x already in use\n",
- cs->hw.saphir.cfg_reg,
- cs->hw.saphir.cfg_reg + 5);
- return (0);
- }
-
- printk(KERN_INFO "HiSax: HST Saphir config irq:%d io:0x%X\n",
- cs->irq, cs->hw.saphir.cfg_reg);
-
- setup_isac(cs);
- timer_setup(&cs->hw.saphir.timer, SaphirWatchDog, 0);
- cs->hw.saphir.timer.expires = jiffies + 4 * HZ;
- add_timer(&cs->hw.saphir.timer);
- if (saphir_reset(cs)) {
- release_io_saphir(cs);
- return (0);
- }
- cs->readisac = &ReadISAC;
- cs->writeisac = &WriteISAC;
- cs->readisacfifo = &ReadISACfifo;
- cs->writeisacfifo = &WriteISACfifo;
- cs->BC_Read_Reg = &ReadHSCX;
- cs->BC_Write_Reg = &WriteHSCX;
- cs->BC_Send_Data = &hscx_fill_fifo;
- cs->cardmsg = &saphir_card_msg;
- cs->irq_func = &saphir_interrupt;
- ISACVersion(cs, "saphir:");
- if (HscxVersion(cs, "saphir:")) {
- printk(KERN_WARNING
- "saphir: wrong HSCX versions check IO address\n");
- release_io_saphir(cs);
- return (0);
- }
- return (1);
-}
diff --git a/drivers/isdn/hisax/sedlbauer.c b/drivers/isdn/hisax/sedlbauer.c
deleted file mode 100644
index c0b97b893495..000000000000
--- a/drivers/isdn/hisax/sedlbauer.c
+++ /dev/null
@@ -1,873 +0,0 @@
-/* $Id: sedlbauer.c,v 1.34.2.6 2004/01/24 20:47:24 keil Exp $
- *
- * low level stuff for Sedlbauer cards
- * includes support for the Sedlbauer speed star (speed star II),
- * support for the Sedlbauer speed fax+,
- * support for the Sedlbauer ISDN-Controller PC/104 and
- * support for the Sedlbauer speed pci
- * derived from the original file asuscom.c from Karsten Keil
- *
- * Author Marcus Niemann
- * Copyright by Marcus Niemann <niemann@www-bib.fh-bielefeld.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- * Thanks to Karsten Keil
- *            Sedlbauer AG for information
- * Edgar Toernig
- *
- */
-
-/* Supported cards:
- * Card: Chip: Configuration: Comment:
- * ---------------------------------------------------------------------
- * Speed Card ISAC_HSCX DIP-SWITCH
- * Speed Win ISAC_HSCX ISAPNP
- * Speed Fax+ ISAC_ISAR ISAPNP Full analog support
- * Speed Star ISAC_HSCX CARDMGR
- * Speed Win2 IPAC ISAPNP
- * ISDN PC/104 IPAC DIP-SWITCH
- * Speed Star2 IPAC CARDMGR
- * Speed PCI IPAC PCI PNP
- * Speed Fax+ ISAC_ISAR PCI PNP Full analog support
- *
- * Important:
- * For the sedlbauer speed fax+ to work properly you have to download
- * the firmware onto the card.
- * For example: hisaxctrl <DriverID> 9 ISAR.BIN
- */
-
-#include <linux/init.h>
-#include "hisax.h"
-#include "isac.h"
-#include "ipac.h"
-#include "hscx.h"
-#include "isar.h"
-#include "isdnl1.h"
-#include <linux/pci.h>
-#include <linux/isapnp.h>
-
-static const char *Sedlbauer_revision = "$Revision: 1.34.2.6 $";
-
-static const char *Sedlbauer_Types[] =
-{"None", "speed card/win", "speed star", "speed fax+",
- "speed win II / ISDN PC/104", "speed star II", "speed pci",
- "speed fax+ pyramid", "speed fax+ pci", "HST Saphir III"};
-
-#define PCI_SUBVENDOR_SPEEDFAX_PYRAMID 0x51
-#define PCI_SUBVENDOR_HST_SAPHIR3 0x52
-#define PCI_SUBVENDOR_SEDLBAUER_PCI 0x53
-#define PCI_SUBVENDOR_SPEEDFAX_PCI 0x54
-#define PCI_SUB_ID_SEDLBAUER 0x01
-
-#define SEDL_SPEED_CARD_WIN 1
-#define SEDL_SPEED_STAR 2
-#define SEDL_SPEED_FAX 3
-#define SEDL_SPEED_WIN2_PC104 4
-#define SEDL_SPEED_STAR2 5
-#define SEDL_SPEED_PCI 6
-#define SEDL_SPEEDFAX_PYRAMID 7
-#define SEDL_SPEEDFAX_PCI 8
-#define HST_SAPHIR3 9
-
-#define SEDL_CHIP_TEST 0
-#define SEDL_CHIP_ISAC_HSCX 1
-#define SEDL_CHIP_ISAC_ISAR 2
-#define SEDL_CHIP_IPAC 3
-
-#define SEDL_BUS_ISA 1
-#define SEDL_BUS_PCI 2
-#define SEDL_BUS_PCMCIA 3
-
-#define byteout(addr, val) outb(val, addr)
-#define bytein(addr) inb(addr)
-
-#define SEDL_HSCX_ISA_RESET_ON 0
-#define SEDL_HSCX_ISA_RESET_OFF 1
-#define SEDL_HSCX_ISA_ISAC 2
-#define SEDL_HSCX_ISA_HSCX 3
-#define SEDL_HSCX_ISA_ADR 4
-
-#define SEDL_HSCX_PCMCIA_RESET 0
-#define SEDL_HSCX_PCMCIA_ISAC 1
-#define SEDL_HSCX_PCMCIA_HSCX 2
-#define SEDL_HSCX_PCMCIA_ADR 4
-
-#define SEDL_ISAR_ISA_ISAC 4
-#define SEDL_ISAR_ISA_ISAR 6
-#define SEDL_ISAR_ISA_ADR 8
-#define SEDL_ISAR_ISA_ISAR_RESET_ON 10
-#define SEDL_ISAR_ISA_ISAR_RESET_OFF 12
-
-#define SEDL_IPAC_ANY_ADR 0
-#define SEDL_IPAC_ANY_IPAC 2
-
-#define SEDL_IPAC_PCI_BASE 0
-#define SEDL_IPAC_PCI_ADR 0xc0
-#define SEDL_IPAC_PCI_IPAC 0xc8
-#define SEDL_ISAR_PCI_ADR 0xc8
-#define SEDL_ISAR_PCI_ISAC 0xd0
-#define SEDL_ISAR_PCI_ISAR 0xe0
-#define SEDL_ISAR_PCI_ISAR_RESET_ON 0x01
-#define SEDL_ISAR_PCI_ISAR_RESET_OFF 0x18
-#define SEDL_ISAR_PCI_LED1 0x08
-#define SEDL_ISAR_PCI_LED2 0x10
-
-#define SEDL_RESET 0x3 /* same as DOS driver */
-
-static inline u_char
-readreg(unsigned int ale, unsigned int adr, u_char off)
-{
- register u_char ret;
-
- byteout(ale, off);
- ret = bytein(adr);
- return (ret);
-}
-
-static inline void
-readfifo(unsigned int ale, unsigned int adr, u_char off, u_char *data, int size)
-{
- byteout(ale, off);
- insb(adr, data, size);
-}
-
-
-static inline void
-writereg(unsigned int ale, unsigned int adr, u_char off, u_char data)
-{
- byteout(ale, off);
- byteout(adr, data);
-}
-
-static inline void
-writefifo(unsigned int ale, unsigned int adr, u_char off, u_char *data, int size)
-{
- byteout(ale, off);
- outsb(adr, data, size);
-}
-
-/* Interface functions */
-
-static u_char
-ReadISAC(struct IsdnCardState *cs, u_char offset)
-{
- return (readreg(cs->hw.sedl.adr, cs->hw.sedl.isac, offset));
-}
-
-static void
-WriteISAC(struct IsdnCardState *cs, u_char offset, u_char value)
-{
- writereg(cs->hw.sedl.adr, cs->hw.sedl.isac, offset, value);
-}
-
-static void
-ReadISACfifo(struct IsdnCardState *cs, u_char *data, int size)
-{
- readfifo(cs->hw.sedl.adr, cs->hw.sedl.isac, 0, data, size);
-}
-
-static void
-WriteISACfifo(struct IsdnCardState *cs, u_char *data, int size)
-{
- writefifo(cs->hw.sedl.adr, cs->hw.sedl.isac, 0, data, size);
-}
-
-static u_char
-ReadISAC_IPAC(struct IsdnCardState *cs, u_char offset)
-{
- return (readreg(cs->hw.sedl.adr, cs->hw.sedl.isac, offset | 0x80));
-}
-
-static void
-WriteISAC_IPAC(struct IsdnCardState *cs, u_char offset, u_char value)
-{
- writereg(cs->hw.sedl.adr, cs->hw.sedl.isac, offset | 0x80, value);
-}
-
-static void
-ReadISACfifo_IPAC(struct IsdnCardState *cs, u_char *data, int size)
-{
- readfifo(cs->hw.sedl.adr, cs->hw.sedl.isac, 0x80, data, size);
-}
-
-static void
-WriteISACfifo_IPAC(struct IsdnCardState *cs, u_char *data, int size)
-{
- writefifo(cs->hw.sedl.adr, cs->hw.sedl.isac, 0x80, data, size);
-}
-
-static u_char
-ReadHSCX(struct IsdnCardState *cs, int hscx, u_char offset)
-{
- return (readreg(cs->hw.sedl.adr,
- cs->hw.sedl.hscx, offset + (hscx ? 0x40 : 0)));
-}
-
-static void
-WriteHSCX(struct IsdnCardState *cs, int hscx, u_char offset, u_char value)
-{
- writereg(cs->hw.sedl.adr,
- cs->hw.sedl.hscx, offset + (hscx ? 0x40 : 0), value);
-}
-
-/* ISAR access routines
- * mode = 0 access with IRQ on
- * mode = 1 access with IRQ off
- * mode = 2 access with IRQ off and using last offset
- */
-
-static u_char
-ReadISAR(struct IsdnCardState *cs, int mode, u_char offset)
-{
- if (mode == 0)
- return (readreg(cs->hw.sedl.adr, cs->hw.sedl.hscx, offset));
- else if (mode == 1)
- byteout(cs->hw.sedl.adr, offset);
- return (bytein(cs->hw.sedl.hscx));
-}
-
-static void
-WriteISAR(struct IsdnCardState *cs, int mode, u_char offset, u_char value)
-{
- if (mode == 0)
- writereg(cs->hw.sedl.adr, cs->hw.sedl.hscx, offset, value);
- else {
- if (mode == 1)
- byteout(cs->hw.sedl.adr, offset);
- byteout(cs->hw.sedl.hscx, value);
- }
-}
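As a rough illustration of the mode convention described above (a hypothetical helper, not part of the original driver, relying on the surrounding IsdnCardState declarations): mode 1 writes the address latch before reading, while mode 2 reuses whatever offset was latched last, so repeated reads of one ISAR register with the card interrupt already masked can skip the latch write after the first access.

static u_char
poll_isar_reg(struct IsdnCardState *cs, u_char off, int tries)
{
	u_char val = ReadISAR(cs, 1, off);	/* latch the offset, then read */

	while (--tries > 0)
		val = ReadISAR(cs, 2, off);	/* reuse the latched offset */
	return val;
}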
-
-/*
- * fast interrupt HSCX stuff goes here
- */
-
-#define READHSCX(cs, nr, reg) readreg(cs->hw.sedl.adr, \
- cs->hw.sedl.hscx, reg + (nr ? 0x40 : 0))
-#define WRITEHSCX(cs, nr, reg, data) writereg(cs->hw.sedl.adr, \
- cs->hw.sedl.hscx, reg + (nr ? 0x40 : 0), data)
-
-#define READHSCXFIFO(cs, nr, ptr, cnt) readfifo(cs->hw.sedl.adr, \
- cs->hw.sedl.hscx, (nr ? 0x40 : 0), ptr, cnt)
-
-#define WRITEHSCXFIFO(cs, nr, ptr, cnt) writefifo(cs->hw.sedl.adr, \
- cs->hw.sedl.hscx, (nr ? 0x40 : 0), ptr, cnt)
-
-#include "hscx_irq.c"
-
-static irqreturn_t
-sedlbauer_interrupt(int intno, void *dev_id)
-{
- struct IsdnCardState *cs = dev_id;
- u_char val;
- u_long flags;
-
- spin_lock_irqsave(&cs->lock, flags);
- if ((cs->hw.sedl.bus == SEDL_BUS_PCMCIA) && (*cs->busy_flag == 1)) {
- /* The card tends to generate interrupts while being removed
- causing us to just crash the kernel. bad. */
- spin_unlock_irqrestore(&cs->lock, flags);
- printk(KERN_WARNING "Sedlbauer: card not available!\n");
- return IRQ_NONE;
- }
-
- val = readreg(cs->hw.sedl.adr, cs->hw.sedl.hscx, HSCX_ISTA + 0x40);
-Start_HSCX:
- if (val)
- hscx_int_main(cs, val);
- val = readreg(cs->hw.sedl.adr, cs->hw.sedl.isac, ISAC_ISTA);
-Start_ISAC:
- if (val)
- isac_interrupt(cs, val);
- val = readreg(cs->hw.sedl.adr, cs->hw.sedl.hscx, HSCX_ISTA + 0x40);
- if (val) {
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "HSCX IntStat after IntRoutine");
- goto Start_HSCX;
- }
- val = readreg(cs->hw.sedl.adr, cs->hw.sedl.isac, ISAC_ISTA);
- if (val) {
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "ISAC IntStat after IntRoutine");
- goto Start_ISAC;
- }
- writereg(cs->hw.sedl.adr, cs->hw.sedl.hscx, HSCX_MASK, 0xFF);
- writereg(cs->hw.sedl.adr, cs->hw.sedl.hscx, HSCX_MASK + 0x40, 0xFF);
- writereg(cs->hw.sedl.adr, cs->hw.sedl.isac, ISAC_MASK, 0xFF);
- writereg(cs->hw.sedl.adr, cs->hw.sedl.isac, ISAC_MASK, 0x0);
- writereg(cs->hw.sedl.adr, cs->hw.sedl.hscx, HSCX_MASK, 0x0);
- writereg(cs->hw.sedl.adr, cs->hw.sedl.hscx, HSCX_MASK + 0x40, 0x0);
- spin_unlock_irqrestore(&cs->lock, flags);
- return IRQ_HANDLED;
-}
-
-static irqreturn_t
-sedlbauer_interrupt_ipac(int intno, void *dev_id)
-{
- struct IsdnCardState *cs = dev_id;
- u_char ista, val, icnt = 5;
- u_long flags;
-
- spin_lock_irqsave(&cs->lock, flags);
- ista = readreg(cs->hw.sedl.adr, cs->hw.sedl.isac, IPAC_ISTA);
-Start_IPAC:
- if (cs->debug & L1_DEB_IPAC)
- debugl1(cs, "IPAC ISTA %02X", ista);
- if (ista & 0x0f) {
- val = readreg(cs->hw.sedl.adr, cs->hw.sedl.hscx, HSCX_ISTA + 0x40);
- if (ista & 0x01)
- val |= 0x01;
- if (ista & 0x04)
- val |= 0x02;
- if (ista & 0x08)
- val |= 0x04;
- if (val)
- hscx_int_main(cs, val);
- }
- if (ista & 0x20) {
- val = 0xfe & readreg(cs->hw.sedl.adr, cs->hw.sedl.isac, ISAC_ISTA | 0x80);
- if (val) {
- isac_interrupt(cs, val);
- }
- }
- if (ista & 0x10) {
- val = 0x01;
- isac_interrupt(cs, val);
- }
- ista = readreg(cs->hw.sedl.adr, cs->hw.sedl.isac, IPAC_ISTA);
- if ((ista & 0x3f) && icnt) {
- icnt--;
- goto Start_IPAC;
- }
- if (!icnt)
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "Sedlbauer IRQ LOOP");
- writereg(cs->hw.sedl.adr, cs->hw.sedl.isac, IPAC_MASK, 0xFF);
- writereg(cs->hw.sedl.adr, cs->hw.sedl.isac, IPAC_MASK, 0xC0);
- spin_unlock_irqrestore(&cs->lock, flags);
- return IRQ_HANDLED;
-}
-
-static irqreturn_t
-sedlbauer_interrupt_isar(int intno, void *dev_id)
-{
- struct IsdnCardState *cs = dev_id;
- u_char val;
- int cnt = 5;
- u_long flags;
-
- spin_lock_irqsave(&cs->lock, flags);
- val = readreg(cs->hw.sedl.adr, cs->hw.sedl.hscx, ISAR_IRQBIT);
-Start_ISAR:
- if (val & ISAR_IRQSTA)
- isar_int_main(cs);
- val = readreg(cs->hw.sedl.adr, cs->hw.sedl.isac, ISAC_ISTA);
-Start_ISAC:
- if (val)
- isac_interrupt(cs, val);
- val = readreg(cs->hw.sedl.adr, cs->hw.sedl.hscx, ISAR_IRQBIT);
- if ((val & ISAR_IRQSTA) && --cnt) {
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "ISAR IntStat after IntRoutine");
- goto Start_ISAR;
- }
- val = readreg(cs->hw.sedl.adr, cs->hw.sedl.isac, ISAC_ISTA);
- if (val && --cnt) {
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "ISAC IntStat after IntRoutine");
- goto Start_ISAC;
- }
- if (!cnt)
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "Sedlbauer IRQ LOOP");
-
- writereg(cs->hw.sedl.adr, cs->hw.sedl.hscx, ISAR_IRQBIT, 0);
- writereg(cs->hw.sedl.adr, cs->hw.sedl.isac, ISAC_MASK, 0xFF);
- writereg(cs->hw.sedl.adr, cs->hw.sedl.isac, ISAC_MASK, 0x0);
- writereg(cs->hw.sedl.adr, cs->hw.sedl.hscx, ISAR_IRQBIT, ISAR_IRQMSK);
- spin_unlock_irqrestore(&cs->lock, flags);
- return IRQ_HANDLED;
-}
-
-static void
-release_io_sedlbauer(struct IsdnCardState *cs)
-{
- int bytecnt = 8;
-
- if (cs->subtyp == SEDL_SPEED_FAX) {
- bytecnt = 16;
- } else if (cs->hw.sedl.bus == SEDL_BUS_PCI) {
- bytecnt = 256;
- }
- if (cs->hw.sedl.cfg_reg)
- release_region(cs->hw.sedl.cfg_reg, bytecnt);
-}
-
-static void
-reset_sedlbauer(struct IsdnCardState *cs)
-{
- printk(KERN_INFO "Sedlbauer: resetting card\n");
-
- if (!((cs->hw.sedl.bus == SEDL_BUS_PCMCIA) &&
- (cs->hw.sedl.chip == SEDL_CHIP_ISAC_HSCX))) {
- if (cs->hw.sedl.chip == SEDL_CHIP_IPAC) {
- writereg(cs->hw.sedl.adr, cs->hw.sedl.isac, IPAC_POTA2, 0x20);
- mdelay(2);
- writereg(cs->hw.sedl.adr, cs->hw.sedl.isac, IPAC_POTA2, 0x0);
- mdelay(10);
- writereg(cs->hw.sedl.adr, cs->hw.sedl.isac, IPAC_CONF, 0x0);
- writereg(cs->hw.sedl.adr, cs->hw.sedl.isac, IPAC_ACFG, 0xff);
- writereg(cs->hw.sedl.adr, cs->hw.sedl.isac, IPAC_AOE, 0x0);
- writereg(cs->hw.sedl.adr, cs->hw.sedl.isac, IPAC_MASK, 0xc0);
- writereg(cs->hw.sedl.adr, cs->hw.sedl.isac, IPAC_PCFG, 0x12);
- } else if ((cs->hw.sedl.chip == SEDL_CHIP_ISAC_ISAR) &&
- (cs->hw.sedl.bus == SEDL_BUS_PCI)) {
- byteout(cs->hw.sedl.cfg_reg + 3, cs->hw.sedl.reset_on);
- mdelay(2);
- byteout(cs->hw.sedl.cfg_reg + 3, cs->hw.sedl.reset_off);
- mdelay(10);
- } else {
- byteout(cs->hw.sedl.reset_on, SEDL_RESET); /* Reset On */
- mdelay(2);
- byteout(cs->hw.sedl.reset_off, 0); /* Reset Off */
- mdelay(10);
- }
- }
-}
-
-static int
-Sedl_card_msg(struct IsdnCardState *cs, int mt, void *arg)
-{
- u_long flags;
-
- switch (mt) {
- case CARD_RESET:
- spin_lock_irqsave(&cs->lock, flags);
- reset_sedlbauer(cs);
- spin_unlock_irqrestore(&cs->lock, flags);
- return (0);
- case CARD_RELEASE:
- if (cs->hw.sedl.bus == SEDL_BUS_PCI)
- /* disable all IRQ */
- byteout(cs->hw.sedl.cfg_reg + 5, 0);
- if (cs->hw.sedl.chip == SEDL_CHIP_ISAC_ISAR) {
- spin_lock_irqsave(&cs->lock, flags);
- writereg(cs->hw.sedl.adr, cs->hw.sedl.hscx,
- ISAR_IRQBIT, 0);
- writereg(cs->hw.sedl.adr, cs->hw.sedl.isac,
- ISAC_MASK, 0xFF);
- reset_sedlbauer(cs);
- writereg(cs->hw.sedl.adr, cs->hw.sedl.hscx,
- ISAR_IRQBIT, 0);
- writereg(cs->hw.sedl.adr, cs->hw.sedl.isac,
- ISAC_MASK, 0xFF);
- spin_unlock_irqrestore(&cs->lock, flags);
- }
- release_io_sedlbauer(cs);
- return (0);
- case CARD_INIT:
- spin_lock_irqsave(&cs->lock, flags);
- if (cs->hw.sedl.bus == SEDL_BUS_PCI)
- /* enable all IRQ */
- byteout(cs->hw.sedl.cfg_reg + 5, 0x02);
- reset_sedlbauer(cs);
- if (cs->hw.sedl.chip == SEDL_CHIP_ISAC_ISAR) {
- clear_pending_isac_ints(cs);
- writereg(cs->hw.sedl.adr, cs->hw.sedl.hscx,
- ISAR_IRQBIT, 0);
- initisac(cs);
- initisar(cs);
- /* Reenable all IRQ */
- cs->writeisac(cs, ISAC_MASK, 0);
- /* RESET Receiver and Transmitter */
- cs->writeisac(cs, ISAC_CMDR, 0x41);
- } else {
- inithscxisac(cs, 3);
- }
- spin_unlock_irqrestore(&cs->lock, flags);
- return (0);
- case CARD_TEST:
- return (0);
- case MDL_INFO_CONN:
- if (cs->subtyp != SEDL_SPEEDFAX_PYRAMID)
- return (0);
- spin_lock_irqsave(&cs->lock, flags);
- if ((long) arg)
- cs->hw.sedl.reset_off &= ~SEDL_ISAR_PCI_LED2;
- else
- cs->hw.sedl.reset_off &= ~SEDL_ISAR_PCI_LED1;
- byteout(cs->hw.sedl.cfg_reg + 3, cs->hw.sedl.reset_off);
- spin_unlock_irqrestore(&cs->lock, flags);
- break;
- case MDL_INFO_REL:
- if (cs->subtyp != SEDL_SPEEDFAX_PYRAMID)
- return (0);
- spin_lock_irqsave(&cs->lock, flags);
- if ((long) arg)
- cs->hw.sedl.reset_off |= SEDL_ISAR_PCI_LED2;
- else
- cs->hw.sedl.reset_off |= SEDL_ISAR_PCI_LED1;
- byteout(cs->hw.sedl.cfg_reg + 3, cs->hw.sedl.reset_off);
- spin_unlock_irqrestore(&cs->lock, flags);
- break;
- }
- return (0);
-}
-
-#ifdef __ISAPNP__
-static struct isapnp_device_id sedl_ids[] = {
- { ISAPNP_VENDOR('S', 'A', 'G'), ISAPNP_FUNCTION(0x01),
- ISAPNP_VENDOR('S', 'A', 'G'), ISAPNP_FUNCTION(0x01),
- (unsigned long) "Speed win" },
- { ISAPNP_VENDOR('S', 'A', 'G'), ISAPNP_FUNCTION(0x02),
- ISAPNP_VENDOR('S', 'A', 'G'), ISAPNP_FUNCTION(0x02),
- (unsigned long) "Speed Fax+" },
- { 0, }
-};
-
-static struct isapnp_device_id *ipid = &sedl_ids[0];
-static struct pnp_card *pnp_c = NULL;
-
-static int setup_sedlbauer_isapnp(struct IsdnCard *card, int *bytecnt)
-{
- struct IsdnCardState *cs = card->cs;
- struct pnp_dev *pnp_d;
-
- if (!isapnp_present())
- return -1;
-
- while (ipid->card_vendor) {
- if ((pnp_c = pnp_find_card(ipid->card_vendor,
- ipid->card_device, pnp_c))) {
- pnp_d = NULL;
- if ((pnp_d = pnp_find_dev(pnp_c,
- ipid->vendor, ipid->function, pnp_d))) {
- int err;
-
- printk(KERN_INFO "HiSax: %s detected\n",
- (char *)ipid->driver_data);
- pnp_disable_dev(pnp_d);
- err = pnp_activate_dev(pnp_d);
- if (err < 0) {
- printk(KERN_WARNING "%s: pnp_activate_dev ret(%d)\n",
- __func__, err);
- return (0);
- }
- card->para[1] = pnp_port_start(pnp_d, 0);
- card->para[0] = pnp_irq(pnp_d, 0);
-
- if (card->para[0] == -1 || !card->para[1]) {
- printk(KERN_ERR "Sedlbauer PnP:some resources are missing %ld/%lx\n",
- card->para[0], card->para[1]);
- pnp_disable_dev(pnp_d);
- return (0);
- }
- cs->hw.sedl.cfg_reg = card->para[1];
- cs->irq = card->para[0];
- if (ipid->function == ISAPNP_FUNCTION(0x2)) {
- cs->subtyp = SEDL_SPEED_FAX;
- cs->hw.sedl.chip = SEDL_CHIP_ISAC_ISAR;
- *bytecnt = 16;
- } else {
- cs->subtyp = SEDL_SPEED_CARD_WIN;
- cs->hw.sedl.chip = SEDL_CHIP_TEST;
- }
-
- return (1);
- } else {
- printk(KERN_ERR "Sedlbauer PnP: PnP error card found, no device\n");
- return (0);
- }
- }
- ipid++;
- pnp_c = NULL;
- }
-
- printk(KERN_INFO "Sedlbauer PnP: no ISAPnP card found\n");
- return -1;
-}
-#else
-
-static int setup_sedlbauer_isapnp(struct IsdnCard *card, int *bytecnt)
-{
- return -1;
-}
-#endif /* __ISAPNP__ */
-
-#ifdef CONFIG_PCI
-static struct pci_dev *dev_sedl = NULL;
-
-static int setup_sedlbauer_pci(struct IsdnCard *card)
-{
- struct IsdnCardState *cs = card->cs;
- u16 sub_vendor_id, sub_id;
-
- if ((dev_sedl = hisax_find_pci_device(PCI_VENDOR_ID_TIGERJET,
- PCI_DEVICE_ID_TIGERJET_100, dev_sedl))) {
- if (pci_enable_device(dev_sedl))
- return (0);
- cs->irq = dev_sedl->irq;
- if (!cs->irq) {
- printk(KERN_WARNING "Sedlbauer: No IRQ for PCI card found\n");
- return (0);
- }
- cs->hw.sedl.cfg_reg = pci_resource_start(dev_sedl, 0);
- } else {
- printk(KERN_WARNING "Sedlbauer: No PCI card found\n");
- return (0);
- }
- cs->irq_flags |= IRQF_SHARED;
- cs->hw.sedl.bus = SEDL_BUS_PCI;
- sub_vendor_id = dev_sedl->subsystem_vendor;
- sub_id = dev_sedl->subsystem_device;
- printk(KERN_INFO "Sedlbauer: PCI subvendor:%x subid %x\n",
- sub_vendor_id, sub_id);
- printk(KERN_INFO "Sedlbauer: PCI base adr %#x\n",
- cs->hw.sedl.cfg_reg);
- if (sub_id != PCI_SUB_ID_SEDLBAUER) {
- printk(KERN_ERR "Sedlbauer: unknown sub id %#x\n", sub_id);
- return (0);
- }
- if (sub_vendor_id == PCI_SUBVENDOR_SPEEDFAX_PYRAMID) {
- cs->hw.sedl.chip = SEDL_CHIP_ISAC_ISAR;
- cs->subtyp = SEDL_SPEEDFAX_PYRAMID;
- } else if (sub_vendor_id == PCI_SUBVENDOR_SPEEDFAX_PCI) {
- cs->hw.sedl.chip = SEDL_CHIP_ISAC_ISAR;
- cs->subtyp = SEDL_SPEEDFAX_PCI;
- } else if (sub_vendor_id == PCI_SUBVENDOR_HST_SAPHIR3) {
- cs->hw.sedl.chip = SEDL_CHIP_IPAC;
- cs->subtyp = HST_SAPHIR3;
- } else if (sub_vendor_id == PCI_SUBVENDOR_SEDLBAUER_PCI) {
- cs->hw.sedl.chip = SEDL_CHIP_IPAC;
- cs->subtyp = SEDL_SPEED_PCI;
- } else {
- printk(KERN_ERR "Sedlbauer: unknown sub vendor id %#x\n",
- sub_vendor_id);
- return (0);
- }
-
- cs->hw.sedl.reset_on = SEDL_ISAR_PCI_ISAR_RESET_ON;
- cs->hw.sedl.reset_off = SEDL_ISAR_PCI_ISAR_RESET_OFF;
- byteout(cs->hw.sedl.cfg_reg, 0xff);
- byteout(cs->hw.sedl.cfg_reg, 0x00);
- byteout(cs->hw.sedl.cfg_reg + 2, 0xdd);
- byteout(cs->hw.sedl.cfg_reg + 5, 0); /* disable all IRQ */
- byteout(cs->hw.sedl.cfg_reg + 3, cs->hw.sedl.reset_on);
- mdelay(2);
- byteout(cs->hw.sedl.cfg_reg + 3, cs->hw.sedl.reset_off);
- mdelay(10);
-
- return (1);
-}
-
-#else
-
-static int setup_sedlbauer_pci(struct IsdnCard *card)
-{
- return (1);
-}
-
-#endif /* CONFIG_PCI */
-
-int setup_sedlbauer(struct IsdnCard *card)
-{
- int bytecnt = 8, ver, val, rc;
- struct IsdnCardState *cs = card->cs;
- char tmp[64];
-
- strcpy(tmp, Sedlbauer_revision);
- printk(KERN_INFO "HiSax: Sedlbauer driver Rev. %s\n", HiSax_getrev(tmp));
-
- if (cs->typ == ISDN_CTYPE_SEDLBAUER) {
- cs->subtyp = SEDL_SPEED_CARD_WIN;
- cs->hw.sedl.bus = SEDL_BUS_ISA;
- cs->hw.sedl.chip = SEDL_CHIP_TEST;
- } else if (cs->typ == ISDN_CTYPE_SEDLBAUER_PCMCIA) {
- cs->subtyp = SEDL_SPEED_STAR;
- cs->hw.sedl.bus = SEDL_BUS_PCMCIA;
- cs->hw.sedl.chip = SEDL_CHIP_TEST;
- } else if (cs->typ == ISDN_CTYPE_SEDLBAUER_FAX) {
- cs->subtyp = SEDL_SPEED_FAX;
- cs->hw.sedl.bus = SEDL_BUS_ISA;
- cs->hw.sedl.chip = SEDL_CHIP_ISAC_ISAR;
- } else
- return (0);
-
- bytecnt = 8;
- if (card->para[1]) {
- cs->hw.sedl.cfg_reg = card->para[1];
- cs->irq = card->para[0];
- if (cs->hw.sedl.chip == SEDL_CHIP_ISAC_ISAR) {
- bytecnt = 16;
- }
- } else {
- rc = setup_sedlbauer_isapnp(card, &bytecnt);
- if (!rc)
- return (0);
- if (rc > 0)
- goto ready;
-
- /* Probe for Sedlbauer speed pci */
- rc = setup_sedlbauer_pci(card);
- if (!rc)
- return (0);
-
- bytecnt = 256;
- }
-
-ready:
-
- /* In case of the sedlbauer pcmcia card, this region is in use,
- * reserved for us by the card manager. So we do not check it
- * here, it would fail.
- */
- if (cs->hw.sedl.bus != SEDL_BUS_PCMCIA &&
- !request_region(cs->hw.sedl.cfg_reg, bytecnt, "sedlbauer isdn")) {
- printk(KERN_WARNING
- "HiSax: %s config port %x-%x already in use\n",
- CardType[card->typ],
- cs->hw.sedl.cfg_reg,
- cs->hw.sedl.cfg_reg + bytecnt);
- return (0);
- }
-
- printk(KERN_INFO
- "Sedlbauer: defined at 0x%x-0x%x IRQ %d\n",
- cs->hw.sedl.cfg_reg,
- cs->hw.sedl.cfg_reg + bytecnt,
- cs->irq);
-
- cs->BC_Read_Reg = &ReadHSCX;
- cs->BC_Write_Reg = &WriteHSCX;
- cs->BC_Send_Data = &hscx_fill_fifo;
- cs->cardmsg = &Sedl_card_msg;
-
-/*
- * testing ISA and PCMCIA Cards for IPAC, default is ISAC
- * do not test for PCI card, because ports are different
- * and PCI card uses only IPAC (for the moment)
- */
- if (cs->hw.sedl.bus != SEDL_BUS_PCI) {
- val = readreg(cs->hw.sedl.cfg_reg + SEDL_IPAC_ANY_ADR,
- cs->hw.sedl.cfg_reg + SEDL_IPAC_ANY_IPAC, IPAC_ID);
- printk(KERN_DEBUG "Sedlbauer: testing IPAC version %x\n", val);
- if ((val == 1) || (val == 2)) {
- /* IPAC */
- cs->subtyp = SEDL_SPEED_WIN2_PC104;
- if (cs->hw.sedl.bus == SEDL_BUS_PCMCIA) {
- cs->subtyp = SEDL_SPEED_STAR2;
- }
- cs->hw.sedl.chip = SEDL_CHIP_IPAC;
- } else {
-			/* ISAC_HSCX or ISAC_ISAR */
- if (cs->hw.sedl.chip == SEDL_CHIP_TEST) {
- cs->hw.sedl.chip = SEDL_CHIP_ISAC_HSCX;
- }
- }
- }
-
-/*
- * hw.sedl.chip is now properly set
- */
- printk(KERN_INFO "Sedlbauer: %s detected\n",
- Sedlbauer_Types[cs->subtyp]);
-
- setup_isac(cs);
- if (cs->hw.sedl.chip == SEDL_CHIP_IPAC) {
- if (cs->hw.sedl.bus == SEDL_BUS_PCI) {
- cs->hw.sedl.adr = cs->hw.sedl.cfg_reg + SEDL_IPAC_PCI_ADR;
- cs->hw.sedl.isac = cs->hw.sedl.cfg_reg + SEDL_IPAC_PCI_IPAC;
- cs->hw.sedl.hscx = cs->hw.sedl.cfg_reg + SEDL_IPAC_PCI_IPAC;
- } else {
- cs->hw.sedl.adr = cs->hw.sedl.cfg_reg + SEDL_IPAC_ANY_ADR;
- cs->hw.sedl.isac = cs->hw.sedl.cfg_reg + SEDL_IPAC_ANY_IPAC;
- cs->hw.sedl.hscx = cs->hw.sedl.cfg_reg + SEDL_IPAC_ANY_IPAC;
- }
- test_and_set_bit(HW_IPAC, &cs->HW_Flags);
- cs->readisac = &ReadISAC_IPAC;
- cs->writeisac = &WriteISAC_IPAC;
- cs->readisacfifo = &ReadISACfifo_IPAC;
- cs->writeisacfifo = &WriteISACfifo_IPAC;
- cs->irq_func = &sedlbauer_interrupt_ipac;
- val = readreg(cs->hw.sedl.adr, cs->hw.sedl.isac, IPAC_ID);
- printk(KERN_INFO "Sedlbauer: IPAC version %x\n", val);
- } else {
-		/* ISAC_HSCX or ISAC_ISAR */
- cs->readisac = &ReadISAC;
- cs->writeisac = &WriteISAC;
- cs->readisacfifo = &ReadISACfifo;
- cs->writeisacfifo = &WriteISACfifo;
- if (cs->hw.sedl.chip == SEDL_CHIP_ISAC_ISAR) {
- if (cs->hw.sedl.bus == SEDL_BUS_PCI) {
- cs->hw.sedl.adr = cs->hw.sedl.cfg_reg +
- SEDL_ISAR_PCI_ADR;
- cs->hw.sedl.isac = cs->hw.sedl.cfg_reg +
- SEDL_ISAR_PCI_ISAC;
- cs->hw.sedl.hscx = cs->hw.sedl.cfg_reg +
- SEDL_ISAR_PCI_ISAR;
- } else {
- cs->hw.sedl.adr = cs->hw.sedl.cfg_reg +
- SEDL_ISAR_ISA_ADR;
- cs->hw.sedl.isac = cs->hw.sedl.cfg_reg +
- SEDL_ISAR_ISA_ISAC;
- cs->hw.sedl.hscx = cs->hw.sedl.cfg_reg +
- SEDL_ISAR_ISA_ISAR;
- cs->hw.sedl.reset_on = cs->hw.sedl.cfg_reg +
- SEDL_ISAR_ISA_ISAR_RESET_ON;
- cs->hw.sedl.reset_off = cs->hw.sedl.cfg_reg +
- SEDL_ISAR_ISA_ISAR_RESET_OFF;
- }
- cs->bcs[0].hw.isar.reg = &cs->hw.sedl.isar;
- cs->bcs[1].hw.isar.reg = &cs->hw.sedl.isar;
- test_and_set_bit(HW_ISAR, &cs->HW_Flags);
- cs->irq_func = &sedlbauer_interrupt_isar;
- cs->auxcmd = &isar_auxcmd;
- ISACVersion(cs, "Sedlbauer:");
- cs->BC_Read_Reg = &ReadISAR;
- cs->BC_Write_Reg = &WriteISAR;
- cs->BC_Send_Data = &isar_fill_fifo;
- bytecnt = 3;
- while (bytecnt) {
- ver = ISARVersion(cs, "Sedlbauer:");
- if (ver < 0)
- printk(KERN_WARNING
- "Sedlbauer: wrong ISAR version (ret = %d)\n", ver);
- else
- break;
- reset_sedlbauer(cs);
- bytecnt--;
- }
- if (!bytecnt) {
- release_io_sedlbauer(cs);
- return (0);
- }
- } else {
- if (cs->hw.sedl.bus == SEDL_BUS_PCMCIA) {
- cs->hw.sedl.adr = cs->hw.sedl.cfg_reg + SEDL_HSCX_PCMCIA_ADR;
- cs->hw.sedl.isac = cs->hw.sedl.cfg_reg + SEDL_HSCX_PCMCIA_ISAC;
- cs->hw.sedl.hscx = cs->hw.sedl.cfg_reg + SEDL_HSCX_PCMCIA_HSCX;
- cs->hw.sedl.reset_on = cs->hw.sedl.cfg_reg + SEDL_HSCX_PCMCIA_RESET;
- cs->hw.sedl.reset_off = cs->hw.sedl.cfg_reg + SEDL_HSCX_PCMCIA_RESET;
- cs->irq_flags |= IRQF_SHARED;
- } else {
- cs->hw.sedl.adr = cs->hw.sedl.cfg_reg + SEDL_HSCX_ISA_ADR;
- cs->hw.sedl.isac = cs->hw.sedl.cfg_reg + SEDL_HSCX_ISA_ISAC;
- cs->hw.sedl.hscx = cs->hw.sedl.cfg_reg + SEDL_HSCX_ISA_HSCX;
- cs->hw.sedl.reset_on = cs->hw.sedl.cfg_reg + SEDL_HSCX_ISA_RESET_ON;
- cs->hw.sedl.reset_off = cs->hw.sedl.cfg_reg + SEDL_HSCX_ISA_RESET_OFF;
- }
- cs->irq_func = &sedlbauer_interrupt;
- ISACVersion(cs, "Sedlbauer:");
-
- if (HscxVersion(cs, "Sedlbauer:")) {
- printk(KERN_WARNING
- "Sedlbauer: wrong HSCX versions check IO address\n");
- release_io_sedlbauer(cs);
- return (0);
- }
- }
- }
- return (1);
-}
diff --git a/drivers/isdn/hisax/sedlbauer_cs.c b/drivers/isdn/hisax/sedlbauer_cs.c
deleted file mode 100644
index 92ef62d4caf4..000000000000
--- a/drivers/isdn/hisax/sedlbauer_cs.c
+++ /dev/null
@@ -1,209 +0,0 @@
-/*======================================================================
-
- A Sedlbauer PCMCIA client driver
-
- This driver is for the Sedlbauer Speed Star and Speed Star II,
- which are ISDN PCMCIA Cards.
-
- The contents of this file are subject to the Mozilla Public
- License Version 1.1 (the "License"); you may not use this file
- except in compliance with the License. You may obtain a copy of
- the License at http://www.mozilla.org/MPL/
-
- Software distributed under the License is distributed on an "AS
- IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
- implied. See the License for the specific language governing
- rights and limitations under the License.
-
- The initial developer of the original code is David A. Hinds
- <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
- are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
-
- Modifications from dummy_cs.c are Copyright (C) 1999-2001 Marcus Niemann
- <maniemann@users.sourceforge.net>. All Rights Reserved.
-
- Alternatively, the contents of this file may be used under the
- terms of the GNU General Public License version 2 (the "GPL"), in
- which case the provisions of the GPL are applicable instead of the
- above. If you wish to allow the use of your version of this file
- only under the terms of the GPL and not to allow others to use
- your version of this file under the MPL, indicate your decision
- by deleting the provisions above and replace them with the notice
- and other provisions required by the GPL. If you do not delete
- the provisions above, a recipient may use your version of this
- file under either the MPL or the GPL.
-
- ======================================================================*/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/ptrace.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/ioport.h>
-#include <asm/io.h>
-
-#include <pcmcia/cistpl.h>
-#include <pcmcia/cisreg.h>
-#include <pcmcia/ds.h>
-#include "hisax_cfg.h"
-
-MODULE_DESCRIPTION("ISDN4Linux: PCMCIA client driver for Sedlbauer cards");
-MODULE_AUTHOR("Marcus Niemann");
-MODULE_LICENSE("Dual MPL/GPL");
-
-
-/*====================================================================*/
-
-/* Parameters that can be set with 'insmod' */
-
-static int protocol = 2; /* EURO-ISDN Default */
-module_param(protocol, int, 0);
-
-static int sedlbauer_config(struct pcmcia_device *link);
-static void sedlbauer_release(struct pcmcia_device *link);
-
-static void sedlbauer_detach(struct pcmcia_device *p_dev);
-
-typedef struct local_info_t {
- struct pcmcia_device *p_dev;
- int stop;
- int cardnr;
-} local_info_t;
-
-static int sedlbauer_probe(struct pcmcia_device *link)
-{
- local_info_t *local;
-
- dev_dbg(&link->dev, "sedlbauer_attach()\n");
-
- /* Allocate space for private device-specific data */
- local = kzalloc(sizeof(local_info_t), GFP_KERNEL);
- if (!local) return -ENOMEM;
- local->cardnr = -1;
-
- local->p_dev = link;
- link->priv = local;
-
- return sedlbauer_config(link);
-} /* sedlbauer_attach */
-
-static void sedlbauer_detach(struct pcmcia_device *link)
-{
- dev_dbg(&link->dev, "sedlbauer_detach(0x%p)\n", link);
-
- ((local_info_t *)link->priv)->stop = 1;
- sedlbauer_release(link);
-
- /* This points to the parent local_info_t struct */
- kfree(link->priv);
-} /* sedlbauer_detach */
-
-static int sedlbauer_config_check(struct pcmcia_device *p_dev, void *priv_data)
-{
- if (p_dev->config_index == 0)
- return -EINVAL;
-
- p_dev->io_lines = 3;
- return pcmcia_request_io(p_dev);
-}
-
-static int sedlbauer_config(struct pcmcia_device *link)
-{
- int ret;
- IsdnCard_t icard;
-
- dev_dbg(&link->dev, "sedlbauer_config(0x%p)\n", link);
-
- link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_CHECK_VCC |
- CONF_AUTO_SET_VPP | CONF_AUTO_AUDIO | CONF_AUTO_SET_IO;
-
- ret = pcmcia_loop_config(link, sedlbauer_config_check, NULL);
- if (ret)
- goto failed;
-
- ret = pcmcia_enable_device(link);
- if (ret)
- goto failed;
-
- icard.para[0] = link->irq;
- icard.para[1] = link->resource[0]->start;
- icard.protocol = protocol;
- icard.typ = ISDN_CTYPE_SEDLBAUER_PCMCIA;
-
- ret = hisax_init_pcmcia(link,
- &(((local_info_t *)link->priv)->stop), &icard);
- if (ret < 0) {
- printk(KERN_ERR "sedlbauer_cs: failed to initialize SEDLBAUER PCMCIA %d with %pR\n",
- ret, link->resource[0]);
- sedlbauer_release(link);
- return -ENODEV;
- } else
- ((local_info_t *)link->priv)->cardnr = ret;
-
- return 0;
-
-failed:
- sedlbauer_release(link);
- return -ENODEV;
-
-} /* sedlbauer_config */
-
-static void sedlbauer_release(struct pcmcia_device *link)
-{
- local_info_t *local = link->priv;
- dev_dbg(&link->dev, "sedlbauer_release(0x%p)\n", link);
-
- if (local) {
- if (local->cardnr >= 0) {
- /* no unregister function with hisax */
- HiSax_closecard(local->cardnr);
- }
- }
-
- pcmcia_disable_device(link);
-} /* sedlbauer_release */
-
-static int sedlbauer_suspend(struct pcmcia_device *link)
-{
- local_info_t *dev = link->priv;
-
- dev->stop = 1;
-
- return 0;
-}
-
-static int sedlbauer_resume(struct pcmcia_device *link)
-{
- local_info_t *dev = link->priv;
-
- dev->stop = 0;
-
- return 0;
-}
-
-
-static const struct pcmcia_device_id sedlbauer_ids[] = {
- PCMCIA_DEVICE_PROD_ID123("SEDLBAUER", "speed star II", "V 3.1", 0x81fb79f5, 0xf3612e1d, 0x6b95c78a),
- PCMCIA_DEVICE_PROD_ID123("SEDLBAUER", "ISDN-Adapter", "4D67", 0x81fb79f5, 0xe4e9bc12, 0x397b7e90),
- PCMCIA_DEVICE_PROD_ID123("SEDLBAUER", "ISDN-Adapter", "4D98", 0x81fb79f5, 0xe4e9bc12, 0x2e5c7fce),
- PCMCIA_DEVICE_PROD_ID123("SEDLBAUER", "ISDN-Adapter", " (C) 93-94 VK", 0x81fb79f5, 0xe4e9bc12, 0x8db143fe),
- PCMCIA_DEVICE_PROD_ID123("SEDLBAUER", "ISDN-Adapter", " (c) 93-95 VK", 0x81fb79f5, 0xe4e9bc12, 0xb391ab4c),
- PCMCIA_DEVICE_PROD_ID12("HST High Soft Tech GmbH", "Saphir II B", 0xd79e0b84, 0x21d083ae),
-/* PCMCIA_DEVICE_PROD_ID1234("SEDLBAUER", 0x81fb79f5), */ /* too generic*/
- PCMCIA_DEVICE_NULL
-};
-MODULE_DEVICE_TABLE(pcmcia, sedlbauer_ids);
-
-static struct pcmcia_driver sedlbauer_driver = {
- .owner = THIS_MODULE,
- .name = "sedlbauer_cs",
- .probe = sedlbauer_probe,
- .remove = sedlbauer_detach,
- .id_table = sedlbauer_ids,
- .suspend = sedlbauer_suspend,
- .resume = sedlbauer_resume,
-};
-module_pcmcia_driver(sedlbauer_driver);
diff --git a/drivers/isdn/hisax/sportster.c b/drivers/isdn/hisax/sportster.c
deleted file mode 100644
index 18cee6360d0a..000000000000
--- a/drivers/isdn/hisax/sportster.c
+++ /dev/null
@@ -1,267 +0,0 @@
-/* $Id: sportster.c,v 1.16.2.4 2004/01/13 23:48:39 keil Exp $
- *
- * low level stuff for USR Sportster internal TA
- *
- * Author Karsten Keil
- * Copyright by Karsten Keil <keil@isdn4linux.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- * Thanks to Christian "naddy" Weisgerber (3Com, US Robotics) for documentation
- *
- *
- */
-#include <linux/init.h>
-#include "hisax.h"
-#include "isac.h"
-#include "hscx.h"
-#include "isdnl1.h"
-
-static const char *sportster_revision = "$Revision: 1.16.2.4 $";
-
-#define byteout(addr, val) outb(val, addr)
-#define bytein(addr) inb(addr)
-
-#define SPORTSTER_ISAC 0xC000
-#define SPORTSTER_HSCXA 0x0000
-#define SPORTSTER_HSCXB 0x4000
-#define SPORTSTER_RES_IRQ 0x8000
-#define SPORTSTER_RESET 0x80
-#define SPORTSTER_INTE 0x40
-
-static inline int
-calc_off(unsigned int base, unsigned int off)
-{
- return (base + ((off & 0xfc) << 8) + ((off & 3) << 1));
-}
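For concreteness (a worked example, not taken from the original file): calc_off() scatters the 8-bit chip register offset across the Sportster's sparse ISA decode. Bits 2-7 of the offset select one of 64 windows spaced 1024 I/O ports apart, and bits 0-1 select an even address inside an 8-port window, which is why get_io_range() further down reserves 64 regions of 8 ports each. For instance, offset 0x2e resolves to base + (0x2c << 8) + (0x2 << 1) = base + 0x2c04.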
-
-static inline void
-read_fifo(unsigned int adr, u_char *data, int size)
-{
- insb(adr, data, size);
-}
-
-static void
-write_fifo(unsigned int adr, u_char *data, int size)
-{
- outsb(adr, data, size);
-}
-
-/* Interface functions */
-
-static u_char
-ReadISAC(struct IsdnCardState *cs, u_char offset)
-{
- return (bytein(calc_off(cs->hw.spt.isac, offset)));
-}
-
-static void
-WriteISAC(struct IsdnCardState *cs, u_char offset, u_char value)
-{
- byteout(calc_off(cs->hw.spt.isac, offset), value);
-}
-
-static void
-ReadISACfifo(struct IsdnCardState *cs, u_char *data, int size)
-{
- read_fifo(cs->hw.spt.isac, data, size);
-}
-
-static void
-WriteISACfifo(struct IsdnCardState *cs, u_char *data, int size)
-{
- write_fifo(cs->hw.spt.isac, data, size);
-}
-
-static u_char
-ReadHSCX(struct IsdnCardState *cs, int hscx, u_char offset)
-{
- return (bytein(calc_off(cs->hw.spt.hscx[hscx], offset)));
-}
-
-static void
-WriteHSCX(struct IsdnCardState *cs, int hscx, u_char offset, u_char value)
-{
- byteout(calc_off(cs->hw.spt.hscx[hscx], offset), value);
-}
-
-/*
- * fast interrupt HSCX stuff goes here
- */
-
-#define READHSCX(cs, nr, reg) bytein(calc_off(cs->hw.spt.hscx[nr], reg))
-#define WRITEHSCX(cs, nr, reg, data) byteout(calc_off(cs->hw.spt.hscx[nr], reg), data)
-#define READHSCXFIFO(cs, nr, ptr, cnt) read_fifo(cs->hw.spt.hscx[nr], ptr, cnt)
-#define WRITEHSCXFIFO(cs, nr, ptr, cnt) write_fifo(cs->hw.spt.hscx[nr], ptr, cnt)
-
-#include "hscx_irq.c"
-
-static irqreturn_t
-sportster_interrupt(int intno, void *dev_id)
-{
- struct IsdnCardState *cs = dev_id;
- u_char val;
- u_long flags;
-
- spin_lock_irqsave(&cs->lock, flags);
- val = READHSCX(cs, 1, HSCX_ISTA);
-Start_HSCX:
- if (val)
- hscx_int_main(cs, val);
- val = ReadISAC(cs, ISAC_ISTA);
-Start_ISAC:
- if (val)
- isac_interrupt(cs, val);
- val = READHSCX(cs, 1, HSCX_ISTA);
- if (val) {
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "HSCX IntStat after IntRoutine");
- goto Start_HSCX;
- }
- val = ReadISAC(cs, ISAC_ISTA);
- if (val) {
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "ISAC IntStat after IntRoutine");
- goto Start_ISAC;
- }
-	/* get a new irq impulse if there are any pending */
- bytein(cs->hw.spt.cfg_reg + SPORTSTER_RES_IRQ + 1);
- spin_unlock_irqrestore(&cs->lock, flags);
- return IRQ_HANDLED;
-}
-
-static void
-release_io_sportster(struct IsdnCardState *cs)
-{
- int i, adr;
-
- byteout(cs->hw.spt.cfg_reg + SPORTSTER_RES_IRQ, 0);
- for (i = 0; i < 64; i++) {
- adr = cs->hw.spt.cfg_reg + i * 1024;
- release_region(adr, 8);
- }
-}
-
-static void
-reset_sportster(struct IsdnCardState *cs)
-{
- cs->hw.spt.res_irq |= SPORTSTER_RESET; /* Reset On */
- byteout(cs->hw.spt.cfg_reg + SPORTSTER_RES_IRQ, cs->hw.spt.res_irq);
- mdelay(10);
- cs->hw.spt.res_irq &= ~SPORTSTER_RESET; /* Reset Off */
- byteout(cs->hw.spt.cfg_reg + SPORTSTER_RES_IRQ, cs->hw.spt.res_irq);
- mdelay(10);
-}
-
-static int
-Sportster_card_msg(struct IsdnCardState *cs, int mt, void *arg)
-{
- u_long flags;
-
- switch (mt) {
- case CARD_RESET:
- spin_lock_irqsave(&cs->lock, flags);
- reset_sportster(cs);
- spin_unlock_irqrestore(&cs->lock, flags);
- return (0);
- case CARD_RELEASE:
- release_io_sportster(cs);
- return (0);
- case CARD_INIT:
- spin_lock_irqsave(&cs->lock, flags);
- reset_sportster(cs);
- inithscxisac(cs, 1);
- cs->hw.spt.res_irq |= SPORTSTER_INTE; /* IRQ On */
- byteout(cs->hw.spt.cfg_reg + SPORTSTER_RES_IRQ, cs->hw.spt.res_irq);
- inithscxisac(cs, 2);
- spin_unlock_irqrestore(&cs->lock, flags);
- return (0);
- case CARD_TEST:
- return (0);
- }
- return (0);
-}
-
-static int get_io_range(struct IsdnCardState *cs)
-{
- int i, j, adr;
-
- for (i = 0; i < 64; i++) {
- adr = cs->hw.spt.cfg_reg + i * 1024;
- if (!request_region(adr, 8, "sportster")) {
- printk(KERN_WARNING "HiSax: USR Sportster config port "
- "%x-%x already in use\n",
- adr, adr + 8);
- break;
- }
- }
- if (i == 64)
- return (1);
- else {
- for (j = 0; j < i; j++) {
- adr = cs->hw.spt.cfg_reg + j * 1024;
- release_region(adr, 8);
- }
- return (0);
- }
-}
-
-int setup_sportster(struct IsdnCard *card)
-{
- struct IsdnCardState *cs = card->cs;
- char tmp[64];
-
- strcpy(tmp, sportster_revision);
- printk(KERN_INFO "HiSax: USR Sportster driver Rev. %s\n", HiSax_getrev(tmp));
- if (cs->typ != ISDN_CTYPE_SPORTSTER)
- return (0);
-
- cs->hw.spt.cfg_reg = card->para[1];
- cs->irq = card->para[0];
- if (!get_io_range(cs))
- return (0);
- cs->hw.spt.isac = cs->hw.spt.cfg_reg + SPORTSTER_ISAC;
- cs->hw.spt.hscx[0] = cs->hw.spt.cfg_reg + SPORTSTER_HSCXA;
- cs->hw.spt.hscx[1] = cs->hw.spt.cfg_reg + SPORTSTER_HSCXB;
-
- switch (cs->irq) {
- case 5: cs->hw.spt.res_irq = 1;
- break;
- case 7: cs->hw.spt.res_irq = 2;
- break;
- case 10:cs->hw.spt.res_irq = 3;
- break;
- case 11:cs->hw.spt.res_irq = 4;
- break;
- case 12:cs->hw.spt.res_irq = 5;
- break;
- case 14:cs->hw.spt.res_irq = 6;
- break;
- case 15:cs->hw.spt.res_irq = 7;
- break;
- default:release_io_sportster(cs);
- printk(KERN_WARNING "Sportster: wrong IRQ\n");
- return (0);
- }
- printk(KERN_INFO "HiSax: USR Sportster config irq:%d cfg:0x%X\n",
- cs->irq, cs->hw.spt.cfg_reg);
- setup_isac(cs);
- cs->readisac = &ReadISAC;
- cs->writeisac = &WriteISAC;
- cs->readisacfifo = &ReadISACfifo;
- cs->writeisacfifo = &WriteISACfifo;
- cs->BC_Read_Reg = &ReadHSCX;
- cs->BC_Write_Reg = &WriteHSCX;
- cs->BC_Send_Data = &hscx_fill_fifo;
- cs->cardmsg = &Sportster_card_msg;
- cs->irq_func = &sportster_interrupt;
- ISACVersion(cs, "Sportster:");
- if (HscxVersion(cs, "Sportster:")) {
- printk(KERN_WARNING
- "Sportster: wrong HSCX versions check IO address\n");
- release_io_sportster(cs);
- return (0);
- }
- return (1);
-}
diff --git a/drivers/isdn/hisax/st5481.h b/drivers/isdn/hisax/st5481.h
deleted file mode 100644
index b421b86ca7da..000000000000
--- a/drivers/isdn/hisax/st5481.h
+++ /dev/null
@@ -1,529 +0,0 @@
-/*
- * Driver for ST5481 USB ISDN modem
- *
- * Author Frode Isaksen
- * Copyright 2001 by Frode Isaksen <fisaksen@bewan.com>
- * 2001 by Kai Germaschewski <kai.germaschewski@gmx.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#ifndef _ST5481_H_
-#define _ST5481_H_
-
-
-// USB IDs, the Product Id is in the range 0x4810-0x481F
-
-#define ST_VENDOR_ID 0x0483
-#define ST5481_PRODUCT_ID 0x4810
-#define ST5481_PRODUCT_ID_MASK 0xFFF0
-
-// ST5481 endpoints when using alternative setting 3 (2B+D).
-// To get the endpoint address, OR with 0x80 for IN endpoints.
-
-#define EP_CTRL 0x00U /* Control endpoint */
-#define EP_INT 0x01U /* Interrupt endpoint */
-#define EP_B1_OUT 0x02U /* B1 channel out */
-#define EP_B1_IN 0x03U /* B1 channel in */
-#define EP_B2_OUT 0x04U /* B2 channel out */
-#define EP_B2_IN 0x05U /* B2 channel in */
-#define EP_D_OUT 0x06U /* D channel out */
-#define EP_D_IN 0x07U /* D channel in */
-
-// Number of isochronous packets. With 20 packets we get
-// 50 interrupts/sec for each endpoint.
-
-#define NUM_ISO_PACKETS_D 20
-#define NUM_ISO_PACKETS_B 20
-
-// Size of each isochronous packet.
-// In outgoing direction we need to match ISDN data rates:
-// D: 2 bytes / msec -> 16 kbit / s
-// B: 16 bytes / msec -> 64 kbit / s
-#define SIZE_ISO_PACKETS_D_IN 16
-#define SIZE_ISO_PACKETS_D_OUT 2
-#define SIZE_ISO_PACKETS_B_IN 32
-#define SIZE_ISO_PACKETS_B_OUT 8
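To spell out the arithmetic behind these sizes (not part of the original header): full-speed USB carries one isochronous packet per 1 ms frame, so 20 packets per URB means a completion roughly every 20 ms, i.e. the 50 interrupts/sec quoted above. On the transmit side, 2 bytes per frame on D-out is 16 kbit/s, and a 64 kbit/s B channel works out to 8 bytes per frame, matching SIZE_ISO_PACKETS_B_OUT.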
-
-// If we overrun/underrun, we send one packet with +/- 2 bytes
-#define B_FLOW_ADJUST 2
-
-// Registers that are written using vendor specific device request
-// on endpoint 0.
-
-#define LBA 0x02 /* S loopback */
-#define SET_DEFAULT 0x06 /* Soft reset */
-#define LBB 0x1D /* S maintenance loopback */
-#define STT 0x1e /* S force transmission signals */
-#define SDA_MIN 0x20 /* SDA-sin minimal value */
-#define SDA_MAX 0x21 /* SDA-sin maximal value */
-#define SDELAY_VALUE 0x22 /* Delay between Tx and Rx clock */
-#define IN_D_COUNTER 0x36 /* D receive channel fifo counter */
-#define OUT_D_COUNTER 0x37 /* D transmit channel fifo counter */
-#define IN_B1_COUNTER 0x38 /* B1 receive channel fifo counter */
-#define OUT_B1_COUNTER 0x39 /* B1 transmit channel fifo counter */
-#define IN_B2_COUNTER 0x3a /* B2 receive channel fifo counter */
-#define OUT_B2_COUNTER 0x3b /* B2 transmit channel fifo counter */
-#define FFCTRL_IN_D 0x3C /* D receive channel fifo threshold low */
-#define FFCTRH_IN_D 0x3D /* D receive channel fifo threshold high */
-#define FFCTRL_OUT_D 0x3E /* D transmit channel fifo threshold low */
-#define FFCTRH_OUT_D 0x3F /* D transmit channel fifo threshold high */
-#define FFCTRL_IN_B1 0x40 /* B1 receive channel fifo threshold low */
-#define FFCTRH_IN_B1 0x41 /* B1 receive channel fifo threshold high */
-#define FFCTRL_OUT_B1 0x42 /* B1 transmit channel fifo threshold low */
-#define FFCTRH_OUT_B1 0x43 /* B1 transmit channel fifo threshold high */
-#define FFCTRL_IN_B2 0x44 /* B2 receive channel fifo threshold low */
-#define FFCTRH_IN_B2 0x45 /* B2 receive channel fifo threshold high */
-#define FFCTRL_OUT_B2 0x46 /* B2 transmit channel fifo threshold low */
-#define FFCTRH_OUT_B2 0x47 /* B2 transmit channel fifo threshold high */
-#define MPMSK 0x4A /* Multi purpose interrupt MASK register */
-#define FFMSK_D 0x4c /* D fifo interrupt MASK register */
-#define FFMSK_B1 0x4e /* B1 fifo interrupt MASK register */
-#define FFMSK_B2 0x50 /* B2 fifo interrupt MASK register */
-#define GPIO_DIR 0x52 /* GPIO pins direction registers */
-#define GPIO_OUT 0x53 /* GPIO pins output register */
-#define GPIO_IN 0x54 /* GPIO pins input register */
-#define TXCI 0x56 /* CI command to be transmitted */
-
-
-// Format of the interrupt packet received on endpoint 1:
-//
-// +--------+--------+--------+--------+--------+--------+
-// !MPINT !FFINT_D !FFINT_B1!FFINT_B2!CCIST !GPIO_INT!
-// +--------+--------+--------+--------+--------+--------+
-
-// Offsets in the interrupt packet
-
-#define MPINT 0
-#define FFINT_D 1
-#define FFINT_B1 2
-#define FFINT_B2 3
-#define CCIST 4
-#define GPIO_INT 5
-#define INT_PKT_SIZE 6
-
-// MPINT
-#define LSD_INT 0x80 /* S line activity detected */
-#define RXCI_INT 0x40 /* Indicate primitive arrived */
-#define DEN_INT 0x20 /* Signal enabling data out of D Tx fifo */
-#define DCOLL_INT 0x10 /* D channel collision */
-#define AMIVN_INT 0x04 /* AMI violation number reached 2 */
-#define INFOI_INT 0x04 /* INFOi changed */
-#define DRXON_INT 0x02 /* Reception channel active */
-#define GPCHG_INT 0x01 /* GPIO pin value changed */
-
-// FFINT_x
-#define IN_OVERRUN 0x80 /* In fifo overrun */
-#define OUT_UNDERRUN 0x40 /* Out fifo underrun */
-#define IN_UP 0x20 /* In fifo thresholdh up-crossed */
-#define IN_DOWN 0x10 /* In fifo thresholdl down-crossed */
-#define OUT_UP 0x08 /* Out fifo thresholdh up-crossed */
-#define OUT_DOWN 0x04 /* Out fifo thresholdl down-crossed */
-#define IN_COUNTER_ZEROED 0x02 /* In down-counter reached 0 */
-#define OUT_COUNTER_ZEROED 0x01 /* Out down-counter reached 0 */
-
-#define ANY_REC_INT (IN_OVERRUN + IN_UP + IN_DOWN + IN_COUNTER_ZEROED)
-#define ANY_XMIT_INT (OUT_UNDERRUN + OUT_UP + OUT_DOWN + OUT_COUNTER_ZEROED)
-
-
-// Level 1 commands that are sent using the TXCI device request
-#define ST5481_CMD_DR 0x0 /* Deactivation Request */
-#define ST5481_CMD_RES 0x1 /* state machine RESet */
-#define ST5481_CMD_TM1 0x2 /* Test Mode 1 */
-#define ST5481_CMD_TM2 0x3 /* Test Mode 2 */
-#define ST5481_CMD_PUP 0x7 /* Power UP */
-#define ST5481_CMD_AR8 0x8 /* Activation Request class 1 */
-#define ST5481_CMD_AR10 0x9 /* Activation Request class 2 */
-#define ST5481_CMD_ARL 0xA /* Activation Request Loopback */
-#define ST5481_CMD_PDN 0xF /* Power DoWn */
-
-// Turn on/off the LEDs using the GPIO device request.
-// To use the B LEDs, number_of_leds must be set to 4
-#define B1_LED 0x10U
-#define B2_LED 0x20U
-#define GREEN_LED 0x40U
-#define RED_LED 0x80U
-
-// D channel out states
-enum {
- ST_DOUT_NONE,
-
- ST_DOUT_SHORT_INIT,
- ST_DOUT_SHORT_WAIT_DEN,
-
- ST_DOUT_LONG_INIT,
- ST_DOUT_LONG_WAIT_DEN,
- ST_DOUT_NORMAL,
-
- ST_DOUT_WAIT_FOR_UNDERRUN,
- ST_DOUT_WAIT_FOR_NOT_BUSY,
- ST_DOUT_WAIT_FOR_STOP,
- ST_DOUT_WAIT_FOR_RESET,
-};
-
-#define DOUT_STATE_COUNT (ST_DOUT_WAIT_FOR_RESET + 1)
-
-// D channel out events
-enum {
- EV_DOUT_START_XMIT,
- EV_DOUT_COMPLETE,
- EV_DOUT_DEN,
- EV_DOUT_RESETED,
- EV_DOUT_STOPPED,
- EV_DOUT_COLL,
- EV_DOUT_UNDERRUN,
-};
-
-#define DOUT_EVENT_COUNT (EV_DOUT_UNDERRUN + 1)
-
-// ----------------------------------------------------------------------
-
-enum {
- ST_L1_F3,
- ST_L1_F4,
- ST_L1_F6,
- ST_L1_F7,
- ST_L1_F8,
-};
-
-#define L1_STATE_COUNT (ST_L1_F8 + 1)
-
-// The first 16 entries match the Level 1 indications that
-// are found at offset 4 (CCIST) in the interrupt packet
-
-enum {
- EV_IND_DP, // 0000 Deactivation Pending
- EV_IND_1, // 0001
- EV_IND_2, // 0010
- EV_IND_3, // 0011
- EV_IND_RSY, // 0100 ReSYnchronizing
- EV_IND_5, // 0101
- EV_IND_6, // 0110
- EV_IND_7, // 0111
- EV_IND_AP, // 1000 Activation Pending
- EV_IND_9, // 1001
- EV_IND_10, // 1010
- EV_IND_11, // 1011
- EV_IND_AI8, // 1100 Activation Indication class 8
- EV_IND_AI10,// 1101 Activation Indication class 10
- EV_IND_AIL, // 1110 Activation Indication Loopback
- EV_IND_DI, // 1111 Deactivation Indication
- EV_PH_ACTIVATE_REQ,
- EV_PH_DEACTIVATE_REQ,
- EV_TIMER3,
-};
-
-#define L1_EVENT_COUNT (EV_TIMER3 + 1)
-
-#define ERR(format, arg...) \
- printk(KERN_ERR "%s:%s: " format "\n" , __FILE__, __func__ , ## arg)
-
-#define WARNING(format, arg...) \
- printk(KERN_WARNING "%s:%s: " format "\n" , __FILE__, __func__ , ## arg)
-
-#define INFO(format, arg...) \
- printk(KERN_INFO "%s:%s: " format "\n" , __FILE__, __func__ , ## arg)
-
-#include <linux/isdn/hdlc.h>
-#include "fsm.h"
-#include "hisax_if.h"
-#include <linux/skbuff.h>
-
-/* ======================================================================
- * FIFO handling
- */
-
-/* Generic FIFO structure */
-struct fifo {
- u_char r, w, count, size;
- spinlock_t lock;
-};
-
-/*
- * Init an FIFO
- */
-static inline void fifo_init(struct fifo *fifo, int size)
-{
- fifo->r = fifo->w = fifo->count = 0;
- fifo->size = size;
- spin_lock_init(&fifo->lock);
-}
-
-/*
- * Add an entry to the FIFO
- */
-static inline int fifo_add(struct fifo *fifo)
-{
- unsigned long flags;
- int index;
-
- if (!fifo) {
- return -1;
- }
-
- spin_lock_irqsave(&fifo->lock, flags);
- if (fifo->count == fifo->size) {
- // FIFO full
- index = -1;
- } else {
- // Return index where to get the next data to add to the FIFO
- index = fifo->w++ & (fifo->size - 1);
- fifo->count++;
- }
- spin_unlock_irqrestore(&fifo->lock, flags);
- return index;
-}
-
-/*
- * Remove an entry from the FIFO with the index returned.
- */
-static inline int fifo_remove(struct fifo *fifo)
-{
- unsigned long flags;
- int index;
-
- if (!fifo) {
- return -1;
- }
-
- spin_lock_irqsave(&fifo->lock, flags);
- if (!fifo->count) {
- // FIFO empty
- index = -1;
- } else {
- // Return index where to get the next data from the FIFO
- index = fifo->r++ & (fifo->size - 1);
- fifo->count--;
- }
- spin_unlock_irqrestore(&fifo->lock, flags);
-
- return index;
-}
-
-/* ======================================================================
- * control pipe
- */
-typedef void (*ctrl_complete_t)(void *);
-
-typedef struct ctrl_msg {
- struct usb_ctrlrequest dr;
- ctrl_complete_t complete;
- void *context;
-} ctrl_msg;
-
-/* FIFO of ctrl messages waiting to be sent */
-#define MAX_EP0_MSG 16
-struct ctrl_msg_fifo {
- struct fifo f;
- struct ctrl_msg data[MAX_EP0_MSG];
-};
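A minimal usage sketch for the index-based FIFO above (hypothetical helpers, not from the original driver): fifo_add()/fifo_remove() only hand out slot numbers, and the caller owns a parallel data array such as ctrl_msg_fifo's data[]. Note that the masking with size - 1 assumes the size passed to fifo_init() is a power of two, as MAX_EP0_MSG is.

static struct ctrl_msg *ctrl_msg_reserve(struct ctrl_msg_fifo *fifo)
{
	int i = fifo_add(&fifo->f);	/* reserve a slot, -1 if full */

	return (i < 0) ? NULL : &fifo->data[i];	/* caller fills in the request */
}

static struct ctrl_msg *ctrl_msg_oldest(struct ctrl_msg_fifo *fifo)
{
	int i = fifo_remove(&fifo->f);	/* oldest occupied slot, -1 if empty */

	return (i < 0) ? NULL : &fifo->data[i];
}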
-
-#define MAX_DFRAME_LEN_L1 300
-#define HSCX_BUFMAX 4096
-
-struct st5481_ctrl {
- struct ctrl_msg_fifo msg_fifo;
- unsigned long busy;
- struct urb *urb;
-};
-
-struct st5481_intr {
- // struct evt_fifo evt_fifo;
- struct urb *urb;
-};
-
-struct st5481_d_out {
- struct isdnhdlc_vars hdlc_state;
- struct urb *urb[2]; /* double buffering */
- unsigned long busy;
- struct sk_buff *tx_skb;
- struct FsmInst fsm;
-};
-
-struct st5481_b_out {
- struct isdnhdlc_vars hdlc_state;
- struct urb *urb[2]; /* double buffering */
- u_char flow_event;
- u_long busy;
- struct sk_buff *tx_skb;
-};
-
-struct st5481_in {
- struct isdnhdlc_vars hdlc_state;
- struct urb *urb[2]; /* double buffering */
- int mode;
- int bufsize;
- unsigned int num_packets;
- unsigned int packet_size;
- unsigned char ep, counter;
- unsigned char *rcvbuf;
- struct st5481_adapter *adapter;
- struct hisax_if *hisax_if;
-};
-
-int st5481_setup_in(struct st5481_in *in);
-void st5481_release_in(struct st5481_in *in);
-void st5481_in_mode(struct st5481_in *in, int mode);
-
-struct st5481_bcs {
- struct hisax_b_if b_if;
- struct st5481_adapter *adapter;
- struct st5481_in b_in;
- struct st5481_b_out b_out;
- int channel;
- int mode;
-};
-
-struct st5481_adapter {
- int number_of_leds;
- struct usb_device *usb_dev;
- struct hisax_d_if hisax_d_if;
-
- struct st5481_ctrl ctrl;
- struct st5481_intr intr;
- struct st5481_in d_in;
- struct st5481_d_out d_out;
-
- unsigned char leds;
- unsigned int led_counter;
-
- unsigned long event;
-
- struct FsmInst l1m;
- struct FsmTimer timer;
-
- struct st5481_bcs bcs[2];
-};
-
-#define TIMER3_VALUE 7000
-
-/* ======================================================================
- *
- */
-
-/*
- * Submit a URB with error reporting. This is a macro so that
- * __func__ expands to the name of the calling function.
- */
-#define SUBMIT_URB(urb, mem_flags) \
- ({ \
- int status; \
- if ((status = usb_submit_urb(urb, mem_flags)) < 0) { \
- WARNING("usb_submit_urb failed,status=%d", status); \
- } \
- status; \
- })
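Because the macro body is a GNU statement expression, SUBMIT_URB() both logs a failure with the caller's __func__ and still yields the usb_submit_urb() return value, so call sites may branch on it. A hedged usage sketch (the error label is illustrative, not taken from the driver):

	/* illustrative only: most call sites in this driver ignore the value */
	if (SUBMIT_URB(urb, GFP_ATOMIC) < 0) {
		/* the WARNING() has already been printed by the macro */
		goto drop_frame;	/* hypothetical error path */
	}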
-
-/*
- * USB double buffering, return the URB index (0 or 1).
- */
-static inline int get_buf_nr(struct urb *urbs[], struct urb *urb)
-{
- return (urbs[0] == urb ? 0 : 1);
-}
-
-/* ---------------------------------------------------------------------- */
-
-/* B Channel */
-
-int st5481_setup_b(struct st5481_bcs *bcs);
-void st5481_release_b(struct st5481_bcs *bcs);
-void st5481_d_l2l1(struct hisax_if *hisax_d_if, int pr, void *arg);
-
-/* D Channel */
-
-int st5481_setup_d(struct st5481_adapter *adapter);
-void st5481_release_d(struct st5481_adapter *adapter);
-void st5481_b_l2l1(struct hisax_if *b_if, int pr, void *arg);
-int st5481_d_init(void);
-void st5481_d_exit(void);
-
-/* USB */
-void st5481_ph_command(struct st5481_adapter *adapter, unsigned int command);
-int st5481_setup_isocpipes(struct urb *urb[2], struct usb_device *dev,
- unsigned int pipe, int num_packets,
- int packet_size, int buf_size,
- usb_complete_t complete, void *context);
-void st5481_release_isocpipes(struct urb *urb[2]);
-
-void st5481_usb_pipe_reset(struct st5481_adapter *adapter,
- u_char pipe, ctrl_complete_t complete, void *context);
-void st5481_usb_device_ctrl_msg(struct st5481_adapter *adapter,
- u8 request, u16 value,
- ctrl_complete_t complete, void *context);
-int st5481_setup_usb(struct st5481_adapter *adapter);
-void st5481_release_usb(struct st5481_adapter *adapter);
-void st5481_start(struct st5481_adapter *adapter);
-void st5481_stop(struct st5481_adapter *adapter);
-
-// ----------------------------------------------------------------------
-// debugging macros
-
-#define __debug_variable st5481_debug
-#include "hisax_debug.h"
-
-extern int st5481_debug;
-
-#ifdef CONFIG_HISAX_DEBUG
-
-#define DBG_ISO_PACKET(level, urb) \
- if (level & __debug_variable) dump_iso_packet(__func__, urb)
-
-static void __attribute__((unused))
-dump_iso_packet(const char *name, struct urb *urb)
-{
- int i, j;
- int len, ofs;
- u_char *data;
-
- printk(KERN_DEBUG "%s: packets=%d,errors=%d\n",
- name, urb->number_of_packets, urb->error_count);
- for (i = 0; i < urb->number_of_packets; ++i) {
- if (urb->pipe & USB_DIR_IN) {
- len = urb->iso_frame_desc[i].actual_length;
- } else {
- len = urb->iso_frame_desc[i].length;
- }
- ofs = urb->iso_frame_desc[i].offset;
- printk(KERN_DEBUG "len=%.2d,ofs=%.3d ", len, ofs);
- if (len) {
- data = urb->transfer_buffer + ofs;
- for (j = 0; j < len; j++) {
- printk("%.2x", data[j]);
- }
- }
- printk("\n");
- }
-}
-
-static inline const char *ST5481_CMD_string(int evt)
-{
- static char s[16];
-
- switch (evt) {
- case ST5481_CMD_DR: return "DR";
- case ST5481_CMD_RES: return "RES";
- case ST5481_CMD_TM1: return "TM1";
- case ST5481_CMD_TM2: return "TM2";
- case ST5481_CMD_PUP: return "PUP";
- case ST5481_CMD_AR8: return "AR8";
- case ST5481_CMD_AR10: return "AR10";
- case ST5481_CMD_ARL: return "ARL";
- case ST5481_CMD_PDN: return "PDN";
- }
-
- sprintf(s, "0x%x", evt);
- return s;
-}
-
-#else
-
-#define DBG_ISO_PACKET(level, urb) do {} while (0)
-
-#endif
-
-
-
-#endif
diff --git a/drivers/isdn/hisax/st5481_b.c b/drivers/isdn/hisax/st5481_b.c
deleted file mode 100644
index f64a36007800..000000000000
--- a/drivers/isdn/hisax/st5481_b.c
+++ /dev/null
@@ -1,380 +0,0 @@
-/*
- * Driver for ST5481 USB ISDN modem
- *
- * Author Frode Isaksen
- * Copyright 2001 by Frode Isaksen <fisaksen@bewan.com>
- * 2001 by Kai Germaschewski <kai.germaschewski@gmx.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/init.h>
-#include <linux/gfp.h>
-#include <linux/usb.h>
-#include <linux/netdevice.h>
-#include <linux/bitrev.h>
-#include "st5481.h"
-
-static inline void B_L1L2(struct st5481_bcs *bcs, int pr, void *arg)
-{
- struct hisax_if *ifc = (struct hisax_if *) &bcs->b_if;
-
- ifc->l1l2(ifc, pr, arg);
-}
-
-/*
- * Encode and transmit next frame.
- */
-static void usb_b_out(struct st5481_bcs *bcs, int buf_nr)
-{
- struct st5481_b_out *b_out = &bcs->b_out;
- struct st5481_adapter *adapter = bcs->adapter;
- struct urb *urb;
- unsigned int packet_size, offset;
- int len, buf_size, bytes_sent;
- int i;
- struct sk_buff *skb;
-
- if (test_and_set_bit(buf_nr, &b_out->busy)) {
- DBG(4, "ep %d urb %d busy", (bcs->channel + 1) * 2, buf_nr);
- return;
- }
- urb = b_out->urb[buf_nr];
-
- // Adjust isoc buffer size according to flow state
- if (b_out->flow_event & (OUT_DOWN | OUT_UNDERRUN)) {
- buf_size = NUM_ISO_PACKETS_B * SIZE_ISO_PACKETS_B_OUT + B_FLOW_ADJUST;
- packet_size = SIZE_ISO_PACKETS_B_OUT + B_FLOW_ADJUST;
- DBG(4, "B%d,adjust flow,add %d bytes", bcs->channel + 1, B_FLOW_ADJUST);
- } else if (b_out->flow_event & OUT_UP) {
- buf_size = NUM_ISO_PACKETS_B * SIZE_ISO_PACKETS_B_OUT - B_FLOW_ADJUST;
- packet_size = SIZE_ISO_PACKETS_B_OUT - B_FLOW_ADJUST;
- DBG(4, "B%d,adjust flow,remove %d bytes", bcs->channel + 1, B_FLOW_ADJUST);
- } else {
- buf_size = NUM_ISO_PACKETS_B * SIZE_ISO_PACKETS_B_OUT;
- packet_size = 8;
- }
- b_out->flow_event = 0;
-
- len = 0;
- while (len < buf_size) {
- if ((skb = b_out->tx_skb)) {
- DBG_SKB(0x100, skb);
- DBG(4, "B%d,len=%d", bcs->channel + 1, skb->len);
-
- if (bcs->mode == L1_MODE_TRANS) {
- bytes_sent = buf_size - len;
- if (skb->len < bytes_sent)
- bytes_sent = skb->len;
-				{ /* swap tx bytes to get audible audio data */
- register unsigned char *src = skb->data;
- register unsigned char *dest = urb->transfer_buffer + len;
- register unsigned int count;
- for (count = 0; count < bytes_sent; count++)
- *dest++ = bitrev8(*src++);
- }
- len += bytes_sent;
- } else {
- len += isdnhdlc_encode(&b_out->hdlc_state,
- skb->data, skb->len, &bytes_sent,
- urb->transfer_buffer + len, buf_size-len);
- }
-
- skb_pull(skb, bytes_sent);
-
- if (!skb->len) {
- // Frame sent
- b_out->tx_skb = NULL;
- B_L1L2(bcs, PH_DATA | CONFIRM, (void *)(unsigned long) skb->truesize);
- dev_kfree_skb_any(skb);
-
-/* if (!(bcs->tx_skb = skb_dequeue(&bcs->sq))) { */
-/* st5481B_sched_event(bcs, B_XMTBUFREADY); */
-/* } */
- }
- } else {
- if (bcs->mode == L1_MODE_TRANS) {
- memset(urb->transfer_buffer + len, 0xff, buf_size-len);
- len = buf_size;
- } else {
- // Send flags
- len += isdnhdlc_encode(&b_out->hdlc_state,
- NULL, 0, &bytes_sent,
- urb->transfer_buffer + len, buf_size-len);
- }
- }
- }
-
- // Prepare the URB
- for (i = 0, offset = 0; offset < len; i++) {
- urb->iso_frame_desc[i].offset = offset;
- urb->iso_frame_desc[i].length = packet_size;
- offset += packet_size;
- packet_size = SIZE_ISO_PACKETS_B_OUT;
- }
- urb->transfer_buffer_length = len;
- urb->number_of_packets = i;
- urb->dev = adapter->usb_dev;
-
- DBG_ISO_PACKET(0x200, urb);
-
- SUBMIT_URB(urb, GFP_NOIO);
-}
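In the transparent (audio) branch above, every payload byte is passed through bitrev8() before it is copied into the ISOC buffer, presumably because the chip shifts B-channel bits out in the opposite order from how the audio samples are stored. A standalone sketch of that per-byte reversal, with a local helper standing in for the kernel's bitrev8():

/*
 * Userspace sketch of the transparent-mode byte swap: reverse the bit
 * order of every byte before it is copied into the transfer buffer.
 * rev8() is a stand-in for the kernel's bitrev8(); data is invented.
 */
#include <stdio.h>

static unsigned char rev8(unsigned char b)
{
	b = (b & 0xF0) >> 4 | (b & 0x0F) << 4;
	b = (b & 0xCC) >> 2 | (b & 0x33) << 2;
	b = (b & 0xAA) >> 1 | (b & 0x55) << 1;
	return b;
}

int main(void)
{
	unsigned char sample[4] = { 0x01, 0x80, 0xA5, 0xFF };
	unsigned char iso_buf[4];
	unsigned int i;

	for (i = 0; i < sizeof(sample); i++)
		iso_buf[i] = rev8(sample[i]);

	for (i = 0; i < sizeof(iso_buf); i++)
		printf("%02x -> %02x\n", sample[i], iso_buf[i]);
	return 0;
}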
-
-/*
- * Start transferring (flags or data) on the B channel, since
- * FIFO counters has been set to a non-zero value.
- */
-static void st5481B_start_xfer(void *context)
-{
- struct st5481_bcs *bcs = context;
-
- DBG(4, "B%d", bcs->channel + 1);
-
- // Start transmitting (flags or data) on B channel
-
- usb_b_out(bcs, 0);
- usb_b_out(bcs, 1);
-}
-
-/*
- * If the adapter has only 2 LEDs, the green
- * LED will blink at a rate that depends
- * on the number of open channels.
- */
-static void led_blink(struct st5481_adapter *adapter)
-{
- u_char leds = adapter->leds;
-
- // 50 frames/sec for each channel
- if (++adapter->led_counter % 50) {
- return;
- }
-
- if (adapter->led_counter % 100) {
- leds |= GREEN_LED;
- } else {
- leds &= ~GREEN_LED;
- }
-
- st5481_usb_device_ctrl_msg(adapter, GPIO_OUT, leds, NULL, NULL);
-}
-
-static void usb_b_out_complete(struct urb *urb)
-{
- struct st5481_bcs *bcs = urb->context;
- struct st5481_b_out *b_out = &bcs->b_out;
- struct st5481_adapter *adapter = bcs->adapter;
- int buf_nr;
-
- buf_nr = get_buf_nr(b_out->urb, urb);
- test_and_clear_bit(buf_nr, &b_out->busy);
-
- if (unlikely(urb->status < 0)) {
- switch (urb->status) {
- case -ENOENT:
- case -ESHUTDOWN:
- case -ECONNRESET:
- DBG(4, "urb killed status %d", urb->status);
- return; // Give up
- default:
- WARNING("urb status %d", urb->status);
- if (b_out->busy == 0) {
- st5481_usb_pipe_reset(adapter, (bcs->channel + 1) * 2 | USB_DIR_OUT, NULL, NULL);
- }
- break;
- }
- }
-
- usb_b_out(bcs, buf_nr);
-
- if (adapter->number_of_leds == 2)
- led_blink(adapter);
-}
-
-/*
- * Start or stop the transfer on the B channel.
- */
-static void st5481B_mode(struct st5481_bcs *bcs, int mode)
-{
- struct st5481_b_out *b_out = &bcs->b_out;
- struct st5481_adapter *adapter = bcs->adapter;
-
- DBG(4, "B%d,mode=%d", bcs->channel + 1, mode);
-
- if (bcs->mode == mode)
- return;
-
- bcs->mode = mode;
-
- // Cancel all USB transfers on this B channel
- usb_unlink_urb(b_out->urb[0]);
- usb_unlink_urb(b_out->urb[1]);
- b_out->busy = 0;
-
- st5481_in_mode(&bcs->b_in, mode);
- if (bcs->mode != L1_MODE_NULL) {
- // Open the B channel
- if (bcs->mode != L1_MODE_TRANS) {
- u32 features = HDLC_BITREVERSE;
- if (bcs->mode == L1_MODE_HDLC_56K)
- features |= HDLC_56KBIT;
- isdnhdlc_out_init(&b_out->hdlc_state, features);
- }
- st5481_usb_pipe_reset(adapter, (bcs->channel + 1) * 2, NULL, NULL);
-
- // Enable B channel interrupts
- st5481_usb_device_ctrl_msg(adapter, FFMSK_B1 + (bcs->channel * 2),
- OUT_UP + OUT_DOWN + OUT_UNDERRUN, NULL, NULL);
-
- // Enable B channel FIFOs
- st5481_usb_device_ctrl_msg(adapter, OUT_B1_COUNTER+(bcs->channel * 2), 32, st5481B_start_xfer, bcs);
- if (adapter->number_of_leds == 4) {
- if (bcs->channel == 0) {
- adapter->leds |= B1_LED;
- } else {
- adapter->leds |= B2_LED;
- }
- }
- } else {
- // Disable B channel interrupts
- st5481_usb_device_ctrl_msg(adapter, FFMSK_B1+(bcs->channel * 2), 0, NULL, NULL);
-
- // Disable B channel FIFOs
- st5481_usb_device_ctrl_msg(adapter, OUT_B1_COUNTER+(bcs->channel * 2), 0, NULL, NULL);
-
- if (adapter->number_of_leds == 4) {
- if (bcs->channel == 0) {
- adapter->leds &= ~B1_LED;
- } else {
- adapter->leds &= ~B2_LED;
- }
- } else {
- st5481_usb_device_ctrl_msg(adapter, GPIO_OUT, adapter->leds, NULL, NULL);
- }
- if (b_out->tx_skb) {
- dev_kfree_skb_any(b_out->tx_skb);
- b_out->tx_skb = NULL;
- }
-
- }
-}
-
-static int st5481_setup_b_out(struct st5481_bcs *bcs)
-{
- struct usb_device *dev = bcs->adapter->usb_dev;
- struct usb_interface *intf;
- struct usb_host_interface *altsetting = NULL;
- struct usb_host_endpoint *endpoint;
- struct st5481_b_out *b_out = &bcs->b_out;
-
- DBG(4, "");
-
- intf = usb_ifnum_to_if(dev, 0);
- if (intf)
- altsetting = usb_altnum_to_altsetting(intf, 3);
- if (!altsetting)
- return -ENXIO;
-
- // Allocate URBs and buffers for the B channel out
- endpoint = &altsetting->endpoint[EP_B1_OUT - 1 + bcs->channel * 2];
-
- DBG(4, "endpoint address=%02x,packet size=%d",
- endpoint->desc.bEndpointAddress, le16_to_cpu(endpoint->desc.wMaxPacketSize));
-
-	// Allocate memory for 8000 bytes/sec plus extra bytes in case of underrun
- return st5481_setup_isocpipes(b_out->urb, dev,
- usb_sndisocpipe(dev, endpoint->desc.bEndpointAddress),
- NUM_ISO_PACKETS_B, SIZE_ISO_PACKETS_B_OUT,
- NUM_ISO_PACKETS_B * SIZE_ISO_PACKETS_B_OUT + B_FLOW_ADJUST,
- usb_b_out_complete, bcs);
-}
-
-static void st5481_release_b_out(struct st5481_bcs *bcs)
-{
- struct st5481_b_out *b_out = &bcs->b_out;
-
- DBG(4, "");
-
- st5481_release_isocpipes(b_out->urb);
-}
-
-int st5481_setup_b(struct st5481_bcs *bcs)
-{
- int retval;
-
- DBG(4, "");
-
- retval = st5481_setup_b_out(bcs);
- if (retval)
- goto err;
- bcs->b_in.bufsize = HSCX_BUFMAX;
- bcs->b_in.num_packets = NUM_ISO_PACKETS_B;
- bcs->b_in.packet_size = SIZE_ISO_PACKETS_B_IN;
- bcs->b_in.ep = (bcs->channel ? EP_B2_IN : EP_B1_IN) | USB_DIR_IN;
- bcs->b_in.counter = bcs->channel ? IN_B2_COUNTER : IN_B1_COUNTER;
- bcs->b_in.adapter = bcs->adapter;
- bcs->b_in.hisax_if = &bcs->b_if.ifc;
- retval = st5481_setup_in(&bcs->b_in);
- if (retval)
- goto err_b_out;
-
-
- return 0;
-
-err_b_out:
- st5481_release_b_out(bcs);
-err:
- return retval;
-}
-
-/*
- * Release buffers and URBs for the B channel.
- */
-void st5481_release_b(struct st5481_bcs *bcs)
-{
- DBG(4, "");
-
- st5481_release_in(&bcs->b_in);
- st5481_release_b_out(bcs);
-}
-
-/*
- * st5481_b_l2l1 is the entry point for upper layer routines that want to
- * transmit on the B channel. PH_DATA | REQUEST is a normal packet that
- * we either start transmitting (if idle) or queue (if busy).
- * PH_PULL | REQUEST can be called to request a callback message
- * (PH_PULL | CONFIRM)
- * once the link is idle. After a "pull" callback, the upper layer
- * routines can use PH_PULL | INDICATION to send data.
- */
-void st5481_b_l2l1(struct hisax_if *ifc, int pr, void *arg)
-{
- struct st5481_bcs *bcs = ifc->priv;
- struct sk_buff *skb = arg;
- long mode;
-
- DBG(4, "");
-
- switch (pr) {
- case PH_DATA | REQUEST:
- BUG_ON(bcs->b_out.tx_skb);
- bcs->b_out.tx_skb = skb;
- break;
- case PH_ACTIVATE | REQUEST:
- mode = (long) arg;
- DBG(4, "B%d,PH_ACTIVATE_REQUEST %ld", bcs->channel + 1, mode);
- st5481B_mode(bcs, mode);
- B_L1L2(bcs, PH_ACTIVATE | INDICATION, NULL);
- break;
- case PH_DEACTIVATE | REQUEST:
- DBG(4, "B%d,PH_DEACTIVATE_REQUEST", bcs->channel + 1);
- st5481B_mode(bcs, L1_MODE_NULL);
- B_L1L2(bcs, PH_DEACTIVATE | INDICATION, NULL);
- break;
- default:
- WARNING("pr %#x\n", pr);
- }
-}
diff --git a/drivers/isdn/hisax/st5481_d.c b/drivers/isdn/hisax/st5481_d.c
deleted file mode 100644
index e88c5c71fca7..000000000000
--- a/drivers/isdn/hisax/st5481_d.c
+++ /dev/null
@@ -1,780 +0,0 @@
-/*
- * Driver for ST5481 USB ISDN modem
- *
- * Author Frode Isaksen
- * Copyright 2001 by Frode Isaksen <fisaksen@bewan.com>
- * 2001 by Kai Germaschewski <kai.germaschewski@gmx.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/init.h>
-#include <linux/gfp.h>
-#include <linux/usb.h>
-#include <linux/netdevice.h>
-#include "st5481.h"
-
-static void ph_connect(struct st5481_adapter *adapter);
-static void ph_disconnect(struct st5481_adapter *adapter);
-
-static struct Fsm l1fsm;
-
-static char *strL1State[] =
-{
- "ST_L1_F3",
- "ST_L1_F4",
- "ST_L1_F6",
- "ST_L1_F7",
- "ST_L1_F8",
-};
-
-static char *strL1Event[] =
-{
- "EV_IND_DP",
- "EV_IND_1",
- "EV_IND_2",
- "EV_IND_3",
- "EV_IND_RSY",
- "EV_IND_5",
- "EV_IND_6",
- "EV_IND_7",
- "EV_IND_AP",
- "EV_IND_9",
- "EV_IND_10",
- "EV_IND_11",
- "EV_IND_AI8",
- "EV_IND_AI10",
- "EV_IND_AIL",
- "EV_IND_DI",
- "EV_PH_ACTIVATE_REQ",
- "EV_PH_DEACTIVATE_REQ",
- "EV_TIMER3",
-};
-
-static inline void D_L1L2(struct st5481_adapter *adapter, int pr, void *arg)
-{
- struct hisax_if *ifc = (struct hisax_if *) &adapter->hisax_d_if;
-
- ifc->l1l2(ifc, pr, arg);
-}
-
-static void
-l1_go_f3(struct FsmInst *fi, int event, void *arg)
-{
- struct st5481_adapter *adapter = fi->userdata;
-
- if (fi->state == ST_L1_F7)
- ph_disconnect(adapter);
-
- FsmChangeState(fi, ST_L1_F3);
- D_L1L2(adapter, PH_DEACTIVATE | INDICATION, NULL);
-}
-
-static void
-l1_go_f6(struct FsmInst *fi, int event, void *arg)
-{
- struct st5481_adapter *adapter = fi->userdata;
-
- if (fi->state == ST_L1_F7)
- ph_disconnect(adapter);
-
- FsmChangeState(fi, ST_L1_F6);
-}
-
-static void
-l1_go_f7(struct FsmInst *fi, int event, void *arg)
-{
- struct st5481_adapter *adapter = fi->userdata;
-
- FsmDelTimer(&adapter->timer, 0);
- ph_connect(adapter);
- FsmChangeState(fi, ST_L1_F7);
- D_L1L2(adapter, PH_ACTIVATE | INDICATION, NULL);
-}
-
-static void
-l1_go_f8(struct FsmInst *fi, int event, void *arg)
-{
- struct st5481_adapter *adapter = fi->userdata;
-
- if (fi->state == ST_L1_F7)
- ph_disconnect(adapter);
-
- FsmChangeState(fi, ST_L1_F8);
-}
-
-static void
-l1_timer3(struct FsmInst *fi, int event, void *arg)
-{
- struct st5481_adapter *adapter = fi->userdata;
-
- st5481_ph_command(adapter, ST5481_CMD_DR);
- FsmChangeState(fi, ST_L1_F3);
- D_L1L2(adapter, PH_DEACTIVATE | INDICATION, NULL);
-}
-
-static void
-l1_ignore(struct FsmInst *fi, int event, void *arg)
-{
-}
-
-static void
-l1_activate(struct FsmInst *fi, int event, void *arg)
-{
- struct st5481_adapter *adapter = fi->userdata;
-
- st5481_ph_command(adapter, ST5481_CMD_DR);
- st5481_ph_command(adapter, ST5481_CMD_PUP);
- FsmRestartTimer(&adapter->timer, TIMER3_VALUE, EV_TIMER3, NULL, 2);
- st5481_ph_command(adapter, ST5481_CMD_AR8);
- FsmChangeState(fi, ST_L1_F4);
-}
-
-static struct FsmNode L1FnList[] __initdata =
-{
- {ST_L1_F3, EV_IND_DP, l1_ignore},
- {ST_L1_F3, EV_IND_AP, l1_go_f6},
- {ST_L1_F3, EV_IND_AI8, l1_go_f7},
- {ST_L1_F3, EV_IND_AI10, l1_go_f7},
- {ST_L1_F3, EV_PH_ACTIVATE_REQ, l1_activate},
-
- {ST_L1_F4, EV_TIMER3, l1_timer3},
- {ST_L1_F4, EV_IND_DP, l1_go_f3},
- {ST_L1_F4, EV_IND_AP, l1_go_f6},
- {ST_L1_F4, EV_IND_AI8, l1_go_f7},
- {ST_L1_F4, EV_IND_AI10, l1_go_f7},
-
- {ST_L1_F6, EV_TIMER3, l1_timer3},
- {ST_L1_F6, EV_IND_DP, l1_go_f3},
- {ST_L1_F6, EV_IND_AP, l1_ignore},
- {ST_L1_F6, EV_IND_AI8, l1_go_f7},
- {ST_L1_F6, EV_IND_AI10, l1_go_f7},
- {ST_L1_F7, EV_IND_RSY, l1_go_f8},
-
- {ST_L1_F7, EV_IND_DP, l1_go_f3},
- {ST_L1_F7, EV_IND_AP, l1_go_f6},
- {ST_L1_F7, EV_IND_AI8, l1_ignore},
- {ST_L1_F7, EV_IND_AI10, l1_ignore},
- {ST_L1_F7, EV_IND_RSY, l1_go_f8},
-
- {ST_L1_F8, EV_TIMER3, l1_timer3},
- {ST_L1_F8, EV_IND_DP, l1_go_f3},
- {ST_L1_F8, EV_IND_AP, l1_go_f6},
- {ST_L1_F8, EV_IND_AI8, l1_go_f8},
- {ST_L1_F8, EV_IND_AI10, l1_go_f8},
- {ST_L1_F8, EV_IND_RSY, l1_ignore},
-};
-
-static __printf(2, 3)
- void l1m_debug(struct FsmInst *fi, char *fmt, ...)
-{
- va_list args;
- char buf[256];
-
- va_start(args, fmt);
- vsnprintf(buf, sizeof(buf), fmt, args);
- DBG(8, "%s", buf);
- va_end(args);
-}
-
-/* ======================================================================
- * D-Channel out
- */
-
-/*
- D OUT state machine:
- ====================
-
- Transmit short frame (< 16 bytes of encoded data):
-
- L1 FRAME D_OUT_STATE USB D CHANNEL
- -------- ----------- --- ---------
-
- FIXME
-
- -> [xx..xx] SHORT_INIT -> [7Exx..xxC1C27EFF]
- SHORT_WAIT_DEN <> OUT_D_COUNTER=16
-
- END_OF_SHORT <- DEN_EVENT -> 7Exx
- xxxx
- xxxx
- xxxx
- xxxx
- xxxx
- C1C1
- 7EFF
- WAIT_FOR_RESET_IDLE <- D_UNDERRUN <- (8ms)
- IDLE <> Reset pipe
-
-
-
- Transmit long frame (>= 16 bytes of encoded data):
-
- L1 FRAME D_OUT_STATE USB D CHANNEL
- -------- ----------- --- ---------
-
- -> [xx...xx] IDLE
- WAIT_FOR_STOP <> OUT_D_COUNTER=0
- WAIT_FOR_RESET <> Reset pipe
- STOP
- INIT_LONG_FRAME -> [7Exx..xx]
- WAIT_DEN <> OUT_D_COUNTER=16
- OUT_NORMAL <- DEN_EVENT -> 7Exx
- END_OF_FRAME_BUSY -> [xxxx] xxxx
- END_OF_FRAME_NOT_BUSY -> [xxxx] xxxx
- -> [xxxx] xxxx
- -> [C1C2] xxxx
- -> [7EFF] xxxx
- xxxx
- xxxx
- ....
- xxxx
- C1C2
- 7EFF
- <- D_UNDERRUN <- (> 8ms)
- WAIT_FOR_STOP <> OUT_D_COUNTER=0
- WAIT_FOR_RESET <> Reset pipe
- STOP
-
-*/
-
-static struct Fsm dout_fsm;
-
-static char *strDoutState[] =
-{
- "ST_DOUT_NONE",
-
- "ST_DOUT_SHORT_INIT",
- "ST_DOUT_SHORT_WAIT_DEN",
-
- "ST_DOUT_LONG_INIT",
- "ST_DOUT_LONG_WAIT_DEN",
- "ST_DOUT_NORMAL",
-
- "ST_DOUT_WAIT_FOR_UNDERRUN",
- "ST_DOUT_WAIT_FOR_NOT_BUSY",
- "ST_DOUT_WAIT_FOR_STOP",
- "ST_DOUT_WAIT_FOR_RESET",
-};
-
-static char *strDoutEvent[] =
-{
- "EV_DOUT_START_XMIT",
- "EV_DOUT_COMPLETE",
- "EV_DOUT_DEN",
- "EV_DOUT_RESETED",
- "EV_DOUT_STOPPED",
- "EV_DOUT_COLL",
- "EV_DOUT_UNDERRUN",
-};
-
-static __printf(2, 3)
- void dout_debug(struct FsmInst *fi, char *fmt, ...)
-{
- va_list args;
- char buf[256];
-
- va_start(args, fmt);
- vsnprintf(buf, sizeof(buf), fmt, args);
- DBG(0x2, "%s", buf);
- va_end(args);
-}
-
-static void dout_stop_event(void *context)
-{
- struct st5481_adapter *adapter = context;
-
- FsmEvent(&adapter->d_out.fsm, EV_DOUT_STOPPED, NULL);
-}
-
-/*
- * Start the transfer of a D channel frame.
- */
-static void usb_d_out(struct st5481_adapter *adapter, int buf_nr)
-{
- struct st5481_d_out *d_out = &adapter->d_out;
- struct urb *urb;
- unsigned int num_packets, packet_offset;
- int len, buf_size, bytes_sent;
- struct sk_buff *skb;
- struct usb_iso_packet_descriptor *desc;
-
- if (d_out->fsm.state != ST_DOUT_NORMAL)
- return;
-
- if (test_and_set_bit(buf_nr, &d_out->busy)) {
- DBG(2, "ep %d urb %d busy %#lx", EP_D_OUT, buf_nr, d_out->busy);
- return;
- }
- urb = d_out->urb[buf_nr];
-
- skb = d_out->tx_skb;
-
- buf_size = NUM_ISO_PACKETS_D * SIZE_ISO_PACKETS_D_OUT;
-
- if (skb) {
- len = isdnhdlc_encode(&d_out->hdlc_state,
- skb->data, skb->len, &bytes_sent,
- urb->transfer_buffer, buf_size);
- skb_pull(skb, bytes_sent);
- } else {
- // Send flags or idle
- len = isdnhdlc_encode(&d_out->hdlc_state,
- NULL, 0, &bytes_sent,
- urb->transfer_buffer, buf_size);
- }
-
- if (len < buf_size) {
- FsmChangeState(&d_out->fsm, ST_DOUT_WAIT_FOR_UNDERRUN);
- }
- if (skb && !skb->len) {
- d_out->tx_skb = NULL;
- D_L1L2(adapter, PH_DATA | CONFIRM, NULL);
- dev_kfree_skb_any(skb);
- }
-
- // Prepare the URB
- urb->transfer_buffer_length = len;
- num_packets = 0;
- packet_offset = 0;
- while (packet_offset < len) {
- desc = &urb->iso_frame_desc[num_packets];
- desc->offset = packet_offset;
- desc->length = SIZE_ISO_PACKETS_D_OUT;
- if (len - packet_offset < desc->length)
- desc->length = len - packet_offset;
- num_packets++;
- packet_offset += desc->length;
- }
- urb->number_of_packets = num_packets;
-
- // Prepare the URB
- urb->dev = adapter->usb_dev;
- // Need to transmit the next buffer 2ms after the DEN_EVENT
- urb->transfer_flags = 0;
- urb->start_frame = usb_get_current_frame_number(adapter->usb_dev) + 2;
-
- DBG_ISO_PACKET(0x20, urb);
-
- if (usb_submit_urb(urb, GFP_KERNEL) < 0) {
- // There is another URB queued up
- urb->transfer_flags = URB_ISO_ASAP;
- SUBMIT_URB(urb, GFP_KERNEL);
- }
-}
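The descriptor loop above slices the HDLC-encoded buffer into ISOC packets of SIZE_ISO_PACKETS_D_OUT bytes, with a shorter final packet when the encoder produced less than a full multiple. A standalone sketch of that slicing; the 16-byte packet size is assumed purely for illustration:

/*
 * Standalone sketch of the descriptor slicing done in usb_d_out():
 * split "len" bytes into packets of PKT_SIZE, last one possibly short.
 * PKT_SIZE is an illustrative stand-in for SIZE_ISO_PACKETS_D_OUT.
 */
#include <stdio.h>

#define PKT_SIZE 16

struct pkt_desc { unsigned int offset, length; };

static unsigned int slice(struct pkt_desc *desc, unsigned int len)
{
	unsigned int n = 0, off = 0;

	while (off < len) {
		desc[n].offset = off;
		desc[n].length = (len - off < PKT_SIZE) ? len - off : PKT_SIZE;
		off += desc[n].length;
		n++;
	}
	return n;			/* becomes urb->number_of_packets */
}

int main(void)
{
	struct pkt_desc desc[8];
	unsigned int i, n = slice(desc, 41);

	for (i = 0; i < n; i++)
		printf("packet %u: offset=%u length=%u\n",
		       i, desc[i].offset, desc[i].length);
	return 0;
}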
-
-static void fifo_reseted(void *context)
-{
- struct st5481_adapter *adapter = context;
-
- FsmEvent(&adapter->d_out.fsm, EV_DOUT_RESETED, NULL);
-}
-
-static void usb_d_out_complete(struct urb *urb)
-{
- struct st5481_adapter *adapter = urb->context;
- struct st5481_d_out *d_out = &adapter->d_out;
- long buf_nr;
-
- DBG(2, "");
-
- buf_nr = get_buf_nr(d_out->urb, urb);
- test_and_clear_bit(buf_nr, &d_out->busy);
-
- if (unlikely(urb->status < 0)) {
- switch (urb->status) {
- case -ENOENT:
- case -ESHUTDOWN:
- case -ECONNRESET:
- DBG(1, "urb killed status %d", urb->status);
- break;
- default:
- WARNING("urb status %d", urb->status);
- if (d_out->busy == 0) {
- st5481_usb_pipe_reset(adapter, EP_D_OUT | USB_DIR_OUT, fifo_reseted, adapter);
- }
- break;
- }
- return; // Give up
- }
-
- FsmEvent(&adapter->d_out.fsm, EV_DOUT_COMPLETE, (void *) buf_nr);
-}
-
-/* ====================================================================== */
-
-static void dout_start_xmit(struct FsmInst *fsm, int event, void *arg)
-{
- // FIXME unify?
- struct st5481_adapter *adapter = fsm->userdata;
- struct st5481_d_out *d_out = &adapter->d_out;
- struct urb *urb;
- int len, bytes_sent;
- struct sk_buff *skb;
- int buf_nr = 0;
-
- skb = d_out->tx_skb;
-
- DBG(2, "len=%d", skb->len);
-
- isdnhdlc_out_init(&d_out->hdlc_state, HDLC_DCHANNEL | HDLC_BITREVERSE);
-
- if (test_and_set_bit(buf_nr, &d_out->busy)) {
- WARNING("ep %d urb %d busy %#lx", EP_D_OUT, buf_nr, d_out->busy);
- return;
- }
- urb = d_out->urb[buf_nr];
-
- DBG_SKB(0x10, skb);
- len = isdnhdlc_encode(&d_out->hdlc_state,
- skb->data, skb->len, &bytes_sent,
- urb->transfer_buffer, 16);
- skb_pull(skb, bytes_sent);
-
- if (len < 16)
- FsmChangeState(&d_out->fsm, ST_DOUT_SHORT_INIT);
- else
- FsmChangeState(&d_out->fsm, ST_DOUT_LONG_INIT);
-
- if (skb->len == 0) {
- d_out->tx_skb = NULL;
- D_L1L2(adapter, PH_DATA | CONFIRM, NULL);
- dev_kfree_skb_any(skb);
- }
-
-// Prepare the URB
- urb->transfer_buffer_length = len;
-
- urb->iso_frame_desc[0].offset = 0;
- urb->iso_frame_desc[0].length = len;
- urb->number_of_packets = 1;
-
- // Prepare the URB
- urb->dev = adapter->usb_dev;
- urb->transfer_flags = URB_ISO_ASAP;
-
- DBG_ISO_PACKET(0x20, urb);
- SUBMIT_URB(urb, GFP_KERNEL);
-}
-
-static void dout_short_fifo(struct FsmInst *fsm, int event, void *arg)
-{
- struct st5481_adapter *adapter = fsm->userdata;
- struct st5481_d_out *d_out = &adapter->d_out;
-
- FsmChangeState(&d_out->fsm, ST_DOUT_SHORT_WAIT_DEN);
- st5481_usb_device_ctrl_msg(adapter, OUT_D_COUNTER, 16, NULL, NULL);
-}
-
-static void dout_end_short_frame(struct FsmInst *fsm, int event, void *arg)
-{
- struct st5481_adapter *adapter = fsm->userdata;
- struct st5481_d_out *d_out = &adapter->d_out;
-
- FsmChangeState(&d_out->fsm, ST_DOUT_WAIT_FOR_UNDERRUN);
-}
-
-static void dout_long_enable_fifo(struct FsmInst *fsm, int event, void *arg)
-{
- struct st5481_adapter *adapter = fsm->userdata;
- struct st5481_d_out *d_out = &adapter->d_out;
-
- st5481_usb_device_ctrl_msg(adapter, OUT_D_COUNTER, 16, NULL, NULL);
- FsmChangeState(&d_out->fsm, ST_DOUT_LONG_WAIT_DEN);
-}
-
-static void dout_long_den(struct FsmInst *fsm, int event, void *arg)
-{
- struct st5481_adapter *adapter = fsm->userdata;
- struct st5481_d_out *d_out = &adapter->d_out;
-
- FsmChangeState(&d_out->fsm, ST_DOUT_NORMAL);
- usb_d_out(adapter, 0);
- usb_d_out(adapter, 1);
-}
-
-static void dout_reset(struct FsmInst *fsm, int event, void *arg)
-{
- struct st5481_adapter *adapter = fsm->userdata;
- struct st5481_d_out *d_out = &adapter->d_out;
-
- FsmChangeState(&d_out->fsm, ST_DOUT_WAIT_FOR_RESET);
- st5481_usb_pipe_reset(adapter, EP_D_OUT | USB_DIR_OUT, fifo_reseted, adapter);
-}
-
-static void dout_stop(struct FsmInst *fsm, int event, void *arg)
-{
- struct st5481_adapter *adapter = fsm->userdata;
- struct st5481_d_out *d_out = &adapter->d_out;
-
- FsmChangeState(&d_out->fsm, ST_DOUT_WAIT_FOR_STOP);
- st5481_usb_device_ctrl_msg(adapter, OUT_D_COUNTER, 0, dout_stop_event, adapter);
-}
-
-static void dout_underrun(struct FsmInst *fsm, int event, void *arg)
-{
- struct st5481_adapter *adapter = fsm->userdata;
- struct st5481_d_out *d_out = &adapter->d_out;
-
- if (test_bit(0, &d_out->busy) || test_bit(1, &d_out->busy)) {
- FsmChangeState(&d_out->fsm, ST_DOUT_WAIT_FOR_NOT_BUSY);
- } else {
- dout_stop(fsm, event, arg);
- }
-}
-
-static void dout_check_busy(struct FsmInst *fsm, int event, void *arg)
-{
- struct st5481_adapter *adapter = fsm->userdata;
- struct st5481_d_out *d_out = &adapter->d_out;
-
- if (!test_bit(0, &d_out->busy) && !test_bit(1, &d_out->busy))
- dout_stop(fsm, event, arg);
-}
-
-static void dout_reseted(struct FsmInst *fsm, int event, void *arg)
-{
- struct st5481_adapter *adapter = fsm->userdata;
- struct st5481_d_out *d_out = &adapter->d_out;
-
- FsmChangeState(&d_out->fsm, ST_DOUT_NONE);
- // FIXME locking
- if (d_out->tx_skb)
- FsmEvent(&d_out->fsm, EV_DOUT_START_XMIT, NULL);
-}
-
-static void dout_complete(struct FsmInst *fsm, int event, void *arg)
-{
- struct st5481_adapter *adapter = fsm->userdata;
- long buf_nr = (long) arg;
-
- usb_d_out(adapter, buf_nr);
-}
-
-static void dout_ignore(struct FsmInst *fsm, int event, void *arg)
-{
-}
-
-static struct FsmNode DoutFnList[] __initdata =
-{
- {ST_DOUT_NONE, EV_DOUT_START_XMIT, dout_start_xmit},
-
- {ST_DOUT_SHORT_INIT, EV_DOUT_COMPLETE, dout_short_fifo},
-
- {ST_DOUT_SHORT_WAIT_DEN, EV_DOUT_DEN, dout_end_short_frame},
- {ST_DOUT_SHORT_WAIT_DEN, EV_DOUT_UNDERRUN, dout_underrun},
-
- {ST_DOUT_LONG_INIT, EV_DOUT_COMPLETE, dout_long_enable_fifo},
-
- {ST_DOUT_LONG_WAIT_DEN, EV_DOUT_DEN, dout_long_den},
- {ST_DOUT_LONG_WAIT_DEN, EV_DOUT_UNDERRUN, dout_underrun},
-
- {ST_DOUT_NORMAL, EV_DOUT_UNDERRUN, dout_underrun},
- {ST_DOUT_NORMAL, EV_DOUT_COMPLETE, dout_complete},
-
- {ST_DOUT_WAIT_FOR_UNDERRUN, EV_DOUT_UNDERRUN, dout_underrun},
- {ST_DOUT_WAIT_FOR_UNDERRUN, EV_DOUT_COMPLETE, dout_ignore},
-
- {ST_DOUT_WAIT_FOR_NOT_BUSY, EV_DOUT_COMPLETE, dout_check_busy},
-
- {ST_DOUT_WAIT_FOR_STOP, EV_DOUT_STOPPED, dout_reset},
-
- {ST_DOUT_WAIT_FOR_RESET, EV_DOUT_RESETED, dout_reseted},
-};
-
-void st5481_d_l2l1(struct hisax_if *hisax_d_if, int pr, void *arg)
-{
- struct st5481_adapter *adapter = hisax_d_if->priv;
- struct sk_buff *skb = arg;
-
- switch (pr) {
- case PH_ACTIVATE | REQUEST:
- FsmEvent(&adapter->l1m, EV_PH_ACTIVATE_REQ, NULL);
- break;
- case PH_DEACTIVATE | REQUEST:
- FsmEvent(&adapter->l1m, EV_PH_DEACTIVATE_REQ, NULL);
- break;
- case PH_DATA | REQUEST:
- DBG(2, "PH_DATA REQUEST len %d", skb->len);
- BUG_ON(adapter->d_out.tx_skb);
- adapter->d_out.tx_skb = skb;
- FsmEvent(&adapter->d_out.fsm, EV_DOUT_START_XMIT, NULL);
- break;
- default:
- WARNING("pr %#x\n", pr);
- break;
- }
-}
-
-/* ======================================================================
- */
-
-/*
- * Start receiving on the D channel now that state F7 has been entered.
- */
-static void ph_connect(struct st5481_adapter *adapter)
-{
- struct st5481_d_out *d_out = &adapter->d_out;
- struct st5481_in *d_in = &adapter->d_in;
-
- DBG(8, "");
-
- FsmChangeState(&d_out->fsm, ST_DOUT_NONE);
-
- // st5481_usb_device_ctrl_msg(adapter, FFMSK_D, OUT_UNDERRUN, NULL, NULL);
- st5481_usb_device_ctrl_msg(adapter, FFMSK_D, 0xfc, NULL, NULL);
- st5481_in_mode(d_in, L1_MODE_HDLC);
-
-#ifdef LOOPBACK
- // Turn loopback on (data sent on B and D looped back)
-	st5481_usb_device_ctrl_msg(adapter, LBB, 0x04, NULL, NULL);
-#endif
-
- st5481_usb_pipe_reset(adapter, EP_D_OUT | USB_DIR_OUT, NULL, NULL);
-
- // Turn on the green LED to tell that we are in state F7
- adapter->leds |= GREEN_LED;
- st5481_usb_device_ctrl_msg(adapter, GPIO_OUT, adapter->leds, NULL, NULL);
-}
-
-/*
- * Stop receiving on the D channel since we are no longer in state F7.
- */
-static void ph_disconnect(struct st5481_adapter *adapter)
-{
- DBG(8, "");
-
- st5481_in_mode(&adapter->d_in, L1_MODE_NULL);
-
- // Turn off the green LED to tell that we left state F7
- adapter->leds &= ~GREEN_LED;
- st5481_usb_device_ctrl_msg(adapter, GPIO_OUT, adapter->leds, NULL, NULL);
-}
-
-static int st5481_setup_d_out(struct st5481_adapter *adapter)
-{
- struct usb_device *dev = adapter->usb_dev;
- struct usb_interface *intf;
- struct usb_host_interface *altsetting = NULL;
- struct usb_host_endpoint *endpoint;
- struct st5481_d_out *d_out = &adapter->d_out;
-
- DBG(2, "");
-
- intf = usb_ifnum_to_if(dev, 0);
- if (intf)
- altsetting = usb_altnum_to_altsetting(intf, 3);
- if (!altsetting)
- return -ENXIO;
-
- // Allocate URBs and buffers for the D channel out
- endpoint = &altsetting->endpoint[EP_D_OUT-1];
-
- DBG(2, "endpoint address=%02x,packet size=%d",
- endpoint->desc.bEndpointAddress, le16_to_cpu(endpoint->desc.wMaxPacketSize));
-
- return st5481_setup_isocpipes(d_out->urb, dev,
- usb_sndisocpipe(dev, endpoint->desc.bEndpointAddress),
- NUM_ISO_PACKETS_D, SIZE_ISO_PACKETS_D_OUT,
- NUM_ISO_PACKETS_D * SIZE_ISO_PACKETS_D_OUT,
- usb_d_out_complete, adapter);
-}
-
-static void st5481_release_d_out(struct st5481_adapter *adapter)
-{
- struct st5481_d_out *d_out = &adapter->d_out;
-
- DBG(2, "");
-
- st5481_release_isocpipes(d_out->urb);
-}
-
-int st5481_setup_d(struct st5481_adapter *adapter)
-{
- int retval;
-
- DBG(2, "");
-
- retval = st5481_setup_d_out(adapter);
- if (retval)
- goto err;
- adapter->d_in.bufsize = MAX_DFRAME_LEN_L1;
- adapter->d_in.num_packets = NUM_ISO_PACKETS_D;
- adapter->d_in.packet_size = SIZE_ISO_PACKETS_D_IN;
- adapter->d_in.ep = EP_D_IN | USB_DIR_IN;
- adapter->d_in.counter = IN_D_COUNTER;
- adapter->d_in.adapter = adapter;
- adapter->d_in.hisax_if = &adapter->hisax_d_if.ifc;
- retval = st5481_setup_in(&adapter->d_in);
- if (retval)
- goto err_d_out;
-
- adapter->l1m.fsm = &l1fsm;
- adapter->l1m.state = ST_L1_F3;
- adapter->l1m.debug = st5481_debug & 0x100;
- adapter->l1m.userdata = adapter;
- adapter->l1m.printdebug = l1m_debug;
- FsmInitTimer(&adapter->l1m, &adapter->timer);
-
- adapter->d_out.fsm.fsm = &dout_fsm;
- adapter->d_out.fsm.state = ST_DOUT_NONE;
- adapter->d_out.fsm.debug = st5481_debug & 0x100;
- adapter->d_out.fsm.userdata = adapter;
- adapter->d_out.fsm.printdebug = dout_debug;
-
- return 0;
-
-err_d_out:
- st5481_release_d_out(adapter);
-err:
- return retval;
-}
-
-void st5481_release_d(struct st5481_adapter *adapter)
-{
- DBG(2, "");
-
- st5481_release_in(&adapter->d_in);
- st5481_release_d_out(adapter);
-}
-
-/* ======================================================================
- * init / exit
- */
-
-int __init st5481_d_init(void)
-{
- int retval;
-
- l1fsm.state_count = L1_STATE_COUNT;
- l1fsm.event_count = L1_EVENT_COUNT;
- l1fsm.strEvent = strL1Event;
- l1fsm.strState = strL1State;
- retval = FsmNew(&l1fsm, L1FnList, ARRAY_SIZE(L1FnList));
- if (retval)
- goto err;
-
- dout_fsm.state_count = DOUT_STATE_COUNT;
- dout_fsm.event_count = DOUT_EVENT_COUNT;
- dout_fsm.strEvent = strDoutEvent;
- dout_fsm.strState = strDoutState;
- retval = FsmNew(&dout_fsm, DoutFnList, ARRAY_SIZE(DoutFnList));
- if (retval)
- goto err_l1;
-
- return 0;
-
-err_l1:
- FsmFree(&l1fsm);
-err:
- return retval;
-}
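Both layer-1 machines follow the same pattern: a table of {state, event, handler} nodes (L1FnList, DoutFnList) is registered with FsmNew(), and FsmEvent() later dispatches on the instance's current state. A simplified standalone model of that table-driven dispatch, using invented types rather than the real fsm.h structures:

/*
 * Simplified model of the table-driven dispatch used by fsm.h: find the
 * node matching (current state, event) and run its handler.  The types
 * and the two-state example machine are invented for illustration.
 */
#include <stdio.h>

enum { ST_IDLE, ST_ACTIVE };
enum { EV_START, EV_STOP };

struct inst { int state; };
struct node { int state, event; void (*fn)(struct inst *); };

static void go_active(struct inst *i) { i->state = ST_ACTIVE; puts("-> ACTIVE"); }
static void go_idle(struct inst *i)   { i->state = ST_IDLE;   puts("-> IDLE");   }

static const struct node table[] = {
	{ ST_IDLE,   EV_START, go_active },
	{ ST_ACTIVE, EV_STOP,  go_idle   },
};

static void event(struct inst *i, int ev)
{
	unsigned int k;

	for (k = 0; k < sizeof(table) / sizeof(table[0]); k++) {
		if (table[k].state == i->state && table[k].event == ev) {
			table[k].fn(i);
			return;
		}
	}
	puts("event ignored in this state");
}

int main(void)
{
	struct inst i = { ST_IDLE };

	event(&i, EV_START);	/* -> ACTIVE */
	event(&i, EV_STOP);	/* -> IDLE   */
	event(&i, EV_STOP);	/* ignored   */
	return 0;
}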
-
-// can't be __exit
-void st5481_d_exit(void)
-{
- FsmFree(&l1fsm);
- FsmFree(&dout_fsm);
-}
diff --git a/drivers/isdn/hisax/st5481_init.c b/drivers/isdn/hisax/st5481_init.c
deleted file mode 100644
index 54ef9e4f8cbc..000000000000
--- a/drivers/isdn/hisax/st5481_init.c
+++ /dev/null
@@ -1,221 +0,0 @@
-/*
- * Driver for ST5481 USB ISDN modem
- *
- * Author Frode Isaksen
- * Copyright 2001 by Frode Isaksen <fisaksen@bewan.com>
- * 2001 by Kai Germaschewski <kai.germaschewski@gmx.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-/*
- * TODO:
- *
- * b layer1 delay?
- * hotplug / unregister issues
- * mod_inc/dec_use_count
- * unify parts of d/b channel usb handling
- * file header
- * avoid copy to isoc buffer?
- * improve usb delay?
- * merge l1 state machines?
- * clean up debug
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/usb.h>
-#include <linux/slab.h>
-#include "st5481.h"
-
-MODULE_DESCRIPTION("ISDN4Linux: driver for ST5481 USB ISDN adapter");
-MODULE_AUTHOR("Frode Isaksen");
-MODULE_LICENSE("GPL");
-
-static int protocol = 2; /* EURO-ISDN Default */
-module_param(protocol, int, 0);
-
-static int number_of_leds = 2; /* 2 LEDs on the adapter by default */
-module_param(number_of_leds, int, 0);
-
-#ifdef CONFIG_HISAX_DEBUG
-static int debug = 0;
-module_param(debug, int, 0);
-#endif
-int st5481_debug;
-
-/* ======================================================================
- * registration/deregistration with the USB layer
- */
-
-/*
- * This function will be called when the adapter is plugged
- * into the USB bus.
- */
-static int probe_st5481(struct usb_interface *intf,
- const struct usb_device_id *id)
-{
- struct usb_device *dev = interface_to_usbdev(intf);
- struct st5481_adapter *adapter;
- struct hisax_b_if *b_if[2];
- int retval, i;
-
-	printk(KERN_INFO "st5481: found adapter VendorId %04x, ProductId %04x, LEDs %d\n",
- le16_to_cpu(dev->descriptor.idVendor),
- le16_to_cpu(dev->descriptor.idProduct),
- number_of_leds);
-
- adapter = kzalloc(sizeof(struct st5481_adapter), GFP_KERNEL);
- if (!adapter)
- return -ENOMEM;
-
- adapter->number_of_leds = number_of_leds;
- adapter->usb_dev = dev;
-
- adapter->hisax_d_if.owner = THIS_MODULE;
- adapter->hisax_d_if.ifc.priv = adapter;
- adapter->hisax_d_if.ifc.l2l1 = st5481_d_l2l1;
-
- for (i = 0; i < 2; i++) {
- adapter->bcs[i].adapter = adapter;
- adapter->bcs[i].channel = i;
- adapter->bcs[i].b_if.ifc.priv = &adapter->bcs[i];
- adapter->bcs[i].b_if.ifc.l2l1 = st5481_b_l2l1;
- }
-
- retval = st5481_setup_usb(adapter);
- if (retval < 0)
- goto err;
-
- retval = st5481_setup_d(adapter);
- if (retval < 0)
- goto err_usb;
-
- retval = st5481_setup_b(&adapter->bcs[0]);
- if (retval < 0)
- goto err_d;
-
- retval = st5481_setup_b(&adapter->bcs[1]);
- if (retval < 0)
- goto err_b;
-
- for (i = 0; i < 2; i++)
- b_if[i] = &adapter->bcs[i].b_if;
-
- if (hisax_register(&adapter->hisax_d_if, b_if, "st5481_usb",
- protocol) != 0)
- goto err_b1;
-
- st5481_start(adapter);
-
- usb_set_intfdata(intf, adapter);
- return 0;
-
-err_b1:
- st5481_release_b(&adapter->bcs[1]);
-err_b:
- st5481_release_b(&adapter->bcs[0]);
-err_d:
- st5481_release_d(adapter);
-err_usb:
- st5481_release_usb(adapter);
-err:
- kfree(adapter);
- return -EIO;
-}
-
-/*
- * This function will be called when the adapter is removed
- * from the USB bus.
- */
-static void disconnect_st5481(struct usb_interface *intf)
-{
- struct st5481_adapter *adapter = usb_get_intfdata(intf);
-
- DBG(1, "");
-
- usb_set_intfdata(intf, NULL);
- if (!adapter)
- return;
-
- st5481_stop(adapter);
- st5481_release_b(&adapter->bcs[1]);
- st5481_release_b(&adapter->bcs[0]);
- st5481_release_d(adapter);
-	// ideally we would wait here for completion of outstanding URBs
- mdelay(2);
- st5481_release_usb(adapter);
-
- hisax_unregister(&adapter->hisax_d_if);
-
- kfree(adapter);
-}
-
-/*
- * The last 4 bits of the Product ID are set by 4 pins on the chip.
- */
-static struct usb_device_id st5481_ids[] = {
- { USB_DEVICE(ST_VENDOR_ID, ST5481_PRODUCT_ID + 0x0) },
- { USB_DEVICE(ST_VENDOR_ID, ST5481_PRODUCT_ID + 0x1) },
- { USB_DEVICE(ST_VENDOR_ID, ST5481_PRODUCT_ID + 0x2) },
- { USB_DEVICE(ST_VENDOR_ID, ST5481_PRODUCT_ID + 0x3) },
- { USB_DEVICE(ST_VENDOR_ID, ST5481_PRODUCT_ID + 0x4) },
- { USB_DEVICE(ST_VENDOR_ID, ST5481_PRODUCT_ID + 0x5) },
- { USB_DEVICE(ST_VENDOR_ID, ST5481_PRODUCT_ID + 0x6) },
- { USB_DEVICE(ST_VENDOR_ID, ST5481_PRODUCT_ID + 0x7) },
- { USB_DEVICE(ST_VENDOR_ID, ST5481_PRODUCT_ID + 0x8) },
- { USB_DEVICE(ST_VENDOR_ID, ST5481_PRODUCT_ID + 0x9) },
- { USB_DEVICE(ST_VENDOR_ID, ST5481_PRODUCT_ID + 0xA) },
- { USB_DEVICE(ST_VENDOR_ID, ST5481_PRODUCT_ID + 0xB) },
- { USB_DEVICE(ST_VENDOR_ID, ST5481_PRODUCT_ID + 0xC) },
- { USB_DEVICE(ST_VENDOR_ID, ST5481_PRODUCT_ID + 0xD) },
- { USB_DEVICE(ST_VENDOR_ID, ST5481_PRODUCT_ID + 0xE) },
- { USB_DEVICE(ST_VENDOR_ID, ST5481_PRODUCT_ID + 0xF) },
- { }
-};
-MODULE_DEVICE_TABLE(usb, st5481_ids);
-
-static struct usb_driver st5481_usb_driver = {
- .name = "st5481_usb",
- .probe = probe_st5481,
- .disconnect = disconnect_st5481,
- .id_table = st5481_ids,
- .disable_hub_initiated_lpm = 1,
-};
-
-static int __init st5481_usb_init(void)
-{
- int retval;
-
-#ifdef CONFIG_HISAX_DEBUG
- st5481_debug = debug;
-#endif
-
- printk(KERN_INFO "hisax_st5481: ST5481 USB ISDN driver $Revision: 2.4.2.3 $\n");
-
- retval = st5481_d_init();
- if (retval < 0)
- goto out;
-
- retval = usb_register(&st5481_usb_driver);
- if (retval < 0)
- goto out_d_exit;
-
- return 0;
-
-out_d_exit:
- st5481_d_exit();
-out:
- return retval;
-}
-
-static void __exit st5481_usb_exit(void)
-{
- usb_deregister(&st5481_usb_driver);
- st5481_d_exit();
-}
-
-module_init(st5481_usb_init);
-module_exit(st5481_usb_exit);
diff --git a/drivers/isdn/hisax/st5481_usb.c b/drivers/isdn/hisax/st5481_usb.c
deleted file mode 100644
index f207fda691c7..000000000000
--- a/drivers/isdn/hisax/st5481_usb.c
+++ /dev/null
@@ -1,659 +0,0 @@
-/*
- * Driver for ST5481 USB ISDN modem
- *
- * Author Frode Isaksen
- * Copyright 2001 by Frode Isaksen <fisaksen@bewan.com>
- * 2001 by Kai Germaschewski <kai.germaschewski@gmx.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/init.h>
-#include <linux/usb.h>
-#include <linux/slab.h>
-#include "st5481.h"
-
-static int st5481_isoc_flatten(struct urb *urb);
-
-/* ======================================================================
- * control pipe
- */
-
-/*
- * Send the next endpoint 0 request stored in the FIFO.
- * Called either by the completion or by usb_ctrl_msg.
- */
-static void usb_next_ctrl_msg(struct urb *urb,
- struct st5481_adapter *adapter)
-{
- struct st5481_ctrl *ctrl = &adapter->ctrl;
- int r_index;
-
- if (test_and_set_bit(0, &ctrl->busy)) {
- return;
- }
-
- if ((r_index = fifo_remove(&ctrl->msg_fifo.f)) < 0) {
- test_and_clear_bit(0, &ctrl->busy);
- return;
- }
- urb->setup_packet =
- (unsigned char *)&ctrl->msg_fifo.data[r_index];
-
- DBG(1, "request=0x%02x,value=0x%04x,index=%x",
- ((struct ctrl_msg *)urb->setup_packet)->dr.bRequest,
- ((struct ctrl_msg *)urb->setup_packet)->dr.wValue,
- ((struct ctrl_msg *)urb->setup_packet)->dr.wIndex);
-
- // Prepare the URB
- urb->dev = adapter->usb_dev;
-
- SUBMIT_URB(urb, GFP_ATOMIC);
-}
-
-/*
- * Asynchronous endpoint 0 request (async version of usb_control_msg).
- * The request will be queued up in a FIFO if the endpoint is busy.
- */
-static void usb_ctrl_msg(struct st5481_adapter *adapter,
- u8 request, u8 requesttype, u16 value, u16 index,
- ctrl_complete_t complete, void *context)
-{
- struct st5481_ctrl *ctrl = &adapter->ctrl;
- int w_index;
- struct ctrl_msg *ctrl_msg;
-
- if ((w_index = fifo_add(&ctrl->msg_fifo.f)) < 0) {
- WARNING("control msg FIFO full");
- return;
- }
- ctrl_msg = &ctrl->msg_fifo.data[w_index];
-
- ctrl_msg->dr.bRequestType = requesttype;
- ctrl_msg->dr.bRequest = request;
- ctrl_msg->dr.wValue = cpu_to_le16p(&value);
- ctrl_msg->dr.wIndex = cpu_to_le16p(&index);
- ctrl_msg->dr.wLength = 0;
- ctrl_msg->complete = complete;
- ctrl_msg->context = context;
-
- usb_next_ctrl_msg(ctrl->urb, adapter);
-}
-
-/*
- * Asynchronous endpoint 0 device request.
- */
-void st5481_usb_device_ctrl_msg(struct st5481_adapter *adapter,
- u8 request, u16 value,
- ctrl_complete_t complete, void *context)
-{
- usb_ctrl_msg(adapter, request,
- USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- value, 0, complete, context);
-}
-
-/*
- * Asynchronous pipe reset (async version of usb_clear_halt).
- */
-void st5481_usb_pipe_reset(struct st5481_adapter *adapter,
- u_char pipe,
- ctrl_complete_t complete, void *context)
-{
- DBG(1, "pipe=%02x", pipe);
-
- usb_ctrl_msg(adapter,
- USB_REQ_CLEAR_FEATURE, USB_DIR_OUT | USB_RECIP_ENDPOINT,
- 0, pipe, complete, context);
-}
-
-
-/*
- Physical level functions
-*/
-
-void st5481_ph_command(struct st5481_adapter *adapter, unsigned int command)
-{
- DBG(8, "command=%s", ST5481_CMD_string(command));
-
- st5481_usb_device_ctrl_msg(adapter, TXCI, command, NULL, NULL);
-}
-
-/*
- * The request on endpoint 0 has completed.
- * Call the user provided completion routine and try
- * to send the next request.
- */
-static void usb_ctrl_complete(struct urb *urb)
-{
- struct st5481_adapter *adapter = urb->context;
- struct st5481_ctrl *ctrl = &adapter->ctrl;
- struct ctrl_msg *ctrl_msg;
-
- if (unlikely(urb->status < 0)) {
- switch (urb->status) {
- case -ENOENT:
- case -ESHUTDOWN:
- case -ECONNRESET:
- DBG(1, "urb killed status %d", urb->status);
- return; // Give up
- default:
- WARNING("urb status %d", urb->status);
- break;
- }
- }
-
- ctrl_msg = (struct ctrl_msg *)urb->setup_packet;
-
- if (ctrl_msg->dr.bRequest == USB_REQ_CLEAR_FEATURE) {
- /* Special case handling for pipe reset */
- le16_to_cpus(&ctrl_msg->dr.wIndex);
- usb_reset_endpoint(adapter->usb_dev, ctrl_msg->dr.wIndex);
- }
-
- if (ctrl_msg->complete)
- ctrl_msg->complete(ctrl_msg->context);
-
- clear_bit(0, &ctrl->busy);
-
- // Try to send next control message
- usb_next_ctrl_msg(urb, adapter);
- return;
-}
-
-/* ======================================================================
- * interrupt pipe
- */
-
-/*
- * The interrupt endpoint will be called when any
- * of the 6 registers changes state (depending on masks).
- * Decode the register values and schedule a private event.
- * Called in interrupt context.
- */
-static void usb_int_complete(struct urb *urb)
-{
- u8 *data = urb->transfer_buffer;
- u8 irqbyte;
- struct st5481_adapter *adapter = urb->context;
- int j;
- int status;
-
- switch (urb->status) {
- case 0:
- /* success */
- break;
- case -ECONNRESET:
- case -ENOENT:
- case -ESHUTDOWN:
- /* this urb is terminated, clean up */
- DBG(2, "urb shutting down with status: %d", urb->status);
- return;
- default:
- WARNING("nonzero urb status received: %d", urb->status);
- goto exit;
- }
-
-
- DBG_PACKET(2, data, INT_PKT_SIZE);
-
- if (urb->actual_length == 0) {
- goto exit;
- }
-
- irqbyte = data[MPINT];
- if (irqbyte & DEN_INT)
- FsmEvent(&adapter->d_out.fsm, EV_DOUT_DEN, NULL);
-
- if (irqbyte & DCOLL_INT)
- FsmEvent(&adapter->d_out.fsm, EV_DOUT_COLL, NULL);
-
- irqbyte = data[FFINT_D];
- if (irqbyte & OUT_UNDERRUN)
- FsmEvent(&adapter->d_out.fsm, EV_DOUT_UNDERRUN, NULL);
-
- if (irqbyte & OUT_DOWN)
- ;// printk("OUT_DOWN\n");
-
- irqbyte = data[MPINT];
- if (irqbyte & RXCI_INT)
- FsmEvent(&adapter->l1m, data[CCIST] & 0x0f, NULL);
-
- for (j = 0; j < 2; j++)
- adapter->bcs[j].b_out.flow_event |= data[FFINT_B1 + j];
-
- urb->actual_length = 0;
-
-exit:
- status = usb_submit_urb(urb, GFP_ATOMIC);
- if (status)
- WARNING("usb_submit_urb failed with result %d", status);
-}
-
-/* ======================================================================
- * initialization
- */
-
-int st5481_setup_usb(struct st5481_adapter *adapter)
-{
- struct usb_device *dev = adapter->usb_dev;
- struct st5481_ctrl *ctrl = &adapter->ctrl;
- struct st5481_intr *intr = &adapter->intr;
- struct usb_interface *intf;
- struct usb_host_interface *altsetting = NULL;
- struct usb_host_endpoint *endpoint;
- int status;
- struct urb *urb;
- u8 *buf;
-
- DBG(2, "");
-
- if ((status = usb_reset_configuration(dev)) < 0) {
- WARNING("reset_configuration failed,status=%d", status);
- return status;
- }
-
- intf = usb_ifnum_to_if(dev, 0);
- if (intf)
- altsetting = usb_altnum_to_altsetting(intf, 3);
- if (!altsetting)
- return -ENXIO;
-
- // Check if the config is sane
- if (altsetting->desc.bNumEndpoints != 7) {
- WARNING("expecting 7 got %d endpoints!", altsetting->desc.bNumEndpoints);
- return -EINVAL;
- }
-
- // The descriptor is wrong for some early samples of the ST5481 chip
- altsetting->endpoint[3].desc.wMaxPacketSize = cpu_to_le16(32);
- altsetting->endpoint[4].desc.wMaxPacketSize = cpu_to_le16(32);
-
- // Use alternative setting 3 on interface 0 to have 2B+D
- if ((status = usb_set_interface(dev, 0, 3)) < 0) {
- WARNING("usb_set_interface failed,status=%d", status);
- return status;
- }
-
- // Allocate URB for control endpoint
- urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!urb) {
- return -ENOMEM;
- }
- ctrl->urb = urb;
-
- // Fill the control URB
- usb_fill_control_urb(urb, dev,
- usb_sndctrlpipe(dev, 0),
- NULL, NULL, 0, usb_ctrl_complete, adapter);
-
-
- fifo_init(&ctrl->msg_fifo.f, ARRAY_SIZE(ctrl->msg_fifo.data));
-
- // Allocate URBs and buffers for interrupt endpoint
- urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!urb) {
- goto err1;
- }
- intr->urb = urb;
-
- buf = kmalloc(INT_PKT_SIZE, GFP_KERNEL);
- if (!buf) {
- goto err2;
- }
-
- endpoint = &altsetting->endpoint[EP_INT-1];
-
- // Fill the interrupt URB
- usb_fill_int_urb(urb, dev,
- usb_rcvintpipe(dev, endpoint->desc.bEndpointAddress),
- buf, INT_PKT_SIZE,
- usb_int_complete, adapter,
- endpoint->desc.bInterval);
-
- return 0;
-err2:
- usb_free_urb(intr->urb);
- intr->urb = NULL;
-err1:
- usb_free_urb(ctrl->urb);
- ctrl->urb = NULL;
-
- return -ENOMEM;
-}
-
-/*
- * Release buffers and URBs for the interrupt and control
- * endpoints.
- */
-void st5481_release_usb(struct st5481_adapter *adapter)
-{
- struct st5481_intr *intr = &adapter->intr;
- struct st5481_ctrl *ctrl = &adapter->ctrl;
-
- DBG(1, "");
-
- // Stop and free Control and Interrupt URBs
- usb_kill_urb(ctrl->urb);
- kfree(ctrl->urb->transfer_buffer);
- usb_free_urb(ctrl->urb);
- ctrl->urb = NULL;
-
- usb_kill_urb(intr->urb);
- kfree(intr->urb->transfer_buffer);
- usb_free_urb(intr->urb);
- intr->urb = NULL;
-}
-
-/*
- * Initialize the adapter.
- */
-void st5481_start(struct st5481_adapter *adapter)
-{
- static const u8 init_cmd_table[] = {
- SET_DEFAULT, 0,
- STT, 0,
- SDA_MIN, 0x0d,
- SDA_MAX, 0x29,
- SDELAY_VALUE, 0x14,
- GPIO_DIR, 0x01,
- GPIO_OUT, RED_LED,
-// FFCTRL_OUT_D,4,
-// FFCTRH_OUT_D,12,
- FFCTRL_OUT_B1, 6,
- FFCTRH_OUT_B1, 20,
- FFCTRL_OUT_B2, 6,
- FFCTRH_OUT_B2, 20,
- MPMSK, RXCI_INT + DEN_INT + DCOLL_INT,
- 0
- };
- struct st5481_intr *intr = &adapter->intr;
- int i = 0;
- u8 request, value;
-
- DBG(8, "");
-
- adapter->leds = RED_LED;
-
- // Start receiving on the interrupt endpoint
- SUBMIT_URB(intr->urb, GFP_KERNEL);
-
- while ((request = init_cmd_table[i++])) {
- value = init_cmd_table[i++];
- st5481_usb_device_ctrl_msg(adapter, request, value, NULL, NULL);
- }
- st5481_ph_command(adapter, ST5481_CMD_PUP);
-}
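The initialization table above is a flat array of request/value pairs terminated by a request byte of 0, and the loop walks it two entries at a time. A standalone sketch of the same table walk, with invented register numbers and values:

/*
 * Standalone sketch of the request/value table walk in st5481_start();
 * the register numbers and values here are invented for illustration.
 */
#include <stdio.h>

int main(void)
{
	static const unsigned char table[] = {
		0x10, 0x01,	/* REG_A, value */
		0x11, 0x29,	/* REG_B, value */
		0x12, 0x14,	/* REG_C, value */
		0		/* request 0 terminates the table */
	};
	unsigned int i = 0;
	unsigned char request, value;

	while ((request = table[i++])) {
		value = table[i++];
		printf("write reg %#x <- %#x\n", request, value);
	}
	return 0;
}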
-
-/*
- * Reset the adapter to default values.
- */
-void st5481_stop(struct st5481_adapter *adapter)
-{
- DBG(8, "");
-
- st5481_usb_device_ctrl_msg(adapter, SET_DEFAULT, 0, NULL, NULL);
-}
-
-/* ======================================================================
- * isochronous USB helpers
- */
-
-static void
-fill_isoc_urb(struct urb *urb, struct usb_device *dev,
- unsigned int pipe, void *buf, int num_packets,
- int packet_size, usb_complete_t complete,
- void *context)
-{
- int k;
-
- usb_fill_int_urb(urb, dev, pipe, buf, num_packets * packet_size,
- complete, context, 1);
-
- urb->number_of_packets = num_packets;
- urb->transfer_flags = URB_ISO_ASAP;
- for (k = 0; k < num_packets; k++) {
- urb->iso_frame_desc[k].offset = packet_size * k;
- urb->iso_frame_desc[k].length = packet_size;
- urb->iso_frame_desc[k].actual_length = 0;
- }
-}
-
-int
-st5481_setup_isocpipes(struct urb *urb[2], struct usb_device *dev,
- unsigned int pipe, int num_packets,
- int packet_size, int buf_size,
- usb_complete_t complete, void *context)
-{
- int j, retval;
- unsigned char *buf;
-
- for (j = 0; j < 2; j++) {
- retval = -ENOMEM;
- urb[j] = usb_alloc_urb(num_packets, GFP_KERNEL);
- if (!urb[j])
- goto err;
-
-		// Allocate memory for 2000 bytes/sec (16 kbit/s)
- buf = kmalloc(buf_size, GFP_KERNEL);
- if (!buf)
- goto err;
-
- // Fill the isochronous URB
- fill_isoc_urb(urb[j], dev, pipe, buf,
- num_packets, packet_size, complete,
- context);
- }
- return 0;
-
-err:
- for (j = 0; j < 2; j++) {
- if (urb[j]) {
- kfree(urb[j]->transfer_buffer);
- urb[j]->transfer_buffer = NULL;
- usb_free_urb(urb[j]);
- urb[j] = NULL;
- }
- }
- return retval;
-}
-
-void st5481_release_isocpipes(struct urb *urb[2])
-{
- int j;
-
- for (j = 0; j < 2; j++) {
- usb_kill_urb(urb[j]);
- kfree(urb[j]->transfer_buffer);
- usb_free_urb(urb[j]);
- urb[j] = NULL;
- }
-}
-
-/*
- * Decode frames received on the B/D channel.
- * Note that this function will be called continuously
- * with 64 kbit/s (B) or 16 kbit/s (D) of data and hence will be
- * called 50 times per second with 20 ISOC descriptors.
- * Called in interrupt context.
- */
-static void usb_in_complete(struct urb *urb)
-{
- struct st5481_in *in = urb->context;
- unsigned char *ptr;
- struct sk_buff *skb;
- int len, count, status;
-
- if (unlikely(urb->status < 0)) {
- switch (urb->status) {
- case -ENOENT:
- case -ESHUTDOWN:
- case -ECONNRESET:
- DBG(1, "urb killed status %d", urb->status);
- return; // Give up
- default:
- WARNING("urb status %d", urb->status);
- break;
- }
- }
-
- DBG_ISO_PACKET(0x80, urb);
-
- len = st5481_isoc_flatten(urb);
- ptr = urb->transfer_buffer;
- while (len > 0) {
- if (in->mode == L1_MODE_TRANS) {
- memcpy(in->rcvbuf, ptr, len);
- status = len;
- len = 0;
- } else {
- status = isdnhdlc_decode(&in->hdlc_state, ptr, len, &count,
- in->rcvbuf, in->bufsize);
- ptr += count;
- len -= count;
- }
-
- if (status > 0) {
- // Good frame received
- DBG(4, "count=%d", status);
- DBG_PACKET(0x400, in->rcvbuf, status);
- if (!(skb = dev_alloc_skb(status))) {
- WARNING("receive out of memory\n");
- break;
- }
- skb_put_data(skb, in->rcvbuf, status);
- in->hisax_if->l1l2(in->hisax_if, PH_DATA | INDICATION, skb);
- } else if (status == -HDLC_CRC_ERROR) {
- INFO("CRC error");
- } else if (status == -HDLC_FRAMING_ERROR) {
- INFO("framing error");
- } else if (status == -HDLC_LENGTH_ERROR) {
- INFO("length error");
- }
- }
-
- // Prepare URB for next transfer
- urb->dev = in->adapter->usb_dev;
- urb->actual_length = 0;
-
- SUBMIT_URB(urb, GFP_ATOMIC);
-}
-
-int st5481_setup_in(struct st5481_in *in)
-{
- struct usb_device *dev = in->adapter->usb_dev;
- int retval;
-
- DBG(4, "");
-
- in->rcvbuf = kmalloc(in->bufsize, GFP_KERNEL);
- retval = -ENOMEM;
- if (!in->rcvbuf)
- goto err;
-
- retval = st5481_setup_isocpipes(in->urb, dev,
- usb_rcvisocpipe(dev, in->ep),
- in->num_packets, in->packet_size,
- in->num_packets * in->packet_size,
- usb_in_complete, in);
- if (retval)
- goto err_free;
- return 0;
-
-err_free:
- kfree(in->rcvbuf);
-err:
- return retval;
-}
-
-void st5481_release_in(struct st5481_in *in)
-{
- DBG(2, "");
-
- st5481_release_isocpipes(in->urb);
-}
-
-/*
- * Make the transfer_buffer contiguous by
- * copying from the iso descriptors if necessary.
- */
-static int st5481_isoc_flatten(struct urb *urb)
-{
- struct usb_iso_packet_descriptor *pipd, *pend;
- unsigned char *src, *dst;
- unsigned int len;
-
- if (urb->status < 0) {
- return urb->status;
- }
- for (pipd = &urb->iso_frame_desc[0],
- pend = &urb->iso_frame_desc[urb->number_of_packets],
- dst = urb->transfer_buffer;
- pipd < pend;
- pipd++) {
-
- if (pipd->status < 0) {
- return (pipd->status);
- }
-
- len = pipd->actual_length;
- pipd->actual_length = 0;
- src = urb->transfer_buffer + pipd->offset;
-
- if (src != dst) {
-			// Need to copy since the isoc buffers are not full
- while (len--) {
- *dst++ = *src++;
- }
- } else {
- // No need to copy, just update destination buffer
- dst += len;
- }
- }
- // Return size of flattened buffer
- return (dst - (unsigned char *)urb->transfer_buffer);
-}
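st5481_isoc_flatten() above compacts a sparsely filled ISOC buffer by copying each descriptor's actual_length bytes down so the payload is contiguous before it reaches the HDLC decoder. A standalone sketch of the same compaction, with invented descriptor and buffer contents:

/*
 * Standalone sketch of the compaction: each packet wrote "actual" bytes
 * at a fixed "offset"; copy them down so the payload is contiguous.
 * The buffer and descriptors are invented for illustration.
 */
#include <stdio.h>
#include <string.h>

struct desc { unsigned int offset, actual; };

static unsigned int flatten(char *buf, const struct desc *d, unsigned int n)
{
	char *dst = buf;
	unsigned int i;

	for (i = 0; i < n; i++) {
		memmove(dst, buf + d[i].offset, d[i].actual);
		dst += d[i].actual;
	}
	return dst - buf;		/* flattened length */
}

int main(void)
{
	/* two 8-byte slots, only partly filled by the hardware */
	char buf[17] = "ABC.....DE......";
	struct desc d[2] = { { 0, 3 }, { 8, 2 } };
	unsigned int len = flatten(buf, d, 2);

	printf("%u bytes: %.*s\n", len, (int)len, buf);	/* 5 bytes: ABCDE */
	return 0;
}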
-
-static void st5481_start_rcv(void *context)
-{
- struct st5481_in *in = context;
- struct st5481_adapter *adapter = in->adapter;
-
- DBG(4, "");
-
- in->urb[0]->dev = adapter->usb_dev;
- SUBMIT_URB(in->urb[0], GFP_KERNEL);
-
- in->urb[1]->dev = adapter->usb_dev;
- SUBMIT_URB(in->urb[1], GFP_KERNEL);
-}
-
-void st5481_in_mode(struct st5481_in *in, int mode)
-{
- if (in->mode == mode)
- return;
-
- in->mode = mode;
-
- usb_unlink_urb(in->urb[0]);
- usb_unlink_urb(in->urb[1]);
-
- if (in->mode != L1_MODE_NULL) {
- if (in->mode != L1_MODE_TRANS) {
- u32 features = HDLC_BITREVERSE;
-
- if (in->mode == L1_MODE_HDLC_56K)
- features |= HDLC_56KBIT;
- isdnhdlc_rcv_init(&in->hdlc_state, features);
- }
- st5481_usb_pipe_reset(in->adapter, in->ep, NULL, NULL);
- st5481_usb_device_ctrl_msg(in->adapter, in->counter,
- in->packet_size,
- NULL, NULL);
- st5481_start_rcv(in);
- } else {
- st5481_usb_device_ctrl_msg(in->adapter, in->counter,
- 0, NULL, NULL);
- }
-}
diff --git a/drivers/isdn/hisax/tei.c b/drivers/isdn/hisax/tei.c
deleted file mode 100644
index 9195f9fd628f..000000000000
--- a/drivers/isdn/hisax/tei.c
+++ /dev/null
@@ -1,465 +0,0 @@
-/* $Id: tei.c,v 2.20.2.3 2004/01/13 14:31:26 keil Exp $
- *
- * Author Karsten Keil
- * based on the teles driver from Jan den Ouden
- * Copyright by Karsten Keil <keil@isdn4linux.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- * For changes and modifications please read
- * Documentation/isdn/HiSax.cert
- *
- * Thanks to Jan den Ouden
- * Fritz Elfert
- *
- */
-
-#include "hisax.h"
-#include "isdnl2.h"
-#include <linux/gfp.h>
-#include <linux/init.h>
-#include <linux/random.h>
-
-const char *tei_revision = "$Revision: 2.20.2.3 $";
-
-#define ID_REQUEST 1
-#define ID_ASSIGNED 2
-#define ID_DENIED 3
-#define ID_CHK_REQ 4
-#define ID_CHK_RES 5
-#define ID_REMOVE 6
-#define ID_VERIFY 7
-
-#define TEI_ENTITY_ID 0xf
-
-static struct Fsm teifsm;
-
-void tei_handler(struct PStack *st, u_char pr, struct sk_buff *skb);
-
-enum {
- ST_TEI_NOP,
- ST_TEI_IDREQ,
- ST_TEI_IDVERIFY,
-};
-
-#define TEI_STATE_COUNT (ST_TEI_IDVERIFY + 1)
-
-static char *strTeiState[] =
-{
- "ST_TEI_NOP",
- "ST_TEI_IDREQ",
- "ST_TEI_IDVERIFY",
-};
-
-enum {
- EV_IDREQ,
- EV_ASSIGN,
- EV_DENIED,
- EV_CHKREQ,
- EV_REMOVE,
- EV_VERIFY,
- EV_T202,
-};
-
-#define TEI_EVENT_COUNT (EV_T202 + 1)
-
-static char *strTeiEvent[] =
-{
- "EV_IDREQ",
- "EV_ASSIGN",
- "EV_DENIED",
- "EV_CHKREQ",
- "EV_REMOVE",
- "EV_VERIFY",
- "EV_T202",
-};
-
-static unsigned int
-random_ri(void)
-{
- unsigned int x;
-
- get_random_bytes(&x, sizeof(x));
- return (x & 0xffff);
-}
-
-static struct PStack *
-findtei(struct PStack *st, int tei)
-{
- struct PStack *ptr = *(st->l1.stlistp);
-
- if (tei == 127)
- return (NULL);
-
- while (ptr)
- if (ptr->l2.tei == tei)
- return (ptr);
- else
- ptr = ptr->next;
- return (NULL);
-}
-
-static void
-put_tei_msg(struct PStack *st, u_char m_id, unsigned int ri, u_char tei)
-{
- struct sk_buff *skb;
- u_char *bp;
-
- if (!(skb = alloc_skb(8, GFP_ATOMIC))) {
- printk(KERN_WARNING "HiSax: No skb for TEI manager\n");
- return;
- }
- bp = skb_put(skb, 3);
- bp[0] = (TEI_SAPI << 2);
- bp[1] = (GROUP_TEI << 1) | 0x1;
- bp[2] = UI;
- bp = skb_put(skb, 5);
- bp[0] = TEI_ENTITY_ID;
- bp[1] = ri >> 8;
- bp[2] = ri & 0xff;
- bp[3] = m_id;
- bp[4] = (tei << 1) | 1;
- st->l2.l2l1(st, PH_DATA | REQUEST, skb);
-}
-
-static void
-tei_id_request(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
-
- if (st->l2.tei != -1) {
- st->ma.tei_m.printdebug(&st->ma.tei_m,
- "assign request for already assigned tei %d",
- st->l2.tei);
- return;
- }
- st->ma.ri = random_ri();
- if (st->ma.debug)
- st->ma.tei_m.printdebug(&st->ma.tei_m,
- "assign request ri %d", st->ma.ri);
- put_tei_msg(st, ID_REQUEST, st->ma.ri, 127);
- FsmChangeState(&st->ma.tei_m, ST_TEI_IDREQ);
- FsmAddTimer(&st->ma.t202, st->ma.T202, EV_T202, NULL, 1);
- st->ma.N202 = 3;
-}
-
-static void
-tei_id_assign(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *ost, *st = fi->userdata;
- struct sk_buff *skb = arg;
- struct IsdnCardState *cs;
- int ri, tei;
-
- ri = ((unsigned int) skb->data[1] << 8) + skb->data[2];
- tei = skb->data[4] >> 1;
- if (st->ma.debug)
- st->ma.tei_m.printdebug(&st->ma.tei_m,
- "identity assign ri %d tei %d", ri, tei);
- if ((ost = findtei(st, tei))) { /* same tei is in use */
- if (ri != ost->ma.ri) {
- st->ma.tei_m.printdebug(&st->ma.tei_m,
- "possible duplicate assignment tei %d", tei);
- ost->l2.l2tei(ost, MDL_ERROR | RESPONSE, NULL);
- }
- } else if (ri == st->ma.ri) {
- FsmDelTimer(&st->ma.t202, 1);
- FsmChangeState(&st->ma.tei_m, ST_TEI_NOP);
- st->l3.l3l2(st, MDL_ASSIGN | REQUEST, (void *) (long) tei);
- cs = (struct IsdnCardState *) st->l1.hardware;
- cs->cardmsg(cs, MDL_ASSIGN | REQUEST, NULL);
- }
-}
-
-static void
-tei_id_test_dup(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *ost, *st = fi->userdata;
- struct sk_buff *skb = arg;
- int tei, ri;
-
- ri = ((unsigned int) skb->data[1] << 8) + skb->data[2];
- tei = skb->data[4] >> 1;
- if (st->ma.debug)
- st->ma.tei_m.printdebug(&st->ma.tei_m,
- "foreign identity assign ri %d tei %d", ri, tei);
- if ((ost = findtei(st, tei))) { /* same tei is in use */
- if (ri != ost->ma.ri) { /* and it wasn't our request */
- st->ma.tei_m.printdebug(&st->ma.tei_m,
- "possible duplicate assignment tei %d", tei);
- FsmEvent(&ost->ma.tei_m, EV_VERIFY, NULL);
- }
- }
-}
-
-static void
-tei_id_denied(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
- struct sk_buff *skb = arg;
- int ri, tei;
-
- ri = ((unsigned int) skb->data[1] << 8) + skb->data[2];
- tei = skb->data[4] >> 1;
- if (st->ma.debug)
- st->ma.tei_m.printdebug(&st->ma.tei_m,
- "identity denied ri %d tei %d", ri, tei);
-}
-
-static void
-tei_id_chk_req(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
- struct sk_buff *skb = arg;
- int tei;
-
- tei = skb->data[4] >> 1;
- if (st->ma.debug)
- st->ma.tei_m.printdebug(&st->ma.tei_m,
- "identity check req tei %d", tei);
- if ((st->l2.tei != -1) && ((tei == GROUP_TEI) || (tei == st->l2.tei))) {
- FsmDelTimer(&st->ma.t202, 4);
- FsmChangeState(&st->ma.tei_m, ST_TEI_NOP);
- put_tei_msg(st, ID_CHK_RES, random_ri(), st->l2.tei);
- }
-}
-
-static void
-tei_id_remove(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
- struct sk_buff *skb = arg;
- struct IsdnCardState *cs;
- int tei;
-
- tei = skb->data[4] >> 1;
- if (st->ma.debug)
- st->ma.tei_m.printdebug(&st->ma.tei_m,
- "identity remove tei %d", tei);
- if ((st->l2.tei != -1) && ((tei == GROUP_TEI) || (tei == st->l2.tei))) {
- FsmDelTimer(&st->ma.t202, 5);
- FsmChangeState(&st->ma.tei_m, ST_TEI_NOP);
- st->l3.l3l2(st, MDL_REMOVE | REQUEST, NULL);
- cs = (struct IsdnCardState *) st->l1.hardware;
- cs->cardmsg(cs, MDL_REMOVE | REQUEST, NULL);
- }
-}
-
-static void
-tei_id_verify(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
-
- if (st->ma.debug)
- st->ma.tei_m.printdebug(&st->ma.tei_m,
- "id verify request for tei %d", st->l2.tei);
- put_tei_msg(st, ID_VERIFY, 0, st->l2.tei);
- FsmChangeState(&st->ma.tei_m, ST_TEI_IDVERIFY);
- FsmAddTimer(&st->ma.t202, st->ma.T202, EV_T202, NULL, 2);
- st->ma.N202 = 2;
-}
-
-static void
-tei_id_req_tout(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
- struct IsdnCardState *cs;
-
- if (--st->ma.N202) {
- st->ma.ri = random_ri();
- if (st->ma.debug)
- st->ma.tei_m.printdebug(&st->ma.tei_m,
- "assign req(%d) ri %d", 4 - st->ma.N202,
- st->ma.ri);
- put_tei_msg(st, ID_REQUEST, st->ma.ri, 127);
- FsmAddTimer(&st->ma.t202, st->ma.T202, EV_T202, NULL, 3);
- } else {
- st->ma.tei_m.printdebug(&st->ma.tei_m, "assign req failed");
- st->l3.l3l2(st, MDL_ERROR | RESPONSE, NULL);
- cs = (struct IsdnCardState *) st->l1.hardware;
- cs->cardmsg(cs, MDL_REMOVE | REQUEST, NULL);
- FsmChangeState(fi, ST_TEI_NOP);
- }
-}
-
-static void
-tei_id_ver_tout(struct FsmInst *fi, int event, void *arg)
-{
- struct PStack *st = fi->userdata;
- struct IsdnCardState *cs;
-
- if (--st->ma.N202) {
- if (st->ma.debug)
- st->ma.tei_m.printdebug(&st->ma.tei_m,
- "id verify req(%d) for tei %d",
- 3 - st->ma.N202, st->l2.tei);
- put_tei_msg(st, ID_VERIFY, 0, st->l2.tei);
- FsmAddTimer(&st->ma.t202, st->ma.T202, EV_T202, NULL, 4);
- } else {
- st->ma.tei_m.printdebug(&st->ma.tei_m,
- "verify req for tei %d failed", st->l2.tei);
- st->l3.l3l2(st, MDL_REMOVE | REQUEST, NULL);
- cs = (struct IsdnCardState *) st->l1.hardware;
- cs->cardmsg(cs, MDL_REMOVE | REQUEST, NULL);
- FsmChangeState(fi, ST_TEI_NOP);
- }
-}
-
-static void
-tei_l1l2(struct PStack *st, int pr, void *arg)
-{
- struct sk_buff *skb = arg;
- int mt;
-
- if (test_bit(FLG_FIXED_TEI, &st->l2.flag)) {
- dev_kfree_skb(skb);
- return;
- }
-
- if (pr == (PH_DATA | INDICATION)) {
- if (skb->len < 3) {
- st->ma.tei_m.printdebug(&st->ma.tei_m,
- "short mgr frame %ld/3", skb->len);
- } else if ((skb->data[0] != ((TEI_SAPI << 2) | 2)) ||
- (skb->data[1] != ((GROUP_TEI << 1) | 1))) {
- st->ma.tei_m.printdebug(&st->ma.tei_m,
- "wrong mgr sapi/tei %x/%x",
- skb->data[0], skb->data[1]);
- } else if ((skb->data[2] & 0xef) != UI) {
- st->ma.tei_m.printdebug(&st->ma.tei_m,
- "mgr frame is not ui %x", skb->data[2]);
- } else {
- skb_pull(skb, 3);
- if (skb->len < 5) {
- st->ma.tei_m.printdebug(&st->ma.tei_m,
- "short mgr frame %ld/5", skb->len);
- } else if (skb->data[0] != TEI_ENTITY_ID) {
- /* wrong management entity identifier, ignore */
- st->ma.tei_m.printdebug(&st->ma.tei_m,
- "tei handler wrong entity id %x",
- skb->data[0]);
- } else {
- mt = skb->data[3];
- if (mt == ID_ASSIGNED)
- FsmEvent(&st->ma.tei_m, EV_ASSIGN, skb);
- else if (mt == ID_DENIED)
- FsmEvent(&st->ma.tei_m, EV_DENIED, skb);
- else if (mt == ID_CHK_REQ)
- FsmEvent(&st->ma.tei_m, EV_CHKREQ, skb);
- else if (mt == ID_REMOVE)
- FsmEvent(&st->ma.tei_m, EV_REMOVE, skb);
- else {
- st->ma.tei_m.printdebug(&st->ma.tei_m,
- "tei handler wrong mt %x\n", mt);
- }
- }
- }
- } else {
- st->ma.tei_m.printdebug(&st->ma.tei_m,
- "tei handler wrong pr %x\n", pr);
- }
- dev_kfree_skb(skb);
-}
-
-static void
-tei_l2tei(struct PStack *st, int pr, void *arg)
-{
- struct IsdnCardState *cs;
-
- if (test_bit(FLG_FIXED_TEI, &st->l2.flag)) {
- if (pr == (MDL_ASSIGN | INDICATION)) {
- if (st->ma.debug)
- st->ma.tei_m.printdebug(&st->ma.tei_m,
- "fixed assign tei %d", st->l2.tei);
- st->l3.l3l2(st, MDL_ASSIGN | REQUEST, (void *) (long) st->l2.tei);
- cs = (struct IsdnCardState *) st->l1.hardware;
- cs->cardmsg(cs, MDL_ASSIGN | REQUEST, NULL);
- }
- return;
- }
- switch (pr) {
- case (MDL_ASSIGN | INDICATION):
- FsmEvent(&st->ma.tei_m, EV_IDREQ, arg);
- break;
- case (MDL_ERROR | REQUEST):
- FsmEvent(&st->ma.tei_m, EV_VERIFY, arg);
- break;
- default:
- break;
- }
-}
-
-static void
-tei_debug(struct FsmInst *fi, char *fmt, ...)
-{
- va_list args;
- struct PStack *st = fi->userdata;
-
- va_start(args, fmt);
- VHiSax_putstatus(st->l1.hardware, "tei ", fmt, args);
- va_end(args);
-}
-
-void
-setstack_tei(struct PStack *st)
-{
- st->l2.l2tei = tei_l2tei;
- st->ma.T202 = 2000; /* T202 2000 milliseconds */
- st->l1.l1tei = tei_l1l2;
- st->ma.debug = 1;
- st->ma.tei_m.fsm = &teifsm;
- st->ma.tei_m.state = ST_TEI_NOP;
- st->ma.tei_m.debug = 1;
- st->ma.tei_m.userdata = st;
- st->ma.tei_m.userint = 0;
- st->ma.tei_m.printdebug = tei_debug;
- FsmInitTimer(&st->ma.tei_m, &st->ma.t202);
-}
-
-void
-init_tei(struct IsdnCardState *cs, int protocol)
-{
-}
-
-void
-release_tei(struct IsdnCardState *cs)
-{
- struct PStack *st = cs->stlist;
-
- while (st) {
- FsmDelTimer(&st->ma.t202, 1);
- st = st->next;
- }
-}
-
-static struct FsmNode TeiFnList[] __initdata =
-{
- {ST_TEI_NOP, EV_IDREQ, tei_id_request},
- {ST_TEI_NOP, EV_ASSIGN, tei_id_test_dup},
- {ST_TEI_NOP, EV_VERIFY, tei_id_verify},
- {ST_TEI_NOP, EV_REMOVE, tei_id_remove},
- {ST_TEI_NOP, EV_CHKREQ, tei_id_chk_req},
- {ST_TEI_IDREQ, EV_T202, tei_id_req_tout},
- {ST_TEI_IDREQ, EV_ASSIGN, tei_id_assign},
- {ST_TEI_IDREQ, EV_DENIED, tei_id_denied},
- {ST_TEI_IDVERIFY, EV_T202, tei_id_ver_tout},
- {ST_TEI_IDVERIFY, EV_REMOVE, tei_id_remove},
- {ST_TEI_IDVERIFY, EV_CHKREQ, tei_id_chk_req},
-};
-
-int __init
-TeiNew(void)
-{
- teifsm.state_count = TEI_STATE_COUNT;
- teifsm.event_count = TEI_EVENT_COUNT;
- teifsm.strEvent = strTeiEvent;
- teifsm.strState = strTeiState;
- return FsmNew(&teifsm, TeiFnList, ARRAY_SIZE(TeiFnList));
-}
-
-void
-TeiFree(void)
-{
- FsmFree(&teifsm);
-}
diff --git a/drivers/isdn/hisax/teleint.c b/drivers/isdn/hisax/teleint.c
deleted file mode 100644
index 247aa33076b1..000000000000
--- a/drivers/isdn/hisax/teleint.c
+++ /dev/null
@@ -1,334 +0,0 @@
-/* $Id: teleint.c,v 1.16.2.5 2004/01/19 15:31:50 keil Exp $
- *
- * low level stuff for TeleInt isdn cards
- *
- * Author Karsten Keil
- * Copyright by Karsten Keil <keil@isdn4linux.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/init.h>
-#include "hisax.h"
-#include "isac.h"
-#include "hfc_2bs0.h"
-#include "isdnl1.h"
-
-static const char *TeleInt_revision = "$Revision: 1.16.2.5 $";
-
-#define byteout(addr, val) outb(val, addr)
-#define bytein(addr) inb(addr)
-
-static inline u_char
-readreg(unsigned int ale, unsigned int adr, u_char off)
-{
- register u_char ret;
- int max_delay = 2000;
-
- byteout(ale, off);
- ret = HFC_BUSY & bytein(ale);
- while (ret && --max_delay)
- ret = HFC_BUSY & bytein(ale);
- if (!max_delay) {
- printk(KERN_WARNING "TeleInt Busy not inactive\n");
- return (0);
- }
- ret = bytein(adr);
- return (ret);
-}
-
-static inline void
-readfifo(unsigned int ale, unsigned int adr, u_char off, u_char *data, int size)
-{
- register u_char ret;
- register int max_delay = 20000;
- register int i;
-
- byteout(ale, off);
- for (i = 0; i < size; i++) {
- ret = HFC_BUSY & bytein(ale);
- while (ret && --max_delay)
- ret = HFC_BUSY & bytein(ale);
- if (!max_delay) {
- printk(KERN_WARNING "TeleInt Busy not inactive\n");
- return;
- }
- data[i] = bytein(adr);
- }
-}
-
-
-static inline void
-writereg(unsigned int ale, unsigned int adr, u_char off, u_char data)
-{
- register u_char ret;
- int max_delay = 2000;
-
- byteout(ale, off);
- ret = HFC_BUSY & bytein(ale);
- while (ret && --max_delay)
- ret = HFC_BUSY & bytein(ale);
- if (!max_delay) {
- printk(KERN_WARNING "TeleInt Busy not inactive\n");
- return;
- }
- byteout(adr, data);
-}
-
-static inline void
-writefifo(unsigned int ale, unsigned int adr, u_char off, u_char *data, int size)
-{
- register u_char ret;
- register int max_delay = 20000;
- register int i;
-
- byteout(ale, off);
- for (i = 0; i < size; i++) {
- ret = HFC_BUSY & bytein(ale);
- while (ret && --max_delay)
- ret = HFC_BUSY & bytein(ale);
- if (!max_delay) {
- printk(KERN_WARNING "TeleInt Busy not inactive\n");
- return;
- }
- byteout(adr, data[i]);
- }
-}
-
-/* Interface functions */
-
-static u_char
-ReadISAC(struct IsdnCardState *cs, u_char offset)
-{
- cs->hw.hfc.cip = offset;
- return (readreg(cs->hw.hfc.addr | 1, cs->hw.hfc.addr, offset));
-}
-
-static void
-WriteISAC(struct IsdnCardState *cs, u_char offset, u_char value)
-{
- cs->hw.hfc.cip = offset;
- writereg(cs->hw.hfc.addr | 1, cs->hw.hfc.addr, offset, value);
-}
-
-static void
-ReadISACfifo(struct IsdnCardState *cs, u_char *data, int size)
-{
- cs->hw.hfc.cip = 0;
- readfifo(cs->hw.hfc.addr | 1, cs->hw.hfc.addr, 0, data, size);
-}
-
-static void
-WriteISACfifo(struct IsdnCardState *cs, u_char *data, int size)
-{
- cs->hw.hfc.cip = 0;
- writefifo(cs->hw.hfc.addr | 1, cs->hw.hfc.addr, 0, data, size);
-}
-
-static u_char
-ReadHFC(struct IsdnCardState *cs, int data, u_char reg)
-{
- register u_char ret;
-
- if (data) {
- cs->hw.hfc.cip = reg;
- byteout(cs->hw.hfc.addr | 1, reg);
- ret = bytein(cs->hw.hfc.addr);
- if (cs->debug & L1_DEB_HSCX_FIFO && (data != 2))
- debugl1(cs, "hfc RD %02x %02x", reg, ret);
- } else
- ret = bytein(cs->hw.hfc.addr | 1);
- return (ret);
-}
-
-static void
-WriteHFC(struct IsdnCardState *cs, int data, u_char reg, u_char value)
-{
- byteout(cs->hw.hfc.addr | 1, reg);
- cs->hw.hfc.cip = reg;
- if (data)
- byteout(cs->hw.hfc.addr, value);
- if (cs->debug & L1_DEB_HSCX_FIFO && (data != 2))
- debugl1(cs, "hfc W%c %02x %02x", data ? 'D' : 'C', reg, value);
-}
-
-static irqreturn_t
-TeleInt_interrupt(int intno, void *dev_id)
-{
- struct IsdnCardState *cs = dev_id;
- u_char val;
- u_long flags;
-
- spin_lock_irqsave(&cs->lock, flags);
- val = readreg(cs->hw.hfc.addr | 1, cs->hw.hfc.addr, ISAC_ISTA);
-Start_ISAC:
- if (val)
- isac_interrupt(cs, val);
- val = readreg(cs->hw.hfc.addr | 1, cs->hw.hfc.addr, ISAC_ISTA);
- if (val) {
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "ISAC IntStat after IntRoutine");
- goto Start_ISAC;
- }
- writereg(cs->hw.hfc.addr | 1, cs->hw.hfc.addr, ISAC_MASK, 0xFF);
- writereg(cs->hw.hfc.addr | 1, cs->hw.hfc.addr, ISAC_MASK, 0x0);
- spin_unlock_irqrestore(&cs->lock, flags);
- return IRQ_HANDLED;
-}
-
-static void
-TeleInt_Timer(struct timer_list *t)
-{
- struct IsdnCardState *cs = from_timer(cs, t, hw.hfc.timer);
- int stat = 0;
- u_long flags;
-
- spin_lock_irqsave(&cs->lock, flags);
- if (cs->bcs[0].mode) {
- stat |= 1;
- main_irq_hfc(&cs->bcs[0]);
- }
- if (cs->bcs[1].mode) {
- stat |= 2;
- main_irq_hfc(&cs->bcs[1]);
- }
- spin_unlock_irqrestore(&cs->lock, flags);
- stat = HZ / 100;
- if (!stat)
- stat = 1;
- cs->hw.hfc.timer.expires = jiffies + stat;
- add_timer(&cs->hw.hfc.timer);
-}
-
-static void
-release_io_TeleInt(struct IsdnCardState *cs)
-{
- del_timer(&cs->hw.hfc.timer);
- releasehfc(cs);
- if (cs->hw.hfc.addr)
- release_region(cs->hw.hfc.addr, 2);
-}
-
-static void
-reset_TeleInt(struct IsdnCardState *cs)
-{
- printk(KERN_INFO "TeleInt: resetting card\n");
- cs->hw.hfc.cirm |= HFC_RESET;
- byteout(cs->hw.hfc.addr | 1, cs->hw.hfc.cirm); /* Reset On */
- mdelay(10);
- cs->hw.hfc.cirm &= ~HFC_RESET;
- byteout(cs->hw.hfc.addr | 1, cs->hw.hfc.cirm); /* Reset Off */
- mdelay(10);
-}
-
-static int
-TeleInt_card_msg(struct IsdnCardState *cs, int mt, void *arg)
-{
- u_long flags;
- int delay;
-
- switch (mt) {
- case CARD_RESET:
- spin_lock_irqsave(&cs->lock, flags);
- reset_TeleInt(cs);
- spin_unlock_irqrestore(&cs->lock, flags);
- return (0);
- case CARD_RELEASE:
- release_io_TeleInt(cs);
- return (0);
- case CARD_INIT:
- spin_lock_irqsave(&cs->lock, flags);
- reset_TeleInt(cs);
- inithfc(cs);
- clear_pending_isac_ints(cs);
- initisac(cs);
- /* Reenable all IRQ */
- cs->writeisac(cs, ISAC_MASK, 0);
- cs->writeisac(cs, ISAC_CMDR, 0x41);
- spin_unlock_irqrestore(&cs->lock, flags);
- delay = HZ / 100;
- if (!delay)
- delay = 1;
- cs->hw.hfc.timer.expires = jiffies + delay;
- add_timer(&cs->hw.hfc.timer);
- return (0);
- case CARD_TEST:
- return (0);
- }
- return (0);
-}
-
-int setup_TeleInt(struct IsdnCard *card)
-{
- struct IsdnCardState *cs = card->cs;
- char tmp[64];
-
- strcpy(tmp, TeleInt_revision);
- printk(KERN_INFO "HiSax: TeleInt driver Rev. %s\n", HiSax_getrev(tmp));
- if (cs->typ != ISDN_CTYPE_TELEINT)
- return (0);
-
- cs->hw.hfc.addr = card->para[1] & 0x3fe;
- cs->irq = card->para[0];
- cs->hw.hfc.cirm = HFC_CIRM;
- cs->hw.hfc.isac_spcr = 0x00;
- cs->hw.hfc.cip = 0;
- cs->hw.hfc.ctmt = HFC_CTMT | HFC_CLTIMER;
- cs->bcs[0].hw.hfc.send = NULL;
- cs->bcs[1].hw.hfc.send = NULL;
- cs->hw.hfc.fifosize = 7 * 1024 + 512;
- timer_setup(&cs->hw.hfc.timer, TeleInt_Timer, 0);
- if (!request_region(cs->hw.hfc.addr, 2, "TeleInt isdn")) {
- printk(KERN_WARNING
- "HiSax: TeleInt config port %x-%x already in use\n",
- cs->hw.hfc.addr,
- cs->hw.hfc.addr + 2);
- return (0);
- }
- /* HW IO = IO */
- byteout(cs->hw.hfc.addr, cs->hw.hfc.addr & 0xff);
- byteout(cs->hw.hfc.addr | 1, ((cs->hw.hfc.addr & 0x300) >> 8) | 0x54);
- switch (cs->irq) {
- case 3:
- cs->hw.hfc.cirm |= HFC_INTA;
- break;
- case 4:
- cs->hw.hfc.cirm |= HFC_INTB;
- break;
- case 5:
- cs->hw.hfc.cirm |= HFC_INTC;
- break;
- case 7:
- cs->hw.hfc.cirm |= HFC_INTD;
- break;
- case 10:
- cs->hw.hfc.cirm |= HFC_INTE;
- break;
- case 11:
- cs->hw.hfc.cirm |= HFC_INTF;
- break;
- default:
- printk(KERN_WARNING "TeleInt: wrong IRQ\n");
- release_io_TeleInt(cs);
- return (0);
- }
- byteout(cs->hw.hfc.addr | 1, cs->hw.hfc.cirm);
- byteout(cs->hw.hfc.addr | 1, cs->hw.hfc.ctmt);
-
- printk(KERN_INFO "TeleInt: defined at 0x%x IRQ %d\n",
- cs->hw.hfc.addr, cs->irq);
-
- setup_isac(cs);
- cs->readisac = &ReadISAC;
- cs->writeisac = &WriteISAC;
- cs->readisacfifo = &ReadISACfifo;
- cs->writeisacfifo = &WriteISACfifo;
- cs->BC_Read_Reg = &ReadHFC;
- cs->BC_Write_Reg = &WriteHFC;
- cs->cardmsg = &TeleInt_card_msg;
- cs->irq_func = &TeleInt_interrupt;
- ISACVersion(cs, "TeleInt:");
- return (1);
-}
diff --git a/drivers/isdn/hisax/teles0.c b/drivers/isdn/hisax/teles0.c
deleted file mode 100644
index ce9eabdd2f6e..000000000000
--- a/drivers/isdn/hisax/teles0.c
+++ /dev/null
@@ -1,364 +0,0 @@
-/* $Id: teles0.c,v 2.15.2.4 2004/01/13 23:48:39 keil Exp $
- *
- * low level stuff for Teles Memory IO isdn cards
- *
- * Author Karsten Keil
- * based on the teles driver from Jan den Ouden
- * Copyright by Karsten Keil <keil@isdn4linux.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- * Thanks to Jan den Ouden
- * Fritz Elfert
- * Beat Doebeli
- *
- */
-
-#include <linux/init.h>
-#include "hisax.h"
-#include "isdnl1.h"
-#include "isac.h"
-#include "hscx.h"
-
-static const char *teles0_revision = "$Revision: 2.15.2.4 $";
-
-#define TELES_IOMEM_SIZE 0x400
-#define byteout(addr, val) outb(val, addr)
-#define bytein(addr) inb(addr)
-
-static inline u_char
-readisac(void __iomem *adr, u_char off)
-{
- return readb(adr + ((off & 1) ? 0x2ff : 0x100) + off);
-}
-
-static inline void
-writeisac(void __iomem *adr, u_char off, u_char data)
-{
- writeb(data, adr + ((off & 1) ? 0x2ff : 0x100) + off); mb();
-}
-
-
-static inline u_char
-readhscx(void __iomem *adr, int hscx, u_char off)
-{
- return readb(adr + (hscx ? 0x1c0 : 0x180) +
- ((off & 1) ? 0x1ff : 0) + off);
-}
-
-static inline void
-writehscx(void __iomem *adr, int hscx, u_char off, u_char data)
-{
- writeb(data, adr + (hscx ? 0x1c0 : 0x180) +
- ((off & 1) ? 0x1ff : 0) + off); mb();
-}
-
-static inline void
-read_fifo_isac(void __iomem *adr, u_char *data, int size)
-{
- register int i;
- register u_char __iomem *ad = adr + 0x100;
- for (i = 0; i < size; i++)
- data[i] = readb(ad);
-}
-
-static inline void
-write_fifo_isac(void __iomem *adr, u_char *data, int size)
-{
- register int i;
- register u_char __iomem *ad = adr + 0x100;
- for (i = 0; i < size; i++) {
- writeb(data[i], ad); mb();
- }
-}
-
-static inline void
-read_fifo_hscx(void __iomem *adr, int hscx, u_char *data, int size)
-{
- register int i;
- register u_char __iomem *ad = adr + (hscx ? 0x1c0 : 0x180);
- for (i = 0; i < size; i++)
- data[i] = readb(ad);
-}
-
-static inline void
-write_fifo_hscx(void __iomem *adr, int hscx, u_char *data, int size)
-{
- int i;
- register u_char __iomem *ad = adr + (hscx ? 0x1c0 : 0x180);
- for (i = 0; i < size; i++) {
- writeb(data[i], ad); mb();
- }
-}
-
-/* Interface functions */
-
-static u_char
-ReadISAC(struct IsdnCardState *cs, u_char offset)
-{
- return (readisac(cs->hw.teles0.membase, offset));
-}
-
-static void
-WriteISAC(struct IsdnCardState *cs, u_char offset, u_char value)
-{
- writeisac(cs->hw.teles0.membase, offset, value);
-}
-
-static void
-ReadISACfifo(struct IsdnCardState *cs, u_char *data, int size)
-{
- read_fifo_isac(cs->hw.teles0.membase, data, size);
-}
-
-static void
-WriteISACfifo(struct IsdnCardState *cs, u_char *data, int size)
-{
- write_fifo_isac(cs->hw.teles0.membase, data, size);
-}
-
-static u_char
-ReadHSCX(struct IsdnCardState *cs, int hscx, u_char offset)
-{
- return (readhscx(cs->hw.teles0.membase, hscx, offset));
-}
-
-static void
-WriteHSCX(struct IsdnCardState *cs, int hscx, u_char offset, u_char value)
-{
- writehscx(cs->hw.teles0.membase, hscx, offset, value);
-}
-
-/*
- * fast interrupt HSCX stuff goes here
- */
-
-#define READHSCX(cs, nr, reg) readhscx(cs->hw.teles0.membase, nr, reg)
-#define WRITEHSCX(cs, nr, reg, data) writehscx(cs->hw.teles0.membase, nr, reg, data)
-#define READHSCXFIFO(cs, nr, ptr, cnt) read_fifo_hscx(cs->hw.teles0.membase, nr, ptr, cnt)
-#define WRITEHSCXFIFO(cs, nr, ptr, cnt) write_fifo_hscx(cs->hw.teles0.membase, nr, ptr, cnt)
-
-#include "hscx_irq.c"
-
-static irqreturn_t
-teles0_interrupt(int intno, void *dev_id)
-{
- struct IsdnCardState *cs = dev_id;
- u_char val;
- u_long flags;
- int count = 0;
-
- spin_lock_irqsave(&cs->lock, flags);
- val = readhscx(cs->hw.teles0.membase, 1, HSCX_ISTA);
-Start_HSCX:
- if (val)
- hscx_int_main(cs, val);
- val = readisac(cs->hw.teles0.membase, ISAC_ISTA);
-Start_ISAC:
- if (val)
- isac_interrupt(cs, val);
- count++;
- val = readhscx(cs->hw.teles0.membase, 1, HSCX_ISTA);
- if (val && count < 5) {
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "HSCX IntStat after IntRoutine");
- goto Start_HSCX;
- }
- val = readisac(cs->hw.teles0.membase, ISAC_ISTA);
- if (val && count < 5) {
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "ISAC IntStat after IntRoutine");
- goto Start_ISAC;
- }
- writehscx(cs->hw.teles0.membase, 0, HSCX_MASK, 0xFF);
- writehscx(cs->hw.teles0.membase, 1, HSCX_MASK, 0xFF);
- writeisac(cs->hw.teles0.membase, ISAC_MASK, 0xFF);
- writeisac(cs->hw.teles0.membase, ISAC_MASK, 0x0);
- writehscx(cs->hw.teles0.membase, 0, HSCX_MASK, 0x0);
- writehscx(cs->hw.teles0.membase, 1, HSCX_MASK, 0x0);
- spin_unlock_irqrestore(&cs->lock, flags);
- return IRQ_HANDLED;
-}
-
-static void
-release_io_teles0(struct IsdnCardState *cs)
-{
- if (cs->hw.teles0.cfg_reg)
- release_region(cs->hw.teles0.cfg_reg, 8);
- iounmap(cs->hw.teles0.membase);
- release_mem_region(cs->hw.teles0.phymem, TELES_IOMEM_SIZE);
-}
-
-static int
-reset_teles0(struct IsdnCardState *cs)
-{
- u_char cfval;
-
- if (cs->hw.teles0.cfg_reg) {
- switch (cs->irq) {
- case 2:
- case 9:
- cfval = 0x00;
- break;
- case 3:
- cfval = 0x02;
- break;
- case 4:
- cfval = 0x04;
- break;
- case 5:
- cfval = 0x06;
- break;
- case 10:
- cfval = 0x08;
- break;
- case 11:
- cfval = 0x0A;
- break;
- case 12:
- cfval = 0x0C;
- break;
- case 15:
- cfval = 0x0E;
- break;
- default:
- return (1);
- }
- cfval |= ((cs->hw.teles0.phymem >> 9) & 0xF0);
- byteout(cs->hw.teles0.cfg_reg + 4, cfval);
- HZDELAY(HZ / 10 + 1);
- byteout(cs->hw.teles0.cfg_reg + 4, cfval | 1);
- HZDELAY(HZ / 10 + 1);
- }
- writeb(0, cs->hw.teles0.membase + 0x80); mb();
- HZDELAY(HZ / 5 + 1);
- writeb(1, cs->hw.teles0.membase + 0x80); mb();
- HZDELAY(HZ / 5 + 1);
- return (0);
-}
-
-static int
-Teles_card_msg(struct IsdnCardState *cs, int mt, void *arg)
-{
- u_long flags;
-
- switch (mt) {
- case CARD_RESET:
- spin_lock_irqsave(&cs->lock, flags);
- reset_teles0(cs);
- spin_unlock_irqrestore(&cs->lock, flags);
- return (0);
- case CARD_RELEASE:
- release_io_teles0(cs);
- return (0);
- case CARD_INIT:
- spin_lock_irqsave(&cs->lock, flags);
- inithscxisac(cs, 3);
- spin_unlock_irqrestore(&cs->lock, flags);
- return (0);
- case CARD_TEST:
- return (0);
- }
- return (0);
-}
-
-int setup_teles0(struct IsdnCard *card)
-{
- u_char val;
- struct IsdnCardState *cs = card->cs;
- char tmp[64];
-
- strcpy(tmp, teles0_revision);
- printk(KERN_INFO "HiSax: Teles 8.0/16.0 driver Rev. %s\n", HiSax_getrev(tmp));
- if ((cs->typ != ISDN_CTYPE_16_0) && (cs->typ != ISDN_CTYPE_8_0))
- return (0);
-
- if (cs->typ == ISDN_CTYPE_16_0)
- cs->hw.teles0.cfg_reg = card->para[2];
- else /* 8.0 */
- cs->hw.teles0.cfg_reg = 0;
-
- if (card->para[1] < 0x10000) {
- card->para[1] <<= 4;
- printk(KERN_INFO
- "Teles0: membase configured DOSish, assuming 0x%lx\n",
- (unsigned long) card->para[1]);
- }
- cs->irq = card->para[0];
- if (cs->hw.teles0.cfg_reg) {
- if (!request_region(cs->hw.teles0.cfg_reg, 8, "teles cfg")) {
- printk(KERN_WARNING
- "HiSax: %s config port %x-%x already in use\n",
- CardType[card->typ],
- cs->hw.teles0.cfg_reg,
- cs->hw.teles0.cfg_reg + 8);
- return (0);
- }
- }
- if (cs->hw.teles0.cfg_reg) {
- if ((val = bytein(cs->hw.teles0.cfg_reg + 0)) != 0x51) {
- printk(KERN_WARNING "Teles0: 16.0 Byte at %x is %x\n",
- cs->hw.teles0.cfg_reg + 0, val);
- release_region(cs->hw.teles0.cfg_reg, 8);
- return (0);
- }
- if ((val = bytein(cs->hw.teles0.cfg_reg + 1)) != 0x93) {
- printk(KERN_WARNING "Teles0: 16.0 Byte at %x is %x\n",
- cs->hw.teles0.cfg_reg + 1, val);
- release_region(cs->hw.teles0.cfg_reg, 8);
- return (0);
- }
- val = bytein(cs->hw.teles0.cfg_reg + 2); /* 0x1e=without AB
- * 0x1f=with AB
- * 0x1c 16.3 ???
- */
- if (val != 0x1e && val != 0x1f) {
- printk(KERN_WARNING "Teles0: 16.0 Byte at %x is %x\n",
- cs->hw.teles0.cfg_reg + 2, val);
- release_region(cs->hw.teles0.cfg_reg, 8);
- return (0);
- }
- }
- /* 16.0 and 8.0 designed for IOM1 */
- test_and_set_bit(HW_IOM1, &cs->HW_Flags);
- cs->hw.teles0.phymem = card->para[1];
- if (!request_mem_region(cs->hw.teles0.phymem, TELES_IOMEM_SIZE, "teles iomem")) {
- printk(KERN_WARNING
- "HiSax: %s memory region %lx-%lx already in use\n",
- CardType[card->typ],
- cs->hw.teles0.phymem,
- cs->hw.teles0.phymem + TELES_IOMEM_SIZE);
- if (cs->hw.teles0.cfg_reg)
- release_region(cs->hw.teles0.cfg_reg, 8);
- return (0);
- }
- cs->hw.teles0.membase = ioremap(cs->hw.teles0.phymem, TELES_IOMEM_SIZE);
- printk(KERN_INFO
- "HiSax: %s config irq:%d mem:%p cfg:0x%X\n",
- CardType[cs->typ], cs->irq,
- cs->hw.teles0.membase, cs->hw.teles0.cfg_reg);
- if (reset_teles0(cs)) {
- printk(KERN_WARNING "Teles0: wrong IRQ\n");
- release_io_teles0(cs);
- return (0);
- }
- setup_isac(cs);
- cs->readisac = &ReadISAC;
- cs->writeisac = &WriteISAC;
- cs->readisacfifo = &ReadISACfifo;
- cs->writeisacfifo = &WriteISACfifo;
- cs->BC_Read_Reg = &ReadHSCX;
- cs->BC_Write_Reg = &WriteHSCX;
- cs->BC_Send_Data = &hscx_fill_fifo;
- cs->cardmsg = &Teles_card_msg;
- cs->irq_func = &teles0_interrupt;
- ISACVersion(cs, "Teles0:");
- if (HscxVersion(cs, "Teles0:")) {
- printk(KERN_WARNING
- "Teles0: wrong HSCX versions check IO/MEM addresses\n");
- release_io_teles0(cs);
- return (0);
- }
- return (1);
-}
diff --git a/drivers/isdn/hisax/teles3.c b/drivers/isdn/hisax/teles3.c
deleted file mode 100644
index 1eef693f04f0..000000000000
--- a/drivers/isdn/hisax/teles3.c
+++ /dev/null
@@ -1,498 +0,0 @@
-/* $Id: teles3.c,v 2.19.2.4 2004/01/13 23:48:39 keil Exp $
- *
- * low level stuff for Teles 16.3 & PNP isdn cards
- *
- * Author Karsten Keil
- * Copyright by Karsten Keil <keil@isdn4linux.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- * Thanks to Jan den Ouden
- * Fritz Elfert
- * Beat Doebeli
- *
- */
-#include <linux/init.h>
-#include <linux/isapnp.h>
-#include "hisax.h"
-#include "isac.h"
-#include "hscx.h"
-#include "isdnl1.h"
-
-static const char *teles3_revision = "$Revision: 2.19.2.4 $";
-
-#define byteout(addr, val) outb(val, addr)
-#define bytein(addr) inb(addr)
-
-static inline u_char
-readreg(unsigned int adr, u_char off)
-{
- return (bytein(adr + off));
-}
-
-static inline void
-writereg(unsigned int adr, u_char off, u_char data)
-{
- byteout(adr + off, data);
-}
-
-
-static inline void
-read_fifo(unsigned int adr, u_char *data, int size)
-{
- insb(adr, data, size);
-}
-
-static void
-write_fifo(unsigned int adr, u_char *data, int size)
-{
- outsb(adr, data, size);
-}
-
-/* Interface functions */
-
-static u_char
-ReadISAC(struct IsdnCardState *cs, u_char offset)
-{
- return (readreg(cs->hw.teles3.isac, offset));
-}
-
-static void
-WriteISAC(struct IsdnCardState *cs, u_char offset, u_char value)
-{
- writereg(cs->hw.teles3.isac, offset, value);
-}
-
-static void
-ReadISACfifo(struct IsdnCardState *cs, u_char *data, int size)
-{
- read_fifo(cs->hw.teles3.isacfifo, data, size);
-}
-
-static void
-WriteISACfifo(struct IsdnCardState *cs, u_char *data, int size)
-{
- write_fifo(cs->hw.teles3.isacfifo, data, size);
-}
-
-static u_char
-ReadHSCX(struct IsdnCardState *cs, int hscx, u_char offset)
-{
- return (readreg(cs->hw.teles3.hscx[hscx], offset));
-}
-
-static void
-WriteHSCX(struct IsdnCardState *cs, int hscx, u_char offset, u_char value)
-{
- writereg(cs->hw.teles3.hscx[hscx], offset, value);
-}
-
-/*
- * fast interrupt HSCX stuff goes here
- */
-
-#define READHSCX(cs, nr, reg) readreg(cs->hw.teles3.hscx[nr], reg)
-#define WRITEHSCX(cs, nr, reg, data) writereg(cs->hw.teles3.hscx[nr], reg, data)
-#define READHSCXFIFO(cs, nr, ptr, cnt) read_fifo(cs->hw.teles3.hscxfifo[nr], ptr, cnt)
-#define WRITEHSCXFIFO(cs, nr, ptr, cnt) write_fifo(cs->hw.teles3.hscxfifo[nr], ptr, cnt)
-
-#include "hscx_irq.c"
-
-static irqreturn_t
-teles3_interrupt(int intno, void *dev_id)
-{
-#define MAXCOUNT 5
- struct IsdnCardState *cs = dev_id;
- u_char val;
- u_long flags;
- int count = 0;
-
- spin_lock_irqsave(&cs->lock, flags);
- val = readreg(cs->hw.teles3.hscx[1], HSCX_ISTA);
-Start_HSCX:
- if (val)
- hscx_int_main(cs, val);
- val = readreg(cs->hw.teles3.isac, ISAC_ISTA);
-Start_ISAC:
- if (val)
- isac_interrupt(cs, val);
- count++;
- val = readreg(cs->hw.teles3.hscx[1], HSCX_ISTA);
- if (val && count < MAXCOUNT) {
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "HSCX IntStat after IntRoutine");
- goto Start_HSCX;
- }
- val = readreg(cs->hw.teles3.isac, ISAC_ISTA);
- if (val && count < MAXCOUNT) {
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "ISAC IntStat after IntRoutine");
- goto Start_ISAC;
- }
- if (count >= MAXCOUNT)
- printk(KERN_WARNING "Teles3: more than %d loops in teles3_interrupt\n", count);
- writereg(cs->hw.teles3.hscx[0], HSCX_MASK, 0xFF);
- writereg(cs->hw.teles3.hscx[1], HSCX_MASK, 0xFF);
- writereg(cs->hw.teles3.isac, ISAC_MASK, 0xFF);
- writereg(cs->hw.teles3.isac, ISAC_MASK, 0x0);
- writereg(cs->hw.teles3.hscx[0], HSCX_MASK, 0x0);
- writereg(cs->hw.teles3.hscx[1], HSCX_MASK, 0x0);
- spin_unlock_irqrestore(&cs->lock, flags);
- return IRQ_HANDLED;
-}
-
-static inline void
-release_ioregs(struct IsdnCardState *cs, int mask)
-{
- if (mask & 1)
- release_region(cs->hw.teles3.isac + 32, 32);
- if (mask & 2)
- release_region(cs->hw.teles3.hscx[0] + 32, 32);
- if (mask & 4)
- release_region(cs->hw.teles3.hscx[1] + 32, 32);
-}
-
-static void
-release_io_teles3(struct IsdnCardState *cs)
-{
- if (cs->typ == ISDN_CTYPE_TELESPCMCIA) {
- release_region(cs->hw.teles3.hscx[1], 96);
- } else {
- if (cs->hw.teles3.cfg_reg) {
- if (cs->typ == ISDN_CTYPE_COMPAQ_ISA) {
- release_region(cs->hw.teles3.cfg_reg, 1);
- } else {
- release_region(cs->hw.teles3.cfg_reg, 8);
- }
- }
- release_ioregs(cs, 0x7);
- }
-}
-
-static int
-reset_teles3(struct IsdnCardState *cs)
-{
- u_char irqcfg;
-
- if (cs->typ != ISDN_CTYPE_TELESPCMCIA) {
- if ((cs->hw.teles3.cfg_reg) && (cs->typ != ISDN_CTYPE_COMPAQ_ISA)) {
- switch (cs->irq) {
- case 2:
- case 9:
- irqcfg = 0x00;
- break;
- case 3:
- irqcfg = 0x02;
- break;
- case 4:
- irqcfg = 0x04;
- break;
- case 5:
- irqcfg = 0x06;
- break;
- case 10:
- irqcfg = 0x08;
- break;
- case 11:
- irqcfg = 0x0A;
- break;
- case 12:
- irqcfg = 0x0C;
- break;
- case 15:
- irqcfg = 0x0E;
- break;
- default:
- return (1);
- }
- byteout(cs->hw.teles3.cfg_reg + 4, irqcfg);
- HZDELAY(HZ / 10 + 1);
- byteout(cs->hw.teles3.cfg_reg + 4, irqcfg | 1);
- HZDELAY(HZ / 10 + 1);
- } else if (cs->typ == ISDN_CTYPE_COMPAQ_ISA) {
- byteout(cs->hw.teles3.cfg_reg, 0xff);
- HZDELAY(2);
- byteout(cs->hw.teles3.cfg_reg, 0x00);
- HZDELAY(2);
- } else {
- /* Reset off for 16.3 PnP , thanks to Georg Acher */
- byteout(cs->hw.teles3.isac + 0x3c, 0);
- HZDELAY(2);
- byteout(cs->hw.teles3.isac + 0x3c, 1);
- HZDELAY(2);
- }
- }
- return (0);
-}
-
-static int
-Teles_card_msg(struct IsdnCardState *cs, int mt, void *arg)
-{
- u_long flags;
-
- switch (mt) {
- case CARD_RESET:
- spin_lock_irqsave(&cs->lock, flags);
- reset_teles3(cs);
- spin_unlock_irqrestore(&cs->lock, flags);
- return (0);
- case CARD_RELEASE:
- release_io_teles3(cs);
- return (0);
- case CARD_INIT:
- spin_lock_irqsave(&cs->lock, flags);
- inithscxisac(cs, 3);
- spin_unlock_irqrestore(&cs->lock, flags);
- return (0);
- case CARD_TEST:
- return (0);
- }
- return (0);
-}
-
-#ifdef __ISAPNP__
-
-static struct isapnp_device_id teles_ids[] = {
- { ISAPNP_VENDOR('T', 'A', 'G'), ISAPNP_FUNCTION(0x2110),
- ISAPNP_VENDOR('T', 'A', 'G'), ISAPNP_FUNCTION(0x2110),
- (unsigned long) "Teles 16.3 PnP" },
- { ISAPNP_VENDOR('C', 'T', 'X'), ISAPNP_FUNCTION(0x0),
- ISAPNP_VENDOR('C', 'T', 'X'), ISAPNP_FUNCTION(0x0),
- (unsigned long) "Creatix 16.3 PnP" },
- { ISAPNP_VENDOR('C', 'P', 'Q'), ISAPNP_FUNCTION(0x1002),
- ISAPNP_VENDOR('C', 'P', 'Q'), ISAPNP_FUNCTION(0x1002),
- (unsigned long) "Compaq ISDN S0" },
- { 0, }
-};
-
-static struct isapnp_device_id *ipid = &teles_ids[0];
-static struct pnp_card *pnp_c = NULL;
-#endif
-
-int setup_teles3(struct IsdnCard *card)
-{
- u_char val;
- struct IsdnCardState *cs = card->cs;
- char tmp[64];
-
- strcpy(tmp, teles3_revision);
- printk(KERN_INFO "HiSax: Teles IO driver Rev. %s\n", HiSax_getrev(tmp));
- if ((cs->typ != ISDN_CTYPE_16_3) && (cs->typ != ISDN_CTYPE_PNP)
- && (cs->typ != ISDN_CTYPE_TELESPCMCIA) && (cs->typ != ISDN_CTYPE_COMPAQ_ISA))
- return (0);
-
-#ifdef __ISAPNP__
- if (!card->para[1] && isapnp_present()) {
- struct pnp_dev *pnp_d;
- while (ipid->card_vendor) {
- if ((pnp_c = pnp_find_card(ipid->card_vendor,
- ipid->card_device, pnp_c))) {
- pnp_d = NULL;
- if ((pnp_d = pnp_find_dev(pnp_c,
- ipid->vendor, ipid->function, pnp_d))) {
- int err;
-
- printk(KERN_INFO "HiSax: %s detected\n",
- (char *)ipid->driver_data);
- pnp_disable_dev(pnp_d);
- err = pnp_activate_dev(pnp_d);
- if (err < 0) {
- printk(KERN_WARNING "%s: pnp_activate_dev ret(%d)\n",
- __func__, err);
- return (0);
- }
- card->para[3] = pnp_port_start(pnp_d, 2);
- card->para[2] = pnp_port_start(pnp_d, 1);
- card->para[1] = pnp_port_start(pnp_d, 0);
- card->para[0] = pnp_irq(pnp_d, 0);
- if (card->para[0] == -1 || !card->para[1] || !card->para[2]) {
- printk(KERN_ERR "Teles PnP:some resources are missing %ld/%lx/%lx\n",
- card->para[0], card->para[1], card->para[2]);
- pnp_disable_dev(pnp_d);
- return (0);
- }
- break;
- } else {
- printk(KERN_ERR "Teles PnP: PnP error card found, no device\n");
- }
- }
- ipid++;
- pnp_c = NULL;
- }
- if (!ipid->card_vendor) {
- printk(KERN_INFO "Teles PnP: no ISAPnP card found\n");
- return (0);
- }
- }
-#endif
- if (cs->typ == ISDN_CTYPE_16_3) {
- cs->hw.teles3.cfg_reg = card->para[1];
- switch (cs->hw.teles3.cfg_reg) {
- case 0x180:
- case 0x280:
- case 0x380:
- cs->hw.teles3.cfg_reg |= 0xc00;
- break;
- }
- cs->hw.teles3.isac = cs->hw.teles3.cfg_reg - 0x420;
- cs->hw.teles3.hscx[0] = cs->hw.teles3.cfg_reg - 0xc20;
- cs->hw.teles3.hscx[1] = cs->hw.teles3.cfg_reg - 0x820;
- } else if (cs->typ == ISDN_CTYPE_TELESPCMCIA) {
- cs->hw.teles3.cfg_reg = 0;
- cs->hw.teles3.hscx[0] = card->para[1] - 0x20;
- cs->hw.teles3.hscx[1] = card->para[1];
- cs->hw.teles3.isac = card->para[1] + 0x20;
- } else if (cs->typ == ISDN_CTYPE_COMPAQ_ISA) {
- cs->hw.teles3.cfg_reg = card->para[3];
- cs->hw.teles3.isac = card->para[2] - 32;
- cs->hw.teles3.hscx[0] = card->para[1] - 32;
- cs->hw.teles3.hscx[1] = card->para[1];
- } else { /* PNP */
- cs->hw.teles3.cfg_reg = 0;
- cs->hw.teles3.isac = card->para[1] - 32;
- cs->hw.teles3.hscx[0] = card->para[2] - 32;
- cs->hw.teles3.hscx[1] = card->para[2];
- }
- cs->irq = card->para[0];
- cs->hw.teles3.isacfifo = cs->hw.teles3.isac + 0x3e;
- cs->hw.teles3.hscxfifo[0] = cs->hw.teles3.hscx[0] + 0x3e;
- cs->hw.teles3.hscxfifo[1] = cs->hw.teles3.hscx[1] + 0x3e;
- if (cs->typ == ISDN_CTYPE_TELESPCMCIA) {
- if (!request_region(cs->hw.teles3.hscx[1], 96, "HiSax Teles PCMCIA")) {
- printk(KERN_WARNING
- "HiSax: %s ports %x-%x already in use\n",
- CardType[cs->typ],
- cs->hw.teles3.hscx[1],
- cs->hw.teles3.hscx[1] + 96);
- return (0);
- }
- cs->irq_flags |= IRQF_SHARED; /* cardbus can share */
- } else {
- if (cs->hw.teles3.cfg_reg) {
- if (cs->typ == ISDN_CTYPE_COMPAQ_ISA) {
- if (!request_region(cs->hw.teles3.cfg_reg, 1, "teles3 cfg")) {
- printk(KERN_WARNING
- "HiSax: %s config port %x already in use\n",
- CardType[card->typ],
- cs->hw.teles3.cfg_reg);
- return (0);
- }
- } else {
- if (!request_region(cs->hw.teles3.cfg_reg, 8, "teles3 cfg")) {
- printk(KERN_WARNING
- "HiSax: %s config port %x-%x already in use\n",
- CardType[card->typ],
- cs->hw.teles3.cfg_reg,
- cs->hw.teles3.cfg_reg + 8);
- return (0);
- }
- }
- }
- if (!request_region(cs->hw.teles3.isac + 32, 32, "HiSax isac")) {
- printk(KERN_WARNING
- "HiSax: %s isac ports %x-%x already in use\n",
- CardType[cs->typ],
- cs->hw.teles3.isac + 32,
- cs->hw.teles3.isac + 64);
- if (cs->hw.teles3.cfg_reg) {
- if (cs->typ == ISDN_CTYPE_COMPAQ_ISA) {
- release_region(cs->hw.teles3.cfg_reg, 1);
- } else {
- release_region(cs->hw.teles3.cfg_reg, 8);
- }
- }
- return (0);
- }
- if (!request_region(cs->hw.teles3.hscx[0] + 32, 32, "HiSax hscx A")) {
- printk(KERN_WARNING
- "HiSax: %s hscx A ports %x-%x already in use\n",
- CardType[cs->typ],
- cs->hw.teles3.hscx[0] + 32,
- cs->hw.teles3.hscx[0] + 64);
- if (cs->hw.teles3.cfg_reg) {
- if (cs->typ == ISDN_CTYPE_COMPAQ_ISA) {
- release_region(cs->hw.teles3.cfg_reg, 1);
- } else {
- release_region(cs->hw.teles3.cfg_reg, 8);
- }
- }
- release_ioregs(cs, 1);
- return (0);
- }
- if (!request_region(cs->hw.teles3.hscx[1] + 32, 32, "HiSax hscx B")) {
- printk(KERN_WARNING
- "HiSax: %s hscx B ports %x-%x already in use\n",
- CardType[cs->typ],
- cs->hw.teles3.hscx[1] + 32,
- cs->hw.teles3.hscx[1] + 64);
- if (cs->hw.teles3.cfg_reg) {
- if (cs->typ == ISDN_CTYPE_COMPAQ_ISA) {
- release_region(cs->hw.teles3.cfg_reg, 1);
- } else {
- release_region(cs->hw.teles3.cfg_reg, 8);
- }
- }
- release_ioregs(cs, 3);
- return (0);
- }
- }
- if ((cs->hw.teles3.cfg_reg) && (cs->typ != ISDN_CTYPE_COMPAQ_ISA)) {
- if ((val = bytein(cs->hw.teles3.cfg_reg + 0)) != 0x51) {
- printk(KERN_WARNING "Teles: 16.3 Byte at %x is %x\n",
- cs->hw.teles3.cfg_reg + 0, val);
- release_io_teles3(cs);
- return (0);
- }
- if ((val = bytein(cs->hw.teles3.cfg_reg + 1)) != 0x93) {
- printk(KERN_WARNING "Teles: 16.3 Byte at %x is %x\n",
- cs->hw.teles3.cfg_reg + 1, val);
- release_io_teles3(cs);
- return (0);
- }
- val = bytein(cs->hw.teles3.cfg_reg + 2);/* 0x1e=without AB
- * 0x1f=with AB
- * 0x1c 16.3 ???
- * 0x39 16.3 1.1
- * 0x38 16.3 1.3
- * 0x46 16.3 with AB + Video (Teles-Vision)
- */
- if (val != 0x46 && val != 0x39 && val != 0x38 && val != 0x1c && val != 0x1e && val != 0x1f) {
- printk(KERN_WARNING "Teles: 16.3 Byte at %x is %x\n",
- cs->hw.teles3.cfg_reg + 2, val);
- release_io_teles3(cs);
- return (0);
- }
- }
- printk(KERN_INFO
- "HiSax: %s config irq:%d isac:0x%X cfg:0x%X\n",
- CardType[cs->typ], cs->irq,
- cs->hw.teles3.isac + 32, cs->hw.teles3.cfg_reg);
- printk(KERN_INFO
- "HiSax: hscx A:0x%X hscx B:0x%X\n",
- cs->hw.teles3.hscx[0] + 32, cs->hw.teles3.hscx[1] + 32);
-
- setup_isac(cs);
- if (reset_teles3(cs)) {
- printk(KERN_WARNING "Teles3: wrong IRQ\n");
- release_io_teles3(cs);
- return (0);
- }
- cs->readisac = &ReadISAC;
- cs->writeisac = &WriteISAC;
- cs->readisacfifo = &ReadISACfifo;
- cs->writeisacfifo = &WriteISACfifo;
- cs->BC_Read_Reg = &ReadHSCX;
- cs->BC_Write_Reg = &WriteHSCX;
- cs->BC_Send_Data = &hscx_fill_fifo;
- cs->cardmsg = &Teles_card_msg;
- cs->irq_func = &teles3_interrupt;
- ISACVersion(cs, "Teles3:");
- if (HscxVersion(cs, "Teles3:")) {
- printk(KERN_WARNING
- "Teles3: wrong HSCX versions check IO address\n");
- release_io_teles3(cs);
- return (0);
- }
- return (1);
-}
diff --git a/drivers/isdn/hisax/teles_cs.c b/drivers/isdn/hisax/teles_cs.c
deleted file mode 100644
index bcc37e955622..000000000000
--- a/drivers/isdn/hisax/teles_cs.c
+++ /dev/null
@@ -1,201 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* $Id: teles_cs.c,v 1.1.2.2 2004/01/25 15:07:06 keil Exp $ */
-/*======================================================================
-
- A teles S0 PCMCIA client driver
-
- Based on skeleton by David Hinds, dhinds@allegro.stanford.edu
- Written by Christof Petig, christof.petig@wtal.de
-
- Also inspired by ELSA PCMCIA driver
- by Klaus Lichtenwalder <Lichtenwalder@ACM.org>
-
- Extensions to new hisax_pcmcia by Karsten Keil
-
- minor changes to be compatible with kernel 2.4.x
- by Jan.Schubert@GMX.li
-
- ======================================================================*/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/ptrace.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/ioport.h>
-#include <asm/io.h>
-
-#include <pcmcia/cistpl.h>
-#include <pcmcia/cisreg.h>
-#include <pcmcia/ds.h>
-#include "hisax_cfg.h"
-
-MODULE_DESCRIPTION("ISDN4Linux: PCMCIA client driver for Teles PCMCIA cards");
-MODULE_AUTHOR("Christof Petig, christof.petig@wtal.de, Karsten Keil, kkeil@suse.de");
-MODULE_LICENSE("GPL");
-
-
-/*====================================================================*/
-
-/* Parameters that can be set with 'insmod' */
-
-static int protocol = 2; /* EURO-ISDN Default */
-module_param(protocol, int, 0);
-
-static int teles_cs_config(struct pcmcia_device *link);
-static void teles_cs_release(struct pcmcia_device *link);
-static void teles_detach(struct pcmcia_device *p_dev);
-
-typedef struct local_info_t {
- struct pcmcia_device *p_dev;
- int busy;
- int cardnr;
-} local_info_t;
-
-static int teles_probe(struct pcmcia_device *link)
-{
- local_info_t *local;
-
- dev_dbg(&link->dev, "teles_attach()\n");
-
- /* Allocate space for private device-specific data */
- local = kzalloc(sizeof(local_info_t), GFP_KERNEL);
- if (!local) return -ENOMEM;
- local->cardnr = -1;
-
- local->p_dev = link;
- link->priv = local;
-
- link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;
-
- return teles_cs_config(link);
-} /* teles_attach */
-
-static void teles_detach(struct pcmcia_device *link)
-{
- local_info_t *info = link->priv;
-
- dev_dbg(&link->dev, "teles_detach(0x%p)\n", link);
-
- info->busy = 1;
- teles_cs_release(link);
-
- kfree(info);
-} /* teles_detach */
-
-static int teles_cs_configcheck(struct pcmcia_device *p_dev, void *priv_data)
-{
- int j;
-
- p_dev->io_lines = 5;
- p_dev->resource[0]->end = 96;
- p_dev->resource[0]->flags &= IO_DATA_PATH_WIDTH;
- p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
-
- if ((p_dev->resource[0]->end) && p_dev->resource[0]->start) {
- printk(KERN_INFO "(teles_cs: looks like the 96 model)\n");
- if (!pcmcia_request_io(p_dev))
- return 0;
- } else {
- printk(KERN_INFO "(teles_cs: looks like the 97 model)\n");
- for (j = 0x2f0; j > 0x100; j -= 0x10) {
- p_dev->resource[0]->start = j;
- if (!pcmcia_request_io(p_dev))
- return 0;
- }
- }
- return -ENODEV;
-}
-
-static int teles_cs_config(struct pcmcia_device *link)
-{
- int i;
- IsdnCard_t icard;
-
- dev_dbg(&link->dev, "teles_config(0x%p)\n", link);
-
- i = pcmcia_loop_config(link, teles_cs_configcheck, NULL);
- if (i != 0)
- goto cs_failed;
-
- if (!link->irq)
- goto cs_failed;
-
- i = pcmcia_enable_device(link);
- if (i != 0)
- goto cs_failed;
-
- icard.para[0] = link->irq;
- icard.para[1] = link->resource[0]->start;
- icard.protocol = protocol;
- icard.typ = ISDN_CTYPE_TELESPCMCIA;
-
- i = hisax_init_pcmcia(link, &(((local_info_t *)link->priv)->busy), &icard);
- if (i < 0) {
- printk(KERN_ERR "teles_cs: failed to initialize Teles PCMCIA %d at i/o %#x\n",
- i, (unsigned int) link->resource[0]->start);
- teles_cs_release(link);
- return -ENODEV;
- }
-
- ((local_info_t *)link->priv)->cardnr = i;
- return 0;
-
-cs_failed:
- teles_cs_release(link);
- return -ENODEV;
-} /* teles_cs_config */
-
-static void teles_cs_release(struct pcmcia_device *link)
-{
- local_info_t *local = link->priv;
-
- dev_dbg(&link->dev, "teles_cs_release(0x%p)\n", link);
-
- if (local) {
- if (local->cardnr >= 0) {
- /* no unregister function with hisax */
- HiSax_closecard(local->cardnr);
- }
- }
-
- pcmcia_disable_device(link);
-} /* teles_cs_release */
-
-static int teles_suspend(struct pcmcia_device *link)
-{
- local_info_t *dev = link->priv;
-
- dev->busy = 1;
-
- return 0;
-}
-
-static int teles_resume(struct pcmcia_device *link)
-{
- local_info_t *dev = link->priv;
-
- dev->busy = 0;
-
- return 0;
-}
-
-
-static const struct pcmcia_device_id teles_ids[] = {
- PCMCIA_DEVICE_PROD_ID12("TELES", "S0/PC", 0x67b50eae, 0xe9e70119),
- PCMCIA_DEVICE_NULL,
-};
-MODULE_DEVICE_TABLE(pcmcia, teles_ids);
-
-static struct pcmcia_driver teles_cs_driver = {
- .owner = THIS_MODULE,
- .name = "teles_cs",
- .probe = teles_probe,
- .remove = teles_detach,
- .id_table = teles_ids,
- .suspend = teles_suspend,
- .resume = teles_resume,
-};
-module_pcmcia_driver(teles_cs_driver);
diff --git a/drivers/isdn/hisax/telespci.c b/drivers/isdn/hisax/telespci.c
deleted file mode 100644
index 33eeb4602c7e..000000000000
--- a/drivers/isdn/hisax/telespci.c
+++ /dev/null
@@ -1,349 +0,0 @@
-/* $Id: telespci.c,v 2.23.2.3 2004/01/13 14:31:26 keil Exp $
- *
- * low level stuff for Teles PCI isdn cards
- *
- * Author Ton van Rosmalen
- * Karsten Keil
- * Copyright by Ton van Rosmalen
- * by Karsten Keil <keil@isdn4linux.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/init.h>
-#include "hisax.h"
-#include "isac.h"
-#include "hscx.h"
-#include "isdnl1.h"
-#include <linux/pci.h>
-
-static const char *telespci_revision = "$Revision: 2.23.2.3 $";
-
-#define ZORAN_PO_RQ_PEN 0x02000000
-#define ZORAN_PO_WR 0x00800000
-#define ZORAN_PO_GID0 0x00000000
-#define ZORAN_PO_GID1 0x00100000
-#define ZORAN_PO_GREG0 0x00000000
-#define ZORAN_PO_GREG1 0x00010000
-#define ZORAN_PO_DMASK 0xFF
-
-#define WRITE_ADDR_ISAC (ZORAN_PO_WR | ZORAN_PO_GID0 | ZORAN_PO_GREG0)
-#define READ_DATA_ISAC (ZORAN_PO_GID0 | ZORAN_PO_GREG1)
-#define WRITE_DATA_ISAC (ZORAN_PO_WR | ZORAN_PO_GID0 | ZORAN_PO_GREG1)
-#define WRITE_ADDR_HSCX (ZORAN_PO_WR | ZORAN_PO_GID1 | ZORAN_PO_GREG0)
-#define READ_DATA_HSCX (ZORAN_PO_GID1 | ZORAN_PO_GREG1)
-#define WRITE_DATA_HSCX (ZORAN_PO_WR | ZORAN_PO_GID1 | ZORAN_PO_GREG1)
-
-#define ZORAN_WAIT_NOBUSY do { \
- portdata = readl(adr + 0x200); \
- } while (portdata & ZORAN_PO_RQ_PEN)
-
-static inline u_char
-readisac(void __iomem *adr, u_char off)
-{
- register unsigned int portdata;
-
- ZORAN_WAIT_NOBUSY;
-
- /* set address for ISAC */
- writel(WRITE_ADDR_ISAC | off, adr + 0x200);
- ZORAN_WAIT_NOBUSY;
-
- /* read data from ISAC */
- writel(READ_DATA_ISAC, adr + 0x200);
- ZORAN_WAIT_NOBUSY;
- return ((u_char)(portdata & ZORAN_PO_DMASK));
-}
-
-static inline void
-writeisac(void __iomem *adr, u_char off, u_char data)
-{
- register unsigned int portdata;
-
- ZORAN_WAIT_NOBUSY;
-
- /* set address for ISAC */
- writel(WRITE_ADDR_ISAC | off, adr + 0x200);
- ZORAN_WAIT_NOBUSY;
-
- /* write data to ISAC */
- writel(WRITE_DATA_ISAC | data, adr + 0x200);
- ZORAN_WAIT_NOBUSY;
-}
-
-static inline u_char
-readhscx(void __iomem *adr, int hscx, u_char off)
-{
- register unsigned int portdata;
-
- ZORAN_WAIT_NOBUSY;
- /* set address for HSCX */
- writel(WRITE_ADDR_HSCX | ((hscx ? 0x40 : 0) + off), adr + 0x200);
- ZORAN_WAIT_NOBUSY;
-
- /* read data from HSCX */
- writel(READ_DATA_HSCX, adr + 0x200);
- ZORAN_WAIT_NOBUSY;
- return ((u_char)(portdata & ZORAN_PO_DMASK));
-}
-
-static inline void
-writehscx(void __iomem *adr, int hscx, u_char off, u_char data)
-{
- register unsigned int portdata;
-
- ZORAN_WAIT_NOBUSY;
- /* set address for HSCX */
- writel(WRITE_ADDR_HSCX | ((hscx ? 0x40 : 0) + off), adr + 0x200);
- ZORAN_WAIT_NOBUSY;
-
- /* write data to HSCX */
- writel(WRITE_DATA_HSCX | data, adr + 0x200);
- ZORAN_WAIT_NOBUSY;
-}
-
-static inline void
-read_fifo_isac(void __iomem *adr, u_char *data, int size)
-{
- register unsigned int portdata;
- register int i;
-
- ZORAN_WAIT_NOBUSY;
- /* read data from ISAC */
- for (i = 0; i < size; i++) {
- /* set address for ISAC fifo */
- writel(WRITE_ADDR_ISAC | 0x1E, adr + 0x200);
- ZORAN_WAIT_NOBUSY;
- writel(READ_DATA_ISAC, adr + 0x200);
- ZORAN_WAIT_NOBUSY;
- data[i] = (u_char)(portdata & ZORAN_PO_DMASK);
- }
-}
-
-static void
-write_fifo_isac(void __iomem *adr, u_char *data, int size)
-{
- register unsigned int portdata;
- register int i;
-
- ZORAN_WAIT_NOBUSY;
- /* write data to ISAC */
- for (i = 0; i < size; i++) {
- /* set address for ISAC fifo */
- writel(WRITE_ADDR_ISAC | 0x1E, adr + 0x200);
- ZORAN_WAIT_NOBUSY;
- writel(WRITE_DATA_ISAC | data[i], adr + 0x200);
- ZORAN_WAIT_NOBUSY;
- }
-}
-
-static inline void
-read_fifo_hscx(void __iomem *adr, int hscx, u_char *data, int size)
-{
- register unsigned int portdata;
- register int i;
-
- ZORAN_WAIT_NOBUSY;
- /* read data from HSCX */
- for (i = 0; i < size; i++) {
- /* set address for HSCX fifo */
- writel(WRITE_ADDR_HSCX | (hscx ? 0x5F : 0x1F), adr + 0x200);
- ZORAN_WAIT_NOBUSY;
- writel(READ_DATA_HSCX, adr + 0x200);
- ZORAN_WAIT_NOBUSY;
- data[i] = (u_char) (portdata & ZORAN_PO_DMASK);
- }
-}
-
-static inline void
-write_fifo_hscx(void __iomem *adr, int hscx, u_char *data, int size)
-{
- unsigned int portdata;
- register int i;
-
- ZORAN_WAIT_NOBUSY;
- /* write data to HSCX */
- for (i = 0; i < size; i++) {
- /* set address for HSCX fifo */
- writel(WRITE_ADDR_HSCX | (hscx ? 0x5F : 0x1F), adr + 0x200);
- ZORAN_WAIT_NOBUSY;
- writel(WRITE_DATA_HSCX | data[i], adr + 0x200);
- ZORAN_WAIT_NOBUSY;
- udelay(10);
- }
-}
-
-/* Interface functions */
-
-static u_char
-ReadISAC(struct IsdnCardState *cs, u_char offset)
-{
- return (readisac(cs->hw.teles0.membase, offset));
-}
-
-static void
-WriteISAC(struct IsdnCardState *cs, u_char offset, u_char value)
-{
- writeisac(cs->hw.teles0.membase, offset, value);
-}
-
-static void
-ReadISACfifo(struct IsdnCardState *cs, u_char *data, int size)
-{
- read_fifo_isac(cs->hw.teles0.membase, data, size);
-}
-
-static void
-WriteISACfifo(struct IsdnCardState *cs, u_char *data, int size)
-{
- write_fifo_isac(cs->hw.teles0.membase, data, size);
-}
-
-static u_char
-ReadHSCX(struct IsdnCardState *cs, int hscx, u_char offset)
-{
- return (readhscx(cs->hw.teles0.membase, hscx, offset));
-}
-
-static void
-WriteHSCX(struct IsdnCardState *cs, int hscx, u_char offset, u_char value)
-{
- writehscx(cs->hw.teles0.membase, hscx, offset, value);
-}
-
-/*
- * fast interrupt HSCX stuff goes here
- */
-
-#define READHSCX(cs, nr, reg) readhscx(cs->hw.teles0.membase, nr, reg)
-#define WRITEHSCX(cs, nr, reg, data) writehscx(cs->hw.teles0.membase, nr, reg, data)
-#define READHSCXFIFO(cs, nr, ptr, cnt) read_fifo_hscx(cs->hw.teles0.membase, nr, ptr, cnt)
-#define WRITEHSCXFIFO(cs, nr, ptr, cnt) write_fifo_hscx(cs->hw.teles0.membase, nr, ptr, cnt)
-
-#include "hscx_irq.c"
-
-static irqreturn_t
-telespci_interrupt(int intno, void *dev_id)
-{
- struct IsdnCardState *cs = dev_id;
- u_char hval, ival;
- u_long flags;
-
- spin_lock_irqsave(&cs->lock, flags);
- hval = readhscx(cs->hw.teles0.membase, 1, HSCX_ISTA);
- if (hval)
- hscx_int_main(cs, hval);
- ival = readisac(cs->hw.teles0.membase, ISAC_ISTA);
- if ((hval | ival) == 0) {
- spin_unlock_irqrestore(&cs->lock, flags);
- return IRQ_NONE;
- }
- if (ival)
- isac_interrupt(cs, ival);
- /* Clear interrupt register for Zoran PCI controller */
- writel(0x70000000, cs->hw.teles0.membase + 0x3C);
-
- writehscx(cs->hw.teles0.membase, 0, HSCX_MASK, 0xFF);
- writehscx(cs->hw.teles0.membase, 1, HSCX_MASK, 0xFF);
- writeisac(cs->hw.teles0.membase, ISAC_MASK, 0xFF);
- writeisac(cs->hw.teles0.membase, ISAC_MASK, 0x0);
- writehscx(cs->hw.teles0.membase, 0, HSCX_MASK, 0x0);
- writehscx(cs->hw.teles0.membase, 1, HSCX_MASK, 0x0);
- spin_unlock_irqrestore(&cs->lock, flags);
- return IRQ_HANDLED;
-}
-
-static void
-release_io_telespci(struct IsdnCardState *cs)
-{
- iounmap(cs->hw.teles0.membase);
-}
-
-static int
-TelesPCI_card_msg(struct IsdnCardState *cs, int mt, void *arg)
-{
- u_long flags;
-
- switch (mt) {
- case CARD_RESET:
- return (0);
- case CARD_RELEASE:
- release_io_telespci(cs);
- return (0);
- case CARD_INIT:
- spin_lock_irqsave(&cs->lock, flags);
- inithscxisac(cs, 3);
- spin_unlock_irqrestore(&cs->lock, flags);
- return (0);
- case CARD_TEST:
- return (0);
- }
- return (0);
-}
-
-static struct pci_dev *dev_tel = NULL;
-
-int setup_telespci(struct IsdnCard *card)
-{
- struct IsdnCardState *cs = card->cs;
- char tmp[64];
-
- strcpy(tmp, telespci_revision);
- printk(KERN_INFO "HiSax: Teles/PCI driver Rev. %s\n", HiSax_getrev(tmp));
- if (cs->typ != ISDN_CTYPE_TELESPCI)
- return (0);
-
- if ((dev_tel = hisax_find_pci_device(PCI_VENDOR_ID_ZORAN, PCI_DEVICE_ID_ZORAN_36120, dev_tel))) {
- if (pci_enable_device(dev_tel))
- return (0);
- cs->irq = dev_tel->irq;
- if (!cs->irq) {
- printk(KERN_WARNING "Teles: No IRQ for PCI card found\n");
- return (0);
- }
- cs->hw.teles0.membase = ioremap(pci_resource_start(dev_tel, 0),
- PAGE_SIZE);
- printk(KERN_INFO "Found: Zoran, base-address: 0x%llx, irq: 0x%x\n",
- (unsigned long long)pci_resource_start(dev_tel, 0),
- dev_tel->irq);
- } else {
- printk(KERN_WARNING "TelesPCI: No PCI card found\n");
- return (0);
- }
-
- /* Initialize Zoran PCI controller */
- writel(0x00000000, cs->hw.teles0.membase + 0x28);
- writel(0x01000000, cs->hw.teles0.membase + 0x28);
- writel(0x01000000, cs->hw.teles0.membase + 0x28);
- writel(0x7BFFFFFF, cs->hw.teles0.membase + 0x2C);
- writel(0x70000000, cs->hw.teles0.membase + 0x3C);
- writel(0x61000000, cs->hw.teles0.membase + 0x40);
- /* writel(0x00800000, cs->hw.teles0.membase + 0x200); */
-
- printk(KERN_INFO
- "HiSax: Teles PCI config irq:%d mem:%p\n",
- cs->irq,
- cs->hw.teles0.membase);
-
- setup_isac(cs);
- cs->readisac = &ReadISAC;
- cs->writeisac = &WriteISAC;
- cs->readisacfifo = &ReadISACfifo;
- cs->writeisacfifo = &WriteISACfifo;
- cs->BC_Read_Reg = &ReadHSCX;
- cs->BC_Write_Reg = &WriteHSCX;
- cs->BC_Send_Data = &hscx_fill_fifo;
- cs->cardmsg = &TelesPCI_card_msg;
- cs->irq_func = &telespci_interrupt;
- cs->irq_flags |= IRQF_SHARED;
- ISACVersion(cs, "TelesPCI:");
- if (HscxVersion(cs, "TelesPCI:")) {
- printk(KERN_WARNING
- "TelesPCI: wrong HSCX versions check IO/MEM addresses\n");
- release_io_telespci(cs);
- return (0);
- }
- return (1);
-}
diff --git a/drivers/isdn/hisax/w6692.c b/drivers/isdn/hisax/w6692.c
deleted file mode 100644
index 36eefaa3a7d9..000000000000
--- a/drivers/isdn/hisax/w6692.c
+++ /dev/null
@@ -1,1085 +0,0 @@
-/* $Id: w6692.c,v 1.18.2.4 2004/02/11 13:21:34 keil Exp $
- *
- * Winbond W6692 specific routines
- *
- * Author Petr Novak
- * Copyright by Petr Novak <petr.novak@i.cz>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/init.h>
-#include "hisax.h"
-#include "w6692.h"
-#include "isdnl1.h"
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/slab.h>
-
-/* table entry in the PCI devices list */
-typedef struct {
- int vendor_id;
- int device_id;
- char *vendor_name;
- char *card_name;
-} PCI_ENTRY;
-
-static const PCI_ENTRY id_list[] =
-{
- {PCI_VENDOR_ID_WINBOND2, PCI_DEVICE_ID_WINBOND2_6692, "Winbond", "W6692"},
- {PCI_VENDOR_ID_DYNALINK, PCI_DEVICE_ID_DYNALINK_IS64PH, "Dynalink/AsusCom", "IS64PH"},
- {0, 0, "U.S.Robotics", "ISDN PCI Card TA"}
-};
-
-#define W6692_SV_USR 0x16ec
-#define W6692_SD_USR 0x3409
-#define W6692_WINBOND 0
-#define W6692_DYNALINK 1
-#define W6692_USR 2
-
-static const char *w6692_revision = "$Revision: 1.18.2.4 $";
-
-#define DBUSY_TIMER_VALUE 80
-
-static char *W6692Ver[] =
-{"W6692 V00", "W6692 V01", "W6692 V10",
- "W6692 V11"};
-
-static void
-W6692Version(struct IsdnCardState *cs, char *s)
-{
- int val;
-
- val = cs->readW6692(cs, W_D_RBCH);
- printk(KERN_INFO "%s Winbond W6692 version (%x): %s\n", s, val, W6692Ver[(val >> 6) & 3]);
-}
-
-static void
-ph_command(struct IsdnCardState *cs, unsigned int command)
-{
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "ph_command %x", command);
- cs->writeisac(cs, W_CIX, command);
-}
-
-
-static void
-W6692_new_ph(struct IsdnCardState *cs)
-{
- switch (cs->dc.w6692.ph_state) {
- case (W_L1CMD_RST):
- ph_command(cs, W_L1CMD_DRC);
- l1_msg(cs, HW_RESET | INDICATION, NULL);
- /* fall through */
- case (W_L1IND_CD):
- l1_msg(cs, HW_DEACTIVATE | CONFIRM, NULL);
- break;
- case (W_L1IND_DRD):
- l1_msg(cs, HW_DEACTIVATE | INDICATION, NULL);
- break;
- case (W_L1IND_CE):
- l1_msg(cs, HW_POWERUP | CONFIRM, NULL);
- break;
- case (W_L1IND_LD):
- l1_msg(cs, HW_RSYNC | INDICATION, NULL);
- break;
- case (W_L1IND_ARD):
- l1_msg(cs, HW_INFO2 | INDICATION, NULL);
- break;
- case (W_L1IND_AI8):
- l1_msg(cs, HW_INFO4_P8 | INDICATION, NULL);
- break;
- case (W_L1IND_AI10):
- l1_msg(cs, HW_INFO4_P10 | INDICATION, NULL);
- break;
- default:
- break;
- }
-}
-
-static void
-W6692_bh(struct work_struct *work)
-{
- struct IsdnCardState *cs =
- container_of(work, struct IsdnCardState, tqueue);
- struct PStack *stptr;
-
- if (test_and_clear_bit(D_CLEARBUSY, &cs->event)) {
- if (cs->debug)
- debugl1(cs, "D-Channel Busy cleared");
- stptr = cs->stlist;
- while (stptr != NULL) {
- stptr->l1.l1l2(stptr, PH_PAUSE | CONFIRM, NULL);
- stptr = stptr->next;
- }
- }
- if (test_and_clear_bit(D_L1STATECHANGE, &cs->event))
- W6692_new_ph(cs);
- if (test_and_clear_bit(D_RCVBUFREADY, &cs->event))
- DChannel_proc_rcv(cs);
- if (test_and_clear_bit(D_XMTBUFREADY, &cs->event))
- DChannel_proc_xmt(cs);
-/*
- if (test_and_clear_bit(D_RX_MON1, &cs->event))
- arcofi_fsm(cs, ARCOFI_RX_END, NULL);
- if (test_and_clear_bit(D_TX_MON1, &cs->event))
- arcofi_fsm(cs, ARCOFI_TX_END, NULL);
-*/
-}
-
-static void
-W6692_empty_fifo(struct IsdnCardState *cs, int count)
-{
- u_char *ptr;
-
- if ((cs->debug & L1_DEB_ISAC) && !(cs->debug & L1_DEB_ISAC_FIFO))
- debugl1(cs, "W6692_empty_fifo");
-
- if ((cs->rcvidx + count) >= MAX_DFRAME_LEN_L1) {
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "W6692_empty_fifo overrun %d",
- cs->rcvidx + count);
- cs->writeW6692(cs, W_D_CMDR, W_D_CMDR_RACK);
- cs->rcvidx = 0;
- return;
- }
- ptr = cs->rcvbuf + cs->rcvidx;
- cs->rcvidx += count;
- cs->readW6692fifo(cs, ptr, count);
- cs->writeW6692(cs, W_D_CMDR, W_D_CMDR_RACK);
- if (cs->debug & L1_DEB_ISAC_FIFO) {
- char *t = cs->dlog;
-
- t += sprintf(t, "W6692_empty_fifo cnt %d", count);
- QuickHex(t, ptr, count);
- debugl1(cs, "%s", cs->dlog);
- }
-}
-
-static void
-W6692_fill_fifo(struct IsdnCardState *cs)
-{
- int count, more;
- u_char *ptr;
-
- if ((cs->debug & L1_DEB_ISAC) && !(cs->debug & L1_DEB_ISAC_FIFO))
- debugl1(cs, "W6692_fill_fifo");
-
- if (!cs->tx_skb)
- return;
-
- count = cs->tx_skb->len;
- if (count <= 0)
- return;
-
- more = 0;
- if (count > W_D_FIFO_THRESH) {
- more = !0;
- count = W_D_FIFO_THRESH;
- }
- ptr = cs->tx_skb->data;
- skb_pull(cs->tx_skb, count);
- cs->tx_cnt += count;
- cs->writeW6692fifo(cs, ptr, count);
- cs->writeW6692(cs, W_D_CMDR, more ? W_D_CMDR_XMS : (W_D_CMDR_XMS | W_D_CMDR_XME));
- if (test_and_set_bit(FLG_DBUSY_TIMER, &cs->HW_Flags)) {
- debugl1(cs, "W6692_fill_fifo dbusytimer running");
- del_timer(&cs->dbusytimer);
- }
- cs->dbusytimer.expires = jiffies + ((DBUSY_TIMER_VALUE * HZ) / 1000);
- add_timer(&cs->dbusytimer);
- if (cs->debug & L1_DEB_ISAC_FIFO) {
- char *t = cs->dlog;
-
- t += sprintf(t, "W6692_fill_fifo cnt %d", count);
- QuickHex(t, ptr, count);
- debugl1(cs, "%s", cs->dlog);
- }
-}
-
-static void
-W6692B_empty_fifo(struct BCState *bcs, int count)
-{
- u_char *ptr;
- struct IsdnCardState *cs = bcs->cs;
-
- if ((cs->debug & L1_DEB_HSCX) && !(cs->debug & L1_DEB_HSCX_FIFO))
- debugl1(cs, "W6692B_empty_fifo");
-
- if (bcs->hw.w6692.rcvidx + count > HSCX_BUFMAX) {
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "W6692B_empty_fifo: incoming packet too large");
- cs->BC_Write_Reg(cs, bcs->channel, W_B_CMDR, W_B_CMDR_RACK | W_B_CMDR_RACT);
- bcs->hw.w6692.rcvidx = 0;
- return;
- }
- ptr = bcs->hw.w6692.rcvbuf + bcs->hw.w6692.rcvidx;
- bcs->hw.w6692.rcvidx += count;
- READW6692BFIFO(cs, bcs->channel, ptr, count);
- cs->BC_Write_Reg(cs, bcs->channel, W_B_CMDR, W_B_CMDR_RACK | W_B_CMDR_RACT);
- if (cs->debug & L1_DEB_HSCX_FIFO) {
- char *t = bcs->blog;
-
- t += sprintf(t, "W6692B_empty_fifo %c cnt %d",
- bcs->channel + '1', count);
- QuickHex(t, ptr, count);
- debugl1(cs, "%s", bcs->blog);
- }
-}
-
-static void
-W6692B_fill_fifo(struct BCState *bcs)
-{
- struct IsdnCardState *cs = bcs->cs;
- int more, count;
- u_char *ptr;
-
- if (!bcs->tx_skb)
- return;
- if (bcs->tx_skb->len <= 0)
- return;
-
- more = (bcs->mode == L1_MODE_TRANS) ? 1 : 0;
- if (bcs->tx_skb->len > W_B_FIFO_THRESH) {
- more = 1;
- count = W_B_FIFO_THRESH;
- } else
- count = bcs->tx_skb->len;
-
- if ((cs->debug & L1_DEB_HSCX) && !(cs->debug & L1_DEB_HSCX_FIFO))
- debugl1(cs, "W6692B_fill_fifo%s%d", (more ? " " : " last "), count);
-
- ptr = bcs->tx_skb->data;
- skb_pull(bcs->tx_skb, count);
- bcs->tx_cnt -= count;
- bcs->hw.w6692.count += count;
- WRITEW6692BFIFO(cs, bcs->channel, ptr, count);
- cs->BC_Write_Reg(cs, bcs->channel, W_B_CMDR, W_B_CMDR_RACT | W_B_CMDR_XMS | (more ? 0 : W_B_CMDR_XME));
- if (cs->debug & L1_DEB_HSCX_FIFO) {
- char *t = bcs->blog;
-
- t += sprintf(t, "W6692B_fill_fifo %c cnt %d",
- bcs->channel + '1', count);
- QuickHex(t, ptr, count);
- debugl1(cs, "%s", bcs->blog);
- }
-}
-
-static void
-W6692B_interrupt(struct IsdnCardState *cs, u_char bchan)
-{
- u_char val;
- u_char r;
- struct BCState *bcs;
- struct sk_buff *skb;
- int count;
-
- bcs = (cs->bcs->channel == bchan) ? cs->bcs : (cs->bcs + 1);
- val = cs->BC_Read_Reg(cs, bchan, W_B_EXIR);
- debugl1(cs, "W6692B chan %d B_EXIR 0x%02X", bchan, val);
-
- if (!test_bit(BC_FLG_INIT, &bcs->Flag)) {
- debugl1(cs, "W6692B not INIT yet");
- return;
- }
- if (val & W_B_EXI_RME) { /* RME */
- r = cs->BC_Read_Reg(cs, bchan, W_B_STAR);
- if (r & (W_B_STAR_RDOV | W_B_STAR_CRCE | W_B_STAR_RMB)) {
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "W6692 B STAR %x", r);
- if ((r & W_B_STAR_RDOV) && bcs->mode)
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "W6692 B RDOV mode=%d",
- bcs->mode);
- if (r & W_B_STAR_CRCE)
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "W6692 B CRC error");
- cs->BC_Write_Reg(cs, bchan, W_B_CMDR, W_B_CMDR_RACK | W_B_CMDR_RRST | W_B_CMDR_RACT);
- } else {
- count = cs->BC_Read_Reg(cs, bchan, W_B_RBCL) & (W_B_FIFO_THRESH - 1);
- if (count == 0)
- count = W_B_FIFO_THRESH;
- W6692B_empty_fifo(bcs, count);
- if ((count = bcs->hw.w6692.rcvidx) > 0) {
- if (cs->debug & L1_DEB_HSCX_FIFO)
- debugl1(cs, "W6692 Bchan Frame %d", count);
- if (!(skb = dev_alloc_skb(count)))
- printk(KERN_WARNING "W6692: Bchan receive out of memory\n");
- else {
- skb_put_data(skb,
- bcs->hw.w6692.rcvbuf,
- count);
- skb_queue_tail(&bcs->rqueue, skb);
- }
- }
- }
- bcs->hw.w6692.rcvidx = 0;
- schedule_event(bcs, B_RCVBUFREADY);
- }
- if (val & W_B_EXI_RMR) { /* RMR */
- W6692B_empty_fifo(bcs, W_B_FIFO_THRESH);
- r = cs->BC_Read_Reg(cs, bchan, W_B_STAR);
- if (r & W_B_STAR_RDOV) {
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "W6692 B RDOV(RMR) mode=%d", bcs->mode);
- cs->BC_Write_Reg(cs, bchan, W_B_CMDR, W_B_CMDR_RACK | W_B_CMDR_RRST | W_B_CMDR_RACT);
- if (bcs->mode != L1_MODE_TRANS)
- bcs->hw.w6692.rcvidx = 0;
- }
- if (bcs->mode == L1_MODE_TRANS) {
- /* receive audio data */
- if (!(skb = dev_alloc_skb(W_B_FIFO_THRESH)))
- printk(KERN_WARNING "HiSax: receive out of memory\n");
- else {
- skb_put_data(skb, bcs->hw.w6692.rcvbuf,
- W_B_FIFO_THRESH);
- skb_queue_tail(&bcs->rqueue, skb);
- }
- bcs->hw.w6692.rcvidx = 0;
- schedule_event(bcs, B_RCVBUFREADY);
- }
- }
- if (val & W_B_EXI_XDUN) { /* XDUN */
- cs->BC_Write_Reg(cs, bchan, W_B_CMDR, W_B_CMDR_XRST | W_B_CMDR_RACT);
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "W6692 B EXIR %x Lost TX", val);
- if (bcs->mode == 1)
- W6692B_fill_fifo(bcs);
- else {
-			/* Here we lost a TX interrupt, so
-			 * restart transmitting the whole frame.
-			 */
- if (bcs->tx_skb) {
- skb_push(bcs->tx_skb, bcs->hw.w6692.count);
- bcs->tx_cnt += bcs->hw.w6692.count;
- bcs->hw.w6692.count = 0;
- }
- }
- return;
- }
- if (val & W_B_EXI_XFR) { /* XFR */
- r = cs->BC_Read_Reg(cs, bchan, W_B_STAR);
- if (r & W_B_STAR_XDOW) {
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "W6692 B STAR %x XDOW", r);
- cs->BC_Write_Reg(cs, bchan, W_B_CMDR, W_B_CMDR_XRST | W_B_CMDR_RACT);
- if (bcs->tx_skb && (bcs->mode != 1)) {
- skb_push(bcs->tx_skb, bcs->hw.w6692.count);
- bcs->tx_cnt += bcs->hw.w6692.count;
- bcs->hw.w6692.count = 0;
- }
- }
- if (bcs->tx_skb) {
- if (bcs->tx_skb->len) {
- W6692B_fill_fifo(bcs);
- return;
- } else {
- if (test_bit(FLG_LLI_L1WAKEUP, &bcs->st->lli.flag) &&
- (PACKET_NOACK != bcs->tx_skb->pkt_type)) {
- u_long flags;
- spin_lock_irqsave(&bcs->aclock, flags);
- bcs->ackcnt += bcs->hw.w6692.count;
- spin_unlock_irqrestore(&bcs->aclock, flags);
- schedule_event(bcs, B_ACKPENDING);
- }
- dev_kfree_skb_irq(bcs->tx_skb);
- bcs->hw.w6692.count = 0;
- bcs->tx_skb = NULL;
- }
- }
- if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) {
- bcs->hw.w6692.count = 0;
- test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
- W6692B_fill_fifo(bcs);
- } else {
- test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
- schedule_event(bcs, B_XMTBUFREADY);
- }
- }
-}
-
-static irqreturn_t
-W6692_interrupt(int intno, void *dev_id)
-{
- struct IsdnCardState *cs = dev_id;
- u_char val, exval, v1;
- struct sk_buff *skb;
- u_int count;
- u_long flags;
- int icnt = 5;
-
- spin_lock_irqsave(&cs->lock, flags);
- val = cs->readW6692(cs, W_ISTA);
- if (!val) {
- spin_unlock_irqrestore(&cs->lock, flags);
- return IRQ_NONE;
- }
-StartW6692:
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "W6692 ISTA %x", val);
-
- if (val & W_INT_D_RME) { /* RME */
- exval = cs->readW6692(cs, W_D_RSTA);
- if (exval & (W_D_RSTA_RDOV | W_D_RSTA_CRCE | W_D_RSTA_RMB)) {
- if (exval & W_D_RSTA_RDOV)
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "W6692 RDOV");
- if (exval & W_D_RSTA_CRCE)
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "W6692 D-channel CRC error");
- if (exval & W_D_RSTA_RMB)
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "W6692 D-channel ABORT");
- cs->writeW6692(cs, W_D_CMDR, W_D_CMDR_RACK | W_D_CMDR_RRST);
- } else {
- count = cs->readW6692(cs, W_D_RBCL) & (W_D_FIFO_THRESH - 1);
- if (count == 0)
- count = W_D_FIFO_THRESH;
- W6692_empty_fifo(cs, count);
- if ((count = cs->rcvidx) > 0) {
- cs->rcvidx = 0;
- if (!(skb = alloc_skb(count, GFP_ATOMIC)))
- printk(KERN_WARNING "HiSax: D receive out of memory\n");
- else {
- skb_put_data(skb, cs->rcvbuf, count);
- skb_queue_tail(&cs->rq, skb);
- }
- }
- }
- cs->rcvidx = 0;
- schedule_event(cs, D_RCVBUFREADY);
- }
- if (val & W_INT_D_RMR) { /* RMR */
- W6692_empty_fifo(cs, W_D_FIFO_THRESH);
- }
- if (val & W_INT_D_XFR) { /* XFR */
- if (test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags))
- del_timer(&cs->dbusytimer);
- if (test_and_clear_bit(FLG_L1_DBUSY, &cs->HW_Flags))
- schedule_event(cs, D_CLEARBUSY);
- if (cs->tx_skb) {
- if (cs->tx_skb->len) {
- W6692_fill_fifo(cs);
- goto afterXFR;
- } else {
- dev_kfree_skb_irq(cs->tx_skb);
- cs->tx_cnt = 0;
- cs->tx_skb = NULL;
- }
- }
- if ((cs->tx_skb = skb_dequeue(&cs->sq))) {
- cs->tx_cnt = 0;
- W6692_fill_fifo(cs);
- } else
- schedule_event(cs, D_XMTBUFREADY);
- }
-afterXFR:
- if (val & (W_INT_XINT0 | W_INT_XINT1)) { /* XINT0/1 - never */
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "W6692 spurious XINT!");
- }
- if (val & W_INT_D_EXI) { /* EXI */
- exval = cs->readW6692(cs, W_D_EXIR);
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "W6692 D_EXIR %02x", exval);
- if (exval & (W_D_EXI_XDUN | W_D_EXI_XCOL)) { /* Transmit underrun/collision */
- debugl1(cs, "W6692 D-chan underrun/collision");
- printk(KERN_WARNING "HiSax: W6692 XDUN/XCOL\n");
- if (test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags))
- del_timer(&cs->dbusytimer);
- if (test_and_clear_bit(FLG_L1_DBUSY, &cs->HW_Flags))
- schedule_event(cs, D_CLEARBUSY);
- if (cs->tx_skb) { /* Restart frame */
- skb_push(cs->tx_skb, cs->tx_cnt);
- cs->tx_cnt = 0;
- W6692_fill_fifo(cs);
- } else {
- printk(KERN_WARNING "HiSax: W6692 XDUN/XCOL no skb\n");
- debugl1(cs, "W6692 XDUN/XCOL no skb");
- cs->writeW6692(cs, W_D_CMDR, W_D_CMDR_XRST);
- }
- }
- if (exval & W_D_EXI_RDOV) { /* RDOV */
- debugl1(cs, "W6692 D-channel RDOV");
- printk(KERN_WARNING "HiSax: W6692 D-RDOV\n");
- cs->writeW6692(cs, W_D_CMDR, W_D_CMDR_RRST);
- }
- if (exval & W_D_EXI_TIN2) { /* TIN2 - never */
- debugl1(cs, "W6692 spurious TIN2 interrupt");
- }
- if (exval & W_D_EXI_MOC) { /* MOC - not supported */
- debugl1(cs, "W6692 spurious MOC interrupt");
- v1 = cs->readW6692(cs, W_MOSR);
- debugl1(cs, "W6692 MOSR %02x", v1);
- }
- if (exval & W_D_EXI_ISC) { /* ISC - Level1 change */
- v1 = cs->readW6692(cs, W_CIR);
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "W6692 ISC CIR=0x%02X", v1);
- if (v1 & W_CIR_ICC) {
- cs->dc.w6692.ph_state = v1 & W_CIR_COD_MASK;
- if (cs->debug & L1_DEB_ISAC)
- debugl1(cs, "ph_state_change %x", cs->dc.w6692.ph_state);
- schedule_event(cs, D_L1STATECHANGE);
- }
- if (v1 & W_CIR_SCC) {
- v1 = cs->readW6692(cs, W_SQR);
- debugl1(cs, "W6692 SCC SQR=0x%02X", v1);
- }
- }
- if (exval & W_D_EXI_WEXP) {
- debugl1(cs, "W6692 spurious WEXP interrupt!");
- }
- if (exval & W_D_EXI_TEXP) {
- debugl1(cs, "W6692 spurious TEXP interrupt!");
- }
- }
- if (val & W_INT_B1_EXI) {
- debugl1(cs, "W6692 B channel 1 interrupt");
- W6692B_interrupt(cs, 0);
- }
- if (val & W_INT_B2_EXI) {
- debugl1(cs, "W6692 B channel 2 interrupt");
- W6692B_interrupt(cs, 1);
- }
- val = cs->readW6692(cs, W_ISTA);
- if (val && icnt) {
- icnt--;
- goto StartW6692;
- }
- if (!icnt) {
- printk(KERN_WARNING "W6692 IRQ LOOP\n");
- cs->writeW6692(cs, W_IMASK, 0xff);
- }
- spin_unlock_irqrestore(&cs->lock, flags);
- return IRQ_HANDLED;
-}
-
-static void
-W6692_l1hw(struct PStack *st, int pr, void *arg)
-{
- struct IsdnCardState *cs = (struct IsdnCardState *) st->l1.hardware;
- struct sk_buff *skb = arg;
- u_long flags;
- int val;
-
- switch (pr) {
- case (PH_DATA | REQUEST):
- if (cs->debug & DEB_DLOG_HEX)
- LogFrame(cs, skb->data, skb->len);
- if (cs->debug & DEB_DLOG_VERBOSE)
- dlogframe(cs, skb, 0);
- spin_lock_irqsave(&cs->lock, flags);
- if (cs->tx_skb) {
- skb_queue_tail(&cs->sq, skb);
-#ifdef L2FRAME_DEBUG /* psa */
- if (cs->debug & L1_DEB_LAPD)
- Logl2Frame(cs, skb, "PH_DATA Queued", 0);
-#endif
- } else {
- cs->tx_skb = skb;
- cs->tx_cnt = 0;
-#ifdef L2FRAME_DEBUG /* psa */
- if (cs->debug & L1_DEB_LAPD)
- Logl2Frame(cs, skb, "PH_DATA", 0);
-#endif
- W6692_fill_fifo(cs);
- }
- spin_unlock_irqrestore(&cs->lock, flags);
- break;
- case (PH_PULL | INDICATION):
- spin_lock_irqsave(&cs->lock, flags);
- if (cs->tx_skb) {
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, " l2l1 tx_skb exist this shouldn't happen");
- skb_queue_tail(&cs->sq, skb);
- spin_unlock_irqrestore(&cs->lock, flags);
- break;
- }
- if (cs->debug & DEB_DLOG_HEX)
- LogFrame(cs, skb->data, skb->len);
- if (cs->debug & DEB_DLOG_VERBOSE)
- dlogframe(cs, skb, 0);
- cs->tx_skb = skb;
- cs->tx_cnt = 0;
-#ifdef L2FRAME_DEBUG /* psa */
- if (cs->debug & L1_DEB_LAPD)
- Logl2Frame(cs, skb, "PH_DATA_PULLED", 0);
-#endif
- W6692_fill_fifo(cs);
- spin_unlock_irqrestore(&cs->lock, flags);
- break;
- case (PH_PULL | REQUEST):
-#ifdef L2FRAME_DEBUG /* psa */
- if (cs->debug & L1_DEB_LAPD)
- debugl1(cs, "-> PH_REQUEST_PULL");
-#endif
- if (!cs->tx_skb) {
- test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
- st->l1.l1l2(st, PH_PULL | CONFIRM, NULL);
- } else
- test_and_set_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
- break;
- case (HW_RESET | REQUEST):
- spin_lock_irqsave(&cs->lock, flags);
- if (cs->dc.w6692.ph_state == W_L1IND_DRD) {
- ph_command(cs, W_L1CMD_ECK);
- spin_unlock_irqrestore(&cs->lock, flags);
- } else {
- ph_command(cs, W_L1CMD_RST);
- cs->dc.w6692.ph_state = W_L1CMD_RST;
- spin_unlock_irqrestore(&cs->lock, flags);
- W6692_new_ph(cs);
- }
- break;
- case (HW_ENABLE | REQUEST):
- spin_lock_irqsave(&cs->lock, flags);
- ph_command(cs, W_L1CMD_ECK);
- spin_unlock_irqrestore(&cs->lock, flags);
- break;
- case (HW_INFO3 | REQUEST):
- spin_lock_irqsave(&cs->lock, flags);
- ph_command(cs, W_L1CMD_AR8);
- spin_unlock_irqrestore(&cs->lock, flags);
- break;
- case (HW_TESTLOOP | REQUEST):
- val = 0;
- if (1 & (long) arg)
- val |= 0x0c;
- if (2 & (long) arg)
- val |= 0x3;
- /* !!! not implemented yet */
- break;
- case (HW_DEACTIVATE | RESPONSE):
- skb_queue_purge(&cs->rq);
- skb_queue_purge(&cs->sq);
- if (cs->tx_skb) {
- dev_kfree_skb_any(cs->tx_skb);
- cs->tx_skb = NULL;
- }
- if (test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags))
- del_timer(&cs->dbusytimer);
- if (test_and_clear_bit(FLG_L1_DBUSY, &cs->HW_Flags))
- schedule_event(cs, D_CLEARBUSY);
- break;
- default:
- if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "W6692_l1hw unknown %04x", pr);
- break;
- }
-}
-
-static void
-setstack_W6692(struct PStack *st, struct IsdnCardState *cs)
-{
- st->l1.l1hw = W6692_l1hw;
-}
-
-static void
-DC_Close_W6692(struct IsdnCardState *cs)
-{
-}
-
-static void
-dbusy_timer_handler(struct timer_list *t)
-{
- struct IsdnCardState *cs = from_timer(cs, t, dbusytimer);
- struct PStack *stptr;
- int rbch, star;
- u_long flags;
-
- spin_lock_irqsave(&cs->lock, flags);
- if (test_bit(FLG_DBUSY_TIMER, &cs->HW_Flags)) {
- rbch = cs->readW6692(cs, W_D_RBCH);
- star = cs->readW6692(cs, W_D_STAR);
- if (cs->debug)
- debugl1(cs, "D-Channel Busy D_RBCH %02x D_STAR %02x",
- rbch, star);
- if (star & W_D_STAR_XBZ) { /* D-Channel Busy */
- test_and_set_bit(FLG_L1_DBUSY, &cs->HW_Flags);
- stptr = cs->stlist;
- while (stptr != NULL) {
- stptr->l1.l1l2(stptr, PH_PAUSE | INDICATION, NULL);
- stptr = stptr->next;
- }
- } else {
- /* discard frame; reset transceiver */
- test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags);
- if (cs->tx_skb) {
- dev_kfree_skb_any(cs->tx_skb);
- cs->tx_cnt = 0;
- cs->tx_skb = NULL;
- } else {
- printk(KERN_WARNING "HiSax: W6692 D-Channel Busy no skb\n");
- debugl1(cs, "D-Channel Busy no skb");
- }
- cs->writeW6692(cs, W_D_CMDR, W_D_CMDR_XRST); /* Transmitter reset */
- spin_unlock_irqrestore(&cs->lock, flags);
- cs->irq_func(cs->irq, cs);
- return;
- }
- }
- spin_unlock_irqrestore(&cs->lock, flags);
-}
-
-static void
-W6692Bmode(struct BCState *bcs, int mode, int bchan)
-{
- struct IsdnCardState *cs = bcs->cs;
-
- if (cs->debug & L1_DEB_HSCX)
- debugl1(cs, "w6692 %c mode %d ichan %d",
- '1' + bchan, mode, bchan);
- bcs->mode = mode;
- bcs->channel = bchan;
- bcs->hw.w6692.bchan = bchan;
-
- switch (mode) {
- case (L1_MODE_NULL):
- cs->BC_Write_Reg(cs, bchan, W_B_MODE, 0);
- break;
- case (L1_MODE_TRANS):
- cs->BC_Write_Reg(cs, bchan, W_B_MODE, W_B_MODE_MMS);
- break;
- case (L1_MODE_HDLC):
- cs->BC_Write_Reg(cs, bchan, W_B_MODE, W_B_MODE_ITF);
- cs->BC_Write_Reg(cs, bchan, W_B_ADM1, 0xff);
- cs->BC_Write_Reg(cs, bchan, W_B_ADM2, 0xff);
- break;
- }
- if (mode)
- cs->BC_Write_Reg(cs, bchan, W_B_CMDR, W_B_CMDR_RRST |
- W_B_CMDR_RACT | W_B_CMDR_XRST);
- cs->BC_Write_Reg(cs, bchan, W_B_EXIM, 0x00);
-}
-
-static void
-W6692_l2l1(struct PStack *st, int pr, void *arg)
-{
- struct sk_buff *skb = arg;
- struct BCState *bcs = st->l1.bcs;
- u_long flags;
-
- switch (pr) {
- case (PH_DATA | REQUEST):
- spin_lock_irqsave(&bcs->cs->lock, flags);
- if (bcs->tx_skb) {
- skb_queue_tail(&bcs->squeue, skb);
- } else {
- bcs->tx_skb = skb;
- test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
- bcs->hw.w6692.count = 0;
- bcs->cs->BC_Send_Data(bcs);
- }
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
- break;
- case (PH_PULL | INDICATION):
- if (bcs->tx_skb) {
- printk(KERN_WARNING "W6692_l2l1: this shouldn't happen\n");
- break;
- }
- spin_lock_irqsave(&bcs->cs->lock, flags);
- test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
- bcs->tx_skb = skb;
- bcs->hw.w6692.count = 0;
- bcs->cs->BC_Send_Data(bcs);
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
- break;
- case (PH_PULL | REQUEST):
- if (!bcs->tx_skb) {
- test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
- st->l1.l1l2(st, PH_PULL | CONFIRM, NULL);
- } else
- test_and_set_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
- break;
- case (PH_ACTIVATE | REQUEST):
- spin_lock_irqsave(&bcs->cs->lock, flags);
- test_and_set_bit(BC_FLG_ACTIV, &bcs->Flag);
- W6692Bmode(bcs, st->l1.mode, st->l1.bc);
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
- l1_msg_b(st, pr, arg);
- break;
- case (PH_DEACTIVATE | REQUEST):
- l1_msg_b(st, pr, arg);
- break;
- case (PH_DEACTIVATE | CONFIRM):
- spin_lock_irqsave(&bcs->cs->lock, flags);
- test_and_clear_bit(BC_FLG_ACTIV, &bcs->Flag);
- test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
- W6692Bmode(bcs, 0, st->l1.bc);
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
- st->l1.l1l2(st, PH_DEACTIVATE | CONFIRM, NULL);
- break;
- }
-}
-
-static void
-close_w6692state(struct BCState *bcs)
-{
- W6692Bmode(bcs, 0, bcs->channel);
- if (test_and_clear_bit(BC_FLG_INIT, &bcs->Flag)) {
- kfree(bcs->hw.w6692.rcvbuf);
- bcs->hw.w6692.rcvbuf = NULL;
- kfree(bcs->blog);
- bcs->blog = NULL;
- skb_queue_purge(&bcs->rqueue);
- skb_queue_purge(&bcs->squeue);
- if (bcs->tx_skb) {
- dev_kfree_skb_any(bcs->tx_skb);
- bcs->tx_skb = NULL;
- test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
- }
- }
-}
-
-static int
-open_w6692state(struct IsdnCardState *cs, struct BCState *bcs)
-{
- if (!test_and_set_bit(BC_FLG_INIT, &bcs->Flag)) {
- if (!(bcs->hw.w6692.rcvbuf = kmalloc(HSCX_BUFMAX, GFP_ATOMIC))) {
- printk(KERN_WARNING
- "HiSax: No memory for w6692.rcvbuf\n");
- test_and_clear_bit(BC_FLG_INIT, &bcs->Flag);
- return (1);
- }
- if (!(bcs->blog = kmalloc(MAX_BLOG_SPACE, GFP_ATOMIC))) {
- printk(KERN_WARNING
- "HiSax: No memory for bcs->blog\n");
- test_and_clear_bit(BC_FLG_INIT, &bcs->Flag);
- kfree(bcs->hw.w6692.rcvbuf);
- bcs->hw.w6692.rcvbuf = NULL;
- return (2);
- }
- skb_queue_head_init(&bcs->rqueue);
- skb_queue_head_init(&bcs->squeue);
- }
- bcs->tx_skb = NULL;
- test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
- bcs->event = 0;
- bcs->hw.w6692.rcvidx = 0;
- bcs->tx_cnt = 0;
- return (0);
-}
-
-static int
-setstack_w6692(struct PStack *st, struct BCState *bcs)
-{
- bcs->channel = st->l1.bc;
- if (open_w6692state(st->l1.hardware, bcs))
- return (-1);
- st->l1.bcs = bcs;
- st->l2.l2l1 = W6692_l2l1;
- setstack_manager(st);
- bcs->st = st;
- setstack_l1_B(st);
- return (0);
-}
-
-static void resetW6692(struct IsdnCardState *cs)
-{
- cs->writeW6692(cs, W_D_CTL, W_D_CTL_SRST);
- mdelay(10);
- cs->writeW6692(cs, W_D_CTL, 0x00);
- mdelay(10);
- cs->writeW6692(cs, W_IMASK, 0xff);
- cs->writeW6692(cs, W_D_SAM, 0xff);
- cs->writeW6692(cs, W_D_TAM, 0xff);
- cs->writeW6692(cs, W_D_EXIM, 0x00);
- cs->writeW6692(cs, W_D_MODE, W_D_MODE_RACT);
- cs->writeW6692(cs, W_IMASK, 0x18);
- if (cs->subtyp == W6692_USR) {
- /* seems that USR implemented some power control features
-	 * Pin 79 is connected to the oscillator circuit so we
- * have to handle it here
- */
- cs->writeW6692(cs, W_PCTL, 0x80);
- cs->writeW6692(cs, W_XDATA, 0x00);
- }
-}
-
-static void initW6692(struct IsdnCardState *cs, int part)
-{
- if (part & 1) {
- cs->setstack_d = setstack_W6692;
- cs->DC_Close = DC_Close_W6692;
- timer_setup(&cs->dbusytimer, dbusy_timer_handler, 0);
- resetW6692(cs);
- ph_command(cs, W_L1CMD_RST);
- cs->dc.w6692.ph_state = W_L1CMD_RST;
- W6692_new_ph(cs);
- ph_command(cs, W_L1CMD_ECK);
-
- cs->bcs[0].BC_SetStack = setstack_w6692;
- cs->bcs[1].BC_SetStack = setstack_w6692;
- cs->bcs[0].BC_Close = close_w6692state;
- cs->bcs[1].BC_Close = close_w6692state;
- W6692Bmode(cs->bcs, 0, 0);
- W6692Bmode(cs->bcs + 1, 0, 0);
- }
- if (part & 2) {
- /* Reenable all IRQ */
- cs->writeW6692(cs, W_IMASK, 0x18);
- cs->writeW6692(cs, W_D_EXIM, 0x00);
- cs->BC_Write_Reg(cs, 0, W_B_EXIM, 0x00);
- cs->BC_Write_Reg(cs, 1, W_B_EXIM, 0x00);
- /* Reset D-chan receiver and transmitter */
- cs->writeW6692(cs, W_D_CMDR, W_D_CMDR_RRST | W_D_CMDR_XRST);
- }
-}
-
-/* Interface functions */
-
-static u_char
-ReadW6692(struct IsdnCardState *cs, u_char offset)
-{
- return (inb(cs->hw.w6692.iobase + offset));
-}
-
-static void
-WriteW6692(struct IsdnCardState *cs, u_char offset, u_char value)
-{
- outb(value, cs->hw.w6692.iobase + offset);
-}
-
-static void
-ReadISACfifo(struct IsdnCardState *cs, u_char *data, int size)
-{
- insb(cs->hw.w6692.iobase + W_D_RFIFO, data, size);
-}
-
-static void
-WriteISACfifo(struct IsdnCardState *cs, u_char *data, int size)
-{
- outsb(cs->hw.w6692.iobase + W_D_XFIFO, data, size);
-}
-
-static u_char
-ReadW6692B(struct IsdnCardState *cs, int bchan, u_char offset)
-{
- return (inb(cs->hw.w6692.iobase + (bchan ? 0x40 : 0) + offset));
-}
-
-static void
-WriteW6692B(struct IsdnCardState *cs, int bchan, u_char offset, u_char value)
-{
- outb(value, cs->hw.w6692.iobase + (bchan ? 0x40 : 0) + offset);
-}
-
-static int
-w6692_card_msg(struct IsdnCardState *cs, int mt, void *arg)
-{
- switch (mt) {
- case CARD_RESET:
- resetW6692(cs);
- return (0);
- case CARD_RELEASE:
- cs->writeW6692(cs, W_IMASK, 0xff);
- release_region(cs->hw.w6692.iobase, 256);
- if (cs->subtyp == W6692_USR) {
- cs->writeW6692(cs, W_XDATA, 0x04);
- }
- return (0);
- case CARD_INIT:
- initW6692(cs, 3);
- return (0);
- case CARD_TEST:
- return (0);
- }
- return (0);
-}
-
-static int id_idx;
-
-static struct pci_dev *dev_w6692 = NULL;
-
-int setup_w6692(struct IsdnCard *card)
-{
- struct IsdnCardState *cs = card->cs;
- char tmp[64];
- u_char found = 0;
- u_char pci_irq = 0;
- u_int pci_ioaddr = 0;
-
- strcpy(tmp, w6692_revision);
- printk(KERN_INFO "HiSax: W6692 driver Rev. %s\n", HiSax_getrev(tmp));
- if (cs->typ != ISDN_CTYPE_W6692)
- return (0);
-
- while (id_list[id_idx].vendor_id) {
- dev_w6692 = hisax_find_pci_device(id_list[id_idx].vendor_id,
- id_list[id_idx].device_id,
- dev_w6692);
- if (dev_w6692) {
- if (pci_enable_device(dev_w6692))
- continue;
- cs->subtyp = id_idx;
- break;
- }
- id_idx++;
- }
- if (dev_w6692) {
- found = 1;
- pci_irq = dev_w6692->irq;
-		/* I think address 0 is always the configuration area */
- /* and address 1 is the real IO space KKe 03.09.99 */
- pci_ioaddr = pci_resource_start(dev_w6692, 1);
-		/* The USR ISDN PCI card TA needs some special handling */
- if (cs->subtyp == W6692_WINBOND) {
- if ((W6692_SV_USR == dev_w6692->subsystem_vendor) &&
- (W6692_SD_USR == dev_w6692->subsystem_device)) {
- cs->subtyp = W6692_USR;
- }
- }
- }
- if (!found) {
- printk(KERN_WARNING "W6692: No PCI card found\n");
- return (0);
- }
- cs->irq = pci_irq;
- if (!cs->irq) {
- printk(KERN_WARNING "W6692: No IRQ for PCI card found\n");
- return (0);
- }
- if (!pci_ioaddr) {
- printk(KERN_WARNING "W6692: NO I/O Base Address found\n");
- return (0);
- }
- cs->hw.w6692.iobase = pci_ioaddr;
- printk(KERN_INFO "Found: %s %s, I/O base: 0x%x, irq: %d\n",
- id_list[cs->subtyp].vendor_name, id_list[cs->subtyp].card_name,
- pci_ioaddr, pci_irq);
- if (!request_region(cs->hw.w6692.iobase, 256, id_list[cs->subtyp].card_name)) {
- printk(KERN_WARNING
- "HiSax: %s I/O ports %x-%x already in use\n",
- id_list[cs->subtyp].card_name,
- cs->hw.w6692.iobase,
- cs->hw.w6692.iobase + 255);
- return (0);
- }
-
- printk(KERN_INFO
- "HiSax: %s config irq:%d I/O:%x\n",
- id_list[cs->subtyp].card_name, cs->irq,
- cs->hw.w6692.iobase);
-
- INIT_WORK(&cs->tqueue, W6692_bh);
- cs->readW6692 = &ReadW6692;
- cs->writeW6692 = &WriteW6692;
- cs->readisacfifo = &ReadISACfifo;
- cs->writeisacfifo = &WriteISACfifo;
- cs->BC_Read_Reg = &ReadW6692B;
- cs->BC_Write_Reg = &WriteW6692B;
- cs->BC_Send_Data = &W6692B_fill_fifo;
- cs->cardmsg = &w6692_card_msg;
- cs->irq_func = &W6692_interrupt;
- cs->irq_flags |= IRQF_SHARED;
- W6692Version(cs, "W6692:");
- printk(KERN_INFO "W6692 ISTA=0x%X\n", ReadW6692(cs, W_ISTA));
- printk(KERN_INFO "W6692 IMASK=0x%X\n", ReadW6692(cs, W_IMASK));
- printk(KERN_INFO "W6692 D_EXIR=0x%X\n", ReadW6692(cs, W_D_EXIR));
- printk(KERN_INFO "W6692 D_EXIM=0x%X\n", ReadW6692(cs, W_D_EXIM));
- printk(KERN_INFO "W6692 D_RSTA=0x%X\n", ReadW6692(cs, W_D_RSTA));
- return (1);
-}
diff --git a/drivers/isdn/hisax/w6692.h b/drivers/isdn/hisax/w6692.h
deleted file mode 100644
index 024b04d33e43..000000000000
--- a/drivers/isdn/hisax/w6692.h
+++ /dev/null
@@ -1,184 +0,0 @@
-/* $Id: w6692.h,v 1.4.2.2 2004/01/12 22:52:29 keil Exp $
- *
- * Winbond W6692 specific defines
- *
- * Author Petr Novak
- * Copyright by Petr Novak <petr.novak@i.cz>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-/* map W6692 functions to ISAC functions */
-#define readW6692 readisac
-#define writeW6692 writeisac
-#define readW6692fifo readisacfifo
-#define writeW6692fifo writeisacfifo
-
-/* B-channel FIFO read/write routines */
-
-#define READW6692BFIFO(cs, bchan, ptr, count) \
- insb(cs->hw.w6692.iobase + W_B_RFIFO + (bchan ? 0x40 : 0), ptr, count)
-
-#define WRITEW6692BFIFO(cs, bchan, ptr, count) \
- outsb(cs->hw.w6692.iobase + W_B_XFIFO + (bchan ? 0x40 : 0), ptr, count)
-
-/* Specifications of W6692 registers */
-
-#define W_D_RFIFO 0x00 /* R */
-#define W_D_XFIFO 0x04 /* W */
-#define W_D_CMDR 0x08 /* W */
-#define W_D_MODE 0x0c /* R/W */
-#define W_D_TIMR 0x10 /* R/W */
-#define W_ISTA 0x14 /* R_clr */
-#define W_IMASK 0x18 /* R/W */
-#define W_D_EXIR 0x1c /* R_clr */
-#define W_D_EXIM 0x20 /* R/W */
-#define W_D_STAR 0x24 /* R */
-#define W_D_RSTA 0x28 /* R */
-#define W_D_SAM 0x2c /* R/W */
-#define W_D_SAP1 0x30 /* R/W */
-#define W_D_SAP2 0x34 /* R/W */
-#define W_D_TAM 0x38 /* R/W */
-#define W_D_TEI1 0x3c /* R/W */
-#define W_D_TEI2 0x40 /* R/W */
-#define W_D_RBCH 0x44 /* R */
-#define W_D_RBCL 0x48 /* R */
-#define W_TIMR2 0x4c /* W */
-#define W_L1_RC 0x50 /* R/W */
-#define W_D_CTL 0x54 /* R/W */
-#define W_CIR 0x58 /* R */
-#define W_CIX 0x5c /* W */
-#define W_SQR 0x60 /* R */
-#define W_SQX 0x64 /* W */
-#define W_PCTL 0x68 /* R/W */
-#define W_MOR 0x6c /* R */
-#define W_MOX 0x70 /* R/W */
-#define W_MOSR 0x74 /* R_clr */
-#define W_MOCR 0x78 /* R/W */
-#define W_GCR 0x7c /* R/W */
-
-#define W_B_RFIFO 0x80 /* R */
-#define W_B_XFIFO 0x84 /* W */
-#define W_B_CMDR 0x88 /* W */
-#define W_B_MODE 0x8c /* R/W */
-#define W_B_EXIR 0x90 /* R_clr */
-#define W_B_EXIM 0x94 /* R/W */
-#define W_B_STAR 0x98 /* R */
-#define W_B_ADM1 0x9c /* R/W */
-#define W_B_ADM2 0xa0 /* R/W */
-#define W_B_ADR1 0xa4 /* R/W */
-#define W_B_ADR2 0xa8 /* R/W */
-#define W_B_RBCL 0xac /* R */
-#define W_B_RBCH 0xb0 /* R */
-
-#define W_XADDR 0xf4 /* R/W */
-#define W_XDATA 0xf8 /* R/W */
-#define W_EPCTL 0xfc /* W */
-
-/* W6692 register bits */
-
-#define W_D_CMDR_XRST 0x01
-#define W_D_CMDR_XME 0x02
-#define W_D_CMDR_XMS 0x08
-#define W_D_CMDR_STT 0x10
-#define W_D_CMDR_RRST 0x40
-#define W_D_CMDR_RACK 0x80
-
-#define W_D_MODE_RLP 0x01
-#define W_D_MODE_DLP 0x02
-#define W_D_MODE_MFD 0x04
-#define W_D_MODE_TEE 0x08
-#define W_D_MODE_TMS 0x10
-#define W_D_MODE_RACT 0x40
-#define W_D_MODE_MMS 0x80
-
-#define W_INT_B2_EXI 0x01
-#define W_INT_B1_EXI 0x02
-#define W_INT_D_EXI 0x04
-#define W_INT_XINT0 0x08
-#define W_INT_XINT1 0x10
-#define W_INT_D_XFR 0x20
-#define W_INT_D_RME 0x40
-#define W_INT_D_RMR 0x80
-
-#define W_D_EXI_WEXP 0x01
-#define W_D_EXI_TEXP 0x02
-#define W_D_EXI_ISC 0x04
-#define W_D_EXI_MOC 0x08
-#define W_D_EXI_TIN2 0x10
-#define W_D_EXI_XCOL 0x20
-#define W_D_EXI_XDUN 0x40
-#define W_D_EXI_RDOV 0x80
-
-#define W_D_STAR_DRDY 0x10
-#define W_D_STAR_XBZ 0x20
-#define W_D_STAR_XDOW 0x80
-
-#define W_D_RSTA_RMB 0x10
-#define W_D_RSTA_CRCE 0x20
-#define W_D_RSTA_RDOV 0x40
-
-#define W_D_CTL_SRST 0x20
-
-#define W_CIR_SCC 0x80
-#define W_CIR_ICC 0x40
-#define W_CIR_COD_MASK 0x0f
-
-#define W_B_CMDR_XRST 0x01
-#define W_B_CMDR_XME 0x02
-#define W_B_CMDR_XMS 0x04
-#define W_B_CMDR_RACT 0x20
-#define W_B_CMDR_RRST 0x40
-#define W_B_CMDR_RACK 0x80
-
-#define W_B_MODE_FTS0 0x01
-#define W_B_MODE_FTS1 0x02
-#define W_B_MODE_SW56 0x04
-#define W_B_MODE_BSW0 0x08
-#define W_B_MODE_BSW1 0x10
-#define W_B_MODE_EPCM 0x20
-#define W_B_MODE_ITF 0x40
-#define W_B_MODE_MMS 0x80
-
-#define W_B_EXI_XDUN 0x01
-#define W_B_EXI_XFR 0x02
-#define W_B_EXI_RDOV 0x10
-#define W_B_EXI_RME 0x20
-#define W_B_EXI_RMR 0x40
-
-#define W_B_STAR_XBZ 0x01
-#define W_B_STAR_XDOW 0x04
-#define W_B_STAR_RMB 0x10
-#define W_B_STAR_CRCE 0x20
-#define W_B_STAR_RDOV 0x40
-
-#define W_B_RBCH_LOV 0x20
-
-/* W6692 Layer1 commands */
-
-#define W_L1CMD_ECK 0x00
-#define W_L1CMD_RST 0x01
-#define W_L1CMD_SCP 0x04
-#define W_L1CMD_SSP 0x02
-#define W_L1CMD_AR8 0x08
-#define W_L1CMD_AR10 0x09
-#define W_L1CMD_EAL 0x0a
-#define W_L1CMD_DRC 0x0f
-
-/* W6692 Layer1 indications */
-
-#define W_L1IND_CE 0x07
-#define W_L1IND_DRD 0x00
-#define W_L1IND_LD 0x04
-#define W_L1IND_ARD 0x08
-#define W_L1IND_TI 0x0a
-#define W_L1IND_ATI 0x0b
-#define W_L1IND_AI8 0x0c
-#define W_L1IND_AI10 0x0d
-#define W_L1IND_CD 0x0f
-
-/* FIFO thresholds */
-#define W_D_FIFO_THRESH 64
-#define W_B_FIFO_THRESH 64
diff --git a/drivers/isdn/i4l/Kconfig b/drivers/isdn/i4l/Kconfig
deleted file mode 100644
index caa1b52f06f7..000000000000
--- a/drivers/isdn/i4l/Kconfig
+++ /dev/null
@@ -1,129 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Old ISDN4Linux config
-#
-
-if ISDN_I4L
-
-config ISDN_PPP
- bool "Support synchronous PPP"
- depends on INET
- select SLHC
- help
- Over digital connections such as ISDN, there is no need to
- synchronize sender and recipient's clocks with start and stop bits
- as is done over analog telephone lines. Instead, one can use
- "synchronous PPP". Saying Y here will include this protocol. This
- protocol is used by Cisco and Sun for example. So you want to say Y
- here if the other end of your ISDN connection supports it. You will
- need a special version of pppd (called ipppd) for using this
- feature. See <file:Documentation/isdn/README.syncppp> and
- <file:Documentation/isdn/syncPPP.FAQ> for more information.
-
-config ISDN_PPP_VJ
- bool "Use VJ-compression with synchronous PPP"
- depends on ISDN_PPP
- help
- This enables Van Jacobson header compression for synchronous PPP.
- Say Y if the other end of the connection supports it.
-
-config ISDN_MPP
- bool "Support generic MP (RFC 1717)"
- depends on ISDN_PPP
- help
- With synchronous PPP enabled, it is possible to increase throughput
- by bundling several ISDN-connections, using this protocol. See
- <file:Documentation/isdn/README.syncppp> for more information.
-
-config IPPP_FILTER
- bool "Filtering for synchronous PPP"
- depends on ISDN_PPP
- help
- Say Y here if you want to be able to filter the packets passing over
- IPPP interfaces. This allows you to control which packets count as
- activity (i.e. which packets will reset the idle timer or bring up
- a demand-dialled link) and which packets are to be dropped entirely.
- You need to say Y here if you wish to use the pass-filter and
- active-filter options to ipppd.
-
-config ISDN_PPP_BSDCOMP
- tristate "Support BSD compression"
- depends on ISDN_PPP
- help
- Support for the BSD-Compress compression method for PPP, which uses
- the LZW compression method to compress each PPP packet before it is
- sent over the wire. The machine at the other end of the PPP link
- (usually your ISP) has to support the BSD-Compress compression
- method as well for this to be useful. Even if they don't support it,
- it is safe to say Y here.
-
-config ISDN_AUDIO
- bool "Support audio via ISDN"
- help
- If you say Y here, the modem-emulator will support a subset of the
- EIA Class 8 Voice commands. Using a getty with voice-support
- (mgetty+sendfax by <gert@greenie.muc.de> with an extension, available
- with the ISDN utility package for example), you will be able to use
- your Linux box as an ISDN-answering machine. Of course, this must be
- supported by the lowlevel driver also. Currently, the HiSax driver
- is the only voice-supporting driver. See
- <file:Documentation/isdn/README.audio> for more information.
-
-config ISDN_TTY_FAX
- bool "Support AT-Fax Class 1 and 2 commands"
- depends on ISDN_AUDIO
- help
- If you say Y here, the modem-emulator will support a subset of the
- Fax Class 1 and 2 commands. Using a getty with fax-support
- (mgetty+sendfax, hylafax), you will be able to use your Linux box as
- an ISDN-fax-machine. This must be supported by the lowlevel driver
- also. See <file:Documentation/isdn/README.fax> for more information.
-
-config ISDN_X25
- bool "X.25 PLP on top of ISDN"
- depends on X25
- help
- This feature provides the X.25 protocol over ISDN connections.
- See <file:Documentation/isdn/README.x25> for more information
- if you are thinking about using this.
-
-
-menu "ISDN feature submodules"
-
-config ISDN_DRV_LOOP
- tristate "isdnloop support"
- depends on BROKEN_ON_SMP
- help
- This driver provides a virtual ISDN card. Its primary purpose is
- testing of linklevel features or configuration without getting
- charged by your service-provider for lots of phone calls.
-	  You will need the loopctrl utility from the latest isdn4k-utils
- package to set up this driver.
-
-config ISDN_DIVERSION
- tristate "Support isdn diversion services"
- help
- This option allows you to use some supplementary diversion
- services in conjunction with the HiSax driver on an EURO/DSS1
- line.
-
- Supported options are CD (call deflection), CFU (Call forward
- unconditional), CFB (Call forward when busy) and CFNR (call forward
- not reachable). Additionally the actual CFU, CFB and CFNR state may
- be interrogated.
-
- The use of CFU, CFB, CFNR and interrogation may be limited to some
- countries. The keypad protocol is still not implemented. CD should
- work in all countries if the service has been subscribed to.
-
- Please read the file <file:Documentation/isdn/README.diversion>.
-
-endmenu
-
-comment "ISDN4Linux hardware drivers"
-
-source "drivers/isdn/hisax/Kconfig"
-
-# end ISDN_I4L
-endif
-
diff --git a/drivers/isdn/i4l/Makefile b/drivers/isdn/i4l/Makefile
deleted file mode 100644
index be77500c9e86..000000000000
--- a/drivers/isdn/i4l/Makefile
+++ /dev/null
@@ -1,20 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-# Makefile for the kernel ISDN subsystem and device drivers.
-
-# Each configuration option enables a list of files.
-
-obj-$(CONFIG_ISDN_I4L) += isdn.o
-obj-$(CONFIG_ISDN_PPP_BSDCOMP) += isdn_bsdcomp.o
-obj-$(CONFIG_ISDN_HDLC) += isdnhdlc.o
-
-# Multipart objects.
-
-isdn-y := isdn_net.o isdn_tty.o isdn_v110.o isdn_common.o
-
-# Optional parts of multipart objects.
-
-isdn-$(CONFIG_ISDN_PPP) += isdn_ppp.o
-isdn-$(CONFIG_ISDN_X25) += isdn_concap.o isdn_x25iface.o
-isdn-$(CONFIG_ISDN_AUDIO) += isdn_audio.o
-isdn-$(CONFIG_ISDN_TTY_FAX) += isdn_ttyfax.o
-
diff --git a/drivers/isdn/i4l/isdn_audio.c b/drivers/isdn/i4l/isdn_audio.c
deleted file mode 100644
index b6bcd1eca128..000000000000
--- a/drivers/isdn/i4l/isdn_audio.c
+++ /dev/null
@@ -1,711 +0,0 @@
-/* $Id: isdn_audio.c,v 1.1.2.2 2004/01/12 22:37:18 keil Exp $
- *
- * Linux ISDN subsystem, audio conversion and compression (linklevel).
- *
- * Copyright 1994-1999 by Fritz Elfert (fritz@isdn4linux.de)
- * DTMF code (c) 1996 by Christian Mock (cm@kukuruz.ping.at)
- * Silence detection (c) 1998 by Armin Schindler (mac@gismo.telekom.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/isdn.h>
-#include <linux/slab.h>
-#include "isdn_audio.h"
-#include "isdn_common.h"
-
-char *isdn_audio_revision = "$Revision: 1.1.2.2 $";
-
-/*
- * Misc. lookup-tables.
- */
-
-/* ulaw -> signed 16-bit */
-static short isdn_audio_ulaw_to_s16[] =
-{
- 0x8284, 0x8684, 0x8a84, 0x8e84, 0x9284, 0x9684, 0x9a84, 0x9e84,
- 0xa284, 0xa684, 0xaa84, 0xae84, 0xb284, 0xb684, 0xba84, 0xbe84,
- 0xc184, 0xc384, 0xc584, 0xc784, 0xc984, 0xcb84, 0xcd84, 0xcf84,
- 0xd184, 0xd384, 0xd584, 0xd784, 0xd984, 0xdb84, 0xdd84, 0xdf84,
- 0xe104, 0xe204, 0xe304, 0xe404, 0xe504, 0xe604, 0xe704, 0xe804,
- 0xe904, 0xea04, 0xeb04, 0xec04, 0xed04, 0xee04, 0xef04, 0xf004,
- 0xf0c4, 0xf144, 0xf1c4, 0xf244, 0xf2c4, 0xf344, 0xf3c4, 0xf444,
- 0xf4c4, 0xf544, 0xf5c4, 0xf644, 0xf6c4, 0xf744, 0xf7c4, 0xf844,
- 0xf8a4, 0xf8e4, 0xf924, 0xf964, 0xf9a4, 0xf9e4, 0xfa24, 0xfa64,
- 0xfaa4, 0xfae4, 0xfb24, 0xfb64, 0xfba4, 0xfbe4, 0xfc24, 0xfc64,
- 0xfc94, 0xfcb4, 0xfcd4, 0xfcf4, 0xfd14, 0xfd34, 0xfd54, 0xfd74,
- 0xfd94, 0xfdb4, 0xfdd4, 0xfdf4, 0xfe14, 0xfe34, 0xfe54, 0xfe74,
- 0xfe8c, 0xfe9c, 0xfeac, 0xfebc, 0xfecc, 0xfedc, 0xfeec, 0xfefc,
- 0xff0c, 0xff1c, 0xff2c, 0xff3c, 0xff4c, 0xff5c, 0xff6c, 0xff7c,
- 0xff88, 0xff90, 0xff98, 0xffa0, 0xffa8, 0xffb0, 0xffb8, 0xffc0,
- 0xffc8, 0xffd0, 0xffd8, 0xffe0, 0xffe8, 0xfff0, 0xfff8, 0x0000,
- 0x7d7c, 0x797c, 0x757c, 0x717c, 0x6d7c, 0x697c, 0x657c, 0x617c,
- 0x5d7c, 0x597c, 0x557c, 0x517c, 0x4d7c, 0x497c, 0x457c, 0x417c,
- 0x3e7c, 0x3c7c, 0x3a7c, 0x387c, 0x367c, 0x347c, 0x327c, 0x307c,
- 0x2e7c, 0x2c7c, 0x2a7c, 0x287c, 0x267c, 0x247c, 0x227c, 0x207c,
- 0x1efc, 0x1dfc, 0x1cfc, 0x1bfc, 0x1afc, 0x19fc, 0x18fc, 0x17fc,
- 0x16fc, 0x15fc, 0x14fc, 0x13fc, 0x12fc, 0x11fc, 0x10fc, 0x0ffc,
- 0x0f3c, 0x0ebc, 0x0e3c, 0x0dbc, 0x0d3c, 0x0cbc, 0x0c3c, 0x0bbc,
- 0x0b3c, 0x0abc, 0x0a3c, 0x09bc, 0x093c, 0x08bc, 0x083c, 0x07bc,
- 0x075c, 0x071c, 0x06dc, 0x069c, 0x065c, 0x061c, 0x05dc, 0x059c,
- 0x055c, 0x051c, 0x04dc, 0x049c, 0x045c, 0x041c, 0x03dc, 0x039c,
- 0x036c, 0x034c, 0x032c, 0x030c, 0x02ec, 0x02cc, 0x02ac, 0x028c,
- 0x026c, 0x024c, 0x022c, 0x020c, 0x01ec, 0x01cc, 0x01ac, 0x018c,
- 0x0174, 0x0164, 0x0154, 0x0144, 0x0134, 0x0124, 0x0114, 0x0104,
- 0x00f4, 0x00e4, 0x00d4, 0x00c4, 0x00b4, 0x00a4, 0x0094, 0x0084,
- 0x0078, 0x0070, 0x0068, 0x0060, 0x0058, 0x0050, 0x0048, 0x0040,
- 0x0038, 0x0030, 0x0028, 0x0020, 0x0018, 0x0010, 0x0008, 0x0000
-};
-
-/* alaw -> signed 16-bit */
-static short isdn_audio_alaw_to_s16[] =
-{
- 0x13fc, 0xec04, 0x0144, 0xfebc, 0x517c, 0xae84, 0x051c, 0xfae4,
- 0x0a3c, 0xf5c4, 0x0048, 0xffb8, 0x287c, 0xd784, 0x028c, 0xfd74,
- 0x1bfc, 0xe404, 0x01cc, 0xfe34, 0x717c, 0x8e84, 0x071c, 0xf8e4,
- 0x0e3c, 0xf1c4, 0x00c4, 0xff3c, 0x387c, 0xc784, 0x039c, 0xfc64,
- 0x0ffc, 0xf004, 0x0104, 0xfefc, 0x417c, 0xbe84, 0x041c, 0xfbe4,
- 0x083c, 0xf7c4, 0x0008, 0xfff8, 0x207c, 0xdf84, 0x020c, 0xfdf4,
- 0x17fc, 0xe804, 0x018c, 0xfe74, 0x617c, 0x9e84, 0x061c, 0xf9e4,
- 0x0c3c, 0xf3c4, 0x0084, 0xff7c, 0x307c, 0xcf84, 0x030c, 0xfcf4,
- 0x15fc, 0xea04, 0x0164, 0xfe9c, 0x597c, 0xa684, 0x059c, 0xfa64,
- 0x0b3c, 0xf4c4, 0x0068, 0xff98, 0x2c7c, 0xd384, 0x02cc, 0xfd34,
- 0x1dfc, 0xe204, 0x01ec, 0xfe14, 0x797c, 0x8684, 0x07bc, 0xf844,
- 0x0f3c, 0xf0c4, 0x00e4, 0xff1c, 0x3c7c, 0xc384, 0x03dc, 0xfc24,
- 0x11fc, 0xee04, 0x0124, 0xfedc, 0x497c, 0xb684, 0x049c, 0xfb64,
- 0x093c, 0xf6c4, 0x0028, 0xffd8, 0x247c, 0xdb84, 0x024c, 0xfdb4,
- 0x19fc, 0xe604, 0x01ac, 0xfe54, 0x697c, 0x9684, 0x069c, 0xf964,
- 0x0d3c, 0xf2c4, 0x00a4, 0xff5c, 0x347c, 0xcb84, 0x034c, 0xfcb4,
- 0x12fc, 0xed04, 0x0134, 0xfecc, 0x4d7c, 0xb284, 0x04dc, 0xfb24,
- 0x09bc, 0xf644, 0x0038, 0xffc8, 0x267c, 0xd984, 0x026c, 0xfd94,
- 0x1afc, 0xe504, 0x01ac, 0xfe54, 0x6d7c, 0x9284, 0x06dc, 0xf924,
- 0x0dbc, 0xf244, 0x00b4, 0xff4c, 0x367c, 0xc984, 0x036c, 0xfc94,
- 0x0f3c, 0xf0c4, 0x00f4, 0xff0c, 0x3e7c, 0xc184, 0x03dc, 0xfc24,
- 0x07bc, 0xf844, 0x0008, 0xfff8, 0x1efc, 0xe104, 0x01ec, 0xfe14,
- 0x16fc, 0xe904, 0x0174, 0xfe8c, 0x5d7c, 0xa284, 0x05dc, 0xfa24,
- 0x0bbc, 0xf444, 0x0078, 0xff88, 0x2e7c, 0xd184, 0x02ec, 0xfd14,
- 0x14fc, 0xeb04, 0x0154, 0xfeac, 0x557c, 0xaa84, 0x055c, 0xfaa4,
- 0x0abc, 0xf544, 0x0058, 0xffa8, 0x2a7c, 0xd584, 0x02ac, 0xfd54,
- 0x1cfc, 0xe304, 0x01cc, 0xfe34, 0x757c, 0x8a84, 0x075c, 0xf8a4,
- 0x0ebc, 0xf144, 0x00d4, 0xff2c, 0x3a7c, 0xc584, 0x039c, 0xfc64,
- 0x10fc, 0xef04, 0x0114, 0xfeec, 0x457c, 0xba84, 0x045c, 0xfba4,
- 0x08bc, 0xf744, 0x0018, 0xffe8, 0x227c, 0xdd84, 0x022c, 0xfdd4,
- 0x18fc, 0xe704, 0x018c, 0xfe74, 0x657c, 0x9a84, 0x065c, 0xf9a4,
- 0x0cbc, 0xf344, 0x0094, 0xff6c, 0x327c, 0xcd84, 0x032c, 0xfcd4
-};
-
-/* alaw -> ulaw */
-static char isdn_audio_alaw_to_ulaw[] =
-{
- 0xab, 0x2b, 0xe3, 0x63, 0x8b, 0x0b, 0xc9, 0x49,
- 0xba, 0x3a, 0xf6, 0x76, 0x9b, 0x1b, 0xd7, 0x57,
- 0xa3, 0x23, 0xdd, 0x5d, 0x83, 0x03, 0xc1, 0x41,
- 0xb2, 0x32, 0xeb, 0x6b, 0x93, 0x13, 0xcf, 0x4f,
- 0xaf, 0x2f, 0xe7, 0x67, 0x8f, 0x0f, 0xcd, 0x4d,
- 0xbe, 0x3e, 0xfe, 0x7e, 0x9f, 0x1f, 0xdb, 0x5b,
- 0xa7, 0x27, 0xdf, 0x5f, 0x87, 0x07, 0xc5, 0x45,
- 0xb6, 0x36, 0xef, 0x6f, 0x97, 0x17, 0xd3, 0x53,
- 0xa9, 0x29, 0xe1, 0x61, 0x89, 0x09, 0xc7, 0x47,
- 0xb8, 0x38, 0xf2, 0x72, 0x99, 0x19, 0xd5, 0x55,
- 0xa1, 0x21, 0xdc, 0x5c, 0x81, 0x01, 0xbf, 0x3f,
- 0xb0, 0x30, 0xe9, 0x69, 0x91, 0x11, 0xce, 0x4e,
- 0xad, 0x2d, 0xe5, 0x65, 0x8d, 0x0d, 0xcb, 0x4b,
- 0xbc, 0x3c, 0xfa, 0x7a, 0x9d, 0x1d, 0xd9, 0x59,
- 0xa5, 0x25, 0xde, 0x5e, 0x85, 0x05, 0xc3, 0x43,
- 0xb4, 0x34, 0xed, 0x6d, 0x95, 0x15, 0xd1, 0x51,
- 0xac, 0x2c, 0xe4, 0x64, 0x8c, 0x0c, 0xca, 0x4a,
- 0xbb, 0x3b, 0xf8, 0x78, 0x9c, 0x1c, 0xd8, 0x58,
- 0xa4, 0x24, 0xde, 0x5e, 0x84, 0x04, 0xc2, 0x42,
- 0xb3, 0x33, 0xec, 0x6c, 0x94, 0x14, 0xd0, 0x50,
- 0xb0, 0x30, 0xe8, 0x68, 0x90, 0x10, 0xce, 0x4e,
- 0xbf, 0x3f, 0xfe, 0x7e, 0xa0, 0x20, 0xdc, 0x5c,
- 0xa8, 0x28, 0xe0, 0x60, 0x88, 0x08, 0xc6, 0x46,
- 0xb7, 0x37, 0xf0, 0x70, 0x98, 0x18, 0xd4, 0x54,
- 0xaa, 0x2a, 0xe2, 0x62, 0x8a, 0x0a, 0xc8, 0x48,
- 0xb9, 0x39, 0xf4, 0x74, 0x9a, 0x1a, 0xd6, 0x56,
- 0xa2, 0x22, 0xdd, 0x5d, 0x82, 0x02, 0xc0, 0x40,
- 0xb1, 0x31, 0xea, 0x6a, 0x92, 0x12, 0xcf, 0x4f,
- 0xae, 0x2e, 0xe6, 0x66, 0x8e, 0x0e, 0xcc, 0x4c,
- 0xbd, 0x3d, 0xfc, 0x7c, 0x9e, 0x1e, 0xda, 0x5a,
- 0xa6, 0x26, 0xdf, 0x5f, 0x86, 0x06, 0xc4, 0x44,
- 0xb5, 0x35, 0xee, 0x6e, 0x96, 0x16, 0xd2, 0x52
-};
-
-/* ulaw -> alaw */
-static char isdn_audio_ulaw_to_alaw[] =
-{
- 0xab, 0x55, 0xd5, 0x15, 0x95, 0x75, 0xf5, 0x35,
- 0xb5, 0x45, 0xc5, 0x05, 0x85, 0x65, 0xe5, 0x25,
- 0xa5, 0x5d, 0xdd, 0x1d, 0x9d, 0x7d, 0xfd, 0x3d,
- 0xbd, 0x4d, 0xcd, 0x0d, 0x8d, 0x6d, 0xed, 0x2d,
- 0xad, 0x51, 0xd1, 0x11, 0x91, 0x71, 0xf1, 0x31,
- 0xb1, 0x41, 0xc1, 0x01, 0x81, 0x61, 0xe1, 0x21,
- 0x59, 0xd9, 0x19, 0x99, 0x79, 0xf9, 0x39, 0xb9,
- 0x49, 0xc9, 0x09, 0x89, 0x69, 0xe9, 0x29, 0xa9,
- 0xd7, 0x17, 0x97, 0x77, 0xf7, 0x37, 0xb7, 0x47,
- 0xc7, 0x07, 0x87, 0x67, 0xe7, 0x27, 0xa7, 0xdf,
- 0x9f, 0x7f, 0xff, 0x3f, 0xbf, 0x4f, 0xcf, 0x0f,
- 0x8f, 0x6f, 0xef, 0x2f, 0x53, 0x13, 0x73, 0x33,
- 0xb3, 0x43, 0xc3, 0x03, 0x83, 0x63, 0xe3, 0x23,
- 0xa3, 0x5b, 0xdb, 0x1b, 0x9b, 0x7b, 0xfb, 0x3b,
- 0xbb, 0xbb, 0x4b, 0x4b, 0xcb, 0xcb, 0x0b, 0x0b,
- 0x8b, 0x8b, 0x6b, 0x6b, 0xeb, 0xeb, 0x2b, 0x2b,
- 0xab, 0x54, 0xd4, 0x14, 0x94, 0x74, 0xf4, 0x34,
- 0xb4, 0x44, 0xc4, 0x04, 0x84, 0x64, 0xe4, 0x24,
- 0xa4, 0x5c, 0xdc, 0x1c, 0x9c, 0x7c, 0xfc, 0x3c,
- 0xbc, 0x4c, 0xcc, 0x0c, 0x8c, 0x6c, 0xec, 0x2c,
- 0xac, 0x50, 0xd0, 0x10, 0x90, 0x70, 0xf0, 0x30,
- 0xb0, 0x40, 0xc0, 0x00, 0x80, 0x60, 0xe0, 0x20,
- 0x58, 0xd8, 0x18, 0x98, 0x78, 0xf8, 0x38, 0xb8,
- 0x48, 0xc8, 0x08, 0x88, 0x68, 0xe8, 0x28, 0xa8,
- 0xd6, 0x16, 0x96, 0x76, 0xf6, 0x36, 0xb6, 0x46,
- 0xc6, 0x06, 0x86, 0x66, 0xe6, 0x26, 0xa6, 0xde,
- 0x9e, 0x7e, 0xfe, 0x3e, 0xbe, 0x4e, 0xce, 0x0e,
- 0x8e, 0x6e, 0xee, 0x2e, 0x52, 0x12, 0x72, 0x32,
- 0xb2, 0x42, 0xc2, 0x02, 0x82, 0x62, 0xe2, 0x22,
- 0xa2, 0x5a, 0xda, 0x1a, 0x9a, 0x7a, 0xfa, 0x3a,
- 0xba, 0xba, 0x4a, 0x4a, 0xca, 0xca, 0x0a, 0x0a,
- 0x8a, 0x8a, 0x6a, 0x6a, 0xea, 0xea, 0x2a, 0x2a
-};
-
-#define NCOEFF 8 /* number of frequencies to be analyzed */
-#define DTMF_TRESH 4000 /* above this is dtmf */
-#define SILENCE_TRESH 200 /* below this is silence */
-#define AMP_BITS 9 /* bits per sample, reduced to avoid overflow */
-#define LOGRP 0
-#define HIGRP 1
-
-/* For DTMF recognition:
- * 2 * cos(2 * PI * k / N) precalculated for all k
- */
-static int cos2pik[NCOEFF] =
-{
- 55813, 53604, 51193, 48591, 38114, 33057, 25889, 18332
-};
-
-static char dtmf_matrix[4][4] =
-{
- {'1', '2', '3', 'A'},
- {'4', '5', '6', 'B'},
- {'7', '8', '9', 'C'},
- {'*', '0', '#', 'D'}
-};
-
-static inline void
-isdn_audio_tlookup(const u_char *table, u_char *buff, unsigned long n)
-{
-#ifdef __i386__
- unsigned long d0, d1, d2, d3;
- __asm__ __volatile__(
- "cld\n"
- "1:\tlodsb\n\t"
- "xlatb\n\t"
- "stosb\n\t"
- "loop 1b\n\t"
- : "=&b"(d0), "=&c"(d1), "=&D"(d2), "=&S"(d3)
- : "0"((long) table), "1"(n), "2"((long) buff), "3"((long) buff)
- : "memory", "ax");
-#else
- while (n--)
- *buff = table[*(unsigned char *)buff], buff++;
-#endif
-}
-
-void
-isdn_audio_ulaw2alaw(unsigned char *buff, unsigned long len)
-{
- isdn_audio_tlookup(isdn_audio_ulaw_to_alaw, buff, len);
-}
-
-void
-isdn_audio_alaw2ulaw(unsigned char *buff, unsigned long len)
-{
- isdn_audio_tlookup(isdn_audio_alaw_to_ulaw, buff, len);
-}
-
-/*
- * linear <-> adpcm conversion stuff
- * Most parts from the mgetty-package.
- * (C) by Gert Doering and Klaus Weidner
- * Used by permission of Gert Doering
- */
-
-
-#define ZEROTRAP /* turn on the trap as per the MIL-STD */
-#undef ZEROTRAP
-#define BIAS 0x84 /* define the add-in bias for 16 bit samples */
-#define CLIP 32635
-
-static unsigned char
-isdn_audio_linear2ulaw(int sample)
-{
- static int exp_lut[256] =
- {
- 0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
- 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
- 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
- 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
- 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
- 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
- 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
- 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
- };
- int sign,
- exponent,
- mantissa;
- unsigned char ulawbyte;
-
- /* Get the sample into sign-magnitude. */
- sign = (sample >> 8) & 0x80; /* set aside the sign */
- if (sign != 0)
- sample = -sample; /* get magnitude */
- if (sample > CLIP)
- sample = CLIP; /* clip the magnitude */
-
- /* Convert from 16 bit linear to ulaw. */
- sample = sample + BIAS;
- exponent = exp_lut[(sample >> 7) & 0xFF];
- mantissa = (sample >> (exponent + 3)) & 0x0F;
- ulawbyte = ~(sign | (exponent << 4) | mantissa);
-#ifdef ZEROTRAP
- /* optional CCITT trap */
- if (ulawbyte == 0)
- ulawbyte = 0x02;
-#endif
- return (ulawbyte);
-}
-
-
-static int Mx[3][8] =
-{
- {0x3800, 0x5600, 0, 0, 0, 0, 0, 0},
- {0x399a, 0x3a9f, 0x4d14, 0x6607, 0, 0, 0, 0},
- {0x3556, 0x3556, 0x399A, 0x3A9F, 0x4200, 0x4D14, 0x6607, 0x6607},
-};
-
-static int bitmask[9] =
-{
- 0, 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f, 0x7f, 0xff
-};
-
-static int
-isdn_audio_get_bits(adpcm_state *s, unsigned char **in, int *len)
-{
- while (s->nleft < s->nbits) {
- int d = *((*in)++);
- (*len)--;
- s->word = (s->word << 8) | d;
- s->nleft += 8;
- }
- s->nleft -= s->nbits;
- return (s->word >> s->nleft) & bitmask[s->nbits];
-}
-
-static void
-isdn_audio_put_bits(int data, int nbits, adpcm_state *s,
- unsigned char **out, int *len)
-{
- s->word = (s->word << nbits) | (data & bitmask[nbits]);
- s->nleft += nbits;
- while (s->nleft >= 8) {
- int d = (s->word >> (s->nleft - 8));
- *(out[0]++) = d & 255;
- (*len)++;
- s->nleft -= 8;
- }
-}
-
-adpcm_state *
-isdn_audio_adpcm_init(adpcm_state *s, int nbits)
-{
- if (!s)
- s = kmalloc(sizeof(adpcm_state), GFP_ATOMIC);
- if (s) {
- s->a = 0;
- s->d = 5;
- s->word = 0;
- s->nleft = 0;
- s->nbits = nbits;
- }
- return s;
-}
-
-dtmf_state *
-isdn_audio_dtmf_init(dtmf_state *s)
-{
- if (!s)
- s = kmalloc(sizeof(dtmf_state), GFP_ATOMIC);
- if (s) {
- s->idx = 0;
- s->last = ' ';
- }
- return s;
-}
-
-/*
- * Decompression of adpcm data to a/u-law
- *
- */
-
-int
-isdn_audio_adpcm2xlaw(adpcm_state *s, int fmt, unsigned char *in,
- unsigned char *out, int len)
-{
- int a = s->a;
- int d = s->d;
- int nbits = s->nbits;
- int olen = 0;
-
- while (len) {
- int e = isdn_audio_get_bits(s, &in, &len);
- int sign;
-
- if (nbits == 4 && e == 0)
- d = 4;
- sign = (e >> (nbits - 1)) ? -1 : 1;
- e &= bitmask[nbits - 1];
- a += sign * ((e << 1) + 1) * d >> 1;
- if (d & 1)
- a++;
- if (fmt)
- *out++ = isdn_audio_ulaw_to_alaw[
- isdn_audio_linear2ulaw(a << 2)];
- else
- *out++ = isdn_audio_linear2ulaw(a << 2);
- olen++;
- d = (d * Mx[nbits - 2][e] + 0x2000) >> 14;
- if (d < 5)
- d = 5;
- }
- s->a = a;
- s->d = d;
- return olen;
-}
-
-int
-isdn_audio_xlaw2adpcm(adpcm_state *s, int fmt, unsigned char *in,
- unsigned char *out, int len)
-{
- int a = s->a;
- int d = s->d;
- int nbits = s->nbits;
- int olen = 0;
-
- while (len--) {
- int e = 0,
- nmax = 1 << (nbits - 1);
- int sign,
- delta;
-
- if (fmt)
- delta = (isdn_audio_alaw_to_s16[*in++] >> 2) - a;
- else
- delta = (isdn_audio_ulaw_to_s16[*in++] >> 2) - a;
- if (delta < 0) {
- e = nmax;
- delta = -delta;
- }
- while (--nmax && delta > d) {
- delta -= d;
- e++;
- }
- if (nbits == 4 && ((e & 0x0f) == 0))
- e = 8;
- isdn_audio_put_bits(e, nbits, s, &out, &olen);
- sign = (e >> (nbits - 1)) ? -1 : 1;
- e &= bitmask[nbits - 1];
-
- a += sign * ((e << 1) + 1) * d >> 1;
- if (d & 1)
- a++;
- d = (d * Mx[nbits - 2][e] + 0x2000) >> 14;
- if (d < 5)
- d = 5;
- }
- s->a = a;
- s->d = d;
- return olen;
-}
-
-/*
- * Goertzel algorithm.
- * See http://ptolemy.eecs.berkeley.edu/papers/96/dtmf_ict/
- * for more info.
- * Result is stored into an sk_buff and queued up for later
- * evaluation.
- */
-static void
-isdn_audio_goertzel(int *sample, modem_info *info)
-{
- int sk,
- sk1,
- sk2;
- int k,
- n;
- struct sk_buff *skb;
- int *result;
-
- skb = dev_alloc_skb(sizeof(int) * NCOEFF);
- if (!skb) {
- printk(KERN_WARNING
- "isdn_audio: Could not alloc DTMF result for ttyI%d\n",
- info->line);
- return;
- }
- result = skb_put(skb, sizeof(int) * NCOEFF);
- for (k = 0; k < NCOEFF; k++) {
- sk = sk1 = sk2 = 0;
- for (n = 0; n < DTMF_NPOINTS; n++) {
- sk = sample[n] + ((cos2pik[k] * sk1) >> 15) - sk2;
- sk2 = sk1;
- sk1 = sk;
- }
- /* Avoid overflows */
- sk >>= 1;
- sk2 >>= 1;
- /* compute |X(k)|**2 */
- /* report overflows. This should not happen. */
- /* Comment this out if desired */
- if (sk < -32768 || sk > 32767)
- printk(KERN_DEBUG
- "isdn_audio: dtmf goertzel overflow, sk=%d\n", sk);
- if (sk2 < -32768 || sk2 > 32767)
- printk(KERN_DEBUG
- "isdn_audio: dtmf goertzel overflow, sk2=%d\n", sk2);
- result[k] =
- ((sk * sk) >> AMP_BITS) -
- ((((cos2pik[k] * sk) >> 15) * sk2) >> AMP_BITS) +
- ((sk2 * sk2) >> AMP_BITS);
- }
- skb_queue_tail(&info->dtmf_queue, skb);
- isdn_timer_ctrl(ISDN_TIMER_MODEMREAD, 1);
-}
-
-void
-isdn_audio_eval_dtmf(modem_info *info)
-{
- struct sk_buff *skb;
- int *result;
- dtmf_state *s;
- int silence;
- int i;
- int di;
- int ch;
- int grp[2];
- char what;
- char *p;
- int thresh;
-
- while ((skb = skb_dequeue(&info->dtmf_queue))) {
- result = (int *) skb->data;
- s = info->dtmf_state;
- grp[LOGRP] = grp[HIGRP] = -1;
- silence = 0;
- thresh = 0;
- for (i = 0; i < NCOEFF; i++) {
- if (result[i] > DTMF_TRESH) {
- if (result[i] > thresh)
- thresh = result[i];
- }
- else if (result[i] < SILENCE_TRESH)
- silence++;
- }
- if (silence == NCOEFF)
- what = ' ';
- else {
- if (thresh > 0) {
- thresh = thresh >> 4; /* touchtones must match within 12 dB */
- for (i = 0; i < NCOEFF; i++) {
- if (result[i] < thresh)
- continue; /* ignore */
- /* good level found. This is allowed only one time per group */
- if (i < NCOEFF / 2) {
- /* lowgroup*/
- if (grp[LOGRP] >= 0) {
-					/* Bad. Another tone found. */
- grp[LOGRP] = -1;
- break;
- }
- else
- grp[LOGRP] = i;
- }
- else { /* higroup */
-				if (grp[HIGRP] >= 0) { /* Bad. Another tone found. */
- grp[HIGRP] = -1;
- break;
- }
- else
- grp[HIGRP] = i - NCOEFF/2;
- }
- }
- if ((grp[LOGRP] >= 0) && (grp[HIGRP] >= 0)) {
- what = dtmf_matrix[grp[LOGRP]][grp[HIGRP]];
- if (s->last != ' ' && s->last != '.')
- s->last = what; /* min. 1 non-DTMF between DTMF */
- } else
- what = '.';
- }
- else
- what = '.';
- }
- if ((what != s->last) && (what != ' ') && (what != '.')) {
- printk(KERN_DEBUG "dtmf: tt='%c'\n", what);
- p = skb->data;
- *p++ = 0x10;
- *p = what;
- skb_trim(skb, 2);
- ISDN_AUDIO_SKB_DLECOUNT(skb) = 0;
- ISDN_AUDIO_SKB_LOCK(skb) = 0;
- di = info->isdn_driver;
- ch = info->isdn_channel;
- __skb_queue_tail(&dev->drv[di]->rpqueue[ch], skb);
- dev->drv[di]->rcvcount[ch] += 2;
- /* Schedule dequeuing */
- if ((dev->modempoll) && (info->rcvsched))
- isdn_timer_ctrl(ISDN_TIMER_MODEMREAD, 1);
- wake_up_interruptible(&dev->drv[di]->rcv_waitq[ch]);
- } else
- kfree_skb(skb);
- s->last = what;
- }
-}
-
-/*
- * Decode DTMF tones, queue result in separate sk_buf for
- * later examination.
- * Parameters:
- *   info = pointer to the modem_info (DTMF state is taken from info->dtmf_state)
- * buf = input audio data
- * len = size of audio data.
- * fmt = audio data format (0 = ulaw, 1 = alaw)
- */
-void
-isdn_audio_calc_dtmf(modem_info *info, unsigned char *buf, int len, int fmt)
-{
- dtmf_state *s = info->dtmf_state;
- int i;
- int c;
-
- while (len) {
- c = DTMF_NPOINTS - s->idx;
- if (c > len)
- c = len;
- if (c <= 0)
- break;
- for (i = 0; i < c; i++) {
- if (fmt)
- s->buf[s->idx++] =
- isdn_audio_alaw_to_s16[*buf++] >> (15 - AMP_BITS);
- else
- s->buf[s->idx++] =
- isdn_audio_ulaw_to_s16[*buf++] >> (15 - AMP_BITS);
- }
- if (s->idx == DTMF_NPOINTS) {
- isdn_audio_goertzel(s->buf, info);
- s->idx = 0;
- }
- len -= c;
- }
-}
-
-silence_state *
-isdn_audio_silence_init(silence_state *s)
-{
- if (!s)
- s = kmalloc(sizeof(silence_state), GFP_ATOMIC);
- if (s) {
- s->idx = 0;
- s->state = 0;
- }
- return s;
-}
-
-void
-isdn_audio_calc_silence(modem_info *info, unsigned char *buf, int len, int fmt)
-{
- silence_state *s = info->silence_state;
- int i;
- signed char c;
-
- if (!info->emu.vpar[1]) return;
-
- for (i = 0; i < len; i++) {
- if (fmt)
- c = isdn_audio_alaw_to_ulaw[*buf++];
- else
- c = *buf++;
-
- if (c > 0) c -= 128;
- c = abs(c);
-
- if (c > (info->emu.vpar[1] * 4)) {
- s->idx = 0;
- s->state = 1;
- } else {
- if (s->idx < 210000) s->idx++;
- }
- }
-}
-
-void
-isdn_audio_put_dle_code(modem_info *info, u_char code)
-{
- struct sk_buff *skb;
- int di;
- int ch;
- char *p;
-
- skb = dev_alloc_skb(2);
- if (!skb) {
- printk(KERN_WARNING
- "isdn_audio: Could not alloc skb for ttyI%d\n",
- info->line);
- return;
- }
- p = skb_put(skb, 2);
- p[0] = 0x10;
- p[1] = code;
- ISDN_AUDIO_SKB_DLECOUNT(skb) = 0;
- ISDN_AUDIO_SKB_LOCK(skb) = 0;
- di = info->isdn_driver;
- ch = info->isdn_channel;
- __skb_queue_tail(&dev->drv[di]->rpqueue[ch], skb);
- dev->drv[di]->rcvcount[ch] += 2;
- /* Schedule dequeuing */
- if ((dev->modempoll) && (info->rcvsched))
- isdn_timer_ctrl(ISDN_TIMER_MODEMREAD, 1);
- wake_up_interruptible(&dev->drv[di]->rcv_waitq[ch]);
-}
-
-void
-isdn_audio_eval_silence(modem_info *info)
-{
- silence_state *s = info->silence_state;
- char what;
-
- what = ' ';
-
- if (s->idx > (info->emu.vpar[2] * 800)) {
- s->idx = 0;
- if (!s->state) { /* silence from beginning of rec */
- what = 's';
- } else {
- what = 'q';
- }
- }
- if ((what == 's') || (what == 'q')) {
- printk(KERN_DEBUG "ttyI%d: %s\n", info->line,
- (what == 's') ? "silence" : "quiet");
- isdn_audio_put_dle_code(info, what);
- }
-}
diff --git a/drivers/isdn/i4l/isdn_audio.h b/drivers/isdn/i4l/isdn_audio.h
deleted file mode 100644
index 013c3582e0d1..000000000000
--- a/drivers/isdn/i4l/isdn_audio.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/* $Id: isdn_audio.h,v 1.1.2.2 2004/01/12 22:37:18 keil Exp $
- *
- * Linux ISDN subsystem, audio conversion and compression (linklevel).
- *
- * Copyright 1994-1999 by Fritz Elfert (fritz@isdn4linux.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#define DTMF_NPOINTS 205 /* Number of samples for DTMF recognition */
-typedef struct adpcm_state {
- int a;
- int d;
- int word;
- int nleft;
- int nbits;
-} adpcm_state;
-
-typedef struct dtmf_state {
- char last;
- char llast;
- int idx;
- int buf[DTMF_NPOINTS];
-} dtmf_state;
-
-typedef struct silence_state {
- int state;
- unsigned int idx;
-} silence_state;
-
-extern void isdn_audio_ulaw2alaw(unsigned char *, unsigned long);
-extern void isdn_audio_alaw2ulaw(unsigned char *, unsigned long);
-extern adpcm_state *isdn_audio_adpcm_init(adpcm_state *, int);
-extern int isdn_audio_adpcm2xlaw(adpcm_state *, int, unsigned char *, unsigned char *, int);
-extern int isdn_audio_xlaw2adpcm(adpcm_state *, int, unsigned char *, unsigned char *, int);
-extern void isdn_audio_calc_dtmf(modem_info *, unsigned char *, int, int);
-extern void isdn_audio_eval_dtmf(modem_info *);
-dtmf_state *isdn_audio_dtmf_init(dtmf_state *);
-extern void isdn_audio_calc_silence(modem_info *, unsigned char *, int, int);
-extern void isdn_audio_eval_silence(modem_info *);
-silence_state *isdn_audio_silence_init(silence_state *);
-extern void isdn_audio_put_dle_code(modem_info *, u_char);
diff --git a/drivers/isdn/i4l/isdn_bsdcomp.c b/drivers/isdn/i4l/isdn_bsdcomp.c
deleted file mode 100644
index 7f28b967ed19..000000000000
--- a/drivers/isdn/i4l/isdn_bsdcomp.c
+++ /dev/null
@@ -1,930 +0,0 @@
-/*
- * BSD compression module
- *
- * Patched version for ISDN syncPPP written 1997/1998 by Michael Hipp
- * The whole module is now SKB based.
- *
- */
-
-/*
- * Update: The Berkeley copyright was changed, and the change
- * is retroactive to all "true" BSD software (i.e. everything
- * from UCB as opposed to other people's code that just carried
- * the same license). The new copyright doesn't clash with the
- * GPL, so the module-only restriction has been removed.
- */
-
-/*
- * Original copyright notice:
- *
- * Copyright (c) 1985, 1986 The Regents of the University of California.
- * All rights reserved.
- *
- * This code is derived from software contributed to Berkeley by
- * James A. Woods, derived from original work by Spencer Thomas
- * and Joseph Orost.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed by the University of
- * California, Berkeley and its contributors.
- * 4. Neither the name of the University nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/fcntl.h>
-#include <linux/interrupt.h>
-#include <linux/ptrace.h>
-#include <linux/ioport.h>
-#include <linux/in.h>
-#include <linux/slab.h>
-#include <linux/tty.h>
-#include <linux/errno.h>
-#include <linux/string.h> /* used in new tty drivers */
-#include <linux/signal.h> /* used in new tty drivers */
-#include <linux/bitops.h>
-
-#include <asm/byteorder.h>
-#include <asm/types.h>
-
-#include <linux/if.h>
-
-#include <linux/if_ether.h>
-#include <linux/netdevice.h>
-#include <linux/skbuff.h>
-#include <linux/inet.h>
-#include <linux/ioctl.h>
-#include <linux/vmalloc.h>
-
-#include <linux/ppp_defs.h>
-
-#include <linux/isdn.h>
-#include <linux/isdn_ppp.h>
-#include <linux/ip.h>
-#include <linux/tcp.h>
-#include <linux/if_arp.h>
-#include <linux/ppp-comp.h>
-
-#include "isdn_ppp.h"
-
-MODULE_DESCRIPTION("ISDN4Linux: BSD Compression for PPP over ISDN");
-MODULE_LICENSE("Dual BSD/GPL");
-
-#define BSD_VERSION(x) ((x) >> 5)
-#define BSD_NBITS(x) ((x) & 0x1F)
-
-#define BSD_CURRENT_VERSION 1
-
-#define DEBUG 1
-
-/*
- * A dictionary for doing BSD compress.
- */
-
-struct bsd_dict {
- u32 fcode;
- u16 codem1; /* output of hash table -1 */
- u16 cptr; /* map code to hash table entry */
-};
-
-struct bsd_db {
- int totlen; /* length of this structure */
- unsigned int hsize; /* size of the hash table */
- unsigned char hshift; /* used in hash function */
- unsigned char n_bits; /* current bits/code */
- unsigned char maxbits; /* maximum bits/code */
- unsigned char debug; /* non-zero if debug desired */
- unsigned char unit; /* ppp unit number */
- u16 seqno; /* sequence # of next packet */
-	unsigned int mru;		/* size of receive (decompress) buffer */
- unsigned int maxmaxcode; /* largest valid code */
- unsigned int max_ent; /* largest code in use */
- unsigned int in_count; /* uncompressed bytes, aged */
- unsigned int bytes_out; /* compressed bytes, aged */
- unsigned int ratio; /* recent compression ratio */
- unsigned int checkpoint; /* when to next check the ratio */
- unsigned int clear_count; /* times dictionary cleared */
- unsigned int incomp_count; /* incompressible packets */
- unsigned int incomp_bytes; /* incompressible bytes */
- unsigned int uncomp_count; /* uncompressed packets */
- unsigned int uncomp_bytes; /* uncompressed bytes */
- unsigned int comp_count; /* compressed packets */
- unsigned int comp_bytes; /* compressed bytes */
- unsigned short *lens; /* array of lengths of codes */
- struct bsd_dict *dict; /* dictionary */
- int xmit;
-};
-
-#define BSD_OVHD 2 /* BSD compress overhead/packet */
-#define MIN_BSD_BITS 9
-#define BSD_INIT_BITS MIN_BSD_BITS
-#define MAX_BSD_BITS 15
-
-/*
- * the next two codes should not be changed lightly, as they must not
- * lie within the contiguous general code space.
- */
-#define CLEAR 256 /* table clear output code */
-#define FIRST 257 /* first free entry */
-#define LAST 255
-
-#define MAXCODE(b) ((1 << (b)) - 1)
-#define BADCODEM1 MAXCODE(MAX_BSD_BITS)
-
-#define BSD_HASH(prefix, suffix, hshift) ((((unsigned long)(suffix)) << (hshift)) \
- ^ (unsigned long)(prefix))
-#define BSD_KEY(prefix, suffix) ((((unsigned long)(suffix)) << 16) \
- + (unsigned long)(prefix))
-
-#define CHECK_GAP 10000 /* Ratio check interval */
-
-#define RATIO_SCALE_LOG 8
-#define RATIO_SCALE (1 << RATIO_SCALE_LOG)
-#define RATIO_MAX (0x7fffffff >> RATIO_SCALE_LOG)
-
-/*
- * clear the dictionary
- */
-
-static void bsd_clear(struct bsd_db *db)
-{
- db->clear_count++;
- db->max_ent = FIRST - 1;
- db->n_bits = BSD_INIT_BITS;
- db->bytes_out = 0;
- db->in_count = 0;
- db->incomp_count = 0;
- db->ratio = 0;
- db->checkpoint = CHECK_GAP;
-}
-
-/*
- * If the dictionary is full, then see if it is time to reset it.
- *
- * Compute the compression ratio using fixed-point arithmetic
- * with 8 fractional bits.
- *
- * Since we have an infinite stream instead of a single file,
- * watch only the local compression ratio.
- *
- * Since both peers must reset the dictionary at the same time even in
- * the absence of CLEAR codes (while packets are incompressible), they
- * must compute the same ratio.
- */
-static int bsd_check(struct bsd_db *db) /* 1=output CLEAR */
-{
- unsigned int new_ratio;
-
- if (db->in_count >= db->checkpoint)
- {
- /* age the ratio by limiting the size of the counts */
- if (db->in_count >= RATIO_MAX || db->bytes_out >= RATIO_MAX)
- {
- db->in_count -= (db->in_count >> 2);
- db->bytes_out -= (db->bytes_out >> 2);
- }
-
- db->checkpoint = db->in_count + CHECK_GAP;
-
- if (db->max_ent >= db->maxmaxcode)
- {
- /* Reset the dictionary only if the ratio is worse,
- * or if it looks as if it has been poisoned
- * by incompressible data.
- *
- * This does not overflow, because
- * db->in_count <= RATIO_MAX.
- */
-
- new_ratio = db->in_count << RATIO_SCALE_LOG;
- if (db->bytes_out != 0)
- {
- new_ratio /= db->bytes_out;
- }
-
- if (new_ratio < db->ratio || new_ratio < 1 * RATIO_SCALE)
- {
- bsd_clear(db);
- return 1;
- }
- db->ratio = new_ratio;
- }
- }
- return 0;
-}
-
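As a quick worked example of the fixed-point test above (an editor's illustration, not taken from the module): with in_count = 12000 uncompressed bytes emitted as bytes_out = 4000 compressed bytes, new_ratio = (12000 << 8) / 4000 = 768, i.e. 3.0 in the Q.8 representation. Once the dictionary is full, bsd_check() clears it whenever the new ratio is worse than the stored one or falls below 1:1 (1 << 8). The numbers in the sketch are invented.

#include <stdio.h>

#define RATIO_SCALE_LOG 8
#define RATIO_SCALE (1 << RATIO_SCALE_LOG)

int main(void)
{
	unsigned int in_count = 12000, bytes_out = 4000;	/* hypothetical counters */
	unsigned int old_ratio = 3 * RATIO_SCALE + 32;		/* previously stored: 3.125 */
	unsigned int new_ratio = in_count << RATIO_SCALE_LOG;

	if (bytes_out != 0)
		new_ratio /= bytes_out;

	printf("new_ratio = %u/256 (= %u.%03u)\n", new_ratio,
	       new_ratio >> RATIO_SCALE_LOG,
	       (new_ratio & (RATIO_SCALE - 1)) * 1000 / RATIO_SCALE);
	printf("clear dictionary: %s\n",
	       (new_ratio < old_ratio || new_ratio < 1 * RATIO_SCALE) ? "yes" : "no");
	return 0;
}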
-/*
- * Return statistics.
- */
-
-static void bsd_stats(void *state, struct compstat *stats)
-{
- struct bsd_db *db = (struct bsd_db *) state;
-
- stats->unc_bytes = db->uncomp_bytes;
- stats->unc_packets = db->uncomp_count;
- stats->comp_bytes = db->comp_bytes;
- stats->comp_packets = db->comp_count;
- stats->inc_bytes = db->incomp_bytes;
- stats->inc_packets = db->incomp_count;
- stats->in_count = db->in_count;
- stats->bytes_out = db->bytes_out;
-}
-
-/*
- * Reset state, as on a CCP ResetReq.
- */
-static void bsd_reset(void *state, unsigned char code, unsigned char id,
- unsigned char *data, unsigned len,
- struct isdn_ppp_resetparams *rsparm)
-{
- struct bsd_db *db = (struct bsd_db *) state;
-
- bsd_clear(db);
- db->seqno = 0;
- db->clear_count = 0;
-}
-
-/*
- * Release the compression structure
- */
-static void bsd_free(void *state)
-{
- struct bsd_db *db = (struct bsd_db *) state;
-
- if (db) {
- /*
- * Release the dictionary
- */
- vfree(db->dict);
- db->dict = NULL;
-
- /*
- * Release the string buffer
- */
- vfree(db->lens);
- db->lens = NULL;
-
- /*
- * Finally release the structure itself.
- */
- kfree(db);
- }
-}
-
-
-/*
- * Allocate space for a (de) compressor.
- */
-static void *bsd_alloc(struct isdn_ppp_comp_data *data)
-{
- int bits;
- unsigned int hsize, hshift, maxmaxcode;
- struct bsd_db *db;
- int decomp;
-
- static unsigned int htab[][2] = {
- { 5003 , 4 } , { 5003 , 4 } , { 5003 , 4 } , { 5003 , 4 } ,
- { 9001 , 5 } , { 18013 , 6 } , { 35023 , 7 } , { 69001 , 8 }
- };
-
- if (data->optlen != 1 || data->num != CI_BSD_COMPRESS
- || BSD_VERSION(data->options[0]) != BSD_CURRENT_VERSION)
- return NULL;
-
- bits = BSD_NBITS(data->options[0]);
-
- if (bits < 9 || bits > 15)
- return NULL;
-
- hsize = htab[bits - 9][0];
- hshift = htab[bits - 9][1];
-
- /*
- * Allocate the main control structure for this instance.
- */
- maxmaxcode = MAXCODE(bits);
- db = kzalloc(sizeof(struct bsd_db), GFP_KERNEL);
- if (!db)
- return NULL;
-
- db->xmit = data->flags & IPPP_COMP_FLAG_XMIT;
- decomp = db->xmit ? 0 : 1;
-
- /*
- * Allocate space for the dictionary. This may be more than one page in
- * length.
- */
- db->dict = vmalloc(array_size(hsize, sizeof(struct bsd_dict)));
- if (!db->dict) {
- bsd_free(db);
- return NULL;
- }
-
- /*
- * If this is the compression buffer then there is no length data.
- * For decompression, the length information is needed as well.
- */
- if (!decomp)
- db->lens = NULL;
- else {
- db->lens = vmalloc(array_size(sizeof(db->lens[0]),
- maxmaxcode + 1));
- if (!db->lens) {
- bsd_free(db);
- return (NULL);
- }
- }
-
- /*
- * Initialize the data information for the compression code
- */
- db->totlen = sizeof(struct bsd_db) + (sizeof(struct bsd_dict) * hsize);
- db->hsize = hsize;
- db->hshift = hshift;
- db->maxmaxcode = maxmaxcode;
- db->maxbits = bits;
-
- return (void *)db;
-}
-
-/*
- * Initialize the database.
- */
-static int bsd_init(void *state, struct isdn_ppp_comp_data *data, int unit, int debug)
-{
- struct bsd_db *db = state;
- int indx;
- int decomp;
-
- if (!state || !data) {
- printk(KERN_ERR "isdn_bsd_init: [%d] ERR, state %lx data %lx\n", unit, (long)state, (long)data);
- return 0;
- }
-
- decomp = db->xmit ? 0 : 1;
-
- if (data->optlen != 1 || data->num != CI_BSD_COMPRESS
- || (BSD_VERSION(data->options[0]) != BSD_CURRENT_VERSION)
- || (BSD_NBITS(data->options[0]) != db->maxbits)
- || (decomp && db->lens == NULL)) {
- printk(KERN_ERR "isdn_bsd: %d %d %d %d %lx\n", data->optlen, data->num, data->options[0], decomp, (unsigned long)db->lens);
- return 0;
- }
-
- if (decomp)
- for (indx = LAST; indx >= 0; indx--)
- db->lens[indx] = 1;
-
- indx = db->hsize;
- while (indx-- != 0) {
- db->dict[indx].codem1 = BADCODEM1;
- db->dict[indx].cptr = 0;
- }
-
- db->unit = unit;
- db->mru = 0;
-
- db->debug = 1;
-
- bsd_reset(db, 0, 0, NULL, 0, NULL);
-
- return 1;
-}
-
-/*
- * Obtain pointers to the various structures in the compression tables
- */
-
-#define dict_ptrx(p, idx) &(p->dict[idx])
-#define lens_ptrx(p, idx) &(p->lens[idx])
-
-#ifdef DEBUG
-static unsigned short *lens_ptr(struct bsd_db *db, int idx)
-{
- if ((unsigned int) idx > (unsigned int) db->maxmaxcode) {
- printk(KERN_DEBUG "<9>ppp: lens_ptr(%d) > max\n", idx);
- idx = 0;
- }
- return lens_ptrx(db, idx);
-}
-
-static struct bsd_dict *dict_ptr(struct bsd_db *db, int idx)
-{
- if ((unsigned int) idx >= (unsigned int) db->hsize) {
- printk(KERN_DEBUG "<9>ppp: dict_ptr(%d) > max\n", idx);
- idx = 0;
- }
- return dict_ptrx(db, idx);
-}
-
-#else
-#define lens_ptr(db, idx) lens_ptrx(db, idx)
-#define dict_ptr(db, idx) dict_ptrx(db, idx)
-#endif
-
-/*
- * compress a packet
- */
-static int bsd_compress(void *state, struct sk_buff *skb_in, struct sk_buff *skb_out, int proto)
-{
- struct bsd_db *db;
- int hshift;
- unsigned int max_ent;
- unsigned int n_bits;
- unsigned int bitno;
- unsigned long accm;
- int ent;
- unsigned long fcode;
- struct bsd_dict *dictp;
- unsigned char c;
- int hval, disp, ilen, mxcode;
- unsigned char *rptr = skb_in->data;
- int isize = skb_in->len;
-
-#define OUTPUT(ent) \
- { \
- bitno -= n_bits; \
- accm |= ((ent) << bitno); \
- do { \
- if (skb_out && skb_tailroom(skb_out) > 0) \
- skb_put_u8(skb_out, (u8)(accm >> 24)); \
- accm <<= 8; \
- bitno += 8; \
- } while (bitno <= 24); \
- }
-
- /*
- * If the protocol is not in the range we're interested in,
- * just return without compressing the packet. If it is,
- * the protocol becomes the first byte to compress.
- */
- printk(KERN_DEBUG "bsd_compress called with %x\n", proto);
-
- ent = proto;
- if (proto < 0x21 || proto > 0xf9 || !(proto & 0x1))
- return 0;
-
- db = (struct bsd_db *) state;
- hshift = db->hshift;
- max_ent = db->max_ent;
- n_bits = db->n_bits;
- bitno = 32;
- accm = 0;
- mxcode = MAXCODE(n_bits);
-
- /* This is the PPP header information */
- if (skb_out && skb_tailroom(skb_out) >= 2) {
- char *v = skb_put(skb_out, 2);
-		/* we only push our own data onto the header;
-		   AC, PC and the protocol field are pushed by the caller */
- v[0] = db->seqno >> 8;
- v[1] = db->seqno;
- }
-
-	ilen = ++isize; /* This is off by one, but that is what is in the draft! */
-
- while (--ilen > 0) {
- c = *rptr++;
- fcode = BSD_KEY(ent, c);
- hval = BSD_HASH(ent, c, hshift);
- dictp = dict_ptr(db, hval);
-
- /* Validate and then check the entry. */
- if (dictp->codem1 >= max_ent)
- goto nomatch;
-
- if (dictp->fcode == fcode) {
- ent = dictp->codem1 + 1;
- continue; /* found (prefix,suffix) */
- }
-
- /* continue probing until a match or invalid entry */
- disp = (hval == 0) ? 1 : hval;
-
- do {
- hval += disp;
- if (hval >= db->hsize)
- hval -= db->hsize;
- dictp = dict_ptr(db, hval);
- if (dictp->codem1 >= max_ent)
- goto nomatch;
- } while (dictp->fcode != fcode);
-
- ent = dictp->codem1 + 1; /* finally found (prefix,suffix) */
- continue;
-
- nomatch:
- OUTPUT(ent); /* output the prefix */
-
- /* code -> hashtable */
- if (max_ent < db->maxmaxcode) {
- struct bsd_dict *dictp2;
- struct bsd_dict *dictp3;
- int indx;
-
- /* expand code size if needed */
- if (max_ent >= mxcode) {
- db->n_bits = ++n_bits;
- mxcode = MAXCODE(n_bits);
- }
-
- /*
- * Invalidate old hash table entry using
- * this code, and then take it over.
- */
- dictp2 = dict_ptr(db, max_ent + 1);
- indx = dictp2->cptr;
- dictp3 = dict_ptr(db, indx);
-
- if (dictp3->codem1 == max_ent)
- dictp3->codem1 = BADCODEM1;
-
- dictp2->cptr = hval;
- dictp->codem1 = max_ent;
- dictp->fcode = fcode;
- db->max_ent = ++max_ent;
-
- if (db->lens) {
- unsigned short *len1 = lens_ptr(db, max_ent);
- unsigned short *len2 = lens_ptr(db, ent);
- *len1 = *len2 + 1;
- }
- }
- ent = c;
- }
-
- OUTPUT(ent); /* output the last code */
-
- if (skb_out)
- db->bytes_out += skb_out->len; /* Do not count bytes from here */
- db->uncomp_bytes += isize;
- db->in_count += isize;
- ++db->uncomp_count;
- ++db->seqno;
-
- if (bitno < 32)
- ++db->bytes_out; /* must be set before calling bsd_check */
-
- /*
- * Generate the clear command if needed
- */
-
- if (bsd_check(db))
- OUTPUT(CLEAR);
-
- /*
- * Pad dribble bits of last code with ones.
- * Do not emit a completely useless byte of ones.
- */
- if (bitno < 32 && skb_out && skb_tailroom(skb_out) > 0)
- skb_put_u8(skb_out,
- (unsigned char)((accm | (0xff << (bitno - 8))) >> 24));
-
- /*
- * Increase code size if we would have without the packet
- * boundary because the decompressor will do so.
- */
- if (max_ent >= mxcode && max_ent < db->maxmaxcode)
- db->n_bits++;
-
- /* If output length is too large then this is an incompressible frame. */
- if (!skb_out || skb_out->len >= skb_in->len) {
- ++db->incomp_count;
- db->incomp_bytes += isize;
- return 0;
- }
-
- /* Count the number of compressed frames */
- ++db->comp_count;
- db->comp_bytes += skb_out->len;
- return skb_out->len;
-
-#undef OUTPUT
-}
-
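The OUTPUT() macro above packs the variable-width codes MSB-first into a 32-bit accumulator and emits a byte whenever at least eight high-order bits are complete; after the last code, the leftover bits are padded with ones, as in the flush step near the end of bsd_compress(). The standalone sketch below is an editor's illustration of that packing with three made-up 9-bit codes, not code from this module.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned char out[16];
	uint32_t accm = 0;
	unsigned int bitno = 32, n_bits = 9;	/* fresh dictionary: 9-bit codes */
	unsigned int codes[] = { 0x41, 0x42, 0x101 };
	int i, olen = 0;

	for (i = 0; i < 3; i++) {
		bitno -= n_bits;
		accm |= (uint32_t)codes[i] << bitno;
		do {				/* emit completed high-order bytes */
			out[olen++] = (unsigned char)(accm >> 24);
			accm <<= 8;
			bitno += 8;
		} while (bitno <= 24);
	}
	if (bitno < 32)				/* pad leftover bits of the last code with ones */
		out[olen++] = (unsigned char)((accm | (0xffU << (bitno - 8))) >> 24);

	for (i = 0; i < olen; i++)
		printf("%02x ", out[i]);
	printf("\n");
	return 0;
}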
-/*
- * Update the "BSD Compress" dictionary on the receiver for
- * incompressible data by pretending to compress the incoming data.
- */
-static void bsd_incomp(void *state, struct sk_buff *skb_in, int proto)
-{
- bsd_compress(state, skb_in, NULL, proto);
-}
-
-/*
- * Decompress "BSD Compress".
- */
-static int bsd_decompress(void *state, struct sk_buff *skb_in, struct sk_buff *skb_out,
- struct isdn_ppp_resetparams *rsparm)
-{
- struct bsd_db *db;
- unsigned int max_ent;
- unsigned long accm;
- unsigned int bitno; /* 1st valid bit in accm */
- unsigned int n_bits;
- unsigned int tgtbitno; /* bitno when we have a code */
- struct bsd_dict *dictp;
- int seq;
- unsigned int incode;
- unsigned int oldcode;
- unsigned int finchar;
- unsigned char *p, *ibuf;
- int ilen;
- int codelen;
- int extra;
-
- db = (struct bsd_db *) state;
- max_ent = db->max_ent;
- accm = 0;
- bitno = 32; /* 1st valid bit in accm */
- n_bits = db->n_bits;
- tgtbitno = 32 - n_bits; /* bitno when we have a code */
-
- printk(KERN_DEBUG "bsd_decompress called\n");
-
- if (!skb_in || !skb_out) {
- printk(KERN_ERR "bsd_decompress called with NULL parameter\n");
- return DECOMP_ERROR;
- }
-
- /*
- * Get the sequence number.
- */
- if ((p = skb_pull(skb_in, 2)) == NULL) {
- return DECOMP_ERROR;
- }
- p -= 2;
- seq = (p[0] << 8) + p[1];
- ilen = skb_in->len;
- ibuf = skb_in->data;
-
- /*
- * Check the sequence number and give up if it differs from
- * the value we're expecting.
- */
- if (seq != db->seqno) {
- if (db->debug) {
- printk(KERN_DEBUG "bsd_decomp%d: bad sequence # %d, expected %d\n",
- db->unit, seq, db->seqno - 1);
- }
- return DECOMP_ERROR;
- }
-
- ++db->seqno;
- db->bytes_out += ilen;
-
- if (skb_tailroom(skb_out) > 0)
- skb_put_u8(skb_out, 0);
- else
- return DECOMP_ERR_NOMEM;
-
- oldcode = CLEAR;
-
- /*
-	 * Keep the checkpoint correct so that incompressible packets
- * clear the dictionary at the proper times.
- */
-
- for (;;) {
- if (ilen-- <= 0) {
- db->in_count += (skb_out->len - 1); /* don't count the header */
- break;
- }
-
- /*
- * Accumulate bytes until we have a complete code.
- * Then get the next code, relying on the 32-bit,
- * unsigned accm to mask the result.
- */
-
- bitno -= 8;
- accm |= *ibuf++ << bitno;
- if (tgtbitno < bitno)
- continue;
-
- incode = accm >> tgtbitno;
- accm <<= n_bits;
- bitno += n_bits;
-
- /*
- * The dictionary must only be cleared at the end of a packet.
- */
-
- if (incode == CLEAR) {
- if (ilen > 0) {
- if (db->debug)
- printk(KERN_DEBUG "bsd_decomp%d: bad CLEAR\n", db->unit);
- return DECOMP_FATALERROR; /* probably a bug */
- }
- bsd_clear(db);
- break;
- }
-
- if ((incode > max_ent + 2) || (incode > db->maxmaxcode)
- || (incode > max_ent && oldcode == CLEAR)) {
- if (db->debug) {
- printk(KERN_DEBUG "bsd_decomp%d: bad code 0x%x oldcode=0x%x ",
- db->unit, incode, oldcode);
- printk(KERN_DEBUG "max_ent=0x%x skb->Len=%d seqno=%d\n",
- max_ent, skb_out->len, db->seqno);
- }
- return DECOMP_FATALERROR; /* probably a bug */
- }
-
- /* Special case for KwKwK string. */
- if (incode > max_ent) {
- finchar = oldcode;
- extra = 1;
- } else {
- finchar = incode;
- extra = 0;
- }
-
- codelen = *(lens_ptr(db, finchar));
- if (skb_tailroom(skb_out) < codelen + extra) {
- if (db->debug) {
- printk(KERN_DEBUG "bsd_decomp%d: ran out of mru\n", db->unit);
-#ifdef DEBUG
- printk(KERN_DEBUG " len=%d, finchar=0x%x, codelen=%d,skblen=%d\n",
- ilen, finchar, codelen, skb_out->len);
-#endif
- }
- return DECOMP_FATALERROR;
- }
-
- /*
- * Decode this code and install it in the decompressed buffer.
- */
-
- p = skb_put(skb_out, codelen);
- p += codelen;
- while (finchar > LAST) {
- struct bsd_dict *dictp2 = dict_ptr(db, finchar);
-
- dictp = dict_ptr(db, dictp2->cptr);
-
-#ifdef DEBUG
- if (--codelen <= 0 || dictp->codem1 != finchar - 1) {
- if (codelen <= 0) {
- printk(KERN_ERR "bsd_decomp%d: fell off end of chain ", db->unit);
- printk(KERN_ERR "0x%x at 0x%x by 0x%x, max_ent=0x%x\n", incode, finchar, dictp2->cptr, max_ent);
- } else {
- if (dictp->codem1 != finchar - 1) {
- printk(KERN_ERR "bsd_decomp%d: bad code chain 0x%x finchar=0x%x ", db->unit, incode, finchar);
- printk(KERN_ERR "oldcode=0x%x cptr=0x%x codem1=0x%x\n", oldcode, dictp2->cptr, dictp->codem1);
- }
- }
- return DECOMP_FATALERROR;
- }
-#endif
-
- {
- u32 fcode = dictp->fcode;
- *--p = (fcode >> 16) & 0xff;
- finchar = fcode & 0xffff;
- }
- }
- *--p = finchar;
-
-#ifdef DEBUG
- if (--codelen != 0)
- printk(KERN_ERR "bsd_decomp%d: short by %d after code 0x%x, max_ent=0x%x\n", db->unit, codelen, incode, max_ent);
-#endif
-
- if (extra) /* the KwKwK case again */
- skb_put_u8(skb_out, finchar);
-
- /*
- * If not first code in a packet, and
- * if not out of code space, then allocate a new code.
- *
- * Keep the hash table correct so it can be used
- * with uncompressed packets.
- */
- if (oldcode != CLEAR && max_ent < db->maxmaxcode) {
- struct bsd_dict *dictp2, *dictp3;
- u16 *lens1, *lens2;
- unsigned long fcode;
- int hval, disp, indx;
-
- fcode = BSD_KEY(oldcode, finchar);
- hval = BSD_HASH(oldcode, finchar, db->hshift);
- dictp = dict_ptr(db, hval);
-
- /* look for a free hash table entry */
- if (dictp->codem1 < max_ent) {
- disp = (hval == 0) ? 1 : hval;
- do {
- hval += disp;
- if (hval >= db->hsize)
- hval -= db->hsize;
- dictp = dict_ptr(db, hval);
- } while (dictp->codem1 < max_ent);
- }
-
- /*
- * Invalidate previous hash table entry
- * assigned this code, and then take it over
- */
-
- dictp2 = dict_ptr(db, max_ent + 1);
- indx = dictp2->cptr;
- dictp3 = dict_ptr(db, indx);
-
- if (dictp3->codem1 == max_ent)
- dictp3->codem1 = BADCODEM1;
-
- dictp2->cptr = hval;
- dictp->codem1 = max_ent;
- dictp->fcode = fcode;
- db->max_ent = ++max_ent;
-
- /* Update the length of this string. */
- lens1 = lens_ptr(db, max_ent);
- lens2 = lens_ptr(db, oldcode);
- *lens1 = *lens2 + 1;
-
- /* Expand code size if needed. */
- if (max_ent >= MAXCODE(n_bits) && max_ent < db->maxmaxcode) {
- db->n_bits = ++n_bits;
- tgtbitno = 32-n_bits;
- }
- }
- oldcode = incode;
- }
-
- ++db->comp_count;
- ++db->uncomp_count;
- db->comp_bytes += skb_in->len - BSD_OVHD;
- db->uncomp_bytes += skb_out->len;
-
- if (bsd_check(db)) {
- if (db->debug)
- printk(KERN_DEBUG "bsd_decomp%d: peer should have cleared dictionary on %d\n",
- db->unit, db->seqno - 1);
- }
- return skb_out->len;
-}
-
-/*************************************************************
- * Table of addresses for the BSD compression module
- *************************************************************/
-
-static struct isdn_ppp_compressor ippp_bsd_compress = {
- .owner = THIS_MODULE,
- .num = CI_BSD_COMPRESS,
- .alloc = bsd_alloc,
- .free = bsd_free,
- .init = bsd_init,
- .reset = bsd_reset,
- .compress = bsd_compress,
- .decompress = bsd_decompress,
- .incomp = bsd_incomp,
- .stat = bsd_stats,
-};
-
-/*************************************************************
- * Module support routines
- *************************************************************/
-
-static int __init isdn_bsdcomp_init(void)
-{
- int answer = isdn_ppp_register_compressor(&ippp_bsd_compress);
- if (answer == 0)
- printk(KERN_INFO "PPP BSD Compression module registered\n");
- return answer;
-}
-
-static void __exit isdn_bsdcomp_exit(void)
-{
- isdn_ppp_unregister_compressor(&ippp_bsd_compress);
-}
-
-module_init(isdn_bsdcomp_init);
-module_exit(isdn_bsdcomp_exit);
diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
deleted file mode 100644
index 74ee00f5b310..000000000000
--- a/drivers/isdn/i4l/isdn_common.c
+++ /dev/null
@@ -1,2368 +0,0 @@
-/* $Id: isdn_common.c,v 1.1.2.3 2004/02/10 01:07:13 keil Exp $
- *
- * Linux ISDN subsystem, common used functions (linklevel).
- *
- * Copyright 1994-1999 by Fritz Elfert (fritz@isdn4linux.de)
- * Copyright 1995,96 Thinking Objects Software GmbH Wuerzburg
- * Copyright 1995,96 by Michael Hipp (Michael.Hipp@student.uni-tuebingen.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/poll.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/isdn.h>
-#include <linux/mutex.h>
-#include "isdn_common.h"
-#include "isdn_tty.h"
-#include "isdn_net.h"
-#include "isdn_ppp.h"
-#ifdef CONFIG_ISDN_AUDIO
-#include "isdn_audio.h"
-#endif
-#ifdef CONFIG_ISDN_DIVERSION_MODULE
-#define CONFIG_ISDN_DIVERSION
-#endif
-#ifdef CONFIG_ISDN_DIVERSION
-#include <linux/isdn_divertif.h>
-#endif /* CONFIG_ISDN_DIVERSION */
-#include "isdn_v110.h"
-
-/* Debugflags */
-#undef ISDN_DEBUG_STATCALLB
-
-MODULE_DESCRIPTION("ISDN4Linux: link layer");
-MODULE_AUTHOR("Fritz Elfert");
-MODULE_LICENSE("GPL");
-
-isdn_dev *dev;
-
-static DEFINE_MUTEX(isdn_mutex);
-static char *isdn_revision = "$Revision: 1.1.2.3 $";
-
-extern char *isdn_net_revision;
-#ifdef CONFIG_ISDN_PPP
-extern char *isdn_ppp_revision;
-#else
-static char *isdn_ppp_revision = ": none $";
-#endif
-#ifdef CONFIG_ISDN_AUDIO
-extern char *isdn_audio_revision;
-#else
-static char *isdn_audio_revision = ": none $";
-#endif
-extern char *isdn_v110_revision;
-
-#ifdef CONFIG_ISDN_DIVERSION
-static isdn_divert_if *divert_if; /* = NULL */
-#endif /* CONFIG_ISDN_DIVERSION */
-
-
-static int isdn_writebuf_stub(int, int, const u_char __user *, int);
-static void set_global_features(void);
-static int isdn_wildmat(char *s, char *p);
-static int isdn_add_channels(isdn_driver_t *d, int drvidx, int n, int adding);
-
-static inline void
-isdn_lock_driver(isdn_driver_t *drv)
-{
- try_module_get(drv->interface->owner);
- drv->locks++;
-}
-
-void
-isdn_lock_drivers(void)
-{
- int i;
-
- for (i = 0; i < ISDN_MAX_DRIVERS; i++) {
- if (!dev->drv[i])
- continue;
- isdn_lock_driver(dev->drv[i]);
- }
-}
-
-static inline void
-isdn_unlock_driver(isdn_driver_t *drv)
-{
- if (drv->locks > 0) {
- drv->locks--;
- module_put(drv->interface->owner);
- }
-}
-
-void
-isdn_unlock_drivers(void)
-{
- int i;
-
- for (i = 0; i < ISDN_MAX_DRIVERS; i++) {
- if (!dev->drv[i])
- continue;
- isdn_unlock_driver(dev->drv[i]);
- }
-}
-
-#if defined(ISDN_DEBUG_NET_DUMP) || defined(ISDN_DEBUG_MODEM_DUMP)
-void
-isdn_dumppkt(char *s, u_char *p, int len, int dumplen)
-{
- int dumpc;
-
- printk(KERN_DEBUG "%s(%d) ", s, len);
- for (dumpc = 0; (dumpc < dumplen) && (len); len--, dumpc++)
- printk(" %02x", *p++);
- printk("\n");
-}
-#endif
-
-/*
- * I picked the pattern-matching functions from an old GNU tar version (1.10).
- * It was originally written and placed in the public domain by rs@mirror.TMC.COM (Rich Salz).
- */
-static int
-isdn_star(char *s, char *p)
-{
- while (isdn_wildmat(s, p)) {
- if (*++s == '\0')
- return (2);
- }
- return (0);
-}
-
-/*
- * Shell-type Pattern-matching for incoming caller-Ids
- * This function gets a string in s and checks whether it matches the pattern
- * given in p.
- *
- * Return:
- * 0 = match.
- * 1 = no match.
- * 2 = no match, but would match if s were longer.
- *
- * Possible Patterns:
- *
- * '?' matches one character
- * '*' matches zero or more characters
- * [xyz] matches the set of characters in brackets.
- * [^xyz] matches any single character not in the set of characters
- */
-
-static int
-isdn_wildmat(char *s, char *p)
-{
- register int last;
- register int matched;
- register int reverse;
- register int nostar = 1;
-
- if (!(*s) && !(*p))
- return (1);
- for (; *p; s++, p++)
- switch (*p) {
- case '\\':
- /* Literal match with following character. */
- p++;
- /* fall through */
- default:
- if (*s != *p)
- return (*s == '\0') ? 2 : 1;
- continue;
- case '?':
- /* Match anything. */
- if (*s == '\0')
- return (2);
- continue;
- case '*':
- nostar = 0;
- /* Trailing star matches everything. */
- return (*++p ? isdn_star(s, p) : 0);
- case '[':
- /* [^....] means inverse character class. */
- if ((reverse = (p[1] == '^')))
- p++;
- for (last = 0, matched = 0; *++p && (*p != ']'); last = *p)
- /* This next line requires a good C compiler. */
- if (*p == '-' ? *s <= *++p && *s >= last : *s == *p)
- matched = 1;
- if (matched == reverse)
- return (1);
- continue;
- }
- return (*s == '\0') ? 0 : nostar;
-}
-
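The pattern syntax documented above is ordinary shell globbing, so the C library's fnmatch(3) can be used to experiment with equivalent patterns; note that fnmatch() only reports match or no-match, while the 0/1/2 "would match if longer" convention is specific to isdn_wildmat(). The snippet below is an editor's illustration with invented phone numbers, not code from this driver.

#include <fnmatch.h>
#include <stdio.h>

int main(void)
{
	const char *msn = "07111234567";	/* hypothetical caller ID */
	const char *pats[] = { "07111234567", "0711*", "0711123456?", "089*" };
	int i;

	for (i = 0; i < 4; i++)
		printf("%-12s vs %s -> %s\n", pats[i], msn,
		       fnmatch(pats[i], msn, 0) == 0 ? "match" : "no match");
	return 0;
}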
-int isdn_msncmp(const char *msn1, const char *msn2)
-{
- char TmpMsn1[ISDN_MSNLEN];
- char TmpMsn2[ISDN_MSNLEN];
- char *p;
-
- for (p = TmpMsn1; *msn1 && *msn1 != ':';) // Strip off a SPID
- *p++ = *msn1++;
- *p = '\0';
-
- for (p = TmpMsn2; *msn2 && *msn2 != ':';) // Strip off a SPID
- *p++ = *msn2++;
- *p = '\0';
-
- return isdn_wildmat(TmpMsn1, TmpMsn2);
-}
-
-int
-isdn_dc2minor(int di, int ch)
-{
- int i;
- for (i = 0; i < ISDN_MAX_CHANNELS; i++)
- if (dev->chanmap[i] == ch && dev->drvmap[i] == di)
- return i;
- return -1;
-}
-
-static int isdn_timer_cnt1 = 0;
-static int isdn_timer_cnt2 = 0;
-static int isdn_timer_cnt3 = 0;
-
-static void
-isdn_timer_funct(struct timer_list *unused)
-{
- int tf = dev->tflags;
- if (tf & ISDN_TIMER_FAST) {
- if (tf & ISDN_TIMER_MODEMREAD)
- isdn_tty_readmodem();
- if (tf & ISDN_TIMER_MODEMPLUS)
- isdn_tty_modem_escape();
- if (tf & ISDN_TIMER_MODEMXMIT)
- isdn_tty_modem_xmit();
- }
- if (tf & ISDN_TIMER_SLOW) {
- if (++isdn_timer_cnt1 >= ISDN_TIMER_02SEC) {
- isdn_timer_cnt1 = 0;
- if (tf & ISDN_TIMER_NETDIAL)
- isdn_net_dial();
- }
- if (++isdn_timer_cnt2 >= ISDN_TIMER_1SEC) {
- isdn_timer_cnt2 = 0;
- if (tf & ISDN_TIMER_NETHANGUP)
- isdn_net_autohup();
- if (++isdn_timer_cnt3 >= ISDN_TIMER_RINGING) {
- isdn_timer_cnt3 = 0;
- if (tf & ISDN_TIMER_MODEMRING)
- isdn_tty_modem_ring();
- }
- if (tf & ISDN_TIMER_CARRIER)
- isdn_tty_carrier_timeout();
- }
- }
- if (tf)
- mod_timer(&dev->timer, jiffies + ISDN_TIMER_RES);
-}
-
-void
-isdn_timer_ctrl(int tf, int onoff)
-{
- unsigned long flags;
- int old_tflags;
-
- spin_lock_irqsave(&dev->timerlock, flags);
- if ((tf & ISDN_TIMER_SLOW) && (!(dev->tflags & ISDN_TIMER_SLOW))) {
- /* If the slow-timer wasn't activated until now */
- isdn_timer_cnt1 = 0;
- isdn_timer_cnt2 = 0;
- }
- old_tflags = dev->tflags;
- if (onoff)
- dev->tflags |= tf;
- else
- dev->tflags &= ~tf;
- if (dev->tflags && !old_tflags)
- mod_timer(&dev->timer, jiffies + ISDN_TIMER_RES);
- spin_unlock_irqrestore(&dev->timerlock, flags);
-}
-
-/*
- * Receive a packet from B-Channel. (Called from low-level-module)
- */
-static void
-isdn_receive_skb_callback(int di, int channel, struct sk_buff *skb)
-{
- int i;
-
- if ((i = isdn_dc2minor(di, channel)) == -1) {
- dev_kfree_skb(skb);
- return;
- }
- /* Update statistics */
- dev->ibytes[i] += skb->len;
-
- /* First, try to deliver data to network-device */
- if (isdn_net_rcv_skb(i, skb))
- return;
-
- /* V.110 handling
- * makes sense for async streams only, so it is
- * called after possible net-device delivery.
- */
- if (dev->v110[i]) {
- atomic_inc(&dev->v110use[i]);
- skb = isdn_v110_decode(dev->v110[i], skb);
- atomic_dec(&dev->v110use[i]);
- if (!skb)
- return;
- }
-
- /* No network-device found, deliver to tty or raw-channel */
- if (skb->len) {
- if (isdn_tty_rcv_skb(i, di, channel, skb))
- return;
- wake_up_interruptible(&dev->drv[di]->rcv_waitq[channel]);
- } else
- dev_kfree_skb(skb);
-}
-
-/*
- * Intercept command from Linklevel to Lowlevel.
- * If layer 2 protocol is V.110 and this is not supported by current
- * lowlevel-driver, use driver's transparent mode and handle V.110 in
- * linklevel instead.
- */
-int
-isdn_command(isdn_ctrl *cmd)
-{
- if (cmd->driver == -1) {
- printk(KERN_WARNING "isdn_command command(%x) driver -1\n", cmd->command);
- return (1);
- }
- if (!dev->drv[cmd->driver]) {
- printk(KERN_WARNING "isdn_command command(%x) dev->drv[%d] NULL\n",
- cmd->command, cmd->driver);
- return (1);
- }
- if (!dev->drv[cmd->driver]->interface) {
- printk(KERN_WARNING "isdn_command command(%x) dev->drv[%d]->interface NULL\n",
- cmd->command, cmd->driver);
- return (1);
- }
- if (cmd->command == ISDN_CMD_SETL2) {
- int idx = isdn_dc2minor(cmd->driver, cmd->arg & 255);
- unsigned long l2prot = (cmd->arg >> 8) & 255;
- unsigned long features = (dev->drv[cmd->driver]->interface->features
- >> ISDN_FEATURE_L2_SHIFT) &
- ISDN_FEATURE_L2_MASK;
- unsigned long l2_feature = (1 << l2prot);
-
- switch (l2prot) {
- case ISDN_PROTO_L2_V11096:
- case ISDN_PROTO_L2_V11019:
- case ISDN_PROTO_L2_V11038:
- /* If V.110 requested, but not supported by
- * HL-driver, set emulator-flag and change
- * Layer-2 to transparent
- */
- if (!(features & l2_feature)) {
- dev->v110emu[idx] = l2prot;
- cmd->arg = (cmd->arg & 255) |
- (ISDN_PROTO_L2_TRANS << 8);
- } else
- dev->v110emu[idx] = 0;
- }
- }
- return dev->drv[cmd->driver]->interface->command(cmd);
-}
-
-void
-isdn_all_eaz(int di, int ch)
-{
- isdn_ctrl cmd;
-
- if (di < 0)
- return;
- cmd.driver = di;
- cmd.arg = ch;
- cmd.command = ISDN_CMD_SETEAZ;
- cmd.parm.num[0] = '\0';
- isdn_command(&cmd);
-}
-
-/*
- * Beginning of a CAPI-like LL<->HL interface, currently used only for
- * supplementary service (CAPI 2.0 part III)
- */
-#include <linux/isdn/capicmd.h>
-
-static int
-isdn_capi_rec_hl_msg(capi_msg *cm)
-{
- switch (cm->Command) {
- case CAPI_FACILITY:
-		/* at the moment only handled in tty */
- return (isdn_tty_capi_facility(cm));
- default:
- return (-1);
- }
-}
-
-static int
-isdn_status_callback(isdn_ctrl *c)
-{
- int di;
- u_long flags;
- int i;
- int r;
- int retval = 0;
- isdn_ctrl cmd;
- isdn_net_dev *p;
-
- di = c->driver;
- i = isdn_dc2minor(di, c->arg);
- switch (c->command) {
- case ISDN_STAT_BSENT:
- if (i < 0)
- return -1;
- if (dev->global_flags & ISDN_GLOBAL_STOPPED)
- return 0;
- if (isdn_net_stat_callback(i, c))
- return 0;
- if (isdn_v110_stat_callback(i, c))
- return 0;
- if (isdn_tty_stat_callback(i, c))
- return 0;
- wake_up_interruptible(&dev->drv[di]->snd_waitq[c->arg]);
- break;
- case ISDN_STAT_STAVAIL:
- dev->drv[di]->stavail += c->arg;
- wake_up_interruptible(&dev->drv[di]->st_waitq);
- break;
- case ISDN_STAT_RUN:
- dev->drv[di]->flags |= DRV_FLAG_RUNNING;
- for (i = 0; i < ISDN_MAX_CHANNELS; i++)
- if (dev->drvmap[i] == di)
- isdn_all_eaz(di, dev->chanmap[i]);
- set_global_features();
- break;
- case ISDN_STAT_STOP:
- dev->drv[di]->flags &= ~DRV_FLAG_RUNNING;
- break;
- case ISDN_STAT_ICALL:
- if (i < 0)
- return -1;
-#ifdef ISDN_DEBUG_STATCALLB
- printk(KERN_DEBUG "ICALL (net): %d %ld %s\n", di, c->arg, c->parm.num);
-#endif
- if (dev->global_flags & ISDN_GLOBAL_STOPPED) {
- cmd.driver = di;
- cmd.arg = c->arg;
- cmd.command = ISDN_CMD_HANGUP;
- isdn_command(&cmd);
- return 0;
- }
- /* Try to find a network-interface which will accept incoming call */
- r = ((c->command == ISDN_STAT_ICALLW) ? 0 : isdn_net_find_icall(di, c->arg, i, &c->parm.setup));
- switch (r) {
- case 0:
- /* No network-device replies.
- * Try ttyI's.
-			 * These return 0 on no match, 1 on match, and
-			 * 3 if the CID would match were it longer.
- */
- if (c->command == ISDN_STAT_ICALL)
- if ((retval = isdn_tty_find_icall(di, c->arg, &c->parm.setup))) return (retval);
-#ifdef CONFIG_ISDN_DIVERSION
- if (divert_if)
- if ((retval = divert_if->stat_callback(c)))
- return (retval); /* processed */
-#endif /* CONFIG_ISDN_DIVERSION */
- if ((!retval) && (dev->drv[di]->flags & DRV_FLAG_REJBUS)) {
- /* No tty responding */
- cmd.driver = di;
- cmd.arg = c->arg;
- cmd.command = ISDN_CMD_HANGUP;
- isdn_command(&cmd);
- retval = 2;
- }
- break;
- case 1:
- /* Schedule connection-setup */
- isdn_net_dial();
- cmd.driver = di;
- cmd.arg = c->arg;
- cmd.command = ISDN_CMD_ACCEPTD;
- for (p = dev->netdev; p; p = p->next)
- if (p->local->isdn_channel == cmd.arg)
- {
- strcpy(cmd.parm.setup.eazmsn, p->local->msn);
- isdn_command(&cmd);
- retval = 1;
- break;
- }
- break;
-
- case 2: /* For calling back, first reject incoming call ... */
- case 3: /* Interface found, but down, reject call actively */
- retval = 2;
- printk(KERN_INFO "isdn: Rejecting Call\n");
- cmd.driver = di;
- cmd.arg = c->arg;
- cmd.command = ISDN_CMD_HANGUP;
- isdn_command(&cmd);
- if (r == 3)
- break;
- /* Fall through */
- case 4:
- /* ... then start callback. */
- isdn_net_dial();
- break;
- case 5:
-			/* Number would possibly match, if it were longer */
- retval = 3;
- break;
- }
-#ifdef ISDN_DEBUG_STATCALLB
- printk(KERN_DEBUG "ICALL: ret=%d\n", retval);
-#endif
- return retval;
- break;
- case ISDN_STAT_CINF:
- if (i < 0)
- return -1;
-#ifdef ISDN_DEBUG_STATCALLB
- printk(KERN_DEBUG "CINF: %ld %s\n", c->arg, c->parm.num);
-#endif
- if (dev->global_flags & ISDN_GLOBAL_STOPPED)
- return 0;
- if (strcmp(c->parm.num, "0"))
- isdn_net_stat_callback(i, c);
- isdn_tty_stat_callback(i, c);
- break;
- case ISDN_STAT_CAUSE:
-#ifdef ISDN_DEBUG_STATCALLB
- printk(KERN_DEBUG "CAUSE: %ld %s\n", c->arg, c->parm.num);
-#endif
- printk(KERN_INFO "isdn: %s,ch%ld cause: %s\n",
- dev->drvid[di], c->arg, c->parm.num);
- isdn_tty_stat_callback(i, c);
-#ifdef CONFIG_ISDN_DIVERSION
- if (divert_if)
- divert_if->stat_callback(c);
-#endif /* CONFIG_ISDN_DIVERSION */
- break;
- case ISDN_STAT_DISPLAY:
-#ifdef ISDN_DEBUG_STATCALLB
- printk(KERN_DEBUG "DISPLAY: %ld %s\n", c->arg, c->parm.display);
-#endif
- isdn_tty_stat_callback(i, c);
-#ifdef CONFIG_ISDN_DIVERSION
- if (divert_if)
- divert_if->stat_callback(c);
-#endif /* CONFIG_ISDN_DIVERSION */
- break;
- case ISDN_STAT_DCONN:
- if (i < 0)
- return -1;
-#ifdef ISDN_DEBUG_STATCALLB
- printk(KERN_DEBUG "DCONN: %ld\n", c->arg);
-#endif
- if (dev->global_flags & ISDN_GLOBAL_STOPPED)
- return 0;
- /* Find any net-device, waiting for D-channel setup */
- if (isdn_net_stat_callback(i, c))
- break;
- isdn_v110_stat_callback(i, c);
- /* Find any ttyI, waiting for D-channel setup */
- if (isdn_tty_stat_callback(i, c)) {
- cmd.driver = di;
- cmd.arg = c->arg;
- cmd.command = ISDN_CMD_ACCEPTB;
- isdn_command(&cmd);
- break;
- }
- break;
- case ISDN_STAT_DHUP:
- if (i < 0)
- return -1;
-#ifdef ISDN_DEBUG_STATCALLB
- printk(KERN_DEBUG "DHUP: %ld\n", c->arg);
-#endif
- if (dev->global_flags & ISDN_GLOBAL_STOPPED)
- return 0;
- dev->drv[di]->online &= ~(1 << (c->arg));
- isdn_info_update();
- /* Signal hangup to network-devices */
- if (isdn_net_stat_callback(i, c))
- break;
- isdn_v110_stat_callback(i, c);
- if (isdn_tty_stat_callback(i, c))
- break;
-#ifdef CONFIG_ISDN_DIVERSION
- if (divert_if)
- divert_if->stat_callback(c);
-#endif /* CONFIG_ISDN_DIVERSION */
- break;
- break;
- case ISDN_STAT_BCONN:
- if (i < 0)
- return -1;
-#ifdef ISDN_DEBUG_STATCALLB
- printk(KERN_DEBUG "BCONN: %ld\n", c->arg);
-#endif
- /* Signal B-channel-connect to network-devices */
- if (dev->global_flags & ISDN_GLOBAL_STOPPED)
- return 0;
- dev->drv[di]->online |= (1 << (c->arg));
- isdn_info_update();
- if (isdn_net_stat_callback(i, c))
- break;
- isdn_v110_stat_callback(i, c);
- if (isdn_tty_stat_callback(i, c))
- break;
- break;
- case ISDN_STAT_BHUP:
- if (i < 0)
- return -1;
-#ifdef ISDN_DEBUG_STATCALLB
- printk(KERN_DEBUG "BHUP: %ld\n", c->arg);
-#endif
- if (dev->global_flags & ISDN_GLOBAL_STOPPED)
- return 0;
- dev->drv[di]->online &= ~(1 << (c->arg));
- isdn_info_update();
-#ifdef CONFIG_ISDN_X25
- /* Signal hangup to network-devices */
- if (isdn_net_stat_callback(i, c))
- break;
-#endif
- isdn_v110_stat_callback(i, c);
- if (isdn_tty_stat_callback(i, c))
- break;
- break;
- case ISDN_STAT_NODCH:
- if (i < 0)
- return -1;
-#ifdef ISDN_DEBUG_STATCALLB
- printk(KERN_DEBUG "NODCH: %ld\n", c->arg);
-#endif
- if (dev->global_flags & ISDN_GLOBAL_STOPPED)
- return 0;
- if (isdn_net_stat_callback(i, c))
- break;
- if (isdn_tty_stat_callback(i, c))
- break;
- break;
- case ISDN_STAT_ADDCH:
- spin_lock_irqsave(&dev->lock, flags);
- if (isdn_add_channels(dev->drv[di], di, c->arg, 1)) {
- spin_unlock_irqrestore(&dev->lock, flags);
- return -1;
- }
- spin_unlock_irqrestore(&dev->lock, flags);
- isdn_info_update();
- break;
- case ISDN_STAT_DISCH:
- spin_lock_irqsave(&dev->lock, flags);
- for (i = 0; i < ISDN_MAX_CHANNELS; i++)
- if ((dev->drvmap[i] == di) &&
- (dev->chanmap[i] == c->arg)) {
- if (c->parm.num[0])
- dev->usage[i] &= ~ISDN_USAGE_DISABLED;
- else
- if (USG_NONE(dev->usage[i])) {
- dev->usage[i] |= ISDN_USAGE_DISABLED;
- }
- else
- retval = -1;
- break;
- }
- spin_unlock_irqrestore(&dev->lock, flags);
- isdn_info_update();
- break;
- case ISDN_STAT_UNLOAD:
- while (dev->drv[di]->locks > 0) {
- isdn_unlock_driver(dev->drv[di]);
- }
- spin_lock_irqsave(&dev->lock, flags);
- isdn_tty_stat_callback(i, c);
- for (i = 0; i < ISDN_MAX_CHANNELS; i++)
- if (dev->drvmap[i] == di) {
- dev->drvmap[i] = -1;
- dev->chanmap[i] = -1;
- dev->usage[i] &= ~ISDN_USAGE_DISABLED;
- }
- dev->drivers--;
- dev->channels -= dev->drv[di]->channels;
- kfree(dev->drv[di]->rcverr);
- kfree(dev->drv[di]->rcvcount);
- for (i = 0; i < dev->drv[di]->channels; i++)
- skb_queue_purge(&dev->drv[di]->rpqueue[i]);
- kfree(dev->drv[di]->rpqueue);
- kfree(dev->drv[di]->rcv_waitq);
- kfree(dev->drv[di]);
- dev->drv[di] = NULL;
- dev->drvid[di][0] = '\0';
- isdn_info_update();
- set_global_features();
- spin_unlock_irqrestore(&dev->lock, flags);
- return 0;
- case ISDN_STAT_L1ERR:
- break;
- case CAPI_PUT_MESSAGE:
- return (isdn_capi_rec_hl_msg(&c->parm.cmsg));
-#ifdef CONFIG_ISDN_TTY_FAX
- case ISDN_STAT_FAXIND:
- isdn_tty_stat_callback(i, c);
- break;
-#endif
-#ifdef CONFIG_ISDN_AUDIO
- case ISDN_STAT_AUDIO:
- isdn_tty_stat_callback(i, c);
- break;
-#endif
-#ifdef CONFIG_ISDN_DIVERSION
- case ISDN_STAT_PROT:
- case ISDN_STAT_REDIR:
- if (divert_if)
- return (divert_if->stat_callback(c));
-#endif /* CONFIG_ISDN_DIVERSION */
- /* fall through */
- default:
- return -1;
- }
- return 0;
-}
-
-/*
- * Get integer from char-pointer, set pointer to end of number
- */
-int
-isdn_getnum(char **p)
-{
- int v = -1;
-
- while (*p[0] >= '0' && *p[0] <= '9')
- v = ((v < 0) ? 0 : (v * 10)) + (int) ((*p[0]++) - '0');
- return v;
-}
-
-#define DLE 0x10
-
-/*
- * isdn_readbchan() tries to get data from the read-queue.
- * It MUST be called with interrupts off.
- *
- * Be aware that this is not an atomic operation when sleep != 0, even though
- * interrupts are turned off! As it happens, we are currently only called
- * on behalf of a read() system call on raw device files (which are documented
- * to be dangerous and for debugging purposes only). The inode semaphore
- * takes care that this is not called for the same minor device number while
- * we are sleeping, but access is not serialized against simultaneous read()
- * from the corresponding ttyI device. Can other ugly events, like changes
- * of the mapping (di,ch)<->minor, happen during the sleep? --he
- */
-int
-isdn_readbchan(int di, int channel, u_char *buf, u_char *fp, int len, wait_queue_head_t *sleep)
-{
- int count;
- int count_pull;
- int count_put;
- int dflag;
- struct sk_buff *skb;
- u_char *cp;
-
- if (!dev->drv[di])
- return 0;
- if (skb_queue_empty(&dev->drv[di]->rpqueue[channel])) {
- if (sleep)
- wait_event_interruptible(*sleep,
- !skb_queue_empty(&dev->drv[di]->rpqueue[channel]));
- else
- return 0;
- }
- if (len > dev->drv[di]->rcvcount[channel])
- len = dev->drv[di]->rcvcount[channel];
- cp = buf;
- count = 0;
- while (len) {
- if (!(skb = skb_peek(&dev->drv[di]->rpqueue[channel])))
- break;
-#ifdef CONFIG_ISDN_AUDIO
- if (ISDN_AUDIO_SKB_LOCK(skb))
- break;
- ISDN_AUDIO_SKB_LOCK(skb) = 1;
- if ((ISDN_AUDIO_SKB_DLECOUNT(skb)) || (dev->drv[di]->DLEflag & (1 << channel))) {
- char *p = skb->data;
- unsigned long DLEmask = (1 << channel);
-
- dflag = 0;
- count_pull = count_put = 0;
- while ((count_pull < skb->len) && (len > 0)) {
- len--;
- if (dev->drv[di]->DLEflag & DLEmask) {
- *cp++ = DLE;
- dev->drv[di]->DLEflag &= ~DLEmask;
- } else {
- *cp++ = *p;
- if (*p == DLE) {
- dev->drv[di]->DLEflag |= DLEmask;
- (ISDN_AUDIO_SKB_DLECOUNT(skb))--;
- }
- p++;
- count_pull++;
- }
- count_put++;
- }
- if (count_pull >= skb->len)
- dflag = 1;
- } else {
-#endif
- /* No DLE's in buff, so simply copy it */
- dflag = 1;
- if ((count_pull = skb->len) > len) {
- count_pull = len;
- dflag = 0;
- }
- count_put = count_pull;
- skb_copy_from_linear_data(skb, cp, count_put);
- cp += count_put;
- len -= count_put;
-#ifdef CONFIG_ISDN_AUDIO
- }
-#endif
- count += count_put;
- if (fp) {
- memset(fp, 0, count_put);
- fp += count_put;
- }
- if (dflag) {
- /* We got all the data in this buff.
- * Now we can dequeue it.
- */
- if (fp)
- *(fp - 1) = 0xff;
-#ifdef CONFIG_ISDN_AUDIO
- ISDN_AUDIO_SKB_LOCK(skb) = 0;
-#endif
- skb = skb_dequeue(&dev->drv[di]->rpqueue[channel]);
- dev_kfree_skb(skb);
- } else {
-			/* This buffer is not yet empty, so it
-			 * must stay in the queue for further calls;
-			 * we only pull off the data consumed so far.
- */
- skb_pull(skb, count_pull);
-#ifdef CONFIG_ISDN_AUDIO
- ISDN_AUDIO_SKB_LOCK(skb) = 0;
-#endif
- }
- dev->drv[di]->rcvcount[channel] -= count_put;
- }
- return count;
-}
-
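The DLE handling above implements the usual voice-modem <DLE> shielding: in-band events are reported as a 0x10 byte followed by a code (see isdn_audio_put_dle_code() earlier in this patch), so any 0x10 byte occurring in the audio payload itself is doubled on its way to the reader. The sketch below shows that doubling in isolation; it is an editor's illustration, and dle_stuff() is a made-up helper, not a function from this driver.

#include <stdio.h>

#define DLE 0x10

static int dle_stuff(const unsigned char *in, int inlen, unsigned char *out)
{
	int o = 0, i;

	for (i = 0; i < inlen; i++) {
		out[o++] = in[i];
		if (in[i] == DLE)	/* shield a payload DLE by doubling it */
			out[o++] = DLE;
	}
	return o;
}

int main(void)
{
	unsigned char in[] = { 0x01, DLE, 0x7f };
	unsigned char out[2 * sizeof(in)];
	int i, n = dle_stuff(in, (int)sizeof(in), out);

	for (i = 0; i < n; i++)
		printf("%02x ", out[i]);	/* prints: 01 10 10 7f */
	printf("\n");
	return 0;
}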
-/*
- * isdn_readbchan_tty() tries to get data from the read-queue.
- * It MUST be called with interrupts off.
- *
- * Be aware that this is not an atomic operation when sleep != 0, even though
- * interrupts are turned off! As it happens, we are currently only called
- * on behalf of a read() system call on raw device files (which are documented
- * to be dangerous and for debugging purposes only). The inode semaphore
- * takes care that this is not called for the same minor device number while
- * we are sleeping, but access is not serialized against simultaneous read()
- * from the corresponding ttyI device. Can other ugly events, like changes
- * of the mapping (di,ch)<->minor, happen during the sleep? --he
- */
-int
-isdn_readbchan_tty(int di, int channel, struct tty_port *port, int cisco_hack)
-{
- int count;
- int count_pull;
- int count_put;
- int dflag;
- struct sk_buff *skb;
- char last = 0;
- int len;
-
- if (!dev->drv[di])
- return 0;
- if (skb_queue_empty(&dev->drv[di]->rpqueue[channel]))
- return 0;
-
- len = tty_buffer_request_room(port, dev->drv[di]->rcvcount[channel]);
- if (len == 0)
- return len;
-
- count = 0;
- while (len) {
- if (!(skb = skb_peek(&dev->drv[di]->rpqueue[channel])))
- break;
-#ifdef CONFIG_ISDN_AUDIO
- if (ISDN_AUDIO_SKB_LOCK(skb))
- break;
- ISDN_AUDIO_SKB_LOCK(skb) = 1;
- if ((ISDN_AUDIO_SKB_DLECOUNT(skb)) || (dev->drv[di]->DLEflag & (1 << channel))) {
- char *p = skb->data;
- unsigned long DLEmask = (1 << channel);
-
- dflag = 0;
- count_pull = count_put = 0;
- while ((count_pull < skb->len) && (len > 0)) {
- /* push every character but the last to the tty buffer directly */
- if (count_put)
- tty_insert_flip_char(port, last, TTY_NORMAL);
- len--;
- if (dev->drv[di]->DLEflag & DLEmask) {
- last = DLE;
- dev->drv[di]->DLEflag &= ~DLEmask;
- } else {
- last = *p;
- if (last == DLE) {
- dev->drv[di]->DLEflag |= DLEmask;
- (ISDN_AUDIO_SKB_DLECOUNT(skb))--;
- }
- p++;
- count_pull++;
- }
- count_put++;
- }
- if (count_pull >= skb->len)
- dflag = 1;
- } else {
-#endif
- /* No DLE's in buff, so simply copy it */
- dflag = 1;
- if ((count_pull = skb->len) > len) {
- count_pull = len;
- dflag = 0;
- }
- count_put = count_pull;
- if (count_put > 1)
- tty_insert_flip_string(port, skb->data, count_put - 1);
- last = skb->data[count_put - 1];
- len -= count_put;
-#ifdef CONFIG_ISDN_AUDIO
- }
-#endif
- count += count_put;
- if (dflag) {
- /* We got all the data in this buff.
- * Now we can dequeue it.
- */
- if (cisco_hack)
- tty_insert_flip_char(port, last, 0xFF);
- else
- tty_insert_flip_char(port, last, TTY_NORMAL);
-#ifdef CONFIG_ISDN_AUDIO
- ISDN_AUDIO_SKB_LOCK(skb) = 0;
-#endif
- skb = skb_dequeue(&dev->drv[di]->rpqueue[channel]);
- dev_kfree_skb(skb);
- } else {
- tty_insert_flip_char(port, last, TTY_NORMAL);
-			/* This buffer is not yet empty, so it
-			 * must stay in the queue for further calls;
-			 * we only pull off the data consumed so far.
- */
- skb_pull(skb, count_pull);
-#ifdef CONFIG_ISDN_AUDIO
- ISDN_AUDIO_SKB_LOCK(skb) = 0;
-#endif
- }
- dev->drv[di]->rcvcount[channel] -= count_put;
- }
- return count;
-}
-
-
-static inline int
-isdn_minor2drv(int minor)
-{
- return (dev->drvmap[minor]);
-}
-
-static inline int
-isdn_minor2chan(int minor)
-{
- return (dev->chanmap[minor]);
-}
-
-static char *
-isdn_statstr(void)
-{
- static char istatbuf[2048];
- char *p;
- int i;
-
- sprintf(istatbuf, "idmap:\t");
- p = istatbuf + strlen(istatbuf);
- for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
- sprintf(p, "%s ", (dev->drvmap[i] < 0) ? "-" : dev->drvid[dev->drvmap[i]]);
- p = istatbuf + strlen(istatbuf);
- }
- sprintf(p, "\nchmap:\t");
- p = istatbuf + strlen(istatbuf);
- for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
- sprintf(p, "%d ", dev->chanmap[i]);
- p = istatbuf + strlen(istatbuf);
- }
- sprintf(p, "\ndrmap:\t");
- p = istatbuf + strlen(istatbuf);
- for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
- sprintf(p, "%d ", dev->drvmap[i]);
- p = istatbuf + strlen(istatbuf);
- }
- sprintf(p, "\nusage:\t");
- p = istatbuf + strlen(istatbuf);
- for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
- sprintf(p, "%d ", dev->usage[i]);
- p = istatbuf + strlen(istatbuf);
- }
- sprintf(p, "\nflags:\t");
- p = istatbuf + strlen(istatbuf);
- for (i = 0; i < ISDN_MAX_DRIVERS; i++) {
- if (dev->drv[i]) {
- sprintf(p, "%ld ", dev->drv[i]->online);
- p = istatbuf + strlen(istatbuf);
- } else {
- sprintf(p, "? ");
- p = istatbuf + strlen(istatbuf);
- }
- }
- sprintf(p, "\nphone:\t");
- p = istatbuf + strlen(istatbuf);
- for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
- sprintf(p, "%s ", dev->num[i]);
- p = istatbuf + strlen(istatbuf);
- }
- sprintf(p, "\n");
- return istatbuf;
-}
-
-/* Module interface-code */
-
-void
-isdn_info_update(void)
-{
- infostruct *p = dev->infochain;
-
- while (p) {
- *(p->private) = 1;
- p = (infostruct *) p->next;
- }
- wake_up_interruptible(&(dev->info_waitq));
-}
-
-static ssize_t
-isdn_read(struct file *file, char __user *buf, size_t count, loff_t *off)
-{
- uint minor = iminor(file_inode(file));
- int len = 0;
- int drvidx;
- int chidx;
- int retval;
- char *p;
-
- mutex_lock(&isdn_mutex);
- if (minor == ISDN_MINOR_STATUS) {
- if (!file->private_data) {
- if (file->f_flags & O_NONBLOCK) {
- retval = -EAGAIN;
- goto out;
- }
- wait_event_interruptible(dev->info_waitq,
- file->private_data);
- }
- p = isdn_statstr();
- file->private_data = NULL;
- if ((len = strlen(p)) <= count) {
- if (copy_to_user(buf, p, len)) {
- retval = -EFAULT;
- goto out;
- }
- *off += len;
- retval = len;
- goto out;
- }
- retval = 0;
- goto out;
- }
- if (!dev->drivers) {
- retval = -ENODEV;
- goto out;
- }
- if (minor <= ISDN_MINOR_BMAX) {
- printk(KERN_WARNING "isdn_read minor %d obsolete!\n", minor);
- drvidx = isdn_minor2drv(minor);
- if (drvidx < 0) {
- retval = -ENODEV;
- goto out;
- }
- if (!(dev->drv[drvidx]->flags & DRV_FLAG_RUNNING)) {
- retval = -ENODEV;
- goto out;
- }
- chidx = isdn_minor2chan(minor);
- if (!(p = kmalloc(count, GFP_KERNEL))) {
- retval = -ENOMEM;
- goto out;
- }
- len = isdn_readbchan(drvidx, chidx, p, NULL, count,
- &dev->drv[drvidx]->rcv_waitq[chidx]);
- *off += len;
- if (copy_to_user(buf, p, len))
- len = -EFAULT;
- kfree(p);
- retval = len;
- goto out;
- }
- if (minor <= ISDN_MINOR_CTRLMAX) {
- drvidx = isdn_minor2drv(minor - ISDN_MINOR_CTRL);
- if (drvidx < 0) {
- retval = -ENODEV;
- goto out;
- }
- if (!dev->drv[drvidx]->stavail) {
- if (file->f_flags & O_NONBLOCK) {
- retval = -EAGAIN;
- goto out;
- }
- wait_event_interruptible(dev->drv[drvidx]->st_waitq,
- dev->drv[drvidx]->stavail);
- }
- if (dev->drv[drvidx]->interface->readstat) {
- if (count > dev->drv[drvidx]->stavail)
- count = dev->drv[drvidx]->stavail;
- len = dev->drv[drvidx]->interface->readstat(buf, count,
- drvidx, isdn_minor2chan(minor - ISDN_MINOR_CTRL));
- if (len < 0) {
- retval = len;
- goto out;
- }
- } else {
- len = 0;
- }
- if (len)
- dev->drv[drvidx]->stavail -= len;
- else
- dev->drv[drvidx]->stavail = 0;
- *off += len;
- retval = len;
- goto out;
- }
-#ifdef CONFIG_ISDN_PPP
- if (minor <= ISDN_MINOR_PPPMAX) {
- retval = isdn_ppp_read(minor - ISDN_MINOR_PPP, file, buf, count);
- goto out;
- }
-#endif
- retval = -ENODEV;
-out:
- mutex_unlock(&isdn_mutex);
- return retval;
-}
-
-static ssize_t
-isdn_write(struct file *file, const char __user *buf, size_t count, loff_t *off)
-{
- uint minor = iminor(file_inode(file));
- int drvidx;
- int chidx;
- int retval;
-
- if (minor == ISDN_MINOR_STATUS)
- return -EPERM;
- if (!dev->drivers)
- return -ENODEV;
-
- mutex_lock(&isdn_mutex);
- if (minor <= ISDN_MINOR_BMAX) {
- printk(KERN_WARNING "isdn_write minor %d obsolete!\n", minor);
- drvidx = isdn_minor2drv(minor);
- if (drvidx < 0) {
- retval = -ENODEV;
- goto out;
- }
- if (!(dev->drv[drvidx]->flags & DRV_FLAG_RUNNING)) {
- retval = -ENODEV;
- goto out;
- }
- chidx = isdn_minor2chan(minor);
- wait_event_interruptible(dev->drv[drvidx]->snd_waitq[chidx],
- (retval = isdn_writebuf_stub(drvidx, chidx, buf, count)));
- goto out;
- }
- if (minor <= ISDN_MINOR_CTRLMAX) {
- drvidx = isdn_minor2drv(minor - ISDN_MINOR_CTRL);
- if (drvidx < 0) {
- retval = -ENODEV;
- goto out;
- }
- /*
- * We want to use the isdnctrl device to load the firmware
- *
- if (!(dev->drv[drvidx]->flags & DRV_FLAG_RUNNING))
- return -ENODEV;
- */
- if (dev->drv[drvidx]->interface->writecmd)
- retval = dev->drv[drvidx]->interface->
- writecmd(buf, count, drvidx,
- isdn_minor2chan(minor - ISDN_MINOR_CTRL));
- else
- retval = count;
- goto out;
- }
-#ifdef CONFIG_ISDN_PPP
- if (minor <= ISDN_MINOR_PPPMAX) {
- retval = isdn_ppp_write(minor - ISDN_MINOR_PPP, file, buf, count);
- goto out;
- }
-#endif
- retval = -ENODEV;
-out:
- mutex_unlock(&isdn_mutex);
- return retval;
-}
-
-static __poll_t
-isdn_poll(struct file *file, poll_table *wait)
-{
- __poll_t mask = 0;
- unsigned int minor = iminor(file_inode(file));
- int drvidx = isdn_minor2drv(minor - ISDN_MINOR_CTRL);
-
- mutex_lock(&isdn_mutex);
- if (minor == ISDN_MINOR_STATUS) {
- poll_wait(file, &(dev->info_waitq), wait);
- /* mask = EPOLLOUT | EPOLLWRNORM; */
- if (file->private_data) {
- mask |= EPOLLIN | EPOLLRDNORM;
- }
- goto out;
- }
- if (minor >= ISDN_MINOR_CTRL && minor <= ISDN_MINOR_CTRLMAX) {
- if (drvidx < 0) {
- /* driver deregistered while file open */
- mask = EPOLLHUP;
- goto out;
- }
- poll_wait(file, &(dev->drv[drvidx]->st_waitq), wait);
- mask = EPOLLOUT | EPOLLWRNORM;
- if (dev->drv[drvidx]->stavail) {
- mask |= EPOLLIN | EPOLLRDNORM;
- }
- goto out;
- }
-#ifdef CONFIG_ISDN_PPP
- if (minor <= ISDN_MINOR_PPPMAX) {
- mask = isdn_ppp_poll(file, wait);
- goto out;
- }
-#endif
- mask = EPOLLERR;
-out:
- mutex_unlock(&isdn_mutex);
- return mask;
-}
-
-
-static int
-isdn_ioctl(struct file *file, uint cmd, ulong arg)
-{
- uint minor = iminor(file_inode(file));
- isdn_ctrl c;
- int drvidx;
- int ret;
- int i;
- char __user *p;
- char *s;
- union iocpar {
- char name[10];
- char bname[22];
- isdn_ioctl_struct iocts;
- isdn_net_ioctl_phone phone;
- isdn_net_ioctl_cfg cfg;
- } iocpar;
- void __user *argp = (void __user *)arg;
-
-#define name iocpar.name
-#define bname iocpar.bname
-#define iocts iocpar.iocts
-#define phone iocpar.phone
-#define cfg iocpar.cfg
-
- if (minor == ISDN_MINOR_STATUS) {
- switch (cmd) {
- case IIOCGETDVR:
- return (TTY_DV +
- (NET_DV << 8) +
- (INF_DV << 16));
- case IIOCGETCPS:
- if (arg) {
- ulong __user *p = argp;
- int i;
- for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
- put_user(dev->ibytes[i], p++);
- put_user(dev->obytes[i], p++);
- }
- return 0;
- } else
- return -EINVAL;
- break;
- case IIOCNETGPN:
- /* Get peer phone number of a connected
- * isdn network interface */
- if (arg) {
- if (copy_from_user(&phone, argp, sizeof(phone)))
- return -EFAULT;
- return isdn_net_getpeer(&phone, argp);
- } else
- return -EINVAL;
- default:
- return -EINVAL;
- }
- }
- if (!dev->drivers)
- return -ENODEV;
- if (minor <= ISDN_MINOR_BMAX) {
- drvidx = isdn_minor2drv(minor);
- if (drvidx < 0)
- return -ENODEV;
- if (!(dev->drv[drvidx]->flags & DRV_FLAG_RUNNING))
- return -ENODEV;
- return 0;
- }
- if (minor <= ISDN_MINOR_CTRLMAX) {
-/*
- * isdn net devices manage lots of configuration variables as linked lists.
- * Those lists must only be manipulated from user space. Some of the ioctl's
- * service routines access user space and are not atomic. Therefore, ioctl's
- * manipulating the lists and ioctl's sleeping while accessing the lists
- * are serialized by means of a semaphore.
- */
- switch (cmd) {
- case IIOCNETDWRSET:
- printk(KERN_INFO "INFO: ISDN_DW_ABC_EXTENSION not enabled\n");
- return (-EINVAL);
- case IIOCNETLCR:
- printk(KERN_INFO "INFO: ISDN_ABC_LCR_SUPPORT not enabled\n");
- return -ENODEV;
- case IIOCNETAIF:
- /* Add a network-interface */
- if (arg) {
- if (copy_from_user(name, argp, sizeof(name)))
- return -EFAULT;
- s = name;
- } else {
- s = NULL;
- }
- ret = mutex_lock_interruptible(&dev->mtx);
- if (ret) return ret;
- if ((s = isdn_net_new(s, NULL))) {
- if (copy_to_user(argp, s, strlen(s) + 1)) {
- ret = -EFAULT;
- } else {
- ret = 0;
- }
- } else
- ret = -ENODEV;
- mutex_unlock(&dev->mtx);
- return ret;
- case IIOCNETASL:
- /* Add a slave to a network-interface */
- if (arg) {
- if (copy_from_user(bname, argp, sizeof(bname) - 1))
- return -EFAULT;
- bname[sizeof(bname)-1] = 0;
- } else
- return -EINVAL;
- ret = mutex_lock_interruptible(&dev->mtx);
- if (ret) return ret;
- if ((s = isdn_net_newslave(bname))) {
- if (copy_to_user(argp, s, strlen(s) + 1)) {
- ret = -EFAULT;
- } else {
- ret = 0;
- }
- } else
- ret = -ENODEV;
- mutex_unlock(&dev->mtx);
- return ret;
- case IIOCNETDIF:
- /* Delete a network-interface */
- if (arg) {
- if (copy_from_user(name, argp, sizeof(name)))
- return -EFAULT;
- ret = mutex_lock_interruptible(&dev->mtx);
- if (ret) return ret;
- ret = isdn_net_rm(name);
- mutex_unlock(&dev->mtx);
- return ret;
- } else
- return -EINVAL;
- case IIOCNETSCF:
- /* Set configurable parameters of a network-interface */
- if (arg) {
- if (copy_from_user(&cfg, argp, sizeof(cfg)))
- return -EFAULT;
- return isdn_net_setcfg(&cfg);
- } else
- return -EINVAL;
- case IIOCNETGCF:
- /* Get configurable parameters of a network-interface */
- if (arg) {
- if (copy_from_user(&cfg, argp, sizeof(cfg)))
- return -EFAULT;
- if (!(ret = isdn_net_getcfg(&cfg))) {
- if (copy_to_user(argp, &cfg, sizeof(cfg)))
- return -EFAULT;
- }
- return ret;
- } else
- return -EINVAL;
- case IIOCNETANM:
- /* Add a phone-number to a network-interface */
- if (arg) {
- if (copy_from_user(&phone, argp, sizeof(phone)))
- return -EFAULT;
- ret = mutex_lock_interruptible(&dev->mtx);
- if (ret) return ret;
- ret = isdn_net_addphone(&phone);
- mutex_unlock(&dev->mtx);
- return ret;
- } else
- return -EINVAL;
- case IIOCNETGNM:
- /* Get list of phone-numbers of a network-interface */
- if (arg) {
- if (copy_from_user(&phone, argp, sizeof(phone)))
- return -EFAULT;
- ret = mutex_lock_interruptible(&dev->mtx);
- if (ret) return ret;
- ret = isdn_net_getphones(&phone, argp);
- mutex_unlock(&dev->mtx);
- return ret;
- } else
- return -EINVAL;
- case IIOCNETDNM:
- /* Delete a phone-number of a network-interface */
- if (arg) {
- if (copy_from_user(&phone, argp, sizeof(phone)))
- return -EFAULT;
- ret = mutex_lock_interruptible(&dev->mtx);
- if (ret) return ret;
- ret = isdn_net_delphone(&phone);
- mutex_unlock(&dev->mtx);
- return ret;
- } else
- return -EINVAL;
- case IIOCNETDIL:
- /* Force dialing of a network-interface */
- if (arg) {
- if (copy_from_user(name, argp, sizeof(name)))
- return -EFAULT;
- return isdn_net_force_dial(name);
- } else
- return -EINVAL;
-#ifdef CONFIG_ISDN_PPP
- case IIOCNETALN:
- if (!arg)
- return -EINVAL;
- if (copy_from_user(name, argp, sizeof(name)))
- return -EFAULT;
- return isdn_ppp_dial_slave(name);
- case IIOCNETDLN:
- if (!arg)
- return -EINVAL;
- if (copy_from_user(name, argp, sizeof(name)))
- return -EFAULT;
- return isdn_ppp_hangup_slave(name);
-#endif
- case IIOCNETHUP:
- /* Force hangup of a network-interface */
- if (!arg)
- return -EINVAL;
- if (copy_from_user(name, argp, sizeof(name)))
- return -EFAULT;
- return isdn_net_force_hangup(name);
- break;
- case IIOCSETVER:
- dev->net_verbose = arg;
- printk(KERN_INFO "isdn: Verbose-Level is %d\n", dev->net_verbose);
- return 0;
- case IIOCSETGST:
- if (arg)
- dev->global_flags |= ISDN_GLOBAL_STOPPED;
- else
- dev->global_flags &= ~ISDN_GLOBAL_STOPPED;
- printk(KERN_INFO "isdn: Global Mode %s\n",
- (dev->global_flags & ISDN_GLOBAL_STOPPED) ? "stopped" : "running");
- return 0;
- case IIOCSETBRJ:
- drvidx = -1;
- if (arg) {
- int i;
- char *p;
- if (copy_from_user(&iocts, argp,
- sizeof(isdn_ioctl_struct)))
- return -EFAULT;
- iocts.drvid[sizeof(iocts.drvid) - 1] = 0;
- if (strlen(iocts.drvid)) {
- if ((p = strchr(iocts.drvid, ',')))
- *p = 0;
- drvidx = -1;
- for (i = 0; i < ISDN_MAX_DRIVERS; i++)
- if (!(strcmp(dev->drvid[i], iocts.drvid))) {
- drvidx = i;
- break;
- }
- }
- }
- if (drvidx == -1)
- return -ENODEV;
- if (iocts.arg)
- dev->drv[drvidx]->flags |= DRV_FLAG_REJBUS;
- else
- dev->drv[drvidx]->flags &= ~DRV_FLAG_REJBUS;
- return 0;
- case IIOCSIGPRF:
- dev->profd = current;
- return 0;
- break;
- case IIOCGETPRF:
- /* Get all Modem-Profiles */
- if (arg) {
- char __user *p = argp;
- int i;
-
- for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
- if (copy_to_user(p, dev->mdm.info[i].emu.profile,
- ISDN_MODEM_NUMREG))
- return -EFAULT;
- p += ISDN_MODEM_NUMREG;
- if (copy_to_user(p, dev->mdm.info[i].emu.pmsn, ISDN_MSNLEN))
- return -EFAULT;
- p += ISDN_MSNLEN;
- if (copy_to_user(p, dev->mdm.info[i].emu.plmsn, ISDN_LMSNLEN))
- return -EFAULT;
- p += ISDN_LMSNLEN;
- }
- return (ISDN_MODEM_NUMREG + ISDN_MSNLEN + ISDN_LMSNLEN) * ISDN_MAX_CHANNELS;
- } else
- return -EINVAL;
- break;
- case IIOCSETPRF:
- /* Set all Modem-Profiles */
- if (arg) {
- char __user *p = argp;
- int i;
-
- for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
- if (copy_from_user(dev->mdm.info[i].emu.profile, p,
- ISDN_MODEM_NUMREG))
- return -EFAULT;
- p += ISDN_MODEM_NUMREG;
- if (copy_from_user(dev->mdm.info[i].emu.plmsn, p, ISDN_LMSNLEN))
- return -EFAULT;
- p += ISDN_LMSNLEN;
- if (copy_from_user(dev->mdm.info[i].emu.pmsn, p, ISDN_MSNLEN))
- return -EFAULT;
- p += ISDN_MSNLEN;
- }
- return 0;
- } else
- return -EINVAL;
- break;
- case IIOCSETMAP:
- case IIOCGETMAP:
- /* Set/Get MSN->EAZ-Mapping for a driver */
- if (arg) {
-
- if (copy_from_user(&iocts, argp,
- sizeof(isdn_ioctl_struct)))
- return -EFAULT;
- iocts.drvid[sizeof(iocts.drvid) - 1] = 0;
- if (strlen(iocts.drvid)) {
- drvidx = -1;
- for (i = 0; i < ISDN_MAX_DRIVERS; i++)
- if (!(strcmp(dev->drvid[i], iocts.drvid))) {
- drvidx = i;
- break;
- }
- } else
- drvidx = 0;
- if (drvidx == -1)
- return -ENODEV;
- if (cmd == IIOCSETMAP) {
- int loop = 1;
-
- p = (char __user *) iocts.arg;
- i = 0;
- while (loop) {
- int j = 0;
-
- while (1) {
- get_user(bname[j], p++);
- switch (bname[j]) {
- case '\0':
- loop = 0;
- /* Fall through */
- case ',':
- bname[j] = '\0';
- strcpy(dev->drv[drvidx]->msn2eaz[i], bname);
- j = ISDN_MSNLEN;
- break;
- default:
- j++;
- }
- if (j >= ISDN_MSNLEN)
- break;
- }
- if (++i > 9)
- break;
- }
- } else {
- p = (char __user *) iocts.arg;
- for (i = 0; i < 10; i++) {
- snprintf(bname, sizeof(bname), "%s%s",
- strlen(dev->drv[drvidx]->msn2eaz[i]) ?
- dev->drv[drvidx]->msn2eaz[i] : "_",
- (i < 9) ? "," : "\0");
- if (copy_to_user(p, bname, strlen(bname) + 1))
- return -EFAULT;
- p += strlen(bname);
- }
- }
- return 0;
- } else
- return -EINVAL;
- case IIOCDBGVAR:
- return -EINVAL;
- default:
- if ((cmd & IIOCDRVCTL) == IIOCDRVCTL)
- cmd = ((cmd >> _IOC_NRSHIFT) & _IOC_NRMASK) & ISDN_DRVIOCTL_MASK;
- else
- return -EINVAL;
- if (arg) {
- int i;
- char *p;
- if (copy_from_user(&iocts, argp, sizeof(isdn_ioctl_struct)))
- return -EFAULT;
- iocts.drvid[sizeof(iocts.drvid) - 1] = 0;
- if (strlen(iocts.drvid)) {
- if ((p = strchr(iocts.drvid, ',')))
- *p = 0;
- drvidx = -1;
- for (i = 0; i < ISDN_MAX_DRIVERS; i++)
- if (!(strcmp(dev->drvid[i], iocts.drvid))) {
- drvidx = i;
- break;
- }
- } else
- drvidx = 0;
- if (drvidx == -1)
- return -ENODEV;
- c.driver = drvidx;
- c.command = ISDN_CMD_IOCTL;
- c.arg = cmd;
- memcpy(c.parm.num, &iocts.arg, sizeof(ulong));
- ret = isdn_command(&c);
- memcpy(&iocts.arg, c.parm.num, sizeof(ulong));
- if (copy_to_user(argp, &iocts, sizeof(isdn_ioctl_struct)))
- return -EFAULT;
- return ret;
- } else
- return -EINVAL;
- }
- }
-#ifdef CONFIG_ISDN_PPP
- if (minor <= ISDN_MINOR_PPPMAX)
- return (isdn_ppp_ioctl(minor - ISDN_MINOR_PPP, file, cmd, arg));
-#endif
- return -ENODEV;
-
-#undef name
-#undef bname
-#undef iocts
-#undef phone
-#undef cfg
-}
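
The IIOCSETMAP/IIOCGETMAP branch above exchanges a driver's MSN-to-EAZ table with user space as a single comma-separated string of up to ten entries, where "_" marks an unset slot. A small user-space sketch of parsing that format; the buffer sizes and names below are assumptions (the kernel uses ISDN_MSNLEN and msn2eaz[]).

#include <stdio.h>
#include <string.h>

#define MAX_EAZ 10
#define MSNLEN  32                        /* illustrative; kernel: ISDN_MSNLEN */

int main(void)
{
	const char *map = "5551234,_,5559876";   /* EAZ 0 and 2 mapped, 1 unset */
	char msn2eaz[MAX_EAZ][MSNLEN] = { { 0 } };
	char buf[256];
	char *tok;
	int i = 0;

	snprintf(buf, sizeof(buf), "%s", map);
	for (tok = strtok(buf, ","); tok && i < MAX_EAZ; tok = strtok(NULL, ","), i++)
		if (strcmp(tok, "_"))
			snprintf(msn2eaz[i], MSNLEN, "%s", tok);
	for (i = 0; i < MAX_EAZ; i++)
		printf("EAZ %d -> %s\n", i, msn2eaz[i][0] ? msn2eaz[i] : "(unset)");
	return 0;
}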
-
-static long
-isdn_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- int ret;
-
- mutex_lock(&isdn_mutex);
- ret = isdn_ioctl(file, cmd, arg);
- mutex_unlock(&isdn_mutex);
-
- return ret;
-}
-
-/*
- * Open the device.
- */
-static int
-isdn_open(struct inode *ino, struct file *filep)
-{
- uint minor = iminor(ino);
- int drvidx;
- int chidx;
- int retval = -ENODEV;
-
- mutex_lock(&isdn_mutex);
- if (minor == ISDN_MINOR_STATUS) {
- infostruct *p;
-
- if ((p = kmalloc(sizeof(infostruct), GFP_KERNEL))) {
- p->next = (char *) dev->infochain;
- p->private = (char *) &(filep->private_data);
- dev->infochain = p;
- /* At opening we allow a single update */
- filep->private_data = (char *) 1;
- retval = 0;
- goto out;
- } else {
- retval = -ENOMEM;
- goto out;
- }
- }
- if (!dev->channels)
- goto out;
- if (minor <= ISDN_MINOR_BMAX) {
- printk(KERN_WARNING "isdn_open minor %d obsolete!\n", minor);
- drvidx = isdn_minor2drv(minor);
- if (drvidx < 0)
- goto out;
- chidx = isdn_minor2chan(minor);
- if (!(dev->drv[drvidx]->flags & DRV_FLAG_RUNNING))
- goto out;
- if (!(dev->drv[drvidx]->online & (1 << chidx)))
- goto out;
- isdn_lock_drivers();
- retval = 0;
- goto out;
- }
- if (minor <= ISDN_MINOR_CTRLMAX) {
- drvidx = isdn_minor2drv(minor - ISDN_MINOR_CTRL);
- if (drvidx < 0)
- goto out;
- isdn_lock_drivers();
- retval = 0;
- goto out;
- }
-#ifdef CONFIG_ISDN_PPP
- if (minor <= ISDN_MINOR_PPPMAX) {
- retval = isdn_ppp_open(minor - ISDN_MINOR_PPP, filep);
- if (retval == 0)
- isdn_lock_drivers();
- goto out;
- }
-#endif
-out:
- nonseekable_open(ino, filep);
- mutex_unlock(&isdn_mutex);
- return retval;
-}
-
-static int
-isdn_close(struct inode *ino, struct file *filep)
-{
- uint minor = iminor(ino);
-
- mutex_lock(&isdn_mutex);
- if (minor == ISDN_MINOR_STATUS) {
- infostruct *p = dev->infochain;
- infostruct *q = NULL;
-
- while (p) {
- if (p->private == (char *) &(filep->private_data)) {
- if (q)
- q->next = p->next;
- else
- dev->infochain = (infostruct *) (p->next);
- kfree(p);
- goto out;
- }
- q = p;
- p = (infostruct *) (p->next);
- }
- printk(KERN_WARNING "isdn: No private data while closing isdnctrl\n");
- goto out;
- }
- isdn_unlock_drivers();
- if (minor <= ISDN_MINOR_BMAX)
- goto out;
- if (minor <= ISDN_MINOR_CTRLMAX) {
- if (dev->profd == current)
- dev->profd = NULL;
- goto out;
- }
-#ifdef CONFIG_ISDN_PPP
- if (minor <= ISDN_MINOR_PPPMAX)
- isdn_ppp_release(minor - ISDN_MINOR_PPP, filep);
-#endif
-
-out:
- mutex_unlock(&isdn_mutex);
- return 0;
-}
-
-static const struct file_operations isdn_fops =
-{
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .read = isdn_read,
- .write = isdn_write,
- .poll = isdn_poll,
- .unlocked_ioctl = isdn_unlocked_ioctl,
- .open = isdn_open,
- .release = isdn_close,
-};
-
-char *
-isdn_map_eaz2msn(char *msn, int di)
-{
- isdn_driver_t *this = dev->drv[di];
- int i;
-
- if (strlen(msn) == 1) {
- i = msn[0] - '0';
- if ((i >= 0) && (i <= 9))
- if (strlen(this->msn2eaz[i]))
- return (this->msn2eaz[i]);
- }
- return (msn);
-}
-
-/*
- * Find an unused ISDN-channel, whose feature-flags match the
- * given L2- and L3-protocols.
- */
-#define L2V (~(ISDN_FEATURE_L2_V11096 | ISDN_FEATURE_L2_V11019 | ISDN_FEATURE_L2_V11038))
-
-/*
- * This function must be called while holding dev->lock.
- */
-int
-isdn_get_free_channel(int usage, int l2_proto, int l3_proto, int pre_dev
- , int pre_chan, char *msn)
-{
- int i;
- ulong features;
- ulong vfeatures;
-
- features = ((1 << l2_proto) | (0x10000 << l3_proto));
- vfeatures = (((1 << l2_proto) | (0x10000 << l3_proto)) &
- ~(ISDN_FEATURE_L2_V11096 | ISDN_FEATURE_L2_V11019 | ISDN_FEATURE_L2_V11038));
- /* If Layer-2 protocol is V.110, accept drivers with
- * transparent feature even if these don't support V.110
- * because we can emulate this in linklevel.
- */
- for (i = 0; i < ISDN_MAX_CHANNELS; i++)
- if (USG_NONE(dev->usage[i]) &&
- (dev->drvmap[i] != -1)) {
- int d = dev->drvmap[i];
- if ((dev->usage[i] & ISDN_USAGE_EXCLUSIVE) &&
- ((pre_dev != d) || (pre_chan != dev->chanmap[i])))
- continue;
- if (!strcmp(isdn_map_eaz2msn(msn, d), "-"))
- continue;
- if (dev->usage[i] & ISDN_USAGE_DISABLED)
- continue; /* usage not allowed */
- if (dev->drv[d]->flags & DRV_FLAG_RUNNING) {
- if (((dev->drv[d]->interface->features & features) == features) ||
- (((dev->drv[d]->interface->features & vfeatures) == vfeatures) &&
- (dev->drv[d]->interface->features & ISDN_FEATURE_L2_TRANS))) {
- if ((pre_dev < 0) || (pre_chan < 0)) {
- dev->usage[i] &= ISDN_USAGE_EXCLUSIVE;
- dev->usage[i] |= usage;
- isdn_info_update();
- return i;
- } else {
- if ((pre_dev == d) && (pre_chan == dev->chanmap[i])) {
- dev->usage[i] &= ISDN_USAGE_EXCLUSIVE;
- dev->usage[i] |= usage;
- isdn_info_update();
- return i;
- }
- }
- }
- }
- }
- return -1;
-}
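
The selection loop above matches a channel by feature bitmask: the requested L2 protocol sits in the low 16 bits, the L3 protocol in the bits above, and a driver that only offers transparent L2 is still accepted for V.110 requests, because V.110 is emulated in the link level. A stand-alone sketch of that test; the concrete bit positions below are illustrative, not the real ISDN_FEATURE_* values.

#include <stdio.h>

#define FEAT(l2, l3)   ((1UL << (l2)) | (0x10000UL << (l3)))
#define L2_TRANS       (1UL << 2)       /* illustrative bit for transparent L2 */
#define L2_V110_MASK   0xE000UL         /* illustrative V.110 L2 bits */

static int channel_matches(unsigned long drv_features,
			   unsigned long wanted, unsigned long wanted_no_v110)
{
	if ((drv_features & wanted) == wanted)
		return 1;                               /* exact protocol support */
	return (drv_features & wanted_no_v110) == wanted_no_v110 &&
	       (drv_features & L2_TRANS) != 0;          /* emulate V.110 over transparent L2 */
}

int main(void)
{
	unsigned long wanted = FEAT(13, 1);             /* a V.110 L2 proto, L3 proto 1 */
	unsigned long wanted_no_v110 = wanted & ~L2_V110_MASK;
	unsigned long drv = L2_TRANS | FEAT(0, 1);      /* transparent-only driver */

	printf("match: %d\n", channel_matches(drv, wanted, wanted_no_v110));   /* 1 */
	return 0;
}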
-
-/*
- * Set state of ISDN-channel to 'unused'
- */
-void
-isdn_free_channel(int di, int ch, int usage)
-{
- int i;
-
- if ((di < 0) || (ch < 0)) {
- printk(KERN_WARNING "%s: called with invalid drv(%d) or channel(%d)\n",
- __func__, di, ch);
- return;
- }
- for (i = 0; i < ISDN_MAX_CHANNELS; i++)
- if (((!usage) || ((dev->usage[i] & ISDN_USAGE_MASK) == usage)) &&
- (dev->drvmap[i] == di) &&
- (dev->chanmap[i] == ch)) {
- dev->usage[i] &= (ISDN_USAGE_NONE | ISDN_USAGE_EXCLUSIVE);
- strcpy(dev->num[i], "???");
- dev->ibytes[i] = 0;
- dev->obytes[i] = 0;
-// 20.10.99 JIM, try to reinitialize v110 !
- dev->v110emu[i] = 0;
- atomic_set(&(dev->v110use[i]), 0);
- isdn_v110_close(dev->v110[i]);
- dev->v110[i] = NULL;
-// 20.10.99 JIM, try to reinitialize v110 !
- isdn_info_update();
- if (dev->drv[di])
- skb_queue_purge(&dev->drv[di]->rpqueue[ch]);
- }
-}
-
-/*
- * Cancel Exclusive-Flag for ISDN-channel
- */
-void
-isdn_unexclusive_channel(int di, int ch)
-{
- int i;
-
- for (i = 0; i < ISDN_MAX_CHANNELS; i++)
- if ((dev->drvmap[i] == di) &&
- (dev->chanmap[i] == ch)) {
- dev->usage[i] &= ~ISDN_USAGE_EXCLUSIVE;
- isdn_info_update();
- return;
- }
-}
-
-/*
- * writebuf replacement for SKB_ABLE drivers
- */
-static int
-isdn_writebuf_stub(int drvidx, int chan, const u_char __user *buf, int len)
-{
- int ret;
- int hl = dev->drv[drvidx]->interface->hl_hdrlen;
- struct sk_buff *skb = alloc_skb(hl + len, GFP_ATOMIC);
-
- if (!skb)
- return -ENOMEM;
- skb_reserve(skb, hl);
- if (copy_from_user(skb_put(skb, len), buf, len)) {
- dev_kfree_skb(skb);
- return -EFAULT;
- }
- ret = dev->drv[drvidx]->interface->writebuf_skb(drvidx, chan, 1, skb);
- if (ret <= 0)
- dev_kfree_skb(skb);
- if (ret > 0)
- dev->obytes[isdn_dc2minor(drvidx, chan)] += ret;
- return ret;
-}
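
isdn_writebuf_stub() above reserves hl_hdrlen bytes of headroom before copying the user data, so the hardware-level driver can later prepend its own header in place instead of reallocating or moving the payload. A user-space sketch of that headroom idea using plain buffers; the names here are illustrative, the kernel of course works on struct sk_buff.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct frame {
	unsigned char *head;   /* start of the allocation */
	unsigned char *data;   /* start of payload (head + headroom) */
	size_t len;            /* current length */
};

static struct frame *frame_alloc(size_t headroom, const void *payload, size_t len)
{
	struct frame *f = malloc(sizeof(*f));

	f->head = malloc(headroom + len);
	f->data = f->head + headroom;          /* like skb_reserve(skb, hl) */
	memcpy(f->data, payload, len);
	f->len = len;
	return f;
}

static void frame_push_header(struct frame *f, const void *hdr, size_t hlen)
{
	f->data -= hlen;                       /* like skb_push(skb, hlen) */
	f->len += hlen;
	memcpy(f->data, hdr, hlen);
}

int main(void)
{
	struct frame *f = frame_alloc(4, "payload", 7);

	frame_push_header(f, "HDR!", 4);
	printf("%zu bytes, starting with %.4s\n", f->len, (char *)f->data);
	free(f->head);
	free(f);
	return 0;
}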
-
-/*
- * Return: length of data on success, -ERRcode on failure.
- */
-int
-isdn_writebuf_skb_stub(int drvidx, int chan, int ack, struct sk_buff *skb)
-{
- int ret;
- struct sk_buff *nskb = NULL;
- int v110_ret = skb->len;
- int idx = isdn_dc2minor(drvidx, chan);
-
- if (dev->v110[idx]) {
- atomic_inc(&dev->v110use[idx]);
- nskb = isdn_v110_encode(dev->v110[idx], skb);
- atomic_dec(&dev->v110use[idx]);
- if (!nskb)
- return 0;
- v110_ret = *((int *)nskb->data);
- skb_pull(nskb, sizeof(int));
- if (!nskb->len) {
- dev_kfree_skb(nskb);
- return v110_ret;
- }
- /* V.110 must always be acknowledged */
- ack = 1;
- ret = dev->drv[drvidx]->interface->writebuf_skb(drvidx, chan, ack, nskb);
- } else {
- int hl = dev->drv[drvidx]->interface->hl_hdrlen;
-
- if (skb_headroom(skb) < hl) {
- /*
-			 * This should only occur when a new HL driver with
-			 * increased hl_hdrlen was loaded after the netdevice
-			 * was created and connected to the new driver.
- *
- * The V.110 branch (re-allocates on its own) does
- * not need this
- */
- struct sk_buff *skb_tmp;
-
- skb_tmp = skb_realloc_headroom(skb, hl);
- printk(KERN_DEBUG "isdn_writebuf_skb_stub: reallocating headroom%s\n", skb_tmp ? "" : " failed");
- if (!skb_tmp) return -ENOMEM; /* 0 better? */
- ret = dev->drv[drvidx]->interface->writebuf_skb(drvidx, chan, ack, skb_tmp);
- if (ret > 0) {
- dev_kfree_skb(skb);
- } else {
- dev_kfree_skb(skb_tmp);
- }
- } else {
- ret = dev->drv[drvidx]->interface->writebuf_skb(drvidx, chan, ack, skb);
- }
- }
- if (ret > 0) {
- dev->obytes[idx] += ret;
- if (dev->v110[idx]) {
- atomic_inc(&dev->v110use[idx]);
- dev->v110[idx]->skbuser++;
- atomic_dec(&dev->v110use[idx]);
- /* For V.110 return unencoded data length */
- ret = v110_ret;
-			/* if the complete frame was sent, we free the skb;
-			   if not, the upper function will requeue the skb */
- if (ret == skb->len)
- dev_kfree_skb(skb);
- }
- } else
- if (dev->v110[idx])
- dev_kfree_skb(nskb);
- return ret;
-}
-
-static int
-isdn_add_channels(isdn_driver_t *d, int drvidx, int n, int adding)
-{
- int j, k, m;
-
- init_waitqueue_head(&d->st_waitq);
- if (d->flags & DRV_FLAG_RUNNING)
- return -1;
- if (n < 1) return 0;
-
- m = (adding) ? d->channels + n : n;
-
- if (dev->channels + n > ISDN_MAX_CHANNELS) {
- printk(KERN_WARNING "register_isdn: Max. %d channels supported\n",
- ISDN_MAX_CHANNELS);
- return -1;
- }
-
- if ((adding) && (d->rcverr))
- kfree(d->rcverr);
- if (!(d->rcverr = kcalloc(m, sizeof(int), GFP_ATOMIC))) {
- printk(KERN_WARNING "register_isdn: Could not alloc rcverr\n");
- return -1;
- }
-
- if ((adding) && (d->rcvcount))
- kfree(d->rcvcount);
- if (!(d->rcvcount = kcalloc(m, sizeof(int), GFP_ATOMIC))) {
- printk(KERN_WARNING "register_isdn: Could not alloc rcvcount\n");
- if (!adding)
- kfree(d->rcverr);
- return -1;
- }
-
- if ((adding) && (d->rpqueue)) {
- for (j = 0; j < d->channels; j++)
- skb_queue_purge(&d->rpqueue[j]);
- kfree(d->rpqueue);
- }
- d->rpqueue = kmalloc_array(m, sizeof(struct sk_buff_head), GFP_ATOMIC);
- if (!d->rpqueue) {
- printk(KERN_WARNING "register_isdn: Could not alloc rpqueue\n");
- if (!adding) {
- kfree(d->rcvcount);
- kfree(d->rcverr);
- }
- return -1;
- }
- for (j = 0; j < m; j++) {
- skb_queue_head_init(&d->rpqueue[j]);
- }
-
- if ((adding) && (d->rcv_waitq))
- kfree(d->rcv_waitq);
- d->rcv_waitq = kmalloc(array3_size(sizeof(wait_queue_head_t), 2, m),
- GFP_ATOMIC);
- if (!d->rcv_waitq) {
- printk(KERN_WARNING "register_isdn: Could not alloc rcv_waitq\n");
- if (!adding) {
- kfree(d->rpqueue);
- kfree(d->rcvcount);
- kfree(d->rcverr);
- }
- return -1;
- }
- d->snd_waitq = d->rcv_waitq + m;
- for (j = 0; j < m; j++) {
- init_waitqueue_head(&d->rcv_waitq[j]);
- init_waitqueue_head(&d->snd_waitq[j]);
- }
-
- dev->channels += n;
- for (j = d->channels; j < m; j++)
- for (k = 0; k < ISDN_MAX_CHANNELS; k++)
- if (dev->chanmap[k] < 0) {
- dev->chanmap[k] = j;
- dev->drvmap[k] = drvidx;
- break;
- }
- d->channels = m;
- return 0;
-}
-
-/*
- * Low-level-driver registration
- */
-
-static void
-set_global_features(void)
-{
- int drvidx;
-
- dev->global_features = 0;
- for (drvidx = 0; drvidx < ISDN_MAX_DRIVERS; drvidx++) {
- if (!dev->drv[drvidx])
- continue;
- if (dev->drv[drvidx]->interface)
- dev->global_features |= dev->drv[drvidx]->interface->features;
- }
-}
-
-#ifdef CONFIG_ISDN_DIVERSION
-
-static char *map_drvname(int di)
-{
- if ((di < 0) || (di >= ISDN_MAX_DRIVERS))
- return (NULL);
- return (dev->drvid[di]); /* driver name */
-} /* map_drvname */
-
-static int map_namedrv(char *id)
-{ int i;
-
- for (i = 0; i < ISDN_MAX_DRIVERS; i++)
- { if (!strcmp(dev->drvid[i], id))
- return (i);
- }
- return (-1);
-} /* map_namedrv */
-
-int DIVERT_REG_NAME(isdn_divert_if *i_div)
-{
- if (i_div->if_magic != DIVERT_IF_MAGIC)
- return (DIVERT_VER_ERR);
- switch (i_div->cmd)
- {
- case DIVERT_CMD_REL:
- if (divert_if != i_div)
- return (DIVERT_REL_ERR);
- divert_if = NULL; /* free interface */
- return (DIVERT_NO_ERR);
-
- case DIVERT_CMD_REG:
- if (divert_if)
- return (DIVERT_REG_ERR);
- i_div->ll_cmd = isdn_command; /* set command function */
- i_div->drv_to_name = map_drvname;
- i_div->name_to_drv = map_namedrv;
- divert_if = i_div; /* remember interface */
- return (DIVERT_NO_ERR);
-
- default:
- return (DIVERT_CMD_ERR);
- }
-} /* DIVERT_REG_NAME */
-
-EXPORT_SYMBOL(DIVERT_REG_NAME);
-
-#endif /* CONFIG_ISDN_DIVERSION */
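
DIVERT_REG_NAME() above implements a small register/release handshake: the diversion module passes a descriptor carrying a magic value and a command, registration stores the descriptor and hands back the link-level entry points, and release only succeeds for the currently registered descriptor. A stand-alone sketch of that handshake; the struct layout, constants and names below are illustrative, not the kernel's isdn_divert_if.

#include <stdio.h>

#define IF_MAGIC 0x25873401u
enum { CMD_REG = 1, CMD_REL = 2 };
enum { ERR_NONE = 0, ERR_VER, ERR_REG, ERR_REL, ERR_CMD };

struct divert_if {
	unsigned int magic;
	int cmd;
	int (*ll_cmd)(int arg);          /* filled in by the link level */
};

static int ll_command(int arg) { return arg; }
static struct divert_if *registered;

static int divert_register(struct divert_if *d)
{
	if (d->magic != IF_MAGIC)
		return ERR_VER;
	switch (d->cmd) {
	case CMD_REG:
		if (registered)
			return ERR_REG;          /* only one user at a time */
		d->ll_cmd = ll_command;          /* export link-level entry point */
		registered = d;
		return ERR_NONE;
	case CMD_REL:
		if (registered != d)
			return ERR_REL;
		registered = NULL;
		return ERR_NONE;
	default:
		return ERR_CMD;
	}
}

int main(void)
{
	struct divert_if d = { .magic = IF_MAGIC, .cmd = CMD_REG };

	printf("register: %d\n", divert_register(&d));   /* 0 */
	printf("ll_cmd(5): %d\n", d.ll_cmd(5));          /* 5 */
	d.cmd = CMD_REL;
	printf("release:  %d\n", divert_register(&d));   /* 0 */
	return 0;
}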
-
-
-EXPORT_SYMBOL(register_isdn);
-#ifdef CONFIG_ISDN_PPP
-EXPORT_SYMBOL(isdn_ppp_register_compressor);
-EXPORT_SYMBOL(isdn_ppp_unregister_compressor);
-#endif
-
-int
-register_isdn(isdn_if *i)
-{
- isdn_driver_t *d;
- int j;
- ulong flags;
- int drvidx;
-
- if (dev->drivers >= ISDN_MAX_DRIVERS) {
- printk(KERN_WARNING "register_isdn: Max. %d drivers supported\n",
- ISDN_MAX_DRIVERS);
- return 0;
- }
- if (!i->writebuf_skb) {
- printk(KERN_WARNING "register_isdn: No write routine given.\n");
- return 0;
- }
- if (!(d = kzalloc(sizeof(isdn_driver_t), GFP_KERNEL))) {
- printk(KERN_WARNING "register_isdn: Could not alloc driver-struct\n");
- return 0;
- }
-
- d->maxbufsize = i->maxbufsize;
- d->pktcount = 0;
- d->stavail = 0;
- d->flags = DRV_FLAG_LOADED;
- d->online = 0;
- d->interface = i;
- d->channels = 0;
- spin_lock_irqsave(&dev->lock, flags);
- for (drvidx = 0; drvidx < ISDN_MAX_DRIVERS; drvidx++)
- if (!dev->drv[drvidx])
- break;
- if (isdn_add_channels(d, drvidx, i->channels, 0)) {
- spin_unlock_irqrestore(&dev->lock, flags);
- kfree(d);
- return 0;
- }
- i->channels = drvidx;
- i->rcvcallb_skb = isdn_receive_skb_callback;
- i->statcallb = isdn_status_callback;
- if (!strlen(i->id))
- sprintf(i->id, "line%d", drvidx);
- for (j = 0; j < drvidx; j++)
- if (!strcmp(i->id, dev->drvid[j]))
- sprintf(i->id, "line%d", drvidx);
- dev->drv[drvidx] = d;
- strcpy(dev->drvid[drvidx], i->id);
- isdn_info_update();
- dev->drivers++;
- set_global_features();
- spin_unlock_irqrestore(&dev->lock, flags);
- return 1;
-}
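
Two details of register_isdn() above are easy to miss: the chosen driver slot is handed back to the low-level driver through i->channels, and a driver that supplies no id (or a clashing one) is silently renamed to "line<slot>". A user-space sketch of just that slot/id bookkeeping; sizes, names and the return convention are illustrative.

#include <stdio.h>
#include <string.h>

#define MAX_DRIVERS 32
#define IDLEN       20

static char drvid[MAX_DRIVERS][IDLEN];
static int used[MAX_DRIVERS];

static int register_driver(char *id)
{
	int slot, j;

	for (slot = 0; slot < MAX_DRIVERS; slot++)
		if (!used[slot])
			break;
	if (slot == MAX_DRIVERS)
		return -1;
	if (!strlen(id))
		snprintf(id, IDLEN, "line%d", slot);     /* default id */
	for (j = 0; j < slot; j++)
		if (!strcmp(id, drvid[j]))
			snprintf(id, IDLEN, "line%d", slot);   /* resolve collision */
	used[slot] = 1;
	snprintf(drvid[slot], IDLEN, "%s", id);
	return slot;
}

int main(void)
{
	char a[IDLEN] = "", b[IDLEN] = "HiSax", c[IDLEN] = "HiSax";

	printf("%d %s\n", register_driver(a), a);   /* 0 line0 */
	printf("%d %s\n", register_driver(b), b);   /* 1 HiSax */
	printf("%d %s\n", register_driver(c), c);   /* 2 line2 (collision) */
	return 0;
}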
-
-/*
-*****************************************************************************
-* And now the modules code.
-*****************************************************************************
-*/
-
-static char *
-isdn_getrev(const char *revision)
-{
- char *rev;
- char *p;
-
- if ((p = strchr(revision, ':'))) {
- rev = p + 2;
- p = strchr(rev, '$');
- *--p = 0;
- } else
- rev = "???";
- return rev;
-}
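
isdn_getrev() above reduces a CVS keyword string to the bare revision number. A quick stand-alone rendering of the same string surgery (the input string is just an example):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char rev[] = "$Revision: 1.1.2.2 $";
	const char *out = "???";
	char *p = strchr(rev, ':');

	if (p) {
		out = p + 2;
		p = strchr(p, '$');
		if (p)
			*--p = 0;              /* strip the trailing " $" */
	}
	printf("%s\n", out);                   /* prints "1.1.2.2" */
	return 0;
}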
-
-/*
- * Allocate and initialize all data, register modem-devices
- */
-static int __init isdn_init(void)
-{
- int i;
- char tmprev[50];
-
- dev = vzalloc(sizeof(isdn_dev));
- if (!dev) {
- printk(KERN_WARNING "isdn: Could not allocate device-struct.\n");
- return -EIO;
- }
- timer_setup(&dev->timer, isdn_timer_funct, 0);
- spin_lock_init(&dev->lock);
- spin_lock_init(&dev->timerlock);
-#ifdef MODULE
- dev->owner = THIS_MODULE;
-#endif
- mutex_init(&dev->mtx);
- init_waitqueue_head(&dev->info_waitq);
- for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
- dev->drvmap[i] = -1;
- dev->chanmap[i] = -1;
- dev->m_idx[i] = -1;
- strcpy(dev->num[i], "???");
- }
- if (register_chrdev(ISDN_MAJOR, "isdn", &isdn_fops)) {
- printk(KERN_WARNING "isdn: Could not register control devices\n");
- vfree(dev);
- return -EIO;
- }
- if ((isdn_tty_modem_init()) < 0) {
- printk(KERN_WARNING "isdn: Could not register tty devices\n");
- vfree(dev);
- unregister_chrdev(ISDN_MAJOR, "isdn");
- return -EIO;
- }
-#ifdef CONFIG_ISDN_PPP
- if (isdn_ppp_init() < 0) {
- printk(KERN_WARNING "isdn: Could not create PPP-device-structs\n");
- isdn_tty_exit();
- unregister_chrdev(ISDN_MAJOR, "isdn");
- vfree(dev);
- return -EIO;
- }
-#endif /* CONFIG_ISDN_PPP */
-
- strcpy(tmprev, isdn_revision);
- printk(KERN_NOTICE "ISDN subsystem Rev: %s/", isdn_getrev(tmprev));
- strcpy(tmprev, isdn_net_revision);
- printk("%s/", isdn_getrev(tmprev));
- strcpy(tmprev, isdn_ppp_revision);
- printk("%s/", isdn_getrev(tmprev));
- strcpy(tmprev, isdn_audio_revision);
- printk("%s/", isdn_getrev(tmprev));
- strcpy(tmprev, isdn_v110_revision);
- printk("%s", isdn_getrev(tmprev));
-
-#ifdef MODULE
- printk(" loaded\n");
-#else
- printk("\n");
-#endif
- isdn_info_update();
- return 0;
-}
-
-/*
- * Unload module
- */
-static void __exit isdn_exit(void)
-{
-#ifdef CONFIG_ISDN_PPP
- isdn_ppp_cleanup();
-#endif
- if (isdn_net_rmall() < 0) {
- printk(KERN_WARNING "isdn: net-device busy, remove cancelled\n");
- return;
- }
- isdn_tty_exit();
- unregister_chrdev(ISDN_MAJOR, "isdn");
- del_timer_sync(&dev->timer);
- /* call vfree with interrupts enabled, else it will hang */
- vfree(dev);
- printk(KERN_NOTICE "ISDN-subsystem unloaded\n");
-}
-
-module_init(isdn_init);
-module_exit(isdn_exit);
diff --git a/drivers/isdn/i4l/isdn_common.h b/drivers/isdn/i4l/isdn_common.h
deleted file mode 100644
index 2260ef07ab9c..000000000000
--- a/drivers/isdn/i4l/isdn_common.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/* $Id: isdn_common.h,v 1.1.2.2 2004/01/12 22:37:19 keil Exp $
- *
- * header for Linux ISDN subsystem
- * common used functions and debugging-switches (linklevel).
- *
- * Copyright 1994-1999 by Fritz Elfert (fritz@isdn4linux.de)
- * Copyright 1995,96 by Thinking Objects Software GmbH Wuerzburg
- * Copyright 1995,96 by Michael Hipp (Michael.Hipp@student.uni-tuebingen.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#undef ISDN_DEBUG_MODEM_OPEN
-#undef ISDN_DEBUG_MODEM_IOCTL
-#undef ISDN_DEBUG_MODEM_WAITSENT
-#undef ISDN_DEBUG_MODEM_HUP
-#undef ISDN_DEBUG_MODEM_ICALL
-#undef ISDN_DEBUG_MODEM_DUMP
-#undef ISDN_DEBUG_MODEM_VOICE
-#undef ISDN_DEBUG_AT
-#undef ISDN_DEBUG_NET_DUMP
-#undef ISDN_DEBUG_NET_DIAL
-#undef ISDN_DEBUG_NET_ICALL
-
-/* Prototypes */
-extern void isdn_lock_drivers(void);
-extern void isdn_unlock_drivers(void);
-extern void isdn_free_channel(int di, int ch, int usage);
-extern void isdn_all_eaz(int di, int ch);
-extern int isdn_command(isdn_ctrl *);
-extern int isdn_dc2minor(int di, int ch);
-extern void isdn_info_update(void);
-extern char *isdn_map_eaz2msn(char *msn, int di);
-extern void isdn_timer_ctrl(int tf, int onoff);
-extern void isdn_unexclusive_channel(int di, int ch);
-extern int isdn_getnum(char **);
-extern int isdn_readbchan(int, int, u_char *, u_char *, int, wait_queue_head_t *);
-extern int isdn_readbchan_tty(int, int, struct tty_port *, int);
-extern int isdn_get_free_channel(int, int, int, int, int, char *);
-extern int isdn_writebuf_skb_stub(int, int, int, struct sk_buff *);
-extern int register_isdn(isdn_if *i);
-extern int isdn_msncmp(const char *, const char *);
-#if defined(ISDN_DEBUG_NET_DUMP) || defined(ISDN_DEBUG_MODEM_DUMP)
-extern void isdn_dumppkt(char *, u_char *, int, int);
-#endif
diff --git a/drivers/isdn/i4l/isdn_concap.c b/drivers/isdn/i4l/isdn_concap.c
deleted file mode 100644
index 336523ec077c..000000000000
--- a/drivers/isdn/i4l/isdn_concap.c
+++ /dev/null
@@ -1,99 +0,0 @@
-/* $Id: isdn_concap.c,v 1.1.2.2 2004/01/12 22:37:19 keil Exp $
- *
- * Linux ISDN subsystem, protocol encapsulation
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-/* Stuff to support the concap_proto by isdn4linux. isdn4linux-specific
- * stuff goes here. Stuff that depends only on the concap protocol goes to
- * another -- protocol specific -- source file.
- *
- */
-
-
-#include <linux/isdn.h>
-#include "isdn_x25iface.h"
-#include "isdn_net.h"
-#include <linux/concap.h>
-#include "isdn_concap.h"
-
-
-/* The following set of device service operations is for encapsulation
-   protocols that require reliable datalink semantics. That means:
-
-   - before any data is submitted, the connection must explicitly
-     be set up.
-   - after the successful set-up of the connection is signalled, the
-     connection is considered to be reliably up.
-
-   Auto-dialing is not compatible with these requirements. Thus, auto-dialing
-   is completely bypassed.
-
-   It might be possible to implement a (non-standardized) datalink protocol
-   that provides a reliable data link service while using some auto-dialing
-   mechanism. Such a protocol would need an auxiliary channel (i.e. user-user
-   signaling on the D-channel) while the B-channel is down.
-*/
-
-
-static int isdn_concap_dl_data_req(struct concap_proto *concap, struct sk_buff *skb)
-{
- struct net_device *ndev = concap->net_dev;
- isdn_net_dev *nd = ((isdn_net_local *) netdev_priv(ndev))->netdev;
- isdn_net_local *lp = isdn_net_get_locked_lp(nd);
-
- IX25DEBUG("isdn_concap_dl_data_req: %s \n", concap->net_dev->name);
- if (!lp) {
- IX25DEBUG("isdn_concap_dl_data_req: %s : isdn_net_send_skb returned %d\n", concap->net_dev->name, 1);
- return 1;
- }
- lp->huptimer = 0;
- isdn_net_writebuf_skb(lp, skb);
- spin_unlock_bh(&lp->xmit_lock);
- IX25DEBUG("isdn_concap_dl_data_req: %s : isdn_net_send_skb returned %d\n", concap->net_dev->name, 0);
- return 0;
-}
-
-
-static int isdn_concap_dl_connect_req(struct concap_proto *concap)
-{
- struct net_device *ndev = concap->net_dev;
- isdn_net_local *lp = netdev_priv(ndev);
- int ret;
- IX25DEBUG("isdn_concap_dl_connect_req: %s \n", ndev->name);
-
- /* dial ... */
- ret = isdn_net_dial_req(lp);
- if (ret) IX25DEBUG("dialing failed\n");
- return ret;
-}
-
-static int isdn_concap_dl_disconn_req(struct concap_proto *concap)
-{
- IX25DEBUG("isdn_concap_dl_disconn_req: %s \n", concap->net_dev->name);
-
- isdn_net_hangup(concap->net_dev);
- return 0;
-}
-
-struct concap_device_ops isdn_concap_reliable_dl_dops = {
- .data_req = &isdn_concap_dl_data_req,
- .connect_req = &isdn_concap_dl_connect_req,
- .disconn_req = &isdn_concap_dl_disconn_req
-};
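
The ops table just defined is how the X.25 concap layer drives an ISDN interface with reliable-datalink semantics: connect first, submit data only once the link is up, and tear it down explicitly. A stand-alone sketch of that dispatch pattern; the simplified types and signatures are illustrative stand-ins for struct concap_proto and struct concap_device_ops from <linux/concap.h>.

#include <stdio.h>

struct proto;                            /* stands in for struct concap_proto */

struct device_ops {
	int (*connect_req)(struct proto *);
	int (*data_req)(struct proto *, const char *msg);
	int (*disconn_req)(struct proto *);
};

static int my_connect(struct proto *p)  { (void)p; printf("dial...\n"); return 0; }
static int my_data(struct proto *p, const char *m) { (void)p; printf("tx: %s\n", m); return 0; }
static int my_disconn(struct proto *p) { (void)p; printf("hangup\n"); return 0; }

static const struct device_ops dops = {
	.connect_req = my_connect,
	.data_req    = my_data,
	.disconn_req = my_disconn,
};

int main(void)
{
	struct proto *p = NULL;              /* unused in this sketch */

	if (dops.connect_req(p) == 0) {      /* reliable link: connect before data */
		dops.data_req(p, "hello");
		dops.disconn_req(p);
	}
	return 0;
}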
-
-/* The following should go into a dedicated source file so that
-   this source file does not need to include any protocol-specific header
-   files. For now:
-*/
-struct concap_proto *isdn_concap_new(int encap)
-{
- switch (encap) {
- case ISDN_NET_ENCAP_X25IFACE:
- return isdn_x25iface_proto_new();
- }
- return NULL;
-}
diff --git a/drivers/isdn/i4l/isdn_concap.h b/drivers/isdn/i4l/isdn_concap.h
deleted file mode 100644
index cd7e3ba74e25..000000000000
--- a/drivers/isdn/i4l/isdn_concap.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* $Id: isdn_concap.h,v 1.1.2.2 2004/01/12 22:37:19 keil Exp $
- *
- * Linux ISDN subsystem, protocol encapsulation
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-extern struct concap_device_ops isdn_concap_reliable_dl_dops;
-extern struct concap_proto *isdn_concap_new(int);
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
deleted file mode 100644
index c138f66f2659..000000000000
--- a/drivers/isdn/i4l/isdn_net.c
+++ /dev/null
@@ -1,3198 +0,0 @@
-/* $Id: isdn_net.c,v 1.1.2.2 2004/01/12 22:37:19 keil Exp $
- *
- * Linux ISDN subsystem, network interfaces and related functions (linklevel).
- *
- * Copyright 1994-1998 by Fritz Elfert (fritz@isdn4linux.de)
- * Copyright 1995,96 by Thinking Objects Software GmbH Wuerzburg
- * Copyright 1995,96 by Michael Hipp (Michael.Hipp@student.uni-tuebingen.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- * Data Over Voice (DOV) support added - Guy Ellis 23-Mar-02
- * guy@traverse.com.au
- * Outgoing calls - looks for a 'V' in first char of dialed number
- * Incoming calls - checks first character of eaz as follows:
- * Numeric - accept DATA only - original functionality
- * 'V' - accept VOICE (DOV) only
- * 'B' - accept BOTH DATA and DOV types
- *
- * Jan 2001: fix CISCO HDLC Bjoern A. Zeeb <i4l@zabbadoz.net>
- * for info on the protocol, see
- * http://i4l.zabbadoz.net/i4l/cisco-hdlc.txt
- */
-
-#include <linux/isdn.h>
-#include <linux/slab.h>
-#include <net/arp.h>
-#include <net/dst.h>
-#include <net/pkt_sched.h>
-#include <linux/inetdevice.h>
-#include "isdn_common.h"
-#include "isdn_net.h"
-#ifdef CONFIG_ISDN_PPP
-#include "isdn_ppp.h"
-#endif
-#ifdef CONFIG_ISDN_X25
-#include <linux/concap.h>
-#include "isdn_concap.h"
-#endif
-
-
-/*
- * Outline of new tbusy handling:
- *
- * The old method, roughly speaking, consisted of setting tbusy when entering
- * isdn_net_start_xmit() and at several other locations and clearing
- * it from isdn_net_start_xmit() thread when sending was successful.
- *
- * With 2.3.x multithreaded network core, to prevent problems, tbusy should
- * only be set by the isdn_net_start_xmit() thread and only when a tx-busy
- * condition is detected. Other threads (in particular isdn_net_stat_callb())
- * are only allowed to clear tbusy.
- *
- * -HE
- */
-
-/*
- * About SOFTNET:
- * Most of the changes were pretty obvious and basically done by HE already.
- *
- * One problem of the isdn net device code is that it uses struct net_device
- * for masters and slaves. However, only master interfaces are registered to
- * the network layer, and therefore, it only makes sense to call netif_*
- * functions on them.
- *
- * --KG
- */
-
-/*
- * Find out if the netdevice has been ifup-ed yet.
- * For slaves, look at the corresponding master.
- */
-static __inline__ int isdn_net_device_started(isdn_net_dev *n)
-{
- isdn_net_local *lp = n->local;
- struct net_device *dev;
-
- if (lp->master)
- dev = lp->master;
- else
- dev = n->dev;
- return netif_running(dev);
-}
-
-/*
- * wake up the network -> net_device queue.
- * For slaves, wake the corresponding master interface.
- */
-static __inline__ void isdn_net_device_wake_queue(isdn_net_local *lp)
-{
- if (lp->master)
- netif_wake_queue(lp->master);
- else
- netif_wake_queue(lp->netdev->dev);
-}
-
-/*
- * stop the network -> net_device queue.
- * For slaves, stop the corresponding master interface.
- */
-static __inline__ void isdn_net_device_stop_queue(isdn_net_local *lp)
-{
- if (lp->master)
- netif_stop_queue(lp->master);
- else
- netif_stop_queue(lp->netdev->dev);
-}
-
-/*
- * find out if the net_device which this lp belongs to (lp can be
- * master or slave) is busy. It's busy iff all (master and slave)
- * queues are busy
- */
-static __inline__ int isdn_net_device_busy(isdn_net_local *lp)
-{
- isdn_net_local *nlp;
- isdn_net_dev *nd;
- unsigned long flags;
-
- if (!isdn_net_lp_busy(lp))
- return 0;
-
- if (lp->master)
- nd = ISDN_MASTER_PRIV(lp)->netdev;
- else
- nd = lp->netdev;
-
- spin_lock_irqsave(&nd->queue_lock, flags);
- nlp = lp->next;
- while (nlp != lp) {
- if (!isdn_net_lp_busy(nlp)) {
- spin_unlock_irqrestore(&nd->queue_lock, flags);
- return 0;
- }
- nlp = nlp->next;
- }
- spin_unlock_irqrestore(&nd->queue_lock, flags);
- return 1;
-}
-
-static __inline__ void isdn_net_inc_frame_cnt(isdn_net_local *lp)
-{
- atomic_inc(&lp->frame_cnt);
- if (isdn_net_device_busy(lp))
- isdn_net_device_stop_queue(lp);
-}
-
-static __inline__ void isdn_net_dec_frame_cnt(isdn_net_local *lp)
-{
- atomic_dec(&lp->frame_cnt);
-
- if (!(isdn_net_device_busy(lp))) {
- if (!skb_queue_empty(&lp->super_tx_queue)) {
- schedule_work(&lp->tqueue);
- } else {
- isdn_net_device_wake_queue(lp);
- }
- }
-}
-
-static __inline__ void isdn_net_zero_frame_cnt(isdn_net_local *lp)
-{
- atomic_set(&lp->frame_cnt, 0);
-}
-
-/* For 2.2.x we leave the transmitter busy timeout at 2 secs, just
- * to be safe.
- * For 2.3.x we push it up to 20 secs, because call establishment
- * (in particular callback) may take such a long time, and we
- * don't want confusing messages in the log. However, there is a slight
- * possibility that this large timeout will break other things like MPPP,
- * which might rely on the tx timeout. If so, we'll find out this way...
- */
-
-#define ISDN_NET_TX_TIMEOUT (20 * HZ)
-
-/* Prototypes */
-
-static int isdn_net_force_dial_lp(isdn_net_local *);
-static netdev_tx_t isdn_net_start_xmit(struct sk_buff *,
- struct net_device *);
-
-static void isdn_net_ciscohdlck_connected(isdn_net_local *lp);
-static void isdn_net_ciscohdlck_disconnected(isdn_net_local *lp);
-
-char *isdn_net_revision = "$Revision: 1.1.2.2 $";
-
-/*
- * Code for raw-networking over ISDN
- */
-
-static void
-isdn_net_unreachable(struct net_device *dev, struct sk_buff *skb, char *reason)
-{
- if (skb) {
-
- u_short proto = ntohs(skb->protocol);
-
- printk(KERN_DEBUG "isdn_net: %s: %s, signalling dst_link_failure %s\n",
- dev->name,
- (reason != NULL) ? reason : "unknown",
- (proto != ETH_P_IP) ? "Protocol != ETH_P_IP" : "");
-
- dst_link_failure(skb);
- }
- else { /* dial not triggered by rawIP packet */
- printk(KERN_DEBUG "isdn_net: %s: %s\n",
- dev->name,
- (reason != NULL) ? reason : "reason unknown");
- }
-}
-
-static void
-isdn_net_reset(struct net_device *dev)
-{
-#ifdef CONFIG_ISDN_X25
- struct concap_device_ops *dops =
- ((isdn_net_local *)netdev_priv(dev))->dops;
- struct concap_proto *cprot =
- ((isdn_net_local *)netdev_priv(dev))->netdev->cprot;
-#endif
-#ifdef CONFIG_ISDN_X25
- if (cprot && cprot->pops && dops)
- cprot->pops->restart(cprot, dev, dops);
-#endif
-}
-
-/* Open/initialize the board. */
-static int
-isdn_net_open(struct net_device *dev)
-{
- int i;
- struct net_device *p;
- struct in_device *in_dev;
-
- /* moved here from isdn_net_reset, because only the master has an
- interface associated which is supposed to be started. BTW:
- we need to call netif_start_queue, not netif_wake_queue here */
- netif_start_queue(dev);
-
- isdn_net_reset(dev);
-	/* Fill in the MAC-level header (not needed, but for compatibility...) */
- for (i = 0; i < ETH_ALEN - sizeof(u32); i++)
- dev->dev_addr[i] = 0xfc;
- if ((in_dev = dev->ip_ptr) != NULL) {
- /*
- * Any address will do - we take the first
- */
- struct in_ifaddr *ifa = in_dev->ifa_list;
- if (ifa != NULL)
- memcpy(dev->dev_addr + 2, &ifa->ifa_local, 4);
- }
-
- /* If this interface has slaves, start them also */
- p = MASTER_TO_SLAVE(dev);
- if (p) {
- while (p) {
- isdn_net_reset(p);
- p = MASTER_TO_SLAVE(p);
- }
- }
- isdn_lock_drivers();
- return 0;
-}
-
-/*
- * Assign an ISDN-channel to a net-interface
- */
-static void
-isdn_net_bind_channel(isdn_net_local *lp, int idx)
-{
- lp->flags |= ISDN_NET_CONNECTED;
- lp->isdn_device = dev->drvmap[idx];
- lp->isdn_channel = dev->chanmap[idx];
- dev->rx_netdev[idx] = lp->netdev;
- dev->st_netdev[idx] = lp->netdev;
-}
-
-/*
- * unbind a net-interface (resets interface after an error)
- */
-static void
-isdn_net_unbind_channel(isdn_net_local *lp)
-{
- skb_queue_purge(&lp->super_tx_queue);
-
- if (!lp->master) { /* reset only master device */
- /* Moral equivalent of dev_purge_queues():
- BEWARE! This chunk of code cannot be called from hardware
- interrupt handler. I hope it is true. --ANK
- */
- qdisc_reset_all_tx(lp->netdev->dev);
- }
- lp->dialstate = 0;
- dev->rx_netdev[isdn_dc2minor(lp->isdn_device, lp->isdn_channel)] = NULL;
- dev->st_netdev[isdn_dc2minor(lp->isdn_device, lp->isdn_channel)] = NULL;
- if (lp->isdn_device != -1 && lp->isdn_channel != -1)
- isdn_free_channel(lp->isdn_device, lp->isdn_channel,
- ISDN_USAGE_NET);
- lp->flags &= ~ISDN_NET_CONNECTED;
- lp->isdn_device = -1;
- lp->isdn_channel = -1;
-}
-
-/*
- * Perform auto-hangup and cps-calculation for net-interfaces.
- *
- * auto-hangup:
- * Increment the idle-counter (this counter is reset on any incoming or
- * outgoing packet); if the counter exceeds the configured limit, either
- * hang up immediately or, if configured, wait until just before the next
- * charge-info.
- *
- * cps-calculation (needed for dynamic channel-bundling):
- * Since this function is called every second, simply reset the
- * byte-counter of the interface after copying it to the cps-variable.
- */
-static unsigned long last_jiffies = -HZ;
-
-void
-isdn_net_autohup(void)
-{
- isdn_net_dev *p = dev->netdev;
- int anymore;
-
- anymore = 0;
- while (p) {
- isdn_net_local *l = p->local;
- if (jiffies == last_jiffies)
- l->cps = l->transcount;
- else
- l->cps = (l->transcount * HZ) / (jiffies - last_jiffies);
- l->transcount = 0;
- if (dev->net_verbose > 3)
- printk(KERN_DEBUG "%s: %d bogocps\n", p->dev->name, l->cps);
- if ((l->flags & ISDN_NET_CONNECTED) && (!l->dialstate)) {
- anymore = 1;
- l->huptimer++;
- /*
- * if there is some dialmode where timeout-hangup
- * should _not_ be done, check for that here
- */
- if ((l->onhtime) &&
- (l->huptimer > l->onhtime))
- {
- if (l->hupflags & ISDN_MANCHARGE &&
- l->hupflags & ISDN_CHARGEHUP) {
- while (time_after(jiffies, l->chargetime + l->chargeint))
- l->chargetime += l->chargeint;
- if (time_after(jiffies, l->chargetime + l->chargeint - 2 * HZ))
- if (l->outgoing || l->hupflags & ISDN_INHUP)
- isdn_net_hangup(p->dev);
- } else if (l->outgoing) {
- if (l->hupflags & ISDN_CHARGEHUP) {
- if (l->hupflags & ISDN_WAITCHARGE) {
- printk(KERN_DEBUG "isdn_net: Hupflags of %s are %X\n",
- p->dev->name, l->hupflags);
- isdn_net_hangup(p->dev);
- } else if (time_after(jiffies, l->chargetime + l->chargeint)) {
- printk(KERN_DEBUG
- "isdn_net: %s: chtime = %lu, chint = %d\n",
- p->dev->name, l->chargetime, l->chargeint);
- isdn_net_hangup(p->dev);
- }
- } else
- isdn_net_hangup(p->dev);
- } else if (l->hupflags & ISDN_INHUP)
- isdn_net_hangup(p->dev);
- }
-
- if (dev->global_flags & ISDN_GLOBAL_STOPPED || (ISDN_NET_DIALMODE(*l) == ISDN_NET_DM_OFF)) {
- isdn_net_hangup(p->dev);
- break;
- }
- }
- p = (isdn_net_dev *) p->next;
- }
- last_jiffies = jiffies;
- isdn_timer_ctrl(ISDN_TIMER_NETHANGUP, anymore);
-}
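
The "bogocps" value printed above is simply bytes per second, scaled from the per-interface byte counter and the elapsed jiffies; the counter is then reset for the next one-second tick. The arithmetic, spelled out in a stand-alone example with an assumed tick rate of 100:

#include <stdio.h>

int main(void)
{
	const long hz = 100;                        /* example tick rate (HZ) */
	long transcount = 6400;                     /* bytes since the last run */
	long jiffies = 1200, last_jiffies = 1100;   /* one second apart */
	long cps = (jiffies == last_jiffies) ?
		transcount : transcount * hz / (jiffies - last_jiffies);

	printf("%ld bytes/s\n", cps);               /* 6400 bytes/s */
	return 0;
}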
-
-static void isdn_net_lp_disconnected(isdn_net_local *lp)
-{
- isdn_net_rm_from_bundle(lp);
-}
-
-/*
- * Handle status-messages from ISDN-interfacecard.
- * This function is called from within the main-status-dispatcher
- * isdn_status_callback, which itself is called from the low-level driver.
- * Return: 1 = Event handled, 0 = not for us or unknown Event.
- */
-int
-isdn_net_stat_callback(int idx, isdn_ctrl *c)
-{
- isdn_net_dev *p = dev->st_netdev[idx];
- int cmd = c->command;
-
- if (p) {
- isdn_net_local *lp = p->local;
-#ifdef CONFIG_ISDN_X25
- struct concap_proto *cprot = lp->netdev->cprot;
- struct concap_proto_ops *pops = cprot ? cprot->pops : NULL;
-#endif
- switch (cmd) {
- case ISDN_STAT_BSENT:
- /* A packet has successfully been sent out */
- if ((lp->flags & ISDN_NET_CONNECTED) &&
- (!lp->dialstate)) {
- isdn_net_dec_frame_cnt(lp);
- lp->stats.tx_packets++;
- lp->stats.tx_bytes += c->parm.length;
- }
- return 1;
- case ISDN_STAT_DCONN:
- /* D-Channel is up */
- switch (lp->dialstate) {
- case 4:
- case 7:
- case 8:
- lp->dialstate++;
- return 1;
- case 12:
- lp->dialstate = 5;
- return 1;
- }
- break;
- case ISDN_STAT_DHUP:
- /* Either D-Channel-hangup or error during dialout */
-#ifdef CONFIG_ISDN_X25
-			/* If we are not connected then dialing had
-			   failed. If there are generic encap protocol
-			   receiver routines, signal the closure of
-			   the link. */
-
- if (!(lp->flags & ISDN_NET_CONNECTED)
- && pops && pops->disconn_ind)
- pops->disconn_ind(cprot);
-#endif /* CONFIG_ISDN_X25 */
- if ((!lp->dialstate) && (lp->flags & ISDN_NET_CONNECTED)) {
- if (lp->p_encap == ISDN_NET_ENCAP_CISCOHDLCK)
- isdn_net_ciscohdlck_disconnected(lp);
-#ifdef CONFIG_ISDN_PPP
- if (lp->p_encap == ISDN_NET_ENCAP_SYNCPPP)
- isdn_ppp_free(lp);
-#endif
- isdn_net_lp_disconnected(lp);
- isdn_all_eaz(lp->isdn_device, lp->isdn_channel);
- printk(KERN_INFO "%s: remote hangup\n", p->dev->name);
- printk(KERN_INFO "%s: Chargesum is %d\n", p->dev->name,
- lp->charge);
- isdn_net_unbind_channel(lp);
- return 1;
- }
- break;
-#ifdef CONFIG_ISDN_X25
- case ISDN_STAT_BHUP:
- /* B-Channel-hangup */
-			/* check whether there are generic encap protocol
-			   receiver routines and signal the closure of
-			   the link */
- if (pops && pops->disconn_ind) {
- pops->disconn_ind(cprot);
- return 1;
- }
- break;
-#endif /* CONFIG_ISDN_X25 */
- case ISDN_STAT_BCONN:
- /* B-Channel is up */
- isdn_net_zero_frame_cnt(lp);
- switch (lp->dialstate) {
- case 5:
- case 6:
- case 7:
- case 8:
- case 9:
- case 10:
- case 12:
- if (lp->dialstate <= 6) {
- dev->usage[idx] |= ISDN_USAGE_OUTGOING;
- isdn_info_update();
- } else
- dev->rx_netdev[idx] = p;
- lp->dialstate = 0;
- isdn_timer_ctrl(ISDN_TIMER_NETHANGUP, 1);
- if (lp->p_encap == ISDN_NET_ENCAP_CISCOHDLCK)
- isdn_net_ciscohdlck_connected(lp);
- if (lp->p_encap != ISDN_NET_ENCAP_SYNCPPP) {
- if (lp->master) { /* is lp a slave? */
- isdn_net_dev *nd = ISDN_MASTER_PRIV(lp)->netdev;
- isdn_net_add_to_bundle(nd, lp);
- }
- }
- printk(KERN_INFO "isdn_net: %s connected\n", p->dev->name);
- /* If first Chargeinfo comes before B-Channel connect,
- * we correct the timestamp here.
- */
- lp->chargetime = jiffies;
-
- /* reset dial-timeout */
- lp->dialstarted = 0;
- lp->dialwait_timer = 0;
-
-#ifdef CONFIG_ISDN_PPP
- if (lp->p_encap == ISDN_NET_ENCAP_SYNCPPP)
- isdn_ppp_wakeup_daemon(lp);
-#endif
-#ifdef CONFIG_ISDN_X25
-			/* check whether there are generic concap receiver routines */
- if (pops)
- if (pops->connect_ind)
- pops->connect_ind(cprot);
-#endif /* CONFIG_ISDN_X25 */
- /* ppp needs to do negotiations first */
- if (lp->p_encap != ISDN_NET_ENCAP_SYNCPPP)
- isdn_net_device_wake_queue(lp);
- return 1;
- }
- break;
- case ISDN_STAT_NODCH:
- /* No D-Channel avail. */
- if (lp->dialstate == 4) {
- lp->dialstate--;
- return 1;
- }
- break;
- case ISDN_STAT_CINF:
- /* Charge-info from TelCo. Calculate interval between
- * charge-infos and set timestamp for last info for
- * usage by isdn_net_autohup()
- */
- lp->charge++;
- if (lp->hupflags & ISDN_HAVECHARGE) {
- lp->hupflags &= ~ISDN_WAITCHARGE;
- lp->chargeint = jiffies - lp->chargetime - (2 * HZ);
- }
- if (lp->hupflags & ISDN_WAITCHARGE)
- lp->hupflags |= ISDN_HAVECHARGE;
- lp->chargetime = jiffies;
- printk(KERN_DEBUG "isdn_net: Got CINF chargetime of %s now %lu\n",
- p->dev->name, lp->chargetime);
- return 1;
- }
- }
- return 0;
-}
-
-/*
- * Perform dialout for net-interfaces and timeout-handling for
- * D-Channel-up and B-Channel-up Messages.
- * This function is initially called from within isdn_net_start_xmit() or
- * or isdn_net_find_icall() after initializing the dialstate for an
- * interface. If further calls are needed, the function schedules itself
- * for a timer-callback via isdn_timer_function().
- * The dialstate is also affected by incoming status-messages from
- * the ISDN-Channel which are handled in isdn_net_stat_callback() above.
- */
-void
-isdn_net_dial(void)
-{
- isdn_net_dev *p = dev->netdev;
- int anymore = 0;
- int i;
- isdn_ctrl cmd;
- u_char *phone_number;
-
- while (p) {
- isdn_net_local *lp = p->local;
-
-#ifdef ISDN_DEBUG_NET_DIAL
- if (lp->dialstate)
- printk(KERN_DEBUG "%s: dialstate=%d\n", p->dev->name, lp->dialstate);
-#endif
- switch (lp->dialstate) {
- case 0:
- /* Nothing to do for this interface */
- break;
- case 1:
- /* Initiate dialout. Set phone-number-pointer to first number
- * of interface.
- */
- lp->dial = lp->phone[1];
- if (!lp->dial) {
- printk(KERN_WARNING "%s: phone number deleted?\n",
- p->dev->name);
- isdn_net_hangup(p->dev);
- break;
- }
- anymore = 1;
-
- if (lp->dialtimeout > 0)
- if (lp->dialstarted == 0 || time_after(jiffies, lp->dialstarted + lp->dialtimeout + lp->dialwait)) {
- lp->dialstarted = jiffies;
- lp->dialwait_timer = 0;
- }
-
- lp->dialstate++;
- /* Fall through */
- case 2:
- /* Prepare dialing. Clear EAZ, then set EAZ. */
- cmd.driver = lp->isdn_device;
- cmd.arg = lp->isdn_channel;
- cmd.command = ISDN_CMD_CLREAZ;
- isdn_command(&cmd);
- sprintf(cmd.parm.num, "%s", isdn_map_eaz2msn(lp->msn, cmd.driver));
- cmd.command = ISDN_CMD_SETEAZ;
- isdn_command(&cmd);
- lp->dialretry = 0;
- anymore = 1;
- lp->dialstate++;
- /* Fall through */
- case 3:
- /* Setup interface, dial current phone-number, switch to next number.
- * If list of phone-numbers is exhausted, increment
- * retry-counter.
- */
- if (dev->global_flags & ISDN_GLOBAL_STOPPED || (ISDN_NET_DIALMODE(*lp) == ISDN_NET_DM_OFF)) {
- char *s;
- if (dev->global_flags & ISDN_GLOBAL_STOPPED)
- s = "dial suppressed: isdn system stopped";
- else
- s = "dial suppressed: dialmode `off'";
- isdn_net_unreachable(p->dev, NULL, s);
- isdn_net_hangup(p->dev);
- break;
- }
- cmd.driver = lp->isdn_device;
- cmd.command = ISDN_CMD_SETL2;
- cmd.arg = lp->isdn_channel + (lp->l2_proto << 8);
- isdn_command(&cmd);
- cmd.driver = lp->isdn_device;
- cmd.command = ISDN_CMD_SETL3;
- cmd.arg = lp->isdn_channel + (lp->l3_proto << 8);
- isdn_command(&cmd);
- cmd.driver = lp->isdn_device;
- cmd.arg = lp->isdn_channel;
- if (!lp->dial) {
- printk(KERN_WARNING "%s: phone number deleted?\n",
- p->dev->name);
- isdn_net_hangup(p->dev);
- break;
- }
- if (!strncmp(lp->dial->num, "LEASED", strlen("LEASED"))) {
- lp->dialstate = 4;
- printk(KERN_INFO "%s: Open leased line ...\n", p->dev->name);
- } else {
- if (lp->dialtimeout > 0)
- if (time_after(jiffies, lp->dialstarted + lp->dialtimeout)) {
- lp->dialwait_timer = jiffies + lp->dialwait;
- lp->dialstarted = 0;
- isdn_net_unreachable(p->dev, NULL, "dial: timed out");
- isdn_net_hangup(p->dev);
- break;
- }
-
- cmd.driver = lp->isdn_device;
- cmd.command = ISDN_CMD_DIAL;
- cmd.parm.setup.si2 = 0;
-
- /* check for DOV */
- phone_number = lp->dial->num;
- if ((*phone_number == 'v') ||
- (*phone_number == 'V')) { /* DOV call */
- cmd.parm.setup.si1 = 1;
- } else { /* DATA call */
- cmd.parm.setup.si1 = 7;
- }
-
- strcpy(cmd.parm.setup.phone, phone_number);
- /*
- * Switch to next number or back to start if at end of list.
- */
- if (!(lp->dial = (isdn_net_phone *) lp->dial->next)) {
- lp->dial = lp->phone[1];
- lp->dialretry++;
-
- if (lp->dialretry > lp->dialmax) {
- if (lp->dialtimeout == 0) {
- lp->dialwait_timer = jiffies + lp->dialwait;
- lp->dialstarted = 0;
- isdn_net_unreachable(p->dev, NULL, "dial: tried all numbers dialmax times");
- }
- isdn_net_hangup(p->dev);
- break;
- }
- }
- sprintf(cmd.parm.setup.eazmsn, "%s",
- isdn_map_eaz2msn(lp->msn, cmd.driver));
- i = isdn_dc2minor(lp->isdn_device, lp->isdn_channel);
- if (i >= 0) {
- strcpy(dev->num[i], cmd.parm.setup.phone);
- dev->usage[i] |= ISDN_USAGE_OUTGOING;
- isdn_info_update();
- }
- printk(KERN_INFO "%s: dialing %d %s... %s\n", p->dev->name,
- lp->dialretry, cmd.parm.setup.phone,
- (cmd.parm.setup.si1 == 1) ? "DOV" : "");
- lp->dtimer = 0;
-#ifdef ISDN_DEBUG_NET_DIAL
- printk(KERN_DEBUG "dial: d=%d c=%d\n", lp->isdn_device,
- lp->isdn_channel);
-#endif
- isdn_command(&cmd);
- }
- lp->huptimer = 0;
- lp->outgoing = 1;
- if (lp->chargeint) {
- lp->hupflags |= ISDN_HAVECHARGE;
- lp->hupflags &= ~ISDN_WAITCHARGE;
- } else {
- lp->hupflags |= ISDN_WAITCHARGE;
- lp->hupflags &= ~ISDN_HAVECHARGE;
- }
- anymore = 1;
- lp->dialstate =
- (lp->cbdelay &&
- (lp->flags & ISDN_NET_CBOUT)) ? 12 : 4;
- break;
- case 4:
- /* Wait for D-Channel-connect.
- * If timeout, switch back to state 3.
- * Dialmax-handling moved to state 3.
- */
- if (lp->dtimer++ > ISDN_TIMER_DTIMEOUT10)
- lp->dialstate = 3;
- anymore = 1;
- break;
- case 5:
- /* Got D-Channel-Connect, send B-Channel-request */
- cmd.driver = lp->isdn_device;
- cmd.arg = lp->isdn_channel;
- cmd.command = ISDN_CMD_ACCEPTB;
- anymore = 1;
- lp->dtimer = 0;
- lp->dialstate++;
- isdn_command(&cmd);
- break;
- case 6:
- /* Wait for B- or D-Channel-connect. If timeout,
- * switch back to state 3.
- */
-#ifdef ISDN_DEBUG_NET_DIAL
- printk(KERN_DEBUG "dialtimer2: %d\n", lp->dtimer);
-#endif
- if (lp->dtimer++ > ISDN_TIMER_DTIMEOUT10)
- lp->dialstate = 3;
- anymore = 1;
- break;
- case 7:
- /* Got incoming Call, setup L2 and L3 protocols,
- * then wait for D-Channel-connect
- */
-#ifdef ISDN_DEBUG_NET_DIAL
- printk(KERN_DEBUG "dialtimer4: %d\n", lp->dtimer);
-#endif
- cmd.driver = lp->isdn_device;
- cmd.command = ISDN_CMD_SETL2;
- cmd.arg = lp->isdn_channel + (lp->l2_proto << 8);
- isdn_command(&cmd);
- cmd.driver = lp->isdn_device;
- cmd.command = ISDN_CMD_SETL3;
- cmd.arg = lp->isdn_channel + (lp->l3_proto << 8);
- isdn_command(&cmd);
- if (lp->dtimer++ > ISDN_TIMER_DTIMEOUT15)
- isdn_net_hangup(p->dev);
- else {
- anymore = 1;
- lp->dialstate++;
- }
- break;
- case 9:
- /* Got incoming D-Channel-Connect, send B-Channel-request */
- cmd.driver = lp->isdn_device;
- cmd.arg = lp->isdn_channel;
- cmd.command = ISDN_CMD_ACCEPTB;
- isdn_command(&cmd);
- anymore = 1;
- lp->dtimer = 0;
- lp->dialstate++;
- break;
- case 8:
- case 10:
- /* Wait for B- or D-channel-connect */
-#ifdef ISDN_DEBUG_NET_DIAL
- printk(KERN_DEBUG "dialtimer4: %d\n", lp->dtimer);
-#endif
- if (lp->dtimer++ > ISDN_TIMER_DTIMEOUT10)
- isdn_net_hangup(p->dev);
- else
- anymore = 1;
- break;
- case 11:
- /* Callback Delay */
- if (lp->dtimer++ > lp->cbdelay)
- lp->dialstate = 1;
- anymore = 1;
- break;
- case 12:
- /* Remote does callback. Hangup after cbdelay, then wait for incoming
- * call (in state 4).
- */
- if (lp->dtimer++ > lp->cbdelay)
- {
- printk(KERN_INFO "%s: hangup waiting for callback ...\n", p->dev->name);
- lp->dtimer = 0;
- lp->dialstate = 4;
- cmd.driver = lp->isdn_device;
- cmd.command = ISDN_CMD_HANGUP;
- cmd.arg = lp->isdn_channel;
- isdn_command(&cmd);
- isdn_all_eaz(lp->isdn_device, lp->isdn_channel);
- }
- anymore = 1;
- break;
- default:
- printk(KERN_WARNING "isdn_net: Illegal dialstate %d for device %s\n",
- lp->dialstate, p->dev->name);
- }
- p = (isdn_net_dev *) p->next;
- }
- isdn_timer_ctrl(ISDN_TIMER_NETDIAL, anymore);
-}
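
For readers untangling the switch above, here is the dialstate numbering it implements, together with the transitions driven from isdn_net_stat_callback() (ISDN_STAT_DCONN/BCONN/NODCH); a B-channel connect in any of states 5-10 or 12 completes the call and returns to state 0. The enum constant names are invented for readability; only the numbers and the comments reflect the deleted code.

enum dialstate {
	DIAL_IDLE        = 0,   /* nothing to do */
	DIAL_START       = 1,   /* initiate dialout, pick first phone number */
	DIAL_PREP        = 2,   /* clear and set EAZ */
	DIAL_DIALING     = 3,   /* set L2/L3, dial current number, handle retries */
	DIAL_WAIT_DCONN  = 4,   /* wait for D-channel connect (timeout/NODCH -> 3) */
	DIAL_REQ_BCHAN   = 5,   /* outgoing D-connect seen, request B-channel */
	DIAL_WAIT_CONN   = 6,   /* wait for B-/D-connect (timeout -> 3) */
	DIAL_IN_SETUP    = 7,   /* incoming call: set L2/L3 protocols */
	DIAL_IN_WAIT_D   = 8,   /* incoming: wait for D-connect (D-connect -> 9) */
	DIAL_IN_REQ_B    = 9,   /* incoming D-connect seen, request B-channel */
	DIAL_IN_WAIT_B   = 10,  /* incoming: wait for B-channel connect */
	DIAL_CB_DELAY    = 11,  /* callback delay, then back to 1 */
	DIAL_CB_WAIT     = 12,  /* remote callback: hang up, then wait (-> 4; D-connect -> 5) */
};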
-
-/*
- * Perform hangup for a net-interface.
- */
-void
-isdn_net_hangup(struct net_device *d)
-{
- isdn_net_local *lp = netdev_priv(d);
- isdn_ctrl cmd;
-#ifdef CONFIG_ISDN_X25
- struct concap_proto *cprot = lp->netdev->cprot;
- struct concap_proto_ops *pops = cprot ? cprot->pops : NULL;
-#endif
-
- if (lp->flags & ISDN_NET_CONNECTED) {
- if (lp->slave != NULL) {
- isdn_net_local *slp = ISDN_SLAVE_PRIV(lp);
- if (slp->flags & ISDN_NET_CONNECTED) {
- printk(KERN_INFO
- "isdn_net: hang up slave %s before %s\n",
- lp->slave->name, d->name);
- isdn_net_hangup(lp->slave);
- }
- }
- printk(KERN_INFO "isdn_net: local hangup %s\n", d->name);
-#ifdef CONFIG_ISDN_PPP
- if (lp->p_encap == ISDN_NET_ENCAP_SYNCPPP)
- isdn_ppp_free(lp);
-#endif
- isdn_net_lp_disconnected(lp);
-#ifdef CONFIG_ISDN_X25
- /* check whether there are generic encap protocol
- receiver routines and signal the closure of
- the link */
- if (pops && pops->disconn_ind)
- pops->disconn_ind(cprot);
-#endif /* CONFIG_ISDN_X25 */
-
- cmd.driver = lp->isdn_device;
- cmd.command = ISDN_CMD_HANGUP;
- cmd.arg = lp->isdn_channel;
- isdn_command(&cmd);
- printk(KERN_INFO "%s: Chargesum is %d\n", d->name, lp->charge);
- isdn_all_eaz(lp->isdn_device, lp->isdn_channel);
- }
- isdn_net_unbind_channel(lp);
-}
-
-typedef struct {
- __be16 source;
- __be16 dest;
-} ip_ports;
-
-static void
-isdn_net_log_skb(struct sk_buff *skb, isdn_net_local *lp)
-{
- /* hopefully, this was set correctly */
- const u_char *p = skb_network_header(skb);
- unsigned short proto = ntohs(skb->protocol);
- int data_ofs;
- ip_ports *ipp;
- char addinfo[100];
-
- addinfo[0] = '\0';
- /* This check stolen from 2.1.72 dev_queue_xmit_nit() */
- if (p < skb->data || skb_network_header(skb) >= skb_tail_pointer(skb)) {
- /* fall back to the old isdn_net_log_packet() method */
- char *buf = skb->data;
-
- printk(KERN_DEBUG "isdn_net: protocol %04x is buggy, dev %s\n", skb->protocol, lp->netdev->dev->name);
- p = buf;
- proto = ETH_P_IP;
- switch (lp->p_encap) {
- case ISDN_NET_ENCAP_IPTYP:
- proto = ntohs(*(__be16 *)&buf[0]);
- p = &buf[2];
- break;
- case ISDN_NET_ENCAP_ETHER:
- proto = ntohs(*(__be16 *)&buf[12]);
- p = &buf[14];
- break;
- case ISDN_NET_ENCAP_CISCOHDLC:
- proto = ntohs(*(__be16 *)&buf[2]);
- p = &buf[4];
- break;
-#ifdef CONFIG_ISDN_PPP
- case ISDN_NET_ENCAP_SYNCPPP:
- proto = ntohs(skb->protocol);
- p = &buf[IPPP_MAX_HEADER];
- break;
-#endif
- }
- }
- data_ofs = ((p[0] & 15) * 4);
- switch (proto) {
- case ETH_P_IP:
- switch (p[9]) {
- case 1:
- strcpy(addinfo, " ICMP");
- break;
- case 2:
- strcpy(addinfo, " IGMP");
- break;
- case 4:
- strcpy(addinfo, " IPIP");
- break;
- case 6:
- ipp = (ip_ports *) (&p[data_ofs]);
- sprintf(addinfo, " TCP, port: %d -> %d", ntohs(ipp->source),
- ntohs(ipp->dest));
- break;
- case 8:
- strcpy(addinfo, " EGP");
- break;
- case 12:
- strcpy(addinfo, " PUP");
- break;
- case 17:
- ipp = (ip_ports *) (&p[data_ofs]);
- sprintf(addinfo, " UDP, port: %d -> %d", ntohs(ipp->source),
- ntohs(ipp->dest));
- break;
- case 22:
- strcpy(addinfo, " IDP");
- break;
- }
- printk(KERN_INFO "OPEN: %pI4 -> %pI4%s\n",
- p + 12, p + 16, addinfo);
- break;
- case ETH_P_ARP:
- printk(KERN_INFO "OPEN: ARP %pI4 -> *.*.*.* ?%pI4\n",
- p + 14, p + 24);
- break;
- }
-}
-
-/*
- * this function is used to send supervisory data, i.e. data which was
- * not received from the network layer, but e.g. frames from ipppd, CCP
- * reset frames etc.
- */
-void isdn_net_write_super(isdn_net_local *lp, struct sk_buff *skb)
-{
- if (in_irq()) {
- // we can't grab the lock from irq context,
- // so we just queue the packet
- skb_queue_tail(&lp->super_tx_queue, skb);
- schedule_work(&lp->tqueue);
- return;
- }
-
- spin_lock_bh(&lp->xmit_lock);
- if (!isdn_net_lp_busy(lp)) {
- isdn_net_writebuf_skb(lp, skb);
- } else {
- skb_queue_tail(&lp->super_tx_queue, skb);
- }
- spin_unlock_bh(&lp->xmit_lock);
-}
-
-/*
- * called from tq_immediate
- */
-static void isdn_net_softint(struct work_struct *work)
-{
- isdn_net_local *lp = container_of(work, isdn_net_local, tqueue);
- struct sk_buff *skb;
-
- spin_lock_bh(&lp->xmit_lock);
- while (!isdn_net_lp_busy(lp)) {
- skb = skb_dequeue(&lp->super_tx_queue);
- if (!skb)
- break;
- isdn_net_writebuf_skb(lp, skb);
- }
- spin_unlock_bh(&lp->xmit_lock);
-}
-
-/*
- * all frames sent from the (net) LL to a HL driver should go via this function.
- * It is serialized by the caller holding the lp->xmit_lock spinlock.
- */
-void isdn_net_writebuf_skb(isdn_net_local *lp, struct sk_buff *skb)
-{
- int ret;
- int len = skb->len; /* save len */
-
- /* before obtaining the lock the caller should have checked that
- the lp isn't busy */
- if (isdn_net_lp_busy(lp)) {
- printk("isdn BUG at %s:%d!\n", __FILE__, __LINE__);
- goto error;
- }
-
- if (!(lp->flags & ISDN_NET_CONNECTED)) {
- printk("isdn BUG at %s:%d!\n", __FILE__, __LINE__);
- goto error;
- }
- ret = isdn_writebuf_skb_stub(lp->isdn_device, lp->isdn_channel, 1, skb);
- if (ret != len) {
- /* we should never get here */
- printk(KERN_WARNING "%s: HL driver queue full\n", lp->netdev->dev->name);
- goto error;
- }
-
- lp->transcount += len;
- isdn_net_inc_frame_cnt(lp);
- return;
-
-error:
- dev_kfree_skb(skb);
- lp->stats.tx_errors++;
-
-}
-
-
-/*
- * Helper function for isdn_net_start_xmit.
- * When called, the connection is already established.
- * Based on cps-calculation, check if device is overloaded.
- * If so, and if a slave exists, trigger dialing for it.
- * If any slave is online, deliver packets using a simple round robin
- * scheme.
- *
- * Return: 0 on success, !0 on failure.
- */
-
-static int
-isdn_net_xmit(struct net_device *ndev, struct sk_buff *skb)
-{
- isdn_net_dev *nd;
- isdn_net_local *slp;
- isdn_net_local *lp = netdev_priv(ndev);
- int retv = NETDEV_TX_OK;
-
- if (((isdn_net_local *) netdev_priv(ndev))->master) {
- printk("isdn BUG at %s:%d!\n", __FILE__, __LINE__);
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
- }
-
- /* For the other encaps the header has already been built */
-#ifdef CONFIG_ISDN_PPP
- if (lp->p_encap == ISDN_NET_ENCAP_SYNCPPP) {
- return isdn_ppp_xmit(skb, ndev);
- }
-#endif
- nd = ((isdn_net_local *) netdev_priv(ndev))->netdev;
- lp = isdn_net_get_locked_lp(nd);
- if (!lp) {
- printk(KERN_WARNING "%s: all channels busy - requeuing!\n", ndev->name);
- return NETDEV_TX_BUSY;
- }
- /* we have our lp locked from now on */
-
- /* Reset hangup-timeout */
- lp->huptimer = 0; // FIXME?
- isdn_net_writebuf_skb(lp, skb);
- spin_unlock_bh(&lp->xmit_lock);
-
- /* the following stuff is here for backwards compatibility.
- * In the future, start-up and hangup of slaves (based on current load)
- * should move to userspace and be based on an overall cps
- * calculation.
- */
- if (lp->cps > lp->triggercps) {
- if (lp->slave) {
- if (!lp->sqfull) {
- /* First time overload: set timestamp only */
- lp->sqfull = 1;
- lp->sqfull_stamp = jiffies;
- } else {
- /* subsequent overload: if slavedelay exceeded, start dialing */
- if (time_after(jiffies, lp->sqfull_stamp + lp->slavedelay)) {
- slp = ISDN_SLAVE_PRIV(lp);
- if (!(slp->flags & ISDN_NET_CONNECTED)) {
- isdn_net_force_dial_lp(ISDN_SLAVE_PRIV(lp));
- }
- }
- }
- }
- } else {
- if (lp->sqfull && time_after(jiffies, lp->sqfull_stamp + lp->slavedelay + (10 * HZ))) {
- lp->sqfull = 0;
- }
- /* this is a hack to allow auto-hangup for slaves on moderate loads */
- nd->queue = nd->local;
- }
-
- return retv;
-
-}
-
-static void
-isdn_net_adjust_hdr(struct sk_buff *skb, struct net_device *dev)
-{
- isdn_net_local *lp = netdev_priv(dev);
- if (!skb)
- return;
- if (lp->p_encap == ISDN_NET_ENCAP_ETHER) {
- const int pullsize = skb_network_offset(skb) - ETH_HLEN;
- if (pullsize > 0) {
- printk(KERN_DEBUG "isdn_net: Pull junk %d\n", pullsize);
- skb_pull(skb, pullsize);
- }
- }
-}
-
-
-static void isdn_net_tx_timeout(struct net_device *ndev)
-{
- isdn_net_local *lp = netdev_priv(ndev);
-
- printk(KERN_WARNING "isdn_tx_timeout dev %s dialstate %d\n", ndev->name, lp->dialstate);
- if (!lp->dialstate) {
- lp->stats.tx_errors++;
- /*
- * There is a certain probability that this currently
- * works at all only because we always wake up the interface,
- * so the upper layer will try to send the next packet
- * immediately. And then, the old clean_up logic in the
- * driver will hopefully continue to work as it used to do.
- *
- * This is rather primitive right now; we should rather
- * clean internal queues here, in particular for multilink and
- * ppp, and reset the HL driver's channel, too. --HE
- *
- * Actually, this may not matter at all, because ISDN hardware
- * should not see transmitter hangs at all IMO.
- * Changed KERN_DEBUG to KERN_WARNING to find out if this is
- * ever called. --KG
- */
- }
- netif_trans_update(ndev);
- netif_wake_queue(ndev);
-}
-
-/*
- * Try sending a packet.
- * If this interface isn't connected to a ISDN-Channel, find a free channel,
- * and start dialing.
- */
-static netdev_tx_t
-isdn_net_start_xmit(struct sk_buff *skb, struct net_device *ndev)
-{
- isdn_net_local *lp = netdev_priv(ndev);
-#ifdef CONFIG_ISDN_X25
- struct concap_proto *cprot = lp->netdev->cprot;
-/* At this point hard_start_xmit() passes control to the encapsulation
- protocol (if present).
- For X.25, auto-dialing is completely bypassed because:
- - It does not conform to the semantics of a reliable datalink
- service as needed by X.25 PLP.
- - I don't want the interface to start dialing when the network layer
- sends a message requesting disconnection of the lapb link (or any
- other message not resulting in data transmission).
- Instead, dialing will be initiated by the encapsulation protocol entity
- when a dl_establish request is received from the upper layer.
-*/
- if (cprot && cprot->pops) {
- int ret = cprot->pops->encap_and_xmit(cprot, skb);
-
- if (ret)
- netif_stop_queue(ndev);
- return ret;
- } else
-#endif
- /* auto-dialing xmit function */
- {
-#ifdef ISDN_DEBUG_NET_DUMP
- u_char *buf;
-#endif
- isdn_net_adjust_hdr(skb, ndev);
-#ifdef ISDN_DEBUG_NET_DUMP
- buf = skb->data;
- isdn_dumppkt("S:", buf, skb->len, 40);
-#endif
-
- if (!(lp->flags & ISDN_NET_CONNECTED)) {
- int chi;
- /* only do autodial if allowed by config */
- if (!(ISDN_NET_DIALMODE(*lp) == ISDN_NET_DM_AUTO)) {
- isdn_net_unreachable(ndev, skb, "dial rejected: interface not in dialmode `auto'");
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
- }
- if (lp->phone[1]) {
- ulong flags;
-
- if (lp->dialwait_timer <= 0)
- if (lp->dialstarted > 0 && lp->dialtimeout > 0 && time_before(jiffies, lp->dialstarted + lp->dialtimeout + lp->dialwait))
- lp->dialwait_timer = lp->dialstarted + lp->dialtimeout + lp->dialwait;
-
- if (lp->dialwait_timer > 0) {
- if (time_before(jiffies, lp->dialwait_timer)) {
- isdn_net_unreachable(ndev, skb, "dial rejected: retry-time not reached");
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
- } else
- lp->dialwait_timer = 0;
- }
- /* Grab a free ISDN-Channel */
- spin_lock_irqsave(&dev->lock, flags);
- if (((chi =
- isdn_get_free_channel(
- ISDN_USAGE_NET,
- lp->l2_proto,
- lp->l3_proto,
- lp->pre_device,
- lp->pre_channel,
- lp->msn)
- ) < 0) &&
- ((chi =
- isdn_get_free_channel(
- ISDN_USAGE_NET,
- lp->l2_proto,
- lp->l3_proto,
- lp->pre_device,
- lp->pre_channel^1,
- lp->msn)
- ) < 0)) {
- spin_unlock_irqrestore(&dev->lock, flags);
- isdn_net_unreachable(ndev, skb,
- "No channel");
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
- }
- /* Log packet, which triggered dialing */
- if (dev->net_verbose)
- isdn_net_log_skb(skb, lp);
- lp->dialstate = 1;
- /* Connect interface with channel */
- isdn_net_bind_channel(lp, chi);
-#ifdef CONFIG_ISDN_PPP
- if (lp->p_encap == ISDN_NET_ENCAP_SYNCPPP) {
- /* no 'first_skb' handling for syncPPP */
- if (isdn_ppp_bind(lp) < 0) {
- dev_kfree_skb(skb);
- isdn_net_unbind_channel(lp);
- spin_unlock_irqrestore(&dev->lock, flags);
- return NETDEV_TX_OK; /* STN (skb to nirvana) ;) */
- }
-#ifdef CONFIG_IPPP_FILTER
- if (isdn_ppp_autodial_filter(skb, lp)) {
- isdn_ppp_free(lp);
- isdn_net_unbind_channel(lp);
- spin_unlock_irqrestore(&dev->lock, flags);
- isdn_net_unreachable(ndev, skb, "dial rejected: packet filtered");
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
- }
-#endif
- spin_unlock_irqrestore(&dev->lock, flags);
- isdn_net_dial(); /* Initiate dialing */
- netif_stop_queue(ndev);
- return NETDEV_TX_BUSY; /* let upper layer requeue skb packet */
- }
-#endif
- /* Initiate dialing */
- spin_unlock_irqrestore(&dev->lock, flags);
- isdn_net_dial();
- isdn_net_device_stop_queue(lp);
- return NETDEV_TX_BUSY;
- } else {
- isdn_net_unreachable(ndev, skb,
- "No phone number");
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
- }
- } else {
- /* Device is connected to an ISDN channel */
- netif_trans_update(ndev);
- if (!lp->dialstate) {
- /* ISDN connection is established, try sending */
- int ret;
- ret = (isdn_net_xmit(ndev, skb));
- if (ret) netif_stop_queue(ndev);
- return ret;
- } else
- netif_stop_queue(ndev);
- }
- }
- return NETDEV_TX_BUSY;
-}
-
-/*
- * Shutdown a net-interface.
- */
-static int
-isdn_net_close(struct net_device *dev)
-{
- struct net_device *p;
-#ifdef CONFIG_ISDN_X25
- struct concap_proto *cprot =
- ((isdn_net_local *)netdev_priv(dev))->netdev->cprot;
- /* printk(KERN_DEBUG "isdn_net_close %s\n" , dev-> name); */
-#endif
-
-#ifdef CONFIG_ISDN_X25
- if (cprot && cprot->pops) cprot->pops->close(cprot);
-#endif
- netif_stop_queue(dev);
- p = MASTER_TO_SLAVE(dev);
- if (p) {
- /* If this interface has slaves, stop them also */
- while (p) {
-#ifdef CONFIG_ISDN_X25
- cprot = ((isdn_net_local *)netdev_priv(p))
- ->netdev->cprot;
- if (cprot && cprot->pops)
- cprot->pops->close(cprot);
-#endif
- isdn_net_hangup(p);
- p = MASTER_TO_SLAVE(p);
- }
- }
- isdn_net_hangup(dev);
- isdn_unlock_drivers();
- return 0;
-}
-
-/*
- * Get statistics
- */
-static struct net_device_stats *
-isdn_net_get_stats(struct net_device *dev)
-{
- isdn_net_local *lp = netdev_priv(dev);
- return &lp->stats;
-}
-
-/* This is simply a copy from std. eth.c EXCEPT we pull ETH_HLEN
- * instead of dev->hard_header_len off. This is done because the
- * lowlevel-driver has already pulled off its stuff when we get
- * here and this routine only gets called with p_encap == ETHER.
- * Determine the packet's protocol ID. The rule here is that we
- * assume 802.3 if the type field is short enough to be a length.
- * This is normal practice and works for any 'now in use' protocol.
- */
-
-static __be16
-isdn_net_type_trans(struct sk_buff *skb, struct net_device *dev)
-{
- struct ethhdr *eth;
- unsigned char *rawp;
-
- skb_reset_mac_header(skb);
- skb_pull(skb, ETH_HLEN);
- eth = eth_hdr(skb);
-
- if (*eth->h_dest & 1) {
- if (ether_addr_equal(eth->h_dest, dev->broadcast))
- skb->pkt_type = PACKET_BROADCAST;
- else
- skb->pkt_type = PACKET_MULTICAST;
- }
- /*
- * This ALLMULTI check should be redundant by 1.4
- * so don't forget to remove it.
- */
-
- else if (dev->flags & (IFF_PROMISC /*| IFF_ALLMULTI*/)) {
- if (!ether_addr_equal(eth->h_dest, dev->dev_addr))
- skb->pkt_type = PACKET_OTHERHOST;
- }
- if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
- return eth->h_proto;
-
- rawp = skb->data;
-
- /*
- * This is a magic hack to spot IPX packets. Older Novell breaks
- * the protocol design and runs IPX over 802.3 without an 802.2 LLC
- * layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
- * won't work for fault tolerant netware but does for the rest.
- */
- if (*(unsigned short *) rawp == 0xFFFF)
- return htons(ETH_P_802_3);
- /*
- * Real 802.2 LLC
- */
- return htons(ETH_P_802_2);
-}
-
-
-/*
- * CISCO HDLC keepalive specific stuff
- */
-static struct sk_buff*
-isdn_net_ciscohdlck_alloc_skb(isdn_net_local *lp, int len)
-{
- unsigned short hl = dev->drv[lp->isdn_device]->interface->hl_hdrlen;
- struct sk_buff *skb;
-
- skb = alloc_skb(hl + len, GFP_ATOMIC);
- if (skb)
- skb_reserve(skb, hl);
- else
- printk("isdn out of mem at %s:%d!\n", __FILE__, __LINE__);
- return skb;
-}
-
-/* cisco hdlck device private ioctls */
-static int
-isdn_ciscohdlck_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
- isdn_net_local *lp = netdev_priv(dev);
- unsigned long len = 0;
- unsigned long expires = 0;
- int tmp = 0;
- int period = lp->cisco_keepalive_period;
- s8 debserint = lp->cisco_debserint;
- int rc = 0;
-
- if (lp->p_encap != ISDN_NET_ENCAP_CISCOHDLCK)
- return -EINVAL;
-
- switch (cmd) {
- /* get/set keepalive period */
- case SIOCGKEEPPERIOD:
- len = (unsigned long)sizeof(lp->cisco_keepalive_period);
- if (copy_to_user(ifr->ifr_data,
- &lp->cisco_keepalive_period, len))
- rc = -EFAULT;
- break;
- case SIOCSKEEPPERIOD:
- tmp = lp->cisco_keepalive_period;
- len = (unsigned long)sizeof(lp->cisco_keepalive_period);
- if (copy_from_user(&period, ifr->ifr_data, len))
- rc = -EFAULT;
- if ((period > 0) && (period <= 32767))
- lp->cisco_keepalive_period = period;
- else
- rc = -EINVAL;
- if (!rc && (tmp != lp->cisco_keepalive_period)) {
- expires = (unsigned long)(jiffies +
- lp->cisco_keepalive_period * HZ);
- mod_timer(&lp->cisco_timer, expires);
- printk(KERN_INFO "%s: Keepalive period set "
- "to %d seconds.\n",
- dev->name, lp->cisco_keepalive_period);
- }
- break;
-
- /* get/set debugging */
- case SIOCGDEBSERINT:
- len = (unsigned long)sizeof(lp->cisco_debserint);
- if (copy_to_user(ifr->ifr_data,
- &lp->cisco_debserint, len))
- rc = -EFAULT;
- break;
- case SIOCSDEBSERINT:
- len = (unsigned long)sizeof(lp->cisco_debserint);
- if (copy_from_user(&debserint,
- ifr->ifr_data, len))
- rc = -EFAULT;
- if ((debserint >= 0) && (debserint <= 64))
- lp->cisco_debserint = debserint;
- else
- rc = -EINVAL;
- break;
-
- default:
- rc = -EINVAL;
- break;
- }
- return (rc);
-}
-
-
-static int isdn_net_ioctl(struct net_device *dev,
- struct ifreq *ifr, int cmd)
-{
- isdn_net_local *lp = netdev_priv(dev);
-
- switch (lp->p_encap) {
-#ifdef CONFIG_ISDN_PPP
- case ISDN_NET_ENCAP_SYNCPPP:
- return isdn_ppp_dev_ioctl(dev, ifr, cmd);
-#endif
- case ISDN_NET_ENCAP_CISCOHDLCK:
- return isdn_ciscohdlck_dev_ioctl(dev, ifr, cmd);
- default:
- return -EINVAL;
- }
-}
-
-/* called via cisco_timer.function */
-static void
-isdn_net_ciscohdlck_slarp_send_keepalive(struct timer_list *t)
-{
- isdn_net_local *lp = from_timer(lp, t, cisco_timer);
- struct sk_buff *skb;
- unsigned char *p;
- unsigned long last_cisco_myseq = lp->cisco_myseq;
- int myseq_diff = 0;
-
- if (!(lp->flags & ISDN_NET_CONNECTED) || lp->dialstate) {
- printk("isdn BUG at %s:%d!\n", __FILE__, __LINE__);
- return;
- }
- lp->cisco_myseq++;
-
- myseq_diff = (lp->cisco_myseq - lp->cisco_mineseen);
- if ((lp->cisco_line_state) && ((myseq_diff >= 3) || (myseq_diff <= -3))) {
- /* line up -> down */
- lp->cisco_line_state = 0;
- printk(KERN_WARNING
- "UPDOWN: Line protocol on Interface %s,"
- " changed state to down\n", lp->netdev->dev->name);
- /* should stop routing higher-level data across */
- } else if ((!lp->cisco_line_state) &&
- (myseq_diff >= 0) && (myseq_diff <= 2)) {
- /* line down -> up */
- lp->cisco_line_state = 1;
- printk(KERN_WARNING
- "UPDOWN: Line protocol on Interface %s,"
- " changed state to up\n", lp->netdev->dev->name);
- /* restart routing higher-level data across */
- }
-
- if (lp->cisco_debserint)
- printk(KERN_DEBUG "%s: HDLC "
- "myseq %lu, mineseen %lu%c, yourseen %lu, %s\n",
- lp->netdev->dev->name, last_cisco_myseq, lp->cisco_mineseen,
- ((last_cisco_myseq == lp->cisco_mineseen) ? '*' : 040),
- lp->cisco_yourseq,
- ((lp->cisco_line_state) ? "line up" : "line down"));
-
- skb = isdn_net_ciscohdlck_alloc_skb(lp, 4 + 14);
- if (!skb)
- return;
-
- p = skb_put(skb, 4 + 14);
-
- /* cisco header */
- *(u8 *)(p + 0) = CISCO_ADDR_UNICAST;
- *(u8 *)(p + 1) = CISCO_CTRL;
- *(__be16 *)(p + 2) = cpu_to_be16(CISCO_TYPE_SLARP);
-
- /* slarp keepalive */
- *(__be32 *)(p + 4) = cpu_to_be32(CISCO_SLARP_KEEPALIVE);
- *(__be32 *)(p + 8) = cpu_to_be32(lp->cisco_myseq);
- *(__be32 *)(p + 12) = cpu_to_be32(lp->cisco_yourseq);
- *(__be16 *)(p + 16) = cpu_to_be16(0xffff); // reliability, always 0xffff
- p += 18;
-
- isdn_net_write_super(lp, skb);
-
- lp->cisco_timer.expires = jiffies + lp->cisco_keepalive_period * HZ;
-
- add_timer(&lp->cisco_timer);
-}
-
-static void
-isdn_net_ciscohdlck_slarp_send_request(isdn_net_local *lp)
-{
- struct sk_buff *skb;
- unsigned char *p;
-
- skb = isdn_net_ciscohdlck_alloc_skb(lp, 4 + 14);
- if (!skb)
- return;
-
- p = skb_put(skb, 4 + 14);
-
- /* cisco header */
- *(u8 *)(p + 0) = CISCO_ADDR_UNICAST;
- *(u8 *)(p + 1) = CISCO_CTRL;
- *(__be16 *)(p + 2) = cpu_to_be16(CISCO_TYPE_SLARP);
-
- /* slarp request */
- *(__be32 *)(p + 4) = cpu_to_be32(CISCO_SLARP_REQUEST);
- *(__be32 *)(p + 8) = cpu_to_be32(0); // address
- *(__be32 *)(p + 12) = cpu_to_be32(0); // netmask
- *(__be16 *)(p + 16) = cpu_to_be16(0); // unused
- p += 18;
-
- isdn_net_write_super(lp, skb);
-}
-
-static void
-isdn_net_ciscohdlck_connected(isdn_net_local *lp)
-{
- lp->cisco_myseq = 0;
- lp->cisco_mineseen = 0;
- lp->cisco_yourseq = 0;
- lp->cisco_keepalive_period = ISDN_TIMER_KEEPINT;
- lp->cisco_last_slarp_in = 0;
- lp->cisco_line_state = 0;
- lp->cisco_debserint = 0;
-
- /* send slarp request because interface/seq.no.s reset */
- isdn_net_ciscohdlck_slarp_send_request(lp);
-
- timer_setup(&lp->cisco_timer,
- isdn_net_ciscohdlck_slarp_send_keepalive, 0);
- lp->cisco_timer.expires = jiffies + lp->cisco_keepalive_period * HZ;
- add_timer(&lp->cisco_timer);
-}
-
-static void
-isdn_net_ciscohdlck_disconnected(isdn_net_local *lp)
-{
- del_timer(&lp->cisco_timer);
-}
-
-static void
-isdn_net_ciscohdlck_slarp_send_reply(isdn_net_local *lp)
-{
- struct sk_buff *skb;
- unsigned char *p;
- struct in_device *in_dev = NULL;
- __be32 addr = 0; /* local ipv4 address */
- __be32 mask = 0; /* local netmask */
-
- if ((in_dev = lp->netdev->dev->ip_ptr) != NULL) {
- /* take primary(first) address of interface */
- struct in_ifaddr *ifa = in_dev->ifa_list;
- if (ifa != NULL) {
- addr = ifa->ifa_local;
- mask = ifa->ifa_mask;
- }
- }
-
- skb = isdn_net_ciscohdlck_alloc_skb(lp, 4 + 14);
- if (!skb)
- return;
-
- p = skb_put(skb, 4 + 14);
-
- /* cisco header */
- *(u8 *)(p + 0) = CISCO_ADDR_UNICAST;
- *(u8 *)(p + 1) = CISCO_CTRL;
- *(__be16 *)(p + 2) = cpu_to_be16(CISCO_TYPE_SLARP);
-
- /* slarp reply, send own ip/netmask; if the values are nonsense, the
- * remote should assume we are unable to provide it with an address via SLARP */
- *(__be32 *)(p + 4) = cpu_to_be32(CISCO_SLARP_REPLY);
- *(__be32 *)(p + 8) = addr; // address
- *(__be32 *)(p + 12) = mask; // netmask
- *(__be16 *)(p + 16) = cpu_to_be16(0); // unused
- p += 18;
-
- isdn_net_write_super(lp, skb);
-}
-
-static void
-isdn_net_ciscohdlck_slarp_in(isdn_net_local *lp, struct sk_buff *skb)
-{
- unsigned char *p;
- int period;
- u32 code;
- u32 my_seq;
- u32 your_seq;
- __be32 local;
- __be32 *addr, *mask;
-
- if (skb->len < 14)
- return;
-
- p = skb->data;
- code = be32_to_cpup((__be32 *)p);
- p += 4;
-
- switch (code) {
- case CISCO_SLARP_REQUEST:
- lp->cisco_yourseq = 0;
- isdn_net_ciscohdlck_slarp_send_reply(lp);
- break;
- case CISCO_SLARP_REPLY:
- addr = (__be32 *)p;
- mask = (__be32 *)(p + 4);
- if (*mask != cpu_to_be32(0xfffffffc))
- goto slarp_reply_out;
- if ((*addr & cpu_to_be32(3)) == cpu_to_be32(0) ||
- (*addr & cpu_to_be32(3)) == cpu_to_be32(3))
- goto slarp_reply_out;
- local = *addr ^ cpu_to_be32(3);
- printk(KERN_INFO "%s: got slarp reply: remote ip: %pI4, local ip: %pI4 mask: %pI4\n",
- lp->netdev->dev->name, addr, &local, mask);
- break;
- slarp_reply_out:
- printk(KERN_INFO "%s: got invalid slarp reply (%pI4/%pI4) - ignored\n",
- lp->netdev->dev->name, addr, mask);
- break;
- case CISCO_SLARP_KEEPALIVE:
- period = (int)((jiffies - lp->cisco_last_slarp_in
- + HZ / 2 - 1) / HZ);
- if (lp->cisco_debserint &&
- (period != lp->cisco_keepalive_period) &&
- lp->cisco_last_slarp_in) {
- printk(KERN_DEBUG "%s: Keepalive period mismatch - "
- "is %d but should be %d.\n",
- lp->netdev->dev->name, period,
- lp->cisco_keepalive_period);
- }
- lp->cisco_last_slarp_in = jiffies;
- my_seq = be32_to_cpup((__be32 *)(p + 0));
- your_seq = be32_to_cpup((__be32 *)(p + 4));
- p += 10;
- lp->cisco_yourseq = my_seq;
- lp->cisco_mineseen = your_seq;
- break;
- }
-}
-
-static void
-isdn_net_ciscohdlck_receive(isdn_net_local *lp, struct sk_buff *skb)
-{
- unsigned char *p;
- u8 addr;
- u8 ctrl;
- u16 type;
-
- if (skb->len < 4)
- goto out_free;
-
- p = skb->data;
- addr = *(u8 *)(p + 0);
- ctrl = *(u8 *)(p + 1);
- type = be16_to_cpup((__be16 *)(p + 2));
- p += 4;
- skb_pull(skb, 4);
-
- if (addr != CISCO_ADDR_UNICAST && addr != CISCO_ADDR_BROADCAST) {
- printk(KERN_WARNING "%s: Unknown Cisco addr 0x%02x\n",
- lp->netdev->dev->name, addr);
- goto out_free;
- }
- if (ctrl != CISCO_CTRL) {
- printk(KERN_WARNING "%s: Unknown Cisco ctrl 0x%02x\n",
- lp->netdev->dev->name, ctrl);
- goto out_free;
- }
-
- switch (type) {
- case CISCO_TYPE_SLARP:
- isdn_net_ciscohdlck_slarp_in(lp, skb);
- goto out_free;
- case CISCO_TYPE_CDP:
- if (lp->cisco_debserint)
- printk(KERN_DEBUG "%s: Received CDP packet. use "
- "\"no cdp enable\" on cisco.\n",
- lp->netdev->dev->name);
- goto out_free;
- default:
- /* no special cisco protocol */
- skb->protocol = htons(type);
- netif_rx(skb);
- return;
- }
-
-out_free:
- kfree_skb(skb);
-}
-
-/*
- * Got a packet from ISDN-Channel.
- */
-static void
-isdn_net_receive(struct net_device *ndev, struct sk_buff *skb)
-{
- isdn_net_local *lp = netdev_priv(ndev);
- isdn_net_local *olp = lp; /* original 'lp' */
-#ifdef CONFIG_ISDN_X25
- struct concap_proto *cprot = lp->netdev->cprot;
-#endif
- lp->transcount += skb->len;
-
- lp->stats.rx_packets++;
- lp->stats.rx_bytes += skb->len;
- if (lp->master) {
- /* Bundling: If device is a slave-device, deliver to master, also
- * handle master's statistics and hangup-timeout
- */
- ndev = lp->master;
- lp = netdev_priv(ndev);
- lp->stats.rx_packets++;
- lp->stats.rx_bytes += skb->len;
- }
- skb->dev = ndev;
- skb->pkt_type = PACKET_HOST;
- skb_reset_mac_header(skb);
-#ifdef ISDN_DEBUG_NET_DUMP
- isdn_dumppkt("R:", skb->data, skb->len, 40);
-#endif
- switch (lp->p_encap) {
- case ISDN_NET_ENCAP_ETHER:
- /* Ethernet over ISDN */
- olp->huptimer = 0;
- lp->huptimer = 0;
- skb->protocol = isdn_net_type_trans(skb, ndev);
- break;
- case ISDN_NET_ENCAP_UIHDLC:
- /* HDLC with UI-frame (for ispa with -h1 option) */
- olp->huptimer = 0;
- lp->huptimer = 0;
- skb_pull(skb, 2);
- /* Fall through */
- case ISDN_NET_ENCAP_RAWIP:
- /* RAW-IP without MAC-Header */
- olp->huptimer = 0;
- lp->huptimer = 0;
- skb->protocol = htons(ETH_P_IP);
- break;
- case ISDN_NET_ENCAP_CISCOHDLCK:
- isdn_net_ciscohdlck_receive(lp, skb);
- return;
- case ISDN_NET_ENCAP_CISCOHDLC:
- /* CISCO-HDLC IP with type field and fake I-frame-header */
- skb_pull(skb, 2);
- /* Fall through */
- case ISDN_NET_ENCAP_IPTYP:
- /* IP with type field */
- olp->huptimer = 0;
- lp->huptimer = 0;
- skb->protocol = *(__be16 *)&(skb->data[0]);
- skb_pull(skb, 2);
- if (*(unsigned short *) skb->data == 0xFFFF)
- skb->protocol = htons(ETH_P_802_3);
- break;
-#ifdef CONFIG_ISDN_PPP
- case ISDN_NET_ENCAP_SYNCPPP:
- /* huptimer is done in isdn_ppp_push_higher */
- isdn_ppp_receive(lp->netdev, olp, skb);
- return;
-#endif
-
- default:
-#ifdef CONFIG_ISDN_X25
- /* check whether there are generic sync_device receiver routines */
- if (cprot) if (cprot->pops)
- if (cprot->pops->data_ind) {
- cprot->pops->data_ind(cprot, skb);
- return;
- };
-#endif /* CONFIG_ISDN_X25 */
- printk(KERN_WARNING "%s: unknown encapsulation, dropping\n",
- lp->netdev->dev->name);
- kfree_skb(skb);
- return;
- }
-
- netif_rx(skb);
- return;
-}
-
-/*
- * A packet arrived via ISDN. Search interface-chain for a corresponding
- * interface. If found, deliver packet to receiver-function and return 1,
- * else return 0.
- */
-int
-isdn_net_rcv_skb(int idx, struct sk_buff *skb)
-{
- isdn_net_dev *p = dev->rx_netdev[idx];
-
- if (p) {
- isdn_net_local *lp = p->local;
- if ((lp->flags & ISDN_NET_CONNECTED) &&
- (!lp->dialstate)) {
- isdn_net_receive(p->dev, skb);
- return 1;
- }
- }
- return 0;
-}
-
-/*
- * build a header,
- * depending on the encapsulation that is being used.
- */
-
-static int isdn_net_header(struct sk_buff *skb, struct net_device *dev,
- unsigned short type,
- const void *daddr, const void *saddr, unsigned plen)
-{
- isdn_net_local *lp = netdev_priv(dev);
- unsigned char *p;
- int len = 0;
-
- switch (lp->p_encap) {
- case ISDN_NET_ENCAP_ETHER:
- len = eth_header(skb, dev, type, daddr, saddr, plen);
- break;
-#ifdef CONFIG_ISDN_PPP
- case ISDN_NET_ENCAP_SYNCPPP:
- /* stick on a fake header to keep fragmentation code happy. */
- len = IPPP_MAX_HEADER;
- skb_push(skb, len);
- break;
-#endif
- case ISDN_NET_ENCAP_RAWIP:
- printk(KERN_WARNING "isdn_net_header called with RAW_IP!\n");
- len = 0;
- break;
- case ISDN_NET_ENCAP_IPTYP:
- /* ethernet type field */
- *((__be16 *)skb_push(skb, 2)) = htons(type);
- len = 2;
- break;
- case ISDN_NET_ENCAP_UIHDLC:
- /* HDLC with UI-Frames (for ispa with -h1 option) */
- *((__be16 *)skb_push(skb, 2)) = htons(0x0103);
- len = 2;
- break;
- case ISDN_NET_ENCAP_CISCOHDLC:
- case ISDN_NET_ENCAP_CISCOHDLCK:
- p = skb_push(skb, 4);
- *(u8 *)(p + 0) = CISCO_ADDR_UNICAST;
- *(u8 *)(p + 1) = CISCO_CTRL;
- *(__be16 *)(p + 2) = cpu_to_be16(type);
- p += 4;
- len = 4;
- break;
-#ifdef CONFIG_ISDN_X25
- default:
- /* check whether there are generic concap protocol routines */
- if (lp->netdev->cprot) {
- printk(KERN_WARNING "isdn_net_header called with concap_proto!\n");
- len = 0;
- break;
- }
- break;
-#endif /* CONFIG_ISDN_X25 */
- }
- return len;
-}
-
-static int isdn_header_cache(const struct neighbour *neigh, struct hh_cache *hh,
- __be16 type)
-{
- const struct net_device *dev = neigh->dev;
- isdn_net_local *lp = netdev_priv(dev);
-
- if (lp->p_encap == ISDN_NET_ENCAP_ETHER)
- return eth_header_cache(neigh, hh, type);
- return -1;
-}
-
-static void isdn_header_cache_update(struct hh_cache *hh,
- const struct net_device *dev,
- const unsigned char *haddr)
-{
- isdn_net_local *lp = netdev_priv(dev);
- if (lp->p_encap == ISDN_NET_ENCAP_ETHER)
- eth_header_cache_update(hh, dev, haddr);
-}
-
-static const struct header_ops isdn_header_ops = {
- .create = isdn_net_header,
- .cache = isdn_header_cache,
- .cache_update = isdn_header_cache_update,
-};
-
-/*
- * Interface-setup. (just after registering a new interface)
- */
-static int
-isdn_net_init(struct net_device *ndev)
-{
- ushort max_hlhdr_len = 0;
- int drvidx;
-
- /*
- * until binding, we ask the protocol layer to reserve as much
- * as we might need for the HL layer
- */
-
- for (drvidx = 0; drvidx < ISDN_MAX_DRIVERS; drvidx++)
- if (dev->drv[drvidx])
- if (max_hlhdr_len < dev->drv[drvidx]->interface->hl_hdrlen)
- max_hlhdr_len = dev->drv[drvidx]->interface->hl_hdrlen;
-
- ndev->hard_header_len = ETH_HLEN + max_hlhdr_len;
- return 0;
-}
-
-static void
-isdn_net_swapbind(int drvidx)
-{
- isdn_net_dev *p;
-
-#ifdef ISDN_DEBUG_NET_ICALL
- printk(KERN_DEBUG "n_fi: swapping ch of %d\n", drvidx);
-#endif
- p = dev->netdev;
- while (p) {
- if (p->local->pre_device == drvidx)
- switch (p->local->pre_channel) {
- case 0:
- p->local->pre_channel = 1;
- break;
- case 1:
- p->local->pre_channel = 0;
- break;
- }
- p = (isdn_net_dev *) p->next;
- }
-}
-
-static void
-isdn_net_swap_usage(int i1, int i2)
-{
- int u1 = dev->usage[i1] & ISDN_USAGE_EXCLUSIVE;
- int u2 = dev->usage[i2] & ISDN_USAGE_EXCLUSIVE;
-
-#ifdef ISDN_DEBUG_NET_ICALL
- printk(KERN_DEBUG "n_fi: usage of %d and %d\n", i1, i2);
-#endif
- dev->usage[i1] &= ~ISDN_USAGE_EXCLUSIVE;
- dev->usage[i1] |= u2;
- dev->usage[i2] &= ~ISDN_USAGE_EXCLUSIVE;
- dev->usage[i2] |= u1;
- isdn_info_update();
-}
-
-/*
- * An incoming call-request has arrived.
- * Search the interface-chain for an appropriate interface.
- * If found, connect the interface to the ISDN-channel and initiate
- * D- and B-Channel-setup. If secure-flag is set, accept only
- * configured phone-numbers. If callback-flag is set, initiate
- * callback-dialing.
- *
- * Return-Value: 0 = No appropriate interface for this call.
- * 1 = Call accepted
- * 2 = Reject call, wait cbdelay, then call back
- * 3 = Reject call
- * 4 = Wait cbdelay, then call back
- * 5 = No appropriate interface for this call,
- * would eventually match if CID was longer.
- */
-
-int
-isdn_net_find_icall(int di, int ch, int idx, setup_parm *setup)
-{
- char *eaz;
- int si1;
- int si2;
- int ematch;
- int wret;
- int swapped;
- int sidx = 0;
- u_long flags;
- isdn_net_dev *p;
- isdn_net_phone *n;
- char nr[ISDN_MSNLEN];
- char *my_eaz;
-
- /* Search name in netdev-chain */
- if (!setup->phone[0]) {
- nr[0] = '0';
- nr[1] = '\0';
- printk(KERN_INFO "isdn_net: Incoming call without OAD, assuming '0'\n");
- } else
- strlcpy(nr, setup->phone, ISDN_MSNLEN);
- si1 = (int) setup->si1;
- si2 = (int) setup->si2;
- if (!setup->eazmsn[0]) {
- printk(KERN_WARNING "isdn_net: Incoming call without CPN, assuming '0'\n");
- eaz = "0";
- } else
- eaz = setup->eazmsn;
- if (dev->net_verbose > 1)
- printk(KERN_INFO "isdn_net: call from %s,%d,%d -> %s\n", nr, si1, si2, eaz);
- /* Accept DATA and VOICE calls at this stage
- * local eaz is checked later for allowed call types
- */
- if ((si1 != 7) && (si1 != 1)) {
- if (dev->net_verbose > 1)
- printk(KERN_INFO "isdn_net: Service-Indicator not 1 or 7, ignored\n");
- return 0;
- }
- n = (isdn_net_phone *) 0;
- p = dev->netdev;
- ematch = wret = swapped = 0;
-#ifdef ISDN_DEBUG_NET_ICALL
- printk(KERN_DEBUG "n_fi: di=%d ch=%d idx=%d usg=%d\n", di, ch, idx,
- dev->usage[idx]);
-#endif
- while (p) {
- int matchret;
- isdn_net_local *lp = p->local;
-
- /* If last check has triggered as binding-swap, revert it */
- switch (swapped) {
- case 2:
- isdn_net_swap_usage(idx, sidx);
- /* fall through */
- case 1:
- isdn_net_swapbind(di);
- break;
- }
- swapped = 0;
- /* check acceptable call types for DOV */
- my_eaz = isdn_map_eaz2msn(lp->msn, di);
- if (si1 == 1) { /* it's a DOV call, check if we allow it */
- if (*my_eaz == 'v' || *my_eaz == 'V' ||
- *my_eaz == 'b' || *my_eaz == 'B')
- my_eaz++; /* skip to allow a match */
- else
- my_eaz = NULL; /* force non match */
- } else { /* it's a DATA call, check if we allow it */
- if (*my_eaz == 'b' || *my_eaz == 'B')
- my_eaz++; /* skip to allow a match */
- }
- if (my_eaz)
- matchret = isdn_msncmp(eaz, my_eaz);
- else
- matchret = 1;
- if (!matchret)
- ematch = 1;
-
- /* Remember if more numbers eventually can match */
- if (matchret > wret)
- wret = matchret;
-#ifdef ISDN_DEBUG_NET_ICALL
- printk(KERN_DEBUG "n_fi: if='%s', l.msn=%s, l.flags=%d, l.dstate=%d\n",
- p->dev->name, lp->msn, lp->flags, lp->dialstate);
-#endif
- if ((!matchret) && /* EAZ is matching */
- (((!(lp->flags & ISDN_NET_CONNECTED)) && /* but not connected */
- (USG_NONE(dev->usage[idx]))) || /* and ch. unused or */
- ((((lp->dialstate == 4) || (lp->dialstate == 12)) && /* if dialing */
- (!(lp->flags & ISDN_NET_CALLBACK))) /* but no callback */
- )))
- {
-#ifdef ISDN_DEBUG_NET_ICALL
- printk(KERN_DEBUG "n_fi: match1, pdev=%d pch=%d\n",
- lp->pre_device, lp->pre_channel);
-#endif
- if (dev->usage[idx] & ISDN_USAGE_EXCLUSIVE) {
- if ((lp->pre_channel != ch) ||
- (lp->pre_device != di)) {
- /* Here we have a problem:
- * When using an ICN card, an incoming call is always signaled
- * on the first channel of the card if both channels are
- * down. However, this channel may be bound exclusively. If the
- * second channel is free, this call should be accepted.
- * The solution is ugly, but it works, so:
- * We exchange the exclusive bindings of the two channels, i.e. the
- * corresponding variables in the interface structs.
- */
- if (ch == 0) {
- sidx = isdn_dc2minor(di, 1);
-#ifdef ISDN_DEBUG_NET_ICALL
- printk(KERN_DEBUG "n_fi: ch is 0\n");
-#endif
- if (USG_NONE(dev->usage[sidx])) {
- /* Second Channel is free, now see if it is bound
- * exclusive too. */
- if (dev->usage[sidx] & ISDN_USAGE_EXCLUSIVE) {
-#ifdef ISDN_DEBUG_NET_ICALL
- printk(KERN_DEBUG "n_fi: 2nd channel is down and bound\n");
-#endif
- /* Yes, swap bindings only, if the original
- * binding is bound to channel 1 of this driver */
- if ((lp->pre_device == di) &&
- (lp->pre_channel == 1)) {
- isdn_net_swapbind(di);
- swapped = 1;
- } else {
- /* ... else iterate next device */
- p = (isdn_net_dev *) p->next;
- continue;
- }
- } else {
-#ifdef ISDN_DEBUG_NET_ICALL
- printk(KERN_DEBUG "n_fi: 2nd channel is down and unbound\n");
-#endif
- /* No, swap always and swap excl-usage also */
- isdn_net_swap_usage(idx, sidx);
- isdn_net_swapbind(di);
- swapped = 2;
- }
- /* Now check for exclusive binding again */
-#ifdef ISDN_DEBUG_NET_ICALL
- printk(KERN_DEBUG "n_fi: final check\n");
-#endif
- if ((dev->usage[idx] & ISDN_USAGE_EXCLUSIVE) &&
- ((lp->pre_channel != ch) ||
- (lp->pre_device != di))) {
-#ifdef ISDN_DEBUG_NET_ICALL
- printk(KERN_DEBUG "n_fi: final check failed\n");
-#endif
- p = (isdn_net_dev *) p->next;
- continue;
- }
- }
- } else {
- /* We are already on the second channel, so nothing to do */
-#ifdef ISDN_DEBUG_NET_ICALL
- printk(KERN_DEBUG "n_fi: already on 2nd channel\n");
-#endif
- }
- }
- }
-#ifdef ISDN_DEBUG_NET_ICALL
- printk(KERN_DEBUG "n_fi: match2\n");
-#endif
- n = lp->phone[0];
- if (lp->flags & ISDN_NET_SECURE) {
- while (n) {
- if (!isdn_msncmp(nr, n->num))
- break;
- n = (isdn_net_phone *) n->next;
- }
- }
- if (n || (!(lp->flags & ISDN_NET_SECURE))) {
-#ifdef ISDN_DEBUG_NET_ICALL
- printk(KERN_DEBUG "n_fi: match3\n");
-#endif
- /* matching interface found */
-
- /*
- * Is the state STOPPED?
- * If so, no dialin is allowed,
- * so reject actively.
- * */
- if (ISDN_NET_DIALMODE(*lp) == ISDN_NET_DM_OFF) {
- printk(KERN_INFO "incoming call, interface %s `stopped' -> rejected\n",
- p->dev->name);
- return 3;
- }
- /*
- * Is the interface up?
- * If not, reject the call actively.
- */
- if (!isdn_net_device_started(p)) {
- printk(KERN_INFO "%s: incoming call, interface down -> rejected\n",
- p->dev->name);
- return 3;
- }
- /* Interface is up, now see if it's a slave. If so, see if
- * its master and parent slave are online. If not, reject the call.
- */
- if (lp->master) {
- isdn_net_local *mlp = ISDN_MASTER_PRIV(lp);
- printk(KERN_DEBUG "ICALLslv: %s\n", p->dev->name);
- printk(KERN_DEBUG "master=%s\n", lp->master->name);
- if (mlp->flags & ISDN_NET_CONNECTED) {
- printk(KERN_DEBUG "master online\n");
- /* Master is online, find parent-slave (master if first slave) */
- while (mlp->slave) {
- if (ISDN_SLAVE_PRIV(mlp) == lp)
- break;
- mlp = ISDN_SLAVE_PRIV(mlp);
- }
- } else
- printk(KERN_DEBUG "master offline\n");
- /* Found parent; if it's offline, iterate to the next device */
- printk(KERN_DEBUG "mlpf: %d\n", mlp->flags & ISDN_NET_CONNECTED);
- if (!(mlp->flags & ISDN_NET_CONNECTED)) {
- p = (isdn_net_dev *) p->next;
- continue;
- }
- }
- if (lp->flags & ISDN_NET_CALLBACK) {
- int chi;
- /*
- * Is the state MANUAL?
- * If so, no callback can be made,
- * so reject actively.
- * */
- if (ISDN_NET_DIALMODE(*lp) == ISDN_NET_DM_OFF) {
- printk(KERN_INFO "incoming call for callback, interface %s `off' -> rejected\n",
- p->dev->name);
- return 3;
- }
- printk(KERN_DEBUG "%s: call from %s -> %s, start callback\n",
- p->dev->name, nr, eaz);
- if (lp->phone[1]) {
- /* Grab a free ISDN-Channel */
- spin_lock_irqsave(&dev->lock, flags);
- if ((chi =
- isdn_get_free_channel(
- ISDN_USAGE_NET,
- lp->l2_proto,
- lp->l3_proto,
- lp->pre_device,
- lp->pre_channel,
- lp->msn)
- ) < 0) {
-
- printk(KERN_WARNING "isdn_net_find_icall: No channel for %s\n",
- p->dev->name);
- spin_unlock_irqrestore(&dev->lock, flags);
- return 0;
- }
- /* Setup dialstate. */
- lp->dtimer = 0;
- lp->dialstate = 11;
- /* Connect interface with channel */
- isdn_net_bind_channel(lp, chi);
-#ifdef CONFIG_ISDN_PPP
- if (lp->p_encap == ISDN_NET_ENCAP_SYNCPPP)
- if (isdn_ppp_bind(lp) < 0) {
- spin_unlock_irqrestore(&dev->lock, flags);
- isdn_net_unbind_channel(lp);
- return 0;
- }
-#endif
- spin_unlock_irqrestore(&dev->lock, flags);
- /* Initiate dialing by returning 2 or 4 */
- return (lp->flags & ISDN_NET_CBHUP) ? 2 : 4;
- } else
- printk(KERN_WARNING "isdn_net: %s: No phone number\n",
- p->dev->name);
- return 0;
- } else {
- printk(KERN_DEBUG "%s: call from %s -> %s accepted\n",
- p->dev->name, nr, eaz);
- /* if this interface is dialing, it is probably doing so on a
- different device, so free this device */
- if ((lp->dialstate == 4) || (lp->dialstate == 12)) {
-#ifdef CONFIG_ISDN_PPP
- if (lp->p_encap == ISDN_NET_ENCAP_SYNCPPP)
- isdn_ppp_free(lp);
-#endif
- isdn_net_lp_disconnected(lp);
- isdn_free_channel(lp->isdn_device, lp->isdn_channel,
- ISDN_USAGE_NET);
- }
- spin_lock_irqsave(&dev->lock, flags);
- dev->usage[idx] &= ISDN_USAGE_EXCLUSIVE;
- dev->usage[idx] |= ISDN_USAGE_NET;
- strcpy(dev->num[idx], nr);
- isdn_info_update();
- dev->st_netdev[idx] = lp->netdev;
- lp->isdn_device = di;
- lp->isdn_channel = ch;
- lp->ppp_slot = -1;
- lp->flags |= ISDN_NET_CONNECTED;
- lp->dialstate = 7;
- lp->dtimer = 0;
- lp->outgoing = 0;
- lp->huptimer = 0;
- lp->hupflags |= ISDN_WAITCHARGE;
- lp->hupflags &= ~ISDN_HAVECHARGE;
-#ifdef CONFIG_ISDN_PPP
- if (lp->p_encap == ISDN_NET_ENCAP_SYNCPPP) {
- if (isdn_ppp_bind(lp) < 0) {
- isdn_net_unbind_channel(lp);
- spin_unlock_irqrestore(&dev->lock, flags);
- return 0;
- }
- }
-#endif
- spin_unlock_irqrestore(&dev->lock, flags);
- return 1;
- }
- }
- }
- p = (isdn_net_dev *) p->next;
- }
- /* If none of configured EAZ/MSN matched and not verbose, be silent */
- if (!ematch || dev->net_verbose)
- printk(KERN_INFO "isdn_net: call from %s -> %d %s ignored\n", nr, di, eaz);
- return (wret == 2) ? 5 : 0;
-}
-
-/*
- * Search list of net-interfaces for an interface with given name.
- */
-isdn_net_dev *
-isdn_net_findif(char *name)
-{
- isdn_net_dev *p = dev->netdev;
-
- while (p) {
- if (!strcmp(p->dev->name, name))
- return p;
- p = (isdn_net_dev *) p->next;
- }
- return (isdn_net_dev *) NULL;
-}
-
-/*
- * Force a net-interface to dial out.
- * This is called from the userlevel-routine below or
- * from isdn_net_start_xmit().
- */
-static int
-isdn_net_force_dial_lp(isdn_net_local *lp)
-{
- if ((!(lp->flags & ISDN_NET_CONNECTED)) && !lp->dialstate) {
- int chi;
- if (lp->phone[1]) {
- ulong flags;
-
- /* Grab a free ISDN-Channel */
- spin_lock_irqsave(&dev->lock, flags);
- if ((chi = isdn_get_free_channel(
- ISDN_USAGE_NET,
- lp->l2_proto,
- lp->l3_proto,
- lp->pre_device,
- lp->pre_channel,
- lp->msn)) < 0) {
- printk(KERN_WARNING "isdn_net_force_dial: No channel for %s\n",
- lp->netdev->dev->name);
- spin_unlock_irqrestore(&dev->lock, flags);
- return -EAGAIN;
- }
- lp->dialstate = 1;
- /* Connect interface with channel */
- isdn_net_bind_channel(lp, chi);
-#ifdef CONFIG_ISDN_PPP
- if (lp->p_encap == ISDN_NET_ENCAP_SYNCPPP)
- if (isdn_ppp_bind(lp) < 0) {
- isdn_net_unbind_channel(lp);
- spin_unlock_irqrestore(&dev->lock, flags);
- return -EAGAIN;
- }
-#endif
- /* Initiate dialing */
- spin_unlock_irqrestore(&dev->lock, flags);
- isdn_net_dial();
- return 0;
- } else
- return -EINVAL;
- } else
- return -EBUSY;
-}
-
-/*
- * This is called from certain upper protocol layers (multilink ppp
- * and x25iface encapsulation module) that want to initiate dialing
- * themselves.
- */
-int
-isdn_net_dial_req(isdn_net_local *lp)
-{
- /* is there a better error code? */
- if (!(ISDN_NET_DIALMODE(*lp) == ISDN_NET_DM_AUTO)) return -EBUSY;
-
- return isdn_net_force_dial_lp(lp);
-}
-
-/*
- * Force a net-interface to dial out.
- * This is always called from within userspace (ISDN_IOCTL_NET_DIAL).
- */
-int
-isdn_net_force_dial(char *name)
-{
- isdn_net_dev *p = isdn_net_findif(name);
-
- if (!p)
- return -ENODEV;
- return (isdn_net_force_dial_lp(p->local));
-}
-
-/* The ISDN-specific entries in the device structure. */
-static const struct net_device_ops isdn_netdev_ops = {
- .ndo_init = isdn_net_init,
- .ndo_open = isdn_net_open,
- .ndo_stop = isdn_net_close,
- .ndo_do_ioctl = isdn_net_ioctl,
-
- .ndo_start_xmit = isdn_net_start_xmit,
- .ndo_get_stats = isdn_net_get_stats,
- .ndo_tx_timeout = isdn_net_tx_timeout,
-};
-
-/*
- * Helper for alloc_netdev()
- */
-static void _isdn_setup(struct net_device *dev)
-{
- isdn_net_local *lp = netdev_priv(dev);
-
- ether_setup(dev);
-
- /* Setup the generic properties */
- dev->flags = IFF_NOARP | IFF_POINTOPOINT;
-
- /* isdn prepends a header in the tx path, can't share skbs */
- dev->priv_flags &= ~IFF_TX_SKB_SHARING;
- dev->header_ops = NULL;
- dev->netdev_ops = &isdn_netdev_ops;
-
- /* for clients with MPPP, higher values may be better */
- dev->tx_queue_len = 30;
-
- lp->p_encap = ISDN_NET_ENCAP_RAWIP;
- lp->magic = ISDN_NET_MAGIC;
- lp->last = lp;
- lp->next = lp;
- lp->isdn_device = -1;
- lp->isdn_channel = -1;
- lp->pre_device = -1;
- lp->pre_channel = -1;
- lp->exclusive = -1;
- lp->ppp_slot = -1;
- lp->pppbind = -1;
- skb_queue_head_init(&lp->super_tx_queue);
- lp->l2_proto = ISDN_PROTO_L2_X75I;
- lp->l3_proto = ISDN_PROTO_L3_TRANS;
- lp->triggercps = 6000;
- lp->slavedelay = 10 * HZ;
- lp->hupflags = ISDN_INHUP; /* Do hangup even on incoming calls */
- lp->onhtime = 10; /* Default hangup-time for saving costs */
- lp->dialmax = 1;
- /* Hangup before Callback, manual dial */
- lp->flags = ISDN_NET_CBHUP | ISDN_NET_DM_MANUAL;
- lp->cbdelay = 25; /* Wait 5 secs before Callback */
- lp->dialtimeout = -1; /* Infinite Dial-Timeout */
- lp->dialwait = 5 * HZ; /* Wait 5 sec. after failed dial */
- lp->dialstarted = 0; /* Jiffies of last dial-start */
- lp->dialwait_timer = 0; /* Jiffies of earliest next dial-start */
-}
-
-/*
- * Allocate a new network-interface and initialize its data structures.
- */
-char *
-isdn_net_new(char *name, struct net_device *master)
-{
- isdn_net_dev *netdev;
-
- /* Avoid creating an existing interface */
- if (isdn_net_findif(name)) {
- printk(KERN_WARNING "isdn_net: interface %s already exists\n", name);
- return NULL;
- }
- if (name == NULL)
- return NULL;
- if (!(netdev = kzalloc(sizeof(isdn_net_dev), GFP_KERNEL))) {
- printk(KERN_WARNING "isdn_net: Could not allocate net-device\n");
- return NULL;
- }
- netdev->dev = alloc_netdev(sizeof(isdn_net_local), name,
- NET_NAME_UNKNOWN, _isdn_setup);
- if (!netdev->dev) {
- printk(KERN_WARNING "isdn_net: Could not allocate network device\n");
- kfree(netdev);
- return NULL;
- }
- netdev->local = netdev_priv(netdev->dev);
-
- if (master) {
- /* Device shall be a slave */
- struct net_device *p = MASTER_TO_SLAVE(master);
- struct net_device *q = master;
-
- netdev->local->master = master;
- /* Put device at end of slave-chain */
- while (p) {
- q = p;
- p = MASTER_TO_SLAVE(p);
- }
- MASTER_TO_SLAVE(q) = netdev->dev;
- } else {
- /* Device shall be a master */
- /*
- * Watchdog timer (currently) for master only.
- */
- netdev->dev->watchdog_timeo = ISDN_NET_TX_TIMEOUT;
- if (register_netdev(netdev->dev) != 0) {
- printk(KERN_WARNING "isdn_net: Could not register net-device\n");
- free_netdev(netdev->dev);
- kfree(netdev);
- return NULL;
- }
- }
- netdev->queue = netdev->local;
- spin_lock_init(&netdev->queue_lock);
-
- netdev->local->netdev = netdev;
-
- INIT_WORK(&netdev->local->tqueue, isdn_net_softint);
- spin_lock_init(&netdev->local->xmit_lock);
-
- /* Put into the netdev-chain */
- netdev->next = (void *) dev->netdev;
- dev->netdev = netdev;
- return netdev->dev->name;
-}
-
-char *
-isdn_net_newslave(char *parm)
-{
- char *p = strchr(parm, ',');
- isdn_net_dev *n;
- char newname[10];
-
- if (p) {
- /* Slave-Name MUST not be empty or overflow 'newname' */
- if (strscpy(newname, p + 1, sizeof(newname)) <= 0)
- return NULL;
- *p = 0;
- /* Master must already exist */
- if (!(n = isdn_net_findif(parm)))
- return NULL;
- /* Master must be a real interface, not a slave */
- if (n->local->master)
- return NULL;
- /* Master must not be started yet */
- if (isdn_net_device_started(n))
- return NULL;
- return (isdn_net_new(newname, n->dev));
- }
- return NULL;
-}
-
-/*
- * Set interface-parameters.
- * Always set all parameters, so the user-level application is responsible
- * for not overwriting existing setups. It has to get the current
- * setup first, if only selected parameters are to be changed.
- */
-int
-isdn_net_setcfg(isdn_net_ioctl_cfg *cfg)
-{
- isdn_net_dev *p = isdn_net_findif(cfg->name);
- ulong features;
- int i;
- int drvidx;
- int chidx;
- char drvid[25];
-
- if (p) {
- isdn_net_local *lp = p->local;
-
- /* See if any registered driver supports the features we want */
- features = ((1 << cfg->l2_proto) << ISDN_FEATURE_L2_SHIFT) |
- ((1 << cfg->l3_proto) << ISDN_FEATURE_L3_SHIFT);
- for (i = 0; i < ISDN_MAX_DRIVERS; i++)
- if (dev->drv[i])
- if ((dev->drv[i]->interface->features & features) == features)
- break;
- if (i == ISDN_MAX_DRIVERS) {
- printk(KERN_WARNING "isdn_net: No driver with selected features\n");
- return -ENODEV;
- }
- if (lp->p_encap != cfg->p_encap) {
-#ifdef CONFIG_ISDN_X25
- struct concap_proto *cprot = p->cprot;
-#endif
- if (isdn_net_device_started(p)) {
- printk(KERN_WARNING "%s: cannot change encap when if is up\n",
- p->dev->name);
- return -EBUSY;
- }
-#ifdef CONFIG_ISDN_X25
- if (cprot && cprot->pops)
- cprot->pops->proto_del(cprot);
- p->cprot = NULL;
- lp->dops = NULL;
- /* ... , prepare for configuration of new one ... */
- switch (cfg->p_encap) {
- case ISDN_NET_ENCAP_X25IFACE:
- lp->dops = &isdn_concap_reliable_dl_dops;
- }
- /* ... and allocate new one ... */
- p->cprot = isdn_concap_new(cfg->p_encap);
- /* p -> cprot == NULL now if p_encap is not supported
- by means of the concap_proto mechanism */
- /* the protocol is not configured yet; this will
- happen later when isdn_net_reset() is called */
-#endif
- }
- switch (cfg->p_encap) {
- case ISDN_NET_ENCAP_SYNCPPP:
-#ifndef CONFIG_ISDN_PPP
- printk(KERN_WARNING "%s: SyncPPP support not configured\n",
- p->dev->name);
- return -EINVAL;
-#else
- p->dev->type = ARPHRD_PPP; /* change ARP type */
- p->dev->addr_len = 0;
-#endif
- break;
- case ISDN_NET_ENCAP_X25IFACE:
-#ifndef CONFIG_ISDN_X25
- printk(KERN_WARNING "%s: isdn-x25 support not configured\n",
- p->dev->name);
- return -EINVAL;
-#else
- p->dev->type = ARPHRD_X25; /* change ARP type */
- p->dev->addr_len = 0;
-#endif
- break;
- case ISDN_NET_ENCAP_CISCOHDLCK:
- break;
- default:
- if (cfg->p_encap >= 0 &&
- cfg->p_encap <= ISDN_NET_ENCAP_MAX_ENCAP)
- break;
- printk(KERN_WARNING
- "%s: encapsulation protocol %d not supported\n",
- p->dev->name, cfg->p_encap);
- return -EINVAL;
- }
- if (strlen(cfg->drvid)) {
- /* A bind has been requested ... */
- char *c,
- *e;
-
- if (strnlen(cfg->drvid, sizeof(cfg->drvid)) ==
- sizeof(cfg->drvid))
- return -EINVAL;
- drvidx = -1;
- chidx = -1;
- strcpy(drvid, cfg->drvid);
- if ((c = strchr(drvid, ','))) {
- /* The channel-number is appended to the driver-Id with a comma */
- chidx = (int) simple_strtoul(c + 1, &e, 10);
- if (e == c)
- chidx = -1;
- *c = '\0';
- }
- for (i = 0; i < ISDN_MAX_DRIVERS; i++)
- /* Lookup driver-Id in array */
- if (!(strcmp(dev->drvid[i], drvid))) {
- drvidx = i;
- break;
- }
- if ((drvidx == -1) || (chidx == -1))
- /* Either driver-Id or channel-number invalid */
- return -ENODEV;
- } else {
- /* Parameters are valid, so get them */
- drvidx = lp->pre_device;
- chidx = lp->pre_channel;
- }
- if (cfg->exclusive > 0) {
- unsigned long flags;
-
- /* If binding is exclusive, try to grab the channel */
- spin_lock_irqsave(&dev->lock, flags);
- if ((i = isdn_get_free_channel(ISDN_USAGE_NET,
- lp->l2_proto, lp->l3_proto, drvidx,
- chidx, lp->msn)) < 0) {
- /* Grab failed, because desired channel is in use */
- lp->exclusive = -1;
- spin_unlock_irqrestore(&dev->lock, flags);
- return -EBUSY;
- }
- /* All went ok, so update isdninfo */
- dev->usage[i] = ISDN_USAGE_EXCLUSIVE;
- isdn_info_update();
- spin_unlock_irqrestore(&dev->lock, flags);
- lp->exclusive = i;
- } else {
- /* Non-exclusive binding or unbind. */
- lp->exclusive = -1;
- if ((lp->pre_device != -1) && (cfg->exclusive == -1)) {
- isdn_unexclusive_channel(lp->pre_device, lp->pre_channel);
- isdn_free_channel(lp->pre_device, lp->pre_channel, ISDN_USAGE_NET);
- drvidx = -1;
- chidx = -1;
- }
- }
- strlcpy(lp->msn, cfg->eaz, sizeof(lp->msn));
- lp->pre_device = drvidx;
- lp->pre_channel = chidx;
- lp->onhtime = cfg->onhtime;
- lp->charge = cfg->charge;
- lp->l2_proto = cfg->l2_proto;
- lp->l3_proto = cfg->l3_proto;
- lp->cbdelay = cfg->cbdelay;
- lp->dialmax = cfg->dialmax;
- lp->triggercps = cfg->triggercps;
- lp->slavedelay = cfg->slavedelay * HZ;
- lp->pppbind = cfg->pppbind;
- lp->dialtimeout = cfg->dialtimeout >= 0 ? cfg->dialtimeout * HZ : -1;
- lp->dialwait = cfg->dialwait * HZ;
- if (cfg->secure)
- lp->flags |= ISDN_NET_SECURE;
- else
- lp->flags &= ~ISDN_NET_SECURE;
- if (cfg->cbhup)
- lp->flags |= ISDN_NET_CBHUP;
- else
- lp->flags &= ~ISDN_NET_CBHUP;
- switch (cfg->callback) {
- case 0:
- lp->flags &= ~(ISDN_NET_CALLBACK | ISDN_NET_CBOUT);
- break;
- case 1:
- lp->flags |= ISDN_NET_CALLBACK;
- lp->flags &= ~ISDN_NET_CBOUT;
- break;
- case 2:
- lp->flags |= ISDN_NET_CBOUT;
- lp->flags &= ~ISDN_NET_CALLBACK;
- break;
- }
- lp->flags &= ~ISDN_NET_DIALMODE_MASK; /* first all bits off */
- if (cfg->dialmode && !(cfg->dialmode & ISDN_NET_DIALMODE_MASK)) {
- /* old isdnctrl version, where only 0 or 1 is given */
- printk(KERN_WARNING
- "Old isdnctrl version detected! Please update.\n");
- lp->flags |= ISDN_NET_DM_OFF; /* turn on `off' bit */
- }
- else {
- lp->flags |= cfg->dialmode; /* turn on selected bits */
- }
- if (cfg->chargehup)
- lp->hupflags |= ISDN_CHARGEHUP;
- else
- lp->hupflags &= ~ISDN_CHARGEHUP;
- if (cfg->ihup)
- lp->hupflags |= ISDN_INHUP;
- else
- lp->hupflags &= ~ISDN_INHUP;
- if (cfg->chargeint > 10) {
- lp->hupflags |= ISDN_CHARGEHUP | ISDN_HAVECHARGE | ISDN_MANCHARGE;
- lp->chargeint = cfg->chargeint * HZ;
- }
- if (cfg->p_encap != lp->p_encap) {
- if (cfg->p_encap == ISDN_NET_ENCAP_RAWIP) {
- p->dev->header_ops = NULL;
- p->dev->flags = IFF_NOARP | IFF_POINTOPOINT;
- } else {
- p->dev->header_ops = &isdn_header_ops;
- if (cfg->p_encap == ISDN_NET_ENCAP_ETHER)
- p->dev->flags = IFF_BROADCAST | IFF_MULTICAST;
- else
- p->dev->flags = IFF_NOARP | IFF_POINTOPOINT;
- }
- }
- lp->p_encap = cfg->p_encap;
- return 0;
- }
- return -ENODEV;
-}
-
-/*
- * Perform get-interface-parameters.ioctl
- */
-int
-isdn_net_getcfg(isdn_net_ioctl_cfg *cfg)
-{
- isdn_net_dev *p = isdn_net_findif(cfg->name);
-
- if (p) {
- isdn_net_local *lp = p->local;
-
- strcpy(cfg->eaz, lp->msn);
- cfg->exclusive = lp->exclusive;
- if (lp->pre_device >= 0) {
- sprintf(cfg->drvid, "%s,%d", dev->drvid[lp->pre_device],
- lp->pre_channel);
- } else
- cfg->drvid[0] = '\0';
- cfg->onhtime = lp->onhtime;
- cfg->charge = lp->charge;
- cfg->l2_proto = lp->l2_proto;
- cfg->l3_proto = lp->l3_proto;
- cfg->p_encap = lp->p_encap;
- cfg->secure = (lp->flags & ISDN_NET_SECURE) ? 1 : 0;
- cfg->callback = 0;
- if (lp->flags & ISDN_NET_CALLBACK)
- cfg->callback = 1;
- if (lp->flags & ISDN_NET_CBOUT)
- cfg->callback = 2;
- cfg->cbhup = (lp->flags & ISDN_NET_CBHUP) ? 1 : 0;
- cfg->dialmode = lp->flags & ISDN_NET_DIALMODE_MASK;
- cfg->chargehup = (lp->hupflags & ISDN_CHARGEHUP) ? 1 : 0;
- cfg->ihup = (lp->hupflags & ISDN_INHUP) ? 1 : 0;
- cfg->cbdelay = lp->cbdelay;
- cfg->dialmax = lp->dialmax;
- cfg->triggercps = lp->triggercps;
- cfg->slavedelay = lp->slavedelay / HZ;
- cfg->chargeint = (lp->hupflags & ISDN_CHARGEHUP) ?
- (lp->chargeint / HZ) : 0;
- cfg->pppbind = lp->pppbind;
- cfg->dialtimeout = lp->dialtimeout >= 0 ? lp->dialtimeout / HZ : -1;
- cfg->dialwait = lp->dialwait / HZ;
- if (lp->slave) {
- if (strlen(lp->slave->name) >= 10)
- strcpy(cfg->slave, "too-long");
- else
- strcpy(cfg->slave, lp->slave->name);
- } else
- cfg->slave[0] = '\0';
- if (lp->master) {
- if (strlen(lp->master->name) >= 10)
- strcpy(cfg->master, "too-long");
- else
- strcpy(cfg->master, lp->master->name);
- } else
- cfg->master[0] = '\0';
- return 0;
- }
- return -ENODEV;
-}
-
-/*
- * Add a phone-number to an interface.
- */
-int
-isdn_net_addphone(isdn_net_ioctl_phone *phone)
-{
- isdn_net_dev *p = isdn_net_findif(phone->name);
- isdn_net_phone *n;
-
- if (p) {
- if (!(n = kmalloc(sizeof(isdn_net_phone), GFP_KERNEL)))
- return -ENOMEM;
- strlcpy(n->num, phone->phone, sizeof(n->num));
- n->next = p->local->phone[phone->outgoing & 1];
- p->local->phone[phone->outgoing & 1] = n;
- return 0;
- }
- return -ENODEV;
-}
-
-/*
- * Copy a string of all phone-numbers of an interface to user space.
- * This might sleep and must be called with the isdn semaphore down.
- */
-int
-isdn_net_getphones(isdn_net_ioctl_phone *phone, char __user *phones)
-{
- isdn_net_dev *p = isdn_net_findif(phone->name);
- int inout = phone->outgoing & 1;
- int more = 0;
- int count = 0;
- isdn_net_phone *n;
-
- if (!p)
- return -ENODEV;
- inout &= 1;
- for (n = p->local->phone[inout]; n; n = n->next) {
- if (more) {
- put_user(' ', phones++);
- count++;
- }
- if (copy_to_user(phones, n->num, strlen(n->num) + 1)) {
- return -EFAULT;
- }
- phones += strlen(n->num);
- count += strlen(n->num);
- more = 1;
- }
- put_user(0, phones);
- count++;
- return count;
-}
-
-/*
- * Copy a string containing the peer's phone number of a connected interface
- * to user space.
- */
-int
-isdn_net_getpeer(isdn_net_ioctl_phone *phone, isdn_net_ioctl_phone __user *peer)
-{
- isdn_net_dev *p = isdn_net_findif(phone->name);
- int ch, dv, idx;
-
- if (!p)
- return -ENODEV;
- /*
- * Theoretical race: while this executes, the remote number might
- * become invalid (hang up) or change (new connection), resulting
- * in a (partially) wrong number being copied to user space. This race
- * is currently ignored.
- */
- ch = p->local->isdn_channel;
- dv = p->local->isdn_device;
- if (ch < 0 && dv < 0)
- return -ENOTCONN;
- idx = isdn_dc2minor(dv, ch);
- if (idx < 0)
- return -ENODEV;
- /* for pre-bound channels, we need this extra check */
- if (strncmp(dev->num[idx], "???", 3) == 0)
- return -ENOTCONN;
- strncpy(phone->phone, dev->num[idx], ISDN_MSNLEN);
- phone->outgoing = USG_OUTGOING(dev->usage[idx]);
- if (copy_to_user(peer, phone, sizeof(*peer)))
- return -EFAULT;
- return 0;
-}
-/*
- * Delete a phone-number from an interface.
- */
-int
-isdn_net_delphone(isdn_net_ioctl_phone *phone)
-{
- isdn_net_dev *p = isdn_net_findif(phone->name);
- int inout = phone->outgoing & 1;
- isdn_net_phone *n;
- isdn_net_phone *m;
-
- if (p) {
- n = p->local->phone[inout];
- m = NULL;
- while (n) {
- if (!strcmp(n->num, phone->phone)) {
- if (p->local->dial == n)
- p->local->dial = n->next;
- if (m)
- m->next = n->next;
- else
- p->local->phone[inout] = n->next;
- kfree(n);
- return 0;
- }
- m = n;
- n = (isdn_net_phone *) n->next;
- }
- return -EINVAL;
- }
- return -ENODEV;
-}
-
-/*
- * Delete all phone-numbers of an interface.
- */
-static int
-isdn_net_rmallphone(isdn_net_dev *p)
-{
- isdn_net_phone *n;
- isdn_net_phone *m;
- int i;
-
- for (i = 0; i < 2; i++) {
- n = p->local->phone[i];
- while (n) {
- m = n->next;
- kfree(n);
- n = m;
- }
- p->local->phone[i] = NULL;
- }
- p->local->dial = NULL;
- return 0;
-}
-
-/*
- * Force a hangup of a network-interface.
- */
-int
-isdn_net_force_hangup(char *name)
-{
- isdn_net_dev *p = isdn_net_findif(name);
- struct net_device *q;
-
- if (p) {
- if (p->local->isdn_device < 0)
- return 1;
- q = p->local->slave;
- /* If this interface has slaves, do a hangup for them also. */
- while (q) {
- isdn_net_hangup(q);
- q = MASTER_TO_SLAVE(q);
- }
- isdn_net_hangup(p->dev);
- return 0;
- }
- return -ENODEV;
-}
-
-/*
- * Helper-function for isdn_net_rm: Do the real work.
- */
-static int
-isdn_net_realrm(isdn_net_dev *p, isdn_net_dev *q)
-{
- u_long flags;
-
- if (isdn_net_device_started(p)) {
- return -EBUSY;
- }
-#ifdef CONFIG_ISDN_X25
- if (p->cprot && p->cprot->pops)
- p->cprot->pops->proto_del(p->cprot);
-#endif
- /* Free all phone-entries */
- isdn_net_rmallphone(p);
- /* If interface is bound exclusive, free channel-usage */
- if (p->local->exclusive != -1)
- isdn_unexclusive_channel(p->local->pre_device, p->local->pre_channel);
- if (p->local->master) {
- /* It's a slave-device, so update master's slave-pointer if necessary */
- if (((isdn_net_local *) ISDN_MASTER_PRIV(p->local))->slave ==
- p->dev)
- ((isdn_net_local *)ISDN_MASTER_PRIV(p->local))->slave =
- p->local->slave;
- } else {
- /* Unregister only if it's a master-device */
- unregister_netdev(p->dev);
- }
- /* Unlink device from chain */
- spin_lock_irqsave(&dev->lock, flags);
- if (q)
- q->next = p->next;
- else
- dev->netdev = p->next;
- if (p->local->slave) {
- /* If this interface has a slave, remove it also */
- char *slavename = p->local->slave->name;
- isdn_net_dev *n = dev->netdev;
- q = NULL;
- while (n) {
- if (!strcmp(n->dev->name, slavename)) {
- spin_unlock_irqrestore(&dev->lock, flags);
- isdn_net_realrm(n, q);
- spin_lock_irqsave(&dev->lock, flags);
- break;
- }
- q = n;
- n = (isdn_net_dev *)n->next;
- }
- }
- spin_unlock_irqrestore(&dev->lock, flags);
- /* If no more net-devices remain, disable auto-hangup timer */
- if (dev->netdev == NULL)
- isdn_timer_ctrl(ISDN_TIMER_NETHANGUP, 0);
- free_netdev(p->dev);
- kfree(p);
-
- return 0;
-}
-
-/*
- * Remove a single network-interface.
- */
-int
-isdn_net_rm(char *name)
-{
- u_long flags;
- isdn_net_dev *p;
- isdn_net_dev *q;
-
- /* Search name in netdev-chain */
- spin_lock_irqsave(&dev->lock, flags);
- p = dev->netdev;
- q = NULL;
- while (p) {
- if (!strcmp(p->dev->name, name)) {
- spin_unlock_irqrestore(&dev->lock, flags);
- return (isdn_net_realrm(p, q));
- }
- q = p;
- p = (isdn_net_dev *) p->next;
- }
- spin_unlock_irqrestore(&dev->lock, flags);
- /* If no more net-devices remain, disable auto-hangup timer */
- if (dev->netdev == NULL)
- isdn_timer_ctrl(ISDN_TIMER_NETHANGUP, 0);
- return -ENODEV;
-}
-
-/*
- * Remove all network-interfaces
- */
-int
-isdn_net_rmall(void)
-{
- u_long flags;
- int ret;
-
- /* Walk through netdev-chain */
- spin_lock_irqsave(&dev->lock, flags);
- while (dev->netdev) {
- if (!dev->netdev->local->master) {
- /* Remove master-devices only, slaves get removed with their master */
- spin_unlock_irqrestore(&dev->lock, flags);
- if ((ret = isdn_net_realrm(dev->netdev, NULL))) {
- return ret;
- }
- spin_lock_irqsave(&dev->lock, flags);
- }
- }
- dev->netdev = NULL;
- spin_unlock_irqrestore(&dev->lock, flags);
- return 0;
-}
diff --git a/drivers/isdn/i4l/isdn_net.h b/drivers/isdn/i4l/isdn_net.h
deleted file mode 100644
index cca6d68da171..000000000000
--- a/drivers/isdn/i4l/isdn_net.h
+++ /dev/null
@@ -1,151 +0,0 @@
-/* $Id: isdn_net.h,v 1.1.2.2 2004/01/12 22:37:19 keil Exp $
- *
- * header for Linux ISDN subsystem, network related functions (linklevel).
- *
- * Copyright 1994-1999 by Fritz Elfert (fritz@isdn4linux.de)
- * Copyright 1995,96 by Thinking Objects Software GmbH Wuerzburg
- * Copyright 1995,96 by Michael Hipp (Michael.Hipp@student.uni-tuebingen.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-/* Definitions for hupflags: */
-#define ISDN_WAITCHARGE 1 /* did not get a charge info yet */
-#define ISDN_HAVECHARGE 2 /* We know a charge info */
-#define ISDN_CHARGEHUP 4 /* We want to use the charge mechanism */
-#define ISDN_INHUP 8 /* Even if incoming, close after huptimeout */
-#define ISDN_MANCHARGE 16 /* Charge Interval manually set */
-
-/*
- * Definitions for Cisco-HDLC header.
- */
-
-#define CISCO_ADDR_UNICAST 0x0f
-#define CISCO_ADDR_BROADCAST 0x8f
-#define CISCO_CTRL 0x00
-#define CISCO_TYPE_CDP 0x2000
-#define CISCO_TYPE_SLARP 0x8035
-#define CISCO_SLARP_REQUEST 0
-#define CISCO_SLARP_REPLY 1
-#define CISCO_SLARP_KEEPALIVE 2
-
-extern char *isdn_net_new(char *, struct net_device *);
-extern char *isdn_net_newslave(char *);
-extern int isdn_net_rm(char *);
-extern int isdn_net_rmall(void);
-extern int isdn_net_stat_callback(int, isdn_ctrl *);
-extern int isdn_net_setcfg(isdn_net_ioctl_cfg *);
-extern int isdn_net_getcfg(isdn_net_ioctl_cfg *);
-extern int isdn_net_addphone(isdn_net_ioctl_phone *);
-extern int isdn_net_getphones(isdn_net_ioctl_phone *, char __user *);
-extern int isdn_net_getpeer(isdn_net_ioctl_phone *, isdn_net_ioctl_phone __user *);
-extern int isdn_net_delphone(isdn_net_ioctl_phone *);
-extern int isdn_net_find_icall(int, int, int, setup_parm *);
-extern void isdn_net_hangup(struct net_device *);
-extern void isdn_net_dial(void);
-extern void isdn_net_autohup(void);
-extern int isdn_net_force_hangup(char *);
-extern int isdn_net_force_dial(char *);
-extern isdn_net_dev *isdn_net_findif(char *);
-extern int isdn_net_rcv_skb(int, struct sk_buff *);
-extern int isdn_net_dial_req(isdn_net_local *);
-extern void isdn_net_writebuf_skb(isdn_net_local *lp, struct sk_buff *skb);
-extern void isdn_net_write_super(isdn_net_local *lp, struct sk_buff *skb);
-
-#define ISDN_NET_MAX_QUEUE_LENGTH 2
-
-#define ISDN_MASTER_PRIV(lp) ((isdn_net_local *) netdev_priv(lp->master))
-#define ISDN_SLAVE_PRIV(lp) ((isdn_net_local *) netdev_priv(lp->slave))
-#define MASTER_TO_SLAVE(master) \
- (((isdn_net_local *) netdev_priv(master))->slave)
-
-/*
- * is this particular channel busy?
- */
-static __inline__ int isdn_net_lp_busy(isdn_net_local *lp)
-{
- if (atomic_read(&lp->frame_cnt) < ISDN_NET_MAX_QUEUE_LENGTH)
- return 0;
- else
- return 1;
-}
-
-/*
- * For the given net device, this will get a non-busy channel out of the
- * corresponding bundle. The returned channel is locked.
- */
-static __inline__ isdn_net_local *isdn_net_get_locked_lp(isdn_net_dev *nd)
-{
- unsigned long flags;
- isdn_net_local *lp;
-
- spin_lock_irqsave(&nd->queue_lock, flags);
- lp = nd->queue; /* get lp on top of queue */
- while (isdn_net_lp_busy(nd->queue)) {
- nd->queue = nd->queue->next;
- if (nd->queue == lp) { /* not found -- should never happen */
- lp = NULL;
- goto errout;
- }
- }
- lp = nd->queue;
- nd->queue = nd->queue->next;
- spin_unlock_irqrestore(&nd->queue_lock, flags);
- spin_lock(&lp->xmit_lock);
- local_bh_disable();
- return lp;
-errout:
- spin_unlock_irqrestore(&nd->queue_lock, flags);
- return lp;
-}
-
-/*
- * add a channel to a bundle
- */
-static __inline__ void isdn_net_add_to_bundle(isdn_net_dev *nd, isdn_net_local *nlp)
-{
- isdn_net_local *lp;
- unsigned long flags;
-
- spin_lock_irqsave(&nd->queue_lock, flags);
-
- lp = nd->queue;
-// printk(KERN_DEBUG "%s: lp:%s(%p) nlp:%s(%p) last(%p)\n",
-// __func__, lp->name, lp, nlp->name, nlp, lp->last);
- nlp->last = lp->last;
- lp->last->next = nlp;
- lp->last = nlp;
- nlp->next = lp;
- nd->queue = nlp;
-
- spin_unlock_irqrestore(&nd->queue_lock, flags);
-}
-/*
- * remove a channel from the bundle it belongs to
- */
-static __inline__ void isdn_net_rm_from_bundle(isdn_net_local *lp)
-{
- isdn_net_local *master_lp = lp;
- unsigned long flags;
-
- if (lp->master)
- master_lp = ISDN_MASTER_PRIV(lp);
-
-// printk(KERN_DEBUG "%s: lp:%s(%p) mlp:%s(%p) last(%p) next(%p) mndq(%p)\n",
-// __func__, lp->name, lp, master_lp->name, master_lp, lp->last, lp->next, master_lp->netdev->queue);
- spin_lock_irqsave(&master_lp->netdev->queue_lock, flags);
- lp->last->next = lp->next;
- lp->next->last = lp->last;
- if (master_lp->netdev->queue == lp) {
- master_lp->netdev->queue = lp->next;
- if (lp->next == lp) { /* last in queue */
- master_lp->netdev->queue = master_lp->netdev->local;
- }
- }
- lp->next = lp->last = lp; /* (re)set own pointers */
-// printk(KERN_DEBUG "%s: mndq(%p)\n",
-// __func__, master_lp->netdev->queue);
- spin_unlock_irqrestore(&master_lp->netdev->queue_lock, flags);
-}
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
deleted file mode 100644
index 7e0f419c14f8..000000000000
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ /dev/null
@@ -1,3046 +0,0 @@
-/* $Id: isdn_ppp.c,v 1.1.2.3 2004/02/10 01:07:13 keil Exp $
- *
- * Linux ISDN subsystem, functions for synchronous PPP (linklevel).
- *
- * Copyright 1995,96 by Michael Hipp (Michael.Hipp@student.uni-tuebingen.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/isdn.h>
-#include <linux/poll.h>
-#include <linux/ppp-comp.h>
-#include <linux/slab.h>
-#ifdef CONFIG_IPPP_FILTER
-#include <linux/filter.h>
-#endif
-
-#include "isdn_common.h"
-#include "isdn_ppp.h"
-#include "isdn_net.h"
-
-#ifndef PPP_IPX
-#define PPP_IPX 0x002b
-#endif
-
-/* Prototypes */
-static int isdn_ppp_fill_rq(unsigned char *buf, int len, int proto, int slot);
-static int isdn_ppp_closewait(int slot);
-static void isdn_ppp_push_higher(isdn_net_dev *net_dev, isdn_net_local *lp,
- struct sk_buff *skb, int proto);
-static int isdn_ppp_if_get_unit(char *namebuf);
-static int isdn_ppp_set_compressor(struct ippp_struct *is, struct isdn_ppp_comp_data *);
-static struct sk_buff *isdn_ppp_decompress(struct sk_buff *,
- struct ippp_struct *, struct ippp_struct *, int *proto);
-static void isdn_ppp_receive_ccp(isdn_net_dev *net_dev, isdn_net_local *lp,
- struct sk_buff *skb, int proto);
-static struct sk_buff *isdn_ppp_compress(struct sk_buff *skb_in, int *proto,
- struct ippp_struct *is, struct ippp_struct *master, int type);
-static void isdn_ppp_send_ccp(isdn_net_dev *net_dev, isdn_net_local *lp,
- struct sk_buff *skb);
-
-/* New CCP stuff */
-static void isdn_ppp_ccp_kickup(struct ippp_struct *is);
-static void isdn_ppp_ccp_xmit_reset(struct ippp_struct *is, int proto,
- unsigned char code, unsigned char id,
- unsigned char *data, int len);
-static struct ippp_ccp_reset *isdn_ppp_ccp_reset_alloc(struct ippp_struct *is);
-static void isdn_ppp_ccp_reset_free(struct ippp_struct *is);
-static void isdn_ppp_ccp_reset_free_state(struct ippp_struct *is,
- unsigned char id);
-static void isdn_ppp_ccp_timer_callback(struct timer_list *t);
-static struct ippp_ccp_reset_state *isdn_ppp_ccp_reset_alloc_state(struct ippp_struct *is,
- unsigned char id);
-static void isdn_ppp_ccp_reset_trans(struct ippp_struct *is,
- struct isdn_ppp_resetparams *rp);
-static void isdn_ppp_ccp_reset_ack_rcvd(struct ippp_struct *is,
- unsigned char id);
-
-
-
-#ifdef CONFIG_ISDN_MPP
-static ippp_bundle *isdn_ppp_bundle_arr = NULL;
-
-static int isdn_ppp_mp_bundle_array_init(void);
-static int isdn_ppp_mp_init(isdn_net_local *lp, ippp_bundle *add_to);
-static void isdn_ppp_mp_receive(isdn_net_dev *net_dev, isdn_net_local *lp,
- struct sk_buff *skb);
-static void isdn_ppp_mp_cleanup(isdn_net_local *lp);
-
-static int isdn_ppp_bundle(struct ippp_struct *, int unit);
-#endif /* CONFIG_ISDN_MPP */
-
-char *isdn_ppp_revision = "$Revision: 1.1.2.3 $";
-
-static struct ippp_struct *ippp_table[ISDN_MAX_CHANNELS];
-
-static struct isdn_ppp_compressor *ipc_head = NULL;
-
-/*
- * frame log (debug)
- */
-static void
-isdn_ppp_frame_log(char *info, char *data, int len, int maxlen, int unit, int slot)
-{
- int cnt,
- j,
- i;
- char buf[80];
-
- if (len < maxlen)
- maxlen = len;
-
- for (i = 0, cnt = 0; cnt < maxlen; i++) {
- for (j = 0; j < 16 && cnt < maxlen; j++, cnt++)
- sprintf(buf + j * 3, "%02x ", (unsigned char)data[cnt]);
- printk(KERN_DEBUG "[%d/%d].%s[%d]: %s\n", unit, slot, info, i, buf);
- }
-}
-
-/*
- * unbind isdn_net_local <=> ippp-device
- * note: it can happen that we hang up/free the master before the slaves;
- * in this case we bind another lp to the master device
- */
-int
-isdn_ppp_free(isdn_net_local *lp)
-{
- struct ippp_struct *is;
-
- if (lp->ppp_slot < 0 || lp->ppp_slot >= ISDN_MAX_CHANNELS) {
- printk(KERN_ERR "%s: ppp_slot(%d) out of range\n",
- __func__, lp->ppp_slot);
- return 0;
- }
-
-#ifdef CONFIG_ISDN_MPP
- spin_lock(&lp->netdev->pb->lock);
-#endif
- isdn_net_rm_from_bundle(lp);
-#ifdef CONFIG_ISDN_MPP
- if (lp->netdev->pb->ref_ct == 1) /* last link in queue? */
- isdn_ppp_mp_cleanup(lp);
-
- lp->netdev->pb->ref_ct--;
- spin_unlock(&lp->netdev->pb->lock);
-#endif /* CONFIG_ISDN_MPP */
- if (lp->ppp_slot < 0 || lp->ppp_slot >= ISDN_MAX_CHANNELS) {
- printk(KERN_ERR "%s: ppp_slot(%d) now invalid\n",
- __func__, lp->ppp_slot);
- return 0;
- }
- is = ippp_table[lp->ppp_slot];
- if ((is->state & IPPP_CONNECT))
- isdn_ppp_closewait(lp->ppp_slot); /* force wakeup on ippp device */
- else if (is->state & IPPP_ASSIGNED)
- is->state = IPPP_OPEN; /* fallback to 'OPEN but not ASSIGNED' state */
-
- if (is->debug & 0x1)
- printk(KERN_DEBUG "isdn_ppp_free %d %lx %lx\n", lp->ppp_slot, (long) lp, (long) is->lp);
-
- is->lp = NULL; /* link is down .. set lp to NULL */
- lp->ppp_slot = -1; /* is this OK ?? */
-
- return 0;
-}
-
-/*
- * bind isdn_net_local <=> ippp-device
- *
- * This function is always called with dev->lock held, so
- * no additional lock is needed
- */
-int
-isdn_ppp_bind(isdn_net_local *lp)
-{
- int i;
- int unit = 0;
- struct ippp_struct *is;
- int retval;
-
- if (lp->pppbind < 0) { /* device bound to an ippp device? */
- isdn_net_dev *net_dev = dev->netdev;
- char exclusive[ISDN_MAX_CHANNELS]; /* exclusive flags */
- memset(exclusive, 0, ISDN_MAX_CHANNELS);
- while (net_dev) { /* step through net devices to find exclusive minors */
- isdn_net_local *lp = net_dev->local;
- if (lp->pppbind >= 0)
- exclusive[lp->pppbind] = 1;
- net_dev = net_dev->next;
- }
- /*
- * search a free device / slot
- */
- for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
- if (ippp_table[i]->state == IPPP_OPEN && !exclusive[ippp_table[i]->minor]) { /* OPEN, but not connected! */
- break;
- }
- }
- } else {
- for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
- if (ippp_table[i]->minor == lp->pppbind &&
- (ippp_table[i]->state & IPPP_OPEN) == IPPP_OPEN)
- break;
- }
- }
-
- if (i >= ISDN_MAX_CHANNELS) {
- printk(KERN_WARNING "isdn_ppp_bind: Can't find a (free) connection to the ipppd daemon.\n");
- retval = -1;
- goto out;
- }
- /* get unit number from interface name .. ugly! */
- unit = isdn_ppp_if_get_unit(lp->netdev->dev->name);
- if (unit < 0) {
- printk(KERN_ERR "isdn_ppp_bind: illegal interface name %s.\n",
- lp->netdev->dev->name);
- retval = -1;
- goto out;
- }
-
- lp->ppp_slot = i;
- is = ippp_table[i];
- is->lp = lp;
- is->unit = unit;
- is->state = IPPP_OPEN | IPPP_ASSIGNED; /* assigned to a netdevice but not connected */
-#ifdef CONFIG_ISDN_MPP
- retval = isdn_ppp_mp_init(lp, NULL);
- if (retval < 0)
- goto out;
-#endif /* CONFIG_ISDN_MPP */
-
- retval = lp->ppp_slot;
-
-out:
- return retval;
-}
-
-/*
- * kick the ipppd on the device
- * (wakes up daemon after B-channel connect)
- */
-
-void
-isdn_ppp_wakeup_daemon(isdn_net_local *lp)
-{
- if (lp->ppp_slot < 0 || lp->ppp_slot >= ISDN_MAX_CHANNELS) {
- printk(KERN_ERR "%s: ppp_slot(%d) out of range\n",
- __func__, lp->ppp_slot);
- return;
- }
- ippp_table[lp->ppp_slot]->state = IPPP_OPEN | IPPP_CONNECT | IPPP_NOBLOCK;
- wake_up_interruptible(&ippp_table[lp->ppp_slot]->wq);
-}
-
-/*
- * there was a hangup on the netdevice
- * force wakeup of the ippp device
- * go into 'device waits for release' state
- */
-static int
-isdn_ppp_closewait(int slot)
-{
- struct ippp_struct *is;
-
- if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
- printk(KERN_ERR "%s: slot(%d) out of range\n",
- __func__, slot);
- return 0;
- }
- is = ippp_table[slot];
- if (is->state)
- wake_up_interruptible(&is->wq);
- is->state = IPPP_CLOSEWAIT;
- return 1;
-}
-
-/*
- * isdn_ppp_get_slot
- */
-
-static int
-isdn_ppp_get_slot(void)
-{
- int i;
- for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
- if (!ippp_table[i]->state)
- return i;
- }
- return -1;
-}
-
-/*
- * isdn_ppp_open
- */
-
-int
-isdn_ppp_open(int min, struct file *file)
-{
- int slot;
- struct ippp_struct *is;
-
- if (min < 0 || min >= ISDN_MAX_CHANNELS)
- return -ENODEV;
-
- slot = isdn_ppp_get_slot();
- if (slot < 0) {
- return -EBUSY;
- }
- is = file->private_data = ippp_table[slot];
-
- printk(KERN_DEBUG "ippp, open, slot: %d, minor: %d, state: %04x\n",
- slot, min, is->state);
-
- /* compression stuff */
- is->link_compressor = is->compressor = NULL;
- is->link_decompressor = is->decompressor = NULL;
- is->link_comp_stat = is->comp_stat = NULL;
- is->link_decomp_stat = is->decomp_stat = NULL;
- is->compflags = 0;
-
- is->reset = isdn_ppp_ccp_reset_alloc(is);
- if (!is->reset)
- return -ENOMEM;
-
- is->lp = NULL;
- is->mp_seqno = 0; /* MP sequence number */
- is->pppcfg = 0; /* ppp configuration */
- is->mpppcfg = 0; /* mppp configuration */
- is->last_link_seqno = -1; /* MP: maybe set to Bundle-MIN, when joining a bundle ?? */
- is->unit = -1; /* set, when we have our interface */
- is->mru = 1524; /* MRU, default 1524 */
- is->maxcid = 16; /* VJ: maxcid */
- is->tk = current;
- init_waitqueue_head(&is->wq);
- is->first = is->rq + NUM_RCV_BUFFS - 1; /* receive queue */
- is->last = is->rq;
- is->minor = min;
-#ifdef CONFIG_ISDN_PPP_VJ
- /*
- * VJ header compression init
- */
- is->slcomp = slhc_init(16, 16); /* not necessary for the 2nd link in a bundle */
- if (IS_ERR(is->slcomp)) {
- isdn_ppp_ccp_reset_free(is);
- return PTR_ERR(is->slcomp);
- }
-#endif
-#ifdef CONFIG_IPPP_FILTER
- is->pass_filter = NULL;
- is->active_filter = NULL;
-#endif
- is->state = IPPP_OPEN;
-
- return 0;
-}
-
-/*
- * release ippp device
- */
-void
-isdn_ppp_release(int min, struct file *file)
-{
- int i;
- struct ippp_struct *is;
-
- if (min < 0 || min >= ISDN_MAX_CHANNELS)
- return;
- is = file->private_data;
-
- if (!is) {
- printk(KERN_ERR "%s: no file->private_data\n", __func__);
- return;
- }
- if (is->debug & 0x1)
- printk(KERN_DEBUG "ippp: release, minor: %d %lx\n", min, (long) is->lp);
-
- if (is->lp) { /* a lp address says: this link is still up */
- isdn_net_dev *p = is->lp->netdev;
-
- if (!p) {
- printk(KERN_ERR "%s: no lp->netdev\n", __func__);
- return;
- }
- is->state &= ~IPPP_CONNECT; /* -> effect: no call of wakeup */
- /*
- * isdn_net_hangup() calls isdn_ppp_free()
- * isdn_ppp_free() sets is->lp to NULL and lp->ppp_slot to -1
- * removing the IPPP_CONNECT flag prevents isdn_ppp_wakeup_daemon() from being called
- */
- isdn_net_hangup(p->dev);
- }
- for (i = 0; i < NUM_RCV_BUFFS; i++) {
- kfree(is->rq[i].buf);
- is->rq[i].buf = NULL;
- }
- is->first = is->rq + NUM_RCV_BUFFS - 1; /* receive queue */
- is->last = is->rq;
-
-#ifdef CONFIG_ISDN_PPP_VJ
-/* TODO: if this was the previous master: link the slcomp to the new master */
- slhc_free(is->slcomp);
- is->slcomp = NULL;
-#endif
-#ifdef CONFIG_IPPP_FILTER
- if (is->pass_filter) {
- bpf_prog_destroy(is->pass_filter);
- is->pass_filter = NULL;
- }
-
- if (is->active_filter) {
- bpf_prog_destroy(is->active_filter);
- is->active_filter = NULL;
- }
-#endif
-
-/* TODO: if this was the previous master: link the stuff to the new master */
- if (is->comp_stat)
- is->compressor->free(is->comp_stat);
- if (is->link_comp_stat)
- is->link_compressor->free(is->link_comp_stat);
- if (is->link_decomp_stat)
- is->link_decompressor->free(is->link_decomp_stat);
- if (is->decomp_stat)
- is->decompressor->free(is->decomp_stat);
- is->compressor = is->link_compressor = NULL;
- is->decompressor = is->link_decompressor = NULL;
- is->comp_stat = is->link_comp_stat = NULL;
- is->decomp_stat = is->link_decomp_stat = NULL;
-
- /* Clean up if necessary */
- if (is->reset)
- isdn_ppp_ccp_reset_free(is);
-
- /* this slot is ready for new connections */
- is->state = 0;
-}
-
-/*
- * get_arg .. ioctl helper
- */
-static int
-get_arg(void __user *b, void *val, int len)
-{
- if (len <= 0)
- len = sizeof(void *);
- if (copy_from_user(val, b, len))
- return -EFAULT;
- return 0;
-}
-
-/*
- * set arg .. ioctl helper
- */
-static int
-set_arg(void __user *b, void *val, int len)
-{
- if (len <= 0)
- len = sizeof(void *);
- if (copy_to_user(b, val, len))
- return -EFAULT;
- return 0;
-}
-
-#ifdef CONFIG_IPPP_FILTER
-static int get_filter(void __user *arg, struct sock_filter **p)
-{
- struct sock_fprog uprog;
- struct sock_filter *code = NULL;
- int len;
-
- if (copy_from_user(&uprog, arg, sizeof(uprog)))
- return -EFAULT;
-
- if (!uprog.len) {
- *p = NULL;
- return 0;
- }
-
- /* uprog.len is unsigned short, so no overflow here */
- len = uprog.len * sizeof(struct sock_filter);
- code = memdup_user(uprog.filter, len);
- if (IS_ERR(code))
- return PTR_ERR(code);
-
- *p = code;
- return uprog.len;
-}
-#endif /* CONFIG_IPPP_FILTER */
-
-/*
- * ippp device ioctl
- */
-int
-isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg)
-{
- unsigned long val;
- int r, i, j;
- struct ippp_struct *is;
- isdn_net_local *lp;
- struct isdn_ppp_comp_data data;
- void __user *argp = (void __user *)arg;
-
- is = file->private_data;
- lp = is->lp;
-
- if (is->debug & 0x1)
- printk(KERN_DEBUG "isdn_ppp_ioctl: minor: %d cmd: %x state: %x\n", min, cmd, is->state);
-
- if (!(is->state & IPPP_OPEN))
- return -EINVAL;
-
- switch (cmd) {
- case PPPIOCBUNDLE:
-#ifdef CONFIG_ISDN_MPP
- if (!(is->state & IPPP_CONNECT))
- return -EINVAL;
- if ((r = get_arg(argp, &val, sizeof(val))))
- return r;
- printk(KERN_DEBUG "iPPP-bundle: minor: %d, slave unit: %d, master unit: %d\n",
- (int) min, (int) is->unit, (int) val);
- return isdn_ppp_bundle(is, val);
-#else
- return -1;
-#endif
- break;
- case PPPIOCGUNIT: /* get ppp/isdn unit number */
- if ((r = set_arg(argp, &is->unit, sizeof(is->unit))))
- return r;
- break;
- case PPPIOCGIFNAME:
- if (!lp)
- return -EINVAL;
- if ((r = set_arg(argp, lp->netdev->dev->name,
- strlen(lp->netdev->dev->name))))
- return r;
- break;
- case PPPIOCGMPFLAGS: /* get configuration flags */
- if ((r = set_arg(argp, &is->mpppcfg, sizeof(is->mpppcfg))))
- return r;
- break;
- case PPPIOCSMPFLAGS: /* set configuration flags */
- if ((r = get_arg(argp, &val, sizeof(val))))
- return r;
- is->mpppcfg = val;
- break;
- case PPPIOCGFLAGS: /* get configuration flags */
- if ((r = set_arg(argp, &is->pppcfg, sizeof(is->pppcfg))))
- return r;
- break;
- case PPPIOCSFLAGS: /* set configuration flags */
- if ((r = get_arg(argp, &val, sizeof(val)))) {
- return r;
- }
- if (val & SC_ENABLE_IP && !(is->pppcfg & SC_ENABLE_IP) && (is->state & IPPP_CONNECT)) {
- if (lp) {
- /* OK .. we are ready to send buffers */
- is->pppcfg = val; /* isdn_ppp_xmit test for SC_ENABLE_IP !!! */
- netif_wake_queue(lp->netdev->dev);
- break;
- }
- }
- is->pppcfg = val;
- break;
- case PPPIOCGIDLE: /* get idle time information */
- if (lp) {
- struct ppp_idle pidle;
- pidle.xmit_idle = pidle.recv_idle = lp->huptimer;
- if ((r = set_arg(argp, &pidle, sizeof(struct ppp_idle))))
- return r;
- }
- break;
- case PPPIOCSMRU: /* set receive unit size for PPP */
- if ((r = get_arg(argp, &val, sizeof(val))))
- return r;
- is->mru = val;
- break;
- case PPPIOCSMPMRU:
- break;
- case PPPIOCSMPMTU:
- break;
- case PPPIOCSMAXCID: /* set the maximum compression slot id */
- if ((r = get_arg(argp, &val, sizeof(val))))
- return r;
- val++;
- if (is->maxcid != val) {
-#ifdef CONFIG_ISDN_PPP_VJ
- struct slcompress *sltmp;
-#endif
- if (is->debug & 0x1)
- printk(KERN_DEBUG "ippp, ioctl: changed MAXCID to %ld\n", val);
- is->maxcid = val;
-#ifdef CONFIG_ISDN_PPP_VJ
- sltmp = slhc_init(16, val);
- if (IS_ERR(sltmp))
- return PTR_ERR(sltmp);
- if (is->slcomp)
- slhc_free(is->slcomp);
- is->slcomp = sltmp;
-#endif
- }
- break;
- case PPPIOCGDEBUG:
- if ((r = set_arg(argp, &is->debug, sizeof(is->debug))))
- return r;
- break;
- case PPPIOCSDEBUG:
- if ((r = get_arg(argp, &val, sizeof(val))))
- return r;
- is->debug = val;
- break;
- case PPPIOCGCOMPRESSORS:
- {
- unsigned long protos[8] = {0,};
- struct isdn_ppp_compressor *ipc = ipc_head;
- while (ipc) {
- j = ipc->num / (sizeof(long) * 8);
- i = ipc->num % (sizeof(long) * 8);
- if (j < 8)
- protos[j] |= (1UL << i);
- ipc = ipc->next;
- }
- if ((r = set_arg(argp, protos, 8 * sizeof(long))))
- return r;
- }
- break;
- case PPPIOCSCOMPRESSOR:
- if ((r = get_arg(argp, &data, sizeof(struct isdn_ppp_comp_data))))
- return r;
- return isdn_ppp_set_compressor(is, &data);
- case PPPIOCGCALLINFO:
- {
- struct pppcallinfo pci;
- memset((char *)&pci, 0, sizeof(struct pppcallinfo));
- if (lp)
- {
- strncpy(pci.local_num, lp->msn, 63);
- if (lp->dial) {
- strncpy(pci.remote_num, lp->dial->num, 63);
- }
- pci.charge_units = lp->charge;
- if (lp->outgoing)
- pci.calltype = CALLTYPE_OUTGOING;
- else
- pci.calltype = CALLTYPE_INCOMING;
- if (lp->flags & ISDN_NET_CALLBACK)
- pci.calltype |= CALLTYPE_CALLBACK;
- }
- return set_arg(argp, &pci, sizeof(struct pppcallinfo));
- }
-#ifdef CONFIG_IPPP_FILTER
- case PPPIOCSPASS:
- {
- struct sock_fprog_kern fprog;
- struct sock_filter *code;
- int err, len = get_filter(argp, &code);
-
- if (len < 0)
- return len;
-
- fprog.len = len;
- fprog.filter = code;
-
- if (is->pass_filter) {
- bpf_prog_destroy(is->pass_filter);
- is->pass_filter = NULL;
- }
- if (fprog.filter != NULL)
- err = bpf_prog_create(&is->pass_filter, &fprog);
- else
- err = 0;
- kfree(code);
-
- return err;
- }
- case PPPIOCSACTIVE:
- {
- struct sock_fprog_kern fprog;
- struct sock_filter *code;
- int err, len = get_filter(argp, &code);
-
- if (len < 0)
- return len;
-
- fprog.len = len;
- fprog.filter = code;
-
- if (is->active_filter) {
- bpf_prog_destroy(is->active_filter);
- is->active_filter = NULL;
- }
- if (fprog.filter != NULL)
- err = bpf_prog_create(&is->active_filter, &fprog);
- else
- err = 0;
- kfree(code);
-
- return err;
- }
-#endif /* CONFIG_IPPP_FILTER */
- default:
- break;
- }
- return 0;
-}
-
-__poll_t
-isdn_ppp_poll(struct file *file, poll_table *wait)
-{
- __poll_t mask;
- struct ippp_buf_queue *bf, *bl;
- u_long flags;
- struct ippp_struct *is;
-
- is = file->private_data;
-
- if (is->debug & 0x2)
- printk(KERN_DEBUG "isdn_ppp_poll: minor: %d\n",
- iminor(file_inode(file)));
-
- /* just registers wait_queue hook. This doesn't really wait. */
- poll_wait(file, &is->wq, wait);
-
- if (!(is->state & IPPP_OPEN)) {
- if (is->state == IPPP_CLOSEWAIT)
- return EPOLLHUP;
- printk(KERN_DEBUG "isdn_ppp: device not open\n");
- return EPOLLERR;
- }
- /* we're always ready to send .. */
- mask = EPOLLOUT | EPOLLWRNORM;
-
- spin_lock_irqsave(&is->buflock, flags);
- bl = is->last;
- bf = is->first;
- /*
- * if IPPP_NOBLOCK is set we return even if we have nothing to read
- */
- if (bf->next != bl || (is->state & IPPP_NOBLOCK)) {
- is->state &= ~IPPP_NOBLOCK;
- mask |= EPOLLIN | EPOLLRDNORM;
- }
- spin_unlock_irqrestore(&is->buflock, flags);
- return mask;
-}
-
-/*
- * fill up isdn_ppp_read() queue ..
- */
-
-static int
-isdn_ppp_fill_rq(unsigned char *buf, int len, int proto, int slot)
-{
- struct ippp_buf_queue *bf, *bl;
- u_long flags;
- u_char *nbuf;
- struct ippp_struct *is;
-
- if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
- printk(KERN_WARNING "ippp: illegal slot(%d).\n", slot);
- return 0;
- }
- is = ippp_table[slot];
-
- if (!(is->state & IPPP_CONNECT)) {
- printk(KERN_DEBUG "ippp: device not activated.\n");
- return 0;
- }
- nbuf = kmalloc(len + 4, GFP_ATOMIC);
- if (!nbuf) {
- printk(KERN_WARNING "ippp: Can't alloc buf\n");
- return 0;
- }
- nbuf[0] = PPP_ALLSTATIONS;
- nbuf[1] = PPP_UI;
- nbuf[2] = proto >> 8;
- nbuf[3] = proto & 0xff;
- memcpy(nbuf + 4, buf, len);
-
- spin_lock_irqsave(&is->buflock, flags);
- bf = is->first;
- bl = is->last;
-
- if (bf == bl) {
- printk(KERN_WARNING "ippp: Queue is full; discarding first buffer\n");
- bf = bf->next;
- kfree(bf->buf);
- is->first = bf;
- }
- bl->buf = (char *) nbuf;
- bl->len = len + 4;
-
- is->last = bl->next;
- spin_unlock_irqrestore(&is->buflock, flags);
- wake_up_interruptible(&is->wq);
- return len;
-}
-
-/*
- * read() .. non-blocking: ipppd calls it only after select()
- * reports that there is data
- */
-
-int
-isdn_ppp_read(int min, struct file *file, char __user *buf, int count)
-{
- struct ippp_struct *is;
- struct ippp_buf_queue *b;
- u_long flags;
- u_char *save_buf;
-
- is = file->private_data;
-
- if (!(is->state & IPPP_OPEN))
- return 0;
-
- spin_lock_irqsave(&is->buflock, flags);
- b = is->first->next;
- save_buf = b->buf;
- if (!save_buf) {
- spin_unlock_irqrestore(&is->buflock, flags);
- return -EAGAIN;
- }
- if (b->len < count)
- count = b->len;
- b->buf = NULL;
- is->first = b;
-
- spin_unlock_irqrestore(&is->buflock, flags);
- if (copy_to_user(buf, save_buf, count))
- count = -EFAULT;
- kfree(save_buf);
-
- return count;
-}
-
-/*
- * ipppd wants to write a packet to the card .. non-blocking
- */
-
-int
-isdn_ppp_write(int min, struct file *file, const char __user *buf, int count)
-{
- isdn_net_local *lp;
- struct ippp_struct *is;
- int proto;
-
- is = file->private_data;
-
- if (!(is->state & IPPP_CONNECT))
- return 0;
-
- lp = is->lp;
-
- /* -> push it directly to the lowlevel interface */
-
- if (!lp)
- printk(KERN_DEBUG "isdn_ppp_write: lp == NULL\n");
- else {
- if (lp->isdn_device < 0 || lp->isdn_channel < 0) {
- unsigned char protobuf[4];
- /*
- * Don't reset huptimer for
- * LCP packets. (Echo requests).
- */
- if (copy_from_user(protobuf, buf, 4))
- return -EFAULT;
-
- proto = PPP_PROTOCOL(protobuf);
- if (proto != PPP_LCP)
- lp->huptimer = 0;
-
- return 0;
- }
-
- if ((dev->drv[lp->isdn_device]->flags & DRV_FLAG_RUNNING) &&
- lp->dialstate == 0 &&
- (lp->flags & ISDN_NET_CONNECTED)) {
- unsigned short hl;
- struct sk_buff *skb;
- unsigned char *cpy_buf;
- /*
- * we need to reserve enough space in front of
- * the sk_buff. The old call to dev_alloc_skb only reserved
- * 16 bytes; now we check how much headroom the driver wants
- */
- hl = dev->drv[lp->isdn_device]->interface->hl_hdrlen;
- skb = alloc_skb(hl + count, GFP_ATOMIC);
- if (!skb) {
- printk(KERN_WARNING "isdn_ppp_write: out of memory!\n");
- return count;
- }
- skb_reserve(skb, hl);
- cpy_buf = skb_put(skb, count);
- if (copy_from_user(cpy_buf, buf, count))
- {
- kfree_skb(skb);
- return -EFAULT;
- }
-
- /*
- * Don't reset huptimer for
- * LCP packets. (Echo requests).
- */
- proto = PPP_PROTOCOL(cpy_buf);
- if (proto != PPP_LCP)
- lp->huptimer = 0;
-
- if (is->debug & 0x40) {
- printk(KERN_DEBUG "ppp xmit: len %d\n", (int) skb->len);
- isdn_ppp_frame_log("xmit", skb->data, skb->len, 32, is->unit, lp->ppp_slot);
- }
-
- isdn_ppp_send_ccp(lp->netdev, lp, skb); /* keeps CCP/compression states in sync */
-
- isdn_net_write_super(lp, skb);
- }
- }
- return count;
-}
-
-/*
- * init memory, structures etc.
- */
-
-int
-isdn_ppp_init(void)
-{
- int i,
- j;
-
-#ifdef CONFIG_ISDN_MPP
- if (isdn_ppp_mp_bundle_array_init() < 0)
- return -ENOMEM;
-#endif /* CONFIG_ISDN_MPP */
-
- for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
- if (!(ippp_table[i] = kzalloc(sizeof(struct ippp_struct), GFP_KERNEL))) {
- printk(KERN_WARNING "isdn_ppp_init: Could not alloc ippp_table\n");
- for (j = 0; j < i; j++)
- kfree(ippp_table[j]);
- return -1;
- }
- spin_lock_init(&ippp_table[i]->buflock);
- ippp_table[i]->state = 0;
- ippp_table[i]->first = ippp_table[i]->rq + NUM_RCV_BUFFS - 1;
- ippp_table[i]->last = ippp_table[i]->rq;
-
- for (j = 0; j < NUM_RCV_BUFFS; j++) {
- ippp_table[i]->rq[j].buf = NULL;
- ippp_table[i]->rq[j].last = ippp_table[i]->rq +
- (NUM_RCV_BUFFS + j - 1) % NUM_RCV_BUFFS;
- ippp_table[i]->rq[j].next = ippp_table[i]->rq + (j + 1) % NUM_RCV_BUFFS;
- }
- }
- return 0;
-}
-
-void
-isdn_ppp_cleanup(void)
-{
- int i;
-
- for (i = 0; i < ISDN_MAX_CHANNELS; i++)
- kfree(ippp_table[i]);
-
-#ifdef CONFIG_ISDN_MPP
- kfree(isdn_ppp_bundle_arr);
-#endif /* CONFIG_ISDN_MPP */
-
-}
-
-/*
- * check for address/control field and skip if allowed
- * retval != 0 -> discard packet silently
- */
-static int isdn_ppp_skip_ac(struct ippp_struct *is, struct sk_buff *skb)
-{
- if (skb->len < 1)
- return -1;
-
- if (skb->data[0] == 0xff) {
- if (skb->len < 2)
- return -1;
-
- if (skb->data[1] != 0x03)
- return -1;
-
- // skip address/control (AC) field
- skb_pull(skb, 2);
- } else {
- if (is->pppcfg & SC_REJ_COMP_AC)
- // if AC compression was not negotiated, but used, discard packet
- return -1;
- }
- return 0;
-}
-
-/*
- * get the PPP protocol header and pull skb
- * retval < 0 -> discard packet silently
- */
-static int isdn_ppp_strip_proto(struct sk_buff *skb)
-{
- int proto;
-
- if (skb->len < 1)
- return -1;
-
- if (skb->data[0] & 0x1) {
- // protocol field is compressed
- proto = skb->data[0];
- skb_pull(skb, 1);
- } else {
- if (skb->len < 2)
- return -1;
- proto = ((int) skb->data[0] << 8) + skb->data[1];
- skb_pull(skb, 2);
- }
- return proto;
-}
-
-
-/*
- * handler for incoming packets on a syncPPP interface
- */
-void isdn_ppp_receive(isdn_net_dev *net_dev, isdn_net_local *lp, struct sk_buff *skb)
-{
- struct ippp_struct *is;
- int slot;
- int proto;
-
- BUG_ON(net_dev->local->master); // we're called with the master device always
-
- slot = lp->ppp_slot;
- if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
- printk(KERN_ERR "isdn_ppp_receive: lp->ppp_slot(%d)\n",
- lp->ppp_slot);
- kfree_skb(skb);
- return;
- }
- is = ippp_table[slot];
-
- if (is->debug & 0x4) {
- printk(KERN_DEBUG "ippp_receive: is:%08lx lp:%08lx slot:%d unit:%d len:%d\n",
- (long)is, (long)lp, lp->ppp_slot, is->unit, (int)skb->len);
- isdn_ppp_frame_log("receive", skb->data, skb->len, 32, is->unit, lp->ppp_slot);
- }
-
- if (isdn_ppp_skip_ac(is, skb) < 0) {
- kfree_skb(skb);
- return;
- }
- proto = isdn_ppp_strip_proto(skb);
- if (proto < 0) {
- kfree_skb(skb);
- return;
- }
-
-#ifdef CONFIG_ISDN_MPP
- if (is->compflags & SC_LINK_DECOMP_ON) {
- skb = isdn_ppp_decompress(skb, is, NULL, &proto);
- if (!skb) // decompression error
- return;
- }
-
- if (!(is->mpppcfg & SC_REJ_MP_PROT)) { // we agreed to receive MPPP
- if (proto == PPP_MP) {
- isdn_ppp_mp_receive(net_dev, lp, skb);
- return;
- }
- }
-#endif
- isdn_ppp_push_higher(net_dev, lp, skb, proto);
-}
-
-/*
- * we receive a reassembled frame, MPPP has been taken care of before.
- * address/control and protocol have been stripped from the skb
- * note: net_dev has to be master net_dev
- */
-static void
-isdn_ppp_push_higher(isdn_net_dev *net_dev, isdn_net_local *lp, struct sk_buff *skb, int proto)
-{
- struct net_device *dev = net_dev->dev;
- struct ippp_struct *is, *mis;
- isdn_net_local *mlp = NULL;
- int slot;
-
- slot = lp->ppp_slot;
- if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
- printk(KERN_ERR "isdn_ppp_push_higher: lp->ppp_slot(%d)\n",
- lp->ppp_slot);
- goto drop_packet;
- }
- is = ippp_table[slot];
-
- if (lp->master) { // FIXME?
- mlp = ISDN_MASTER_PRIV(lp);
- slot = mlp->ppp_slot;
- if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
- printk(KERN_ERR "isdn_ppp_push_higher: master->ppp_slot(%d)\n",
- lp->ppp_slot);
- goto drop_packet;
- }
- }
- mis = ippp_table[slot];
-
- if (is->debug & 0x10) {
- printk(KERN_DEBUG "push, skb %d %04x\n", (int) skb->len, proto);
- isdn_ppp_frame_log("rpush", skb->data, skb->len, 32, is->unit, lp->ppp_slot);
- }
- if (mis->compflags & SC_DECOMP_ON) {
- skb = isdn_ppp_decompress(skb, is, mis, &proto);
- if (!skb) // decompression error
- return;
- }
- switch (proto) {
- case PPP_IPX: /* untested */
- if (is->debug & 0x20)
- printk(KERN_DEBUG "isdn_ppp: IPX\n");
- skb->protocol = htons(ETH_P_IPX);
- break;
- case PPP_IP:
- if (is->debug & 0x20)
- printk(KERN_DEBUG "isdn_ppp: IP\n");
- skb->protocol = htons(ETH_P_IP);
- break;
- case PPP_COMP:
- case PPP_COMPFRAG:
- printk(KERN_INFO "isdn_ppp: unexpected compressed frame dropped\n");
- goto drop_packet;
-#ifdef CONFIG_ISDN_PPP_VJ
- case PPP_VJC_UNCOMP:
- if (is->debug & 0x20)
- printk(KERN_DEBUG "isdn_ppp: VJC_UNCOMP\n");
- if (net_dev->local->ppp_slot < 0) {
- printk(KERN_ERR "%s: net_dev->local->ppp_slot(%d) out of range\n",
- __func__, net_dev->local->ppp_slot);
- goto drop_packet;
- }
- if (slhc_remember(ippp_table[net_dev->local->ppp_slot]->slcomp, skb->data, skb->len) <= 0) {
- printk(KERN_WARNING "isdn_ppp: received illegal VJC_UNCOMP frame!\n");
- goto drop_packet;
- }
- skb->protocol = htons(ETH_P_IP);
- break;
- case PPP_VJC_COMP:
- if (is->debug & 0x20)
- printk(KERN_DEBUG "isdn_ppp: VJC_COMP\n");
- {
- struct sk_buff *skb_old = skb;
- int pkt_len;
- skb = dev_alloc_skb(skb_old->len + 128);
-
- if (!skb) {
- printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n", dev->name);
- skb = skb_old;
- goto drop_packet;
- }
- skb_put(skb, skb_old->len + 128);
- skb_copy_from_linear_data(skb_old, skb->data,
- skb_old->len);
- if (net_dev->local->ppp_slot < 0) {
- printk(KERN_ERR "%s: net_dev->local->ppp_slot(%d) out of range\n",
- __func__, net_dev->local->ppp_slot);
- goto drop_packet;
- }
- pkt_len = slhc_uncompress(ippp_table[net_dev->local->ppp_slot]->slcomp,
- skb->data, skb_old->len);
- kfree_skb(skb_old);
- if (pkt_len < 0)
- goto drop_packet;
-
- skb_trim(skb, pkt_len);
- skb->protocol = htons(ETH_P_IP);
- }
- break;
-#endif
- case PPP_CCP:
- case PPP_CCPFRAG:
- isdn_ppp_receive_ccp(net_dev, lp, skb, proto);
- /* Don't pop up ResetReq/Ack stuff to the daemon any
- longer - the job is done already */
- if (skb->data[0] == CCP_RESETREQ ||
- skb->data[0] == CCP_RESETACK)
- break;
- /* fall through */
- default:
- isdn_ppp_fill_rq(skb->data, skb->len, proto, lp->ppp_slot); /* push data to pppd device */
- kfree_skb(skb);
- return;
- }
-
-#ifdef CONFIG_IPPP_FILTER
- /* check if the packet passes the pass and active filters
- * the filter instructions are constructed assuming
- * a four-byte PPP header on each packet (which is still present) */
- skb_push(skb, 4);
-
- {
- u_int16_t *p = (u_int16_t *) skb->data;
-
- *p = 0; /* indicate inbound */
- }
-
- if (is->pass_filter
- && BPF_PROG_RUN(is->pass_filter, skb) == 0) {
- if (is->debug & 0x2)
- printk(KERN_DEBUG "IPPP: inbound frame filtered.\n");
- kfree_skb(skb);
- return;
- }
- if (!(is->active_filter
- && BPF_PROG_RUN(is->active_filter, skb) == 0)) {
- if (is->debug & 0x2)
- printk(KERN_DEBUG "IPPP: link-active filter: resetting huptimer.\n");
- lp->huptimer = 0;
- if (mlp)
- mlp->huptimer = 0;
- }
- skb_pull(skb, 4);
-#else /* CONFIG_IPPP_FILTER */
- lp->huptimer = 0;
- if (mlp)
- mlp->huptimer = 0;
-#endif /* CONFIG_IPPP_FILTER */
- skb->dev = dev;
- skb_reset_mac_header(skb);
- netif_rx(skb);
- /* net_dev->local->stats.rx_packets++; done in isdn_net.c */
- return;
-
-drop_packet:
- net_dev->local->stats.rx_dropped++;
- kfree_skb(skb);
-}
-
-/*
- * isdn_ppp_skb_push ..
- * checks whether we have enough space at the beginning of the skb
- * and allocs a new SKB if necessary
- */
-static unsigned char *isdn_ppp_skb_push(struct sk_buff **skb_p, int len)
-{
- struct sk_buff *skb = *skb_p;
-
- if (skb_headroom(skb) < len) {
- struct sk_buff *nskb = skb_realloc_headroom(skb, len);
-
- if (!nskb) {
- printk(KERN_ERR "isdn_ppp_skb_push: can't realloc headroom!\n");
- dev_kfree_skb(skb);
- return NULL;
- }
- printk(KERN_DEBUG "isdn_ppp_skb_push:under %d %d\n", skb_headroom(skb), len);
- dev_kfree_skb(skb);
- *skb_p = nskb;
- return skb_push(nskb, len);
- }
- return skb_push(skb, len);
-}
-
-/*
- * send ppp frame .. we expect a PIDCOMPressable proto --
- * (here: currently always PPP_IP,PPP_VJC_COMP,PPP_VJC_UNCOMP)
- *
- * VJ compression may change skb pointer!!! .. requeue with old
- * skb isn't allowed!!
- */
-
-int
-isdn_ppp_xmit(struct sk_buff *skb, struct net_device *netdev)
-{
- isdn_net_local *lp, *mlp;
- isdn_net_dev *nd;
- unsigned int proto = PPP_IP; /* 0x21 */
- struct ippp_struct *ipt, *ipts;
- int slot, retval = NETDEV_TX_OK;
-
- mlp = netdev_priv(netdev);
- nd = mlp->netdev; /* get master lp */
-
- slot = mlp->ppp_slot;
- if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
- printk(KERN_ERR "isdn_ppp_xmit: lp->ppp_slot(%d)\n",
- mlp->ppp_slot);
- kfree_skb(skb);
- goto out;
- }
- ipts = ippp_table[slot];
-
- if (!(ipts->pppcfg & SC_ENABLE_IP)) { /* PPP connected ? */
- if (ipts->debug & 0x1)
- printk(KERN_INFO "%s: IP frame delayed.\n", netdev->name);
- retval = NETDEV_TX_BUSY;
- goto out;
- }
-
- switch (ntohs(skb->protocol)) {
- case ETH_P_IP:
- proto = PPP_IP;
- break;
- case ETH_P_IPX:
- proto = PPP_IPX; /* untested */
- break;
- default:
- printk(KERN_ERR "isdn_ppp: skipped unsupported protocol: %#x.\n",
- skb->protocol);
- dev_kfree_skb(skb);
- goto out;
- }
-
- lp = isdn_net_get_locked_lp(nd);
- if (!lp) {
- printk(KERN_WARNING "%s: all channels busy - requeuing!\n", netdev->name);
- retval = NETDEV_TX_BUSY;
- goto out;
- }
- /* we have our lp locked from now on */
-
- slot = lp->ppp_slot;
- if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
- printk(KERN_ERR "isdn_ppp_xmit: lp->ppp_slot(%d)\n",
- lp->ppp_slot);
- kfree_skb(skb);
- goto unlock;
- }
- ipt = ippp_table[slot];
-
- /*
- * after this line .. requeueing in the device queue is no longer allowed!!!
- */
-
- /* Pull off the fake header we stuck on earlier to keep
- * the fragmentation code happy.
- */
- skb_pull(skb, IPPP_MAX_HEADER);
-
-#ifdef CONFIG_IPPP_FILTER
- /* check if we should pass this packet
- * the filter instructions are constructed assuming
- * a four-byte PPP header on each packet */
- *(u8 *)skb_push(skb, 4) = 1; /* indicate outbound */
-
- {
- __be16 *p = (__be16 *)skb->data;
-
- p++;
- *p = htons(proto);
- }
-
- if (ipt->pass_filter
- && BPF_PROG_RUN(ipt->pass_filter, skb) == 0) {
- if (ipt->debug & 0x4)
- printk(KERN_DEBUG "IPPP: outbound frame filtered.\n");
- kfree_skb(skb);
- goto unlock;
- }
- if (!(ipt->active_filter
- && BPF_PROG_RUN(ipt->active_filter, skb) == 0)) {
- if (ipt->debug & 0x4)
- printk(KERN_DEBUG "IPPP: link-active filter: resetting huptimer.\n");
- lp->huptimer = 0;
- }
- skb_pull(skb, 4);
-#else /* CONFIG_IPPP_FILTER */
- lp->huptimer = 0;
-#endif /* CONFIG_IPPP_FILTER */
-
- if (ipt->debug & 0x4)
- printk(KERN_DEBUG "xmit skb, len %d\n", (int) skb->len);
- if (ipts->debug & 0x40)
- isdn_ppp_frame_log("xmit0", skb->data, skb->len, 32, ipts->unit, lp->ppp_slot);
-
-#ifdef CONFIG_ISDN_PPP_VJ
- if (proto == PPP_IP && ipts->pppcfg & SC_COMP_TCP) { /* ipts here? probably yes, but check this again */
- struct sk_buff *new_skb;
- unsigned short hl;
- /*
- * we need to reserve enough space in front of
- * the sk_buff. The old call to dev_alloc_skb only reserved
- * 16 bytes; now we check how much headroom the driver wants.
- */
- hl = dev->drv[lp->isdn_device]->interface->hl_hdrlen + IPPP_MAX_HEADER;
- /*
- * Note: hl might still be insufficient because the method
- * above does not account for a possible MPPP slave channel
- * which has larger HL header space requirements than the
- * master.
- */
- new_skb = alloc_skb(hl + skb->len, GFP_ATOMIC);
- if (new_skb) {
- u_char *buf;
- int pktlen;
-
- skb_reserve(new_skb, hl);
- new_skb->dev = skb->dev;
- skb_put(new_skb, skb->len);
- buf = skb->data;
-
- pktlen = slhc_compress(ipts->slcomp, skb->data, skb->len, new_skb->data,
- &buf, !(ipts->pppcfg & SC_NO_TCP_CCID));
-
- if (buf != skb->data) {
- if (new_skb->data != buf)
- printk(KERN_ERR "isdn_ppp: FATAL error after slhc_compress!!\n");
- dev_kfree_skb(skb);
- skb = new_skb;
- } else {
- dev_kfree_skb(new_skb);
- }
-
- skb_trim(skb, pktlen);
- if (skb->data[0] & SL_TYPE_COMPRESSED_TCP) { /* cslip? style -> PPP */
- proto = PPP_VJC_COMP;
- skb->data[0] ^= SL_TYPE_COMPRESSED_TCP;
- } else {
- if (skb->data[0] >= SL_TYPE_UNCOMPRESSED_TCP)
- proto = PPP_VJC_UNCOMP;
- skb->data[0] = (skb->data[0] & 0x0f) | 0x40;
- }
- }
- }
-#endif
-
- /*
- * normal (single link) or bundle compression
- */
- if (ipts->compflags & SC_COMP_ON) {
- /* We send compressed frames only if both down- and upstream
- compression is negotiated, i.e. CCP is up */
- if (ipts->compflags & SC_DECOMP_ON) {
- skb = isdn_ppp_compress(skb, &proto, ipt, ipts, 0);
- } else {
- printk(KERN_DEBUG "isdn_ppp: CCP not yet up - sending as-is\n");
- }
- }
-
- if (ipt->debug & 0x24)
- printk(KERN_DEBUG "xmit2 skb, len %d, proto %04x\n", (int) skb->len, proto);
-
-#ifdef CONFIG_ISDN_MPP
- if (ipt->mpppcfg & SC_MP_PROT) {
- /* we get mp_seqno from static isdn_net_local */
- long mp_seqno = ipts->mp_seqno;
- ipts->mp_seqno++;
- if (ipt->mpppcfg & SC_OUT_SHORT_SEQ) {
- unsigned char *data = isdn_ppp_skb_push(&skb, 3);
- if (!data)
- goto unlock;
- mp_seqno &= 0xfff;
- data[0] = MP_BEGIN_FRAG | MP_END_FRAG | ((mp_seqno >> 8) & 0xf); /* (B)egin & (E)ndbit .. */
- data[1] = mp_seqno & 0xff;
- data[2] = proto; /* PID compression */
- } else {
- unsigned char *data = isdn_ppp_skb_push(&skb, 5);
- if (!data)
- goto unlock;
- data[0] = MP_BEGIN_FRAG | MP_END_FRAG; /* (B)egin & (E)ndbit .. */
- data[1] = (mp_seqno >> 16) & 0xff; /* sequence number: 24bit */
- data[2] = (mp_seqno >> 8) & 0xff;
- data[3] = (mp_seqno >> 0) & 0xff;
- data[4] = proto; /* PID compression */
- }
- proto = PPP_MP; /* MP Protocol, 0x003d */
- }
-#endif
-
- /*
- * 'link in bundle' compression ...
- */
- if (ipt->compflags & SC_LINK_COMP_ON)
- skb = isdn_ppp_compress(skb, &proto, ipt, ipts, 1);
-
- if ((ipt->pppcfg & SC_COMP_PROT) && (proto <= 0xff)) {
- unsigned char *data = isdn_ppp_skb_push(&skb, 1);
- if (!data)
- goto unlock;
- data[0] = proto & 0xff;
- }
- else {
- unsigned char *data = isdn_ppp_skb_push(&skb, 2);
- if (!data)
- goto unlock;
- data[0] = (proto >> 8) & 0xff;
- data[1] = proto & 0xff;
- }
- if (!(ipt->pppcfg & SC_COMP_AC)) {
- unsigned char *data = isdn_ppp_skb_push(&skb, 2);
- if (!data)
- goto unlock;
- data[0] = 0xff; /* All Stations */
- data[1] = 0x03; /* Unnumbered information */
- }
-
- /* tx-stats are now updated via BSENT-callback */
-
- if (ipts->debug & 0x40) {
- printk(KERN_DEBUG "skb xmit: len: %d\n", (int) skb->len);
- isdn_ppp_frame_log("xmit", skb->data, skb->len, 32, ipt->unit, lp->ppp_slot);
- }
-
- isdn_net_writebuf_skb(lp, skb);
-
-unlock:
- spin_unlock_bh(&lp->xmit_lock);
-out:
- return retval;
-}
-
-#ifdef CONFIG_IPPP_FILTER
-/*
- * check if this packet may trigger auto-dial.
- */
-
-int isdn_ppp_autodial_filter(struct sk_buff *skb, isdn_net_local *lp)
-{
- struct ippp_struct *is = ippp_table[lp->ppp_slot];
- u_int16_t proto;
- int drop = 0;
-
- switch (ntohs(skb->protocol)) {
- case ETH_P_IP:
- proto = PPP_IP;
- break;
- case ETH_P_IPX:
- proto = PPP_IPX;
- break;
- default:
- printk(KERN_ERR "isdn_ppp_autodial_filter: unsupported protocol 0x%x.\n",
- skb->protocol);
- return 1;
- }
-
- /* the filter instructions are constructed assuming
- * a four-byte PPP header on each packet. we have to
- * temporarily remove part of the fake header stuck on
- * earlier.
- */
- *(u8 *)skb_pull(skb, IPPP_MAX_HEADER - 4) = 1; /* indicate outbound */
-
- {
- __be16 *p = (__be16 *)skb->data;
-
- p++;
- *p = htons(proto);
- }
-
- drop |= is->pass_filter
- && BPF_PROG_RUN(is->pass_filter, skb) == 0;
- drop |= is->active_filter
- && BPF_PROG_RUN(is->active_filter, skb) == 0;
-
- skb_push(skb, IPPP_MAX_HEADER - 4);
- return drop;
-}
-#endif
-#ifdef CONFIG_ISDN_MPP
-
-/* this is _not_ the rfc1990 header, but something we convert both short and long
- * headers to for convenience's sake:
- * byte 0 is flags as in rfc1990
- * bytes 1...4 are the 24-bit sequence number converted to host byte order
- */
-#define MP_HEADER_LEN 5
-
-#define MP_LONGSEQ_MASK 0x00ffffff
-#define MP_SHORTSEQ_MASK 0x00000fff
-#define MP_LONGSEQ_MAX MP_LONGSEQ_MASK
-#define MP_SHORTSEQ_MAX MP_SHORTSEQ_MASK
-#define MP_LONGSEQ_MAXBIT ((MP_LONGSEQ_MASK + 1) >> 1)
-#define MP_SHORTSEQ_MAXBIT ((MP_SHORTSEQ_MASK + 1) >> 1)
-
-/* sequence-wrap safe comparisons (for long sequence)*/
-#define MP_LT(a, b) ((a - b) & MP_LONGSEQ_MAXBIT)
-#define MP_LE(a, b) !((b - a) & MP_LONGSEQ_MAXBIT)
-#define MP_GT(a, b) ((b - a) & MP_LONGSEQ_MAXBIT)
-#define MP_GE(a, b) !((a - b) & MP_LONGSEQ_MAXBIT)
-
-#define MP_SEQ(f) ((*(u32 *)(f->data + 1)))
-#define MP_FLAGS(f) (f->data[0])
-
-static int isdn_ppp_mp_bundle_array_init(void)
-{
- int i;
- int sz = ISDN_MAX_CHANNELS * sizeof(ippp_bundle);
- if ((isdn_ppp_bundle_arr = kzalloc(sz, GFP_KERNEL)) == NULL)
- return -ENOMEM;
- for (i = 0; i < ISDN_MAX_CHANNELS; i++)
- spin_lock_init(&isdn_ppp_bundle_arr[i].lock);
- return 0;
-}
-
-static ippp_bundle *isdn_ppp_mp_bundle_alloc(void)
-{
- int i;
- for (i = 0; i < ISDN_MAX_CHANNELS; i++)
- if (isdn_ppp_bundle_arr[i].ref_ct <= 0)
- return (isdn_ppp_bundle_arr + i);
- return NULL;
-}
-
-static int isdn_ppp_mp_init(isdn_net_local *lp, ippp_bundle *add_to)
-{
- struct ippp_struct *is;
-
- if (lp->ppp_slot < 0) {
- printk(KERN_ERR "%s: lp->ppp_slot(%d) out of range\n",
- __func__, lp->ppp_slot);
- return (-EINVAL);
- }
-
- is = ippp_table[lp->ppp_slot];
- if (add_to) {
- if (lp->netdev->pb)
- lp->netdev->pb->ref_ct--;
- lp->netdev->pb = add_to;
- } else { /* first link in a bundle */
- is->mp_seqno = 0;
- if ((lp->netdev->pb = isdn_ppp_mp_bundle_alloc()) == NULL)
- return -ENOMEM;
- lp->next = lp->last = lp; /* nobody else in a queue */
- lp->netdev->pb->frags = NULL;
- lp->netdev->pb->frames = 0;
- lp->netdev->pb->seq = UINT_MAX;
- }
- lp->netdev->pb->ref_ct++;
-
- is->last_link_seqno = 0;
- return 0;
-}
-
-static u32 isdn_ppp_mp_get_seq(int short_seq,
- struct sk_buff *skb, u32 last_seq);
-static struct sk_buff *isdn_ppp_mp_discard(ippp_bundle *mp,
- struct sk_buff *from, struct sk_buff *to);
-static void isdn_ppp_mp_reassembly(isdn_net_dev *net_dev, isdn_net_local *lp,
- struct sk_buff *from, struct sk_buff *to);
-static void isdn_ppp_mp_free_skb(ippp_bundle *mp, struct sk_buff *skb);
-static void isdn_ppp_mp_print_recv_pkt(int slot, struct sk_buff *skb);
-
-static void isdn_ppp_mp_receive(isdn_net_dev *net_dev, isdn_net_local *lp,
- struct sk_buff *skb)
-{
- struct ippp_struct *is;
- isdn_net_local *lpq;
- ippp_bundle *mp;
- isdn_mppp_stats *stats;
- struct sk_buff *newfrag, *frag, *start, *nextf;
- u32 newseq, minseq, thisseq;
- unsigned long flags;
- int slot;
-
- spin_lock_irqsave(&net_dev->pb->lock, flags);
- mp = net_dev->pb;
- stats = &mp->stats;
- slot = lp->ppp_slot;
- if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
- printk(KERN_ERR "%s: lp->ppp_slot(%d)\n",
- __func__, lp->ppp_slot);
- stats->frame_drops++;
- dev_kfree_skb(skb);
- spin_unlock_irqrestore(&mp->lock, flags);
- return;
- }
- is = ippp_table[slot];
- if (++mp->frames > stats->max_queue_len)
- stats->max_queue_len = mp->frames;
-
- if (is->debug & 0x8)
- isdn_ppp_mp_print_recv_pkt(lp->ppp_slot, skb);
-
- newseq = isdn_ppp_mp_get_seq(is->mpppcfg & SC_IN_SHORT_SEQ,
- skb, is->last_link_seqno);
-
-
- /* if this packet's seq # is less than the last already processed one,
- * toss it right away, but check for the sequence start case first
- */
- if (mp->seq > MP_LONGSEQ_MAX && (newseq & MP_LONGSEQ_MAXBIT)) {
- mp->seq = newseq; /* the first packet: required for
- * rfc1990 non-compliant clients --
- * prevents constant packet toss */
- } else if (MP_LT(newseq, mp->seq)) {
- stats->frame_drops++;
- isdn_ppp_mp_free_skb(mp, skb);
- spin_unlock_irqrestore(&mp->lock, flags);
- return;
- }
-
- /* find the minimum received sequence number over all links */
- is->last_link_seqno = minseq = newseq;
- for (lpq = net_dev->queue;;) {
- slot = lpq->ppp_slot;
- if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
- printk(KERN_ERR "%s: lpq->ppp_slot(%d)\n",
- __func__, lpq->ppp_slot);
- } else {
- u32 lls = ippp_table[slot]->last_link_seqno;
- if (MP_LT(lls, minseq))
- minseq = lls;
- }
- if ((lpq = lpq->next) == net_dev->queue)
- break;
- }
- if (MP_LT(minseq, mp->seq))
- minseq = mp->seq; /* can't go beyond already processed
- * packets */
- newfrag = skb;
-
- /* if this new fragment is before the first one, then enqueue it now. */
- if ((frag = mp->frags) == NULL || MP_LT(newseq, MP_SEQ(frag))) {
- newfrag->next = frag;
- mp->frags = frag = newfrag;
- newfrag = NULL;
- }
-
- start = MP_FLAGS(frag) & MP_BEGIN_FRAG &&
- MP_SEQ(frag) == mp->seq ? frag : NULL;
-
- /*
- * main fragment traversing loop
- *
- * try to accomplish several tasks:
- * - insert new fragment into the proper sequence slot (once that's done
- * newfrag will be set to NULL)
- * - reassemble any complete fragment sequence (non-null 'start'
- * indicates there is a contiguous sequence present)
- * - discard any incomplete sequences that are below minseq -- since
- * the sender always increments the sequence number, if there
- * is an incomplete sequence below minseq, no new fragments will
- * arrive to complete such a sequence, so it should be discarded
- *
- * the loop completes when we have accomplished the following tasks:
- * - new fragment is inserted in the proper sequence ('newfrag' is
- * set to NULL)
- * - we hit a gap in the sequence, so no reassembly/processing is
- * possible ('start' would be set to NULL)
- *
- * algorithm for this code is derived from code in the book
- * 'PPP Design And Debugging' by James Carlson (Addison-Wesley)
- */
- while (start != NULL || newfrag != NULL) {
-
- thisseq = MP_SEQ(frag);
- nextf = frag->next;
-
- /* drop any duplicate fragments */
- if (newfrag != NULL && thisseq == newseq) {
- isdn_ppp_mp_free_skb(mp, newfrag);
- newfrag = NULL;
- }
-
- /* insert new fragment before next element if possible. */
- if (newfrag != NULL && (nextf == NULL ||
- MP_LT(newseq, MP_SEQ(nextf)))) {
- newfrag->next = nextf;
- frag->next = nextf = newfrag;
- newfrag = NULL;
- }
-
- if (start != NULL) {
- /* check for misplaced start */
- if (start != frag && (MP_FLAGS(frag) & MP_BEGIN_FRAG)) {
- printk(KERN_WARNING"isdn_mppp(seq %d): new "
- "BEGIN flag with no prior END", thisseq);
- stats->seqerrs++;
- stats->frame_drops++;
- start = isdn_ppp_mp_discard(mp, start, frag);
- nextf = frag->next;
- }
- } else if (MP_LE(thisseq, minseq)) {
- if (MP_FLAGS(frag) & MP_BEGIN_FRAG)
- start = frag;
- else {
- if (MP_FLAGS(frag) & MP_END_FRAG)
- stats->frame_drops++;
- if (mp->frags == frag)
- mp->frags = nextf;
- isdn_ppp_mp_free_skb(mp, frag);
- frag = nextf;
- continue;
- }
- }
-
- /* if start is non-null and we have end fragment, then
- * we have full reassembly sequence -- reassemble
- * and process packet now
- */
- if (start != NULL && (MP_FLAGS(frag) & MP_END_FRAG)) {
- minseq = mp->seq = (thisseq + 1) & MP_LONGSEQ_MASK;
- /* Reassemble the packet then dispatch it */
- isdn_ppp_mp_reassembly(net_dev, lp, start, nextf);
-
- start = NULL;
- frag = NULL;
-
- mp->frags = nextf;
- }
-
- /* check if we need to update the start pointer: if we just
- * reassembled the packet and the sequence is contiguous,
- * then the next fragment should be the start of a new reassembly.
- * if the sequence is contiguous but we haven't reassembled yet,
- * keep going.
- * if the sequence is not contiguous, either clear everything
- * below the low watermark and set start to the next frag, or
- * clear the start ptr.
- */
- if (nextf != NULL &&
- ((thisseq + 1) & MP_LONGSEQ_MASK) == MP_SEQ(nextf)) {
- /* if we just reassembled and the next one is here,
- * then start another reassembly. */
-
- if (frag == NULL) {
- if (MP_FLAGS(nextf) & MP_BEGIN_FRAG)
- start = nextf;
- else
- {
- printk(KERN_WARNING"isdn_mppp(seq %d):"
- " END flag with no following "
- "BEGIN", thisseq);
- stats->seqerrs++;
- }
- }
-
- } else {
- if (nextf != NULL && frag != NULL &&
- MP_LT(thisseq, minseq)) {
-				/* We have a break in the sequence,
-				 * we are not at the end yet, we did not just
-				 * reassemble a packet (if we had, there would
-				 * be nothing before this), and we are below
-				 * the low watermark: discard all frames below
-				 * the low watermark and start over. */
- stats->frame_drops++;
- mp->frags = isdn_ppp_mp_discard(mp, start, nextf);
- }
- /* break in the sequence, no reassembly */
- start = NULL;
- }
-
- frag = nextf;
- } /* while -- main loop */
-
- if (mp->frags == NULL)
- mp->frags = frag;
-
-	/* rather straightforward way to deal with a (not very likely)
-	 * queue overflow */
- if (mp->frames > MP_MAX_QUEUE_LEN) {
- stats->overflows++;
- while (mp->frames > MP_MAX_QUEUE_LEN) {
- frag = mp->frags->next;
- isdn_ppp_mp_free_skb(mp, mp->frags);
- mp->frags = frag;
- }
- }
- spin_unlock_irqrestore(&mp->lock, flags);
-}
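
For reference, the MP_LT()/MP_LE() comparisons used in the loop above are
wraparound-aware orderings over the 24-bit multilink sequence space; their
definitions sit earlier in this file and are not part of this excerpt, so the
exact formulation below is an assumption, not the driver's macro. A minimal
standalone C sketch:

#include <stdint.h>
#include <stdio.h>

#define LONGSEQ_MASK 0x00ffffffu

/* a < b iff b is ahead of a by less than half the 24-bit sequence space */
static int seq_lt(uint32_t a, uint32_t b)
{
	uint32_t d = (b - a) & LONGSEQ_MASK;

	return d != 0 && d < (LONGSEQ_MASK / 2);
}

int main(void)
{
	printf("%d\n", seq_lt(0x000005, 0x000006));	/* 1 */
	printf("%d\n", seq_lt(0xfffffe, 0x000001));	/* 1: wrapped around */
	printf("%d\n", seq_lt(0x000006, 0x000005));	/* 0 */
	return 0;
}
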
-
-static void isdn_ppp_mp_cleanup(isdn_net_local *lp)
-{
- struct sk_buff *frag = lp->netdev->pb->frags;
- struct sk_buff *nextfrag;
- while (frag) {
- nextfrag = frag->next;
- isdn_ppp_mp_free_skb(lp->netdev->pb, frag);
- frag = nextfrag;
- }
- lp->netdev->pb->frags = NULL;
-}
-
-static u32 isdn_ppp_mp_get_seq(int short_seq,
- struct sk_buff *skb, u32 last_seq)
-{
- u32 seq;
- int flags = skb->data[0] & (MP_BEGIN_FRAG | MP_END_FRAG);
-
- if (!short_seq)
- {
- seq = ntohl(*(__be32 *)skb->data) & MP_LONGSEQ_MASK;
- skb_push(skb, 1);
- }
- else
- {
- /* convert 12-bit short seq number to 24-bit long one
- */
- seq = ntohs(*(__be16 *)skb->data) & MP_SHORTSEQ_MASK;
-
-		/* check for sequence wrap */
- if (!(seq & MP_SHORTSEQ_MAXBIT) &&
- (last_seq & MP_SHORTSEQ_MAXBIT) &&
- (unsigned long)last_seq <= MP_LONGSEQ_MAX)
- seq |= (last_seq + MP_SHORTSEQ_MAX + 1) &
- (~MP_SHORTSEQ_MASK & MP_LONGSEQ_MASK);
- else
- seq |= last_seq & (~MP_SHORTSEQ_MASK & MP_LONGSEQ_MASK);
-
-		skb_push(skb, 3);	/* put converted sequence back in skb */
- }
-	*(u32 *)(skb->data + 1) = seq;	/* put sequence back in _host_ byte
-					 * order */
- skb->data[0] = flags; /* restore flags */
- return seq;
-}
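
For reference, a standalone sketch of the short-to-long sequence extension
performed by isdn_ppp_mp_get_seq() above for the 12-bit case. The mask values
are assumptions in the style of RFC 1990 multilink PPP, not the driver's own
MP_* constants:

#include <stdint.h>
#include <stdio.h>

#define SHORTSEQ_MASK   0x00000fffu
#define SHORTSEQ_MAXBIT 0x00000800u
#define SHORTSEQ_MAX    SHORTSEQ_MASK
#define LONGSEQ_MASK    0x00ffffffu

static uint32_t extend_short_seq(uint32_t short_seq, uint32_t last_seq)
{
	uint32_t seq = short_seq & SHORTSEQ_MASK;

	/* If the 12-bit counter just wrapped (new high bit clear while the
	 * previous one was set), carry the wrap into the upper bits. */
	if (!(seq & SHORTSEQ_MAXBIT) && (last_seq & SHORTSEQ_MAXBIT))
		seq |= (last_seq + SHORTSEQ_MAX + 1) &
		       (~SHORTSEQ_MASK & LONGSEQ_MASK);
	else
		seq |= last_seq & (~SHORTSEQ_MASK & LONGSEQ_MASK);

	return seq & LONGSEQ_MASK;
}

int main(void)
{
	/* 0xffe -> 0x001 across a wrap: the upper bits advance by one block */
	printf("%06x\n", (unsigned)extend_short_seq(0x001, 0x000ffe));
	return 0;
}
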
-
-static struct sk_buff *isdn_ppp_mp_discard(ippp_bundle *mp,
- struct sk_buff *from,
- struct sk_buff *to)
-{
- if (from)
- while (from != to) {
- struct sk_buff *next = from->next;
- isdn_ppp_mp_free_skb(mp, from);
- from = next;
- }
- return from;
-}
-
-static void isdn_ppp_mp_reassembly(isdn_net_dev *net_dev, isdn_net_local *lp,
- struct sk_buff *from, struct sk_buff *to)
-{
- ippp_bundle *mp = net_dev->pb;
- int proto;
- struct sk_buff *skb;
- unsigned int tot_len;
-
- if (lp->ppp_slot < 0 || lp->ppp_slot >= ISDN_MAX_CHANNELS) {
- printk(KERN_ERR "%s: lp->ppp_slot(%d) out of range\n",
- __func__, lp->ppp_slot);
- return;
- }
- if (MP_FLAGS(from) == (MP_BEGIN_FRAG | MP_END_FRAG)) {
- if (ippp_table[lp->ppp_slot]->debug & 0x40)
- printk(KERN_DEBUG "isdn_mppp: reassembly: frame %d, "
- "len %d\n", MP_SEQ(from), from->len);
- skb = from;
- skb_pull(skb, MP_HEADER_LEN);
- mp->frames--;
- } else {
- struct sk_buff *frag;
- int n;
-
- for (tot_len = n = 0, frag = from; frag != to; frag = frag->next, n++)
- tot_len += frag->len - MP_HEADER_LEN;
-
- if (ippp_table[lp->ppp_slot]->debug & 0x40)
- printk(KERN_DEBUG"isdn_mppp: reassembling frames %d "
- "to %d, len %d\n", MP_SEQ(from),
- (MP_SEQ(from) + n - 1) & MP_LONGSEQ_MASK, tot_len);
- if ((skb = dev_alloc_skb(tot_len)) == NULL) {
- printk(KERN_ERR "isdn_mppp: cannot allocate sk buff "
- "of size %d\n", tot_len);
- isdn_ppp_mp_discard(mp, from, to);
- return;
- }
-
- while (from != to) {
- unsigned int len = from->len - MP_HEADER_LEN;
-
- skb_copy_from_linear_data_offset(from, MP_HEADER_LEN,
- skb_put(skb, len),
- len);
- frag = from->next;
- isdn_ppp_mp_free_skb(mp, from);
- from = frag;
- }
- }
- proto = isdn_ppp_strip_proto(skb);
- isdn_ppp_push_higher(net_dev, lp, skb, proto);
-}
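
For reference, a standalone sketch (plain buffers instead of sk_buffs) of the
multi-fragment branch of isdn_ppp_mp_reassembly() above: strip each fragment's
MP header and concatenate the payloads. MP_HDR_LEN is an assumed stand-in for
the driver's MP_HEADER_LEN constant:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MP_HDR_LEN 4	/* assumed header size for the long-sequence format */

struct frag {
	const unsigned char *data;
	size_t len;		/* includes the MP header */
};

static unsigned char *reassemble(const struct frag *frags, size_t nfrags,
				 size_t *out_len)
{
	size_t total = 0, off = 0;
	unsigned char *pkt;

	for (size_t i = 0; i < nfrags; i++)
		total += frags[i].len - MP_HDR_LEN;

	pkt = malloc(total);
	if (!pkt)
		return NULL;

	for (size_t i = 0; i < nfrags; i++) {
		size_t plen = frags[i].len - MP_HDR_LEN;

		memcpy(pkt + off, frags[i].data + MP_HDR_LEN, plen);
		off += plen;
	}
	*out_len = total;
	return pkt;
}

int main(void)
{
	const unsigned char f1[] = { 0, 0, 0, 0, 'h', 'e' };
	const unsigned char f2[] = { 0, 0, 0, 0, 'y', '!' };
	const struct frag frags[] = { { f1, sizeof(f1) }, { f2, sizeof(f2) } };
	size_t len;
	unsigned char *pkt = reassemble(frags, 2, &len);

	if (pkt) {
		fwrite(pkt, 1, len, stdout);	/* prints "hey!" */
		putchar('\n');
		free(pkt);
	}
	return 0;
}
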
-
-static void isdn_ppp_mp_free_skb(ippp_bundle *mp, struct sk_buff *skb)
-{
- dev_kfree_skb(skb);
- mp->frames--;
-}
-
-static void isdn_ppp_mp_print_recv_pkt(int slot, struct sk_buff *skb)
-{
- printk(KERN_DEBUG "mp_recv: %d/%d -> %02x %02x %02x %02x %02x %02x\n",
- slot, (int) skb->len,
- (int) skb->data[0], (int) skb->data[1], (int) skb->data[2],
- (int) skb->data[3], (int) skb->data[4], (int) skb->data[5]);
-}
-
-static int
-isdn_ppp_bundle(struct ippp_struct *is, int unit)
-{
- char ifn[IFNAMSIZ + 1];
- isdn_net_dev *p;
- isdn_net_local *lp, *nlp;
- int rc;
- unsigned long flags;
-
- sprintf(ifn, "ippp%d", unit);
- p = isdn_net_findif(ifn);
- if (!p) {
- printk(KERN_ERR "ippp_bundle: cannot find %s\n", ifn);
- return -EINVAL;
- }
-
- spin_lock_irqsave(&p->pb->lock, flags);
-
- nlp = is->lp;
- lp = p->queue;
- if (nlp->ppp_slot < 0 || nlp->ppp_slot >= ISDN_MAX_CHANNELS ||
- lp->ppp_slot < 0 || lp->ppp_slot >= ISDN_MAX_CHANNELS) {
- printk(KERN_ERR "ippp_bundle: binding to invalid slot %d\n",
- nlp->ppp_slot < 0 || nlp->ppp_slot >= ISDN_MAX_CHANNELS ?
- nlp->ppp_slot : lp->ppp_slot);
- rc = -EINVAL;
- goto out;
- }
-
- isdn_net_add_to_bundle(p, nlp);
-
- ippp_table[nlp->ppp_slot]->unit = ippp_table[lp->ppp_slot]->unit;
-
- /* maybe also SC_CCP stuff */
- ippp_table[nlp->ppp_slot]->pppcfg |= ippp_table[lp->ppp_slot]->pppcfg &
- (SC_ENABLE_IP | SC_NO_TCP_CCID | SC_REJ_COMP_TCP);
- ippp_table[nlp->ppp_slot]->mpppcfg |= ippp_table[lp->ppp_slot]->mpppcfg &
- (SC_MP_PROT | SC_REJ_MP_PROT | SC_OUT_SHORT_SEQ | SC_IN_SHORT_SEQ);
- rc = isdn_ppp_mp_init(nlp, p->pb);
-out:
- spin_unlock_irqrestore(&p->pb->lock, flags);
- return rc;
-}
-
-#endif /* CONFIG_ISDN_MPP */
-
-/*
- * network device ioctl handlers
- */
-
-static int
-isdn_ppp_dev_ioctl_stats(int slot, struct ifreq *ifr, struct net_device *dev)
-{
- struct ppp_stats __user *res = ifr->ifr_data;
- struct ppp_stats t;
- isdn_net_local *lp = netdev_priv(dev);
-
- /* build a temporary stat struct and copy it to user space */
-
- memset(&t, 0, sizeof(struct ppp_stats));
- if (dev->flags & IFF_UP) {
- t.p.ppp_ipackets = lp->stats.rx_packets;
- t.p.ppp_ibytes = lp->stats.rx_bytes;
- t.p.ppp_ierrors = lp->stats.rx_errors;
- t.p.ppp_opackets = lp->stats.tx_packets;
- t.p.ppp_obytes = lp->stats.tx_bytes;
- t.p.ppp_oerrors = lp->stats.tx_errors;
-#ifdef CONFIG_ISDN_PPP_VJ
- if (slot >= 0 && ippp_table[slot]->slcomp) {
- struct slcompress *slcomp = ippp_table[slot]->slcomp;
- t.vj.vjs_packets = slcomp->sls_o_compressed + slcomp->sls_o_uncompressed;
- t.vj.vjs_compressed = slcomp->sls_o_compressed;
- t.vj.vjs_searches = slcomp->sls_o_searches;
- t.vj.vjs_misses = slcomp->sls_o_misses;
- t.vj.vjs_errorin = slcomp->sls_i_error;
- t.vj.vjs_tossed = slcomp->sls_i_tossed;
- t.vj.vjs_uncompressedin = slcomp->sls_i_uncompressed;
- t.vj.vjs_compressedin = slcomp->sls_i_compressed;
- }
-#endif
- }
- if (copy_to_user(res, &t, sizeof(struct ppp_stats)))
- return -EFAULT;
- return 0;
-}
-
-int
-isdn_ppp_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
- int error = 0;
- int len;
- isdn_net_local *lp = netdev_priv(dev);
-
-
- if (lp->p_encap != ISDN_NET_ENCAP_SYNCPPP)
- return -EINVAL;
-
- switch (cmd) {
-#define PPP_VERSION "2.3.7"
- case SIOCGPPPVER:
- len = strlen(PPP_VERSION) + 1;
- if (copy_to_user(ifr->ifr_data, PPP_VERSION, len))
- error = -EFAULT;
- break;
-
- case SIOCGPPPSTATS:
- error = isdn_ppp_dev_ioctl_stats(lp->ppp_slot, ifr, dev);
- break;
- default:
- error = -EINVAL;
- break;
- }
- return error;
-}
-
-static int
-isdn_ppp_if_get_unit(char *name)
-{
- int len,
- i,
- unit = 0,
- deci;
-
- len = strlen(name);
-
- if (strncmp("ippp", name, 4) || len > 8)
- return -1;
-
- for (i = 0, deci = 1; i < len; i++, deci *= 10) {
- char a = name[len - i - 1];
- if (a >= '0' && a <= '9')
- unit += (a - '0') * deci;
- else
- break;
- }
- if (!i || len - i != 4)
- unit = -1;
-
- return unit;
-}
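
For reference, a standalone sketch of the "ipppN" name parsing implemented by
isdn_ppp_if_get_unit() above: the name must be "ippp" followed only by decimal
digits and be at most 8 characters in total, otherwise -1 is returned:

#include <stdio.h>
#include <string.h>

static int if_get_unit(const char *name)
{
	size_t len = strlen(name);
	int unit = 0;

	if (strncmp(name, "ippp", 4) != 0 || len < 5 || len > 8)
		return -1;
	for (size_t i = 4; i < len; i++) {
		if (name[i] < '0' || name[i] > '9')
			return -1;
		unit = unit * 10 + (name[i] - '0');
	}
	return unit;
}

int main(void)
{
	printf("%d %d %d\n", if_get_unit("ippp0"),
	       if_get_unit("ippp42"), if_get_unit("eth0"));	/* 0 42 -1 */
	return 0;
}
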
-
-
-int
-isdn_ppp_dial_slave(char *name)
-{
-#ifdef CONFIG_ISDN_MPP
- isdn_net_dev *ndev;
- isdn_net_local *lp;
- struct net_device *sdev;
-
- if (!(ndev = isdn_net_findif(name)))
- return 1;
- lp = ndev->local;
- if (!(lp->flags & ISDN_NET_CONNECTED))
- return 5;
-
- sdev = lp->slave;
- while (sdev) {
- isdn_net_local *mlp = netdev_priv(sdev);
- if (!(mlp->flags & ISDN_NET_CONNECTED))
- break;
- sdev = mlp->slave;
- }
- if (!sdev)
- return 2;
-
- isdn_net_dial_req(netdev_priv(sdev));
- return 0;
-#else
- return -1;
-#endif
-}
-
-int
-isdn_ppp_hangup_slave(char *name)
-{
-#ifdef CONFIG_ISDN_MPP
- isdn_net_dev *ndev;
- isdn_net_local *lp;
- struct net_device *sdev;
-
- if (!(ndev = isdn_net_findif(name)))
- return 1;
- lp = ndev->local;
- if (!(lp->flags & ISDN_NET_CONNECTED))
- return 5;
-
- sdev = lp->slave;
- while (sdev) {
- isdn_net_local *mlp = netdev_priv(sdev);
-
- if (mlp->slave) { /* find last connected link in chain */
- isdn_net_local *nlp = ISDN_SLAVE_PRIV(mlp);
-
- if (!(nlp->flags & ISDN_NET_CONNECTED))
- break;
- } else if (mlp->flags & ISDN_NET_CONNECTED)
- break;
-
- sdev = mlp->slave;
- }
- if (!sdev)
- return 2;
-
- isdn_net_hangup(sdev);
- return 0;
-#else
- return -1;
-#endif
-}
-
-/*
- * PPP compression stuff
- */
-
-
-/* Push an empty CCP Data Frame up to the daemon to wake it up and let it
- generate a CCP Reset-Request or tear down CCP altogether */
-
-static void isdn_ppp_ccp_kickup(struct ippp_struct *is)
-{
- isdn_ppp_fill_rq(NULL, 0, PPP_COMP, is->lp->ppp_slot);
-}
-
-/* In-kernel handling of CCP Reset-Request and Reset-Ack is necessary,
- but absolutely nontrivial. The most abstruse problem we are facing is
- that the generation, reception and all the handling of timeouts and
- resends including proper request id management should be entirely left
- to the (de)compressor, but indeed is not covered by the current API to
- the (de)compressor. The API is a prototype version from PPP where only
- some (de)compressors have yet been implemented and all of them are
-   rather simple in their reset handling. In particular, there is only one
- outstanding ResetAck at a time with all of them and ResetReq/-Acks do
- not have parameters. For this very special case it was sufficient to
- just return an error code from the decompressor and have a single
- reset() entry to communicate all the necessary information between
- the framework and the (de)compressor. Bad enough, LZS is different
- (and any other compressor may be different, too). It has multiple
- histories (eventually) and needs to Reset each of them independently
- and thus uses multiple outstanding Acks and history numbers as an
- additional parameter to Reqs/Acks.
- All that makes it harder to port the reset state engine into the
- kernel because it is not just the same simple one as in (i)pppd but
- it must be able to pass additional parameters and have multiple out-
- standing Acks. We are trying to achieve the impossible by handling
-   reset transactions independently by their id. The id MUST change when
- the data portion changes, thus any (de)compressor who uses more than
- one resettable state must provide and recognize individual ids for
- each individual reset transaction. The framework itself does _only_
- differentiate them by id, because it has no other semantics like the
- (de)compressor might.
-   It looks like a major redesign of the interface would be desirable,
-   but I have no idea how to do it better. */
-
-/* Send a CCP Reset-Request or Reset-Ack directly from the kernel. This is
-   getting this lengthy because there is no simple "send-this-frame-out"
-   function above and every wrapper behaves a bit differently. I hope I
-   guessed correctly in this hack... */
-
-static void isdn_ppp_ccp_xmit_reset(struct ippp_struct *is, int proto,
- unsigned char code, unsigned char id,
- unsigned char *data, int len)
-{
- struct sk_buff *skb;
- unsigned char *p;
- int hl;
- int cnt = 0;
- isdn_net_local *lp = is->lp;
-
- /* Alloc large enough skb */
- hl = dev->drv[lp->isdn_device]->interface->hl_hdrlen;
- skb = alloc_skb(len + hl + 16, GFP_ATOMIC);
- if (!skb) {
- printk(KERN_WARNING
- "ippp: CCP cannot send reset - out of memory\n");
- return;
- }
- skb_reserve(skb, hl);
-
- /* We may need to stuff an address and control field first */
- if (!(is->pppcfg & SC_COMP_AC)) {
- p = skb_put(skb, 2);
- *p++ = 0xff;
- *p++ = 0x03;
- }
-
- /* Stuff proto, code, id and length */
- p = skb_put(skb, 6);
- *p++ = (proto >> 8);
- *p++ = (proto & 0xff);
- *p++ = code;
- *p++ = id;
- cnt = 4 + len;
- *p++ = (cnt >> 8);
- *p++ = (cnt & 0xff);
-
- /* Now stuff remaining bytes */
- if (len) {
- skb_put_data(skb, data, len);
- }
-
- /* skb is now ready for xmit */
- printk(KERN_DEBUG "Sending CCP Frame:\n");
- isdn_ppp_frame_log("ccp-xmit", skb->data, skb->len, 32, is->unit, lp->ppp_slot);
-
- isdn_net_write_super(lp, skb);
-}
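
For reference, a standalone sketch (no skb API) of the CCP frame layout built
by isdn_ppp_ccp_xmit_reset() above: an optional 0xff/0x03 address/control
pair, the 2-byte protocol, then code, id and a 16-bit length that covers code,
id, length and data (4 + len). The protocol and code values used in main()
come from the PPP/CCP RFCs, not from this driver's headers:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static size_t build_ccp_frame(uint8_t *buf, int comp_ac, uint16_t proto,
			      uint8_t code, uint8_t id,
			      const uint8_t *data, uint16_t dlen)
{
	size_t off = 0;
	uint16_t plen = 4 + dlen;	/* CCP length field */

	if (!comp_ac) {			/* address/control not compressed */
		buf[off++] = 0xff;
		buf[off++] = 0x03;
	}
	buf[off++] = proto >> 8;
	buf[off++] = proto & 0xff;
	buf[off++] = code;
	buf[off++] = id;
	buf[off++] = plen >> 8;
	buf[off++] = plen & 0xff;
	if (dlen) {
		memcpy(buf + off, data, dlen);
		off += dlen;
	}
	return off;
}

int main(void)
{
	uint8_t frame[64];
	/* 0x80fd = PPP CCP protocol, 14 = Reset-Request (RFC 1962) */
	size_t n = build_ccp_frame(frame, 0, 0x80fd, 14, 1, NULL, 0);

	for (size_t i = 0; i < n; i++)
		printf("%02x ", frame[i]);
	printf("\n");
	return 0;
}
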
-
-/* Allocate the reset state vector */
-static struct ippp_ccp_reset *isdn_ppp_ccp_reset_alloc(struct ippp_struct *is)
-{
- struct ippp_ccp_reset *r;
- r = kzalloc(sizeof(struct ippp_ccp_reset), GFP_KERNEL);
- if (!r) {
- printk(KERN_ERR "ippp_ccp: failed to allocate reset data"
- " structure - no mem\n");
- return NULL;
- }
- printk(KERN_DEBUG "ippp_ccp: allocated reset data structure %p\n", r);
- is->reset = r;
- return r;
-}
-
-/* Destroy the reset state vector. Kill all pending timers first. */
-static void isdn_ppp_ccp_reset_free(struct ippp_struct *is)
-{
- unsigned int id;
-
- printk(KERN_DEBUG "ippp_ccp: freeing reset data structure %p\n",
- is->reset);
- for (id = 0; id < 256; id++) {
- if (is->reset->rs[id]) {
- isdn_ppp_ccp_reset_free_state(is, (unsigned char)id);
- }
- }
- kfree(is->reset);
- is->reset = NULL;
-}
-
-/* Free a given state and clear everything up for later reallocation */
-static void isdn_ppp_ccp_reset_free_state(struct ippp_struct *is,
- unsigned char id)
-{
- struct ippp_ccp_reset_state *rs;
-
- if (is->reset->rs[id]) {
- printk(KERN_DEBUG "ippp_ccp: freeing state for id %d\n", id);
- rs = is->reset->rs[id];
- /* Make sure the kernel will not call back later */
- if (rs->ta)
- del_timer(&rs->timer);
- is->reset->rs[id] = NULL;
- kfree(rs);
- } else {
- printk(KERN_WARNING "ippp_ccp: id %d is not allocated\n", id);
- }
-}
-
-/* The timer callback function which is called when a ResetReq has timed out,
- aka has never been answered by a ResetAck */
-static void isdn_ppp_ccp_timer_callback(struct timer_list *t)
-{
- struct ippp_ccp_reset_state *rs =
- from_timer(rs, t, timer);
-
- if (!rs) {
- printk(KERN_ERR "ippp_ccp: timer cb with zero closure.\n");
- return;
- }
- if (rs->ta && rs->state == CCPResetSentReq) {
- /* We are correct here */
- if (!rs->expra) {
- /* Hmm, there is no Ack really expected. We can clean
- up the state now, it will be reallocated if the
- decompressor insists on another reset */
- rs->ta = 0;
- isdn_ppp_ccp_reset_free_state(rs->is, rs->id);
- return;
- }
- printk(KERN_DEBUG "ippp_ccp: CCP Reset timed out for id %d\n",
- rs->id);
- /* Push it again */
- isdn_ppp_ccp_xmit_reset(rs->is, PPP_CCP, CCP_RESETREQ, rs->id,
- rs->data, rs->dlen);
- /* Restart timer */
- rs->timer.expires = jiffies + HZ * 5;
- add_timer(&rs->timer);
- } else {
- printk(KERN_WARNING "ippp_ccp: timer cb in wrong state %d\n",
- rs->state);
- }
-}
-
-/* Allocate a new reset transaction state */
-static struct ippp_ccp_reset_state *isdn_ppp_ccp_reset_alloc_state(struct ippp_struct *is,
- unsigned char id)
-{
- struct ippp_ccp_reset_state *rs;
- if (is->reset->rs[id]) {
- printk(KERN_WARNING "ippp_ccp: old state exists for id %d\n",
- id);
- return NULL;
- } else {
- rs = kzalloc(sizeof(struct ippp_ccp_reset_state), GFP_ATOMIC);
- if (!rs)
- return NULL;
- rs->state = CCPResetIdle;
- rs->is = is;
- rs->id = id;
- timer_setup(&rs->timer, isdn_ppp_ccp_timer_callback, 0);
- is->reset->rs[id] = rs;
- }
- return rs;
-}
-
-
-/* A decompressor wants a reset with a set of parameters - do what is
- necessary to fulfill it */
-static void isdn_ppp_ccp_reset_trans(struct ippp_struct *is,
- struct isdn_ppp_resetparams *rp)
-{
- struct ippp_ccp_reset_state *rs;
-
- if (rp->valid) {
- /* The decompressor defines parameters by itself */
- if (rp->rsend) {
- /* And he wants us to send a request */
- if (!(rp->idval)) {
- printk(KERN_ERR "ippp_ccp: decompressor must"
- " specify reset id\n");
- return;
- }
- if (is->reset->rs[rp->id]) {
-				/* There is already a transaction in existence
-				   for this id. It may still be waiting for an
-				   Ack, or it may be in a wrong state. */
- rs = is->reset->rs[rp->id];
- if (rs->state == CCPResetSentReq && rs->ta) {
- printk(KERN_DEBUG "ippp_ccp: reset"
- " trans still in progress"
- " for id %d\n", rp->id);
- } else {
- printk(KERN_WARNING "ippp_ccp: reset"
- " trans in wrong state %d for"
- " id %d\n", rs->state, rp->id);
- }
- } else {
- /* Ok, this is a new transaction */
- printk(KERN_DEBUG "ippp_ccp: new trans for id"
- " %d to be started\n", rp->id);
- rs = isdn_ppp_ccp_reset_alloc_state(is, rp->id);
- if (!rs) {
- printk(KERN_ERR "ippp_ccp: out of mem"
- " allocing ccp trans\n");
- return;
- }
- rs->state = CCPResetSentReq;
- rs->expra = rp->expra;
- if (rp->dtval) {
- rs->dlen = rp->dlen;
- memcpy(rs->data, rp->data, rp->dlen);
- }
- /* HACK TODO - add link comp here */
- isdn_ppp_ccp_xmit_reset(is, PPP_CCP,
- CCP_RESETREQ, rs->id,
- rs->data, rs->dlen);
- /* Start the timer */
- rs->timer.expires = jiffies + 5 * HZ;
- add_timer(&rs->timer);
- rs->ta = 1;
- }
- } else {
- printk(KERN_DEBUG "ippp_ccp: no reset sent\n");
- }
- } else {
- /* The reset params are invalid. The decompressor does not
- care about them, so we just send the minimal requests
- and increase ids only when an Ack is received for a
- given id */
- if (is->reset->rs[is->reset->lastid]) {
-			/* There is already a transaction in existence
-			   for this id. It may still be waiting for an
-			   Ack, or it may be in a wrong state. */
- rs = is->reset->rs[is->reset->lastid];
- if (rs->state == CCPResetSentReq && rs->ta) {
- printk(KERN_DEBUG "ippp_ccp: reset"
- " trans still in progress"
- " for id %d\n", rp->id);
- } else {
- printk(KERN_WARNING "ippp_ccp: reset"
- " trans in wrong state %d for"
- " id %d\n", rs->state, rp->id);
- }
- } else {
- printk(KERN_DEBUG "ippp_ccp: new trans for id"
- " %d to be started\n", is->reset->lastid);
- rs = isdn_ppp_ccp_reset_alloc_state(is,
- is->reset->lastid);
- if (!rs) {
- printk(KERN_ERR "ippp_ccp: out of mem"
- " allocing ccp trans\n");
- return;
- }
- rs->state = CCPResetSentReq;
- /* We always expect an Ack if the decompressor doesn't
- know better */
- rs->expra = 1;
- rs->dlen = 0;
- /* HACK TODO - add link comp here */
- isdn_ppp_ccp_xmit_reset(is, PPP_CCP, CCP_RESETREQ,
- rs->id, NULL, 0);
- /* Start the timer */
- rs->timer.expires = jiffies + 5 * HZ;
- add_timer(&rs->timer);
- rs->ta = 1;
- }
- }
-}
-
-/* An Ack was received for this id. This means we stop the timer and clean
- up the state prior to calling the decompressors reset routine. */
-static void isdn_ppp_ccp_reset_ack_rcvd(struct ippp_struct *is,
- unsigned char id)
-{
- struct ippp_ccp_reset_state *rs = is->reset->rs[id];
-
- if (rs) {
- if (rs->ta && rs->state == CCPResetSentReq) {
- /* Great, we are correct */
- if (!rs->expra)
- printk(KERN_DEBUG "ippp_ccp: ResetAck received"
- " for id %d but not expected\n", id);
- } else {
-			printk(KERN_INFO "ippp_ccp: ResetAck received out of"
-			       " sync for id %d\n", id);
- }
- if (rs->ta) {
- rs->ta = 0;
- del_timer(&rs->timer);
- }
- isdn_ppp_ccp_reset_free_state(is, id);
- } else {
- printk(KERN_INFO "ippp_ccp: ResetAck received for unknown id"
- " %d\n", id);
- }
- /* Make sure the simple reset stuff uses a new id next time */
- is->reset->lastid++;
-}
-
-/*
- * decompress packet
- *
- * if master = 0, we're trying to uncompress a per-link compressed packet,
- * as opposed to a compressed reconstructed-from-MPPP packet.
- * proto is updated to protocol field of uncompressed packet.
- *
- * retval: decompressed packet,
- * same packet if uncompressed,
- * NULL if decompression error
- */
-
-static struct sk_buff *isdn_ppp_decompress(struct sk_buff *skb, struct ippp_struct *is, struct ippp_struct *master,
- int *proto)
-{
- void *stat = NULL;
- struct isdn_ppp_compressor *ipc = NULL;
- struct sk_buff *skb_out;
- int len;
- struct ippp_struct *ri;
- struct isdn_ppp_resetparams rsparm;
- unsigned char rsdata[IPPP_RESET_MAXDATABYTES];
-
- if (!master) {
- // per-link decompression
- stat = is->link_decomp_stat;
- ipc = is->link_decompressor;
- ri = is;
- } else {
- stat = master->decomp_stat;
- ipc = master->decompressor;
- ri = master;
- }
-
- if (!ipc) {
- // no decompressor -> we can't decompress.
- printk(KERN_DEBUG "ippp: no decompressor defined!\n");
- return skb;
- }
- BUG_ON(!stat); // if we have a compressor, stat has been set as well
-
- if ((master && *proto == PPP_COMP) || (!master && *proto == PPP_COMPFRAG)) {
-		// compressed packets are identified by their protocol type
-
- // Set up reset params for the decompressor
- memset(&rsparm, 0, sizeof(rsparm));
- rsparm.data = rsdata;
- rsparm.maxdlen = IPPP_RESET_MAXDATABYTES;
-
- skb_out = dev_alloc_skb(is->mru + PPP_HDRLEN);
- if (!skb_out) {
- kfree_skb(skb);
- printk(KERN_ERR "ippp: decomp memory allocation failure\n");
- return NULL;
- }
- len = ipc->decompress(stat, skb, skb_out, &rsparm);
- kfree_skb(skb);
- if (len <= 0) {
- switch (len) {
- case DECOMP_ERROR:
- printk(KERN_INFO "ippp: decomp wants reset %s params\n",
- rsparm.valid ? "with" : "without");
-
- isdn_ppp_ccp_reset_trans(ri, &rsparm);
- break;
- case DECOMP_FATALERROR:
- ri->pppcfg |= SC_DC_FERROR;
- /* Kick ipppd to recognize the error */
- isdn_ppp_ccp_kickup(ri);
- break;
- }
- kfree_skb(skb_out);
- return NULL;
- }
- *proto = isdn_ppp_strip_proto(skb_out);
- if (*proto < 0) {
- kfree_skb(skb_out);
- return NULL;
- }
- return skb_out;
- } else {
- // uncompressed packets are fed through the decompressor to
- // update the decompressor state
- ipc->incomp(stat, skb, *proto);
- return skb;
- }
-}
-
-/*
- * compress a frame
- * type=0: normal/bundle compression
- * =1: link compression
- * returns original skb if we haven't compressed the frame
- * and a new skb pointer if we've done it
- */
-static struct sk_buff *isdn_ppp_compress(struct sk_buff *skb_in, int *proto,
- struct ippp_struct *is, struct ippp_struct *master, int type)
-{
- int ret;
- int new_proto;
- struct isdn_ppp_compressor *compressor;
- void *stat;
- struct sk_buff *skb_out;
-
- /* we do not compress control protocols */
- if (*proto < 0 || *proto > 0x3fff) {
- return skb_in;
- }
-
- if (type) { /* type=1 => Link compression */
- return skb_in;
- }
- else {
- if (!master) {
- compressor = is->compressor;
- stat = is->comp_stat;
- }
- else {
- compressor = master->compressor;
- stat = master->comp_stat;
- }
- new_proto = PPP_COMP;
- }
-
- if (!compressor) {
- printk(KERN_ERR "isdn_ppp: No compressor set!\n");
- return skb_in;
- }
- if (!stat) {
- printk(KERN_ERR "isdn_ppp: Compressor not initialized?\n");
- return skb_in;
- }
-
- /* Allow for at least 150 % expansion (for now) */
- skb_out = alloc_skb(skb_in->len + skb_in->len / 2 + 32 +
- skb_headroom(skb_in), GFP_ATOMIC);
- if (!skb_out)
- return skb_in;
- skb_reserve(skb_out, skb_headroom(skb_in));
-
- ret = (compressor->compress)(stat, skb_in, skb_out, *proto);
- if (!ret) {
- dev_kfree_skb(skb_out);
- return skb_in;
- }
-
- dev_kfree_skb(skb_in);
- *proto = new_proto;
- return skb_out;
-}
-
-/*
- * we received a CCP frame ..
- * not a clean solution, but we MUST handle a few cases in the kernel
- */
-static void isdn_ppp_receive_ccp(isdn_net_dev *net_dev, isdn_net_local *lp,
- struct sk_buff *skb, int proto)
-{
- struct ippp_struct *is;
- struct ippp_struct *mis;
- int len;
- struct isdn_ppp_resetparams rsparm;
- unsigned char rsdata[IPPP_RESET_MAXDATABYTES];
-
- printk(KERN_DEBUG "Received CCP frame from peer slot(%d)\n",
- lp->ppp_slot);
- if (lp->ppp_slot < 0 || lp->ppp_slot >= ISDN_MAX_CHANNELS) {
- printk(KERN_ERR "%s: lp->ppp_slot(%d) out of range\n",
- __func__, lp->ppp_slot);
- return;
- }
- is = ippp_table[lp->ppp_slot];
- isdn_ppp_frame_log("ccp-rcv", skb->data, skb->len, 32, is->unit, lp->ppp_slot);
-
- if (lp->master) {
- int slot = ISDN_MASTER_PRIV(lp)->ppp_slot;
- if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
- printk(KERN_ERR "%s: slot(%d) out of range\n",
- __func__, slot);
- return;
- }
- mis = ippp_table[slot];
- } else
- mis = is;
-
- switch (skb->data[0]) {
- case CCP_CONFREQ:
- if (is->debug & 0x10)
- printk(KERN_DEBUG "Disable compression here!\n");
- if (proto == PPP_CCP)
- mis->compflags &= ~SC_COMP_ON;
- else
- is->compflags &= ~SC_LINK_COMP_ON;
- break;
- case CCP_TERMREQ:
- case CCP_TERMACK:
- if (is->debug & 0x10)
- printk(KERN_DEBUG "Disable (de)compression here!\n");
- if (proto == PPP_CCP)
- mis->compflags &= ~(SC_DECOMP_ON | SC_COMP_ON);
- else
- is->compflags &= ~(SC_LINK_DECOMP_ON | SC_LINK_COMP_ON);
- break;
- case CCP_CONFACK:
-		/* if we RECEIVE an acknowledge we enable the decompressor */
- if (is->debug & 0x10)
- printk(KERN_DEBUG "Enable decompression here!\n");
- if (proto == PPP_CCP) {
- if (!mis->decompressor)
- break;
- mis->compflags |= SC_DECOMP_ON;
- } else {
- if (!is->decompressor)
- break;
- is->compflags |= SC_LINK_DECOMP_ON;
- }
- break;
-
- case CCP_RESETACK:
- printk(KERN_DEBUG "Received ResetAck from peer\n");
- len = (skb->data[2] << 8) | skb->data[3];
- len -= 4;
-
- if (proto == PPP_CCP) {
- /* If a reset Ack was outstanding for this id, then
- clean up the state engine */
- isdn_ppp_ccp_reset_ack_rcvd(mis, skb->data[1]);
- if (mis->decompressor && mis->decomp_stat)
- mis->decompressor->
- reset(mis->decomp_stat,
- skb->data[0],
- skb->data[1],
- len ? &skb->data[4] : NULL,
- len, NULL);
- /* TODO: This is not easy to decide here */
- mis->compflags &= ~SC_DECOMP_DISCARD;
- }
- else {
- isdn_ppp_ccp_reset_ack_rcvd(is, skb->data[1]);
- if (is->link_decompressor && is->link_decomp_stat)
- is->link_decompressor->
- reset(is->link_decomp_stat,
- skb->data[0],
- skb->data[1],
- len ? &skb->data[4] : NULL,
- len, NULL);
- /* TODO: neither here */
- is->compflags &= ~SC_LINK_DECOMP_DISCARD;
- }
- break;
-
- case CCP_RESETREQ:
- printk(KERN_DEBUG "Received ResetReq from peer\n");
- /* Receiving a ResetReq means we must reset our compressor */
- /* Set up reset params for the reset entry */
- memset(&rsparm, 0, sizeof(rsparm));
- rsparm.data = rsdata;
- rsparm.maxdlen = IPPP_RESET_MAXDATABYTES;
- /* Isolate data length */
- len = (skb->data[2] << 8) | skb->data[3];
- len -= 4;
- if (proto == PPP_CCP) {
- if (mis->compressor && mis->comp_stat)
- mis->compressor->
- reset(mis->comp_stat,
- skb->data[0],
- skb->data[1],
- len ? &skb->data[4] : NULL,
- len, &rsparm);
- }
- else {
- if (is->link_compressor && is->link_comp_stat)
- is->link_compressor->
- reset(is->link_comp_stat,
- skb->data[0],
- skb->data[1],
- len ? &skb->data[4] : NULL,
- len, &rsparm);
- }
- /* Ack the Req as specified by rsparm */
- if (rsparm.valid) {
- /* Compressor reset handler decided how to answer */
- if (rsparm.rsend) {
- /* We should send a Frame */
- isdn_ppp_ccp_xmit_reset(is, proto, CCP_RESETACK,
- rsparm.idval ? rsparm.id
- : skb->data[1],
- rsparm.dtval ?
- rsparm.data : NULL,
- rsparm.dtval ?
- rsparm.dlen : 0);
- } else {
- printk(KERN_DEBUG "ResetAck suppressed\n");
- }
- } else {
- /* We answer with a straight reflected Ack */
- isdn_ppp_ccp_xmit_reset(is, proto, CCP_RESETACK,
- skb->data[1],
- len ? &skb->data[4] : NULL,
- len);
- }
- break;
- }
-}
-
-
-/*
- * Daemon sends a CCP frame ...
- */
-
-/* TODO: Clean this up with new Reset semantics */
-
-/* I believe the CCP handling as-is is done wrong. Compressed frames
- * should only be sent/received after CCP reaches UP state, which means
- * both sides have sent CONF_ACK. Currently, we handle both directions
- * independently, which means we may accept compressed frames too early
- * (supposedly not a problem), but may also mean we send compressed frames
- * too early, which may turn out to be a problem.
- * This part of state machine should actually be handled by (i)pppd, but
- * that's too big of a change now. --kai
- */
-
-/* Actually, we might turn this into an advantage: deal with the RFC in
- * the old tradition of being generous in what we accept, but being
- * strict in what we send. Thus we should just
- * - accept compressed frames as soon as decompression is negotiated
- * - send compressed frames only when decomp *and* comp are negotiated
- * - drop rx compressed frames if we cannot decomp (instead of pushing them
- * up to ipppd)
- * and I tried to modify this file according to that. --abp
- */
-
-static void isdn_ppp_send_ccp(isdn_net_dev *net_dev, isdn_net_local *lp, struct sk_buff *skb)
-{
- struct ippp_struct *mis, *is;
- int proto, slot = lp->ppp_slot;
- unsigned char *data;
-
- if (!skb || skb->len < 3)
- return;
- if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
- printk(KERN_ERR "%s: lp->ppp_slot(%d) out of range\n",
- __func__, slot);
- return;
- }
- is = ippp_table[slot];
- /* Daemon may send with or without address and control field comp */
- data = skb->data;
- if (!(is->pppcfg & SC_COMP_AC) && data[0] == 0xff && data[1] == 0x03) {
- data += 2;
- if (skb->len < 5)
- return;
- }
-
- proto = ((int)data[0]<<8) + data[1];
- if (proto != PPP_CCP && proto != PPP_CCPFRAG)
- return;
-
- printk(KERN_DEBUG "Received CCP frame from daemon:\n");
- isdn_ppp_frame_log("ccp-xmit", skb->data, skb->len, 32, is->unit, lp->ppp_slot);
-
- if (lp->master) {
- slot = ISDN_MASTER_PRIV(lp)->ppp_slot;
- if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
- printk(KERN_ERR "%s: slot(%d) out of range\n",
- __func__, slot);
- return;
- }
- mis = ippp_table[slot];
- } else
- mis = is;
- if (mis != is)
- printk(KERN_DEBUG "isdn_ppp: Ouch! Master CCP sends on slave slot!\n");
-
- switch (data[2]) {
- case CCP_CONFREQ:
- if (is->debug & 0x10)
- printk(KERN_DEBUG "Disable decompression here!\n");
- if (proto == PPP_CCP)
- is->compflags &= ~SC_DECOMP_ON;
- else
- is->compflags &= ~SC_LINK_DECOMP_ON;
- break;
- case CCP_TERMREQ:
- case CCP_TERMACK:
- if (is->debug & 0x10)
- printk(KERN_DEBUG "Disable (de)compression here!\n");
- if (proto == PPP_CCP)
- is->compflags &= ~(SC_DECOMP_ON | SC_COMP_ON);
- else
- is->compflags &= ~(SC_LINK_DECOMP_ON | SC_LINK_COMP_ON);
- break;
- case CCP_CONFACK:
-		/* if we SEND an acknowledge we can/must enable the compressor */
- if (is->debug & 0x10)
- printk(KERN_DEBUG "Enable compression here!\n");
- if (proto == PPP_CCP) {
- if (!is->compressor)
- break;
- is->compflags |= SC_COMP_ON;
- } else {
- if (!is->compressor)
- break;
- is->compflags |= SC_LINK_COMP_ON;
- }
- break;
- case CCP_RESETACK:
-		/* If we send an ACK we should reset our compressor */
- if (is->debug & 0x10)
- printk(KERN_DEBUG "Reset decompression state here!\n");
- printk(KERN_DEBUG "ResetAck from daemon passed by\n");
- if (proto == PPP_CCP) {
- /* link to master? */
- if (is->compressor && is->comp_stat)
- is->compressor->reset(is->comp_stat, 0, 0,
- NULL, 0, NULL);
- is->compflags &= ~SC_COMP_DISCARD;
- }
- else {
- if (is->link_compressor && is->link_comp_stat)
- is->link_compressor->reset(is->link_comp_stat,
- 0, 0, NULL, 0, NULL);
- is->compflags &= ~SC_LINK_COMP_DISCARD;
- }
- break;
- case CCP_RESETREQ:
- /* Just let it pass by */
- printk(KERN_DEBUG "ResetReq from daemon passed by\n");
- break;
- }
-}
-
-int isdn_ppp_register_compressor(struct isdn_ppp_compressor *ipc)
-{
- ipc->next = ipc_head;
- ipc->prev = NULL;
- if (ipc_head) {
- ipc_head->prev = ipc;
- }
- ipc_head = ipc;
- return 0;
-}
-
-int isdn_ppp_unregister_compressor(struct isdn_ppp_compressor *ipc)
-{
- if (ipc->prev)
- ipc->prev->next = ipc->next;
- else
- ipc_head = ipc->next;
- if (ipc->next)
- ipc->next->prev = ipc->prev;
- ipc->prev = ipc->next = NULL;
- return 0;
-}
-
-static int isdn_ppp_set_compressor(struct ippp_struct *is, struct isdn_ppp_comp_data *data)
-{
- struct isdn_ppp_compressor *ipc = ipc_head;
- int ret;
- void *stat;
- int num = data->num;
-
- if (is->debug & 0x10)
- printk(KERN_DEBUG "[%d] Set %s type %d\n", is->unit,
- (data->flags & IPPP_COMP_FLAG_XMIT) ? "compressor" : "decompressor", num);
-
-	/* If 'is' has no valid reset state vector, we cannot allocate a
- decompressor. The decompressor would cause reset transactions
- sooner or later, and they need that vector. */
-
- if (!(data->flags & IPPP_COMP_FLAG_XMIT) && !is->reset) {
- printk(KERN_ERR "ippp_ccp: no reset data structure - can't"
- " allow decompression.\n");
- return -ENOMEM;
- }
-
- while (ipc) {
- if (ipc->num == num) {
- stat = ipc->alloc(data);
- if (stat) {
- ret = ipc->init(stat, data, is->unit, 0);
- if (!ret) {
- printk(KERN_ERR "Can't init (de)compression!\n");
- ipc->free(stat);
- stat = NULL;
- break;
- }
- }
- else {
- printk(KERN_ERR "Can't alloc (de)compression!\n");
- break;
- }
-
- if (data->flags & IPPP_COMP_FLAG_XMIT) {
- if (data->flags & IPPP_COMP_FLAG_LINK) {
- if (is->link_comp_stat)
- is->link_compressor->free(is->link_comp_stat);
- is->link_comp_stat = stat;
- is->link_compressor = ipc;
- }
- else {
- if (is->comp_stat)
- is->compressor->free(is->comp_stat);
- is->comp_stat = stat;
- is->compressor = ipc;
- }
- }
- else {
- if (data->flags & IPPP_COMP_FLAG_LINK) {
- if (is->link_decomp_stat)
- is->link_decompressor->free(is->link_decomp_stat);
- is->link_decomp_stat = stat;
- is->link_decompressor = ipc;
- }
- else {
- if (is->decomp_stat)
- is->decompressor->free(is->decomp_stat);
- is->decomp_stat = stat;
- is->decompressor = ipc;
- }
- }
- return 0;
- }
- ipc = ipc->next;
- }
- return -EINVAL;
-}
diff --git a/drivers/isdn/i4l/isdn_ppp.h b/drivers/isdn/i4l/isdn_ppp.h
deleted file mode 100644
index 34b8a2ce84f3..000000000000
--- a/drivers/isdn/i4l/isdn_ppp.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/* $Id: isdn_ppp.h,v 1.1.2.2 2004/01/12 22:37:19 keil Exp $
- *
- * header for Linux ISDN subsystem, functions for synchronous PPP (linklevel).
- *
- * Copyright 1995,96 by Michael Hipp (Michael.Hipp@student.uni-tuebingen.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/ppp_defs.h> /* for PPP_PROTOCOL */
-#include <linux/isdn_ppp.h> /* for isdn_ppp info */
-
-extern int isdn_ppp_read(int, struct file *, char __user *, int);
-extern int isdn_ppp_write(int, struct file *, const char __user *, int);
-extern int isdn_ppp_open(int, struct file *);
-extern int isdn_ppp_init(void);
-extern void isdn_ppp_cleanup(void);
-extern int isdn_ppp_free(isdn_net_local *);
-extern int isdn_ppp_bind(isdn_net_local *);
-extern int isdn_ppp_autodial_filter(struct sk_buff *, isdn_net_local *);
-extern int isdn_ppp_xmit(struct sk_buff *, struct net_device *);
-extern void isdn_ppp_receive(isdn_net_dev *, isdn_net_local *, struct sk_buff *);
-extern int isdn_ppp_dev_ioctl(struct net_device *, struct ifreq *, int);
-extern __poll_t isdn_ppp_poll(struct file *, struct poll_table_struct *);
-extern int isdn_ppp_ioctl(int, struct file *, unsigned int, unsigned long);
-extern void isdn_ppp_release(int, struct file *);
-extern int isdn_ppp_dial_slave(char *);
-extern void isdn_ppp_wakeup_daemon(isdn_net_local *);
-
-extern int isdn_ppp_register_compressor(struct isdn_ppp_compressor *ipc);
-extern int isdn_ppp_unregister_compressor(struct isdn_ppp_compressor *ipc);
-
-#define IPPP_OPEN 0x01
-#define IPPP_CONNECT 0x02
-#define IPPP_CLOSEWAIT 0x04
-#define IPPP_NOBLOCK 0x08
-#define IPPP_ASSIGNED 0x10
-
-#define IPPP_MAX_HEADER 10
diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
deleted file mode 100644
index 43700fc19a31..000000000000
--- a/drivers/isdn/i4l/isdn_tty.c
+++ /dev/null
@@ -1,3756 +0,0 @@
-/*
- * Linux ISDN subsystem, tty functions and AT-command emulator (linklevel).
- *
- * Copyright 1994-1999 by Fritz Elfert (fritz@isdn4linux.de)
- * Copyright 1995,96 by Thinking Objects Software GmbH Wuerzburg
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-#undef ISDN_TTY_STAT_DEBUG
-
-#include <linux/isdn.h>
-#include <linux/serial.h> /* ASYNC_* flags */
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/mutex.h>
-#include <linux/sched/signal.h>
-#include "isdn_common.h"
-#include "isdn_tty.h"
-#ifdef CONFIG_ISDN_AUDIO
-#include "isdn_audio.h"
-#define VBUF 0x3e0
-#define VBUFX (VBUF/16)
-#endif
-
-#define FIX_FILE_TRANSFER
-#define DUMMY_HAYES_AT
-
-/* Prototypes */
-
-static DEFINE_MUTEX(modem_info_mutex);
-static int isdn_tty_edit_at(const char *, int, modem_info *);
-static void isdn_tty_check_esc(const u_char *, u_char, int, int *, u_long *);
-static void isdn_tty_modem_reset_regs(modem_info *, int);
-static void isdn_tty_cmd_ATA(modem_info *);
-static void isdn_tty_flush_buffer(struct tty_struct *);
-static void isdn_tty_modem_result(int, modem_info *);
-#ifdef CONFIG_ISDN_AUDIO
-static int isdn_tty_countDLE(unsigned char *, int);
-#endif
-
-/* Leave this unchanged unless you know what you do! */
-#define MODEM_PARANOIA_CHECK
-#define MODEM_DO_RESTART
-
-static int bit2si[8] =
-{1, 5, 7, 7, 7, 7, 7, 7};
-static int si2bit[8] =
-{4, 1, 4, 4, 4, 4, 4, 4};
-
-/* isdn_tty_try_read() is called from within isdn_tty_rcv_skb()
- * to stuff incoming data directly into a tty's flip-buffer. This
- * is done to speed up tty-receiving if the receive-queue is empty.
- * This routine MUST be called with interrupts off.
- * Return:
- * 1 = Success
- * 0 = Failure, data has to be buffered and later processed by
- * isdn_tty_readmodem().
- */
-static int
-isdn_tty_try_read(modem_info *info, struct sk_buff *skb)
-{
- struct tty_port *port = &info->port;
- int c;
- int len;
- char last;
-
- if (!info->online)
- return 0;
-
- if (!(info->mcr & UART_MCR_RTS))
- return 0;
-
- len = skb->len
-#ifdef CONFIG_ISDN_AUDIO
- + ISDN_AUDIO_SKB_DLECOUNT(skb)
-#endif
- ;
-
- c = tty_buffer_request_room(port, len);
- if (c < len)
- return 0;
-
-#ifdef CONFIG_ISDN_AUDIO
- if (ISDN_AUDIO_SKB_DLECOUNT(skb)) {
- int l = skb->len;
- unsigned char *dp = skb->data;
- while (--l) {
- if (*dp == DLE)
- tty_insert_flip_char(port, DLE, 0);
- tty_insert_flip_char(port, *dp++, 0);
- }
- if (*dp == DLE)
- tty_insert_flip_char(port, DLE, 0);
- last = *dp;
- } else {
-#endif
- if (len > 1)
- tty_insert_flip_string(port, skb->data, len - 1);
- last = skb->data[len - 1];
-#ifdef CONFIG_ISDN_AUDIO
- }
-#endif
- if (info->emu.mdmreg[REG_CPPP] & BIT_CPPP)
- tty_insert_flip_char(port, last, 0xFF);
- else
- tty_insert_flip_char(port, last, TTY_NORMAL);
- tty_flip_buffer_push(port);
- kfree_skb(skb);
-
- return 1;
-}
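
For reference, a standalone sketch of the DLE doubling that
isdn_tty_try_read() above performs on audio data before pushing it into the
flip buffer: every DLE (0x10) byte in the payload is escaped by doubling it,
so the reading application can tell payload DLEs apart from DLE-prefixed
control sequences:

#include <stddef.h>
#include <stdio.h>

#define DLE 0x10

/* Returns the stuffed length; out must have room for 2 * len bytes. */
static size_t dle_stuff(const unsigned char *in, size_t len,
			unsigned char *out)
{
	size_t n = 0;

	for (size_t i = 0; i < len; i++) {
		if (in[i] == DLE)
			out[n++] = DLE;	/* escape by doubling */
		out[n++] = in[i];
	}
	return n;
}

int main(void)
{
	const unsigned char in[] = { 0x01, DLE, 0x02 };
	unsigned char out[8];
	size_t n = dle_stuff(in, sizeof(in), out);

	for (size_t i = 0; i < n; i++)
		printf("%02x ", out[i]);	/* 01 10 10 02 */
	printf("\n");
	return 0;
}
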
-
-/* isdn_tty_readmodem() is called periodically from within timer-interrupt.
- * It tries to get received data from the receive queue and stuff it into
- * the tty's flip-buffer.
- */
-void
-isdn_tty_readmodem(void)
-{
- int resched = 0;
- int midx;
- int i;
- int r;
- modem_info *info;
-
- for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
- midx = dev->m_idx[i];
- if (midx < 0)
- continue;
-
- info = &dev->mdm.info[midx];
- if (!info->online)
- continue;
-
- r = 0;
-#ifdef CONFIG_ISDN_AUDIO
- isdn_audio_eval_dtmf(info);
- if ((info->vonline & 1) && (info->emu.vpar[1]))
- isdn_audio_eval_silence(info);
-#endif
- if (info->mcr & UART_MCR_RTS) {
- /* CISCO AsyncPPP Hack */
- if (!(info->emu.mdmreg[REG_CPPP] & BIT_CPPP))
- r = isdn_readbchan_tty(info->isdn_driver,
- info->isdn_channel,
- &info->port, 0);
- else
- r = isdn_readbchan_tty(info->isdn_driver,
- info->isdn_channel,
- &info->port, 1);
- if (r)
- tty_flip_buffer_push(&info->port);
- } else
- r = 1;
-
- if (r) {
- info->rcvsched = 0;
- resched = 1;
- } else
- info->rcvsched = 1;
- }
- if (!resched)
- isdn_timer_ctrl(ISDN_TIMER_MODEMREAD, 0);
-}
-
-int
-isdn_tty_rcv_skb(int i, int di, int channel, struct sk_buff *skb)
-{
- ulong flags;
- int midx;
-#ifdef CONFIG_ISDN_AUDIO
- int ifmt;
-#endif
- modem_info *info;
-
- if ((midx = dev->m_idx[i]) < 0) {
- /* if midx is invalid, packet is not for tty */
- return 0;
- }
- info = &dev->mdm.info[midx];
-#ifdef CONFIG_ISDN_AUDIO
- ifmt = 1;
-
- if ((info->vonline) && (!info->emu.vpar[4]))
- isdn_audio_calc_dtmf(info, skb->data, skb->len, ifmt);
- if ((info->vonline & 1) && (info->emu.vpar[1]))
- isdn_audio_calc_silence(info, skb->data, skb->len, ifmt);
-#endif
- if ((info->online < 2)
-#ifdef CONFIG_ISDN_AUDIO
- && (!(info->vonline & 1))
-#endif
- ) {
- /* If Modem not listening, drop data */
- kfree_skb(skb);
- return 1;
- }
- if (info->emu.mdmreg[REG_T70] & BIT_T70) {
- if (info->emu.mdmreg[REG_T70] & BIT_T70_EXT) {
- /* T.70 decoding: throw away the T.70 header (2 or 4 bytes) */
- if (skb->data[0] == 3) /* pure data packet -> 4 byte headers */
- skb_pull(skb, 4);
- else
- if (skb->data[0] == 1) /* keepalive packet -> 2 byte hdr */
- skb_pull(skb, 2);
- } else
- /* T.70 decoding: Simply throw away the T.70 header (4 bytes) */
- if ((skb->data[0] == 1) && ((skb->data[1] == 0) || (skb->data[1] == 1)))
- skb_pull(skb, 4);
- }
-#ifdef CONFIG_ISDN_AUDIO
- ISDN_AUDIO_SKB_DLECOUNT(skb) = 0;
- ISDN_AUDIO_SKB_LOCK(skb) = 0;
- if (info->vonline & 1) {
- /* voice conversion/compression */
- switch (info->emu.vpar[3]) {
- case 2:
- case 3:
- case 4:
- /* adpcm
- * Since compressed data takes less
- * space, we can overwrite the buffer.
- */
- skb_trim(skb, isdn_audio_xlaw2adpcm(info->adpcmr,
- ifmt,
- skb->data,
- skb->data,
- skb->len));
- break;
- case 5:
- /* a-law */
- if (!ifmt)
- isdn_audio_ulaw2alaw(skb->data, skb->len);
- break;
- case 6:
- /* u-law */
- if (ifmt)
- isdn_audio_alaw2ulaw(skb->data, skb->len);
- break;
- }
- ISDN_AUDIO_SKB_DLECOUNT(skb) =
- isdn_tty_countDLE(skb->data, skb->len);
- }
-#ifdef CONFIG_ISDN_TTY_FAX
- else {
- if (info->faxonline & 2) {
- isdn_tty_fax_bitorder(info, skb);
- ISDN_AUDIO_SKB_DLECOUNT(skb) =
- isdn_tty_countDLE(skb->data, skb->len);
- }
- }
-#endif
-#endif
- /* Try to deliver directly via tty-buf if queue is empty */
- spin_lock_irqsave(&info->readlock, flags);
- if (skb_queue_empty(&dev->drv[di]->rpqueue[channel]))
- if (isdn_tty_try_read(info, skb)) {
- spin_unlock_irqrestore(&info->readlock, flags);
- return 1;
- }
- /* Direct deliver failed or queue wasn't empty.
- * Queue up for later dequeueing via timer-irq.
- */
- __skb_queue_tail(&dev->drv[di]->rpqueue[channel], skb);
- dev->drv[di]->rcvcount[channel] +=
- (skb->len
-#ifdef CONFIG_ISDN_AUDIO
- + ISDN_AUDIO_SKB_DLECOUNT(skb)
-#endif
- );
- spin_unlock_irqrestore(&info->readlock, flags);
- /* Schedule dequeuing */
- if ((dev->modempoll) && (info->rcvsched))
- isdn_timer_ctrl(ISDN_TIMER_MODEMREAD, 1);
- return 1;
-}
-
-static void
-isdn_tty_cleanup_xmit(modem_info *info)
-{
- skb_queue_purge(&info->xmit_queue);
-#ifdef CONFIG_ISDN_AUDIO
- skb_queue_purge(&info->dtmf_queue);
-#endif
-}
-
-static void
-isdn_tty_tint(modem_info *info)
-{
- struct sk_buff *skb = skb_dequeue(&info->xmit_queue);
- int len, slen;
-
- if (!skb)
- return;
- len = skb->len;
- if ((slen = isdn_writebuf_skb_stub(info->isdn_driver,
- info->isdn_channel, 1, skb)) == len) {
- struct tty_struct *tty = info->port.tty;
- info->send_outstanding++;
- info->msr &= ~UART_MSR_CTS;
- info->lsr &= ~UART_LSR_TEMT;
- tty_wakeup(tty);
- return;
- }
- if (slen < 0) {
- /* Error: no channel, already shutdown, or wrong parameter */
- dev_kfree_skb(skb);
- return;
- }
- skb_queue_head(&info->xmit_queue, skb);
-}
-
-#ifdef CONFIG_ISDN_AUDIO
-static int
-isdn_tty_countDLE(unsigned char *buf, int len)
-{
- int count = 0;
-
- while (len--)
- if (*buf++ == DLE)
- count++;
- return count;
-}
-
-/* This routine is called from within isdn_tty_write() to perform
- * DLE-decoding when sending audio-data.
- */
-static int
-isdn_tty_handleDLEdown(modem_info *info, atemu *m, int len)
-{
- unsigned char *p = &info->port.xmit_buf[info->xmit_count];
- int count = 0;
-
- while (len > 0) {
- if (m->lastDLE) {
- m->lastDLE = 0;
- switch (*p) {
- case DLE:
- /* Escape code */
- if (len > 1)
- memmove(p, p + 1, len - 1);
- p--;
- count++;
- break;
- case ETX:
- /* End of data */
- info->vonline |= 4;
- return count;
- case DC4:
- /* Abort RX */
- info->vonline &= ~1;
-#ifdef ISDN_DEBUG_MODEM_VOICE
- printk(KERN_DEBUG
- "DLEdown: got DLE-DC4, send DLE-ETX on ttyI%d\n",
- info->line);
-#endif
- isdn_tty_at_cout("\020\003", info);
- if (!info->vonline) {
-#ifdef ISDN_DEBUG_MODEM_VOICE
- printk(KERN_DEBUG
- "DLEdown: send VCON on ttyI%d\n",
- info->line);
-#endif
- isdn_tty_at_cout("\r\nVCON\r\n", info);
- }
- /* Fall through */
- case 'q':
- case 's':
- /* Silence */
- if (len > 1)
- memmove(p, p + 1, len - 1);
- p--;
- break;
- }
- } else {
- if (*p == DLE)
- m->lastDLE = 1;
- else
- count++;
- }
- p++;
- len--;
- }
- if (len < 0) {
- printk(KERN_WARNING "isdn_tty: len<0 in DLEdown\n");
- return 0;
- }
- return count;
-}
-
-/* This routine is called from within isdn_tty_write() when receiving
- * audio-data. It interrupts receiving if a character other than
- * ^S or ^Q is sent.
- */
-static int
-isdn_tty_end_vrx(const char *buf, int c)
-{
- char ch;
-
- while (c--) {
- ch = *buf;
- if ((ch != 0x11) && (ch != 0x13))
- return 1;
- buf++;
- }
- return 0;
-}
-
-static int voice_cf[7] =
-{0, 0, 4, 3, 2, 0, 0};
-
-#endif /* CONFIG_ISDN_AUDIO */
-
-/* isdn_tty_senddown() is called either directly from within isdn_tty_write()
- * or via timer-interrupt from within isdn_tty_modem_xmit(). It pulls
- * outgoing data from the tty's xmit-buffer, handles voice-decompression or
- * T.70 if necessary, and finally queues it up for sending via isdn_tty_tint.
- */
-static void
-isdn_tty_senddown(modem_info *info)
-{
- int buflen;
- int skb_res;
-#ifdef CONFIG_ISDN_AUDIO
- int audio_len;
-#endif
- struct sk_buff *skb;
-
-#ifdef CONFIG_ISDN_AUDIO
- if (info->vonline & 4) {
- info->vonline &= ~6;
- if (!info->vonline) {
-#ifdef ISDN_DEBUG_MODEM_VOICE
- printk(KERN_DEBUG
- "senddown: send VCON on ttyI%d\n",
- info->line);
-#endif
- isdn_tty_at_cout("\r\nVCON\r\n", info);
- }
- }
-#endif
- if (!(buflen = info->xmit_count))
- return;
- if ((info->emu.mdmreg[REG_CTS] & BIT_CTS) != 0)
- info->msr &= ~UART_MSR_CTS;
- info->lsr &= ~UART_LSR_TEMT;
- /* info->xmit_count is modified here and in isdn_tty_write().
- * So we return here if isdn_tty_write() is in the
- * critical section.
- */
- atomic_inc(&info->xmit_lock);
- if (!(atomic_dec_and_test(&info->xmit_lock)))
- return;
- if (info->isdn_driver < 0) {
- info->xmit_count = 0;
- return;
- }
- skb_res = dev->drv[info->isdn_driver]->interface->hl_hdrlen + 4;
-#ifdef CONFIG_ISDN_AUDIO
- if (info->vonline & 2)
- audio_len = buflen * voice_cf[info->emu.vpar[3]];
- else
- audio_len = 0;
- skb = dev_alloc_skb(skb_res + buflen + audio_len);
-#else
- skb = dev_alloc_skb(skb_res + buflen);
-#endif
- if (!skb) {
- printk(KERN_WARNING
- "isdn_tty: Out of memory in ttyI%d senddown\n",
- info->line);
- return;
- }
- skb_reserve(skb, skb_res);
- skb_put_data(skb, info->port.xmit_buf, buflen);
- info->xmit_count = 0;
-#ifdef CONFIG_ISDN_AUDIO
- if (info->vonline & 2) {
- /* For now, ifmt is fixed to 1 (alaw), since this
- * is used with ISDN everywhere in the world, except
- * US, Canada and Japan.
- * Later, when US-ISDN protocols are implemented,
- * this setting will depend on the D-channel protocol.
- */
- int ifmt = 1;
-
- /* voice conversion/decompression */
- switch (info->emu.vpar[3]) {
- case 2:
- case 3:
- case 4:
- /* adpcm, compatible to ZyXel 1496 modem
- * with ROM revision 6.01
- */
- audio_len = isdn_audio_adpcm2xlaw(info->adpcms,
- ifmt,
- skb->data,
- skb_put(skb, audio_len),
- buflen);
- skb_pull(skb, buflen);
- skb_trim(skb, audio_len);
- break;
- case 5:
- /* a-law */
- if (!ifmt)
- isdn_audio_alaw2ulaw(skb->data,
- buflen);
- break;
- case 6:
- /* u-law */
- if (ifmt)
- isdn_audio_ulaw2alaw(skb->data,
- buflen);
- break;
- }
- }
-#endif /* CONFIG_ISDN_AUDIO */
- if (info->emu.mdmreg[REG_T70] & BIT_T70) {
- /* Add T.70 simplified header */
- if (info->emu.mdmreg[REG_T70] & BIT_T70_EXT)
- memcpy(skb_push(skb, 2), "\1\0", 2);
- else
- memcpy(skb_push(skb, 4), "\1\0\1\0", 4);
- }
- skb_queue_tail(&info->xmit_queue, skb);
-}
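
For reference, a standalone sketch of the simplified T.70 header that
isdn_tty_senddown() above prepends to the payload: 2 bytes (01 00) in extended
T.70 mode, otherwise 4 bytes (01 00 01 00):

#include <stdio.h>
#include <string.h>

/* buf must have enough spare room beyond len for the header */
static size_t t70_prepend(unsigned char *buf, size_t len, int extended)
{
	static const unsigned char hdr2[2] = { 0x01, 0x00 };
	static const unsigned char hdr4[4] = { 0x01, 0x00, 0x01, 0x00 };
	size_t hl = extended ? 2 : 4;
	const unsigned char *hdr = extended ? hdr2 : hdr4;

	memmove(buf + hl, buf, len);	/* make room in front of the payload */
	memcpy(buf, hdr, hl);
	return len + hl;
}

int main(void)
{
	unsigned char buf[16] = { 0xaa, 0xbb };
	size_t n = t70_prepend(buf, 2, 0);

	for (size_t i = 0; i < n; i++)
		printf("%02x ", buf[i]);	/* 01 00 01 00 aa bb */
	printf("\n");
	return 0;
}
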
-
-/************************************************************
- *
- * Modem-functions
- *
- * mostly "stolen" from original Linux-serial.c and friends.
- *
- ************************************************************/
-
-/* The next routine is called once from within timer-interrupt
- * triggered within isdn_tty_modem_ncarrier(). It calls
- * isdn_tty_modem_result() to stuff a "NO CARRIER" Message
- * into the tty's buffer.
- */
-static void
-isdn_tty_modem_do_ncarrier(struct timer_list *t)
-{
- modem_info *info = from_timer(info, t, nc_timer);
- isdn_tty_modem_result(RESULT_NO_CARRIER, info);
-}
-
-/* The next routine is called whenever the DTR signal is raised.
- * It checks the ncarrier flag and triggers the above routine
- * when necessary. The ncarrier flag is set whenever DTR goes
- * low.
- */
-static void
-isdn_tty_modem_ncarrier(modem_info *info)
-{
- if (info->ncarrier) {
- info->nc_timer.expires = jiffies + HZ;
- add_timer(&info->nc_timer);
- }
-}
-
-/*
- * return the usage calculated by si and layer 2 protocol
- */
-static int
-isdn_calc_usage(int si, int l2)
-{
- int usg = ISDN_USAGE_MODEM;
-
-#ifdef CONFIG_ISDN_AUDIO
- if (si == 1) {
- switch (l2) {
- case ISDN_PROTO_L2_MODEM:
- usg = ISDN_USAGE_MODEM;
- break;
-#ifdef CONFIG_ISDN_TTY_FAX
- case ISDN_PROTO_L2_FAX:
- usg = ISDN_USAGE_FAX;
- break;
-#endif
- case ISDN_PROTO_L2_TRANS:
- default:
- usg = ISDN_USAGE_VOICE;
- break;
- }
- }
-#endif
- return (usg);
-}
-
-/* isdn_tty_dial() performs dialing of a tty and the necessary
- * setup of the lower levels before that.
- */
-static void
-isdn_tty_dial(char *n, modem_info *info, atemu *m)
-{
- int usg = ISDN_USAGE_MODEM;
- int si = 7;
- int l2 = m->mdmreg[REG_L2PROT];
- u_long flags;
- isdn_ctrl cmd;
- int i;
- int j;
-
- for (j = 7; j >= 0; j--)
- if (m->mdmreg[REG_SI1] & (1 << j)) {
- si = bit2si[j];
- break;
- }
- usg = isdn_calc_usage(si, l2);
-#ifdef CONFIG_ISDN_AUDIO
- if ((si == 1) &&
- (l2 != ISDN_PROTO_L2_MODEM)
-#ifdef CONFIG_ISDN_TTY_FAX
- && (l2 != ISDN_PROTO_L2_FAX)
-#endif
- ) {
- l2 = ISDN_PROTO_L2_TRANS;
- usg = ISDN_USAGE_VOICE;
- }
-#endif
- m->mdmreg[REG_SI1I] = si2bit[si];
- spin_lock_irqsave(&dev->lock, flags);
- i = isdn_get_free_channel(usg, l2, m->mdmreg[REG_L3PROT], -1, -1, m->msn);
- if (i < 0) {
- spin_unlock_irqrestore(&dev->lock, flags);
- isdn_tty_modem_result(RESULT_NO_DIALTONE, info);
- } else {
- info->isdn_driver = dev->drvmap[i];
- info->isdn_channel = dev->chanmap[i];
- info->drv_index = i;
- dev->m_idx[i] = info->line;
- dev->usage[i] |= ISDN_USAGE_OUTGOING;
- info->last_dir = 1;
- strcpy(info->last_num, n);
- isdn_info_update();
- spin_unlock_irqrestore(&dev->lock, flags);
- cmd.driver = info->isdn_driver;
- cmd.arg = info->isdn_channel;
- cmd.command = ISDN_CMD_CLREAZ;
- isdn_command(&cmd);
- strcpy(cmd.parm.num, isdn_map_eaz2msn(m->msn, info->isdn_driver));
- cmd.driver = info->isdn_driver;
- cmd.command = ISDN_CMD_SETEAZ;
- isdn_command(&cmd);
- cmd.driver = info->isdn_driver;
- cmd.command = ISDN_CMD_SETL2;
- info->last_l2 = l2;
- cmd.arg = info->isdn_channel + (l2 << 8);
- isdn_command(&cmd);
- cmd.driver = info->isdn_driver;
- cmd.command = ISDN_CMD_SETL3;
- cmd.arg = info->isdn_channel + (m->mdmreg[REG_L3PROT] << 8);
-#ifdef CONFIG_ISDN_TTY_FAX
- if (l2 == ISDN_PROTO_L2_FAX) {
- cmd.parm.fax = info->fax;
- info->fax->direction = ISDN_TTY_FAX_CONN_OUT;
- }
-#endif
- isdn_command(&cmd);
- cmd.driver = info->isdn_driver;
- cmd.arg = info->isdn_channel;
- sprintf(cmd.parm.setup.phone, "%s", n);
- sprintf(cmd.parm.setup.eazmsn, "%s",
- isdn_map_eaz2msn(m->msn, info->isdn_driver));
- cmd.parm.setup.si1 = si;
- cmd.parm.setup.si2 = m->mdmreg[REG_SI2];
- cmd.command = ISDN_CMD_DIAL;
- info->dialing = 1;
- info->emu.carrierwait = 0;
- strcpy(dev->num[i], n);
- isdn_info_update();
- isdn_command(&cmd);
- isdn_timer_ctrl(ISDN_TIMER_CARRIER, 1);
- }
-}
-
-/* isdn_tty_hangup() disassociates a tty from the real
- * ISDN-line (hangup). The usage-status is cleared
- * and some cleanup is done also.
- */
-void
-isdn_tty_modem_hup(modem_info *info, int local)
-{
- isdn_ctrl cmd;
- int di, ch;
-
- if (!info)
- return;
-
- di = info->isdn_driver;
- ch = info->isdn_channel;
- if (di < 0 || ch < 0)
- return;
-
- info->isdn_driver = -1;
- info->isdn_channel = -1;
-
-#ifdef ISDN_DEBUG_MODEM_HUP
- printk(KERN_DEBUG "Mhup ttyI%d\n", info->line);
-#endif
- info->rcvsched = 0;
- isdn_tty_flush_buffer(info->port.tty);
- if (info->online) {
- info->last_lhup = local;
- info->online = 0;
- isdn_tty_modem_result(RESULT_NO_CARRIER, info);
- }
-#ifdef CONFIG_ISDN_AUDIO
- info->vonline = 0;
-#ifdef CONFIG_ISDN_TTY_FAX
- info->faxonline = 0;
- info->fax->phase = ISDN_FAX_PHASE_IDLE;
-#endif
- info->emu.vpar[4] = 0;
- info->emu.vpar[5] = 8;
- kfree(info->dtmf_state);
- info->dtmf_state = NULL;
- kfree(info->silence_state);
- info->silence_state = NULL;
- kfree(info->adpcms);
- info->adpcms = NULL;
- kfree(info->adpcmr);
- info->adpcmr = NULL;
-#endif
- if ((info->msr & UART_MSR_RI) &&
- (info->emu.mdmreg[REG_RUNG] & BIT_RUNG))
- isdn_tty_modem_result(RESULT_RUNG, info);
- info->msr &= ~(UART_MSR_DCD | UART_MSR_RI);
- info->lsr |= UART_LSR_TEMT;
-
- if (local) {
- cmd.driver = di;
- cmd.command = ISDN_CMD_HANGUP;
- cmd.arg = ch;
- isdn_command(&cmd);
- }
-
- isdn_all_eaz(di, ch);
- info->emu.mdmreg[REG_RINGCNT] = 0;
- isdn_free_channel(di, ch, 0);
-
- if (info->drv_index >= 0) {
- dev->m_idx[info->drv_index] = -1;
- info->drv_index = -1;
- }
-}
-
-/*
- * Begin of a CAPI like interface, currently used only for
- * supplementary service (CAPI 2.0 part III)
- */
-#include <linux/isdn/capicmd.h>
-#include <linux/module.h>
-
-int
-isdn_tty_capi_facility(capi_msg *cm) {
- return (-1); /* dummy */
-}
-
-/* isdn_tty_suspend() tries to suspend the current tty connection
- */
-static void
-isdn_tty_suspend(char *id, modem_info *info, atemu *m)
-{
- isdn_ctrl cmd;
-
- int l;
-
- if (!info)
- return;
-
-#ifdef ISDN_DEBUG_MODEM_SERVICES
- printk(KERN_DEBUG "Msusp ttyI%d\n", info->line);
-#endif
- l = strlen(id);
- if ((info->isdn_driver >= 0)) {
- cmd.parm.cmsg.Length = l + 18;
- cmd.parm.cmsg.Command = CAPI_FACILITY;
- cmd.parm.cmsg.Subcommand = CAPI_REQ;
- cmd.parm.cmsg.adr.Controller = info->isdn_driver + 1;
-		cmd.parm.cmsg.para[0] = 3;	/* 16 bit 0x0003 supplementary service */
- cmd.parm.cmsg.para[1] = 0;
- cmd.parm.cmsg.para[2] = l + 3;
- cmd.parm.cmsg.para[3] = 4; /* 16 bit 0x0004 Suspend */
- cmd.parm.cmsg.para[4] = 0;
- cmd.parm.cmsg.para[5] = l;
- memcpy(&cmd.parm.cmsg.para[6], id, l);
- cmd.command = CAPI_PUT_MESSAGE;
- cmd.driver = info->isdn_driver;
- cmd.arg = info->isdn_channel;
- isdn_command(&cmd);
- }
-}
-
-/* isdn_tty_resume() tries to resume a suspended call, including the
- * setup of the lower levels before that. Unfortunately there is no
- * checking for compatibility of the protocols in use, as provided by Q.931.
- * It does the same things as isdn_tty_dial(); only the last command
- * is different, so maybe the two can be merged.
- */
-
-static void
-isdn_tty_resume(char *id, modem_info *info, atemu *m)
-{
- int usg = ISDN_USAGE_MODEM;
- int si = 7;
- int l2 = m->mdmreg[REG_L2PROT];
- isdn_ctrl cmd;
- ulong flags;
- int i;
- int j;
- int l;
-
- l = strlen(id);
- for (j = 7; j >= 0; j--)
- if (m->mdmreg[REG_SI1] & (1 << j)) {
- si = bit2si[j];
- break;
- }
- usg = isdn_calc_usage(si, l2);
-#ifdef CONFIG_ISDN_AUDIO
- if ((si == 1) &&
- (l2 != ISDN_PROTO_L2_MODEM)
-#ifdef CONFIG_ISDN_TTY_FAX
- && (l2 != ISDN_PROTO_L2_FAX)
-#endif
- ) {
- l2 = ISDN_PROTO_L2_TRANS;
- usg = ISDN_USAGE_VOICE;
- }
-#endif
- m->mdmreg[REG_SI1I] = si2bit[si];
- spin_lock_irqsave(&dev->lock, flags);
- i = isdn_get_free_channel(usg, l2, m->mdmreg[REG_L3PROT], -1, -1, m->msn);
- if (i < 0) {
- spin_unlock_irqrestore(&dev->lock, flags);
- isdn_tty_modem_result(RESULT_NO_DIALTONE, info);
- } else {
- info->isdn_driver = dev->drvmap[i];
- info->isdn_channel = dev->chanmap[i];
- info->drv_index = i;
- dev->m_idx[i] = info->line;
- dev->usage[i] |= ISDN_USAGE_OUTGOING;
- info->last_dir = 1;
-// strcpy(info->last_num, n);
- isdn_info_update();
- spin_unlock_irqrestore(&dev->lock, flags);
- cmd.driver = info->isdn_driver;
- cmd.arg = info->isdn_channel;
- cmd.command = ISDN_CMD_CLREAZ;
- isdn_command(&cmd);
- strcpy(cmd.parm.num, isdn_map_eaz2msn(m->msn, info->isdn_driver));
- cmd.driver = info->isdn_driver;
- cmd.command = ISDN_CMD_SETEAZ;
- isdn_command(&cmd);
- cmd.driver = info->isdn_driver;
- cmd.command = ISDN_CMD_SETL2;
- info->last_l2 = l2;
- cmd.arg = info->isdn_channel + (l2 << 8);
- isdn_command(&cmd);
- cmd.driver = info->isdn_driver;
- cmd.command = ISDN_CMD_SETL3;
- cmd.arg = info->isdn_channel + (m->mdmreg[REG_L3PROT] << 8);
- isdn_command(&cmd);
- cmd.driver = info->isdn_driver;
- cmd.arg = info->isdn_channel;
- cmd.parm.cmsg.Length = l + 18;
- cmd.parm.cmsg.Command = CAPI_FACILITY;
- cmd.parm.cmsg.Subcommand = CAPI_REQ;
- cmd.parm.cmsg.adr.Controller = info->isdn_driver + 1;
-		cmd.parm.cmsg.para[0] = 3;	/* 16 bit 0x0003 supplementary service */
- cmd.parm.cmsg.para[1] = 0;
- cmd.parm.cmsg.para[2] = l + 3;
- cmd.parm.cmsg.para[3] = 5; /* 16 bit 0x0005 Resume */
- cmd.parm.cmsg.para[4] = 0;
- cmd.parm.cmsg.para[5] = l;
- memcpy(&cmd.parm.cmsg.para[6], id, l);
- cmd.command = CAPI_PUT_MESSAGE;
- info->dialing = 1;
-// strcpy(dev->num[i], n);
- isdn_info_update();
- isdn_command(&cmd);
- isdn_timer_ctrl(ISDN_TIMER_CARRIER, 1);
- }
-}
-
-/* isdn_tty_send_msg() sends a message to an HL driver.
- * This is used by hybrid modem cards to send AT commands to the card.
- */
-
-static void
-isdn_tty_send_msg(modem_info *info, atemu *m, char *msg)
-{
- int usg = ISDN_USAGE_MODEM;
- int si = 7;
- int l2 = m->mdmreg[REG_L2PROT];
- isdn_ctrl cmd;
- ulong flags;
- int i;
- int j;
- int l;
-
- l = min(strlen(msg), sizeof(cmd.parm) - sizeof(cmd.parm.cmsg)
- + sizeof(cmd.parm.cmsg.para) - 2);
-
- if (!l) {
- isdn_tty_modem_result(RESULT_ERROR, info);
- return;
- }
- for (j = 7; j >= 0; j--)
- if (m->mdmreg[REG_SI1] & (1 << j)) {
- si = bit2si[j];
- break;
- }
- usg = isdn_calc_usage(si, l2);
-#ifdef CONFIG_ISDN_AUDIO
- if ((si == 1) &&
- (l2 != ISDN_PROTO_L2_MODEM)
-#ifdef CONFIG_ISDN_TTY_FAX
- && (l2 != ISDN_PROTO_L2_FAX)
-#endif
- ) {
- l2 = ISDN_PROTO_L2_TRANS;
- usg = ISDN_USAGE_VOICE;
- }
-#endif
- m->mdmreg[REG_SI1I] = si2bit[si];
- spin_lock_irqsave(&dev->lock, flags);
- i = isdn_get_free_channel(usg, l2, m->mdmreg[REG_L3PROT], -1, -1, m->msn);
- if (i < 0) {
- spin_unlock_irqrestore(&dev->lock, flags);
- isdn_tty_modem_result(RESULT_NO_DIALTONE, info);
- } else {
- info->isdn_driver = dev->drvmap[i];
- info->isdn_channel = dev->chanmap[i];
- info->drv_index = i;
- dev->m_idx[i] = info->line;
- dev->usage[i] |= ISDN_USAGE_OUTGOING;
- info->last_dir = 1;
- isdn_info_update();
- spin_unlock_irqrestore(&dev->lock, flags);
- cmd.driver = info->isdn_driver;
- cmd.arg = info->isdn_channel;
- cmd.command = ISDN_CMD_CLREAZ;
- isdn_command(&cmd);
- strcpy(cmd.parm.num, isdn_map_eaz2msn(m->msn, info->isdn_driver));
- cmd.driver = info->isdn_driver;
- cmd.command = ISDN_CMD_SETEAZ;
- isdn_command(&cmd);
- cmd.driver = info->isdn_driver;
- cmd.command = ISDN_CMD_SETL2;
- info->last_l2 = l2;
- cmd.arg = info->isdn_channel + (l2 << 8);
- isdn_command(&cmd);
- cmd.driver = info->isdn_driver;
- cmd.command = ISDN_CMD_SETL3;
- cmd.arg = info->isdn_channel + (m->mdmreg[REG_L3PROT] << 8);
- isdn_command(&cmd);
- cmd.driver = info->isdn_driver;
- cmd.arg = info->isdn_channel;
- cmd.parm.cmsg.Length = l + 14;
- cmd.parm.cmsg.Command = CAPI_MANUFACTURER;
- cmd.parm.cmsg.Subcommand = CAPI_REQ;
- cmd.parm.cmsg.adr.Controller = info->isdn_driver + 1;
- cmd.parm.cmsg.para[0] = l + 1;
- strncpy(&cmd.parm.cmsg.para[1], msg, l);
- cmd.parm.cmsg.para[l + 1] = 0xd;
- cmd.command = CAPI_PUT_MESSAGE;
-/* info->dialing = 1;
- strcpy(dev->num[i], n);
- isdn_info_update();
-*/
- isdn_command(&cmd);
- }
-}
-
-static inline int
-isdn_tty_paranoia_check(modem_info *info, char *name, const char *routine)
-{
-#ifdef MODEM_PARANOIA_CHECK
- if (!info) {
- printk(KERN_WARNING "isdn_tty: null info_struct for %s in %s\n",
- name, routine);
- return 1;
- }
- if (info->magic != ISDN_ASYNC_MAGIC) {
- printk(KERN_WARNING "isdn_tty: bad magic for modem struct %s in %s\n",
- name, routine);
- return 1;
- }
-#endif
- return 0;
-}
-
-/*
- * This routine is called to set the UART divisor registers to match
- * the specified baud rate for a serial port.
- */
-static void
-isdn_tty_change_speed(modem_info *info)
-{
- struct tty_port *port = &info->port;
- uint cflag,
- cval,
- quot;
- int i;
-
- if (!port->tty)
- return;
- cflag = port->tty->termios.c_cflag;
-
- quot = i = cflag & CBAUD;
- if (i & CBAUDEX) {
- i &= ~CBAUDEX;
- if (i < 1 || i > 2)
- port->tty->termios.c_cflag &= ~CBAUDEX;
- else
- i += 15;
- }
- if (quot) {
- info->mcr |= UART_MCR_DTR;
- isdn_tty_modem_ncarrier(info);
- } else {
- info->mcr &= ~UART_MCR_DTR;
- if (info->emu.mdmreg[REG_DTRHUP] & BIT_DTRHUP) {
-#ifdef ISDN_DEBUG_MODEM_HUP
- printk(KERN_DEBUG "Mhup in changespeed\n");
-#endif
- if (info->online)
- info->ncarrier = 1;
- isdn_tty_modem_reset_regs(info, 0);
- isdn_tty_modem_hup(info, 1);
- }
- return;
- }
- /* byte size and parity */
- cval = cflag & (CSIZE | CSTOPB);
- cval >>= 4;
- if (cflag & PARENB)
- cval |= UART_LCR_PARITY;
- if (!(cflag & PARODD))
- cval |= UART_LCR_EPAR;
-
- tty_port_set_check_carrier(port, ~cflag & CLOCAL);
-}
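
Since the quot == 0 branch above is what implements the classic "baud rate B0 means hang up" behaviour, here is a hedged user-space sketch of how that path is reached; the device path and helper name are assumptions, the termios calls are standard.

#include <fcntl.h>
#include <termios.h>
#include <unistd.h>

/* Select B0 on an isdn_tty port (e.g. /dev/ttyI0): tcsetattr() ends up in
 * isdn_tty_set_termios() -> isdn_tty_change_speed(), which drops DTR and,
 * with the DTR-hangup register bit set, hangs up an active connection. */
static int hangup_via_b0(const char *dev)
{
	struct termios t;
	int fd = open(dev, O_RDWR | O_NOCTTY);

	if (fd < 0)
		return -1;
	if (tcgetattr(fd, &t) == 0) {
		cfsetispeed(&t, B0);
		cfsetospeed(&t, B0);
		tcsetattr(fd, TCSANOW, &t);
	}
	close(fd);
	return 0;
}
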
-
-static int
-isdn_tty_startup(modem_info *info)
-{
- if (tty_port_initialized(&info->port))
- return 0;
- isdn_lock_drivers();
-#ifdef ISDN_DEBUG_MODEM_OPEN
- printk(KERN_DEBUG "starting up ttyi%d ...\n", info->line);
-#endif
- /*
- * Now, initialize the UART
- */
- info->mcr = UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2;
- if (info->port.tty)
- clear_bit(TTY_IO_ERROR, &info->port.tty->flags);
- /*
- * and set the speed of the serial port
- */
- isdn_tty_change_speed(info);
-
- tty_port_set_initialized(&info->port, 1);
- info->msr |= (UART_MSR_DSR | UART_MSR_CTS);
- info->send_outstanding = 0;
- return 0;
-}
-
-/*
- * This routine will shut down a serial port; interrupts are disabled, and
- * DTR is dropped if the hangup on close termio flag is on.
- */
-static void
-isdn_tty_shutdown(modem_info *info)
-{
- if (!tty_port_initialized(&info->port))
- return;
-#ifdef ISDN_DEBUG_MODEM_OPEN
- printk(KERN_DEBUG "Shutting down isdnmodem port %d ....\n", info->line);
-#endif
- isdn_unlock_drivers();
- info->msr &= ~UART_MSR_RI;
- if (!info->port.tty || (info->port.tty->termios.c_cflag & HUPCL)) {
- info->mcr &= ~(UART_MCR_DTR | UART_MCR_RTS);
- if (info->emu.mdmreg[REG_DTRHUP] & BIT_DTRHUP) {
- isdn_tty_modem_reset_regs(info, 0);
-#ifdef ISDN_DEBUG_MODEM_HUP
- printk(KERN_DEBUG "Mhup in isdn_tty_shutdown\n");
-#endif
- isdn_tty_modem_hup(info, 1);
- }
- }
- if (info->port.tty)
- set_bit(TTY_IO_ERROR, &info->port.tty->flags);
-
- tty_port_set_initialized(&info->port, 0);
-}
-
-/* isdn_tty_write() is the main send-routine. It is called from the upper
- * levels within the kernel to send data. Depending on the online-flag it
- * either directs output to the AT-command interpreter or to the lower
- * level. Additional tasks done here:
- * - If online, check for the escape-sequence (+++).
- * - If sending audio-data, call isdn_tty_handleDLEdown() to parse DLE-codes.
- * - If receiving audio-data, call isdn_tty_end_vrx() to abort if needed.
- * - If dialing, abort the dial.
- */
-static int
-isdn_tty_write(struct tty_struct *tty, const u_char *buf, int count)
-{
- int c;
- int total = 0;
- modem_info *info = (modem_info *) tty->driver_data;
- atemu *m = &info->emu;
-
- if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_write"))
- return 0;
- /* See isdn_tty_senddown() */
- atomic_inc(&info->xmit_lock);
- while (1) {
- c = count;
- if (c > info->xmit_size - info->xmit_count)
- c = info->xmit_size - info->xmit_count;
- if (info->isdn_driver >= 0 && c > dev->drv[info->isdn_driver]->maxbufsize)
- c = dev->drv[info->isdn_driver]->maxbufsize;
- if (c <= 0)
- break;
- if ((info->online > 1)
-#ifdef CONFIG_ISDN_AUDIO
- || (info->vonline & 3)
-#endif
- ) {
-#ifdef CONFIG_ISDN_AUDIO
- if (!info->vonline)
-#endif
- isdn_tty_check_esc(buf, m->mdmreg[REG_ESC], c,
- &(m->pluscount),
- &(m->lastplus));
- memcpy(&info->port.xmit_buf[info->xmit_count], buf, c);
-#ifdef CONFIG_ISDN_AUDIO
- if (info->vonline) {
- int cc = isdn_tty_handleDLEdown(info, m, c);
- if (info->vonline & 2) {
- if (!cc) {
- /* If DLE decoding results in zero-transmit, but
- * c originally was non-zero, do a wakeup.
- */
- tty_wakeup(tty);
- info->msr |= UART_MSR_CTS;
- info->lsr |= UART_LSR_TEMT;
- }
- info->xmit_count += cc;
- }
- if ((info->vonline & 3) == 1) {
- /* Do NOT handle Ctrl-Q or Ctrl-S
- * when in full-duplex audio mode.
- */
- if (isdn_tty_end_vrx(buf, c)) {
- info->vonline &= ~1;
-#ifdef ISDN_DEBUG_MODEM_VOICE
- printk(KERN_DEBUG
- "got !^Q/^S, send DLE-ETX,VCON on ttyI%d\n",
- info->line);
-#endif
- isdn_tty_at_cout("\020\003\r\nVCON\r\n", info);
- }
- }
- } else
- if (TTY_IS_FCLASS1(info)) {
- int cc = isdn_tty_handleDLEdown(info, m, c);
-
- if (info->vonline & 4) { /* ETX seen */
- isdn_ctrl c;
-
- c.command = ISDN_CMD_FAXCMD;
- c.driver = info->isdn_driver;
- c.arg = info->isdn_channel;
- c.parm.aux.cmd = ISDN_FAX_CLASS1_CTRL;
- c.parm.aux.subcmd = ETX;
- isdn_command(&c);
- }
- info->vonline = 0;
-#ifdef ISDN_DEBUG_MODEM_VOICE
- printk(KERN_DEBUG "fax dle cc/c %d/%d\n", cc, c);
-#endif
- info->xmit_count += cc;
- } else
-#endif
- info->xmit_count += c;
- } else {
- info->msr |= UART_MSR_CTS;
- info->lsr |= UART_LSR_TEMT;
- if (info->dialing) {
- info->dialing = 0;
-#ifdef ISDN_DEBUG_MODEM_HUP
- printk(KERN_DEBUG "Mhup in isdn_tty_write\n");
-#endif
- isdn_tty_modem_result(RESULT_NO_CARRIER, info);
- isdn_tty_modem_hup(info, 1);
- } else
- c = isdn_tty_edit_at(buf, c, info);
- }
- buf += c;
- count -= c;
- total += c;
- }
- atomic_dec(&info->xmit_lock);
- if ((info->xmit_count) || !skb_queue_empty(&info->xmit_queue)) {
- if (m->mdmreg[REG_DXMT] & BIT_DXMT) {
- isdn_tty_senddown(info);
- isdn_tty_tint(info);
- }
- isdn_timer_ctrl(ISDN_TIMER_MODEMXMIT, 1);
- }
- return total;
-}
-
-static int
-isdn_tty_write_room(struct tty_struct *tty)
-{
- modem_info *info = (modem_info *) tty->driver_data;
- int ret;
-
- if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_write_room"))
- return 0;
- if (!info->online)
- return info->xmit_size;
- ret = info->xmit_size - info->xmit_count;
- return (ret < 0) ? 0 : ret;
-}
-
-static int
-isdn_tty_chars_in_buffer(struct tty_struct *tty)
-{
- modem_info *info = (modem_info *) tty->driver_data;
-
- if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_chars_in_buffer"))
- return 0;
- if (!info->online)
- return 0;
- return (info->xmit_count);
-}
-
-static void
-isdn_tty_flush_buffer(struct tty_struct *tty)
-{
- modem_info *info;
-
- if (!tty) {
- return;
- }
- info = (modem_info *) tty->driver_data;
- if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_flush_buffer")) {
- return;
- }
- isdn_tty_cleanup_xmit(info);
- info->xmit_count = 0;
- tty_wakeup(tty);
-}
-
-static void
-isdn_tty_flush_chars(struct tty_struct *tty)
-{
- modem_info *info = (modem_info *) tty->driver_data;
-
- if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_flush_chars"))
- return;
- if ((info->xmit_count) || !skb_queue_empty(&info->xmit_queue))
- isdn_timer_ctrl(ISDN_TIMER_MODEMXMIT, 1);
-}
-
-/*
- * ------------------------------------------------------------
- * isdn_tty_throttle()
- *
- * This routine is called by the upper-layer tty layer to signal that
- * incoming characters should be throttled.
- * ------------------------------------------------------------
- */
-static void
-isdn_tty_throttle(struct tty_struct *tty)
-{
- modem_info *info = (modem_info *) tty->driver_data;
-
- if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_throttle"))
- return;
- if (I_IXOFF(tty))
- info->x_char = STOP_CHAR(tty);
- info->mcr &= ~UART_MCR_RTS;
-}
-
-static void
-isdn_tty_unthrottle(struct tty_struct *tty)
-{
- modem_info *info = (modem_info *) tty->driver_data;
-
- if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_unthrottle"))
- return;
- if (I_IXOFF(tty)) {
- if (info->x_char)
- info->x_char = 0;
- else
- info->x_char = START_CHAR(tty);
- }
- info->mcr |= UART_MCR_RTS;
-}
-
-/*
- * ------------------------------------------------------------
- * isdn_tty_ioctl() and friends
- * ------------------------------------------------------------
- */
-
-/*
- * isdn_tty_get_lsr_info - get line status register info
- *
- * Purpose: Let the user call ioctl() to find out when the UART is
- * physically empty. On bus types like RS485, the transmitter must
- * release the bus after transmitting. This must be done when the
- * transmit shift register is empty, not when the transmit holding
- * register is empty. This functionality allows an RS485 driver to be
- * written in user space.
- */
-static int
-isdn_tty_get_lsr_info(modem_info *info, uint __user *value)
-{
- u_char status;
- uint result;
-
- status = info->lsr;
- result = ((status & UART_LSR_TEMT) ? TIOCSER_TEMT : 0);
- return put_user(result, value);
-}
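
A minimal user-space sketch of the ioctl documented above, assuming a file descriptor that is already open on a ttyI port; it polls TIOCSERGETLSR until the TIOCSER_TEMT bit reports that the transmitter has drained.

#include <sys/ioctl.h>
#include <linux/serial.h>   /* TIOCSER_TEMT */
#include <unistd.h>

static int wait_transmitter_empty(int fd)
{
	unsigned int lsr = 0;

	do {
		if (ioctl(fd, TIOCSERGETLSR, &lsr) < 0)
			return -1;          /* ioctl failed              */
		if (!(lsr & TIOCSER_TEMT))
			usleep(1000);       /* not empty yet, poll again */
	} while (!(lsr & TIOCSER_TEMT));
	return 0;
}
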
-
-
-static int
-isdn_tty_tiocmget(struct tty_struct *tty)
-{
- modem_info *info = (modem_info *) tty->driver_data;
- u_char control, status;
-
- if (isdn_tty_paranoia_check(info, tty->name, __func__))
- return -ENODEV;
- if (tty_io_error(tty))
- return -EIO;
-
- mutex_lock(&modem_info_mutex);
-#ifdef ISDN_DEBUG_MODEM_IOCTL
- printk(KERN_DEBUG "ttyI%d ioctl TIOCMGET\n", info->line);
-#endif
-
- control = info->mcr;
- status = info->msr;
- mutex_unlock(&modem_info_mutex);
- return ((control & UART_MCR_RTS) ? TIOCM_RTS : 0)
- | ((control & UART_MCR_DTR) ? TIOCM_DTR : 0)
- | ((status & UART_MSR_DCD) ? TIOCM_CAR : 0)
- | ((status & UART_MSR_RI) ? TIOCM_RNG : 0)
- | ((status & UART_MSR_DSR) ? TIOCM_DSR : 0)
- | ((status & UART_MSR_CTS) ? TIOCM_CTS : 0);
-}
-
-static int
-isdn_tty_tiocmset(struct tty_struct *tty,
- unsigned int set, unsigned int clear)
-{
- modem_info *info = (modem_info *) tty->driver_data;
-
- if (isdn_tty_paranoia_check(info, tty->name, __func__))
- return -ENODEV;
- if (tty_io_error(tty))
- return -EIO;
-
-#ifdef ISDN_DEBUG_MODEM_IOCTL
- printk(KERN_DEBUG "ttyI%d ioctl TIOCMxxx: %x %x\n", info->line, set, clear);
-#endif
-
- mutex_lock(&modem_info_mutex);
- if (set & TIOCM_RTS)
- info->mcr |= UART_MCR_RTS;
- if (set & TIOCM_DTR) {
- info->mcr |= UART_MCR_DTR;
- isdn_tty_modem_ncarrier(info);
- }
-
- if (clear & TIOCM_RTS)
- info->mcr &= ~UART_MCR_RTS;
- if (clear & TIOCM_DTR) {
- info->mcr &= ~UART_MCR_DTR;
- if (info->emu.mdmreg[REG_DTRHUP] & BIT_DTRHUP) {
- isdn_tty_modem_reset_regs(info, 0);
-#ifdef ISDN_DEBUG_MODEM_HUP
- printk(KERN_DEBUG "Mhup in TIOCMSET\n");
-#endif
- if (info->online)
- info->ncarrier = 1;
- isdn_tty_modem_hup(info, 1);
- }
- }
- mutex_unlock(&modem_info_mutex);
- return 0;
-}
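
A short user-space sketch of the modem-control path handled by isdn_tty_tiocmget()/isdn_tty_tiocmset() above; the helper name is an assumption. Clearing DTR this way is the second route (besides B0) into the DTR-hangup logic.

#include <sys/ioctl.h>

/* Read the modem lines, then clear DTR; with the DTR-hangup register
 * bit set this hangs up the connection, as isdn_tty_tiocmset() shows. */
static int drop_dtr(int fd)
{
	int bits;

	if (ioctl(fd, TIOCMGET, &bits) < 0)
		return -1;
	bits = TIOCM_DTR;
	return ioctl(fd, TIOCMBIC, &bits);
}
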
-
-static int
-isdn_tty_ioctl(struct tty_struct *tty, uint cmd, ulong arg)
-{
- modem_info *info = (modem_info *) tty->driver_data;
-
- if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_ioctl"))
- return -ENODEV;
- if (tty_io_error(tty))
- return -EIO;
- switch (cmd) {
- case TIOCSERGETLSR: /* Get line status register */
-#ifdef ISDN_DEBUG_MODEM_IOCTL
- printk(KERN_DEBUG "ttyI%d ioctl TIOCSERGETLSR\n", info->line);
-#endif
- return isdn_tty_get_lsr_info(info, (uint __user *) arg);
- default:
-#ifdef ISDN_DEBUG_MODEM_IOCTL
- printk(KERN_DEBUG "UNKNOWN ioctl 0x%08x on ttyi%d\n", cmd, info->line);
-#endif
- return -ENOIOCTLCMD;
- }
- return 0;
-}
-
-static void
-isdn_tty_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
-{
- modem_info *info = (modem_info *) tty->driver_data;
-
- mutex_lock(&modem_info_mutex);
- if (!old_termios)
- isdn_tty_change_speed(info);
- else {
- if (tty->termios.c_cflag == old_termios->c_cflag &&
- tty->termios.c_ispeed == old_termios->c_ispeed &&
- tty->termios.c_ospeed == old_termios->c_ospeed) {
- mutex_unlock(&modem_info_mutex);
- return;
- }
- isdn_tty_change_speed(info);
- }
- mutex_unlock(&modem_info_mutex);
-}
-
-/*
- * ------------------------------------------------------------
- * isdn_tty_open() and friends
- * ------------------------------------------------------------
- */
-
-static int isdn_tty_install(struct tty_driver *driver, struct tty_struct *tty)
-{
- modem_info *info = &dev->mdm.info[tty->index];
-
- if (isdn_tty_paranoia_check(info, tty->name, __func__))
- return -ENODEV;
-
- tty->driver_data = info;
-
- return tty_port_install(&info->port, driver, tty);
-}
-
-/*
- * This routine is called whenever a serial port is opened. It
- * enables interrupts for a serial port, linking its async structure into
- * the IRQ chain. It also performs the serial-specific
- * initialization for the tty structure.
- */
-static int
-isdn_tty_open(struct tty_struct *tty, struct file *filp)
-{
- modem_info *info = tty->driver_data;
- struct tty_port *port = &info->port;
- int retval;
-
-#ifdef ISDN_DEBUG_MODEM_OPEN
- printk(KERN_DEBUG "isdn_tty_open %s, count = %d\n", tty->name,
- port->count);
-#endif
- port->count++;
- port->tty = tty;
- /*
- * Start up serial port
- */
- retval = isdn_tty_startup(info);
- if (retval) {
-#ifdef ISDN_DEBUG_MODEM_OPEN
- printk(KERN_DEBUG "isdn_tty_open return after startup\n");
-#endif
- return retval;
- }
- retval = tty_port_block_til_ready(port, tty, filp);
- if (retval) {
-#ifdef ISDN_DEBUG_MODEM_OPEN
- printk(KERN_DEBUG "isdn_tty_open return after isdn_tty_block_til_ready \n");
-#endif
- return retval;
- }
-#ifdef ISDN_DEBUG_MODEM_OPEN
- printk(KERN_DEBUG "isdn_tty_open ttyi%d successful...\n", info->line);
-#endif
- dev->modempoll++;
-#ifdef ISDN_DEBUG_MODEM_OPEN
- printk(KERN_DEBUG "isdn_tty_open normal exit\n");
-#endif
- return 0;
-}
-
-static void
-isdn_tty_close(struct tty_struct *tty, struct file *filp)
-{
- modem_info *info = (modem_info *) tty->driver_data;
- struct tty_port *port = &info->port;
- ulong timeout;
-
- if (!info || isdn_tty_paranoia_check(info, tty->name, "isdn_tty_close"))
- return;
- if (tty_hung_up_p(filp)) {
-#ifdef ISDN_DEBUG_MODEM_OPEN
- printk(KERN_DEBUG "isdn_tty_close return after tty_hung_up_p\n");
-#endif
- return;
- }
- if ((tty->count == 1) && (port->count != 1)) {
- /*
- * Uh, oh. tty->count is 1, which means that the tty
- * structure will be freed. Info->count should always
- * be one in these conditions. If it's greater than
- * one, we've got real problems, since it means the
- * serial port won't be shut down.
- */
- printk(KERN_ERR "isdn_tty_close: bad port count; tty->count is 1, "
- "info->count is %d\n", port->count);
- port->count = 1;
- }
- if (--port->count < 0) {
- printk(KERN_ERR "isdn_tty_close: bad port count for ttyi%d: %d\n",
- info->line, port->count);
- port->count = 0;
- }
- if (port->count) {
-#ifdef ISDN_DEBUG_MODEM_OPEN
- printk(KERN_DEBUG "isdn_tty_close after info->count != 0\n");
-#endif
- return;
- }
- info->closing = 1;
-
- tty->closing = 1;
- /*
- * At this point we stop accepting input. To do this, we
- * disable the receive line status interrupts, and tell the
- * interrupt driver to stop checking the data ready bit in the
- * line status register.
- */
- if (tty_port_initialized(port)) {
- tty_wait_until_sent(tty, 3000); /* 30 seconds timeout */
- /*
- * Before we drop DTR, make sure the UART transmitter
- * has completely drained; this is especially
- * important if there is a transmit FIFO!
- */
- timeout = jiffies + HZ;
- while (!(info->lsr & UART_LSR_TEMT)) {
- schedule_timeout_interruptible(20);
- if (time_after(jiffies, timeout))
- break;
- }
- }
- dev->modempoll--;
- isdn_tty_shutdown(info);
- isdn_tty_flush_buffer(tty);
- tty_ldisc_flush(tty);
- port->tty = NULL;
- info->ncarrier = 0;
-
- tty_port_close_end(port, tty);
- info->closing = 0;
-#ifdef ISDN_DEBUG_MODEM_OPEN
- printk(KERN_DEBUG "isdn_tty_close normal exit\n");
-#endif
-}
-
-/*
- * isdn_tty_hangup() --- called by tty_hangup() when a hangup is signaled.
- */
-static void
-isdn_tty_hangup(struct tty_struct *tty)
-{
- modem_info *info = (modem_info *) tty->driver_data;
- struct tty_port *port = &info->port;
-
- if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_hangup"))
- return;
- isdn_tty_shutdown(info);
- port->count = 0;
- tty_port_set_active(port, 0);
- port->tty = NULL;
- wake_up_interruptible(&port->open_wait);
-}
-
-/* This routine initializes all emulator-data.
- */
-static void
-isdn_tty_reset_profile(atemu *m)
-{
- m->profile[0] = 0;
- m->profile[1] = 0;
- m->profile[2] = 43;
- m->profile[3] = 13;
- m->profile[4] = 10;
- m->profile[5] = 8;
- m->profile[6] = 3;
- m->profile[7] = 60;
- m->profile[8] = 2;
- m->profile[9] = 6;
- m->profile[10] = 7;
- m->profile[11] = 70;
- m->profile[12] = 0x45;
- m->profile[13] = 4;
- m->profile[14] = ISDN_PROTO_L2_X75I;
- m->profile[15] = ISDN_PROTO_L3_TRANS;
- m->profile[16] = ISDN_SERIAL_XMIT_SIZE / 16;
- m->profile[17] = ISDN_MODEM_WINSIZE;
- m->profile[18] = 4;
- m->profile[19] = 0;
- m->profile[20] = 0;
- m->profile[23] = 0;
- m->pmsn[0] = '\0';
- m->plmsn[0] = '\0';
-}
-
-#ifdef CONFIG_ISDN_AUDIO
-static void
-isdn_tty_modem_reset_vpar(atemu *m)
-{
- m->vpar[0] = 2; /* Voice-device (2 = phone line) */
- m->vpar[1] = 0; /* Silence detection level (0 = none ) */
- m->vpar[2] = 70; /* Silence interval (7 sec. ) */
- m->vpar[3] = 2; /* Compression type (1 = ADPCM-2 ) */
- m->vpar[4] = 0; /* DTMF detection level (0 = softcode ) */
- m->vpar[5] = 8; /* DTMF interval (8 * 5 ms. ) */
-}
-#endif
-
-#ifdef CONFIG_ISDN_TTY_FAX
-static void
-isdn_tty_modem_reset_faxpar(modem_info *info)
-{
- T30_s *f = info->fax;
-
- f->code = 0;
- f->phase = ISDN_FAX_PHASE_IDLE;
- f->direction = 0;
- f->resolution = 1; /* fine */
- f->rate = 5; /* 14400 bit/s */
- f->width = 0;
- f->length = 0;
- f->compression = 0;
- f->ecm = 0;
- f->binary = 0;
- f->scantime = 0;
- memset(&f->id[0], 32, FAXIDLEN - 1);
- f->id[FAXIDLEN - 1] = 0;
- f->badlin = 0;
- f->badmul = 0;
- f->bor = 0;
- f->nbc = 0;
- f->cq = 0;
- f->cr = 0;
- f->ctcrty = 0;
- f->minsp = 0;
- f->phcto = 30;
- f->rel = 0;
- memset(&f->pollid[0], 32, FAXIDLEN - 1);
- f->pollid[FAXIDLEN - 1] = 0;
-}
-#endif
-
-static void
-isdn_tty_modem_reset_regs(modem_info *info, int force)
-{
- atemu *m = &info->emu;
- if ((m->mdmreg[REG_DTRR] & BIT_DTRR) || force) {
- memcpy(m->mdmreg, m->profile, ISDN_MODEM_NUMREG);
- memcpy(m->msn, m->pmsn, ISDN_MSNLEN);
- memcpy(m->lmsn, m->plmsn, ISDN_LMSNLEN);
- info->xmit_size = m->mdmreg[REG_PSIZE] * 16;
- }
-#ifdef CONFIG_ISDN_AUDIO
- isdn_tty_modem_reset_vpar(m);
-#endif
-#ifdef CONFIG_ISDN_TTY_FAX
- isdn_tty_modem_reset_faxpar(info);
-#endif
- m->mdmcmdl = 0;
-}
-
-static void
-modem_write_profile(atemu *m)
-{
- memcpy(m->profile, m->mdmreg, ISDN_MODEM_NUMREG);
- memcpy(m->pmsn, m->msn, ISDN_MSNLEN);
- memcpy(m->plmsn, m->lmsn, ISDN_LMSNLEN);
- if (dev->profd)
- send_sig(SIGIO, dev->profd, 1);
-}
-
-static const struct tty_operations modem_ops = {
- .install = isdn_tty_install,
- .open = isdn_tty_open,
- .close = isdn_tty_close,
- .write = isdn_tty_write,
- .flush_chars = isdn_tty_flush_chars,
- .write_room = isdn_tty_write_room,
- .chars_in_buffer = isdn_tty_chars_in_buffer,
- .flush_buffer = isdn_tty_flush_buffer,
- .ioctl = isdn_tty_ioctl,
- .throttle = isdn_tty_throttle,
- .unthrottle = isdn_tty_unthrottle,
- .set_termios = isdn_tty_set_termios,
- .hangup = isdn_tty_hangup,
- .tiocmget = isdn_tty_tiocmget,
- .tiocmset = isdn_tty_tiocmset,
-};
-
-static int isdn_tty_carrier_raised(struct tty_port *port)
-{
- modem_info *info = container_of(port, modem_info, port);
- return info->msr & UART_MSR_DCD;
-}
-
-static const struct tty_port_operations isdn_tty_port_ops = {
- .carrier_raised = isdn_tty_carrier_raised,
-};
-
-int
-isdn_tty_modem_init(void)
-{
- isdn_modem_t *m;
- int i, retval;
- modem_info *info;
-
- m = &dev->mdm;
- m->tty_modem = alloc_tty_driver(ISDN_MAX_CHANNELS);
- if (!m->tty_modem)
- return -ENOMEM;
- m->tty_modem->name = "ttyI";
- m->tty_modem->major = ISDN_TTY_MAJOR;
- m->tty_modem->minor_start = 0;
- m->tty_modem->type = TTY_DRIVER_TYPE_SERIAL;
- m->tty_modem->subtype = SERIAL_TYPE_NORMAL;
- m->tty_modem->init_termios = tty_std_termios;
- m->tty_modem->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL;
- m->tty_modem->flags = TTY_DRIVER_REAL_RAW;
- m->tty_modem->driver_name = "isdn_tty";
- tty_set_operations(m->tty_modem, &modem_ops);
- retval = tty_register_driver(m->tty_modem);
- if (retval) {
- printk(KERN_WARNING "isdn_tty: Couldn't register modem-device\n");
- goto err;
- }
- for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
- info = &m->info[i];
-#ifdef CONFIG_ISDN_TTY_FAX
- if (!(info->fax = kmalloc(sizeof(T30_s), GFP_KERNEL))) {
- printk(KERN_ERR "Could not allocate fax t30-buffer\n");
- retval = -ENOMEM;
- goto err_unregister;
- }
-#endif
- tty_port_init(&info->port);
- info->port.ops = &isdn_tty_port_ops;
- spin_lock_init(&info->readlock);
- sprintf(info->last_cause, "0000");
- sprintf(info->last_num, "none");
- info->last_dir = 0;
- info->last_lhup = 1;
- info->last_l2 = -1;
- info->last_si = 0;
- isdn_tty_reset_profile(&info->emu);
- isdn_tty_modem_reset_regs(info, 1);
- info->magic = ISDN_ASYNC_MAGIC;
- info->line = i;
- info->x_char = 0;
- info->isdn_driver = -1;
- info->isdn_channel = -1;
- info->drv_index = -1;
- info->xmit_size = ISDN_SERIAL_XMIT_SIZE;
- timer_setup(&info->nc_timer, isdn_tty_modem_do_ncarrier, 0);
- skb_queue_head_init(&info->xmit_queue);
-#ifdef CONFIG_ISDN_AUDIO
- skb_queue_head_init(&info->dtmf_queue);
-#endif
- info->port.xmit_buf = kmalloc(ISDN_SERIAL_XMIT_MAX + 5,
- GFP_KERNEL);
- if (!info->port.xmit_buf) {
- printk(KERN_ERR "Could not allocate modem xmit-buffer\n");
- retval = -ENOMEM;
- goto err_unregister;
- }
- /* Make room for T.70 header */
- info->port.xmit_buf += 4;
- }
- return 0;
-err_unregister:
- for (i--; i >= 0; i--) {
- info = &m->info[i];
-#ifdef CONFIG_ISDN_TTY_FAX
- kfree(info->fax);
-#endif
- kfree(info->port.xmit_buf - 4);
- info->port.xmit_buf = NULL;
- tty_port_destroy(&info->port);
- }
- tty_unregister_driver(m->tty_modem);
-err:
- put_tty_driver(m->tty_modem);
- m->tty_modem = NULL;
- return retval;
-}
-
-void
-isdn_tty_exit(void)
-{
- modem_info *info;
- int i;
-
- for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
- info = &dev->mdm.info[i];
- isdn_tty_cleanup_xmit(info);
-#ifdef CONFIG_ISDN_TTY_FAX
- kfree(info->fax);
-#endif
- kfree(info->port.xmit_buf - 4);
- info->port.xmit_buf = NULL;
- tty_port_destroy(&info->port);
- }
- tty_unregister_driver(dev->mdm.tty_modem);
- put_tty_driver(dev->mdm.tty_modem);
- dev->mdm.tty_modem = NULL;
-}
-
-
-/*
- * isdn_tty_match_icall(char *MSN, atemu *tty_emulator, int dev_idx)
- * match the MSN against the MSNs (glob patterns) defined for tty_emulator,
- * and return 0 for a match, 1 for no match, 2 if the MSN could match if it were longer.
- */
-
-static int
-isdn_tty_match_icall(char *cid, atemu *emu, int di)
-{
-#ifdef ISDN_DEBUG_MODEM_ICALL
- printk(KERN_DEBUG "m_fi: msn=%s lmsn=%s mmsn=%s mreg[SI1]=%d mreg[SI2]=%d\n",
- emu->msn, emu->lmsn, isdn_map_eaz2msn(emu->msn, di),
- emu->mdmreg[REG_SI1], emu->mdmreg[REG_SI2]);
-#endif
- if (strlen(emu->lmsn)) {
- char *p = emu->lmsn;
- char *q;
- int tmp;
- int ret = 0;
-
- while (1) {
- if ((q = strchr(p, ';')))
- *q = '\0';
- if ((tmp = isdn_msncmp(cid, isdn_map_eaz2msn(p, di))) > ret)
- ret = tmp;
-#ifdef ISDN_DEBUG_MODEM_ICALL
- printk(KERN_DEBUG "m_fi: lmsnX=%s mmsn=%s -> tmp=%d\n",
- p, isdn_map_eaz2msn(emu->msn, di), tmp);
-#endif
- if (q) {
- *q = ';';
- p = q;
- p++;
- }
- if (!tmp)
- return 0;
- if (!q)
- break;
- }
- return ret;
- } else {
- int tmp;
- tmp = isdn_msncmp(cid, isdn_map_eaz2msn(emu->msn, di));
-#ifdef ISDN_DEBUG_MODEM_ICALL
- printk(KERN_DEBUG "m_fi: mmsn=%s -> tmp=%d\n",
- isdn_map_eaz2msn(emu->msn, di), tmp);
-#endif
- return tmp;
- }
-}
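
To make the 0/1/2 convention above concrete, here is a toy comparator; it is a hypothetical stand-in for isdn_msncmp(), handling only plain digit strings and none of the '*', '?', '[]' listen patterns accepted by AT&L. isdn_tty_find_icall() below turns a best result of 2 into its return value 3.

#include <string.h>

/* 0 = match, 1 = mismatch, 2 = the caller id received so far is a
 * prefix of the configured MSN and could still match if more digits
 * arrive. */
static int msn_match_sketch(const char *cid, const char *msn)
{
	size_t cl = strlen(cid), ml = strlen(msn);

	if (cl == ml && memcmp(cid, msn, cl) == 0)
		return 0;
	if (cl < ml && memcmp(cid, msn, cl) == 0)
		return 2;
	return 1;
}
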
-
-/*
- * An incoming call-request has arrived.
- * Search the tty-devices for an appropriate device and bind
- * it to the ISDN-Channel.
- * Return:
- *
- * 0 = No matching device found.
- * 1 = A matching device found.
- * 3 = No match found, but would match if the
- * CID were longer.
- */
-int
-isdn_tty_find_icall(int di, int ch, setup_parm *setup)
-{
- char *eaz;
- int i;
- int wret;
- int idx;
- int si1;
- int si2;
- char *nr;
- ulong flags;
-
- if (!setup->phone[0]) {
- nr = "0";
- printk(KERN_INFO "isdn_tty: Incoming call without OAD, assuming '0'\n");
- } else
- nr = setup->phone;
- si1 = (int) setup->si1;
- si2 = (int) setup->si2;
- if (!setup->eazmsn[0]) {
- printk(KERN_WARNING "isdn_tty: Incoming call without CPN, assuming '0'\n");
- eaz = "0";
- } else
- eaz = setup->eazmsn;
-#ifdef ISDN_DEBUG_MODEM_ICALL
- printk(KERN_DEBUG "m_fi: eaz=%s si1=%d si2=%d\n", eaz, si1, si2);
-#endif
- wret = 0;
- spin_lock_irqsave(&dev->lock, flags);
- for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
- modem_info *info = &dev->mdm.info[i];
-
- if (info->port.count == 0)
- continue;
- if ((info->emu.mdmreg[REG_SI1] & si2bit[si1]) && /* SI1 is matching */
- (info->emu.mdmreg[REG_SI2] == si2)) { /* SI2 is matching */
- idx = isdn_dc2minor(di, ch);
-#ifdef ISDN_DEBUG_MODEM_ICALL
- printk(KERN_DEBUG "m_fi: match1 wret=%d\n", wret);
- printk(KERN_DEBUG "m_fi: idx=%d flags=%08lx drv=%d ch=%d usg=%d\n", idx,
- info->port.flags, info->isdn_driver,
- info->isdn_channel, dev->usage[idx]);
-#endif
- if (
-#ifndef FIX_FILE_TRANSFER
- tty_port_active(&info->port) &&
-#endif
- (info->isdn_driver == -1) &&
- (info->isdn_channel == -1) &&
- (USG_NONE(dev->usage[idx]))) {
- int matchret;
-
- if ((matchret = isdn_tty_match_icall(eaz, &info->emu, di)) > wret)
- wret = matchret;
- if (!matchret) { /* EAZ is matching */
- info->isdn_driver = di;
- info->isdn_channel = ch;
- info->drv_index = idx;
- dev->m_idx[idx] = info->line;
- dev->usage[idx] &= ISDN_USAGE_EXCLUSIVE;
- dev->usage[idx] |= isdn_calc_usage(si1, info->emu.mdmreg[REG_L2PROT]);
- strcpy(dev->num[idx], nr);
- strcpy(info->emu.cpn, eaz);
- info->emu.mdmreg[REG_SI1I] = si2bit[si1];
- info->emu.mdmreg[REG_PLAN] = setup->plan;
- info->emu.mdmreg[REG_SCREEN] = setup->screen;
- isdn_info_update();
- spin_unlock_irqrestore(&dev->lock, flags);
- printk(KERN_INFO "isdn_tty: call from %s, -> RING on ttyI%d\n", nr,
- info->line);
- info->msr |= UART_MSR_RI;
- isdn_tty_modem_result(RESULT_RING, info);
- isdn_timer_ctrl(ISDN_TIMER_MODEMRING, 1);
- return 1;
- }
- }
- }
- }
- spin_unlock_irqrestore(&dev->lock, flags);
- printk(KERN_INFO "isdn_tty: call from %s -> %s %s\n", nr, eaz,
- ((dev->drv[di]->flags & DRV_FLAG_REJBUS) && (wret != 2)) ? "rejected" : "ignored");
- return (wret == 2) ? 3 : 0;
-}
-
-int
-isdn_tty_stat_callback(int i, isdn_ctrl *c)
-{
- int mi;
- modem_info *info;
- char *e;
-
- if (i < 0)
- return 0;
- if ((mi = dev->m_idx[i]) >= 0) {
- info = &dev->mdm.info[mi];
- switch (c->command) {
- case ISDN_STAT_CINF:
- printk(KERN_DEBUG "CHARGEINFO on ttyI%d: %ld %s\n", info->line, c->arg, c->parm.num);
- info->emu.charge = (unsigned) simple_strtoul(c->parm.num, &e, 10);
- if (e == (char *)c->parm.num)
- info->emu.charge = 0;
-
- break;
- case ISDN_STAT_BSENT:
-#ifdef ISDN_TTY_STAT_DEBUG
- printk(KERN_DEBUG "tty_STAT_BSENT ttyI%d\n", info->line);
-#endif
- if ((info->isdn_driver == c->driver) &&
- (info->isdn_channel == c->arg)) {
- info->msr |= UART_MSR_CTS;
- if (info->send_outstanding)
- if (!(--info->send_outstanding))
- info->lsr |= UART_LSR_TEMT;
- isdn_tty_tint(info);
- return 1;
- }
- break;
- case ISDN_STAT_CAUSE:
-#ifdef ISDN_TTY_STAT_DEBUG
- printk(KERN_DEBUG "tty_STAT_CAUSE ttyI%d\n", info->line);
-#endif
- /* Signal cause to tty-device */
- strncpy(info->last_cause, c->parm.num, 5);
- return 1;
- case ISDN_STAT_DISPLAY:
-#ifdef ISDN_TTY_STAT_DEBUG
- printk(KERN_DEBUG "tty_STAT_DISPLAY ttyI%d\n", info->line);
-#endif
- /* Signal display to tty-device */
- if ((info->emu.mdmreg[REG_DISPLAY] & BIT_DISPLAY) &&
- !(info->emu.mdmreg[REG_RESPNUM] & BIT_RESPNUM)) {
- isdn_tty_at_cout("\r\n", info);
- isdn_tty_at_cout("DISPLAY: ", info);
- isdn_tty_at_cout(c->parm.display, info);
- isdn_tty_at_cout("\r\n", info);
- }
- return 1;
- case ISDN_STAT_DCONN:
-#ifdef ISDN_TTY_STAT_DEBUG
- printk(KERN_DEBUG "tty_STAT_DCONN ttyI%d\n", info->line);
-#endif
- if (tty_port_active(&info->port)) {
- if (info->dialing == 1) {
- info->dialing = 2;
- return 1;
- }
- }
- break;
- case ISDN_STAT_DHUP:
-#ifdef ISDN_TTY_STAT_DEBUG
- printk(KERN_DEBUG "tty_STAT_DHUP ttyI%d\n", info->line);
-#endif
- if (tty_port_active(&info->port)) {
- if (info->dialing == 1)
- isdn_tty_modem_result(RESULT_BUSY, info);
- if (info->dialing > 1)
- isdn_tty_modem_result(RESULT_NO_CARRIER, info);
- info->dialing = 0;
-#ifdef ISDN_DEBUG_MODEM_HUP
- printk(KERN_DEBUG "Mhup in ISDN_STAT_DHUP\n");
-#endif
- isdn_tty_modem_hup(info, 0);
- return 1;
- }
- break;
- case ISDN_STAT_BCONN:
-#ifdef ISDN_TTY_STAT_DEBUG
- printk(KERN_DEBUG "tty_STAT_BCONN ttyI%d\n", info->line);
-#endif
- /* Wake up any processes waiting
- * for an incoming call on this device when
- * DCD follows the state of the incoming carrier.
- */
- if (info->port.blocked_open &&
- (info->emu.mdmreg[REG_DCD] & BIT_DCD)) {
- wake_up_interruptible(&info->port.open_wait);
- }
-
- /* Schedule a CONNECT message to any tty
- * waiting for it and set the DCD bit
- * of its modem status.
- */
- if (tty_port_active(&info->port) ||
- (info->port.blocked_open &&
- (info->emu.mdmreg[REG_DCD] & BIT_DCD))) {
- info->msr |= UART_MSR_DCD;
- info->emu.charge = 0;
- if (info->dialing & 0xf)
- info->last_dir = 1;
- else
- info->last_dir = 0;
- info->dialing = 0;
- info->rcvsched = 1;
- if (USG_MODEM(dev->usage[i])) {
- if (info->emu.mdmreg[REG_L2PROT] == ISDN_PROTO_L2_MODEM) {
- strcpy(info->emu.connmsg, c->parm.num);
- isdn_tty_modem_result(RESULT_CONNECT, info);
- } else
- isdn_tty_modem_result(RESULT_CONNECT64000, info);
- }
- if (USG_VOICE(dev->usage[i]))
- isdn_tty_modem_result(RESULT_VCON, info);
- return 1;
- }
- break;
- case ISDN_STAT_BHUP:
-#ifdef ISDN_TTY_STAT_DEBUG
- printk(KERN_DEBUG "tty_STAT_BHUP ttyI%d\n", info->line);
-#endif
- if (tty_port_active(&info->port)) {
-#ifdef ISDN_DEBUG_MODEM_HUP
- printk(KERN_DEBUG "Mhup in ISDN_STAT_BHUP\n");
-#endif
- isdn_tty_modem_hup(info, 0);
- return 1;
- }
- break;
- case ISDN_STAT_NODCH:
-#ifdef ISDN_TTY_STAT_DEBUG
- printk(KERN_DEBUG "tty_STAT_NODCH ttyI%d\n", info->line);
-#endif
- if (tty_port_active(&info->port)) {
- if (info->dialing) {
- info->dialing = 0;
- info->last_l2 = -1;
- info->last_si = 0;
- sprintf(info->last_cause, "0000");
- isdn_tty_modem_result(RESULT_NO_DIALTONE, info);
- }
- isdn_tty_modem_hup(info, 0);
- return 1;
- }
- break;
- case ISDN_STAT_UNLOAD:
-#ifdef ISDN_TTY_STAT_DEBUG
- printk(KERN_DEBUG "tty_STAT_UNLOAD ttyI%d\n", info->line);
-#endif
- for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
- info = &dev->mdm.info[i];
- if (info->isdn_driver == c->driver) {
- if (info->online)
- isdn_tty_modem_hup(info, 1);
- }
- }
- return 1;
-#ifdef CONFIG_ISDN_TTY_FAX
- case ISDN_STAT_FAXIND:
- if (tty_port_active(&info->port)) {
- isdn_tty_fax_command(info, c);
- }
- break;
-#endif
-#ifdef CONFIG_ISDN_AUDIO
- case ISDN_STAT_AUDIO:
- if (tty_port_active(&info->port)) {
- switch (c->parm.num[0]) {
- case ISDN_AUDIO_DTMF:
- if (info->vonline) {
- isdn_audio_put_dle_code(info,
- c->parm.num[1]);
- }
- break;
- }
- }
- break;
-#endif
- }
- }
- return 0;
-}
-
-/*********************************************************************
- Modem-Emulator-Routines
-*********************************************************************/
-
-#define cmdchar(c) ((c >= ' ') && (c <= 0x7f))
-
-/*
- * Put a message from the AT-emulator into the receive-buffer of the tty,
- * converting CR, LF, and BS to the values in modem-registers 3, 4 and 5.
- */
-void
-isdn_tty_at_cout(char *msg, modem_info *info)
-{
- struct tty_port *port = &info->port;
- atemu *m = &info->emu;
- char *p;
- char c;
- u_long flags;
- struct sk_buff *skb = NULL;
- char *sp = NULL;
- int l;
-
- if (!msg) {
- printk(KERN_WARNING "isdn_tty: Null-Message in isdn_tty_at_cout\n");
- return;
- }
-
- l = strlen(msg);
-
- spin_lock_irqsave(&info->readlock, flags);
- if (info->closing) {
- spin_unlock_irqrestore(&info->readlock, flags);
- return;
- }
-
- /* use queue instead of direct, if online and */
- /* data is in queue or buffer is full */
- if (info->online && ((tty_buffer_request_room(port, l) < l) ||
- !skb_queue_empty(&dev->drv[info->isdn_driver]->rpqueue[info->isdn_channel]))) {
- skb = alloc_skb(l, GFP_ATOMIC);
- if (!skb) {
- spin_unlock_irqrestore(&info->readlock, flags);
- return;
- }
- sp = skb_put(skb, l);
-#ifdef CONFIG_ISDN_AUDIO
- ISDN_AUDIO_SKB_DLECOUNT(skb) = 0;
- ISDN_AUDIO_SKB_LOCK(skb) = 0;
-#endif
- }
-
- for (p = msg; *p; p++) {
- switch (*p) {
- case '\r':
- c = m->mdmreg[REG_CR];
- break;
- case '\n':
- c = m->mdmreg[REG_LF];
- break;
- case '\b':
- c = m->mdmreg[REG_BS];
- break;
- default:
- c = *p;
- }
- if (skb) {
- *sp++ = c;
- } else {
- if (tty_insert_flip_char(port, c, TTY_NORMAL) == 0)
- break;
- }
- }
- if (skb) {
- __skb_queue_tail(&dev->drv[info->isdn_driver]->rpqueue[info->isdn_channel], skb);
- dev->drv[info->isdn_driver]->rcvcount[info->isdn_channel] += skb->len;
- spin_unlock_irqrestore(&info->readlock, flags);
- /* Schedule dequeuing */
- if (dev->modempoll && info->rcvsched)
- isdn_timer_ctrl(ISDN_TIMER_MODEMREAD, 1);
-
- } else {
- spin_unlock_irqrestore(&info->readlock, flags);
- tty_flip_buffer_push(port);
- }
-}
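
A small worked illustration of the character mapping described in the comment above; the helper name is an assumption, and the register indices follow that comment (registers 3, 4 and 5) and the defaults set in isdn_tty_reset_profile() above (13, 10, 8), under which the mapping is the identity.

/* Map an emulator output character through the S-registers, exactly as
 * the switch in isdn_tty_at_cout() does. With the default profile
 * (S3=13, S4=10, S5=8) every character maps to itself; changing S3 or
 * S4 changes the line terminators the emulator emits. */
static char map_result_char(char c, const unsigned char *mdmreg)
{
	switch (c) {
	case '\r': return mdmreg[3];   /* CR register */
	case '\n': return mdmreg[4];   /* LF register */
	case '\b': return mdmreg[5];   /* BS register */
	default:   return c;
	}
}
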
-
-/*
- * Perform ATH Hangup
- */
-static void
-isdn_tty_on_hook(modem_info *info)
-{
- if (info->isdn_channel >= 0) {
-#ifdef ISDN_DEBUG_MODEM_HUP
- printk(KERN_DEBUG "Mhup in isdn_tty_on_hook\n");
-#endif
- isdn_tty_modem_hup(info, 1);
- }
-}
-
-static void
-isdn_tty_off_hook(void)
-{
- printk(KERN_DEBUG "isdn_tty_off_hook\n");
-}
-
-#define PLUSWAIT1 (HZ / 2) /* 0.5 sec. */
-#define PLUSWAIT2 (HZ * 3 / 2) /* 1.5 sec */
-
-/*
- * Check the buffer for the modem escape-sequence and activate the
- * timer-callback to isdn_tty_modem_escape() if the sequence is found.
- *
- * Parameters:
- * p pointer to databuffer
- * plus escape-character
- * count length of buffer
- * pluscount count of valid escape-characters so far
- * lastplus timestamp of last character
- */
-static void
-isdn_tty_check_esc(const u_char *p, u_char plus, int count, int *pluscount,
- u_long *lastplus)
-{
- if (plus > 127)
- return;
- if (count > 3) {
- p += count - 3;
- count = 3;
- *pluscount = 0;
- }
- while (count > 0) {
- if (*(p++) == plus) {
- if ((*pluscount)++) {
- /* Time since last '+' > 0.5 sec. ? */
- if (time_after(jiffies, *lastplus + PLUSWAIT1))
- *pluscount = 1;
- } else {
- /* Time since last non-'+' < 1.5 sec. ? */
- if (time_before(jiffies, *lastplus + PLUSWAIT2))
- *pluscount = 0;
- }
- if ((*pluscount == 3) && (count == 1))
- isdn_timer_ctrl(ISDN_TIMER_MODEMPLUS, 1);
- if (*pluscount > 3)
- *pluscount = 1;
- } else
- *pluscount = 0;
- *lastplus = jiffies;
- count--;
- }
-}
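
The guard-time rule above is compact but easy to misread, so here is a simplified, self-contained model of it; the names and the millisecond clock are assumptions for illustration, and it ignores the real code's trick of only inspecting the last three bytes of a large buffer.

static int plus_count;
static unsigned long last_char_ms;

/* Feed one character; returns nonzero at the point where the driver
 * would arm ISDN_TIMER_MODEMPLUS, whose callback (isdn_tty_modem_escape(),
 * per the comment above) completes the escape handling. */
static int escape_armed(unsigned char ch, int last_in_buffer,
			unsigned long now_ms)
{
	unsigned long gap = now_ms - last_char_ms;

	if (ch == '+') {
		if (plus_count == 0)
			/* first '+': needs >= 1.5 s of leading guard time */
			plus_count = (gap >= 1500) ? 1 : 0;
		else
			/* further '+': must follow within 0.5 s */
			plus_count = (gap <= 500) ? plus_count + 1 : 1;
		if (plus_count > 3)
			plus_count = 1;
	} else {
		plus_count = 0;         /* ordinary data resets the count */
	}
	last_char_ms = now_ms;
	return plus_count == 3 && last_in_buffer;
}
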
-
-/*
- * Return the result of the AT-emulator to the tty-receive-buffer,
- * depending on modem-register 12, bits 0 and 1.
- * For CONNECT-messages also switch to online-mode.
- * For RING-messages handle auto-ATA if register 0 != 0.
- */
-
-static void
-isdn_tty_modem_result(int code, modem_info *info)
-{
- atemu *m = &info->emu;
- static char *msg[] =
- {"OK", "CONNECT", "RING", "NO CARRIER", "ERROR",
- "CONNECT 64000", "NO DIALTONE", "BUSY", "NO ANSWER",
- "RINGING", "NO MSN/EAZ", "VCON", "RUNG"};
- char s[ISDN_MSNLEN + 10];
-
- switch (code) {
- case RESULT_RING:
- m->mdmreg[REG_RINGCNT]++;
- if (m->mdmreg[REG_RINGCNT] == m->mdmreg[REG_RINGATA])
- /* Automatically accept incoming call */
- isdn_tty_cmd_ATA(info);
- break;
- case RESULT_NO_CARRIER:
-#ifdef ISDN_DEBUG_MODEM_HUP
- printk(KERN_DEBUG "modem_result: NO CARRIER %d %d\n",
- info->closing, !info->port.tty);
-#endif
- m->mdmreg[REG_RINGCNT] = 0;
- del_timer(&info->nc_timer);
- info->ncarrier = 0;
- if (info->closing || !info->port.tty)
- return;
-
-#ifdef CONFIG_ISDN_AUDIO
- if (info->vonline & 1) {
-#ifdef ISDN_DEBUG_MODEM_VOICE
- printk(KERN_DEBUG "res3: send DLE-ETX on ttyI%d\n",
- info->line);
-#endif
- /* voice-recording, add DLE-ETX */
- isdn_tty_at_cout("\020\003", info);
- }
- if (info->vonline & 2) {
-#ifdef ISDN_DEBUG_MODEM_VOICE
- printk(KERN_DEBUG "res3: send DLE-DC4 on ttyI%d\n",
- info->line);
-#endif
- /* voice-playing, add DLE-DC4 */
- isdn_tty_at_cout("\020\024", info);
- }
-#endif
- break;
- case RESULT_CONNECT:
- case RESULT_CONNECT64000:
- sprintf(info->last_cause, "0000");
- if (!info->online)
- info->online = 2;
- break;
- case RESULT_VCON:
-#ifdef ISDN_DEBUG_MODEM_VOICE
- printk(KERN_DEBUG "res3: send VCON on ttyI%d\n",
- info->line);
-#endif
- sprintf(info->last_cause, "0000");
- if (!info->online)
- info->online = 1;
- break;
- } /* switch (code) */
-
- if (m->mdmreg[REG_RESP] & BIT_RESP) {
- /* Show results */
- if (m->mdmreg[REG_RESPNUM] & BIT_RESPNUM) {
- /* Show numeric results only */
- sprintf(s, "\r\n%d\r\n", code);
- isdn_tty_at_cout(s, info);
- } else {
- if (code == RESULT_RING) {
- /* return if "show RUNG" and ringcounter>1 */
- if ((m->mdmreg[REG_RUNG] & BIT_RUNG) &&
- (m->mdmreg[REG_RINGCNT] > 1))
- return;
- /* print CID, _before_ _every_ ring */
- if (!(m->mdmreg[REG_CIDONCE] & BIT_CIDONCE)) {
- isdn_tty_at_cout("\r\nCALLER NUMBER: ", info);
- isdn_tty_at_cout(dev->num[info->drv_index], info);
- if (m->mdmreg[REG_CDN] & BIT_CDN) {
- isdn_tty_at_cout("\r\nCALLED NUMBER: ", info);
- isdn_tty_at_cout(info->emu.cpn, info);
- }
- }
- }
- isdn_tty_at_cout("\r\n", info);
- isdn_tty_at_cout(msg[code], info);
- switch (code) {
- case RESULT_CONNECT:
- switch (m->mdmreg[REG_L2PROT]) {
- case ISDN_PROTO_L2_MODEM:
- isdn_tty_at_cout(" ", info);
- isdn_tty_at_cout(m->connmsg, info);
- break;
- }
- break;
- case RESULT_RING:
- /* Append CPN, if enabled */
- if ((m->mdmreg[REG_CPN] & BIT_CPN)) {
- sprintf(s, "/%s", m->cpn);
- isdn_tty_at_cout(s, info);
- }
- /* Print CID only once, _after_ 1st RING */
- if ((m->mdmreg[REG_CIDONCE] & BIT_CIDONCE) &&
- (m->mdmreg[REG_RINGCNT] == 1)) {
- isdn_tty_at_cout("\r\n", info);
- isdn_tty_at_cout("CALLER NUMBER: ", info);
- isdn_tty_at_cout(dev->num[info->drv_index], info);
- if (m->mdmreg[REG_CDN] & BIT_CDN) {
- isdn_tty_at_cout("\r\nCALLED NUMBER: ", info);
- isdn_tty_at_cout(info->emu.cpn, info);
- }
- }
- break;
- case RESULT_NO_CARRIER:
- case RESULT_NO_DIALTONE:
- case RESULT_BUSY:
- case RESULT_NO_ANSWER:
- m->mdmreg[REG_RINGCNT] = 0;
- /* Append Cause-Message if enabled */
- if (m->mdmreg[REG_RESPXT] & BIT_RESPXT) {
- sprintf(s, "/%s", info->last_cause);
- isdn_tty_at_cout(s, info);
- }
- break;
- case RESULT_CONNECT64000:
- /* Append Protocol to CONNECT message */
- switch (m->mdmreg[REG_L2PROT]) {
- case ISDN_PROTO_L2_X75I:
- case ISDN_PROTO_L2_X75UI:
- case ISDN_PROTO_L2_X75BUI:
- isdn_tty_at_cout("/X.75", info);
- break;
- case ISDN_PROTO_L2_HDLC:
- isdn_tty_at_cout("/HDLC", info);
- break;
- case ISDN_PROTO_L2_V11096:
- isdn_tty_at_cout("/V110/9600", info);
- break;
- case ISDN_PROTO_L2_V11019:
- isdn_tty_at_cout("/V110/19200", info);
- break;
- case ISDN_PROTO_L2_V11038:
- isdn_tty_at_cout("/V110/38400", info);
- break;
- }
- if (m->mdmreg[REG_T70] & BIT_T70) {
- isdn_tty_at_cout("/T.70", info);
- if (m->mdmreg[REG_T70] & BIT_T70_EXT)
- isdn_tty_at_cout("+", info);
- }
- break;
- }
- isdn_tty_at_cout("\r\n", info);
- }
- }
- if (code == RESULT_NO_CARRIER) {
- if (info->closing || (!info->port.tty))
- return;
-
- if (tty_port_check_carrier(&info->port))
- tty_hangup(info->port.tty);
- }
-}
-
-
-/*
- * Display a modem-register-value.
- */
-static void
-isdn_tty_show_profile(int ridx, modem_info *info)
-{
- char v[6];
-
- sprintf(v, "\r\n%d", info->emu.mdmreg[ridx]);
- isdn_tty_at_cout(v, info);
-}
-
-/*
- * Get MSN-string from char-pointer, set pointer to end of number
- */
-static void
-isdn_tty_get_msnstr(char *n, char **p)
-{
- int limit = ISDN_MSNLEN - 1;
-
- while (((*p[0] >= '0' && *p[0] <= '9') ||
- /* Why a comma ??? */
- (*p[0] == ',') || (*p[0] == ':')) &&
- (limit--))
- *n++ = *p[0]++;
- *n = '\0';
-}
-
-/*
- * Get phone-number from modem-commandbuffer
- */
-static void
-isdn_tty_getdial(char *p, char *q, int cnt)
-{
- int first = 1;
- int limit = ISDN_MSNLEN - 1; /* MUST match the size of interface var to avoid
- buffer overflow */
-
- while (strchr(" 0123456789,#.*WPTSR-", *p) && *p && --cnt > 0) {
- if ((*p >= '0' && *p <= '9') || ((*p == 'S') && first) ||
- ((*p == 'R') && first) ||
- (*p == '*') || (*p == '#')) {
- *q++ = *p;
- limit--;
- }
- if (!limit)
- break;
- p++;
- first = 0;
- }
- *q = 0;
-}
-
-#define PARSE_ERROR { isdn_tty_modem_result(RESULT_ERROR, info); return; }
-#define PARSE_ERROR1 { isdn_tty_modem_result(RESULT_ERROR, info); return 1; }
-
-static void
-isdn_tty_report(modem_info *info)
-{
- atemu *m = &info->emu;
- char s[80];
-
- isdn_tty_at_cout("\r\nStatistics of last connection:\r\n\r\n", info);
- sprintf(s, " Remote Number: %s\r\n", info->last_num);
- isdn_tty_at_cout(s, info);
- sprintf(s, " Direction: %s\r\n", info->last_dir ? "outgoing" : "incoming");
- isdn_tty_at_cout(s, info);
- isdn_tty_at_cout(" Layer-2 Protocol: ", info);
- switch (info->last_l2) {
- case ISDN_PROTO_L2_X75I:
- isdn_tty_at_cout("X.75i", info);
- break;
- case ISDN_PROTO_L2_X75UI:
- isdn_tty_at_cout("X.75ui", info);
- break;
- case ISDN_PROTO_L2_X75BUI:
- isdn_tty_at_cout("X.75bui", info);
- break;
- case ISDN_PROTO_L2_HDLC:
- isdn_tty_at_cout("HDLC", info);
- break;
- case ISDN_PROTO_L2_V11096:
- isdn_tty_at_cout("V.110 9600 Baud", info);
- break;
- case ISDN_PROTO_L2_V11019:
- isdn_tty_at_cout("V.110 19200 Baud", info);
- break;
- case ISDN_PROTO_L2_V11038:
- isdn_tty_at_cout("V.110 38400 Baud", info);
- break;
- case ISDN_PROTO_L2_TRANS:
- isdn_tty_at_cout("transparent", info);
- break;
- case ISDN_PROTO_L2_MODEM:
- isdn_tty_at_cout("modem", info);
- break;
- case ISDN_PROTO_L2_FAX:
- isdn_tty_at_cout("fax", info);
- break;
- default:
- isdn_tty_at_cout("unknown", info);
- break;
- }
- if (m->mdmreg[REG_T70] & BIT_T70) {
- isdn_tty_at_cout("/T.70", info);
- if (m->mdmreg[REG_T70] & BIT_T70_EXT)
- isdn_tty_at_cout("+", info);
- }
- isdn_tty_at_cout("\r\n", info);
- isdn_tty_at_cout(" Service: ", info);
- switch (info->last_si) {
- case 1:
- isdn_tty_at_cout("audio\r\n", info);
- break;
- case 5:
- isdn_tty_at_cout("btx\r\n", info);
- break;
- case 7:
- isdn_tty_at_cout("data\r\n", info);
- break;
- default:
- sprintf(s, "%d\r\n", info->last_si);
- isdn_tty_at_cout(s, info);
- break;
- }
- sprintf(s, " Hangup location: %s\r\n", info->last_lhup ? "local" : "remote");
- isdn_tty_at_cout(s, info);
- sprintf(s, " Last cause: %s\r\n", info->last_cause);
- isdn_tty_at_cout(s, info);
-}
-
-/*
- * Parse AT&.. commands.
- */
-static int
-isdn_tty_cmd_ATand(char **p, modem_info *info)
-{
- atemu *m = &info->emu;
- int i;
- char rb[100];
-
-#define MAXRB (sizeof(rb) - 1)
-
- switch (*p[0]) {
- case 'B':
- /* &B - Set Buffersize */
- p[0]++;
- i = isdn_getnum(p);
- if ((i < 0) || (i > ISDN_SERIAL_XMIT_MAX))
- PARSE_ERROR1;
-#ifdef CONFIG_ISDN_AUDIO
- if ((m->mdmreg[REG_SI1] & 1) && (i > VBUF))
- PARSE_ERROR1;
-#endif
- m->mdmreg[REG_PSIZE] = i / 16;
- info->xmit_size = m->mdmreg[REG_PSIZE] * 16;
- switch (m->mdmreg[REG_L2PROT]) {
- case ISDN_PROTO_L2_V11096:
- case ISDN_PROTO_L2_V11019:
- case ISDN_PROTO_L2_V11038:
- info->xmit_size /= 10;
- }
- break;
- case 'C':
- /* &C - DCD Status */
- p[0]++;
- switch (isdn_getnum(p)) {
- case 0:
- m->mdmreg[REG_DCD] &= ~BIT_DCD;
- break;
- case 1:
- m->mdmreg[REG_DCD] |= BIT_DCD;
- break;
- default:
- PARSE_ERROR1
- }
- break;
- case 'D':
- /* &D - Set DTR-Low-behavior */
- p[0]++;
- switch (isdn_getnum(p)) {
- case 0:
- m->mdmreg[REG_DTRHUP] &= ~BIT_DTRHUP;
- m->mdmreg[REG_DTRR] &= ~BIT_DTRR;
- break;
- case 2:
- m->mdmreg[REG_DTRHUP] |= BIT_DTRHUP;
- m->mdmreg[REG_DTRR] &= ~BIT_DTRR;
- break;
- case 3:
- m->mdmreg[REG_DTRHUP] |= BIT_DTRHUP;
- m->mdmreg[REG_DTRR] |= BIT_DTRR;
- break;
- default:
- PARSE_ERROR1
- }
- break;
- case 'E':
- /* &E -Set EAZ/MSN */
- p[0]++;
- isdn_tty_get_msnstr(m->msn, p);
- break;
- case 'F':
- /* &F -Set Factory-Defaults */
- p[0]++;
- if (info->msr & UART_MSR_DCD)
- PARSE_ERROR1;
- isdn_tty_reset_profile(m);
- isdn_tty_modem_reset_regs(info, 1);
- break;
-#ifdef DUMMY_HAYES_AT
- case 'K':
- /* only to be compliant with common scripts */
- /* &K Flowcontrol - no function */
- p[0]++;
- isdn_getnum(p);
- break;
-#endif
- case 'L':
- /* &L -Set Numbers to listen on */
- p[0]++;
- i = 0;
- while (*p[0] && (strchr("0123456789,-*[]?;", *p[0])) &&
- (i < ISDN_LMSNLEN - 1))
- m->lmsn[i++] = *p[0]++;
- m->lmsn[i] = '\0';
- break;
- case 'R':
- /* &R - Set V.110 bitrate adaption */
- p[0]++;
- i = isdn_getnum(p);
- switch (i) {
- case 0:
- /* Switch off V.110, back to X.75 */
- m->mdmreg[REG_L2PROT] = ISDN_PROTO_L2_X75I;
- m->mdmreg[REG_SI2] = 0;
- info->xmit_size = m->mdmreg[REG_PSIZE] * 16;
- break;
- case 9600:
- m->mdmreg[REG_L2PROT] = ISDN_PROTO_L2_V11096;
- m->mdmreg[REG_SI2] = 197;
- info->xmit_size = m->mdmreg[REG_PSIZE] * 16 / 10;
- break;
- case 19200:
- m->mdmreg[REG_L2PROT] = ISDN_PROTO_L2_V11019;
- m->mdmreg[REG_SI2] = 199;
- info->xmit_size = m->mdmreg[REG_PSIZE] * 16 / 10;
- break;
- case 38400:
- m->mdmreg[REG_L2PROT] = ISDN_PROTO_L2_V11038;
- m->mdmreg[REG_SI2] = 198; /* no existing standard for this */
- info->xmit_size = m->mdmreg[REG_PSIZE] * 16 / 10;
- break;
- default:
- PARSE_ERROR1;
- }
- /* Switch off T.70 */
- m->mdmreg[REG_T70] &= ~(BIT_T70 | BIT_T70_EXT);
- /* Set Service 7 */
- m->mdmreg[REG_SI1] |= 4;
- break;
- case 'S':
- /* &S - Set Windowsize */
- p[0]++;
- i = isdn_getnum(p);
- if ((i > 0) && (i < 9))
- m->mdmreg[REG_WSIZE] = i;
- else
- PARSE_ERROR1;
- break;
- case 'V':
- /* &V - Show registers */
- p[0]++;
- isdn_tty_at_cout("\r\n", info);
- for (i = 0; i < ISDN_MODEM_NUMREG; i++) {
- sprintf(rb, "S%02d=%03d%s", i,
- m->mdmreg[i], ((i + 1) % 10) ? " " : "\r\n");
- isdn_tty_at_cout(rb, info);
- }
- sprintf(rb, "\r\nEAZ/MSN: %.50s\r\n",
- strlen(m->msn) ? m->msn : "None");
- isdn_tty_at_cout(rb, info);
- if (strlen(m->lmsn)) {
- isdn_tty_at_cout("\r\nListen: ", info);
- isdn_tty_at_cout(m->lmsn, info);
- isdn_tty_at_cout("\r\n", info);
- }
- break;
- case 'W':
- /* &W - Write Profile */
- p[0]++;
- switch (*p[0]) {
- case '0':
- p[0]++;
- modem_write_profile(m);
- break;
- default:
- PARSE_ERROR1;
- }
- break;
- case 'X':
- /* &X - Switch to BTX-Mode and T.70 */
- p[0]++;
- switch (isdn_getnum(p)) {
- case 0:
- m->mdmreg[REG_T70] &= ~(BIT_T70 | BIT_T70_EXT);
- info->xmit_size = m->mdmreg[REG_PSIZE] * 16;
- break;
- case 1:
- m->mdmreg[REG_T70] |= BIT_T70;
- m->mdmreg[REG_T70] &= ~BIT_T70_EXT;
- m->mdmreg[REG_L2PROT] = ISDN_PROTO_L2_X75I;
- info->xmit_size = 112;
- m->mdmreg[REG_SI1] = 4;
- m->mdmreg[REG_SI2] = 0;
- break;
- case 2:
- m->mdmreg[REG_T70] |= (BIT_T70 | BIT_T70_EXT);
- m->mdmreg[REG_L2PROT] = ISDN_PROTO_L2_X75I;
- info->xmit_size = 112;
- m->mdmreg[REG_SI1] = 4;
- m->mdmreg[REG_SI2] = 0;
- break;
- default:
- PARSE_ERROR1;
- }
- break;
- default:
- PARSE_ERROR1;
- }
- return 0;
-}
-
-static int
-isdn_tty_check_ats(int mreg, int mval, modem_info *info, atemu *m)
-{
- /* Some plausibility checks */
- switch (mreg) {
- case REG_L2PROT:
- if (mval > ISDN_PROTO_L2_MAX)
- return 1;
- break;
- case REG_PSIZE:
- if ((mval * 16) > ISDN_SERIAL_XMIT_MAX)
- return 1;
-#ifdef CONFIG_ISDN_AUDIO
- if ((m->mdmreg[REG_SI1] & 1) && (mval > VBUFX))
- return 1;
-#endif
- info->xmit_size = mval * 16;
- switch (m->mdmreg[REG_L2PROT]) {
- case ISDN_PROTO_L2_V11096:
- case ISDN_PROTO_L2_V11019:
- case ISDN_PROTO_L2_V11038:
- info->xmit_size /= 10;
- }
- break;
- case REG_SI1I:
- case REG_PLAN:
- case REG_SCREEN:
- /* readonly registers */
- return 1;
- }
- return 0;
-}
-
-/*
- * Perform ATS command
- */
-static int
-isdn_tty_cmd_ATS(char **p, modem_info *info)
-{
- atemu *m = &info->emu;
- int bitpos;
- int mreg;
- int mval;
- int bval;
-
- mreg = isdn_getnum(p);
- if (mreg < 0 || mreg >= ISDN_MODEM_NUMREG)
- PARSE_ERROR1;
- switch (*p[0]) {
- case '=':
- p[0]++;
- mval = isdn_getnum(p);
- if (mval < 0 || mval > 255)
- PARSE_ERROR1;
- if (isdn_tty_check_ats(mreg, mval, info, m))
- PARSE_ERROR1;
- m->mdmreg[mreg] = mval;
- break;
- case '.':
- /* Set/Clear a single bit */
- p[0]++;
- bitpos = isdn_getnum(p);
- if ((bitpos < 0) || (bitpos > 7))
- PARSE_ERROR1;
- switch (*p[0]) {
- case '=':
- p[0]++;
- bval = isdn_getnum(p);
- if (bval < 0 || bval > 1)
- PARSE_ERROR1;
- if (bval)
- mval = m->mdmreg[mreg] | (1 << bitpos);
- else
- mval = m->mdmreg[mreg] & ~(1 << bitpos);
- if (isdn_tty_check_ats(mreg, mval, info, m))
- PARSE_ERROR1;
- m->mdmreg[mreg] = mval;
- break;
- case '?':
- p[0]++;
- isdn_tty_at_cout("\r\n", info);
- isdn_tty_at_cout((m->mdmreg[mreg] & (1 << bitpos)) ? "1" : "0",
- info);
- break;
- default:
- PARSE_ERROR1;
- }
- break;
- case '?':
- p[0]++;
- isdn_tty_show_profile(mreg, info);
- break;
- default:
- PARSE_ERROR1;
- break;
- }
- return 0;
-}
-
-/*
- * Perform ATA command
- */
-static void
-isdn_tty_cmd_ATA(modem_info *info)
-{
- atemu *m = &info->emu;
- isdn_ctrl cmd;
- int l2;
-
- if (info->msr & UART_MSR_RI) {
- /* Accept incoming call */
- info->last_dir = 0;
- strcpy(info->last_num, dev->num[info->drv_index]);
- m->mdmreg[REG_RINGCNT] = 0;
- info->msr &= ~UART_MSR_RI;
- l2 = m->mdmreg[REG_L2PROT];
-#ifdef CONFIG_ISDN_AUDIO
- /* If more than one bit set in reg18, autoselect Layer2 */
- if ((m->mdmreg[REG_SI1] & m->mdmreg[REG_SI1I]) != m->mdmreg[REG_SI1]) {
- if (m->mdmreg[REG_SI1I] == 1) {
- if ((l2 != ISDN_PROTO_L2_MODEM) && (l2 != ISDN_PROTO_L2_FAX))
- l2 = ISDN_PROTO_L2_TRANS;
- } else
- l2 = ISDN_PROTO_L2_X75I;
- }
-#endif
- cmd.driver = info->isdn_driver;
- cmd.command = ISDN_CMD_SETL2;
- cmd.arg = info->isdn_channel + (l2 << 8);
- info->last_l2 = l2;
- isdn_command(&cmd);
- cmd.driver = info->isdn_driver;
- cmd.command = ISDN_CMD_SETL3;
- cmd.arg = info->isdn_channel + (m->mdmreg[REG_L3PROT] << 8);
-#ifdef CONFIG_ISDN_TTY_FAX
- if (l2 == ISDN_PROTO_L2_FAX) {
- cmd.parm.fax = info->fax;
- info->fax->direction = ISDN_TTY_FAX_CONN_IN;
- }
-#endif
- isdn_command(&cmd);
- cmd.driver = info->isdn_driver;
- cmd.arg = info->isdn_channel;
- cmd.command = ISDN_CMD_ACCEPTD;
- info->dialing = 16;
- info->emu.carrierwait = 0;
- isdn_command(&cmd);
- isdn_timer_ctrl(ISDN_TIMER_CARRIER, 1);
- } else
- isdn_tty_modem_result(RESULT_NO_ANSWER, info);
-}
-
-#ifdef CONFIG_ISDN_AUDIO
-/*
- * Parse AT+F.. commands
- */
-static int
-isdn_tty_cmd_PLUSF(char **p, modem_info *info)
-{
- atemu *m = &info->emu;
- char rs[20];
-
- if (!strncmp(p[0], "CLASS", 5)) {
- p[0] += 5;
- switch (*p[0]) {
- case '?':
- p[0]++;
- sprintf(rs, "\r\n%d",
- (m->mdmreg[REG_SI1] & 1) ? 8 : 0);
-#ifdef CONFIG_ISDN_TTY_FAX
- if (TTY_IS_FCLASS2(info))
- sprintf(rs, "\r\n2");
- else if (TTY_IS_FCLASS1(info))
- sprintf(rs, "\r\n1");
-#endif
- isdn_tty_at_cout(rs, info);
- break;
- case '=':
- p[0]++;
- switch (*p[0]) {
- case '0':
- p[0]++;
- m->mdmreg[REG_L2PROT] = ISDN_PROTO_L2_X75I;
- m->mdmreg[REG_L3PROT] = ISDN_PROTO_L3_TRANS;
- m->mdmreg[REG_SI1] = 4;
- info->xmit_size =
- m->mdmreg[REG_PSIZE] * 16;
- break;
-#ifdef CONFIG_ISDN_TTY_FAX
- case '1':
- p[0]++;
- if (!(dev->global_features &
- ISDN_FEATURE_L3_FCLASS1))
- PARSE_ERROR1;
- m->mdmreg[REG_SI1] = 1;
- m->mdmreg[REG_L2PROT] = ISDN_PROTO_L2_FAX;
- m->mdmreg[REG_L3PROT] = ISDN_PROTO_L3_FCLASS1;
- info->xmit_size =
- m->mdmreg[REG_PSIZE] * 16;
- break;
- case '2':
- p[0]++;
- if (!(dev->global_features &
- ISDN_FEATURE_L3_FCLASS2))
- PARSE_ERROR1;
- m->mdmreg[REG_SI1] = 1;
- m->mdmreg[REG_L2PROT] = ISDN_PROTO_L2_FAX;
- m->mdmreg[REG_L3PROT] = ISDN_PROTO_L3_FCLASS2;
- info->xmit_size =
- m->mdmreg[REG_PSIZE] * 16;
- break;
-#endif
- case '8':
- p[0]++;
- /* L2 will change on dialout with si=1 */
- m->mdmreg[REG_L2PROT] = ISDN_PROTO_L2_X75I;
- m->mdmreg[REG_L3PROT] = ISDN_PROTO_L3_TRANS;
- m->mdmreg[REG_SI1] = 5;
- info->xmit_size = VBUF;
- break;
- case '?':
- p[0]++;
- strcpy(rs, "\r\n0,");
-#ifdef CONFIG_ISDN_TTY_FAX
- if (dev->global_features &
- ISDN_FEATURE_L3_FCLASS1)
- strcat(rs, "1,");
- if (dev->global_features &
- ISDN_FEATURE_L3_FCLASS2)
- strcat(rs, "2,");
-#endif
- strcat(rs, "8");
- isdn_tty_at_cout(rs, info);
- break;
- default:
- PARSE_ERROR1;
- }
- break;
- default:
- PARSE_ERROR1;
- }
- return 0;
- }
-#ifdef CONFIG_ISDN_TTY_FAX
- return (isdn_tty_cmd_PLUSF_FAX(p, info));
-#else
- PARSE_ERROR1;
-#endif
-}
-
-/*
- * Parse AT+V.. commands
- */
-static int
-isdn_tty_cmd_PLUSV(char **p, modem_info *info)
-{
- atemu *m = &info->emu;
- isdn_ctrl cmd;
- static char *vcmd[] =
- {"NH", "IP", "LS", "RX", "SD", "SM", "TX", "DD", NULL};
- int i;
- int par1;
- int par2;
- char rs[20];
-
- i = 0;
- while (vcmd[i]) {
- if (!strncmp(vcmd[i], p[0], 2)) {
- p[0] += 2;
- break;
- }
- i++;
- }
- switch (i) {
- case 0:
- /* AT+VNH - Auto hangup feature */
- switch (*p[0]) {
- case '?':
- p[0]++;
- isdn_tty_at_cout("\r\n1", info);
- break;
- case '=':
- p[0]++;
- switch (*p[0]) {
- case '1':
- p[0]++;
- break;
- case '?':
- p[0]++;
- isdn_tty_at_cout("\r\n1", info);
- break;
- default:
- PARSE_ERROR1;
- }
- break;
- default:
- PARSE_ERROR1;
- }
- break;
- case 1:
- /* AT+VIP - Reset all voice parameters */
- isdn_tty_modem_reset_vpar(m);
- break;
- case 2:
- /* AT+VLS - Select device, accept incoming call */
- switch (*p[0]) {
- case '?':
- p[0]++;
- sprintf(rs, "\r\n%d", m->vpar[0]);
- isdn_tty_at_cout(rs, info);
- break;
- case '=':
- p[0]++;
- switch (*p[0]) {
- case '0':
- p[0]++;
- m->vpar[0] = 0;
- break;
- case '2':
- p[0]++;
- m->vpar[0] = 2;
- break;
- case '?':
- p[0]++;
- isdn_tty_at_cout("\r\n0,2", info);
- break;
- default:
- PARSE_ERROR1;
- }
- break;
- default:
- PARSE_ERROR1;
- }
- break;
- case 3:
- /* AT+VRX - Start recording */
- if (!m->vpar[0])
- PARSE_ERROR1;
- if (info->online != 1) {
- isdn_tty_modem_result(RESULT_NO_ANSWER, info);
- return 1;
- }
- info->dtmf_state = isdn_audio_dtmf_init(info->dtmf_state);
- if (!info->dtmf_state) {
- printk(KERN_WARNING "isdn_tty: Couldn't malloc dtmf state\n");
- PARSE_ERROR1;
- }
- info->silence_state = isdn_audio_silence_init(info->silence_state);
- if (!info->silence_state) {
- printk(KERN_WARNING "isdn_tty: Couldn't malloc silence state\n");
- PARSE_ERROR1;
- }
- if (m->vpar[3] < 5) {
- info->adpcmr = isdn_audio_adpcm_init(info->adpcmr, m->vpar[3]);
- if (!info->adpcmr) {
- printk(KERN_WARNING "isdn_tty: Couldn't malloc adpcm state\n");
- PARSE_ERROR1;
- }
- }
-#ifdef ISDN_DEBUG_AT
- printk(KERN_DEBUG "AT: +VRX\n");
-#endif
- info->vonline |= 1;
- isdn_tty_modem_result(RESULT_CONNECT, info);
- return 0;
- break;
- case 4:
- /* AT+VSD - Silence detection */
- switch (*p[0]) {
- case '?':
- p[0]++;
- sprintf(rs, "\r\n<%d>,<%d>",
- m->vpar[1],
- m->vpar[2]);
- isdn_tty_at_cout(rs, info);
- break;
- case '=':
- p[0]++;
- if ((*p[0] >= '0') && (*p[0] <= '9')) {
- par1 = isdn_getnum(p);
- if ((par1 < 0) || (par1 > 31))
- PARSE_ERROR1;
- if (*p[0] != ',')
- PARSE_ERROR1;
- p[0]++;
- par2 = isdn_getnum(p);
- if ((par2 < 0) || (par2 > 255))
- PARSE_ERROR1;
- m->vpar[1] = par1;
- m->vpar[2] = par2;
- break;
- } else
- if (*p[0] == '?') {
- p[0]++;
- isdn_tty_at_cout("\r\n<0-31>,<0-255>",
- info);
- break;
- } else
- PARSE_ERROR1;
- break;
- default:
- PARSE_ERROR1;
- }
- break;
- case 5:
- /* AT+VSM - Select compression */
- switch (*p[0]) {
- case '?':
- p[0]++;
- sprintf(rs, "\r\n<%d>,<%d><8000>",
- m->vpar[3],
- m->vpar[1]);
- isdn_tty_at_cout(rs, info);
- break;
- case '=':
- p[0]++;
- switch (*p[0]) {
- case '2':
- case '3':
- case '4':
- case '5':
- case '6':
- par1 = isdn_getnum(p);
- if ((par1 < 2) || (par1 > 6))
- PARSE_ERROR1;
- m->vpar[3] = par1;
- break;
- case '?':
- p[0]++;
- isdn_tty_at_cout("\r\n2;ADPCM;2;0;(8000)\r\n",
- info);
- isdn_tty_at_cout("3;ADPCM;3;0;(8000)\r\n",
- info);
- isdn_tty_at_cout("4;ADPCM;4;0;(8000)\r\n",
- info);
- isdn_tty_at_cout("5;ALAW;8;0;(8000)\r\n",
- info);
- isdn_tty_at_cout("6;ULAW;8;0;(8000)\r\n",
- info);
- break;
- default:
- PARSE_ERROR1;
- }
- break;
- default:
- PARSE_ERROR1;
- }
- break;
- case 6:
- /* AT+VTX - Start sending */
- if (!m->vpar[0])
- PARSE_ERROR1;
- if (info->online != 1) {
- isdn_tty_modem_result(RESULT_NO_ANSWER, info);
- return 1;
- }
- info->dtmf_state = isdn_audio_dtmf_init(info->dtmf_state);
- if (!info->dtmf_state) {
- printk(KERN_WARNING "isdn_tty: Couldn't malloc dtmf state\n");
- PARSE_ERROR1;
- }
- if (m->vpar[3] < 5) {
- info->adpcms = isdn_audio_adpcm_init(info->adpcms, m->vpar[3]);
- if (!info->adpcms) {
- printk(KERN_WARNING "isdn_tty: Couldn't malloc adpcm state\n");
- PARSE_ERROR1;
- }
- }
-#ifdef ISDN_DEBUG_AT
- printk(KERN_DEBUG "AT: +VTX\n");
-#endif
- m->lastDLE = 0;
- info->vonline |= 2;
- isdn_tty_modem_result(RESULT_CONNECT, info);
- return 0;
- break;
- case 7:
- /* AT+VDD - DTMF detection */
- switch (*p[0]) {
- case '?':
- p[0]++;
- sprintf(rs, "\r\n<%d>,<%d>",
- m->vpar[4],
- m->vpar[5]);
- isdn_tty_at_cout(rs, info);
- break;
- case '=':
- p[0]++;
- if ((*p[0] >= '0') && (*p[0] <= '9')) {
- if (info->online != 1)
- PARSE_ERROR1;
- par1 = isdn_getnum(p);
- if ((par1 < 0) || (par1 > 15))
- PARSE_ERROR1;
- if (*p[0] != ',')
- PARSE_ERROR1;
- p[0]++;
- par2 = isdn_getnum(p);
- if ((par2 < 0) || (par2 > 255))
- PARSE_ERROR1;
- m->vpar[4] = par1;
- m->vpar[5] = par2;
- cmd.driver = info->isdn_driver;
- cmd.command = ISDN_CMD_AUDIO;
- cmd.arg = info->isdn_channel + (ISDN_AUDIO_SETDD << 8);
- cmd.parm.num[0] = par1;
- cmd.parm.num[1] = par2;
- isdn_command(&cmd);
- break;
- } else
- if (*p[0] == '?') {
- p[0]++;
- isdn_tty_at_cout("\r\n<0-15>,<0-255>",
- info);
- break;
- } else
- PARSE_ERROR1;
- break;
- default:
- PARSE_ERROR1;
- }
- break;
- default:
- PARSE_ERROR1;
- }
- return 0;
-}
-#endif /* CONFIG_ISDN_AUDIO */
-
-/*
- * Parse and perform an AT-command-line.
- */
-static void
-isdn_tty_parse_at(modem_info *info)
-{
- atemu *m = &info->emu;
- char *p;
- char ds[ISDN_MSNLEN];
-
-#ifdef ISDN_DEBUG_AT
- printk(KERN_DEBUG "AT: '%s'\n", m->mdmcmd);
-#endif
- for (p = &m->mdmcmd[2]; *p;) {
- switch (*p) {
- case ' ':
- p++;
- break;
- case 'A':
- /* A - Accept incoming call */
- p++;
- isdn_tty_cmd_ATA(info);
- return;
- case 'D':
- /* D - Dial */
- if (info->msr & UART_MSR_DCD)
- PARSE_ERROR;
- if (info->msr & UART_MSR_RI) {
- isdn_tty_modem_result(RESULT_NO_CARRIER, info);
- return;
- }
- isdn_tty_getdial(++p, ds, sizeof ds);
- p += strlen(p);
- if (!strlen(m->msn))
- isdn_tty_modem_result(RESULT_NO_MSN_EAZ, info);
- else if (strlen(ds))
- isdn_tty_dial(ds, info, m);
- else
- PARSE_ERROR;
- return;
- case 'E':
- /* E - Turn Echo on/off */
- p++;
- switch (isdn_getnum(&p)) {
- case 0:
- m->mdmreg[REG_ECHO] &= ~BIT_ECHO;
- break;
- case 1:
- m->mdmreg[REG_ECHO] |= BIT_ECHO;
- break;
- default:
- PARSE_ERROR;
- }
- break;
- case 'H':
- /* H - On/Off-hook */
- p++;
- switch (*p) {
- case '0':
- p++;
- isdn_tty_on_hook(info);
- break;
- case '1':
- p++;
- isdn_tty_off_hook();
- break;
- default:
- isdn_tty_on_hook(info);
- break;
- }
- break;
- case 'I':
- /* I - Information */
- p++;
- isdn_tty_at_cout("\r\nLinux ISDN", info);
- switch (*p) {
- case '0':
- case '1':
- p++;
- break;
- case '2':
- p++;
- isdn_tty_report(info);
- break;
- case '3':
- p++;
- snprintf(ds, sizeof(ds), "\r\n%d", info->emu.charge);
- isdn_tty_at_cout(ds, info);
- break;
- default:;
- }
- break;
-#ifdef DUMMY_HAYES_AT
- case 'L':
- case 'M':
-			/* only to be compliant with common scripts */
- /* no function */
- p++;
- isdn_getnum(&p);
- break;
-#endif
- case 'O':
- /* O - Go online */
- p++;
- if (info->msr & UART_MSR_DCD)
- /* if B-Channel is up */
- isdn_tty_modem_result((m->mdmreg[REG_L2PROT] == ISDN_PROTO_L2_MODEM) ? RESULT_CONNECT : RESULT_CONNECT64000, info);
- else
- isdn_tty_modem_result(RESULT_NO_CARRIER, info);
- return;
- case 'Q':
- /* Q - Turn Emulator messages on/off */
- p++;
- switch (isdn_getnum(&p)) {
- case 0:
- m->mdmreg[REG_RESP] |= BIT_RESP;
- break;
- case 1:
- m->mdmreg[REG_RESP] &= ~BIT_RESP;
- break;
- default:
- PARSE_ERROR;
- }
- break;
- case 'S':
- /* S - Set/Get Register */
- p++;
- if (isdn_tty_cmd_ATS(&p, info))
- return;
- break;
- case 'V':
- /* V - Numeric or ASCII Emulator-messages */
- p++;
- switch (isdn_getnum(&p)) {
- case 0:
- m->mdmreg[REG_RESP] |= BIT_RESPNUM;
- break;
- case 1:
- m->mdmreg[REG_RESP] &= ~BIT_RESPNUM;
- break;
- default:
- PARSE_ERROR;
- }
- break;
- case 'Z':
- /* Z - Load Registers from Profile */
- p++;
- if (info->msr & UART_MSR_DCD) {
- info->online = 0;
- isdn_tty_on_hook(info);
- }
- isdn_tty_modem_reset_regs(info, 1);
- break;
- case '+':
- p++;
- switch (*p) {
-#ifdef CONFIG_ISDN_AUDIO
- case 'F':
- p++;
- if (isdn_tty_cmd_PLUSF(&p, info))
- return;
- break;
- case 'V':
- if ((!(m->mdmreg[REG_SI1] & 1)) ||
- (m->mdmreg[REG_L2PROT] == ISDN_PROTO_L2_MODEM))
- PARSE_ERROR;
- p++;
- if (isdn_tty_cmd_PLUSV(&p, info))
- return;
- break;
-#endif /* CONFIG_ISDN_AUDIO */
- case 'S': /* SUSPEND */
- p++;
- isdn_tty_get_msnstr(ds, &p);
- isdn_tty_suspend(ds, info, m);
- break;
- case 'R': /* RESUME */
- p++;
- isdn_tty_get_msnstr(ds, &p);
- isdn_tty_resume(ds, info, m);
- break;
- case 'M': /* MESSAGE */
- p++;
- isdn_tty_send_msg(info, m, p);
- break;
- default:
- PARSE_ERROR;
- }
- break;
- case '&':
- p++;
- if (isdn_tty_cmd_ATand(&p, info))
- return;
- break;
- default:
- PARSE_ERROR;
- }
- }
-#ifdef CONFIG_ISDN_AUDIO
- if (!info->vonline)
-#endif
- isdn_tty_modem_result(RESULT_OK, info);
-}
-
-/* We need our own toupper() because the standard toupper() is not
- * available within modules.
- */
-#define my_toupper(c) (((c >= 'a') && (c <= 'z')) ? (c & 0xdf) : c)
-
-/*
- * Perform line-editing of AT-commands
- *
- * Parameters:
- *   p      input buffer
- *   count  length of buffer
- *   info   modem_info of the line (minor device)
- */
-static int
-isdn_tty_edit_at(const char *p, int count, modem_info *info)
-{
- atemu *m = &info->emu;
- int total = 0;
- u_char c;
- char eb[2];
- int cnt;
-
- for (cnt = count; cnt > 0; p++, cnt--) {
- c = *p;
- total++;
- if (c == m->mdmreg[REG_CR] || c == m->mdmreg[REG_LF]) {
- /* Separator (CR or LF) */
- m->mdmcmd[m->mdmcmdl] = 0;
- if (m->mdmreg[REG_ECHO] & BIT_ECHO) {
- eb[0] = c;
- eb[1] = 0;
- isdn_tty_at_cout(eb, info);
- }
- if ((m->mdmcmdl >= 2) && (!(strncmp(m->mdmcmd, "AT", 2))))
- isdn_tty_parse_at(info);
- m->mdmcmdl = 0;
- continue;
- }
- if (c == m->mdmreg[REG_BS] && m->mdmreg[REG_BS] < 128) {
- /* Backspace-Function */
- if ((m->mdmcmdl > 2) || (!m->mdmcmdl)) {
- if (m->mdmcmdl)
- m->mdmcmdl--;
- if (m->mdmreg[REG_ECHO] & BIT_ECHO)
- isdn_tty_at_cout("\b", info);
- }
- continue;
- }
- if (cmdchar(c)) {
- if (m->mdmreg[REG_ECHO] & BIT_ECHO) {
- eb[0] = c;
- eb[1] = 0;
- isdn_tty_at_cout(eb, info);
- }
- if (m->mdmcmdl < 255) {
- c = my_toupper(c);
- switch (m->mdmcmdl) {
- case 1:
- if (c == 'T') {
- m->mdmcmd[m->mdmcmdl] = c;
- m->mdmcmd[++m->mdmcmdl] = 0;
- break;
- } else
- m->mdmcmdl = 0;
- /* Fall through - check for 'A' */
- case 0:
- if (c == 'A') {
- m->mdmcmd[m->mdmcmdl] = c;
- m->mdmcmd[++m->mdmcmdl] = 0;
- }
- break;
- default:
- m->mdmcmd[m->mdmcmdl] = c;
- m->mdmcmd[++m->mdmcmdl] = 0;
- }
- }
- }
- }
- return total;
-}
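The accumulator above only hands a line to isdn_tty_parse_at() once it starts with "AT": the first stored character must be 'A', the second 'T', and anything else restarts the scan. A minimal userspace sketch of that gating logic follows; the buffer size, the feed() helper and the CR terminator handling are illustrative assumptions, not part of the original driver.

#include <ctype.h>
#include <stdio.h>
#include <string.h>

static char cmdbuf[256];	/* hypothetical accumulator, not the driver's */
static size_t cmdlen;

/* Feed one input character; return 1 when a line starting with "AT"
 * has been terminated by CR, 0 otherwise. */
static int feed(int c)
{
	c = toupper((unsigned char)c);
	if (c == '\r') {
		int complete = (cmdlen >= 2 && !strncmp(cmdbuf, "AT", 2));
		cmdbuf[cmdlen] = '\0';
		cmdlen = 0;
		return complete;
	}
	if (cmdlen == 0) {
		if (c == 'A')
			cmdbuf[cmdlen++] = c;		/* wait for the leading 'A' */
	} else if (cmdlen == 1) {
		if (c == 'T')
			cmdbuf[cmdlen++] = c;		/* "AT" prefix complete */
		else
			cmdlen = (c == 'A') ? 1 : 0;	/* restart, possibly on this 'A' */
	} else if (cmdlen < sizeof(cmdbuf) - 1) {
		cmdbuf[cmdlen++] = c;			/* rest of the command line */
	}
	return 0;
}

int main(void)
{
	const char *in = "xxAT&F\r";
	const char *s;

	for (s = in; *s; s++)
		if (feed(*s))
			printf("complete command: %s\n", cmdbuf);	/* prints AT&F */
	return 0;
}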
-
-/*
- * Switch all modem channels that are online and received a valid
- * escape sequence 1.5 seconds ago back to command mode.
- * This function is called every second via timer-interrupt from within
- * timer-dispatcher isdn_timer_function()
- */
-void
-isdn_tty_modem_escape(void)
-{
- int ton = 0;
- int i;
- int midx;
-
- for (i = 0; i < ISDN_MAX_CHANNELS; i++)
- if (USG_MODEM(dev->usage[i]) && (midx = dev->m_idx[i]) >= 0) {
- modem_info *info = &dev->mdm.info[midx];
- if (info->online) {
- ton = 1;
- if ((info->emu.pluscount == 3) &&
- time_after(jiffies,
- info->emu.lastplus + PLUSWAIT2)) {
- info->emu.pluscount = 0;
- info->online = 0;
- isdn_tty_modem_result(RESULT_OK, info);
- }
- }
- }
- isdn_timer_ctrl(ISDN_TIMER_MODEMPLUS, ton);
-}
-
-/*
- * Send a RING message to all modem channels that have the RI bit set.
- * This function is called every second via timer-interrupt from within
- * timer-dispatcher isdn_timer_function()
- */
-void
-isdn_tty_modem_ring(void)
-{
- int ton = 0;
- int i;
-
- for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
- modem_info *info = &dev->mdm.info[i];
- if (info->msr & UART_MSR_RI) {
- ton = 1;
- isdn_tty_modem_result(RESULT_RING, info);
- }
- }
- isdn_timer_ctrl(ISDN_TIMER_MODEMRING, ton);
-}
-
-/*
- * For all online ttys, try sending data to
- * the lower levels.
- */
-void
-isdn_tty_modem_xmit(void)
-{
- int ton = 1;
- int i;
-
- for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
- modem_info *info = &dev->mdm.info[i];
- if (info->online) {
- ton = 1;
- isdn_tty_senddown(info);
- isdn_tty_tint(info);
- }
- }
- isdn_timer_ctrl(ISDN_TIMER_MODEMXMIT, ton);
-}
-
-/*
- * Check all channels for a 'no carrier' timeout.
- * The timeout value is set by register S7.
- */
-void
-isdn_tty_carrier_timeout(void)
-{
- int ton = 0;
- int i;
-
- for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
- modem_info *info = &dev->mdm.info[i];
- if (!info->dialing)
- continue;
- if (info->emu.carrierwait++ > info->emu.mdmreg[REG_WAITC]) {
- info->dialing = 0;
- isdn_tty_modem_result(RESULT_NO_CARRIER, info);
- isdn_tty_modem_hup(info, 1);
- } else
- ton = 1;
- }
- isdn_timer_ctrl(ISDN_TIMER_CARRIER, ton);
-}
diff --git a/drivers/isdn/i4l/isdn_tty.h b/drivers/isdn/i4l/isdn_tty.h
deleted file mode 100644
index a6f801d2263b..000000000000
--- a/drivers/isdn/i4l/isdn_tty.h
+++ /dev/null
@@ -1,120 +0,0 @@
-/* $Id: isdn_tty.h,v 1.1.2.2 2004/01/12 22:37:19 keil Exp $
- *
- * header for Linux ISDN subsystem, tty related functions (linklevel).
- *
- * Copyright 1994-1999 by Fritz Elfert (fritz@isdn4linux.de)
- * Copyright 1995,96 by Thinking Objects Software GmbH Wuerzburg
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-
-#define DLE 0x10
-#define ETX 0x03
-#define DC4 0x14
-
-
-/*
- * Definition of some special Registers of AT-Emulator
- */
-#define REG_RINGATA 0
-#define REG_RINGCNT 1 /* ring counter register */
-#define REG_ESC 2
-#define REG_CR 3
-#define REG_LF 4
-#define REG_BS 5
-
-#define REG_WAITC 7
-
-#define REG_RESP 12 /* show response messages register */
-#define BIT_RESP 1 /* show response messages bit */
-#define REG_RESPNUM 12 /* show numeric responses register */
-#define BIT_RESPNUM 2 /* show numeric responses bit */
-#define REG_ECHO 12
-#define BIT_ECHO 4
-#define REG_DCD 12
-#define BIT_DCD 8
-#define REG_CTS 12
-#define BIT_CTS 16
-#define REG_DTRR 12
-#define BIT_DTRR 32
-#define REG_DSR 12
-#define BIT_DSR 64
-#define REG_CPPP 12
-#define BIT_CPPP 128
-
-#define REG_DXMT 13
-#define BIT_DXMT 1
-#define REG_T70 13
-#define BIT_T70 2
-#define BIT_T70_EXT 32
-#define REG_DTRHUP 13
-#define BIT_DTRHUP 4
-#define REG_RESPXT 13
-#define BIT_RESPXT 8
-#define REG_CIDONCE 13
-#define BIT_CIDONCE 16
-#define REG_RUNG 13 /* show RUNG message register */
-#define BIT_RUNG 64 /* show RUNG message bit */
-#define REG_DISPLAY 13
-#define BIT_DISPLAY 128
-
-#define REG_L2PROT 14
-#define REG_L3PROT 15
-#define REG_PSIZE 16
-#define REG_WSIZE 17
-#define REG_SI1 18
-#define REG_SI2 19
-#define REG_SI1I 20
-#define REG_PLAN 21
-#define REG_SCREEN 22
-
-#define REG_CPN 23
-#define BIT_CPN 1
-#define REG_CPNFCON 23
-#define BIT_CPNFCON 2
-#define REG_CDN 23
-#define BIT_CDN 4
-
-/* defines for result codes */
-#define RESULT_OK 0
-#define RESULT_CONNECT 1
-#define RESULT_RING 2
-#define RESULT_NO_CARRIER 3
-#define RESULT_ERROR 4
-#define RESULT_CONNECT64000 5
-#define RESULT_NO_DIALTONE 6
-#define RESULT_BUSY 7
-#define RESULT_NO_ANSWER 8
-#define RESULT_RINGING 9
-#define RESULT_NO_MSN_EAZ 10
-#define RESULT_VCON 11
-#define RESULT_RUNG 12
-
-#define TTY_IS_FCLASS1(info) \
- ((info->emu.mdmreg[REG_L2PROT] == ISDN_PROTO_L2_FAX) && \
- (info->emu.mdmreg[REG_L3PROT] == ISDN_PROTO_L3_FCLASS1))
-#define TTY_IS_FCLASS2(info) \
- ((info->emu.mdmreg[REG_L2PROT] == ISDN_PROTO_L2_FAX) && \
- (info->emu.mdmreg[REG_L3PROT] == ISDN_PROTO_L3_FCLASS2))
-
-extern void isdn_tty_modem_escape(void);
-extern void isdn_tty_modem_ring(void);
-extern void isdn_tty_carrier_timeout(void);
-extern void isdn_tty_modem_xmit(void);
-extern int isdn_tty_modem_init(void);
-extern void isdn_tty_exit(void);
-extern void isdn_tty_readmodem(void);
-extern int isdn_tty_find_icall(int, int, setup_parm *);
-extern int isdn_tty_stat_callback(int, isdn_ctrl *);
-extern int isdn_tty_rcv_skb(int, int, int, struct sk_buff *);
-extern int isdn_tty_capi_facility(capi_msg *cm);
-extern void isdn_tty_at_cout(char *, modem_info *);
-extern void isdn_tty_modem_hup(modem_info *, int);
-#ifdef CONFIG_ISDN_TTY_FAX
-extern int isdn_tty_cmd_PLUSF_FAX(char **, modem_info *);
-extern int isdn_tty_fax_command(modem_info *, isdn_ctrl *);
-extern void isdn_tty_fax_bitorder(modem_info *, struct sk_buff *);
-#endif
diff --git a/drivers/isdn/i4l/isdn_ttyfax.c b/drivers/isdn/i4l/isdn_ttyfax.c
deleted file mode 100644
index 47aae4916730..000000000000
--- a/drivers/isdn/i4l/isdn_ttyfax.c
+++ /dev/null
@@ -1,1123 +0,0 @@
-/* $Id: isdn_ttyfax.c,v 1.1.2.2 2004/01/12 22:37:19 keil Exp $
- *
- * Linux ISDN subsystem, tty_fax AT-command emulator (linklevel).
- *
- * Copyright 1999 by Armin Schindler (mac@melware.de)
- * Copyright 1999 by Ralf Spachmann (mel@melware.de)
- * Copyright 1999 by Cytronics & Melware
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#undef ISDN_TTY_FAX_STAT_DEBUG
-#undef ISDN_TTY_FAX_CMD_DEBUG
-
-#include <linux/isdn.h>
-#include "isdn_common.h"
-#include "isdn_tty.h"
-#include "isdn_ttyfax.h"
-
-
-static char *isdn_tty_fax_revision = "$Revision: 1.1.2.2 $";
-
-#define PARSE_ERROR1 { isdn_tty_fax_modem_result(1, info); return 1; }
-
-static char *
-isdn_getrev(const char *revision)
-{
- char *rev;
- char *p;
-
- if ((p = strchr(revision, ':'))) {
- rev = p + 2;
- p = strchr(rev, '$');
- *--p = 0;
- } else
- rev = "???";
- return rev;
-}
-
-/*
- * Fax Class 2 Modem results
- *
- */
-
-static void
-isdn_tty_fax_modem_result(int code, modem_info *info)
-{
- atemu *m = &info->emu;
- T30_s *f = info->fax;
- char rs[50];
- char rss[50];
- char *rp;
- int i;
- static char *msg[] =
- {"OK", "ERROR", "+FCON", "+FCSI:", "+FDIS:",
- "+FHNG:", "+FDCS:", "CONNECT", "+FTSI:",
- "+FCFR", "+FPTS:", "+FET:"};
-
-
- isdn_tty_at_cout("\r\n", info);
- isdn_tty_at_cout(msg[code], info);
-
-#ifdef ISDN_TTY_FAX_CMD_DEBUG
- printk(KERN_DEBUG "isdn_tty: Fax send %s on ttyI%d\n",
- msg[code], info->line);
-#endif
- switch (code) {
- case 0: /* OK */
- break;
- case 1: /* ERROR */
- break;
- case 2: /* +FCON */
- /* Append CPN, if enabled */
- if ((m->mdmreg[REG_CPNFCON] & BIT_CPNFCON) &&
- (!(dev->usage[info->isdn_channel] & ISDN_USAGE_OUTGOING))) {
- sprintf(rs, "/%s", m->cpn);
- isdn_tty_at_cout(rs, info);
- }
- info->online = 1;
- f->fet = 0;
- if (f->phase == ISDN_FAX_PHASE_A)
- f->phase = ISDN_FAX_PHASE_B;
- break;
- case 3: /* +FCSI */
- case 8: /* +FTSI */
- sprintf(rs, "\"%s\"", f->r_id);
- isdn_tty_at_cout(rs, info);
- break;
- case 4: /* +FDIS */
- rs[0] = 0;
- rp = &f->r_resolution;
- for (i = 0; i < 8; i++) {
- sprintf(rss, "%c%s", rp[i] + 48,
- (i < 7) ? "," : "");
- strcat(rs, rss);
- }
- isdn_tty_at_cout(rs, info);
-#ifdef ISDN_TTY_FAX_CMD_DEBUG
- printk(KERN_DEBUG "isdn_tty: Fax DIS=%s on ttyI%d\n",
- rs, info->line);
-#endif
- break;
- case 5: /* +FHNG */
- sprintf(rs, "%d", f->code);
- isdn_tty_at_cout(rs, info);
- info->faxonline = 0;
- break;
- case 6: /* +FDCS */
- rs[0] = 0;
- rp = &f->r_resolution;
- for (i = 0; i < 8; i++) {
- sprintf(rss, "%c%s", rp[i] + 48,
- (i < 7) ? "," : "");
- strcat(rs, rss);
- }
- isdn_tty_at_cout(rs, info);
-#ifdef ISDN_TTY_FAX_CMD_DEBUG
- printk(KERN_DEBUG "isdn_tty: Fax DCS=%s on ttyI%d\n",
- rs, info->line);
-#endif
- break;
- case 7: /* CONNECT */
- info->faxonline |= 2;
- break;
- case 9: /* FCFR */
- break;
- case 10: /* FPTS */
- isdn_tty_at_cout("1", info);
- break;
- case 11: /* FET */
- sprintf(rs, "%d", f->fet);
- isdn_tty_at_cout(rs, info);
- break;
- }
-
- isdn_tty_at_cout("\r\n", info);
-
- switch (code) {
- case 7: /* CONNECT */
- info->online = 2;
- if (info->faxonline & 1) {
- sprintf(rs, "%c", XON);
- isdn_tty_at_cout(rs, info);
- }
- break;
- }
-}
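The +FDIS/+FDCS branches above turn the eight T.30 session parameters into a comma-separated digit string by adding 48 ('0') to each value. A standalone sketch of just that formatting step, with made-up parameter values:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Eight made-up T.30 parameter values (vr,br,wd,ln,df,ec,bf,st). */
	unsigned char rp[8] = { 0, 5, 0, 2, 0, 0, 0, 0 };
	char rs[32] = "";
	char rss[4];
	int i;

	for (i = 0; i < 8; i++) {
		sprintf(rss, "%c%s", rp[i] + '0', (i < 7) ? "," : "");
		strcat(rs, rss);
	}
	printf("%s\n", rs);	/* -> 0,5,0,2,0,0,0,0 */
	return 0;
}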
-
-static int
-isdn_tty_fax_command1(modem_info *info, isdn_ctrl *c)
-{
- static char *msg[] =
- {"OK", "CONNECT", "NO CARRIER", "ERROR", "FCERROR"};
-
-#ifdef ISDN_TTY_FAX_CMD_DEBUG
- printk(KERN_DEBUG "isdn_tty: FCLASS1 cmd(%d)\n", c->parm.aux.cmd);
-#endif
- if (c->parm.aux.cmd < ISDN_FAX_CLASS1_QUERY) {
- if (info->online)
- info->online = 1;
- isdn_tty_at_cout("\r\n", info);
- isdn_tty_at_cout(msg[c->parm.aux.cmd], info);
- isdn_tty_at_cout("\r\n", info);
- }
- switch (c->parm.aux.cmd) {
- case ISDN_FAX_CLASS1_CONNECT:
- info->online = 2;
- break;
- case ISDN_FAX_CLASS1_OK:
- case ISDN_FAX_CLASS1_FCERROR:
- case ISDN_FAX_CLASS1_ERROR:
- case ISDN_FAX_CLASS1_NOCARR:
- break;
- case ISDN_FAX_CLASS1_QUERY:
- isdn_tty_at_cout("\r\n", info);
- if (!c->parm.aux.para[0]) {
- isdn_tty_at_cout(msg[ISDN_FAX_CLASS1_ERROR], info);
- isdn_tty_at_cout("\r\n", info);
- } else {
- isdn_tty_at_cout(c->parm.aux.para, info);
- isdn_tty_at_cout("\r\nOK\r\n", info);
- }
- break;
- }
- return (0);
-}
-
-int
-isdn_tty_fax_command(modem_info *info, isdn_ctrl *c)
-{
- T30_s *f = info->fax;
- char rs[10];
-
- if (TTY_IS_FCLASS1(info))
- return (isdn_tty_fax_command1(info, c));
-
-#ifdef ISDN_TTY_FAX_CMD_DEBUG
- printk(KERN_DEBUG "isdn_tty: Fax cmd %d on ttyI%d\n",
- f->r_code, info->line);
-#endif
- switch (f->r_code) {
- case ISDN_TTY_FAX_FCON:
- info->faxonline = 1;
- isdn_tty_fax_modem_result(2, info); /* +FCON */
- return (0);
- case ISDN_TTY_FAX_FCON_I:
- info->faxonline = 16;
- isdn_tty_fax_modem_result(2, info); /* +FCON */
- return (0);
- case ISDN_TTY_FAX_RID:
- if (info->faxonline & 1)
- isdn_tty_fax_modem_result(3, info); /* +FCSI */
- if (info->faxonline & 16)
- isdn_tty_fax_modem_result(8, info); /* +FTSI */
- return (0);
- case ISDN_TTY_FAX_DIS:
- isdn_tty_fax_modem_result(4, info); /* +FDIS */
- return (0);
- case ISDN_TTY_FAX_HNG:
- if (f->phase == ISDN_FAX_PHASE_C) {
- if (f->direction == ISDN_TTY_FAX_CONN_IN) {
- sprintf(rs, "%c%c", DLE, ETX);
- isdn_tty_at_cout(rs, info);
- } else {
- sprintf(rs, "%c", 0x18);
- isdn_tty_at_cout(rs, info);
- }
- info->faxonline &= ~2; /* leave data mode */
- info->online = 1;
- }
- f->phase = ISDN_FAX_PHASE_E;
- isdn_tty_fax_modem_result(5, info); /* +FHNG */
- isdn_tty_fax_modem_result(0, info); /* OK */
- return (0);
- case ISDN_TTY_FAX_DCS:
- isdn_tty_fax_modem_result(6, info); /* +FDCS */
- isdn_tty_fax_modem_result(7, info); /* CONNECT */
- f->phase = ISDN_FAX_PHASE_C;
- return (0);
- case ISDN_TTY_FAX_TRAIN_OK:
- isdn_tty_fax_modem_result(6, info); /* +FDCS */
- isdn_tty_fax_modem_result(0, info); /* OK */
- return (0);
- case ISDN_TTY_FAX_SENT:
- isdn_tty_fax_modem_result(0, info); /* OK */
- return (0);
- case ISDN_TTY_FAX_CFR:
- isdn_tty_fax_modem_result(9, info); /* +FCFR */
- return (0);
- case ISDN_TTY_FAX_ET:
- sprintf(rs, "%c%c", DLE, ETX);
- isdn_tty_at_cout(rs, info);
- isdn_tty_fax_modem_result(10, info); /* +FPTS */
- isdn_tty_fax_modem_result(11, info); /* +FET */
- isdn_tty_fax_modem_result(0, info); /* OK */
- info->faxonline &= ~2; /* leave data mode */
- info->online = 1;
- f->phase = ISDN_FAX_PHASE_D;
- return (0);
- case ISDN_TTY_FAX_PTS:
- isdn_tty_fax_modem_result(10, info); /* +FPTS */
- if (f->direction == ISDN_TTY_FAX_CONN_OUT) {
- if (f->fet == 1)
- f->phase = ISDN_FAX_PHASE_B;
- if (f->fet == 0)
- isdn_tty_fax_modem_result(0, info); /* OK */
- }
- return (0);
- case ISDN_TTY_FAX_EOP:
- info->faxonline &= ~2; /* leave data mode */
- info->online = 1;
- f->phase = ISDN_FAX_PHASE_D;
- return (0);
-
- }
- return (-1);
-}
-
-
-void
-isdn_tty_fax_bitorder(modem_info *info, struct sk_buff *skb)
-{
- __u8 LeftMask;
- __u8 RightMask;
- __u8 fBit;
- __u8 Data;
- int i;
-
- if (!info->fax->bor) {
- for (i = 0; i < skb->len; i++) {
- Data = skb->data[i];
- for (
- LeftMask = 0x80, RightMask = 0x01;
- LeftMask > RightMask;
- LeftMask >>= 1, RightMask <<= 1
- ) {
- fBit = (Data & LeftMask);
- if (Data & RightMask)
- Data |= LeftMask;
- else
- Data &= ~LeftMask;
- if (fBit)
- Data |= RightMask;
- else
- Data &= ~RightMask;
-
- }
- skb->data[i] = Data;
- }
- }
-}
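When bit order 0 (direct) is selected, the LeftMask/RightMask loop above mirrors the bit order of every byte of Phase C data in place. A terser userspace equivalent of that per-byte reversal, shown here only as an illustration:

#include <stdio.h>

/* Reverse the bit order of one byte; equivalent to one pass of the
 * pairwise LeftMask/RightMask swap above. */
static unsigned char reverse_bits(unsigned char d)
{
	unsigned char out = 0;
	int i;

	for (i = 0; i < 8; i++)
		if (d & (1u << i))
			out |= 1u << (7 - i);
	return out;
}

int main(void)
{
	printf("%02x\n", reverse_bits(0x2c));	/* 00101100 -> 00110100 = 0x34 */
	return 0;
}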
-
-/*
- * Parse AT+F.. FAX class 1 commands
- */
-
-static int
-isdn_tty_cmd_FCLASS1(char **p, modem_info *info)
-{
- static char *cmd[] =
- {"AE", "TS", "RS", "TM", "RM", "TH", "RH"};
- isdn_ctrl c;
- int par, i;
- u_long flags;
-
- for (c.parm.aux.cmd = 0; c.parm.aux.cmd < 7; c.parm.aux.cmd++)
- if (!strncmp(p[0], cmd[c.parm.aux.cmd], 2))
- break;
-
-#ifdef ISDN_TTY_FAX_CMD_DEBUG
- printk(KERN_DEBUG "isdn_tty_cmd_FCLASS1 (%s,%d)\n", p[0], c.parm.aux.cmd);
-#endif
- if (c.parm.aux.cmd == 7)
- PARSE_ERROR1;
-
- p[0] += 2;
- switch (*p[0]) {
- case '?':
- p[0]++;
- c.parm.aux.subcmd = AT_QUERY;
- break;
- case '=':
- p[0]++;
- if (*p[0] == '?') {
- p[0]++;
- c.parm.aux.subcmd = AT_EQ_QUERY;
- } else {
- par = isdn_getnum(p);
- if ((par < 0) || (par > 255))
- PARSE_ERROR1;
- c.parm.aux.subcmd = AT_EQ_VALUE;
- c.parm.aux.para[0] = par;
- }
- break;
- case 0:
- c.parm.aux.subcmd = AT_COMMAND;
- break;
- default:
- PARSE_ERROR1;
- }
- c.command = ISDN_CMD_FAXCMD;
-#ifdef ISDN_TTY_FAX_CMD_DEBUG
- printk(KERN_DEBUG "isdn_tty_cmd_FCLASS1 %d/%d/%d)\n",
- c.parm.aux.cmd, c.parm.aux.subcmd, c.parm.aux.para[0]);
-#endif
- if (info->isdn_driver < 0) {
- if ((c.parm.aux.subcmd == AT_EQ_VALUE) ||
- (c.parm.aux.subcmd == AT_COMMAND)) {
- PARSE_ERROR1;
- }
- spin_lock_irqsave(&dev->lock, flags);
- /* get a temporary connection to the first free fax driver */
- i = isdn_get_free_channel(ISDN_USAGE_FAX, ISDN_PROTO_L2_FAX,
- ISDN_PROTO_L3_FCLASS1, -1, -1, "00");
- if (i < 0) {
- spin_unlock_irqrestore(&dev->lock, flags);
- PARSE_ERROR1;
- }
- info->isdn_driver = dev->drvmap[i];
- info->isdn_channel = dev->chanmap[i];
- info->drv_index = i;
- dev->m_idx[i] = info->line;
- spin_unlock_irqrestore(&dev->lock, flags);
- c.driver = info->isdn_driver;
- c.arg = info->isdn_channel;
- isdn_command(&c);
- spin_lock_irqsave(&dev->lock, flags);
- isdn_free_channel(info->isdn_driver, info->isdn_channel,
- ISDN_USAGE_FAX);
- info->isdn_driver = -1;
- info->isdn_channel = -1;
- if (info->drv_index >= 0) {
- dev->m_idx[info->drv_index] = -1;
- info->drv_index = -1;
- }
- spin_unlock_irqrestore(&dev->lock, flags);
- } else {
- c.driver = info->isdn_driver;
- c.arg = info->isdn_channel;
- isdn_command(&c);
- }
- return 1;
-}
-
-/*
- * Parse AT+F.. FAX class 2 commands
- */
-
-static int
-isdn_tty_cmd_FCLASS2(char **p, modem_info *info)
-{
- atemu *m = &info->emu;
- T30_s *f = info->fax;
- isdn_ctrl cmd;
- int par;
- char rs[50];
- char rss[50];
- int maxdccval[] =
- {1, 5, 2, 2, 3, 2, 0, 7};
-
- /* FAA still unchanged */
- if (!strncmp(p[0], "AA", 2)) { /* TODO */
- p[0] += 2;
- switch (*p[0]) {
- case '?':
- p[0]++;
- sprintf(rs, "\r\n%d", 0);
- isdn_tty_at_cout(rs, info);
- break;
- case '=':
- p[0]++;
- par = isdn_getnum(p);
- if ((par < 0) || (par > 255))
- PARSE_ERROR1;
- break;
- default:
- PARSE_ERROR1;
- }
- return 0;
- }
-	/* BADLIN=value - dummy, 0=error checking disabled, 1-255=number of bad lines that mark a page as bad */
- if (!strncmp(p[0], "BADLIN", 6)) {
- p[0] += 6;
- switch (*p[0]) {
- case '?':
- p[0]++;
- sprintf(rs, "\r\n%d", f->badlin);
- isdn_tty_at_cout(rs, info);
- break;
- case '=':
- p[0]++;
- if (*p[0] == '?') {
- p[0]++;
- sprintf(rs, "\r\n0-255");
- isdn_tty_at_cout(rs, info);
- } else {
- par = isdn_getnum(p);
- if ((par < 0) || (par > 255))
- PARSE_ERROR1;
- f->badlin = par;
-#ifdef ISDN_TTY_FAX_STAT_DEBUG
- printk(KERN_DEBUG "isdn_tty: Fax FBADLIN=%d\n", par);
-#endif
- }
- break;
- default:
- PARSE_ERROR1;
- }
- return 0;
- }
-	/* BADMUL=value - dummy, 0=error checking disabled (threshold multiplier) */
- if (!strncmp(p[0], "BADMUL", 6)) {
- p[0] += 6;
- switch (*p[0]) {
- case '?':
- p[0]++;
- sprintf(rs, "\r\n%d", f->badmul);
- isdn_tty_at_cout(rs, info);
- break;
- case '=':
- p[0]++;
- if (*p[0] == '?') {
- p[0]++;
- sprintf(rs, "\r\n0-255");
- isdn_tty_at_cout(rs, info);
- } else {
- par = isdn_getnum(p);
- if ((par < 0) || (par > 255))
- PARSE_ERROR1;
- f->badmul = par;
-#ifdef ISDN_TTY_FAX_STAT_DEBUG
- printk(KERN_DEBUG "isdn_tty: Fax FBADMUL=%d\n", par);
-#endif
- }
- break;
- default:
- PARSE_ERROR1;
- }
- return 0;
- }
- /* BOR=n - Phase C bit order, 0=direct, 1=reverse */
- if (!strncmp(p[0], "BOR", 3)) {
- p[0] += 3;
- switch (*p[0]) {
- case '?':
- p[0]++;
- sprintf(rs, "\r\n%d", f->bor);
- isdn_tty_at_cout(rs, info);
- break;
- case '=':
- p[0]++;
- if (*p[0] == '?') {
- p[0]++;
- sprintf(rs, "\r\n0,1");
- isdn_tty_at_cout(rs, info);
- } else {
- par = isdn_getnum(p);
- if ((par < 0) || (par > 1))
- PARSE_ERROR1;
- f->bor = par;
-#ifdef ISDN_TTY_FAX_STAT_DEBUG
- printk(KERN_DEBUG "isdn_tty: Fax FBOR=%d\n", par);
-#endif
- }
- break;
- default:
- PARSE_ERROR1;
- }
- return 0;
- }
- /* NBC=n - No Best Capabilities */
- if (!strncmp(p[0], "NBC", 3)) {
- p[0] += 3;
- switch (*p[0]) {
- case '?':
- p[0]++;
- sprintf(rs, "\r\n%d", f->nbc);
- isdn_tty_at_cout(rs, info);
- break;
- case '=':
- p[0]++;
- if (*p[0] == '?') {
- p[0]++;
- sprintf(rs, "\r\n0,1");
- isdn_tty_at_cout(rs, info);
- } else {
- par = isdn_getnum(p);
- if ((par < 0) || (par > 1))
- PARSE_ERROR1;
- f->nbc = par;
-#ifdef ISDN_TTY_FAX_STAT_DEBUG
- printk(KERN_DEBUG "isdn_tty: Fax FNBC=%d\n", par);
-#endif
- }
- break;
- default:
- PARSE_ERROR1;
- }
- return 0;
- }
- /* BUF? - Readonly buffersize readout */
- if (!strncmp(p[0], "BUF?", 4)) {
- p[0] += 4;
-#ifdef ISDN_TTY_FAX_STAT_DEBUG
- printk(KERN_DEBUG "isdn_tty: Fax FBUF? (%d) \n", (16 * m->mdmreg[REG_PSIZE]));
-#endif
- p[0]++;
- sprintf(rs, "\r\n %d ", (16 * m->mdmreg[REG_PSIZE]));
- isdn_tty_at_cout(rs, info);
- return 0;
- }
- /* CIG=string - local fax station id string for polling rx */
- if (!strncmp(p[0], "CIG", 3)) {
- int i, r;
- p[0] += 3;
- switch (*p[0]) {
- case '?':
- p[0]++;
- sprintf(rs, "\r\n\"%s\"", f->pollid);
- isdn_tty_at_cout(rs, info);
- break;
- case '=':
- p[0]++;
- if (*p[0] == '?') {
- p[0]++;
- sprintf(rs, "\r\n\"STRING\"");
- isdn_tty_at_cout(rs, info);
- } else {
- if (*p[0] == '"')
- p[0]++;
- for (i = 0; (*p[0]) && i < (FAXIDLEN - 1) && (*p[0] != '"'); i++) {
- f->pollid[i] = *p[0]++;
- }
- if (*p[0] == '"')
- p[0]++;
- for (r = i; r < FAXIDLEN; r++) {
- f->pollid[r] = 32;
- }
- f->pollid[FAXIDLEN - 1] = 0;
-#ifdef ISDN_TTY_FAX_STAT_DEBUG
- printk(KERN_DEBUG "isdn_tty: Fax local poll ID rx \"%s\"\n", f->pollid);
-#endif
- }
- break;
- default:
- PARSE_ERROR1;
- }
- return 0;
- }
-	/* CQ=n - copy quality checking, 0=no checking, 1=1D checking only, 2=1D+2D checking */
- if (!strncmp(p[0], "CQ", 2)) {
- p[0] += 2;
- switch (*p[0]) {
- case '?':
- p[0]++;
- sprintf(rs, "\r\n%d", f->cq);
- isdn_tty_at_cout(rs, info);
- break;
- case '=':
- p[0]++;
- if (*p[0] == '?') {
- p[0]++;
- sprintf(rs, "\r\n0,1,2");
- isdn_tty_at_cout(rs, info);
- } else {
- par = isdn_getnum(p);
- if ((par < 0) || (par > 2))
- PARSE_ERROR1;
- f->cq = par;
-#ifdef ISDN_TTY_FAX_STAT_DEBUG
- printk(KERN_DEBUG "isdn_tty: Fax FCQ=%d\n", par);
-#endif
- }
- break;
- default:
- PARSE_ERROR1;
- }
- return 0;
- }
-	/* CR=n - can receive? 0=cannot receive data or poll a remote device, 1=can receive data or poll a remote device */
- if (!strncmp(p[0], "CR", 2)) {
- p[0] += 2;
- switch (*p[0]) {
- case '?':
- p[0]++;
- sprintf(rs, "\r\n%d", f->cr); /* read actual value from struct and print */
- isdn_tty_at_cout(rs, info);
- break;
- case '=':
- p[0]++;
- if (*p[0] == '?') {
- p[0]++;
- sprintf(rs, "\r\n0,1"); /* display online help */
- isdn_tty_at_cout(rs, info);
- } else {
- par = isdn_getnum(p);
- if ((par < 0) || (par > 1))
- PARSE_ERROR1;
- f->cr = par;
-#ifdef ISDN_TTY_FAX_STAT_DEBUG
- printk(KERN_DEBUG "isdn_tty: Fax FCR=%d\n", par);
-#endif
- }
- break;
- default:
- PARSE_ERROR1;
- }
- return 0;
- }
- /* CTCRTY=value - ECM retry count */
- if (!strncmp(p[0], "CTCRTY", 6)) {
- p[0] += 6;
- switch (*p[0]) {
- case '?':
- p[0]++;
- sprintf(rs, "\r\n%d", f->ctcrty);
- isdn_tty_at_cout(rs, info);
- break;
- case '=':
- p[0]++;
- if (*p[0] == '?') {
- p[0]++;
- sprintf(rs, "\r\n0-255");
- isdn_tty_at_cout(rs, info);
- } else {
- par = isdn_getnum(p);
- if ((par < 0) || (par > 255))
- PARSE_ERROR1;
- f->ctcrty = par;
-#ifdef ISDN_TTY_FAX_STAT_DEBUG
- printk(KERN_DEBUG "isdn_tty: Fax FCTCRTY=%d\n", par);
-#endif
- }
- break;
- default:
- PARSE_ERROR1;
- }
- return 0;
- }
- /* DCC=vr,br,wd,ln,df,ec,bf,st - DCE capabilities parms */
- if (!strncmp(p[0], "DCC", 3)) {
- char *rp = &f->resolution;
- int i;
-
- p[0] += 3;
- switch (*p[0]) {
- case '?':
- p[0]++;
- strcpy(rs, "\r\n");
- for (i = 0; i < 8; i++) {
- sprintf(rss, "%c%s", rp[i] + 48,
- (i < 7) ? "," : "");
- strcat(rs, rss);
- }
- isdn_tty_at_cout(rs, info);
- break;
- case '=':
- p[0]++;
- if (*p[0] == '?') {
- isdn_tty_at_cout("\r\n(0,1),(0-5),(0-2),(0-2),(0-3),(0-2),(0),(0-7)", info);
- p[0]++;
- } else {
- for (i = 0; (((*p[0] >= '0') && (*p[0] <= '9')) || (*p[0] == ',')) && (i < 8); i++) {
- if (*p[0] != ',') {
- if ((*p[0] - 48) > maxdccval[i]) {
- PARSE_ERROR1;
- }
- rp[i] = *p[0] - 48;
- p[0]++;
- if (*p[0] == ',')
- p[0]++;
- } else
- p[0]++;
- }
-#ifdef ISDN_TTY_FAX_STAT_DEBUG
- printk(KERN_DEBUG "isdn_tty: Fax FDCC capabilities DCE=%d,%d,%d,%d,%d,%d,%d,%d\n",
- rp[0], rp[1], rp[2], rp[3], rp[4], rp[5], rp[6], rp[7]);
-#endif
- }
- break;
- default:
- PARSE_ERROR1;
- }
- return 0;
- }
- /* DIS=vr,br,wd,ln,df,ec,bf,st - current session parms */
- if (!strncmp(p[0], "DIS", 3)) {
- char *rp = &f->resolution;
- int i;
-
- p[0] += 3;
- switch (*p[0]) {
- case '?':
- p[0]++;
- strcpy(rs, "\r\n");
- for (i = 0; i < 8; i++) {
- sprintf(rss, "%c%s", rp[i] + 48,
- (i < 7) ? "," : "");
- strcat(rs, rss);
- }
- isdn_tty_at_cout(rs, info);
- break;
- case '=':
- p[0]++;
- if (*p[0] == '?') {
- isdn_tty_at_cout("\r\n(0,1),(0-5),(0-2),(0-2),(0-3),(0-2),(0),(0-7)", info);
- p[0]++;
- } else {
- for (i = 0; (((*p[0] >= '0') && (*p[0] <= '9')) || (*p[0] == ',')) && (i < 8); i++) {
- if (*p[0] != ',') {
- if ((*p[0] - 48) > maxdccval[i]) {
- PARSE_ERROR1;
- }
- rp[i] = *p[0] - 48;
- p[0]++;
- if (*p[0] == ',')
- p[0]++;
- } else
- p[0]++;
- }
-#ifdef ISDN_TTY_FAX_STAT_DEBUG
- printk(KERN_DEBUG "isdn_tty: Fax FDIS session parms=%d,%d,%d,%d,%d,%d,%d,%d\n",
- rp[0], rp[1], rp[2], rp[3], rp[4], rp[5], rp[6], rp[7]);
-#endif
- }
- break;
- default:
- PARSE_ERROR1;
- }
- return 0;
- }
- /* DR - Receive Phase C data command, initiates document reception */
- if (!strncmp(p[0], "DR", 2)) {
- p[0] += 2;
- if ((info->faxonline & 16) && /* incoming connection */
- ((f->phase == ISDN_FAX_PHASE_B) || (f->phase == ISDN_FAX_PHASE_D))) {
-#ifdef ISDN_TTY_FAX_STAT_DEBUG
- printk(KERN_DEBUG "isdn_tty: Fax FDR\n");
-#endif
- f->code = ISDN_TTY_FAX_DR;
- cmd.driver = info->isdn_driver;
- cmd.arg = info->isdn_channel;
- cmd.command = ISDN_CMD_FAXCMD;
- isdn_command(&cmd);
- if (f->phase == ISDN_FAX_PHASE_B) {
- f->phase = ISDN_FAX_PHASE_C;
- } else if (f->phase == ISDN_FAX_PHASE_D) {
- switch (f->fet) {
- case 0: /* next page will be received */
- f->phase = ISDN_FAX_PHASE_C;
- isdn_tty_fax_modem_result(7, info); /* CONNECT */
- break;
- case 1: /* next doc will be received */
- f->phase = ISDN_FAX_PHASE_B;
- break;
- case 2: /* fax session is terminating */
- f->phase = ISDN_FAX_PHASE_E;
- break;
- default:
- PARSE_ERROR1;
- }
- }
- } else {
- PARSE_ERROR1;
- }
- return 1;
- }
- /* DT=df,vr,wd,ln - TX phase C data command (release DCE to proceed with negotiation) */
- if (!strncmp(p[0], "DT", 2)) {
- int i, val[] =
- {4, 0, 2, 3};
- char *rp = &f->resolution;
-
- p[0] += 2;
- if (!(info->faxonline & 1)) /* not outgoing connection */
- PARSE_ERROR1;
-
- for (i = 0; (((*p[0] >= '0') && (*p[0] <= '9')) || (*p[0] == ',')) && (i < 4); i++) {
- if (*p[0] != ',') {
- if ((*p[0] - 48) > maxdccval[val[i]]) {
- PARSE_ERROR1;
- }
- rp[val[i]] = *p[0] - 48;
- p[0]++;
- if (*p[0] == ',')
- p[0]++;
- } else
- p[0]++;
- }
-#ifdef ISDN_TTY_FAX_STAT_DEBUG
- printk(KERN_DEBUG "isdn_tty: Fax FDT tx data command parms=%d,%d,%d,%d\n",
- rp[4], rp[0], rp[2], rp[3]);
-#endif
- if ((f->phase == ISDN_FAX_PHASE_B) || (f->phase == ISDN_FAX_PHASE_D)) {
- f->code = ISDN_TTY_FAX_DT;
- cmd.driver = info->isdn_driver;
- cmd.arg = info->isdn_channel;
- cmd.command = ISDN_CMD_FAXCMD;
- isdn_command(&cmd);
- if (f->phase == ISDN_FAX_PHASE_D) {
- f->phase = ISDN_FAX_PHASE_C;
- isdn_tty_fax_modem_result(7, info); /* CONNECT */
- }
- } else {
- PARSE_ERROR1;
- }
- return 1;
- }
-	/* ECM=n - Error correction mode control, 0=disabled, 2=enabled (handled by the DCE alone, incl. buffering of partial pages) */
- if (!strncmp(p[0], "ECM", 3)) {
- p[0] += 3;
- switch (*p[0]) {
- case '?':
- p[0]++;
- sprintf(rs, "\r\n%d", f->ecm);
- isdn_tty_at_cout(rs, info);
- break;
- case '=':
- p[0]++;
- if (*p[0] == '?') {
- p[0]++;
- sprintf(rs, "\r\n0,2");
- isdn_tty_at_cout(rs, info);
- } else {
- par = isdn_getnum(p);
- if ((par != 0) && (par != 2))
- PARSE_ERROR1;
- f->ecm = par;
-#ifdef ISDN_TTY_FAX_STAT_DEBUG
- printk(KERN_DEBUG "isdn_tty: Fax FECM=%d\n", par);
-#endif
- }
- break;
- default:
- PARSE_ERROR1;
- }
- return 0;
- }
- /* ET=n - End of page or document */
- if (!strncmp(p[0], "ET=", 3)) {
- p[0] += 3;
- if (*p[0] == '?') {
- p[0]++;
- sprintf(rs, "\r\n0-2");
- isdn_tty_at_cout(rs, info);
- } else {
- if ((f->phase != ISDN_FAX_PHASE_D) ||
- (!(info->faxonline & 1)))
- PARSE_ERROR1;
- par = isdn_getnum(p);
- if ((par < 0) || (par > 2))
- PARSE_ERROR1;
- f->fet = par;
- f->code = ISDN_TTY_FAX_ET;
- cmd.driver = info->isdn_driver;
- cmd.arg = info->isdn_channel;
- cmd.command = ISDN_CMD_FAXCMD;
- isdn_command(&cmd);
-#ifdef ISDN_TTY_FAX_STAT_DEBUG
- printk(KERN_DEBUG "isdn_tty: Fax FET=%d\n", par);
-#endif
- return 1;
- }
- return 0;
- }
- /* K - terminate */
- if (!strncmp(p[0], "K", 1)) {
- p[0] += 1;
- if ((f->phase == ISDN_FAX_PHASE_IDLE) || (f->phase == ISDN_FAX_PHASE_E))
- PARSE_ERROR1;
- isdn_tty_modem_hup(info, 1);
- return 1;
- }
- /* LID=string - local fax ID */
- if (!strncmp(p[0], "LID", 3)) {
- int i, r;
- p[0] += 3;
- switch (*p[0]) {
- case '?':
- p[0]++;
- sprintf(rs, "\r\n\"%s\"", f->id);
- isdn_tty_at_cout(rs, info);
- break;
- case '=':
- p[0]++;
- if (*p[0] == '?') {
- p[0]++;
- sprintf(rs, "\r\n\"STRING\"");
- isdn_tty_at_cout(rs, info);
- } else {
- if (*p[0] == '"')
- p[0]++;
- for (i = 0; (*p[0]) && i < (FAXIDLEN - 1) && (*p[0] != '"'); i++) {
- f->id[i] = *p[0]++;
- }
- if (*p[0] == '"')
- p[0]++;
- for (r = i; r < FAXIDLEN; r++) {
- f->id[r] = 32;
- }
- f->id[FAXIDLEN - 1] = 0;
-#ifdef ISDN_TTY_FAX_STAT_DEBUG
- printk(KERN_DEBUG "isdn_tty: Fax local ID \"%s\"\n", f->id);
-#endif
- }
- break;
- default:
- PARSE_ERROR1;
- }
- return 0;
- }
-
- /* MDL? - DCE Model */
- if (!strncmp(p[0], "MDL?", 4)) {
- p[0] += 4;
-#ifdef ISDN_TTY_FAX_STAT_DEBUG
- printk(KERN_DEBUG "isdn_tty: FMDL?\n");
-#endif
- isdn_tty_at_cout("\r\nisdn4linux", info);
- return 0;
- }
- /* MFR? - DCE Manufacturer */
- if (!strncmp(p[0], "MFR?", 4)) {
- p[0] += 4;
-#ifdef ISDN_TTY_FAX_STAT_DEBUG
- printk(KERN_DEBUG "isdn_tty: FMFR?\n");
-#endif
- isdn_tty_at_cout("\r\nisdn4linux", info);
- return 0;
- }
- /* MINSP=n - Minimum Speed for Phase C */
- if (!strncmp(p[0], "MINSP", 5)) {
- p[0] += 5;
- switch (*p[0]) {
- case '?':
- p[0]++;
- sprintf(rs, "\r\n%d", f->minsp);
- isdn_tty_at_cout(rs, info);
- break;
- case '=':
- p[0]++;
- if (*p[0] == '?') {
- p[0]++;
- sprintf(rs, "\r\n0-5");
- isdn_tty_at_cout(rs, info);
- } else {
- par = isdn_getnum(p);
- if ((par < 0) || (par > 5))
- PARSE_ERROR1;
- f->minsp = par;
-#ifdef ISDN_TTY_FAX_STAT_DEBUG
- printk(KERN_DEBUG "isdn_tty: Fax FMINSP=%d\n", par);
-#endif
- }
- break;
- default:
- PARSE_ERROR1;
- }
- return 0;
- }
- /* PHCTO=value - DTE phase C timeout */
- if (!strncmp(p[0], "PHCTO", 5)) {
- p[0] += 5;
- switch (*p[0]) {
- case '?':
- p[0]++;
- sprintf(rs, "\r\n%d", f->phcto);
- isdn_tty_at_cout(rs, info);
- break;
- case '=':
- p[0]++;
- if (*p[0] == '?') {
- p[0]++;
- sprintf(rs, "\r\n0-255");
- isdn_tty_at_cout(rs, info);
- } else {
- par = isdn_getnum(p);
- if ((par < 0) || (par > 255))
- PARSE_ERROR1;
- f->phcto = par;
-#ifdef ISDN_TTY_FAX_STAT_DEBUG
- printk(KERN_DEBUG "isdn_tty: Fax FPHCTO=%d\n", par);
-#endif
- }
- break;
- default:
- PARSE_ERROR1;
- }
- return 0;
- }
-
- /* REL=n - Phase C received EOL alignment */
- if (!strncmp(p[0], "REL", 3)) {
- p[0] += 3;
- switch (*p[0]) {
- case '?':
- p[0]++;
- sprintf(rs, "\r\n%d", f->rel);
- isdn_tty_at_cout(rs, info);
- break;
- case '=':
- p[0]++;
- if (*p[0] == '?') {
- p[0]++;
- sprintf(rs, "\r\n0,1");
- isdn_tty_at_cout(rs, info);
- } else {
- par = isdn_getnum(p);
- if ((par < 0) || (par > 1))
- PARSE_ERROR1;
- f->rel = par;
-#ifdef ISDN_TTY_FAX_STAT_DEBUG
- printk(KERN_DEBUG "isdn_tty: Fax FREL=%d\n", par);
-#endif
- }
- break;
- default:
- PARSE_ERROR1;
- }
- return 0;
- }
- /* REV? - DCE Revision */
- if (!strncmp(p[0], "REV?", 4)) {
- p[0] += 4;
-#ifdef ISDN_TTY_FAX_STAT_DEBUG
- printk(KERN_DEBUG "isdn_tty: FREV?\n");
-#endif
- strcpy(rss, isdn_tty_fax_revision);
- sprintf(rs, "\r\nRev: %s", isdn_getrev(rss));
- isdn_tty_at_cout(rs, info);
- return 0;
- }
-
- /* Phase C Transmit Data Block Size */
- if (!strncmp(p[0], "TBC=", 4)) { /* dummy, not used */
- p[0] += 4;
-#ifdef ISDN_TTY_FAX_STAT_DEBUG
- printk(KERN_DEBUG "isdn_tty: Fax FTBC=%c\n", *p[0]);
-#endif
- switch (*p[0]) {
- case '0':
- p[0]++;
- break;
- default:
- PARSE_ERROR1;
- }
- return 0;
- }
- printk(KERN_DEBUG "isdn_tty: unknown token=>AT+F%s<\n", p[0]);
- PARSE_ERROR1;
-}
-
-int
-isdn_tty_cmd_PLUSF_FAX(char **p, modem_info *info)
-{
- if (TTY_IS_FCLASS2(info))
- return (isdn_tty_cmd_FCLASS2(p, info));
- else if (TTY_IS_FCLASS1(info))
- return (isdn_tty_cmd_FCLASS1(p, info));
- PARSE_ERROR1;
-}
diff --git a/drivers/isdn/i4l/isdn_ttyfax.h b/drivers/isdn/i4l/isdn_ttyfax.h
deleted file mode 100644
index ccda4fcf8f7b..000000000000
--- a/drivers/isdn/i4l/isdn_ttyfax.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* $Id: isdn_ttyfax.h,v 1.1.2.2 2004/01/12 22:37:19 keil Exp $
- *
- * header for Linux ISDN subsystem, tty_fax related functions (linklevel).
- *
- * Copyright 1999 by Armin Schindler (mac@melware.de)
- * Copyright 1999 by Ralf Spachmann (mel@melware.de)
- * Copyright 1999 by Cytronics & Melware
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-
-#define XON 0x11
-#define XOFF 0x13
-#define DC2 0x12
diff --git a/drivers/isdn/i4l/isdn_v110.c b/drivers/isdn/i4l/isdn_v110.c
deleted file mode 100644
index d11fe76f138f..000000000000
--- a/drivers/isdn/i4l/isdn_v110.c
+++ /dev/null
@@ -1,625 +0,0 @@
-/* $Id: isdn_v110.c,v 1.1.2.2 2004/01/12 22:37:19 keil Exp $
- *
- * Linux ISDN subsystem, V.110 related functions (linklevel).
- *
- * Copyright by Thomas Pfeiffer (pfeiffer@pds.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/delay.h>
-
-#include <linux/isdn.h>
-#include "isdn_v110.h"
-
-#undef ISDN_V110_DEBUG
-
-char *isdn_v110_revision = "$Revision: 1.1.2.2 $";
-
-#define V110_38400 255
-#define V110_19200 15
-#define V110_9600 3
-
-/*
- * The following data are precoded matrices, online and offline matrices
- * for 9600, 19200 and 38400 bit/s, respectively.
- */
-static unsigned char V110_OnMatrix_9600[] =
-{0xfc, 0xfc, 0xfc, 0xfc, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff,
- 0xff, 0xfd, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xfd,
- 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff,
- 0xff, 0xfd, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xfd};
-
-static unsigned char V110_OffMatrix_9600[] =
-{0xfc, 0xfc, 0xfc, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
-
-static unsigned char V110_OnMatrix_19200[] =
-{0xf0, 0xf0, 0xff, 0xf7, 0xff, 0xf7, 0xff, 0xf7, 0xff, 0xf7,
- 0xfd, 0xff, 0xff, 0xf7, 0xff, 0xf7, 0xff, 0xf7, 0xff, 0xf7};
-
-static unsigned char V110_OffMatrix_19200[] =
-{0xf0, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
-
-static unsigned char V110_OnMatrix_38400[] =
-{0x00, 0x7f, 0x7f, 0x7f, 0x7f, 0xfd, 0x7f, 0x7f, 0x7f, 0x7f};
-
-static unsigned char V110_OffMatrix_38400[] =
-{0x00, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xff};
-
-/*
- * FlipBits reorders sequences of keylen bits in one byte.
- * E.g. source bit order 76543210 will be converted to 45670123 when keylen = 4,
- * and to 67452301 when keylen = 2. This is necessary because the bit
- * ordering on the ISDN line is the other way around.
- */
-static inline unsigned char
-FlipBits(unsigned char c, int keylen)
-{
- unsigned char b = c;
- unsigned char bit = 128;
- int i;
- int j;
- int hunks = (8 / keylen);
-
- c = 0;
- for (i = 0; i < hunks; i++) {
- for (j = 0; j < keylen; j++) {
- if (b & (bit >> j))
- c |= bit >> (keylen - j - 1);
- }
- bit >>= keylen;
- }
- return c;
-}
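A small userspace check of the reordering described above; the function body is copied from FlipBits() unchanged, and the test value 0xb4 is purely illustrative:

#include <stdio.h>

static unsigned char FlipBits(unsigned char c, int keylen)
{
	unsigned char b = c;
	unsigned char bit = 128;
	int i, j;
	int hunks = (8 / keylen);

	c = 0;
	for (i = 0; i < hunks; i++) {
		for (j = 0; j < keylen; j++) {
			if (b & (bit >> j))
				c |= bit >> (keylen - j - 1);
		}
		bit >>= keylen;
	}
	return c;
}

int main(void)
{
	/* 0xb4 = 10110100: keylen 4 reverses each nibble   -> 11010010 = 0xd2,
	 *                  keylen 2 reverses each bit pair -> 01111000 = 0x78 */
	printf("%02x %02x\n", FlipBits(0xb4, 4), FlipBits(0xb4, 2));
	return 0;
}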
-
-
-/* isdn_v110_open allocates and initializes private V.110 data
- * structures and returns a pointer to these.
- */
-static isdn_v110_stream *
-isdn_v110_open(unsigned char key, int hdrlen, int maxsize)
-{
- int i;
- isdn_v110_stream *v;
-
- if ((v = kzalloc(sizeof(isdn_v110_stream), GFP_ATOMIC)) == NULL)
- return NULL;
- v->key = key;
- v->nbits = 0;
- for (i = 0; key & (1 << i); i++)
- v->nbits++;
-
- v->nbytes = 8 / v->nbits;
- v->decodelen = 0;
-
- switch (key) {
- case V110_38400:
- v->OnlineFrame = V110_OnMatrix_38400;
- v->OfflineFrame = V110_OffMatrix_38400;
- break;
- case V110_19200:
- v->OnlineFrame = V110_OnMatrix_19200;
- v->OfflineFrame = V110_OffMatrix_19200;
- break;
- default:
- v->OnlineFrame = V110_OnMatrix_9600;
- v->OfflineFrame = V110_OffMatrix_9600;
- break;
- }
- v->framelen = v->nbytes * 10;
- v->SyncInit = 5;
- v->introducer = 0;
- v->dbit = 1;
- v->b = 0;
- v->skbres = hdrlen;
- v->maxsize = maxsize - hdrlen;
- if ((v->encodebuf = kmalloc(maxsize, GFP_ATOMIC)) == NULL) {
- kfree(v);
- return NULL;
- }
- return v;
-}
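The per-rate parameters that isdn_v110_open() derives from its key argument can be reproduced with a short standalone calculation; the key values below are the V110_* constants defined above, everything else is a sketch:

#include <stdio.h>

int main(void)
{
	static const struct {
		const char *name;
		unsigned char key;
	} rates[] = {
		{ "V110_9600",  3   },
		{ "V110_19200", 15  },
		{ "V110_38400", 255 },
	};
	int r, i;

	for (r = 0; r < 3; r++) {
		int nbits = 0;

		for (i = 0; rates[r].key & (1 << i); i++)
			nbits++;		/* count contiguous low key bits */
		printf("%-12s nbits=%d nbytes=%d framelen=%d\n",
		       rates[r].name, nbits, 8 / nbits, (8 / nbits) * 10);
	}
	return 0;	/* prints 2/4/40, 4/2/20 and 8/1/10 */
}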
-
-/* isdn_v110_close frees private V.110 data structures */
-void
-isdn_v110_close(isdn_v110_stream *v)
-{
- if (v == NULL)
- return;
-#ifdef ISDN_V110_DEBUG
- printk(KERN_DEBUG "v110 close\n");
-#endif
- kfree(v->encodebuf);
- kfree(v);
-}
-
-
-/*
- * ValidHeaderBytes returns the number of valid header bytes at the start of v->decodebuf
- */
-static int
-ValidHeaderBytes(isdn_v110_stream *v)
-{
- int i;
- for (i = 0; (i < v->decodelen) && (i < v->nbytes); i++)
- if ((v->decodebuf[i] & v->key) != 0)
- break;
- return i;
-}
-
-/*
- * SyncHeader shifts decodebuf so that it starts at the next valid header
- */
-static void
-SyncHeader(isdn_v110_stream *v)
-{
- unsigned char *rbuf = v->decodebuf;
- int len = v->decodelen;
-
- if (len == 0)
- return;
-	for (rbuf++, len--; len > 0; len--, rbuf++)	/* look for the sync header in buf! */
-		if ((*rbuf & v->key) == 0)	/* first byte found? */
-			break;	/* yes! */
- if (len)
- memcpy(v->decodebuf, rbuf, len);
-
- v->decodelen = len;
-#ifdef ISDN_V110_DEBUG
- printk(KERN_DEBUG "isdn_v110: Header resync\n");
-#endif
-}
-
-/* DecodeMatrix takes n (n >= 1) matrices (V.110 frames, 10 bytes each), where
-   len is the number of matrix lines. len must be a multiple of 10, i.e.
-   only complete matrices may be given.
-   From these, the net payload data is extracted and returned in buf. The
-   return value is the byte count of the decoded data.
-*/
-static int
-DecodeMatrix(isdn_v110_stream *v, unsigned char *m, int len, unsigned char *buf)
-{
- int line = 0;
- int buflen = 0;
- int mbit = 64;
- int introducer = v->introducer;
- int dbit = v->dbit;
- unsigned char b = v->b;
-
- while (line < len) { /* Are we done with all lines of the matrix? */
-		if ((line % 10) == 0) {	/* line 0 of the matrix is always 0! */
- if (m[line] != 0x00) { /* not 0 ? -> error! */
-#ifdef ISDN_V110_DEBUG
- printk(KERN_DEBUG "isdn_v110: DecodeMatrix, V110 Bad Header\n");
- /* returning now is not the right thing, though :-( */
-#endif
- }
- line++; /* next line of matrix */
- continue;
-		} else if ((line % 10) == 5) {	/* line 5 contains only E-bits! */
- if ((m[line] & 0x70) != 0x30) { /* 011 has to be at the beginning! */
-#ifdef ISDN_V110_DEBUG
- printk(KERN_DEBUG "isdn_v110: DecodeMatrix, V110 Bad 5th line\n");
- /* returning now is not the right thing, though :-( */
-#endif
- }
- line++; /* next line */
- continue;
- } else if (!introducer) { /* every byte starts with 10 (stopbit, startbit) */
- introducer = (m[line] & mbit) ? 0 : 1; /* current bit of the matrix */
- next_byte:
- if (mbit > 2) { /* was it the last bit in this line ? */
- mbit >>= 1; /* no -> take next */
- continue;
- } /* otherwise start with leftmost bit in the next line */
- mbit = 64;
- line++;
- continue;
- } else { /* otherwise we need to set a data bit */
- if (m[line] & mbit) /* was that bit set in the matrix ? */
- b |= dbit; /* yes -> set it in the data byte */
- else
- b &= dbit - 1; /* no -> clear it in the data byte */
- if (dbit < 128) /* is that data byte done ? */
- dbit <<= 1; /* no, got the next bit */
- else { /* data byte is done */
- buf[buflen++] = b; /* copy byte into the output buffer */
- introducer = b = 0; /* init of the intro sequence and of the data byte */
- dbit = 1; /* next we look for the 0th bit */
- }
- goto next_byte; /* look for next bit in the matrix */
- }
- }
- v->introducer = introducer;
- v->dbit = dbit;
- v->b = b;
- return buflen; /* return number of bytes in the output buffer */
-}
-
-/*
- * isdn_v110_decode receives V.110 coded data from the input stream and
- * recovers the original frames.
- * The input stream does not need to be aligned to frame boundaries.
- */
-struct sk_buff *
-isdn_v110_decode(isdn_v110_stream *v, struct sk_buff *skb)
-{
- int i;
- int j;
- int len;
- unsigned char *v110_buf;
- unsigned char *rbuf;
-
- if (!skb) {
- printk(KERN_WARNING "isdn_v110_decode called with NULL skb!\n");
- return NULL;
- }
- rbuf = skb->data;
- len = skb->len;
- if (v == NULL) {
- /* invalid handle, no chance to proceed */
- printk(KERN_WARNING "isdn_v110_decode called with NULL stream!\n");
- dev_kfree_skb(skb);
- return NULL;
- }
- if (v->decodelen == 0) /* cache empty? */
- for (; len > 0; len--, rbuf++) /* scan for SyncHeader in buf */
- if ((*rbuf & v->key) == 0)
- break; /* found first byte */
- if (len == 0) {
- dev_kfree_skb(skb);
- return NULL;
- }
- /* copy new data to decode-buffer */
- memcpy(&(v->decodebuf[v->decodelen]), rbuf, len);
- v->decodelen += len;
-ReSync:
-	if (v->decodelen < v->nbytes) {	/* enough bytes for a full header? */
- dev_kfree_skb(skb);
- return NULL; /* no, try later */
- }
- if (ValidHeaderBytes(v) != v->nbytes) { /* is that a valid header? */
- SyncHeader(v); /* no -> look for header */
- goto ReSync;
- }
- len = (v->decodelen - (v->decodelen % (10 * v->nbytes))) / v->nbytes;
- if ((v110_buf = kmalloc(len, GFP_ATOMIC)) == NULL) {
- printk(KERN_WARNING "isdn_v110_decode: Couldn't allocate v110_buf\n");
- dev_kfree_skb(skb);
- return NULL;
- }
- for (i = 0; i < len; i++) {
- v110_buf[i] = 0;
- for (j = 0; j < v->nbytes; j++)
- v110_buf[i] |= (v->decodebuf[(i * v->nbytes) + j] & v->key) << (8 - ((j + 1) * v->nbits));
- v110_buf[i] = FlipBits(v110_buf[i], v->nbits);
- }
- v->decodelen = (v->decodelen % (10 * v->nbytes));
- memcpy(v->decodebuf, &(v->decodebuf[len * v->nbytes]), v->decodelen);
-
- skb_trim(skb, DecodeMatrix(v, v110_buf, len, skb->data));
- kfree(v110_buf);
- if (skb->len)
- return skb;
- else {
- kfree_skb(skb);
- return NULL;
- }
-}
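For the 9600 bit/s case (key = 3, nbits = 2, nbytes = 4), the inner loop above packs the two low payload bits of four consecutive wire bytes into one matrix byte before FlipBits() is applied. A standalone illustration of that packing step, using made-up wire bytes:

#include <stdio.h>

int main(void)
{
	unsigned char wire[4] = { 0xfd, 0xfe, 0xff, 0xfc };	/* made-up wire bytes */
	unsigned char key = 3, m = 0;
	int nbits = 2, j;

	for (j = 0; j < 4; j++)
		m |= (wire[j] & key) << (8 - (j + 1) * nbits);
	printf("packed matrix byte: %02x\n", m);	/* 01 10 11 00 = 0x6c */
	return 0;
}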
-
-/* EncodeMatrix takes input data in buf; len is the byte count.
-   The data is encoded into V.110 frames in m. The return value is the
-   number of matrix lines generated.
-*/
-static int
-EncodeMatrix(unsigned char *buf, int len, unsigned char *m, int mlen)
-{
- int line = 0;
- int i = 0;
- int mbit = 128;
- int dbit = 1;
- int introducer = 3;
- int ibit[] = {0, 1, 1};
-
- while ((i < len) && (line < mlen)) { /* while we still have input data */
- switch (line % 10) { /* in which line of the matrix are we? */
- case 0:
- m[line++] = 0x00; /* line 0 is always 0 */
- mbit = 128; /* go on with the 7th bit */
- break;
- case 5:
- m[line++] = 0xbf; /* line 5 is always 10111111 */
- mbit = 128; /* go on with the 7th bit */
- break;
- }
- if (line >= mlen) {
- printk(KERN_WARNING "isdn_v110 (EncodeMatrix): buffer full!\n");
- return line;
- }
- next_bit:
- switch (mbit) { /* leftmost or rightmost bit ? */
- case 1:
- line++; /* rightmost -> go to next line */
- if (line >= mlen) {
- printk(KERN_WARNING "isdn_v110 (EncodeMatrix): buffer full!\n");
- return line;
- }
- /* fall through */
- case 128:
-			m[line] = 128;	/* leftmost -> set byte to 10000000 */
- mbit = 64; /* current bit in the matrix line */
- continue;
- }
- if (introducer) { /* set 110 sequence ? */
-			introducer--;	/* one digit less to set */
- m[line] |= ibit[introducer] ? mbit : 0; /* set corresponding bit */
- mbit >>= 1; /* bit of matrix line >> 1 */
- goto next_bit; /* and go on there */
- } /* else push data bits into the matrix! */
- m[line] |= (buf[i] & dbit) ? mbit : 0; /* set data bit in matrix */
- if (dbit == 128) { /* was it the last one? */
- dbit = 1; /* then go on with first bit of */
- i++; /* next byte in input buffer */
- if (i < len) /* input buffer done ? */
- introducer = 3; /* no, write introducer 110 */
- else { /* input buffer done ! */
- m[line] |= (mbit - 1) & 0xfe; /* set remaining bits in line to 1 */
- break;
- }
- } else /* not the last data bit */
- dbit <<= 1; /* then go to next data bit */
- mbit >>= 1; /* go to next bit of matrix */
- goto next_bit;
-
- }
- /* if necessary, generate remaining lines of the matrix... */
- if ((line) && ((line + 10) < mlen))
- switch (++line % 10) {
- case 1:
- m[line++] = 0xfe;
- /* fall through */
- case 2:
- m[line++] = 0xfe;
- /* fall through */
- case 3:
- m[line++] = 0xfe;
- /* fall through */
- case 4:
- m[line++] = 0xfe;
- /* fall through */
- case 5:
- m[line++] = 0xbf;
- /* fall through */
- case 6:
- m[line++] = 0xfe;
- /* fall through */
- case 7:
- m[line++] = 0xfe;
- /* fall through */
- case 8:
- m[line++] = 0xfe;
- /* fall through */
- case 9:
- m[line++] = 0xfe;
- }
- return line; /* that's how many lines we have */
-}
-
-/*
- * Build a sync frame.
- */
-static struct sk_buff *
-isdn_v110_sync(isdn_v110_stream *v)
-{
- struct sk_buff *skb;
-
- if (v == NULL) {
- /* invalid handle, no chance to proceed */
- printk(KERN_WARNING "isdn_v110_sync called with NULL stream!\n");
- return NULL;
- }
- if ((skb = dev_alloc_skb(v->framelen + v->skbres))) {
- skb_reserve(skb, v->skbres);
- skb_put_data(skb, v->OfflineFrame, v->framelen);
- }
- return skb;
-}
-
-/*
- * Build an idle frame.
- */
-static struct sk_buff *
-isdn_v110_idle(isdn_v110_stream *v)
-{
- struct sk_buff *skb;
-
- if (v == NULL) {
- /* invalid handle, no chance to proceed */
- printk(KERN_WARNING "isdn_v110_sync called with NULL stream!\n");
- return NULL;
- }
- if ((skb = dev_alloc_skb(v->framelen + v->skbres))) {
- skb_reserve(skb, v->skbres);
- skb_put_data(skb, v->OnlineFrame, v->framelen);
- }
- return skb;
-}
-
-struct sk_buff *
-isdn_v110_encode(isdn_v110_stream *v, struct sk_buff *skb)
-{
- int i;
- int j;
- int rlen;
- int mlen;
- int olen;
- int size;
- int sval1;
- int sval2;
- int nframes;
- unsigned char *v110buf;
- unsigned char *rbuf;
- struct sk_buff *nskb;
-
- if (v == NULL) {
- /* invalid handle, no chance to proceed */
- printk(KERN_WARNING "isdn_v110_encode called with NULL stream!\n");
- return NULL;
- }
- if (!skb) {
- /* invalid skb, no chance to proceed */
- printk(KERN_WARNING "isdn_v110_encode called with NULL skb!\n");
- return NULL;
- }
- rlen = skb->len;
- nframes = (rlen + 3) / 4;
- v110buf = v->encodebuf;
- if ((nframes * 40) > v->maxsize) {
- size = v->maxsize;
- rlen = v->maxsize / 40;
- } else
- size = nframes * 40;
- if (!(nskb = dev_alloc_skb(size + v->skbres + sizeof(int)))) {
- printk(KERN_WARNING "isdn_v110_encode: Couldn't alloc skb\n");
- return NULL;
- }
- skb_reserve(nskb, v->skbres + sizeof(int));
- if (skb->len == 0) {
- skb_put_data(nskb, v->OnlineFrame, v->framelen);
- *((int *)skb_push(nskb, sizeof(int))) = 0;
- return nskb;
- }
- mlen = EncodeMatrix(skb->data, rlen, v110buf, size);
- /* now distribute 2 or 4 bits each to the output stream! */
- rbuf = skb_put(nskb, size);
- olen = 0;
- sval1 = 8 - v->nbits;
- sval2 = v->key << sval1;
- for (i = 0; i < mlen; i++) {
- v110buf[i] = FlipBits(v110buf[i], v->nbits);
- for (j = 0; j < v->nbytes; j++) {
- if (size--)
- *rbuf++ = ~v->key | (((v110buf[i] << (j * v->nbits)) & sval2) >> sval1);
- else {
- printk(KERN_WARNING "isdn_v110_encode: buffers full!\n");
- goto buffer_full;
- }
- olen++;
- }
- }
-buffer_full:
- skb_trim(nskb, olen);
- *((int *)skb_push(nskb, sizeof(int))) = rlen;
- return nskb;
-}
-
-int
-isdn_v110_stat_callback(int idx, isdn_ctrl *c)
-{
- isdn_v110_stream *v = NULL;
- int i;
- int ret = 0;
-
- if (idx < 0)
- return 0;
- switch (c->command) {
- case ISDN_STAT_BSENT:
- /* Keep the send-queue of the driver filled
- * with frames:
- * If number of outstanding frames < 3,
-		 * send down an Idle-Frame (or a Sync-Frame, if
- * v->SyncInit != 0).
- */
- if (!(v = dev->v110[idx]))
- return 0;
- atomic_inc(&dev->v110use[idx]);
- for (i = 0; i * v->framelen < c->parm.length; i++) {
- if (v->skbidle > 0) {
- v->skbidle--;
- ret = 1;
- } else {
- if (v->skbuser > 0)
- v->skbuser--;
- ret = 0;
- }
- }
- for (i = v->skbuser + v->skbidle; i < 2; i++) {
- struct sk_buff *skb;
- if (v->SyncInit > 0)
- skb = isdn_v110_sync(v);
- else
- skb = isdn_v110_idle(v);
- if (skb) {
- if (dev->drv[c->driver]->interface->writebuf_skb(c->driver, c->arg, 1, skb) <= 0) {
- dev_kfree_skb(skb);
- break;
- } else {
- if (v->SyncInit)
- v->SyncInit--;
- v->skbidle++;
- }
- } else
- break;
- }
- atomic_dec(&dev->v110use[idx]);
- return ret;
- case ISDN_STAT_DHUP:
- case ISDN_STAT_BHUP:
- while (1) {
- atomic_inc(&dev->v110use[idx]);
- if (atomic_dec_and_test(&dev->v110use[idx])) {
- isdn_v110_close(dev->v110[idx]);
- dev->v110[idx] = NULL;
- break;
- }
- mdelay(1);
- }
- break;
- case ISDN_STAT_BCONN:
- if (dev->v110emu[idx] && (dev->v110[idx] == NULL)) {
- int hdrlen = dev->drv[c->driver]->interface->hl_hdrlen;
- int maxsize = dev->drv[c->driver]->interface->maxbufsize;
- atomic_inc(&dev->v110use[idx]);
- switch (dev->v110emu[idx]) {
- case ISDN_PROTO_L2_V11096:
- dev->v110[idx] = isdn_v110_open(V110_9600, hdrlen, maxsize);
- break;
- case ISDN_PROTO_L2_V11019:
- dev->v110[idx] = isdn_v110_open(V110_19200, hdrlen, maxsize);
- break;
- case ISDN_PROTO_L2_V11038:
- dev->v110[idx] = isdn_v110_open(V110_38400, hdrlen, maxsize);
- break;
- default:;
- }
- if ((v = dev->v110[idx])) {
- while (v->SyncInit) {
- struct sk_buff *skb = isdn_v110_sync(v);
- if (dev->drv[c->driver]->interface->writebuf_skb(c->driver, c->arg, 1, skb) <= 0) {
- dev_kfree_skb(skb);
- /* Unable to send, try later */
- break;
- }
- v->SyncInit--;
- v->skbidle++;
- }
- } else
- printk(KERN_WARNING "isdn_v110: Couldn't open stream for chan %d\n", idx);
- atomic_dec(&dev->v110use[idx]);
- }
- break;
- default:
- return 0;
- }
- return 0;
-}
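The refill policy of the ISDN_STAT_BSENT branch above (keep two frames outstanding, preferring sync frames while SyncInit is nonzero) can be simulated in a few lines; all counter values here are made up:

#include <stdio.h>

int main(void)
{
	int skbuser = 1, skbidle = 0, sync_init = 3;
	int acked = 1;		/* frames just acknowledged by the driver */
	int i;

	/* account for the acknowledged frames, idle frames first */
	for (i = 0; i < acked; i++) {
		if (skbidle > 0)
			skbidle--;
		else if (skbuser > 0)
			skbuser--;
	}

	/* top the send queue back up to two outstanding frames */
	for (i = skbuser + skbidle; i < 2; i++) {
		if (sync_init > 0) {
			printf("queue sync frame\n");
			sync_init--;
		} else {
			printf("queue idle frame\n");
		}
		skbidle++;
	}
	printf("outstanding: user=%d idle=%d\n", skbuser, skbidle);
	return 0;
}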
diff --git a/drivers/isdn/i4l/isdn_v110.h b/drivers/isdn/i4l/isdn_v110.h
deleted file mode 100644
index de774ab598c9..000000000000
--- a/drivers/isdn/i4l/isdn_v110.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* $Id: isdn_v110.h,v 1.1.2.2 2004/01/12 22:37:19 keil Exp $
- *
- * Linux ISDN subsystem, V.110 related functions (linklevel).
- *
- * Copyright by Thomas Pfeiffer (pfeiffer@pds.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#ifndef _isdn_v110_h_
-#define _isdn_v110_h_
-
-/*
- * isdn_v110_encode will take raw data and encode it using V.110
- */
-extern struct sk_buff *isdn_v110_encode(isdn_v110_stream *, struct sk_buff *);
-
-/*
- * isdn_v110_decode receives V.110 coded data from the stream and rebuilds
- * frames from them. The source stream doesn't need to be framed.
- */
-extern struct sk_buff *isdn_v110_decode(isdn_v110_stream *, struct sk_buff *);
-
-extern int isdn_v110_stat_callback(int, isdn_ctrl *);
-extern void isdn_v110_close(isdn_v110_stream *v);
-
-#endif
diff --git a/drivers/isdn/i4l/isdn_x25iface.c b/drivers/isdn/i4l/isdn_x25iface.c
deleted file mode 100644
index 48bfbcb4a09d..000000000000
--- a/drivers/isdn/i4l/isdn_x25iface.c
+++ /dev/null
@@ -1,332 +0,0 @@
-/* $Id: isdn_x25iface.c,v 1.1.2.2 2004/01/12 22:37:19 keil Exp $
- *
- * Linux ISDN subsystem, X.25 related functions
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- * stuff needed to support the Linux X.25 PLP code on top of devices that
- * can provide a lapb service using the concap_proto mechanism.
- * This module supports a network interface which provides lapb semantics
- * -- as defined in Documentation/networking/x25-iface.txt -- to
- * the upper layer and assumes that the lower layer provides a reliable
- * data link service by means of the concap_device_ops callbacks.
- *
- * Only protocol specific stuff goes here. Device specific stuff
- * goes to another -- device related -- concap_proto support source file.
- *
- */
-
-/* #include <linux/isdn.h> */
-#include <linux/netdevice.h>
-#include <linux/concap.h>
-#include <linux/slab.h>
-#include <linux/wanrouter.h>
-#include <net/x25device.h>
-#include "isdn_x25iface.h"
-
-/* for debugging messages not to cause an oops when device pointer is NULL*/
-#define MY_DEVNAME(dev) ((dev) ? (dev)->name : "DEVICE UNSPECIFIED")
-
-
-typedef struct isdn_x25iface_proto_data {
- int magic;
- enum wan_states state;
- /* Private stuff, not to be accessed via proto_data. We provide the
- other storage for the concap_proto instance here as well,
- enabling us to allocate both with just one kmalloc(): */
- struct concap_proto priv;
-} ix25_pdata_t;
-
-
-
-/* is now in header file (extern): struct concap_proto * isdn_x25iface_proto_new(void); */
-static void isdn_x25iface_proto_del(struct concap_proto *);
-static int isdn_x25iface_proto_close(struct concap_proto *);
-static int isdn_x25iface_proto_restart(struct concap_proto *,
- struct net_device *,
- struct concap_device_ops *);
-static int isdn_x25iface_xmit(struct concap_proto *, struct sk_buff *);
-static int isdn_x25iface_receive(struct concap_proto *, struct sk_buff *);
-static int isdn_x25iface_connect_ind(struct concap_proto *);
-static int isdn_x25iface_disconn_ind(struct concap_proto *);
-
-
-static struct concap_proto_ops ix25_pops = {
- .proto_new = &isdn_x25iface_proto_new,
- .proto_del = &isdn_x25iface_proto_del,
- .restart = &isdn_x25iface_proto_restart,
- .close = &isdn_x25iface_proto_close,
- .encap_and_xmit = &isdn_x25iface_xmit,
- .data_ind = &isdn_x25iface_receive,
- .connect_ind = &isdn_x25iface_connect_ind,
- .disconn_ind = &isdn_x25iface_disconn_ind
-};
-
-/* error message helper function */
-static void illegal_state_warn(unsigned state, unsigned char firstbyte)
-{
- printk(KERN_WARNING "isdn_x25iface: firstbyte %x illegal in"
- "current state %d\n", firstbyte, state);
-}
-
-/* check protocol data field for consistency */
-static int pdata_is_bad(ix25_pdata_t *pda) {
-
- if (pda && pda->magic == ISDN_X25IFACE_MAGIC) return 0;
- printk(KERN_WARNING
- "isdn_x25iface_xxx: illegal pointer to proto data\n");
- return 1;
-}
-
-/* create a new x25 interface protocol instance
- */
-struct concap_proto *isdn_x25iface_proto_new(void)
-{
- ix25_pdata_t *tmp = kmalloc(sizeof(ix25_pdata_t), GFP_KERNEL);
- IX25DEBUG("isdn_x25iface_proto_new\n");
- if (tmp) {
- tmp->magic = ISDN_X25IFACE_MAGIC;
- tmp->state = WAN_UNCONFIGURED;
- /* private data space used to hold the concap_proto data.
- Only to be accessed via the returned pointer */
- spin_lock_init(&tmp->priv.lock);
- tmp->priv.dops = NULL;
- tmp->priv.net_dev = NULL;
- tmp->priv.pops = &ix25_pops;
- tmp->priv.flags = 0;
- tmp->priv.proto_data = tmp;
- return (&(tmp->priv));
- }
- return NULL;
-};
-
-/* close the x25iface encapsulation protocol
- */
-static int isdn_x25iface_proto_close(struct concap_proto *cprot) {
-
- ix25_pdata_t *tmp;
- int ret = 0;
- ulong flags;
-
- if (!cprot) {
- printk(KERN_ERR "isdn_x25iface_proto_close: "
- "invalid concap_proto pointer\n");
- return -1;
- }
- IX25DEBUG("isdn_x25iface_proto_close %s \n", MY_DEVNAME(cprot->net_dev));
- spin_lock_irqsave(&cprot->lock, flags);
- cprot->dops = NULL;
- cprot->net_dev = NULL;
- tmp = cprot->proto_data;
- if (pdata_is_bad(tmp)) {
- ret = -1;
- } else {
- tmp->state = WAN_UNCONFIGURED;
- }
- spin_unlock_irqrestore(&cprot->lock, flags);
- return ret;
-}
-
-/* Delete the x25iface encapsulation protocol instance
- */
-static void isdn_x25iface_proto_del(struct concap_proto *cprot) {
-
- ix25_pdata_t *tmp;
-
- IX25DEBUG("isdn_x25iface_proto_del \n");
- if (!cprot) {
- printk(KERN_ERR "isdn_x25iface_proto_del: "
- "concap_proto pointer is NULL\n");
- return;
- }
- tmp = cprot->proto_data;
- if (tmp == NULL) {
- printk(KERN_ERR "isdn_x25iface_proto_del: inconsistent "
- "proto_data pointer (maybe already deleted?)\n");
- return;
- }
- /* close if the protocol is still open */
- if (cprot->dops) isdn_x25iface_proto_close(cprot);
- /* freeing the storage should be sufficient now. But some additional
- settings might help to catch wild pointer bugs */
- tmp->magic = 0;
- cprot->proto_data = NULL;
-
- kfree(tmp);
- return;
-}
-
-/* (re-)initialize the data structures for x25iface encapsulation
- */
-static int isdn_x25iface_proto_restart(struct concap_proto *cprot,
- struct net_device *ndev,
- struct concap_device_ops *dops)
-{
- ix25_pdata_t *pda = cprot->proto_data;
- ulong flags;
-
- IX25DEBUG("isdn_x25iface_proto_restart %s \n", MY_DEVNAME(ndev));
-
- if (pdata_is_bad(pda)) return -1;
-
- if (!(dops && dops->data_req && dops->connect_req
- && dops->disconn_req)) {
- printk(KERN_WARNING "isdn_x25iface_restart: required dops"
- " missing\n");
- isdn_x25iface_proto_close(cprot);
- return -1;
- }
- spin_lock_irqsave(&cprot->lock, flags);
- cprot->net_dev = ndev;
- cprot->pops = &ix25_pops;
- cprot->dops = dops;
- pda->state = WAN_DISCONNECTED;
- spin_unlock_irqrestore(&cprot->lock, flags);
- return 0;
-}
-
-/* deliver a dl_data frame received from i4l HL driver to the network layer
- */
-static int isdn_x25iface_receive(struct concap_proto *cprot, struct sk_buff *skb)
-{
- IX25DEBUG("isdn_x25iface_receive %s \n", MY_DEVNAME(cprot->net_dev));
- if (((ix25_pdata_t *)(cprot->proto_data))
- ->state == WAN_CONNECTED) {
- if (skb_push(skb, 1)) {
- skb->data[0] = X25_IFACE_DATA;
- skb->protocol = x25_type_trans(skb, cprot->net_dev);
- netif_rx(skb);
- return 0;
- }
- }
- printk(KERN_WARNING "isdn_x25iface_receive %s: not connected, skb dropped\n", MY_DEVNAME(cprot->net_dev));
- dev_kfree_skb(skb);
- return -1;
-}
-
-/* a connection set up is indicated by lower layer
- */
-static int isdn_x25iface_connect_ind(struct concap_proto *cprot)
-{
- struct sk_buff *skb;
- enum wan_states *state_p
- = &(((ix25_pdata_t *)(cprot->proto_data))->state);
- IX25DEBUG("isdn_x25iface_connect_ind %s \n"
- , MY_DEVNAME(cprot->net_dev));
- if (*state_p == WAN_UNCONFIGURED) {
- printk(KERN_WARNING
- "isdn_x25iface_connect_ind while unconfigured %s\n"
- , MY_DEVNAME(cprot->net_dev));
- return -1;
- }
- *state_p = WAN_CONNECTED;
-
- skb = dev_alloc_skb(1);
- if (skb) {
- skb_put_u8(skb, X25_IFACE_CONNECT);
- skb->protocol = x25_type_trans(skb, cprot->net_dev);
- netif_rx(skb);
- return 0;
- } else {
- printk(KERN_WARNING "isdn_x25iface_connect_ind: "
- " out of memory -- disconnecting\n");
- cprot->dops->disconn_req(cprot);
- return -1;
- }
-}
-
-/* a disconnect is indicated by lower layer
- */
-static int isdn_x25iface_disconn_ind(struct concap_proto *cprot)
-{
- struct sk_buff *skb;
- enum wan_states *state_p
- = &(((ix25_pdata_t *)(cprot->proto_data))->state);
- IX25DEBUG("isdn_x25iface_disconn_ind %s \n", MY_DEVNAME(cprot->net_dev));
- if (*state_p == WAN_UNCONFIGURED) {
- printk(KERN_WARNING
- "isdn_x25iface_disconn_ind while unconfigured\n");
- return -1;
- }
- if (!cprot->net_dev) return -1;
- *state_p = WAN_DISCONNECTED;
- skb = dev_alloc_skb(1);
- if (skb) {
- skb_put_u8(skb, X25_IFACE_DISCONNECT);
- skb->protocol = x25_type_trans(skb, cprot->net_dev);
- netif_rx(skb);
- return 0;
- } else {
- printk(KERN_WARNING "isdn_x25iface_disconn_ind:"
- " out of memory\n");
- return -1;
- }
-}
-
-/* process a frame handed over to us from linux network layer. First byte
- semantics as defined in Documentation/networking/x25-iface.txt
-*/
-static int isdn_x25iface_xmit(struct concap_proto *cprot, struct sk_buff *skb)
-{
- unsigned char firstbyte = skb->data[0];
- enum wan_states *state = &((ix25_pdata_t *)cprot->proto_data)->state;
- int ret = 0;
- IX25DEBUG("isdn_x25iface_xmit: %s first=%x state=%d\n",
- MY_DEVNAME(cprot->net_dev), firstbyte, *state);
- switch (firstbyte) {
- case X25_IFACE_DATA:
- if (*state == WAN_CONNECTED) {
- skb_pull(skb, 1);
- netif_trans_update(cprot->net_dev);
- ret = (cprot->dops->data_req(cprot, skb));
- /* prepare for future retransmissions */
- if (ret) skb_push(skb, 1);
- return ret;
- }
- illegal_state_warn(*state, firstbyte);
- break;
- case X25_IFACE_CONNECT:
- if (*state == WAN_DISCONNECTED) {
- *state = WAN_CONNECTING;
- ret = cprot->dops->connect_req(cprot);
- if (ret) {
- /* reset state and notify upper layer about
- * immediately failed attempts */
- isdn_x25iface_disconn_ind(cprot);
- }
- } else {
- illegal_state_warn(*state, firstbyte);
- }
- break;
- case X25_IFACE_DISCONNECT:
- switch (*state) {
- case WAN_DISCONNECTED:
- /* Should not happen. However, give upper layer a
- chance to recover from inconsistency but don't
- trust the lower layer sending the disconn_confirm
- when already disconnected */
- printk(KERN_WARNING "isdn_x25iface_xmit: disconnect "
- " requested while disconnected\n");
- isdn_x25iface_disconn_ind(cprot);
- break; /* prevent infinite loops */
- case WAN_CONNECTING:
- case WAN_CONNECTED:
- *state = WAN_DISCONNECTED;
- cprot->dops->disconn_req(cprot);
- break;
- default:
- illegal_state_warn(*state, firstbyte);
- }
- break;
- case X25_IFACE_PARAMS:
- printk(KERN_WARNING "isdn_x25iface_xmit: setting of lapb"
- " options not yet supported\n");
- break;
- default:
- printk(KERN_WARNING "isdn_x25iface_xmit: frame with illegal"
- " first byte %x ignored:\n", firstbyte);
- }
- dev_kfree_skb(skb);
- return 0;
-}
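For reference, the first-byte convention handled above reduces to a small state machine. The sketch below is not the driver; the numeric marker values and the confirmation step in main() are assumptions, and the device callbacks are replaced by printfs:

#include <stdio.h>

enum { IFACE_DATA, IFACE_CONNECT, IFACE_DISCONNECT, IFACE_PARAMS };
enum wan_state { WAN_UNCONFIGURED, WAN_DISCONNECTED,
		 WAN_CONNECTING, WAN_CONNECTED };

static enum wan_state state = WAN_DISCONNECTED;

static void xmit(int firstbyte)
{
	switch (firstbyte) {
	case IFACE_DATA:
		if (state == WAN_CONNECTED)
			printf("data_req\n");
		else
			printf("data ignored in state %d\n", state);
		break;
	case IFACE_CONNECT:
		if (state == WAN_DISCONNECTED) {
			state = WAN_CONNECTING;
			printf("connect_req\n");
		}
		break;
	case IFACE_DISCONNECT:
		if (state == WAN_CONNECTING || state == WAN_CONNECTED) {
			state = WAN_DISCONNECTED;
			printf("disconn_req\n");
		}
		break;
	default:
		printf("first byte %d ignored\n", firstbyte);
	}
}

int main(void)
{
	xmit(IFACE_CONNECT);
	state = WAN_CONNECTED;	/* pretend the lower layer confirmed */
	xmit(IFACE_DATA);
	xmit(IFACE_DISCONNECT);
	return 0;
}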
diff --git a/drivers/isdn/i4l/isdn_x25iface.h b/drivers/isdn/i4l/isdn_x25iface.h
deleted file mode 100644
index ca08e082cf7c..000000000000
--- a/drivers/isdn/i4l/isdn_x25iface.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* $Id: isdn_x25iface.h,v 1.1.2.2 2004/01/12 22:37:19 keil Exp $
- *
- * header for Linux ISDN subsystem, x.25 related functions
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#ifndef _LINUX_ISDN_X25IFACE_H
-#define _LINUX_ISDN_X25IFACE_H
-
-#define ISDN_X25IFACE_MAGIC 0x1e75a2b9
-/* #define DEBUG_ISDN_X25 if you want isdn_x25 debugging messages */
-#ifdef DEBUG_ISDN_X25
-# define IX25DEBUG(fmt, args...) printk(KERN_DEBUG fmt, ##args)
-#else
-# define IX25DEBUG(fmt, args...)
-#endif
-
-#include <linux/skbuff.h>
-#include <linux/isdn.h>
-#include <linux/concap.h>
-
-extern struct concap_proto_ops *isdn_x25iface_concap_proto_ops_pt;
-extern struct concap_proto *isdn_x25iface_proto_new(void);
-
-
-
-#endif
diff --git a/drivers/isdn/isdnloop/Makefile b/drivers/isdn/isdnloop/Makefile
deleted file mode 100644
index 5ff4c0e09768..000000000000
--- a/drivers/isdn/isdnloop/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-# Makefile for the isdnloop ISDN device driver
-
-# Each configuration option enables a list of files.
-
-obj-$(CONFIG_ISDN_DRV_LOOP) += isdnloop.o
diff --git a/drivers/isdn/isdnloop/isdnloop.c b/drivers/isdn/isdnloop/isdnloop.c
deleted file mode 100644
index 755c6bbc9553..000000000000
--- a/drivers/isdn/isdnloop/isdnloop.c
+++ /dev/null
@@ -1,1528 +0,0 @@
-/* $Id: isdnloop.c,v 1.11.6.7 2001/11/11 19:54:31 kai Exp $
- *
- * ISDN low-level module implementing a dummy loop driver.
- *
- * Copyright 1997 by Fritz Elfert (fritz@isdn4linux.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-#include "isdnloop.h"
-
-static char *isdnloop_id = "loop0";
-
-MODULE_DESCRIPTION("ISDN4Linux: Pseudo Driver that simulates an ISDN card");
-MODULE_AUTHOR("Fritz Elfert");
-MODULE_LICENSE("GPL");
-module_param(isdnloop_id, charp, 0);
-MODULE_PARM_DESC(isdnloop_id, "ID-String of first card");
-
-static int isdnloop_addcard(char *);
-
-/*
- * Free queue completely.
- *
- * Parameter:
- * card = pointer to card struct
- * channel = channel number
- */
-static void
-isdnloop_free_queue(isdnloop_card *card, int channel)
-{
- struct sk_buff_head *queue = &card->bqueue[channel];
-
- skb_queue_purge(queue);
- card->sndcount[channel] = 0;
-}
-
-/*
- * Send B-Channel data to another virtual card.
- * This routine is called via timer-callback from isdnloop_pollbchan().
- *
- * Parameter:
- * card = pointer to card struct.
- * ch = channel number (0-based)
- */
-static void
-isdnloop_bchan_send(isdnloop_card *card, int ch)
-{
- isdnloop_card *rcard = card->rcard[ch];
- int rch = card->rch[ch], len, ack;
- struct sk_buff *skb;
- isdn_ctrl cmd;
-
- while (card->sndcount[ch]) {
- skb = skb_dequeue(&card->bqueue[ch]);
- if (skb) {
- len = skb->len;
- card->sndcount[ch] -= len;
- ack = *(skb->head); /* used as scratch area */
- cmd.driver = card->myid;
- cmd.arg = ch;
- if (rcard) {
- rcard->interface.rcvcallb_skb(rcard->myid, rch, skb);
- } else {
- printk(KERN_WARNING "isdnloop: no rcard, skb dropped\n");
- dev_kfree_skb(skb);
-
- }
- cmd.command = ISDN_STAT_BSENT;
- cmd.parm.length = len;
- card->interface.statcallb(&cmd);
- } else
- card->sndcount[ch] = 0;
- }
-}
-
-/*
- * Send/Receive Data to/from the B-Channel.
- * This routine is called via timer-callback.
- * It schedules itself while any B-Channel is open.
- *
- * Parameter:
- * data = pointer to card struct, set by kernel timer.data
- */
-static void
-isdnloop_pollbchan(struct timer_list *t)
-{
- isdnloop_card *card = from_timer(card, t, rb_timer);
- unsigned long flags;
-
- if (card->flags & ISDNLOOP_FLAGS_B1ACTIVE)
- isdnloop_bchan_send(card, 0);
- if (card->flags & ISDNLOOP_FLAGS_B2ACTIVE)
- isdnloop_bchan_send(card, 1);
- if (card->flags & (ISDNLOOP_FLAGS_B1ACTIVE | ISDNLOOP_FLAGS_B2ACTIVE)) {
- /* schedule b-channel polling again */
- spin_lock_irqsave(&card->isdnloop_lock, flags);
- card->rb_timer.expires = jiffies + ISDNLOOP_TIMER_BCREAD;
- add_timer(&card->rb_timer);
- card->flags |= ISDNLOOP_FLAGS_RBTIMER;
- spin_unlock_irqrestore(&card->isdnloop_lock, flags);
- } else
- card->flags &= ~ISDNLOOP_FLAGS_RBTIMER;
-}
-
-/*
- * Parse ICN-type setup string and fill fields of setup-struct
- * with parsed data.
- *
- * Parameter:
- * setup = setup string, format: [caller-id],si1,si2,[called-id]
- * cmd = pointer to struct to be filled.
- */
-static void
-isdnloop_parse_setup(char *setup, isdn_ctrl *cmd)
-{
- char *t = setup;
- char *s = strchr(t, ',');
-
- *s++ = '\0';
- strlcpy(cmd->parm.setup.phone, t, sizeof(cmd->parm.setup.phone));
- s = strchr(t = s, ',');
- *s++ = '\0';
- if (!strlen(t))
- cmd->parm.setup.si1 = 0;
- else
- cmd->parm.setup.si1 = simple_strtoul(t, NULL, 10);
- s = strchr(t = s, ',');
- *s++ = '\0';
- if (!strlen(t))
- cmd->parm.setup.si2 = 0;
- else
- cmd->parm.setup.si2 =
- simple_strtoul(t, NULL, 10);
- strlcpy(cmd->parm.setup.eazmsn, s, sizeof(cmd->parm.setup.eazmsn));
- cmd->parm.setup.plan = 0;
- cmd->parm.setup.screen = 0;
-}
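For illustration, the same [caller-id],si1,si2,[called-id] layout can be parsed by a stand-alone user-space routine; the sample string and buffer sizes below are made up:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct setup_fields {
	char phone[32];		/* caller-id */
	int si1;
	int si2;
	char eazmsn[32];	/* called-id */
};

/* Split "caller,si1,si2,called" into its four fields; returns 0 on
 * success, -1 if a comma is missing. */
static int parse_setup(const char *setup, struct setup_fields *out)
{
	char buf[128];
	char *t, *s;

	snprintf(buf, sizeof(buf), "%s", setup);

	t = buf;
	if (!(s = strchr(t, ',')))
		return -1;
	*s++ = '\0';
	snprintf(out->phone, sizeof(out->phone), "%s", t);

	t = s;
	if (!(s = strchr(t, ',')))
		return -1;
	*s++ = '\0';
	out->si1 = *t ? atoi(t) : 0;

	t = s;
	if (!(s = strchr(t, ',')))
		return -1;
	*s++ = '\0';
	out->si2 = *t ? atoi(t) : 0;

	snprintf(out->eazmsn, sizeof(out->eazmsn), "%s", s);
	return 0;
}

int main(void)
{
	struct setup_fields f;

	if (!parse_setup("0711123456,7,0,987654", &f))
		printf("phone=%s si1=%d si2=%d eazmsn=%s\n",
		       f.phone, f.si1, f.si2, f.eazmsn);
	return 0;
}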
-
-typedef struct isdnloop_stat {
- char *statstr;
- int command;
- int action;
-} isdnloop_stat;
-/* *INDENT-OFF* */
-static isdnloop_stat isdnloop_stat_table[] = {
- {"BCON_", ISDN_STAT_BCONN, 1}, /* B-Channel connected */
- {"BDIS_", ISDN_STAT_BHUP, 2}, /* B-Channel disconnected */
- {"DCON_", ISDN_STAT_DCONN, 0}, /* D-Channel connected */
- {"DDIS_", ISDN_STAT_DHUP, 0}, /* D-Channel disconnected */
- {"DCAL_I", ISDN_STAT_ICALL, 3}, /* Incoming call dialup-line */
- {"DSCA_I", ISDN_STAT_ICALL, 3}, /* Incoming call 1TR6-SPV */
- {"FCALL", ISDN_STAT_ICALL, 4}, /* Leased line connection up */
- {"CIF", ISDN_STAT_CINF, 5}, /* Charge-info, 1TR6-type */
- {"AOC", ISDN_STAT_CINF, 6}, /* Charge-info, DSS1-type */
- {"CAU", ISDN_STAT_CAUSE, 7}, /* Cause code */
- {"TEI OK", ISDN_STAT_RUN, 0}, /* Card connected to wallplug */
- {"E_L1: ACT FAIL", ISDN_STAT_BHUP, 8}, /* Layer-1 activation failed */
- {"E_L2: DATA LIN", ISDN_STAT_BHUP, 8}, /* Layer-2 data link lost */
- {"E_L1: ACTIVATION FAILED",
- ISDN_STAT_BHUP, 8}, /* Layer-1 activation failed */
- {NULL, 0, -1}
-};
-/* *INDENT-ON* */
-
-
-/*
- * Parse Status message-strings from virtual card.
- * Depending on status, call statcallb for sending messages to upper
- * levels. Also set/reset B-Channel active-flags.
- *
- * Parameter:
- * status = status string to parse.
- * channel = channel where message comes from.
- * card = card where message comes from.
- */
-static void
-isdnloop_parse_status(u_char *status, int channel, isdnloop_card *card)
-{
- isdnloop_stat *s = isdnloop_stat_table;
- int action = -1;
- isdn_ctrl cmd;
-
- while (s->statstr) {
- if (!strncmp(status, s->statstr, strlen(s->statstr))) {
- cmd.command = s->command;
- action = s->action;
- break;
- }
- s++;
- }
- if (action == -1)
- return;
- cmd.driver = card->myid;
- cmd.arg = channel;
- switch (action) {
- case 1:
- /* BCON_x */
- card->flags |= (channel) ?
- ISDNLOOP_FLAGS_B2ACTIVE : ISDNLOOP_FLAGS_B1ACTIVE;
- break;
- case 2:
- /* BDIS_x */
- card->flags &= ~((channel) ?
- ISDNLOOP_FLAGS_B2ACTIVE : ISDNLOOP_FLAGS_B1ACTIVE);
- isdnloop_free_queue(card, channel);
- break;
- case 3:
- /* DCAL_I and DSCA_I */
- isdnloop_parse_setup(status + 6, &cmd);
- break;
- case 4:
- /* FCALL */
- sprintf(cmd.parm.setup.phone, "LEASED%d", card->myid);
- sprintf(cmd.parm.setup.eazmsn, "%d", channel + 1);
- cmd.parm.setup.si1 = 7;
- cmd.parm.setup.si2 = 0;
- cmd.parm.setup.plan = 0;
- cmd.parm.setup.screen = 0;
- break;
- case 5:
- /* CIF */
- strlcpy(cmd.parm.num, status + 3, sizeof(cmd.parm.num));
- break;
- case 6:
- /* AOC */
- snprintf(cmd.parm.num, sizeof(cmd.parm.num), "%d",
- (int) simple_strtoul(status + 7, NULL, 16));
- break;
- case 7:
- /* CAU */
- status += 3;
- if (strlen(status) == 4)
- snprintf(cmd.parm.num, sizeof(cmd.parm.num), "%s%c%c",
- status + 2, *status, *(status + 1));
- else
- strlcpy(cmd.parm.num, status + 1, sizeof(cmd.parm.num));
- break;
- case 8:
- /* Misc Errors on L1 and L2 */
- card->flags &= ~ISDNLOOP_FLAGS_B1ACTIVE;
- isdnloop_free_queue(card, 0);
- cmd.arg = 0;
- cmd.driver = card->myid;
- card->interface.statcallb(&cmd);
- cmd.command = ISDN_STAT_DHUP;
- cmd.arg = 0;
- cmd.driver = card->myid;
- card->interface.statcallb(&cmd);
- cmd.command = ISDN_STAT_BHUP;
- card->flags &= ~ISDNLOOP_FLAGS_B2ACTIVE;
- isdnloop_free_queue(card, 1);
- cmd.arg = 1;
- cmd.driver = card->myid;
- card->interface.statcallb(&cmd);
- cmd.command = ISDN_STAT_DHUP;
- cmd.arg = 1;
- cmd.driver = card->myid;
- break;
- }
- card->interface.statcallb(&cmd);
-}
-
-/*
- * Store a character into the ringbuffer for reading from /dev/isdnctrl
- *
- * Parameter:
- * card = pointer to card struct.
- * c = char to store.
- */
-static void
-isdnloop_putmsg(isdnloop_card *card, unsigned char c)
-{
- ulong flags;
-
- spin_lock_irqsave(&card->isdnloop_lock, flags);
- *card->msg_buf_write++ = (c == 0xff) ? '\n' : c;
- if (card->msg_buf_write == card->msg_buf_read) {
- if (++card->msg_buf_read > card->msg_buf_end)
- card->msg_buf_read = card->msg_buf;
- }
- if (card->msg_buf_write > card->msg_buf_end)
- card->msg_buf_write = card->msg_buf;
- spin_unlock_irqrestore(&card->isdnloop_lock, flags);
-}
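A minimal user-space sketch of the same overwrite-on-full ring buffer; the buffer here is shrunk to 16 bytes (the driver keeps 2048), and the sample message is arbitrary:

#include <stdio.h>

#define RB_SIZE 16

static char rb[RB_SIZE];
static char *rb_write = rb;
static char *rb_read  = rb;
static char *rb_end   = &rb[RB_SIZE - 1];

/* Same wrap logic as above: if the writer catches up with the reader,
 * the oldest character is silently dropped. */
static void rb_put(char c)
{
	*rb_write++ = c;
	if (rb_write == rb_read) {
		if (++rb_read > rb_end)
			rb_read = rb;
	}
	if (rb_write > rb_end)
		rb_write = rb;
}

int main(void)
{
	const char *msg = "01;BCON_I\n";

	while (*msg)
		rb_put(*msg++);

	while (rb_read != rb_write) {	/* drain, as a /dev/isdnctrl reader would */
		putchar(*rb_read);
		if (++rb_read > rb_end)
			rb_read = rb;
	}
	return 0;
}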
-
-/*
- * Poll a virtual card's message queue.
- * If there are new status-replies from the card, copy them to
- * ringbuffer for reading on /dev/isdnctrl and call
- * isdnloop_parse_status() for processing them. Watch for special
- * Firmware bootmessage and parse it, to get the D-Channel protocol.
- * If there are B-Channels open, initiate a timer-callback to
- * isdnloop_pollbchan().
- * This routine is called periodically via timer interrupt.
- *
- * Parameter:
- * data = pointer to card struct
- */
-static void
-isdnloop_polldchan(struct timer_list *t)
-{
- isdnloop_card *card = from_timer(card, t, st_timer);
- struct sk_buff *skb;
- int avail;
- int left;
- u_char c;
- int ch;
- unsigned long flags;
- u_char *p;
- isdn_ctrl cmd;
-
- skb = skb_dequeue(&card->dqueue);
- if (skb)
- avail = skb->len;
- else
- avail = 0;
- for (left = avail; left > 0; left--) {
- c = *skb->data;
- skb_pull(skb, 1);
- isdnloop_putmsg(card, c);
- card->imsg[card->iptr] = c;
- if (card->iptr < 59)
- card->iptr++;
- if (!skb->len) {
- avail++;
- isdnloop_putmsg(card, '\n');
- card->imsg[card->iptr] = 0;
- card->iptr = 0;
- if (card->imsg[0] == '0' && card->imsg[1] >= '0' &&
- card->imsg[1] <= '2' && card->imsg[2] == ';') {
- ch = (card->imsg[1] - '0') - 1;
- p = &card->imsg[3];
- isdnloop_parse_status(p, ch, card);
- } else {
- p = card->imsg;
- if (!strncmp(p, "DRV1.", 5)) {
- printk(KERN_INFO "isdnloop: (%s) %s\n", CID, p);
- if (!strncmp(p + 7, "TC", 2)) {
- card->ptype = ISDN_PTYPE_1TR6;
- card->interface.features |= ISDN_FEATURE_P_1TR6;
- printk(KERN_INFO
- "isdnloop: (%s) 1TR6-Protocol loaded and running\n", CID);
- }
- if (!strncmp(p + 7, "EC", 2)) {
- card->ptype = ISDN_PTYPE_EURO;
- card->interface.features |= ISDN_FEATURE_P_EURO;
- printk(KERN_INFO
- "isdnloop: (%s) Euro-Protocol loaded and running\n", CID);
- }
- continue;
-
- }
- }
- }
- }
- if (avail) {
- cmd.command = ISDN_STAT_STAVAIL;
- cmd.driver = card->myid;
- cmd.arg = avail;
- card->interface.statcallb(&cmd);
- }
- if (card->flags & (ISDNLOOP_FLAGS_B1ACTIVE | ISDNLOOP_FLAGS_B2ACTIVE))
- if (!(card->flags & ISDNLOOP_FLAGS_RBTIMER)) {
- /* schedule b-channel polling */
- card->flags |= ISDNLOOP_FLAGS_RBTIMER;
- spin_lock_irqsave(&card->isdnloop_lock, flags);
- del_timer(&card->rb_timer);
- card->rb_timer.expires = jiffies + ISDNLOOP_TIMER_BCREAD;
- add_timer(&card->rb_timer);
- spin_unlock_irqrestore(&card->isdnloop_lock, flags);
- }
- /* schedule again */
- spin_lock_irqsave(&card->isdnloop_lock, flags);
- card->st_timer.expires = jiffies + ISDNLOOP_TIMER_DCREAD;
- add_timer(&card->st_timer);
- spin_unlock_irqrestore(&card->isdnloop_lock, flags);
-}
-
-/*
- * Append a packet to the transmit buffer-queue.
- *
- * Parameter:
- * channel = Number of B-channel
- * skb = packet to send.
- * card = pointer to card-struct
- * Return:
- * Number of bytes transferred, -E??? on error
- */
-static int
-isdnloop_sendbuf(int channel, struct sk_buff *skb, isdnloop_card *card)
-{
- int len = skb->len;
- unsigned long flags;
- struct sk_buff *nskb;
-
- if (len > 4000) {
- printk(KERN_WARNING
- "isdnloop: Send packet too large\n");
- return -EINVAL;
- }
- if (len) {
- if (!(card->flags & (channel ? ISDNLOOP_FLAGS_B2ACTIVE : ISDNLOOP_FLAGS_B1ACTIVE)))
- return 0;
- if (card->sndcount[channel] > ISDNLOOP_MAX_SQUEUE)
- return 0;
- spin_lock_irqsave(&card->isdnloop_lock, flags);
- nskb = dev_alloc_skb(skb->len);
- if (nskb) {
- skb_copy_from_linear_data(skb,
- skb_put(nskb, len), len);
- skb_queue_tail(&card->bqueue[channel], nskb);
- dev_kfree_skb(skb);
- } else
- len = 0;
- card->sndcount[channel] += len;
- spin_unlock_irqrestore(&card->isdnloop_lock, flags);
- }
- return len;
-}
-
-/*
- * Read the messages from the card's ringbuffer
- *
- * Parameter:
- * buf = pointer to buffer.
- * len = number of bytes to read.
- * card = pointer to card struct.
- * Return:
- * number of bytes actually transferred.
- */
-static int
-isdnloop_readstatus(u_char __user *buf, int len, isdnloop_card *card)
-{
- int count;
- u_char __user *p;
-
- for (p = buf, count = 0; count < len; p++, count++) {
- if (card->msg_buf_read == card->msg_buf_write)
- return count;
- if (put_user(*card->msg_buf_read++, p))
- return -EFAULT;
- if (card->msg_buf_read > card->msg_buf_end)
- card->msg_buf_read = card->msg_buf;
- }
- return count;
-}
-
-/*
- * Simulate a card's response by appending it to the card's
- * message queue.
- *
- * Parameter:
- * card = pointer to card struct.
- * s = pointer to message-string.
- * ch = channel: 0 = generic messages, 1 and 2 = D-channel messages.
- * Return:
- * 0 on success, 1 on memory squeeze.
- */
-static int
-isdnloop_fake(isdnloop_card *card, char *s, int ch)
-{
- struct sk_buff *skb;
- int len = strlen(s) + ((ch >= 0) ? 3 : 0);
- skb = dev_alloc_skb(len);
- if (!skb) {
- printk(KERN_WARNING "isdnloop: Out of memory in isdnloop_fake\n");
- return 1;
- }
- if (ch >= 0)
- sprintf(skb_put(skb, 3), "%02d;", ch);
- skb_put_data(skb, s, strlen(s));
- skb_queue_tail(&card->dqueue, skb);
- return 0;
-}
-/* *INDENT-OFF* */
-static isdnloop_stat isdnloop_cmd_table[] = {
- {"BCON_R", 0, 1}, /* B-Channel connect */
- {"BCON_I", 0, 17}, /* B-Channel connect ind */
- {"BDIS_R", 0, 2}, /* B-Channel disconnect */
- {"DDIS_R", 0, 3}, /* D-Channel disconnect */
- {"DCON_R", 0, 16}, /* D-Channel connect */
- {"DSCA_R", 0, 4}, /* Dial 1TR6-SPV */
- {"DCAL_R", 0, 5}, /* Dial */
- {"EAZC", 0, 6}, /* Clear EAZ listener */
- {"EAZ", 0, 7}, /* Set EAZ listener */
- {"SEEAZ", 0, 8}, /* Get EAZ listener */
- {"MSN", 0, 9}, /* Set/Clear MSN listener */
- {"MSALL", 0, 10}, /* Set multi MSN listeners */
- {"SETSIL", 0, 11}, /* Set SI list */
- {"SEESIL", 0, 12}, /* Get SI list */
- {"SILC", 0, 13}, /* Clear SI list */
- {"LOCK", 0, -1}, /* LOCK channel */
- {"UNLOCK", 0, -1}, /* UNLOCK channel */
- {"FV2ON", 1, 14}, /* Leased mode on */
- {"FV2OFF", 1, 15}, /* Leased mode off */
- {NULL, 0, -1}
-};
-/* *INDENT-ON* */
-
-
-/*
- * Simulate an error-response from a card.
- *
- * Parameter:
- * card = pointer to card struct.
- */
-static void
-isdnloop_fake_err(isdnloop_card *card)
-{
- char buf[64];
-
- snprintf(buf, sizeof(buf), "E%s", card->omsg);
- isdnloop_fake(card, buf, -1);
- isdnloop_fake(card, "NAK", -1);
-}
-
-static u_char ctable_eu[] = {0x00, 0x11, 0x01, 0x12};
-static u_char ctable_1t[] = {0x00, 0x3b, 0x01, 0x3a};
-
-/*
- * Assemble a simplified cause message depending on the
- * D-channel protocol used.
- *
- * Parameter:
- * card = pointer to card struct.
- * loc = location: 0 = local, 1 = remote.
- * cau = cause: 1 = busy, 2 = nonexistent callerid, 3 = no user responding.
- * Return:
- * Pointer to buffer containing the assembled message.
- */
-static char *
-isdnloop_unicause(isdnloop_card *card, int loc, int cau)
-{
- static char buf[6];
-
- switch (card->ptype) {
- case ISDN_PTYPE_EURO:
- sprintf(buf, "E%02X%02X", (loc) ? 4 : 2, ctable_eu[cau]);
- break;
- case ISDN_PTYPE_1TR6:
- sprintf(buf, "%02X44", ctable_1t[cau]);
- break;
- default:
- return "0000";
- }
- return buf;
-}
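A stand-alone rendering of the two cause formats assembled above, using the same ctable values; the loc/cau inputs are arbitrary examples (remote location, cause 3 = no user responding):

#include <stdio.h>

static unsigned char ctable_eu[] = {0x00, 0x11, 0x01, 0x12};
static unsigned char ctable_1t[] = {0x00, 0x3b, 0x01, 0x3a};

int main(void)
{
	char buf[6];
	int loc = 1, cau = 3;

	/* Euro/DSS1 style */
	sprintf(buf, "E%02X%02X", loc ? 4 : 2, ctable_eu[cau]);
	printf("EDSS1: CAU%s\n", buf);	/* CAUE0412 */

	/* 1TR6 style */
	sprintf(buf, "%02X44", ctable_1t[cau]);
	printf("1TR6:  CAU%s\n", buf);	/* CAU3A44 */

	return 0;
}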
-
-/*
- * Release a virtual connection. Called from timer interrupt, when
- * called party did not respond.
- *
- * Parameter:
- * card = pointer to card struct.
- * ch = channel (0-based)
- */
-static void
-isdnloop_atimeout(isdnloop_card *card, int ch)
-{
- unsigned long flags;
- char buf[60];
-
- spin_lock_irqsave(&card->isdnloop_lock, flags);
- if (card->rcard[ch]) {
- isdnloop_fake(card->rcard[ch], "DDIS_I", card->rch[ch] + 1);
- card->rcard[ch]->rcard[card->rch[ch]] = NULL;
- card->rcard[ch] = NULL;
- }
- isdnloop_fake(card, "DDIS_I", ch + 1);
- /* No user responding */
- sprintf(buf, "CAU%s", isdnloop_unicause(card, 1, 3));
- isdnloop_fake(card, buf, ch + 1);
- spin_unlock_irqrestore(&card->isdnloop_lock, flags);
-}
-
-/*
- * Wrapper for isdnloop_atimeout().
- */
-static void
-isdnloop_atimeout0(struct timer_list *t)
-{
- isdnloop_card *card = from_timer(card, t, c_timer[0]);
-
- isdnloop_atimeout(card, 0);
-}
-
-/*
- * Wrapper for isdnloop_atimeout().
- */
-static void
-isdnloop_atimeout1(struct timer_list *t)
-{
- isdnloop_card *card = from_timer(card, t, c_timer[1]);
-
- isdnloop_atimeout(card, 1);
-}
-
-/*
- * Install a watchdog in case the called user does not respond.
- *
- * Parameter:
- * card = pointer to card struct.
- * ch = channel to watch for.
- */
-static void
-isdnloop_start_ctimer(isdnloop_card *card, int ch)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&card->isdnloop_lock, flags);
- timer_setup(&card->c_timer[ch], ch ? isdnloop_atimeout1
- : isdnloop_atimeout0, 0);
- card->c_timer[ch].expires = jiffies + ISDNLOOP_TIMER_ALERTWAIT;
- add_timer(&card->c_timer[ch]);
- spin_unlock_irqrestore(&card->isdnloop_lock, flags);
-}
-
-/*
- * Kill a pending channel watchdog.
- *
- * Parameter:
- * card = pointer to card struct.
- * ch = channel (0-based).
- */
-static void
-isdnloop_kill_ctimer(isdnloop_card *card, int ch)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&card->isdnloop_lock, flags);
- del_timer(&card->c_timer[ch]);
- spin_unlock_irqrestore(&card->isdnloop_lock, flags);
-}
-
-static u_char si2bit[] = {0, 1, 0, 0, 0, 2, 0, 4, 0, 0};
-static u_char bit2si[] = {1, 5, 7};
-
-/*
- * Try finding a listener for an outgoing call.
- *
- * Parameter:
- * card = pointer to calling card.
- * p = pointer to ICN-type setup-string.
- * lch = channel of calling card.
- * cmd = pointer to struct to be filled when parsing setup.
- * Return:
- * 0 = found match, alerting should happen.
- * 1 = found matching number but it is busy.
- * 2 = no matching listener.
- * 3 = found matching number but SI does not match.
- */
-static int
-isdnloop_try_call(isdnloop_card *card, char *p, int lch, isdn_ctrl *cmd)
-{
- isdnloop_card *cc = cards;
- unsigned long flags;
- int ch;
- int num_match;
- int i;
- char *e;
- char nbuf[32];
-
- isdnloop_parse_setup(p, cmd);
- while (cc) {
- for (ch = 0; ch < 2; ch++) {
- /* Exclude ourself */
- if ((cc == card) && (ch == lch))
- continue;
- num_match = 0;
- switch (cc->ptype) {
- case ISDN_PTYPE_EURO:
- for (i = 0; i < 3; i++)
- if (!(strcmp(cc->s0num[i], cmd->parm.setup.phone)))
- num_match = 1;
- break;
- case ISDN_PTYPE_1TR6:
- e = cc->eazlist[ch];
- while (*e) {
- sprintf(nbuf, "%s%c", cc->s0num[0], *e);
- if (!(strcmp(nbuf, cmd->parm.setup.phone)))
- num_match = 1;
- e++;
- }
- }
- if (num_match) {
- spin_lock_irqsave(&card->isdnloop_lock, flags);
- /* channel idle? */
- if (!(cc->rcard[ch])) {
- /* Check SI */
- if (!(si2bit[cmd->parm.setup.si1] & cc->sil[ch])) {
- spin_unlock_irqrestore(&card->isdnloop_lock, flags);
- return 3;
- }
- /* ch is idle, si and number matches */
- cc->rcard[ch] = card;
- cc->rch[ch] = lch;
- card->rcard[lch] = cc;
- card->rch[lch] = ch;
- spin_unlock_irqrestore(&card->isdnloop_lock, flags);
- return 0;
- } else {
- spin_unlock_irqrestore(&card->isdnloop_lock, flags);
- /* num matches, but busy */
- if (ch == 1)
- return 1;
- }
- }
- }
- cc = cc->next;
- }
- return 2;
-}
-
-/*
- * Depending on D-channel protocol and caller/called, modify
- * phone number.
- *
- * Parameter:
- * card = pointer to card struct.
- * phone = pointer to phone number.
- * caller = flag: 1 = caller, 0 = called.
- * Return:
- * pointer to new phone number.
- */
-static char *
-isdnloop_vstphone(isdnloop_card *card, char *phone, int caller)
-{
- int i;
- static char nphone[30];
-
- if (!card) {
- printk("BUG!!!\n");
- return "";
- }
- switch (card->ptype) {
- case ISDN_PTYPE_EURO:
- if (caller) {
- for (i = 0; i < 2; i++)
- if (!(strcmp(card->s0num[i], phone)))
- return phone;
- return card->s0num[0];
- }
- return phone;
- break;
- case ISDN_PTYPE_1TR6:
- if (caller) {
- sprintf(nphone, "%s%c", card->s0num[0], phone[0]);
- return nphone;
- } else
- return &phone[strlen(phone) - 1];
- break;
- }
- return "";
-}
-
-/*
- * Parse an ICN-type command string sent to the 'card'.
- * Perform misc. actions depending on the command.
- *
- * Parameter:
- * card = pointer to card struct.
- */
-static void
-isdnloop_parse_cmd(isdnloop_card *card)
-{
- char *p = card->omsg;
- isdn_ctrl cmd;
- char buf[60];
- isdnloop_stat *s = isdnloop_cmd_table;
- int action = -1;
- int i;
- int ch;
-
- if ((card->omsg[0] != '0') && (card->omsg[2] != ';')) {
- isdnloop_fake_err(card);
- return;
- }
- ch = card->omsg[1] - '0';
- if ((ch < 0) || (ch > 2)) {
- isdnloop_fake_err(card);
- return;
- }
- p += 3;
- while (s->statstr) {
- if (!strncmp(p, s->statstr, strlen(s->statstr))) {
- action = s->action;
- if (s->command && (ch != 0)) {
- isdnloop_fake_err(card);
- return;
- }
- break;
- }
- s++;
- }
- if (action == -1)
- return;
- switch (action) {
- case 1:
- /* 0x;BCON_R */
- if (card->rcard[ch - 1]) {
- isdnloop_fake(card->rcard[ch - 1], "BCON_I",
- card->rch[ch - 1] + 1);
- isdnloop_fake(card, "BCON_C", ch);
- }
- break;
- case 17:
- /* 0x;BCON_I */
- if (card->rcard[ch - 1]) {
- isdnloop_fake(card->rcard[ch - 1], "BCON_C",
- card->rch[ch - 1] + 1);
- }
- break;
- case 2:
- /* 0x;BDIS_R */
- isdnloop_fake(card, "BDIS_C", ch);
- if (card->rcard[ch - 1]) {
- isdnloop_fake(card->rcard[ch - 1], "BDIS_I",
- card->rch[ch - 1] + 1);
- }
- break;
- case 16:
- /* 0x;DCON_R */
- isdnloop_kill_ctimer(card, ch - 1);
- if (card->rcard[ch - 1]) {
- isdnloop_kill_ctimer(card->rcard[ch - 1], card->rch[ch - 1]);
- isdnloop_fake(card->rcard[ch - 1], "DCON_C",
- card->rch[ch - 1] + 1);
- isdnloop_fake(card, "DCON_C", ch);
- }
- break;
- case 3:
- /* 0x;DDIS_R */
- isdnloop_kill_ctimer(card, ch - 1);
- if (card->rcard[ch - 1]) {
- isdnloop_kill_ctimer(card->rcard[ch - 1], card->rch[ch - 1]);
- isdnloop_fake(card->rcard[ch - 1], "DDIS_I",
- card->rch[ch - 1] + 1);
- card->rcard[ch - 1] = NULL;
- }
- isdnloop_fake(card, "DDIS_C", ch);
- break;
- case 4:
- /* 0x;DSCA_Rdd,yy,zz,oo */
- if (card->ptype != ISDN_PTYPE_1TR6) {
- isdnloop_fake_err(card);
- return;
- }
- /* Fall through */
- case 5:
- /* 0x;DCAL_Rdd,yy,zz,oo */
- p += 6;
- switch (isdnloop_try_call(card, p, ch - 1, &cmd)) {
- case 0:
- /* Alerting */
- sprintf(buf, "D%s_I%s,%02d,%02d,%s",
- (action == 4) ? "SCA" : "CAL",
- isdnloop_vstphone(card, cmd.parm.setup.eazmsn, 1),
- cmd.parm.setup.si1,
- cmd.parm.setup.si2,
- isdnloop_vstphone(card->rcard[ch - 1],
- cmd.parm.setup.phone, 0));
- isdnloop_fake(card->rcard[ch - 1], buf, card->rch[ch - 1] + 1);
- /* Fall through */
- case 3:
- /* si1 does not match, don't alert but start timer */
- isdnloop_start_ctimer(card, ch - 1);
- break;
- case 1:
- /* Remote busy */
- isdnloop_fake(card, "DDIS_I", ch);
- sprintf(buf, "CAU%s", isdnloop_unicause(card, 1, 1));
- isdnloop_fake(card, buf, ch);
- break;
- case 2:
- /* No such user */
- isdnloop_fake(card, "DDIS_I", ch);
- sprintf(buf, "CAU%s", isdnloop_unicause(card, 1, 2));
- isdnloop_fake(card, buf, ch);
- break;
- }
- break;
- case 6:
- /* 0x;EAZC */
- card->eazlist[ch - 1][0] = '\0';
- break;
- case 7:
- /* 0x;EAZ */
- p += 3;
- if (strlen(p) >= sizeof(card->eazlist[0]))
- break;
- strcpy(card->eazlist[ch - 1], p);
- break;
- case 8:
- /* 0x;SEEAZ */
- sprintf(buf, "EAZ-LIST: %s", card->eazlist[ch - 1]);
- isdnloop_fake(card, buf, ch + 1);
- break;
- case 9:
- /* 0x;MSN */
- break;
- case 10:
- /* 0x;MSNALL */
- break;
- case 11:
- /* 0x;SETSIL */
- p += 6;
- i = 0;
- while (strchr("0157", *p)) {
- if (i)
- card->sil[ch - 1] |= si2bit[*p - '0'];
- i = (*p++ == '0');
- }
- if (*p)
- isdnloop_fake_err(card);
- break;
- case 12:
- /* 0x;SEESIL */
- sprintf(buf, "SIN-LIST: ");
- p = buf + 10;
- for (i = 0; i < 3; i++)
- if (card->sil[ch - 1] & (1 << i))
- p += sprintf(p, "%02d", bit2si[i]);
- isdnloop_fake(card, buf, ch + 1);
- break;
- case 13:
- /* 0x;SILC */
- card->sil[ch - 1] = 0;
- break;
- case 14:
- /* 00;FV2ON */
- break;
- case 15:
- /* 00;FV2OFF */
- break;
- }
-}
-
-/*
- * Put command-strings into the command buffer of the 'card'. In reality, execute them
- * right in place by calling isdnloop_parse_cmd(). Also copy every
- * command to the read message ringbuffer, preceding it with a '>'.
- * These messages can be read at /dev/isdnctrl.
- *
- * Parameter:
- * buf = pointer to command buffer.
- * len = length of buffer data.
- * user = flag: 1 = called from userlevel, 0 = called from kernel.
- * card = pointer to card struct.
- * Return:
- * number of bytes transferred (currently always equals len).
- */
-static int
-isdnloop_writecmd(const u_char *buf, int len, int user, isdnloop_card *card)
-{
- int xcount = 0;
- int ocount = 1;
- isdn_ctrl cmd;
-
- while (len) {
- int count = len;
- u_char *p;
- u_char msg[0x100];
-
- if (count > 255)
- count = 255;
- if (user) {
- if (copy_from_user(msg, buf, count))
- return -EFAULT;
- } else
- memcpy(msg, buf, count);
- isdnloop_putmsg(card, '>');
- for (p = msg; count > 0; count--, p++) {
- len--;
- xcount++;
- isdnloop_putmsg(card, *p);
- card->omsg[card->optr] = *p;
- if (*p == '\n') {
- card->omsg[card->optr] = '\0';
- card->optr = 0;
- isdnloop_parse_cmd(card);
- if (len) {
- isdnloop_putmsg(card, '>');
- ocount++;
- }
- } else {
- if (card->optr < 59)
- card->optr++;
- }
- ocount++;
- }
- }
- cmd.command = ISDN_STAT_STAVAIL;
- cmd.driver = card->myid;
- cmd.arg = ocount;
- card->interface.statcallb(&cmd);
- return xcount;
-}
-
-/*
- * Delete card's pending timers, send STOP to linklevel
- */
-static void
-isdnloop_stopcard(isdnloop_card *card)
-{
- unsigned long flags;
- isdn_ctrl cmd;
-
- spin_lock_irqsave(&card->isdnloop_lock, flags);
- if (card->flags & ISDNLOOP_FLAGS_RUNNING) {
- card->flags &= ~ISDNLOOP_FLAGS_RUNNING;
- del_timer(&card->st_timer);
- del_timer(&card->rb_timer);
- del_timer(&card->c_timer[0]);
- del_timer(&card->c_timer[1]);
- cmd.command = ISDN_STAT_STOP;
- cmd.driver = card->myid;
- card->interface.statcallb(&cmd);
- }
- spin_unlock_irqrestore(&card->isdnloop_lock, flags);
-}
-
-/*
- * Stop all cards before unload.
- */
-static void
-isdnloop_stopallcards(void)
-{
- isdnloop_card *p = cards;
-
- while (p) {
- isdnloop_stopcard(p);
- p = p->next;
- }
-}
-
-/*
- * Start a 'card'. Simulate card's boot message and set the phone
- * number(s) of the virtual 'S0-Interface'. Install D-channel
- * poll timer.
- *
- * Parameter:
- * card = pointer to card struct.
- * sdefp = pointer to struct holding ioctl parameters.
- * Return:
- * 0 on success, -E??? otherwise.
- */
-static int
-isdnloop_start(isdnloop_card *card, isdnloop_sdef *sdefp)
-{
- unsigned long flags;
- isdnloop_sdef sdef;
- int i;
-
- if (card->flags & ISDNLOOP_FLAGS_RUNNING)
- return -EBUSY;
- if (copy_from_user((char *) &sdef, (char *) sdefp, sizeof(sdef)))
- return -EFAULT;
-
- for (i = 0; i < 3; i++) {
- if (!memchr(sdef.num[i], 0, sizeof(sdef.num[i])))
- return -EINVAL;
- }
-
- spin_lock_irqsave(&card->isdnloop_lock, flags);
- switch (sdef.ptype) {
- case ISDN_PTYPE_EURO:
- if (isdnloop_fake(card, "DRV1.23EC-Q.931-CAPI-CNS-BASIS-20.02.96",
- -1)) {
- spin_unlock_irqrestore(&card->isdnloop_lock, flags);
- return -ENOMEM;
- }
- card->sil[0] = card->sil[1] = 4;
- if (isdnloop_fake(card, "TEI OK", 0)) {
- spin_unlock_irqrestore(&card->isdnloop_lock, flags);
- return -ENOMEM;
- }
- for (i = 0; i < 3; i++) {
- strlcpy(card->s0num[i], sdef.num[i],
- sizeof(card->s0num[0]));
- }
- break;
- case ISDN_PTYPE_1TR6:
- if (isdnloop_fake(card, "DRV1.04TC-1TR6-CAPI-CNS-BASIS-29.11.95",
- -1)) {
- spin_unlock_irqrestore(&card->isdnloop_lock, flags);
- return -ENOMEM;
- }
- card->sil[0] = card->sil[1] = 4;
- if (isdnloop_fake(card, "TEI OK", 0)) {
- spin_unlock_irqrestore(&card->isdnloop_lock, flags);
- return -ENOMEM;
- }
- strlcpy(card->s0num[0], sdef.num[0], sizeof(card->s0num[0]));
- card->s0num[1][0] = '\0';
- card->s0num[2][0] = '\0';
- break;
- default:
- spin_unlock_irqrestore(&card->isdnloop_lock, flags);
- printk(KERN_WARNING "isdnloop: Illegal D-channel protocol %d\n",
- sdef.ptype);
- return -EINVAL;
- }
- timer_setup(&card->rb_timer, isdnloop_pollbchan, 0);
- timer_setup(&card->st_timer, isdnloop_polldchan, 0);
- card->st_timer.expires = jiffies + ISDNLOOP_TIMER_DCREAD;
- add_timer(&card->st_timer);
- card->flags |= ISDNLOOP_FLAGS_RUNNING;
- spin_unlock_irqrestore(&card->isdnloop_lock, flags);
- return 0;
-}
-
-/*
- * Main handler for commands sent by linklevel.
- */
-static int
-isdnloop_command(isdn_ctrl *c, isdnloop_card *card)
-{
- ulong a;
- int i;
- char cbuf[80];
- isdn_ctrl cmd;
- isdnloop_cdef cdef;
-
- switch (c->command) {
- case ISDN_CMD_IOCTL:
- memcpy(&a, c->parm.num, sizeof(ulong));
- switch (c->arg) {
- case ISDNLOOP_IOCTL_DEBUGVAR:
- return (ulong) card;
- case ISDNLOOP_IOCTL_STARTUP:
- return isdnloop_start(card, (isdnloop_sdef *) a);
- break;
- case ISDNLOOP_IOCTL_ADDCARD:
- if (copy_from_user((char *)&cdef,
- (char *)a,
- sizeof(cdef)))
- return -EFAULT;
- return isdnloop_addcard(cdef.id1);
- break;
- case ISDNLOOP_IOCTL_LEASEDCFG:
- if (a) {
- if (!card->leased) {
- card->leased = 1;
- while (card->ptype == ISDN_PTYPE_UNKNOWN)
- schedule_timeout_interruptible(10);
- schedule_timeout_interruptible(10);
- sprintf(cbuf, "00;FV2ON\n01;EAZ1\n02;EAZ2\n");
- i = isdnloop_writecmd(cbuf, strlen(cbuf), 0, card);
- printk(KERN_INFO
- "isdnloop: (%s) Leased-line mode enabled\n",
- CID);
- cmd.command = ISDN_STAT_RUN;
- cmd.driver = card->myid;
- cmd.arg = 0;
- card->interface.statcallb(&cmd);
- }
- } else {
- if (card->leased) {
- card->leased = 0;
- sprintf(cbuf, "00;FV2OFF\n");
- i = isdnloop_writecmd(cbuf, strlen(cbuf), 0, card);
- printk(KERN_INFO
- "isdnloop: (%s) Leased-line mode disabled\n",
- CID);
- cmd.command = ISDN_STAT_RUN;
- cmd.driver = card->myid;
- cmd.arg = 0;
- card->interface.statcallb(&cmd);
- }
- }
- return 0;
- default:
- return -EINVAL;
- }
- break;
- case ISDN_CMD_DIAL:
- if (!(card->flags & ISDNLOOP_FLAGS_RUNNING))
- return -ENODEV;
- if (card->leased)
- break;
- if ((c->arg & 255) < ISDNLOOP_BCH) {
- char *p;
- char dcode[4];
-
- a = c->arg;
- p = c->parm.setup.phone;
- if (*p == 's' || *p == 'S') {
- /* Dial for SPV */
- p++;
- strcpy(dcode, "SCA");
- } else
- /* Normal Dial */
- strcpy(dcode, "CAL");
- snprintf(cbuf, sizeof(cbuf),
- "%02d;D%s_R%s,%02d,%02d,%s\n", (int) (a + 1),
- dcode, p, c->parm.setup.si1,
- c->parm.setup.si2, c->parm.setup.eazmsn);
- i = isdnloop_writecmd(cbuf, strlen(cbuf), 0, card);
- }
- break;
- case ISDN_CMD_ACCEPTD:
- if (!(card->flags & ISDNLOOP_FLAGS_RUNNING))
- return -ENODEV;
- if (c->arg < ISDNLOOP_BCH) {
- a = c->arg + 1;
- cbuf[0] = 0;
- switch (card->l2_proto[a - 1]) {
- case ISDN_PROTO_L2_X75I:
- sprintf(cbuf, "%02d;BX75\n", (int) a);
- break;
-#ifdef CONFIG_ISDN_X25
- case ISDN_PROTO_L2_X25DTE:
- sprintf(cbuf, "%02d;BX2T\n", (int) a);
- break;
- case ISDN_PROTO_L2_X25DCE:
- sprintf(cbuf, "%02d;BX2C\n", (int) a);
- break;
-#endif
- case ISDN_PROTO_L2_HDLC:
- sprintf(cbuf, "%02d;BTRA\n", (int) a);
- break;
- }
- if (strlen(cbuf))
- i = isdnloop_writecmd(cbuf, strlen(cbuf), 0, card);
- sprintf(cbuf, "%02d;DCON_R\n", (int) a);
- i = isdnloop_writecmd(cbuf, strlen(cbuf), 0, card);
- }
- break;
- case ISDN_CMD_ACCEPTB:
- if (!(card->flags & ISDNLOOP_FLAGS_RUNNING))
- return -ENODEV;
- if (c->arg < ISDNLOOP_BCH) {
- a = c->arg + 1;
- switch (card->l2_proto[a - 1]) {
- case ISDN_PROTO_L2_X75I:
- sprintf(cbuf, "%02d;BCON_R,BX75\n", (int) a);
- break;
-#ifdef CONFIG_ISDN_X25
- case ISDN_PROTO_L2_X25DTE:
- sprintf(cbuf, "%02d;BCON_R,BX2T\n", (int) a);
- break;
- case ISDN_PROTO_L2_X25DCE:
- sprintf(cbuf, "%02d;BCON_R,BX2C\n", (int) a);
- break;
-#endif
- case ISDN_PROTO_L2_HDLC:
- sprintf(cbuf, "%02d;BCON_R,BTRA\n", (int) a);
- break;
- default:
- sprintf(cbuf, "%02d;BCON_R\n", (int) a);
- }
- printk(KERN_DEBUG "isdnloop writecmd '%s'\n", cbuf);
- i = isdnloop_writecmd(cbuf, strlen(cbuf), 0, card);
- break;
- case ISDN_CMD_HANGUP:
- if (!(card->flags & ISDNLOOP_FLAGS_RUNNING))
- return -ENODEV;
- if (c->arg < ISDNLOOP_BCH) {
- a = c->arg + 1;
- sprintf(cbuf, "%02d;BDIS_R\n%02d;DDIS_R\n", (int) a, (int) a);
- i = isdnloop_writecmd(cbuf, strlen(cbuf), 0, card);
- }
- break;
- case ISDN_CMD_SETEAZ:
- if (!(card->flags & ISDNLOOP_FLAGS_RUNNING))
- return -ENODEV;
- if (card->leased)
- break;
- if (c->arg < ISDNLOOP_BCH) {
- a = c->arg + 1;
- if (card->ptype == ISDN_PTYPE_EURO) {
- sprintf(cbuf, "%02d;MS%s%s\n", (int) a,
- c->parm.num[0] ? "N" : "ALL", c->parm.num);
- } else
- sprintf(cbuf, "%02d;EAZ%s\n", (int) a,
- c->parm.num[0] ? c->parm.num : (u_char *) "0123456789");
- i = isdnloop_writecmd(cbuf, strlen(cbuf), 0, card);
- }
- break;
- case ISDN_CMD_CLREAZ:
- if (!(card->flags & ISDNLOOP_FLAGS_RUNNING))
- return -ENODEV;
- if (card->leased)
- break;
- if (c->arg < ISDNLOOP_BCH) {
- a = c->arg + 1;
- if (card->ptype == ISDN_PTYPE_EURO)
- sprintf(cbuf, "%02d;MSNC\n", (int) a);
- else
- sprintf(cbuf, "%02d;EAZC\n", (int) a);
- i = isdnloop_writecmd(cbuf, strlen(cbuf), 0, card);
- }
- break;
- case ISDN_CMD_SETL2:
- if (!(card->flags & ISDNLOOP_FLAGS_RUNNING))
- return -ENODEV;
- if ((c->arg & 255) < ISDNLOOP_BCH) {
- a = c->arg;
- switch (a >> 8) {
- case ISDN_PROTO_L2_X75I:
- sprintf(cbuf, "%02d;BX75\n", (int) (a & 255) + 1);
- break;
-#ifdef CONFIG_ISDN_X25
- case ISDN_PROTO_L2_X25DTE:
- sprintf(cbuf, "%02d;BX2T\n", (int) (a & 255) + 1);
- break;
- case ISDN_PROTO_L2_X25DCE:
- sprintf(cbuf, "%02d;BX2C\n", (int) (a & 255) + 1);
- break;
-#endif
- case ISDN_PROTO_L2_HDLC:
- sprintf(cbuf, "%02d;BTRA\n", (int) (a & 255) + 1);
- break;
- case ISDN_PROTO_L2_TRANS:
- sprintf(cbuf, "%02d;BTRA\n", (int) (a & 255) + 1);
- break;
- default:
- return -EINVAL;
- }
- i = isdnloop_writecmd(cbuf, strlen(cbuf), 0, card);
- card->l2_proto[a & 255] = (a >> 8);
- }
- break;
- case ISDN_CMD_SETL3:
- if (!(card->flags & ISDNLOOP_FLAGS_RUNNING))
- return -ENODEV;
- return 0;
- default:
- return -EINVAL;
- }
- }
- return 0;
-}
-
-/*
- * Find card with given driverId
- */
-static inline isdnloop_card *
-isdnloop_findcard(int driverid)
-{
- isdnloop_card *p = cards;
-
- while (p) {
- if (p->myid == driverid)
- return p;
- p = p->next;
- }
- return (isdnloop_card *) 0;
-}
-
-/*
- * Wrapper functions for interface to linklevel
- */
-static int
-if_command(isdn_ctrl *c)
-{
- isdnloop_card *card = isdnloop_findcard(c->driver);
-
- if (card)
- return isdnloop_command(c, card);
- printk(KERN_ERR
- "isdnloop: if_command called with invalid driverId!\n");
- return -ENODEV;
-}
-
-static int
-if_writecmd(const u_char __user *buf, int len, int id, int channel)
-{
- isdnloop_card *card = isdnloop_findcard(id);
-
- if (card) {
- if (!(card->flags & ISDNLOOP_FLAGS_RUNNING))
- return -ENODEV;
- return isdnloop_writecmd(buf, len, 1, card);
- }
- printk(KERN_ERR
- "isdnloop: if_writecmd called with invalid driverId!\n");
- return -ENODEV;
-}
-
-static int
-if_readstatus(u_char __user *buf, int len, int id, int channel)
-{
- isdnloop_card *card = isdnloop_findcard(id);
-
- if (card) {
- if (!(card->flags & ISDNLOOP_FLAGS_RUNNING))
- return -ENODEV;
- return isdnloop_readstatus(buf, len, card);
- }
- printk(KERN_ERR
- "isdnloop: if_readstatus called with invalid driverId!\n");
- return -ENODEV;
-}
-
-static int
-if_sendbuf(int id, int channel, int ack, struct sk_buff *skb)
-{
- isdnloop_card *card = isdnloop_findcard(id);
-
- if (card) {
- if (!(card->flags & ISDNLOOP_FLAGS_RUNNING))
- return -ENODEV;
- /* ack request stored in skb scratch area */
- *(skb->head) = ack;
- return isdnloop_sendbuf(channel, skb, card);
- }
- printk(KERN_ERR
- "isdnloop: if_sendbuf called with invalid driverId!\n");
- return -ENODEV;
-}
-
-/*
- * Allocate a new card-struct, initialize it,
- * link it into cards-list and register it at linklevel.
- */
-static isdnloop_card *
-isdnloop_initcard(char *id)
-{
- isdnloop_card *card;
- int i;
- card = kzalloc(sizeof(isdnloop_card), GFP_KERNEL);
- if (!card) {
- printk(KERN_WARNING
- "isdnloop: (%s) Could not allocate card-struct.\n", id);
- return (isdnloop_card *) 0;
- }
- card->interface.owner = THIS_MODULE;
- card->interface.channels = ISDNLOOP_BCH;
- card->interface.hl_hdrlen = 1; /* scratch area for storing ack flag*/
- card->interface.maxbufsize = 4000;
- card->interface.command = if_command;
- card->interface.writebuf_skb = if_sendbuf;
- card->interface.writecmd = if_writecmd;
- card->interface.readstat = if_readstatus;
- card->interface.features = ISDN_FEATURE_L2_X75I |
-#ifdef CONFIG_ISDN_X25
- ISDN_FEATURE_L2_X25DTE |
- ISDN_FEATURE_L2_X25DCE |
-#endif
- ISDN_FEATURE_L2_HDLC |
- ISDN_FEATURE_L3_TRANS |
- ISDN_FEATURE_P_UNKNOWN;
- card->ptype = ISDN_PTYPE_UNKNOWN;
- strlcpy(card->interface.id, id, sizeof(card->interface.id));
- card->msg_buf_write = card->msg_buf;
- card->msg_buf_read = card->msg_buf;
- card->msg_buf_end = &card->msg_buf[sizeof(card->msg_buf) - 1];
- for (i = 0; i < ISDNLOOP_BCH; i++) {
- card->l2_proto[i] = ISDN_PROTO_L2_X75I;
- skb_queue_head_init(&card->bqueue[i]);
- }
- skb_queue_head_init(&card->dqueue);
- spin_lock_init(&card->isdnloop_lock);
- card->next = cards;
- cards = card;
- if (!register_isdn(&card->interface)) {
- cards = cards->next;
- printk(KERN_WARNING
- "isdnloop: Unable to register %s\n", id);
- kfree(card);
- return (isdnloop_card *) 0;
- }
- card->myid = card->interface.channels;
- return card;
-}
-
-static int
-isdnloop_addcard(char *id1)
-{
- isdnloop_card *card;
- card = isdnloop_initcard(id1);
- if (!card) {
- return -EIO;
- }
- printk(KERN_INFO
- "isdnloop: (%s) virtual card added\n",
- card->interface.id);
- return 0;
-}
-
-static int __init
-isdnloop_init(void)
-{
- if (isdnloop_id)
- return isdnloop_addcard(isdnloop_id);
-
- return 0;
-}
-
-static void __exit
-isdnloop_exit(void)
-{
- isdn_ctrl cmd;
- isdnloop_card *card = cards;
- isdnloop_card *last;
- int i;
-
- isdnloop_stopallcards();
- while (card) {
- cmd.command = ISDN_STAT_UNLOAD;
- cmd.driver = card->myid;
- card->interface.statcallb(&cmd);
- for (i = 0; i < ISDNLOOP_BCH; i++)
- isdnloop_free_queue(card, i);
- card = card->next;
- }
- card = cards;
- while (card) {
- last = card;
- skb_queue_purge(&card->dqueue);
- card = card->next;
- kfree(last);
- }
- printk(KERN_NOTICE "isdnloop-ISDN-driver unloaded\n");
-}
-
-module_init(isdnloop_init);
-module_exit(isdnloop_exit);
diff --git a/drivers/isdn/isdnloop/isdnloop.h b/drivers/isdn/isdnloop/isdnloop.h
deleted file mode 100644
index e9e035552bb4..000000000000
--- a/drivers/isdn/isdnloop/isdnloop.h
+++ /dev/null
@@ -1,112 +0,0 @@
-/* $Id: isdnloop.h,v 1.5.6.3 2001/09/23 22:24:56 kai Exp $
- *
- * Loopback lowlevel module for testing of linklevel.
- *
- * Copyright 1997 by Fritz Elfert (fritz@isdn4linux.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#ifndef isdnloop_h
-#define isdnloop_h
-
-#define ISDNLOOP_IOCTL_DEBUGVAR 0
-#define ISDNLOOP_IOCTL_ADDCARD 1
-#define ISDNLOOP_IOCTL_LEASEDCFG 2
-#define ISDNLOOP_IOCTL_STARTUP 3
-
-/* Struct for adding new cards */
-typedef struct isdnloop_cdef {
- char id1[10];
-} isdnloop_cdef;
-
-/* Struct for configuring cards */
-typedef struct isdnloop_sdef {
- int ptype;
- char num[3][20];
-} isdnloop_sdef;
-
-#if defined(__KERNEL__) || defined(__DEBUGVAR__)
-
-#ifdef __KERNEL__
-/* Kernel includes */
-
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/major.h>
-#include <asm/io.h>
-#include <linux/kernel.h>
-#include <linux/signal.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/mman.h>
-#include <linux/ioport.h>
-#include <linux/timer.h>
-#include <linux/wait.h>
-#include <linux/isdnif.h>
-
-#endif /* __KERNEL__ */
-
-#define ISDNLOOP_FLAGS_B1ACTIVE 1 /* B-Channel-1 is open */
-#define ISDNLOOP_FLAGS_B2ACTIVE 2 /* B-Channel-2 is open */
-#define ISDNLOOP_FLAGS_RUNNING 4 /* Cards driver activated */
-#define ISDNLOOP_FLAGS_RBTIMER 8 /* scheduling of B-Channel-poll */
-#define ISDNLOOP_TIMER_BCREAD 1 /* B-Channel poll-cycle */
-#define ISDNLOOP_TIMER_DCREAD (HZ/2) /* D-Channel poll-cycle */
-#define ISDNLOOP_TIMER_ALERTWAIT (10 * HZ) /* Alert timeout */
-#define ISDNLOOP_MAX_SQUEUE 65536 /* Max. outstanding send-data */
-#define ISDNLOOP_BCH 2 /* channels per card */
-
-/*
- * Per card driver data
- */
-typedef struct isdnloop_card {
- struct isdnloop_card *next; /* Pointer to next device struct */
- struct isdnloop_card
- *rcard[ISDNLOOP_BCH]; /* Pointer to 'remote' card */
- int rch[ISDNLOOP_BCH]; /* 'remote' channel */
- int myid; /* Driver-Nr. assigned by linklevel */
- int leased; /* Flag: This Adapter is connected */
- /* to a leased line */
- int sil[ISDNLOOP_BCH]; /* SI's to listen for */
- char eazlist[ISDNLOOP_BCH][11];
- /* EAZ's to listen for */
- char s0num[3][20]; /* 1TR6 base-number or MSN's */
- unsigned short flags; /* Statusflags */
- int ptype; /* Protocol type (1TR6 or Euro) */
- struct timer_list st_timer; /* Timer for Status-Polls */
- struct timer_list rb_timer; /* Timer for B-Channel-Polls */
- struct timer_list
- c_timer[ISDNLOOP_BCH]; /* Timer for Alerting */
- int l2_proto[ISDNLOOP_BCH]; /* Current layer-2-protocol */
- isdn_if interface; /* Interface to upper layer */
- int iptr; /* Index to imsg-buffer */
- char imsg[60]; /* Internal buf for status-parsing */
- int optr; /* Index to omsg-buffer */
- char omsg[60]; /* Internal buf for cmd-parsing */
- char msg_buf[2048]; /* Buffer for status-messages */
- char *msg_buf_write; /* Writepointer for statusbuffer */
- char *msg_buf_read; /* Readpointer for statusbuffer */
- char *msg_buf_end; /* Pointer to end of statusbuffer */
- int sndcount[ISDNLOOP_BCH]; /* Byte-counters for B-Ch.-send */
- struct sk_buff_head
- bqueue[ISDNLOOP_BCH]; /* B-Channel queues */
- struct sk_buff_head dqueue; /* D-Channel queue */
- spinlock_t isdnloop_lock;
-} isdnloop_card;
-
-/*
- * Main driver data
- */
-#ifdef __KERNEL__
-static isdnloop_card *cards = (isdnloop_card *) 0;
-#endif /* __KERNEL__ */
-
-/* Utility-Macros */
-
-#define CID (card->interface.id)
-
-#endif /* defined(__KERNEL__) || defined(__DEBUGVAR__) */
-#endif /* isdnloop_h */
diff --git a/drivers/isdn/mISDN/dsp_core.c b/drivers/isdn/mISDN/dsp_core.c
index cd036e87335a..038e72a84b33 100644
--- a/drivers/isdn/mISDN/dsp_core.c
+++ b/drivers/isdn/mISDN/dsp_core.c
@@ -4,8 +4,6 @@
* Karsten Keil (keil@isdn4linux.de)
*
* This file is (c) under GNU PUBLIC LICENSE
- * For changes and modifications please read
- * ../../../Documentation/isdn/mISDN.cert
*
* Thanks to Karsten Keil (great drivers)
* Cologne Chip (great chips)
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 760f73a49c9f..b0fdeef10bd9 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -784,6 +784,41 @@ config LEDS_NIC78BX
To compile this driver as a module, choose M here: the module
will be called leds-nic78bx.
+config LEDS_SPI_BYTE
+ tristate "LED support for SPI LED controller with a single byte"
+ depends on LEDS_CLASS
+ depends on SPI
+ depends on OF
+ help
+ This option enables support for LED controllers which use a single byte
+ for controlling the brightness. Currently the following controller is
+ supported: the Ubiquiti airCube ISP microcontroller-based LED controller.
+
+config LEDS_TI_LMU_COMMON
+ tristate "LED driver for TI LMU"
+ depends on LEDS_CLASS
+ depends on REGMAP
+ help
+ Say Y to enable the LED driver for TI LMU devices.
+ This supports common features between the TI LM3532, LM3631, LM3632,
+ LM3633, LM3695 and LM3697.
+
+config LEDS_LM3697
+ tristate "LED driver for LM3697"
+ depends on LEDS_TI_LMU_COMMON
+ depends on I2C && OF
+ help
+ Say Y to enable the LM3697 LED driver for TI LMU devices.
+ This supports the LED device LM3697.
+
+config LEDS_LM36274
+ tristate "LED driver for LM36274"
+ depends on LEDS_TI_LMU_COMMON
+ depends on MFD_TI_LMU
+ help
+ Say Y to enable the LM36274 LED driver for TI LMU devices.
+ This supports the LED device LM36274.
+
comment "LED Triggers"
source "drivers/leds/trigger/Kconfig"
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 1e9702ebffee..41fb073a39c1 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -77,10 +77,14 @@ obj-$(CONFIG_LEDS_PM8058) += leds-pm8058.o
obj-$(CONFIG_LEDS_MLXCPLD) += leds-mlxcpld.o
obj-$(CONFIG_LEDS_MLXREG) += leds-mlxreg.o
obj-$(CONFIG_LEDS_NIC78BX) += leds-nic78bx.o
+obj-$(CONFIG_LEDS_SPI_BYTE) += leds-spi-byte.o
obj-$(CONFIG_LEDS_MT6323) += leds-mt6323.o
obj-$(CONFIG_LEDS_LM3692X) += leds-lm3692x.o
obj-$(CONFIG_LEDS_SC27XX_BLTC) += leds-sc27xx-bltc.o
obj-$(CONFIG_LEDS_LM3601X) += leds-lm3601x.o
+obj-$(CONFIG_LEDS_TI_LMU_COMMON) += leds-ti-lmu-common.o
+obj-$(CONFIG_LEDS_LM3697) += leds-lm3697.o
+obj-$(CONFIG_LEDS_LM36274) += leds-lm36274.o
# LED SPI Drivers
obj-$(CONFIG_LEDS_CR0014114) += leds-cr0014114.o
diff --git a/drivers/leds/leds-lm36274.c b/drivers/leds/leds-lm36274.c
new file mode 100644
index 000000000000..ed9dc857ec8f
--- /dev/null
+++ b/drivers/leds/leds-lm36274.c
@@ -0,0 +1,172 @@
+// SPDX-License-Identifier: GPL-2.0
+// TI LM36274 LED chip family driver
+// Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/leds.h>
+#include <linux/leds-ti-lmu-common.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include <linux/mfd/ti-lmu.h>
+#include <linux/mfd/ti-lmu-register.h>
+
+#include <uapi/linux/uleds.h>
+
+#define LM36274_MAX_STRINGS 4
+#define LM36274_BL_EN BIT(4)
+
+/**
+ * struct lm36274
+ * @pdev: platform device
+ * @led_dev: led class device
+ * @lmu_data: Register and setting values for common code
+ * @regmap: Device's register map
+ * @dev: Pointer to the device's struct device
+ * @led_sources: The LED strings supported in this array
+ * @num_leds: Number of LED strings supported in this array
+ * @label: LED label built from the DT "label" property
+ */
+struct lm36274 {
+ struct platform_device *pdev;
+ struct led_classdev led_dev;
+ struct ti_lmu_bank lmu_data;
+ struct regmap *regmap;
+ struct device *dev;
+
+ u32 led_sources[LM36274_MAX_STRINGS];
+ int num_leds;
+ char label[LED_MAX_NAME_SIZE];
+};
+
+static int lm36274_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brt_val)
+{
+ struct lm36274 *led = container_of(led_cdev, struct lm36274, led_dev);
+
+ return ti_lmu_common_set_brightness(&led->lmu_data, brt_val);
+}
+
+static int lm36274_init(struct lm36274 *lm36274_data)
+{
+ int enable_val = 0;
+ int i;
+
+ for (i = 0; i < lm36274_data->num_leds; i++)
+ enable_val |= (1 << lm36274_data->led_sources[i]);
+
+ if (!enable_val) {
+ dev_err(lm36274_data->dev, "No LEDs were enabled\n");
+ return -EINVAL;
+ }
+
+ enable_val |= LM36274_BL_EN;
+
+ return regmap_write(lm36274_data->regmap, LM36274_REG_BL_EN,
+ enable_val);
+}
+
+static int lm36274_parse_dt(struct lm36274 *lm36274_data)
+{
+ struct fwnode_handle *child = NULL;
+ struct device *dev = &lm36274_data->pdev->dev;
+ const char *name;
+ int child_cnt;
+ int ret = -EINVAL;
+
+ /* There should only be 1 node */
+ child_cnt = device_get_child_node_count(dev);
+ if (child_cnt != 1)
+ return -EINVAL;
+
+ device_for_each_child_node(dev, child) {
+ ret = fwnode_property_read_string(child, "label", &name);
+ if (ret)
+ snprintf(lm36274_data->label, sizeof(lm36274_data->label),
+ "%s::", lm36274_data->pdev->name);
+ else
+ snprintf(lm36274_data->label, sizeof(lm36274_data->label),
+ "%s:%s", lm36274_data->pdev->name, name);
+
+ lm36274_data->num_leds = fwnode_property_read_u32_array(child,
+ "led-sources",
+ NULL, 0);
+ if (lm36274_data->num_leds <= 0 ||
+ lm36274_data->num_leds > LM36274_MAX_STRINGS)
+ return -ENODEV;
+
+ ret = fwnode_property_read_u32_array(child, "led-sources",
+ lm36274_data->led_sources,
+ lm36274_data->num_leds);
+ if (ret) {
+ dev_err(dev, "led-sources property missing\n");
+ return ret;
+ }
+
+ fwnode_property_read_string(child, "linux,default-trigger",
+ &lm36274_data->led_dev.default_trigger);
+
+ }
+
+ lm36274_data->lmu_data.regmap = lm36274_data->regmap;
+ lm36274_data->lmu_data.max_brightness = MAX_BRIGHTNESS_11BIT;
+ lm36274_data->lmu_data.msb_brightness_reg = LM36274_REG_BRT_MSB;
+ lm36274_data->lmu_data.lsb_brightness_reg = LM36274_REG_BRT_LSB;
+
+ lm36274_data->led_dev.name = lm36274_data->label;
+ lm36274_data->led_dev.max_brightness = MAX_BRIGHTNESS_11BIT;
+ lm36274_data->led_dev.brightness_set_blocking = lm36274_brightness_set;
+
+ return 0;
+}
+
+static int lm36274_probe(struct platform_device *pdev)
+{
+ struct ti_lmu *lmu = dev_get_drvdata(pdev->dev.parent);
+ struct lm36274 *lm36274_data;
+ int ret;
+
+ lm36274_data = devm_kzalloc(&pdev->dev, sizeof(*lm36274_data),
+ GFP_KERNEL);
+ if (!lm36274_data)
+ return -ENOMEM;
+
+ lm36274_data->pdev = pdev;
+ lm36274_data->dev = lmu->dev;
+ lm36274_data->regmap = lmu->regmap;
+ dev_set_drvdata(&pdev->dev, lm36274_data);
+
+ ret = lm36274_parse_dt(lm36274_data);
+ if (ret) {
+ dev_err(lm36274_data->dev, "Failed to parse DT node\n");
+ return ret;
+ }
+
+ ret = lm36274_init(lm36274_data);
+ if (ret) {
+ dev_err(lm36274_data->dev, "Failed to init the device\n");
+ return ret;
+ }
+
+ return devm_led_classdev_register(lm36274_data->dev,
+ &lm36274_data->led_dev);
+}
+
+static const struct of_device_id of_lm36274_leds_match[] = {
+ { .compatible = "ti,lm36274-backlight", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_lm36274_leds_match);
+
+static struct platform_driver lm36274_driver = {
+ .probe = lm36274_probe,
+ .driver = {
+ .name = "lm36274-leds",
+ },
+};
+module_platform_driver(lm36274_driver)
+
+MODULE_DESCRIPTION("Texas Instruments LM36274 LED driver");
+MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>");
+MODULE_LICENSE("GPL v2");
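
For reference, lm36274_init() above derives the LM36274_REG_BL_EN value by setting one bit per LED string listed in "led-sources" and then OR-ing in the bank-enable bit. A minimal user-space sketch of that arithmetic; the register layout is taken from the driver above, the string indices are hypothetical:

    #include <stdio.h>

    #define LM36274_BL_EN (1u << 4) /* bank enable, bit 4, as in the driver above */

    /* Hypothetical "led-sources" values from a DT child node: strings 0 and 2. */
    static const unsigned int led_sources[] = { 0, 2 };

    int main(void)
    {
            unsigned int enable_val = 0;
            size_t i;

            /* One enable bit per LED string, exactly as lm36274_init() does. */
            for (i = 0; i < sizeof(led_sources) / sizeof(led_sources[0]); i++)
                    enable_val |= 1u << led_sources[i];

            enable_val |= LM36274_BL_EN;

            /* Expected: strings 0 and 2 plus BL_EN -> 0x15 */
            printf("LM36274_REG_BL_EN value: 0x%02x\n", enable_val);
            return 0;
    }
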
diff --git a/drivers/leds/leds-lm3697.c b/drivers/leds/leds-lm3697.c
new file mode 100644
index 000000000000..54e0e35df824
--- /dev/null
+++ b/drivers/leds/leds-lm3697.c
@@ -0,0 +1,395 @@
+// SPDX-License-Identifier: GPL-2.0
+// TI LM3697 LED chip family driver
+// Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/regulator/consumer.h>
+#include <linux/leds-ti-lmu-common.h>
+
+#define LM3697_REV 0x0
+#define LM3697_RESET 0x1
+#define LM3697_OUTPUT_CONFIG 0x10
+#define LM3697_CTRL_A_RAMP 0x11
+#define LM3697_CTRL_B_RAMP 0x12
+#define LM3697_CTRL_A_B_RT_RAMP 0x13
+#define LM3697_CTRL_A_B_RAMP_CFG 0x14
+#define LM3697_CTRL_A_B_BRT_CFG 0x16
+#define LM3697_CTRL_A_FS_CURR_CFG 0x17
+#define LM3697_CTRL_B_FS_CURR_CFG 0x18
+#define LM3697_PWM_CFG 0x1c
+#define LM3697_CTRL_A_BRT_LSB 0x20
+#define LM3697_CTRL_A_BRT_MSB 0x21
+#define LM3697_CTRL_B_BRT_LSB 0x22
+#define LM3697_CTRL_B_BRT_MSB 0x23
+#define LM3697_CTRL_ENABLE 0x24
+
+#define LM3697_SW_RESET BIT(0)
+
+#define LM3697_CTRL_A_EN BIT(0)
+#define LM3697_CTRL_B_EN BIT(1)
+#define LM3697_CTRL_A_B_EN (LM3697_CTRL_A_EN | LM3697_CTRL_B_EN)
+
+#define LM3697_MAX_LED_STRINGS 3
+
+#define LM3697_CONTROL_A 0
+#define LM3697_CONTROL_B 1
+#define LM3697_MAX_CONTROL_BANKS 2
+
+/**
+ * struct lm3697_led -
+ * @hvled_strings: Array of LED strings associated with a control bank
+ * @label: LED label
+ * @led_dev: LED class device
+ * @priv: Pointer to the device struct
+ * @lmu_data: Register and setting values for common code
+ * @control_bank: Control bank the LED is associated with. 0 is control
+ * bank A, 1 is control bank B
+ */
+struct lm3697_led {
+ u32 hvled_strings[LM3697_MAX_LED_STRINGS];
+ char label[LED_MAX_NAME_SIZE];
+ struct led_classdev led_dev;
+ struct lm3697 *priv;
+ struct ti_lmu_bank lmu_data;
+ int control_bank;
+ int enabled;
+ int num_leds;
+};
+
+/**
+ * struct lm3697 -
+ * @enable_gpio: Hardware enable gpio
+ * @regulator: LED supply regulator pointer
+ * @client: Pointer to the I2C client
+ * @regmap: Device's register map
+ * @dev: Pointer to the device's struct device
+ * @lock: Lock for reading/writing the device
+ * @leds: Array of LED strings
+ */
+struct lm3697 {
+ struct gpio_desc *enable_gpio;
+ struct regulator *regulator;
+ struct i2c_client *client;
+ struct regmap *regmap;
+ struct device *dev;
+ struct mutex lock;
+
+ int bank_cfg;
+
+ struct lm3697_led leds[];
+};
+
+static const struct reg_default lm3697_reg_defs[] = {
+ {LM3697_OUTPUT_CONFIG, 0x6},
+ {LM3697_CTRL_A_RAMP, 0x0},
+ {LM3697_CTRL_B_RAMP, 0x0},
+ {LM3697_CTRL_A_B_RT_RAMP, 0x0},
+ {LM3697_CTRL_A_B_RAMP_CFG, 0x0},
+ {LM3697_CTRL_A_B_BRT_CFG, 0x0},
+ {LM3697_CTRL_A_FS_CURR_CFG, 0x13},
+ {LM3697_CTRL_B_FS_CURR_CFG, 0x13},
+ {LM3697_PWM_CFG, 0xc},
+ {LM3697_CTRL_A_BRT_LSB, 0x0},
+ {LM3697_CTRL_A_BRT_MSB, 0x0},
+ {LM3697_CTRL_B_BRT_LSB, 0x0},
+ {LM3697_CTRL_B_BRT_MSB, 0x0},
+ {LM3697_CTRL_ENABLE, 0x0},
+};
+
+static const struct regmap_config lm3697_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = LM3697_CTRL_ENABLE,
+ .reg_defaults = lm3697_reg_defs,
+ .num_reg_defaults = ARRAY_SIZE(lm3697_reg_defs),
+ .cache_type = REGCACHE_FLAT,
+};
+
+static int lm3697_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brt_val)
+{
+ struct lm3697_led *led = container_of(led_cdev, struct lm3697_led,
+ led_dev);
+ int ctrl_en_val = (1 << led->control_bank);
+ int ret;
+
+ mutex_lock(&led->priv->lock);
+
+ if (brt_val == LED_OFF) {
+ ret = regmap_update_bits(led->priv->regmap, LM3697_CTRL_ENABLE,
+ ctrl_en_val, ~ctrl_en_val);
+ if (ret) {
+ dev_err(&led->priv->client->dev, "Cannot write ctrl register\n");
+ goto brightness_out;
+ }
+
+ led->enabled = LED_OFF;
+ } else {
+ ret = ti_lmu_common_set_brightness(&led->lmu_data, brt_val);
+ if (ret) {
+ dev_err(&led->priv->client->dev,
+ "Cannot write brightness\n");
+ goto brightness_out;
+ }
+
+ if (!led->enabled) {
+ ret = regmap_update_bits(led->priv->regmap,
+ LM3697_CTRL_ENABLE,
+ ctrl_en_val, ctrl_en_val);
+ if (ret) {
+ dev_err(&led->priv->client->dev,
+ "Cannot enable the device\n");
+ goto brightness_out;
+ }
+
+ led->enabled = brt_val;
+ }
+ }
+
+brightness_out:
+ mutex_unlock(&led->priv->lock);
+ return ret;
+}
+
+static int lm3697_init(struct lm3697 *priv)
+{
+ struct lm3697_led *led;
+ int i, ret;
+
+ if (priv->enable_gpio) {
+ gpiod_direction_output(priv->enable_gpio, 1);
+ } else {
+ ret = regmap_write(priv->regmap, LM3697_RESET, LM3697_SW_RESET);
+ if (ret) {
+ dev_err(&priv->client->dev, "Cannot reset the device\n");
+ goto out;
+ }
+ }
+
+ ret = regmap_write(priv->regmap, LM3697_CTRL_ENABLE, 0x0);
+ if (ret) {
+ dev_err(&priv->client->dev, "Cannot write ctrl enable\n");
+ goto out;
+ }
+
+ ret = regmap_write(priv->regmap, LM3697_OUTPUT_CONFIG, priv->bank_cfg);
+ if (ret)
+ dev_err(&priv->client->dev, "Cannot write OUTPUT config\n");
+
+ for (i = 0; i < LM3697_MAX_CONTROL_BANKS; i++) {
+ led = &priv->leds[i];
+ ret = ti_lmu_common_set_ramp(&led->lmu_data);
+ if (ret)
+ dev_err(&priv->client->dev, "Setting the ramp rate failed\n");
+ }
+out:
+ return ret;
+}
+
+static int lm3697_probe_dt(struct lm3697 *priv)
+{
+ struct fwnode_handle *child = NULL;
+ struct lm3697_led *led;
+ const char *name;
+ int control_bank;
+ size_t i = 0;
+ int ret = -EINVAL;
+ int j;
+
+ priv->enable_gpio = devm_gpiod_get_optional(&priv->client->dev,
+ "enable", GPIOD_OUT_LOW);
+ if (IS_ERR(priv->enable_gpio)) {
+ ret = PTR_ERR(priv->enable_gpio);
+ dev_err(&priv->client->dev, "Failed to get enable gpio: %d\n",
+ ret);
+ return ret;
+ }
+
+ priv->regulator = devm_regulator_get(&priv->client->dev, "vled");
+ if (IS_ERR(priv->regulator))
+ priv->regulator = NULL;
+
+ device_for_each_child_node(priv->dev, child) {
+ ret = fwnode_property_read_u32(child, "reg", &control_bank);
+ if (ret) {
+ dev_err(&priv->client->dev, "reg property missing\n");
+ fwnode_handle_put(child);
+ goto child_out;
+ }
+
+ if (control_bank > LM3697_CONTROL_B) {
+ dev_err(&priv->client->dev, "reg property is invalid\n");
+ ret = -EINVAL;
+ fwnode_handle_put(child);
+ goto child_out;
+ }
+
+ led = &priv->leds[i];
+
+ ret = ti_lmu_common_get_brt_res(&priv->client->dev,
+ child, &led->lmu_data);
+ if (ret)
+ dev_warn(&priv->client->dev, "brightness resolution property missing\n");
+
+ led->control_bank = control_bank;
+ led->lmu_data.regmap = priv->regmap;
+ led->lmu_data.runtime_ramp_reg = LM3697_CTRL_A_RAMP +
+ control_bank;
+ led->lmu_data.msb_brightness_reg = LM3697_CTRL_A_BRT_MSB +
+ led->control_bank * 2;
+ led->lmu_data.lsb_brightness_reg = LM3697_CTRL_A_BRT_LSB +
+ led->control_bank * 2;
+
+ led->num_leds = fwnode_property_read_u32_array(child,
+ "led-sources",
+ NULL, 0);
+
+ if (led->num_leds > LM3697_MAX_LED_STRINGS) {
+ dev_err(&priv->client->dev, "Too many LED strings defined\n");
+ continue;
+ }
+
+ ret = fwnode_property_read_u32_array(child, "led-sources",
+ led->hvled_strings,
+ led->num_leds);
+ if (ret) {
+ dev_err(&priv->client->dev, "led-sources property missing\n");
+ fwnode_handle_put(child);
+ goto child_out;
+ }
+
+ for (j = 0; j < led->num_leds; j++)
+ priv->bank_cfg |=
+ (led->control_bank << led->hvled_strings[j]);
+
+ ret = ti_lmu_common_get_ramp_params(&priv->client->dev,
+ child, &led->lmu_data);
+ if (ret)
+ dev_warn(&priv->client->dev, "runtime-ramp properties missing\n");
+
+ fwnode_property_read_string(child, "linux,default-trigger",
+ &led->led_dev.default_trigger);
+
+ ret = fwnode_property_read_string(child, "label", &name);
+ if (ret)
+ snprintf(led->label, sizeof(led->label),
+ "%s::", priv->client->name);
+ else
+ snprintf(led->label, sizeof(led->label),
+ "%s:%s", priv->client->name, name);
+
+ led->priv = priv;
+ led->led_dev.name = led->label;
+ led->led_dev.max_brightness = led->lmu_data.max_brightness;
+ led->led_dev.brightness_set_blocking = lm3697_brightness_set;
+
+ ret = devm_led_classdev_register(priv->dev, &led->led_dev);
+ if (ret) {
+ dev_err(&priv->client->dev, "led register err: %d\n",
+ ret);
+ fwnode_handle_put(child);
+ goto child_out;
+ }
+
+ i++;
+ }
+
+child_out:
+ return ret;
+}
+
+static int lm3697_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct lm3697 *led;
+ int count;
+ int ret;
+
+ count = device_get_child_node_count(&client->dev);
+ if (!count) {
+ dev_err(&client->dev, "LEDs are not defined in device tree!");
+ return -ENODEV;
+ }
+
+ led = devm_kzalloc(&client->dev, struct_size(led, leds, count),
+ GFP_KERNEL);
+ if (!led)
+ return -ENOMEM;
+
+ mutex_init(&led->lock);
+ i2c_set_clientdata(client, led);
+
+ led->client = client;
+ led->dev = &client->dev;
+ led->regmap = devm_regmap_init_i2c(client, &lm3697_regmap_config);
+ if (IS_ERR(led->regmap)) {
+ ret = PTR_ERR(led->regmap);
+ dev_err(&client->dev, "Failed to allocate register map: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = lm3697_probe_dt(led);
+ if (ret)
+ return ret;
+
+ return lm3697_init(led);
+}
+
+static int lm3697_remove(struct i2c_client *client)
+{
+ struct lm3697 *led = i2c_get_clientdata(client);
+ int ret;
+
+ ret = regmap_update_bits(led->regmap, LM3697_CTRL_ENABLE,
+ LM3697_CTRL_A_B_EN, 0);
+ if (ret) {
+ dev_err(&led->client->dev, "Failed to disable the device\n");
+ return ret;
+ }
+
+ if (led->enable_gpio)
+ gpiod_direction_output(led->enable_gpio, 0);
+
+ if (led->regulator) {
+ ret = regulator_disable(led->regulator);
+ if (ret)
+ dev_err(&led->client->dev,
+ "Failed to disable regulator\n");
+ }
+
+ mutex_destroy(&led->lock);
+
+ return 0;
+}
+
+static const struct i2c_device_id lm3697_id[] = {
+ { "lm3697", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, lm3697_id);
+
+static const struct of_device_id of_lm3697_leds_match[] = {
+ { .compatible = "ti,lm3697", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_lm3697_leds_match);
+
+static struct i2c_driver lm3697_driver = {
+ .driver = {
+ .name = "lm3697",
+ .of_match_table = of_lm3697_leds_match,
+ },
+ .probe = lm3697_probe,
+ .remove = lm3697_remove,
+ .id_table = lm3697_id,
+};
+module_i2c_driver(lm3697_driver);
+
+MODULE_DESCRIPTION("Texas Instruments LM3697 LED driver");
+MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>");
+MODULE_LICENSE("GPL v2");
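
The lm3697_probe_dt() loop above builds the LM3697_OUTPUT_CONFIG value by shifting each child's control-bank number (0 for bank A, 1 for bank B) into the bit position of every HVLED string it drives. A stand-alone sketch of that computation; the DT data is hypothetical (bank A driving HVLED0, bank B driving HVLED1 and HVLED2):

    #include <stdio.h>

    struct bank_node {                      /* hypothetical parsed DT child */
            unsigned int control_bank;      /* 0 = bank A, 1 = bank B */
            unsigned int hvled_strings[3];  /* "led-sources" */
            unsigned int num_leds;
    };

    int main(void)
    {
            const struct bank_node nodes[] = {
                    { .control_bank = 0, .hvled_strings = { 0 }, .num_leds = 1 },
                    { .control_bank = 1, .hvled_strings = { 1, 2 }, .num_leds = 2 },
            };
            unsigned int bank_cfg = 0;
            size_t i, j;

            /* Same shift-and-OR as lm3697_probe_dt() */
            for (i = 0; i < sizeof(nodes) / sizeof(nodes[0]); i++)
                    for (j = 0; j < nodes[i].num_leds; j++)
                            bank_cfg |= nodes[i].control_bank << nodes[i].hvled_strings[j];

            /* HVLED0 stays on bank A (bit 0 = 0), HVLED1/2 go to bank B -> 0x06 */
            printf("LM3697_OUTPUT_CONFIG value: 0x%02x\n", bank_cfg);
            return 0;
    }
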
diff --git a/drivers/leds/leds-max77650.c b/drivers/leds/leds-max77650.c
index 6b74ce9cac12..8a8e5c65b157 100644
--- a/drivers/leds/leds-max77650.c
+++ b/drivers/leds/leds-max77650.c
@@ -64,7 +64,6 @@ static int max77650_led_probe(struct platform_device *pdev)
{
struct device_node *of_node, *child;
struct max77650_led *leds, *led;
- struct device *parent;
struct device *dev;
struct regmap *map;
const char *label;
@@ -72,7 +71,6 @@ static int max77650_led_probe(struct platform_device *pdev)
u32 reg;
dev = &pdev->dev;
- parent = dev->parent;
of_node = dev->of_node;
if (!of_node)
diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c
index c2bc8f569760..4037c504589c 100644
--- a/drivers/leds/leds-pca955x.c
+++ b/drivers/leds/leds-pca955x.c
@@ -429,7 +429,7 @@ static int pca955x_probe(struct i2c_client *client,
int ngpios = 0;
chip = &pca955x_chipdefs[id->driver_data];
- adapter = to_i2c_adapter(client->dev.parent);
+ adapter = client->adapter;
pdata = dev_get_platdata(&client->dev);
if (!pdata) {
pdata = pca955x_get_pdata(client, chip);
diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
index 9328193189ba..48d068f80f11 100644
--- a/drivers/leds/leds-pwm.c
+++ b/drivers/leds/leds-pwm.c
@@ -72,7 +72,7 @@ static inline size_t sizeof_pwm_leds_priv(int num_leds)
}
static int led_pwm_add(struct device *dev, struct led_pwm_priv *priv,
- struct led_pwm *led, struct device_node *child)
+ struct led_pwm *led, struct fwnode_handle *fwnode)
{
struct led_pwm_data *led_data = &priv->leds[priv->num_leds];
struct pwm_args pargs;
@@ -85,8 +85,8 @@ static int led_pwm_add(struct device *dev, struct led_pwm_priv *priv,
led_data->cdev.max_brightness = led->max_brightness;
led_data->cdev.flags = LED_CORE_SUSPENDRESUME;
- if (child)
- led_data->pwm = devm_of_pwm_get(dev, child, NULL);
+ if (fwnode)
+ led_data->pwm = devm_fwnode_pwm_get(dev, fwnode, NULL);
else
led_data->pwm = devm_pwm_get(dev, led->name);
if (IS_ERR(led_data->pwm)) {
@@ -111,7 +111,8 @@ static int led_pwm_add(struct device *dev, struct led_pwm_priv *priv,
if (!led_data->period && (led->pwm_period_ns > 0))
led_data->period = led->pwm_period_ns;
- ret = devm_of_led_classdev_register(dev, child, &led_data->cdev);
+ ret = devm_of_led_classdev_register(dev, to_of_node(fwnode),
+ &led_data->cdev);
if (ret == 0) {
priv->num_leds++;
led_pwm_set(&led_data->cdev, led_data->cdev.brightness);
@@ -123,27 +124,35 @@ static int led_pwm_add(struct device *dev, struct led_pwm_priv *priv,
return ret;
}
-static int led_pwm_create_of(struct device *dev, struct led_pwm_priv *priv)
+static int led_pwm_create_fwnode(struct device *dev, struct led_pwm_priv *priv)
{
- struct device_node *child;
+ struct fwnode_handle *fwnode;
struct led_pwm led;
int ret = 0;
memset(&led, 0, sizeof(led));
- for_each_child_of_node(dev->of_node, child) {
- led.name = of_get_property(child, "label", NULL) ? :
- child->name;
+ device_for_each_child_node(dev, fwnode) {
+ ret = fwnode_property_read_string(fwnode, "label", &led.name);
+ if (ret && is_of_node(fwnode))
+ led.name = to_of_node(fwnode)->name;
- led.default_trigger = of_get_property(child,
- "linux,default-trigger", NULL);
- led.active_low = of_property_read_bool(child, "active-low");
- of_property_read_u32(child, "max-brightness",
- &led.max_brightness);
+ if (!led.name) {
+ fwnode_handle_put(fwnode);
+ return -EINVAL;
+ }
+
+ fwnode_property_read_string(fwnode, "linux,default-trigger",
+ &led.default_trigger);
+
+ led.active_low = fwnode_property_read_bool(fwnode,
+ "active-low");
+ fwnode_property_read_u32(fwnode, "max-brightness",
+ &led.max_brightness);
- ret = led_pwm_add(dev, priv, &led, child);
+ ret = led_pwm_add(dev, priv, &led, fwnode);
if (ret) {
- of_node_put(child);
+ fwnode_handle_put(fwnode);
break;
}
}
@@ -161,7 +170,7 @@ static int led_pwm_probe(struct platform_device *pdev)
if (pdata)
count = pdata->num_leds;
else
- count = of_get_child_count(pdev->dev.of_node);
+ count = device_get_child_node_count(&pdev->dev);
if (!count)
return -EINVAL;
@@ -179,7 +188,7 @@ static int led_pwm_probe(struct platform_device *pdev)
break;
}
} else {
- ret = led_pwm_create_of(&pdev->dev, priv);
+ ret = led_pwm_create_fwnode(&pdev->dev, priv);
}
if (ret)
diff --git a/drivers/leds/leds-spi-byte.c b/drivers/leds/leds-spi-byte.c
new file mode 100644
index 000000000000..b231b563b7bb
--- /dev/null
+++ b/drivers/leds/leds-spi-byte.c
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Christian Mauderer <oss@c-mauderer.de>
+
+/*
+ * The driver supports controllers with a very simple SPI protocol:
+ * - one LED is controlled by a single byte on MOSI
+ * - the value of the byte gives the brightness between two values (lowest to
+ * highest)
+ * - no return value is necessary (no MISO signal)
+ *
+ * The value for minimum and maximum brightness depends on the device
+ * (compatible string).
+ *
+ * Supported devices:
+ * - "ubnt,acb-spi-led": Microcontroller (SONiX 8F26E611LA) based device used
+ * for example in Ubiquiti airCube ISP. Reverse engineered protocol for this
+ * controller:
+ * * Higher two bits set a mode. Lower six bits are a parameter.
+ * * Mode: 00 -> set brightness between 0x00 (min) and 0x3F (max)
+ * * Mode: 01 -> pulsing pattern (min -> max -> min) with an interval. From
+ * some tests, the period is about (50ms + 102ms * parameter). There is a
+ * slightly different pattern starting from 0x10 (longer gap between the
+ * pulses) but the time still follows that calculation.
+ * * Mode: 10 -> same as 01 but with only a ramp from min to max. Again a
+ * slight jump in the pattern at 0x10.
+ * * Mode: 11 -> blinking (off -> 25% -> off -> 25% -> ...) with a period of
+ * (105ms * parameter)
+ * NOTE: This driver currently only supports mode 00.
+ */
+
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/spi/spi.h>
+#include <linux/mutex.h>
+#include <uapi/linux/uleds.h>
+
+struct spi_byte_chipdef {
+ /* SPI byte that will be sent to switch the LED off */
+ u8 off_value;
+ /* SPI byte that will be sent to switch the LED to maximum brightness */
+ u8 max_value;
+};
+
+struct spi_byte_led {
+ struct led_classdev ldev;
+ struct spi_device *spi;
+ char name[LED_MAX_NAME_SIZE];
+ struct mutex mutex;
+ const struct spi_byte_chipdef *cdef;
+};
+
+static const struct spi_byte_chipdef ubnt_acb_spi_led_cdef = {
+ .off_value = 0x0,
+ .max_value = 0x3F,
+};
+
+static const struct of_device_id spi_byte_dt_ids[] = {
+ { .compatible = "ubnt,acb-spi-led", .data = &ubnt_acb_spi_led_cdef },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, spi_byte_dt_ids);
+
+static int spi_byte_brightness_set_blocking(struct led_classdev *dev,
+ enum led_brightness brightness)
+{
+ struct spi_byte_led *led = container_of(dev, struct spi_byte_led, ldev);
+ u8 value;
+ int ret;
+
+ value = (u8) brightness + led->cdef->off_value;
+
+ mutex_lock(&led->mutex);
+ ret = spi_write(led->spi, &value, sizeof(value));
+ mutex_unlock(&led->mutex);
+
+ return ret;
+}
+
+static int spi_byte_probe(struct spi_device *spi)
+{
+ const struct of_device_id *of_dev_id;
+ struct device_node *child;
+ struct device *dev = &spi->dev;
+ struct spi_byte_led *led;
+ const char *name = "leds-spi-byte::";
+ const char *state;
+ int ret;
+
+ of_dev_id = of_match_device(spi_byte_dt_ids, dev);
+ if (!of_dev_id)
+ return -EINVAL;
+
+ if (of_get_child_count(dev->of_node) != 1) {
+ dev_err(dev, "Device must have exactly one LED sub-node.");
+ return -EINVAL;
+ }
+ child = of_get_next_child(dev->of_node, NULL);
+
+ led = devm_kzalloc(dev, sizeof(*led), GFP_KERNEL);
+ if (!led)
+ return -ENOMEM;
+
+ of_property_read_string(child, "label", &name);
+ strlcpy(led->name, name, sizeof(led->name));
+ led->spi = spi;
+ mutex_init(&led->mutex);
+ led->cdef = of_dev_id->data;
+ led->ldev.name = led->name;
+ led->ldev.brightness = LED_OFF;
+ led->ldev.max_brightness = led->cdef->max_value - led->cdef->off_value;
+ led->ldev.brightness_set_blocking = spi_byte_brightness_set_blocking;
+
+ state = of_get_property(child, "default-state", NULL);
+ if (state) {
+ if (!strcmp(state, "on")) {
+ led->ldev.brightness = led->ldev.max_brightness;
+ } else if (strcmp(state, "off")) {
+ /* all other cases except "off" */
+ dev_err(dev, "default-state can only be 'on' or 'off'");
+ return -EINVAL;
+ }
+ }
+ spi_byte_brightness_set_blocking(&led->ldev,
+ led->ldev.brightness);
+
+ ret = devm_led_classdev_register(&spi->dev, &led->ldev);
+ if (ret) {
+ mutex_destroy(&led->mutex);
+ return ret;
+ }
+ spi_set_drvdata(spi, led);
+
+ return 0;
+}
+
+static int spi_byte_remove(struct spi_device *spi)
+{
+ struct spi_byte_led *led = spi_get_drvdata(spi);
+
+ mutex_destroy(&led->mutex);
+
+ return 0;
+}
+
+static struct spi_driver spi_byte_driver = {
+ .probe = spi_byte_probe,
+ .remove = spi_byte_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = spi_byte_dt_ids,
+ },
+};
+
+module_spi_driver(spi_byte_driver);
+
+MODULE_AUTHOR("Christian Mauderer <oss@c-mauderer.de>");
+MODULE_DESCRIPTION("single byte SPI LED driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:leds-spi-byte");
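
The protocol comment at the top of leds-spi-byte.c describes a two-bit mode field in the upper bits of the command byte and a six-bit parameter in the lower bits. A short sketch of how command bytes and the observed timings follow from that description; the mode encoding and the period formulas come from the comment above, the helper itself is purely illustrative and not part of the driver:

    #include <stdio.h>

    /* Mode in bits 7:6, parameter in bits 5:0, per the protocol comment. */
    static unsigned char acb_cmd(unsigned int mode, unsigned int param)
    {
            return (unsigned char)(((mode & 0x3) << 6) | (param & 0x3f));
    }

    int main(void)
    {
            /* Mode 00: plain brightness, 0x00 (min) .. 0x3f (max) */
            printf("half brightness: 0x%02x\n", acb_cmd(0, 0x20));

            /* Mode 01: pulsing; observed period ~ 50ms + 102ms * parameter */
            printf("pulse cmd 0x%02x, period ~%d ms\n",
                   acb_cmd(1, 5), 50 + 102 * 5);

            /* Mode 11: blinking at ~25%; observed period ~ 105ms * parameter */
            printf("blink cmd 0x%02x, period ~%d ms\n",
                   acb_cmd(3, 4), 105 * 4);
            return 0;
    }
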
diff --git a/drivers/leds/leds-tca6507.c b/drivers/leds/leds-tca6507.c
index c59035e157d1..58be20cae183 100644
--- a/drivers/leds/leds-tca6507.c
+++ b/drivers/leds/leds-tca6507.c
@@ -758,7 +758,7 @@ static int tca6507_probe(struct i2c_client *client,
int err;
int i = 0;
- adapter = to_i2c_adapter(client->dev.parent);
+ adapter = client->adapter;
pdata = dev_get_platdata(&client->dev);
if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
diff --git a/drivers/leds/leds-ti-lmu-common.c b/drivers/leds/leds-ti-lmu-common.c
new file mode 100644
index 000000000000..adc7293004f1
--- /dev/null
+++ b/drivers/leds/leds-ti-lmu-common.c
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright 2015 Texas Instruments
+// Copyright 2018 Sebastian Reichel
+// Copyright 2018 Pavel Machek <pavel@ucw.cz>
+// TI LMU LED common framework, based on previous work from
+// Milo Kim <milo.kim@ti.com>
+
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/of_device.h>
+
+#include <linux/leds-ti-lmu-common.h>
+
+static const int ramp_table[16] = {2048, 262000, 524000, 1049000, 2090000,
+ 4194000, 8389000, 16780000, 33550000, 41940000,
+ 50330000, 58720000, 67110000, 83880000,
+ 100660000, 117440000};
+
+static int ti_lmu_common_update_brightness(struct ti_lmu_bank *lmu_bank,
+ int brightness)
+{
+ struct regmap *regmap = lmu_bank->regmap;
+ u8 reg, val;
+ int ret;
+
+ /*
+ * Brightness register update
+ *
+ * 11 bit dimming: update LSB bits and write MSB byte.
+ * MSB brightness should be shifted.
+ * 8 bit dimming: write MSB byte.
+ */
+ if (lmu_bank->max_brightness == MAX_BRIGHTNESS_11BIT) {
+ reg = lmu_bank->lsb_brightness_reg;
+ ret = regmap_update_bits(regmap, reg,
+ LMU_11BIT_LSB_MASK,
+ brightness);
+ if (ret)
+ return ret;
+
+ val = brightness >> LMU_11BIT_MSB_SHIFT;
+ } else {
+ val = brightness;
+ }
+
+ reg = lmu_bank->msb_brightness_reg;
+
+ return regmap_write(regmap, reg, val);
+}
+
+int ti_lmu_common_set_brightness(struct ti_lmu_bank *lmu_bank, int brightness)
+{
+ return ti_lmu_common_update_brightness(lmu_bank, brightness);
+}
+EXPORT_SYMBOL(ti_lmu_common_set_brightness);
+
+static int ti_lmu_common_convert_ramp_to_index(unsigned int usec)
+{
+ int size = ARRAY_SIZE(ramp_table);
+ int i;
+
+ if (usec <= ramp_table[0])
+ return 0;
+
+ if (usec > ramp_table[size - 1])
+ return size - 1;
+
+ for (i = 1; i < size; i++) {
+ if (usec == ramp_table[i])
+ return i;
+
+ /* Find an approximate index by looking up the table */
+ if (usec > ramp_table[i - 1] && usec < ramp_table[i]) {
+ if (usec - ramp_table[i - 1] < ramp_table[i] - usec)
+ return i - 1;
+ else
+ return i;
+ }
+ }
+
+ return -EINVAL;
+}
+
+int ti_lmu_common_set_ramp(struct ti_lmu_bank *lmu_bank)
+{
+ struct regmap *regmap = lmu_bank->regmap;
+ u8 ramp;
+ int ramp_up, ramp_down;
+
+ if (lmu_bank->ramp_up_usec == 0 && lmu_bank->ramp_down_usec == 0) {
+ ramp_up = 0;
+ ramp_down = 0;
+ } else {
+ ramp_up = ti_lmu_common_convert_ramp_to_index(lmu_bank->ramp_up_usec);
+ ramp_down = ti_lmu_common_convert_ramp_to_index(lmu_bank->ramp_down_usec);
+ }
+
+ if (ramp_up < 0 || ramp_down < 0)
+ return -EINVAL;
+
+ ramp = (ramp_up << 4) | ramp_down;
+
+ return regmap_write(regmap, lmu_bank->runtime_ramp_reg, ramp);
+
+}
+EXPORT_SYMBOL(ti_lmu_common_set_ramp);
+
+int ti_lmu_common_get_ramp_params(struct device *dev,
+ struct fwnode_handle *child,
+ struct ti_lmu_bank *lmu_data)
+{
+ int ret;
+
+ ret = fwnode_property_read_u32(child, "ramp-up-us",
+ &lmu_data->ramp_up_usec);
+ if (ret)
+ dev_warn(dev, "ramp-up-us property missing\n");
+
+ ret = fwnode_property_read_u32(child, "ramp-down-us",
+ &lmu_data->ramp_down_usec);
+ if (ret)
+ dev_warn(dev, "ramp-down-us property missing\n");
+
+ return 0;
+}
+EXPORT_SYMBOL(ti_lmu_common_get_ramp_params);
+
+int ti_lmu_common_get_brt_res(struct device *dev, struct fwnode_handle *child,
+ struct ti_lmu_bank *lmu_data)
+{
+ int ret;
+
+ ret = device_property_read_u32(dev, "ti,brightness-resolution",
+ &lmu_data->max_brightness);
+ if (ret)
+ ret = fwnode_property_read_u32(child,
+ "ti,brightness-resolution",
+ &lmu_data->max_brightness);
+ if (lmu_data->max_brightness <= 0) {
+ lmu_data->max_brightness = MAX_BRIGHTNESS_8BIT;
+ return ret;
+ }
+
+ if (lmu_data->max_brightness > MAX_BRIGHTNESS_11BIT)
+ lmu_data->max_brightness = MAX_BRIGHTNESS_11BIT;
+
+ return 0;
+}
+EXPORT_SYMBOL(ti_lmu_common_get_brt_res);
+
+MODULE_DESCRIPTION("TI LMU common LED framework");
+MODULE_AUTHOR("Sebastian Reichel");
+MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("ti-lmu-led-common");
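
ti_lmu_common_update_brightness() above splits an 11-bit brightness value across an LSB and an MSB register, and ti_lmu_common_convert_ramp_to_index() picks the ramp-table entry closest to a requested ramp time. A compact user-space sketch of both; the 3-bit LSB / 8-bit MSB split (mask 0x7, shift 3) is an assumption standing in for the LMU_11BIT_* constants defined by the leds-ti-lmu-common header, and the shortened table is for illustration only:

    #include <stdio.h>

    #define LSB_MASK  0x07  /* assumed stand-in for LMU_11BIT_LSB_MASK */
    #define MSB_SHIFT 3     /* assumed stand-in for LMU_11BIT_MSB_SHIFT */

    static const int ramp_table[] = { 2048, 262000, 524000, 1049000, 2090000 };

    /* Nearest-entry lookup, mirroring ti_lmu_common_convert_ramp_to_index() */
    static int ramp_to_index(unsigned int usec)
    {
            int size = sizeof(ramp_table) / sizeof(ramp_table[0]);
            int i;

            if (usec <= (unsigned int)ramp_table[0])
                    return 0;
            if (usec > (unsigned int)ramp_table[size - 1])
                    return size - 1;

            for (i = 1; i < size; i++) {
                    if (usec <= (unsigned int)ramp_table[i])
                            return (usec - ramp_table[i - 1] <
                                    (unsigned int)ramp_table[i] - usec) ? i - 1 : i;
            }
            return size - 1;
    }

    int main(void)
    {
            unsigned int brightness = 1234; /* 11-bit value, 0..2047 */

            /* LSB register gets the low 3 bits, MSB register the high 8 bits. */
            printf("LSB=0x%02x MSB=0x%02x\n",
                   brightness & LSB_MASK, brightness >> MSB_SHIFT);

            /* 300000 us is closest to the 262000 us entry -> index 1 */
            printf("ramp index for 300000 us: %d\n", ramp_to_index(300000));
            return 0;
    }
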
diff --git a/drivers/leds/trigger/Kconfig b/drivers/leds/trigger/Kconfig
index 7fa9d174a40c..ce9429ca6dde 100644
--- a/drivers/leds/trigger/Kconfig
+++ b/drivers/leds/trigger/Kconfig
@@ -15,7 +15,7 @@ config LEDS_TRIGGER_TIMER
This allows LEDs to be controlled by a programmable timer
via sysfs. Some LED hardware can be programmed to start
blinking the LED without any further software interaction.
- For more details read Documentation/leds/leds-class.txt.
+ For more details read Documentation/leds/leds-class.rst.
If unsure, say Y.
diff --git a/drivers/leds/trigger/ledtrig-activity.c b/drivers/leds/trigger/ledtrig-activity.c
index 4c8b0c3cf284..6a72b7e13719 100644
--- a/drivers/leds/trigger/ledtrig-activity.c
+++ b/drivers/leds/trigger/ledtrig-activity.c
@@ -70,7 +70,7 @@ static void led_activity_function(struct timer_list *t)
* down to 16us, ensuring we won't overflow 32-bit computations below
* even up to 3k CPUs, while keeping divides cheap on smaller systems.
*/
- curr_boot = ktime_get_boot_ns() * cpus;
+ curr_boot = ktime_get_boottime_ns() * cpus;
diff_boot = (curr_boot - activity_data->last_boot) >> 16;
diff_used = (curr_used - activity_data->last_used) >> 16;
activity_data->last_boot = curr_boot;
diff --git a/drivers/leds/trigger/ledtrig-transient.c b/drivers/leds/trigger/ledtrig-transient.c
index a80bb82aacc2..80635183fac8 100644
--- a/drivers/leds/trigger/ledtrig-transient.c
+++ b/drivers/leds/trigger/ledtrig-transient.c
@@ -3,7 +3,7 @@
// LED Kernel Transient Trigger
//
// Transient trigger allows one shot timer activation. Please refer to
-// Documentation/leds/ledtrig-transient.txt for details
+// Documentation/leds/ledtrig-transient.rst for details
// Copyright (C) 2012 Shuah Khan <shuahkhan@gmail.com>
//
// Based on Richard Purdie's ledtrig-timer.c and Atsushi Nemoto's
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 7d555b110ecd..a600934fdd9c 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -478,7 +478,7 @@ static void __nvm_remove_target(struct nvm_target *t, bool graceful)
*/
static int nvm_remove_tgt(struct nvm_ioctl_remove *remove)
{
- struct nvm_target *t;
+ struct nvm_target *t = NULL;
struct nvm_dev *dev;
down_read(&nvm_lock);
diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c
index 773537804319..f546e6f28b8a 100644
--- a/drivers/lightnvm/pblk-core.c
+++ b/drivers/lightnvm/pblk-core.c
@@ -323,14 +323,16 @@ void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type)
void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
int nr_pages)
{
- struct bio_vec bv;
- int i;
-
- WARN_ON(off + nr_pages != bio->bi_vcnt);
-
- for (i = off; i < nr_pages + off; i++) {
- bv = bio->bi_io_vec[i];
- mempool_free(bv.bv_page, &pblk->page_bio_pool);
+ struct bio_vec *bv;
+ struct page *page;
+ int i, e, nbv = 0;
+
+ for (i = 0; i < bio->bi_vcnt; i++) {
+ bv = &bio->bi_io_vec[i];
+ page = bv->bv_page;
+ for (e = 0; e < bv->bv_len; e += PBLK_EXPOSED_PAGE_SIZE, nbv++)
+ if (nbv >= off)
+ mempool_free(page++, &pblk->page_bio_pool);
}
}
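
The rewritten pblk_bio_free_pages() above no longer indexes bio->bi_io_vec directly by page; it walks every bio_vec, counts how many exposed pages each one spans (bv_len / PBLK_EXPOSED_PAGE_SIZE), and frees only the pages whose running index has reached 'off'. A small sketch of just that index bookkeeping, with a hypothetical 4 KiB exposed page size and toy segment lengths:

    #include <stdio.h>

    #define EXPOSED_PAGE_SIZE 4096  /* hypothetical stand-in for PBLK_EXPOSED_PAGE_SIZE */

    int main(void)
    {
            /* Hypothetical bio_vec lengths in bytes (multiples of the page size). */
            const unsigned int bv_len[] = { 8192, 4096, 12288 };
            const int off = 2;      /* skip the first two exposed pages */
            int i, nbv = 0;
            unsigned int e;

            for (i = 0; i < (int)(sizeof(bv_len) / sizeof(bv_len[0])); i++) {
                    for (e = 0; e < bv_len[i]; e += EXPOSED_PAGE_SIZE, nbv++) {
                            if (nbv >= off) /* this is where the real code frees the page */
                                    printf("free exposed page %d (bio_vec %d, offset %u)\n",
                                           nbv, i, e);
                    }
            }
            return 0;
    }
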
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 45254b3ef715..5ccac0b77f17 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -453,7 +453,7 @@ config DM_INIT
Enable "dm-mod.create=" parameter to create mapped devices at init time.
This option is useful to allow mounting rootfs without requiring an
initramfs.
- See Documentation/device-mapper/dm-init.txt for dm-mod.create="..."
+ See Documentation/device-mapper/dm-init.rst for dm-mod.create="..."
format.
If unsure, say N.
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index f8986effcb50..6f776823b9ba 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -393,6 +393,11 @@ long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait)
struct bucket *b;
long r;
+
+ /* No allocation if CACHE_SET_IO_DISABLE bit is set */
+ if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &ca->set->flags)))
+ return -1;
+
/* fastpath */
if (fifo_pop(&ca->free[RESERVE_NONE], r) ||
fifo_pop(&ca->free[reserve], r))
@@ -484,6 +489,10 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
{
int i;
+ /* No allocation if CACHE_SET_IO_DISABLE bit is set */
+ if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags)))
+ return -1;
+
lockdep_assert_held(&c->bucket_lock);
BUG_ON(!n || n > c->caches_loaded || n > MAX_CACHES_PER_SET);
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index fdf75352e16a..013e35a9e317 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -705,8 +705,8 @@ struct cache_set {
atomic_long_t writeback_keys_failed;
atomic_long_t reclaim;
+ atomic_long_t reclaimed_journal_buckets;
atomic_long_t flush_write;
- atomic_long_t retry_flush_write;
enum {
ON_ERROR_UNREGISTER,
@@ -726,8 +726,6 @@ struct cache_set {
#define BUCKET_HASH_BITS 12
struct hlist_head bucket_hash[1 << BUCKET_HASH_BITS];
-
- DECLARE_HEAP(struct btree *, flush_btree);
};
struct bbio {
@@ -1006,7 +1004,7 @@ int bch_flash_dev_create(struct cache_set *c, uint64_t size);
int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
uint8_t *set_uuid);
void bch_cached_dev_detach(struct cached_dev *dc);
-void bch_cached_dev_run(struct cached_dev *dc);
+int bch_cached_dev_run(struct cached_dev *dc);
void bcache_device_stop(struct bcache_device *d);
void bch_cache_set_unregister(struct cache_set *c);
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index 268f1b685084..08768796b543 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -347,22 +347,19 @@ EXPORT_SYMBOL(bch_btree_keys_alloc);
void bch_btree_keys_init(struct btree_keys *b, const struct btree_keys_ops *ops,
bool *expensive_debug_checks)
{
- unsigned int i;
-
b->ops = ops;
b->expensive_debug_checks = expensive_debug_checks;
b->nsets = 0;
b->last_set_unwritten = 0;
- /* XXX: shouldn't be needed */
- for (i = 0; i < MAX_BSETS; i++)
- b->set[i].size = 0;
/*
- * Second loop starts at 1 because b->keys[0]->data is the memory we
- * allocated
+ * struct btree_keys is embedded in struct btree, and struct
+ * bset_tree is embedded in struct btree_keys. They are all
+ * initialized to 0 by kzalloc() in mca_bucket_alloc(), and
+ * b->set[0].data is allocated in bch_btree_keys_alloc(), so we
+ * don't have to initialize b->set[].size and b->set[].data here
+ * any more.
*/
- for (i = 1; i < MAX_BSETS; i++)
- b->set[i].data = NULL;
}
EXPORT_SYMBOL(bch_btree_keys_init);
@@ -970,45 +967,25 @@ static struct bset_search_iter bset_search_tree(struct bset_tree *t,
unsigned int inorder, j, n = 1;
do {
- /*
- * A bit trick here.
- * If p < t->size, (int)(p - t->size) is a minus value and
- * the most significant bit is set, right shifting 31 bits
- * gets 1. If p >= t->size, the most significant bit is
- * not set, right shifting 31 bits gets 0.
- * So the following 2 lines equals to
- * if (p >= t->size)
- * p = 0;
- * but a branch instruction is avoided.
- */
unsigned int p = n << 4;
- p &= ((int) (p - t->size)) >> 31;
-
- prefetch(&t->tree[p]);
+ if (p < t->size)
+ prefetch(&t->tree[p]);
j = n;
f = &t->tree[j];
- /*
- * Similar bit trick, use subtract operation to avoid a branch
- * instruction.
- *
- * n = (f->mantissa > bfloat_mantissa())
- * ? j * 2
- * : j * 2 + 1;
- *
- * We need to subtract 1 from f->mantissa for the sign bit trick
- * to work - that's done in make_bfloat()
- */
- if (likely(f->exponent != 127))
- n = j * 2 + (((unsigned int)
- (f->mantissa -
- bfloat_mantissa(search, f))) >> 31);
- else
- n = (bkey_cmp(tree_to_bkey(t, j), search) > 0)
- ? j * 2
- : j * 2 + 1;
+ if (likely(f->exponent != 127)) {
+ if (f->mantissa >= bfloat_mantissa(search, f))
+ n = j * 2;
+ else
+ n = j * 2 + 1;
+ } else {
+ if (bkey_cmp(tree_to_bkey(t, j), search) > 0)
+ n = j * 2;
+ else
+ n = j * 2 + 1;
+ }
} while (n < t->size);
inorder = to_inorder(j, t);
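
The hunk above drops the branch-avoiding trick that the removed comment documented: `p &= ((int)(p - t->size)) >> 31` relies on an arithmetic right shift of a negative value producing all ones (implementation-defined in C, but true on the platforms the kernel targets) and is equivalent to `if (p >= t->size) p = 0;`. A quick stand-alone check of that equivalence, assuming arithmetic shift and values well below INT_MAX:

    #include <stdio.h>

    int main(void)
    {
            unsigned int size = 100;
            unsigned int p;

            for (p = 0; p < 300; p++) {
                    /* old branchless form: mask is ~0u when p < size, 0 otherwise */
                    unsigned int branchless = p & (unsigned int)(((int)(p - size)) >> 31);
                    /* new explicit form */
                    unsigned int branchy = (p >= size) ? 0 : p;

                    if (branchless != branchy) {
                            printf("mismatch at p=%u: %u vs %u\n", p, branchless, branchy);
                            return 1;
                    }
            }
            printf("branchless and branchy forms agree for p in [0, 300)\n");
            return 0;
    }
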
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 773f5fdad25f..ba434d9ac720 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -35,7 +35,7 @@
#include <linux/rcupdate.h>
#include <linux/sched/clock.h>
#include <linux/rculist.h>
-
+#include <linux/delay.h>
#include <trace/events/bcache.h>
/*
@@ -613,6 +613,10 @@ static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
static struct btree *mca_bucket_alloc(struct cache_set *c,
struct bkey *k, gfp_t gfp)
{
+ /*
+ * kzalloc() is necessary here for initialization,
+ * see code comments in bch_btree_keys_init().
+ */
struct btree *b = kzalloc(sizeof(struct btree), gfp);
if (!b)
@@ -655,7 +659,25 @@ static int mca_reap(struct btree *b, unsigned int min_order, bool flush)
up(&b->io_mutex);
}
+retry:
+ /*
+ * BTREE_NODE_dirty might be cleared in btree_flush_write() by
+ * __bch_btree_node_write(). To avoid an extra flush, acquire
+ * b->write_lock before checking BTREE_NODE_dirty bit.
+ */
mutex_lock(&b->write_lock);
+ /*
+ * If this btree node is selected in btree_flush_write() by journal
+ * code, delay and retry until the node is flushed by journal code
+ * and BTREE_NODE_journal_flush bit cleared by btree_flush_write().
+ */
+ if (btree_node_journal_flush(b)) {
+ pr_debug("bnode %p is being flushed by journal, retry", b);
+ mutex_unlock(&b->write_lock);
+ udelay(1);
+ goto retry;
+ }
+
if (btree_node_dirty(b))
__bch_btree_node_write(b, &cl);
mutex_unlock(&b->write_lock);
@@ -778,10 +800,15 @@ void bch_btree_cache_free(struct cache_set *c)
while (!list_empty(&c->btree_cache)) {
b = list_first_entry(&c->btree_cache, struct btree, list);
- if (btree_node_dirty(b))
+ /*
+ * This function is called by cache_set_free(), no I/O
+ * request on cache now, it is unnecessary to acquire
+ * b->write_lock before clearing BTREE_NODE_dirty anymore.
+ */
+ if (btree_node_dirty(b)) {
btree_complete_write(b, btree_current_write(b));
- clear_bit(BTREE_NODE_dirty, &b->flags);
-
+ clear_bit(BTREE_NODE_dirty, &b->flags);
+ }
mca_data_free(b);
}
@@ -1067,11 +1094,25 @@ static void btree_node_free(struct btree *b)
BUG_ON(b == b->c->root);
+retry:
mutex_lock(&b->write_lock);
+ /*
+ * If the btree node is selected and flushing in btree_flush_write(),
+ * delay and retry until the BTREE_NODE_journal_flush bit cleared,
+ * then it is safe to free the btree node here. Otherwise this btree
+ * node will be in race condition.
+ */
+ if (btree_node_journal_flush(b)) {
+ mutex_unlock(&b->write_lock);
+ pr_debug("bnode %p journal_flush set, retry", b);
+ udelay(1);
+ goto retry;
+ }
- if (btree_node_dirty(b))
+ if (btree_node_dirty(b)) {
btree_complete_write(b, btree_current_write(b));
- clear_bit(BTREE_NODE_dirty, &b->flags);
+ clear_bit(BTREE_NODE_dirty, &b->flags);
+ }
mutex_unlock(&b->write_lock);
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
index d1c72ef64edf..76cfd121a486 100644
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -158,11 +158,13 @@ enum btree_flags {
BTREE_NODE_io_error,
BTREE_NODE_dirty,
BTREE_NODE_write_idx,
+ BTREE_NODE_journal_flush,
};
BTREE_FLAG(io_error);
BTREE_FLAG(dirty);
BTREE_FLAG(write_idx);
+BTREE_FLAG(journal_flush);
static inline struct btree_write *btree_current_write(struct btree *b)
{
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index c25097968319..4d93f07f63e5 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -58,6 +58,18 @@ void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio)
WARN_ONCE(!dc, "NULL pointer of struct cached_dev");
+ /*
+ * Read-ahead requests on a degrading and recovering md raid
+ * (e.g. raid6) device might be failed immediately by the md
+ * raid code, which is not a real hardware media failure. So
+ * we shouldn't count failed REQ_RAHEAD bio to dc->io_errors.
+ */
+ if (bio->bi_opf & REQ_RAHEAD) {
+ pr_warn_ratelimited("%s: Read-ahead I/O failed on backing device, ignore",
+ dc->backing_dev_name);
+ return;
+ }
+
errors = atomic_add_return(1, &dc->io_errors);
if (errors < dc->error_limit)
pr_err("%s: IO error on backing device, unrecoverable",
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 12dae9348147..be2a2a201603 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -100,6 +100,20 @@ reread: left = ca->sb.bucket_size - offset;
blocks = set_blocks(j, block_bytes(ca->set));
+ /*
+ * Nodes in 'list' are kept in increasing order of
+ * i->j.seq; the node at the head has the smallest (oldest)
+ * journal seq and the node at the tail has the largest
+ * (latest) journal seq.
+ */
+
+ /*
+ * Check from the oldest jset for last_seq. If
+ * i->j.seq < j->last_seq, it means the oldest jset
+ * in the list is expired and useless, so remove it
+ * from the list. Otherwise, j is a candidate jset
+ * for the following checks.
+ */
while (!list_empty(list)) {
i = list_first_entry(list,
struct journal_replay, list);
@@ -109,13 +123,22 @@ reread: left = ca->sb.bucket_size - offset;
kfree(i);
}
+ /* iterate list in reverse order (from latest jset) */
list_for_each_entry_reverse(i, list, list) {
if (j->seq == i->j.seq)
goto next_set;
+ /*
+ * If j->seq is less than any i->j.last_seq
+ * in the list, j is an expired and useless jset.
+ */
if (j->seq < i->j.last_seq)
goto next_set;
+ /*
+ * 'where' points to the first jset in the list
+ * which is older than j.
+ */
if (j->seq > i->j.seq) {
where = &i->list;
goto add;
@@ -129,10 +152,12 @@ add:
if (!i)
return -ENOMEM;
memcpy(&i->j, j, bytes);
+ /* Add the new jset after the location 'where' points to */
list_add(&i->list, where);
ret = 1;
- ja->seq[bucket_index] = j->seq;
+ if (j->seq > ja->seq[bucket_index])
+ ja->seq[bucket_index] = j->seq;
next_set:
offset += blocks * ca->sb.block_size;
len -= blocks * ca->sb.block_size;
@@ -268,7 +293,7 @@ bsearch:
struct journal_replay,
list)->j.seq;
- return ret;
+ return 0;
#undef read_bucket
}
@@ -391,60 +416,90 @@ err:
}
/* Journalling */
-#define journal_max_cmp(l, r) \
- (fifo_idx(&c->journal.pin, btree_current_write(l)->journal) < \
- fifo_idx(&(c)->journal.pin, btree_current_write(r)->journal))
-#define journal_min_cmp(l, r) \
- (fifo_idx(&c->journal.pin, btree_current_write(l)->journal) > \
- fifo_idx(&(c)->journal.pin, btree_current_write(r)->journal))
static void btree_flush_write(struct cache_set *c)
{
- /*
- * Try to find the btree node with that references the oldest journal
- * entry, best is our current candidate and is locked if non NULL:
- */
- struct btree *b;
- int i;
+ struct btree *b, *t, *btree_nodes[BTREE_FLUSH_NR];
+ unsigned int i, n;
+
+ if (c->journal.btree_flushing)
+ return;
+
+ spin_lock(&c->journal.flush_write_lock);
+ if (c->journal.btree_flushing) {
+ spin_unlock(&c->journal.flush_write_lock);
+ return;
+ }
+ c->journal.btree_flushing = true;
+ spin_unlock(&c->journal.flush_write_lock);
atomic_long_inc(&c->flush_write);
+ memset(btree_nodes, 0, sizeof(btree_nodes));
+ n = 0;
-retry:
- spin_lock(&c->journal.lock);
- if (heap_empty(&c->flush_btree)) {
- for_each_cached_btree(b, c, i)
- if (btree_current_write(b)->journal) {
- if (!heap_full(&c->flush_btree))
- heap_add(&c->flush_btree, b,
- journal_max_cmp);
- else if (journal_max_cmp(b,
- heap_peek(&c->flush_btree))) {
- c->flush_btree.data[0] = b;
- heap_sift(&c->flush_btree, 0,
- journal_max_cmp);
- }
- }
+ mutex_lock(&c->bucket_lock);
+ list_for_each_entry_safe_reverse(b, t, &c->btree_cache, list) {
+ if (btree_node_journal_flush(b))
+ pr_err("BUG: flush_write bit should not be set here!");
+
+ mutex_lock(&b->write_lock);
- for (i = c->flush_btree.used / 2 - 1; i >= 0; --i)
- heap_sift(&c->flush_btree, i, journal_min_cmp);
+ if (!btree_node_dirty(b)) {
+ mutex_unlock(&b->write_lock);
+ continue;
+ }
+
+ if (!btree_current_write(b)->journal) {
+ mutex_unlock(&b->write_lock);
+ continue;
+ }
+
+ set_btree_node_journal_flush(b);
+
+ mutex_unlock(&b->write_lock);
+
+ btree_nodes[n++] = b;
+ if (n == BTREE_FLUSH_NR)
+ break;
}
+ mutex_unlock(&c->bucket_lock);
- b = NULL;
- heap_pop(&c->flush_btree, b, journal_min_cmp);
- spin_unlock(&c->journal.lock);
+ for (i = 0; i < n; i++) {
+ b = btree_nodes[i];
+ if (!b) {
+ pr_err("BUG: btree_nodes[%d] is NULL", i);
+ continue;
+ }
+
+ /* safe to check without holding b->write_lock */
+ if (!btree_node_journal_flush(b)) {
+ pr_err("BUG: bnode %p: journal_flush bit cleared", b);
+ continue;
+ }
- if (b) {
mutex_lock(&b->write_lock);
if (!btree_current_write(b)->journal) {
+ clear_bit(BTREE_NODE_journal_flush, &b->flags);
+ mutex_unlock(&b->write_lock);
+ pr_debug("bnode %p: written by others", b);
+ continue;
+ }
+
+ if (!btree_node_dirty(b)) {
+ clear_bit(BTREE_NODE_journal_flush, &b->flags);
mutex_unlock(&b->write_lock);
- /* We raced */
- atomic_long_inc(&c->retry_flush_write);
- goto retry;
+ pr_debug("bnode %p: dirty bit cleared by others", b);
+ continue;
}
__bch_btree_node_write(b, NULL);
+ clear_bit(BTREE_NODE_journal_flush, &b->flags);
mutex_unlock(&b->write_lock);
}
+
+ spin_lock(&c->journal.flush_write_lock);
+ c->journal.btree_flushing = false;
+ spin_unlock(&c->journal.flush_write_lock);
}
#define last_seq(j) ((j)->seq - fifo_used(&(j)->pin) + 1)
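
The new btree_flush_write() above works in two phases: under bucket_lock it scans the btree cache, claims up to BTREE_FLUSH_NR dirty nodes that still hold a journal reference by setting their journal_flush flag (taking each node's write_lock briefly), and collects them into a fixed array; then, outside bucket_lock, it re-checks each node under its write_lock, writes it out, and clears the flag. A stripped-down sketch of that collect-then-process shape, with no real locking and a hypothetical node type:

    #include <stdio.h>
    #include <stdbool.h>

    #define FLUSH_NR 8

    struct node {
            int id;
            bool dirty;
            bool has_journal_ref;
            bool journal_flush;     /* claim flag, set in phase one */
    };

    int main(void)
    {
            struct node cache[] = {
                    { 1, true,  true,  false },
                    { 2, false, true,  false },     /* clean: skipped */
                    { 3, true,  false, false },     /* no journal ref: skipped */
                    { 4, true,  true,  false },
            };
            struct node *picked[FLUSH_NR];
            int i, n = 0;

            /* Phase one: pick candidates and claim them (done under bucket_lock). */
            for (i = 0; i < (int)(sizeof(cache) / sizeof(cache[0])) && n < FLUSH_NR; i++) {
                    if (!cache[i].dirty || !cache[i].has_journal_ref)
                            continue;
                    cache[i].journal_flush = true;
                    picked[n++] = &cache[i];
            }

            /* Phase two: flush each claimed node (done outside bucket_lock). */
            for (i = 0; i < n; i++) {
                    printf("flushing node %d\n", picked[i]->id);
                    picked[i]->dirty = false;
                    picked[i]->journal_flush = false;
            }
            return 0;
    }
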
@@ -559,6 +614,7 @@ static void journal_reclaim(struct cache_set *c)
k->ptr[n++] = MAKE_PTR(0,
bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
ca->sb.nr_this_dev);
+ atomic_long_inc(&c->reclaimed_journal_buckets);
}
if (n) {
@@ -811,6 +867,10 @@ atomic_t *bch_journal(struct cache_set *c,
struct journal_write *w;
atomic_t *ret;
+ /* No journaling if CACHE_SET_IO_DISABLE set already */
+ if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags)))
+ return NULL;
+
if (!CACHE_SYNC(&c->sb))
return NULL;
@@ -855,7 +915,6 @@ void bch_journal_free(struct cache_set *c)
free_pages((unsigned long) c->journal.w[1].data, JSET_BITS);
free_pages((unsigned long) c->journal.w[0].data, JSET_BITS);
free_fifo(&c->journal.pin);
- free_heap(&c->flush_btree);
}
int bch_journal_alloc(struct cache_set *c)
@@ -863,6 +922,7 @@ int bch_journal_alloc(struct cache_set *c)
struct journal *j = &c->journal;
spin_lock_init(&j->lock);
+ spin_lock_init(&j->flush_write_lock);
INIT_DELAYED_WORK(&j->work, journal_write_work);
c->journal_delay_ms = 100;
@@ -870,8 +930,7 @@ int bch_journal_alloc(struct cache_set *c)
j->w[0].c = c;
j->w[1].c = c;
- if (!(init_heap(&c->flush_btree, 128, GFP_KERNEL)) ||
- !(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
+ if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
!(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)) ||
!(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)))
return -ENOMEM;
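
The comments added to the replay-list handling above describe an ordered insert: entries are kept sorted by seq, anything already expired is dropped, duplicates are skipped, and a new entry goes in front of the first element that is newer than it. A compact sketch of that policy on a plain array; it is purely illustrative (the driver uses a linked list of struct journal_replay, and the per-entry last_seq checks are collapsed here into a single last_seq threshold):

    #include <stdio.h>

    #define MAX_ENTRIES 16

    /* seqs[] is kept sorted in increasing order, mirroring the replay list. */
    static unsigned long long seqs[MAX_ENTRIES];
    static int nr;

    static void insert_seq(unsigned long long seq, unsigned long long last_seq)
    {
            int i, pos;

            /* Drop expired entries from the front (oldest first). */
            while (nr && seqs[0] < last_seq) {
                    for (i = 1; i < nr; i++)
                            seqs[i - 1] = seqs[i];
                    nr--;
            }

            if (seq < last_seq)             /* expired and useless */
                    return;

            for (i = 0; i < nr; i++)
                    if (seqs[i] == seq)     /* duplicate */
                            return;

            if (nr >= MAX_ENTRIES)
                    return;

            /* Find the first entry newer than seq and insert in front of it. */
            for (pos = 0; pos < nr && seqs[pos] < seq; pos++)
                    ;
            for (i = nr; i > pos; i--)
                    seqs[i] = seqs[i - 1];
            seqs[pos] = seq;
            nr++;
    }

    int main(void)
    {
            unsigned long long input[] = { 10, 7, 12, 10, 3 };
            int i;

            for (i = 0; i < 5; i++)
                    insert_seq(input[i], 5 /* hypothetical last_seq */);

            for (i = 0; i < nr; i++)
                    printf("%llu ", seqs[i]);       /* expect: 7 10 12 */
            printf("\n");
            return 0;
    }
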
diff --git a/drivers/md/bcache/journal.h b/drivers/md/bcache/journal.h
index 66f0facff84b..f2ea34d5f431 100644
--- a/drivers/md/bcache/journal.h
+++ b/drivers/md/bcache/journal.h
@@ -103,6 +103,8 @@ struct journal_write {
/* Embedded in struct cache_set */
struct journal {
spinlock_t lock;
+ spinlock_t flush_write_lock;
+ bool btree_flushing;
/* used when waiting because the journal was full */
struct closure_waitlist wait;
struct closure io;
@@ -154,6 +156,8 @@ struct journal_device {
struct bio_vec bv[8];
};
+#define BTREE_FLUSH_NR 8
+
#define journal_pin_cmp(c, l, r) \
(fifo_idx(&(c)->journal.pin, (l)) > fifo_idx(&(c)->journal.pin, (r)))
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 1b63ac876169..26e374fbf57c 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -40,6 +40,7 @@ static const char invalid_uuid[] = {
static struct kobject *bcache_kobj;
struct mutex bch_register_lock;
+bool bcache_is_reboot;
LIST_HEAD(bch_cache_sets);
static LIST_HEAD(uncached_devices);
@@ -49,6 +50,7 @@ static wait_queue_head_t unregister_wait;
struct workqueue_struct *bcache_wq;
struct workqueue_struct *bch_journal_wq;
+
#define BTREE_MAX_PAGES (256 * 1024 / PAGE_SIZE)
/* limitation of partitions number on single bcache device */
#define BCACHE_MINORS 128
@@ -197,7 +199,9 @@ err:
static void write_bdev_super_endio(struct bio *bio)
{
struct cached_dev *dc = bio->bi_private;
- /* XXX: error checking */
+
+ if (bio->bi_status)
+ bch_count_backing_io_errors(dc, bio);
closure_put(&dc->sb_write);
}
@@ -691,6 +695,7 @@ static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
{
unsigned int i;
struct cache *ca;
+ int ret;
for_each_cache(ca, d->c, i)
bd_link_disk_holder(ca->bdev, d->disk);
@@ -698,9 +703,13 @@ static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
snprintf(d->name, BCACHEDEVNAME_SIZE,
"%s%u", name, d->id);
- WARN(sysfs_create_link(&d->kobj, &c->kobj, "cache") ||
- sysfs_create_link(&c->kobj, &d->kobj, d->name),
- "Couldn't create device <-> cache set symlinks");
+ ret = sysfs_create_link(&d->kobj, &c->kobj, "cache");
+ if (ret < 0)
+ pr_err("Couldn't create device -> cache set symlink");
+
+ ret = sysfs_create_link(&c->kobj, &d->kobj, d->name);
+ if (ret < 0)
+ pr_err("Couldn't create cache set -> device symlink");
clear_bit(BCACHE_DEV_UNLINK_DONE, &d->flags);
}
@@ -908,7 +917,7 @@ static int cached_dev_status_update(void *arg)
}
-void bch_cached_dev_run(struct cached_dev *dc)
+int bch_cached_dev_run(struct cached_dev *dc)
{
struct bcache_device *d = &dc->disk;
char *buf = kmemdup_nul(dc->sb.label, SB_LABEL_SIZE, GFP_KERNEL);
@@ -919,11 +928,19 @@ void bch_cached_dev_run(struct cached_dev *dc)
NULL,
};
+ if (dc->io_disable) {
+ pr_err("I/O disabled on cached dev %s",
+ dc->backing_dev_name);
+ return -EIO;
+ }
+
if (atomic_xchg(&dc->running, 1)) {
kfree(env[1]);
kfree(env[2]);
kfree(buf);
- return;
+ pr_info("cached dev %s is running already",
+ dc->backing_dev_name);
+ return -EBUSY;
}
if (!d->c &&
@@ -949,8 +966,11 @@ void bch_cached_dev_run(struct cached_dev *dc)
kfree(buf);
if (sysfs_create_link(&d->kobj, &disk_to_dev(d->disk)->kobj, "dev") ||
- sysfs_create_link(&disk_to_dev(d->disk)->kobj, &d->kobj, "bcache"))
- pr_debug("error creating sysfs link");
+ sysfs_create_link(&disk_to_dev(d->disk)->kobj,
+ &d->kobj, "bcache")) {
+ pr_err("Couldn't create bcache dev <-> disk sysfs symlinks");
+ return -ENOMEM;
+ }
dc->status_update_thread = kthread_run(cached_dev_status_update,
dc, "bcache_status_update");
@@ -959,6 +979,8 @@ void bch_cached_dev_run(struct cached_dev *dc)
"continue to run without monitoring backing "
"device status");
}
+
+ return 0;
}
/*
@@ -996,7 +1018,6 @@ static void cached_dev_detach_finish(struct work_struct *w)
BUG_ON(!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags));
BUG_ON(refcount_read(&dc->count));
- mutex_lock(&bch_register_lock);
if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
cancel_writeback_rate_update_dwork(dc);
@@ -1012,6 +1033,8 @@ static void cached_dev_detach_finish(struct work_struct *w)
bch_write_bdev_super(dc, &cl);
closure_sync(&cl);
+ mutex_lock(&bch_register_lock);
+
calc_cached_dev_sectors(dc->disk.c);
bcache_device_detach(&dc->disk);
list_move(&dc->list, &uncached_devices);
@@ -1054,6 +1077,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
uint32_t rtime = cpu_to_le32((u32)ktime_get_real_seconds());
struct uuid_entry *u;
struct cached_dev *exist_dc, *t;
+ int ret = 0;
if ((set_uuid && memcmp(set_uuid, c->sb.set_uuid, 16)) ||
(!set_uuid && memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16)))
@@ -1153,6 +1177,8 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
down_write(&dc->writeback_lock);
if (bch_cached_dev_writeback_start(dc)) {
up_write(&dc->writeback_lock);
+ pr_err("Couldn't start writeback facilities for %s",
+ dc->disk.disk->disk_name);
return -ENOMEM;
}
@@ -1163,7 +1189,22 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
bch_sectors_dirty_init(&dc->disk);
- bch_cached_dev_run(dc);
+ ret = bch_cached_dev_run(dc);
+ if (ret && (ret != -EBUSY)) {
+ up_write(&dc->writeback_lock);
+ /*
+ * bch_register_lock is held, so bcache_device_stop() cannot
+ * be called directly here. The kthread and kworker
+ * created previously in bch_cached_dev_writeback_start()
+ * have to be stopped manually here.
+ */
+ kthread_stop(dc->writeback_thread);
+ cancel_writeback_rate_update_dwork(dc);
+ pr_err("Couldn't run cached device %s",
+ dc->backing_dev_name);
+ return ret;
+ }
+
bcache_device_link(&dc->disk, c, "bdev");
atomic_inc(&c->attached_dev_nr);
@@ -1190,18 +1231,16 @@ static void cached_dev_free(struct closure *cl)
{
struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
- mutex_lock(&bch_register_lock);
-
if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
cancel_writeback_rate_update_dwork(dc);
if (!IS_ERR_OR_NULL(dc->writeback_thread))
kthread_stop(dc->writeback_thread);
- if (dc->writeback_write_wq)
- destroy_workqueue(dc->writeback_write_wq);
if (!IS_ERR_OR_NULL(dc->status_update_thread))
kthread_stop(dc->status_update_thread);
+ mutex_lock(&bch_register_lock);
+
if (atomic_read(&dc->running))
bd_unlink_disk_holder(dc->bdev, dc->disk.disk);
bcache_device_free(&dc->disk);
@@ -1290,6 +1329,7 @@ static int register_bdev(struct cache_sb *sb, struct page *sb_page,
{
const char *err = "cannot allocate memory";
struct cache_set *c;
+ int ret = -ENOMEM;
bdevname(bdev, dc->backing_dev_name);
memcpy(&dc->sb, sb, sizeof(struct cache_sb));
@@ -1319,14 +1359,18 @@ static int register_bdev(struct cache_sb *sb, struct page *sb_page,
bch_cached_dev_attach(dc, c, NULL);
if (BDEV_STATE(&dc->sb) == BDEV_STATE_NONE ||
- BDEV_STATE(&dc->sb) == BDEV_STATE_STALE)
- bch_cached_dev_run(dc);
+ BDEV_STATE(&dc->sb) == BDEV_STATE_STALE) {
+ err = "failed to run cached device";
+ ret = bch_cached_dev_run(dc);
+ if (ret)
+ goto err;
+ }
return 0;
err:
pr_notice("error %s: %s", dc->backing_dev_name, err);
bcache_device_stop(&dc->disk);
- return -EIO;
+ return ret;
}
/* Flash only volumes */
@@ -1437,8 +1481,6 @@ int bch_flash_dev_create(struct cache_set *c, uint64_t size)
bool bch_cached_dev_error(struct cached_dev *dc)
{
- struct cache_set *c;
-
if (!dc || test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags))
return false;
@@ -1449,21 +1491,6 @@ bool bch_cached_dev_error(struct cached_dev *dc)
pr_err("stop %s: too many IO errors on backing device %s\n",
dc->disk.disk->disk_name, dc->backing_dev_name);
- /*
- * If the cached device is still attached to a cache set,
- * even dc->io_disable is true and no more I/O requests
- * accepted, cache device internal I/O (writeback scan or
- * garbage collection) may still prevent bcache device from
- * being stopped. So here CACHE_SET_IO_DISABLE should be
- * set to c->flags too, to make the internal I/O to cache
- * device rejected and stopped immediately.
- * If c is NULL, that means the bcache device is not attached
- * to any cache set, then no CACHE_SET_IO_DISABLE bit to set.
- */
- c = dc->disk.c;
- if (c && test_and_set_bit(CACHE_SET_IO_DISABLE, &c->flags))
- pr_info("CACHE_SET_IO_DISABLE already set");
-
bcache_device_stop(&dc->disk);
return true;
}
@@ -1564,19 +1591,23 @@ static void cache_set_flush(struct closure *cl)
kobject_put(&c->internal);
kobject_del(&c->kobj);
- if (c->gc_thread)
+ if (!IS_ERR_OR_NULL(c->gc_thread))
kthread_stop(c->gc_thread);
if (!IS_ERR_OR_NULL(c->root))
list_add(&c->root->list, &c->btree_cache);
- /* Should skip this if we're unregistering because of an error */
- list_for_each_entry(b, &c->btree_cache, list) {
- mutex_lock(&b->write_lock);
- if (btree_node_dirty(b))
- __bch_btree_node_write(b, NULL);
- mutex_unlock(&b->write_lock);
- }
+ /*
+ * Avoid flushing cached nodes if cache set is retiring
+ * due to too many I/O errors detected.
+ */
+ if (!test_bit(CACHE_SET_IO_DISABLE, &c->flags))
+ list_for_each_entry(b, &c->btree_cache, list) {
+ mutex_lock(&b->write_lock);
+ if (btree_node_dirty(b))
+ __bch_btree_node_write(b, NULL);
+ mutex_unlock(&b->write_lock);
+ }
for_each_cache(ca, c, i)
if (ca->alloc_thread)
@@ -1849,6 +1880,23 @@ static int run_cache_set(struct cache_set *c)
if (bch_btree_check(c))
goto err;
+ /*
+	 * bch_btree_check() may occupy too much system memory, which
+	 * has a negative effect on user space application (e.g. database)
+	 * performance. Shrink the mca cache memory proactively here
+	 * to avoid competing for memory with user space workloads.
+ */
+ if (!c->shrinker_disabled) {
+ struct shrink_control sc;
+
+ sc.gfp_mask = GFP_KERNEL;
+ sc.nr_to_scan = c->btree_cache_used * c->btree_pages;
+ /* first run to clear b->accessed tag */
+ c->shrink.scan_objects(&c->shrink, &sc);
+ /* second run to reap non-accessed nodes */
+ c->shrink.scan_objects(&c->shrink, &sc);
+ }
+
bch_journal_mark(c, &journal);
bch_initial_gc_finish(c);
pr_debug("btree_check() done");
@@ -1957,7 +2005,7 @@ err:
}
closure_sync(&cl);
- /* XXX: test this, it's broken */
+
bch_cache_set_error(c, "%s", err);
return -EIO;
@@ -2251,9 +2299,13 @@ err:
static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
const char *buffer, size_t size);
+static ssize_t bch_pending_bdevs_cleanup(struct kobject *k,
+ struct kobj_attribute *attr,
+ const char *buffer, size_t size);
kobj_attribute_write(register, register_bcache);
kobj_attribute_write(register_quiet, register_bcache);
+kobj_attribute_write(pendings_cleanup, bch_pending_bdevs_cleanup);
static bool bch_is_open_backing(struct block_device *bdev)
{
@@ -2301,6 +2353,11 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
if (!try_module_get(THIS_MODULE))
return -EBUSY;
+ /* For latest state of bcache_is_reboot */
+ smp_mb();
+ if (bcache_is_reboot)
+ return -EBUSY;
+
path = kstrndup(buffer, size, GFP_KERNEL);
if (!path)
goto err;
@@ -2378,8 +2435,61 @@ err:
goto out;
}
+
+struct pdev {
+ struct list_head list;
+ struct cached_dev *dc;
+};
+
+static ssize_t bch_pending_bdevs_cleanup(struct kobject *k,
+ struct kobj_attribute *attr,
+ const char *buffer,
+ size_t size)
+{
+ LIST_HEAD(pending_devs);
+ ssize_t ret = size;
+ struct cached_dev *dc, *tdc;
+ struct pdev *pdev, *tpdev;
+ struct cache_set *c, *tc;
+
+ mutex_lock(&bch_register_lock);
+ list_for_each_entry_safe(dc, tdc, &uncached_devices, list) {
+ pdev = kmalloc(sizeof(struct pdev), GFP_KERNEL);
+ if (!pdev)
+ break;
+ pdev->dc = dc;
+ list_add(&pdev->list, &pending_devs);
+ }
+
+ list_for_each_entry_safe(pdev, tpdev, &pending_devs, list) {
+ list_for_each_entry_safe(c, tc, &bch_cache_sets, list) {
+ char *pdev_set_uuid = pdev->dc->sb.set_uuid;
+ char *set_uuid = c->sb.uuid;
+
+ if (!memcmp(pdev_set_uuid, set_uuid, 16)) {
+ list_del(&pdev->list);
+ kfree(pdev);
+ break;
+ }
+ }
+ }
+ mutex_unlock(&bch_register_lock);
+
+ list_for_each_entry_safe(pdev, tpdev, &pending_devs, list) {
+ pr_info("delete pdev %p", pdev);
+ list_del(&pdev->list);
+ bcache_device_stop(&pdev->dc->disk);
+ kfree(pdev);
+ }
+
+ return ret;
+}
+
static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x)
{
+ if (bcache_is_reboot)
+ return NOTIFY_DONE;
+
if (code == SYS_DOWN ||
code == SYS_HALT ||
code == SYS_POWER_OFF) {
@@ -2392,19 +2502,45 @@ static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x)
mutex_lock(&bch_register_lock);
+ if (bcache_is_reboot)
+ goto out;
+
+	/* New registrations are rejected from now on */
+ bcache_is_reboot = true;
+ /*
+	 * Make sure a registering caller (if any) on another CPU
+	 * core sees that bcache_is_reboot is set to true
+ */
+ smp_mb();
+
if (list_empty(&bch_cache_sets) &&
list_empty(&uncached_devices))
goto out;
+ mutex_unlock(&bch_register_lock);
+
pr_info("Stopping all devices:");
+ /*
+	 * The reason bch_register_lock is not held while calling
+	 * bch_cache_set_stop() and bcache_device_stop() is to
+	 * avoid a potential deadlock during reboot, because the
+	 * cache set or bcache device stopping process will acquire
+	 * bch_register_lock too.
+	 *
+	 * We are safe here because bcache_is_reboot is already set
+	 * to true, so register_bcache() will reject any new
+	 * registration. bcache_is_reboot also makes sure
+	 * bcache_reboot() won't be re-entered by another thread,
+	 * so there is no race in the following list iteration with
+	 * list_for_each_entry_safe().
+ */
list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
bch_cache_set_stop(c);
list_for_each_entry_safe(dc, tdc, &uncached_devices, list)
bcache_device_stop(&dc->disk);
- mutex_unlock(&bch_register_lock);
/*
* Give an early chance for other kthreads and
@@ -2496,6 +2632,7 @@ static int __init bcache_init(void)
static const struct attribute *files[] = {
&ksysfs_register.attr,
&ksysfs_register_quiet.attr,
+ &ksysfs_pendings_cleanup.attr,
NULL
};
@@ -2531,6 +2668,8 @@ static int __init bcache_init(void)
bch_debug_init();
closure_debug_init();
+ bcache_is_reboot = false;
+
return 0;
err:
bcache_exit();
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index bfb437ffb13c..9f0826712845 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -16,33 +16,31 @@
#include <linux/sort.h>
#include <linux/sched/clock.h>
+extern bool bcache_is_reboot;
+
/* Default is 0 ("writethrough") */
static const char * const bch_cache_modes[] = {
"writethrough",
"writeback",
"writearound",
- "none",
- NULL
+ "none"
};
/* Default is 0 ("auto") */
static const char * const bch_stop_on_failure_modes[] = {
"auto",
- "always",
- NULL
+ "always"
};
static const char * const cache_replacement_policies[] = {
"lru",
"fifo",
- "random",
- NULL
+ "random"
};
static const char * const error_actions[] = {
"unregister",
- "panic",
- NULL
+ "panic"
};
write_attribute(attach);
@@ -84,8 +82,8 @@ read_attribute(bset_tree_stats);
read_attribute(state);
read_attribute(cache_read_races);
read_attribute(reclaim);
+read_attribute(reclaimed_journal_buckets);
read_attribute(flush_write);
-read_attribute(retry_flush_write);
read_attribute(writeback_keys_done);
read_attribute(writeback_keys_failed);
read_attribute(io_errors);
@@ -180,7 +178,7 @@ SHOW(__bch_cached_dev)
var_print(writeback_percent);
sysfs_hprint(writeback_rate,
wb ? atomic_long_read(&dc->writeback_rate.rate) << 9 : 0);
- sysfs_hprint(io_errors, atomic_read(&dc->io_errors));
+ sysfs_printf(io_errors, "%i", atomic_read(&dc->io_errors));
sysfs_printf(io_error_limit, "%i", dc->error_limit);
sysfs_printf(io_disable, "%i", dc->io_disable);
var_print(writeback_rate_update_seconds);
@@ -271,6 +269,10 @@ STORE(__cached_dev)
struct cache_set *c;
struct kobj_uevent_env *env;
+ /* no user space access if system is rebooting */
+ if (bcache_is_reboot)
+ return -EBUSY;
+
#define d_strtoul(var) sysfs_strtoul(var, dc->var)
#define d_strtoul_nonzero(var) sysfs_strtoul_clamp(var, dc->var, 1, INT_MAX)
#define d_strtoi_h(var) sysfs_hatoi(var, dc->var)
@@ -329,11 +331,14 @@ STORE(__cached_dev)
bch_cache_accounting_clear(&dc->accounting);
if (attr == &sysfs_running &&
- strtoul_or_return(buf))
- bch_cached_dev_run(dc);
+ strtoul_or_return(buf)) {
+ v = bch_cached_dev_run(dc);
+ if (v)
+ return v;
+ }
if (attr == &sysfs_cache_mode) {
- v = __sysfs_match_string(bch_cache_modes, -1, buf);
+ v = sysfs_match_string(bch_cache_modes, buf);
if (v < 0)
return v;
@@ -344,7 +349,7 @@ STORE(__cached_dev)
}
if (attr == &sysfs_stop_when_cache_set_failed) {
- v = __sysfs_match_string(bch_stop_on_failure_modes, -1, buf);
+ v = sysfs_match_string(bch_stop_on_failure_modes, buf);
if (v < 0)
return v;
@@ -408,6 +413,10 @@ STORE(bch_cached_dev)
struct cached_dev *dc = container_of(kobj, struct cached_dev,
disk.kobj);
+ /* no user space access if system is rebooting */
+ if (bcache_is_reboot)
+ return -EBUSY;
+
mutex_lock(&bch_register_lock);
size = __cached_dev_store(kobj, attr, buf, size);
@@ -464,7 +473,7 @@ static struct attribute *bch_cached_dev_files[] = {
&sysfs_writeback_rate_p_term_inverse,
&sysfs_writeback_rate_minimum,
&sysfs_writeback_rate_debug,
- &sysfs_errors,
+ &sysfs_io_errors,
&sysfs_io_error_limit,
&sysfs_io_disable,
&sysfs_dirty_data,
@@ -511,6 +520,10 @@ STORE(__bch_flash_dev)
kobj);
struct uuid_entry *u = &d->c->uuids[d->id];
+ /* no user space access if system is rebooting */
+ if (bcache_is_reboot)
+ return -EBUSY;
+
sysfs_strtoul(data_csum, d->data_csum);
if (attr == &sysfs_size) {
@@ -693,12 +706,12 @@ SHOW(__bch_cache_set)
sysfs_print(reclaim,
atomic_long_read(&c->reclaim));
+ sysfs_print(reclaimed_journal_buckets,
+ atomic_long_read(&c->reclaimed_journal_buckets));
+
sysfs_print(flush_write,
atomic_long_read(&c->flush_write));
- sysfs_print(retry_flush_write,
- atomic_long_read(&c->retry_flush_write));
-
sysfs_print(writeback_keys_done,
atomic_long_read(&c->writeback_keys_done));
sysfs_print(writeback_keys_failed,
@@ -746,6 +759,10 @@ STORE(__bch_cache_set)
struct cache_set *c = container_of(kobj, struct cache_set, kobj);
ssize_t v;
+ /* no user space access if system is rebooting */
+ if (bcache_is_reboot)
+ return -EBUSY;
+
if (attr == &sysfs_unregister)
bch_cache_set_unregister(c);
@@ -799,7 +816,7 @@ STORE(__bch_cache_set)
0, UINT_MAX);
if (attr == &sysfs_errors) {
- v = __sysfs_match_string(error_actions, -1, buf);
+ v = sysfs_match_string(error_actions, buf);
if (v < 0)
return v;
@@ -865,6 +882,10 @@ STORE(bch_cache_set_internal)
{
struct cache_set *c = container_of(kobj, struct cache_set, internal);
+ /* no user space access if system is rebooting */
+ if (bcache_is_reboot)
+ return -EBUSY;
+
return bch_cache_set_store(&c->kobj, attr, buf, size);
}
@@ -914,8 +935,8 @@ static struct attribute *bch_cache_set_internal_files[] = {
&sysfs_bset_tree_stats,
&sysfs_cache_read_races,
&sysfs_reclaim,
+ &sysfs_reclaimed_journal_buckets,
&sysfs_flush_write,
- &sysfs_retry_flush_write,
&sysfs_writeback_keys_done,
&sysfs_writeback_keys_failed,
@@ -1050,6 +1071,10 @@ STORE(__bch_cache)
struct cache *ca = container_of(kobj, struct cache, kobj);
ssize_t v;
+ /* no user space access if system is rebooting */
+ if (bcache_is_reboot)
+ return -EBUSY;
+
if (attr == &sysfs_discard) {
bool v = strtoul_or_return(buf);
@@ -1063,7 +1088,7 @@ STORE(__bch_cache)
}
if (attr == &sysfs_cache_replacement_policy) {
- v = __sysfs_match_string(cache_replacement_policies, -1, buf);
+ v = sysfs_match_string(cache_replacement_policies, buf);
if (v < 0)
return v;
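
Illustrative only, not part of the patch: the NULL sentinels can be dropped from the
string tables above because sysfs_match_string() is a macro that derives the array
length with ARRAY_SIZE(), whereas __sysfs_match_string() with a length of -1 walks
the array until it finds a NULL entry. A minimal sketch of the new pattern:

	#include <linux/string.h>

	/* Hypothetical table, mirroring the bcache mode tables above. */
	static const char * const demo_modes[] = {
		"writethrough",
		"writeback",
		"writearound",
		"none"			/* no NULL sentinel needed */
	};

	static int demo_parse_mode(const char *buf)
	{
		/* Expands to __sysfs_match_string(demo_modes, ARRAY_SIZE(demo_modes), buf);
		 * returns the matching index, or -EINVAL if nothing matches. */
		return sysfs_match_string(demo_modes, buf);
	}
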
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
index 1fbced94e4cc..c029f7443190 100644
--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -113,8 +113,6 @@ do { \
#define heap_full(h) ((h)->used == (h)->size)
-#define heap_empty(h) ((h)->used == 0)
-
#define DECLARE_FIFO(type, name) \
struct { \
size_t front, back, size, mask; \
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 73f0efac2b9f..d60268fe49e1 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -122,6 +122,9 @@ static void __update_writeback_rate(struct cached_dev *dc)
static bool set_at_max_writeback_rate(struct cache_set *c,
struct cached_dev *dc)
{
+ /* Don't set max writeback rate if gc is running */
+ if (!c->gc_mark_valid)
+ return false;
/*
* Idle_counter is increased everytime when update_writeback_rate() is
* called. If all backing devices attached to the same cache set have
@@ -735,6 +738,10 @@ static int bch_writeback_thread(void *arg)
}
}
+ if (dc->writeback_write_wq) {
+ flush_workqueue(dc->writeback_write_wq);
+ destroy_workqueue(dc->writeback_write_wq);
+ }
cached_dev_put(dc);
wait_for_kthread_stop();
@@ -830,6 +837,7 @@ int bch_cached_dev_writeback_start(struct cached_dev *dc)
"bcache_writeback");
if (IS_ERR(dc->writeback_thread)) {
cached_dev_put(dc);
+ destroy_workqueue(dc->writeback_write_wq);
return PTR_ERR(dc->writeback_thread);
}
dc->writeback_running = true;
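
A condensed sketch of the workqueue teardown order used in the hunks above (not part
of the patch): drain any outstanding work items first, then free the workqueue, and
tolerate a NULL pointer in case creation failed earlier.

	#include <linux/workqueue.h>

	/* Illustrative shutdown helper, assuming the caller owns wq. */
	static void demo_stop_wq(struct workqueue_struct *wq)
	{
		if (wq) {
			flush_workqueue(wq);	/* wait for queued work to finish */
			destroy_workqueue(wq);	/* then release the workqueue */
		}
	}
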
diff --git a/drivers/md/dm-init.c b/drivers/md/dm-init.c
index 352e803f566e..b65faef2c4b5 100644
--- a/drivers/md/dm-init.c
+++ b/drivers/md/dm-init.c
@@ -25,7 +25,7 @@ static char *create;
* Format: dm-mod.create=<name>,<uuid>,<minor>,<flags>,<table>[,<table>+][;<name>,<uuid>,<minor>,<flags>,<table>[,<table>+]+]
* Table format: <start_sector> <num_sectors> <target_type> <target_args>
*
- * See Documentation/device-mapper/dm-init.txt for dm-mod.create="..." format
+ * See Documentation/device-mapper/dm-init.rst for dm-mod.create="..." format
* details.
*/
@@ -140,8 +140,8 @@ static char __init *dm_parse_table_entry(struct dm_device *dev, char *str)
return ERR_PTR(-EINVAL);
}
/* target_args */
- dev->target_args_array[n] = kstrndup(field[3], GFP_KERNEL,
- DM_MAX_STR_SIZE);
+ dev->target_args_array[n] = kstrndup(field[3], DM_MAX_STR_SIZE,
+ GFP_KERNEL);
if (!dev->target_args_array[n])
return ERR_PTR(-ENOMEM);
@@ -272,10 +272,10 @@ static int __init dm_init_init(void)
return 0;
if (strlen(create) >= DM_MAX_STR_SIZE) {
- DMERR("Argument is too big. Limit is %d\n", DM_MAX_STR_SIZE);
+ DMERR("Argument is too big. Limit is %d", DM_MAX_STR_SIZE);
return -EINVAL;
}
- str = kstrndup(create, GFP_KERNEL, DM_MAX_STR_SIZE);
+ str = kstrndup(create, DM_MAX_STR_SIZE, GFP_KERNEL);
if (!str)
return -ENOMEM;
@@ -283,7 +283,7 @@ static int __init dm_init_init(void)
if (r)
goto out;
- DMINFO("waiting for all devices to be available before creating mapped devices\n");
+ DMINFO("waiting for all devices to be available before creating mapped devices");
wait_for_device_probe();
list_for_each_entry(dev, &devices, list) {
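
For reference (illustrative, not part of the patch): the kstrndup() prototype is
char *kstrndup(const char *s, size_t len, gfp_t gfp), so the length limit comes
before the GFP flags, which is what the two swapped calls above are fixing. A
minimal sketch with a hypothetical helper:

	#include <linux/slab.h>
	#include <linux/string.h>

	/* Hypothetical helper: duplicate a table field with a bounded length. */
	static char *demo_dup_field(const char *field, size_t max_len)
	{
		/* length first, GFP flags second -- matching the corrected calls */
		return kstrndup(field, max_len, GFP_KERNEL);
	}
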
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index 9ea2b0291f20..e549392e0ea5 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -60,6 +60,7 @@
#define WRITE_LOG_VERSION 1ULL
#define WRITE_LOG_MAGIC 0x6a736677736872ULL
+#define WRITE_LOG_SUPER_SECTOR 0
/*
* The disk format for this is braindead simple.
@@ -115,6 +116,7 @@ struct log_writes_c {
struct list_head logging_blocks;
wait_queue_head_t wait;
struct task_struct *log_kthread;
+ struct completion super_done;
};
struct pending_block {
@@ -180,6 +182,14 @@ static void log_end_io(struct bio *bio)
bio_put(bio);
}
+static void log_end_super(struct bio *bio)
+{
+ struct log_writes_c *lc = bio->bi_private;
+
+ complete(&lc->super_done);
+ log_end_io(bio);
+}
+
/*
* Meant to be called if there is an error, it will free all the pages
* associated with the block.
@@ -215,7 +225,8 @@ static int write_metadata(struct log_writes_c *lc, void *entry,
bio->bi_iter.bi_size = 0;
bio->bi_iter.bi_sector = sector;
bio_set_dev(bio, lc->logdev->bdev);
- bio->bi_end_io = log_end_io;
+ bio->bi_end_io = (sector == WRITE_LOG_SUPER_SECTOR) ?
+ log_end_super : log_end_io;
bio->bi_private = lc;
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
@@ -418,11 +429,18 @@ static int log_super(struct log_writes_c *lc)
super.nr_entries = cpu_to_le64(lc->logged_entries);
super.sectorsize = cpu_to_le32(lc->sectorsize);
- if (write_metadata(lc, &super, sizeof(super), NULL, 0, 0)) {
+ if (write_metadata(lc, &super, sizeof(super), NULL, 0,
+ WRITE_LOG_SUPER_SECTOR)) {
DMERR("Couldn't write super");
return -1;
}
+ /*
+	 * The super sector should be written in order, otherwise the
+ * nr_entries could be rewritten incorrectly by an old bio.
+ */
+ wait_for_completion_io(&lc->super_done);
+
return 0;
}
@@ -531,6 +549,7 @@ static int log_writes_ctr(struct dm_target *ti, unsigned int argc, char **argv)
INIT_LIST_HEAD(&lc->unflushed_blocks);
INIT_LIST_HEAD(&lc->logging_blocks);
init_waitqueue_head(&lc->wait);
+ init_completion(&lc->super_done);
atomic_set(&lc->io_blocks, 0);
atomic_set(&lc->pending_blocks, 0);
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 9fdef6897316..7a87a640f8ba 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -3558,7 +3558,7 @@ static void raid_status(struct dm_target *ti, status_type_t type,
* v1.5.0+:
*
* Sync action:
- * See Documentation/device-mapper/dm-raid.txt for
+ * See Documentation/device-mapper/dm-raid.rst for
* information on each of these states.
*/
DMEMIT(" %s", sync_action);
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 350cf0451456..ec8b27e20de3 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -561,7 +561,7 @@ static char **realloc_argv(unsigned *size, char **old_argv)
gfp = GFP_NOIO;
}
argv = kmalloc_array(new_size, sizeof(*argv), gfp);
- if (argv) {
+ if (argv && old_argv) {
memcpy(argv, old_argv, *size * sizeof(*argv));
*size = new_size;
}
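
The added old_argv check matters on the very first allocation, when old_argv is
still NULL and there is nothing to copy. A generic grow-and-copy sketch of the same
idea (illustrative only, not the dm-table code itself; growth policy is assumed):

	#include <linux/slab.h>
	#include <linux/string.h>

	static char **demo_grow_argv(unsigned int *size, char **old_argv)
	{
		unsigned int new_size = *size ? *size * 2 : 8;
		char **argv = kmalloc_array(new_size, sizeof(*argv), GFP_KERNEL);

		if (argv) {
			/* On the first call old_argv is NULL: nothing to copy. */
			if (old_argv)
				memcpy(argv, old_argv, *size * sizeof(*argv));
			*size = new_size;
		}
		kfree(old_argv);
		return argv;
	}
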
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 720d06531aa3..ea24ff0612e3 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -235,8 +235,8 @@ static int verity_handle_err(struct dm_verity *v, enum verity_block_type type,
BUG();
}
- DMERR("%s: %s block %llu is corrupted", v->data_dev->name, type_str,
- block);
+ DMERR_LIMIT("%s: %s block %llu is corrupted", v->data_dev->name,
+ type_str, block);
if (v->corrupted_errs == DM_VERITY_MAX_CORRUPTED_ERRS)
DMERR("%s: reached maximum errors", v->data_dev->name);
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index c01d41198f5e..b092c7b5282f 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -1790,6 +1790,8 @@ void md_bitmap_destroy(struct mddev *mddev)
return;
md_bitmap_wait_behind_writes(mddev);
+ mempool_destroy(mddev->wb_info_pool);
+ mddev->wb_info_pool = NULL;
mutex_lock(&mddev->bitmap_info.mutex);
spin_lock(&mddev->lock);
@@ -1900,10 +1902,14 @@ int md_bitmap_load(struct mddev *mddev)
sector_t start = 0;
sector_t sector = 0;
struct bitmap *bitmap = mddev->bitmap;
+ struct md_rdev *rdev;
if (!bitmap)
goto out;
+ rdev_for_each(rdev, mddev)
+ mddev_create_wb_pool(mddev, rdev, true);
+
if (mddev_is_clustered(mddev))
md_cluster_ops->load_bitmaps(mddev, mddev->bitmap_info.nodes);
@@ -2462,12 +2468,26 @@ static ssize_t
backlog_store(struct mddev *mddev, const char *buf, size_t len)
{
unsigned long backlog;
+ unsigned long old_mwb = mddev->bitmap_info.max_write_behind;
int rv = kstrtoul(buf, 10, &backlog);
if (rv)
return rv;
if (backlog > COUNTER_MAX)
return -EINVAL;
mddev->bitmap_info.max_write_behind = backlog;
+ if (!backlog && mddev->wb_info_pool) {
+ /* wb_info_pool is not needed if backlog is zero */
+ mempool_destroy(mddev->wb_info_pool);
+ mddev->wb_info_pool = NULL;
+ } else if (backlog && !mddev->wb_info_pool) {
+ /* wb_info_pool is needed since backlog is not zero */
+ struct md_rdev *rdev;
+
+ rdev_for_each(rdev, mddev)
+ mddev_create_wb_pool(mddev, rdev, false);
+ }
+ if (old_mwb != backlog)
+ md_bitmap_update_sb(mddev->bitmap);
return len;
}
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 9801d540fea1..a114b05e3db4 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -37,6 +37,7 @@
*/
+#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/kthread.h>
#include <linux/blkdev.h>
@@ -124,6 +125,77 @@ static inline int speed_max(struct mddev *mddev)
mddev->sync_speed_max : sysctl_speed_limit_max;
}
+static int rdev_init_wb(struct md_rdev *rdev)
+{
+ if (rdev->bdev->bd_queue->nr_hw_queues == 1)
+ return 0;
+
+ spin_lock_init(&rdev->wb_list_lock);
+ INIT_LIST_HEAD(&rdev->wb_list);
+ init_waitqueue_head(&rdev->wb_io_wait);
+ set_bit(WBCollisionCheck, &rdev->flags);
+
+ return 1;
+}
+
+/*
+ * Create wb_info_pool if rdev is the first multi-queue device flagged
+ * with writemostly and write-behind mode is enabled.
+ */
+void mddev_create_wb_pool(struct mddev *mddev, struct md_rdev *rdev,
+ bool is_suspend)
+{
+ if (mddev->bitmap_info.max_write_behind == 0)
+ return;
+
+ if (!test_bit(WriteMostly, &rdev->flags) || !rdev_init_wb(rdev))
+ return;
+
+ if (mddev->wb_info_pool == NULL) {
+ unsigned int noio_flag;
+
+ if (!is_suspend)
+ mddev_suspend(mddev);
+ noio_flag = memalloc_noio_save();
+ mddev->wb_info_pool = mempool_create_kmalloc_pool(NR_WB_INFOS,
+ sizeof(struct wb_info));
+ memalloc_noio_restore(noio_flag);
+ if (!mddev->wb_info_pool)
+ pr_err("can't alloc memory pool for writemostly\n");
+ if (!is_suspend)
+ mddev_resume(mddev);
+ }
+}
+EXPORT_SYMBOL_GPL(mddev_create_wb_pool);
+
+/*
+ * Destroy wb_info_pool if rdev is the last device flagged with WBCollisionCheck.
+ */
+static void mddev_destroy_wb_pool(struct mddev *mddev, struct md_rdev *rdev)
+{
+ if (!test_and_clear_bit(WBCollisionCheck, &rdev->flags))
+ return;
+
+ if (mddev->wb_info_pool) {
+ struct md_rdev *temp;
+ int num = 0;
+
+ /*
+ * Check if other rdevs need wb_info_pool.
+ */
+ rdev_for_each(temp, mddev)
+ if (temp != rdev &&
+ test_bit(WBCollisionCheck, &temp->flags))
+ num++;
+ if (!num) {
+ mddev_suspend(rdev->mddev);
+ mempool_destroy(mddev->wb_info_pool);
+ mddev->wb_info_pool = NULL;
+ mddev_resume(rdev->mddev);
+ }
+ }
+}
+
static struct ctl_table_header *raid_table_header;
static struct ctl_table raid_table[] = {
@@ -2210,6 +2282,9 @@ static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
rdev->mddev = mddev;
pr_debug("md: bind<%s>\n", b);
+ if (mddev->raid_disks)
+ mddev_create_wb_pool(mddev, rdev, false);
+
if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
goto fail;
@@ -2246,6 +2321,7 @@ static void unbind_rdev_from_array(struct md_rdev *rdev)
bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
list_del_rcu(&rdev->same_set);
pr_debug("md: unbind<%s>\n", bdevname(rdev->bdev,b));
+ mddev_destroy_wb_pool(rdev->mddev, rdev);
rdev->mddev = NULL;
sysfs_remove_link(&rdev->kobj, "block");
sysfs_put(rdev->sysfs_state);
@@ -2758,8 +2834,10 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
}
} else if (cmd_match(buf, "writemostly")) {
set_bit(WriteMostly, &rdev->flags);
+ mddev_create_wb_pool(rdev->mddev, rdev, false);
err = 0;
} else if (cmd_match(buf, "-writemostly")) {
+ mddev_destroy_wb_pool(rdev->mddev, rdev);
clear_bit(WriteMostly, &rdev->flags);
err = 0;
} else if (cmd_match(buf, "blocked")) {
@@ -3356,7 +3434,7 @@ rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
if (!entry->show)
return -EIO;
if (!rdev->mddev)
- return -EBUSY;
+ return -ENODEV;
return entry->show(rdev, page);
}
@@ -5588,15 +5666,28 @@ int md_run(struct mddev *mddev)
mddev->bitmap = bitmap;
}
- if (err) {
- mddev_detach(mddev);
- if (mddev->private)
- pers->free(mddev, mddev->private);
- mddev->private = NULL;
- module_put(pers->owner);
- md_bitmap_destroy(mddev);
- goto abort;
+ if (err)
+ goto bitmap_abort;
+
+ if (mddev->bitmap_info.max_write_behind > 0) {
+ bool creat_pool = false;
+
+ rdev_for_each(rdev, mddev) {
+ if (test_bit(WriteMostly, &rdev->flags) &&
+ rdev_init_wb(rdev))
+ creat_pool = true;
+ }
+ if (creat_pool && mddev->wb_info_pool == NULL) {
+ mddev->wb_info_pool =
+ mempool_create_kmalloc_pool(NR_WB_INFOS,
+ sizeof(struct wb_info));
+ if (!mddev->wb_info_pool) {
+ err = -ENOMEM;
+ goto bitmap_abort;
+ }
+ }
}
+
if (mddev->queue) {
bool nonrot = true;
@@ -5639,8 +5730,7 @@ int md_run(struct mddev *mddev)
spin_unlock(&mddev->lock);
rdev_for_each(rdev, mddev)
if (rdev->raid_disk >= 0)
- if (sysfs_link_rdev(mddev, rdev))
- /* failure here is OK */;
+ sysfs_link_rdev(mddev, rdev); /* failure here is OK */
if (mddev->degraded && !mddev->ro)
/* This ensures that recovering status is reported immediately
@@ -5658,6 +5748,13 @@ int md_run(struct mddev *mddev)
sysfs_notify(&mddev->kobj, NULL, "degraded");
return 0;
+bitmap_abort:
+ mddev_detach(mddev);
+ if (mddev->private)
+ pers->free(mddev, mddev->private);
+ mddev->private = NULL;
+ module_put(pers->owner);
+ md_bitmap_destroy(mddev);
abort:
bioset_exit(&mddev->bio_set);
bioset_exit(&mddev->sync_set);
@@ -5826,6 +5923,8 @@ static void __md_stop_writes(struct mddev *mddev)
mddev->in_sync = 1;
md_update_sb(mddev, 1);
}
+ mempool_destroy(mddev->wb_info_pool);
+ mddev->wb_info_pool = NULL;
}
void md_stop_writes(struct mddev *mddev)
@@ -8198,8 +8297,7 @@ void md_do_sync(struct md_thread *thread)
{
struct mddev *mddev = thread->mddev;
struct mddev *mddev2;
- unsigned int currspeed = 0,
- window;
+ unsigned int currspeed = 0, window;
sector_t max_sectors,j, io_sectors, recovery_done;
unsigned long mark[SYNC_MARKS];
unsigned long update_time;
@@ -8256,7 +8354,7 @@ void md_do_sync(struct md_thread *thread)
* 0 == not engaged in resync at all
* 2 == checking that there is no conflict with another sync
* 1 == like 2, but have yielded to allow conflicting resync to
- * commense
+ * commence
* other == active in resync - this many blocks
*
* Before starting a resync we must have set curr_resync to
@@ -8387,7 +8485,7 @@ void md_do_sync(struct md_thread *thread)
/*
* Tune reconstruction:
*/
- window = 32*(PAGE_SIZE/512);
+ window = 32 * (PAGE_SIZE / 512);
pr_debug("md: using %dk window, over a total of %lluk.\n",
window/2, (unsigned long long)max_sectors/2);
@@ -9200,7 +9298,6 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
* perform resync with the new activated disk */
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
-
}
/* device faulty
* We just want to do the minimum to mark the disk
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 7c930c091193..10f98200e2f8 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -109,6 +109,14 @@ struct md_rdev {
* for reporting to userspace and storing
* in superblock.
*/
+
+ /*
+	 * Members used to check for collisions between write behind IOs.
+ */
+ struct list_head wb_list;
+ spinlock_t wb_list_lock;
+ wait_queue_head_t wb_io_wait;
+
struct work_struct del_work; /* used for delayed sysfs removal */
struct kernfs_node *sysfs_state; /* handle for 'state'
@@ -193,6 +201,10 @@ enum flag_bits {
* it didn't fail, so don't use FailFast
* any more for metadata
*/
+ WBCollisionCheck, /*
+ * multiqueue device should check if there
+				 * is a collision between write behind bios.
+ */
};
static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors,
@@ -245,6 +257,14 @@ enum mddev_sb_flags {
MD_SB_NEED_REWRITE, /* metadata write needs to be repeated */
};
+#define NR_WB_INFOS 8
+/* record current range of write behind IOs */
+struct wb_info {
+ sector_t lo;
+ sector_t hi;
+ struct list_head list;
+};
+
struct mddev {
void *private;
struct md_personality *pers;
@@ -461,6 +481,7 @@ struct mddev {
*/
struct work_struct flush_work;
struct work_struct event_work; /* used by dm to report failure event */
+ mempool_t *wb_info_pool;
void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);
struct md_cluster_info *cluster_info;
unsigned int good_device_nr; /* good device num within cluster raid */
@@ -709,6 +730,8 @@ extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
extern void md_reload_sb(struct mddev *mddev, int raid_disk);
extern void md_update_sb(struct mddev *mddev, int force);
extern void md_kick_rdev_from_array(struct md_rdev * rdev);
+extern void mddev_create_wb_pool(struct mddev *mddev, struct md_rdev *rdev,
+ bool is_suspend);
struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr);
struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev);
diff --git a/drivers/md/raid1-10.c b/drivers/md/raid1-10.c
index 400001b815db..54db34163968 100644
--- a/drivers/md/raid1-10.c
+++ b/drivers/md/raid1-10.c
@@ -3,12 +3,42 @@
#define RESYNC_BLOCK_SIZE (64*1024)
#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
+/*
+ * Number of guaranteed raid bios in case of extreme VM load:
+ */
+#define NR_RAID_BIOS 256
+
+/* when we get a read error on a read-only array, we redirect to another
+ * device without failing the first device, or trying to over-write to
+ * correct the read error. To keep track of bad blocks on a per-bio
+ * level, we store IO_BLOCKED in the appropriate 'bios' pointer
+ */
+#define IO_BLOCKED ((struct bio *)1)
+/* When we successfully write to a known bad-block, we need to remove the
+ * bad-block marking which must be done from process context. So we record
+ * the success by setting devs[n].bio to IO_MADE_GOOD
+ */
+#define IO_MADE_GOOD ((struct bio *)2)
+
+#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
+
+/* When there are this many requests queued to be written by
+ * the raid thread, we become 'congested' to provide back-pressure
+ * for writeback.
+ */
+static int max_queued_requests = 1024;
+
/* for managing resync I/O pages */
struct resync_pages {
void *raid_bio;
struct page *pages[RESYNC_PAGES];
};
+static void rbio_pool_free(void *rbio, void *data)
+{
+ kfree(rbio);
+}
+
static inline int resync_alloc_pages(struct resync_pages *rp,
gfp_t gfp_flags)
{
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 2aa36e570e04..34e26834ad28 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -42,31 +42,6 @@
(1L << MD_HAS_PPL) | \
(1L << MD_HAS_MULTIPLE_PPLS))
-/*
- * Number of guaranteed r1bios in case of extreme VM load:
- */
-#define NR_RAID1_BIOS 256
-
-/* when we get a read error on a read-only array, we redirect to another
- * device without failing the first device, or trying to over-write to
- * correct the read error. To keep track of bad blocks on a per-bio
- * level, we store IO_BLOCKED in the appropriate 'bios' pointer
- */
-#define IO_BLOCKED ((struct bio *)1)
-/* When we successfully write to a known bad-block, we need to remove the
- * bad-block marking which must be done from process context. So we record
- * the success by setting devs[n].bio to IO_MADE_GOOD
- */
-#define IO_MADE_GOOD ((struct bio *)2)
-
-#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
-
-/* When there are this many requests queue to be written by
- * the raid1 thread, we become 'congested' to provide back-pressure
- * for writeback.
- */
-static int max_queued_requests = 1024;
-
static void allow_barrier(struct r1conf *conf, sector_t sector_nr);
static void lower_barrier(struct r1conf *conf, sector_t sector_nr);
@@ -75,6 +50,57 @@ static void lower_barrier(struct r1conf *conf, sector_t sector_nr);
#include "raid1-10.c"
+static int check_and_add_wb(struct md_rdev *rdev, sector_t lo, sector_t hi)
+{
+ struct wb_info *wi, *temp_wi;
+ unsigned long flags;
+ int ret = 0;
+ struct mddev *mddev = rdev->mddev;
+
+ wi = mempool_alloc(mddev->wb_info_pool, GFP_NOIO);
+
+ spin_lock_irqsave(&rdev->wb_list_lock, flags);
+ list_for_each_entry(temp_wi, &rdev->wb_list, list) {
+ /* collision happened */
+ if (hi > temp_wi->lo && lo < temp_wi->hi) {
+ ret = -EBUSY;
+ break;
+ }
+ }
+
+ if (!ret) {
+ wi->lo = lo;
+ wi->hi = hi;
+ list_add(&wi->list, &rdev->wb_list);
+ } else
+ mempool_free(wi, mddev->wb_info_pool);
+ spin_unlock_irqrestore(&rdev->wb_list_lock, flags);
+
+ return ret;
+}
+
+static void remove_wb(struct md_rdev *rdev, sector_t lo, sector_t hi)
+{
+ struct wb_info *wi;
+ unsigned long flags;
+ int found = 0;
+ struct mddev *mddev = rdev->mddev;
+
+ spin_lock_irqsave(&rdev->wb_list_lock, flags);
+ list_for_each_entry(wi, &rdev->wb_list, list)
+ if (hi == wi->hi && lo == wi->lo) {
+ list_del(&wi->list);
+ mempool_free(wi, mddev->wb_info_pool);
+ found = 1;
+ break;
+ }
+
+ if (!found)
+ WARN(1, "The write behind IO is not recorded\n");
+ spin_unlock_irqrestore(&rdev->wb_list_lock, flags);
+ wake_up(&rdev->wb_io_wait);
+}
+
/*
* for resync bio, r1bio pointer can be retrieved from the per-bio
* 'struct resync_pages'.
@@ -93,11 +119,6 @@ static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
return kzalloc(size, gfp_flags);
}
-static void r1bio_pool_free(void *r1_bio, void *data)
-{
- kfree(r1_bio);
-}
-
#define RESYNC_DEPTH 32
#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
#define RESYNC_WINDOW (RESYNC_BLOCK_SIZE * RESYNC_DEPTH)
@@ -173,7 +194,7 @@ out_free_bio:
kfree(rps);
out_free_r1bio:
- r1bio_pool_free(r1_bio, data);
+ rbio_pool_free(r1_bio, data);
return NULL;
}
@@ -193,7 +214,7 @@ static void r1buf_pool_free(void *__r1_bio, void *data)
/* resync pages array stored in the 1st bio's .bi_private */
kfree(rp);
- r1bio_pool_free(r1bio, data);
+ rbio_pool_free(r1bio, data);
}
static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio)
@@ -476,6 +497,12 @@ static void raid1_end_write_request(struct bio *bio)
}
if (behind) {
+ if (test_bit(WBCollisionCheck, &rdev->flags)) {
+ sector_t lo = r1_bio->sector;
+ sector_t hi = r1_bio->sector + r1_bio->sectors;
+
+ remove_wb(rdev, lo, hi);
+ }
if (test_bit(WriteMostly, &rdev->flags))
atomic_dec(&r1_bio->behind_remaining);
@@ -1449,7 +1476,6 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
if (!r1_bio->bios[i])
continue;
-
if (first_clone) {
/* do behind I/O ?
* Not if there are too many, or cannot
@@ -1474,7 +1500,16 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
if (r1_bio->behind_master_bio) {
- if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
+ struct md_rdev *rdev = conf->mirrors[i].rdev;
+
+ if (test_bit(WBCollisionCheck, &rdev->flags)) {
+ sector_t lo = r1_bio->sector;
+ sector_t hi = r1_bio->sector + r1_bio->sectors;
+
+ wait_event(rdev->wb_io_wait,
+ check_and_add_wb(rdev, lo, hi) == 0);
+ }
+ if (test_bit(WriteMostly, &rdev->flags))
atomic_inc(&r1_bio->behind_remaining);
}
@@ -1729,9 +1764,8 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
first = last = rdev->saved_raid_disk;
for (mirror = first; mirror <= last; mirror++) {
- p = conf->mirrors+mirror;
+ p = conf->mirrors + mirror;
if (!p->rdev) {
-
if (mddev->gendisk)
disk_stack_limits(mddev->gendisk, rdev->bdev,
rdev->data_offset << 9);
@@ -2888,7 +2922,6 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
if (read_targets == 1)
bio->bi_opf &= ~MD_FAILFAST;
generic_make_request(bio);
-
}
return nr_sectors;
}
@@ -2947,8 +2980,8 @@ static struct r1conf *setup_conf(struct mddev *mddev)
if (!conf->poolinfo)
goto abort;
conf->poolinfo->raid_disks = mddev->raid_disks * 2;
- err = mempool_init(&conf->r1bio_pool, NR_RAID1_BIOS, r1bio_pool_alloc,
- r1bio_pool_free, conf->poolinfo);
+ err = mempool_init(&conf->r1bio_pool, NR_RAID_BIOS, r1bio_pool_alloc,
+ rbio_pool_free, conf->poolinfo);
if (err)
goto abort;
@@ -3089,7 +3122,7 @@ static int raid1_run(struct mddev *mddev)
}
mddev->degraded = 0;
- for (i=0; i < conf->raid_disks; i++)
+ for (i = 0; i < conf->raid_disks; i++)
if (conf->mirrors[i].rdev == NULL ||
!test_bit(In_sync, &conf->mirrors[i].rdev->flags) ||
test_bit(Faulty, &conf->mirrors[i].rdev->flags))
@@ -3124,7 +3157,7 @@ static int raid1_run(struct mddev *mddev)
mddev->queue);
}
- ret = md_integrity_register(mddev);
+ ret = md_integrity_register(mddev);
if (ret) {
md_unregister_thread(&mddev->thread);
raid1_free(mddev, conf);
@@ -3232,8 +3265,8 @@ static int raid1_reshape(struct mddev *mddev)
newpoolinfo->mddev = mddev;
newpoolinfo->raid_disks = raid_disks * 2;
- ret = mempool_init(&newpool, NR_RAID1_BIOS, r1bio_pool_alloc,
- r1bio_pool_free, newpoolinfo);
+ ret = mempool_init(&newpool, NR_RAID_BIOS, r1bio_pool_alloc,
+ rbio_pool_free, newpoolinfo);
if (ret) {
kfree(newpoolinfo);
return ret;
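
A condensed sketch of the gate/release pattern introduced above for write-behind
bios (illustrative only; it reuses the helpers and fields added by this patch and
assumes the usual raid1 context): check_and_add_wb() is the gate before submission,
and remove_wb() is the release in raid1_end_write_request().

	/* Sketch: submit one write-behind bio with collision checking. */
	static void demo_submit_behind(struct md_rdev *rdev, struct r1bio *r1_bio,
				       struct bio *mbio)
	{
		sector_t lo = r1_bio->sector;
		sector_t hi = r1_bio->sector + r1_bio->sectors;

		if (test_bit(WBCollisionCheck, &rdev->flags))
			/* Block until no in-flight write-behind IO overlaps [lo, hi). */
			wait_event(rdev->wb_io_wait,
				   check_and_add_wb(rdev, lo, hi) == 0);

		generic_make_request(mbio);
		/* remove_wb(rdev, lo, hi) runs later, from the write completion path. */
	}
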
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index aea11476fee6..8a1354a08a1a 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -64,31 +64,6 @@
* [B A] [D C] [B A] [E C D]
*/
-/*
- * Number of guaranteed r10bios in case of extreme VM load:
- */
-#define NR_RAID10_BIOS 256
-
-/* when we get a read error on a read-only array, we redirect to another
- * device without failing the first device, or trying to over-write to
- * correct the read error. To keep track of bad blocks on a per-bio
- * level, we store IO_BLOCKED in the appropriate 'bios' pointer
- */
-#define IO_BLOCKED ((struct bio *)1)
-/* When we successfully write to a known bad-block, we need to remove the
- * bad-block marking which must be done from process context. So we record
- * the success by setting devs[n].bio to IO_MADE_GOOD
- */
-#define IO_MADE_GOOD ((struct bio *)2)
-
-#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
-
-/* When there are this many requests queued to be written by
- * the raid10 thread, we become 'congested' to provide back-pressure
- * for writeback.
- */
-static int max_queued_requests = 1024;
-
static void allow_barrier(struct r10conf *conf);
static void lower_barrier(struct r10conf *conf);
static int _enough(struct r10conf *conf, int previous, int ignore);
@@ -123,11 +98,6 @@ static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
return kzalloc(size, gfp_flags);
}
-static void r10bio_pool_free(void *r10_bio, void *data)
-{
- kfree(r10_bio);
-}
-
#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
/* amount of memory to reserve for resync requests */
#define RESYNC_WINDOW (1024*1024)
@@ -233,7 +203,7 @@ out_free_bio:
}
kfree(rps);
out_free_r10bio:
- r10bio_pool_free(r10_bio, conf);
+ rbio_pool_free(r10_bio, conf);
return NULL;
}
@@ -261,7 +231,7 @@ static void r10buf_pool_free(void *__r10_bio, void *data)
/* resync pages array stored in the 1st bio's .bi_private */
kfree(rp);
- r10bio_pool_free(r10bio, conf);
+ rbio_pool_free(r10bio, conf);
}
static void put_all_bios(struct r10conf *conf, struct r10bio *r10_bio)
@@ -737,15 +707,19 @@ static struct md_rdev *read_balance(struct r10conf *conf,
int sectors = r10_bio->sectors;
int best_good_sectors;
sector_t new_distance, best_dist;
- struct md_rdev *best_rdev, *rdev = NULL;
+ struct md_rdev *best_dist_rdev, *best_pending_rdev, *rdev = NULL;
int do_balance;
- int best_slot;
+ int best_dist_slot, best_pending_slot;
+ bool has_nonrot_disk = false;
+ unsigned int min_pending;
struct geom *geo = &conf->geo;
raid10_find_phys(conf, r10_bio);
rcu_read_lock();
- best_slot = -1;
- best_rdev = NULL;
+ best_dist_slot = -1;
+ min_pending = UINT_MAX;
+ best_dist_rdev = NULL;
+ best_pending_rdev = NULL;
best_dist = MaxSector;
best_good_sectors = 0;
do_balance = 1;
@@ -767,6 +741,8 @@ static struct md_rdev *read_balance(struct r10conf *conf,
sector_t first_bad;
int bad_sectors;
sector_t dev_sector;
+ unsigned int pending;
+ bool nonrot;
if (r10_bio->devs[slot].bio == IO_BLOCKED)
continue;
@@ -803,8 +779,8 @@ static struct md_rdev *read_balance(struct r10conf *conf,
first_bad - dev_sector;
if (good_sectors > best_good_sectors) {
best_good_sectors = good_sectors;
- best_slot = slot;
- best_rdev = rdev;
+ best_dist_slot = slot;
+ best_dist_rdev = rdev;
}
if (!do_balance)
/* Must read from here */
@@ -817,14 +793,23 @@ static struct md_rdev *read_balance(struct r10conf *conf,
if (!do_balance)
break;
- if (best_slot >= 0)
+ nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
+ has_nonrot_disk |= nonrot;
+ pending = atomic_read(&rdev->nr_pending);
+ if (min_pending > pending && nonrot) {
+ min_pending = pending;
+ best_pending_slot = slot;
+ best_pending_rdev = rdev;
+ }
+
+ if (best_dist_slot >= 0)
/* At least 2 disks to choose from so failfast is OK */
set_bit(R10BIO_FailFast, &r10_bio->state);
/* This optimisation is debatable, and completely destroys
* sequential read speed for 'far copies' arrays. So only
* keep it for 'near' arrays, and review those later.
*/
- if (geo->near_copies > 1 && !atomic_read(&rdev->nr_pending))
+ if (geo->near_copies > 1 && !pending)
new_distance = 0;
/* for far > 1 always use the lowest address */
@@ -833,15 +818,21 @@ static struct md_rdev *read_balance(struct r10conf *conf,
else
new_distance = abs(r10_bio->devs[slot].addr -
conf->mirrors[disk].head_position);
+
if (new_distance < best_dist) {
best_dist = new_distance;
- best_slot = slot;
- best_rdev = rdev;
+ best_dist_slot = slot;
+ best_dist_rdev = rdev;
}
}
if (slot >= conf->copies) {
- slot = best_slot;
- rdev = best_rdev;
+ if (has_nonrot_disk) {
+ slot = best_pending_slot;
+ rdev = best_pending_rdev;
+ } else {
+ slot = best_dist_slot;
+ rdev = best_dist_rdev;
+ }
}
if (slot >= 0) {
@@ -3675,8 +3666,8 @@ static struct r10conf *setup_conf(struct mddev *mddev)
conf->geo = geo;
conf->copies = copies;
- err = mempool_init(&conf->r10bio_pool, NR_RAID10_BIOS, r10bio_pool_alloc,
- r10bio_pool_free, conf);
+ err = mempool_init(&conf->r10bio_pool, NR_RAID_BIOS, r10bio_pool_alloc,
+ rbio_pool_free, conf);
if (err)
goto out;
@@ -4780,8 +4771,7 @@ static int handle_reshape_read_error(struct mddev *mddev,
int idx = 0;
struct page **pages;
- r10b = kmalloc(sizeof(*r10b) +
- sizeof(struct r10dev) * conf->copies, GFP_NOIO);
+ r10b = kmalloc(struct_size(r10b, devs, conf->copies), GFP_NOIO);
if (!r10b) {
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
return -ENOMEM;
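
The struct_size() call used above computes the allocation size of a structure with
a trailing variable-length array while checking for overflow. A self-contained
sketch with a hypothetical structure (not part of the patch):

	#include <linux/overflow.h>
	#include <linux/slab.h>

	struct demo {				/* hypothetical, for illustration */
		int nr;
		struct page *pages[];		/* trailing flexible array */
	};

	static struct demo *demo_alloc(int nr)
	{
		/* struct_size(d, pages, nr) is roughly
		 * sizeof(struct demo) + nr * sizeof(struct page *),
		 * but saturates instead of silently overflowing. */
		struct demo *d = kmalloc(struct_size(d, pages, nr), GFP_KERNEL);

		if (d)
			d->nr = nr;
		return d;
	}
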
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index b83bce2beb66..3de4e13bde98 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5251,7 +5251,6 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
rcu_read_unlock();
raid_bio->bi_next = (void*)rdev;
bio_set_dev(align_bi, rdev->bdev);
- bio_clear_flag(align_bi, BIO_SEG_VALID);
if (is_badblock(rdev, align_bi->bi_iter.bi_sector,
bio_sectors(align_bi),
@@ -7672,7 +7671,7 @@ abort:
static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
{
struct r5conf *conf = mddev->private;
- int err = -EEXIST;
+ int ret, err = -EEXIST;
int disk;
struct disk_info *p;
int first = 0;
@@ -7687,7 +7686,14 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
* The array is in readonly mode if journal is missing, so no
* write requests running. We should be safe
*/
- log_init(conf, rdev, false);
+ ret = log_init(conf, rdev, false);
+ if (ret)
+ return ret;
+
+ ret = r5l_start(conf->log);
+ if (ret)
+ return ret;
+
return 0;
}
if (mddev->recovery_disabled == conf->recovery_disabled)
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index 092e7509af9b..21cd9c02960b 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -89,40 +89,7 @@ config MEDIA_CEC_SUPPORT
source "drivers/media/cec/Kconfig"
-#
-# Media controller
-# Selectable only for webcam/grabbers, as other drivers don't use it
-#
-
-config MEDIA_CONTROLLER
- bool "Media Controller API"
- depends on MEDIA_CAMERA_SUPPORT || MEDIA_ANALOG_TV_SUPPORT || MEDIA_DIGITAL_TV_SUPPORT
- help
- Enable the media controller API used to query media devices internal
- topology and configure it dynamically.
-
- This API is mostly used by camera interfaces in embedded platforms.
-
-config MEDIA_CONTROLLER_DVB
- bool "Enable Media controller for DVB (EXPERIMENTAL)"
- depends on MEDIA_CONTROLLER && DVB_CORE
- help
- Enable the media controller API support for DVB.
-
- This is currently experimental.
-
-config MEDIA_CONTROLLER_REQUEST_API
- bool "Enable Media controller Request API (EXPERIMENTAL)"
- depends on MEDIA_CONTROLLER && STAGING_MEDIA
- default n
- help
- DO NOT ENABLE THIS OPTION UNLESS YOU KNOW WHAT YOU'RE DOING.
-
- This option enables the Request API for the Media controller and V4L2
- interfaces. It is currently needed by a few stateless codec drivers.
-
- There is currently no intention to provide API or ABI stability for
- this new API as of yet.
+source "drivers/media/mc/Kconfig"
#
# Video4Linux support
@@ -164,7 +131,6 @@ config DVB_MMAP
depends on DVB_CORE
depends on VIDEO_V4L2=y || VIDEO_V4L2=DVB_CORE
select VIDEOBUF2_VMALLOC
- default n
help
This option enables DVB experimental memory-mapped API, which
reduces the number of context switches to read DVB buffers, as
@@ -190,7 +156,6 @@ config DVB_NET
config TTPCI_EEPROM
tristate
depends on I2C
- default n
source "drivers/media/dvb-core/Kconfig"
diff --git a/drivers/media/Makefile b/drivers/media/Makefile
index 4a330d0e5e40..f215f0a89f9e 100644
--- a/drivers/media/Makefile
+++ b/drivers/media/Makefile
@@ -3,15 +3,6 @@
# Makefile for the kernel multimedia device drivers.
#
-media-objs := media-device.o media-devnode.o media-entity.o \
- media-request.o
-
-ifeq ($(CONFIG_MEDIA_CONTROLLER),y)
- ifeq ($(CONFIG_USB),y)
- media-objs += media-dev-allocator.o
- endif
-endif
-
#
# I2C drivers should come before other drivers, otherwise they'll fail
# when compiled as builtin drivers
@@ -20,10 +11,10 @@ obj-y += i2c/ tuners/
obj-$(CONFIG_DVB_CORE) += dvb-frontends/
#
-# Now, let's link-in the media core
+# Now, let's link-in the media controller core
#
ifeq ($(CONFIG_MEDIA_CONTROLLER),y)
- obj-$(CONFIG_MEDIA_SUPPORT) += media.o
+ obj-$(CONFIG_MEDIA_SUPPORT) += mc/
endif
obj-$(CONFIG_VIDEO_DEV) += v4l2-core/
diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
index f1261cc2b6fa..451c61bde4d4 100644
--- a/drivers/media/cec/cec-adap.c
+++ b/drivers/media/cec/cec-adap.c
@@ -16,7 +16,10 @@
#include <linux/string.h>
#include <linux/types.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_device.h>
#include <drm/drm_edid.h>
+#include <drm/drm_file.h>
#include "cec-priv.h"
@@ -75,6 +78,16 @@ u16 cec_get_edid_phys_addr(const u8 *edid, unsigned int size,
}
EXPORT_SYMBOL_GPL(cec_get_edid_phys_addr);
+void cec_fill_conn_info_from_drm(struct cec_connector_info *conn_info,
+ const struct drm_connector *connector)
+{
+ memset(conn_info, 0, sizeof(*conn_info));
+ conn_info->type = CEC_CONNECTOR_TYPE_DRM;
+ conn_info->drm.card_no = connector->dev->primary->index;
+ conn_info->drm.connector_id = connector->base.id;
+}
+EXPORT_SYMBOL_GPL(cec_fill_conn_info_from_drm);
+
/*
* Queue a new event for this filehandle. If ts == 0, then set it
* to the current time.
@@ -720,6 +733,7 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
struct cec_fh *fh, bool block)
{
struct cec_data *data;
+ bool is_raw = msg_is_raw(msg);
msg->rx_ts = 0;
msg->tx_ts = 0;
@@ -735,15 +749,10 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
/* Make sure the timeout isn't 0. */
msg->timeout = 1000;
}
- if (msg->timeout)
- msg->flags &= CEC_MSG_FL_REPLY_TO_FOLLOWERS;
- else
- msg->flags = 0;
+ msg->flags &= CEC_MSG_FL_REPLY_TO_FOLLOWERS | CEC_MSG_FL_RAW;
- if (msg->len > 1 && msg->msg[1] == CEC_MSG_CDC_MESSAGE) {
- msg->msg[2] = adap->phys_addr >> 8;
- msg->msg[3] = adap->phys_addr & 0xff;
- }
+ if (!msg->timeout)
+ msg->flags &= ~CEC_MSG_FL_REPLY_TO_FOLLOWERS;
/* Sanity checks */
if (msg->len == 0 || msg->len > CEC_MAX_MSG_SIZE) {
@@ -765,44 +774,80 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
dprintk(1, "%s: can't reply to poll msg\n", __func__);
return -EINVAL;
}
- if (msg->len == 1) {
- if (cec_msg_destination(msg) == 0xf) {
- dprintk(1, "%s: invalid poll message\n", __func__);
+
+ if (is_raw) {
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+ } else {
+ /* A CDC-Only device can only send CDC messages */
+ if ((adap->log_addrs.flags & CEC_LOG_ADDRS_FL_CDC_ONLY) &&
+ (msg->len == 1 || msg->msg[1] != CEC_MSG_CDC_MESSAGE)) {
+ dprintk(1, "%s: not a CDC message\n", __func__);
return -EINVAL;
}
- if (cec_has_log_addr(adap, cec_msg_destination(msg))) {
- /*
- * If the destination is a logical address our adapter
- * has already claimed, then just NACK this.
- * It depends on the hardware what it will do with a
- * POLL to itself (some OK this), so it is just as
- * easy to handle it here so the behavior will be
- * consistent.
- */
- msg->tx_ts = ktime_get_ns();
- msg->tx_status = CEC_TX_STATUS_NACK |
- CEC_TX_STATUS_MAX_RETRIES;
- msg->tx_nack_cnt = 1;
- msg->sequence = ++adap->sequence;
- if (!msg->sequence)
+
+ if (msg->len >= 4 && msg->msg[1] == CEC_MSG_CDC_MESSAGE) {
+ msg->msg[2] = adap->phys_addr >> 8;
+ msg->msg[3] = adap->phys_addr & 0xff;
+ }
+
+ if (msg->len == 1) {
+ if (cec_msg_destination(msg) == 0xf) {
+ dprintk(1, "%s: invalid poll message\n",
+ __func__);
+ return -EINVAL;
+ }
+ if (cec_has_log_addr(adap, cec_msg_destination(msg))) {
+ /*
+ * If the destination is a logical address our
+ * adapter has already claimed, then just NACK
+ * this. It depends on the hardware what it will
+ * do with a POLL to itself (some OK this), so
+ * it is just as easy to handle it here so the
+ * behavior will be consistent.
+ */
+ msg->tx_ts = ktime_get_ns();
+ msg->tx_status = CEC_TX_STATUS_NACK |
+ CEC_TX_STATUS_MAX_RETRIES;
+ msg->tx_nack_cnt = 1;
msg->sequence = ++adap->sequence;
- return 0;
+ if (!msg->sequence)
+ msg->sequence = ++adap->sequence;
+ return 0;
+ }
+ }
+ if (msg->len > 1 && !cec_msg_is_broadcast(msg) &&
+ cec_has_log_addr(adap, cec_msg_destination(msg))) {
+ dprintk(1, "%s: destination is the adapter itself\n",
+ __func__);
+ return -EINVAL;
+ }
+ if (msg->len > 1 && adap->is_configured &&
+ !cec_has_log_addr(adap, cec_msg_initiator(msg))) {
+ dprintk(1, "%s: initiator has unknown logical address %d\n",
+ __func__, cec_msg_initiator(msg));
+ return -EINVAL;
+ }
+ /*
+ * Special case: allow Ping and IMAGE/TEXT_VIEW_ON to be
+ * transmitted to a TV, even if the adapter is unconfigured.
+ * This makes it possible to detect or wake up displays that
+ * pull down the HPD when in standby.
+ */
+ if (!adap->is_configured && !adap->is_configuring &&
+ (msg->len > 2 ||
+ cec_msg_destination(msg) != CEC_LOG_ADDR_TV ||
+ (msg->len == 2 && msg->msg[1] != CEC_MSG_IMAGE_VIEW_ON &&
+ msg->msg[1] != CEC_MSG_TEXT_VIEW_ON))) {
+ dprintk(1, "%s: adapter is unconfigured\n", __func__);
+ return -ENONET;
}
}
- if (msg->len > 1 && !cec_msg_is_broadcast(msg) &&
- cec_has_log_addr(adap, cec_msg_destination(msg))) {
- dprintk(1, "%s: destination is the adapter itself\n", __func__);
- return -EINVAL;
- }
- if (msg->len > 1 && adap->is_configured &&
- !cec_has_log_addr(adap, cec_msg_initiator(msg))) {
- dprintk(1, "%s: initiator has unknown logical address %d\n",
- __func__, cec_msg_initiator(msg));
- return -EINVAL;
- }
+
if (!adap->is_configured && !adap->is_configuring) {
- if (adap->needs_hpd || msg->msg[0] != 0xf0) {
- dprintk(1, "%s: adapter is unconfigured\n", __func__);
+ if (adap->needs_hpd) {
+ dprintk(1, "%s: adapter is unconfigured and needs HPD\n",
+ __func__);
return -ENONET;
}
if (msg->reply) {
@@ -1566,6 +1611,22 @@ void cec_s_phys_addr_from_edid(struct cec_adapter *adap,
}
EXPORT_SYMBOL_GPL(cec_s_phys_addr_from_edid);
+void cec_s_conn_info(struct cec_adapter *adap,
+ const struct cec_connector_info *conn_info)
+{
+ if (!(adap->capabilities & CEC_CAP_CONNECTOR_INFO))
+ return;
+
+ mutex_lock(&adap->lock);
+ if (conn_info)
+ adap->conn_info = *conn_info;
+ else
+ memset(&adap->conn_info, 0, sizeof(adap->conn_info));
+ cec_post_state_event(adap);
+ mutex_unlock(&adap->lock);
+}
+EXPORT_SYMBOL_GPL(cec_s_conn_info);
+
/*
* Called from either the ioctl or a driver to set the logical addresses.
*
diff --git a/drivers/media/cec/cec-api.c b/drivers/media/cec/cec-api.c
index 156a0d76ab2a..12d676484472 100644
--- a/drivers/media/cec/cec-api.c
+++ b/drivers/media/cec/cec-api.c
@@ -198,19 +198,11 @@ static long cec_transmit(struct cec_adapter *adap, struct cec_fh *fh,
if (copy_from_user(&msg, parg, sizeof(msg)))
return -EFAULT;
- /* A CDC-Only device can only send CDC messages */
- if ((adap->log_addrs.flags & CEC_LOG_ADDRS_FL_CDC_ONLY) &&
- (msg.len == 1 || msg.msg[1] != CEC_MSG_CDC_MESSAGE))
- return -EINVAL;
-
mutex_lock(&adap->lock);
if (adap->log_addrs.num_log_addrs == 0)
err = -EPERM;
else if (adap->is_configuring)
err = -ENONET;
- else if (!adap->is_configured &&
- (adap->needs_hpd || msg.msg[0] != 0xf0))
- err = -ENONET;
else if (cec_is_busy(adap, fh))
err = -EBUSY;
else
diff --git a/drivers/media/cec/cec-core.c b/drivers/media/cec/cec-core.c
index f5d1578e256a..9c610e1e99b8 100644
--- a/drivers/media/cec/cec-core.c
+++ b/drivers/media/cec/cec-core.c
@@ -128,13 +128,14 @@ static int __must_check cec_devnode_register(struct cec_devnode *devnode,
devnode->cdev.owner = owner;
kobject_set_name(&devnode->cdev.kobj, "cec%d", devnode->minor);
+ devnode->registered = true;
ret = cdev_device_add(&devnode->cdev, &devnode->dev);
if (ret) {
+ devnode->registered = false;
pr_err("%s: cdev_device_add failed\n", __func__);
goto clr_bit;
}
- devnode->registered = true;
return 0;
clr_bit:
@@ -256,6 +257,11 @@ struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops,
struct cec_adapter *adap;
int res;
+ /*
+ * Disable this capability until the connector info public API
+ * is ready.
+ */
+ caps &= ~CEC_CAP_CONNECTOR_INFO;
#ifndef CONFIG_MEDIA_CEC_RC
caps &= ~CEC_CAP_RC;
#endif
diff --git a/drivers/media/cec/cec-notifier.c b/drivers/media/cec/cec-notifier.c
index 9598c7778871..52a867bde15f 100644
--- a/drivers/media/cec/cec-notifier.c
+++ b/drivers/media/cec/cec-notifier.c
@@ -21,8 +21,9 @@ struct cec_notifier {
struct mutex lock;
struct list_head head;
struct kref kref;
- struct device *dev;
- const char *conn;
+ struct device *hdmi_dev;
+ struct cec_connector_info conn_info;
+ const char *conn_name;
struct cec_adapter *cec_adap;
void (*callback)(struct cec_adapter *adap, u16 pa);
@@ -32,14 +33,16 @@ struct cec_notifier {
static LIST_HEAD(cec_notifiers);
static DEFINE_MUTEX(cec_notifiers_lock);
-struct cec_notifier *cec_notifier_get_conn(struct device *dev, const char *conn)
+struct cec_notifier *
+cec_notifier_get_conn(struct device *hdmi_dev, const char *conn_name)
{
struct cec_notifier *n;
mutex_lock(&cec_notifiers_lock);
list_for_each_entry(n, &cec_notifiers, head) {
- if (n->dev == dev &&
- (!conn || !strcmp(n->conn, conn))) {
+ if (n->hdmi_dev == hdmi_dev &&
+ (!conn_name ||
+ (n->conn_name && !strcmp(n->conn_name, conn_name)))) {
kref_get(&n->kref);
mutex_unlock(&cec_notifiers_lock);
return n;
@@ -48,10 +51,17 @@ struct cec_notifier *cec_notifier_get_conn(struct device *dev, const char *conn)
n = kzalloc(sizeof(*n), GFP_KERNEL);
if (!n)
goto unlock;
- n->dev = dev;
- if (conn)
- n->conn = kstrdup(conn, GFP_KERNEL);
+ n->hdmi_dev = hdmi_dev;
+ if (conn_name) {
+ n->conn_name = kstrdup(conn_name, GFP_KERNEL);
+ if (!n->conn_name) {
+ kfree(n);
+ n = NULL;
+ goto unlock;
+ }
+ }
n->phys_addr = CEC_PHYS_ADDR_INVALID;
+
mutex_init(&n->lock);
kref_init(&n->kref);
list_add_tail(&n->head, &cec_notifiers);
@@ -67,7 +77,7 @@ static void cec_notifier_release(struct kref *kref)
container_of(kref, struct cec_notifier, kref);
list_del(&n->head);
- kfree(n->conn);
+ kfree(n->conn_name);
kfree(n);
}
@@ -79,6 +89,84 @@ void cec_notifier_put(struct cec_notifier *n)
}
EXPORT_SYMBOL_GPL(cec_notifier_put);
+struct cec_notifier *
+cec_notifier_conn_register(struct device *hdmi_dev, const char *conn_name,
+ const struct cec_connector_info *conn_info)
+{
+ struct cec_notifier *n = cec_notifier_get_conn(hdmi_dev, conn_name);
+
+ if (!n)
+ return n;
+
+ mutex_lock(&n->lock);
+ n->phys_addr = CEC_PHYS_ADDR_INVALID;
+ if (conn_info)
+ n->conn_info = *conn_info;
+ else
+ memset(&n->conn_info, 0, sizeof(n->conn_info));
+ if (n->cec_adap) {
+ cec_phys_addr_invalidate(n->cec_adap);
+ cec_s_conn_info(n->cec_adap, conn_info);
+ }
+ mutex_unlock(&n->lock);
+ return n;
+}
+EXPORT_SYMBOL_GPL(cec_notifier_conn_register);
+
+void cec_notifier_conn_unregister(struct cec_notifier *n)
+{
+ if (!n)
+ return;
+
+ mutex_lock(&n->lock);
+ memset(&n->conn_info, 0, sizeof(n->conn_info));
+ n->phys_addr = CEC_PHYS_ADDR_INVALID;
+ if (n->cec_adap) {
+ cec_phys_addr_invalidate(n->cec_adap);
+ cec_s_conn_info(n->cec_adap, NULL);
+ }
+ mutex_unlock(&n->lock);
+ cec_notifier_put(n);
+}
+EXPORT_SYMBOL_GPL(cec_notifier_conn_unregister);
+
+struct cec_notifier *
+cec_notifier_cec_adap_register(struct device *hdmi_dev, const char *conn_name,
+ struct cec_adapter *adap)
+{
+ struct cec_notifier *n;
+
+ if (WARN_ON(!adap))
+ return NULL;
+
+ n = cec_notifier_get_conn(hdmi_dev, conn_name);
+ if (!n)
+ return n;
+
+ mutex_lock(&n->lock);
+ n->cec_adap = adap;
+ adap->conn_info = n->conn_info;
+ adap->notifier = n;
+ cec_s_phys_addr(adap, n->phys_addr, false);
+ mutex_unlock(&n->lock);
+ return n;
+}
+EXPORT_SYMBOL_GPL(cec_notifier_cec_adap_register);
+
+void cec_notifier_cec_adap_unregister(struct cec_notifier *n)
+{
+ if (!n)
+ return;
+
+ mutex_lock(&n->lock);
+ n->cec_adap->notifier = NULL;
+ n->cec_adap = NULL;
+ n->callback = NULL;
+ mutex_unlock(&n->lock);
+ cec_notifier_put(n);
+}
+EXPORT_SYMBOL_GPL(cec_notifier_cec_adap_unregister);
+
void cec_notifier_set_phys_addr(struct cec_notifier *n, u16 pa)
{
if (n == NULL)
@@ -88,6 +176,8 @@ void cec_notifier_set_phys_addr(struct cec_notifier *n, u16 pa)
n->phys_addr = pa;
if (n->callback)
n->callback(n->cec_adap, n->phys_addr);
+ else if (n->cec_adap)
+ cec_s_phys_addr(n->cec_adap, n->phys_addr, false);
mutex_unlock(&n->lock);
}
EXPORT_SYMBOL_GPL(cec_notifier_set_phys_addr);
@@ -122,6 +212,10 @@ EXPORT_SYMBOL_GPL(cec_notifier_register);
void cec_notifier_unregister(struct cec_notifier *n)
{
+ /* Do nothing unless cec_notifier_register was called first */
+ if (!n->callback)
+ return;
+
mutex_lock(&n->lock);
n->callback = NULL;
mutex_unlock(&n->lock);
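
A short usage sketch of the new register/unregister pairing above, as an HDMI
transmitter driver with its own CEC adapter might call it (the my_* names and the
single-connector NULL conn_name are assumptions, not taken from the patch):

	#include <media/cec-notifier.h>

	static struct cec_notifier *my_notifier;	/* assumed driver state */

	static int my_hdmi_register_cec(struct device *hdmi_dev,
					struct cec_adapter *adap)
	{
		/* Ties the adapter to the connector's notifier. */
		my_notifier = cec_notifier_cec_adap_register(hdmi_dev, NULL, adap);
		if (!my_notifier)
			return -ENOMEM;
		return 0;
	}

	static void my_hdmi_unregister_cec(void)
	{
		/* Drops the adapter link and puts the notifier reference. */
		cec_notifier_cec_adap_unregister(my_notifier);
	}
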
diff --git a/drivers/media/cec/cec-priv.h b/drivers/media/cec/cec-priv.h
index 804e38f849c7..7bdf855aaecd 100644
--- a/drivers/media/cec/cec-priv.h
+++ b/drivers/media/cec/cec-priv.h
@@ -20,6 +20,11 @@
/* devnode to cec_adapter */
#define to_cec_adapter(node) container_of(node, struct cec_adapter, devnode)
+static inline bool msg_is_raw(const struct cec_msg *msg)
+{
+ return msg->flags & CEC_MSG_FL_RAW;
+}
+
/* cec-core.c */
extern int cec_debug;
int cec_get_device(struct cec_devnode *devnode);
diff --git a/drivers/media/common/saa7146/saa7146_fops.c b/drivers/media/common/saa7146/saa7146_fops.c
index be4f80a40214..aabb830e7468 100644
--- a/drivers/media/common/saa7146/saa7146_fops.c
+++ b/drivers/media/common/saa7146/saa7146_fops.c
@@ -608,6 +608,15 @@ int saa7146_register_device(struct video_device *vfd, struct saa7146_dev *dev,
for (i = 0; i < dev->ext_vv_data->num_stds; i++)
vfd->tvnorms |= dev->ext_vv_data->stds[i].id;
strscpy(vfd->name, name, sizeof(vfd->name));
+ vfd->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OVERLAY |
+ V4L2_CAP_READWRITE | V4L2_CAP_STREAMING;
+ vfd->device_caps |= dev->ext_vv_data->capabilities;
+ if (type == VFL_TYPE_GRABBER)
+ vfd->device_caps &=
+ ~(V4L2_CAP_VBI_CAPTURE | V4L2_CAP_SLICED_VBI_OUTPUT);
+ else
+ vfd->device_caps &=
+ ~(V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OVERLAY | V4L2_CAP_AUDIO);
video_set_drvdata(vfd, dev);
err = video_register_device(vfd, type, -1);
diff --git a/drivers/media/common/saa7146/saa7146_video.c b/drivers/media/common/saa7146/saa7146_video.c
index a0f0b5eef0bd..4c399a42e874 100644
--- a/drivers/media/common/saa7146/saa7146_video.c
+++ b/drivers/media/common/saa7146/saa7146_video.c
@@ -448,25 +448,15 @@ static int video_end(struct saa7146_fh *fh, struct file *file)
static int vidioc_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
{
- struct video_device *vdev = video_devdata(file);
struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
strscpy((char *)cap->driver, "saa7146 v4l2", sizeof(cap->driver));
strscpy((char *)cap->card, dev->ext->name, sizeof(cap->card));
sprintf((char *)cap->bus_info, "PCI:%s", pci_name(dev->pci));
- cap->device_caps =
- V4L2_CAP_VIDEO_CAPTURE |
- V4L2_CAP_VIDEO_OVERLAY |
- V4L2_CAP_READWRITE |
- V4L2_CAP_STREAMING;
- cap->device_caps |= dev->ext_vv_data->capabilities;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
- if (vdev->vfl_type == VFL_TYPE_GRABBER)
- cap->device_caps &=
- ~(V4L2_CAP_VBI_CAPTURE | V4L2_CAP_SLICED_VBI_OUTPUT);
- else
- cap->device_caps &=
- ~(V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OVERLAY | V4L2_CAP_AUDIO);
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OVERLAY |
+ V4L2_CAP_READWRITE | V4L2_CAP_STREAMING |
+ V4L2_CAP_DEVICE_CAPS;
+ cap->capabilities |= dev->ext_vv_data->capabilities;
return 0;
}
diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c
index 3cf25abf5807..4489744fbbd9 100644
--- a/drivers/media/common/videobuf2/videobuf2-core.c
+++ b/drivers/media/common/videobuf2/videobuf2-core.c
@@ -205,8 +205,13 @@ static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
* NOTE: mmapped areas should be page aligned
*/
for (plane = 0; plane < vb->num_planes; ++plane) {
+ /* Memops alloc requires size to be page aligned. */
unsigned long size = PAGE_ALIGN(vb->planes[plane].length);
+ /* Did it wrap around? */
+ if (size < vb->planes[plane].length)
+ goto free;
+
mem_priv = call_ptr_memop(vb, alloc,
q->alloc_devs[plane] ? : q->dev,
q->dma_attrs, size, q->dma_dir, q->gfp_flags);
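
To make the wrap-around check above concrete, a minimal sketch; the helper name is hypothetical, and the real code jumps to its free label rather than returning.

#include <linux/mm.h>	/* PAGE_ALIGN() */

/* Sketch: reject plane lengths that make PAGE_ALIGN() wrap around. */
static int check_plane_length(unsigned long length)
{
	unsigned long size = PAGE_ALIGN(length);

	/* e.g. length = ULONG_MAX - 100 aligns to 0, and 0 < length */
	if (size < length)
		return -EINVAL;

	return 0;
}
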
diff --git a/drivers/media/common/videobuf2/videobuf2-dma-contig.c b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
index ecbef266130b..7d77e4d30c8a 100644
--- a/drivers/media/common/videobuf2/videobuf2-dma-contig.c
+++ b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
@@ -475,8 +475,7 @@ static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
buf->dma_dir = dma_dir;
offset = lower_32_bits(offset_in_page(vaddr));
- vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE ||
- dma_dir == DMA_BIDIRECTIONAL);
+ vec = vb2_create_framevec(vaddr, size);
if (IS_ERR(vec)) {
ret = PTR_ERR(vec);
goto fail_buf;
diff --git a/drivers/media/common/videobuf2/videobuf2-dma-sg.c b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
index 4a4c49d6085c..ed706b2a263c 100644
--- a/drivers/media/common/videobuf2/videobuf2-dma-sg.c
+++ b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
@@ -59,7 +59,7 @@ static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
gfp_t gfp_flags)
{
unsigned int last_page = 0;
- int size = buf->size;
+ unsigned long size = buf->size;
while (size > 0) {
struct page *pages;
@@ -239,8 +239,7 @@ static void *vb2_dma_sg_get_userptr(struct device *dev, unsigned long vaddr,
buf->offset = vaddr & ~PAGE_MASK;
buf->size = size;
buf->dma_sgt = &buf->sg_table;
- vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE ||
- dma_dir == DMA_BIDIRECTIONAL);
+ vec = vb2_create_framevec(vaddr, size);
if (IS_ERR(vec))
goto userptr_fail_pfnvec;
buf->vec = vec;
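
The int-to-unsigned-long change above avoids a silent truncation for very large buffers; a short illustration with an assumed size:

/*
 * On a 64-bit build with buf->size = 0x100000000 (4 GiB):
 *
 *	int size = buf->size;		   truncates to 0, so the
 *					   while (size > 0) loop never runs
 *	unsigned long size = buf->size;	   keeps 4 GiB and the pages are
 *					   actually allocated
 */
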
diff --git a/drivers/media/common/videobuf2/videobuf2-memops.c b/drivers/media/common/videobuf2/videobuf2-memops.c
index c4a85be48ac2..6e9e05153f4e 100644
--- a/drivers/media/common/videobuf2/videobuf2-memops.c
+++ b/drivers/media/common/videobuf2/videobuf2-memops.c
@@ -26,7 +26,6 @@
* vb2_create_framevec() - map virtual addresses to pfns
* @start: Virtual user address where we start mapping
* @length: Length of a range to map
- * @write: Should we map for writing into the area
*
* This function allocates and fills in a vector with pfns corresponding to
* virtual address range passed in arguments. If pfns have corresponding pages,
@@ -35,17 +34,13 @@
* failure. Returned vector needs to be freed via vb2_destroy_pfnvec().
*/
struct frame_vector *vb2_create_framevec(unsigned long start,
- unsigned long length,
- bool write)
+ unsigned long length)
{
int ret;
unsigned long first, last;
unsigned long nr;
struct frame_vector *vec;
- unsigned int flags = FOLL_FORCE;
-
- if (write)
- flags |= FOLL_WRITE;
+ unsigned int flags = FOLL_FORCE | FOLL_WRITE;
first = start >> PAGE_SHIFT;
last = (start + length - 1) >> PAGE_SHIFT;
diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c
index fb9ac7696fc6..40d76eb4c2fe 100644
--- a/drivers/media/common/videobuf2/videobuf2-v4l2.c
+++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c
@@ -563,11 +563,6 @@ static void __fill_v4l2_buffer(struct vb2_buffer *vb, void *pb)
b->flags |= V4L2_BUF_FLAG_REQUEST_FD;
b->request_fd = vbuf->request_fd;
}
-
- if (!q->is_output &&
- b->flags & V4L2_BUF_FLAG_DONE &&
- b->flags & V4L2_BUF_FLAG_LAST)
- q->last_buffer_dequeued = true;
}
/*
@@ -786,6 +781,11 @@ int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
ret = vb2_core_dqbuf(q, NULL, b, nonblocking);
+ if (!q->is_output &&
+ b->flags & V4L2_BUF_FLAG_DONE &&
+ b->flags & V4L2_BUF_FLAG_LAST)
+ q->last_buffer_dequeued = true;
+
/*
* After calling the VIDIOC_DQBUF V4L2_BUF_FLAG_DONE must be
* cleared.
diff --git a/drivers/media/common/videobuf2/videobuf2-vmalloc.c b/drivers/media/common/videobuf2/videobuf2-vmalloc.c
index 1c6659f7c394..04d51ca63223 100644
--- a/drivers/media/common/videobuf2/videobuf2-vmalloc.c
+++ b/drivers/media/common/videobuf2/videobuf2-vmalloc.c
@@ -87,8 +87,7 @@ static void *vb2_vmalloc_get_userptr(struct device *dev, unsigned long vaddr,
buf->dma_dir = dma_dir;
offset = vaddr & ~PAGE_MASK;
buf->size = size;
- vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE ||
- dma_dir == DMA_BIDIRECTIONAL);
+ vec = vb2_create_framevec(vaddr, size);
if (IS_ERR(vec)) {
ret = PTR_ERR(vec);
goto fail_pfnvec_create;
diff --git a/drivers/media/dvb-core/Kconfig b/drivers/media/dvb-core/Kconfig
index aac4bebb35f7..90e038d5ffd9 100644
--- a/drivers/media/dvb-core/Kconfig
+++ b/drivers/media/dvb-core/Kconfig
@@ -19,7 +19,6 @@ config DVB_MAX_ADAPTERS
config DVB_DYNAMIC_MINORS
bool "Dynamic DVB minor allocation"
depends on DVB_CORE
- default n
help
If you say Y here, the DVB subsystem will use dynamic minor
allocation for any device that uses the DVB major number.
@@ -32,7 +31,6 @@ config DVB_DYNAMIC_MINORS
config DVB_DEMUX_SECTION_LOSS_LOG
bool "Enable DVB demux section packet loss log"
depends on DVB_CORE
- default n
help
Enable extra log messages meant to detect packet loss
inside the Kernel.
@@ -45,7 +43,6 @@ config DVB_DEMUX_SECTION_LOSS_LOG
config DVB_ULE_DEBUG
bool "Enable DVB net ULE packet debug messages"
depends on DVB_CORE
- default n
help
Enable extra log messages meant to detect problems while
handling DVB network ULE packet loss inside the Kernel.
diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
index 6351a97f3d18..209186c5cd9b 100644
--- a/drivers/media/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb-core/dvb_frontend.c
@@ -2311,6 +2311,78 @@ static int dtv_set_frontend(struct dvb_frontend *fe)
return 0;
}
+static int dvb_get_property(struct dvb_frontend *fe, struct file *file,
+ struct dtv_properties *tvps)
+{
+ struct dvb_frontend_private *fepriv = fe->frontend_priv;
+ struct dtv_property *tvp = NULL;
+ struct dtv_frontend_properties getp;
+ int i, err;
+
+ memcpy(&getp, &fe->dtv_property_cache, sizeof(getp));
+
+ dev_dbg(fe->dvb->device, "%s: properties.num = %d\n",
+ __func__, tvps->num);
+ dev_dbg(fe->dvb->device, "%s: properties.props = %p\n",
+ __func__, tvps->props);
+
+ /*
+ * Put an arbitrary limit on the number of messages that can
+ * be sent at once
+ */
+ if (!tvps->num || tvps->num > DTV_IOCTL_MAX_MSGS)
+ return -EINVAL;
+
+ tvp = memdup_user((void __user *)tvps->props, tvps->num * sizeof(*tvp));
+ if (IS_ERR(tvp))
+ return PTR_ERR(tvp);
+
+ /*
+	 * Let's use our own copy of the property cache, in order to
+	 * avoid interfering with the DTV zigzag logic, as drivers might
+	 * return garbage if they don't check whether the data is
+	 * available before updating the properties cache.
+ */
+ if (fepriv->state != FESTATE_IDLE) {
+ err = dtv_get_frontend(fe, &getp, NULL);
+ if (err < 0)
+ goto out;
+ }
+ for (i = 0; i < tvps->num; i++) {
+ err = dtv_property_process_get(fe, &getp,
+ tvp + i, file);
+ if (err < 0)
+ goto out;
+ }
+
+ if (copy_to_user((void __user *)tvps->props, tvp,
+ tvps->num * sizeof(struct dtv_property))) {
+ err = -EFAULT;
+ goto out;
+ }
+
+ err = 0;
+out:
+ kfree(tvp);
+ return err;
+}
+
+static int dvb_get_frontend(struct dvb_frontend *fe,
+ struct dvb_frontend_parameters *p_out)
+{
+ struct dtv_frontend_properties getp;
+
+ /*
+	 * Let's use our own copy of the property cache, in order to
+	 * avoid interfering with the DTV zigzag logic, as drivers might
+	 * return garbage if they don't check whether the data is
+	 * available before updating the properties cache.
+ */
+ memcpy(&getp, &fe->dtv_property_cache, sizeof(getp));
+
+ return dtv_get_frontend(fe, &getp, p_out);
+}
+
static int dvb_frontend_handle_ioctl(struct file *file,
unsigned int cmd, void *parg)
{
@@ -2356,58 +2428,9 @@ static int dvb_frontend_handle_ioctl(struct file *file,
err = 0;
break;
}
- case FE_GET_PROPERTY: {
- struct dtv_properties *tvps = parg;
- struct dtv_property *tvp = NULL;
- struct dtv_frontend_properties getp = fe->dtv_property_cache;
-
- dev_dbg(fe->dvb->device, "%s: properties.num = %d\n",
- __func__, tvps->num);
- dev_dbg(fe->dvb->device, "%s: properties.props = %p\n",
- __func__, tvps->props);
-
- /*
- * Put an arbitrary limit on the number of messages that can
- * be sent at once
- */
- if (!tvps->num || (tvps->num > DTV_IOCTL_MAX_MSGS))
- return -EINVAL;
-
- tvp = memdup_user((void __user *)tvps->props, tvps->num * sizeof(*tvp));
- if (IS_ERR(tvp))
- return PTR_ERR(tvp);
-
- /*
- * Let's use our own copy of property cache, in order to
- * avoid mangling with DTV zigzag logic, as drivers might
- * return crap, if they don't check if the data is available
- * before updating the properties cache.
- */
- if (fepriv->state != FESTATE_IDLE) {
- err = dtv_get_frontend(fe, &getp, NULL);
- if (err < 0) {
- kfree(tvp);
- return err;
- }
- }
- for (i = 0; i < tvps->num; i++) {
- err = dtv_property_process_get(fe, &getp,
- tvp + i, file);
- if (err < 0) {
- kfree(tvp);
- return err;
- }
- }
-
- if (copy_to_user((void __user *)tvps->props, tvp,
- tvps->num * sizeof(struct dtv_property))) {
- kfree(tvp);
- return -EFAULT;
- }
- kfree(tvp);
- err = 0;
+ case FE_GET_PROPERTY:
+ err = dvb_get_property(fe, file, parg);
break;
- }
case FE_GET_INFO: {
struct dvb_frontend_info *info = parg;
@@ -2545,7 +2568,6 @@ static int dvb_frontend_handle_ioctl(struct file *file,
fepriv->tune_mode_flags = (unsigned long)parg;
err = 0;
break;
-
/* DEPRECATED dish control ioctls */
case FE_DISHNETWORK_SEND_LEGACY_CMD:
@@ -2664,22 +2686,14 @@ static int dvb_frontend_handle_ioctl(struct file *file,
break;
err = dtv_set_frontend(fe);
break;
+
case FE_GET_EVENT:
err = dvb_frontend_get_event(fe, parg, file->f_flags);
break;
- case FE_GET_FRONTEND: {
- struct dtv_frontend_properties getp = fe->dtv_property_cache;
-
- /*
- * Let's use our own copy of property cache, in order to
- * avoid mangling with DTV zigzag logic, as drivers might
- * return crap, if they don't check if the data is available
- * before updating the properties cache.
- */
- err = dtv_get_frontend(fe, &getp, parg);
+ case FE_GET_FRONTEND:
+ err = dvb_get_frontend(fe, parg);
break;
- }
default:
return -ENOTSUPP;
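
The refactored FE_GET_PROPERTY path above serves the same user-space API as before; for reference, a minimal caller. The fe_fd descriptor is assumed to be open on a frontend node such as /dev/dvb/adapter0/frontend0, and the error handling is deliberately terse.

#include <sys/ioctl.h>
#include <linux/dvb/frontend.h>

/* Query the currently configured delivery system from a DVB frontend. */
int get_delivery_system(int fe_fd, unsigned int *delsys)
{
	struct dtv_property prop = { .cmd = DTV_DELIVERY_SYSTEM };
	struct dtv_properties props = { .num = 1, .props = &prop };

	if (ioctl(fe_fd, FE_GET_PROPERTY, &props) < 0)
		return -1;

	*delsys = prop.u.data;	/* e.g. SYS_DVBT, SYS_DVBS2, ... */
	return 0;
}
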
diff --git a/drivers/media/dvb-frontends/Kconfig b/drivers/media/dvb-frontends/Kconfig
index 847da72d1256..dc43749177df 100644
--- a/drivers/media/dvb-frontends/Kconfig
+++ b/drivers/media/dvb-frontends/Kconfig
@@ -1,5 +1,5 @@
menu "Customise DVB Frontends"
- visible if !MEDIA_SUBDRV_AUTOSELECT || COMPILE_TEST
+ visible if !MEDIA_SUBDRV_AUTOSELECT || COMPILE_TEST || EXPERT
comment "Multistandard (satellite) frontends"
depends on DVB_CORE
@@ -945,5 +945,4 @@ comment "Tools to develop new frontends"
config DVB_DUMMY_FE
tristate "Dummy frontend driver"
depends on DVB_CORE
- default n
endmenu
diff --git a/drivers/media/dvb-frontends/rtl2832_sdr.c b/drivers/media/dvb-frontends/rtl2832_sdr.c
index cf1a8f77ee02..e05c21d35dc8 100644
--- a/drivers/media/dvb-frontends/rtl2832_sdr.c
+++ b/drivers/media/dvb-frontends/rtl2832_sdr.c
@@ -428,9 +428,6 @@ static int rtl2832_sdr_querycap(struct file *file, void *fh,
strscpy(cap->driver, KBUILD_MODNAME, sizeof(cap->driver));
strscpy(cap->card, dev->vdev.name, sizeof(cap->card));
usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info));
- cap->device_caps = V4L2_CAP_SDR_CAPTURE | V4L2_CAP_STREAMING |
- V4L2_CAP_READWRITE | V4L2_CAP_TUNER;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -1242,6 +1239,8 @@ static struct video_device rtl2832_sdr_template = {
.release = video_device_release_empty,
.fops = &rtl2832_sdr_fops,
.ioctl_ops = &rtl2832_sdr_ioctl_ops,
+ .device_caps = V4L2_CAP_SDR_CAPTURE | V4L2_CAP_STREAMING |
+ V4L2_CAP_READWRITE | V4L2_CAP_TUNER,
};
static int rtl2832_sdr_s_ctrl(struct v4l2_ctrl *ctrl)
diff --git a/drivers/media/dvb-frontends/si2168.c b/drivers/media/dvb-frontends/si2168.c
index 5dae571e2f62..168c503e9154 100644
--- a/drivers/media/dvb-frontends/si2168.c
+++ b/drivers/media/dvb-frontends/si2168.c
@@ -674,8 +674,11 @@ static const struct dvb_frontend_ops si2168_ops = {
.delsys = {SYS_DVBT, SYS_DVBT2, SYS_DVBC_ANNEX_A},
.info = {
.name = "Silicon Labs Si2168",
- .symbol_rate_min = 1000000,
- .symbol_rate_max = 7200000,
+ .frequency_min_hz = 48 * MHz,
+ .frequency_max_hz = 870 * MHz,
+ .frequency_stepsize_hz = 62500,
+ .symbol_rate_min = 1000000,
+ .symbol_rate_max = 7200000,
.caps = FE_CAN_FEC_1_2 |
FE_CAN_FEC_2_3 |
FE_CAN_FEC_3_4 |
diff --git a/drivers/media/dvb-frontends/stv0297.c b/drivers/media/dvb-frontends/stv0297.c
index dac396c95a59..6d5962d5697a 100644
--- a/drivers/media/dvb-frontends/stv0297.c
+++ b/drivers/media/dvb-frontends/stv0297.c
@@ -682,7 +682,7 @@ static const struct dvb_frontend_ops stv0297_ops = {
.delsys = { SYS_DVBC_ANNEX_A },
.info = {
.name = "ST STV0297 DVB-C",
- .frequency_min_hz = 470 * MHz,
+ .frequency_min_hz = 47 * MHz,
.frequency_max_hz = 862 * MHz,
.frequency_stepsize_hz = 62500,
.symbol_rate_min = 870000,
diff --git a/drivers/media/dvb-frontends/stv090x.c b/drivers/media/dvb-frontends/stv090x.c
index d1261571dbe4..90d24131d335 100644
--- a/drivers/media/dvb-frontends/stv090x.c
+++ b/drivers/media/dvb-frontends/stv090x.c
@@ -4889,6 +4889,66 @@ static int stv090x_set_gpio(struct dvb_frontend *fe, u8 gpio, u8 dir,
return stv090x_write_reg(state, STV090x_GPIOxCFG(gpio), reg);
}
+static int stv090x_setup_compound(struct stv090x_state *state)
+{
+ struct stv090x_dev *temp_int;
+
+ temp_int = find_dev(state->i2c,
+ state->config->address);
+
+ if (temp_int && state->demod_mode == STV090x_DUAL) {
+ state->internal = temp_int->internal;
+ state->internal->num_used++;
+ dprintk(FE_INFO, 1, "Found Internal Structure!");
+ } else {
+ state->internal = kmalloc(sizeof(*state->internal), GFP_KERNEL);
+ if (!state->internal)
+ goto error;
+ temp_int = append_internal(state->internal);
+ if (!temp_int) {
+ kfree(state->internal);
+ goto error;
+ }
+ state->internal->num_used = 1;
+ state->internal->mclk = 0;
+ state->internal->dev_ver = 0;
+ state->internal->i2c_adap = state->i2c;
+ state->internal->i2c_addr = state->config->address;
+ dprintk(FE_INFO, 1, "Create New Internal Structure!");
+
+ mutex_init(&state->internal->demod_lock);
+ mutex_init(&state->internal->tuner_lock);
+
+ if (stv090x_setup(&state->frontend) < 0) {
+ dprintk(FE_ERROR, 1, "Error setting up device");
+ goto err_remove;
+ }
+ }
+
+ if (state->internal->dev_ver >= 0x30)
+ state->frontend.ops.info.caps |= FE_CAN_MULTISTREAM;
+
+ /* workaround for stuck DiSEqC output */
+ if (state->config->diseqc_envelope_mode)
+ stv090x_send_diseqc_burst(&state->frontend, SEC_MINI_A);
+
+ state->config->set_gpio = stv090x_set_gpio;
+
+ dprintk(FE_ERROR, 1, "Probing %s demodulator(%d) Cut=0x%02x",
+ state->device == STV0900 ? "STV0900" : "STV0903",
+ state->config->demod,
+ state->internal->dev_ver);
+
+ return 0;
+
+error:
+ return -ENOMEM;
+err_remove:
+ remove_dev(state->internal);
+ kfree(state->internal);
+ return -ENODEV;
+}
+
static const struct dvb_frontend_ops stv090x_ops = {
.delsys = { SYS_DVBS, SYS_DVBS2, SYS_DSS },
.info = {
@@ -4921,85 +4981,118 @@ static const struct dvb_frontend_ops stv090x_ops = {
.read_snr = stv090x_read_cnr,
};
+static struct dvb_frontend *stv090x_get_dvb_frontend(struct i2c_client *client)
+{
+ struct stv090x_state *state = i2c_get_clientdata(client);
-struct dvb_frontend *stv090x_attach(struct stv090x_config *config,
- struct i2c_adapter *i2c,
- enum stv090x_demodulator demod)
+ dev_dbg(&client->dev, "\n");
+
+ return &state->frontend;
+}
+
+static int stv090x_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
+ int ret = 0;
+ struct stv090x_config *config = client->dev.platform_data;
+
struct stv090x_state *state = NULL;
- struct stv090x_dev *temp_int;
- state = kzalloc(sizeof (struct stv090x_state), GFP_KERNEL);
- if (state == NULL)
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state) {
+ ret = -ENOMEM;
goto error;
+ }
state->verbose = &verbose;
state->config = config;
- state->i2c = i2c;
+ state->i2c = client->adapter;
state->frontend.ops = stv090x_ops;
state->frontend.demodulator_priv = state;
- state->demod = demod;
- state->demod_mode = config->demod_mode; /* Single or Dual mode */
+ state->demod = config->demod;
+ /* Single or Dual mode */
+ state->demod_mode = config->demod_mode;
state->device = config->device;
- state->rolloff = STV090x_RO_35; /* default */
+ /* default */
+ state->rolloff = STV090x_RO_35;
- temp_int = find_dev(state->i2c,
- state->config->address);
+ ret = stv090x_setup_compound(state);
+ if (ret)
+ goto error;
- if ((temp_int != NULL) && (state->demod_mode == STV090x_DUAL)) {
- state->internal = temp_int->internal;
- state->internal->num_used++;
- dprintk(FE_INFO, 1, "Found Internal Structure!");
- } else {
- state->internal = kmalloc(sizeof(struct stv090x_internal),
- GFP_KERNEL);
- if (!state->internal)
- goto error;
- temp_int = append_internal(state->internal);
- if (!temp_int) {
- kfree(state->internal);
- goto error;
- }
- state->internal->num_used = 1;
- state->internal->mclk = 0;
- state->internal->dev_ver = 0;
- state->internal->i2c_adap = state->i2c;
- state->internal->i2c_addr = state->config->address;
- dprintk(FE_INFO, 1, "Create New Internal Structure!");
+ i2c_set_clientdata(client, state);
- mutex_init(&state->internal->demod_lock);
- mutex_init(&state->internal->tuner_lock);
+ /* setup callbacks */
+ config->get_dvb_frontend = stv090x_get_dvb_frontend;
- if (stv090x_setup(&state->frontend) < 0) {
- dprintk(FE_ERROR, 1, "Error setting up device");
- goto err_remove;
- }
- }
+ return 0;
- if (state->internal->dev_ver >= 0x30)
- state->frontend.ops.info.caps |= FE_CAN_MULTISTREAM;
+error:
+ kfree(state);
+ return ret;
+}
- /* workaround for stuck DiSEqC output */
- if (config->diseqc_envelope_mode)
- stv090x_send_diseqc_burst(&state->frontend, SEC_MINI_A);
+static int stv090x_remove(struct i2c_client *client)
+{
+ struct stv090x_state *state = i2c_get_clientdata(client);
+
+ stv090x_release(&state->frontend);
+ return 0;
+}
- config->set_gpio = stv090x_set_gpio;
+struct dvb_frontend *stv090x_attach(struct stv090x_config *config,
+ struct i2c_adapter *i2c,
+ enum stv090x_demodulator demod)
+{
+ int ret = 0;
+ struct stv090x_state *state = NULL;
- dprintk(FE_ERROR, 1, "Attaching %s demodulator(%d) Cut=0x%02x",
- state->device == STV0900 ? "STV0900" : "STV0903",
- demod,
- state->internal->dev_ver);
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ goto error;
+
+ state->verbose = &verbose;
+ state->config = config;
+ state->i2c = i2c;
+ state->frontend.ops = stv090x_ops;
+ state->frontend.demodulator_priv = state;
+ state->demod = demod;
+ /* Single or Dual mode */
+ state->demod_mode = config->demod_mode;
+ state->device = config->device;
+ /* default */
+ state->rolloff = STV090x_RO_35;
+
+ ret = stv090x_setup_compound(state);
+ if (ret)
+ goto error;
return &state->frontend;
-err_remove:
- remove_dev(state->internal);
- kfree(state->internal);
error:
kfree(state);
return NULL;
}
EXPORT_SYMBOL(stv090x_attach);
+
+static const struct i2c_device_id stv090x_id_table[] = {
+ {"stv090x", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, stv090x_id_table);
+
+static struct i2c_driver stv090x_driver = {
+ .driver = {
+ .name = "stv090x",
+ .suppress_bind_attrs = true,
+ },
+ .probe = stv090x_probe,
+ .remove = stv090x_remove,
+ .id_table = stv090x_id_table,
+};
+
+module_i2c_driver(stv090x_driver);
+
MODULE_PARM_DESC(verbose, "Set Verbosity level");
MODULE_AUTHOR("Manu Abraham");
MODULE_DESCRIPTION("STV090x Multi-Std Broadcast frontend");
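
With the conversion above the demodulator can be bound as a regular I2C client; a hedged sketch of the bridge-driver side. The configuration values and the attach_demod() helper are assumptions, and the sketch relies on the generic i2c_new_client_device()/i2c_client_has_driver() helpers; only the platform_data and get_dvb_frontend() flow comes from the new probe().

static struct stv090x_config demod_cfg = {
	.device		= STV0900,
	.demod_mode	= STV090x_DUAL,
	.demod		= STV090x_DEMODULATOR_0,
	.address	= 0x68,
	/* clock, TS and GPIO settings omitted */
};

static struct dvb_frontend *attach_demod(struct i2c_adapter *i2c)
{
	struct i2c_board_info info = {
		.type		= "stv090x",
		.addr		= demod_cfg.address,
		.platform_data	= &demod_cfg,
	};
	struct i2c_client *client;

	client = i2c_new_client_device(i2c, &info);
	if (!i2c_client_has_driver(client))
		return NULL;

	/* probe() filled in the callback; fetch the frontend through it */
	return demod_cfg.get_dvb_frontend(client);
}
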
diff --git a/drivers/media/dvb-frontends/stv090x.h b/drivers/media/dvb-frontends/stv090x.h
index 13f251a08abd..89f45d9fa427 100644
--- a/drivers/media/dvb-frontends/stv090x.h
+++ b/drivers/media/dvb-frontends/stv090x.h
@@ -57,6 +57,7 @@ struct stv090x_config {
enum stv090x_device device;
enum stv090x_mode demod_mode;
enum stv090x_clkmode clk_mode;
+ enum stv090x_demodulator demod;
u32 xtal; /* default: 8000000 */
u8 address; /* default: 0x68 */
@@ -93,6 +94,8 @@ struct stv090x_config {
/* dir = 0 -> output, dir = 1 -> input/open-drain */
int (*set_gpio)(struct dvb_frontend *fe, u8 gpio, u8 dir, u8 value,
u8 xor_value);
+
+ struct dvb_frontend* (*get_dvb_frontend)(struct i2c_client *i2c);
};
#if IS_REACHABLE(CONFIG_DVB_STV090x)
diff --git a/drivers/media/dvb-frontends/stv090x_priv.h b/drivers/media/dvb-frontends/stv090x_priv.h
index b22c58968c93..f8ece898c153 100644
--- a/drivers/media/dvb-frontends/stv090x_priv.h
+++ b/drivers/media/dvb-frontends/stv090x_priv.h
@@ -237,7 +237,7 @@ struct stv090x_state {
struct stv090x_internal *internal;
struct i2c_adapter *i2c;
- const struct stv090x_config *config;
+ struct stv090x_config *config;
struct dvb_frontend frontend;
u32 *verbose; /* Cached module verbosity */
diff --git a/drivers/media/dvb-frontends/stv6110x.c b/drivers/media/dvb-frontends/stv6110x.c
index 0126cfae2e03..5012d0231652 100644
--- a/drivers/media/dvb-frontends/stv6110x.c
+++ b/drivers/media/dvb-frontends/stv6110x.c
@@ -333,6 +333,41 @@ static void stv6110x_release(struct dvb_frontend *fe)
kfree(stv6110x);
}
+static void st6110x_init_regs(struct stv6110x_state *stv6110x)
+{
+ u8 default_regs[] = {0x07, 0x11, 0xdc, 0x85, 0x17, 0x01, 0xe6, 0x1e};
+
+ memcpy(stv6110x->regs, default_regs, 8);
+}
+
+static void stv6110x_setup_divider(struct stv6110x_state *stv6110x)
+{
+ switch (stv6110x->config->clk_div) {
+ default:
+ case 1:
+ STV6110x_SETFIELD(stv6110x->regs[STV6110x_CTRL2],
+ CTRL2_CO_DIV,
+ 0);
+ break;
+ case 2:
+ STV6110x_SETFIELD(stv6110x->regs[STV6110x_CTRL2],
+ CTRL2_CO_DIV,
+ 1);
+ break;
+ case 4:
+ STV6110x_SETFIELD(stv6110x->regs[STV6110x_CTRL2],
+ CTRL2_CO_DIV,
+ 2);
+ break;
+ case 8:
+ case 0:
+ STV6110x_SETFIELD(stv6110x->regs[STV6110x_CTRL2],
+ CTRL2_CO_DIV,
+ 3);
+ break;
+ }
+}
+
static const struct dvb_tuner_ops stv6110x_ops = {
.info = {
.name = "STV6110(A) Silicon Tuner",
@@ -342,7 +377,7 @@ static const struct dvb_tuner_ops stv6110x_ops = {
.release = stv6110x_release
};
-static const struct stv6110x_devctl stv6110x_ctl = {
+static struct stv6110x_devctl stv6110x_ctl = {
.tuner_init = stv6110x_init,
.tuner_sleep = stv6110x_sleep,
.tuner_set_mode = stv6110x_set_mode,
@@ -356,48 +391,104 @@ static const struct stv6110x_devctl stv6110x_ctl = {
.tuner_get_status = stv6110x_get_status,
};
+static void stv6110x_set_frontend_opts(struct stv6110x_state *stv6110x)
+{
+ stv6110x->frontend->tuner_priv = stv6110x;
+ stv6110x->frontend->ops.tuner_ops = stv6110x_ops;
+}
+
+static struct stv6110x_devctl *stv6110x_get_devctl(struct i2c_client *client)
+{
+ struct stv6110x_state *stv6110x = i2c_get_clientdata(client);
+
+ dev_dbg(&client->dev, "\n");
+
+ return stv6110x->devctl;
+}
+
+static int stv6110x_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct stv6110x_config *config = client->dev.platform_data;
+
+ struct stv6110x_state *stv6110x;
+
+ stv6110x = kzalloc(sizeof(*stv6110x), GFP_KERNEL);
+ if (!stv6110x)
+ return -ENOMEM;
+
+ stv6110x->frontend = config->frontend;
+ stv6110x->i2c = client->adapter;
+ stv6110x->config = config;
+ stv6110x->devctl = &stv6110x_ctl;
+
+ st6110x_init_regs(stv6110x);
+ stv6110x_setup_divider(stv6110x);
+ stv6110x_set_frontend_opts(stv6110x);
+
+ dev_info(&stv6110x->i2c->dev, "Probed STV6110x\n");
+
+ i2c_set_clientdata(client, stv6110x);
+
+ /* setup callbacks */
+ config->get_devctl = stv6110x_get_devctl;
+
+ return 0;
+}
+
+static int stv6110x_remove(struct i2c_client *client)
+{
+ struct stv6110x_state *stv6110x = i2c_get_clientdata(client);
+
+ stv6110x_release(stv6110x->frontend);
+ return 0;
+}
+
const struct stv6110x_devctl *stv6110x_attach(struct dvb_frontend *fe,
const struct stv6110x_config *config,
struct i2c_adapter *i2c)
{
struct stv6110x_state *stv6110x;
- u8 default_regs[] = {0x07, 0x11, 0xdc, 0x85, 0x17, 0x01, 0xe6, 0x1e};
- stv6110x = kzalloc(sizeof (struct stv6110x_state), GFP_KERNEL);
+ stv6110x = kzalloc(sizeof(*stv6110x), GFP_KERNEL);
if (!stv6110x)
return NULL;
+ stv6110x->frontend = fe;
stv6110x->i2c = i2c;
stv6110x->config = config;
stv6110x->devctl = &stv6110x_ctl;
- memcpy(stv6110x->regs, default_regs, 8);
- /* setup divider */
- switch (stv6110x->config->clk_div) {
- default:
- case 1:
- STV6110x_SETFIELD(stv6110x->regs[STV6110x_CTRL2], CTRL2_CO_DIV, 0);
- break;
- case 2:
- STV6110x_SETFIELD(stv6110x->regs[STV6110x_CTRL2], CTRL2_CO_DIV, 1);
- break;
- case 4:
- STV6110x_SETFIELD(stv6110x->regs[STV6110x_CTRL2], CTRL2_CO_DIV, 2);
- break;
- case 8:
- case 0:
- STV6110x_SETFIELD(stv6110x->regs[STV6110x_CTRL2], CTRL2_CO_DIV, 3);
- break;
- }
+ st6110x_init_regs(stv6110x);
+ stv6110x_setup_divider(stv6110x);
+ stv6110x_set_frontend_opts(stv6110x);
fe->tuner_priv = stv6110x;
fe->ops.tuner_ops = stv6110x_ops;
- printk(KERN_INFO "%s: Attaching STV6110x\n", __func__);
+ dev_info(&stv6110x->i2c->dev, "Attaching STV6110x\n");
return stv6110x->devctl;
}
EXPORT_SYMBOL(stv6110x_attach);
+static const struct i2c_device_id stv6110x_id_table[] = {
+ {"stv6110x", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, stv6110x_id_table);
+
+static struct i2c_driver stv6110x_driver = {
+ .driver = {
+ .name = "stv6110x",
+ .suppress_bind_attrs = true,
+ },
+ .probe = stv6110x_probe,
+ .remove = stv6110x_remove,
+ .id_table = stv6110x_id_table,
+};
+
+module_i2c_driver(stv6110x_driver);
+
MODULE_AUTHOR("Manu Abraham");
MODULE_DESCRIPTION("STV6110x Silicon tuner");
MODULE_LICENSE("GPL");
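
The tuner follows the same pattern; a minimal sketch. The address, reference clock and divider are example values; the new frontend and get_devctl() members come from the stv6110x_config changes shown below.

static struct stv6110x_config tuner_cfg = {
	.addr	 = 0x60,	/* example address */
	.refclk	 = 16000000,
	.clk_div = 2,
};

static const struct stv6110x_devctl *bind_tuner(struct dvb_frontend *fe,
						struct i2c_adapter *i2c)
{
	struct i2c_board_info info = {
		.type		= "stv6110x",
		.addr		= tuner_cfg.addr,
		.platform_data	= &tuner_cfg,
	};
	struct i2c_client *client;

	tuner_cfg.frontend = fe;	/* consumed by stv6110x_probe() */
	client = i2c_new_client_device(i2c, &info);
	if (!i2c_client_has_driver(client))
		return NULL;

	return tuner_cfg.get_devctl(client);
}
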
diff --git a/drivers/media/dvb-frontends/stv6110x.h b/drivers/media/dvb-frontends/stv6110x.h
index 1630e55255fd..1feade3158c2 100644
--- a/drivers/media/dvb-frontends/stv6110x.h
+++ b/drivers/media/dvb-frontends/stv6110x.h
@@ -15,6 +15,9 @@ struct stv6110x_config {
u8 addr;
u32 refclk;
u8 clk_div; /* divisor value for the output clock */
+ struct dvb_frontend *frontend;
+
+ struct stv6110x_devctl* (*get_devctl)(struct i2c_client *i2c);
};
enum tuner_mode {
diff --git a/drivers/media/dvb-frontends/stv6110x_priv.h b/drivers/media/dvb-frontends/stv6110x_priv.h
index 909094df28df..b27769558f78 100644
--- a/drivers/media/dvb-frontends/stv6110x_priv.h
+++ b/drivers/media/dvb-frontends/stv6110x_priv.h
@@ -54,11 +54,12 @@
#define REFCLOCK_MHz (stv6110x->config->refclk / 1000000)
struct stv6110x_state {
+ struct dvb_frontend *frontend;
struct i2c_adapter *i2c;
const struct stv6110x_config *config;
u8 regs[8];
- const struct stv6110x_devctl *devctl;
+ struct stv6110x_devctl *devctl;
};
#endif /* __STV6110x_PRIV_H */
diff --git a/drivers/media/dvb-frontends/tua6100.c b/drivers/media/dvb-frontends/tua6100.c
index f7c3e6be8e4d..2483f614d0e7 100644
--- a/drivers/media/dvb-frontends/tua6100.c
+++ b/drivers/media/dvb-frontends/tua6100.c
@@ -67,8 +67,8 @@ static int tua6100_set_params(struct dvb_frontend *fe)
struct i2c_msg msg1 = { .addr = priv->i2c_address, .flags = 0, .buf = reg1, .len = 4 };
struct i2c_msg msg2 = { .addr = priv->i2c_address, .flags = 0, .buf = reg2, .len = 3 };
-#define _R 4
-#define _P 32
+#define _R_VAL 4
+#define _P_VAL 32
#define _ri 4000000
// setup register 0
@@ -83,14 +83,14 @@ static int tua6100_set_params(struct dvb_frontend *fe)
else
reg1[1] = 0x0c;
- if (_P == 64)
+ if (_P_VAL == 64)
reg1[1] |= 0x40;
if (c->frequency >= 1525000)
reg1[1] |= 0x80;
// register 2
- reg2[1] = (_R >> 8) & 0x03;
- reg2[2] = _R;
+ reg2[1] = (_R_VAL >> 8) & 0x03;
+ reg2[2] = _R_VAL;
if (c->frequency < 1455000)
reg2[1] |= 0x1c;
else if (c->frequency < 1630000)
@@ -102,18 +102,18 @@ static int tua6100_set_params(struct dvb_frontend *fe)
* The N divisor ratio (note: c->frequency is in kHz, but we
* need it in Hz)
*/
- prediv = (c->frequency * _R) / (_ri / 1000);
- div = prediv / _P;
+ prediv = (c->frequency * _R_VAL) / (_ri / 1000);
+ div = prediv / _P_VAL;
reg1[1] |= (div >> 9) & 0x03;
reg1[2] = div >> 1;
reg1[3] = (div << 7);
- priv->frequency = ((div * _P) * (_ri / 1000)) / _R;
+ priv->frequency = ((div * _P_VAL) * (_ri / 1000)) / _R_VAL;
// Finally, calculate and store the value for A
- reg1[3] |= (prediv - (div*_P)) & 0x7f;
+ reg1[3] |= (prediv - (div*_P_VAL)) & 0x7f;
-#undef _R
-#undef _P
+#undef _R_VAL
+#undef _P_VAL
#undef _ri
if (fe->ops.i2c_gate_ctrl)
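
For reference, the divider arithmetic above worked through with the renamed constants and an assumed tuning request of c->frequency = 1550000 kHz:

/*
 * _R_VAL = 4, _P_VAL = 32, _ri = 4000000:
 *
 *	prediv = (1550000 * 4) / (4000000 / 1000) = 6200000 / 4000 = 1550
 *	div    = 1550 / 32                        = 48	(integer division)
 *	A      = prediv - div * _P_VAL            = 1550 - 1536 = 14
 *
 *	priv->frequency = ((48 * 32) * (4000000 / 1000)) / 4 = 1536000 (kHz)
 */
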
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index cb8db944aa41..79ce9ec6fc1b 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -6,7 +6,7 @@
if VIDEO_V4L2
config VIDEO_IR_I2C
- tristate "I2C module for IR" if !MEDIA_SUBDRV_AUTOSELECT
+ tristate "I2C module for IR" if !MEDIA_SUBDRV_AUTOSELECT || EXPERT
depends on I2C && RC_CORE
default y
help
@@ -23,7 +23,7 @@ config VIDEO_IR_I2C
#
menu "I2C Encoders, decoders, sensors and other helper chips"
- visible if !MEDIA_SUBDRV_AUTOSELECT || COMPILE_TEST
+ visible if !MEDIA_SUBDRV_AUTOSELECT || COMPILE_TEST || EXPERT
comment "Audio decoders, processors and mixers"
@@ -511,6 +511,7 @@ config VIDEO_ADV7393
config VIDEO_ADV7511
tristate "Analog Devices ADV7511 encoder"
depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
+ depends on DRM_I2C_ADV7511=n || COMPILE_TEST
select HDMI
help
Support for the Analog Devices ADV7511 video encoder.
diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
index d8ad9dad495d..fd4ea86dedd5 100644
--- a/drivers/media/i2c/Makefile
+++ b/drivers/media/i2c/Makefile
@@ -35,7 +35,7 @@ obj-$(CONFIG_VIDEO_ADV748X) += adv748x/
obj-$(CONFIG_VIDEO_ADV7604) += adv7604.o
obj-$(CONFIG_VIDEO_ADV7842) += adv7842.o
obj-$(CONFIG_VIDEO_AD9389B) += ad9389b.o
-obj-$(CONFIG_VIDEO_ADV7511) += adv7511.o
+obj-$(CONFIG_VIDEO_ADV7511) += adv7511-v4l2.o
obj-$(CONFIG_VIDEO_VPX3220) += vpx3220.o
obj-$(CONFIG_VIDEO_VS6624) += vs6624.o
obj-$(CONFIG_VIDEO_BT819) += bt819.o
diff --git a/drivers/media/i2c/adv7511.c b/drivers/media/i2c/adv7511-v4l2.c
index cec5ebb1c9e6..2ad6bdf1a9fc 100644
--- a/drivers/media/i2c/adv7511.c
+++ b/drivers/media/i2c/adv7511-v4l2.c
@@ -5,6 +5,11 @@
* Copyright 2013 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
*/
+/*
+ * This file is named adv7511-v4l2.c so it doesn't conflict with the Analog
+ * Devices ADV7511 DRM driver (config fragment CONFIG_DRM_I2C_ADV7511).
+ */
+
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/media/i2c/ak881x.c b/drivers/media/i2c/ak881x.c
index e79be9bebe5a..1adaf470c75a 100644
--- a/drivers/media/i2c/ak881x.c
+++ b/drivers/media/i2c/ak881x.c
@@ -229,7 +229,7 @@ static const struct v4l2_subdev_ops ak881x_subdev_ops = {
static int ak881x_probe(struct i2c_client *client,
const struct i2c_device_id *did)
{
- struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+ struct i2c_adapter *adapter = client->adapter;
struct ak881x *ak881x;
u8 ifmode, data;
diff --git a/drivers/media/i2c/cx25840/cx25840-core.c b/drivers/media/i2c/cx25840/cx25840-core.c
index 3ecf79d242f2..0de946fe2109 100644
--- a/drivers/media/i2c/cx25840/cx25840-core.c
+++ b/drivers/media/i2c/cx25840/cx25840-core.c
@@ -21,9 +21,11 @@
*
* CX23888 DIF support for the HVR1850
* Copyright (C) 2011 Steven Toth <stoth@kernellabs.com>
+ *
+ * CX2584x pin to pad mapping and output format configuration support are
+ * Copyright (C) 2011 Maciej S. Szmigiero <mail@maciej.szmigiero.name>
*/
-
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -64,17 +66,17 @@ MODULE_LICENSE("GPL");
static int cx25840_debug;
-module_param_named(debug,cx25840_debug, int, 0644);
+module_param_named(debug, cx25840_debug, int, 0644);
MODULE_PARM_DESC(debug, "Debugging messages [0=Off (default) 1=On]");
-
/* ----------------------------------------------------------------------- */
static void cx23888_std_setup(struct i2c_client *client);
int cx25840_write(struct i2c_client *client, u16 addr, u8 value)
{
u8 buffer[3];
+
buffer[0] = addr >> 8;
buffer[1] = addr & 0xff;
buffer[2] = value;
@@ -84,6 +86,7 @@ int cx25840_write(struct i2c_client *client, u16 addr, u8 value)
int cx25840_write4(struct i2c_client *client, u16 addr, u32 value)
{
u8 buffer[6];
+
buffer[0] = addr >> 8;
buffer[1] = addr & 0xff;
buffer[2] = value & 0xff;
@@ -93,7 +96,7 @@ int cx25840_write4(struct i2c_client *client, u16 addr, u32 value)
return i2c_master_send(client, buffer, 6);
}
-u8 cx25840_read(struct i2c_client * client, u16 addr)
+u8 cx25840_read(struct i2c_client *client, u16 addr)
{
struct i2c_msg msgs[2];
u8 tx_buf[2], rx_buf[1];
@@ -104,13 +107,13 @@ u8 cx25840_read(struct i2c_client * client, u16 addr)
msgs[0].addr = client->addr;
msgs[0].flags = 0;
msgs[0].len = 2;
- msgs[0].buf = (char *) tx_buf;
+ msgs[0].buf = (char *)tx_buf;
/* Read data from register */
msgs[1].addr = client->addr;
msgs[1].flags = I2C_M_RD;
msgs[1].len = 1;
- msgs[1].buf = (char *) rx_buf;
+ msgs[1].buf = (char *)rx_buf;
if (i2c_transfer(client->adapter, msgs, 2) < 2)
return 0;
@@ -118,7 +121,7 @@ u8 cx25840_read(struct i2c_client * client, u16 addr)
return rx_buf[0];
}
-u32 cx25840_read4(struct i2c_client * client, u16 addr)
+u32 cx25840_read4(struct i2c_client *client, u16 addr)
{
struct i2c_msg msgs[2];
u8 tx_buf[2], rx_buf[4];
@@ -129,13 +132,13 @@ u32 cx25840_read4(struct i2c_client * client, u16 addr)
msgs[0].addr = client->addr;
msgs[0].flags = 0;
msgs[0].len = 2;
- msgs[0].buf = (char *) tx_buf;
+ msgs[0].buf = (char *)tx_buf;
/* Read data from registers */
msgs[1].addr = client->addr;
msgs[1].flags = I2C_M_RD;
msgs[1].len = 4;
- msgs[1].buf = (char *) rx_buf;
+ msgs[1].buf = (char *)rx_buf;
if (i2c_transfer(client->adapter, msgs, 2) < 2)
return 0;
@@ -144,7 +147,7 @@ u32 cx25840_read4(struct i2c_client * client, u16 addr)
rx_buf[0];
}
-int cx25840_and_or(struct i2c_client *client, u16 addr, unsigned and_mask,
+int cx25840_and_or(struct i2c_client *client, u16 addr, unsigned int and_mask,
u8 or_value)
{
return cx25840_write(client, addr,
@@ -162,13 +165,14 @@ int cx25840_and_or4(struct i2c_client *client, u16 addr, u32 and_mask,
/* ----------------------------------------------------------------------- */
-static int set_input(struct i2c_client *client, enum cx25840_video_input vid_input,
- enum cx25840_audio_input aud_input);
+static int set_input(struct i2c_client *client,
+ enum cx25840_video_input vid_input,
+ enum cx25840_audio_input aud_input);
/* ----------------------------------------------------------------------- */
static int cx23885_s_io_pin_config(struct v4l2_subdev *sd, size_t n,
- struct v4l2_subdev_io_pin_config *p)
+ struct v4l2_subdev_io_pin_config *p)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
int i;
@@ -307,13 +311,225 @@ static int cx23885_s_io_pin_config(struct v4l2_subdev *sd, size_t n,
return 0;
}
+static u8 cx25840_function_to_pad(struct i2c_client *client, u8 function)
+{
+ if (function > CX25840_PAD_VRESET) {
+ v4l_err(client, "invalid function %u, assuming default\n",
+ (unsigned int)function);
+ return 0;
+ }
+
+ return function;
+}
+
+static void cx25840_set_invert(u8 *pinctrl3, u8 *voutctrl4, u8 function,
+ u8 pin, bool invert)
+{
+ switch (function) {
+ case CX25840_PAD_IRQ_N:
+ if (invert)
+ *pinctrl3 &= ~2;
+ else
+ *pinctrl3 |= 2;
+ break;
+
+ case CX25840_PAD_ACTIVE:
+ if (invert)
+ *voutctrl4 |= BIT(2);
+ else
+ *voutctrl4 &= ~BIT(2);
+ break;
+
+ case CX25840_PAD_VACTIVE:
+ if (invert)
+ *voutctrl4 |= BIT(5);
+ else
+ *voutctrl4 &= ~BIT(5);
+ break;
+
+ case CX25840_PAD_CBFLAG:
+ if (invert)
+ *voutctrl4 |= BIT(4);
+ else
+ *voutctrl4 &= ~BIT(4);
+ break;
+
+ case CX25840_PAD_VRESET:
+ if (invert)
+ *voutctrl4 |= BIT(0);
+ else
+ *voutctrl4 &= ~BIT(0);
+ break;
+ }
+
+ if (function != CX25840_PAD_DEFAULT)
+ return;
+
+ switch (pin) {
+ case CX25840_PIN_DVALID_PRGM0:
+ if (invert)
+ *voutctrl4 |= BIT(6);
+ else
+ *voutctrl4 &= ~BIT(6);
+ break;
+
+ case CX25840_PIN_HRESET_PRGM2:
+ if (invert)
+ *voutctrl4 |= BIT(1);
+ else
+ *voutctrl4 &= ~BIT(1);
+ break;
+ }
+}
+
+static int cx25840_s_io_pin_config(struct v4l2_subdev *sd, size_t n,
+ struct v4l2_subdev_io_pin_config *p)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ unsigned int i;
+ u8 pinctrl[6], pinconf[10], voutctrl4;
+
+ for (i = 0; i < 6; i++)
+ pinctrl[i] = cx25840_read(client, 0x114 + i);
+
+ for (i = 0; i < 10; i++)
+ pinconf[i] = cx25840_read(client, 0x11c + i);
+
+ voutctrl4 = cx25840_read(client, 0x407);
+
+ for (i = 0; i < n; i++) {
+ u8 strength = p[i].strength;
+
+ if (strength != CX25840_PIN_DRIVE_SLOW &&
+ strength != CX25840_PIN_DRIVE_MEDIUM &&
+ strength != CX25840_PIN_DRIVE_FAST) {
+ v4l_err(client,
+ "invalid drive speed for pin %u (%u), assuming fast\n",
+ (unsigned int)p[i].pin,
+ (unsigned int)strength);
+
+ strength = CX25840_PIN_DRIVE_FAST;
+ }
+
+ switch (p[i].pin) {
+ case CX25840_PIN_DVALID_PRGM0:
+ if (p[i].flags & BIT(V4L2_SUBDEV_IO_PIN_DISABLE))
+ pinctrl[0] &= ~BIT(6);
+ else
+ pinctrl[0] |= BIT(6);
+
+ pinconf[3] &= 0xf0;
+ pinconf[3] |= cx25840_function_to_pad(client,
+ p[i].function);
+
+ cx25840_set_invert(&pinctrl[3], &voutctrl4,
+ p[i].function,
+ CX25840_PIN_DVALID_PRGM0,
+ p[i].flags &
+ BIT(V4L2_SUBDEV_IO_PIN_ACTIVE_LOW));
+
+ pinctrl[4] &= ~(3 << 2); /* CX25840_PIN_DRIVE_MEDIUM */
+ switch (strength) {
+ case CX25840_PIN_DRIVE_SLOW:
+ pinctrl[4] |= 1 << 2;
+ break;
+
+ case CX25840_PIN_DRIVE_FAST:
+ pinctrl[4] |= 2 << 2;
+ break;
+ }
+
+ break;
+
+ case CX25840_PIN_HRESET_PRGM2:
+ if (p[i].flags & BIT(V4L2_SUBDEV_IO_PIN_DISABLE))
+ pinctrl[1] &= ~BIT(0);
+ else
+ pinctrl[1] |= BIT(0);
+
+ pinconf[4] &= 0xf0;
+ pinconf[4] |= cx25840_function_to_pad(client,
+ p[i].function);
+
+ cx25840_set_invert(&pinctrl[3], &voutctrl4,
+ p[i].function,
+ CX25840_PIN_HRESET_PRGM2,
+ p[i].flags &
+ BIT(V4L2_SUBDEV_IO_PIN_ACTIVE_LOW));
+
+ pinctrl[4] &= ~(3 << 2); /* CX25840_PIN_DRIVE_MEDIUM */
+ switch (strength) {
+ case CX25840_PIN_DRIVE_SLOW:
+ pinctrl[4] |= 1 << 2;
+ break;
+
+ case CX25840_PIN_DRIVE_FAST:
+ pinctrl[4] |= 2 << 2;
+ break;
+ }
+
+ break;
+
+ case CX25840_PIN_PLL_CLK_PRGM7:
+ if (p[i].flags & BIT(V4L2_SUBDEV_IO_PIN_DISABLE))
+ pinctrl[2] &= ~BIT(2);
+ else
+ pinctrl[2] |= BIT(2);
+
+ switch (p[i].function) {
+ case CX25840_PAD_XTI_X5_DLL:
+ pinconf[6] = 0;
+ break;
+
+ case CX25840_PAD_AUX_PLL:
+ pinconf[6] = 1;
+ break;
+
+ case CX25840_PAD_VID_PLL:
+ pinconf[6] = 5;
+ break;
+
+ case CX25840_PAD_XTI:
+ pinconf[6] = 2;
+ break;
+
+ default:
+ pinconf[6] = 3;
+ pinconf[6] |=
+ cx25840_function_to_pad(client,
+ p[i].function)
+ << 4;
+ }
+
+ break;
+
+ default:
+ v4l_err(client, "invalid or unsupported pin %u\n",
+ (unsigned int)p[i].pin);
+ break;
+ }
+ }
+
+ cx25840_write(client, 0x407, voutctrl4);
+
+ for (i = 0; i < 6; i++)
+ cx25840_write(client, 0x114 + i, pinctrl[i]);
+
+ for (i = 0; i < 10; i++)
+ cx25840_write(client, 0x11c + i, pinconf[i]);
+
+ return 0;
+}
+
static int common_s_io_pin_config(struct v4l2_subdev *sd, size_t n,
- struct v4l2_subdev_io_pin_config *pincfg)
+ struct v4l2_subdev_io_pin_config *pincfg)
{
struct cx25840_state *state = to_state(sd);
if (is_cx2388x(state))
return cx23885_s_io_pin_config(sd, n, pincfg);
+ else if (is_cx2584x(state))
+ return cx25840_s_io_pin_config(sd, n, pincfg);
return 0;
}
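
Bridge drivers reach the new cx25840_s_io_pin_config() through the v4l2 subdev core op; a minimal sketch. The pin, function and drive strength chosen here are arbitrary examples.

/* Enable the DVALID/PRGM0 pin with its default function at medium drive. */
static int my_bridge_setup_pins(struct v4l2_subdev *sd)
{
	struct v4l2_subdev_io_pin_config cfg = {
		.pin	  = CX25840_PIN_DVALID_PRGM0,
		.function = CX25840_PAD_DEFAULT,
		.strength = CX25840_PIN_DRIVE_MEDIUM,
		.flags	  = 0,	/* enabled, active high */
	};

	return v4l2_subdev_call(sd, core, s_io_pin_config, 1, &cfg);
}
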
@@ -321,8 +537,10 @@ static int common_s_io_pin_config(struct v4l2_subdev *sd, size_t n,
static void init_dll1(struct i2c_client *client)
{
- /* This is the Hauppauge sequence used to
- * initialize the Delay Lock Loop 1 (ADC DLL). */
+ /*
+ * This is the Hauppauge sequence used to
+ * initialize the Delay Lock Loop 1 (ADC DLL).
+ */
cx25840_write(client, 0x159, 0x23);
cx25840_write(client, 0x15a, 0x87);
cx25840_write(client, 0x15b, 0x06);
@@ -337,8 +555,10 @@ static void init_dll1(struct i2c_client *client)
static void init_dll2(struct i2c_client *client)
{
- /* This is the Hauppauge sequence used to
- * initialize the Delay Lock Loop 2 (ADC DLL). */
+ /*
+ * This is the Hauppauge sequence used to
+ * initialize the Delay Lock Loop 2 (ADC DLL).
+ */
cx25840_write(client, 0x15d, 0xe3);
cx25840_write(client, 0x15e, 0x86);
cx25840_write(client, 0x15f, 0x06);
@@ -350,7 +570,11 @@ static void init_dll2(struct i2c_client *client)
static void cx25836_initialize(struct i2c_client *client)
{
- /* reset configuration is described on page 3-77 of the CX25836 datasheet */
+ /*
+	 * Reset configuration is described on page 3-77
+ * of the CX25836 datasheet
+ */
+
/* 2. */
cx25840_and_or(client, 0x000, ~0x01, 0x01);
cx25840_and_or(client, 0x000, ~0x01, 0x00);
@@ -376,10 +600,96 @@ static void cx25836_initialize(struct i2c_client *client)
static void cx25840_work_handler(struct work_struct *work)
{
struct cx25840_state *state = container_of(work, struct cx25840_state, fw_work);
+
cx25840_loadfw(state->c);
wake_up(&state->fw_wait);
}
+#define CX25840_VCONFIG_SET_BIT(state, opt_msk, voc, idx, bit, oneval) \
+ do { \
+ if ((state)->vid_config & (opt_msk)) { \
+ if (((state)->vid_config & (opt_msk)) == \
+ (oneval)) \
+ (voc)[idx] |= BIT(bit); \
+ else \
+ (voc)[idx] &= ~BIT(bit); \
+ } \
+ } while (0)
+
+/* apply current vconfig to hardware regs */
+static void cx25840_vconfig_apply(struct i2c_client *client)
+{
+ struct cx25840_state *state = to_state(i2c_get_clientdata(client));
+ u8 voutctrl[3];
+ unsigned int i;
+
+ for (i = 0; i < 3; i++)
+ voutctrl[i] = cx25840_read(client, 0x404 + i);
+
+ if (state->vid_config & CX25840_VCONFIG_FMT_MASK)
+ voutctrl[0] &= ~3;
+ switch (state->vid_config & CX25840_VCONFIG_FMT_MASK) {
+ case CX25840_VCONFIG_FMT_BT656:
+ voutctrl[0] |= 1;
+ break;
+
+ case CX25840_VCONFIG_FMT_VIP11:
+ voutctrl[0] |= 2;
+ break;
+
+ case CX25840_VCONFIG_FMT_VIP2:
+ voutctrl[0] |= 3;
+ break;
+
+ case CX25840_VCONFIG_FMT_BT601:
+ /* zero */
+ default:
+ break;
+ }
+
+ CX25840_VCONFIG_SET_BIT(state, CX25840_VCONFIG_RES_MASK, voutctrl,
+ 0, 2, CX25840_VCONFIG_RES_10BIT);
+ CX25840_VCONFIG_SET_BIT(state, CX25840_VCONFIG_VBIRAW_MASK, voutctrl,
+ 0, 3, CX25840_VCONFIG_VBIRAW_ENABLED);
+ CX25840_VCONFIG_SET_BIT(state, CX25840_VCONFIG_ANCDATA_MASK, voutctrl,
+ 0, 4, CX25840_VCONFIG_ANCDATA_ENABLED);
+ CX25840_VCONFIG_SET_BIT(state, CX25840_VCONFIG_TASKBIT_MASK, voutctrl,
+ 0, 5, CX25840_VCONFIG_TASKBIT_ONE);
+ CX25840_VCONFIG_SET_BIT(state, CX25840_VCONFIG_ACTIVE_MASK, voutctrl,
+ 1, 2, CX25840_VCONFIG_ACTIVE_HORIZONTAL);
+ CX25840_VCONFIG_SET_BIT(state, CX25840_VCONFIG_VALID_MASK, voutctrl,
+ 1, 3, CX25840_VCONFIG_VALID_ANDACTIVE);
+ CX25840_VCONFIG_SET_BIT(state, CX25840_VCONFIG_HRESETW_MASK, voutctrl,
+ 1, 4, CX25840_VCONFIG_HRESETW_PIXCLK);
+
+ if (state->vid_config & CX25840_VCONFIG_CLKGATE_MASK)
+ voutctrl[1] &= ~(3 << 6);
+ switch (state->vid_config & CX25840_VCONFIG_CLKGATE_MASK) {
+ case CX25840_VCONFIG_CLKGATE_VALID:
+ voutctrl[1] |= 2;
+ break;
+
+ case CX25840_VCONFIG_CLKGATE_VALIDACTIVE:
+ voutctrl[1] |= 3;
+ break;
+
+ case CX25840_VCONFIG_CLKGATE_NONE:
+ /* zero */
+ default:
+ break;
+ }
+
+ CX25840_VCONFIG_SET_BIT(state, CX25840_VCONFIG_DCMODE_MASK, voutctrl,
+ 2, 0, CX25840_VCONFIG_DCMODE_BYTES);
+ CX25840_VCONFIG_SET_BIT(state, CX25840_VCONFIG_IDID0S_MASK, voutctrl,
+ 2, 1, CX25840_VCONFIG_IDID0S_LINECNT);
+ CX25840_VCONFIG_SET_BIT(state, CX25840_VCONFIG_VIPCLAMP_MASK, voutctrl,
+ 2, 4, CX25840_VCONFIG_VIPCLAMP_ENABLED);
+
+ for (i = 0; i < 3; i++)
+ cx25840_write(client, 0x404 + i, voutctrl[i]);
+}
+
static void cx25840_initialize(struct i2c_client *client)
{
DEFINE_WAIT(wait);
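
To make the helper above readable at a glance, one invocation of CX25840_VCONFIG_SET_BIT() from cx25840_vconfig_apply(), expanded by hand (the do/while wrapper is dropped for brevity):

/* CX25840_VCONFIG_SET_BIT(state, CX25840_VCONFIG_RES_MASK, voutctrl,
 *			   0, 2, CX25840_VCONFIG_RES_10BIT); expands to: */
if (state->vid_config & CX25840_VCONFIG_RES_MASK) {
	if ((state->vid_config & CX25840_VCONFIG_RES_MASK) ==
	    CX25840_VCONFIG_RES_10BIT)
		voutctrl[0] |= BIT(2);		/* 10-bit output selected */
	else
		voutctrl[0] &= ~BIT(2);		/* the other (8-bit) setting */
}
/* An option left at zero in vid_config keeps the current register value. */
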
@@ -389,8 +699,10 @@ static void cx25840_initialize(struct i2c_client *client)
/* datasheet startup in numbered steps, refer to page 3-77 */
/* 2. */
cx25840_and_or(client, 0x803, ~0x10, 0x00);
- /* The default of this register should be 4, but I get 0 instead.
- * Set this register to 4 manually. */
+ /*
+ * The default of this register should be 4, but I get 0 instead.
+ * Set this register to 4 manually.
+ */
cx25840_write(client, 0x000, 0x04);
/* 3. */
init_dll1(client);
@@ -400,10 +712,12 @@ static void cx25840_initialize(struct i2c_client *client)
cx25840_write(client, 0x13c, 0x01);
cx25840_write(client, 0x13c, 0x00);
/* 5. */
- /* Do the firmware load in a work handler to prevent.
- Otherwise the kernel is blocked waiting for the
- bit-banging i2c interface to finish uploading the
- firmware. */
+ /*
+	 * Do the firmware load in a work handler to prevent blocking
+	 * the caller; otherwise the kernel is blocked waiting for the
+	 * bit-banging i2c interface to finish uploading the
+	 * firmware.
+ */
INIT_WORK(&state->fw_work, cx25840_work_handler);
init_waitqueue_head(&state->fw_wait);
q = create_singlethread_workqueue("cx25840_fw");
@@ -446,6 +760,9 @@ static void cx25840_initialize(struct i2c_client *client)
/* (re)set input */
set_input(client, state->vid_input, state->aud_input);
+ if (state->generic_mode)
+ cx25840_vconfig_apply(client);
+
/* start microcontroller */
cx25840_and_or(client, 0x803, ~0x10, 0x10);
}
@@ -632,10 +949,12 @@ static void cx23885_initialize(struct i2c_client *client)
cx25840_write(client, 0x160, 0x1d);
cx25840_write(client, 0x164, 0x00);
- /* Do the firmware load in a work handler to prevent.
- Otherwise the kernel is blocked waiting for the
- bit-banging i2c interface to finish uploading the
- firmware. */
+ /*
+	 * Do the firmware load in a work handler to prevent blocking
+	 * the caller; otherwise the kernel is blocked waiting for the
+	 * bit-banging i2c interface to finish uploading the
+	 * firmware.
+ */
INIT_WORK(&state->fw_work, cx25840_work_handler);
init_waitqueue_head(&state->fw_wait);
q = create_singlethread_workqueue("cx25840_fw");
@@ -647,7 +966,8 @@ static void cx23885_initialize(struct i2c_client *client)
destroy_workqueue(q);
}
- /* Call the cx23888 specific std setup func, we no longer rely on
+ /*
+ * Call the cx23888 specific std setup func, we no longer rely on
* the generic cx24840 func.
*/
if (is_cx23888(state))
@@ -669,7 +989,9 @@ static void cx23885_initialize(struct i2c_client *client)
cx25840_write(client, CX25840_AUD_INT_STAT_REG, 0xff);
/* CC raw enable */
- /* - VIP 1.1 control codes - 10bit, blue field enable.
+
+ /*
+ * - VIP 1.1 control codes - 10bit, blue field enable.
* - enable raw data during vertical blanking.
* - enable ancillary Data insertion for 656 or VIP.
*/
@@ -752,10 +1074,12 @@ static void cx231xx_initialize(struct i2c_client *client)
/* White crush, Chroma AGC & Chroma Killer enabled */
cx25840_write(client, 0x401, 0xe8);
- /* Do the firmware load in a work handler to prevent.
- Otherwise the kernel is blocked waiting for the
- bit-banging i2c interface to finish uploading the
- firmware. */
+ /*
+	 * Do the firmware load in a work handler to prevent blocking
+	 * the caller; otherwise the kernel is blocked waiting for the
+	 * bit-banging i2c interface to finish uploading the
+	 * firmware.
+ */
INIT_WORK(&state->fw_work, cx25840_work_handler);
init_waitqueue_head(&state->fw_wait);
q = create_singlethread_workqueue("cx25840_fw");
@@ -800,13 +1124,20 @@ void cx25840_std_setup(struct i2c_client *client)
else
cx25840_write(client, 0x49f, 0x14);
+ /* generic mode uses the values that the chip autoconfig would set */
if (std & V4L2_STD_625_50) {
hblank = 132;
hactive = 720;
burst = 93;
- vblank = 36;
- vactive = 580;
- vblank656 = 40;
+ if (state->generic_mode) {
+ vblank = 34;
+ vactive = 576;
+ vblank656 = 38;
+ } else {
+ vblank = 36;
+ vactive = 580;
+ vblank656 = 40;
+ }
src_decimation = 0x21f;
luma_lpf = 2;
@@ -815,6 +1146,10 @@ void cx25840_std_setup(struct i2c_client *client)
comb = 0;
sc = 0x0a425f;
} else if (std == V4L2_STD_PAL_Nc) {
+ if (state->generic_mode) {
+ burst = 95;
+ luma_lpf = 1;
+ }
uv_lpf = 1;
comb = 0x20;
sc = 556453;
@@ -829,12 +1164,20 @@ void cx25840_std_setup(struct i2c_client *client)
vactive = 487;
luma_lpf = 1;
uv_lpf = 1;
+ if (state->generic_mode) {
+ vblank = 20;
+ vblank656 = 24;
+ }
src_decimation = 0x21f;
if (std == V4L2_STD_PAL_60) {
- vblank = 26;
- vblank656 = 26;
- burst = 0x5b;
+ if (!state->generic_mode) {
+ vblank = 26;
+ vblank656 = 26;
+ burst = 0x5b;
+ } else {
+ burst = 0x59;
+ }
luma_lpf = 2;
comb = 0x20;
sc = 688739;
@@ -845,8 +1188,10 @@ void cx25840_std_setup(struct i2c_client *client)
comb = 0x20;
sc = 555452;
} else {
- vblank = 26;
- vblank656 = 26;
+ if (!state->generic_mode) {
+ vblank = 26;
+ vblank656 = 26;
+ }
burst = 0x5b;
comb = 0x66;
sc = 556063;
@@ -867,24 +1212,28 @@ void cx25840_std_setup(struct i2c_client *client)
int pll = (28636363L * ((((u64)pll_int) << 25L) + pll_frac)) >> 25L;
pll /= pll_post;
- v4l_dbg(1, cx25840_debug, client, "PLL = %d.%06d MHz\n",
- pll / 1000000, pll % 1000000);
- v4l_dbg(1, cx25840_debug, client, "PLL/8 = %d.%06d MHz\n",
- pll / 8000000, (pll / 8) % 1000000);
+ v4l_dbg(1, cx25840_debug, client,
+ "PLL = %d.%06d MHz\n",
+ pll / 1000000, pll % 1000000);
+ v4l_dbg(1, cx25840_debug, client,
+ "PLL/8 = %d.%06d MHz\n",
+ pll / 8000000, (pll / 8) % 1000000);
fin = ((u64)src_decimation * pll) >> 12;
v4l_dbg(1, cx25840_debug, client,
- "ADC Sampling freq = %d.%06d MHz\n",
- fin / 1000000, fin % 1000000);
+ "ADC Sampling freq = %d.%06d MHz\n",
+ fin / 1000000, fin % 1000000);
fsc = (((u64)sc) * pll) >> 24L;
v4l_dbg(1, cx25840_debug, client,
- "Chroma sub-carrier freq = %d.%06d MHz\n",
- fsc / 1000000, fsc % 1000000);
+ "Chroma sub-carrier freq = %d.%06d MHz\n",
+ fsc / 1000000, fsc % 1000000);
- v4l_dbg(1, cx25840_debug, client, "hblank %i, hactive %i, vblank %i, vactive %i, vblank656 %i, src_dec %i, burst 0x%02x, luma_lpf %i, uv_lpf %i, comb 0x%02x, sc 0x%06x\n",
+ v4l_dbg(1, cx25840_debug, client,
+ "hblank %i, hactive %i, vblank %i, vactive %i, vblank656 %i, src_dec %i, burst 0x%02x, luma_lpf %i, uv_lpf %i, comb 0x%02x, sc 0x%06x\n",
hblank, hactive, vblank, vactive, vblank656,
- src_decimation, burst, luma_lpf, uv_lpf, comb, sc);
+ src_decimation, burst, luma_lpf, uv_lpf,
+ comb, sc);
}
}
@@ -939,10 +1288,10 @@ static void input_change(struct i2c_client *client)
/* Follow step 8c and 8d of section 3.16 in the cx25840 datasheet */
if (std & V4L2_STD_SECAM) {
cx25840_write(client, 0x402, 0);
- }
- else {
+ } else {
cx25840_write(client, 0x402, 0x04);
- cx25840_write(client, 0x49f, (std & V4L2_STD_NTSC) ? 0x14 : 0x11);
+ cx25840_write(client, 0x49f,
+ (std & V4L2_STD_NTSC) ? 0x14 : 0x11);
}
cx25840_and_or(client, 0x401, ~0x60, 0);
cx25840_and_or(client, 0x401, ~0x60, 0x60);
@@ -956,13 +1305,14 @@ static void input_change(struct i2c_client *client)
if (state->radio) {
cx25840_write(client, 0x808, 0xf9);
cx25840_write(client, 0x80b, 0x00);
- }
- else if (std & V4L2_STD_525_60) {
- /* Certain Hauppauge PVR150 models have a hardware bug
- that causes audio to drop out. For these models the
- audio standard must be set explicitly.
- To be precise: it affects cards with tuner models
- 85, 99 and 112 (model numbers from tveeprom). */
+ } else if (std & V4L2_STD_525_60) {
+ /*
+ * Certain Hauppauge PVR150 models have a hardware bug
+ * that causes audio to drop out. For these models the
+ * audio standard must be set explicitly.
+ * To be precise: it affects cards with tuner models
+ * 85, 99 and 112 (model numbers from tveeprom).
+ */
int hw_fix = state->pvr150_workaround;
if (std == V4L2_STD_NTSC_M_JP) {
@@ -979,35 +1329,40 @@ static void input_change(struct i2c_client *client)
} else if (std & V4L2_STD_PAL) {
/* Autodetect audio standard and audio system */
cx25840_write(client, 0x808, 0xff);
- /* Since system PAL-L is pretty much non-existent and
- not used by any public broadcast network, force
- 6.5 MHz carrier to be interpreted as System DK,
- this avoids DK audio detection instability */
+ /*
+ * Since system PAL-L is pretty much non-existent and
+ * not used by any public broadcast network, force
+ * 6.5 MHz carrier to be interpreted as System DK,
+ * this avoids DK audio detection instability
+ */
cx25840_write(client, 0x80b, 0x00);
} else if (std & V4L2_STD_SECAM) {
/* Autodetect audio standard and audio system */
cx25840_write(client, 0x808, 0xff);
- /* If only one of SECAM-DK / SECAM-L is required, then force
- 6.5MHz carrier, else autodetect it */
+ /*
+ * If only one of SECAM-DK / SECAM-L is required, then force
+ * 6.5MHz carrier, else autodetect it
+ */
if ((std & V4L2_STD_SECAM_DK) &&
!(std & (V4L2_STD_SECAM_L | V4L2_STD_SECAM_LC))) {
/* 6.5 MHz carrier to be interpreted as System DK */
cx25840_write(client, 0x80b, 0x00);
- } else if (!(std & V4L2_STD_SECAM_DK) &&
- (std & (V4L2_STD_SECAM_L | V4L2_STD_SECAM_LC))) {
+ } else if (!(std & V4L2_STD_SECAM_DK) &&
+ (std & (V4L2_STD_SECAM_L | V4L2_STD_SECAM_LC))) {
/* 6.5 MHz carrier to be interpreted as System L */
cx25840_write(client, 0x80b, 0x08);
- } else {
+ } else {
/* 6.5 MHz carrier to be autodetected */
cx25840_write(client, 0x80b, 0x10);
- }
+ }
}
cx25840_and_or(client, 0x810, ~0x01, 0);
}
-static int set_input(struct i2c_client *client, enum cx25840_video_input vid_input,
- enum cx25840_audio_input aud_input)
+static int set_input(struct i2c_client *client,
+ enum cx25840_video_input vid_input,
+ enum cx25840_audio_input aud_input)
{
struct cx25840_state *state = to_state(i2c_get_clientdata(client));
u8 is_composite = (vid_input >= CX25840_COMPOSITE1 &&
@@ -1032,7 +1387,7 @@ static int set_input(struct i2c_client *client, enum cx25840_video_input vid_inp
vid_input);
reg = vid_input & 0xff;
is_composite = !is_component &&
- ((vid_input & CX25840_SVIDEO_ON) != CX25840_SVIDEO_ON);
+ ((vid_input & CX25840_SVIDEO_ON) != CX25840_SVIDEO_ON);
v4l_dbg(1, cx25840_debug, client, "mux cfg 0x%x comp=%d\n",
reg, is_composite);
@@ -1040,8 +1395,10 @@ static int set_input(struct i2c_client *client, enum cx25840_video_input vid_inp
reg = 0xf0 + (vid_input - CX25840_COMPOSITE1);
} else {
if ((vid_input & ~0xff0) ||
- luma < CX25840_SVIDEO_LUMA1 || luma > CX25840_SVIDEO_LUMA8 ||
- chroma < CX25840_SVIDEO_CHROMA4 || chroma > CX25840_SVIDEO_CHROMA8) {
+ luma < CX25840_SVIDEO_LUMA1 ||
+ luma > CX25840_SVIDEO_LUMA8 ||
+ chroma < CX25840_SVIDEO_CHROMA4 ||
+ chroma > CX25840_SVIDEO_CHROMA8) {
v4l_err(client, "0x%04x is not a valid video input!\n",
vid_input);
return -EINVAL;
@@ -1065,12 +1422,24 @@ static int set_input(struct i2c_client *client, enum cx25840_video_input vid_inp
case CX25840_AUDIO_SERIAL:
/* do nothing, use serial audio input */
break;
- case CX25840_AUDIO4: reg &= ~0x30; break;
- case CX25840_AUDIO5: reg &= ~0x30; reg |= 0x10; break;
- case CX25840_AUDIO6: reg &= ~0x30; reg |= 0x20; break;
- case CX25840_AUDIO7: reg &= ~0xc0; break;
- case CX25840_AUDIO8: reg &= ~0xc0; reg |= 0x40; break;
-
+ case CX25840_AUDIO4:
+ reg &= ~0x30;
+ break;
+ case CX25840_AUDIO5:
+ reg &= ~0x30;
+ reg |= 0x10;
+ break;
+ case CX25840_AUDIO6:
+ reg &= ~0x30;
+ reg |= 0x20;
+ break;
+ case CX25840_AUDIO7:
+ reg &= ~0xc0;
+ break;
+ case CX25840_AUDIO8:
+ reg &= ~0xc0;
+ reg |= 0x40;
+ break;
default:
v4l_err(client, "0x%04x is not a valid audio input!\n",
aud_input);
@@ -1087,7 +1456,6 @@ static int set_input(struct i2c_client *client, enum cx25840_video_input vid_inp
cx25840_and_or(client, 0x401, ~0x6, is_composite ? 0 : 0x02);
if (is_cx2388x(state)) {
-
/* Enable or disable the DIF for tuner use */
if (is_dif) {
cx25840_and_or(client, 0x102, ~0x80, 0x80);
@@ -1118,15 +1486,23 @@ static int set_input(struct i2c_client *client, enum cx25840_video_input vid_inp
cx25840_write4(client, 0x410, 0xffff0dbf);
cx25840_write4(client, 0x414, 0x00137d03);
- cx25840_write4(client, state->vbi_regs_offset + 0x42c, 0x42600000);
- cx25840_write4(client, state->vbi_regs_offset + 0x430, 0x0000039b);
- cx25840_write4(client, state->vbi_regs_offset + 0x438, 0x00000000);
-
- cx25840_write4(client, state->vbi_regs_offset + 0x440, 0xF8E3E824);
- cx25840_write4(client, state->vbi_regs_offset + 0x444, 0x401040dc);
- cx25840_write4(client, state->vbi_regs_offset + 0x448, 0xcd3f02a0);
- cx25840_write4(client, state->vbi_regs_offset + 0x44c, 0x161f1000);
- cx25840_write4(client, state->vbi_regs_offset + 0x450, 0x00000802);
+ cx25840_write4(client, state->vbi_regs_offset + 0x42c,
+ 0x42600000);
+ cx25840_write4(client, state->vbi_regs_offset + 0x430,
+ 0x0000039b);
+ cx25840_write4(client, state->vbi_regs_offset + 0x438,
+ 0x00000000);
+
+ cx25840_write4(client, state->vbi_regs_offset + 0x440,
+ 0xF8E3E824);
+ cx25840_write4(client, state->vbi_regs_offset + 0x444,
+ 0x401040dc);
+ cx25840_write4(client, state->vbi_regs_offset + 0x448,
+ 0xcd3f02a0);
+ cx25840_write4(client, state->vbi_regs_offset + 0x44c,
+ 0x161f1000);
+ cx25840_write4(client, state->vbi_regs_offset + 0x450,
+ 0x00000802);
cx25840_write4(client, 0x91c, 0x01000000);
cx25840_write4(client, 0x8e0, 0x03063870);
@@ -1193,8 +1569,9 @@ static int set_input(struct i2c_client *client, enum cx25840_video_input vid_inp
* Only one of the two will be in use.
*/
cx25840_write4(client, AFE_CTRL, val);
- } else
+ } else {
cx25840_and_or(client, 0x102, ~0x2, 0);
+ }
}
state->vid_input = vid_input;
@@ -1233,29 +1610,32 @@ static int set_input(struct i2c_client *client, enum cx25840_video_input vid_inp
cx25840_write(client, 0x919, 0x01);
}
- if (is_cx2388x(state) && ((aud_input == CX25840_AUDIO7) ||
- (aud_input == CX25840_AUDIO6))) {
+ if (is_cx2388x(state) &&
+ ((aud_input == CX25840_AUDIO7) || (aud_input == CX25840_AUDIO6))) {
/* Configure audio from LR1 or LR2 input */
cx25840_write4(client, 0x910, 0);
cx25840_write4(client, 0x8d0, 0x63073);
- } else
- if (is_cx2388x(state) && (aud_input == CX25840_AUDIO8)) {
+ } else if (is_cx2388x(state) && (aud_input == CX25840_AUDIO8)) {
/* Configure audio from tuner/sif input */
cx25840_write4(client, 0x910, 0x12b000c9);
cx25840_write4(client, 0x8d0, 0x1f063870);
}
if (is_cx23888(state)) {
- /* HVR1850 */
- /* AUD_IO_CTRL - I2S Input, Parallel1*/
- /* - Channel 1 src - Parallel1 (Merlin out) */
- /* - Channel 2 src - Parallel2 (Merlin out) */
- /* - Channel 3 src - Parallel3 (Merlin AC97 out) */
- /* - I2S source and dir - Merlin, output */
+ /*
+ * HVR1850
+ *
+ * AUD_IO_CTRL - I2S Input, Parallel1
+ * - Channel 1 src - Parallel1 (Merlin out)
+ * - Channel 2 src - Parallel2 (Merlin out)
+ * - Channel 3 src - Parallel3 (Merlin AC97 out)
+ * - I2S source and dir - Merlin, output
+ */
cx25840_write4(client, 0x124, 0x100);
if (!is_dif) {
- /* Stop microcontroller if we don't need it
+ /*
+ * Stop microcontroller if we don't need it
* to avoid audio popping on svideo/composite use.
*/
cx25840_and_or(client, 0x803, ~0x10, 0x00);
@@ -1297,11 +1677,14 @@ static int set_v4lstd(struct i2c_client *client)
fmt = 0xc;
}
- v4l_dbg(1, cx25840_debug, client, "changing video std to fmt %i\n",fmt);
+ v4l_dbg(1, cx25840_debug, client,
+ "changing video std to fmt %i\n", fmt);
- /* Follow step 9 of section 3.16 in the cx25840 datasheet.
- Without this PAL may display a vertical ghosting effect.
- This happens for example with the Yuan MPC622. */
+ /*
+ * Follow step 9 of section 3.16 in the cx25840 datasheet.
+ * Without this PAL may display a vertical ghosting effect.
+ * This happens for example with the Yuan MPC622.
+ */
if (fmt >= 4 && fmt < 8) {
/* Set format to NTSC-M */
cx25840_and_or(client, 0x400, ~0xf, 1);
@@ -1363,14 +1746,15 @@ static int cx25840_s_ctrl(struct v4l2_ctrl *ctrl)
/* ----------------------------------------------------------------------- */
static int cx25840_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *format)
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *fmt = &format->format;
struct cx25840_state *state = to_state(sd);
struct i2c_client *client = v4l2_get_subdevdata(sd);
- int HSC, VSC, Vsrc, Hsrc, filter, Vlines;
- int is_50Hz = !(state->std & V4L2_STD_525_60);
+ u32 hsc, vsc, v_src, h_src, v_add;
+ int filter;
+ int is_50hz = !(state->std & V4L2_STD_525_60);
if (format->pad || fmt->code != MEDIA_BUS_FMT_FIXED)
return -EINVAL;
@@ -1379,42 +1763,63 @@ static int cx25840_set_fmt(struct v4l2_subdev *sd,
fmt->colorspace = V4L2_COLORSPACE_SMPTE170M;
if (is_cx23888(state)) {
- Vsrc = (cx25840_read(client, 0x42a) & 0x3f) << 4;
- Vsrc |= (cx25840_read(client, 0x429) & 0xf0) >> 4;
+ v_src = (cx25840_read(client, 0x42a) & 0x3f) << 4;
+ v_src |= (cx25840_read(client, 0x429) & 0xf0) >> 4;
} else {
- Vsrc = (cx25840_read(client, 0x476) & 0x3f) << 4;
- Vsrc |= (cx25840_read(client, 0x475) & 0xf0) >> 4;
+ v_src = (cx25840_read(client, 0x476) & 0x3f) << 4;
+ v_src |= (cx25840_read(client, 0x475) & 0xf0) >> 4;
}
if (is_cx23888(state)) {
- Hsrc = (cx25840_read(client, 0x426) & 0x3f) << 4;
- Hsrc |= (cx25840_read(client, 0x425) & 0xf0) >> 4;
+ h_src = (cx25840_read(client, 0x426) & 0x3f) << 4;
+ h_src |= (cx25840_read(client, 0x425) & 0xf0) >> 4;
} else {
- Hsrc = (cx25840_read(client, 0x472) & 0x3f) << 4;
- Hsrc |= (cx25840_read(client, 0x471) & 0xf0) >> 4;
+ h_src = (cx25840_read(client, 0x472) & 0x3f) << 4;
+ h_src |= (cx25840_read(client, 0x471) & 0xf0) >> 4;
}
- Vlines = fmt->height + (is_50Hz ? 4 : 7);
+ if (!state->generic_mode) {
+ v_add = is_50hz ? 4 : 7;
- /*
- * We keep 1 margin for the Vsrc < Vlines check since the
- * cx23888 reports a Vsrc of 486 instead of 487 for the NTSC
- * height. Without that margin the cx23885 fails in this
- * check.
- */
- if ((fmt->width == 0) || (Vlines == 0) ||
- (fmt->width * 16 < Hsrc) || (Hsrc < fmt->width) ||
- (Vlines * 8 < Vsrc) || (Vsrc + 1 < Vlines)) {
- v4l_err(client, "%dx%d is not a valid size!\n",
- fmt->width, fmt->height);
- return -ERANGE;
+ /*
+ * cx23888 in 525-line mode is programmed for 486 active lines
+ * while other chips use 487 active lines.
+ *
+ * See reg 0x428 bits [21:12] in cx23888_std_setup() vs
+ * vactive in cx25840_std_setup().
+ */
+ if (is_cx23888(state) && !is_50hz)
+ v_add--;
+ } else {
+ v_add = 0;
}
+
+ if (h_src == 0 ||
+ v_src <= v_add) {
+ v4l_err(client,
+ "chip reported picture size (%u x %u) is far too small\n",
+ (unsigned int)h_src, (unsigned int)v_src);
+ /*
+ * that's the best we can do since the output picture
+ * size is completely unknown in this case
+ */
+ return -EINVAL;
+ }
+
+ fmt->width = clamp(fmt->width, (h_src + 15) / 16, h_src);
+
+ if (v_add * 8 >= v_src)
+ fmt->height = clamp(fmt->height, (u32)1, v_src - v_add);
+ else
+ fmt->height = clamp(fmt->height, (v_src - v_add * 8 + 7) / 8,
+ v_src - v_add);
+
if (format->which == V4L2_SUBDEV_FORMAT_TRY)
return 0;
- HSC = (Hsrc * (1 << 20)) / fmt->width - (1 << 20);
- VSC = (1 << 16) - (Vsrc * (1 << 9) / Vlines - (1 << 9));
- VSC &= 0x1fff;
+ hsc = (h_src * (1 << 20)) / fmt->width - (1 << 20);
+ vsc = (1 << 16) - (v_src * (1 << 9) / (fmt->height + v_add) - (1 << 9));
+ vsc &= 0x1fff;
if (fmt->width >= 385)
filter = 0;
@@ -1425,21 +1830,23 @@ static int cx25840_set_fmt(struct v4l2_subdev *sd,
else
filter = 3;
- v4l_dbg(1, cx25840_debug, client, "decoder set size %dx%d -> scale %ux%u\n",
- fmt->width, fmt->height, HSC, VSC);
+ v4l_dbg(1, cx25840_debug, client,
+ "decoder set size %u x %u with scale %x x %x\n",
+ (unsigned int)fmt->width, (unsigned int)fmt->height,
+ (unsigned int)hsc, (unsigned int)vsc);
- /* HSCALE=HSC */
+ /* HSCALE=hsc */
if (is_cx23888(state)) {
- cx25840_write4(client, 0x434, HSC | (1 << 24));
- /* VSCALE=VSC VS_INTRLACE=1 VFILT=filter */
- cx25840_write4(client, 0x438, VSC | (1 << 19) | (filter << 16));
+ cx25840_write4(client, 0x434, hsc | (1 << 24));
+ /* VSCALE=vsc VS_INTRLACE=1 VFILT=filter */
+ cx25840_write4(client, 0x438, vsc | (1 << 19) | (filter << 16));
} else {
- cx25840_write(client, 0x418, HSC & 0xff);
- cx25840_write(client, 0x419, (HSC >> 8) & 0xff);
- cx25840_write(client, 0x41a, HSC >> 16);
- /* VSCALE=VSC */
- cx25840_write(client, 0x41c, VSC & 0xff);
- cx25840_write(client, 0x41d, VSC >> 8);
+ cx25840_write(client, 0x418, hsc & 0xff);
+ cx25840_write(client, 0x419, (hsc >> 8) & 0xff);
+ cx25840_write(client, 0x41a, hsc >> 16);
+ /* VSCALE=vsc */
+ cx25840_write(client, 0x41c, vsc & 0xff);
+ cx25840_write(client, 0x41d, vsc >> 8);
/* VS_INTRLACE=1 VFILT=filter */
cx25840_write(client, 0x41e, 0x8 | filter);
}
@@ -1466,23 +1873,25 @@ static void log_video_status(struct i2c_client *client)
int vid_input = state->vid_input;
v4l_info(client, "Video signal: %spresent\n",
- (gen_stat2 & 0x20) ? "" : "not ");
+ (gen_stat2 & 0x20) ? "" : "not ");
v4l_info(client, "Detected format: %s\n",
- fmt_strs[gen_stat1 & 0xf]);
+ fmt_strs[gen_stat1 & 0xf]);
v4l_info(client, "Specified standard: %s\n",
- vidfmt_sel ? fmt_strs[vidfmt_sel] : "automatic detection");
+ vidfmt_sel ? fmt_strs[vidfmt_sel] : "automatic detection");
if (vid_input >= CX25840_COMPOSITE1 &&
vid_input <= CX25840_COMPOSITE8) {
v4l_info(client, "Specified video input: Composite %d\n",
- vid_input - CX25840_COMPOSITE1 + 1);
+ vid_input - CX25840_COMPOSITE1 + 1);
} else {
- v4l_info(client, "Specified video input: S-Video (Luma In%d, Chroma In%d)\n",
- (vid_input & 0xf0) >> 4, (vid_input & 0xf00) >> 8);
+ v4l_info(client,
+ "Specified video input: S-Video (Luma In%d, Chroma In%d)\n",
+ (vid_input & 0xf0) >> 4, (vid_input & 0xf00) >> 8);
}
- v4l_info(client, "Specified audioclock freq: %d Hz\n", state->audclk_freq);
+ v4l_info(client, "Specified audioclock freq: %d Hz\n",
+ state->audclk_freq);
}
/* ----------------------------------------------------------------------- */
@@ -1501,177 +1910,434 @@ static void log_audio_status(struct i2c_client *client)
char *p;
switch (mod_det_stat0) {
- case 0x00: p = "mono"; break;
- case 0x01: p = "stereo"; break;
- case 0x02: p = "dual"; break;
- case 0x04: p = "tri"; break;
- case 0x10: p = "mono with SAP"; break;
- case 0x11: p = "stereo with SAP"; break;
- case 0x12: p = "dual with SAP"; break;
- case 0x14: p = "tri with SAP"; break;
- case 0xfe: p = "forced mode"; break;
- default: p = "not defined";
+ case 0x00:
+ p = "mono";
+ break;
+ case 0x01:
+ p = "stereo";
+ break;
+ case 0x02:
+ p = "dual";
+ break;
+ case 0x04:
+ p = "tri";
+ break;
+ case 0x10:
+ p = "mono with SAP";
+ break;
+ case 0x11:
+ p = "stereo with SAP";
+ break;
+ case 0x12:
+ p = "dual with SAP";
+ break;
+ case 0x14:
+ p = "tri with SAP";
+ break;
+ case 0xfe:
+ p = "forced mode";
+ break;
+ default:
+ p = "not defined";
}
v4l_info(client, "Detected audio mode: %s\n", p);
switch (mod_det_stat1) {
- case 0x00: p = "not defined"; break;
- case 0x01: p = "EIAJ"; break;
- case 0x02: p = "A2-M"; break;
- case 0x03: p = "A2-BG"; break;
- case 0x04: p = "A2-DK1"; break;
- case 0x05: p = "A2-DK2"; break;
- case 0x06: p = "A2-DK3"; break;
- case 0x07: p = "A1 (6.0 MHz FM Mono)"; break;
- case 0x08: p = "AM-L"; break;
- case 0x09: p = "NICAM-BG"; break;
- case 0x0a: p = "NICAM-DK"; break;
- case 0x0b: p = "NICAM-I"; break;
- case 0x0c: p = "NICAM-L"; break;
- case 0x0d: p = "BTSC/EIAJ/A2-M Mono (4.5 MHz FMMono)"; break;
- case 0x0e: p = "IF FM Radio"; break;
- case 0x0f: p = "BTSC"; break;
- case 0x10: p = "high-deviation FM"; break;
- case 0x11: p = "very high-deviation FM"; break;
- case 0xfd: p = "unknown audio standard"; break;
- case 0xfe: p = "forced audio standard"; break;
- case 0xff: p = "no detected audio standard"; break;
- default: p = "not defined";
+ case 0x00:
+ p = "not defined";
+ break;
+ case 0x01:
+ p = "EIAJ";
+ break;
+ case 0x02:
+ p = "A2-M";
+ break;
+ case 0x03:
+ p = "A2-BG";
+ break;
+ case 0x04:
+ p = "A2-DK1";
+ break;
+ case 0x05:
+ p = "A2-DK2";
+ break;
+ case 0x06:
+ p = "A2-DK3";
+ break;
+ case 0x07:
+ p = "A1 (6.0 MHz FM Mono)";
+ break;
+ case 0x08:
+ p = "AM-L";
+ break;
+ case 0x09:
+ p = "NICAM-BG";
+ break;
+ case 0x0a:
+ p = "NICAM-DK";
+ break;
+ case 0x0b:
+ p = "NICAM-I";
+ break;
+ case 0x0c:
+ p = "NICAM-L";
+ break;
+ case 0x0d:
+ p = "BTSC/EIAJ/A2-M Mono (4.5 MHz FMMono)";
+ break;
+ case 0x0e:
+ p = "IF FM Radio";
+ break;
+ case 0x0f:
+ p = "BTSC";
+ break;
+ case 0x10:
+ p = "high-deviation FM";
+ break;
+ case 0x11:
+ p = "very high-deviation FM";
+ break;
+ case 0xfd:
+ p = "unknown audio standard";
+ break;
+ case 0xfe:
+ p = "forced audio standard";
+ break;
+ case 0xff:
+ p = "no detected audio standard";
+ break;
+ default:
+ p = "not defined";
}
v4l_info(client, "Detected audio standard: %s\n", p);
v4l_info(client, "Audio microcontroller: %s\n",
- (download_ctl & 0x10) ?
- ((mute_ctl & 0x2) ? "detecting" : "running") : "stopped");
+ (download_ctl & 0x10) ?
+ ((mute_ctl & 0x2) ? "detecting" : "running") : "stopped");
switch (audio_config >> 4) {
- case 0x00: p = "undefined"; break;
- case 0x01: p = "BTSC"; break;
- case 0x02: p = "EIAJ"; break;
- case 0x03: p = "A2-M"; break;
- case 0x04: p = "A2-BG"; break;
- case 0x05: p = "A2-DK1"; break;
- case 0x06: p = "A2-DK2"; break;
- case 0x07: p = "A2-DK3"; break;
- case 0x08: p = "A1 (6.0 MHz FM Mono)"; break;
- case 0x09: p = "AM-L"; break;
- case 0x0a: p = "NICAM-BG"; break;
- case 0x0b: p = "NICAM-DK"; break;
- case 0x0c: p = "NICAM-I"; break;
- case 0x0d: p = "NICAM-L"; break;
- case 0x0e: p = "FM radio"; break;
- case 0x0f: p = "automatic detection"; break;
- default: p = "undefined";
+ case 0x00:
+ p = "undefined";
+ break;
+ case 0x01:
+ p = "BTSC";
+ break;
+ case 0x02:
+ p = "EIAJ";
+ break;
+ case 0x03:
+ p = "A2-M";
+ break;
+ case 0x04:
+ p = "A2-BG";
+ break;
+ case 0x05:
+ p = "A2-DK1";
+ break;
+ case 0x06:
+ p = "A2-DK2";
+ break;
+ case 0x07:
+ p = "A2-DK3";
+ break;
+ case 0x08:
+ p = "A1 (6.0 MHz FM Mono)";
+ break;
+ case 0x09:
+ p = "AM-L";
+ break;
+ case 0x0a:
+ p = "NICAM-BG";
+ break;
+ case 0x0b:
+ p = "NICAM-DK";
+ break;
+ case 0x0c:
+ p = "NICAM-I";
+ break;
+ case 0x0d:
+ p = "NICAM-L";
+ break;
+ case 0x0e:
+ p = "FM radio";
+ break;
+ case 0x0f:
+ p = "automatic detection";
+ break;
+ default:
+ p = "undefined";
}
v4l_info(client, "Configured audio standard: %s\n", p);
if ((audio_config >> 4) < 0xF) {
switch (audio_config & 0xF) {
- case 0x00: p = "MONO1 (LANGUAGE A/Mono L+R channel for BTSC, EIAJ, A2)"; break;
- case 0x01: p = "MONO2 (LANGUAGE B)"; break;
- case 0x02: p = "MONO3 (STEREO forced MONO)"; break;
- case 0x03: p = "MONO4 (NICAM ANALOG-Language C/Analog Fallback)"; break;
- case 0x04: p = "STEREO"; break;
- case 0x05: p = "DUAL1 (AB)"; break;
- case 0x06: p = "DUAL2 (AC) (FM)"; break;
- case 0x07: p = "DUAL3 (BC) (FM)"; break;
- case 0x08: p = "DUAL4 (AC) (AM)"; break;
- case 0x09: p = "DUAL5 (BC) (AM)"; break;
- case 0x0a: p = "SAP"; break;
- default: p = "undefined";
+ case 0x00:
+ p = "MONO1 (LANGUAGE A/Mono L+R channel for BTSC, EIAJ, A2)";
+ break;
+ case 0x01:
+ p = "MONO2 (LANGUAGE B)";
+ break;
+ case 0x02:
+ p = "MONO3 (STEREO forced MONO)";
+ break;
+ case 0x03:
+ p = "MONO4 (NICAM ANALOG-Language C/Analog Fallback)";
+ break;
+ case 0x04:
+ p = "STEREO";
+ break;
+ case 0x05:
+ p = "DUAL1 (AB)";
+ break;
+ case 0x06:
+ p = "DUAL2 (AC) (FM)";
+ break;
+ case 0x07:
+ p = "DUAL3 (BC) (FM)";
+ break;
+ case 0x08:
+ p = "DUAL4 (AC) (AM)";
+ break;
+ case 0x09:
+ p = "DUAL5 (BC) (AM)";
+ break;
+ case 0x0a:
+ p = "SAP";
+ break;
+ default:
+ p = "undefined";
}
v4l_info(client, "Configured audio mode: %s\n", p);
} else {
switch (audio_config & 0xF) {
- case 0x00: p = "BG"; break;
- case 0x01: p = "DK1"; break;
- case 0x02: p = "DK2"; break;
- case 0x03: p = "DK3"; break;
- case 0x04: p = "I"; break;
- case 0x05: p = "L"; break;
- case 0x06: p = "BTSC"; break;
- case 0x07: p = "EIAJ"; break;
- case 0x08: p = "A2-M"; break;
- case 0x09: p = "FM Radio"; break;
- case 0x0f: p = "automatic standard and mode detection"; break;
- default: p = "undefined";
+ case 0x00:
+ p = "BG";
+ break;
+ case 0x01:
+ p = "DK1";
+ break;
+ case 0x02:
+ p = "DK2";
+ break;
+ case 0x03:
+ p = "DK3";
+ break;
+ case 0x04:
+ p = "I";
+ break;
+ case 0x05:
+ p = "L";
+ break;
+ case 0x06:
+ p = "BTSC";
+ break;
+ case 0x07:
+ p = "EIAJ";
+ break;
+ case 0x08:
+ p = "A2-M";
+ break;
+ case 0x09:
+ p = "FM Radio";
+ break;
+ case 0x0f:
+ p = "automatic standard and mode detection";
+ break;
+ default:
+ p = "undefined";
}
v4l_info(client, "Configured audio system: %s\n", p);
}
if (aud_input) {
- v4l_info(client, "Specified audio input: Tuner (In%d)\n", aud_input);
+ v4l_info(client, "Specified audio input: Tuner (In%d)\n",
+ aud_input);
} else {
v4l_info(client, "Specified audio input: External\n");
}
switch (pref_mode & 0xf) {
- case 0: p = "mono/language A"; break;
- case 1: p = "language B"; break;
- case 2: p = "language C"; break;
- case 3: p = "analog fallback"; break;
- case 4: p = "stereo"; break;
- case 5: p = "language AC"; break;
- case 6: p = "language BC"; break;
- case 7: p = "language AB"; break;
- default: p = "undefined";
+ case 0:
+ p = "mono/language A";
+ break;
+ case 1:
+ p = "language B";
+ break;
+ case 2:
+ p = "language C";
+ break;
+ case 3:
+ p = "analog fallback";
+ break;
+ case 4:
+ p = "stereo";
+ break;
+ case 5:
+ p = "language AC";
+ break;
+ case 6:
+ p = "language BC";
+ break;
+ case 7:
+ p = "language AB";
+ break;
+ default:
+ p = "undefined";
}
v4l_info(client, "Preferred audio mode: %s\n", p);
if ((audio_config & 0xf) == 0xf) {
switch ((afc0 >> 3) & 0x3) {
- case 0: p = "system DK"; break;
- case 1: p = "system L"; break;
- case 2: p = "autodetect"; break;
- default: p = "undefined";
+ case 0:
+ p = "system DK";
+ break;
+ case 1:
+ p = "system L";
+ break;
+ case 2:
+ p = "autodetect";
+ break;
+ default:
+ p = "undefined";
}
v4l_info(client, "Selected 65 MHz format: %s\n", p);
switch (afc0 & 0x7) {
- case 0: p = "chroma"; break;
- case 1: p = "BTSC"; break;
- case 2: p = "EIAJ"; break;
- case 3: p = "A2-M"; break;
- case 4: p = "autodetect"; break;
- default: p = "undefined";
+ case 0:
+ p = "chroma";
+ break;
+ case 1:
+ p = "BTSC";
+ break;
+ case 2:
+ p = "EIAJ";
+ break;
+ case 3:
+ p = "A2-M";
+ break;
+ case 4:
+ p = "autodetect";
+ break;
+ default:
+ p = "undefined";
}
v4l_info(client, "Selected 45 MHz format: %s\n", p);
}
}
+#define CX25840_VCONFIG_OPTION(state, cfg_in, opt_msk) \
+ do { \
+ if ((cfg_in) & (opt_msk)) { \
+ (state)->vid_config &= ~(opt_msk); \
+ (state)->vid_config |= (cfg_in) & (opt_msk); \
+ } \
+ } while (0)
+
+/* apply incoming options to the current vconfig */
+static void cx25840_vconfig_add(struct cx25840_state *state, u32 cfg_in)
+{
+ CX25840_VCONFIG_OPTION(state, cfg_in, CX25840_VCONFIG_FMT_MASK);
+ CX25840_VCONFIG_OPTION(state, cfg_in, CX25840_VCONFIG_RES_MASK);
+ CX25840_VCONFIG_OPTION(state, cfg_in, CX25840_VCONFIG_VBIRAW_MASK);
+ CX25840_VCONFIG_OPTION(state, cfg_in, CX25840_VCONFIG_ANCDATA_MASK);
+ CX25840_VCONFIG_OPTION(state, cfg_in, CX25840_VCONFIG_TASKBIT_MASK);
+ CX25840_VCONFIG_OPTION(state, cfg_in, CX25840_VCONFIG_ACTIVE_MASK);
+ CX25840_VCONFIG_OPTION(state, cfg_in, CX25840_VCONFIG_VALID_MASK);
+ CX25840_VCONFIG_OPTION(state, cfg_in, CX25840_VCONFIG_HRESETW_MASK);
+ CX25840_VCONFIG_OPTION(state, cfg_in, CX25840_VCONFIG_CLKGATE_MASK);
+ CX25840_VCONFIG_OPTION(state, cfg_in, CX25840_VCONFIG_DCMODE_MASK);
+ CX25840_VCONFIG_OPTION(state, cfg_in, CX25840_VCONFIG_IDID0S_MASK);
+ CX25840_VCONFIG_OPTION(state, cfg_in, CX25840_VCONFIG_VIPCLAMP_MASK);
+}
+
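The helper above only touches fields whose mask bits are set in the incoming value; every other field of vid_config is preserved. A minimal user-space sketch of that masked-merge idea, with made-up mask and value constants standing in for the CX25840_VCONFIG_* definitions:

#include <stdint.h>
#include <stdio.h>

/* hypothetical stand-ins for CX25840_VCONFIG_FMT_MASK and friends */
#define FMT_MASK	0x0003u
#define FMT_BT656	0x0001u
#define RES_MASK	0x000cu
#define RES_8BIT	0x0004u
#define RES_10BIT	0x0008u

static void vconfig_add(uint32_t *cur, uint32_t in, uint32_t msk)
{
	if (in & msk) {			/* only merge fields the caller set */
		*cur &= ~msk;		/* clear the field... */
		*cur |= in & msk;	/* ...then copy the new value in */
	}
}

int main(void)
{
	uint32_t vid_config = FMT_BT656 | RES_10BIT;	/* current state */

	/* the caller changes only the resolution field; FMT_* is untouched */
	vconfig_add(&vid_config, RES_8BIT, RES_MASK);
	printf("0x%x\n", vid_config);	/* prints 0x5: BT656 + 8-bit */
	return 0;
}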
/* ----------------------------------------------------------------------- */
-/* This load_fw operation must be called to load the driver's firmware.
- Without this the audio standard detection will fail and you will
- only get mono.
+/*
+ * Initializes the device in generic mode.
+ * For cx2584x chips this also applies the additional video output settings
+ * provided in the @val parameter (CX25840_VCONFIG_*).
+ *
+ * Generic mode disables some of the ivtv-related hacks in this driver.
+ * For cx2584x chips it also makes the video output configuration settable,
+ * defaulting it to the datasheet values.
+ */
+static int cx25840_init(struct v4l2_subdev *sd, u32 val)
+{
+ struct cx25840_state *state = to_state(sd);
- Since loading the firmware is often problematic when the driver is
- compiled into the kernel I recommend postponing calling this function
- until the first open of the video device. Another reason for
- postponing it is that loading this firmware takes a long time (seconds)
- due to the slow i2c bus speed. So it will speed up the boot process if
- you can avoid loading the fw as long as the video device isn't used. */
-static int cx25840_load_fw(struct v4l2_subdev *sd)
+ state->generic_mode = true;
+
+ if (is_cx2584x(state)) {
+ /* set datasheet video output defaults */
+ state->vid_config = CX25840_VCONFIG_FMT_BT656 |
+ CX25840_VCONFIG_RES_8BIT |
+ CX25840_VCONFIG_VBIRAW_DISABLED |
+ CX25840_VCONFIG_ANCDATA_ENABLED |
+ CX25840_VCONFIG_TASKBIT_ONE |
+ CX25840_VCONFIG_ACTIVE_HORIZONTAL |
+ CX25840_VCONFIG_VALID_NORMAL |
+ CX25840_VCONFIG_HRESETW_NORMAL |
+ CX25840_VCONFIG_CLKGATE_NONE |
+ CX25840_VCONFIG_DCMODE_DWORDS |
+ CX25840_VCONFIG_IDID0S_NORMAL |
+ CX25840_VCONFIG_VIPCLAMP_DISABLED;
+
+ /* add additional settings */
+ cx25840_vconfig_add(state, val);
+ } else {
+ /* TODO: generic mode needs to be developed for other chips */
+ WARN_ON(1);
+ }
+
+ return 0;
+}
+
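A bridge driver opts into this generic mode through the standard subdev core init op; on cx2584x parts the argument can carry CX25840_VCONFIG_* overrides on top of the datasheet defaults. A hypothetical call site, sketched only to show the shape of the call (the helper name and flag combination are examples, not taken from any in-tree bridge):

/* hypothetical bridge-driver helper, after the cx25840 subdev is registered */
#include <media/v4l2-subdev.h>
#include <media/drv-intf/cx25840.h>	/* CX25840_VCONFIG_* flags */

static int bridge_enable_cx25840_generic_mode(struct v4l2_subdev *sd)
{
	return v4l2_subdev_call(sd, core, init,
				CX25840_VCONFIG_FMT_BT656 |
				CX25840_VCONFIG_RES_8BIT);
}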
+static int cx25840_reset(struct v4l2_subdev *sd, u32 val)
{
struct cx25840_state *state = to_state(sd);
struct i2c_client *client = v4l2_get_subdevdata(sd);
+ if (is_cx2583x(state))
+ cx25836_initialize(client);
+ else if (is_cx2388x(state))
+ cx23885_initialize(client);
+ else if (is_cx231xx(state))
+ cx231xx_initialize(client);
+ else
+ cx25840_initialize(client);
+
+ state->is_initialized = 1;
+
+ return 0;
+}
+
+/*
+ * This load_fw operation must be called to load the driver's firmware.
+ * This will load the firmware on the first invocation (subsequent calls are no-ops).
+ * Without this the audio standard detection will fail and you will
+ * only get mono.
+ * Alternatively, you can call the reset operation instead of this one.
+ *
+ * Since loading the firmware is often problematic when the driver is
+ * compiled into the kernel I recommend postponing calling this function
+ * until the first open of the video device. Another reason for
+ * postponing it is that loading this firmware takes a long time (seconds)
+ * due to the slow i2c bus speed. So it will speed up the boot process if
+ * you can avoid loading the fw as long as the video device isn't used.
+ */
+static int cx25840_load_fw(struct v4l2_subdev *sd)
+{
+ struct cx25840_state *state = to_state(sd);
+
if (!state->is_initialized) {
/* initialize and load firmware */
- state->is_initialized = 1;
- if (is_cx2583x(state))
- cx25836_initialize(client);
- else if (is_cx2388x(state))
- cx23885_initialize(client);
- else if (is_cx231xx(state))
- cx231xx_initialize(client);
- else
- cx25840_initialize(client);
+ cx25840_reset(sd, 0);
}
return 0;
}
#ifdef CONFIG_VIDEO_ADV_DEBUG
-static int cx25840_g_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg)
+static int cx25840_g_register(struct v4l2_subdev *sd,
+ struct v4l2_dbg_register *reg)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -1680,7 +2346,8 @@ static int cx25840_g_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *
return 0;
}
-static int cx25840_s_register(struct v4l2_subdev *sd, const struct v4l2_dbg_register *reg)
+static int cx25840_s_register(struct v4l2_subdev *sd,
+ const struct v4l2_dbg_register *reg)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -1699,7 +2366,7 @@ static int cx25840_s_audio_stream(struct v4l2_subdev *sd, int enable)
return 0;
v4l_dbg(1, cx25840_debug, client, "%s audio output\n",
- enable ? "enable" : "disable");
+ enable ? "enable" : "disable");
if (enable) {
v = cx25840_read(client, 0x115) | 0x80;
@@ -1722,7 +2389,7 @@ static int cx25840_s_stream(struct v4l2_subdev *sd, int enable)
u8 v;
v4l_dbg(1, cx25840_debug, client, "%s video output\n",
- enable ? "enable" : "disable");
+ enable ? "enable" : "disable");
/*
* It's not clear what should be done for these devices.
@@ -1749,7 +2416,7 @@ static int cx25840_s_stream(struct v4l2_subdev *sd, int enable)
}
/* Query the current detected video format */
-static int cx25840_g_std(struct v4l2_subdev *sd, v4l2_std_id *std)
+static int cx25840_querystd(struct v4l2_subdev *sd, v4l2_std_id *std)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -1775,10 +2442,11 @@ static int cx25840_g_std(struct v4l2_subdev *sd, v4l2_std_id *std)
};
u32 fmt = (cx25840_read4(client, 0x40c) >> 8) & 0xf;
- *std = stds[ fmt ];
+ *std = stds[fmt];
- v4l_dbg(1, cx25840_debug, client, "g_std fmt = %x, v4l2_std_id = 0x%x\n",
- fmt, (unsigned int)stds[ fmt ]);
+ v4l_dbg(1, cx25840_debug, client,
+ "querystd fmt = %x, v4l2_std_id = 0x%x\n",
+ fmt, (unsigned int)stds[fmt]);
return 0;
}
@@ -1787,7 +2455,8 @@ static int cx25840_g_input_status(struct v4l2_subdev *sd, u32 *status)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- /* A limited function that checks for signal status and returns
+ /*
+ * A limited function that checks for signal status and returns
* the state.
*/
@@ -1798,6 +2467,15 @@ static int cx25840_g_input_status(struct v4l2_subdev *sd, u32 *status)
return 0;
}
+static int cx25840_g_std(struct v4l2_subdev *sd, v4l2_std_id *std)
+{
+ struct cx25840_state *state = to_state(sd);
+
+ *std = state->std;
+
+ return 0;
+}
+
static int cx25840_s_std(struct v4l2_subdev *sd, v4l2_std_id std)
{
struct cx25840_state *state = to_state(sd);
@@ -1827,6 +2505,11 @@ static int cx25840_s_video_routing(struct v4l2_subdev *sd,
if (is_cx23888(state))
cx23888_std_setup(client);
+ if (is_cx2584x(state) && state->generic_mode && config) {
+ cx25840_vconfig_add(state, config);
+ cx25840_vconfig_apply(client);
+ }
+
return set_input(client, input, state->aud_input);
}
@@ -1841,7 +2524,8 @@ static int cx25840_s_audio_routing(struct v4l2_subdev *sd,
return set_input(client, state->vid_input, input);
}
-static int cx25840_s_frequency(struct v4l2_subdev *sd, const struct v4l2_frequency *freq)
+static int cx25840_s_frequency(struct v4l2_subdev *sd,
+ const struct v4l2_frequency *freq)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -1864,9 +2548,8 @@ static int cx25840_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
if (is_cx2583x(state))
return 0;
- vt->capability |=
- V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_LANG1 |
- V4L2_TUNER_CAP_LANG2 | V4L2_TUNER_CAP_SAP;
+ vt->capability |= V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_LANG1 |
+ V4L2_TUNER_CAP_LANG2 | V4L2_TUNER_CAP_SAP;
mode = cx25840_read(client, 0x804);
@@ -1896,54 +2579,46 @@ static int cx25840_s_tuner(struct v4l2_subdev *sd, const struct v4l2_tuner *vt)
return 0;
switch (vt->audmode) {
- case V4L2_TUNER_MODE_MONO:
- /* mono -> mono
- stereo -> mono
- bilingual -> lang1 */
- cx25840_and_or(client, 0x809, ~0xf, 0x00);
- break;
- case V4L2_TUNER_MODE_STEREO:
- case V4L2_TUNER_MODE_LANG1:
- /* mono -> mono
- stereo -> stereo
- bilingual -> lang1 */
- cx25840_and_or(client, 0x809, ~0xf, 0x04);
- break;
- case V4L2_TUNER_MODE_LANG1_LANG2:
- /* mono -> mono
- stereo -> stereo
- bilingual -> lang1/lang2 */
- cx25840_and_or(client, 0x809, ~0xf, 0x07);
- break;
- case V4L2_TUNER_MODE_LANG2:
- /* mono -> mono
- stereo -> stereo
- bilingual -> lang2 */
- cx25840_and_or(client, 0x809, ~0xf, 0x01);
- break;
- default:
- return -EINVAL;
+ case V4L2_TUNER_MODE_MONO:
+ /*
+ * mono -> mono
+ * stereo -> mono
+ * bilingual -> lang1
+ */
+ cx25840_and_or(client, 0x809, ~0xf, 0x00);
+ break;
+ case V4L2_TUNER_MODE_STEREO:
+ case V4L2_TUNER_MODE_LANG1:
+ /*
+ * mono -> mono
+ * stereo -> stereo
+ * bilingual -> lang1
+ */
+ cx25840_and_or(client, 0x809, ~0xf, 0x04);
+ break;
+ case V4L2_TUNER_MODE_LANG1_LANG2:
+ /*
+ * mono -> mono
+ * stereo -> stereo
+ * bilingual -> lang1/lang2
+ */
+ cx25840_and_or(client, 0x809, ~0xf, 0x07);
+ break;
+ case V4L2_TUNER_MODE_LANG2:
+ /*
+ * mono -> mono
+ * stereo -> stereo
+ * bilingual -> lang2
+ */
+ cx25840_and_or(client, 0x809, ~0xf, 0x01);
+ break;
+ default:
+ return -EINVAL;
}
state->audmode = vt->audmode;
return 0;
}
-static int cx25840_reset(struct v4l2_subdev *sd, u32 val)
-{
- struct cx25840_state *state = to_state(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
-
- if (is_cx2583x(state))
- cx25836_initialize(client);
- else if (is_cx2388x(state))
- cx23885_initialize(client);
- else if (is_cx231xx(state))
- cx231xx_initialize(client);
- else
- cx25840_initialize(client);
- return 0;
-}
-
static int cx25840_log_status(struct v4l2_subdev *sd)
{
struct cx25840_state *state = to_state(sd);
@@ -5050,6 +5725,8 @@ static const struct v4l2_ctrl_ops cx25840_ctrl_ops = {
static const struct v4l2_subdev_core_ops cx25840_core_ops = {
.log_status = cx25840_log_status,
.reset = cx25840_reset,
+ /* calling the (optional) init op will turn on the generic mode */
+ .init = cx25840_init,
.load_fw = cx25840_load_fw,
.s_io_pin_config = common_s_io_pin_config,
#ifdef CONFIG_VIDEO_ADV_DEBUG
@@ -5073,8 +5750,9 @@ static const struct v4l2_subdev_audio_ops cx25840_audio_ops = {
};
static const struct v4l2_subdev_video_ops cx25840_video_ops = {
- .s_std = cx25840_s_std,
.g_std = cx25840_g_std,
+ .s_std = cx25840_s_std,
+ .querystd = cx25840_querystd,
.s_routing = cx25840_s_video_routing,
.s_stream = cx25840_s_stream,
.g_input_status = cx25840_g_input_status,
@@ -5110,22 +5788,28 @@ static u32 get_cx2388x_ident(struct i2c_client *client)
/* Come out of digital power down */
cx25840_write(client, 0x000, 0);
- /* Detecting whether the part is cx23885/7/8 is more
+ /*
+ * Detecting whether the part is cx23885/7/8 is more
* difficult than it needs to be. No ID register. Instead we
* probe certain registers indicated in the datasheets to look
- * for specific defaults that differ between the silicon designs. */
+ * for specific defaults that differ between the silicon designs.
+ */
/* It's either 885/7 if the IR Tx Clk Divider register exists */
if (cx25840_read4(client, 0x204) & 0xffff) {
- /* CX23885 returns bogus repetitive byte values for the DIF,
- * which doesn't exist for it. (Ex. 8a8a8a8a or 31313131) */
+ /*
+ * CX23885 returns bogus repetitive byte values for the DIF,
+ * which doesn't exist for it. (Ex. 8a8a8a8a or 31313131)
+ */
ret = cx25840_read4(client, 0x300);
if (((ret & 0xffff0000) >> 16) == (ret & 0xffff)) {
/* No DIF */
ret = CX23885_AV;
} else {
- /* CX23887 has a broken DIF, but the registers
- * appear valid (but unused), good enough to detect. */
+ /*
+ * CX23887 has a broken DIF, but the registers
+ * appear valid (but unused), good enough to detect.
+ */
ret = CX23887_AV;
}
} else if (cx25840_read4(client, 0x300) & 0x0fffffff) {
@@ -5157,14 +5841,18 @@ static int cx25840_probe(struct i2c_client *client,
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -EIO;
- v4l_dbg(1, cx25840_debug, client, "detecting cx25840 client on address 0x%x\n", client->addr << 1);
+ v4l_dbg(1, cx25840_debug, client,
+ "detecting cx25840 client on address 0x%x\n",
+ client->addr << 1);
device_id = cx25840_read(client, 0x101) << 8;
device_id |= cx25840_read(client, 0x100);
v4l_dbg(1, cx25840_debug, client, "device_id = 0x%04x\n", device_id);
- /* The high byte of the device ID should be
- * 0x83 for the cx2583x and 0x84 for the cx2584x */
+ /*
+ * The high byte of the device ID should be
+ * 0x83 for the cx2583x and 0x84 for the cx2584x
+ */
if ((device_id & 0xff00) == 0x8300) {
id = CX25836 + ((device_id >> 4) & 0xf) - 6;
} else if ((device_id & 0xff00) == 0x8400) {
@@ -5178,7 +5866,8 @@ static int cx25840_probe(struct i2c_client *client,
v4l_err(client,
"likely a confused/unresponsive cx2388[578] A/V decoder found @ 0x%x (%s)\n",
client->addr << 1, client->adapter->name);
- v4l_err(client, "A method to reset it from the cx25840 driver software is not known at this time\n");
+ v4l_err(client,
+ "A method to reset it from the cx25840 driver software is not known at this time\n");
return -ENODEV;
} else {
v4l_dbg(1, cx25840_debug, client, "cx25840 not found\n");
@@ -5186,7 +5875,7 @@ static int cx25840_probe(struct i2c_client *client,
}
state = devm_kzalloc(&client->dev, sizeof(*state), GFP_KERNEL);
- if (state == NULL)
+ if (!state)
return -ENOMEM;
sd = &state->sd;
@@ -5213,7 +5902,7 @@ static int cx25840_probe(struct i2c_client *client,
sd->entity.function = MEDIA_ENT_F_ATV_DECODER;
ret = media_entity_pads_init(&sd->entity, ARRAY_SIZE(state->pads),
- state->pads);
+ state->pads);
if (ret < 0) {
v4l_info(client, "failed to initialize media entity!\n");
return ret;
@@ -5241,8 +5930,10 @@ static int cx25840_probe(struct i2c_client *client,
case CX25841:
case CX25842:
case CX25843:
- /* Note: revision '(device_id & 0x0f) == 2' was never built. The
- marking skips from 0x1 == 22 to 0x3 == 23. */
+ /*
+ * Note: revision '(device_id & 0x0f) == 2' was never built.
+ * The marking skips from 0x1 == 22 to 0x3 == 23.
+ */
v4l_info(client, "cx25%3x-2%x found @ 0x%x (%s)\n",
(device_id & 0xfff0) >> 4,
(device_id & 0x0f) < 3 ? (device_id & 0x0f) + 1
@@ -5270,13 +5961,13 @@ static int cx25840_probe(struct i2c_client *client,
state->std = V4L2_STD_NTSC_M;
v4l2_ctrl_handler_init(&state->hdl, 9);
v4l2_ctrl_new_std(&state->hdl, &cx25840_ctrl_ops,
- V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);
+ V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);
v4l2_ctrl_new_std(&state->hdl, &cx25840_ctrl_ops,
- V4L2_CID_CONTRAST, 0, 127, 1, 64);
+ V4L2_CID_CONTRAST, 0, 127, 1, 64);
v4l2_ctrl_new_std(&state->hdl, &cx25840_ctrl_ops,
- V4L2_CID_SATURATION, 0, 127, 1, 64);
+ V4L2_CID_SATURATION, 0, 127, 1, 64);
v4l2_ctrl_new_std(&state->hdl, &cx25840_ctrl_ops,
- V4L2_CID_HUE, -128, 127, 1, 0);
+ V4L2_CID_HUE, -128, 127, 1, 0);
if (!is_cx2583x(state)) {
default_volume = cx25840_read(client, 0x8d4);
/*
@@ -5288,8 +5979,7 @@ static int cx25840_probe(struct i2c_client *client,
/* Bottom out at -96 dB, v4l2 vol range 0x2e00-0x2fff */
default_volume = 228;
cx25840_write(client, 0x8d4, 228);
- }
- else if (default_volume < 20) {
+ } else if (default_volume < 20) {
/* Top out at + 8 dB, v4l2 vol range 0xfe00-0xffff */
default_volume = 20;
cx25840_write(client, 0x8d4, 20);
@@ -5297,20 +5987,23 @@ static int cx25840_probe(struct i2c_client *client,
default_volume = (((228 - default_volume) >> 1) + 23) << 9;
state->volume = v4l2_ctrl_new_std(&state->hdl,
- &cx25840_audio_ctrl_ops, V4L2_CID_AUDIO_VOLUME,
- 0, 65535, 65535 / 100, default_volume);
+ &cx25840_audio_ctrl_ops,
+ V4L2_CID_AUDIO_VOLUME,
+ 0, 65535, 65535 / 100,
+ default_volume);
state->mute = v4l2_ctrl_new_std(&state->hdl,
- &cx25840_audio_ctrl_ops, V4L2_CID_AUDIO_MUTE,
- 0, 1, 1, 0);
+ &cx25840_audio_ctrl_ops,
+ V4L2_CID_AUDIO_MUTE,
+ 0, 1, 1, 0);
v4l2_ctrl_new_std(&state->hdl, &cx25840_audio_ctrl_ops,
- V4L2_CID_AUDIO_BALANCE,
- 0, 65535, 65535 / 100, 32768);
+ V4L2_CID_AUDIO_BALANCE,
+ 0, 65535, 65535 / 100, 32768);
v4l2_ctrl_new_std(&state->hdl, &cx25840_audio_ctrl_ops,
- V4L2_CID_AUDIO_BASS,
- 0, 65535, 65535 / 100, 32768);
+ V4L2_CID_AUDIO_BASS,
+ 0, 65535, 65535 / 100, 32768);
v4l2_ctrl_new_std(&state->hdl, &cx25840_audio_ctrl_ops,
- V4L2_CID_AUDIO_TREBLE,
- 0, 65535, 65535 / 100, 32768);
+ V4L2_CID_AUDIO_TREBLE,
+ 0, 65535, 65535 / 100, 32768);
}
sd->ctrl_handler = &state->hdl;
if (state->hdl.error) {
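The default_volume conversion above maps the chip's volume register reading onto the V4L2 volume range; plugging in the two clamp points mentioned in the comments shows where the 0x2e00 and 0xfe00 boundaries come from. A quick standalone check of that mapping:

#include <stdio.h>

/* same mapping as in cx25840_probe(): hardware register -> V4L2 volume */
static unsigned int reg_to_v4l2_volume(unsigned int reg)
{
	return (((228 - reg) >> 1) + 23) << 9;
}

int main(void)
{
	printf("reg 228 -> 0x%04x\n", reg_to_v4l2_volume(228));	/* 0x2e00, -96 dB floor */
	printf("reg  20 -> 0x%04x\n", reg_to_v4l2_volume(20));	/* 0xfe00, +8 dB ceiling */
	return 0;
}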
diff --git a/drivers/media/i2c/cx25840/cx25840-core.h b/drivers/media/i2c/cx25840/cx25840-core.h
index 7fa5787635ea..8b89e90687a1 100644
--- a/drivers/media/i2c/cx25840/cx25840-core.h
+++ b/drivers/media/i2c/cx25840/cx25840-core.h
@@ -7,7 +7,6 @@
#ifndef _CX25840_CORE_H_
#define _CX25840_CORE_H_
-
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>
@@ -44,10 +43,15 @@ enum cx25840_media_pads {
* @mute: audio mute V4L2 control (non-cx2583x devices only)
* @pvr150_workaround: whether we enable workaround for Hauppauge PVR150
* hardware bug (audio dropping out)
+ * @generic_mode: whether we disable ivtv-specific hacks
+ * this mode gets turned on when the bridge driver calls the
+ * cx25840 subdevice init core op
* @radio: set if we are currently in the radio mode, otherwise
* the current mode is non-radio (that is, video)
* @std: currently set video standard
* @vid_input: currently set video input
+ * @vid_config: currently set video output configuration
+ * only used in the generic mode
* @aud_input: currently set audio input
* @audclk_freq: currently set audio sample rate
* @audmode: currently set audio mode (when in non-radio mode)
@@ -74,9 +78,11 @@ struct cx25840_state {
struct v4l2_ctrl *mute;
};
int pvr150_workaround;
+ bool generic_mode;
int radio;
v4l2_std_id std;
enum cx25840_video_input vid_input;
+ u32 vid_config;
enum cx25840_audio_input aud_input;
u32 audclk_freq;
int audmode;
@@ -84,7 +90,7 @@ struct cx25840_state {
enum cx25840_model id;
u32 rev;
int is_initialized;
- unsigned vbi_regs_offset;
+ unsigned int vbi_regs_offset;
wait_queue_head_t fw_wait;
struct work_struct fw_work;
struct cx25840_ir_state *ir_state;
@@ -109,6 +115,14 @@ static inline bool is_cx2583x(struct cx25840_state *state)
state->id == CX25837;
}
+static inline bool is_cx2584x(struct cx25840_state *state)
+{
+ return state->id == CX25840 ||
+ state->id == CX25841 ||
+ state->id == CX25842 ||
+ state->id == CX25843;
+}
+
static inline bool is_cx231xx(struct cx25840_state *state)
{
return state->id == CX2310X_AV;
@@ -142,7 +156,8 @@ int cx25840_write(struct i2c_client *client, u16 addr, u8 value);
int cx25840_write4(struct i2c_client *client, u16 addr, u32 value);
u8 cx25840_read(struct i2c_client *client, u16 addr);
u32 cx25840_read4(struct i2c_client *client, u16 addr);
-int cx25840_and_or(struct i2c_client *client, u16 addr, unsigned mask, u8 value);
+int cx25840_and_or(struct i2c_client *client, u16 addr, unsigned int mask,
+ u8 value);
int cx25840_and_or4(struct i2c_client *client, u16 addr, u32 and_mask,
u32 or_value);
void cx25840_std_setup(struct i2c_client *client);
@@ -161,9 +176,12 @@ extern const struct v4l2_ctrl_ops cx25840_audio_ctrl_ops;
/* ----------------------------------------------------------------------- */
/* cx25850-vbi.c */
int cx25840_s_raw_fmt(struct v4l2_subdev *sd, struct v4l2_vbi_format *fmt);
-int cx25840_s_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *fmt);
-int cx25840_g_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *fmt);
-int cx25840_decode_vbi_line(struct v4l2_subdev *sd, struct v4l2_decode_vbi_line *vbi);
+int cx25840_s_sliced_fmt(struct v4l2_subdev *sd,
+ struct v4l2_sliced_vbi_format *fmt);
+int cx25840_g_sliced_fmt(struct v4l2_subdev *sd,
+ struct v4l2_sliced_vbi_format *fmt);
+int cx25840_decode_vbi_line(struct v4l2_subdev *sd,
+ struct v4l2_decode_vbi_line *vbi);
/* ----------------------------------------------------------------------- */
/* cx25850-ir.c */
diff --git a/drivers/media/i2c/cx25840/cx25840-vbi.c b/drivers/media/i2c/cx25840/cx25840-vbi.c
index 643335f0f827..a066d5f0fec9 100644
--- a/drivers/media/i2c/cx25840/cx25840-vbi.c
+++ b/drivers/media/i2c/cx25840/cx25840-vbi.c
@@ -86,6 +86,7 @@ int cx25840_g_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *
memset(svbi->service_lines, 0, sizeof(svbi->service_lines));
svbi->service_set = 0;
/* we're done if raw VBI is active */
+ /* TODO: this will have to be changed for generic_mode VBI */
if ((cx25840_read(client, 0x404) & 0x10) == 0)
return 0;
@@ -128,6 +129,7 @@ int cx25840_s_raw_fmt(struct v4l2_subdev *sd, struct v4l2_vbi_format *fmt)
cx25840_write(client, 0x54f, vbi_offset);
else
cx25840_write(client, 0x47f, vbi_offset);
+ /* TODO: this will have to be changed for generic_mode VBI */
cx25840_write(client, 0x404, 0x2e);
return 0;
}
@@ -148,6 +150,7 @@ int cx25840_s_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *
cx25840_std_setup(client);
/* Sliced VBI */
+ /* TODO: this will have to be changed for generic_mode VBI */
cx25840_write(client, 0x404, 0x32); /* Ancillary data */
cx25840_write(client, 0x406, 0x13);
if (is_cx23888(state))
@@ -202,6 +205,7 @@ int cx25840_s_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *
}
cx25840_write(client, state->vbi_regs_offset + 0x43c, 0x16);
+ /* TODO: this will have to be changed for generic_mode VBI */
if (is_cx23888(state))
cx25840_write(client, 0x428, is_pal ? 0x2a : 0x22);
else
diff --git a/drivers/media/i2c/imx214.c b/drivers/media/i2c/imx214.c
index 83e9961b0505..159a3a604f0e 100644
--- a/drivers/media/i2c/imx214.c
+++ b/drivers/media/i2c/imx214.c
@@ -1111,6 +1111,6 @@ static struct i2c_driver imx214_i2c_driver = {
module_i2c_driver(imx214_i2c_driver);
-MODULE_DESCRIPTION("Sony IMX214 Camera drier");
+MODULE_DESCRIPTION("Sony IMX214 Camera driver");
MODULE_AUTHOR("Ricardo Ribalda <ricardo.ribalda@gmail.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/mt9m001.c b/drivers/media/i2c/mt9m001.c
index 4b23fde937b3..2df743cbe09d 100644
--- a/drivers/media/i2c/mt9m001.c
+++ b/drivers/media/i2c/mt9m001.c
@@ -730,7 +730,7 @@ static int mt9m001_probe(struct i2c_client *client,
const struct i2c_device_id *did)
{
struct mt9m001 *mt9m001;
- struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+ struct i2c_adapter *adapter = client->adapter;
int ret;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA)) {
diff --git a/drivers/media/i2c/mt9m111.c b/drivers/media/i2c/mt9m111.c
index 362c3b93636e..12cb012d91f7 100644
--- a/drivers/media/i2c/mt9m111.c
+++ b/drivers/media/i2c/mt9m111.c
@@ -10,6 +10,7 @@
#include <linux/log2.h>
#include <linux/gpio.h>
#include <linux/delay.h>
+#include <linux/regulator/consumer.h>
#include <linux/v4l2-mediabus.h>
#include <linux/module.h>
#include <linux/property.h>
@@ -240,6 +241,7 @@ struct mt9m111 {
int power_count;
const struct mt9m111_datafmt *fmt;
int lastpage; /* PageMap cache value */
+ struct regulator *regulator;
bool is_streaming;
/* user point of view - 0: falling 1: rising edge */
unsigned int pclk_sample:1;
@@ -979,11 +981,23 @@ static int mt9m111_power_on(struct mt9m111 *mt9m111)
if (ret < 0)
return ret;
+ ret = regulator_enable(mt9m111->regulator);
+ if (ret < 0)
+ goto out_clk_disable;
+
ret = mt9m111_resume(mt9m111);
- if (ret < 0) {
- dev_err(&client->dev, "Failed to resume the sensor: %d\n", ret);
- v4l2_clk_disable(mt9m111->clk);
- }
+ if (ret < 0)
+ goto out_regulator_disable;
+
+ return 0;
+
+out_regulator_disable:
+ regulator_disable(mt9m111->regulator);
+
+out_clk_disable:
+ v4l2_clk_disable(mt9m111->clk);
+
+ dev_err(&client->dev, "Failed to resume the sensor: %d\n", ret);
return ret;
}
@@ -991,6 +1005,7 @@ static int mt9m111_power_on(struct mt9m111 *mt9m111)
static void mt9m111_power_off(struct mt9m111 *mt9m111)
{
mt9m111_suspend(mt9m111);
+ regulator_disable(mt9m111->regulator);
v4l2_clk_disable(mt9m111->clk);
}
@@ -1232,7 +1247,7 @@ static int mt9m111_probe(struct i2c_client *client,
const struct i2c_device_id *did)
{
struct mt9m111 *mt9m111;
- struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+ struct i2c_adapter *adapter = client->adapter;
int ret;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA)) {
@@ -1245,14 +1260,23 @@ static int mt9m111_probe(struct i2c_client *client,
if (!mt9m111)
return -ENOMEM;
- ret = mt9m111_probe_fw(client, mt9m111);
- if (ret)
- return ret;
+ if (dev_fwnode(&client->dev)) {
+ ret = mt9m111_probe_fw(client, mt9m111);
+ if (ret)
+ return ret;
+ }
mt9m111->clk = v4l2_clk_get(&client->dev, "mclk");
if (IS_ERR(mt9m111->clk))
return PTR_ERR(mt9m111->clk);
+ mt9m111->regulator = devm_regulator_get(&client->dev, "vdd");
+ if (IS_ERR(mt9m111->regulator)) {
+ dev_err(&client->dev, "regulator not found: %ld\n",
+ PTR_ERR(mt9m111->regulator));
+ return PTR_ERR(mt9m111->regulator);
+ }
+
/* Default HIGHPOWER context */
mt9m111->ctx = &context_b;
diff --git a/drivers/media/i2c/mt9p031.c b/drivers/media/i2c/mt9p031.c
index 5e186ea7391b..dc23b9ed510a 100644
--- a/drivers/media/i2c/mt9p031.c
+++ b/drivers/media/i2c/mt9p031.c
@@ -1031,7 +1031,7 @@ static int mt9p031_probe(struct i2c_client *client,
const struct i2c_device_id *did)
{
struct mt9p031_platform_data *pdata = mt9p031_get_pdata(client);
- struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+ struct i2c_adapter *adapter = client->adapter;
struct mt9p031 *mt9p031;
unsigned int i;
int ret;
diff --git a/drivers/media/i2c/ov13858.c b/drivers/media/i2c/ov13858.c
index 45bb872db3c5..aac6f77afa0f 100644
--- a/drivers/media/i2c/ov13858.c
+++ b/drivers/media/i2c/ov13858.c
@@ -1224,7 +1224,7 @@ static int ov13858_set_ctrl(struct v4l2_ctrl *ctrl)
ov13858->exposure->minimum,
max, ov13858->exposure->step, max);
break;
- };
+ }
/*
* Applying V4L2 control value only happens
@@ -1262,7 +1262,7 @@ static int ov13858_set_ctrl(struct v4l2_ctrl *ctrl)
"ctrl(id:0x%x,val:0x%x) is not handled\n",
ctrl->id, ctrl->val);
break;
- };
+ }
pm_runtime_put(&client->dev);
diff --git a/drivers/media/i2c/ov2640.c b/drivers/media/i2c/ov2640.c
index b744a203eb9b..ecd167d7c4d2 100644
--- a/drivers/media/i2c/ov2640.c
+++ b/drivers/media/i2c/ov2640.c
@@ -1194,7 +1194,7 @@ static int ov2640_probe(struct i2c_client *client,
const struct i2c_device_id *did)
{
struct ov2640_priv *priv;
- struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+ struct i2c_adapter *adapter = client->adapter;
int ret;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
diff --git a/drivers/media/i2c/ov2685.c b/drivers/media/i2c/ov2685.c
index 98a1f2e312b5..6814583d9606 100644
--- a/drivers/media/i2c/ov2685.c
+++ b/drivers/media/i2c/ov2685.c
@@ -576,7 +576,7 @@ static int ov2685_set_ctrl(struct v4l2_ctrl *ctrl)
__func__, ctrl->id, ctrl->val);
ret = -EINVAL;
break;
- };
+ }
pm_runtime_put(&client->dev);
diff --git a/drivers/media/i2c/ov5695.c b/drivers/media/i2c/ov5695.c
index 5d107c53364d..e65a94353175 100644
--- a/drivers/media/i2c/ov5695.c
+++ b/drivers/media/i2c/ov5695.c
@@ -1143,7 +1143,7 @@ static int ov5695_set_ctrl(struct v4l2_ctrl *ctrl)
dev_warn(&client->dev, "%s Unhandled id:0x%x, val:0x%x\n",
__func__, ctrl->id, ctrl->val);
break;
- };
+ }
pm_runtime_put(&client->dev);
diff --git a/drivers/media/i2c/ov6650.c b/drivers/media/i2c/ov6650.c
index 7f7c933b5cf4..5b9af5e5b7f1 100644
--- a/drivers/media/i2c/ov6650.c
+++ b/drivers/media/i2c/ov6650.c
@@ -1006,7 +1006,6 @@ static int ov6650_probe(struct i2c_client *client,
priv->colorspace = V4L2_COLORSPACE_JPEG;
priv->subdev.internal_ops = &ov6650_internal_ops;
- priv->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
ret = v4l2_async_register_subdev(&priv->subdev);
if (ret)
diff --git a/drivers/media/i2c/ov7740.c b/drivers/media/i2c/ov7740.c
index 54e80a60aa57..70bb870b1d08 100644
--- a/drivers/media/i2c/ov7740.c
+++ b/drivers/media/i2c/ov7740.c
@@ -532,7 +532,7 @@ static int ov7740_set_ctrl(struct v4l2_ctrl *ctrl)
struct i2c_client *client = v4l2_get_subdevdata(&ov7740->subdev);
struct regmap *regmap = ov7740->regmap;
int ret;
- u8 val = 0;
+ u8 val;
if (!pm_runtime_get_if_in_use(&client->dev))
return 0;
@@ -551,6 +551,7 @@ static int ov7740_set_ctrl(struct v4l2_ctrl *ctrl)
ret = ov7740_set_contrast(regmap, ctrl->val);
break;
case V4L2_CID_VFLIP:
+ val = ctrl->val ? REG0C_IMG_FLIP : 0x00;
ret = regmap_update_bits(regmap, REG_REG0C,
REG0C_IMG_FLIP, val);
break;
@@ -561,16 +562,16 @@ static int ov7740_set_ctrl(struct v4l2_ctrl *ctrl)
break;
case V4L2_CID_AUTOGAIN:
if (!ctrl->val)
- return ov7740_set_gain(regmap, ov7740->gain->val);
-
- ret = ov7740_set_autogain(regmap, ctrl->val);
+ ret = ov7740_set_gain(regmap, ov7740->gain->val);
+ else
+ ret = ov7740_set_autogain(regmap, ctrl->val);
break;
case V4L2_CID_EXPOSURE_AUTO:
if (ctrl->val == V4L2_EXPOSURE_MANUAL)
- return ov7740_set_exp(regmap, ov7740->exposure->val);
-
- ret = ov7740_set_autoexp(regmap, ctrl->val);
+ ret = ov7740_set_exp(regmap, ov7740->exposure->val);
+ else
+ ret = ov7740_set_autoexp(regmap, ctrl->val);
break;
default:
ret = -EINVAL;
@@ -785,7 +786,11 @@ static int ov7740_try_fmt_internal(struct v4l2_subdev *sd,
fsize++;
}
-
+ if (i >= ARRAY_SIZE(ov7740_framesizes)) {
+ fsize = &ov7740_framesizes[0];
+ fmt->width = fsize->width;
+ fmt->height = fsize->height;
+ }
if (ret_frmsize != NULL)
*ret_frmsize = fsize;
@@ -1007,8 +1012,6 @@ static int ov7740_init_controls(struct ov7740 *ov7740)
ov7740->gain = v4l2_ctrl_new_std(ctrl_hdlr, &ov7740_ctrl_ops,
V4L2_CID_GAIN, 0, 1023, 1, 500);
- if (ov7740->gain)
- ov7740->gain->flags |= V4L2_CTRL_FLAG_VOLATILE;
ov7740->auto_gain = v4l2_ctrl_new_std(ctrl_hdlr, &ov7740_ctrl_ops,
V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
@@ -1026,7 +1029,6 @@ static int ov7740_init_controls(struct ov7740 *ov7740)
v4l2_ctrl_auto_cluster(2, &ov7740->auto_gain, 0, true);
v4l2_ctrl_auto_cluster(2, &ov7740->auto_exposure,
V4L2_EXPOSURE_MANUAL, true);
- v4l2_ctrl_cluster(2, &ov7740->hflip);
if (ctrl_hdlr->error) {
ret = ctrl_hdlr->error;
diff --git a/drivers/media/i2c/ov8856.c b/drivers/media/i2c/ov8856.c
index dbf1095b9440..cd347d6b7b9d 100644
--- a/drivers/media/i2c/ov8856.c
+++ b/drivers/media/i2c/ov8856.c
@@ -195,11 +195,11 @@ static const struct ov8856_reg mode_3280x2464_regs[] = {
{0x3800, 0x00},
{0x3801, 0x00},
{0x3802, 0x00},
- {0x3803, 0x07},
+ {0x3803, 0x06},
{0x3804, 0x0c},
{0x3805, 0xdf},
{0x3806, 0x09},
- {0x3807, 0xa6},
+ {0x3807, 0xa7},
{0x3808, 0x0c},
{0x3809, 0xd0},
{0x380a, 0x09},
@@ -211,7 +211,7 @@ static const struct ov8856_reg mode_3280x2464_regs[] = {
{0x3810, 0x00},
{0x3811, 0x00},
{0x3812, 0x00},
- {0x3813, 0x00},
+ {0x3813, 0x01},
{0x3814, 0x01},
{0x3815, 0x01},
{0x3816, 0x00},
@@ -385,11 +385,11 @@ static const struct ov8856_reg mode_1640x1232_regs[] = {
{0x3800, 0x00},
{0x3801, 0x00},
{0x3802, 0x00},
- {0x3803, 0x07},
+ {0x3803, 0x06},
{0x3804, 0x0c},
{0x3805, 0xdf},
{0x3806, 0x09},
- {0x3807, 0xa6},
+ {0x3807, 0xa7},
{0x3808, 0x06},
{0x3809, 0x68},
{0x380a, 0x04},
@@ -401,7 +401,7 @@ static const struct ov8856_reg mode_1640x1232_regs[] = {
{0x3810, 0x00},
{0x3811, 0x00},
{0x3812, 0x00},
- {0x3813, 0x00},
+ {0x3813, 0x01},
{0x3814, 0x03},
{0x3815, 0x01},
{0x3816, 0x00},
diff --git a/drivers/media/i2c/ov9640.c b/drivers/media/i2c/ov9640.c
index d6831f28378b..482609665305 100644
--- a/drivers/media/i2c/ov9640.c
+++ b/drivers/media/i2c/ov9640.c
@@ -691,14 +691,14 @@ static int ov9640_probe(struct i2c_client *client,
priv->gpio_power = devm_gpiod_get(&client->dev, "Camera power",
GPIOD_OUT_LOW);
- if (IS_ERR_OR_NULL(priv->gpio_power)) {
+ if (IS_ERR(priv->gpio_power)) {
ret = PTR_ERR(priv->gpio_power);
return ret;
}
priv->gpio_reset = devm_gpiod_get(&client->dev, "Camera reset",
GPIOD_OUT_HIGH);
- if (IS_ERR_OR_NULL(priv->gpio_reset)) {
+ if (IS_ERR(priv->gpio_reset)) {
ret = PTR_ERR(priv->gpio_reset);
return ret;
}
diff --git a/drivers/media/i2c/smiapp/smiapp-quirk.c b/drivers/media/i2c/smiapp/smiapp-quirk.c
index e46d72cee566..ab96d6067fc3 100644
--- a/drivers/media/i2c/smiapp/smiapp-quirk.c
+++ b/drivers/media/i2c/smiapp/smiapp-quirk.c
@@ -194,7 +194,7 @@ static int jt8ev1_post_streamoff(struct smiapp_sensor *sensor)
return rval;
/* Wait for 1 ms + one line => 2 ms is likely enough */
- usleep_range(2000, 2000);
+ usleep_range(2000, 2050);
/* Restore it */
rval = smiapp_write_8(sensor, 0x3205, 0x00);
diff --git a/drivers/media/i2c/st-mipid02.c b/drivers/media/i2c/st-mipid02.c
index 9369f38dbf3d..81285b8d5cfb 100644
--- a/drivers/media/i2c/st-mipid02.c
+++ b/drivers/media/i2c/st-mipid02.c
@@ -61,7 +61,10 @@ static const u32 mipid02_supported_fmt_codes[] = {
MEDIA_BUS_FMT_SGRBG10_1X10, MEDIA_BUS_FMT_SRGGB10_1X10,
MEDIA_BUS_FMT_SBGGR12_1X12, MEDIA_BUS_FMT_SGBRG12_1X12,
MEDIA_BUS_FMT_SGRBG12_1X12, MEDIA_BUS_FMT_SRGGB12_1X12,
- MEDIA_BUS_FMT_UYVY8_1X16, MEDIA_BUS_FMT_BGR888_1X24
+ MEDIA_BUS_FMT_UYVY8_1X16, MEDIA_BUS_FMT_BGR888_1X24,
+ MEDIA_BUS_FMT_RGB565_2X8_LE, MEDIA_BUS_FMT_RGB565_2X8_BE,
+ MEDIA_BUS_FMT_YUYV8_2X8, MEDIA_BUS_FMT_UYVY8_2X8,
+ MEDIA_BUS_FMT_JPEG_1X8
};
/* regulator supplies */
@@ -99,6 +102,7 @@ struct mipid02_dev {
u8 data_lane1_reg1;
u8 mode_reg1;
u8 mode_reg2;
+ u8 data_selection_ctrl;
u8 data_id_rreg;
u8 pix_width_ctrl;
u8 pix_width_ctrl_emb;
@@ -128,6 +132,10 @@ static int bpp_from_code(__u32 code)
case MEDIA_BUS_FMT_SRGGB12_1X12:
return 12;
case MEDIA_BUS_FMT_UYVY8_1X16:
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ case MEDIA_BUS_FMT_RGB565_2X8_LE:
+ case MEDIA_BUS_FMT_RGB565_2X8_BE:
return 16;
case MEDIA_BUS_FMT_BGR888_1X24:
return 24;
@@ -155,9 +163,14 @@ static u8 data_type_from_code(__u32 code)
case MEDIA_BUS_FMT_SRGGB12_1X12:
return 0x2c;
case MEDIA_BUS_FMT_UYVY8_1X16:
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ case MEDIA_BUS_FMT_UYVY8_2X8:
return 0x1e;
case MEDIA_BUS_FMT_BGR888_1X24:
return 0x24;
+ case MEDIA_BUS_FMT_RGB565_2X8_LE:
+ case MEDIA_BUS_FMT_RGB565_2X8_BE:
+ return 0x22;
default:
return 0;
}
@@ -331,6 +344,25 @@ static int mipid02_detect(struct mipid02_dev *bridge)
return mipid02_read_reg(bridge, MIPID02_CLK_LANE_WR_REG1, &reg);
}
+static u32 mipid02_get_link_freq_from_cid_link_freq(struct mipid02_dev *bridge,
+ struct v4l2_subdev *subdev)
+{
+ struct v4l2_querymenu qm = {.id = V4L2_CID_LINK_FREQ, };
+ struct v4l2_ctrl *ctrl;
+ int ret;
+
+ ctrl = v4l2_ctrl_find(subdev->ctrl_handler, V4L2_CID_LINK_FREQ);
+ if (!ctrl)
+ return 0;
+ qm.index = v4l2_ctrl_g_ctrl(ctrl);
+
+ ret = v4l2_querymenu(subdev->ctrl_handler, &qm);
+ if (ret)
+ return 0;
+
+ return qm.value;
+}
+
static u32 mipid02_get_link_freq_from_cid_pixel_rate(struct mipid02_dev *bridge,
struct v4l2_subdev *subdev)
{
@@ -358,10 +390,14 @@ static int mipid02_configure_from_rx_speed(struct mipid02_dev *bridge)
struct v4l2_subdev *subdev = bridge->s_subdev;
u32 link_freq;
- link_freq = mipid02_get_link_freq_from_cid_pixel_rate(bridge, subdev);
+ link_freq = mipid02_get_link_freq_from_cid_link_freq(bridge, subdev);
if (!link_freq) {
- dev_err(&client->dev, "Failed to detect link frequency");
- return -EINVAL;
+ link_freq = mipid02_get_link_freq_from_cid_pixel_rate(bridge,
+ subdev);
+ if (!link_freq) {
+ dev_err(&client->dev, "Failed to get link frequency");
+ return -EINVAL;
+ }
}
dev_dbg(&client->dev, "detect link_freq = %d Hz", link_freq);
@@ -452,6 +488,7 @@ static int mipid02_configure_from_tx(struct mipid02_dev *bridge)
{
struct v4l2_fwnode_endpoint *ep = &bridge->tx;
+ bridge->r.data_selection_ctrl = SELECTION_MANUAL_WIDTH;
bridge->r.pix_width_ctrl = ep->bus.parallel.bus_width;
bridge->r.pix_width_ctrl_emb = ep->bus.parallel.bus_width;
if (ep->bus.parallel.flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
@@ -467,10 +504,15 @@ static int mipid02_configure_from_code(struct mipid02_dev *bridge)
u8 data_type;
bridge->r.data_id_rreg = 0;
- data_type = data_type_from_code(bridge->fmt.code);
- if (!data_type)
- return -EINVAL;
- bridge->r.data_id_rreg = data_type;
+
+ if (bridge->fmt.code != MEDIA_BUS_FMT_JPEG_1X8) {
+ bridge->r.data_selection_ctrl |= SELECTION_MANUAL_DATA;
+
+ data_type = data_type_from_code(bridge->fmt.code);
+ if (!data_type)
+ return -EINVAL;
+ bridge->r.data_id_rreg = data_type;
+ }
return 0;
}
@@ -554,7 +596,7 @@ static int mipid02_stream_enable(struct mipid02_dev *bridge)
if (ret)
goto error;
ret = mipid02_write_reg(bridge, MIPID02_DATA_SELECTION_CTRL,
- SELECTION_MANUAL_DATA | SELECTION_MANUAL_WIDTH);
+ bridge->r.data_selection_ctrl);
if (ret)
goto error;
ret = mipid02_write_reg(bridge, MIPID02_PIX_WIDTH_CTRL,
diff --git a/drivers/media/i2c/tda7432.c b/drivers/media/i2c/tda7432.c
index 06a78c2cdaab..cbdc9be0a597 100644
--- a/drivers/media/i2c/tda7432.c
+++ b/drivers/media/i2c/tda7432.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* For the STS-Thompson TDA7432 audio processor chip
*
@@ -9,7 +10,7 @@
*
* Copyright (c) 2000 Eric Sandeen <eric_sandeen@bigfoot.com>
* Copyright (c) 2006 Mauro Carvalho Chehab <mchehab@kernel.org>
- * This code is placed under the terms of the GNU General Public License
+ *
* Based on tda9855.c by Steve VanDeBogart (vandebo@uclink.berkeley.edu)
* Which was based on tda8425.c by Greg Alexander (c) 1998
*
diff --git a/drivers/media/i2c/tw9910.c b/drivers/media/i2c/tw9910.c
index 4d7cd736b930..a25a350b0ddc 100644
--- a/drivers/media/i2c/tw9910.c
+++ b/drivers/media/i2c/tw9910.c
@@ -934,8 +934,7 @@ static int tw9910_probe(struct i2c_client *client,
{
struct tw9910_priv *priv;
struct tw9910_video_info *info;
- struct i2c_adapter *adapter =
- to_i2c_adapter(client->dev.parent);
+ struct i2c_adapter *adapter = client->adapter;
int ret;
if (!client->dev.platform_data) {
diff --git a/drivers/media/i2c/video-i2c.c b/drivers/media/i2c/video-i2c.c
index abd3152df7d0..078141712c88 100644
--- a/drivers/media/i2c/video-i2c.c
+++ b/drivers/media/i2c/video-i2c.c
@@ -190,12 +190,8 @@ static int mlx90640_setup(struct video_i2c_data *data)
unsigned int n, idx;
for (n = 0; n < data->chip->num_frame_intervals - 1; n++) {
- if (data->frame_interval.numerator
- != data->chip->frame_intervals[n].numerator)
- continue;
-
- if (data->frame_interval.denominator
- == data->chip->frame_intervals[n].denominator)
+ if (V4L2_FRACT_COMPARE(data->frame_interval, ==,
+ data->chip->frame_intervals[n]))
break;
}
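The video-i2c change replaces two open-coded numerator/denominator checks with V4L2_FRACT_COMPARE, which compares struct v4l2_fract values by cross-multiplying, so equivalent but unreduced fractions still match. A small self-contained illustration of the same idea, using a local struct and macro rather than quoting the kernel header verbatim:

	#include <stdint.h>
	#include <stdio.h>

	struct fract { uint32_t numerator, denominator; };

	/* Same idea as V4L2_FRACT_COMPARE: a/b OP c/d  <=>  a*d OP c*b */
	#define FRACT_COMPARE(a, OP, b) \
		((uint64_t)(a).numerator * (b).denominator OP \
		 (uint64_t)(b).numerator * (a).denominator)

	int main(void)
	{
		struct fract want = { 1, 8 };	/* 8 fps frame interval */
		struct fract have = { 2, 16 };	/* same interval, not reduced */

		printf("equal: %d\n", FRACT_COMPARE(want, ==, have));	/* prints 1 */
		return 0;
	}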
diff --git a/drivers/media/mc/Kconfig b/drivers/media/mc/Kconfig
new file mode 100644
index 000000000000..3b9795cfcb36
--- /dev/null
+++ b/drivers/media/mc/Kconfig
@@ -0,0 +1,33 @@
+#
+# Media controller
+# Selectable only for webcam/grabbers, as other drivers don't use it
+#
+
+config MEDIA_CONTROLLER
+ bool "Media Controller API"
+ depends on MEDIA_CAMERA_SUPPORT || MEDIA_ANALOG_TV_SUPPORT || MEDIA_DIGITAL_TV_SUPPORT
+ help
+ Enable the media controller API used to query the internal topology
+ of media devices and configure it dynamically.
+
+ This API is mostly used by camera interfaces in embedded platforms.
+
+config MEDIA_CONTROLLER_DVB
+ bool "Enable Media controller for DVB (EXPERIMENTAL)"
+ depends on MEDIA_CONTROLLER && DVB_CORE
+ help
+ Enable the media controller API support for DVB.
+
+ This is currently experimental.
+
+config MEDIA_CONTROLLER_REQUEST_API
+ bool "Enable Media controller Request API (EXPERIMENTAL)"
+ depends on MEDIA_CONTROLLER && STAGING_MEDIA
+ help
+ DO NOT ENABLE THIS OPTION UNLESS YOU KNOW WHAT YOU'RE DOING.
+
+ This option enables the Request API for the Media controller and V4L2
+ interfaces. It is currently needed by a few stateless codec drivers.
+
+ There is currently no intention to provide API or ABI stability for
+ this new API.
diff --git a/drivers/media/mc/Makefile b/drivers/media/mc/Makefile
new file mode 100644
index 000000000000..119037f0e686
--- /dev/null
+++ b/drivers/media/mc/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+
+mc-objs := mc-device.o mc-devnode.o mc-entity.o \
+ mc-request.o
+
+ifeq ($(CONFIG_USB),y)
+ mc-objs += mc-dev-allocator.o
+endif
+
+obj-$(CONFIG_MEDIA_SUPPORT) += mc.o
diff --git a/drivers/media/media-dev-allocator.c b/drivers/media/mc/mc-dev-allocator.c
index ae17887dec59..ae17887dec59 100644
--- a/drivers/media/media-dev-allocator.c
+++ b/drivers/media/mc/mc-dev-allocator.c
diff --git a/drivers/media/media-device.c b/drivers/media/mc/mc-device.c
index 9ae481ddd975..e19df5165e78 100644
--- a/drivers/media/media-device.c
+++ b/drivers/media/mc/mc-device.c
@@ -494,6 +494,7 @@ static long media_device_enum_links32(struct media_device *mdev,
{
struct media_links_enum links;
compat_uptr_t pads_ptr, links_ptr;
+ int ret;
memset(&links, 0, sizeof(links));
@@ -505,7 +506,14 @@ static long media_device_enum_links32(struct media_device *mdev,
links.pads = compat_ptr(pads_ptr);
links.links = compat_ptr(links_ptr);
- return media_device_enum_links(mdev, &links);
+ ret = media_device_enum_links(mdev, &links);
+ if (ret)
+ return ret;
+
+ if (copy_to_user(ulinks->reserved, links.reserved,
+ sizeof(ulinks->reserved)))
+ return -EFAULT;
+ return 0;
}
#define MEDIA_IOC_ENUM_LINKS32 _IOWR('|', 0x02, struct media_links_enum32)
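The mc-device.c hunk fixes the 32-bit compat path of MEDIA_IOC_ENUM_LINKS: the native handler zeroes links.reserved, and that result now has to be copied back into the caller's 32-bit structure as well, otherwise compat userspace keeps whatever was on its stack. A minimal sketch of the general compat-ioctl pattern, with hypothetical names (struct foo/foo32, foo_native_handler), not the media controller's real types:

	#include <linux/compat.h>
	#include <linux/uaccess.h>

	struct foo   { void __user *buf; __u32 reserved[4]; };	/* native layout */
	struct foo32 { compat_uptr_t buf; __u32 reserved[4]; };	/* 32-bit layout */

	long foo_native_handler(struct foo *arg);	/* hypothetical */

	static long foo_compat_ioctl(struct foo32 __user *uarg)
	{
		struct foo native = {};
		compat_uptr_t buf32;
		long ret;

		if (get_user(buf32, &uarg->buf))
			return -EFAULT;
		native.buf = compat_ptr(buf32);		/* widen the user pointer */

		ret = foo_native_handler(&native);	/* zeroes native.reserved */
		if (ret)
			return ret;

		/* Copy output fields, including the zeroed reserved[], back. */
		if (copy_to_user(uarg->reserved, native.reserved,
				 sizeof(uarg->reserved)))
			return -EFAULT;
		return 0;
	}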
diff --git a/drivers/media/media-devnode.c b/drivers/media/mc/mc-devnode.c
index f11382afe23b..f11382afe23b 100644
--- a/drivers/media/media-devnode.c
+++ b/drivers/media/mc/mc-devnode.c
diff --git a/drivers/media/media-entity.c b/drivers/media/mc/mc-entity.c
index 7c429ce98bae..7c429ce98bae 100644
--- a/drivers/media/media-entity.c
+++ b/drivers/media/mc/mc-entity.c
diff --git a/drivers/media/media-request.c b/drivers/media/mc/mc-request.c
index e3fca436c75b..e3fca436c75b 100644
--- a/drivers/media/media-request.c
+++ b/drivers/media/mc/mc-request.c
diff --git a/drivers/media/pci/bt8xx/bttv-audio-hook.c b/drivers/media/pci/bt8xx/bttv-audio-hook.c
index 8febe7358a8f..da1914a20b81 100644
--- a/drivers/media/pci/bt8xx/bttv-audio-hook.c
+++ b/drivers/media/pci/bt8xx/bttv-audio-hook.c
@@ -1,8 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Handlers for board audio hooks, split from bttv-cards
*
* Copyright (c) 2006 Mauro Carvalho Chehab <mchehab@kernel.org>
- * This code is placed under the terms of the GNU General Public License
*/
#include "bttv-audio-hook.h"
diff --git a/drivers/media/pci/bt8xx/bttv-audio-hook.h b/drivers/media/pci/bt8xx/bttv-audio-hook.h
index c61b9ac4f4e3..d6a1a5a60a56 100644
--- a/drivers/media/pci/bt8xx/bttv-audio-hook.h
+++ b/drivers/media/pci/bt8xx/bttv-audio-hook.h
@@ -1,4 +1,6 @@
/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
* Handlers for board audio hooks, split from bttv-cards
*
* Copyright (c) 2006 Mauro Carvalho Chehab <mchehab@kernel.org>
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index 636e6a2549a9..612d1c0010c1 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -2453,7 +2453,6 @@ static int bttv_s_fmt_vid_overlay(struct file *file, void *priv,
static int bttv_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- struct video_device *vdev = video_devdata(file);
struct bttv_fh *fh = priv;
struct bttv *btv = fh->btv;
@@ -2464,17 +2463,17 @@ static int bttv_querycap(struct file *file, void *priv,
strscpy(cap->card, btv->video_dev.name, sizeof(cap->card));
snprintf(cap->bus_info, sizeof(cap->bus_info),
"PCI:%s", pci_name(btv->c.pci));
- cap->capabilities =
- V4L2_CAP_VIDEO_CAPTURE |
- V4L2_CAP_READWRITE |
- V4L2_CAP_STREAMING |
- V4L2_CAP_DEVICE_CAPS;
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
+ V4L2_CAP_STREAMING | V4L2_CAP_DEVICE_CAPS;
if (no_overlay <= 0)
cap->capabilities |= V4L2_CAP_VIDEO_OVERLAY;
if (video_is_registered(&btv->vbi_dev))
cap->capabilities |= V4L2_CAP_VBI_CAPTURE;
- if (video_is_registered(&btv->radio_dev))
+ if (video_is_registered(&btv->radio_dev)) {
cap->capabilities |= V4L2_CAP_RADIO;
+ if (btv->has_tea575x)
+ cap->capabilities |= V4L2_CAP_HW_FREQ_SEEK;
+ }
/*
* No need to lock here: those vars are initialized during board
@@ -2484,27 +2483,6 @@ static int bttv_querycap(struct file *file, void *priv,
cap->capabilities |= V4L2_CAP_RDS_CAPTURE;
if (btv->tuner_type != TUNER_ABSENT)
cap->capabilities |= V4L2_CAP_TUNER;
- if (vdev->vfl_type == VFL_TYPE_GRABBER)
- cap->device_caps = cap->capabilities &
- (V4L2_CAP_VIDEO_CAPTURE |
- V4L2_CAP_READWRITE |
- V4L2_CAP_STREAMING |
- V4L2_CAP_VIDEO_OVERLAY |
- V4L2_CAP_TUNER);
- else if (vdev->vfl_type == VFL_TYPE_VBI)
- cap->device_caps = cap->capabilities &
- (V4L2_CAP_VBI_CAPTURE |
- V4L2_CAP_READWRITE |
- V4L2_CAP_STREAMING |
- V4L2_CAP_TUNER);
- else {
- cap->device_caps = V4L2_CAP_RADIO | V4L2_CAP_TUNER;
- if (btv->has_saa6588)
- cap->device_caps |= V4L2_CAP_READWRITE |
- V4L2_CAP_RDS_CAPTURE;
- if (btv->has_tea575x)
- cap->device_caps |= V4L2_CAP_HW_FREQ_SEEK;
- }
return 0;
}
@@ -3939,6 +3917,12 @@ static int bttv_register_video(struct bttv *btv)
/* video */
vdev_init(btv, &btv->video_dev, &bttv_video_template, "video");
+ btv->video_dev.device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TUNER |
+ V4L2_CAP_READWRITE | V4L2_CAP_STREAMING;
+ if (btv->tuner_type != TUNER_ABSENT)
+ btv->video_dev.device_caps |= V4L2_CAP_TUNER;
+ if (no_overlay <= 0)
+ btv->video_dev.device_caps |= V4L2_CAP_VIDEO_OVERLAY;
if (video_register_device(&btv->video_dev, VFL_TYPE_GRABBER,
video_nr[btv->c.nr]) < 0)
@@ -3953,6 +3937,10 @@ static int bttv_register_video(struct bttv *btv)
/* vbi */
vdev_init(btv, &btv->vbi_dev, &bttv_video_template, "vbi");
+ btv->vbi_dev.device_caps = V4L2_CAP_VBI_CAPTURE | V4L2_CAP_READWRITE |
+ V4L2_CAP_STREAMING | V4L2_CAP_TUNER;
+ if (btv->tuner_type != TUNER_ABSENT)
+ btv->vbi_dev.device_caps |= V4L2_CAP_TUNER;
if (video_register_device(&btv->vbi_dev, VFL_TYPE_VBI,
vbi_nr[btv->c.nr]) < 0)
@@ -3964,6 +3952,12 @@ static int bttv_register_video(struct bttv *btv)
return 0;
/* radio */
vdev_init(btv, &btv->radio_dev, &radio_template, "radio");
+ btv->radio_dev.device_caps = V4L2_CAP_RADIO | V4L2_CAP_TUNER;
+ if (btv->has_saa6588)
+ btv->radio_dev.device_caps |= V4L2_CAP_READWRITE |
+ V4L2_CAP_RDS_CAPTURE;
+ if (btv->has_tea575x)
+ btv->radio_dev.device_caps |= V4L2_CAP_HW_FREQ_SEEK;
btv->radio_dev.ctrl_handler = &btv->radio_ctrl_handler;
if (video_register_device(&btv->radio_dev, VFL_TYPE_RADIO,
radio_nr[btv->c.nr]) < 0)
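The bttv changes above follow the tree-wide V4L2 rework in this series: per-node capabilities move out of querycap and into struct video_device::device_caps, filled in before video_register_device(), while querycap only reports the union of all nodes in cap->capabilities plus V4L2_CAP_DEVICE_CAPS and the core copies the node's device_caps into cap->device_caps itself. A minimal sketch of the registration side, with a hypothetical driver struct:

	#include <media/v4l2-dev.h>
	#include <linux/videodev2.h>

	/* Hypothetical driver state; only the registration pattern matters. */
	struct my_drv {
		struct video_device vdev;
		bool has_tuner;
	};

	static int my_register_video(struct my_drv *drv)
	{
		drv->vdev.device_caps = V4L2_CAP_VIDEO_CAPTURE |
					V4L2_CAP_READWRITE | V4L2_CAP_STREAMING;
		if (drv->has_tuner)
			drv->vdev.device_caps |= V4L2_CAP_TUNER;

		/* The V4L2 core now knows this node's caps; querycap only has
		 * to fill cap->capabilities with the union for the whole card. */
		return video_register_device(&drv->vdev, VFL_TYPE_GRABBER, -1);
	}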
diff --git a/drivers/media/pci/cobalt/Kconfig b/drivers/media/pci/cobalt/Kconfig
index 6c6c60abe9b1..e0e7df460a92 100644
--- a/drivers/media/pci/cobalt/Kconfig
+++ b/drivers/media/pci/cobalt/Kconfig
@@ -3,7 +3,7 @@ config VIDEO_COBALT
tristate "Cisco Cobalt support"
depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
depends on PCI_MSI && MTD_COMPLEX_MAPPINGS
- depends on GPIOLIB || COMPILE_TEST
+ depends on (GPIOLIB && DRM_I2C_ADV7511=n) || COMPILE_TEST
depends on SND
depends on MTD
select I2C_ALGOBIT
diff --git a/drivers/media/pci/cobalt/cobalt-v4l2.c b/drivers/media/pci/cobalt/cobalt-v4l2.c
index f9fa3a7c3b8f..39dabd4da60f 100644
--- a/drivers/media/pci/cobalt/cobalt-v4l2.c
+++ b/drivers/media/pci/cobalt/cobalt-v4l2.c
@@ -483,13 +483,8 @@ static int cobalt_querycap(struct file *file, void *priv_fh,
strscpy(vcap->card, "cobalt", sizeof(vcap->card));
snprintf(vcap->bus_info, sizeof(vcap->bus_info),
"PCIe:%s", pci_name(cobalt->pci_dev));
- vcap->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_READWRITE;
- if (s->is_output)
- vcap->device_caps |= V4L2_CAP_VIDEO_OUTPUT;
- else
- vcap->device_caps |= V4L2_CAP_VIDEO_CAPTURE;
- vcap->capabilities = vcap->device_caps | V4L2_CAP_DEVICE_CAPS |
- V4L2_CAP_VIDEO_CAPTURE;
+ vcap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_READWRITE |
+ V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_DEVICE_CAPS;
if (cobalt->have_hsma_tx)
vcap->capabilities |= V4L2_CAP_VIDEO_OUTPUT;
return 0;
@@ -1274,6 +1269,11 @@ static int cobalt_node_register(struct cobalt *cobalt, int node)
q->lock = &s->lock;
q->dev = &cobalt->pci_dev->dev;
vdev->queue = q;
+ vdev->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_READWRITE;
+ if (s->is_output)
+ vdev->device_caps |= V4L2_CAP_VIDEO_OUTPUT;
+ else
+ vdev->device_caps |= V4L2_CAP_VIDEO_CAPTURE;
video_set_drvdata(vdev, s);
ret = vb2_queue_init(q);
diff --git a/drivers/media/pci/cx18/cx18-ioctl.c b/drivers/media/pci/cx18/cx18-ioctl.c
index 9f5972f6d3a6..d9ffc9c359ca 100644
--- a/drivers/media/pci/cx18/cx18-ioctl.c
+++ b/drivers/media/pci/cx18/cx18-ioctl.c
@@ -385,16 +385,13 @@ static int cx18_querycap(struct file *file, void *fh,
struct v4l2_capability *vcap)
{
struct cx18_open_id *id = fh2id(fh);
- struct cx18_stream *s = video_drvdata(file);
struct cx18 *cx = id->cx;
strscpy(vcap->driver, CX18_DRIVER_NAME, sizeof(vcap->driver));
strscpy(vcap->card, cx->card_name, sizeof(vcap->card));
snprintf(vcap->bus_info, sizeof(vcap->bus_info),
"PCI:%s", pci_name(cx->pci_dev));
- vcap->capabilities = cx->v4l2_cap; /* capabilities */
- vcap->device_caps = s->v4l2_dev_caps; /* device capabilities */
- vcap->capabilities |= V4L2_CAP_DEVICE_CAPS;
+ vcap->capabilities = cx->v4l2_cap | V4L2_CAP_DEVICE_CAPS;
return 0;
}
diff --git a/drivers/media/pci/cx18/cx18-streams.c b/drivers/media/pci/cx18/cx18-streams.c
index 9805e50c2477..b79718519b9b 100644
--- a/drivers/media/pci/cx18/cx18-streams.c
+++ b/drivers/media/pci/cx18/cx18-streams.c
@@ -411,6 +411,7 @@ static int cx18_reg_dev(struct cx18 *cx, int type)
return 0;
num = s->video_dev.num;
+ s->video_dev.device_caps = s->v4l2_dev_caps; /* device capabilities */
/* card number + user defined offset + device offset */
if (type != CX18_ENC_STREAM_TYPE_MPG) {
struct cx18_stream *s_mpg = &cx->streams[CX18_ENC_STREAM_TYPE_MPG];
diff --git a/drivers/media/pci/cx23885/cx23885-417.c b/drivers/media/pci/cx23885/cx23885-417.c
index 8aa5f9b1498a..82f96a4091ac 100644
--- a/drivers/media/pci/cx23885/cx23885-417.c
+++ b/drivers/media/pci/cx23885/cx23885-417.c
@@ -1324,12 +1324,11 @@ static int vidioc_querycap(struct file *file, void *priv,
strscpy(cap->card, cx23885_boards[tsport->dev->board].name,
sizeof(cap->card));
sprintf(cap->bus_info, "PCIe:%s", pci_name(dev->pci));
- cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
- V4L2_CAP_STREAMING;
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
+ V4L2_CAP_STREAMING | V4L2_CAP_VBI_CAPTURE |
+ V4L2_CAP_AUDIO | V4L2_CAP_DEVICE_CAPS;
if (dev->tuner_type != TUNER_ABSENT)
- cap->device_caps |= V4L2_CAP_TUNER;
- cap->capabilities = cap->device_caps | V4L2_CAP_VBI_CAPTURE |
- V4L2_CAP_AUDIO | V4L2_CAP_DEVICE_CAPS;
+ cap->capabilities |= V4L2_CAP_TUNER;
return 0;
}
@@ -1542,6 +1541,10 @@ int cx23885_417_register(struct cx23885_dev *dev)
video_set_drvdata(dev->v4l_device, dev);
dev->v4l_device->lock = &dev->lock;
dev->v4l_device->queue = q;
+ dev->v4l_device->device_caps = V4L2_CAP_VIDEO_CAPTURE |
+ V4L2_CAP_READWRITE | V4L2_CAP_STREAMING;
+ if (dev->tuner_type != TUNER_ABSENT)
+ dev->v4l_device->device_caps |= V4L2_CAP_TUNER;
err = video_register_device(dev->v4l_device,
VFL_TYPE_GRABBER, -1);
if (err < 0) {
diff --git a/drivers/media/pci/cx23885/cx23885-dvb.c b/drivers/media/pci/cx23885/cx23885-dvb.c
index c9ef9ff7b0bd..4f386db33a11 100644
--- a/drivers/media/pci/cx23885/cx23885-dvb.c
+++ b/drivers/media/pci/cx23885/cx23885-dvb.c
@@ -2647,8 +2647,6 @@ int cx23885_dvb_register(struct cx23885_tsport *port)
dev->pci_bus,
dev->pci_slot);
- err = -ENODEV;
-
/* dvb stuff */
/* We have to init the queue for each frontend on a port. */
pr_info("%s: cx23885 based dvb card\n", dev->name);
diff --git a/drivers/media/pci/cx23885/cx23885-video.c b/drivers/media/pci/cx23885/cx23885-video.c
index 0c59ecccc38a..b254473db9a3 100644
--- a/drivers/media/pci/cx23885/cx23885-video.c
+++ b/drivers/media/pci/cx23885/cx23885-video.c
@@ -627,21 +627,17 @@ static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
struct cx23885_dev *dev = video_drvdata(file);
- struct video_device *vdev = video_devdata(file);
strscpy(cap->driver, "cx23885", sizeof(cap->driver));
strscpy(cap->card, cx23885_boards[dev->board].name,
sizeof(cap->card));
sprintf(cap->bus_info, "PCIe:%s", pci_name(dev->pci));
- cap->device_caps = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING | V4L2_CAP_AUDIO;
+ cap->capabilities = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING |
+ V4L2_CAP_AUDIO | V4L2_CAP_VBI_CAPTURE |
+ V4L2_CAP_VIDEO_CAPTURE |
+ V4L2_CAP_DEVICE_CAPS;
if (dev->tuner_type != TUNER_ABSENT)
- cap->device_caps |= V4L2_CAP_TUNER;
- if (vdev->vfl_type == VFL_TYPE_VBI)
- cap->device_caps |= V4L2_CAP_VBI_CAPTURE;
- else
- cap->device_caps |= V4L2_CAP_VIDEO_CAPTURE;
- cap->capabilities = cap->device_caps | V4L2_CAP_VBI_CAPTURE |
- V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_DEVICE_CAPS;
+ cap->capabilities |= V4L2_CAP_TUNER;
return 0;
}
@@ -1306,6 +1302,10 @@ int cx23885_video_register(struct cx23885_dev *dev)
dev->video_dev = cx23885_vdev_init(dev, dev->pci,
&cx23885_video_template, "video");
dev->video_dev->queue = &dev->vb2_vidq;
+ dev->video_dev->device_caps = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING |
+ V4L2_CAP_AUDIO | V4L2_CAP_VIDEO_CAPTURE;
+ if (dev->tuner_type != TUNER_ABSENT)
+ dev->video_dev->device_caps |= V4L2_CAP_TUNER;
err = video_register_device(dev->video_dev, VFL_TYPE_GRABBER,
video_nr[dev->nr]);
if (err < 0) {
@@ -1320,6 +1320,10 @@ int cx23885_video_register(struct cx23885_dev *dev)
dev->vbi_dev = cx23885_vdev_init(dev, dev->pci,
&cx23885_vbi_template, "vbi");
dev->vbi_dev->queue = &dev->vb2_vbiq;
+ dev->vbi_dev->device_caps = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING |
+ V4L2_CAP_AUDIO | V4L2_CAP_VBI_CAPTURE;
+ if (dev->tuner_type != TUNER_ABSENT)
+ dev->vbi_dev->device_caps |= V4L2_CAP_TUNER;
err = video_register_device(dev->vbi_dev, VFL_TYPE_VBI,
vbi_nr[dev->nr]);
if (err < 0) {
diff --git a/drivers/media/pci/cx25821/cx25821-video.c b/drivers/media/pci/cx25821/cx25821-video.c
index 1bb5dfc74e27..de7641170478 100644
--- a/drivers/media/pci/cx25821/cx25821-video.c
+++ b/drivers/media/pci/cx25821/cx25821-video.c
@@ -426,18 +426,13 @@ static int cx25821_vidioc_querycap(struct file *file, void *priv,
{
struct cx25821_channel *chan = video_drvdata(file);
struct cx25821_dev *dev = chan->dev;
- const u32 cap_input = V4L2_CAP_VIDEO_CAPTURE |
- V4L2_CAP_READWRITE | V4L2_CAP_STREAMING;
- const u32 cap_output = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_READWRITE;
strscpy(cap->driver, "cx25821", sizeof(cap->driver));
strscpy(cap->card, cx25821_boards[dev->board].name, sizeof(cap->card));
sprintf(cap->bus_info, "PCIe:%s", pci_name(dev->pci));
- if (chan->id >= VID_CHANNEL_NUM)
- cap->device_caps = cap_output;
- else
- cap->device_caps = cap_input;
- cap->capabilities = cap_input | cap_output | V4L2_CAP_DEVICE_CAPS;
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT |
+ V4L2_CAP_READWRITE | V4L2_CAP_STREAMING |
+ V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -624,6 +619,8 @@ static const struct video_device cx25821_video_device = {
.minor = -1,
.ioctl_ops = &video_ioctl_ops,
.tvnorms = CX25821_NORMS,
+ .device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
+ V4L2_CAP_STREAMING,
};
static const struct v4l2_file_operations video_out_fops = {
@@ -657,6 +654,7 @@ static const struct video_device cx25821_video_out_device = {
.minor = -1,
.ioctl_ops = &video_out_ioctl_ops,
.tvnorms = CX25821_NORMS,
+ .device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_READWRITE,
};
void cx25821_video_unregister(struct cx25821_dev *dev, int chan_num)
diff --git a/drivers/media/pci/cx88/cx88-alsa.c b/drivers/media/pci/cx88/cx88-alsa.c
index b4ad5d12054e..e1e71ae293ed 100644
--- a/drivers/media/pci/cx88/cx88-alsa.c
+++ b/drivers/media/pci/cx88/cx88-alsa.c
@@ -95,7 +95,7 @@ MODULE_PARM_DESC(index, "Index value for cx88x capture interface(s).");
MODULE_DESCRIPTION("ALSA driver module for cx2388x based TV cards");
MODULE_AUTHOR("Ricardo Cerqueira");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@kernel.org>");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
MODULE_VERSION(CX88_VERSION);
MODULE_SUPPORTED_DEVICE("{{Conexant,23881},{{Conexant,23882},{{Conexant,23883}");
diff --git a/drivers/media/pci/cx88/cx88-blackbird.c b/drivers/media/pci/cx88/cx88-blackbird.c
index 0a10c9d192f3..200d68827073 100644
--- a/drivers/media/pci/cx88/cx88-blackbird.c
+++ b/drivers/media/pci/cx88/cx88-blackbird.c
@@ -28,7 +28,7 @@
MODULE_DESCRIPTION("driver for cx2388x/cx23416 based mpeg encoder cards");
MODULE_AUTHOR("Jelle Foks <jelle@foks.us>, Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
MODULE_VERSION(CX88_VERSION);
static unsigned int debug;
@@ -1136,6 +1136,10 @@ static int blackbird_register_video(struct cx8802_dev *dev)
dev->mpeg_dev.ctrl_handler = &dev->cxhdl.hdl;
video_set_drvdata(&dev->mpeg_dev, dev);
dev->mpeg_dev.queue = &dev->vb2_mpegq;
+ dev->mpeg_dev.device_caps = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING |
+ V4L2_CAP_VIDEO_CAPTURE;
+ if (dev->core->board.tuner_type != UNSET)
+ dev->mpeg_dev.device_caps |= V4L2_CAP_TUNER;
err = video_register_device(&dev->mpeg_dev, VFL_TYPE_GRABBER, -1);
if (err < 0) {
pr_info("can't register mpeg device\n");
diff --git a/drivers/media/pci/cx88/cx88-core.c b/drivers/media/pci/cx88/cx88-core.c
index 8597cb8274ab..dcadf78657d6 100644
--- a/drivers/media/pci/cx88/cx88-core.c
+++ b/drivers/media/pci/cx88/cx88-core.c
@@ -31,7 +31,7 @@
MODULE_DESCRIPTION("v4l2 driver module for cx2388x based TV cards");
MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
/* ------------------------------------------------------------------ */
diff --git a/drivers/media/pci/cx88/cx88-i2c.c b/drivers/media/pci/cx88/cx88-i2c.c
index 50a9ae3fa596..7fc64aef1ef7 100644
--- a/drivers/media/pci/cx88/cx88-i2c.c
+++ b/drivers/media/pci/cx88/cx88-i2c.c
@@ -8,7 +8,6 @@
* & Marcus Metzler (mocm@thp.uni-koeln.de)
* (c) 2002 Yurij Sysoev <yurij@naturesoft.net>
* (c) 1999-2003 Gerd Knorr <kraxel@bytesex.org>
- *
* (c) 2005 Mauro Carvalho Chehab <mchehab@kernel.org>
* - Multituner support and i2c address binding
*/
diff --git a/drivers/media/pci/cx88/cx88-input.c b/drivers/media/pci/cx88/cx88-input.c
index 27f690b54e0c..589f52d961eb 100644
--- a/drivers/media/pci/cx88/cx88-input.c
+++ b/drivers/media/pci/cx88/cx88-input.c
@@ -167,14 +167,14 @@ static void cx88_ir_handle_key(struct cx88_IR *ir)
static enum hrtimer_restart cx88_ir_work(struct hrtimer *timer)
{
- unsigned long missed;
+ u64 missed;
struct cx88_IR *ir = container_of(timer, struct cx88_IR, timer);
cx88_ir_handle_key(ir);
missed = hrtimer_forward_now(&ir->timer,
ktime_set(0, ir->polling * 1000000));
if (missed > 1)
- ir_dprintk("Missed ticks %ld\n", missed - 1);
+ ir_dprintk("Missed ticks %llu\n", missed - 1);
return HRTIMER_RESTART;
}
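The cx88-input fix is purely about types: hrtimer_forward_now() returns the number of elapsed periods as u64, so the local variable and the %llu format specifier have to match. A minimal sketch of a self-rearming polling hrtimer along the same lines, with a hypothetical ir_poll container:

	#include <linux/hrtimer.h>
	#include <linux/kernel.h>
	#include <linux/ktime.h>
	#include <linux/printk.h>

	struct ir_poll {			/* hypothetical container */
		struct hrtimer timer;
		u32 polling_ms;
	};

	static enum hrtimer_restart ir_poll_work(struct hrtimer *timer)
	{
		struct ir_poll *ir = container_of(timer, struct ir_poll, timer);
		u64 missed;			/* u64, matching the return type */

		/* ... poll the hardware here ... */

		missed = hrtimer_forward_now(&ir->timer,
					     ms_to_ktime(ir->polling_ms));
		if (missed > 1)
			pr_debug("missed %llu polling ticks\n", missed - 1);

		return HRTIMER_RESTART;
	}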
diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
index 3b49ebb21b13..e59a74514c7c 100644
--- a/drivers/media/pci/cx88/cx88-video.c
+++ b/drivers/media/pci/cx88/cx88-video.c
@@ -33,7 +33,7 @@
MODULE_DESCRIPTION("v4l2 driver module for cx2388x based TV cards");
MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
MODULE_VERSION(CX88_VERSION);
/* ------------------------------------------------------------------ */
@@ -800,27 +800,12 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
int cx88_querycap(struct file *file, struct cx88_core *core,
struct v4l2_capability *cap)
{
- struct video_device *vdev = video_devdata(file);
-
strscpy(cap->card, core->board.name, sizeof(cap->card));
- cap->device_caps = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING;
+ cap->capabilities = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING |
+ V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VBI_CAPTURE |
+ V4L2_CAP_DEVICE_CAPS;
if (core->board.tuner_type != UNSET)
- cap->device_caps |= V4L2_CAP_TUNER;
- switch (vdev->vfl_type) {
- case VFL_TYPE_RADIO:
- cap->device_caps = V4L2_CAP_RADIO | V4L2_CAP_TUNER;
- break;
- case VFL_TYPE_GRABBER:
- cap->device_caps |= V4L2_CAP_VIDEO_CAPTURE;
- break;
- case VFL_TYPE_VBI:
- cap->device_caps |= V4L2_CAP_VBI_CAPTURE;
- break;
- default:
- return -EINVAL;
- }
- cap->capabilities = cap->device_caps | V4L2_CAP_VIDEO_CAPTURE |
- V4L2_CAP_VBI_CAPTURE | V4L2_CAP_DEVICE_CAPS;
+ cap->capabilities |= V4L2_CAP_TUNER;
if (core->board.radio.type == CX88_RADIO)
cap->capabilities |= V4L2_CAP_RADIO;
return 0;
@@ -1473,6 +1458,10 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
video_set_drvdata(&dev->video_dev, dev);
dev->video_dev.ctrl_handler = &core->video_hdl;
dev->video_dev.queue = &dev->vb2_vidq;
+ dev->video_dev.device_caps = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING |
+ V4L2_CAP_VIDEO_CAPTURE;
+ if (core->board.tuner_type != UNSET)
+ dev->video_dev.device_caps |= V4L2_CAP_TUNER;
err = video_register_device(&dev->video_dev, VFL_TYPE_GRABBER,
video_nr[core->nr]);
if (err < 0) {
@@ -1486,6 +1475,10 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
&cx8800_vbi_template, "vbi");
video_set_drvdata(&dev->vbi_dev, dev);
dev->vbi_dev.queue = &dev->vb2_vbiq;
+ dev->vbi_dev.device_caps = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING |
+ V4L2_CAP_VBI_CAPTURE;
+ if (core->board.tuner_type != UNSET)
+ dev->vbi_dev.device_caps |= V4L2_CAP_TUNER;
err = video_register_device(&dev->vbi_dev, VFL_TYPE_VBI,
vbi_nr[core->nr]);
if (err < 0) {
@@ -1500,6 +1493,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
&cx8800_radio_template, "radio");
video_set_drvdata(&dev->radio_dev, dev);
dev->radio_dev.ctrl_handler = &core->audio_hdl;
+ dev->radio_dev.device_caps = V4L2_CAP_RADIO | V4L2_CAP_TUNER;
err = video_register_device(&dev->radio_dev, VFL_TYPE_RADIO,
radio_nr[core->nr]);
if (err < 0) {
diff --git a/drivers/media/pci/ddbridge/Kconfig b/drivers/media/pci/ddbridge/Kconfig
index eaac91d14654..dab34fb85c09 100644
--- a/drivers/media/pci/ddbridge/Kconfig
+++ b/drivers/media/pci/ddbridge/Kconfig
@@ -36,7 +36,6 @@ config DVB_DDBRIDGE_MSIENABLE
bool "Enable Message Signaled Interrupts (MSI) per default (EXPERIMENTAL)"
depends on DVB_DDBRIDGE
depends on PCI_MSI
- default n
help
Use PCI MSI (Message Signaled Interrupts) per default. Enabling this
might lead to I2C errors originating from the bridge in conjunction
diff --git a/drivers/media/pci/dt3155/Kconfig b/drivers/media/pci/dt3155/Kconfig
index d678ced93f17..a3d24b8a719b 100644
--- a/drivers/media/pci/dt3155/Kconfig
+++ b/drivers/media/pci/dt3155/Kconfig
@@ -3,7 +3,6 @@ config VIDEO_DT3155
tristate "DT3155 frame grabber"
depends on PCI && VIDEO_DEV && VIDEO_V4L2
select VIDEOBUF2_DMA_CONTIG
- default n
help
Enables dt3155 device driver for the DataTranslation DT3155 frame grabber.
Say Y here if you have this hardware.
diff --git a/drivers/media/pci/dt3155/dt3155.c b/drivers/media/pci/dt3155/dt3155.c
index d6d29e61aae9..b4cdda50e742 100644
--- a/drivers/media/pci/dt3155/dt3155.c
+++ b/drivers/media/pci/dt3155/dt3155.c
@@ -297,9 +297,6 @@ static int dt3155_querycap(struct file *filp, void *p,
strscpy(cap->driver, DT3155_NAME, sizeof(cap->driver));
strscpy(cap->card, DT3155_NAME " frame grabber", sizeof(cap->card));
sprintf(cap->bus_info, "PCI:%s", pci_name(pd->pdev));
- cap->device_caps = V4L2_CAP_VIDEO_CAPTURE |
- V4L2_CAP_STREAMING | V4L2_CAP_READWRITE;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -490,6 +487,8 @@ static const struct video_device dt3155_vdev = {
.minor = -1,
.release = video_device_release_empty,
.tvnorms = V4L2_STD_ALL,
+ .device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
+ V4L2_CAP_READWRITE,
};
static int dt3155_probe(struct pci_dev *pdev, const struct pci_device_id *id)
diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.c b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
index 2a52a393fe74..c1d133e17e4b 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-cio2.c
+++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
@@ -1174,7 +1174,7 @@ static const struct v4l2_file_operations cio2_v4l2_fops = {
static const struct v4l2_ioctl_ops cio2_v4l2_ioctl_ops = {
.vidioc_querycap = cio2_v4l2_querycap,
- .vidioc_enum_fmt_vid_cap_mplane = cio2_v4l2_enum_fmt,
+ .vidioc_enum_fmt_vid_cap = cio2_v4l2_enum_fmt,
.vidioc_g_fmt_vid_cap_mplane = cio2_v4l2_g_fmt,
.vidioc_s_fmt_vid_cap_mplane = cio2_v4l2_s_fmt,
.vidioc_try_fmt_vid_cap_mplane = cio2_v4l2_try_fmt,
diff --git a/drivers/media/pci/ivtv/Kconfig b/drivers/media/pci/ivtv/Kconfig
index 079569955fb4..36c089103cf9 100644
--- a/drivers/media/pci/ivtv/Kconfig
+++ b/drivers/media/pci/ivtv/Kconfig
@@ -32,7 +32,6 @@ config VIDEO_IVTV
config VIDEO_IVTV_DEPRECATED_IOCTLS
bool "enable the DVB ioctls abuse on ivtv driver"
depends on VIDEO_IVTV
- default n
help
Enable the usage of a DVB set of ioctls that were abused by
the IVTV driver for a while.
@@ -77,7 +76,6 @@ config VIDEO_FB_IVTV
config VIDEO_FB_IVTV_FORCE_PAT
bool "force cx23415 framebuffer init with x86 PAT enabled"
depends on VIDEO_FB_IVTV && X86_PAT
- default n
help
With PAT enabled, the cx23415 framebuffer driver does not
utilize write-combined caching on the framebuffer memory.
diff --git a/drivers/media/pci/ivtv/ivtv-cards.h b/drivers/media/pci/ivtv/ivtv-cards.h
index 965def0cbfaa..f3e2c5634962 100644
--- a/drivers/media/pci/ivtv/ivtv-cards.h
+++ b/drivers/media/pci/ivtv/ivtv-cards.h
@@ -156,8 +156,7 @@
#define IVTV_CAP_ENCODER (V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TUNER | \
V4L2_CAP_AUDIO | V4L2_CAP_READWRITE | V4L2_CAP_VBI_CAPTURE | \
V4L2_CAP_SLICED_VBI_CAPTURE)
-#define IVTV_CAP_DECODER (V4L2_CAP_VIDEO_OUTPUT | \
- V4L2_CAP_SLICED_VBI_OUTPUT | V4L2_CAP_VIDEO_OUTPUT_OVERLAY)
+#define IVTV_CAP_DECODER (V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_SLICED_VBI_OUTPUT)
struct ivtv_card_video_input {
u8 video_type; /* video input type */
diff --git a/drivers/media/pci/ivtv/ivtv-ioctl.c b/drivers/media/pci/ivtv/ivtv-ioctl.c
index d1e358a2273e..5595f6a274e7 100644
--- a/drivers/media/pci/ivtv/ivtv-ioctl.c
+++ b/drivers/media/pci/ivtv/ivtv-ioctl.c
@@ -734,18 +734,11 @@ static int ivtv_querycap(struct file *file, void *fh, struct v4l2_capability *vc
{
struct ivtv_open_id *id = fh2id(file->private_data);
struct ivtv *itv = id->itv;
- struct ivtv_stream *s = &itv->streams[id->type];
strscpy(vcap->driver, IVTV_DRIVER_NAME, sizeof(vcap->driver));
strscpy(vcap->card, itv->card_name, sizeof(vcap->card));
snprintf(vcap->bus_info, sizeof(vcap->bus_info), "PCI:%s", pci_name(itv->pdev));
vcap->capabilities = itv->v4l2_cap | V4L2_CAP_DEVICE_CAPS;
- vcap->device_caps = s->caps;
- if ((s->caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY) &&
- !itv->osd_video_pbase) {
- vcap->capabilities &= ~V4L2_CAP_VIDEO_OUTPUT_OVERLAY;
- vcap->device_caps &= ~V4L2_CAP_VIDEO_OUTPUT_OVERLAY;
- }
return 0;
}
diff --git a/drivers/media/pci/ivtv/ivtv-streams.c b/drivers/media/pci/ivtv/ivtv-streams.c
index a641f20e3f86..f7de9118f609 100644
--- a/drivers/media/pci/ivtv/ivtv-streams.c
+++ b/drivers/media/pci/ivtv/ivtv-streams.c
@@ -139,8 +139,7 @@ static struct {
"decoder MPG",
VFL_TYPE_GRABBER, IVTV_V4L2_DEC_MPG_OFFSET,
PCI_DMA_TODEVICE, 0,
- V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_AUDIO | V4L2_CAP_READWRITE |
- V4L2_CAP_VIDEO_OUTPUT_OVERLAY,
+ V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_AUDIO | V4L2_CAP_READWRITE,
&ivtv_v4l2_dec_fops
},
{ /* IVTV_DEC_STREAM_TYPE_VBI */
@@ -161,8 +160,7 @@ static struct {
"decoder YUV",
VFL_TYPE_GRABBER, IVTV_V4L2_DEC_YUV_OFFSET,
PCI_DMA_TODEVICE, 0,
- V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_AUDIO | V4L2_CAP_READWRITE |
- V4L2_CAP_VIDEO_OUTPUT_OVERLAY,
+ V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_AUDIO | V4L2_CAP_READWRITE,
&ivtv_v4l2_dec_fops
}
};
@@ -301,6 +299,14 @@ static int ivtv_reg_dev(struct ivtv *itv, int type)
if (s_mpg->vdev.v4l2_dev)
num = s_mpg->vdev.num + ivtv_stream_info[type].num_offset;
}
+ s->vdev.device_caps = s->caps;
+ if (itv->osd_video_pbase) {
+ itv->streams[IVTV_DEC_STREAM_TYPE_YUV].vdev.device_caps |=
+ V4L2_CAP_VIDEO_OUTPUT_OVERLAY;
+ itv->streams[IVTV_DEC_STREAM_TYPE_MPG].vdev.device_caps |=
+ V4L2_CAP_VIDEO_OUTPUT_OVERLAY;
+ itv->v4l2_cap |= V4L2_CAP_VIDEO_OUTPUT_OVERLAY;
+ }
video_set_drvdata(&s->vdev, s);
/* Register device. First try the desired minor, then any free one. */
diff --git a/drivers/media/pci/ivtv/ivtvfb.c b/drivers/media/pci/ivtv/ivtvfb.c
index 66be490ec563..95a56cce9b65 100644
--- a/drivers/media/pci/ivtv/ivtvfb.c
+++ b/drivers/media/pci/ivtv/ivtvfb.c
@@ -1220,6 +1220,11 @@ static int ivtvfb_init_card(struct ivtv *itv)
/* Allocate DMA */
ivtv_udma_alloc(itv);
+ itv->streams[IVTV_DEC_STREAM_TYPE_YUV].vdev.device_caps |=
+ V4L2_CAP_VIDEO_OUTPUT_OVERLAY;
+ itv->streams[IVTV_DEC_STREAM_TYPE_MPG].vdev.device_caps |=
+ V4L2_CAP_VIDEO_OUTPUT_OVERLAY;
+ itv->v4l2_cap |= V4L2_CAP_VIDEO_OUTPUT_OVERLAY;
return 0;
}
@@ -1246,11 +1251,12 @@ static int ivtvfb_callback_cleanup(struct device *dev, void *p)
struct osd_info *oi = itv->osd_info;
if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) {
- if (unregister_framebuffer(&itv->osd_info->ivtvfb_info)) {
- IVTVFB_WARN("Framebuffer %d is in use, cannot unload\n",
- itv->instance);
- return 0;
- }
+ itv->streams[IVTV_DEC_STREAM_TYPE_YUV].vdev.device_caps &=
+ ~V4L2_CAP_VIDEO_OUTPUT_OVERLAY;
+ itv->streams[IVTV_DEC_STREAM_TYPE_MPG].vdev.device_caps &=
+ ~V4L2_CAP_VIDEO_OUTPUT_OVERLAY;
+ itv->v4l2_cap &= ~V4L2_CAP_VIDEO_OUTPUT_OVERLAY;
+ unregister_framebuffer(&itv->osd_info->ivtvfb_info);
IVTVFB_INFO("Unregister framebuffer %d\n", itv->instance);
itv->ivtvfb_restore = NULL;
ivtvfb_blank(FB_BLANK_VSYNC_SUSPEND, &oi->ivtvfb_info);
diff --git a/drivers/media/pci/meye/Kconfig b/drivers/media/pci/meye/Kconfig
index b0ba78abbdbb..b37da612dd0c 100644
--- a/drivers/media/pci/meye/Kconfig
+++ b/drivers/media/pci/meye/Kconfig
@@ -2,7 +2,8 @@
config VIDEO_MEYE
tristate "Sony Vaio Picturebook Motion Eye Video For Linux"
depends on PCI && VIDEO_V4L2
- depends on SONY_LAPTOP || COMPILE_TEST
+ depends on SONY_LAPTOP
+ depends on X86 || COMPILE_TEST
help
This is the video4linux driver for the Motion Eye camera found
in the Vaio Picturebook laptops. Please read the material in
diff --git a/drivers/media/pci/meye/meye.c b/drivers/media/pci/meye/meye.c
index bbe91b0f2565..8218810c899e 100644
--- a/drivers/media/pci/meye/meye.c
+++ b/drivers/media/pci/meye/meye.c
@@ -1013,11 +1013,6 @@ static int vidioc_querycap(struct file *file, void *fh,
strscpy(cap->driver, "meye", sizeof(cap->driver));
strscpy(cap->card, "meye", sizeof(cap->card));
sprintf(cap->bus_info, "PCI:%s", pci_name(meye.mchip_dev));
-
- cap->device_caps = V4L2_CAP_VIDEO_CAPTURE |
- V4L2_CAP_STREAMING;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
-
return 0;
}
@@ -1529,6 +1524,7 @@ static const struct video_device meye_template = {
.fops = &meye_fops,
.ioctl_ops = &meye_ioctl_ops,
.release = video_device_release_empty,
+ .device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING,
};
static const struct v4l2_ctrl_ops meye_ctrl_ops = {
diff --git a/drivers/media/pci/saa7134/saa7134-core.c b/drivers/media/pci/saa7134/saa7134-core.c
index fa9a0ead46d5..2d582c02adbf 100644
--- a/drivers/media/pci/saa7134/saa7134-core.c
+++ b/drivers/media/pci/saa7134/saa7134-core.c
@@ -1206,6 +1206,14 @@ static int saa7134_initdev(struct pci_dev *pci_dev,
dev->video_dev->ctrl_handler = &dev->ctrl_handler;
dev->video_dev->lock = &dev->lock;
dev->video_dev->queue = &dev->video_vbq;
+ dev->video_dev->device_caps = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING |
+ V4L2_CAP_VIDEO_CAPTURE;
+ if (dev->tuner_type != TUNER_ABSENT && dev->tuner_type != UNSET)
+ dev->video_dev->device_caps |= V4L2_CAP_TUNER;
+
+ if (saa7134_no_overlay <= 0)
+ dev->video_dev->device_caps |= V4L2_CAP_VIDEO_OVERLAY;
+
err = video_register_device(dev->video_dev,VFL_TYPE_GRABBER,
video_nr[dev->nr]);
if (err < 0) {
@@ -1220,6 +1228,10 @@ static int saa7134_initdev(struct pci_dev *pci_dev,
dev->vbi_dev->ctrl_handler = &dev->ctrl_handler;
dev->vbi_dev->lock = &dev->lock;
dev->vbi_dev->queue = &dev->vbi_vbq;
+ dev->vbi_dev->device_caps = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING |
+ V4L2_CAP_VBI_CAPTURE;
+ if (dev->tuner_type != TUNER_ABSENT && dev->tuner_type != UNSET)
+ dev->vbi_dev->device_caps |= V4L2_CAP_TUNER;
err = video_register_device(dev->vbi_dev,VFL_TYPE_VBI,
vbi_nr[dev->nr]);
@@ -1232,6 +1244,9 @@ static int saa7134_initdev(struct pci_dev *pci_dev,
dev->radio_dev = vdev_init(dev,&saa7134_radio_template,"radio");
dev->radio_dev->ctrl_handler = &dev->radio_ctrl_handler;
dev->radio_dev->lock = &dev->lock;
+ dev->radio_dev->device_caps = V4L2_CAP_RADIO | V4L2_CAP_TUNER;
+ if (dev->has_rds)
+ dev->radio_dev->device_caps |= V4L2_CAP_RDS_CAPTURE;
err = video_register_device(dev->radio_dev,VFL_TYPE_RADIO,
radio_nr[dev->nr]);
if (err < 0)
diff --git a/drivers/media/pci/saa7134/saa7134-empress.c b/drivers/media/pci/saa7134/saa7134-empress.c
index 17eafaa5bf02..1a41a56afec6 100644
--- a/drivers/media/pci/saa7134/saa7134-empress.c
+++ b/drivers/media/pci/saa7134/saa7134-empress.c
@@ -287,6 +287,10 @@ static int empress_init(struct saa7134_dev *dev)
if (err)
return err;
dev->empress_dev->queue = q;
+ dev->empress_dev->device_caps = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING |
+ V4L2_CAP_VIDEO_CAPTURE;
+ if (dev->tuner_type != TUNER_ABSENT && dev->tuner_type != UNSET)
+ dev->empress_dev->device_caps |= V4L2_CAP_TUNER;
video_set_drvdata(dev->empress_dev, dev);
err = video_register_device(dev->empress_dev,VFL_TYPE_GRABBER,
diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
index 89c1271476c7..606df51bb636 100644
--- a/drivers/media/pci/saa7134/saa7134-video.c
+++ b/drivers/media/pci/saa7134/saa7134-video.c
@@ -1489,50 +1489,20 @@ int saa7134_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
struct saa7134_dev *dev = video_drvdata(file);
- struct video_device *vdev = video_devdata(file);
- u32 radio_caps, video_caps, vbi_caps;
-
- unsigned int tuner_type = dev->tuner_type;
strscpy(cap->driver, "saa7134", sizeof(cap->driver));
strscpy(cap->card, saa7134_boards[dev->board].name,
sizeof(cap->card));
sprintf(cap->bus_info, "PCI:%s", pci_name(dev->pci));
-
- cap->device_caps = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING;
- if ((tuner_type != TUNER_ABSENT) && (tuner_type != UNSET))
- cap->device_caps |= V4L2_CAP_TUNER;
-
- radio_caps = V4L2_CAP_RADIO;
+ cap->capabilities = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING |
+ V4L2_CAP_RADIO | V4L2_CAP_VIDEO_CAPTURE |
+ V4L2_CAP_VBI_CAPTURE | V4L2_CAP_DEVICE_CAPS;
+ if (dev->tuner_type != TUNER_ABSENT && dev->tuner_type != UNSET)
+ cap->capabilities |= V4L2_CAP_TUNER;
if (dev->has_rds)
- radio_caps |= V4L2_CAP_RDS_CAPTURE;
-
- video_caps = V4L2_CAP_VIDEO_CAPTURE;
- if (saa7134_no_overlay <= 0 && !is_empress(file))
- video_caps |= V4L2_CAP_VIDEO_OVERLAY;
-
- vbi_caps = V4L2_CAP_VBI_CAPTURE;
-
- switch (vdev->vfl_type) {
- case VFL_TYPE_RADIO:
- cap->device_caps |= radio_caps;
- break;
- case VFL_TYPE_GRABBER:
- cap->device_caps |= video_caps;
- break;
- case VFL_TYPE_VBI:
- cap->device_caps |= vbi_caps;
- break;
- default:
- return -EINVAL;
- }
- cap->capabilities = radio_caps | video_caps | vbi_caps |
- cap->device_caps | V4L2_CAP_DEVICE_CAPS;
- if (vdev->vfl_type == VFL_TYPE_RADIO) {
- cap->device_caps &= ~V4L2_CAP_STREAMING;
- if (!dev->has_rds)
- cap->device_caps &= ~V4L2_CAP_READWRITE;
- }
+ cap->capabilities |= V4L2_CAP_RDS_CAPTURE;
+ if (saa7134_no_overlay <= 0)
+ cap->capabilities |= V4L2_CAP_VIDEO_OVERLAY;
return 0;
}
diff --git a/drivers/media/pci/saa7164/saa7164-core.c b/drivers/media/pci/saa7164/saa7164-core.c
index c594aff92e70..9ae04e18e6c6 100644
--- a/drivers/media/pci/saa7164/saa7164-core.c
+++ b/drivers/media/pci/saa7164/saa7164-core.c
@@ -1112,16 +1112,25 @@ static int saa7164_proc_show(struct seq_file *m, void *v)
return 0;
}
+static struct proc_dir_entry *saa7164_pe;
+
static int saa7164_proc_create(void)
{
- struct proc_dir_entry *pe;
-
- pe = proc_create_single("saa7164", S_IRUGO, NULL, saa7164_proc_show);
- if (!pe)
+ saa7164_pe = proc_create_single("saa7164", 0444, NULL, saa7164_proc_show);
+ if (!saa7164_pe)
return -ENOMEM;
return 0;
}
+
+static void saa7164_proc_destroy(void)
+{
+ if (saa7164_pe)
+ remove_proc_entry("saa7164", NULL);
+}
+#else
+static int saa7164_proc_create(void) { return 0; }
+static void saa7164_proc_destroy(void) {}
#endif
static int saa7164_thread_function(void *data)
@@ -1493,19 +1502,21 @@ static struct pci_driver saa7164_pci_driver = {
static int __init saa7164_init(void)
{
- printk(KERN_INFO "saa7164 driver loaded\n");
+ int ret = pci_register_driver(&saa7164_pci_driver);
+
+ if (ret)
+ return ret;
-#ifdef CONFIG_PROC_FS
saa7164_proc_create();
-#endif
- return pci_register_driver(&saa7164_pci_driver);
+
+ pr_info("saa7164 driver loaded\n");
+
+ return 0;
}
static void __exit saa7164_fini(void)
{
-#ifdef CONFIG_PROC_FS
- remove_proc_entry("saa7164", NULL);
-#endif
+ saa7164_proc_destroy();
pci_unregister_driver(&saa7164_pci_driver);
}
diff --git a/drivers/media/pci/saa7164/saa7164-encoder.c b/drivers/media/pci/saa7164/saa7164-encoder.c
index dcfabad8b284..43fdaa2d32bd 100644
--- a/drivers/media/pci/saa7164/saa7164-encoder.c
+++ b/drivers/media/pci/saa7164/saa7164-encoder.c
@@ -491,16 +491,9 @@ static int vidioc_querycap(struct file *file, void *priv,
strscpy(cap->card, saa7164_boards[dev->board].name,
sizeof(cap->card));
sprintf(cap->bus_info, "PCI:%s", pci_name(dev->pci));
-
- cap->device_caps =
- V4L2_CAP_VIDEO_CAPTURE |
- V4L2_CAP_READWRITE |
- V4L2_CAP_TUNER;
-
- cap->capabilities = cap->device_caps |
- V4L2_CAP_VBI_CAPTURE |
- V4L2_CAP_DEVICE_CAPS;
-
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
+ V4L2_CAP_TUNER | V4L2_CAP_VBI_CAPTURE |
+ V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -973,6 +966,8 @@ static struct video_device saa7164_mpeg_template = {
.ioctl_ops = &mpeg_ioctl_ops,
.minor = -1,
.tvnorms = SAA7164_NORMS,
+ .device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
+ V4L2_CAP_TUNER,
};
static struct video_device *saa7164_encoder_alloc(
diff --git a/drivers/media/pci/saa7164/saa7164-vbi.c b/drivers/media/pci/saa7164/saa7164-vbi.c
index 154a04d17ce5..49d61a64c8cb 100644
--- a/drivers/media/pci/saa7164/saa7164-vbi.c
+++ b/drivers/media/pci/saa7164/saa7164-vbi.c
@@ -202,16 +202,9 @@ static int vidioc_querycap(struct file *file, void *priv,
strscpy(cap->card, saa7164_boards[dev->board].name,
sizeof(cap->card));
sprintf(cap->bus_info, "PCI:%s", pci_name(dev->pci));
-
- cap->device_caps =
- V4L2_CAP_VBI_CAPTURE |
- V4L2_CAP_READWRITE |
- V4L2_CAP_TUNER;
-
- cap->capabilities = cap->device_caps |
- V4L2_CAP_VIDEO_CAPTURE |
- V4L2_CAP_DEVICE_CAPS;
-
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
+ V4L2_CAP_TUNER | V4L2_CAP_VBI_CAPTURE |
+ V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -675,6 +668,8 @@ static struct video_device saa7164_vbi_template = {
.ioctl_ops = &vbi_ioctl_ops,
.minor = -1,
.tvnorms = SAA7164_NORMS,
+ .device_caps = V4L2_CAP_VBI_CAPTURE | V4L2_CAP_READWRITE |
+ V4L2_CAP_TUNER,
};
static struct video_device *saa7164_vbi_alloc(
diff --git a/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c b/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c
index 73698cc26dd5..609100a46ff8 100644
--- a/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c
+++ b/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c
@@ -771,9 +771,6 @@ static int solo_enc_querycap(struct file *file, void *priv,
solo_enc->ch);
snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI:%s",
pci_name(solo_dev->pdev));
- cap->device_caps = V4L2_CAP_VIDEO_CAPTURE |
- V4L2_CAP_READWRITE | V4L2_CAP_STREAMING;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -1191,6 +1188,8 @@ static const struct video_device solo_enc_template = {
.minor = -1,
.release = video_device_release,
.tvnorms = V4L2_STD_NTSC_M | V4L2_STD_PAL,
+ .device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
+ V4L2_CAP_STREAMING,
};
static const struct v4l2_ctrl_ops solo_ctrl_ops = {
diff --git a/drivers/media/pci/solo6x10/solo6x10-v4l2.c b/drivers/media/pci/solo6x10/solo6x10-v4l2.c
index 1ce431af8fc6..a968f75920b5 100644
--- a/drivers/media/pci/solo6x10/solo6x10-v4l2.c
+++ b/drivers/media/pci/solo6x10/solo6x10-v4l2.c
@@ -378,9 +378,6 @@ static int solo_querycap(struct file *file, void *priv,
strscpy(cap->card, "Softlogic 6x10", sizeof(cap->card));
snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI:%s",
pci_name(solo_dev->pdev));
- cap->device_caps = V4L2_CAP_VIDEO_CAPTURE |
- V4L2_CAP_READWRITE | V4L2_CAP_STREAMING;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -628,6 +625,8 @@ static const struct video_device solo_v4l2_template = {
.minor = -1,
.release = video_device_release,
.tvnorms = V4L2_STD_NTSC_M | V4L2_STD_PAL,
+ .device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
+ V4L2_CAP_STREAMING,
};
static const struct v4l2_ctrl_ops solo_ctrl_ops = {
diff --git a/drivers/media/pci/sta2x11/sta2x11_vip.c b/drivers/media/pci/sta2x11/sta2x11_vip.c
index 9de5b2a35519..e52e29814378 100644
--- a/drivers/media/pci/sta2x11/sta2x11_vip.c
+++ b/drivers/media/pci/sta2x11/sta2x11_vip.c
@@ -407,10 +407,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strscpy(cap->card, KBUILD_MODNAME, sizeof(cap->card));
snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI:%s",
pci_name(vip->pdev));
- cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
- V4L2_CAP_STREAMING;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
-
return 0;
}
@@ -759,6 +755,8 @@ static const struct video_device video_dev_template = {
.fops = &vip_fops,
.ioctl_ops = &vip_ioctl_ops,
.tvnorms = V4L2_STD_ALL,
+ .device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
+ V4L2_CAP_STREAMING,
};
/**
diff --git a/drivers/media/pci/ttpci/Kconfig b/drivers/media/pci/ttpci/Kconfig
index d96d4fa20457..8a362ee9105f 100644
--- a/drivers/media/pci/ttpci/Kconfig
+++ b/drivers/media/pci/ttpci/Kconfig
@@ -1,13 +1,14 @@
# SPDX-License-Identifier: GPL-2.0-only
config DVB_AV7110_IR
bool
+ depends on RC_CORE=y || RC_CORE = DVB_AV7110
+ default DVB_AV7110
config DVB_AV7110
tristate "AV7110 cards"
depends on DVB_CORE && PCI && I2C
select TTPCI_EEPROM
select VIDEO_SAA7146_VV
- select DVB_AV7110_IR if INPUT_EVDEV=y || INPUT_EVDEV=DVB_AV7110
depends on VIDEO_DEV # dependencies of VIDEO_SAA7146_VV
select DVB_VES1820 if MEDIA_SUBDRV_AUTOSELECT
select DVB_VES1X93 if MEDIA_SUBDRV_AUTOSELECT
diff --git a/drivers/media/pci/ttpci/av7110.c b/drivers/media/pci/ttpci/av7110.c
index e6ee23544a6e..d0cdee1c6eb0 100644
--- a/drivers/media/pci/ttpci/av7110.c
+++ b/drivers/media/pci/ttpci/av7110.c
@@ -218,7 +218,7 @@ static void recover_arm(struct av7110 *av7110)
restart_feeds(av7110);
#if IS_ENABLED(CONFIG_DVB_AV7110_IR)
- av7110_check_ir_config(av7110, true);
+ av7110_set_ir_config(av7110);
#endif
}
@@ -250,10 +250,6 @@ static int arm_thread(void *data)
if (!av7110->arm_ready)
continue;
-#if IS_ENABLED(CONFIG_DVB_AV7110_IR)
- av7110_check_ir_config(av7110, false);
-#endif
-
if (mutex_lock_interruptible(&av7110->dcomlock))
break;
newloops = rdebi(av7110, DEBINOSWAP, STATUS_LOOPS, 0, 2);
@@ -659,9 +655,11 @@ static void gpioirq(unsigned long cookie)
return;
case DATA_IRCOMMAND:
- if (av7110->ir.ir_handler)
- av7110->ir.ir_handler(av7110,
- swahw32(irdebi(av7110, DEBINOSWAP, Reserved, 0, 4)));
+#if IS_ENABLED(CONFIG_DVB_AV7110_IR)
+ av7110_ir_handler(av7110,
+ swahw32(irdebi(av7110, DEBINOSWAP, Reserved,
+ 0, 4)));
+#endif
iwdebi(av7110, DEBINOSWAP, RX_BUFF, 0, 2);
break;
diff --git a/drivers/media/pci/ttpci/av7110.h b/drivers/media/pci/ttpci/av7110.h
index 8606ef5ebbe2..809d938ae166 100644
--- a/drivers/media/pci/ttpci/av7110.h
+++ b/drivers/media/pci/ttpci/av7110.h
@@ -81,23 +81,11 @@ struct av7110;
/* infrared remote control */
struct infrared {
- u16 key_map[256];
- struct input_dev *input_dev;
+ struct rc_dev *rcdev;
char input_phys[32];
- struct timer_list keyup_timer;
- struct tasklet_struct ir_tasklet;
- void (*ir_handler)(struct av7110 *av7110, u32 ircom);
- u32 ir_command;
u32 ir_config;
- u32 device_mask;
- u8 protocol;
- u8 inversion;
- u16 last_key;
- u16 last_toggle;
- bool keypressed;
};
-
/* place to store all the necessary device information */
struct av7110 {
@@ -304,9 +292,10 @@ struct av7110 {
extern int ChangePIDs(struct av7110 *av7110, u16 vpid, u16 apid, u16 ttpid,
u16 subpid, u16 pcrpid);
-extern int av7110_check_ir_config(struct av7110 *av7110, int force);
-extern int av7110_ir_init(struct av7110 *av7110);
-extern void av7110_ir_exit(struct av7110 *av7110);
+void av7110_ir_handler(struct av7110 *av7110, u32 ircom);
+int av7110_set_ir_config(struct av7110 *av7110);
+int av7110_ir_init(struct av7110 *av7110);
+void av7110_ir_exit(struct av7110 *av7110);
/* msp3400 i2c subaddresses */
#define MSP_WR_DEM 0x10
diff --git a/drivers/media/pci/ttpci/av7110_ir.c b/drivers/media/pci/ttpci/av7110_ir.c
index dfa18878e5f0..432789a3c312 100644
--- a/drivers/media/pci/ttpci/av7110_ir.c
+++ b/drivers/media/pci/ttpci/av7110_ir.c
@@ -4,379 +4,156 @@
*
* Copyright (C) 1999-2003 Holger Waechtler <holger@convergence.de>
* Copyright (C) 2003-2007 Oliver Endriss <o.endriss@gmx.de>
+ * Copyright (C) 2019 Sean Young <sean@mess.org>
*/
-
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/proc_fs.h>
#include <linux/kernel.h>
-#include <linux/bitops.h>
+#include <media/rc-core.h>
#include "av7110.h"
#include "av7110_hw.h"
-
-#define AV_CNT 4
-
#define IR_RC5 0
#define IR_RCMM 1
#define IR_RC5_EXT 2 /* internal only */
-#define IR_ALL 0xffffffff
-
-#define UP_TIMEOUT (HZ*7/25)
-
-
-/* Note: enable ir debugging by or'ing debug with 16 */
-
-static int ir_protocol[AV_CNT] = { IR_RCMM, IR_RCMM, IR_RCMM, IR_RCMM};
-module_param_array(ir_protocol, int, NULL, 0644);
-MODULE_PARM_DESC(ir_protocol, "Infrared protocol: 0 RC5, 1 RCMM (default)");
-
-static int ir_inversion[AV_CNT];
-module_param_array(ir_inversion, int, NULL, 0644);
-MODULE_PARM_DESC(ir_inversion, "Inversion of infrared signal: 0 not inverted (default), 1 inverted");
-
-static uint ir_device_mask[AV_CNT] = { IR_ALL, IR_ALL, IR_ALL, IR_ALL };
-module_param_array(ir_device_mask, uint, NULL, 0644);
-MODULE_PARM_DESC(ir_device_mask, "Bitmask of infrared devices: bit 0..31 = device 0..31 (default: all)");
-
-
-static int av_cnt;
-static struct av7110 *av_list[AV_CNT];
-
-static u16 default_key_map [256] = {
- KEY_0, KEY_1, KEY_2, KEY_3, KEY_4, KEY_5, KEY_6, KEY_7,
- KEY_8, KEY_9, KEY_BACK, 0, KEY_POWER, KEY_MUTE, 0, KEY_INFO,
- KEY_VOLUMEUP, KEY_VOLUMEDOWN, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- KEY_CHANNELUP, KEY_CHANNELDOWN, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, KEY_TEXT, 0, 0, KEY_TV, 0, 0, 0, 0, 0, KEY_SETUP, 0, 0,
- 0, 0, 0, KEY_SUBTITLE, 0, 0, KEY_LANGUAGE, 0,
- KEY_RADIO, 0, 0, 0, 0, KEY_EXIT, 0, 0,
- KEY_UP, KEY_DOWN, KEY_LEFT, KEY_RIGHT, KEY_OK, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, KEY_RED, KEY_GREEN, KEY_YELLOW,
- KEY_BLUE, 0, 0, 0, 0, 0, 0, 0, KEY_MENU, KEY_LIST, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, KEY_UP, KEY_UP, KEY_DOWN, KEY_DOWN,
- 0, 0, 0, 0, KEY_EPG, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, KEY_VCR
-};
-
-
-/* key-up timer */
-static void av7110_emit_keyup(struct timer_list *t)
-{
- struct infrared *ir = from_timer(ir, t, keyup_timer);
-
- if (!ir || !ir->keypressed)
- return;
-
- input_report_key(ir->input_dev, ir->last_key, 0);
- input_sync(ir->input_dev);
- ir->keypressed = false;
-}
-
-
-/* tasklet */
-static void av7110_emit_key(unsigned long parm)
+/* interrupt handler */
+void av7110_ir_handler(struct av7110 *av7110, u32 ircom)
{
- struct infrared *ir = (struct infrared *) parm;
- u32 ircom = ir->ir_command;
- u8 data;
- u8 addr;
- u16 toggle;
- u16 keycode;
-
- /* extract device address and data */
- switch (ir->protocol) {
- case IR_RC5: /* RC5: 5 bits device address, 6 bits data */
- data = ircom & 0x3f;
- addr = (ircom >> 6) & 0x1f;
- toggle = ircom & 0x0800;
- break;
+ struct rc_dev *rcdev = av7110->ir.rcdev;
+ enum rc_proto proto;
+ u32 command, addr, scancode;
+ u32 toggle;
- case IR_RCMM: /* RCMM: ? bits device address, ? bits data */
- data = ircom & 0xff;
- addr = (ircom >> 8) & 0x1f;
- toggle = ircom & 0x8000;
- break;
-
- case IR_RC5_EXT: /* extended RC5: 5 bits device address, 7 bits data */
- data = ircom & 0x3f;
- addr = (ircom >> 6) & 0x1f;
- /* invert 7th data bit for backward compatibility with RC5 keymaps */
- if (!(ircom & 0x1000))
- data |= 0x40;
- toggle = ircom & 0x0800;
- break;
-
- default:
- printk("%s invalid protocol %x\n", __func__, ir->protocol);
- return;
- }
-
- input_event(ir->input_dev, EV_MSC, MSC_RAW, (addr << 16) | data);
- input_event(ir->input_dev, EV_MSC, MSC_SCAN, data);
-
- keycode = ir->key_map[data];
-
- dprintk(16, "%s: code %08x -> addr %i data 0x%02x -> keycode %i\n",
- __func__, ircom, addr, data, keycode);
-
- /* check device address */
- if (!(ir->device_mask & (1 << addr)))
- return;
-
- if (!keycode) {
- printk ("%s: code %08x -> addr %i data 0x%02x -> unknown key!\n",
- __func__, ircom, addr, data);
- return;
- }
-
- if (ir->keypressed &&
- (ir->last_key != keycode || toggle != ir->last_toggle))
- input_event(ir->input_dev, EV_KEY, ir->last_key, 0);
-
- input_event(ir->input_dev, EV_KEY, keycode, 1);
- input_sync(ir->input_dev);
-
- ir->keypressed = true;
- ir->last_key = keycode;
- ir->last_toggle = toggle;
-
- mod_timer(&ir->keyup_timer, jiffies + UP_TIMEOUT);
-}
-
-
-/* register with input layer */
-static void input_register_keys(struct infrared *ir)
-{
- int i;
+ dprintk(4, "ir command = %08x\n", ircom);
- set_bit(EV_KEY, ir->input_dev->evbit);
- set_bit(EV_REP, ir->input_dev->evbit);
- set_bit(EV_MSC, ir->input_dev->evbit);
+ if (rcdev) {
+ switch (av7110->ir.ir_config) {
+ case IR_RC5: /* RC5: 5 bits device address, 6 bits command */
+ command = ircom & 0x3f;
+ addr = (ircom >> 6) & 0x1f;
+ scancode = RC_SCANCODE_RC5(addr, command);
+ toggle = ircom & 0x0800;
+ proto = RC_PROTO_RC5;
+ break;
- set_bit(MSC_RAW, ir->input_dev->mscbit);
- set_bit(MSC_SCAN, ir->input_dev->mscbit);
+ case IR_RCMM: /* RCMM: ? bits device address, ? bits command */
+ command = ircom & 0xff;
+ addr = (ircom >> 8) & 0x1f;
+ scancode = ircom;
+ toggle = ircom & 0x8000;
+ proto = RC_PROTO_UNKNOWN;
+ break;
- memset(ir->input_dev->keybit, 0, sizeof(ir->input_dev->keybit));
+ case IR_RC5_EXT:
+ /*
+ * extended RC5: 5 bits device address, 7 bits command
+ *
+ * Extended RC5 uses only one start bit. The second
+ * start bit is re-used, inverted, as bit 6 of the command.
+ */
+ command = ircom & 0x3f;
+ addr = (ircom >> 6) & 0x1f;
+ if (!(ircom & 0x1000))
+ command |= 0x40;
+ scancode = RC_SCANCODE_RC5(addr, command);
+ toggle = ircom & 0x0800;
+ proto = RC_PROTO_RC5;
+ break;
+ default:
+ dprintk(2, "unknown ir config %d\n",
+ av7110->ir.ir_config);
+ return;
+ }
- for (i = 0; i < ARRAY_SIZE(ir->key_map); i++) {
- if (ir->key_map[i] > KEY_MAX)
- ir->key_map[i] = 0;
- else if (ir->key_map[i] > KEY_RESERVED)
- set_bit(ir->key_map[i], ir->input_dev->keybit);
+ rc_keydown(rcdev, proto, scancode, toggle != 0);
}
-
- ir->input_dev->keycode = ir->key_map;
- ir->input_dev->keycodesize = sizeof(ir->key_map[0]);
- ir->input_dev->keycodemax = ARRAY_SIZE(ir->key_map);
}
-/* check for configuration changes */
-int av7110_check_ir_config(struct av7110 *av7110, int force)
+int av7110_set_ir_config(struct av7110 *av7110)
{
- int i;
- int modified = force;
- int ret = -ENODEV;
-
- for (i = 0; i < av_cnt; i++)
- if (av7110 == av_list[i])
- break;
-
- if (i < av_cnt && av7110) {
- if ((av7110->ir.protocol & 1) != ir_protocol[i] ||
- av7110->ir.inversion != ir_inversion[i])
- modified = true;
-
- if (modified) {
- /* protocol */
- if (ir_protocol[i]) {
- ir_protocol[i] = 1;
- av7110->ir.protocol = IR_RCMM;
- av7110->ir.ir_config = 0x0001;
- } else if (FW_VERSION(av7110->arm_app) >= 0x2620) {
- av7110->ir.protocol = IR_RC5_EXT;
- av7110->ir.ir_config = 0x0002;
- } else {
- av7110->ir.protocol = IR_RC5;
- av7110->ir.ir_config = 0x0000;
- }
- /* inversion */
- if (ir_inversion[i]) {
- ir_inversion[i] = 1;
- av7110->ir.ir_config |= 0x8000;
- }
- av7110->ir.inversion = ir_inversion[i];
- /* update ARM */
- ret = av7110_fw_cmd(av7110, COMTYPE_PIDFILTER, SetIR, 1,
- av7110->ir.ir_config);
- } else
- ret = 0;
+ dprintk(4, "ir config = %08x\n", av7110->ir.ir_config);
- /* address */
- if (av7110->ir.device_mask != ir_device_mask[i])
- av7110->ir.device_mask = ir_device_mask[i];
- }
-
- return ret;
+ return av7110_fw_cmd(av7110, COMTYPE_PIDFILTER, SetIR, 1,
+ av7110->ir.ir_config);
}
-
-/* /proc/av7110_ir interface */
-static ssize_t av7110_ir_proc_write(struct file *file, const char __user *buffer,
- size_t count, loff_t *pos)
+static int change_protocol(struct rc_dev *rcdev, u64 *rc_type)
{
- char *page;
+ struct av7110 *av7110 = rcdev->priv;
u32 ir_config;
- int size = sizeof ir_config + sizeof av_list[0]->ir.key_map;
- int i;
- if (count < size)
+ if (*rc_type & RC_PROTO_BIT_UNKNOWN) {
+ ir_config = IR_RCMM;
+ *rc_type = RC_PROTO_UNKNOWN;
+ } else if (*rc_type & RC_PROTO_BIT_RC5) {
+ if (FW_VERSION(av7110->arm_app) >= 0x2620)
+ ir_config = IR_RC5_EXT;
+ else
+ ir_config = IR_RC5;
+ *rc_type = RC_PROTO_BIT_RC5;
+ } else {
return -EINVAL;
-
- page = vmalloc(size);
- if (!page)
- return -ENOMEM;
-
- if (copy_from_user(page, buffer, size)) {
- vfree(page);
- return -EFAULT;
}
- memcpy(&ir_config, page, sizeof ir_config);
-
- for (i = 0; i < av_cnt; i++) {
- /* keymap */
- memcpy(av_list[i]->ir.key_map, page + sizeof ir_config,
- sizeof(av_list[i]->ir.key_map));
- /* protocol, inversion, address */
- ir_protocol[i] = ir_config & 0x0001;
- ir_inversion[i] = ir_config & 0x8000 ? 1 : 0;
- if (ir_config & 0x4000)
- ir_device_mask[i] = 1 << ((ir_config >> 16) & 0x1f);
- else
- ir_device_mask[i] = IR_ALL;
- /* update configuration */
- av7110_check_ir_config(av_list[i], false);
- input_register_keys(&av_list[i]->ir);
- }
- vfree(page);
- return count;
-}
+ if (ir_config == av7110->ir.ir_config)
+ return 0;
-static const struct file_operations av7110_ir_proc_fops = {
- .owner = THIS_MODULE,
- .write = av7110_ir_proc_write,
- .llseek = noop_llseek,
-};
+ av7110->ir.ir_config = ir_config;
-/* interrupt handler */
-static void ir_handler(struct av7110 *av7110, u32 ircom)
-{
- dprintk(4, "ir command = %08x\n", ircom);
- av7110->ir.ir_command = ircom;
- tasklet_schedule(&av7110->ir.ir_tasklet);
+ return av7110_set_ir_config(av7110);
}
-
int av7110_ir_init(struct av7110 *av7110)
{
- struct input_dev *input_dev;
- static struct proc_dir_entry *e;
- int err;
-
- if (av_cnt >= ARRAY_SIZE(av_list))
- return -ENOSPC;
+ struct rc_dev *rcdev;
+ struct pci_dev *pci;
+ int ret;
- av_list[av_cnt++] = av7110;
- av7110_check_ir_config(av7110, true);
-
- timer_setup(&av7110->ir.keyup_timer, av7110_emit_keyup, 0);
-
- input_dev = input_allocate_device();
- if (!input_dev)
+ rcdev = rc_allocate_device(RC_DRIVER_SCANCODE);
+ if (!rcdev)
return -ENOMEM;
- av7110->ir.input_dev = input_dev;
- snprintf(av7110->ir.input_phys, sizeof(av7110->ir.input_phys),
- "pci-%s/ir0", pci_name(av7110->dev->pci));
+ pci = av7110->dev->pci;
- input_dev->name = "DVB on-card IR receiver";
-
- input_dev->phys = av7110->ir.input_phys;
- input_dev->id.bustype = BUS_PCI;
- input_dev->id.version = 2;
- if (av7110->dev->pci->subsystem_vendor) {
- input_dev->id.vendor = av7110->dev->pci->subsystem_vendor;
- input_dev->id.product = av7110->dev->pci->subsystem_device;
+ snprintf(av7110->ir.input_phys, sizeof(av7110->ir.input_phys),
+ "pci-%s/ir0", pci_name(pci));
+
+ rcdev->device_name = av7110->card_name;
+ rcdev->driver_name = KBUILD_MODNAME;
+ rcdev->input_phys = av7110->ir.input_phys;
+ rcdev->input_id.bustype = BUS_PCI;
+ rcdev->input_id.version = 2;
+ if (pci->subsystem_vendor) {
+ rcdev->input_id.vendor = pci->subsystem_vendor;
+ rcdev->input_id.product = pci->subsystem_device;
} else {
- input_dev->id.vendor = av7110->dev->pci->vendor;
- input_dev->id.product = av7110->dev->pci->device;
- }
- input_dev->dev.parent = &av7110->dev->pci->dev;
- /* initial keymap */
- memcpy(av7110->ir.key_map, default_key_map, sizeof av7110->ir.key_map);
- input_register_keys(&av7110->ir);
- err = input_register_device(input_dev);
- if (err) {
- input_free_device(input_dev);
- return err;
+ rcdev->input_id.vendor = pci->vendor;
+ rcdev->input_id.product = pci->device;
}
- /*
- * Input core's default autorepeat is 33 cps with 250 msec
- * delay, let's adjust to numbers more suitable for remote
- * control.
- */
- input_enable_softrepeat(input_dev, 250, 125);
+ rcdev->dev.parent = &pci->dev;
+ rcdev->allowed_protocols = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_UNKNOWN;
+ rcdev->change_protocol = change_protocol;
+ rcdev->map_name = RC_MAP_HAUPPAUGE;
+ rcdev->priv = av7110;
- if (av_cnt == 1) {
- e = proc_create("av7110_ir", S_IWUSR, NULL, &av7110_ir_proc_fops);
- if (e)
- proc_set_size(e, 4 + 256 * sizeof(u16));
- }
+ av7110->ir.rcdev = rcdev;
+ av7110->ir.ir_config = IR_RC5;
+ av7110_set_ir_config(av7110);
- tasklet_init(&av7110->ir.ir_tasklet, av7110_emit_key, (unsigned long) &av7110->ir);
- av7110->ir.ir_handler = ir_handler;
+ ret = rc_register_device(rcdev);
+ if (ret) {
+ av7110->ir.rcdev = NULL;
+ rc_free_device(rcdev);
+ }
- return 0;
+ return ret;
}
-
void av7110_ir_exit(struct av7110 *av7110)
{
- int i;
-
- if (av_cnt == 0)
- return;
-
- del_timer_sync(&av7110->ir.keyup_timer);
- av7110->ir.ir_handler = NULL;
- tasklet_kill(&av7110->ir.ir_tasklet);
-
- for (i = 0; i < av_cnt; i++)
- if (av_list[i] == av7110) {
- av_list[i] = av_list[av_cnt-1];
- av_list[av_cnt-1] = NULL;
- break;
- }
-
- if (av_cnt == 1)
- remove_proc_entry("av7110_ir", NULL);
-
- input_unregister_device(av7110->ir.input_dev);
-
- av_cnt--;
+ rc_unregister_device(av7110->ir.rcdev);
}
//MODULE_AUTHOR("Holger Waechtler <holger@convergence.de>, Oliver Endriss <o.endriss@gmx.de>");
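
For reference, the conversion above replaces the driver's private keymap/proc interface with rc-core, while av7110_set_ir_config() still sends the legacy ir_config word to the firmware. That word packs protocol, inversion and address filtering into one 32-bit value; the bit positions below are taken from the deleted av7110_ir_proc_write() handler, and the helper and struct are illustrative only, not part of the driver:

#include <linux/types.h>

/* Hypothetical helper: decode the legacy av7110 ir_config word. */
struct av7110_ir_cfg {
	unsigned int protocol;	/* bit 0: 1 = RCMM, 0 = RC5/RC5-extended */
	unsigned int inversion;	/* bit 15: invert the IR signal */
	u32 device_mask;	/* bit 14 set: filter on address bits 16..20 */
};

static void av7110_decode_ir_config(u32 ir_config, struct av7110_ir_cfg *cfg)
{
	cfg->protocol  = ir_config & 0x0001;
	cfg->inversion = (ir_config & 0x8000) ? 1 : 0;
	if (ir_config & 0x4000)
		cfg->device_mask = 1 << ((ir_config >> 16) & 0x1f);
	else
		cfg->device_mask = ~0U;	/* accept any device address */
}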
diff --git a/drivers/media/pci/tw68/tw68-video.c b/drivers/media/pci/tw68/tw68-video.c
index 5b469cf578f5..8e0952d65ad4 100644
--- a/drivers/media/pci/tw68/tw68-video.c
+++ b/drivers/media/pci/tw68/tw68-video.c
@@ -729,12 +729,6 @@ static int tw68_querycap(struct file *file, void *priv,
strscpy(cap->card, "Techwell Capture Card",
sizeof(cap->card));
sprintf(cap->bus_info, "PCI:%s", pci_name(dev->pci));
- cap->device_caps =
- V4L2_CAP_VIDEO_CAPTURE |
- V4L2_CAP_READWRITE |
- V4L2_CAP_STREAMING;
-
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -913,6 +907,8 @@ static const struct video_device tw68_video_template = {
.ioctl_ops = &video_ioctl_ops,
.release = video_device_release_empty,
.tvnorms = TW68_NORMS,
+ .device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
+ V4L2_CAP_STREAMING,
};
/* ------------------------------------------------------------------ */
diff --git a/drivers/media/pci/tw686x/tw686x-video.c b/drivers/media/pci/tw686x/tw686x-video.c
index 377fb1e453fa..9be8c6e4fb69 100644
--- a/drivers/media/pci/tw686x/tw686x-video.c
+++ b/drivers/media/pci/tw686x/tw686x-video.c
@@ -765,9 +765,6 @@ static int tw686x_querycap(struct file *file, void *priv,
strscpy(cap->card, dev->name, sizeof(cap->card));
snprintf(cap->bus_info, sizeof(cap->bus_info),
"PCI:%s", pci_name(dev->pci_dev));
- cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
- V4L2_CAP_READWRITE;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -1280,6 +1277,8 @@ int tw686x_video_init(struct tw686x_dev *dev)
vdev->minor = -1;
vdev->lock = &vc->vb_mutex;
vdev->ctrl_handler = &vc->ctrl_handler;
+ vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE |
+ V4L2_CAP_STREAMING | V4L2_CAP_READWRITE;
vc->device = vdev;
video_set_drvdata(vdev, vc);
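
The tw68 and tw686x hunks above follow the same pattern: the capability flags move out of the querycap handler and into struct video_device::device_caps, and the V4L2 core is then expected to fill in both cap->device_caps and cap->capabilities (including V4L2_CAP_DEVICE_CAPS) when it handles VIDIOC_QUERYCAP. A minimal sketch of that split, with hypothetical foo_* names:

#include <linux/string.h>
#include <linux/videodev2.h>
#include <media/v4l2-dev.h>

/* querycap now only fills the identification strings */
static int foo_querycap(struct file *file, void *priv,
			struct v4l2_capability *cap)
{
	strscpy(cap->driver, "foo", sizeof(cap->driver));
	strscpy(cap->card, "Foo Capture Card", sizeof(cap->card));
	return 0;
}

/* ...while the capabilities are declared once on the video_device */
static void foo_setup_vdev(struct video_device *vdev)
{
	vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
			    V4L2_CAP_STREAMING;
}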
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index f2b5f27ebacb..8a19654b393a 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -6,7 +6,6 @@
menuconfig V4L_PLATFORM_DRIVERS
bool "V4L platform devices"
depends on MEDIA_CAMERA_SUPPORT
- default n
help
Say Y here to enable support for platform-specific V4L drivers.
@@ -155,7 +154,6 @@ config VIDEO_TI_CAL
depends on SOC_DRA7XX || COMPILE_TEST
select VIDEOBUF2_DMA_CONTIG
select V4L2_FWNODE
- default n
help
Support for the TI CAL (Camera Adaptation Layer) block
found on DRA72X SoC.
@@ -168,7 +166,6 @@ menuconfig V4L_MEM2MEM_DRIVERS
bool "Memory-to-memory multimedia devices"
depends on VIDEO_V4L2
depends on MEDIA_CAMERA_SUPPORT
- default n
help
Say Y here to enable selecting drivers for V4L devices that
use system memory for both source and destination buffers, as opposed
@@ -236,7 +233,6 @@ config VIDEO_MEDIATEK_MDP
select VIDEOBUF2_DMA_CONTIG
select V4L2_MEM2MEM_DEV
select VIDEO_MEDIATEK_VPU
- default n
help
It is a v4l2 driver and present in Mediatek MT8173 SoCs.
The driver supports for scaling and color space conversion.
@@ -252,7 +248,6 @@ config VIDEO_MEDIATEK_VCODEC
select VIDEOBUF2_DMA_CONTIG
select V4L2_MEM2MEM_DEV
select VIDEO_MEDIATEK_VPU
- default n
help
Mediatek video codec driver provides HW capability to
encode and decode in a range of video formats
@@ -276,7 +271,6 @@ config VIDEO_SAMSUNG_S5P_G2D
depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
select VIDEOBUF2_DMA_CONTIG
select V4L2_MEM2MEM_DEV
- default n
help
This is a v4l2 driver for Samsung S5P and EXYNOS4 G2D
2d graphics accelerator.
@@ -296,7 +290,6 @@ config VIDEO_SAMSUNG_S5P_MFC
depends on VIDEO_DEV && VIDEO_V4L2
depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
select VIDEOBUF2_DMA_CONTIG
- default n
help
MFC 5.1 and 6.x driver for V4L2
@@ -459,7 +452,6 @@ config VIDEO_ROCKCHIP_RGA
depends on ARCH_ROCKCHIP || COMPILE_TEST
select VIDEOBUF2_DMA_SG
select V4L2_MEM2MEM_DEV
- default n
help
This is a v4l2 driver for Rockchip SOC RGA 2d graphics accelerator.
Rockchip RGA is a separate 2D raster graphic acceleration unit.
@@ -477,7 +469,6 @@ config VIDEO_TI_VPE
select VIDEO_TI_VPDMA
select VIDEO_TI_SC
select VIDEO_TI_CSC
- default n
help
Support for the TI VPE(Video Processing Engine) block
found on DRA7XX SoC.
@@ -530,7 +521,6 @@ config VIDEO_VIM2M
depends on VIDEO_DEV && VIDEO_V4L2
select VIDEOBUF2_VMALLOC
select V4L2_MEM2MEM_DEV
- default n
help
This is a virtual test device for the memory-to-memory driver
framework.
@@ -542,7 +532,6 @@ endif #V4L_TEST_DRIVERS
menuconfig DVB_PLATFORM_DRIVERS
bool "DVB platform devices"
depends on MEDIA_DIGITAL_TV_SUPPORT
- default n
help
Say Y here to enable support for platform-specific Digital TV drivers.
@@ -678,7 +667,6 @@ endif #CEC_PLATFORM_DRIVERS
menuconfig SDR_PLATFORM_DRIVERS
bool "SDR platform devices"
depends on MEDIA_SDR_SUPPORT
- default n
help
Say Y here to enable support for platform-specific SDR Drivers.
diff --git a/drivers/media/platform/aspeed-video.c b/drivers/media/platform/aspeed-video.c
index 8144fe36ad48..f899ac3b4a61 100644
--- a/drivers/media/platform/aspeed-video.c
+++ b/drivers/media/platform/aspeed-video.c
@@ -187,6 +187,7 @@ enum {
VIDEO_STREAMING,
VIDEO_FRAME_INPRG,
VIDEO_STOPPED,
+ VIDEO_CLOCKS_ON,
};
struct aspeed_video_addr {
@@ -440,7 +441,7 @@ static int aspeed_video_start_frame(struct aspeed_video *video)
if (!(seq_ctrl & VE_SEQ_CTRL_COMP_BUSY) ||
!(seq_ctrl & VE_SEQ_CTRL_CAP_BUSY)) {
- dev_err(video->dev, "Engine busy; don't start frame\n");
+ dev_dbg(video->dev, "Engine busy; don't start frame\n");
return -EBUSY;
}
@@ -462,8 +463,7 @@ static int aspeed_video_start_frame(struct aspeed_video *video)
aspeed_video_write(video, VE_COMP_ADDR, addr);
aspeed_video_update(video, VE_INTERRUPT_CTRL, 0,
- VE_INTERRUPT_COMP_COMPLETE |
- VE_INTERRUPT_CAPTURE_COMPLETE);
+ VE_INTERRUPT_COMP_COMPLETE);
aspeed_video_update(video, VE_SEQ_CTRL, 0,
VE_SEQ_CTRL_TRIG_CAPTURE | VE_SEQ_CTRL_TRIG_COMP);
@@ -483,19 +483,30 @@ static void aspeed_video_enable_mode_detect(struct aspeed_video *video)
static void aspeed_video_off(struct aspeed_video *video)
{
+ if (!test_bit(VIDEO_CLOCKS_ON, &video->flags))
+ return;
+
/* Disable interrupts */
aspeed_video_write(video, VE_INTERRUPT_CTRL, 0);
+ aspeed_video_write(video, VE_INTERRUPT_STATUS, 0xffffffff);
/* Turn off the relevant clocks */
- clk_disable_unprepare(video->vclk);
- clk_disable_unprepare(video->eclk);
+ clk_disable(video->vclk);
+ clk_disable(video->eclk);
+
+ clear_bit(VIDEO_CLOCKS_ON, &video->flags);
}
static void aspeed_video_on(struct aspeed_video *video)
{
+ if (test_bit(VIDEO_CLOCKS_ON, &video->flags))
+ return;
+
/* Turn on the relevant clocks */
- clk_prepare_enable(video->eclk);
- clk_prepare_enable(video->vclk);
+ clk_enable(video->eclk);
+ clk_enable(video->vclk);
+
+ set_bit(VIDEO_CLOCKS_ON, &video->flags);
}
static void aspeed_video_bufs_done(struct aspeed_video *video,
@@ -511,7 +522,7 @@ static void aspeed_video_bufs_done(struct aspeed_video *video,
spin_unlock_irqrestore(&video->lock, flags);
}
-static void aspeed_video_irq_res_change(struct aspeed_video *video)
+static void aspeed_video_irq_res_change(struct aspeed_video *video, ulong delay)
{
dev_dbg(video->dev, "Resolution changed; resetting\n");
@@ -521,7 +532,7 @@ static void aspeed_video_irq_res_change(struct aspeed_video *video)
aspeed_video_off(video);
aspeed_video_bufs_done(video, VB2_BUF_STATE_ERROR);
- schedule_delayed_work(&video->res_work, RESOLUTION_CHANGE_DELAY);
+ schedule_delayed_work(&video->res_work, delay);
}
static irqreturn_t aspeed_video_irq(int irq, void *arg)
@@ -534,7 +545,7 @@ static irqreturn_t aspeed_video_irq(int irq, void *arg)
* re-initialize
*/
if (sts & VE_INTERRUPT_MODE_DETECT_WD) {
- aspeed_video_irq_res_change(video);
+ aspeed_video_irq_res_change(video, 0);
return IRQ_HANDLED;
}
@@ -544,7 +555,7 @@ static irqreturn_t aspeed_video_irq(int irq, void *arg)
VE_INTERRUPT_MODE_DETECT, 0);
aspeed_video_write(video, VE_INTERRUPT_STATUS,
VE_INTERRUPT_MODE_DETECT);
-
+ sts &= ~VE_INTERRUPT_MODE_DETECT;
set_bit(VIDEO_MODE_DETECT_DONE, &video->flags);
wake_up_interruptible_all(&video->wait);
} else {
@@ -552,13 +563,13 @@ static irqreturn_t aspeed_video_irq(int irq, void *arg)
* Signal acquired while NOT doing resolution
* detection; reset the engine and re-initialize
*/
- aspeed_video_irq_res_change(video);
+ aspeed_video_irq_res_change(video,
+ RESOLUTION_CHANGE_DELAY);
return IRQ_HANDLED;
}
}
- if ((sts & VE_INTERRUPT_COMP_COMPLETE) &&
- (sts & VE_INTERRUPT_CAPTURE_COMPLETE)) {
+ if (sts & VE_INTERRUPT_COMP_COMPLETE) {
struct aspeed_video_buffer *buf;
u32 frame_size = aspeed_video_read(video,
VE_OFFSET_COMP_STREAM);
@@ -587,17 +598,15 @@ static irqreturn_t aspeed_video_irq(int irq, void *arg)
VE_SEQ_CTRL_FORCE_IDLE |
VE_SEQ_CTRL_TRIG_COMP, 0);
aspeed_video_update(video, VE_INTERRUPT_CTRL,
- VE_INTERRUPT_COMP_COMPLETE |
- VE_INTERRUPT_CAPTURE_COMPLETE, 0);
+ VE_INTERRUPT_COMP_COMPLETE, 0);
aspeed_video_write(video, VE_INTERRUPT_STATUS,
- VE_INTERRUPT_COMP_COMPLETE |
- VE_INTERRUPT_CAPTURE_COMPLETE);
-
+ VE_INTERRUPT_COMP_COMPLETE);
+ sts &= ~VE_INTERRUPT_COMP_COMPLETE;
if (test_bit(VIDEO_STREAMING, &video->flags) && buf)
aspeed_video_start_frame(video);
}
- return IRQ_HANDLED;
+ return sts ? IRQ_NONE : IRQ_HANDLED;
}
static void aspeed_video_check_and_set_polarity(struct aspeed_video *video)
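
The interrupt changes above convert the handler into a threaded one (IRQF_ONESHOT, no hard-IRQ part) and make it track which status bits it actually serviced: every handled condition is cleared from the local sts copy, and anything left over makes the handler return IRQ_NONE so the IRQ core can detect an unhandled or runaway interrupt. A minimal sketch of that bookkeeping, using placeholder register, bit and struct names:

#include <linux/bits.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/types.h>

#define FOO_IRQ_STATUS	0x08		/* placeholder register offset */
#define FOO_IRQ_DONE	BIT(0)		/* placeholder status bit */

struct foo_dev {
	void __iomem *base;
	struct completion done;
};

static irqreturn_t foo_irq_thread(int irq, void *arg)
{
	struct foo_dev *foo = arg;
	u32 sts = readl(foo->base + FOO_IRQ_STATUS);

	if (sts & FOO_IRQ_DONE) {
		/* acknowledge and drop the bit from the local copy */
		writel(FOO_IRQ_DONE, foo->base + FOO_IRQ_STATUS);
		sts &= ~FOO_IRQ_DONE;
		complete(&foo->done);
	}

	/* anything still set was not ours or not handled */
	return sts ? IRQ_NONE : IRQ_HANDLED;
}

Such a handler would be registered as in the hunk above, e.g. devm_request_threaded_irq(dev, irq, NULL, foo_irq_thread, IRQF_ONESHOT, name, foo).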
@@ -723,27 +732,6 @@ static void aspeed_video_get_resolution(struct aspeed_video *video)
det->height = MIN_HEIGHT;
video->v4l2_input_status = V4L2_IN_ST_NO_SIGNAL;
- /*
- * Since we need max buffer size for detection, free the second source
- * buffer first.
- */
- if (video->srcs[1].size)
- aspeed_video_free_buf(video, &video->srcs[1]);
-
- if (video->srcs[0].size < VE_MAX_SRC_BUFFER_SIZE) {
- if (video->srcs[0].size)
- aspeed_video_free_buf(video, &video->srcs[0]);
-
- if (!aspeed_video_alloc_buf(video, &video->srcs[0],
- VE_MAX_SRC_BUFFER_SIZE)) {
- dev_err(video->dev,
- "Failed to allocate source buffers\n");
- return;
- }
- }
-
- aspeed_video_write(video, VE_SRC0_ADDR, video->srcs[0].dma);
-
do {
if (tries) {
set_current_state(TASK_INTERRUPTIBLE);
@@ -758,7 +746,7 @@ static void aspeed_video_get_resolution(struct aspeed_video *video)
res_check(video),
MODE_DETECT_TIMEOUT);
if (!rc) {
- dev_err(video->dev, "Timed out; first mode detect\n");
+ dev_dbg(video->dev, "Timed out; first mode detect\n");
clear_bit(VIDEO_RES_DETECT, &video->flags);
return;
}
@@ -776,7 +764,7 @@ static void aspeed_video_get_resolution(struct aspeed_video *video)
MODE_DETECT_TIMEOUT);
clear_bit(VIDEO_RES_DETECT, &video->flags);
if (!rc) {
- dev_err(video->dev, "Timed out; second mode detect\n");
+ dev_dbg(video->dev, "Timed out; second mode detect\n");
return;
}
@@ -810,7 +798,7 @@ static void aspeed_video_get_resolution(struct aspeed_video *video)
} while (invalid_resolution && (tries++ < INVALID_RESOLUTION_RETRIES));
if (invalid_resolution) {
- dev_err(video->dev, "Invalid resolution detected\n");
+ dev_dbg(video->dev, "Invalid resolution detected\n");
return;
}
@@ -836,8 +824,29 @@ static void aspeed_video_set_resolution(struct aspeed_video *video)
struct v4l2_bt_timings *act = &video->active_timings;
unsigned int size = act->width * act->height;
+ /* Set capture/compression frame sizes */
aspeed_video_calc_compressed_size(video, size);
+ if (video->active_timings.width == 1680) {
+ /*
+ * This is a workaround to fix a silicon bug on A1 and A2
+ * revisions. Since it doesn't break capturing operation of
+ * other revisions, use it for all revisions without checking
+ * the revision ID. 1728 is used because it is the next
+ * 64-pixel-aligned value above 1680; this minimizes memory
+ * bandwidth and gives the video engine better access speed.
+ */
+ aspeed_video_write(video, VE_CAP_WINDOW,
+ 1728 << 16 | act->height);
+ size += (1728 - 1680) * video->active_timings.height;
+ } else {
+ aspeed_video_write(video, VE_CAP_WINDOW,
+ act->width << 16 | act->height);
+ }
+ aspeed_video_write(video, VE_COMP_WINDOW,
+ act->width << 16 | act->height);
+ aspeed_video_write(video, VE_SRC_SCANLINE_OFFSET, act->width * 4);
+
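
To make the size adjustment above concrete, here is the arithmetic for one timing that hits this path (1680x1050 is only an example):

	extra pixels = (1728 - 1680) * 1050 = 50400
	extra bytes  = 50400 * 4            = 201600   (after the later size *= 4)

so the capture window is programmed as 1728x1050 while the compression window stays at 1680x1050.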
/* Don't use direct mode below 1024 x 768 (irqs don't fire) */
if (size < DIRECT_FETCH_THRESHOLD) {
aspeed_video_write(video, VE_TGS_0,
@@ -854,29 +863,16 @@ static void aspeed_video_set_resolution(struct aspeed_video *video)
aspeed_video_update(video, VE_CTRL, 0, VE_CTRL_DIRECT_FETCH);
}
- /* Set capture/compression frame sizes */
- aspeed_video_write(video, VE_CAP_WINDOW,
- act->width << 16 | act->height);
- aspeed_video_write(video, VE_COMP_WINDOW,
- act->width << 16 | act->height);
- aspeed_video_write(video, VE_SRC_SCANLINE_OFFSET, act->width * 4);
-
size *= 4;
- if (size == video->srcs[0].size / 2) {
- aspeed_video_write(video, VE_SRC1_ADDR,
- video->srcs[0].dma + size);
- } else if (size == video->srcs[0].size) {
- if (!aspeed_video_alloc_buf(video, &video->srcs[1], size))
- goto err_mem;
-
- aspeed_video_write(video, VE_SRC1_ADDR, video->srcs[1].dma);
- } else {
- aspeed_video_free_buf(video, &video->srcs[0]);
+ if (size != video->srcs[0].size) {
+ if (video->srcs[0].size)
+ aspeed_video_free_buf(video, &video->srcs[0]);
+ if (video->srcs[1].size)
+ aspeed_video_free_buf(video, &video->srcs[1]);
if (!aspeed_video_alloc_buf(video, &video->srcs[0], size))
goto err_mem;
-
if (!aspeed_video_alloc_buf(video, &video->srcs[1], size))
goto err_mem;
@@ -1445,7 +1441,7 @@ static void aspeed_video_stop_streaming(struct vb2_queue *q)
!test_bit(VIDEO_FRAME_INPRG, &video->flags),
STOP_TIMEOUT);
if (!rc) {
- dev_err(video->dev, "Timed out when stopping streaming\n");
+ dev_dbg(video->dev, "Timed out when stopping streaming\n");
/*
* Need to force stop any DMA and try and get HW into a good
@@ -1589,8 +1585,8 @@ static int aspeed_video_init(struct aspeed_video *video)
return -ENODEV;
}
- rc = devm_request_irq(dev, irq, aspeed_video_irq, IRQF_SHARED,
- DEVICE_NAME, video);
+ rc = devm_request_threaded_irq(dev, irq, NULL, aspeed_video_irq,
+ IRQF_ONESHOT, DEVICE_NAME, video);
if (rc < 0) {
dev_err(dev, "Unable to request IRQ %d\n", irq);
return rc;
@@ -1602,31 +1598,46 @@ static int aspeed_video_init(struct aspeed_video *video)
return PTR_ERR(video->eclk);
}
+ rc = clk_prepare(video->eclk);
+ if (rc)
+ return rc;
+
video->vclk = devm_clk_get(dev, "vclk");
if (IS_ERR(video->vclk)) {
dev_err(dev, "Unable to get VCLK\n");
- return PTR_ERR(video->vclk);
+ rc = PTR_ERR(video->vclk);
+ goto err_unprepare_eclk;
}
+ rc = clk_prepare(video->vclk);
+ if (rc)
+ goto err_unprepare_eclk;
+
of_reserved_mem_device_init(dev);
rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (rc) {
dev_err(dev, "Failed to set DMA mask\n");
- of_reserved_mem_device_release(dev);
- return rc;
+ goto err_release_reserved_mem;
}
if (!aspeed_video_alloc_buf(video, &video->jpeg,
VE_JPEG_HEADER_SIZE)) {
dev_err(dev, "Failed to allocate DMA for JPEG header\n");
- of_reserved_mem_device_release(dev);
- return rc;
+ goto err_release_reserved_mem;
}
aspeed_video_init_jpeg_table(video->jpeg.virt, video->yuv420);
return 0;
+
+err_release_reserved_mem:
+ of_reserved_mem_device_release(dev);
+ clk_unprepare(video->vclk);
+err_unprepare_eclk:
+ clk_unprepare(video->eclk);
+
+ return rc;
}
static int aspeed_video_probe(struct platform_device *pdev)
@@ -1670,6 +1681,11 @@ static int aspeed_video_remove(struct platform_device *pdev)
struct v4l2_device *v4l2_dev = dev_get_drvdata(dev);
struct aspeed_video *video = to_aspeed_video(v4l2_dev);
+ aspeed_video_off(video);
+
+ clk_unprepare(video->vclk);
+ clk_unprepare(video->eclk);
+
video_unregister_device(&video->vdev);
vb2_queue_release(&video->queue);
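
The clock handling in this file is reworked so that the possibly sleeping clk_prepare()/clk_unprepare() calls happen once at init/remove time, while the streaming paths only call clk_enable()/clk_disable(), guarded by the new VIDEO_CLOCKS_ON flag so that aspeed_video_on()/aspeed_video_off() can be called repeatedly without unbalancing the enable count. A minimal sketch of that prepare-once / enable-on-demand pattern, with hypothetical foo_* names:

#include <linux/bitops.h>
#include <linux/clk.h>

#define FOO_CLOCKS_ON	0	/* bit in foo_video.flags */

struct foo_video {
	struct clk *eclk;
	struct clk *vclk;
	unsigned long flags;
};

/* probe/init time: may sleep */
static int foo_prepare_clocks(struct foo_video *v)
{
	int ret = clk_prepare(v->eclk);

	if (ret)
		return ret;
	ret = clk_prepare(v->vclk);
	if (ret)
		clk_unprepare(v->eclk);
	return ret;
}

/* runtime paths: made idempotent by the flag */
static void foo_clocks_on(struct foo_video *v)
{
	if (test_bit(FOO_CLOCKS_ON, &v->flags))
		return;
	clk_enable(v->eclk);
	clk_enable(v->vclk);
	set_bit(FOO_CLOCKS_ON, &v->flags);
}

static void foo_clocks_off(struct foo_video *v)
{
	if (!test_bit(FOO_CLOCKS_ON, &v->flags))
		return;
	clk_disable(v->vclk);
	clk_disable(v->eclk);
	clear_bit(FOO_CLOCKS_ON, &v->flags);
}

As in the patch, the enable paths ignore the clk_enable() return value; a stricter variant would check it.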
diff --git a/drivers/media/platform/atmel/Makefile b/drivers/media/platform/atmel/Makefile
index 484936604ccb..2dba38994a70 100644
--- a/drivers/media/platform/atmel/Makefile
+++ b/drivers/media/platform/atmel/Makefile
@@ -1,3 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_VIDEO_ATMEL_ISC) += atmel-isc.o
+atmel-isc-objs = atmel-sama5d2-isc.o atmel-isc-base.o
+
obj-$(CONFIG_VIDEO_ATMEL_ISI) += atmel-isi.o
+obj-$(CONFIG_VIDEO_ATMEL_ISC) += atmel-isc.o
diff --git a/drivers/media/platform/atmel/atmel-isc.c b/drivers/media/platform/atmel/atmel-isc-base.c
index 05b9cfb91d20..c1c776b348a9 100644
--- a/drivers/media/platform/atmel/atmel-isc.c
+++ b/drivers/media/platform/atmel/atmel-isc-base.c
@@ -1,24 +1,12 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Atmel Image Sensor Controller (ISC) driver
+ * Microchip Image Sensor Controller (ISC) common driver base
*
- * Copyright (C) 2016 Atmel
+ * Copyright (C) 2016-2019 Microchip Technology, Inc.
*
- * Author: Songjun Wu <songjun.wu@microchip.com>
+ * Author: Songjun Wu
+ * Author: Eugen Hristev <eugen.hristev@microchip.com>
*
- * Sensor-->PFE-->WB-->CFA-->CC-->GAM-->CSC-->CBC-->SUB-->RLP-->DMA
- *
- * ISC video pipeline integrates the following submodules:
- * PFE: Parallel Front End to sample the camera sensor input stream
- * WB: Programmable white balance in the Bayer domain
- * CFA: Color filter array interpolation module
- * CC: Programmable color correction
- * GAM: Gamma correction
- * CSC: Programmable color space conversion
- * CBC: Contrast and Brightness control
- * SUB: This module performs YCbCr444 to YCbCr420 chrominance subsampling
- * RLP: This module performs rounding, range limiting
- * and packing of the incoming data
*/
#include <linux/clk.h>
@@ -45,176 +33,19 @@
#include <media/videobuf2-dma-contig.h>
#include "atmel-isc-regs.h"
+#include "atmel-isc.h"
-#define ATMEL_ISC_NAME "atmel_isc"
-
-#define ISC_MAX_SUPPORT_WIDTH 2592
-#define ISC_MAX_SUPPORT_HEIGHT 1944
-
-#define ISC_CLK_MAX_DIV 255
-
-enum isc_clk_id {
- ISC_ISPCK = 0,
- ISC_MCK = 1,
-};
-
-struct isc_clk {
- struct clk_hw hw;
- struct clk *clk;
- struct regmap *regmap;
- spinlock_t lock;
- u8 id;
- u8 parent_id;
- u32 div;
- struct device *dev;
-};
-
-#define to_isc_clk(hw) container_of(hw, struct isc_clk, hw)
-
-struct isc_buffer {
- struct vb2_v4l2_buffer vb;
- struct list_head list;
-};
-
-struct isc_subdev_entity {
- struct v4l2_subdev *sd;
- struct v4l2_async_subdev *asd;
- struct v4l2_async_notifier notifier;
-
- u32 pfe_cfg0;
-
- struct list_head list;
-};
-
-/*
- * struct isc_format - ISC media bus format information
- This structure represents the interface between the ISC
- and the sensor. It's the input format received by
- the ISC.
- * @fourcc: Fourcc code for this format
- * @mbus_code: V4L2 media bus format code.
- * @cfa_baycfg: If this format is RAW BAYER, indicate the type of bayer.
- this is either BGBG, RGRG, etc.
- * @pfe_cfg0_bps: Number of hardware data lines connected to the ISC
- */
-
-struct isc_format {
- u32 fourcc;
- u32 mbus_code;
- u32 cfa_baycfg;
-
- bool sd_support;
- u32 pfe_cfg0_bps;
-};
-
-/* Pipeline bitmap */
-#define WB_ENABLE BIT(0)
-#define CFA_ENABLE BIT(1)
-#define CC_ENABLE BIT(2)
-#define GAM_ENABLE BIT(3)
-#define GAM_BENABLE BIT(4)
-#define GAM_GENABLE BIT(5)
-#define GAM_RENABLE BIT(6)
-#define CSC_ENABLE BIT(7)
-#define CBC_ENABLE BIT(8)
-#define SUB422_ENABLE BIT(9)
-#define SUB420_ENABLE BIT(10)
-
-#define GAM_ENABLES (GAM_RENABLE | GAM_GENABLE | GAM_BENABLE | GAM_ENABLE)
-
-/*
- * struct fmt_config - ISC format configuration and internal pipeline
- This structure represents the internal configuration
- of the ISC.
- It also holds the format that ISC will present to v4l2.
- * @sd_format: Pointer to an isc_format struct that holds the sensor
- configuration.
- * @fourcc: Fourcc code for this format.
- * @bpp: Bytes per pixel in the current format.
- * @rlp_cfg_mode: Configuration of the RLP (rounding, limiting packaging)
- * @dcfg_imode: Configuration of the input of the DMA module
- * @dctrl_dview: Configuration of the output of the DMA module
- * @bits_pipeline: Configuration of the pipeline, which modules are enabled
- */
-struct fmt_config {
- struct isc_format *sd_format;
-
- u32 fourcc;
- u8 bpp;
-
- u32 rlp_cfg_mode;
- u32 dcfg_imode;
- u32 dctrl_dview;
-
- u32 bits_pipeline;
-};
-
-#define HIST_ENTRIES 512
-#define HIST_BAYER (ISC_HIS_CFG_MODE_B + 1)
-
-enum{
- HIST_INIT = 0,
- HIST_ENABLED,
- HIST_DISABLED,
-};
-
-struct isc_ctrls {
- struct v4l2_ctrl_handler handler;
-
- u32 brightness;
- u32 contrast;
- u8 gamma_index;
- u8 awb;
-
- u32 r_gain;
- u32 b_gain;
-
- u32 hist_entry[HIST_ENTRIES];
- u32 hist_count[HIST_BAYER];
- u8 hist_id;
- u8 hist_stat;
-};
-
-#define ISC_PIPE_LINE_NODE_NUM 11
-
-struct isc_device {
- struct regmap *regmap;
- struct clk *hclock;
- struct clk *ispck;
- struct isc_clk isc_clks[2];
-
- struct device *dev;
- struct v4l2_device v4l2_dev;
- struct video_device video_dev;
-
- struct vb2_queue vb2_vidq;
- spinlock_t dma_queue_lock;
- struct list_head dma_queue;
- struct isc_buffer *cur_frm;
- unsigned int sequence;
- bool stop;
- struct completion comp;
-
- struct v4l2_format fmt;
- struct isc_format **user_formats;
- unsigned int num_user_formats;
-
- struct fmt_config config;
- struct fmt_config try_config;
-
- struct isc_ctrls ctrls;
- struct work_struct awb_work;
-
- struct mutex lock;
-
- struct regmap_field *pipeline[ISC_PIPE_LINE_NODE_NUM];
+static unsigned int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "debug level (0-2)");
- struct isc_subdev_entity *current_subdev;
- struct list_head subdev_entities;
-};
+static unsigned int sensor_preferred = 1;
+module_param(sensor_preferred, uint, 0644);
+MODULE_PARM_DESC(sensor_preferred,
+ "Sensor is preferred to output the specified format (1-on 0-off), default 1");
/* This is a list of the formats that the ISC can *output* */
-static struct isc_format controller_formats[] = {
+const struct isc_format controller_formats[] = {
{
.fourcc = V4L2_PIX_FMT_ARGB444,
},
@@ -245,7 +76,7 @@ static struct isc_format controller_formats[] = {
};
/* This is a list of formats that the ISC can receive as *input* */
-static struct isc_format formats_list[] = {
+struct isc_format formats_list[] = {
{
.fourcc = V4L2_PIX_FMT_SBGGR8,
.mbus_code = MEDIA_BUS_FMT_SBGGR8_1X8,
@@ -335,11 +166,8 @@ static struct isc_format formats_list[] = {
},
};
-#define GAMMA_MAX 2
-#define GAMMA_ENTRIES 64
-
/* Gamma table with gamma 1/2.2 */
-static const u32 isc_gamma_table[GAMMA_MAX + 1][GAMMA_ENTRIES] = {
+const u32 isc_gamma_table[GAMMA_MAX + 1][GAMMA_ENTRIES] = {
/* 0 --> gamma 1/1.8 */
{ 0x65, 0x66002F, 0x950025, 0xBB0020, 0xDB001D, 0xF8001A,
0x1130018, 0x12B0017, 0x1420016, 0x1580014, 0x16D0013, 0x1810012,
@@ -383,14 +211,39 @@ static const u32 isc_gamma_table[GAMMA_MAX + 1][GAMMA_ENTRIES] = {
#define ISC_IS_FORMAT_RAW(mbus_code) \
(((mbus_code) & 0xf000) == 0x3000)
-static unsigned int debug;
-module_param(debug, int, 0644);
-MODULE_PARM_DESC(debug, "debug level (0-2)");
+static inline void isc_update_awb_ctrls(struct isc_device *isc)
+{
+ struct isc_ctrls *ctrls = &isc->ctrls;
-static unsigned int sensor_preferred = 1;
-module_param(sensor_preferred, uint, 0644);
-MODULE_PARM_DESC(sensor_preferred,
- "Sensor is preferred to output the specified format (1-on 0-off), default 1");
+ regmap_write(isc->regmap, ISC_WB_O_RGR,
+ (ISC_WB_O_ZERO_VAL - (ctrls->offset[ISC_HIS_CFG_MODE_R])) |
+ ((ISC_WB_O_ZERO_VAL - ctrls->offset[ISC_HIS_CFG_MODE_GR]) << 16));
+ regmap_write(isc->regmap, ISC_WB_O_BGB,
+ (ISC_WB_O_ZERO_VAL - (ctrls->offset[ISC_HIS_CFG_MODE_B])) |
+ ((ISC_WB_O_ZERO_VAL - ctrls->offset[ISC_HIS_CFG_MODE_GB]) << 16));
+ regmap_write(isc->regmap, ISC_WB_G_RGR,
+ ctrls->gain[ISC_HIS_CFG_MODE_R] |
+ (ctrls->gain[ISC_HIS_CFG_MODE_GR] << 16));
+ regmap_write(isc->regmap, ISC_WB_G_BGB,
+ ctrls->gain[ISC_HIS_CFG_MODE_B] |
+ (ctrls->gain[ISC_HIS_CFG_MODE_GB] << 16));
+}
+
+static inline void isc_reset_awb_ctrls(struct isc_device *isc)
+{
+ unsigned int c;
+
+ for (c = ISC_HIS_CFG_MODE_GR; c <= ISC_HIS_CFG_MODE_B; c++) {
+ /* gains use a fixed-point format with 9 fractional bits */
+ isc->ctrls.gain[c] = 1 << 9;
+ /* offsets are in 2's complement, the value
+ * will be subtracted from ISC_WB_O_ZERO_VAL to obtain
+ * the 2's complement of a value between 0 and
+ * ISC_WB_O_ZERO_VAL >> 1
+ */
+ isc->ctrls.offset[c] = ISC_WB_O_ZERO_VAL;
+ }
+}
static int isc_wait_clk_stable(struct clk_hw *hw)
{
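
A short worked example of the encoding that isc_reset_awb_ctrls() and isc_update_awb_ctrls() rely on, based on the driver's own comments and on ISC_WB_O_ZERO_VAL being 1 << 13 (the helper below is illustrative, not driver code, and the 13-bit 2's-complement reading of the register field is an inference from those comments):

#include <linux/types.h>

#define EXAMPLE_WB_O_ZERO_VAL	(1 << 13)	/* mirrors ISC_WB_O_ZERO_VAL */

/*
 * Gains carry 9 fractional bits: 1 << 9 == 512 encodes 1.0, 768 encodes 1.5.
 * Offsets are written as ISC_WB_O_ZERO_VAL - offset, which is -offset in a
 * 13-bit 2's-complement field:
 *   offset 64   -> 8192 - 64 == 8128  (i.e. -64)
 *   offset 8192 -> 0                  (the "no offset" reset sentinel)
 */
static inline u32 example_wb_encode_offset(u32 offset)
{
	return EXAMPLE_WB_O_ZERO_VAL - offset;
}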
@@ -646,7 +499,7 @@ static int isc_clk_register(struct isc_device *isc, unsigned int id)
return 0;
}
-static int isc_clk_init(struct isc_device *isc)
+int isc_clk_init(struct isc_device *isc)
{
unsigned int i;
int ret;
@@ -663,7 +516,7 @@ static int isc_clk_init(struct isc_device *isc)
return 0;
}
-static void isc_clk_cleanup(struct isc_device *isc)
+void isc_clk_cleanup(struct isc_device *isc)
{
unsigned int i;
@@ -772,7 +625,9 @@ static void isc_start_dma(struct isc_device *isc)
dctrl_dview = isc->config.dctrl_dview;
regmap_write(regmap, ISC_DCTRL, dctrl_dview | ISC_DCTRL_IE_IS);
+ spin_lock(&isc->awb_lock);
regmap_write(regmap, ISC_CTRLEN, ISC_CTRL_CAPTURE);
+ spin_unlock(&isc->awb_lock);
}
static void isc_set_pipeline(struct isc_device *isc, u32 pipeline)
@@ -794,11 +649,11 @@ static void isc_set_pipeline(struct isc_device *isc, u32 pipeline)
bay_cfg = isc->config.sd_format->cfa_baycfg;
+ if (ctrls->awb == ISC_WB_NONE)
+ isc_reset_awb_ctrls(isc);
+
regmap_write(regmap, ISC_WB_CFG, bay_cfg);
- regmap_write(regmap, ISC_WB_O_RGR, 0x0);
- regmap_write(regmap, ISC_WB_O_BGR, 0x0);
- regmap_write(regmap, ISC_WB_G_RGR, ctrls->r_gain | (0x1 << 25));
- regmap_write(regmap, ISC_WB_G_BGR, ctrls->b_gain | (0x1 << 25));
+ isc_update_awb_ctrls(isc);
regmap_write(regmap, ISC_CFA_CFG, bay_cfg | ISC_CFA_CFG_EITPOL);
@@ -848,13 +703,13 @@ static void isc_set_histogram(struct isc_device *isc, bool enable)
if (enable) {
regmap_write(regmap, ISC_HIS_CFG,
- ISC_HIS_CFG_MODE_R |
+ ISC_HIS_CFG_MODE_GR |
(isc->config.sd_format->cfa_baycfg
<< ISC_HIS_CFG_BAYSEL_SHIFT) |
ISC_HIS_CFG_RAR);
regmap_write(regmap, ISC_HIS_CTRL, ISC_HIS_CTRL_EN);
regmap_write(regmap, ISC_INTEN, ISC_INT_HISDONE);
- ctrls->hist_id = ISC_HIS_CFG_MODE_R;
+ ctrls->hist_id = ISC_HIS_CFG_MODE_GR;
isc_update_profile(isc);
regmap_write(regmap, ISC_CTRLEN, ISC_CTRL_HISREQ);
@@ -897,7 +752,7 @@ static int isc_configure(struct isc_device *isc)
isc_set_pipeline(isc, pipeline);
/*
- * The current implemented histogram is available for RAW R, B, GB
+ * The current implemented histogram is available for RAW R, B, GB, GR
* channels. We need to check if sensor is outputting RAW BAYER
*/
if (isc->ctrls.awb &&
@@ -949,6 +804,10 @@ static int isc_start_streaming(struct vb2_queue *vq, unsigned int count)
spin_unlock_irqrestore(&isc->dma_queue_lock, flags);
+ /* if we are streaming from RAW, we can do one-shot white balance adj */
+ if (ISC_IS_FORMAT_RAW(isc->config.sd_format->mbus_code))
+ v4l2_ctrl_activate(isc->do_wb_ctrl, true);
+
return 0;
err_configure:
@@ -973,6 +832,8 @@ static void isc_stop_streaming(struct vb2_queue *vq)
struct isc_buffer *buf;
int ret;
+ v4l2_ctrl_activate(isc->do_wb_ctrl, false);
+
isc->stop = true;
/* Wait until the end of the current frame */
@@ -1433,7 +1294,7 @@ static int isc_try_fmt(struct isc_device *isc, struct v4l2_format *f,
ret = v4l2_subdev_call(isc->current_subdev->sd, pad, set_fmt,
&pad_cfg, &format);
if (ret < 0)
- goto isc_try_fmt_err;
+ goto isc_try_fmt_subdev_err;
v4l2_fill_pix_format(pixfmt, &format.format);
@@ -1448,6 +1309,7 @@ static int isc_try_fmt(struct isc_device *isc, struct v4l2_format *f,
isc_try_fmt_err:
v4l2_err(&isc->v4l2_dev, "Could not find any possible format for a working pipeline\n");
+isc_try_fmt_subdev_err:
memset(&isc->try_config, 0, sizeof(isc->try_config));
return ret;
@@ -1472,6 +1334,12 @@ static int isc_set_fmt(struct isc_device *isc, struct v4l2_format *f)
return ret;
isc->fmt = *f;
+
+ if (isc->try_config.sd_format && isc->config.sd_format &&
+ isc->try_config.sd_format != isc->config.sd_format) {
+ isc->ctrls.hist_stat = HIST_INIT;
+ isc_reset_awb_ctrls(isc);
+ }
/* make the try configuration active */
isc->config = isc->try_config;
@@ -1588,7 +1456,7 @@ static int isc_enum_frameintervals(struct file *file, void *fh,
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
};
int ret = -EINVAL;
- int i;
+ unsigned int i;
for (i = 0; i < isc->num_user_formats; i++)
if (isc->user_formats[i]->fourcc == fival->pixel_format)
@@ -1708,7 +1576,7 @@ static const struct v4l2_file_operations isc_fops = {
.poll = vb2_fop_poll,
};
-static irqreturn_t isc_interrupt(int irq, void *dev_id)
+irqreturn_t isc_interrupt(int irq, void *dev_id)
{
struct isc_device *isc = (struct isc_device *)dev_id;
struct regmap *regmap = isc->regmap;
@@ -1755,7 +1623,7 @@ static irqreturn_t isc_interrupt(int irq, void *dev_id)
return ret;
}
-static void isc_hist_count(struct isc_device *isc)
+static void isc_hist_count(struct isc_device *isc, u32 *min, u32 *max)
{
struct regmap *regmap = isc->regmap;
struct isc_ctrls *ctrls = &isc->ctrls;
@@ -1763,25 +1631,99 @@ static void isc_hist_count(struct isc_device *isc)
u32 *hist_entry = &ctrls->hist_entry[0];
u32 i;
+ *min = 0;
+ *max = HIST_ENTRIES;
+
regmap_bulk_read(regmap, ISC_HIS_ENTRY, hist_entry, HIST_ENTRIES);
*hist_count = 0;
- for (i = 0; i < HIST_ENTRIES; i++)
+ /*
+ * we deliberately ignore the end of the histogram,
+ * the most white pixels
+ */
+ for (i = 1; i < HIST_ENTRIES; i++) {
+ if (*hist_entry && !*min)
+ *min = i;
+ if (*hist_entry)
+ *max = i;
*hist_count += i * (*hist_entry++);
+ }
+
+ if (!*min)
+ *min = 1;
}
static void isc_wb_update(struct isc_ctrls *ctrls)
{
u32 *hist_count = &ctrls->hist_count[0];
- u64 g_count = (u64)hist_count[ISC_HIS_CFG_MODE_GB] << 9;
- u32 hist_r = hist_count[ISC_HIS_CFG_MODE_R];
- u32 hist_b = hist_count[ISC_HIS_CFG_MODE_B];
+ u32 c, offset[4];
+ u64 avg = 0;
+ /* We compute two gains, stretch gain and grey world gain */
+ u32 s_gain[4], gw_gain[4];
+
+ /*
+ * According to Grey World, we need to set gains for R/B to normalize
+ * them towards the green channel.
+ * Thus we want to keep Green fixed and adjust only Red/Blue.
+ * Compute the average of both green channels first.
+ */
+ avg = (u64)hist_count[ISC_HIS_CFG_MODE_GR] +
+ (u64)hist_count[ISC_HIS_CFG_MODE_GB];
+ avg >>= 1;
- if (hist_r)
- ctrls->r_gain = div_u64(g_count, hist_r);
+ /* Green histogram is null, nothing to do */
+ if (!avg)
+ return;
- if (hist_b)
- ctrls->b_gain = div_u64(g_count, hist_b);
+ for (c = ISC_HIS_CFG_MODE_GR; c <= ISC_HIS_CFG_MODE_B; c++) {
+ /*
+ * The color offset is the minimum value of the histogram.
+ * We stretch this color to the full range by subtracting
+ * this value from the color component.
+ */
+ offset[c] = ctrls->hist_minmax[c][HIST_MIN_INDEX];
+ /*
+ * The offset is always at least 1. If the offset is 1, we do
+ * not need to adjust it, so our result must be zero.
+ * The offset is computed on a 9-bit histogram (512 bins),
+ * but the offset register works on the 12-bit pipeline
+ * (4096 levels), so we need to shift by the 3 bits that
+ * the histogram ignores.
+ */
+ ctrls->offset[c] = (offset[c] - 1) << 3;
+
+ /* the offset is then converted to 2's complement */
+ if (!ctrls->offset[c])
+ ctrls->offset[c] = ISC_WB_O_ZERO_VAL;
+
+ /*
+ * The stretch gain is the total number of histogram bins
+ * divided by the actual range of the color component
+ * (max - min). Computing the gain this way stretches the
+ * color component over the full histogram.
+ * We shift by 9 bits for precision, since gains have
+ * 9 fractional bits.
+ */
+ s_gain[c] = (HIST_ENTRIES << 9) /
+ (ctrls->hist_minmax[c][HIST_MAX_INDEX] -
+ ctrls->hist_minmax[c][HIST_MIN_INDEX] + 1);
+
+ /*
+ * Now we have to compute the gain w.r.t. the average.
+ * Gain is added or removed to pull the component towards
+ * the average. If the component happens to be zero, use
+ * the fixed-point value for a 1.0 gain.
+ */
+ if (hist_count[c])
+ gw_gain[c] = div_u64(avg << 9, hist_count[c]);
+ else
+ gw_gain[c] = 1 << 9;
+
+ /* multiply both gains and adjust for decimals */
+ ctrls->gain[c] = s_gain[c] * gw_gain[c];
+ ctrls->gain[c] >>= 9;
+ }
}
static void isc_awb_work(struct work_struct *w)
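
To illustrate the gain computation in isc_wb_update() above with concrete, made-up numbers: suppose the red channel histogram spans bins 17..272 and sums to 800000, while the two green channels average 1000000. The standalone snippet below walks through the same arithmetic in the driver's 9-fractional-bit format; it is an illustration only, not driver code:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t min = 17, max = 272;		/* red histogram min/max bins */
	uint64_t avg = 1000000, count = 800000;	/* green average, red sum */

	/* stretch gain: 512 bins / used range -> 2.0, i.e. 1024 in Q9 */
	uint32_t s_gain = (512u << 9) / (max - min + 1);
	/* grey-world gain: green average / red sum -> 1.25, i.e. 640 in Q9 */
	uint32_t gw_gain = (uint32_t)((avg << 9) / count);
	/* combined gain: 2.0 * 1.25 = 2.5 -> 1280 in Q9 */
	uint32_t gain = (s_gain * gw_gain) >> 9;
	/* offset: (min - 1) << 3 = 128 on the 12-bit pipeline */
	uint32_t offset = (min - 1) << 3;

	printf("gain=%u (Q9), offset=%u\n", gain, offset);
	return 0;
}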
@@ -1792,27 +1734,66 @@ static void isc_awb_work(struct work_struct *w)
struct isc_ctrls *ctrls = &isc->ctrls;
u32 hist_id = ctrls->hist_id;
u32 baysel;
+ unsigned long flags;
+ u32 min, max;
+
+ /* streaming is not active anymore */
+ if (isc->stop)
+ return;
if (ctrls->hist_stat != HIST_ENABLED)
return;
- isc_hist_count(isc);
+ isc_hist_count(isc, &min, &max);
+ ctrls->hist_minmax[hist_id][HIST_MIN_INDEX] = min;
+ ctrls->hist_minmax[hist_id][HIST_MAX_INDEX] = max;
if (hist_id != ISC_HIS_CFG_MODE_B) {
hist_id++;
} else {
isc_wb_update(ctrls);
- hist_id = ISC_HIS_CFG_MODE_R;
+ hist_id = ISC_HIS_CFG_MODE_GR;
}
ctrls->hist_id = hist_id;
baysel = isc->config.sd_format->cfa_baycfg << ISC_HIS_CFG_BAYSEL_SHIFT;
+ /* if no more auto white balance, reset controls. */
+ /* if auto white balance is no longer enabled, reset the controls */
+ isc_reset_awb_ctrls(isc);
+
pm_runtime_get_sync(isc->dev);
+ /*
+ * Only update if we have all the required histograms and controls;
+ * if AWB has been disabled, we need to reset the registers as well.
+ */
+ if (hist_id == ISC_HIS_CFG_MODE_GR || ctrls->awb == ISC_WB_NONE) {
+ /*
+ * The DMA-done IRQ may trigger while we are updating the
+ * white balance registers here; in that case only part of
+ * the controls would be updated.
+ * We avoid that by locking this section.
+ */
+ spin_lock_irqsave(&isc->awb_lock, flags);
+ isc_update_awb_ctrls(isc);
+ spin_unlock_irqrestore(&isc->awb_lock, flags);
+
+ /*
+ * if we are doing just the one-time white balance adjustment,
+ * we are basically done.
+ */
+ if (ctrls->awb == ISC_WB_ONETIME) {
+ v4l2_info(&isc->v4l2_dev,
+ "Completed one time white-balance adjustment.\n");
+ ctrls->awb = ISC_WB_NONE;
+ }
+ }
regmap_write(regmap, ISC_HIS_CFG, hist_id | baysel | ISC_HIS_CFG_RAR);
isc_update_profile(isc);
- regmap_write(regmap, ISC_CTRLEN, ISC_CTRL_HISREQ);
+ /* if awb has been disabled, we don't need to start another histogram */
+ if (ctrls->awb)
+ regmap_write(regmap, ISC_CTRLEN, ISC_CTRL_HISREQ);
pm_runtime_put_sync(isc->dev);
}
@@ -1823,6 +1804,9 @@ static int isc_s_ctrl(struct v4l2_ctrl *ctrl)
struct isc_device, ctrls.handler);
struct isc_ctrls *ctrls = &isc->ctrls;
+ if (ctrl->flags & V4L2_CTRL_FLAG_INACTIVE)
+ return 0;
+
switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
ctrls->brightness = ctrl->val & ISC_CBC_BRIGHT_MASK;
@@ -1834,11 +1818,33 @@ static int isc_s_ctrl(struct v4l2_ctrl *ctrl)
ctrls->gamma_index = ctrl->val;
break;
case V4L2_CID_AUTO_WHITE_BALANCE:
- ctrls->awb = ctrl->val;
- if (ctrls->hist_stat != HIST_ENABLED) {
- ctrls->r_gain = 0x1 << 9;
- ctrls->b_gain = 0x1 << 9;
- }
+ if (ctrl->val == 1)
+ ctrls->awb = ISC_WB_AUTO;
+ else
+ ctrls->awb = ISC_WB_NONE;
+
+ /* we did not configure ISC yet */
+ if (!isc->config.sd_format)
+ break;
+
+ if (ctrls->hist_stat != HIST_ENABLED)
+ isc_reset_awb_ctrls(isc);
+
+ if (isc->ctrls.awb == ISC_WB_AUTO &&
+ vb2_is_streaming(&isc->vb2_vidq) &&
+ ISC_IS_FORMAT_RAW(isc->config.sd_format->mbus_code))
+ isc_set_histogram(isc, true);
+
+ break;
+ case V4L2_CID_DO_WHITE_BALANCE:
+ /* if AWB is enabled, do nothing */
+ if (ctrls->awb == ISC_WB_AUTO)
+ return 0;
+
+ ctrls->awb = ISC_WB_ONETIME;
+ isc_set_histogram(isc, true);
+ v4l2_dbg(1, debug, &isc->v4l2_dev,
+ "One time white-balance started.\n");
break;
default:
return -EINVAL;
@@ -1859,16 +1865,32 @@ static int isc_ctrl_init(struct isc_device *isc)
int ret;
ctrls->hist_stat = HIST_INIT;
+ isc_reset_awb_ctrls(isc);
- ret = v4l2_ctrl_handler_init(hdl, 4);
+ ret = v4l2_ctrl_handler_init(hdl, 5);
if (ret < 0)
return ret;
+ ctrls->brightness = 0;
+ ctrls->contrast = 256;
+
v4l2_ctrl_new_std(hdl, ops, V4L2_CID_BRIGHTNESS, -1024, 1023, 1, 0);
v4l2_ctrl_new_std(hdl, ops, V4L2_CID_CONTRAST, -2048, 2047, 1, 256);
v4l2_ctrl_new_std(hdl, ops, V4L2_CID_GAMMA, 0, GAMMA_MAX, 1, 2);
v4l2_ctrl_new_std(hdl, ops, V4L2_CID_AUTO_WHITE_BALANCE, 0, 1, 1, 1);
+ /* do_white_balance is a button, so min,max,step,default are ignored */
+ isc->do_wb_ctrl = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_DO_WHITE_BALANCE,
+ 0, 0, 0, 0);
+
+ if (!isc->do_wb_ctrl) {
+ ret = hdl->error;
+ v4l2_ctrl_handler_free(hdl);
+ return ret;
+ }
+
+ v4l2_ctrl_activate(isc->do_wb_ctrl, false);
+
v4l2_ctrl_handler_setup(hdl);
return 0;
@@ -1994,7 +2016,7 @@ static int isc_async_complete(struct v4l2_async_notifier *notifier)
struct isc_device, v4l2_dev);
struct video_device *vdev = &isc->video_dev;
struct vb2_queue *q = &isc->vb2_vidq;
- int ret;
+ int ret = 0;
INIT_WORK(&isc->awb_work, isc_awb_work);
@@ -2025,30 +2047,31 @@ static int isc_async_complete(struct v4l2_async_notifier *notifier)
if (ret < 0) {
v4l2_err(&isc->v4l2_dev,
"vb2_queue_init() failed: %d\n", ret);
- return ret;
+ goto isc_async_complete_err;
}
/* Init video dma queues */
INIT_LIST_HEAD(&isc->dma_queue);
spin_lock_init(&isc->dma_queue_lock);
+ spin_lock_init(&isc->awb_lock);
ret = isc_formats_init(isc);
if (ret < 0) {
v4l2_err(&isc->v4l2_dev,
"Init format failed: %d\n", ret);
- return ret;
+ goto isc_async_complete_err;
}
ret = isc_set_default_fmt(isc);
if (ret) {
v4l2_err(&isc->v4l2_dev, "Could not set default format\n");
- return ret;
+ goto isc_async_complete_err;
}
ret = isc_ctrl_init(isc);
if (ret) {
v4l2_err(&isc->v4l2_dev, "Init isc ctrols failed: %d\n", ret);
- return ret;
+ goto isc_async_complete_err;
}
/* Register video device */
@@ -2068,19 +2091,23 @@ static int isc_async_complete(struct v4l2_async_notifier *notifier)
if (ret < 0) {
v4l2_err(&isc->v4l2_dev,
"video_register_device failed: %d\n", ret);
- return ret;
+ goto isc_async_complete_err;
}
return 0;
+
+isc_async_complete_err:
+ mutex_destroy(&isc->lock);
+ return ret;
}
-static const struct v4l2_async_notifier_operations isc_async_ops = {
+const struct v4l2_async_notifier_operations isc_async_ops = {
.bound = isc_async_bound,
.unbind = isc_async_unbind,
.complete = isc_async_complete,
};
-static void isc_subdev_cleanup(struct isc_device *isc)
+void isc_subdev_cleanup(struct isc_device *isc)
{
struct isc_subdev_entity *subdev_entity;
@@ -2092,7 +2119,7 @@ static void isc_subdev_cleanup(struct isc_device *isc)
INIT_LIST_HEAD(&isc->subdev_entities);
}
-static int isc_pipeline_init(struct isc_device *isc)
+int isc_pipeline_init(struct isc_device *isc)
{
struct device *dev = isc->dev;
struct regmap *regmap = isc->regmap;
@@ -2125,300 +2152,12 @@ static int isc_pipeline_init(struct isc_device *isc)
return 0;
}
-static int isc_parse_dt(struct device *dev, struct isc_device *isc)
-{
- struct device_node *np = dev->of_node;
- struct device_node *epn = NULL, *rem;
- struct isc_subdev_entity *subdev_entity;
- unsigned int flags;
- int ret;
-
- INIT_LIST_HEAD(&isc->subdev_entities);
-
- while (1) {
- struct v4l2_fwnode_endpoint v4l2_epn = { .bus_type = 0 };
-
- epn = of_graph_get_next_endpoint(np, epn);
- if (!epn)
- return 0;
-
- rem = of_graph_get_remote_port_parent(epn);
- if (!rem) {
- dev_notice(dev, "Remote device at %pOF not found\n",
- epn);
- continue;
- }
-
- ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(epn),
- &v4l2_epn);
- if (ret) {
- of_node_put(rem);
- ret = -EINVAL;
- dev_err(dev, "Could not parse the endpoint\n");
- break;
- }
-
- subdev_entity = devm_kzalloc(dev,
- sizeof(*subdev_entity), GFP_KERNEL);
- if (!subdev_entity) {
- of_node_put(rem);
- ret = -ENOMEM;
- break;
- }
-
- /* asd will be freed by the subsystem once it's added to the
- * notifier list
- */
- subdev_entity->asd = kzalloc(sizeof(*subdev_entity->asd),
- GFP_KERNEL);
- if (!subdev_entity->asd) {
- of_node_put(rem);
- ret = -ENOMEM;
- break;
- }
-
- flags = v4l2_epn.bus.parallel.flags;
-
- if (flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
- subdev_entity->pfe_cfg0 = ISC_PFE_CFG0_HPOL_LOW;
-
- if (flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
- subdev_entity->pfe_cfg0 |= ISC_PFE_CFG0_VPOL_LOW;
-
- if (flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)
- subdev_entity->pfe_cfg0 |= ISC_PFE_CFG0_PPOL_LOW;
-
- if (v4l2_epn.bus_type == V4L2_MBUS_BT656)
- subdev_entity->pfe_cfg0 |= ISC_PFE_CFG0_CCIR_CRC |
- ISC_PFE_CFG0_CCIR656;
-
- subdev_entity->asd->match_type = V4L2_ASYNC_MATCH_FWNODE;
- subdev_entity->asd->match.fwnode =
- of_fwnode_handle(rem);
- list_add_tail(&subdev_entity->list, &isc->subdev_entities);
- }
-
- of_node_put(epn);
- return ret;
-}
-
/* regmap configuration */
#define ATMEL_ISC_REG_MAX 0xbfc
-static const struct regmap_config isc_regmap_config = {
+const struct regmap_config isc_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.max_register = ATMEL_ISC_REG_MAX,
};
-static int atmel_isc_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct isc_device *isc;
- struct resource *res;
- void __iomem *io_base;
- struct isc_subdev_entity *subdev_entity;
- int irq;
- int ret;
-
- isc = devm_kzalloc(dev, sizeof(*isc), GFP_KERNEL);
- if (!isc)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, isc);
- isc->dev = dev;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- io_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(io_base))
- return PTR_ERR(io_base);
-
- isc->regmap = devm_regmap_init_mmio(dev, io_base, &isc_regmap_config);
- if (IS_ERR(isc->regmap)) {
- ret = PTR_ERR(isc->regmap);
- dev_err(dev, "failed to init register map: %d\n", ret);
- return ret;
- }
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- ret = irq;
- dev_err(dev, "failed to get irq: %d\n", ret);
- return ret;
- }
-
- ret = devm_request_irq(dev, irq, isc_interrupt, 0,
- ATMEL_ISC_NAME, isc);
- if (ret < 0) {
- dev_err(dev, "can't register ISR for IRQ %u (ret=%i)\n",
- irq, ret);
- return ret;
- }
-
- ret = isc_pipeline_init(isc);
- if (ret)
- return ret;
-
- isc->hclock = devm_clk_get(dev, "hclock");
- if (IS_ERR(isc->hclock)) {
- ret = PTR_ERR(isc->hclock);
- dev_err(dev, "failed to get hclock: %d\n", ret);
- return ret;
- }
-
- ret = clk_prepare_enable(isc->hclock);
- if (ret) {
- dev_err(dev, "failed to enable hclock: %d\n", ret);
- return ret;
- }
-
- ret = isc_clk_init(isc);
- if (ret) {
- dev_err(dev, "failed to init isc clock: %d\n", ret);
- goto unprepare_hclk;
- }
-
- isc->ispck = isc->isc_clks[ISC_ISPCK].clk;
-
- ret = clk_prepare_enable(isc->ispck);
- if (ret) {
- dev_err(dev, "failed to enable ispck: %d\n", ret);
- goto unprepare_hclk;
- }
-
- /* ispck should be greater or equal to hclock */
- ret = clk_set_rate(isc->ispck, clk_get_rate(isc->hclock));
- if (ret) {
- dev_err(dev, "failed to set ispck rate: %d\n", ret);
- goto unprepare_clk;
- }
-
- ret = v4l2_device_register(dev, &isc->v4l2_dev);
- if (ret) {
- dev_err(dev, "unable to register v4l2 device.\n");
- goto unprepare_clk;
- }
-
- ret = isc_parse_dt(dev, isc);
- if (ret) {
- dev_err(dev, "fail to parse device tree\n");
- goto unregister_v4l2_device;
- }
-
- if (list_empty(&isc->subdev_entities)) {
- dev_err(dev, "no subdev found\n");
- ret = -ENODEV;
- goto unregister_v4l2_device;
- }
-
- list_for_each_entry(subdev_entity, &isc->subdev_entities, list) {
- v4l2_async_notifier_init(&subdev_entity->notifier);
-
- ret = v4l2_async_notifier_add_subdev(&subdev_entity->notifier,
- subdev_entity->asd);
- if (ret) {
- fwnode_handle_put(subdev_entity->asd->match.fwnode);
- kfree(subdev_entity->asd);
- goto cleanup_subdev;
- }
-
- subdev_entity->notifier.ops = &isc_async_ops;
-
- ret = v4l2_async_notifier_register(&isc->v4l2_dev,
- &subdev_entity->notifier);
- if (ret) {
- dev_err(dev, "fail to register async notifier\n");
- goto cleanup_subdev;
- }
-
- if (video_is_registered(&isc->video_dev))
- break;
- }
-
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
- pm_request_idle(dev);
-
- return 0;
-
-cleanup_subdev:
- isc_subdev_cleanup(isc);
-
-unregister_v4l2_device:
- v4l2_device_unregister(&isc->v4l2_dev);
-
-unprepare_clk:
- clk_disable_unprepare(isc->ispck);
-unprepare_hclk:
- clk_disable_unprepare(isc->hclock);
-
- isc_clk_cleanup(isc);
-
- return ret;
-}
-
-static int atmel_isc_remove(struct platform_device *pdev)
-{
- struct isc_device *isc = platform_get_drvdata(pdev);
-
- pm_runtime_disable(&pdev->dev);
- clk_disable_unprepare(isc->ispck);
- clk_disable_unprepare(isc->hclock);
-
- isc_subdev_cleanup(isc);
-
- v4l2_device_unregister(&isc->v4l2_dev);
-
- isc_clk_cleanup(isc);
-
- return 0;
-}
-
-static int __maybe_unused isc_runtime_suspend(struct device *dev)
-{
- struct isc_device *isc = dev_get_drvdata(dev);
-
- clk_disable_unprepare(isc->ispck);
- clk_disable_unprepare(isc->hclock);
-
- return 0;
-}
-
-static int __maybe_unused isc_runtime_resume(struct device *dev)
-{
- struct isc_device *isc = dev_get_drvdata(dev);
- int ret;
-
- ret = clk_prepare_enable(isc->hclock);
- if (ret)
- return ret;
-
- return clk_prepare_enable(isc->ispck);
-}
-
-static const struct dev_pm_ops atmel_isc_dev_pm_ops = {
- SET_RUNTIME_PM_OPS(isc_runtime_suspend, isc_runtime_resume, NULL)
-};
-
-static const struct of_device_id atmel_isc_of_match[] = {
- { .compatible = "atmel,sama5d2-isc" },
- { }
-};
-MODULE_DEVICE_TABLE(of, atmel_isc_of_match);
-
-static struct platform_driver atmel_isc_driver = {
- .probe = atmel_isc_probe,
- .remove = atmel_isc_remove,
- .driver = {
- .name = ATMEL_ISC_NAME,
- .pm = &atmel_isc_dev_pm_ops,
- .of_match_table = of_match_ptr(atmel_isc_of_match),
- },
-};
-
-module_platform_driver(atmel_isc_driver);
-
-MODULE_AUTHOR("Songjun Wu <songjun.wu@microchip.com>");
-MODULE_DESCRIPTION("The V4L2 driver for Atmel-ISC");
-MODULE_LICENSE("GPL v2");
-MODULE_SUPPORTED_DEVICE("video");
diff --git a/drivers/media/platform/atmel/atmel-isc-regs.h b/drivers/media/platform/atmel/atmel-isc-regs.h
index 8f7f8efc71a7..c1283fb21bf6 100644
--- a/drivers/media/platform/atmel/atmel-isc-regs.h
+++ b/drivers/media/platform/atmel/atmel-isc-regs.h
@@ -100,13 +100,15 @@
#define ISC_WB_O_RGR 0x00000060
/* ISC White Balance Offset for B, GB Register */
-#define ISC_WB_O_BGR 0x00000064
+#define ISC_WB_O_BGB 0x00000064
/* ISC White Balance Gain for R, GR Register */
#define ISC_WB_G_RGR 0x00000068
/* ISC White Balance Gain for B, GB Register */
-#define ISC_WB_G_BGR 0x0000006c
+#define ISC_WB_G_BGB 0x0000006c
+
+#define ISC_WB_O_ZERO_VAL (1 << 13)
/* ISC Color Filter Array Control Register */
#define ISC_CFA_CTRL 0x00000070
diff --git a/drivers/media/platform/atmel/atmel-isc.h b/drivers/media/platform/atmel/atmel-isc.h
new file mode 100644
index 000000000000..bfaed2fad2b5
--- /dev/null
+++ b/drivers/media/platform/atmel/atmel-isc.h
@@ -0,0 +1,245 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Microchip Image Sensor Controller (ISC) driver header file
+ *
+ * Copyright (C) 2016-2019 Microchip Technology, Inc.
+ *
+ * Author: Songjun Wu
+ * Author: Eugen Hristev <eugen.hristev@microchip.com>
+ *
+ */
+#ifndef _ATMEL_ISC_H_
+#define _ATMEL_ISC_H_
+
+#define ISC_MAX_SUPPORT_WIDTH 2592
+#define ISC_MAX_SUPPORT_HEIGHT 1944
+
+#define ISC_CLK_MAX_DIV 255
+
+enum isc_clk_id {
+ ISC_ISPCK = 0,
+ ISC_MCK = 1,
+};
+
+struct isc_clk {
+ struct clk_hw hw;
+ struct clk *clk;
+ struct regmap *regmap;
+ spinlock_t lock; /* serialize access to clock registers */
+ u8 id;
+ u8 parent_id;
+ u32 div;
+ struct device *dev;
+};
+
+#define to_isc_clk(v) container_of(v, struct isc_clk, hw)
+
+struct isc_buffer {
+ struct vb2_v4l2_buffer vb;
+ struct list_head list;
+};
+
+struct isc_subdev_entity {
+ struct v4l2_subdev *sd;
+ struct v4l2_async_subdev *asd;
+ struct v4l2_async_notifier notifier;
+
+ u32 pfe_cfg0;
+
+ struct list_head list;
+};
+
+/*
+ * struct isc_format - ISC media bus format information
+ This structure represents the interface between the ISC
+ and the sensor. It's the input format received by
+ the ISC.
+ * @fourcc: Fourcc code for this format
+ * @mbus_code: V4L2 media bus format code.
+ * @cfa_baycfg: If this format is RAW BAYER, indicate the type of bayer.
+ this is either BGBG, RGRG, etc.
+ * @pfe_cfg0_bps: Number of hardware data lines connected to the ISC
+ */
+
+struct isc_format {
+ u32 fourcc;
+ u32 mbus_code;
+ u32 cfa_baycfg;
+
+ bool sd_support;
+ u32 pfe_cfg0_bps;
+};
+
+/* Pipeline bitmap */
+#define WB_ENABLE BIT(0)
+#define CFA_ENABLE BIT(1)
+#define CC_ENABLE BIT(2)
+#define GAM_ENABLE BIT(3)
+#define GAM_BENABLE BIT(4)
+#define GAM_GENABLE BIT(5)
+#define GAM_RENABLE BIT(6)
+#define CSC_ENABLE BIT(7)
+#define CBC_ENABLE BIT(8)
+#define SUB422_ENABLE BIT(9)
+#define SUB420_ENABLE BIT(10)
+
+#define GAM_ENABLES (GAM_RENABLE | GAM_GENABLE | GAM_BENABLE | GAM_ENABLE)
+
+/*
+ * struct fmt_config - ISC format configuration and internal pipeline
+ This structure represents the internal configuration
+ of the ISC.
+ It also holds the format that ISC will present to v4l2.
+ * @sd_format: Pointer to an isc_format struct that holds the sensor
+ configuration.
+ * @fourcc: Fourcc code for this format.
+ * @bpp: Bytes per pixel in the current format.
+ * @rlp_cfg_mode: Configuration of the RLP (rounding, limiting packaging)
+ * @dcfg_imode: Configuration of the input of the DMA module
+ * @dctrl_dview: Configuration of the output of the DMA module
+ * @bits_pipeline: Configuration of the pipeline, which modules are enabled
+ */
+struct fmt_config {
+ struct isc_format *sd_format;
+
+ u32 fourcc;
+ u8 bpp;
+
+ u32 rlp_cfg_mode;
+ u32 dcfg_imode;
+ u32 dctrl_dview;
+
+ u32 bits_pipeline;
+};
+
+#define HIST_ENTRIES 512
+#define HIST_BAYER (ISC_HIS_CFG_MODE_B + 1)
+
+enum{
+ HIST_INIT = 0,
+ HIST_ENABLED,
+ HIST_DISABLED,
+};
+
+struct isc_ctrls {
+ struct v4l2_ctrl_handler handler;
+
+ u32 brightness;
+ u32 contrast;
+ u8 gamma_index;
+#define ISC_WB_NONE 0
+#define ISC_WB_AUTO 1
+#define ISC_WB_ONETIME 2
+ u8 awb;
+
+ /* one for each component: GR, R, GB, B */
+ u32 gain[HIST_BAYER];
+ u32 offset[HIST_BAYER];
+
+ u32 hist_entry[HIST_ENTRIES];
+ u32 hist_count[HIST_BAYER];
+ u8 hist_id;
+ u8 hist_stat;
+#define HIST_MIN_INDEX 0
+#define HIST_MAX_INDEX 1
+ u32 hist_minmax[HIST_BAYER][2];
+};
+
+#define ISC_PIPE_LINE_NODE_NUM 11
+
+/*
+ * struct isc_device - ISC device driver data/config struct
+ * @regmap: Register map
+ * @hclock: hclock clock input (refer to the datasheet)
+ * @ispck: ispck clock (refer to the datasheet)
+ * @isc_clks: ISC clocks
+ *
+ * @dev: Registered device driver
+ * @v4l2_dev: v4l2 registered device
+ * @video_dev: registered video device
+ *
+ * @vb2_vidq: video buffer 2 video queue
+ * @dma_queue_lock: lock to serialize the dma buffer queue
+ * @dma_queue: the queue for dma buffers
+ * @cur_frm: current isc frame/buffer
+ * @sequence: current frame number
+ * @stop: true if isc is not streaming, false if streaming
+ * @comp: completion reference that signals frame completion
+ *
+ * @fmt: current v4l2 format
+ * @user_formats: list of formats that are supported and agreed with sd
+ * @num_user_formats: how many formats are in user_formats
+ *
+ * @config: current ISC format configuration
+ * @try_config: the current ISC try format, not yet activated
+ *
+ * @ctrls: holds information about ISC controls
+ * @do_wb_ctrl: control regarding the DO_WHITE_BALANCE button
+ * @awb_work: workqueue reference for auto white balance histogram
+ * analysis
+ *
+ * @lock: lock for serializing userspace file operations
+ * with ISC operations
+ * @awb_lock: lock for serializing awb work queue operations
+ * with DMA/buffer operations
+ *
+ * @pipeline: configuration of the ISC pipeline
+ *
+ * @current_subdev: current subdevice: the sensor
+ * @subdev_entities: list of subdevice entities
+ */
+struct isc_device {
+ struct regmap *regmap;
+ struct clk *hclock;
+ struct clk *ispck;
+ struct isc_clk isc_clks[2];
+
+ struct device *dev;
+ struct v4l2_device v4l2_dev;
+ struct video_device video_dev;
+
+ struct vb2_queue vb2_vidq;
+ spinlock_t dma_queue_lock; /* serialize access to dma queue */
+ struct list_head dma_queue;
+ struct isc_buffer *cur_frm;
+ unsigned int sequence;
+ bool stop;
+ struct completion comp;
+
+ struct v4l2_format fmt;
+ struct isc_format **user_formats;
+ unsigned int num_user_formats;
+
+ struct fmt_config config;
+ struct fmt_config try_config;
+
+ struct isc_ctrls ctrls;
+ struct v4l2_ctrl *do_wb_ctrl;
+ struct work_struct awb_work;
+
+ struct mutex lock; /* serialize access to file operations */
+ spinlock_t awb_lock; /* serialize access to DMA buffers from awb work queue */
+
+ struct regmap_field *pipeline[ISC_PIPE_LINE_NODE_NUM];
+
+ struct isc_subdev_entity *current_subdev;
+ struct list_head subdev_entities;
+};
+
+#define GAMMA_MAX 2
+#define GAMMA_ENTRIES 64
+
+#define ATMEL_ISC_NAME "atmel-isc"
+
+extern struct isc_format formats_list[];
+extern const struct isc_format controller_formats[];
+extern const u32 isc_gamma_table[GAMMA_MAX + 1][GAMMA_ENTRIES];
+extern const struct regmap_config isc_regmap_config;
+extern const struct v4l2_async_notifier_operations isc_async_ops;
+
+irqreturn_t isc_interrupt(int irq, void *dev_id);
+int isc_pipeline_init(struct isc_device *isc);
+int isc_clk_init(struct isc_device *isc);
+void isc_subdev_cleanup(struct isc_device *isc);
+void isc_clk_cleanup(struct isc_device *isc);
+
+#endif
diff --git a/drivers/media/platform/atmel/atmel-sama5d2-isc.c b/drivers/media/platform/atmel/atmel-sama5d2-isc.c
new file mode 100644
index 000000000000..266df14da2d5
--- /dev/null
+++ b/drivers/media/platform/atmel/atmel-sama5d2-isc.c
@@ -0,0 +1,348 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Microchip Image Sensor Controller (ISC) driver
+ *
+ * Copyright (C) 2016-2019 Microchip Technology, Inc.
+ *
+ * Author: Songjun Wu
+ * Author: Eugen Hristev <eugen.hristev@microchip.com>
+ *
+ *
+ * Sensor-->PFE-->WB-->CFA-->CC-->GAM-->CSC-->CBC-->SUB-->RLP-->DMA
+ *
+ * ISC video pipeline integrates the following submodules:
+ * PFE: Parallel Front End to sample the camera sensor input stream
+ * WB: Programmable white balance in the Bayer domain
+ * CFA: Color filter array interpolation module
+ * CC: Programmable color correction
+ * GAM: Gamma correction
+ * CSC: Programmable color space conversion
+ * CBC: Contrast and Brightness control
+ * SUB: YCbCr444 to YCbCr420 chrominance subsampling
+ * RLP: Rounding, range limiting and packing of the incoming data
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/math64.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/videodev2.h>
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-image-sizes.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-subdev.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "atmel-isc-regs.h"
+#include "atmel-isc.h"
+
+#define ISC_MAX_SUPPORT_WIDTH 2592
+#define ISC_MAX_SUPPORT_HEIGHT 1944
+
+#define ISC_CLK_MAX_DIV 255
+
+static int isc_parse_dt(struct device *dev, struct isc_device *isc)
+{
+ struct device_node *np = dev->of_node;
+ struct device_node *epn = NULL, *rem;
+ struct isc_subdev_entity *subdev_entity;
+ unsigned int flags;
+ int ret;
+
+ INIT_LIST_HEAD(&isc->subdev_entities);
+
+ while (1) {
+ struct v4l2_fwnode_endpoint v4l2_epn = { .bus_type = 0 };
+
+ epn = of_graph_get_next_endpoint(np, epn);
+ if (!epn)
+ return 0;
+
+ rem = of_graph_get_remote_port_parent(epn);
+ if (!rem) {
+ dev_notice(dev, "Remote device at %pOF not found\n",
+ epn);
+ continue;
+ }
+
+ ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(epn),
+ &v4l2_epn);
+ if (ret) {
+ of_node_put(rem);
+ ret = -EINVAL;
+ dev_err(dev, "Could not parse the endpoint\n");
+ break;
+ }
+
+ subdev_entity = devm_kzalloc(dev, sizeof(*subdev_entity),
+ GFP_KERNEL);
+ if (!subdev_entity) {
+ of_node_put(rem);
+ ret = -ENOMEM;
+ break;
+ }
+
+ /* asd will be freed by the subsystem once it's added to the
+ * notifier list
+ */
+ subdev_entity->asd = kzalloc(sizeof(*subdev_entity->asd),
+ GFP_KERNEL);
+ if (!subdev_entity->asd) {
+ of_node_put(rem);
+ ret = -ENOMEM;
+ break;
+ }
+
+ flags = v4l2_epn.bus.parallel.flags;
+
+ if (flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
+ subdev_entity->pfe_cfg0 = ISC_PFE_CFG0_HPOL_LOW;
+
+ if (flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
+ subdev_entity->pfe_cfg0 |= ISC_PFE_CFG0_VPOL_LOW;
+
+ if (flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)
+ subdev_entity->pfe_cfg0 |= ISC_PFE_CFG0_PPOL_LOW;
+
+ if (v4l2_epn.bus_type == V4L2_MBUS_BT656)
+ subdev_entity->pfe_cfg0 |= ISC_PFE_CFG0_CCIR_CRC |
+ ISC_PFE_CFG0_CCIR656;
+
+ subdev_entity->asd->match_type = V4L2_ASYNC_MATCH_FWNODE;
+ subdev_entity->asd->match.fwnode = of_fwnode_handle(rem);
+ list_add_tail(&subdev_entity->list, &isc->subdev_entities);
+ }
+
+ of_node_put(epn);
+ return ret;
+}
+
+static int atmel_isc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct isc_device *isc;
+ struct resource *res;
+ void __iomem *io_base;
+ struct isc_subdev_entity *subdev_entity;
+ int irq;
+ int ret;
+
+ isc = devm_kzalloc(dev, sizeof(*isc), GFP_KERNEL);
+ if (!isc)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, isc);
+ isc->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ io_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(io_base))
+ return PTR_ERR(io_base);
+
+ isc->regmap = devm_regmap_init_mmio(dev, io_base, &isc_regmap_config);
+ if (IS_ERR(isc->regmap)) {
+ ret = PTR_ERR(isc->regmap);
+ dev_err(dev, "failed to init register map: %d\n", ret);
+ return ret;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ dev_err(dev, "failed to get irq: %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_request_irq(dev, irq, isc_interrupt, 0,
+ ATMEL_ISC_NAME, isc);
+ if (ret < 0) {
+ dev_err(dev, "can't register ISR for IRQ %u (ret=%i)\n",
+ irq, ret);
+ return ret;
+ }
+
+ ret = isc_pipeline_init(isc);
+ if (ret)
+ return ret;
+
+ isc->hclock = devm_clk_get(dev, "hclock");
+ if (IS_ERR(isc->hclock)) {
+ ret = PTR_ERR(isc->hclock);
+ dev_err(dev, "failed to get hclock: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(isc->hclock);
+ if (ret) {
+ dev_err(dev, "failed to enable hclock: %d\n", ret);
+ return ret;
+ }
+
+ ret = isc_clk_init(isc);
+ if (ret) {
+ dev_err(dev, "failed to init isc clock: %d\n", ret);
+ goto unprepare_hclk;
+ }
+
+ isc->ispck = isc->isc_clks[ISC_ISPCK].clk;
+
+ ret = clk_prepare_enable(isc->ispck);
+ if (ret) {
+ dev_err(dev, "failed to enable ispck: %d\n", ret);
+ goto unprepare_hclk;
+ }
+
+ /* ispck should be greater than or equal to hclock */
+ ret = clk_set_rate(isc->ispck, clk_get_rate(isc->hclock));
+ if (ret) {
+ dev_err(dev, "failed to set ispck rate: %d\n", ret);
+ goto unprepare_clk;
+ }
+
+ ret = v4l2_device_register(dev, &isc->v4l2_dev);
+ if (ret) {
+ dev_err(dev, "unable to register v4l2 device.\n");
+ goto unprepare_clk;
+ }
+
+ ret = isc_parse_dt(dev, isc);
+ if (ret) {
+ dev_err(dev, "fail to parse device tree\n");
+ goto unregister_v4l2_device;
+ }
+
+ if (list_empty(&isc->subdev_entities)) {
+ dev_err(dev, "no subdev found\n");
+ ret = -ENODEV;
+ goto unregister_v4l2_device;
+ }
+
+ list_for_each_entry(subdev_entity, &isc->subdev_entities, list) {
+ v4l2_async_notifier_init(&subdev_entity->notifier);
+
+ ret = v4l2_async_notifier_add_subdev(&subdev_entity->notifier,
+ subdev_entity->asd);
+ if (ret) {
+ fwnode_handle_put(subdev_entity->asd->match.fwnode);
+ kfree(subdev_entity->asd);
+ goto cleanup_subdev;
+ }
+
+ subdev_entity->notifier.ops = &isc_async_ops;
+
+ ret = v4l2_async_notifier_register(&isc->v4l2_dev,
+ &subdev_entity->notifier);
+ if (ret) {
+ dev_err(dev, "fail to register async notifier\n");
+ goto cleanup_subdev;
+ }
+
+ if (video_is_registered(&isc->video_dev))
+ break;
+ }
+
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ pm_request_idle(dev);
+
+ return 0;
+
+cleanup_subdev:
+ isc_subdev_cleanup(isc);
+
+unregister_v4l2_device:
+ v4l2_device_unregister(&isc->v4l2_dev);
+
+unprepare_clk:
+ clk_disable_unprepare(isc->ispck);
+unprepare_hclk:
+ clk_disable_unprepare(isc->hclock);
+
+ isc_clk_cleanup(isc);
+
+ return ret;
+}
+
+static int atmel_isc_remove(struct platform_device *pdev)
+{
+ struct isc_device *isc = platform_get_drvdata(pdev);
+
+ pm_runtime_disable(&pdev->dev);
+
+ isc_subdev_cleanup(isc);
+
+ v4l2_device_unregister(&isc->v4l2_dev);
+
+ clk_disable_unprepare(isc->ispck);
+ clk_disable_unprepare(isc->hclock);
+
+ isc_clk_cleanup(isc);
+
+ return 0;
+}
+
+static int __maybe_unused isc_runtime_suspend(struct device *dev)
+{
+ struct isc_device *isc = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(isc->ispck);
+ clk_disable_unprepare(isc->hclock);
+
+ return 0;
+}
+
+static int __maybe_unused isc_runtime_resume(struct device *dev)
+{
+ struct isc_device *isc = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(isc->hclock);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(isc->ispck);
+ if (ret)
+ clk_disable_unprepare(isc->hclock);
+
+ return ret;
+}
+
+static const struct dev_pm_ops atmel_isc_dev_pm_ops = {
+ SET_RUNTIME_PM_OPS(isc_runtime_suspend, isc_runtime_resume, NULL)
+};
+
+static const struct of_device_id atmel_isc_of_match[] = {
+ { .compatible = "atmel,sama5d2-isc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, atmel_isc_of_match);
+
+static struct platform_driver atmel_isc_driver = {
+ .probe = atmel_isc_probe,
+ .remove = atmel_isc_remove,
+ .driver = {
+ .name = ATMEL_ISC_NAME,
+ .pm = &atmel_isc_dev_pm_ops,
+ .of_match_table = of_match_ptr(atmel_isc_of_match),
+ },
+};
+
+module_platform_driver(atmel_isc_driver);
+
+MODULE_AUTHOR("Songjun Wu");
+MODULE_DESCRIPTION("The V4L2 driver for Atmel-ISC");
+MODULE_LICENSE("GPL v2");
+MODULE_SUPPORTED_DEVICE("video");
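Not part of the patch above: a minimal sketch of how driver code is expected to
pair with the runtime PM callbacks defined in atmel-sama5d2-isc.c, so that
isc_runtime_suspend()/isc_runtime_resume() gate hclock and ispck around register
access. The register offset used here is a placeholder, not a real ISC register.

static int isc_sketch_touch_registers(struct isc_device *isc)
{
	const unsigned int reg = 0x0;	/* placeholder offset, not a real ISC register */
	unsigned int val;
	int ret;

	/* isc_runtime_resume() re-enables hclock and ispck */
	ret = pm_runtime_get_sync(isc->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(isc->dev);
		return ret;
	}

	ret = regmap_read(isc->regmap, reg, &val);

	/* dropping the reference lets isc_runtime_suspend() gate the clocks again */
	pm_runtime_put(isc->dev);
	return ret;
}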
diff --git a/drivers/media/platform/cec-gpio/cec-gpio.c b/drivers/media/platform/cec-gpio/cec-gpio.c
index d2861749d640..5b17d3a31896 100644
--- a/drivers/media/platform/cec-gpio/cec-gpio.c
+++ b/drivers/media/platform/cec-gpio/cec-gpio.c
@@ -17,7 +17,6 @@ struct cec_gpio {
struct gpio_desc *cec_gpio;
int cec_irq;
bool cec_is_low;
- bool cec_have_irq;
struct gpio_desc *hpd_gpio;
int hpd_irq;
@@ -55,9 +54,6 @@ static void cec_gpio_low(struct cec_adapter *adap)
if (cec->cec_is_low)
return;
- if (WARN_ON_ONCE(cec->cec_have_irq))
- free_irq(cec->cec_irq, cec);
- cec->cec_have_irq = false;
cec->cec_is_low = true;
gpiod_set_value(cec->cec_gpio, 0);
}
@@ -114,14 +110,7 @@ static bool cec_gpio_enable_irq(struct cec_adapter *adap)
{
struct cec_gpio *cec = cec_get_drvdata(adap);
- if (cec->cec_have_irq)
- return true;
-
- if (request_irq(cec->cec_irq, cec_gpio_irq_handler,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- adap->name, cec))
- return false;
- cec->cec_have_irq = true;
+ enable_irq(cec->cec_irq);
return true;
}
@@ -129,9 +118,7 @@ static void cec_gpio_disable_irq(struct cec_adapter *adap)
{
struct cec_gpio *cec = cec_get_drvdata(adap);
- if (cec->cec_have_irq)
- free_irq(cec->cec_irq, cec);
- cec->cec_have_irq = false;
+ disable_irq(cec->cec_irq);
}
static void cec_gpio_status(struct cec_adapter *adap, struct seq_file *file)
@@ -139,8 +126,7 @@ static void cec_gpio_status(struct cec_adapter *adap, struct seq_file *file)
struct cec_gpio *cec = cec_get_drvdata(adap);
seq_printf(file, "mode: %s\n", cec->cec_is_low ? "low-drive" : "read");
- if (cec->cec_have_irq)
- seq_printf(file, "using irq: %d\n", cec->cec_irq);
+ seq_printf(file, "using irq: %d\n", cec->cec_irq);
if (cec->hpd_gpio)
seq_printf(file, "hpd: %s\n",
cec->hpd_is_high ? "high" : "low");
@@ -215,6 +201,14 @@ static int cec_gpio_probe(struct platform_device *pdev)
if (IS_ERR(cec->adap))
return PTR_ERR(cec->adap);
+ ret = devm_request_irq(dev, cec->cec_irq, cec_gpio_irq_handler,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ cec->adap->name, cec);
+ if (ret)
+ return ret;
+
+ cec_gpio_disable_irq(cec->adap);
+
if (cec->hpd_gpio) {
cec->hpd_irq = gpiod_to_irq(cec->hpd_gpio);
ret = devm_request_threaded_irq(dev, cec->hpd_irq,
diff --git a/drivers/media/platform/coda/Makefile b/drivers/media/platform/coda/Makefile
index f13adacd924e..bbb16425a875 100644
--- a/drivers/media/platform/coda/Makefile
+++ b/drivers/media/platform/coda/Makefile
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y += -I$(src)
-coda-objs := coda-common.o coda-bit.o coda-gdi.o coda-h264.o coda-jpeg.o
+coda-vpu-objs := coda-common.o coda-bit.o coda-gdi.o coda-h264.o coda-mpeg2.o coda-mpeg4.o coda-jpeg.o
-obj-$(CONFIG_VIDEO_CODA) += coda.o
+obj-$(CONFIG_VIDEO_CODA) += coda-vpu.o
obj-$(CONFIG_VIDEO_IMX_VDOA) += imx-vdoa.o
diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c
index 976f6aa69f41..00c7bed3dd57 100644
--- a/drivers/media/platform/coda/coda-bit.c
+++ b/drivers/media/platform/coda/coda-bit.c
@@ -98,6 +98,8 @@ static int coda_command_sync(struct coda_ctx *ctx, int cmd)
struct coda_dev *dev = ctx->dev;
int ret;
+ lockdep_assert_held(&dev->coda_mutex);
+
coda_command_async(ctx, cmd);
ret = coda_wait_timeout(dev);
trace_coda_bit_done(ctx);
@@ -112,6 +114,8 @@ int coda_hw_reset(struct coda_ctx *ctx)
unsigned int idx;
int ret;
+ lockdep_assert_held(&dev->coda_mutex);
+
if (!dev->rstc)
return -ENOENT;
@@ -176,7 +180,7 @@ static void coda_kfifo_sync_to_device_write(struct coda_ctx *ctx)
coda_write(dev, wr_ptr, CODA_REG_BIT_WR_PTR(ctx->reg_idx));
}
-static int coda_bitstream_pad(struct coda_ctx *ctx, u32 size)
+static int coda_h264_bitstream_pad(struct coda_ctx *ctx, u32 size)
{
unsigned char *buf;
u32 n;
@@ -195,51 +199,122 @@ static int coda_bitstream_pad(struct coda_ctx *ctx, u32 size)
return (n < size) ? -ENOSPC : 0;
}
-static int coda_bitstream_queue(struct coda_ctx *ctx,
- struct vb2_v4l2_buffer *src_buf)
+int coda_bitstream_flush(struct coda_ctx *ctx)
{
- u32 src_size = vb2_get_plane_payload(&src_buf->vb2_buf, 0);
- u32 n;
+ int ret;
- n = kfifo_in(&ctx->bitstream_fifo,
- vb2_plane_vaddr(&src_buf->vb2_buf, 0), src_size);
- if (n < src_size)
- return -ENOSPC;
+ if (ctx->inst_type != CODA_INST_DECODER || !ctx->use_bit)
+ return 0;
- src_buf->sequence = ctx->qsequence++;
+ ret = coda_command_sync(ctx, CODA_COMMAND_DEC_BUF_FLUSH);
+ if (ret < 0) {
+ v4l2_err(&ctx->dev->v4l2_dev, "failed to flush bitstream\n");
+ return ret;
+ }
+
+ kfifo_init(&ctx->bitstream_fifo, ctx->bitstream.vaddr,
+ ctx->bitstream.size);
+ coda_kfifo_sync_to_device_full(ctx);
return 0;
}
+static int coda_bitstream_queue(struct coda_ctx *ctx, const u8 *buf, u32 size)
+{
+ u32 n = kfifo_in(&ctx->bitstream_fifo, buf, size);
+
+ return (n < size) ? -ENOSPC : 0;
+}
+
+static u32 coda_buffer_parse_headers(struct coda_ctx *ctx,
+ struct vb2_v4l2_buffer *src_buf,
+ u32 payload)
+{
+ u8 *vaddr = vb2_plane_vaddr(&src_buf->vb2_buf, 0);
+ u32 size = 0;
+
+ switch (ctx->codec->src_fourcc) {
+ case V4L2_PIX_FMT_MPEG2:
+ size = coda_mpeg2_parse_headers(ctx, vaddr, payload);
+ break;
+ case V4L2_PIX_FMT_MPEG4:
+ size = coda_mpeg4_parse_headers(ctx, vaddr, payload);
+ break;
+ default:
+ break;
+ }
+
+ return size;
+}
+
static bool coda_bitstream_try_queue(struct coda_ctx *ctx,
struct vb2_v4l2_buffer *src_buf)
{
unsigned long payload = vb2_get_plane_payload(&src_buf->vb2_buf, 0);
+ u8 *vaddr = vb2_plane_vaddr(&src_buf->vb2_buf, 0);
int ret;
+ int i;
if (coda_get_bitstream_payload(ctx) + payload + 512 >=
ctx->bitstream.size)
return false;
- if (vb2_plane_vaddr(&src_buf->vb2_buf, 0) == NULL) {
+ if (!vaddr) {
v4l2_err(&ctx->dev->v4l2_dev, "trying to queue empty buffer\n");
return true;
}
- /* Add zero padding before the first H.264 buffer, if it is too small */
+ if (ctx->qsequence == 0 && payload < 512) {
+ /*
+ * Add padding after the first buffer, if it is too small to be
+ * fetched by the CODA, by repeating the headers. Without
+ * repeated headers, or the first frame already queued, decoder
+ * sequence initialization fails with error code 0x2000 on i.MX6
+ * or error code 0x1 on i.MX51.
+ */
+ u32 header_size = coda_buffer_parse_headers(ctx, src_buf,
+ payload);
+
+ if (header_size) {
+ coda_dbg(1, ctx, "pad with %u-byte header\n",
+ header_size);
+ for (i = payload; i < 512; i += header_size) {
+ ret = coda_bitstream_queue(ctx, vaddr,
+ header_size);
+ if (ret < 0) {
+ v4l2_err(&ctx->dev->v4l2_dev,
+ "bitstream buffer overflow\n");
+ return false;
+ }
+ if (ctx->dev->devtype->product == CODA_960)
+ break;
+ }
+ } else {
+ coda_dbg(1, ctx,
+ "could not parse header, sequence initialization might fail\n");
+ }
+ }
+
+ /* Add padding before the first buffer, if it is too small */
if (ctx->qsequence == 0 && payload < 512 &&
ctx->codec->src_fourcc == V4L2_PIX_FMT_H264)
- coda_bitstream_pad(ctx, 512 - payload);
+ coda_h264_bitstream_pad(ctx, 512 - payload);
- ret = coda_bitstream_queue(ctx, src_buf);
+ ret = coda_bitstream_queue(ctx, vaddr, payload);
if (ret < 0) {
v4l2_err(&ctx->dev->v4l2_dev, "bitstream buffer overflow\n");
return false;
}
+
+ src_buf->sequence = ctx->qsequence++;
+
/* Sync read pointer to device */
if (ctx == v4l2_m2m_get_curr_priv(ctx->dev->m2m_dev))
coda_kfifo_sync_to_device_write(ctx);
+ /* Set the stream-end flag after the last buffer is queued */
+ if (src_buf->flags & V4L2_BUF_FLAG_LAST)
+ coda_bit_stream_end_flag(ctx);
ctx->hold = false;
return true;
@@ -327,6 +402,9 @@ void coda_fill_bitstream(struct coda_ctx *ctx, struct list_head *buffer_list)
meta->timestamp = src_buf->vb2_buf.timestamp;
meta->start = start;
meta->end = ctx->bitstream_fifo.kfifo.in;
+ meta->last = src_buf->flags & V4L2_BUF_FLAG_LAST;
+ if (meta->last)
+ coda_dbg(1, ctx, "marking last meta");
spin_lock(&ctx->buffer_meta_lock);
list_add_tail(&meta->list,
&ctx->buffer_meta_list);
@@ -391,7 +469,7 @@ static void coda_free_framebuffers(struct coda_ctx *ctx)
int i;
for (i = 0; i < CODA_MAX_FRAMEBUFFERS; i++)
- coda_free_aux_buf(ctx->dev, &ctx->internal_frames[i]);
+ coda_free_aux_buf(ctx->dev, &ctx->internal_frames[i].buf);
}
static int coda_alloc_framebuffers(struct coda_ctx *ctx,
@@ -431,7 +509,7 @@ static int coda_alloc_framebuffers(struct coda_ctx *ctx,
coda_free_framebuffers(ctx);
return -ENOMEM;
}
- ret = coda_alloc_context_buf(ctx, &ctx->internal_frames[i],
+ ret = coda_alloc_context_buf(ctx, &ctx->internal_frames[i].buf,
size, name);
kfree(name);
if (ret < 0) {
@@ -445,7 +523,7 @@ static int coda_alloc_framebuffers(struct coda_ctx *ctx,
u32 y, cb, cr, mvcol;
/* Start addresses of Y, Cb, Cr planes */
- y = ctx->internal_frames[i].paddr;
+ y = ctx->internal_frames[i].buf.paddr;
cb = y + ysize;
cr = y + ysize + ysize/4;
mvcol = y + ysize + ysize/4 + ysize/4;
@@ -597,6 +675,102 @@ static int coda_encode_header(struct coda_ctx *ctx, struct vb2_v4l2_buffer *buf,
return 0;
}
+static u32 coda_slice_mode(struct coda_ctx *ctx)
+{
+ int size, unit;
+
+ switch (ctx->params.slice_mode) {
+ case V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE:
+ default:
+ return 0;
+ case V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_MB:
+ size = ctx->params.slice_max_mb;
+ unit = 1;
+ break;
+ case V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_BYTES:
+ size = ctx->params.slice_max_bits;
+ unit = 0;
+ break;
+ }
+
+ return ((size & CODA_SLICING_SIZE_MASK) << CODA_SLICING_SIZE_OFFSET) |
+ ((unit & CODA_SLICING_UNIT_MASK) << CODA_SLICING_UNIT_OFFSET) |
+ ((1 & CODA_SLICING_MODE_MASK) << CODA_SLICING_MODE_OFFSET);
+}
+
+static int coda_enc_param_change(struct coda_ctx *ctx)
+{
+ struct coda_dev *dev = ctx->dev;
+ u32 change_enable = 0;
+ u32 success;
+ int ret;
+
+ if (ctx->params.gop_size_changed) {
+ change_enable |= CODA_PARAM_CHANGE_RC_GOP;
+ coda_write(dev, ctx->params.gop_size,
+ CODA_CMD_ENC_PARAM_RC_GOP);
+ ctx->gopcounter = ctx->params.gop_size - 1;
+ ctx->params.gop_size_changed = false;
+ }
+ if (ctx->params.h264_intra_qp_changed) {
+ coda_dbg(1, ctx, "parameter change: intra Qp %u\n",
+ ctx->params.h264_intra_qp);
+
+ if (ctx->params.bitrate) {
+ change_enable |= CODA_PARAM_CHANGE_RC_INTRA_QP;
+ coda_write(dev, ctx->params.h264_intra_qp,
+ CODA_CMD_ENC_PARAM_RC_INTRA_QP);
+ }
+ ctx->params.h264_intra_qp_changed = false;
+ }
+ if (ctx->params.bitrate_changed) {
+ coda_dbg(1, ctx, "parameter change: bitrate %u kbit/s\n",
+ ctx->params.bitrate);
+ change_enable |= CODA_PARAM_CHANGE_RC_BITRATE;
+ coda_write(dev, ctx->params.bitrate,
+ CODA_CMD_ENC_PARAM_RC_BITRATE);
+ ctx->params.bitrate_changed = false;
+ }
+ if (ctx->params.framerate_changed) {
+ coda_dbg(1, ctx, "parameter change: frame rate %u/%u Hz\n",
+ ctx->params.framerate & 0xffff,
+ (ctx->params.framerate >> 16) + 1);
+ change_enable |= CODA_PARAM_CHANGE_RC_FRAME_RATE;
+ coda_write(dev, ctx->params.framerate,
+ CODA_CMD_ENC_PARAM_RC_FRAME_RATE);
+ ctx->params.framerate_changed = false;
+ }
+ if (ctx->params.intra_refresh_changed) {
+ coda_dbg(1, ctx, "parameter change: intra refresh MBs %u\n",
+ ctx->params.intra_refresh);
+ change_enable |= CODA_PARAM_CHANGE_INTRA_MB_NUM;
+ coda_write(dev, ctx->params.intra_refresh,
+ CODA_CMD_ENC_PARAM_INTRA_MB_NUM);
+ ctx->params.intra_refresh_changed = false;
+ }
+ if (ctx->params.slice_mode_changed) {
+ change_enable |= CODA_PARAM_CHANGE_SLICE_MODE;
+ coda_write(dev, coda_slice_mode(ctx),
+ CODA_CMD_ENC_PARAM_SLICE_MODE);
+ ctx->params.slice_mode_changed = false;
+ }
+
+ if (!change_enable)
+ return 0;
+
+ coda_write(dev, change_enable, CODA_CMD_ENC_PARAM_CHANGE_ENABLE);
+
+ ret = coda_command_sync(ctx, CODA_COMMAND_RC_CHANGE_PARAMETER);
+ if (ret < 0)
+ return ret;
+
+ success = coda_read(dev, CODA_RET_ENC_PARAM_CHANGE_SUCCESS);
+ if (success != 1)
+ coda_dbg(1, ctx, "parameter change failed: %u\n", success);
+
+ return 0;
+}
+
static phys_addr_t coda_iram_alloc(struct coda_iram_info *iram, size_t size)
{
phys_addr_t ret;
@@ -1035,33 +1209,16 @@ static int coda_start_encoding(struct coda_ctx *ctx)
* in JPEG mode
*/
if (dst_fourcc != V4L2_PIX_FMT_JPEG) {
- switch (ctx->params.slice_mode) {
- case V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE:
- value = 0;
- break;
- case V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_MB:
- value = (ctx->params.slice_max_mb &
- CODA_SLICING_SIZE_MASK)
- << CODA_SLICING_SIZE_OFFSET;
- value |= (1 & CODA_SLICING_UNIT_MASK)
- << CODA_SLICING_UNIT_OFFSET;
- value |= 1 & CODA_SLICING_MODE_MASK;
- break;
- case V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES:
- value = (ctx->params.slice_max_bits &
- CODA_SLICING_SIZE_MASK)
- << CODA_SLICING_SIZE_OFFSET;
- value |= (0 & CODA_SLICING_UNIT_MASK)
- << CODA_SLICING_UNIT_OFFSET;
- value |= 1 & CODA_SLICING_MODE_MASK;
- break;
- }
+ value = coda_slice_mode(ctx);
coda_write(dev, value, CODA_CMD_ENC_SEQ_SLICE_MODE);
value = ctx->params.gop_size;
coda_write(dev, value, CODA_CMD_ENC_SEQ_GOP_SIZE);
}
if (ctx->params.bitrate) {
+ ctx->params.bitrate_changed = false;
+ ctx->params.h264_intra_qp_changed = false;
+
/* Rate control enabled */
value = (ctx->params.bitrate & CODA_RATECONTROL_BITRATE_MASK)
<< CODA_RATECONTROL_BITRATE_OFFSET;
@@ -1198,9 +1355,9 @@ static int coda_start_encoding(struct coda_ctx *ctx)
coda9_set_frame_cache(ctx, q_data_src->fourcc);
/* FIXME */
- coda_write(dev, ctx->internal_frames[2].paddr,
+ coda_write(dev, ctx->internal_frames[2].buf.paddr,
CODA9_CMD_SET_FRAME_SUBSAMP_A);
- coda_write(dev, ctx->internal_frames[3].paddr,
+ coda_write(dev, ctx->internal_frames[3].buf.paddr,
CODA9_CMD_SET_FRAME_SUBSAMP_B);
}
}
@@ -1316,6 +1473,13 @@ static int coda_prepare_encode(struct coda_ctx *ctx)
u32 rot_mode = 0;
u32 dst_fourcc;
u32 reg;
+ int ret;
+
+ ret = coda_enc_param_change(ctx);
+ if (ret < 0) {
+ v4l2_warn(&ctx->dev->v4l2_dev, "parameter change failed: %d\n",
+ ret);
+ }
src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
@@ -1452,12 +1616,25 @@ static int coda_prepare_encode(struct coda_ctx *ctx)
return 0;
}
+static char coda_frame_type_char(u32 flags)
+{
+ return (flags & V4L2_BUF_FLAG_KEYFRAME) ? 'I' :
+ (flags & V4L2_BUF_FLAG_PFRAME) ? 'P' :
+ (flags & V4L2_BUF_FLAG_BFRAME) ? 'B' : '?';
+}
+
static void coda_finish_encode(struct coda_ctx *ctx)
{
struct vb2_v4l2_buffer *src_buf, *dst_buf;
struct coda_dev *dev = ctx->dev;
u32 wr_ptr, start_ptr;
+ /*
+ * Lock to make sure that an encoder stop command running in parallel
+ * will either already have marked src_buf as last, or it will wake up
+ * the capture queue after the buffers are returned.
+ */
+ mutex_lock(&ctx->wakeup_mutex);
src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
@@ -1483,33 +1660,30 @@ static void coda_finish_encode(struct coda_ctx *ctx)
coda_read(dev, CODA_RET_ENC_PIC_SLICE_NUM);
coda_read(dev, CODA_RET_ENC_PIC_FLAG);
- if (coda_read(dev, CODA_RET_ENC_PIC_TYPE) == 0) {
+ dst_buf->flags &= ~(V4L2_BUF_FLAG_KEYFRAME |
+ V4L2_BUF_FLAG_PFRAME |
+ V4L2_BUF_FLAG_LAST);
+ if (coda_read(dev, CODA_RET_ENC_PIC_TYPE) == 0)
dst_buf->flags |= V4L2_BUF_FLAG_KEYFRAME;
- dst_buf->flags &= ~V4L2_BUF_FLAG_PFRAME;
- } else {
+ else
dst_buf->flags |= V4L2_BUF_FLAG_PFRAME;
- dst_buf->flags &= ~V4L2_BUF_FLAG_KEYFRAME;
- }
+ dst_buf->flags |= src_buf->flags & V4L2_BUF_FLAG_LAST;
- dst_buf->vb2_buf.timestamp = src_buf->vb2_buf.timestamp;
- dst_buf->field = src_buf->field;
- dst_buf->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
- dst_buf->flags |=
- src_buf->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
- dst_buf->timecode = src_buf->timecode;
+ v4l2_m2m_buf_copy_metadata(src_buf, dst_buf, false);
v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
coda_m2m_buf_done(ctx, dst_buf, VB2_BUF_STATE_DONE);
+ mutex_unlock(&ctx->wakeup_mutex);
ctx->gopcounter--;
if (ctx->gopcounter < 0)
ctx->gopcounter = ctx->params.gop_size - 1;
- coda_dbg(1, ctx, "job finished: encoded %c frame (%d)\n",
- (dst_buf->flags & V4L2_BUF_FLAG_KEYFRAME) ? 'I' : 'P',
- dst_buf->sequence);
+ coda_dbg(1, ctx, "job finished: encoded %c frame (%d)%s\n",
+ coda_frame_type_char(dst_buf->flags), dst_buf->sequence,
+ (dst_buf->flags & V4L2_BUF_FLAG_LAST) ? " (last)" : "");
}
static void coda_seq_end_work(struct work_struct *work)
@@ -1579,8 +1753,7 @@ static int coda_alloc_bitstream_buffer(struct coda_ctx *ctx,
return 0;
ctx->bitstream.size = roundup_pow_of_two(q_data->sizeimage * 2);
- ctx->bitstream.vaddr = dma_alloc_wc(&ctx->dev->plat_dev->dev,
- ctx->bitstream.size,
+ ctx->bitstream.vaddr = dma_alloc_wc(ctx->dev->dev, ctx->bitstream.size,
&ctx->bitstream.paddr, GFP_KERNEL);
if (!ctx->bitstream.vaddr) {
v4l2_err(&ctx->dev->v4l2_dev,
@@ -1598,8 +1771,8 @@ static void coda_free_bitstream_buffer(struct coda_ctx *ctx)
if (ctx->bitstream.vaddr == NULL)
return;
- dma_free_wc(&ctx->dev->plat_dev->dev, ctx->bitstream.size,
- ctx->bitstream.vaddr, ctx->bitstream.paddr);
+ dma_free_wc(ctx->dev->dev, ctx->bitstream.size, ctx->bitstream.vaddr,
+ ctx->bitstream.paddr);
ctx->bitstream.vaddr = NULL;
kfifo_init(&ctx->bitstream_fifo, NULL, 0);
}
@@ -1656,7 +1829,7 @@ static bool coda_reorder_enable(struct coda_ctx *ctx)
return profile > V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE;
}
-static int __coda_start_decoding(struct coda_ctx *ctx)
+static int __coda_decoder_seq_init(struct coda_ctx *ctx)
{
struct coda_q_data *q_data_src, *q_data_dst;
u32 bitstream_buf, bitstream_size;
@@ -1666,6 +1839,8 @@ static int __coda_start_decoding(struct coda_ctx *ctx)
u32 val;
int ret;
+ lockdep_assert_held(&dev->coda_mutex);
+
coda_dbg(1, ctx, "Video Data Order Adapter: %s\n",
ctx->use_vdoa ? "Enabled" : "Disabled");
@@ -1677,8 +1852,6 @@ static int __coda_start_decoding(struct coda_ctx *ctx)
src_fourcc = q_data_src->fourcc;
dst_fourcc = q_data_dst->fourcc;
- coda_write(dev, ctx->parabuf.paddr, CODA_REG_BIT_PARA_BUF_ADDR);
-
/* Update coda bitstream read and write pointers from kfifo */
coda_kfifo_sync_to_device_full(ctx);
@@ -1739,6 +1912,7 @@ static int __coda_start_decoding(struct coda_ctx *ctx)
v4l2_err(&dev->v4l2_dev, "CODA_COMMAND_SEQ_INIT timeout\n");
return ret;
}
+ ctx->sequence_offset = ~0U;
ctx->initialized = 1;
/* Update kfifo out pointer from coda bitstream read pointer */
@@ -1804,6 +1978,64 @@ static int __coda_start_decoding(struct coda_ctx *ctx)
(top_bottom & 0x3ff);
}
+ if (dev->devtype->product != CODA_DX6) {
+ u8 profile, level;
+
+ val = coda_read(dev, CODA7_RET_DEC_SEQ_HEADER_REPORT);
+ profile = val & 0xff;
+ level = (val >> 8) & 0x7f;
+
+ if (profile || level)
+ coda_update_profile_level_ctrls(ctx, profile, level);
+ }
+
+ return 0;
+}
+
+static void coda_dec_seq_init_work(struct work_struct *work)
+{
+ struct coda_ctx *ctx = container_of(work,
+ struct coda_ctx, seq_init_work);
+ struct coda_dev *dev = ctx->dev;
+ int ret;
+
+ mutex_lock(&ctx->buffer_mutex);
+ mutex_lock(&dev->coda_mutex);
+
+ if (ctx->initialized == 1)
+ goto out;
+
+ ret = __coda_decoder_seq_init(ctx);
+ if (ret < 0)
+ goto out;
+
+ ctx->initialized = 1;
+
+out:
+ mutex_unlock(&dev->coda_mutex);
+ mutex_unlock(&ctx->buffer_mutex);
+}
+
+static int __coda_start_decoding(struct coda_ctx *ctx)
+{
+ struct coda_q_data *q_data_src, *q_data_dst;
+ struct coda_dev *dev = ctx->dev;
+ u32 src_fourcc, dst_fourcc;
+ int ret;
+
+ if (!ctx->initialized) {
+ ret = __coda_decoder_seq_init(ctx);
+ if (ret < 0)
+ return ret;
+ }
+
+ q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ src_fourcc = q_data_src->fourcc;
+ dst_fourcc = q_data_dst->fourcc;
+
+ coda_write(dev, ctx->parabuf.paddr, CODA_REG_BIT_PARA_BUF_ADDR);
+
ret = coda_alloc_framebuffers(ctx, q_data_dst, src_fourcc);
if (ret < 0) {
v4l2_err(&dev->v4l2_dev, "failed to allocate framebuffers\n");
@@ -1812,7 +2044,8 @@ static int __coda_start_decoding(struct coda_ctx *ctx)
/* Tell the decoder how many frame buffers we allocated. */
coda_write(dev, ctx->num_internal_frames, CODA_CMD_SET_FRAME_BUF_NUM);
- coda_write(dev, width, CODA_CMD_SET_FRAME_BUF_STRIDE);
+ coda_write(dev, round_up(q_data_dst->rect.width, 16),
+ CODA_CMD_SET_FRAME_BUF_STRIDE);
if (dev->devtype->product != CODA_DX6) {
/* Set secondary AXI IRAM */
@@ -1928,7 +2161,7 @@ static int coda_prepare_decode(struct coda_ctx *ctx)
ctx->display_idx < ctx->num_internal_frames) {
vdoa_device_run(ctx->vdoa,
vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0),
- ctx->internal_frames[ctx->display_idx].paddr);
+ ctx->internal_frames[ctx->display_idx].buf.paddr);
} else {
if (dev->devtype->product == CODA_960) {
/*
@@ -2026,6 +2259,7 @@ static void coda_finish_decode(struct coda_ctx *ctx)
int width, height;
int decoded_idx;
int display_idx;
+ struct coda_internal_frame *decoded_frame = NULL;
u32 src_fourcc;
int success;
u32 err_mb;
@@ -2146,12 +2380,19 @@ static void coda_finish_decode(struct coda_ctx *ctx)
else if (ctx->display_idx < 0)
ctx->hold = true;
} else if (decoded_idx == -2) {
+ if (ctx->display_idx >= 0 &&
+ ctx->display_idx < ctx->num_internal_frames)
+ ctx->sequence_offset++;
/* no frame was decoded, we still return remaining buffers */
} else if (decoded_idx < 0 || decoded_idx >= ctx->num_internal_frames) {
v4l2_err(&dev->v4l2_dev,
"decoded frame index out of range: %d\n", decoded_idx);
} else {
- val = coda_read(dev, CODA_RET_DEC_PIC_FRAME_NUM) - 1;
+ decoded_frame = &ctx->internal_frames[decoded_idx];
+
+ val = coda_read(dev, CODA_RET_DEC_PIC_FRAME_NUM);
+ if (ctx->sequence_offset == -1)
+ ctx->sequence_offset = val;
val -= ctx->sequence_offset;
spin_lock(&ctx->buffer_meta_lock);
if (!list_empty(&ctx->buffer_meta_list)) {
@@ -2173,28 +2414,26 @@ static void coda_finish_decode(struct coda_ctx *ctx)
val, ctx->sequence_offset,
meta->sequence);
}
- ctx->frame_metas[decoded_idx] = *meta;
+ decoded_frame->meta = *meta;
kfree(meta);
} else {
spin_unlock(&ctx->buffer_meta_lock);
v4l2_err(&dev->v4l2_dev, "empty timestamp list!\n");
- memset(&ctx->frame_metas[decoded_idx], 0,
+ memset(&decoded_frame->meta, 0,
sizeof(struct coda_buffer_meta));
- ctx->frame_metas[decoded_idx].sequence = val;
+ decoded_frame->meta.sequence = val;
+ decoded_frame->meta.last = false;
ctx->sequence_offset++;
}
- trace_coda_dec_pic_done(ctx, &ctx->frame_metas[decoded_idx]);
+ trace_coda_dec_pic_done(ctx, &decoded_frame->meta);
val = coda_read(dev, CODA_RET_DEC_PIC_TYPE) & 0x7;
- if (val == 0)
- ctx->frame_types[decoded_idx] = V4L2_BUF_FLAG_KEYFRAME;
- else if (val == 1)
- ctx->frame_types[decoded_idx] = V4L2_BUF_FLAG_PFRAME;
- else
- ctx->frame_types[decoded_idx] = V4L2_BUF_FLAG_BFRAME;
+ decoded_frame->type = (val == 0) ? V4L2_BUF_FLAG_KEYFRAME :
+ (val == 1) ? V4L2_BUF_FLAG_PFRAME :
+ V4L2_BUF_FLAG_BFRAME;
- ctx->frame_errors[decoded_idx] = err_mb;
+ decoded_frame->error = err_mb;
}
if (display_idx == -1) {
@@ -2214,6 +2453,10 @@ static void coda_finish_decode(struct coda_ctx *ctx)
/* If a frame was copied out, return it */
if (ctx->display_idx >= 0 &&
ctx->display_idx < ctx->num_internal_frames) {
+ struct coda_internal_frame *ready_frame;
+
+ ready_frame = &ctx->internal_frames[ctx->display_idx];
+
dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
dst_buf->sequence = ctx->osequence++;
@@ -2221,8 +2464,25 @@ static void coda_finish_decode(struct coda_ctx *ctx)
dst_buf->flags &= ~(V4L2_BUF_FLAG_KEYFRAME |
V4L2_BUF_FLAG_PFRAME |
V4L2_BUF_FLAG_BFRAME);
- dst_buf->flags |= ctx->frame_types[ctx->display_idx];
- meta = &ctx->frame_metas[ctx->display_idx];
+ dst_buf->flags |= ready_frame->type;
+ meta = &ready_frame->meta;
+ if (meta->last && !coda_reorder_enable(ctx)) {
+ /*
+ * If this was the last decoded frame, and reordering
+ * is disabled, this will be the last display frame.
+ */
+ coda_dbg(1, ctx, "last meta, marking as last frame\n");
+ dst_buf->flags |= V4L2_BUF_FLAG_LAST;
+ } else if (ctx->bit_stream_param & CODA_BIT_STREAM_END_FLAG &&
+ display_idx == -1) {
+ /*
+ * If there is no designated presentation frame anymore,
+ * this frame has to be the last one.
+ */
+ coda_dbg(1, ctx,
+ "no more frames to return, marking as last frame\n");
+ dst_buf->flags |= V4L2_BUF_FLAG_LAST;
+ }
dst_buf->timecode = meta->timecode;
dst_buf->vb2_buf.timestamp = meta->timestamp;
@@ -2231,18 +2491,39 @@ static void coda_finish_decode(struct coda_ctx *ctx)
vb2_set_plane_payload(&dst_buf->vb2_buf, 0,
q_data_dst->sizeimage);
- if (ctx->frame_errors[ctx->display_idx] || err_vdoa)
+ if (ready_frame->error || err_vdoa)
coda_m2m_buf_done(ctx, dst_buf, VB2_BUF_STATE_ERROR);
else
coda_m2m_buf_done(ctx, dst_buf, VB2_BUF_STATE_DONE);
- coda_dbg(1, ctx, "job finished: decoded %c frame (%u/%u)\n",
- (dst_buf->flags & V4L2_BUF_FLAG_KEYFRAME) ? 'I' :
- ((dst_buf->flags & V4L2_BUF_FLAG_PFRAME) ? 'P' : 'B'),
- dst_buf->sequence, ctx->qsequence);
+ if (decoded_frame) {
+ coda_dbg(1, ctx, "job finished: decoded %c frame %u, returned %c frame %u (%u/%u)%s\n",
+ coda_frame_type_char(decoded_frame->type),
+ decoded_frame->meta.sequence,
+ coda_frame_type_char(dst_buf->flags),
+ ready_frame->meta.sequence,
+ dst_buf->sequence, ctx->qsequence,
+ (dst_buf->flags & V4L2_BUF_FLAG_LAST) ?
+ " (last)" : "");
+ } else {
+ coda_dbg(1, ctx, "job finished: no frame decoded (%d), returned %c frame %u (%u/%u)%s\n",
+ decoded_idx,
+ coda_frame_type_char(dst_buf->flags),
+ ready_frame->meta.sequence,
+ dst_buf->sequence, ctx->qsequence,
+ (dst_buf->flags & V4L2_BUF_FLAG_LAST) ?
+ " (last)" : "");
+ }
} else {
- coda_dbg(1, ctx, "job finished: no frame decoded (%u/%u)\n",
- ctx->osequence, ctx->qsequence);
+ if (decoded_frame) {
+ coda_dbg(1, ctx, "job finished: decoded %c frame %u, no frame returned (%d)\n",
+ coda_frame_type_char(decoded_frame->type),
+ decoded_frame->meta.sequence,
+ ctx->display_idx);
+ } else {
+ coda_dbg(1, ctx, "job finished: no frame decoded (%d) or returned (%d)\n",
+ decoded_idx, ctx->display_idx);
+ }
}
/* The rotator will copy the current display frame next time */
@@ -2286,6 +2567,7 @@ const struct coda_context_ops coda_bit_decode_ops = {
.prepare_run = coda_prepare_decode,
.finish_run = coda_finish_decode,
.run_timeout = coda_decode_timeout,
+ .seq_init_work = coda_dec_seq_init_work,
.seq_end_work = coda_seq_end_work,
.release = coda_bit_release,
};
@@ -2297,6 +2579,7 @@ irqreturn_t coda_irq_handler(int irq, void *data)
/* read status register to attend the IRQ */
coda_read(dev, CODA_REG_BIT_INT_STATUS);
+ coda_write(dev, 0, CODA_REG_BIT_INT_REASON);
coda_write(dev, CODA_REG_BIT_INT_CLEAR_SET,
CODA_REG_BIT_INT_CLEAR);
@@ -2304,7 +2587,6 @@ irqreturn_t coda_irq_handler(int irq, void *data)
if (ctx == NULL) {
v4l2_err(&dev->v4l2_dev,
"Instance released before the end of transaction\n");
- mutex_unlock(&dev->coda_mutex);
return IRQ_HANDLED;
}
diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
index 6238047273f2..01428de2596e 100644
--- a/drivers/media/platform/coda/coda-common.c
+++ b/drivers/media/platform/coda/coda-common.c
@@ -74,7 +74,7 @@ MODULE_PARM_DESC(enable_bwb, "Enable BWB unit for decoding, may crash on certain
void coda_write(struct coda_dev *dev, u32 data, u32 reg)
{
- v4l2_dbg(2, coda_debug, &dev->v4l2_dev,
+ v4l2_dbg(3, coda_debug, &dev->v4l2_dev,
"%s: data=0x%x, reg=0x%x\n", __func__, data, reg);
writel(data, dev->regs_base + reg);
}
@@ -84,7 +84,7 @@ unsigned int coda_read(struct coda_dev *dev, u32 reg)
u32 data;
data = readl(dev->regs_base + reg);
- v4l2_dbg(2, coda_debug, &dev->v4l2_dev,
+ v4l2_dbg(3, coda_debug, &dev->v4l2_dev,
"%s: data=0x%x, reg=0x%x\n", __func__, data, reg);
return data;
}
@@ -879,14 +879,25 @@ static int coda_qbuf(struct file *file, void *priv,
{
struct coda_ctx *ctx = fh_to_ctx(priv);
+ if (ctx->inst_type == CODA_INST_DECODER &&
+ buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ buf->flags &= ~V4L2_BUF_FLAG_LAST;
+
return v4l2_m2m_qbuf(file, ctx->fh.m2m_ctx, buf);
}
-static bool coda_buf_is_end_of_stream(struct coda_ctx *ctx,
- struct vb2_v4l2_buffer *buf)
+static int coda_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
{
- return ((ctx->bit_stream_param & CODA_BIT_STREAM_END_FLAG) &&
- (buf->sequence == (ctx->qsequence - 1)));
+ struct coda_ctx *ctx = fh_to_ctx(priv);
+ int ret;
+
+ ret = v4l2_m2m_dqbuf(file, ctx->fh.m2m_ctx, buf);
+
+ if (ctx->inst_type == CODA_INST_DECODER &&
+ buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ buf->flags &= ~V4L2_BUF_FLAG_LAST;
+
+ return ret;
}
void coda_m2m_buf_done(struct coda_ctx *ctx, struct vb2_v4l2_buffer *buf,
@@ -896,11 +907,8 @@ void coda_m2m_buf_done(struct coda_ctx *ctx, struct vb2_v4l2_buffer *buf,
.type = V4L2_EVENT_EOS
};
- if (coda_buf_is_end_of_stream(ctx, buf)) {
- buf->flags |= V4L2_BUF_FLAG_LAST;
-
+ if (buf->flags & V4L2_BUF_FLAG_LAST)
v4l2_event_queue_fh(&ctx->fh, &eos_event);
- }
v4l2_m2m_buf_done(buf, state);
}
@@ -1001,36 +1009,52 @@ static int coda_try_encoder_cmd(struct file *file, void *fh,
if (ctx->inst_type != CODA_INST_ENCODER)
return -ENOTTY;
- if (ec->cmd != V4L2_ENC_CMD_STOP)
- return -EINVAL;
+ return v4l2_m2m_ioctl_try_encoder_cmd(file, fh, ec);
+}
- if (ec->flags & V4L2_ENC_CMD_STOP_AT_GOP_END)
- return -EINVAL;
+static void coda_wake_up_capture_queue(struct coda_ctx *ctx)
+{
+ struct vb2_queue *dst_vq;
- return 0;
+ coda_dbg(1, ctx, "waking up capture queue\n");
+
+ dst_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ dst_vq->last_buffer_dequeued = true;
+ wake_up(&dst_vq->done_wq);
}
static int coda_encoder_cmd(struct file *file, void *fh,
struct v4l2_encoder_cmd *ec)
{
struct coda_ctx *ctx = fh_to_ctx(fh);
- struct vb2_queue *dst_vq;
+ struct vb2_v4l2_buffer *buf;
int ret;
ret = coda_try_encoder_cmd(file, fh, ec);
if (ret < 0)
return ret;
- /* Set the stream-end flag on this context */
- ctx->bit_stream_param |= CODA_BIT_STREAM_END_FLAG;
+ mutex_lock(&ctx->wakeup_mutex);
+ buf = v4l2_m2m_last_src_buf(ctx->fh.m2m_ctx);
+ if (buf) {
+ /*
+ * If the last output buffer is still on the queue, make sure
+ * that decoder finish_run will see the last flag and report it
+ * to userspace.
+ */
+ buf->flags |= V4L2_BUF_FLAG_LAST;
+ } else {
+ /* Set the stream-end flag on this context */
+ ctx->bit_stream_param |= CODA_BIT_STREAM_END_FLAG;
- /* If there is no buffer in flight, wake up */
- if (!ctx->streamon_out || ctx->qsequence == ctx->osequence) {
- dst_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
- V4L2_BUF_TYPE_VIDEO_CAPTURE);
- dst_vq->last_buffer_dequeued = true;
- wake_up(&dst_vq->done_wq);
+ /*
+ * If the last output buffer has already been taken from the
+ * queue, wake up the capture queue and signal end of stream
+ * via the -EPIPE mechanism.
+ */
+ coda_wake_up_capture_queue(ctx);
}
+ mutex_unlock(&ctx->wakeup_mutex);
return 0;
}
@@ -1043,32 +1067,89 @@ static int coda_try_decoder_cmd(struct file *file, void *fh,
if (ctx->inst_type != CODA_INST_DECODER)
return -ENOTTY;
- if (dc->cmd != V4L2_DEC_CMD_STOP)
- return -EINVAL;
-
- if (dc->flags & V4L2_DEC_CMD_STOP_TO_BLACK)
- return -EINVAL;
-
- if (!(dc->flags & V4L2_DEC_CMD_STOP_IMMEDIATELY) && (dc->stop.pts != 0))
- return -EINVAL;
-
- return 0;
+ return v4l2_m2m_ioctl_try_decoder_cmd(file, fh, dc);
}
static int coda_decoder_cmd(struct file *file, void *fh,
struct v4l2_decoder_cmd *dc)
{
struct coda_ctx *ctx = fh_to_ctx(fh);
+ struct coda_dev *dev = ctx->dev;
+ struct vb2_v4l2_buffer *buf;
+ struct vb2_queue *dst_vq;
+ bool stream_end;
+ bool wakeup;
int ret;
ret = coda_try_decoder_cmd(file, fh, dc);
if (ret < 0)
return ret;
- /* Set the stream-end flag on this context */
- coda_bit_stream_end_flag(ctx);
- ctx->hold = false;
- v4l2_m2m_try_schedule(ctx->fh.m2m_ctx);
+ switch (dc->cmd) {
+ case V4L2_DEC_CMD_START:
+ mutex_lock(&ctx->bitstream_mutex);
+ mutex_lock(&dev->coda_mutex);
+ coda_bitstream_flush(ctx);
+ mutex_unlock(&dev->coda_mutex);
+ dst_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ vb2_clear_last_buffer_dequeued(dst_vq);
+ ctx->bit_stream_param &= ~CODA_BIT_STREAM_END_FLAG;
+ coda_fill_bitstream(ctx, NULL);
+ mutex_unlock(&ctx->bitstream_mutex);
+ break;
+ case V4L2_DEC_CMD_STOP:
+ stream_end = false;
+ wakeup = false;
+
+ buf = v4l2_m2m_last_src_buf(ctx->fh.m2m_ctx);
+ if (buf) {
+ coda_dbg(1, ctx, "marking last pending buffer\n");
+
+ /* Mark last buffer */
+ buf->flags |= V4L2_BUF_FLAG_LAST;
+
+ if (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) == 0) {
+ coda_dbg(1, ctx, "all remaining buffers queued\n");
+ stream_end = true;
+ }
+ } else {
+ coda_dbg(1, ctx, "marking last meta\n");
+
+ /* Mark last meta */
+ spin_lock(&ctx->buffer_meta_lock);
+ if (!list_empty(&ctx->buffer_meta_list)) {
+ struct coda_buffer_meta *meta;
+
+ meta = list_last_entry(&ctx->buffer_meta_list,
+ struct coda_buffer_meta,
+ list);
+ meta->last = true;
+ stream_end = true;
+ } else {
+ wakeup = true;
+ }
+ spin_unlock(&ctx->buffer_meta_lock);
+ }
+
+ if (stream_end) {
+ coda_dbg(1, ctx, "all remaining buffers queued\n");
+
+ /* Set the stream-end flag on this context */
+ coda_bit_stream_end_flag(ctx);
+ ctx->hold = false;
+ v4l2_m2m_try_schedule(ctx->fh.m2m_ctx);
+ }
+
+ if (wakeup) {
+ /* If there is no buffer in flight, wake up */
+ coda_wake_up_capture_queue(ctx);
+ }
+
+ break;
+ default:
+ return -EINVAL;
+ }
return 0;
}
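For reference, not part of the patch: a minimal user-space sketch of the drain
sequence that the V4L2_DEC_CMD_STOP handling above serves. Once the driver has
returned the capture buffer it marked with V4L2_BUF_FLAG_LAST, further
VIDIOC_DQBUF calls fail with EPIPE. "fd" is assumed to be an open, streaming
coda decoder node; error handling is omitted.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static void drain_decoder(int fd)
{
	struct v4l2_decoder_cmd cmd;
	struct v4l2_buffer buf;

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd = V4L2_DEC_CMD_STOP;
	ioctl(fd, VIDIOC_DECODER_CMD, &cmd);

	for (;;) {
		memset(&buf, 0, sizeof(buf));
		buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		buf.memory = V4L2_MEMORY_MMAP;

		/* stops on the LAST-flagged buffer, or on EPIPE after it */
		if (ioctl(fd, VIDIOC_DQBUF, &buf) < 0)
			break;
		if (buf.flags & V4L2_BUF_FLAG_LAST)
			break;
	}
}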
@@ -1236,6 +1317,7 @@ static int coda_s_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
tpf = &a->parm.output.timeperframe;
coda_approximate_timeperframe(tpf);
ctx->params.framerate = coda_timeperframe_to_frate(tpf);
+ ctx->params.framerate_changed = true;
return 0;
}
@@ -1243,9 +1325,16 @@ static int coda_s_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
static int coda_subscribe_event(struct v4l2_fh *fh,
const struct v4l2_event_subscription *sub)
{
+ struct coda_ctx *ctx = fh_to_ctx(fh);
+
switch (sub->type) {
case V4L2_EVENT_EOS:
return v4l2_event_subscribe(fh, sub, 0, NULL);
+ case V4L2_EVENT_SOURCE_CHANGE:
+ if (ctx->inst_type == CODA_INST_DECODER)
+ return v4l2_event_subscribe(fh, sub, 0, NULL);
+ else
+ return -EINVAL;
default:
return v4l2_ctrl_subscribe_event(fh, sub);
}
@@ -1269,7 +1358,7 @@ static const struct v4l2_ioctl_ops coda_ioctl_ops = {
.vidioc_qbuf = coda_qbuf,
.vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
- .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_dqbuf = coda_dqbuf,
.vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
.vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
@@ -1325,7 +1414,7 @@ static void coda_pic_run_work(struct work_struct *work)
if (!wait_for_completion_timeout(&ctx->completion,
msecs_to_jiffies(1000))) {
- dev_err(&dev->plat_dev->dev, "CODA PIC_RUN timeout\n");
+ dev_err(dev->dev, "CODA PIC_RUN timeout\n");
ctx->hold = true;
@@ -1412,7 +1501,7 @@ static int coda_job_ready(void *m2m_priv)
return 0;
}
- coda_dbg(1, ctx, "job ready\n");
+ coda_dbg(2, ctx, "job ready\n");
return 1;
}
@@ -1563,42 +1652,81 @@ static void coda_update_menu_ctrl(struct v4l2_ctrl *ctrl, int value)
v4l2_ctrl_unlock(ctrl);
}
-static void coda_update_h264_profile_ctrl(struct coda_ctx *ctx)
+void coda_update_profile_level_ctrls(struct coda_ctx *ctx, u8 profile_idc,
+ u8 level_idc)
{
const char * const *profile_names;
+ const char * const *level_names;
+ struct v4l2_ctrl *profile_ctrl;
+ struct v4l2_ctrl *level_ctrl;
+ const char *codec_name;
+ u32 profile_cid;
+ u32 level_cid;
int profile;
+ int level;
- profile = coda_h264_profile(ctx->params.h264_profile_idc);
- if (profile < 0) {
- v4l2_warn(&ctx->dev->v4l2_dev, "Invalid H264 Profile: %u\n",
- ctx->params.h264_profile_idc);
+ switch (ctx->codec->src_fourcc) {
+ case V4L2_PIX_FMT_H264:
+ codec_name = "H264";
+ profile_cid = V4L2_CID_MPEG_VIDEO_H264_PROFILE;
+ level_cid = V4L2_CID_MPEG_VIDEO_H264_LEVEL;
+ profile_ctrl = ctx->h264_profile_ctrl;
+ level_ctrl = ctx->h264_level_ctrl;
+ profile = coda_h264_profile(profile_idc);
+ level = coda_h264_level(level_idc);
+ break;
+ case V4L2_PIX_FMT_MPEG2:
+ codec_name = "MPEG-2";
+ profile_cid = V4L2_CID_MPEG_VIDEO_MPEG2_PROFILE;
+ level_cid = V4L2_CID_MPEG_VIDEO_MPEG2_LEVEL;
+ profile_ctrl = ctx->mpeg2_profile_ctrl;
+ level_ctrl = ctx->mpeg2_level_ctrl;
+ profile = coda_mpeg2_profile(profile_idc);
+ level = coda_mpeg2_level(level_idc);
+ break;
+ case V4L2_PIX_FMT_MPEG4:
+ codec_name = "MPEG-4";
+ profile_cid = V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE;
+ level_cid = V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL;
+ profile_ctrl = ctx->mpeg4_profile_ctrl;
+ level_ctrl = ctx->mpeg4_level_ctrl;
+ profile = coda_mpeg4_profile(profile_idc);
+ level = coda_mpeg4_level(level_idc);
+ break;
+ default:
return;
}
- coda_update_menu_ctrl(ctx->h264_profile_ctrl, profile);
-
- profile_names = v4l2_ctrl_get_menu(V4L2_CID_MPEG_VIDEO_H264_PROFILE);
-
- coda_dbg(1, ctx, "Parsed H264 Profile: %s\n", profile_names[profile]);
-}
+ profile_names = v4l2_ctrl_get_menu(profile_cid);
+ level_names = v4l2_ctrl_get_menu(level_cid);
-static void coda_update_h264_level_ctrl(struct coda_ctx *ctx)
-{
- const char * const *level_names;
- int level;
+ if (profile < 0) {
+ v4l2_warn(&ctx->dev->v4l2_dev, "Invalid %s profile: %u\n",
+ codec_name, profile_idc);
+ } else {
+ coda_dbg(1, ctx, "Parsed %s profile: %s\n", codec_name,
+ profile_names[profile]);
+ coda_update_menu_ctrl(profile_ctrl, profile);
+ }
- level = coda_h264_level(ctx->params.h264_level_idc);
if (level < 0) {
- v4l2_warn(&ctx->dev->v4l2_dev, "Invalid H264 Level: %u\n",
- ctx->params.h264_level_idc);
- return;
+ v4l2_warn(&ctx->dev->v4l2_dev, "Invalid %s level: %u\n",
+ codec_name, level_idc);
+ } else {
+ coda_dbg(1, ctx, "Parsed %s level: %s\n", codec_name,
+ level_names[level]);
+ coda_update_menu_ctrl(level_ctrl, level);
}
+}
- coda_update_menu_ctrl(ctx->h264_level_ctrl, level);
-
- level_names = v4l2_ctrl_get_menu(V4L2_CID_MPEG_VIDEO_H264_LEVEL);
+static void coda_queue_source_change_event(struct coda_ctx *ctx)
+{
+ static const struct v4l2_event source_change_event = {
+ .type = V4L2_EVENT_SOURCE_CHANGE,
+ .u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION,
+ };
- coda_dbg(1, ctx, "Parsed H264 Level: %s\n", level_names[level]);
+ v4l2_event_queue_fh(&ctx->fh, &source_change_event);
}
static void coda_buf_queue(struct vb2_buffer *vb)
@@ -1631,8 +1759,9 @@ static void coda_buf_queue(struct vb2_buffer *vb)
*/
if (!ctx->params.h264_profile_idc) {
coda_sps_parse_profile(ctx, vb);
- coda_update_h264_profile_ctrl(ctx);
- coda_update_h264_level_ctrl(ctx);
+ coda_update_profile_level_ctrls(ctx,
+ ctx->params.h264_profile_idc,
+ ctx->params.h264_level_idc);
}
}
@@ -1642,6 +1771,22 @@ static void coda_buf_queue(struct vb2_buffer *vb)
/* This set buf->sequence = ctx->qsequence++ */
coda_fill_bitstream(ctx, NULL);
mutex_unlock(&ctx->bitstream_mutex);
+
+ if (!ctx->initialized) {
+ /*
+ * Run sequence initialization in case the queued
+ * buffer contained headers.
+ */
+ if (vb2_is_streaming(vb->vb2_queue) &&
+ ctx->ops->seq_init_work) {
+ queue_work(ctx->dev->workqueue,
+ &ctx->seq_init_work);
+ flush_work(&ctx->seq_init_work);
+ }
+
+ if (ctx->initialized)
+ coda_queue_source_change_event(ctx);
+ }
} else {
if (ctx->inst_type == CODA_INST_ENCODER &&
vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
@@ -1653,7 +1798,7 @@ static void coda_buf_queue(struct vb2_buffer *vb)
int coda_alloc_aux_buf(struct coda_dev *dev, struct coda_aux_buf *buf,
size_t size, const char *name, struct dentry *parent)
{
- buf->vaddr = dma_alloc_coherent(&dev->plat_dev->dev, size, &buf->paddr,
+ buf->vaddr = dma_alloc_coherent(dev->dev, size, &buf->paddr,
GFP_KERNEL);
if (!buf->vaddr) {
v4l2_err(&dev->v4l2_dev,
@@ -1670,7 +1815,7 @@ int coda_alloc_aux_buf(struct coda_dev *dev, struct coda_aux_buf *buf,
buf->dentry = debugfs_create_blob(name, 0644, parent,
&buf->blob);
if (!buf->dentry)
- dev_warn(&dev->plat_dev->dev,
+ dev_warn(dev->dev,
"failed to create debugfs entry %s\n", name);
}
@@ -1681,8 +1826,7 @@ void coda_free_aux_buf(struct coda_dev *dev,
struct coda_aux_buf *buf)
{
if (buf->vaddr) {
- dma_free_coherent(&dev->plat_dev->dev, buf->size,
- buf->vaddr, buf->paddr);
+ dma_free_coherent(dev->dev, buf->size, buf->vaddr, buf->paddr);
buf->vaddr = NULL;
buf->size = 0;
debugfs_remove(buf->dentry);
@@ -1715,10 +1859,21 @@ static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
coda_fill_bitstream(ctx, &list);
mutex_unlock(&ctx->bitstream_mutex);
- if (coda_get_bitstream_payload(ctx) < 512) {
+ if (ctx->dev->devtype->product != CODA_960 &&
+ coda_get_bitstream_payload(ctx) < 512) {
+ v4l2_err(v4l2_dev, "start payload < 512\n");
ret = -EINVAL;
goto err;
}
+
+ if (!ctx->initialized) {
+ /* Run sequence initialization */
+ if (ctx->ops->seq_init_work) {
+ queue_work(ctx->dev->workqueue,
+ &ctx->seq_init_work);
+ flush_work(&ctx->seq_init_work);
+ }
+ }
}
ctx->streamon_out = 1;
@@ -1853,11 +2008,16 @@ static const struct vb2_ops coda_qops = {
static int coda_s_ctrl(struct v4l2_ctrl *ctrl)
{
+ const char * const *val_names = v4l2_ctrl_get_menu(ctrl->id);
struct coda_ctx *ctx =
container_of(ctrl->handler, struct coda_ctx, ctrls);
- coda_dbg(1, ctx, "s_ctrl: id = 0x%x, name = \"%s\", val = %d\n",
- ctrl->id, ctrl->name, ctrl->val);
+ if (val_names)
+ coda_dbg(2, ctx, "s_ctrl: id = 0x%x, name = \"%s\", val = %d (\"%s\")\n",
+ ctrl->id, ctrl->name, ctrl->val, val_names[ctrl->val]);
+ else
+ coda_dbg(2, ctx, "s_ctrl: id = 0x%x, name = \"%s\", val = %d\n",
+ ctrl->id, ctrl->name, ctrl->val);
switch (ctrl->id) {
case V4L2_CID_HFLIP:
@@ -1874,12 +2034,14 @@ static int coda_s_ctrl(struct v4l2_ctrl *ctrl)
break;
case V4L2_CID_MPEG_VIDEO_BITRATE:
ctx->params.bitrate = ctrl->val / 1000;
+ ctx->params.bitrate_changed = true;
break;
case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
ctx->params.gop_size = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP:
ctx->params.h264_intra_qp = ctrl->val;
+ ctx->params.h264_intra_qp_changed = true;
break;
case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP:
ctx->params.h264_inter_qp = ctrl->val;
@@ -1919,23 +2081,29 @@ static int coda_s_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP:
ctx->params.mpeg4_inter_qp = ctrl->val;
break;
+ case V4L2_CID_MPEG_VIDEO_MPEG2_PROFILE:
+ case V4L2_CID_MPEG_VIDEO_MPEG2_LEVEL:
case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
/* nothing to do, these are fixed */
break;
case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE:
ctx->params.slice_mode = ctrl->val;
+ ctx->params.slice_mode_changed = true;
break;
case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB:
ctx->params.slice_max_mb = ctrl->val;
+ ctx->params.slice_mode_changed = true;
break;
case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES:
ctx->params.slice_max_bits = ctrl->val * 8;
+ ctx->params.slice_mode_changed = true;
break;
case V4L2_CID_MPEG_VIDEO_HEADER_MODE:
break;
case V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB:
ctx->params.intra_refresh = ctrl->val;
+ ctx->params.intra_refresh_changed = true;
break;
case V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME:
ctx->params.force_ipicture = true;
@@ -2040,7 +2208,7 @@ static void coda_encode_ctrls(struct coda_ctx *ctx)
}
v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops,
V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE,
- V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES, 0x0,
+ V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_BYTES, 0x0,
V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE);
v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB, 1, 0x3fffffff, 1, 1);
@@ -2098,6 +2266,34 @@ static void coda_decode_ctrls(struct coda_ctx *ctx)
&coda_ctrl_ops, V4L2_CID_MPEG_VIDEO_H264_LEVEL, max, 0, max);
if (ctx->h264_level_ctrl)
ctx->h264_level_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ ctx->mpeg2_profile_ctrl = v4l2_ctrl_new_std_menu(&ctx->ctrls,
+ &coda_ctrl_ops, V4L2_CID_MPEG_VIDEO_MPEG2_PROFILE,
+ V4L2_MPEG_VIDEO_MPEG2_PROFILE_HIGH, 0,
+ V4L2_MPEG_VIDEO_MPEG2_PROFILE_HIGH);
+ if (ctx->mpeg2_profile_ctrl)
+ ctx->mpeg2_profile_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ ctx->mpeg2_level_ctrl = v4l2_ctrl_new_std_menu(&ctx->ctrls,
+ &coda_ctrl_ops, V4L2_CID_MPEG_VIDEO_MPEG2_LEVEL,
+ V4L2_MPEG_VIDEO_MPEG2_LEVEL_HIGH, 0,
+ V4L2_MPEG_VIDEO_MPEG2_LEVEL_HIGH);
+ if (ctx->mpeg2_level_ctrl)
+ ctx->mpeg2_level_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ ctx->mpeg4_profile_ctrl = v4l2_ctrl_new_std_menu(&ctx->ctrls,
+ &coda_ctrl_ops, V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE,
+ V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_CODING_EFFICIENCY, 0,
+ V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_CODING_EFFICIENCY);
+ if (ctx->mpeg4_profile_ctrl)
+ ctx->mpeg4_profile_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ ctx->mpeg4_level_ctrl = v4l2_ctrl_new_std_menu(&ctx->ctrls,
+ &coda_ctrl_ops, V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL,
+ V4L2_MPEG_VIDEO_MPEG4_LEVEL_5, 0,
+ V4L2_MPEG_VIDEO_MPEG4_LEVEL_5);
+ if (ctx->mpeg4_level_ctrl)
+ ctx->mpeg4_level_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
}
static int coda_ctrls_setup(struct coda_ctx *ctx)
@@ -2154,7 +2350,7 @@ static int coda_queue_init(struct coda_ctx *ctx, struct vb2_queue *vq)
* queues to have at least one buffer queued.
*/
vq->min_buffers_needed = 1;
- vq->dev = &ctx->dev->plat_dev->dev;
+ vq->dev = ctx->dev->dev;
return vb2_queue_init(vq);
}
@@ -2240,6 +2436,8 @@ static int coda_open(struct file *file)
ctx->use_bit = !ctx->cvd->direct;
init_completion(&ctx->completion);
INIT_WORK(&ctx->pic_run_work, coda_pic_run_work);
+ if (ctx->ops->seq_init_work)
+ INIT_WORK(&ctx->seq_init_work, ctx->ops->seq_init_work);
if (ctx->ops->seq_end_work)
INIT_WORK(&ctx->seq_end_work, ctx->ops->seq_end_work);
v4l2_fh_init(&ctx->fh, video_devdata(file));
@@ -2277,7 +2475,7 @@ static int coda_open(struct file *file)
ctx->use_vdoa = false;
/* Power up and upload firmware if necessary */
- ret = pm_runtime_get_sync(&dev->plat_dev->dev);
+ ret = pm_runtime_get_sync(dev->dev);
if (ret < 0) {
v4l2_err(&dev->v4l2_dev, "failed to power up: %d\n", ret);
goto err_pm_get;
@@ -2312,6 +2510,7 @@ static int coda_open(struct file *file)
mutex_init(&ctx->bitstream_mutex);
mutex_init(&ctx->buffer_mutex);
+ mutex_init(&ctx->wakeup_mutex);
INIT_LIST_HEAD(&ctx->buffer_meta_list);
spin_lock_init(&ctx->buffer_meta_lock);
@@ -2324,7 +2523,7 @@ err_ctx_init:
err_clk_ahb:
clk_disable_unprepare(dev->clk_per);
err_clk_per:
- pm_runtime_put_sync(&dev->plat_dev->dev);
+ pm_runtime_put_sync(dev->dev);
err_pm_get:
v4l2_fh_del(&ctx->fh);
v4l2_fh_exit(&ctx->fh);
@@ -2363,7 +2562,7 @@ static int coda_release(struct file *file)
v4l2_ctrl_handler_free(&ctx->ctrls);
clk_disable_unprepare(dev->clk_ahb);
clk_disable_unprepare(dev->clk_per);
- pm_runtime_put_sync(&dev->plat_dev->dev);
+ pm_runtime_put_sync(dev->dev);
v4l2_fh_del(&ctx->fh);
v4l2_fh_exit(&ctx->fh);
ida_free(&dev->ida, ctx->idx);
@@ -2486,9 +2685,12 @@ err_clk_per:
static int coda_register_device(struct coda_dev *dev, int i)
{
struct video_device *vfd = &dev->vfd[i];
+ enum coda_inst_type type;
+ int ret;
if (i >= dev->devtype->num_vdevs)
return -EINVAL;
+ type = dev->devtype->vdevs[i]->type;
strscpy(vfd->name, dev->devtype->vdevs[i]->name, sizeof(vfd->name));
vfd->fops = &coda_fops;
@@ -2504,7 +2706,12 @@ static int coda_register_device(struct coda_dev *dev, int i)
v4l2_disable_ioctl(vfd, VIDIOC_G_CROP);
v4l2_disable_ioctl(vfd, VIDIOC_S_CROP);
- return video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+ if (!ret)
+ v4l2_info(&dev->v4l2_dev, "%s registered as %s\n",
+ type == CODA_INST_ENCODER ? "encoder" : "decoder",
+ video_device_node_name(vfd));
+ return ret;
}
static void coda_copy_firmware(struct coda_dev *dev, const u8 * const buf,
@@ -2550,18 +2757,16 @@ static int coda_firmware_request(struct coda_dev *dev)
fw = dev->devtype->firmware[dev->firmware];
- dev_dbg(&dev->plat_dev->dev, "requesting firmware '%s' for %s\n", fw,
+ dev_dbg(dev->dev, "requesting firmware '%s' for %s\n", fw,
coda_product_name(dev->devtype->product));
- return request_firmware_nowait(THIS_MODULE, true, fw,
- &dev->plat_dev->dev, GFP_KERNEL, dev,
- coda_fw_callback);
+ return request_firmware_nowait(THIS_MODULE, true, fw, dev->dev,
+ GFP_KERNEL, dev, coda_fw_callback);
}
static void coda_fw_callback(const struct firmware *fw, void *context)
{
struct coda_dev *dev = context;
- struct platform_device *pdev = dev->plat_dev;
int i, ret;
if (!fw) {
@@ -2579,7 +2784,7 @@ static void coda_fw_callback(const struct firmware *fw, void *context)
* firmware requests, report that the fallback firmware was
* found.
*/
- dev_info(&pdev->dev, "Using fallback firmware %s\n",
+ dev_info(dev->dev, "Using fallback firmware %s\n",
dev->devtype->firmware[dev->firmware]);
}
@@ -2618,10 +2823,7 @@ static void coda_fw_callback(const struct firmware *fw, void *context)
}
}
- v4l2_info(&dev->v4l2_dev, "codec registered as /dev/video[%d-%d]\n",
- dev->vfd[0].num, dev->vfd[i - 1].num);
-
- pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_put_sync(dev->dev);
return;
rel_vfd:
@@ -2629,7 +2831,7 @@ rel_vfd:
video_unregister_device(&dev->vfd[i]);
v4l2_m2m_release(dev->m2m_dev);
put_pm:
- pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_put_sync(dev->dev);
}
enum coda_platform {
@@ -2744,7 +2946,6 @@ static int coda_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct gen_pool *pool;
struct coda_dev *dev;
- struct resource *res;
int ret, irq;
dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
@@ -2762,7 +2963,7 @@ static int coda_probe(struct platform_device *pdev)
spin_lock_init(&dev->irqlock);
- dev->plat_dev = pdev;
+ dev->dev = &pdev->dev;
dev->clk_per = devm_clk_get(&pdev->dev, "per");
if (IS_ERR(dev->clk_per)) {
dev_err(&pdev->dev, "Could not get per clock\n");
@@ -2776,8 +2977,7 @@ static int coda_probe(struct platform_device *pdev)
}
/* Get memory for physical registers */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dev->regs_base = devm_ioremap_resource(&pdev->dev, res);
+ dev->regs_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dev->regs_base))
return PTR_ERR(dev->regs_base);
@@ -2790,8 +2990,8 @@ static int coda_probe(struct platform_device *pdev)
return irq;
}
- ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, coda_irq_handler,
- IRQF_ONESHOT, dev_name(&pdev->dev), dev);
+ ret = devm_request_irq(&pdev->dev, irq, coda_irq_handler, 0,
+ dev_name(&pdev->dev), dev);
if (ret < 0) {
dev_err(&pdev->dev, "failed to request irq: %d\n", ret);
return ret;
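The probe changes above are two common simplifications: devm_platform_ioremap_resource() folds the platform_get_resource()/devm_ioremap_resource() pair into a single call, and the threaded IRQ is dropped in favour of a plain devm_request_irq() because the handler no longer needs a sleeping (threaded) half. A minimal sketch of the resulting probe skeleton; the foo_* names are illustrative stand-ins, not coda code:

/* Sketch only: foo_dev and foo_irq_handler are illustrative. */
struct foo_dev {
	void __iomem *regs;
};

static irqreturn_t foo_irq_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int foo_probe(struct platform_device *pdev)
{
	struct foo_dev *foo;
	int irq, ret;

	foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return -ENOMEM;

	/* One call replaces platform_get_resource() + devm_ioremap_resource() */
	foo->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(foo->regs))
		return PTR_ERR(foo->regs);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* Hard IRQ handler only, so no IRQF_ONESHOT / threaded half needed */
	ret = devm_request_irq(&pdev->dev, irq, foo_irq_handler, 0,
			       dev_name(&pdev->dev), foo);
	return ret;
}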
diff --git a/drivers/media/platform/coda/coda-h264.c b/drivers/media/platform/coda/coda-h264.c
index a2fa29da1d31..8bd0aa8af114 100644
--- a/drivers/media/platform/coda/coda-h264.c
+++ b/drivers/media/platform/coda/coda-h264.c
@@ -10,7 +10,8 @@
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/videodev2.h>
-#include <coda.h>
+
+#include "coda.h"
static const u8 coda_filler_size[8] = { 0, 7, 14, 13, 12, 11, 10, 9 };
diff --git a/drivers/media/platform/coda/coda-mpeg2.c b/drivers/media/platform/coda/coda-mpeg2.c
new file mode 100644
index 000000000000..6f3f6721d286
--- /dev/null
+++ b/drivers/media/platform/coda/coda-mpeg2.c
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Coda multi-standard codec IP - MPEG-2 helper functions
+ *
+ * Copyright (C) 2019 Pengutronix, Philipp Zabel
+ */
+
+#include <linux/kernel.h>
+#include <linux/videodev2.h>
+#include "coda.h"
+
+int coda_mpeg2_profile(int profile_idc)
+{
+ switch (profile_idc) {
+ case 5:
+ return V4L2_MPEG_VIDEO_MPEG2_PROFILE_SIMPLE;
+ case 4:
+ return V4L2_MPEG_VIDEO_MPEG2_PROFILE_MAIN;
+ case 3:
+ return V4L2_MPEG_VIDEO_MPEG2_PROFILE_SNR_SCALABLE;
+ case 2:
+ return V4L2_MPEG_VIDEO_MPEG2_PROFILE_SPATIALLY_SCALABLE;
+ case 1:
+ return V4L2_MPEG_VIDEO_MPEG2_PROFILE_HIGH;
+ default:
+ return -EINVAL;
+ }
+}
+
+int coda_mpeg2_level(int level_idc)
+{
+ switch (level_idc) {
+ case 10:
+ return V4L2_MPEG_VIDEO_MPEG2_LEVEL_LOW;
+ case 8:
+ return V4L2_MPEG_VIDEO_MPEG2_LEVEL_MAIN;
+ case 6:
+ return V4L2_MPEG_VIDEO_MPEG2_LEVEL_HIGH_1440;
+ case 4:
+ return V4L2_MPEG_VIDEO_MPEG2_LEVEL_HIGH;
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * Check if the buffer starts with the MPEG-2 sequence header (with or without
+ * quantization matrix) and extension header, for example:
+ *
+ * 00 00 01 b3 2d 01 e0 34 08 8b a3 81
+ * 10 11 11 12 12 12 13 13 13 13 14 14 14 14 14 15
+ * 15 15 15 15 15 16 16 16 16 16 16 16 17 17 17 17
+ * 17 17 17 17 18 18 18 19 18 18 18 19 1a 1a 1a 1a
+ * 19 1b 1b 1b 1b 1b 1c 1c 1c 1c 1e 1e 1e 1f 1f 21
+ * 00 00 01 b5 14 8a 00 01 00 00
+ *
+ * or:
+ *
+ * 00 00 01 b3 08 00 40 15 ff ff e0 28
+ * 00 00 01 b5 14 8a 00 01 00 00
+ *
+ * Returns the detected header size in bytes or 0.
+ */
+u32 coda_mpeg2_parse_headers(struct coda_ctx *ctx, u8 *buf, u32 size)
+{
+ static const u8 sequence_header_start[4] = { 0x00, 0x00, 0x01, 0xb3 };
+ static const union {
+ u8 extension_start[4];
+ u8 start_code_prefix[3];
+ } u = { { 0x00, 0x00, 0x01, 0xb5 } };
+
+ if (size < 22 ||
+ memcmp(buf, sequence_header_start, 4) != 0)
+ return 0;
+
+ if ((size == 22 ||
+ (size >= 25 && memcmp(buf + 22, u.start_code_prefix, 3) == 0)) &&
+ memcmp(buf + 12, u.extension_start, 4) == 0)
+ return 22;
+
+ if ((size == 86 ||
+ (size > 89 && memcmp(buf + 86, u.start_code_prefix, 3) == 0)) &&
+ memcmp(buf + 76, u.extension_start, 4) == 0)
+ return 86;
+
+ return 0;
+}
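coda_mpeg2_parse_headers() returns the size in bytes of the sequence (and extension) headers it recognizes at the start of the buffer, or 0 when none are found. A minimal usage sketch; the caller shown here is illustrative, not the driver's actual call site:

/* Illustrative caller: report detected headers before further parsing. */
static void example_mpeg2_check_headers(struct coda_ctx *ctx, u8 *buf, u32 size)
{
	u32 header_size = coda_mpeg2_parse_headers(ctx, buf, size);

	if (header_size)
		pr_debug("MPEG-2 sequence headers: %u bytes\n", header_size);
	else
		pr_debug("no MPEG-2 sequence header at buffer start\n");
}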
diff --git a/drivers/media/platform/coda/coda-mpeg4.c b/drivers/media/platform/coda/coda-mpeg4.c
new file mode 100644
index 000000000000..483a4fba1b4f
--- /dev/null
+++ b/drivers/media/platform/coda/coda-mpeg4.c
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Coda multi-standard codec IP - MPEG-4 helper functions
+ *
+ * Copyright (C) 2019 Pengutronix, Philipp Zabel
+ */
+
+#include <linux/kernel.h>
+#include <linux/videodev2.h>
+
+#include "coda.h"
+
+int coda_mpeg4_profile(int profile_idc)
+{
+ switch (profile_idc) {
+ case 0:
+ return V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE;
+ case 15:
+ return V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_SIMPLE;
+ case 2:
+ return V4L2_MPEG_VIDEO_MPEG4_PROFILE_CORE;
+ case 1:
+ return V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE_SCALABLE;
+ case 11:
+ return V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_CODING_EFFICIENCY;
+ default:
+ return -EINVAL;
+ }
+}
+
+int coda_mpeg4_level(int level_idc)
+{
+ switch (level_idc) {
+ case 0:
+ return V4L2_MPEG_VIDEO_MPEG4_LEVEL_0;
+ case 1:
+ return V4L2_MPEG_VIDEO_MPEG4_LEVEL_1;
+ case 2:
+ return V4L2_MPEG_VIDEO_MPEG4_LEVEL_2;
+ case 3:
+ return V4L2_MPEG_VIDEO_MPEG4_LEVEL_3;
+ case 4:
+ return V4L2_MPEG_VIDEO_MPEG4_LEVEL_4;
+ case 5:
+ return V4L2_MPEG_VIDEO_MPEG4_LEVEL_5;
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * Check if the buffer starts with the MPEG-4 visual object sequence and visual
+ * object headers, for example:
+ *
+ * 00 00 01 b0 f1
+ * 00 00 01 b5 a9 13 00 00 01 00 00 00 01 20 08
+ * d4 8d 88 00 f5 04 04 08 14 30 3f
+ *
+ * Returns the detected header size in bytes or 0.
+ */
+u32 coda_mpeg4_parse_headers(struct coda_ctx *ctx, u8 *buf, u32 size)
+{
+ static const u8 vos_start[4] = { 0x00, 0x00, 0x01, 0xb0 };
+ static const union {
+ u8 vo_start[4];
+ u8 start_code_prefix[3];
+ } u = { { 0x00, 0x00, 0x01, 0xb5 } };
+
+ if (size < 30 ||
+ memcmp(buf, vos_start, 4) != 0 ||
+ memcmp(buf + 5, u.vo_start, 4) != 0)
+ return 0;
+
+ if (size == 30 ||
+ (size >= 33 && memcmp(buf + 30, u.start_code_prefix, 3) == 0))
+ return 30;
+
+ if (size == 31 ||
+ (size >= 34 && memcmp(buf + 31, u.start_code_prefix, 3) == 0))
+ return 31;
+
+ if (size == 32 ||
+ (size >= 35 && memcmp(buf + 32, u.start_code_prefix, 3) == 0))
+ return 32;
+
+ return 0;
+}
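Like their MPEG-2 counterparts, coda_mpeg4_profile() and coda_mpeg4_level() return -EINVAL for values they do not recognize, so callers should substitute a safe default instead of feeding the raw return value into a control. An illustrative sketch; the SIMPLE/L0 fallbacks are assumptions made for this example, not taken from the driver:

/* Illustrative mapping with fallback defaults on unknown idc values. */
static void example_mpeg4_profile_level(int profile_idc, int level_idc,
					int *profile, int *level)
{
	*profile = coda_mpeg4_profile(profile_idc);
	if (*profile < 0)
		*profile = V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE;

	*level = coda_mpeg4_level(level_idc);
	if (*level < 0)
		*level = V4L2_MPEG_VIDEO_MPEG4_LEVEL_0;
}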
diff --git a/drivers/media/platform/coda/coda.h b/drivers/media/platform/coda/coda.h
index cfcfff7838cd..848bf1da401e 100644
--- a/drivers/media/platform/coda/coda.h
+++ b/drivers/media/platform/coda/coda.h
@@ -70,7 +70,7 @@ struct coda_aux_buf {
struct coda_dev {
struct v4l2_device v4l2_dev;
struct video_device vfd[5];
- struct platform_device *plat_dev;
+ struct device *dev;
const struct coda_devtype *devtype;
int firmware;
struct vdoa_data *vdoa;
@@ -118,6 +118,8 @@ struct coda_params {
s8 h264_chroma_qp_index_offset;
u8 h264_profile_idc;
u8 h264_level_idc;
+ u8 mpeg2_profile_idc;
+ u8 mpeg2_level_idc;
u8 mpeg4_intra_qp;
u8 mpeg4_inter_qp;
u8 gop_size;
@@ -135,6 +137,12 @@ struct coda_params {
u32 slice_max_bits;
u32 slice_max_mb;
bool force_ipicture;
+ bool gop_size_changed;
+ bool bitrate_changed;
+ bool framerate_changed;
+ bool h264_intra_qp_changed;
+ bool intra_refresh_changed;
+ bool slice_mode_changed;
};
struct coda_buffer_meta {
@@ -144,6 +152,7 @@ struct coda_buffer_meta {
u64 timestamp;
unsigned int start;
unsigned int end;
+ bool last;
};
/* Per-queue, driver-specific private data */
@@ -183,14 +192,23 @@ struct coda_context_ops {
int (*prepare_run)(struct coda_ctx *ctx);
void (*finish_run)(struct coda_ctx *ctx);
void (*run_timeout)(struct coda_ctx *ctx);
+ void (*seq_init_work)(struct work_struct *work);
void (*seq_end_work)(struct work_struct *work);
void (*release)(struct coda_ctx *ctx);
};
+struct coda_internal_frame {
+ struct coda_aux_buf buf;
+ struct coda_buffer_meta meta;
+ u32 type;
+ u32 error;
+};
+
struct coda_ctx {
struct coda_dev *dev;
struct mutex buffer_mutex;
struct work_struct pic_run_work;
+ struct work_struct seq_init_work;
struct work_struct seq_end_work;
struct completion completion;
const struct coda_video_device *cvd;
@@ -213,6 +231,10 @@ struct coda_ctx {
struct v4l2_ctrl_handler ctrls;
struct v4l2_ctrl *h264_profile_ctrl;
struct v4l2_ctrl *h264_level_ctrl;
+ struct v4l2_ctrl *mpeg2_profile_ctrl;
+ struct v4l2_ctrl *mpeg2_level_ctrl;
+ struct v4l2_ctrl *mpeg4_profile_ctrl;
+ struct v4l2_ctrl *mpeg4_level_ctrl;
struct v4l2_fh fh;
int gopcounter;
int runcounter;
@@ -225,10 +247,7 @@ struct coda_ctx {
struct coda_aux_buf parabuf;
struct coda_aux_buf psbuf;
struct coda_aux_buf slicebuf;
- struct coda_aux_buf internal_frames[CODA_MAX_FRAMEBUFFERS];
- u32 frame_types[CODA_MAX_FRAMEBUFFERS];
- struct coda_buffer_meta frame_metas[CODA_MAX_FRAMEBUFFERS];
- u32 frame_errors[CODA_MAX_FRAMEBUFFERS];
+ struct coda_internal_frame internal_frames[CODA_MAX_FRAMEBUFFERS];
struct list_head buffer_meta_list;
spinlock_t buffer_meta_lock;
int num_metas;
@@ -241,11 +260,18 @@ struct coda_ctx {
u32 bit_stream_param;
u32 frm_dis_flg;
u32 frame_mem_ctrl;
+ u32 para_change;
int display_idx;
struct dentry *debugfs_entry;
bool use_bit;
bool use_vdoa;
struct vdoa_ctx *vdoa;
+ /*
+ * wakeup mutex used to serialize the encoder stop command and
+ * finish_run; it ensures that finish_run always either flags the last
+ * returned buffer or wakes up the capture queue to signal EOS afterwards.
+ */
+ struct mutex wakeup_mutex;
};
extern int coda_debug;
@@ -310,6 +336,7 @@ static inline bool coda_bitstream_can_fetch_past(struct coda_ctx *ctx,
}
bool coda_bitstream_can_fetch_past(struct coda_ctx *ctx, unsigned int pos);
+int coda_bitstream_flush(struct coda_ctx *ctx);
void coda_bit_stream_end_flag(struct coda_ctx *ctx);
@@ -324,6 +351,16 @@ int coda_sps_parse_profile(struct coda_ctx *ctx, struct vb2_buffer *vb);
int coda_h264_sps_fixup(struct coda_ctx *ctx, int width, int height, char *buf,
int *size, int max_size);
+int coda_mpeg2_profile(int profile_idc);
+int coda_mpeg2_level(int level_idc);
+u32 coda_mpeg2_parse_headers(struct coda_ctx *ctx, u8 *buf, u32 size);
+int coda_mpeg4_profile(int profile_idc);
+int coda_mpeg4_level(int level_idc);
+u32 coda_mpeg4_parse_headers(struct coda_ctx *ctx, u8 *buf, u32 size);
+
+void coda_update_profile_level_ctrls(struct coda_ctx *ctx, u8 profile_idc,
+ u8 level_idc);
+
bool coda_jpeg_check_buffer(struct coda_ctx *ctx, struct vb2_buffer *vb);
int coda_jpeg_write_tables(struct coda_ctx *ctx);
void coda_set_jpeg_compression_quality(struct coda_ctx *ctx, int quality);
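The coda.h hunks above fold the four parallel per-framebuffer arrays (aux buffers, types, metas, errors) into a single array of struct coda_internal_frame, so call sites index one array and pick the field they need. A sketch of what an access looks like under the new layout; the reset loop itself is illustrative, not driver code:

/* Illustrative: ctx->frame_types[i] etc. become fields of one element. */
static void example_reset_internal_frames(struct coda_ctx *ctx, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		ctx->internal_frames[i].type = 0;
		ctx->internal_frames[i].error = 0;
		memset(&ctx->internal_frames[i].meta, 0,
		       sizeof(ctx->internal_frames[i].meta));
	}
}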
diff --git a/drivers/media/platform/coda/coda_regs.h b/drivers/media/platform/coda/coda_regs.h
index abf8e195f6c0..b17464b56d3d 100644
--- a/drivers/media/platform/coda/coda_regs.h
+++ b/drivers/media/platform/coda/coda_regs.h
@@ -177,7 +177,7 @@
#define CODA_RET_DEC_SEQ_FRATE_DR 0x1e8
#define CODA_RET_DEC_SEQ_JPG_PARA 0x1e4
#define CODA_RET_DEC_SEQ_JPG_THUMB_IND 0x1e8
-#define CODA9_RET_DEC_SEQ_HEADER_REPORT 0x1ec
+#define CODA7_RET_DEC_SEQ_HEADER_REPORT 0x1ec
/* Decoder Picture Run */
#define CODA_CMD_DEC_PIC_ROT_MODE 0x180
@@ -342,6 +342,24 @@
#define CODA_CMD_ENC_SEQ_JPG_THUMB_SIZE 0x1a4
#define CODA_CMD_ENC_SEQ_JPG_THUMB_OFFSET 0x1a8
+/* Encoder Parameter Change */
+#define CODA_CMD_ENC_PARAM_CHANGE_ENABLE 0x180
+#define CODA_PARAM_CHANGE_RC_GOP BIT(0)
+#define CODA_PARAM_CHANGE_RC_INTRA_QP BIT(1)
+#define CODA_PARAM_CHANGE_RC_BITRATE BIT(2)
+#define CODA_PARAM_CHANGE_RC_FRAME_RATE BIT(3)
+#define CODA_PARAM_CHANGE_INTRA_MB_NUM BIT(4)
+#define CODA_PARAM_CHANGE_SLICE_MODE BIT(5)
+#define CODA_PARAM_CHANGE_HEC_MODE BIT(6)
+#define CODA_CMD_ENC_PARAM_RC_GOP 0x184
+#define CODA_CMD_ENC_PARAM_RC_INTRA_QP 0x188
+#define CODA_CMD_ENC_PARAM_RC_BITRATE 0x18c
+#define CODA_CMD_ENC_PARAM_RC_FRAME_RATE 0x190
+#define CODA_CMD_ENC_PARAM_INTRA_MB_NUM 0x194
+#define CODA_CMD_ENC_PARAM_SLICE_MODE 0x198
+#define CODA_CMD_ENC_PARAM_HEC_MODE 0x19c
+#define CODA_RET_ENC_PARAM_CHANGE_SUCCESS 0x1c0
+
/* Encoder Picture Run */
#define CODA9_CMD_ENC_PIC_SRC_INDEX 0x180
#define CODA9_CMD_ENC_PIC_SRC_STRIDE 0x184
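The new CODA_CMD_ENC_PARAM_* block above is the register interface for the firmware's parameter-change command: set the bits of the parameters to update in _CHANGE_ENABLE, write the new values into the matching registers, run the command, then check _CHANGE_SUCCESS. A hedged sketch of a bitrate update, assuming the driver's existing coda_write()/coda_read()/coda_command_sync() helpers and a CODA_COMMAND_RC_CHANGE_PARAMETER firmware command; both are assumptions based on the register names, not shown in this patch:

/* Sketch only: helper and command names are assumed, see above. */
static int example_change_bitrate(struct coda_ctx *ctx, u32 bitrate)
{
	struct coda_dev *dev = ctx->dev;
	int ret;

	coda_write(dev, CODA_PARAM_CHANGE_RC_BITRATE,
		   CODA_CMD_ENC_PARAM_CHANGE_ENABLE);
	coda_write(dev, bitrate, CODA_CMD_ENC_PARAM_RC_BITRATE);

	ret = coda_command_sync(ctx, CODA_COMMAND_RC_CHANGE_PARAMETER);
	if (ret < 0)
		return ret;

	return coda_read(dev, CODA_RET_ENC_PARAM_CHANGE_SUCCESS) ? 0 : -EIO;
}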
diff --git a/drivers/media/platform/coda/trace.h b/drivers/media/platform/coda/trace.h
index a672bfc4c6ba..6cf58237fff2 100644
--- a/drivers/media/platform/coda/trace.h
+++ b/drivers/media/platform/coda/trace.h
@@ -157,7 +157,7 @@ DEFINE_EVENT(coda_buf_meta_class, coda_dec_rot_done,
#endif /* __CODA_TRACE_H__ */
#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_PATH ../../drivers/media/platform/coda
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
index 61809d2050fa..f0f7ef638c56 100644
--- a/drivers/media/platform/davinci/vpif_capture.c
+++ b/drivers/media/platform/davinci/vpif_capture.c
@@ -1376,6 +1376,14 @@ vpif_init_free_channel_objects:
return err;
}
+static inline void free_vpif_objs(void)
+{
+ int i;
+
+ for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++)
+ kfree(vpif_obj.dev[i]);
+}
+
static int vpif_async_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *subdev,
struct v4l2_async_subdev *asd)
@@ -1645,7 +1653,7 @@ static __init int vpif_probe(struct platform_device *pdev)
err = v4l2_device_register(vpif_dev, &vpif_obj.v4l2_dev);
if (err) {
v4l2_err(vpif_dev->driver, "Error registering v4l2 device\n");
- goto cleanup;
+ goto vpif_free;
}
while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, res_idx))) {
@@ -1692,7 +1700,9 @@ static __init int vpif_probe(struct platform_device *pdev)
"registered sub device %s\n",
subdevdata->name);
}
- vpif_probe_complete();
+ err = vpif_probe_complete();
+ if (err)
+ goto probe_subdev_out;
} else {
vpif_obj.notifier.ops = &vpif_async_ops;
err = v4l2_async_notifier_register(&vpif_obj.v4l2_dev,
@@ -1711,6 +1721,8 @@ probe_subdev_out:
kfree(vpif_obj.sd);
vpif_unregister:
v4l2_device_unregister(&vpif_obj.v4l2_dev);
+vpif_free:
+ free_vpif_objs();
cleanup:
v4l2_async_notifier_cleanup(&vpif_obj.notifier);
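The new vpif_free label exists so that a failing v4l2_device_register() frees the channel objects allocated just before it, keeping the unwind an exact mirror of the setup order. The general shape, sketched with illustrative example_* stand-ins for the driver-specific steps:

/* Illustrative probe skeleton: each label undoes only what already succeeded. */
static int example_alloc_objects(void) { return 0; }
static int example_register_v4l2(void) { return 0; }
static int example_register_notifier(void) { return 0; }
static void example_unregister_v4l2(void) { }
static void example_free_objects(void) { }

static int example_probe(struct platform_device *pdev)
{
	int err;

	err = example_alloc_objects();		/* like the vpif object allocation */
	if (err)
		return err;

	err = example_register_v4l2();		/* like v4l2_device_register() */
	if (err)
		goto free_objs;

	err = example_register_notifier();
	if (err)
		goto unregister_v4l2;

	return 0;

unregister_v4l2:
	example_unregister_v4l2();
free_objs:
	example_free_objects();
	return err;
}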
diff --git a/drivers/media/platform/davinci/vpss.c b/drivers/media/platform/davinci/vpss.c
index 3f079ac1b080..d38d2bbb6f0f 100644
--- a/drivers/media/platform/davinci/vpss.c
+++ b/drivers/media/platform/davinci/vpss.c
@@ -498,9 +498,9 @@ static struct platform_driver vpss_driver = {
static void vpss_exit(void)
{
+ platform_driver_unregister(&vpss_driver);
iounmap(oper_cfg.vpss_regs_base2);
release_mem_region(VPSS_CLK_CTRL, 4);
- platform_driver_unregister(&vpss_driver);
}
static int __init vpss_init(void)
@@ -509,6 +509,11 @@ static int __init vpss_init(void)
return -EBUSY;
oper_cfg.vpss_regs_base2 = ioremap(VPSS_CLK_CTRL, 4);
+ if (unlikely(!oper_cfg.vpss_regs_base2)) {
+ release_mem_region(VPSS_CLK_CTRL, 4);
+ return -ENOMEM;
+ }
+
writel(VPSS_CLK_CTRL_VENCCLKEN |
VPSS_CLK_CTRL_DACCLKEN, oper_cfg.vpss_regs_base2);
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.c b/drivers/media/platform/exynos-gsc/gsc-core.c
index ea46d7387221..854869f0024e 100644
--- a/drivers/media/platform/exynos-gsc/gsc-core.c
+++ b/drivers/media/platform/exynos-gsc/gsc-core.c
@@ -327,7 +327,7 @@ void gsc_check_src_scale_info(struct gsc_variant *var,
}
}
-int gsc_enum_fmt_mplane(struct v4l2_fmtdesc *f)
+int gsc_enum_fmt(struct v4l2_fmtdesc *f)
{
const struct gsc_fmt *fmt;
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.h b/drivers/media/platform/exynos-gsc/gsc-core.h
index 3ada9737c8f7..772183b090c2 100644
--- a/drivers/media/platform/exynos-gsc/gsc-core.h
+++ b/drivers/media/platform/exynos-gsc/gsc-core.h
@@ -385,7 +385,7 @@ void gsc_m2m_job_finish(struct gsc_ctx *ctx, int vb_state);
u32 get_plane_size(struct gsc_frame *fr, unsigned int plane);
const struct gsc_fmt *get_format(int index);
const struct gsc_fmt *find_fmt(u32 *pixelformat, u32 *mbus_code, u32 index);
-int gsc_enum_fmt_mplane(struct v4l2_fmtdesc *f);
+int gsc_enum_fmt(struct v4l2_fmtdesc *f);
int gsc_try_fmt_mplane(struct gsc_ctx *ctx, struct v4l2_format *f);
void gsc_set_frame_size(struct gsc_frame *frame, int width, int height);
int gsc_g_fmt_mplane(struct gsc_ctx *ctx, struct v4l2_format *f);
diff --git a/drivers/media/platform/exynos-gsc/gsc-m2m.c b/drivers/media/platform/exynos-gsc/gsc-m2m.c
index 677d7cc80785..35a1d0d6dd66 100644
--- a/drivers/media/platform/exynos-gsc/gsc-m2m.c
+++ b/drivers/media/platform/exynos-gsc/gsc-m2m.c
@@ -294,15 +294,13 @@ static int gsc_m2m_querycap(struct file *file, void *fh,
strscpy(cap->card, GSC_MODULE_NAME " gscaler", sizeof(cap->card));
snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
dev_name(&gsc->pdev->dev));
- cap->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M_MPLANE;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
-static int gsc_m2m_enum_fmt_mplane(struct file *file, void *priv,
- struct v4l2_fmtdesc *f)
+static int gsc_m2m_enum_fmt(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
{
- return gsc_enum_fmt_mplane(f);
+ return gsc_enum_fmt(f);
}
static int gsc_m2m_g_fmt_mplane(struct file *file, void *fh,
@@ -558,8 +556,8 @@ static int gsc_m2m_s_selection(struct file *file, void *fh,
static const struct v4l2_ioctl_ops gsc_m2m_ioctl_ops = {
.vidioc_querycap = gsc_m2m_querycap,
- .vidioc_enum_fmt_vid_cap_mplane = gsc_m2m_enum_fmt_mplane,
- .vidioc_enum_fmt_vid_out_mplane = gsc_m2m_enum_fmt_mplane,
+ .vidioc_enum_fmt_vid_cap = gsc_m2m_enum_fmt,
+ .vidioc_enum_fmt_vid_out = gsc_m2m_enum_fmt,
.vidioc_g_fmt_vid_cap_mplane = gsc_m2m_g_fmt_mplane,
.vidioc_g_fmt_vid_out_mplane = gsc_m2m_g_fmt_mplane,
.vidioc_try_fmt_vid_cap_mplane = gsc_m2m_try_fmt_mplane,
@@ -759,6 +757,8 @@ int gsc_register_m2m_device(struct gsc_dev *gsc)
gsc->vdev.lock = &gsc->lock;
gsc->vdev.vfl_dir = VFL_DIR_M2M;
gsc->vdev.v4l2_dev = &gsc->v4l2_dev;
+ gsc->vdev.device_caps = V4L2_CAP_STREAMING |
+ V4L2_CAP_VIDEO_M2M_MPLANE;
snprintf(gsc->vdev.name, sizeof(gsc->vdev.name), "%s.%d:m2m",
GSC_MODULE_NAME, gsc->id);
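This querycap/device_caps conversion repeats across the gsc and fimc hunks in this patch: device_caps moves out of the querycap handler and onto struct video_device before registration, and the V4L2 core then fills in the querycap capabilities (including V4L2_CAP_DEVICE_CAPS) from it. The pattern, sketched for a generic mem-to-mem node; the function is illustrative and omits fops/ioctl_ops setup:

/* Illustrative: the core derives querycap capabilities from device_caps. */
static int example_register_m2m_vdev(struct video_device *vfd,
				     struct v4l2_device *v4l2_dev)
{
	vfd->v4l2_dev = v4l2_dev;
	vfd->vfl_dir = VFL_DIR_M2M;
	vfd->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M_MPLANE;

	return video_register_device(vfd, VFL_TYPE_GRABBER, -1);
}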
diff --git a/drivers/media/platform/exynos4-is/common.c b/drivers/media/platform/exynos4-is/common.c
index b4e30e7c8a4b..944b224eb621 100644
--- a/drivers/media/platform/exynos4-is/common.c
+++ b/drivers/media/platform/exynos4-is/common.c
@@ -34,15 +34,12 @@ struct v4l2_subdev *fimc_find_remote_sensor(struct media_entity *entity)
}
EXPORT_SYMBOL(fimc_find_remote_sensor);
-void __fimc_vidioc_querycap(struct device *dev, struct v4l2_capability *cap,
- unsigned int caps)
+void __fimc_vidioc_querycap(struct device *dev, struct v4l2_capability *cap)
{
strscpy(cap->driver, dev->driver->name, sizeof(cap->driver));
strscpy(cap->card, dev->driver->name, sizeof(cap->card));
snprintf(cap->bus_info, sizeof(cap->bus_info),
"platform:%s", dev_name(dev));
- cap->device_caps = caps;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
}
EXPORT_SYMBOL(__fimc_vidioc_querycap);
diff --git a/drivers/media/platform/exynos4-is/common.h b/drivers/media/platform/exynos4-is/common.h
index 41de3f716691..0389b66e5144 100644
--- a/drivers/media/platform/exynos4-is/common.h
+++ b/drivers/media/platform/exynos4-is/common.h
@@ -9,5 +9,4 @@
#include <media/v4l2-subdev.h>
struct v4l2_subdev *fimc_find_remote_sensor(struct media_entity *entity);
-void __fimc_vidioc_querycap(struct device *dev, struct v4l2_capability *cap,
- unsigned int caps);
+void __fimc_vidioc_querycap(struct device *dev, struct v4l2_capability *cap);
diff --git a/drivers/media/platform/exynos4-is/fimc-capture.c b/drivers/media/platform/exynos4-is/fimc-capture.c
index bce94681cbf0..66510365dd5d 100644
--- a/drivers/media/platform/exynos4-is/fimc-capture.c
+++ b/drivers/media/platform/exynos4-is/fimc-capture.c
@@ -725,13 +725,12 @@ static int fimc_cap_querycap(struct file *file, void *priv,
{
struct fimc_dev *fimc = video_drvdata(file);
- __fimc_vidioc_querycap(&fimc->pdev->dev, cap, V4L2_CAP_STREAMING |
- V4L2_CAP_VIDEO_CAPTURE_MPLANE);
+ __fimc_vidioc_querycap(&fimc->pdev->dev, cap);
return 0;
}
-static int fimc_cap_enum_fmt_mplane(struct file *file, void *priv,
- struct v4l2_fmtdesc *f)
+static int fimc_cap_enum_fmt(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
{
struct fimc_fmt *fmt;
@@ -1358,7 +1357,7 @@ static int fimc_cap_s_selection(struct file *file, void *fh,
static const struct v4l2_ioctl_ops fimc_capture_ioctl_ops = {
.vidioc_querycap = fimc_cap_querycap,
- .vidioc_enum_fmt_vid_cap_mplane = fimc_cap_enum_fmt_mplane,
+ .vidioc_enum_fmt_vid_cap = fimc_cap_enum_fmt,
.vidioc_try_fmt_vid_cap_mplane = fimc_cap_try_fmt_mplane,
.vidioc_s_fmt_vid_cap_mplane = fimc_cap_s_fmt_mplane,
.vidioc_g_fmt_vid_cap_mplane = fimc_cap_g_fmt_mplane,
@@ -1762,6 +1761,7 @@ static int fimc_register_capture_device(struct fimc_dev *fimc,
vfd->release = video_device_release_empty;
vfd->queue = q;
vfd->lock = &fimc->lock;
+ vfd->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE_MPLANE;
video_set_drvdata(vfd, fimc);
vid_cap = &fimc->vid_cap;
diff --git a/drivers/media/platform/exynos4-is/fimc-isp-video.c b/drivers/media/platform/exynos4-is/fimc-isp-video.c
index 8900559e1813..a75f932a289a 100644
--- a/drivers/media/platform/exynos4-is/fimc-isp-video.c
+++ b/drivers/media/platform/exynos4-is/fimc-isp-video.c
@@ -346,12 +346,12 @@ static int isp_video_querycap(struct file *file, void *priv,
{
struct fimc_isp *isp = video_drvdata(file);
- __fimc_vidioc_querycap(&isp->pdev->dev, cap, V4L2_CAP_STREAMING);
+ __fimc_vidioc_querycap(&isp->pdev->dev, cap);
return 0;
}
-static int isp_video_enum_fmt_mplane(struct file *file, void *priv,
- struct v4l2_fmtdesc *f)
+static int isp_video_enum_fmt(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
{
const struct fimc_fmt *fmt;
@@ -548,7 +548,7 @@ static int isp_video_reqbufs(struct file *file, void *priv,
static const struct v4l2_ioctl_ops isp_video_ioctl_ops = {
.vidioc_querycap = isp_video_querycap,
- .vidioc_enum_fmt_vid_cap_mplane = isp_video_enum_fmt_mplane,
+ .vidioc_enum_fmt_vid_cap = isp_video_enum_fmt,
.vidioc_try_fmt_vid_cap_mplane = isp_video_try_fmt_mplane,
.vidioc_s_fmt_vid_cap_mplane = isp_video_s_fmt_mplane,
.vidioc_g_fmt_vid_cap_mplane = isp_video_g_fmt_mplane,
@@ -611,6 +611,7 @@ int fimc_isp_video_device_register(struct fimc_isp *isp,
vdev->minor = -1;
vdev->release = video_device_release_empty;
vdev->lock = &isp->video_lock;
+ vdev->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE_MPLANE;
iv->pad.flags = MEDIA_PAD_FL_SINK;
ret = media_entity_pads_init(&vdev->entity, 1, &iv->pad);
diff --git a/drivers/media/platform/exynos4-is/fimc-lite.c b/drivers/media/platform/exynos4-is/fimc-lite.c
index 347b90088b91..c1f0aee02e5e 100644
--- a/drivers/media/platform/exynos4-is/fimc-lite.c
+++ b/drivers/media/platform/exynos4-is/fimc-lite.c
@@ -655,14 +655,11 @@ static int fimc_lite_querycap(struct file *file, void *priv,
strscpy(cap->card, FIMC_LITE_DRV_NAME, sizeof(cap->card));
snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
dev_name(&fimc->pdev->dev));
-
- cap->device_caps = V4L2_CAP_STREAMING;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
-static int fimc_lite_enum_fmt_mplane(struct file *file, void *priv,
- struct v4l2_fmtdesc *f)
+static int fimc_lite_enum_fmt(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
{
const struct fimc_fmt *fmt;
@@ -951,7 +948,7 @@ static int fimc_lite_s_selection(struct file *file, void *fh,
static const struct v4l2_ioctl_ops fimc_lite_ioctl_ops = {
.vidioc_querycap = fimc_lite_querycap,
- .vidioc_enum_fmt_vid_cap_mplane = fimc_lite_enum_fmt_mplane,
+ .vidioc_enum_fmt_vid_cap = fimc_lite_enum_fmt,
.vidioc_try_fmt_vid_cap_mplane = fimc_lite_try_fmt_mplane,
.vidioc_s_fmt_vid_cap_mplane = fimc_lite_s_fmt_mplane,
.vidioc_g_fmt_vid_cap_mplane = fimc_lite_g_fmt_mplane,
@@ -1279,6 +1276,7 @@ static int fimc_lite_subdev_registered(struct v4l2_subdev *sd)
vfd->minor = -1;
vfd->release = video_device_release_empty;
vfd->queue = q;
+ vfd->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_STREAMING;
fimc->reqbufs_count = 0;
INIT_LIST_HEAD(&fimc->pending_buf_q);
diff --git a/drivers/media/platform/exynos4-is/fimc-m2m.c b/drivers/media/platform/exynos4-is/fimc-m2m.c
index b950c152fa28..62e876fc3555 100644
--- a/drivers/media/platform/exynos4-is/fimc-m2m.c
+++ b/drivers/media/platform/exynos4-is/fimc-m2m.c
@@ -232,14 +232,13 @@ static int fimc_m2m_querycap(struct file *file, void *fh,
struct v4l2_capability *cap)
{
struct fimc_dev *fimc = video_drvdata(file);
- unsigned int caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M_MPLANE;
- __fimc_vidioc_querycap(&fimc->pdev->dev, cap, caps);
+ __fimc_vidioc_querycap(&fimc->pdev->dev, cap);
return 0;
}
-static int fimc_m2m_enum_fmt_mplane(struct file *file, void *priv,
- struct v4l2_fmtdesc *f)
+static int fimc_m2m_enum_fmt(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
{
struct fimc_fmt *fmt;
@@ -529,8 +528,8 @@ static int fimc_m2m_s_selection(struct file *file, void *fh,
static const struct v4l2_ioctl_ops fimc_m2m_ioctl_ops = {
.vidioc_querycap = fimc_m2m_querycap,
- .vidioc_enum_fmt_vid_cap_mplane = fimc_m2m_enum_fmt_mplane,
- .vidioc_enum_fmt_vid_out_mplane = fimc_m2m_enum_fmt_mplane,
+ .vidioc_enum_fmt_vid_cap = fimc_m2m_enum_fmt,
+ .vidioc_enum_fmt_vid_out = fimc_m2m_enum_fmt,
.vidioc_g_fmt_vid_cap_mplane = fimc_m2m_g_fmt_mplane,
.vidioc_g_fmt_vid_out_mplane = fimc_m2m_g_fmt_mplane,
.vidioc_try_fmt_vid_cap_mplane = fimc_m2m_try_fmt_mplane,
@@ -732,6 +731,7 @@ int fimc_register_m2m_device(struct fimc_dev *fimc,
vfd->release = video_device_release_empty;
vfd->lock = &fimc->lock;
vfd->vfl_dir = VFL_DIR_M2M;
+ vfd->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M_MPLANE;
set_bit(V4L2_FL_QUIRK_INVERTED_CROP, &vfd->flags);
snprintf(vfd->name, sizeof(vfd->name), "fimc.%d.m2m", fimc->id);
diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
index 1b83a6ec745f..d53427a8db11 100644
--- a/drivers/media/platform/exynos4-is/media-dev.c
+++ b/drivers/media/platform/exynos4-is/media-dev.c
@@ -445,6 +445,7 @@ static int fimc_md_parse_port_node(struct fimc_md *fmd,
pd->fimc_bus_type = FIMC_BUS_TYPE_ISP_WRITEBACK;
else
pd->fimc_bus_type = pd->sensor_bus_type;
+ of_node_put(np);
if (WARN_ON(index >= ARRAY_SIZE(fmd->sensor))) {
of_node_put(rem);
@@ -470,7 +471,8 @@ static int fimc_md_parse_port_node(struct fimc_md *fmd,
static int fimc_md_register_sensor_entities(struct fimc_md *fmd)
{
struct device_node *parent = fmd->pdev->dev.of_node;
- struct device_node *node, *ports;
+ struct device_node *ports = NULL;
+ struct device_node *node;
int index = 0;
int ret;
@@ -519,12 +521,14 @@ static int fimc_md_register_sensor_entities(struct fimc_md *fmd)
}
index++;
}
+ of_node_put(ports);
rpm_put:
pm_runtime_put(fmd->pmf);
return 0;
cleanup:
+ of_node_put(ports);
v4l2_async_notifier_cleanup(&fmd->subdev_notifier);
pm_runtime_put(fmd->pmf);
return ret;
diff --git a/drivers/media/platform/marvell-ccic/Kconfig b/drivers/media/platform/marvell-ccic/Kconfig
index 86b84474dd8c..3e3f86264762 100644
--- a/drivers/media/platform/marvell-ccic/Kconfig
+++ b/drivers/media/platform/marvell-ccic/Kconfig
@@ -2,6 +2,7 @@
config VIDEO_CAFE_CCIC
tristate "Marvell 88ALP01 (Cafe) CMOS Camera Controller support"
depends on PCI && I2C && VIDEO_V4L2
+ depends on COMMON_CLK
select VIDEO_OV7670
select VIDEOBUF2_VMALLOC
select VIDEOBUF2_DMA_CONTIG
@@ -15,6 +16,7 @@ config VIDEO_MMP_CAMERA
tristate "Marvell Armada 610 integrated camera controller support"
depends on I2C && VIDEO_V4L2
depends on ARCH_MMP || COMPILE_TEST
+ depends on COMMON_CLK
select VIDEO_OV7670
select I2C_GPIO
select VIDEOBUF2_VMALLOC
diff --git a/drivers/media/platform/marvell-ccic/cafe-driver.c b/drivers/media/platform/marvell-ccic/cafe-driver.c
index cd108b14b715..37fdcc53a1c4 100644
--- a/drivers/media/platform/marvell-ccic/cafe-driver.c
+++ b/drivers/media/platform/marvell-ccic/cafe-driver.c
@@ -9,6 +9,7 @@
*
* Copyright 2006-11 One Laptop Per Child Association, Inc.
* Copyright 2006-11 Jonathan Corbet <corbet@lwn.net>
+ * Copyright 2018 Lubomir Rintel <lkundrak@v3.sk>
*
* Written by Jonathan Corbet, corbet@lwn.net.
*
@@ -25,10 +26,12 @@
#include <linux/slab.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
+#include <media/i2c/ov7670.h>
#include <linux/device.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/clkdev.h>
#include "mcam-core.h"
@@ -50,6 +53,7 @@ struct cafe_camera {
int registered; /* Fully initialized? */
struct mcam_camera mcam;
struct pci_dev *pdev;
+ struct i2c_adapter *i2c_adapter;
wait_queue_head_t smbus_wait; /* Waiting on i2c events */
};
@@ -349,15 +353,15 @@ static int cafe_smbus_setup(struct cafe_camera *cam)
return ret;
}
- cam->mcam.i2c_adapter = adap;
+ cam->i2c_adapter = adap;
cafe_smbus_enable_irq(cam);
return 0;
}
static void cafe_smbus_shutdown(struct cafe_camera *cam)
{
- i2c_del_adapter(cam->mcam.i2c_adapter);
- kfree(cam->mcam.i2c_adapter);
+ i2c_del_adapter(cam->i2c_adapter);
+ kfree(cam->i2c_adapter);
}
@@ -450,6 +454,29 @@ static irqreturn_t cafe_irq(int irq, void *data)
return IRQ_RETVAL(handled);
}
+/* -------------------------------------------------------------------------- */
+
+static struct ov7670_config sensor_cfg = {
+ /*
+ * Exclude QCIF mode, because it only captures a tiny portion
+ * of the sensor FOV
+ */
+ .min_width = 320,
+ .min_height = 240,
+
+ /*
+ * Set the clock speed for the XO 1; I don't believe this
+ * driver has ever run anywhere else.
+ */
+ .clock_speed = 45,
+ .use_smbus = 1,
+};
+
+static struct i2c_board_info ov7670_info = {
+ .type = "ov7670",
+ .addr = 0x42 >> 1,
+ .platform_data = &sensor_cfg,
+};
/* -------------------------------------------------------------------------- */
/*
@@ -480,12 +507,6 @@ static int cafe_pci_probe(struct pci_dev *pdev,
mcam->dev = &pdev->dev;
snprintf(mcam->bus_info, sizeof(mcam->bus_info), "PCI:%s", pci_name(pdev));
/*
- * Set the clock speed for the XO 1; I don't believe this
- * driver has ever run anywhere else.
- */
- mcam->clock_speed = 45;
- mcam->use_smbus = 1;
- /*
* Vmalloc mode for buffers is traditional with this driver.
* We *might* be able to run DMA_contig, especially on a system
* with CMA in it.
@@ -511,11 +532,10 @@ static int cafe_pci_probe(struct pci_dev *pdev,
goto out_iounmap;
/*
- * Initialize the controller and leave it powered up. It will
- * stay that way until the sensor driver shows up.
+ * Initialize the controller.
*/
cafe_ctlr_init(mcam);
- cafe_ctlr_power_up(mcam);
+
/*
* Set up I2C/SMBUS communications. We have to drop the mutex here
* because the sensor could attach in this call chain, leading to
@@ -525,12 +545,24 @@ static int cafe_pci_probe(struct pci_dev *pdev,
if (ret)
goto out_pdown;
+ mcam->asd.match_type = V4L2_ASYNC_MATCH_I2C;
+ mcam->asd.match.i2c.adapter_id = i2c_adapter_id(cam->i2c_adapter);
+ mcam->asd.match.i2c.address = ov7670_info.addr;
+
ret = mccic_register(mcam);
- if (ret == 0) {
+ if (ret)
+ goto out_smbus_shutdown;
+
+ clkdev_create(mcam->mclk, "xclk", "%d-%04x",
+ i2c_adapter_id(cam->i2c_adapter), ov7670_info.addr);
+
+ if (i2c_new_device(cam->i2c_adapter, &ov7670_info)) {
cam->registered = 1;
return 0;
}
+ mccic_shutdown(mcam);
+out_smbus_shutdown:
cafe_smbus_shutdown(cam);
out_pdown:
cafe_ctlr_power_down(mcam);
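The clkdev_create() call above registers a clock lookup keyed on the I2C device name that i2c_new_device() will produce ("adapter-address"), so a later clk_get(dev, "xclk") from the attached sensor driver resolves to the controller's mclk. A minimal sketch of the consumer side; this is illustrative, not the ov7670 driver's actual probe code:

/* Illustrative consumer: the clkdev entry above makes "xclk" resolvable
 * on the I2C client instantiated at that adapter/address.
 */
static int example_sensor_get_xclk(struct i2c_client *client)
{
	struct clk *xclk = devm_clk_get(&client->dev, "xclk");

	if (IS_ERR(xclk))
		return PTR_ERR(xclk);

	return clk_prepare_enable(xclk);
}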
diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c
index f1b301810260..dc30c48d4671 100644
--- a/drivers/media/platform/marvell-ccic/mcam-core.c
+++ b/drivers/media/platform/marvell-ccic/mcam-core.c
@@ -4,6 +4,7 @@
* so it needs platform-specific support outside of the core.
*
* Copyright 2011 Jonathan Corbet corbet@lwn.net
+ * Copyright 2018 Lubomir Rintel <lkundrak@v3.sk>
*/
#include <linux/kernel.h>
#include <linux/module.h>
@@ -21,12 +22,12 @@
#include <linux/vmalloc.h>
#include <linux/io.h>
#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-event.h>
-#include <media/i2c/ov7670.h>
#include <media/videobuf2-vmalloc.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-dma-sg.h>
@@ -93,6 +94,9 @@ MODULE_PARM_DESC(buffer_mode,
#define sensor_call(cam, o, f, args...) \
v4l2_subdev_call(cam->sensor, o, f, ##args)
+#define notifier_to_mcam(notifier) \
+ container_of(notifier, struct mcam_camera, notifier)
+
static struct mcam_format_struct {
__u8 *desc;
__u32 pixelformat;
@@ -200,7 +204,6 @@ struct mcam_vb_buffer {
struct list_head queue;
struct mcam_dma_desc *dma_desc; /* Descriptor virtual address */
dma_addr_t dma_desc_pa; /* Descriptor physical address */
- int dma_desc_nent; /* Number of mapped descriptors */
};
static inline struct mcam_vb_buffer *vb_to_mvb(struct vb2_v4l2_buffer *vb)
@@ -282,6 +285,8 @@ static void mcam_ctlr_stop(struct mcam_camera *cam)
static void mcam_enable_mipi(struct mcam_camera *mcam)
{
/* Using MIPI mode and enable MIPI */
+ if (mcam->calc_dphy)
+ mcam->calc_dphy(mcam);
cam_dbg(mcam, "camera: DPHY3=0x%x, DPHY5=0x%x, DPHY6=0x%x\n",
mcam->dphy[0], mcam->dphy[1], mcam->dphy[2]);
mcam_reg_write(mcam, REG_CSI2_DPHY3, mcam->dphy[0]);
@@ -301,9 +306,6 @@ static void mcam_enable_mipi(struct mcam_camera *mcam)
*/
mcam_reg_write(mcam, REG_CSI2_CTRL0,
CSI2_C0_MIPI_EN | CSI2_C0_ACT_LANE(mcam->lane));
- mcam_reg_write(mcam, REG_CLKCTRL,
- (mcam->mclk_src << 29) | mcam->mclk_div);
-
mcam->mipi_enabled = true;
}
}
@@ -608,9 +610,11 @@ static void mcam_dma_contig_done(struct mcam_camera *cam, int frame)
static void mcam_sg_next_buffer(struct mcam_camera *cam)
{
struct mcam_vb_buffer *buf;
+ struct sg_table *sg_table;
buf = list_first_entry(&cam->buffers, struct mcam_vb_buffer, queue);
list_del_init(&buf->queue);
+ sg_table = vb2_dma_sg_plane_desc(&buf->vb_buf.vb2_buf, 0);
/*
* Very Bad Not Good Things happen if you don't clear
* C1_DESC_ENA before making any descriptor changes.
@@ -618,7 +622,7 @@ static void mcam_sg_next_buffer(struct mcam_camera *cam)
mcam_reg_clear_bit(cam, REG_CTRL1, C1_DESC_ENA);
mcam_reg_write(cam, REG_DMA_DESC_Y, buf->dma_desc_pa);
mcam_reg_write(cam, REG_DESC_LEN_Y,
- buf->dma_desc_nent*sizeof(struct mcam_dma_desc));
+ sg_table->nents * sizeof(struct mcam_dma_desc));
mcam_reg_write(cam, REG_DESC_LEN_U, 0);
mcam_reg_write(cam, REG_DESC_LEN_V, 0);
mcam_reg_set_bit(cam, REG_CTRL1, C1_DESC_ENA);
@@ -791,12 +795,6 @@ static void mcam_ctlr_image(struct mcam_camera *cam)
* Make sure it knows we want to use hsync/vsync.
*/
mcam_reg_write_mask(cam, REG_CTRL0, C0_SIF_HVSYNC, C0_SIFM_MASK);
- /*
- * This field controls the generation of EOF(DVP only)
- */
- if (cam->bus_type != V4L2_MBUS_CSI2_DPHY)
- mcam_reg_set_bit(cam, REG_CTRL0,
- C0_EOF_VSYNC | C0_VEDGE_CTRL);
}
@@ -832,31 +830,6 @@ static void mcam_ctlr_irq_disable(struct mcam_camera *cam)
mcam_reg_clear_bit(cam, REG_IRQMASK, FRAMEIRQS);
}
-
-
-static void mcam_ctlr_init(struct mcam_camera *cam)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&cam->dev_lock, flags);
- /*
- * Make sure it's not powered down.
- */
- mcam_reg_clear_bit(cam, REG_CTRL1, C1_PWRDWN);
- /*
- * Turn off the enable bit. It sure should be off anyway,
- * but it's good to be sure.
- */
- mcam_reg_clear_bit(cam, REG_CTRL0, C0_ENABLE);
- /*
- * Clock the sensor appropriately. Controller clock should
- * be 48MHz, sensor "typical" value is half that.
- */
- mcam_reg_write_mask(cam, REG_CLKCTRL, 2, CLK_DIV_MASK);
- spin_unlock_irqrestore(&cam->dev_lock, flags);
-}
-
-
/*
* Stop the controller, and don't return until we're really sure that no
* further DMA is going on.
@@ -900,14 +873,15 @@ static int mcam_ctlr_power_up(struct mcam_camera *cam)
int ret;
spin_lock_irqsave(&cam->dev_lock, flags);
- ret = cam->plat_power_up(cam);
- if (ret) {
- spin_unlock_irqrestore(&cam->dev_lock, flags);
- return ret;
+ if (cam->plat_power_up) {
+ ret = cam->plat_power_up(cam);
+ if (ret) {
+ spin_unlock_irqrestore(&cam->dev_lock, flags);
+ return ret;
+ }
}
mcam_reg_clear_bit(cam, REG_CTRL1, C1_PWRDWN);
spin_unlock_irqrestore(&cam->dev_lock, flags);
- msleep(5); /* Just to be sure */
return 0;
}
@@ -922,10 +896,101 @@ static void mcam_ctlr_power_down(struct mcam_camera *cam)
* power down routine.
*/
mcam_reg_set_bit(cam, REG_CTRL1, C1_PWRDWN);
- cam->plat_power_down(cam);
+ if (cam->plat_power_down)
+ cam->plat_power_down(cam);
spin_unlock_irqrestore(&cam->dev_lock, flags);
}
+/* ---------------------------------------------------------------------- */
+/*
+ * Controller clocks.
+ */
+static void mcam_clk_enable(struct mcam_camera *mcam)
+{
+ unsigned int i;
+
+ for (i = 0; i < NR_MCAM_CLK; i++) {
+ if (!IS_ERR(mcam->clk[i]))
+ clk_prepare_enable(mcam->clk[i]);
+ }
+}
+
+static void mcam_clk_disable(struct mcam_camera *mcam)
+{
+ int i;
+
+ for (i = NR_MCAM_CLK - 1; i >= 0; i--) {
+ if (!IS_ERR(mcam->clk[i]))
+ clk_disable_unprepare(mcam->clk[i]);
+ }
+}
+
+/* ---------------------------------------------------------------------- */
+/*
+ * Master sensor clock.
+ */
+static int mclk_prepare(struct clk_hw *hw)
+{
+ struct mcam_camera *cam = container_of(hw, struct mcam_camera, mclk_hw);
+
+ clk_prepare(cam->clk[0]);
+ return 0;
+}
+
+static void mclk_unprepare(struct clk_hw *hw)
+{
+ struct mcam_camera *cam = container_of(hw, struct mcam_camera, mclk_hw);
+
+ clk_unprepare(cam->clk[0]);
+}
+
+static int mclk_enable(struct clk_hw *hw)
+{
+ struct mcam_camera *cam = container_of(hw, struct mcam_camera, mclk_hw);
+ int mclk_src;
+ int mclk_div;
+
+ /*
+ * Clock the sensor appropriately. Controller clock should
+ * be 48MHz, sensor "typical" value is half that.
+ */
+ if (cam->bus_type == V4L2_MBUS_CSI2_DPHY) {
+ mclk_src = cam->mclk_src;
+ mclk_div = cam->mclk_div;
+ } else {
+ mclk_src = 3;
+ mclk_div = 2;
+ }
+
+ clk_enable(cam->clk[0]);
+ mcam_reg_write(cam, REG_CLKCTRL, (mclk_src << 29) | mclk_div);
+ mcam_ctlr_power_up(cam);
+
+ return 0;
+}
+
+static void mclk_disable(struct clk_hw *hw)
+{
+ struct mcam_camera *cam = container_of(hw, struct mcam_camera, mclk_hw);
+
+ mcam_ctlr_power_down(cam);
+ clk_disable(cam->clk[0]);
+}
+
+static unsigned long mclk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ return 48000000;
+}
+
+static const struct clk_ops mclk_ops = {
+ .prepare = mclk_prepare,
+ .unprepare = mclk_unprepare,
+ .enable = mclk_enable,
+ .disable = mclk_disable,
+ .recalc_rate = mclk_recalc_rate,
+};
+
/* -------------------------------------------------------------------- */
/*
* Communications with the sensor.
@@ -950,7 +1015,6 @@ static int mcam_cam_init(struct mcam_camera *cam)
ret = __mcam_cam_reset(cam);
/* Get/set parameters? */
cam->state = S_IDLE;
- mcam_ctlr_power_down(cam);
return ret;
}
@@ -1016,13 +1080,6 @@ static int mcam_read_setup(struct mcam_camera *cam)
spin_lock_irqsave(&cam->dev_lock, flags);
clear_bit(CF_DMA_ACTIVE, &cam->flags);
mcam_reset_buffers(cam);
- /*
- * Update CSI2_DPHY value
- */
- if (cam->calc_dphy)
- cam->calc_dphy(cam);
- cam_dbg(cam, "camera: DPHY sets: dphy3=0x%x, dphy5=0x%x, dphy6=0x%x\n",
- cam->dphy[0], cam->dphy[1], cam->dphy[2]);
if (cam->bus_type == V4L2_MBUS_CSI2_DPHY)
mcam_enable_mipi(cam);
else
@@ -1160,12 +1217,6 @@ static void mcam_vb_stop_streaming(struct vb2_queue *vq)
return;
mcam_ctlr_stop_dma(cam);
/*
- * Reset the CCIC PHY after stopping streaming,
- * otherwise, the CCIC may be unstable.
- */
- if (cam->ctlr_reset)
- cam->ctlr_reset(cam);
- /*
* VB2 reclaims the buffers, so we need to forget
* about them.
*/
@@ -1592,9 +1643,10 @@ static int mcam_v4l_open(struct file *filp)
if (ret)
goto out;
if (v4l2_fh_is_singular_file(filp)) {
- ret = mcam_ctlr_power_up(cam);
+ ret = sensor_call(cam, core, s_power, 1);
if (ret)
goto out;
+ mcam_clk_enable(cam);
__mcam_cam_reset(cam);
mcam_set_config_needed(cam, 1);
}
@@ -1616,7 +1668,8 @@ static int mcam_v4l_release(struct file *filp)
_vb2_fop_release(filp, NULL);
if (last_open) {
mcam_disable_mipi(cam);
- mcam_ctlr_power_down(cam);
+ sensor_call(cam, core, s_power, 0);
+ mcam_clk_disable(cam);
if (cam->buffer_mode == B_vmalloc && alloc_bufs_at_read)
mcam_free_dma_bufs(cam);
}
@@ -1726,23 +1779,95 @@ EXPORT_SYMBOL_GPL(mccic_irq);
/*
* Registration and such.
*/
-static struct ov7670_config sensor_cfg = {
+
+static int mccic_notify_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev, struct v4l2_async_subdev *asd)
+{
+ struct mcam_camera *cam = notifier_to_mcam(notifier);
+ int ret;
+
+ mutex_lock(&cam->s_mutex);
+ if (cam->sensor) {
+ cam_err(cam, "sensor already bound\n");
+ ret = -EBUSY;
+ goto out;
+ }
+
+ v4l2_set_subdev_hostdata(subdev, cam);
+ cam->sensor = subdev;
+
+ ret = mcam_cam_init(cam);
+ if (ret) {
+ cam->sensor = NULL;
+ goto out;
+ }
+
+ ret = mcam_setup_vb2(cam);
+ if (ret) {
+ cam->sensor = NULL;
+ goto out;
+ }
+
+ cam->vdev = mcam_v4l_template;
+ cam->vdev.v4l2_dev = &cam->v4l2_dev;
+ cam->vdev.lock = &cam->s_mutex;
+ cam->vdev.queue = &cam->vb_queue;
+ video_set_drvdata(&cam->vdev, cam);
+ ret = video_register_device(&cam->vdev, VFL_TYPE_GRABBER, -1);
+ if (ret) {
+ cam->sensor = NULL;
+ goto out;
+ }
+
+ cam_dbg(cam, "sensor %s bound\n", subdev->name);
+out:
+ mutex_unlock(&cam->s_mutex);
+ return ret;
+}
+
+static void mccic_notify_unbind(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev, struct v4l2_async_subdev *asd)
+{
+ struct mcam_camera *cam = notifier_to_mcam(notifier);
+
+ mutex_lock(&cam->s_mutex);
+ if (cam->sensor != subdev) {
+ cam_err(cam, "sensor %s not bound\n", subdev->name);
+ goto out;
+ }
+
+ video_unregister_device(&cam->vdev);
+ cam->sensor = NULL;
+ cam_dbg(cam, "sensor %s unbound\n", subdev->name);
+
+out:
+ mutex_unlock(&cam->s_mutex);
+}
+
+static int mccic_notify_complete(struct v4l2_async_notifier *notifier)
+{
+ struct mcam_camera *cam = notifier_to_mcam(notifier);
+ int ret;
+
/*
- * Exclude QCIF mode, because it only captures a tiny portion
- * of the sensor FOV
+ * Get the v4l2 setup done.
*/
- .min_width = 320,
- .min_height = 240,
-};
+ ret = v4l2_ctrl_handler_init(&cam->ctrl_handler, 10);
+ if (!ret)
+ cam->v4l2_dev.ctrl_handler = &cam->ctrl_handler;
+ return ret;
+}
+
+static const struct v4l2_async_notifier_operations mccic_notify_ops = {
+ .bound = mccic_notify_bound,
+ .unbind = mccic_notify_unbind,
+ .complete = mccic_notify_complete,
+};
int mccic_register(struct mcam_camera *cam)
{
- struct i2c_board_info ov7670_info = {
- .type = "ov7670",
- .addr = 0x42 >> 1,
- .platform_data = &sensor_cfg,
- };
+ struct clk_init_data mclk_init = { };
int ret;
/*
@@ -1755,64 +1880,62 @@ int mccic_register(struct mcam_camera *cam)
printk(KERN_ERR "marvell-cam: Cafe can't do S/G I/O, attempting vmalloc mode instead\n");
cam->buffer_mode = B_vmalloc;
}
+
if (!mcam_buffer_mode_supported(cam->buffer_mode)) {
printk(KERN_ERR "marvell-cam: buffer mode %d unsupported\n",
cam->buffer_mode);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
+
/*
* Register with V4L
*/
ret = v4l2_device_register(cam->dev, &cam->v4l2_dev);
if (ret)
- return ret;
+ goto out;
mutex_init(&cam->s_mutex);
cam->state = S_NOTREADY;
mcam_set_config_needed(cam, 1);
cam->pix_format = mcam_def_pix_format;
cam->mbus_code = mcam_def_mbus_code;
- mcam_ctlr_init(cam);
/*
- * Get the v4l2 setup done.
+ * Register sensor notifier.
*/
- ret = v4l2_ctrl_handler_init(&cam->ctrl_handler, 10);
- if (ret)
- goto out_unregister;
- cam->v4l2_dev.ctrl_handler = &cam->ctrl_handler;
+ v4l2_async_notifier_init(&cam->notifier);
+ ret = v4l2_async_notifier_add_subdev(&cam->notifier, &cam->asd);
+ if (ret) {
+ cam_warn(cam, "failed to add subdev to a notifier");
+ goto out;
+ }
+
+ cam->notifier.ops = &mccic_notify_ops;
+ ret = v4l2_async_notifier_register(&cam->v4l2_dev, &cam->notifier);
+ if (ret < 0) {
+ cam_warn(cam, "failed to register a sensor notifier");
+ goto out;
+ }
/*
- * Try to find the sensor.
+ * Register sensor master clock.
*/
- sensor_cfg.clock_speed = cam->clock_speed;
- sensor_cfg.use_smbus = cam->use_smbus;
- cam->sensor_addr = ov7670_info.addr;
- cam->sensor = v4l2_i2c_new_subdev_board(&cam->v4l2_dev,
- cam->i2c_adapter, &ov7670_info, NULL);
- if (cam->sensor == NULL) {
- ret = -ENODEV;
- goto out_unregister;
- }
+ mclk_init.parent_names = NULL;
+ mclk_init.num_parents = 0;
+ mclk_init.ops = &mclk_ops;
+ mclk_init.name = "mclk";
- ret = mcam_cam_init(cam);
- if (ret)
- goto out_unregister;
+ of_property_read_string(cam->dev->of_node, "clock-output-names",
+ &mclk_init.name);
- ret = mcam_setup_vb2(cam);
- if (ret)
- goto out_unregister;
+ cam->mclk_hw.init = &mclk_init;
- mutex_lock(&cam->s_mutex);
- cam->vdev = mcam_v4l_template;
- cam->vdev.v4l2_dev = &cam->v4l2_dev;
- cam->vdev.lock = &cam->s_mutex;
- cam->vdev.queue = &cam->vb_queue;
- video_set_drvdata(&cam->vdev, cam);
- ret = video_register_device(&cam->vdev, VFL_TYPE_GRABBER, -1);
- if (ret) {
- mutex_unlock(&cam->s_mutex);
- goto out_unregister;
+ cam->mclk = devm_clk_register(cam->dev, &cam->mclk_hw);
+ if (IS_ERR(cam->mclk)) {
+ ret = PTR_ERR(cam->mclk);
+ dev_err(cam->dev, "can't register clock\n");
+ goto out;
}
/*
@@ -1823,11 +1946,10 @@ int mccic_register(struct mcam_camera *cam)
cam_warn(cam, "Unable to alloc DMA buffers at load will try again later.");
}
- mutex_unlock(&cam->s_mutex);
return 0;
-out_unregister:
- v4l2_ctrl_handler_free(&cam->ctrl_handler);
+out:
+ v4l2_async_notifier_unregister(&cam->notifier);
v4l2_device_unregister(&cam->v4l2_dev);
return ret;
}
@@ -1843,12 +1965,12 @@ void mccic_shutdown(struct mcam_camera *cam)
*/
if (!list_empty(&cam->vdev.fh_list)) {
cam_warn(cam, "Removing a device with users!\n");
- mcam_ctlr_power_down(cam);
+ sensor_call(cam, core, s_power, 0);
}
if (cam->buffer_mode == B_vmalloc)
mcam_free_dma_bufs(cam);
- video_unregister_device(&cam->vdev);
v4l2_ctrl_handler_free(&cam->ctrl_handler);
+ v4l2_async_notifier_unregister(&cam->notifier);
v4l2_device_unregister(&cam->v4l2_dev);
}
EXPORT_SYMBOL_GPL(mccic_shutdown);
@@ -1865,7 +1987,8 @@ void mccic_suspend(struct mcam_camera *cam)
enum mcam_state cstate = cam->state;
mcam_ctlr_stop_dma(cam);
- mcam_ctlr_power_down(cam);
+ sensor_call(cam, core, s_power, 0);
+ mcam_clk_disable(cam);
cam->state = cstate;
}
mutex_unlock(&cam->s_mutex);
@@ -1878,14 +2001,15 @@ int mccic_resume(struct mcam_camera *cam)
mutex_lock(&cam->s_mutex);
if (!list_empty(&cam->vdev.fh_list)) {
- ret = mcam_ctlr_power_up(cam);
+ mcam_clk_enable(cam);
+ ret = sensor_call(cam, core, s_power, 1);
if (ret) {
mutex_unlock(&cam->s_mutex);
return ret;
}
__mcam_cam_reset(cam);
} else {
- mcam_ctlr_power_down(cam);
+ sensor_call(cam, core, s_power, 0);
}
mutex_unlock(&cam->s_mutex);
diff --git a/drivers/media/platform/marvell-ccic/mcam-core.h b/drivers/media/platform/marvell-ccic/mcam-core.h
index ad8955f9f0a1..2e3a7567a76a 100644
--- a/drivers/media/platform/marvell-ccic/mcam-core.h
+++ b/drivers/media/platform/marvell-ccic/mcam-core.h
@@ -8,6 +8,7 @@
#define _MCAM_CORE_H
#include <linux/list.h>
+#include <linux/clk-provider.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-dev.h>
@@ -102,21 +103,16 @@ struct mcam_camera {
* These fields should be set by the platform code prior to
* calling mcam_register().
*/
- struct i2c_adapter *i2c_adapter;
unsigned char __iomem *regs;
unsigned regs_size; /* size in bytes of the register space */
spinlock_t dev_lock;
struct device *dev; /* For messages, dma alloc */
enum mcam_chip_id chip_id;
- short int clock_speed; /* Sensor clock speed, default 30 */
- short int use_smbus; /* SMBUS or straight I2c? */
enum mcam_buffer_mode buffer_mode;
- int mclk_min; /* The minimal value of mclk */
int mclk_src; /* which clock source the mclk derives from */
int mclk_div; /* Clock Divider Value for MCLK */
- int ccic_id;
enum v4l2_mbus_type bus_type;
/* MIPI support */
/* The dphy config value, allocated in board file
@@ -130,6 +126,8 @@ struct mcam_camera {
/* clock tree support */
struct clk *clk[NR_MCAM_CLK];
+ struct clk_hw mclk_hw;
+ struct clk *mclk;
/*
* Callbacks from the core to the platform code.
@@ -137,7 +135,6 @@ struct mcam_camera {
int (*plat_power_up) (struct mcam_camera *cam);
void (*plat_power_down) (struct mcam_camera *cam);
void (*calc_dphy) (struct mcam_camera *cam);
- void (*ctlr_reset) (struct mcam_camera *cam);
/*
* Everything below here is private to the mcam core and
@@ -153,8 +150,9 @@ struct mcam_camera {
* Subsystem structures.
*/
struct video_device vdev;
+ struct v4l2_async_notifier notifier;
+ struct v4l2_async_subdev asd;
struct v4l2_subdev *sensor;
- unsigned short sensor_addr;
/* Videobuf2 stuff */
struct vb2_queue vb_queue;
diff --git a/drivers/media/platform/marvell-ccic/mmp-driver.c b/drivers/media/platform/marvell-ccic/mmp-driver.c
index bf4d4a47f1db..10559492e09e 100644
--- a/drivers/media/platform/marvell-ccic/mmp-driver.c
+++ b/drivers/media/platform/marvell-ccic/mmp-driver.c
@@ -4,13 +4,12 @@
* to work with the Armada 610 as used in the OLPC 1.75 system.
*
* Copyright 2011 Jonathan Corbet <corbet@lwn.net>
+ * Copyright 2018 Lubomir Rintel <lkundrak@v3.sk>
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/i2c.h>
-#include <linux/platform_data/i2c-gpio.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
@@ -18,10 +17,10 @@
#include <media/v4l2-device.h>
#include <linux/platform_data/media/mmp-camera.h>
#include <linux/device.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
-#include <linux/gpio.h>
#include <linux/io.h>
-#include <linux/delay.h>
#include <linux/list.h>
#include <linux/pm.h>
#include <linux/clk.h>
@@ -32,10 +31,9 @@ MODULE_ALIAS("platform:mmp-camera");
MODULE_AUTHOR("Jonathan Corbet <corbet@lwn.net>");
MODULE_LICENSE("GPL");
-static char *mcam_clks[] = {"CCICAXICLK", "CCICFUNCLK", "CCICPHYCLK"};
+static char *mcam_clks[] = {"axi", "func", "phy"};
struct mmp_camera {
- void __iomem *power_regs;
struct platform_device *pdev;
struct mcam_camera mcam;
struct list_head devlist;
@@ -91,118 +89,6 @@ static struct mmp_camera *mmpcam_find_device(struct platform_device *pdev)
return NULL;
}
-
-
-
-/*
- * Power-related registers; this almost certainly belongs
- * somewhere else.
- *
- * ARMADA 610 register manual, sec 7.2.1, p1842.
- */
-#define CPU_SUBSYS_PMU_BASE 0xd4282800
-#define REG_CCIC_DCGCR 0x28 /* CCIC dyn clock gate ctrl reg */
-#define REG_CCIC_CRCR 0x50 /* CCIC clk reset ctrl reg */
-#define REG_CCIC2_CRCR 0xf4 /* CCIC2 clk reset ctrl reg */
-
-static void mcam_clk_enable(struct mcam_camera *mcam)
-{
- unsigned int i;
-
- for (i = 0; i < NR_MCAM_CLK; i++) {
- if (!IS_ERR(mcam->clk[i]))
- clk_prepare_enable(mcam->clk[i]);
- }
-}
-
-static void mcam_clk_disable(struct mcam_camera *mcam)
-{
- int i;
-
- for (i = NR_MCAM_CLK - 1; i >= 0; i--) {
- if (!IS_ERR(mcam->clk[i]))
- clk_disable_unprepare(mcam->clk[i]);
- }
-}
-
-/*
- * Power control.
- */
-static void mmpcam_power_up_ctlr(struct mmp_camera *cam)
-{
- iowrite32(0x3f, cam->power_regs + REG_CCIC_DCGCR);
- iowrite32(0x3805b, cam->power_regs + REG_CCIC_CRCR);
- mdelay(1);
-}
-
-static int mmpcam_power_up(struct mcam_camera *mcam)
-{
- struct mmp_camera *cam = mcam_to_cam(mcam);
- struct mmp_camera_platform_data *pdata;
-
-/*
- * Turn on power and clocks to the controller.
- */
- mmpcam_power_up_ctlr(cam);
-/*
- * Provide power to the sensor.
- */
- mcam_reg_write(mcam, REG_CLKCTRL, 0x60000002);
- pdata = cam->pdev->dev.platform_data;
- gpio_set_value(pdata->sensor_power_gpio, 1);
- mdelay(5);
- mcam_reg_clear_bit(mcam, REG_CTRL1, 0x10000000);
- gpio_set_value(pdata->sensor_reset_gpio, 0); /* reset is active low */
- mdelay(5);
- gpio_set_value(pdata->sensor_reset_gpio, 1); /* reset is active low */
- mdelay(5);
-
- mcam_clk_enable(mcam);
-
- return 0;
-}
-
-static void mmpcam_power_down(struct mcam_camera *mcam)
-{
- struct mmp_camera *cam = mcam_to_cam(mcam);
- struct mmp_camera_platform_data *pdata;
-/*
- * Turn off clocks and set reset lines
- */
- iowrite32(0, cam->power_regs + REG_CCIC_DCGCR);
- iowrite32(0, cam->power_regs + REG_CCIC_CRCR);
-/*
- * Shut down the sensor.
- */
- pdata = cam->pdev->dev.platform_data;
- gpio_set_value(pdata->sensor_power_gpio, 0);
- gpio_set_value(pdata->sensor_reset_gpio, 0);
-
- mcam_clk_disable(mcam);
-}
-
-static void mcam_ctlr_reset(struct mcam_camera *mcam)
-{
- unsigned long val;
- struct mmp_camera *cam = mcam_to_cam(mcam);
-
- if (mcam->ccic_id) {
- /*
- * Using CCIC2
- */
- val = ioread32(cam->power_regs + REG_CCIC2_CRCR);
- iowrite32(val & ~0x2, cam->power_regs + REG_CCIC2_CRCR);
- iowrite32(val | 0x2, cam->power_regs + REG_CCIC2_CRCR);
- } else {
- /*
- * Using CCIC1
- */
- val = ioread32(cam->power_regs + REG_CCIC_CRCR);
- iowrite32(val & ~0x2, cam->power_regs + REG_CCIC_CRCR);
- iowrite32(val | 0x2, cam->power_regs + REG_CCIC_CRCR);
- }
-}
-
/*
* calc the dphy register values
* There are three dphy registers being used.
@@ -334,13 +220,10 @@ static int mmpcam_probe(struct platform_device *pdev)
struct mmp_camera *cam;
struct mcam_camera *mcam;
struct resource *res;
+ struct fwnode_handle *ep;
struct mmp_camera_platform_data *pdata;
int ret;
- pdata = pdev->dev.platform_data;
- if (!pdata)
- return -ENODEV;
-
cam = devm_kzalloc(&pdev->dev, sizeof(*cam), GFP_KERNEL);
if (cam == NULL)
return -ENOMEM;
@@ -348,25 +231,31 @@ static int mmpcam_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&cam->devlist);
mcam = &cam->mcam;
- mcam->plat_power_up = mmpcam_power_up;
- mcam->plat_power_down = mmpcam_power_down;
- mcam->ctlr_reset = mcam_ctlr_reset;
mcam->calc_dphy = mmpcam_calc_dphy;
mcam->dev = &pdev->dev;
- mcam->use_smbus = 0;
- mcam->ccic_id = pdev->id;
- mcam->mclk_min = pdata->mclk_min;
- mcam->mclk_src = pdata->mclk_src;
- mcam->mclk_div = pdata->mclk_div;
- mcam->bus_type = pdata->bus_type;
- mcam->dphy = pdata->dphy;
+ pdata = pdev->dev.platform_data;
+ if (pdata) {
+ mcam->mclk_src = pdata->mclk_src;
+ mcam->mclk_div = pdata->mclk_div;
+ mcam->bus_type = pdata->bus_type;
+ mcam->dphy = pdata->dphy;
+ mcam->lane = pdata->lane;
+ } else {
+ /*
+ * These are values that used to be hardcoded in mcam-core and
+ * work well on an OLPC XO 1.75 with a parallel bus sensor.
+ * If it turns out other setups make sense, the values should
+ * be obtained from the device tree.
+ */
+ mcam->mclk_src = 3;
+ mcam->mclk_div = 2;
+ }
if (mcam->bus_type == V4L2_MBUS_CSI2_DPHY) {
cam->mipi_clk = devm_clk_get(mcam->dev, "mipi");
if ((IS_ERR(cam->mipi_clk) && mcam->dphy[2] == 0))
return PTR_ERR(cam->mipi_clk);
}
mcam->mipi_enabled = false;
- mcam->lane = pdata->lane;
mcam->chip_id = MCAM_ARMADA610;
mcam->buffer_mode = B_DMA_sg;
strscpy(mcam->bus_info, "platform:mmp-camera", sizeof(mcam->bus_info));
@@ -379,54 +268,39 @@ static int mmpcam_probe(struct platform_device *pdev)
if (IS_ERR(mcam->regs))
return PTR_ERR(mcam->regs);
mcam->regs_size = resource_size(res);
+
+ mcam_init_clk(mcam);
+
/*
- * Power/clock memory is elsewhere; get it too. Perhaps this
- * should really be managed outside of this driver?
- */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- cam->power_regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(cam->power_regs))
- return PTR_ERR(cam->power_regs);
- /*
- * Find the i2c adapter. This assumes, of course, that the
- * i2c bus is already up and functioning.
+ * Create a match of the sensor against its OF node.
*/
- mcam->i2c_adapter = platform_get_drvdata(pdata->i2c_device);
- if (mcam->i2c_adapter == NULL) {
- dev_err(&pdev->dev, "No i2c adapter\n");
+ ep = fwnode_graph_get_next_endpoint(of_fwnode_handle(pdev->dev.of_node),
+ NULL);
+ if (!ep)
return -ENODEV;
- }
- /*
- * Sensor GPIO pins.
- */
- ret = devm_gpio_request(&pdev->dev, pdata->sensor_power_gpio,
- "cam-power");
- if (ret) {
- dev_err(&pdev->dev, "Can't get sensor power gpio %d",
- pdata->sensor_power_gpio);
- return ret;
- }
- gpio_direction_output(pdata->sensor_power_gpio, 0);
- ret = devm_gpio_request(&pdev->dev, pdata->sensor_reset_gpio,
- "cam-reset");
- if (ret) {
- dev_err(&pdev->dev, "Can't get sensor reset gpio %d",
- pdata->sensor_reset_gpio);
- return ret;
- }
- gpio_direction_output(pdata->sensor_reset_gpio, 0);
- mcam_init_clk(mcam);
+ mcam->asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
+ mcam->asd.match.fwnode = fwnode_graph_get_remote_port_parent(ep);
+
+ fwnode_handle_put(ep);
/*
- * Power the device up and hand it off to the core.
+ * Register the device with the core.
*/
- ret = mmpcam_power_up(mcam);
- if (ret)
- return ret;
ret = mccic_register(mcam);
if (ret)
- goto out_power_down;
+ return ret;
+
+ /*
+ * Add OF clock provider.
+ */
+ ret = of_clk_add_provider(pdev->dev.of_node, of_clk_src_simple_get,
+ mcam->mclk);
+ if (ret) {
+ dev_err(&pdev->dev, "can't add DT clock provider\n");
+ goto out;
+ }
+
/*
* Finally, set up our IRQ now that the core is ready to
* deal with it.
@@ -434,7 +308,7 @@ static int mmpcam_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (res == NULL) {
ret = -ENODEV;
- goto out_unregister;
+ goto out;
}
cam->irq = res->start;
ret = devm_request_irq(&pdev->dev, cam->irq, mmpcam_irq, IRQF_SHARED,
@@ -444,10 +318,10 @@ static int mmpcam_probe(struct platform_device *pdev)
return 0;
}
-out_unregister:
+out:
+ fwnode_handle_put(mcam->asd.match.fwnode);
mccic_shutdown(mcam);
-out_power_down:
- mmpcam_power_down(mcam);
+
return ret;
}
@@ -458,7 +332,6 @@ static int mmpcam_remove(struct mmp_camera *cam)
mmpcam_remove_device(cam);
mccic_shutdown(mcam);
- mmpcam_power_down(mcam);
return 0;
}
@@ -490,17 +363,15 @@ static int mmpcam_resume(struct platform_device *pdev)
{
struct mmp_camera *cam = mmpcam_find_device(pdev);
- /*
- * Power up unconditionally just in case the core tries to
- * touch a register even if nothing was active before; trust
- * me, it's better this way.
- */
- mmpcam_power_up_ctlr(cam);
return mccic_resume(&cam->mcam);
}
#endif
+static const struct of_device_id mmpcam_of_match[] = {
+ { .compatible = "marvell,mmp2-ccic", },
+ {},
+};
static struct platform_driver mmpcam_driver = {
.probe = mmpcam_probe,
@@ -511,6 +382,7 @@ static struct platform_driver mmpcam_driver = {
#endif
.driver = {
.name = "mmp-camera",
+ .of_match_table = of_match_ptr(mmpcam_of_match),
}
};
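
The probe path above funnels every failure after the endpoint lookup through a single "out" label that drops the fwnode reference and shuts the core back down, while a successful probe keeps the reference for later use. A minimal standalone sketch of that shape in plain C; the handle_*() helpers and request_irq_stub() are invented for illustration and are not kernel APIs:

#include <stdio.h>
#include <stdlib.h>

struct handle { int refcount; };

/* Stand-in for taking a counted reference (e.g. a fwnode handle). */
static struct handle *handle_get(void)
{
	struct handle *h = calloc(1, sizeof(*h));

	if (h)
		h->refcount = 1;
	return h;
}

static void handle_put(struct handle *h)
{
	if (h && --h->refcount == 0)
		free(h);
}

/* Stand-in for a later probe step that can fail (e.g. the IRQ setup). */
static int request_irq_stub(void)
{
	return -1;
}

static int probe(void)
{
	struct handle *ep = handle_get();
	int ret;

	if (!ep)
		return -1;

	ret = request_irq_stub();
	if (ret)
		goto out;	/* every later failure funnels through one label */

	return 0;	/* on success the reference is kept for later use */
out:
	handle_put(ep);	/* undo the acquisition done above */
	return ret;
}

int main(void)
{
	printf("probe() = %d\n", probe());
	return 0;
}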
diff --git a/drivers/media/platform/meson/ao-cec-g12a.c b/drivers/media/platform/meson/ao-cec-g12a.c
index 3620a1e310f5..fb52e5dd044a 100644
--- a/drivers/media/platform/meson/ao-cec-g12a.c
+++ b/drivers/media/platform/meson/ao-cec-g12a.c
@@ -365,28 +365,22 @@ static int meson_ao_cec_g12a_read(void *context, unsigned int addr,
{
struct meson_ao_cec_g12a_device *ao_cec = context;
u32 reg = FIELD_PREP(CECB_RW_ADDR, addr);
- unsigned long flags;
int ret = 0;
- spin_lock_irqsave(&ao_cec->cec_reg_lock, flags);
-
ret = regmap_write(ao_cec->regmap, CECB_RW_REG, reg);
if (ret)
- goto read_out;
+ return ret;
ret = regmap_read_poll_timeout(ao_cec->regmap, CECB_RW_REG, reg,
!(reg & CECB_RW_BUS_BUSY),
5, 1000);
if (ret)
- goto read_out;
+ return ret;
ret = regmap_read(ao_cec->regmap, CECB_RW_REG, &reg);
*data = FIELD_GET(CECB_RW_RD_DATA, reg);
-read_out:
- spin_unlock_irqrestore(&ao_cec->cec_reg_lock, flags);
-
return ret;
}
@@ -394,19 +388,11 @@ static int meson_ao_cec_g12a_write(void *context, unsigned int addr,
unsigned int data)
{
struct meson_ao_cec_g12a_device *ao_cec = context;
- unsigned long flags;
u32 reg = FIELD_PREP(CECB_RW_ADDR, addr) |
FIELD_PREP(CECB_RW_WR_DATA, data) |
CECB_RW_WRITE_EN;
- int ret = 0;
- spin_lock_irqsave(&ao_cec->cec_reg_lock, flags);
-
- ret = regmap_write(ao_cec->regmap, CECB_RW_REG, reg);
-
- spin_unlock_irqrestore(&ao_cec->cec_reg_lock, flags);
-
- return ret;
+ return regmap_write(ao_cec->regmap, CECB_RW_REG, reg);
}
static const struct regmap_config meson_ao_cec_g12a_cec_regmap_conf = {
@@ -415,7 +401,6 @@ static const struct regmap_config meson_ao_cec_g12a_cec_regmap_conf = {
.reg_read = meson_ao_cec_g12a_read,
.reg_write = meson_ao_cec_g12a_write,
.max_register = 0xffff,
- .fast_io = true,
};
static inline void
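
Dropping the driver's own spinlock together with the .fast_io flag relies on the register-map core serializing .reg_read/.reg_write itself; with fast_io clear, regmap is expected to fall back to its internal mutex rather than a spinlock. A standalone model of that layering, with made-up names and a pthread mutex standing in for the core lock:

#include <pthread.h>
#include <stdio.h>

#define NUM_REGS 16

static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int regs[NUM_REGS];

/* Backend callback: no locking of its own, plain early returns on error. */
static int backend_read(unsigned int addr, unsigned int *val)
{
	if (addr >= NUM_REGS)
		return -1;
	*val = regs[addr];
	return 0;
}

static int backend_write(unsigned int addr, unsigned int val)
{
	if (addr >= NUM_REGS)
		return -1;
	regs[addr] = val;
	return 0;
}

/* "Core" wrappers own the lock, so every backend callback stays lock-free. */
static int map_read(unsigned int addr, unsigned int *val)
{
	int ret;

	pthread_mutex_lock(&map_lock);
	ret = backend_read(addr, val);
	pthread_mutex_unlock(&map_lock);
	return ret;
}

static int map_write(unsigned int addr, unsigned int val)
{
	int ret;

	pthread_mutex_lock(&map_lock);
	ret = backend_write(addr, val);
	pthread_mutex_unlock(&map_lock);
	return ret;
}

int main(void)
{
	unsigned int v = 0;

	map_write(3, 0xab);
	map_read(3, &v);
	printf("reg[3] = 0x%x\n", v);
	return 0;
}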
diff --git a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c
index 656444e7ca2b..ee802fc3bcdf 100644
--- a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c
+++ b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c
@@ -518,7 +518,7 @@ static int mtk_jpeg_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
return -EINVAL;
}
- vb = vq->bufs[buf->index];
+ vb = vb2_get_buffer(vq, buf->index);
jpeg_src_buf = mtk_jpeg_vb2_to_srcbuf(vb);
jpeg_src_buf->flags = (buf->m.planes[0].bytesused == 0) ?
MTK_JPEG_BUF_FLAGS_LAST_FRAME : MTK_JPEG_BUF_FLAGS_INIT;
@@ -528,8 +528,8 @@ end:
static const struct v4l2_ioctl_ops mtk_jpeg_ioctl_ops = {
.vidioc_querycap = mtk_jpeg_querycap,
- .vidioc_enum_fmt_vid_cap_mplane = mtk_jpeg_enum_fmt_vid_cap,
- .vidioc_enum_fmt_vid_out_mplane = mtk_jpeg_enum_fmt_vid_out,
+ .vidioc_enum_fmt_vid_cap = mtk_jpeg_enum_fmt_vid_cap,
+ .vidioc_enum_fmt_vid_out = mtk_jpeg_enum_fmt_vid_out,
.vidioc_try_fmt_vid_cap_mplane = mtk_jpeg_try_fmt_vid_cap_mplane,
.vidioc_try_fmt_vid_out_mplane = mtk_jpeg_try_fmt_vid_out_mplane,
.vidioc_g_fmt_vid_cap_mplane = mtk_jpeg_g_fmt_vid_mplane,
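
Replacing the direct vq->bufs[buf->index] dereference with vb2_get_buffer() keeps the array lookup behind an accessor, which lets the core validate the index instead of every caller. A toy version of such a checked accessor, with invented types:

#include <stdio.h>
#include <stddef.h>

struct buffer { unsigned int id; };

struct queue {
	struct buffer *bufs[8];
	unsigned int num_buffers;
};

/*
 * Checked accessor: callers get NULL instead of indexing past the
 * buffers that actually exist, and the array stays a private detail
 * of the queue implementation.
 */
static struct buffer *queue_get_buffer(struct queue *q, unsigned int index)
{
	if (index < q->num_buffers)
		return q->bufs[index];
	return NULL;
}

int main(void)
{
	struct buffer b = { .id = 7 };
	struct queue q = { .bufs = { &b }, .num_buffers = 1 };

	printf("index 0: %p\n", (void *)queue_get_buffer(&q, 0));
	printf("index 5: %p\n", (void *)queue_get_buffer(&q, 5));	/* NULL */
	return 0;
}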
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c b/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c
index b28e3dd4885c..7c9e2d69e21a 100644
--- a/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c
@@ -612,7 +612,7 @@ static int mtk_mdp_m2m_querycap(struct file *file, void *fh,
return 0;
}
-static int mtk_mdp_enum_fmt_mplane(struct v4l2_fmtdesc *f, u32 type)
+static int mtk_mdp_enum_fmt(struct v4l2_fmtdesc *f, u32 type)
{
const struct mtk_mdp_fmt *fmt;
@@ -625,16 +625,16 @@ static int mtk_mdp_enum_fmt_mplane(struct v4l2_fmtdesc *f, u32 type)
return 0;
}
-static int mtk_mdp_m2m_enum_fmt_mplane_vid_cap(struct file *file, void *priv,
- struct v4l2_fmtdesc *f)
+static int mtk_mdp_m2m_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
{
- return mtk_mdp_enum_fmt_mplane(f, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ return mtk_mdp_enum_fmt(f, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
}
-static int mtk_mdp_m2m_enum_fmt_mplane_vid_out(struct file *file, void *priv,
- struct v4l2_fmtdesc *f)
+static int mtk_mdp_m2m_enum_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
{
- return mtk_mdp_enum_fmt_mplane(f, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ return mtk_mdp_enum_fmt(f, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
}
static int mtk_mdp_m2m_g_fmt_mplane(struct file *file, void *fh,
@@ -927,8 +927,8 @@ static int mtk_mdp_m2m_s_selection(struct file *file, void *fh,
static const struct v4l2_ioctl_ops mtk_mdp_m2m_ioctl_ops = {
.vidioc_querycap = mtk_mdp_m2m_querycap,
- .vidioc_enum_fmt_vid_cap_mplane = mtk_mdp_m2m_enum_fmt_mplane_vid_cap,
- .vidioc_enum_fmt_vid_out_mplane = mtk_mdp_m2m_enum_fmt_mplane_vid_out,
+ .vidioc_enum_fmt_vid_cap = mtk_mdp_m2m_enum_fmt_vid_cap,
+ .vidioc_enum_fmt_vid_out = mtk_mdp_m2m_enum_fmt_vid_out,
.vidioc_g_fmt_vid_cap_mplane = mtk_mdp_m2m_g_fmt_mplane,
.vidioc_g_fmt_vid_out_mplane = mtk_mdp_m2m_g_fmt_mplane,
.vidioc_try_fmt_vid_cap_mplane = mtk_mdp_m2m_try_fmt_mplane,
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
index 7ae588e62ed8..90d1a67db7e5 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2016 MediaTek Inc.
* Author: PC Chen <pc.chen@mediatek.com>
@@ -24,7 +24,7 @@
#define DFT_CFG_WIDTH MTK_VDEC_MIN_W
#define DFT_CFG_HEIGHT MTK_VDEC_MIN_H
-static struct mtk_video_fmt mtk_video_formats[] = {
+static const struct mtk_video_fmt mtk_video_formats[] = {
{
.fourcc = V4L2_PIX_FMT_H264,
.type = MTK_FMT_DEC,
@@ -68,9 +68,9 @@ static const struct mtk_codec_framesizes mtk_vdec_framesizes[] = {
#define NUM_SUPPORTED_FRAMESIZE ARRAY_SIZE(mtk_vdec_framesizes)
#define NUM_FORMATS ARRAY_SIZE(mtk_video_formats)
-static struct mtk_video_fmt *mtk_vdec_find_format(struct v4l2_format *f)
+static const struct mtk_video_fmt *mtk_vdec_find_format(struct v4l2_format *f)
{
- struct mtk_video_fmt *fmt;
+ const struct mtk_video_fmt *fmt;
unsigned int k;
for (k = 0; k < NUM_FORMATS; k++) {
@@ -122,8 +122,9 @@ static struct vb2_buffer *get_display_buffer(struct mtk_vcodec_ctx *ctx)
if (dstbuf->used) {
vb2_set_plane_payload(&dstbuf->vb.vb2_buf, 0,
ctx->picinfo.fb_sz[0]);
- vb2_set_plane_payload(&dstbuf->vb.vb2_buf, 1,
- ctx->picinfo.fb_sz[1]);
+ if (ctx->q_data[MTK_Q_DATA_DST].fmt->num_planes == 2)
+ vb2_set_plane_payload(&dstbuf->vb.vb2_buf, 1,
+ ctx->picinfo.fb_sz[1]);
mtk_v4l2_debug(2,
"[%d]status=%x queue id=%d to done_list %d",
@@ -271,7 +272,7 @@ static void mtk_vdec_flush_decoder(struct mtk_vcodec_ctx *ctx)
static void mtk_vdec_update_fmt(struct mtk_vcodec_ctx *ctx,
unsigned int pixelformat)
{
- struct mtk_video_fmt *fmt;
+ const struct mtk_video_fmt *fmt;
struct mtk_q_data *dst_q_data;
unsigned int k;
@@ -394,7 +395,8 @@ static void mtk_vdec_worker(struct work_struct *work)
vdec_if_decode(ctx, NULL, NULL, &res_chg);
clean_display_buffer(ctx);
vb2_set_plane_payload(&dst_buf_info->vb.vb2_buf, 0, 0);
- vb2_set_plane_payload(&dst_buf_info->vb.vb2_buf, 1, 0);
+ if (ctx->q_data[MTK_Q_DATA_DST].fmt->num_planes == 2)
+ vb2_set_plane_payload(&dst_buf_info->vb.vb2_buf, 1, 0);
dst_buf->flags |= V4L2_BUF_FLAG_LAST;
v4l2_m2m_buf_done(&dst_buf_info->vb, VB2_BUF_STATE_DONE);
clean_free_buffer(ctx);
@@ -644,7 +646,8 @@ static int vidioc_vdec_subscribe_evt(struct v4l2_fh *fh,
}
}
-static int vidioc_try_fmt(struct v4l2_format *f, struct mtk_video_fmt *fmt)
+static int vidioc_try_fmt(struct v4l2_format *f,
+ const struct mtk_video_fmt *fmt)
{
struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
int i;
@@ -717,7 +720,7 @@ static int vidioc_try_fmt(struct v4l2_format *f, struct mtk_video_fmt *fmt)
static int vidioc_try_fmt_vid_cap_mplane(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct mtk_video_fmt *fmt;
+ const struct mtk_video_fmt *fmt;
fmt = mtk_vdec_find_format(f);
if (!fmt) {
@@ -732,7 +735,7 @@ static int vidioc_try_fmt_vid_out_mplane(struct file *file, void *priv,
struct v4l2_format *f)
{
struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
- struct mtk_video_fmt *fmt;
+ const struct mtk_video_fmt *fmt;
fmt = mtk_vdec_find_format(f);
if (!fmt) {
@@ -826,7 +829,7 @@ static int vidioc_vdec_s_fmt(struct file *file, void *priv,
struct v4l2_pix_format_mplane *pix_mp;
struct mtk_q_data *q_data;
int ret = 0;
- struct mtk_video_fmt *fmt;
+ const struct mtk_video_fmt *fmt;
mtk_v4l2_debug(3, "[%d]", ctx->id);
@@ -925,7 +928,7 @@ static int vidioc_enum_framesizes(struct file *file, void *priv,
static int vidioc_enum_fmt(struct v4l2_fmtdesc *f, bool output_queue)
{
- struct mtk_video_fmt *fmt;
+ const struct mtk_video_fmt *fmt;
int i, j = 0;
for (i = 0; i < NUM_FORMATS; i++) {
@@ -949,14 +952,14 @@ static int vidioc_enum_fmt(struct v4l2_fmtdesc *f, bool output_queue)
return 0;
}
-static int vidioc_vdec_enum_fmt_vid_cap_mplane(struct file *file, void *pirv,
- struct v4l2_fmtdesc *f)
+static int vidioc_vdec_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
{
return vidioc_enum_fmt(f, false);
}
-static int vidioc_vdec_enum_fmt_vid_out_mplane(struct file *file, void *priv,
- struct v4l2_fmtdesc *f)
+static int vidioc_vdec_enum_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
{
return vidioc_enum_fmt(f, true);
}
@@ -1324,7 +1327,8 @@ static void vb2ops_vdec_stop_streaming(struct vb2_queue *q)
while ((dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx))) {
vb2_set_plane_payload(&dst_buf->vb2_buf, 0, 0);
- vb2_set_plane_payload(&dst_buf->vb2_buf, 1, 0);
+ if (ctx->q_data[MTK_Q_DATA_DST].fmt->num_planes == 2)
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 1, 0);
v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR);
}
@@ -1453,8 +1457,8 @@ const struct v4l2_ioctl_ops mtk_vdec_ioctl_ops = {
.vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
- .vidioc_enum_fmt_vid_cap_mplane = vidioc_vdec_enum_fmt_vid_cap_mplane,
- .vidioc_enum_fmt_vid_out_mplane = vidioc_vdec_enum_fmt_vid_out_mplane,
+ .vidioc_enum_fmt_vid_cap = vidioc_vdec_enum_fmt_vid_cap,
+ .vidioc_enum_fmt_vid_out = vidioc_vdec_enum_fmt_vid_out,
.vidioc_enum_framesizes = vidioc_enum_framesizes,
.vidioc_querycap = vidioc_vdec_querycap,
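
Several hunks above stop touching plane 1 unconditionally and instead check the destination format's num_planes first, since single-plane capture formats have no second plane to clear. The same intent, written as a loop over the planes the format actually has; this is a standalone sketch with invented types, not the driver code:

#include <stdio.h>

#define MAX_PLANES 2

struct format { unsigned int num_planes; };

struct buffer {
	unsigned long payload[MAX_PLANES];
};

/* Clear only the planes the active format really has. */
static void clear_payloads(struct buffer *buf, const struct format *fmt)
{
	unsigned int i;

	for (i = 0; i < fmt->num_planes && i < MAX_PLANES; i++)
		buf->payload[i] = 0;
}

int main(void)
{
	struct format nv12m = { .num_planes = 2 };
	struct format single = { .num_planes = 1 };
	struct buffer buf = { .payload = { 100, 200 } };

	clear_payloads(&buf, &single);
	printf("plane1 untouched: %lu\n", buf.payload[1]);	/* 200 */
	clear_payloads(&buf, &nv12m);
	printf("plane1 cleared:   %lu\n", buf.payload[1]);	/* 0 */
	return 0;
}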
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.h
index 3861d4433be9..e0c5338bde3d 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.h
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2016 MediaTek Inc.
* Author: PC Chen <pc.chen@mediatek.com>
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
index 372d37824377..00d090df11bb 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2016 MediaTek Inc.
* Author: PC Chen <pc.chen@mediatek.com>
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c
index 273f78f129da..5a6ec8fb52da 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2016 MediaTek Inc.
* Author: Tiffany Lin <tiffany.lin@mediatek.com>
@@ -34,8 +34,8 @@ int mtk_vcodec_init_dec_pm(struct mtk_vcodec_dev *mtkdev)
}
pdev = of_find_device_by_node(node);
+ of_node_put(node);
if (WARN_ON(!pdev)) {
- of_node_put(node);
return -1;
}
pm->larbvdec = &pdev->dev;
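
Moving of_node_put(node) to right after of_find_device_by_node() drops the node reference on the success path as well; before the change the put only ran inside the failure branch, so a successful lookup appears to have leaked one reference. A toy refcount model of the fixed ordering (all names invented):

#include <stdio.h>

struct node { int refcount; };

static struct node *node_get(struct node *n)
{
	n->refcount++;
	return n;
}

static void node_put(struct node *n)
{
	n->refcount--;
}

/* Stand-in lookup: only needs the node for the duration of the call. */
static int find_device_by_node(const struct node *n)
{
	(void)n;
	return 1;	/* pretend the device was found */
}

int main(void)
{
	struct node n = { .refcount = 1 };
	struct node *ref = node_get(&n);	/* reference held for the lookup */
	int found = find_device_by_node(ref);

	node_put(ref);		/* put unconditionally, right after the last use */
	if (!found)
		return 1;

	printf("refcount back to %d (no leak on success)\n", n.refcount);
	return 0;
}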
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.h
index 74555cc5a893..872d8bf8cfaf 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.h
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2016 MediaTek Inc.
* Author: Tiffany Lin <tiffany.lin@mediatek.com>
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
index 1044176d8e6f..c95de5d08dda 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2016 MediaTek Inc.
* Author: PC Chen <pc.chen@mediatek.com>
@@ -129,7 +129,7 @@ struct mtk_q_data {
enum v4l2_field field;
unsigned int bytesperline[MTK_VCODEC_MAX_PLANES];
unsigned int sizeimage[MTK_VCODEC_MAX_PLANES];
- struct mtk_video_fmt *fmt;
+ const struct mtk_video_fmt *fmt;
};
/**
@@ -273,7 +273,7 @@ struct mtk_vcodec_ctx {
const struct vdec_common_if *dec_if;
const struct venc_common_if *enc_if;
- unsigned long drv_handle;
+ void *drv_handle;
struct vdec_pic_info picinfo;
int dpb_size;
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
index 0cf5744b4c28..fd8de027e83e 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2016 MediaTek Inc.
* Author: PC Chen <pc.chen@mediatek.com>
@@ -29,7 +29,7 @@
static void mtk_venc_worker(struct work_struct *work);
-static struct mtk_video_fmt mtk_video_formats[] = {
+static const struct mtk_video_fmt mtk_video_formats[] = {
{
.fourcc = V4L2_PIX_FMT_NV12M,
.type = MTK_FMT_FRAME,
@@ -158,7 +158,7 @@ static const struct v4l2_ctrl_ops mtk_vcodec_enc_ctrl_ops = {
static int vidioc_enum_fmt(struct v4l2_fmtdesc *f, bool output_queue)
{
- struct mtk_video_fmt *fmt;
+ const struct mtk_video_fmt *fmt;
int i, j = 0;
for (i = 0; i < NUM_FORMATS; ++i) {
@@ -199,14 +199,14 @@ static int vidioc_enum_framesizes(struct file *file, void *fh,
return -EINVAL;
}
-static int vidioc_enum_fmt_vid_cap_mplane(struct file *file, void *pirv,
- struct v4l2_fmtdesc *f)
+static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
{
return vidioc_enum_fmt(f, false);
}
-static int vidioc_enum_fmt_vid_out_mplane(struct file *file, void *prov,
- struct v4l2_fmtdesc *f)
+static int vidioc_enum_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
{
return vidioc_enum_fmt(f, true);
}
@@ -266,9 +266,9 @@ static struct mtk_q_data *mtk_venc_get_q_data(struct mtk_vcodec_ctx *ctx,
return &ctx->q_data[MTK_Q_DATA_DST];
}
-static struct mtk_video_fmt *mtk_venc_find_format(struct v4l2_format *f)
+static const struct mtk_video_fmt *mtk_venc_find_format(struct v4l2_format *f)
{
- struct mtk_video_fmt *fmt;
+ const struct mtk_video_fmt *fmt;
unsigned int k;
for (k = 0; k < NUM_FORMATS; k++) {
@@ -283,7 +283,8 @@ static struct mtk_video_fmt *mtk_venc_find_format(struct v4l2_format *f)
/* V4L2 specification suggests the driver corrects the format struct if any of
* the dimensions is unsupported
*/
-static int vidioc_try_fmt(struct v4l2_format *f, struct mtk_video_fmt *fmt)
+static int vidioc_try_fmt(struct v4l2_format *f,
+ const struct mtk_video_fmt *fmt)
{
struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
int i;
@@ -419,7 +420,7 @@ static int vidioc_venc_s_fmt_cap(struct file *file, void *priv,
struct vb2_queue *vq;
struct mtk_q_data *q_data;
int i, ret;
- struct mtk_video_fmt *fmt;
+ const struct mtk_video_fmt *fmt;
vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
if (!vq) {
@@ -481,7 +482,7 @@ static int vidioc_venc_s_fmt_out(struct file *file, void *priv,
struct vb2_queue *vq;
struct mtk_q_data *q_data;
int ret, i;
- struct mtk_video_fmt *fmt;
+ const struct mtk_video_fmt *fmt;
struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
@@ -580,7 +581,7 @@ static int vidioc_venc_g_fmt(struct file *file, void *priv,
static int vidioc_try_fmt_vid_cap_mplane(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct mtk_video_fmt *fmt;
+ const struct mtk_video_fmt *fmt;
struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
fmt = mtk_venc_find_format(f);
@@ -599,7 +600,7 @@ static int vidioc_try_fmt_vid_cap_mplane(struct file *file, void *priv,
static int vidioc_try_fmt_vid_out_mplane(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct mtk_video_fmt *fmt;
+ const struct mtk_video_fmt *fmt;
fmt = mtk_venc_find_format(f);
if (!fmt) {
@@ -717,8 +718,8 @@ const struct v4l2_ioctl_ops mtk_venc_ioctl_ops = {
.vidioc_dqbuf = vidioc_venc_dqbuf,
.vidioc_querycap = vidioc_venc_querycap,
- .vidioc_enum_fmt_vid_cap_mplane = vidioc_enum_fmt_vid_cap_mplane,
- .vidioc_enum_fmt_vid_out_mplane = vidioc_enum_fmt_vid_out_mplane,
+ .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
+ .vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid_out,
.vidioc_enum_framesizes = vidioc_enum_framesizes,
.vidioc_try_fmt_vid_cap_mplane = vidioc_try_fmt_vid_cap_mplane,
@@ -864,12 +865,18 @@ static int vb2ops_venc_start_streaming(struct vb2_queue *q, unsigned int count)
err_set_param:
for (i = 0; i < q->num_buffers; ++i) {
- if (q->bufs[i]->state == VB2_BUF_STATE_ACTIVE) {
+ struct vb2_buffer *buf = vb2_get_buffer(q, i);
+
+ /*
+ * FIXME: This check is not needed as only active buffers
+ * can be marked as done.
+ */
+ if (buf->state == VB2_BUF_STATE_ACTIVE) {
mtk_v4l2_debug(0, "[%d] id=%d, type=%d, %d -> VB2_BUF_STATE_QUEUED",
ctx->id, i, q->type,
- (int)q->bufs[i]->state);
- v4l2_m2m_buf_done(to_vb2_v4l2_buffer(q->bufs[i]),
- VB2_BUF_STATE_QUEUED);
+ (int)buf->state);
+ v4l2_m2m_buf_done(to_vb2_v4l2_buffer(buf),
+ VB2_BUF_STATE_QUEUED);
}
}
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.h
index 8248cb628882..a9c9f86b9c83 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.h
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2016 MediaTek Inc.
* Author: PC Chen <pc.chen@mediatek.com>
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
index b15e9d2ef6a9..1d82aa2b6017 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2016 MediaTek Inc.
* Author: PC Chen <pc.chen@mediatek.com>
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c
index 4740ae5e9a8e..3e2bfded79a6 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2016 MediaTek Inc.
* Author: Tiffany Lin <tiffany.lin@mediatek.com>
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.h
index 63165fc1b84a..b7ecdfd74823 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.h
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2016 MediaTek Inc.
* Author: Tiffany Lin <tiffany.lin@mediatek.com>
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.c
index f8aae7cc5f57..a3c7a380c930 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2016 MediaTek Inc.
* Author: Tiffany Lin <tiffany.lin@mediatek.com>
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.h
index ba632528fa72..638cd1f3526a 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.h
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2016 MediaTek Inc.
* Author: Tiffany Lin <tiffany.lin@mediatek.com>
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c
index 13f7061bfb50..d48f542db1a9 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2016 MediaTek Inc.
* Author: PC Chen <pc.chen@mediatek.com>
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.h
index 677adb990e28..b999d7b84ed1 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.h
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2016 MediaTek Inc.
* Author: PC Chen <pc.chen@mediatek.com>
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c
index 455dbe4887c1..c5f8f1fca44c 100644
--- a/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c
+++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2016 MediaTek Inc.
* Author: PC Chen <pc.chen@mediatek.com>
@@ -266,7 +266,7 @@ static void get_dpb_size(struct vdec_h264_inst *inst, unsigned int *dpb_sz)
mtk_vcodec_debug(inst, "sz=%d", *dpb_sz);
}
-static int vdec_h264_init(struct mtk_vcodec_ctx *ctx, unsigned long *h_vdec)
+static int vdec_h264_init(struct mtk_vcodec_ctx *ctx)
{
struct vdec_h264_inst *inst = NULL;
int err;
@@ -295,7 +295,7 @@ static int vdec_h264_init(struct mtk_vcodec_ctx *ctx, unsigned long *h_vdec)
mtk_vcodec_debug(inst, "H264 Instance >> %p", inst);
- *h_vdec = (unsigned long)inst;
+ ctx->drv_handle = inst;
return 0;
error_deinit:
@@ -306,7 +306,7 @@ error_free_inst:
return err;
}
-static void vdec_h264_deinit(unsigned long h_vdec)
+static void vdec_h264_deinit(void *h_vdec)
{
struct vdec_h264_inst *inst = (struct vdec_h264_inst *)h_vdec;
@@ -331,7 +331,7 @@ static int find_start_code(unsigned char *data, unsigned int data_sz)
return -1;
}
-static int vdec_h264_decode(unsigned long h_vdec, struct mtk_vcodec_mem *bs,
+static int vdec_h264_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
struct vdec_fb *fb, bool *res_chg)
{
struct vdec_h264_inst *inst = (struct vdec_h264_inst *)h_vdec;
@@ -451,8 +451,8 @@ static void vdec_h264_get_fb(struct vdec_h264_inst *inst,
list->count--;
}
-static int vdec_h264_get_param(unsigned long h_vdec,
- enum vdec_get_param_type type, void *out)
+static int vdec_h264_get_param(void *h_vdec, enum vdec_get_param_type type,
+ void *out)
{
struct vdec_h264_inst *inst = (struct vdec_h264_inst *)h_vdec;
@@ -485,16 +485,9 @@ static int vdec_h264_get_param(unsigned long h_vdec,
return 0;
}
-static struct vdec_common_if vdec_h264_if = {
+const struct vdec_common_if vdec_h264_if = {
.init = vdec_h264_init,
.decode = vdec_h264_decode,
.get_param = vdec_h264_get_param,
.deinit = vdec_h264_deinit,
};
-
-struct vdec_common_if *get_h264_dec_comm_if(void);
-
-struct vdec_common_if *get_h264_dec_comm_if(void)
-{
- return &vdec_h264_if;
-}
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c
index 91139cef6283..63a8708ce682 100644
--- a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c
+++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2016 MediaTek Inc.
* Author: Jungchang Tsao <jungchang.tsao@mediatek.com>
@@ -388,7 +388,7 @@ static void free_working_buf(struct vdec_vp8_inst *inst)
inst->vsi->dec.working_buf_dma = 0;
}
-static int vdec_vp8_init(struct mtk_vcodec_ctx *ctx, unsigned long *h_vdec)
+static int vdec_vp8_init(struct mtk_vcodec_ctx *ctx)
{
struct vdec_vp8_inst *inst;
int err;
@@ -419,7 +419,7 @@ static int vdec_vp8_init(struct mtk_vcodec_ctx *ctx, unsigned long *h_vdec)
get_hw_reg_base(inst);
mtk_vcodec_debug(inst, "VP8 Instance >> %p", inst);
- *h_vdec = (unsigned long)inst;
+ ctx->drv_handle = inst;
return 0;
error_deinit:
@@ -429,7 +429,7 @@ error_free_inst:
return err;
}
-static int vdec_vp8_decode(unsigned long h_vdec, struct mtk_vcodec_mem *bs,
+static int vdec_vp8_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
struct vdec_fb *fb, bool *res_chg)
{
struct vdec_vp8_inst *inst = (struct vdec_vp8_inst *)h_vdec;
@@ -565,8 +565,8 @@ static void get_crop_info(struct vdec_vp8_inst *inst, struct v4l2_rect *cr)
cr->left, cr->top, cr->width, cr->height);
}
-static int vdec_vp8_get_param(unsigned long h_vdec,
- enum vdec_get_param_type type, void *out)
+static int vdec_vp8_get_param(void *h_vdec, enum vdec_get_param_type type,
+ void *out)
{
struct vdec_vp8_inst *inst = (struct vdec_vp8_inst *)h_vdec;
@@ -599,7 +599,7 @@ static int vdec_vp8_get_param(unsigned long h_vdec,
return 0;
}
-static void vdec_vp8_deinit(unsigned long h_vdec)
+static void vdec_vp8_deinit(void *h_vdec)
{
struct vdec_vp8_inst *inst = (struct vdec_vp8_inst *)h_vdec;
@@ -610,16 +610,9 @@ static void vdec_vp8_deinit(unsigned long h_vdec)
kfree(inst);
}
-static struct vdec_common_if vdec_vp8_if = {
+const struct vdec_common_if vdec_vp8_if = {
.init = vdec_vp8_init,
.decode = vdec_vp8_decode,
.get_param = vdec_vp8_get_param,
.deinit = vdec_vp8_deinit,
};
-
-struct vdec_common_if *get_vp8_dec_comm_if(void);
-
-struct vdec_common_if *get_vp8_dec_comm_if(void)
-{
- return &vdec_vp8_if;
-}
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c
index c1904ad5e69b..5066c283d86d 100644
--- a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c
+++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2016 MediaTek Inc.
* Author: Daniel Hsiao <daniel.hsiao@mediatek.com>
@@ -757,7 +757,7 @@ static int validate_vsi_array_indexes(struct vdec_vp9_inst *inst,
return 0;
}
-static void vdec_vp9_deinit(unsigned long h_vdec)
+static void vdec_vp9_deinit(void *h_vdec)
{
struct vdec_vp9_inst *inst = (struct vdec_vp9_inst *)h_vdec;
struct mtk_vcodec_mem *mem;
@@ -779,7 +779,7 @@ static void vdec_vp9_deinit(unsigned long h_vdec)
vp9_free_inst(inst);
}
-static int vdec_vp9_init(struct mtk_vcodec_ctx *ctx, unsigned long *h_vdec)
+static int vdec_vp9_init(struct mtk_vcodec_ctx *ctx)
{
struct vdec_vp9_inst *inst;
@@ -803,7 +803,7 @@ static int vdec_vp9_init(struct mtk_vcodec_ctx *ctx, unsigned long *h_vdec)
inst->vsi = (struct vdec_vp9_vsi *)inst->vpu.vsi;
init_all_fb_lists(inst);
- (*h_vdec) = (unsigned long)inst;
+ ctx->drv_handle = inst;
return 0;
err_deinit_inst:
@@ -812,8 +812,8 @@ err_deinit_inst:
return -EINVAL;
}
-static int vdec_vp9_decode(unsigned long h_vdec, struct mtk_vcodec_mem *bs,
- struct vdec_fb *fb, bool *res_chg)
+static int vdec_vp9_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
+ struct vdec_fb *fb, bool *res_chg)
{
int ret = 0;
struct vdec_vp9_inst *inst = (struct vdec_vp9_inst *)h_vdec;
@@ -969,8 +969,8 @@ static void get_crop_info(struct vdec_vp9_inst *inst, struct v4l2_rect *cr)
cr->left, cr->top, cr->width, cr->height);
}
-static int vdec_vp9_get_param(unsigned long h_vdec,
- enum vdec_get_param_type type, void *out)
+static int vdec_vp9_get_param(void *h_vdec, enum vdec_get_param_type type,
+ void *out)
{
struct vdec_vp9_inst *inst = (struct vdec_vp9_inst *)h_vdec;
int ret = 0;
@@ -1000,16 +1000,9 @@ static int vdec_vp9_get_param(unsigned long h_vdec,
return ret;
}
-static struct vdec_common_if vdec_vp9_if = {
+const struct vdec_common_if vdec_vp9_if = {
.init = vdec_vp9_init,
.decode = vdec_vp9_decode,
.get_param = vdec_vp9_get_param,
.deinit = vdec_vp9_deinit,
};
-
-struct vdec_common_if *get_vp9_dec_comm_if(void);
-
-struct vdec_common_if *get_vp9_dec_comm_if(void)
-{
- return &vdec_vp9_if;
-}
diff --git a/drivers/media/platform/mtk-vcodec/vdec_drv_base.h b/drivers/media/platform/mtk-vcodec/vdec_drv_base.h
index b6cb922fc400..ceb4db4cb3be 100644
--- a/drivers/media/platform/mtk-vcodec/vdec_drv_base.h
+++ b/drivers/media/platform/mtk-vcodec/vdec_drv_base.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2016 MediaTek Inc.
* Author: PC Chen <pc.chen@mediatek.com>
@@ -17,7 +17,7 @@ struct vdec_common_if {
* @ctx : [in] mtk v4l2 context
* @h_vdec : [out] driver handle
*/
- int (*init)(struct mtk_vcodec_ctx *ctx, unsigned long *h_vdec);
+ int (*init)(struct mtk_vcodec_ctx *ctx);
/**
* (*decode)() - trigger decode
@@ -26,7 +26,7 @@ struct vdec_common_if {
* @fb : [in] frame buffer to store decoded frame
* @res_chg : [out] resolution change happen
*/
- int (*decode)(unsigned long h_vdec, struct mtk_vcodec_mem *bs,
+ int (*decode)(void *h_vdec, struct mtk_vcodec_mem *bs,
struct vdec_fb *fb, bool *res_chg);
/**
@@ -35,14 +35,14 @@ struct vdec_common_if {
* @type : [in] input parameter type
* @out : [out] buffer to store query result
*/
- int (*get_param)(unsigned long h_vdec, enum vdec_get_param_type type,
+ int (*get_param)(void *h_vdec, enum vdec_get_param_type type,
void *out);
/**
* (*deinit)() - deinitialize driver.
* @h_vdec : [in] driver handle to be deinit
*/
- void (*deinit)(unsigned long h_vdec);
+ void (*deinit)(void *h_vdec);
};
#endif
diff --git a/drivers/media/platform/mtk-vcodec/vdec_drv_if.c b/drivers/media/platform/mtk-vcodec/vdec_drv_if.c
index 5c98a76a77b7..2e43dd4486e0 100644
--- a/drivers/media/platform/mtk-vcodec/vdec_drv_if.c
+++ b/drivers/media/platform/mtk-vcodec/vdec_drv_if.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2016 MediaTek Inc.
* Author: PC Chen <pc.chen@mediatek.com>
@@ -15,23 +15,19 @@
#include "mtk_vcodec_dec_pm.h"
#include "mtk_vpu.h"
-const struct vdec_common_if *get_h264_dec_comm_if(void);
-const struct vdec_common_if *get_vp8_dec_comm_if(void);
-const struct vdec_common_if *get_vp9_dec_comm_if(void);
-
int vdec_if_init(struct mtk_vcodec_ctx *ctx, unsigned int fourcc)
{
int ret = 0;
switch (fourcc) {
case V4L2_PIX_FMT_H264:
- ctx->dec_if = get_h264_dec_comm_if();
+ ctx->dec_if = &vdec_h264_if;
break;
case V4L2_PIX_FMT_VP8:
- ctx->dec_if = get_vp8_dec_comm_if();
+ ctx->dec_if = &vdec_vp8_if;
break;
case V4L2_PIX_FMT_VP9:
- ctx->dec_if = get_vp9_dec_comm_if();
+ ctx->dec_if = &vdec_vp9_if;
break;
default:
return -EINVAL;
@@ -39,7 +35,7 @@ int vdec_if_init(struct mtk_vcodec_ctx *ctx, unsigned int fourcc)
mtk_vdec_lock(ctx);
mtk_vcodec_dec_clock_on(&ctx->dev->pm);
- ret = ctx->dec_if->init(ctx, &ctx->drv_handle);
+ ret = ctx->dec_if->init(ctx);
mtk_vcodec_dec_clock_off(&ctx->dev->pm);
mtk_vdec_unlock(ctx);
@@ -66,7 +62,7 @@ int vdec_if_decode(struct mtk_vcodec_ctx *ctx, struct mtk_vcodec_mem *bs,
}
}
- if (ctx->drv_handle == 0)
+ if (!ctx->drv_handle)
return -EIO;
mtk_vdec_lock(ctx);
@@ -89,7 +85,7 @@ int vdec_if_get_param(struct mtk_vcodec_ctx *ctx, enum vdec_get_param_type type,
{
int ret = 0;
- if (ctx->drv_handle == 0)
+ if (!ctx->drv_handle)
return -EIO;
mtk_vdec_lock(ctx);
@@ -101,7 +97,7 @@ int vdec_if_get_param(struct mtk_vcodec_ctx *ctx, enum vdec_get_param_type type,
void vdec_if_deinit(struct mtk_vcodec_ctx *ctx)
{
- if (ctx->drv_handle == 0)
+ if (!ctx->drv_handle)
return;
mtk_vdec_lock(ctx);
@@ -110,5 +106,5 @@ void vdec_if_deinit(struct mtk_vcodec_ctx *ctx)
mtk_vcodec_dec_clock_off(&ctx->dev->pm);
mtk_vdec_unlock(ctx);
- ctx->drv_handle = 0;
+ ctx->drv_handle = NULL;
}
diff --git a/drivers/media/platform/mtk-vcodec/vdec_drv_if.h b/drivers/media/platform/mtk-vcodec/vdec_drv_if.h
index 409623574145..270d8dc9984b 100644
--- a/drivers/media/platform/mtk-vcodec/vdec_drv_if.h
+++ b/drivers/media/platform/mtk-vcodec/vdec_drv_if.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2016 MediaTek Inc.
* Author: PC Chen <pc.chen@mediatek.com>
@@ -54,6 +54,10 @@ struct vdec_fb_node {
struct vdec_fb *fb;
};
+extern const struct vdec_common_if vdec_h264_if;
+extern const struct vdec_common_if vdec_vp8_if;
+extern const struct vdec_common_if vdec_vp9_if;
+
/**
* vdec_if_init() - initialize decode driver
* @ctx : [in] v4l2 context
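
Two related cleanups run through the vdec (and later venc) files: the per-codec ops tables become const objects exposed through plain extern declarations instead of getter functions, and the driver handle becomes a void pointer, so the casts to and from unsigned long disappear. A self-contained sketch of that shape, with all names invented and the init signature simplified for illustration:

#include <stdio.h>
#include <stdlib.h>

struct inst { int frames; };

struct codec_if {
	int  (*init)(void **handle);
	int  (*decode)(void *handle);
	void (*deinit)(void *handle);
};

static int h264_init(void **handle)
{
	struct inst *inst = calloc(1, sizeof(*inst));

	if (!inst)
		return -1;
	*handle = inst;			/* no (unsigned long) cast needed */
	return 0;
}

static int h264_decode(void *handle)
{
	struct inst *inst = handle;	/* void * converts implicitly in C */

	return ++inst->frames;
}

static void h264_deinit(void *handle)
{
	free(handle);
}

/* In a header this would be "extern const struct codec_if vdec_h264_if;". */
const struct codec_if vdec_h264_if = {
	.init   = h264_init,
	.decode = h264_decode,
	.deinit = h264_deinit,
};

int main(void)
{
	void *drv_handle = NULL;

	if (vdec_h264_if.init(&drv_handle))
		return 1;
	printf("decoded %d frame(s)\n", vdec_h264_if.decode(drv_handle));
	vdec_h264_if.deinit(drv_handle);
	return 0;
}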
diff --git a/drivers/media/platform/mtk-vcodec/vdec_ipi_msg.h b/drivers/media/platform/mtk-vcodec/vdec_ipi_msg.h
index b05dcdeb7734..47a1c1c0fd04 100644
--- a/drivers/media/platform/mtk-vcodec/vdec_ipi_msg.h
+++ b/drivers/media/platform/mtk-vcodec/vdec_ipi_msg.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2016 MediaTek Inc.
* Author: PC Chen <pc.chen@mediatek.com>
diff --git a/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c
index 035ba917ed0e..3f38cc4509ef 100644
--- a/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c
+++ b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2016 MediaTek Inc.
* Author: PC Chen <pc.chen@mediatek.com>
diff --git a/drivers/media/platform/mtk-vcodec/vdec_vpu_if.h b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.h
index 6701778ea5d9..b76f717e4fd7 100644
--- a/drivers/media/platform/mtk-vcodec/vdec_vpu_if.h
+++ b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2016 MediaTek Inc.
* Author: PC Chen <pc.chen@mediatek.com>
diff --git a/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c b/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c
index 3125eaf2a326..b9624f8df0e9 100644
--- a/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c
+++ b/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2016 MediaTek Inc.
* Author: Jungchang Tsao <jungchang.tsao@mediatek.com>
@@ -458,7 +458,7 @@ static void h264_encode_filler(struct venc_h264_inst *inst, void *buf,
memset(p, 0xff, size);
}
-static int h264_enc_init(struct mtk_vcodec_ctx *ctx, unsigned long *handle)
+static int h264_enc_init(struct mtk_vcodec_ctx *ctx)
{
int ret = 0;
struct venc_h264_inst *inst;
@@ -484,12 +484,12 @@ static int h264_enc_init(struct mtk_vcodec_ctx *ctx, unsigned long *handle)
if (ret)
kfree(inst);
else
- (*handle) = (unsigned long)inst;
+ ctx->drv_handle = inst;
return ret;
}
-static int h264_enc_encode(unsigned long handle,
+static int h264_enc_encode(void *handle,
enum venc_start_opt opt,
struct venc_frm_buf *frm_buf,
struct mtk_vcodec_mem *bs_buf,
@@ -584,7 +584,7 @@ encode_err:
return ret;
}
-static int h264_enc_set_param(unsigned long handle,
+static int h264_enc_set_param(void *handle,
enum venc_set_param_type type,
struct venc_enc_param *enc_prm)
{
@@ -637,7 +637,7 @@ static int h264_enc_set_param(unsigned long handle,
return ret;
}
-static int h264_enc_deinit(unsigned long handle)
+static int h264_enc_deinit(void *handle)
{
int ret = 0;
struct venc_h264_inst *inst = (struct venc_h264_inst *)handle;
@@ -655,16 +655,9 @@ static int h264_enc_deinit(unsigned long handle)
return ret;
}
-static const struct venc_common_if venc_h264_if = {
+const struct venc_common_if venc_h264_if = {
.init = h264_enc_init,
.encode = h264_enc_encode,
.set_param = h264_enc_set_param,
.deinit = h264_enc_deinit,
};
-
-const struct venc_common_if *get_h264_enc_comm_if(void);
-
-const struct venc_common_if *get_h264_enc_comm_if(void)
-{
- return &venc_h264_if;
-}
diff --git a/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c b/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c
index ba19cdc4e4f1..8d36f0362efe 100644
--- a/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c
+++ b/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2016 MediaTek Inc.
* Author: Daniel Hsiao <daniel.hsiao@mediatek.com>
@@ -323,7 +323,7 @@ static int vp8_enc_encode_frame(struct venc_vp8_inst *inst,
return ret;
}
-static int vp8_enc_init(struct mtk_vcodec_ctx *ctx, unsigned long *handle)
+static int vp8_enc_init(struct mtk_vcodec_ctx *ctx)
{
int ret = 0;
struct venc_vp8_inst *inst;
@@ -349,12 +349,12 @@ static int vp8_enc_init(struct mtk_vcodec_ctx *ctx, unsigned long *handle)
if (ret)
kfree(inst);
else
- (*handle) = (unsigned long)inst;
+ ctx->drv_handle = inst;
return ret;
}
-static int vp8_enc_encode(unsigned long handle,
+static int vp8_enc_encode(void *handle,
enum venc_start_opt opt,
struct venc_frm_buf *frm_buf,
struct mtk_vcodec_mem *bs_buf,
@@ -391,7 +391,7 @@ encode_err:
return ret;
}
-static int vp8_enc_set_param(unsigned long handle,
+static int vp8_enc_set_param(void *handle,
enum venc_set_param_type type,
struct venc_enc_param *enc_prm)
{
@@ -442,7 +442,7 @@ static int vp8_enc_set_param(unsigned long handle,
return ret;
}
-static int vp8_enc_deinit(unsigned long handle)
+static int vp8_enc_deinit(void *handle)
{
int ret = 0;
struct venc_vp8_inst *inst = (struct venc_vp8_inst *)handle;
@@ -460,16 +460,9 @@ static int vp8_enc_deinit(unsigned long handle)
return ret;
}
-static const struct venc_common_if venc_vp8_if = {
+const struct venc_common_if venc_vp8_if = {
.init = vp8_enc_init,
.encode = vp8_enc_encode,
.set_param = vp8_enc_set_param,
.deinit = vp8_enc_deinit,
};
-
-const struct venc_common_if *get_vp8_enc_comm_if(void);
-
-const struct venc_common_if *get_vp8_enc_comm_if(void)
-{
- return &venc_vp8_if;
-}
diff --git a/drivers/media/platform/mtk-vcodec/venc_drv_base.h b/drivers/media/platform/mtk-vcodec/venc_drv_base.h
index 81620683b94f..3d718411dc73 100644
--- a/drivers/media/platform/mtk-vcodec/venc_drv_base.h
+++ b/drivers/media/platform/mtk-vcodec/venc_drv_base.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2016 MediaTek Inc.
* Author: Daniel Hsiao <daniel.hsiao@mediatek.com>
@@ -19,7 +19,7 @@ struct venc_common_if {
* @ctx: [in] mtk v4l2 context
* @handle: [out] driver handle
*/
- int (*init)(struct mtk_vcodec_ctx *ctx, unsigned long *handle);
+ int (*init)(struct mtk_vcodec_ctx *ctx);
/**
* (*encode)() - trigger encode
@@ -29,7 +29,7 @@ struct venc_common_if {
* @bs_buf: [in] bitstream buffer to store output bitstream
* @result: [out] encode result
*/
- int (*encode)(unsigned long handle, enum venc_start_opt opt,
+ int (*encode)(void *handle, enum venc_start_opt opt,
struct venc_frm_buf *frm_buf,
struct mtk_vcodec_mem *bs_buf,
struct venc_done_result *result);
@@ -40,14 +40,14 @@ struct venc_common_if {
* @type: [in] parameter type
* @in: [in] buffer to store the parameter
*/
- int (*set_param)(unsigned long handle, enum venc_set_param_type type,
+ int (*set_param)(void *handle, enum venc_set_param_type type,
struct venc_enc_param *in);
/**
* (*deinit)() - deinitialize driver.
* @handle: [in] driver handle
*/
- int (*deinit)(unsigned long handle);
+ int (*deinit)(void *handle);
};
#endif
diff --git a/drivers/media/platform/mtk-vcodec/venc_drv_if.c b/drivers/media/platform/mtk-vcodec/venc_drv_if.c
index 608c08b2ab8f..c6bb82ac2dcd 100644
--- a/drivers/media/platform/mtk-vcodec/venc_drv_if.c
+++ b/drivers/media/platform/mtk-vcodec/venc_drv_if.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2016 MediaTek Inc.
* Author: Daniel Hsiao <daniel.hsiao@mediatek.com>
@@ -17,19 +17,16 @@
#include "mtk_vcodec_enc_pm.h"
#include "mtk_vpu.h"
-const struct venc_common_if *get_h264_enc_comm_if(void);
-const struct venc_common_if *get_vp8_enc_comm_if(void);
-
int venc_if_init(struct mtk_vcodec_ctx *ctx, unsigned int fourcc)
{
int ret = 0;
switch (fourcc) {
case V4L2_PIX_FMT_VP8:
- ctx->enc_if = get_vp8_enc_comm_if();
+ ctx->enc_if = &venc_vp8_if;
break;
case V4L2_PIX_FMT_H264:
- ctx->enc_if = get_h264_enc_comm_if();
+ ctx->enc_if = &venc_h264_if;
break;
default:
return -EINVAL;
@@ -37,7 +34,7 @@ int venc_if_init(struct mtk_vcodec_ctx *ctx, unsigned int fourcc)
mtk_venc_lock(ctx);
mtk_vcodec_enc_clock_on(&ctx->dev->pm);
- ret = ctx->enc_if->init(ctx, (unsigned long *)&ctx->drv_handle);
+ ret = ctx->enc_if->init(ctx);
mtk_vcodec_enc_clock_off(&ctx->dev->pm);
mtk_venc_unlock(ctx);
@@ -89,7 +86,7 @@ int venc_if_deinit(struct mtk_vcodec_ctx *ctx)
{
int ret = 0;
- if (ctx->drv_handle == 0)
+ if (!ctx->drv_handle)
return 0;
mtk_venc_lock(ctx);
@@ -98,7 +95,7 @@ int venc_if_deinit(struct mtk_vcodec_ctx *ctx)
mtk_vcodec_enc_clock_off(&ctx->dev->pm);
mtk_venc_unlock(ctx);
- ctx->drv_handle = 0;
+ ctx->drv_handle = NULL;
return ret;
}
diff --git a/drivers/media/platform/mtk-vcodec/venc_drv_if.h b/drivers/media/platform/mtk-vcodec/venc_drv_if.h
index bbba1cec7be4..52fc9cc812fc 100644
--- a/drivers/media/platform/mtk-vcodec/venc_drv_if.h
+++ b/drivers/media/platform/mtk-vcodec/venc_drv_if.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2016 MediaTek Inc.
* Author: Daniel Hsiao <daniel.hsiao@mediatek.com>
@@ -110,6 +110,9 @@ struct venc_done_result {
bool is_key_frm;
};
+extern const struct venc_common_if venc_h264_if;
+extern const struct venc_common_if venc_vp8_if;
+
/*
* venc_if_init - Create the driver handle
* @ctx: device context
diff --git a/drivers/media/platform/mtk-vcodec/venc_ipi_msg.h b/drivers/media/platform/mtk-vcodec/venc_ipi_msg.h
index be34780760f4..28ee04ca6241 100644
--- a/drivers/media/platform/mtk-vcodec/venc_ipi_msg.h
+++ b/drivers/media/platform/mtk-vcodec/venc_ipi_msg.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2016 MediaTek Inc.
* Author: Jungchang Tsao <jungchang.tsao@mediatek.com>
diff --git a/drivers/media/platform/mtk-vcodec/venc_vpu_if.c b/drivers/media/platform/mtk-vcodec/venc_vpu_if.c
index 7daf8694c62e..3e931b0ed096 100644
--- a/drivers/media/platform/mtk-vcodec/venc_vpu_if.c
+++ b/drivers/media/platform/mtk-vcodec/venc_vpu_if.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2016 MediaTek Inc.
* Author: PoChun Lin <pochun.lin@mediatek.com>
diff --git a/drivers/media/platform/mtk-vcodec/venc_vpu_if.h b/drivers/media/platform/mtk-vcodec/venc_vpu_if.h
index a6b6d0eafb50..ba301a138a5a 100644
--- a/drivers/media/platform/mtk-vcodec/venc_vpu_if.h
+++ b/drivers/media/platform/mtk-vcodec/venc_vpu_if.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2016 MediaTek Inc.
* Author: PoChun Lin <pochun.lin@mediatek.com>
diff --git a/drivers/media/platform/mtk-vpu/mtk_vpu.c b/drivers/media/platform/mtk-vpu/mtk_vpu.c
index da655d166d52..cc2ff40d060d 100644
--- a/drivers/media/platform/mtk-vpu/mtk_vpu.c
+++ b/drivers/media/platform/mtk-vpu/mtk_vpu.c
@@ -460,9 +460,9 @@ struct platform_device *vpu_get_plat_device(struct platform_device *pdev)
}
vpu_pdev = of_find_device_by_node(vpu_node);
+ of_node_put(vpu_node);
if (WARN_ON(!vpu_pdev)) {
dev_err(dev, "vpu pdev failed\n");
- of_node_put(vpu_node);
return NULL;
}
diff --git a/drivers/media/platform/omap/Kconfig b/drivers/media/platform/omap/Kconfig
index 08a606a5adff..1a99dff21ca0 100644
--- a/drivers/media/platform/omap/Kconfig
+++ b/drivers/media/platform/omap/Kconfig
@@ -14,6 +14,5 @@ config VIDEO_OMAP2_VOUT
select VIDEOBUF_DMA_CONTIG
select OMAP2_VRFB if ARCH_OMAP2 || ARCH_OMAP3
select FRAME_VECTOR
- default n
help
V4L2 Display driver support for OMAP2/3 based boards.
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index 38849f0ba09d..83216fc7156b 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -2003,6 +2003,8 @@ static int isp_remove(struct platform_device *pdev)
media_entity_enum_cleanup(&isp->crashed);
v4l2_async_notifier_cleanup(&isp->notifier);
+ kfree(isp);
+
return 0;
}
@@ -2193,7 +2195,7 @@ static int isp_probe(struct platform_device *pdev)
int ret;
int i, m;
- isp = devm_kzalloc(&pdev->dev, sizeof(*isp), GFP_KERNEL);
+ isp = kzalloc(sizeof(*isp), GFP_KERNEL);
if (!isp) {
dev_err(&pdev->dev, "could not allocate memory\n");
return -ENOMEM;
@@ -2202,17 +2204,19 @@ static int isp_probe(struct platform_device *pdev)
ret = fwnode_property_read_u32(of_fwnode_handle(pdev->dev.of_node),
"ti,phy-type", &isp->phy_type);
if (ret)
- return ret;
+ goto error_release_isp;
isp->syscon = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
"syscon");
- if (IS_ERR(isp->syscon))
- return PTR_ERR(isp->syscon);
+ if (IS_ERR(isp->syscon)) {
+ ret = PTR_ERR(isp->syscon);
+ goto error_release_isp;
+ }
ret = of_property_read_u32_index(pdev->dev.of_node,
"syscon", 1, &isp->syscon_offset);
if (ret)
- return ret;
+ goto error_release_isp;
isp->autoidle = autoidle;
@@ -2369,6 +2373,8 @@ error_isp:
error:
v4l2_async_notifier_cleanup(&isp->notifier);
mutex_destroy(&isp->isp_mutex);
+error_release_isp:
+ kfree(isp);
return ret;
}
@@ -2380,7 +2386,7 @@ static const struct dev_pm_ops omap3isp_pm_ops = {
.complete = isp_pm_complete,
};
-static struct platform_device_id omap3isp_id_table[] = {
+static const struct platform_device_id omap3isp_id_table[] = {
{ "omap3isp", 0 },
{ },
};
diff --git a/drivers/media/platform/omap3isp/isph3a_aewb.c b/drivers/media/platform/omap3isp/isph3a_aewb.c
index e27c502ffa4a..e6c54c4bbfca 100644
--- a/drivers/media/platform/omap3isp/isph3a_aewb.c
+++ b/drivers/media/platform/omap3isp/isph3a_aewb.c
@@ -288,9 +288,10 @@ int omap3isp_h3a_aewb_init(struct isp_device *isp)
{
struct ispstat *aewb = &isp->isp_aewb;
struct omap3isp_h3a_aewb_config *aewb_cfg;
- struct omap3isp_h3a_aewb_config *aewb_recover_cfg;
+ struct omap3isp_h3a_aewb_config *aewb_recover_cfg = NULL;
+ int ret;
- aewb_cfg = devm_kzalloc(isp->dev, sizeof(*aewb_cfg), GFP_KERNEL);
+ aewb_cfg = kzalloc(sizeof(*aewb_cfg), GFP_KERNEL);
if (!aewb_cfg)
return -ENOMEM;
@@ -300,12 +301,12 @@ int omap3isp_h3a_aewb_init(struct isp_device *isp)
aewb->isp = isp;
/* Set recover state configuration */
- aewb_recover_cfg = devm_kzalloc(isp->dev, sizeof(*aewb_recover_cfg),
- GFP_KERNEL);
+ aewb_recover_cfg = kzalloc(sizeof(*aewb_recover_cfg), GFP_KERNEL);
if (!aewb_recover_cfg) {
dev_err(aewb->isp->dev,
"AEWB: cannot allocate memory for recover configuration.\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err;
}
aewb_recover_cfg->saturation_limit = OMAP3ISP_AEWB_MAX_SATURATION_LIM;
@@ -322,13 +323,22 @@ int omap3isp_h3a_aewb_init(struct isp_device *isp)
if (h3a_aewb_validate_params(aewb, aewb_recover_cfg)) {
dev_err(aewb->isp->dev,
"AEWB: recover configuration is invalid.\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err;
}
aewb_recover_cfg->buf_size = h3a_aewb_get_buf_size(aewb_recover_cfg);
aewb->recover_priv = aewb_recover_cfg;
- return omap3isp_stat_init(aewb, "AEWB", &h3a_aewb_subdev_ops);
+ ret = omap3isp_stat_init(aewb, "AEWB", &h3a_aewb_subdev_ops);
+
+err:
+ if (ret) {
+ kfree(aewb_cfg);
+ kfree(aewb_recover_cfg);
+ }
+
+ return ret;
}
/*
diff --git a/drivers/media/platform/omap3isp/isph3a_af.c b/drivers/media/platform/omap3isp/isph3a_af.c
index 4f61776abc20..a65cfdfa9637 100644
--- a/drivers/media/platform/omap3isp/isph3a_af.c
+++ b/drivers/media/platform/omap3isp/isph3a_af.c
@@ -351,9 +351,10 @@ int omap3isp_h3a_af_init(struct isp_device *isp)
{
struct ispstat *af = &isp->isp_af;
struct omap3isp_h3a_af_config *af_cfg;
- struct omap3isp_h3a_af_config *af_recover_cfg;
+ struct omap3isp_h3a_af_config *af_recover_cfg = NULL;
+ int ret;
- af_cfg = devm_kzalloc(isp->dev, sizeof(*af_cfg), GFP_KERNEL);
+ af_cfg = kzalloc(sizeof(*af_cfg), GFP_KERNEL);
if (af_cfg == NULL)
return -ENOMEM;
@@ -363,12 +364,12 @@ int omap3isp_h3a_af_init(struct isp_device *isp)
af->isp = isp;
/* Set recover state configuration */
- af_recover_cfg = devm_kzalloc(isp->dev, sizeof(*af_recover_cfg),
- GFP_KERNEL);
+ af_recover_cfg = kzalloc(sizeof(*af_recover_cfg), GFP_KERNEL);
if (!af_recover_cfg) {
dev_err(af->isp->dev,
"AF: cannot allocate memory for recover configuration.\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err;
}
af_recover_cfg->paxel.h_start = OMAP3ISP_AF_PAXEL_HZSTART_MIN;
@@ -380,13 +381,22 @@ int omap3isp_h3a_af_init(struct isp_device *isp)
if (h3a_af_validate_params(af, af_recover_cfg)) {
dev_err(af->isp->dev,
"AF: recover configuration is invalid.\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err;
}
af_recover_cfg->buf_size = h3a_af_get_buf_size(af_recover_cfg);
af->recover_priv = af_recover_cfg;
- return omap3isp_stat_init(af, "AF", &h3a_af_subdev_ops);
+ ret = omap3isp_stat_init(af, "AF", &h3a_af_subdev_ops);
+
+err:
+ if (ret) {
+ kfree(af_cfg);
+ kfree(af_recover_cfg);
+ }
+
+ return ret;
}
void omap3isp_h3a_af_cleanup(struct isp_device *isp)
diff --git a/drivers/media/platform/omap3isp/isphist.c b/drivers/media/platform/omap3isp/isphist.c
index e36571b355f6..0ef78aace6da 100644
--- a/drivers/media/platform/omap3isp/isphist.c
+++ b/drivers/media/platform/omap3isp/isphist.c
@@ -475,9 +475,9 @@ int omap3isp_hist_init(struct isp_device *isp)
{
struct ispstat *hist = &isp->isp_hist;
struct omap3isp_hist_config *hist_cfg;
- int ret = -1;
+ int ret;
- hist_cfg = devm_kzalloc(isp->dev, sizeof(*hist_cfg), GFP_KERNEL);
+ hist_cfg = kzalloc(sizeof(*hist_cfg), GFP_KERNEL);
if (hist_cfg == NULL)
return -ENOMEM;
@@ -499,7 +499,7 @@ int omap3isp_hist_init(struct isp_device *isp)
if (IS_ERR(hist->dma_ch)) {
ret = PTR_ERR(hist->dma_ch);
if (ret == -EPROBE_DEFER)
- return ret;
+ goto err;
hist->dma_ch = NULL;
dev_warn(isp->dev,
@@ -515,9 +515,12 @@ int omap3isp_hist_init(struct isp_device *isp)
hist->event_type = V4L2_EVENT_OMAP3ISP_HIST;
ret = omap3isp_stat_init(hist, "histogram", &hist_subdev_ops);
+
+err:
if (ret) {
- if (hist->dma_ch)
+ if (!IS_ERR_OR_NULL(hist->dma_ch))
dma_release_channel(hist->dma_ch);
+ kfree(hist_cfg);
}
return ret;
diff --git a/drivers/media/platform/omap3isp/ispstat.c b/drivers/media/platform/omap3isp/ispstat.c
index ca7bb8497c3d..62b2eacb96fd 100644
--- a/drivers/media/platform/omap3isp/ispstat.c
+++ b/drivers/media/platform/omap3isp/ispstat.c
@@ -1037,7 +1037,7 @@ static int isp_stat_init_entities(struct ispstat *stat, const char *name,
v4l2_subdev_init(subdev, sd_ops);
snprintf(subdev->name, V4L2_SUBDEV_NAME_SIZE, "OMAP3 ISP %s", name);
- subdev->grp_id = 1 << 16; /* group ID for isp subdevs */
+ subdev->grp_id = BIT(16); /* group ID for isp subdevs */
subdev->flags |= V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_HAS_DEVNODE;
v4l2_set_subdevdata(subdev, stat);
@@ -1075,4 +1075,6 @@ void omap3isp_stat_cleanup(struct ispstat *stat)
mutex_destroy(&stat->ioctl_lock);
isp_stat_bufs_free(stat);
kfree(stat->buf);
+ kfree(stat->priv);
+ kfree(stat->recover_priv);
}
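
With the stat configuration buffers now allocated with plain kzalloc(), omap3isp_stat_cleanup() takes over freeing priv and recover_priv, which is why the AEWB/AF/histogram init paths above only free those buffers themselves when initialization fails. A small standalone model of that ownership hand-off (invented names, user-space allocators standing in for kzalloc/kfree):

#include <stdio.h>
#include <stdlib.h>

struct stat_obj {
	void *priv;
	void *recover_priv;
};

/* On success the stat object owns both buffers; on failure init frees them. */
static int stat_module_init(struct stat_obj *stat, int simulate_failure)
{
	void *cfg = calloc(1, 64);
	void *recover_cfg = calloc(1, 64);
	int ret = 0;

	if (!cfg || !recover_cfg) {
		ret = -1;
		goto err;
	}

	if (simulate_failure) {
		ret = -1;
		goto err;
	}

	stat->priv = cfg;		/* ownership transferred to the object ... */
	stat->recover_priv = recover_cfg;
	return 0;

err:
	free(cfg);			/* ... otherwise init cleans up after itself */
	free(recover_cfg);
	return ret;
}

/* Cleanup frees whatever init handed over (free(NULL) is a no-op). */
static void stat_cleanup(struct stat_obj *stat)
{
	free(stat->priv);
	free(stat->recover_priv);
}

int main(void)
{
	struct stat_obj stat = { 0 };

	if (stat_module_init(&stat, 0) == 0)
		printf("init ok, cleanup frees the buffers\n");
	stat_cleanup(&stat);
	return 0;
}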
diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c
index 6bb4dd264b71..499a7284c5a8 100644
--- a/drivers/media/platform/omap3isp/ispvideo.c
+++ b/drivers/media/platform/omap3isp/ispvideo.c
@@ -1492,6 +1492,5 @@ int omap3isp_video_register(struct isp_video *video, struct v4l2_device *vdev)
void omap3isp_video_unregister(struct isp_video *video)
{
- if (video_is_registered(&video->video))
- video_unregister_device(&video->video);
+ video_unregister_device(&video->video);
}
diff --git a/drivers/media/platform/pxa_camera.c b/drivers/media/platform/pxa_camera.c
index 6addc5ea8494..1c9bfaabc54c 100644
--- a/drivers/media/platform/pxa_camera.c
+++ b/drivers/media/platform/pxa_camera.c
@@ -1388,7 +1388,7 @@ static int pxa_buffer_init(struct pxa_camera_dev *pcdev,
break;
default:
return -EINVAL;
- };
+ }
buf->nb_planes = nb_channels;
ret = sg_split(sgt->sgl, sgt->nents, 0, nb_channels,
diff --git a/drivers/media/platform/qcom/camss/camss-video.c b/drivers/media/platform/qcom/camss/camss-video.c
index 58aebe7114cd..1d50dfbbb762 100644
--- a/drivers/media/platform/qcom/camss/camss-video.c
+++ b/drivers/media/platform/qcom/camss/camss-video.c
@@ -703,7 +703,7 @@ static int video_s_input(struct file *file, void *fh, unsigned int input)
static const struct v4l2_ioctl_ops msm_vid_ioctl_ops = {
.vidioc_querycap = video_querycap,
- .vidioc_enum_fmt_vid_cap_mplane = video_enum_fmt,
+ .vidioc_enum_fmt_vid_cap = video_enum_fmt,
.vidioc_g_fmt_vid_cap_mplane = video_g_fmt,
.vidioc_s_fmt_vid_cap_mplane = video_s_fmt,
.vidioc_try_fmt_vid_cap_mplane = video_try_fmt,
diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
index db8e40b55d72..0acc7576cc58 100644
--- a/drivers/media/platform/qcom/venus/core.c
+++ b/drivers/media/platform/qcom/venus/core.c
@@ -446,7 +446,7 @@ static const struct venus_resources msm8996_res = {
.reg_tbl_size = ARRAY_SIZE(msm8996_reg_preset),
.clks = {"core", "iface", "bus", "mbus" },
.clks_num = 4,
- .max_load = 3110400, /* 4096x2160@90 */
+ .max_load = 2563200,
.hfi_version = HFI_VERSION_3XX,
.vmem_id = VIDC_RESOURCE_NONE,
.vmem_size = 0,
@@ -469,7 +469,7 @@ static const struct venus_resources sdm845_res = {
.freq_tbl_size = ARRAY_SIZE(sdm845_freq_table),
.clks = {"core", "iface", "bus" },
.clks_num = 3,
- .max_load = 2563200,
+ .max_load = 3110400, /* 4096x2160@90 */
.hfi_version = HFI_VERSION_4XX,
.vmem_id = VIDC_RESOURCE_NONE,
.vmem_size = 0,
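
The swapped max_load values above read naturally as macroblock-per-second budgets, assuming the driver's load accounting is (width/16) x (height/16) x fps: (4096/16) x (2160/16) x 90 = 256 x 135 x 90 = 3,110,400, which matches the "4096x2160@90" annotation now attached to the sdm845 entry instead of msm8996.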
diff --git a/drivers/media/platform/qcom/venus/firmware.c b/drivers/media/platform/qcom/venus/firmware.c
index 1eba23409ff3..d3d1748a7ef6 100644
--- a/drivers/media/platform/qcom/venus/firmware.c
+++ b/drivers/media/platform/qcom/venus/firmware.c
@@ -78,11 +78,11 @@ static int venus_load_fw(struct venus_core *core, const char *fwname,
ret = of_address_to_resource(node, 0, &r);
if (ret)
- return ret;
+ goto err_put_node;
ret = request_firmware(&mdt, fwname, dev);
if (ret < 0)
- return ret;
+ goto err_put_node;
fw_size = qcom_mdt_get_size(mdt);
if (fw_size < 0) {
@@ -116,6 +116,8 @@ static int venus_load_fw(struct venus_core *core, const char *fwname,
memunmap(mem_va);
err_release_fw:
release_firmware(mdt);
+err_put_node:
+ of_node_put(node);
return ret;
}
diff --git a/drivers/media/platform/qcom/venus/helpers.c b/drivers/media/platform/qcom/venus/helpers.c
index 7d0017613113..71b06dfc6dc4 100644
--- a/drivers/media/platform/qcom/venus/helpers.c
+++ b/drivers/media/platform/qcom/venus/helpers.c
@@ -458,6 +458,13 @@ static bool is_dynamic_bufmode(struct venus_inst *inst)
struct venus_core *core = inst->core;
struct venus_caps *caps;
+ /*
+ * v4 doesn't send BUFFER_ALLOC_MODE_SUPPORTED property and supports
+ * dynamic buffer mode by default for HFI_BUFFER_OUTPUT/OUTPUT2.
+ */
+ if (IS_V4(core))
+ return true;
+
caps = venus_caps_by_codec(core, inst->hfi_codec, inst->session_type);
if (!caps)
return false;
diff --git a/drivers/media/platform/qcom/venus/hfi_cmds.c b/drivers/media/platform/qcom/venus/hfi_cmds.c
index 8efd55a2ad70..4f645076abfb 100644
--- a/drivers/media/platform/qcom/venus/hfi_cmds.c
+++ b/drivers/media/platform/qcom/venus/hfi_cmds.c
@@ -1205,6 +1205,8 @@ pkt_session_set_property_4xx(struct hfi_session_set_property_pkt *pkt,
break;
}
case HFI_PROPERTY_CONFIG_VENC_MAX_BITRATE:
+ case HFI_PROPERTY_CONFIG_VDEC_POST_LOOP_DEBLOCKER:
+ case HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE:
/* not implemented on Venus 4xx */
return -ENOTSUPP;
default:
diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c
index 6205ad8b3201..e1f998656c07 100644
--- a/drivers/media/platform/qcom/venus/vdec.c
+++ b/drivers/media/platform/qcom/venus/vdec.c
@@ -482,8 +482,8 @@ unlock:
static const struct v4l2_ioctl_ops vdec_ioctl_ops = {
.vidioc_querycap = vdec_querycap,
- .vidioc_enum_fmt_vid_cap_mplane = vdec_enum_fmt,
- .vidioc_enum_fmt_vid_out_mplane = vdec_enum_fmt,
+ .vidioc_enum_fmt_vid_cap = vdec_enum_fmt,
+ .vidioc_enum_fmt_vid_out = vdec_enum_fmt,
.vidioc_s_fmt_vid_cap_mplane = vdec_s_fmt,
.vidioc_s_fmt_vid_out_mplane = vdec_s_fmt,
.vidioc_g_fmt_vid_cap_mplane = vdec_g_fmt,
diff --git a/drivers/media/platform/qcom/venus/vdec_ctrls.c b/drivers/media/platform/qcom/venus/vdec_ctrls.c
index 68e0f7d0b8fc..300350bfe8bd 100644
--- a/drivers/media/platform/qcom/venus/vdec_ctrls.c
+++ b/drivers/media/platform/qcom/venus/vdec_ctrls.c
@@ -66,7 +66,7 @@ static int vdec_op_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
break;
default:
return -EINVAL;
- };
+ }
return 0;
}
diff --git a/drivers/media/platform/qcom/venus/venc.c b/drivers/media/platform/qcom/venus/venc.c
index 7a4815d52c12..a5f3d2c46bea 100644
--- a/drivers/media/platform/qcom/venus/venc.c
+++ b/drivers/media/platform/qcom/venus/venc.c
@@ -607,8 +607,8 @@ static int venc_enum_frameintervals(struct file *file, void *fh,
static const struct v4l2_ioctl_ops venc_ioctl_ops = {
.vidioc_querycap = venc_querycap,
- .vidioc_enum_fmt_vid_cap_mplane = venc_enum_fmt,
- .vidioc_enum_fmt_vid_out_mplane = venc_enum_fmt,
+ .vidioc_enum_fmt_vid_cap = venc_enum_fmt,
+ .vidioc_enum_fmt_vid_out = venc_enum_fmt,
.vidioc_s_fmt_vid_cap_mplane = venc_s_fmt,
.vidioc_s_fmt_vid_out_mplane = venc_s_fmt,
.vidioc_g_fmt_vid_cap_mplane = venc_g_fmt,
diff --git a/drivers/media/platform/qcom/venus/venc_ctrls.c b/drivers/media/platform/qcom/venus/venc_ctrls.c
index 8832285d8c15..877c0b3299e9 100644
--- a/drivers/media/platform/qcom/venus/venc_ctrls.c
+++ b/drivers/media/platform/qcom/venus/venc_ctrls.c
@@ -108,6 +108,9 @@ static int venc_op_s_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
ctr->profile.h264 = ctrl->val;
break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE:
+ ctr->profile.hevc = ctrl->val;
+ break;
case V4L2_CID_MPEG_VIDEO_VP8_PROFILE:
ctr->profile.vpx = ctrl->val;
break;
@@ -117,6 +120,9 @@ static int venc_op_s_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
ctr->level.h264 = ctrl->val;
break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_LEVEL:
+ ctr->level.hevc = ctrl->val;
+ break;
case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP:
ctr->h264_i_qp = ctrl->val;
break;
@@ -208,7 +214,7 @@ int venc_ctrl_init(struct venus_inst *inst)
{
int ret;
- ret = v4l2_ctrl_handler_init(&inst->ctrl_handler, 28);
+ ret = v4l2_ctrl_handler_init(&inst->ctrl_handler, 30);
if (ret)
return ret;
@@ -237,6 +243,19 @@ int venc_ctrl_init(struct venus_inst *inst)
0, V4L2_MPEG_VIDEO_MPEG4_LEVEL_0);
v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_HEVC_PROFILE,
+ V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10,
+ ~((1 << V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN) |
+ (1 << V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_STILL_PICTURE) |
+ (1 << V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10)),
+ V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN);
+
+ v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_HEVC_LEVEL,
+ V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2,
+ 0, V4L2_MPEG_VIDEO_HEVC_LEVEL_1);
+
+ v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_H264_PROFILE,
V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH,
~((1 << V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE) |
@@ -265,7 +284,7 @@ int venc_ctrl_init(struct venus_inst *inst)
v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE,
- V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES,
+ V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_BYTES,
0, V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE);
v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops,
diff --git a/drivers/media/platform/rcar-vin/rcar-csi2.c b/drivers/media/platform/rcar-vin/rcar-csi2.c
index 8f097e514900..c14af1b929df 100644
--- a/drivers/media/platform/rcar-vin/rcar-csi2.c
+++ b/drivers/media/platform/rcar-vin/rcar-csi2.c
@@ -1019,10 +1019,8 @@ static int rcsi2_probe_resources(struct rcar_csi2 *priv,
return ret;
priv->rstc = devm_reset_control_get(&pdev->dev, NULL);
- if (IS_ERR(priv->rstc))
- return PTR_ERR(priv->rstc);
- return 0;
+ return PTR_ERR_OR_ZERO(priv->rstc);
}
static const struct rcar_csi2_info rcar_csi2_info_r8a7795 = {
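
For readers who have not met the helper used in the rcar-csi2 hunk above: PTR_ERR_OR_ZERO(), from <linux/err.h>, collapses the usual IS_ERR()/PTR_ERR() tail into a single return. Roughly, as a sketch of its semantics rather than the kernel's exact definition:

#include <linux/err.h>

static inline int ptr_err_or_zero_sketch(const void *ptr)
{
	if (IS_ERR(ptr))		/* encoded errno in the pointer? */
		return PTR_ERR(ptr);	/* propagate it */
	return 0;			/* valid pointer: success */
}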
diff --git a/drivers/media/platform/rcar-vin/rcar-v4l2.c b/drivers/media/platform/rcar-vin/rcar-v4l2.c
index 7cbdcbf9b090..0936bcd98df1 100644
--- a/drivers/media/platform/rcar-vin/rcar-v4l2.c
+++ b/drivers/media/platform/rcar-vin/rcar-v4l2.c
@@ -749,103 +749,65 @@ static const struct v4l2_ioctl_ops rvin_mc_ioctl_ops = {
* File Operations
*/
-static int rvin_power_on(struct rvin_dev *vin)
+static int rvin_power_parallel(struct rvin_dev *vin, bool on)
{
- int ret;
struct v4l2_subdev *sd = vin_to_source(vin);
-
- pm_runtime_get_sync(vin->v4l2_dev.dev);
-
- ret = v4l2_subdev_call(sd, core, s_power, 1);
- if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV)
- return ret;
- return 0;
-}
-
-static int rvin_power_off(struct rvin_dev *vin)
-{
+ int power = on ? 1 : 0;
int ret;
- struct v4l2_subdev *sd = vin_to_source(vin);
-
- ret = v4l2_subdev_call(sd, core, s_power, 0);
-
- pm_runtime_put(vin->v4l2_dev.dev);
+ ret = v4l2_subdev_call(sd, core, s_power, power);
if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV)
return ret;
return 0;
}
-static int rvin_initialize_device(struct file *file)
+static int rvin_open(struct file *file)
{
struct rvin_dev *vin = video_drvdata(file);
int ret;
- struct v4l2_format f = {
- .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
- .fmt.pix = {
- .width = vin->format.width,
- .height = vin->format.height,
- .field = vin->format.field,
- .colorspace = vin->format.colorspace,
- .pixelformat = vin->format.pixelformat,
- },
- };
-
- ret = rvin_power_on(vin);
+ ret = pm_runtime_get_sync(vin->dev);
if (ret < 0)
return ret;
- pm_runtime_enable(&vin->vdev.dev);
- ret = pm_runtime_resume(&vin->vdev.dev);
- if (ret < 0 && ret != -ENOSYS)
- goto eresume;
-
- /*
- * Try to configure with default parameters. Notice: this is the
- * very first open, so, we cannot race against other calls,
- * apart from someone else calling open() simultaneously, but
- * .host_lock is protecting us against it.
- */
- ret = rvin_s_fmt_vid_cap(file, NULL, &f);
- if (ret < 0)
- goto esfmt;
-
- v4l2_ctrl_handler_setup(&vin->ctrl_handler);
-
- return 0;
-esfmt:
- pm_runtime_disable(&vin->vdev.dev);
-eresume:
- rvin_power_off(vin);
-
- return ret;
-}
-
-static int rvin_open(struct file *file)
-{
- struct rvin_dev *vin = video_drvdata(file);
- int ret;
-
- mutex_lock(&vin->lock);
+ ret = mutex_lock_interruptible(&vin->lock);
+ if (ret)
+ goto err_pm;
file->private_data = vin;
ret = v4l2_fh_open(file);
if (ret)
- goto unlock;
-
- if (!v4l2_fh_is_singular_file(file))
- goto unlock;
+ goto err_unlock;
- if (rvin_initialize_device(file)) {
- v4l2_fh_release(file);
- ret = -ENODEV;
+ if (vin->info->use_mc) {
+ ret = v4l2_pipeline_pm_use(&vin->vdev.entity, 1);
+ if (ret < 0)
+ goto err_open;
+ } else {
+ if (v4l2_fh_is_singular_file(file)) {
+ ret = rvin_power_parallel(vin, true);
+ if (ret < 0)
+ goto err_open;
+
+ ret = v4l2_ctrl_handler_setup(&vin->ctrl_handler);
+ if (ret)
+ goto err_parallel;
+ }
}
+ mutex_unlock(&vin->lock);
-unlock:
+ return 0;
+err_parallel:
+ rvin_power_parallel(vin, false);
+err_open:
+ v4l2_fh_release(file);
+err_unlock:
mutex_unlock(&vin->lock);
+err_pm:
+ pm_runtime_put(vin->dev);
+
return ret;
}
@@ -863,18 +825,17 @@ static int rvin_release(struct file *file)
/* the release helper will cleanup any on-going streaming */
ret = _vb2_fop_release(file, NULL);
- /*
- * If this was the last open file.
- * Then de-initialize hw module.
- */
- if (fh_singular) {
- pm_runtime_suspend(&vin->vdev.dev);
- pm_runtime_disable(&vin->vdev.dev);
- rvin_power_off(vin);
+ if (vin->info->use_mc) {
+ v4l2_pipeline_pm_use(&vin->vdev.entity, 0);
+ } else {
+ if (fh_singular)
+ rvin_power_parallel(vin, false);
}
mutex_unlock(&vin->lock);
+ pm_runtime_put(vin->dev);
+
return ret;
}
@@ -888,74 +849,6 @@ static const struct v4l2_file_operations rvin_fops = {
.read = vb2_fop_read,
};
-/* -----------------------------------------------------------------------------
- * Media controller file operations
- */
-
-static int rvin_mc_open(struct file *file)
-{
- struct rvin_dev *vin = video_drvdata(file);
- int ret;
-
- ret = mutex_lock_interruptible(&vin->lock);
- if (ret)
- return ret;
-
- ret = pm_runtime_get_sync(vin->dev);
- if (ret < 0)
- goto err_unlock;
-
- ret = v4l2_pipeline_pm_use(&vin->vdev.entity, 1);
- if (ret < 0)
- goto err_pm;
-
- file->private_data = vin;
-
- ret = v4l2_fh_open(file);
- if (ret)
- goto err_v4l2pm;
-
- mutex_unlock(&vin->lock);
-
- return 0;
-err_v4l2pm:
- v4l2_pipeline_pm_use(&vin->vdev.entity, 0);
-err_pm:
- pm_runtime_put(vin->dev);
-err_unlock:
- mutex_unlock(&vin->lock);
-
- return ret;
-}
-
-static int rvin_mc_release(struct file *file)
-{
- struct rvin_dev *vin = video_drvdata(file);
- int ret;
-
- mutex_lock(&vin->lock);
-
- /* the release helper will cleanup any on-going streaming. */
- ret = _vb2_fop_release(file, NULL);
-
- v4l2_pipeline_pm_use(&vin->vdev.entity, 0);
- pm_runtime_put(vin->dev);
-
- mutex_unlock(&vin->lock);
-
- return ret;
-}
-
-static const struct v4l2_file_operations rvin_mc_fops = {
- .owner = THIS_MODULE,
- .unlocked_ioctl = video_ioctl2,
- .open = rvin_mc_open,
- .release = rvin_mc_release,
- .poll = vb2_fop_poll,
- .mmap = vb2_fop_mmap,
- .read = vb2_fop_read,
-};
-
void rvin_v4l2_unregister(struct rvin_dev *vin)
{
if (!video_is_registered(&vin->vdev))
@@ -996,6 +889,7 @@ int rvin_v4l2_register(struct rvin_dev *vin)
snprintf(vdev->name, sizeof(vdev->name), "VIN%u output", vin->id);
vdev->release = video_device_release_empty;
vdev->lock = &vin->lock;
+ vdev->fops = &rvin_fops;
vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
V4L2_CAP_READWRITE;
@@ -1007,10 +901,8 @@ int rvin_v4l2_register(struct rvin_dev *vin)
vin->format.colorspace = RVIN_DEFAULT_COLORSPACE;
if (vin->info->use_mc) {
- vdev->fops = &rvin_mc_fops;
vdev->ioctl_ops = &rvin_mc_ioctl_ops;
} else {
- vdev->fops = &rvin_fops;
vdev->ioctl_ops = &rvin_ioctl_ops;
rvin_reset_format(vin);
}
diff --git a/drivers/media/platform/rcar_fdp1.c b/drivers/media/platform/rcar_fdp1.c
index 6a90bc4c476e..43aae9b6bb20 100644
--- a/drivers/media/platform/rcar_fdp1.c
+++ b/drivers/media/platform/rcar_fdp1.c
@@ -257,6 +257,8 @@ MODULE_PARM_DESC(debug, "activate debug info");
#define FD1_IP_H3_ES1 0x02010101
#define FD1_IP_M3W 0x02010202
#define FD1_IP_H3 0x02010203
+#define FD1_IP_M3N 0x02010204
+#define FD1_IP_E3 0x02010205
/* LUTs */
#define FD1_LUT_DIF_ADJ 0x1000
@@ -1730,8 +1732,8 @@ static const char * const fdp1_ctrl_deint_menu[] = {
static const struct v4l2_ioctl_ops fdp1_ioctl_ops = {
.vidioc_querycap = fdp1_vidioc_querycap,
- .vidioc_enum_fmt_vid_cap_mplane = fdp1_enum_fmt_vid_cap,
- .vidioc_enum_fmt_vid_out_mplane = fdp1_enum_fmt_vid_out,
+ .vidioc_enum_fmt_vid_cap = fdp1_enum_fmt_vid_cap,
+ .vidioc_enum_fmt_vid_out = fdp1_enum_fmt_vid_out,
.vidioc_g_fmt_vid_cap_mplane = fdp1_g_fmt,
.vidioc_g_fmt_vid_out_mplane = fdp1_g_fmt,
.vidioc_try_fmt_vid_cap_mplane = fdp1_try_fmt,
@@ -2365,6 +2367,12 @@ static int fdp1_probe(struct platform_device *pdev)
case FD1_IP_H3:
dprintk(fdp1, "FDP1 Version R-Car H3\n");
break;
+ case FD1_IP_M3N:
+ dprintk(fdp1, "FDP1 Version R-Car M3N\n");
+ break;
+ case FD1_IP_E3:
+ dprintk(fdp1, "FDP1 Version R-Car E3\n");
+ break;
default:
dev_err(fdp1->dev, "FDP1 Unidentifiable (0x%08x)\n",
hw_version);
diff --git a/drivers/media/platform/rcar_jpu.c b/drivers/media/platform/rcar_jpu.c
index 1dfd2eb65920..1c3f507acfc9 100644
--- a/drivers/media/platform/rcar_jpu.c
+++ b/drivers/media/platform/rcar_jpu.c
@@ -671,8 +671,6 @@ static int jpu_querycap(struct file *file, void *priv,
strscpy(cap->driver, DRV_NAME, sizeof(cap->driver));
snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
dev_name(ctx->jpu->dev));
- cap->device_caps |= V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M_MPLANE;
- cap->capabilities = V4L2_CAP_DEVICE_CAPS | cap->device_caps;
memset(cap->reserved, 0, sizeof(cap->reserved));
return 0;
@@ -948,8 +946,8 @@ static int jpu_streamon(struct file *file, void *priv, enum v4l2_buf_type type)
static const struct v4l2_ioctl_ops jpu_ioctl_ops = {
.vidioc_querycap = jpu_querycap,
- .vidioc_enum_fmt_vid_cap_mplane = jpu_enum_fmt_cap,
- .vidioc_enum_fmt_vid_out_mplane = jpu_enum_fmt_out,
+ .vidioc_enum_fmt_vid_cap = jpu_enum_fmt_cap,
+ .vidioc_enum_fmt_vid_out = jpu_enum_fmt_out,
.vidioc_g_fmt_vid_cap_mplane = jpu_g_fmt,
.vidioc_g_fmt_vid_out_mplane = jpu_g_fmt,
.vidioc_try_fmt_vid_cap_mplane = jpu_try_fmt,
@@ -1662,6 +1660,8 @@ static int jpu_probe(struct platform_device *pdev)
jpu->vfd_encoder.lock = &jpu->mutex;
jpu->vfd_encoder.v4l2_dev = &jpu->v4l2_dev;
jpu->vfd_encoder.vfl_dir = VFL_DIR_M2M;
+ jpu->vfd_encoder.device_caps = V4L2_CAP_STREAMING |
+ V4L2_CAP_VIDEO_M2M_MPLANE;
ret = video_register_device(&jpu->vfd_encoder, VFL_TYPE_GRABBER, -1);
if (ret) {
@@ -1679,6 +1679,8 @@ static int jpu_probe(struct platform_device *pdev)
jpu->vfd_decoder.lock = &jpu->mutex;
jpu->vfd_decoder.v4l2_dev = &jpu->v4l2_dev;
jpu->vfd_decoder.vfl_dir = VFL_DIR_M2M;
+ jpu->vfd_decoder.device_caps = V4L2_CAP_STREAMING |
+ V4L2_CAP_VIDEO_M2M_MPLANE;
ret = video_register_device(&jpu->vfd_decoder, VFL_TYPE_GRABBER, -1);
if (ret) {
diff --git a/drivers/media/platform/renesas-ceu.c b/drivers/media/platform/renesas-ceu.c
index 150196f7cf96..57d0c0f9fa4b 100644
--- a/drivers/media/platform/renesas-ceu.c
+++ b/drivers/media/platform/renesas-ceu.c
@@ -1339,7 +1339,7 @@ static int ceu_enum_frameintervals(struct file *file, void *fh,
static const struct v4l2_ioctl_ops ceu_ioctl_ops = {
.vidioc_querycap = ceu_querycap,
- .vidioc_enum_fmt_vid_cap_mplane = ceu_enum_fmt_vid_cap,
+ .vidioc_enum_fmt_vid_cap = ceu_enum_fmt_vid_cap,
.vidioc_try_fmt_vid_cap_mplane = ceu_try_fmt_vid_cap,
.vidioc_s_fmt_vid_cap_mplane = ceu_s_fmt_vid_cap,
.vidioc_g_fmt_vid_cap_mplane = ceu_g_fmt_vid_cap,
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index 4e936b95018a..b776f83e395e 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -523,7 +523,8 @@ static void s5p_mfc_handle_seq_done(struct s5p_mfc_ctx *ctx,
dev);
ctx->mv_count = s5p_mfc_hw_call(dev->mfc_ops, get_mv_count,
dev);
- ctx->scratch_buf_size = s5p_mfc_hw_call(dev->mfc_ops,
+ if (FW_HAS_E_MIN_SCRATCH_BUF(dev))
+ ctx->scratch_buf_size = s5p_mfc_hw_call(dev->mfc_ops,
get_min_scratch_buf_size, dev);
if (ctx->img_width == 0 || ctx->img_height == 0)
ctx->state = MFCINST_ERROR;
@@ -1344,6 +1345,7 @@ static int s5p_mfc_probe(struct platform_device *pdev)
vfd->lock = &dev->mfc_mutex;
vfd->v4l2_dev = &dev->v4l2_dev;
vfd->vfl_dir = VFL_DIR_M2M;
+ vfd->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
set_bit(V4L2_FL_QUIRK_INVERTED_CROP, &vfd->flags);
snprintf(vfd->name, sizeof(vfd->name), "%s", S5P_MFC_DEC_NAME);
dev->vfd_dec = vfd;
@@ -1362,6 +1364,7 @@ static int s5p_mfc_probe(struct platform_device *pdev)
vfd->lock = &dev->mfc_mutex;
vfd->v4l2_dev = &dev->v4l2_dev;
vfd->vfl_dir = VFL_DIR_M2M;
+ vfd->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
snprintf(vfd->name, sizeof(vfd->name), "%s", S5P_MFC_ENC_NAME);
dev->vfd_enc = vfd;
video_set_drvdata(vfd, dev);
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
index d12fc4f397b6..4017c8b471f4 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
@@ -271,13 +271,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strscpy(cap->card, dev->vfd_dec->name, sizeof(cap->card));
snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
dev_name(&dev->plat_dev->dev));
- /*
- * This is only a mem-to-mem video device. The capture and output
- * device capability flags are left only for backward compatibility
- * and are scheduled for removal.
- */
- cap->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -309,14 +302,14 @@ static int vidioc_enum_fmt(struct file *file, struct v4l2_fmtdesc *f,
return 0;
}
-static int vidioc_enum_fmt_vid_cap_mplane(struct file *file, void *pirv,
- struct v4l2_fmtdesc *f)
+static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,

+ struct v4l2_fmtdesc *f)
{
return vidioc_enum_fmt(file, f, false);
}
-static int vidioc_enum_fmt_vid_out_mplane(struct file *file, void *priv,
- struct v4l2_fmtdesc *f)
+static int vidioc_enum_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
{
return vidioc_enum_fmt(file, f, true);
}
@@ -883,8 +876,8 @@ static int vidioc_subscribe_event(struct v4l2_fh *fh,
/* v4l2_ioctl_ops */
static const struct v4l2_ioctl_ops s5p_mfc_dec_ioctl_ops = {
.vidioc_querycap = vidioc_querycap,
- .vidioc_enum_fmt_vid_cap_mplane = vidioc_enum_fmt_vid_cap_mplane,
- .vidioc_enum_fmt_vid_out_mplane = vidioc_enum_fmt_vid_out_mplane,
+ .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
+ .vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid_out,
.vidioc_g_fmt_vid_cap_mplane = vidioc_g_fmt,
.vidioc_g_fmt_vid_out_mplane = vidioc_g_fmt,
.vidioc_try_fmt_vid_cap_mplane = vidioc_try_fmt,
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
index 74090a68f807..97e76480e942 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
@@ -130,7 +130,7 @@ static struct mfc_control controls[] = {
.id = V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE,
.type = V4L2_CTRL_TYPE_MENU,
.minimum = V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE,
- .maximum = V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES,
+ .maximum = V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_BYTES,
.default_value = V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE,
.menu_skip_mask = 0,
},
@@ -1313,13 +1313,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strscpy(cap->card, dev->vfd_enc->name, sizeof(cap->card));
snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
dev_name(&dev->plat_dev->dev));
- /*
- * This is only a mem-to-mem video device. The capture and output
- * device capability flags are left only for backward compatibility
- * and are scheduled for removal.
- */
- cap->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -1350,14 +1343,14 @@ static int vidioc_enum_fmt(struct file *file, struct v4l2_fmtdesc *f,
return -EINVAL;
}
-static int vidioc_enum_fmt_vid_cap_mplane(struct file *file, void *pirv,
- struct v4l2_fmtdesc *f)
+static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
{
return vidioc_enum_fmt(file, f, false);
}
-static int vidioc_enum_fmt_vid_out_mplane(struct file *file, void *prov,
- struct v4l2_fmtdesc *f)
+static int vidioc_enum_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
{
return vidioc_enum_fmt(file, f, true);
}
@@ -2339,8 +2332,8 @@ static int vidioc_subscribe_event(struct v4l2_fh *fh,
static const struct v4l2_ioctl_ops s5p_mfc_enc_ioctl_ops = {
.vidioc_querycap = vidioc_querycap,
- .vidioc_enum_fmt_vid_cap_mplane = vidioc_enum_fmt_vid_cap_mplane,
- .vidioc_enum_fmt_vid_out_mplane = vidioc_enum_fmt_vid_out_mplane,
+ .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
+ .vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid_out,
.vidioc_g_fmt_vid_cap_mplane = vidioc_g_fmt,
.vidioc_g_fmt_vid_out_mplane = vidioc_g_fmt,
.vidioc_try_fmt_vid_cap_mplane = vidioc_try_fmt,
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
index ee727e21ef5b..f76a07400966 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
@@ -692,9 +692,9 @@ static int s5p_mfc_set_enc_params(struct s5p_mfc_ctx *ctx)
/* multi-slice control */
/* multi-slice MB number or bit size */
mfc_write(dev, p->slice_mode, S5P_FIMV_ENC_MSLICE_CTRL);
- if (p->slice_mode == V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_MB) {
+ if (p->slice_mode == V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_MB) {
mfc_write(dev, p->slice_mb, S5P_FIMV_ENC_MSLICE_MB);
- } else if (p->slice_mode == V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES) {
+ } else if (p->slice_mode == V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_BYTES) {
mfc_write(dev, p->slice_bit, S5P_FIMV_ENC_MSLICE_BIT);
} else {
mfc_write(dev, 0, S5P_FIMV_ENC_MSLICE_MB);
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
index 8717b475d58d..f7621a9051cb 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
@@ -733,10 +733,10 @@ static int s5p_mfc_set_slice_mode(struct s5p_mfc_ctx *ctx)
/* multi-slice control */
/* multi-slice MB number or bit size */
writel(ctx->slice_mode, mfc_regs->e_mslice_mode);
- if (ctx->slice_mode == V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_MB) {
+ if (ctx->slice_mode == V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_MB) {
writel(ctx->slice_size.mb, mfc_regs->e_mslice_size_mb);
} else if (ctx->slice_mode ==
- V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES) {
+ V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_BYTES) {
writel(ctx->slice_size.bits, mfc_regs->e_mslice_size_bits);
} else {
writel(0x0, mfc_regs->e_mslice_size_mb);
@@ -776,11 +776,11 @@ static int s5p_mfc_set_enc_params(struct s5p_mfc_ctx *ctx)
/* multi-slice MB number or bit size */
ctx->slice_mode = p->slice_mode;
reg = 0;
- if (p->slice_mode == V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_MB) {
+ if (p->slice_mode == V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_MB) {
reg |= (0x1 << 3);
writel(reg, mfc_regs->e_enc_options);
ctx->slice_size.mb = p->slice_mb;
- } else if (p->slice_mode == V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES) {
+ } else if (p->slice_mode == V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_BYTES) {
reg |= (0x1 << 3);
writel(reg, mfc_regs->e_enc_options);
ctx->slice_size.bits = p->slice_bit;
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
index 2e62f8721fa5..7d52431c2c83 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
@@ -34,6 +34,11 @@ int s5p_mfc_init_pm(struct s5p_mfc_dev *dev)
for (i = 0; i < pm->num_clocks; i++) {
pm->clocks[i] = devm_clk_get(pm->device, pm->clk_names[i]);
if (IS_ERR(pm->clocks[i])) {
+ /* additional clocks are optional */
+ if (i && PTR_ERR(pm->clocks[i]) == -ENOENT) {
+ pm->clocks[i] = NULL;
+ continue;
+ }
mfc_err("Failed to get clock: %s\n",
pm->clk_names[i]);
return PTR_ERR(pm->clocks[i]);
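
The s5p-mfc hunk above open-codes "every clock after the first is optional" by turning -ENOENT into a NULL clock handle. If the target tree already has devm_clk_get_optional() (present in recent kernels), the same policy can be expressed as below; treat this as an illustrative alternative, not the patch's code:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int mfc_get_clocks_sketch(struct device *dev, struct clk **clks,
				 const char * const *names, int num)
{
	int i;

	for (i = 0; i < num; i++) {
		/* the first clock stays mandatory, the rest may be absent */
		clks[i] = i ? devm_clk_get_optional(dev, names[i])
			    : devm_clk_get(dev, names[i]);
		if (IS_ERR(clks[i])) {
			dev_err(dev, "Failed to get clock: %s\n", names[i]);
			return PTR_ERR(clks[i]);
		}
	}
	return 0;
}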
diff --git a/drivers/media/platform/seco-cec/seco-cec.c b/drivers/media/platform/seco-cec/seco-cec.c
index e5080d6f5b2d..1d0133f01e00 100644
--- a/drivers/media/platform/seco-cec/seco-cec.c
+++ b/drivers/media/platform/seco-cec/seco-cec.c
@@ -18,7 +18,7 @@
#include <linux/platform_device.h>
/* CEC Framework */
-#include <media/cec.h>
+#include <media/cec-notifier.h>
#include "seco-cec.h"
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.c b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.c
index 075d4695ee4d..a79250a7f812 100644
--- a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.c
+++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.c
@@ -143,7 +143,7 @@ int c8sectpfe_frontend_attach(struct dvb_frontend **fe,
"%s: stv0367ter_attach failed for NIM card %s\n"
, __func__, dvb_card_str(tsin->dvb_card));
return -ENODEV;
- };
+ }
/*
* init the demod so that i2c gate_ctrl
@@ -203,7 +203,7 @@ int c8sectpfe_frontend_attach(struct dvb_frontend **fe,
"%s: stv6110x_attach failed for NIM card %s\n"
, __func__, dvb_card_str(tsin->dvb_card));
return -ENODEV;
- };
+ }
stv090x_config.tuner_init = fe2->tuner_init;
stv090x_config.tuner_set_mode = fe2->tuner_set_mode;
diff --git a/drivers/media/platform/sti/hva/hva-v4l2.c b/drivers/media/platform/sti/hva/hva-v4l2.c
index c42623dccfd6..64004d15a9c9 100644
--- a/drivers/media/platform/sti/hva/hva-v4l2.c
+++ b/drivers/media/platform/sti/hva/hva-v4l2.c
@@ -566,6 +566,7 @@ static int hva_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
*/
struct vb2_queue *vq;
struct hva_stream *stream;
+ struct vb2_buffer *vb2_buf;
vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, buf->type);
@@ -575,7 +576,8 @@ static int hva_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
return -EINVAL;
}
- stream = (struct hva_stream *)vq->bufs[buf->index];
+ vb2_buf = vb2_get_buffer(vq, buf->index);
+ stream = to_hva_stream(to_vb2_v4l2_buffer(vb2_buf));
stream->bytesused = buf->bytesused;
}
diff --git a/drivers/media/platform/stm32/stm32-dcmi.c b/drivers/media/platform/stm32/stm32-dcmi.c
index b9dad0accd1b..d855e9c09c08 100644
--- a/drivers/media/platform/stm32/stm32-dcmi.c
+++ b/drivers/media/platform/stm32/stm32-dcmi.c
@@ -1702,7 +1702,7 @@ static int dcmi_probe(struct platform_device *pdev)
if (irq <= 0) {
if (irq != -EPROBE_DEFER)
dev_err(&pdev->dev, "Could not get irq\n");
- return irq;
+ return irq ? irq : -ENXIO;
}
dcmi->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c
index 4c79eb64a7a7..6e0e894154f4 100644
--- a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c
+++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c
@@ -924,6 +924,7 @@ static int sun6i_csi_remove(struct platform_device *pdev)
static const struct of_device_id sun6i_csi_of_match[] = {
{ .compatible = "allwinner,sun6i-a31-csi", },
+ { .compatible = "allwinner,sun8i-a83t-csi", },
{ .compatible = "allwinner,sun8i-h3-csi", },
{ .compatible = "allwinner,sun8i-v3s-csi", },
{ .compatible = "allwinner,sun50i-a64-csi", },
diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c
index 4867d0ee803a..dda04498ac56 100644
--- a/drivers/media/platform/ti-vpe/vpe.c
+++ b/drivers/media/platform/ti-vpe/vpe.c
@@ -1492,8 +1492,6 @@ static int vpe_querycap(struct file *file, void *priv,
strscpy(cap->card, VPE_MODULE_NAME, sizeof(cap->card));
snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
VPE_MODULE_NAME);
- cap->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -1970,12 +1968,12 @@ static const struct v4l2_ctrl_ops vpe_ctrl_ops = {
static const struct v4l2_ioctl_ops vpe_ioctl_ops = {
.vidioc_querycap = vpe_querycap,
- .vidioc_enum_fmt_vid_cap_mplane = vpe_enum_fmt,
+ .vidioc_enum_fmt_vid_cap = vpe_enum_fmt,
.vidioc_g_fmt_vid_cap_mplane = vpe_g_fmt,
.vidioc_try_fmt_vid_cap_mplane = vpe_try_fmt,
.vidioc_s_fmt_vid_cap_mplane = vpe_s_fmt,
- .vidioc_enum_fmt_vid_out_mplane = vpe_enum_fmt,
+ .vidioc_enum_fmt_vid_out = vpe_enum_fmt,
.vidioc_g_fmt_vid_out_mplane = vpe_g_fmt,
.vidioc_try_fmt_vid_out_mplane = vpe_try_fmt,
.vidioc_s_fmt_vid_out_mplane = vpe_s_fmt,
@@ -2408,6 +2406,7 @@ static const struct video_device vpe_videodev = {
.minor = -1,
.release = video_device_release_empty,
.vfl_dir = VFL_DIR_M2M,
+ .device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING,
};
static const struct v4l2_m2m_ops m2m_ops = {
diff --git a/drivers/media/platform/vicodec/Kconfig b/drivers/media/platform/vicodec/Kconfig
index 36bb0e934252..89456665cb16 100644
--- a/drivers/media/platform/vicodec/Kconfig
+++ b/drivers/media/platform/vicodec/Kconfig
@@ -4,7 +4,6 @@ config VIDEO_VICODEC
depends on VIDEO_DEV && VIDEO_V4L2
select VIDEOBUF2_VMALLOC
select V4L2_MEM2MEM_DEV
- default n
help
Driver for a Virtual Codec
diff --git a/drivers/media/platform/vicodec/vicodec-core.c b/drivers/media/platform/vicodec/vicodec-core.c
index bd01a9206aa6..7e7c1e80f29f 100644
--- a/drivers/media/platform/vicodec/vicodec-core.c
+++ b/drivers/media/platform/vicodec/vicodec-core.c
@@ -84,6 +84,7 @@ struct vicodec_q_data {
unsigned int visible_width;
unsigned int visible_height;
unsigned int sizeimage;
+ unsigned int vb2_sizeimage;
unsigned int sequence;
const struct v4l2_fwht_pixfmt_info *info;
};
@@ -116,12 +117,14 @@ struct vicodec_ctx {
struct vicodec_dev *dev;
bool is_enc;
bool is_stateless;
+ bool is_draining;
+ bool next_is_last;
+ bool has_stopped;
spinlock_t *lock;
struct v4l2_ctrl_handler hdl;
struct vb2_v4l2_buffer *last_src_buf;
- struct vb2_v4l2_buffer *last_dst_buf;
/* Source and destination queue data */
struct vicodec_q_data q_data[2];
@@ -138,6 +141,10 @@ struct vicodec_ctx {
bool source_changed;
};
+static const struct v4l2_event vicodec_eos_event = {
+ .type = V4L2_EVENT_EOS
+};
+
static inline struct vicodec_ctx *file2ctx(struct file *file)
{
return container_of(file->private_data, struct vicodec_ctx, fh);
@@ -329,6 +336,10 @@ static int device_process(struct vicodec_ctx *ctx,
copy_cap_to_ref(p_dst, ctx->state.info, &ctx->state);
vb2_set_plane_payload(&dst_vb->vb2_buf, 0, q_dst->sizeimage);
+ if (ntohl(ctx->state.header.flags) & FWHT_FL_I_FRAME)
+ dst_vb->flags |= V4L2_BUF_FLAG_KEYFRAME;
+ else
+ dst_vb->flags |= V4L2_BUF_FLAG_PFRAME;
}
return ret;
}
@@ -397,9 +408,6 @@ static enum vb2_buffer_state get_next_header(struct vicodec_ctx *ctx,
/* device_run() - prepares and starts the device */
static void device_run(void *priv)
{
- static const struct v4l2_event eos_event = {
- .type = V4L2_EVENT_EOS
- };
struct vicodec_ctx *ctx = priv;
struct vicodec_dev *dev = ctx->dev;
struct vb2_v4l2_buffer *src_buf, *dst_buf;
@@ -407,7 +415,6 @@ static void device_run(void *priv)
u32 state;
struct media_request *src_req;
-
src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
src_req = src_buf->vb2_buf.req_obj.req;
@@ -421,14 +428,14 @@ static void device_run(void *priv)
else
dst_buf->sequence = q_dst->sequence++;
dst_buf->flags &= ~V4L2_BUF_FLAG_LAST;
- v4l2_m2m_buf_copy_metadata(src_buf, dst_buf, !ctx->is_enc);
-
- ctx->last_dst_buf = dst_buf;
+ v4l2_m2m_buf_copy_metadata(src_buf, dst_buf, false);
spin_lock(ctx->lock);
if (!ctx->comp_has_next_frame && src_buf == ctx->last_src_buf) {
dst_buf->flags |= V4L2_BUF_FLAG_LAST;
- v4l2_event_queue_fh(&ctx->fh, &eos_event);
+ v4l2_event_queue_fh(&ctx->fh, &vicodec_eos_event);
+ ctx->is_draining = false;
+ ctx->has_stopped = true;
}
if (ctx->is_enc || ctx->is_stateless) {
src_buf->sequence = q_src->sequence++;
@@ -442,14 +449,14 @@ static void device_run(void *priv)
ctx->comp_has_next_frame = false;
}
v4l2_m2m_buf_done(dst_buf, state);
- if (ctx->is_stateless && src_req)
- v4l2_ctrl_request_complete(src_req, &ctx->hdl);
ctx->comp_size = 0;
ctx->header_size = 0;
ctx->comp_magic_cnt = 0;
ctx->comp_has_frame = false;
spin_unlock(ctx->lock);
+ if (ctx->is_stateless && src_req)
+ v4l2_ctrl_request_complete(src_req, &ctx->hdl);
if (ctx->is_enc)
v4l2_m2m_job_finish(dev->stateful_enc.m2m_dev, ctx->fh.m2m_ctx);
@@ -579,6 +586,8 @@ static int job_ready(void *priv)
unsigned int max_to_copy;
unsigned int comp_frame_size;
+ if (ctx->has_stopped)
+ return 0;
if (ctx->source_changed)
return 0;
if (ctx->is_stateless || ctx->is_enc || ctx->comp_has_frame)
@@ -598,6 +607,8 @@ restart:
if (ctx->header_size < sizeof(struct fwht_cframe_hdr)) {
state = get_next_header(ctx, &p, p_src + sz - p);
if (ctx->header_size < sizeof(struct fwht_cframe_hdr)) {
+ if (ctx->is_draining && src_buf == ctx->last_src_buf)
+ return 1;
job_remove_src_buf(ctx, state);
goto restart;
}
@@ -625,6 +636,8 @@ restart:
p += copy;
ctx->comp_size += copy;
if (ctx->comp_size < max_to_copy) {
+ if (ctx->is_draining && src_buf == ctx->last_src_buf)
+ return 1;
job_remove_src_buf(ctx, state);
goto restart;
}
@@ -666,7 +679,6 @@ restart:
v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
update_capture_data_from_header(ctx);
- ctx->first_source_change_sent = true;
v4l2_event_queue_fh(&ctx->fh, &rs_event);
set_last_buffer(dst_buf, src_buf, ctx);
ctx->source_changed = true;
@@ -713,7 +725,8 @@ static int enum_fmt(struct v4l2_fmtdesc *f, struct vicodec_ctx *ctx,
const struct v4l2_fwht_pixfmt_info *info =
get_q_data(ctx, f->type)->info;
- if (!info || ctx->is_enc)
+ if (ctx->is_enc ||
+ !vb2_is_streaming(&ctx->fh.m2m_ctx->cap_q_ctx.q))
info = v4l2_fwht_get_pixfmt(f->index);
else
info = v4l2_fwht_find_nth_fmt(info->width_div,
@@ -764,9 +777,6 @@ static int vidioc_g_fmt(struct vicodec_ctx *ctx, struct v4l2_format *f)
q_data = get_q_data(ctx, f->type);
info = q_data->info;
- if (!info)
- info = v4l2_fwht_get_pixfmt(0);
-
switch (f->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
@@ -1032,16 +1042,10 @@ static int vidioc_s_fmt(struct vicodec_ctx *ctx, struct v4l2_format *f)
default:
return -EINVAL;
}
- if (q_data->visible_width > q_data->coded_width)
- q_data->visible_width = q_data->coded_width;
- if (q_data->visible_height > q_data->coded_height)
- q_data->visible_height = q_data->coded_height;
-
dprintk(ctx->dev,
- "Setting format for type %d, coded wxh: %dx%d, visible wxh: %dx%d, fourcc: %08x\n",
+ "Setting format for type %d, coded wxh: %dx%d, fourcc: 0x%08x\n",
f->type, q_data->coded_width, q_data->coded_height,
- q_data->visible_width, q_data->visible_height,
q_data->info->id);
return 0;
@@ -1063,18 +1067,58 @@ static int vidioc_s_fmt_vid_out(struct file *file, void *priv,
struct v4l2_format *f)
{
struct vicodec_ctx *ctx = file2ctx(file);
- struct v4l2_pix_format_mplane *pix_mp;
+ struct vicodec_q_data *q_data;
+ struct vicodec_q_data *q_data_cap;
struct v4l2_pix_format *pix;
+ struct v4l2_pix_format_mplane *pix_mp;
+ u32 coded_w = 0, coded_h = 0;
+ unsigned int size = 0;
int ret;
+ q_data = get_q_data(ctx, f->type);
+ q_data_cap = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+
ret = vidioc_try_fmt_vid_out(file, priv, f);
if (ret)
return ret;
+ if (ctx->is_enc) {
+ struct vb2_queue *vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ struct vb2_queue *vq_cap = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ const struct v4l2_fwht_pixfmt_info *info = ctx->is_stateless ?
+ &pixfmt_stateless_fwht : &pixfmt_fwht;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ coded_w = f->fmt.pix.width;
+ coded_h = f->fmt.pix.height;
+ } else {
+ coded_w = f->fmt.pix_mp.width;
+ coded_h = f->fmt.pix_mp.height;
+ }
+ if (vb2_is_busy(vq) && (coded_w != q_data->coded_width ||
+ coded_h != q_data->coded_height))
+ return -EBUSY;
+ size = coded_w * coded_h *
+ info->sizeimage_mult / info->sizeimage_div;
+ if (!ctx->is_stateless)
+ size += sizeof(struct fwht_cframe_hdr);
+
+ if (vb2_is_busy(vq_cap) && size > q_data_cap->sizeimage)
+ return -EBUSY;
+ }
+
ret = vidioc_s_fmt(file2ctx(file), f);
if (!ret) {
+ if (ctx->is_enc) {
+ q_data->visible_width = coded_w;
+ q_data->visible_height = coded_h;
+ q_data_cap->coded_width = coded_w;
+ q_data_cap->coded_height = coded_h;
+ q_data_cap->sizeimage = size;
+ }
+
switch (f->type) {
- case V4L2_BUF_TYPE_VIDEO_CAPTURE:
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
pix = &f->fmt.pix;
ctx->state.colorspace = pix->colorspace;
@@ -1082,7 +1126,6 @@ static int vidioc_s_fmt_vid_out(struct file *file, void *priv,
ctx->state.ycbcr_enc = pix->ycbcr_enc;
ctx->state.quantization = pix->quantization;
break;
- case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
pix_mp = &f->fmt.pix_mp;
ctx->state.colorspace = pix_mp->colorspace;
@@ -1173,31 +1216,39 @@ static int vidioc_s_selection(struct file *file, void *priv,
return 0;
}
-static void vicodec_mark_last_buf(struct vicodec_ctx *ctx)
+static int vicodec_mark_last_buf(struct vicodec_ctx *ctx)
{
- static const struct v4l2_event eos_event = {
- .type = V4L2_EVENT_EOS
- };
+ struct vb2_v4l2_buffer *next_dst_buf;
+ int ret = 0;
spin_lock(ctx->lock);
- ctx->last_src_buf = v4l2_m2m_last_src_buf(ctx->fh.m2m_ctx);
- if (!ctx->last_src_buf && ctx->last_dst_buf) {
- ctx->last_dst_buf->flags |= V4L2_BUF_FLAG_LAST;
- v4l2_event_queue_fh(&ctx->fh, &eos_event);
+ if (ctx->is_draining) {
+ ret = -EBUSY;
+ goto unlock;
}
- spin_unlock(ctx->lock);
-}
+ if (ctx->has_stopped)
+ goto unlock;
-static int vicodec_try_encoder_cmd(struct file *file, void *fh,
- struct v4l2_encoder_cmd *ec)
-{
- if (ec->cmd != V4L2_ENC_CMD_STOP)
- return -EINVAL;
+ ctx->last_src_buf = v4l2_m2m_last_src_buf(ctx->fh.m2m_ctx);
+ ctx->is_draining = true;
+ if (ctx->last_src_buf)
+ goto unlock;
+
+ next_dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ if (!next_dst_buf) {
+ ctx->next_is_last = true;
+ goto unlock;
+ }
- if (ec->flags & V4L2_ENC_CMD_STOP_AT_GOP_END)
- return -EINVAL;
+ next_dst_buf->flags |= V4L2_BUF_FLAG_LAST;
+ vb2_buffer_done(&next_dst_buf->vb2_buf, VB2_BUF_STATE_DONE);
+ ctx->is_draining = false;
+ ctx->has_stopped = true;
+ v4l2_event_queue_fh(&ctx->fh, &vicodec_eos_event);
- return 0;
+unlock:
+ spin_unlock(ctx->lock);
+ return ret;
}
static int vicodec_encoder_cmd(struct file *file, void *fh,
@@ -1206,27 +1257,26 @@ static int vicodec_encoder_cmd(struct file *file, void *fh,
struct vicodec_ctx *ctx = file2ctx(file);
int ret;
- ret = vicodec_try_encoder_cmd(file, fh, ec);
+ ret = v4l2_m2m_ioctl_try_encoder_cmd(file, fh, ec);
if (ret < 0)
return ret;
- vicodec_mark_last_buf(ctx);
- return 0;
-}
-
-static int vicodec_try_decoder_cmd(struct file *file, void *fh,
- struct v4l2_decoder_cmd *dc)
-{
- if (dc->cmd != V4L2_DEC_CMD_STOP)
- return -EINVAL;
-
- if (dc->flags & V4L2_DEC_CMD_STOP_TO_BLACK)
- return -EINVAL;
-
- if (!(dc->flags & V4L2_DEC_CMD_STOP_IMMEDIATELY) && (dc->stop.pts != 0))
- return -EINVAL;
+ if (!vb2_is_streaming(&ctx->fh.m2m_ctx->cap_q_ctx.q) ||
+ !vb2_is_streaming(&ctx->fh.m2m_ctx->out_q_ctx.q))
+ return 0;
- return 0;
+ if (ec->cmd == V4L2_ENC_CMD_STOP)
+ return vicodec_mark_last_buf(ctx);
+ ret = 0;
+ spin_lock(ctx->lock);
+ if (ctx->is_draining) {
+ ret = -EBUSY;
+ } else if (ctx->has_stopped) {
+ ctx->has_stopped = false;
+ vb2_clear_last_buffer_dequeued(&ctx->fh.m2m_ctx->cap_q_ctx.q);
+ }
+ spin_unlock(ctx->lock);
+ return ret;
}
static int vicodec_decoder_cmd(struct file *file, void *fh,
@@ -1235,12 +1285,26 @@ static int vicodec_decoder_cmd(struct file *file, void *fh,
struct vicodec_ctx *ctx = file2ctx(file);
int ret;
- ret = vicodec_try_decoder_cmd(file, fh, dc);
+ ret = v4l2_m2m_ioctl_try_decoder_cmd(file, fh, dc);
if (ret < 0)
return ret;
- vicodec_mark_last_buf(ctx);
- return 0;
+ if (!vb2_is_streaming(&ctx->fh.m2m_ctx->cap_q_ctx.q) ||
+ !vb2_is_streaming(&ctx->fh.m2m_ctx->out_q_ctx.q))
+ return 0;
+
+ if (dc->cmd == V4L2_DEC_CMD_STOP)
+ return vicodec_mark_last_buf(ctx);
+ ret = 0;
+ spin_lock(ctx->lock);
+ if (ctx->is_draining) {
+ ret = -EBUSY;
+ } else if (ctx->has_stopped) {
+ ctx->has_stopped = false;
+ vb2_clear_last_buffer_dequeued(&ctx->fh.m2m_ctx->cap_q_ctx.q);
+ }
+ spin_unlock(ctx->lock);
+ return ret;
}
static int vicodec_enum_framesizes(struct file *file, void *fh,
@@ -1283,6 +1347,8 @@ static int vicodec_subscribe_event(struct v4l2_fh *fh,
return -EINVAL;
/* fall through */
case V4L2_EVENT_EOS:
+ if (ctx->is_stateless)
+ return -EINVAL;
return v4l2_event_subscribe(fh, sub, 0, NULL);
default:
return v4l2_ctrl_subscribe_event(fh, sub);
@@ -1297,7 +1363,6 @@ static const struct v4l2_ioctl_ops vicodec_ioctl_ops = {
.vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
- .vidioc_enum_fmt_vid_cap_mplane = vidioc_enum_fmt_vid_cap,
.vidioc_g_fmt_vid_cap_mplane = vidioc_g_fmt_vid_cap,
.vidioc_try_fmt_vid_cap_mplane = vidioc_try_fmt_vid_cap,
.vidioc_s_fmt_vid_cap_mplane = vidioc_s_fmt_vid_cap,
@@ -1307,7 +1372,6 @@ static const struct v4l2_ioctl_ops vicodec_ioctl_ops = {
.vidioc_try_fmt_vid_out = vidioc_try_fmt_vid_out,
.vidioc_s_fmt_vid_out = vidioc_s_fmt_vid_out,
- .vidioc_enum_fmt_vid_out_mplane = vidioc_enum_fmt_vid_out,
.vidioc_g_fmt_vid_out_mplane = vidioc_g_fmt_vid_out,
.vidioc_try_fmt_vid_out_mplane = vidioc_try_fmt_vid_out,
.vidioc_s_fmt_vid_out_mplane = vidioc_s_fmt_vid_out,
@@ -1326,9 +1390,9 @@ static const struct v4l2_ioctl_ops vicodec_ioctl_ops = {
.vidioc_g_selection = vidioc_g_selection,
.vidioc_s_selection = vidioc_s_selection,
- .vidioc_try_encoder_cmd = vicodec_try_encoder_cmd,
+ .vidioc_try_encoder_cmd = v4l2_m2m_ioctl_try_encoder_cmd,
.vidioc_encoder_cmd = vicodec_encoder_cmd,
- .vidioc_try_decoder_cmd = vicodec_try_decoder_cmd,
+ .vidioc_try_decoder_cmd = v4l2_m2m_ioctl_try_decoder_cmd,
.vidioc_decoder_cmd = vicodec_decoder_cmd,
.vidioc_enum_framesizes = vicodec_enum_framesizes,
@@ -1354,6 +1418,7 @@ static int vicodec_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
*nplanes = 1;
sizes[0] = size;
+ q_data->vb2_sizeimage = size;
return 0;
}
@@ -1384,11 +1449,11 @@ static int vicodec_buf_prepare(struct vb2_buffer *vb)
}
}
- if (vb2_plane_size(vb, 0) < q_data->sizeimage) {
+ if (vb2_plane_size(vb, 0) < q_data->vb2_sizeimage) {
dprintk(ctx->dev,
"%s data will not fit into plane (%lu < %lu)\n",
__func__, vb2_plane_size(vb, 0),
- (long)q_data->sizeimage);
+ (long)q_data->vb2_sizeimage);
return -EINVAL;
}
@@ -1412,6 +1477,25 @@ static void vicodec_buf_queue(struct vb2_buffer *vb)
.u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION,
};
+ if (vb2_is_streaming(vq_cap)) {
+ if (!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type) &&
+ ctx->next_is_last) {
+ unsigned int i;
+
+ for (i = 0; i < vb->num_planes; i++)
+ vb->planes[i].bytesused = 0;
+ vbuf->flags = V4L2_BUF_FLAG_LAST;
+ vbuf->field = V4L2_FIELD_NONE;
+ vbuf->sequence = get_q_data(ctx, vb->vb2_queue->type)->sequence++;
+ vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+ ctx->is_draining = false;
+ ctx->has_stopped = true;
+ ctx->next_is_last = false;
+ v4l2_event_queue_fh(&ctx->fh, &vicodec_eos_event);
+ return;
+ }
+ }
+
/* buf_queue handles only the first source change event */
if (ctx->first_source_change_sent) {
v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
@@ -1519,16 +1603,11 @@ static int vicodec_start_streaming(struct vb2_queue *q,
unsigned int total_planes_size;
u8 *new_comp_frame = NULL;
- if (!info)
- return -EINVAL;
-
chroma_div = info->width_div * info->height_div;
q_data->sequence = 0;
if (V4L2_TYPE_IS_OUTPUT(q->type))
ctx->last_src_buf = NULL;
- else
- ctx->last_dst_buf = NULL;
state->gop_cnt = 0;
@@ -1604,6 +1683,32 @@ static void vicodec_stop_streaming(struct vb2_queue *q)
vicodec_return_bufs(q, VB2_BUF_STATE_ERROR);
+ if (V4L2_TYPE_IS_OUTPUT(q->type)) {
+ if (ctx->is_draining) {
+ struct vb2_v4l2_buffer *next_dst_buf;
+
+ spin_lock(ctx->lock);
+ ctx->last_src_buf = NULL;
+ next_dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ if (!next_dst_buf) {
+ ctx->next_is_last = true;
+ } else {
+ next_dst_buf->flags |= V4L2_BUF_FLAG_LAST;
+ vb2_buffer_done(&next_dst_buf->vb2_buf, VB2_BUF_STATE_DONE);
+ ctx->is_draining = false;
+ ctx->has_stopped = true;
+ v4l2_event_queue_fh(&ctx->fh, &vicodec_eos_event);
+ }
+ spin_unlock(ctx->lock);
+ }
+ } else {
+ ctx->is_draining = false;
+ ctx->has_stopped = false;
+ ctx->next_is_last = false;
+ }
+ if (!ctx->is_enc && V4L2_TYPE_IS_OUTPUT(q->type))
+ ctx->first_source_change_sent = false;
+
if ((!V4L2_TYPE_IS_OUTPUT(q->type) && !ctx->is_enc) ||
(V4L2_TYPE_IS_OUTPUT(q->type) && ctx->is_enc)) {
if (!ctx->is_stateless)
@@ -1771,11 +1876,13 @@ static const struct v4l2_ctrl_config vicodec_ctrl_stateless_state = {
*/
static int vicodec_open(struct file *file)
{
+ const struct v4l2_fwht_pixfmt_info *info = v4l2_fwht_get_pixfmt(0);
struct video_device *vfd = video_devdata(file);
struct vicodec_dev *dev = video_drvdata(file);
struct vicodec_ctx *ctx = NULL;
struct v4l2_ctrl_handler *hdl;
- unsigned int size;
+ unsigned int raw_size;
+ unsigned int comp_size;
int rc = 0;
if (mutex_lock_interruptible(vfd->lock))
@@ -1795,13 +1902,16 @@ static int vicodec_open(struct file *file)
file->private_data = &ctx->fh;
ctx->dev = dev;
hdl = &ctx->hdl;
- v4l2_ctrl_handler_init(hdl, 4);
+ v4l2_ctrl_handler_init(hdl, 5);
v4l2_ctrl_new_std(hdl, &vicodec_ctrl_ops, V4L2_CID_MPEG_VIDEO_GOP_SIZE,
1, 16, 1, 10);
v4l2_ctrl_new_std(hdl, &vicodec_ctrl_ops, V4L2_CID_FWHT_I_FRAME_QP,
1, 31, 1, 20);
v4l2_ctrl_new_std(hdl, &vicodec_ctrl_ops, V4L2_CID_FWHT_P_FRAME_QP,
1, 31, 1, 20);
+ if (ctx->is_enc)
+ v4l2_ctrl_new_std(hdl, &vicodec_ctrl_ops,
+ V4L2_CID_MIN_BUFFERS_FOR_OUTPUT, 1, 1, 1, 1);
if (ctx->is_stateless)
v4l2_ctrl_new_custom(hdl, &vicodec_ctrl_stateless_state, NULL);
if (hdl->error) {
@@ -1814,7 +1924,7 @@ static int vicodec_open(struct file *file)
v4l2_ctrl_handler_setup(hdl);
if (ctx->is_enc)
- ctx->q_data[V4L2_M2M_SRC].info = v4l2_fwht_get_pixfmt(0);
+ ctx->q_data[V4L2_M2M_SRC].info = info;
else if (ctx->is_stateless)
ctx->q_data[V4L2_M2M_SRC].info = &pixfmt_stateless_fwht;
else
@@ -1823,22 +1933,24 @@ static int vicodec_open(struct file *file)
ctx->q_data[V4L2_M2M_SRC].coded_height = 720;
ctx->q_data[V4L2_M2M_SRC].visible_width = 1280;
ctx->q_data[V4L2_M2M_SRC].visible_height = 720;
- size = 1280 * 720 * ctx->q_data[V4L2_M2M_SRC].info->sizeimage_mult /
- ctx->q_data[V4L2_M2M_SRC].info->sizeimage_div;
- if (ctx->is_enc || ctx->is_stateless)
- ctx->q_data[V4L2_M2M_SRC].sizeimage = size;
+ raw_size = 1280 * 720 * info->sizeimage_mult / info->sizeimage_div;
+ comp_size = 1280 * 720 * pixfmt_fwht.sizeimage_mult /
+ pixfmt_fwht.sizeimage_div;
+ if (ctx->is_enc)
+ ctx->q_data[V4L2_M2M_SRC].sizeimage = raw_size;
+ else if (ctx->is_stateless)
+ ctx->q_data[V4L2_M2M_SRC].sizeimage = comp_size;
else
ctx->q_data[V4L2_M2M_SRC].sizeimage =
- size + sizeof(struct fwht_cframe_hdr);
+ comp_size + sizeof(struct fwht_cframe_hdr);
+ ctx->q_data[V4L2_M2M_DST] = ctx->q_data[V4L2_M2M_SRC];
if (ctx->is_enc) {
- ctx->q_data[V4L2_M2M_DST] = ctx->q_data[V4L2_M2M_SRC];
ctx->q_data[V4L2_M2M_DST].info = &pixfmt_fwht;
- ctx->q_data[V4L2_M2M_DST].sizeimage = 1280 * 720 *
- ctx->q_data[V4L2_M2M_DST].info->sizeimage_mult /
- ctx->q_data[V4L2_M2M_DST].info->sizeimage_div +
- sizeof(struct fwht_cframe_hdr);
+ ctx->q_data[V4L2_M2M_DST].sizeimage =
+ comp_size + sizeof(struct fwht_cframe_hdr);
} else {
- ctx->q_data[V4L2_M2M_DST].info = NULL;
+ ctx->q_data[V4L2_M2M_DST].info = info;
+ ctx->q_data[V4L2_M2M_DST].sizeimage = raw_size;
}
ctx->state.colorspace = V4L2_COLORSPACE_REC709;
@@ -2013,18 +2125,31 @@ static int register_instance(struct vicodec_dev *dev,
return 0;
}
+static void vicodec_v4l2_dev_release(struct v4l2_device *v4l2_dev)
+{
+ struct vicodec_dev *dev = container_of(v4l2_dev, struct vicodec_dev, v4l2_dev);
+
+ v4l2_device_unregister(&dev->v4l2_dev);
+ v4l2_m2m_release(dev->stateful_enc.m2m_dev);
+ v4l2_m2m_release(dev->stateful_dec.m2m_dev);
+ v4l2_m2m_release(dev->stateless_dec.m2m_dev);
+ kfree(dev);
+}
+
static int vicodec_probe(struct platform_device *pdev)
{
struct vicodec_dev *dev;
int ret;
- dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
if (ret)
- return ret;
+ goto free_dev;
+
+ dev->v4l2_dev.release = vicodec_v4l2_dev_release;
#ifdef CONFIG_MEDIA_CONTROLLER
dev->mdev.dev = &pdev->dev;
@@ -2102,6 +2227,8 @@ unreg_sf_enc:
v4l2_m2m_release(dev->stateful_enc.m2m_dev);
unreg_dev:
v4l2_device_unregister(&dev->v4l2_dev);
+free_dev:
+ kfree(dev);
return ret;
}
@@ -2120,12 +2247,10 @@ static int vicodec_remove(struct platform_device *pdev)
media_device_cleanup(&dev->mdev);
#endif
- v4l2_m2m_release(dev->stateful_enc.m2m_dev);
- v4l2_m2m_release(dev->stateful_dec.m2m_dev);
video_unregister_device(&dev->stateful_enc.vfd);
video_unregister_device(&dev->stateful_dec.vfd);
video_unregister_device(&dev->stateless_dec.vfd);
- v4l2_device_unregister(&dev->v4l2_dev);
+ v4l2_device_put(&dev->v4l2_dev);
return 0;
}
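
The vicodec probe/remove hunks above move teardown into a v4l2_device release callback: the state is kzalloc'ed (no longer devm-managed), remove() only drops the last reference with v4l2_device_put(), and everything is freed from the callback once nothing holds the device anymore. A minimal toy version of that lifetime pattern (names here are illustrative, not vicodec's):

#include <linux/platform_device.h>
#include <linux/slab.h>
#include <media/v4l2-device.h>

struct toy_dev {
	struct v4l2_device v4l2_dev;
};

static void toy_v4l2_release(struct v4l2_device *v4l2_dev)
{
	struct toy_dev *dev = container_of(v4l2_dev, struct toy_dev, v4l2_dev);

	v4l2_device_unregister(v4l2_dev);
	kfree(dev);			/* freed only when the last reference is dropped */
}

static int toy_probe(struct platform_device *pdev)
{
	struct toy_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	int ret;

	if (!dev)
		return -ENOMEM;

	ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
	if (ret) {
		kfree(dev);
		return ret;
	}
	dev->v4l2_dev.release = toy_v4l2_release;
	platform_set_drvdata(pdev, dev);
	return 0;
}

static int toy_remove(struct platform_device *pdev)
{
	struct toy_dev *dev = platform_get_drvdata(pdev);

	v4l2_device_put(&dev->v4l2_dev);	/* release runs at refcount zero */
	return 0;
}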
diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c
index 243c82b5d537..acd3bd48c7e2 100644
--- a/drivers/media/platform/vim2m.c
+++ b/drivers/media/platform/vim2m.c
@@ -1359,7 +1359,7 @@ static int vim2m_probe(struct platform_device *pdev)
MEDIA_ENT_F_PROC_VIDEO_SCALER);
if (ret) {
v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem media controller\n");
- goto error_m2m;
+ goto error_dev;
}
ret = media_device_register(&dev->mdev);
@@ -1373,11 +1373,11 @@ static int vim2m_probe(struct platform_device *pdev)
#ifdef CONFIG_MEDIA_CONTROLLER
error_m2m_mc:
v4l2_m2m_unregister_media_controller(dev->m2m_dev);
-error_m2m:
- v4l2_m2m_release(dev->m2m_dev);
#endif
error_dev:
video_unregister_device(&dev->vfd);
+ /* vim2m_device_release(), called by video_unregister_device(), frees the remaining objects */
+ return ret;
error_v4l2:
v4l2_device_unregister(&dev->v4l2_dev);
error_free:
diff --git a/drivers/media/platform/vimc/Kconfig b/drivers/media/platform/vimc/Kconfig
index beba6acce593..bd221d3e1a4a 100644
--- a/drivers/media/platform/vimc/Kconfig
+++ b/drivers/media/platform/vimc/Kconfig
@@ -4,7 +4,6 @@ config VIDEO_VIMC
depends on VIDEO_DEV && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
select VIDEOBUF2_VMALLOC
select VIDEO_V4L2_TPG
- default n
help
Skeleton driver for Virtual Media Controller
diff --git a/drivers/media/platform/vimc/Makefile b/drivers/media/platform/vimc/Makefile
index c4fc8e7d365a..96d06f030c31 100644
--- a/drivers/media/platform/vimc/Makefile
+++ b/drivers/media/platform/vimc/Makefile
@@ -1,11 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-vimc-objs := vimc-core.o
-vimc_capture-objs := vimc-capture.o
-vimc_common-objs := vimc-common.o
-vimc_debayer-objs := vimc-debayer.o
-vimc_scaler-objs := vimc-scaler.o
-vimc_sensor-objs := vimc-sensor.o
-vimc_streamer-objs := vimc-streamer.o
+vimc-y := vimc-core.o vimc-common.o vimc-streamer.o
-obj-$(CONFIG_VIDEO_VIMC) += vimc.o vimc_capture.o vimc_common.o vimc-debayer.o \
- vimc_scaler.o vimc_sensor.o vimc_streamer.o
+obj-$(CONFIG_VIDEO_VIMC) += vimc.o vimc-capture.o vimc-debayer.o \
+ vimc-scaler.o vimc-sensor.o
diff --git a/drivers/media/platform/vimc/vimc-capture.c b/drivers/media/platform/vimc/vimc-capture.c
index 946dc0908566..664855708fdf 100644
--- a/drivers/media/platform/vimc/vimc-capture.c
+++ b/drivers/media/platform/vimc/vimc-capture.c
@@ -142,12 +142,15 @@ static int vimc_cap_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct vimc_cap_device *vcap = video_drvdata(file);
+ int ret;
/* Do not change the format while stream is on */
if (vb2_is_busy(&vcap->queue))
return -EBUSY;
- vimc_cap_try_fmt_vid_cap(file, priv, f);
+ ret = vimc_cap_try_fmt_vid_cap(file, priv, f);
+ if (ret)
+ return ret;
dev_dbg(vcap->dev, "%s: format update: "
"old:%dx%d (0x%x, %d, %d, %d, %d) "
diff --git a/drivers/media/platform/vimc/vimc-common.c b/drivers/media/platform/vimc/vimc-common.c
index f4d2073076ed..03016f204d05 100644
--- a/drivers/media/platform/vimc/vimc-common.c
+++ b/drivers/media/platform/vimc/vimc-common.c
@@ -377,7 +377,3 @@ void vimc_ent_sd_unregister(struct vimc_ent_device *ved, struct v4l2_subdev *sd)
v4l2_device_unregister_subdev(sd);
}
EXPORT_SYMBOL_GPL(vimc_ent_sd_unregister);
-
-MODULE_DESCRIPTION("Virtual Media Controller Driver (VIMC) Common");
-MODULE_AUTHOR("Helen Koike <helen.fornazier@gmail.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/vimc/vimc-core.c b/drivers/media/platform/vimc/vimc-core.c
index 03707bdcbfa8..571c55aa0e16 100644
--- a/drivers/media/platform/vimc/vimc-core.c
+++ b/drivers/media/platform/vimc/vimc-core.c
@@ -234,10 +234,7 @@ static void vimc_comp_unbind(struct device *master)
static int vimc_comp_compare(struct device *comp, void *data)
{
- const struct platform_device *pdev = to_platform_device(comp);
- const char *name = data;
-
- return !strcmp(pdev->dev.platform_data, name);
+ return comp == data;
}
static struct component_match *vimc_add_subdevs(struct vimc_device *vimc)
@@ -267,7 +264,7 @@ static struct component_match *vimc_add_subdevs(struct vimc_device *vimc)
}
component_match_add(&vimc->pdev.dev, &match, vimc_comp_compare,
- (void *)vimc->pipe_cfg->ents[i].name);
+ &vimc->subdevs[i]->dev);
}
return match;
diff --git a/drivers/media/platform/vimc/vimc-debayer.c b/drivers/media/platform/vimc/vimc-debayer.c
index 3ae8c12f5fa3..00598fbf3cba 100644
--- a/drivers/media/platform/vimc/vimc-debayer.c
+++ b/drivers/media/platform/vimc/vimc-debayer.c
@@ -16,14 +16,16 @@
#include "vimc-common.h"
#define VIMC_DEB_DRV_NAME "vimc-debayer"
-/* This module only supports tranforming a bayer format to V4L2_PIX_FMT_RGB24 */
+/* This module only supports transforming a bayer format
+ * to V4L2_PIX_FMT_RGB24
+ */
#define VIMC_DEB_SRC_PIXFMT V4L2_PIX_FMT_RGB24
#define VIMC_DEB_SRC_MBUS_FMT_DEFAULT MEDIA_BUS_FMT_RGB888_1X24
static unsigned int deb_mean_win_size = 3;
module_param(deb_mean_win_size, uint, 0000);
MODULE_PARM_DESC(deb_mean_win_size, " the window size to calculate the mean.\n"
- "NOTE: the window size need to be an odd number, as the main pixel "
+ "NOTE: the window size needs to be an odd number, as the main pixel "
"stays in the center of the window, otherwise the next odd number "
"is considered");
@@ -260,7 +262,7 @@ static int vimc_deb_set_fmt(struct v4l2_subdev *sd,
if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
/* Do not change the format while stream is on */
- if (vdeb->src_frame)
+ if (vdeb->ved.stream)
return -EBUSY;
sink_fmt = &vdeb->sink_fmt;
@@ -327,9 +329,6 @@ static int vimc_deb_s_stream(struct v4l2_subdev *sd, int enable)
const struct v4l2_format_info *pix_info;
unsigned int frame_size;
- if (vdeb->src_frame)
- return 0;
-
/* We only support translating bayer to RGB24 */
if (src_pixelformat != V4L2_PIX_FMT_RGB24) {
dev_err(vdeb->dev,
diff --git a/drivers/media/platform/vimc/vimc-scaler.c b/drivers/media/platform/vimc/vimc-scaler.c
index 5f31c1e351a3..c7123a45c55b 100644
--- a/drivers/media/platform/vimc/vimc-scaler.c
+++ b/drivers/media/platform/vimc/vimc-scaler.c
@@ -148,7 +148,7 @@ static int vimc_sca_set_fmt(struct v4l2_subdev *sd,
if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
/* Do not change the format while stream is on */
- if (vsca->src_frame)
+ if (vsca->ved.stream)
return -EBUSY;
sink_fmt = &vsca->sink_fmt;
@@ -203,9 +203,6 @@ static int vimc_sca_s_stream(struct v4l2_subdev *sd, int enable)
const struct v4l2_format_info *pix_info;
unsigned int frame_size;
- if (vsca->src_frame)
- return 0;
-
if (!vimc_sca_is_pixfmt_supported(pixelformat)) {
dev_err(vsca->dev, "pixfmt (0x%08x) is not supported\n",
pixelformat);
@@ -327,7 +324,7 @@ static void *vimc_sca_process_frame(struct vimc_ent_device *ved,
ved);
/* If the stream in this node is not active, just return */
- if (!vsca->src_frame)
+ if (!ved->stream)
return ERR_PTR(-EINVAL);
vimc_sca_fill_src_frame(vsca, sink_frame);
diff --git a/drivers/media/platform/vimc/vimc-sensor.c b/drivers/media/platform/vimc/vimc-sensor.c
index 46a25f705456..51359472eef2 100644
--- a/drivers/media/platform/vimc/vimc-sensor.c
+++ b/drivers/media/platform/vimc/vimc-sensor.c
@@ -131,7 +131,7 @@ static int vimc_sen_set_fmt(struct v4l2_subdev *sd,
if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
/* Do not change the format while stream is on */
- if (vsen->frame)
+ if (vsen->ved.stream)
return -EBUSY;
mf = &vsen->mbus_format;
@@ -187,10 +187,6 @@ static int vimc_sen_s_stream(struct v4l2_subdev *sd, int enable)
const struct v4l2_format_info *pix_info;
unsigned int frame_size;
- if (vsen->kthread_sen)
- /* tpg is already executing */
- return 0;
-
/* Calculate the frame size */
pix_info = v4l2_format_info(pixelformat);
frame_size = vsen->mbus_format.width * pix_info->bpp[0] *
@@ -211,7 +207,6 @@ static int vimc_sen_s_stream(struct v4l2_subdev *sd, int enable)
vfree(vsen->frame);
vsen->frame = NULL;
- return 0;
}
return 0;
diff --git a/drivers/media/platform/vimc/vimc-streamer.c b/drivers/media/platform/vimc/vimc-streamer.c
index 26b674259489..3b3f36357a0e 100644
--- a/drivers/media/platform/vimc/vimc-streamer.c
+++ b/drivers/media/platform/vimc/vimc-streamer.c
@@ -122,6 +122,14 @@ static int vimc_streamer_pipeline_init(struct vimc_stream *stream,
return -EINVAL;
}
+/*
+ * vimc_streamer_thread - process frames through the pipeline
+ *
+ * @data: vimc_stream struct of the current stream
+ *
+ * From the source to the sink, fetches a frame from each subdevice and sends
+ * it to the next one in the pipeline, at a fixed framerate.
+ */
static int vimc_streamer_thread(void *data)
{
struct vimc_stream *stream = data;
@@ -149,6 +157,20 @@ static int vimc_streamer_thread(void *data)
return 0;
}
+/*
+ * vimc_streamer_s_stream - start/stop the streaming on the media pipeline
+ *
+ * @stream: the pointer to the stream structure of the current stream
+ * @ved: pointer to the vimc entity device the stream is started from
+ * @enable: flag to determine if stream should start/stop
+ *
+ * When starting, check whether stream->kthread is already allocated; if it
+ * is, a stream is already running and nothing needs to be done. Otherwise,
+ * initialize the pipeline, then create and run a kthread that consumes
+ * buffers through the pipeline.
+ * When stopping, analogously check whether a stream is running and, if so,
+ * stop the thread and terminate the pipeline.
+ */
int vimc_streamer_s_stream(struct vimc_stream *stream,
struct vimc_ent_device *ved,
int enable)
@@ -188,7 +210,3 @@ int vimc_streamer_s_stream(struct vimc_stream *stream,
return 0;
}
EXPORT_SYMBOL_GPL(vimc_streamer_s_stream);
-
-MODULE_DESCRIPTION("Virtual Media Controller Driver (VIMC) Streamer");
-MODULE_AUTHOR("Lucas A. M. Magalhães <lucmaga@gmail.com>");
-MODULE_LICENSE("GPL");
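The two new comments above describe a simple start/stop pattern: vimc_streamer_s_stream() only spawns the worker when no stream->kthread exists yet, and vimc_streamer_thread() paces itself to a fixed framerate until it is told to stop. A minimal userspace sketch of the same pattern follows, using POSIX threads instead of the kernel kthread API; struct stream, process_frame() and FRAMERATE are made-up stand-ins for illustration, not part of the vimc driver.

/* build: cc -pthread streamer_sketch.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define FRAMERATE 30			/* frames consumed per second */

struct stream {
	pthread_t thread;
	bool running;			/* plays the role of stream->kthread != NULL */
	atomic_bool stop;		/* plays the role of kthread_should_stop() */
};

static void process_frame(int seq)
{
	/* stand-in for passing a frame from one subdevice to the next */
	printf("frame %d processed\n", seq);
}

static void *streamer_thread(void *data)
{
	struct stream *stream = data;
	int seq = 0;

	while (!atomic_load(&stream->stop)) {
		process_frame(seq++);
		usleep(1000000 / FRAMERATE);	/* fixed framerate pacing */
	}
	return NULL;
}

static int streamer_s_stream(struct stream *stream, int enable)
{
	if (enable) {
		if (stream->running)		/* a stream is already running */
			return 0;
		atomic_store(&stream->stop, false);
		if (pthread_create(&stream->thread, NULL, streamer_thread, stream))
			return -1;
		stream->running = true;
	} else {
		if (!stream->running)		/* nothing to stop */
			return 0;
		atomic_store(&stream->stop, true);
		pthread_join(stream->thread, NULL);
		stream->running = false;
	}
	return 0;
}

int main(void)
{
	struct stream s = { .running = false };

	streamer_s_stream(&s, 1);
	sleep(1);				/* let roughly FRAMERATE frames through */
	streamer_s_stream(&s, 0);
	return 0;
}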
diff --git a/drivers/media/platform/vivid/Kconfig b/drivers/media/platform/vivid/Kconfig
index b172bcc11758..e2ff06edfa93 100644
--- a/drivers/media/platform/vivid/Kconfig
+++ b/drivers/media/platform/vivid/Kconfig
@@ -11,7 +11,6 @@ config VIDEO_VIVID
select VIDEOBUF2_VMALLOC
select VIDEOBUF2_DMA_CONTIG
select VIDEO_V4L2_TPG
- default n
help
Enables a virtual video driver. This driver emulates a webcam,
TV, S-Video and HDMI capture hardware, including VBI support for
diff --git a/drivers/media/platform/vivid/vivid-core.c b/drivers/media/platform/vivid/vivid-core.c
index 7047df6f0e0e..bc2a176937a4 100644
--- a/drivers/media/platform/vivid/vivid-core.c
+++ b/drivers/media/platform/vivid/vivid-core.c
@@ -500,20 +500,18 @@ static const struct v4l2_file_operations vivid_radio_fops = {
static const struct v4l2_ioctl_ops vivid_ioctl_ops = {
.vidioc_querycap = vidioc_querycap,
- .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid,
+ .vidioc_enum_fmt_vid_cap = vivid_enum_fmt_vid,
.vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
.vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
- .vidioc_enum_fmt_vid_cap_mplane = vidioc_enum_fmt_vid_mplane,
.vidioc_g_fmt_vid_cap_mplane = vidioc_g_fmt_vid_cap_mplane,
.vidioc_try_fmt_vid_cap_mplane = vidioc_try_fmt_vid_cap_mplane,
.vidioc_s_fmt_vid_cap_mplane = vidioc_s_fmt_vid_cap_mplane,
- .vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid,
+ .vidioc_enum_fmt_vid_out = vivid_enum_fmt_vid,
.vidioc_g_fmt_vid_out = vidioc_g_fmt_vid_out,
.vidioc_try_fmt_vid_out = vidioc_try_fmt_vid_out,
.vidioc_s_fmt_vid_out = vidioc_s_fmt_vid_out,
- .vidioc_enum_fmt_vid_out_mplane = vidioc_enum_fmt_vid_mplane,
.vidioc_g_fmt_vid_out_mplane = vidioc_g_fmt_vid_out_mplane,
.vidioc_try_fmt_vid_out_mplane = vidioc_try_fmt_vid_out_mplane,
.vidioc_s_fmt_vid_out_mplane = vidioc_s_fmt_vid_out_mplane,
@@ -669,6 +667,9 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
v4l2_std_id tvnorms_cap = 0, tvnorms_out = 0;
int ret;
int i;
+#ifdef CONFIG_VIDEO_VIVID_CEC
+ unsigned int cec_tx_bus_cnt = 0;
+#endif
/* allocate main vivid state structure */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
@@ -722,6 +723,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
in_type_counter[HDMI]--;
dev->num_inputs--;
}
+ dev->num_hdmi_inputs = in_type_counter[HDMI];
/* how many outputs do we have and of what type? */
dev->num_outputs = num_outputs[inst];
@@ -732,6 +734,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
for (i = 0; i < dev->num_outputs; i++) {
dev->output_type[i] = ((output_types[inst] >> i) & 1) ? HDMI : SVID;
dev->output_name_counter[i] = out_type_counter[dev->output_type[i]]++;
+ dev->display_present[i] = true;
}
dev->has_audio_outputs = out_type_counter[SVID];
if (out_type_counter[HDMI] == 16) {
@@ -743,6 +746,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
out_type_counter[HDMI]--;
dev->num_outputs--;
}
+ dev->num_hdmi_outputs = out_type_counter[HDMI];
/* do we create a video capture device? */
dev->has_vid_cap = node_type & 0x0001;
@@ -1001,13 +1005,15 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
dev->webcam_size_idx = 1;
dev->webcam_ival_idx = 3;
tpg_s_fourcc(&dev->tpg, dev->fmt_cap->fourcc);
- dev->std_cap = V4L2_STD_PAL;
dev->std_out = V4L2_STD_PAL;
if (dev->input_type[0] == TV || dev->input_type[0] == SVID)
tvnorms_cap = V4L2_STD_ALL;
if (dev->output_type[0] == SVID)
tvnorms_out = V4L2_STD_ALL;
- dev->dv_timings_cap = def_dv_timings;
+ for (i = 0; i < MAX_INPUTS; i++) {
+ dev->dv_timings_cap[i] = def_dv_timings;
+ dev->std_cap[i] = V4L2_STD_PAL;
+ }
dev->dv_timings_out = def_dv_timings;
dev->tv_freq = 2804 /* 175.25 * 16 */;
dev->tv_audmode = V4L2_TUNER_MODE_STEREO;
@@ -1037,6 +1043,17 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
if (ret)
goto unreg_dev;
+ /* enable/disable interface specific controls */
+ if (dev->num_outputs && dev->output_type[0] != HDMI)
+ v4l2_ctrl_activate(dev->ctrl_display_present, false);
+ if (dev->num_inputs && dev->input_type[0] != HDMI) {
+ v4l2_ctrl_activate(dev->ctrl_dv_timings_signal_mode, false);
+ v4l2_ctrl_activate(dev->ctrl_dv_timings, false);
+ } else if (dev->num_inputs && dev->input_type[0] == HDMI) {
+ v4l2_ctrl_activate(dev->ctrl_std_signal_mode, false);
+ v4l2_ctrl_activate(dev->ctrl_standard, false);
+ }
+
/*
* update the capture and output formats to do a proper initial
* configuration.
@@ -1044,14 +1061,6 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
vivid_update_format_cap(dev, false);
vivid_update_format_out(dev);
- v4l2_ctrl_handler_setup(&dev->ctrl_hdl_vid_cap);
- v4l2_ctrl_handler_setup(&dev->ctrl_hdl_vid_out);
- v4l2_ctrl_handler_setup(&dev->ctrl_hdl_vbi_cap);
- v4l2_ctrl_handler_setup(&dev->ctrl_hdl_vbi_out);
- v4l2_ctrl_handler_setup(&dev->ctrl_hdl_radio_rx);
- v4l2_ctrl_handler_setup(&dev->ctrl_hdl_radio_tx);
- v4l2_ctrl_handler_setup(&dev->ctrl_hdl_sdr_cap);
-
/* initialize overlay */
dev->fb_cap.fmt.width = dev->src_rect.width;
dev->fb_cap.fmt.height = dev->src_rect.height;
@@ -1212,6 +1221,47 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
dev->fb_info.node);
}
+#ifdef CONFIG_VIDEO_VIVID_CEC
+ if (dev->has_vid_cap && in_type_counter[HDMI]) {
+ struct cec_adapter *adap;
+
+ adap = vivid_cec_alloc_adap(dev, 0, false);
+ ret = PTR_ERR_OR_ZERO(adap);
+ if (ret < 0)
+ goto unreg_dev;
+ dev->cec_rx_adap = adap;
+ }
+
+ if (dev->has_vid_out) {
+ for (i = 0; i < dev->num_outputs; i++) {
+ struct cec_adapter *adap;
+
+ if (dev->output_type[i] != HDMI)
+ continue;
+
+ dev->cec_output2bus_map[i] = cec_tx_bus_cnt;
+ adap = vivid_cec_alloc_adap(dev, cec_tx_bus_cnt, true);
+ ret = PTR_ERR_OR_ZERO(adap);
+ if (ret < 0) {
+ for (i = 0; i < dev->num_outputs; i++)
+ cec_delete_adapter(dev->cec_tx_adap[i]);
+ goto unreg_dev;
+ }
+
+ dev->cec_tx_adap[cec_tx_bus_cnt] = adap;
+ cec_tx_bus_cnt++;
+ }
+ }
+#endif
+
+ v4l2_ctrl_handler_setup(&dev->ctrl_hdl_vid_cap);
+ v4l2_ctrl_handler_setup(&dev->ctrl_hdl_vid_out);
+ v4l2_ctrl_handler_setup(&dev->ctrl_hdl_vbi_cap);
+ v4l2_ctrl_handler_setup(&dev->ctrl_hdl_vbi_out);
+ v4l2_ctrl_handler_setup(&dev->ctrl_hdl_radio_rx);
+ v4l2_ctrl_handler_setup(&dev->ctrl_hdl_radio_tx);
+ v4l2_ctrl_handler_setup(&dev->ctrl_hdl_sdr_cap);
+
/* finally start creating the device nodes */
if (dev->has_vid_cap) {
vfd = &dev->vid_cap_dev;
@@ -1241,22 +1291,15 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
#ifdef CONFIG_VIDEO_VIVID_CEC
if (in_type_counter[HDMI]) {
- struct cec_adapter *adap;
-
- adap = vivid_cec_alloc_adap(dev, 0, false);
- ret = PTR_ERR_OR_ZERO(adap);
- if (ret < 0)
- goto unreg_dev;
- dev->cec_rx_adap = adap;
- ret = cec_register_adapter(adap, &pdev->dev);
+ ret = cec_register_adapter(dev->cec_rx_adap, &pdev->dev);
if (ret < 0) {
- cec_delete_adapter(adap);
+ cec_delete_adapter(dev->cec_rx_adap);
dev->cec_rx_adap = NULL;
goto unreg_dev;
}
- cec_s_phys_addr(adap, 0, false);
+ cec_s_phys_addr(dev->cec_rx_adap, 0, false);
v4l2_info(&dev->v4l2_dev, "CEC adapter %s registered for HDMI input 0\n",
- dev_name(&adap->devnode.dev));
+ dev_name(&dev->cec_rx_adap->devnode.dev));
}
#endif
@@ -1268,10 +1311,6 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
}
if (dev->has_vid_out) {
-#ifdef CONFIG_VIDEO_VIVID_CEC
- unsigned int bus_cnt = 0;
-#endif
-
vfd = &dev->vid_out_dev;
snprintf(vfd->name, sizeof(vfd->name),
"vivid-%03d-vid-out", inst);
@@ -1299,30 +1338,21 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
#endif
#ifdef CONFIG_VIDEO_VIVID_CEC
- for (i = 0; i < dev->num_outputs; i++) {
- struct cec_adapter *adap;
-
- if (dev->output_type[i] != HDMI)
- continue;
- dev->cec_output2bus_map[i] = bus_cnt;
- adap = vivid_cec_alloc_adap(dev, bus_cnt, true);
- ret = PTR_ERR_OR_ZERO(adap);
- if (ret < 0)
- goto unreg_dev;
- dev->cec_tx_adap[bus_cnt] = adap;
- ret = cec_register_adapter(adap, &pdev->dev);
+ for (i = 0; i < cec_tx_bus_cnt; i++) {
+ ret = cec_register_adapter(dev->cec_tx_adap[i], &pdev->dev);
if (ret < 0) {
- cec_delete_adapter(adap);
- dev->cec_tx_adap[bus_cnt] = NULL;
+ for (; i < cec_tx_bus_cnt; i++) {
+ cec_delete_adapter(dev->cec_tx_adap[i]);
+ dev->cec_tx_adap[i] = NULL;
+ }
goto unreg_dev;
}
v4l2_info(&dev->v4l2_dev, "CEC adapter %s registered for HDMI output %d\n",
- dev_name(&adap->devnode.dev), bus_cnt);
- bus_cnt++;
- if (bus_cnt <= out_type_counter[HDMI])
- cec_s_phys_addr(adap, bus_cnt << 12, false);
+ dev_name(&dev->cec_tx_adap[i]->devnode.dev), i);
+ if (i <= out_type_counter[HDMI])
+ cec_s_phys_addr(dev->cec_tx_adap[i], i << 12, false);
else
- cec_s_phys_addr(adap, 0x1000, false);
+ cec_s_phys_addr(dev->cec_tx_adap[i], 0x1000, false);
}
#endif
diff --git a/drivers/media/platform/vivid/vivid-core.h b/drivers/media/platform/vivid/vivid-core.h
index 6697c7009629..7ebb14673c75 100644
--- a/drivers/media/platform/vivid/vivid-core.h
+++ b/drivers/media/platform/vivid/vivid-core.h
@@ -22,18 +22,6 @@
#define dprintk(dev, level, fmt, arg...) \
v4l2_dbg(level, vivid_debug, &dev->v4l2_dev, fmt, ## arg)
-/* Maximum allowed frame rate
- *
- * vivid will allow setting timeperframe in [1/FPS_MAX - FPS_MAX/1] range.
- *
- * Ideally FPS_MAX should be infinity, i.e. practically UINT_MAX, but that
- * might hit application errors when they manipulate these values.
- *
- * Besides, for tpf < 10ms image-generation logic should be changed, to avoid
- * producing frames with equal content.
- */
-#define FPS_MAX 100
-
/* The maximum number of clip rectangles */
#define MAX_CLIPS 16
/* The maximum number of inputs */
@@ -180,9 +168,11 @@ struct vivid_dev {
/* supported features */
bool multiplanar;
unsigned num_inputs;
+ unsigned int num_hdmi_inputs;
u8 input_type[MAX_INPUTS];
u8 input_name_counter[MAX_INPUTS];
unsigned num_outputs;
+ unsigned int num_hdmi_outputs;
u8 output_type[MAX_OUTPUTS];
u8 output_name_counter[MAX_OUTPUTS];
bool has_audio_inputs;
@@ -237,6 +227,7 @@ struct vivid_dev {
struct v4l2_ctrl *ctrl_dv_timings_signal_mode;
struct v4l2_ctrl *ctrl_dv_timings;
};
+ struct v4l2_ctrl *ctrl_display_present;
struct v4l2_ctrl *ctrl_has_crop_cap;
struct v4l2_ctrl *ctrl_has_compose_cap;
struct v4l2_ctrl *ctrl_has_scaler_cap;
@@ -245,6 +236,11 @@ struct vivid_dev {
struct v4l2_ctrl *ctrl_has_scaler_out;
struct v4l2_ctrl *ctrl_tx_mode;
struct v4l2_ctrl *ctrl_tx_rgb_range;
+ struct v4l2_ctrl *ctrl_tx_edid_present;
+ struct v4l2_ctrl *ctrl_tx_hotplug;
+ struct v4l2_ctrl *ctrl_tx_rxsense;
+
+ struct v4l2_ctrl *ctrl_rx_power_present;
struct v4l2_ctrl *radio_tx_rds_pi;
struct v4l2_ctrl *radio_tx_rds_pty;
@@ -299,23 +295,24 @@ struct vivid_dev {
bool time_wrap;
u64 time_wrap_offset;
unsigned perc_dropped_buffers;
- enum vivid_signal_mode std_signal_mode;
- unsigned query_std_last;
- v4l2_std_id query_std;
- enum tpg_video_aspect std_aspect_ratio;
+ enum vivid_signal_mode std_signal_mode[MAX_INPUTS];
+ unsigned int query_std_last[MAX_INPUTS];
+ v4l2_std_id query_std[MAX_INPUTS];
+ enum tpg_video_aspect std_aspect_ratio[MAX_INPUTS];
- enum vivid_signal_mode dv_timings_signal_mode;
+ enum vivid_signal_mode dv_timings_signal_mode[MAX_INPUTS];
char **query_dv_timings_qmenu;
char *query_dv_timings_qmenu_strings;
unsigned query_dv_timings_size;
- unsigned query_dv_timings_last;
- unsigned query_dv_timings;
- enum tpg_video_aspect dv_timings_aspect_ratio;
+ unsigned int query_dv_timings_last[MAX_INPUTS];
+ unsigned int query_dv_timings[MAX_INPUTS];
+ enum tpg_video_aspect dv_timings_aspect_ratio[MAX_INPUTS];
/* Input */
unsigned input;
- v4l2_std_id std_cap;
- struct v4l2_dv_timings dv_timings_cap;
+ v4l2_std_id std_cap[MAX_INPUTS];
+ struct v4l2_dv_timings dv_timings_cap[MAX_INPUTS];
+ int dv_timings_cap_sel[MAX_INPUTS];
u32 service_set_cap;
struct vivid_vbi_gen_data vbi_gen;
u8 *edid;
@@ -328,6 +325,8 @@ struct vivid_dev {
unsigned tv_field_cap;
unsigned tv_audio_input;
+ u32 power_present;
+
/* Capture Overlay */
struct v4l2_framebuffer fb_cap;
struct v4l2_fh *overlay_cap_owner;
@@ -360,6 +359,7 @@ struct vivid_dev {
u8 *scaled_line;
u8 *blended_line;
unsigned cur_scaled_line;
+ bool display_present[MAX_OUTPUTS];
/* Output Overlay */
void *fb_vbase_out;
diff --git a/drivers/media/platform/vivid/vivid-ctrls.c b/drivers/media/platform/vivid/vivid-ctrls.c
index 4cd526ff248b..3e916c8befb7 100644
--- a/drivers/media/platform/vivid/vivid-ctrls.c
+++ b/drivers/media/platform/vivid/vivid-ctrls.c
@@ -18,6 +18,7 @@
#include "vivid-radio-common.h"
#include "vivid-osd.h"
#include "vivid-ctrls.h"
+#include "vivid-cec.h"
#define VIVID_CID_CUSTOM_BASE (V4L2_CID_USER_BASE | 0xf000)
#define VIVID_CID_BUTTON (VIVID_CID_CUSTOM_BASE + 0)
@@ -68,6 +69,7 @@
#define VIVID_CID_PERCENTAGE_FILL (VIVID_CID_VIVID_BASE + 41)
#define VIVID_CID_REDUCED_FPS (VIVID_CID_VIVID_BASE + 42)
#define VIVID_CID_HSV_ENC (VIVID_CID_VIVID_BASE + 43)
+#define VIVID_CID_DISPLAY_PRESENT (VIVID_CID_VIVID_BASE + 44)
#define VIVID_CID_STD_SIGNAL_MODE (VIVID_CID_VIVID_BASE + 60)
#define VIVID_CID_STANDARD (VIVID_CID_VIVID_BASE + 61)
@@ -357,7 +359,7 @@ static int vivid_vid_cap_s_ctrl(struct v4l2_ctrl *ctrl)
V4L2_COLORSPACE_470_SYSTEM_BG,
};
struct vivid_dev *dev = container_of(ctrl->handler, struct vivid_dev, ctrl_hdl_vid_cap);
- unsigned i;
+ unsigned int i, j;
switch (ctrl->id) {
case VIVID_CID_TEST_PATTERN:
@@ -463,20 +465,35 @@ static int vivid_vid_cap_s_ctrl(struct v4l2_ctrl *ctrl)
tpg_s_show_square(&dev->tpg, ctrl->val);
break;
case VIVID_CID_STD_ASPECT_RATIO:
- dev->std_aspect_ratio = ctrl->val;
+ dev->std_aspect_ratio[dev->input] = ctrl->val;
tpg_s_video_aspect(&dev->tpg, vivid_get_video_aspect(dev));
break;
case VIVID_CID_DV_TIMINGS_SIGNAL_MODE:
- dev->dv_timings_signal_mode = dev->ctrl_dv_timings_signal_mode->val;
- if (dev->dv_timings_signal_mode == SELECTED_DV_TIMINGS)
- dev->query_dv_timings = dev->ctrl_dv_timings->val;
+ dev->dv_timings_signal_mode[dev->input] =
+ dev->ctrl_dv_timings_signal_mode->val;
+ dev->query_dv_timings[dev->input] = dev->ctrl_dv_timings->val;
+
+ dev->power_present = 0;
+ for (i = 0, j = 0;
+ i < ARRAY_SIZE(dev->dv_timings_signal_mode);
+ i++)
+ if (dev->input_type[i] == HDMI) {
+ if (dev->dv_timings_signal_mode[i] != NO_SIGNAL)
+ dev->power_present |= (1 << j);
+ j++;
+ }
+ __v4l2_ctrl_s_ctrl(dev->ctrl_rx_power_present,
+ dev->power_present);
+
v4l2_ctrl_activate(dev->ctrl_dv_timings,
- dev->dv_timings_signal_mode == SELECTED_DV_TIMINGS);
+ dev->dv_timings_signal_mode[dev->input] ==
+ SELECTED_DV_TIMINGS);
+
vivid_update_quality(dev);
vivid_send_source_change(dev, HDMI);
break;
case VIVID_CID_DV_TIMINGS_ASPECT_RATIO:
- dev->dv_timings_aspect_ratio = ctrl->val;
+ dev->dv_timings_aspect_ratio[dev->input] = ctrl->val;
tpg_s_video_aspect(&dev->tpg, vivid_get_video_aspect(dev));
break;
case VIVID_CID_TSTAMP_SRC:
@@ -908,6 +925,8 @@ static int vivid_vid_out_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct vivid_dev *dev = container_of(ctrl->handler, struct vivid_dev, ctrl_hdl_vid_out);
struct v4l2_bt_timings *bt = &dev->dv_timings_out.bt;
+ u32 display_present = 0;
+ unsigned int i, j, bus_idx;
switch (ctrl->id) {
case VIVID_CID_HAS_CROP_OUT:
@@ -941,6 +960,37 @@ static int vivid_vid_out_s_ctrl(struct v4l2_ctrl *ctrl)
if (dev->loop_video)
vivid_send_source_change(dev, HDMI);
break;
+ case VIVID_CID_DISPLAY_PRESENT:
+ if (dev->output_type[dev->output] != HDMI)
+ break;
+
+ dev->display_present[dev->output] = ctrl->val;
+ for (i = 0, j = 0; i < dev->num_outputs; i++)
+ if (dev->output_type[i] == HDMI)
+ display_present |=
+ dev->display_present[i] << j++;
+
+ __v4l2_ctrl_s_ctrl(dev->ctrl_tx_rxsense, display_present);
+
+ if (dev->edid_blocks) {
+ __v4l2_ctrl_s_ctrl(dev->ctrl_tx_edid_present,
+ display_present);
+ __v4l2_ctrl_s_ctrl(dev->ctrl_tx_hotplug,
+ display_present);
+ }
+
+ bus_idx = dev->cec_output2bus_map[dev->output];
+ if (!dev->cec_tx_adap[bus_idx])
+ break;
+
+ if (ctrl->val && dev->edid_blocks)
+ cec_s_phys_addr(dev->cec_tx_adap[bus_idx],
+ dev->cec_tx_adap[bus_idx]->phys_addr,
+ false);
+ else
+ cec_phys_addr_invalidate(dev->cec_tx_adap[bus_idx]);
+
+ break;
}
return 0;
}
@@ -979,6 +1029,15 @@ static const struct v4l2_ctrl_config vivid_ctrl_has_scaler_out = {
.step = 1,
};
+static const struct v4l2_ctrl_config vivid_ctrl_display_present = {
+ .ops = &vivid_vid_out_ctrl_ops,
+ .id = VIVID_CID_DISPLAY_PRESENT,
+ .name = "Display Present",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .max = 1,
+ .def = 1,
+ .step = 1,
+};
/* Streaming Controls */
@@ -1127,10 +1186,14 @@ static int vivid_sdtv_cap_s_ctrl(struct v4l2_ctrl *ctrl)
switch (ctrl->id) {
case VIVID_CID_STD_SIGNAL_MODE:
- dev->std_signal_mode = dev->ctrl_std_signal_mode->val;
- if (dev->std_signal_mode == SELECTED_STD)
- dev->query_std = vivid_standard[dev->ctrl_standard->val];
- v4l2_ctrl_activate(dev->ctrl_standard, dev->std_signal_mode == SELECTED_STD);
+ dev->std_signal_mode[dev->input] =
+ dev->ctrl_std_signal_mode->val;
+ if (dev->std_signal_mode[dev->input] == SELECTED_STD)
+ dev->query_std[dev->input] =
+ vivid_standard[dev->ctrl_standard->val];
+ v4l2_ctrl_activate(dev->ctrl_standard,
+ dev->std_signal_mode[dev->input] ==
+ SELECTED_STD);
vivid_update_quality(dev);
vivid_send_source_change(dev, TV);
vivid_send_source_change(dev, SVID);
@@ -1549,7 +1612,7 @@ int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap,
v4l2_ctrl_new_custom(hdl_vbi_cap, &vivid_ctrl_vbi_cap_interlaced, NULL);
}
- if (has_hdmi && dev->has_vid_cap) {
+ if (dev->num_hdmi_inputs) {
dev->ctrl_dv_timings_signal_mode = v4l2_ctrl_new_custom(hdl_vid_cap,
&vivid_ctrl_dv_timings_signal_mode, NULL);
@@ -1569,8 +1632,13 @@ int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap,
&vivid_vid_cap_ctrl_ops,
V4L2_CID_DV_RX_RGB_RANGE, V4L2_DV_RGB_RANGE_FULL,
0, V4L2_DV_RGB_RANGE_AUTO);
+ dev->ctrl_rx_power_present = v4l2_ctrl_new_std(hdl_vid_cap,
+ NULL, V4L2_CID_DV_RX_POWER_PRESENT, 0,
+ (2 << (dev->num_hdmi_inputs - 1)) - 1, 0,
+ (2 << (dev->num_hdmi_inputs - 1)) - 1);
+
}
- if (has_hdmi && dev->has_vid_out) {
+ if (dev->num_hdmi_outputs) {
/*
* We aren't doing anything with this at the moment, but
* HDMI outputs typically have these controls.
@@ -1581,6 +1649,20 @@ int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap,
dev->ctrl_tx_mode = v4l2_ctrl_new_std_menu(hdl_vid_out, NULL,
V4L2_CID_DV_TX_MODE, V4L2_DV_TX_MODE_HDMI,
0, V4L2_DV_TX_MODE_HDMI);
+ dev->ctrl_display_present = v4l2_ctrl_new_custom(hdl_vid_out,
+ &vivid_ctrl_display_present, NULL);
+ dev->ctrl_tx_hotplug = v4l2_ctrl_new_std(hdl_vid_out,
+ NULL, V4L2_CID_DV_TX_HOTPLUG, 0,
+ (2 << (dev->num_hdmi_outputs - 1)) - 1, 0,
+ (2 << (dev->num_hdmi_outputs - 1)) - 1);
+ dev->ctrl_tx_rxsense = v4l2_ctrl_new_std(hdl_vid_out,
+ NULL, V4L2_CID_DV_TX_RXSENSE, 0,
+ (2 << (dev->num_hdmi_outputs - 1)) - 1, 0,
+ (2 << (dev->num_hdmi_outputs - 1)) - 1);
+ dev->ctrl_tx_edid_present = v4l2_ctrl_new_std(hdl_vid_out,
+ NULL, V4L2_CID_DV_TX_EDID_PRESENT, 0,
+ (2 << (dev->num_hdmi_outputs - 1)) - 1, 0,
+ (2 << (dev->num_hdmi_outputs - 1)) - 1);
}
if ((dev->has_vid_cap && dev->has_vid_out) ||
(dev->has_vbi_cap && dev->has_vbi_out))
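The VIVID_CID_DISPLAY_PRESENT handler above packs one bit per HDMI output (S-Video outputs are skipped) into a mask that is then fed to the DV_TX_RXSENSE, DV_TX_EDID_PRESENT and DV_TX_HOTPLUG controls; the same pattern recurs in the vidioc_s_edid() change further down. A standalone sketch of that bit-packing, with made-up output types and values rather than driver state:

#include <stdbool.h>
#include <stdio.h>

enum out_type { SVID, HDMI };

int main(void)
{
	enum out_type output_type[] = { HDMI, SVID, HDMI, HDMI };
	bool display_present[] = { true, true, false, true };
	unsigned int num_outputs = 4, i, j;
	unsigned int mask = 0;

	/* same shape as the loop in vivid_vid_out_s_ctrl() */
	for (i = 0, j = 0; i < num_outputs; i++)
		if (output_type[i] == HDMI)
			mask |= display_present[i] << j++;

	/* outputs 0, 2 and 3 are HDMI -> bits 0, 1 and 2; output 2 has no display */
	printf("mask = 0x%x\n", mask);	/* prints mask = 0x5 */
	return 0;
}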
diff --git a/drivers/media/platform/vivid/vivid-kthread-cap.c b/drivers/media/platform/vivid/vivid-kthread-cap.c
index f8006a30c12f..6cf495a7d5cc 100644
--- a/drivers/media/platform/vivid/vivid-kthread-cap.c
+++ b/drivers/media/platform/vivid/vivid-kthread-cap.c
@@ -43,7 +43,7 @@
static inline v4l2_std_id vivid_get_std_cap(const struct vivid_dev *dev)
{
if (vivid_is_sdtv_cap(dev))
- return dev->std_cap;
+ return dev->std_cap[dev->input];
return 0;
}
@@ -408,7 +408,7 @@ static void vivid_fillbuff(struct vivid_dev *dev, struct vivid_buffer *buf)
unsigned factor = V4L2_FIELD_HAS_T_OR_B(dev->field_cap) ? 2 : 1;
unsigned line_height = 16 / factor;
bool is_tv = vivid_is_sdtv_cap(dev);
- bool is_60hz = is_tv && (dev->std_cap & V4L2_STD_525_60);
+ bool is_60hz = is_tv && (dev->std_cap[dev->input] & V4L2_STD_525_60);
unsigned p;
int line = 1;
u8 *basep[TPG_MAX_PLANES][2];
@@ -419,9 +419,9 @@ static void vivid_fillbuff(struct vivid_dev *dev, struct vivid_buffer *buf)
if (dev->loop_video && dev->can_loop_video &&
((vivid_is_svid_cap(dev) &&
- !VIVID_INVALID_SIGNAL(dev->std_signal_mode)) ||
+ !VIVID_INVALID_SIGNAL(dev->std_signal_mode[dev->input])) ||
(vivid_is_hdmi_cap(dev) &&
- !VIVID_INVALID_SIGNAL(dev->dv_timings_signal_mode))))
+ !VIVID_INVALID_SIGNAL(dev->dv_timings_signal_mode[dev->input]))))
is_loop = true;
buf->vb.sequence = dev->vid_cap_seq_count;
diff --git a/drivers/media/platform/vivid/vivid-osd.c b/drivers/media/platform/vivid/vivid-osd.c
index 1a89593b0c86..f2e789bdf4a6 100644
--- a/drivers/media/platform/vivid/vivid-osd.c
+++ b/drivers/media/platform/vivid/vivid-osd.c
@@ -155,7 +155,7 @@ static int _vivid_fb_check_var(struct fb_var_screeninfo *var, struct vivid_dev *
var->nonstd = 0;
var->vmode &= ~FB_VMODE_MASK;
- var->vmode = FB_VMODE_NONINTERLACED;
+ var->vmode |= FB_VMODE_NONINTERLACED;
/* Dummy values */
var->hsync_len = 24;
diff --git a/drivers/media/platform/vivid/vivid-vbi-cap.c b/drivers/media/platform/vivid/vivid-vbi-cap.c
index 40ecd7902b56..1a9348eea781 100644
--- a/drivers/media/platform/vivid/vivid-vbi-cap.c
+++ b/drivers/media/platform/vivid/vivid-vbi-cap.c
@@ -18,7 +18,7 @@
static void vivid_sliced_vbi_cap_fill(struct vivid_dev *dev, unsigned seqnr)
{
struct vivid_vbi_gen_data *vbi_gen = &dev->vbi_gen;
- bool is_60hz = dev->std_cap & V4L2_STD_525_60;
+ bool is_60hz = dev->std_cap[dev->input] & V4L2_STD_525_60;
vivid_vbi_gen_sliced(vbi_gen, is_60hz, seqnr);
@@ -65,7 +65,7 @@ static void vivid_sliced_vbi_cap_fill(struct vivid_dev *dev, unsigned seqnr)
static void vivid_g_fmt_vbi_cap(struct vivid_dev *dev, struct v4l2_vbi_format *vbi)
{
- bool is_60hz = dev->std_cap & V4L2_STD_525_60;
+ bool is_60hz = dev->std_cap[dev->input] & V4L2_STD_525_60;
vbi->sampling_rate = 27000000;
vbi->offset = 24;
@@ -93,7 +93,7 @@ void vivid_raw_vbi_cap_process(struct vivid_dev *dev, struct vivid_buffer *buf)
memset(vbuf, 0x10, vb2_plane_size(&buf->vb.vb2_buf, 0));
- if (!VIVID_INVALID_SIGNAL(dev->std_signal_mode))
+ if (!VIVID_INVALID_SIGNAL(dev->std_signal_mode[dev->input]))
vivid_vbi_gen_raw(&dev->vbi_gen, &vbi, vbuf);
}
@@ -111,7 +111,7 @@ void vivid_sliced_vbi_cap_process(struct vivid_dev *dev,
vivid_sliced_vbi_cap_fill(dev, buf->vb.sequence);
memset(vbuf, 0, vb2_plane_size(&buf->vb.vb2_buf, 0));
- if (!VIVID_INVALID_SIGNAL(dev->std_signal_mode)) {
+ if (!VIVID_INVALID_SIGNAL(dev->std_signal_mode[dev->input])) {
unsigned i;
for (i = 0; i < 25; i++)
@@ -124,7 +124,7 @@ static int vbi_cap_queue_setup(struct vb2_queue *vq,
unsigned sizes[], struct device *alloc_devs[])
{
struct vivid_dev *dev = vb2_get_drv_priv(vq);
- bool is_60hz = dev->std_cap & V4L2_STD_525_60;
+ bool is_60hz = dev->std_cap[dev->input] & V4L2_STD_525_60;
unsigned size = vq->type == V4L2_BUF_TYPE_SLICED_VBI_CAPTURE ?
36 * sizeof(struct v4l2_sliced_vbi_data) :
1440 * 2 * (is_60hz ? 12 : 18);
@@ -144,7 +144,7 @@ static int vbi_cap_queue_setup(struct vb2_queue *vq,
static int vbi_cap_buf_prepare(struct vb2_buffer *vb)
{
struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
- bool is_60hz = dev->std_cap & V4L2_STD_525_60;
+ bool is_60hz = dev->std_cap[dev->input] & V4L2_STD_525_60;
unsigned size = vb->vb2_queue->type == V4L2_BUF_TYPE_SLICED_VBI_CAPTURE ?
36 * sizeof(struct v4l2_sliced_vbi_data) :
1440 * 2 * (is_60hz ? 12 : 18);
@@ -302,7 +302,7 @@ int vidioc_try_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_forma
{
struct vivid_dev *dev = video_drvdata(file);
struct v4l2_sliced_vbi_format *vbi = &fmt->fmt.sliced;
- bool is_60hz = dev->std_cap & V4L2_STD_525_60;
+ bool is_60hz = dev->std_cap[dev->input] & V4L2_STD_525_60;
u32 service_set = vbi->service_set;
if (!vivid_is_sdtv_cap(dev) || !dev->has_sliced_vbi_cap)
@@ -337,7 +337,7 @@ int vidioc_g_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_sliced_vbi_
bool is_60hz;
if (vdev->vfl_dir == VFL_DIR_RX) {
- is_60hz = dev->std_cap & V4L2_STD_525_60;
+ is_60hz = dev->std_cap[dev->input] & V4L2_STD_525_60;
if (!vivid_is_sdtv_cap(dev) || !dev->has_sliced_vbi_cap ||
cap->type != V4L2_BUF_TYPE_SLICED_VBI_CAPTURE)
return -EINVAL;
diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c
index 530ac8decb25..8cbaa0c998ed 100644
--- a/drivers/media/platform/vivid/vivid-vid-cap.c
+++ b/drivers/media/platform/vivid/vivid-vid-cap.c
@@ -21,11 +21,6 @@
#include "vivid-kthread-cap.h"
#include "vivid-vid-cap.h"
-/* timeperframe: min/max and default */
-static const struct v4l2_fract
- tpf_min = {.numerator = 1, .denominator = FPS_MAX},
- tpf_max = {.numerator = FPS_MAX, .denominator = 1};
-
static const struct vivid_fmt formats_ovl[] = {
{
.fourcc = V4L2_PIX_FMT_RGB565, /* gggbbbbb rrrrrggg */
@@ -196,7 +191,7 @@ static void vid_cap_buf_finish(struct vb2_buffer *vb)
* test this.
*/
vbuf->flags |= V4L2_BUF_FLAG_TIMECODE;
- if (dev->std_cap & V4L2_STD_525_60)
+ if (dev->std_cap[dev->input] & V4L2_STD_525_60)
fps = 30;
tc->type = (fps == 30) ? V4L2_TC_TYPE_30FPS : V4L2_TC_TYPE_25FPS;
tc->flags = 0;
@@ -299,11 +294,13 @@ void vivid_update_quality(struct vivid_dev *dev)
tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0);
return;
}
- if (vivid_is_hdmi_cap(dev) && VIVID_INVALID_SIGNAL(dev->dv_timings_signal_mode)) {
+ if (vivid_is_hdmi_cap(dev) &&
+ VIVID_INVALID_SIGNAL(dev->dv_timings_signal_mode[dev->input])) {
tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0);
return;
}
- if (vivid_is_sdtv_cap(dev) && VIVID_INVALID_SIGNAL(dev->std_signal_mode)) {
+ if (vivid_is_sdtv_cap(dev) &&
+ VIVID_INVALID_SIGNAL(dev->std_signal_mode[dev->input])) {
tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0);
return;
}
@@ -358,10 +355,10 @@ static enum tpg_quality vivid_get_quality(struct vivid_dev *dev, s32 *afc)
enum tpg_video_aspect vivid_get_video_aspect(const struct vivid_dev *dev)
{
if (vivid_is_sdtv_cap(dev))
- return dev->std_aspect_ratio;
+ return dev->std_aspect_ratio[dev->input];
if (vivid_is_hdmi_cap(dev))
- return dev->dv_timings_aspect_ratio;
+ return dev->dv_timings_aspect_ratio[dev->input];
return TPG_VIDEO_ASPECT_IMAGE;
}
@@ -369,7 +366,7 @@ enum tpg_video_aspect vivid_get_video_aspect(const struct vivid_dev *dev)
static enum tpg_pixel_aspect vivid_get_pixel_aspect(const struct vivid_dev *dev)
{
if (vivid_is_sdtv_cap(dev))
- return (dev->std_cap & V4L2_STD_525_60) ?
+ return (dev->std_cap[dev->input] & V4L2_STD_525_60) ?
TPG_PIXEL_ASPECT_NTSC : TPG_PIXEL_ASPECT_PAL;
if (vivid_is_hdmi_cap(dev) &&
@@ -386,7 +383,7 @@ static enum tpg_pixel_aspect vivid_get_pixel_aspect(const struct vivid_dev *dev)
*/
void vivid_update_format_cap(struct vivid_dev *dev, bool keep_controls)
{
- struct v4l2_bt_timings *bt = &dev->dv_timings_cap.bt;
+ struct v4l2_bt_timings *bt = &dev->dv_timings_cap[dev->input].bt;
unsigned size;
u64 pixelclock;
@@ -403,7 +400,7 @@ void vivid_update_format_cap(struct vivid_dev *dev, bool keep_controls)
case SVID:
dev->field_cap = dev->tv_field_cap;
dev->src_rect.width = 720;
- if (dev->std_cap & V4L2_STD_525_60) {
+ if (dev->std_cap[dev->input] & V4L2_STD_525_60) {
dev->src_rect.height = 480;
dev->timeperframe_vid_cap = (struct v4l2_fract) { 1001, 30000 };
dev->service_set_cap = V4L2_SLICED_CAPTION_525;
@@ -486,8 +483,8 @@ static enum v4l2_field vivid_field_cap(struct vivid_dev *dev, enum v4l2_field fi
}
}
if (vivid_is_hdmi_cap(dev))
- return dev->dv_timings_cap.bt.interlaced ? V4L2_FIELD_ALTERNATE :
- V4L2_FIELD_NONE;
+ return dev->dv_timings_cap[dev->input].bt.interlaced ?
+ V4L2_FIELD_ALTERNATE : V4L2_FIELD_NONE;
return V4L2_FIELD_NONE;
}
@@ -586,7 +583,7 @@ int vivid_try_fmt_vid_cap(struct file *file, void *priv,
h = sz->height;
} else if (vivid_is_sdtv_cap(dev)) {
w = 720;
- h = (dev->std_cap & V4L2_STD_525_60) ? 480 : 576;
+ h = (dev->std_cap[dev->input] & V4L2_STD_525_60) ? 480 : 576;
} else {
w = dev->src_rect.width;
h = dev->src_rect.height;
@@ -1310,10 +1307,10 @@ int vidioc_enum_input(struct file *file, void *priv,
dev->input_name_counter[inp->index]);
inp->capabilities = V4L2_IN_CAP_DV_TIMINGS;
if (dev->edid_blocks == 0 ||
- dev->dv_timings_signal_mode == NO_SIGNAL)
+ dev->dv_timings_signal_mode[dev->input] == NO_SIGNAL)
inp->status |= V4L2_IN_ST_NO_SIGNAL;
- else if (dev->dv_timings_signal_mode == NO_LOCK ||
- dev->dv_timings_signal_mode == OUT_OF_RANGE)
+ else if (dev->dv_timings_signal_mode[dev->input] == NO_LOCK ||
+ dev->dv_timings_signal_mode[dev->input] == OUT_OF_RANGE)
inp->status |= V4L2_IN_ST_NO_H_LOCK;
break;
}
@@ -1322,9 +1319,9 @@ int vidioc_enum_input(struct file *file, void *priv,
if (dev->sensor_vflip)
inp->status |= V4L2_IN_ST_VFLIP;
if (dev->input == inp->index && vivid_is_sdtv_cap(dev)) {
- if (dev->std_signal_mode == NO_SIGNAL) {
+ if (dev->std_signal_mode[dev->input] == NO_SIGNAL) {
inp->status |= V4L2_IN_ST_NO_SIGNAL;
- } else if (dev->std_signal_mode == NO_LOCK) {
+ } else if (dev->std_signal_mode[dev->input] == NO_LOCK) {
inp->status |= V4L2_IN_ST_NO_H_LOCK;
} else if (vivid_is_tv_cap(dev)) {
switch (tpg_g_quality(&dev->tpg)) {
@@ -1353,7 +1350,7 @@ int vidioc_g_input(struct file *file, void *priv, unsigned *i)
int vidioc_s_input(struct file *file, void *priv, unsigned i)
{
struct vivid_dev *dev = video_drvdata(file);
- struct v4l2_bt_timings *bt = &dev->dv_timings_cap.bt;
+ struct v4l2_bt_timings *bt = &dev->dv_timings_cap[dev->input].bt;
unsigned brightness;
if (i >= dev->num_inputs)
@@ -1407,6 +1404,29 @@ int vidioc_s_input(struct file *file, void *priv, unsigned i)
v4l2_ctrl_modify_range(dev->brightness,
128 * i, 255 + 128 * i, 1, 128 + 128 * i);
v4l2_ctrl_s_ctrl(dev->brightness, brightness);
+
+ /* Restore per-input states. */
+ v4l2_ctrl_activate(dev->ctrl_dv_timings_signal_mode,
+ vivid_is_hdmi_cap(dev));
+ v4l2_ctrl_activate(dev->ctrl_dv_timings, vivid_is_hdmi_cap(dev) &&
+ dev->dv_timings_signal_mode[dev->input] ==
+ SELECTED_DV_TIMINGS);
+ v4l2_ctrl_activate(dev->ctrl_std_signal_mode, vivid_is_sdtv_cap(dev));
+ v4l2_ctrl_activate(dev->ctrl_standard, vivid_is_sdtv_cap(dev) &&
+ dev->std_signal_mode[dev->input]);
+
+ if (vivid_is_hdmi_cap(dev)) {
+ v4l2_ctrl_s_ctrl(dev->ctrl_dv_timings_signal_mode,
+ dev->dv_timings_signal_mode[dev->input]);
+ v4l2_ctrl_s_ctrl(dev->ctrl_dv_timings,
+ dev->query_dv_timings[dev->input]);
+ } else if (vivid_is_sdtv_cap(dev)) {
+ v4l2_ctrl_s_ctrl(dev->ctrl_std_signal_mode,
+ dev->std_signal_mode[dev->input]);
+ v4l2_ctrl_s_ctrl(dev->ctrl_standard,
+ dev->std_signal_mode[dev->input]);
+ }
+
return 0;
}
@@ -1499,8 +1519,9 @@ int vivid_video_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
} else if (qual == TPG_QUAL_GRAY) {
vt->rxsubchans = V4L2_TUNER_SUB_MONO;
} else {
- unsigned channel_nr = dev->tv_freq / (6 * 16);
- unsigned options = (dev->std_cap & V4L2_STD_NTSC_M) ? 4 : 3;
+ unsigned int channel_nr = dev->tv_freq / (6 * 16);
+ unsigned int options =
+ (dev->std_cap[dev->input] & V4L2_STD_NTSC_M) ? 4 : 3;
switch (channel_nr % options) {
case 0:
@@ -1510,7 +1531,7 @@ int vivid_video_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
vt->rxsubchans = V4L2_TUNER_SUB_STEREO;
break;
case 2:
- if (dev->std_cap & V4L2_STD_NTSC_M)
+ if (dev->std_cap[dev->input] & V4L2_STD_NTSC_M)
vt->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_SAP;
else
vt->rxsubchans = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2;
@@ -1567,23 +1588,25 @@ const char * const vivid_ctrl_standard_strings[] = {
int vidioc_querystd(struct file *file, void *priv, v4l2_std_id *id)
{
struct vivid_dev *dev = video_drvdata(file);
+ unsigned int last = dev->query_std_last[dev->input];
if (!vivid_is_sdtv_cap(dev))
return -ENODATA;
- if (dev->std_signal_mode == NO_SIGNAL ||
- dev->std_signal_mode == NO_LOCK) {
+ if (dev->std_signal_mode[dev->input] == NO_SIGNAL ||
+ dev->std_signal_mode[dev->input] == NO_LOCK) {
*id = V4L2_STD_UNKNOWN;
return 0;
}
if (vivid_is_tv_cap(dev) && tpg_g_quality(&dev->tpg) == TPG_QUAL_NOISE) {
*id = V4L2_STD_UNKNOWN;
- } else if (dev->std_signal_mode == CURRENT_STD) {
- *id = dev->std_cap;
- } else if (dev->std_signal_mode == SELECTED_STD) {
- *id = dev->query_std;
+ } else if (dev->std_signal_mode[dev->input] == CURRENT_STD) {
+ *id = dev->std_cap[dev->input];
+ } else if (dev->std_signal_mode[dev->input] == SELECTED_STD) {
+ *id = dev->query_std[dev->input];
} else {
- *id = vivid_standard[dev->query_std_last];
- dev->query_std_last = (dev->query_std_last + 1) % ARRAY_SIZE(vivid_standard);
+ *id = vivid_standard[last];
+ dev->query_std_last[dev->input] =
+ (last + 1) % ARRAY_SIZE(vivid_standard);
}
return 0;
@@ -1595,11 +1618,11 @@ int vivid_vid_cap_s_std(struct file *file, void *priv, v4l2_std_id id)
if (!vivid_is_sdtv_cap(dev))
return -ENODATA;
- if (dev->std_cap == id)
+ if (dev->std_cap[dev->input] == id)
return 0;
if (vb2_is_busy(&dev->vb_vid_cap_q) || vb2_is_busy(&dev->vb_vbi_cap_q))
return -EBUSY;
- dev->std_cap = id;
+ dev->std_cap[dev->input] = id;
vivid_update_format_cap(dev, false);
return 0;
}
@@ -1676,12 +1699,13 @@ int vivid_vid_cap_s_dv_timings(struct file *file, void *_fh,
!valid_cvt_gtf_timings(timings))
return -EINVAL;
- if (v4l2_match_dv_timings(timings, &dev->dv_timings_cap, 0, false))
+ if (v4l2_match_dv_timings(timings, &dev->dv_timings_cap[dev->input],
+ 0, false))
return 0;
if (vb2_is_busy(&dev->vb_vid_cap_q))
return -EBUSY;
- dev->dv_timings_cap = *timings;
+ dev->dv_timings_cap[dev->input] = *timings;
vivid_update_format_cap(dev, false);
return 0;
}
@@ -1690,26 +1714,31 @@ int vidioc_query_dv_timings(struct file *file, void *_fh,
struct v4l2_dv_timings *timings)
{
struct vivid_dev *dev = video_drvdata(file);
+ unsigned int input = dev->input;
+ unsigned int last = dev->query_dv_timings_last[input];
if (!vivid_is_hdmi_cap(dev))
return -ENODATA;
- if (dev->dv_timings_signal_mode == NO_SIGNAL ||
+ if (dev->dv_timings_signal_mode[input] == NO_SIGNAL ||
dev->edid_blocks == 0)
return -ENOLINK;
- if (dev->dv_timings_signal_mode == NO_LOCK)
+ if (dev->dv_timings_signal_mode[input] == NO_LOCK)
return -ENOLCK;
- if (dev->dv_timings_signal_mode == OUT_OF_RANGE) {
+ if (dev->dv_timings_signal_mode[input] == OUT_OF_RANGE) {
timings->bt.pixelclock = vivid_dv_timings_cap.bt.max_pixelclock * 2;
return -ERANGE;
}
- if (dev->dv_timings_signal_mode == CURRENT_DV_TIMINGS) {
- *timings = dev->dv_timings_cap;
- } else if (dev->dv_timings_signal_mode == SELECTED_DV_TIMINGS) {
- *timings = v4l2_dv_timings_presets[dev->query_dv_timings];
+ if (dev->dv_timings_signal_mode[input] == CURRENT_DV_TIMINGS) {
+ *timings = dev->dv_timings_cap[input];
+ } else if (dev->dv_timings_signal_mode[input] ==
+ SELECTED_DV_TIMINGS) {
+ *timings =
+ v4l2_dv_timings_presets[dev->query_dv_timings[input]];
} else {
- *timings = v4l2_dv_timings_presets[dev->query_dv_timings_last];
- dev->query_dv_timings_last = (dev->query_dv_timings_last + 1) %
- dev->query_dv_timings_size;
+ *timings =
+ v4l2_dv_timings_presets[last];
+ dev->query_dv_timings_last[input] =
+ (last + 1) % dev->query_dv_timings_size;
}
return 0;
}
@@ -1719,7 +1748,8 @@ int vidioc_s_edid(struct file *file, void *_fh,
{
struct vivid_dev *dev = video_drvdata(file);
u16 phys_addr;
- unsigned int i;
+ u32 display_present = 0;
+ unsigned int i, j;
int ret;
memset(edid->reserved, 0, sizeof(edid->reserved));
@@ -1729,6 +1759,8 @@ int vidioc_s_edid(struct file *file, void *_fh,
return -EINVAL;
if (edid->blocks == 0) {
dev->edid_blocks = 0;
+ v4l2_ctrl_s_ctrl(dev->ctrl_tx_edid_present, 0);
+ v4l2_ctrl_s_ctrl(dev->ctrl_tx_hotplug, 0);
phys_addr = CEC_PHYS_ADDR_INVALID;
goto set_phys_addr;
}
@@ -1747,13 +1779,23 @@ int vidioc_s_edid(struct file *file, void *_fh,
dev->edid_blocks = edid->blocks;
memcpy(dev->edid, edid->edid, edid->blocks * 128);
+ for (i = 0, j = 0; i < dev->num_outputs; i++)
+ if (dev->output_type[i] == HDMI)
+ display_present |=
+ dev->display_present[i] << j++;
+
+ v4l2_ctrl_s_ctrl(dev->ctrl_tx_edid_present, display_present);
+ v4l2_ctrl_s_ctrl(dev->ctrl_tx_hotplug, display_present);
+
set_phys_addr:
/* TODO: a proper hotplug detect cycle should be emulated here */
cec_s_phys_addr(dev->cec_rx_adap, phys_addr, false);
for (i = 0; i < MAX_OUTPUTS && dev->cec_tx_adap[i]; i++)
cec_s_phys_addr(dev->cec_tx_adap[i],
- v4l2_phys_addr_for_input(phys_addr, i + 1),
+ dev->display_present[i] ?
+ v4l2_phys_addr_for_input(phys_addr, i + 1) :
+ CEC_PHYS_ADDR_INVALID,
false);
return 0;
}
@@ -1865,8 +1907,6 @@ int vivid_vid_cap_s_parm(struct file *file, void *priv,
i = ival_sz - 1;
dev->webcam_ival_idx = i;
tpf = webcam_intervals[dev->webcam_ival_idx];
- tpf = V4L2_FRACT_COMPARE(tpf, <, tpf_min) ? tpf_min : tpf;
- tpf = V4L2_FRACT_COMPARE(tpf, >, tpf_max) ? tpf_max : tpf;
/* resync the thread's timings */
dev->cap_seq_resync = true;
diff --git a/drivers/media/platform/vivid/vivid-vid-common.c b/drivers/media/platform/vivid/vivid-vid-common.c
index 74b83bcc6119..1f33eb1a76b6 100644
--- a/drivers/media/platform/vivid/vivid-vid-common.c
+++ b/drivers/media/platform/vivid/vivid-vid-common.c
@@ -645,7 +645,7 @@ bool vivid_vid_can_loop(struct vivid_dev *dev)
dev->field_cap == V4L2_FIELD_SEQ_BT)
return false;
if (vivid_is_svid_cap(dev) && vivid_is_svid_out(dev)) {
- if (!(dev->std_cap & V4L2_STD_525_60) !=
+ if (!(dev->std_cap[dev->input] & V4L2_STD_525_60) !=
!(dev->std_out & V4L2_STD_525_60))
return false;
return true;
@@ -797,26 +797,6 @@ int vivid_enum_fmt_vid(struct file *file, void *priv,
return 0;
}
-int vidioc_enum_fmt_vid_mplane(struct file *file, void *priv,
- struct v4l2_fmtdesc *f)
-{
- struct vivid_dev *dev = video_drvdata(file);
-
- if (!dev->multiplanar)
- return -ENOTTY;
- return vivid_enum_fmt_vid(file, priv, f);
-}
-
-int vidioc_enum_fmt_vid(struct file *file, void *priv,
- struct v4l2_fmtdesc *f)
-{
- struct vivid_dev *dev = video_drvdata(file);
-
- if (dev->multiplanar)
- return -ENOTTY;
- return vivid_enum_fmt_vid(file, priv, f);
-}
-
int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *id)
{
struct vivid_dev *dev = video_drvdata(file);
@@ -825,7 +805,7 @@ int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *id)
if (vdev->vfl_dir == VFL_DIR_RX) {
if (!vivid_is_sdtv_cap(dev))
return -ENODATA;
- *id = dev->std_cap;
+ *id = dev->std_cap[dev->input];
} else {
if (!vivid_is_svid_out(dev))
return -ENODATA;
@@ -843,7 +823,7 @@ int vidioc_g_dv_timings(struct file *file, void *_fh,
if (vdev->vfl_dir == VFL_DIR_RX) {
if (!vivid_is_hdmi_cap(dev))
return -ENODATA;
- *timings = dev->dv_timings_cap;
+ *timings = dev->dv_timings_cap[dev->input];
} else {
if (!vivid_is_hdmi_out(dev))
return -ENODATA;
@@ -907,6 +887,8 @@ int vidioc_g_edid(struct file *file, void *_fh,
return -EINVAL;
if (dev->output_type[edid->pad] != HDMI)
return -EINVAL;
+ if (!dev->display_present[edid->pad])
+ return -ENODATA;
bus_idx = dev->cec_output2bus_map[edid->pad];
adap = dev->cec_tx_adap[bus_idx];
}
diff --git a/drivers/media/platform/vivid/vivid-vid-common.h b/drivers/media/platform/vivid/vivid-vid-common.h
index 29b6c0b40a1b..d908d9725283 100644
--- a/drivers/media/platform/vivid/vivid-vid-common.h
+++ b/drivers/media/platform/vivid/vivid-vid-common.h
@@ -28,8 +28,6 @@ void vivid_send_source_change(struct vivid_dev *dev, unsigned type);
int vivid_vid_adjust_sel(unsigned flags, struct v4l2_rect *r);
int vivid_enum_fmt_vid(struct file *file, void *priv, struct v4l2_fmtdesc *f);
-int vidioc_enum_fmt_vid_mplane(struct file *file, void *priv, struct v4l2_fmtdesc *f);
-int vidioc_enum_fmt_vid(struct file *file, void *priv, struct v4l2_fmtdesc *f);
int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *id);
int vidioc_g_dv_timings(struct file *file, void *_fh, struct v4l2_dv_timings *timings);
int vidioc_enum_dv_timings(struct file *file, void *_fh, struct v4l2_enum_dv_timings *timings);
diff --git a/drivers/media/platform/vivid/vivid-vid-out.c b/drivers/media/platform/vivid/vivid-vid-out.c
index 9350ca65dd91..148b663a6075 100644
--- a/drivers/media/platform/vivid/vivid-vid-out.c
+++ b/drivers/media/platform/vivid/vivid-vid-out.c
@@ -1094,6 +1094,12 @@ int vidioc_s_output(struct file *file, void *priv, unsigned o)
dev->vbi_out_dev.tvnorms = dev->vid_out_dev.tvnorms;
vivid_update_format_out(dev);
+
+ v4l2_ctrl_activate(dev->ctrl_display_present, vivid_is_hdmi_out(dev));
+ if (vivid_is_hdmi_out(dev))
+ v4l2_ctrl_s_ctrl(dev->ctrl_display_present,
+ dev->display_present[dev->output]);
+
return 0;
}
diff --git a/drivers/media/radio/Kconfig b/drivers/media/radio/Kconfig
index 4b41687b2bde..eb79d99787bd 100644
--- a/drivers/media/radio/Kconfig
+++ b/drivers/media/radio/Kconfig
@@ -233,7 +233,6 @@ source "drivers/media/radio/wl128x/Kconfig"
menuconfig V4L_RADIO_ISA_DRIVERS
bool "ISA radio devices"
depends on ISA || COMPILE_TEST
- default n
help
Say Y here to enable support for these ISA drivers.
diff --git a/drivers/media/radio/dsbr100.c b/drivers/media/radio/dsbr100.c
index 9f7e68498321..9a45cda05779 100644
--- a/drivers/media/radio/dsbr100.c
+++ b/drivers/media/radio/dsbr100.c
@@ -168,8 +168,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strscpy(v->driver, "dsbr100", sizeof(v->driver));
strscpy(v->card, "D-Link R-100 USB FM Radio", sizeof(v->card));
usb_make_path(radio->usbdev, v->bus_info, sizeof(v->bus_info));
- v->device_caps = V4L2_CAP_RADIO | V4L2_CAP_TUNER;
- v->capabilities = v->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -378,6 +376,7 @@ static int usb_dsbr100_probe(struct usb_interface *intf,
radio->videodev.release = video_device_release_empty;
radio->videodev.lock = &radio->v4l2_lock;
radio->videodev.ctrl_handler = &radio->hdl;
+ radio->videodev.device_caps = V4L2_CAP_RADIO | V4L2_CAP_TUNER;
radio->usbdev = interface_to_usbdev(intf);
radio->curfreq = FREQ_MIN * FREQ_MUL;
diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
index 12160894839c..a5db9b4dc3de 100644
--- a/drivers/media/radio/radio-cadet.c
+++ b/drivers/media/radio/radio-cadet.c
@@ -357,9 +357,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strscpy(v->driver, "ADS Cadet", sizeof(v->driver));
strscpy(v->card, "ADS Cadet", sizeof(v->card));
strscpy(v->bus_info, "ISA:radio-cadet", sizeof(v->bus_info));
- v->device_caps = V4L2_CAP_TUNER | V4L2_CAP_RADIO |
- V4L2_CAP_READWRITE | V4L2_CAP_RDS_CAPTURE;
- v->capabilities = v->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -646,6 +643,8 @@ static int __init cadet_init(void)
dev->vdev.ioctl_ops = &cadet_ioctl_ops;
dev->vdev.release = video_device_release_empty;
dev->vdev.lock = &dev->lock;
+ dev->vdev.device_caps = V4L2_CAP_TUNER | V4L2_CAP_RADIO |
+ V4L2_CAP_READWRITE | V4L2_CAP_RDS_CAPTURE;
video_set_drvdata(&dev->vdev, dev);
res = video_register_device(&dev->vdev, VFL_TYPE_RADIO, radio_nr);
diff --git a/drivers/media/radio/radio-isa.c b/drivers/media/radio/radio-isa.c
index 9f9c08393756..ad2ac16ff12d 100644
--- a/drivers/media/radio/radio-isa.c
+++ b/drivers/media/radio/radio-isa.c
@@ -37,9 +37,6 @@ static int radio_isa_querycap(struct file *file, void *priv,
strscpy(v->driver, isa->drv->driver.driver.name, sizeof(v->driver));
strscpy(v->card, isa->drv->card, sizeof(v->card));
snprintf(v->bus_info, sizeof(v->bus_info), "ISA:%s", isa->v4l2_dev.name);
-
- v->device_caps = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
- v->capabilities = v->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -240,6 +237,7 @@ static int radio_isa_common_probe(struct radio_isa_card *isa,
isa->vdev.fops = &radio_isa_fops;
isa->vdev.ioctl_ops = &radio_isa_ioctl_ops;
isa->vdev.release = video_device_release_empty;
+ isa->vdev.device_caps = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
video_set_drvdata(&isa->vdev, isa);
isa->freq = FREQ_LOW;
isa->stereo = drv->has_stereo;
diff --git a/drivers/media/radio/radio-keene.c b/drivers/media/radio/radio-keene.c
index 4d41857946de..a35648316aa8 100644
--- a/drivers/media/radio/radio-keene.c
+++ b/drivers/media/radio/radio-keene.c
@@ -168,8 +168,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strscpy(v->driver, "radio-keene", sizeof(v->driver));
strscpy(v->card, "Keene FM Transmitter", sizeof(v->card));
usb_make_path(radio->usbdev, v->bus_info, sizeof(v->bus_info));
- v->device_caps = V4L2_CAP_RADIO | V4L2_CAP_MODULATOR;
- v->capabilities = v->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -361,6 +359,7 @@ static int usb_keene_probe(struct usb_interface *intf,
radio->vdev.lock = &radio->lock;
radio->vdev.release = video_device_release_empty;
radio->vdev.vfl_dir = VFL_DIR_TX;
+ radio->vdev.device_caps = V4L2_CAP_RADIO | V4L2_CAP_MODULATOR;
radio->usbdev = interface_to_usbdev(intf);
radio->intf = intf;
diff --git a/drivers/media/radio/radio-ma901.c b/drivers/media/radio/radio-ma901.c
index cbcf0ed69223..657c3dda6648 100644
--- a/drivers/media/radio/radio-ma901.c
+++ b/drivers/media/radio/radio-ma901.c
@@ -191,8 +191,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strscpy(v->driver, "radio-ma901", sizeof(v->driver));
strscpy(v->card, "Masterkit MA901 USB FM Radio", sizeof(v->card));
usb_make_path(radio->usbdev, v->bus_info, sizeof(v->bus_info));
- v->device_caps = V4L2_CAP_RADIO | V4L2_CAP_TUNER;
- v->capabilities = v->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -398,6 +396,7 @@ static int usb_ma901radio_probe(struct usb_interface *intf,
radio->vdev.ioctl_ops = &usb_ma901radio_ioctl_ops;
radio->vdev.release = video_device_release_empty;
radio->vdev.lock = &radio->lock;
+ radio->vdev.device_caps = V4L2_CAP_RADIO | V4L2_CAP_TUNER;
radio->usbdev = interface_to_usbdev(intf);
radio->intf = intf;
diff --git a/drivers/media/radio/radio-miropcm20.c b/drivers/media/radio/radio-miropcm20.c
index 95d12cbff5c9..99788834c646 100644
--- a/drivers/media/radio/radio-miropcm20.c
+++ b/drivers/media/radio/radio-miropcm20.c
@@ -204,8 +204,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strscpy(v->driver, "Miro PCM20", sizeof(v->driver));
strscpy(v->card, "Miro PCM20", sizeof(v->card));
snprintf(v->bus_info, sizeof(v->bus_info), "ISA:%s", dev->v4l2_dev.name);
- v->device_caps = V4L2_CAP_TUNER | V4L2_CAP_RADIO | V4L2_CAP_RDS_CAPTURE;
- v->capabilities = v->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -481,6 +479,8 @@ static int __init pcm20_init(void)
dev->vdev.ioctl_ops = &pcm20_ioctl_ops;
dev->vdev.release = video_device_release_empty;
dev->vdev.lock = &dev->lock;
+ dev->vdev.device_caps = V4L2_CAP_TUNER | V4L2_CAP_RADIO |
+ V4L2_CAP_RDS_CAPTURE;
video_set_drvdata(&dev->vdev, dev);
snd_aci_cmd(dev->aci, ACI_SET_TUNERMONO,
dev->audmode == V4L2_TUNER_MODE_MONO, -1);
diff --git a/drivers/media/radio/radio-mr800.c b/drivers/media/radio/radio-mr800.c
index f53f9064e1e9..cb0437b4c331 100644
--- a/drivers/media/radio/radio-mr800.c
+++ b/drivers/media/radio/radio-mr800.c
@@ -260,9 +260,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strscpy(v->driver, "radio-mr800", sizeof(v->driver));
strscpy(v->card, "AverMedia MR 800 USB FM Radio", sizeof(v->card));
usb_make_path(radio->usbdev, v->bus_info, sizeof(v->bus_info));
- v->device_caps = V4L2_CAP_RADIO | V4L2_CAP_TUNER |
- V4L2_CAP_HW_FREQ_SEEK;
- v->capabilities = v->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -545,6 +542,8 @@ static int usb_amradio_probe(struct usb_interface *intf,
radio->vdev.ioctl_ops = &usb_amradio_ioctl_ops;
radio->vdev.release = video_device_release_empty;
radio->vdev.lock = &radio->lock;
+ radio->vdev.device_caps = V4L2_CAP_RADIO | V4L2_CAP_TUNER |
+ V4L2_CAP_HW_FREQ_SEEK;
radio->usbdev = interface_to_usbdev(intf);
radio->intf = intf;
diff --git a/drivers/media/radio/radio-raremono.c b/drivers/media/radio/radio-raremono.c
index 5e782b3c2fa9..c3180d53c282 100644
--- a/drivers/media/radio/radio-raremono.c
+++ b/drivers/media/radio/radio-raremono.c
@@ -184,8 +184,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strscpy(v->driver, "radio-raremono", sizeof(v->driver));
strscpy(v->card, "Thanko's Raremono", sizeof(v->card));
usb_make_path(radio->usbdev, v->bus_info, sizeof(v->bus_info));
- v->device_caps = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
- v->capabilities = v->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -271,6 +269,14 @@ static int vidioc_g_frequency(struct file *file, void *priv,
return 0;
}
+static void raremono_device_release(struct v4l2_device *v4l2_dev)
+{
+ struct raremono_device *radio = to_raremono_dev(v4l2_dev);
+
+ kfree(radio->buffer);
+ kfree(radio);
+}
+
/* File system interface */
static const struct v4l2_file_operations usb_raremono_fops = {
.owner = THIS_MODULE,
@@ -295,12 +301,14 @@ static int usb_raremono_probe(struct usb_interface *intf,
struct raremono_device *radio;
int retval = 0;
- radio = devm_kzalloc(&intf->dev, sizeof(struct raremono_device), GFP_KERNEL);
- if (radio)
- radio->buffer = devm_kmalloc(&intf->dev, BUFFER_LENGTH, GFP_KERNEL);
-
- if (!radio || !radio->buffer)
+ radio = kzalloc(sizeof(*radio), GFP_KERNEL);
+ if (!radio)
+ return -ENOMEM;
+ radio->buffer = kmalloc(BUFFER_LENGTH, GFP_KERNEL);
+ if (!radio->buffer) {
+ kfree(radio);
return -ENOMEM;
+ }
radio->usbdev = interface_to_usbdev(intf);
radio->intf = intf;
@@ -324,7 +332,8 @@ static int usb_raremono_probe(struct usb_interface *intf,
if (retval != 3 ||
(get_unaligned_be16(&radio->buffer[1]) & 0xfff) == 0x0242) {
dev_info(&intf->dev, "this is not Thanko's Raremono.\n");
- return -ENODEV;
+ retval = -ENODEV;
+ goto free_mem;
}
dev_info(&intf->dev, "Thanko's Raremono connected: (%04X:%04X)\n",
@@ -333,7 +342,7 @@ static int usb_raremono_probe(struct usb_interface *intf,
retval = v4l2_device_register(&intf->dev, &radio->v4l2_dev);
if (retval < 0) {
dev_err(&intf->dev, "couldn't register v4l2_device\n");
- return retval;
+ goto free_mem;
}
mutex_init(&radio->lock);
@@ -345,6 +354,8 @@ static int usb_raremono_probe(struct usb_interface *intf,
radio->vdev.ioctl_ops = &usb_raremono_ioctl_ops;
radio->vdev.lock = &radio->lock;
radio->vdev.release = video_device_release_empty;
+ radio->vdev.device_caps = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
+ radio->v4l2_dev.release = raremono_device_release;
usb_set_intfdata(intf, &radio->v4l2_dev);
@@ -360,6 +371,10 @@ static int usb_raremono_probe(struct usb_interface *intf,
}
dev_err(&intf->dev, "could not register video device\n");
v4l2_device_unregister(&radio->v4l2_dev);
+
+free_mem:
+ kfree(radio->buffer);
+ kfree(radio);
return retval;
}
diff --git a/drivers/media/radio/radio-sf16fmi.c b/drivers/media/radio/radio-sf16fmi.c
index 434c03338d7f..54a40d60e4fd 100644
--- a/drivers/media/radio/radio-sf16fmi.c
+++ b/drivers/media/radio/radio-sf16fmi.c
@@ -133,8 +133,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strscpy(v->driver, "radio-sf16fmi", sizeof(v->driver));
strscpy(v->card, "SF16-FMI/FMP/FMD radio", sizeof(v->card));
strscpy(v->bus_info, "ISA:radio-sf16fmi", sizeof(v->bus_info));
- v->device_caps = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
- v->capabilities = v->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -345,6 +343,7 @@ static int __init fmi_init(void)
fmi->vdev.fops = &fmi_fops;
fmi->vdev.ioctl_ops = &fmi_ioctl_ops;
fmi->vdev.release = video_device_release_empty;
+ fmi->vdev.device_caps = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
video_set_drvdata(&fmi->vdev, fmi);
mutex_init(&fmi->lock);
diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c
index 645242314a09..b203296de977 100644
--- a/drivers/media/radio/radio-si476x.c
+++ b/drivers/media/radio/radio-si476x.c
@@ -336,19 +336,6 @@ static int si476x_radio_querycap(struct file *file, void *priv,
strscpy(capability->card, DRIVER_CARD, sizeof(capability->card));
snprintf(capability->bus_info, sizeof(capability->bus_info),
"platform:%s", radio->v4l2dev.name);
-
- capability->device_caps = V4L2_CAP_TUNER
- | V4L2_CAP_RADIO
- | V4L2_CAP_HW_FREQ_SEEK;
-
- si476x_core_lock(radio->core);
- if (!si476x_core_is_a_secondary_tuner(radio->core))
- capability->device_caps |= V4L2_CAP_RDS_CAPTURE
- | V4L2_CAP_READWRITE;
- si476x_core_unlock(radio->core);
-
- capability->capabilities = capability->device_caps
- | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -1459,6 +1446,14 @@ static int si476x_radio_probe(struct platform_device *pdev)
radio->videodev.v4l2_dev = &radio->v4l2dev;
radio->videodev.ioctl_ops = &si4761_ioctl_ops;
+ radio->videodev.device_caps = V4L2_CAP_TUNER | V4L2_CAP_RADIO |
+ V4L2_CAP_HW_FREQ_SEEK;
+
+ si476x_core_lock(radio->core);
+ if (!si476x_core_is_a_secondary_tuner(radio->core))
+ radio->videodev.device_caps |= V4L2_CAP_RDS_CAPTURE |
+ V4L2_CAP_READWRITE;
+ si476x_core_unlock(radio->core);
video_set_drvdata(&radio->videodev, radio);
platform_set_drvdata(pdev, radio);
diff --git a/drivers/media/radio/radio-tea5764.c b/drivers/media/radio/radio-tea5764.c
index b740646adc53..877a24e5c577 100644
--- a/drivers/media/radio/radio-tea5764.c
+++ b/drivers/media/radio/radio-tea5764.c
@@ -282,8 +282,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strscpy(v->card, dev->name, sizeof(v->card));
snprintf(v->bus_info, sizeof(v->bus_info),
"I2C:%s", dev_name(&dev->dev));
- v->device_caps = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
- v->capabilities = v->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -465,6 +463,7 @@ static int tea5764_i2c_probe(struct i2c_client *client,
video_set_drvdata(&radio->vdev, radio);
radio->vdev.lock = &radio->mutex;
radio->vdev.v4l2_dev = v4l2_dev;
+ radio->vdev.device_caps = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
/* initialize and power off the chip */
tea5764_i2c_read(radio);
diff --git a/drivers/media/radio/radio-tea5777.c b/drivers/media/radio/radio-tea5777.c
index 49d4beba341e..fb9de7bbcd19 100644
--- a/drivers/media/radio/radio-tea5777.c
+++ b/drivers/media/radio/radio-tea5777.c
@@ -260,9 +260,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strscpy(v->card, tea->card, sizeof(v->card));
strlcat(v->card, " TEA5777", sizeof(v->card));
strscpy(v->bus_info, tea->bus_info, sizeof(v->bus_info));
- v->device_caps = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
- v->device_caps |= V4L2_CAP_HW_FREQ_SEEK;
- v->capabilities = v->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -553,6 +550,8 @@ int radio_tea5777_init(struct radio_tea5777 *tea, struct module *owner)
strscpy(tea->vd.name, tea->v4l2_dev->name, sizeof(tea->vd.name));
tea->vd.lock = &tea->mutex;
tea->vd.v4l2_dev = tea->v4l2_dev;
+ tea->vd.device_caps = V4L2_CAP_TUNER | V4L2_CAP_RADIO |
+ V4L2_CAP_HW_FREQ_SEEK;
tea->fops = tea575x_fops;
tea->fops.owner = owner;
tea->vd.fops = &tea->fops;
diff --git a/drivers/media/radio/radio-timb.c b/drivers/media/radio/radio-timb.c
index 7d196f8ad3b5..948ee3eec914 100644
--- a/drivers/media/radio/radio-timb.c
+++ b/drivers/media/radio/radio-timb.c
@@ -34,8 +34,6 @@ static int timbradio_vidioc_querycap(struct file *file, void *priv,
strscpy(v->driver, DRIVER_NAME, sizeof(v->driver));
strscpy(v->card, "Timberdale Radio", sizeof(v->card));
snprintf(v->bus_info, sizeof(v->bus_info), "platform:"DRIVER_NAME);
- v->device_caps = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
- v->capabilities = v->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -114,6 +112,7 @@ static int timbradio_probe(struct platform_device *pdev)
tr->video_dev.release = video_device_release_empty;
tr->video_dev.minor = -1;
tr->video_dev.lock = &tr->lock;
+ tr->video_dev.device_caps = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
strscpy(tr->v4l2_dev.name, DRIVER_NAME, sizeof(tr->v4l2_dev.name));
err = v4l2_device_register(NULL, &tr->v4l2_dev);
diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c
index 330de50f8920..104ac41c6f96 100644
--- a/drivers/media/radio/radio-wl1273.c
+++ b/drivers/media/radio/radio-wl1273.c
@@ -1284,14 +1284,6 @@ static int wl1273_fm_vidioc_querycap(struct file *file, void *priv,
sizeof(capability->card));
strscpy(capability->bus_info, radio->bus_type,
sizeof(capability->bus_info));
-
- capability->device_caps = V4L2_CAP_HW_FREQ_SEEK |
- V4L2_CAP_TUNER | V4L2_CAP_RADIO | V4L2_CAP_AUDIO |
- V4L2_CAP_RDS_CAPTURE | V4L2_CAP_MODULATOR |
- V4L2_CAP_RDS_OUTPUT;
- capability->capabilities = capability->device_caps |
- V4L2_CAP_DEVICE_CAPS;
-
return 0;
}
@@ -1980,6 +1972,10 @@ static const struct video_device wl1273_viddev_template = {
.name = WL1273_FM_DRIVER_NAME,
.release = wl1273_vdev_release,
.vfl_dir = VFL_DIR_TX,
+ .device_caps = V4L2_CAP_HW_FREQ_SEEK | V4L2_CAP_TUNER |
+ V4L2_CAP_RADIO | V4L2_CAP_AUDIO |
+ V4L2_CAP_RDS_CAPTURE | V4L2_CAP_MODULATOR |
+ V4L2_CAP_RDS_OUTPUT,
};
static int wl1273_fm_radio_remove(struct platform_device *pdev)
diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c
index a3152d646c3a..7d53422b3b56 100644
--- a/drivers/media/radio/si470x/radio-si470x-i2c.c
+++ b/drivers/media/radio/si470x/radio-si470x-i2c.c
@@ -223,10 +223,6 @@ static int si470x_vidioc_querycap(struct file *file, void *priv,
{
strscpy(capability->driver, DRIVER_NAME, sizeof(capability->driver));
strscpy(capability->card, DRIVER_CARD, sizeof(capability->card));
- capability->device_caps = V4L2_CAP_HW_FREQ_SEEK | V4L2_CAP_READWRITE |
- V4L2_CAP_TUNER | V4L2_CAP_RADIO | V4L2_CAP_RDS_CAPTURE;
- capability->capabilities = capability->device_caps | V4L2_CAP_DEVICE_CAPS;
-
return 0;
}
@@ -382,6 +378,9 @@ static int si470x_i2c_probe(struct i2c_client *client,
radio->videodev.lock = &radio->lock;
radio->videodev.v4l2_dev = &radio->v4l2_dev;
radio->videodev.release = video_device_release_empty;
+ radio->videodev.device_caps =
+ V4L2_CAP_HW_FREQ_SEEK | V4L2_CAP_READWRITE | V4L2_CAP_TUNER |
+ V4L2_CAP_RADIO | V4L2_CAP_RDS_CAPTURE;
video_set_drvdata(&radio->videodev, radio);
radio->gpio_reset = devm_gpiod_get_optional(&client->dev, "reset",
diff --git a/drivers/media/radio/si470x/radio-si470x-usb.c b/drivers/media/radio/si470x/radio-si470x-usb.c
index 58e622d57373..49073747b1e7 100644
--- a/drivers/media/radio/si470x/radio-si470x-usb.c
+++ b/drivers/media/radio/si470x/radio-si470x-usb.c
@@ -514,9 +514,6 @@ static int si470x_vidioc_querycap(struct file *file, void *priv,
strscpy(capability->card, DRIVER_CARD, sizeof(capability->card));
usb_make_path(radio->usbdev, capability->bus_info,
sizeof(capability->bus_info));
- capability->device_caps = V4L2_CAP_HW_FREQ_SEEK | V4L2_CAP_READWRITE |
- V4L2_CAP_TUNER | V4L2_CAP_RADIO | V4L2_CAP_RDS_CAPTURE;
- capability->capabilities = capability->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -670,6 +667,9 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
radio->videodev.lock = &radio->lock;
radio->videodev.v4l2_dev = &radio->v4l2_dev;
radio->videodev.release = video_device_release_empty;
+ radio->videodev.device_caps =
+ V4L2_CAP_HW_FREQ_SEEK | V4L2_CAP_READWRITE | V4L2_CAP_TUNER |
+ V4L2_CAP_RADIO | V4L2_CAP_RDS_CAPTURE;
video_set_drvdata(&radio->videodev, radio);
/* get device and chip versions */
diff --git a/drivers/media/radio/si4713/radio-platform-si4713.c b/drivers/media/radio/si4713/radio-platform-si4713.c
index 70d51d3607ff..a7dfe5f55c18 100644
--- a/drivers/media/radio/si4713/radio-platform-si4713.c
+++ b/drivers/media/radio/si4713/radio-platform-si4713.c
@@ -63,9 +63,6 @@ static int radio_si4713_querycap(struct file *file, void *priv,
sizeof(capability->card));
strscpy(capability->bus_info, "platform:radio-si4713",
sizeof(capability->bus_info));
- capability->device_caps = V4L2_CAP_MODULATOR | V4L2_CAP_RDS_OUTPUT;
- capability->capabilities = capability->device_caps | V4L2_CAP_DEVICE_CAPS;
-
return 0;
}
@@ -175,6 +172,7 @@ static int radio_si4713_pdriver_probe(struct platform_device *pdev)
rsdev->radio_dev.ctrl_handler = sd->ctrl_handler;
/* Serialize all access to the si4713 */
rsdev->radio_dev.lock = &rsdev->lock;
+ rsdev->radio_dev.device_caps = V4L2_CAP_MODULATOR | V4L2_CAP_RDS_OUTPUT;
video_set_drvdata(&rsdev->radio_dev, rsdev);
if (video_register_device(&rsdev->radio_dev, VFL_TYPE_RADIO, radio_nr)) {
dev_err(&pdev->dev, "Could not register video device.\n");
diff --git a/drivers/media/radio/si4713/radio-usb-si4713.c b/drivers/media/radio/si4713/radio-usb-si4713.c
index 23065ecce979..33274189c83c 100644
--- a/drivers/media/radio/si4713/radio-usb-si4713.c
+++ b/drivers/media/radio/si4713/radio-usb-si4713.c
@@ -70,9 +70,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strscpy(v->driver, "radio-usb-si4713", sizeof(v->driver));
strscpy(v->card, "Si4713 FM Transmitter", sizeof(v->card));
usb_make_path(radio->usbdev, v->bus_info, sizeof(v->bus_info));
- v->device_caps = V4L2_CAP_MODULATOR | V4L2_CAP_RDS_OUTPUT;
- v->capabilities = v->device_caps | V4L2_CAP_DEVICE_CAPS;
-
return 0;
}
@@ -475,6 +472,7 @@ static int usb_si4713_probe(struct usb_interface *intf,
radio->vdev.lock = &radio->lock;
radio->vdev.release = video_device_release_empty;
radio->vdev.vfl_dir = VFL_DIR_TX;
+ radio->vdev.device_caps = V4L2_CAP_MODULATOR | V4L2_CAP_RDS_OUTPUT;
video_set_drvdata(&radio->vdev, radio);
diff --git a/drivers/media/radio/tea575x.c b/drivers/media/radio/tea575x.c
index 64613dd145a1..b0303cf00387 100644
--- a/drivers/media/radio/tea575x.c
+++ b/drivers/media/radio/tea575x.c
@@ -226,10 +226,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strscpy(v->card, tea->card, sizeof(v->card));
strlcat(v->card, tea->tea5759 ? " TEA5759" : " TEA5757", sizeof(v->card));
strscpy(v->bus_info, tea->bus_info, sizeof(v->bus_info));
- v->device_caps = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
- if (!tea->cannot_read_data)
- v->device_caps |= V4L2_CAP_HW_FREQ_SEEK;
- v->capabilities = v->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -529,6 +525,9 @@ int snd_tea575x_init(struct snd_tea575x *tea, struct module *owner)
strscpy(tea->vd.name, tea->v4l2_dev->name, sizeof(tea->vd.name));
tea->vd.lock = &tea->mutex;
tea->vd.v4l2_dev = tea->v4l2_dev;
+ tea->vd.device_caps = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
+ if (!tea->cannot_read_data)
+ tea->vd.device_caps |= V4L2_CAP_HW_FREQ_SEEK;
tea->fops = tea575x_fops;
tea->fops.owner = owner;
tea->vd.fops = &tea->fops;
diff --git a/drivers/media/radio/wl128x/fmdrv_v4l2.c b/drivers/media/radio/wl128x/fmdrv_v4l2.c
index c80a6df47f5e..1c146d14dbbd 100644
--- a/drivers/media/radio/wl128x/fmdrv_v4l2.c
+++ b/drivers/media/radio/wl128x/fmdrv_v4l2.c
@@ -185,13 +185,6 @@ static int fm_v4l2_vidioc_querycap(struct file *file, void *priv,
strscpy(capability->card, FM_DRV_CARD_SHORT_NAME,
sizeof(capability->card));
sprintf(capability->bus_info, "UART");
- capability->device_caps = V4L2_CAP_HW_FREQ_SEEK | V4L2_CAP_TUNER |
- V4L2_CAP_RADIO | V4L2_CAP_MODULATOR |
- V4L2_CAP_AUDIO | V4L2_CAP_READWRITE |
- V4L2_CAP_RDS_CAPTURE;
- capability->capabilities = capability->device_caps |
- V4L2_CAP_DEVICE_CAPS;
-
return 0;
}
@@ -515,6 +508,9 @@ static const struct video_device fm_viddev_template = {
* but that would affect applications using this driver.
*/
.vfl_dir = VFL_DIR_M2M,
+ .device_caps = V4L2_CAP_HW_FREQ_SEEK | V4L2_CAP_TUNER | V4L2_CAP_RADIO |
+ V4L2_CAP_MODULATOR | V4L2_CAP_AUDIO |
+ V4L2_CAP_READWRITE | V4L2_CAP_RDS_CAPTURE,
};
int fm_v4l2_init_video_device(struct fmdev *fmdev, int radio_nr)
@@ -541,6 +537,7 @@ int fm_v4l2_init_video_device(struct fmdev *fmdev, int radio_nr)
/* Register with V4L2 subsystem as RADIO device */
if (video_register_device(&gradio_dev, VFL_TYPE_RADIO, radio_nr)) {
+ v4l2_device_unregister(&fmdev->v4l2_dev);
fmerr("Could not register video device\n");
return -ENOMEM;
}
@@ -554,6 +551,8 @@ int fm_v4l2_init_video_device(struct fmdev *fmdev, int radio_nr)
if (ret < 0) {
fmerr("(fmdev): Can't init ctrl handler\n");
v4l2_ctrl_handler_free(&fmdev->ctrl_handler);
+ video_unregister_device(fmdev->radio_dev);
+ v4l2_device_unregister(&fmdev->v4l2_dev);
return -EBUSY;
}
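
The two error-path hunks above add the missing unwind steps in fm_v4l2_init_video_device(): if video registration or the control-handler setup fails, the previously registered video and v4l2 devices are now torn down instead of being leaked. The usual shape of that unwind, sketched with a hypothetical mydev structure (an assumption, not code from this patch), is:

/*
 * Illustrative sketch, not part of the patch: every failure after a
 * successful registration step undoes the steps already completed,
 * typically via goto labels in reverse order.
 */
#include <media/v4l2-ctrls.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>

struct mydev {
	struct v4l2_device v4l2_dev;
	struct v4l2_ctrl_handler ctrl_handler;
	struct video_device vdev;
};

static int mydev_register(struct mydev *dev)
{
	int ret;

	ret = v4l2_device_register(NULL, &dev->v4l2_dev);
	if (ret)
		return ret;

	ret = video_register_device(&dev->vdev, VFL_TYPE_RADIO, -1);
	if (ret)
		goto err_v4l2;

	ret = v4l2_ctrl_handler_init(&dev->ctrl_handler, 4);
	if (ret)
		goto err_video;

	return 0;

err_video:
	video_unregister_device(&dev->vdev);
err_v4l2:
	v4l2_device_unregister(&dev->v4l2_dev);
	return ret;
}
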
diff --git a/drivers/media/rc/bpf-lirc.c b/drivers/media/rc/bpf-lirc.c
index ee657003c1a1..0a0ce620e4a2 100644
--- a/drivers/media/rc/bpf-lirc.c
+++ b/drivers/media/rc/bpf-lirc.c
@@ -8,6 +8,9 @@
#include <linux/bpf_lirc.h>
#include "rc-core-priv.h"
+#define lirc_rcu_dereference(p) \
+ rcu_dereference_protected(p, lockdep_is_held(&ir_raw_handler_lock))
+
/*
* BPF interface for raw IR
*/
@@ -136,7 +139,7 @@ const struct bpf_verifier_ops lirc_mode2_verifier_ops = {
static int lirc_bpf_attach(struct rc_dev *rcdev, struct bpf_prog *prog)
{
- struct bpf_prog_array __rcu *old_array;
+ struct bpf_prog_array *old_array;
struct bpf_prog_array *new_array;
struct ir_raw_event_ctrl *raw;
int ret;
@@ -154,12 +157,12 @@ static int lirc_bpf_attach(struct rc_dev *rcdev, struct bpf_prog *prog)
goto unlock;
}
- if (raw->progs && bpf_prog_array_length(raw->progs) >= BPF_MAX_PROGS) {
+ old_array = lirc_rcu_dereference(raw->progs);
+ if (old_array && bpf_prog_array_length(old_array) >= BPF_MAX_PROGS) {
ret = -E2BIG;
goto unlock;
}
- old_array = raw->progs;
ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array);
if (ret < 0)
goto unlock;
@@ -174,7 +177,7 @@ unlock:
static int lirc_bpf_detach(struct rc_dev *rcdev, struct bpf_prog *prog)
{
- struct bpf_prog_array __rcu *old_array;
+ struct bpf_prog_array *old_array;
struct bpf_prog_array *new_array;
struct ir_raw_event_ctrl *raw;
int ret;
@@ -192,7 +195,7 @@ static int lirc_bpf_detach(struct rc_dev *rcdev, struct bpf_prog *prog)
goto unlock;
}
- old_array = raw->progs;
+ old_array = lirc_rcu_dereference(raw->progs);
ret = bpf_prog_array_copy(old_array, prog, NULL, &new_array);
/*
* Do not use bpf_prog_array_delete_safe() as we would end up
@@ -223,21 +226,22 @@ void lirc_bpf_run(struct rc_dev *rcdev, u32 sample)
/*
* This should be called once the rc thread has been stopped, so there can be
* no concurrent bpf execution.
+ *
+ * Should be called with the ir_raw_handler_lock held.
*/
void lirc_bpf_free(struct rc_dev *rcdev)
{
struct bpf_prog_array_item *item;
+ struct bpf_prog_array *array;
- if (!rcdev->raw->progs)
+ array = lirc_rcu_dereference(rcdev->raw->progs);
+ if (!array)
return;
- item = rcu_dereference(rcdev->raw->progs)->items;
- while (item->prog) {
+ for (item = array->items; item->prog; item++)
bpf_prog_put(item->prog);
- item++;
- }
- bpf_prog_array_free(rcdev->raw->progs);
+ bpf_prog_array_free(array);
}
int lirc_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog)
@@ -290,7 +294,7 @@ int lirc_prog_detach(const union bpf_attr *attr)
int lirc_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr)
{
__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
- struct bpf_prog_array __rcu *progs;
+ struct bpf_prog_array *progs;
struct rc_dev *rcdev;
u32 cnt, flags = 0;
int ret;
@@ -311,7 +315,7 @@ int lirc_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr)
if (ret)
goto put;
- progs = rcdev->raw->progs;
+ progs = lirc_rcu_dereference(rcdev->raw->progs);
cnt = progs ? bpf_prog_array_length(progs) : 0;
if (copy_to_user(&uattr->query.prog_cnt, &cnt, sizeof(cnt))) {
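
The bpf-lirc.c changes above replace direct reads of the __rcu-annotated raw->progs pointer with rcu_dereference_protected() through the new lirc_rcu_dereference() helper, documenting (and letting lockdep/sparse check) that every such read happens with ir_raw_handler_lock held. The general pattern looks like the sketch below; the cfg names and the mutex are illustrative assumptions, not code from this patch.

/*
 * Illustrative sketch, not part of the patch: an __rcu pointer that is
 * only updated under a mutex may be read on the update side with
 * rcu_dereference_protected(), passing a lockdep expression that proves
 * the lock is held.
 */
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct cfg {
	int value;
	struct rcu_head rcu;
};

static DEFINE_MUTEX(cfg_lock);
static struct cfg __rcu *active_cfg;

/* Accessor mirroring lirc_rcu_dereference(): valid only with cfg_lock held. */
#define cfg_dereference() \
	rcu_dereference_protected(active_cfg, lockdep_is_held(&cfg_lock))

static int cfg_replace(int value)
{
	struct cfg *old, *new;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return -ENOMEM;
	new->value = value;

	mutex_lock(&cfg_lock);
	old = cfg_dereference();	/* update-side read, no RCU warning */
	rcu_assign_pointer(active_cfg, new);
	mutex_unlock(&cfg_lock);

	if (old)
		kfree_rcu(old, rcu);
	return 0;
}
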
diff --git a/drivers/media/rc/ir-spi.c b/drivers/media/rc/ir-spi.c
index 66334e8d63ba..c58f2d38a458 100644
--- a/drivers/media/rc/ir-spi.c
+++ b/drivers/media/rc/ir-spi.c
@@ -161,6 +161,7 @@ static const struct of_device_id ir_spi_of_match[] = {
{ .compatible = "ir-spi-led" },
{},
};
+MODULE_DEVICE_TABLE(of, ir_spi_of_match);
static struct spi_driver ir_spi_driver = {
.probe = ir_spi_probe,
diff --git a/drivers/media/rc/keymaps/rc-adstech-dvb-t-pci.c b/drivers/media/rc/keymaps/rc-adstech-dvb-t-pci.c
index 732687ce0637..0a867ca90038 100644
--- a/drivers/media/rc/keymaps/rc-adstech-dvb-t-pci.c
+++ b/drivers/media/rc/keymaps/rc-adstech-dvb-t-pci.c
@@ -12,16 +12,16 @@
static struct rc_map_table adstech_dvb_t_pci[] = {
/* Keys 0 to 9 */
- { 0x4d, KEY_0 },
- { 0x57, KEY_1 },
- { 0x4f, KEY_2 },
- { 0x53, KEY_3 },
- { 0x56, KEY_4 },
- { 0x4e, KEY_5 },
- { 0x5e, KEY_6 },
- { 0x54, KEY_7 },
- { 0x4c, KEY_8 },
- { 0x5c, KEY_9 },
+ { 0x4d, KEY_NUMERIC_0 },
+ { 0x57, KEY_NUMERIC_1 },
+ { 0x4f, KEY_NUMERIC_2 },
+ { 0x53, KEY_NUMERIC_3 },
+ { 0x56, KEY_NUMERIC_4 },
+ { 0x4e, KEY_NUMERIC_5 },
+ { 0x5e, KEY_NUMERIC_6 },
+ { 0x54, KEY_NUMERIC_7 },
+ { 0x4c, KEY_NUMERIC_8 },
+ { 0x5c, KEY_NUMERIC_9 },
{ 0x5b, KEY_POWER },
{ 0x5f, KEY_MUTE },
diff --git a/drivers/media/rc/keymaps/rc-alink-dtu-m.c b/drivers/media/rc/keymaps/rc-alink-dtu-m.c
index 530af333af8e..8a2ccaf3b817 100644
--- a/drivers/media/rc/keymaps/rc-alink-dtu-m.c
+++ b/drivers/media/rc/keymaps/rc-alink-dtu-m.c
@@ -11,22 +11,22 @@
/* A-Link DTU(m) slim remote, 6 rows, 3 columns. */
static struct rc_map_table alink_dtu_m[] = {
{ 0x0800, KEY_VOLUMEUP },
- { 0x0801, KEY_1 },
- { 0x0802, KEY_3 },
- { 0x0803, KEY_7 },
- { 0x0804, KEY_9 },
+ { 0x0801, KEY_NUMERIC_1 },
+ { 0x0802, KEY_NUMERIC_3 },
+ { 0x0803, KEY_NUMERIC_7 },
+ { 0x0804, KEY_NUMERIC_9 },
{ 0x0805, KEY_NEW }, /* symbol: PIP */
- { 0x0806, KEY_0 },
+ { 0x0806, KEY_NUMERIC_0 },
{ 0x0807, KEY_CHANNEL }, /* JUMP */
- { 0x080d, KEY_5 },
- { 0x080f, KEY_2 },
+ { 0x080d, KEY_NUMERIC_5 },
+ { 0x080f, KEY_NUMERIC_2 },
{ 0x0812, KEY_POWER2 },
{ 0x0814, KEY_CHANNELUP },
{ 0x0816, KEY_VOLUMEDOWN },
- { 0x0818, KEY_6 },
+ { 0x0818, KEY_NUMERIC_6 },
{ 0x081a, KEY_MUTE },
- { 0x081b, KEY_8 },
- { 0x081c, KEY_4 },
+ { 0x081b, KEY_NUMERIC_8 },
+ { 0x081c, KEY_NUMERIC_4 },
{ 0x081d, KEY_CHANNELDOWN },
};
diff --git a/drivers/media/rc/keymaps/rc-anysee.c b/drivers/media/rc/keymaps/rc-anysee.c
index 9d1eee1f0515..34da03c46104 100644
--- a/drivers/media/rc/keymaps/rc-anysee.c
+++ b/drivers/media/rc/keymaps/rc-anysee.c
@@ -9,16 +9,16 @@
#include <linux/module.h>
static struct rc_map_table anysee[] = {
- { 0x0800, KEY_0 },
- { 0x0801, KEY_1 },
- { 0x0802, KEY_2 },
- { 0x0803, KEY_3 },
- { 0x0804, KEY_4 },
- { 0x0805, KEY_5 },
- { 0x0806, KEY_6 },
- { 0x0807, KEY_7 },
- { 0x0808, KEY_8 },
- { 0x0809, KEY_9 },
+ { 0x0800, KEY_NUMERIC_0 },
+ { 0x0801, KEY_NUMERIC_1 },
+ { 0x0802, KEY_NUMERIC_2 },
+ { 0x0803, KEY_NUMERIC_3 },
+ { 0x0804, KEY_NUMERIC_4 },
+ { 0x0805, KEY_NUMERIC_5 },
+ { 0x0806, KEY_NUMERIC_6 },
+ { 0x0807, KEY_NUMERIC_7 },
+ { 0x0808, KEY_NUMERIC_8 },
+ { 0x0809, KEY_NUMERIC_9 },
{ 0x080a, KEY_POWER2 }, /* [red power button] */
{ 0x080b, KEY_VIDEO }, /* [*] MODE */
{ 0x080c, KEY_CHANNEL }, /* [symbol counterclockwise arrow] */
diff --git a/drivers/media/rc/keymaps/rc-apac-viewcomp.c b/drivers/media/rc/keymaps/rc-apac-viewcomp.c
index af2e7fdc7b85..bdc47e25d46e 100644
--- a/drivers/media/rc/keymaps/rc-apac-viewcomp.c
+++ b/drivers/media/rc/keymaps/rc-apac-viewcomp.c
@@ -12,16 +12,16 @@
static struct rc_map_table apac_viewcomp[] = {
- { 0x01, KEY_1 },
- { 0x02, KEY_2 },
- { 0x03, KEY_3 },
- { 0x04, KEY_4 },
- { 0x05, KEY_5 },
- { 0x06, KEY_6 },
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x09, KEY_9 },
- { 0x00, KEY_0 },
+ { 0x01, KEY_NUMERIC_1 },
+ { 0x02, KEY_NUMERIC_2 },
+ { 0x03, KEY_NUMERIC_3 },
+ { 0x04, KEY_NUMERIC_4 },
+ { 0x05, KEY_NUMERIC_5 },
+ { 0x06, KEY_NUMERIC_6 },
+ { 0x07, KEY_NUMERIC_7 },
+ { 0x08, KEY_NUMERIC_8 },
+ { 0x09, KEY_NUMERIC_9 },
+ { 0x00, KEY_NUMERIC_0 },
{ 0x17, KEY_LAST }, /* +100 */
{ 0x0a, KEY_LIST }, /* recall */
diff --git a/drivers/media/rc/keymaps/rc-astrometa-t2hybrid.c b/drivers/media/rc/keymaps/rc-astrometa-t2hybrid.c
index 727e35c31039..1d322137898e 100644
--- a/drivers/media/rc/keymaps/rc-astrometa-t2hybrid.c
+++ b/drivers/media/rc/keymaps/rc-astrometa-t2hybrid.c
@@ -21,21 +21,21 @@ static struct rc_map_table t2hybrid[] = {
{ 0x40, KEY_ZOOM }, /* Fullscreen */
{ 0x1e, KEY_VOLUMEUP },
- { 0x12, KEY_0 },
+ { 0x12, KEY_NUMERIC_0 },
{ 0x02, KEY_CHANNELDOWN },
{ 0x1c, KEY_AGAIN }, /* Recall */
- { 0x09, KEY_1 },
- { 0x1d, KEY_2 },
- { 0x1f, KEY_3 },
+ { 0x09, KEY_NUMERIC_1 },
+ { 0x1d, KEY_NUMERIC_2 },
+ { 0x1f, KEY_NUMERIC_3 },
- { 0x0d, KEY_4 },
- { 0x19, KEY_5 },
- { 0x1b, KEY_6 },
+ { 0x0d, KEY_NUMERIC_4 },
+ { 0x19, KEY_NUMERIC_5 },
+ { 0x1b, KEY_NUMERIC_6 },
- { 0x11, KEY_7 },
- { 0x15, KEY_8 },
- { 0x17, KEY_9 },
+ { 0x11, KEY_NUMERIC_7 },
+ { 0x15, KEY_NUMERIC_8 },
+ { 0x17, KEY_NUMERIC_9 },
};
static struct rc_map_list t2hybrid_map = {
diff --git a/drivers/media/rc/keymaps/rc-asus-pc39.c b/drivers/media/rc/keymaps/rc-asus-pc39.c
index 13a935c3ac59..7a4b3a6e3a49 100644
--- a/drivers/media/rc/keymaps/rc-asus-pc39.c
+++ b/drivers/media/rc/keymaps/rc-asus-pc39.c
@@ -16,16 +16,16 @@
static struct rc_map_table asus_pc39[] = {
/* Keys 0 to 9 */
- { 0x082a, KEY_0 },
- { 0x0816, KEY_1 },
- { 0x0812, KEY_2 },
- { 0x0814, KEY_3 },
- { 0x0836, KEY_4 },
- { 0x0832, KEY_5 },
- { 0x0834, KEY_6 },
- { 0x080e, KEY_7 },
- { 0x080a, KEY_8 },
- { 0x080c, KEY_9 },
+ { 0x082a, KEY_NUMERIC_0 },
+ { 0x0816, KEY_NUMERIC_1 },
+ { 0x0812, KEY_NUMERIC_2 },
+ { 0x0814, KEY_NUMERIC_3 },
+ { 0x0836, KEY_NUMERIC_4 },
+ { 0x0832, KEY_NUMERIC_5 },
+ { 0x0834, KEY_NUMERIC_6 },
+ { 0x080e, KEY_NUMERIC_7 },
+ { 0x080a, KEY_NUMERIC_8 },
+ { 0x080c, KEY_NUMERIC_9 },
{ 0x0801, KEY_RADIO }, /* radio */
{ 0x083c, KEY_MENU }, /* dvd/menu */
diff --git a/drivers/media/rc/keymaps/rc-asus-ps3-100.c b/drivers/media/rc/keymaps/rc-asus-ps3-100.c
index 7f836fcc68ac..09b60fa335e3 100644
--- a/drivers/media/rc/keymaps/rc-asus-ps3-100.c
+++ b/drivers/media/rc/keymaps/rc-asus-ps3-100.c
@@ -20,16 +20,16 @@ static struct rc_map_table asus_ps3_100[] = {
{ 0x0807, KEY_GREEN }, /* green */
/* Keys 0 to 9 */
- { 0x082a, KEY_0 },
- { 0x0816, KEY_1 },
- { 0x0812, KEY_2 },
- { 0x0814, KEY_3 },
- { 0x0836, KEY_4 },
- { 0x0832, KEY_5 },
- { 0x0834, KEY_6 },
- { 0x080e, KEY_7 },
- { 0x080a, KEY_8 },
- { 0x080c, KEY_9 },
+ { 0x082a, KEY_NUMERIC_0 },
+ { 0x0816, KEY_NUMERIC_1 },
+ { 0x0812, KEY_NUMERIC_2 },
+ { 0x0814, KEY_NUMERIC_3 },
+ { 0x0836, KEY_NUMERIC_4 },
+ { 0x0832, KEY_NUMERIC_5 },
+ { 0x0834, KEY_NUMERIC_6 },
+ { 0x080e, KEY_NUMERIC_7 },
+ { 0x080a, KEY_NUMERIC_8 },
+ { 0x080c, KEY_NUMERIC_9 },
{ 0x0815, KEY_VOLUMEUP },
{ 0x0826, KEY_VOLUMEDOWN },
diff --git a/drivers/media/rc/keymaps/rc-ati-x10.c b/drivers/media/rc/keymaps/rc-ati-x10.c
index 2f800dd5aa19..31fe1106b708 100644
--- a/drivers/media/rc/keymaps/rc-ati-x10.c
+++ b/drivers/media/rc/keymaps/rc-ati-x10.c
@@ -49,18 +49,18 @@ static struct rc_map_table ati_x10[] = {
* has problems with keycodes greater than 255, so avoid those high
* keycodes in default maps.
*/
- { 0x0d, KEY_1 },
- { 0x0e, KEY_2 },
- { 0x0f, KEY_3 },
- { 0x10, KEY_4 },
- { 0x11, KEY_5 },
- { 0x12, KEY_6 },
- { 0x13, KEY_7 },
- { 0x14, KEY_8 },
- { 0x15, KEY_9 },
+ { 0x0d, KEY_NUMERIC_1 },
+ { 0x0e, KEY_NUMERIC_2 },
+ { 0x0f, KEY_NUMERIC_3 },
+ { 0x10, KEY_NUMERIC_4 },
+ { 0x11, KEY_NUMERIC_5 },
+ { 0x12, KEY_NUMERIC_6 },
+ { 0x13, KEY_NUMERIC_7 },
+ { 0x14, KEY_NUMERIC_8 },
+ { 0x15, KEY_NUMERIC_9 },
{ 0x16, KEY_MENU }, /* "menu": DVD root menu */
/* KEY_NUMERIC_STAR? */
- { 0x17, KEY_0 },
+ { 0x17, KEY_NUMERIC_0 },
{ 0x18, KEY_SETUP }, /* "check": DVD setup menu */
/* KEY_NUMERIC_POUND? */
diff --git a/drivers/media/rc/keymaps/rc-avermedia-a16d.c b/drivers/media/rc/keymaps/rc-avermedia-a16d.c
index 5549c043cfe4..6467ff6e48d7 100644
--- a/drivers/media/rc/keymaps/rc-avermedia-a16d.c
+++ b/drivers/media/rc/keymaps/rc-avermedia-a16d.c
@@ -11,17 +11,17 @@
static struct rc_map_table avermedia_a16d[] = {
{ 0x20, KEY_LIST},
{ 0x00, KEY_POWER},
- { 0x28, KEY_1},
- { 0x18, KEY_2},
- { 0x38, KEY_3},
- { 0x24, KEY_4},
- { 0x14, KEY_5},
- { 0x34, KEY_6},
- { 0x2c, KEY_7},
- { 0x1c, KEY_8},
- { 0x3c, KEY_9},
+ { 0x28, KEY_NUMERIC_1},
+ { 0x18, KEY_NUMERIC_2},
+ { 0x38, KEY_NUMERIC_3},
+ { 0x24, KEY_NUMERIC_4},
+ { 0x14, KEY_NUMERIC_5},
+ { 0x34, KEY_NUMERIC_6},
+ { 0x2c, KEY_NUMERIC_7},
+ { 0x1c, KEY_NUMERIC_8},
+ { 0x3c, KEY_NUMERIC_9},
{ 0x12, KEY_SUBTITLE},
- { 0x22, KEY_0},
+ { 0x22, KEY_NUMERIC_0},
{ 0x32, KEY_REWIND},
{ 0x3a, KEY_SHUFFLE},
{ 0x02, KEY_PRINT},
diff --git a/drivers/media/rc/keymaps/rc-avermedia-cardbus.c b/drivers/media/rc/keymaps/rc-avermedia-cardbus.c
index 74edcd82e685..54fc6d9022c2 100644
--- a/drivers/media/rc/keymaps/rc-avermedia-cardbus.c
+++ b/drivers/media/rc/keymaps/rc-avermedia-cardbus.c
@@ -15,19 +15,19 @@ static struct rc_map_table avermedia_cardbus[] = {
{ 0x01, KEY_TUNER }, /* TV/FM */
{ 0x03, KEY_TEXT }, /* Teletext */
{ 0x04, KEY_EPG },
- { 0x05, KEY_1 },
- { 0x06, KEY_2 },
- { 0x07, KEY_3 },
+ { 0x05, KEY_NUMERIC_1 },
+ { 0x06, KEY_NUMERIC_2 },
+ { 0x07, KEY_NUMERIC_3 },
{ 0x08, KEY_AUDIO },
- { 0x09, KEY_4 },
- { 0x0a, KEY_5 },
- { 0x0b, KEY_6 },
+ { 0x09, KEY_NUMERIC_4 },
+ { 0x0a, KEY_NUMERIC_5 },
+ { 0x0b, KEY_NUMERIC_6 },
{ 0x0c, KEY_ZOOM }, /* Full screen */
- { 0x0d, KEY_7 },
- { 0x0e, KEY_8 },
- { 0x0f, KEY_9 },
+ { 0x0d, KEY_NUMERIC_7 },
+ { 0x0e, KEY_NUMERIC_8 },
+ { 0x0f, KEY_NUMERIC_9 },
{ 0x10, KEY_PAGEUP }, /* 16-CH PREV */
- { 0x11, KEY_0 },
+ { 0x11, KEY_NUMERIC_0 },
{ 0x12, KEY_INFO },
{ 0x13, KEY_AGAIN }, /* CH RTN - channel return */
{ 0x14, KEY_MUTE },
diff --git a/drivers/media/rc/keymaps/rc-avermedia-dvbt.c b/drivers/media/rc/keymaps/rc-avermedia-dvbt.c
index 796184160a48..92c6df3360b3 100644
--- a/drivers/media/rc/keymaps/rc-avermedia-dvbt.c
+++ b/drivers/media/rc/keymaps/rc-avermedia-dvbt.c
@@ -11,16 +11,16 @@
/* Matt Jesson <dvb@jesson.eclipse.co.uk */
static struct rc_map_table avermedia_dvbt[] = {
- { 0x28, KEY_0 }, /* '0' / 'enter' */
- { 0x22, KEY_1 }, /* '1' */
- { 0x12, KEY_2 }, /* '2' / 'up arrow' */
- { 0x32, KEY_3 }, /* '3' */
- { 0x24, KEY_4 }, /* '4' / 'left arrow' */
- { 0x14, KEY_5 }, /* '5' */
- { 0x34, KEY_6 }, /* '6' / 'right arrow' */
- { 0x26, KEY_7 }, /* '7' */
- { 0x16, KEY_8 }, /* '8' / 'down arrow' */
- { 0x36, KEY_9 }, /* '9' */
+ { 0x28, KEY_NUMERIC_0 }, /* '0' / 'enter' */
+ { 0x22, KEY_NUMERIC_1 }, /* '1' */
+ { 0x12, KEY_NUMERIC_2 }, /* '2' / 'up arrow' */
+ { 0x32, KEY_NUMERIC_3 }, /* '3' */
+ { 0x24, KEY_NUMERIC_4 }, /* '4' / 'left arrow' */
+ { 0x14, KEY_NUMERIC_5 }, /* '5' */
+ { 0x34, KEY_NUMERIC_6 }, /* '6' / 'right arrow' */
+ { 0x26, KEY_NUMERIC_7 }, /* '7' */
+ { 0x16, KEY_NUMERIC_8 }, /* '8' / 'down arrow' */
+ { 0x36, KEY_NUMERIC_9 }, /* '9' */
{ 0x20, KEY_VIDEO }, /* 'source' */
{ 0x10, KEY_TEXT }, /* 'teletext' */
diff --git a/drivers/media/rc/keymaps/rc-avermedia-m135a.c b/drivers/media/rc/keymaps/rc-avermedia-m135a.c
index d275d98d066a..311ddeb061ca 100644
--- a/drivers/media/rc/keymaps/rc-avermedia-m135a.c
+++ b/drivers/media/rc/keymaps/rc-avermedia-m135a.c
@@ -24,16 +24,16 @@ static struct rc_map_table avermedia_m135a[] = {
{ 0x022e, KEY_DOT }, /* '.' */
{ 0x0201, KEY_MODE }, /* TV/FM or SOURCE */
- { 0x0205, KEY_1 },
- { 0x0206, KEY_2 },
- { 0x0207, KEY_3 },
- { 0x0209, KEY_4 },
- { 0x020a, KEY_5 },
- { 0x020b, KEY_6 },
- { 0x020d, KEY_7 },
- { 0x020e, KEY_8 },
- { 0x020f, KEY_9 },
- { 0x0211, KEY_0 },
+ { 0x0205, KEY_NUMERIC_1 },
+ { 0x0206, KEY_NUMERIC_2 },
+ { 0x0207, KEY_NUMERIC_3 },
+ { 0x0209, KEY_NUMERIC_4 },
+ { 0x020a, KEY_NUMERIC_5 },
+ { 0x020b, KEY_NUMERIC_6 },
+ { 0x020d, KEY_NUMERIC_7 },
+ { 0x020e, KEY_NUMERIC_8 },
+ { 0x020f, KEY_NUMERIC_9 },
+ { 0x0211, KEY_NUMERIC_0 },
{ 0x0213, KEY_RIGHT }, /* -> or L */
{ 0x0212, KEY_LEFT }, /* <- or R */
@@ -70,17 +70,17 @@ static struct rc_map_table avermedia_m135a[] = {
{ 0x0406, KEY_MUTE },
{ 0x0408, KEY_MODE }, /* TV/FM */
- { 0x0409, KEY_1 },
- { 0x040a, KEY_2 },
- { 0x040b, KEY_3 },
- { 0x040c, KEY_4 },
- { 0x040d, KEY_5 },
- { 0x040e, KEY_6 },
- { 0x040f, KEY_7 },
- { 0x0410, KEY_8 },
- { 0x0411, KEY_9 },
+ { 0x0409, KEY_NUMERIC_1 },
+ { 0x040a, KEY_NUMERIC_2 },
+ { 0x040b, KEY_NUMERIC_3 },
+ { 0x040c, KEY_NUMERIC_4 },
+ { 0x040d, KEY_NUMERIC_5 },
+ { 0x040e, KEY_NUMERIC_6 },
+ { 0x040f, KEY_NUMERIC_7 },
+ { 0x0410, KEY_NUMERIC_8 },
+ { 0x0411, KEY_NUMERIC_9 },
{ 0x044c, KEY_DOT }, /* '.' */
- { 0x0412, KEY_0 },
+ { 0x0412, KEY_NUMERIC_0 },
{ 0x0407, KEY_REFRESH }, /* Refresh/Reload */
{ 0x0413, KEY_AUDIO },
diff --git a/drivers/media/rc/keymaps/rc-avermedia-m733a-rm-k6.c b/drivers/media/rc/keymaps/rc-avermedia-m733a-rm-k6.c
index 6a70aba92dfb..a970ed5a090b 100644
--- a/drivers/media/rc/keymaps/rc-avermedia-m733a-rm-k6.c
+++ b/drivers/media/rc/keymaps/rc-avermedia-m733a-rm-k6.c
@@ -18,17 +18,17 @@ static struct rc_map_table avermedia_m733a_rm_k6[] = {
{ 0x0406, KEY_MUTE },
{ 0x0408, KEY_MODE }, /* TV/FM */
- { 0x0409, KEY_1 },
- { 0x040a, KEY_2 },
- { 0x040b, KEY_3 },
- { 0x040c, KEY_4 },
- { 0x040d, KEY_5 },
- { 0x040e, KEY_6 },
- { 0x040f, KEY_7 },
- { 0x0410, KEY_8 },
- { 0x0411, KEY_9 },
+ { 0x0409, KEY_NUMERIC_1 },
+ { 0x040a, KEY_NUMERIC_2 },
+ { 0x040b, KEY_NUMERIC_3 },
+ { 0x040c, KEY_NUMERIC_4 },
+ { 0x040d, KEY_NUMERIC_5 },
+ { 0x040e, KEY_NUMERIC_6 },
+ { 0x040f, KEY_NUMERIC_7 },
+ { 0x0410, KEY_NUMERIC_8 },
+ { 0x0411, KEY_NUMERIC_9 },
{ 0x044c, KEY_DOT }, /* '.' */
- { 0x0412, KEY_0 },
+ { 0x0412, KEY_NUMERIC_0 },
{ 0x0407, KEY_REFRESH }, /* Refresh/Reload */
{ 0x0413, KEY_AUDIO },
diff --git a/drivers/media/rc/keymaps/rc-avermedia-rm-ks.c b/drivers/media/rc/keymaps/rc-avermedia-rm-ks.c
index 61348894c93b..cf8a4fd107f4 100644
--- a/drivers/media/rc/keymaps/rc-avermedia-rm-ks.c
+++ b/drivers/media/rc/keymaps/rc-avermedia-rm-ks.c
@@ -20,16 +20,16 @@ static struct rc_map_table avermedia_rm_ks[] = {
{ 0x0506, KEY_MUTE }, /* Mute */
{ 0x0507, KEY_AGAIN }, /* Recall */
{ 0x0508, KEY_VIDEO }, /* Source */
- { 0x0509, KEY_1 }, /* 1 */
- { 0x050a, KEY_2 }, /* 2 */
- { 0x050b, KEY_3 }, /* 3 */
- { 0x050c, KEY_4 }, /* 4 */
- { 0x050d, KEY_5 }, /* 5 */
- { 0x050e, KEY_6 }, /* 6 */
- { 0x050f, KEY_7 }, /* 7 */
- { 0x0510, KEY_8 }, /* 8 */
- { 0x0511, KEY_9 }, /* 9 */
- { 0x0512, KEY_0 }, /* 0 */
+ { 0x0509, KEY_NUMERIC_1 }, /* 1 */
+ { 0x050a, KEY_NUMERIC_2 }, /* 2 */
+ { 0x050b, KEY_NUMERIC_3 }, /* 3 */
+ { 0x050c, KEY_NUMERIC_4 }, /* 4 */
+ { 0x050d, KEY_NUMERIC_5 }, /* 5 */
+ { 0x050e, KEY_NUMERIC_6 }, /* 6 */
+ { 0x050f, KEY_NUMERIC_7 }, /* 7 */
+ { 0x0510, KEY_NUMERIC_8 }, /* 8 */
+ { 0x0511, KEY_NUMERIC_9 }, /* 9 */
+ { 0x0512, KEY_NUMERIC_0 }, /* 0 */
{ 0x0513, KEY_AUDIO }, /* Audio */
{ 0x0515, KEY_EPG }, /* EPG */
{ 0x0516, KEY_PLAYPAUSE }, /* Play/Pause */
diff --git a/drivers/media/rc/keymaps/rc-avermedia.c b/drivers/media/rc/keymaps/rc-avermedia.c
index 631ff52564f0..f96f229b70bb 100644
--- a/drivers/media/rc/keymaps/rc-avermedia.c
+++ b/drivers/media/rc/keymaps/rc-avermedia.c
@@ -11,16 +11,16 @@
/* Alex Hermann <gaaf@gmx.net> */
static struct rc_map_table avermedia[] = {
- { 0x28, KEY_1 },
- { 0x18, KEY_2 },
- { 0x38, KEY_3 },
- { 0x24, KEY_4 },
- { 0x14, KEY_5 },
- { 0x34, KEY_6 },
- { 0x2c, KEY_7 },
- { 0x1c, KEY_8 },
- { 0x3c, KEY_9 },
- { 0x22, KEY_0 },
+ { 0x28, KEY_NUMERIC_1 },
+ { 0x18, KEY_NUMERIC_2 },
+ { 0x38, KEY_NUMERIC_3 },
+ { 0x24, KEY_NUMERIC_4 },
+ { 0x14, KEY_NUMERIC_5 },
+ { 0x34, KEY_NUMERIC_6 },
+ { 0x2c, KEY_NUMERIC_7 },
+ { 0x1c, KEY_NUMERIC_8 },
+ { 0x3c, KEY_NUMERIC_9 },
+ { 0x22, KEY_NUMERIC_0 },
{ 0x20, KEY_TV }, /* TV/FM */
{ 0x10, KEY_CD }, /* CD */
diff --git a/drivers/media/rc/keymaps/rc-avertv-303.c b/drivers/media/rc/keymaps/rc-avertv-303.c
index 47ca8b7ea532..a3e2e945c769 100644
--- a/drivers/media/rc/keymaps/rc-avertv-303.c
+++ b/drivers/media/rc/keymaps/rc-avertv-303.c
@@ -11,16 +11,16 @@
/* AVERTV STUDIO 303 Remote */
static struct rc_map_table avertv_303[] = {
- { 0x2a, KEY_1 },
- { 0x32, KEY_2 },
- { 0x3a, KEY_3 },
- { 0x4a, KEY_4 },
- { 0x52, KEY_5 },
- { 0x5a, KEY_6 },
- { 0x6a, KEY_7 },
- { 0x72, KEY_8 },
- { 0x7a, KEY_9 },
- { 0x0e, KEY_0 },
+ { 0x2a, KEY_NUMERIC_1 },
+ { 0x32, KEY_NUMERIC_2 },
+ { 0x3a, KEY_NUMERIC_3 },
+ { 0x4a, KEY_NUMERIC_4 },
+ { 0x52, KEY_NUMERIC_5 },
+ { 0x5a, KEY_NUMERIC_6 },
+ { 0x6a, KEY_NUMERIC_7 },
+ { 0x72, KEY_NUMERIC_8 },
+ { 0x7a, KEY_NUMERIC_9 },
+ { 0x0e, KEY_NUMERIC_0 },
{ 0x02, KEY_POWER },
{ 0x22, KEY_VIDEO },
diff --git a/drivers/media/rc/keymaps/rc-azurewave-ad-tu700.c b/drivers/media/rc/keymaps/rc-azurewave-ad-tu700.c
index 8e7e95306a5c..5fc8e4cd102e 100644
--- a/drivers/media/rc/keymaps/rc-azurewave-ad-tu700.c
+++ b/drivers/media/rc/keymaps/rc-azurewave-ad-tu700.c
@@ -10,18 +10,18 @@
static struct rc_map_table azurewave_ad_tu700[] = {
{ 0x0000, KEY_TAB }, /* Tab */
- { 0x0001, KEY_2 },
+ { 0x0001, KEY_NUMERIC_2 },
{ 0x0002, KEY_CHANNELDOWN },
- { 0x0003, KEY_1 },
+ { 0x0003, KEY_NUMERIC_1 },
{ 0x0004, KEY_MENU }, /* Record List */
{ 0x0005, KEY_CHANNELUP },
- { 0x0006, KEY_3 },
+ { 0x0006, KEY_NUMERIC_3 },
{ 0x0007, KEY_SLEEP }, /* Hibernate */
{ 0x0008, KEY_VIDEO }, /* A/V */
- { 0x0009, KEY_4 },
+ { 0x0009, KEY_NUMERIC_4 },
{ 0x000a, KEY_VOLUMEDOWN },
{ 0x000c, KEY_CANCEL }, /* Cancel */
- { 0x000d, KEY_7 },
+ { 0x000d, KEY_NUMERIC_7 },
{ 0x000e, KEY_AGAIN }, /* Recall */
{ 0x000f, KEY_TEXT }, /* Teletext */
{ 0x0010, KEY_MUTE },
@@ -29,17 +29,17 @@ static struct rc_map_table azurewave_ad_tu700[] = {
{ 0x0012, KEY_FASTFORWARD }, /* FF >> */
{ 0x0013, KEY_BACK }, /* Back */
{ 0x0014, KEY_PLAY },
- { 0x0015, KEY_0 },
+ { 0x0015, KEY_NUMERIC_0 },
{ 0x0016, KEY_POWER2 }, /* [red power button] */
{ 0x0017, KEY_FAVORITES }, /* Favorite List */
{ 0x0018, KEY_RED },
- { 0x0019, KEY_8 },
+ { 0x0019, KEY_NUMERIC_8 },
{ 0x001a, KEY_STOP },
- { 0x001b, KEY_9 },
+ { 0x001b, KEY_NUMERIC_9 },
{ 0x001c, KEY_EPG }, /* Info/EPG */
- { 0x001d, KEY_5 },
+ { 0x001d, KEY_NUMERIC_5 },
{ 0x001e, KEY_VOLUMEUP },
- { 0x001f, KEY_6 },
+ { 0x001f, KEY_NUMERIC_6 },
{ 0x0040, KEY_REWIND }, /* FR << */
{ 0x0041, KEY_PREVIOUS }, /* Replay */
{ 0x0042, KEY_NEXT }, /* Skip */
diff --git a/drivers/media/rc/keymaps/rc-behold-columbus.c b/drivers/media/rc/keymaps/rc-behold-columbus.c
index b68380a76010..8579b3d5128d 100644
--- a/drivers/media/rc/keymaps/rc-behold-columbus.c
+++ b/drivers/media/rc/keymaps/rc-behold-columbus.c
@@ -37,24 +37,24 @@ static struct rc_map_table behold_columbus[] = {
* 0x07 0x08 0x09 0x10 *
* 7 8 9 Zoom *
* */
- { 0x01, KEY_1 },
- { 0x02, KEY_2 },
- { 0x03, KEY_3 },
+ { 0x01, KEY_NUMERIC_1 },
+ { 0x02, KEY_NUMERIC_2 },
+ { 0x03, KEY_NUMERIC_3 },
{ 0x0D, KEY_SETUP }, /* Setup key */
- { 0x04, KEY_4 },
- { 0x05, KEY_5 },
- { 0x06, KEY_6 },
+ { 0x04, KEY_NUMERIC_4 },
+ { 0x05, KEY_NUMERIC_5 },
+ { 0x06, KEY_NUMERIC_6 },
{ 0x19, KEY_CAMERA }, /* Snapshot key */
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x09, KEY_9 },
+ { 0x07, KEY_NUMERIC_7 },
+ { 0x08, KEY_NUMERIC_8 },
+ { 0x09, KEY_NUMERIC_9 },
{ 0x10, KEY_ZOOM },
/* 0x0A 0x00 0x0B 0x0C *
* RECALL 0 ChannelUp VolumeUp *
* */
{ 0x0A, KEY_AGAIN },
- { 0x00, KEY_0 },
+ { 0x00, KEY_NUMERIC_0 },
{ 0x0B, KEY_CHANNELUP },
{ 0x0C, KEY_VOLUMEUP },
diff --git a/drivers/media/rc/keymaps/rc-behold.c b/drivers/media/rc/keymaps/rc-behold.c
index 2b7cddb2f36d..28397ce05a7f 100644
--- a/drivers/media/rc/keymaps/rc-behold.c
+++ b/drivers/media/rc/keymaps/rc-behold.c
@@ -37,21 +37,21 @@ static struct rc_map_table behold[] = {
* 0x07 0x08 0x09 *
* 7 8 9 *
* */
- { 0x866b01, KEY_1 },
- { 0x866b02, KEY_2 },
- { 0x866b03, KEY_3 },
- { 0x866b04, KEY_4 },
- { 0x866b05, KEY_5 },
- { 0x866b06, KEY_6 },
- { 0x866b07, KEY_7 },
- { 0x866b08, KEY_8 },
- { 0x866b09, KEY_9 },
+ { 0x866b01, KEY_NUMERIC_1 },
+ { 0x866b02, KEY_NUMERIC_2 },
+ { 0x866b03, KEY_NUMERIC_3 },
+ { 0x866b04, KEY_NUMERIC_4 },
+ { 0x866b05, KEY_NUMERIC_5 },
+ { 0x866b06, KEY_NUMERIC_6 },
+ { 0x866b07, KEY_NUMERIC_7 },
+ { 0x866b08, KEY_NUMERIC_8 },
+ { 0x866b09, KEY_NUMERIC_9 },
/* 0x0a 0x00 0x17 *
* RECALL 0 MODE *
* */
{ 0x866b0a, KEY_AGAIN },
- { 0x866b00, KEY_0 },
+ { 0x866b00, KEY_NUMERIC_0 },
{ 0x866b17, KEY_MODE },
/* 0x14 0x10 *
diff --git a/drivers/media/rc/keymaps/rc-budget-ci-old.c b/drivers/media/rc/keymaps/rc-budget-ci-old.c
index 56f051af6154..6ca822256862 100644
--- a/drivers/media/rc/keymaps/rc-budget-ci-old.c
+++ b/drivers/media/rc/keymaps/rc-budget-ci-old.c
@@ -16,16 +16,16 @@
*/
static struct rc_map_table budget_ci_old[] = {
- { 0x00, KEY_0 },
- { 0x01, KEY_1 },
- { 0x02, KEY_2 },
- { 0x03, KEY_3 },
- { 0x04, KEY_4 },
- { 0x05, KEY_5 },
- { 0x06, KEY_6 },
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x09, KEY_9 },
+ { 0x00, KEY_NUMERIC_0 },
+ { 0x01, KEY_NUMERIC_1 },
+ { 0x02, KEY_NUMERIC_2 },
+ { 0x03, KEY_NUMERIC_3 },
+ { 0x04, KEY_NUMERIC_4 },
+ { 0x05, KEY_NUMERIC_5 },
+ { 0x06, KEY_NUMERIC_6 },
+ { 0x07, KEY_NUMERIC_7 },
+ { 0x08, KEY_NUMERIC_8 },
+ { 0x09, KEY_NUMERIC_9 },
{ 0x0a, KEY_ENTER },
{ 0x0b, KEY_RED },
{ 0x0c, KEY_POWER }, /* RADIO on Hauppauge */
diff --git a/drivers/media/rc/keymaps/rc-cinergy-1400.c b/drivers/media/rc/keymaps/rc-cinergy-1400.c
index dacb13c53bb4..4433d28b219c 100644
--- a/drivers/media/rc/keymaps/rc-cinergy-1400.c
+++ b/drivers/media/rc/keymaps/rc-cinergy-1400.c
@@ -12,16 +12,16 @@
static struct rc_map_table cinergy_1400[] = {
{ 0x01, KEY_POWER },
- { 0x02, KEY_1 },
- { 0x03, KEY_2 },
- { 0x04, KEY_3 },
- { 0x05, KEY_4 },
- { 0x06, KEY_5 },
- { 0x07, KEY_6 },
- { 0x08, KEY_7 },
- { 0x09, KEY_8 },
- { 0x0a, KEY_9 },
- { 0x0c, KEY_0 },
+ { 0x02, KEY_NUMERIC_1 },
+ { 0x03, KEY_NUMERIC_2 },
+ { 0x04, KEY_NUMERIC_3 },
+ { 0x05, KEY_NUMERIC_4 },
+ { 0x06, KEY_NUMERIC_5 },
+ { 0x07, KEY_NUMERIC_6 },
+ { 0x08, KEY_NUMERIC_7 },
+ { 0x09, KEY_NUMERIC_8 },
+ { 0x0a, KEY_NUMERIC_9 },
+ { 0x0c, KEY_NUMERIC_0 },
{ 0x0b, KEY_VIDEO },
{ 0x0d, KEY_REFRESH },
diff --git a/drivers/media/rc/keymaps/rc-cinergy.c b/drivers/media/rc/keymaps/rc-cinergy.c
index 6ab2e51b764d..b34a37b8fe61 100644
--- a/drivers/media/rc/keymaps/rc-cinergy.c
+++ b/drivers/media/rc/keymaps/rc-cinergy.c
@@ -9,16 +9,16 @@
#include <linux/module.h>
static struct rc_map_table cinergy[] = {
- { 0x00, KEY_0 },
- { 0x01, KEY_1 },
- { 0x02, KEY_2 },
- { 0x03, KEY_3 },
- { 0x04, KEY_4 },
- { 0x05, KEY_5 },
- { 0x06, KEY_6 },
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x09, KEY_9 },
+ { 0x00, KEY_NUMERIC_0 },
+ { 0x01, KEY_NUMERIC_1 },
+ { 0x02, KEY_NUMERIC_2 },
+ { 0x03, KEY_NUMERIC_3 },
+ { 0x04, KEY_NUMERIC_4 },
+ { 0x05, KEY_NUMERIC_5 },
+ { 0x06, KEY_NUMERIC_6 },
+ { 0x07, KEY_NUMERIC_7 },
+ { 0x08, KEY_NUMERIC_8 },
+ { 0x09, KEY_NUMERIC_9 },
{ 0x0a, KEY_POWER },
{ 0x0b, KEY_MEDIA }, /* app */
diff --git a/drivers/media/rc/keymaps/rc-d680-dmb.c b/drivers/media/rc/keymaps/rc-d680-dmb.c
index f67aa597a75b..d491a5e9750f 100644
--- a/drivers/media/rc/keymaps/rc-d680-dmb.c
+++ b/drivers/media/rc/keymaps/rc-d680-dmb.c
@@ -11,16 +11,16 @@
static struct rc_map_table rc_map_d680_dmb_table[] = {
{ 0x0038, KEY_SWITCHVIDEOMODE }, /* TV/AV */
{ 0x080c, KEY_ZOOM },
- { 0x0800, KEY_0 },
- { 0x0001, KEY_1 },
- { 0x0802, KEY_2 },
- { 0x0003, KEY_3 },
- { 0x0804, KEY_4 },
- { 0x0005, KEY_5 },
- { 0x0806, KEY_6 },
- { 0x0007, KEY_7 },
- { 0x0808, KEY_8 },
- { 0x0009, KEY_9 },
+ { 0x0800, KEY_NUMERIC_0 },
+ { 0x0001, KEY_NUMERIC_1 },
+ { 0x0802, KEY_NUMERIC_2 },
+ { 0x0003, KEY_NUMERIC_3 },
+ { 0x0804, KEY_NUMERIC_4 },
+ { 0x0005, KEY_NUMERIC_5 },
+ { 0x0806, KEY_NUMERIC_6 },
+ { 0x0007, KEY_NUMERIC_7 },
+ { 0x0808, KEY_NUMERIC_8 },
+ { 0x0009, KEY_NUMERIC_9 },
{ 0x000a, KEY_MUTE },
{ 0x0829, KEY_BACK },
{ 0x0012, KEY_CHANNELUP },
diff --git a/drivers/media/rc/keymaps/rc-delock-61959.c b/drivers/media/rc/keymaps/rc-delock-61959.c
index c60fc1e46fc5..529435e8d416 100644
--- a/drivers/media/rc/keymaps/rc-delock-61959.c
+++ b/drivers/media/rc/keymaps/rc-delock-61959.c
@@ -14,16 +14,16 @@ static struct rc_map_table delock_61959[] = {
{ 0x866b16, KEY_POWER2 }, /* Power */
{ 0x866b0c, KEY_POWER }, /* Shut Down */
- { 0x866b00, KEY_1},
- { 0x866b01, KEY_2},
- { 0x866b02, KEY_3},
- { 0x866b03, KEY_4},
- { 0x866b04, KEY_5},
- { 0x866b05, KEY_6},
- { 0x866b06, KEY_7},
- { 0x866b07, KEY_8},
- { 0x866b08, KEY_9},
- { 0x866b14, KEY_0},
+ { 0x866b00, KEY_NUMERIC_1},
+ { 0x866b01, KEY_NUMERIC_2},
+ { 0x866b02, KEY_NUMERIC_3},
+ { 0x866b03, KEY_NUMERIC_4},
+ { 0x866b04, KEY_NUMERIC_5},
+ { 0x866b05, KEY_NUMERIC_6},
+ { 0x866b06, KEY_NUMERIC_7},
+ { 0x866b07, KEY_NUMERIC_8},
+ { 0x866b08, KEY_NUMERIC_9},
+ { 0x866b14, KEY_NUMERIC_0},
{ 0x866b0a, KEY_ZOOM}, /* Full Screen */
{ 0x866b10, KEY_CAMERA}, /* Photo */
diff --git a/drivers/media/rc/keymaps/rc-dib0700-nec.c b/drivers/media/rc/keymaps/rc-dib0700-nec.c
index 4ee801acb089..f1fcdf16f485 100644
--- a/drivers/media/rc/keymaps/rc-dib0700-nec.c
+++ b/drivers/media/rc/keymaps/rc-dib0700-nec.c
@@ -17,16 +17,16 @@ static struct rc_map_table dib0700_nec_table[] = {
/* Key codes for the Pixelview SBTVD remote */
{ 0x866b13, KEY_MUTE },
{ 0x866b12, KEY_POWER },
- { 0x866b01, KEY_1 },
- { 0x866b02, KEY_2 },
- { 0x866b03, KEY_3 },
- { 0x866b04, KEY_4 },
- { 0x866b05, KEY_5 },
- { 0x866b06, KEY_6 },
- { 0x866b07, KEY_7 },
- { 0x866b08, KEY_8 },
- { 0x866b09, KEY_9 },
- { 0x866b00, KEY_0 },
+ { 0x866b01, KEY_NUMERIC_1 },
+ { 0x866b02, KEY_NUMERIC_2 },
+ { 0x866b03, KEY_NUMERIC_3 },
+ { 0x866b04, KEY_NUMERIC_4 },
+ { 0x866b05, KEY_NUMERIC_5 },
+ { 0x866b06, KEY_NUMERIC_6 },
+ { 0x866b07, KEY_NUMERIC_7 },
+ { 0x866b08, KEY_NUMERIC_8 },
+ { 0x866b09, KEY_NUMERIC_9 },
+ { 0x866b00, KEY_NUMERIC_0 },
{ 0x866b0d, KEY_CHANNELUP },
{ 0x866b19, KEY_CHANNELDOWN },
{ 0x866b10, KEY_VOLUMEUP },
@@ -60,17 +60,17 @@ static struct rc_map_table dib0700_nec_table[] = {
/* Key codes for the Elgato EyeTV Diversity silver remote */
{ 0x4501, KEY_POWER },
{ 0x4502, KEY_MUTE },
- { 0x4503, KEY_1 },
- { 0x4504, KEY_2 },
- { 0x4505, KEY_3 },
- { 0x4506, KEY_4 },
- { 0x4507, KEY_5 },
- { 0x4508, KEY_6 },
- { 0x4509, KEY_7 },
- { 0x450a, KEY_8 },
- { 0x450b, KEY_9 },
+ { 0x4503, KEY_NUMERIC_1 },
+ { 0x4504, KEY_NUMERIC_2 },
+ { 0x4505, KEY_NUMERIC_3 },
+ { 0x4506, KEY_NUMERIC_4 },
+ { 0x4507, KEY_NUMERIC_5 },
+ { 0x4508, KEY_NUMERIC_6 },
+ { 0x4509, KEY_NUMERIC_7 },
+ { 0x450a, KEY_NUMERIC_8 },
+ { 0x450b, KEY_NUMERIC_9 },
{ 0x450c, KEY_LAST },
- { 0x450d, KEY_0 },
+ { 0x450d, KEY_NUMERIC_0 },
{ 0x450e, KEY_ENTER },
{ 0x450f, KEY_RED },
{ 0x4510, KEY_CHANNELUP },
diff --git a/drivers/media/rc/keymaps/rc-dib0700-rc5.c b/drivers/media/rc/keymaps/rc-dib0700-rc5.c
index ef4085a0fda3..002fffcba95d 100644
--- a/drivers/media/rc/keymaps/rc-dib0700-rc5.c
+++ b/drivers/media/rc/keymaps/rc-dib0700-rc5.c
@@ -22,16 +22,16 @@ static struct rc_map_table dib0700_rc5_table[] = {
{ 0x0709, KEY_VOLUMEDOWN },
{ 0x0706, KEY_CHANNELUP },
{ 0x070c, KEY_CHANNELDOWN },
- { 0x070f, KEY_1 },
- { 0x0715, KEY_2 },
- { 0x0710, KEY_3 },
- { 0x0718, KEY_4 },
- { 0x071b, KEY_5 },
- { 0x071e, KEY_6 },
- { 0x0711, KEY_7 },
- { 0x0721, KEY_8 },
- { 0x0712, KEY_9 },
- { 0x0727, KEY_0 },
+ { 0x070f, KEY_NUMERIC_1 },
+ { 0x0715, KEY_NUMERIC_2 },
+ { 0x0710, KEY_NUMERIC_3 },
+ { 0x0718, KEY_NUMERIC_4 },
+ { 0x071b, KEY_NUMERIC_5 },
+ { 0x071e, KEY_NUMERIC_6 },
+ { 0x0711, KEY_NUMERIC_7 },
+ { 0x0721, KEY_NUMERIC_8 },
+ { 0x0712, KEY_NUMERIC_9 },
+ { 0x0727, KEY_NUMERIC_0 },
{ 0x0724, KEY_SCREEN }, /* 'Square' key */
{ 0x072a, KEY_TEXT }, /* 'T' key */
{ 0x072d, KEY_REWIND },
@@ -43,17 +43,17 @@ static struct rc_map_table dib0700_rc5_table[] = {
/* Key codes for the Terratec Cinergy DT XS Diversity, similar to cinergyT2.c */
{ 0xeb01, KEY_POWER },
- { 0xeb02, KEY_1 },
- { 0xeb03, KEY_2 },
- { 0xeb04, KEY_3 },
- { 0xeb05, KEY_4 },
- { 0xeb06, KEY_5 },
- { 0xeb07, KEY_6 },
- { 0xeb08, KEY_7 },
- { 0xeb09, KEY_8 },
- { 0xeb0a, KEY_9 },
+ { 0xeb02, KEY_NUMERIC_1 },
+ { 0xeb03, KEY_NUMERIC_2 },
+ { 0xeb04, KEY_NUMERIC_3 },
+ { 0xeb05, KEY_NUMERIC_4 },
+ { 0xeb06, KEY_NUMERIC_5 },
+ { 0xeb07, KEY_NUMERIC_6 },
+ { 0xeb08, KEY_NUMERIC_7 },
+ { 0xeb09, KEY_NUMERIC_8 },
+ { 0xeb0a, KEY_NUMERIC_9 },
{ 0xeb0b, KEY_VIDEO },
- { 0xeb0c, KEY_0 },
+ { 0xeb0c, KEY_NUMERIC_0 },
{ 0xeb0d, KEY_REFRESH },
{ 0xeb0f, KEY_EPG },
{ 0xeb10, KEY_UP },
@@ -92,16 +92,16 @@ static struct rc_map_table dib0700_rc5_table[] = {
{ 0xeb5c, KEY_NEXT },
/* Key codes for the Haupauge WinTV Nova-TD, copied from nova-t-usb2.c (Nova-T USB2) */
- { 0x1e00, KEY_0 },
- { 0x1e01, KEY_1 },
- { 0x1e02, KEY_2 },
- { 0x1e03, KEY_3 },
- { 0x1e04, KEY_4 },
- { 0x1e05, KEY_5 },
- { 0x1e06, KEY_6 },
- { 0x1e07, KEY_7 },
- { 0x1e08, KEY_8 },
- { 0x1e09, KEY_9 },
+ { 0x1e00, KEY_NUMERIC_0 },
+ { 0x1e01, KEY_NUMERIC_1 },
+ { 0x1e02, KEY_NUMERIC_2 },
+ { 0x1e03, KEY_NUMERIC_3 },
+ { 0x1e04, KEY_NUMERIC_4 },
+ { 0x1e05, KEY_NUMERIC_5 },
+ { 0x1e06, KEY_NUMERIC_6 },
+ { 0x1e07, KEY_NUMERIC_7 },
+ { 0x1e08, KEY_NUMERIC_8 },
+ { 0x1e09, KEY_NUMERIC_9 },
{ 0x1e0a, KEY_KPASTERISK },
{ 0x1e0b, KEY_RED },
{ 0x1e0c, KEY_RADIO },
@@ -144,16 +144,16 @@ static struct rc_map_table dib0700_rc5_table[] = {
{ 0x0f4e, KEY_PRINT }, /* PREVIEW */
{ 0x0840, KEY_SCREEN }, /* full screen toggle*/
{ 0x0f71, KEY_DOT }, /* frequency */
- { 0x0743, KEY_0 },
- { 0x0c41, KEY_1 },
- { 0x0443, KEY_2 },
- { 0x0b7f, KEY_3 },
- { 0x0e41, KEY_4 },
- { 0x0643, KEY_5 },
- { 0x097f, KEY_6 },
- { 0x0d7e, KEY_7 },
- { 0x057c, KEY_8 },
- { 0x0a40, KEY_9 },
+ { 0x0743, KEY_NUMERIC_0 },
+ { 0x0c41, KEY_NUMERIC_1 },
+ { 0x0443, KEY_NUMERIC_2 },
+ { 0x0b7f, KEY_NUMERIC_3 },
+ { 0x0e41, KEY_NUMERIC_4 },
+ { 0x0643, KEY_NUMERIC_5 },
+ { 0x097f, KEY_NUMERIC_6 },
+ { 0x0d7e, KEY_NUMERIC_7 },
+ { 0x057c, KEY_NUMERIC_8 },
+ { 0x0a40, KEY_NUMERIC_9 },
{ 0x0e4e, KEY_CLEAR },
{ 0x047c, KEY_CHANNEL }, /* show channel number */
{ 0x0f41, KEY_LAST }, /* recall */
@@ -168,16 +168,16 @@ static struct rc_map_table dib0700_rc5_table[] = {
{ 0x007d, KEY_CHANNELDOWN },
/* Key codes for Nova-TD "credit card" remote control. */
- { 0x1d00, KEY_0 },
- { 0x1d01, KEY_1 },
- { 0x1d02, KEY_2 },
- { 0x1d03, KEY_3 },
- { 0x1d04, KEY_4 },
- { 0x1d05, KEY_5 },
- { 0x1d06, KEY_6 },
- { 0x1d07, KEY_7 },
- { 0x1d08, KEY_8 },
- { 0x1d09, KEY_9 },
+ { 0x1d00, KEY_NUMERIC_0 },
+ { 0x1d01, KEY_NUMERIC_1 },
+ { 0x1d02, KEY_NUMERIC_2 },
+ { 0x1d03, KEY_NUMERIC_3 },
+ { 0x1d04, KEY_NUMERIC_4 },
+ { 0x1d05, KEY_NUMERIC_5 },
+ { 0x1d06, KEY_NUMERIC_6 },
+ { 0x1d07, KEY_NUMERIC_7 },
+ { 0x1d08, KEY_NUMERIC_8 },
+ { 0x1d09, KEY_NUMERIC_9 },
{ 0x1d0a, KEY_TEXT },
{ 0x1d0d, KEY_MENU },
{ 0x1d0f, KEY_MUTE },
diff --git a/drivers/media/rc/keymaps/rc-digitalnow-tinytwin.c b/drivers/media/rc/keymaps/rc-digitalnow-tinytwin.c
index f4d0799dcc72..2466d8c50226 100644
--- a/drivers/media/rc/keymaps/rc-digitalnow-tinytwin.c
+++ b/drivers/media/rc/keymaps/rc-digitalnow-tinytwin.c
@@ -12,14 +12,14 @@ static struct rc_map_table digitalnow_tinytwin[] = {
{ 0x0000, KEY_MUTE }, /* [symbol speaker] */
{ 0x0001, KEY_VOLUMEUP },
{ 0x0002, KEY_POWER2 }, /* TV [power button] */
- { 0x0003, KEY_2 },
- { 0x0004, KEY_3 },
- { 0x0005, KEY_4 },
- { 0x0006, KEY_6 },
- { 0x0007, KEY_7 },
- { 0x0008, KEY_8 },
+ { 0x0003, KEY_NUMERIC_2 },
+ { 0x0004, KEY_NUMERIC_3 },
+ { 0x0005, KEY_NUMERIC_4 },
+ { 0x0006, KEY_NUMERIC_6 },
+ { 0x0007, KEY_NUMERIC_7 },
+ { 0x0008, KEY_NUMERIC_8 },
{ 0x0009, KEY_NUMERIC_STAR }, /* [*] */
- { 0x000a, KEY_0 },
+ { 0x000a, KEY_NUMERIC_0 },
{ 0x000b, KEY_NUMERIC_POUND }, /* [#] */
{ 0x000c, KEY_RIGHT }, /* [right arrow] */
{ 0x000d, KEY_HOMEPAGE }, /* [symbol home] Start */
@@ -36,10 +36,10 @@ static struct rc_map_table digitalnow_tinytwin[] = {
{ 0x0019, KEY_BLUE }, /* [blue] MyTV */
{ 0x001a, KEY_REWIND }, /* REW [<<] */
{ 0x001b, KEY_PLAY }, /* PLAY */
- { 0x001c, KEY_5 },
- { 0x001d, KEY_9 },
+ { 0x001c, KEY_NUMERIC_5 },
+ { 0x001d, KEY_NUMERIC_9 },
{ 0x001e, KEY_VOLUMEDOWN },
- { 0x001f, KEY_1 },
+ { 0x001f, KEY_NUMERIC_1 },
{ 0x0040, KEY_STOP }, /* STOP */
{ 0x0042, KEY_PAUSE }, /* PAUSE */
{ 0x0043, KEY_SCREEN }, /* Aspect */
diff --git a/drivers/media/rc/keymaps/rc-digittrade.c b/drivers/media/rc/keymaps/rc-digittrade.c
index 6849f1a5721c..65bc8ad7e52c 100644
--- a/drivers/media/rc/keymaps/rc-digittrade.c
+++ b/drivers/media/rc/keymaps/rc-digittrade.c
@@ -14,11 +14,11 @@
/* Digittrade DVB-T USB Stick */
static struct rc_map_table digittrade[] = {
- { 0x0000, KEY_9 },
+ { 0x0000, KEY_NUMERIC_9 },
{ 0x0001, KEY_EPG }, /* EPG */
{ 0x0002, KEY_VOLUMEDOWN }, /* Vol Dn */
{ 0x0003, KEY_TEXT }, /* TELETEXT */
- { 0x0004, KEY_8 },
+ { 0x0004, KEY_NUMERIC_8 },
{ 0x0005, KEY_MUTE }, /* MUTE */
{ 0x0006, KEY_POWER2 }, /* POWER */
{ 0x0009, KEY_ZOOM }, /* FULLSCREEN */
@@ -26,22 +26,22 @@ static struct rc_map_table digittrade[] = {
{ 0x000d, KEY_SUBTITLE }, /* SUBTITLE */
{ 0x000e, KEY_STOP }, /* STOP */
{ 0x0010, KEY_OK }, /* RETURN */
- { 0x0011, KEY_2 },
- { 0x0012, KEY_4 },
- { 0x0015, KEY_3 },
- { 0x0016, KEY_5 },
+ { 0x0011, KEY_NUMERIC_2 },
+ { 0x0012, KEY_NUMERIC_4 },
+ { 0x0015, KEY_NUMERIC_3 },
+ { 0x0016, KEY_NUMERIC_5 },
{ 0x0017, KEY_CHANNELDOWN }, /* Ch Dn */
{ 0x0019, KEY_CHANNELUP }, /* CH Up */
{ 0x001a, KEY_PAUSE }, /* PAUSE */
- { 0x001b, KEY_1 },
+ { 0x001b, KEY_NUMERIC_1 },
{ 0x001d, KEY_AUDIO }, /* DUAL SOUND */
{ 0x001e, KEY_PLAY }, /* PLAY */
{ 0x001f, KEY_CAMERA }, /* SNAPSHOT */
{ 0x0040, KEY_VOLUMEUP }, /* Vol Up */
- { 0x0048, KEY_7 },
- { 0x004c, KEY_6 },
+ { 0x0048, KEY_NUMERIC_7 },
+ { 0x004c, KEY_NUMERIC_6 },
{ 0x004d, KEY_PLAYPAUSE }, /* TIMESHIFT */
- { 0x0054, KEY_0 },
+ { 0x0054, KEY_NUMERIC_0 },
};
static struct rc_map_list digittrade_map = {
diff --git a/drivers/media/rc/keymaps/rc-dm1105-nec.c b/drivers/media/rc/keymaps/rc-dm1105-nec.c
index d853cd9a0936..cd0b985c994d 100644
--- a/drivers/media/rc/keymaps/rc-dm1105-nec.c
+++ b/drivers/media/rc/keymaps/rc-dm1105-nec.c
@@ -15,16 +15,16 @@
static struct rc_map_table dm1105_nec[] = {
{ 0x0a, KEY_POWER2}, /* power */
{ 0x0c, KEY_MUTE}, /* mute */
- { 0x11, KEY_1},
- { 0x12, KEY_2},
- { 0x13, KEY_3},
- { 0x14, KEY_4},
- { 0x15, KEY_5},
- { 0x16, KEY_6},
- { 0x17, KEY_7},
- { 0x18, KEY_8},
- { 0x19, KEY_9},
- { 0x10, KEY_0},
+ { 0x11, KEY_NUMERIC_1},
+ { 0x12, KEY_NUMERIC_2},
+ { 0x13, KEY_NUMERIC_3},
+ { 0x14, KEY_NUMERIC_4},
+ { 0x15, KEY_NUMERIC_5},
+ { 0x16, KEY_NUMERIC_6},
+ { 0x17, KEY_NUMERIC_7},
+ { 0x18, KEY_NUMERIC_8},
+ { 0x19, KEY_NUMERIC_9},
+ { 0x10, KEY_NUMERIC_0},
{ 0x1c, KEY_CHANNELUP}, /* ch+ */
{ 0x0f, KEY_CHANNELDOWN}, /* ch- */
{ 0x1a, KEY_VOLUMEUP}, /* vol+ */
diff --git a/drivers/media/rc/keymaps/rc-dntv-live-dvb-t.c b/drivers/media/rc/keymaps/rc-dntv-live-dvb-t.c
index cdc1d8c990cb..a82f64dc9411 100644
--- a/drivers/media/rc/keymaps/rc-dntv-live-dvb-t.c
+++ b/drivers/media/rc/keymaps/rc-dntv-live-dvb-t.c
@@ -13,16 +13,16 @@
static struct rc_map_table dntv_live_dvb_t[] = {
{ 0x00, KEY_ESC }, /* 'go up a level?' */
/* Keys 0 to 9 */
- { 0x0a, KEY_0 },
- { 0x01, KEY_1 },
- { 0x02, KEY_2 },
- { 0x03, KEY_3 },
- { 0x04, KEY_4 },
- { 0x05, KEY_5 },
- { 0x06, KEY_6 },
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x09, KEY_9 },
+ { 0x0a, KEY_NUMERIC_0 },
+ { 0x01, KEY_NUMERIC_1 },
+ { 0x02, KEY_NUMERIC_2 },
+ { 0x03, KEY_NUMERIC_3 },
+ { 0x04, KEY_NUMERIC_4 },
+ { 0x05, KEY_NUMERIC_5 },
+ { 0x06, KEY_NUMERIC_6 },
+ { 0x07, KEY_NUMERIC_7 },
+ { 0x08, KEY_NUMERIC_8 },
+ { 0x09, KEY_NUMERIC_9 },
{ 0x0b, KEY_TUNER }, /* tv/fm */
{ 0x0c, KEY_SEARCH }, /* scan */
diff --git a/drivers/media/rc/keymaps/rc-dntv-live-dvbt-pro.c b/drivers/media/rc/keymaps/rc-dntv-live-dvbt-pro.c
index 38e1d1b837da..d3f5048a0220 100644
--- a/drivers/media/rc/keymaps/rc-dntv-live-dvbt-pro.c
+++ b/drivers/media/rc/keymaps/rc-dntv-live-dvbt-pro.c
@@ -18,17 +18,17 @@ static struct rc_map_table dntv_live_dvbt_pro[] = {
{ 0x58, KEY_TUNER }, /* digital Radio */
{ 0x5a, KEY_RADIO }, /* FM radio */
{ 0x59, KEY_DVD }, /* dvd menu */
- { 0x03, KEY_1 },
- { 0x01, KEY_2 },
- { 0x06, KEY_3 },
- { 0x09, KEY_4 },
- { 0x1d, KEY_5 },
- { 0x1f, KEY_6 },
- { 0x0d, KEY_7 },
- { 0x19, KEY_8 },
- { 0x1b, KEY_9 },
+ { 0x03, KEY_NUMERIC_1 },
+ { 0x01, KEY_NUMERIC_2 },
+ { 0x06, KEY_NUMERIC_3 },
+ { 0x09, KEY_NUMERIC_4 },
+ { 0x1d, KEY_NUMERIC_5 },
+ { 0x1f, KEY_NUMERIC_6 },
+ { 0x0d, KEY_NUMERIC_7 },
+ { 0x19, KEY_NUMERIC_8 },
+ { 0x1b, KEY_NUMERIC_9 },
{ 0x0c, KEY_CANCEL },
- { 0x15, KEY_0 },
+ { 0x15, KEY_NUMERIC_0 },
{ 0x4a, KEY_CLEAR },
{ 0x13, KEY_BACK },
{ 0x00, KEY_TAB },
diff --git a/drivers/media/rc/keymaps/rc-dtt200u.c b/drivers/media/rc/keymaps/rc-dtt200u.c
index 86fd6a1668af..e7f87baa3212 100644
--- a/drivers/media/rc/keymaps/rc-dtt200u.c
+++ b/drivers/media/rc/keymaps/rc-dtt200u.c
@@ -12,21 +12,21 @@ static struct rc_map_table dtt200u_table[] = {
{ 0x8001, KEY_MUTE },
{ 0x8002, KEY_CHANNELDOWN },
{ 0x8003, KEY_VOLUMEDOWN },
- { 0x8004, KEY_1 },
- { 0x8005, KEY_2 },
- { 0x8006, KEY_3 },
- { 0x8007, KEY_4 },
- { 0x8008, KEY_5 },
- { 0x8009, KEY_6 },
- { 0x800a, KEY_7 },
+ { 0x8004, KEY_NUMERIC_1 },
+ { 0x8005, KEY_NUMERIC_2 },
+ { 0x8006, KEY_NUMERIC_3 },
+ { 0x8007, KEY_NUMERIC_4 },
+ { 0x8008, KEY_NUMERIC_5 },
+ { 0x8009, KEY_NUMERIC_6 },
+ { 0x800a, KEY_NUMERIC_7 },
{ 0x800c, KEY_ZOOM },
- { 0x800d, KEY_0 },
+ { 0x800d, KEY_NUMERIC_0 },
{ 0x800e, KEY_SELECT },
{ 0x8012, KEY_POWER },
{ 0x801a, KEY_CHANNELUP },
- { 0x801b, KEY_8 },
+ { 0x801b, KEY_NUMERIC_8 },
{ 0x801e, KEY_VOLUMEUP },
- { 0x801f, KEY_9 },
+ { 0x801f, KEY_NUMERIC_9 },
};
static struct rc_map_list dtt200u_map = {
diff --git a/drivers/media/rc/keymaps/rc-dvbsky.c b/drivers/media/rc/keymaps/rc-dvbsky.c
index 4b61f60a4854..f5063af2e5bc 100644
--- a/drivers/media/rc/keymaps/rc-dvbsky.c
+++ b/drivers/media/rc/keymaps/rc-dvbsky.c
@@ -13,16 +13,16 @@
*/
static struct rc_map_table rc5_dvbsky[] = {
- { 0x0000, KEY_0 },
- { 0x0001, KEY_1 },
- { 0x0002, KEY_2 },
- { 0x0003, KEY_3 },
- { 0x0004, KEY_4 },
- { 0x0005, KEY_5 },
- { 0x0006, KEY_6 },
- { 0x0007, KEY_7 },
- { 0x0008, KEY_8 },
- { 0x0009, KEY_9 },
+ { 0x0000, KEY_NUMERIC_0 },
+ { 0x0001, KEY_NUMERIC_1 },
+ { 0x0002, KEY_NUMERIC_2 },
+ { 0x0003, KEY_NUMERIC_3 },
+ { 0x0004, KEY_NUMERIC_4 },
+ { 0x0005, KEY_NUMERIC_5 },
+ { 0x0006, KEY_NUMERIC_6 },
+ { 0x0007, KEY_NUMERIC_7 },
+ { 0x0008, KEY_NUMERIC_8 },
+ { 0x0009, KEY_NUMERIC_9 },
{ 0x000a, KEY_MUTE },
{ 0x000d, KEY_OK },
{ 0x000b, KEY_STOP },
diff --git a/drivers/media/rc/keymaps/rc-dvico-mce.c b/drivers/media/rc/keymaps/rc-dvico-mce.c
index 8342c32f58fd..b1bb8cdb3705 100644
--- a/drivers/media/rc/keymaps/rc-dvico-mce.c
+++ b/drivers/media/rc/keymaps/rc-dvico-mce.c
@@ -35,17 +35,17 @@ static struct rc_map_table rc_map_dvico_mce_table[] = {
{ 0x0152, KEY_CAMERA },
{ 0x015a, KEY_TUNER }, /* Live */
{ 0x0119, KEY_OPEN },
- { 0x010b, KEY_1 },
- { 0x0117, KEY_2 },
- { 0x011b, KEY_3 },
- { 0x0107, KEY_4 },
- { 0x0150, KEY_5 },
- { 0x0154, KEY_6 },
- { 0x0148, KEY_7 },
- { 0x014c, KEY_8 },
- { 0x0158, KEY_9 },
+ { 0x010b, KEY_NUMERIC_1 },
+ { 0x0117, KEY_NUMERIC_2 },
+ { 0x011b, KEY_NUMERIC_3 },
+ { 0x0107, KEY_NUMERIC_4 },
+ { 0x0150, KEY_NUMERIC_5 },
+ { 0x0154, KEY_NUMERIC_6 },
+ { 0x0148, KEY_NUMERIC_7 },
+ { 0x014c, KEY_NUMERIC_8 },
+ { 0x0158, KEY_NUMERIC_9 },
{ 0x0113, KEY_ANGLE }, /* Aspect */
- { 0x0103, KEY_0 },
+ { 0x0103, KEY_NUMERIC_0 },
{ 0x011f, KEY_ZOOM },
{ 0x0143, KEY_REWIND },
{ 0x0147, KEY_PLAYPAUSE },
diff --git a/drivers/media/rc/keymaps/rc-dvico-portable.c b/drivers/media/rc/keymaps/rc-dvico-portable.c
index 366bd10bf987..ec12ba6995dc 100644
--- a/drivers/media/rc/keymaps/rc-dvico-portable.c
+++ b/drivers/media/rc/keymaps/rc-dvico-portable.c
@@ -24,17 +24,17 @@ static struct rc_map_table rc_map_dvico_portable_table[] = {
{ 0x0316, KEY_CAMERA },
{ 0x0340, KEY_TUNER }, /* ATV/DTV */
{ 0x0345, KEY_OPEN },
- { 0x0319, KEY_1 },
- { 0x0318, KEY_2 },
- { 0x031b, KEY_3 },
- { 0x031a, KEY_4 },
- { 0x0358, KEY_5 },
- { 0x0359, KEY_6 },
- { 0x0315, KEY_7 },
- { 0x0314, KEY_8 },
- { 0x0317, KEY_9 },
+ { 0x0319, KEY_NUMERIC_1 },
+ { 0x0318, KEY_NUMERIC_2 },
+ { 0x031b, KEY_NUMERIC_3 },
+ { 0x031a, KEY_NUMERIC_4 },
+ { 0x0358, KEY_NUMERIC_5 },
+ { 0x0359, KEY_NUMERIC_6 },
+ { 0x0315, KEY_NUMERIC_7 },
+ { 0x0314, KEY_NUMERIC_8 },
+ { 0x0317, KEY_NUMERIC_9 },
{ 0x0344, KEY_ANGLE }, /* Aspect */
- { 0x0355, KEY_0 },
+ { 0x0355, KEY_NUMERIC_0 },
{ 0x0307, KEY_ZOOM },
{ 0x030a, KEY_REWIND },
{ 0x0308, KEY_PLAYPAUSE },
diff --git a/drivers/media/rc/keymaps/rc-em-terratec.c b/drivers/media/rc/keymaps/rc-em-terratec.c
index cbbba21484fb..a1f59aa6ff23 100644
--- a/drivers/media/rc/keymaps/rc-em-terratec.c
+++ b/drivers/media/rc/keymaps/rc-em-terratec.c
@@ -13,19 +13,19 @@ static struct rc_map_table em_terratec[] = {
{ 0x02, KEY_SELECT },
{ 0x03, KEY_MUTE },
{ 0x04, KEY_POWER },
- { 0x05, KEY_1 },
- { 0x06, KEY_2 },
- { 0x07, KEY_3 },
+ { 0x05, KEY_NUMERIC_1 },
+ { 0x06, KEY_NUMERIC_2 },
+ { 0x07, KEY_NUMERIC_3 },
{ 0x08, KEY_CHANNELUP },
- { 0x09, KEY_4 },
- { 0x0a, KEY_5 },
- { 0x0b, KEY_6 },
+ { 0x09, KEY_NUMERIC_4 },
+ { 0x0a, KEY_NUMERIC_5 },
+ { 0x0b, KEY_NUMERIC_6 },
{ 0x0c, KEY_CHANNELDOWN },
- { 0x0d, KEY_7 },
- { 0x0e, KEY_8 },
- { 0x0f, KEY_9 },
+ { 0x0d, KEY_NUMERIC_7 },
+ { 0x0e, KEY_NUMERIC_8 },
+ { 0x0f, KEY_NUMERIC_9 },
{ 0x10, KEY_VOLUMEUP },
- { 0x11, KEY_0 },
+ { 0x11, KEY_NUMERIC_0 },
{ 0x12, KEY_MENU },
{ 0x13, KEY_PRINT },
{ 0x14, KEY_VOLUMEDOWN },
diff --git a/drivers/media/rc/keymaps/rc-encore-enltv-fm53.c b/drivers/media/rc/keymaps/rc-encore-enltv-fm53.c
index 057c13b765ef..7a00471b6005 100644
--- a/drivers/media/rc/keymaps/rc-encore-enltv-fm53.c
+++ b/drivers/media/rc/keymaps/rc-encore-enltv-fm53.c
@@ -16,16 +16,16 @@ static struct rc_map_table encore_enltv_fm53[] = {
{ 0x10, KEY_POWER2},
{ 0x06, KEY_MUTE},
- { 0x09, KEY_1},
- { 0x1d, KEY_2},
- { 0x1f, KEY_3},
- { 0x19, KEY_4},
- { 0x1b, KEY_5},
- { 0x11, KEY_6},
- { 0x17, KEY_7},
- { 0x12, KEY_8},
- { 0x16, KEY_9},
- { 0x48, KEY_0},
+ { 0x09, KEY_NUMERIC_1},
+ { 0x1d, KEY_NUMERIC_2},
+ { 0x1f, KEY_NUMERIC_3},
+ { 0x19, KEY_NUMERIC_4},
+ { 0x1b, KEY_NUMERIC_5},
+ { 0x11, KEY_NUMERIC_6},
+ { 0x17, KEY_NUMERIC_7},
+ { 0x12, KEY_NUMERIC_8},
+ { 0x16, KEY_NUMERIC_9},
+ { 0x48, KEY_NUMERIC_0},
{ 0x04, KEY_LIST}, /* -/-- */
{ 0x40, KEY_LAST}, /* recall */
diff --git a/drivers/media/rc/keymaps/rc-encore-enltv.c b/drivers/media/rc/keymaps/rc-encore-enltv.c
index 5b4e832d5fac..712210097b4d 100644
--- a/drivers/media/rc/keymaps/rc-encore-enltv.c
+++ b/drivers/media/rc/keymaps/rc-encore-enltv.c
@@ -22,16 +22,16 @@ static struct rc_map_table encore_enltv[] = {
{ 0x01, KEY_AUDIO }, /* music */
{ 0x02, KEY_CAMERA }, /* picture */
- { 0x1f, KEY_1 },
- { 0x03, KEY_2 },
- { 0x04, KEY_3 },
- { 0x05, KEY_4 },
- { 0x1c, KEY_5 },
- { 0x06, KEY_6 },
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x1d, KEY_9 },
- { 0x0a, KEY_0 },
+ { 0x1f, KEY_NUMERIC_1 },
+ { 0x03, KEY_NUMERIC_2 },
+ { 0x04, KEY_NUMERIC_3 },
+ { 0x05, KEY_NUMERIC_4 },
+ { 0x1c, KEY_NUMERIC_5 },
+ { 0x06, KEY_NUMERIC_6 },
+ { 0x07, KEY_NUMERIC_7 },
+ { 0x08, KEY_NUMERIC_8 },
+ { 0x1d, KEY_NUMERIC_9 },
+ { 0x0a, KEY_NUMERIC_0 },
{ 0x09, KEY_LIST }, /* -/-- */
{ 0x0b, KEY_LAST }, /* recall */
diff --git a/drivers/media/rc/keymaps/rc-encore-enltv2.c b/drivers/media/rc/keymaps/rc-encore-enltv2.c
index cd0555924456..a08470b4f187 100644
--- a/drivers/media/rc/keymaps/rc-encore-enltv2.c
+++ b/drivers/media/rc/keymaps/rc-encore-enltv2.c
@@ -14,16 +14,16 @@
static struct rc_map_table encore_enltv2[] = {
{ 0x4c, KEY_POWER2 },
{ 0x4a, KEY_TUNER },
- { 0x40, KEY_1 },
- { 0x60, KEY_2 },
- { 0x50, KEY_3 },
- { 0x70, KEY_4 },
- { 0x48, KEY_5 },
- { 0x68, KEY_6 },
- { 0x58, KEY_7 },
- { 0x78, KEY_8 },
- { 0x44, KEY_9 },
- { 0x54, KEY_0 },
+ { 0x40, KEY_NUMERIC_1 },
+ { 0x60, KEY_NUMERIC_2 },
+ { 0x50, KEY_NUMERIC_3 },
+ { 0x70, KEY_NUMERIC_4 },
+ { 0x48, KEY_NUMERIC_5 },
+ { 0x68, KEY_NUMERIC_6 },
+ { 0x58, KEY_NUMERIC_7 },
+ { 0x78, KEY_NUMERIC_8 },
+ { 0x44, KEY_NUMERIC_9 },
+ { 0x54, KEY_NUMERIC_0 },
{ 0x64, KEY_LAST }, /* +100 */
{ 0x4e, KEY_AGAIN }, /* Recall */
diff --git a/drivers/media/rc/keymaps/rc-eztv.c b/drivers/media/rc/keymaps/rc-eztv.c
index 0e481d51fcb5..4e494d953e33 100644
--- a/drivers/media/rc/keymaps/rc-eztv.c
+++ b/drivers/media/rc/keymaps/rc-eztv.c
@@ -46,16 +46,16 @@ static struct rc_map_table eztv[] = {
{ 0x2d, KEY_PLAY }, /* play */
{ 0x2e, KEY_CAMERA }, /* snapshot / shuffle */
- { 0x00, KEY_0 },
- { 0x05, KEY_1 },
- { 0x06, KEY_2 },
- { 0x07, KEY_3 },
- { 0x09, KEY_4 },
- { 0x0a, KEY_5 },
- { 0x0b, KEY_6 },
- { 0x0d, KEY_7 },
- { 0x0e, KEY_8 },
- { 0x0f, KEY_9 },
+ { 0x00, KEY_NUMERIC_0 },
+ { 0x05, KEY_NUMERIC_1 },
+ { 0x06, KEY_NUMERIC_2 },
+ { 0x07, KEY_NUMERIC_3 },
+ { 0x09, KEY_NUMERIC_4 },
+ { 0x0a, KEY_NUMERIC_5 },
+ { 0x0b, KEY_NUMERIC_6 },
+ { 0x0d, KEY_NUMERIC_7 },
+ { 0x0e, KEY_NUMERIC_8 },
+ { 0x0f, KEY_NUMERIC_9 },
{ 0x2a, KEY_VOLUMEUP },
{ 0x11, KEY_VOLUMEDOWN },
diff --git a/drivers/media/rc/keymaps/rc-flydvb.c b/drivers/media/rc/keymaps/rc-flydvb.c
index 45940d7c92d0..202a1fbd1935 100644
--- a/drivers/media/rc/keymaps/rc-flydvb.c
+++ b/drivers/media/rc/keymaps/rc-flydvb.c
@@ -12,17 +12,17 @@ static struct rc_map_table flydvb[] = {
{ 0x01, KEY_ZOOM }, /* Full Screen */
{ 0x00, KEY_POWER }, /* Power */
- { 0x03, KEY_1 },
- { 0x04, KEY_2 },
- { 0x05, KEY_3 },
- { 0x07, KEY_4 },
- { 0x08, KEY_5 },
- { 0x09, KEY_6 },
- { 0x0b, KEY_7 },
- { 0x0c, KEY_8 },
- { 0x0d, KEY_9 },
+ { 0x03, KEY_NUMERIC_1 },
+ { 0x04, KEY_NUMERIC_2 },
+ { 0x05, KEY_NUMERIC_3 },
+ { 0x07, KEY_NUMERIC_4 },
+ { 0x08, KEY_NUMERIC_5 },
+ { 0x09, KEY_NUMERIC_6 },
+ { 0x0b, KEY_NUMERIC_7 },
+ { 0x0c, KEY_NUMERIC_8 },
+ { 0x0d, KEY_NUMERIC_9 },
{ 0x06, KEY_AGAIN }, /* Recall */
- { 0x0f, KEY_0 },
+ { 0x0f, KEY_NUMERIC_0 },
{ 0x10, KEY_MUTE }, /* Mute */
{ 0x02, KEY_RADIO }, /* TV/Radio */
{ 0x1b, KEY_LANGUAGE }, /* SAP (Second Audio Program) */
diff --git a/drivers/media/rc/keymaps/rc-flyvideo.c b/drivers/media/rc/keymaps/rc-flyvideo.c
index b2d4e4c7b192..a44467fb15cb 100644
--- a/drivers/media/rc/keymaps/rc-flyvideo.c
+++ b/drivers/media/rc/keymaps/rc-flyvideo.c
@@ -9,16 +9,16 @@
#include <linux/module.h>
static struct rc_map_table flyvideo[] = {
- { 0x0f, KEY_0 },
- { 0x03, KEY_1 },
- { 0x04, KEY_2 },
- { 0x05, KEY_3 },
- { 0x07, KEY_4 },
- { 0x08, KEY_5 },
- { 0x09, KEY_6 },
- { 0x0b, KEY_7 },
- { 0x0c, KEY_8 },
- { 0x0d, KEY_9 },
+ { 0x0f, KEY_NUMERIC_0 },
+ { 0x03, KEY_NUMERIC_1 },
+ { 0x04, KEY_NUMERIC_2 },
+ { 0x05, KEY_NUMERIC_3 },
+ { 0x07, KEY_NUMERIC_4 },
+ { 0x08, KEY_NUMERIC_5 },
+ { 0x09, KEY_NUMERIC_6 },
+ { 0x0b, KEY_NUMERIC_7 },
+ { 0x0c, KEY_NUMERIC_8 },
+ { 0x0d, KEY_NUMERIC_9 },
{ 0x0e, KEY_MODE }, /* Air/Cable */
{ 0x11, KEY_VIDEO }, /* Video */
diff --git a/drivers/media/rc/keymaps/rc-fusionhdtv-mce.c b/drivers/media/rc/keymaps/rc-fusionhdtv-mce.c
index 1c63fc7d4576..253199f5531a 100644
--- a/drivers/media/rc/keymaps/rc-fusionhdtv-mce.c
+++ b/drivers/media/rc/keymaps/rc-fusionhdtv-mce.c
@@ -12,16 +12,16 @@
static struct rc_map_table fusionhdtv_mce[] = {
- { 0x0b, KEY_1 },
- { 0x17, KEY_2 },
- { 0x1b, KEY_3 },
- { 0x07, KEY_4 },
- { 0x50, KEY_5 },
- { 0x54, KEY_6 },
- { 0x48, KEY_7 },
- { 0x4c, KEY_8 },
- { 0x58, KEY_9 },
- { 0x03, KEY_0 },
+ { 0x0b, KEY_NUMERIC_1 },
+ { 0x17, KEY_NUMERIC_2 },
+ { 0x1b, KEY_NUMERIC_3 },
+ { 0x07, KEY_NUMERIC_4 },
+ { 0x50, KEY_NUMERIC_5 },
+ { 0x54, KEY_NUMERIC_6 },
+ { 0x48, KEY_NUMERIC_7 },
+ { 0x4c, KEY_NUMERIC_8 },
+ { 0x58, KEY_NUMERIC_9 },
+ { 0x03, KEY_NUMERIC_0 },
{ 0x5e, KEY_OK },
{ 0x51, KEY_UP },
diff --git a/drivers/media/rc/keymaps/rc-gadmei-rm008z.c b/drivers/media/rc/keymaps/rc-gadmei-rm008z.c
index 4a0a9786914f..c630ef306f11 100644
--- a/drivers/media/rc/keymaps/rc-gadmei-rm008z.c
+++ b/drivers/media/rc/keymaps/rc-gadmei-rm008z.c
@@ -21,16 +21,16 @@ static struct rc_map_table gadmei_rm008z[] = {
{ 0x0b, KEY_AUDIO}, /* SV */
{ 0x0f, KEY_RADIO}, /* FM */
- { 0x00, KEY_1},
- { 0x01, KEY_2},
- { 0x02, KEY_3},
- { 0x03, KEY_4},
- { 0x04, KEY_5},
- { 0x05, KEY_6},
- { 0x06, KEY_7},
- { 0x07, KEY_8},
- { 0x08, KEY_9},
- { 0x09, KEY_0},
+ { 0x00, KEY_NUMERIC_1},
+ { 0x01, KEY_NUMERIC_2},
+ { 0x02, KEY_NUMERIC_3},
+ { 0x03, KEY_NUMERIC_4},
+ { 0x04, KEY_NUMERIC_5},
+ { 0x05, KEY_NUMERIC_6},
+ { 0x06, KEY_NUMERIC_7},
+ { 0x07, KEY_NUMERIC_8},
+ { 0x08, KEY_NUMERIC_9},
+ { 0x09, KEY_NUMERIC_0},
{ 0x0a, KEY_INFO}, /* OSD */
{ 0x1c, KEY_BACKSPACE}, /* LAST */
diff --git a/drivers/media/rc/keymaps/rc-genius-tvgo-a11mce.c b/drivers/media/rc/keymaps/rc-genius-tvgo-a11mce.c
index cc876a85cc31..c966c130b05d 100644
--- a/drivers/media/rc/keymaps/rc-genius-tvgo-a11mce.c
+++ b/drivers/media/rc/keymaps/rc-genius-tvgo-a11mce.c
@@ -15,16 +15,16 @@
static struct rc_map_table genius_tvgo_a11mce[] = {
/* Keys 0 to 9 */
- { 0x48, KEY_0 },
- { 0x09, KEY_1 },
- { 0x1d, KEY_2 },
- { 0x1f, KEY_3 },
- { 0x19, KEY_4 },
- { 0x1b, KEY_5 },
- { 0x11, KEY_6 },
- { 0x17, KEY_7 },
- { 0x12, KEY_8 },
- { 0x16, KEY_9 },
+ { 0x48, KEY_NUMERIC_0 },
+ { 0x09, KEY_NUMERIC_1 },
+ { 0x1d, KEY_NUMERIC_2 },
+ { 0x1f, KEY_NUMERIC_3 },
+ { 0x19, KEY_NUMERIC_4 },
+ { 0x1b, KEY_NUMERIC_5 },
+ { 0x11, KEY_NUMERIC_6 },
+ { 0x17, KEY_NUMERIC_7 },
+ { 0x12, KEY_NUMERIC_8 },
+ { 0x16, KEY_NUMERIC_9 },
{ 0x54, KEY_RECORD }, /* recording */
{ 0x06, KEY_MUTE }, /* mute */
diff --git a/drivers/media/rc/keymaps/rc-gotview7135.c b/drivers/media/rc/keymaps/rc-gotview7135.c
index 6b94bd39d977..0dc4ef36d76f 100644
--- a/drivers/media/rc/keymaps/rc-gotview7135.c
+++ b/drivers/media/rc/keymaps/rc-gotview7135.c
@@ -14,16 +14,16 @@ static struct rc_map_table gotview7135[] = {
{ 0x11, KEY_POWER },
{ 0x35, KEY_TV },
- { 0x1b, KEY_0 },
- { 0x29, KEY_1 },
- { 0x19, KEY_2 },
- { 0x39, KEY_3 },
- { 0x1f, KEY_4 },
- { 0x2c, KEY_5 },
- { 0x21, KEY_6 },
- { 0x24, KEY_7 },
- { 0x18, KEY_8 },
- { 0x2b, KEY_9 },
+ { 0x1b, KEY_NUMERIC_0 },
+ { 0x29, KEY_NUMERIC_1 },
+ { 0x19, KEY_NUMERIC_2 },
+ { 0x39, KEY_NUMERIC_3 },
+ { 0x1f, KEY_NUMERIC_4 },
+ { 0x2c, KEY_NUMERIC_5 },
+ { 0x21, KEY_NUMERIC_6 },
+ { 0x24, KEY_NUMERIC_7 },
+ { 0x18, KEY_NUMERIC_8 },
+ { 0x2b, KEY_NUMERIC_9 },
{ 0x3b, KEY_AGAIN }, /* LOOP */
{ 0x06, KEY_AUDIO },
{ 0x31, KEY_PRINT }, /* PREVIEW */
diff --git a/drivers/media/rc/keymaps/rc-hauppauge.c b/drivers/media/rc/keymaps/rc-hauppauge.c
index 582aa9012443..82552360c3c3 100644
--- a/drivers/media/rc/keymaps/rc-hauppauge.c
+++ b/drivers/media/rc/keymaps/rc-hauppauge.c
@@ -67,20 +67,20 @@ static struct rc_map_table rc5_hauppauge_new[] = {
{ 0x1e30, KEY_PAUSE }, /* pause */
{ 0x1e1e, KEY_NEXTSONG }, /* skip >| */
- { 0x1e01, KEY_1 },
- { 0x1e02, KEY_2 },
- { 0x1e03, KEY_3 },
+ { 0x1e01, KEY_NUMERIC_1 },
+ { 0x1e02, KEY_NUMERIC_2 },
+ { 0x1e03, KEY_NUMERIC_3 },
- { 0x1e04, KEY_4 },
- { 0x1e05, KEY_5 },
- { 0x1e06, KEY_6 },
+ { 0x1e04, KEY_NUMERIC_4 },
+ { 0x1e05, KEY_NUMERIC_5 },
+ { 0x1e06, KEY_NUMERIC_6 },
- { 0x1e07, KEY_7 },
- { 0x1e08, KEY_8 },
- { 0x1e09, KEY_9 },
+ { 0x1e07, KEY_NUMERIC_7 },
+ { 0x1e08, KEY_NUMERIC_8 },
+ { 0x1e09, KEY_NUMERIC_9 },
{ 0x1e0a, KEY_TEXT }, /* keypad asterisk as well */
- { 0x1e00, KEY_0 },
+ { 0x1e00, KEY_NUMERIC_0 },
{ 0x1e0e, KEY_SUBTITLE }, /* also the Pound key (#) */
{ 0x1e0b, KEY_RED }, /* red button */
@@ -96,16 +96,16 @@ static struct rc_map_table rc5_hauppauge_new[] = {
{ 0x1f3b, KEY_SELECT }, /* GO */
/* Keys 0 to 9 */
- { 0x1f00, KEY_0 },
- { 0x1f01, KEY_1 },
- { 0x1f02, KEY_2 },
- { 0x1f03, KEY_3 },
- { 0x1f04, KEY_4 },
- { 0x1f05, KEY_5 },
- { 0x1f06, KEY_6 },
- { 0x1f07, KEY_7 },
- { 0x1f08, KEY_8 },
- { 0x1f09, KEY_9 },
+ { 0x1f00, KEY_NUMERIC_0 },
+ { 0x1f01, KEY_NUMERIC_1 },
+ { 0x1f02, KEY_NUMERIC_2 },
+ { 0x1f03, KEY_NUMERIC_3 },
+ { 0x1f04, KEY_NUMERIC_4 },
+ { 0x1f05, KEY_NUMERIC_5 },
+ { 0x1f06, KEY_NUMERIC_6 },
+ { 0x1f07, KEY_NUMERIC_7 },
+ { 0x1f08, KEY_NUMERIC_8 },
+ { 0x1f09, KEY_NUMERIC_9 },
{ 0x1f1f, KEY_EXIT }, /* back/exit */
{ 0x1f0d, KEY_MENU },
@@ -140,16 +140,16 @@ static struct rc_map_table rc5_hauppauge_new[] = {
 * Keycodes for DSR-0112 remote bundled with Hauppauge MiniStick
* Keycodes start with address = 0x1d
*/
- { 0x1d00, KEY_0 },
- { 0x1d01, KEY_1 },
- { 0x1d02, KEY_2 },
- { 0x1d03, KEY_3 },
- { 0x1d04, KEY_4 },
- { 0x1d05, KEY_5 },
- { 0x1d06, KEY_6 },
- { 0x1d07, KEY_7 },
- { 0x1d08, KEY_8 },
- { 0x1d09, KEY_9 },
+ { 0x1d00, KEY_NUMERIC_0 },
+ { 0x1d01, KEY_NUMERIC_1 },
+ { 0x1d02, KEY_NUMERIC_2 },
+ { 0x1d03, KEY_NUMERIC_3 },
+ { 0x1d04, KEY_NUMERIC_4 },
+ { 0x1d05, KEY_NUMERIC_5 },
+ { 0x1d06, KEY_NUMERIC_6 },
+ { 0x1d07, KEY_NUMERIC_7 },
+ { 0x1d08, KEY_NUMERIC_8 },
+ { 0x1d09, KEY_NUMERIC_9 },
{ 0x1d0a, KEY_TEXT },
{ 0x1d0d, KEY_MENU },
{ 0x1d0f, KEY_MUTE },
@@ -190,16 +190,16 @@ static struct rc_map_table rc5_hauppauge_new[] = {
{ 0x1c17, KEY_RIGHT },
{ 0x1c25, KEY_OK },
- { 0x1c00, KEY_0 },
- { 0x1c01, KEY_1 },
- { 0x1c02, KEY_2 },
- { 0x1c03, KEY_3 },
- { 0x1c04, KEY_4 },
- { 0x1c05, KEY_5 },
- { 0x1c06, KEY_6 },
- { 0x1c07, KEY_7 },
- { 0x1c08, KEY_8 },
- { 0x1c09, KEY_9 },
+ { 0x1c00, KEY_NUMERIC_0 },
+ { 0x1c01, KEY_NUMERIC_1 },
+ { 0x1c02, KEY_NUMERIC_2 },
+ { 0x1c03, KEY_NUMERIC_3 },
+ { 0x1c04, KEY_NUMERIC_4 },
+ { 0x1c05, KEY_NUMERIC_5 },
+ { 0x1c06, KEY_NUMERIC_6 },
+ { 0x1c07, KEY_NUMERIC_7 },
+ { 0x1c08, KEY_NUMERIC_8 },
+ { 0x1c09, KEY_NUMERIC_9 },
{ 0x1c1f, KEY_EXIT }, /* BACK */
{ 0x1c0d, KEY_MENU },
@@ -233,6 +233,7 @@ static struct rc_map_table rc5_hauppauge_new[] = {
* This one also uses RC-5 protocol
* Keycodes start with address = 0x00
*/
+ { 0x000f, KEY_TV },
{ 0x001f, KEY_TV },
{ 0x0020, KEY_CHANNELUP },
{ 0x000c, KEY_RADIO },
@@ -245,20 +246,20 @@ static struct rc_map_table rc5_hauppauge_new[] = {
{ 0x0021, KEY_CHANNELDOWN },
{ 0x0022, KEY_VIDEO }, /* source */
- { 0x0001, KEY_1 },
- { 0x0002, KEY_2 },
- { 0x0003, KEY_3 },
+ { 0x0001, KEY_NUMERIC_1 },
+ { 0x0002, KEY_NUMERIC_2 },
+ { 0x0003, KEY_NUMERIC_3 },
- { 0x0004, KEY_4 },
- { 0x0005, KEY_5 },
- { 0x0006, KEY_6 },
+ { 0x0004, KEY_NUMERIC_4 },
+ { 0x0005, KEY_NUMERIC_5 },
+ { 0x0006, KEY_NUMERIC_6 },
- { 0x0007, KEY_7 },
- { 0x0008, KEY_8 },
- { 0x0009, KEY_9 },
+ { 0x0007, KEY_NUMERIC_7 },
+ { 0x0008, KEY_NUMERIC_8 },
+ { 0x0009, KEY_NUMERIC_9 },
{ 0x001e, KEY_RED }, /* Reserved */
- { 0x0000, KEY_0 },
+ { 0x0000, KEY_NUMERIC_0 },
{ 0x0026, KEY_SLEEP }, /* Minimize */
};
diff --git a/drivers/media/rc/keymaps/rc-hisi-poplar.c b/drivers/media/rc/keymaps/rc-hisi-poplar.c
index b4dbec6e70ce..49a18e916915 100644
--- a/drivers/media/rc/keymaps/rc-hisi-poplar.c
+++ b/drivers/media/rc/keymaps/rc-hisi-poplar.c
@@ -9,16 +9,16 @@
#include <media/rc-map.h>
static struct rc_map_table hisi_poplar_keymap[] = {
- { 0x0000b292, KEY_1},
- { 0x0000b293, KEY_2},
- { 0x0000b2cc, KEY_3},
- { 0x0000b28e, KEY_4},
- { 0x0000b28f, KEY_5},
- { 0x0000b2c8, KEY_6},
- { 0x0000b28a, KEY_7},
- { 0x0000b28b, KEY_8},
- { 0x0000b2c4, KEY_9},
- { 0x0000b287, KEY_0},
+ { 0x0000b292, KEY_NUMERIC_1},
+ { 0x0000b293, KEY_NUMERIC_2},
+ { 0x0000b2cc, KEY_NUMERIC_3},
+ { 0x0000b28e, KEY_NUMERIC_4},
+ { 0x0000b28f, KEY_NUMERIC_5},
+ { 0x0000b2c8, KEY_NUMERIC_6},
+ { 0x0000b28a, KEY_NUMERIC_7},
+ { 0x0000b28b, KEY_NUMERIC_8},
+ { 0x0000b2c4, KEY_NUMERIC_9},
+ { 0x0000b287, KEY_NUMERIC_0},
{ 0x0000b282, KEY_HOMEPAGE},
{ 0x0000b2ca, KEY_UP},
{ 0x0000b299, KEY_LEFT},
diff --git a/drivers/media/rc/keymaps/rc-hisi-tv-demo.c b/drivers/media/rc/keymaps/rc-hisi-tv-demo.c
index 8e25b40714f8..c73068b653f7 100644
--- a/drivers/media/rc/keymaps/rc-hisi-tv-demo.c
+++ b/drivers/media/rc/keymaps/rc-hisi-tv-demo.c
@@ -9,16 +9,16 @@
#include <media/rc-map.h>
static struct rc_map_table hisi_tv_demo_keymap[] = {
- { 0x00000092, KEY_1},
- { 0x00000093, KEY_2},
- { 0x000000cc, KEY_3},
- { 0x0000009f, KEY_4},
- { 0x0000008e, KEY_5},
- { 0x0000008f, KEY_6},
- { 0x000000c8, KEY_7},
- { 0x00000094, KEY_8},
- { 0x0000008a, KEY_9},
- { 0x0000008b, KEY_0},
+ { 0x00000092, KEY_NUMERIC_1},
+ { 0x00000093, KEY_NUMERIC_2},
+ { 0x000000cc, KEY_NUMERIC_3},
+ { 0x0000009f, KEY_NUMERIC_4},
+ { 0x0000008e, KEY_NUMERIC_5},
+ { 0x0000008f, KEY_NUMERIC_6},
+ { 0x000000c8, KEY_NUMERIC_7},
+ { 0x00000094, KEY_NUMERIC_8},
+ { 0x0000008a, KEY_NUMERIC_9},
+ { 0x0000008b, KEY_NUMERIC_0},
{ 0x000000ce, KEY_ENTER},
{ 0x000000ca, KEY_UP},
{ 0x00000099, KEY_LEFT},
diff --git a/drivers/media/rc/keymaps/rc-iodata-bctv7e.c b/drivers/media/rc/keymaps/rc-iodata-bctv7e.c
index 6ced43458f2a..9cc6ea0f4226 100644
--- a/drivers/media/rc/keymaps/rc-iodata-bctv7e.c
+++ b/drivers/media/rc/keymaps/rc-iodata-bctv7e.c
@@ -17,16 +17,16 @@ static struct rc_map_table iodata_bctv7e[] = {
{ 0x00, KEY_POWER },
/* Keys 0 to 9 */
- { 0x44, KEY_0 }, /* 10 */
- { 0x50, KEY_1 },
- { 0x30, KEY_2 },
- { 0x70, KEY_3 },
- { 0x48, KEY_4 },
- { 0x28, KEY_5 },
- { 0x68, KEY_6 },
- { 0x58, KEY_7 },
- { 0x38, KEY_8 },
- { 0x78, KEY_9 },
+ { 0x44, KEY_NUMERIC_0 }, /* 10 */
+ { 0x50, KEY_NUMERIC_1 },
+ { 0x30, KEY_NUMERIC_2 },
+ { 0x70, KEY_NUMERIC_3 },
+ { 0x48, KEY_NUMERIC_4 },
+ { 0x28, KEY_NUMERIC_5 },
+ { 0x68, KEY_NUMERIC_6 },
+ { 0x58, KEY_NUMERIC_7 },
+ { 0x38, KEY_NUMERIC_8 },
+ { 0x78, KEY_NUMERIC_9 },
{ 0x10, KEY_L }, /* Live */
{ 0x08, KEY_TIME }, /* Time Shift */
diff --git a/drivers/media/rc/keymaps/rc-it913x-v1.c b/drivers/media/rc/keymaps/rc-it913x-v1.c
index d8eaba9834c2..1e049f26a246 100644
--- a/drivers/media/rc/keymaps/rc-it913x-v1.c
+++ b/drivers/media/rc/keymaps/rc-it913x-v1.c
@@ -11,22 +11,22 @@
static struct rc_map_table it913x_v1_rc[] = {
/* Type 1 */
{ 0x61d601, KEY_VIDEO }, /* Source */
- { 0x61d602, KEY_3 },
+ { 0x61d602, KEY_NUMERIC_3 },
{ 0x61d603, KEY_POWER }, /* ShutDown */
- { 0x61d604, KEY_1 },
- { 0x61d605, KEY_5 },
- { 0x61d606, KEY_6 },
+ { 0x61d604, KEY_NUMERIC_1 },
+ { 0x61d605, KEY_NUMERIC_5 },
+ { 0x61d606, KEY_NUMERIC_6 },
{ 0x61d607, KEY_CHANNELDOWN }, /* CH- */
- { 0x61d608, KEY_2 },
+ { 0x61d608, KEY_NUMERIC_2 },
{ 0x61d609, KEY_CHANNELUP }, /* CH+ */
- { 0x61d60a, KEY_9 },
+ { 0x61d60a, KEY_NUMERIC_9 },
{ 0x61d60b, KEY_ZOOM }, /* Zoom */
- { 0x61d60c, KEY_7 },
- { 0x61d60d, KEY_8 },
+ { 0x61d60c, KEY_NUMERIC_7 },
+ { 0x61d60d, KEY_NUMERIC_8 },
{ 0x61d60e, KEY_VOLUMEUP }, /* Vol+ */
- { 0x61d60f, KEY_4 },
+ { 0x61d60f, KEY_NUMERIC_4 },
{ 0x61d610, KEY_ESC }, /* [back up arrow] */
- { 0x61d611, KEY_0 },
+ { 0x61d611, KEY_NUMERIC_0 },
{ 0x61d612, KEY_OK }, /* [enter arrow] */
{ 0x61d613, KEY_VOLUMEDOWN }, /* Vol- */
{ 0x61d614, KEY_RECORD }, /* Rec */
@@ -43,16 +43,16 @@ static struct rc_map_table it913x_v1_rc[] = {
{ 0x61d61f, KEY_BLUE },
{ 0x61d643, KEY_POWER2 }, /* [red power button] */
/* Type 2 - 20 buttons */
- { 0x807f0d, KEY_0 },
- { 0x807f04, KEY_1 },
- { 0x807f05, KEY_2 },
- { 0x807f06, KEY_3 },
- { 0x807f07, KEY_4 },
- { 0x807f08, KEY_5 },
- { 0x807f09, KEY_6 },
- { 0x807f0a, KEY_7 },
- { 0x807f1b, KEY_8 },
- { 0x807f1f, KEY_9 },
+ { 0x807f0d, KEY_NUMERIC_0 },
+ { 0x807f04, KEY_NUMERIC_1 },
+ { 0x807f05, KEY_NUMERIC_2 },
+ { 0x807f06, KEY_NUMERIC_3 },
+ { 0x807f07, KEY_NUMERIC_4 },
+ { 0x807f08, KEY_NUMERIC_5 },
+ { 0x807f09, KEY_NUMERIC_6 },
+ { 0x807f0a, KEY_NUMERIC_7 },
+ { 0x807f1b, KEY_NUMERIC_8 },
+ { 0x807f1f, KEY_NUMERIC_9 },
{ 0x807f12, KEY_POWER },
{ 0x807f01, KEY_MEDIA_REPEAT}, /* Recall */
{ 0x807f19, KEY_PAUSE }, /* Timeshift */
diff --git a/drivers/media/rc/keymaps/rc-it913x-v2.c b/drivers/media/rc/keymaps/rc-it913x-v2.c
index 26747a327d91..da3107da26b7 100644
--- a/drivers/media/rc/keymaps/rc-it913x-v2.c
+++ b/drivers/media/rc/keymaps/rc-it913x-v2.c
@@ -20,31 +20,31 @@ static struct rc_map_table it913x_v2_rc[] = {
 { 0x807f04, KEY_VOLUMEUP }, /* Volume+ */
{ 0x807f05, KEY_SCREEN }, /* FullScreen */
{ 0x807f06, KEY_VOLUMEDOWN }, /* Volume- */
- { 0x807f07, KEY_0 }, /* 0 */
+ { 0x807f07, KEY_NUMERIC_0 }, /* 0 */
{ 0x807f08, KEY_CHANNELDOWN }, /* Channel- */
{ 0x807f09, KEY_PREVIOUS }, /* Recall */
- { 0x807f0a, KEY_1 }, /* 1 */
- { 0x807f1b, KEY_2 }, /* 2 */
- { 0x807f1f, KEY_3 }, /* 3 */
- { 0x807f0c, KEY_4 }, /* 4 */
- { 0x807f0d, KEY_5 }, /* 5 */
- { 0x807f0e, KEY_6 }, /* 6 */
- { 0x807f00, KEY_7 }, /* 7 */
- { 0x807f0f, KEY_8 }, /* 8 */
- { 0x807f19, KEY_9 }, /* 9 */
+ { 0x807f0a, KEY_NUMERIC_1 }, /* 1 */
+ { 0x807f1b, KEY_NUMERIC_2 }, /* 2 */
+ { 0x807f1f, KEY_NUMERIC_3 }, /* 3 */
+ { 0x807f0c, KEY_NUMERIC_4 }, /* 4 */
+ { 0x807f0d, KEY_NUMERIC_5 }, /* 5 */
+ { 0x807f0e, KEY_NUMERIC_6 }, /* 6 */
+ { 0x807f00, KEY_NUMERIC_7 }, /* 7 */
+ { 0x807f0f, KEY_NUMERIC_8 }, /* 8 */
+ { 0x807f19, KEY_NUMERIC_9 }, /* 9 */
/* Type 2 */
/* keys stereo, snapshot unassigned */
- { 0x866b00, KEY_0 },
- { 0x866b01, KEY_1 },
- { 0x866b02, KEY_2 },
- { 0x866b03, KEY_3 },
- { 0x866b04, KEY_4 },
- { 0x866b05, KEY_5 },
- { 0x866b06, KEY_6 },
- { 0x866b07, KEY_7 },
- { 0x866b08, KEY_8 },
- { 0x866b09, KEY_9 },
+ { 0x866b00, KEY_NUMERIC_0 },
+ { 0x866b01, KEY_NUMERIC_1 },
+ { 0x866b02, KEY_NUMERIC_2 },
+ { 0x866b03, KEY_NUMERIC_3 },
+ { 0x866b04, KEY_NUMERIC_4 },
+ { 0x866b05, KEY_NUMERIC_5 },
+ { 0x866b06, KEY_NUMERIC_6 },
+ { 0x866b07, KEY_NUMERIC_7 },
+ { 0x866b08, KEY_NUMERIC_8 },
+ { 0x866b09, KEY_NUMERIC_9 },
{ 0x866b12, KEY_POWER },
{ 0x866b13, KEY_MUTE },
{ 0x866b0a, KEY_PREVIOUS }, /* Recall */
diff --git a/drivers/media/rc/keymaps/rc-kaiomy.c b/drivers/media/rc/keymaps/rc-kaiomy.c
index a00051339842..548760e86a2d 100644
--- a/drivers/media/rc/keymaps/rc-kaiomy.c
+++ b/drivers/media/rc/keymaps/rc-kaiomy.c
@@ -18,19 +18,19 @@ static struct rc_map_table kaiomy[] = {
{ 0x0b, KEY_ZOOM},
{ 0x03, KEY_POWER},
- { 0x04, KEY_1},
- { 0x08, KEY_2},
- { 0x02, KEY_3},
+ { 0x04, KEY_NUMERIC_1},
+ { 0x08, KEY_NUMERIC_2},
+ { 0x02, KEY_NUMERIC_3},
- { 0x0f, KEY_4},
- { 0x05, KEY_5},
- { 0x06, KEY_6},
+ { 0x0f, KEY_NUMERIC_4},
+ { 0x05, KEY_NUMERIC_5},
+ { 0x06, KEY_NUMERIC_6},
- { 0x0c, KEY_7},
- { 0x0d, KEY_8},
- { 0x0a, KEY_9},
+ { 0x0c, KEY_NUMERIC_7},
+ { 0x0d, KEY_NUMERIC_8},
+ { 0x0a, KEY_NUMERIC_9},
- { 0x11, KEY_0},
+ { 0x11, KEY_NUMERIC_0},
{ 0x09, KEY_CHANNELUP},
{ 0x07, KEY_CHANNELDOWN},
diff --git a/drivers/media/rc/keymaps/rc-kworld-315u.c b/drivers/media/rc/keymaps/rc-kworld-315u.c
index ed0e0586dea2..f5aed4b96019 100644
--- a/drivers/media/rc/keymaps/rc-kworld-315u.c
+++ b/drivers/media/rc/keymaps/rc-kworld-315u.c
@@ -17,23 +17,23 @@ static struct rc_map_table kworld_315u[] = {
{ 0x610b, KEY_ZOOM },
{ 0x6103, KEY_POWER2 }, /* shutdown */
- { 0x6104, KEY_1 },
- { 0x6108, KEY_2 },
- { 0x6102, KEY_3 },
+ { 0x6104, KEY_NUMERIC_1 },
+ { 0x6108, KEY_NUMERIC_2 },
+ { 0x6102, KEY_NUMERIC_3 },
{ 0x6109, KEY_CHANNELUP },
- { 0x610f, KEY_4 },
- { 0x6105, KEY_5 },
- { 0x6106, KEY_6 },
+ { 0x610f, KEY_NUMERIC_4 },
+ { 0x6105, KEY_NUMERIC_5 },
+ { 0x6106, KEY_NUMERIC_6 },
{ 0x6107, KEY_CHANNELDOWN },
- { 0x610c, KEY_7 },
- { 0x610d, KEY_8 },
- { 0x610a, KEY_9 },
+ { 0x610c, KEY_NUMERIC_7 },
+ { 0x610d, KEY_NUMERIC_8 },
+ { 0x610a, KEY_NUMERIC_9 },
{ 0x610e, KEY_VOLUMEUP },
{ 0x6110, KEY_LAST },
- { 0x6111, KEY_0 },
+ { 0x6111, KEY_NUMERIC_0 },
{ 0x6112, KEY_ENTER },
{ 0x6113, KEY_VOLUMEDOWN },
diff --git a/drivers/media/rc/keymaps/rc-kworld-pc150u.c b/drivers/media/rc/keymaps/rc-kworld-pc150u.c
index 9c60cf4f3bf2..7938761eb994 100644
--- a/drivers/media/rc/keymaps/rc-kworld-pc150u.c
+++ b/drivers/media/rc/keymaps/rc-kworld-pc150u.c
@@ -20,16 +20,16 @@ static struct rc_map_table kworld_pc150u[] = {
{ 0x16, KEY_EJECTCLOSECD }, /* -> ) */
{ 0x1d, KEY_POWER2 },
- { 0x00, KEY_1 },
- { 0x01, KEY_2 },
- { 0x02, KEY_3 },
- { 0x03, KEY_4 },
- { 0x04, KEY_5 },
- { 0x05, KEY_6 },
- { 0x06, KEY_7 },
- { 0x07, KEY_8 },
- { 0x08, KEY_9 },
- { 0x0a, KEY_0 },
+ { 0x00, KEY_NUMERIC_1 },
+ { 0x01, KEY_NUMERIC_2 },
+ { 0x02, KEY_NUMERIC_3 },
+ { 0x03, KEY_NUMERIC_4 },
+ { 0x04, KEY_NUMERIC_5 },
+ { 0x05, KEY_NUMERIC_6 },
+ { 0x06, KEY_NUMERIC_7 },
+ { 0x07, KEY_NUMERIC_8 },
+ { 0x08, KEY_NUMERIC_9 },
+ { 0x0a, KEY_NUMERIC_0 },
{ 0x09, KEY_AGAIN },
{ 0x14, KEY_MUTE },
diff --git a/drivers/media/rc/keymaps/rc-kworld-plus-tv-analog.c b/drivers/media/rc/keymaps/rc-kworld-plus-tv-analog.c
index db5edde3eeb1..75389b74e02d 100644
--- a/drivers/media/rc/keymaps/rc-kworld-plus-tv-analog.c
+++ b/drivers/media/rc/keymaps/rc-kworld-plus-tv-analog.c
@@ -17,16 +17,20 @@ static struct rc_map_table kworld_plus_tv_analog[] = {
{ 0x16, KEY_CLOSECD }, /* -> ) */
{ 0x1d, KEY_POWER2 },
- { 0x00, KEY_1 },
- { 0x01, KEY_2 },
- { 0x02, KEY_3 }, /* Two keys have the same code: 3 and left */
- { 0x03, KEY_4 }, /* Two keys have the same code: 3 and right */
- { 0x04, KEY_5 },
- { 0x05, KEY_6 },
- { 0x06, KEY_7 },
- { 0x07, KEY_8 },
- { 0x08, KEY_9 },
- { 0x0a, KEY_0 },
+ { 0x00, KEY_NUMERIC_1 },
+ { 0x01, KEY_NUMERIC_2 },
+
+ /* Two keys have the same code: 3 and left */
+ { 0x02, KEY_NUMERIC_3 },
+
+ /* Two keys have the same code: 4 and right */
+ { 0x03, KEY_NUMERIC_4 },
+ { 0x04, KEY_NUMERIC_5 },
+ { 0x05, KEY_NUMERIC_6 },
+ { 0x06, KEY_NUMERIC_7 },
+ { 0x07, KEY_NUMERIC_8 },
+ { 0x08, KEY_NUMERIC_9 },
+ { 0x0a, KEY_NUMERIC_0 },
{ 0x09, KEY_AGAIN },
{ 0x14, KEY_MUTE },
diff --git a/drivers/media/rc/keymaps/rc-leadtek-y04g0051.c b/drivers/media/rc/keymaps/rc-leadtek-y04g0051.c
index afee942e0edf..2f2b981e1995 100644
--- a/drivers/media/rc/keymaps/rc-leadtek-y04g0051.c
+++ b/drivers/media/rc/keymaps/rc-leadtek-y04g0051.c
@@ -12,20 +12,20 @@ static struct rc_map_table leadtek_y04g0051[] = {
{ 0x0300, KEY_POWER2 },
{ 0x0303, KEY_SCREEN },
{ 0x0304, KEY_RIGHT },
- { 0x0305, KEY_1 },
- { 0x0306, KEY_2 },
- { 0x0307, KEY_3 },
+ { 0x0305, KEY_NUMERIC_1 },
+ { 0x0306, KEY_NUMERIC_2 },
+ { 0x0307, KEY_NUMERIC_3 },
{ 0x0308, KEY_LEFT },
- { 0x0309, KEY_4 },
- { 0x030a, KEY_5 },
- { 0x030b, KEY_6 },
+ { 0x0309, KEY_NUMERIC_4 },
+ { 0x030a, KEY_NUMERIC_5 },
+ { 0x030b, KEY_NUMERIC_6 },
{ 0x030c, KEY_UP },
- { 0x030d, KEY_7 },
- { 0x030e, KEY_8 },
- { 0x030f, KEY_9 },
+ { 0x030d, KEY_NUMERIC_7 },
+ { 0x030e, KEY_NUMERIC_8 },
+ { 0x030f, KEY_NUMERIC_9 },
{ 0x0310, KEY_DOWN },
{ 0x0311, KEY_AGAIN },
- { 0x0312, KEY_0 },
+ { 0x0312, KEY_NUMERIC_0 },
{ 0x0313, KEY_OK }, /* 1st ok */
{ 0x0314, KEY_MUTE },
{ 0x0316, KEY_OK }, /* 2nd ok */
diff --git a/drivers/media/rc/keymaps/rc-lme2510.c b/drivers/media/rc/keymaps/rc-lme2510.c
index b0901a8a72a6..181e48f0cb67 100644
--- a/drivers/media/rc/keymaps/rc-lme2510.c
+++ b/drivers/media/rc/keymaps/rc-lme2510.c
@@ -10,16 +10,16 @@
static struct rc_map_table lme2510_rc[] = {
/* Type 1 - 26 buttons */
- { 0xef12ba45, KEY_0 },
- { 0xef12a05f, KEY_1 },
- { 0xef12af50, KEY_2 },
- { 0xef12a25d, KEY_3 },
- { 0xef12be41, KEY_4 },
- { 0xef12f50a, KEY_5 },
- { 0xef12bd42, KEY_6 },
- { 0xef12b847, KEY_7 },
- { 0xef12b649, KEY_8 },
- { 0xef12fa05, KEY_9 },
+ { 0xef12ba45, KEY_NUMERIC_0 },
+ { 0xef12a05f, KEY_NUMERIC_1 },
+ { 0xef12af50, KEY_NUMERIC_2 },
+ { 0xef12a25d, KEY_NUMERIC_3 },
+ { 0xef12be41, KEY_NUMERIC_4 },
+ { 0xef12f50a, KEY_NUMERIC_5 },
+ { 0xef12bd42, KEY_NUMERIC_6 },
+ { 0xef12b847, KEY_NUMERIC_7 },
+ { 0xef12b649, KEY_NUMERIC_8 },
+ { 0xef12fa05, KEY_NUMERIC_9 },
{ 0xef12bc43, KEY_POWER },
{ 0xef12b946, KEY_SUBTITLE },
{ 0xef12f906, KEY_PAUSE },
@@ -37,16 +37,16 @@ static struct rc_map_table lme2510_rc[] = {
{ 0xef12f807, KEY_EPG },
{ 0xef12fe01, KEY_STOP },
/* Type 2 - 20 buttons */
- { 0xff40ea15, KEY_0 },
- { 0xff40f708, KEY_1 },
- { 0xff40f609, KEY_2 },
- { 0xff40f50a, KEY_3 },
- { 0xff40f30c, KEY_4 },
- { 0xff40f20d, KEY_5 },
- { 0xff40f10e, KEY_6 },
- { 0xff40ef10, KEY_7 },
- { 0xff40ee11, KEY_8 },
- { 0xff40ed12, KEY_9 },
+ { 0xff40ea15, KEY_NUMERIC_0 },
+ { 0xff40f708, KEY_NUMERIC_1 },
+ { 0xff40f609, KEY_NUMERIC_2 },
+ { 0xff40f50a, KEY_NUMERIC_3 },
+ { 0xff40f30c, KEY_NUMERIC_4 },
+ { 0xff40f20d, KEY_NUMERIC_5 },
+ { 0xff40f10e, KEY_NUMERIC_6 },
+ { 0xff40ef10, KEY_NUMERIC_7 },
+ { 0xff40ee11, KEY_NUMERIC_8 },
+ { 0xff40ed12, KEY_NUMERIC_9 },
{ 0xff40ff00, KEY_POWER },
{ 0xff40fb04, KEY_MEDIA_REPEAT}, /* Recall */
{ 0xff40e51a, KEY_PAUSE }, /* Timeshift */
@@ -58,16 +58,16 @@ static struct rc_map_table lme2510_rc[] = {
{ 0xff40e718, KEY_RECORD },
{ 0xff40e916, KEY_STOP },
/* Type 3 - 20 buttons */
- { 0xff00e31c, KEY_0 },
- { 0xff00f807, KEY_1 },
- { 0xff00ea15, KEY_2 },
- { 0xff00f609, KEY_3 },
- { 0xff00e916, KEY_4 },
- { 0xff00e619, KEY_5 },
- { 0xff00f20d, KEY_6 },
- { 0xff00f30c, KEY_7 },
- { 0xff00e718, KEY_8 },
- { 0xff00a15e, KEY_9 },
+ { 0xff00e31c, KEY_NUMERIC_0 },
+ { 0xff00f807, KEY_NUMERIC_1 },
+ { 0xff00ea15, KEY_NUMERIC_2 },
+ { 0xff00f609, KEY_NUMERIC_3 },
+ { 0xff00e916, KEY_NUMERIC_4 },
+ { 0xff00e619, KEY_NUMERIC_5 },
+ { 0xff00f20d, KEY_NUMERIC_6 },
+ { 0xff00f30c, KEY_NUMERIC_7 },
+ { 0xff00e718, KEY_NUMERIC_8 },
+ { 0xff00a15e, KEY_NUMERIC_9 },
{ 0xff00ba45, KEY_POWER },
{ 0xff00bb44, KEY_MEDIA_REPEAT}, /* Recall */
{ 0xff00b54a, KEY_PAUSE }, /* Timeshift */
diff --git a/drivers/media/rc/keymaps/rc-manli.c b/drivers/media/rc/keymaps/rc-manli.c
index 5e9a49e2dd6a..e884aeb5c3d6 100644
--- a/drivers/media/rc/keymaps/rc-manli.c
+++ b/drivers/media/rc/keymaps/rc-manli.c
@@ -35,22 +35,22 @@ static struct rc_map_table manli[] = {
* 0x07 0x08 0x09 *
* 7 8 9 *
* */
- { 0x01, KEY_1 },
- { 0x02, KEY_2 },
- { 0x03, KEY_3 },
- { 0x04, KEY_4 },
- { 0x05, KEY_5 },
- { 0x06, KEY_6 },
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x09, KEY_9 },
+ { 0x01, KEY_NUMERIC_1 },
+ { 0x02, KEY_NUMERIC_2 },
+ { 0x03, KEY_NUMERIC_3 },
+ { 0x04, KEY_NUMERIC_4 },
+ { 0x05, KEY_NUMERIC_5 },
+ { 0x06, KEY_NUMERIC_6 },
+ { 0x07, KEY_NUMERIC_7 },
+ { 0x08, KEY_NUMERIC_8 },
+ { 0x09, KEY_NUMERIC_9 },
/* 0x0a 0x00 0x17 *
* RECALL 0 +100 *
* PLUS *
* */
{ 0x0a, KEY_AGAIN }, /*XXX KEY_REWIND? */
- { 0x00, KEY_0 },
+ { 0x00, KEY_NUMERIC_0 },
{ 0x17, KEY_DIGITS }, /*XXX*/
/* 0x14 0x10 *
diff --git a/drivers/media/rc/keymaps/rc-medion-x10-digitainer.c b/drivers/media/rc/keymaps/rc-medion-x10-digitainer.c
index 407706b246f2..bf74912859b3 100644
--- a/drivers/media/rc/keymaps/rc-medion-x10-digitainer.c
+++ b/drivers/media/rc/keymaps/rc-medion-x10-digitainer.c
@@ -63,16 +63,16 @@ static struct rc_map_table medion_x10_digitainer[] = {
{ 0x27, KEY_RECORD },
{ 0x26, KEY_FORWARD },
- { 0x0d, KEY_1 },
- { 0x0e, KEY_2 },
- { 0x0f, KEY_3 },
- { 0x10, KEY_4 },
- { 0x11, KEY_5 },
- { 0x12, KEY_6 },
- { 0x13, KEY_7 },
- { 0x14, KEY_8 },
- { 0x15, KEY_9 },
- { 0x17, KEY_0 },
+ { 0x0d, KEY_NUMERIC_1 },
+ { 0x0e, KEY_NUMERIC_2 },
+ { 0x0f, KEY_NUMERIC_3 },
+ { 0x10, KEY_NUMERIC_4 },
+ { 0x11, KEY_NUMERIC_5 },
+ { 0x12, KEY_NUMERIC_6 },
+ { 0x13, KEY_NUMERIC_7 },
+ { 0x14, KEY_NUMERIC_8 },
+ { 0x15, KEY_NUMERIC_9 },
+ { 0x17, KEY_NUMERIC_0 },
/* these do not actually exist on this remote, but these scancodes
* exist on all other Medion X10 remotes and adding them here allows
diff --git a/drivers/media/rc/keymaps/rc-medion-x10-or2x.c b/drivers/media/rc/keymaps/rc-medion-x10-or2x.c
index 2ff5c454304d..293045c9aaa5 100644
--- a/drivers/media/rc/keymaps/rc-medion-x10-or2x.c
+++ b/drivers/media/rc/keymaps/rc-medion-x10-or2x.c
@@ -52,16 +52,16 @@ static struct rc_map_table medion_x10_or2x[] = {
{ 0x29, KEY_PAUSE },
{ 0x27, KEY_RECORD },
- { 0x0d, KEY_1 },
- { 0x0e, KEY_2 },
- { 0x0f, KEY_3 },
- { 0x10, KEY_4 },
- { 0x11, KEY_5 },
- { 0x12, KEY_6 },
- { 0x13, KEY_7 },
- { 0x14, KEY_8 },
- { 0x15, KEY_9 },
- { 0x17, KEY_0 },
+ { 0x0d, KEY_NUMERIC_1 },
+ { 0x0e, KEY_NUMERIC_2 },
+ { 0x0f, KEY_NUMERIC_3 },
+ { 0x10, KEY_NUMERIC_4 },
+ { 0x11, KEY_NUMERIC_5 },
+ { 0x12, KEY_NUMERIC_6 },
+ { 0x13, KEY_NUMERIC_7 },
+ { 0x14, KEY_NUMERIC_8 },
+ { 0x15, KEY_NUMERIC_9 },
+ { 0x17, KEY_NUMERIC_0 },
{ 0x30, KEY_CLEAR },
{ 0x36, KEY_ENTER },
{ 0x37, KEY_NUMERIC_STAR },
diff --git a/drivers/media/rc/keymaps/rc-medion-x10.c b/drivers/media/rc/keymaps/rc-medion-x10.c
index 66b962dc982b..843dba3bad73 100644
--- a/drivers/media/rc/keymaps/rc-medion-x10.c
+++ b/drivers/media/rc/keymaps/rc-medion-x10.c
@@ -37,16 +37,16 @@ static struct rc_map_table medion_x10[] = {
{ 0x35, KEY_BLUE }, /* blue */
{ 0x16, KEY_TEXT }, /* TXT */
- { 0x0d, KEY_1 },
- { 0x0e, KEY_2 },
- { 0x0f, KEY_3 },
- { 0x10, KEY_4 },
- { 0x11, KEY_5 },
- { 0x12, KEY_6 },
- { 0x13, KEY_7 },
- { 0x14, KEY_8 },
- { 0x15, KEY_9 },
- { 0x17, KEY_0 },
+ { 0x0d, KEY_NUMERIC_1 },
+ { 0x0e, KEY_NUMERIC_2 },
+ { 0x0f, KEY_NUMERIC_3 },
+ { 0x10, KEY_NUMERIC_4 },
+ { 0x11, KEY_NUMERIC_5 },
+ { 0x12, KEY_NUMERIC_6 },
+ { 0x13, KEY_NUMERIC_7 },
+ { 0x14, KEY_NUMERIC_8 },
+ { 0x15, KEY_NUMERIC_9 },
+ { 0x17, KEY_NUMERIC_0 },
{ 0x1c, KEY_SEARCH }, /* TV/RAD, CH SRC */
{ 0x20, KEY_DELETE }, /* DELETE */
diff --git a/drivers/media/rc/keymaps/rc-msi-digivox-ii.c b/drivers/media/rc/keymaps/rc-msi-digivox-ii.c
index d361554e8a2d..ab001d2dac67 100644
--- a/drivers/media/rc/keymaps/rc-msi-digivox-ii.c
+++ b/drivers/media/rc/keymaps/rc-msi-digivox-ii.c
@@ -9,23 +9,23 @@
#include <linux/module.h>
static struct rc_map_table msi_digivox_ii[] = {
- { 0x0302, KEY_2 },
+ { 0x0302, KEY_NUMERIC_2 },
{ 0x0303, KEY_UP }, /* up */
- { 0x0304, KEY_3 },
+ { 0x0304, KEY_NUMERIC_3 },
{ 0x0305, KEY_CHANNELDOWN },
- { 0x0308, KEY_5 },
- { 0x0309, KEY_0 },
- { 0x030b, KEY_8 },
+ { 0x0308, KEY_NUMERIC_5 },
+ { 0x0309, KEY_NUMERIC_0 },
+ { 0x030b, KEY_NUMERIC_8 },
{ 0x030d, KEY_DOWN }, /* down */
- { 0x0310, KEY_9 },
- { 0x0311, KEY_7 },
+ { 0x0310, KEY_NUMERIC_9 },
+ { 0x0311, KEY_NUMERIC_7 },
{ 0x0314, KEY_VOLUMEUP },
{ 0x0315, KEY_CHANNELUP },
{ 0x0316, KEY_OK },
{ 0x0317, KEY_POWER2 },
- { 0x031a, KEY_1 },
- { 0x031c, KEY_4 },
- { 0x031d, KEY_6 },
+ { 0x031a, KEY_NUMERIC_1 },
+ { 0x031c, KEY_NUMERIC_4 },
+ { 0x031d, KEY_NUMERIC_6 },
{ 0x031f, KEY_VOLUMEDOWN },
};
diff --git a/drivers/media/rc/keymaps/rc-msi-digivox-iii.c b/drivers/media/rc/keymaps/rc-msi-digivox-iii.c
index 31d41564a438..6129d3e925e5 100644
--- a/drivers/media/rc/keymaps/rc-msi-digivox-iii.c
+++ b/drivers/media/rc/keymaps/rc-msi-digivox-iii.c
@@ -14,22 +14,22 @@
since rc-kworld-315u.c lacks NEC extended address byte. */
static struct rc_map_table msi_digivox_iii[] = {
{ 0x61d601, KEY_VIDEO }, /* Source */
- { 0x61d602, KEY_3 },
+ { 0x61d602, KEY_NUMERIC_3 },
{ 0x61d603, KEY_POWER }, /* ShutDown */
- { 0x61d604, KEY_1 },
- { 0x61d605, KEY_5 },
- { 0x61d606, KEY_6 },
+ { 0x61d604, KEY_NUMERIC_1 },
+ { 0x61d605, KEY_NUMERIC_5 },
+ { 0x61d606, KEY_NUMERIC_6 },
{ 0x61d607, KEY_CHANNELDOWN }, /* CH- */
- { 0x61d608, KEY_2 },
+ { 0x61d608, KEY_NUMERIC_2 },
{ 0x61d609, KEY_CHANNELUP }, /* CH+ */
- { 0x61d60a, KEY_9 },
+ { 0x61d60a, KEY_NUMERIC_9 },
{ 0x61d60b, KEY_ZOOM }, /* Zoom */
- { 0x61d60c, KEY_7 },
- { 0x61d60d, KEY_8 },
+ { 0x61d60c, KEY_NUMERIC_7 },
+ { 0x61d60d, KEY_NUMERIC_8 },
{ 0x61d60e, KEY_VOLUMEUP }, /* Vol+ */
- { 0x61d60f, KEY_4 },
+ { 0x61d60f, KEY_NUMERIC_4 },
{ 0x61d610, KEY_ESC }, /* [back up arrow] */
- { 0x61d611, KEY_0 },
+ { 0x61d611, KEY_NUMERIC_0 },
{ 0x61d612, KEY_OK }, /* [enter arrow] */
{ 0x61d613, KEY_VOLUMEDOWN }, /* Vol- */
{ 0x61d614, KEY_RECORD }, /* Rec */
diff --git a/drivers/media/rc/keymaps/rc-msi-tvanywhere-plus.c b/drivers/media/rc/keymaps/rc-msi-tvanywhere-plus.c
index 78cf2c286083..42270a7ef3ee 100644
--- a/drivers/media/rc/keymaps/rc-msi-tvanywhere-plus.c
+++ b/drivers/media/rc/keymaps/rc-msi-tvanywhere-plus.c
@@ -44,16 +44,16 @@ static struct rc_map_table msi_tvanywhere_plus[] = {
<< FUNC >> RESET
*/
- { 0x01, KEY_1 }, /* 1 */
- { 0x0b, KEY_2 }, /* 2 */
- { 0x1b, KEY_3 }, /* 3 */
- { 0x05, KEY_4 }, /* 4 */
- { 0x09, KEY_5 }, /* 5 */
- { 0x15, KEY_6 }, /* 6 */
- { 0x06, KEY_7 }, /* 7 */
- { 0x0a, KEY_8 }, /* 8 */
- { 0x12, KEY_9 }, /* 9 */
- { 0x02, KEY_0 }, /* 0 */
+ { 0x01, KEY_NUMERIC_1 }, /* 1 */
+ { 0x0b, KEY_NUMERIC_2 }, /* 2 */
+ { 0x1b, KEY_NUMERIC_3 }, /* 3 */
+ { 0x05, KEY_NUMERIC_4 }, /* 4 */
+ { 0x09, KEY_NUMERIC_5 }, /* 5 */
+ { 0x15, KEY_NUMERIC_6 }, /* 6 */
+ { 0x06, KEY_NUMERIC_7 }, /* 7 */
+ { 0x0a, KEY_NUMERIC_8 }, /* 8 */
+ { 0x12, KEY_NUMERIC_9 }, /* 9 */
+ { 0x02, KEY_NUMERIC_0 }, /* 0 */
{ 0x10, KEY_KPPLUS }, /* + */
{ 0x13, KEY_AGAIN }, /* Recall */
diff --git a/drivers/media/rc/keymaps/rc-msi-tvanywhere.c b/drivers/media/rc/keymaps/rc-msi-tvanywhere.c
index 359a57be3a66..45793c641009 100644
--- a/drivers/media/rc/keymaps/rc-msi-tvanywhere.c
+++ b/drivers/media/rc/keymaps/rc-msi-tvanywhere.c
@@ -12,16 +12,16 @@
static struct rc_map_table msi_tvanywhere[] = {
/* Keys 0 to 9 */
- { 0x00, KEY_0 },
- { 0x01, KEY_1 },
- { 0x02, KEY_2 },
- { 0x03, KEY_3 },
- { 0x04, KEY_4 },
- { 0x05, KEY_5 },
- { 0x06, KEY_6 },
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x09, KEY_9 },
+ { 0x00, KEY_NUMERIC_0 },
+ { 0x01, KEY_NUMERIC_1 },
+ { 0x02, KEY_NUMERIC_2 },
+ { 0x03, KEY_NUMERIC_3 },
+ { 0x04, KEY_NUMERIC_4 },
+ { 0x05, KEY_NUMERIC_5 },
+ { 0x06, KEY_NUMERIC_6 },
+ { 0x07, KEY_NUMERIC_7 },
+ { 0x08, KEY_NUMERIC_8 },
+ { 0x09, KEY_NUMERIC_9 },
{ 0x0c, KEY_MUTE },
{ 0x0f, KEY_SCREEN }, /* Full Screen */
diff --git a/drivers/media/rc/keymaps/rc-nebula.c b/drivers/media/rc/keymaps/rc-nebula.c
index 17d7c1b324da..2dc6061f69b3 100644
--- a/drivers/media/rc/keymaps/rc-nebula.c
+++ b/drivers/media/rc/keymaps/rc-nebula.c
@@ -9,16 +9,16 @@
#include <linux/module.h>
static struct rc_map_table nebula[] = {
- { 0x0000, KEY_0 },
- { 0x0001, KEY_1 },
- { 0x0002, KEY_2 },
- { 0x0003, KEY_3 },
- { 0x0004, KEY_4 },
- { 0x0005, KEY_5 },
- { 0x0006, KEY_6 },
- { 0x0007, KEY_7 },
- { 0x0008, KEY_8 },
- { 0x0009, KEY_9 },
+ { 0x0000, KEY_NUMERIC_0 },
+ { 0x0001, KEY_NUMERIC_1 },
+ { 0x0002, KEY_NUMERIC_2 },
+ { 0x0003, KEY_NUMERIC_3 },
+ { 0x0004, KEY_NUMERIC_4 },
+ { 0x0005, KEY_NUMERIC_5 },
+ { 0x0006, KEY_NUMERIC_6 },
+ { 0x0007, KEY_NUMERIC_7 },
+ { 0x0008, KEY_NUMERIC_8 },
+ { 0x0009, KEY_NUMERIC_9 },
{ 0x000a, KEY_TV },
{ 0x000b, KEY_AUX },
{ 0x000c, KEY_DVD },
diff --git a/drivers/media/rc/keymaps/rc-nec-terratec-cinergy-xs.c b/drivers/media/rc/keymaps/rc-nec-terratec-cinergy-xs.c
index 76beef44a8d7..b12c54d47db3 100644
--- a/drivers/media/rc/keymaps/rc-nec-terratec-cinergy-xs.c
+++ b/drivers/media/rc/keymaps/rc-nec-terratec-cinergy-xs.c
@@ -23,16 +23,16 @@ static struct rc_map_table nec_terratec_cinergy_xs[] = {
{ 0x1444, KEY_TEXT}, /* Teletext */
{ 0x1445, KEY_DELETE},
- { 0x1402, KEY_1},
- { 0x1403, KEY_2},
- { 0x1404, KEY_3},
- { 0x1405, KEY_4},
- { 0x1406, KEY_5},
- { 0x1407, KEY_6},
- { 0x1408, KEY_7},
- { 0x1409, KEY_8},
- { 0x140a, KEY_9},
- { 0x140c, KEY_0},
+ { 0x1402, KEY_NUMERIC_1},
+ { 0x1403, KEY_NUMERIC_2},
+ { 0x1404, KEY_NUMERIC_3},
+ { 0x1405, KEY_NUMERIC_4},
+ { 0x1406, KEY_NUMERIC_5},
+ { 0x1407, KEY_NUMERIC_6},
+ { 0x1408, KEY_NUMERIC_7},
+ { 0x1409, KEY_NUMERIC_8},
+ { 0x140a, KEY_NUMERIC_9},
+ { 0x140c, KEY_NUMERIC_0},
{ 0x140b, KEY_TUNER}, /* AV */
{ 0x140d, KEY_MODE}, /* A.B */
@@ -79,16 +79,16 @@ static struct rc_map_table nec_terratec_cinergy_xs[] = {
/* Terratec Black IR, with most keys in black */
{ 0x04eb01, KEY_POWER2},
- { 0x04eb02, KEY_1},
- { 0x04eb03, KEY_2},
- { 0x04eb04, KEY_3},
- { 0x04eb05, KEY_4},
- { 0x04eb06, KEY_5},
- { 0x04eb07, KEY_6},
- { 0x04eb08, KEY_7},
- { 0x04eb09, KEY_8},
- { 0x04eb0a, KEY_9},
- { 0x04eb0c, KEY_0},
+ { 0x04eb02, KEY_NUMERIC_1},
+ { 0x04eb03, KEY_NUMERIC_2},
+ { 0x04eb04, KEY_NUMERIC_3},
+ { 0x04eb05, KEY_NUMERIC_4},
+ { 0x04eb06, KEY_NUMERIC_5},
+ { 0x04eb07, KEY_NUMERIC_6},
+ { 0x04eb08, KEY_NUMERIC_7},
+ { 0x04eb09, KEY_NUMERIC_8},
+ { 0x04eb0a, KEY_NUMERIC_9},
+ { 0x04eb0c, KEY_NUMERIC_0},
{ 0x04eb0b, KEY_TEXT}, /* TXT */
{ 0x04eb0d, KEY_REFRESH}, /* Refresh */
diff --git a/drivers/media/rc/keymaps/rc-norwood.c b/drivers/media/rc/keymaps/rc-norwood.c
index 3765705c5549..acd5b1ccf8d0 100644
--- a/drivers/media/rc/keymaps/rc-norwood.c
+++ b/drivers/media/rc/keymaps/rc-norwood.c
@@ -14,16 +14,16 @@
static struct rc_map_table norwood[] = {
/* Keys 0 to 9 */
- { 0x20, KEY_0 },
- { 0x21, KEY_1 },
- { 0x22, KEY_2 },
- { 0x23, KEY_3 },
- { 0x24, KEY_4 },
- { 0x25, KEY_5 },
- { 0x26, KEY_6 },
- { 0x27, KEY_7 },
- { 0x28, KEY_8 },
- { 0x29, KEY_9 },
+ { 0x20, KEY_NUMERIC_0 },
+ { 0x21, KEY_NUMERIC_1 },
+ { 0x22, KEY_NUMERIC_2 },
+ { 0x23, KEY_NUMERIC_3 },
+ { 0x24, KEY_NUMERIC_4 },
+ { 0x25, KEY_NUMERIC_5 },
+ { 0x26, KEY_NUMERIC_6 },
+ { 0x27, KEY_NUMERIC_7 },
+ { 0x28, KEY_NUMERIC_8 },
+ { 0x29, KEY_NUMERIC_9 },
{ 0x78, KEY_VIDEO }, /* Video Source */
{ 0x2c, KEY_EXIT }, /* Open/Close software */
diff --git a/drivers/media/rc/keymaps/rc-npgtech.c b/drivers/media/rc/keymaps/rc-npgtech.c
index abaf7f6d4cb7..98a755e8bc18 100644
--- a/drivers/media/rc/keymaps/rc-npgtech.c
+++ b/drivers/media/rc/keymaps/rc-npgtech.c
@@ -12,16 +12,16 @@ static struct rc_map_table npgtech[] = {
{ 0x1d, KEY_SWITCHVIDEOMODE }, /* switch inputs */
{ 0x2a, KEY_FRONT },
- { 0x3e, KEY_1 },
- { 0x02, KEY_2 },
- { 0x06, KEY_3 },
- { 0x0a, KEY_4 },
- { 0x0e, KEY_5 },
- { 0x12, KEY_6 },
- { 0x16, KEY_7 },
- { 0x1a, KEY_8 },
- { 0x1e, KEY_9 },
- { 0x3a, KEY_0 },
+ { 0x3e, KEY_NUMERIC_1 },
+ { 0x02, KEY_NUMERIC_2 },
+ { 0x06, KEY_NUMERIC_3 },
+ { 0x0a, KEY_NUMERIC_4 },
+ { 0x0e, KEY_NUMERIC_5 },
+ { 0x12, KEY_NUMERIC_6 },
+ { 0x16, KEY_NUMERIC_7 },
+ { 0x1a, KEY_NUMERIC_8 },
+ { 0x1e, KEY_NUMERIC_9 },
+ { 0x3a, KEY_NUMERIC_0 },
{ 0x22, KEY_NUMLOCK }, /* -/-- */
{ 0x20, KEY_REFRESH },
diff --git a/drivers/media/rc/keymaps/rc-pctv-sedna.c b/drivers/media/rc/keymaps/rc-pctv-sedna.c
index e3462c5c8984..c3bb1ecdd0ca 100644
--- a/drivers/media/rc/keymaps/rc-pctv-sedna.c
+++ b/drivers/media/rc/keymaps/rc-pctv-sedna.c
@@ -14,16 +14,16 @@
Also for the remote bundled with Kozumi KTV-01C card */
static struct rc_map_table pctv_sedna[] = {
- { 0x00, KEY_0 },
- { 0x01, KEY_1 },
- { 0x02, KEY_2 },
- { 0x03, KEY_3 },
- { 0x04, KEY_4 },
- { 0x05, KEY_5 },
- { 0x06, KEY_6 },
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x09, KEY_9 },
+ { 0x00, KEY_NUMERIC_0 },
+ { 0x01, KEY_NUMERIC_1 },
+ { 0x02, KEY_NUMERIC_2 },
+ { 0x03, KEY_NUMERIC_3 },
+ { 0x04, KEY_NUMERIC_4 },
+ { 0x05, KEY_NUMERIC_5 },
+ { 0x06, KEY_NUMERIC_6 },
+ { 0x07, KEY_NUMERIC_7 },
+ { 0x08, KEY_NUMERIC_8 },
+ { 0x09, KEY_NUMERIC_9 },
{ 0x0a, KEY_AGAIN }, /* Recall */
{ 0x0b, KEY_CHANNELUP },
diff --git a/drivers/media/rc/keymaps/rc-pinnacle-color.c b/drivers/media/rc/keymaps/rc-pinnacle-color.c
index 63c2851e9dfe..b862725635b9 100644
--- a/drivers/media/rc/keymaps/rc-pinnacle-color.c
+++ b/drivers/media/rc/keymaps/rc-pinnacle-color.c
@@ -49,16 +49,16 @@ static struct rc_map_table pinnacle_color[] = {
{ 0x4c, KEY_STOP },
{ 0x54, KEY_NEXT },
- { 0x69, KEY_0 },
- { 0x6a, KEY_1 },
- { 0x6b, KEY_2 },
- { 0x6c, KEY_3 },
- { 0x6d, KEY_4 },
- { 0x6e, KEY_5 },
- { 0x6f, KEY_6 },
- { 0x70, KEY_7 },
- { 0x71, KEY_8 },
- { 0x72, KEY_9 },
+ { 0x69, KEY_NUMERIC_0 },
+ { 0x6a, KEY_NUMERIC_1 },
+ { 0x6b, KEY_NUMERIC_2 },
+ { 0x6c, KEY_NUMERIC_3 },
+ { 0x6d, KEY_NUMERIC_4 },
+ { 0x6e, KEY_NUMERIC_5 },
+ { 0x6f, KEY_NUMERIC_6 },
+ { 0x70, KEY_NUMERIC_7 },
+ { 0x71, KEY_NUMERIC_8 },
+ { 0x72, KEY_NUMERIC_9 },
{ 0x74, KEY_CHANNEL },
{ 0x0a, KEY_BACKSPACE },
diff --git a/drivers/media/rc/keymaps/rc-pinnacle-grey.c b/drivers/media/rc/keymaps/rc-pinnacle-grey.c
index 31794d4180db..3853b653cee6 100644
--- a/drivers/media/rc/keymaps/rc-pinnacle-grey.c
+++ b/drivers/media/rc/keymaps/rc-pinnacle-grey.c
@@ -9,16 +9,16 @@
#include <linux/module.h>
static struct rc_map_table pinnacle_grey[] = {
- { 0x3a, KEY_0 },
- { 0x31, KEY_1 },
- { 0x32, KEY_2 },
- { 0x33, KEY_3 },
- { 0x34, KEY_4 },
- { 0x35, KEY_5 },
- { 0x36, KEY_6 },
- { 0x37, KEY_7 },
- { 0x38, KEY_8 },
- { 0x39, KEY_9 },
+ { 0x3a, KEY_NUMERIC_0 },
+ { 0x31, KEY_NUMERIC_1 },
+ { 0x32, KEY_NUMERIC_2 },
+ { 0x33, KEY_NUMERIC_3 },
+ { 0x34, KEY_NUMERIC_4 },
+ { 0x35, KEY_NUMERIC_5 },
+ { 0x36, KEY_NUMERIC_6 },
+ { 0x37, KEY_NUMERIC_7 },
+ { 0x38, KEY_NUMERIC_8 },
+ { 0x39, KEY_NUMERIC_9 },
{ 0x2f, KEY_POWER },
diff --git a/drivers/media/rc/keymaps/rc-pinnacle-pctv-hd.c b/drivers/media/rc/keymaps/rc-pinnacle-pctv-hd.c
index 876aeb6e1d9c..96d8112fb468 100644
--- a/drivers/media/rc/keymaps/rc-pinnacle-pctv-hd.c
+++ b/drivers/media/rc/keymaps/rc-pinnacle-pctv-hd.c
@@ -20,16 +20,16 @@ static struct rc_map_table pinnacle_pctv_hd[] = {
{ 0x0709, KEY_VOLUMEDOWN },
{ 0x0706, KEY_CHANNELUP },
{ 0x070c, KEY_CHANNELDOWN },
- { 0x070f, KEY_1 },
- { 0x0715, KEY_2 },
- { 0x0710, KEY_3 },
- { 0x0718, KEY_4 },
- { 0x071b, KEY_5 },
- { 0x071e, KEY_6 },
- { 0x0711, KEY_7 },
- { 0x0721, KEY_8 },
- { 0x0712, KEY_9 },
- { 0x0727, KEY_0 },
+ { 0x070f, KEY_NUMERIC_1 },
+ { 0x0715, KEY_NUMERIC_2 },
+ { 0x0710, KEY_NUMERIC_3 },
+ { 0x0718, KEY_NUMERIC_4 },
+ { 0x071b, KEY_NUMERIC_5 },
+ { 0x071e, KEY_NUMERIC_6 },
+ { 0x0711, KEY_NUMERIC_7 },
+ { 0x0721, KEY_NUMERIC_8 },
+ { 0x0712, KEY_NUMERIC_9 },
+ { 0x0727, KEY_NUMERIC_0 },
{ 0x0724, KEY_ZOOM }, /* 'Square' key */
{ 0x072a, KEY_SUBTITLE }, /* 'T' key */
{ 0x072d, KEY_REWIND },
diff --git a/drivers/media/rc/keymaps/rc-pixelview-002t.c b/drivers/media/rc/keymaps/rc-pixelview-002t.c
index c0550e09f255..c3439c46644c 100644
--- a/drivers/media/rc/keymaps/rc-pixelview-002t.c
+++ b/drivers/media/rc/keymaps/rc-pixelview-002t.c
@@ -16,16 +16,16 @@ static struct rc_map_table pixelview_002t[] = {
{ 0x866b13, KEY_MUTE },
{ 0x866b12, KEY_POWER2 }, /* power */
- { 0x866b01, KEY_1 },
- { 0x866b02, KEY_2 },
- { 0x866b03, KEY_3 },
- { 0x866b04, KEY_4 },
- { 0x866b05, KEY_5 },
- { 0x866b06, KEY_6 },
- { 0x866b07, KEY_7 },
- { 0x866b08, KEY_8 },
- { 0x866b09, KEY_9 },
- { 0x866b00, KEY_0 },
+ { 0x866b01, KEY_NUMERIC_1 },
+ { 0x866b02, KEY_NUMERIC_2 },
+ { 0x866b03, KEY_NUMERIC_3 },
+ { 0x866b04, KEY_NUMERIC_4 },
+ { 0x866b05, KEY_NUMERIC_5 },
+ { 0x866b06, KEY_NUMERIC_6 },
+ { 0x866b07, KEY_NUMERIC_7 },
+ { 0x866b08, KEY_NUMERIC_8 },
+ { 0x866b09, KEY_NUMERIC_9 },
+ { 0x866b00, KEY_NUMERIC_0 },
{ 0x866b0d, KEY_CHANNELUP },
{ 0x866b19, KEY_CHANNELDOWN },
diff --git a/drivers/media/rc/keymaps/rc-pixelview-mk12.c b/drivers/media/rc/keymaps/rc-pixelview-mk12.c
index 864c8ea5d8e3..ea11ccde8442 100644
--- a/drivers/media/rc/keymaps/rc-pixelview-mk12.c
+++ b/drivers/media/rc/keymaps/rc-pixelview-mk12.c
@@ -16,16 +16,16 @@ static struct rc_map_table pixelview_mk12[] = {
{ 0x866b03, KEY_TUNER }, /* Timeshift */
{ 0x866b1e, KEY_POWER2 }, /* power */
- { 0x866b01, KEY_1 },
- { 0x866b0b, KEY_2 },
- { 0x866b1b, KEY_3 },
- { 0x866b05, KEY_4 },
- { 0x866b09, KEY_5 },
- { 0x866b15, KEY_6 },
- { 0x866b06, KEY_7 },
- { 0x866b0a, KEY_8 },
- { 0x866b12, KEY_9 },
- { 0x866b02, KEY_0 },
+ { 0x866b01, KEY_NUMERIC_1 },
+ { 0x866b0b, KEY_NUMERIC_2 },
+ { 0x866b1b, KEY_NUMERIC_3 },
+ { 0x866b05, KEY_NUMERIC_4 },
+ { 0x866b09, KEY_NUMERIC_5 },
+ { 0x866b15, KEY_NUMERIC_6 },
+ { 0x866b06, KEY_NUMERIC_7 },
+ { 0x866b0a, KEY_NUMERIC_8 },
+ { 0x866b12, KEY_NUMERIC_9 },
+ { 0x866b02, KEY_NUMERIC_0 },
{ 0x866b13, KEY_AGAIN }, /* loop */
{ 0x866b10, KEY_DIGITS }, /* +100 */
diff --git a/drivers/media/rc/keymaps/rc-pixelview-new.c b/drivers/media/rc/keymaps/rc-pixelview-new.c
index e4e34f2ccf74..0259666831b0 100644
--- a/drivers/media/rc/keymaps/rc-pixelview-new.c
+++ b/drivers/media/rc/keymaps/rc-pixelview-new.c
@@ -17,16 +17,16 @@ static struct rc_map_table pixelview_new[] = {
{ 0x3c, KEY_TIME }, /* Timeshift */
{ 0x12, KEY_POWER },
- { 0x3d, KEY_1 },
- { 0x38, KEY_2 },
- { 0x18, KEY_3 },
- { 0x35, KEY_4 },
- { 0x39, KEY_5 },
- { 0x15, KEY_6 },
- { 0x36, KEY_7 },
- { 0x3a, KEY_8 },
- { 0x1e, KEY_9 },
- { 0x3e, KEY_0 },
+ { 0x3d, KEY_NUMERIC_1 },
+ { 0x38, KEY_NUMERIC_2 },
+ { 0x18, KEY_NUMERIC_3 },
+ { 0x35, KEY_NUMERIC_4 },
+ { 0x39, KEY_NUMERIC_5 },
+ { 0x15, KEY_NUMERIC_6 },
+ { 0x36, KEY_NUMERIC_7 },
+ { 0x3a, KEY_NUMERIC_8 },
+ { 0x1e, KEY_NUMERIC_9 },
+ { 0x3e, KEY_NUMERIC_0 },
{ 0x1c, KEY_AGAIN }, /* LOOP */
{ 0x3f, KEY_VIDEO }, /* Source */
diff --git a/drivers/media/rc/keymaps/rc-pixelview.c b/drivers/media/rc/keymaps/rc-pixelview.c
index 988919735165..29f6d2c013e4 100644
--- a/drivers/media/rc/keymaps/rc-pixelview.c
+++ b/drivers/media/rc/keymaps/rc-pixelview.c
@@ -25,16 +25,16 @@ static struct rc_map_table pixelview[] = {
{ 0x19, KEY_ZOOM }, /* zoom */
{ 0x0f, KEY_TEXT }, /* min */
- { 0x01, KEY_1 },
- { 0x0b, KEY_2 },
- { 0x1b, KEY_3 },
- { 0x05, KEY_4 },
- { 0x09, KEY_5 },
- { 0x15, KEY_6 },
- { 0x06, KEY_7 },
- { 0x0a, KEY_8 },
- { 0x12, KEY_9 },
- { 0x02, KEY_0 },
+ { 0x01, KEY_NUMERIC_1 },
+ { 0x0b, KEY_NUMERIC_2 },
+ { 0x1b, KEY_NUMERIC_3 },
+ { 0x05, KEY_NUMERIC_4 },
+ { 0x09, KEY_NUMERIC_5 },
+ { 0x15, KEY_NUMERIC_6 },
+ { 0x06, KEY_NUMERIC_7 },
+ { 0x0a, KEY_NUMERIC_8 },
+ { 0x12, KEY_NUMERIC_9 },
+ { 0x02, KEY_NUMERIC_0 },
{ 0x10, KEY_LAST }, /* +100 */
{ 0x13, KEY_LIST }, /* recall */
diff --git a/drivers/media/rc/keymaps/rc-powercolor-real-angel.c b/drivers/media/rc/keymaps/rc-powercolor-real-angel.c
index cf98cf8dc13c..66fe2e52e7c8 100644
--- a/drivers/media/rc/keymaps/rc-powercolor-real-angel.c
+++ b/drivers/media/rc/keymaps/rc-powercolor-real-angel.c
@@ -16,16 +16,16 @@
static struct rc_map_table powercolor_real_angel[] = {
{ 0x38, KEY_SWITCHVIDEOMODE }, /* switch inputs */
{ 0x0c, KEY_MEDIA }, /* Turn ON/OFF App */
- { 0x00, KEY_0 },
- { 0x01, KEY_1 },
- { 0x02, KEY_2 },
- { 0x03, KEY_3 },
- { 0x04, KEY_4 },
- { 0x05, KEY_5 },
- { 0x06, KEY_6 },
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x09, KEY_9 },
+ { 0x00, KEY_NUMERIC_0 },
+ { 0x01, KEY_NUMERIC_1 },
+ { 0x02, KEY_NUMERIC_2 },
+ { 0x03, KEY_NUMERIC_3 },
+ { 0x04, KEY_NUMERIC_4 },
+ { 0x05, KEY_NUMERIC_5 },
+ { 0x06, KEY_NUMERIC_6 },
+ { 0x07, KEY_NUMERIC_7 },
+ { 0x08, KEY_NUMERIC_8 },
+ { 0x09, KEY_NUMERIC_9 },
{ 0x0a, KEY_DIGITS }, /* single, double, triple digit */
{ 0x29, KEY_PREVIOUS }, /* previous channel */
{ 0x12, KEY_BRIGHTNESSUP },
diff --git a/drivers/media/rc/keymaps/rc-proteus-2309.c b/drivers/media/rc/keymaps/rc-proteus-2309.c
index d2c13d0e7bff..36eebefd975c 100644
--- a/drivers/media/rc/keymaps/rc-proteus-2309.c
+++ b/drivers/media/rc/keymaps/rc-proteus-2309.c
@@ -12,16 +12,16 @@
static struct rc_map_table proteus_2309[] = {
/* numeric */
- { 0x00, KEY_0 },
- { 0x01, KEY_1 },
- { 0x02, KEY_2 },
- { 0x03, KEY_3 },
- { 0x04, KEY_4 },
- { 0x05, KEY_5 },
- { 0x06, KEY_6 },
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x09, KEY_9 },
+ { 0x00, KEY_NUMERIC_0 },
+ { 0x01, KEY_NUMERIC_1 },
+ { 0x02, KEY_NUMERIC_2 },
+ { 0x03, KEY_NUMERIC_3 },
+ { 0x04, KEY_NUMERIC_4 },
+ { 0x05, KEY_NUMERIC_5 },
+ { 0x06, KEY_NUMERIC_6 },
+ { 0x07, KEY_NUMERIC_7 },
+ { 0x08, KEY_NUMERIC_8 },
+ { 0x09, KEY_NUMERIC_9 },
{ 0x5c, KEY_POWER }, /* power */
{ 0x20, KEY_ZOOM }, /* full screen */
diff --git a/drivers/media/rc/keymaps/rc-purpletv.c b/drivers/media/rc/keymaps/rc-purpletv.c
index c8011f4d96ea..bf4543fecb6f 100644
--- a/drivers/media/rc/keymaps/rc-purpletv.c
+++ b/drivers/media/rc/keymaps/rc-purpletv.c
@@ -13,16 +13,16 @@ static struct rc_map_table purpletv[] = {
{ 0x6f, KEY_MUTE },
{ 0x10, KEY_BACKSPACE }, /* Recall */
- { 0x11, KEY_0 },
- { 0x04, KEY_1 },
- { 0x05, KEY_2 },
- { 0x06, KEY_3 },
- { 0x08, KEY_4 },
- { 0x09, KEY_5 },
- { 0x0a, KEY_6 },
- { 0x0c, KEY_7 },
- { 0x0d, KEY_8 },
- { 0x0e, KEY_9 },
+ { 0x11, KEY_NUMERIC_0 },
+ { 0x04, KEY_NUMERIC_1 },
+ { 0x05, KEY_NUMERIC_2 },
+ { 0x06, KEY_NUMERIC_3 },
+ { 0x08, KEY_NUMERIC_4 },
+ { 0x09, KEY_NUMERIC_5 },
+ { 0x0a, KEY_NUMERIC_6 },
+ { 0x0c, KEY_NUMERIC_7 },
+ { 0x0d, KEY_NUMERIC_8 },
+ { 0x0e, KEY_NUMERIC_9 },
{ 0x12, KEY_DOT }, /* 100+ */
{ 0x07, KEY_VOLUMEUP },
diff --git a/drivers/media/rc/keymaps/rc-pv951.c b/drivers/media/rc/keymaps/rc-pv951.c
index 5235ee899c30..69db55463000 100644
--- a/drivers/media/rc/keymaps/rc-pv951.c
+++ b/drivers/media/rc/keymaps/rc-pv951.c
@@ -11,16 +11,16 @@
/* Mark Phalan <phalanm@o2.ie> */
static struct rc_map_table pv951[] = {
- { 0x00, KEY_0 },
- { 0x01, KEY_1 },
- { 0x02, KEY_2 },
- { 0x03, KEY_3 },
- { 0x04, KEY_4 },
- { 0x05, KEY_5 },
- { 0x06, KEY_6 },
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x09, KEY_9 },
+ { 0x00, KEY_NUMERIC_0 },
+ { 0x01, KEY_NUMERIC_1 },
+ { 0x02, KEY_NUMERIC_2 },
+ { 0x03, KEY_NUMERIC_3 },
+ { 0x04, KEY_NUMERIC_4 },
+ { 0x05, KEY_NUMERIC_5 },
+ { 0x06, KEY_NUMERIC_6 },
+ { 0x07, KEY_NUMERIC_7 },
+ { 0x08, KEY_NUMERIC_8 },
+ { 0x09, KEY_NUMERIC_9 },
{ 0x12, KEY_POWER },
{ 0x10, KEY_MUTE },
diff --git a/drivers/media/rc/keymaps/rc-real-audio-220-32-keys.c b/drivers/media/rc/keymaps/rc-real-audio-220-32-keys.c
index 1cf786649675..33bb458b81fd 100644
--- a/drivers/media/rc/keymaps/rc-real-audio-220-32-keys.c
+++ b/drivers/media/rc/keymaps/rc-real-audio-220-32-keys.c
@@ -14,16 +14,16 @@ static struct rc_map_table real_audio_220_32_keys[] = {
{ 0x1c, KEY_RADIO},
{ 0x12, KEY_POWER2},
- { 0x01, KEY_1},
- { 0x02, KEY_2},
- { 0x03, KEY_3},
- { 0x04, KEY_4},
- { 0x05, KEY_5},
- { 0x06, KEY_6},
- { 0x07, KEY_7},
- { 0x08, KEY_8},
- { 0x09, KEY_9},
- { 0x00, KEY_0},
+ { 0x01, KEY_NUMERIC_1},
+ { 0x02, KEY_NUMERIC_2},
+ { 0x03, KEY_NUMERIC_3},
+ { 0x04, KEY_NUMERIC_4},
+ { 0x05, KEY_NUMERIC_5},
+ { 0x06, KEY_NUMERIC_6},
+ { 0x07, KEY_NUMERIC_7},
+ { 0x08, KEY_NUMERIC_8},
+ { 0x09, KEY_NUMERIC_9},
+ { 0x00, KEY_NUMERIC_0},
{ 0x0c, KEY_VOLUMEUP},
{ 0x18, KEY_VOLUMEDOWN},
diff --git a/drivers/media/rc/keymaps/rc-reddo.c b/drivers/media/rc/keymaps/rc-reddo.c
index a68003381540..b70390d19e78 100644
--- a/drivers/media/rc/keymaps/rc-reddo.c
+++ b/drivers/media/rc/keymaps/rc-reddo.c
@@ -23,21 +23,21 @@
static struct rc_map_table reddo[] = {
{ 0x61d601, KEY_EPG }, /* EPG */
- { 0x61d602, KEY_3 },
- { 0x61d604, KEY_1 },
- { 0x61d605, KEY_5 },
- { 0x61d606, KEY_6 },
+ { 0x61d602, KEY_NUMERIC_3 },
+ { 0x61d604, KEY_NUMERIC_1 },
+ { 0x61d605, KEY_NUMERIC_5 },
+ { 0x61d606, KEY_NUMERIC_6 },
{ 0x61d607, KEY_CHANNELDOWN }, /* CH- */
- { 0x61d608, KEY_2 },
+ { 0x61d608, KEY_NUMERIC_2 },
{ 0x61d609, KEY_CHANNELUP }, /* CH+ */
- { 0x61d60a, KEY_9 },
+ { 0x61d60a, KEY_NUMERIC_9 },
{ 0x61d60b, KEY_ZOOM }, /* Zoom */
- { 0x61d60c, KEY_7 },
- { 0x61d60d, KEY_8 },
+ { 0x61d60c, KEY_NUMERIC_7 },
+ { 0x61d60d, KEY_NUMERIC_8 },
{ 0x61d60e, KEY_VOLUMEUP }, /* Vol+ */
- { 0x61d60f, KEY_4 },
+ { 0x61d60f, KEY_NUMERIC_4 },
{ 0x61d610, KEY_ESC }, /* [back up arrow] */
- { 0x61d611, KEY_0 },
+ { 0x61d611, KEY_NUMERIC_0 },
{ 0x61d612, KEY_OK }, /* [enter arrow] */
{ 0x61d613, KEY_VOLUMEDOWN }, /* Vol- */
{ 0x61d614, KEY_RECORD }, /* Rec */
diff --git a/drivers/media/rc/keymaps/rc-snapstream-firefly.c b/drivers/media/rc/keymaps/rc-snapstream-firefly.c
index 8d55b4ccee83..e3d5bff3bd9e 100644
--- a/drivers/media/rc/keymaps/rc-snapstream-firefly.c
+++ b/drivers/media/rc/keymaps/rc-snapstream-firefly.c
@@ -12,16 +12,16 @@ static struct rc_map_table snapstream_firefly[] = {
{ 0x2c, KEY_ZOOM }, /* Maximize */
{ 0x02, KEY_CLOSE },
- { 0x0d, KEY_1 },
- { 0x0e, KEY_2 },
- { 0x0f, KEY_3 },
- { 0x10, KEY_4 },
- { 0x11, KEY_5 },
- { 0x12, KEY_6 },
- { 0x13, KEY_7 },
- { 0x14, KEY_8 },
- { 0x15, KEY_9 },
- { 0x17, KEY_0 },
+ { 0x0d, KEY_NUMERIC_1 },
+ { 0x0e, KEY_NUMERIC_2 },
+ { 0x0f, KEY_NUMERIC_3 },
+ { 0x10, KEY_NUMERIC_4 },
+ { 0x11, KEY_NUMERIC_5 },
+ { 0x12, KEY_NUMERIC_6 },
+ { 0x13, KEY_NUMERIC_7 },
+ { 0x14, KEY_NUMERIC_8 },
+ { 0x15, KEY_NUMERIC_9 },
+ { 0x17, KEY_NUMERIC_0 },
{ 0x16, KEY_BACK },
{ 0x18, KEY_KPENTER }, /* ent */
diff --git a/drivers/media/rc/keymaps/rc-su3000.c b/drivers/media/rc/keymaps/rc-su3000.c
index 1c82737e3999..64cfc01aa48f 100644
--- a/drivers/media/rc/keymaps/rc-su3000.c
+++ b/drivers/media/rc/keymaps/rc-su3000.c
@@ -10,16 +10,16 @@
static struct rc_map_table su3000[] = {
{ 0x25, KEY_POWER }, /* right-bottom Red */
{ 0x0a, KEY_MUTE }, /* -/-- */
- { 0x01, KEY_1 },
- { 0x02, KEY_2 },
- { 0x03, KEY_3 },
- { 0x04, KEY_4 },
- { 0x05, KEY_5 },
- { 0x06, KEY_6 },
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x09, KEY_9 },
- { 0x00, KEY_0 },
+ { 0x01, KEY_NUMERIC_1 },
+ { 0x02, KEY_NUMERIC_2 },
+ { 0x03, KEY_NUMERIC_3 },
+ { 0x04, KEY_NUMERIC_4 },
+ { 0x05, KEY_NUMERIC_5 },
+ { 0x06, KEY_NUMERIC_6 },
+ { 0x07, KEY_NUMERIC_7 },
+ { 0x08, KEY_NUMERIC_8 },
+ { 0x09, KEY_NUMERIC_9 },
+ { 0x00, KEY_NUMERIC_0 },
{ 0x20, KEY_UP }, /* CH+ */
 { 0x21, KEY_DOWN }, /* CH- */
{ 0x12, KEY_VOLUMEUP }, /* Brightness Up */
diff --git a/drivers/media/rc/keymaps/rc-tango.c b/drivers/media/rc/keymaps/rc-tango.c
index 6f0fec6d3944..2b9cef6ef5b5 100644
--- a/drivers/media/rc/keymaps/rc-tango.c
+++ b/drivers/media/rc/keymaps/rc-tango.c
@@ -17,16 +17,16 @@ static struct rc_map_table tango_table[] = {
{ 0x4cb51, KEY_MUTE },
{ 0x4cb52, KEY_VOLUMEDOWN },
- { 0x4cb41, KEY_1 },
- { 0x4cb03, KEY_2 },
- { 0x4cb42, KEY_3 },
- { 0x4cb45, KEY_4 },
- { 0x4cb07, KEY_5 },
- { 0x4cb46, KEY_6 },
- { 0x4cb55, KEY_7 },
- { 0x4cb17, KEY_8 },
- { 0x4cb56, KEY_9 },
- { 0x4cb1b, KEY_0 },
+ { 0x4cb41, KEY_NUMERIC_1 },
+ { 0x4cb03, KEY_NUMERIC_2 },
+ { 0x4cb42, KEY_NUMERIC_3 },
+ { 0x4cb45, KEY_NUMERIC_4 },
+ { 0x4cb07, KEY_NUMERIC_5 },
+ { 0x4cb46, KEY_NUMERIC_6 },
+ { 0x4cb55, KEY_NUMERIC_7 },
+ { 0x4cb17, KEY_NUMERIC_8 },
+ { 0x4cb56, KEY_NUMERIC_9 },
+ { 0x4cb1b, KEY_NUMERIC_0 },
{ 0x4cb59, KEY_DELETE },
{ 0x4cb5a, KEY_CAPSLOCK },
diff --git a/drivers/media/rc/keymaps/rc-tbs-nec.c b/drivers/media/rc/keymaps/rc-tbs-nec.c
index 42766cb877c3..420980925f29 100644
--- a/drivers/media/rc/keymaps/rc-tbs-nec.c
+++ b/drivers/media/rc/keymaps/rc-tbs-nec.c
@@ -11,16 +11,16 @@
static struct rc_map_table tbs_nec[] = {
{ 0x84, KEY_POWER2}, /* power */
{ 0x94, KEY_MUTE}, /* mute */
- { 0x87, KEY_1},
- { 0x86, KEY_2},
- { 0x85, KEY_3},
- { 0x8b, KEY_4},
- { 0x8a, KEY_5},
- { 0x89, KEY_6},
- { 0x8f, KEY_7},
- { 0x8e, KEY_8},
- { 0x8d, KEY_9},
- { 0x92, KEY_0},
+ { 0x87, KEY_NUMERIC_1},
+ { 0x86, KEY_NUMERIC_2},
+ { 0x85, KEY_NUMERIC_3},
+ { 0x8b, KEY_NUMERIC_4},
+ { 0x8a, KEY_NUMERIC_5},
+ { 0x89, KEY_NUMERIC_6},
+ { 0x8f, KEY_NUMERIC_7},
+ { 0x8e, KEY_NUMERIC_8},
+ { 0x8d, KEY_NUMERIC_9},
+ { 0x92, KEY_NUMERIC_0},
{ 0xc0, KEY_10CHANNELSUP}, /* 10+ */
{ 0xd0, KEY_10CHANNELSDOWN}, /* 10- */
{ 0x96, KEY_CHANNELUP}, /* ch+ */
diff --git a/drivers/media/rc/keymaps/rc-technisat-ts35.c b/drivers/media/rc/keymaps/rc-technisat-ts35.c
index 34bd04a75277..9a917ea0ceba 100644
--- a/drivers/media/rc/keymaps/rc-technisat-ts35.c
+++ b/drivers/media/rc/keymaps/rc-technisat-ts35.c
@@ -13,16 +13,16 @@ static struct rc_map_table technisat_ts35[] = {
{0x1c, KEY_AB},
{0x33, KEY_POWER},
- {0x3e, KEY_1},
- {0x3d, KEY_2},
- {0x3c, KEY_3},
- {0x3b, KEY_4},
- {0x3a, KEY_5},
- {0x39, KEY_6},
- {0x38, KEY_7},
- {0x37, KEY_8},
- {0x36, KEY_9},
- {0x3f, KEY_0},
+ {0x3e, KEY_NUMERIC_1},
+ {0x3d, KEY_NUMERIC_2},
+ {0x3c, KEY_NUMERIC_3},
+ {0x3b, KEY_NUMERIC_4},
+ {0x3a, KEY_NUMERIC_5},
+ {0x39, KEY_NUMERIC_6},
+ {0x38, KEY_NUMERIC_7},
+ {0x37, KEY_NUMERIC_8},
+ {0x36, KEY_NUMERIC_9},
+ {0x3f, KEY_NUMERIC_0},
{0x35, KEY_DIGITS},
{0x2c, KEY_TV},
diff --git a/drivers/media/rc/keymaps/rc-technisat-usb2.c b/drivers/media/rc/keymaps/rc-technisat-usb2.c
index 58b3baf5ee96..942100686c82 100644
--- a/drivers/media/rc/keymaps/rc-technisat-usb2.c
+++ b/drivers/media/rc/keymaps/rc-technisat-usb2.c
@@ -30,18 +30,18 @@
static struct rc_map_table technisat_usb2[] = {
{0x0a0c, KEY_POWER},
- {0x0a01, KEY_1},
- {0x0a02, KEY_2},
- {0x0a03, KEY_3},
+ {0x0a01, KEY_NUMERIC_1},
+ {0x0a02, KEY_NUMERIC_2},
+ {0x0a03, KEY_NUMERIC_3},
{0x0a0d, KEY_MUTE},
- {0x0a04, KEY_4},
- {0x0a05, KEY_5},
- {0x0a06, KEY_6},
+ {0x0a04, KEY_NUMERIC_4},
+ {0x0a05, KEY_NUMERIC_5},
+ {0x0a06, KEY_NUMERIC_6},
{0x0a38, KEY_VIDEO}, /* EXT */
- {0x0a07, KEY_7},
- {0x0a08, KEY_8},
- {0x0a09, KEY_9},
- {0x0a00, KEY_0},
+ {0x0a07, KEY_NUMERIC_7},
+ {0x0a08, KEY_NUMERIC_8},
+ {0x0a09, KEY_NUMERIC_9},
+ {0x0a00, KEY_NUMERIC_0},
{0x0a4f, KEY_INFO},
{0x0a20, KEY_CHANNELUP},
{0x0a52, KEY_MENU},
diff --git a/drivers/media/rc/keymaps/rc-terratec-cinergy-c-pci.c b/drivers/media/rc/keymaps/rc-terratec-cinergy-c-pci.c
index 4b2741b158c4..da06f844d8fb 100644
--- a/drivers/media/rc/keymaps/rc-terratec-cinergy-c-pci.c
+++ b/drivers/media/rc/keymaps/rc-terratec-cinergy-c-pci.c
@@ -9,17 +9,17 @@
static struct rc_map_table terratec_cinergy_c_pci[] = {
{ 0x3e, KEY_POWER},
- { 0x3d, KEY_1},
- { 0x3c, KEY_2},
- { 0x3b, KEY_3},
- { 0x3a, KEY_4},
- { 0x39, KEY_5},
- { 0x38, KEY_6},
- { 0x37, KEY_7},
- { 0x36, KEY_8},
- { 0x35, KEY_9},
+ { 0x3d, KEY_NUMERIC_1},
+ { 0x3c, KEY_NUMERIC_2},
+ { 0x3b, KEY_NUMERIC_3},
+ { 0x3a, KEY_NUMERIC_4},
+ { 0x39, KEY_NUMERIC_5},
+ { 0x38, KEY_NUMERIC_6},
+ { 0x37, KEY_NUMERIC_7},
+ { 0x36, KEY_NUMERIC_8},
+ { 0x35, KEY_NUMERIC_9},
{ 0x34, KEY_VIDEO_NEXT}, /* AV */
- { 0x33, KEY_0},
+ { 0x33, KEY_NUMERIC_0},
{ 0x32, KEY_REFRESH},
{ 0x30, KEY_EPG},
{ 0x2f, KEY_UP},
diff --git a/drivers/media/rc/keymaps/rc-terratec-cinergy-s2-hd.c b/drivers/media/rc/keymaps/rc-terratec-cinergy-s2-hd.c
index 631f86665206..a1844b531572 100644
--- a/drivers/media/rc/keymaps/rc-terratec-cinergy-s2-hd.c
+++ b/drivers/media/rc/keymaps/rc-terratec-cinergy-s2-hd.c
@@ -42,17 +42,17 @@ static struct rc_map_table terratec_cinergy_s2_hd[] = {
{ 0x2f, KEY_UP},
{ 0x30, KEY_EPG},
{ 0x32, KEY_VIDEO}, /* A<=>B */
- { 0x33, KEY_0},
+ { 0x33, KEY_NUMERIC_0},
{ 0x34, KEY_VCR}, /* AV */
- { 0x35, KEY_9},
- { 0x36, KEY_8},
- { 0x37, KEY_7},
- { 0x38, KEY_6},
- { 0x39, KEY_5},
- { 0x3a, KEY_4},
- { 0x3b, KEY_3},
- { 0x3c, KEY_2},
- { 0x3d, KEY_1},
+ { 0x35, KEY_NUMERIC_9},
+ { 0x36, KEY_NUMERIC_8},
+ { 0x37, KEY_NUMERIC_7},
+ { 0x38, KEY_NUMERIC_6},
+ { 0x39, KEY_NUMERIC_5},
+ { 0x3a, KEY_NUMERIC_4},
+ { 0x3b, KEY_NUMERIC_3},
+ { 0x3c, KEY_NUMERIC_2},
+ { 0x3d, KEY_NUMERIC_1},
{ 0x3e, KEY_POWER},
};
diff --git a/drivers/media/rc/keymaps/rc-terratec-cinergy-xs.c b/drivers/media/rc/keymaps/rc-terratec-cinergy-xs.c
index 6cf53a56bce4..fe587e3f0240 100644
--- a/drivers/media/rc/keymaps/rc-terratec-cinergy-xs.c
+++ b/drivers/media/rc/keymaps/rc-terratec-cinergy-xs.c
@@ -16,20 +16,20 @@ static struct rc_map_table terratec_cinergy_xs[] = {
{ 0x41, KEY_HOME},
{ 0x01, KEY_POWER},
{ 0x42, KEY_MENU},
- { 0x02, KEY_1},
- { 0x03, KEY_2},
- { 0x04, KEY_3},
+ { 0x02, KEY_NUMERIC_1},
+ { 0x03, KEY_NUMERIC_2},
+ { 0x04, KEY_NUMERIC_3},
{ 0x43, KEY_SUBTITLE},
- { 0x05, KEY_4},
- { 0x06, KEY_5},
- { 0x07, KEY_6},
+ { 0x05, KEY_NUMERIC_4},
+ { 0x06, KEY_NUMERIC_5},
+ { 0x07, KEY_NUMERIC_6},
{ 0x44, KEY_TEXT},
- { 0x08, KEY_7},
- { 0x09, KEY_8},
- { 0x0a, KEY_9},
+ { 0x08, KEY_NUMERIC_7},
+ { 0x09, KEY_NUMERIC_8},
+ { 0x0a, KEY_NUMERIC_9},
{ 0x45, KEY_DELETE},
{ 0x0b, KEY_TUNER},
- { 0x0c, KEY_0},
+ { 0x0c, KEY_NUMERIC_0},
{ 0x0d, KEY_MODE},
{ 0x46, KEY_TV},
{ 0x47, KEY_DVD},
diff --git a/drivers/media/rc/keymaps/rc-terratec-slim-2.c b/drivers/media/rc/keymaps/rc-terratec-slim-2.c
index bd1c1761b550..a54a59f90313 100644
--- a/drivers/media/rc/keymaps/rc-terratec-slim-2.c
+++ b/drivers/media/rc/keymaps/rc-terratec-slim-2.c
@@ -17,21 +17,21 @@ static struct rc_map_table terratec_slim_2[] = {
{ 0x8001, KEY_MUTE }, /* MUTE */
{ 0x8002, KEY_VOLUMEDOWN },
{ 0x8003, KEY_CHANNELDOWN },
- { 0x8004, KEY_1 },
- { 0x8005, KEY_2 },
- { 0x8006, KEY_3 },
- { 0x8007, KEY_4 },
- { 0x8008, KEY_5 },
- { 0x8009, KEY_6 },
- { 0x800a, KEY_7 },
+ { 0x8004, KEY_NUMERIC_1 },
+ { 0x8005, KEY_NUMERIC_2 },
+ { 0x8006, KEY_NUMERIC_3 },
+ { 0x8007, KEY_NUMERIC_4 },
+ { 0x8008, KEY_NUMERIC_5 },
+ { 0x8009, KEY_NUMERIC_6 },
+ { 0x800a, KEY_NUMERIC_7 },
{ 0x800c, KEY_ZOOM }, /* [fullscreen] */
- { 0x800d, KEY_0 },
+ { 0x800d, KEY_NUMERIC_0 },
{ 0x800e, KEY_AGAIN }, /* [two arrows forming a circle] */
{ 0x8012, KEY_POWER2 }, /* [red power button] */
{ 0x801a, KEY_VOLUMEUP },
- { 0x801b, KEY_8 },
+ { 0x801b, KEY_NUMERIC_8 },
{ 0x801e, KEY_CHANNELUP },
- { 0x801f, KEY_9 },
+ { 0x801f, KEY_NUMERIC_9 },
};
static struct rc_map_list terratec_slim_2_map = {
diff --git a/drivers/media/rc/keymaps/rc-terratec-slim.c b/drivers/media/rc/keymaps/rc-terratec-slim.c
index b44942691388..146e3a3480dc 100644
--- a/drivers/media/rc/keymaps/rc-terratec-slim.c
+++ b/drivers/media/rc/keymaps/rc-terratec-slim.c
@@ -11,16 +11,16 @@
/* TerraTec slim remote, 7 rows, 4 columns. */
/* Uses NEC extended 0x02bd. */
static struct rc_map_table terratec_slim[] = {
- { 0x02bd00, KEY_1 },
- { 0x02bd01, KEY_2 },
- { 0x02bd02, KEY_3 },
- { 0x02bd03, KEY_4 },
- { 0x02bd04, KEY_5 },
- { 0x02bd05, KEY_6 },
- { 0x02bd06, KEY_7 },
- { 0x02bd07, KEY_8 },
- { 0x02bd08, KEY_9 },
- { 0x02bd09, KEY_0 },
+ { 0x02bd00, KEY_NUMERIC_1 },
+ { 0x02bd01, KEY_NUMERIC_2 },
+ { 0x02bd02, KEY_NUMERIC_3 },
+ { 0x02bd03, KEY_NUMERIC_4 },
+ { 0x02bd04, KEY_NUMERIC_5 },
+ { 0x02bd05, KEY_NUMERIC_6 },
+ { 0x02bd06, KEY_NUMERIC_7 },
+ { 0x02bd07, KEY_NUMERIC_8 },
+ { 0x02bd08, KEY_NUMERIC_9 },
+ { 0x02bd09, KEY_NUMERIC_0 },
{ 0x02bd0a, KEY_MUTE },
{ 0x02bd0b, KEY_NEW }, /* symbol: PIP */
{ 0x02bd0e, KEY_VOLUMEDOWN },
diff --git a/drivers/media/rc/keymaps/rc-tevii-nec.c b/drivers/media/rc/keymaps/rc-tevii-nec.c
index 58fcc72f528e..5b96e9a38e9d 100644
--- a/drivers/media/rc/keymaps/rc-tevii-nec.c
+++ b/drivers/media/rc/keymaps/rc-tevii-nec.c
@@ -11,16 +11,16 @@
static struct rc_map_table tevii_nec[] = {
{ 0x0a, KEY_POWER2},
{ 0x0c, KEY_MUTE},
- { 0x11, KEY_1},
- { 0x12, KEY_2},
- { 0x13, KEY_3},
- { 0x14, KEY_4},
- { 0x15, KEY_5},
- { 0x16, KEY_6},
- { 0x17, KEY_7},
- { 0x18, KEY_8},
- { 0x19, KEY_9},
- { 0x10, KEY_0},
+ { 0x11, KEY_NUMERIC_1},
+ { 0x12, KEY_NUMERIC_2},
+ { 0x13, KEY_NUMERIC_3},
+ { 0x14, KEY_NUMERIC_4},
+ { 0x15, KEY_NUMERIC_5},
+ { 0x16, KEY_NUMERIC_6},
+ { 0x17, KEY_NUMERIC_7},
+ { 0x18, KEY_NUMERIC_8},
+ { 0x19, KEY_NUMERIC_9},
+ { 0x10, KEY_NUMERIC_0},
{ 0x1c, KEY_MENU},
{ 0x0f, KEY_VOLUMEDOWN},
{ 0x1a, KEY_LAST},
diff --git a/drivers/media/rc/keymaps/rc-total-media-in-hand-02.c b/drivers/media/rc/keymaps/rc-total-media-in-hand-02.c
index 7dfaf05f4934..40b773ba45b9 100644
--- a/drivers/media/rc/keymaps/rc-total-media-in-hand-02.c
+++ b/drivers/media/rc/keymaps/rc-total-media-in-hand-02.c
@@ -10,16 +10,16 @@
static struct rc_map_table total_media_in_hand_02[] = {
- { 0x0000, KEY_0 },
- { 0x0001, KEY_1 },
- { 0x0002, KEY_2 },
- { 0x0003, KEY_3 },
- { 0x0004, KEY_4 },
- { 0x0005, KEY_5 },
- { 0x0006, KEY_6 },
- { 0x0007, KEY_7 },
- { 0x0008, KEY_8 },
- { 0x0009, KEY_9 },
+ { 0x0000, KEY_NUMERIC_0 },
+ { 0x0001, KEY_NUMERIC_1 },
+ { 0x0002, KEY_NUMERIC_2 },
+ { 0x0003, KEY_NUMERIC_3 },
+ { 0x0004, KEY_NUMERIC_4 },
+ { 0x0005, KEY_NUMERIC_5 },
+ { 0x0006, KEY_NUMERIC_6 },
+ { 0x0007, KEY_NUMERIC_7 },
+ { 0x0008, KEY_NUMERIC_8 },
+ { 0x0009, KEY_NUMERIC_9 },
{ 0x000a, KEY_MUTE },
{ 0x000b, KEY_STOP }, /* Stop */
{ 0x000c, KEY_POWER2 }, /* Turn on/off application */
diff --git a/drivers/media/rc/keymaps/rc-total-media-in-hand.c b/drivers/media/rc/keymaps/rc-total-media-in-hand.c
index a12569425b8b..2144db485d83 100644
--- a/drivers/media/rc/keymaps/rc-total-media-in-hand.c
+++ b/drivers/media/rc/keymaps/rc-total-media-in-hand.c
@@ -10,16 +10,16 @@
/* Uses NEC extended 0x02bd */
static struct rc_map_table total_media_in_hand[] = {
- { 0x02bd00, KEY_1 },
- { 0x02bd01, KEY_2 },
- { 0x02bd02, KEY_3 },
- { 0x02bd03, KEY_4 },
- { 0x02bd04, KEY_5 },
- { 0x02bd05, KEY_6 },
- { 0x02bd06, KEY_7 },
- { 0x02bd07, KEY_8 },
- { 0x02bd08, KEY_9 },
- { 0x02bd09, KEY_0 },
+ { 0x02bd00, KEY_NUMERIC_1 },
+ { 0x02bd01, KEY_NUMERIC_2 },
+ { 0x02bd02, KEY_NUMERIC_3 },
+ { 0x02bd03, KEY_NUMERIC_4 },
+ { 0x02bd04, KEY_NUMERIC_5 },
+ { 0x02bd05, KEY_NUMERIC_6 },
+ { 0x02bd06, KEY_NUMERIC_7 },
+ { 0x02bd07, KEY_NUMERIC_8 },
+ { 0x02bd08, KEY_NUMERIC_9 },
+ { 0x02bd09, KEY_NUMERIC_0 },
{ 0x02bd0a, KEY_MUTE },
{ 0x02bd0b, KEY_CYCLEWINDOWS }, /* yellow, [min / max] */
{ 0x02bd0c, KEY_VIDEO }, /* TV / AV */
diff --git a/drivers/media/rc/keymaps/rc-trekstor.c b/drivers/media/rc/keymaps/rc-trekstor.c
index 8576831b20bd..e938e0da51a6 100644
--- a/drivers/media/rc/keymaps/rc-trekstor.c
+++ b/drivers/media/rc/keymaps/rc-trekstor.c
@@ -12,7 +12,7 @@
/* Imported from af9015.h.
Initial keytable was from Marc Schneider <macke@macke.org> */
static struct rc_map_table trekstor[] = {
- { 0x0084, KEY_0 },
+ { 0x0084, KEY_NUMERIC_0 },
{ 0x0085, KEY_MUTE }, /* Mute */
{ 0x0086, KEY_HOMEPAGE }, /* Home */
{ 0x0087, KEY_UP }, /* Up */
@@ -24,17 +24,17 @@ static struct rc_map_table trekstor[] = {
{ 0x008d, KEY_PLAY }, /* Play/Pause */
{ 0x008e, KEY_STOP }, /* Stop */
{ 0x008f, KEY_EPG }, /* Info/EPG */
- { 0x0090, KEY_7 },
- { 0x0091, KEY_4 },
- { 0x0092, KEY_1 },
+ { 0x0090, KEY_NUMERIC_7 },
+ { 0x0091, KEY_NUMERIC_4 },
+ { 0x0092, KEY_NUMERIC_1 },
{ 0x0093, KEY_CHANNELDOWN }, /* Channel - */
- { 0x0094, KEY_8 },
- { 0x0095, KEY_5 },
- { 0x0096, KEY_2 },
+ { 0x0094, KEY_NUMERIC_8 },
+ { 0x0095, KEY_NUMERIC_5 },
+ { 0x0096, KEY_NUMERIC_2 },
{ 0x0097, KEY_CHANNELUP }, /* Channel + */
- { 0x0098, KEY_9 },
- { 0x0099, KEY_6 },
- { 0x009a, KEY_3 },
+ { 0x0098, KEY_NUMERIC_9 },
+ { 0x0099, KEY_NUMERIC_6 },
+ { 0x009a, KEY_NUMERIC_3 },
{ 0x009b, KEY_VOLUMEDOWN }, /* Volume - */
{ 0x009c, KEY_TV }, /* TV */
{ 0x009d, KEY_RECORD }, /* Record */
diff --git a/drivers/media/rc/keymaps/rc-tt-1500.c b/drivers/media/rc/keymaps/rc-tt-1500.c
index 52f239d2c025..ff70aab13b48 100644
--- a/drivers/media/rc/keymaps/rc-tt-1500.c
+++ b/drivers/media/rc/keymaps/rc-tt-1500.c
@@ -13,16 +13,16 @@
static struct rc_map_table tt_1500[] = {
{ 0x1501, KEY_POWER },
{ 0x1502, KEY_SHUFFLE }, /* ? double-arrow key */
- { 0x1503, KEY_1 },
- { 0x1504, KEY_2 },
- { 0x1505, KEY_3 },
- { 0x1506, KEY_4 },
- { 0x1507, KEY_5 },
- { 0x1508, KEY_6 },
- { 0x1509, KEY_7 },
- { 0x150a, KEY_8 },
- { 0x150b, KEY_9 },
- { 0x150c, KEY_0 },
+ { 0x1503, KEY_NUMERIC_1 },
+ { 0x1504, KEY_NUMERIC_2 },
+ { 0x1505, KEY_NUMERIC_3 },
+ { 0x1506, KEY_NUMERIC_4 },
+ { 0x1507, KEY_NUMERIC_5 },
+ { 0x1508, KEY_NUMERIC_6 },
+ { 0x1509, KEY_NUMERIC_7 },
+ { 0x150a, KEY_NUMERIC_8 },
+ { 0x150b, KEY_NUMERIC_9 },
+ { 0x150c, KEY_NUMERIC_0 },
{ 0x150d, KEY_UP },
{ 0x150e, KEY_LEFT },
{ 0x150f, KEY_OK },
diff --git a/drivers/media/rc/keymaps/rc-twinhan-dtv-cab-ci.c b/drivers/media/rc/keymaps/rc-twinhan-dtv-cab-ci.c
index a72cb06a811e..5fc696d9e583 100644
--- a/drivers/media/rc/keymaps/rc-twinhan-dtv-cab-ci.c
+++ b/drivers/media/rc/keymaps/rc-twinhan-dtv-cab-ci.c
@@ -15,16 +15,16 @@ static struct rc_map_table twinhan_dtv_cab_ci[] = {
{ 0x23, KEY_EPG},
{ 0x3b, KEY_F22}, /* Record List */
- { 0x3c, KEY_1},
- { 0x3e, KEY_2},
- { 0x39, KEY_3},
- { 0x36, KEY_4},
- { 0x22, KEY_5},
- { 0x20, KEY_6},
- { 0x32, KEY_7},
- { 0x26, KEY_8},
- { 0x24, KEY_9},
- { 0x2a, KEY_0},
+ { 0x3c, KEY_NUMERIC_1},
+ { 0x3e, KEY_NUMERIC_2},
+ { 0x39, KEY_NUMERIC_3},
+ { 0x36, KEY_NUMERIC_4},
+ { 0x22, KEY_NUMERIC_5},
+ { 0x20, KEY_NUMERIC_6},
+ { 0x32, KEY_NUMERIC_7},
+ { 0x26, KEY_NUMERIC_8},
+ { 0x24, KEY_NUMERIC_9},
+ { 0x2a, KEY_NUMERIC_0},
{ 0x33, KEY_CANCEL},
{ 0x2c, KEY_BACK},
diff --git a/drivers/media/rc/keymaps/rc-twinhan1027.c b/drivers/media/rc/keymaps/rc-twinhan1027.c
index 3ee28bcf31dc..e1cdcfa792dc 100644
--- a/drivers/media/rc/keymaps/rc-twinhan1027.c
+++ b/drivers/media/rc/keymaps/rc-twinhan1027.c
@@ -10,16 +10,16 @@ static struct rc_map_table twinhan_vp1027[] = {
{ 0x1c, KEY_EPG },
{ 0x04, KEY_LIST },
- { 0x03, KEY_1 },
- { 0x01, KEY_2 },
- { 0x06, KEY_3 },
- { 0x09, KEY_4 },
- { 0x1d, KEY_5 },
- { 0x1f, KEY_6 },
- { 0x0d, KEY_7 },
- { 0x19, KEY_8 },
- { 0x1b, KEY_9 },
- { 0x15, KEY_0 },
+ { 0x03, KEY_NUMERIC_1 },
+ { 0x01, KEY_NUMERIC_2 },
+ { 0x06, KEY_NUMERIC_3 },
+ { 0x09, KEY_NUMERIC_4 },
+ { 0x1d, KEY_NUMERIC_5 },
+ { 0x1f, KEY_NUMERIC_6 },
+ { 0x0d, KEY_NUMERIC_7 },
+ { 0x19, KEY_NUMERIC_8 },
+ { 0x1b, KEY_NUMERIC_9 },
+ { 0x15, KEY_NUMERIC_0 },
{ 0x0c, KEY_CANCEL },
{ 0x4a, KEY_CLEAR },
diff --git a/drivers/media/rc/keymaps/rc-videomate-m1f.c b/drivers/media/rc/keymaps/rc-videomate-m1f.c
index d2d183759a03..e16b9b851c72 100644
--- a/drivers/media/rc/keymaps/rc-videomate-m1f.c
+++ b/drivers/media/rc/keymaps/rc-videomate-m1f.c
@@ -41,17 +41,17 @@ static struct rc_map_table videomate_k100[] = {
{ 0x10, KEY_PREVIOUS },
{ 0x0d, KEY_PAUSE },
{ 0x0f, KEY_NEXT },
- { 0x1e, KEY_1 },
- { 0x1f, KEY_2 },
- { 0x20, KEY_3 },
- { 0x21, KEY_4 },
- { 0x22, KEY_5 },
- { 0x23, KEY_6 },
- { 0x24, KEY_7 },
- { 0x25, KEY_8 },
- { 0x26, KEY_9 },
+ { 0x1e, KEY_NUMERIC_1 },
+ { 0x1f, KEY_NUMERIC_2 },
+ { 0x20, KEY_NUMERIC_3 },
+ { 0x21, KEY_NUMERIC_4 },
+ { 0x22, KEY_NUMERIC_5 },
+ { 0x23, KEY_NUMERIC_6 },
+ { 0x24, KEY_NUMERIC_7 },
+ { 0x25, KEY_NUMERIC_8 },
+ { 0x26, KEY_NUMERIC_9 },
{ 0x2a, KEY_NUMERIC_STAR }, /* * key */
- { 0x1d, KEY_0 },
+ { 0x1d, KEY_NUMERIC_0 },
{ 0x29, KEY_SUBTITLE }, /* # key */
{ 0x27, KEY_CLEAR },
{ 0x34, KEY_SCREEN },
diff --git a/drivers/media/rc/keymaps/rc-videomate-s350.c b/drivers/media/rc/keymaps/rc-videomate-s350.c
index e4d4dff06a24..a867d7a08055 100644
--- a/drivers/media/rc/keymaps/rc-videomate-s350.c
+++ b/drivers/media/rc/keymaps/rc-videomate-s350.c
@@ -22,16 +22,16 @@ static struct rc_map_table videomate_s350[] = {
{ 0x13, KEY_CHANNELDOWN},
{ 0x14, KEY_MUTE},
{ 0x15, KEY_VOLUMEDOWN},
- { 0x16, KEY_1},
- { 0x17, KEY_2},
- { 0x18, KEY_3},
- { 0x19, KEY_4},
- { 0x1a, KEY_5},
- { 0x1b, KEY_6},
- { 0x1c, KEY_7},
- { 0x1d, KEY_8},
- { 0x1e, KEY_9},
- { 0x1f, KEY_0},
+ { 0x16, KEY_NUMERIC_1},
+ { 0x17, KEY_NUMERIC_2},
+ { 0x18, KEY_NUMERIC_3},
+ { 0x19, KEY_NUMERIC_4},
+ { 0x1a, KEY_NUMERIC_5},
+ { 0x1b, KEY_NUMERIC_6},
+ { 0x1c, KEY_NUMERIC_7},
+ { 0x1d, KEY_NUMERIC_8},
+ { 0x1e, KEY_NUMERIC_9},
+ { 0x1f, KEY_NUMERIC_0},
{ 0x21, KEY_SLEEP},
{ 0x24, KEY_ZOOM},
{ 0x25, KEY_LAST}, /* Recall */
diff --git a/drivers/media/rc/keymaps/rc-videomate-tv-pvr.c b/drivers/media/rc/keymaps/rc-videomate-tv-pvr.c
index 7c4890944407..fdc3b0e1350f 100644
--- a/drivers/media/rc/keymaps/rc-videomate-tv-pvr.c
+++ b/drivers/media/rc/keymaps/rc-videomate-tv-pvr.c
@@ -42,16 +42,16 @@ static struct rc_map_table videomate_tv_pvr[] = {
{ 0x04, KEY_RECORD },
- { 0x16, KEY_1 },
- { 0x17, KEY_2 },
- { 0x18, KEY_3 },
- { 0x19, KEY_4 },
- { 0x1a, KEY_5 },
- { 0x1b, KEY_6 },
- { 0x1c, KEY_7 },
- { 0x1d, KEY_8 },
- { 0x1e, KEY_9 },
- { 0x1f, KEY_0 },
+ { 0x16, KEY_NUMERIC_1 },
+ { 0x17, KEY_NUMERIC_2 },
+ { 0x18, KEY_NUMERIC_3 },
+ { 0x19, KEY_NUMERIC_4 },
+ { 0x1a, KEY_NUMERIC_5 },
+ { 0x1b, KEY_NUMERIC_6 },
+ { 0x1c, KEY_NUMERIC_7 },
+ { 0x1d, KEY_NUMERIC_8 },
+ { 0x1e, KEY_NUMERIC_9 },
+ { 0x1f, KEY_NUMERIC_0 },
{ 0x20, KEY_LANGUAGE },
{ 0x21, KEY_SLEEP },
diff --git a/drivers/media/rc/keymaps/rc-winfast-usbii-deluxe.c b/drivers/media/rc/keymaps/rc-winfast-usbii-deluxe.c
index e443192dbe14..999ba4e084ae 100644
--- a/drivers/media/rc/keymaps/rc-winfast-usbii-deluxe.c
+++ b/drivers/media/rc/keymaps/rc-winfast-usbii-deluxe.c
@@ -13,16 +13,16 @@
*/
static struct rc_map_table winfast_usbii_deluxe[] = {
- { 0x62, KEY_0},
- { 0x75, KEY_1},
- { 0x76, KEY_2},
- { 0x77, KEY_3},
- { 0x79, KEY_4},
- { 0x7a, KEY_5},
- { 0x7b, KEY_6},
- { 0x7d, KEY_7},
- { 0x7e, KEY_8},
- { 0x7f, KEY_9},
+ { 0x62, KEY_NUMERIC_0},
+ { 0x75, KEY_NUMERIC_1},
+ { 0x76, KEY_NUMERIC_2},
+ { 0x77, KEY_NUMERIC_3},
+ { 0x79, KEY_NUMERIC_4},
+ { 0x7a, KEY_NUMERIC_5},
+ { 0x7b, KEY_NUMERIC_6},
+ { 0x7d, KEY_NUMERIC_7},
+ { 0x7e, KEY_NUMERIC_8},
+ { 0x7f, KEY_NUMERIC_9},
{ 0x38, KEY_CAMERA}, /* SNAPSHOT */
{ 0x37, KEY_RECORD}, /* RECORD */
diff --git a/drivers/media/rc/keymaps/rc-winfast.c b/drivers/media/rc/keymaps/rc-winfast.c
index ee7f4c349fd6..be52a3e1f8ae 100644
--- a/drivers/media/rc/keymaps/rc-winfast.c
+++ b/drivers/media/rc/keymaps/rc-winfast.c
@@ -12,16 +12,16 @@
static struct rc_map_table winfast[] = {
/* Keys 0 to 9 */
- { 0x12, KEY_0 },
- { 0x05, KEY_1 },
- { 0x06, KEY_2 },
- { 0x07, KEY_3 },
- { 0x09, KEY_4 },
- { 0x0a, KEY_5 },
- { 0x0b, KEY_6 },
- { 0x0d, KEY_7 },
- { 0x0e, KEY_8 },
- { 0x0f, KEY_9 },
+ { 0x12, KEY_NUMERIC_0 },
+ { 0x05, KEY_NUMERIC_1 },
+ { 0x06, KEY_NUMERIC_2 },
+ { 0x07, KEY_NUMERIC_3 },
+ { 0x09, KEY_NUMERIC_4 },
+ { 0x0a, KEY_NUMERIC_5 },
+ { 0x0b, KEY_NUMERIC_6 },
+ { 0x0d, KEY_NUMERIC_7 },
+ { 0x0e, KEY_NUMERIC_8 },
+ { 0x0f, KEY_NUMERIC_9 },
{ 0x00, KEY_POWER2 },
{ 0x1b, KEY_AUDIO }, /* Audio Source */
diff --git a/drivers/media/rc/keymaps/rc-xbox-dvd.c b/drivers/media/rc/keymaps/rc-xbox-dvd.c
index 42815ab57bff..9d656042a81f 100644
--- a/drivers/media/rc/keymaps/rc-xbox-dvd.c
+++ b/drivers/media/rc/keymaps/rc-xbox-dvd.c
@@ -14,16 +14,16 @@ static struct rc_map_table xbox_dvd[] = {
{0xaa9, KEY_LEFT},
{0xac3, KEY_INFO},
- {0xac6, KEY_9},
- {0xac7, KEY_8},
- {0xac8, KEY_7},
- {0xac9, KEY_6},
- {0xaca, KEY_5},
- {0xacb, KEY_4},
- {0xacc, KEY_3},
- {0xacd, KEY_2},
- {0xace, KEY_1},
- {0xacf, KEY_0},
+ {0xac6, KEY_NUMERIC_9},
+ {0xac7, KEY_NUMERIC_8},
+ {0xac8, KEY_NUMERIC_7},
+ {0xac9, KEY_NUMERIC_6},
+ {0xaca, KEY_NUMERIC_5},
+ {0xacb, KEY_NUMERIC_4},
+ {0xacc, KEY_NUMERIC_3},
+ {0xacd, KEY_NUMERIC_2},
+ {0xace, KEY_NUMERIC_1},
+ {0xacf, KEY_NUMERIC_0},
{0xad5, KEY_ANGLE},
{0xad8, KEY_BACK},
diff --git a/drivers/media/rc/keymaps/rc-zx-irdec.c b/drivers/media/rc/keymaps/rc-zx-irdec.c
index 84ca48966401..7bb0c05eb759 100644
--- a/drivers/media/rc/keymaps/rc-zx-irdec.c
+++ b/drivers/media/rc/keymaps/rc-zx-irdec.c
@@ -8,16 +8,16 @@
#include <media/rc-map.h>
static struct rc_map_table zx_irdec_table[] = {
- { 0x01, KEY_1 },
- { 0x02, KEY_2 },
- { 0x03, KEY_3 },
- { 0x04, KEY_4 },
- { 0x05, KEY_5 },
- { 0x06, KEY_6 },
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x09, KEY_9 },
- { 0x31, KEY_0 },
+ { 0x01, KEY_NUMERIC_1 },
+ { 0x02, KEY_NUMERIC_2 },
+ { 0x03, KEY_NUMERIC_3 },
+ { 0x04, KEY_NUMERIC_4 },
+ { 0x05, KEY_NUMERIC_5 },
+ { 0x06, KEY_NUMERIC_6 },
+ { 0x07, KEY_NUMERIC_7 },
+ { 0x08, KEY_NUMERIC_8 },
+ { 0x09, KEY_NUMERIC_9 },
+ { 0x31, KEY_NUMERIC_0 },
{ 0x16, KEY_DELETE },
{ 0x0a, KEY_MODE }, /* Input method */
{ 0x0c, KEY_VOLUMEUP },
diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
index 10830605c734..f078f8a3aec8 100644
--- a/drivers/media/rc/lirc_dev.c
+++ b/drivers/media/rc/lirc_dev.c
@@ -19,7 +19,7 @@
#include "rc-core-priv.h"
#include <uapi/linux/lirc.h>
-#define LIRCBUF_SIZE 256
+#define LIRCBUF_SIZE 1024
static dev_t lirc_base_dev;
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index 72862e4bec62..4d5351ebb940 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -1176,8 +1176,8 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
rawir.pulse = ((ir->buf_in[i] & MCE_PULSE_BIT) != 0);
rawir.duration = (ir->buf_in[i] & MCE_PULSE_MASK);
if (unlikely(!rawir.duration)) {
- dev_warn(ir->dev, "nonsensical irdata %02x with duration 0",
- ir->buf_in[i]);
+ dev_dbg(ir->dev, "nonsensical irdata %02x with duration 0",
+ ir->buf_in[i]);
break;
}
if (rawir.pulse) {
diff --git a/drivers/media/rc/meson-ir.c b/drivers/media/rc/meson-ir.c
index 9e1a978a5fd9..72a7bbbf6b1f 100644
--- a/drivers/media/rc/meson-ir.c
+++ b/drivers/media/rc/meson-ir.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for Amlogic Meson IR remote receiver
*
@@ -113,10 +113,8 @@ static int meson_ir_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ir->reg = devm_ioremap_resource(dev, res);
- if (IS_ERR(ir->reg)) {
- dev_err(dev, "failed to map registers\n");
+ if (IS_ERR(ir->reg))
return PTR_ERR(ir->reg);
- }
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
diff --git a/drivers/media/rc/mtk-cir.c b/drivers/media/rc/mtk-cir.c
index 46101efe017b..50fb0aebb8d4 100644
--- a/drivers/media/rc/mtk-cir.c
+++ b/drivers/media/rc/mtk-cir.c
@@ -320,10 +320,8 @@ static int mtk_ir_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ir->base = devm_ioremap_resource(dev, res);
- if (IS_ERR(ir->base)) {
- dev_err(dev, "failed to map registers\n");
+ if (IS_ERR(ir->base))
return PTR_ERR(ir->base);
- }
ir->rc = devm_rc_allocate_device(dev, RC_DRIVER_IR_RAW);
if (!ir->rc) {
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index be5fd129d728..13da4c5c7d17 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -1502,7 +1502,7 @@ static ssize_t store_wakeup_protocols(struct device *device,
const char *buf, size_t len)
{
struct rc_dev *dev = to_rc_dev(device);
- enum rc_proto protocol;
+ enum rc_proto protocol = RC_PROTO_UNKNOWN;
ssize_t rc;
u64 allowed;
int i;
@@ -1511,9 +1511,7 @@ static ssize_t store_wakeup_protocols(struct device *device,
allowed = dev->allowed_wakeup_protocols;
- if (sysfs_streq(buf, "none")) {
- protocol = RC_PROTO_UNKNOWN;
- } else {
+ if (!sysfs_streq(buf, "none")) {
for (i = 0; i < ARRAY_SIZE(protocols); i++) {
if ((allowed & (1ULL << i)) &&
sysfs_streq(buf, protocols[i].name)) {
diff --git a/drivers/media/rc/sunxi-cir.c b/drivers/media/rc/sunxi-cir.c
index a48f68539231..aa719d0ae6b0 100644
--- a/drivers/media/rc/sunxi-cir.c
+++ b/drivers/media/rc/sunxi-cir.c
@@ -195,7 +195,6 @@ static int sunxi_ir_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ir->base = devm_ioremap_resource(dev, res);
if (IS_ERR(ir->base)) {
- dev_err(dev, "failed to map registers\n");
ret = PTR_ERR(ir->base);
goto exit_clkdisable_clk;
}
diff --git a/drivers/media/spi/Kconfig b/drivers/media/spi/Kconfig
index ba464efdab03..08386abb9bbc 100644
--- a/drivers/media/spi/Kconfig
+++ b/drivers/media/spi/Kconfig
@@ -2,7 +2,7 @@
if VIDEO_V4L2
menu "SPI helper chips"
- visible if !MEDIA_SUBDRV_AUTOSELECT || COMPILE_TEST
+ visible if !MEDIA_SUBDRV_AUTOSELECT || COMPILE_TEST || EXPERT
config VIDEO_GS1662
tristate "Gennum Serializers video"
diff --git a/drivers/media/tuners/Kconfig b/drivers/media/tuners/Kconfig
index 72805e5abc68..a7108e575e9b 100644
--- a/drivers/media/tuners/Kconfig
+++ b/drivers/media/tuners/Kconfig
@@ -16,7 +16,7 @@ config MEDIA_TUNER
select MEDIA_TUNER_MC44S803 if MEDIA_SUBDRV_AUTOSELECT
menu "Customize TV tuners"
- visible if !MEDIA_SUBDRV_AUTOSELECT || COMPILE_TEST
+ visible if !MEDIA_SUBDRV_AUTOSELECT || COMPILE_TEST || EXPERT
depends on MEDIA_ANALOG_TV_SUPPORT || MEDIA_DIGITAL_TV_SUPPORT || MEDIA_RADIO_SUPPORT || MEDIA_SDR_SUPPORT
config MEDIA_TUNER_SIMPLE
diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c
index 7be893def190..e87040d6eca7 100644
--- a/drivers/media/tuners/si2157.c
+++ b/drivers/media/tuners/si2157.c
@@ -129,6 +129,7 @@ static int si2157_init(struct dvb_frontend *fe)
chip_id = cmd.args[1] << 24 | cmd.args[2] << 16 | cmd.args[3] << 8 |
cmd.args[4] << 0;
+ #define SI2177_A30 ('A' << 24 | 77 << 16 | '3' << 8 | '0' << 0)
#define SI2158_A20 ('A' << 24 | 58 << 16 | '2' << 8 | '0' << 0)
#define SI2148_A20 ('A' << 24 | 48 << 16 | '2' << 8 | '0' << 0)
#define SI2157_A30 ('A' << 24 | 57 << 16 | '3' << 8 | '0' << 0)
@@ -144,6 +145,9 @@ static int si2157_init(struct dvb_frontend *fe)
case SI2141_A10:
fw_name = SI2141_A10_FIRMWARE;
break;
+ case SI2177_A30:
+ fw_name = SI2157_A30_FIRMWARE;
+ break;
case SI2157_A30:
case SI2147_A30:
case SI2146_A10:
@@ -520,6 +524,7 @@ static const struct i2c_device_id si2157_id_table[] = {
{"si2157", SI2157_CHIPTYPE_SI2157},
{"si2146", SI2157_CHIPTYPE_SI2146},
{"si2141", SI2157_CHIPTYPE_SI2141},
+ {"si2177", SI2157_CHIPTYPE_SI2177},
{}
};
MODULE_DEVICE_TABLE(i2c, si2157_id_table);
@@ -541,3 +546,4 @@ MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(SI2158_A20_FIRMWARE);
MODULE_FIRMWARE(SI2141_A10_FIRMWARE);
+MODULE_FIRMWARE(SI2157_A30_FIRMWARE);
diff --git a/drivers/media/tuners/si2157_priv.h b/drivers/media/tuners/si2157_priv.h
index 7d16934c7708..2bda903358da 100644
--- a/drivers/media/tuners/si2157_priv.h
+++ b/drivers/media/tuners/si2157_priv.h
@@ -41,6 +41,7 @@ struct si2157_dev {
#define SI2157_CHIPTYPE_SI2157 0
#define SI2157_CHIPTYPE_SI2146 1
#define SI2157_CHIPTYPE_SI2141 2
+#define SI2157_CHIPTYPE_SI2177 3
/* firmware command struct */
#define SI2157_ARGLEN 30
@@ -52,5 +53,5 @@ struct si2157_cmd {
#define SI2158_A20_FIRMWARE "dvb-tuner-si2158-a20-01.fw"
#define SI2141_A10_FIRMWARE "dvb-tuner-si2141-a10-01.fw"
-
+#define SI2157_A30_FIRMWARE "dvb-tuner-si2157-a30-01.fw"
#endif
diff --git a/drivers/media/usb/airspy/airspy.c b/drivers/media/usb/airspy/airspy.c
index 3329de6671ce..b35231ffe503 100644
--- a/drivers/media/usb/airspy/airspy.c
+++ b/drivers/media/usb/airspy/airspy.c
@@ -613,10 +613,6 @@ static int airspy_querycap(struct file *file, void *fh,
strscpy(cap->driver, KBUILD_MODNAME, sizeof(cap->driver));
strscpy(cap->card, s->vdev.name, sizeof(cap->card));
usb_make_path(s->udev, cap->bus_info, sizeof(cap->bus_info));
- cap->device_caps = V4L2_CAP_SDR_CAPTURE | V4L2_CAP_STREAMING |
- V4L2_CAP_READWRITE | V4L2_CAP_TUNER;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
-
return 0;
}
@@ -1057,6 +1053,8 @@ static int airspy_probe(struct usb_interface *intf,
s->v4l2_dev.ctrl_handler = &s->hdl;
s->vdev.v4l2_dev = &s->v4l2_dev;
s->vdev.lock = &s->v4l2_lock;
+ s->vdev.device_caps = V4L2_CAP_SDR_CAPTURE | V4L2_CAP_STREAMING |
+ V4L2_CAP_READWRITE | V4L2_CAP_TUNER;
ret = video_register_device(&s->vdev, VFL_TYPE_SDR, -1);
if (ret) {
diff --git a/drivers/media/usb/au0828/au0828-core.c b/drivers/media/usb/au0828/au0828-core.c
index f746f6e2f686..a8a72d5fbd12 100644
--- a/drivers/media/usb/au0828/au0828-core.c
+++ b/drivers/media/usb/au0828/au0828-core.c
@@ -719,6 +719,12 @@ static int au0828_usb_probe(struct usb_interface *interface,
/* Setup */
au0828_card_setup(dev);
+ /*
+ * Store the pointer to the au0828_dev so it can be accessed in
+ * au0828_usb_disconnect
+ */
+ usb_set_intfdata(interface, dev);
+
/* Analog TV */
retval = au0828_analog_register(dev, interface);
if (retval) {
@@ -737,12 +743,6 @@ static int au0828_usb_probe(struct usb_interface *interface,
/* Remote controller */
au0828_rc_register(dev);
- /*
- * Store the pointer to the au0828_dev so it can be accessed in
- * au0828_usb_disconnect
- */
- usb_set_intfdata(interface, dev);
-
pr_info("Registered device AU0828 [%s]\n",
dev->board.name == NULL ? "Unset" : dev->board.name);
diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
index a414a25e48a8..5e00019bce8a 100644
--- a/drivers/media/usb/au0828/au0828-video.c
+++ b/drivers/media/usb/au0828/au0828-video.c
@@ -1182,7 +1182,6 @@ static int au0828_set_format(struct au0828_dev *dev, unsigned int cmd,
static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- struct video_device *vdev = video_devdata(file);
struct au0828_dev *dev = video_drvdata(file);
dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
@@ -1193,16 +1192,10 @@ static int vidioc_querycap(struct file *file, void *priv,
usb_make_path(dev->usbdev, cap->bus_info, sizeof(cap->bus_info));
/* set the device capabilities */
- cap->device_caps = V4L2_CAP_AUDIO |
- V4L2_CAP_READWRITE |
- V4L2_CAP_STREAMING |
- V4L2_CAP_TUNER;
- if (vdev->vfl_type == VFL_TYPE_GRABBER)
- cap->device_caps |= V4L2_CAP_VIDEO_CAPTURE;
- else
- cap->device_caps |= V4L2_CAP_VBI_CAPTURE;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS |
- V4L2_CAP_VBI_CAPTURE | V4L2_CAP_VIDEO_CAPTURE;
+ cap->capabilities =
+ V4L2_CAP_AUDIO | V4L2_CAP_READWRITE | V4L2_CAP_STREAMING |
+ V4L2_CAP_TUNER | V4L2_CAP_VBI_CAPTURE | V4L2_CAP_VIDEO_CAPTURE |
+ V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -1991,6 +1984,9 @@ int au0828_analog_register(struct au0828_dev *dev,
dev->vdev.lock = &dev->lock;
dev->vdev.queue = &dev->vb_vidq;
dev->vdev.queue->lock = &dev->vb_queue_lock;
+ dev->vdev.device_caps =
+ V4L2_CAP_AUDIO | V4L2_CAP_READWRITE | V4L2_CAP_STREAMING |
+ V4L2_CAP_TUNER | V4L2_CAP_VIDEO_CAPTURE;
strscpy(dev->vdev.name, "au0828a video", sizeof(dev->vdev.name));
/* Setup the VBI device */
@@ -1999,6 +1995,9 @@ int au0828_analog_register(struct au0828_dev *dev,
dev->vbi_dev.lock = &dev->lock;
dev->vbi_dev.queue = &dev->vb_vbiq;
dev->vbi_dev.queue->lock = &dev->vb_vbi_queue_lock;
+ dev->vbi_dev.device_caps =
+ V4L2_CAP_AUDIO | V4L2_CAP_READWRITE | V4L2_CAP_STREAMING |
+ V4L2_CAP_TUNER | V4L2_CAP_VBI_CAPTURE;
strscpy(dev->vbi_dev.name, "au0828a vbi", sizeof(dev->vbi_dev.name));
/* Init entities at the Media Controller */
diff --git a/drivers/media/usb/cpia2/cpia2_usb.c b/drivers/media/usb/cpia2/cpia2_usb.c
index b2268981c963..17468f7d78ed 100644
--- a/drivers/media/usb/cpia2/cpia2_usb.c
+++ b/drivers/media/usb/cpia2/cpia2_usb.c
@@ -893,7 +893,6 @@ static void cpia2_usb_disconnect(struct usb_interface *intf)
cpia2_unregister_camera(cam);
v4l2_device_disconnect(&cam->v4l2_dev);
mutex_unlock(&cam->v4l2_lock);
- v4l2_device_put(&cam->v4l2_dev);
if(cam->buffers) {
DBG("Wakeup waiting processes\n");
@@ -902,6 +901,8 @@ static void cpia2_usb_disconnect(struct usb_interface *intf)
wake_up_interruptible(&cam->wq_stream);
}
+ v4l2_device_put(&cam->v4l2_dev);
+
LOG("CPiA2 camera disconnected.\n");
}
diff --git a/drivers/media/usb/cpia2/cpia2_v4l.c b/drivers/media/usb/cpia2/cpia2_v4l.c
index da6a5b2f86d1..0feae825cebb 100644
--- a/drivers/media/usb/cpia2/cpia2_v4l.c
+++ b/drivers/media/usb/cpia2/cpia2_v4l.c
@@ -250,13 +250,6 @@ static int cpia2_querycap(struct file *file, void *fh, struct v4l2_capability *v
if (usb_make_path(cam->dev, vc->bus_info, sizeof(vc->bus_info)) <0)
memset(vc->bus_info,0, sizeof(vc->bus_info));
-
- vc->device_caps = V4L2_CAP_VIDEO_CAPTURE |
- V4L2_CAP_READWRITE |
- V4L2_CAP_STREAMING;
- vc->capabilities = vc->device_caps |
- V4L2_CAP_DEVICE_CAPS;
-
return 0;
}
@@ -1152,6 +1145,8 @@ int cpia2_register_camera(struct camera_data *cam)
cam->vdev.lock = &cam->v4l2_lock;
cam->vdev.ctrl_handler = hdl;
cam->vdev.v4l2_dev = &cam->v4l2_dev;
+ cam->vdev.device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
+ V4L2_CAP_STREAMING;
reset_camera_struct_v4l(cam);
diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c
index 26b05df698f0..e0d98ba8fdbf 100644
--- a/drivers/media/usb/cx231xx/cx231xx-cards.c
+++ b/drivers/media/usb/cx231xx/cx231xx-cards.c
@@ -1023,6 +1023,8 @@ struct usb_device_id cx231xx_id_table[] = {
.driver_info = CX231XX_BOARD_HAUPPAUGE_EXETER},
{USB_DEVICE(0x2040, 0xb123),
.driver_info = CX231XX_BOARD_HAUPPAUGE_955Q},
+ {USB_DEVICE(0x2040, 0xb124),
+ .driver_info = CX231XX_BOARD_HAUPPAUGE_955Q},
{USB_DEVICE(0x2040, 0xb151),
.driver_info = CX231XX_BOARD_HAUPPAUGE_935C},
{USB_DEVICE(0x2040, 0xb150),
diff --git a/drivers/media/usb/cx231xx/cx231xx-dvb.c b/drivers/media/usb/cx231xx/cx231xx-dvb.c
index 8fbb9523c88d..e205f7f0a56a 100644
--- a/drivers/media/usb/cx231xx/cx231xx-dvb.c
+++ b/drivers/media/usb/cx231xx/cx231xx-dvb.c
@@ -1147,6 +1147,7 @@ static int dvb_fini(struct cx231xx *dev)
if (dev->dvb) {
unregister_dvb(dev->dvb);
+ kfree(dev->dvb);
dev->dvb = NULL;
}
diff --git a/drivers/media/usb/cx231xx/cx231xx-video.c b/drivers/media/usb/cx231xx/cx231xx-video.c
index f8820478d46b..b651ac7713ea 100644
--- a/drivers/media/usb/cx231xx/cx231xx-video.c
+++ b/drivers/media/usb/cx231xx/cx231xx-video.c
@@ -1555,30 +1555,19 @@ static int vidioc_streamoff(struct file *file, void *priv,
int cx231xx_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- struct video_device *vdev = video_devdata(file);
struct cx231xx_fh *fh = priv;
struct cx231xx *dev = fh->dev;
strscpy(cap->driver, "cx231xx", sizeof(cap->driver));
strscpy(cap->card, cx231xx_boards[dev->model].name, sizeof(cap->card));
usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info));
-
- if (vdev->vfl_type == VFL_TYPE_RADIO)
- cap->device_caps = V4L2_CAP_RADIO;
- else {
- cap->device_caps = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING;
- if (vdev->vfl_type == VFL_TYPE_VBI)
- cap->device_caps |= V4L2_CAP_VBI_CAPTURE;
- else
- cap->device_caps |= V4L2_CAP_VIDEO_CAPTURE;
- }
- if (dev->tuner_type != TUNER_ABSENT)
- cap->device_caps |= V4L2_CAP_TUNER;
- cap->capabilities = cap->device_caps | V4L2_CAP_READWRITE |
+ cap->capabilities = V4L2_CAP_READWRITE |
V4L2_CAP_VBI_CAPTURE | V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_STREAMING | V4L2_CAP_DEVICE_CAPS;
if (video_is_registered(&dev->radio_dev))
cap->capabilities |= V4L2_CAP_RADIO;
+ if (dev->tuner_type != TUNER_ABSENT)
+ cap->capabilities |= V4L2_CAP_TUNER;
return 0;
}
@@ -2234,6 +2223,11 @@ int cx231xx_register_analog_devices(struct cx231xx *dev)
dev_err(dev->dev, "failed to initialize video media entity!\n");
#endif
dev->vdev.ctrl_handler = &dev->ctrl_handler;
+ dev->vdev.device_caps = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING |
+ V4L2_CAP_VIDEO_CAPTURE;
+ if (dev->tuner_type != TUNER_ABSENT)
+ dev->vdev.device_caps |= V4L2_CAP_TUNER;
+
/* register v4l2 video video_device */
ret = video_register_device(&dev->vdev, VFL_TYPE_GRABBER,
video_nr[dev->devno]);
@@ -2262,6 +2256,11 @@ int cx231xx_register_analog_devices(struct cx231xx *dev)
dev_err(dev->dev, "failed to initialize vbi media entity!\n");
#endif
dev->vbi_dev.ctrl_handler = &dev->ctrl_handler;
+ dev->vbi_dev.device_caps = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING |
+ V4L2_CAP_VBI_CAPTURE;
+ if (dev->tuner_type != TUNER_ABSENT)
+ dev->vbi_dev.device_caps |= V4L2_CAP_TUNER;
+
/* register v4l2 vbi video_device */
ret = video_register_device(&dev->vbi_dev, VFL_TYPE_VBI,
vbi_nr[dev->devno]);
@@ -2277,6 +2276,7 @@ int cx231xx_register_analog_devices(struct cx231xx *dev)
cx231xx_vdev_init(dev, &dev->radio_dev,
&cx231xx_radio_template, "radio");
dev->radio_dev.ctrl_handler = &dev->radio_ctrl_handler;
+ dev->radio_dev.device_caps = V4L2_CAP_RADIO | V4L2_CAP_TUNER;
ret = video_register_device(&dev->radio_dev, VFL_TYPE_RADIO,
radio_nr[dev->devno]);
if (ret < 0) {
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index de52309eaaab..3afd18733614 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -107,8 +107,6 @@ static int af9035_ctrl_msg(struct dvb_usb_device *d, struct usb_req *req)
memcpy(req->rbuf, &state->buf[ACK_HDR_LEN], req->rlen);
exit:
mutex_unlock(&d->usb_mutex);
- if (ret < 0)
- dev_dbg(&intf->dev, "failed=%d\n", ret);
return ret;
}
diff --git a/drivers/media/usb/dvb-usb-v2/anysee.c b/drivers/media/usb/dvb-usb-v2/anysee.c
index 48fb0d41e03b..fb6d99dea31a 100644
--- a/drivers/media/usb/dvb-usb-v2/anysee.c
+++ b/drivers/media/usb/dvb-usb-v2/anysee.c
@@ -56,7 +56,7 @@ static int anysee_ctrl_msg(struct dvb_usb_device *d,
/* TODO FIXME: dvb_usb_generic_rw() fails rarely with error code -32
* (EPIPE, Broken pipe). Function supports currently msleep() as a
* parameter but I would not like to use it, since according to
- * Documentation/timers/timers-howto.txt it should not be used such
+ * Documentation/timers/timers-howto.rst it should not be used such
* short, under < 20ms, sleeps. Repeating failed message would be
* better choice as not to add unwanted delays...
* Fixing that correctly is one of those or both;
diff --git a/drivers/media/usb/dvb-usb-v2/dvb_usb_urb.c b/drivers/media/usb/dvb-usb-v2/dvb_usb_urb.c
index 91729a39a306..7e817ea506c6 100644
--- a/drivers/media/usb/dvb-usb-v2/dvb_usb_urb.c
+++ b/drivers/media/usb/dvb-usb-v2/dvb_usb_urb.c
@@ -24,14 +24,19 @@ static int dvb_usb_v2_generic_io(struct dvb_usb_device *d,
ret = usb_bulk_msg(d->udev, usb_sndbulkpipe(d->udev,
d->props->generic_bulk_ctrl_endpoint), wbuf, wlen,
&actual_length, 2000);
- if (ret < 0)
+ if (ret) {
dev_err(&d->udev->dev, "%s: usb_bulk_msg() failed=%d\n",
KBUILD_MODNAME, ret);
- else
- ret = actual_length != wlen ? -EIO : 0;
+ return ret;
+ }
+ if (actual_length != wlen) {
+ dev_err(&d->udev->dev, "%s: usb_bulk_msg() write length=%d, actual=%d\n",
+ KBUILD_MODNAME, wlen, actual_length);
+ return -EIO;
+ }
- /* an answer is expected, and no error before */
- if (!ret && rbuf && rlen) {
+ /* an answer is expected */
+ if (rbuf && rlen) {
if (d->props->generic_bulk_ctrl_delay)
usleep_range(d->props->generic_bulk_ctrl_delay,
d->props->generic_bulk_ctrl_delay
diff --git a/drivers/media/usb/dvb-usb-v2/dvbsky.c b/drivers/media/usb/dvb-usb-v2/dvbsky.c
index c41e10bd6ef7..8610487f2d72 100644
--- a/drivers/media/usb/dvb-usb-v2/dvbsky.c
+++ b/drivers/media/usb/dvb-usb-v2/dvbsky.c
@@ -91,8 +91,6 @@ static int dvbsky_gpio_ctrl(struct dvb_usb_device *d, u8 gport, u8 value)
obuf[1] = gport;
obuf[2] = value;
ret = dvbsky_usb_generic_rw(d, obuf, 3, ibuf, 1);
- if (ret)
- dev_err(&d->udev->dev, "failed=%d\n", ret);
return ret;
}
@@ -130,8 +128,6 @@ static int dvbsky_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
obuf[3] = msg[0].addr;
ret = dvbsky_usb_generic_rw(d, obuf, 4,
ibuf, msg[0].len + 1);
- if (ret)
- dev_err(&d->udev->dev, "failed=%d\n", ret);
if (!ret)
memcpy(msg[0].buf, &ibuf[1], msg[0].len);
} else {
@@ -142,8 +138,6 @@ static int dvbsky_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
memcpy(&obuf[3], msg[0].buf, msg[0].len);
ret = dvbsky_usb_generic_rw(d, obuf,
msg[0].len + 3, ibuf, 1);
- if (ret)
- dev_err(&d->udev->dev, "failed=%d\n", ret);
}
} else {
if ((msg[0].len > 60) || (msg[1].len > 60)) {
@@ -161,9 +155,6 @@ static int dvbsky_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
memcpy(&obuf[4], msg[0].buf, msg[0].len);
ret = dvbsky_usb_generic_rw(d, obuf,
msg[0].len + 4, ibuf, msg[1].len + 1);
- if (ret)
- dev_err(&d->udev->dev, "failed=%d\n", ret);
-
if (!ret)
memcpy(msg[1].buf, &ibuf[1], msg[1].len);
}
@@ -192,8 +183,6 @@ static int dvbsky_rc_query(struct dvb_usb_device *d)
obuf[0] = 0x10;
ret = dvbsky_usb_generic_rw(d, obuf, 1, ibuf, 2);
- if (ret)
- dev_err(&d->udev->dev, "failed=%d\n", ret);
if (ret == 0)
code = (ibuf[0] << 8) | ibuf[1];
if (code != 0xffff) {
diff --git a/drivers/media/usb/dvb-usb/Kconfig b/drivers/media/usb/dvb-usb/Kconfig
index 87dbae875177..1a3e5f965ae4 100644
--- a/drivers/media/usb/dvb-usb/Kconfig
+++ b/drivers/media/usb/dvb-usb/Kconfig
@@ -139,12 +139,24 @@ config DVB_USB_CXUSB
select MEDIA_TUNER_SI2157 if MEDIA_SUBDRV_AUTOSELECT
help
Say Y here to support the Conexant USB2.0 hybrid reference design.
- Currently, only DVB and ATSC modes are supported, analog mode
- shall be added in the future. Devices that require this module:
+ DVB and ATSC modes are supported; for basic analog mode support,
+ see the next option ("Analog support for the Conexant USB2.0 hybrid
+ reference design").
+ Devices that require this module:
Medion MD95700 hybrid USB2.0 device.
DViCO FusionHDTV (Bluebird) USB2.0 devices
+config DVB_USB_CXUSB_ANALOG
+ bool "Analog support for the Conexant USB2.0 hybrid reference design"
+ depends on DVB_USB_CXUSB && VIDEO_V4L2
+ select VIDEO_CX25840
+ select VIDEOBUF2_VMALLOC
+ help
+ Say Y here to enable basic analog mode support for the Conexant
+ USB2.0 hybrid reference design.
+ Currently this mode is supported only on a Medion MD95700 device.
+
config DVB_USB_M920X
tristate "Uli m920x DVB-T USB2.0 support"
depends on DVB_USB
diff --git a/drivers/media/usb/dvb-usb/Makefile b/drivers/media/usb/dvb-usb/Makefile
index 407d90ca8be0..28e4806a87cd 100644
--- a/drivers/media/usb/dvb-usb/Makefile
+++ b/drivers/media/usb/dvb-usb/Makefile
@@ -42,6 +42,9 @@ dvb-usb-digitv-objs := digitv.o
obj-$(CONFIG_DVB_USB_DIGITV) += dvb-usb-digitv.o
dvb-usb-cxusb-objs := cxusb.o
+ifeq ($(CONFIG_DVB_USB_CXUSB_ANALOG),y)
+dvb-usb-cxusb-objs += cxusb-analog.o
+endif
obj-$(CONFIG_DVB_USB_CXUSB) += dvb-usb-cxusb.o
dvb-usb-ttusb2-objs := ttusb2.o
diff --git a/drivers/media/usb/dvb-usb/cxusb-analog.c b/drivers/media/usb/dvb-usb/cxusb-analog.c
new file mode 100644
index 000000000000..0699f718d052
--- /dev/null
+++ b/drivers/media/usb/dvb-usb/cxusb-analog.c
@@ -0,0 +1,1845 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// DVB USB compliant linux driver for Conexant USB reference design -
+// (analog part).
+//
+// Copyright (C) 2011, 2017, 2018
+// Maciej S. Szmigiero (mail@maciej.szmigiero.name)
+//
+// In case there are new analog / DVB-T hybrid devices released in the market
+// using the same general design as the Medion MD95700 (a CX25840 video decoder
+// outputting a BT.656 stream to a USB bridge chip which then forwards it to
+// the host in isochronous USB packets), this code should be made generic, with
+// board-specific bits implemented via separate card structures.
+//
+// This is, however, unlikely as the Medion model was released
+// years ago (in 2005).
+//
+// TODO:
+// * audio support,
+// * finish radio support (requires audio of course),
+// * VBI support,
+// * controls support
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/ktime.h>
+#include <linux/vmalloc.h>
+#include <media/drv-intf/cx25840.h>
+#include <media/tuner.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-subdev.h>
+#include <media/videobuf2-vmalloc.h>
+
+#include "cxusb.h"
+
+static int cxusb_medion_v_queue_setup(struct vb2_queue *q,
+ unsigned int *num_buffers,
+ unsigned int *num_planes,
+ unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct dvb_usb_device *dvbdev = vb2_get_drv_priv(q);
+ struct cxusb_medion_dev *cxdev = dvbdev->priv;
+ unsigned int size = cxdev->width * cxdev->height * 2;
+
+ if (*num_planes > 0) {
+ if (*num_planes != 1)
+ return -EINVAL;
+
+ if (sizes[0] < size)
+ return -EINVAL;
+ } else {
+ *num_planes = 1;
+ sizes[0] = size;
+ }
+
+ return 0;
+}
+
+static int cxusb_medion_v_buf_init(struct vb2_buffer *vb)
+{
+ struct dvb_usb_device *dvbdev = vb2_get_drv_priv(vb->vb2_queue);
+ struct cxusb_medion_dev *cxdev = dvbdev->priv;
+
+ cxusb_vprintk(dvbdev, OPS, "buffer init\n");
+
+ if (vb2_plane_size(vb, 0) < cxdev->width * cxdev->height * 2)
+ return -ENOMEM;
+
+ cxusb_vprintk(dvbdev, OPS, "buffer OK\n");
+
+ return 0;
+}
+
+static void cxusb_auxbuf_init(struct dvb_usb_device *dvbdev,
+ struct cxusb_medion_auxbuf *auxbuf,
+ u8 *buf, unsigned int len)
+{
+ cxusb_vprintk(dvbdev, AUXB, "initializing auxbuf of len %u\n", len);
+
+ auxbuf->buf = buf;
+ auxbuf->len = len;
+ auxbuf->paylen = 0;
+}
+
+static void cxusb_auxbuf_head_trim(struct dvb_usb_device *dvbdev,
+ struct cxusb_medion_auxbuf *auxbuf,
+ unsigned int pos)
+{
+ if (pos == 0)
+ return;
+
+ if (WARN_ON(pos > auxbuf->paylen))
+ return;
+
+ cxusb_vprintk(dvbdev, AUXB,
+ "trimming auxbuf len by %u to %u\n",
+ pos, auxbuf->paylen - pos);
+
+ memmove(auxbuf->buf, auxbuf->buf + pos, auxbuf->paylen - pos);
+ auxbuf->paylen -= pos;
+}
+
+static unsigned int cxusb_auxbuf_paylen(struct cxusb_medion_auxbuf *auxbuf)
+{
+ return auxbuf->paylen;
+}
+
+static bool cxusb_auxbuf_make_space(struct dvb_usb_device *dvbdev,
+ struct cxusb_medion_auxbuf *auxbuf,
+ unsigned int howmuch)
+{
+ unsigned int freespace;
+
+ if (WARN_ON(howmuch >= auxbuf->len))
+ howmuch = auxbuf->len - 1;
+
+ freespace = auxbuf->len - cxusb_auxbuf_paylen(auxbuf);
+
+ cxusb_vprintk(dvbdev, AUXB, "freespace is %u\n", freespace);
+
+ if (freespace >= howmuch)
+ return true;
+
+ howmuch -= freespace;
+
+ cxusb_vprintk(dvbdev, AUXB, "will overwrite %u bytes of buffer\n",
+ howmuch);
+
+ cxusb_auxbuf_head_trim(dvbdev, auxbuf, howmuch);
+
+ return false;
+}
+
+/* returns false if some data was overwritten */
+static bool cxusb_auxbuf_append_urb(struct dvb_usb_device *dvbdev,
+ struct cxusb_medion_auxbuf *auxbuf,
+ struct urb *urb)
+{
+ unsigned long len;
+ int i;
+ bool ret;
+
+ for (i = 0, len = 0; i < urb->number_of_packets; i++)
+ len += urb->iso_frame_desc[i].actual_length;
+
+ ret = cxusb_auxbuf_make_space(dvbdev, auxbuf, len);
+
+ for (i = 0; i < urb->number_of_packets; i++) {
+ unsigned int to_copy;
+
+ to_copy = urb->iso_frame_desc[i].actual_length;
+
+ memcpy(auxbuf->buf + auxbuf->paylen, urb->transfer_buffer +
+ urb->iso_frame_desc[i].offset, to_copy);
+
+ auxbuf->paylen += to_copy;
+ }
+
+ return ret;
+}
+
+static bool cxusb_auxbuf_copy(struct cxusb_medion_auxbuf *auxbuf,
+ unsigned int pos, unsigned char *dest,
+ unsigned int len)
+{
+ if (pos + len > auxbuf->paylen)
+ return false;
+
+ memcpy(dest, auxbuf->buf + pos, len);
+
+ return true;
+}
+
+static bool cxusb_medion_cf_refc_fld_chg(struct dvb_usb_device *dvbdev,
+ struct cxusb_bt656_params *bt656,
+ bool firstfield,
+ unsigned int maxlines,
+ unsigned int maxlinesamples,
+ unsigned char buf[4])
+{
+ bool firstfield_code = (buf[3] & CXUSB_BT656_FIELD_MASK) ==
+ CXUSB_BT656_FIELD_1;
+ unsigned int remlines;
+
+ if (bt656->line == 0 || firstfield == firstfield_code)
+ return false;
+
+ if (bt656->fmode == LINE_SAMPLES) {
+ unsigned int remsamples = maxlinesamples -
+ bt656->linesamples;
+
+ cxusb_vprintk(dvbdev, BT656,
+ "field %c after line %u field change\n",
+ firstfield ? '1' : '2', bt656->line);
+
+ if (bt656->buf && remsamples > 0) {
+ memset(bt656->buf, 0, remsamples);
+ bt656->buf += remsamples;
+
+ cxusb_vprintk(dvbdev, BT656,
+ "field %c line %u %u samples still remaining (of %u)\n",
+ firstfield ? '1' : '2',
+ bt656->line, remsamples,
+ maxlinesamples);
+ }
+
+ bt656->line++;
+ }
+
+ remlines = maxlines - bt656->line;
+ if (bt656->buf && remlines > 0) {
+ memset(bt656->buf, 0, remlines * maxlinesamples);
+ bt656->buf += remlines * maxlinesamples;
+
+ cxusb_vprintk(dvbdev, BT656,
+ "field %c %u lines still remaining (of %u)\n",
+ firstfield ? '1' : '2', remlines,
+ maxlines);
+ }
+
+ return true;
+}
+
+static void cxusb_medion_cf_refc_start_sch(struct dvb_usb_device *dvbdev,
+ struct cxusb_bt656_params *bt656,
+ bool firstfield,
+ unsigned char buf[4])
+{
+ bool firstfield_code = (buf[3] & CXUSB_BT656_FIELD_MASK) ==
+ CXUSB_BT656_FIELD_1;
+ bool sav_code = (buf[3] & CXUSB_BT656_SEAV_MASK) ==
+ CXUSB_BT656_SEAV_SAV;
+ bool vbi_code = (buf[3] & CXUSB_BT656_VBI_MASK) ==
+ CXUSB_BT656_VBI_ON;
+
+ if (!sav_code || firstfield != firstfield_code)
+ return;
+
+ if (!vbi_code) {
+ cxusb_vprintk(dvbdev, BT656, "line start @ pos %u\n",
+ bt656->pos);
+
+ bt656->linesamples = 0;
+ bt656->fmode = LINE_SAMPLES;
+ } else {
+ cxusb_vprintk(dvbdev, BT656, "VBI start @ pos %u\n",
+ bt656->pos);
+
+ bt656->fmode = VBI_SAMPLES;
+ }
+}
+
+static void cxusb_medion_cf_refc_line_smpl(struct dvb_usb_device *dvbdev,
+ struct cxusb_bt656_params *bt656,
+ bool firstfield,
+ unsigned int maxlinesamples,
+ unsigned char buf[4])
+{
+ bool sav_code = (buf[3] & CXUSB_BT656_SEAV_MASK) ==
+ CXUSB_BT656_SEAV_SAV;
+ unsigned int remsamples;
+
+ if (sav_code)
+ cxusb_vprintk(dvbdev, BT656,
+ "SAV in line samples @ line %u, pos %u\n",
+ bt656->line, bt656->pos);
+
+ remsamples = maxlinesamples - bt656->linesamples;
+ if (bt656->buf && remsamples > 0) {
+ memset(bt656->buf, 0, remsamples);
+ bt656->buf += remsamples;
+
+ cxusb_vprintk(dvbdev, BT656,
+ "field %c line %u %u samples still remaining (of %u)\n",
+ firstfield ? '1' : '2', bt656->line, remsamples,
+ maxlinesamples);
+ }
+
+ bt656->fmode = START_SEARCH;
+ bt656->line++;
+}
+
+static void cxusb_medion_cf_refc_vbi_smpl(struct dvb_usb_device *dvbdev,
+ struct cxusb_bt656_params *bt656,
+ unsigned char buf[4])
+{
+ bool sav_code = (buf[3] & CXUSB_BT656_SEAV_MASK) ==
+ CXUSB_BT656_SEAV_SAV;
+
+ if (sav_code)
+ cxusb_vprintk(dvbdev, BT656, "SAV in VBI samples @ pos %u\n",
+ bt656->pos);
+
+ bt656->fmode = START_SEARCH;
+}
+
+/* returns whether the whole 4-byte code should be skipped in the buffer */
+static bool cxusb_medion_cf_ref_code(struct dvb_usb_device *dvbdev,
+ struct cxusb_bt656_params *bt656,
+ bool firstfield,
+ unsigned int maxlines,
+ unsigned int maxlinesamples,
+ unsigned char buf[4])
+{
+ if (bt656->fmode == START_SEARCH) {
+ cxusb_medion_cf_refc_start_sch(dvbdev, bt656, firstfield, buf);
+ } else if (bt656->fmode == LINE_SAMPLES) {
+ cxusb_medion_cf_refc_line_smpl(dvbdev, bt656, firstfield,
+ maxlinesamples, buf);
+ return false;
+ } else if (bt656->fmode == VBI_SAMPLES) {
+ cxusb_medion_cf_refc_vbi_smpl(dvbdev, bt656, buf);
+ return false;
+ }
+
+ return true;
+}
+
+static bool cxusb_medion_cs_start_sch(struct dvb_usb_device *dvbdev,
+ struct cxusb_medion_auxbuf *auxbuf,
+ struct cxusb_bt656_params *bt656,
+ unsigned int maxlinesamples)
+{
+ unsigned char buf[64];
+ unsigned int idx;
+ unsigned int tocheck = clamp_t(size_t, maxlinesamples / 4, 3,
+ sizeof(buf));
+
+ if (!cxusb_auxbuf_copy(auxbuf, bt656->pos + 1, buf, tocheck))
+ return false;
+
+ for (idx = 0; idx <= tocheck - 3; idx++)
+ if (memcmp(buf + idx, CXUSB_BT656_PREAMBLE, 3) == 0) {
+ bt656->pos += (1 + idx);
+ return true;
+ }
+
+ cxusb_vprintk(dvbdev, BT656, "line %u early start, pos %u\n",
+ bt656->line, bt656->pos);
+
+ bt656->linesamples = 0;
+ bt656->fmode = LINE_SAMPLES;
+
+ return true;
+}
+
+static void cxusb_medion_cs_line_smpl(struct cxusb_bt656_params *bt656,
+ unsigned int maxlinesamples,
+ unsigned char val)
+{
+ if (bt656->buf)
+ *(bt656->buf++) = val;
+
+ bt656->linesamples++;
+ bt656->pos++;
+
+ if (bt656->linesamples >= maxlinesamples) {
+ bt656->fmode = START_SEARCH;
+ bt656->line++;
+ }
+}
+
+static bool cxusb_medion_copy_samples(struct dvb_usb_device *dvbdev,
+ struct cxusb_medion_auxbuf *auxbuf,
+ struct cxusb_bt656_params *bt656,
+ unsigned int maxlinesamples,
+ unsigned char val)
+{
+ if (bt656->fmode == START_SEARCH && bt656->line > 0)
+ return cxusb_medion_cs_start_sch(dvbdev, auxbuf, bt656,
+ maxlinesamples);
+ else if (bt656->fmode == LINE_SAMPLES)
+ cxusb_medion_cs_line_smpl(bt656, maxlinesamples, val);
+ else /* TODO: copy VBI samples */
+ bt656->pos++;
+
+ return true;
+}
+
+static bool cxusb_medion_copy_field(struct dvb_usb_device *dvbdev,
+ struct cxusb_medion_auxbuf *auxbuf,
+ struct cxusb_bt656_params *bt656,
+ bool firstfield,
+ unsigned int maxlines,
+ unsigned int maxlinesmpls)
+{
+ while (bt656->line < maxlines) {
+ unsigned char val;
+
+ if (!cxusb_auxbuf_copy(auxbuf, bt656->pos, &val, 1))
+ break;
+
+ if (val == CXUSB_BT656_PREAMBLE[0]) {
+ unsigned char buf[4];
+
+ buf[0] = val;
+ if (!cxusb_auxbuf_copy(auxbuf, bt656->pos + 1,
+ buf + 1, 3))
+ break;
+
+ if (buf[1] == CXUSB_BT656_PREAMBLE[1] &&
+ buf[2] == CXUSB_BT656_PREAMBLE[2]) {
+ /*
+ * is this a field change?
+ * if so, terminate copying the current field
+ */
+ if (cxusb_medion_cf_refc_fld_chg(dvbdev,
+ bt656,
+ firstfield,
+ maxlines,
+ maxlinesmpls,
+ buf))
+ return true;
+
+ if (cxusb_medion_cf_ref_code(dvbdev, bt656,
+ firstfield,
+ maxlines,
+ maxlinesmpls,
+ buf))
+ bt656->pos += 4;
+
+ continue;
+ }
+ }
+
+ if (!cxusb_medion_copy_samples(dvbdev, auxbuf, bt656,
+ maxlinesmpls, val))
+ break;
+ }
+
+ if (bt656->line < maxlines) {
+ cxusb_vprintk(dvbdev, BT656,
+ "end of buffer pos = %u, line = %u\n",
+ bt656->pos, bt656->line);
+ return false;
+ }
+
+ return true;
+}
+
+static bool cxusb_medion_v_process_auxbuf(struct cxusb_medion_dev *cxdev,
+ bool reset)
+{
+ struct dvb_usb_device *dvbdev = cxdev->dvbdev;
+ struct cxusb_bt656_params *bt656 = &cxdev->bt656;
+
+ /*
+ * if this is a new frame
+ * fetch a buffer from list
+ */
+ if (bt656->mode == NEW_FRAME) {
+ if (!list_empty(&cxdev->buflist)) {
+ cxdev->vbuf =
+ list_first_entry(&cxdev->buflist,
+ struct cxusb_medion_vbuffer,
+ list);
+ list_del(&cxdev->vbuf->list);
+ } else {
+ dev_warn(&dvbdev->udev->dev, "no free buffers\n");
+ }
+ }
+
+ if (bt656->mode == NEW_FRAME || reset) {
+ cxusb_vprintk(dvbdev, URB, "will copy field 1\n");
+ bt656->pos = 0;
+ bt656->mode = FIRST_FIELD;
+ bt656->fmode = START_SEARCH;
+ bt656->line = 0;
+
+ if (cxdev->vbuf) {
+ cxdev->vbuf->vb2.vb2_buf.timestamp = ktime_get_ns();
+ bt656->buf = vb2_plane_vaddr(&cxdev->vbuf->vb2.vb2_buf,
+ 0);
+ }
+ }
+
+ if (bt656->mode == FIRST_FIELD) {
+ if (!cxusb_medion_copy_field(dvbdev, &cxdev->auxbuf, bt656,
+ true, cxdev->height / 2,
+ cxdev->width * 2))
+ return false;
+
+ /*
+ * do not trim the buffer here in case
+ * we need to reset the search later
+ */
+
+ cxusb_vprintk(dvbdev, URB, "will copy field 2\n");
+ bt656->mode = SECOND_FIELD;
+ bt656->fmode = START_SEARCH;
+ bt656->line = 0;
+ }
+
+ if (bt656->mode == SECOND_FIELD) {
+ if (!cxusb_medion_copy_field(dvbdev, &cxdev->auxbuf, bt656,
+ false, cxdev->height / 2,
+ cxdev->width * 2))
+ return false;
+
+ cxusb_auxbuf_head_trim(dvbdev, &cxdev->auxbuf, bt656->pos);
+
+ bt656->mode = NEW_FRAME;
+
+ if (cxdev->vbuf) {
+ vb2_set_plane_payload(&cxdev->vbuf->vb2.vb2_buf, 0,
+ cxdev->width * cxdev->height * 2);
+
+ cxdev->vbuf->vb2.field = cxdev->field_order;
+ cxdev->vbuf->vb2.sequence = cxdev->vbuf_sequence++;
+
+ vb2_buffer_done(&cxdev->vbuf->vb2.vb2_buf,
+ VB2_BUF_STATE_DONE);
+
+ cxdev->vbuf = NULL;
+ cxdev->bt656.buf = NULL;
+
+ cxusb_vprintk(dvbdev, URB, "frame done\n");
+ } else {
+ cxusb_vprintk(dvbdev, URB, "frame skipped\n");
+ cxdev->vbuf_sequence++;
+ }
+ }
+
+ return true;
+}
+
+static bool cxusb_medion_v_complete_handle_urb(struct cxusb_medion_dev *cxdev,
+ bool *auxbuf_reset)
+{
+ struct dvb_usb_device *dvbdev = cxdev->dvbdev;
+ unsigned int urbn;
+ struct urb *urb;
+ int ret;
+
+ *auxbuf_reset = false;
+
+ urbn = cxdev->nexturb;
+ if (!test_bit(urbn, &cxdev->urbcomplete))
+ return false;
+
+ clear_bit(urbn, &cxdev->urbcomplete);
+
+ do {
+ cxdev->nexturb++;
+ cxdev->nexturb %= CXUSB_VIDEO_URBS;
+ urb = cxdev->streamurbs[cxdev->nexturb];
+ } while (!urb);
+
+ urb = cxdev->streamurbs[urbn];
+ cxusb_vprintk(dvbdev, URB, "URB %u status = %d\n", urbn, urb->status);
+
+ if (urb->status == 0 || urb->status == -EXDEV) {
+ int i;
+ unsigned long len;
+
+ for (i = 0, len = 0; i < urb->number_of_packets; i++)
+ len += urb->iso_frame_desc[i].actual_length;
+
+ cxusb_vprintk(dvbdev, URB, "URB %u data len = %lu\n", urbn,
+ len);
+
+ if (len > 0) {
+ cxusb_vprintk(dvbdev, URB, "appending URB\n");
+
+ /*
+ * append new data to auxbuf while
+ * overwriting old data if necessary
+ *
+ * if any overwrite happens then we can no
+ * longer rely on the consistency of the whole
+ * data, so restart the current auxbuf frame
+ * assembly process from the beginning
+ */
+ *auxbuf_reset =
+ !cxusb_auxbuf_append_urb(dvbdev,
+ &cxdev->auxbuf,
+ urb);
+ }
+ }
+
+ cxusb_vprintk(dvbdev, URB, "URB %u resubmit\n", urbn);
+
+ ret = usb_submit_urb(urb, GFP_KERNEL);
+ if (ret != 0)
+ dev_err(&dvbdev->udev->dev,
+ "unable to resubmit URB %u (%d), you'll have to restart streaming\n",
+ urbn, ret);
+
+ /* next URB is complete already? reschedule us then to handle it */
+ return test_bit(cxdev->nexturb, &cxdev->urbcomplete);
+}
+
+static void cxusb_medion_v_complete_work(struct work_struct *work)
+{
+ struct cxusb_medion_dev *cxdev = container_of(work,
+ struct cxusb_medion_dev,
+ urbwork);
+ struct dvb_usb_device *dvbdev = cxdev->dvbdev;
+ bool auxbuf_reset;
+ bool reschedule;
+
+ mutex_lock(cxdev->videodev->lock);
+
+ cxusb_vprintk(dvbdev, URB, "worker called, stop_streaming = %d\n",
+ (int)cxdev->stop_streaming);
+
+ if (cxdev->stop_streaming)
+ goto unlock;
+
+ reschedule = cxusb_medion_v_complete_handle_urb(cxdev, &auxbuf_reset);
+
+ if (cxusb_medion_v_process_auxbuf(cxdev, auxbuf_reset))
+ /* reschedule us until auxbuf no longer can produce any frame */
+ reschedule = true;
+
+ if (reschedule) {
+ cxusb_vprintk(dvbdev, URB, "rescheduling worker\n");
+ schedule_work(&cxdev->urbwork);
+ }
+
+unlock:
+ mutex_unlock(cxdev->videodev->lock);
+}
+
+static void cxusb_medion_v_complete(struct urb *u)
+{
+ struct dvb_usb_device *dvbdev = u->context;
+ struct cxusb_medion_dev *cxdev = dvbdev->priv;
+ unsigned int i;
+
+ for (i = 0; i < CXUSB_VIDEO_URBS; i++)
+ if (cxdev->streamurbs[i] == u)
+ break;
+
+ if (i >= CXUSB_VIDEO_URBS) {
+ dev_err(&dvbdev->udev->dev,
+ "complete on unknown URB\n");
+ return;
+ }
+
+ cxusb_vprintk(dvbdev, URB, "URB %u complete\n", i);
+
+ set_bit(i, &cxdev->urbcomplete);
+ schedule_work(&cxdev->urbwork);
+}
+
+static void cxusb_medion_urbs_free(struct cxusb_medion_dev *cxdev)
+{
+ unsigned int i;
+
+ for (i = 0; i < CXUSB_VIDEO_URBS; i++)
+ if (cxdev->streamurbs[i]) {
+ kfree(cxdev->streamurbs[i]->transfer_buffer);
+ usb_free_urb(cxdev->streamurbs[i]);
+ cxdev->streamurbs[i] = NULL;
+ }
+}
+
+static void cxusb_medion_return_buffers(struct cxusb_medion_dev *cxdev,
+ bool requeue)
+{
+ struct cxusb_medion_vbuffer *vbuf, *vbuf_tmp;
+
+ list_for_each_entry_safe(vbuf, vbuf_tmp, &cxdev->buflist,
+ list) {
+ list_del(&vbuf->list);
+ vb2_buffer_done(&vbuf->vb2.vb2_buf,
+ requeue ? VB2_BUF_STATE_QUEUED :
+ VB2_BUF_STATE_ERROR);
+ }
+
+ if (cxdev->vbuf) {
+ vb2_buffer_done(&cxdev->vbuf->vb2.vb2_buf,
+ requeue ? VB2_BUF_STATE_QUEUED :
+ VB2_BUF_STATE_ERROR);
+
+ cxdev->vbuf = NULL;
+ cxdev->bt656.buf = NULL;
+ }
+}
+
+static int cxusb_medion_v_ss_auxbuf_alloc(struct cxusb_medion_dev *cxdev,
+ int *npackets)
+{
+ struct dvb_usb_device *dvbdev = cxdev->dvbdev;
+ u8 *buf;
+ unsigned int framelen, urblen, auxbuflen;
+
+ framelen = (cxdev->width * 2 + 4 + 4) *
+ (cxdev->height + 50 /* VBI lines */);
+
+ /*
+ * try to fit a whole frame into each URB, as long as doing so
+ * does not require very high order memory allocations
+ */
+ BUILD_BUG_ON(CXUSB_VIDEO_URB_MAX_SIZE / CXUSB_VIDEO_PKT_SIZE >
+ CXUSB_VIDEO_MAX_FRAME_PKTS);
+ *npackets = min_t(int, (framelen + CXUSB_VIDEO_PKT_SIZE - 1) /
+ CXUSB_VIDEO_PKT_SIZE,
+ CXUSB_VIDEO_URB_MAX_SIZE / CXUSB_VIDEO_PKT_SIZE);
+ urblen = *npackets * CXUSB_VIDEO_PKT_SIZE;
+
+ cxusb_vprintk(dvbdev, URB,
+ "each URB will have %d packets for total of %u bytes (%u x %u @ %u)\n",
+ *npackets, urblen, (unsigned int)cxdev->width,
+ (unsigned int)cxdev->height, framelen);
+
+ auxbuflen = framelen + urblen;
+
+ buf = vmalloc(auxbuflen);
+ if (!buf)
+ return -ENOMEM;
+
+ cxusb_auxbuf_init(dvbdev, &cxdev->auxbuf, buf, auxbuflen);
+
+ return 0;
+}
+
+static u32 cxusb_medion_norm2field_order(v4l2_std_id norm)
+{
+ bool is625 = norm & V4L2_STD_625_50;
+ bool is525 = norm & V4L2_STD_525_60;
+
+ if (!is625 && !is525)
+ return V4L2_FIELD_NONE;
+
+ if (is625 && is525)
+ return V4L2_FIELD_NONE;
+
+ if (is625)
+ return V4L2_FIELD_SEQ_TB;
+ else /* is525 */
+ return V4L2_FIELD_SEQ_BT;
+}
+
+static u32 cxusb_medion_field_order(struct cxusb_medion_dev *cxdev)
+{
+ struct dvb_usb_device *dvbdev = cxdev->dvbdev;
+ u32 field;
+ int ret;
+ v4l2_std_id norm;
+
+ /* TV tuner is PAL-only so it is always TB */
+ if (cxdev->input == 0)
+ return V4L2_FIELD_SEQ_TB;
+
+ field = cxusb_medion_norm2field_order(cxdev->norm);
+ if (field != V4L2_FIELD_NONE)
+ return field;
+
+ ret = v4l2_subdev_call(cxdev->cx25840, video, g_std, &norm);
+ if (ret != 0) {
+ cxusb_vprintk(dvbdev, OPS,
+ "cannot get current standard for input %u\n",
+ (unsigned int)cxdev->input);
+ } else {
+ field = cxusb_medion_norm2field_order(norm);
+ if (field != V4L2_FIELD_NONE)
+ return field;
+ }
+
+ dev_warn(&dvbdev->udev->dev,
+ "cannot determine field order for the current standard setup and received signal, using TB\n");
+ return V4L2_FIELD_SEQ_TB;
+}
+
+static int cxusb_medion_v_start_streaming(struct vb2_queue *q,
+ unsigned int count)
+{
+ struct dvb_usb_device *dvbdev = vb2_get_drv_priv(q);
+ struct cxusb_medion_dev *cxdev = dvbdev->priv;
+ u8 streamon_params[2] = { 0x03, 0x00 };
+ int npackets, i;
+ int ret;
+
+ cxusb_vprintk(dvbdev, OPS, "should start streaming\n");
+
+ if (cxdev->stop_streaming) {
+ /* stream is being stopped */
+ ret = -EBUSY;
+ goto ret_retbufs;
+ }
+
+ cxdev->field_order = cxusb_medion_field_order(cxdev);
+
+ ret = v4l2_subdev_call(cxdev->cx25840, video, s_stream, 1);
+ if (ret != 0) {
+ dev_err(&dvbdev->udev->dev,
+ "unable to start stream (%d)\n", ret);
+ goto ret_retbufs;
+ }
+
+ ret = cxusb_ctrl_msg(dvbdev, CMD_STREAMING_ON, streamon_params, 2,
+ NULL, 0);
+ if (ret != 0) {
+ dev_err(&dvbdev->udev->dev,
+ "unable to start streaming (%d)\n", ret);
+ goto ret_unstream_cx;
+ }
+
+ ret = cxusb_medion_v_ss_auxbuf_alloc(cxdev, &npackets);
+ if (ret != 0)
+ goto ret_unstream_md;
+
+ for (i = 0; i < CXUSB_VIDEO_URBS; i++) {
+ int framen;
+ u8 *streambuf;
+ struct urb *surb;
+
+ /*
+ * TODO: change this to an array of single pages to avoid
+ * doing a large continuous allocation when (if)
+ * s-g isochronous USB transfers are supported
+ */
+ streambuf = kmalloc(npackets * CXUSB_VIDEO_PKT_SIZE,
+ GFP_KERNEL);
+ if (!streambuf) {
+ if (i < 2) {
+ ret = -ENOMEM;
+ goto ret_freeab;
+ }
+ break;
+ }
+
+ surb = usb_alloc_urb(npackets, GFP_KERNEL);
+ if (!surb) {
+ kfree(streambuf);
+ ret = -ENOMEM;
+ goto ret_freeu;
+ }
+
+ cxdev->streamurbs[i] = surb;
+ surb->dev = dvbdev->udev;
+ surb->context = dvbdev;
+ surb->pipe = usb_rcvisocpipe(dvbdev->udev, 2);
+
+ surb->interval = 1;
+ surb->transfer_flags = URB_ISO_ASAP;
+
+ surb->transfer_buffer = streambuf;
+
+ surb->complete = cxusb_medion_v_complete;
+ surb->number_of_packets = npackets;
+ surb->transfer_buffer_length = npackets * CXUSB_VIDEO_PKT_SIZE;
+
+ for (framen = 0; framen < npackets; framen++) {
+ surb->iso_frame_desc[framen].offset =
+ CXUSB_VIDEO_PKT_SIZE * framen;
+
+ surb->iso_frame_desc[framen].length =
+ CXUSB_VIDEO_PKT_SIZE;
+ }
+ }
+
+ cxdev->urbcomplete = 0;
+ cxdev->nexturb = 0;
+ cxdev->vbuf_sequence = 0;
+
+ cxdev->vbuf = NULL;
+ cxdev->bt656.mode = NEW_FRAME;
+ cxdev->bt656.buf = NULL;
+
+ for (i = 0; i < CXUSB_VIDEO_URBS; i++)
+ if (cxdev->streamurbs[i]) {
+ ret = usb_submit_urb(cxdev->streamurbs[i],
+ GFP_KERNEL);
+ if (ret != 0)
+ dev_err(&dvbdev->udev->dev,
+ "URB %d submission failed (%d)\n", i,
+ ret);
+ }
+
+ return 0;
+
+ret_freeu:
+ cxusb_medion_urbs_free(cxdev);
+
+ret_freeab:
+ vfree(cxdev->auxbuf.buf);
+
+ret_unstream_md:
+ cxusb_ctrl_msg(dvbdev, CMD_STREAMING_OFF, NULL, 0, NULL, 0);
+
+ret_unstream_cx:
+ v4l2_subdev_call(cxdev->cx25840, video, s_stream, 0);
+
+ret_retbufs:
+ cxusb_medion_return_buffers(cxdev, true);
+
+ return ret;
+}
+
+static void cxusb_medion_v_stop_streaming(struct vb2_queue *q)
+{
+ struct dvb_usb_device *dvbdev = vb2_get_drv_priv(q);
+ struct cxusb_medion_dev *cxdev = dvbdev->priv;
+ int ret;
+ unsigned int i;
+
+ cxusb_vprintk(dvbdev, OPS, "should stop streaming\n");
+
+ if (WARN_ON(cxdev->stop_streaming))
+ return;
+
+ cxdev->stop_streaming = true;
+
+ cxusb_ctrl_msg(dvbdev, CMD_STREAMING_OFF, NULL, 0, NULL, 0);
+
+ ret = v4l2_subdev_call(cxdev->cx25840, video, s_stream, 0);
+ if (ret != 0)
+ dev_err(&dvbdev->udev->dev, "unable to stop stream (%d)\n",
+ ret);
+
+ /* let URB completion run */
+ mutex_unlock(cxdev->videodev->lock);
+
+ for (i = 0; i < CXUSB_VIDEO_URBS; i++)
+ if (cxdev->streamurbs[i])
+ usb_kill_urb(cxdev->streamurbs[i]);
+
+ flush_work(&cxdev->urbwork);
+
+ mutex_lock(cxdev->videodev->lock);
+
+ /* free transfer buffer and URB */
+ vfree(cxdev->auxbuf.buf);
+
+ cxusb_medion_urbs_free(cxdev);
+
+ cxusb_medion_return_buffers(cxdev, false);
+
+ cxdev->stop_streaming = false;
+}
+
+static void cxusub_medion_v_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *v4l2buf = to_vb2_v4l2_buffer(vb);
+ struct cxusb_medion_vbuffer *vbuf =
+ container_of(v4l2buf, struct cxusb_medion_vbuffer, vb2);
+ struct dvb_usb_device *dvbdev = vb2_get_drv_priv(vb->vb2_queue);
+ struct cxusb_medion_dev *cxdev = dvbdev->priv;
+
+ /* cxusb_vprintk(dvbdev, OPS, "mmmm.. a fresh buffer...\n"); */
+
+ list_add_tail(&vbuf->list, &cxdev->buflist);
+}
+
+static const struct vb2_ops cxdev_video_qops = {
+ .queue_setup = cxusb_medion_v_queue_setup,
+ .buf_init = cxusb_medion_v_buf_init,
+ .start_streaming = cxusb_medion_v_start_streaming,
+ .stop_streaming = cxusb_medion_v_stop_streaming,
+ .buf_queue = cxusub_medion_v_buf_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish
+};
+
+static const __u32 videocaps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TUNER |
+ V4L2_CAP_READWRITE | V4L2_CAP_STREAMING;
+static const __u32 radiocaps = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
+
+static int cxusb_medion_v_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ struct dvb_usb_device *dvbdev = video_drvdata(file);
+
+ strscpy(cap->driver, dvbdev->udev->dev.driver->name,
+ sizeof(cap->driver));
+ strscpy(cap->card, "Medion 95700", sizeof(cap->card));
+ usb_make_path(dvbdev->udev, cap->bus_info, sizeof(cap->bus_info));
+
+ cap->capabilities = videocaps | radiocaps | V4L2_CAP_DEVICE_CAPS;
+
+ return 0;
+}
+
+static int cxusb_medion_v_enum_fmt_vid_cap(struct file *file, void *fh,
+ struct v4l2_fmtdesc *f)
+{
+ if (f->index != 0)
+ return -EINVAL;
+
+ f->pixelformat = V4L2_PIX_FMT_UYVY;
+
+ return 0;
+}
+
+static int cxusb_medion_g_fmt_vid_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct dvb_usb_device *dvbdev = video_drvdata(file);
+ struct cxusb_medion_dev *cxdev = dvbdev->priv;
+
+ f->fmt.pix.width = cxdev->width;
+ f->fmt.pix.height = cxdev->height;
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_UYVY;
+ f->fmt.pix.field = vb2_start_streaming_called(&cxdev->videoqueue) ?
+ cxdev->field_order : cxusb_medion_field_order(cxdev);
+ f->fmt.pix.bytesperline = cxdev->width * 2;
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
+ f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * f->fmt.pix.height;
+
+ return 0;
+}
+
+static int cxusb_medion_try_s_fmt_vid_cap(struct file *file,
+ struct v4l2_format *f,
+ bool isset)
+{
+ struct dvb_usb_device *dvbdev = video_drvdata(file);
+ struct cxusb_medion_dev *cxdev = dvbdev->priv;
+ struct v4l2_subdev_format subfmt;
+ u32 field;
+ int ret;
+
+ if (isset && vb2_is_busy(&cxdev->videoqueue))
+ return -EBUSY;
+
+ field = vb2_start_streaming_called(&cxdev->videoqueue) ?
+ cxdev->field_order : cxusb_medion_field_order(cxdev);
+
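+ /*
+ * propagate the requested format to the cx25840 (TRY or ACTIVE),
+ * rounding width and height down to even values
+ */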
+ memset(&subfmt, 0, sizeof(subfmt));
+ subfmt.which = isset ? V4L2_SUBDEV_FORMAT_ACTIVE :
+ V4L2_SUBDEV_FORMAT_TRY;
+ subfmt.format.width = f->fmt.pix.width & ~1;
+ subfmt.format.height = f->fmt.pix.height & ~1;
+ subfmt.format.code = MEDIA_BUS_FMT_FIXED;
+ subfmt.format.field = field;
+ subfmt.format.colorspace = V4L2_COLORSPACE_SMPTE170M;
+
+ ret = v4l2_subdev_call(cxdev->cx25840, pad, set_fmt, NULL, &subfmt);
+ if (ret != 0)
+ return ret;
+
+ f->fmt.pix.width = subfmt.format.width;
+ f->fmt.pix.height = subfmt.format.height;
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_UYVY;
+ f->fmt.pix.field = field;
+ f->fmt.pix.bytesperline = f->fmt.pix.width * 2;
+ f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * f->fmt.pix.height;
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
+
+ if (isset) {
+ cxdev->width = f->fmt.pix.width;
+ cxdev->height = f->fmt.pix.height;
+ }
+
+ return 0;
+}
+
+static int cxusb_medion_try_fmt_vid_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ return cxusb_medion_try_s_fmt_vid_cap(file, f, false);
+}
+
+static int cxusb_medion_s_fmt_vid_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ return cxusb_medion_try_s_fmt_vid_cap(file, f, true);
+}
+
+static const struct {
+ struct v4l2_input input;
+ u32 inputcfg;
+} cxusb_medion_inputs[] = {
+ { .input = { .name = "TV tuner", .type = V4L2_INPUT_TYPE_TUNER,
+ .tuner = 0, .std = V4L2_STD_PAL },
+ .inputcfg = CX25840_COMPOSITE2, },
+
+ { .input = { .name = "Composite", .type = V4L2_INPUT_TYPE_CAMERA,
+ .std = V4L2_STD_ALL },
+ .inputcfg = CX25840_COMPOSITE1, },
+
+ { .input = { .name = "S-Video", .type = V4L2_INPUT_TYPE_CAMERA,
+ .std = V4L2_STD_ALL },
+ .inputcfg = CX25840_SVIDEO_LUMA3 | CX25840_SVIDEO_CHROMA4 }
+};
+
+#define CXUSB_INPUT_CNT ARRAY_SIZE(cxusb_medion_inputs)
+
+static int cxusb_medion_enum_input(struct file *file, void *fh,
+ struct v4l2_input *inp)
+{
+ struct dvb_usb_device *dvbdev = video_drvdata(file);
+ struct cxusb_medion_dev *cxdev = dvbdev->priv;
+ u32 index = inp->index;
+
+ if (index >= CXUSB_INPUT_CNT)
+ return -EINVAL;
+
+ *inp = cxusb_medion_inputs[index].input;
+ inp->index = index;
+ inp->capabilities |= V4L2_IN_CAP_STD;
+
+ if (index == cxdev->input) {
+ int ret;
+ u32 status = 0;
+
+ ret = v4l2_subdev_call(cxdev->cx25840, video, g_input_status,
+ &status);
+ if (ret != 0)
+ dev_warn(&dvbdev->udev->dev,
+ "cx25840 input status query failed (%d)\n",
+ ret);
+ else
+ inp->status = status;
+ }
+
+ return 0;
+}
+
+static int cxusb_medion_g_input(struct file *file, void *fh,
+ unsigned int *i)
+{
+ struct dvb_usb_device *dvbdev = video_drvdata(file);
+ struct cxusb_medion_dev *cxdev = dvbdev->priv;
+
+ *i = cxdev->input;
+
+ return 0;
+}
+
+static int cxusb_medion_set_norm(struct cxusb_medion_dev *cxdev,
+ v4l2_std_id norm)
+{
+ struct dvb_usb_device *dvbdev = cxdev->dvbdev;
+ int ret;
+
+ cxusb_vprintk(dvbdev, OPS,
+ "trying to set standard for input %u to %lx\n",
+ (unsigned int)cxdev->input,
+ (unsigned long)norm);
+
+ /* no autodetection support */
+ if (norm == V4L2_STD_UNKNOWN)
+ return -EINVAL;
+
+ /* on composite or S-Video any std is acceptable */
+ if (cxdev->input != 0) {
+ ret = v4l2_subdev_call(cxdev->cx25840, video, s_std, norm);
+ if (ret)
+ return ret;
+
+ goto ret_savenorm;
+ }
+
+ /* TV tuner is only able to demodulate PAL */
+ if ((norm & ~V4L2_STD_PAL) != 0)
+ return -EINVAL;
+
+ ret = v4l2_subdev_call(cxdev->tda9887, video, s_std, norm);
+ if (ret != 0) {
+ dev_err(&dvbdev->udev->dev,
+ "tda9887 norm setup failed (%d)\n",
+ ret);
+ return ret;
+ }
+
+ ret = v4l2_subdev_call(cxdev->tuner, video, s_std, norm);
+ if (ret != 0) {
+ dev_err(&dvbdev->udev->dev,
+ "tuner norm setup failed (%d)\n",
+ ret);
+ return ret;
+ }
+
+ ret = v4l2_subdev_call(cxdev->cx25840, video, s_std, norm);
+ if (ret != 0) {
+ dev_err(&dvbdev->udev->dev,
+ "cx25840 norm setup failed (%d)\n",
+ ret);
+ return ret;
+ }
+
+ret_savenorm:
+ cxdev->norm = norm;
+
+ return 0;
+}
+
+static int cxusb_medion_s_input(struct file *file, void *fh,
+ unsigned int i)
+{
+ struct dvb_usb_device *dvbdev = video_drvdata(file);
+ struct cxusb_medion_dev *cxdev = dvbdev->priv;
+ int ret;
+ v4l2_std_id norm;
+
+ if (i >= CXUSB_INPUT_CNT)
+ return -EINVAL;
+
+ ret = v4l2_subdev_call(cxdev->cx25840, video, s_routing,
+ cxusb_medion_inputs[i].inputcfg, 0, 0);
+ if (ret != 0)
+ return ret;
+
+ cxdev->input = i;
+ cxdev->videodev->tvnorms = cxusb_medion_inputs[i].input.std;
+
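+ /*
+ * keep the current norm if the new input supports it,
+ * otherwise fall back to the input's default standards
+ */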
+ norm = cxdev->norm & cxusb_medion_inputs[i].input.std;
+ if (norm == 0)
+ norm = cxusb_medion_inputs[i].input.std;
+
+ cxusb_medion_set_norm(cxdev, norm);
+
+ return 0;
+}
+
+static int cxusb_medion_g_tuner(struct file *file, void *fh,
+ struct v4l2_tuner *tuner)
+{
+ struct dvb_usb_device *dvbdev = video_drvdata(file);
+ struct cxusb_medion_dev *cxdev = dvbdev->priv;
+ struct video_device *vdev = video_devdata(file);
+ int ret;
+
+ if (tuner->index != 0)
+ return -EINVAL;
+
+ if (vdev->vfl_type == VFL_TYPE_GRABBER)
+ tuner->type = V4L2_TUNER_ANALOG_TV;
+ else
+ tuner->type = V4L2_TUNER_RADIO;
+
+ tuner->capability = 0;
+ tuner->afc = 0;
+
+ /*
+ * fills:
+ * always: capability (static), rangelow (static), rangehigh (static)
+ * radio mode: afc (may fail silently), rxsubchans (static), audmode
+ */
+ ret = v4l2_subdev_call(cxdev->tda9887, tuner, g_tuner, tuner);
+ if (ret != 0)
+ return ret;
+
+ /*
+ * fills:
+ * always: capability (static), rangelow (static), rangehigh (static)
+ * radio mode: rxsubchans (always stereo), audmode,
+ * signal (might be wrong)
+ */
+ ret = v4l2_subdev_call(cxdev->tuner, tuner, g_tuner, tuner);
+ if (ret != 0)
+ return ret;
+
+ tuner->signal = 0;
+
+ /*
+ * fills: TV mode: capability, rxsubchans, audmode, signal
+ */
+ ret = v4l2_subdev_call(cxdev->cx25840, tuner, g_tuner, tuner);
+ if (ret != 0)
+ return ret;
+
+ if (vdev->vfl_type == VFL_TYPE_GRABBER)
+ strscpy(tuner->name, "TV tuner", sizeof(tuner->name));
+ else
+ strscpy(tuner->name, "Radio tuner", sizeof(tuner->name));
+
+ memset(tuner->reserved, 0, sizeof(tuner->reserved));
+
+ return 0;
+}
+
+static int cxusb_medion_s_tuner(struct file *file, void *fh,
+ const struct v4l2_tuner *tuner)
+{
+ struct dvb_usb_device *dvbdev = video_drvdata(file);
+ struct cxusb_medion_dev *cxdev = dvbdev->priv;
+ struct video_device *vdev = video_devdata(file);
+ int ret;
+
+ if (tuner->index != 0)
+ return -EINVAL;
+
+ ret = v4l2_subdev_call(cxdev->tda9887, tuner, s_tuner, tuner);
+ if (ret != 0)
+ return ret;
+
+ ret = v4l2_subdev_call(cxdev->tuner, tuner, s_tuner, tuner);
+ if (ret != 0)
+ return ret;
+
+ /*
+ * make sure that the cx25840 is in the correct TV / radio mode,
+ * since the calls above may have changed it for the tuner / IF demod
+ */
+ if (vdev->vfl_type == VFL_TYPE_GRABBER)
+ v4l2_subdev_call(cxdev->cx25840, video, s_std, cxdev->norm);
+ else
+ v4l2_subdev_call(cxdev->cx25840, tuner, s_radio);
+
+ return v4l2_subdev_call(cxdev->cx25840, tuner, s_tuner, tuner);
+}
+
+static int cxusb_medion_g_frequency(struct file *file, void *fh,
+ struct v4l2_frequency *freq)
+{
+ struct dvb_usb_device *dvbdev = video_drvdata(file);
+ struct cxusb_medion_dev *cxdev = dvbdev->priv;
+
+ if (freq->tuner != 0)
+ return -EINVAL;
+
+ return v4l2_subdev_call(cxdev->tuner, tuner, g_frequency, freq);
+}
+
+static int cxusb_medion_s_frequency(struct file *file, void *fh,
+ const struct v4l2_frequency *freq)
+{
+ struct dvb_usb_device *dvbdev = video_drvdata(file);
+ struct cxusb_medion_dev *cxdev = dvbdev->priv;
+ struct video_device *vdev = video_devdata(file);
+ int ret;
+
+ if (freq->tuner != 0)
+ return -EINVAL;
+
+ ret = v4l2_subdev_call(cxdev->tda9887, tuner, s_frequency, freq);
+ if (ret != 0)
+ return ret;
+
+ ret = v4l2_subdev_call(cxdev->tuner, tuner, s_frequency, freq);
+ if (ret != 0)
+ return ret;
+
+ /*
+ * make sure that the cx25840 is in the correct TV / radio mode,
+ * since the calls above may have changed it for the tuner / IF demod
+ */
+ if (vdev->vfl_type == VFL_TYPE_GRABBER)
+ v4l2_subdev_call(cxdev->cx25840, video, s_std, cxdev->norm);
+ else
+ v4l2_subdev_call(cxdev->cx25840, tuner, s_radio);
+
+ return v4l2_subdev_call(cxdev->cx25840, tuner, s_frequency, freq);
+}
+
+static int cxusb_medion_g_std(struct file *file, void *fh,
+ v4l2_std_id *norm)
+{
+ struct dvb_usb_device *dvbdev = video_drvdata(file);
+ struct cxusb_medion_dev *cxdev = dvbdev->priv;
+
+ *norm = cxdev->norm;
+
+ if (*norm == V4L2_STD_UNKNOWN)
+ return -ENODATA;
+
+ return 0;
+}
+
+static int cxusb_medion_s_std(struct file *file, void *fh,
+ v4l2_std_id norm)
+{
+ struct dvb_usb_device *dvbdev = video_drvdata(file);
+ struct cxusb_medion_dev *cxdev = dvbdev->priv;
+
+ return cxusb_medion_set_norm(cxdev, norm);
+}
+
+static int cxusb_medion_querystd(struct file *file, void *fh,
+ v4l2_std_id *norm)
+{
+ struct dvb_usb_device *dvbdev = video_drvdata(file);
+ struct cxusb_medion_dev *cxdev = dvbdev->priv;
+ v4l2_std_id norm_mask;
+ int ret;
+
+ /*
+ * make sure we don't have improper std bits set for the TV tuner
+ * (could happen when no signal was present yet after reset)
+ */
+ if (cxdev->input == 0)
+ norm_mask = V4L2_STD_PAL;
+ else
+ norm_mask = V4L2_STD_ALL;
+
+ ret = v4l2_subdev_call(cxdev->cx25840, video, querystd, norm);
+ if (ret != 0) {
+ cxusb_vprintk(dvbdev, OPS,
+ "cannot get detected standard for input %u\n",
+ (unsigned int)cxdev->input);
+ return ret;
+ }
+
+ cxusb_vprintk(dvbdev, OPS, "input %u detected standard is %lx\n",
+ (unsigned int)cxdev->input, (unsigned long)*norm);
+ *norm &= norm_mask;
+
+ return 0;
+}
+
+static int cxusb_medion_log_status(struct file *file, void *fh)
+{
+ struct dvb_usb_device *dvbdev = video_drvdata(file);
+ struct cxusb_medion_dev *cxdev = dvbdev->priv;
+
+ v4l2_device_call_all(&cxdev->v4l2dev, 0, core, log_status);
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops cxusb_video_ioctl = {
+ .vidioc_querycap = cxusb_medion_v_querycap,
+ .vidioc_enum_fmt_vid_cap = cxusb_medion_v_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = cxusb_medion_g_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = cxusb_medion_s_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = cxusb_medion_try_fmt_vid_cap,
+ .vidioc_enum_input = cxusb_medion_enum_input,
+ .vidioc_g_input = cxusb_medion_g_input,
+ .vidioc_s_input = cxusb_medion_s_input,
+ .vidioc_g_tuner = cxusb_medion_g_tuner,
+ .vidioc_s_tuner = cxusb_medion_s_tuner,
+ .vidioc_g_frequency = cxusb_medion_g_frequency,
+ .vidioc_s_frequency = cxusb_medion_s_frequency,
+ .vidioc_g_std = cxusb_medion_g_std,
+ .vidioc_s_std = cxusb_medion_s_std,
+ .vidioc_querystd = cxusb_medion_querystd,
+ .vidioc_log_status = cxusb_medion_log_status,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff
+};
+
+static const struct v4l2_ioctl_ops cxusb_radio_ioctl = {
+ .vidioc_querycap = cxusb_medion_v_querycap,
+ .vidioc_g_tuner = cxusb_medion_g_tuner,
+ .vidioc_s_tuner = cxusb_medion_s_tuner,
+ .vidioc_g_frequency = cxusb_medion_g_frequency,
+ .vidioc_s_frequency = cxusb_medion_s_frequency,
+ .vidioc_log_status = cxusb_medion_log_status
+};
+
+/*
+ * in principle, this should be const, but s_io_pin_config is declared
+ * to take non-const, and gcc complains
+ */
+static struct v4l2_subdev_io_pin_config cxusub_medion_pin_config[] = {
+ { .pin = CX25840_PIN_DVALID_PRGM0, .function = CX25840_PAD_DEFAULT,
+ .strength = CX25840_PIN_DRIVE_MEDIUM },
+ { .pin = CX25840_PIN_PLL_CLK_PRGM7, .function = CX25840_PAD_AUX_PLL },
+ { .pin = CX25840_PIN_HRESET_PRGM2, .function = CX25840_PAD_ACTIVE,
+ .strength = CX25840_PIN_DRIVE_MEDIUM }
+};
+
+int cxusb_medion_analog_init(struct dvb_usb_device *dvbdev)
+{
+ struct cxusb_medion_dev *cxdev = dvbdev->priv;
+ u8 tuner_analog_msg_data[] = { 0x9c, 0x60, 0x85, 0x54 };
+ struct i2c_msg tuner_analog_msg = { .addr = 0x61, .flags = 0,
+ .buf = tuner_analog_msg_data,
+ .len =
+ sizeof(tuner_analog_msg_data) };
+ struct v4l2_subdev_format subfmt;
+ int ret;
+
+ /* switch tuner to analog mode so IF demod will become accessible */
+ ret = i2c_transfer(&dvbdev->i2c_adap, &tuner_analog_msg, 1);
+ if (ret != 1)
+ dev_warn(&dvbdev->udev->dev,
+ "tuner analog switch failed (%d)\n", ret);
+
+ /*
+ * the cx25840 might have lost power during mode switching, so we
+ * need to set it up again
+ */
+ ret = v4l2_subdev_call(cxdev->cx25840, core, reset, 0);
+ if (ret != 0)
+ dev_warn(&dvbdev->udev->dev,
+ "cx25840 reset failed (%d)\n", ret);
+
+ ret = v4l2_subdev_call(cxdev->cx25840, video, s_routing,
+ CX25840_COMPOSITE1, 0, 0);
+ if (ret != 0)
+ dev_warn(&dvbdev->udev->dev,
+ "cx25840 initial input setting failed (%d)\n", ret);
+
+ /* composite */
+ cxdev->input = 1;
+ cxdev->videodev->tvnorms = V4L2_STD_ALL;
+ cxdev->norm = V4L2_STD_PAL;
+
+ /* TODO: setup audio samples insertion */
+
+ ret = v4l2_subdev_call(cxdev->cx25840, core, s_io_pin_config,
+ ARRAY_SIZE(cxusub_medion_pin_config),
+ cxusub_medion_pin_config);
+ if (ret != 0)
+ dev_warn(&dvbdev->udev->dev,
+ "cx25840 pin config failed (%d)\n", ret);
+
+ /* make sure that we aren't in radio mode */
+ v4l2_subdev_call(cxdev->tda9887, video, s_std, cxdev->norm);
+ v4l2_subdev_call(cxdev->tuner, video, s_std, cxdev->norm);
+ v4l2_subdev_call(cxdev->cx25840, video, s_std, cxdev->norm);
+
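+ /* set the initial capture format on the cx25840 */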
+ memset(&subfmt, 0, sizeof(subfmt));
+ subfmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ subfmt.format.width = cxdev->width;
+ subfmt.format.height = cxdev->height;
+ subfmt.format.code = MEDIA_BUS_FMT_FIXED;
+ subfmt.format.field = V4L2_FIELD_SEQ_TB;
+ subfmt.format.colorspace = V4L2_COLORSPACE_SMPTE170M;
+
+ ret = v4l2_subdev_call(cxdev->cx25840, pad, set_fmt, NULL, &subfmt);
+ if (ret != 0)
+ dev_warn(&dvbdev->udev->dev,
+ "cx25840 format set failed (%d)\n", ret);
+
+ if (ret == 0) {
+ cxdev->width = subfmt.format.width;
+ cxdev->height = subfmt.format.height;
+ }
+
+ return 0;
+}
+
+static int cxusb_videoradio_open(struct file *f)
+{
+ struct dvb_usb_device *dvbdev = video_drvdata(f);
+ int ret;
+
+ /*
+ * no locking needed since this call only modifies analog
+ * state if there are no other analog handles currently
+ * open, so ops done via them cannot create a conflict
+ */
+ ret = cxusb_medion_get(dvbdev, CXUSB_OPEN_ANALOG);
+ if (ret != 0)
+ return ret;
+
+ ret = v4l2_fh_open(f);
+ if (ret != 0)
+ goto ret_release;
+
+ cxusb_vprintk(dvbdev, OPS, "got open\n");
+
+ return 0;
+
+ret_release:
+ cxusb_medion_put(dvbdev);
+
+ return ret;
+}
+
+static int cxusb_videoradio_release(struct file *f)
+{
+ struct video_device *vdev = video_devdata(f);
+ struct dvb_usb_device *dvbdev = video_drvdata(f);
+ int ret;
+
+ cxusb_vprintk(dvbdev, OPS, "got release\n");
+
+ if (vdev->vfl_type == VFL_TYPE_GRABBER)
+ ret = vb2_fop_release(f);
+ else
+ ret = v4l2_fh_release(f);
+
+ cxusb_medion_put(dvbdev);
+
+ return ret;
+}
+
+static const struct v4l2_file_operations cxusb_video_fops = {
+ .owner = THIS_MODULE,
+ .read = vb2_fop_read,
+ .poll = vb2_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = vb2_fop_mmap,
+ .open = cxusb_videoradio_open,
+ .release = cxusb_videoradio_release
+};
+
+static const struct v4l2_file_operations cxusb_radio_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = video_ioctl2,
+ .open = cxusb_videoradio_open,
+ .release = cxusb_videoradio_release
+};
+
+static void cxusb_medion_v4l2_release(struct v4l2_device *v4l2_dev)
+{
+ struct cxusb_medion_dev *cxdev =
+ container_of(v4l2_dev, struct cxusb_medion_dev, v4l2dev);
+ struct dvb_usb_device *dvbdev = cxdev->dvbdev;
+
+ cxusb_vprintk(dvbdev, OPS, "v4l2 device release\n");
+
+ v4l2_device_unregister(&cxdev->v4l2dev);
+
+ mutex_destroy(&cxdev->dev_lock);
+
+ while (completion_done(&cxdev->v4l2_release))
+ schedule();
+
+ complete(&cxdev->v4l2_release);
+}
+
+static void cxusb_medion_videodev_release(struct video_device *vdev)
+{
+ struct dvb_usb_device *dvbdev = video_get_drvdata(vdev);
+
+ cxusb_vprintk(dvbdev, OPS, "video device release\n");
+
+ vb2_queue_release(vdev->queue);
+
+ video_device_release(vdev);
+}
+
+static int cxusb_medion_register_analog_video(struct dvb_usb_device *dvbdev)
+{
+ struct cxusb_medion_dev *cxdev = dvbdev->priv;
+ int ret;
+
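+ /* vb2 queue backed by vmalloc'ed buffers, serialized by dev_lock */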
+ cxdev->videoqueue.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ cxdev->videoqueue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ |
+ VB2_DMABUF;
+ cxdev->videoqueue.ops = &cxdev_video_qops;
+ cxdev->videoqueue.mem_ops = &vb2_vmalloc_memops;
+ cxdev->videoqueue.drv_priv = dvbdev;
+ cxdev->videoqueue.buf_struct_size =
+ sizeof(struct cxusb_medion_vbuffer);
+ cxdev->videoqueue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ cxdev->videoqueue.min_buffers_needed = 6;
+ cxdev->videoqueue.lock = &cxdev->dev_lock;
+
+ ret = vb2_queue_init(&cxdev->videoqueue);
+ if (ret) {
+ dev_err(&dvbdev->udev->dev,
+ "video queue init failed, ret = %d\n", ret);
+ return ret;
+ }
+
+ cxdev->videodev = video_device_alloc();
+ if (!cxdev->videodev) {
+ dev_err(&dvbdev->udev->dev, "video device alloc failed\n");
+ ret = -ENOMEM;
+ goto ret_qrelease;
+ }
+
+ cxdev->videodev->device_caps = videocaps;
+ cxdev->videodev->fops = &cxusb_video_fops;
+ cxdev->videodev->v4l2_dev = &cxdev->v4l2dev;
+ cxdev->videodev->queue = &cxdev->videoqueue;
+ strscpy(cxdev->videodev->name, "cxusb", sizeof(cxdev->videodev->name));
+ cxdev->videodev->vfl_dir = VFL_DIR_RX;
+ cxdev->videodev->ioctl_ops = &cxusb_video_ioctl;
+ cxdev->videodev->tvnorms = V4L2_STD_ALL;
+ cxdev->videodev->release = cxusb_medion_videodev_release;
+ cxdev->videodev->lock = &cxdev->dev_lock;
+ video_set_drvdata(cxdev->videodev, dvbdev);
+
+ ret = video_register_device(cxdev->videodev, VFL_TYPE_GRABBER, -1);
+ if (ret) {
+ dev_err(&dvbdev->udev->dev,
+ "video device register failed, ret = %d\n", ret);
+ goto ret_vrelease;
+ }
+
+ return 0;
+
+ret_vrelease:
+ video_device_release(cxdev->videodev);
+
+ret_qrelease:
+ vb2_queue_release(&cxdev->videoqueue);
+
+ return ret;
+}
+
+static int cxusb_medion_register_analog_radio(struct dvb_usb_device *dvbdev)
+{
+ struct cxusb_medion_dev *cxdev = dvbdev->priv;
+ int ret;
+
+ cxdev->radiodev = video_device_alloc();
+ if (!cxdev->radiodev) {
+ dev_err(&dvbdev->udev->dev, "radio device alloc failed\n");
+ return -ENOMEM;
+ }
+
+ cxdev->radiodev->device_caps = radiocaps;
+ cxdev->radiodev->fops = &cxusb_radio_fops;
+ cxdev->radiodev->v4l2_dev = &cxdev->v4l2dev;
+ strscpy(cxdev->radiodev->name, "cxusb", sizeof(cxdev->radiodev->name));
+ cxdev->radiodev->vfl_dir = VFL_DIR_RX;
+ cxdev->radiodev->ioctl_ops = &cxusb_radio_ioctl;
+ cxdev->radiodev->release = video_device_release;
+ cxdev->radiodev->lock = &cxdev->dev_lock;
+ video_set_drvdata(cxdev->radiodev, dvbdev);
+
+ ret = video_register_device(cxdev->radiodev, VFL_TYPE_RADIO, -1);
+ if (ret) {
+ dev_err(&dvbdev->udev->dev,
+ "radio device register failed, ret = %d\n", ret);
+ video_device_release(cxdev->radiodev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int cxusb_medion_register_analog_subdevs(struct dvb_usb_device *dvbdev)
+{
+ struct cxusb_medion_dev *cxdev = dvbdev->priv;
+ int ret;
+ struct tuner_setup tun_setup;
+
+ /* attach cx25840 capture chip */
+ cxdev->cx25840 = v4l2_i2c_new_subdev(&cxdev->v4l2dev,
+ &dvbdev->i2c_adap,
+ "cx25840", 0x44, NULL);
+ if (!cxdev->cx25840) {
+ dev_err(&dvbdev->udev->dev, "cx25840 not found\n");
+ return -ENODEV;
+ }
+
+ /*
+ * Initialize cx25840 chip by calling its subdevice init core op.
+ *
+ * This switches it into the generic mode that disables some of the
+ * ivtv-related hacks in the cx25840 driver while allowing setting
+ * of the chip video output configuration (passed in the call below
+ * as the last argument).
+ */
+ ret = v4l2_subdev_call(cxdev->cx25840, core, init,
+ CX25840_VCONFIG_FMT_BT656 |
+ CX25840_VCONFIG_RES_8BIT |
+ CX25840_VCONFIG_VBIRAW_DISABLED |
+ CX25840_VCONFIG_ANCDATA_DISABLED |
+ CX25840_VCONFIG_ACTIVE_COMPOSITE |
+ CX25840_VCONFIG_VALID_ANDACTIVE |
+ CX25840_VCONFIG_HRESETW_NORMAL |
+ CX25840_VCONFIG_CLKGATE_NONE |
+ CX25840_VCONFIG_DCMODE_DWORDS);
+ if (ret != 0) {
+ dev_err(&dvbdev->udev->dev,
+ "cx25840 init failed (%d)\n", ret);
+ return ret;
+ }
+
+ /* attach analog tuner */
+ cxdev->tuner = v4l2_i2c_new_subdev(&cxdev->v4l2dev,
+ &dvbdev->i2c_adap,
+ "tuner", 0x61, NULL);
+ if (!cxdev->tuner) {
+ dev_err(&dvbdev->udev->dev, "tuner not found\n");
+ return -ENODEV;
+ }
+
+ /* configure it */
+ memset(&tun_setup, 0, sizeof(tun_setup));
+ tun_setup.addr = 0x61;
+ tun_setup.type = TUNER_PHILIPS_FMD1216ME_MK3;
+ tun_setup.mode_mask = T_RADIO | T_ANALOG_TV;
+ v4l2_subdev_call(cxdev->tuner, tuner, s_type_addr, &tun_setup);
+
+ /* attach IF demod */
+ cxdev->tda9887 = v4l2_i2c_new_subdev(&cxdev->v4l2dev,
+ &dvbdev->i2c_adap,
+ "tuner", 0x43, NULL);
+ if (!cxdev->tda9887) {
+ dev_err(&dvbdev->udev->dev, "tda9887 not found\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+int cxusb_medion_register_analog(struct dvb_usb_device *dvbdev)
+{
+ struct cxusb_medion_dev *cxdev = dvbdev->priv;
+ int ret;
+
+ mutex_init(&cxdev->dev_lock);
+
+ init_completion(&cxdev->v4l2_release);
+
+ cxdev->v4l2dev.release = cxusb_medion_v4l2_release;
+
+ ret = v4l2_device_register(&dvbdev->udev->dev, &cxdev->v4l2dev);
+ if (ret != 0) {
+ dev_err(&dvbdev->udev->dev,
+ "V4L2 device registration failed, ret = %d\n", ret);
+ mutex_destroy(&cxdev->dev_lock);
+ return ret;
+ }
+
+ ret = cxusb_medion_register_analog_subdevs(dvbdev);
+ if (ret)
+ goto ret_unregister;
+
+ INIT_WORK(&cxdev->urbwork, cxusb_medion_v_complete_work);
+ INIT_LIST_HEAD(&cxdev->buflist);
+
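+ /* default capture resolution until userspace sets a format */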
+ cxdev->width = 320;
+ cxdev->height = 240;
+
+ ret = cxusb_medion_register_analog_video(dvbdev);
+ if (ret)
+ goto ret_unregister;
+
+ ret = cxusb_medion_register_analog_radio(dvbdev);
+ if (ret)
+ goto ret_vunreg;
+
+ return 0;
+
+ret_vunreg:
+ video_unregister_device(cxdev->videodev);
+
+ret_unregister:
+ v4l2_device_put(&cxdev->v4l2dev);
+ wait_for_completion(&cxdev->v4l2_release);
+
+ return ret;
+}
+
+void cxusb_medion_unregister_analog(struct dvb_usb_device *dvbdev)
+{
+ struct cxusb_medion_dev *cxdev = dvbdev->priv;
+
+ cxusb_vprintk(dvbdev, OPS, "unregistering analog\n");
+
+ video_unregister_device(cxdev->radiodev);
+ video_unregister_device(cxdev->videodev);
+
+ v4l2_device_put(&cxdev->v4l2dev);
+ wait_for_completion(&cxdev->v4l2_release);
+
+ cxusb_vprintk(dvbdev, OPS, "analog unregistered\n");
+}
diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
index 8039ba4ebf68..bac0778f7def 100644
--- a/drivers/media/usb/dvb-usb/cxusb.c
+++ b/drivers/media/usb/dvb-usb/cxusb.c
@@ -12,18 +12,21 @@
* design, so it can be reused for the "analogue-only" device (if it will
* appear at all).
*
- * TODO: Use the cx25840-driver for the analogue part
*
* Copyright (C) 2005 Patrick Boettcher (patrick.boettcher@posteo.de)
* Copyright (C) 2006 Michael Krufky (mkrufky@linuxtv.org)
* Copyright (C) 2006, 2007 Chris Pascoe (c.pascoe@itee.uq.edu.au)
+ * Copyright (C) 2011, 2017 Maciej S. Szmigiero (mail@maciej.szmigiero.name)
*
* see Documentation/media/dvb-drivers/dvb-usb.rst for more information
*/
#include <media/tuner.h>
-#include <linux/vmalloc.h>
-#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/device.h>
#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
#include "cxusb.h"
@@ -44,17 +47,45 @@
#include "si2157.h"
/* debug */
-static int dvb_usb_cxusb_debug;
+int dvb_usb_cxusb_debug;
module_param_named(debug, dvb_usb_cxusb_debug, int, 0644);
-MODULE_PARM_DESC(debug, "set debugging level (1=rc (or-able))." DVB_USB_DEBUG_STATUS);
+MODULE_PARM_DESC(debug, "set debugging level (see cxusb.h)."
+ DVB_USB_DEBUG_STATUS);
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
-#define deb_info(args...) dprintk(dvb_usb_cxusb_debug, 0x03, args)
-#define deb_i2c(args...) dprintk(dvb_usb_cxusb_debug, 0x02, args)
+#define deb_info(args...) dprintk(dvb_usb_cxusb_debug, CXUSB_DBG_MISC, args)
+#define deb_i2c(args...) dprintk(dvb_usb_cxusb_debug, CXUSB_DBG_I2C, args)
-static int cxusb_ctrl_msg(struct dvb_usb_device *d,
- u8 cmd, const u8 *wbuf, int wlen, u8 *rbuf, int rlen)
+enum cxusb_table_index {
+ MEDION_MD95700,
+ DVICO_BLUEBIRD_LG064F_COLD,
+ DVICO_BLUEBIRD_LG064F_WARM,
+ DVICO_BLUEBIRD_DUAL_1_COLD,
+ DVICO_BLUEBIRD_DUAL_1_WARM,
+ DVICO_BLUEBIRD_LGZ201_COLD,
+ DVICO_BLUEBIRD_LGZ201_WARM,
+ DVICO_BLUEBIRD_TH7579_COLD,
+ DVICO_BLUEBIRD_TH7579_WARM,
+ DIGITALNOW_BLUEBIRD_DUAL_1_COLD,
+ DIGITALNOW_BLUEBIRD_DUAL_1_WARM,
+ DVICO_BLUEBIRD_DUAL_2_COLD,
+ DVICO_BLUEBIRD_DUAL_2_WARM,
+ DVICO_BLUEBIRD_DUAL_4,
+ DVICO_BLUEBIRD_DVB_T_NANO_2,
+ DVICO_BLUEBIRD_DVB_T_NANO_2_NFW_WARM,
+ AVERMEDIA_VOLAR_A868R,
+ DVICO_BLUEBIRD_DUAL_4_REV_2,
+ CONEXANT_D680_DMB,
+ MYGICA_D689,
+ MYGICA_T230,
+ NR__cxusb_table_index
+};
+
+static struct usb_device_id cxusb_table[];
+
+int cxusb_ctrl_msg(struct dvb_usb_device *d,
+ u8 cmd, const u8 *wbuf, int wlen, u8 *rbuf, int rlen)
{
struct cxusb_state *st = d->priv;
int ret;
@@ -86,7 +117,8 @@ static void cxusb_gpio_tuner(struct dvb_usb_device *d, int onoff)
struct cxusb_state *st = d->priv;
u8 o[2], i;
- if (st->gpio_write_state[GPIO_TUNER] == onoff)
+ if (st->gpio_write_state[GPIO_TUNER] == onoff &&
+ !st->gpio_write_refresh[GPIO_TUNER])
return;
o[0] = GPIO_TUNER;
@@ -97,10 +129,11 @@ static void cxusb_gpio_tuner(struct dvb_usb_device *d, int onoff)
deb_info("gpio_write failed.\n");
st->gpio_write_state[GPIO_TUNER] = onoff;
+ st->gpio_write_refresh[GPIO_TUNER] = false;
}
static int cxusb_bluebird_gpio_rw(struct dvb_usb_device *d, u8 changemask,
- u8 newval)
+ u8 newval)
{
u8 o[2], gpio_state;
int rc;
@@ -128,7 +161,7 @@ static void cxusb_nano2_led(struct dvb_usb_device *d, int onoff)
}
static int cxusb_d680_dmb_gpio_tuner(struct dvb_usb_device *d,
- u8 addr, int onoff)
+ u8 addr, int onoff)
{
u8 o[2] = {addr, onoff};
u8 i;
@@ -138,12 +171,12 @@ static int cxusb_d680_dmb_gpio_tuner(struct dvb_usb_device *d,
if (rc < 0)
return rc;
+
if (i == 0x01)
return 0;
- else {
- deb_info("gpio_write failed.\n");
- return -EIO;
- }
+
+ deb_info("gpio_write failed.\n");
+ return -EIO;
}
/* I2C */
@@ -158,7 +191,6 @@ static int cxusb_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
return -EAGAIN;
for (i = 0; i < num; i++) {
-
if (le16_to_cpu(d->udev->descriptor.idVendor) == USB_VID_MEDION)
switch (msg[i].addr) {
case 0x63:
@@ -184,13 +216,13 @@ static int cxusb_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
obuf[2] = msg[i].addr;
if (cxusb_ctrl_msg(d, CMD_I2C_READ,
obuf, 3,
- ibuf, 1+msg[i].len) < 0) {
+ ibuf, 1 + msg[i].len) < 0) {
warn("i2c read failed");
break;
}
memcpy(msg[i].buf, &ibuf[1], msg[i].len);
- } else if (i+1 < num && (msg[i+1].flags & I2C_M_RD) &&
- msg[i].addr == msg[i+1].addr) {
+ } else if (i + 1 < num && (msg[i + 1].flags & I2C_M_RD) &&
+ msg[i].addr == msg[i + 1].addr) {
/* write to then read from same address */
u8 obuf[MAX_XFER_SIZE], ibuf[MAX_XFER_SIZE];
@@ -207,19 +239,19 @@ static int cxusb_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
goto unlock;
}
obuf[0] = msg[i].len;
- obuf[1] = msg[i+1].len;
+ obuf[1] = msg[i + 1].len;
obuf[2] = msg[i].addr;
memcpy(&obuf[3], msg[i].buf, msg[i].len);
if (cxusb_ctrl_msg(d, CMD_I2C_READ,
- obuf, 3+msg[i].len,
- ibuf, 1+msg[i+1].len) < 0)
+ obuf, 3 + msg[i].len,
+ ibuf, 1 + msg[i + 1].len) < 0)
break;
if (ibuf[0] != 0x08)
deb_i2c("i2c read may have failed\n");
- memcpy(msg[i+1].buf, &ibuf[1], msg[i+1].len);
+ memcpy(msg[i + 1].buf, &ibuf[1], msg[i + 1].len);
i++;
} else {
@@ -237,7 +269,7 @@ static int cxusb_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
memcpy(&obuf[2], msg[i].buf, msg[i].len);
if (cxusb_ctrl_msg(d, CMD_I2C_WRITE, obuf,
- 2+msg[i].len, &ibuf,1) < 0)
+ 2 + msg[i].len, &ibuf, 1) < 0)
break;
if (ibuf != 0x08)
deb_i2c("i2c write may have failed\n");
@@ -256,7 +288,7 @@ unlock:
static u32 cxusb_i2c_func(struct i2c_adapter *adapter)
{
- return I2C_FUNC_I2C;
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
static struct i2c_algorithm cxusb_i2c_algo = {
@@ -264,29 +296,67 @@ static struct i2c_algorithm cxusb_i2c_algo = {
.functionality = cxusb_i2c_func,
};
-static int cxusb_power_ctrl(struct dvb_usb_device *d, int onoff)
+static int _cxusb_power_ctrl(struct dvb_usb_device *d, int onoff)
{
u8 b = 0;
+
+ deb_info("setting power %s\n", onoff ? "ON" : "OFF");
+
if (onoff)
return cxusb_ctrl_msg(d, CMD_POWER_ON, &b, 1, NULL, 0);
else
return cxusb_ctrl_msg(d, CMD_POWER_OFF, &b, 1, NULL, 0);
}
+static int cxusb_power_ctrl(struct dvb_usb_device *d, int onoff)
+{
+ bool is_medion = d->props.devices[0].warm_ids[0] == &cxusb_table[MEDION_MD95700];
+ int ret;
+
+ if (is_medion && !onoff) {
+ struct cxusb_medion_dev *cxdev = d->priv;
+
+ mutex_lock(&cxdev->open_lock);
+
+ if (cxdev->open_type == CXUSB_OPEN_ANALOG) {
+ deb_info("preventing DVB core from setting power OFF while we are in analog mode\n");
+ ret = -EBUSY;
+ goto ret_unlock;
+ }
+ }
+
+ ret = _cxusb_power_ctrl(d, onoff);
+
+ret_unlock:
+ if (is_medion && !onoff) {
+ struct cxusb_medion_dev *cxdev = d->priv;
+
+ mutex_unlock(&cxdev->open_lock);
+ }
+
+ return ret;
+}
+
static int cxusb_aver_power_ctrl(struct dvb_usb_device *d, int onoff)
{
int ret;
+
if (!onoff)
return cxusb_ctrl_msg(d, CMD_POWER_OFF, NULL, 0, NULL, 0);
if (d->state == DVB_USB_STATE_INIT &&
usb_set_interface(d->udev, 0, 0) < 0)
err("set interface failed");
- do {} while (!(ret = cxusb_ctrl_msg(d, CMD_POWER_ON, NULL, 0, NULL, 0)) &&
- !(ret = cxusb_ctrl_msg(d, 0x15, NULL, 0, NULL, 0)) &&
- !(ret = cxusb_ctrl_msg(d, 0x17, NULL, 0, NULL, 0)) && 0);
+ do {
+ /* Nothing */
+ } while (!(ret = cxusb_ctrl_msg(d, CMD_POWER_ON, NULL, 0, NULL, 0)) &&
+ !(ret = cxusb_ctrl_msg(d, 0x15, NULL, 0, NULL, 0)) &&
+ !(ret = cxusb_ctrl_msg(d, 0x17, NULL, 0, NULL, 0)) && 0);
+
if (!ret) {
- /* FIXME: We don't know why, but we need to configure the
- * lgdt3303 with the register settings below on resume */
+ /*
+ * FIXME: We don't know why, but we need to configure the
+ * lgdt3303 with the register settings below on resume
+ */
int i;
u8 buf;
static const u8 bufs[] = {
@@ -304,7 +374,7 @@ static int cxusb_aver_power_ctrl(struct dvb_usb_device *d, int onoff)
msleep(20);
for (i = 0; i < ARRAY_SIZE(bufs); i += 4 / sizeof(u8)) {
ret = cxusb_ctrl_msg(d, CMD_I2C_WRITE,
- bufs+i, 4, &buf, 1);
+ bufs + i, 4, &buf, 1);
if (ret)
break;
if (buf != 0x8)
@@ -317,6 +387,7 @@ static int cxusb_aver_power_ctrl(struct dvb_usb_device *d, int onoff)
static int cxusb_bluebird_power_ctrl(struct dvb_usb_device *d, int onoff)
{
u8 b = 0;
+
if (onoff)
return cxusb_ctrl_msg(d, CMD_POWER_ON, &b, 1, NULL, 0);
else
@@ -338,6 +409,7 @@ static int cxusb_d680_dmb_power_ctrl(struct dvb_usb_device *d, int onoff)
{
int ret;
u8 b;
+
ret = cxusb_power_ctrl(d, onoff);
if (!onoff)
return ret;
@@ -350,11 +422,26 @@ static int cxusb_d680_dmb_power_ctrl(struct dvb_usb_device *d, int onoff)
static int cxusb_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
{
+ struct dvb_usb_device *dvbdev = adap->dev;
+ bool is_medion = dvbdev->props.devices[0].warm_ids[0] ==
+ &cxusb_table[MEDION_MD95700];
u8 buf[2] = { 0x03, 0x00 };
+
+ if (is_medion && onoff) {
+ int ret;
+
+ ret = cxusb_medion_get(dvbdev, CXUSB_OPEN_DIGITAL);
+ if (ret != 0)
+ return ret;
+ }
+
if (onoff)
- cxusb_ctrl_msg(adap->dev, CMD_STREAMING_ON, buf, 2, NULL, 0);
+ cxusb_ctrl_msg(dvbdev, CMD_STREAMING_ON, buf, 2, NULL, 0);
else
- cxusb_ctrl_msg(adap->dev, CMD_STREAMING_OFF, NULL, 0, NULL, 0);
+ cxusb_ctrl_msg(dvbdev, CMD_STREAMING_OFF, NULL, 0, NULL, 0);
+
+ if (is_medion && !onoff)
+ cxusb_medion_put(dvbdev);
return 0;
}
@@ -370,7 +457,7 @@ static int cxusb_aver_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
}
static int cxusb_read_status(struct dvb_frontend *fe,
- enum fe_status *status)
+ enum fe_status *status)
{
struct dvb_usb_adapter *adap = (struct dvb_usb_adapter *)fe->dvb->priv;
struct cxusb_state *state = (struct cxusb_state *)adap->dev->priv;
@@ -403,8 +490,8 @@ static void cxusb_d680_dmb_drain_message(struct dvb_usb_device *d)
return;
while (1) {
if (usb_bulk_msg(d->udev,
- usb_rcvbulkpipe(d->udev, ep),
- junk, junk_len, &rd_count, timeout) < 0)
+ usb_rcvbulkpipe(d->udev, ep),
+ junk, junk_len, &rd_count, timeout) < 0)
break;
if (!rd_count)
break;
@@ -426,8 +513,8 @@ static void cxusb_d680_dmb_drain_video(struct dvb_usb_device *d)
return;
while (1) {
if (usb_bulk_msg(d->udev,
- usb_rcvbulkpipe(d->udev, p->endpoint),
- junk, junk_len, &rd_count, timeout) < 0)
+ usb_rcvbulkpipe(d->udev, p->endpoint),
+ junk, junk_len, &rd_count, timeout) < 0)
break;
if (!rd_count)
break;
@@ -435,17 +522,18 @@ static void cxusb_d680_dmb_drain_video(struct dvb_usb_device *d)
kfree(junk);
}
-static int cxusb_d680_dmb_streaming_ctrl(
- struct dvb_usb_adapter *adap, int onoff)
+static int cxusb_d680_dmb_streaming_ctrl(struct dvb_usb_adapter *adap,
+ int onoff)
{
if (onoff) {
u8 buf[2] = { 0x03, 0x00 };
+
cxusb_d680_dmb_drain_video(adap->dev);
return cxusb_ctrl_msg(adap->dev, CMD_STREAMING_ON,
- buf, sizeof(buf), NULL, 0);
+ buf, sizeof(buf), NULL, 0);
} else {
int ret = cxusb_ctrl_msg(adap->dev,
- CMD_STREAMING_OFF, NULL, 0, NULL, 0);
+ CMD_STREAMING_OFF, NULL, 0, NULL, 0);
return ret;
}
}
@@ -465,8 +553,12 @@ static int cxusb_rc_query(struct dvb_usb_device *d)
static int cxusb_bluebird2_rc_query(struct dvb_usb_device *d)
{
u8 ircode[4];
- struct i2c_msg msg = { .addr = 0x6b, .flags = I2C_M_RD,
- .buf = ircode, .len = 4 };
+ struct i2c_msg msg = {
+ .addr = 0x6b,
+ .flags = I2C_M_RD,
+ .buf = ircode,
+ .len = 4
+ };
if (cxusb_i2c_xfer(&d->i2c_adap, &msg, 1) != 1)
return 0;
@@ -490,13 +582,13 @@ static int cxusb_d680_dmb_rc_query(struct dvb_usb_device *d)
return 0;
}
-static int cxusb_dee1601_demod_init(struct dvb_frontend* fe)
+static int cxusb_dee1601_demod_init(struct dvb_frontend *fe)
{
- static u8 clock_config [] = { CLOCK_CTL, 0x38, 0x28 };
- static u8 reset [] = { RESET, 0x80 };
- static u8 adc_ctl_1_cfg [] = { ADC_CTL_1, 0x40 };
- static u8 agc_cfg [] = { AGC_TARGET, 0x28, 0x20 };
- static u8 gpp_ctl_cfg [] = { GPP_CTL, 0x33 };
+ static u8 clock_config[] = { CLOCK_CTL, 0x38, 0x28 };
+ static u8 reset[] = { RESET, 0x80 };
+ static u8 adc_ctl_1_cfg[] = { ADC_CTL_1, 0x40 };
+ static u8 agc_cfg[] = { AGC_TARGET, 0x28, 0x20 };
+ static u8 gpp_ctl_cfg[] = { GPP_CTL, 0x33 };
static u8 capt_range_cfg[] = { CAPT_RANGE, 0x32 };
mt352_write(fe, clock_config, sizeof(clock_config));
@@ -511,13 +603,14 @@ static int cxusb_dee1601_demod_init(struct dvb_frontend* fe)
return 0;
}
-static int cxusb_mt352_demod_init(struct dvb_frontend* fe)
-{ /* used in both lgz201 and th7579 */
- static u8 clock_config [] = { CLOCK_CTL, 0x38, 0x29 };
- static u8 reset [] = { RESET, 0x80 };
- static u8 adc_ctl_1_cfg [] = { ADC_CTL_1, 0x40 };
- static u8 agc_cfg [] = { AGC_TARGET, 0x24, 0x20 };
- static u8 gpp_ctl_cfg [] = { GPP_CTL, 0x33 };
+static int cxusb_mt352_demod_init(struct dvb_frontend *fe)
+{
+ /* used in both lgz201 and th7579 */
+ static u8 clock_config[] = { CLOCK_CTL, 0x38, 0x29 };
+ static u8 reset[] = { RESET, 0x80 };
+ static u8 adc_ctl_1_cfg[] = { ADC_CTL_1, 0x40 };
+ static u8 agc_cfg[] = { AGC_TARGET, 0x24, 0x20 };
+ static u8 gpp_ctl_cfg[] = { GPP_CTL, 0x33 };
static u8 capt_range_cfg[] = { CAPT_RANGE, 0x32 };
mt352_write(fe, clock_config, sizeof(clock_config));
@@ -627,9 +720,21 @@ static struct max2165_config mygica_d689_max2165_cfg = {
/* Callbacks for DVB USB */
static int cxusb_fmd1216me_tuner_attach(struct dvb_usb_adapter *adap)
{
+ struct dvb_usb_device *dvbdev = adap->dev;
+ bool is_medion = dvbdev->props.devices[0].warm_ids[0] ==
+ &cxusb_table[MEDION_MD95700];
+
dvb_attach(simple_tuner_attach, adap->fe_adap[0].fe,
- &adap->dev->i2c_adap, 0x61,
+ &dvbdev->i2c_adap, 0x61,
TUNER_PHILIPS_FMD1216ME_MK3);
+
+ if (is_medion && adap->fe_adap[0].fe)
+ /*
+ * make sure that the DVB core won't put the tuner to sleep
+ * (which is really a reset) while we might be open in analog mode
+ */
+ adap->fe_adap[0].fe->ops.tuner_ops.sleep = NULL;
+
return 0;
}
@@ -642,7 +747,8 @@ static int cxusb_dee1601_tuner_attach(struct dvb_usb_adapter *adap)
static int cxusb_lgz201_tuner_attach(struct dvb_usb_adapter *adap)
{
- dvb_attach(dvb_pll_attach, adap->fe_adap[0].fe, 0x61, NULL, DVB_PLL_LG_Z201);
+ dvb_attach(dvb_pll_attach, adap->fe_adap[0].fe, 0x61,
+ NULL, DVB_PLL_LG_Z201);
return 0;
}
@@ -702,7 +808,7 @@ static int cxusb_dvico_xc3028_tuner_attach(struct dvb_usb_adapter *adap)
adap->fe_adap[0].fe->callback = dvico_bluebird_xc2028_callback;
fe = dvb_attach(xc2028_attach, adap->fe_adap[0].fe, &cfg);
- if (fe == NULL || fe->ops.tuner_ops.set_config == NULL)
+ if (!fe || !fe->ops.tuner_ops.set_config)
return -EIO;
fe->ops.tuner_ops.set_config(fe, &ctl);
@@ -720,33 +826,120 @@ static int cxusb_mxl5003s_tuner_attach(struct dvb_usb_adapter *adap)
static int cxusb_d680_dmb_tuner_attach(struct dvb_usb_adapter *adap)
{
struct dvb_frontend *fe;
+
fe = dvb_attach(mxl5005s_attach, adap->fe_adap[0].fe,
&adap->dev->i2c_adap, &d680_dmb_tuner);
- return (fe == NULL) ? -EIO : 0;
+ return (!fe) ? -EIO : 0;
}
static int cxusb_mygica_d689_tuner_attach(struct dvb_usb_adapter *adap)
{
struct dvb_frontend *fe;
+
fe = dvb_attach(max2165_attach, adap->fe_adap[0].fe,
&adap->dev->i2c_adap, &mygica_d689_max2165_cfg);
- return (fe == NULL) ? -EIO : 0;
+ return (!fe) ? -EIO : 0;
}
-static int cxusb_cx22702_frontend_attach(struct dvb_usb_adapter *adap)
+static int cxusb_medion_fe_ts_bus_ctrl(struct dvb_frontend *fe, int acquire)
{
+ struct dvb_usb_adapter *adap = fe->dvb->priv;
+ struct dvb_usb_device *dvbdev = adap->dev;
+
+ if (acquire)
+ return cxusb_medion_get(dvbdev, CXUSB_OPEN_DIGITAL);
+
+ cxusb_medion_put(dvbdev);
+
+ return 0;
+}
+
+static int cxusb_medion_set_mode(struct dvb_usb_device *dvbdev, bool digital)
+{
+ struct cxusb_state *st = dvbdev->priv;
+ int ret;
u8 b;
- if (usb_set_interface(adap->dev->udev, 0, 6) < 0)
- err("set interface failed");
+ unsigned int i;
- cxusb_ctrl_msg(adap->dev, CMD_DIGITAL, NULL, 0, &b, 1);
+ /*
+ * switching mode while doing an I2C transaction often causes
+ * the device to crash
+ */
+ mutex_lock(&dvbdev->i2c_mutex);
+
+ if (digital) {
+ ret = usb_set_interface(dvbdev->udev, 0, 6);
+ if (ret != 0) {
+ dev_err(&dvbdev->udev->dev,
+ "digital interface selection failed (%d)\n",
+ ret);
+ goto ret_unlock;
+ }
+ } else {
+ ret = usb_set_interface(dvbdev->udev, 0, 1);
+ if (ret != 0) {
+ dev_err(&dvbdev->udev->dev,
+ "analog interface selection failed (%d)\n",
+ ret);
+ goto ret_unlock;
+ }
+ }
+
+ /* pipes need to be cleared after setting interface */
+ ret = usb_clear_halt(dvbdev->udev, usb_rcvbulkpipe(dvbdev->udev, 1));
+ if (ret != 0)
+ dev_warn(&dvbdev->udev->dev,
+ "clear halt on IN pipe failed (%d)\n",
+ ret);
+
+ ret = usb_clear_halt(dvbdev->udev, usb_sndbulkpipe(dvbdev->udev, 1));
+ if (ret != 0)
+ dev_warn(&dvbdev->udev->dev,
+ "clear halt on OUT pipe failed (%d)\n",
+ ret);
+
+ ret = cxusb_ctrl_msg(dvbdev, digital ? CMD_DIGITAL : CMD_ANALOG,
+ NULL, 0, &b, 1);
+ if (ret != 0) {
+ dev_err(&dvbdev->udev->dev, "mode switch failed (%d)\n",
+ ret);
+ goto ret_unlock;
+ }
+
+ /* mode switch seems to reset GPIO states */
+ for (i = 0; i < ARRAY_SIZE(st->gpio_write_refresh); i++)
+ st->gpio_write_refresh[i] = true;
+
+ret_unlock:
+ mutex_unlock(&dvbdev->i2c_mutex);
+
+ return ret;
+}
+
+static int cxusb_cx22702_frontend_attach(struct dvb_usb_adapter *adap)
+{
+ struct dvb_usb_device *dvbdev = adap->dev;
+ bool is_medion = dvbdev->props.devices[0].warm_ids[0] ==
+ &cxusb_table[MEDION_MD95700];
+
+ if (is_medion) {
+ int ret;
+
+ ret = cxusb_medion_set_mode(dvbdev, true);
+ if (ret)
+ return ret;
+ }
adap->fe_adap[0].fe = dvb_attach(cx22702_attach, &cxusb_cx22702_config,
- &adap->dev->i2c_adap);
- if ((adap->fe_adap[0].fe) != NULL)
- return 0;
+ &dvbdev->i2c_adap);
+ if (!adap->fe_adap[0].fe)
+ return -EIO;
- return -EIO;
+ if (is_medion)
+ adap->fe_adap[0].fe->ops.ts_bus_ctrl =
+ cxusb_medion_fe_ts_bus_ctrl;
+
+ return 0;
}
static int cxusb_lgdt3303_frontend_attach(struct dvb_usb_adapter *adap)
@@ -760,7 +953,7 @@ static int cxusb_lgdt3303_frontend_attach(struct dvb_usb_adapter *adap)
&cxusb_lgdt3303_config,
0x0e,
&adap->dev->i2c_adap);
- if ((adap->fe_adap[0].fe) != NULL)
+ if (adap->fe_adap[0].fe)
return 0;
return -EIO;
@@ -772,7 +965,7 @@ static int cxusb_aver_lgdt3303_frontend_attach(struct dvb_usb_adapter *adap)
&cxusb_aver_lgdt3303_config,
0x0e,
&adap->dev->i2c_adap);
- if (adap->fe_adap[0].fe != NULL)
+ if (adap->fe_adap[0].fe)
return 0;
return -EIO;
@@ -788,7 +981,7 @@ static int cxusb_mt352_frontend_attach(struct dvb_usb_adapter *adap)
adap->fe_adap[0].fe = dvb_attach(mt352_attach, &cxusb_mt352_config,
&adap->dev->i2c_adap);
- if ((adap->fe_adap[0].fe) != NULL)
+ if (adap->fe_adap[0].fe)
return 0;
return -EIO;
@@ -803,13 +996,13 @@ static int cxusb_dee1601_frontend_attach(struct dvb_usb_adapter *adap)
adap->fe_adap[0].fe = dvb_attach(mt352_attach, &cxusb_dee1601_config,
&adap->dev->i2c_adap);
- if ((adap->fe_adap[0].fe) != NULL)
+ if (adap->fe_adap[0].fe)
return 0;
adap->fe_adap[0].fe = dvb_attach(zl10353_attach,
&cxusb_zl10353_dee1601_config,
&adap->dev->i2c_adap);
- if ((adap->fe_adap[0].fe) != NULL)
+ if (adap->fe_adap[0].fe)
return 0;
return -EIO;
@@ -819,8 +1012,12 @@ static int cxusb_dualdig4_frontend_attach(struct dvb_usb_adapter *adap)
{
u8 ircode[4];
int i;
- struct i2c_msg msg = { .addr = 0x6b, .flags = I2C_M_RD,
- .buf = ircode, .len = 4 };
+ struct i2c_msg msg = {
+ .addr = 0x6b,
+ .flags = I2C_M_RD,
+ .buf = ircode,
+ .len = 4
+ };
if (usb_set_interface(adap->dev->udev, 0, 1) < 0)
err("set interface failed");
@@ -836,7 +1033,7 @@ static int cxusb_dualdig4_frontend_attach(struct dvb_usb_adapter *adap)
dvb_attach(zl10353_attach,
&cxusb_zl10353_xc3028_config_no_i2c_gate,
&adap->dev->i2c_adap);
- if ((adap->fe_adap[0].fe) == NULL)
+ if (!adap->fe_adap[0].fe)
return -EIO;
/* try to determine if there is no IR decoder on the I2C bus */
@@ -934,7 +1131,7 @@ static struct dib7000p_config cxusb_dualdig4_rev2_config = {
};
struct dib0700_adapter_state {
- int (*set_param_save)(struct dvb_frontend *);
+ int (*set_param_save)(struct dvb_frontend *fe);
struct dib7000p_ops dib7000p_ops;
};
@@ -953,14 +1150,15 @@ static int cxusb_dualdig4_rev2_frontend_attach(struct dvb_usb_adapter *adap)
return -ENODEV;
if (state->dib7000p_ops.i2c_enumeration(&adap->dev->i2c_adap, 1, 18,
- &cxusb_dualdig4_rev2_config) < 0) {
- printk(KERN_WARNING "Unable to enumerate dib7000p\n");
+ &cxusb_dualdig4_rev2_config) < 0) {
+ pr_warn("Unable to enumerate dib7000p\n");
return -ENODEV;
}
- adap->fe_adap[0].fe = state->dib7000p_ops.init(&adap->dev->i2c_adap, 0x80,
- &cxusb_dualdig4_rev2_config);
- if (adap->fe_adap[0].fe == NULL)
+ adap->fe_adap[0].fe = state->dib7000p_ops.init(&adap->dev->i2c_adap,
+ 0x80,
+ &cxusb_dualdig4_rev2_config);
+ if (!adap->fe_adap[0].fe)
return -EIO;
return 0;
@@ -993,11 +1191,16 @@ static int dib7070_set_param_override(struct dvb_frontend *fe)
struct dib0700_adapter_state *state = adap->priv;
u16 offset;
- u8 band = BAND_OF_FREQUENCY(p->frequency/1000);
+ u8 band = BAND_OF_FREQUENCY(p->frequency / 1000);
+
switch (band) {
- case BAND_VHF: offset = 950; break;
+ case BAND_VHF:
+ offset = 950;
+ break;
default:
- case BAND_UHF: offset = 550; break;
+ case BAND_UHF:
+ offset = 550;
+ break;
}
state->dib7000p_ops.set_wbd_ref(fe, offset + dib0070_wbd_offset(fe));
@@ -1019,7 +1222,7 @@ static int cxusb_dualdig4_rev2_tuner_attach(struct dvb_usb_adapter *adap)
DIBX000_I2C_INTERFACE_TUNER, 1);
if (dvb_attach(dib0070_attach, adap->fe_adap[0].fe, tun_i2c,
- &dib7070p_dib0070_config) == NULL)
+ &dib7070p_dib0070_config) == NULL)
return -ENODEV;
st->set_param_save = adap->fe_adap[0].fe->ops.tuner_ops.set_params;
@@ -1042,13 +1245,13 @@ static int cxusb_nano2_frontend_attach(struct dvb_usb_adapter *adap)
adap->fe_adap[0].fe = dvb_attach(zl10353_attach,
&cxusb_zl10353_xc3028_config,
&adap->dev->i2c_adap);
- if ((adap->fe_adap[0].fe) != NULL)
+ if (adap->fe_adap[0].fe)
return 0;
adap->fe_adap[0].fe = dvb_attach(mt352_attach,
&cxusb_mt352_xc3028_config,
&adap->dev->i2c_adap);
- if ((adap->fe_adap[0].fe) != NULL)
+ if (adap->fe_adap[0].fe)
return 0;
return -EIO;
@@ -1079,11 +1282,14 @@ static int cxusb_d680_dmb_frontend_attach(struct dvb_usb_adapter *adap)
/* Unblock all USB pipes */
usb_clear_halt(d->udev,
- usb_sndbulkpipe(d->udev, d->props.generic_bulk_ctrl_endpoint));
+ usb_sndbulkpipe(d->udev,
+ d->props.generic_bulk_ctrl_endpoint));
usb_clear_halt(d->udev,
- usb_rcvbulkpipe(d->udev, d->props.generic_bulk_ctrl_endpoint));
+ usb_rcvbulkpipe(d->udev,
+ d->props.generic_bulk_ctrl_endpoint));
usb_clear_halt(d->udev,
- usb_rcvbulkpipe(d->udev, d->props.adapter[0].fe[0].stream.endpoint));
+ usb_rcvbulkpipe(d->udev,
+ d->props.adapter[0].fe[0].stream.endpoint));
/* Drain USB pipes to avoid hang after reboot */
for (n = 0; n < 5; n++) {
@@ -1105,8 +1311,9 @@ static int cxusb_d680_dmb_frontend_attach(struct dvb_usb_adapter *adap)
msleep(100);
/* Attach frontend */
- adap->fe_adap[0].fe = dvb_attach(lgs8gxx_attach, &d680_lgs8gl5_cfg, &d->i2c_adap);
- if (adap->fe_adap[0].fe == NULL)
+ adap->fe_adap[0].fe = dvb_attach(lgs8gxx_attach,
+ &d680_lgs8gl5_cfg, &d->i2c_adap);
+ if (!adap->fe_adap[0].fe)
return -EIO;
return 0;
@@ -1136,12 +1343,14 @@ static int cxusb_mygica_d689_frontend_attach(struct dvb_usb_adapter *adap)
/* Unblock all USB pipes */
usb_clear_halt(d->udev,
- usb_sndbulkpipe(d->udev, d->props.generic_bulk_ctrl_endpoint));
+ usb_sndbulkpipe(d->udev,
+ d->props.generic_bulk_ctrl_endpoint));
usb_clear_halt(d->udev,
- usb_rcvbulkpipe(d->udev, d->props.generic_bulk_ctrl_endpoint));
+ usb_rcvbulkpipe(d->udev,
+ d->props.generic_bulk_ctrl_endpoint));
usb_clear_halt(d->udev,
- usb_rcvbulkpipe(d->udev, d->props.adapter[0].fe[0].stream.endpoint));
-
+ usb_rcvbulkpipe(d->udev,
+ d->props.adapter[0].fe[0].stream.endpoint));
/* Reset the tuner */
if (cxusb_d680_dmb_gpio_tuner(d, 0x07, 0) < 0) {
@@ -1156,9 +1365,10 @@ static int cxusb_mygica_d689_frontend_attach(struct dvb_usb_adapter *adap)
msleep(100);
/* Attach frontend */
- adap->fe_adap[0].fe = dvb_attach(atbm8830_attach, &mygica_d689_atbm8830_cfg,
- &d->i2c_adap);
- if (adap->fe_adap[0].fe == NULL)
+ adap->fe_adap[0].fe = dvb_attach(atbm8830_attach,
+ &mygica_d689_atbm8830_cfg,
+ &d->i2c_adap);
+ if (!adap->fe_adap[0].fe)
return -EIO;
return 0;
@@ -1181,11 +1391,14 @@ static int cxusb_mygica_t230_frontend_attach(struct dvb_usb_adapter *adap)
/* Unblock all USB pipes */
usb_clear_halt(d->udev,
- usb_sndbulkpipe(d->udev, d->props.generic_bulk_ctrl_endpoint));
+ usb_sndbulkpipe(d->udev,
+ d->props.generic_bulk_ctrl_endpoint));
usb_clear_halt(d->udev,
- usb_rcvbulkpipe(d->udev, d->props.generic_bulk_ctrl_endpoint));
+ usb_rcvbulkpipe(d->udev,
+ d->props.generic_bulk_ctrl_endpoint));
usb_clear_halt(d->udev,
- usb_rcvbulkpipe(d->udev, d->props.adapter[0].fe[0].stream.endpoint));
+ usb_rcvbulkpipe(d->udev,
+ d->props.adapter[0].fe[0].stream.endpoint));
/* attach frontend */
si2168_config.i2c_adapter = &adapter;
@@ -1198,7 +1411,7 @@ static int cxusb_mygica_t230_frontend_attach(struct dvb_usb_adapter *adap)
info.platform_data = &si2168_config;
request_module(info.type);
client_demod = i2c_new_device(&d->i2c_adap, &info);
- if (client_demod == NULL || client_demod->dev.driver == NULL)
+ if (!client_demod || !client_demod->dev.driver)
return -ENODEV;
if (!try_module_get(client_demod->dev.driver->owner)) {
@@ -1218,7 +1431,7 @@ static int cxusb_mygica_t230_frontend_attach(struct dvb_usb_adapter *adap)
info.platform_data = &si2157_config;
request_module(info.type);
client_tuner = i2c_new_device(adapter, &info);
- if (client_tuner == NULL || client_tuner->dev.driver == NULL) {
+ if (!client_tuner || !client_tuner->dev.driver) {
module_put(client_demod->dev.driver->owner);
i2c_unregister_device(client_demod);
return -ENODEV;
@@ -1309,6 +1522,104 @@ static int bluebird_patch_dvico_firmware_download(struct usb_device *udev,
return -EINVAL;
}
+int cxusb_medion_get(struct dvb_usb_device *dvbdev,
+ enum cxusb_open_type open_type)
+{
+ struct cxusb_medion_dev *cxdev = dvbdev->priv;
+ int ret = 0;
+
+ mutex_lock(&cxdev->open_lock);
+
+ if (WARN_ON((cxdev->open_type == CXUSB_OPEN_INIT ||
+ cxdev->open_type == CXUSB_OPEN_NONE) &&
+ cxdev->open_ctr != 0)) {
+ ret = -EINVAL;
+ goto ret_unlock;
+ }
+
+ if (cxdev->open_type == CXUSB_OPEN_INIT) {
+ ret = -EAGAIN;
+ goto ret_unlock;
+ }
+
+ if (cxdev->open_ctr == 0) {
+ if (cxdev->open_type != open_type) {
+ deb_info("will acquire and switch to %s\n",
+ open_type == CXUSB_OPEN_ANALOG ?
+ "analog" : "digital");
+
+ if (open_type == CXUSB_OPEN_ANALOG) {
+ ret = _cxusb_power_ctrl(dvbdev, 1);
+ if (ret != 0)
+ dev_warn(&dvbdev->udev->dev,
+ "powerup for analog switch failed (%d)\n",
+ ret);
+
+ ret = cxusb_medion_set_mode(dvbdev, false);
+ if (ret != 0)
+ goto ret_unlock;
+
+ ret = cxusb_medion_analog_init(dvbdev);
+ if (ret != 0)
+ goto ret_unlock;
+ } else { /* digital */
+ ret = _cxusb_power_ctrl(dvbdev, 1);
+ if (ret != 0)
+ dev_warn(&dvbdev->udev->dev,
+ "powerup for digital switch failed (%d)\n",
+ ret);
+
+ ret = cxusb_medion_set_mode(dvbdev, true);
+ if (ret != 0)
+ goto ret_unlock;
+ }
+
+ cxdev->open_type = open_type;
+ } else {
+ deb_info("reacquired idle %s\n",
+ open_type == CXUSB_OPEN_ANALOG ?
+ "analog" : "digital");
+ }
+
+ cxdev->open_ctr = 1;
+ } else if (cxdev->open_type == open_type) {
+ cxdev->open_ctr++;
+ deb_info("acquired %s\n", open_type == CXUSB_OPEN_ANALOG ?
+ "analog" : "digital");
+ } else {
+ ret = -EBUSY;
+ }
+
+ret_unlock:
+ mutex_unlock(&cxdev->open_lock);
+
+ return ret;
+}
+
+void cxusb_medion_put(struct dvb_usb_device *dvbdev)
+{
+ struct cxusb_medion_dev *cxdev = dvbdev->priv;
+
+ mutex_lock(&cxdev->open_lock);
+
+ if (cxdev->open_type == CXUSB_OPEN_INIT) {
+ WARN_ON(cxdev->open_ctr != 0);
+ cxdev->open_type = CXUSB_OPEN_NONE;
+ goto unlock;
+ }
+
+ if (!WARN_ON(cxdev->open_ctr < 1)) {
+ cxdev->open_ctr--;
+
+ deb_info("release %s\n",
+ cxdev->open_type == CXUSB_OPEN_ANALOG ?
+ "analog" : "digital");
+ }
+
+unlock:
+ mutex_unlock(&cxdev->open_lock);
+}
+
/* DVB USB Driver stuff */
static struct dvb_usb_device_properties cxusb_medion_properties;
static struct dvb_usb_device_properties cxusb_bluebird_lgh064f_properties;
@@ -1324,41 +1635,141 @@ static struct dvb_usb_device_properties cxusb_d680_dmb_properties;
static struct dvb_usb_device_properties cxusb_mygica_d689_properties;
static struct dvb_usb_device_properties cxusb_mygica_t230_properties;
+static int cxusb_medion_priv_init(struct dvb_usb_device *dvbdev)
+{
+ struct cxusb_medion_dev *cxdev = dvbdev->priv;
+
+ cxdev->dvbdev = dvbdev;
+ cxdev->open_type = CXUSB_OPEN_INIT;
+ mutex_init(&cxdev->open_lock);
+
+ return 0;
+}
+
+static void cxusb_medion_priv_destroy(struct dvb_usb_device *dvbdev)
+{
+ struct cxusb_medion_dev *cxdev = dvbdev->priv;
+
+ mutex_destroy(&cxdev->open_lock);
+}
+
+static bool cxusb_medion_check_altsetting(struct usb_host_interface *as)
+{
+ unsigned int ctr;
+
+ for (ctr = 0; ctr < as->desc.bNumEndpoints; ctr++) {
+ if ((as->endpoint[ctr].desc.bEndpointAddress &
+ USB_ENDPOINT_NUMBER_MASK) != 2)
+ continue;
+
+ if (as->endpoint[ctr].desc.bEndpointAddress & USB_DIR_IN &&
+ ((as->endpoint[ctr].desc.bmAttributes &
+ USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_ISOC))
+ return true;
+
+ break;
+ }
+
+ return false;
+}
+
+static bool cxusb_medion_check_intf(struct usb_interface *intf)
+{
+ unsigned int ctr;
+
+ if (intf->num_altsetting < 2) {
+ dev_err(intf->usb_dev, "no alternate interface");
+
+ return false;
+ }
+
+ for (ctr = 0; ctr < intf->num_altsetting; ctr++) {
+ if (intf->altsetting[ctr].desc.bAlternateSetting != 1)
+ continue;
+
+ if (cxusb_medion_check_altsetting(&intf->altsetting[ctr]))
+ return true;
+
+ break;
+ }
+
+ dev_err(intf->usb_dev, "no iso interface");
+
+ return false;
+}
+
static int cxusb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
- if (0 == dvb_usb_device_init(intf, &cxusb_medion_properties,
- THIS_MODULE, NULL, adapter_nr) ||
- 0 == dvb_usb_device_init(intf, &cxusb_bluebird_lgh064f_properties,
- THIS_MODULE, NULL, adapter_nr) ||
- 0 == dvb_usb_device_init(intf, &cxusb_bluebird_dee1601_properties,
- THIS_MODULE, NULL, adapter_nr) ||
- 0 == dvb_usb_device_init(intf, &cxusb_bluebird_lgz201_properties,
- THIS_MODULE, NULL, adapter_nr) ||
- 0 == dvb_usb_device_init(intf, &cxusb_bluebird_dtt7579_properties,
- THIS_MODULE, NULL, adapter_nr) ||
- 0 == dvb_usb_device_init(intf, &cxusb_bluebird_dualdig4_properties,
- THIS_MODULE, NULL, adapter_nr) ||
- 0 == dvb_usb_device_init(intf, &cxusb_bluebird_nano2_properties,
- THIS_MODULE, NULL, adapter_nr) ||
- 0 == dvb_usb_device_init(intf,
- &cxusb_bluebird_nano2_needsfirmware_properties,
- THIS_MODULE, NULL, adapter_nr) ||
- 0 == dvb_usb_device_init(intf, &cxusb_aver_a868r_properties,
- THIS_MODULE, NULL, adapter_nr) ||
- 0 == dvb_usb_device_init(intf,
- &cxusb_bluebird_dualdig4_rev2_properties,
- THIS_MODULE, NULL, adapter_nr) ||
- 0 == dvb_usb_device_init(intf, &cxusb_d680_dmb_properties,
- THIS_MODULE, NULL, adapter_nr) ||
- 0 == dvb_usb_device_init(intf, &cxusb_mygica_d689_properties,
- THIS_MODULE, NULL, adapter_nr) ||
- 0 == dvb_usb_device_init(intf, &cxusb_mygica_t230_properties,
- THIS_MODULE, NULL, adapter_nr) ||
- 0)
+ struct dvb_usb_device *dvbdev;
+ int ret;
+
+ /* Medion 95700 */
+ if (!dvb_usb_device_init(intf, &cxusb_medion_properties,
+ THIS_MODULE, &dvbdev, adapter_nr)) {
+ if (!cxusb_medion_check_intf(intf)) {
+ ret = -ENODEV;
+ goto ret_uninit;
+ }
+
+ _cxusb_power_ctrl(dvbdev, 1);
+ ret = cxusb_medion_set_mode(dvbdev, false);
+ if (ret)
+ goto ret_uninit;
+
+ ret = cxusb_medion_register_analog(dvbdev);
+
+ cxusb_medion_set_mode(dvbdev, true);
+ _cxusb_power_ctrl(dvbdev, 0);
+
+ if (ret != 0)
+ goto ret_uninit;
+
+ /* release device from INIT mode to normal operation */
+ cxusb_medion_put(dvbdev);
+
+ return 0;
+ } else if (!dvb_usb_device_init(intf,
+ &cxusb_bluebird_lgh064f_properties,
+ THIS_MODULE, NULL, adapter_nr) ||
+ !dvb_usb_device_init(intf,
+ &cxusb_bluebird_dee1601_properties,
+ THIS_MODULE, NULL, adapter_nr) ||
+ !dvb_usb_device_init(intf,
+ &cxusb_bluebird_lgz201_properties,
+ THIS_MODULE, NULL, adapter_nr) ||
+ !dvb_usb_device_init(intf,
+ &cxusb_bluebird_dtt7579_properties,
+ THIS_MODULE, NULL, adapter_nr) ||
+ !dvb_usb_device_init(intf,
+ &cxusb_bluebird_dualdig4_properties,
+ THIS_MODULE, NULL, adapter_nr) ||
+ !dvb_usb_device_init(intf,
+ &cxusb_bluebird_nano2_properties,
+ THIS_MODULE, NULL, adapter_nr) ||
+ !dvb_usb_device_init(intf,
+ &cxusb_bluebird_nano2_needsfirmware_properties,
+ THIS_MODULE, NULL, adapter_nr) ||
+ !dvb_usb_device_init(intf, &cxusb_aver_a868r_properties,
+ THIS_MODULE, NULL, adapter_nr) ||
+ !dvb_usb_device_init(intf,
+ &cxusb_bluebird_dualdig4_rev2_properties,
+ THIS_MODULE, NULL, adapter_nr) ||
+ !dvb_usb_device_init(intf, &cxusb_d680_dmb_properties,
+ THIS_MODULE, NULL, adapter_nr) ||
+ !dvb_usb_device_init(intf, &cxusb_mygica_d689_properties,
+ THIS_MODULE, NULL, adapter_nr) ||
+ !dvb_usb_device_init(intf, &cxusb_mygica_t230_properties,
+ THIS_MODULE, NULL, adapter_nr) ||
+ 0)
return 0;
return -EINVAL;
+
+ret_uninit:
+ dvb_usb_device_exit(intf);
+
+ return ret;
}
static void cxusb_disconnect(struct usb_interface *intf)
@@ -1367,6 +1778,9 @@ static void cxusb_disconnect(struct usb_interface *intf)
struct cxusb_state *st = d->priv;
struct i2c_client *client;
+ if (d->props.devices[0].warm_ids[0] == &cxusb_table[MEDION_MD95700])
+ cxusb_medion_unregister_analog(d);
+
/* remove I2C client for tuner */
client = st->i2c_client_tuner;
if (client) {
@@ -1384,31 +1798,6 @@ static void cxusb_disconnect(struct usb_interface *intf)
dvb_usb_device_exit(intf);
}
-enum cxusb_table_index {
- MEDION_MD95700,
- DVICO_BLUEBIRD_LG064F_COLD,
- DVICO_BLUEBIRD_LG064F_WARM,
- DVICO_BLUEBIRD_DUAL_1_COLD,
- DVICO_BLUEBIRD_DUAL_1_WARM,
- DVICO_BLUEBIRD_LGZ201_COLD,
- DVICO_BLUEBIRD_LGZ201_WARM,
- DVICO_BLUEBIRD_TH7579_COLD,
- DVICO_BLUEBIRD_TH7579_WARM,
- DIGITALNOW_BLUEBIRD_DUAL_1_COLD,
- DIGITALNOW_BLUEBIRD_DUAL_1_WARM,
- DVICO_BLUEBIRD_DUAL_2_COLD,
- DVICO_BLUEBIRD_DUAL_2_WARM,
- DVICO_BLUEBIRD_DUAL_4,
- DVICO_BLUEBIRD_DVB_T_NANO_2,
- DVICO_BLUEBIRD_DVB_T_NANO_2_NFW_WARM,
- AVERMEDIA_VOLAR_A868R,
- DVICO_BLUEBIRD_DUAL_4_REV_2,
- CONEXANT_D680_DMB,
- MYGICA_D689,
- MYGICA_T230,
- NR__cxusb_table_index
-};
-
static struct usb_device_id cxusb_table[NR__cxusb_table_index + 1] = {
[MEDION_MD95700] = {
USB_DEVICE(USB_VID_MEDION, USB_PID_MEDION_MD95700)
@@ -1438,10 +1827,12 @@ static struct usb_device_id cxusb_table[NR__cxusb_table_index + 1] = {
USB_DEVICE(USB_VID_DVICO, USB_PID_DVICO_BLUEBIRD_TH7579_WARM)
},
[DIGITALNOW_BLUEBIRD_DUAL_1_COLD] = {
- USB_DEVICE(USB_VID_DVICO, USB_PID_DIGITALNOW_BLUEBIRD_DUAL_1_COLD)
+ USB_DEVICE(USB_VID_DVICO,
+ USB_PID_DIGITALNOW_BLUEBIRD_DUAL_1_COLD)
},
[DIGITALNOW_BLUEBIRD_DUAL_1_WARM] = {
- USB_DEVICE(USB_VID_DVICO, USB_PID_DIGITALNOW_BLUEBIRD_DUAL_1_WARM)
+ USB_DEVICE(USB_VID_DVICO,
+ USB_PID_DIGITALNOW_BLUEBIRD_DUAL_1_WARM)
},
[DVICO_BLUEBIRD_DUAL_2_COLD] = {
USB_DEVICE(USB_VID_DVICO, USB_PID_DVICO_BLUEBIRD_DUAL_2_COLD)
@@ -1456,7 +1847,8 @@ static struct usb_device_id cxusb_table[NR__cxusb_table_index + 1] = {
USB_DEVICE(USB_VID_DVICO, USB_PID_DVICO_BLUEBIRD_DVB_T_NANO_2)
},
[DVICO_BLUEBIRD_DVB_T_NANO_2_NFW_WARM] = {
- USB_DEVICE(USB_VID_DVICO, USB_PID_DVICO_BLUEBIRD_DVB_T_NANO_2_NFW_WARM)
+ USB_DEVICE(USB_VID_DVICO,
+ USB_PID_DVICO_BLUEBIRD_DVB_T_NANO_2_NFW_WARM)
},
[AVERMEDIA_VOLAR_A868R] = {
USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_VOLAR_A868R)
@@ -1475,14 +1867,16 @@ static struct usb_device_id cxusb_table[NR__cxusb_table_index + 1] = {
},
{} /* Terminating entry */
};
-MODULE_DEVICE_TABLE (usb, cxusb_table);
+MODULE_DEVICE_TABLE(usb, cxusb_table);
static struct dvb_usb_device_properties cxusb_medion_properties = {
.caps = DVB_USB_IS_AN_I2C_ADAPTER,
.usb_ctrl = CYPRESS_FX2,
- .size_of_priv = sizeof(struct cxusb_state),
+ .size_of_priv = sizeof(struct cxusb_medion_dev),
+ .priv_init = cxusb_medion_priv_init,
+ .priv_destroy = cxusb_medion_priv_destroy,
.num_adapters = 1,
.adapter = {
@@ -1503,7 +1897,7 @@ static struct dvb_usb_device_properties cxusb_medion_properties = {
}
}
},
- }},
+ } },
},
},
.power_ctrl = cxusb_power_ctrl,
@@ -1514,7 +1908,8 @@ static struct dvb_usb_device_properties cxusb_medion_properties = {
.num_device_descs = 1,
.devices = {
- { "Medion MD95700 (MDUSBTV-HYBRID)",
+ {
+ "Medion MD95700 (MDUSBTV-HYBRID)",
{ NULL },
{ &cxusb_table[MEDION_MD95700], NULL },
},
@@ -1527,8 +1922,10 @@ static struct dvb_usb_device_properties cxusb_bluebird_lgh064f_properties = {
.usb_ctrl = DEVICE_SPECIFIC,
.firmware = "dvb-usb-bluebird-01.fw",
.download_firmware = bluebird_patch_dvico_firmware_download,
- /* use usb alt setting 0 for EP4 transfer (dvb-t),
- use usb alt setting 7 for EP2 transfer (atsc) */
+ /*
+ * use usb alt setting 0 for EP4 transfer (dvb-t),
+ * use usb alt setting 7 for EP2 transfer (atsc)
+ */
.size_of_priv = sizeof(struct cxusb_state),
@@ -1552,7 +1949,7 @@ static struct dvb_usb_device_properties cxusb_bluebird_lgh064f_properties = {
}
}
},
- }},
+ } },
},
},
@@ -1585,8 +1982,10 @@ static struct dvb_usb_device_properties cxusb_bluebird_dee1601_properties = {
.usb_ctrl = DEVICE_SPECIFIC,
.firmware = "dvb-usb-bluebird-01.fw",
.download_firmware = bluebird_patch_dvico_firmware_download,
- /* use usb alt setting 0 for EP4 transfer (dvb-t),
- use usb alt setting 7 for EP2 transfer (atsc) */
+ /*
+ * use usb alt setting 0 for EP4 transfer (dvb-t),
+ * use usb alt setting 7 for EP2 transfer (atsc)
+ */
.size_of_priv = sizeof(struct cxusb_state),
@@ -1609,7 +2008,7 @@ static struct dvb_usb_device_properties cxusb_bluebird_dee1601_properties = {
}
}
},
- }},
+ } },
},
},
@@ -1634,7 +2033,7 @@ static struct dvb_usb_device_properties cxusb_bluebird_dee1601_properties = {
{ &cxusb_table[DVICO_BLUEBIRD_DUAL_1_WARM], NULL },
},
{ "DigitalNow DVB-T Dual USB",
- { &cxusb_table[DIGITALNOW_BLUEBIRD_DUAL_1_COLD], NULL },
+ { &cxusb_table[DIGITALNOW_BLUEBIRD_DUAL_1_COLD], NULL },
{ &cxusb_table[DIGITALNOW_BLUEBIRD_DUAL_1_WARM], NULL },
},
{ "DViCO FusionHDTV DVB-T Dual Digital 2",
@@ -1650,8 +2049,10 @@ static struct dvb_usb_device_properties cxusb_bluebird_lgz201_properties = {
.usb_ctrl = DEVICE_SPECIFIC,
.firmware = "dvb-usb-bluebird-01.fw",
.download_firmware = bluebird_patch_dvico_firmware_download,
- /* use usb alt setting 0 for EP4 transfer (dvb-t),
- use usb alt setting 7 for EP2 transfer (atsc) */
+ /*
+ * use usb alt setting 0 for EP4 transfer (dvb-t),
+ * use usb alt setting 7 for EP2 transfer (atsc)
+ */
.size_of_priv = sizeof(struct cxusb_state),
@@ -1675,7 +2076,7 @@ static struct dvb_usb_device_properties cxusb_bluebird_lgz201_properties = {
}
}
},
- }},
+ } },
},
},
.power_ctrl = cxusb_bluebird_power_ctrl,
@@ -1706,8 +2107,11 @@ static struct dvb_usb_device_properties cxusb_bluebird_dtt7579_properties = {
.usb_ctrl = DEVICE_SPECIFIC,
.firmware = "dvb-usb-bluebird-01.fw",
.download_firmware = bluebird_patch_dvico_firmware_download,
- /* use usb alt setting 0 for EP4 transfer (dvb-t),
- use usb alt setting 7 for EP2 transfer (atsc) */
+
+ /*
+ * use usb alt setting 0 for EP4 transfer (dvb-t),
+ * use usb alt setting 7 for EP2 transfer (atsc)
+ */
.size_of_priv = sizeof(struct cxusb_state),
@@ -1731,7 +2135,7 @@ static struct dvb_usb_device_properties cxusb_bluebird_dtt7579_properties = {
}
}
},
- }},
+ } },
},
},
.power_ctrl = cxusb_bluebird_power_ctrl,
@@ -1783,7 +2187,7 @@ static struct dvb_usb_device_properties cxusb_bluebird_dualdig4_properties = {
}
}
},
- }},
+ } },
},
},
@@ -1837,7 +2241,7 @@ static struct dvb_usb_device_properties cxusb_bluebird_nano2_properties = {
}
}
},
- }},
+ } },
},
},
@@ -1864,7 +2268,8 @@ static struct dvb_usb_device_properties cxusb_bluebird_nano2_properties = {
}
};
-static struct dvb_usb_device_properties cxusb_bluebird_nano2_needsfirmware_properties = {
+static struct dvb_usb_device_properties
+cxusb_bluebird_nano2_needsfirmware_properties = {
.caps = DVB_USB_IS_AN_I2C_ADAPTER,
.usb_ctrl = DEVICE_SPECIFIC,
@@ -1893,7 +2298,7 @@ static struct dvb_usb_device_properties cxusb_bluebird_nano2_needsfirmware_prope
}
}
},
- }},
+ } },
},
},
@@ -1912,10 +2317,11 @@ static struct dvb_usb_device_properties cxusb_bluebird_nano2_needsfirmware_prope
},
.num_device_descs = 1,
- .devices = {
- { "DViCO FusionHDTV DVB-T NANO2 w/o firmware",
+ .devices = { {
+ "DViCO FusionHDTV DVB-T NANO2 w/o firmware",
{ &cxusb_table[DVICO_BLUEBIRD_DVB_T_NANO_2], NULL },
- { &cxusb_table[DVICO_BLUEBIRD_DVB_T_NANO_2_NFW_WARM], NULL },
+ { &cxusb_table[DVICO_BLUEBIRD_DVB_T_NANO_2_NFW_WARM],
+ NULL },
},
}
};
@@ -1946,7 +2352,7 @@ static struct dvb_usb_device_properties cxusb_aver_a868r_properties = {
}
}
},
- }},
+ } },
},
},
.power_ctrl = cxusb_aver_power_ctrl,
@@ -1992,7 +2398,7 @@ struct dvb_usb_device_properties cxusb_bluebird_dualdig4_rev2_properties = {
}
}
},
- }},
+ } },
},
},
@@ -2046,7 +2452,7 @@ static struct dvb_usb_device_properties cxusb_d680_dmb_properties = {
}
}
},
- }},
+ } },
},
},
@@ -2101,7 +2507,7 @@ static struct dvb_usb_device_properties cxusb_mygica_d689_properties = {
}
}
},
- }},
+ } },
},
},
@@ -2195,6 +2601,6 @@ module_usb_driver(cxusb_driver);
MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
MODULE_AUTHOR("Michael Krufky <mkrufky@linuxtv.org>");
MODULE_AUTHOR("Chris Pascoe <c.pascoe@itee.uq.edu.au>");
+MODULE_AUTHOR("Maciej S. Szmigiero <mail@maciej.szmigiero.name>");
MODULE_DESCRIPTION("Driver for Conexant USB2.0 hybrid reference design");
-MODULE_VERSION("1.0-alpha");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/usb/dvb-usb/cxusb.h b/drivers/media/usb/dvb-usb/cxusb.h
index 88f9b9804b25..9e374e53125b 100644
--- a/drivers/media/usb/dvb-usb/cxusb.h
+++ b/drivers/media/usb/dvb-usb/cxusb.h
@@ -2,9 +2,29 @@
#ifndef _DVB_USB_CXUSB_H_
#define _DVB_USB_CXUSB_H_
+#include <linux/completion.h>
+#include <linux/i2c.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/usb.h>
+#include <linux/workqueue.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-device.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
+
#define DVB_USB_LOG_PREFIX "cxusb"
#include "dvb-usb.h"
+#define CXUSB_VIDEO_URBS (5)
+#define CXUSB_VIDEO_URB_MAX_SIZE (512 * 1024)
+
+#define CXUSB_VIDEO_PKT_SIZE 3030
+#define CXUSB_VIDEO_MAX_FRAME_PKTS 346
+#define CXUSB_VIDEO_MAX_FRAME_SIZE (CXUSB_VIDEO_MAX_FRAME_PKTS * \
+ CXUSB_VIDEO_PKT_SIZE)
+
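For scale, CXUSB_VIDEO_MAX_FRAME_SIZE works out to 346 * 3030 = 1,048,380 bytes per full frame (just under 1 MiB), while each streaming URB is capped at CXUSB_VIDEO_URB_MAX_SIZE = 512 * 1024 = 524,288 bytes.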
/* usb commands - some of it are guesses, don't have a reference yet */
#define CMD_BLUEBIRD_GPIO_RW 0x05
@@ -29,11 +49,26 @@
#define CMD_ANALOG 0x50
#define CMD_DIGITAL 0x51
+#define CXUSB_BT656_PREAMBLE ((const u8 *)"\xff\x00\x00")
+
+#define CXUSB_BT656_FIELD_MASK BIT(6)
+#define CXUSB_BT656_FIELD_1 0
+#define CXUSB_BT656_FIELD_2 BIT(6)
+
+#define CXUSB_BT656_VBI_MASK BIT(5)
+#define CXUSB_BT656_VBI_ON BIT(5)
+#define CXUSB_BT656_VBI_OFF 0
+
+#define CXUSB_BT656_SEAV_MASK BIT(4)
+#define CXUSB_BT656_SEAV_EAV BIT(4)
+#define CXUSB_BT656_SEAV_SAV 0
+
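These masks describe the ITU-R BT.656 code byte that follows the 0xff 0x00 0x00 preamble. A small illustrative decoder (not part of the patch) shows how they combine:

/* Illustration only: decode the BT.656 code byte using the masks above. */
static bool example_is_eav(u8 code)
{
	return (code & CXUSB_BT656_SEAV_MASK) == CXUSB_BT656_SEAV_EAV;
}

static bool example_in_vbi(u8 code)
{
	return (code & CXUSB_BT656_VBI_MASK) == CXUSB_BT656_VBI_ON;
}

static unsigned int example_field_no(u8 code)
{
	return (code & CXUSB_BT656_FIELD_MASK) == CXUSB_BT656_FIELD_2 ? 2 : 1;
}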
/* Max transfer size done by I2C transfer functions */
#define MAX_XFER_SIZE 80
struct cxusb_state {
u8 gpio_write_state[3];
+ bool gpio_write_refresh[3];
struct i2c_client *i2c_client_demod;
struct i2c_client *i2c_client_tuner;
@@ -42,7 +77,128 @@ struct cxusb_state {
struct mutex stream_mutex;
u8 last_lock;
int (*fe_read_status)(struct dvb_frontend *fe,
- enum fe_status *status);
+ enum fe_status *status);
+};
+
+enum cxusb_open_type {
+ CXUSB_OPEN_INIT,
+ CXUSB_OPEN_NONE,
+ CXUSB_OPEN_ANALOG,
+ CXUSB_OPEN_DIGITAL
+};
+
+struct cxusb_medion_auxbuf {
+ u8 *buf;
+ unsigned int len;
+ unsigned int paylen;
+};
+
+enum cxusb_bt656_mode {
+ NEW_FRAME, FIRST_FIELD, SECOND_FIELD
+};
+
+enum cxusb_bt656_fmode {
+ START_SEARCH, LINE_SAMPLES, VBI_SAMPLES
};
+struct cxusb_bt656_params {
+ enum cxusb_bt656_mode mode;
+ enum cxusb_bt656_fmode fmode;
+ unsigned int pos;
+ unsigned int line;
+ unsigned int linesamples;
+ u8 *buf;
+};
+
+struct cxusb_medion_dev {
+ /* has to be the first one */
+ struct cxusb_state state;
+
+ struct dvb_usb_device *dvbdev;
+
+ enum cxusb_open_type open_type;
+ unsigned int open_ctr;
+ struct mutex open_lock;
+
+#ifdef CONFIG_DVB_USB_CXUSB_ANALOG
+ struct v4l2_device v4l2dev;
+ struct v4l2_subdev *cx25840;
+ struct v4l2_subdev *tuner;
+ struct v4l2_subdev *tda9887;
+ struct video_device *videodev, *radiodev;
+ struct mutex dev_lock;
+
+ struct vb2_queue videoqueue;
+ u32 input;
+ bool stop_streaming;
+ u32 width, height;
+ u32 field_order;
+ struct cxusb_medion_auxbuf auxbuf;
+ v4l2_std_id norm;
+
+ struct urb *streamurbs[CXUSB_VIDEO_URBS];
+ unsigned long urbcomplete;
+ struct work_struct urbwork;
+ unsigned int nexturb;
+
+ struct cxusb_bt656_params bt656;
+ struct cxusb_medion_vbuffer *vbuf;
+ __u32 vbuf_sequence;
+
+ struct list_head buflist;
+
+ struct completion v4l2_release;
+#endif
+};
+
+struct cxusb_medion_vbuffer {
+ struct vb2_v4l2_buffer vb2;
+ struct list_head list;
+};
+
+/* defines for "debug" module parameter */
+#define CXUSB_DBG_RC BIT(0)
+#define CXUSB_DBG_I2C BIT(1)
+#define CXUSB_DBG_MISC BIT(2)
+#define CXUSB_DBG_BT656 BIT(3)
+#define CXUSB_DBG_URB BIT(4)
+#define CXUSB_DBG_OPS BIT(5)
+#define CXUSB_DBG_AUXB BIT(6)
+
+extern int dvb_usb_cxusb_debug;
+
+#define cxusb_vprintk(dvbdev, lvl, ...) do { \
+ struct cxusb_medion_dev *_cxdev = (dvbdev)->priv; \
+ if (dvb_usb_cxusb_debug & CXUSB_DBG_##lvl) \
+ v4l2_printk(KERN_DEBUG, \
+ &_cxdev->v4l2dev, __VA_ARGS__); \
+ } while (0)
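Usage sketch (illustrative only): the second argument names one of the CXUSB_DBG_* bits, so the message is emitted only when that bit is set in the driver's debug module parameter.

	/* i and len are whatever the caller has at hand */
	cxusb_vprintk(dvbdev, URB, "urb %d completed, %u bytes\n", i, len);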
+
+int cxusb_ctrl_msg(struct dvb_usb_device *d,
+ u8 cmd, const u8 *wbuf, int wlen, u8 *rbuf, int rlen);
+
+#ifdef CONFIG_DVB_USB_CXUSB_ANALOG
+int cxusb_medion_analog_init(struct dvb_usb_device *dvbdev);
+int cxusb_medion_register_analog(struct dvb_usb_device *dvbdev);
+void cxusb_medion_unregister_analog(struct dvb_usb_device *dvbdev);
+#else
+static inline int cxusb_medion_analog_init(struct dvb_usb_device *dvbdev)
+{
+ return -EINVAL;
+}
+
+static inline int cxusb_medion_register_analog(struct dvb_usb_device *dvbdev)
+{
+ return 0;
+}
+
+static inline void cxusb_medion_unregister_analog(struct dvb_usb_device *dvbdev)
+{
+}
+#endif
+
+int cxusb_medion_get(struct dvb_usb_device *dvbdev,
+ enum cxusb_open_type open_type);
+void cxusb_medion_put(struct dvb_usb_device *dvbdev);
+
#endif
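cxusb_medion_get() and cxusb_medion_put() arbitrate the exclusive analog/digital ownership tracked by open_type and open_ctr. A hedged sketch of the expected calling pattern follows; the names are illustrative, and the assumption that get() fails while the other mode holds the device is inferred from the open_type/open_ctr fields, not stated in this header.

static int example_analog_open(struct dvb_usb_device *dvbdev)
{
	int ret;

	/* assumption: fails if the digital side currently owns the device */
	ret = cxusb_medion_get(dvbdev, CXUSB_OPEN_ANALOG);
	if (ret)
		return ret;

	/* ... start analog capture ... */
	return 0;
}

static void example_analog_release(struct dvb_usb_device *dvbdev)
{
	cxusb_medion_put(dvbdev);
}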
diff --git a/drivers/media/usb/dvb-usb/dvb-usb-dvb.c b/drivers/media/usb/dvb-usb/dvb-usb-dvb.c
index 8056053c9ab0..0a7f8ba90992 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb-dvb.c
+++ b/drivers/media/usb/dvb-usb/dvb-usb-dvb.c
@@ -56,9 +56,6 @@ static int dvb_usb_ctrl_feed(struct dvb_demux_feed *dvbdmxfeed, int onoff)
* for reception.
*/
if (adap->feedcount == onoff && adap->feedcount > 0) {
- deb_ts("submitting all URBs\n");
- usb_urb_submit(&adap->fe_adap[adap->active_fe].stream);
-
deb_ts("controlling pid parser\n");
if (adap->props.fe[adap->active_fe].caps & DVB_USB_ADAP_HAS_PID_FILTER &&
adap->props.fe[adap->active_fe].caps &
@@ -80,6 +77,8 @@ static int dvb_usb_ctrl_feed(struct dvb_demux_feed *dvbdmxfeed, int onoff)
}
}
+ deb_ts("submitting all URBs\n");
+ usb_urb_submit(&adap->fe_adap[adap->active_fe].stream);
}
return 0;
}
diff --git a/drivers/media/usb/dvb-usb/dvb-usb-init.c b/drivers/media/usb/dvb-usb/dvb-usb-init.c
index e97f6edc98de..16a0b4a359ea 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb-init.c
+++ b/drivers/media/usb/dvb-usb/dvb-usb-init.c
@@ -130,6 +130,10 @@ static int dvb_usb_exit(struct dvb_usb_device *d)
dvb_usb_i2c_exit(d);
deb_info("state should be zero now: %x\n", d->state);
d->state = DVB_USB_STATE_INIT;
+
+ if (d->priv != NULL && d->props.priv_destroy != NULL)
+ d->props.priv_destroy(d);
+
kfree(d->priv);
kfree(d);
return 0;
@@ -151,6 +155,15 @@ static int dvb_usb_init(struct dvb_usb_device *d, short *adapter_nums)
err("no memory for priv in 'struct dvb_usb_device'");
return -ENOMEM;
}
+
+ if (d->props.priv_init != NULL) {
+ ret = d->props.priv_init(d);
+ if (ret != 0) {
+ kfree(d->priv);
+ d->priv = NULL;
+ return ret;
+ }
+ }
}
/* check the capabilities and set appropriate variables */
@@ -284,12 +297,15 @@ EXPORT_SYMBOL(dvb_usb_device_init);
void dvb_usb_device_exit(struct usb_interface *intf)
{
struct dvb_usb_device *d = usb_get_intfdata(intf);
- const char *name = "generic DVB-USB module";
+ const char *default_name = "generic DVB-USB module";
+ char name[40];
usb_set_intfdata(intf, NULL);
if (d != NULL && d->desc != NULL) {
- name = d->desc->name;
+ strscpy(name, d->desc->name, sizeof(name));
dvb_usb_exit(d);
+ } else {
+ strscpy(name, default_name, sizeof(name));
}
info("%s successfully deinitialized and disconnected.", name);
diff --git a/drivers/media/usb/dvb-usb/dvb-usb.h b/drivers/media/usb/dvb-usb/dvb-usb.h
index 32829bdd5f22..2eb0e24e8943 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb.h
+++ b/drivers/media/usb/dvb-usb/dvb-usb.h
@@ -129,6 +129,9 @@ struct usb_data_stream_properties {
* @frontend_ctrl: called to power on/off active frontend.
* @streaming_ctrl: called to start and stop the MPEG2-TS streaming of the
* device (not URB submitting/killing).
+ * This callback will be called without data URBs being active - data URBs
+ * will be submitted only after streaming_ctrl(1) returns successfully and
+ * they will be killed before streaming_ctrl(0) gets called.
* @pid_filter_ctrl: called to en/disable the PID filter, if any.
* @pid_filter: called to set/unset a PID for filtering.
* @frontend_attach: called to attach the possible frontends (fill fe-field
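Given the guarantee documented above (and the URB submission reordering in dvb-usb-dvb.c earlier in this patch), a streaming_ctrl callback can now reconfigure the endpoint without racing in-flight transfers. A hypothetical example, not taken from any driver:

static int example_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
{
	/* safe: no data URBs are active while this runs */
	return usb_set_interface(adap->dev->udev, 0, onoff ? 1 : 0);
}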
@@ -234,6 +237,11 @@ enum dvb_usb_mode {
*
* @size_of_priv: how many bytes shall be allocated for the private field
* of struct dvb_usb_device.
+ * @priv_init: optional callback to initialize the memory that the private
+ * field of struct dvb_usb_device points to, called just after that memory
+ * has been allocated and zeroed.
+ * @priv_destroy: counterpart of priv_init, called just before the memory
+ * pointed to by the private field of struct dvb_usb_device is freed.
*
* @power_ctrl: called to enable/disable power of the device.
* @read_mac_address: called to read the MAC address of the device.
@@ -275,6 +283,8 @@ struct dvb_usb_device_properties {
int no_reconnect;
int size_of_priv;
+ int (*priv_init)(struct dvb_usb_device *);
+ void (*priv_destroy)(struct dvb_usb_device *);
int num_adapters;
struct dvb_usb_adapter_properties adapter[MAX_NO_OF_ADAPTER_PER_DEVICE];
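A minimal sketch of how a driver can hook the new callbacks (illustrative names only; the cxusb changes earlier in this patch are the real user). The core allocates and zeroes size_of_priv bytes, calls priv_init on them, and calls priv_destroy just before kfree():

struct example_priv {
	struct mutex lock;
};

static int example_priv_init(struct dvb_usb_device *d)
{
	struct example_priv *priv = d->priv;	/* already zeroed by the core */

	mutex_init(&priv->lock);
	return 0;
}

static void example_priv_destroy(struct dvb_usb_device *d)
{
	struct example_priv *priv = d->priv;

	mutex_destroy(&priv->lock);
}

static struct dvb_usb_device_properties example_properties = {
	.size_of_priv = sizeof(struct example_priv),
	.priv_init = example_priv_init,
	.priv_destroy = example_priv_destroy,
	/* remaining fields as usual */
};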
diff --git a/drivers/media/usb/em28xx/em28xx-input.c b/drivers/media/usb/em28xx/em28xx-input.c
index d85ea1af6aa1..5aa15a7a49de 100644
--- a/drivers/media/usb/em28xx/em28xx-input.c
+++ b/drivers/media/usb/em28xx/em28xx-input.c
@@ -24,6 +24,7 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/usb.h>
+#include <linux/usb/input.h>
#include <linux/slab.h>
#include <linux/bitrev.h>
@@ -58,7 +59,6 @@ struct em28xx_ir_poll_result {
struct em28xx_IR {
struct em28xx *dev;
struct rc_dev *rc;
- char name[32];
char phys[32];
/* poll decoder */
@@ -277,21 +277,8 @@ static int em2874_polling_getkey(struct em28xx_IR *ir,
break;
case RC_PROTO_BIT_NEC:
- poll_result->scancode = msg[1] << 8 | msg[2];
- if ((msg[3] ^ msg[4]) != 0xff) { /* 32 bits NEC */
- poll_result->protocol = RC_PROTO_NEC32;
- poll_result->scancode = RC_SCANCODE_NEC32((msg[1] << 24) |
- (msg[2] << 16) |
- (msg[3] << 8) |
- (msg[4]));
- } else if ((msg[1] ^ msg[2]) != 0xff) { /* 24 bits NEC */
- poll_result->protocol = RC_PROTO_NECX;
- poll_result->scancode = RC_SCANCODE_NECX(msg[1] << 8 |
- msg[2], msg[3]);
- } else { /* Normal NEC */
- poll_result->protocol = RC_PROTO_NEC;
- poll_result->scancode = RC_SCANCODE_NEC(msg[1], msg[3]);
- }
+ poll_result->scancode = ir_nec_bytes_to_scancode(msg[1], msg[2], msg[3], msg[4],
+ &poll_result->protocol);
break;
case RC_PROTO_BIT_RC6_0:
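The open-coded NEC variant detection is replaced by the shared rc-core helper. Its contract, as the removed code above illustrates: the four bytes are address, ~address, command, ~command; the helper returns the scancode and reports the detected protocol through its last argument. Illustrative call, not driver code:

	enum rc_proto proto;
	u32 scancode;

	scancode = ir_nec_bytes_to_scancode(msg[1], msg[2], msg[3], msg[4],
					    &proto);
	/*
	 * proto is RC_PROTO_NEC when both byte pairs are complements,
	 * RC_PROTO_NECX when only the command pair is, and RC_PROTO_NEC32
	 * when even the command pair is not.
	 */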
@@ -617,10 +604,7 @@ static int em28xx_register_snapshot_button(struct em28xx *dev)
set_bit(EM28XX_SNAPSHOT_KEY, input_dev->keybit);
input_dev->keycodesize = 0;
input_dev->keycodemax = 0;
- input_dev->id.bustype = BUS_USB;
- input_dev->id.vendor = le16_to_cpu(udev->descriptor.idVendor);
- input_dev->id.product = le16_to_cpu(udev->descriptor.idProduct);
- input_dev->id.version = 1;
+ usb_to_input_id(udev, &input_dev->id);
input_dev->dev.parent = &dev->intf->dev;
err = input_register_device(input_dev);
@@ -832,19 +816,12 @@ static int em28xx_ir_init(struct em28xx *dev)
/* This is how often we ask the chip for IR information */
ir->polling = 100; /* ms */
- /* init input device */
- snprintf(ir->name, sizeof(ir->name), "%s IR",
- dev_name(&dev->intf->dev));
-
usb_make_path(udev, ir->phys, sizeof(ir->phys));
strlcat(ir->phys, "/input0", sizeof(ir->phys));
- rc->device_name = ir->name;
+ rc->device_name = em28xx_boards[dev->model].name;
rc->input_phys = ir->phys;
- rc->input_id.bustype = BUS_USB;
- rc->input_id.version = 1;
- rc->input_id.vendor = le16_to_cpu(udev->descriptor.idVendor);
- rc->input_id.product = le16_to_cpu(udev->descriptor.idProduct);
+ usb_to_input_id(udev, &rc->input_id);
rc->dev.parent = &dev->intf->dev;
rc->driver_name = MODULE_NAME;
diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c
index f43717ea831d..0512e1959394 100644
--- a/drivers/media/usb/em28xx/em28xx-video.c
+++ b/drivers/media/usb/em28xx/em28xx-video.c
@@ -1984,7 +1984,6 @@ static int vidioc_s_register(struct file *file, void *priv,
static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- struct video_device *vdev = video_devdata(file);
struct em28xx *dev = video_drvdata(file);
struct em28xx_v4l2 *v4l2 = dev->v4l2;
struct usb_device *udev = interface_to_usbdev(dev->intf);
@@ -1993,23 +1992,12 @@ static int vidioc_querycap(struct file *file, void *priv,
strscpy(cap->card, em28xx_boards[dev->model].name, sizeof(cap->card));
usb_make_path(udev, cap->bus_info, sizeof(cap->bus_info));
- if (vdev->vfl_type == VFL_TYPE_GRABBER)
- cap->device_caps = V4L2_CAP_READWRITE |
- V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
- else if (vdev->vfl_type == VFL_TYPE_RADIO)
- cap->device_caps = V4L2_CAP_RADIO;
- else
- cap->device_caps = V4L2_CAP_READWRITE | V4L2_CAP_VBI_CAPTURE;
-
+ cap->capabilities = V4L2_CAP_DEVICE_CAPS | V4L2_CAP_READWRITE |
+ V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
if (dev->int_audio_type != EM28XX_INT_AUDIO_NONE)
- cap->device_caps |= V4L2_CAP_AUDIO;
-
+ cap->capabilities |= V4L2_CAP_AUDIO;
if (dev->tuner_type != TUNER_ABSENT)
- cap->device_caps |= V4L2_CAP_TUNER;
-
- cap->capabilities = cap->device_caps |
- V4L2_CAP_DEVICE_CAPS | V4L2_CAP_READWRITE |
- V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->capabilities |= V4L2_CAP_TUNER;
if (video_is_registered(&v4l2->vbi_dev))
cap->capabilities |= V4L2_CAP_VBI_CAPTURE;
if (video_is_registered(&v4l2->radio_dev))
@@ -2782,6 +2770,13 @@ static int em28xx_v4l2_init(struct em28xx *dev)
mutex_init(&v4l2->vb_vbi_queue_lock);
v4l2->vdev.queue = &v4l2->vb_vidq;
v4l2->vdev.queue->lock = &v4l2->vb_queue_lock;
+ v4l2->vdev.device_caps = V4L2_CAP_READWRITE | V4L2_CAP_VIDEO_CAPTURE |
+ V4L2_CAP_STREAMING;
+ if (dev->int_audio_type != EM28XX_INT_AUDIO_NONE)
+ v4l2->vdev.device_caps |= V4L2_CAP_AUDIO;
+ if (dev->tuner_type != TUNER_ABSENT)
+ v4l2->vdev.device_caps |= V4L2_CAP_TUNER;
+
/* disable inapplicable ioctls */
if (dev->is_webcam) {
@@ -2818,6 +2813,10 @@ static int em28xx_v4l2_init(struct em28xx *dev)
v4l2->vbi_dev.queue = &v4l2->vb_vbiq;
v4l2->vbi_dev.queue->lock = &v4l2->vb_vbi_queue_lock;
+ v4l2->vbi_dev.device_caps = V4L2_CAP_STREAMING |
+ V4L2_CAP_READWRITE | V4L2_CAP_VBI_CAPTURE;
+ if (dev->tuner_type != TUNER_ABSENT)
+ v4l2->vbi_dev.device_caps |= V4L2_CAP_TUNER;
/* disable inapplicable ioctls */
v4l2_disable_ioctl(&v4l2->vbi_dev, VIDIOC_S_PARM);
@@ -2845,6 +2844,7 @@ static int em28xx_v4l2_init(struct em28xx *dev)
if (em28xx_boards[dev->model].radio.type == EM28XX_RADIO) {
em28xx_vdev_init(dev, &v4l2->radio_dev, &em28xx_radio_template,
"radio");
+ v4l2->radio_dev.device_caps = V4L2_CAP_RADIO | V4L2_CAP_TUNER;
ret = video_register_device(&v4l2->radio_dev, VFL_TYPE_RADIO,
radio_nr[dev->devno]);
if (ret < 0) {
diff --git a/drivers/media/usb/go7007/go7007-v4l2.c b/drivers/media/usb/go7007/go7007-v4l2.c
index b63b7bb7745c..88edfef80b40 100644
--- a/drivers/media/usb/go7007/go7007-v4l2.c
+++ b/drivers/media/usb/go7007/go7007-v4l2.c
@@ -279,15 +279,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strscpy(cap->driver, "go7007", sizeof(cap->driver));
strscpy(cap->card, go->name, sizeof(cap->card));
strscpy(cap->bus_info, go->bus_info, sizeof(cap->bus_info));
-
- cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
- V4L2_CAP_STREAMING;
-
- if (go->board_info->num_aud_inputs)
- cap->device_caps |= V4L2_CAP_AUDIO;
- if (go->board_info->flags & GO7007_BOARD_HAS_TUNER)
- cap->device_caps |= V4L2_CAP_TUNER;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -1114,6 +1105,12 @@ int go7007_v4l2_init(struct go7007 *go)
*vdev = go7007_template;
vdev->lock = &go->serialize_lock;
vdev->queue = &go->vidq;
+ vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
+ V4L2_CAP_STREAMING;
+ if (go->board_info->num_aud_inputs)
+ vdev->device_caps |= V4L2_CAP_AUDIO;
+ if (go->board_info->flags & GO7007_BOARD_HAS_TUNER)
+ vdev->device_caps |= V4L2_CAP_TUNER;
video_set_drvdata(vdev, go);
vdev->v4l2_dev = &go->v4l2_dev;
if (!v4l2_device_has_op(&go->v4l2_dev, 0, video, querystd))
diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c
index a7ed5257cdba..be11f7830bca 100644
--- a/drivers/media/usb/gspca/gspca.c
+++ b/drivers/media/usb/gspca/gspca.c
@@ -1209,10 +1209,6 @@ static int vidioc_querycap(struct file *file, void *priv,
}
usb_make_path(gspca_dev->dev, (char *) cap->bus_info,
sizeof(cap->bus_info));
- cap->device_caps = V4L2_CAP_VIDEO_CAPTURE
- | V4L2_CAP_STREAMING
- | V4L2_CAP_READWRITE;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -1508,6 +1504,8 @@ int gspca_dev_probe2(struct usb_interface *intf,
gspca_dev->empty_packet = -1; /* don't check the empty packets */
gspca_dev->vdev = gspca_template;
gspca_dev->vdev.v4l2_dev = &gspca_dev->v4l2_dev;
+ gspca_dev->vdev.device_caps = V4L2_CAP_VIDEO_CAPTURE |
+ V4L2_CAP_STREAMING | V4L2_CAP_READWRITE;
video_set_drvdata(&gspca_dev->vdev, gspca_dev);
gspca_dev->module = module;
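The em28xx, go7007 and gspca conversions above (and the hackrf, hdpvr, msi2500, pvrusb2, pwc, s2255, stk1160 and stkwebcam ones that follow) all apply the same pattern: capability flags move out of vidioc_querycap() and onto the video_device itself, so the V4L2 core can report device_caps consistently. In outline (illustrative, not a quote of any one driver):

static int example_register_video_node(struct video_device *vdev)
{
	/* advertise caps on the device node; the core derives querycap's
	 * device_caps from this field */
	vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
			    V4L2_CAP_READWRITE;
	return video_register_device(vdev, VFL_TYPE_GRABBER, -1);
}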
diff --git a/drivers/media/usb/hackrf/hackrf.c b/drivers/media/usb/hackrf/hackrf.c
index 7d4a9452f545..cec841ad7495 100644
--- a/drivers/media/usb/hackrf/hackrf.c
+++ b/drivers/media/usb/hackrf/hackrf.c
@@ -896,19 +896,13 @@ static int hackrf_querycap(struct file *file, void *fh,
{
struct hackrf_dev *dev = video_drvdata(file);
struct usb_interface *intf = dev->intf;
- struct video_device *vdev = video_devdata(file);
dev_dbg(&intf->dev, "\n");
- cap->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_READWRITE;
- if (vdev->vfl_dir == VFL_DIR_RX)
- cap->device_caps |= V4L2_CAP_SDR_CAPTURE | V4L2_CAP_TUNER;
- else
- cap->device_caps |= V4L2_CAP_SDR_OUTPUT | V4L2_CAP_MODULATOR;
-
cap->capabilities = V4L2_CAP_SDR_CAPTURE | V4L2_CAP_TUNER |
V4L2_CAP_SDR_OUTPUT | V4L2_CAP_MODULATOR |
- V4L2_CAP_DEVICE_CAPS | cap->device_caps;
+ V4L2_CAP_STREAMING | V4L2_CAP_READWRITE |
+ V4L2_CAP_DEVICE_CAPS;
strscpy(cap->driver, KBUILD_MODNAME, sizeof(cap->driver));
strscpy(cap->card, dev->rx_vdev.name, sizeof(cap->card));
usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info));
@@ -1487,6 +1481,8 @@ static int hackrf_probe(struct usb_interface *intf,
dev->rx_vdev.ctrl_handler = &dev->rx_ctrl_handler;
dev->rx_vdev.lock = &dev->v4l2_lock;
dev->rx_vdev.vfl_dir = VFL_DIR_RX;
+ dev->rx_vdev.device_caps = V4L2_CAP_STREAMING | V4L2_CAP_READWRITE |
+ V4L2_CAP_SDR_CAPTURE | V4L2_CAP_TUNER;
video_set_drvdata(&dev->rx_vdev, dev);
ret = video_register_device(&dev->rx_vdev, VFL_TYPE_SDR, -1);
if (ret) {
@@ -1505,6 +1501,8 @@ static int hackrf_probe(struct usb_interface *intf,
dev->tx_vdev.ctrl_handler = &dev->tx_ctrl_handler;
dev->tx_vdev.lock = &dev->v4l2_lock;
dev->tx_vdev.vfl_dir = VFL_DIR_TX;
+ dev->tx_vdev.device_caps = V4L2_CAP_STREAMING | V4L2_CAP_READWRITE |
+ V4L2_CAP_SDR_OUTPUT | V4L2_CAP_MODULATOR;
video_set_drvdata(&dev->tx_vdev, dev);
ret = video_register_device(&dev->tx_vdev, VFL_TYPE_SDR, -1);
if (ret) {
diff --git a/drivers/media/usb/hdpvr/hdpvr-video.c b/drivers/media/usb/hdpvr/hdpvr-video.c
index 7580fc5f2f12..5b3e67b80627 100644
--- a/drivers/media/usb/hdpvr/hdpvr-video.c
+++ b/drivers/media/usb/hdpvr/hdpvr-video.c
@@ -435,7 +435,7 @@ static ssize_t hdpvr_read(struct file *file, char __user *buffer, size_t count,
/* wait for the first buffer */
if (!(file->f_flags & O_NONBLOCK)) {
if (wait_event_interruptible(dev->wait_data,
- hdpvr_get_next_buffer(dev)))
+ !list_empty_careful(&dev->rec_buff_list)))
return -ERESTARTSYS;
}
@@ -461,10 +461,17 @@ static ssize_t hdpvr_read(struct file *file, char __user *buffer, size_t count,
goto err;
}
if (!err) {
- v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev,
- "timeout: restart streaming\n");
+ v4l2_info(&dev->v4l2_dev,
+ "timeout: restart streaming\n");
+ mutex_lock(&dev->io_mutex);
hdpvr_stop_streaming(dev);
- msecs_to_jiffies(4000);
+ mutex_unlock(&dev->io_mutex);
+ /*
+ * The FW needs about 4 seconds after streaming
+ * stopped before it is ready to restart
+ * streaming.
+ */
+ msleep(4000);
err = hdpvr_start_streaming(dev);
if (err) {
ret = err;
@@ -577,9 +584,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strscpy(cap->driver, "hdpvr", sizeof(cap->driver));
strscpy(cap->card, "Hauppauge HD PVR", sizeof(cap->card));
usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info));
- cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_AUDIO |
- V4L2_CAP_READWRITE;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -1127,9 +1131,7 @@ static void hdpvr_device_release(struct video_device *vdev)
struct hdpvr_device *dev = video_get_drvdata(vdev);
hdpvr_delete(dev);
- mutex_lock(&dev->io_mutex);
flush_work(&dev->worker);
- mutex_unlock(&dev->io_mutex);
v4l2_device_unregister(&dev->v4l2_dev);
v4l2_ctrl_handler_free(&dev->hdl);
@@ -1150,6 +1152,8 @@ static const struct video_device hdpvr_video_template = {
.release = hdpvr_device_release,
.ioctl_ops = &hdpvr_ioctl_ops,
.tvnorms = V4L2_STD_ALL,
+ .device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_AUDIO |
+ V4L2_CAP_READWRITE,
};
static const struct v4l2_ctrl_ops hdpvr_ctrl_ops = {
diff --git a/drivers/media/usb/msi2500/msi2500.c b/drivers/media/usb/msi2500/msi2500.c
index b405bc3c2781..4c9b2a12acfb 100644
--- a/drivers/media/usb/msi2500/msi2500.c
+++ b/drivers/media/usb/msi2500/msi2500.c
@@ -598,9 +598,6 @@ static int msi2500_querycap(struct file *file, void *fh,
strscpy(cap->driver, KBUILD_MODNAME, sizeof(cap->driver));
strscpy(cap->card, dev->vdev.name, sizeof(cap->card));
usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info));
- cap->device_caps = V4L2_CAP_SDR_CAPTURE | V4L2_CAP_STREAMING |
- V4L2_CAP_READWRITE | V4L2_CAP_TUNER;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -1274,6 +1271,8 @@ static int msi2500_probe(struct usb_interface *intf,
dev->v4l2_dev.ctrl_handler = &dev->hdl;
dev->vdev.v4l2_dev = &dev->v4l2_dev;
dev->vdev.lock = &dev->v4l2_lock;
+ dev->vdev.device_caps = V4L2_CAP_SDR_CAPTURE | V4L2_CAP_STREAMING |
+ V4L2_CAP_READWRITE | V4L2_CAP_TUNER;
ret = video_register_device(&dev->vdev, VFL_TYPE_SDR, -1);
if (ret) {
diff --git a/drivers/media/usb/pvrusb2/Kconfig b/drivers/media/usb/pvrusb2/Kconfig
index 64f9df067269..e6a4f730591b 100644
--- a/drivers/media/usb/pvrusb2/Kconfig
+++ b/drivers/media/usb/pvrusb2/Kconfig
@@ -41,6 +41,8 @@ config VIDEO_PVRUSB2_DVB
select DVB_S5H1409 if MEDIA_SUBDRV_AUTOSELECT
select DVB_S5H1411 if MEDIA_SUBDRV_AUTOSELECT
select DVB_TDA10048 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_LGDT3306A if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_SI2168 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_TDA18271 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_SIMPLE if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_TDA8290 if MEDIA_SUBDRV_AUTOSELECT
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-cx2584x-v4l.c b/drivers/media/usb/pvrusb2/pvrusb2-cx2584x-v4l.c
index 58ca7498e119..e4b31ae02f59 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-cx2584x-v4l.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-cx2584x-v4l.c
@@ -101,10 +101,35 @@ static const struct routing_scheme routing_defav400 = {
.cnt = ARRAY_SIZE(routing_schemeav400),
};
+static const struct routing_scheme_item routing_scheme160xxx[] = {
+ [PVR2_CVAL_INPUT_TV] = {
+ .vid = CX25840_COMPOSITE7,
+ .aud = CX25840_AUDIO8,
+ },
+ [PVR2_CVAL_INPUT_RADIO] = {
+ .vid = CX25840_COMPOSITE4,
+ .aud = CX25840_AUDIO6,
+ },
+ [PVR2_CVAL_INPUT_COMPOSITE] = {
+ .vid = CX25840_COMPOSITE3,
+ .aud = CX25840_AUDIO_SERIAL,
+ },
+ [PVR2_CVAL_INPUT_SVIDEO] = {
+ .vid = CX25840_SVIDEO1,
+ .aud = CX25840_AUDIO_SERIAL,
+ },
+};
+
+static const struct routing_scheme routing_def160xxx = {
+ .def = routing_scheme160xxx,
+ .cnt = ARRAY_SIZE(routing_scheme160xxx),
+};
+
static const struct routing_scheme *routing_schemes[] = {
[PVR2_ROUTING_SCHEME_HAUPPAUGE] = &routing_def0,
[PVR2_ROUTING_SCHEME_GOTVIEW] = &routing_defgv,
[PVR2_ROUTING_SCHEME_AV400] = &routing_defav400,
+ [PVR2_ROUTING_SCHEME_HAUP160XXX] = &routing_def160xxx,
};
void pvr2_cx25840_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd)
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-devattr.c b/drivers/media/usb/pvrusb2/pvrusb2-devattr.c
index d476c492b87e..1fcf63218885 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-devattr.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-devattr.c
@@ -27,6 +27,9 @@ pvr2_device_desc structures.
#include "tda18271.h"
#include "tda8290.h"
#include "tuner-simple.h"
+#include "si2157.h"
+#include "lgdt3306a.h"
+#include "si2168.h"
#endif
@@ -178,10 +181,10 @@ static struct lgdt330x_config pvr2_lgdt3303_config = {
static int pvr2_lgdt3303_attach(struct pvr2_dvb_adapter *adap)
{
- adap->fe = dvb_attach(lgdt330x_attach, &pvr2_lgdt3303_config,
- 0x0e,
- &adap->channel.hdw->i2c_adap);
- if (adap->fe)
+ adap->fe[0] = dvb_attach(lgdt330x_attach, &pvr2_lgdt3303_config,
+ 0x0e,
+ &adap->channel.hdw->i2c_adap);
+ if (adap->fe[0])
return 0;
return -EIO;
@@ -189,7 +192,7 @@ static int pvr2_lgdt3303_attach(struct pvr2_dvb_adapter *adap)
static int pvr2_lgh06xf_attach(struct pvr2_dvb_adapter *adap)
{
- dvb_attach(simple_tuner_attach, adap->fe,
+ dvb_attach(simple_tuner_attach, adap->fe[0],
&adap->channel.hdw->i2c_adap, 0x61,
TUNER_LG_TDVS_H06XF);
@@ -238,10 +241,10 @@ static struct lgdt330x_config pvr2_lgdt3302_config = {
static int pvr2_lgdt3302_attach(struct pvr2_dvb_adapter *adap)
{
- adap->fe = dvb_attach(lgdt330x_attach, &pvr2_lgdt3302_config,
- 0x0e,
- &adap->channel.hdw->i2c_adap);
- if (adap->fe)
+ adap->fe[0] = dvb_attach(lgdt330x_attach, &pvr2_lgdt3302_config,
+ 0x0e,
+ &adap->channel.hdw->i2c_adap);
+ if (adap->fe[0])
return 0;
return -EIO;
@@ -249,7 +252,7 @@ static int pvr2_lgdt3302_attach(struct pvr2_dvb_adapter *adap)
static int pvr2_fcv1236d_attach(struct pvr2_dvb_adapter *adap)
{
- dvb_attach(simple_tuner_attach, adap->fe,
+ dvb_attach(simple_tuner_attach, adap->fe[0],
&adap->channel.hdw->i2c_adap, 0x61,
TUNER_PHILIPS_FCV1236D);
@@ -325,9 +328,9 @@ static struct tda18271_config hauppauge_tda18271_dvb_config = {
static int pvr2_tda10048_attach(struct pvr2_dvb_adapter *adap)
{
- adap->fe = dvb_attach(tda10048_attach, &hauppauge_tda10048_config,
- &adap->channel.hdw->i2c_adap);
- if (adap->fe)
+ adap->fe[0] = dvb_attach(tda10048_attach, &hauppauge_tda10048_config,
+ &adap->channel.hdw->i2c_adap);
+ if (adap->fe[0])
return 0;
return -EIO;
@@ -335,10 +338,10 @@ static int pvr2_tda10048_attach(struct pvr2_dvb_adapter *adap)
static int pvr2_73xxx_tda18271_8295_attach(struct pvr2_dvb_adapter *adap)
{
- dvb_attach(tda829x_attach, adap->fe,
+ dvb_attach(tda829x_attach, adap->fe[0],
&adap->channel.hdw->i2c_adap, 0x42,
&tda829x_no_probe);
- dvb_attach(tda18271_attach, adap->fe, 0x60,
+ dvb_attach(tda18271_attach, adap->fe[0], 0x60,
&adap->channel.hdw->i2c_adap,
&hauppauge_tda18271_dvb_config);
@@ -423,9 +426,9 @@ static struct tda18271_config hauppauge_tda18271_config = {
static int pvr2_s5h1409_attach(struct pvr2_dvb_adapter *adap)
{
- adap->fe = dvb_attach(s5h1409_attach, &pvr2_s5h1409_config,
- &adap->channel.hdw->i2c_adap);
- if (adap->fe)
+ adap->fe[0] = dvb_attach(s5h1409_attach, &pvr2_s5h1409_config,
+ &adap->channel.hdw->i2c_adap);
+ if (adap->fe[0])
return 0;
return -EIO;
@@ -433,9 +436,9 @@ static int pvr2_s5h1409_attach(struct pvr2_dvb_adapter *adap)
static int pvr2_s5h1411_attach(struct pvr2_dvb_adapter *adap)
{
- adap->fe = dvb_attach(s5h1411_attach, &pvr2_s5h1411_config,
- &adap->channel.hdw->i2c_adap);
- if (adap->fe)
+ adap->fe[0] = dvb_attach(s5h1411_attach, &pvr2_s5h1411_config,
+ &adap->channel.hdw->i2c_adap);
+ if (adap->fe[0])
return 0;
return -EIO;
@@ -443,10 +446,10 @@ static int pvr2_s5h1411_attach(struct pvr2_dvb_adapter *adap)
static int pvr2_tda18271_8295_attach(struct pvr2_dvb_adapter *adap)
{
- dvb_attach(tda829x_attach, adap->fe,
+ dvb_attach(tda829x_attach, adap->fe[0],
&adap->channel.hdw->i2c_adap, 0x42,
&tda829x_no_probe);
- dvb_attach(tda18271_attach, adap->fe, 0x60,
+ dvb_attach(tda18271_attach, adap->fe[0], 0x60,
&adap->channel.hdw->i2c_adap,
&hauppauge_tda18271_config);
@@ -515,7 +518,166 @@ static const struct pvr2_device_desc pvr2_device_751xx = {
#endif
};
+/*------------------------------------------------------------------------*/
+/* Hauppauge PVR-USB2 Model 160000 / 160111 -- HVR-1955 / HVR-1975 */
+
+#ifdef CONFIG_VIDEO_PVRUSB2_DVB
+static int pvr2_si2157_attach(struct pvr2_dvb_adapter *adap);
+static int pvr2_si2168_attach(struct pvr2_dvb_adapter *adap);
+static int pvr2_dual_fe_attach(struct pvr2_dvb_adapter *adap);
+static int pvr2_lgdt3306a_attach(struct pvr2_dvb_adapter *adap);
+
+static const struct pvr2_dvb_props pvr2_160000_dvb_props = {
+ .frontend_attach = pvr2_dual_fe_attach,
+ .tuner_attach = pvr2_si2157_attach,
+};
+
+static const struct pvr2_dvb_props pvr2_160111_dvb_props = {
+ .frontend_attach = pvr2_lgdt3306a_attach,
+ .tuner_attach = pvr2_si2157_attach,
+};
+
+static int pvr2_si2157_attach(struct pvr2_dvb_adapter *adap)
+{
+ struct si2157_config si2157_config = {};
+
+ si2157_config.inversion = 1;
+ si2157_config.fe = adap->fe[0];
+
+ adap->i2c_client_tuner = dvb_module_probe("si2157", "si2177",
+ &adap->channel.hdw->i2c_adap,
+ 0x60, &si2157_config);
+
+ if (!adap->i2c_client_tuner)
+ return -ENODEV;
+
+ return 0;
+}
+
+static int pvr2_si2168_attach(struct pvr2_dvb_adapter *adap)
+{
+ struct si2168_config si2168_config = {};
+ struct i2c_adapter *adapter;
+
+ pr_debug("%s()\n", __func__);
+
+ si2168_config.fe = &adap->fe[1];
+ si2168_config.i2c_adapter = &adapter;
+ si2168_config.ts_mode = SI2168_TS_PARALLEL; /*2, 1-serial, 2-parallel.*/
+ si2168_config.ts_clock_gapped = 1; /*0-disabled, 1-enabled.*/
+ si2168_config.ts_clock_inv = 0; /*0-not-invert, 1-invert*/
+ si2168_config.spectral_inversion = 1; /*0-not-invert, 1-invert*/
+
+ adap->i2c_client_demod[1] = dvb_module_probe("si2168", NULL,
+ &adap->channel.hdw->i2c_adap,
+ 0x64, &si2168_config);
+
+ if (!adap->i2c_client_demod[1])
+ return -ENODEV;
+
+ return 0;
+}
+static int pvr2_lgdt3306a_attach(struct pvr2_dvb_adapter *adap)
+{
+ struct lgdt3306a_config lgdt3306a_config;
+ struct i2c_adapter *adapter;
+
+ pr_debug("%s()\n", __func__);
+
+ lgdt3306a_config.fe = &adap->fe[0];
+ lgdt3306a_config.i2c_adapter = &adapter;
+ lgdt3306a_config.deny_i2c_rptr = 1;
+ lgdt3306a_config.spectral_inversion = 1;
+ lgdt3306a_config.qam_if_khz = 4000;
+ lgdt3306a_config.vsb_if_khz = 3250;
+ lgdt3306a_config.mpeg_mode = LGDT3306A_MPEG_PARALLEL;
+ lgdt3306a_config.tpclk_edge = LGDT3306A_TPCLK_FALLING_EDGE;
+ lgdt3306a_config.tpvalid_polarity = LGDT3306A_TP_VALID_LOW;
+ lgdt3306a_config.xtalMHz = 25; /* demod clock MHz; 24/25 supported */
+
+ adap->i2c_client_demod[0] = dvb_module_probe("lgdt3306a", NULL,
+ &adap->channel.hdw->i2c_adap,
+ 0x59, &lgdt3306a_config);
+
+ if (!adap->i2c_client_demod[0])
+ return -ENODEV;
+
+ return 0;
+}
+
+static int pvr2_dual_fe_attach(struct pvr2_dvb_adapter *adap)
+{
+ pr_debug("%s()\n", __func__);
+
+ if (pvr2_lgdt3306a_attach(adap) != 0)
+ return -ENODEV;
+
+ if (pvr2_si2168_attach(adap) != 0) {
+ dvb_module_release(adap->i2c_client_demod[0]);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+#endif
+
+#define PVR2_FIRMWARE_160xxx "v4l-pvrusb2-160xxx-01.fw"
+static const char *pvr2_fw1_names_160xxx[] = {
+ PVR2_FIRMWARE_160xxx,
+};
+
+static const struct pvr2_device_client_desc pvr2_cli_160xxx[] = {
+ { .module_id = PVR2_CLIENT_ID_CX25840 },
+};
+
+static const struct pvr2_device_desc pvr2_device_160000 = {
+ .description = "WinTV HVR-1975 Model 160000",
+ .shortname = "160000",
+ .client_table.lst = pvr2_cli_160xxx,
+ .client_table.cnt = ARRAY_SIZE(pvr2_cli_160xxx),
+ .fx2_firmware.lst = pvr2_fw1_names_160xxx,
+ .fx2_firmware.cnt = ARRAY_SIZE(pvr2_fw1_names_160xxx),
+ .default_tuner_type = TUNER_ABSENT,
+ .flag_has_cx25840 = 1,
+ .flag_has_hauppauge_rom = 1,
+ .flag_has_analogtuner = 1,
+ .flag_has_composite = 1,
+ .flag_has_svideo = 1,
+ .flag_fx2_16kb = 1,
+ .signal_routing_scheme = PVR2_ROUTING_SCHEME_HAUPPAUGE,
+ .digital_control_scheme = PVR2_DIGITAL_SCHEME_HAUPPAUGE,
+ .default_std_mask = V4L2_STD_NTSC_M,
+ .led_scheme = PVR2_LED_SCHEME_HAUPPAUGE,
+ .ir_scheme = PVR2_IR_SCHEME_ZILOG,
+#ifdef CONFIG_VIDEO_PVRUSB2_DVB
+ .dvb_props = &pvr2_160000_dvb_props,
+#endif
+};
+
+static const struct pvr2_device_desc pvr2_device_160111 = {
+ .description = "WinTV HVR-1955 Model 160111",
+ .shortname = "160111",
+ .client_table.lst = pvr2_cli_160xxx,
+ .client_table.cnt = ARRAY_SIZE(pvr2_cli_160xxx),
+ .fx2_firmware.lst = pvr2_fw1_names_160xxx,
+ .fx2_firmware.cnt = ARRAY_SIZE(pvr2_fw1_names_160xxx),
+ .default_tuner_type = TUNER_ABSENT,
+ .flag_has_cx25840 = 1,
+ .flag_has_hauppauge_rom = 1,
+ .flag_has_analogtuner = 1,
+ .flag_has_composite = 1,
+ .flag_has_svideo = 1,
+ .flag_fx2_16kb = 1,
+ .signal_routing_scheme = PVR2_ROUTING_SCHEME_HAUPPAUGE,
+ .digital_control_scheme = PVR2_DIGITAL_SCHEME_HAUPPAUGE,
+ .default_std_mask = V4L2_STD_NTSC_M,
+ .led_scheme = PVR2_LED_SCHEME_HAUPPAUGE,
+ .ir_scheme = PVR2_IR_SCHEME_ZILOG,
+#ifdef CONFIG_VIDEO_PVRUSB2_DVB
+ .dvb_props = &pvr2_160111_dvb_props,
+#endif
+};
/*------------------------------------------------------------------------*/
@@ -542,6 +704,10 @@ struct usb_device_id pvr2_device_table[] = {
.driver_info = (kernel_ulong_t)&pvr2_device_751xx},
{ USB_DEVICE(0x0ccd, 0x0039),
.driver_info = (kernel_ulong_t)&pvr2_device_av400},
+ { USB_DEVICE(0x2040, 0x7502),
+ .driver_info = (kernel_ulong_t)&pvr2_device_160111},
+ { USB_DEVICE(0x2040, 0x7510),
+ .driver_info = (kernel_ulong_t)&pvr2_device_160000},
{ }
};
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-devattr.h b/drivers/media/usb/pvrusb2/pvrusb2-devattr.h
index ed0c129c1b3f..3c88f05d82d9 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-devattr.h
+++ b/drivers/media/usb/pvrusb2/pvrusb2-devattr.h
@@ -56,6 +56,7 @@ struct pvr2_string_table {
#define PVR2_ROUTING_SCHEME_GOTVIEW 1
#define PVR2_ROUTING_SCHEME_ONAIR 2
#define PVR2_ROUTING_SCHEME_AV400 3
+#define PVR2_ROUTING_SCHEME_HAUP160XXX 4
#define PVR2_DIGITAL_SCHEME_NONE 0
#define PVR2_DIGITAL_SCHEME_HAUPPAUGE 1
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-dvb.c b/drivers/media/usb/pvrusb2/pvrusb2-dvb.c
index d8874a952418..6954584526a3 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-dvb.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-dvb.c
@@ -334,26 +334,19 @@ static int pvr2_dvb_frontend_init(struct pvr2_dvb_adapter *adap)
goto done;
}
- if ((dvb_props->frontend_attach(adap) == 0) && (adap->fe)) {
-
- if (dvb_register_frontend(&adap->dvb_adap, adap->fe)) {
+ if (dvb_props->frontend_attach(adap) == 0 && adap->fe[0]) {
+ if (dvb_register_frontend(&adap->dvb_adap, adap->fe[0])) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
"frontend registration failed!");
- dvb_frontend_detach(adap->fe);
- adap->fe = NULL;
ret = -ENODEV;
- goto done;
+ goto fail_frontend0;
}
+ if (adap->fe[0]->ops.analog_ops.standby)
+ adap->fe[0]->ops.analog_ops.standby(adap->fe[0]);
- if (dvb_props->tuner_attach)
- dvb_props->tuner_attach(adap);
-
- if (adap->fe->ops.analog_ops.standby)
- adap->fe->ops.analog_ops.standby(adap->fe);
-
- /* Ensure all frontends negotiate bus access */
- adap->fe->ops.ts_bus_ctrl = pvr2_dvb_bus_ctrl;
-
+ pvr2_trace(PVR2_TRACE_INFO, "transferring fe[%d] ts_bus_ctrl() to pvr2_dvb_bus_ctrl()",
+ adap->fe[0]->id);
+ adap->fe[0]->ops.ts_bus_ctrl = pvr2_dvb_bus_ctrl;
} else {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
"no frontend was attached!");
@@ -361,17 +354,74 @@ static int pvr2_dvb_frontend_init(struct pvr2_dvb_adapter *adap)
return ret;
}
- done:
+ if (dvb_props->tuner_attach && dvb_props->tuner_attach(adap)) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS, "tuner attach failed");
+ ret = -ENODEV;
+ goto fail_tuner;
+ }
+
+ if (adap->fe[1]) {
+ adap->fe[1]->id = 1;
+ adap->fe[1]->tuner_priv = adap->fe[0]->tuner_priv;
+ memcpy(&adap->fe[1]->ops.tuner_ops,
+ &adap->fe[0]->ops.tuner_ops,
+ sizeof(struct dvb_tuner_ops));
+
+ if (dvb_register_frontend(&adap->dvb_adap, adap->fe[1])) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "frontend registration failed!");
+ ret = -ENODEV;
+ goto fail_frontend1;
+ }
+ /* MFE lock */
+ adap->dvb_adap.mfe_shared = 1;
+
+ if (adap->fe[1]->ops.analog_ops.standby)
+ adap->fe[1]->ops.analog_ops.standby(adap->fe[1]);
+
+ pvr2_trace(PVR2_TRACE_INFO, "transferring fe[%d] ts_bus_ctrl() to pvr2_dvb_bus_ctrl()",
+ adap->fe[1]->id);
+ adap->fe[1]->ops.ts_bus_ctrl = pvr2_dvb_bus_ctrl;
+ }
+done:
pvr2_channel_limit_inputs(&adap->channel, 0);
return ret;
+
+fail_frontend1:
+ dvb_frontend_detach(adap->fe[1]);
+ adap->fe[1] = NULL;
+fail_tuner:
+ dvb_unregister_frontend(adap->fe[0]);
+fail_frontend0:
+ dvb_frontend_detach(adap->fe[0]);
+ adap->fe[0] = NULL;
+ dvb_module_release(adap->i2c_client_tuner);
+ dvb_module_release(adap->i2c_client_demod[1]);
+ dvb_module_release(adap->i2c_client_demod[0]);
+
+ return ret;
}
static int pvr2_dvb_frontend_exit(struct pvr2_dvb_adapter *adap)
{
- if (adap->fe != NULL) {
- dvb_unregister_frontend(adap->fe);
- dvb_frontend_detach(adap->fe);
+ if (adap->fe[1]) {
+ dvb_unregister_frontend(adap->fe[1]);
+ dvb_frontend_detach(adap->fe[1]);
+ adap->fe[1] = NULL;
+ }
+ if (adap->fe[0]) {
+ dvb_unregister_frontend(adap->fe[0]);
+ dvb_frontend_detach(adap->fe[0]);
+ adap->fe[0] = NULL;
}
+
+ dvb_module_release(adap->i2c_client_tuner);
+ adap->i2c_client_tuner = NULL;
+ dvb_module_release(adap->i2c_client_demod[1]);
+ adap->i2c_client_demod[1] = NULL;
+ dvb_module_release(adap->i2c_client_demod[0]);
+ adap->i2c_client_demod[0] = NULL;
+
return 0;
}
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-dvb.h b/drivers/media/usb/pvrusb2/pvrusb2-dvb.h
index e7f71fb94a6e..c0b27f5211bf 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-dvb.h
+++ b/drivers/media/usb/pvrusb2/pvrusb2-dvb.h
@@ -18,7 +18,10 @@ struct pvr2_dvb_adapter {
struct dmxdev dmxdev;
struct dvb_demux demux;
struct dvb_net dvb_net;
- struct dvb_frontend *fe;
+ struct dvb_frontend *fe[2];
+
+ struct i2c_client *i2c_client_demod[2];
+ struct i2c_client *i2c_client_tuner;
int feedcount;
int max_feed_count;
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-fx2-cmd.h b/drivers/media/usb/pvrusb2/pvrusb2-fx2-cmd.h
index be76911335d3..e54aa42b4115 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-fx2-cmd.h
+++ b/drivers/media/usb/pvrusb2/pvrusb2-fx2-cmd.h
@@ -28,6 +28,10 @@
#define FX2CMD_FWPOST1 0x52u
+/* These 2 only exist on Model 160xxx */
+#define FX2CMD_HCW_DEMOD_RESET_PIN 0xd4u
+#define FX2CMD_HCW_MAKO_SLEEP_PIN 0xd5u
+
#define FX2CMD_POWER_OFF 0xdcu
#define FX2CMD_POWER_ON 0xdeu
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
index 70b5cb08d65b..6fe8b9af858a 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
@@ -306,6 +306,8 @@ static const struct pvr2_fx2cmd_descdef pvr2_fx2cmd_desc[] = {
{FX2CMD_ONAIR_DTV_STREAMING_OFF, "onair dtv stream off"},
{FX2CMD_ONAIR_DTV_POWER_ON, "onair dtv power on"},
{FX2CMD_ONAIR_DTV_POWER_OFF, "onair dtv power off"},
+ {FX2CMD_HCW_DEMOD_RESET_PIN, "hcw demod reset pin"},
+ {FX2CMD_HCW_MAKO_SLEEP_PIN, "hcw mako sleep pin"},
};
@@ -1670,7 +1672,7 @@ static int pvr2_decoder_enable(struct pvr2_hdw *hdw,int enablefl)
}
if (!hdw->flag_decoder_missed) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "WARNING: No decoder present");
+ "***WARNING*** No decoder present");
hdw->flag_decoder_missed = !0;
trace_stbit("flag_decoder_missed",
hdw->flag_decoder_missed);
@@ -2129,10 +2131,28 @@ static void pvr2_hdw_setup_low(struct pvr2_hdw *hdw)
((0) << 16));
}
- // This step MUST happen after the earlier powerup step.
+ /* This step MUST happen after the earlier powerup step */
pvr2_i2c_core_init(hdw);
if (!pvr2_hdw_dev_ok(hdw)) return;
+ /* Reset demod only on Hauppauge 160xxx platform */
+ if (le16_to_cpu(hdw->usb_dev->descriptor.idVendor) == 0x2040 &&
+ (le16_to_cpu(hdw->usb_dev->descriptor.idProduct) == 0x7502 ||
+ le16_to_cpu(hdw->usb_dev->descriptor.idProduct) == 0x7510)) {
+ pr_info("%s(): resetting 160xxx demod\n", __func__);
+ /* TODO: not sure this is the proper place for this one-time reset */
+ pvr2_issue_simple_cmd(hdw,
+ FX2CMD_HCW_DEMOD_RESET_PIN |
+ (1 << 8) |
+ ((0) << 16));
+ usleep_range(10000, 10500);
+ pvr2_issue_simple_cmd(hdw,
+ FX2CMD_HCW_DEMOD_RESET_PIN |
+ (1 << 8) |
+ ((1) << 16));
+ usleep_range(10000, 10500);
+ }
+
pvr2_hdw_load_modules(hdw);
if (!pvr2_hdw_dev_ok(hdw)) return;
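The added 160xxx-only sequence pulses the demod reset FX2 pin low, waits about 10 ms, then drives it high and waits again. Reading the encoding off these calls is an assumption on my part, not something documented here: the low byte selects the FX2 command, bit 8 flags that one argument byte is present, and the pin level rides at bits 16-23. A hypothetical wrapper for the same pattern:

/* Hypothetical wrapper, mirroring the calls above (encoding inferred). */
static int example_hcw_demod_reset(struct pvr2_hdw *hdw, int level)
{
	return pvr2_issue_simple_cmd(hdw,
				     FX2CMD_HCW_DEMOD_RESET_PIN |
				     (1 << 8) |
				     ((level ? 1 : 0) << 16));
}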
@@ -2356,7 +2376,7 @@ struct pvr2_hdw *pvr2_hdw_create(struct usb_interface *intf,
if (hdw_desc->flag_is_experimental) {
pvr2_trace(PVR2_TRACE_INFO, "**********");
pvr2_trace(PVR2_TRACE_INFO,
- "WARNING: Support for this device (%s) is experimental.",
+ "***WARNING*** Support for this device (%s) is experimental.",
hdw_desc->description);
pvr2_trace(PVR2_TRACE_INFO,
"Important functionality might not be entirely working.");
@@ -4002,6 +4022,20 @@ int pvr2_hdw_cmd_decoder_reset(struct pvr2_hdw *hdw)
static int pvr2_hdw_cmd_hcw_demod_reset(struct pvr2_hdw *hdw, int onoff)
{
hdw->flag_ok = !0;
+
+ /* Use this for Hauppauge 160xxx only */
+ if (le16_to_cpu(hdw->usb_dev->descriptor.idVendor) == 0x2040 &&
+ (le16_to_cpu(hdw->usb_dev->descriptor.idProduct) == 0x7502 ||
+ le16_to_cpu(hdw->usb_dev->descriptor.idProduct) == 0x7510)) {
+ pr_debug("%s(): resetting demod on Hauppauge 160xxx platform skipped\n",
+ __func__);
+ /* Can't reset 160xxx or it will trash Demod tristate */
+ return pvr2_issue_simple_cmd(hdw,
+ FX2CMD_HCW_MAKO_SLEEP_PIN |
+ (1 << 8) |
+ ((onoff ? 1 : 0) << 16));
+ }
+
return pvr2_issue_simple_cmd(hdw,
FX2CMD_HCW_DEMOD_RESETIN |
(1 << 8) |
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c b/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c
index 68e323f8d9cf..275394bafe7d 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c
@@ -333,11 +333,11 @@ static int i2c_hack_cx25840(struct pvr2_hdw *hdw,
if ((ret != 0) || (*rdata == 0x04) || (*rdata == 0x0a)) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "WARNING: Detected a wedged cx25840 chip; the device will not work.");
+ "***WARNING*** Detected a wedged cx25840 chip; the device will not work.");
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "WARNING: Try power cycling the pvrusb2 device.");
+ "***WARNING*** Try power cycling the pvrusb2 device.");
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "WARNING: Disabling further access to the device to prevent other foul-ups.");
+ "***WARNING*** Disabling further access to the device to prevent other foul-ups.");
// This blocks all further communication with the part.
hdw->i2c_func[0x44] = NULL;
pvr2_hdw_render_useless(hdw);
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-std.c b/drivers/media/usb/pvrusb2/pvrusb2-std.c
index 447279b4a545..e7ab41401577 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-std.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-std.c
@@ -343,7 +343,7 @@ struct v4l2_standard *pvr2_std_create_enum(unsigned int *countptr,
bcnt = pvr2_std_id_to_str(buf,sizeof(buf),fmsk);
pvr2_trace(
PVR2_TRACE_ERROR_LEGS,
- "WARNING: Failed to classify the following standard(s): %.*s",
+ "***WARNING*** Failed to classify the following standard(s): %.*s",
bcnt,buf);
}
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-sysfs.c b/drivers/media/usb/pvrusb2/pvrusb2-sysfs.c
index c5dbd5d96457..3e42e209be37 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-sysfs.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-sysfs.c
@@ -792,7 +792,8 @@ struct pvr2_sysfs_class *pvr2_sysfs_class_create(void)
void pvr2_sysfs_class_destroy(struct pvr2_sysfs_class *clp)
{
pvr2_sysfs_trace("Unregistering pvr2_sysfs_class id=%p", clp);
- class_unregister(&clp->class);
+ if (clp)
+ class_unregister(&clp->class);
}
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
index aa4fbc3e88cc..0aff2f396392 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
@@ -118,17 +118,6 @@ static int pvr2_querycap(struct file *file, void *priv, struct v4l2_capability *
cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TUNER |
V4L2_CAP_AUDIO | V4L2_CAP_RADIO |
V4L2_CAP_READWRITE | V4L2_CAP_DEVICE_CAPS;
- switch (fh->pdi->devbase.vfl_type) {
- case VFL_TYPE_GRABBER:
- cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_AUDIO;
- break;
- case VFL_TYPE_RADIO:
- cap->device_caps = V4L2_CAP_RADIO;
- break;
- default:
- return -EINVAL;
- }
- cap->device_caps |= V4L2_CAP_TUNER | V4L2_CAP_READWRITE;
return 0;
}
@@ -1195,6 +1184,8 @@ static void pvr2_v4l2_dev_init(struct pvr2_v4l2_dev *dip,
int unit_number;
struct pvr2_hdw *hdw;
int *nr_ptr = NULL;
+ u32 caps = V4L2_CAP_TUNER | V4L2_CAP_READWRITE;
+
dip->v4lp = vp;
hdw = vp->channel.mc_head->hdw;
@@ -1205,6 +1196,7 @@ static void pvr2_v4l2_dev_init(struct pvr2_v4l2_dev *dip,
dip->config = pvr2_config_mpeg;
dip->minor_type = pvr2_v4l_type_video;
nr_ptr = video_nr;
+ caps |= V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_AUDIO;
if (!dip->stream) {
pr_err(KBUILD_MODNAME
": Failed to set up pvrusb2 v4l video dev due to missing stream instance\n");
@@ -1215,12 +1207,14 @@ static void pvr2_v4l2_dev_init(struct pvr2_v4l2_dev *dip,
dip->config = pvr2_config_vbi;
dip->minor_type = pvr2_v4l_type_vbi;
nr_ptr = vbi_nr;
+ caps |= V4L2_CAP_VBI_CAPTURE;
break;
case VFL_TYPE_RADIO:
dip->stream = &vp->channel.mc_head->video_stream;
dip->config = pvr2_config_mpeg;
dip->minor_type = pvr2_v4l_type_radio;
nr_ptr = radio_nr;
+ caps |= V4L2_CAP_RADIO;
break;
default:
/* Bail out (this should be impossible) */
@@ -1231,6 +1225,7 @@ static void pvr2_v4l2_dev_init(struct pvr2_v4l2_dev *dip,
dip->devbase = vdev_template;
dip->devbase.release = pvr2_video_device_release;
dip->devbase.ioctl_ops = &pvr2_ioctl_ops;
+ dip->devbase.device_caps = caps;
{
int val;
pvr2_ctrl_get_value(
diff --git a/drivers/media/usb/pwc/pwc-if.c b/drivers/media/usb/pwc/pwc-if.c
index a15ad0f3faf1..9b76cf133d52 100644
--- a/drivers/media/usb/pwc/pwc-if.c
+++ b/drivers/media/usb/pwc/pwc-if.c
@@ -1113,6 +1113,8 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
pdev->v4l2_dev.ctrl_handler = &pdev->ctrl_handler;
pdev->vdev.v4l2_dev = &pdev->v4l2_dev;
pdev->vdev.lock = &pdev->v4l2_lock;
+ pdev->vdev.device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
+ V4L2_CAP_READWRITE;
rc = video_register_device(&pdev->vdev, VFL_TYPE_GRABBER, -1);
if (rc < 0) {
diff --git a/drivers/media/usb/pwc/pwc-v4l.c b/drivers/media/usb/pwc/pwc-v4l.c
index 5212898db77c..76c498cccc49 100644
--- a/drivers/media/usb/pwc/pwc-v4l.c
+++ b/drivers/media/usb/pwc/pwc-v4l.c
@@ -483,9 +483,6 @@ static int pwc_querycap(struct file *file, void *fh, struct v4l2_capability *cap
strscpy(cap->driver, PWC_NAME, sizeof(cap->driver));
strscpy(cap->card, pdev->vdev.name, sizeof(cap->card));
usb_make_path(pdev->udev, cap->bus_info, sizeof(cap->bus_info));
- cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
- V4L2_CAP_READWRITE;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
diff --git a/drivers/media/usb/pwc/pwc.h b/drivers/media/usb/pwc/pwc.h
index 8aa7e868e6b1..3362962d0d00 100644
--- a/drivers/media/usb/pwc/pwc.h
+++ b/drivers/media/usb/pwc/pwc.h
@@ -43,15 +43,15 @@
/* Trace certain actions in the driver */
-#define PWC_DEBUG_LEVEL_MODULE (1<<0)
-#define PWC_DEBUG_LEVEL_PROBE (1<<1)
-#define PWC_DEBUG_LEVEL_OPEN (1<<2)
-#define PWC_DEBUG_LEVEL_READ (1<<3)
-#define PWC_DEBUG_LEVEL_MEMORY (1<<4)
-#define PWC_DEBUG_LEVEL_FLOW (1<<5)
-#define PWC_DEBUG_LEVEL_SIZE (1<<6)
-#define PWC_DEBUG_LEVEL_IOCTL (1<<7)
-#define PWC_DEBUG_LEVEL_TRACE (1<<8)
+#define PWC_DEBUG_LEVEL_MODULE BIT(0)
+#define PWC_DEBUG_LEVEL_PROBE BIT(1)
+#define PWC_DEBUG_LEVEL_OPEN BIT(2)
+#define PWC_DEBUG_LEVEL_READ BIT(3)
+#define PWC_DEBUG_LEVEL_MEMORY BIT(4)
+#define PWC_DEBUG_LEVEL_FLOW BIT(5)
+#define PWC_DEBUG_LEVEL_SIZE BIT(6)
+#define PWC_DEBUG_LEVEL_IOCTL BIT(7)
+#define PWC_DEBUG_LEVEL_TRACE BIT(8)
#define PWC_DEBUG_MODULE(fmt, args...) PWC_DEBUG(MODULE, fmt, ##args)
#define PWC_DEBUG_PROBE(fmt, args...) PWC_DEBUG(PROBE, fmt, ##args)
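
For reference, BIT(n) from <linux/bits.h> expands to (1UL << (n)), so the
debug-level masks above keep their values; the only nuance is that BIT()
yields an unsigned long rather than a plain int (DEMO_* names below are
illustrative only):

#include <linux/bits.h>

#define DEMO_LEVEL_READ_OLD	(1 << 3)	/* int, value 0x8 */
#define DEMO_LEVEL_READ_NEW	BIT(3)		/* unsigned long, value 0x8 */
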
diff --git a/drivers/media/usb/s2255/Kconfig b/drivers/media/usb/s2255/Kconfig
index e0e3c0ba3f23..e4a0c914d9c3 100644
--- a/drivers/media/usb/s2255/Kconfig
+++ b/drivers/media/usb/s2255/Kconfig
@@ -3,7 +3,6 @@ config USB_S2255
tristate "USB Sensoray 2255 video capture device"
depends on VIDEO_V4L2
select VIDEOBUF2_VMALLOC
- default n
help
Say Y here if you want support for the Sensoray 2255 USB device.
This driver can be compiled as a module, called s2255drv.
diff --git a/drivers/media/usb/s2255/s2255drv.c b/drivers/media/usb/s2255/s2255drv.c
index 3eccbd48bdac..aa90558479f7 100644
--- a/drivers/media/usb/s2255/s2255drv.c
+++ b/drivers/media/usb/s2255/s2255drv.c
@@ -724,9 +724,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strscpy(cap->driver, "s2255", sizeof(cap->driver));
strscpy(cap->card, "s2255", sizeof(cap->card));
usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info));
- cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
- V4L2_CAP_READWRITE;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -1657,6 +1654,8 @@ static int s2255_probe_v4l(struct s2255_dev *dev)
vc->vdev.ctrl_handler = &vc->hdl;
vc->vdev.lock = &dev->lock;
vc->vdev.v4l2_dev = &dev->v4l2_dev;
+ vc->vdev.device_caps = V4L2_CAP_VIDEO_CAPTURE |
+ V4L2_CAP_STREAMING | V4L2_CAP_READWRITE;
video_set_drvdata(&vc->vdev, vc);
if (video_nr == -1)
ret = video_register_device(&vc->vdev,
diff --git a/drivers/media/usb/stk1160/stk1160-v4l.c b/drivers/media/usb/stk1160/stk1160-v4l.c
index 38016632c6d8..b71a0f4b40b5 100644
--- a/drivers/media/usb/stk1160/stk1160-v4l.c
+++ b/drivers/media/usb/stk1160/stk1160-v4l.c
@@ -337,11 +337,6 @@ static int vidioc_querycap(struct file *file,
strscpy(cap->driver, "stk1160", sizeof(cap->driver));
strscpy(cap->card, "stk1160", sizeof(cap->card));
usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info));
- cap->device_caps =
- V4L2_CAP_VIDEO_CAPTURE |
- V4L2_CAP_STREAMING |
- V4L2_CAP_READWRITE;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -821,6 +816,8 @@ int stk1160_video_register(struct stk1160 *dev)
/* This will be used to set video_device parent */
dev->vdev.v4l2_dev = &dev->v4l2_dev;
+ dev->vdev.device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
+ V4L2_CAP_READWRITE;
/* NTSC is default */
dev->norm = V4L2_STD_NTSC_M;
diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c
index cb7d6454bbe1..be8041e3e6b8 100644
--- a/drivers/media/usb/stkwebcam/stk-webcam.c
+++ b/drivers/media/usb/stkwebcam/stk-webcam.c
@@ -798,10 +798,6 @@ static int stk_vidioc_querycap(struct file *filp,
strscpy(cap->driver, "stk", sizeof(cap->driver));
strscpy(cap->card, "stk", sizeof(cap->card));
usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info));
-
- cap->device_caps = V4L2_CAP_VIDEO_CAPTURE
- | V4L2_CAP_READWRITE | V4L2_CAP_STREAMING;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -1261,6 +1257,8 @@ static int stk_register_video_device(struct stk_camera *dev)
dev->vdev = stk_v4l_data;
dev->vdev.lock = &dev->lock;
dev->vdev.v4l2_dev = &dev->v4l2_dev;
+ dev->vdev.device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
+ V4L2_CAP_STREAMING;
video_set_drvdata(&dev->vdev, dev);
err = video_register_device(&dev->vdev, VFL_TYPE_GRABBER, -1);
if (err)
diff --git a/drivers/media/usb/tm6000/tm6000-video.c b/drivers/media/usb/tm6000/tm6000-video.c
index 072210f5f92f..85fcddfb0202 100644
--- a/drivers/media/usb/tm6000/tm6000-video.c
+++ b/drivers/media/usb/tm6000/tm6000-video.c
@@ -854,22 +854,17 @@ static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
struct tm6000_core *dev = ((struct tm6000_fh *)priv)->dev;
- struct video_device *vdev = video_devdata(file);
strscpy(cap->driver, "tm6000", sizeof(cap->driver));
strscpy(cap->card, "Trident TVMaster TM5600/6000/6010",
sizeof(cap->card));
usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info));
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
+ V4L2_CAP_DEVICE_CAPS;
if (dev->tuner_type != TUNER_ABSENT)
- cap->device_caps |= V4L2_CAP_TUNER;
- if (vdev->vfl_type == VFL_TYPE_GRABBER)
- cap->device_caps |= V4L2_CAP_VIDEO_CAPTURE |
- V4L2_CAP_STREAMING |
- V4L2_CAP_READWRITE;
- else
- cap->device_caps |= V4L2_CAP_RADIO;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS |
- V4L2_CAP_RADIO | V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE;
+ cap->capabilities |= V4L2_CAP_TUNER;
+ if (dev->caps.has_radio)
+ cap->capabilities |= V4L2_CAP_RADIO;
return 0;
}
@@ -1639,6 +1634,10 @@ int tm6000_v4l2_register(struct tm6000_core *dev)
vdev_init(dev, &dev->vfd, &tm6000_template, "video");
dev->vfd.ctrl_handler = &dev->ctrl_handler;
+ dev->vfd.device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
+ V4L2_CAP_READWRITE;
+ if (dev->tuner_type != TUNER_ABSENT)
+ dev->vfd.device_caps |= V4L2_CAP_TUNER;
/* init video dma queues */
INIT_LIST_HEAD(&dev->vidq.active);
@@ -1659,6 +1658,7 @@ int tm6000_v4l2_register(struct tm6000_core *dev)
vdev_init(dev, &dev->radio_dev, &tm6000_radio_template,
"radio");
dev->radio_dev.ctrl_handler = &dev->radio_ctrl_handler;
+ dev->radio_dev.device_caps = V4L2_CAP_RADIO | V4L2_CAP_TUNER;
ret = video_register_device(&dev->radio_dev, VFL_TYPE_RADIO,
radio_nr);
if (ret < 0) {
diff --git a/drivers/media/usb/usbtv/usbtv-video.c b/drivers/media/usb/usbtv/usbtv-video.c
index 4a1eab711bdc..51f784479e91 100644
--- a/drivers/media/usb/usbtv/usbtv-video.c
+++ b/drivers/media/usb/usbtv/usbtv-video.c
@@ -603,9 +603,6 @@ static int usbtv_querycap(struct file *file, void *priv,
strscpy(cap->driver, "usbtv", sizeof(cap->driver));
strscpy(cap->card, "usbtv", sizeof(cap->card));
usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info));
- cap->device_caps = V4L2_CAP_VIDEO_CAPTURE;
- cap->device_caps |= V4L2_CAP_READWRITE | V4L2_CAP_STREAMING;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -942,6 +939,8 @@ int usbtv_video_init(struct usbtv *usbtv)
usbtv->vdev.tvnorms = USBTV_TV_STD;
usbtv->vdev.queue = &usbtv->vb2q;
usbtv->vdev.lock = &usbtv->v4l2_lock;
+ usbtv->vdev.device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
+ V4L2_CAP_STREAMING;
video_set_drvdata(&usbtv->vdev, usbtv);
ret = video_register_device(&usbtv->vdev, VFL_TYPE_GRABBER, -1);
if (ret < 0) {
diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
index 6d42154e3d0a..93750af82d98 100644
--- a/drivers/media/usb/usbvision/usbvision-video.c
+++ b/drivers/media/usb/usbvision/usbvision-video.c
@@ -452,24 +452,18 @@ static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *vc)
{
struct usb_usbvision *usbvision = video_drvdata(file);
- struct video_device *vdev = video_devdata(file);
strscpy(vc->driver, "USBVision", sizeof(vc->driver));
strscpy(vc->card,
usbvision_device_data[usbvision->dev_model].model_string,
sizeof(vc->card));
usb_make_path(usbvision->dev, vc->bus_info, sizeof(vc->bus_info));
- vc->device_caps = usbvision->have_tuner ? V4L2_CAP_TUNER : 0;
- if (vdev->vfl_type == VFL_TYPE_GRABBER)
- vc->device_caps |= V4L2_CAP_VIDEO_CAPTURE |
- V4L2_CAP_READWRITE | V4L2_CAP_STREAMING;
- else
- vc->device_caps |= V4L2_CAP_RADIO;
-
- vc->capabilities = vc->device_caps | V4L2_CAP_VIDEO_CAPTURE |
- V4L2_CAP_READWRITE | V4L2_CAP_STREAMING | V4L2_CAP_DEVICE_CAPS;
+ vc->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
+ V4L2_CAP_STREAMING | V4L2_CAP_DEVICE_CAPS;
if (usbvision_device_data[usbvision->dev_model].radio)
vc->capabilities |= V4L2_CAP_RADIO;
+ if (usbvision->have_tuner)
+ vc->capabilities |= V4L2_CAP_TUNER;
return 0;
}
@@ -1267,6 +1261,11 @@ static int usbvision_register_video(struct usb_usbvision *usbvision)
v4l2_disable_ioctl(&usbvision->vdev, VIDIOC_G_FREQUENCY);
v4l2_disable_ioctl(&usbvision->vdev, VIDIOC_S_TUNER);
}
+ usbvision->vdev.device_caps = V4L2_CAP_VIDEO_CAPTURE |
+ V4L2_CAP_READWRITE | V4L2_CAP_STREAMING;
+ if (usbvision->have_tuner)
+ usbvision->vdev.device_caps |= V4L2_CAP_TUNER;
+
if (video_register_device(&usbvision->vdev, VFL_TYPE_GRABBER, video_nr) < 0)
goto err_exit;
printk(KERN_INFO "USBVision[%d]: registered USBVision Video device %s [v4l2]\n",
@@ -1277,6 +1276,7 @@ static int usbvision_register_video(struct usb_usbvision *usbvision)
/* usbvision has radio */
usbvision_vdev_init(usbvision, &usbvision->rdev,
&usbvision_radio_template, "USBVision Radio");
+ usbvision->rdev.device_caps = V4L2_CAP_RADIO | V4L2_CAP_TUNER;
if (video_register_device(&usbvision->rdev, VFL_TYPE_RADIO, radio_nr) < 0)
goto err_exit;
printk(KERN_INFO "USBVision[%d]: registered USBVision Radio device %s [v4l2]\n",
diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
index 26163a5bde7d..e399b9fad757 100644
--- a/drivers/media/usb/uvc/uvc_ctrl.c
+++ b/drivers/media/usb/uvc/uvc_ctrl.c
@@ -2345,7 +2345,9 @@ void uvc_ctrl_cleanup_device(struct uvc_device *dev)
struct uvc_entity *entity;
unsigned int i;
- cancel_work_sync(&dev->async_ctrl.work);
+ /* Can be uninitialized if we are aborting on probe error. */
+ if (dev->async_ctrl.work.func)
+ cancel_work_sync(&dev->async_ctrl.work);
/* Free controls and control mappings for all entities. */
list_for_each_entry(entity, &dev->entities, list) {
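
The guard above relies on INIT_WORK() being what populates work->func, so
on a zero-initialized device structure a NULL func reliably means the work
item was never set up. A minimal sketch of the same idiom, assuming the
containing structure comes from kzalloc() (demo_* names are illustrative):

#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_dev {
	struct work_struct async_work;	/* INIT_WORK() may not have run */
};

static void demo_cleanup(struct demo_dev *dev)
{
	/* Only cancel if the work item was actually initialized. */
	if (dev->async_work.func)
		cancel_work_sync(&dev->async_work);
	kfree(dev);
}
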
diff --git a/drivers/media/usb/uvc/uvc_debugfs.c b/drivers/media/usb/uvc/uvc_debugfs.c
index 8ba54139a087..d2b109959d82 100644
--- a/drivers/media/usb/uvc/uvc_debugfs.c
+++ b/drivers/media/usb/uvc/uvc_debugfs.c
@@ -74,12 +74,13 @@ void uvc_debugfs_init_stream(struct uvc_streaming *stream)
{
struct usb_device *udev = stream->dev->udev;
struct dentry *dent;
- char dir_name[32];
+ char dir_name[33];
if (uvc_debugfs_root_dir == NULL)
return;
- sprintf(dir_name, "%u-%u", udev->bus->busnum, udev->devnum);
+ snprintf(dir_name, sizeof(dir_name), "%u-%u-%u", udev->bus->busnum,
+ udev->devnum, stream->intfnum);
dent = debugfs_create_dir(dir_name, uvc_debugfs_root_dir);
if (IS_ERR_OR_NULL(dent)) {
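
A quick check of the new buffer size against the format string used above:

/*
 * "%u-%u-%u" worst case: three u32 values of up to 10 digits each,
 * two '-' separators and the trailing NUL:
 *
 *	3 * 10 + 2 + 1 = 33 bytes
 *
 * hence the bump of dir_name[] from 32 to 33; snprintf() additionally
 * truncates instead of overflowing should the format grow again.
 */
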
diff --git a/drivers/media/usb/zr364xx/zr364xx.c b/drivers/media/usb/zr364xx/zr364xx.c
index 37a7992585df..a9bcba4fa9c6 100644
--- a/drivers/media/usb/zr364xx/zr364xx.c
+++ b/drivers/media/usb/zr364xx/zr364xx.c
@@ -694,14 +694,10 @@ static int zr364xx_vidioc_querycap(struct file *file, void *priv,
struct zr364xx_camera *cam = video_drvdata(file);
strscpy(cap->driver, DRIVER_DESC, sizeof(cap->driver));
- strscpy(cap->card, cam->udev->product, sizeof(cap->card));
+ if (cam->udev->product)
+ strscpy(cap->card, cam->udev->product, sizeof(cap->card));
strscpy(cap->bus_info, dev_name(&cam->udev->dev),
sizeof(cap->bus_info));
- cap->device_caps = V4L2_CAP_VIDEO_CAPTURE |
- V4L2_CAP_READWRITE |
- V4L2_CAP_STREAMING;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
-
return 0;
}
@@ -1328,6 +1324,8 @@ static const struct video_device zr364xx_template = {
.fops = &zr364xx_fops,
.ioctl_ops = &zr364xx_ioctl_ops,
.release = video_device_release_empty,
+ .device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
+ V4L2_CAP_STREAMING,
};
diff --git a/drivers/media/v4l2-core/Kconfig b/drivers/media/v4l2-core/Kconfig
index 8b9d4b3ec10e..7c5f62f196e5 100644
--- a/drivers/media/v4l2-core/Kconfig
+++ b/drivers/media/v4l2-core/Kconfig
@@ -13,7 +13,6 @@ config VIDEO_V4L2
config VIDEO_ADV_DEBUG
bool "Enable advanced debug functionality on V4L2 drivers"
- default n
help
Say Y here to enable advanced debugging functionality on some
V4L devices.
@@ -21,7 +20,6 @@ config VIDEO_ADV_DEBUG
config VIDEO_FIXED_MINOR_RANGES
bool "Enable old-style fixed minor ranges on drivers/video devices"
- default n
help
Say Y here to enable the old-style fixed-range minor assignments.
Only useful if you rely on the old behavior and use mknod instead of udev.
diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c
index c9efb2de710d..f8ad1c580a3e 100644
--- a/drivers/media/v4l2-core/v4l2-common.c
+++ b/drivers/media/v4l2-core/v4l2-common.c
@@ -321,6 +321,16 @@ static unsigned int clamp_align(unsigned int x, unsigned int min,
return x;
}
+static unsigned int clamp_roundup(unsigned int x, unsigned int min,
+ unsigned int max, unsigned int alignment)
+{
+ x = clamp(x, min, max);
+ if (alignment)
+ x = round_up(x, alignment);
+
+ return x;
+}
+
void v4l_bound_align_image(u32 *w, unsigned int wmin, unsigned int wmax,
unsigned int walign,
u32 *h, unsigned int hmin, unsigned int hmax,
@@ -531,8 +541,25 @@ static inline unsigned int v4l2_format_block_height(const struct v4l2_format_inf
return info->block_h[plane];
}
+void v4l2_apply_frmsize_constraints(u32 *width, u32 *height,
+ const struct v4l2_frmsize_stepwise *frmsize)
+{
+ if (!frmsize)
+ return;
+
+ /*
+ * Clamp width/height to meet min/max constraints and round it up to
+ * macroblock alignment.
+ */
+ *width = clamp_roundup(*width, frmsize->min_width, frmsize->max_width,
+ frmsize->step_width);
+ *height = clamp_roundup(*height, frmsize->min_height, frmsize->max_height,
+ frmsize->step_height);
+}
+EXPORT_SYMBOL_GPL(v4l2_apply_frmsize_constraints);
+
int v4l2_fill_pixfmt_mp(struct v4l2_pix_format_mplane *pixfmt,
- int pixelformat, int width, int height)
+ u32 pixelformat, u32 width, u32 height)
{
const struct v4l2_format_info *info;
struct v4l2_plane_pix_format *plane;
@@ -586,7 +613,8 @@ int v4l2_fill_pixfmt_mp(struct v4l2_pix_format_mplane *pixfmt,
}
EXPORT_SYMBOL_GPL(v4l2_fill_pixfmt_mp);
-int v4l2_fill_pixfmt(struct v4l2_pix_format *pixfmt, int pixelformat, int width, int height)
+int v4l2_fill_pixfmt(struct v4l2_pix_format *pixfmt, u32 pixelformat,
+ u32 width, u32 height)
{
const struct v4l2_format_info *info;
int i;
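
A worked example of the new v4l2_apply_frmsize_constraints() helper, using
made-up constraints for a codec with 16-pixel alignment (the numbers and
demo_* names are illustrative; the header carrying the declaration is
assumed to be <media/v4l2-common.h>):

#include <linux/videodev2.h>
#include <media/v4l2-common.h>

static const struct v4l2_frmsize_stepwise demo_frmsize = {
	.min_width  = 32, .max_width  = 1920, .step_width  = 16,
	.min_height = 32, .max_height = 1088, .step_height = 16,
};

static void demo_apply(void)
{
	u32 width = 1275, height = 723;

	v4l2_apply_frmsize_constraints(&width, &height, &demo_frmsize);
	/* width:  clamp(1275, 32, 1920) = 1275, round_up(1275, 16) = 1280
	 * height: clamp(723, 32, 1088)  = 723,  round_up(723, 16)  = 736
	 * Rounding happens after clamping, so a value near max can end up
	 * step-aligned above max if max itself is unaligned. */
}
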
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index 7d3a33258748..371537dd8cd3 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -394,6 +394,21 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
"Explicit",
NULL,
};
+ static const char * const mpeg_mpeg2_level[] = {
+ "Low",
+ "Main",
+ "High 1440",
+ "High",
+ NULL,
+ };
+ static const char * const mpeg2_profile[] = {
+ "Simple",
+ "Main",
+ "SNR Scalable",
+ "Spatially Scalable",
+ "High",
+ NULL,
+ };
static const char * const mpeg_mpeg4_level[] = {
"0",
"0b",
@@ -610,6 +625,10 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
return h264_fp_arrangement_type;
case V4L2_CID_MPEG_VIDEO_H264_FMO_MAP_TYPE:
return h264_fmo_map_type;
+ case V4L2_CID_MPEG_VIDEO_MPEG2_LEVEL:
+ return mpeg_mpeg2_level;
+ case V4L2_CID_MPEG_VIDEO_MPEG2_PROFILE:
+ return mpeg2_profile;
case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
return mpeg_mpeg4_level;
case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
@@ -820,6 +839,13 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MAX_QP: return "H264 I-Frame Maximum QP Value";
case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MIN_QP: return "H264 P-Frame Minimum QP Value";
case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MAX_QP: return "H264 P-Frame Maximum QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_SPS: return "H264 Sequence Parameter Set";
+ case V4L2_CID_MPEG_VIDEO_H264_PPS: return "H264 Picture Parameter Set";
+ case V4L2_CID_MPEG_VIDEO_H264_SCALING_MATRIX: return "H264 Scaling Matrix";
+ case V4L2_CID_MPEG_VIDEO_H264_SLICE_PARAMS: return "H264 Slice Parameters";
+ case V4L2_CID_MPEG_VIDEO_H264_DECODE_PARAMS: return "H264 Decode Parameters";
+ case V4L2_CID_MPEG_VIDEO_MPEG2_LEVEL: return "MPEG2 Level";
+ case V4L2_CID_MPEG_VIDEO_MPEG2_PROFILE: return "MPEG2 Profile";
case V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP: return "MPEG4 I-Frame QP Value";
case V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP: return "MPEG4 P-Frame QP Value";
case V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP: return "MPEG4 B-Frame QP Value";
@@ -1145,6 +1171,7 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_FLASH_STROBE_STOP:
case V4L2_CID_AUTO_FOCUS_START:
case V4L2_CID_AUTO_FOCUS_STOP:
+ case V4L2_CID_DO_WHITE_BALANCE:
*type = V4L2_CTRL_TYPE_BUTTON;
*flags |= V4L2_CTRL_FLAG_WRITE_ONLY |
V4L2_CTRL_FLAG_EXECUTE_ON_WRITE;
@@ -1184,6 +1211,8 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC:
case V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE:
case V4L2_CID_MPEG_VIDEO_H264_FMO_MAP_TYPE:
+ case V4L2_CID_MPEG_VIDEO_MPEG2_LEVEL:
+ case V4L2_CID_MPEG_VIDEO_MPEG2_PROFILE:
case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
case V4L2_CID_JPEG_CHROMA_SUBSAMPLING:
@@ -1301,6 +1330,21 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_MPEG_VIDEO_FWHT_PARAMS:
*type = V4L2_CTRL_TYPE_FWHT_PARAMS;
break;
+ case V4L2_CID_MPEG_VIDEO_H264_SPS:
+ *type = V4L2_CTRL_TYPE_H264_SPS;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_PPS:
+ *type = V4L2_CTRL_TYPE_H264_PPS;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_SCALING_MATRIX:
+ *type = V4L2_CTRL_TYPE_H264_SCALING_MATRIX;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_SLICE_PARAMS:
+ *type = V4L2_CTRL_TYPE_H264_SLICE_PARAMS;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_DECODE_PARAMS:
+ *type = V4L2_CTRL_TYPE_H264_DECODE_PARAMS;
+ break;
default:
*type = V4L2_CTRL_TYPE_INTEGER;
break;
@@ -1450,6 +1494,32 @@ static bool std_equal(const struct v4l2_ctrl *ctrl, u32 idx,
}
}
+static void std_init_compound(const struct v4l2_ctrl *ctrl, u32 idx,
+ union v4l2_ctrl_ptr ptr)
+{
+ struct v4l2_ctrl_mpeg2_slice_params *p_mpeg2_slice_params;
+ void *p = ptr.p + idx * ctrl->elem_size;
+
+ memset(p, 0, ctrl->elem_size);
+
+ /*
+ * The cast is needed to get rid of a gcc warning complaining that
+ * V4L2_CTRL_TYPE_MPEG2_SLICE_PARAMS is not part of the
+ * v4l2_ctrl_type enum.
+ */
+ switch ((u32)ctrl->type) {
+ case V4L2_CTRL_TYPE_MPEG2_SLICE_PARAMS:
+ p_mpeg2_slice_params = p;
+ /* 4:2:0 */
+ p_mpeg2_slice_params->sequence.chroma_format = 1;
+ /* interlaced top field */
+ p_mpeg2_slice_params->picture.picture_structure = 1;
+ p_mpeg2_slice_params->picture.picture_coding_type =
+ V4L2_MPEG2_PICTURE_CODING_TYPE_I;
+ break;
+ }
+}
+
static void std_init(const struct v4l2_ctrl *ctrl, u32 idx,
union v4l2_ctrl_ptr ptr)
{
@@ -1469,6 +1539,10 @@ static void std_init(const struct v4l2_ctrl *ctrl, u32 idx,
case V4L2_CTRL_TYPE_BOOLEAN:
ptr.p_s32[idx] = ctrl->default_value;
break;
+ case V4L2_CTRL_TYPE_BUTTON:
+ case V4L2_CTRL_TYPE_CTRL_CLASS:
+ ptr.p_s32[idx] = 0;
+ break;
case V4L2_CTRL_TYPE_U8:
ptr.p_u8[idx] = ctrl->default_value;
break;
@@ -1479,8 +1553,7 @@ static void std_init(const struct v4l2_ctrl *ctrl, u32 idx,
ptr.p_u32[idx] = ctrl->default_value;
break;
default:
- idx *= ctrl->elem_size;
- memset(ptr.p + idx, 0, ctrl->elem_size);
+ std_init_compound(ctrl, idx, ptr);
break;
}
}
@@ -1670,6 +1743,13 @@ static int std_validate(const struct v4l2_ctrl *ctrl, u32 idx,
case V4L2_CTRL_TYPE_FWHT_PARAMS:
return 0;
+ case V4L2_CTRL_TYPE_H264_SPS:
+ case V4L2_CTRL_TYPE_H264_PPS:
+ case V4L2_CTRL_TYPE_H264_SCALING_MATRIX:
+ case V4L2_CTRL_TYPE_H264_SLICE_PARAMS:
+ case V4L2_CTRL_TYPE_H264_DECODE_PARAMS:
+ return 0;
+
default:
return -EINVAL;
}
@@ -2149,15 +2229,6 @@ static int handler_new_ref(struct v4l2_ctrl_handler *hdl,
if (size_extra_req)
new_ref->p_req.p = &new_ref[1];
- if (ctrl->handler == hdl) {
- /* By default each control starts in a cluster of its own.
- new_ref->ctrl is basically a cluster array with one
- element, so that's perfect to use as the cluster pointer.
- But only do this for the handler that owns the control. */
- ctrl->cluster = &new_ref->ctrl;
- ctrl->ncontrols = 1;
- }
-
INIT_LIST_HEAD(&new_ref->node);
mutex_lock(hdl->lock);
@@ -2190,6 +2261,15 @@ insert_in_hash:
hdl->buckets[bucket] = new_ref;
if (ctrl_ref)
*ctrl_ref = new_ref;
+ if (ctrl->handler == hdl) {
+ /* By default each control starts in a cluster of its own.
+ * new_ref->ctrl is basically a cluster array with one
+ * element, so that's perfect to use as the cluster pointer.
+ * But only do this for the handler that owns the control.
+ */
+ ctrl->cluster = &new_ref->ctrl;
+ ctrl->ncontrols = 1;
+ }
unlock:
mutex_unlock(hdl->lock);
@@ -2253,6 +2333,21 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
case V4L2_CTRL_TYPE_FWHT_PARAMS:
elem_size = sizeof(struct v4l2_ctrl_fwht_params);
break;
+ case V4L2_CTRL_TYPE_H264_SPS:
+ elem_size = sizeof(struct v4l2_ctrl_h264_sps);
+ break;
+ case V4L2_CTRL_TYPE_H264_PPS:
+ elem_size = sizeof(struct v4l2_ctrl_h264_pps);
+ break;
+ case V4L2_CTRL_TYPE_H264_SCALING_MATRIX:
+ elem_size = sizeof(struct v4l2_ctrl_h264_scaling_matrix);
+ break;
+ case V4L2_CTRL_TYPE_H264_SLICE_PARAMS:
+ elem_size = sizeof(struct v4l2_ctrl_h264_slice_params);
+ break;
+ case V4L2_CTRL_TYPE_H264_DECODE_PARAMS:
+ elem_size = sizeof(struct v4l2_ctrl_h264_decode_params);
+ break;
default:
if (type < V4L2_CTRL_COMPOUND_TYPES)
elem_size = sizeof(s32);
@@ -2369,16 +2464,15 @@ struct v4l2_ctrl *v4l2_ctrl_new_custom(struct v4l2_ctrl_handler *hdl,
v4l2_ctrl_fill(cfg->id, &name, &type, &min, &max, &step,
&def, &flags);
- is_menu = (cfg->type == V4L2_CTRL_TYPE_MENU ||
- cfg->type == V4L2_CTRL_TYPE_INTEGER_MENU);
+ is_menu = (type == V4L2_CTRL_TYPE_MENU ||
+ type == V4L2_CTRL_TYPE_INTEGER_MENU);
if (is_menu)
WARN_ON(step);
else
WARN_ON(cfg->menu_skip_mask);
- if (cfg->type == V4L2_CTRL_TYPE_MENU && qmenu == NULL)
+ if (type == V4L2_CTRL_TYPE_MENU && !qmenu) {
qmenu = v4l2_ctrl_get_menu(cfg->id);
- else if (cfg->type == V4L2_CTRL_TYPE_INTEGER_MENU &&
- qmenu_int == NULL) {
+ } else if (type == V4L2_CTRL_TYPE_INTEGER_MENU && !qmenu_int) {
handler_set_err(hdl, -EINVAL);
return NULL;
}
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
index 414636dedffd..cbb74f748555 100644
--- a/drivers/media/v4l2-core/v4l2-dev.c
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -589,11 +589,9 @@ static void determine_valid_ioctls(struct video_device *vdev)
if (is_vid || is_tch) {
/* video and metadata specific ioctls */
if ((is_rx && (ops->vidioc_enum_fmt_vid_cap ||
- ops->vidioc_enum_fmt_vid_cap_mplane ||
ops->vidioc_enum_fmt_vid_overlay ||
ops->vidioc_enum_fmt_meta_cap)) ||
(is_tx && (ops->vidioc_enum_fmt_vid_out ||
- ops->vidioc_enum_fmt_vid_out_mplane ||
ops->vidioc_enum_fmt_meta_out)))
set_bit(_IOC_NR(VIDIOC_ENUM_FMT), valid_ioctls);
if ((is_rx && (ops->vidioc_g_fmt_vid_cap ||
diff --git a/drivers/media/v4l2-core/v4l2-fwnode.c b/drivers/media/v4l2-core/v4l2-fwnode.c
index c2d980ab3af7..7e740d332a54 100644
--- a/drivers/media/v4l2-core/v4l2-fwnode.c
+++ b/drivers/media/v4l2-core/v4l2-fwnode.c
@@ -209,10 +209,10 @@ static int v4l2_fwnode_endpoint_parse_csi2_bus(struct fwnode_handle *fwnode,
have_clk_lane = true;
}
- if (lanes_used & BIT(clock_lane)) {
- if (have_clk_lane || !use_default_lane_mapping)
- pr_warn("duplicated lane %u in clock-lanes, using defaults\n",
- v);
+ if (have_clk_lane && lanes_used & BIT(clock_lane) &&
+ !use_default_lane_mapping) {
+ pr_warn("duplicated lane %u in clock-lanes, using defaults\n",
+ v);
use_default_lane_mapping = true;
}
@@ -1095,7 +1095,7 @@ v4l2_fwnode_reference_parse_int_props(struct device *dev,
}
}
- return PTR_ERR(fwnode) == -ENOENT ? 0 : PTR_ERR(fwnode);
+ return !fwnode || PTR_ERR(fwnode) == -ENOENT ? 0 : PTR_ERR(fwnode);
error:
fwnode_handle_put(fwnode);
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 6859bdac86fe..b1f4b991dba6 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -1321,6 +1321,7 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_H264: descr = "H.264"; break;
case V4L2_PIX_FMT_H264_NO_SC: descr = "H.264 (No Start Codes)"; break;
case V4L2_PIX_FMT_H264_MVC: descr = "H.264 MVC"; break;
+ case V4L2_PIX_FMT_H264_SLICE_RAW: descr = "H.264 Parsed Slice Data"; break;
case V4L2_PIX_FMT_H263: descr = "H.263"; break;
case V4L2_PIX_FMT_MPEG1: descr = "MPEG-1 ES"; break;
case V4L2_PIX_FMT_MPEG2: descr = "MPEG-2 ES"; break;
@@ -1377,8 +1378,10 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
static int v4l_enum_fmt(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
+ struct video_device *vdev = video_devdata(file);
struct v4l2_fmtdesc *p = arg;
int ret = check_fmt(file, p->type);
+ u32 cap_mask;
if (ret)
return ret;
@@ -1386,30 +1389,34 @@ static int v4l_enum_fmt(const struct v4l2_ioctl_ops *ops,
switch (p->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ cap_mask = V4L2_CAP_VIDEO_CAPTURE_MPLANE |
+ V4L2_CAP_VIDEO_M2M_MPLANE;
+ if (!!(vdev->device_caps & cap_mask) !=
+ (p->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE))
+ break;
+
if (unlikely(!ops->vidioc_enum_fmt_vid_cap))
break;
ret = ops->vidioc_enum_fmt_vid_cap(file, fh, arg);
break;
- case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
- if (unlikely(!ops->vidioc_enum_fmt_vid_cap_mplane))
- break;
- ret = ops->vidioc_enum_fmt_vid_cap_mplane(file, fh, arg);
- break;
case V4L2_BUF_TYPE_VIDEO_OVERLAY:
if (unlikely(!ops->vidioc_enum_fmt_vid_overlay))
break;
ret = ops->vidioc_enum_fmt_vid_overlay(file, fh, arg);
break;
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ cap_mask = V4L2_CAP_VIDEO_OUTPUT_MPLANE |
+ V4L2_CAP_VIDEO_M2M_MPLANE;
+ if (!!(vdev->device_caps & cap_mask) !=
+ (p->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE))
+ break;
+
if (unlikely(!ops->vidioc_enum_fmt_vid_out))
break;
ret = ops->vidioc_enum_fmt_vid_out(file, fh, arg);
break;
- case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
- if (unlikely(!ops->vidioc_enum_fmt_vid_out_mplane))
- break;
- ret = ops->vidioc_enum_fmt_vid_out_mplane(file, fh, arg);
- break;
case V4L2_BUF_TYPE_SDR_CAPTURE:
if (unlikely(!ops->vidioc_enum_fmt_sdr_cap))
break;
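
The added cap_mask test is what lets a single ->vidioc_enum_fmt_vid_cap /
->vidioc_enum_fmt_vid_out handler serve both the single- and multi-planar
buffer types: the core now forwards only the plane model that matches what
the device advertises in device_caps. The "!!(...) != (...)" comparison is
equivalent to this more verbose form (demo_* name is illustrative):

#include <linux/types.h>
#include <linux/videodev2.h>

static bool demo_enum_fmt_type_ok(u32 device_caps, u32 buf_type)
{
	u32 cap_mask = V4L2_CAP_VIDEO_CAPTURE_MPLANE |
		       V4L2_CAP_VIDEO_M2M_MPLANE;
	bool dev_is_mplane = device_caps & cap_mask;
	bool req_is_mplane = buf_type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;

	/* Reject the request when its plane model differs from the
	 * device's; the output direction uses the *_OUTPUT_MPLANE caps
	 * and buffer type in the same way. */
	return dev_is_mplane == req_is_mplane;
}
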
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index fd96df98c780..4f5176702937 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -1118,6 +1118,35 @@ int v4l2_m2m_ioctl_streamoff(struct file *file, void *priv,
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamoff);
+int v4l2_m2m_ioctl_try_encoder_cmd(struct file *file, void *fh,
+ struct v4l2_encoder_cmd *ec)
+{
+ if (ec->cmd != V4L2_ENC_CMD_STOP && ec->cmd != V4L2_ENC_CMD_START)
+ return -EINVAL;
+
+ ec->flags = 0;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_try_encoder_cmd);
+
+int v4l2_m2m_ioctl_try_decoder_cmd(struct file *file, void *fh,
+ struct v4l2_decoder_cmd *dc)
+{
+ if (dc->cmd != V4L2_DEC_CMD_STOP && dc->cmd != V4L2_DEC_CMD_START)
+ return -EINVAL;
+
+ dc->flags = 0;
+
+ if (dc->cmd == V4L2_DEC_CMD_STOP) {
+ dc->stop.pts = 0;
+ } else if (dc->cmd == V4L2_DEC_CMD_START) {
+ dc->start.speed = 0;
+ dc->start.format = V4L2_DEC_START_FMT_NONE;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_try_decoder_cmd);
+
/*
* v4l2_file_operations helpers. It is assumed here same lock is used
* for the output and the capture buffer queue.
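
Drivers can point their ioctl ops at these new helpers instead of
open-coding the encoder/decoder command validation; a minimal sketch of
the wiring (the ops table name is illustrative, only the two helpers come
from the hunk above):

#include <media/v4l2-ioctl.h>
#include <media/v4l2-mem2mem.h>

static const struct v4l2_ioctl_ops demo_m2m_ioctl_ops = {
	.vidioc_try_encoder_cmd	= v4l2_m2m_ioctl_try_encoder_cmd,
	.vidioc_try_decoder_cmd	= v4l2_m2m_ioctl_try_decoder_cmd,
	/* remaining format/buffer ops elided */
};
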
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
index f24978b80440..21fb90d66bfc 100644
--- a/drivers/media/v4l2-core/v4l2-subdev.c
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -112,56 +112,217 @@ static int subdev_close(struct file *file)
return 0;
}
-#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
-static int check_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_format *format)
+static inline int check_which(__u32 which)
{
- if (format->which != V4L2_SUBDEV_FORMAT_TRY &&
- format->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ if (which != V4L2_SUBDEV_FORMAT_TRY &&
+ which != V4L2_SUBDEV_FORMAT_ACTIVE)
return -EINVAL;
- if (format->pad >= sd->entity.num_pads)
+ return 0;
+}
+
+static inline int check_pad(struct v4l2_subdev *sd, __u32 pad)
+{
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ if (sd->entity.graph_obj.mdev) {
+ if (pad >= sd->entity.num_pads)
+ return -EINVAL;
+ return 0;
+ }
+#endif
+ /* allow pad 0 on subdevices not registered as media entities */
+ if (pad > 0)
+ return -EINVAL;
+ return 0;
+}
+
+static int check_cfg(__u32 which, struct v4l2_subdev_pad_config *cfg)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY && !cfg)
return -EINVAL;
return 0;
}
-static int check_crop(struct v4l2_subdev *sd, struct v4l2_subdev_crop *crop)
+static inline int check_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
{
- if (crop->which != V4L2_SUBDEV_FORMAT_TRY &&
- crop->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ if (!format)
return -EINVAL;
- if (crop->pad >= sd->entity.num_pads)
+ return check_which(format->which) ? : check_pad(sd, format->pad) ? :
+ check_cfg(format->which, cfg);
+}
+
+static int call_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ return check_format(sd, cfg, format) ? :
+ sd->ops->pad->get_fmt(sd, cfg, format);
+}
+
+static int call_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ return check_format(sd, cfg, format) ? :
+ sd->ops->pad->set_fmt(sd, cfg, format);
+}
+
+static int call_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (!code)
return -EINVAL;
- return 0;
+ return check_which(code->which) ? : check_pad(sd, code->pad) ? :
+ check_cfg(code->which, cfg) ? :
+ sd->ops->pad->enum_mbus_code(sd, cfg, code);
}
-static int check_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_selection *sel)
+static int call_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
{
- if (sel->which != V4L2_SUBDEV_FORMAT_TRY &&
- sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ if (!fse)
return -EINVAL;
- if (sel->pad >= sd->entity.num_pads)
+ return check_which(fse->which) ? : check_pad(sd, fse->pad) ? :
+ check_cfg(fse->which, cfg) ? :
+ sd->ops->pad->enum_frame_size(sd, cfg, fse);
+}
+
+static inline int check_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ if (!fi)
return -EINVAL;
- return 0;
+ return check_pad(sd, fi->pad);
+}
+
+static int call_g_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ return check_frame_interval(sd, fi) ? :
+ sd->ops->video->g_frame_interval(sd, fi);
+}
+
+static int call_s_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ return check_frame_interval(sd, fi) ? :
+ sd->ops->video->s_frame_interval(sd, fi);
+}
+
+static int call_enum_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_interval_enum *fie)
+{
+ if (!fie)
+ return -EINVAL;
+
+ return check_which(fie->which) ? : check_pad(sd, fie->pad) ? :
+ check_cfg(fie->which, cfg) ? :
+ sd->ops->pad->enum_frame_interval(sd, cfg, fie);
+}
+
+static inline int check_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ if (!sel)
+ return -EINVAL;
+
+ return check_which(sel->which) ? : check_pad(sd, sel->pad) ? :
+ check_cfg(sel->which, cfg);
+}
+
+static int call_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ return check_selection(sd, cfg, sel) ? :
+ sd->ops->pad->get_selection(sd, cfg, sel);
+}
+
+static int call_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ return check_selection(sd, cfg, sel) ? :
+ sd->ops->pad->set_selection(sd, cfg, sel);
}
-static int check_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edid)
+static inline int check_edid(struct v4l2_subdev *sd,
+ struct v4l2_subdev_edid *edid)
{
- if (edid->pad >= sd->entity.num_pads)
+ if (!edid)
return -EINVAL;
if (edid->blocks && edid->edid == NULL)
return -EINVAL;
- return 0;
+ return check_pad(sd, edid->pad);
+}
+
+static int call_get_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edid)
+{
+ return check_edid(sd, edid) ? : sd->ops->pad->get_edid(sd, edid);
+}
+
+static int call_set_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edid)
+{
+ return check_edid(sd, edid) ? : sd->ops->pad->set_edid(sd, edid);
+}
+
+static int call_dv_timings_cap(struct v4l2_subdev *sd,
+ struct v4l2_dv_timings_cap *cap)
+{
+ if (!cap)
+ return -EINVAL;
+
+ return check_pad(sd, cap->pad) ? :
+ sd->ops->pad->dv_timings_cap(sd, cap);
}
-#endif
+
+static int call_enum_dv_timings(struct v4l2_subdev *sd,
+ struct v4l2_enum_dv_timings *dvt)
+{
+ if (!dvt)
+ return -EINVAL;
+
+ return check_pad(sd, dvt->pad) ? :
+ sd->ops->pad->enum_dv_timings(sd, dvt);
+}
+
+static const struct v4l2_subdev_pad_ops v4l2_subdev_call_pad_wrappers = {
+ .get_fmt = call_get_fmt,
+ .set_fmt = call_set_fmt,
+ .enum_mbus_code = call_enum_mbus_code,
+ .enum_frame_size = call_enum_frame_size,
+ .enum_frame_interval = call_enum_frame_interval,
+ .get_selection = call_get_selection,
+ .set_selection = call_set_selection,
+ .get_edid = call_get_edid,
+ .set_edid = call_set_edid,
+ .dv_timings_cap = call_dv_timings_cap,
+ .enum_dv_timings = call_enum_dv_timings,
+};
+
+static const struct v4l2_subdev_video_ops v4l2_subdev_call_video_wrappers = {
+ .g_frame_interval = call_g_frame_interval,
+ .s_frame_interval = call_s_frame_interval,
+};
+
+const struct v4l2_subdev_ops v4l2_subdev_call_wrappers = {
+ .pad = &v4l2_subdev_call_pad_wrappers,
+ .video = &v4l2_subdev_call_video_wrappers,
+};
+EXPORT_SYMBOL(v4l2_subdev_call_wrappers);
static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
{
@@ -284,10 +445,6 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
case VIDIOC_SUBDEV_G_FMT: {
struct v4l2_subdev_format *format = arg;
- rval = check_format(sd, format);
- if (rval)
- return rval;
-
memset(format->reserved, 0, sizeof(format->reserved));
memset(format->format.reserved, 0, sizeof(format->format.reserved));
return v4l2_subdev_call(sd, pad, get_fmt, subdev_fh->pad, format);
@@ -296,10 +453,6 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
case VIDIOC_SUBDEV_S_FMT: {
struct v4l2_subdev_format *format = arg;
- rval = check_format(sd, format);
- if (rval)
- return rval;
-
memset(format->reserved, 0, sizeof(format->reserved));
memset(format->format.reserved, 0, sizeof(format->format.reserved));
return v4l2_subdev_call(sd, pad, set_fmt, subdev_fh->pad, format);
@@ -309,10 +462,6 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
struct v4l2_subdev_crop *crop = arg;
struct v4l2_subdev_selection sel;
- rval = check_crop(sd, crop);
- if (rval)
- return rval;
-
memset(crop->reserved, 0, sizeof(crop->reserved));
memset(&sel, 0, sizeof(sel));
sel.which = crop->which;
@@ -332,10 +481,6 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
struct v4l2_subdev_selection sel;
memset(crop->reserved, 0, sizeof(crop->reserved));
- rval = check_crop(sd, crop);
- if (rval)
- return rval;
-
memset(&sel, 0, sizeof(sel));
sel.which = crop->which;
sel.pad = crop->pad;
@@ -353,13 +498,6 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
case VIDIOC_SUBDEV_ENUM_MBUS_CODE: {
struct v4l2_subdev_mbus_code_enum *code = arg;
- if (code->which != V4L2_SUBDEV_FORMAT_TRY &&
- code->which != V4L2_SUBDEV_FORMAT_ACTIVE)
- return -EINVAL;
-
- if (code->pad >= sd->entity.num_pads)
- return -EINVAL;
-
memset(code->reserved, 0, sizeof(code->reserved));
return v4l2_subdev_call(sd, pad, enum_mbus_code, subdev_fh->pad,
code);
@@ -368,13 +506,6 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
case VIDIOC_SUBDEV_ENUM_FRAME_SIZE: {
struct v4l2_subdev_frame_size_enum *fse = arg;
- if (fse->which != V4L2_SUBDEV_FORMAT_TRY &&
- fse->which != V4L2_SUBDEV_FORMAT_ACTIVE)
- return -EINVAL;
-
- if (fse->pad >= sd->entity.num_pads)
- return -EINVAL;
-
memset(fse->reserved, 0, sizeof(fse->reserved));
return v4l2_subdev_call(sd, pad, enum_frame_size, subdev_fh->pad,
fse);
@@ -383,9 +514,6 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
case VIDIOC_SUBDEV_G_FRAME_INTERVAL: {
struct v4l2_subdev_frame_interval *fi = arg;
- if (fi->pad >= sd->entity.num_pads)
- return -EINVAL;
-
memset(fi->reserved, 0, sizeof(fi->reserved));
return v4l2_subdev_call(sd, video, g_frame_interval, arg);
}
@@ -393,9 +521,6 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
case VIDIOC_SUBDEV_S_FRAME_INTERVAL: {
struct v4l2_subdev_frame_interval *fi = arg;
- if (fi->pad >= sd->entity.num_pads)
- return -EINVAL;
-
memset(fi->reserved, 0, sizeof(fi->reserved));
return v4l2_subdev_call(sd, video, s_frame_interval, arg);
}
@@ -403,13 +528,6 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
case VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL: {
struct v4l2_subdev_frame_interval_enum *fie = arg;
- if (fie->which != V4L2_SUBDEV_FORMAT_TRY &&
- fie->which != V4L2_SUBDEV_FORMAT_ACTIVE)
- return -EINVAL;
-
- if (fie->pad >= sd->entity.num_pads)
- return -EINVAL;
-
memset(fie->reserved, 0, sizeof(fie->reserved));
return v4l2_subdev_call(sd, pad, enum_frame_interval, subdev_fh->pad,
fie);
@@ -418,10 +536,6 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
case VIDIOC_SUBDEV_G_SELECTION: {
struct v4l2_subdev_selection *sel = arg;
- rval = check_selection(sd, sel);
- if (rval)
- return rval;
-
memset(sel->reserved, 0, sizeof(sel->reserved));
return v4l2_subdev_call(
sd, pad, get_selection, subdev_fh->pad, sel);
@@ -430,10 +544,6 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
case VIDIOC_SUBDEV_S_SELECTION: {
struct v4l2_subdev_selection *sel = arg;
- rval = check_selection(sd, sel);
- if (rval)
- return rval;
-
memset(sel->reserved, 0, sizeof(sel->reserved));
return v4l2_subdev_call(
sd, pad, set_selection, subdev_fh->pad, sel);
@@ -442,38 +552,24 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
case VIDIOC_G_EDID: {
struct v4l2_subdev_edid *edid = arg;
- rval = check_edid(sd, edid);
- if (rval)
- return rval;
-
return v4l2_subdev_call(sd, pad, get_edid, edid);
}
case VIDIOC_S_EDID: {
struct v4l2_subdev_edid *edid = arg;
- rval = check_edid(sd, edid);
- if (rval)
- return rval;
-
return v4l2_subdev_call(sd, pad, set_edid, edid);
}
case VIDIOC_SUBDEV_DV_TIMINGS_CAP: {
struct v4l2_dv_timings_cap *cap = arg;
- if (cap->pad >= sd->entity.num_pads)
- return -EINVAL;
-
return v4l2_subdev_call(sd, pad, dv_timings_cap, cap);
}
case VIDIOC_SUBDEV_ENUM_DV_TIMINGS: {
struct v4l2_enum_dv_timings *dvt = arg;
- if (dvt->pad >= sd->entity.num_pads)
- return -EINVAL;
-
return v4l2_subdev_call(sd, pad, enum_dv_timings, dvt);
}
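
The call_*() wrappers above lean on the GCC "a ? : b" extension: the
expression evaluates to a when a is non-zero (here, a negative errno from
one of the check helpers) and to b otherwise, so the chain stops at the
first failing check and only then invokes the subdev op. Expanded form of
call_get_fmt(), for clarity (equivalent behaviour, no new API assumed):

static int call_get_fmt_expanded(struct v4l2_subdev *sd,
				 struct v4l2_subdev_pad_config *cfg,
				 struct v4l2_subdev_format *format)
{
	int ret = check_format(sd, cfg, format);

	if (ret)
		return ret;
	return sd->ops->pad->get_fmt(sd, cfg, format);
}
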
diff --git a/drivers/media/v4l2-core/videobuf-dma-contig.c b/drivers/media/v4l2-core/videobuf-dma-contig.c
index 0491122b03c4..76b4ac7b1678 100644
--- a/drivers/media/v4l2-core/videobuf-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf-dma-contig.c
@@ -277,7 +277,6 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
struct videobuf_dma_contig_memory *mem;
struct videobuf_mapping *map;
int retval;
- unsigned long size;
dev_dbg(q->dev, "%s\n", __func__);
@@ -300,7 +299,6 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
goto error;
/* Try to remap memory */
- size = vma->vm_end - vma->vm_start;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
/* the "vm_pgoff" is just used in v4l2 to find the
@@ -311,7 +309,7 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
*/
vma->vm_pgoff = 0;
- retval = vm_iomap_memory(vma, mem->dma_handle, size);
+ retval = vm_iomap_memory(vma, mem->dma_handle, mem->size);
if (retval) {
dev_err(q->dev, "mmap: remap failed with error %d. ",
retval);
diff --git a/drivers/media/v4l2-core/videobuf-vmalloc.c b/drivers/media/v4l2-core/videobuf-vmalloc.c
index 8f38dae39532..f8bd5a369560 100644
--- a/drivers/media/v4l2-core/videobuf-vmalloc.c
+++ b/drivers/media/v4l2-core/videobuf-vmalloc.c
@@ -7,7 +7,7 @@
* into PAGE_SIZE chunks). They also assume the driver does not need
* to touch the video data.
*
- * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org>
+ * (c) 2007 Mauro Carvalho Chehab <mchehab@kernel.org>
*/
#include <linux/init.h>
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index 139782fefd02..eff26c1b1394 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -19,6 +19,7 @@
#include <linux/io.h>
#include <linux/gpio/driver.h>
#include <linux/gpio/consumer.h> /* GPIO descriptor enum */
+#include <linux/gpio/machine.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/platform_device.h>
@@ -2169,7 +2170,8 @@ static int gpmc_probe_generic_child(struct platform_device *pdev,
waitpin_desc = gpiochip_request_own_desc(&gpmc->gpio_chip,
wait_pin, "WAITPIN",
- 0);
+ GPIO_ACTIVE_HIGH,
+ GPIOD_IN);
if (IS_ERR(waitpin_desc)) {
dev_err(&pdev->dev, "invalid wait-pin: %d\n", wait_pin);
ret = PTR_ERR(waitpin_desc);
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index d8882b0a1338..c2dd322691d1 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -6001,13 +6001,12 @@ mpt_findImVolumes(MPT_ADAPTER *ioc)
if (mpt_config(ioc, &cfg) != 0)
goto out;
- mem = kmalloc(iocpage2sz, GFP_KERNEL);
+ mem = kmemdup(pIoc2, iocpage2sz, GFP_KERNEL);
if (!mem) {
rc = -ENOMEM;
goto out;
}
- memcpy(mem, (u8 *)pIoc2, iocpage2sz);
ioc->raid_data.pIocPg2 = (IOCPage2_t *) mem;
mpt_read_ioc_pg_3(ioc);
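
kmemdup() is the single-call equivalent of the removed kmalloc()+memcpy()
pair; a minimal illustration (demo_* name is illustrative):

#include <linux/slab.h>
#include <linux/string.h>

static void *demo_dup(const void *src, size_t len)
{
	/* Returns NULL on allocation failure, just like kmalloc(). */
	return kmemdup(src, len, GFP_KERNEL);
}
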
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index a17d275bf1d4..6855ff443e04 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -1336,9 +1336,8 @@ config MFD_TI_LMU
select REGMAP_I2C
help
Say yes here to enable support for TI LMU chips.
-
- TI LMU MFD supports LM3532, LM3631, LM3632, LM3633, LM3695 and LM3697.
- It consists of backlight, LED and regulator driver.
+ TI LMU MFD supports LM3532, LM3631, LM3632, LM3633, LM3695 and
+ LM36274. It consists of backlight, LED and regulator driver.
It provides consistent device controls for lighting functions.
config MFD_OMAP_USB_HOST
diff --git a/drivers/mfd/ti-lmu.c b/drivers/mfd/ti-lmu.c
index 96b21b5af570..fd6e8c417baa 100644
--- a/drivers/mfd/ti-lmu.c
+++ b/drivers/mfd/ti-lmu.c
@@ -108,17 +108,14 @@ static const struct mfd_cell lm3695_devices[] = {
},
};
-static const struct mfd_cell lm3697_devices[] = {
+static const struct mfd_cell lm36274_devices[] = {
+ LM363X_REGULATOR(LM36274_BOOST),
+ LM363X_REGULATOR(LM36274_LDO_POS),
+ LM363X_REGULATOR(LM36274_LDO_NEG),
{
- .name = "ti-lmu-backlight",
- .id = LM3697,
- .of_compatible = "ti,lm3697-backlight",
- },
- /* Monitoring driver for open/short circuit detection */
- {
- .name = "ti-lmu-fault-monitor",
- .id = LM3697,
- .of_compatible = "ti,lm3697-fault-monitor",
+ .name = "lm36274-leds",
+ .id = LM36274,
+ .of_compatible = "ti,lm36274-backlight",
},
};
@@ -134,7 +131,7 @@ TI_LMU_DATA(lm3631, LM3631_MAX_REG);
TI_LMU_DATA(lm3632, LM3632_MAX_REG);
TI_LMU_DATA(lm3633, LM3633_MAX_REG);
TI_LMU_DATA(lm3695, LM3695_MAX_REG);
-TI_LMU_DATA(lm3697, LM3697_MAX_REG);
+TI_LMU_DATA(lm36274, LM36274_MAX_REG);
static int ti_lmu_probe(struct i2c_client *cl, const struct i2c_device_id *id)
{
@@ -203,7 +200,7 @@ static const struct of_device_id ti_lmu_of_match[] = {
{ .compatible = "ti,lm3632", .data = &lm3632_data },
{ .compatible = "ti,lm3633", .data = &lm3633_data },
{ .compatible = "ti,lm3695", .data = &lm3695_data },
- { .compatible = "ti,lm3697", .data = &lm3697_data },
+ { .compatible = "ti,lm36274", .data = &lm36274_data },
{ }
};
MODULE_DEVICE_TABLE(of, ti_lmu_of_match);
@@ -213,7 +210,7 @@ static const struct i2c_device_id ti_lmu_ids[] = {
{ "lm3632", LM3632 },
{ "lm3633", LM3633 },
{ "lm3695", LM3695 },
- { "lm3697", LM3697 },
+ { "lm36274", LM36274 },
{ }
};
MODULE_DEVICE_TABLE(i2c, ti_lmu_ids);
diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c
index 17f839dee976..d9fcfd3b5af0 100644
--- a/drivers/misc/lkdtm/bugs.c
+++ b/drivers/misc/lkdtm/bugs.c
@@ -236,7 +236,7 @@ void lkdtm_CORRUPT_USER_DS(void)
set_fs(KERNEL_DS);
/* Make sure we do not keep running with a KERNEL_DS! */
- force_sig(SIGKILL, current);
+ force_sig(SIGKILL);
}
/* Test that VMAP_STACK is actually allocating with a leading guard page */
diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c
index 8a1428d4f138..bba49abb6750 100644
--- a/drivers/misc/lkdtm/core.c
+++ b/drivers/misc/lkdtm/core.c
@@ -15,7 +15,7 @@
*
* Debugfs support added by Simon Kagstrom <simon.kagstrom@netinsight.net>
*
- * See Documentation/fault-injection/provoke-crashes.txt for instructions
+ * See Documentation/fault-injection/provoke-crashes.rst for instructions
*/
#include "lkdtm.h"
#include <linux/fs.h>
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index ef0e476b2525..49abbc52457d 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -48,7 +48,7 @@ config MTD_MS02NV
If you want to compile this driver as a module ( = code which can be
inserted in and removed from the running kernel whenever you want),
- say M here and read <file:Documentation/kbuild/modules.txt>.
+ say M here and read <file:Documentation/kbuild/modules.rst>.
The module will be called ms02-nv.
config MTD_DATAFLASH
diff --git a/drivers/mtd/nand/raw/ingenic/Kconfig b/drivers/mtd/nand/raw/ingenic/Kconfig
index 19a96ce515c1..66b7cffdb0c2 100644
--- a/drivers/mtd/nand/raw/ingenic/Kconfig
+++ b/drivers/mtd/nand/raw/ingenic/Kconfig
@@ -16,7 +16,7 @@ config MTD_NAND_JZ4780
if MTD_NAND_JZ4780
config MTD_NAND_INGENIC_ECC
- tristate
+ bool
config MTD_NAND_JZ4740_ECC
tristate "Hardware BCH support for JZ4740 SoC"
diff --git a/drivers/mtd/nand/raw/ingenic/Makefile b/drivers/mtd/nand/raw/ingenic/Makefile
index 1ac4f455baea..b63d36889263 100644
--- a/drivers/mtd/nand/raw/ingenic/Makefile
+++ b/drivers/mtd/nand/raw/ingenic/Makefile
@@ -2,7 +2,9 @@
obj-$(CONFIG_MTD_NAND_JZ4740) += jz4740_nand.o
obj-$(CONFIG_MTD_NAND_JZ4780) += ingenic_nand.o
-obj-$(CONFIG_MTD_NAND_INGENIC_ECC) += ingenic_ecc.o
+ingenic_nand-y += ingenic_nand_drv.o
+ingenic_nand-$(CONFIG_MTD_NAND_INGENIC_ECC) += ingenic_ecc.o
+
obj-$(CONFIG_MTD_NAND_JZ4740_ECC) += jz4740_ecc.o
obj-$(CONFIG_MTD_NAND_JZ4725B_BCH) += jz4725b_bch.o
obj-$(CONFIG_MTD_NAND_JZ4780_BCH) += jz4780_bch.o
diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
index d3e085c5685a..c954189606f6 100644
--- a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
+++ b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
@@ -30,7 +30,6 @@ int ingenic_ecc_calculate(struct ingenic_ecc *ecc,
{
return ecc->ops->calculate(ecc, params, buf, ecc_code);
}
-EXPORT_SYMBOL(ingenic_ecc_calculate);
/**
* ingenic_ecc_correct() - detect and correct bit errors
@@ -51,7 +50,6 @@ int ingenic_ecc_correct(struct ingenic_ecc *ecc,
{
return ecc->ops->correct(ecc, params, buf, ecc_code);
}
-EXPORT_SYMBOL(ingenic_ecc_correct);
/**
* ingenic_ecc_get() - get the ECC controller device
@@ -111,7 +109,6 @@ struct ingenic_ecc *of_ingenic_ecc_get(struct device_node *of_node)
}
return ecc;
}
-EXPORT_SYMBOL(of_ingenic_ecc_get);
/**
* ingenic_ecc_release() - release the ECC controller device
@@ -122,7 +119,6 @@ void ingenic_ecc_release(struct ingenic_ecc *ecc)
clk_disable_unprepare(ecc->clk);
put_device(ecc->dev);
}
-EXPORT_SYMBOL(ingenic_ecc_release);
int ingenic_ecc_probe(struct platform_device *pdev)
{
@@ -159,8 +155,3 @@ int ingenic_ecc_probe(struct platform_device *pdev)
return 0;
}
EXPORT_SYMBOL(ingenic_ecc_probe);
-
-MODULE_AUTHOR("Alex Smith <alex@alex-smith.me.uk>");
-MODULE_AUTHOR("Harvey Hunt <harveyhuntnexus@gmail.com>");
-MODULE_DESCRIPTION("Ingenic ECC common driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_nand.c b/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c
index d7b7c0f13909..d7b7c0f13909 100644
--- a/drivers/mtd/nand/raw/ingenic/ingenic_nand.c
+++ b/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c
diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c
index b021a5720b42..89773293c64d 100644
--- a/drivers/mtd/nand/raw/sunxi_nand.c
+++ b/drivers/mtd/nand/raw/sunxi_nand.c
@@ -51,6 +51,7 @@
#define NFC_REG_USER_DATA(x) (0x0050 + ((x) * 4))
#define NFC_REG_SPARE_AREA 0x00A0
#define NFC_REG_PAT_ID 0x00A4
+#define NFC_REG_MDMA_CNT 0x00C4
#define NFC_RAM0_BASE 0x0400
#define NFC_RAM1_BASE 0x0800
@@ -69,6 +70,7 @@
#define NFC_PAGE_SHIFT(x) (((x) < 10 ? 0 : (x) - 10) << 8)
#define NFC_SAM BIT(12)
#define NFC_RAM_METHOD BIT(14)
+#define NFC_DMA_TYPE_NORMAL BIT(15)
#define NFC_DEBUG_CTL BIT(31)
/* define bit use in NFC_ST */
@@ -205,14 +207,13 @@ static inline struct sunxi_nand_chip *to_sunxi_nand(struct nand_chip *nand)
* NAND Controller capabilities structure: stores NAND controller capabilities
* for distinction between compatible strings.
*
- * @sram_through_ahb: On A23, we choose to access the internal RAM through AHB
- * instead of MBUS (less configuration). A10, A10s, A13 and
- * A20 use the MBUS but no extra configuration is needed.
+ * @extra_mbus_conf: Contrary to A10, A10s and A13, accessing internal RAM
+ * through MBUS on A23/A33 needs extra configuration.
* @reg_io_data: I/O data register
* @dma_maxburst: DMA maxburst
*/
struct sunxi_nfc_caps {
- bool sram_through_ahb;
+ bool extra_mbus_conf;
unsigned int reg_io_data;
unsigned int dma_maxburst;
};
@@ -368,28 +369,12 @@ static int sunxi_nfc_dma_op_prepare(struct sunxi_nfc *nfc, const void *buf,
goto err_unmap_buf;
}
- /*
- * On A23, we suppose the "internal RAM" (p.12 of the NFC user manual)
- * refers to the NAND controller's internal SRAM. This memory is mapped
- * and so is accessible from the AHB. It seems that it can also be
- * accessed by the MBUS. MBUS accesses are mandatory when using the
- * internal DMA instead of the external DMA engine.
- *
- * During DMA I/O operation, either we access this memory from the AHB
- * by clearing the NFC_RAM_METHOD bit, or we set the bit and use the
- * MBUS. In this case, we should also configure the MBUS DMA length
- * NFC_REG_MDMA_CNT(0xC4) to be chunksize * nchunks. NAND I/O over MBUS
- * are also limited to 32kiB pages.
- */
- if (nfc->caps->sram_through_ahb)
- writel(readl(nfc->regs + NFC_REG_CTL) & ~NFC_RAM_METHOD,
- nfc->regs + NFC_REG_CTL);
- else
- writel(readl(nfc->regs + NFC_REG_CTL) | NFC_RAM_METHOD,
- nfc->regs + NFC_REG_CTL);
-
+ writel(readl(nfc->regs + NFC_REG_CTL) | NFC_RAM_METHOD,
+ nfc->regs + NFC_REG_CTL);
writel(nchunks, nfc->regs + NFC_REG_SECTOR_NUM);
writel(chunksize, nfc->regs + NFC_REG_CNT);
+ if (nfc->caps->extra_mbus_conf)
+ writel(chunksize * nchunks, nfc->regs + NFC_REG_MDMA_CNT);
dmat = dmaengine_submit(dmad);
@@ -2151,6 +2136,11 @@ static int sunxi_nfc_probe(struct platform_device *pdev)
dmac_cfg.src_maxburst = nfc->caps->dma_maxburst;
dmac_cfg.dst_maxburst = nfc->caps->dma_maxburst;
dmaengine_slave_config(nfc->dmac, &dmac_cfg);
+
+ if (nfc->caps->extra_mbus_conf)
+ writel(readl(nfc->regs + NFC_REG_CTL) |
+ NFC_DMA_TYPE_NORMAL, nfc->regs + NFC_REG_CTL);
+
} else {
dev_warn(dev, "failed to request rxtx DMA channel\n");
}
@@ -2200,7 +2190,7 @@ static const struct sunxi_nfc_caps sunxi_nfc_a10_caps = {
};
static const struct sunxi_nfc_caps sunxi_nfc_a23_caps = {
- .sram_through_ahb = true,
+ .extra_mbus_conf = true,
.reg_io_data = NFC_REG_A23_IO_DATA,
.dma_maxburst = 8,
};
diff --git a/drivers/mtd/nand/spi/gigadevice.c b/drivers/mtd/nand/spi/gigadevice.c
index e5586390026a..e6c646007cda 100644
--- a/drivers/mtd/nand/spi/gigadevice.c
+++ b/drivers/mtd/nand/spi/gigadevice.c
@@ -180,7 +180,7 @@ static const struct spinand_info gigadevice_spinand_table[] = {
SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
gd5fxgq4xa_ecc_get_status)),
SPINAND_INFO("GD5F4GQ4xA", 0xF4,
- NAND_MEMORG(1, 2048, 64, 64, 4096, 40, 1, 1, 1),
+ NAND_MEMORG(1, 2048, 64, 64, 4096, 80, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
diff --git a/drivers/mtd/nand/spi/macronix.c b/drivers/mtd/nand/spi/macronix.c
index 6502727049a8..21def3f8fb36 100644
--- a/drivers/mtd/nand/spi/macronix.c
+++ b/drivers/mtd/nand/spi/macronix.c
@@ -100,7 +100,7 @@ static int mx35lf1ge4ab_ecc_get_status(struct spinand_device *spinand,
static const struct spinand_info macronix_spinand_table[] = {
SPINAND_INFO("MX35LF1GE4AB", 0x12,
- NAND_MEMORG(1, 2048, 64, 64, 1024, 40, 1, 1, 1),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(4, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
@@ -109,7 +109,7 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
mx35lf1ge4ab_ecc_get_status)),
SPINAND_INFO("MX35LF2GE4AB", 0x22,
- NAND_MEMORG(1, 2048, 64, 64, 2048, 20, 2, 1, 1),
+ NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 2, 1, 1),
NAND_ECCREQ(4, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index dfd6f315d2cc..e3b25f310936 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -325,17 +325,17 @@ static u16 __get_link_speed(struct port *port)
default:
/* unknown speed value from ethtool. shouldn't happen */
if (slave->speed != SPEED_UNKNOWN)
- pr_warn_once("%s: unknown ethtool speed (%d) for port %d (set it to 0)\n",
+ pr_warn_once("%s: (slave %s): unknown ethtool speed (%d) for port %d (set it to 0)\n",
slave->bond->dev->name,
- slave->speed,
+ slave->dev->name, slave->speed,
port->actor_port_number);
speed = 0;
break;
}
}
- netdev_dbg(slave->bond->dev, "Port %d Received link speed %d update from adapter\n",
- port->actor_port_number, speed);
+ slave_dbg(slave->bond->dev, slave->dev, "Port %d Received link speed %d update from adapter\n",
+ port->actor_port_number, speed);
return speed;
}
@@ -359,14 +359,14 @@ static u8 __get_duplex(struct port *port)
switch (slave->duplex) {
case DUPLEX_FULL:
retval = 0x1;
- netdev_dbg(slave->bond->dev, "Port %d Received status full duplex update from adapter\n",
- port->actor_port_number);
+ slave_dbg(slave->bond->dev, slave->dev, "Port %d Received status full duplex update from adapter\n",
+ port->actor_port_number);
break;
case DUPLEX_HALF:
default:
retval = 0x0;
- netdev_dbg(slave->bond->dev, "Port %d Received status NOT full duplex update from adapter\n",
- port->actor_port_number);
+ slave_dbg(slave->bond->dev, slave->dev, "Port %d Received status NOT full duplex update from adapter\n",
+ port->actor_port_number);
break;
}
}
@@ -500,10 +500,12 @@ static void __record_pdu(struct lacpdu *lacpdu, struct port *port)
if ((port->sm_vars & AD_PORT_MATCHED) &&
(lacpdu->actor_state & AD_STATE_SYNCHRONIZATION)) {
partner->port_state |= AD_STATE_SYNCHRONIZATION;
- pr_debug("%s partner sync=1\n", port->slave->dev->name);
+ slave_dbg(port->slave->bond->dev, port->slave->dev,
+ "partner sync=1\n");
} else {
partner->port_state &= ~AD_STATE_SYNCHRONIZATION;
- pr_debug("%s partner sync=0\n", port->slave->dev->name);
+ slave_dbg(port->slave->bond->dev, port->slave->dev,
+ "partner sync=0\n");
}
}
}
@@ -789,8 +791,9 @@ static inline void __update_lacpdu_from_port(struct port *port)
lacpdu->actor_port_priority = htons(port->actor_port_priority);
lacpdu->actor_port = htons(port->actor_port_number);
lacpdu->actor_state = port->actor_oper_port_state;
- pr_debug("update lacpdu: %s, actor port state %x\n",
- port->slave->dev->name, port->actor_oper_port_state);
+ slave_dbg(port->slave->bond->dev, port->slave->dev,
+ "update lacpdu: actor port state %x\n",
+ port->actor_oper_port_state);
/* lacpdu->reserved_3_1 initialized
* lacpdu->tlv_type_partner_info initialized
@@ -1022,11 +1025,11 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
/* check if the state machine was changed */
if (port->sm_mux_state != last_state) {
- pr_debug("Mux Machine: Port=%d (%s), Last State=%d, Curr State=%d\n",
- port->actor_port_number,
- port->slave->dev->name,
- last_state,
- port->sm_mux_state);
+ slave_dbg(port->slave->bond->dev, port->slave->dev,
+ "Mux Machine: Port=%d, Last State=%d, Curr State=%d\n",
+ port->actor_port_number,
+ last_state,
+ port->sm_mux_state);
switch (port->sm_mux_state) {
case AD_MUX_DETACHED:
port->actor_oper_port_state &= ~AD_STATE_SYNCHRONIZATION;
@@ -1140,11 +1143,11 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
/* check if the State machine was changed or new lacpdu arrived */
if ((port->sm_rx_state != last_state) || (lacpdu)) {
- pr_debug("Rx Machine: Port=%d (%s), Last State=%d, Curr State=%d\n",
- port->actor_port_number,
- port->slave->dev->name,
- last_state,
- port->sm_rx_state);
+ slave_dbg(port->slave->bond->dev, port->slave->dev,
+ "Rx Machine: Port=%d, Last State=%d, Curr State=%d\n",
+ port->actor_port_number,
+ last_state,
+ port->sm_rx_state);
switch (port->sm_rx_state) {
case AD_RX_INITIALIZE:
if (!(port->actor_oper_port_key & AD_DUPLEX_KEY_MASKS))
@@ -1192,9 +1195,8 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
/* detect loopback situation */
if (MAC_ADDRESS_EQUAL(&(lacpdu->actor_system),
&(port->actor_system))) {
- netdev_err(port->slave->bond->dev, "An illegal loopback occurred on adapter (%s)\n"
- "Check the configuration to verify that all adapters are connected to 802.3ad compliant switch ports\n",
- port->slave->dev->name);
+ slave_err(port->slave->bond->dev, port->slave->dev, "An illegal loopback occurred on slave\n"
+ "Check the configuration to verify that all adapters are connected to 802.3ad compliant switch ports\n");
return;
}
__update_selected(lacpdu, port);
@@ -1263,8 +1265,10 @@ static void ad_tx_machine(struct port *port)
__update_lacpdu_from_port(port);
if (ad_lacpdu_send(port) >= 0) {
- pr_debug("Sent LACPDU on port %d\n",
- port->actor_port_number);
+ slave_dbg(port->slave->bond->dev,
+ port->slave->dev,
+ "Sent LACPDU on port %d\n",
+ port->actor_port_number);
/* mark ntt as false, so it will not be sent
* again until demanded
@@ -1343,9 +1347,10 @@ static void ad_periodic_machine(struct port *port)
/* check if the state machine was changed */
if (port->sm_periodic_state != last_state) {
- pr_debug("Periodic Machine: Port=%d, Last State=%d, Curr State=%d\n",
- port->actor_port_number, last_state,
- port->sm_periodic_state);
+ slave_dbg(port->slave->bond->dev, port->slave->dev,
+ "Periodic Machine: Port=%d, Last State=%d, Curr State=%d\n",
+ port->actor_port_number, last_state,
+ port->sm_periodic_state);
switch (port->sm_periodic_state) {
case AD_NO_PERIODIC:
port->sm_periodic_timer_counter = 0;
@@ -1421,9 +1426,9 @@ static void ad_port_selection_logic(struct port *port, bool *update_slave_arr)
port->next_port_in_aggregator = NULL;
port->actor_port_aggregator_identifier = 0;
- netdev_dbg(bond->dev, "Port %d left LAG %d\n",
- port->actor_port_number,
- temp_aggregator->aggregator_identifier);
+ slave_dbg(bond->dev, port->slave->dev, "Port %d left LAG %d\n",
+ port->actor_port_number,
+ temp_aggregator->aggregator_identifier);
/* if the aggregator is empty, clear its
* parameters, and set it ready to be attached
*/
@@ -1436,10 +1441,10 @@ static void ad_port_selection_logic(struct port *port, bool *update_slave_arr)
/* meaning: the port was related to an aggregator
* but was not on the aggregator port list
*/
- net_warn_ratelimited("%s: Warning: Port %d (on %s) was related to aggregator %d but was not on its port list\n",
+ net_warn_ratelimited("%s: (slave %s): Warning: Port %d was related to aggregator %d but was not on its port list\n",
port->slave->bond->dev->name,
- port->actor_port_number,
port->slave->dev->name,
+ port->actor_port_number,
port->aggregator->aggregator_identifier);
}
}
@@ -1470,9 +1475,9 @@ static void ad_port_selection_logic(struct port *port, bool *update_slave_arr)
port->next_port_in_aggregator = aggregator->lag_ports;
port->aggregator->num_of_ports++;
aggregator->lag_ports = port;
- netdev_dbg(bond->dev, "Port %d joined LAG %d(existing LAG)\n",
- port->actor_port_number,
- port->aggregator->aggregator_identifier);
+ slave_dbg(bond->dev, slave->dev, "Port %d joined LAG %d (existing LAG)\n",
+ port->actor_port_number,
+ port->aggregator->aggregator_identifier);
/* mark this port as selected */
port->sm_vars |= AD_PORT_SELECTED;
@@ -1517,12 +1522,13 @@ static void ad_port_selection_logic(struct port *port, bool *update_slave_arr)
/* mark this port as selected */
port->sm_vars |= AD_PORT_SELECTED;
- netdev_dbg(bond->dev, "Port %d joined LAG %d(new LAG)\n",
- port->actor_port_number,
- port->aggregator->aggregator_identifier);
+ slave_dbg(bond->dev, port->slave->dev, "Port %d joined LAG %d (new LAG)\n",
+ port->actor_port_number,
+ port->aggregator->aggregator_identifier);
} else {
- netdev_err(bond->dev, "Port %d (on %s) did not find a suitable aggregator\n",
- port->actor_port_number, port->slave->dev->name);
+ slave_err(bond->dev, port->slave->dev,
+ "Port %d did not find a suitable aggregator\n",
+ port->actor_port_number);
}
}
/* if all aggregator's ports are READY_N == TRUE, set ready=TRUE
@@ -1601,8 +1607,9 @@ static struct aggregator *ad_agg_selection_test(struct aggregator *best,
break;
default:
- net_warn_ratelimited("%s: Impossible agg select mode %d\n",
+ net_warn_ratelimited("%s: (slave %s): Impossible agg select mode %d\n",
curr->slave->bond->dev->name,
+ curr->slave->dev->name,
__get_agg_selection_mode(curr->lag_ports));
break;
}
@@ -1703,36 +1710,37 @@ static void ad_agg_selection_logic(struct aggregator *agg,
/* if there is new best aggregator, activate it */
if (best) {
- netdev_dbg(bond->dev, "best Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
+ netdev_dbg(bond->dev, "(slave %s): best Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
+ best->slave ? best->slave->dev->name : "NULL",
best->aggregator_identifier, best->num_of_ports,
best->actor_oper_aggregator_key,
best->partner_oper_aggregator_key,
best->is_individual, best->is_active);
- netdev_dbg(bond->dev, "best ports %p slave %p %s\n",
- best->lag_ports, best->slave,
- best->slave ? best->slave->dev->name : "NULL");
+ netdev_dbg(bond->dev, "(slave %s): best ports %p slave %p\n",
+ best->slave ? best->slave->dev->name : "NULL",
+ best->lag_ports, best->slave);
bond_for_each_slave_rcu(bond, slave, iter) {
agg = &(SLAVE_AD_INFO(slave)->aggregator);
- netdev_dbg(bond->dev, "Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
- agg->aggregator_identifier, agg->num_of_ports,
- agg->actor_oper_aggregator_key,
- agg->partner_oper_aggregator_key,
- agg->is_individual, agg->is_active);
+ slave_dbg(bond->dev, slave->dev, "Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
+ agg->aggregator_identifier, agg->num_of_ports,
+ agg->actor_oper_aggregator_key,
+ agg->partner_oper_aggregator_key,
+ agg->is_individual, agg->is_active);
}
- /* check if any partner replys */
- if (best->is_individual) {
+ /* check if any partner replies */
+ if (best->is_individual)
net_warn_ratelimited("%s: Warning: No 802.3ad response from the link partner for any adapters in the bond\n",
- best->slave ?
- best->slave->bond->dev->name : "NULL");
- }
+ bond->dev->name);
best->is_active = 1;
- netdev_dbg(bond->dev, "LAG %d chosen as the active LAG\n",
+ netdev_dbg(bond->dev, "(slave %s): LAG %d chosen as the active LAG\n",
+ best->slave ? best->slave->dev->name : "NULL",
best->aggregator_identifier);
- netdev_dbg(bond->dev, "Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
+ netdev_dbg(bond->dev, "(slave %s): Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
+ best->slave ? best->slave->dev->name : "NULL",
best->aggregator_identifier, best->num_of_ports,
best->actor_oper_aggregator_key,
best->partner_oper_aggregator_key,
@@ -1788,7 +1796,9 @@ static void ad_clear_agg(struct aggregator *aggregator)
aggregator->lag_ports = NULL;
aggregator->is_active = 0;
aggregator->num_of_ports = 0;
- pr_debug("LAG %d was cleared\n",
+ pr_debug("%s: LAG %d was cleared\n",
+ aggregator->slave ?
+ aggregator->slave->dev->name : "NULL",
aggregator->aggregator_identifier);
}
}
@@ -1885,9 +1895,10 @@ static void ad_enable_collecting_distributing(struct port *port,
bool *update_slave_arr)
{
if (port->aggregator->is_active) {
- pr_debug("Enabling port %d(LAG %d)\n",
- port->actor_port_number,
- port->aggregator->aggregator_identifier);
+ slave_dbg(port->slave->bond->dev, port->slave->dev,
+ "Enabling port %d (LAG %d)\n",
+ port->actor_port_number,
+ port->aggregator->aggregator_identifier);
__enable_port(port);
/* Slave array needs update */
*update_slave_arr = true;
@@ -1905,9 +1916,10 @@ static void ad_disable_collecting_distributing(struct port *port,
if (port->aggregator &&
!MAC_ADDRESS_EQUAL(&(port->aggregator->partner_system),
&(null_mac_addr))) {
- pr_debug("Disabling port %d(LAG %d)\n",
- port->actor_port_number,
- port->aggregator->aggregator_identifier);
+ slave_dbg(port->slave->bond->dev, port->slave->dev,
+ "Disabling port %d (LAG %d)\n",
+ port->actor_port_number,
+ port->aggregator->aggregator_identifier);
__disable_port(port);
/* Slave array needs an update */
*update_slave_arr = true;
@@ -1920,7 +1932,7 @@ static void ad_disable_collecting_distributing(struct port *port,
* @port: the port we're looking at
*/
static void ad_marker_info_received(struct bond_marker *marker_info,
- struct port *port)
+ struct port *port)
{
struct bond_marker marker;
@@ -1933,10 +1945,10 @@ static void ad_marker_info_received(struct bond_marker *marker_info,
marker.tlv_type = AD_MARKER_RESPONSE_SUBTYPE;
/* send the marker response */
- if (ad_marker_send(port, &marker) >= 0) {
- pr_debug("Sent Marker Response on port %d\n",
- port->actor_port_number);
- }
+ if (ad_marker_send(port, &marker) >= 0)
+ slave_dbg(port->slave->bond->dev, port->slave->dev,
+ "Sent Marker Response on port %d\n",
+ port->actor_port_number);
}
/**
@@ -2085,13 +2097,12 @@ void bond_3ad_unbind_slave(struct slave *slave)
/* if slave is null, the whole port is not initialized */
if (!port->slave) {
- netdev_warn(bond->dev, "Trying to unbind an uninitialized port on %s\n",
- slave->dev->name);
+ slave_warn(bond->dev, slave->dev, "Trying to unbind an uninitialized port\n");
goto out;
}
- netdev_dbg(bond->dev, "Unbinding Link Aggregation Group %d\n",
- aggregator->aggregator_identifier);
+ slave_dbg(bond->dev, slave->dev, "Unbinding Link Aggregation Group %d\n",
+ aggregator->aggregator_identifier);
/* Tell the partner that this port is not suitable for aggregation */
port->actor_oper_port_state &= ~AD_STATE_SYNCHRONIZATION;
@@ -2129,13 +2140,13 @@ void bond_3ad_unbind_slave(struct slave *slave)
* new aggregator
*/
if ((new_aggregator) && ((!new_aggregator->lag_ports) || ((new_aggregator->lag_ports == port) && !new_aggregator->lag_ports->next_port_in_aggregator))) {
- netdev_dbg(bond->dev, "Some port(s) related to LAG %d - replacing with LAG %d\n",
- aggregator->aggregator_identifier,
- new_aggregator->aggregator_identifier);
+ slave_dbg(bond->dev, slave->dev, "Some port(s) related to LAG %d - replacing with LAG %d\n",
+ aggregator->aggregator_identifier,
+ new_aggregator->aggregator_identifier);
if ((new_aggregator->lag_ports == port) &&
new_aggregator->is_active) {
- netdev_info(bond->dev, "Removing an active aggregator\n");
+ slave_info(bond->dev, slave->dev, "Removing an active aggregator\n");
select_new_active_agg = 1;
}
@@ -2166,7 +2177,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
ad_agg_selection_logic(__get_first_agg(port),
&dummy_slave_update);
} else {
- netdev_warn(bond->dev, "unbinding aggregator, and could not find a new aggregator for its ports\n");
+ slave_warn(bond->dev, slave->dev, "unbinding aggregator, and could not find a new aggregator for its ports\n");
}
} else {
/* in case that the only port related to this
@@ -2175,7 +2186,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
select_new_active_agg = aggregator->is_active;
ad_clear_agg(aggregator);
if (select_new_active_agg) {
- netdev_info(bond->dev, "Removing an active aggregator\n");
+ slave_info(bond->dev, slave->dev, "Removing an active aggregator\n");
/* select new active aggregator */
temp_aggregator = __get_first_agg(port);
if (temp_aggregator)
@@ -2185,7 +2196,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
}
}
- netdev_dbg(bond->dev, "Unbinding port %d\n", port->actor_port_number);
+ slave_dbg(bond->dev, slave->dev, "Unbinding port %d\n", port->actor_port_number);
/* find the aggregator that this port is connected to */
bond_for_each_slave(bond, slave_iter, iter) {
@@ -2208,7 +2219,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
select_new_active_agg = temp_aggregator->is_active;
ad_clear_agg(temp_aggregator);
if (select_new_active_agg) {
- netdev_info(bond->dev, "Removing an active aggregator\n");
+ slave_info(bond->dev, slave->dev, "Removing an active aggregator\n");
/* select new active aggregator */
ad_agg_selection_logic(__get_first_agg(port),
&dummy_slave_update);
@@ -2379,9 +2390,9 @@ static int bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave)
switch (lacpdu->subtype) {
case AD_TYPE_LACPDU:
ret = RX_HANDLER_CONSUMED;
- netdev_dbg(slave->bond->dev,
- "Received LACPDU on port %d slave %s\n",
- port->actor_port_number, slave->dev->name);
+ slave_dbg(slave->bond->dev, slave->dev,
+ "Received LACPDU on port %d\n",
+ port->actor_port_number);
/* Protect against concurrent state machines */
spin_lock(&slave->bond->mode_lock);
ad_rx_machine(lacpdu, port);
@@ -2395,18 +2406,18 @@ static int bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave)
marker = (struct bond_marker *)lacpdu;
switch (marker->tlv_type) {
case AD_MARKER_INFORMATION_SUBTYPE:
- netdev_dbg(slave->bond->dev, "Received Marker Information on port %d\n",
- port->actor_port_number);
+ slave_dbg(slave->bond->dev, slave->dev, "Received Marker Information on port %d\n",
+ port->actor_port_number);
ad_marker_info_received(marker, port);
break;
case AD_MARKER_RESPONSE_SUBTYPE:
- netdev_dbg(slave->bond->dev, "Received Marker Response on port %d\n",
- port->actor_port_number);
+ slave_dbg(slave->bond->dev, slave->dev, "Received Marker Response on port %d\n",
+ port->actor_port_number);
ad_marker_response_received(marker, port);
break;
default:
- netdev_dbg(slave->bond->dev, "Received an unknown Marker subtype on slot %d\n",
- port->actor_port_number);
+ slave_dbg(slave->bond->dev, slave->dev, "Received an unknown Marker subtype on port %d\n",
+ port->actor_port_number);
stat = &SLAVE_AD_INFO(slave)->stats.marker_unknown_rx;
atomic64_inc(stat);
stat = &BOND_AD_INFO(bond).stats.marker_unknown_rx;
@@ -2456,9 +2467,10 @@ static void ad_update_actor_keys(struct port *port, bool reset)
if (!reset) {
if (!speed) {
- netdev_err(port->slave->dev,
- "speed changed to 0 for port %s",
- port->slave->dev->name);
+ slave_err(port->slave->bond->dev,
+ port->slave->dev,
+ "speed changed to 0 on port %d\n",
+ port->actor_port_number);
} else if (duplex && ospeed != speed) {
/* Speed change restarts LACP state-machine */
port->sm_vars |= AD_PORT_BEGIN;
@@ -2483,17 +2495,16 @@ void bond_3ad_adapter_speed_duplex_changed(struct slave *slave)
/* if slave is null, the whole port is not initialized */
if (!port->slave) {
- netdev_warn(slave->bond->dev,
- "speed/duplex changed for uninitialized port %s\n",
- slave->dev->name);
+ slave_warn(slave->bond->dev, slave->dev,
+ "speed/duplex changed for uninitialized port\n");
return;
}
spin_lock_bh(&slave->bond->mode_lock);
ad_update_actor_keys(port, false);
spin_unlock_bh(&slave->bond->mode_lock);
- netdev_dbg(slave->bond->dev, "Port %d slave %s changed speed/duplex\n",
- port->actor_port_number, slave->dev->name);
+ slave_dbg(slave->bond->dev, slave->dev, "Port %d changed speed/duplex\n",
+ port->actor_port_number);
}
/**
@@ -2513,8 +2524,7 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
/* if slave is null, the whole port is not initialized */
if (!port->slave) {
- netdev_warn(slave->bond->dev, "link status changed for uninitialized port on %s\n",
- slave->dev->name);
+ slave_warn(slave->bond->dev, slave->dev, "link status changed for uninitialized port\n");
return;
}
@@ -2539,9 +2549,9 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
spin_unlock_bh(&slave->bond->mode_lock);
- netdev_dbg(slave->bond->dev, "Port %d changed link status to %s\n",
- port->actor_port_number,
- link == BOND_LINK_UP ? "UP" : "DOWN");
+ slave_dbg(slave->bond->dev, slave->dev, "Port %d changed link status to %s\n",
+ port->actor_port_number,
+ link == BOND_LINK_UP ? "UP" : "DOWN");
/* RTNL is held and mode_lock is released so it's safe
* to update slave_array here.
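Note: with the bond_3ad.c conversion above, the bond and slave names no longer have to be threaded through every format string by hand. Assuming the "(slave %s): " prefix sketched earlier, a converted call is expected to render roughly as in the comment below (device names are hypothetical):
/* slave_info(bond->dev, slave->dev, "link status definitely up\n");
 *
 * would log approximately:
 *
 *	bond0: (slave eth1): link status definitely up
 */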
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 790e41c6fdd0..8c79bad2a9a5 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -300,7 +300,7 @@ static int rlb_arp_recv(const struct sk_buff *skb, struct bonding *bond,
if (arp->op_code == htons(ARPOP_REPLY)) {
/* update rx hash table for this ARP */
rlb_update_entry_from_arp(bond, arp);
- netdev_dbg(bond->dev, "Server received an ARP Reply from client\n");
+ slave_dbg(bond->dev, slave->dev, "Server received an ARP Reply from client\n");
}
out:
return RX_HANDLER_ANOTHER;
@@ -442,8 +442,9 @@ static void rlb_update_client(struct rlb_client_info *client_info)
client_info->slave->dev->dev_addr,
client_info->mac_dst);
if (!skb) {
- netdev_err(client_info->slave->bond->dev,
- "failed to create an ARP packet\n");
+ slave_err(client_info->slave->bond->dev,
+ client_info->slave->dev,
+ "failed to create an ARP packet\n");
continue;
}
@@ -667,14 +668,15 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
if (tx_slave)
bond_hw_addr_copy(arp->mac_src, tx_slave->dev->dev_addr,
tx_slave->dev->addr_len);
- netdev_dbg(bond->dev, "Server sent ARP Reply packet\n");
+ netdev_dbg(bond->dev, "(slave %s): Server sent ARP Reply packet\n",
+ tx_slave ? tx_slave->dev->name : "NULL");
} else if (arp->op_code == htons(ARPOP_REQUEST)) {
/* Create an entry in the rx_hashtbl for this client as a
* place holder.
* When the arp reply is received the entry will be updated
* with the correct unicast address of the client.
*/
- rlb_choose_channel(skb, bond);
+ tx_slave = rlb_choose_channel(skb, bond);
/* The ARP reply packets must be delayed so that
* they can cancel out the influence of the ARP request.
@@ -687,7 +689,8 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
* updated with their assigned mac.
*/
rlb_req_update_subnet_clients(bond, arp->ip_src);
- netdev_dbg(bond->dev, "Server sent ARP Request packet\n");
+ netdev_dbg(bond->dev, "(slave %s): Server sent ARP Request packet\n",
+ tx_slave ? tx_slave->dev->name : "NULL");
}
return tx_slave;
@@ -923,9 +926,8 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
skb->priority = TC_PRIO_CONTROL;
skb->dev = slave->dev;
- netdev_dbg(slave->bond->dev,
- "Send learning packet: dev %s mac %pM vlan %d\n",
- slave->dev->name, mac_addr, vid);
+ slave_dbg(slave->bond->dev, slave->dev,
+ "Send learning packet: mac %pM vlan %d\n", mac_addr, vid);
if (vid)
__vlan_hwaccel_put_tag(skb, vlan_proto, vid);
@@ -1016,8 +1018,7 @@ static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[],
memcpy(ss.__data, addr, len);
ss.ss_family = dev->type;
if (dev_set_mac_address(dev, (struct sockaddr *)&ss, NULL)) {
- netdev_err(slave->bond->dev, "dev_set_mac_address of dev %s failed! ALB mode requires that the base driver support setting the hw address also when the network device's interface is open\n",
- dev->name);
+ slave_err(slave->bond->dev, dev, "dev_set_mac_address on slave failed! ALB mode requires that the base driver support setting the hw address also when the network device's interface is open\n");
return -EOPNOTSUPP;
}
return 0;
@@ -1192,12 +1193,11 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
alb_set_slave_mac_addr(slave, free_mac_slave->perm_hwaddr,
free_mac_slave->dev->addr_len);
- netdev_warn(bond->dev, "the hw address of slave %s is in use by the bond; giving it the hw address of %s\n",
- slave->dev->name, free_mac_slave->dev->name);
+ slave_warn(bond->dev, slave->dev, "the slave hw address is in use by the bond; giving it the hw address of %s\n",
+ free_mac_slave->dev->name);
} else if (has_bond_addr) {
- netdev_err(bond->dev, "the hw address of slave %s is in use by the bond; couldn't find a slave with a free hw address to give it (this should not have happened)\n",
- slave->dev->name);
+ slave_err(bond->dev, slave->dev, "the slave hw address is in use by the bond; couldn't find a slave with a free hw address to give it (this should not have happened)\n");
return -EFAULT;
}
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 799fc38c5c34..9b7016abca2f 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -613,8 +613,8 @@ static int bond_set_dev_addr(struct net_device *bond_dev,
{
int err;
- netdev_dbg(bond_dev, "bond_dev=%p slave_dev=%p slave_dev->name=%s slave_dev->addr_len=%d\n",
- bond_dev, slave_dev, slave_dev->name, slave_dev->addr_len);
+ slave_dbg(bond_dev, slave_dev, "bond_dev=%p slave_dev=%p slave_dev->addr_len=%d\n",
+ bond_dev, slave_dev, slave_dev->addr_len);
err = dev_pre_changeaddr_notify(bond_dev, slave_dev->dev_addr, NULL);
if (err)
return err;
@@ -661,8 +661,8 @@ static void bond_do_fail_over_mac(struct bonding *bond,
if (new_active) {
rv = bond_set_dev_addr(bond->dev, new_active->dev);
if (rv)
- netdev_err(bond->dev, "Error %d setting MAC of slave %s\n",
- -rv, bond->dev->name);
+ slave_err(bond->dev, new_active->dev, "Error %d setting bond MAC from slave\n",
+ -rv);
}
break;
case BOND_FOM_FOLLOW:
@@ -692,8 +692,8 @@ static void bond_do_fail_over_mac(struct bonding *bond,
rv = dev_set_mac_address(new_active->dev,
(struct sockaddr *)&ss, NULL);
if (rv) {
- netdev_err(bond->dev, "Error %d setting MAC of slave %s\n",
- -rv, new_active->dev->name);
+ slave_err(bond->dev, new_active->dev, "Error %d setting MAC of new active slave\n",
+ -rv);
goto out;
}
@@ -707,8 +707,8 @@ static void bond_do_fail_over_mac(struct bonding *bond,
rv = dev_set_mac_address(old_active->dev,
(struct sockaddr *)&ss, NULL);
if (rv)
- netdev_err(bond->dev, "Error %d setting MAC of slave %s\n",
- -rv, new_active->dev->name);
+ slave_err(bond->dev, old_active->dev, "Error %d setting MAC of old active slave\n",
+ -rv);
out:
break;
default:
@@ -796,6 +796,8 @@ static bool bond_should_notify_peers(struct bonding *bond)
slave ? slave->dev->name : "NULL");
if (!slave || !bond->send_peer_notif ||
+ bond->send_peer_notif %
+ max(1, bond->params.peer_notif_delay) != 0 ||
!netif_carrier_ok(bond->dev) ||
test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
return false;
@@ -834,9 +836,8 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
if (new_active->link == BOND_LINK_BACK) {
if (bond_uses_primary(bond)) {
- netdev_info(bond->dev, "making interface %s the new active one %d ms earlier\n",
- new_active->dev->name,
- (bond->params.updelay - new_active->delay) * bond->params.miimon);
+ slave_info(bond->dev, new_active->dev, "making interface the new active one %d ms earlier\n",
+ (bond->params.updelay - new_active->delay) * bond->params.miimon);
}
new_active->delay = 0;
@@ -850,8 +851,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
bond_alb_handle_link_change(bond, new_active, BOND_LINK_UP);
} else {
if (bond_uses_primary(bond)) {
- netdev_info(bond->dev, "making interface %s the new active one\n",
- new_active->dev->name);
+ slave_info(bond->dev, new_active->dev, "making interface the new active one\n");
}
}
}
@@ -888,15 +888,18 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
if (netif_running(bond->dev)) {
bond->send_peer_notif =
- bond->params.num_peer_notif;
+ bond->params.num_peer_notif *
+ max(1, bond->params.peer_notif_delay);
should_notify_peers =
bond_should_notify_peers(bond);
}
call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, bond->dev);
- if (should_notify_peers)
+ if (should_notify_peers) {
+ bond->send_peer_notif--;
call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
bond->dev);
+ }
}
}
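Note: the two hunks above scale send_peer_notif by the new peer_notif_delay option (expressed in miimon intervals) and only emit NETDEV_NOTIFY_PEERS when the counter is divisible by that delay. The standalone sketch below, with hypothetical values, walks the countdown to show the resulting spacing; in the driver the counter is decremented from the miimon work item rather than from a loop like this.
#include <stdio.h>

int main(void)
{
	int num_peer_notif = 3;		/* notifications to send */
	int peer_notif_delay = 2;	/* spacing, in miimon intervals */
	int delay = peer_notif_delay > 1 ? peer_notif_delay : 1;
	int send_peer_notif = num_peer_notif * delay;
	int tick;

	for (tick = 0; send_peer_notif; tick++, send_peer_notif--) {
		/* same divisibility test bond_should_notify_peers() now does */
		if (send_peer_notif % delay == 0)
			printf("miimon tick %d: notify peers (counter=%d)\n",
			       tick, send_peer_notif);
	}
	return 0;
}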
@@ -939,7 +942,7 @@ void bond_select_active_slave(struct bonding *bond)
return;
if (netif_carrier_ok(bond->dev))
- netdev_info(bond->dev, "first active interface up!\n");
+ netdev_info(bond->dev, "active interface up!\n");
else
netdev_info(bond->dev, "now running without any active interface!\n");
}
@@ -1077,12 +1080,16 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
#define BOND_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
NETIF_F_RXCSUM | NETIF_F_ALL_TSO)
+#define BOND_MPLS_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
+ NETIF_F_ALL_TSO)
+
static void bond_compute_features(struct bonding *bond)
{
unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
IFF_XMIT_DST_RELEASE_PERM;
netdev_features_t vlan_features = BOND_VLAN_FEATURES;
netdev_features_t enc_features = BOND_ENC_FEATURES;
+ netdev_features_t mpls_features = BOND_MPLS_FEATURES;
struct net_device *bond_dev = bond->dev;
struct list_head *iter;
struct slave *slave;
@@ -1093,6 +1100,7 @@ static void bond_compute_features(struct bonding *bond)
if (!bond_has_slaves(bond))
goto done;
vlan_features &= NETIF_F_ALL_FOR_ALL;
+ mpls_features &= NETIF_F_ALL_FOR_ALL;
bond_for_each_slave(bond, slave, iter) {
vlan_features = netdev_increment_features(vlan_features,
@@ -1101,6 +1109,11 @@ static void bond_compute_features(struct bonding *bond)
enc_features = netdev_increment_features(enc_features,
slave->dev->hw_enc_features,
BOND_ENC_FEATURES);
+
+ mpls_features = netdev_increment_features(mpls_features,
+ slave->dev->mpls_features,
+ BOND_MPLS_FEATURES);
+
dst_release_flag &= slave->dev->priv_flags;
if (slave->dev->hard_header_len > max_hard_header_len)
max_hard_header_len = slave->dev->hard_header_len;
@@ -1114,6 +1127,7 @@ done:
bond_dev->vlan_features = vlan_features;
bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
NETIF_F_GSO_UDP_L4;
+ bond_dev->mpls_features = mpls_features;
bond_dev->gso_max_segs = gso_max_segs;
netif_set_gso_max_size(bond_dev, gso_max_size);
@@ -1369,15 +1383,14 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
if (!bond->params.use_carrier &&
slave_dev->ethtool_ops->get_link == NULL &&
slave_ops->ndo_do_ioctl == NULL) {
- netdev_warn(bond_dev, "no link monitoring support for %s\n",
- slave_dev->name);
+ slave_warn(bond_dev, slave_dev, "no link monitoring support\n");
}
/* already in-use? */
if (netdev_is_rx_handler_busy(slave_dev)) {
NL_SET_ERR_MSG(extack, "Device is in use and cannot be enslaved");
- netdev_err(bond_dev,
- "Error: Device is in use and cannot be enslaved\n");
+ slave_err(bond_dev, slave_dev,
+ "Error: Device is in use and cannot be enslaved\n");
return -EBUSY;
}
@@ -1390,21 +1403,16 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
/* vlan challenged mutual exclusion */
/* no need to lock since we're protected by rtnl_lock */
if (slave_dev->features & NETIF_F_VLAN_CHALLENGED) {
- netdev_dbg(bond_dev, "%s is NETIF_F_VLAN_CHALLENGED\n",
- slave_dev->name);
+ slave_dbg(bond_dev, slave_dev, "is NETIF_F_VLAN_CHALLENGED\n");
if (vlan_uses_dev(bond_dev)) {
NL_SET_ERR_MSG(extack, "Can not enslave VLAN challenged device to VLAN enabled bond");
- netdev_err(bond_dev, "Error: cannot enslave VLAN challenged slave %s on VLAN enabled bond %s\n",
- slave_dev->name, bond_dev->name);
+ slave_err(bond_dev, slave_dev, "Error: cannot enslave VLAN challenged slave on VLAN enabled bond\n");
return -EPERM;
} else {
- netdev_warn(bond_dev, "enslaved VLAN challenged slave %s. Adding VLANs will be blocked as long as %s is part of bond %s\n",
- slave_dev->name, slave_dev->name,
- bond_dev->name);
+ slave_warn(bond_dev, slave_dev, "enslaved VLAN challenged slave. Adding VLANs will be blocked as long as it is part of bond.\n");
}
} else {
- netdev_dbg(bond_dev, "%s is !NETIF_F_VLAN_CHALLENGED\n",
- slave_dev->name);
+ slave_dbg(bond_dev, slave_dev, "is !NETIF_F_VLAN_CHALLENGED\n");
}
/* Old ifenslave binaries are no longer supported. These can
@@ -1414,8 +1422,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
*/
if (slave_dev->flags & IFF_UP) {
NL_SET_ERR_MSG(extack, "Device can not be enslaved while up");
- netdev_err(bond_dev, "%s is up - this may be due to an out of date ifenslave\n",
- slave_dev->name);
+ slave_err(bond_dev, slave_dev, "slave is up - this may be due to an out of date ifenslave\n");
return -EPERM;
}
@@ -1428,14 +1435,14 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
*/
if (!bond_has_slaves(bond)) {
if (bond_dev->type != slave_dev->type) {
- netdev_dbg(bond_dev, "change device type from %d to %d\n",
- bond_dev->type, slave_dev->type);
+ slave_dbg(bond_dev, slave_dev, "change device type from %d to %d\n",
+ bond_dev->type, slave_dev->type);
res = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE,
bond_dev);
res = notifier_to_errno(res);
if (res) {
- netdev_err(bond_dev, "refused to change device type\n");
+ slave_err(bond_dev, slave_dev, "refused to change device type\n");
return -EBUSY;
}
@@ -1455,31 +1462,31 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
}
} else if (bond_dev->type != slave_dev->type) {
NL_SET_ERR_MSG(extack, "Device type is different from other slaves");
- netdev_err(bond_dev, "%s ether type (%d) is different from other slaves (%d), can not enslave it\n",
- slave_dev->name, slave_dev->type, bond_dev->type);
+ slave_err(bond_dev, slave_dev, "ether type (%d) is different from other slaves (%d), can not enslave it\n",
+ slave_dev->type, bond_dev->type);
return -EINVAL;
}
if (slave_dev->type == ARPHRD_INFINIBAND &&
BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
NL_SET_ERR_MSG(extack, "Only active-backup mode is supported for infiniband slaves");
- netdev_warn(bond_dev, "Type (%d) supports only active-backup mode\n",
- slave_dev->type);
+ slave_warn(bond_dev, slave_dev, "Type (%d) supports only active-backup mode\n",
+ slave_dev->type);
res = -EOPNOTSUPP;
goto err_undo_flags;
}
if (!slave_ops->ndo_set_mac_address ||
slave_dev->type == ARPHRD_INFINIBAND) {
- netdev_warn(bond_dev, "The slave device specified does not support setting the MAC address\n");
+ slave_warn(bond_dev, slave_dev, "The slave device specified does not support setting the MAC address\n");
if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP &&
bond->params.fail_over_mac != BOND_FOM_ACTIVE) {
if (!bond_has_slaves(bond)) {
bond->params.fail_over_mac = BOND_FOM_ACTIVE;
- netdev_warn(bond_dev, "Setting fail_over_mac to active for active-backup mode\n");
+ slave_warn(bond_dev, slave_dev, "Setting fail_over_mac to active for active-backup mode\n");
} else {
NL_SET_ERR_MSG(extack, "Slave device does not support setting the MAC address, but fail_over_mac is not set to active");
- netdev_err(bond_dev, "The slave device specified does not support setting the MAC address, but fail_over_mac is not set to active\n");
+ slave_err(bond_dev, slave_dev, "The slave device specified does not support setting the MAC address, but fail_over_mac is not set to active\n");
res = -EOPNOTSUPP;
goto err_undo_flags;
}
@@ -1515,7 +1522,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
new_slave->original_mtu = slave_dev->mtu;
res = dev_set_mtu(slave_dev, bond->dev->mtu);
if (res) {
- netdev_dbg(bond_dev, "Error %d calling dev_set_mtu\n", res);
+ slave_err(bond_dev, slave_dev, "Error %d calling dev_set_mtu\n", res);
goto err_free;
}
@@ -1536,7 +1543,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
res = dev_set_mac_address(slave_dev, (struct sockaddr *)&ss,
extack);
if (res) {
- netdev_dbg(bond_dev, "Error %d calling set_mac_address\n", res);
+ slave_err(bond_dev, slave_dev, "Error %d calling set_mac_address\n", res);
goto err_restore_mtu;
}
}
@@ -1547,7 +1554,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
/* open the slave since the application closed it */
res = dev_open(slave_dev, extack);
if (res) {
- netdev_dbg(bond_dev, "Opening slave %s failed\n", slave_dev->name);
+ slave_err(bond_dev, slave_dev, "Opening slave failed\n");
goto err_restore_mac;
}
@@ -1566,8 +1573,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
res = vlan_vids_add_by_dev(slave_dev, bond_dev);
if (res) {
- netdev_err(bond_dev, "Couldn't add bond vlan ids to %s\n",
- slave_dev->name);
+ slave_err(bond_dev, slave_dev, "Couldn't add bond vlan ids\n");
goto err_close;
}
@@ -1597,12 +1603,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
* supported); thus, we don't need to change
* the messages for netif_carrier.
*/
- netdev_warn(bond_dev, "MII and ETHTOOL support not available for interface %s, and arp_interval/arp_ip_target module parameters not specified, thus bonding will not detect link failures! see bonding.txt for details\n",
- slave_dev->name);
+ slave_warn(bond_dev, slave_dev, "MII and ETHTOOL support not available for slave, and arp_interval/arp_ip_target module parameters not specified, thus bonding will not detect link failures! see bonding.txt for details\n");
} else if (link_reporting == -1) {
/* unable get link status using mii/ethtool */
- netdev_warn(bond_dev, "can't get link status from interface %s; the network driver associated with this interface does not support MII or ETHTOOL link status reporting, thus miimon has no effect on this interface\n",
- slave_dev->name);
+ slave_warn(bond_dev, slave_dev, "can't get link status from slave; the network driver associated with this interface does not support MII or ETHTOOL link status reporting, thus miimon has no effect on this interface\n");
}
}
@@ -1636,9 +1640,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
if (new_slave->link != BOND_LINK_DOWN)
new_slave->last_link_up = jiffies;
- netdev_dbg(bond_dev, "Initial state of slave_dev is BOND_LINK_%s\n",
- new_slave->link == BOND_LINK_DOWN ? "DOWN" :
- (new_slave->link == BOND_LINK_UP ? "UP" : "BACK"));
+ slave_dbg(bond_dev, slave_dev, "Initial state of slave is BOND_LINK_%s\n",
+ new_slave->link == BOND_LINK_DOWN ? "DOWN" :
+ (new_slave->link == BOND_LINK_UP ? "UP" : "BACK"));
if (bond_uses_primary(bond) && bond->params.primary[0]) {
/* if there is a primary slave, remember it */
@@ -1679,7 +1683,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW);
break;
default:
- netdev_dbg(bond_dev, "This slave is always active in trunk mode\n");
+ slave_dbg(bond_dev, slave_dev, "This slave is always active in trunk mode\n");
/* always active in trunk mode */
bond_set_active_slave(new_slave);
@@ -1698,7 +1702,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
#ifdef CONFIG_NET_POLL_CONTROLLER
if (bond->dev->npinfo) {
if (slave_enable_netpoll(new_slave)) {
- netdev_info(bond_dev, "master_dev is using netpoll, but new slave device does not support netpoll\n");
+ slave_info(bond_dev, slave_dev, "master_dev is using netpoll, but new slave device does not support netpoll\n");
res = -EBUSY;
goto err_detach;
}
@@ -1711,19 +1715,19 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
res = netdev_rx_handler_register(slave_dev, bond_handle_frame,
new_slave);
if (res) {
- netdev_dbg(bond_dev, "Error %d calling netdev_rx_handler_register\n", res);
+ slave_dbg(bond_dev, slave_dev, "Error %d calling netdev_rx_handler_register\n", res);
goto err_detach;
}
res = bond_master_upper_dev_link(bond, new_slave, extack);
if (res) {
- netdev_dbg(bond_dev, "Error %d calling bond_master_upper_dev_link\n", res);
+ slave_dbg(bond_dev, slave_dev, "Error %d calling bond_master_upper_dev_link\n", res);
goto err_unregister;
}
res = bond_sysfs_slave_add(new_slave);
if (res) {
- netdev_dbg(bond_dev, "Error %d calling bond_sysfs_slave_add\n", res);
+ slave_dbg(bond_dev, slave_dev, "Error %d calling bond_sysfs_slave_add\n", res);
goto err_upper_unlink;
}
@@ -1777,10 +1781,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
bond_update_slave_arr(bond, NULL);
- netdev_info(bond_dev, "Enslaving %s as %s interface with %s link\n",
- slave_dev->name,
- bond_is_active_slave(new_slave) ? "an active" : "a backup",
- new_slave->link != BOND_LINK_DOWN ? "an up" : "a down");
+ slave_info(bond_dev, slave_dev, "Enslaving as %s interface with %s link\n",
+ bond_is_active_slave(new_slave) ? "an active" : "a backup",
+ new_slave->link != BOND_LINK_DOWN ? "an up" : "a down");
/* enslave is successful */
bond_queue_slave_event(new_slave);
@@ -1875,8 +1878,7 @@ static int __bond_release_one(struct net_device *bond_dev,
/* slave is not a slave or master is not master of this slave */
if (!(slave_dev->flags & IFF_SLAVE) ||
!netdev_has_upper_dev(slave_dev, bond_dev)) {
- netdev_dbg(bond_dev, "cannot release %s\n",
- slave_dev->name);
+ slave_dbg(bond_dev, slave_dev, "cannot release slave\n");
return -EINVAL;
}
@@ -1885,8 +1887,7 @@ static int __bond_release_one(struct net_device *bond_dev,
slave = bond_get_slave_by_dev(bond, slave_dev);
if (!slave) {
/* not a slave of this bond */
- netdev_info(bond_dev, "%s not enslaved\n",
- slave_dev->name);
+ slave_info(bond_dev, slave_dev, "interface not enslaved\n");
unblock_netpoll_tx();
return -EINVAL;
}
@@ -1910,9 +1911,8 @@ static int __bond_release_one(struct net_device *bond_dev,
if (bond_mode_can_use_xmit_hash(bond))
bond_update_slave_arr(bond, slave);
- netdev_info(bond_dev, "Releasing %s interface %s\n",
- bond_is_active_slave(slave) ? "active" : "backup",
- slave_dev->name);
+ slave_info(bond_dev, slave_dev, "Releasing %s interface\n",
+ bond_is_active_slave(slave) ? "active" : "backup");
oldcurrent = rcu_access_pointer(bond->curr_active_slave);
@@ -1922,9 +1922,8 @@ static int __bond_release_one(struct net_device *bond_dev,
BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)) {
if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) &&
bond_has_slaves(bond))
- netdev_warn(bond_dev, "the permanent HWaddr of %s - %pM - is still in use by %s - set the HWaddr of %s to a different address to avoid conflicts\n",
- slave_dev->name, slave->perm_hwaddr,
- bond_dev->name, slave_dev->name);
+ slave_warn(bond_dev, slave_dev, "the permanent HWaddr of slave - %pM - is still in use by bond - set the HWaddr of slave to a different address to avoid conflicts\n",
+ slave->perm_hwaddr);
}
if (rtnl_dereference(bond->primary_slave) == slave)
@@ -1972,8 +1971,7 @@ static int __bond_release_one(struct net_device *bond_dev,
bond_compute_features(bond);
if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) &&
(old_features & NETIF_F_VLAN_CHALLENGED))
- netdev_info(bond_dev, "last VLAN challenged slave %s left bond %s - VLAN blocking is removed\n",
- slave_dev->name, bond_dev->name);
+ slave_info(bond_dev, slave_dev, "last VLAN challenged slave left bond - VLAN blocking is removed\n");
vlan_vids_del_by_dev(slave_dev, bond_dev);
@@ -2033,8 +2031,8 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
/* First release a slave and then destroy the bond if no more slaves are left.
* Must be under rtnl_lock when this function is called.
*/
-static int bond_release_and_destroy(struct net_device *bond_dev,
- struct net_device *slave_dev)
+static int bond_release_and_destroy(struct net_device *bond_dev,
+ struct net_device *slave_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
int ret;
@@ -2042,8 +2040,7 @@ static int bond_release_and_destroy(struct net_device *bond_dev,
ret = __bond_release_one(bond_dev, slave_dev, false, true);
if (ret == 0 && !bond_has_slaves(bond)) {
bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
- netdev_info(bond_dev, "Destroying bond %s\n",
- bond_dev->name);
+ netdev_info(bond_dev, "Destroying bond\n");
bond_remove_proc_entry(bond);
unregister_netdevice(bond_dev);
}
@@ -2101,13 +2098,12 @@ static int bond_miimon_inspect(struct bonding *bond)
commit++;
slave->delay = bond->params.downdelay;
if (slave->delay) {
- netdev_info(bond->dev, "link status down for %sinterface %s, disabling it in %d ms\n",
- (BOND_MODE(bond) ==
- BOND_MODE_ACTIVEBACKUP) ?
- (bond_is_active_slave(slave) ?
- "active " : "backup ") : "",
- slave->dev->name,
- bond->params.downdelay * bond->params.miimon);
+ slave_info(bond->dev, slave->dev, "link status down for %sinterface, disabling it in %d ms\n",
+ (BOND_MODE(bond) ==
+ BOND_MODE_ACTIVEBACKUP) ?
+ (bond_is_active_slave(slave) ?
+ "active " : "backup ") : "",
+ bond->params.downdelay * bond->params.miimon);
}
/*FALLTHRU*/
case BOND_LINK_FAIL:
@@ -2115,10 +2111,9 @@ static int bond_miimon_inspect(struct bonding *bond)
/* recovered before downdelay expired */
bond_propose_link_state(slave, BOND_LINK_UP);
slave->last_link_up = jiffies;
- netdev_info(bond->dev, "link status up again after %d ms for interface %s\n",
- (bond->params.downdelay - slave->delay) *
- bond->params.miimon,
- slave->dev->name);
+ slave_info(bond->dev, slave->dev, "link status up again after %d ms\n",
+ (bond->params.downdelay - slave->delay) *
+ bond->params.miimon);
commit++;
continue;
}
@@ -2141,20 +2136,18 @@ static int bond_miimon_inspect(struct bonding *bond)
slave->delay = bond->params.updelay;
if (slave->delay) {
- netdev_info(bond->dev, "link status up for interface %s, enabling it in %d ms\n",
- slave->dev->name,
- ignore_updelay ? 0 :
- bond->params.updelay *
- bond->params.miimon);
+ slave_info(bond->dev, slave->dev, "link status up, enabling it in %d ms\n",
+ ignore_updelay ? 0 :
+ bond->params.updelay *
+ bond->params.miimon);
}
/*FALLTHRU*/
case BOND_LINK_BACK:
if (!link_state) {
bond_propose_link_state(slave, BOND_LINK_DOWN);
- netdev_info(bond->dev, "link status down again after %d ms for interface %s\n",
- (bond->params.updelay - slave->delay) *
- bond->params.miimon,
- slave->dev->name);
+ slave_info(bond->dev, slave->dev, "link status down again after %d ms\n",
+ (bond->params.updelay - slave->delay) *
+ bond->params.miimon);
commit++;
continue;
}
@@ -2210,9 +2203,8 @@ static void bond_miimon_commit(struct bonding *bond)
bond_needs_speed_duplex(bond)) {
slave->link = BOND_LINK_DOWN;
if (net_ratelimit())
- netdev_warn(bond->dev,
- "failed to get link speed/duplex for %s\n",
- slave->dev->name);
+ slave_warn(bond->dev, slave->dev,
+ "failed to get link speed/duplex\n");
continue;
}
bond_set_slave_link_state(slave, BOND_LINK_UP,
@@ -2231,10 +2223,9 @@ static void bond_miimon_commit(struct bonding *bond)
bond_set_backup_slave(slave);
}
- netdev_info(bond->dev, "link status definitely up for interface %s, %u Mbps %s duplex\n",
- slave->dev->name,
- slave->speed == SPEED_UNKNOWN ? 0 : slave->speed,
- slave->duplex ? "full" : "half");
+ slave_info(bond->dev, slave->dev, "link status definitely up, %u Mbps %s duplex\n",
+ slave->speed == SPEED_UNKNOWN ? 0 : slave->speed,
+ slave->duplex ? "full" : "half");
bond_miimon_link_change(bond, slave, BOND_LINK_UP);
@@ -2255,8 +2246,7 @@ static void bond_miimon_commit(struct bonding *bond)
bond_set_slave_inactive_flags(slave,
BOND_SLAVE_NOTIFY_NOW);
- netdev_info(bond->dev, "link status definitely down for interface %s, disabling it\n",
- slave->dev->name);
+ slave_info(bond->dev, slave->dev, "link status definitely down, disabling slave\n");
bond_miimon_link_change(bond, slave, BOND_LINK_DOWN);
@@ -2266,8 +2256,8 @@ static void bond_miimon_commit(struct bonding *bond)
continue;
default:
- netdev_err(bond->dev, "invalid new link %d on slave %s\n",
- slave->new_link, slave->dev->name);
+ slave_err(bond->dev, slave->dev, "invalid new link %d on slave\n",
+ slave->new_link);
slave->new_link = BOND_LINK_NOCHANGE;
continue;
@@ -2294,6 +2284,7 @@ static void bond_mii_monitor(struct work_struct *work)
struct bonding *bond = container_of(work, struct bonding,
mii_work.work);
bool should_notify_peers = false;
+ bool commit;
unsigned long delay;
struct slave *slave;
struct list_head *iter;
@@ -2304,12 +2295,19 @@ static void bond_mii_monitor(struct work_struct *work)
goto re_arm;
rcu_read_lock();
-
should_notify_peers = bond_should_notify_peers(bond);
-
- if (bond_miimon_inspect(bond)) {
+ commit = !!bond_miimon_inspect(bond);
+ if (bond->send_peer_notif) {
rcu_read_unlock();
+ if (rtnl_trylock()) {
+ bond->send_peer_notif--;
+ rtnl_unlock();
+ }
+ } else {
+ rcu_read_unlock();
+ }
+ if (commit) {
/* Race avoidance with bond_close cancel of workqueue */
if (!rtnl_trylock()) {
delay = 1;
@@ -2323,8 +2321,7 @@ static void bond_mii_monitor(struct work_struct *work)
bond_miimon_commit(bond);
rtnl_unlock(); /* might sleep, hold no other locks */
- } else
- rcu_read_unlock();
+ }
re_arm:
if (bond->params.miimon)
@@ -2364,15 +2361,16 @@ static bool bond_has_this_ip(struct bonding *bond, __be32 ip)
* switches in VLAN mode (especially if ports are configured as
* "native" to a VLAN) might not pass non-tagged frames.
*/
-static void bond_arp_send(struct net_device *slave_dev, int arp_op,
- __be32 dest_ip, __be32 src_ip,
- struct bond_vlan_tag *tags)
+static void bond_arp_send(struct slave *slave, int arp_op, __be32 dest_ip,
+ __be32 src_ip, struct bond_vlan_tag *tags)
{
struct sk_buff *skb;
struct bond_vlan_tag *outer_tag = tags;
+ struct net_device *slave_dev = slave->dev;
+ struct net_device *bond_dev = slave->bond->dev;
- netdev_dbg(slave_dev, "arp %d on slave %s: dst %pI4 src %pI4\n",
- arp_op, slave_dev->name, &dest_ip, &src_ip);
+ slave_dbg(bond_dev, slave_dev, "arp %d on slave: dst %pI4 src %pI4\n",
+ arp_op, &dest_ip, &src_ip);
skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip,
NULL, slave_dev->dev_addr, NULL);
@@ -2394,8 +2392,8 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op,
continue;
}
- netdev_dbg(slave_dev, "inner tag: proto %X vid %X\n",
- ntohs(outer_tag->vlan_proto), tags->vlan_id);
+ slave_dbg(bond_dev, slave_dev, "inner tag: proto %X vid %X\n",
+ ntohs(outer_tag->vlan_proto), tags->vlan_id);
skb = vlan_insert_tag_set_proto(skb, tags->vlan_proto,
tags->vlan_id);
if (!skb) {
@@ -2407,8 +2405,8 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op,
}
/* Set the outer tag */
if (outer_tag->vlan_id) {
- netdev_dbg(slave_dev, "outer tag: proto %X vid %X\n",
- ntohs(outer_tag->vlan_proto), outer_tag->vlan_id);
+ slave_dbg(bond_dev, slave_dev, "outer tag: proto %X vid %X\n",
+ ntohs(outer_tag->vlan_proto), outer_tag->vlan_id);
__vlan_hwaccel_put_tag(skb, outer_tag->vlan_proto,
outer_tag->vlan_id);
}
@@ -2465,7 +2463,8 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
int i;
for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) {
- netdev_dbg(bond->dev, "basa: target %pI4\n", &targets[i]);
+ slave_dbg(bond->dev, slave->dev, "%s: target %pI4\n",
+ __func__, &targets[i]);
tags = NULL;
/* Find out through which dev should the packet go */
@@ -2479,7 +2478,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
net_warn_ratelimited("%s: no route to arp_ip_target %pI4 and arp_validate is set\n",
bond->dev->name,
&targets[i]);
- bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
+ bond_arp_send(slave, ARPOP_REQUEST, targets[i],
0, tags);
continue;
}
@@ -2496,7 +2495,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
goto found;
/* Not our device - skip */
- netdev_dbg(bond->dev, "no path to arp_ip_target %pI4 via rt.dev %s\n",
+ slave_dbg(bond->dev, slave->dev, "no path to arp_ip_target %pI4 via rt.dev %s\n",
&targets[i], rt->dst.dev ? rt->dst.dev->name : "NULL");
ip_rt_put(rt);
@@ -2505,8 +2504,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
found:
addr = bond_confirm_addr(rt->dst.dev, targets[i], 0);
ip_rt_put(rt);
- bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
- addr, tags);
+ bond_arp_send(slave, ARPOP_REQUEST, targets[i], addr, tags);
kfree(tags);
}
}
@@ -2516,15 +2514,15 @@ static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32
int i;
if (!sip || !bond_has_this_ip(bond, tip)) {
- netdev_dbg(bond->dev, "bva: sip %pI4 tip %pI4 not found\n",
- &sip, &tip);
+ slave_dbg(bond->dev, slave->dev, "%s: sip %pI4 tip %pI4 not found\n",
+ __func__, &sip, &tip);
return;
}
i = bond_get_targets_ip(bond->params.arp_targets, sip);
if (i == -1) {
- netdev_dbg(bond->dev, "bva: sip %pI4 not found in targets\n",
- &sip);
+ slave_dbg(bond->dev, slave->dev, "%s: sip %pI4 not found in targets\n",
+ __func__, &sip);
return;
}
slave->last_rx = jiffies;
@@ -2552,8 +2550,8 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
alen = arp_hdr_len(bond->dev);
- netdev_dbg(bond->dev, "bond_arp_rcv: skb->dev %s\n",
- skb->dev->name);
+ slave_dbg(bond->dev, slave->dev, "%s: skb->dev %s\n",
+ __func__, skb->dev->name);
if (alen > skb_headlen(skb)) {
arp = kmalloc(alen, GFP_ATOMIC);
@@ -2577,10 +2575,10 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
arp_ptr += 4 + bond->dev->addr_len;
memcpy(&tip, arp_ptr, 4);
- netdev_dbg(bond->dev, "bond_arp_rcv: %s/%d av %d sv %d sip %pI4 tip %pI4\n",
- slave->dev->name, bond_slave_state(slave),
- bond->params.arp_validate, slave_do_arp_validate(bond, slave),
- &sip, &tip);
+ slave_dbg(bond->dev, slave->dev, "%s: %s/%d av %d sv %d sip %pI4 tip %pI4\n",
+ __func__, slave->dev->name, bond_slave_state(slave),
+ bond->params.arp_validate, slave_do_arp_validate(bond, slave),
+ &sip, &tip);
curr_active_slave = rcu_dereference(bond->curr_active_slave);
curr_arp_slave = rcu_dereference(bond->current_arp_slave);
@@ -2683,12 +2681,10 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
* is closed.
*/
if (!oldcurrent) {
- netdev_info(bond->dev, "link status definitely up for interface %s\n",
- slave->dev->name);
+ slave_info(bond->dev, slave->dev, "link status definitely up\n");
do_failover = 1;
} else {
- netdev_info(bond->dev, "interface %s is now up\n",
- slave->dev->name);
+ slave_info(bond->dev, slave->dev, "interface is now up\n");
}
}
} else {
@@ -2707,8 +2703,7 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
if (slave->link_failure_count < UINT_MAX)
slave->link_failure_count++;
- netdev_info(bond->dev, "interface %s is now down\n",
- slave->dev->name);
+ slave_info(bond->dev, slave->dev, "interface is now down\n");
if (slave == oldcurrent)
do_failover = 1;
@@ -2858,8 +2853,7 @@ static void bond_ab_arp_commit(struct bonding *bond)
RCU_INIT_POINTER(bond->current_arp_slave, NULL);
}
- netdev_info(bond->dev, "link status definitely up for interface %s\n",
- slave->dev->name);
+ slave_info(bond->dev, slave->dev, "link status definitely up\n");
if (!rtnl_dereference(bond->curr_active_slave) ||
slave == rtnl_dereference(bond->primary_slave))
@@ -2878,8 +2872,7 @@ static void bond_ab_arp_commit(struct bonding *bond)
bond_set_slave_inactive_flags(slave,
BOND_SLAVE_NOTIFY_NOW);
- netdev_info(bond->dev, "link status definitely down for interface %s, disabling it\n",
- slave->dev->name);
+ slave_info(bond->dev, slave->dev, "link status definitely down, disabling slave\n");
if (slave == rtnl_dereference(bond->curr_active_slave)) {
RCU_INIT_POINTER(bond->current_arp_slave, NULL);
@@ -2889,8 +2882,8 @@ static void bond_ab_arp_commit(struct bonding *bond)
continue;
default:
- netdev_err(bond->dev, "impossible: new_link %d on slave %s\n",
- slave->new_link, slave->dev->name);
+ slave_err(bond->dev, slave->dev, "impossible: new_link %d on slave\n",
+ slave->new_link);
continue;
}
@@ -2961,8 +2954,7 @@ static bool bond_ab_arp_probe(struct bonding *bond)
bond_set_slave_inactive_flags(slave,
BOND_SLAVE_NOTIFY_LATER);
- netdev_info(bond->dev, "backup interface %s is now down\n",
- slave->dev->name);
+ slave_info(bond->dev, slave->dev, "backup interface is now down\n");
}
if (slave == curr_arp_slave)
found = true;
@@ -3074,6 +3066,8 @@ static int bond_master_netdev_event(unsigned long event,
{
struct bonding *event_bond = netdev_priv(bond_dev);
+ netdev_dbg(bond_dev, "%s called\n", __func__);
+
switch (event) {
case NETDEV_CHANGENAME:
return bond_event_changename(event_bond);
@@ -3083,10 +3077,6 @@ static int bond_master_netdev_event(unsigned long event,
case NETDEV_REGISTER:
bond_create_proc_entry(event_bond);
break;
- case NETDEV_NOTIFY_PEERS:
- if (event_bond->send_peer_notif)
- event_bond->send_peer_notif--;
- break;
default:
break;
}
@@ -3105,12 +3095,17 @@ static int bond_slave_netdev_event(unsigned long event,
* before netdev_rx_handler_register is called in which case
* slave will be NULL
*/
- if (!slave)
+ if (!slave) {
+ netdev_dbg(slave_dev, "%s called on NULL slave\n", __func__);
return NOTIFY_DONE;
+ }
+
bond_dev = slave->bond->dev;
bond = slave->bond;
primary = rtnl_dereference(bond->primary_slave);
+ slave_dbg(bond_dev, slave_dev, "%s called\n", __func__);
+
switch (event) {
case NETDEV_UNREGISTER:
if (bond_dev->type != ARPHRD_ETHER)
@@ -3212,7 +3207,8 @@ static int bond_netdev_event(struct notifier_block *this,
{
struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
- netdev_dbg(event_dev, "event: %lx\n", event);
+ netdev_dbg(event_dev, "%s received %s\n",
+ __func__, netdev_cmd_to_name(event));
if (!(event_dev->priv_flags & IFF_BONDING))
return NOTIFY_DONE;
@@ -3220,16 +3216,13 @@ static int bond_netdev_event(struct notifier_block *this,
if (event_dev->flags & IFF_MASTER) {
int ret;
- netdev_dbg(event_dev, "IFF_MASTER\n");
ret = bond_master_netdev_event(event, event_dev);
if (ret != NOTIFY_DONE)
return ret;
}
- if (event_dev->flags & IFF_SLAVE) {
- netdev_dbg(event_dev, "IFF_SLAVE\n");
+ if (event_dev->flags & IFF_SLAVE)
return bond_slave_netdev_event(event, event_dev);
- }
return NOTIFY_DONE;
}
@@ -3546,12 +3539,11 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
slave_dev = __dev_get_by_name(net, ifr->ifr_slave);
- netdev_dbg(bond_dev, "slave_dev=%p:\n", slave_dev);
if (!slave_dev)
return -ENODEV;
- netdev_dbg(bond_dev, "slave_dev->name=%s:\n", slave_dev->name);
+ slave_dbg(bond_dev, slave_dev, "slave_dev=%p:\n", slave_dev);
switch (cmd) {
case BOND_ENSLAVE_OLD:
case SIOCBONDENSLAVE:
@@ -3676,7 +3668,7 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
netdev_dbg(bond_dev, "bond=%p, new_mtu=%d\n", bond, new_mtu);
bond_for_each_slave(bond, slave, iter) {
- netdev_dbg(bond_dev, "s %p c_m %p\n",
+ slave_dbg(bond_dev, slave->dev, "s %p c_m %p\n",
slave, slave->dev->netdev_ops->ndo_change_mtu);
res = dev_set_mtu(slave->dev, new_mtu);
@@ -3690,8 +3682,8 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
* means changing their mtu from timer context, which
* is probably not a good idea.
*/
- netdev_dbg(bond_dev, "err %d %s\n", res,
- slave->dev->name);
+ slave_dbg(bond_dev, slave->dev, "err %d setting mtu to %d\n",
+ res, new_mtu);
goto unwind;
}
}
@@ -3709,10 +3701,9 @@ unwind:
break;
tmp_res = dev_set_mtu(rollback_slave->dev, bond_dev->mtu);
- if (tmp_res) {
- netdev_dbg(bond_dev, "unwind err %d dev %s\n",
- tmp_res, rollback_slave->dev->name);
- }
+ if (tmp_res)
+ slave_dbg(bond_dev, rollback_slave->dev, "unwind err %d\n",
+ tmp_res);
}
return res;
@@ -3736,7 +3727,7 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
return bond_alb_set_mac_address(bond_dev, addr);
- netdev_dbg(bond_dev, "bond=%p\n", bond);
+ netdev_dbg(bond_dev, "%s: bond=%p\n", __func__, bond);
/* If fail_over_mac is enabled, do nothing and return success.
* Returning an error causes ifenslave to fail.
@@ -3749,7 +3740,8 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
return -EADDRNOTAVAIL;
bond_for_each_slave(bond, slave, iter) {
- netdev_dbg(bond_dev, "slave %p %s\n", slave, slave->dev->name);
+ slave_dbg(bond_dev, slave->dev, "%s: slave=%p\n",
+ __func__, slave);
res = dev_set_mac_address(slave->dev, addr, NULL);
if (res) {
/* TODO: consider downing the slave
@@ -3758,7 +3750,8 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
* breakage anyway until ARP finish
* updating, so...
*/
- netdev_dbg(bond_dev, "err %d %s\n", res, slave->dev->name);
+ slave_dbg(bond_dev, slave->dev, "%s: err %d\n",
+ __func__, res);
goto unwind;
}
}
@@ -3781,8 +3774,8 @@ unwind:
tmp_res = dev_set_mac_address(rollback_slave->dev,
(struct sockaddr *)&tmp_ss, NULL);
if (tmp_res) {
- netdev_dbg(bond_dev, "unwind err %d dev %s\n",
- tmp_res, rollback_slave->dev->name);
+ slave_dbg(bond_dev, rollback_slave->dev, "%s: unwind err %d\n",
+ __func__, tmp_res);
}
}
@@ -3866,8 +3859,8 @@ static netdev_tx_t bond_xmit_roundrobin(struct sk_buff *skb,
struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
- struct iphdr *iph = ip_hdr(skb);
struct slave *slave;
+ int slave_cnt;
u32 slave_id;
/* Start with the curr_active_slave that joined the bond as the
@@ -3876,23 +3869,32 @@ static netdev_tx_t bond_xmit_roundrobin(struct sk_buff *skb,
* send the join/membership reports. The curr_active_slave found
* will send all of this type of traffic.
*/
- if (iph->protocol == IPPROTO_IGMP && skb->protocol == htons(ETH_P_IP)) {
- slave = rcu_dereference(bond->curr_active_slave);
- if (slave)
- bond_dev_queue_xmit(bond, skb, slave->dev);
- else
- bond_xmit_slave_id(bond, skb, 0);
- } else {
- int slave_cnt = READ_ONCE(bond->slave_cnt);
+ if (skb->protocol == htons(ETH_P_IP)) {
+ int noff = skb_network_offset(skb);
+ struct iphdr *iph;
- if (likely(slave_cnt)) {
- slave_id = bond_rr_gen_slave_id(bond);
- bond_xmit_slave_id(bond, skb, slave_id % slave_cnt);
- } else {
- bond_tx_drop(bond_dev, skb);
+ if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph))))
+ goto non_igmp;
+
+ iph = ip_hdr(skb);
+ if (iph->protocol == IPPROTO_IGMP) {
+ slave = rcu_dereference(bond->curr_active_slave);
+ if (slave)
+ bond_dev_queue_xmit(bond, skb, slave->dev);
+ else
+ bond_xmit_slave_id(bond, skb, 0);
+ return NETDEV_TX_OK;
}
}
+non_igmp:
+ slave_cnt = READ_ONCE(bond->slave_cnt);
+ if (likely(slave_cnt)) {
+ slave_id = bond_rr_gen_slave_id(bond);
+ bond_xmit_slave_id(bond, skb, slave_id % slave_cnt);
+ } else {
+ bond_tx_drop(bond_dev, skb);
+ }
return NETDEV_TX_OK;
}
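
Editor's note: the round-robin transmit hunk above stops assuming the IPv4 header is already in the skb's linear data; it now calls pskb_may_pull() before ip_hdr() and only then checks for IGMP. A minimal sketch of that "pull before parse" pattern follows (not part of the patch; the helper name skb_is_igmp() is made up for illustration):

/* Editorial sketch, not part of the patch: validate that the IPv4
 * header is available in the linear area before dereferencing it.
 */
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/if_ether.h>

static bool skb_is_igmp(struct sk_buff *skb)
{
	const struct iphdr *iph;
	int noff = skb_network_offset(skb);

	if (skb->protocol != htons(ETH_P_IP))
		return false;
	if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph))))
		return false;	/* header not pullable: treat as non-IGMP */

	iph = ip_hdr(skb);
	return iph->protocol == IPPROTO_IGMP;
}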
@@ -4003,9 +4005,8 @@ int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave)
if (skipslave == slave)
continue;
- netdev_dbg(bond->dev,
- "Adding slave dev %s to tx hash array[%d]\n",
- slave->dev->name, new_arr->count);
+ slave_dbg(bond->dev, slave->dev, "Adding slave to tx hash array[%d]\n",
+ new_arr->count);
new_arr->arr[new_arr->count++] = slave;
}
@@ -4707,6 +4708,7 @@ static int bond_check_params(struct bond_params *params)
params->arp_all_targets = arp_all_targets_value;
params->updelay = updelay;
params->downdelay = downdelay;
+ params->peer_notif_delay = 0;
params->use_carrier = use_carrier;
params->lacp_fast = lacp_fast;
params->primary[0] = 0;
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index b24cce48ae35..b43b51646b11 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -108,6 +108,7 @@ static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
[IFLA_BOND_AD_ACTOR_SYSTEM] = { .type = NLA_BINARY,
.len = ETH_ALEN },
[IFLA_BOND_TLB_DYNAMIC_LB] = { .type = NLA_U8 },
+ [IFLA_BOND_PEER_NOTIF_DELAY] = { .type = NLA_U32 },
};
static const struct nla_policy bond_slave_policy[IFLA_BOND_SLAVE_MAX + 1] = {
@@ -215,6 +216,14 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
if (err)
return err;
}
+ if (data[IFLA_BOND_PEER_NOTIF_DELAY]) {
+ int delay = nla_get_u32(data[IFLA_BOND_PEER_NOTIF_DELAY]);
+
+ bond_opt_initval(&newval, delay);
+ err = __bond_opt_set(bond, BOND_OPT_PEER_NOTIF_DELAY, &newval);
+ if (err)
+ return err;
+ }
if (data[IFLA_BOND_USE_CARRIER]) {
int use_carrier = nla_get_u8(data[IFLA_BOND_USE_CARRIER]);
@@ -494,6 +503,7 @@ static size_t bond_get_size(const struct net_device *bond_dev)
nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_USER_PORT_KEY */
nla_total_size(ETH_ALEN) + /* IFLA_BOND_AD_ACTOR_SYSTEM */
nla_total_size(sizeof(u8)) + /* IFLA_BOND_TLB_DYNAMIC_LB */
+ nla_total_size(sizeof(u32)) + /* IFLA_BOND_PEER_NOTIF_DELAY */
0;
}
@@ -536,6 +546,10 @@ static int bond_fill_info(struct sk_buff *skb,
bond->params.downdelay * bond->params.miimon))
goto nla_put_failure;
+ if (nla_put_u32(skb, IFLA_BOND_PEER_NOTIF_DELAY,
+ bond->params.peer_notif_delay * bond->params.miimon))
+ goto nla_put_failure;
+
if (nla_put_u8(skb, IFLA_BOND_USE_CARRIER, bond->params.use_carrier))
goto nla_put_failure;
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 9677418e0362..ddb3916d3506 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -24,6 +24,8 @@ static int bond_option_updelay_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_downdelay_set(struct bonding *bond,
const struct bond_opt_value *newval);
+static int bond_option_peer_notif_delay_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
static int bond_option_use_carrier_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_arp_interval_set(struct bonding *bond,
@@ -424,6 +426,13 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
.desc = "Number of peer notifications to send on failover event",
.values = bond_num_peer_notif_tbl,
.set = bond_option_num_peer_notif_set
+ },
+ [BOND_OPT_PEER_NOTIF_DELAY] = {
+ .id = BOND_OPT_PEER_NOTIF_DELAY,
+ .name = "peer_notif_delay",
+ .desc = "Delay between each peer notification on failover event, in milliseconds",
+ .values = bond_intmax_tbl,
+ .set = bond_option_peer_notif_delay_set
}
};
@@ -783,14 +792,12 @@ static int bond_option_active_slave_set(struct bonding *bond,
if (slave_dev) {
if (!netif_is_bond_slave(slave_dev)) {
- netdev_err(bond->dev, "Device %s is not bonding slave\n",
- slave_dev->name);
+ slave_err(bond->dev, slave_dev, "Device is not bonding slave\n");
return -EINVAL;
}
if (bond->dev != netdev_master_upper_dev_get(slave_dev)) {
- netdev_err(bond->dev, "Device %s is not our slave\n",
- slave_dev->name);
+ slave_err(bond->dev, slave_dev, "Device is not our slave\n");
return -EINVAL;
}
}
@@ -809,18 +816,15 @@ static int bond_option_active_slave_set(struct bonding *bond,
if (new_active == old_active) {
/* do nothing */
- netdev_dbg(bond->dev, "%s is already the current active slave\n",
- new_active->dev->name);
+ slave_dbg(bond->dev, new_active->dev, "is already the current active slave\n");
} else {
if (old_active && (new_active->link == BOND_LINK_UP) &&
bond_slave_is_up(new_active)) {
- netdev_dbg(bond->dev, "Setting %s as active slave\n",
- new_active->dev->name);
+ slave_dbg(bond->dev, new_active->dev, "Setting as active slave\n");
bond_change_active_slave(bond, new_active);
} else {
- netdev_err(bond->dev, "Could not set %s as active slave; either %s is down or the link is down\n",
- new_active->dev->name,
- new_active->dev->name);
+ slave_err(bond->dev, new_active->dev, "Could not set as active slave; either %s is down or the link is down\n",
+ new_active->dev->name);
ret = -EINVAL;
}
}
@@ -846,6 +850,9 @@ static int bond_option_miimon_set(struct bonding *bond,
if (bond->params.downdelay)
netdev_dbg(bond->dev, "Note: Updating downdelay (to %d) since it is a multiple of the miimon value\n",
bond->params.downdelay * bond->params.miimon);
+ if (bond->params.peer_notif_delay)
+ netdev_dbg(bond->dev, "Note: Updating peer_notif_delay (to %d) since it is a multiple of the miimon value\n",
+ bond->params.peer_notif_delay * bond->params.miimon);
if (newval->value && bond->params.arp_interval) {
netdev_dbg(bond->dev, "MII monitoring cannot be used with ARP monitoring - disabling ARP monitoring...\n");
bond->params.arp_interval = 0;
@@ -869,52 +876,59 @@ static int bond_option_miimon_set(struct bonding *bond,
return 0;
}
-/* Set up and down delays. These must be multiples of the
- * MII monitoring value, and are stored internally as the multiplier.
- * Thus, we must translate to MS for the real world.
+/* Set up, down and peer notification delays. These must be multiples
+ * of the MII monitoring value, and are stored internally as the
+ * multiplier. Thus, we must translate to ms for the real world.
*/
-static int bond_option_updelay_set(struct bonding *bond,
- const struct bond_opt_value *newval)
+static int _bond_option_delay_set(struct bonding *bond,
+ const struct bond_opt_value *newval,
+ const char *name,
+ int *target)
{
int value = newval->value;
if (!bond->params.miimon) {
- netdev_err(bond->dev, "Unable to set up delay as MII monitoring is disabled\n");
+ netdev_err(bond->dev, "Unable to set %s as MII monitoring is disabled\n",
+ name);
return -EPERM;
}
if ((value % bond->params.miimon) != 0) {
- netdev_warn(bond->dev, "up delay (%d) is not a multiple of miimon (%d), updelay rounded to %d ms\n",
+ netdev_warn(bond->dev,
+ "%s (%d) is not a multiple of miimon (%d), value rounded to %d ms\n",
+ name,
value, bond->params.miimon,
(value / bond->params.miimon) *
bond->params.miimon);
}
- bond->params.updelay = value / bond->params.miimon;
- netdev_dbg(bond->dev, "Setting up delay to %d\n",
- bond->params.updelay * bond->params.miimon);
+ *target = value / bond->params.miimon;
+ netdev_dbg(bond->dev, "Setting %s to %d\n",
+ name,
+ *target * bond->params.miimon);
return 0;
}
+static int bond_option_updelay_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
+{
+ return _bond_option_delay_set(bond, newval, "up delay",
+ &bond->params.updelay);
+}
+
static int bond_option_downdelay_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
- int value = newval->value;
-
- if (!bond->params.miimon) {
- netdev_err(bond->dev, "Unable to set down delay as MII monitoring is disabled\n");
- return -EPERM;
- }
- if ((value % bond->params.miimon) != 0) {
- netdev_warn(bond->dev, "down delay (%d) is not a multiple of miimon (%d), delay rounded to %d ms\n",
- value, bond->params.miimon,
- (value / bond->params.miimon) *
- bond->params.miimon);
- }
- bond->params.downdelay = value / bond->params.miimon;
- netdev_dbg(bond->dev, "Setting down delay to %d\n",
- bond->params.downdelay * bond->params.miimon);
+ return _bond_option_delay_set(bond, newval, "down delay",
+ &bond->params.downdelay);
+}
- return 0;
+static int bond_option_peer_notif_delay_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
+{
+ int ret = _bond_option_delay_set(bond, newval,
+ "peer notification delay",
+ &bond->params.peer_notif_delay);
+ return ret;
}
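
Editor's note: as the comment above says, each delay is stored as a multiple of miimon and is rounded down when the requested value is not an exact multiple. A small, self-contained illustration of that arithmetic (editorial, not part of the patch):

/* With miimon = 100 and a requested delay of 250 ms, the stored
 * multiplier is 250 / 100 = 2, so the effective delay is 200 ms,
 * which is why _bond_option_delay_set() warns about rounding.
 */
#include <stdio.h>

int main(void)
{
	int miimon = 100;	/* MII monitoring interval, ms */
	int requested_ms = 250;	/* e.g. updelay/downdelay/peer_notif_delay */
	int multiplier = requested_ms / miimon;

	if (requested_ms % miimon)
		printf("%d is not a multiple of miimon (%d), rounded to %d ms\n",
		       requested_ms, miimon, multiplier * miimon);
	printf("stored multiplier = %d, effective delay = %d ms\n",
	       multiplier, multiplier * miimon);
	return 0;
}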
static int bond_option_use_carrier_set(struct bonding *bond,
@@ -1132,8 +1146,7 @@ static int bond_option_primary_set(struct bonding *bond,
bond_for_each_slave(bond, slave, iter) {
if (strncmp(slave->dev->name, primary, IFNAMSIZ) == 0) {
- netdev_dbg(bond->dev, "Setting %s as primary slave\n",
- slave->dev->name);
+ slave_dbg(bond->dev, slave->dev, "Setting as primary slave\n");
rcu_assign_pointer(bond->primary_slave, slave);
strcpy(bond->params.primary, slave->dev->name);
bond->force_primary = true;
@@ -1150,8 +1163,8 @@ static int bond_option_primary_set(struct bonding *bond,
strncpy(bond->params.primary, primary, IFNAMSIZ);
bond->params.primary[IFNAMSIZ - 1] = 0;
- netdev_dbg(bond->dev, "Recording %s as primary, but it has not been enslaved to %s yet\n",
- primary, bond->dev->name);
+ netdev_dbg(bond->dev, "Recording %s as primary, but it has not been enslaved yet\n",
+ primary);
out:
unblock_netpoll_tx();
@@ -1378,12 +1391,12 @@ static int bond_option_slaves_set(struct bonding *bond,
switch (command[0]) {
case '+':
- netdev_dbg(bond->dev, "Adding slave %s\n", dev->name);
+ slave_dbg(bond->dev, dev, "Enslaving interface\n");
ret = bond_enslave(bond->dev, dev, NULL);
break;
case '-':
- netdev_dbg(bond->dev, "Removing slave %s\n", dev->name);
+ slave_dbg(bond->dev, dev, "Releasing interface\n");
ret = bond_release(bond->dev, dev);
break;
@@ -1447,7 +1460,7 @@ static int bond_option_ad_actor_system_set(struct bonding *bond,
return 0;
err:
- netdev_err(bond->dev, "Invalid MAC address.\n");
+ netdev_err(bond->dev, "Invalid ad_actor_system MAC address.\n");
return -EINVAL;
}
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index 9f7d83e827c3..fd5c9cbe45b1 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -104,6 +104,8 @@ static void bond_info_show_master(struct seq_file *seq)
bond->params.updelay * bond->params.miimon);
seq_printf(seq, "Down Delay (ms): %d\n",
bond->params.downdelay * bond->params.miimon);
+ seq_printf(seq, "Peer Notification Delay (ms): %d\n",
+ bond->params.peer_notif_delay * bond->params.miimon);
/* ARP information */
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 94214eaf53c5..2d615a93685e 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -327,6 +327,18 @@ static ssize_t bonding_show_updelay(struct device *d,
static DEVICE_ATTR(updelay, 0644,
bonding_show_updelay, bonding_sysfs_store_option);
+static ssize_t bonding_show_peer_notif_delay(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct bonding *bond = to_bond(d);
+
+ return sprintf(buf, "%d\n",
+ bond->params.peer_notif_delay * bond->params.miimon);
+}
+static DEVICE_ATTR(peer_notif_delay, 0644,
+ bonding_show_peer_notif_delay, bonding_sysfs_store_option);
+
/* Show the LACP interval. */
static ssize_t bonding_show_lacp(struct device *d,
struct device_attribute *attr,
@@ -718,6 +730,7 @@ static struct attribute *per_bond_attrs[] = {
&dev_attr_arp_ip_target.attr,
&dev_attr_downdelay.attr,
&dev_attr_updelay.attr,
+ &dev_attr_peer_notif_delay.attr,
&dev_attr_lacp_rate.attr,
&dev_attr_ad_select.attr,
&dev_attr_xmit_hash_policy.attr,
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index 68bb58a57f3b..8242fb287cbb 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -683,7 +683,7 @@ static void softing_netdev_cleanup(struct net_device *netdev)
static ssize_t show_##name(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
- struct softing *card = platform_get_drvdata(to_platform_device(dev)); \
+ struct softing *card = dev_get_drvdata(dev); \
return sprintf(buf, "%u\n", card->member); \
} \
static DEVICE_ATTR(name, 0444, show_##name, NULL)
@@ -692,7 +692,7 @@ static DEVICE_ATTR(name, 0444, show_##name, NULL)
static ssize_t show_##name(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
- struct softing *card = platform_get_drvdata(to_platform_device(dev)); \
+ struct softing *card = dev_get_drvdata(dev); \
return sprintf(buf, "%s\n", card->member); \
} \
static DEVICE_ATTR(name, 0444, show_##name, NULL)
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index b91e78e3598f..f6232ce8481f 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -99,8 +99,8 @@ config NET_DSA_SMSC_LAN9303_MDIO
for MDIO managed mode.
config NET_DSA_VITESSE_VSC73XX
- tristate "Vitesse VSC7385/7388/7395/7398 support"
- depends on OF && SPI
+ tristate
+ depends on OF
depends on NET_DSA
select FIXED_PHY
select VITESSE_PHY
@@ -109,4 +109,24 @@ config NET_DSA_VITESSE_VSC73XX
This enables support for the Vitesse VSC7385, VSC7388,
VSC7395 and VSC7398 SparX integrated ethernet switches.
+config NET_DSA_VITESSE_VSC73XX_SPI
+ tristate "Vitesse VSC7385/7388/7395/7398 SPI mode support"
+ depends on OF
+ depends on NET_DSA
+ depends on SPI
+ select NET_DSA_VITESSE_VSC73XX
+ ---help---
+ This enables support for the Vitesse VSC7385, VSC7388, VSC7395
+ and VSC7398 SparX integrated ethernet switches in SPI managed mode.
+
+config NET_DSA_VITESSE_VSC73XX_PLATFORM
+ tristate "Vitesse VSC7385/7388/7395/7398 Platform mode support"
+ depends on OF
+ depends on NET_DSA
+ depends on HAS_IOMEM
+ select NET_DSA_VITESSE_VSC73XX
+ ---help---
+ This enables support for the Vitesse VSC7385, VSC7388, VSC7395
+ and VSC7398 SparX integrated ethernet switches, connected over
+ a CPU-attached address bus, working in memory-mapped I/O mode.
endmenu
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
index d99dc6de0006..ae70b79628d6 100644
--- a/drivers/net/dsa/Makefile
+++ b/drivers/net/dsa/Makefile
@@ -14,7 +14,9 @@ realtek-smi-objs := realtek-smi-core.o rtl8366.o rtl8366rb.o
obj-$(CONFIG_NET_DSA_SMSC_LAN9303) += lan9303-core.o
obj-$(CONFIG_NET_DSA_SMSC_LAN9303_I2C) += lan9303_i2c.o
obj-$(CONFIG_NET_DSA_SMSC_LAN9303_MDIO) += lan9303_mdio.o
-obj-$(CONFIG_NET_DSA_VITESSE_VSC73XX) += vitesse-vsc73xx.o
+obj-$(CONFIG_NET_DSA_VITESSE_VSC73XX) += vitesse-vsc73xx-core.o
+obj-$(CONFIG_NET_DSA_VITESSE_VSC73XX_PLATFORM) += vitesse-vsc73xx-platform.o
+obj-$(CONFIG_NET_DSA_VITESSE_VSC73XX_SPI) += vitesse-vsc73xx-spi.o
obj-y += b53/
obj-y += microchip/
obj-y += mv88e6xxx/
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index c8040ecf4425..907af62846ba 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -955,13 +955,13 @@ static int b53_setup(struct dsa_switch *ds)
if (ret)
dev_err(ds->dev, "failed to apply configuration\n");
- /* Configure IMP/CPU port, disable unused ports. Enabled
+ /* Configure IMP/CPU port, disable all other ports. Enabled
* ports will be configured with .port_enable
*/
for (port = 0; port < dev->num_ports; port++) {
if (dsa_is_cpu_port(ds, port))
b53_enable_cpu_port(dev, port);
- else if (dsa_is_unused_port(ds, port))
+ else
b53_disable_port(ds, port);
}
diff --git a/drivers/net/dsa/microchip/Kconfig b/drivers/net/dsa/microchip/Kconfig
index 2c3a6751bdaf..fe0a13b79c4b 100644
--- a/drivers/net/dsa/microchip/Kconfig
+++ b/drivers/net/dsa/microchip/Kconfig
@@ -13,5 +13,6 @@ menuconfig NET_DSA_MICROCHIP_KSZ9477
config NET_DSA_MICROCHIP_KSZ9477_SPI
tristate "KSZ9477 series SPI connected switch driver"
depends on NET_DSA_MICROCHIP_KSZ9477 && SPI
+ select REGMAP_SPI
help
Select to enable support for registering switches configured through SPI.
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index c026d15721f6..a8c97f7a79b7 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -65,51 +65,36 @@ static const struct {
{ 0x83, "tx_discards" },
};
-static void ksz9477_cfg32(struct ksz_device *dev, u32 addr, u32 bits, bool set)
+static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
{
- u32 data;
+ regmap_update_bits(dev->regmap[0], addr, bits, set ? bits : 0);
+}
- ksz_read32(dev, addr, &data);
- if (set)
- data |= bits;
- else
- data &= ~bits;
- ksz_write32(dev, addr, data);
+static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits,
+ bool set)
+{
+ regmap_update_bits(dev->regmap[0], PORT_CTRL_ADDR(port, offset),
+ bits, set ? bits : 0);
+}
+
+static void ksz9477_cfg32(struct ksz_device *dev, u32 addr, u32 bits, bool set)
+{
+ regmap_update_bits(dev->regmap[2], addr, bits, set ? bits : 0);
}
static void ksz9477_port_cfg32(struct ksz_device *dev, int port, int offset,
u32 bits, bool set)
{
- u32 addr;
- u32 data;
-
- addr = PORT_CTRL_ADDR(port, offset);
- ksz_read32(dev, addr, &data);
-
- if (set)
- data |= bits;
- else
- data &= ~bits;
-
- ksz_write32(dev, addr, data);
+ regmap_update_bits(dev->regmap[2], PORT_CTRL_ADDR(port, offset),
+ bits, set ? bits : 0);
}
-static int ksz9477_wait_vlan_ctrl_ready(struct ksz_device *dev, u32 waiton,
- int timeout)
+static int ksz9477_wait_vlan_ctrl_ready(struct ksz_device *dev)
{
- u8 data;
+ unsigned int val;
- do {
- ksz_read8(dev, REG_SW_VLAN_CTRL, &data);
- if (!(data & waiton))
- break;
- usleep_range(1, 10);
- } while (timeout-- > 0);
-
- if (timeout <= 0)
- return -ETIMEDOUT;
-
- return 0;
+ return regmap_read_poll_timeout(dev->regmap[0], REG_SW_VLAN_CTRL,
+ val, !(val & VLAN_START), 10, 1000);
}
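
Editor's note: this and the following hunks replace open-coded polling loops with regmap_read_poll_timeout(map, reg, val, cond, sleep_us, timeout_us), which returns 0 once the condition holds, the underlying read error if a read fails, or -ETIMEDOUT otherwise. Roughly, the call above behaves like this sketch (editorial; the real macro uses ktime-based timeouts, and the function name here is hypothetical):

static int vlan_ctrl_wait_sketch(struct ksz_device *dev)
{
	int timeout = 1000 / 10;	/* ~timeout_us / sleep_us iterations */
	unsigned int val;
	int ret;

	do {
		ret = regmap_read(dev->regmap[0], REG_SW_VLAN_CTRL, &val);
		if (ret || !(val & VLAN_START))
			return ret;	/* read error, or VLAN_START cleared */
		usleep_range(10, 20);
	} while (--timeout);

	return -ETIMEDOUT;
}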
static int ksz9477_get_vlan_table(struct ksz_device *dev, u16 vid,
@@ -123,8 +108,8 @@ static int ksz9477_get_vlan_table(struct ksz_device *dev, u16 vid,
ksz_write8(dev, REG_SW_VLAN_CTRL, VLAN_READ | VLAN_START);
/* wait to be cleared */
- ret = ksz9477_wait_vlan_ctrl_ready(dev, VLAN_START, 1000);
- if (ret < 0) {
+ ret = ksz9477_wait_vlan_ctrl_ready(dev);
+ if (ret) {
dev_dbg(dev->dev, "Failed to read vlan table\n");
goto exit;
}
@@ -156,8 +141,8 @@ static int ksz9477_set_vlan_table(struct ksz_device *dev, u16 vid,
ksz_write8(dev, REG_SW_VLAN_CTRL, VLAN_START | VLAN_WRITE);
/* wait to be cleared */
- ret = ksz9477_wait_vlan_ctrl_ready(dev, VLAN_START, 1000);
- if (ret < 0) {
+ ret = ksz9477_wait_vlan_ctrl_ready(dev);
+ if (ret) {
dev_dbg(dev->dev, "Failed to write vlan table\n");
goto exit;
}
@@ -191,55 +176,35 @@ static void ksz9477_write_table(struct ksz_device *dev, u32 *table)
ksz_write32(dev, REG_SW_ALU_VAL_D, table[3]);
}
-static int ksz9477_wait_alu_ready(struct ksz_device *dev, u32 waiton,
- int timeout)
+static int ksz9477_wait_alu_ready(struct ksz_device *dev)
{
- u32 data;
-
- do {
- ksz_read32(dev, REG_SW_ALU_CTRL__4, &data);
- if (!(data & waiton))
- break;
- usleep_range(1, 10);
- } while (timeout-- > 0);
+ unsigned int val;
- if (timeout <= 0)
- return -ETIMEDOUT;
-
- return 0;
+ return regmap_read_poll_timeout(dev->regmap[2], REG_SW_ALU_CTRL__4,
+ val, !(val & ALU_START), 10, 1000);
}
-static int ksz9477_wait_alu_sta_ready(struct ksz_device *dev, u32 waiton,
- int timeout)
+static int ksz9477_wait_alu_sta_ready(struct ksz_device *dev)
{
- u32 data;
+ unsigned int val;
- do {
- ksz_read32(dev, REG_SW_ALU_STAT_CTRL__4, &data);
- if (!(data & waiton))
- break;
- usleep_range(1, 10);
- } while (timeout-- > 0);
-
- if (timeout <= 0)
- return -ETIMEDOUT;
-
- return 0;
+ return regmap_read_poll_timeout(dev->regmap[2],
+ REG_SW_ALU_STAT_CTRL__4,
+ val, !(val & ALU_STAT_START),
+ 10, 1000);
}
static int ksz9477_reset_switch(struct ksz_device *dev)
{
u8 data8;
- u16 data16;
u32 data32;
/* reset switch */
ksz_cfg(dev, REG_SW_OPERATION, SW_RESET, true);
/* turn off SPI DO Edge select */
- ksz_read8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, &data8);
- data8 &= ~SPI_AUTO_EDGE_DETECTION;
- ksz_write8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, data8);
+ regmap_update_bits(dev->regmap[0], REG_SW_GLOBAL_SERIAL_CTRL_0,
+ SPI_AUTO_EDGE_DETECTION, 0);
/* default configuration */
ksz_read8(dev, REG_SW_LUE_CTRL_1, &data8);
@@ -253,10 +218,14 @@ static int ksz9477_reset_switch(struct ksz_device *dev)
ksz_read32(dev, REG_SW_PORT_INT_STATUS__4, &data32);
/* set broadcast storm protection 10% rate */
- ksz_read16(dev, REG_SW_MAC_CTRL_2, &data16);
- data16 &= ~BROADCAST_STORM_RATE;
- data16 |= (BROADCAST_STORM_VALUE * BROADCAST_STORM_PROT_RATE) / 100;
- ksz_write16(dev, REG_SW_MAC_CTRL_2, data16);
+ regmap_update_bits(dev->regmap[1], REG_SW_MAC_CTRL_2,
+ BROADCAST_STORM_RATE,
+ (BROADCAST_STORM_VALUE *
+ BROADCAST_STORM_PROT_RATE) / 100);
+
+ if (dev->synclko_125)
+ ksz_write8(dev, REG_SW_GLOBAL_OUTPUT_CTRL__1,
+ SW_ENABLE_REFCLKO | SW_REFCLKO_IS_125MHZ);
return 0;
}
@@ -264,12 +233,8 @@ static int ksz9477_reset_switch(struct ksz_device *dev)
static void ksz9477_r_mib_cnt(struct ksz_device *dev, int port, u16 addr,
u64 *cnt)
{
- struct ksz_poll_ctx ctx = {
- .dev = dev,
- .port = port,
- .offset = REG_PORT_MIB_CTRL_STAT__4,
- };
struct ksz_port *p = &dev->ports[port];
+ unsigned int val;
u32 data;
int ret;
@@ -279,11 +244,11 @@ static void ksz9477_r_mib_cnt(struct ksz_device *dev, int port, u16 addr,
data |= (addr << MIB_COUNTER_INDEX_S);
ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, data);
- ret = readx_poll_timeout(ksz_pread32_poll, &ctx, data,
- !(data & MIB_COUNTER_READ), 10, 1000);
-
+ ret = regmap_read_poll_timeout(dev->regmap[2],
+ PORT_CTRL_ADDR(port, REG_PORT_MIB_CTRL_STAT__4),
+ val, !(val & MIB_COUNTER_READ), 10, 1000);
/* failed to read MIB. get out of loop */
- if (ret < 0) {
+ if (ret) {
dev_dbg(dev->dev, "Failed to get MIB\n");
return;
}
@@ -518,10 +483,10 @@ static void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port)
{
u8 data;
- ksz_read8(dev, REG_SW_LUE_CTRL_2, &data);
- data &= ~(SW_FLUSH_OPTION_M << SW_FLUSH_OPTION_S);
- data |= (SW_FLUSH_OPTION_DYN_MAC << SW_FLUSH_OPTION_S);
- ksz_write8(dev, REG_SW_LUE_CTRL_2, data);
+ regmap_update_bits(dev->regmap[0], REG_SW_LUE_CTRL_2,
+ SW_FLUSH_OPTION_M << SW_FLUSH_OPTION_S,
+ SW_FLUSH_OPTION_DYN_MAC << SW_FLUSH_OPTION_S);
+
if (port < dev->mib_port_cnt) {
/* flush individual port */
ksz_pread8(dev, port, P_STP_CTRL, &data);
@@ -648,8 +613,8 @@ static int ksz9477_port_fdb_add(struct dsa_switch *ds, int port,
ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_READ | ALU_START);
/* wait to be finished */
- ret = ksz9477_wait_alu_ready(dev, ALU_START, 1000);
- if (ret < 0) {
+ ret = ksz9477_wait_alu_ready(dev);
+ if (ret) {
dev_dbg(dev->dev, "Failed to read ALU\n");
goto exit;
}
@@ -672,8 +637,8 @@ static int ksz9477_port_fdb_add(struct dsa_switch *ds, int port,
ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_WRITE | ALU_START);
/* wait to be finished */
- ret = ksz9477_wait_alu_ready(dev, ALU_START, 1000);
- if (ret < 0)
+ ret = ksz9477_wait_alu_ready(dev);
+ if (ret)
dev_dbg(dev->dev, "Failed to write ALU\n");
exit:
@@ -705,8 +670,8 @@ static int ksz9477_port_fdb_del(struct dsa_switch *ds, int port,
ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_READ | ALU_START);
/* wait to be finished */
- ret = ksz9477_wait_alu_ready(dev, ALU_START, 1000);
- if (ret < 0) {
+ ret = ksz9477_wait_alu_ready(dev);
+ if (ret) {
dev_dbg(dev->dev, "Failed to read ALU\n");
goto exit;
}
@@ -739,8 +704,8 @@ static int ksz9477_port_fdb_del(struct dsa_switch *ds, int port,
ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_WRITE | ALU_START);
/* wait to be finished */
- ret = ksz9477_wait_alu_ready(dev, ALU_START, 1000);
- if (ret < 0)
+ ret = ksz9477_wait_alu_ready(dev);
+ if (ret)
dev_dbg(dev->dev, "Failed to write ALU\n");
exit:
@@ -846,7 +811,7 @@ static void ksz9477_port_mdb_add(struct dsa_switch *ds, int port,
ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
/* wait to be finished */
- if (ksz9477_wait_alu_sta_ready(dev, ALU_STAT_START, 1000) < 0) {
+ if (ksz9477_wait_alu_sta_ready(dev)) {
dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
goto exit;
}
@@ -887,7 +852,7 @@ static void ksz9477_port_mdb_add(struct dsa_switch *ds, int port,
ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
/* wait to be finished */
- if (ksz9477_wait_alu_sta_ready(dev, ALU_STAT_START, 1000) < 0)
+ if (ksz9477_wait_alu_sta_ready(dev))
dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
exit:
@@ -917,8 +882,8 @@ static int ksz9477_port_mdb_del(struct dsa_switch *ds, int port,
ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
/* wait to be finished */
- ret = ksz9477_wait_alu_sta_ready(dev, ALU_STAT_START, 1000);
- if (ret < 0) {
+ ret = ksz9477_wait_alu_sta_ready(dev);
+ if (ret) {
dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
goto exit;
}
@@ -959,8 +924,8 @@ static int ksz9477_port_mdb_del(struct dsa_switch *ds, int port,
ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
/* wait to be finished */
- ret = ksz9477_wait_alu_sta_ready(dev, ALU_STAT_START, 1000);
- if (ret < 0)
+ ret = ksz9477_wait_alu_sta_ready(dev);
+ if (ret)
dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
exit:
@@ -1165,6 +1130,62 @@ static phy_interface_t ksz9477_get_interface(struct ksz_device *dev, int port)
return interface;
}
+static void ksz9477_port_mmd_write(struct ksz_device *dev, int port,
+ u8 dev_addr, u16 reg_addr, u16 val)
+{
+ ksz_pwrite16(dev, port, REG_PORT_PHY_MMD_SETUP,
+ MMD_SETUP(PORT_MMD_OP_INDEX, dev_addr));
+ ksz_pwrite16(dev, port, REG_PORT_PHY_MMD_INDEX_DATA, reg_addr);
+ ksz_pwrite16(dev, port, REG_PORT_PHY_MMD_SETUP,
+ MMD_SETUP(PORT_MMD_OP_DATA_NO_INCR, dev_addr));
+ ksz_pwrite16(dev, port, REG_PORT_PHY_MMD_INDEX_DATA, val);
+}
+
+static void ksz9477_phy_errata_setup(struct ksz_device *dev, int port)
+{
+ /* Apply PHY settings to address errata listed in
+ * KSZ9477, KSZ9897, KSZ9896, KSZ9567, KSZ8565
+ * Silicon Errata and Data Sheet Clarification documents:
+ *
+ * Register settings are needed to improve PHY receive performance
+ */
+ ksz9477_port_mmd_write(dev, port, 0x01, 0x6f, 0xdd0b);
+ ksz9477_port_mmd_write(dev, port, 0x01, 0x8f, 0x6032);
+ ksz9477_port_mmd_write(dev, port, 0x01, 0x9d, 0x248c);
+ ksz9477_port_mmd_write(dev, port, 0x01, 0x75, 0x0060);
+ ksz9477_port_mmd_write(dev, port, 0x01, 0xd3, 0x7777);
+ ksz9477_port_mmd_write(dev, port, 0x1c, 0x06, 0x3008);
+ ksz9477_port_mmd_write(dev, port, 0x1c, 0x08, 0x2001);
+
+ /* Transmit waveform amplitude can be improved
+ * (1000BASE-T, 100BASE-TX, 10BASE-Te)
+ */
+ ksz9477_port_mmd_write(dev, port, 0x1c, 0x04, 0x00d0);
+
+ /* Energy Efficient Ethernet (EEE) feature select must
+ * be manually disabled (except on KSZ8565 which is 100Mbit)
+ */
+ if (dev->features & GBIT_SUPPORT)
+ ksz9477_port_mmd_write(dev, port, 0x07, 0x3c, 0x0000);
+
+ /* Register settings are required to meet data sheet
+ * supply current specifications
+ */
+ ksz9477_port_mmd_write(dev, port, 0x1c, 0x13, 0x6eff);
+ ksz9477_port_mmd_write(dev, port, 0x1c, 0x14, 0xe6ff);
+ ksz9477_port_mmd_write(dev, port, 0x1c, 0x15, 0x6eff);
+ ksz9477_port_mmd_write(dev, port, 0x1c, 0x16, 0xe6ff);
+ ksz9477_port_mmd_write(dev, port, 0x1c, 0x17, 0x00ff);
+ ksz9477_port_mmd_write(dev, port, 0x1c, 0x18, 0x43ff);
+ ksz9477_port_mmd_write(dev, port, 0x1c, 0x19, 0xc3ff);
+ ksz9477_port_mmd_write(dev, port, 0x1c, 0x1a, 0x6fff);
+ ksz9477_port_mmd_write(dev, port, 0x1c, 0x1b, 0x07ff);
+ ksz9477_port_mmd_write(dev, port, 0x1c, 0x1c, 0x0fff);
+ ksz9477_port_mmd_write(dev, port, 0x1c, 0x1d, 0xe7ff);
+ ksz9477_port_mmd_write(dev, port, 0x1c, 0x1e, 0xefff);
+ ksz9477_port_mmd_write(dev, port, 0x1c, 0x20, 0xeeee);
+}
+
static void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
{
u8 data8;
@@ -1203,6 +1224,8 @@ static void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
PORT_FORCE_TX_FLOW_CTRL | PORT_FORCE_RX_FLOW_CTRL,
false);
+ if (dev->phy_errata_9477)
+ ksz9477_phy_errata_setup(dev, port);
} else {
/* force flow control */
ksz_port_cfg(dev, port, REG_PORT_CTRL_0,
@@ -1474,6 +1497,7 @@ struct ksz_chip_data {
int num_statics;
int cpu_ports;
int port_cnt;
+ bool phy_errata_9477;
};
static const struct ksz_chip_data ksz9477_switch_chips[] = {
@@ -1485,6 +1509,7 @@ static const struct ksz_chip_data ksz9477_switch_chips[] = {
.num_statics = 16,
.cpu_ports = 0x7F, /* can be configured as cpu port */
.port_cnt = 7, /* total physical port count */
+ .phy_errata_9477 = true,
},
{
.chip_id = 0x00989700,
@@ -1494,6 +1519,7 @@ static const struct ksz_chip_data ksz9477_switch_chips[] = {
.num_statics = 16,
.cpu_ports = 0x7F, /* can be configured as cpu port */
.port_cnt = 7, /* total physical port count */
+ .phy_errata_9477 = true,
},
{
.chip_id = 0x00989300,
@@ -1522,6 +1548,7 @@ static int ksz9477_switch_init(struct ksz_device *dev)
dev->num_statics = chip->num_statics;
dev->port_cnt = chip->port_cnt;
dev->cpu_ports = chip->cpu_ports;
+ dev->phy_errata_9477 = chip->phy_errata_9477;
break;
}
diff --git a/drivers/net/dsa/microchip/ksz9477_spi.c b/drivers/net/dsa/microchip/ksz9477_spi.c
index 75178624d3f5..5a9e27b337a8 100644
--- a/drivers/net/dsa/microchip/ksz9477_spi.c
+++ b/drivers/net/dsa/microchip/ksz9477_spi.c
@@ -10,119 +10,43 @@
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/regmap.h>
#include <linux/spi/spi.h>
#include "ksz_priv.h"
-#include "ksz_spi.h"
-
-/* SPI frame opcodes */
-#define KS_SPIOP_RD 3
-#define KS_SPIOP_WR 2
+#include "ksz_common.h"
#define SPI_ADDR_SHIFT 24
-#define SPI_ADDR_MASK (BIT(SPI_ADDR_SHIFT) - 1)
+#define SPI_ADDR_ALIGN 3
#define SPI_TURNAROUND_SHIFT 5
-/* Enough to read all switch port registers. */
-#define SPI_TX_BUF_LEN 0x100
-
-static int ksz9477_spi_read_reg(struct spi_device *spi, u32 reg, u8 *val,
- unsigned int len)
-{
- u32 txbuf;
- int ret;
-
- txbuf = reg & SPI_ADDR_MASK;
- txbuf |= KS_SPIOP_RD << SPI_ADDR_SHIFT;
- txbuf <<= SPI_TURNAROUND_SHIFT;
- txbuf = cpu_to_be32(txbuf);
-
- ret = spi_write_then_read(spi, &txbuf, 4, val, len);
- return ret;
-}
-
-static int ksz9477_spi_write_reg(struct spi_device *spi, u32 reg, u8 *val,
- unsigned int len)
-{
- u32 *txbuf = (u32 *)val;
-
- *txbuf = reg & SPI_ADDR_MASK;
- *txbuf |= (KS_SPIOP_WR << SPI_ADDR_SHIFT);
- *txbuf <<= SPI_TURNAROUND_SHIFT;
- *txbuf = cpu_to_be32(*txbuf);
-
- return spi_write(spi, txbuf, 4 + len);
-}
-
-static int ksz_spi_read(struct ksz_device *dev, u32 reg, u8 *data,
- unsigned int len)
-{
- struct spi_device *spi = dev->priv;
-
- return ksz9477_spi_read_reg(spi, reg, data, len);
-}
-
-static int ksz_spi_write(struct ksz_device *dev, u32 reg, void *data,
- unsigned int len)
-{
- struct spi_device *spi = dev->priv;
-
- if (len > SPI_TX_BUF_LEN)
- len = SPI_TX_BUF_LEN;
- memcpy(&dev->txbuf[4], data, len);
- return ksz9477_spi_write_reg(spi, reg, dev->txbuf, len);
-}
-
-static int ksz_spi_read24(struct ksz_device *dev, u32 reg, u32 *val)
-{
- int ret;
-
- *val = 0;
- ret = ksz_spi_read(dev, reg, (u8 *)val, 3);
- if (!ret) {
- *val = be32_to_cpu(*val);
- /* convert to 24bit */
- *val >>= 8;
- }
-
- return ret;
-}
-
-static int ksz_spi_write24(struct ksz_device *dev, u32 reg, u32 value)
-{
- /* make it to big endian 24bit from MSB */
- value <<= 8;
- value = cpu_to_be32(value);
- return ksz_spi_write(dev, reg, &value, 3);
-}
-
-static const struct ksz_io_ops ksz9477_spi_ops = {
- .read8 = ksz_spi_read8,
- .read16 = ksz_spi_read16,
- .read24 = ksz_spi_read24,
- .read32 = ksz_spi_read32,
- .write8 = ksz_spi_write8,
- .write16 = ksz_spi_write16,
- .write24 = ksz_spi_write24,
- .write32 = ksz_spi_write32,
- .get = ksz_spi_get,
- .set = ksz_spi_set,
-};
+KSZ_REGMAP_TABLE(ksz9477, 32, SPI_ADDR_SHIFT,
+ SPI_TURNAROUND_SHIFT, SPI_ADDR_ALIGN);
static int ksz9477_spi_probe(struct spi_device *spi)
{
struct ksz_device *dev;
- int ret;
+ int i, ret;
- dev = ksz_switch_alloc(&spi->dev, &ksz9477_spi_ops, spi);
+ dev = ksz_switch_alloc(&spi->dev, spi);
if (!dev)
return -ENOMEM;
+ for (i = 0; i < ARRAY_SIZE(ksz9477_regmap_config); i++) {
+ dev->regmap[i] = devm_regmap_init_spi(spi,
+ &ksz9477_regmap_config[i]);
+ if (IS_ERR(dev->regmap[i])) {
+ ret = PTR_ERR(dev->regmap[i]);
+ dev_err(&spi->dev,
+ "Failed to initialize regmap%i: %d\n",
+ ksz9477_regmap_config[i].val_bits, ret);
+ return ret;
+ }
+ }
+
if (spi->dev.platform_data)
dev->pdata = spi->dev.platform_data;
- dev->txbuf = devm_kzalloc(dev->dev, 4 + SPI_TX_BUF_LEN, GFP_KERNEL);
-
ret = ksz9477_switch_register(dev);
/* Main DSA driver may not be started yet. */
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index db91b213eae1..a3d2d67894bd 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -396,9 +396,7 @@ void ksz_disable_port(struct dsa_switch *ds, int port)
}
EXPORT_SYMBOL_GPL(ksz_disable_port);
-struct ksz_device *ksz_switch_alloc(struct device *base,
- const struct ksz_io_ops *ops,
- void *priv)
+struct ksz_device *ksz_switch_alloc(struct device *base, void *priv)
{
struct dsa_switch *ds;
struct ksz_device *swdev;
@@ -416,7 +414,6 @@ struct ksz_device *ksz_switch_alloc(struct device *base,
swdev->ds = ds;
swdev->priv = priv;
- swdev->ops = ops;
return swdev;
}
@@ -442,7 +439,6 @@ int ksz_switch_register(struct ksz_device *dev,
}
mutex_init(&dev->dev_mutex);
- mutex_init(&dev->reg_mutex);
mutex_init(&dev->stats_mutex);
mutex_init(&dev->alu_mutex);
mutex_init(&dev->vlan_mutex);
@@ -463,6 +459,8 @@ int ksz_switch_register(struct ksz_device *dev,
ret = of_get_phy_mode(dev->dev->of_node);
if (ret >= 0)
dev->interface = ret;
+ dev->synclko_125 = of_property_read_bool(dev->dev->of_node,
+ "microchip,synclko-125");
}
ret = dsa_register_switch(dev->ds);
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index 21cd794e18f1..ee7096d8af07 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -7,6 +7,8 @@
#ifndef __KSZ_COMMON_H
#define __KSZ_COMMON_H
+#include <linux/regmap.h>
+
void ksz_port_cleanup(struct ksz_device *dev, int port);
void ksz_update_port_member(struct ksz_device *dev, int port);
void ksz_init_mib_timer(struct ksz_device *dev);
@@ -41,114 +43,44 @@ void ksz_disable_port(struct dsa_switch *ds, int port);
static inline int ksz_read8(struct ksz_device *dev, u32 reg, u8 *val)
{
- int ret;
-
- mutex_lock(&dev->reg_mutex);
- ret = dev->ops->read8(dev, reg, val);
- mutex_unlock(&dev->reg_mutex);
+ unsigned int value;
+ int ret = regmap_read(dev->regmap[0], reg, &value);
+ *val = value;
return ret;
}
static inline int ksz_read16(struct ksz_device *dev, u32 reg, u16 *val)
{
- int ret;
-
- mutex_lock(&dev->reg_mutex);
- ret = dev->ops->read16(dev, reg, val);
- mutex_unlock(&dev->reg_mutex);
-
- return ret;
-}
-
-static inline int ksz_read24(struct ksz_device *dev, u32 reg, u32 *val)
-{
- int ret;
-
- mutex_lock(&dev->reg_mutex);
- ret = dev->ops->read24(dev, reg, val);
- mutex_unlock(&dev->reg_mutex);
+ unsigned int value;
+ int ret = regmap_read(dev->regmap[1], reg, &value);
+ *val = value;
return ret;
}
static inline int ksz_read32(struct ksz_device *dev, u32 reg, u32 *val)
{
- int ret;
-
- mutex_lock(&dev->reg_mutex);
- ret = dev->ops->read32(dev, reg, val);
- mutex_unlock(&dev->reg_mutex);
+ unsigned int value;
+ int ret = regmap_read(dev->regmap[2], reg, &value);
+ *val = value;
return ret;
}
static inline int ksz_write8(struct ksz_device *dev, u32 reg, u8 value)
{
- int ret;
-
- mutex_lock(&dev->reg_mutex);
- ret = dev->ops->write8(dev, reg, value);
- mutex_unlock(&dev->reg_mutex);
-
- return ret;
+ return regmap_write(dev->regmap[0], reg, value);
}
static inline int ksz_write16(struct ksz_device *dev, u32 reg, u16 value)
{
- int ret;
-
- mutex_lock(&dev->reg_mutex);
- ret = dev->ops->write16(dev, reg, value);
- mutex_unlock(&dev->reg_mutex);
-
- return ret;
-}
-
-static inline int ksz_write24(struct ksz_device *dev, u32 reg, u32 value)
-{
- int ret;
-
- mutex_lock(&dev->reg_mutex);
- ret = dev->ops->write24(dev, reg, value);
- mutex_unlock(&dev->reg_mutex);
-
- return ret;
+ return regmap_write(dev->regmap[1], reg, value);
}
static inline int ksz_write32(struct ksz_device *dev, u32 reg, u32 value)
{
- int ret;
-
- mutex_lock(&dev->reg_mutex);
- ret = dev->ops->write32(dev, reg, value);
- mutex_unlock(&dev->reg_mutex);
-
- return ret;
-}
-
-static inline int ksz_get(struct ksz_device *dev, u32 reg, void *data,
- size_t len)
-{
- int ret;
-
- mutex_lock(&dev->reg_mutex);
- ret = dev->ops->get(dev, reg, data, len);
- mutex_unlock(&dev->reg_mutex);
-
- return ret;
-}
-
-static inline int ksz_set(struct ksz_device *dev, u32 reg, void *data,
- size_t len)
-{
- int ret;
-
- mutex_lock(&dev->reg_mutex);
- ret = dev->ops->set(dev, reg, data, len);
- mutex_unlock(&dev->reg_mutex);
-
- return ret;
+ return regmap_write(dev->regmap[2], reg, value);
}
static inline void ksz_pread8(struct ksz_device *dev, int port, int offset,
@@ -187,47 +119,36 @@ static inline void ksz_pwrite32(struct ksz_device *dev, int port, int offset,
ksz_write32(dev, dev->dev_ops->get_port_addr(port, offset), data);
}
-static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
-{
- u8 data;
-
- ksz_read8(dev, addr, &data);
- if (set)
- data |= bits;
- else
- data &= ~bits;
- ksz_write8(dev, addr, data);
-}
-
-static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits,
- bool set)
-{
- u32 addr;
- u8 data;
-
- addr = dev->dev_ops->get_port_addr(port, offset);
- ksz_read8(dev, addr, &data);
-
- if (set)
- data |= bits;
- else
- data &= ~bits;
-
- ksz_write8(dev, addr, data);
-}
-
-struct ksz_poll_ctx {
- struct ksz_device *dev;
- int port;
- int offset;
-};
-
-static inline u32 ksz_pread32_poll(struct ksz_poll_ctx *ctx)
-{
- u32 data;
-
- ksz_pread32(ctx->dev, ctx->port, ctx->offset, &data);
- return data;
-}
+/* Regmap tables generation */
+#define KSZ_SPI_OP_RD 3
+#define KSZ_SPI_OP_WR 2
+
+#define KSZ_SPI_OP_FLAG_MASK(opcode, swp, regbits, regpad) \
+ swab##swp((opcode) << ((regbits) + (regpad)))
+
+#define KSZ_REGMAP_ENTRY(width, swp, regbits, regpad, regalign) \
+ { \
+ .val_bits = (width), \
+ .reg_stride = (width) / 8, \
+ .reg_bits = (regbits) + (regalign), \
+ .pad_bits = (regpad), \
+ .max_register = BIT(regbits) - 1, \
+ .cache_type = REGCACHE_NONE, \
+ .read_flag_mask = \
+ KSZ_SPI_OP_FLAG_MASK(KSZ_SPI_OP_RD, swp, \
+ regbits, regpad), \
+ .write_flag_mask = \
+ KSZ_SPI_OP_FLAG_MASK(KSZ_SPI_OP_WR, swp, \
+ regbits, regpad), \
+ .reg_format_endian = REGMAP_ENDIAN_BIG, \
+ .val_format_endian = REGMAP_ENDIAN_BIG \
+ }
+
+#define KSZ_REGMAP_TABLE(ksz, swp, regbits, regpad, regalign) \
+ static const struct regmap_config ksz##_regmap_config[] = { \
+ KSZ_REGMAP_ENTRY(8, swp, (regbits), (regpad), (regalign)), \
+ KSZ_REGMAP_ENTRY(16, swp, (regbits), (regpad), (regalign)), \
+ KSZ_REGMAP_ENTRY(32, swp, (regbits), (regpad), (regalign)), \
+ }
#endif
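
Editor's note: KSZ_REGMAP_TABLE() generates one regmap_config per access width, which is why the ksz_read8/16/32() helpers above index dev->regmap[0], [1] and [2]. With the arguments used in ksz9477_spi.c (KSZ_REGMAP_TABLE(ksz9477, 32, SPI_ADDR_SHIFT, SPI_TURNAROUND_SHIFT, SPI_ADDR_ALIGN), i.e. regbits = 24, regpad = 5, regalign = 3), the 32-bit entry expands by hand to roughly the following (editorial illustration, not part of the patch):

/* 32-bit entry of ksz9477_regmap_config[], expanded by hand.  The
 * read/write flag masks are the SPI opcodes (3 = read, 2 = write)
 * shifted above the 24 address bits plus 5 turnaround bits, then
 * byte-swapped so the opcode ends up in the first byte regmap puts
 * on the wire.
 */
static const struct regmap_config ksz9477_regmap32_expanded = {
	.val_bits		= 32,
	.reg_stride		= 4,		/* 32 / 8 */
	.reg_bits		= 27,		/* 24 address bits + 3 alignment bits */
	.pad_bits		= 5,		/* turnaround bits */
	.max_register		= BIT(24) - 1,
	.cache_type		= REGCACHE_NONE,
	.read_flag_mask		= 0x60,		/* swab32(3 << 29) */
	.write_flag_mask	= 0x40,		/* swab32(2 << 29) */
	.reg_format_endian	= REGMAP_ENDIAN_BIG,
	.val_format_endian	= REGMAP_ENDIAN_BIG,
};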
diff --git a/drivers/net/dsa/microchip/ksz_priv.h b/drivers/net/dsa/microchip/ksz_priv.h
index b52e5ca17ab4..beacf0e40f42 100644
--- a/drivers/net/dsa/microchip/ksz_priv.h
+++ b/drivers/net/dsa/microchip/ksz_priv.h
@@ -14,8 +14,6 @@
#include <linux/etherdevice.h>
#include <net/dsa.h>
-struct ksz_io_ops;
-
struct vlan_table {
u32 table[3];
};
@@ -49,14 +47,13 @@ struct ksz_device {
const char *name;
struct mutex dev_mutex; /* device access */
- struct mutex reg_mutex; /* register access */
struct mutex stats_mutex; /* status access */
struct mutex alu_mutex; /* ALU access */
struct mutex vlan_mutex; /* vlan access */
- const struct ksz_io_ops *ops;
const struct ksz_dev_ops *dev_ops;
struct device *dev;
+ struct regmap *regmap[3];
void *priv;
@@ -77,11 +74,11 @@ struct ksz_device {
int last_port; /* ports after that not used */
phy_interface_t interface;
u32 regs_size;
+ bool phy_errata_9477;
+ bool synclko_125;
struct vlan_table *vlan_cache;
- u8 *txbuf;
-
struct ksz_port *ports;
struct timer_list mib_read_timer;
struct work_struct mib_read;
@@ -100,19 +97,6 @@ struct ksz_device {
u16 port_mask;
};
-struct ksz_io_ops {
- int (*read8)(struct ksz_device *dev, u32 reg, u8 *value);
- int (*read16)(struct ksz_device *dev, u32 reg, u16 *value);
- int (*read24)(struct ksz_device *dev, u32 reg, u32 *value);
- int (*read32)(struct ksz_device *dev, u32 reg, u32 *value);
- int (*write8)(struct ksz_device *dev, u32 reg, u8 value);
- int (*write16)(struct ksz_device *dev, u32 reg, u16 value);
- int (*write24)(struct ksz_device *dev, u32 reg, u32 value);
- int (*write32)(struct ksz_device *dev, u32 reg, u32 value);
- int (*get)(struct ksz_device *dev, u32 reg, void *data, size_t len);
- int (*set)(struct ksz_device *dev, u32 reg, void *data, size_t len);
-};
-
struct alu_struct {
/* entry 1 */
u8 is_static:1;
@@ -161,8 +145,7 @@ struct ksz_dev_ops {
void (*exit)(struct ksz_device *dev);
};
-struct ksz_device *ksz_switch_alloc(struct device *base,
- const struct ksz_io_ops *ops, void *priv);
+struct ksz_device *ksz_switch_alloc(struct device *base, void *priv);
int ksz_switch_register(struct ksz_device *dev,
const struct ksz_dev_ops *ops);
void ksz_switch_remove(struct ksz_device *dev);
diff --git a/drivers/net/dsa/microchip/ksz_spi.h b/drivers/net/dsa/microchip/ksz_spi.h
deleted file mode 100644
index 427811bd60b3..000000000000
--- a/drivers/net/dsa/microchip/ksz_spi.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Microchip KSZ series SPI access common header
- *
- * Copyright (C) 2017-2018 Microchip Technology Inc.
- * Tristram Ha <Tristram.Ha@microchip.com>
- */
-
-#ifndef __KSZ_SPI_H
-#define __KSZ_SPI_H
-
-/* Chip dependent SPI access */
-static int ksz_spi_read(struct ksz_device *dev, u32 reg, u8 *data,
- unsigned int len);
-static int ksz_spi_write(struct ksz_device *dev, u32 reg, void *data,
- unsigned int len);
-
-static int ksz_spi_read8(struct ksz_device *dev, u32 reg, u8 *val)
-{
- return ksz_spi_read(dev, reg, val, 1);
-}
-
-static int ksz_spi_read16(struct ksz_device *dev, u32 reg, u16 *val)
-{
- int ret = ksz_spi_read(dev, reg, (u8 *)val, 2);
-
- if (!ret)
- *val = be16_to_cpu(*val);
-
- return ret;
-}
-
-static int ksz_spi_read32(struct ksz_device *dev, u32 reg, u32 *val)
-{
- int ret = ksz_spi_read(dev, reg, (u8 *)val, 4);
-
- if (!ret)
- *val = be32_to_cpu(*val);
-
- return ret;
-}
-
-static int ksz_spi_write8(struct ksz_device *dev, u32 reg, u8 value)
-{
- return ksz_spi_write(dev, reg, &value, 1);
-}
-
-static int ksz_spi_write16(struct ksz_device *dev, u32 reg, u16 value)
-{
- value = cpu_to_be16(value);
- return ksz_spi_write(dev, reg, &value, 2);
-}
-
-static int ksz_spi_write32(struct ksz_device *dev, u32 reg, u32 value)
-{
- value = cpu_to_be32(value);
- return ksz_spi_write(dev, reg, &value, 4);
-}
-
-static int ksz_spi_get(struct ksz_device *dev, u32 reg, void *data, size_t len)
-{
- return ksz_spi_read(dev, reg, data, len);
-}
-
-static int ksz_spi_set(struct ksz_device *dev, u32 reg, void *data, size_t len)
-{
- return ksz_spi_write(dev, reg, data, len);
-}
-
-#endif
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index c7d352da5448..3181e95586d6 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -428,24 +428,48 @@ static int
mt7530_pad_clk_setup(struct dsa_switch *ds, int mode)
{
struct mt7530_priv *priv = ds->priv;
- u32 ncpo1, ssc_delta, trgint, i;
+ u32 ncpo1, ssc_delta, trgint, i, xtal;
+
+ xtal = mt7530_read(priv, MT7530_MHWTRAP) & HWTRAP_XTAL_MASK;
+
+ if (xtal == HWTRAP_XTAL_20MHZ) {
+ dev_err(priv->dev,
+ "%s: MT7530 with a 20MHz XTAL is not supported!\n",
+ __func__);
+ return -EINVAL;
+ }
switch (mode) {
case PHY_INTERFACE_MODE_RGMII:
trgint = 0;
+ /* PLL frequency: 125MHz */
ncpo1 = 0x0c80;
- ssc_delta = 0x87;
break;
case PHY_INTERFACE_MODE_TRGMII:
trgint = 1;
- ncpo1 = 0x1400;
- ssc_delta = 0x57;
+ if (priv->id == ID_MT7621) {
+ /* PLL frequency: 150MHz: 1.2GBit */
+ if (xtal == HWTRAP_XTAL_40MHZ)
+ ncpo1 = 0x0780;
+ if (xtal == HWTRAP_XTAL_25MHZ)
+ ncpo1 = 0x0a00;
+ } else { /* PLL frequency: 250MHz: 2.0Gbit */
+ if (xtal == HWTRAP_XTAL_40MHZ)
+ ncpo1 = 0x0c80;
+ if (xtal == HWTRAP_XTAL_25MHZ)
+ ncpo1 = 0x1400;
+ }
break;
default:
dev_err(priv->dev, "xMII mode %d not supported\n", mode);
return -EINVAL;
}
+ if (xtal == HWTRAP_XTAL_25MHZ)
+ ssc_delta = 0x57;
+ else
+ ssc_delta = 0x87;
+
mt7530_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_MASK,
P6_INTF_MODE(trgint));
@@ -507,7 +531,9 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, int mode)
mt7530_rmw(priv, MT7530_TRGMII_RD(i),
RD_TAP_MASK, RD_TAP(16));
else
- mt7623_trgmii_set(priv, GSW_INTF_MODE, INTF_MODE_TRGMII);
+ if (priv->id != ID_MT7621)
+ mt7623_trgmii_set(priv, GSW_INTF_MODE,
+ INTF_MODE_TRGMII);
return 0;
}
@@ -613,13 +639,13 @@ static void mt7530_adjust_link(struct dsa_switch *ds, int port,
struct mt7530_priv *priv = ds->priv;
if (phy_is_pseudo_fixed_link(phydev)) {
- if (priv->id == ID_MT7530) {
- dev_dbg(priv->dev, "phy-mode for master device = %x\n",
- phydev->interface);
+ dev_dbg(priv->dev, "phy-mode for master device = %x\n",
+ phydev->interface);
- /* Setup TX circuit incluing relevant PAD and driving */
- mt7530_pad_clk_setup(ds, phydev->interface);
+ /* Setup TX circuit including relevant PAD and driving */
+ mt7530_pad_clk_setup(ds, phydev->interface);
+ if (priv->id == ID_MT7530) {
/* Setup RX circuit, relevant PAD and driving on the
* host which must be placed after the setup on the
* device side is all finished.
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index 4331429969fa..bfac90f48102 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -244,6 +244,10 @@ enum mt7530_vlan_port_attr {
/* Register for hw trap status */
#define MT7530_HWTRAP 0x7800
+#define HWTRAP_XTAL_MASK (BIT(10) | BIT(9))
+#define HWTRAP_XTAL_25MHZ (BIT(10) | BIT(9))
+#define HWTRAP_XTAL_40MHZ (BIT(10))
+#define HWTRAP_XTAL_20MHZ (BIT(9))
/* Register for hw trap modification */
#define MT7530_MHWTRAP 0x7804
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 063c7a671b41..6b17cd961d06 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -118,9 +118,9 @@ static irqreturn_t mv88e6xxx_g1_irq_thread_work(struct mv88e6xxx_chip *chip)
u16 ctl1;
int err;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &reg);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
if (err)
goto out;
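
Editor's note: the rest of this file-wide change swaps direct mutex_lock()/mutex_unlock() on chip->reg_lock for mv88e6xxx_reg_lock()/mv88e6xxx_reg_unlock(). Their definitions are not in the hunks shown here; judging from the call sites they are presumably thin wrappers of roughly this shape (editorial assumption):

/* Assumed shape of the helpers, inferred from the call sites in this
 * diff; the actual definitions are not part of the hunks shown here.
 */
static inline void mv88e6xxx_reg_lock(struct mv88e6xxx_chip *chip)
{
	mutex_lock(&chip->reg_lock);
}

static inline void mv88e6xxx_reg_unlock(struct mv88e6xxx_chip *chip)
{
	mutex_unlock(&chip->reg_lock);
}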
@@ -135,13 +135,13 @@ static irqreturn_t mv88e6xxx_g1_irq_thread_work(struct mv88e6xxx_chip *chip)
}
}
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &ctl1);
if (err)
goto unlock;
err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &reg);
unlock:
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
if (err)
goto out;
ctl1 &= GENMASK(chip->g1_irq.nirqs, 0);
@@ -162,7 +162,7 @@ static void mv88e6xxx_g1_irq_bus_lock(struct irq_data *d)
{
struct mv88e6xxx_chip *chip = irq_data_get_irq_chip_data(d);
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
}
static void mv88e6xxx_g1_irq_bus_sync_unlock(struct irq_data *d)
@@ -184,7 +184,7 @@ static void mv88e6xxx_g1_irq_bus_sync_unlock(struct irq_data *d)
goto out;
out:
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
}
static const struct irq_chip mv88e6xxx_g1_irq_chip = {
@@ -239,9 +239,9 @@ static void mv88e6xxx_g1_irq_free(struct mv88e6xxx_chip *chip)
*/
free_irq(chip->irq, chip);
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
mv88e6xxx_g1_irq_free_common(chip);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
}
static int mv88e6xxx_g1_irq_setup_common(struct mv88e6xxx_chip *chip)
@@ -310,12 +310,12 @@ static int mv88e6xxx_g1_irq_setup(struct mv88e6xxx_chip *chip)
*/
irq_set_lockdep_class(chip->irq, &lock_key, &request_key);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
err = request_threaded_irq(chip->irq, NULL,
mv88e6xxx_g1_irq_thread_fn,
IRQF_ONESHOT | IRQF_SHARED,
dev_name(chip->dev), chip);
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
if (err)
mv88e6xxx_g1_irq_free_common(chip);
@@ -359,9 +359,9 @@ static void mv88e6xxx_irq_poll_free(struct mv88e6xxx_chip *chip)
kthread_cancel_delayed_work_sync(&chip->irq_poll_work);
kthread_destroy_worker(chip->kworker);
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
mv88e6xxx_g1_irq_free_common(chip);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
}
int mv88e6xxx_wait(struct mv88e6xxx_chip *chip, int addr, int reg, u16 mask)
@@ -496,11 +496,11 @@ static void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
mv88e6xxx_phy_is_internal(ds, port))
return;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_setup_mac(chip, port, phydev->link, phydev->speed,
phydev->duplex, phydev->pause,
phydev->interface);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
if (err && err != -EOPNOTSUPP)
dev_err(ds->dev, "p%d: failed to configure MAC\n", port);
@@ -616,12 +616,12 @@ static int mv88e6xxx_link_state(struct dsa_switch *ds, int port,
struct mv88e6xxx_chip *chip = ds->priv;
int err;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
if (chip->info->ops->port_link_state)
err = chip->info->ops->port_link_state(chip, port, state);
else
err = -EOPNOTSUPP;
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
return err;
}
@@ -651,10 +651,10 @@ static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port,
}
pause = !!phylink_test(state->advertising, Pause);
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_setup_mac(chip, port, link, speed, duplex, pause,
state->interface);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
if (err && err != -EOPNOTSUPP)
dev_err(ds->dev, "p%d: failed to configure MAC\n", port);
@@ -665,9 +665,9 @@ static void mv88e6xxx_mac_link_force(struct dsa_switch *ds, int port, int link)
struct mv88e6xxx_chip *chip = ds->priv;
int err;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
err = chip->info->ops->port_set_link(chip, port, link);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
if (err)
dev_err(chip->dev, "p%d: failed to force MAC link\n", port);
@@ -825,6 +825,12 @@ static int mv88e6095_stats_get_strings(struct mv88e6xxx_chip *chip,
STATS_TYPE_BANK0 | STATS_TYPE_PORT);
}
+static int mv88e6250_stats_get_strings(struct mv88e6xxx_chip *chip,
+ uint8_t *data)
+{
+ return mv88e6xxx_stats_get_strings(chip, data, STATS_TYPE_BANK0);
+}
+
static int mv88e6320_stats_get_strings(struct mv88e6xxx_chip *chip,
uint8_t *data)
{
@@ -859,7 +865,7 @@ static void mv88e6xxx_get_strings(struct dsa_switch *ds, int port,
if (stringset != ETH_SS_STATS)
return;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
if (chip->info->ops->stats_get_strings)
count = chip->info->ops->stats_get_strings(chip, data);
@@ -872,7 +878,7 @@ static void mv88e6xxx_get_strings(struct dsa_switch *ds, int port,
data += count * ETH_GSTRING_LEN;
mv88e6xxx_atu_vtu_get_strings(data);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
}
static int mv88e6xxx_stats_get_sset_count(struct mv88e6xxx_chip *chip,
@@ -895,6 +901,11 @@ static int mv88e6095_stats_get_sset_count(struct mv88e6xxx_chip *chip)
STATS_TYPE_PORT);
}
+static int mv88e6250_stats_get_sset_count(struct mv88e6xxx_chip *chip)
+{
+ return mv88e6xxx_stats_get_sset_count(chip, STATS_TYPE_BANK0);
+}
+
static int mv88e6320_stats_get_sset_count(struct mv88e6xxx_chip *chip)
{
return mv88e6xxx_stats_get_sset_count(chip, STATS_TYPE_BANK0 |
@@ -910,7 +921,7 @@ static int mv88e6xxx_get_sset_count(struct dsa_switch *ds, int port, int sset)
if (sset != ETH_SS_STATS)
return 0;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
if (chip->info->ops->stats_get_sset_count)
count = chip->info->ops->stats_get_sset_count(chip);
if (count < 0)
@@ -927,7 +938,7 @@ static int mv88e6xxx_get_sset_count(struct dsa_switch *ds, int port, int sset)
count += ARRAY_SIZE(mv88e6xxx_atu_vtu_stats_strings);
out:
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
return count;
}
@@ -942,11 +953,11 @@ static int mv88e6xxx_stats_get_stats(struct mv88e6xxx_chip *chip, int port,
for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
stat = &mv88e6xxx_hw_stats[i];
if (stat->type & types) {
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
data[j] = _mv88e6xxx_get_ethtool_stat(chip, stat, port,
bank1_select,
histogram);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
j++;
}
@@ -962,6 +973,13 @@ static int mv88e6095_stats_get_stats(struct mv88e6xxx_chip *chip, int port,
0, MV88E6XXX_G1_STATS_OP_HIST_RX_TX);
}
+static int mv88e6250_stats_get_stats(struct mv88e6xxx_chip *chip, int port,
+ uint64_t *data)
+{
+ return mv88e6xxx_stats_get_stats(chip, port, data, STATS_TYPE_BANK0,
+ 0, MV88E6XXX_G1_STATS_OP_HIST_RX_TX);
+}
+
static int mv88e6320_stats_get_stats(struct mv88e6xxx_chip *chip, int port,
uint64_t *data)
{
@@ -998,14 +1016,14 @@ static void mv88e6xxx_get_stats(struct mv88e6xxx_chip *chip, int port,
if (chip->info->ops->stats_get_stats)
count = chip->info->ops->stats_get_stats(chip, port, data);
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
if (chip->info->ops->serdes_get_stats) {
data += count;
count = chip->info->ops->serdes_get_stats(chip, port, data);
}
data += count;
mv88e6xxx_atu_vtu_get_stats(chip, port, data);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
}
static void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, int port,
@@ -1014,10 +1032,10 @@ static void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, int port,
struct mv88e6xxx_chip *chip = ds->priv;
int ret;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
ret = mv88e6xxx_stats_snapshot(chip, port);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
if (ret < 0)
return;
@@ -1044,7 +1062,7 @@ static void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
memset(p, 0xff, 32 * sizeof(u16));
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
for (i = 0; i < 32; i++) {
@@ -1053,7 +1071,7 @@ static void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
p[i] = reg;
}
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
}
static int mv88e6xxx_get_mac_eee(struct dsa_switch *ds, int port,
@@ -1119,9 +1137,9 @@ static void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port,
struct mv88e6xxx_chip *chip = ds->priv;
int err;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_set_state(chip, port, state);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
if (err)
dev_err(ds->dev, "p%d: failed to update state\n", port);
@@ -1306,9 +1324,9 @@ static void mv88e6xxx_port_fast_age(struct dsa_switch *ds, int port)
struct mv88e6xxx_chip *chip = ds->priv;
int err;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_g1_atu_remove(chip, 0, port, false);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
if (err)
dev_err(ds->dev, "p%d: failed to flush ATU\n", port);
@@ -1436,7 +1454,7 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
if (!vid_begin)
return -EOPNOTSUPP;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
do {
err = mv88e6xxx_vtu_getnext(chip, &vlan);
@@ -1476,7 +1494,7 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
} while (vlan.vid < vid_end);
unlock:
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
return err;
}
@@ -1492,9 +1510,9 @@ static int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
if (!chip->info->max_vid)
return -EOPNOTSUPP;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_set_8021q_mode(chip, port, mode);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
return err;
}
@@ -1628,7 +1646,7 @@ static void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
else
member = MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_TAGGED;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid)
if (_mv88e6xxx_port_vlan_add(chip, port, vid, member))
@@ -1639,7 +1657,7 @@ static void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
dev_err(ds->dev, "p%d: failed to set PVID %d\n", port,
vlan->vid_end);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
}
static int _mv88e6xxx_port_vlan_del(struct mv88e6xxx_chip *chip,
@@ -1685,7 +1703,7 @@ static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
if (!chip->info->max_vid)
return -EOPNOTSUPP;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_get_pvid(chip, port, &pvid);
if (err)
@@ -1704,7 +1722,7 @@ static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
}
unlock:
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
return err;
}
@@ -1715,10 +1733,10 @@ static int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
struct mv88e6xxx_chip *chip = ds->priv;
int err;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_db_load_purge(chip, port, addr, vid,
MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
return err;
}
@@ -1729,10 +1747,10 @@ static int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
struct mv88e6xxx_chip *chip = ds->priv;
int err;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_db_load_purge(chip, port, addr, vid,
MV88E6XXX_G1_ATU_DATA_STATE_UNUSED);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
return err;
}
@@ -1749,9 +1767,7 @@ static int mv88e6xxx_port_db_dump_fid(struct mv88e6xxx_chip *chip,
eth_broadcast_addr(addr.mac);
do {
- mutex_lock(&chip->reg_lock);
err = mv88e6xxx_g1_atu_getnext(chip, fid, &addr);
- mutex_unlock(&chip->reg_lock);
if (err)
return err;
@@ -1784,10 +1800,7 @@ static int mv88e6xxx_port_db_dump(struct mv88e6xxx_chip *chip, int port,
int err;
/* Dump port's default Filtering Information Database (VLAN ID 0) */
- mutex_lock(&chip->reg_lock);
err = mv88e6xxx_port_get_fid(chip, port, &fid);
- mutex_unlock(&chip->reg_lock);
-
if (err)
return err;
@@ -1797,9 +1810,7 @@ static int mv88e6xxx_port_db_dump(struct mv88e6xxx_chip *chip, int port,
/* Dump VLANs' Filtering Information Databases */
do {
- mutex_lock(&chip->reg_lock);
err = mv88e6xxx_vtu_getnext(chip, &vlan);
- mutex_unlock(&chip->reg_lock);
if (err)
return err;
@@ -1819,8 +1830,13 @@ static int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port,
dsa_fdb_dump_cb_t *cb, void *data)
{
struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
- return mv88e6xxx_port_db_dump(chip, port, cb, data);
+ mv88e6xxx_reg_lock(chip);
+ err = mv88e6xxx_port_db_dump(chip, port, cb, data);
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
}
static int mv88e6xxx_bridge_map(struct mv88e6xxx_chip *chip,
@@ -1867,9 +1883,9 @@ static int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
struct mv88e6xxx_chip *chip = ds->priv;
int err;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_bridge_map(chip, br);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
return err;
}
@@ -1879,11 +1895,11 @@ static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port,
{
struct mv88e6xxx_chip *chip = ds->priv;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
if (mv88e6xxx_bridge_map(chip, br) ||
mv88e6xxx_port_vlan_map(chip, port))
dev_err(ds->dev, "failed to remap in-chip Port VLAN\n");
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
}
static int mv88e6xxx_crosschip_bridge_join(struct dsa_switch *ds, int dev,
@@ -1895,9 +1911,9 @@ static int mv88e6xxx_crosschip_bridge_join(struct dsa_switch *ds, int dev,
if (!mv88e6xxx_has_pvt(chip))
return 0;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_pvt_map(chip, dev, port);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
return err;
}
@@ -1910,10 +1926,10 @@ static void mv88e6xxx_crosschip_bridge_leave(struct dsa_switch *ds, int dev,
if (!mv88e6xxx_has_pvt(chip))
return;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
if (mv88e6xxx_pvt_map(chip, dev, port))
dev_err(ds->dev, "failed to remap cross-chip Port VLAN\n");
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
}
static int mv88e6xxx_software_reset(struct mv88e6xxx_chip *chip)
@@ -2264,14 +2280,14 @@ static int mv88e6xxx_port_enable(struct dsa_switch *ds, int port,
struct mv88e6xxx_chip *chip = ds->priv;
int err;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_serdes_power(chip, port, true);
if (!err && chip->info->ops->serdes_irq_setup)
err = chip->info->ops->serdes_irq_setup(chip, port);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
return err;
}
@@ -2280,7 +2296,7 @@ static void mv88e6xxx_port_disable(struct dsa_switch *ds, int port)
{
struct mv88e6xxx_chip *chip = ds->priv;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
if (mv88e6xxx_port_set_state(chip, port, BR_STATE_DISABLED))
dev_err(chip->dev, "failed to disable port\n");
@@ -2291,7 +2307,7 @@ static void mv88e6xxx_port_disable(struct dsa_switch *ds, int port)
if (mv88e6xxx_serdes_power(chip, port, false))
dev_err(chip->dev, "failed to power off SERDES\n");
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
}
static int mv88e6xxx_set_ageing_time(struct dsa_switch *ds,
@@ -2300,9 +2316,9 @@ static int mv88e6xxx_set_ageing_time(struct dsa_switch *ds,
struct mv88e6xxx_chip *chip = ds->priv;
int err;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_g1_atu_set_age_time(chip, ageing_time);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
return err;
}
@@ -2432,7 +2448,7 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
chip->ds = ds;
ds->slave_mii_bus = mv88e6xxx_default_mdio_bus(chip);
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
if (chip->info->ops->setup_errata) {
err = chip->info->ops->setup_errata(chip);
@@ -2539,7 +2555,7 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
goto unlock;
unlock:
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
return err;
}
@@ -2554,9 +2570,9 @@ static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg)
if (!chip->info->ops->phy_read)
return -EOPNOTSUPP;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
err = chip->info->ops->phy_read(chip, bus, phy, reg, &val);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
if (reg == MII_PHYSID2) {
/* Some internal PHYs don't have a model number. */
@@ -2589,9 +2605,9 @@ static int mv88e6xxx_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
if (!chip->info->ops->phy_write)
return -EOPNOTSUPP;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
err = chip->info->ops->phy_write(chip, bus, phy, reg, val);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
return err;
}
@@ -2606,9 +2622,9 @@ static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip,
int err;
if (external) {
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_g2_scratch_gpio_set_smi(chip, true);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
if (err)
return err;
@@ -2729,9 +2745,9 @@ static int mv88e6xxx_get_eeprom(struct dsa_switch *ds,
if (!chip->info->ops->get_eeprom)
return -EOPNOTSUPP;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
err = chip->info->ops->get_eeprom(chip, eeprom, data);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
if (err)
return err;
@@ -2753,9 +2769,9 @@ static int mv88e6xxx_set_eeprom(struct dsa_switch *ds,
if (eeprom->magic != 0xc3ec4951)
return -EINVAL;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
err = chip->info->ops->set_eeprom(chip, eeprom, data);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
return err;
}
@@ -3444,6 +3460,44 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
.phylink_validate = mv88e6352_phylink_validate,
};
+static const struct mv88e6xxx_ops mv88e6250_ops = {
+ /* MV88E6XXX_FAMILY_6250 */
+ .ieee_pri_map = mv88e6250_g1_ieee_pri_map,
+ .ip_pri_map = mv88e6085_g1_ip_pri_map,
+ .irl_init_all = mv88e6352_g2_irl_init_all,
+ .get_eeprom = mv88e6xxx_g2_get_eeprom16,
+ .set_eeprom = mv88e6xxx_g2_set_eeprom16,
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .port_set_link = mv88e6xxx_port_set_link,
+ .port_set_duplex = mv88e6xxx_port_set_duplex,
+ .port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
+ .port_set_speed = mv88e6250_port_set_speed,
+ .port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_egress_floods = mv88e6352_port_set_egress_floods,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+ .port_pause_limit = mv88e6097_port_pause_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_link_state = mv88e6250_port_link_state,
+ .stats_snapshot = mv88e6320_g1_stats_snapshot,
+ .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
+ .stats_get_sset_count = mv88e6250_stats_get_sset_count,
+ .stats_get_strings = mv88e6250_stats_get_strings,
+ .stats_get_stats = mv88e6250_stats_get_stats,
+ .set_cpu_port = mv88e6095_g1_set_cpu_port,
+ .set_egress_port = mv88e6095_g1_set_egress_port,
+ .watchdog_ops = &mv88e6250_watchdog_ops,
+ .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
+ .pot_clear = mv88e6xxx_g2_pot_clear,
+ .reset = mv88e6250_g1_reset,
+ .vtu_getnext = mv88e6250_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6250_g1_vtu_loadpurge,
+ .phylink_validate = mv88e6065_phylink_validate,
+};
+
static const struct mv88e6xxx_ops mv88e6290_ops = {
/* MV88E6XXX_FAMILY_6390 */
.setup_errata = mv88e6390_setup_errata,
@@ -4229,6 +4283,27 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.ops = &mv88e6240_ops,
},
+ [MV88E6250] = {
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6250,
+ .family = MV88E6XXX_FAMILY_6250,
+ .name = "Marvell 88E6250",
+ .num_databases = 64,
+ .num_ports = 7,
+ .num_internal_phys = 5,
+ .max_vid = 4095,
+ .port_base_addr = 0x08,
+ .phy_base_addr = 0x00,
+ .global1_addr = 0x0f,
+ .global2_addr = 0x07,
+ .age_time_coeff = 15000,
+ .g1_irqs = 9,
+ .g2_irqs = 10,
+ .atu_move_port_mask = 0xf,
+ .dual_chip = true,
+ .tag_protocol = DSA_TAG_PROTO_DSA,
+ .ops = &mv88e6250_ops,
+ },
+
[MV88E6290] = {
.prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6290,
.family = MV88E6XXX_FAMILY_6390,
@@ -4457,9 +4532,9 @@ static int mv88e6xxx_detect(struct mv88e6xxx_chip *chip)
u16 id;
int err;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_read(chip, 0, MV88E6XXX_PORT_SWITCH_ID, &id);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
if (err)
return err;
@@ -4522,12 +4597,12 @@ static void mv88e6xxx_port_mdb_add(struct dsa_switch *ds, int port,
{
struct mv88e6xxx_chip *chip = ds->priv;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
if (mv88e6xxx_port_db_load_purge(chip, port, mdb->addr, mdb->vid,
MV88E6XXX_G1_ATU_DATA_STATE_MC_STATIC))
dev_err(ds->dev, "p%d: failed to load multicast MAC address\n",
port);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
}
static int mv88e6xxx_port_mdb_del(struct dsa_switch *ds, int port,
@@ -4536,10 +4611,10 @@ static int mv88e6xxx_port_mdb_del(struct dsa_switch *ds, int port,
struct mv88e6xxx_chip *chip = ds->priv;
int err;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_db_load_purge(chip, port, mdb->addr, mdb->vid,
MV88E6XXX_G1_ATU_DATA_STATE_UNUSED);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
return err;
}
@@ -4550,12 +4625,12 @@ static int mv88e6xxx_port_egress_floods(struct dsa_switch *ds, int port,
struct mv88e6xxx_chip *chip = ds->priv;
int err = -EOPNOTSUPP;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
if (chip->info->ops->port_set_egress_floods)
err = chip->info->ops->port_set_egress_floods(chip, port,
unicast,
multicast);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
return err;
}
@@ -4711,6 +4786,8 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev)
err = PTR_ERR(chip->reset);
goto out;
}
+ if (chip->reset)
+ usleep_range(1000, 2000);
err = mv88e6xxx_detect(chip);
if (err)
@@ -4726,9 +4803,9 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev)
chip->eeprom_len = pdata->eeprom_len;
}
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_switch_reset(chip);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
if (err)
goto out;
@@ -4747,12 +4824,12 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev)
* the PHYs will link their interrupts to these interrupt
* controllers
*/
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
if (chip->irq > 0)
err = mv88e6xxx_g1_irq_setup(chip);
else
err = mv88e6xxx_irq_poll_setup(chip);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
if (err)
goto out;
@@ -4837,6 +4914,10 @@ static const struct of_device_id mv88e6xxx_of_match[] = {
.compatible = "marvell,mv88e6190",
.data = &mv88e6xxx_table[MV88E6190],
},
+ {
+ .compatible = "marvell,mv88e6250",
+ .data = &mv88e6xxx_table[MV88E6250],
+ },
{ /* sentinel */ },
};
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index d3e10111a6fe..4646e46d47f2 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -58,6 +58,7 @@ enum mv88e6xxx_model {
MV88E6190X,
MV88E6191,
MV88E6240,
+ MV88E6250,
MV88E6290,
MV88E6320,
MV88E6321,
@@ -76,6 +77,7 @@ enum mv88e6xxx_family {
MV88E6XXX_FAMILY_6097, /* 6046 6085 6096 6097 */
MV88E6XXX_FAMILY_6165, /* 6123 6161 6165 */
MV88E6XXX_FAMILY_6185, /* 6108 6121 6122 6131 6152 6155 6182 6185 */
+ MV88E6XXX_FAMILY_6250, /* 6250 */
MV88E6XXX_FAMILY_6320, /* 6320 6321 */
MV88E6XXX_FAMILY_6341, /* 6141 6341 */
MV88E6XXX_FAMILY_6351, /* 6171 6175 6350 6351 */
@@ -108,6 +110,12 @@ struct mv88e6xxx_info {
* when it is non-zero, and use indirect access to internal registers.
*/
bool multi_chip;
+ /* Dual-chip Addressing Mode
+ * Some chips respond to only half of the 32 SMI addresses,
+ * allowing two to coexist on the same SMI interface.
+ */
+ bool dual_chip;
+
enum dsa_tag_protocol tag_protocol;
/* Mask for FromPort and ToPort value of PortVec used in ATU Move
@@ -572,4 +580,14 @@ int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port, int link,
phy_interface_t mode);
struct mii_bus *mv88e6xxx_default_mdio_bus(struct mv88e6xxx_chip *chip);
+static inline void mv88e6xxx_reg_lock(struct mv88e6xxx_chip *chip)
+{
+ mutex_lock(&chip->reg_lock);
+}
+
+static inline void mv88e6xxx_reg_unlock(struct mv88e6xxx_chip *chip)
+{
+ mutex_unlock(&chip->reg_lock);
+}
+
#endif /* _MV88E6XXX_CHIP_H */
diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c
index 09b8a3d0dd37..1323ef30a5e9 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.c
+++ b/drivers/net/dsa/mv88e6xxx/global1.c
@@ -178,7 +178,7 @@ int mv88e6185_g1_reset(struct mv88e6xxx_chip *chip)
return mv88e6185_g1_wait_ppu_polling(chip);
}
-int mv88e6352_g1_reset(struct mv88e6xxx_chip *chip)
+int mv88e6250_g1_reset(struct mv88e6xxx_chip *chip)
{
u16 val;
int err;
@@ -194,7 +194,14 @@ int mv88e6352_g1_reset(struct mv88e6xxx_chip *chip)
if (err)
return err;
- err = mv88e6xxx_g1_wait_init_ready(chip);
+ return mv88e6xxx_g1_wait_init_ready(chip);
+}
+
+int mv88e6352_g1_reset(struct mv88e6xxx_chip *chip)
+{
+ int err;
+
+ err = mv88e6250_g1_reset(chip);
if (err)
return err;
@@ -295,6 +302,12 @@ int mv88e6085_g1_ieee_pri_map(struct mv88e6xxx_chip *chip)
return mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IEEE_PRI, 0xfa41);
}
+int mv88e6250_g1_ieee_pri_map(struct mv88e6xxx_chip *chip)
+{
+ /* Reset the IEEE Tag priorities to defaults */
+ return mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IEEE_PRI, 0xfa50);
+}
+
/* Offset 0x1a: Monitor Control */
/* Offset 0x1a: Monitor & MGMT Control on some devices */
@@ -375,26 +388,26 @@ int mv88e6390_g1_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip)
u16 ptr;
int err;
- /* 01:c2:80:00:00:00:00-01:c2:80:00:00:00:07 are Management */
- ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_0180C280000000XLO;
+ /* 01:80:c2:00:00:00-01:80:c2:00:00:07 are Management */
+ ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_0180C200000XLO;
err = mv88e6390_g1_monitor_write(chip, ptr, 0xff);
if (err)
return err;
- /* 01:c2:80:00:00:00:08-01:c2:80:00:00:00:0f are Management */
- ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_0180C280000000XHI;
+ /* 01:80:c2:00:00:08-01:80:c2:00:00:0f are Management */
+ ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_0180C200000XHI;
err = mv88e6390_g1_monitor_write(chip, ptr, 0xff);
if (err)
return err;
- /* 01:c2:80:00:00:00:20-01:c2:80:00:00:00:27 are Management */
- ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_0180C280000002XLO;
+ /* 01:80:c2:00:00:20-01:80:c2:00:00:27 are Management */
+ ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_0180C200002XLO;
err = mv88e6390_g1_monitor_write(chip, ptr, 0xff);
if (err)
return err;
- /* 01:c2:80:00:00:00:28-01:c2:80:00:00:00:2f are Management */
- ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_0180C280000002XHI;
+ /* 01:80:c2:00:00:28-01:80:c2:00:00:2f are Management */
+ ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_0180C200002XHI;
err = mv88e6390_g1_monitor_write(chip, ptr, 0xff);
if (err)
return err;
@@ -461,7 +474,7 @@ int mv88e6xxx_g1_set_device_number(struct mv88e6xxx_chip *chip, int index)
/* Offset 0x1d: Statistics Operation 2 */
-int mv88e6xxx_g1_stats_wait(struct mv88e6xxx_chip *chip)
+static int mv88e6xxx_g1_stats_wait(struct mv88e6xxx_chip *chip)
{
return mv88e6xxx_g1_wait(chip, MV88E6XXX_G1_STATS_OP,
MV88E6XXX_G1_STATS_OP_BUSY);
diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
index 7bd5ab733a3f..d444266f7d78 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.h
+++ b/drivers/net/dsa/mv88e6xxx/global1.h
@@ -186,10 +186,10 @@
#define MV88E6390_G1_MONITOR_MGMT_CTL 0x1a
#define MV88E6390_G1_MONITOR_MGMT_CTL_UPDATE 0x8000
#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_MASK 0x3f00
-#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_0180C280000000XLO 0x0000
-#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_0180C280000000XHI 0x0100
-#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_0180C280000002XLO 0x0200
-#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_0180C280000002XHI 0x0300
+#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_0180C200000XLO 0x0000
+#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_0180C200000XHI 0x0100
+#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_0180C200002XLO 0x0200
+#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_0180C200002XHI 0x0300
#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_INGRESS_DEST 0x2000
#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_EGRESS_DEST 0x2100
#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST 0x3000
@@ -255,11 +255,11 @@ int mv88e6xxx_g1_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr);
int mv88e6185_g1_reset(struct mv88e6xxx_chip *chip);
int mv88e6352_g1_reset(struct mv88e6xxx_chip *chip);
+int mv88e6250_g1_reset(struct mv88e6xxx_chip *chip);
int mv88e6185_g1_ppu_enable(struct mv88e6xxx_chip *chip);
int mv88e6185_g1_ppu_disable(struct mv88e6xxx_chip *chip);
-int mv88e6xxx_g1_stats_wait(struct mv88e6xxx_chip *chip);
int mv88e6xxx_g1_stats_snapshot(struct mv88e6xxx_chip *chip, int port);
int mv88e6320_g1_stats_snapshot(struct mv88e6xxx_chip *chip, int port);
int mv88e6390_g1_stats_snapshot(struct mv88e6xxx_chip *chip, int port);
@@ -274,7 +274,9 @@ int mv88e6390_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port);
int mv88e6390_g1_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip);
int mv88e6085_g1_ip_pri_map(struct mv88e6xxx_chip *chip);
+
int mv88e6085_g1_ieee_pri_map(struct mv88e6xxx_chip *chip);
+int mv88e6250_g1_ieee_pri_map(struct mv88e6xxx_chip *chip);
int mv88e6185_g1_set_cascade_port(struct mv88e6xxx_chip *chip, int port);
@@ -301,6 +303,10 @@ int mv88e6185_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry *entry);
int mv88e6185_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry *entry);
+int mv88e6250_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry);
+int mv88e6250_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry);
int mv88e6352_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry *entry);
int mv88e6352_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c
index 4542dfa5fc69..1cf388e9bd94 100644
--- a/drivers/net/dsa/mv88e6xxx/global1_atu.c
+++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c
@@ -90,7 +90,7 @@ static int mv88e6xxx_g1_atu_op(struct mv88e6xxx_chip *chip, u16 fid, u16 op)
if (err)
return err;
} else {
- if (mv88e6xxx_num_databases(chip) > 16) {
+ if (mv88e6xxx_num_databases(chip) > 64) {
/* ATU DBNum[7:4] are located in ATU Control 15:12 */
err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_CTL,
&val);
@@ -102,6 +102,9 @@ static int mv88e6xxx_g1_atu_op(struct mv88e6xxx_chip *chip, u16 fid, u16 op)
val);
if (err)
return err;
+ } else if (mv88e6xxx_num_databases(chip) > 16) {
+ /* ATU DBNum[5:4] are located in ATU Operation 9:8 */
+ op |= (fid & 0x30) << 4;
}
/* ATU DBNum[3:0] are located in ATU Operation 3:0 */
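For illustration (not part of the patch), a worked example of how a 6-bit FID is split across the ATU Operation register on a chip with 17 to 64 databases, such as the 88E6250:

	u16 fid = 0x2a;			/* 0b101010 */
	u16 op = 0;			/* ATU op code bits left out here */

	op |= fid & 0x000f;		/* DBNum[3:0] -> ATU Operation 3:0 = 0x000a */
	op |= (fid & 0x0030) << 4;	/* DBNum[5:4] -> ATU Operation 9:8 = 0x0200 */
	/* op now carries 0x020a in its DBNum bits */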
@@ -314,7 +317,7 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
int err;
u16 val;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_g1_atu_op(chip, 0,
MV88E6XXX_G1_ATU_OP_GET_CLR_VIOLATION);
@@ -361,12 +364,12 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
entry.mac, entry.portvec, spid);
chip->ports[spid].atu_full_violation++;
}
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
return IRQ_HANDLED;
out:
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
dev_err(chip->dev, "ATU problem: error %d while handling interrupt\n",
err);
diff --git a/drivers/net/dsa/mv88e6xxx/global1_vtu.c b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
index ecef69045a42..6cac997360e8 100644
--- a/drivers/net/dsa/mv88e6xxx/global1_vtu.c
+++ b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
@@ -303,6 +303,35 @@ static int mv88e6xxx_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
return mv88e6xxx_g1_vtu_vid_read(chip, entry);
}
+int mv88e6250_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry)
+{
+ u16 val;
+ int err;
+
+ err = mv88e6xxx_g1_vtu_getnext(chip, entry);
+ if (err)
+ return err;
+
+ if (entry->valid) {
+ err = mv88e6185_g1_vtu_data_read(chip, entry);
+ if (err)
+ return err;
+
+ /* VTU DBNum[3:0] are located in VTU Operation 3:0
+ * VTU DBNum[5:4] are located in VTU Operation 9:8
+ */
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_OP, &val);
+ if (err)
+ return err;
+
+ entry->fid = val & 0x000f;
+ entry->fid |= (val & 0x0300) >> 4;
+ }
+
+ return 0;
+}
+
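The reverse direction, as used by mv88e6250_g1_vtu_getnext() above, again purely as an illustration with a made-up register value:

	u16 val = 0x02a7;		/* example VTU Operation readback */
	u16 fid;

	fid = val & 0x000f;		/* DBNum[3:0] = 0x07 */
	fid |= (val & 0x0300) >> 4;	/* DBNum[5:4] = 0x20 */
	/* fid == 0x27 */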
int mv88e6185_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry *entry)
{
@@ -392,6 +421,35 @@ int mv88e6390_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
return 0;
}
+int mv88e6250_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry)
+{
+ u16 op = MV88E6XXX_G1_VTU_OP_VTU_LOAD_PURGE;
+ int err;
+
+ err = mv88e6xxx_g1_vtu_op_wait(chip);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_vtu_vid_write(chip, entry);
+ if (err)
+ return err;
+
+ if (entry->valid) {
+ err = mv88e6185_g1_vtu_data_write(chip, entry);
+ if (err)
+ return err;
+
+ /* VTU DBNum[3:0] are located in VTU Operation 3:0
+ * VTU DBNum[5:4] are located in VTU Operation 9:8
+ */
+ op |= entry->fid & 0x000f;
+ op |= (entry->fid & 0x0030) << 4;
+ }
+
+ return mv88e6xxx_g1_vtu_op(chip, op);
+}
+
int mv88e6185_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry *entry)
{
@@ -521,7 +579,7 @@ static irqreturn_t mv88e6xxx_g1_vtu_prob_irq_thread_fn(int irq, void *dev_id)
int err;
u16 val;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_GET_CLR_VIOLATION);
if (err)
@@ -549,12 +607,12 @@ static irqreturn_t mv88e6xxx_g1_vtu_prob_irq_thread_fn(int irq, void *dev_id)
chip->ports[spid].vtu_miss_violation++;
}
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
return IRQ_HANDLED;
out:
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
dev_err(chip->dev, "VTU problem: error %d while handling interrupt\n",
err);
diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c
index 1546171210a1..2305b94b3051 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.c
+++ b/drivers/net/dsa/mv88e6xxx/global2.c
@@ -812,6 +812,32 @@ const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops = {
.irq_free = mv88e6097_watchdog_free,
};
+static void mv88e6250_watchdog_free(struct mv88e6xxx_chip *chip)
+{
+ u16 reg;
+
+ mv88e6xxx_g2_read(chip, MV88E6250_G2_WDOG_CTL, &reg);
+
+ reg &= ~(MV88E6250_G2_WDOG_CTL_EGRESS_ENABLE |
+ MV88E6250_G2_WDOG_CTL_QC_ENABLE);
+
+ mv88e6xxx_g2_write(chip, MV88E6250_G2_WDOG_CTL, reg);
+}
+
+static int mv88e6250_watchdog_setup(struct mv88e6xxx_chip *chip)
+{
+ return mv88e6xxx_g2_write(chip, MV88E6250_G2_WDOG_CTL,
+ MV88E6250_G2_WDOG_CTL_EGRESS_ENABLE |
+ MV88E6250_G2_WDOG_CTL_QC_ENABLE |
+ MV88E6250_G2_WDOG_CTL_SWRESET);
+}
+
+const struct mv88e6xxx_irq_ops mv88e6250_watchdog_ops = {
+ .irq_action = mv88e6097_watchdog_action,
+ .irq_setup = mv88e6250_watchdog_setup,
+ .irq_free = mv88e6250_watchdog_free,
+};
+
static int mv88e6390_watchdog_setup(struct mv88e6xxx_chip *chip)
{
return mv88e6xxx_g2_update(chip, MV88E6390_G2_WDOG_CTL,
@@ -867,20 +893,20 @@ static irqreturn_t mv88e6xxx_g2_watchdog_thread_fn(int irq, void *dev_id)
struct mv88e6xxx_chip *chip = dev_id;
irqreturn_t ret = IRQ_NONE;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
if (chip->info->ops->watchdog_ops->irq_action)
ret = chip->info->ops->watchdog_ops->irq_action(chip, irq);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
return ret;
}
static void mv88e6xxx_g2_watchdog_free(struct mv88e6xxx_chip *chip)
{
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
if (chip->info->ops->watchdog_ops->irq_free)
chip->info->ops->watchdog_ops->irq_free(chip);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
free_irq(chip->watchdog_irq, chip);
irq_dispose_mapping(chip->watchdog_irq);
@@ -902,10 +928,10 @@ static int mv88e6xxx_g2_watchdog_setup(struct mv88e6xxx_chip *chip)
if (err)
return err;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
if (chip->info->ops->watchdog_ops->irq_setup)
err = chip->info->ops->watchdog_ops->irq_setup(chip);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
return err;
}
@@ -960,9 +986,9 @@ static irqreturn_t mv88e6xxx_g2_irq_thread_fn(int irq, void *dev_id)
int err;
u16 reg;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_g2_int_source(chip, &reg);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
if (err)
goto out;
@@ -981,7 +1007,7 @@ static void mv88e6xxx_g2_irq_bus_lock(struct irq_data *d)
{
struct mv88e6xxx_chip *chip = irq_data_get_irq_chip_data(d);
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
}
static void mv88e6xxx_g2_irq_bus_sync_unlock(struct irq_data *d)
@@ -993,7 +1019,7 @@ static void mv88e6xxx_g2_irq_bus_sync_unlock(struct irq_data *d)
if (err)
dev_err(chip->dev, "failed to mask interrupts\n");
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
}
static const struct irq_chip mv88e6xxx_g2_irq_chip = {
diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h
index bfb2c6123f55..a664fc25f132 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.h
+++ b/drivers/net/dsa/mv88e6xxx/global2.h
@@ -202,6 +202,18 @@
#define MV88E6XXX_G2_SCRATCH_MISC_DATA_MASK 0x00ff
/* Offset 0x1B: Watch Dog Control Register */
+#define MV88E6250_G2_WDOG_CTL 0x1b
+#define MV88E6250_G2_WDOG_CTL_QC_HISTORY 0x0100
+#define MV88E6250_G2_WDOG_CTL_QC_EVENT 0x0080
+#define MV88E6250_G2_WDOG_CTL_QC_ENABLE 0x0040
+#define MV88E6250_G2_WDOG_CTL_EGRESS_HISTORY 0x0020
+#define MV88E6250_G2_WDOG_CTL_EGRESS_EVENT 0x0010
+#define MV88E6250_G2_WDOG_CTL_EGRESS_ENABLE 0x0008
+#define MV88E6250_G2_WDOG_CTL_FORCE_IRQ 0x0004
+#define MV88E6250_G2_WDOG_CTL_HISTORY 0x0002
+#define MV88E6250_G2_WDOG_CTL_SWRESET 0x0001
+
+/* Offset 0x1B: Watch Dog Control Register */
#define MV88E6352_G2_WDOG_CTL 0x1b
#define MV88E6352_G2_WDOG_CTL_EGRESS_EVENT 0x0080
#define MV88E6352_G2_WDOG_CTL_RMU_TIMEOUT 0x0040
@@ -330,6 +342,7 @@ int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip, int target,
int port);
extern const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops;
+extern const struct mv88e6xxx_irq_ops mv88e6250_watchdog_ops;
extern const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops;
extern const struct mv88e6xxx_avb_ops mv88e6165_avb_ops;
@@ -480,6 +493,7 @@ static inline int mv88e6xxx_g2_pot_clear(struct mv88e6xxx_chip *chip)
}
static const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops = {};
+static const struct mv88e6xxx_irq_ops mv88e6250_watchdog_ops = {};
static const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops = {};
static const struct mv88e6xxx_avb_ops mv88e6165_avb_ops = {};
diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.c b/drivers/net/dsa/mv88e6xxx/hwtstamp.c
index 7f95a636561d..a4c488b12e8f 100644
--- a/drivers/net/dsa/mv88e6xxx/hwtstamp.c
+++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.c
@@ -147,7 +147,7 @@ static int mv88e6xxx_set_hwtstamp_config(struct mv88e6xxx_chip *chip, int port,
return -ERANGE;
}
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
if (tstamp_enable) {
chip->enable_count += 1;
if (chip->enable_count == 1 && ptp_ops->global_enable)
@@ -161,7 +161,7 @@ static int mv88e6xxx_set_hwtstamp_config(struct mv88e6xxx_chip *chip, int port,
if (chip->enable_count == 0 && ptp_ops->global_disable)
ptp_ops->global_disable(chip);
}
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
/* Once hardware has been configured, enable timestamp checks
* in the RX/TX paths.
@@ -301,10 +301,10 @@ static void mv88e6xxx_get_rxts(struct mv88e6xxx_chip *chip,
skb_queue_splice_tail_init(rxq, &received);
spin_unlock_irqrestore(&rxq->lock, flags);
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_ptp_read(chip, ps->port_id,
reg, buf, ARRAY_SIZE(buf));
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
if (err)
pr_err("failed to get the receive time stamp\n");
@@ -314,9 +314,9 @@ static void mv88e6xxx_get_rxts(struct mv88e6xxx_chip *chip,
seq_id = buf[3];
if (status & MV88E6XXX_PTP_TS_VALID) {
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_ptp_write(chip, ps->port_id, reg, 0);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
if (err)
pr_err("failed to clear the receive status\n");
}
@@ -327,9 +327,9 @@ static void mv88e6xxx_get_rxts(struct mv88e6xxx_chip *chip,
if (mv88e6xxx_ts_valid(status) && seq_match(skb, seq_id)) {
ns = timehi << 16 | timelo;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
ns = timecounter_cyc2time(&chip->tstamp_tc, ns);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
shwt = skb_hwtstamps(skb);
memset(shwt, 0, sizeof(*shwt));
shwt->hwtstamp = ns_to_ktime(ns);
@@ -405,12 +405,12 @@ static int mv88e6xxx_txtstamp_work(struct mv88e6xxx_chip *chip,
if (!ps->tx_skb)
return 0;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_ptp_read(chip, ps->port_id,
ptp_ops->dep_sts_reg,
departure_block,
ARRAY_SIZE(departure_block));
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
if (err)
goto free_and_clear_skb;
@@ -430,9 +430,9 @@ static int mv88e6xxx_txtstamp_work(struct mv88e6xxx_chip *chip,
}
/* We have the timestamp; go ahead and clear valid now */
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
mv88e6xxx_port_ptp_write(chip, ps->port_id, ptp_ops->dep_sts_reg, 0);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
status = departure_block[0] & MV88E6XXX_PTP_TS_STATUS_MASK;
if (status != MV88E6XXX_PTP_TS_STATUS_NORMAL) {
@@ -447,9 +447,9 @@ static int mv88e6xxx_txtstamp_work(struct mv88e6xxx_chip *chip,
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
time_raw = ((u32)departure_block[2] << 16) | departure_block[1];
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
ns = timecounter_cyc2time(&chip->tstamp_tc, time_raw);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
shhwtstamps.hwtstamp = ns_to_ktime(ns);
dev_dbg(chip->dev,
diff --git a/drivers/net/dsa/mv88e6xxx/phy.c b/drivers/net/dsa/mv88e6xxx/phy.c
index 2952db73f55c..252b5b3a3efe 100644
--- a/drivers/net/dsa/mv88e6xxx/phy.c
+++ b/drivers/net/dsa/mv88e6xxx/phy.c
@@ -137,7 +137,7 @@ static void mv88e6xxx_phy_ppu_reenable_work(struct work_struct *ugly)
chip = container_of(ugly, struct mv88e6xxx_chip, ppu_work);
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
if (mutex_trylock(&chip->ppu_mutex)) {
if (mv88e6xxx_phy_ppu_enable(chip) == 0)
@@ -145,7 +145,7 @@ static void mv88e6xxx_phy_ppu_reenable_work(struct work_struct *ugly)
mutex_unlock(&chip->ppu_mutex);
}
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
}
static void mv88e6xxx_phy_ppu_reenable_timer(struct timer_list *t)
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index 9a2b4b385a2c..04309ef0a1cc 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -290,6 +290,18 @@ int mv88e6185_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
return mv88e6xxx_port_set_speed(chip, port, speed, false, false);
}
+/* Support 10, 100 Mbps (e.g. 88E6250 family) */
+int mv88e6250_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
+{
+ if (speed == SPEED_MAX)
+ speed = 100;
+
+ if (speed > 100)
+ return -EOPNOTSUPP;
+
+ return mv88e6xxx_port_set_speed(chip, port, speed, false, false);
+}
+
/* Support 10, 100, 200, 1000, 2500 Mbps (e.g. 88E6341) */
int mv88e6341_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
{
@@ -517,6 +529,71 @@ int mv88e6352_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode)
return 0;
}
+int mv88e6250_port_link_state(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_link_state *state)
+{
+ int err;
+ u16 reg;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
+ if (err)
+ return err;
+
+ if (port < 5) {
+ switch (reg & MV88E6250_PORT_STS_PORTMODE_MASK) {
+ case MV88E6250_PORT_STS_PORTMODE_PHY_10_HALF:
+ state->speed = SPEED_10;
+ state->duplex = DUPLEX_HALF;
+ break;
+ case MV88E6250_PORT_STS_PORTMODE_PHY_100_HALF:
+ state->speed = SPEED_100;
+ state->duplex = DUPLEX_HALF;
+ break;
+ case MV88E6250_PORT_STS_PORTMODE_PHY_10_FULL:
+ state->speed = SPEED_10;
+ state->duplex = DUPLEX_FULL;
+ break;
+ case MV88E6250_PORT_STS_PORTMODE_PHY_100_FULL:
+ state->speed = SPEED_100;
+ state->duplex = DUPLEX_FULL;
+ break;
+ default:
+ state->speed = SPEED_UNKNOWN;
+ state->duplex = DUPLEX_UNKNOWN;
+ break;
+ }
+ } else {
+ switch (reg & MV88E6250_PORT_STS_PORTMODE_MASK) {
+ case MV88E6250_PORT_STS_PORTMODE_MII_10_HALF:
+ state->speed = SPEED_10;
+ state->duplex = DUPLEX_HALF;
+ break;
+ case MV88E6250_PORT_STS_PORTMODE_MII_100_HALF:
+ state->speed = SPEED_100;
+ state->duplex = DUPLEX_HALF;
+ break;
+ case MV88E6250_PORT_STS_PORTMODE_MII_10_FULL:
+ state->speed = SPEED_10;
+ state->duplex = DUPLEX_FULL;
+ break;
+ case MV88E6250_PORT_STS_PORTMODE_MII_100_FULL:
+ state->speed = SPEED_100;
+ state->duplex = DUPLEX_FULL;
+ break;
+ default:
+ state->speed = SPEED_UNKNOWN;
+ state->duplex = DUPLEX_UNKNOWN;
+ break;
+ }
+ }
+
+ state->link = !!(reg & MV88E6250_PORT_STS_LINK);
+ state->an_enabled = 1;
+ state->an_complete = state->link;
+
+ return 0;
+}
+
int mv88e6352_port_link_state(struct mv88e6xxx_chip *chip, int port,
struct phylink_link_state *state)
{
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
index f2fba3f73199..8d5a6cd6fb19 100644
--- a/drivers/net/dsa/mv88e6xxx/port.h
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -19,6 +19,16 @@
#define MV88E6XXX_PORT_STS_MY_PAUSE 0x4000
#define MV88E6XXX_PORT_STS_HD_FLOW 0x2000
#define MV88E6XXX_PORT_STS_PHY_DETECT 0x1000
+#define MV88E6250_PORT_STS_LINK 0x1000
+#define MV88E6250_PORT_STS_PORTMODE_MASK 0x0f00
+#define MV88E6250_PORT_STS_PORTMODE_PHY_10_HALF 0x0800
+#define MV88E6250_PORT_STS_PORTMODE_PHY_100_HALF 0x0900
+#define MV88E6250_PORT_STS_PORTMODE_PHY_10_FULL 0x0a00
+#define MV88E6250_PORT_STS_PORTMODE_PHY_100_FULL 0x0b00
+#define MV88E6250_PORT_STS_PORTMODE_MII_10_HALF 0x0c00
+#define MV88E6250_PORT_STS_PORTMODE_MII_100_HALF 0x0d00
+#define MV88E6250_PORT_STS_PORTMODE_MII_10_FULL 0x0e00
+#define MV88E6250_PORT_STS_PORTMODE_MII_100_FULL 0x0f00
#define MV88E6XXX_PORT_STS_LINK 0x0800
#define MV88E6XXX_PORT_STS_DUPLEX 0x0400
#define MV88E6XXX_PORT_STS_SPEED_MASK 0x0300
@@ -108,6 +118,7 @@
#define MV88E6XXX_PORT_SWITCH_ID_PROD_6191 0x1910
#define MV88E6XXX_PORT_SWITCH_ID_PROD_6185 0x1a70
#define MV88E6XXX_PORT_SWITCH_ID_PROD_6240 0x2400
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6250 0x2500
#define MV88E6XXX_PORT_SWITCH_ID_PROD_6290 0x2900
#define MV88E6XXX_PORT_SWITCH_ID_PROD_6321 0x3100
#define MV88E6XXX_PORT_SWITCH_ID_PROD_6141 0x3400
@@ -275,6 +286,7 @@ int mv88e6xxx_port_set_duplex(struct mv88e6xxx_chip *chip, int port, int dup);
int mv88e6065_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
int mv88e6185_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
+int mv88e6250_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
int mv88e6341_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
int mv88e6352_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
int mv88e6390_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
@@ -328,6 +340,8 @@ int mv88e6185_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode);
int mv88e6352_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode);
int mv88e6185_port_link_state(struct mv88e6xxx_chip *chip, int port,
struct phylink_link_state *state);
+int mv88e6250_port_link_state(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_link_state *state);
int mv88e6352_port_link_state(struct mv88e6xxx_chip *chip, int port,
struct phylink_link_state *state);
int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port);
diff --git a/drivers/net/dsa/mv88e6xxx/ptp.c b/drivers/net/dsa/mv88e6xxx/ptp.c
index 7b40c5886b75..768d256f7c9f 100644
--- a/drivers/net/dsa/mv88e6xxx/ptp.c
+++ b/drivers/net/dsa/mv88e6xxx/ptp.c
@@ -138,10 +138,10 @@ static void mv88e6352_tai_event_work(struct work_struct *ugly)
u32 raw_ts;
int err;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_tai_read(chip, MV88E6XXX_TAI_EVENT_STATUS,
status, ARRAY_SIZE(status));
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
if (err) {
dev_err(chip->dev, "failed to read TAI status register\n");
@@ -158,18 +158,18 @@ static void mv88e6352_tai_event_work(struct work_struct *ugly)
/* Clear the valid bit so the next timestamp can come in */
status[0] &= ~MV88E6XXX_TAI_EVENT_STATUS_VALID;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_tai_write(chip, MV88E6XXX_TAI_EVENT_STATUS, status[0]);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
/* This is an external timestamp */
ev.type = PTP_CLOCK_EXTTS;
/* We only have one timestamping channel. */
ev.index = 0;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
ev.timestamp = timecounter_cyc2time(&chip->tstamp_tc, raw_ts);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
ptp_clock_event(chip->ptp_clock, &ev);
out:
@@ -192,12 +192,12 @@ static int mv88e6xxx_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
adj *= scaled_ppm;
diff = div_u64(adj, CC_MULT_DEM);
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
timecounter_read(&chip->tstamp_tc);
chip->tstamp_cc.mult = neg_adj ? mult - diff : mult + diff;
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
return 0;
}
@@ -206,9 +206,9 @@ static int mv88e6xxx_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
struct mv88e6xxx_chip *chip = ptp_to_chip(ptp);
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
timecounter_adjtime(&chip->tstamp_tc, delta);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
return 0;
}
@@ -219,9 +219,9 @@ static int mv88e6xxx_ptp_gettime(struct ptp_clock_info *ptp,
struct mv88e6xxx_chip *chip = ptp_to_chip(ptp);
u64 ns;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
ns = timecounter_read(&chip->tstamp_tc);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
*ts = ns_to_timespec64(ns);
@@ -236,9 +236,9 @@ static int mv88e6xxx_ptp_settime(struct ptp_clock_info *ptp,
ns = timespec64_to_ns(ts);
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
timecounter_init(&chip->tstamp_tc, &chip->tstamp_cc, ns);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
return 0;
}
@@ -256,7 +256,7 @@ static int mv88e6352_ptp_enable_extts(struct mv88e6xxx_chip *chip,
if (pin < 0)
return -EBUSY;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
if (on) {
func = MV88E6352_G2_SCRATCH_GPIO_PCTL_EVREQ;
@@ -278,7 +278,7 @@ static int mv88e6352_ptp_enable_extts(struct mv88e6xxx_chip *chip,
}
out:
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
return err;
}
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c
index d986c5d55bf1..20c526c2a9ee 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.c
+++ b/drivers/net/dsa/mv88e6xxx/serdes.c
@@ -208,7 +208,7 @@ static irqreturn_t mv88e6352_serdes_thread_fn(int irq, void *dev_id)
u16 status;
int err;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
err = mv88e6352_serdes_read(chip, MV88E6352_SERDES_INT_STATUS, &status);
if (err)
@@ -219,7 +219,7 @@ static irqreturn_t mv88e6352_serdes_thread_fn(int irq, void *dev_id)
mv88e6352_serdes_irq_link(chip, port->port);
}
out:
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
return ret;
}
@@ -253,12 +253,12 @@ int mv88e6352_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port)
/* Requesting the IRQ will trigger irq callbacks. So we cannot
* hold the reg_lock.
*/
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
err = request_threaded_irq(chip->ports[port].serdes_irq, NULL,
mv88e6352_serdes_thread_fn,
IRQF_ONESHOT, "mv88e6xxx-serdes",
&chip->ports[port]);
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
if (err) {
dev_err(chip->dev, "Unable to request SERDES interrupt: %d\n",
@@ -279,9 +279,9 @@ void mv88e6352_serdes_irq_free(struct mv88e6xxx_chip *chip, int port)
/* Freeing the IRQ will trigger irq callbacks. So we cannot
* hold the reg_lock.
*/
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
free_irq(chip->ports[port].serdes_irq, &chip->ports[port]);
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
chip->ports[port].serdes_irq = 0;
}
@@ -621,7 +621,7 @@ static irqreturn_t mv88e6390_serdes_thread_fn(int irq, void *dev_id)
lane = mv88e6390x_serdes_get_lane(chip, port->port);
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
switch (cmode) {
case MV88E6XXX_PORT_STS_CMODE_SGMII:
@@ -637,7 +637,7 @@ static irqreturn_t mv88e6390_serdes_thread_fn(int irq, void *dev_id)
}
}
out:
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
return ret;
}
@@ -666,12 +666,12 @@ int mv88e6390x_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port)
/* Requesting the IRQ will trigger irq callbacks. So we cannot
* hold the reg_lock.
*/
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
err = request_threaded_irq(chip->ports[port].serdes_irq, NULL,
mv88e6390_serdes_thread_fn,
IRQF_ONESHOT, "mv88e6xxx-serdes",
&chip->ports[port]);
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
if (err) {
dev_err(chip->dev, "Unable to request SERDES interrupt: %d\n",
@@ -705,9 +705,9 @@ void mv88e6390x_serdes_irq_free(struct mv88e6xxx_chip *chip, int port)
/* Freeing the IRQ will trigger irq callbacks. So we cannot
* hold the reg_lock.
*/
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
free_irq(chip->ports[port].serdes_irq, &chip->ports[port]);
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
chip->ports[port].serdes_irq = 0;
}
diff --git a/drivers/net/dsa/mv88e6xxx/smi.c b/drivers/net/dsa/mv88e6xxx/smi.c
index 92e9324f1fb9..5fc78a063843 100644
--- a/drivers/net/dsa/mv88e6xxx/smi.c
+++ b/drivers/net/dsa/mv88e6xxx/smi.c
@@ -20,6 +20,10 @@
* When ADDR is non-zero, the chip uses Multi-chip Addressing Mode, allowing
* multiple devices to share the SMI interface. In this mode it responds to only
* 2 registers, used to indirectly access the internal SMI devices.
+ *
+ * Some chips use a different scheme: Only the ADDR4 pin is used for
+ * configuration, and the device responds to 16 of the 32 SMI
+ * addresses, allowing two to coexist on the same SMI interface.
*/
static int mv88e6xxx_smi_direct_read(struct mv88e6xxx_chip *chip,
@@ -72,6 +76,23 @@ static const struct mv88e6xxx_bus_ops mv88e6xxx_smi_direct_ops = {
.write = mv88e6xxx_smi_direct_write,
};
+static int mv88e6xxx_smi_dual_direct_read(struct mv88e6xxx_chip *chip,
+ int dev, int reg, u16 *data)
+{
+ return mv88e6xxx_smi_direct_read(chip, chip->sw_addr + dev, reg, data);
+}
+
+static int mv88e6xxx_smi_dual_direct_write(struct mv88e6xxx_chip *chip,
+ int dev, int reg, u16 data)
+{
+ return mv88e6xxx_smi_direct_write(chip, chip->sw_addr + dev, reg, data);
+}
+
+static const struct mv88e6xxx_bus_ops mv88e6xxx_smi_dual_direct_ops = {
+ .read = mv88e6xxx_smi_dual_direct_read,
+ .write = mv88e6xxx_smi_dual_direct_write,
+};
+
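To make the dual-chip scheme concrete, an illustrative address map (not part of the patch), using the 88E6250 internal device addresses from the info table above and assuming ADDR4 is strapped high so that sw_addr = 16:

	/* Illustration only: sw_addr = 16
	 *   Global 2 (internal dev 0x07) -> SMI address 16 + 0x07 = 23
	 *   Port 0   (internal dev 0x08) -> SMI address 16 + 0x08 = 24
	 *   Global 1 (internal dev 0x0f) -> SMI address 16 + 0x0f = 31
	 * A second chip strapped low (sw_addr = 0) occupies SMI addresses
	 * 0-15; this is exactly the chip->sw_addr + dev offset applied by
	 * mv88e6xxx_smi_dual_direct_read/_write above.
	 */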
/* Offset 0x00: SMI Command Register
* Offset 0x01: SMI Data Register
*/
@@ -140,7 +161,9 @@ static const struct mv88e6xxx_bus_ops mv88e6xxx_smi_indirect_ops = {
int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip,
struct mii_bus *bus, int sw_addr)
{
- if (sw_addr == 0)
+ if (chip->info->dual_chip)
+ chip->smi_ops = &mv88e6xxx_smi_dual_direct_ops;
+ else if (sw_addr == 0)
chip->smi_ops = &mv88e6xxx_smi_direct_ops;
else if (chip->info->multi_chip)
chip->smi_ops = &mv88e6xxx_smi_indirect_ops;
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index c4fa400efdcc..27709f866c23 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -14,6 +14,7 @@
#include <linux/of_platform.h>
#include <linux/if_bridge.h>
#include <linux/mdio.h>
+#include <linux/gpio.h>
#include <linux/etherdevice.h>
#include "qca8k.h"
@@ -1046,6 +1047,20 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
priv->bus = mdiodev->bus;
priv->dev = &mdiodev->dev;
+ priv->reset_gpio = devm_gpiod_get_optional(priv->dev, "reset",
+ GPIOD_ASIS);
+ if (IS_ERR(priv->reset_gpio))
+ return PTR_ERR(priv->reset_gpio);
+
+ if (priv->reset_gpio) {
+ gpiod_set_value_cansleep(priv->reset_gpio, 1);
+ /* The active low duration must be greater than 10 ms
+ * and checkpatch.pl wants 20 ms.
+ */
+ msleep(20);
+ gpiod_set_value_cansleep(priv->reset_gpio, 0);
+ }
+
/* read the switches ID register */
id = qca8k_read(priv, QCA8K_REG_MASK_CTRL);
id >>= QCA8K_MASK_CTRL_ID_S;
diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h
index 91557433ce2f..42d6ea24eb14 100644
--- a/drivers/net/dsa/qca8k.h
+++ b/drivers/net/dsa/qca8k.h
@@ -10,6 +10,7 @@
#include <linux/delay.h>
#include <linux/regmap.h>
+#include <linux/gpio.h>
#define QCA8K_NUM_PORTS 7
@@ -174,6 +175,7 @@ struct qca8k_priv {
struct mutex reg_mutex;
struct device *dev;
struct dsa_switch_ops ops;
+ struct gpio_desc *reset_gpio;
};
struct qca8k_mib_desc {
diff --git a/drivers/net/dsa/sja1105/Kconfig b/drivers/net/dsa/sja1105/Kconfig
index 1144fc5f61a8..770134a66e48 100644
--- a/drivers/net/dsa/sja1105/Kconfig
+++ b/drivers/net/dsa/sja1105/Kconfig
@@ -9,10 +9,17 @@ tristate "NXP SJA1105 Ethernet switch family support"
This is the driver for the NXP SJA1105 automotive Ethernet switch
family. These are 5-port devices and are managed over an SPI
interface. Probing is handled based on OF bindings and so is the
- linkage to phylib. The driver supports the following revisions:
+ linkage to PHYLINK. The driver supports the following revisions:
- SJA1105E (Gen. 1, No TT-Ethernet)
- SJA1105T (Gen. 1, TT-Ethernet)
- SJA1105P (Gen. 2, No SGMII, No TT-Ethernet)
- SJA1105Q (Gen. 2, No SGMII, TT-Ethernet)
- SJA1105R (Gen. 2, SGMII, No TT-Ethernet)
- SJA1105S (Gen. 2, SGMII, TT-Ethernet)
+
+config NET_DSA_SJA1105_PTP
+ bool "Support for the PTP clock on the NXP SJA1105 Ethernet switch"
+ depends on NET_DSA_SJA1105
+ help
+ This enables support for timestamping and PTP clock manipulations in
+ the SJA1105 DSA driver.
diff --git a/drivers/net/dsa/sja1105/Makefile b/drivers/net/dsa/sja1105/Makefile
index 941848de8b46..4483113e6259 100644
--- a/drivers/net/dsa/sja1105/Makefile
+++ b/drivers/net/dsa/sja1105/Makefile
@@ -8,3 +8,7 @@ sja1105-objs := \
sja1105_clocking.o \
sja1105_static_config.o \
sja1105_dynamic_config.o \
+
+ifdef CONFIG_NET_DSA_SJA1105_PTP
+sja1105-objs += sja1105_ptp.o
+endif
diff --git a/drivers/net/dsa/sja1105/sja1105.h b/drivers/net/dsa/sja1105/sja1105.h
index b043bfc408f2..78094db32622 100644
--- a/drivers/net/dsa/sja1105/sja1105.h
+++ b/drivers/net/dsa/sja1105/sja1105.h
@@ -5,6 +5,8 @@
#ifndef _SJA1105_H
#define _SJA1105_H
+#include <linux/ptp_clock_kernel.h>
+#include <linux/timecounter.h>
#include <linux/dsa/sja1105.h>
#include <net/dsa.h>
#include <linux/mutex.h>
@@ -27,9 +29,14 @@ struct sja1105_regs {
u64 rgu;
u64 config;
u64 rmii_pll1;
+ u64 ptp_control;
+ u64 ptpclk;
+ u64 ptpclkrate;
+ u64 ptptsclk;
+ u64 ptpegr_ts[SJA1105_NUM_PORTS];
u64 pad_mii_tx[SJA1105_NUM_PORTS];
+ u64 pad_mii_id[SJA1105_NUM_PORTS];
u64 cgu_idiv[SJA1105_NUM_PORTS];
- u64 rgmii_pad_mii_tx[SJA1105_NUM_PORTS];
u64 mii_tx_clk[SJA1105_NUM_PORTS];
u64 mii_rx_clk[SJA1105_NUM_PORTS];
u64 mii_ext_tx_clk[SJA1105_NUM_PORTS];
@@ -50,11 +57,26 @@ struct sja1105_info {
* switch core and device_id)
*/
u64 part_no;
+ /* E/T and P/Q/R/S have partial timestamps of different sizes.
+ * They must be reconstructed on both families anyway to get the full
+ * 64-bit values back.
+ */
+ int ptp_ts_bits;
+ /* The SPI commands used to retrieve the egress timestamps likewise
+ * differ in size.
+ */
+ int ptpegr_ts_bytes;
const struct sja1105_dynamic_table_ops *dyn_ops;
const struct sja1105_table_ops *static_ops;
const struct sja1105_regs *regs;
+ int (*ptp_cmd)(const void *ctx, const void *data);
int (*reset_cmd)(const void *ctx, const void *data);
int (*setup_rgmii_delay)(const void *ctx, int port);
+ /* Prototypes from include/net/dsa.h */
+ int (*fdb_add_cmd)(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid);
+ int (*fdb_del_cmd)(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid);
const char *name;
};
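The ptp_ts_bits comment above refers to reconstructing full 64-bit timestamps from partial hardware timestamps. A minimal sketch of the usual technique, with an illustrative helper name not taken from this patch:

	#include <linux/bits.h>

	/* Paste the upper bits of the current free-running counter onto the
	 * partial timestamp; if the counter's low bits have already wrapped
	 * past the partial value, the timestamp belongs to the previous wrap.
	 */
	static u64 example_ts_reconstruct(u64 now, u64 ts_partial, int ts_bits)
	{
		u64 mask = GENMASK_ULL(ts_bits - 1, 0);
		u64 ts = (now & ~mask) | ts_partial;

		if ((now & mask) < ts_partial)
			ts -= (mask + 1);

		return ts;
	}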
@@ -67,13 +89,25 @@ struct sja1105_private {
struct spi_device *spidev;
struct dsa_switch *ds;
struct sja1105_port ports[SJA1105_NUM_PORTS];
+ struct ptp_clock_info ptp_caps;
+ struct ptp_clock *clock;
+ /* The cycle counter translates the PTP timestamps (based on
+ * a free-running counter) into a software time domain.
+ */
+ struct cyclecounter tstamp_cc;
+ struct timecounter tstamp_tc;
+ struct delayed_work refresh_work;
+ /* Serializes all operations on the cycle counter */
+ struct mutex ptp_lock;
/* Serializes transmission of management frames so that
* the switch doesn't confuse them with one another.
*/
struct mutex mgmt_lock;
+ struct sja1105_tagger_data tagger_data;
};
#include "sja1105_dynamic_config.h"
+#include "sja1105_ptp.h"
struct sja1105_spi_message {
u64 access;
@@ -97,6 +131,8 @@ int sja1105_spi_send_long_packed_buf(const struct sja1105_private *priv,
sja1105_spi_rw_mode_t rw, u64 base_addr,
void *packed_buf, u64 buf_len);
int sja1105_static_config_upload(struct sja1105_private *priv);
+int sja1105_inhibit_tx(const struct sja1105_private *priv,
+ unsigned long port_bitmap, bool tx_inhibited);
extern struct sja1105_info sja1105e_info;
extern struct sja1105_info sja1105t_info;
@@ -125,6 +161,7 @@ typedef enum {
SJA1105_SPEED_AUTO = 0,
} sja1105_speed_t;
+int sja1105pqrs_setup_rgmii_delay(const void *ctx, int port);
int sja1105_clocking_setup_port(struct sja1105_private *priv, int port);
int sja1105_clocking_setup(struct sja1105_private *priv);
@@ -142,7 +179,20 @@ int sja1105_dynamic_config_write(struct sja1105_private *priv,
enum sja1105_blk_idx blk_idx,
int index, void *entry, bool keep);
-u8 sja1105_fdb_hash(struct sja1105_private *priv, const u8 *addr, u16 vid);
+enum sja1105_iotag {
+ SJA1105_C_TAG = 0, /* Inner VLAN header */
+ SJA1105_S_TAG = 1, /* Outer VLAN header */
+};
+
+u8 sja1105et_fdb_hash(struct sja1105_private *priv, const u8 *addr, u16 vid);
+int sja1105et_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid);
+int sja1105et_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid);
+int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid);
+int sja1105pqrs_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid);
/* Common implementations for the static and dynamic configs */
size_t sja1105_l2_forwarding_entry_packing(void *buf, void *entry_ptr,
diff --git a/drivers/net/dsa/sja1105/sja1105_clocking.c b/drivers/net/dsa/sja1105/sja1105_clocking.c
index 94bfe0ee50a8..608126a15d72 100644
--- a/drivers/net/dsa/sja1105/sja1105_clocking.c
+++ b/drivers/net/dsa/sja1105/sja1105_clocking.c
@@ -19,6 +19,17 @@ struct sja1105_cfg_pad_mii_tx {
u64 clk_ipud;
};
+struct sja1105_cfg_pad_mii_id {
+ u64 rxc_stable_ovr;
+ u64 rxc_delay;
+ u64 rxc_bypass;
+ u64 rxc_pd;
+ u64 txc_stable_ovr;
+ u64 txc_delay;
+ u64 txc_bypass;
+ u64 txc_pd;
+};
+
/* UM10944 Table 82.
* IDIV_0_C to IDIV_4_C control registers
* (addr. 10000Bh to 10000Fh)
@@ -373,11 +384,88 @@ static int sja1105_rgmii_cfg_pad_tx_config(struct sja1105_private *priv,
sja1105_cfg_pad_mii_tx_packing(packed_buf, &pad_mii_tx, PACK);
return sja1105_spi_send_packed_buf(priv, SPI_WRITE,
- regs->rgmii_pad_mii_tx[port],
+ regs->pad_mii_tx[port],
packed_buf, SJA1105_SIZE_CGU_CMD);
}
-static int sja1105_rgmii_clocking_setup(struct sja1105_private *priv, int port)
+static void
+sja1105_cfg_pad_mii_id_packing(void *buf, struct sja1105_cfg_pad_mii_id *cmd,
+ enum packing_op op)
+{
+ const int size = SJA1105_SIZE_CGU_CMD;
+
+ sja1105_packing(buf, &cmd->rxc_stable_ovr, 15, 15, size, op);
+ sja1105_packing(buf, &cmd->rxc_delay, 14, 10, size, op);
+ sja1105_packing(buf, &cmd->rxc_bypass, 9, 9, size, op);
+ sja1105_packing(buf, &cmd->rxc_pd, 8, 8, size, op);
+ sja1105_packing(buf, &cmd->txc_stable_ovr, 7, 7, size, op);
+ sja1105_packing(buf, &cmd->txc_delay, 6, 2, size, op);
+ sja1105_packing(buf, &cmd->txc_bypass, 1, 1, size, op);
+ sja1105_packing(buf, &cmd->txc_pd, 0, 0, size, op);
+}
+
+/* Valid range in degrees is 73.8 to 101.7, in 0.9 degree steps */
+static inline u64 sja1105_rgmii_delay(u64 phase)
+{
+ /* UM11040.pdf: The delay in degree phase is 73.8 + delay_tune * 0.9.
+ * To avoid floating point operations we'll multiply by 10
+ * and get 1 decimal point precision.
+ */
+ phase *= 10;
+ return (phase - 738) / 9;
+}
+
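A quick worked example of the conversion above (illustrative only), requesting a 90 degree phase shift:

	u64 delay_tune = sja1105_rgmii_delay(90);
	/* (90 * 10 - 738) / 9 = 162 / 9 = 18, which maps back to
	 * 73.8 + 18 * 0.9 = 90.0 degrees; at the 125 MHz gigabit RGMII
	 * clock (8 ns period) this is the usual ~2 ns delay.
	 */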
+/* The RGMII delay setup procedure is 2-step and gets called upon each
+ * .phylink_mac_config. Both the two stages and the per-call invocation
+ * are deliberate.
+ * The reason is that the RX Tunable Delay Line of the SJA1105 MAC has issues
+ * with recovering from a frequency change of the link partner's RGMII clock.
+ * The easiest way to recover from this is to temporarily power down the TDL,
+ * as it will re-lock at the new frequency afterwards.
+ */
+int sja1105pqrs_setup_rgmii_delay(const void *ctx, int port)
+{
+ const struct sja1105_private *priv = ctx;
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct sja1105_cfg_pad_mii_id pad_mii_id = {0};
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+ int rc;
+
+ if (priv->rgmii_rx_delay[port])
+ pad_mii_id.rxc_delay = sja1105_rgmii_delay(90);
+ if (priv->rgmii_tx_delay[port])
+ pad_mii_id.txc_delay = sja1105_rgmii_delay(90);
+
+ /* Stage 1: Turn the RGMII delay lines off. */
+ pad_mii_id.rxc_bypass = 1;
+ pad_mii_id.rxc_pd = 1;
+ pad_mii_id.txc_bypass = 1;
+ pad_mii_id.txc_pd = 1;
+ sja1105_cfg_pad_mii_id_packing(packed_buf, &pad_mii_id, PACK);
+
+ rc = sja1105_spi_send_packed_buf(priv, SPI_WRITE,
+ regs->pad_mii_id[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+ if (rc < 0)
+ return rc;
+
+ /* Stage 2: Turn the RGMII delay lines on. */
+ if (priv->rgmii_rx_delay[port]) {
+ pad_mii_id.rxc_bypass = 0;
+ pad_mii_id.rxc_pd = 0;
+ }
+ if (priv->rgmii_tx_delay[port]) {
+ pad_mii_id.txc_bypass = 0;
+ pad_mii_id.txc_pd = 0;
+ }
+ sja1105_cfg_pad_mii_id_packing(packed_buf, &pad_mii_id, PACK);
+
+ return sja1105_spi_send_packed_buf(priv, SPI_WRITE,
+ regs->pad_mii_id[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+}
+
+static int sja1105_rgmii_clocking_setup(struct sja1105_private *priv, int port,
+ sja1105_mii_role_t role)
{
struct device *dev = priv->ds->dev;
struct sja1105_mac_config_entry *mac;
@@ -429,6 +517,12 @@ static int sja1105_rgmii_clocking_setup(struct sja1105_private *priv, int port)
}
if (!priv->info->setup_rgmii_delay)
return 0;
+ /* The role has no hardware effect for RGMII. However, we use it as
+ * a proxy for this interface being a MAC-to-MAC connection, with
+ * the RGMII internal delays needing to be applied by us.
+ */
+ if (role == XMII_MAC)
+ return 0;
return priv->info->setup_rgmii_delay(priv, port);
}
@@ -575,7 +669,7 @@ int sja1105_clocking_setup_port(struct sja1105_private *priv, int port)
rc = sja1105_rmii_clocking_setup(priv, port, role);
break;
case XMII_MODE_RGMII:
- rc = sja1105_rgmii_clocking_setup(priv, port);
+ rc = sja1105_rgmii_clocking_setup(priv, port, role);
break;
default:
dev_err(dev, "Invalid interface mode specified: %d\n",
diff --git a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
index e73ab28bf632..6bfb1696a6f2 100644
--- a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
+++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
@@ -3,6 +3,98 @@
*/
#include "sja1105.h"
+/* In the dynamic configuration interface, the switch exposes a register-like
+ * view of some of the static configuration tables.
+ * Many times the field organization of the dynamic tables is abbreviated (not
+ * all fields are dynamically reconfigurable) and different from the static
+ * ones, but the key reason for having it is that we can spare a switch reset
+ * for settings that can be changed dynamically.
+ *
+ * This file creates a per-switch-family abstraction called
+ * struct sja1105_dynamic_table_ops and two operations that work with it:
+ * - sja1105_dynamic_config_write
+ * - sja1105_dynamic_config_read
+ *
+ * Compared to the struct sja1105_table_ops from sja1105_static_config.c,
+ * the dynamic accessors work with a compound buffer:
+ *
+ * packed_buf
+ *
+ * |
+ * V
+ * +-----------------------------------------+------------------+
+ * | ENTRY BUFFER | COMMAND BUFFER |
+ * +-----------------------------------------+------------------+
+ *
+ * <----------------------- packed_size ------------------------>
+ *
+ * The ENTRY BUFFER may or may not have the same layout, or size, as its static
+ * configuration table entry counterpart. When it does, the same packing
+ * function is reused (bar exceptional cases - see
+ * sja1105pqrs_dyn_l2_lookup_entry_packing).
+ *
+ * The reason for the COMMAND BUFFER being at the end is to be able to send
+ * a dynamic write command through a single SPI burst. By the time the switch
+ * reacts to the command, the ENTRY BUFFER is already populated with the data
+ * sent by the core.
+ *
+ * The COMMAND BUFFER is always SJA1105_SIZE_DYN_CMD bytes (one 32-bit word) in
+ * size.
+ *
+ * Sometimes the ENTRY BUFFER does not really exist (when the number of fields
+ * that can be reconfigured is small); in that case the switch repurposes some
+ * of the unused 32 bits of the COMMAND BUFFER to hold ENTRY data.
+ *
+ * The key members of struct sja1105_dynamic_table_ops are:
+ * - .entry_packing: A function that deals with packing an ENTRY structure
+ * into an SPI buffer, or retrieving an ENTRY structure
+ * from one.
+ * The @packed_buf pointer it is given always points to
+ * the ENTRY portion of the buffer.
+ * - .cmd_packing: A function that deals with packing/unpacking the COMMAND
+ * structure to/from the SPI buffer.
+ * It is given the same @packed_buf pointer as .entry_packing,
+ * so most of the time the @packed_buf it receives still points
+ * at the start of the buffer, *before* the COMMAND offset.
+ * To access the COMMAND portion of the buffer, the function
+ * knows its correct offset.
+ * Giving both functions the same pointer is handy because in
+ * extreme cases (see sja1105pqrs_dyn_l2_lookup_entry_packing)
+ * the .entry_packing is able to jump to the COMMAND portion,
+ * or vice-versa (sja1105pqrs_l2_lookup_cmd_packing).
+ * - .access: A bitmap of:
+ * OP_READ: Set if the hardware manual marks the ENTRY portion of the
+ * dynamic configuration table buffer as R (readable) after
+ * an SPI read command (the switch will populate the buffer).
+ * OP_WRITE: Set if the manual marks the ENTRY portion of the dynamic
+ * table buffer as W (writable) after an SPI write command
+ * (the switch will read the fields provided in the buffer).
+ * OP_DEL: Set if the manual says the VALIDENT bit is supported in the
+ * COMMAND portion of this dynamic config buffer (i.e. the
+ * specified entry can be invalidated through a SPI write
+ * command).
+ * OP_SEARCH: Set if the manual says that the index of an entry can
+ * be retrieved in the COMMAND portion of the buffer based
+ * on its ENTRY portion, as a result of a SPI write command.
+ * Only the TCAM-based FDB table on SJA1105 P/Q/R/S supports
+ * this.
+ * - .max_entry_count: The number of entries, counting from zero, that can be
+ * reconfigured through the dynamic interface. If a static
+ * table can be reconfigured at all dynamically, this
+ * number always matches the maximum number of supported
+ * static entries.
+ * - .packed_size: The length in bytes of the compound ENTRY + COMMAND BUFFER.
+ * Note that sometimes the compound buffer may contain holes in
+ * it (see sja1105_vlan_lookup_cmd_packing). The @packed_buf is
+ * contiguous however, so @packed_size includes any unused
+ * bytes.
+ * - .addr: The base SPI address at which the buffer must be written to the
+ * switch's memory. When looking at the hardware manual, this must
+ * always match the lowest documented address for the ENTRY, and not
+ * that of the COMMAND, since the other 32-bit words will follow along
+ * at the correct addresses.
+ */
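/* Editorial sketch (not part of the patch) of the caller-side view described
 * above, assuming a hypothetical ENTRY size of 20 bytes; 'ops', 'entry' and
 * 'cmd' stand in for a table's dynamic ops, its unpacked entry and the
 * command structure.
 */
u8 packed_buf[20 + SJA1105_SIZE_DYN_CMD] = {0};

ops->entry_packing(packed_buf, entry, PACK); /* fills bytes [0, 20)   */
ops->cmd_packing(packed_buf, &cmd, PACK);    /* adds +20 internally,  */
                                             /* fills bytes [20, 24)  */
/* A single SPI write of ops->packed_size bytes at ops->addr sends both. */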
+
#define SJA1105_SIZE_DYN_CMD 4
#define SJA1105ET_SIZE_MAC_CONFIG_DYN_ENTRY \
@@ -35,17 +127,70 @@
#define SJA1105_MAX_DYN_CMD_SIZE \
SJA1105PQRS_SIZE_MAC_CONFIG_DYN_CMD
+struct sja1105_dyn_cmd {
+ bool search;
+ u64 valid;
+ u64 rdwrset;
+ u64 errors;
+ u64 valident;
+ u64 index;
+};
+
+enum sja1105_hostcmd {
+ SJA1105_HOSTCMD_SEARCH = 1,
+ SJA1105_HOSTCMD_READ = 2,
+ SJA1105_HOSTCMD_WRITE = 3,
+ SJA1105_HOSTCMD_INVALIDATE = 4,
+};
+
static void
sja1105pqrs_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
enum packing_op op)
{
u8 *p = buf + SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY;
const int size = SJA1105_SIZE_DYN_CMD;
+ u64 hostcmd;
sja1105_packing(p, &cmd->valid, 31, 31, size, op);
sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
sja1105_packing(p, &cmd->errors, 29, 29, size, op);
sja1105_packing(p, &cmd->valident, 27, 27, size, op);
+
+ /* VALIDENT is supposed to indicate "keep or not", but in SJA1105 E/T,
+ * using it to delete a management route was unsupported. UM10944
+ * said about it:
+ *
+ * In case of a write access with the MGMTROUTE flag set,
+ * the flag will be ignored. It will always be found cleared
+ * for read accesses with the MGMTROUTE flag set.
+ *
+ * SJA1105 P/Q/R/S keeps the same behavior w.r.t. VALIDENT, but there
+ * is now another flag called HOSTCMD which does more stuff (quoting
+ * from UM11040):
+ *
+ * A write request is accepted only when HOSTCMD is set to write host
+ * or invalid. A read request is accepted only when HOSTCMD is set to
+ * search host or read host.
+ *
+ * So it is possible to translate a RDWRSET/VALIDENT combination into
+ * HOSTCMD so that we keep the dynamic command API in place, and
+ * at the same time achieve compatibility with the management route
+ * command structure.
+ */
+ if (cmd->rdwrset == SPI_READ) {
+ if (cmd->search)
+ hostcmd = SJA1105_HOSTCMD_SEARCH;
+ else
+ hostcmd = SJA1105_HOSTCMD_READ;
+ } else {
+ /* SPI_WRITE */
+ if (cmd->valident)
+ hostcmd = SJA1105_HOSTCMD_WRITE;
+ else
+ hostcmd = SJA1105_HOSTCMD_INVALIDATE;
+ }
+ sja1105_packing(p, &hostcmd, 25, 23, size, op);
+
/* Hack - The hardware takes the 'index' field within
* struct sja1105_l2_lookup_entry as the index on which this command
* will operate. However it will ignore everything else, so 'index'
@@ -54,9 +199,66 @@ sja1105pqrs_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
* such that our API doesn't need to ask for a full-blown entry
* structure when e.g. a delete is requested.
*/
- sja1105_packing(buf, &cmd->index, 29, 20,
+ sja1105_packing(buf, &cmd->index, 15, 6,
SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY, op);
- /* TODO hostcmd */
+}
+
+/* The switch behavior here is quirky enough to make our command/entry
+ * abstraction crumble apart.
+ *
+ * On P/Q/R/S, the switch indicates whether a FDB entry is statically
+ * programmed or dynamically learned via a flag called LOCKEDS.
+ * The hardware manual says about this field:
+ *
+ * On write will specify the format of ENTRY.
+ * On read the flag will be found cleared at times the VALID flag is found
+ * set. The flag will also be found cleared in response to a read having the
+ * MGMTROUTE flag set. In response to a read with the MGMTROUTE flag
+ * cleared, the flag be set if the most recent access operated on an entry
+ * that was either loaded by configuration or through dynamic reconfiguration
+ * (as opposed to automatically learned entries).
+ *
+ * The trouble with this flag is that it's part of the *command* to access the
+ * dynamic interface, and not part of the *entry* retrieved from it.
+ * In other words, for a sja1105_dynamic_config_read, LOCKEDS is supposed to
+ * be an output from the switch into the command buffer, and for a
+ * sja1105_dynamic_config_write, the switch treats LOCKEDS as an input
+ * (hence we can write either static or automatically learned entries, from
+ * the core).
+ * But the manual contradicts itself in the last phrase where it says that on
+ * read, LOCKEDS will be set to 1 for all FDB entries written through the
+ * dynamic interface (therefore, the value of LOCKEDS from the
+ * sja1105_dynamic_config_write is not really used for anything, it'll store a
+ * 1 anyway).
+ * This means you can't really write a FDB entry with LOCKEDS=0 (automatically
+ * learned) into the switch, which kind of makes sense.
+ * As for reading through the dynamic interface, it doesn't make too much sense
+ * to put LOCKEDS into the command, since the switch will inevitably have to
+ * ignore it (otherwise a command would be like "read the FDB entry 123, but
+ * only if it's dynamically learned" <- well how am I supposed to know?) and
+ * just use it as an output buffer for its findings. But guess what... that's
+ * what the entry buffer is for!
+ * Unfortunately, what really breaks this abstraction is the fact that it
+ * wasn't designed with the switch's ability to write back entry-related
+ * data through the command buffer in mind.
+ * However, whether a FDB entry is statically or dynamically learned *is* part
+ * of the entry and not the command data, no matter what the switch thinks.
+ * In order to report it as part of the entry anyway, we'll need to wrap the
+ * sja1105pqrs_l2_lookup_entry_packing from sja1105_static_config.c, and take
+ * a peek outside of the caller-supplied @buf (the entry buffer), to reach the
+ * command buffer.
+ */
+static size_t
+sja1105pqrs_dyn_l2_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_l2_lookup_entry *entry = entry_ptr;
+ u8 *cmd = buf + SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(cmd, &entry->lockeds, 28, 28, size, op);
+
+ return sja1105pqrs_l2_lookup_entry_packing(buf, entry_ptr, op);
}
static void
@@ -107,6 +309,36 @@ static size_t sja1105et_mgmt_route_entry_packing(void *buf, void *entry_ptr,
return size;
}
+static void
+sja1105pqrs_mgmt_route_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY;
+ u64 mgmtroute = 1;
+
+ sja1105pqrs_l2_lookup_cmd_packing(buf, cmd, op);
+ if (op == PACK)
+ sja1105_pack(p, &mgmtroute, 26, 26, SJA1105_SIZE_DYN_CMD);
+}
+
+static size_t sja1105pqrs_mgmt_route_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY;
+ struct sja1105_mgmt_entry *entry = entry_ptr;
+
+ /* In P/Q/R/S, enfport got renamed to mgmtvalid, but its purpose
+ * is the same (the driver uses it to confirm that the frame was sent).
+ * So just keep the name from E/T.
+ */
+ sja1105_packing(buf, &entry->tsreg, 71, 71, size, op);
+ sja1105_packing(buf, &entry->takets, 70, 70, size, op);
+ sja1105_packing(buf, &entry->macaddr, 69, 22, size, op);
+ sja1105_packing(buf, &entry->destports, 21, 17, size, op);
+ sja1105_packing(buf, &entry->enfport, 16, 16, size, op);
+ return size;
+}
+
/* In E/T, entry is at addresses 0x27-0x28. There is a 4 byte gap at 0x29,
* and command is at 0x2a. Similarly in P/Q/R/S there is a 1 register gap
* between entry (0x2d, 0x2e) and command (0x30).
@@ -240,6 +472,7 @@ sja1105et_general_params_entry_packing(void *buf, void *entry_ptr,
#define OP_READ BIT(0)
#define OP_WRITE BIT(1)
#define OP_DEL BIT(2)
+#define OP_SEARCH BIT(3)
/* SJA1105E/T: First generation */
struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = {
@@ -293,6 +526,7 @@ struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = {
.addr = 0x38,
},
[BLK_IDX_L2_FORWARDING_PARAMS] = {0},
+ [BLK_IDX_AVB_PARAMS] = {0},
[BLK_IDX_GENERAL_PARAMS] = {
.entry_packing = sja1105et_general_params_entry_packing,
.cmd_packing = sja1105et_general_params_cmd_packing,
@@ -304,14 +538,22 @@ struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = {
[BLK_IDX_XMII_PARAMS] = {0},
};
-/* SJA1105P/Q/R/S: Second generation: TODO */
+/* SJA1105P/Q/R/S: Second generation */
struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN] = {
[BLK_IDX_L2_LOOKUP] = {
- .entry_packing = sja1105pqrs_l2_lookup_entry_packing,
+ .entry_packing = sja1105pqrs_dyn_l2_lookup_entry_packing,
.cmd_packing = sja1105pqrs_l2_lookup_cmd_packing,
- .access = (OP_READ | OP_WRITE | OP_DEL),
+ .access = (OP_READ | OP_WRITE | OP_DEL | OP_SEARCH),
.max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,
- .packed_size = SJA1105ET_SIZE_L2_LOOKUP_DYN_CMD,
+ .packed_size = SJA1105PQRS_SIZE_L2_LOOKUP_DYN_CMD,
+ .addr = 0x24,
+ },
+ [BLK_IDX_MGMT_ROUTE] = {
+ .entry_packing = sja1105pqrs_mgmt_route_entry_packing,
+ .cmd_packing = sja1105pqrs_mgmt_route_cmd_packing,
+ .access = (OP_READ | OP_WRITE | OP_DEL | OP_SEARCH),
+ .max_entry_count = SJA1105_NUM_PORTS,
+ .packed_size = SJA1105PQRS_SIZE_L2_LOOKUP_DYN_CMD,
.addr = 0x24,
},
[BLK_IDX_L2_POLICING] = {0},
@@ -348,6 +590,7 @@ struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN] = {
.addr = 0x38,
},
[BLK_IDX_L2_FORWARDING_PARAMS] = {0},
+ [BLK_IDX_AVB_PARAMS] = {0},
[BLK_IDX_GENERAL_PARAMS] = {
.entry_packing = sja1105et_general_params_entry_packing,
.cmd_packing = sja1105et_general_params_cmd_packing,
@@ -359,6 +602,24 @@ struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN] = {
[BLK_IDX_XMII_PARAMS] = {0},
};
+/* Provides read access to the settings through the dynamic interface
+ * of the switch.
+ * @blk_idx is used as a key to select from the sja1105_dynamic_table_ops.
+ * The selection is limited by the hardware with respect to which
+ * configuration blocks can be read through the dynamic interface.
+ * @index is used to retrieve a particular table entry. If negative
+ * (and if the @blk_idx supports the search operation), a search
+ * is performed based on the @entry parameter.
+ * @entry is type-cast to an unpacked structure that holds a table entry
+ * of the type specified in @blk_idx.
+ * Usually an output argument. If @index is negative, then this
+ * argument is used as input/output: it should be pre-populated
+ * with the element to search for. Entries which support the
+ * search operation will have an "index" field (not the @index
+ * argument to this function) and that is where the found index
+ * will be returned (or left unmodified - thus negative - if not
+ * found).
+ */
int sja1105_dynamic_config_read(struct sja1105_private *priv,
enum sja1105_blk_idx blk_idx,
int index, void *entry)
@@ -375,8 +636,10 @@ int sja1105_dynamic_config_read(struct sja1105_private *priv,
ops = &priv->info->dyn_ops[blk_idx];
- if (index >= ops->max_entry_count)
+ if (index >= 0 && index >= ops->max_entry_count)
return -ERANGE;
+ if (index < 0 && !(ops->access & OP_SEARCH))
+ return -EOPNOTSUPP;
if (!(ops->access & OP_READ))
return -EOPNOTSUPP;
if (ops->packed_size > SJA1105_MAX_DYN_CMD_SIZE)
@@ -388,9 +651,20 @@ int sja1105_dynamic_config_read(struct sja1105_private *priv,
cmd.valid = true; /* Trigger action on table entry */
cmd.rdwrset = SPI_READ; /* Action is read */
- cmd.index = index;
+ if (index < 0) {
+ /* Avoid copying a signed negative number to an u64 */
+ cmd.index = 0;
+ cmd.search = true;
+ } else {
+ cmd.index = index;
+ cmd.search = false;
+ }
+ cmd.valident = true;
ops->cmd_packing(packed_buf, &cmd, PACK);
+ if (cmd.search)
+ ops->entry_packing(packed_buf, entry, PACK);
+
/* Send SPI write operation: read config table entry */
rc = sja1105_spi_send_packed_buf(priv, SPI_WRITE, ops->addr,
packed_buf, ops->packed_size);
@@ -416,7 +690,7 @@ int sja1105_dynamic_config_read(struct sja1105_private *priv,
* So don't error out in that case.
*/
if (!cmd.valident && blk_idx != BLK_IDX_MGMT_ROUTE)
- return -EINVAL;
+ return -ENOENT;
cpu_relax();
} while (cmd.valid && --retries);
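/* Editorial usage sketch (not part of the patch) for the search semantics
 * documented above sja1105_dynamic_config_read(): 'addr' and 'vid' are
 * assumed to be caller-provided; real callers (see sja1105pqrs_fdb_add
 * below) also fill in the mask_macaddr/mask_vlanid/iotag fields.
 */
struct sja1105_l2_lookup_entry l2_lookup = {0};
int rc;

l2_lookup.macaddr = ether_addr_to_u64(addr); /* what to search for */
l2_lookup.vlanid = vid;

rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
                                 SJA1105_SEARCH, &l2_lookup);
if (rc == 0)
    /* Found: the switch wrote the matching TCAM index to l2_lookup.index */
    pr_info("found at index %llu\n", l2_lookup.index);
/* A negative rc (e.g. -ENOENT) means no matching entry was found. */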
@@ -448,6 +722,8 @@ int sja1105_dynamic_config_write(struct sja1105_private *priv,
if (index >= ops->max_entry_count)
return -ERANGE;
+ if (index < 0)
+ return -ERANGE;
if (!(ops->access & OP_WRITE))
return -EOPNOTSUPP;
if (!keep && !(ops->access & OP_DEL))
@@ -510,7 +786,7 @@ static u8 sja1105_crc8_add(u8 crc, u8 byte, u8 poly)
* is also received as argument in the Koopman notation that the switch
* hardware stores it in.
*/
-u8 sja1105_fdb_hash(struct sja1105_private *priv, const u8 *addr, u16 vid)
+u8 sja1105et_fdb_hash(struct sja1105_private *priv, const u8 *addr, u16 vid)
{
struct sja1105_l2_lookup_params_entry *l2_lookup_params =
priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS].entries;
diff --git a/drivers/net/dsa/sja1105/sja1105_dynamic_config.h b/drivers/net/dsa/sja1105/sja1105_dynamic_config.h
index 77be59546a55..740dadf43f01 100644
--- a/drivers/net/dsa/sja1105/sja1105_dynamic_config.h
+++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.h
@@ -7,13 +7,10 @@
#include "sja1105.h"
#include <linux/packing.h>
-struct sja1105_dyn_cmd {
- u64 valid;
- u64 rdwrset;
- u64 errors;
- u64 valident;
- u64 index;
-};
+/* Special index that can be used for sja1105_dynamic_config_read */
+#define SJA1105_SEARCH -1
+
+struct sja1105_dyn_cmd;
struct sja1105_dynamic_table_ops {
/* This returns size_t just to keep same prototype as the
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index 1c3959efebc4..32bf3a7cc3b6 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -70,8 +70,7 @@ static int sja1105_init_mac_settings(struct sja1105_private *priv)
/* Keep standard IFG of 12 bytes on egress. */
.ifg = 0,
/* Always put the MAC speed in automatic mode, where it can be
- * retrieved from the PHY object through phylib and
- * sja1105_adjust_port_config.
+ * adjusted at runtime by PHYLINK.
*/
.speed = SJA1105_SPEED_AUTO,
/* No static correction for 1-step 1588 events */
@@ -81,7 +80,7 @@ static int sja1105_init_mac_settings(struct sja1105_private *priv)
.maxage = 0xFF,
/* Internal VLAN (pvid) to apply to untagged ingress */
.vlanprio = 0,
- .vlanid = 0,
+ .vlanid = 1,
.ing_mirr = false,
.egr_mirr = false,
/* Don't drop traffic with other EtherType than ETH_P_IP */
@@ -116,7 +115,6 @@ static int sja1105_init_mac_settings(struct sja1105_private *priv)
if (!table->entries)
return -ENOMEM;
- /* Override table based on phylib DT bindings */
table->entry_count = SJA1105_NUM_PORTS;
mac = table->entries;
@@ -157,7 +155,7 @@ static int sja1105_init_mii_settings(struct sja1105_private *priv,
if (!table->entries)
return -ENOMEM;
- /* Override table based on phylib DT bindings */
+ /* Override table based on PHYLINK DT bindings */
table->entry_count = SJA1105_MAX_XMII_PARAMS_COUNT;
mii = table->entries;
@@ -205,11 +203,16 @@ static int sja1105_init_static_fdb(struct sja1105_private *priv)
static int sja1105_init_l2_lookup_params(struct sja1105_private *priv)
{
struct sja1105_table *table;
+ u64 max_fdb_entries = SJA1105_MAX_L2_LOOKUP_COUNT / SJA1105_NUM_PORTS;
struct sja1105_l2_lookup_params_entry default_l2_lookup_params = {
/* Learned FDB entries are forgotten after 300 seconds */
.maxage = SJA1105_AGEING_TIME_MS(300000),
/* All entries within a FDB bin are available for learning */
.dyn_tbsz = SJA1105ET_FDB_BIN_SIZE,
+ /* And the P/Q/R/S equivalent setting: */
+ .start_dynspc = 0,
+ .maxaddrp = {max_fdb_entries, max_fdb_entries, max_fdb_entries,
+ max_fdb_entries, max_fdb_entries, },
/* 2^8 + 2^5 + 2^3 + 2^2 + 2^1 + 1 in Koopman notation */
.poly = 0x97,
/* This selects between Independent VLAN Learning (IVL) and
@@ -225,6 +228,13 @@ static int sja1105_init_l2_lookup_params(struct sja1105_private *priv)
* Maybe correlate with no_linklocal_learn from bridge driver?
*/
.no_mgmt_learn = true,
+ /* P/Q/R/S only */
+ .use_static = true,
+ /* Dynamically learned FDB entries can overwrite other (older)
+ * dynamic FDB entries
+ */
+ .owr_dyn = true,
+ .drpnolearn = true,
};
table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
@@ -257,20 +267,15 @@ static int sja1105_init_static_vlan(struct sja1105_private *priv)
.vmemb_port = 0,
.vlan_bc = 0,
.tag_port = 0,
- .vlanid = 0,
+ .vlanid = 1,
};
int i;
table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
- /* The static VLAN table will only contain the initial pvid of 0.
+ /* The static VLAN table will only contain the initial pvid of 1.
* All other VLANs are to be configured through dynamic entries,
* and kept in the static configuration table as backing memory.
- * The pvid of 0 is sufficient to pass traffic while the ports are
- * standalone and when vlan_filtering is disabled. When filtering
- * gets enabled, the switchdev core sets up the VLAN ID 1 and sets
- * it as the new pvid. Actually 'pvid 1' still comes up in 'bridge
- * vlan' even when vlan_filtering is off, but it has no effect.
*/
if (table->entry_count) {
kfree(table->entries);
@@ -284,7 +289,7 @@ static int sja1105_init_static_vlan(struct sja1105_private *priv)
table->entry_count = 1;
- /* VLAN ID 0: all DT-defined ports are members; no restrictions on
+ /* VLAN 1: all DT-defined ports are members; no restrictions on
* forwarding; always transmit priority-tagged frames as untagged.
*/
for (i = 0; i < SJA1105_NUM_PORTS; i++) {
@@ -380,14 +385,14 @@ static int sja1105_init_general_params(struct sja1105_private *priv)
.mirr_ptacu = 0,
.switchid = priv->ds->index,
/* Priority queue for link-local frames trapped to CPU */
- .hostprio = 0,
+ .hostprio = 7,
.mac_fltres1 = SJA1105_LINKLOCAL_FILTER_A,
.mac_flt1 = SJA1105_LINKLOCAL_FILTER_A_MASK,
- .incl_srcpt1 = true,
+ .incl_srcpt1 = false,
.send_meta1 = false,
.mac_fltres0 = SJA1105_LINKLOCAL_FILTER_B,
.mac_flt0 = SJA1105_LINKLOCAL_FILTER_B_MASK,
- .incl_srcpt0 = true,
+ .incl_srcpt0 = false,
.send_meta0 = false,
/* The destination for traffic matching mac_fltres1 and
* mac_fltres0 on all ports except host_port. Such traffic
@@ -499,6 +504,39 @@ static int sja1105_init_l2_policing(struct sja1105_private *priv)
return 0;
}
+static int sja1105_init_avb_params(struct sja1105_private *priv,
+ bool on)
+{
+ struct sja1105_avb_params_entry *avb;
+ struct sja1105_table *table;
+
+ table = &priv->static_config.tables[BLK_IDX_AVB_PARAMS];
+
+ /* Discard previous AVB Parameters Table */
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ /* Configure the reception of meta frames only if requested */
+ if (!on)
+ return 0;
+
+ table->entries = kcalloc(SJA1105_MAX_AVB_PARAMS_COUNT,
+ table->ops->unpacked_entry_size, GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+ table->entry_count = SJA1105_MAX_AVB_PARAMS_COUNT;
+
+ avb = table->entries;
+
+ avb->destmeta = SJA1105_META_DMAC;
+ avb->srcmeta = SJA1105_META_SMAC;
+
+ return 0;
+}
+
static int sja1105_static_config_load(struct sja1105_private *priv,
struct sja1105_dt_port *ports)
{
@@ -539,6 +577,9 @@ static int sja1105_static_config_load(struct sja1105_private *priv,
rc = sja1105_init_general_params(priv);
if (rc < 0)
return rc;
+ rc = sja1105_init_avb_params(priv, false);
+ if (rc < 0)
+ return rc;
/* Send initial configuration to hardware via SPI */
return sja1105_static_config_upload(priv);
@@ -644,26 +685,18 @@ static int sja1105_parse_dt(struct sja1105_private *priv,
return rc;
}
-/* Convert back and forth MAC speed from Mbps to SJA1105 encoding */
+/* Convert link speed from SJA1105 to ethtool encoding */
static int sja1105_speed[] = {
- [SJA1105_SPEED_AUTO] = 0,
- [SJA1105_SPEED_10MBPS] = 10,
- [SJA1105_SPEED_100MBPS] = 100,
- [SJA1105_SPEED_1000MBPS] = 1000,
+ [SJA1105_SPEED_AUTO] = SPEED_UNKNOWN,
+ [SJA1105_SPEED_10MBPS] = SPEED_10,
+ [SJA1105_SPEED_100MBPS] = SPEED_100,
+ [SJA1105_SPEED_1000MBPS] = SPEED_1000,
};
-/* Set link speed and enable/disable traffic I/O in the MAC configuration
- * for a specific port.
- *
- * @speed_mbps: If 0, leave the speed unchanged, else adapt MAC to PHY speed.
- * @enabled: Manage Rx and Tx settings for this port. If false, overrides the
- * settings from the STP state, but not persistently (does not
- * overwrite the static MAC info for this port).
- */
+/* Set link speed in the MAC configuration for a specific port. */
static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
- int speed_mbps, bool enabled)
+ int speed_mbps)
{
- struct sja1105_mac_config_entry dyn_mac;
struct sja1105_xmii_params_entry *mii;
struct sja1105_mac_config_entry *mac;
struct device *dev = priv->ds->dev;
@@ -671,21 +704,33 @@ static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
sja1105_speed_t speed;
int rc;
- mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
+ /* On P/Q/R/S, one can read from the device via the MAC reconfiguration
+ * tables. On E/T, MAC reconfig tables are not readable, only writable.
+ * We have to *know* what the MAC looks like. For the sake of keeping
+ * the code common, we'll use the static configuration tables as a
+ * reasonable approximation for both E/T and P/Q/R/S.
+ */
mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
+ mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
switch (speed_mbps) {
- case 0:
- /* No speed update requested */
+ case SPEED_UNKNOWN:
+ /* PHYLINK called sja1105_mac_config() to inform us about
+ * the state->interface, but AN has not completed and the
+ * speed is not yet valid. UM10944.pdf says that setting
+ * SJA1105_SPEED_AUTO at runtime disables the port, so that is
+ * ok for power consumption in case AN will never complete -
+ * otherwise PHYLINK should come back with a new update.
+ */
speed = SJA1105_SPEED_AUTO;
break;
- case 10:
+ case SPEED_10:
speed = SJA1105_SPEED_10MBPS;
break;
- case 100:
+ case SPEED_100:
speed = SJA1105_SPEED_100MBPS;
break;
- case 1000:
+ case SPEED_1000:
speed = SJA1105_SPEED_1000MBPS;
break;
default:
@@ -693,26 +738,16 @@ static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
return -EINVAL;
}
- /* If requested, overwrite SJA1105_SPEED_AUTO from the static MAC
- * configuration table, since this will be used for the clocking setup,
- * and we no longer need to store it in the static config (already told
- * hardware we want auto during upload phase).
+ /* Overwrite SJA1105_SPEED_AUTO from the static MAC configuration
+ * table, since this will be used for the clocking setup, and we no
+ * longer need to store it in the static config (already told hardware
+ * we want auto during upload phase).
*/
mac[port].speed = speed;
- /* On P/Q/R/S, one can read from the device via the MAC reconfiguration
- * tables. On E/T, MAC reconfig tables are not readable, only writable.
- * We have to *know* what the MAC looks like. For the sake of keeping
- * the code common, we'll use the static configuration tables as a
- * reasonable approximation for both E/T and P/Q/R/S.
- */
- dyn_mac = mac[port];
- dyn_mac.ingress = enabled && mac[port].ingress;
- dyn_mac.egress = enabled && mac[port].egress;
-
/* Write to the dynamic reconfiguration tables */
- rc = sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG,
- port, &dyn_mac, true);
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
+ &mac[port], true);
if (rc < 0) {
dev_err(dev, "Failed to write MAC config: %d\n", rc);
return rc;
@@ -724,9 +759,6 @@ static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
* the clock setup does interrupt the clock signal for a certain time
* which causes trouble for all PHYs relying on this signal.
*/
- if (!enabled)
- return 0;
-
phy_mode = mii->xmii_mode[port];
if (phy_mode != XMII_MODE_RGMII)
return 0;
@@ -734,15 +766,67 @@ static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
return sja1105_clocking_setup_port(priv, port);
}
-static void sja1105_adjust_link(struct dsa_switch *ds, int port,
- struct phy_device *phydev)
+/* The SJA1105 MAC programming model is through the static config (the xMII
+ * Mode table cannot be dynamically reconfigured), and we have to program
+ * that early (earlier than PHYLINK calls us, anyway).
+ * So just error out in case the connected PHY attempts to change the initial
+ * system interface MII protocol from what is defined in the DT, at least for
+ * now.
+ */
+static bool sja1105_phy_mode_mismatch(struct sja1105_private *priv, int port,
+ phy_interface_t interface)
+{
+ struct sja1105_xmii_params_entry *mii;
+ sja1105_phy_interface_t phy_mode;
+
+ mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
+ phy_mode = mii->xmii_mode[port];
+
+ switch (interface) {
+ case PHY_INTERFACE_MODE_MII:
+ return (phy_mode != XMII_MODE_MII);
+ case PHY_INTERFACE_MODE_RMII:
+ return (phy_mode != XMII_MODE_RMII);
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ return (phy_mode != XMII_MODE_RGMII);
+ default:
+ return true;
+ }
+}
+
+static void sja1105_mac_config(struct dsa_switch *ds, int port,
+ unsigned int link_an_mode,
+ const struct phylink_link_state *state)
{
struct sja1105_private *priv = ds->priv;
- if (!phydev->link)
- sja1105_adjust_port_config(priv, port, 0, false);
- else
- sja1105_adjust_port_config(priv, port, phydev->speed, true);
+ if (sja1105_phy_mode_mismatch(priv, port, state->interface))
+ return;
+
+ if (link_an_mode == MLO_AN_INBAND) {
+ dev_err(ds->dev, "In-band AN not supported!\n");
+ return;
+ }
+
+ sja1105_adjust_port_config(priv, port, state->speed);
+}
+
+static void sja1105_mac_link_down(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ sja1105_inhibit_tx(ds->priv, BIT(port), true);
+}
+
+static void sja1105_mac_link_up(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev)
+{
+ sja1105_inhibit_tx(ds->priv, BIT(port), false);
}
static void sja1105_phylink_validate(struct dsa_switch *ds, int port,
@@ -759,6 +843,16 @@ static void sja1105_phylink_validate(struct dsa_switch *ds, int port,
mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
+ /* include/linux/phylink.h says:
+ * When @state->interface is %PHY_INTERFACE_MODE_NA, phylink
+ * expects the MAC driver to return all supported link modes.
+ */
+ if (state->interface != PHY_INTERFACE_MODE_NA &&
+ sja1105_phy_mode_mismatch(priv, port, state->interface)) {
+ bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ return;
+ }
+
/* The MAC does not support pause frames, and also doesn't
* support half-duplex traffic modes.
*/
@@ -774,6 +868,77 @@ static void sja1105_phylink_validate(struct dsa_switch *ds, int port,
__ETHTOOL_LINK_MODE_MASK_NBITS);
}
+static int
+sja1105_find_static_fdb_entry(struct sja1105_private *priv, int port,
+ const struct sja1105_l2_lookup_entry *requested)
+{
+ struct sja1105_l2_lookup_entry *l2_lookup;
+ struct sja1105_table *table;
+ int i;
+
+ table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
+ l2_lookup = table->entries;
+
+ for (i = 0; i < table->entry_count; i++)
+ if (l2_lookup[i].macaddr == requested->macaddr &&
+ l2_lookup[i].vlanid == requested->vlanid &&
+ l2_lookup[i].destports & BIT(port))
+ return i;
+
+ return -1;
+}
+
+/* We want FDB entries added statically through the bridge command to persist
+ * across switch resets, which are a common thing during normal SJA1105
+ * operation. So we have to back them up in the static configuration tables
+ * and hence apply them on next static config upload... yay!
+ */
+static int
+sja1105_static_fdb_change(struct sja1105_private *priv, int port,
+ const struct sja1105_l2_lookup_entry *requested,
+ bool keep)
+{
+ struct sja1105_l2_lookup_entry *l2_lookup;
+ struct sja1105_table *table;
+ int rc, match;
+
+ table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
+
+ match = sja1105_find_static_fdb_entry(priv, port, requested);
+ if (match < 0) {
+ /* Can't delete a missing entry. */
+ if (!keep)
+ return 0;
+
+ /* No match => new entry */
+ rc = sja1105_table_resize(table, table->entry_count + 1);
+ if (rc)
+ return rc;
+
+ match = table->entry_count - 1;
+ }
+
+ /* Assign pointer after the resize (it may be new memory) */
+ l2_lookup = table->entries;
+
+ /* We have a match.
+ * If the job was to add this FDB entry, it's already done (mostly
+ * anyway, since the port forwarding mask may have changed, in which
+ * case we update it).
+ * Otherwise we have to delete it.
+ */
+ if (keep) {
+ l2_lookup[match] = *requested;
+ return 0;
+ }
+
+ /* To remove, the strategy is to overwrite the element with
+ * the last one, and then reduce the array size by 1
+ */
+ l2_lookup[match] = l2_lookup[table->entry_count - 1];
+ return sja1105_table_resize(table, table->entry_count - 1);
+}
+
/* First-generation switches have a 4-way set associative TCAM that
* holds the FDB entries. An FDB index spans from 0 to 1023 and is comprised of
* a "bin" (grouping of 4 entries) and a "way" (an entry within a bin).
@@ -785,10 +950,10 @@ static inline int sja1105et_fdb_index(int bin, int way)
return bin * SJA1105ET_FDB_BIN_SIZE + way;
}
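/* Editorial check (not part of the patch): with SJA1105ET_FDB_BIN_SIZE = 4,
 * bin 100 / way 3 maps to index 100 * 4 + 3 = 403, and the last slot,
 * bin 255 / way 3, maps to 1023 - matching the 0..1023 range described above.
 */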
-static int sja1105_is_fdb_entry_in_bin(struct sja1105_private *priv, int bin,
- const u8 *addr, u16 vid,
- struct sja1105_l2_lookup_entry *match,
- int *last_unused)
+static int sja1105et_is_fdb_entry_in_bin(struct sja1105_private *priv, int bin,
+ const u8 *addr, u16 vid,
+ struct sja1105_l2_lookup_entry *match,
+ int *last_unused)
{
int way;
@@ -817,19 +982,19 @@ static int sja1105_is_fdb_entry_in_bin(struct sja1105_private *priv, int bin,
return -1;
}
-static int sja1105_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+int sja1105et_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid)
{
struct sja1105_l2_lookup_entry l2_lookup = {0};
struct sja1105_private *priv = ds->priv;
struct device *dev = ds->dev;
int last_unused = -1;
- int bin, way;
+ int bin, way, rc;
- bin = sja1105_fdb_hash(priv, addr, vid);
+ bin = sja1105et_fdb_hash(priv, addr, vid);
- way = sja1105_is_fdb_entry_in_bin(priv, bin, addr, vid,
- &l2_lookup, &last_unused);
+ way = sja1105et_is_fdb_entry_in_bin(priv, bin, addr, vid,
+ &l2_lookup, &last_unused);
if (way >= 0) {
/* We have an FDB entry. Is our port in the destination
* mask? If yes, we need to do nothing. If not, we need
@@ -868,22 +1033,26 @@ static int sja1105_fdb_add(struct dsa_switch *ds, int port,
}
l2_lookup.index = sja1105et_fdb_index(bin, way);
- return sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
- l2_lookup.index, &l2_lookup,
- true);
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
+ l2_lookup.index, &l2_lookup,
+ true);
+ if (rc < 0)
+ return rc;
+
+ return sja1105_static_fdb_change(priv, port, &l2_lookup, true);
}
-static int sja1105_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+int sja1105et_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid)
{
struct sja1105_l2_lookup_entry l2_lookup = {0};
struct sja1105_private *priv = ds->priv;
- int index, bin, way;
+ int index, bin, way, rc;
bool keep;
- bin = sja1105_fdb_hash(priv, addr, vid);
- way = sja1105_is_fdb_entry_in_bin(priv, bin, addr, vid,
- &l2_lookup, NULL);
+ bin = sja1105et_fdb_hash(priv, addr, vid);
+ way = sja1105et_is_fdb_entry_in_bin(priv, bin, addr, vid,
+ &l2_lookup, NULL);
if (way < 0)
return 0;
index = sja1105et_fdb_index(bin, way);
@@ -893,15 +1062,176 @@ static int sja1105_fdb_del(struct dsa_switch *ds, int port,
* need to completely evict the FDB entry.
* Otherwise we just write it back.
*/
- if (l2_lookup.destports & BIT(port))
- l2_lookup.destports &= ~BIT(port);
+ l2_lookup.destports &= ~BIT(port);
+
+ if (l2_lookup.destports)
+ keep = true;
+ else
+ keep = false;
+
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
+ index, &l2_lookup, keep);
+ if (rc < 0)
+ return rc;
+
+ return sja1105_static_fdb_change(priv, port, &l2_lookup, keep);
+}
+
+int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct sja1105_l2_lookup_entry l2_lookup = {0};
+ struct sja1105_private *priv = ds->priv;
+ int rc, i;
+
+ /* Search for an existing entry in the FDB table */
+ l2_lookup.macaddr = ether_addr_to_u64(addr);
+ l2_lookup.vlanid = vid;
+ l2_lookup.iotag = SJA1105_S_TAG;
+ l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
+ l2_lookup.mask_vlanid = VLAN_VID_MASK;
+ l2_lookup.mask_iotag = BIT(0);
+ l2_lookup.destports = BIT(port);
+
+ rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
+ SJA1105_SEARCH, &l2_lookup);
+ if (rc == 0) {
+ /* Found and this port is already in the entry's
+ * port mask => job done
+ */
+ if (l2_lookup.destports & BIT(port))
+ return 0;
+ /* l2_lookup.index is populated by the switch in case it
+ * found something.
+ */
+ l2_lookup.destports |= BIT(port);
+ goto skip_finding_an_index;
+ }
+
+ /* Not found, so try to find an unused spot in the FDB.
+ * This is slightly inefficient because the strategy is to knock at
+ * every possible position from 0 to 1023.
+ */
+ for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
+ rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
+ i, NULL);
+ if (rc < 0)
+ break;
+ }
+ if (i == SJA1105_MAX_L2_LOOKUP_COUNT) {
+ dev_err(ds->dev, "FDB is full, cannot add entry.\n");
+ return -EINVAL;
+ }
+ l2_lookup.lockeds = true;
+ l2_lookup.index = i;
+
+skip_finding_an_index:
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
+ l2_lookup.index, &l2_lookup,
+ true);
+ if (rc < 0)
+ return rc;
+
+ return sja1105_static_fdb_change(priv, port, &l2_lookup, true);
+}
+
+int sja1105pqrs_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct sja1105_l2_lookup_entry l2_lookup = {0};
+ struct sja1105_private *priv = ds->priv;
+ bool keep;
+ int rc;
+
+ l2_lookup.macaddr = ether_addr_to_u64(addr);
+ l2_lookup.vlanid = vid;
+ l2_lookup.iotag = SJA1105_S_TAG;
+ l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
+ l2_lookup.mask_vlanid = VLAN_VID_MASK;
+ l2_lookup.mask_iotag = BIT(0);
+ l2_lookup.destports = BIT(port);
+
+ rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
+ SJA1105_SEARCH, &l2_lookup);
+ if (rc < 0)
+ return 0;
+
+ l2_lookup.destports &= ~BIT(port);
+
+ /* Decide whether we remove just this port from the FDB entry,
+ * or if we remove it completely.
+ */
if (l2_lookup.destports)
keep = true;
else
keep = false;
- return sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
- index, &l2_lookup, keep);
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
+ l2_lookup.index, &l2_lookup, keep);
+ if (rc < 0)
+ return rc;
+
+ return sja1105_static_fdb_change(priv, port, &l2_lookup, keep);
+}
+
+static int sja1105_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct sja1105_private *priv = ds->priv;
+ u16 rx_vid, tx_vid;
+ int rc, i;
+
+ if (dsa_port_is_vlan_filtering(&ds->ports[port]))
+ return priv->info->fdb_add_cmd(ds, port, addr, vid);
+
+ /* Since we make use of VLANs even when the bridge core doesn't tell us
+ * to, translate these FDB entries into the correct dsa_8021q ones.
+ * The basic idea (also repeats for removal below) is:
+ * - Each of the other front-panel ports needs to be able to forward a
+ * pvid-tagged (aka tagged with their rx_vid) frame that matches this
+ * DMAC.
+ * - The CPU port (aka the tx_vid of this port) needs to be able to
+ * send a frame matching this DMAC to the specified port.
+ * For a better picture see net/dsa/tag_8021q.c.
+ */
+ for (i = 0; i < SJA1105_NUM_PORTS; i++) {
+ if (i == port)
+ continue;
+ if (i == dsa_upstream_port(priv->ds, port))
+ continue;
+
+ rx_vid = dsa_8021q_rx_vid(ds, i);
+ rc = priv->info->fdb_add_cmd(ds, port, addr, rx_vid);
+ if (rc < 0)
+ return rc;
+ }
+ tx_vid = dsa_8021q_tx_vid(ds, port);
+ return priv->info->fdb_add_cmd(ds, port, addr, tx_vid);
+}
+
+static int sja1105_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct sja1105_private *priv = ds->priv;
+ u16 rx_vid, tx_vid;
+ int rc, i;
+
+ if (dsa_port_is_vlan_filtering(&ds->ports[port]))
+ return priv->info->fdb_del_cmd(ds, port, addr, vid);
+
+ for (i = 0; i < SJA1105_NUM_PORTS; i++) {
+ if (i == port)
+ continue;
+ if (i == dsa_upstream_port(priv->ds, port))
+ continue;
+
+ rx_vid = dsa_8021q_rx_vid(ds, i);
+ rc = priv->info->fdb_del_cmd(ds, port, addr, rx_vid);
+ if (rc < 0)
+ return rc;
+ }
+ tx_vid = dsa_8021q_tx_vid(ds, port);
+ return priv->info->fdb_del_cmd(ds, port, addr, tx_vid);
}
static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
@@ -909,8 +1239,12 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
{
struct sja1105_private *priv = ds->priv;
struct device *dev = ds->dev;
+ u16 rx_vid, tx_vid;
int i;
+ rx_vid = dsa_8021q_rx_vid(ds, port);
+ tx_vid = dsa_8021q_tx_vid(ds, port);
+
for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
struct sja1105_l2_lookup_entry l2_lookup = {0};
u8 macaddr[ETH_ALEN];
@@ -919,7 +1253,7 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
i, &l2_lookup);
/* No fdb entry at i, not an issue */
- if (rc == -EINVAL)
+ if (rc == -ENOENT)
continue;
if (rc) {
dev_err(dev, "Failed to dump FDB: %d\n", rc);
@@ -935,7 +1269,41 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
if (!(l2_lookup.destports & BIT(port)))
continue;
u64_to_ether_addr(l2_lookup.macaddr, macaddr);
- cb(macaddr, l2_lookup.vlanid, false, data);
+
+ /* On SJA1105 E/T, the switch doesn't implement the LOCKEDS
+ * bit, so it doesn't tell us whether a FDB entry is static
+ * or not.
+ * But, of course, we can find out - we're the ones who added
+ * it in the first place.
+ */
+ if (priv->info->device_id == SJA1105E_DEVICE_ID ||
+ priv->info->device_id == SJA1105T_DEVICE_ID) {
+ int match;
+
+ match = sja1105_find_static_fdb_entry(priv, port,
+ &l2_lookup);
+ l2_lookup.lockeds = (match >= 0);
+ }
+
+ /* We need to hide the dsa_8021q VLANs from the user. This
+ * basically means hiding the duplicates and only showing
+ * the pvid that is supposed to be active in standalone and
+ * non-vlan_filtering modes (aka 1).
+ * - For statically added FDB entries (bridge fdb add), we
+ * can convert the TX VID (coming from the CPU port) into the
+ * pvid and ignore the RX VIDs of the other ports.
+ * - For dynamically learned FDB entries, a single entry with
+ * no duplicates is learned - that which has the real port's
+ * pvid, aka RX VID.
+ */
+ if (!dsa_port_is_vlan_filtering(&ds->ports[port])) {
+ if (l2_lookup.vlanid == tx_vid ||
+ l2_lookup.vlanid == rx_vid)
+ l2_lookup.vlanid = 1;
+ else
+ continue;
+ }
+ cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data);
}
return 0;
}
@@ -1056,27 +1424,6 @@ static void sja1105_bridge_leave(struct dsa_switch *ds, int port,
sja1105_bridge_member(ds, port, br, false);
}
-static u8 sja1105_stp_state_get(struct sja1105_private *priv, int port)
-{
- struct sja1105_mac_config_entry *mac;
-
- mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
-
- if (!mac[port].ingress && !mac[port].egress && !mac[port].dyn_learn)
- return BR_STATE_BLOCKING;
- if (mac[port].ingress && !mac[port].egress && !mac[port].dyn_learn)
- return BR_STATE_LISTENING;
- if (mac[port].ingress && !mac[port].egress && mac[port].dyn_learn)
- return BR_STATE_LEARNING;
- if (mac[port].ingress && mac[port].egress && mac[port].dyn_learn)
- return BR_STATE_FORWARDING;
- /* This is really an error condition if the MAC was in none of the STP
- * states above. But treating the port as disabled does nothing, which
- * is adequate, and it also resets the MAC to a known state later on.
- */
- return BR_STATE_DISABLED;
-}
-
/* For situations where we need to change a setting at runtime that is only
* available through the static configuration, resetting the switch in order
* to upload the new static config is unavoidable. Back up the settings we
@@ -1087,27 +1434,18 @@ static int sja1105_static_config_reload(struct sja1105_private *priv)
{
struct sja1105_mac_config_entry *mac;
int speed_mbps[SJA1105_NUM_PORTS];
- u8 stp_state[SJA1105_NUM_PORTS];
int rc, i;
mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
- /* Back up settings changed by sja1105_adjust_port_config and
- * sja1105_bridge_stp_state_set and restore their defaults.
+ /* Back up the dynamic link speed changed by sja1105_adjust_port_config
+ * in order to temporarily restore it to SJA1105_SPEED_AUTO - which the
+ * switch wants to see in the static config in order to allow us to
+ * change it through the dynamic interface later.
*/
for (i = 0; i < SJA1105_NUM_PORTS; i++) {
speed_mbps[i] = sja1105_speed[mac[i].speed];
mac[i].speed = SJA1105_SPEED_AUTO;
- if (i == dsa_upstream_port(priv->ds, i)) {
- mac[i].ingress = true;
- mac[i].egress = true;
- mac[i].dyn_learn = true;
- } else {
- stp_state[i] = sja1105_stp_state_get(priv, i);
- mac[i].ingress = false;
- mac[i].egress = false;
- mac[i].dyn_learn = false;
- }
}
/* Reset switch and send updated static configuration */
@@ -1124,13 +1462,7 @@ static int sja1105_static_config_reload(struct sja1105_private *priv)
goto out;
for (i = 0; i < SJA1105_NUM_PORTS; i++) {
- bool enabled = (speed_mbps[i] != 0);
-
- if (i != dsa_upstream_port(priv->ds, i))
- sja1105_bridge_stp_state_set(priv->ds, i, stp_state[i]);
-
- rc = sja1105_adjust_port_config(priv, i, speed_mbps[i],
- enabled);
+ rc = sja1105_adjust_port_config(priv, i, speed_mbps[i]);
if (rc < 0)
goto out;
}
@@ -1138,23 +1470,6 @@ out:
return rc;
}
-/* The TPID setting belongs to the General Parameters table,
- * which can only be partially reconfigured at runtime (and not the TPID).
- * So a switch reset is required.
- */
-static int sja1105_change_tpid(struct sja1105_private *priv,
- u16 tpid, u16 tpid2)
-{
- struct sja1105_general_params_entry *general_params;
- struct sja1105_table *table;
-
- table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
- general_params = table->entries;
- general_params->tpid = tpid;
- general_params->tpid2 = tpid2;
- return sja1105_static_config_reload(priv);
-}
-
static int sja1105_pvid_apply(struct sja1105_private *priv, int port, u16 pvid)
{
struct sja1105_mac_config_entry *mac;
@@ -1273,17 +1588,41 @@ static int sja1105_vlan_prepare(struct dsa_switch *ds, int port,
return 0;
}
+/* The TPID setting belongs to the General Parameters table,
+ * which can only be partially reconfigured at runtime (and not the TPID).
+ * So a switch reset is required.
+ */
static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
{
+ struct sja1105_general_params_entry *general_params;
struct sja1105_private *priv = ds->priv;
+ struct sja1105_table *table;
+ u16 tpid, tpid2;
int rc;
- if (enabled)
+ if (enabled) {
/* Enable VLAN filtering. */
- rc = sja1105_change_tpid(priv, ETH_P_8021Q, ETH_P_8021AD);
- else
+ tpid = ETH_P_8021AD;
+ tpid2 = ETH_P_8021Q;
+ } else {
/* Disable VLAN filtering. */
- rc = sja1105_change_tpid(priv, ETH_P_SJA1105, ETH_P_SJA1105);
+ tpid = ETH_P_SJA1105;
+ tpid2 = ETH_P_SJA1105;
+ }
+
+ table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
+ general_params = table->entries;
+ /* EtherType used to identify outer tagged (S-tag) VLAN traffic */
+ general_params->tpid = tpid;
+ /* EtherType used to identify inner tagged (C-tag) VLAN traffic */
+ general_params->tpid2 = tpid2;
+ /* When VLAN filtering is on, we need to at least be able to
+ * decode management traffic through the "backup plan".
+ */
+ general_params->incl_srcpt1 = enabled;
+ general_params->incl_srcpt0 = enabled;
+
+ rc = sja1105_static_config_reload(priv);
if (rc)
dev_err(ds->dev, "Failed to change VLAN Ethertype\n");
@@ -1372,6 +1711,11 @@ static int sja1105_setup(struct dsa_switch *ds)
return rc;
}
+ rc = sja1105_ptp_clock_register(priv);
+ if (rc < 0) {
+ dev_err(ds->dev, "Failed to register PTP clock: %d\n", rc);
+ return rc;
+ }
/* Create and send configuration down to device */
rc = sja1105_static_config_load(priv, ports);
if (rc < 0) {
@@ -1401,8 +1745,16 @@ static int sja1105_setup(struct dsa_switch *ds)
return sja1105_setup_8021q_tagging(ds, true);
}
+static void sja1105_teardown(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+
+ cancel_work_sync(&priv->tagger_data.rxtstamp_work);
+ skb_queue_purge(&priv->tagger_data.skb_rxtstamp_queue);
+}
+
static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
- struct sk_buff *skb)
+ struct sk_buff *skb, bool takets)
{
struct sja1105_mgmt_entry mgmt_route = {0};
struct sja1105_private *priv = ds->priv;
@@ -1415,6 +1767,8 @@ static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
mgmt_route.macaddr = ether_addr_to_u64(hdr->h_dest);
mgmt_route.destports = BIT(port);
mgmt_route.enfport = 1;
+ mgmt_route.tsreg = 0;
+ mgmt_route.takets = takets;
rc = sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE,
slot, &mgmt_route, true);
@@ -1446,6 +1800,8 @@ static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
if (!timeout) {
/* Clean up the management route so that a follow-up
* frame may not match on it by mistake.
+ * This is only supported in hardware on P/Q/R/S - on E/T it is
+ * a no-op and we are silently discarding the -EOPNOTSUPP.
*/
sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE,
slot, &mgmt_route, false);
@@ -1464,7 +1820,11 @@ static netdev_tx_t sja1105_port_deferred_xmit(struct dsa_switch *ds, int port,
{
struct sja1105_private *priv = ds->priv;
struct sja1105_port *sp = &priv->ports[port];
+ struct skb_shared_hwtstamps shwt = {0};
int slot = sp->mgmt_slot;
+ struct sk_buff *clone;
+ u64 now, ts;
+ int rc;
/* The tragic fact about the switch having 4x2 slots for installing
* management routes is that all of them except one are actually
@@ -1482,8 +1842,36 @@ static netdev_tx_t sja1105_port_deferred_xmit(struct dsa_switch *ds, int port,
*/
mutex_lock(&priv->mgmt_lock);
- sja1105_mgmt_xmit(ds, port, slot, skb);
+ /* The clone, if there, was made by dsa_skb_tx_timestamp */
+ clone = DSA_SKB_CB(skb)->clone;
+
+ sja1105_mgmt_xmit(ds, port, slot, skb, !!clone);
+
+ if (!clone)
+ goto out;
+
+ skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;
+
+ mutex_lock(&priv->ptp_lock);
+
+ now = priv->tstamp_cc.read(&priv->tstamp_cc);
+
+ rc = sja1105_ptpegr_ts_poll(priv, slot, &ts);
+ if (rc < 0) {
+ dev_err(ds->dev, "xmit: timed out polling for tstamp\n");
+ kfree_skb(clone);
+ goto out_unlock_ptp;
+ }
+
+ ts = sja1105_tstamp_reconstruct(priv, now, ts);
+ ts = timecounter_cyc2time(&priv->tstamp_tc, ts);
+
+ shwt.hwtstamp = ns_to_ktime(ts);
+ skb_complete_tx_timestamp(clone, &shwt);
+out_unlock_ptp:
+ mutex_unlock(&priv->ptp_lock);
+out:
mutex_unlock(&priv->mgmt_lock);
return NETDEV_TX_OK;
}
@@ -1512,15 +1900,180 @@ static int sja1105_set_ageing_time(struct dsa_switch *ds,
return sja1105_static_config_reload(priv);
}
+/* Caller must hold priv->tagger_data.meta_lock */
+static int sja1105_change_rxtstamping(struct sja1105_private *priv,
+ bool on)
+{
+ struct sja1105_general_params_entry *general_params;
+ struct sja1105_table *table;
+ int rc;
+
+ table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
+ general_params = table->entries;
+ general_params->send_meta1 = on;
+ general_params->send_meta0 = on;
+
+ rc = sja1105_init_avb_params(priv, on);
+ if (rc < 0)
+ return rc;
+
+ /* Initialize the meta state machine to a known state */
+ if (priv->tagger_data.stampable_skb) {
+ kfree_skb(priv->tagger_data.stampable_skb);
+ priv->tagger_data.stampable_skb = NULL;
+ }
+
+ return sja1105_static_config_reload(priv);
+}
+
+static int sja1105_hwtstamp_set(struct dsa_switch *ds, int port,
+ struct ifreq *ifr)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct hwtstamp_config config;
+ bool rx_on;
+ int rc;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ priv->ports[port].hwts_tx_en = false;
+ break;
+ case HWTSTAMP_TX_ON:
+ priv->ports[port].hwts_tx_en = true;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ rx_on = false;
+ break;
+ default:
+ rx_on = true;
+ break;
+ }
+
+ if (rx_on != priv->tagger_data.hwts_rx_en) {
+ spin_lock(&priv->tagger_data.meta_lock);
+ rc = sja1105_change_rxtstamping(priv, rx_on);
+ spin_unlock(&priv->tagger_data.meta_lock);
+ if (rc < 0) {
+ dev_err(ds->dev,
+ "Failed to change RX timestamping: %d\n", rc);
+ return -EFAULT;
+ }
+ priv->tagger_data.hwts_rx_en = rx_on;
+ }
+
+ if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
+ return -EFAULT;
+ return 0;
+}
+
+static int sja1105_hwtstamp_get(struct dsa_switch *ds, int port,
+ struct ifreq *ifr)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct hwtstamp_config config;
+
+ config.flags = 0;
+ if (priv->ports[port].hwts_tx_en)
+ config.tx_type = HWTSTAMP_TX_ON;
+ else
+ config.tx_type = HWTSTAMP_TX_OFF;
+ if (priv->tagger_data.hwts_rx_en)
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ else
+ config.rx_filter = HWTSTAMP_FILTER_NONE;
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
+#define to_tagger(d) \
+ container_of((d), struct sja1105_tagger_data, rxtstamp_work)
+#define to_sja1105(d) \
+ container_of((d), struct sja1105_private, tagger_data)
+
+static void sja1105_rxtstamp_work(struct work_struct *work)
+{
+ struct sja1105_tagger_data *data = to_tagger(work);
+ struct sja1105_private *priv = to_sja1105(data);
+ struct sk_buff *skb;
+ u64 now;
+
+ mutex_lock(&priv->ptp_lock);
+
+ now = priv->tstamp_cc.read(&priv->tstamp_cc);
+
+ while ((skb = skb_dequeue(&data->skb_rxtstamp_queue)) != NULL) {
+ struct skb_shared_hwtstamps *shwt = skb_hwtstamps(skb);
+ u64 ts;
+
+ *shwt = (struct skb_shared_hwtstamps) {0};
+
+ ts = SJA1105_SKB_CB(skb)->meta_tstamp;
+ ts = sja1105_tstamp_reconstruct(priv, now, ts);
+ ts = timecounter_cyc2time(&priv->tstamp_tc, ts);
+
+ shwt->hwtstamp = ns_to_ktime(ts);
+ netif_rx_ni(skb);
+ }
+
+ mutex_unlock(&priv->ptp_lock);
+}
+
+/* Called from dsa_skb_defer_rx_timestamp */
+static bool sja1105_port_rxtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *skb, unsigned int type)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_tagger_data *data = &priv->tagger_data;
+
+ if (!data->hwts_rx_en)
+ return false;
+
+ /* We need to read the full PTP clock to reconstruct the Rx
+ * timestamp. For that we need a sleepable context.
+ */
+ skb_queue_tail(&data->skb_rxtstamp_queue, skb);
+ schedule_work(&data->rxtstamp_work);
+ return true;
+}
+
+/* Called from dsa_skb_tx_timestamp. This callback is just to make DSA clone
+ * the skb and have it available in DSA_SKB_CB in the .port_deferred_xmit
+ * callback, where we will timestamp it synchronously.
+ */
+static bool sja1105_port_txtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *skb, unsigned int type)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_port *sp = &priv->ports[port];
+
+ if (!sp->hwts_tx_en)
+ return false;
+
+ return true;
+}
+
static const struct dsa_switch_ops sja1105_switch_ops = {
.get_tag_protocol = sja1105_get_tag_protocol,
.setup = sja1105_setup,
- .adjust_link = sja1105_adjust_link,
+ .teardown = sja1105_teardown,
.set_ageing_time = sja1105_set_ageing_time,
.phylink_validate = sja1105_phylink_validate,
+ .phylink_mac_config = sja1105_mac_config,
+ .phylink_mac_link_up = sja1105_mac_link_up,
+ .phylink_mac_link_down = sja1105_mac_link_down,
.get_strings = sja1105_get_strings,
.get_ethtool_stats = sja1105_get_ethtool_stats,
.get_sset_count = sja1105_get_sset_count,
+ .get_ts_info = sja1105_get_ts_info,
.port_fdb_dump = sja1105_fdb_dump,
.port_fdb_add = sja1105_fdb_add,
.port_fdb_del = sja1105_fdb_del,
@@ -1535,6 +2088,10 @@ static const struct dsa_switch_ops sja1105_switch_ops = {
.port_mdb_add = sja1105_mdb_add,
.port_mdb_del = sja1105_mdb_del,
.port_deferred_xmit = sja1105_port_deferred_xmit,
+ .port_hwtstamp_get = sja1105_hwtstamp_get,
+ .port_hwtstamp_set = sja1105_hwtstamp_set,
+ .port_rxtstamp = sja1105_port_rxtstamp,
+ .port_txtstamp = sja1105_port_txtstamp,
};
static int sja1105_check_device_id(struct sja1105_private *priv)
@@ -1575,6 +2132,7 @@ static int sja1105_check_device_id(struct sja1105_private *priv)
static int sja1105_probe(struct spi_device *spi)
{
+ struct sja1105_tagger_data *tagger_data;
struct device *dev = &spi->dev;
struct sja1105_private *priv;
struct dsa_switch *ds;
@@ -1629,12 +2187,17 @@ static int sja1105_probe(struct spi_device *spi)
ds->priv = priv;
priv->ds = ds;
+ tagger_data = &priv->tagger_data;
+ skb_queue_head_init(&tagger_data->skb_rxtstamp_queue);
+ INIT_WORK(&tagger_data->rxtstamp_work, sja1105_rxtstamp_work);
+
/* Connections between dsa_port and sja1105_port */
for (i = 0; i < SJA1105_NUM_PORTS; i++) {
struct sja1105_port *sp = &priv->ports[i];
ds->ports[i].priv = sp;
sp->dp = &ds->ports[i];
+ sp->data = tagger_data;
}
mutex_init(&priv->mgmt_lock);
@@ -1645,6 +2208,7 @@ static int sja1105_remove(struct spi_device *spi)
{
struct sja1105_private *priv = spi_get_drvdata(spi);
+ sja1105_ptp_clock_unregister(priv);
dsa_unregister_switch(priv->ds);
sja1105_static_config_free(&priv->static_config);
return 0;
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.c b/drivers/net/dsa/sja1105/sja1105_ptp.c
new file mode 100644
index 000000000000..d19cfdf681af
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.c
@@ -0,0 +1,393 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+#include "sja1105.h"
+
+/* The adjfine API clamps ppb between [-32,768,000, 32,768,000], and
+ * therefore scaled_ppm between [-2,147,483,648, 2,147,483,647].
+ * Set the maximum supported ppb to a round value smaller than the maximum.
+ *
+ * In percentage terms, this is a +/- 0.032x adjustment of the
+ * free-running counter (0.968x to 1.032x).
+ */
+#define SJA1105_MAX_ADJ_PPB 32000000
+#define SJA1105_SIZE_PTP_CMD 4
+
+/* Timestamps are in units of 8 ns clock ticks (equivalent to a fixed
+ * 125 MHz clock), so the scale factor (MULT / 2^SHIFT) needs to be 8.
+ * Furthermore, wisely pick SHIFT as 28 bits, which translates
+ * MULT into 2^31 (0x80000000). This is the same value around which
+ * the hardware PTPCLKRATE is centered, so the same ppb conversion
+ * arithmetic can be reused.
+ */
+#define SJA1105_CC_SHIFT 28
+#define SJA1105_CC_MULT (8 << SJA1105_CC_SHIFT)
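As a quick sanity check on the comment above, the following standalone sketch (not part of the patch) confirms that mult = 8 << 28 together with shift = 28 turns 125 MHz ticks into nanoseconds the same way the cyclecounter core does, i.e. ns = (cycles * mult) >> shift.

/* Standalone check of the mult/shift choice above; illustrative only. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const unsigned int shift = 28;
	const uint64_t mult = 8ULL << shift;	/* 0x80000000 */
	uint64_t cycles = 125000000;		/* one second of 125 MHz ticks */
	uint64_t ns = (cycles * mult) >> shift;

	/* Prints mult=0x80000000 ns=1000000000 */
	printf("mult=0x%llx ns=%llu\n",
	       (unsigned long long)mult, (unsigned long long)ns);
	return 0;
}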
+
+/* Having 33 bits of cycle counter left until a 64-bit overflow during delta
+ * conversion, we multiply this by the 8 ns counter resolution and arrive at
+ * a comfortable 68.71 second refresh interval until the delta would cause
+ * an integer overflow, in the absence of any other readout.
+ * This is approximated here to 1 minute.
+ */
+#define SJA1105_REFRESH_INTERVAL (HZ * 60)
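The ~68.7 second figure can be reproduced with a few lines of standalone arithmetic (a sketch, not part of the patch): the timecounter delta conversion multiplies the cycle delta by mult, so the 64-bit product overflows once the delta exceeds 2^64 / 2^31 = 2^33 cycles.

/* Standalone arithmetic check of the refresh margin; illustrative only. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t mult = 8ULL << 28;			/* SJA1105_CC_MULT */
	uint64_t max_cycles = UINT64_MAX / mult;	/* ~2^33 cycles */
	double seconds = (double)max_cycles * 8e-9;	/* 8 ns per cycle */

	/* Prints roughly 68.7 s, hence the 1 minute refresh interval */
	printf("max delta %llu cycles = %.1f s\n",
	       (unsigned long long)max_cycles, seconds);
	return 0;
}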
+
+/* This range is actually +/- SJA1105_MAX_ADJ_PPB
+ * divided by 1000 (ppb -> ppm) and with a 16-bit
+ * "fractional" part (actually fixed point).
+ * |
+ * v
+ * Convert scaled_ppm from the +/- ((10^6) << 16) range
+ * into the +/- (1 << 31) range.
+ *
+ * This forgoes a "ppb" numeric representation (up to NSEC_PER_SEC)
+ * and defines the scaling factor between scaled_ppm and the actual
+ * frequency adjustments (both cycle counter and hardware).
+ *
+ * ptpclkrate = scaled_ppm * 2^31 / (10^6 * 2^16)
+ * simplifies to
+ * ptpclkrate = scaled_ppm * 2^9 / 5^6
+ */
+#define SJA1105_CC_MULT_NUM (1 << 9)
+#define SJA1105_CC_MULT_DEM 15625
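A short worked example of the conversion, as a standalone sketch (not part of the patch): a +100 ppm request arrives as scaled_ppm = 100 << 16, and the formula yields the ~214748 correction that sja1105_ptp_adjfine() later adds to SJA1105_CC_MULT.

/* Worked example of the scaled_ppm -> clkrate conversion; illustrative only. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	long scaled_ppm = 100L << 16;		/* a +100 ppm request */
	int64_t clkrate = (int64_t)scaled_ppm * (1 << 9) / 15625;
	uint64_t mult = 8ULL << 28;		/* SJA1105_CC_MULT */

	/* 100 ppm of 2^31 is ~214748, so clkrate lands on that value */
	printf("clkrate=%lld new mult=%llu\n",
	       (long long)clkrate, (unsigned long long)(mult + clkrate));
	return 0;
}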
+
+#define ptp_to_sja1105(d) container_of((d), struct sja1105_private, ptp_caps)
+#define cc_to_sja1105(d) container_of((d), struct sja1105_private, tstamp_cc)
+#define dw_to_sja1105(d) container_of((d), struct sja1105_private, refresh_work)
+
+struct sja1105_ptp_cmd {
+ u64 resptp; /* reset */
+};
+
+int sja1105_get_ts_info(struct dsa_switch *ds, int port,
+ struct ethtool_ts_info *info)
+{
+ struct sja1105_private *priv = ds->priv;
+
+ /* Called during cleanup */
+ if (!priv->clock)
+ return -ENODEV;
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) |
+ (1 << HWTSTAMP_TX_ON);
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT);
+ info->phc_index = ptp_clock_index(priv->clock);
+ return 0;
+}
+
+int sja1105et_ptp_cmd(const void *ctx, const void *data)
+{
+ const struct sja1105_ptp_cmd *cmd = data;
+ const struct sja1105_private *priv = ctx;
+ const struct sja1105_regs *regs = priv->info->regs;
+ const int size = SJA1105_SIZE_PTP_CMD;
+ u8 buf[SJA1105_SIZE_PTP_CMD] = {0};
+ /* No need to keep this as part of the structure */
+ u64 valid = 1;
+
+ sja1105_pack(buf, &valid, 31, 31, size);
+ sja1105_pack(buf, &cmd->resptp, 2, 2, size);
+
+ return sja1105_spi_send_packed_buf(priv, SPI_WRITE, regs->ptp_control,
+ buf, SJA1105_SIZE_PTP_CMD);
+}
+
+int sja1105pqrs_ptp_cmd(const void *ctx, const void *data)
+{
+ const struct sja1105_ptp_cmd *cmd = data;
+ const struct sja1105_private *priv = ctx;
+ const struct sja1105_regs *regs = priv->info->regs;
+ const int size = SJA1105_SIZE_PTP_CMD;
+ u8 buf[SJA1105_SIZE_PTP_CMD] = {0};
+ /* No need to keep this as part of the structure */
+ u64 valid = 1;
+
+ sja1105_pack(buf, &valid, 31, 31, size);
+ sja1105_pack(buf, &cmd->resptp, 3, 3, size);
+
+ return sja1105_spi_send_packed_buf(priv, SPI_WRITE, regs->ptp_control,
+ buf, SJA1105_SIZE_PTP_CMD);
+}
+
+/* The switch returns partial timestamps (24 bits for SJA1105 E/T, which wrap
+ * around in 0.135 seconds, and 32 bits for P/Q/R/S, wrapping around in 34.35
+ * seconds).
+ *
+ * This receives the RX or TX MAC timestamps, provided by hardware as
+ * the lower bits of the cycle counter, sampled at the time the timestamp was
+ * collected.
+ *
+ * To reconstruct into a full 64-bit-wide timestamp, the cycle counter is
+ * read and the high-order bits are filled in.
+ *
+ * Must be called within one wraparound period of the moment the partial
+ * timestamp was generated by the MAC.
+ */
+u64 sja1105_tstamp_reconstruct(struct sja1105_private *priv, u64 now,
+ u64 ts_partial)
+{
+ u64 partial_tstamp_mask = CYCLECOUNTER_MASK(priv->info->ptp_ts_bits);
+ u64 ts_reconstructed;
+
+ ts_reconstructed = (now & ~partial_tstamp_mask) | ts_partial;
+
+ /* Check the lower bits of the current cycle counter against the timestamp.
+ * If they are lower than or equal to the partial timestamp, then a
+ * wraparound has surely occurred and must be accounted for.
+ */
+ if ((now & partial_tstamp_mask) <= ts_partial)
+ ts_reconstructed -= (partial_tstamp_mask + 1);
+
+ return ts_reconstructed;
+}
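To make the wraparound branch concrete, here is a standalone illustration (not part of the patch) using made-up numbers and the 24-bit partial width of the E/T devices.

/* Standalone illustration of the wraparound handling; made-up numbers. */
#include <stdint.h>
#include <stdio.h>

static uint64_t reconstruct(uint64_t now, uint64_t ts_partial, int bits)
{
	uint64_t mask = (1ULL << bits) - 1;
	uint64_t ts = (now & ~mask) | ts_partial;

	if ((now & mask) <= ts_partial)
		ts -= (mask + 1);
	return ts;
}

int main(void)
{
	/* The low 24 bits have wrapped since the stamp was taken:
	 * now = 0x2000010, partial = 0xfffff0 -> full ts = 0x1fffff0
	 */
	printf("0x%llx\n",
	       (unsigned long long)reconstruct(0x2000010, 0xfffff0, 24));
	return 0;
}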
+
+/* Reads the SPI interface for an egress timestamp generated by the switch
+ * for frames sent using management routes.
+ *
+ * SJA1105 E/T layout of the 4-byte SPI payload:
+ *
+ * 31    23    15    7     0
+ * |     |     |     |     |
+ * +-----+-----+-----+     ^
+ *          ^              |
+ *          |              |
+ *  24-bit timestamp   Update bit
+ *
+ *
+ * SJA1105 P/Q/R/S layout of the 8-byte SPI payload:
+ *
+ * 31    23    15    7     0     63    55    47    39    32
+ * |     |     |     |     |     |     |     |     |     |
+ *                         ^     +-----+-----+-----+-----+
+ *                         |                ^
+ *                         |                |
+ *                    Update bit    32-bit timestamp
+ *
+ * Notice that the update bit is in the same place.
+ * To have common code for E/T and P/Q/R/S for reading the timestamp,
+ * we need to juggle with the offset and the bit indices.
+ */
+int sja1105_ptpegr_ts_poll(struct sja1105_private *priv, int port, u64 *ts)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ int tstamp_bit_start, tstamp_bit_end;
+ int timeout = 10;
+ u8 packed_buf[8];
+ u64 update;
+ int rc;
+
+ do {
+ rc = sja1105_spi_send_packed_buf(priv, SPI_READ,
+ regs->ptpegr_ts[port],
+ packed_buf,
+ priv->info->ptpegr_ts_bytes);
+ if (rc < 0)
+ return rc;
+
+ sja1105_unpack(packed_buf, &update, 0, 0,
+ priv->info->ptpegr_ts_bytes);
+ if (update)
+ break;
+
+ usleep_range(10, 50);
+ } while (--timeout);
+
+ if (!timeout)
+ return -ETIMEDOUT;
+
+ /* Point the end bit to the second 32-bit word on P/Q/R/S,
+ * no-op on E/T.
+ */
+ tstamp_bit_end = (priv->info->ptpegr_ts_bytes - 4) * 8;
+ /* Shift the 24-bit timestamp on E/T to be collected from 31:8.
+ * No-op on P/Q/R/S.
+ */
+ tstamp_bit_end += 32 - priv->info->ptp_ts_bits;
+ tstamp_bit_start = tstamp_bit_end + priv->info->ptp_ts_bits - 1;
+
+ *ts = 0;
+
+ sja1105_unpack(packed_buf, ts, tstamp_bit_start, tstamp_bit_end,
+ priv->info->ptpegr_ts_bytes);
+
+ return 0;
+}
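A standalone check (not part of the patch) of the offset and bit-index juggling described above, for both device families:

/* Standalone check of the bit index arithmetic; illustrative only. */
#include <stdio.h>

static void show(const char *name, int ts_bytes, int ts_bits)
{
	int end = (ts_bytes - 4) * 8 + (32 - ts_bits);
	int start = end + ts_bits - 1;

	printf("%s: timestamp occupies bits %d:%d\n", name, start, end);
}

int main(void)
{
	show("E/T (4 bytes, 24 bits)", 4, 24);		/* bits 31:8 */
	show("P/Q/R/S (8 bytes, 32 bits)", 8, 32);	/* bits 63:32 */
	return 0;
}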
+
+int sja1105_ptp_reset(struct sja1105_private *priv)
+{
+ struct dsa_switch *ds = priv->ds;
+ struct sja1105_ptp_cmd cmd = {0};
+ int rc;
+
+ mutex_lock(&priv->ptp_lock);
+
+ cmd.resptp = 1;
+ dev_dbg(ds->dev, "Resetting PTP clock\n");
+ rc = priv->info->ptp_cmd(priv, &cmd);
+
+ timecounter_init(&priv->tstamp_tc, &priv->tstamp_cc,
+ ktime_to_ns(ktime_get_real()));
+
+ mutex_unlock(&priv->ptp_lock);
+
+ return rc;
+}
+
+static int sja1105_ptp_gettime(struct ptp_clock_info *ptp,
+ struct timespec64 *ts)
+{
+ struct sja1105_private *priv = ptp_to_sja1105(ptp);
+ u64 ns;
+
+ mutex_lock(&priv->ptp_lock);
+ ns = timecounter_read(&priv->tstamp_tc);
+ mutex_unlock(&priv->ptp_lock);
+
+ *ts = ns_to_timespec64(ns);
+
+ return 0;
+}
+
+static int sja1105_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct sja1105_private *priv = ptp_to_sja1105(ptp);
+ u64 ns = timespec64_to_ns(ts);
+
+ mutex_lock(&priv->ptp_lock);
+ timecounter_init(&priv->tstamp_tc, &priv->tstamp_cc, ns);
+ mutex_unlock(&priv->ptp_lock);
+
+ return 0;
+}
+
+static int sja1105_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct sja1105_private *priv = ptp_to_sja1105(ptp);
+ s64 clkrate;
+
+ clkrate = (s64)scaled_ppm * SJA1105_CC_MULT_NUM;
+ clkrate = div_s64(clkrate, SJA1105_CC_MULT_DEM);
+
+ mutex_lock(&priv->ptp_lock);
+
+ /* Force a readout to update the timer *before* changing its frequency.
+ *
+ * This way, its corrected time curve can at all times be modeled
+ * as a linear "A * x + B" function, where:
+ *
+ * - B reflects past frequency adjustments and offset shifts, all
+ * accumulated into the cycle_last variable.
+ *
+ * - A is the new frequency adjustment we're just about to set.
+ *
+ * Reading now makes B accumulate the correct amount of time,
+ * corrected at the old rate, before changing it.
+ *
+ * Hardware timestamps then become simple points on the curve and
+ * are approximated using the above function. This is still better
+ * than letting the switch take the timestamps using the hardware
+ * rate-corrected clock (PTPCLKVAL) - the comparison in this case would
+ * be that we're shifting the ruler at the same time as we're taking
+ * measurements with it.
+ *
+ * The disadvantage is that it's possible to receive timestamps when
+ * a frequency adjustment took place in the near past.
+ * In this case they will be approximated using the new ppb value
+ * instead of a compound function made of two segments (one at the old
+ * and the other at the new rate) - introducing some inaccuracy.
+ */
+ timecounter_read(&priv->tstamp_tc);
+
+ priv->tstamp_cc.mult = SJA1105_CC_MULT + clkrate;
+
+ mutex_unlock(&priv->ptp_lock);
+
+ return 0;
+}
+
+static int sja1105_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct sja1105_private *priv = ptp_to_sja1105(ptp);
+
+ mutex_lock(&priv->ptp_lock);
+ timecounter_adjtime(&priv->tstamp_tc, delta);
+ mutex_unlock(&priv->ptp_lock);
+
+ return 0;
+}
+
+static u64 sja1105_ptptsclk_read(const struct cyclecounter *cc)
+{
+ struct sja1105_private *priv = cc_to_sja1105(cc);
+ const struct sja1105_regs *regs = priv->info->regs;
+ u64 ptptsclk = 0;
+ int rc;
+
+ rc = sja1105_spi_send_int(priv, SPI_READ, regs->ptptsclk,
+ &ptptsclk, 8);
+ if (rc < 0)
+ dev_err_ratelimited(priv->ds->dev,
+ "failed to read ptp cycle counter: %d\n",
+ rc);
+ return ptptsclk;
+}
+
+static void sja1105_ptp_overflow_check(struct work_struct *work)
+{
+ struct delayed_work *dw = to_delayed_work(work);
+ struct sja1105_private *priv = dw_to_sja1105(dw);
+ struct timespec64 ts;
+
+ sja1105_ptp_gettime(&priv->ptp_caps, &ts);
+
+ schedule_delayed_work(&priv->refresh_work, SJA1105_REFRESH_INTERVAL);
+}
+
+static const struct ptp_clock_info sja1105_ptp_caps = {
+ .owner = THIS_MODULE,
+ .name = "SJA1105 PHC",
+ .adjfine = sja1105_ptp_adjfine,
+ .adjtime = sja1105_ptp_adjtime,
+ .gettime64 = sja1105_ptp_gettime,
+ .settime64 = sja1105_ptp_settime,
+ .max_adj = SJA1105_MAX_ADJ_PPB,
+};
+
+int sja1105_ptp_clock_register(struct sja1105_private *priv)
+{
+ struct dsa_switch *ds = priv->ds;
+
+ /* Set up the cycle counter */
+ priv->tstamp_cc = (struct cyclecounter) {
+ .read = sja1105_ptptsclk_read,
+ .mask = CYCLECOUNTER_MASK(64),
+ .shift = SJA1105_CC_SHIFT,
+ .mult = SJA1105_CC_MULT,
+ };
+ mutex_init(&priv->ptp_lock);
+ INIT_DELAYED_WORK(&priv->refresh_work, sja1105_ptp_overflow_check);
+
+ schedule_delayed_work(&priv->refresh_work, SJA1105_REFRESH_INTERVAL);
+
+ priv->ptp_caps = sja1105_ptp_caps;
+
+ priv->clock = ptp_clock_register(&priv->ptp_caps, ds->dev);
+ if (IS_ERR_OR_NULL(priv->clock))
+ return PTR_ERR(priv->clock);
+
+ return sja1105_ptp_reset(priv);
+}
+
+void sja1105_ptp_clock_unregister(struct sja1105_private *priv)
+{
+ if (IS_ERR_OR_NULL(priv->clock))
+ return;
+
+ cancel_delayed_work_sync(&priv->refresh_work);
+ ptp_clock_unregister(priv->clock);
+ priv->clock = NULL;
+}
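Once registered, the clock appears as a /dev/ptpN character device whose index .get_ts_info reports to user space. A minimal sketch for reading it (not part of the patch; /dev/ptp0 is an assumed path, the real index depends on the system):

/* User-space sketch for reading the registered PHC; illustrative only. */
#include <fcntl.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define CLOCKFD 3
#define FD_TO_CLOCKID(fd) ((~(clockid_t)(fd) << 3) | CLOCKFD)

int main(void)
{
	struct timespec ts;
	int fd = open("/dev/ptp0", O_RDWR);	/* assumed PHC index */

	if (fd < 0)
		return 1;
	if (!clock_gettime(FD_TO_CLOCKID(fd), &ts))
		printf("PHC time: %lld.%09ld\n",
		       (long long)ts.tv_sec, ts.tv_nsec);
	close(fd);
	return 0;
}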
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.h b/drivers/net/dsa/sja1105/sja1105_ptp.h
new file mode 100644
index 000000000000..af456b0a4d27
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+#ifndef _SJA1105_PTP_H
+#define _SJA1105_PTP_H
+
+#if IS_ENABLED(CONFIG_NET_DSA_SJA1105_PTP)
+
+int sja1105_ptp_clock_register(struct sja1105_private *priv);
+
+void sja1105_ptp_clock_unregister(struct sja1105_private *priv);
+
+int sja1105_ptpegr_ts_poll(struct sja1105_private *priv, int port, u64 *ts);
+
+int sja1105et_ptp_cmd(const void *ctx, const void *data);
+
+int sja1105pqrs_ptp_cmd(const void *ctx, const void *data);
+
+int sja1105_get_ts_info(struct dsa_switch *ds, int port,
+ struct ethtool_ts_info *ts);
+
+u64 sja1105_tstamp_reconstruct(struct sja1105_private *priv, u64 now,
+ u64 ts_partial);
+
+int sja1105_ptp_reset(struct sja1105_private *priv);
+
+#else
+
+static inline int sja1105_ptp_clock_register(struct sja1105_private *priv)
+{
+ return 0;
+}
+
+static inline void sja1105_ptp_clock_unregister(struct sja1105_private *priv)
+{
+ return;
+}
+
+static inline int
+sja1105_ptpegr_ts_poll(struct sja1105_private *priv, int port, u64 *ts)
+{
+ return 0;
+}
+
+static inline u64 sja1105_tstamp_reconstruct(struct sja1105_private *priv,
+ u64 now, u64 ts_partial)
+{
+ return 0;
+}
+
+static inline int sja1105_ptp_reset(struct sja1105_private *priv)
+{
+ return 0;
+}
+
+#define sja1105et_ptp_cmd NULL
+
+#define sja1105pqrs_ptp_cmd NULL
+
+#define sja1105_get_ts_info NULL
+
+#endif /* IS_ENABLED(CONFIG_NET_DSA_SJA1105_PTP) */
+
+#endif /* _SJA1105_PTP_H */
diff --git a/drivers/net/dsa/sja1105/sja1105_spi.c b/drivers/net/dsa/sja1105/sja1105_spi.c
index 2eb70b8acfc3..84dc603138cf 100644
--- a/drivers/net/dsa/sja1105/sja1105_spi.c
+++ b/drivers/net/dsa/sja1105/sja1105_spi.c
@@ -283,20 +283,22 @@ static int sja1105_cold_reset(const struct sja1105_private *priv)
return priv->info->reset_cmd(priv, &reset);
}
-static int sja1105_inhibit_tx(const struct sja1105_private *priv,
- const unsigned long *port_bitmap)
+int sja1105_inhibit_tx(const struct sja1105_private *priv,
+ unsigned long port_bitmap, bool tx_inhibited)
{
const struct sja1105_regs *regs = priv->info->regs;
u64 inhibit_cmd;
- int port, rc;
+ int rc;
rc = sja1105_spi_send_int(priv, SPI_READ, regs->port_control,
&inhibit_cmd, SJA1105_SIZE_PORT_CTRL);
if (rc < 0)
return rc;
- for_each_set_bit(port, port_bitmap, SJA1105_NUM_PORTS)
- inhibit_cmd |= BIT(port);
+ if (tx_inhibited)
+ inhibit_cmd |= port_bitmap;
+ else
+ inhibit_cmd &= ~port_bitmap;
return sja1105_spi_send_int(priv, SPI_WRITE, regs->port_control,
&inhibit_cmd, SJA1105_SIZE_PORT_CTRL);
@@ -413,7 +415,7 @@ int sja1105_static_config_upload(struct sja1105_private *priv)
* Tx on all ports and waiting for current packet to drain.
* Otherwise, the PHY will see an unterminated Ethernet packet.
*/
- rc = sja1105_inhibit_tx(priv, &port_bitmap);
+ rc = sja1105_inhibit_tx(priv, port_bitmap, true);
if (rc < 0) {
dev_err(dev, "Failed to inhibit Tx on ports\n");
return -ENXIO;
@@ -478,7 +480,12 @@ int sja1105_static_config_upload(struct sja1105_private *priv)
dev_info(dev, "Succeeded after %d tries\n", RETRIES - retries);
}
+ rc = sja1105_ptp_reset(priv);
+ if (rc < 0)
+ dev_err(dev, "Failed to reset PTP clock: %d\n", rc);
+
dev_info(dev, "Reset switch and programmed static config\n");
+
out:
kfree(config_buf);
return rc;
@@ -491,11 +498,10 @@ static struct sja1105_regs sja1105et_regs = {
.port_control = 0x11,
.config = 0x020000,
.rgu = 0x100440,
+ /* UM10944.pdf, Table 86, ACU Register overview */
.pad_mii_tx = {0x100800, 0x100802, 0x100804, 0x100806, 0x100808},
.rmii_pll1 = 0x10000A,
.cgu_idiv = {0x10000B, 0x10000C, 0x10000D, 0x10000E, 0x10000F},
- /* UM10944.pdf, Table 86, ACU Register overview */
- .rgmii_pad_mii_tx = {0x100800, 0x100802, 0x100804, 0x100806, 0x100808},
.mac = {0x200, 0x202, 0x204, 0x206, 0x208},
.mac_hl1 = {0x400, 0x410, 0x420, 0x430, 0x440},
.mac_hl2 = {0x600, 0x610, 0x620, 0x630, 0x640},
@@ -507,6 +513,11 @@ static struct sja1105_regs sja1105et_regs = {
.rgmii_tx_clk = {0x100016, 0x10001D, 0x100024, 0x10002B, 0x100032},
.rmii_ref_clk = {0x100015, 0x10001C, 0x100023, 0x10002A, 0x100031},
.rmii_ext_tx_clk = {0x100018, 0x10001F, 0x100026, 0x10002D, 0x100034},
+ .ptpegr_ts = {0xC0, 0xC2, 0xC4, 0xC6, 0xC8},
+ .ptp_control = 0x17,
+ .ptpclk = 0x18, /* Spans 0x18 to 0x19 */
+ .ptpclkrate = 0x1A,
+ .ptptsclk = 0x1B, /* Spans 0x1B to 0x1C */
};
static struct sja1105_regs sja1105pqrs_regs = {
@@ -516,11 +527,11 @@ static struct sja1105_regs sja1105pqrs_regs = {
.port_control = 0x12,
.config = 0x020000,
.rgu = 0x100440,
+ /* UM10944.pdf, Table 86, ACU Register overview */
.pad_mii_tx = {0x100800, 0x100802, 0x100804, 0x100806, 0x100808},
+ .pad_mii_id = {0x100810, 0x100811, 0x100812, 0x100813, 0x100814},
.rmii_pll1 = 0x10000A,
.cgu_idiv = {0x10000B, 0x10000C, 0x10000D, 0x10000E, 0x10000F},
- /* UM10944.pdf, Table 86, ACU Register overview */
- .rgmii_pad_mii_tx = {0x100800, 0x100802, 0x100804, 0x100806, 0x100808},
.mac = {0x200, 0x202, 0x204, 0x206, 0x208},
.mac_hl1 = {0x400, 0x410, 0x420, 0x430, 0x440},
.mac_hl2 = {0x600, 0x610, 0x620, 0x630, 0x640},
@@ -533,6 +544,11 @@ static struct sja1105_regs sja1105pqrs_regs = {
.rmii_ref_clk = {0x100015, 0x10001B, 0x100021, 0x100027, 0x10002D},
.rmii_ext_tx_clk = {0x100017, 0x10001D, 0x100023, 0x100029, 0x10002F},
.qlevel = {0x604, 0x614, 0x624, 0x634, 0x644},
+ .ptpegr_ts = {0xC0, 0xC4, 0xC8, 0xCC, 0xD0},
+ .ptp_control = 0x18,
+ .ptpclk = 0x19,
+ .ptpclkrate = 0x1B,
+ .ptptsclk = 0x1C,
};
struct sja1105_info sja1105e_info = {
@@ -540,7 +556,12 @@ struct sja1105_info sja1105e_info = {
.part_no = SJA1105ET_PART_NO,
.static_ops = sja1105e_table_ops,
.dyn_ops = sja1105et_dyn_ops,
+ .ptp_ts_bits = 24,
+ .ptpegr_ts_bytes = 4,
.reset_cmd = sja1105et_reset_cmd,
+ .fdb_add_cmd = sja1105et_fdb_add,
+ .fdb_del_cmd = sja1105et_fdb_del,
+ .ptp_cmd = sja1105et_ptp_cmd,
.regs = &sja1105et_regs,
.name = "SJA1105E",
};
@@ -549,7 +570,12 @@ struct sja1105_info sja1105t_info = {
.part_no = SJA1105ET_PART_NO,
.static_ops = sja1105t_table_ops,
.dyn_ops = sja1105et_dyn_ops,
+ .ptp_ts_bits = 24,
+ .ptpegr_ts_bytes = 4,
.reset_cmd = sja1105et_reset_cmd,
+ .fdb_add_cmd = sja1105et_fdb_add,
+ .fdb_del_cmd = sja1105et_fdb_del,
+ .ptp_cmd = sja1105et_ptp_cmd,
.regs = &sja1105et_regs,
.name = "SJA1105T",
};
@@ -558,7 +584,13 @@ struct sja1105_info sja1105p_info = {
.part_no = SJA1105P_PART_NO,
.static_ops = sja1105p_table_ops,
.dyn_ops = sja1105pqrs_dyn_ops,
+ .ptp_ts_bits = 32,
+ .ptpegr_ts_bytes = 8,
+ .setup_rgmii_delay = sja1105pqrs_setup_rgmii_delay,
.reset_cmd = sja1105pqrs_reset_cmd,
+ .fdb_add_cmd = sja1105pqrs_fdb_add,
+ .fdb_del_cmd = sja1105pqrs_fdb_del,
+ .ptp_cmd = sja1105pqrs_ptp_cmd,
.regs = &sja1105pqrs_regs,
.name = "SJA1105P",
};
@@ -567,7 +599,13 @@ struct sja1105_info sja1105q_info = {
.part_no = SJA1105Q_PART_NO,
.static_ops = sja1105q_table_ops,
.dyn_ops = sja1105pqrs_dyn_ops,
+ .ptp_ts_bits = 32,
+ .ptpegr_ts_bytes = 8,
+ .setup_rgmii_delay = sja1105pqrs_setup_rgmii_delay,
.reset_cmd = sja1105pqrs_reset_cmd,
+ .fdb_add_cmd = sja1105pqrs_fdb_add,
+ .fdb_del_cmd = sja1105pqrs_fdb_del,
+ .ptp_cmd = sja1105pqrs_ptp_cmd,
.regs = &sja1105pqrs_regs,
.name = "SJA1105Q",
};
@@ -576,7 +614,13 @@ struct sja1105_info sja1105r_info = {
.part_no = SJA1105R_PART_NO,
.static_ops = sja1105r_table_ops,
.dyn_ops = sja1105pqrs_dyn_ops,
+ .ptp_ts_bits = 32,
+ .ptpegr_ts_bytes = 8,
+ .setup_rgmii_delay = sja1105pqrs_setup_rgmii_delay,
.reset_cmd = sja1105pqrs_reset_cmd,
+ .fdb_add_cmd = sja1105pqrs_fdb_add,
+ .fdb_del_cmd = sja1105pqrs_fdb_del,
+ .ptp_cmd = sja1105pqrs_ptp_cmd,
.regs = &sja1105pqrs_regs,
.name = "SJA1105R",
};
@@ -586,6 +630,12 @@ struct sja1105_info sja1105s_info = {
.static_ops = sja1105s_table_ops,
.dyn_ops = sja1105pqrs_dyn_ops,
.regs = &sja1105pqrs_regs,
+ .ptp_ts_bits = 32,
+ .ptpegr_ts_bytes = 8,
+ .setup_rgmii_delay = sja1105pqrs_setup_rgmii_delay,
.reset_cmd = sja1105pqrs_reset_cmd,
+ .fdb_add_cmd = sja1105pqrs_fdb_add,
+ .fdb_del_cmd = sja1105pqrs_fdb_del,
+ .ptp_cmd = sja1105pqrs_ptp_cmd,
.name = "SJA1105S",
};
diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.c b/drivers/net/dsa/sja1105/sja1105_static_config.c
index b3c992b0abb0..b31c737dc560 100644
--- a/drivers/net/dsa/sja1105/sja1105_static_config.c
+++ b/drivers/net/dsa/sja1105/sja1105_static_config.c
@@ -91,6 +91,28 @@ u32 sja1105_crc32(const void *buf, size_t len)
return ~crc;
}
+static size_t sja1105et_avb_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105ET_SIZE_AVB_PARAMS_ENTRY;
+ struct sja1105_avb_params_entry *entry = entry_ptr;
+
+ sja1105_packing(buf, &entry->destmeta, 95, 48, size, op);
+ sja1105_packing(buf, &entry->srcmeta, 47, 0, size, op);
+ return size;
+}
+
+static size_t sja1105pqrs_avb_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY;
+ struct sja1105_avb_params_entry *entry = entry_ptr;
+
+ sja1105_packing(buf, &entry->destmeta, 125, 78, size, op);
+ sja1105_packing(buf, &entry->srcmeta, 77, 30, size, op);
+ return size;
+}
+
static size_t sja1105et_general_params_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
@@ -208,11 +230,20 @@ sja1105pqrs_l2_lookup_params_entry_packing(void *buf, void *entry_ptr,
{
const size_t size = SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY;
struct sja1105_l2_lookup_params_entry *entry = entry_ptr;
+ int offset, i;
+ for (i = 0, offset = 58; i < 5; i++, offset += 11)
+ sja1105_packing(buf, &entry->maxaddrp[i],
+ offset + 10, offset + 0, size, op);
sja1105_packing(buf, &entry->maxage, 57, 43, size, op);
+ sja1105_packing(buf, &entry->start_dynspc, 42, 33, size, op);
+ sja1105_packing(buf, &entry->drpnolearn, 32, 28, size, op);
sja1105_packing(buf, &entry->shared_learn, 27, 27, size, op);
sja1105_packing(buf, &entry->no_enf_hostprt, 26, 26, size, op);
sja1105_packing(buf, &entry->no_mgmt_learn, 25, 25, size, op);
+ sja1105_packing(buf, &entry->use_static, 24, 24, size, op);
+ sja1105_packing(buf, &entry->owr_dyn, 23, 23, size, op);
+ sja1105_packing(buf, &entry->learn_once, 22, 22, size, op);
return size;
}
@@ -236,10 +267,20 @@ size_t sja1105pqrs_l2_lookup_entry_packing(void *buf, void *entry_ptr,
const size_t size = SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY;
struct sja1105_l2_lookup_entry *entry = entry_ptr;
- /* These are static L2 lookup entries, so the structure
- * should match UM11040 Table 16/17 definitions when
- * LOCKEDS is 1.
- */
+ if (entry->lockeds) {
+ sja1105_packing(buf, &entry->tsreg, 159, 159, size, op);
+ sja1105_packing(buf, &entry->mirrvlan, 158, 147, size, op);
+ sja1105_packing(buf, &entry->takets, 146, 146, size, op);
+ sja1105_packing(buf, &entry->mirr, 145, 145, size, op);
+ sja1105_packing(buf, &entry->retag, 144, 144, size, op);
+ } else {
+ sja1105_packing(buf, &entry->touched, 159, 159, size, op);
+ sja1105_packing(buf, &entry->age, 158, 144, size, op);
+ }
+ sja1105_packing(buf, &entry->mask_iotag, 143, 143, size, op);
+ sja1105_packing(buf, &entry->mask_vlanid, 142, 131, size, op);
+ sja1105_packing(buf, &entry->mask_macaddr, 130, 83, size, op);
+ sja1105_packing(buf, &entry->iotag, 82, 82, size, op);
sja1105_packing(buf, &entry->vlanid, 81, 70, size, op);
sja1105_packing(buf, &entry->macaddr, 69, 22, size, op);
sja1105_packing(buf, &entry->destports, 21, 17, size, op);
@@ -413,6 +454,7 @@ static u64 blk_id_map[BLK_IDX_MAX] = {
[BLK_IDX_MAC_CONFIG] = BLKID_MAC_CONFIG,
[BLK_IDX_L2_LOOKUP_PARAMS] = BLKID_L2_LOOKUP_PARAMS,
[BLK_IDX_L2_FORWARDING_PARAMS] = BLKID_L2_FORWARDING_PARAMS,
+ [BLK_IDX_AVB_PARAMS] = BLKID_AVB_PARAMS,
[BLK_IDX_GENERAL_PARAMS] = BLKID_GENERAL_PARAMS,
[BLK_IDX_XMII_PARAMS] = BLKID_XMII_PARAMS,
};
@@ -442,7 +484,7 @@ const char *sja1105_static_config_error_msg[] = {
"vl-forwarding-parameters-table.partspc.",
};
-sja1105_config_valid_t
+static sja1105_config_valid_t
static_config_check_memory_size(const struct sja1105_table *tables)
{
const struct sja1105_l2_forwarding_params_entry *l2_fwd_params;
@@ -614,6 +656,12 @@ struct sja1105_table_ops sja1105e_table_ops[BLK_IDX_MAX] = {
.packed_entry_size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
},
+ [BLK_IDX_AVB_PARAMS] = {
+ .packing = sja1105et_avb_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_avb_params_entry),
+ .packed_entry_size = SJA1105ET_SIZE_AVB_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_AVB_PARAMS_COUNT,
+ },
[BLK_IDX_GENERAL_PARAMS] = {
.packing = sja1105et_general_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_general_params_entry),
@@ -672,6 +720,12 @@ struct sja1105_table_ops sja1105t_table_ops[BLK_IDX_MAX] = {
.packed_entry_size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
},
+ [BLK_IDX_AVB_PARAMS] = {
+ .packing = sja1105et_avb_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_avb_params_entry),
+ .packed_entry_size = SJA1105ET_SIZE_AVB_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_AVB_PARAMS_COUNT,
+ },
[BLK_IDX_GENERAL_PARAMS] = {
.packing = sja1105et_general_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_general_params_entry),
@@ -730,6 +784,12 @@ struct sja1105_table_ops sja1105p_table_ops[BLK_IDX_MAX] = {
.packed_entry_size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
},
+ [BLK_IDX_AVB_PARAMS] = {
+ .packing = sja1105pqrs_avb_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_avb_params_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_AVB_PARAMS_COUNT,
+ },
[BLK_IDX_GENERAL_PARAMS] = {
.packing = sja1105pqrs_general_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_general_params_entry),
@@ -788,6 +848,12 @@ struct sja1105_table_ops sja1105q_table_ops[BLK_IDX_MAX] = {
.packed_entry_size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
},
+ [BLK_IDX_AVB_PARAMS] = {
+ .packing = sja1105pqrs_avb_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_avb_params_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_AVB_PARAMS_COUNT,
+ },
[BLK_IDX_GENERAL_PARAMS] = {
.packing = sja1105pqrs_general_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_general_params_entry),
@@ -846,6 +912,12 @@ struct sja1105_table_ops sja1105r_table_ops[BLK_IDX_MAX] = {
.packed_entry_size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
},
+ [BLK_IDX_AVB_PARAMS] = {
+ .packing = sja1105pqrs_avb_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_avb_params_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_AVB_PARAMS_COUNT,
+ },
[BLK_IDX_GENERAL_PARAMS] = {
.packing = sja1105pqrs_general_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_general_params_entry),
@@ -904,6 +976,12 @@ struct sja1105_table_ops sja1105s_table_ops[BLK_IDX_MAX] = {
.packed_entry_size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY,
.max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
},
+ [BLK_IDX_AVB_PARAMS] = {
+ .packing = sja1105pqrs_avb_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_avb_params_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_AVB_PARAMS_COUNT,
+ },
[BLK_IDX_GENERAL_PARAMS] = {
.packing = sja1105pqrs_general_params_entry_packing,
.unpacked_entry_size = sizeof(struct sja1105_general_params_entry),
diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.h b/drivers/net/dsa/sja1105/sja1105_static_config.h
index 069ca8fd059c..684465fc0882 100644
--- a/drivers/net/dsa/sja1105/sja1105_static_config.h
+++ b/drivers/net/dsa/sja1105/sja1105_static_config.h
@@ -20,10 +20,12 @@
#define SJA1105ET_SIZE_MAC_CONFIG_ENTRY 28
#define SJA1105ET_SIZE_L2_LOOKUP_PARAMS_ENTRY 4
#define SJA1105ET_SIZE_GENERAL_PARAMS_ENTRY 40
+#define SJA1105ET_SIZE_AVB_PARAMS_ENTRY 12
#define SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY 20
#define SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY 32
#define SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY 16
#define SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY 44
+#define SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY 16
/* UM10944.pdf Page 11, Table 2. Configuration Blocks */
enum {
@@ -34,6 +36,7 @@ enum {
BLKID_MAC_CONFIG = 0x09,
BLKID_L2_LOOKUP_PARAMS = 0x0D,
BLKID_L2_FORWARDING_PARAMS = 0x0E,
+ BLKID_AVB_PARAMS = 0x10,
BLKID_GENERAL_PARAMS = 0x11,
BLKID_XMII_PARAMS = 0x4E,
};
@@ -46,6 +49,7 @@ enum sja1105_blk_idx {
BLK_IDX_MAC_CONFIG,
BLK_IDX_L2_LOOKUP_PARAMS,
BLK_IDX_L2_FORWARDING_PARAMS,
+ BLK_IDX_AVB_PARAMS,
BLK_IDX_GENERAL_PARAMS,
BLK_IDX_XMII_PARAMS,
BLK_IDX_MAX,
@@ -64,6 +68,7 @@ enum sja1105_blk_idx {
#define SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT 1
#define SJA1105_MAX_GENERAL_PARAMS_COUNT 1
#define SJA1105_MAX_XMII_PARAMS_COUNT 1
+#define SJA1105_MAX_AVB_PARAMS_COUNT 1
#define SJA1105_MAX_FRAME_MEMORY 929
@@ -122,9 +127,36 @@ struct sja1105_l2_lookup_entry {
u64 destports;
u64 enfport;
u64 index;
+ /* P/Q/R/S only */
+ u64 mask_iotag;
+ u64 mask_vlanid;
+ u64 mask_macaddr;
+ u64 iotag;
+ u64 lockeds;
+ union {
+ /* LOCKEDS=1: Static FDB entries */
+ struct {
+ u64 tsreg;
+ u64 mirrvlan;
+ u64 takets;
+ u64 mirr;
+ u64 retag;
+ };
+ /* LOCKEDS=0: Dynamically learned FDB entries */
+ struct {
+ u64 touched;
+ u64 age;
+ };
+ };
};
struct sja1105_l2_lookup_params_entry {
+ u64 maxaddrp[5]; /* P/Q/R/S only */
+ u64 start_dynspc; /* P/Q/R/S only */
+ u64 drpnolearn; /* P/Q/R/S only */
+ u64 use_static; /* P/Q/R/S only */
+ u64 owr_dyn; /* P/Q/R/S only */
+ u64 learn_once; /* P/Q/R/S only */
u64 maxage; /* Shared */
u64 dyn_tbsz; /* E/T only */
u64 poly; /* E/T only */
@@ -153,6 +185,11 @@ struct sja1105_l2_policing_entry {
u64 partition;
};
+struct sja1105_avb_params_entry {
+ u64 destmeta;
+ u64 srcmeta;
+};
+
struct sja1105_mac_config_entry {
u64 top[8];
u64 base[8];
diff --git a/drivers/net/dsa/vitesse-vsc73xx.c b/drivers/net/dsa/vitesse-vsc73xx-core.c
index d4780610ea8a..614377ef7956 100644
--- a/drivers/net/dsa/vitesse-vsc73xx.c
+++ b/drivers/net/dsa/vitesse-vsc73xx-core.c
@@ -10,10 +10,6 @@
* handling the switch in a memory-mapped manner by connecting to that external
* CPU's memory bus.
*
- * This driver (currently) only takes control of the switch chip over SPI and
- * configures it to route packages around when connected to a CPU port. The
- * chip has embedded PHYs and VLAN support so we model it using DSA.
- *
* Copyright (C) 2018 Linus Walleij <linus.walleij@linaro.org>
* Includes portions of code from the firmware uploader by:
* Copyright (C) 2009 Gabor Juhos <juhosg@openwrt.org>
@@ -24,8 +20,6 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_mdio.h>
-#include <linux/platform_device.h>
-#include <linux/spi/spi.h>
#include <linux/bitops.h>
#include <linux/if_bridge.h>
#include <linux/etherdevice.h>
@@ -34,6 +28,8 @@
#include <linux/random.h>
#include <net/dsa.h>
+#include "vitesse-vsc73xx.h"
+
#define VSC73XX_BLOCK_MAC 0x1 /* Subblocks 0-4, 6 (CPU port) */
#define VSC73XX_BLOCK_ANALYZER 0x2 /* Only subblock 0 */
#define VSC73XX_BLOCK_MII 0x3 /* Subblocks 0 and 1 */
@@ -255,13 +251,6 @@
#define VSC73XX_GLORESET_PHY_RESET BIT(1)
#define VSC73XX_GLORESET_MASTER_RESET BIT(0)
-#define VSC73XX_CMD_MODE_READ 0
-#define VSC73XX_CMD_MODE_WRITE 1
-#define VSC73XX_CMD_MODE_SHIFT 4
-#define VSC73XX_CMD_BLOCK_SHIFT 5
-#define VSC73XX_CMD_BLOCK_MASK 0x7
-#define VSC73XX_CMD_SUBBLOCK_MASK 0xf
-
#define VSC7385_CLOCK_DELAY ((3 << 4) | 3)
#define VSC7385_CLOCK_DELAY_MASK ((3 << 4) | 3)
@@ -274,20 +263,6 @@
VSC73XX_ICPU_CTRL_CLK_EN | \
VSC73XX_ICPU_CTRL_SRST)
-/**
- * struct vsc73xx - VSC73xx state container
- */
-struct vsc73xx {
- struct device *dev;
- struct gpio_desc *reset;
- struct spi_device *spi;
- struct dsa_switch *ds;
- struct gpio_chip gc;
- u16 chipid;
- u8 addr[ETH_ALEN];
- struct mutex lock; /* Protects SPI traffic */
-};
-
#define IS_7385(a) ((a)->chipid == VSC73XX_CHIPID_ID_7385)
#define IS_7388(a) ((a)->chipid == VSC73XX_CHIPID_ID_7388)
#define IS_7395(a) ((a)->chipid == VSC73XX_CHIPID_ID_7395)
@@ -365,7 +340,7 @@ static const struct vsc73xx_counter vsc73xx_tx_counters[] = {
{ 29, "TxQoSClass3" }, /* non-standard counter */
};
-static int vsc73xx_is_addr_valid(u8 block, u8 subblock)
+int vsc73xx_is_addr_valid(u8 block, u8 subblock)
{
switch (block) {
case VSC73XX_BLOCK_MAC:
@@ -396,96 +371,18 @@ static int vsc73xx_is_addr_valid(u8 block, u8 subblock)
return 0;
}
-
-static u8 vsc73xx_make_addr(u8 mode, u8 block, u8 subblock)
-{
- u8 ret;
-
- ret = (block & VSC73XX_CMD_BLOCK_MASK) << VSC73XX_CMD_BLOCK_SHIFT;
- ret |= (mode & 1) << VSC73XX_CMD_MODE_SHIFT;
- ret |= subblock & VSC73XX_CMD_SUBBLOCK_MASK;
-
- return ret;
-}
+EXPORT_SYMBOL(vsc73xx_is_addr_valid);
static int vsc73xx_read(struct vsc73xx *vsc, u8 block, u8 subblock, u8 reg,
u32 *val)
{
- struct spi_transfer t[2];
- struct spi_message m;
- u8 cmd[4];
- u8 buf[4];
- int ret;
-
- if (!vsc73xx_is_addr_valid(block, subblock))
- return -EINVAL;
-
- spi_message_init(&m);
-
- memset(&t, 0, sizeof(t));
-
- t[0].tx_buf = cmd;
- t[0].len = sizeof(cmd);
- spi_message_add_tail(&t[0], &m);
-
- t[1].rx_buf = buf;
- t[1].len = sizeof(buf);
- spi_message_add_tail(&t[1], &m);
-
- cmd[0] = vsc73xx_make_addr(VSC73XX_CMD_MODE_READ, block, subblock);
- cmd[1] = reg;
- cmd[2] = 0;
- cmd[3] = 0;
-
- mutex_lock(&vsc->lock);
- ret = spi_sync(vsc->spi, &m);
- mutex_unlock(&vsc->lock);
-
- if (ret)
- return ret;
-
- *val = (buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3];
-
- return 0;
+ return vsc->ops->read(vsc, block, subblock, reg, val);
}
static int vsc73xx_write(struct vsc73xx *vsc, u8 block, u8 subblock, u8 reg,
u32 val)
{
- struct spi_transfer t[2];
- struct spi_message m;
- u8 cmd[2];
- u8 buf[4];
- int ret;
-
- if (!vsc73xx_is_addr_valid(block, subblock))
- return -EINVAL;
-
- spi_message_init(&m);
-
- memset(&t, 0, sizeof(t));
-
- t[0].tx_buf = cmd;
- t[0].len = sizeof(cmd);
- spi_message_add_tail(&t[0], &m);
-
- t[1].tx_buf = buf;
- t[1].len = sizeof(buf);
- spi_message_add_tail(&t[1], &m);
-
- cmd[0] = vsc73xx_make_addr(VSC73XX_CMD_MODE_WRITE, block, subblock);
- cmd[1] = reg;
-
- buf[0] = (val >> 24) & 0xff;
- buf[1] = (val >> 16) & 0xff;
- buf[2] = (val >> 8) & 0xff;
- buf[3] = val & 0xff;
-
- mutex_lock(&vsc->lock);
- ret = spi_sync(vsc->spi, &m);
- mutex_unlock(&vsc->lock);
-
- return ret;
+ return vsc->ops->write(vsc, block, subblock, reg, val);
}
static int vsc73xx_update_bits(struct vsc73xx *vsc, u8 block, u8 subblock,
@@ -520,22 +417,8 @@ static int vsc73xx_detect(struct vsc73xx *vsc)
}
if (val == 0xffffffff) {
- dev_info(vsc->dev, "chip seems dead, assert reset\n");
- gpiod_set_value_cansleep(vsc->reset, 1);
- /* Reset pulse should be 20ns minimum, according to datasheet
- * table 245, so 10us should be fine
- */
- usleep_range(10, 100);
- gpiod_set_value_cansleep(vsc->reset, 0);
- /* Wait 20ms according to datasheet table 245 */
- msleep(20);
-
- ret = vsc73xx_read(vsc, VSC73XX_BLOCK_SYSTEM, 0,
- VSC73XX_ICPU_MBOX_VAL, &val);
- if (val == 0xffffffff) {
- dev_err(vsc->dev, "seems not to help, giving up\n");
- return -ENODEV;
- }
+ dev_info(vsc->dev, "chip seems dead.\n");
+ return -EAGAIN;
}
ret = vsc73xx_read(vsc, VSC73XX_BLOCK_SYSTEM, 0,
@@ -586,9 +469,8 @@ static int vsc73xx_detect(struct vsc73xx *vsc)
}
if (icpu_si_boot_en && !icpu_pi_en) {
dev_err(vsc->dev,
- "iCPU enabled boots from SI, no external memory\n");
- dev_err(vsc->dev, "no idea how to deal with this\n");
- return -ENODEV;
+ "iCPU enabled boots from PI/SI, no external memory\n");
+ return -EAGAIN;
}
if (!icpu_si_boot_en && icpu_pi_en) {
dev_err(vsc->dev,
@@ -1245,21 +1127,11 @@ static int vsc73xx_gpio_probe(struct vsc73xx *vsc)
return 0;
}
-static int vsc73xx_probe(struct spi_device *spi)
+int vsc73xx_probe(struct vsc73xx *vsc)
{
- struct device *dev = &spi->dev;
- struct vsc73xx *vsc;
+ struct device *dev = vsc->dev;
int ret;
- vsc = devm_kzalloc(dev, sizeof(*vsc), GFP_KERNEL);
- if (!vsc)
- return -ENOMEM;
-
- spi_set_drvdata(spi, vsc);
- vsc->spi = spi_dev_get(spi);
- vsc->dev = dev;
- mutex_init(&vsc->lock);
-
/* Release reset, if any */
vsc->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(vsc->reset)) {
@@ -1270,15 +1142,20 @@ static int vsc73xx_probe(struct spi_device *spi)
/* Wait 20ms according to datasheet table 245 */
msleep(20);
- spi->mode = SPI_MODE_0;
- spi->bits_per_word = 8;
- ret = spi_setup(spi);
- if (ret < 0) {
- dev_err(dev, "spi setup failed.\n");
- return ret;
- }
-
ret = vsc73xx_detect(vsc);
+ if (ret == -EAGAIN) {
+ dev_err(vsc->dev,
+ "Chip seems to be out of control. Assert reset and try again.\n");
+ gpiod_set_value_cansleep(vsc->reset, 1);
+ /* Reset pulse should be 20ns minimum, according to datasheet
+ * table 245, so 10us should be fine
+ */
+ usleep_range(10, 100);
+ gpiod_set_value_cansleep(vsc->reset, 0);
+ /* Wait 20ms according to datasheet table 245 */
+ msleep(20);
+ ret = vsc73xx_detect(vsc);
+ }
if (ret) {
dev_err(dev, "no chip found (%d)\n", ret);
return -ENODEV;
@@ -1321,43 +1198,16 @@ static int vsc73xx_probe(struct spi_device *spi)
return 0;
}
+EXPORT_SYMBOL(vsc73xx_probe);
-static int vsc73xx_remove(struct spi_device *spi)
+int vsc73xx_remove(struct vsc73xx *vsc)
{
- struct vsc73xx *vsc = spi_get_drvdata(spi);
-
dsa_unregister_switch(vsc->ds);
gpiod_set_value(vsc->reset, 1);
return 0;
}
-
-static const struct of_device_id vsc73xx_of_match[] = {
- {
- .compatible = "vitesse,vsc7385",
- },
- {
- .compatible = "vitesse,vsc7388",
- },
- {
- .compatible = "vitesse,vsc7395",
- },
- {
- .compatible = "vitesse,vsc7398",
- },
- { },
-};
-MODULE_DEVICE_TABLE(of, vsc73xx_of_match);
-
-static struct spi_driver vsc73xx_driver = {
- .probe = vsc73xx_probe,
- .remove = vsc73xx_remove,
- .driver = {
- .name = "vsc73xx",
- .of_match_table = vsc73xx_of_match,
- },
-};
-module_spi_driver(vsc73xx_driver);
+EXPORT_SYMBOL(vsc73xx_remove);
MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
MODULE_DESCRIPTION("Vitesse VSC7385/7388/7395/7398 driver");
diff --git a/drivers/net/dsa/vitesse-vsc73xx-platform.c b/drivers/net/dsa/vitesse-vsc73xx-platform.c
new file mode 100644
index 000000000000..0541785f9fee
--- /dev/null
+++ b/drivers/net/dsa/vitesse-vsc73xx-platform.c
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: GPL-2.0
+/* DSA driver for:
+ * Vitesse VSC7385 SparX-G5 5+1-port Integrated Gigabit Ethernet Switch
+ * Vitesse VSC7388 SparX-G8 8-port Integrated Gigabit Ethernet Switch
+ * Vitesse VSC7395 SparX-G5e 5+1-port Integrated Gigabit Ethernet Switch
+ * Vitesse VSC7398 SparX-G8e 8-port Integrated Gigabit Ethernet Switch
+ *
+ * This driver takes control of the switch chip connected over a CPU-attached
+ * address bus and configures it to route packets around when connected to
+ * a CPU port.
+ *
+ * Copyright (C) 2019 Pawel Dembicki <paweldembicki@gmail.com>
+ * Based on vitesse-vsc-spi.c by:
+ * Copyright (C) 2018 Linus Walleij <linus.walleij@linaro.org>
+ * Includes portions of code from the firmware uploader by:
+ * Copyright (C) 2009 Gabor Juhos <juhosg@openwrt.org>
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "vitesse-vsc73xx.h"
+
+#define VSC73XX_CMD_PLATFORM_BLOCK_SHIFT 14
+#define VSC73XX_CMD_PLATFORM_BLOCK_MASK 0x7
+#define VSC73XX_CMD_PLATFORM_SUBBLOCK_SHIFT 10
+#define VSC73XX_CMD_PLATFORM_SUBBLOCK_MASK 0xf
+#define VSC73XX_CMD_PLATFORM_REGISTER_SHIFT 2
+
+/**
+ * struct vsc73xx_platform - VSC73xx Platform state container
+ */
+struct vsc73xx_platform {
+ struct platform_device *pdev;
+ void __iomem *base_addr;
+ struct vsc73xx vsc;
+};
+
+static const struct vsc73xx_ops vsc73xx_platform_ops;
+
+static u32 vsc73xx_make_addr(u8 block, u8 subblock, u8 reg)
+{
+ u32 ret;
+
+ ret = (block & VSC73XX_CMD_PLATFORM_BLOCK_MASK)
+ << VSC73XX_CMD_PLATFORM_BLOCK_SHIFT;
+ ret |= (subblock & VSC73XX_CMD_PLATFORM_SUBBLOCK_MASK)
+ << VSC73XX_CMD_PLATFORM_SUBBLOCK_SHIFT;
+ ret |= reg << VSC73XX_CMD_PLATFORM_REGISTER_SHIFT;
+
+ return ret;
+}
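For illustration (not part of the patch), the memory-mapped offset produced above for a hypothetical access to block 1, subblock 2, register 0x05 works out as follows:

/* Illustration of the memory-mapped address layout; hypothetical values. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t block = 0x1, subblock = 0x2, reg = 0x05;
	uint32_t offset = ((uint32_t)(block & 0x7) << 14) |	/* bits 16:14 */
			  ((uint32_t)(subblock & 0xf) << 10) |	/* bits 13:10 */
			  ((uint32_t)reg << 2);			/* bits 9:2 */

	/* Prints offset=0x04814 */
	printf("offset=0x%05x\n", (unsigned int)offset);
	return 0;
}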
+
+static int vsc73xx_platform_read(struct vsc73xx *vsc, u8 block, u8 subblock,
+ u8 reg, u32 *val)
+{
+ struct vsc73xx_platform *vsc_platform = vsc->priv;
+ u32 offset;
+
+ if (!vsc73xx_is_addr_valid(block, subblock))
+ return -EINVAL;
+
+ offset = vsc73xx_make_addr(block, subblock, reg);
+ /* By default the vsc73xx runs in big-endian mode.
+ * (See "Register Addressing" section 5.5.3 in the VSC7385 manual.)
+ */
+ *val = ioread32be(vsc_platform->base_addr + offset);
+
+ return 0;
+}
+
+static int vsc73xx_platform_write(struct vsc73xx *vsc, u8 block, u8 subblock,
+ u8 reg, u32 val)
+{
+ struct vsc73xx_platform *vsc_platform = vsc->priv;
+ u32 offset;
+
+ if (!vsc73xx_is_addr_valid(block, subblock))
+ return -EINVAL;
+
+ offset = vsc73xx_make_addr(block, subblock, reg);
+ iowrite32be(val, vsc_platform->base_addr + offset);
+
+ return 0;
+}
+
+static int vsc73xx_platform_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct vsc73xx_platform *vsc_platform;
+ struct resource *res = NULL;
+ int ret;
+
+ vsc_platform = devm_kzalloc(dev, sizeof(*vsc_platform), GFP_KERNEL);
+ if (!vsc_platform)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, vsc_platform);
+ vsc_platform->pdev = pdev;
+ vsc_platform->vsc.dev = dev;
+ vsc_platform->vsc.priv = vsc_platform;
+ vsc_platform->vsc.ops = &vsc73xx_platform_ops;
+
+ /* obtain I/O memory space */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "cannot obtain I/O memory space\n");
+ ret = -ENXIO;
+ return ret;
+ }
+
+ vsc_platform->base_addr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(vsc_platform->base_addr)) {
+ dev_err(&pdev->dev, "cannot request I/O memory space\n");
+ ret = -ENXIO;
+ return ret;
+ }
+
+ return vsc73xx_probe(&vsc_platform->vsc);
+}
+
+static int vsc73xx_platform_remove(struct platform_device *pdev)
+{
+ struct vsc73xx_platform *vsc_platform = platform_get_drvdata(pdev);
+
+ return vsc73xx_remove(&vsc_platform->vsc);
+}
+
+static const struct vsc73xx_ops vsc73xx_platform_ops = {
+ .read = vsc73xx_platform_read,
+ .write = vsc73xx_platform_write,
+};
+
+static const struct of_device_id vsc73xx_of_match[] = {
+ {
+ .compatible = "vitesse,vsc7385",
+ },
+ {
+ .compatible = "vitesse,vsc7388",
+ },
+ {
+ .compatible = "vitesse,vsc7395",
+ },
+ {
+ .compatible = "vitesse,vsc7398",
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, vsc73xx_of_match);
+
+static struct platform_driver vsc73xx_platform_driver = {
+ .probe = vsc73xx_platform_probe,
+ .remove = vsc73xx_platform_remove,
+ .driver = {
+ .name = "vsc73xx-platform",
+ .of_match_table = vsc73xx_of_match,
+ },
+};
+module_platform_driver(vsc73xx_platform_driver);
+
+MODULE_AUTHOR("Pawel Dembicki <paweldembicki@gmail.com>");
+MODULE_DESCRIPTION("Vitesse VSC7385/7388/7395/7398 Platform driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/vitesse-vsc73xx-spi.c b/drivers/net/dsa/vitesse-vsc73xx-spi.c
new file mode 100644
index 000000000000..e73c8fcddc9f
--- /dev/null
+++ b/drivers/net/dsa/vitesse-vsc73xx-spi.c
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: GPL-2.0
+/* DSA driver for:
+ * Vitesse VSC7385 SparX-G5 5+1-port Integrated Gigabit Ethernet Switch
+ * Vitesse VSC7388 SparX-G8 8-port Integrated Gigabit Ethernet Switch
+ * Vitesse VSC7395 SparX-G5e 5+1-port Integrated Gigabit Ethernet Switch
+ * Vitesse VSC7398 SparX-G8e 8-port Integrated Gigabit Ethernet Switch
+ *
+ * This driver takes control of the switch chip over SPI and
+ * configures it to route packets around when connected to a CPU port.
+ *
+ * Copyright (C) 2018 Linus Walleij <linus.walleij@linaro.org>
+ * Includes portions of code from the firmware uploader by:
+ * Copyright (C) 2009 Gabor Juhos <juhosg@openwrt.org>
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/spi/spi.h>
+
+#include "vitesse-vsc73xx.h"
+
+#define VSC73XX_CMD_SPI_MODE_READ 0
+#define VSC73XX_CMD_SPI_MODE_WRITE 1
+#define VSC73XX_CMD_SPI_MODE_SHIFT 4
+#define VSC73XX_CMD_SPI_BLOCK_SHIFT 5
+#define VSC73XX_CMD_SPI_BLOCK_MASK 0x7
+#define VSC73XX_CMD_SPI_SUBBLOCK_MASK 0xf
+
+/**
+ * struct vsc73xx_spi - VSC73xx SPI state container
+ */
+struct vsc73xx_spi {
+ struct spi_device *spi;
+ struct mutex lock; /* Protects SPI traffic */
+ struct vsc73xx vsc;
+};
+
+static const struct vsc73xx_ops vsc73xx_spi_ops;
+
+static u8 vsc73xx_make_addr(u8 mode, u8 block, u8 subblock)
+{
+ u8 ret;
+
+ ret =
+ (block & VSC73XX_CMD_SPI_BLOCK_MASK) << VSC73XX_CMD_SPI_BLOCK_SHIFT;
+ ret |= (mode & 1) << VSC73XX_CMD_SPI_MODE_SHIFT;
+ ret |= subblock & VSC73XX_CMD_SPI_SUBBLOCK_MASK;
+
+ return ret;
+}
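The equivalent illustration for the SPI framing (again a sketch, not part of the patch): the command byte carries the block in bits 7:5, the read/write mode in bit 4 and the subblock in bits 3:0, so reads and writes to block 1 / subblock 2 differ only in bit 4.

/* Illustration of the SPI command byte layout; hypothetical target. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t block = 0x1, subblock = 0x2;
	uint8_t rd = ((block & 0x7) << 5) | (0 << 4) | (subblock & 0xf);
	uint8_t wr = ((block & 0x7) << 5) | (1 << 4) | (subblock & 0xf);

	/* Prints read cmd=0x22 write cmd=0x32 */
	printf("read cmd=0x%02x write cmd=0x%02x\n",
	       (unsigned int)rd, (unsigned int)wr);
	return 0;
}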
+
+static int vsc73xx_spi_read(struct vsc73xx *vsc, u8 block, u8 subblock, u8 reg,
+ u32 *val)
+{
+ struct vsc73xx_spi *vsc_spi = vsc->priv;
+ struct spi_transfer t[2];
+ struct spi_message m;
+ u8 cmd[4];
+ u8 buf[4];
+ int ret;
+
+ if (!vsc73xx_is_addr_valid(block, subblock))
+ return -EINVAL;
+
+ spi_message_init(&m);
+
+ memset(&t, 0, sizeof(t));
+
+ t[0].tx_buf = cmd;
+ t[0].len = sizeof(cmd);
+ spi_message_add_tail(&t[0], &m);
+
+ t[1].rx_buf = buf;
+ t[1].len = sizeof(buf);
+ spi_message_add_tail(&t[1], &m);
+
+ cmd[0] = vsc73xx_make_addr(VSC73XX_CMD_SPI_MODE_READ, block, subblock);
+ cmd[1] = reg;
+ cmd[2] = 0;
+ cmd[3] = 0;
+
+ mutex_lock(&vsc_spi->lock);
+ ret = spi_sync(vsc_spi->spi, &m);
+ mutex_unlock(&vsc_spi->lock);
+
+ if (ret)
+ return ret;
+
+ *val = (buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3];
+
+ return 0;
+}
+
+static int vsc73xx_spi_write(struct vsc73xx *vsc, u8 block, u8 subblock, u8 reg,
+ u32 val)
+{
+ struct vsc73xx_spi *vsc_spi = vsc->priv;
+ struct spi_transfer t[2];
+ struct spi_message m;
+ u8 cmd[2];
+ u8 buf[4];
+ int ret;
+
+ if (!vsc73xx_is_addr_valid(block, subblock))
+ return -EINVAL;
+
+ spi_message_init(&m);
+
+ memset(&t, 0, sizeof(t));
+
+ t[0].tx_buf = cmd;
+ t[0].len = sizeof(cmd);
+ spi_message_add_tail(&t[0], &m);
+
+ t[1].tx_buf = buf;
+ t[1].len = sizeof(buf);
+ spi_message_add_tail(&t[1], &m);
+
+ cmd[0] = vsc73xx_make_addr(VSC73XX_CMD_SPI_MODE_WRITE, block, subblock);
+ cmd[1] = reg;
+
+ buf[0] = (val >> 24) & 0xff;
+ buf[1] = (val >> 16) & 0xff;
+ buf[2] = (val >> 8) & 0xff;
+ buf[3] = val & 0xff;
+
+ mutex_lock(&vsc_spi->lock);
+ ret = spi_sync(vsc_spi->spi, &m);
+ mutex_unlock(&vsc_spi->lock);
+
+ return ret;
+}
+
+static int vsc73xx_spi_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct vsc73xx_spi *vsc_spi;
+ int ret;
+
+ vsc_spi = devm_kzalloc(dev, sizeof(*vsc_spi), GFP_KERNEL);
+ if (!vsc_spi)
+ return -ENOMEM;
+
+ spi_set_drvdata(spi, vsc_spi);
+ vsc_spi->spi = spi_dev_get(spi);
+ vsc_spi->vsc.dev = dev;
+ vsc_spi->vsc.priv = vsc_spi;
+ vsc_spi->vsc.ops = &vsc73xx_spi_ops;
+ mutex_init(&vsc_spi->lock);
+
+ spi->mode = SPI_MODE_0;
+ spi->bits_per_word = 8;
+ ret = spi_setup(spi);
+ if (ret < 0) {
+ dev_err(dev, "spi setup failed.\n");
+ return ret;
+ }
+
+ return vsc73xx_probe(&vsc_spi->vsc);
+}
+
+static int vsc73xx_spi_remove(struct spi_device *spi)
+{
+ struct vsc73xx_spi *vsc_spi = spi_get_drvdata(spi);
+
+ return vsc73xx_remove(&vsc_spi->vsc);
+}
+
+static const struct vsc73xx_ops vsc73xx_spi_ops = {
+ .read = vsc73xx_spi_read,
+ .write = vsc73xx_spi_write,
+};
+
+static const struct of_device_id vsc73xx_of_match[] = {
+ {
+ .compatible = "vitesse,vsc7385",
+ },
+ {
+ .compatible = "vitesse,vsc7388",
+ },
+ {
+ .compatible = "vitesse,vsc7395",
+ },
+ {
+ .compatible = "vitesse,vsc7398",
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, vsc73xx_of_match);
+
+static struct spi_driver vsc73xx_spi_driver = {
+ .probe = vsc73xx_spi_probe,
+ .remove = vsc73xx_spi_remove,
+ .driver = {
+ .name = "vsc73xx-spi",
+ .of_match_table = vsc73xx_of_match,
+ },
+};
+module_spi_driver(vsc73xx_spi_driver);
+
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("Vitesse VSC7385/7388/7395/7398 SPI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/vitesse-vsc73xx.h b/drivers/net/dsa/vitesse-vsc73xx.h
new file mode 100644
index 000000000000..7478f8d4e0a9
--- /dev/null
+++ b/drivers/net/dsa/vitesse-vsc73xx.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/device.h>
+#include <linux/etherdevice.h>
+#include <linux/gpio/driver.h>
+
+/**
+ * struct vsc73xx - VSC73xx state container
+ */
+struct vsc73xx {
+ struct device *dev;
+ struct gpio_desc *reset;
+ struct dsa_switch *ds;
+ struct gpio_chip gc;
+ u16 chipid;
+ u8 addr[ETH_ALEN];
+ const struct vsc73xx_ops *ops;
+ void *priv;
+};
+
+struct vsc73xx_ops {
+ int (*read)(struct vsc73xx *vsc, u8 block, u8 subblock, u8 reg,
+ u32 *val);
+ int (*write)(struct vsc73xx *vsc, u8 block, u8 subblock, u8 reg,
+ u32 val);
+};
+
+int vsc73xx_is_addr_valid(u8 block, u8 subblock);
+int vsc73xx_probe(struct vsc73xx *vsc);
+int vsc73xx_remove(struct vsc73xx *vsc);
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index fe115b7caba0..93a2d4deb27c 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -76,6 +76,7 @@ source "drivers/net/ethernet/ezchip/Kconfig"
source "drivers/net/ethernet/faraday/Kconfig"
source "drivers/net/ethernet/freescale/Kconfig"
source "drivers/net/ethernet/fujitsu/Kconfig"
+source "drivers/net/ethernet/google/Kconfig"
source "drivers/net/ethernet/hisilicon/Kconfig"
source "drivers/net/ethernet/hp/Kconfig"
source "drivers/net/ethernet/huawei/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 7b5bf9682066..fb9155cffcff 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -39,6 +39,7 @@ obj-$(CONFIG_NET_VENDOR_EZCHIP) += ezchip/
obj-$(CONFIG_NET_VENDOR_FARADAY) += faraday/
obj-$(CONFIG_NET_VENDOR_FREESCALE) += freescale/
obj-$(CONFIG_NET_VENDOR_FUJITSU) += fujitsu/
+obj-$(CONFIG_NET_VENDOR_GOOGLE) += google/
obj-$(CONFIG_NET_VENDOR_HISILICON) += hisilicon/
obj-$(CONFIG_NET_VENDOR_HP) += hp/
obj-$(CONFIG_NET_VENDOR_HUAWEI) += huawei/
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 9e06dff619c3..3434730a7699 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -224,8 +224,8 @@ static int emac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
static void emac_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(DRV_NAME));
- strlcpy(info->version, DRV_VERSION, sizeof(DRV_VERSION));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, dev_name(&dev->dev), sizeof(info->bus_info));
}
@@ -818,7 +818,6 @@ static int emac_probe(struct platform_device *pdev)
SET_NETDEV_DEV(ndev, &pdev->dev);
db = netdev_priv(ndev);
- memset(db, 0, sizeof(*db));
db->dev = &pdev->dev;
db->ndev = ndev;
diff --git a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
index 9f80b73f90b1..d19f2ecf8e84 100644
--- a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
@@ -60,6 +60,7 @@ enum ena_admin_aq_feature_id {
ENA_ADMIN_MAX_QUEUES_NUM = 2,
ENA_ADMIN_HW_HINTS = 3,
ENA_ADMIN_LLQ = 4,
+ ENA_ADMIN_MAX_QUEUES_EXT = 7,
ENA_ADMIN_RSS_HASH_FUNCTION = 10,
ENA_ADMIN_STATELESS_OFFLOAD_CONFIG = 11,
ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG = 12,
@@ -421,7 +422,13 @@ struct ena_admin_get_set_feature_common_desc {
/* as appears in ena_admin_aq_feature_id */
u8 feature_id;
- u16 reserved16;
+ /* The driver specifies the max feature version it supports and the
+ * device responds with the currently supported feature version. The
+ * field is zero-based.
+ */
+ u8 feature_version;
+
+ u8 reserved8;
};
struct ena_admin_device_attr_feature_desc {
@@ -524,6 +531,39 @@ struct ena_admin_feature_llq_desc {
/* the stride control the driver selected to use */
u16 descriptors_stride_ctrl_enabled;
+
+ /* Maximum size in bytes taken by llq entries in a single tx burst.
+ * Set to 0 when there is no such limit.
+ */
+ u32 max_tx_burst_size;
+};
+
+struct ena_admin_queue_ext_feature_fields {
+ u32 max_tx_sq_num;
+
+ u32 max_tx_cq_num;
+
+ u32 max_rx_sq_num;
+
+ u32 max_rx_cq_num;
+
+ u32 max_tx_sq_depth;
+
+ u32 max_tx_cq_depth;
+
+ u32 max_rx_sq_depth;
+
+ u32 max_rx_cq_depth;
+
+ u32 max_tx_header_size;
+
+ /* Maximum number of descriptors, including the meta descriptor, allowed
+ * for a single Tx packet
+ */
+ u16 max_per_packet_tx_descs;
+
+ /* Maximum number of descriptors allowed for a single Rx packet */
+ u16 max_per_packet_rx_descs;
};
struct ena_admin_queue_feature_desc {
@@ -832,6 +872,19 @@ struct ena_admin_get_feat_cmd {
u32 raw[11];
};
+struct ena_admin_queue_ext_feature_desc {
+ /* version */
+ u8 version;
+
+ u8 reserved1[3];
+
+ union {
+ struct ena_admin_queue_ext_feature_fields max_queue_ext;
+
+ u32 raw[10];
+ };
+};
+
struct ena_admin_get_feat_resp {
struct ena_admin_acq_common_desc acq_common_desc;
@@ -844,6 +897,8 @@ struct ena_admin_get_feat_resp {
struct ena_admin_queue_feature_desc max_queue;
+ struct ena_admin_queue_ext_feature_desc max_queue_ext;
+
struct ena_admin_feature_aenq_desc aenq;
struct ena_admin_get_feature_link_desc link;
@@ -908,7 +963,9 @@ struct ena_admin_aenq_common_desc {
u16 syndrom;
- /* 0 : phase */
+ /* 0 : phase
+ * 7:1 : reserved - MBZ
+ */
u8 flags;
u8 reserved1[3];
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 7f8266b191ae..911a2e7a375a 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -91,7 +91,7 @@ struct ena_com_stats_ctx {
struct ena_admin_acq_get_stats_resp get_resp;
};
-static inline int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
+static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
struct ena_common_mem_addr *ena_addr,
dma_addr_t addr)
{
@@ -115,7 +115,7 @@ static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
GFP_KERNEL);
if (!sq->entries) {
- pr_err("memory allocation failed");
+ pr_err("memory allocation failed\n");
return -ENOMEM;
}
@@ -137,7 +137,7 @@ static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
GFP_KERNEL);
if (!cq->entries) {
- pr_err("memory allocation failed");
+ pr_err("memory allocation failed\n");
return -ENOMEM;
}
@@ -160,7 +160,7 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
GFP_KERNEL);
if (!aenq->entries) {
- pr_err("memory allocation failed");
+ pr_err("memory allocation failed\n");
return -ENOMEM;
}
@@ -190,7 +190,7 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
return 0;
}
-static inline void comp_ctxt_release(struct ena_com_admin_queue *queue,
+static void comp_ctxt_release(struct ena_com_admin_queue *queue,
struct ena_comp_ctx *comp_ctx)
{
comp_ctx->occupied = false;
@@ -277,7 +277,7 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu
return comp_ctx;
}
-static inline int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
+static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
{
size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
struct ena_comp_ctx *comp_ctx;
@@ -285,7 +285,7 @@ static inline int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
queue->comp_ctx = devm_kzalloc(queue->q_dmadev, size, GFP_KERNEL);
if (unlikely(!queue->comp_ctx)) {
- pr_err("memory allocation failed");
+ pr_err("memory allocation failed\n");
return -ENOMEM;
}
@@ -356,7 +356,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
}
if (!io_sq->desc_addr.virt_addr) {
- pr_err("memory allocation failed");
+ pr_err("memory allocation failed\n");
return -ENOMEM;
}
}
@@ -382,7 +382,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
if (!io_sq->bounce_buf_ctrl.base_buffer) {
- pr_err("bounce buffer memory allocation failed");
+ pr_err("bounce buffer memory allocation failed\n");
return -ENOMEM;
}
@@ -396,6 +396,10 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
0x0, io_sq->llq_info.desc_list_entry_size);
io_sq->llq_buf_ctrl.descs_left_in_line =
io_sq->llq_info.descs_num_before_header;
+
+ if (io_sq->llq_info.max_entries_in_tx_burst > 0)
+ io_sq->entries_in_tx_burst_left =
+ io_sq->llq_info.max_entries_in_tx_burst;
}
io_sq->tail = 0;
@@ -436,7 +440,7 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
}
if (!io_cq->cdesc_addr.virt_addr) {
- pr_err("memory allocation failed");
+ pr_err("memory allocation failed\n");
return -ENOMEM;
}
@@ -727,6 +731,9 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
supported_feat, llq_info->descs_num_before_header);
}
+ llq_info->max_entries_in_tx_burst =
+ (u16)(llq_features->max_tx_burst_size / llq_default_cfg->llq_ring_entry_size_value);
+
rc = ena_com_set_llq(ena_dev);
if (rc)
pr_err("Cannot set LLQ configuration: %d\n", rc);
@@ -755,16 +762,26 @@ static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *com
admin_queue->stats.no_completion++;
spin_unlock_irqrestore(&admin_queue->q_lock, flags);
- if (comp_ctx->status == ENA_CMD_COMPLETED)
- pr_err("The ena device have completion but the driver didn't receive any MSI-X interrupt (cmd %d)\n",
- comp_ctx->cmd_opcode);
- else
- pr_err("The ena device doesn't send any completion for the admin cmd %d status %d\n",
+ if (comp_ctx->status == ENA_CMD_COMPLETED) {
+ pr_err("The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
+ comp_ctx->cmd_opcode,
+ admin_queue->auto_polling ? "ON" : "OFF");
+ /* Check if fallback to polling is enabled */
+ if (admin_queue->auto_polling)
+ admin_queue->polling = true;
+ } else {
+ pr_err("The ena device doesn't send a completion for the admin cmd %d status %d\n",
comp_ctx->cmd_opcode, comp_ctx->status);
-
- admin_queue->running_state = false;
- ret = -ETIME;
- goto err;
+ }
+ /* Check if the driver shifted to polling mode.
+ * This happens when a completion arrives without an interrupt
+ * and autopolling mode is enabled; in that case, continue normal execution.
+ */
+ if (!admin_queue->polling) {
+ admin_queue->running_state = false;
+ ret = -ETIME;
+ goto err;
+ }
}
ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
@@ -822,7 +839,7 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
}
if (read_resp->reg_off != offset) {
- pr_err("Read failure: wrong offset provided");
+ pr_err("Read failure: wrong offset provided\n");
ret = ENA_MMIO_READ_TIMEOUT;
} else {
ret = read_resp->reg_val;
@@ -961,7 +978,8 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
struct ena_admin_get_feat_resp *get_resp,
enum ena_admin_aq_feature_id feature_id,
dma_addr_t control_buf_dma_addr,
- u32 control_buff_size)
+ u32 control_buff_size,
+ u8 feature_ver)
{
struct ena_com_admin_queue *admin_queue;
struct ena_admin_get_feat_cmd get_cmd;
@@ -992,7 +1010,7 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
}
get_cmd.control_buffer.length = control_buff_size;
-
+ get_cmd.feat_common.feature_version = feature_ver;
get_cmd.feat_common.feature_id = feature_id;
ret = ena_com_execute_admin_command(admin_queue,
@@ -1012,13 +1030,15 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
static int ena_com_get_feature(struct ena_com_dev *ena_dev,
struct ena_admin_get_feat_resp *get_resp,
- enum ena_admin_aq_feature_id feature_id)
+ enum ena_admin_aq_feature_id feature_id,
+ u8 feature_ver)
{
return ena_com_get_feature_ex(ena_dev,
get_resp,
feature_id,
0,
- 0);
+ 0,
+ feature_ver);
}
static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
@@ -1078,7 +1098,7 @@ static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
int ret;
ret = ena_com_get_feature(ena_dev, &get_resp,
- ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
+ ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG, 0);
if (unlikely(ret))
return ret;
@@ -1498,7 +1518,7 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
struct ena_admin_get_feat_resp get_resp;
int ret;
- ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG);
+ ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG, 0);
if (ret) {
pr_info("Can't get aenq configuration\n");
return ret;
@@ -1643,6 +1663,12 @@ void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
ena_dev->admin_queue.polling = polling;
}
+void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev,
+ bool polling)
+{
+ ena_dev->admin_queue.auto_polling = polling;
+}
+
int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
{
struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
@@ -1867,7 +1893,7 @@ void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
int ena_com_get_link_params(struct ena_com_dev *ena_dev,
struct ena_admin_get_feat_resp *resp)
{
- return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG);
+ return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG, 0);
}
int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
@@ -1877,7 +1903,7 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
int rc;
rc = ena_com_get_feature(ena_dev, &get_resp,
- ENA_ADMIN_DEVICE_ATTRIBUTES);
+ ENA_ADMIN_DEVICE_ATTRIBUTES, 0);
if (rc)
return rc;
@@ -1885,17 +1911,34 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
sizeof(get_resp.u.dev_attr));
ena_dev->supported_features = get_resp.u.dev_attr.supported_features;
- rc = ena_com_get_feature(ena_dev, &get_resp,
- ENA_ADMIN_MAX_QUEUES_NUM);
- if (rc)
- return rc;
+ if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
+ rc = ena_com_get_feature(ena_dev, &get_resp,
+ ENA_ADMIN_MAX_QUEUES_EXT,
+ ENA_FEATURE_MAX_QUEUE_EXT_VER);
+ if (rc)
+ return rc;
- memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
- sizeof(get_resp.u.max_queue));
- ena_dev->tx_max_header_size = get_resp.u.max_queue.max_header_size;
+ if (get_resp.u.max_queue_ext.version != ENA_FEATURE_MAX_QUEUE_EXT_VER)
+ return -EINVAL;
+
+ memcpy(&get_feat_ctx->max_queue_ext, &get_resp.u.max_queue_ext,
+ sizeof(get_resp.u.max_queue_ext));
+ ena_dev->tx_max_header_size =
+ get_resp.u.max_queue_ext.max_queue_ext.max_tx_header_size;
+ } else {
+ rc = ena_com_get_feature(ena_dev, &get_resp,
+ ENA_ADMIN_MAX_QUEUES_NUM, 0);
+ memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
+ sizeof(get_resp.u.max_queue));
+ ena_dev->tx_max_header_size =
+ get_resp.u.max_queue.max_header_size;
+
+ if (rc)
+ return rc;
+ }
rc = ena_com_get_feature(ena_dev, &get_resp,
- ENA_ADMIN_AENQ_CONFIG);
+ ENA_ADMIN_AENQ_CONFIG, 0);
if (rc)
return rc;
@@ -1903,7 +1946,7 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
sizeof(get_resp.u.aenq));
rc = ena_com_get_feature(ena_dev, &get_resp,
- ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
+ ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
if (rc)
return rc;
@@ -1913,7 +1956,7 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
/* Driver hints isn't a mandatory admin command, so if the
* command isn't supported, set driver hints to 0
*/
- rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS);
+ rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS, 0);
if (!rc)
memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
@@ -1924,7 +1967,7 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
else
return rc;
- rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ);
+ rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ, 0);
if (!rc)
memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
sizeof(get_resp.u.llq));
@@ -2161,7 +2204,7 @@ int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
struct ena_admin_get_feat_resp resp;
ret = ena_com_get_feature(ena_dev, &resp,
- ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
+ ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
if (unlikely(ret)) {
pr_err("Failed to get offload capabilities %d\n", ret);
return ret;
@@ -2190,7 +2233,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
/* Validate hash function is supported */
ret = ena_com_get_feature(ena_dev, &get_resp,
- ENA_ADMIN_RSS_HASH_FUNCTION);
+ ENA_ADMIN_RSS_HASH_FUNCTION, 0);
if (unlikely(ret))
return ret;
@@ -2250,7 +2293,7 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
rc = ena_com_get_feature_ex(ena_dev, &get_resp,
ENA_ADMIN_RSS_HASH_FUNCTION,
rss->hash_key_dma_addr,
- sizeof(*rss->hash_key));
+ sizeof(*rss->hash_key), 0);
if (unlikely(rc))
return rc;
@@ -2302,7 +2345,7 @@ int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
rc = ena_com_get_feature_ex(ena_dev, &get_resp,
ENA_ADMIN_RSS_HASH_FUNCTION,
rss->hash_key_dma_addr,
- sizeof(*rss->hash_key));
+ sizeof(*rss->hash_key), 0);
if (unlikely(rc))
return rc;
@@ -2327,7 +2370,7 @@ int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
rc = ena_com_get_feature_ex(ena_dev, &get_resp,
ENA_ADMIN_RSS_HASH_INPUT,
rss->hash_ctrl_dma_addr,
- sizeof(*rss->hash_ctrl));
+ sizeof(*rss->hash_ctrl), 0);
if (unlikely(rc))
return rc;
@@ -2563,7 +2606,7 @@ int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
rc = ena_com_get_feature_ex(ena_dev, &get_resp,
ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG,
rss->rss_ind_tbl_dma_addr,
- tbl_size);
+ tbl_size, 0);
if (unlikely(rc))
return rc;
@@ -2778,7 +2821,7 @@ int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
int rc;
rc = ena_com_get_feature(ena_dev, &get_resp,
- ENA_ADMIN_INTERRUPT_MODERATION);
+ ENA_ADMIN_INTERRUPT_MODERATION, 0);
if (rc) {
if (rc == -EOPNOTSUPP) {
@@ -2913,8 +2956,8 @@ int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
struct ena_admin_feature_llq_desc *llq_features,
struct ena_llq_configurations *llq_default_cfg)
{
+ struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
int rc;
- int size;
if (!llq_features->max_llq_num) {
ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
@@ -2925,12 +2968,10 @@ int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
if (rc)
return rc;
- /* Validate the descriptor is not too big */
- size = ena_dev->tx_max_header_size;
- size += ena_dev->llq_info.descs_num_before_header *
- sizeof(struct ena_eth_io_tx_desc);
+ ena_dev->tx_max_header_size = llq_info->desc_list_entry_size -
+ (llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc));
- if (unlikely(ena_dev->llq_info.desc_list_entry_size < size)) {
+ if (unlikely(ena_dev->tx_max_header_size == 0)) {
pr_err("the size of the LLQ entry is smaller than needed\n");
return -EINVAL;
}
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h
index 078d6f2b4f39..0d3664fe260d 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_com.h
@@ -101,6 +101,8 @@
#define ENA_HW_HINTS_NO_TIMEOUT 0xFFFF
+#define ENA_FEATURE_MAX_QUEUE_EXT_VER 1
+
enum ena_intr_moder_level {
ENA_INTR_MODER_LOWEST = 0,
ENA_INTR_MODER_LOW,
@@ -159,6 +161,7 @@ struct ena_com_llq_info {
u16 desc_list_entry_size;
u16 descs_num_before_header;
u16 descs_per_entry;
+ u16 max_entries_in_tx_burst;
};
struct ena_com_io_cq {
@@ -238,6 +241,7 @@ struct ena_com_io_sq {
u8 phase;
u8 desc_entry_size;
u8 dma_addr_bits;
+ u16 entries_in_tx_burst_left;
} ____cacheline_aligned;
struct ena_com_admin_cq {
@@ -281,6 +285,9 @@ struct ena_com_admin_queue {
/* Indicate if the admin queue should poll for completion */
bool polling;
+ /* Define if fallback to polling mode should occur */
+ bool auto_polling;
+
u16 curr_cmd_id;
/* Indicate that the ena was initialized and can
@@ -377,6 +384,7 @@ struct ena_com_dev {
struct ena_com_dev_get_features_ctx {
struct ena_admin_queue_feature_desc max_queues;
+ struct ena_admin_queue_ext_feature_desc max_queue_ext;
struct ena_admin_device_attr_feature_desc dev_attr;
struct ena_admin_feature_aenq_desc aenq;
struct ena_admin_feature_offload_desc offload;
@@ -536,6 +544,17 @@ void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling);
*/
bool ena_com_get_ena_admin_polling_mode(struct ena_com_dev *ena_dev);
+/* ena_com_set_admin_auto_polling_mode - Enable automatic fallback to polling mode
+ * @ena_dev: ENA communication layer struct
+ * @polling: Enable/Disable autopolling mode
+ *
+ * Set the autopolling mode.
+ * When autopolling is enabled, the driver falls back to polling if a
+ * completion is available but no interrupt was received.
+ */
+void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev,
+ bool polling);
+
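A minimal usage sketch for the new helper (the call site and the value passed are assumptions for illustration, not part of this patch):

    /* e.g. once the admin queue is initialized, let it silently fall back
     * to polling whenever a completion shows up without its MSI-X interrupt.
     */
    ena_com_set_admin_auto_polling_mode(ena_dev, true);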
/* ena_com_admin_q_comp_intr_handler - admin queue interrupt handler
* @ena_dev: ENA communication layer struct
*
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
index f6c2d3855be8..38046bf0ff44 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
@@ -32,7 +32,7 @@
#include "ena_eth_com.h"
-static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
+static struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
struct ena_com_io_cq *io_cq)
{
struct ena_eth_io_rx_cdesc_base *cdesc;
@@ -59,7 +59,7 @@ static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
return cdesc;
}
-static inline void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq)
+static void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq)
{
u16 tail_masked;
u32 offset;
@@ -71,7 +71,7 @@ static inline void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq)
return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset);
}
-static inline int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
+static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
u8 *bounce_buffer)
{
struct ena_com_llq_info *llq_info = &io_sq->llq_info;
@@ -82,6 +82,17 @@ static inline int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq
dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1);
dst_offset = dst_tail_mask * llq_info->desc_list_entry_size;
+ if (is_llq_max_tx_burst_exists(io_sq)) {
+ if (unlikely(!io_sq->entries_in_tx_burst_left)) {
+ pr_err("Error: trying to send more packets than tx burst allows\n");
+ return -ENOSPC;
+ }
+
+ io_sq->entries_in_tx_burst_left--;
+ pr_debug("decreasing entries_in_tx_burst_left of queue %d to %d\n",
+ io_sq->qid, io_sq->entries_in_tx_burst_left);
+ }
+
/* Make sure everything was written into the bounce buffer before
* writing the bounce buffer to the device
*/
@@ -100,7 +111,7 @@ static inline int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq
return 0;
}
-static inline int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
+static int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
u8 *header_src,
u16 header_len)
{
@@ -131,7 +142,7 @@ static inline int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
return 0;
}
-static inline void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
+static void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
{
struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
u8 *bounce_buffer;
@@ -151,7 +162,7 @@ static inline void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
return sq_desc;
}
-static inline int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)
+static int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)
{
struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
struct ena_com_llq_info *llq_info = &io_sq->llq_info;
@@ -178,7 +189,7 @@ static inline int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)
return 0;
}
-static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)
+static void *get_sq_desc(struct ena_com_io_sq *io_sq)
{
if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
return get_sq_desc_llq(io_sq);
@@ -186,7 +197,7 @@ static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)
return get_sq_desc_regular_queue(io_sq);
}
-static inline int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
+static int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
{
struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
struct ena_com_llq_info *llq_info = &io_sq->llq_info;
@@ -214,7 +225,7 @@ static inline int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
return 0;
}
-static inline int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
+static int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
{
if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
return ena_com_sq_update_llq_tail(io_sq);
@@ -228,7 +239,7 @@ static inline int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
return 0;
}
-static inline struct ena_eth_io_rx_cdesc_base *
+static struct ena_eth_io_rx_cdesc_base *
ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx)
{
idx &= (io_cq->q_depth - 1);
@@ -237,7 +248,7 @@ static inline struct ena_eth_io_rx_cdesc_base *
idx * io_cq->cdesc_entry_size_in_bytes);
}
-static inline u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
+static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
u16 *first_cdesc_idx)
{
struct ena_eth_io_rx_cdesc_base *cdesc;
@@ -274,24 +285,7 @@ static inline u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
return count;
}
-static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,
- struct ena_com_tx_ctx *ena_tx_ctx)
-{
- int rc;
-
- if (ena_tx_ctx->meta_valid) {
- rc = memcmp(&io_sq->cached_tx_meta,
- &ena_tx_ctx->ena_meta,
- sizeof(struct ena_com_tx_meta));
-
- if (unlikely(rc != 0))
- return true;
- }
-
- return false;
-}
-
-static inline int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
+static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
struct ena_com_tx_ctx *ena_tx_ctx)
{
struct ena_eth_io_tx_meta_desc *meta_desc = NULL;
@@ -340,7 +334,7 @@ static inline int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io
return ena_com_sq_update_tail(io_sq);
}
-static inline void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
+static void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
struct ena_eth_io_rx_cdesc_base *cdesc)
{
ena_rx_ctx->l3_proto = cdesc->status &
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
index 340d02b64ca6..77986c0ea52c 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
@@ -125,8 +125,55 @@ static inline bool ena_com_sq_have_enough_space(struct ena_com_io_sq *io_sq,
return ena_com_free_desc(io_sq) > temp;
}
+static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,
+ struct ena_com_tx_ctx *ena_tx_ctx)
+{
+ if (!ena_tx_ctx->meta_valid)
+ return false;
+
+ return !!memcmp(&io_sq->cached_tx_meta,
+ &ena_tx_ctx->ena_meta,
+ sizeof(struct ena_com_tx_meta));
+}
+
+static inline bool is_llq_max_tx_burst_exists(struct ena_com_io_sq *io_sq)
+{
+ return (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) &&
+ io_sq->llq_info.max_entries_in_tx_burst > 0;
+}
+
+static inline bool ena_com_is_doorbell_needed(struct ena_com_io_sq *io_sq,
+ struct ena_com_tx_ctx *ena_tx_ctx)
+{
+ struct ena_com_llq_info *llq_info;
+ int descs_after_first_entry;
+ int num_entries_needed = 1;
+ u16 num_descs;
+
+ if (!is_llq_max_tx_burst_exists(io_sq))
+ return false;
+
+ llq_info = &io_sq->llq_info;
+ num_descs = ena_tx_ctx->num_bufs;
+
+ if (unlikely(ena_com_meta_desc_changed(io_sq, ena_tx_ctx)))
+ ++num_descs;
+
+ if (num_descs > llq_info->descs_num_before_header) {
+ descs_after_first_entry = num_descs - llq_info->descs_num_before_header;
+ num_entries_needed += DIV_ROUND_UP(descs_after_first_entry,
+ llq_info->descs_per_entry);
+ }
+
+ pr_debug("queue: %d num_descs: %d num_entries_needed: %d\n", io_sq->qid,
+ num_descs, num_entries_needed);
+
+ return num_entries_needed > io_sq->entries_in_tx_burst_left;
+}
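A worked example of the entry accounting above (the concrete LLQ parameters are assumptions, not taken from this patch):

    /* Assume descs_num_before_header = 2 and descs_per_entry = 4.
     * A packet with num_bufs = 6 whose meta descriptor changed gives
     * num_descs = 7, so num_entries_needed = 1 + DIV_ROUND_UP(7 - 2, 4) = 3.
     * The early doorbell is requested only if 3 > entries_in_tx_burst_left.
     */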
+
static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
{
+ u16 max_entries_in_tx_burst = io_sq->llq_info.max_entries_in_tx_burst;
u16 tail = io_sq->tail;
pr_debug("write submission queue doorbell for queue: %d tail: %d\n",
@@ -134,6 +181,12 @@ static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
writel(tail, io_sq->db_addr);
+ if (is_llq_max_tx_burst_exists(io_sq)) {
+ pr_debug("reset available entries in tx burst for queue %d to %d\n",
+ io_sq->qid, max_entries_in_tx_burst);
+ io_sq->entries_in_tx_burst_left = max_entries_in_tx_burst;
+ }
+
return 0;
}
@@ -142,15 +195,17 @@ static inline int ena_com_update_dev_comp_head(struct ena_com_io_cq *io_cq)
u16 unreported_comp, head;
bool need_update;
- head = io_cq->head;
- unreported_comp = head - io_cq->last_head_update;
- need_update = unreported_comp > (io_cq->q_depth / ENA_COMP_HEAD_THRESH);
-
- if (io_cq->cq_head_db_reg && need_update) {
- pr_debug("Write completion queue doorbell for queue %d: head: %d\n",
- io_cq->qid, head);
- writel(head, io_cq->cq_head_db_reg);
- io_cq->last_head_update = head;
+ if (unlikely(io_cq->cq_head_db_reg)) {
+ head = io_cq->head;
+ unreported_comp = head - io_cq->last_head_update;
+ need_update = unreported_comp > (io_cq->q_depth / ENA_COMP_HEAD_THRESH);
+
+ if (unlikely(need_update)) {
+ pr_debug("Write completion queue doorbell for queue %d: head: %d\n",
+ io_cq->qid, head);
+ writel(head, io_cq->cq_head_db_reg);
+ io_cq->last_head_update = head;
+ }
}
return 0;
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index fe596bc30a96..b997c3ce9e2b 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -88,13 +88,14 @@ static const struct ena_stats ena_stats_tx_strings[] = {
static const struct ena_stats ena_stats_rx_strings[] = {
ENA_STAT_RX_ENTRY(cnt),
ENA_STAT_RX_ENTRY(bytes),
+ ENA_STAT_RX_ENTRY(rx_copybreak_pkt),
+ ENA_STAT_RX_ENTRY(csum_good),
ENA_STAT_RX_ENTRY(refil_partial),
ENA_STAT_RX_ENTRY(bad_csum),
ENA_STAT_RX_ENTRY(page_alloc_fail),
ENA_STAT_RX_ENTRY(skb_alloc_fail),
ENA_STAT_RX_ENTRY(dma_mapping_err),
ENA_STAT_RX_ENTRY(bad_desc_num),
- ENA_STAT_RX_ENTRY(rx_copybreak_pkt),
ENA_STAT_RX_ENTRY(bad_req_id),
ENA_STAT_RX_ENTRY(empty_rx_ring),
ENA_STAT_RX_ENTRY(csum_unchecked),
@@ -447,13 +448,32 @@ static void ena_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
struct ena_adapter *adapter = netdev_priv(netdev);
- struct ena_ring *tx_ring = &adapter->tx_ring[0];
- struct ena_ring *rx_ring = &adapter->rx_ring[0];
- ring->rx_max_pending = rx_ring->ring_size;
- ring->tx_max_pending = tx_ring->ring_size;
- ring->rx_pending = rx_ring->ring_size;
- ring->tx_pending = tx_ring->ring_size;
+ ring->tx_max_pending = adapter->max_tx_ring_size;
+ ring->rx_max_pending = adapter->max_rx_ring_size;
+ ring->tx_pending = adapter->tx_ring[0].ring_size;
+ ring->rx_pending = adapter->rx_ring[0].ring_size;
+}
+
+static int ena_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+ u32 new_tx_size, new_rx_size;
+
+ new_tx_size = ring->tx_pending < ENA_MIN_RING_SIZE ?
+ ENA_MIN_RING_SIZE : ring->tx_pending;
+ new_tx_size = rounddown_pow_of_two(new_tx_size);
+
+ new_rx_size = ring->rx_pending < ENA_MIN_RING_SIZE ?
+ ENA_MIN_RING_SIZE : ring->rx_pending;
+ new_rx_size = rounddown_pow_of_two(new_rx_size);
+
+ if (new_tx_size == adapter->requested_tx_ring_size &&
+ new_rx_size == adapter->requested_rx_ring_size)
+ return 0;
+
+ return ena_update_queue_sizes(adapter, new_tx_size, new_rx_size);
}
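For illustration, with ENA_MIN_RING_SIZE = 256 and assumed user input (the numbers are hypothetical):

    /* ring->tx_pending = 700: already >= 256, rounddown_pow_of_two(700) = 512
     * ring->rx_pending = 100: bumped to 256, rounddown_pow_of_two(256) = 256
     * The request becomes ena_update_queue_sizes(adapter, 512, 256) unless
     * both values already equal the currently requested ring sizes.
     */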
static u32 ena_flow_hash_to_flow_type(u16 hash_fields)
@@ -807,6 +827,7 @@ static const struct ethtool_ops ena_ethtool_ops = {
.get_coalesce = ena_get_coalesce,
.set_coalesce = ena_set_coalesce,
.get_ringparam = ena_get_ringparam,
+ .set_ringparam = ena_set_ringparam,
.get_sset_count = ena_get_sset_count,
.get_strings = ena_get_strings,
.get_ethtool_stats = ena_get_ethtool_stats,
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 9c83642922c7..664e3ed97ea9 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -182,7 +182,7 @@ static void ena_init_io_rings(struct ena_adapter *adapter)
ena_init_io_rings_common(adapter, rxr, i);
/* TX specific ring state */
- txr->ring_size = adapter->tx_ring_size;
+ txr->ring_size = adapter->requested_tx_ring_size;
txr->tx_max_header_size = ena_dev->tx_max_header_size;
txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;
txr->sgl_size = adapter->max_tx_sgl_size;
@@ -190,7 +190,7 @@ static void ena_init_io_rings(struct ena_adapter *adapter)
ena_com_get_nonadaptive_moderation_interval_tx(ena_dev);
/* RX specific ring state */
- rxr->ring_size = adapter->rx_ring_size;
+ rxr->ring_size = adapter->requested_rx_ring_size;
rxr->rx_copybreak = adapter->rx_copybreak;
rxr->sgl_size = adapter->max_rx_sgl_size;
rxr->smoothed_interval =
@@ -228,11 +228,11 @@ static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
}
size = sizeof(u16) * tx_ring->ring_size;
- tx_ring->free_tx_ids = vzalloc_node(size, node);
- if (!tx_ring->free_tx_ids) {
- tx_ring->free_tx_ids = vzalloc(size);
- if (!tx_ring->free_tx_ids)
- goto err_free_tx_ids;
+ tx_ring->free_ids = vzalloc_node(size, node);
+ if (!tx_ring->free_ids) {
+ tx_ring->free_ids = vzalloc(size);
+ if (!tx_ring->free_ids)
+ goto err_tx_free_ids;
}
size = tx_ring->tx_max_header_size;
@@ -245,7 +245,7 @@ static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
/* Req id ring for TX out of order completions */
for (i = 0; i < tx_ring->ring_size; i++)
- tx_ring->free_tx_ids[i] = i;
+ tx_ring->free_ids[i] = i;
/* Reset tx statistics */
memset(&tx_ring->tx_stats, 0x0, sizeof(tx_ring->tx_stats));
@@ -256,9 +256,9 @@ static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
return 0;
err_push_buf_intermediate_buf:
- vfree(tx_ring->free_tx_ids);
- tx_ring->free_tx_ids = NULL;
-err_free_tx_ids:
+ vfree(tx_ring->free_ids);
+ tx_ring->free_ids = NULL;
+err_tx_free_ids:
vfree(tx_ring->tx_buffer_info);
tx_ring->tx_buffer_info = NULL;
err_tx_buffer_info:
@@ -278,8 +278,8 @@ static void ena_free_tx_resources(struct ena_adapter *adapter, int qid)
vfree(tx_ring->tx_buffer_info);
tx_ring->tx_buffer_info = NULL;
- vfree(tx_ring->free_tx_ids);
- tx_ring->free_tx_ids = NULL;
+ vfree(tx_ring->free_ids);
+ tx_ring->free_ids = NULL;
vfree(tx_ring->push_buf_intermediate_buf);
tx_ring->push_buf_intermediate_buf = NULL;
@@ -326,7 +326,7 @@ static void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
ena_free_tx_resources(adapter, i);
}
-static inline int validate_rx_req_id(struct ena_ring *rx_ring, u16 req_id)
+static int validate_rx_req_id(struct ena_ring *rx_ring, u16 req_id)
{
if (likely(req_id < rx_ring->ring_size))
return 0;
@@ -377,10 +377,10 @@ static int ena_setup_rx_resources(struct ena_adapter *adapter,
}
size = sizeof(u16) * rx_ring->ring_size;
- rx_ring->free_rx_ids = vzalloc_node(size, node);
- if (!rx_ring->free_rx_ids) {
- rx_ring->free_rx_ids = vzalloc(size);
- if (!rx_ring->free_rx_ids) {
+ rx_ring->free_ids = vzalloc_node(size, node);
+ if (!rx_ring->free_ids) {
+ rx_ring->free_ids = vzalloc(size);
+ if (!rx_ring->free_ids) {
vfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL;
return -ENOMEM;
@@ -389,7 +389,7 @@ static int ena_setup_rx_resources(struct ena_adapter *adapter,
/* Req id ring for receiving RX pkts out of order */
for (i = 0; i < rx_ring->ring_size; i++)
- rx_ring->free_rx_ids[i] = i;
+ rx_ring->free_ids[i] = i;
/* Reset rx statistics */
memset(&rx_ring->rx_stats, 0x0, sizeof(rx_ring->rx_stats));
@@ -415,8 +415,8 @@ static void ena_free_rx_resources(struct ena_adapter *adapter,
vfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL;
- vfree(rx_ring->free_rx_ids);
- rx_ring->free_rx_ids = NULL;
+ vfree(rx_ring->free_ids);
+ rx_ring->free_ids = NULL;
}
/* ena_setup_all_rx_resources - allocate I/O Rx queues resources for all queues
@@ -460,7 +460,7 @@ static void ena_free_all_io_rx_resources(struct ena_adapter *adapter)
ena_free_rx_resources(adapter, i);
}
-static inline int ena_alloc_rx_page(struct ena_ring *rx_ring,
+static int ena_alloc_rx_page(struct ena_ring *rx_ring,
struct ena_rx_buffer *rx_info, gfp_t gfp)
{
struct ena_com_buf *ena_buf;
@@ -531,7 +531,7 @@ static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
for (i = 0; i < num; i++) {
struct ena_rx_buffer *rx_info;
- req_id = rx_ring->free_rx_ids[next_to_use];
+ req_id = rx_ring->free_ids[next_to_use];
rc = validate_rx_req_id(rx_ring, req_id);
if (unlikely(rc < 0))
break;
@@ -594,7 +594,6 @@ static void ena_free_rx_bufs(struct ena_adapter *adapter,
/* ena_refill_all_rx_bufs - allocate all queues Rx buffers
* @adapter: board private structure
- *
*/
static void ena_refill_all_rx_bufs(struct ena_adapter *adapter)
{
@@ -621,7 +620,7 @@ static void ena_free_all_rx_bufs(struct ena_adapter *adapter)
ena_free_rx_bufs(adapter, i);
}
-static inline void ena_unmap_tx_skb(struct ena_ring *tx_ring,
+static void ena_unmap_tx_skb(struct ena_ring *tx_ring,
struct ena_tx_buffer *tx_info)
{
struct ena_com_buf *ena_buf;
@@ -797,7 +796,7 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
tx_pkts++;
total_done += tx_info->tx_descs;
- tx_ring->free_tx_ids[next_to_clean] = req_id;
+ tx_ring->free_ids[next_to_clean] = req_id;
next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
tx_ring->ring_size);
}
@@ -911,7 +910,7 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
skb_put(skb, len);
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
- rx_ring->free_rx_ids[*next_to_clean] = req_id;
+ rx_ring->free_ids[*next_to_clean] = req_id;
*next_to_clean = ENA_RX_RING_IDX_ADD(*next_to_clean, descs,
rx_ring->ring_size);
return skb;
@@ -935,7 +934,7 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
rx_info->page = NULL;
- rx_ring->free_rx_ids[*next_to_clean] = req_id;
+ rx_ring->free_ids[*next_to_clean] = req_id;
*next_to_clean =
ENA_RX_RING_IDX_NEXT(*next_to_clean,
rx_ring->ring_size);
@@ -956,7 +955,7 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
* @ena_rx_ctx: received packet context/metadata
* @skb: skb currently being received and modified
*/
-static inline void ena_rx_checksum(struct ena_ring *rx_ring,
+static void ena_rx_checksum(struct ena_ring *rx_ring,
struct ena_com_rx_ctx *ena_rx_ctx,
struct sk_buff *skb)
{
@@ -1001,6 +1000,9 @@ static inline void ena_rx_checksum(struct ena_ring *rx_ring,
if (likely(ena_rx_ctx->l4_csum_checked)) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
+ u64_stats_update_begin(&rx_ring->syncp);
+ rx_ring->rx_stats.csum_good++;
+ u64_stats_update_end(&rx_ring->syncp);
} else {
u64_stats_update_begin(&rx_ring->syncp);
rx_ring->rx_stats.csum_unchecked++;
@@ -1088,7 +1090,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
/* exit if we failed to retrieve a buffer */
if (unlikely(!skb)) {
for (i = 0; i < ena_rx_ctx.descs; i++) {
- rx_ring->free_tx_ids[next_to_clean] =
+ rx_ring->free_ids[next_to_clean] =
rx_ring->ena_bufs[i].req_id;
next_to_clean =
ENA_RX_RING_IDX_NEXT(next_to_clean,
@@ -1153,7 +1155,7 @@ error:
return 0;
}
-inline void ena_adjust_intr_moderation(struct ena_ring *rx_ring,
+void ena_adjust_intr_moderation(struct ena_ring *rx_ring,
struct ena_ring *tx_ring)
{
/* We apply adaptive moderation on Rx path only.
@@ -1172,7 +1174,7 @@ inline void ena_adjust_intr_moderation(struct ena_ring *rx_ring,
rx_ring->per_napi_bytes = 0;
}
-static inline void ena_unmask_interrupt(struct ena_ring *tx_ring,
+static void ena_unmask_interrupt(struct ena_ring *tx_ring,
struct ena_ring *rx_ring)
{
struct ena_eth_io_intr_reg intr_reg;
@@ -1192,7 +1194,7 @@ static inline void ena_unmask_interrupt(struct ena_ring *tx_ring,
ena_com_unmask_intr(rx_ring->ena_com_io_cq, &intr_reg);
}
-static inline void ena_update_ring_numa_node(struct ena_ring *tx_ring,
+static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
struct ena_ring *rx_ring)
{
int cpu = get_cpu();
@@ -1635,7 +1637,7 @@ static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
ctx.qid = ena_qid;
ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
ctx.msix_vector = msix_vector;
- ctx.queue_size = adapter->tx_ring_size;
+ ctx.queue_size = tx_ring->ring_size;
ctx.numa_node = cpu_to_node(tx_ring->cpu);
rc = ena_com_create_io_queue(ena_dev, &ctx);
@@ -1702,7 +1704,7 @@ static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
ctx.msix_vector = msix_vector;
- ctx.queue_size = adapter->rx_ring_size;
+ ctx.queue_size = rx_ring->ring_size;
ctx.numa_node = cpu_to_node(rx_ring->cpu);
rc = ena_com_create_io_queue(ena_dev, &ctx);
@@ -1749,6 +1751,112 @@ create_err:
return rc;
}
+static void set_io_rings_size(struct ena_adapter *adapter,
+ int new_tx_size, int new_rx_size)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ adapter->tx_ring[i].ring_size = new_tx_size;
+ adapter->rx_ring[i].ring_size = new_rx_size;
+ }
+}
+
+/* This function allows queue allocation to back off when the system is
+ * low on memory. If there is not enough memory to allocate I/O queues,
+ * the driver tries to allocate smaller queues.
+ *
+ * The backoff algorithm is as follows (a worked example follows the
+ * function):
+ * 1. Try to allocate the TX and RX queues.
+ * 1.1. If successful, return success.
+ *
+ * 2. Halve the size of the larger of the RX and TX queues (or both if they are the same size).
+ *
+ * 3. If either TX or RX is now smaller than 256 (ENA_MIN_RING_SIZE),
+ * 3.1. return failure.
+ * 4. Otherwise,
+ * 4.1. go back to step 1.
+ */
+static int create_queues_with_size_backoff(struct ena_adapter *adapter)
+{
+ int rc, cur_rx_ring_size, cur_tx_ring_size;
+ int new_rx_ring_size, new_tx_ring_size;
+
+ /* Current queue sizes might be smaller than the requested ones due
+ * to past queue allocation failures.
+ */
+ set_io_rings_size(adapter, adapter->requested_tx_ring_size,
+ adapter->requested_rx_ring_size);
+
+ while (1) {
+ rc = ena_setup_all_tx_resources(adapter);
+ if (rc)
+ goto err_setup_tx;
+
+ rc = ena_create_all_io_tx_queues(adapter);
+ if (rc)
+ goto err_create_tx_queues;
+
+ rc = ena_setup_all_rx_resources(adapter);
+ if (rc)
+ goto err_setup_rx;
+
+ rc = ena_create_all_io_rx_queues(adapter);
+ if (rc)
+ goto err_create_rx_queues;
+
+ return 0;
+
+err_create_rx_queues:
+ ena_free_all_io_rx_resources(adapter);
+err_setup_rx:
+ ena_destroy_all_tx_queues(adapter);
+err_create_tx_queues:
+ ena_free_all_io_tx_resources(adapter);
+err_setup_tx:
+ if (rc != -ENOMEM) {
+ netif_err(adapter, ifup, adapter->netdev,
+ "Queue creation failed with error code %d\n",
+ rc);
+ return rc;
+ }
+
+ cur_tx_ring_size = adapter->tx_ring[0].ring_size;
+ cur_rx_ring_size = adapter->rx_ring[0].ring_size;
+
+ netif_err(adapter, ifup, adapter->netdev,
+ "Not enough memory to create queues with sizes TX=%d, RX=%d\n",
+ cur_tx_ring_size, cur_rx_ring_size);
+
+ new_tx_ring_size = cur_tx_ring_size;
+ new_rx_ring_size = cur_rx_ring_size;
+
+ /* Decrease the size of the larger queue, or
+ * decrease both if they are the same size.
+ */
+ if (cur_rx_ring_size <= cur_tx_ring_size)
+ new_tx_ring_size = cur_tx_ring_size / 2;
+ if (cur_rx_ring_size >= cur_tx_ring_size)
+ new_rx_ring_size = cur_rx_ring_size / 2;
+
+ if (new_tx_ring_size < ENA_MIN_RING_SIZE ||
+ new_rx_ring_size < ENA_MIN_RING_SIZE) {
+ netif_err(adapter, ifup, adapter->netdev,
+ "Queue creation failed with the smallest possible queue size of %d for both queues. Not retrying with smaller queues\n",
+ ENA_MIN_RING_SIZE);
+ return rc;
+ }
+
+ netif_err(adapter, ifup, adapter->netdev,
+ "Retrying queue creation with sizes TX=%d, RX=%d\n",
+ new_tx_ring_size,
+ new_rx_ring_size);
+
+ set_io_rings_size(adapter, new_tx_ring_size,
+ new_rx_ring_size);
+ }
+}
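Worked example of the retry sequence (the starting sizes and the repeated allocation failures are assumptions for illustration):

    /* requested TX = 1024, RX = 1024, ENA_MIN_RING_SIZE = 256:
     *   attempt 1: TX = 1024, RX = 1024  -> -ENOMEM
     *   attempt 2: TX =  512, RX =  512  -> -ENOMEM  (equal sizes, both halved)
     *   attempt 3: TX =  256, RX =  256  -> -ENOMEM
     *   the next step would need 128 < ENA_MIN_RING_SIZE, so the last rc
     *   (-ENOMEM) is returned to the caller.
     */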
+
static int ena_up(struct ena_adapter *adapter)
{
int rc, i;
@@ -1768,25 +1876,9 @@ static int ena_up(struct ena_adapter *adapter)
if (rc)
goto err_req_irq;
- /* allocate transmit descriptors */
- rc = ena_setup_all_tx_resources(adapter);
+ rc = create_queues_with_size_backoff(adapter);
if (rc)
- goto err_setup_tx;
-
- /* allocate receive descriptors */
- rc = ena_setup_all_rx_resources(adapter);
- if (rc)
- goto err_setup_rx;
-
- /* Create TX queues */
- rc = ena_create_all_io_tx_queues(adapter);
- if (rc)
- goto err_create_tx_queues;
-
- /* Create RX queues */
- rc = ena_create_all_io_rx_queues(adapter);
- if (rc)
- goto err_create_rx_queues;
+ goto err_create_queues_with_backoff;
rc = ena_up_complete(adapter);
if (rc)
@@ -1815,14 +1907,11 @@ static int ena_up(struct ena_adapter *adapter)
return rc;
err_up:
- ena_destroy_all_rx_queues(adapter);
-err_create_rx_queues:
ena_destroy_all_tx_queues(adapter);
-err_create_tx_queues:
- ena_free_all_io_rx_resources(adapter);
-err_setup_rx:
ena_free_all_io_tx_resources(adapter);
-err_setup_tx:
+ ena_destroy_all_rx_queues(adapter);
+ ena_free_all_io_rx_resources(adapter);
+err_create_queues_with_backoff:
ena_free_io_irq(adapter);
err_req_irq:
ena_del_napi(adapter);
@@ -1942,6 +2031,20 @@ static int ena_close(struct net_device *netdev)
return 0;
}
+int ena_update_queue_sizes(struct ena_adapter *adapter,
+ u32 new_tx_size,
+ u32 new_rx_size)
+{
+ bool dev_up;
+
+ dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
+ ena_close(adapter->netdev);
+ adapter->requested_tx_ring_size = new_tx_size;
+ adapter->requested_rx_ring_size = new_rx_size;
+ ena_init_io_rings(adapter);
+ return dev_up ? ena_up(adapter) : 0;
+}
+
static void ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx, struct sk_buff *skb)
{
u32 mss = skb_shinfo(skb)->gso_size;
@@ -2152,7 +2255,7 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb_tx_timestamp(skb);
next_to_use = tx_ring->next_to_use;
- req_id = tx_ring->free_tx_ids[next_to_use];
+ req_id = tx_ring->free_ids[next_to_use];
tx_info = &tx_ring->tx_buffer_info[req_id];
tx_info->num_of_bufs = 0;
@@ -2172,6 +2275,13 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* set flags and meta data */
ena_tx_csum(&ena_tx_ctx, skb);
+ if (unlikely(ena_com_is_doorbell_needed(tx_ring->ena_com_io_sq, &ena_tx_ctx))) {
+ netif_dbg(adapter, tx_queued, dev,
+ "llq tx max burst size of queue %d achieved, writing doorbell to send burst\n",
+ qid);
+ ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
+ }
+
/* prepare the packet's descriptors to dma engine */
rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, &ena_tx_ctx,
&nb_hw_desc);
@@ -2447,13 +2557,6 @@ static int ena_device_validate_params(struct ena_adapter *adapter,
return -EINVAL;
}
- if ((get_feat_ctx->max_queues.max_cq_num < adapter->num_queues) ||
- (get_feat_ctx->max_queues.max_sq_num < adapter->num_queues)) {
- netif_err(adapter, drv, netdev,
- "Error, device doesn't support enough queues\n");
- return -EINVAL;
- }
-
if (get_feat_ctx->dev_attr.max_mtu < netdev->mtu) {
netif_err(adapter, drv, netdev,
"Error, device max mtu is smaller than netdev MTU\n");
@@ -3027,18 +3130,32 @@ static int ena_calc_io_queue_num(struct pci_dev *pdev,
struct ena_com_dev *ena_dev,
struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
- int io_sq_num, io_queue_num;
+ int io_tx_sq_num, io_tx_cq_num, io_rx_num, io_queue_num;
- /* In case of LLQ use the llq number in the get feature cmd */
+ if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
+ struct ena_admin_queue_ext_feature_fields *max_queue_ext =
+ &get_feat_ctx->max_queue_ext.max_queue_ext;
+ io_rx_num = min_t(int, max_queue_ext->max_rx_sq_num,
+ max_queue_ext->max_rx_cq_num);
+
+ io_tx_sq_num = max_queue_ext->max_tx_sq_num;
+ io_tx_cq_num = max_queue_ext->max_tx_cq_num;
+ } else {
+ struct ena_admin_queue_feature_desc *max_queues =
+ &get_feat_ctx->max_queues;
+ io_tx_sq_num = max_queues->max_sq_num;
+ io_tx_cq_num = max_queues->max_cq_num;
+ io_rx_num = min_t(int, io_tx_sq_num, io_tx_cq_num);
+ }
+
+ /* In case of LLQ use the llq fields for the tx SQ/CQ */
if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
- io_sq_num = get_feat_ctx->llq.max_llq_num;
- else
- io_sq_num = get_feat_ctx->max_queues.max_sq_num;
+ io_tx_sq_num = get_feat_ctx->llq.max_llq_num;
io_queue_num = min_t(int, num_online_cpus(), ENA_MAX_NUM_IO_QUEUES);
- io_queue_num = min_t(int, io_queue_num, io_sq_num);
- io_queue_num = min_t(int, io_queue_num,
- get_feat_ctx->max_queues.max_cq_num);
+ io_queue_num = min_t(int, io_queue_num, io_rx_num);
+ io_queue_num = min_t(int, io_queue_num, io_tx_sq_num);
+ io_queue_num = min_t(int, io_queue_num, io_tx_cq_num);
/* 1 IRQ for mgmnt and 1 IRQ for each IO direction */
io_queue_num = min_t(int, io_queue_num, pci_msix_vec_count(pdev) - 1);
if (unlikely(!io_queue_num)) {
@@ -3212,7 +3329,7 @@ static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
pci_release_selected_regions(pdev, release_bars);
}
-static inline void set_default_llq_configurations(struct ena_llq_configurations *llq_config)
+static void set_default_llq_configurations(struct ena_llq_configurations *llq_config)
{
llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
@@ -3221,36 +3338,70 @@ static inline void set_default_llq_configurations(struct ena_llq_configurations
llq_config->llq_ring_entry_size_value = 128;
}
-static int ena_calc_queue_size(struct pci_dev *pdev,
- struct ena_com_dev *ena_dev,
- u16 *max_tx_sgl_size,
- u16 *max_rx_sgl_size,
- struct ena_com_dev_get_features_ctx *get_feat_ctx)
+static int ena_calc_queue_size(struct ena_calc_queue_size_ctx *ctx)
{
- u32 queue_size = ENA_DEFAULT_RING_SIZE;
+ struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq;
+ struct ena_com_dev *ena_dev = ctx->ena_dev;
+ u32 tx_queue_size = ENA_DEFAULT_RING_SIZE;
+ u32 rx_queue_size = ENA_DEFAULT_RING_SIZE;
+ u32 max_tx_queue_size;
+ u32 max_rx_queue_size;
- queue_size = min_t(u32, queue_size,
- get_feat_ctx->max_queues.max_cq_depth);
- queue_size = min_t(u32, queue_size,
- get_feat_ctx->max_queues.max_sq_depth);
+ if (ctx->ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
+ struct ena_admin_queue_ext_feature_fields *max_queue_ext =
+ &ctx->get_feat_ctx->max_queue_ext.max_queue_ext;
+ max_rx_queue_size = min_t(u32, max_queue_ext->max_rx_cq_depth,
+ max_queue_ext->max_rx_sq_depth);
+ max_tx_queue_size = max_queue_ext->max_tx_cq_depth;
- if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
- queue_size = min_t(u32, queue_size,
- get_feat_ctx->llq.max_llq_depth);
+ if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
+ max_tx_queue_size = min_t(u32, max_tx_queue_size,
+ llq->max_llq_depth);
+ else
+ max_tx_queue_size = min_t(u32, max_tx_queue_size,
+ max_queue_ext->max_tx_sq_depth);
- queue_size = rounddown_pow_of_two(queue_size);
+ ctx->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
+ max_queue_ext->max_per_packet_tx_descs);
+ ctx->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
+ max_queue_ext->max_per_packet_rx_descs);
+ } else {
+ struct ena_admin_queue_feature_desc *max_queues =
+ &ctx->get_feat_ctx->max_queues;
+ max_rx_queue_size = min_t(u32, max_queues->max_cq_depth,
+ max_queues->max_sq_depth);
+ max_tx_queue_size = max_queues->max_cq_depth;
+
+ if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
+ max_tx_queue_size = min_t(u32, max_tx_queue_size,
+ llq->max_llq_depth);
+ else
+ max_tx_queue_size = min_t(u32, max_tx_queue_size,
+ max_queues->max_sq_depth);
- if (unlikely(!queue_size)) {
- dev_err(&pdev->dev, "Invalid queue size\n");
- return -EFAULT;
+ ctx->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
+ max_queues->max_packet_tx_descs);
+ ctx->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
+ max_queues->max_packet_rx_descs);
}
- *max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
- get_feat_ctx->max_queues.max_packet_tx_descs);
- *max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
- get_feat_ctx->max_queues.max_packet_rx_descs);
+ max_tx_queue_size = rounddown_pow_of_two(max_tx_queue_size);
+ max_rx_queue_size = rounddown_pow_of_two(max_rx_queue_size);
+
+ tx_queue_size = clamp_val(tx_queue_size, ENA_MIN_RING_SIZE,
+ max_tx_queue_size);
+ rx_queue_size = clamp_val(rx_queue_size, ENA_MIN_RING_SIZE,
+ max_rx_queue_size);
- return queue_size;
+ tx_queue_size = rounddown_pow_of_two(tx_queue_size);
+ rx_queue_size = rounddown_pow_of_two(rx_queue_size);
+
+ ctx->max_tx_queue_size = max_tx_queue_size;
+ ctx->max_rx_queue_size = max_rx_queue_size;
+ ctx->tx_queue_size = tx_queue_size;
+ ctx->rx_queue_size = rx_queue_size;
+
+ return 0;
}
/* ena_probe - Device Initialization Routine
@@ -3266,23 +3417,19 @@ static int ena_calc_queue_size(struct pci_dev *pdev,
static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct ena_com_dev_get_features_ctx get_feat_ctx;
- static int version_printed;
- struct net_device *netdev;
- struct ena_adapter *adapter;
+ struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 };
struct ena_llq_configurations llq_config;
struct ena_com_dev *ena_dev = NULL;
- char *queue_type_str;
- static int adapters_found;
+ struct ena_adapter *adapter;
int io_queue_num, bars, rc;
- int queue_size;
- u16 tx_sgl_size = 0;
- u16 rx_sgl_size = 0;
+ struct net_device *netdev;
+ static int adapters_found;
+ char *queue_type_str;
bool wd_state;
dev_dbg(&pdev->dev, "%s\n", __func__);
- if (version_printed++ == 0)
- dev_info(&pdev->dev, "%s", version);
+ dev_info_once(&pdev->dev, "%s", version);
rc = pci_enable_device_mem(pdev);
if (rc) {
@@ -3334,20 +3481,25 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_device_destroy;
}
+ calc_queue_ctx.ena_dev = ena_dev;
+ calc_queue_ctx.get_feat_ctx = &get_feat_ctx;
+ calc_queue_ctx.pdev = pdev;
+
/* Initial Tx interrupt delay. Assumes 1 usec granularity.
* Updated during device initialization with the real granularity.
*/
ena_dev->intr_moder_tx_interval = ENA_INTR_INITIAL_TX_INTERVAL_USECS;
io_queue_num = ena_calc_io_queue_num(pdev, ena_dev, &get_feat_ctx);
- queue_size = ena_calc_queue_size(pdev, ena_dev, &tx_sgl_size,
- &rx_sgl_size, &get_feat_ctx);
- if ((queue_size <= 0) || (io_queue_num <= 0)) {
+ rc = ena_calc_queue_size(&calc_queue_ctx);
+ if (rc || io_queue_num <= 0) {
rc = -EFAULT;
goto err_device_destroy;
}
- dev_info(&pdev->dev, "creating %d io queues. queue size: %d. LLQ is %s\n",
- io_queue_num, queue_size,
+ dev_info(&pdev->dev, "creating %d io queues. rx queue size: %d tx queue size. %d LLQ is %s\n",
+ io_queue_num,
+ calc_queue_ctx.rx_queue_size,
+ calc_queue_ctx.tx_queue_size,
(ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) ?
"ENABLED" : "DISABLED");
@@ -3373,11 +3525,12 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
adapter->reset_reason = ENA_REGS_RESET_NORMAL;
- adapter->tx_ring_size = queue_size;
- adapter->rx_ring_size = queue_size;
-
- adapter->max_tx_sgl_size = tx_sgl_size;
- adapter->max_rx_sgl_size = rx_sgl_size;
+ adapter->requested_tx_ring_size = calc_queue_ctx.tx_queue_size;
+ adapter->requested_rx_ring_size = calc_queue_ctx.rx_queue_size;
+ adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size;
+ adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size;
+ adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size;
+ adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size;
adapter->num_queues = io_queue_num;
adapter->last_monitored_tx_qid = 0;
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 63870072cbbd..efbcffd22215 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -44,8 +44,8 @@
#include "ena_eth_com.h"
#define DRV_MODULE_VER_MAJOR 2
-#define DRV_MODULE_VER_MINOR 0
-#define DRV_MODULE_VER_SUBMINOR 3
+#define DRV_MODULE_VER_MINOR 1
+#define DRV_MODULE_VER_SUBMINOR 0
#define DRV_MODULE_NAME "ena"
#ifndef DRV_MODULE_VERSION
@@ -79,6 +79,7 @@
#define ENA_BAR_MASK (BIT(ENA_REG_BAR) | BIT(ENA_MEM_BAR))
#define ENA_DEFAULT_RING_SIZE (1024)
+#define ENA_MIN_RING_SIZE (256)
#define ENA_TX_WAKEUP_THRESH (MAX_SKB_FRAGS + 2)
#define ENA_DEFAULT_RX_COPYBREAK (256 - NET_IP_ALIGN)
@@ -154,6 +155,18 @@ struct ena_napi {
u32 qid;
};
+struct ena_calc_queue_size_ctx {
+ struct ena_com_dev_get_features_ctx *get_feat_ctx;
+ struct ena_com_dev *ena_dev;
+ struct pci_dev *pdev;
+ u16 tx_queue_size;
+ u16 rx_queue_size;
+ u16 max_tx_queue_size;
+ u16 max_rx_queue_size;
+ u16 max_tx_sgl_size;
+ u16 max_rx_sgl_size;
+};
+
struct ena_tx_buffer {
struct sk_buff *skb;
/* num of ena desc for this specific skb
@@ -208,26 +221,24 @@ struct ena_stats_tx {
struct ena_stats_rx {
u64 cnt;
u64 bytes;
+ u64 rx_copybreak_pkt;
+ u64 csum_good;
u64 refil_partial;
u64 bad_csum;
u64 page_alloc_fail;
u64 skb_alloc_fail;
u64 dma_mapping_err;
u64 bad_desc_num;
- u64 rx_copybreak_pkt;
u64 bad_req_id;
u64 empty_rx_ring;
u64 csum_unchecked;
};
struct ena_ring {
- union {
- /* Holds the empty requests for TX/RX
- * out of order completions
- */
- u16 *free_tx_ids;
- u16 *free_rx_ids;
- };
+ /* Holds the empty requests for TX/RX
+ * out of order completions
+ */
+ u16 *free_ids;
union {
struct ena_tx_buffer *tx_buffer_info;
@@ -321,8 +332,11 @@ struct ena_adapter {
u32 tx_usecs, rx_usecs; /* interrupt moderation */
u32 tx_frames, rx_frames; /* interrupt moderation */
- u32 tx_ring_size;
- u32 rx_ring_size;
+ u32 requested_tx_ring_size;
+ u32 requested_rx_ring_size;
+
+ u32 max_tx_ring_size;
+ u32 max_rx_ring_size;
u32 msg_enable;
@@ -372,6 +386,10 @@ void ena_dump_stats_to_dmesg(struct ena_adapter *adapter);
void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf);
+int ena_update_queue_sizes(struct ena_adapter *adapter,
+ u32 new_tx_size,
+ u32 new_rx_size);
+
int ena_get_sset_count(struct net_device *netdev, int sset);
#endif /* !(ENA_H) */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
index 173be45463ee..02f1b70c4e25 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
@@ -9,6 +9,8 @@
#ifndef AQ_CFG_H
#define AQ_CFG_H
+#include <generated/utsrelease.h>
+
#define AQ_CFG_VECS_DEF 8U
#define AQ_CFG_TCS_DEF 1U
@@ -86,10 +88,7 @@
#define AQ_CFG_DRV_AUTHOR "aQuantia"
#define AQ_CFG_DRV_DESC "aQuantia Corporation(R) Network Driver"
#define AQ_CFG_DRV_NAME "atlantic"
-#define AQ_CFG_DRV_VERSION __stringify(NIC_MAJOR_DRIVER_VERSION)"."\
- __stringify(NIC_MINOR_DRIVER_VERSION)"."\
- __stringify(NIC_BUILD_DRIVER_VERSION)"."\
- __stringify(NIC_REVISION_DRIVER_VERSION) \
+#define AQ_CFG_DRV_VERSION UTS_RELEASE \
AQ_CFG_DRV_VERSION_SUFFIX
#endif /* AQ_CFG_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c b/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c
index adad6a7acabe..6da65099047d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
+// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2014-2019 aQuantia Corporation. */
/* File aq_drvinfo.c: Definition of common code for firmware info in sys.*/
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.h b/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.h
index 41fbb1358068..23a0487893a7 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (C) 2014-2017 aQuantia Corporation. */
/* File aq_drvinfo.h: Declaration of common code for firmware info in sys.*/
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
index 1fff462a4175..440690b18734 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
+// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2014-2017 aQuantia Corporation. */
/* File aq_filters.c: RX filters related functions. */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_filters.h b/drivers/net/ethernet/aquantia/atlantic/aq_filters.h
index c6a08c6585d5..122e06c88a33 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_filters.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_filters.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (C) 2014-2017 aQuantia Corporation. */
/* File aq_filters.h: RX filters related functions. */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index 5315df5ff6f8..100722ad5c2d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -108,11 +108,16 @@ err_exit:
static int aq_ndev_set_features(struct net_device *ndev,
netdev_features_t features)
{
+ bool is_vlan_rx_strip = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
+ bool is_vlan_tx_insert = !!(features & NETIF_F_HW_VLAN_CTAG_TX);
struct aq_nic_s *aq_nic = netdev_priv(ndev);
- struct aq_nic_cfg_s *aq_cfg = aq_nic_get_cfg(aq_nic);
+ bool need_ndev_restart = false;
+ struct aq_nic_cfg_s *aq_cfg;
bool is_lro = false;
int err = 0;
+ aq_cfg = aq_nic_get_cfg(aq_nic);
+
if (!(features & NETIF_F_NTUPLE)) {
if (aq_nic->ndev->features & NETIF_F_NTUPLE) {
err = aq_clear_rxnfc_all_rules(aq_nic);
@@ -135,17 +140,32 @@ static int aq_ndev_set_features(struct net_device *ndev,
if (aq_cfg->is_lro != is_lro) {
aq_cfg->is_lro = is_lro;
-
- if (netif_running(ndev)) {
- aq_ndev_close(ndev);
- aq_ndev_open(ndev);
- }
+ need_ndev_restart = true;
}
}
- if ((aq_nic->ndev->features ^ features) & NETIF_F_RXCSUM)
+
+ if ((aq_nic->ndev->features ^ features) & NETIF_F_RXCSUM) {
err = aq_nic->aq_hw_ops->hw_set_offload(aq_nic->aq_hw,
aq_cfg);
+ if (unlikely(err))
+ goto err_exit;
+ }
+
+ if (aq_cfg->is_vlan_rx_strip != is_vlan_rx_strip) {
+ aq_cfg->is_vlan_rx_strip = is_vlan_rx_strip;
+ need_ndev_restart = true;
+ }
+ if (aq_cfg->is_vlan_tx_insert != is_vlan_tx_insert) {
+ aq_cfg->is_vlan_tx_insert = is_vlan_tx_insert;
+ need_ndev_restart = true;
+ }
+
+ if (need_ndev_restart && netif_running(ndev)) {
+ aq_ndev_close(ndev);
+ aq_ndev_open(ndev);
+ }
+
err_exit:
return err;
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 41172fbebddd..e1392766e21e 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -126,6 +126,8 @@ void aq_nic_cfg_start(struct aq_nic_s *self)
cfg->link_speed_msk &= cfg->aq_hw_caps->link_speed_msk;
cfg->features = cfg->aq_hw_caps->hw_features;
+ cfg->is_vlan_rx_strip = !!(cfg->features & NETIF_F_HW_VLAN_CTAG_RX);
+ cfg->is_vlan_tx_insert = !!(cfg->features & NETIF_F_HW_VLAN_CTAG_TX);
cfg->is_vlan_force_promisc = true;
}
@@ -286,7 +288,8 @@ void aq_nic_ndev_init(struct aq_nic_s *self)
self->ndev->hw_features |= aq_hw_caps->hw_features;
self->ndev->features = aq_hw_caps->hw_features;
self->ndev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
- NETIF_F_RXHASH | NETIF_F_SG | NETIF_F_LRO;
+ NETIF_F_RXHASH | NETIF_F_SG |
+ NETIF_F_LRO | NETIF_F_TSO;
self->ndev->priv_flags = aq_hw_caps->hw_priv_flags;
self->ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
@@ -427,26 +430,37 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
unsigned int dx = ring->sw_tail;
struct aq_ring_buff_s *first = NULL;
struct aq_ring_buff_s *dx_buff = &ring->buff_ring[dx];
+ bool need_context_tag = false;
+
+ dx_buff->flags = 0U;
if (unlikely(skb_is_gso(skb))) {
- dx_buff->flags = 0U;
+ dx_buff->mss = skb_shinfo(skb)->gso_size;
+ dx_buff->is_gso = 1U;
dx_buff->len_pkt = skb->len;
dx_buff->len_l2 = ETH_HLEN;
dx_buff->len_l3 = ip_hdrlen(skb);
dx_buff->len_l4 = tcp_hdrlen(skb);
- dx_buff->mss = skb_shinfo(skb)->gso_size;
- dx_buff->is_txc = 1U;
dx_buff->eop_index = 0xffffU;
-
dx_buff->is_ipv6 =
(ip_hdr(skb)->version == 6) ? 1U : 0U;
+ need_context_tag = true;
+ }
+
+ if (self->aq_nic_cfg.is_vlan_tx_insert && skb_vlan_tag_present(skb)) {
+ dx_buff->vlan_tx_tag = skb_vlan_tag_get(skb);
+ dx_buff->len_pkt = skb->len;
+ dx_buff->is_vlan = 1U;
+ need_context_tag = true;
+ }
+ if (need_context_tag) {
dx = aq_ring_next_dx(ring, dx);
dx_buff = &ring->buff_ring[dx];
+ dx_buff->flags = 0U;
++ret;
}
- dx_buff->flags = 0U;
dx_buff->len = skb_headlen(skb);
dx_buff->pa = dma_map_single(aq_nic_get_dev(self),
skb->data,
@@ -535,7 +549,7 @@ mapping_error:
--ret, dx = aq_ring_next_dx(ring, dx)) {
dx_buff = &ring->buff_ring[dx];
- if (!dx_buff->is_txc && dx_buff->pa) {
+ if (!dx_buff->is_gso && !dx_buff->is_vlan && dx_buff->pa) {
if (unlikely(dx_buff->is_sop)) {
dma_unmap_single(aq_nic_get_dev(self),
dx_buff->pa,
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index 0f22f5d5691b..255b54a6ae07 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -35,6 +35,8 @@ struct aq_nic_cfg_s {
u32 flow_control;
u32 link_speed_msk;
u32 wol;
+ u8 is_vlan_rx_strip;
+ u8 is_vlan_tx_insert;
bool is_vlan_force_promisc;
u16 is_mc_list_enabled;
u16 mc_list_count;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 2a7b91ed17c5..3901d7994ca1 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -409,6 +409,10 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
}
}
+ if (buff->is_vlan)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ buff->vlan_rx_tag);
+
skb->protocol = eth_type_trans(skb, ndev);
aq_rx_checksum(self, buff, skb);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
index 6bd67210d0b7..47abd09d06c2 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
@@ -27,7 +27,7 @@ struct aq_rxpage {
* +----------+----------+----------+-----------
* 4/8bytes|len pkt |len pkt | | skb
* +----------+----------+----------+-----------
- * 4/8bytes|is_txc |len,flags |len |len,is_eop
+ * 4/8bytes|is_gso |len,flags |len |len,is_eop
* +----------+----------+----------+-----------
*
* This aq_ring_buff_s doesn't have endianness dependency.
@@ -44,6 +44,7 @@ struct __packed aq_ring_buff_s {
u8 is_hash_l4;
u8 rsvd1;
struct aq_rxpage rxdata;
+ u16 vlan_rx_tag;
};
/* EOP */
struct {
@@ -59,6 +60,7 @@ struct __packed aq_ring_buff_s {
u8 is_ipv6:1;
u8 rsvd2:7;
u32 len_pkt;
+ u16 vlan_tx_tag;
};
};
union {
@@ -70,11 +72,12 @@ struct __packed aq_ring_buff_s {
u32 is_cso_err:1;
u32 is_sop:1;
u32 is_eop:1;
- u32 is_txc:1;
+ u32 is_gso:1;
u32 is_mapped:1;
u32 is_cleaned:1;
u32 is_error:1;
- u32 rsvd3:6;
+ u32 is_vlan:1;
+ u32 rsvd3:5;
u16 eop_index;
u16 rsvd4;
};
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index 0f140a9fe404..359a4d387185 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -451,7 +451,7 @@ static int hw_atl_a0_hw_ring_tx_xmit(struct aq_hw_s *self,
buff = &ring->buff_ring[ring->sw_tail];
- if (buff->is_txc) {
+ if (buff->is_gso) {
txd->ctl |= (buff->len_l3 << 31) |
(buff->len_l2 << 24) |
HW_ATL_A0_TXD_CTL_CMD_TCP |
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index 13ac2661a473..30f7fc4c97ff 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -40,7 +40,9 @@
NETIF_F_TSO | \
NETIF_F_LRO | \
NETIF_F_NTUPLE | \
- NETIF_F_HW_VLAN_CTAG_FILTER, \
+ NETIF_F_HW_VLAN_CTAG_FILTER | \
+ NETIF_F_HW_VLAN_CTAG_RX | \
+ NETIF_F_HW_VLAN_CTAG_TX, \
.hw_priv_flags = IFF_UNICAST_FLT, \
.flow_control = true, \
.mtu = HW_ATL_B0_MTU_JUMBO, \
@@ -245,6 +247,9 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
/* LSO offloads*/
hw_atl_tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);
+ /* Outer VLAN tag offload */
+ hw_atl_rpo_outer_vlan_tag_mode_set(self, 1U);
+
/* LRO offloads */
{
unsigned int val = (8U < HW_ATL_B0_LRO_RXD_MAX) ? 0x3U :
@@ -487,6 +492,7 @@ static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self,
unsigned int buff_pa_len = 0U;
unsigned int pkt_len = 0U;
unsigned int frag_count = 0U;
+ bool is_vlan = false;
bool is_gso = false;
buff = &ring->buff_ring[ring->sw_tail];
@@ -501,36 +507,44 @@ static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self,
buff = &ring->buff_ring[ring->sw_tail];
- if (buff->is_txc) {
+ if (buff->is_gso) {
+ txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_TCP;
+ txd->ctl |= HW_ATL_B0_TXD_CTL_DESC_TYPE_TXC;
txd->ctl |= (buff->len_l3 << 31) |
- (buff->len_l2 << 24) |
- HW_ATL_B0_TXD_CTL_CMD_TCP |
- HW_ATL_B0_TXD_CTL_DESC_TYPE_TXC;
- txd->ctl2 |= (buff->mss << 16) |
- (buff->len_l4 << 8) |
- (buff->len_l3 >> 1);
+ (buff->len_l2 << 24);
+ txd->ctl2 |= (buff->mss << 16);
+ is_gso = true;
pkt_len -= (buff->len_l4 +
buff->len_l3 +
buff->len_l2);
- is_gso = true;
-
if (buff->is_ipv6)
txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_IPV6;
- } else {
+ txd->ctl2 |= (buff->len_l4 << 8) |
+ (buff->len_l3 >> 1);
+ }
+ if (buff->is_vlan) {
+ txd->ctl |= HW_ATL_B0_TXD_CTL_DESC_TYPE_TXC;
+ txd->ctl |= buff->vlan_tx_tag << 4;
+ is_vlan = true;
+ }
+ if (!buff->is_gso && !buff->is_vlan) {
buff_pa_len = buff->len;
txd->buf_addr = buff->pa;
txd->ctl |= (HW_ATL_B0_TXD_CTL_BLEN &
((u32)buff_pa_len << 4));
txd->ctl |= HW_ATL_B0_TXD_CTL_DESC_TYPE_TXD;
+
/* PAY_LEN */
txd->ctl2 |= HW_ATL_B0_TXD_CTL2_LEN & (pkt_len << 14);
- if (is_gso) {
- txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_LSO;
+ if (is_gso || is_vlan) {
+ /* enable tx context */
txd->ctl2 |= HW_ATL_B0_TXD_CTL2_CTX_EN;
}
+ if (is_gso)
+ txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_LSO;
/* Tx checksum offloads */
if (buff->is_ip_cso)
@@ -539,13 +553,16 @@ static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self,
if (buff->is_udp_cso || buff->is_tcp_cso)
txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_TUCSO;
+ if (is_vlan)
+ txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_VLAN;
+
if (unlikely(buff->is_eop)) {
txd->ctl |= HW_ATL_B0_TXD_CTL_EOP;
txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_WB;
is_gso = false;
+ is_vlan = false;
}
}
-
ring->sw_tail = aq_ring_next_dx(ring, ring->sw_tail);
}
@@ -559,6 +576,7 @@ static int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self,
{
u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa;
u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
+ u32 vlan_rx_stripping = self->aq_nic_cfg->is_vlan_rx_strip;
hw_atl_rdm_rx_desc_en_set(self, false, aq_ring->idx);
@@ -578,7 +596,8 @@ static int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self,
hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
- hw_atl_rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx);
+ hw_atl_rpo_rx_desc_vlan_stripping_set(self, !!vlan_rx_stripping,
+ aq_ring->idx);
/* Rx ring set mode */
@@ -681,11 +700,15 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
buff = &ring->buff_ring[ring->hw_head];
+ buff->flags = 0U;
+ buff->is_hash_l4 = 0U;
+
rx_stat = (0x0000003CU & rxd_wb->status) >> 2;
is_rx_check_sum_enabled = (rxd_wb->type >> 19) & 0x3U;
- pkt_type = 0xFFU & (rxd_wb->type >> 4);
+ pkt_type = (rxd_wb->type & HW_ATL_B0_RXD_WB_STAT_PKTTYPE) >>
+ HW_ATL_B0_RXD_WB_STAT_PKTTYPE_SHIFT;
if (is_rx_check_sum_enabled & BIT(0) &&
(0x0U == (pkt_type & 0x3U)))
@@ -706,6 +729,13 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
buff->is_cso_err = 0U;
}
+ if (self->aq_nic_cfg->is_vlan_rx_strip &&
+ ((pkt_type & HW_ATL_B0_RXD_WB_PKTTYPE_VLAN) ||
+ (pkt_type & HW_ATL_B0_RXD_WB_PKTTYPE_VLAN_DOUBLE))) {
+ buff->is_vlan = 1;
+ buff->vlan_rx_tag = le16_to_cpu(rxd_wb->vlan);
+ }
+
if ((rx_stat & BIT(0)) || rxd_wb->type & 0x1000U) {
/* MAC error or DMA error */
buff->is_error = 1U;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
index e4ba2ccf9830..808d8cd4252a 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
@@ -107,10 +107,17 @@
#define HW_ATL_B0_RXD_NCEA0 (0x1)
#define HW_ATL_B0_RXD_WB_STAT_RSSTYPE (0x0000000F)
+#define HW_ATL_B0_RXD_WB_STAT_RSSTYPE_SHIFT (0x0)
#define HW_ATL_B0_RXD_WB_STAT_PKTTYPE (0x00000FF0)
+#define HW_ATL_B0_RXD_WB_STAT_PKTTYPE_SHIFT (0x4)
#define HW_ATL_B0_RXD_WB_STAT_RXCTRL (0x00180000)
+#define HW_ATL_B0_RXD_WB_STAT_RXCTRL_SHIFT (0x13)
#define HW_ATL_B0_RXD_WB_STAT_SPLHDR (0x00200000)
#define HW_ATL_B0_RXD_WB_STAT_HDRLEN (0xFFC00000)
+#define HW_ATL_B0_RXD_WB_STAT_HDRLEN_SHIFT (0x16)
+
+#define HW_ATL_B0_RXD_WB_PKTTYPE_VLAN BIT(5)
+#define HW_ATL_B0_RXD_WB_PKTTYPE_VLAN_DOUBLE BIT(6)
#define HW_ATL_B0_RXD_WB_STAT2_DD (0x0001)
#define HW_ATL_B0_RXD_WB_STAT2_EOP (0x0002)
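
(Illustrative aside, not part of the patch.) The new PKTTYPE shift and VLAN bits above are what the hw_atl_b0.c Rx path uses to decide whether to record a stripped VLAN tag. A small runnable sketch of that extraction, using the same mask/shift/bit values; the sample status word is fabricated for illustration.

#include <stdint.h>
#include <stdio.h>

#define RXD_WB_STAT_PKTTYPE        0x00000FF0u
#define RXD_WB_STAT_PKTTYPE_SHIFT  4
#define RXD_WB_PKTTYPE_VLAN        (1u << 5)
#define RXD_WB_PKTTYPE_VLAN_DOUBLE (1u << 6)

int main(void)
{
	uint32_t type = 0x00000250u;	/* fabricated descriptor "type" word */
	uint32_t pkt_type = (type & RXD_WB_STAT_PKTTYPE) >>
			    RXD_WB_STAT_PKTTYPE_SHIFT;

	if (pkt_type & (RXD_WB_PKTTYPE_VLAN | RXD_WB_PKTTYPE_VLAN_DOUBLE))
		puts("tagged frame; driver would latch rxd_wb->vlan into the buffer");
	else
		puts("untagged frame");

	return 0;
}
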
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
index 451529069f28..1149812ae463 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
@@ -1004,6 +1004,22 @@ void hw_atl_rpo_rx_desc_vlan_stripping_set(struct aq_hw_s *aq_hw,
rx_desc_vlan_stripping);
}
+void hw_atl_rpo_outer_vlan_tag_mode_set(void *context,
+ u32 outervlantagmode)
+{
+ aq_hw_write_reg_bit(context, HW_ATL_RPO_OUTER_VL_INS_MODE_ADR,
+ HW_ATL_RPO_OUTER_VL_INS_MODE_MSK,
+ HW_ATL_RPO_OUTER_VL_INS_MODE_SHIFT,
+ outervlantagmode);
+}
+
+u32 hw_atl_rpo_outer_vlan_tag_mode_get(void *context)
+{
+ return aq_hw_read_reg_bit(context, HW_ATL_RPO_OUTER_VL_INS_MODE_ADR,
+ HW_ATL_RPO_OUTER_VL_INS_MODE_MSK,
+ HW_ATL_RPO_OUTER_VL_INS_MODE_SHIFT);
+}
+
void hw_atl_rpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
u32 tcp_udp_crc_offload_en)
{
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
index 34b42ce43512..0c37abbabca5 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
@@ -488,6 +488,11 @@ void hw_atl_rpo_rx_desc_vlan_stripping_set(struct aq_hw_s *aq_hw,
u32 rx_desc_vlan_stripping,
u32 descriptor);
+void hw_atl_rpo_outer_vlan_tag_mode_set(void *context,
+ u32 outervlantagmode);
+
+u32 hw_atl_rpo_outer_vlan_tag_mode_get(void *context);
+
/* set tcp/udp checksum offload enable */
void hw_atl_rpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
u32 tcp_udp_crc_offload_en);
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
index fc1446f737bb..c3febcdfa92e 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
@@ -1383,6 +1383,24 @@
/* default value of bitfield l4_chk_en */
#define HW_ATL_RPOL4CHK_EN_DEFAULT 0x0
+/* RX outer_vl_ins_mode Bitfield Definitions
+ * Preprocessor definitions for the bitfield "outer_vl_ins_mode".
+ * PORT="pif_rpo_outer_vl_mode_i"
+ */
+
+/* Register address for bitfield outer_vl_ins_mode */
+#define HW_ATL_RPO_OUTER_VL_INS_MODE_ADR 0x00005580
+/* Bitmask for bitfield outer_vl_ins_mode */
+#define HW_ATL_RPO_OUTER_VL_INS_MODE_MSK 0x00000004
+/* Inverted bitmask for bitfield outer_vl_ins_mode */
+#define HW_ATL_RPO_OUTER_VL_INS_MODE_MSKN 0xFFFFFFFB
+/* Lower bit position of bitfield outer_vl_ins_mode */
+#define HW_ATL_RPO_OUTER_VL_INS_MODE_SHIFT 2
+/* Width of bitfield outer_vl_ins_mode */
+#define HW_ATL_RPO_OUTER_VL_INS_MODE_WIDTH 1
+/* Default value of bitfield outer_vl_ins_mode */
+#define HW_ATL_RPO_OUTER_VL_INS_MODE_DEFAULT 0x0
+
/* rx reg_res_dsbl bitfield definitions
* preprocessor definitions for the bitfield "reg_res_dsbl".
* port="pif_rx_reg_res_dsbl_i"
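
(Illustrative aside, not part of the patch.) The ADR/MSK/SHIFT trio defined above follows the usual hw_atl register-bitfield convention; aq_hw_write_reg_bit() is assumed (it is not shown in this diff) to perform a masked read-modify-write at ADR. A runnable sketch of that convention, with a plain variable standing in for the MMIO register at 0x00005580.

#include <stdint.h>
#include <stdio.h>

#define OUTER_VL_INS_MODE_MSK   0x00000004u
#define OUTER_VL_INS_MODE_SHIFT 2

static void write_reg_bit(uint32_t *reg, uint32_t msk, uint32_t shift,
			  uint32_t val)
{
	/* clear the field, then place the new value under the mask */
	*reg = (*reg & ~msk) | ((val << shift) & msk);
}

static uint32_t read_reg_bit(uint32_t reg, uint32_t msk, uint32_t shift)
{
	return (reg & msk) >> shift;
}

int main(void)
{
	uint32_t rpo_reg = 0;	/* stand-in for the register at 0x00005580 */

	/* hw_atl_rpo_outer_vlan_tag_mode_set(self, 1U) boils down to this */
	write_reg_bit(&rpo_reg, OUTER_VL_INS_MODE_MSK,
		      OUTER_VL_INS_MODE_SHIFT, 1u);
	printf("outer_vl_ins_mode = %u\n",
	       read_reg_bit(rpo_reg, OUTER_VL_INS_MODE_MSK,
			    OUTER_VL_INS_MODE_SHIFT));
	return 0;
}
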
diff --git a/drivers/net/ethernet/aquantia/atlantic/ver.h b/drivers/net/ethernet/aquantia/atlantic/ver.h
index 23374bffa92b..597654b51e01 100644
--- a/drivers/net/ethernet/aquantia/atlantic/ver.h
+++ b/drivers/net/ethernet/aquantia/atlantic/ver.h
@@ -7,11 +7,6 @@
#ifndef VER_H
#define VER_H
-#define NIC_MAJOR_DRIVER_VERSION 2
-#define NIC_MINOR_DRIVER_VERSION 0
-#define NIC_BUILD_DRIVER_VERSION 4
-#define NIC_REVISION_DRIVER_VERSION 0
-
#define AQ_CFG_DRV_VERSION_SUFFIX "-kern"
#endif /* VER_H */
diff --git a/drivers/net/ethernet/atheros/Kconfig b/drivers/net/ethernet/atheros/Kconfig
index 953ff1f9ac70..0058051ba925 100644
--- a/drivers/net/ethernet/atheros/Kconfig
+++ b/drivers/net/ethernet/atheros/Kconfig
@@ -6,7 +6,7 @@
config NET_VENDOR_ATHEROS
bool "Atheros devices"
default y
- depends on PCI
+ depends on (PCI || ATH79)
---help---
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -17,6 +17,14 @@ config NET_VENDOR_ATHEROS
if NET_VENDOR_ATHEROS
+config AG71XX
+ tristate "Atheros AR7XXX/AR9XXX built-in ethernet mac support"
+ depends on ATH79
+ select PHYLIB
+ help
+ If you wish to compile a kernel for AR7XXX/AR9XXX and enable
+ ethernet support, then you should always answer Y to this.
+
config ATL2
tristate "Atheros L2 Fast Ethernet support"
depends on PCI
diff --git a/drivers/net/ethernet/atheros/Makefile b/drivers/net/ethernet/atheros/Makefile
index aa3d394b87e6..aca696cb6425 100644
--- a/drivers/net/ethernet/atheros/Makefile
+++ b/drivers/net/ethernet/atheros/Makefile
@@ -3,6 +3,7 @@
# Makefile for the Atheros network device drivers.
#
+obj-$(CONFIG_AG71XX) += ag71xx.o
obj-$(CONFIG_ATL1) += atlx/
obj-$(CONFIG_ATL2) += atlx/
obj-$(CONFIG_ATL1E) += atl1e/
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
new file mode 100644
index 000000000000..72a57c6cd254
--- /dev/null
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -0,0 +1,1898 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Atheros AR71xx built-in ethernet mac driver
+ *
+ * Copyright (C) 2019 Oleksij Rempel <o.rempel@pengutronix.de>
+ *
+ * List of authors who contributed to this driver before mainlining:
+ * Alexander Couzens <lynxis@fe80.eu>
+ * Christian Lamparter <chunkeey@gmail.com>
+ * Chuanhong Guo <gch981213@gmail.com>
+ * Daniel F. Dickinson <cshored@thecshore.com>
+ * David Bauer <mail@david-bauer.net>
+ * Felix Fietkau <nbd@nbd.name>
+ * Gabor Juhos <juhosg@freemail.hu>
+ * Hauke Mehrtens <hauke@hauke-m.de>
+ * Johann Neuhauser <johann@it-neuhauser.de>
+ * John Crispin <john@phrozen.org>
+ * Jo-Philipp Wich <jo@mein.io>
+ * Koen Vandeputte <koen.vandeputte@ncentric.com>
+ * Lucian Cristian <lucian.cristian@gmail.com>
+ * Matt Merhar <mattmerhar@protonmail.com>
+ * Milan Krstic <milan.krstic@gmail.com>
+ * Petr Štetiar <ynezz@true.cz>
+ * Rosen Penev <rosenp@gmail.com>
+ * Stephen Walker <stephendwalker+github@gmail.com>
+ * Vittorio Gambaletta <openwrt@vittgam.net>
+ * Weijie Gao <hackpascal@gmail.com>
+ * Imre Kaloz <kaloz@openwrt.org>
+ */
+
+#include <linux/if_vlan.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/of_platform.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/clk.h>
+
+/* For our NAPI weight, bigger does *NOT* mean better - it means more
+ * D-cache misses and lots more wasted cycles than we'll ever
+ * possibly gain from saving instructions.
+ */
+#define AG71XX_NAPI_WEIGHT 32
+#define AG71XX_OOM_REFILL (1 + HZ / 10)
+
+#define AG71XX_INT_ERR (AG71XX_INT_RX_BE | AG71XX_INT_TX_BE)
+#define AG71XX_INT_TX (AG71XX_INT_TX_PS)
+#define AG71XX_INT_RX (AG71XX_INT_RX_PR | AG71XX_INT_RX_OF)
+
+#define AG71XX_INT_POLL (AG71XX_INT_RX | AG71XX_INT_TX)
+#define AG71XX_INT_INIT (AG71XX_INT_ERR | AG71XX_INT_POLL)
+
+#define AG71XX_TX_MTU_LEN 1540
+
+#define AG71XX_TX_RING_SPLIT 512
+#define AG71XX_TX_RING_DS_PER_PKT DIV_ROUND_UP(AG71XX_TX_MTU_LEN, \
+ AG71XX_TX_RING_SPLIT)
+#define AG71XX_TX_RING_SIZE_DEFAULT 128
+#define AG71XX_RX_RING_SIZE_DEFAULT 256
+
+#define AG71XX_MDIO_RETRY 1000
+#define AG71XX_MDIO_DELAY 5
+#define AG71XX_MDIO_MAX_CLK 5000000
+
+/* Register offsets */
+#define AG71XX_REG_MAC_CFG1 0x0000
+#define MAC_CFG1_TXE BIT(0) /* Tx Enable */
+#define MAC_CFG1_STX BIT(1) /* Synchronize Tx Enable */
+#define MAC_CFG1_RXE BIT(2) /* Rx Enable */
+#define MAC_CFG1_SRX BIT(3) /* Synchronize Rx Enable */
+#define MAC_CFG1_TFC BIT(4) /* Tx Flow Control Enable */
+#define MAC_CFG1_RFC BIT(5) /* Rx Flow Control Enable */
+#define MAC_CFG1_SR BIT(31) /* Soft Reset */
+#define MAC_CFG1_INIT (MAC_CFG1_RXE | MAC_CFG1_TXE | \
+ MAC_CFG1_SRX | MAC_CFG1_STX)
+
+#define AG71XX_REG_MAC_CFG2 0x0004
+#define MAC_CFG2_FDX BIT(0)
+#define MAC_CFG2_PAD_CRC_EN BIT(2)
+#define MAC_CFG2_LEN_CHECK BIT(4)
+#define MAC_CFG2_IF_1000 BIT(9)
+#define MAC_CFG2_IF_10_100 BIT(8)
+
+#define AG71XX_REG_MAC_MFL 0x0010
+
+#define AG71XX_REG_MII_CFG 0x0020
+#define MII_CFG_CLK_DIV_4 0
+#define MII_CFG_CLK_DIV_6 2
+#define MII_CFG_CLK_DIV_8 3
+#define MII_CFG_CLK_DIV_10 4
+#define MII_CFG_CLK_DIV_14 5
+#define MII_CFG_CLK_DIV_20 6
+#define MII_CFG_CLK_DIV_28 7
+#define MII_CFG_CLK_DIV_34 8
+#define MII_CFG_CLK_DIV_42 9
+#define MII_CFG_CLK_DIV_50 10
+#define MII_CFG_CLK_DIV_58 11
+#define MII_CFG_CLK_DIV_66 12
+#define MII_CFG_CLK_DIV_74 13
+#define MII_CFG_CLK_DIV_82 14
+#define MII_CFG_CLK_DIV_98 15
+#define MII_CFG_RESET BIT(31)
+
+#define AG71XX_REG_MII_CMD 0x0024
+#define MII_CMD_READ BIT(0)
+
+#define AG71XX_REG_MII_ADDR 0x0028
+#define MII_ADDR_SHIFT 8
+
+#define AG71XX_REG_MII_CTRL 0x002c
+#define AG71XX_REG_MII_STATUS 0x0030
+#define AG71XX_REG_MII_IND 0x0034
+#define MII_IND_BUSY BIT(0)
+#define MII_IND_INVALID BIT(2)
+
+#define AG71XX_REG_MAC_IFCTL 0x0038
+#define MAC_IFCTL_SPEED BIT(16)
+
+#define AG71XX_REG_MAC_ADDR1 0x0040
+#define AG71XX_REG_MAC_ADDR2 0x0044
+#define AG71XX_REG_FIFO_CFG0 0x0048
+#define FIFO_CFG0_WTM BIT(0) /* Watermark Module */
+#define FIFO_CFG0_RXS BIT(1) /* Rx System Module */
+#define FIFO_CFG0_RXF BIT(2) /* Rx Fabric Module */
+#define FIFO_CFG0_TXS BIT(3) /* Tx System Module */
+#define FIFO_CFG0_TXF BIT(4) /* Tx Fabric Module */
+#define FIFO_CFG0_ALL (FIFO_CFG0_WTM | FIFO_CFG0_RXS | FIFO_CFG0_RXF \
+ | FIFO_CFG0_TXS | FIFO_CFG0_TXF)
+#define FIFO_CFG0_INIT (FIFO_CFG0_ALL << FIFO_CFG0_ENABLE_SHIFT)
+
+#define FIFO_CFG0_ENABLE_SHIFT 8
+
+#define AG71XX_REG_FIFO_CFG1 0x004c
+#define AG71XX_REG_FIFO_CFG2 0x0050
+#define AG71XX_REG_FIFO_CFG3 0x0054
+#define AG71XX_REG_FIFO_CFG4 0x0058
+#define FIFO_CFG4_DE BIT(0) /* Drop Event */
+#define FIFO_CFG4_DV BIT(1) /* RX_DV Event */
+#define FIFO_CFG4_FC BIT(2) /* False Carrier */
+#define FIFO_CFG4_CE BIT(3) /* Code Error */
+#define FIFO_CFG4_CR BIT(4) /* CRC error */
+#define FIFO_CFG4_LM BIT(5) /* Length Mismatch */
+#define FIFO_CFG4_LO BIT(6) /* Length out of range */
+#define FIFO_CFG4_OK BIT(7) /* Packet is OK */
+#define FIFO_CFG4_MC BIT(8) /* Multicast Packet */
+#define FIFO_CFG4_BC BIT(9) /* Broadcast Packet */
+#define FIFO_CFG4_DR BIT(10) /* Dribble */
+#define FIFO_CFG4_LE BIT(11) /* Long Event */
+#define FIFO_CFG4_CF BIT(12) /* Control Frame */
+#define FIFO_CFG4_PF BIT(13) /* Pause Frame */
+#define FIFO_CFG4_UO BIT(14) /* Unsupported Opcode */
+#define FIFO_CFG4_VT BIT(15) /* VLAN tag detected */
+#define FIFO_CFG4_FT BIT(16) /* Frame Truncated */
+#define FIFO_CFG4_UC BIT(17) /* Unicast Packet */
+#define FIFO_CFG4_INIT (FIFO_CFG4_DE | FIFO_CFG4_DV | FIFO_CFG4_FC | \
+ FIFO_CFG4_CE | FIFO_CFG4_CR | FIFO_CFG4_LM | \
+ FIFO_CFG4_LO | FIFO_CFG4_OK | FIFO_CFG4_MC | \
+ FIFO_CFG4_BC | FIFO_CFG4_DR | FIFO_CFG4_LE | \
+ FIFO_CFG4_CF | FIFO_CFG4_PF | FIFO_CFG4_UO | \
+ FIFO_CFG4_VT)
+
+#define AG71XX_REG_FIFO_CFG5 0x005c
+#define FIFO_CFG5_DE BIT(0) /* Drop Event */
+#define FIFO_CFG5_DV BIT(1) /* RX_DV Event */
+#define FIFO_CFG5_FC BIT(2) /* False Carrier */
+#define FIFO_CFG5_CE BIT(3) /* Code Error */
+#define FIFO_CFG5_LM BIT(4) /* Length Mismatch */
+#define FIFO_CFG5_LO BIT(5) /* Length Out of Range */
+#define FIFO_CFG5_OK BIT(6) /* Packet is OK */
+#define FIFO_CFG5_MC BIT(7) /* Multicast Packet */
+#define FIFO_CFG5_BC BIT(8) /* Broadcast Packet */
+#define FIFO_CFG5_DR BIT(9) /* Dribble */
+#define FIFO_CFG5_CF BIT(10) /* Control Frame */
+#define FIFO_CFG5_PF BIT(11) /* Pause Frame */
+#define FIFO_CFG5_UO BIT(12) /* Unsupported Opcode */
+#define FIFO_CFG5_VT BIT(13) /* VLAN tag detected */
+#define FIFO_CFG5_LE BIT(14) /* Long Event */
+#define FIFO_CFG5_FT BIT(15) /* Frame Truncated */
+#define FIFO_CFG5_16 BIT(16) /* unknown */
+#define FIFO_CFG5_17 BIT(17) /* unknown */
+#define FIFO_CFG5_SF BIT(18) /* Short Frame */
+#define FIFO_CFG5_BM BIT(19) /* Byte Mode */
+#define FIFO_CFG5_INIT (FIFO_CFG5_DE | FIFO_CFG5_DV | FIFO_CFG5_FC | \
+ FIFO_CFG5_CE | FIFO_CFG5_LO | FIFO_CFG5_OK | \
+ FIFO_CFG5_MC | FIFO_CFG5_BC | FIFO_CFG5_DR | \
+ FIFO_CFG5_CF | FIFO_CFG5_PF | FIFO_CFG5_VT | \
+ FIFO_CFG5_LE | FIFO_CFG5_FT | FIFO_CFG5_16 | \
+ FIFO_CFG5_17 | FIFO_CFG5_SF)
+
+#define AG71XX_REG_TX_CTRL 0x0180
+#define TX_CTRL_TXE BIT(0) /* Tx Enable */
+
+#define AG71XX_REG_TX_DESC 0x0184
+#define AG71XX_REG_TX_STATUS 0x0188
+#define TX_STATUS_PS BIT(0) /* Packet Sent */
+#define TX_STATUS_UR BIT(1) /* Tx Underrun */
+#define TX_STATUS_BE BIT(3) /* Bus Error */
+
+#define AG71XX_REG_RX_CTRL 0x018c
+#define RX_CTRL_RXE BIT(0) /* Rx Enable */
+
+#define AG71XX_DMA_RETRY 10
+#define AG71XX_DMA_DELAY 1
+
+#define AG71XX_REG_RX_DESC 0x0190
+#define AG71XX_REG_RX_STATUS 0x0194
+#define RX_STATUS_PR BIT(0) /* Packet Received */
+#define RX_STATUS_OF BIT(2) /* Rx Overflow */
+#define RX_STATUS_BE BIT(3) /* Bus Error */
+
+#define AG71XX_REG_INT_ENABLE 0x0198
+#define AG71XX_REG_INT_STATUS 0x019c
+#define AG71XX_INT_TX_PS BIT(0)
+#define AG71XX_INT_TX_UR BIT(1)
+#define AG71XX_INT_TX_BE BIT(3)
+#define AG71XX_INT_RX_PR BIT(4)
+#define AG71XX_INT_RX_OF BIT(6)
+#define AG71XX_INT_RX_BE BIT(7)
+
+#define AG71XX_REG_FIFO_DEPTH 0x01a8
+#define AG71XX_REG_RX_SM 0x01b0
+#define AG71XX_REG_TX_SM 0x01b4
+
+#define ETH_SWITCH_HEADER_LEN 2
+
+#define AG71XX_DEFAULT_MSG_ENABLE \
+ (NETIF_MSG_DRV \
+ | NETIF_MSG_PROBE \
+ | NETIF_MSG_LINK \
+ | NETIF_MSG_TIMER \
+ | NETIF_MSG_IFDOWN \
+ | NETIF_MSG_IFUP \
+ | NETIF_MSG_RX_ERR \
+ | NETIF_MSG_TX_ERR)
+
+#define DESC_EMPTY BIT(31)
+#define DESC_MORE BIT(24)
+#define DESC_PKTLEN_M 0xfff
+struct ag71xx_desc {
+ u32 data;
+ u32 ctrl;
+ u32 next;
+ u32 pad;
+} __aligned(4);
+
+#define AG71XX_DESC_SIZE roundup(sizeof(struct ag71xx_desc), \
+ L1_CACHE_BYTES)
+
+struct ag71xx_buf {
+ union {
+ struct {
+ struct sk_buff *skb;
+ unsigned int len;
+ } tx;
+ struct {
+ dma_addr_t dma_addr;
+ void *rx_buf;
+ } rx;
+ };
+};
+
+struct ag71xx_ring {
+ /* "Hot" fields in the data path. */
+ unsigned int curr;
+ unsigned int dirty;
+
+ /* "Cold" fields - not used in the data path. */
+ struct ag71xx_buf *buf;
+ u16 order;
+ u16 desc_split;
+ dma_addr_t descs_dma;
+ u8 *descs_cpu;
+};
+
+enum ag71xx_type {
+ AR7100,
+ AR7240,
+ AR9130,
+ AR9330,
+ AR9340,
+ QCA9530,
+ QCA9550,
+};
+
+struct ag71xx_dcfg {
+ u32 max_frame_len;
+ const u32 *fifodata;
+ u16 desc_pktlen_mask;
+ bool tx_hang_workaround;
+ enum ag71xx_type type;
+};
+
+struct ag71xx {
+ /* Critical data related to the per-packet data path are clustered
+ * early in this structure to help improve the D-cache footprint.
+ */
+ struct ag71xx_ring rx_ring ____cacheline_aligned;
+ struct ag71xx_ring tx_ring ____cacheline_aligned;
+
+ u16 rx_buf_size;
+ u8 rx_buf_offset;
+
+ struct net_device *ndev;
+ struct platform_device *pdev;
+ struct napi_struct napi;
+ u32 msg_enable;
+ const struct ag71xx_dcfg *dcfg;
+
+ /* From this point onwards we're not looking at per-packet fields. */
+ void __iomem *mac_base;
+
+ struct ag71xx_desc *stop_desc;
+ dma_addr_t stop_desc_dma;
+
+ int phy_if_mode;
+
+ struct delayed_work restart_work;
+ struct timer_list oom_timer;
+
+ struct reset_control *mac_reset;
+
+ u32 fifodata[3];
+ int mac_idx;
+
+ struct reset_control *mdio_reset;
+ struct mii_bus *mii_bus;
+ struct clk *clk_mdio;
+ struct clk *clk_eth;
+};
+
+static int ag71xx_desc_empty(struct ag71xx_desc *desc)
+{
+ return (desc->ctrl & DESC_EMPTY) != 0;
+}
+
+static struct ag71xx_desc *ag71xx_ring_desc(struct ag71xx_ring *ring, int idx)
+{
+ return (struct ag71xx_desc *)&ring->descs_cpu[idx * AG71XX_DESC_SIZE];
+}
+
+static int ag71xx_ring_size_order(int size)
+{
+ return fls(size - 1);
+}
+
+static bool ag71xx_is(struct ag71xx *ag, enum ag71xx_type type)
+{
+ return ag->dcfg->type == type;
+}
+
+static void ag71xx_wr(struct ag71xx *ag, unsigned int reg, u32 value)
+{
+ iowrite32(value, ag->mac_base + reg);
+ /* flush write */
+ (void)ioread32(ag->mac_base + reg);
+}
+
+static u32 ag71xx_rr(struct ag71xx *ag, unsigned int reg)
+{
+ return ioread32(ag->mac_base + reg);
+}
+
+static void ag71xx_sb(struct ag71xx *ag, unsigned int reg, u32 mask)
+{
+ void __iomem *r;
+
+ r = ag->mac_base + reg;
+ iowrite32(ioread32(r) | mask, r);
+ /* flush write */
+ (void)ioread32(r);
+}
+
+static void ag71xx_cb(struct ag71xx *ag, unsigned int reg, u32 mask)
+{
+ void __iomem *r;
+
+ r = ag->mac_base + reg;
+ iowrite32(ioread32(r) & ~mask, r);
+ /* flush write */
+ (void)ioread32(r);
+}
+
+static void ag71xx_int_enable(struct ag71xx *ag, u32 ints)
+{
+ ag71xx_sb(ag, AG71XX_REG_INT_ENABLE, ints);
+}
+
+static void ag71xx_int_disable(struct ag71xx *ag, u32 ints)
+{
+ ag71xx_cb(ag, AG71XX_REG_INT_ENABLE, ints);
+}
+
+static int ag71xx_mdio_wait_busy(struct ag71xx *ag)
+{
+ struct net_device *ndev = ag->ndev;
+ int i;
+
+ for (i = 0; i < AG71XX_MDIO_RETRY; i++) {
+ u32 busy;
+
+ udelay(AG71XX_MDIO_DELAY);
+
+ busy = ag71xx_rr(ag, AG71XX_REG_MII_IND);
+ if (!busy)
+ return 0;
+
+ udelay(AG71XX_MDIO_DELAY);
+ }
+
+ netif_err(ag, link, ndev, "MDIO operation timed out\n");
+
+ return -ETIMEDOUT;
+}
+
+static int ag71xx_mdio_mii_read(struct mii_bus *bus, int addr, int reg)
+{
+ struct ag71xx *ag = bus->priv;
+ int err, val;
+
+ err = ag71xx_mdio_wait_busy(ag);
+ if (err)
+ return err;
+
+ ag71xx_wr(ag, AG71XX_REG_MII_ADDR,
+ ((addr & 0x1f) << MII_ADDR_SHIFT) | (reg & 0xff));
+ /* enable read mode */
+ ag71xx_wr(ag, AG71XX_REG_MII_CMD, MII_CMD_READ);
+
+ err = ag71xx_mdio_wait_busy(ag);
+ if (err)
+ return err;
+
+ val = ag71xx_rr(ag, AG71XX_REG_MII_STATUS);
+ /* disable read mode */
+ ag71xx_wr(ag, AG71XX_REG_MII_CMD, 0);
+
+ netif_dbg(ag, link, ag->ndev, "mii_read: addr=%04x, reg=%04x, value=%04x\n",
+ addr, reg, val);
+
+ return val;
+}
+
+static int ag71xx_mdio_mii_write(struct mii_bus *bus, int addr, int reg,
+ u16 val)
+{
+ struct ag71xx *ag = bus->priv;
+
+ netif_dbg(ag, link, ag->ndev, "mii_write: addr=%04x, reg=%04x, value=%04x\n",
+ addr, reg, val);
+
+ ag71xx_wr(ag, AG71XX_REG_MII_ADDR,
+ ((addr & 0x1f) << MII_ADDR_SHIFT) | (reg & 0xff));
+ ag71xx_wr(ag, AG71XX_REG_MII_CTRL, val);
+
+ return ag71xx_mdio_wait_busy(ag);
+}
+
+static const u32 ar71xx_mdio_div_table[] = {
+ 4, 4, 6, 8, 10, 14, 20, 28,
+};
+
+static const u32 ar7240_mdio_div_table[] = {
+ 2, 2, 4, 6, 8, 12, 18, 26, 32, 40, 48, 56, 62, 70, 78, 96,
+};
+
+static const u32 ar933x_mdio_div_table[] = {
+ 4, 4, 6, 8, 10, 14, 20, 28, 34, 42, 50, 58, 66, 74, 82, 98,
+};
+
+static int ag71xx_mdio_get_divider(struct ag71xx *ag, u32 *div)
+{
+ unsigned long ref_clock;
+ const u32 *table;
+ int ndivs, i;
+
+ ref_clock = clk_get_rate(ag->clk_mdio);
+ if (!ref_clock)
+ return -EINVAL;
+
+ if (ag71xx_is(ag, AR9330) || ag71xx_is(ag, AR9340)) {
+ table = ar933x_mdio_div_table;
+ ndivs = ARRAY_SIZE(ar933x_mdio_div_table);
+ } else if (ag71xx_is(ag, AR7240)) {
+ table = ar7240_mdio_div_table;
+ ndivs = ARRAY_SIZE(ar7240_mdio_div_table);
+ } else {
+ table = ar71xx_mdio_div_table;
+ ndivs = ARRAY_SIZE(ar71xx_mdio_div_table);
+ }
+
+ for (i = 0; i < ndivs; i++) {
+ unsigned long t;
+
+ t = ref_clock / table[i];
+ if (t <= AG71XX_MDIO_MAX_CLK) {
+ *div = i;
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
+
+static int ag71xx_mdio_reset(struct mii_bus *bus)
+{
+ struct ag71xx *ag = bus->priv;
+ int err;
+ u32 t;
+
+ err = ag71xx_mdio_get_divider(ag, &t);
+ if (err)
+ return err;
+
+ ag71xx_wr(ag, AG71XX_REG_MII_CFG, t | MII_CFG_RESET);
+ usleep_range(100, 200);
+
+ ag71xx_wr(ag, AG71XX_REG_MII_CFG, t);
+ usleep_range(100, 200);
+
+ return 0;
+}
+
+static int ag71xx_mdio_probe(struct ag71xx *ag)
+{
+ struct device *dev = &ag->pdev->dev;
+ struct net_device *ndev = ag->ndev;
+ static struct mii_bus *mii_bus;
+ struct device_node *np;
+ int err;
+
+ np = dev->of_node;
+ ag->mii_bus = NULL;
+
+ ag->clk_mdio = devm_clk_get(dev, "mdio");
+ if (IS_ERR(ag->clk_mdio)) {
+ netif_err(ag, probe, ndev, "Failed to get mdio clk.\n");
+ return PTR_ERR(ag->clk_mdio);
+ }
+
+ err = clk_prepare_enable(ag->clk_mdio);
+ if (err) {
+ netif_err(ag, probe, ndev, "Failed to enable mdio clk.\n");
+ return err;
+ }
+
+ mii_bus = devm_mdiobus_alloc(dev);
+ if (!mii_bus) {
+ err = -ENOMEM;
+ goto mdio_err_put_clk;
+ }
+
+ ag->mdio_reset = of_reset_control_get_exclusive(np, "mdio");
+ if (IS_ERR(ag->mdio_reset)) {
+ netif_err(ag, probe, ndev, "Failed to get reset mdio.\n");
+ err = PTR_ERR(ag->mdio_reset);
+ goto mdio_err_put_clk;
+ }
+
+ mii_bus->name = "ag71xx_mdio";
+ mii_bus->read = ag71xx_mdio_mii_read;
+ mii_bus->write = ag71xx_mdio_mii_write;
+ mii_bus->reset = ag71xx_mdio_reset;
+ mii_bus->priv = ag;
+ mii_bus->parent = dev;
+ snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%s.%d", np->name, ag->mac_idx);
+
+ if (!IS_ERR(ag->mdio_reset)) {
+ reset_control_assert(ag->mdio_reset);
+ msleep(100);
+ reset_control_deassert(ag->mdio_reset);
+ msleep(200);
+ }
+
+ err = of_mdiobus_register(mii_bus, np);
+ if (err)
+ goto mdio_err_put_clk;
+
+ ag->mii_bus = mii_bus;
+
+ return 0;
+
+mdio_err_put_clk:
+ clk_disable_unprepare(ag->clk_mdio);
+ return err;
+}
+
+static void ag71xx_mdio_remove(struct ag71xx *ag)
+{
+ if (ag->mii_bus)
+ mdiobus_unregister(ag->mii_bus);
+ clk_disable_unprepare(ag->clk_mdio);
+}
+
+static void ag71xx_hw_stop(struct ag71xx *ag)
+{
+ /* disable all interrupts and stop the rx/tx engine */
+ ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, 0);
+ ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
+ ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);
+}
+
+static bool ag71xx_check_dma_stuck(struct ag71xx *ag)
+{
+ unsigned long timestamp;
+ u32 rx_sm, tx_sm, rx_fd;
+
+ timestamp = netdev_get_tx_queue(ag->ndev, 0)->trans_start;
+ if (likely(time_before(jiffies, timestamp + HZ / 10)))
+ return false;
+
+ if (!netif_carrier_ok(ag->ndev))
+ return false;
+
+ rx_sm = ag71xx_rr(ag, AG71XX_REG_RX_SM);
+ if ((rx_sm & 0x7) == 0x3 && ((rx_sm >> 4) & 0x7) == 0x6)
+ return true;
+
+ tx_sm = ag71xx_rr(ag, AG71XX_REG_TX_SM);
+ rx_fd = ag71xx_rr(ag, AG71XX_REG_FIFO_DEPTH);
+ if (((tx_sm >> 4) & 0x7) == 0 && ((rx_sm & 0x7) == 0) &&
+ ((rx_sm >> 4) & 0x7) == 0 && rx_fd == 0)
+ return true;
+
+ return false;
+}
+
+static int ag71xx_tx_packets(struct ag71xx *ag, bool flush)
+{
+ struct ag71xx_ring *ring = &ag->tx_ring;
+ int sent = 0, bytes_compl = 0, n = 0;
+ struct net_device *ndev = ag->ndev;
+ int ring_mask, ring_size;
+ bool dma_stuck = false;
+
+ ring_mask = BIT(ring->order) - 1;
+ ring_size = BIT(ring->order);
+
+ netif_dbg(ag, tx_queued, ndev, "processing TX ring\n");
+
+ while (ring->dirty + n != ring->curr) {
+ struct ag71xx_desc *desc;
+ struct sk_buff *skb;
+ unsigned int i;
+
+ i = (ring->dirty + n) & ring_mask;
+ desc = ag71xx_ring_desc(ring, i);
+ skb = ring->buf[i].tx.skb;
+
+ if (!flush && !ag71xx_desc_empty(desc)) {
+ if (ag->dcfg->tx_hang_workaround &&
+ ag71xx_check_dma_stuck(ag)) {
+ schedule_delayed_work(&ag->restart_work,
+ HZ / 2);
+ dma_stuck = true;
+ }
+ break;
+ }
+
+ if (flush)
+ desc->ctrl |= DESC_EMPTY;
+
+ n++;
+ if (!skb)
+ continue;
+
+ dev_kfree_skb_any(skb);
+ ring->buf[i].tx.skb = NULL;
+
+ bytes_compl += ring->buf[i].tx.len;
+
+ sent++;
+ ring->dirty += n;
+
+ while (n > 0) {
+ ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
+ n--;
+ }
+ }
+
+ netif_dbg(ag, tx_done, ndev, "%d packets sent out\n", sent);
+
+ if (!sent)
+ return 0;
+
+ ag->ndev->stats.tx_bytes += bytes_compl;
+ ag->ndev->stats.tx_packets += sent;
+
+ netdev_completed_queue(ag->ndev, sent, bytes_compl);
+ if ((ring->curr - ring->dirty) < (ring_size * 3) / 4)
+ netif_wake_queue(ag->ndev);
+
+ if (!dma_stuck)
+ cancel_delayed_work(&ag->restart_work);
+
+ return sent;
+}
+
+static void ag71xx_dma_wait_stop(struct ag71xx *ag)
+{
+ struct net_device *ndev = ag->ndev;
+ int i;
+
+ for (i = 0; i < AG71XX_DMA_RETRY; i++) {
+ u32 rx, tx;
+
+ mdelay(AG71XX_DMA_DELAY);
+
+ rx = ag71xx_rr(ag, AG71XX_REG_RX_CTRL) & RX_CTRL_RXE;
+ tx = ag71xx_rr(ag, AG71XX_REG_TX_CTRL) & TX_CTRL_TXE;
+ if (!rx && !tx)
+ return;
+ }
+
+ netif_err(ag, hw, ndev, "DMA stop operation timed out\n");
+}
+
+static void ag71xx_dma_reset(struct ag71xx *ag)
+{
+ struct net_device *ndev = ag->ndev;
+ u32 val;
+ int i;
+
+ /* stop RX and TX */
+ ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
+ ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);
+
+ /* give the hardware some time to really stop all rx/tx activity;
+ * clearing the descriptors too early causes random memory corruption
+ */
+ ag71xx_dma_wait_stop(ag);
+
+ /* clear descriptor addresses */
+ ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->stop_desc_dma);
+ ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->stop_desc_dma);
+
+ /* clear pending RX/TX interrupts */
+ for (i = 0; i < 256; i++) {
+ ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);
+ ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
+ }
+
+ /* clear pending errors */
+ ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE | RX_STATUS_OF);
+ ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE | TX_STATUS_UR);
+
+ val = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
+ if (val)
+ netif_err(ag, hw, ndev, "unable to clear DMA Rx status: %08x\n",
+ val);
+
+ val = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);
+
+ /* mask out reserved bits */
+ val &= ~0xff000000;
+
+ if (val)
+ netif_err(ag, hw, ndev, "unable to clear DMA Tx status: %08x\n",
+ val);
+}
+
+static void ag71xx_hw_setup(struct ag71xx *ag)
+{
+ u32 init = MAC_CFG1_INIT;
+
+ /* setup MAC configuration registers */
+ ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, init);
+
+ ag71xx_sb(ag, AG71XX_REG_MAC_CFG2,
+ MAC_CFG2_PAD_CRC_EN | MAC_CFG2_LEN_CHECK);
+
+ /* setup max frame length to zero */
+ ag71xx_wr(ag, AG71XX_REG_MAC_MFL, 0);
+
+ /* setup FIFO configuration registers */
+ ag71xx_wr(ag, AG71XX_REG_FIFO_CFG0, FIFO_CFG0_INIT);
+ ag71xx_wr(ag, AG71XX_REG_FIFO_CFG1, ag->fifodata[0]);
+ ag71xx_wr(ag, AG71XX_REG_FIFO_CFG2, ag->fifodata[1]);
+ ag71xx_wr(ag, AG71XX_REG_FIFO_CFG4, FIFO_CFG4_INIT);
+ ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, FIFO_CFG5_INIT);
+}
+
+static unsigned int ag71xx_max_frame_len(unsigned int mtu)
+{
+ return ETH_SWITCH_HEADER_LEN + ETH_HLEN + VLAN_HLEN + mtu + ETH_FCS_LEN;
+}
+
+static void ag71xx_hw_set_macaddr(struct ag71xx *ag, unsigned char *mac)
+{
+ u32 t;
+
+ t = (((u32)mac[5]) << 24) | (((u32)mac[4]) << 16)
+ | (((u32)mac[3]) << 8) | ((u32)mac[2]);
+
+ ag71xx_wr(ag, AG71XX_REG_MAC_ADDR1, t);
+
+ t = (((u32)mac[1]) << 24) | (((u32)mac[0]) << 16);
+ ag71xx_wr(ag, AG71XX_REG_MAC_ADDR2, t);
+}
+
+static void ag71xx_fast_reset(struct ag71xx *ag)
+{
+ struct net_device *dev = ag->ndev;
+ u32 rx_ds;
+ u32 mii_reg;
+
+ ag71xx_hw_stop(ag);
+
+ mii_reg = ag71xx_rr(ag, AG71XX_REG_MII_CFG);
+ rx_ds = ag71xx_rr(ag, AG71XX_REG_RX_DESC);
+
+ ag71xx_tx_packets(ag, true);
+
+ reset_control_assert(ag->mac_reset);
+ usleep_range(10, 20);
+ reset_control_deassert(ag->mac_reset);
+ usleep_range(10, 20);
+
+ ag71xx_dma_reset(ag);
+ ag71xx_hw_setup(ag);
+ ag->tx_ring.curr = 0;
+ ag->tx_ring.dirty = 0;
+ netdev_reset_queue(ag->ndev);
+
+ /* setup max frame length */
+ ag71xx_wr(ag, AG71XX_REG_MAC_MFL,
+ ag71xx_max_frame_len(ag->ndev->mtu));
+
+ ag71xx_wr(ag, AG71XX_REG_RX_DESC, rx_ds);
+ ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
+ ag71xx_wr(ag, AG71XX_REG_MII_CFG, mii_reg);
+
+ ag71xx_hw_set_macaddr(ag, dev->dev_addr);
+}
+
+static void ag71xx_hw_start(struct ag71xx *ag)
+{
+ /* start RX engine */
+ ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);
+
+ /* enable interrupts */
+ ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, AG71XX_INT_INIT);
+
+ netif_wake_queue(ag->ndev);
+}
+
+static void ag71xx_link_adjust(struct ag71xx *ag, bool update)
+{
+ struct phy_device *phydev = ag->ndev->phydev;
+ u32 cfg2;
+ u32 ifctl;
+ u32 fifo5;
+
+ if (!phydev->link && update) {
+ ag71xx_hw_stop(ag);
+ return;
+ }
+
+ if (!ag71xx_is(ag, AR7100) && !ag71xx_is(ag, AR9130))
+ ag71xx_fast_reset(ag);
+
+ cfg2 = ag71xx_rr(ag, AG71XX_REG_MAC_CFG2);
+ cfg2 &= ~(MAC_CFG2_IF_1000 | MAC_CFG2_IF_10_100 | MAC_CFG2_FDX);
+ cfg2 |= (phydev->duplex) ? MAC_CFG2_FDX : 0;
+
+ ifctl = ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL);
+ ifctl &= ~(MAC_IFCTL_SPEED);
+
+ fifo5 = ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5);
+ fifo5 &= ~FIFO_CFG5_BM;
+
+ switch (phydev->speed) {
+ case SPEED_1000:
+ cfg2 |= MAC_CFG2_IF_1000;
+ fifo5 |= FIFO_CFG5_BM;
+ break;
+ case SPEED_100:
+ cfg2 |= MAC_CFG2_IF_10_100;
+ ifctl |= MAC_IFCTL_SPEED;
+ break;
+ case SPEED_10:
+ cfg2 |= MAC_CFG2_IF_10_100;
+ break;
+ default:
+ WARN(1, "not supported speed %i\n", phydev->speed);
+ return;
+ }
+
+ if (ag->tx_ring.desc_split) {
+ ag->fifodata[2] &= 0xffff;
+ ag->fifodata[2] |= ((2048 - ag->tx_ring.desc_split) / 4) << 16;
+ }
+
+ ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, ag->fifodata[2]);
+
+ ag71xx_wr(ag, AG71XX_REG_MAC_CFG2, cfg2);
+ ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, fifo5);
+ ag71xx_wr(ag, AG71XX_REG_MAC_IFCTL, ifctl);
+
+ ag71xx_hw_start(ag);
+
+ if (update)
+ phy_print_status(phydev);
+}
+
+static void ag71xx_phy_link_adjust(struct net_device *ndev)
+{
+ struct ag71xx *ag = netdev_priv(ndev);
+
+ ag71xx_link_adjust(ag, true);
+}
+
+static int ag71xx_phy_connect(struct ag71xx *ag)
+{
+ struct device_node *np = ag->pdev->dev.of_node;
+ struct net_device *ndev = ag->ndev;
+ struct device_node *phy_node;
+ struct phy_device *phydev;
+ int ret;
+
+ if (of_phy_is_fixed_link(np)) {
+ ret = of_phy_register_fixed_link(np);
+ if (ret < 0) {
+ netif_err(ag, probe, ndev, "Failed to register fixed PHY link: %d\n",
+ ret);
+ return ret;
+ }
+
+ phy_node = of_node_get(np);
+ } else {
+ phy_node = of_parse_phandle(np, "phy-handle", 0);
+ }
+
+ if (!phy_node) {
+ netif_err(ag, probe, ndev, "Could not find valid phy node\n");
+ return -ENODEV;
+ }
+
+ phydev = of_phy_connect(ag->ndev, phy_node, ag71xx_phy_link_adjust,
+ 0, ag->phy_if_mode);
+
+ of_node_put(phy_node);
+
+ if (!phydev) {
+ netif_err(ag, probe, ndev, "Could not connect to PHY device\n");
+ return -ENODEV;
+ }
+
+ phy_attached_info(phydev);
+
+ return 0;
+}
+
+static void ag71xx_ring_tx_clean(struct ag71xx *ag)
+{
+ struct ag71xx_ring *ring = &ag->tx_ring;
+ int ring_mask = BIT(ring->order) - 1;
+ u32 bytes_compl = 0, pkts_compl = 0;
+ struct net_device *ndev = ag->ndev;
+
+ while (ring->curr != ring->dirty) {
+ struct ag71xx_desc *desc;
+ u32 i = ring->dirty & ring_mask;
+
+ desc = ag71xx_ring_desc(ring, i);
+ if (!ag71xx_desc_empty(desc)) {
+ desc->ctrl = 0;
+ ndev->stats.tx_errors++;
+ }
+
+ if (ring->buf[i].tx.skb) {
+ bytes_compl += ring->buf[i].tx.len;
+ pkts_compl++;
+ dev_kfree_skb_any(ring->buf[i].tx.skb);
+ }
+ ring->buf[i].tx.skb = NULL;
+ ring->dirty++;
+ }
+
+ /* flush descriptors */
+ wmb();
+
+ netdev_completed_queue(ndev, pkts_compl, bytes_compl);
+}
+
+static void ag71xx_ring_tx_init(struct ag71xx *ag)
+{
+ struct ag71xx_ring *ring = &ag->tx_ring;
+ int ring_size = BIT(ring->order);
+ int ring_mask = ring_size - 1;
+ int i;
+
+ for (i = 0; i < ring_size; i++) {
+ struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
+
+ desc->next = (u32)(ring->descs_dma +
+ AG71XX_DESC_SIZE * ((i + 1) & ring_mask));
+
+ desc->ctrl = DESC_EMPTY;
+ ring->buf[i].tx.skb = NULL;
+ }
+
+ /* flush descriptors */
+ wmb();
+
+ ring->curr = 0;
+ ring->dirty = 0;
+ netdev_reset_queue(ag->ndev);
+}
+
+static void ag71xx_ring_rx_clean(struct ag71xx *ag)
+{
+ struct ag71xx_ring *ring = &ag->rx_ring;
+ int ring_size = BIT(ring->order);
+ int i;
+
+ if (!ring->buf)
+ return;
+
+ for (i = 0; i < ring_size; i++)
+ if (ring->buf[i].rx.rx_buf) {
+ dma_unmap_single(&ag->pdev->dev,
+ ring->buf[i].rx.dma_addr,
+ ag->rx_buf_size, DMA_FROM_DEVICE);
+ skb_free_frag(ring->buf[i].rx.rx_buf);
+ }
+}
+
+static int ag71xx_buffer_size(struct ag71xx *ag)
+{
+ return ag->rx_buf_size +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+}
+
+static bool ag71xx_fill_rx_buf(struct ag71xx *ag, struct ag71xx_buf *buf,
+ int offset,
+ void *(*alloc)(unsigned int size))
+{
+ struct ag71xx_ring *ring = &ag->rx_ring;
+ struct ag71xx_desc *desc;
+ void *data;
+
+ desc = ag71xx_ring_desc(ring, buf - &ring->buf[0]);
+
+ data = alloc(ag71xx_buffer_size(ag));
+ if (!data)
+ return false;
+
+ buf->rx.rx_buf = data;
+ buf->rx.dma_addr = dma_map_single(&ag->pdev->dev, data, ag->rx_buf_size,
+ DMA_FROM_DEVICE);
+ desc->data = (u32)buf->rx.dma_addr + offset;
+ return true;
+}
+
+static int ag71xx_ring_rx_init(struct ag71xx *ag)
+{
+ struct ag71xx_ring *ring = &ag->rx_ring;
+ struct net_device *ndev = ag->ndev;
+ int ring_mask = BIT(ring->order) - 1;
+ int ring_size = BIT(ring->order);
+ unsigned int i;
+ int ret;
+
+ ret = 0;
+ for (i = 0; i < ring_size; i++) {
+ struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
+
+ desc->next = (u32)(ring->descs_dma +
+ AG71XX_DESC_SIZE * ((i + 1) & ring_mask));
+
+ netif_dbg(ag, rx_status, ndev, "RX desc at %p, next is %08x\n",
+ desc, desc->next);
+ }
+
+ for (i = 0; i < ring_size; i++) {
+ struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
+
+ if (!ag71xx_fill_rx_buf(ag, &ring->buf[i], ag->rx_buf_offset,
+ netdev_alloc_frag)) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ desc->ctrl = DESC_EMPTY;
+ }
+
+ /* flush descriptors */
+ wmb();
+
+ ring->curr = 0;
+ ring->dirty = 0;
+
+ return ret;
+}
+
+static int ag71xx_ring_rx_refill(struct ag71xx *ag)
+{
+ struct ag71xx_ring *ring = &ag->rx_ring;
+ int ring_mask = BIT(ring->order) - 1;
+ int offset = ag->rx_buf_offset;
+ unsigned int count;
+
+ count = 0;
+ for (; ring->curr - ring->dirty > 0; ring->dirty++) {
+ struct ag71xx_desc *desc;
+ unsigned int i;
+
+ i = ring->dirty & ring_mask;
+ desc = ag71xx_ring_desc(ring, i);
+
+ if (!ring->buf[i].rx.rx_buf &&
+ !ag71xx_fill_rx_buf(ag, &ring->buf[i], offset,
+ napi_alloc_frag))
+ break;
+
+ desc->ctrl = DESC_EMPTY;
+ count++;
+ }
+
+ /* flush descriptors */
+ wmb();
+
+ netif_dbg(ag, rx_status, ag->ndev, "%u rx descriptors refilled\n",
+ count);
+
+ return count;
+}
+
+static int ag71xx_rings_init(struct ag71xx *ag)
+{
+ struct ag71xx_ring *tx = &ag->tx_ring;
+ struct ag71xx_ring *rx = &ag->rx_ring;
+ int ring_size, tx_size;
+
+ ring_size = BIT(tx->order) + BIT(rx->order);
+ tx_size = BIT(tx->order);
+
+ tx->buf = kcalloc(ring_size, sizeof(*tx->buf), GFP_KERNEL);
+ if (!tx->buf)
+ return -ENOMEM;
+
+ tx->descs_cpu = dma_alloc_coherent(&ag->pdev->dev,
+ ring_size * AG71XX_DESC_SIZE,
+ &tx->descs_dma, GFP_ATOMIC);
+ if (!tx->descs_cpu) {
+ kfree(tx->buf);
+ tx->buf = NULL;
+ return -ENOMEM;
+ }
+
+ rx->buf = &tx->buf[BIT(tx->order)];
+ rx->descs_cpu = ((void *)tx->descs_cpu) + tx_size * AG71XX_DESC_SIZE;
+ rx->descs_dma = tx->descs_dma + tx_size * AG71XX_DESC_SIZE;
+
+ ag71xx_ring_tx_init(ag);
+ return ag71xx_ring_rx_init(ag);
+}
+
+static void ag71xx_rings_free(struct ag71xx *ag)
+{
+ struct ag71xx_ring *tx = &ag->tx_ring;
+ struct ag71xx_ring *rx = &ag->rx_ring;
+ int ring_size;
+
+ ring_size = BIT(tx->order) + BIT(rx->order);
+
+ if (tx->descs_cpu)
+ dma_free_coherent(&ag->pdev->dev, ring_size * AG71XX_DESC_SIZE,
+ tx->descs_cpu, tx->descs_dma);
+
+ kfree(tx->buf);
+
+ tx->descs_cpu = NULL;
+ rx->descs_cpu = NULL;
+ tx->buf = NULL;
+ rx->buf = NULL;
+}
+
+static void ag71xx_rings_cleanup(struct ag71xx *ag)
+{
+ ag71xx_ring_rx_clean(ag);
+ ag71xx_ring_tx_clean(ag);
+ ag71xx_rings_free(ag);
+
+ netdev_reset_queue(ag->ndev);
+}
+
+static void ag71xx_hw_init(struct ag71xx *ag)
+{
+ ag71xx_hw_stop(ag);
+
+ ag71xx_sb(ag, AG71XX_REG_MAC_CFG1, MAC_CFG1_SR);
+ usleep_range(20, 30);
+
+ reset_control_assert(ag->mac_reset);
+ msleep(100);
+ reset_control_deassert(ag->mac_reset);
+ msleep(200);
+
+ ag71xx_hw_setup(ag);
+
+ ag71xx_dma_reset(ag);
+}
+
+static int ag71xx_hw_enable(struct ag71xx *ag)
+{
+ int ret;
+
+ ret = ag71xx_rings_init(ag);
+ if (ret)
+ return ret;
+
+ napi_enable(&ag->napi);
+ ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
+ ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->rx_ring.descs_dma);
+ netif_start_queue(ag->ndev);
+
+ return 0;
+}
+
+static void ag71xx_hw_disable(struct ag71xx *ag)
+{
+ netif_stop_queue(ag->ndev);
+
+ ag71xx_hw_stop(ag);
+ ag71xx_dma_reset(ag);
+
+ napi_disable(&ag->napi);
+ del_timer_sync(&ag->oom_timer);
+
+ ag71xx_rings_cleanup(ag);
+}
+
+static int ag71xx_open(struct net_device *ndev)
+{
+ struct ag71xx *ag = netdev_priv(ndev);
+ unsigned int max_frame_len;
+ int ret;
+
+ max_frame_len = ag71xx_max_frame_len(ndev->mtu);
+ ag->rx_buf_size =
+ SKB_DATA_ALIGN(max_frame_len + NET_SKB_PAD + NET_IP_ALIGN);
+
+ /* setup max frame length */
+ ag71xx_wr(ag, AG71XX_REG_MAC_MFL, max_frame_len);
+ ag71xx_hw_set_macaddr(ag, ndev->dev_addr);
+
+ ret = ag71xx_hw_enable(ag);
+ if (ret)
+ goto err;
+
+ ret = ag71xx_phy_connect(ag);
+ if (ret)
+ goto err;
+
+ phy_start(ndev->phydev);
+
+ return 0;
+
+err:
+ ag71xx_rings_cleanup(ag);
+ return ret;
+}
+
+static int ag71xx_stop(struct net_device *ndev)
+{
+ struct ag71xx *ag = netdev_priv(ndev);
+
+ phy_stop(ndev->phydev);
+ phy_disconnect(ndev->phydev);
+ ag71xx_hw_disable(ag);
+
+ return 0;
+}
+
+static int ag71xx_fill_dma_desc(struct ag71xx_ring *ring, u32 addr, int len)
+{
+ int i, ring_mask, ndesc, split;
+ struct ag71xx_desc *desc;
+
+ ring_mask = BIT(ring->order) - 1;
+ ndesc = 0;
+ split = ring->desc_split;
+
+ if (!split)
+ split = len;
+
+ while (len > 0) {
+ unsigned int cur_len = len;
+
+ i = (ring->curr + ndesc) & ring_mask;
+ desc = ag71xx_ring_desc(ring, i);
+
+ if (!ag71xx_desc_empty(desc))
+ return -1;
+
+ if (cur_len > split) {
+ cur_len = split;
+
+ /* TX will hang if DMA transfers <= 4 bytes,
+ * make sure the next segment is more than 4 bytes long.
+ */
+ if (len <= split + 4)
+ cur_len -= 4;
+ }
+
+ desc->data = addr;
+ addr += cur_len;
+ len -= cur_len;
+
+ if (len > 0)
+ cur_len |= DESC_MORE;
+
+ /* prevent early tx attempt of this descriptor */
+ if (!ndesc)
+ cur_len |= DESC_EMPTY;
+
+ desc->ctrl = cur_len;
+ ndesc++;
+ }
+
+ return ndesc;
+}
+
+static netdev_tx_t ag71xx_hard_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ int i, n, ring_min, ring_mask, ring_size;
+ struct ag71xx *ag = netdev_priv(ndev);
+ struct ag71xx_ring *ring;
+ struct ag71xx_desc *desc;
+ dma_addr_t dma_addr;
+
+ ring = &ag->tx_ring;
+ ring_mask = BIT(ring->order) - 1;
+ ring_size = BIT(ring->order);
+
+ if (skb->len <= 4) {
+ netif_dbg(ag, tx_err, ndev, "packet len is too small\n");
+ goto err_drop;
+ }
+
+ dma_addr = dma_map_single(&ag->pdev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+
+ i = ring->curr & ring_mask;
+ desc = ag71xx_ring_desc(ring, i);
+
+ /* setup descriptor fields */
+ n = ag71xx_fill_dma_desc(ring, (u32)dma_addr,
+ skb->len & ag->dcfg->desc_pktlen_mask);
+ if (n < 0)
+ goto err_drop_unmap;
+
+ i = (ring->curr + n - 1) & ring_mask;
+ ring->buf[i].tx.len = skb->len;
+ ring->buf[i].tx.skb = skb;
+
+ netdev_sent_queue(ndev, skb->len);
+
+ skb_tx_timestamp(skb);
+
+ desc->ctrl &= ~DESC_EMPTY;
+ ring->curr += n;
+
+ /* flush descriptor */
+ wmb();
+
+ ring_min = 2;
+ if (ring->desc_split)
+ ring_min *= AG71XX_TX_RING_DS_PER_PKT;
+
+ if (ring->curr - ring->dirty >= ring_size - ring_min) {
+ netif_dbg(ag, tx_err, ndev, "tx queue full\n");
+ netif_stop_queue(ndev);
+ }
+
+ netif_dbg(ag, tx_queued, ndev, "packet injected into TX queue\n");
+
+ /* enable TX engine */
+ ag71xx_wr(ag, AG71XX_REG_TX_CTRL, TX_CTRL_TXE);
+
+ return NETDEV_TX_OK;
+
+err_drop_unmap:
+ dma_unmap_single(&ag->pdev->dev, dma_addr, skb->len, DMA_TO_DEVICE);
+
+err_drop:
+ ndev->stats.tx_dropped++;
+
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+static int ag71xx_do_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
+{
+ if (!ndev->phydev)
+ return -EINVAL;
+
+ return phy_mii_ioctl(ndev->phydev, ifr, cmd);
+}
+
+static void ag71xx_oom_timer_handler(struct timer_list *t)
+{
+ struct ag71xx *ag = from_timer(ag, t, oom_timer);
+
+ napi_schedule(&ag->napi);
+}
+
+static void ag71xx_tx_timeout(struct net_device *ndev)
+{
+ struct ag71xx *ag = netdev_priv(ndev);
+
+ netif_err(ag, tx_err, ndev, "tx timeout\n");
+
+ schedule_delayed_work(&ag->restart_work, 1);
+}
+
+static void ag71xx_restart_work_func(struct work_struct *work)
+{
+ struct ag71xx *ag = container_of(work, struct ag71xx,
+ restart_work.work);
+ struct net_device *ndev = ag->ndev;
+
+ rtnl_lock();
+ ag71xx_hw_disable(ag);
+ ag71xx_hw_enable(ag);
+ if (ndev->phydev->link)
+ ag71xx_link_adjust(ag, false);
+ rtnl_unlock();
+}
+
+static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
+{
+ struct net_device *ndev = ag->ndev;
+ int ring_mask, ring_size, done = 0;
+ unsigned int pktlen_mask, offset;
+ struct sk_buff *next, *skb;
+ struct ag71xx_ring *ring;
+ struct list_head rx_list;
+
+ ring = &ag->rx_ring;
+ pktlen_mask = ag->dcfg->desc_pktlen_mask;
+ offset = ag->rx_buf_offset;
+ ring_mask = BIT(ring->order) - 1;
+ ring_size = BIT(ring->order);
+
+ netif_dbg(ag, rx_status, ndev, "rx packets, limit=%d, curr=%u, dirty=%u\n",
+ limit, ring->curr, ring->dirty);
+
+ INIT_LIST_HEAD(&rx_list);
+
+ while (done < limit) {
+ unsigned int i = ring->curr & ring_mask;
+ struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
+ int pktlen;
+ int err = 0;
+
+ if (ag71xx_desc_empty(desc))
+ break;
+
+ if ((ring->dirty + ring_size) == ring->curr) {
+ WARN_ONCE(1, "RX out of ring");
+ break;
+ }
+
+ ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);
+
+ pktlen = desc->ctrl & pktlen_mask;
+ pktlen -= ETH_FCS_LEN;
+
+ dma_unmap_single(&ag->pdev->dev, ring->buf[i].rx.dma_addr,
+ ag->rx_buf_size, DMA_FROM_DEVICE);
+
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += pktlen;
+
+ skb = build_skb(ring->buf[i].rx.rx_buf, ag71xx_buffer_size(ag));
+ if (!skb) {
+ skb_free_frag(ring->buf[i].rx.rx_buf);
+ goto next;
+ }
+
+ skb_reserve(skb, offset);
+ skb_put(skb, pktlen);
+
+ if (err) {
+ ndev->stats.rx_dropped++;
+ kfree_skb(skb);
+ } else {
+ skb->dev = ndev;
+ skb->ip_summed = CHECKSUM_NONE;
+ list_add_tail(&skb->list, &rx_list);
+ }
+
+next:
+ ring->buf[i].rx.rx_buf = NULL;
+ done++;
+
+ ring->curr++;
+ }
+
+ ag71xx_ring_rx_refill(ag);
+
+ list_for_each_entry_safe(skb, next, &rx_list, list)
+ skb->protocol = eth_type_trans(skb, ndev);
+ netif_receive_skb_list(&rx_list);
+
+ netif_dbg(ag, rx_status, ndev, "rx finish, curr=%u, dirty=%u, done=%d\n",
+ ring->curr, ring->dirty, done);
+
+ return done;
+}
+
+static int ag71xx_poll(struct napi_struct *napi, int limit)
+{
+ struct ag71xx *ag = container_of(napi, struct ag71xx, napi);
+ struct ag71xx_ring *rx_ring = &ag->rx_ring;
+ int rx_ring_size = BIT(rx_ring->order);
+ struct net_device *ndev = ag->ndev;
+ int tx_done, rx_done;
+ u32 status;
+
+ tx_done = ag71xx_tx_packets(ag, false);
+
+ netif_dbg(ag, rx_status, ndev, "processing RX ring\n");
+ rx_done = ag71xx_rx_packets(ag, limit);
+
+ if (!rx_ring->buf[rx_ring->dirty % rx_ring_size].rx.rx_buf)
+ goto oom;
+
+ status = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
+ if (unlikely(status & RX_STATUS_OF)) {
+ ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_OF);
+ ndev->stats.rx_fifo_errors++;
+
+ /* restart RX */
+ ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);
+ }
+
+ if (rx_done < limit) {
+ if (status & RX_STATUS_PR)
+ goto more;
+
+ status = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);
+ if (status & TX_STATUS_PS)
+ goto more;
+
+ netif_dbg(ag, rx_status, ndev, "disable polling mode, rx=%d, tx=%d,limit=%d\n",
+ rx_done, tx_done, limit);
+
+ napi_complete(napi);
+
+ /* enable interrupts */
+ ag71xx_int_enable(ag, AG71XX_INT_POLL);
+ return rx_done;
+ }
+
+more:
+ netif_dbg(ag, rx_status, ndev, "stay in polling mode, rx=%d, tx=%d, limit=%d\n",
+ rx_done, tx_done, limit);
+ return limit;
+
+oom:
+ netif_err(ag, rx_err, ndev, "out of memory\n");
+
+ mod_timer(&ag->oom_timer, jiffies + AG71XX_OOM_REFILL);
+ napi_complete(napi);
+ return 0;
+}
+
+static irqreturn_t ag71xx_interrupt(int irq, void *dev_id)
+{
+ struct net_device *ndev = dev_id;
+ struct ag71xx *ag;
+ u32 status;
+
+ ag = netdev_priv(ndev);
+ status = ag71xx_rr(ag, AG71XX_REG_INT_STATUS);
+
+ if (unlikely(!status))
+ return IRQ_NONE;
+
+ if (unlikely(status & AG71XX_INT_ERR)) {
+ if (status & AG71XX_INT_TX_BE) {
+ ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE);
+ netif_err(ag, intr, ndev, "TX BUS error\n");
+ }
+ if (status & AG71XX_INT_RX_BE) {
+ ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE);
+ netif_err(ag, intr, ndev, "RX BUS error\n");
+ }
+ }
+
+ if (likely(status & AG71XX_INT_POLL)) {
+ ag71xx_int_disable(ag, AG71XX_INT_POLL);
+ netif_dbg(ag, intr, ndev, "enable polling mode\n");
+ napi_schedule(&ag->napi);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int ag71xx_change_mtu(struct net_device *ndev, int new_mtu)
+{
+ struct ag71xx *ag = netdev_priv(ndev);
+
+ ndev->mtu = new_mtu;
+ ag71xx_wr(ag, AG71XX_REG_MAC_MFL,
+ ag71xx_max_frame_len(ndev->mtu));
+
+ return 0;
+}
+
+static const struct net_device_ops ag71xx_netdev_ops = {
+ .ndo_open = ag71xx_open,
+ .ndo_stop = ag71xx_stop,
+ .ndo_start_xmit = ag71xx_hard_start_xmit,
+ .ndo_do_ioctl = ag71xx_do_ioctl,
+ .ndo_tx_timeout = ag71xx_tx_timeout,
+ .ndo_change_mtu = ag71xx_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static const u32 ar71xx_addr_ar7100[] = {
+ 0x19000000, 0x1a000000,
+};
+
+static int ag71xx_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ const struct ag71xx_dcfg *dcfg;
+ struct net_device *ndev;
+ struct resource *res;
+ const void *mac_addr;
+ int tx_size, err, i;
+ struct ag71xx *ag;
+
+ if (!np)
+ return -ENODEV;
+
+ ndev = devm_alloc_etherdev(&pdev->dev, sizeof(*ag));
+ if (!ndev)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+
+ dcfg = of_device_get_match_data(&pdev->dev);
+ if (!dcfg)
+ return -EINVAL;
+
+ ag = netdev_priv(ndev);
+ ag->mac_idx = -1;
+ for (i = 0; i < ARRAY_SIZE(ar71xx_addr_ar7100); i++) {
+ if (ar71xx_addr_ar7100[i] == res->start)
+ ag->mac_idx = i;
+ }
+
+ if (ag->mac_idx < 0) {
+ netif_err(ag, probe, ndev, "unknown mac idx\n");
+ return -EINVAL;
+ }
+
+ ag->clk_eth = devm_clk_get(&pdev->dev, "eth");
+ if (IS_ERR(ag->clk_eth)) {
+ netif_err(ag, probe, ndev, "Failed to get eth clk.\n");
+ return PTR_ERR(ag->clk_eth);
+ }
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ ag->pdev = pdev;
+ ag->ndev = ndev;
+ ag->dcfg = dcfg;
+ ag->msg_enable = netif_msg_init(-1, AG71XX_DEFAULT_MSG_ENABLE);
+ memcpy(ag->fifodata, dcfg->fifodata, sizeof(ag->fifodata));
+
+ ag->mac_reset = devm_reset_control_get(&pdev->dev, "mac");
+ if (IS_ERR(ag->mac_reset)) {
+ netif_err(ag, probe, ndev, "missing mac reset\n");
+ err = PTR_ERR(ag->mac_reset);
+ goto err_free;
+ }
+
+ ag->mac_base = devm_ioremap_nocache(&pdev->dev, res->start,
+ res->end - res->start + 1);
+ if (!ag->mac_base) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+
+ ndev->irq = platform_get_irq(pdev, 0);
+ err = devm_request_irq(&pdev->dev, ndev->irq, ag71xx_interrupt,
+ 0x0, dev_name(&pdev->dev), ndev);
+ if (err) {
+ netif_err(ag, probe, ndev, "unable to request IRQ %d\n",
+ ndev->irq);
+ goto err_free;
+ }
+
+ ndev->netdev_ops = &ag71xx_netdev_ops;
+
+ INIT_DELAYED_WORK(&ag->restart_work, ag71xx_restart_work_func);
+ timer_setup(&ag->oom_timer, ag71xx_oom_timer_handler, 0);
+
+ tx_size = AG71XX_TX_RING_SIZE_DEFAULT;
+ ag->rx_ring.order = ag71xx_ring_size_order(AG71XX_RX_RING_SIZE_DEFAULT);
+
+ ndev->min_mtu = 68;
+ ndev->max_mtu = dcfg->max_frame_len - ag71xx_max_frame_len(0);
+
+ ag->rx_buf_offset = NET_SKB_PAD;
+ if (!ag71xx_is(ag, AR7100) && !ag71xx_is(ag, AR9130))
+ ag->rx_buf_offset += NET_IP_ALIGN;
+
+ if (ag71xx_is(ag, AR7100)) {
+ ag->tx_ring.desc_split = AG71XX_TX_RING_SPLIT;
+ tx_size *= AG71XX_TX_RING_DS_PER_PKT;
+ }
+ ag->tx_ring.order = ag71xx_ring_size_order(tx_size);
+
+ ag->stop_desc = dmam_alloc_coherent(&pdev->dev,
+ sizeof(struct ag71xx_desc),
+ &ag->stop_desc_dma, GFP_KERNEL);
+ if (!ag->stop_desc) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+
+ ag->stop_desc->data = 0;
+ ag->stop_desc->ctrl = 0;
+ ag->stop_desc->next = (u32)ag->stop_desc_dma;
+
+ mac_addr = of_get_mac_address(np);
+ if (mac_addr)
+ memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
+ if (!mac_addr || !is_valid_ether_addr(ndev->dev_addr)) {
+ netif_err(ag, probe, ndev, "invalid MAC address, using random address\n");
+ eth_random_addr(ndev->dev_addr);
+ }
+
+ ag->phy_if_mode = of_get_phy_mode(np);
+ if (ag->phy_if_mode < 0) {
+ netif_err(ag, probe, ndev, "missing phy-mode property in DT\n");
+ err = ag->phy_if_mode;
+ goto err_free;
+ }
+
+ netif_napi_add(ndev, &ag->napi, ag71xx_poll, AG71XX_NAPI_WEIGHT);
+
+ err = clk_prepare_enable(ag->clk_eth);
+ if (err) {
+ netif_err(ag, probe, ndev, "Failed to enable eth clk.\n");
+ goto err_free;
+ }
+
+ ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, 0);
+
+ ag71xx_hw_init(ag);
+
+ err = ag71xx_mdio_probe(ag);
+ if (err)
+ goto err_put_clk;
+
+ platform_set_drvdata(pdev, ndev);
+
+ err = register_netdev(ndev);
+ if (err) {
+ netif_err(ag, probe, ndev, "unable to register net device\n");
+ platform_set_drvdata(pdev, NULL);
+ goto err_mdio_remove;
+ }
+
+ netif_info(ag, probe, ndev, "Atheros AG71xx at 0x%08lx, irq %d, mode:%s\n",
+ (unsigned long)ag->mac_base, ndev->irq,
+ phy_modes(ag->phy_if_mode));
+
+ return 0;
+
+err_mdio_remove:
+ ag71xx_mdio_remove(ag);
+err_put_clk:
+ clk_disable_unprepare(ag->clk_eth);
+err_free:
+ /* ndev was allocated with devm_alloc_etherdev(), so it is released by devres; do not free_netdev() here. */
+ return err;
+}
+
+static int ag71xx_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct ag71xx *ag;
+
+ if (!ndev)
+ return 0;
+
+ ag = netdev_priv(ndev);
+ unregister_netdev(ndev);
+ ag71xx_mdio_remove(ag);
+ clk_disable_unprepare(ag->clk_eth);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static const u32 ar71xx_fifo_ar7100[] = {
+ 0x0fff0000, 0x00001fff, 0x00780fff,
+};
+
+static const u32 ar71xx_fifo_ar9130[] = {
+ 0x0fff0000, 0x00001fff, 0x008001ff,
+};
+
+static const u32 ar71xx_fifo_ar9330[] = {
+ 0x0010ffff, 0x015500aa, 0x01f00140,
+};
+
+static const struct ag71xx_dcfg ag71xx_dcfg_ar7100 = {
+ .type = AR7100,
+ .fifodata = ar71xx_fifo_ar7100,
+ .max_frame_len = 1540,
+ .desc_pktlen_mask = SZ_4K - 1,
+ .tx_hang_workaround = false,
+};
+
+static const struct ag71xx_dcfg ag71xx_dcfg_ar7240 = {
+ .type = AR7240,
+ .fifodata = ar71xx_fifo_ar7100,
+ .max_frame_len = 1540,
+ .desc_pktlen_mask = SZ_4K - 1,
+ .tx_hang_workaround = true,
+};
+
+static const struct ag71xx_dcfg ag71xx_dcfg_ar9130 = {
+ .type = AR9130,
+ .fifodata = ar71xx_fifo_ar9130,
+ .max_frame_len = 1540,
+ .desc_pktlen_mask = SZ_4K - 1,
+ .tx_hang_workaround = false,
+};
+
+static const struct ag71xx_dcfg ag71xx_dcfg_ar9330 = {
+ .type = AR9330,
+ .fifodata = ar71xx_fifo_ar9330,
+ .max_frame_len = 1540,
+ .desc_pktlen_mask = SZ_4K - 1,
+ .tx_hang_workaround = true,
+};
+
+static const struct ag71xx_dcfg ag71xx_dcfg_ar9340 = {
+ .type = AR9340,
+ .fifodata = ar71xx_fifo_ar9330,
+ .max_frame_len = SZ_16K - 1,
+ .desc_pktlen_mask = SZ_16K - 1,
+ .tx_hang_workaround = true,
+};
+
+static const struct ag71xx_dcfg ag71xx_dcfg_qca9530 = {
+ .type = QCA9530,
+ .fifodata = ar71xx_fifo_ar9330,
+ .max_frame_len = SZ_16K - 1,
+ .desc_pktlen_mask = SZ_16K - 1,
+ .tx_hang_workaround = true,
+};
+
+static const struct ag71xx_dcfg ag71xx_dcfg_qca9550 = {
+ .type = QCA9550,
+ .fifodata = ar71xx_fifo_ar9330,
+ .max_frame_len = 1540,
+ .desc_pktlen_mask = SZ_16K - 1,
+ .tx_hang_workaround = true,
+};
+
+static const struct of_device_id ag71xx_match[] = {
+ { .compatible = "qca,ar7100-eth", .data = &ag71xx_dcfg_ar7100 },
+ { .compatible = "qca,ar7240-eth", .data = &ag71xx_dcfg_ar7240 },
+ { .compatible = "qca,ar7241-eth", .data = &ag71xx_dcfg_ar7240 },
+ { .compatible = "qca,ar7242-eth", .data = &ag71xx_dcfg_ar7240 },
+ { .compatible = "qca,ar9130-eth", .data = &ag71xx_dcfg_ar9130 },
+ { .compatible = "qca,ar9330-eth", .data = &ag71xx_dcfg_ar9330 },
+ { .compatible = "qca,ar9340-eth", .data = &ag71xx_dcfg_ar9340 },
+ { .compatible = "qca,qca9530-eth", .data = &ag71xx_dcfg_qca9530 },
+ { .compatible = "qca,qca9550-eth", .data = &ag71xx_dcfg_qca9550 },
+ { .compatible = "qca,qca9560-eth", .data = &ag71xx_dcfg_qca9550 },
+ {}
+};
+
+static struct platform_driver ag71xx_driver = {
+ .probe = ag71xx_probe,
+ .remove = ag71xx_remove,
+ .driver = {
+ .name = "ag71xx",
+ .of_match_table = ag71xx_match,
+ }
+};
+
+module_platform_driver(ag71xx_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 25bf085324b8..be7f9cebb675 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -2201,7 +2201,7 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
struct net_device *netdev)
{
struct atl1c_adapter *adapter = netdev_priv(netdev);
- u16 tpd_req = 1;
+ u16 tpd_req;
struct atl1c_tpd_desc *tpd;
enum atl1c_trans_queue type = atl1c_trans_normal;
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index b123509d385f..e9017caf024d 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -8,6 +8,7 @@ config NET_VENDOR_BROADCOM
default y
depends on (SSB_POSSIBLE && HAS_DMA) || PCI || BCM63XX || \
SIBYTE_SB1xxx_SOC
+ select DIMLIB
---help---
If you have a network (Ethernet) chipset belonging to this class,
say Y.
@@ -198,6 +199,7 @@ config BNXT
select FW_LOADER
select LIBCRC32C
select NET_DEVLINK
+ select PAGE_POOL
---help---
This driver supports Broadcom NetXtreme-C/E 10/25/40/50 gigabit
Ethernet cards. To compile this driver as a module, choose M here:
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 85e610210477..291e4afd4a1a 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -2659,7 +2659,6 @@ static int bcm_enetsw_probe(struct platform_device *pdev)
if (!dev)
return -ENOMEM;
priv = netdev_priv(dev);
- memset(priv, 0, sizeof(*priv));
/* initialize default and fetch platform data */
priv->enet_is_sw = true;
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index cae9b77ff44b..b9c5cea8db16 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -609,7 +609,7 @@ static int bcm_sysport_set_coalesce(struct net_device *dev,
struct ethtool_coalesce *ec)
{
struct bcm_sysport_priv *priv = netdev_priv(dev);
- struct net_dim_cq_moder moder;
+ struct dim_cq_moder moder;
u32 usecs, pkts;
unsigned int i;
@@ -992,7 +992,7 @@ static int bcm_sysport_poll(struct napi_struct *napi, int budget)
{
struct bcm_sysport_priv *priv =
container_of(napi, struct bcm_sysport_priv, napi);
- struct net_dim_sample dim_sample;
+ struct dim_sample dim_sample;
unsigned int work_done = 0;
work_done = bcm_sysport_desc_rx(priv, budget);
@@ -1016,8 +1016,8 @@ static int bcm_sysport_poll(struct napi_struct *napi, int budget)
}
if (priv->dim.use_dim) {
- net_dim_sample(priv->dim.event_ctr, priv->dim.packets,
- priv->dim.bytes, &dim_sample);
+ dim_update_sample(priv->dim.event_ctr, priv->dim.packets,
+ priv->dim.bytes, &dim_sample);
net_dim(&priv->dim.dim, dim_sample);
}
@@ -1087,16 +1087,16 @@ static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
static void bcm_sysport_dim_work(struct work_struct *work)
{
- struct net_dim *dim = container_of(work, struct net_dim, work);
+ struct dim *dim = container_of(work, struct dim, work);
struct bcm_sysport_net_dim *ndim =
container_of(dim, struct bcm_sysport_net_dim, dim);
struct bcm_sysport_priv *priv =
container_of(ndim, struct bcm_sysport_priv, dim);
- struct net_dim_cq_moder cur_profile =
- net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
+ struct dim_cq_moder cur_profile = net_dim_get_rx_moderation(dim->mode,
+ dim->profile_ix);
bcm_sysport_set_rx_coalesce(priv, cur_profile.usec, cur_profile.pkts);
- dim->state = NET_DIM_START_MEASURE;
+ dim->state = DIM_START_MEASURE;
}
/* RX and misc interrupt routine */
@@ -1437,7 +1437,7 @@ static void bcm_sysport_init_dim(struct bcm_sysport_priv *priv,
struct bcm_sysport_net_dim *dim = &priv->dim;
INIT_WORK(&dim->dim.work, cb);
- dim->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ dim->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
dim->event_ctr = 0;
dim->packets = 0;
dim->bytes = 0;
@@ -1446,7 +1446,7 @@ static void bcm_sysport_init_dim(struct bcm_sysport_priv *priv,
static void bcm_sysport_init_rx_coalesce(struct bcm_sysport_priv *priv)
{
struct bcm_sysport_net_dim *dim = &priv->dim;
- struct net_dim_cq_moder moder;
+ struct dim_cq_moder moder;
u32 usecs, pkts;
usecs = priv->rx_coalesce_usecs;
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index 86193931203a..6d80735fbc7f 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -11,7 +11,7 @@
#include <linux/bitmap.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
-#include <linux/net_dim.h>
+#include <linux/dim.h>
/* Receive/transmit descriptor format */
#define DESC_ADDR_HI_STATUS_LEN 0x00
@@ -702,7 +702,7 @@ struct bcm_sysport_net_dim {
u16 event_ctr;
unsigned long packets;
unsigned long bytes;
- struct net_dim dim;
+ struct dim dim;
};
/* Software view of the TX ring */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 008ad0ca89ba..656ed80647f0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -684,7 +684,7 @@ static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
if (unlikely(gfpflags_allow_blocking(gfp_mask)))
return (void *)__get_free_page(gfp_mask);
- return netdev_alloc_frag(fp->rx_frag_size);
+ return napi_alloc_frag(fp->rx_frag_size);
}
return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
@@ -3857,9 +3857,12 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
if (!(bp->flags & TX_TIMESTAMPING_EN)) {
+ bp->eth_stats.ptp_skip_tx_ts++;
BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n");
} else if (bp->ptp_tx_skb) {
- BNX2X_ERR("The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n");
+ bp->eth_stats.ptp_skip_tx_ts++;
+ netdev_err_once(bp->dev,
+ "Device supports only a single outstanding packet to timestamp, this packet won't be timestamped\n");
} else {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
/* schedule check for Tx timestamp */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 51fc845de31a..4a0ba6801c9e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -182,7 +182,9 @@ static const struct {
{ STATS_OFFSET32(driver_filtered_tx_pkt),
4, false, "driver_filtered_tx_pkt" },
{ STATS_OFFSET32(eee_tx_lpi),
- 4, true, "Tx LPI entry count"}
+ 4, true, "Tx LPI entry count"},
+ { STATS_OFFSET32(ptp_skip_tx_ts),
+ 4, false, "ptp_skipped_tx_tstamp" },
};
#define BNX2X_NUM_STATS ARRAY_SIZE(bnx2x_stats_arr)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 03ac10b1cd1e..2cc14db8f0ec 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -15214,11 +15214,24 @@ static void bnx2x_ptp_task(struct work_struct *work)
u32 val_seq;
u64 timestamp, ns;
struct skb_shared_hwtstamps shhwtstamps;
+ bool bail = true;
+ int i;
+
+ /* FW may take a while to complete timestamping; try a bit and if it's
+ * still not complete, it may indicate an error state - bail out then.
+ */
+ for (i = 0; i < 10; i++) {
+ /* Read Tx timestamp registers */
+ val_seq = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
+ NIG_REG_P0_TLLH_PTP_BUF_SEQID);
+ if (val_seq & 0x10000) {
+ bail = false;
+ break;
+ }
+ msleep(1 << i);
+ }
- /* Read Tx timestamp registers */
- val_seq = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
- NIG_REG_P0_TLLH_PTP_BUF_SEQID);
- if (val_seq & 0x10000) {
+ if (!bail) {
/* There is a valid timestamp value */
timestamp = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_MSB :
NIG_REG_P0_TLLH_PTP_BUF_TS_MSB);
@@ -15233,16 +15246,18 @@ static void bnx2x_ptp_task(struct work_struct *work)
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
shhwtstamps.hwtstamp = ns_to_ktime(ns);
skb_tstamp_tx(bp->ptp_tx_skb, &shhwtstamps);
- dev_kfree_skb_any(bp->ptp_tx_skb);
- bp->ptp_tx_skb = NULL;
DP(BNX2X_MSG_PTP, "Tx timestamp, timestamp cycles = %llu, ns = %llu\n",
timestamp, ns);
} else {
- DP(BNX2X_MSG_PTP, "There is no valid Tx timestamp yet\n");
- /* Reschedule to keep checking for a valid timestamp value */
- schedule_work(&bp->ptp_task);
+ DP(BNX2X_MSG_PTP,
+ "Tx timestamp is not recorded (register read=%u)\n",
+ val_seq);
+ bp->eth_stats.ptp_skip_tx_ts++;
}
+
+ dev_kfree_skb_any(bp->ptp_tx_skb);
+ bp->ptp_tx_skb = NULL;
}
void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index b2644ed13d06..d55e63692cf3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -207,6 +207,9 @@ struct bnx2x_eth_stats {
u32 driver_filtered_tx_pkt;
/* src: Clear-on-Read register; Will not survive PMF Migration */
u32 eee_tx_lpi;
+
+ /* PTP */
+ u32 ptp_skip_tx_ts;
};
struct bnx2x_eth_q_stats {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index f758b2e0591f..3f632028eff0 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -54,6 +54,7 @@
#include <net/pkt_cls.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
+#include <net/page_pool.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
@@ -668,19 +669,20 @@ next_tx_int:
}
static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
+ struct bnxt_rx_ring_info *rxr,
gfp_t gfp)
{
struct device *dev = &bp->pdev->dev;
struct page *page;
- page = alloc_page(gfp);
+ page = page_pool_dev_alloc_pages(rxr->page_pool);
if (!page)
return NULL;
*mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
DMA_ATTR_WEAK_ORDERING);
if (dma_mapping_error(dev, *mapping)) {
- __free_page(page);
+ page_pool_recycle_direct(rxr->page_pool, page);
return NULL;
}
*mapping += bp->rx_dma_offset;
@@ -716,7 +718,8 @@ int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
dma_addr_t mapping;
if (BNXT_RX_PAGE_MODE(bp)) {
- struct page *page = __bnxt_alloc_rx_page(bp, &mapping, gfp);
+ struct page *page =
+ __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);
if (!page)
return -ENOMEM;
@@ -1989,6 +1992,9 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
}
}
+ if (event & BNXT_REDIRECT_EVENT)
+ xdp_do_flush_map();
+
if (event & BNXT_TX_EVENT) {
struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
u16 prod = txr->tx_prod;
@@ -2130,12 +2136,12 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
}
}
if (bp->flags & BNXT_FLAG_DIM) {
- struct net_dim_sample dim_sample;
+ struct dim_sample dim_sample;
- net_dim_sample(cpr->event_ctr,
- cpr->rx_packets,
- cpr->rx_bytes,
- &dim_sample);
+ dim_update_sample(cpr->event_ctr,
+ cpr->rx_packets,
+ cpr->rx_bytes,
+ &dim_sample);
net_dim(&cpr->dim, dim_sample);
}
return work_done;
@@ -2254,9 +2260,23 @@ static void bnxt_free_tx_skbs(struct bnxt *bp)
for (j = 0; j < max_idx;) {
struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
- struct sk_buff *skb = tx_buf->skb;
+ struct sk_buff *skb;
int k, last;
+ if (i < bp->tx_nr_rings_xdp &&
+ tx_buf->action == XDP_REDIRECT) {
+ dma_unmap_single(&pdev->dev,
+ dma_unmap_addr(tx_buf, mapping),
+ dma_unmap_len(tx_buf, len),
+ PCI_DMA_TODEVICE);
+ xdp_return_frame(tx_buf->xdpf);
+ tx_buf->action = 0;
+ tx_buf->xdpf = NULL;
+ j++;
+ continue;
+ }
+
+ skb = tx_buf->skb;
if (!skb) {
j++;
continue;
@@ -2343,7 +2363,7 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
dma_unmap_page_attrs(&pdev->dev, mapping,
PAGE_SIZE, bp->rx_dir,
DMA_ATTR_WEAK_ORDERING);
- __free_page(data);
+ page_pool_recycle_direct(rxr->page_pool, data);
} else {
dma_unmap_single_attrs(&pdev->dev, mapping,
bp->rx_buf_use_size,
@@ -2480,6 +2500,9 @@ static void bnxt_free_rx_rings(struct bnxt *bp)
if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
xdp_rxq_info_unreg(&rxr->xdp_rxq);
+ page_pool_destroy(rxr->page_pool);
+ rxr->page_pool = NULL;
+
kfree(rxr->rx_tpa);
rxr->rx_tpa = NULL;
@@ -2494,6 +2517,26 @@ static void bnxt_free_rx_rings(struct bnxt *bp)
}
}
+static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
+{
+ struct page_pool_params pp = { 0 };
+
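+ /* One page_pool per RX ring, sized to the ring and allocated from the device's NUMA node. */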
+ pp.pool_size = bp->rx_ring_size;
+ pp.nid = dev_to_node(&bp->pdev->dev);
+ pp.dev = &bp->pdev->dev;
+ pp.dma_dir = DMA_BIDIRECTIONAL;
+
+ rxr->page_pool = page_pool_create(&pp);
+ if (IS_ERR(rxr->page_pool)) {
+ int err = PTR_ERR(rxr->page_pool);
+
+ rxr->page_pool = NULL;
+ return err;
+ }
+ return 0;
+}
+
static int bnxt_alloc_rx_rings(struct bnxt *bp)
{
int i, rc, agg_rings = 0, tpa_rings = 0;
@@ -2513,10 +2556,22 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp)
ring = &rxr->rx_ring_struct;
+ rc = bnxt_alloc_rx_page_pool(bp, rxr);
+ if (rc)
+ return rc;
+
rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i);
if (rc < 0)
return rc;
+ rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
+ MEM_TYPE_PAGE_POOL,
+ rxr->page_pool);
+ if (rc) {
+ xdp_rxq_info_unreg(&rxr->xdp_rxq);
+ return rc;
+ }
+
rc = bnxt_alloc_ring(bp, &ring->ring_mem);
if (rc)
return rc;
@@ -5508,7 +5563,16 @@ static int bnxt_cp_rings_in_use(struct bnxt *bp)
static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
{
- return bp->cp_nr_rings + bnxt_get_ulp_stat_ctxs(bp);
+ int ulp_stat = bnxt_get_ulp_stat_ctxs(bp);
+ int cp = bp->cp_nr_rings;
+
+ if (!ulp_stat)
+ return cp;
+
+ if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp))
+ return bnxt_get_ulp_msix_base(bp) + ulp_stat;
+
+ return cp + ulp_stat;
}
static bool bnxt_need_reserve_rings(struct bnxt *bp)
@@ -7477,11 +7541,7 @@ unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
{
- unsigned int stat;
-
- stat = bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_ulp_stat_ctxs(bp);
- stat -= bp->cp_nr_rings;
- return stat;
+ return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
}
int bnxt_get_avail_msix(struct bnxt *bp, int num)
@@ -7813,7 +7873,7 @@ static void bnxt_enable_napi(struct bnxt *bp)
if (bp->bnapi[i]->rx_ring) {
INIT_WORK(&cpr->dim.work, bnxt_dim_work);
- cpr->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}
napi_enable(&bp->bnapi[i]->napi);
}
@@ -9847,32 +9907,19 @@ static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
}
}
-static int bnxt_setup_tc_block(struct net_device *dev,
- struct tc_block_offload *f)
-{
- struct bnxt *bp = netdev_priv(dev);
-
- if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
- return -EOPNOTSUPP;
-
- switch (f->command) {
- case TC_BLOCK_BIND:
- return tcf_block_cb_register(f->block, bnxt_setup_tc_block_cb,
- bp, bp, f->extack);
- case TC_BLOCK_UNBIND:
- tcf_block_cb_unregister(f->block, bnxt_setup_tc_block_cb, bp);
- return 0;
- default:
- return -EOPNOTSUPP;
- }
-}
+static LIST_HEAD(bnxt_block_cb_list);
static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
+ struct bnxt *bp = netdev_priv(dev);
+
switch (type) {
case TC_SETUP_BLOCK:
- return bnxt_setup_tc_block(dev, type_data);
+ return flow_block_cb_setup_simple(type_data,
+ &bnxt_block_cb_list,
+ bnxt_setup_tc_block_cb,
+ bp, bp, true);
case TC_SETUP_QDISC_MQPRIO: {
struct tc_mqprio_qopt *mqprio = type_data;
@@ -10233,6 +10280,7 @@ static const struct net_device_ops bnxt_netdev_ops = {
.ndo_udp_tunnel_add = bnxt_udp_tunnel_add,
.ndo_udp_tunnel_del = bnxt_udp_tunnel_del,
.ndo_bpf = bnxt_xdp,
+ .ndo_xdp_xmit = bnxt_xdp_xmit,
.ndo_bridge_getlink = bnxt_bridge_getlink,
.ndo_bridge_setlink = bnxt_bridge_setlink,
.ndo_get_devlink_port = bnxt_get_devlink_port,
@@ -10262,10 +10310,10 @@ static void bnxt_remove_one(struct pci_dev *pdev)
bnxt_dcb_free(bp);
kfree(bp->edev);
bp->edev = NULL;
+ bnxt_cleanup_pci(bp);
bnxt_free_ctx_mem(bp);
kfree(bp->ctx);
bp->ctx = NULL;
- bnxt_cleanup_pci(bp);
bnxt_free_port_stats(bp);
free_netdev(dev);
}
@@ -10859,6 +10907,7 @@ static void bnxt_shutdown(struct pci_dev *pdev)
if (system_state == SYSTEM_POWER_OFF) {
bnxt_clear_int_mode(bp);
+ pci_disable_device(pdev);
pci_wake_from_d3(pdev, bp->wol);
pci_set_power_state(pdev, PCI_D3hot);
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index be438d82f939..16694b704d15 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -24,7 +24,9 @@
#include <net/devlink.h>
#include <net/dst_metadata.h>
#include <net/xdp.h>
-#include <linux/net_dim.h>
+#include <linux/dim.h>
+
+struct page_pool;
struct tx_bd {
__le32 tx_bd_len_flags_type;
@@ -587,15 +589,21 @@ struct nqe_cn {
#define BNXT_HWRM_CHNL_CHIMP 0
#define BNXT_HWRM_CHNL_KONG 1
-#define BNXT_RX_EVENT 1
-#define BNXT_AGG_EVENT 2
-#define BNXT_TX_EVENT 4
+#define BNXT_RX_EVENT 1
+#define BNXT_AGG_EVENT 2
+#define BNXT_TX_EVENT 4
+#define BNXT_REDIRECT_EVENT 8
struct bnxt_sw_tx_bd {
- struct sk_buff *skb;
+ union {
+ struct sk_buff *skb;
+ struct xdp_frame *xdpf;
+ };
DEFINE_DMA_UNMAP_ADDR(mapping);
+ DEFINE_DMA_UNMAP_LEN(len);
u8 is_gso;
u8 is_push;
+ u8 action;
union {
unsigned short nr_frags;
u16 rx_prod;
@@ -793,6 +801,7 @@ struct bnxt_rx_ring_info {
struct bnxt_ring_struct rx_ring_struct;
struct bnxt_ring_struct rx_agg_ring_struct;
struct xdp_rxq_info xdp_rxq;
+ struct page_pool *page_pool;
};
struct bnxt_cp_ring_info {
@@ -810,7 +819,7 @@ struct bnxt_cp_ring_info {
u64 rx_bytes;
u64 event_ctr;
- struct net_dim dim;
+ struct dim dim;
union {
struct tx_cmp *cp_desc_ring[MAX_CP_PAGES];
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index 70775158c8c4..07301cb87c03 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -396,7 +396,7 @@ static int bnxt_hwrm_queue_dscp_qcaps(struct bnxt *bp)
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_DSCP_QCAPS, -1, -1);
mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (!rc) {
bp->max_dscp_value = (1 << resp->num_dscp_bits) - 1;
if (bp->max_dscp_value < 0x3f)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c
index 94e208e9789f..61393f351a77 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c
@@ -11,7 +11,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include "bnxt_hsi.h"
-#include <linux/net_dim.h>
+#include <linux/dim.h>
#include "bnxt.h"
#include "bnxt_debugfs.h"
@@ -21,7 +21,7 @@ static ssize_t debugfs_dim_read(struct file *filep,
char __user *buffer,
size_t count, loff_t *ppos)
{
- struct net_dim *dim = filep->private_data;
+ struct dim *dim = filep->private_data;
int len;
char *buf;
@@ -61,7 +61,7 @@ static const struct file_operations debugfs_dim_fops = {
.read = debugfs_dim_read,
};
-static struct dentry *debugfs_dim_ring_init(struct net_dim *dim, int ring_idx,
+static struct dentry *debugfs_dim_ring_init(struct dim *dim, int ring_idx,
struct dentry *dd)
{
static char qname[16];
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c
index afa97c8bb081..6f6576dc417a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c
@@ -7,26 +7,25 @@
* the Free Software Foundation.
*/
-#include <linux/net_dim.h>
+#include <linux/dim.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
void bnxt_dim_work(struct work_struct *work)
{
- struct net_dim *dim = container_of(work, struct net_dim,
- work);
+ struct dim *dim = container_of(work, struct dim, work);
struct bnxt_cp_ring_info *cpr = container_of(dim,
struct bnxt_cp_ring_info,
dim);
struct bnxt_napi *bnapi = container_of(cpr,
struct bnxt_napi,
cp_ring);
- struct net_dim_cq_moder cur_moder =
+ struct dim_cq_moder cur_moder =
net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
cpr->rx_ring_coal.coal_ticks = cur_moder.usec;
cpr->rx_ring_coal.coal_bufs = cur_moder.pkts;
bnxt_hwrm_set_ring_coal(bnapi->bp, bnapi);
- dim->state = NET_DIM_START_MEASURE;
+ dim->state = DIM_START_MEASURE;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index a6c7baf38036..c7ee63d69679 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -2799,7 +2799,7 @@ static int bnxt_run_loopback(struct bnxt *bp)
dev_kfree_skb(skb);
return -EIO;
}
- bnxt_xmit_xdp(bp, txr, map, pkt_size, 0);
+ bnxt_xmit_bd(bp, txr, map, pkt_size);
/* Sync BD data before updating doorbell */
wmb();
@@ -2842,7 +2842,7 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
bool offline = false;
u8 test_results = 0;
u8 test_mask = 0;
- int rc, i;
+ int rc = 0, i;
if (!bp->num_tests || !BNXT_SINGLE_PF(bp))
return;
@@ -2913,9 +2913,9 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
}
bnxt_hwrm_phy_loopback(bp, false, false);
bnxt_half_close_nic(bp);
- bnxt_open_nic(bp, false, true);
+ rc = bnxt_open_nic(bp, false, true);
}
- if (bnxt_test_irq(bp)) {
+ if (rc || bnxt_test_irq(bp)) {
buf[BNXT_IRQ_TEST_IDX] = 1;
etest->flags |= ETH_TEST_FL_FAILED;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index 44d6c5743fb9..6fe4a7174271 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -170,10 +170,10 @@ static int bnxt_tc_parse_actions(struct bnxt *bp,
}
static int bnxt_tc_parse_flow(struct bnxt *bp,
- struct tc_cls_flower_offload *tc_flow_cmd,
+ struct flow_cls_offload *tc_flow_cmd,
struct bnxt_tc_flow *flow)
{
- struct flow_rule *rule = tc_cls_flower_offload_flow_rule(tc_flow_cmd);
+ struct flow_rule *rule = flow_cls_offload_flow_rule(tc_flow_cmd);
struct flow_dissector *dissector = rule->match.dissector;
/* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */
@@ -1262,7 +1262,7 @@ static void bnxt_tc_set_src_fid(struct bnxt *bp, struct bnxt_tc_flow *flow,
* The hash-tables are already protected by the rhashtable API.
*/
static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,
- struct tc_cls_flower_offload *tc_flow_cmd)
+ struct flow_cls_offload *tc_flow_cmd)
{
struct bnxt_tc_flow_node *new_node, *old_node;
struct bnxt_tc_info *tc_info = bp->tc_info;
@@ -1348,7 +1348,7 @@ done:
}
static int bnxt_tc_del_flow(struct bnxt *bp,
- struct tc_cls_flower_offload *tc_flow_cmd)
+ struct flow_cls_offload *tc_flow_cmd)
{
struct bnxt_tc_info *tc_info = bp->tc_info;
struct bnxt_tc_flow_node *flow_node;
@@ -1363,7 +1363,7 @@ static int bnxt_tc_del_flow(struct bnxt *bp,
}
static int bnxt_tc_get_flow_stats(struct bnxt *bp,
- struct tc_cls_flower_offload *tc_flow_cmd)
+ struct flow_cls_offload *tc_flow_cmd)
{
struct bnxt_tc_flow_stats stats, *curr_stats, *prev_stats;
struct bnxt_tc_info *tc_info = bp->tc_info;
@@ -1585,14 +1585,14 @@ void bnxt_tc_flow_stats_work(struct bnxt *bp)
}
int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid,
- struct tc_cls_flower_offload *cls_flower)
+ struct flow_cls_offload *cls_flower)
{
switch (cls_flower->command) {
- case TC_CLSFLOWER_REPLACE:
+ case FLOW_CLS_REPLACE:
return bnxt_tc_add_flow(bp, src_fid, cls_flower);
- case TC_CLSFLOWER_DESTROY:
+ case FLOW_CLS_DESTROY:
return bnxt_tc_del_flow(bp, cls_flower);
- case TC_CLSFLOWER_STATS:
+ case FLOW_CLS_STATS:
return bnxt_tc_get_flow_stats(bp, cls_flower);
default:
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h
index 8a0968967bc5..ffec57d1a5ec 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h
@@ -196,7 +196,7 @@ struct bnxt_tc_flow_node {
};
int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid,
- struct tc_cls_flower_offload *cls_flower);
+ struct flow_cls_offload *cls_flower);
int bnxt_init_tc(struct bnxt *bp);
void bnxt_shutdown_tc(struct bnxt *bp);
void bnxt_tc_flow_stats_work(struct bnxt *bp);
@@ -209,7 +209,7 @@ static inline bool bnxt_tc_flower_enabled(struct bnxt *bp)
#else /* CONFIG_BNXT_FLOWER_OFFLOAD */
static inline int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid,
- struct tc_cls_flower_offload *cls_flower)
+ struct flow_cls_offload *cls_flower)
{
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index bfa342a98d08..fc77caf0a076 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -157,8 +157,10 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
if (BNXT_NEW_RM(bp)) {
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+ int resv_msix;
- avail_msix = hw_resc->resv_irqs - bp->cp_nr_rings;
+ resv_msix = hw_resc->resv_irqs - bp->cp_nr_rings;
+ avail_msix = min_t(int, resv_msix, avail_msix);
edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
}
bnxt_fill_msix_vecs(bp, ent);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
index f760921389a3..f9bf7d7250ab 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
@@ -161,34 +161,19 @@ static int bnxt_vf_rep_setup_tc_block_cb(enum tc_setup_type type,
}
}
-static int bnxt_vf_rep_setup_tc_block(struct net_device *dev,
- struct tc_block_offload *f)
-{
- struct bnxt_vf_rep *vf_rep = netdev_priv(dev);
-
- if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
- return -EOPNOTSUPP;
-
- switch (f->command) {
- case TC_BLOCK_BIND:
- return tcf_block_cb_register(f->block,
- bnxt_vf_rep_setup_tc_block_cb,
- vf_rep, vf_rep, f->extack);
- case TC_BLOCK_UNBIND:
- tcf_block_cb_unregister(f->block,
- bnxt_vf_rep_setup_tc_block_cb, vf_rep);
- return 0;
- default:
- return -EOPNOTSUPP;
- }
-}
+static LIST_HEAD(bnxt_vf_block_cb_list);
static int bnxt_vf_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
+ struct bnxt_vf_rep *vf_rep = netdev_priv(dev);
+
switch (type) {
case TC_SETUP_BLOCK:
- return bnxt_vf_rep_setup_tc_block(dev, type_data);
+ return flow_block_cb_setup_simple(type_data,
+ &bnxt_vf_block_cb_list,
+ bnxt_vf_rep_setup_tc_block_cb,
+ vf_rep, vf_rep, true);
default:
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index 0184ef6f05a7..c6f6f2033880 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -15,12 +15,14 @@
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/filter.h>
+#include <net/page_pool.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_xdp.h"
-void bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
- dma_addr_t mapping, u32 len, u16 rx_prod)
+struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
+ struct bnxt_tx_ring_info *txr,
+ dma_addr_t mapping, u32 len)
{
struct bnxt_sw_tx_bd *tx_buf;
struct tx_bd *txbd;
@@ -29,7 +31,6 @@ void bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
prod = txr->tx_prod;
tx_buf = &txr->tx_buf_ring[prod];
- tx_buf->rx_prod = rx_prod;
txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
flags = (len << TX_BD_LEN_SHIFT) | (1 << TX_BD_FLAGS_BD_CNT_SHIFT) |
@@ -40,30 +41,67 @@ void bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
prod = NEXT_TX(prod);
txr->tx_prod = prod;
+ return tx_buf;
+}
+
+static void __bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
+ dma_addr_t mapping, u32 len, u16 rx_prod)
+{
+ struct bnxt_sw_tx_bd *tx_buf;
+
+ tx_buf = bnxt_xmit_bd(bp, txr, mapping, len);
+ tx_buf->rx_prod = rx_prod;
+ tx_buf->action = XDP_TX;
+}
+
+static void __bnxt_xmit_xdp_redirect(struct bnxt *bp,
+ struct bnxt_tx_ring_info *txr,
+ dma_addr_t mapping, u32 len,
+ struct xdp_frame *xdpf)
+{
+ struct bnxt_sw_tx_bd *tx_buf;
+
+ tx_buf = bnxt_xmit_bd(bp, txr, mapping, len);
+ tx_buf->action = XDP_REDIRECT;
+ tx_buf->xdpf = xdpf;
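+ /* Record the DMA mapping so the frame can be unmapped when its TX completion is processed. */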
+ dma_unmap_addr_set(tx_buf, mapping, mapping);
+ dma_unmap_len_set(tx_buf, len, 0);
}
void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
{
struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
+ bool rx_doorbell_needed = false;
struct bnxt_sw_tx_bd *tx_buf;
u16 tx_cons = txr->tx_cons;
u16 last_tx_cons = tx_cons;
- u16 rx_prod;
int i;
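+ /* A completion may be for an XDP_REDIRECT frame (unmap it and return the
+ * frame) or an XDP_TX buffer (remember the last one so the RX doorbell
+ * can be rung below).
+ */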
for (i = 0; i < nr_pkts; i++) {
- last_tx_cons = tx_cons;
+ tx_buf = &txr->tx_buf_ring[tx_cons];
+
+ if (tx_buf->action == XDP_REDIRECT) {
+ struct pci_dev *pdev = bp->pdev;
+
+ dma_unmap_single(&pdev->dev,
+ dma_unmap_addr(tx_buf, mapping),
+ dma_unmap_len(tx_buf, len),
+ PCI_DMA_TODEVICE);
+ xdp_return_frame(tx_buf->xdpf);
+ tx_buf->action = 0;
+ tx_buf->xdpf = NULL;
+ } else if (tx_buf->action == XDP_TX) {
+ rx_doorbell_needed = true;
+ last_tx_cons = tx_cons;
+ }
tx_cons = NEXT_TX(tx_cons);
}
txr->tx_cons = tx_cons;
- if (bnxt_tx_avail(bp, txr) == bp->tx_ring_size) {
- rx_prod = rxr->rx_prod;
- } else {
+ if (rx_doorbell_needed) {
tx_buf = &txr->tx_buf_ring[last_tx_cons];
- rx_prod = tx_buf->rx_prod;
+ bnxt_db_write(bp, &rxr->rx_db, tx_buf->rx_prod);
}
- bnxt_db_write(bp, &rxr->rx_db, rx_prod);
}
/* returns the following:
@@ -88,19 +126,19 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
return false;
pdev = bp->pdev;
- txr = rxr->bnapi->tx_ring;
rx_buf = &rxr->rx_buf_ring[cons];
offset = bp->rx_offset;
+ mapping = rx_buf->mapping - bp->rx_dma_offset;
+ dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir);
+
+ txr = rxr->bnapi->tx_ring;
xdp.data_hard_start = *data_ptr - offset;
xdp.data = *data_ptr;
xdp_set_data_meta_invalid(&xdp);
xdp.data_end = *data_ptr + *len;
xdp.rxq = &rxr->xdp_rxq;
orig_data = xdp.data;
- mapping = rx_buf->mapping - bp->rx_dma_offset;
-
- dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir);
rcu_read_lock();
act = bpf_prog_run_xdp(xdp_prog, &xdp);
@@ -132,10 +170,34 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
*event = BNXT_TX_EVENT;
dma_sync_single_for_device(&pdev->dev, mapping + offset, *len,
bp->rx_dir);
- bnxt_xmit_xdp(bp, txr, mapping + offset, *len,
- NEXT_RX(rxr->rx_prod));
+ __bnxt_xmit_xdp(bp, txr, mapping + offset, *len,
+ NEXT_RX(rxr->rx_prod));
bnxt_reuse_rx_data(rxr, cons, page);
return true;
+ case XDP_REDIRECT:
+ /* if we are calling this here then we know that the
+ * redirect is coming from a frame received by the
+ * bnxt_en driver.
+ */
+ dma_unmap_page_attrs(&pdev->dev, mapping,
+ PAGE_SIZE, bp->rx_dir,
+ DMA_ATTR_WEAK_ORDERING);
+
+ /* if we are unable to allocate a new buffer, abort and reuse */
+ if (bnxt_alloc_rx_data(bp, rxr, rxr->rx_prod, GFP_ATOMIC)) {
+ trace_xdp_exception(bp->dev, xdp_prog, act);
+ bnxt_reuse_rx_data(rxr, cons, page);
+ return true;
+ }
+
+ if (xdp_do_redirect(bp->dev, &xdp, xdp_prog)) {
+ trace_xdp_exception(bp->dev, xdp_prog, act);
+ page_pool_recycle_direct(rxr->page_pool, page);
+ return true;
+ }
+
+ *event |= BNXT_REDIRECT_EVENT;
+ break;
default:
bpf_warn_invalid_xdp_action(act);
/* Fall thru */
@@ -149,6 +211,56 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
return true;
}
+int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bpf_prog *xdp_prog = READ_ONCE(bp->xdp_prog);
+ struct pci_dev *pdev = bp->pdev;
+ struct bnxt_tx_ring_info *txr;
+ dma_addr_t mapping;
+ int drops = 0;
+ int ring;
+ int i;
+
+ if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
+ !bp->tx_nr_rings_xdp ||
+ !xdp_prog)
+ return -EINVAL;
+
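+ /* Pick an XDP TX ring based on the current CPU; frames that cannot be mapped or queued are dropped and counted. */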
+ ring = smp_processor_id() % bp->tx_nr_rings_xdp;
+ txr = &bp->tx_ring[ring];
+
+ for (i = 0; i < num_frames; i++) {
+ struct xdp_frame *xdp = frames[i];
+
+ if (!txr || !bnxt_tx_avail(bp, txr) ||
+ !(bp->bnapi[ring]->flags & BNXT_NAPI_FLAG_XDP)) {
+ xdp_return_frame_rx_napi(xdp);
+ drops++;
+ continue;
+ }
+
+ mapping = dma_map_single(&pdev->dev, xdp->data, xdp->len,
+ DMA_TO_DEVICE);
+
+ if (dma_mapping_error(&pdev->dev, mapping)) {
+ xdp_return_frame_rx_napi(xdp);
+ drops++;
+ continue;
+ }
+ __bnxt_xmit_xdp_redirect(bp, txr, mapping, xdp->len, xdp);
+ }
+
+ if (flags & XDP_XMIT_FLUSH) {
+ /* Sync BD data before updating doorbell */
+ wmb();
+ bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);
+ }
+
+ return num_frames - drops;
+}
+
/* Under rtnl_lock */
static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
{
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
index 414b748038ca..0df40c3beb05 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
@@ -10,12 +10,15 @@
#ifndef BNXT_XDP_H
#define BNXT_XDP_H
-void bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
- dma_addr_t mapping, u32 len, u16 rx_prod);
+struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
+ struct bnxt_tx_ring_info *txr,
+ dma_addr_t mapping, u32 len);
void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts);
bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
struct page *page, u8 **data_ptr, unsigned int *len,
u8 *event);
int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp);
+int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
+ struct xdp_frame **frames, u32 flags);
#endif
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 41b50e6570ea..34466b827dde 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -640,7 +640,7 @@ static void bcmgenet_set_rx_coalesce(struct bcmgenet_rx_ring *ring,
static void bcmgenet_set_ring_rx_coalesce(struct bcmgenet_rx_ring *ring,
struct ethtool_coalesce *ec)
{
- struct net_dim_cq_moder moder;
+ struct dim_cq_moder moder;
u32 usecs, pkts;
ring->rx_coalesce_usecs = ec->rx_coalesce_usecs;
@@ -1895,7 +1895,7 @@ static int bcmgenet_rx_poll(struct napi_struct *napi, int budget)
{
struct bcmgenet_rx_ring *ring = container_of(napi,
struct bcmgenet_rx_ring, napi);
- struct net_dim_sample dim_sample;
+ struct dim_sample dim_sample;
unsigned int work_done;
work_done = bcmgenet_desc_rx(ring, budget);
@@ -1906,8 +1906,8 @@ static int bcmgenet_rx_poll(struct napi_struct *napi, int budget)
}
if (ring->dim.use_dim) {
- net_dim_sample(ring->dim.event_ctr, ring->dim.packets,
- ring->dim.bytes, &dim_sample);
+ dim_update_sample(ring->dim.event_ctr, ring->dim.packets,
+ ring->dim.bytes, &dim_sample);
net_dim(&ring->dim.dim, dim_sample);
}
@@ -1916,16 +1916,16 @@ static int bcmgenet_rx_poll(struct napi_struct *napi, int budget)
static void bcmgenet_dim_work(struct work_struct *work)
{
- struct net_dim *dim = container_of(work, struct net_dim, work);
+ struct dim *dim = container_of(work, struct dim, work);
struct bcmgenet_net_dim *ndim =
container_of(dim, struct bcmgenet_net_dim, dim);
struct bcmgenet_rx_ring *ring =
container_of(ndim, struct bcmgenet_rx_ring, dim);
- struct net_dim_cq_moder cur_profile =
+ struct dim_cq_moder cur_profile =
net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
bcmgenet_set_rx_coalesce(ring, cur_profile.usec, cur_profile.pkts);
- dim->state = NET_DIM_START_MEASURE;
+ dim->state = DIM_START_MEASURE;
}
/* Assign skb to RX DMA descriptor. */
@@ -2082,7 +2082,7 @@ static void bcmgenet_init_dim(struct bcmgenet_rx_ring *ring,
struct bcmgenet_net_dim *dim = &ring->dim;
INIT_WORK(&dim->dim.work, cb);
- dim->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ dim->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
dim->event_ctr = 0;
dim->packets = 0;
dim->bytes = 0;
@@ -2091,7 +2091,7 @@ static void bcmgenet_init_dim(struct bcmgenet_rx_ring *ring,
static void bcmgenet_init_rx_coalesce(struct bcmgenet_rx_ring *ring)
{
struct bcmgenet_net_dim *dim = &ring->dim;
- struct net_dim_cq_moder moder;
+ struct dim_cq_moder moder;
u32 usecs, pkts;
usecs = ring->rx_coalesce_usecs;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 9ad835aee1bc..4a8fc03d82fd 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -13,7 +13,7 @@
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/phy.h>
-#include <linux/net_dim.h>
+#include <linux/dim.h>
/* total number of Buffer Descriptors, same for Rx/Tx */
#define TOTAL_DESC 256
@@ -578,7 +578,7 @@ struct bcmgenet_net_dim {
u16 event_ctr;
unsigned long packets;
unsigned long bytes;
- struct net_dim dim;
+ struct dim dim;
};
struct bcmgenet_rx_ring {
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 6d1f9c822548..4c404d2213f9 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -6710,7 +6710,7 @@ static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
if (skb_size <= PAGE_SIZE) {
- data = netdev_alloc_frag(skb_size);
+ data = napi_alloc_frag(skb_size);
*frag_size = skb_size;
} else {
data = kmalloc(skb_size, GFP_ATOMIC);
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
index 1766697c9c5a..f4b3bd85dfe3 100644
--- a/drivers/net/ethernet/cadence/Kconfig
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
#
-# Atmel device configuration
+# Cadence device configuration
#
config NET_VENDOR_CADENCE
@@ -13,15 +13,15 @@ config NET_VENDOR_CADENCE
If unsure, say Y.
Note that the answer to this question doesn't directly affect the
- kernel: saying N will just cause the configurator to skip all
- the remaining Atmel network card questions. If you say Y, you will be
+ kernel: saying N will just cause the configurator to skip all the
+ remaining Cadence network card questions. If you say Y, you will be
asked for your specific card in the following questions.
if NET_VENDOR_CADENCE
config MACB
tristate "Cadence MACB/GEM support"
- depends on HAS_DMA
+ depends on HAS_DMA && COMMON_CLK
select PHYLIB
---help---
The Cadence MACB ethernet interface is found on many Atmel AT32 and
@@ -42,7 +42,7 @@ config MACB_USE_HWSTAMP
config MACB_PCI
tristate "Cadence PCI MACB/GEM support"
- depends on MACB && PCI && COMMON_CLK
+ depends on MACB && PCI
---help---
This is PCI wrapper for MACB driver.
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 6ff123da6a14..03983bd46eef 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -496,7 +496,11 @@
/* Bitfields in TISUBN */
#define GEM_SUBNSINCR_OFFSET 0
-#define GEM_SUBNSINCR_SIZE 16
+#define GEM_SUBNSINCRL_OFFSET 24
+#define GEM_SUBNSINCRL_SIZE 8
+#define GEM_SUBNSINCRH_OFFSET 0
+#define GEM_SUBNSINCRH_SIZE 16
+#define GEM_SUBNSINCR_SIZE 24
/* Bitfields in TI */
#define GEM_NSINCR_OFFSET 0
@@ -834,6 +838,9 @@ struct gem_tx_ts {
/* limit RX checksum offload to TCP and UDP packets */
#define GEM_RX_CSUM_CHECKED_MASK 2
+/* Scaled PPM fraction */
+#define PPM_FRACTION 16
+
/* struct macb_tx_skb - data about an skb which is being transmitted
* @skb: skb currently being transmitted, only set for the last buffer
* of the frame
@@ -1060,7 +1067,8 @@ struct macb_or_gem_ops {
int (*mog_alloc_rx_buffers)(struct macb *bp);
void (*mog_free_rx_buffers)(struct macb *bp);
void (*mog_init_rings)(struct macb *bp);
- int (*mog_rx)(struct macb_queue *queue, int budget);
+ int (*mog_rx)(struct macb_queue *queue, struct napi_struct *napi,
+ int budget);
};
/* MACB-PTP interface: adapt to platform needs. */
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 262a28ff81fc..5ca17e62dc3e 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -7,6 +7,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/crc32.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -37,6 +38,13 @@
#include <linux/pm_runtime.h>
#include "macb.h"
+/* This structure is only used for MACB on SiFive FU540 devices */
+struct sifive_fu540_macb_mgmt {
+ void __iomem *reg;
+ unsigned long rate;
+ struct clk_hw hw;
+};
+
#define MACB_RX_BUFFER_SIZE 128
#define RX_BUFFER_MULTIPLE 64 /* bytes */
@@ -981,7 +989,8 @@ static void discard_partial_frame(struct macb_queue *queue, unsigned int begin,
*/
}
-static int gem_rx(struct macb_queue *queue, int budget)
+static int gem_rx(struct macb_queue *queue, struct napi_struct *napi,
+ int budget)
{
struct macb *bp = queue->bp;
unsigned int len;
@@ -1063,7 +1072,7 @@ static int gem_rx(struct macb_queue *queue, int budget)
skb->data, 32, true);
#endif
- netif_receive_skb(skb);
+ napi_gro_receive(napi, skb);
}
gem_rx_refill(queue);
@@ -1071,8 +1080,8 @@ static int gem_rx(struct macb_queue *queue, int budget)
return count;
}
-static int macb_rx_frame(struct macb_queue *queue, unsigned int first_frag,
- unsigned int last_frag)
+static int macb_rx_frame(struct macb_queue *queue, struct napi_struct *napi,
+ unsigned int first_frag, unsigned int last_frag)
{
unsigned int len;
unsigned int frag;
@@ -1148,7 +1157,7 @@ static int macb_rx_frame(struct macb_queue *queue, unsigned int first_frag,
bp->dev->stats.rx_bytes += skb->len;
netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
skb->len, skb->csum);
- netif_receive_skb(skb);
+ napi_gro_receive(napi, skb);
return 0;
}
@@ -1171,7 +1180,8 @@ static inline void macb_init_rx_ring(struct macb_queue *queue)
queue->rx_tail = 0;
}
-static int macb_rx(struct macb_queue *queue, int budget)
+static int macb_rx(struct macb_queue *queue, struct napi_struct *napi,
+ int budget)
{
struct macb *bp = queue->bp;
bool reset_rx_queue = false;
@@ -1208,7 +1218,7 @@ static int macb_rx(struct macb_queue *queue, int budget)
continue;
}
- dropped = macb_rx_frame(queue, first_frag, tail);
+ dropped = macb_rx_frame(queue, napi, first_frag, tail);
first_frag = -1;
if (unlikely(dropped < 0)) {
reset_rx_queue = true;
@@ -1262,7 +1272,7 @@ static int macb_poll(struct napi_struct *napi, int budget)
netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
(unsigned long)status, budget);
- work_done = bp->macbgem_ops.mog_rx(queue, budget);
+ work_done = bp->macbgem_ops.mog_rx(queue, napi, budget);
if (work_done < budget) {
napi_complete_done(napi, work_done);
@@ -3477,7 +3487,7 @@ static int macb_init(struct platform_device *pdev)
queue = &bp->queues[q];
queue->bp = bp;
- netif_napi_add(dev, &queue->napi, macb_poll, 64);
+ netif_napi_add(dev, &queue->napi, macb_poll, NAPI_POLL_WEIGHT);
if (hw_q) {
queue->ISR = GEM_ISR(hw_q - 1);
queue->IER = GEM_IER(hw_q - 1);
@@ -3616,6 +3626,8 @@ static int macb_init(struct platform_device *pdev)
/* max number of receive buffers */
#define AT91ETHER_MAX_RX_DESCR 9
+static struct sifive_fu540_macb_mgmt *mgmt;
+
/* Initialize and start the Receiver and Transmit subsystems */
static int at91ether_start(struct net_device *dev)
{
@@ -3943,6 +3955,116 @@ static int at91ether_init(struct platform_device *pdev)
return 0;
}
+static unsigned long fu540_macb_tx_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ return mgmt->rate;
+}
+
+static long fu540_macb_tx_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
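+ /* Round the requested rate to one of the supported GEMGXL TX clock rates: 2.5, 25 or 125 MHz. */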
+ if (WARN_ON(rate < 2500000))
+ return 2500000;
+ else if (rate == 2500000)
+ return 2500000;
+ else if (WARN_ON(rate < 13750000))
+ return 2500000;
+ else if (WARN_ON(rate < 25000000))
+ return 25000000;
+ else if (rate == 25000000)
+ return 25000000;
+ else if (WARN_ON(rate < 75000000))
+ return 25000000;
+ else if (WARN_ON(rate < 125000000))
+ return 125000000;
+ else if (rate == 125000000)
+ return 125000000;
+
+ WARN_ON(rate > 125000000);
+
+ return 125000000;
+}
+
+static int fu540_macb_tx_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
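+ /* Per the mapping in round_rate above: write 0 for the full 125 MHz rate, 1 for the reduced (2.5/25 MHz) rates. */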
+ rate = fu540_macb_tx_round_rate(hw, rate, &parent_rate);
+ if (rate != 125000000)
+ iowrite32(1, mgmt->reg);
+ else
+ iowrite32(0, mgmt->reg);
+ mgmt->rate = rate;
+
+ return 0;
+}
+
+static const struct clk_ops fu540_c000_ops = {
+ .recalc_rate = fu540_macb_tx_recalc_rate,
+ .round_rate = fu540_macb_tx_round_rate,
+ .set_rate = fu540_macb_tx_set_rate,
+};
+
+static int fu540_c000_clk_init(struct platform_device *pdev, struct clk **pclk,
+ struct clk **hclk, struct clk **tx_clk,
+ struct clk **rx_clk, struct clk **tsu_clk)
+{
+ struct clk_init_data init;
+ int err = 0;
+
+ err = macb_clk_init(pdev, pclk, hclk, tx_clk, rx_clk, tsu_clk);
+ if (err)
+ return err;
+
+ mgmt = devm_kzalloc(&pdev->dev, sizeof(*mgmt), GFP_KERNEL);
+ if (!mgmt)
+ return -ENOMEM;
+
+ init.name = "sifive-gemgxl-mgmt";
+ init.ops = &fu540_c000_ops;
+ init.flags = 0;
+ init.num_parents = 0;
+
+ mgmt->rate = 0;
+ mgmt->hw.init = &init;
+
+ *tx_clk = clk_register(NULL, &mgmt->hw);
+ if (IS_ERR(*tx_clk))
+ return PTR_ERR(*tx_clk);
+
+ err = clk_prepare_enable(*tx_clk);
+ if (err)
+ dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err);
+ else
+ dev_info(&pdev->dev, "Registered clk switch '%s'\n", init.name);
+
+ return 0;
+}
+
+static int fu540_c000_init(struct platform_device *pdev)
+{
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res)
+ return -ENODEV;
+
+ mgmt->reg = ioremap(res->start, resource_size(res));
+ if (!mgmt->reg)
+ return -ENOMEM;
+
+ return macb_init(pdev);
+}
+
+static const struct macb_config fu540_c000_config = {
+ .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO |
+ MACB_CAPS_GEM_HAS_PTP,
+ .dma_burst_length = 16,
+ .clk_init = fu540_c000_clk_init,
+ .init = fu540_c000_init,
+ .jumbo_max_len = 10240,
+};
+
static const struct macb_config at91sam9260_config = {
.caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
.clk_init = macb_clk_init,
@@ -4032,6 +4154,7 @@ static const struct of_device_id macb_dt_ids[] = {
{ .compatible = "cdns,emac", .data = &emac_config },
{ .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config},
{ .compatible = "cdns,zynq-gem", .data = &zynq_config },
+ { .compatible = "sifive,fu540-macb", .data = &fu540_c000_config },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, macb_dt_ids);
@@ -4239,6 +4362,7 @@ err_out_free_netdev:
err_disable_clocks:
clk_disable_unprepare(tx_clk);
+ clk_unregister(tx_clk);
clk_disable_unprepare(hclk);
clk_disable_unprepare(pclk);
clk_disable_unprepare(rx_clk);
@@ -4273,6 +4397,7 @@ static int macb_remove(struct platform_device *pdev)
pm_runtime_dont_use_autosuspend(&pdev->dev);
if (!pm_runtime_suspended(&pdev->dev)) {
clk_disable_unprepare(bp->tx_clk);
+ clk_unregister(bp->tx_clk);
clk_disable_unprepare(bp->hclk);
clk_disable_unprepare(bp->pclk);
clk_disable_unprepare(bp->rx_clk);
diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c
index 0a8aca8d3634..43a3f0dbf857 100644
--- a/drivers/net/ethernet/cadence/macb_ptp.c
+++ b/drivers/net/ethernet/cadence/macb_ptp.c
@@ -104,7 +104,10 @@ static int gem_tsu_incr_set(struct macb *bp, struct tsu_incr *incr_spec)
* to take effect.
*/
spin_lock_irqsave(&bp->tsu_clk_lock, flags);
- gem_writel(bp, TISUBN, GEM_BF(SUBNSINCR, incr_spec->sub_ns));
+ /* RegBit[15:0] = Subns[23:8]; RegBit[31:24] = Subns[7:0] */
+ gem_writel(bp, TISUBN, GEM_BF(SUBNSINCRL, incr_spec->sub_ns) |
+ GEM_BF(SUBNSINCRH, (incr_spec->sub_ns >>
+ GEM_SUBNSINCRL_SIZE)));
gem_writel(bp, TI, GEM_BF(NSINCR, incr_spec->ns));
spin_unlock_irqrestore(&bp->tsu_clk_lock, flags);
@@ -135,7 +138,7 @@ static int gem_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
* (temp / USEC_PER_SEC) + 0.5
*/
adj += (USEC_PER_SEC >> 1);
- adj >>= GEM_SUBNSINCR_SIZE; /* remove fractions */
+ adj >>= PPM_FRACTION; /* remove fractions */
adj = div_u64(adj, USEC_PER_SEC);
adj = neg_adj ? (word - adj) : (word + adj);
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 11d4e91ea754..99f49d059414 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -1855,7 +1855,7 @@ static void xgmac_pmt(void __iomem *ioaddr, unsigned long mode)
static int xgmac_suspend(struct device *dev)
{
- struct net_device *ndev = platform_get_drvdata(to_platform_device(dev));
+ struct net_device *ndev = dev_get_drvdata(dev);
struct xgmac_priv *priv = netdev_priv(ndev);
u32 value;
@@ -1881,7 +1881,7 @@ static int xgmac_suspend(struct device *dev)
static int xgmac_resume(struct device *dev)
{
- struct net_device *ndev = platform_get_drvdata(to_platform_device(dev));
+ struct net_device *ndev = dev_get_drvdata(dev);
struct xgmac_priv *priv = netdev_priv(ndev);
void __iomem *ioaddr = priv->base;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile
index 91d8a885deba..20390f6afbb4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/Makefile
+++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile
@@ -7,7 +7,7 @@ obj-$(CONFIG_CHELSIO_T4) += cxgb4.o
cxgb4-objs := cxgb4_main.o l2t.o smt.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o \
cxgb4_uld.o srq.o sched.o cxgb4_filter.o cxgb4_tc_u32.o \
- cxgb4_ptp.o cxgb4_tc_flower.o cxgb4_cudbg.o \
+ cxgb4_ptp.o cxgb4_tc_flower.o cxgb4_cudbg.o cxgb4_mps.o \
cudbg_common.o cudbg_lib.o cudbg_zlib.o
cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o
cxgb4-$(CONFIG_CHELSIO_T4_FCOE) += cxgb4_fcoe.o
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index a8fe0808823d..1fbb640e896a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -280,6 +280,7 @@ struct tp_params {
unsigned short tx_modq[NCHAN]; /* channel to modulation queue map */
u32 vlan_pri_map; /* cached TP_VLAN_PRI_MAP */
+ u32 filter_mask;
u32 ingress_config; /* cached TP_INGRESS_CONFIG */
/* cached TP_OUT_CONFIG compressed error vector
@@ -600,6 +601,7 @@ struct port_info {
u8 vin;
u8 vivld;
u8 smt_idx;
+ u8 rx_cchan;
};
struct dentry;
@@ -878,6 +880,7 @@ struct uld_msix_info {
unsigned short vec;
char desc[IFNAMSIZ + 10];
unsigned int idx;
+ cpumask_var_t aff_mask;
};
struct vf_info {
@@ -902,10 +905,6 @@ struct mbox_list {
struct list_head list;
};
-struct mps_encap_entry {
- atomic_t refcnt;
-};
-
#if IS_ENABLED(CONFIG_THERMAL)
struct ch_thermal {
struct thermal_zone_device *tzdev;
@@ -914,6 +913,14 @@ struct ch_thermal {
};
#endif
+struct mps_entries_ref {
+ struct list_head list;
+ u8 addr[ETH_ALEN];
+ u8 mask[ETH_ALEN];
+ u16 idx;
+ refcount_t refcnt;
+};
+
struct adapter {
void __iomem *regs;
void __iomem *bar2;
@@ -938,9 +945,10 @@ struct adapter {
struct cxgb4_virt_res vres;
unsigned int swintr;
- struct {
+ struct msix_info {
unsigned short vec;
char desc[IFNAMSIZ + 10];
+ cpumask_var_t aff_mask;
} msix_info[MAX_INGQ + 1];
struct uld_msix_info *msix_info_ulds; /* msix info for uld's */
struct uld_msix_bmap msix_bmap_ulds; /* msix bitmap for all uld */
@@ -965,7 +973,6 @@ struct adapter {
unsigned int rawf_start;
unsigned int rawf_cnt;
struct smt_data *smt;
- struct mps_encap_entry *mps_encap;
struct cxgb4_uld_info *uld;
void *uld_handle[CXGB4_ULD_MAX];
unsigned int num_uld;
@@ -973,6 +980,8 @@ struct adapter {
struct list_head list_node;
struct list_head rcu_node;
struct list_head mac_hlist; /* list of MAC addresses in MPS Hash */
+ struct list_head mps_ref;
+ spinlock_t mps_ref_lock; /* lock for syncing mps ref/def activities */
void *iscsi_ppm;
@@ -1898,5 +1907,46 @@ int cxgb4_dcb_enabled(const struct net_device *dev);
int cxgb4_thermal_init(struct adapter *adap);
int cxgb4_thermal_remove(struct adapter *adap);
+int cxgb4_set_msix_aff(struct adapter *adap, unsigned short vec,
+ cpumask_var_t *aff_mask, int idx);
+void cxgb4_clear_msix_aff(unsigned short vec, cpumask_var_t aff_mask);
+
+int cxgb4_change_mac(struct port_info *pi, unsigned int viid,
+ int *tcam_idx, const u8 *addr,
+ bool persistent, u8 *smt_idx);
+
+int cxgb4_alloc_mac_filt(struct adapter *adap, unsigned int viid,
+ bool free, unsigned int naddr,
+ const u8 **addr, u16 *idx,
+ u64 *hash, bool sleep_ok);
+int cxgb4_free_mac_filt(struct adapter *adap, unsigned int viid,
+ unsigned int naddr, const u8 **addr, bool sleep_ok);
+int cxgb4_init_mps_ref_entries(struct adapter *adap);
+void cxgb4_free_mps_ref_entries(struct adapter *adap);
+int cxgb4_alloc_encap_mac_filt(struct adapter *adap, unsigned int viid,
+ const u8 *addr, const u8 *mask,
+ unsigned int vni, unsigned int vni_mask,
+ u8 dip_hit, u8 lookup_type, bool sleep_ok);
+int cxgb4_free_encap_mac_filt(struct adapter *adap, unsigned int viid,
+ int idx, bool sleep_ok);
+int cxgb4_free_raw_mac_filt(struct adapter *adap,
+ unsigned int viid,
+ const u8 *addr,
+ const u8 *mask,
+ unsigned int idx,
+ u8 lookup_type,
+ u8 port_id,
+ bool sleep_ok);
+int cxgb4_alloc_raw_mac_filt(struct adapter *adap,
+ unsigned int viid,
+ const u8 *addr,
+ const u8 *mask,
+ unsigned int idx,
+ u8 lookup_type,
+ u8 port_id,
+ bool sleep_ok);
+int cxgb4_update_mac_filt(struct port_info *pi, unsigned int viid,
+ int *tcam_idx, const u8 *addr,
+ bool persistent, u8 *smt_idx);
#endif /* __CXGB4_H__ */
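The new cxgb4_*_mac_filt()/cxgb4_*_encap_mac_filt() prototypes above wrap the existing t4_* helpers and add MPS TCAM reference counting (implemented in cxgb4_mps.c further down). A minimal sketch based only on these signatures; the zero vni/lookup parameters and the immediate free are illustrative, not a real call sequence:

/* Sketch only: pairing the ref-counted encap filter helpers declared above. */
static int example_add_encap_filter(struct adapter *adap, unsigned int viid,
				    const u8 *addr, const u8 *mask)
{
	int idx;

	/* Allocates (or re-references) an MPS TCAM entry; returns its index. */
	idx = cxgb4_alloc_encap_mac_filt(adap, viid, addr, mask,
					 0 /* vni */, 0 /* vni_mask */,
					 0 /* dip_hit */, 0 /* lookup_type */,
					 true /* sleep_ok */);
	if (idx < 0)
		return idx;

	/* Only frees the hardware entry once its refcount drops to zero. */
	return cxgb4_free_encap_mac_filt(adap, viid, idx, true);
}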
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index 4107007b6ec4..43b0f8c57da7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -248,8 +248,9 @@ static int validate_filter(struct net_device *dev,
u32 fconf, iconf;
/* Check for unconfigured fields being used. */
- fconf = adapter->params.tp.vlan_pri_map;
iconf = adapter->params.tp.ingress_config;
+ fconf = fs->hash ? adapter->params.tp.filter_mask :
+ adapter->params.tp.vlan_pri_map;
if (unsupported(fconf, FCOE_F, fs->val.fcoe, fs->mask.fcoe) ||
unsupported(fconf, PORT_F, fs->val.iport, fs->mask.iport) ||
@@ -726,10 +727,8 @@ void clear_filter(struct adapter *adap, struct filter_entry *f)
cxgb4_smt_release(f->smt);
if (f->fs.val.encap_vld && f->fs.val.ovlan_vld)
- if (atomic_dec_and_test(&adap->mps_encap[f->fs.val.ovlan &
- 0x1ff].refcnt))
- t4_free_encap_mac_filt(adap, pi->viid,
- f->fs.val.ovlan & 0x1ff, 0);
+ t4_free_encap_mac_filt(adap, pi->viid,
+ f->fs.val.ovlan & 0x1ff, 0);
if ((f->fs.hash || is_t6(adap->params.chip)) && f->fs.type)
cxgb4_clip_release(f->dev, (const u32 *)&f->fs.val.lip, 1);
@@ -1041,7 +1040,7 @@ static void mk_act_open_req6(struct filter_entry *f, struct sk_buff *skb,
RSS_QUEUE_V(f->fs.iq) |
TX_QUEUE_V(f->fs.nat_mode) |
T5_OPT_2_VALID_F |
- RX_CHANNEL_F |
+ RX_CHANNEL_V(cxgb4_port_e2cchan(f->dev)) |
CONG_CNTRL_V((f->fs.action == FILTER_DROP) |
(f->fs.dirsteer << 1)) |
PACE_V((f->fs.maskhash) |
@@ -1081,7 +1080,7 @@ static void mk_act_open_req(struct filter_entry *f, struct sk_buff *skb,
RSS_QUEUE_V(f->fs.iq) |
TX_QUEUE_V(f->fs.nat_mode) |
T5_OPT_2_VALID_F |
- RX_CHANNEL_F |
+ RX_CHANNEL_V(cxgb4_port_e2cchan(f->dev)) |
CONG_CNTRL_V((f->fs.action == FILTER_DROP) |
(f->fs.dirsteer << 1)) |
PACE_V((f->fs.maskhash) |
@@ -1176,7 +1175,6 @@ static int cxgb4_set_hash_filter(struct net_device *dev,
if (ret < 0)
goto free_atid;
- atomic_inc(&adapter->mps_encap[ret].refcnt);
f->fs.val.ovlan = ret;
f->fs.mask.ovlan = 0xffff;
f->fs.val.ovlan_vld = 1;
@@ -1419,7 +1417,6 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
if (ret < 0)
goto free_clip;
- atomic_inc(&adapter->mps_encap[ret].refcnt);
f->fs.val.ovlan = ret;
f->fs.mask.ovlan = 0x1ff;
f->fs.val.ovlan_vld = 1;
@@ -1833,24 +1830,38 @@ void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
}
}
-int init_hash_filter(struct adapter *adap)
+void init_hash_filter(struct adapter *adap)
{
+ u32 reg;
+
/* On T6, verify the necessary register configs and warn the user in
* case of improper config
*/
if (is_t6(adap->params.chip)) {
- if (TCAM_ACTV_HIT_G(t4_read_reg(adap, LE_DB_RSP_CODE_0_A)) != 4)
- goto err;
+ if (is_offload(adap)) {
+ if (!(t4_read_reg(adap, TP_GLOBAL_CONFIG_A)
+ & ACTIVEFILTERCOUNTS_F)) {
+ dev_err(adap->pdev_dev, "Invalid hash filter + ofld config\n");
+ return;
+ }
+ } else {
+ reg = t4_read_reg(adap, LE_DB_RSP_CODE_0_A);
+ if (TCAM_ACTV_HIT_G(reg) != 4) {
+ dev_err(adap->pdev_dev, "Invalid hash filter config\n");
+ return;
+ }
+
+ reg = t4_read_reg(adap, LE_DB_RSP_CODE_1_A);
+ if (HASH_ACTV_HIT_G(reg) != 4) {
+ dev_err(adap->pdev_dev, "Invalid hash filter config\n");
+ return;
+ }
+ }
- if (HASH_ACTV_HIT_G(t4_read_reg(adap, LE_DB_RSP_CODE_1_A)) != 4)
- goto err;
} else {
dev_err(adap->pdev_dev, "Hash filter supported only on T6\n");
- return -EINVAL;
+ return;
}
+
adap->params.hash_filter = 1;
- return 0;
-err:
- dev_warn(adap->pdev_dev, "Invalid hash filter config!\n");
- return -EINVAL;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h
index 8db5fca6dcc9..b0751c0611ec 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h
@@ -50,7 +50,7 @@ int delete_filter(struct adapter *adapter, unsigned int fidx);
int writable_filter(struct filter_entry *f);
void clear_all_filters(struct adapter *adapter);
-int init_hash_filter(struct adapter *adap);
+void init_hash_filter(struct adapter *adap);
bool is_filter_exact_match(struct adapter *adap,
struct ch_filter_specification *fs);
#endif /* __CXGB4_FILTER_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 715e4edcf4a2..67202b6f352e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -366,13 +366,19 @@ static int cxgb4_mac_sync(struct net_device *netdev, const u8 *mac_addr)
int ret;
u64 mhash = 0;
u64 uhash = 0;
+ /* idx stores the index of the allocated filter; its size should
+ * be adjusted based on the number of MAC addresses that we
+ * allocate filters for.
+ */
+
+ u16 idx[1] = {};
bool free = false;
bool ucast = is_unicast_ether_addr(mac_addr);
const u8 *maclist[1] = {mac_addr};
struct hash_mac_addr *new_entry;
- ret = t4_alloc_mac_filt(adap, adap->mbox, pi->viid, free, 1, maclist,
- NULL, ucast ? &uhash : &mhash, false);
+ ret = cxgb4_alloc_mac_filt(adap, pi->viid, free, 1, maclist,
+ idx, ucast ? &uhash : &mhash, false);
if (ret < 0)
goto out;
/* if hash != 0, then add the addr to hash addr list
@@ -410,7 +416,7 @@ static int cxgb4_mac_unsync(struct net_device *netdev, const u8 *mac_addr)
}
}
- ret = t4_free_mac_filt(adap, adap->mbox, pi->viid, 1, maclist, false);
+ ret = cxgb4_free_mac_filt(adap, pi->viid, 1, maclist, false);
return ret < 0 ? -EINVAL : 0;
}
@@ -449,9 +455,9 @@ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
* Addresses are programmed to hash region, if tcam runs out of entries.
*
*/
-static int cxgb4_change_mac(struct port_info *pi, unsigned int viid,
- int *tcam_idx, const u8 *addr, bool persist,
- u8 *smt_idx)
+int cxgb4_change_mac(struct port_info *pi, unsigned int viid,
+ int *tcam_idx, const u8 *addr, bool persist,
+ u8 *smt_idx)
{
struct adapter *adapter = pi->adapter;
struct hash_mac_addr *entry, *new_entry;
@@ -505,8 +511,8 @@ static int link_start(struct net_device *dev)
ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
!!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
if (ret == 0)
- ret = cxgb4_change_mac(pi, pi->viid, &pi->xact_addr_filt,
- dev->dev_addr, true, &pi->smt_idx);
+ ret = cxgb4_update_mac_filt(pi, pi->viid, &pi->xact_addr_filt,
+ dev->dev_addr, true, &pi->smt_idx);
if (ret == 0)
ret = t4_link_l1cfg(pi->adapter, mb, pi->tx_chan,
&pi->link_cfg);
@@ -702,9 +708,38 @@ static void name_msix_vecs(struct adapter *adap)
}
}
+int cxgb4_set_msix_aff(struct adapter *adap, unsigned short vec,
+ cpumask_var_t *aff_mask, int idx)
+{
+ int rv;
+
+ if (!zalloc_cpumask_var(aff_mask, GFP_KERNEL)) {
+ dev_err(adap->pdev_dev, "alloc_cpumask_var failed\n");
+ return -ENOMEM;
+ }
+
+ cpumask_set_cpu(cpumask_local_spread(idx, dev_to_node(adap->pdev_dev)),
+ *aff_mask);
+
+ rv = irq_set_affinity_hint(vec, *aff_mask);
+ if (rv)
+ dev_warn(adap->pdev_dev,
+ "irq_set_affinity_hint %u failed %d\n",
+ vec, rv);
+
+ return 0;
+}
+
+void cxgb4_clear_msix_aff(unsigned short vec, cpumask_var_t aff_mask)
+{
+ irq_set_affinity_hint(vec, NULL);
+ free_cpumask_var(aff_mask);
+}
+
static int request_msix_queue_irqs(struct adapter *adap)
{
struct sge *s = &adap->sge;
+ struct msix_info *minfo;
int err, ethqidx;
int msi_index = 2;
@@ -714,32 +749,77 @@ static int request_msix_queue_irqs(struct adapter *adap)
return err;
for_each_ethrxq(s, ethqidx) {
- err = request_irq(adap->msix_info[msi_index].vec,
+ minfo = &adap->msix_info[msi_index];
+ err = request_irq(minfo->vec,
t4_sge_intr_msix, 0,
- adap->msix_info[msi_index].desc,
+ minfo->desc,
&s->ethrxq[ethqidx].rspq);
if (err)
goto unwind;
+
+ cxgb4_set_msix_aff(adap, minfo->vec,
+ &minfo->aff_mask, ethqidx);
msi_index++;
}
return 0;
unwind:
- while (--ethqidx >= 0)
- free_irq(adap->msix_info[--msi_index].vec,
- &s->ethrxq[ethqidx].rspq);
+ while (--ethqidx >= 0) {
+ msi_index--;
+ minfo = &adap->msix_info[msi_index];
+ cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
+ free_irq(minfo->vec, &s->ethrxq[ethqidx].rspq);
+ }
free_irq(adap->msix_info[1].vec, &s->fw_evtq);
return err;
}
static void free_msix_queue_irqs(struct adapter *adap)
{
- int i, msi_index = 2;
struct sge *s = &adap->sge;
+ struct msix_info *minfo;
+ int i, msi_index = 2;
free_irq(adap->msix_info[1].vec, &s->fw_evtq);
- for_each_ethrxq(s, i)
- free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
+ for_each_ethrxq(s, i) {
+ minfo = &adap->msix_info[msi_index++];
+ cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
+ free_irq(minfo->vec, &s->ethrxq[i].rspq);
+ }
+}
+
+static int setup_ppod_edram(struct adapter *adap)
+{
+ unsigned int param, val;
+ int ret;
+
+ /* Driver sends a FW_PARAMS_PARAM_DEV_PPOD_EDRAM read command to check
+ * whether firmware supports the ppod edram feature. If firmware
+ * returns 1, the driver can enable the feature by sending a
+ * FW_PARAMS_PARAM_DEV_PPOD_EDRAM write command with value 1.
+ */
+ param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PPOD_EDRAM));
+
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
+ if (ret < 0) {
+ dev_warn(adap->pdev_dev,
+ "querying PPOD_EDRAM support failed: %d\n",
+ ret);
+ return -1;
+ }
+
+ if (val != 1)
+ return -1;
+
+ ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
+ if (ret < 0) {
+ dev_err(adap->pdev_dev,
+ "setting PPOD_EDRAM failed: %d\n", ret);
+ return -1;
+ }
+ return 0;
}
/**
@@ -1646,6 +1726,18 @@ unsigned int cxgb4_port_chan(const struct net_device *dev)
}
EXPORT_SYMBOL(cxgb4_port_chan);
+/**
+ * cxgb4_port_e2cchan - get the HW c-channel of a port
+ * @dev: the net device for the port
+ *
+ * Return the HW RX c-channel of the given port.
+ */
+unsigned int cxgb4_port_e2cchan(const struct net_device *dev)
+{
+ return netdev2pinfo(dev)->rx_cchan;
+}
+EXPORT_SYMBOL(cxgb4_port_e2cchan);
+
unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
{
struct adapter *adap = netdev2adap(dev);
@@ -2934,8 +3026,8 @@ static int cxgb_set_mac_addr(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- ret = cxgb4_change_mac(pi, pi->viid, &pi->xact_addr_filt,
- addr->sa_data, true, &pi->smt_idx);
+ ret = cxgb4_update_mac_filt(pi, pi->viid, &pi->xact_addr_filt,
+ addr->sa_data, true, &pi->smt_idx);
if (ret < 0)
return ret;
@@ -3043,14 +3135,14 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
}
static int cxgb_setup_tc_flower(struct net_device *dev,
- struct tc_cls_flower_offload *cls_flower)
+ struct flow_cls_offload *cls_flower)
{
switch (cls_flower->command) {
- case TC_CLSFLOWER_REPLACE:
+ case FLOW_CLS_REPLACE:
return cxgb4_tc_flower_replace(dev, cls_flower);
- case TC_CLSFLOWER_DESTROY:
+ case FLOW_CLS_DESTROY:
return cxgb4_tc_flower_destroy(dev, cls_flower);
- case TC_CLSFLOWER_STATS:
+ case FLOW_CLS_STATS:
return cxgb4_tc_flower_stats(dev, cls_flower);
default:
return -EOPNOTSUPP;
@@ -3098,32 +3190,19 @@ static int cxgb_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
}
}
-static int cxgb_setup_tc_block(struct net_device *dev,
- struct tc_block_offload *f)
-{
- struct port_info *pi = netdev2pinfo(dev);
-
- if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
- return -EOPNOTSUPP;
-
- switch (f->command) {
- case TC_BLOCK_BIND:
- return tcf_block_cb_register(f->block, cxgb_setup_tc_block_cb,
- pi, dev, f->extack);
- case TC_BLOCK_UNBIND:
- tcf_block_cb_unregister(f->block, cxgb_setup_tc_block_cb, pi);
- return 0;
- default:
- return -EOPNOTSUPP;
- }
-}
+static LIST_HEAD(cxgb_block_cb_list);
static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
+ struct port_info *pi = netdev2pinfo(dev);
+
switch (type) {
case TC_SETUP_BLOCK:
- return cxgb_setup_tc_block(dev, type_data);
+ return flow_block_cb_setup_simple(type_data,
+ &cxgb_block_cb_list,
+ cxgb_setup_tc_block_cb,
+ pi, dev, true);
default:
return -EOPNOTSUPP;
}
@@ -3187,8 +3266,6 @@ static void cxgb_del_udp_tunnel(struct net_device *netdev,
i);
return;
}
- atomic_dec(&adapter->mps_encap[adapter->rawf_start +
- pi->port_id].refcnt);
}
}
@@ -3277,7 +3354,6 @@ static void cxgb_add_udp_tunnel(struct net_device *netdev,
cxgb_del_udp_tunnel(netdev, ti);
return;
}
- atomic_inc(&adapter->mps_encap[ret].refcnt);
}
}
@@ -3905,14 +3981,14 @@ static int adap_init0_phy(struct adapter *adap)
*/
static int adap_init0_config(struct adapter *adapter, int reset)
{
+ char *fw_config_file, fw_config_file_path[256];
+ u32 finiver, finicsum, cfcsum, param, val;
struct fw_caps_config_cmd caps_cmd;
- const struct firmware *cf;
unsigned long mtype = 0, maddr = 0;
- u32 finiver, finicsum, cfcsum;
- int ret;
- int config_issued = 0;
- char *fw_config_file, fw_config_file_path[256];
+ const struct firmware *cf;
char *config_name = NULL;
+ int config_issued = 0;
+ int ret;
/*
* Reset device if necessary.
@@ -4020,6 +4096,24 @@ static int adap_init0_config(struct adapter *adapter, int reset)
goto bye;
}
+ val = 0;
+
+ /* Ofld + Hash filter is supported. Older fw will fail this request and
+ * it is fine.
+ */
+ param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_HASHFILTER_WITH_OFLD));
+ ret = t4_set_params(adapter, adapter->mbox, adapter->pf, 0,
+ 1, &param, &val);
+
+ /* FW doesn't know about Hash filter + ofld support,
+ * it's not a problem, don't return an error.
+ */
+ if (ret < 0) {
+ dev_warn(adapter->pdev_dev,
+ "Hash filter with ofld is not supported by FW\n");
+ }
+
/*
* Issue a Capability Configuration command to the firmware to get it
* to parse the Configuration File. We don't use t4_fw_config_file()
@@ -4096,6 +4190,13 @@ static int adap_init0_config(struct adapter *adapter, int reset)
dev_err(adapter->pdev_dev,
"HMA configuration failed with error %d\n", ret);
+ if (is_t6(adapter->params.chip)) {
+ ret = setup_ppod_edram(adapter);
+ if (!ret)
+ dev_info(adapter->pdev_dev, "Successfully enabled "
+ "ppod edram feature\n");
+ }
+
/*
* And finally tell the firmware to initialize itself using the
* parameters from the Configuration File.
@@ -4580,6 +4681,13 @@ static int adap_init0(struct adapter *adap)
if (ret < 0)
goto bye;
+ /* Hash filter init has some mandatory register settings to verify,
+ * and that check needs to know whether offload is enabled, hence
+ * checking and setting it here.
+ */
+ if (caps_cmd.ofldcaps)
+ adap->params.offload = 1;
+
if (caps_cmd.ofldcaps ||
(caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_HASHFILTER))) {
/* query offload-related parameters */
@@ -4619,11 +4727,8 @@ static int adap_init0(struct adapter *adap)
adap->params.ofldq_wr_cred = val[5];
if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_HASHFILTER)) {
- ret = init_hash_filter(adap);
- if (ret < 0)
- goto bye;
+ init_hash_filter(adap);
} else {
- adap->params.offload = 1;
adap->num_ofld_uld += 1;
}
}
@@ -4715,6 +4820,22 @@ static int adap_init0(struct adapter *adap)
goto bye;
adap->vres.iscsi.start = val[0];
adap->vres.iscsi.size = val[1] - val[0] + 1;
+ if (is_t6(adap->params.chip)) {
+ params[0] = FW_PARAM_PFVF(PPOD_EDRAM_START);
+ params[1] = FW_PARAM_PFVF(PPOD_EDRAM_END);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
+ params, val);
+ if (!ret) {
+ adap->vres.ppod_edram.start = val[0];
+ adap->vres.ppod_edram.size =
+ val[1] - val[0] + 1;
+
+ dev_info(adap->pdev_dev,
+ "ppod edram start 0x%x end 0x%x size 0x%x\n",
+ val[0], val[1],
+ adap->vres.ppod_edram.size);
+ }
+ }
/* LIO target and cxgb4i initiator */
adap->num_ofld_uld += 2;
}
@@ -5315,7 +5436,6 @@ static void free_some_resources(struct adapter *adapter)
{
unsigned int i;
- kvfree(adapter->mps_encap);
kvfree(adapter->smt);
kvfree(adapter->l2t);
kvfree(adapter->srq);
@@ -5841,12 +5961,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->params.offload = 0;
}
- adapter->mps_encap = kvcalloc(adapter->params.arch.mps_tcam_size,
- sizeof(struct mps_encap_entry),
- GFP_KERNEL);
- if (!adapter->mps_encap)
- dev_warn(&pdev->dev, "could not allocate MPS Encap entries, continuing\n");
-
#if IS_ENABLED(CONFIG_IPV6)
if (chip_ver <= CHELSIO_T5 &&
(!(t4_read_reg(adapter, LE_DB_CONFIG_A) & ASLIPCOMPEN_F))) {
@@ -5922,6 +6036,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* check for PCI Express bandwidth capabiltites */
pcie_print_link_status(pdev);
+ cxgb4_init_mps_ref_entries(adapter);
+
err = init_rss(adapter);
if (err)
goto out_free_dev;
@@ -6048,6 +6164,8 @@ static void remove_one(struct pci_dev *pdev)
disable_interrupts(adapter);
+ cxgb4_free_mps_ref_entries(adapter);
+
for_each_port(adapter, i)
if (adapter->port[i]->reg_state == NETREG_REGISTERED)
unregister_netdev(adapter->port[i]);
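Both the PPOD_EDRAM and HASHFILTER_WITH_OFLD additions in cxgb4_main.c follow the same firmware-parameter handshake: build a parameter word from the mnemonic/index macros, query it, and optionally write it back. A condensed sketch of that handshake, using only the t4_query_params()/t4_set_params() signatures already used above (the helper name and -EOPNOTSUPP mapping are illustrative):

/* Condensed sketch of the query-then-set handshake used by
 * setup_ppod_edram() above; @param_x would be e.g.
 * FW_PARAMS_PARAM_DEV_PPOD_EDRAM. Returns 0 only if the feature
 * was advertised and enabled.
 */
static int example_enable_fw_feature(struct adapter *adap, u32 param_x)
{
	u32 param, val = 0;
	int ret;

	param = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		FW_PARAMS_PARAM_X_V(param_x);

	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
	if (ret < 0 || val != 1)
		return -EOPNOTSUPP;	/* firmware doesn't advertise it */

	val = 1;
	return t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
}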
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_mps.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_mps.c
new file mode 100644
index 000000000000..b1a073eea60b
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_mps.c
@@ -0,0 +1,241 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Chelsio Communications, Inc. All rights reserved. */
+
+#include "cxgb4.h"
+
+static int cxgb4_mps_ref_dec_by_mac(struct adapter *adap,
+ const u8 *addr, const u8 *mask)
+{
+ u8 bitmask[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+ struct mps_entries_ref *mps_entry, *tmp;
+ int ret = -EINVAL;
+
+ spin_lock_bh(&adap->mps_ref_lock);
+ list_for_each_entry_safe(mps_entry, tmp, &adap->mps_ref, list) {
+ if (ether_addr_equal(mps_entry->addr, addr) &&
+ ether_addr_equal(mps_entry->mask, mask ? mask : bitmask)) {
+ if (!refcount_dec_and_test(&mps_entry->refcnt)) {
+ spin_unlock_bh(&adap->mps_ref_lock);
+ return -EBUSY;
+ }
+ list_del(&mps_entry->list);
+ kfree(mps_entry);
+ ret = 0;
+ break;
+ }
+ }
+ spin_unlock_bh(&adap->mps_ref_lock);
+ return ret;
+}
+
+static int cxgb4_mps_ref_dec(struct adapter *adap, u16 idx)
+{
+ struct mps_entries_ref *mps_entry, *tmp;
+ int ret = -EINVAL;
+
+ spin_lock(&adap->mps_ref_lock);
+ list_for_each_entry_safe(mps_entry, tmp, &adap->mps_ref, list) {
+ if (mps_entry->idx == idx) {
+ if (!refcount_dec_and_test(&mps_entry->refcnt)) {
+ spin_unlock(&adap->mps_ref_lock);
+ return -EBUSY;
+ }
+ list_del(&mps_entry->list);
+ kfree(mps_entry);
+ ret = 0;
+ break;
+ }
+ }
+ spin_unlock(&adap->mps_ref_lock);
+ return ret;
+}
+
+static int cxgb4_mps_ref_inc(struct adapter *adap, const u8 *mac_addr,
+ u16 idx, const u8 *mask)
+{
+ u8 bitmask[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+ struct mps_entries_ref *mps_entry;
+ int ret = 0;
+
+ spin_lock_bh(&adap->mps_ref_lock);
+ list_for_each_entry(mps_entry, &adap->mps_ref, list) {
+ if (mps_entry->idx == idx) {
+ refcount_inc(&mps_entry->refcnt);
+ goto unlock;
+ }
+ }
+ mps_entry = kzalloc(sizeof(*mps_entry), GFP_ATOMIC);
+ if (!mps_entry) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+ ether_addr_copy(mps_entry->mask, mask ? mask : bitmask);
+ ether_addr_copy(mps_entry->addr, mac_addr);
+ mps_entry->idx = idx;
+ refcount_set(&mps_entry->refcnt, 1);
+ list_add_tail(&mps_entry->list, &adap->mps_ref);
+unlock:
+ spin_unlock_bh(&adap->mps_ref_lock);
+ return ret;
+}
+
+int cxgb4_free_mac_filt(struct adapter *adap, unsigned int viid,
+ unsigned int naddr, const u8 **addr, bool sleep_ok)
+{
+ int ret, i;
+
+ for (i = 0; i < naddr; i++) {
+ if (!cxgb4_mps_ref_dec_by_mac(adap, addr[i], NULL)) {
+ ret = t4_free_mac_filt(adap, adap->mbox, viid,
+ 1, &addr[i], sleep_ok);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ /* return number of filters freed */
+ return naddr;
+}
+
+int cxgb4_alloc_mac_filt(struct adapter *adap, unsigned int viid,
+ bool free, unsigned int naddr, const u8 **addr,
+ u16 *idx, u64 *hash, bool sleep_ok)
+{
+ int ret, i;
+
+ ret = t4_alloc_mac_filt(adap, adap->mbox, viid, free,
+ naddr, addr, idx, hash, sleep_ok);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < naddr; i++) {
+ if (idx[i] != 0xffff) {
+ if (cxgb4_mps_ref_inc(adap, addr[i], idx[i], NULL)) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ }
+ }
+
+ goto out;
+error:
+ cxgb4_free_mac_filt(adap, viid, naddr, addr, sleep_ok);
+
+out:
+ /* Returns a negative error number or the number of filters allocated */
+ return ret;
+}
+
+int cxgb4_update_mac_filt(struct port_info *pi, unsigned int viid,
+ int *tcam_idx, const u8 *addr,
+ bool persistent, u8 *smt_idx)
+{
+ int ret;
+
+ ret = cxgb4_change_mac(pi, viid, tcam_idx,
+ addr, persistent, smt_idx);
+ if (ret < 0)
+ return ret;
+
+ cxgb4_mps_ref_inc(pi->adapter, addr, *tcam_idx, NULL);
+ return ret;
+}
+
+int cxgb4_free_raw_mac_filt(struct adapter *adap,
+ unsigned int viid,
+ const u8 *addr,
+ const u8 *mask,
+ unsigned int idx,
+ u8 lookup_type,
+ u8 port_id,
+ bool sleep_ok)
+{
+ int ret = 0;
+
+ if (!cxgb4_mps_ref_dec(adap, idx))
+ ret = t4_free_raw_mac_filt(adap, viid, addr,
+ mask, idx, lookup_type,
+ port_id, sleep_ok);
+
+ return ret;
+}
+
+int cxgb4_alloc_raw_mac_filt(struct adapter *adap,
+ unsigned int viid,
+ const u8 *addr,
+ const u8 *mask,
+ unsigned int idx,
+ u8 lookup_type,
+ u8 port_id,
+ bool sleep_ok)
+{
+ int ret;
+
+ ret = t4_alloc_raw_mac_filt(adap, viid, addr,
+ mask, idx, lookup_type,
+ port_id, sleep_ok);
+ if (ret < 0)
+ return ret;
+
+ if (cxgb4_mps_ref_inc(adap, addr, ret, mask)) {
+ ret = -ENOMEM;
+ t4_free_raw_mac_filt(adap, viid, addr,
+ mask, idx, lookup_type,
+ port_id, sleep_ok);
+ }
+
+ return ret;
+}
+
+int cxgb4_free_encap_mac_filt(struct adapter *adap, unsigned int viid,
+ int idx, bool sleep_ok)
+{
+ int ret = 0;
+
+ if (!cxgb4_mps_ref_dec(adap, idx))
+ ret = t4_free_encap_mac_filt(adap, viid, idx, sleep_ok);
+
+ return ret;
+}
+
+int cxgb4_alloc_encap_mac_filt(struct adapter *adap, unsigned int viid,
+ const u8 *addr, const u8 *mask,
+ unsigned int vni, unsigned int vni_mask,
+ u8 dip_hit, u8 lookup_type, bool sleep_ok)
+{
+ int ret;
+
+ ret = t4_alloc_encap_mac_filt(adap, viid, addr, mask, vni, vni_mask,
+ dip_hit, lookup_type, sleep_ok);
+ if (ret < 0)
+ return ret;
+
+ if (cxgb4_mps_ref_inc(adap, addr, ret, mask)) {
+ ret = -ENOMEM;
+ t4_free_encap_mac_filt(adap, viid, ret, sleep_ok);
+ }
+ return ret;
+}
+
+int cxgb4_init_mps_ref_entries(struct adapter *adap)
+{
+ spin_lock_init(&adap->mps_ref_lock);
+ INIT_LIST_HEAD(&adap->mps_ref);
+
+ return 0;
+}
+
+void cxgb4_free_mps_ref_entries(struct adapter *adap)
+{
+ struct mps_entries_ref *mps_entry, *tmp;
+
+ if (list_empty(&adap->mps_ref))
+ return;
+
+ spin_lock(&adap->mps_ref_lock);
+ list_for_each_entry_safe(mps_entry, tmp, &adap->mps_ref, list) {
+ list_del(&mps_entry->list);
+ kfree(mps_entry);
+ }
+ spin_unlock(&adap->mps_ref_lock);
+}
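cxgb4_mps.c above tracks each MPS TCAM index with a refcount_t entry on a spinlock-protected list, so shared filters are only freed when the last user drops its reference. The same idiom in isolation, with hypothetical names and no driver specifics:

#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct ref_entry {
	struct list_head list;
	u16 idx;
	refcount_t refcnt;
};

/* Take a reference on @idx, creating the tracking entry on first use.
 * The matching put mirrors cxgb4_mps_ref_dec(): refcount_dec_and_test()
 * decides whether the caller may free the underlying hardware entry.
 */
static int ref_get(struct list_head *head, spinlock_t *lock, u16 idx)
{
	struct ref_entry *e;

	spin_lock_bh(lock);
	list_for_each_entry(e, head, list) {
		if (e->idx == idx) {
			refcount_inc(&e->refcnt);
			goto out;
		}
	}

	e = kzalloc(sizeof(*e), GFP_ATOMIC);	/* atomic: allocated under the lock */
	if (!e) {
		spin_unlock_bh(lock);
		return -ENOMEM;
	}
	e->idx = idx;
	refcount_set(&e->refcnt, 1);
	list_add_tail(&e->list, head);
out:
	spin_unlock_bh(lock);
	return 0;
}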
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index cfaf8f618d1f..312599c6b35a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -80,10 +80,10 @@ static struct ch_tc_flower_entry *ch_flower_lookup(struct adapter *adap,
}
static void cxgb4_process_flow_match(struct net_device *dev,
- struct tc_cls_flower_offload *cls,
+ struct flow_cls_offload *cls,
struct ch_filter_specification *fs)
{
- struct flow_rule *rule = tc_cls_flower_offload_flow_rule(cls);
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
u16 addr_type = 0;
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
@@ -223,9 +223,9 @@ static void cxgb4_process_flow_match(struct net_device *dev,
}
static int cxgb4_validate_flow_match(struct net_device *dev,
- struct tc_cls_flower_offload *cls)
+ struct flow_cls_offload *cls)
{
- struct flow_rule *rule = tc_cls_flower_offload_flow_rule(cls);
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
struct flow_dissector *dissector = rule->match.dissector;
u16 ethtype_mask = 0;
u16 ethtype_key = 0;
@@ -378,10 +378,10 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
}
static void cxgb4_process_flow_actions(struct net_device *in,
- struct tc_cls_flower_offload *cls,
+ struct flow_cls_offload *cls,
struct ch_filter_specification *fs)
{
- struct flow_rule *rule = tc_cls_flower_offload_flow_rule(cls);
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
struct flow_action_entry *act;
int i;
@@ -544,9 +544,9 @@ static bool valid_pedit_action(struct net_device *dev,
}
static int cxgb4_validate_flow_actions(struct net_device *dev,
- struct tc_cls_flower_offload *cls)
+ struct flow_cls_offload *cls)
{
- struct flow_rule *rule = tc_cls_flower_offload_flow_rule(cls);
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
struct flow_action_entry *act;
bool act_redir = false;
bool act_pedit = false;
@@ -633,7 +633,7 @@ static int cxgb4_validate_flow_actions(struct net_device *dev,
}
int cxgb4_tc_flower_replace(struct net_device *dev,
- struct tc_cls_flower_offload *cls)
+ struct flow_cls_offload *cls)
{
struct adapter *adap = netdev2adap(dev);
struct ch_tc_flower_entry *ch_flower;
@@ -709,7 +709,7 @@ free_entry:
}
int cxgb4_tc_flower_destroy(struct net_device *dev,
- struct tc_cls_flower_offload *cls)
+ struct flow_cls_offload *cls)
{
struct adapter *adap = netdev2adap(dev);
struct ch_tc_flower_entry *ch_flower;
@@ -783,7 +783,7 @@ static void ch_flower_stats_cb(struct timer_list *t)
}
int cxgb4_tc_flower_stats(struct net_device *dev,
- struct tc_cls_flower_offload *cls)
+ struct flow_cls_offload *cls)
{
struct adapter *adap = netdev2adap(dev);
struct ch_tc_flower_stats *ofld_stats;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h
index 050c8a50ae41..eb4c95248baf 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h
@@ -109,11 +109,11 @@ struct ch_tc_pedit_fields {
#define PEDIT_UDP_SPORT_DPORT 0x0
int cxgb4_tc_flower_replace(struct net_device *dev,
- struct tc_cls_flower_offload *cls);
+ struct flow_cls_offload *cls);
int cxgb4_tc_flower_destroy(struct net_device *dev,
- struct tc_cls_flower_offload *cls);
+ struct flow_cls_offload *cls);
int cxgb4_tc_flower_stats(struct net_device *dev,
- struct tc_cls_flower_offload *cls);
+ struct flow_cls_offload *cls);
int cxgb4_init_tc_flower(struct adapter *adap);
void cxgb4_cleanup_tc_flower(struct adapter *adap);
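The tc_cls_flower_offload to flow_cls_offload rename above pairs with the flow_block_cb_setup_simple() registration in cxgb4_main.c. A sketch of the general shape of such a conversion, assuming only the standard flow_offload API; every name other than the flow_*/TC_* symbols is a placeholder, and the grouped cases just mark where a real driver would dispatch to its handlers:

#include <linux/netdevice.h>
#include <net/pkt_cls.h>
#include <net/flow_offload.h>

static LIST_HEAD(example_block_cb_list);

static int example_block_cb(enum tc_setup_type type, void *type_data,
			    void *cb_priv)
{
	struct flow_cls_offload *cls = type_data;

	if (type != TC_SETUP_CLSFLOWER)
		return -EOPNOTSUPP;

	switch (cls->command) {
	case FLOW_CLS_REPLACE:	/* install a rule */
	case FLOW_CLS_DESTROY:	/* remove it */
	case FLOW_CLS_STATS:	/* report hit counters */
		return 0;	/* a real driver dispatches to its handlers here */
	default:
		return -EOPNOTSUPP;
	}
}

static int example_setup_tc(struct net_device *dev, enum tc_setup_type type,
			    void *type_data)
{
	if (type != TC_SETUP_BLOCK)
		return -EOPNOTSUPP;

	/* Replaces the old open-coded TC_BLOCK_BIND/UNBIND handling. */
	return flow_block_cb_setup_simple(type_data, &example_block_cb_list,
					  example_block_cb, dev, dev, true);
}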
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index 6c685b920713..5b602243d573 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -352,25 +352,32 @@ static int
request_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
+ struct uld_msix_info *minfo;
int err = 0;
unsigned int idx, bmap_idx;
for_each_uldrxq(rxq_info, idx) {
bmap_idx = rxq_info->msix_tbl[idx];
- err = request_irq(adap->msix_info_ulds[bmap_idx].vec,
+ minfo = &adap->msix_info_ulds[bmap_idx];
+ err = request_irq(minfo->vec,
t4_sge_intr_msix, 0,
- adap->msix_info_ulds[bmap_idx].desc,
+ minfo->desc,
&rxq_info->uldrxq[idx].rspq);
if (err)
goto unwind;
+
+ cxgb4_set_msix_aff(adap, minfo->vec,
+ &minfo->aff_mask, idx);
}
return 0;
+
unwind:
while (idx-- > 0) {
bmap_idx = rxq_info->msix_tbl[idx];
+ minfo = &adap->msix_info_ulds[bmap_idx];
+ cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
free_msix_idx_in_bmap(adap, bmap_idx);
- free_irq(adap->msix_info_ulds[bmap_idx].vec,
- &rxq_info->uldrxq[idx].rspq);
+ free_irq(minfo->vec, &rxq_info->uldrxq[idx].rspq);
}
return err;
}
@@ -379,14 +386,16 @@ static void
free_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
+ struct uld_msix_info *minfo;
unsigned int idx, bmap_idx;
for_each_uldrxq(rxq_info, idx) {
bmap_idx = rxq_info->msix_tbl[idx];
+ minfo = &adap->msix_info_ulds[bmap_idx];
+ cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
free_msix_idx_in_bmap(adap, bmap_idx);
- free_irq(adap->msix_info_ulds[bmap_idx].vec,
- &rxq_info->uldrxq[idx].rspq);
+ free_irq(minfo->vec, &rxq_info->uldrxq[idx].rspq);
}
}
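Both the ethernet and ULD interrupt paths now spread their vectors with cxgb4_set_msix_aff()/cxgb4_clear_msix_aff(). The underlying pattern in isolation, with hypothetical helper names and caller-supplied irq/index/node values:

#include <linux/cpumask.h>
#include <linux/interrupt.h>

/* Pin one MSI-X vector to a CPU chosen near the device's NUMA node. */
static int example_set_irq_affinity(unsigned int irq, int queue_idx, int node,
				    cpumask_var_t *mask)
{
	if (!zalloc_cpumask_var(mask, GFP_KERNEL))
		return -ENOMEM;

	/* cpumask_local_spread() walks CPUs local to @node first. */
	cpumask_set_cpu(cpumask_local_spread(queue_idx, node), *mask);

	/* A failure here only loses an optimisation; it is not fatal. */
	if (irq_set_affinity_hint(irq, *mask))
		pr_warn("irq %u: affinity hint failed\n", irq);

	return 0;
}

static void example_clear_irq_affinity(unsigned int irq, cpumask_var_t mask)
{
	irq_set_affinity_hint(irq, NULL);
	free_cpumask_var(mask);
}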
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 21da34a4ca24..cee582e36134 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -292,6 +292,7 @@ struct cxgb4_virt_res { /* virtualized HW resources */
struct cxgb4_range ocq;
struct cxgb4_range key;
unsigned int ncrypto_fc;
+ struct cxgb4_range ppod_edram;
};
struct chcr_stats_debug {
@@ -393,6 +394,7 @@ int cxgb4_immdata_send(struct net_device *dev, unsigned int idx,
int cxgb4_crypto_send(struct net_device *dev, struct sk_buff *skb);
unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo);
unsigned int cxgb4_port_chan(const struct net_device *dev);
+unsigned int cxgb4_port_e2cchan(const struct net_device *dev);
unsigned int cxgb4_port_viid(const struct net_device *dev);
unsigned int cxgb4_tp_smt_idx(enum chip_type chip, unsigned int viid);
unsigned int cxgb4_port_idx(const struct net_device *dev);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 93feb258067b..9dd5ed9a2965 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -6209,6 +6209,37 @@ unsigned int t4_get_mps_bg_map(struct adapter *adapter, int pidx)
}
/**
+ * t4_get_tp_e2c_map - return the E2C channel map associated with a port
+ * @adapter: the adapter
+ * @pidx: the port index
+ */
+static unsigned int t4_get_tp_e2c_map(struct adapter *adapter, int pidx)
+{
+ unsigned int nports;
+ u32 param, val = 0;
+ int ret;
+
+ nports = 1 << NUMPORTS_G(t4_read_reg(adapter, MPS_CMN_CTL_A));
+ if (pidx >= nports) {
+ CH_WARN(adapter, "TP E2C Channel Port Index %d >= Nports %d\n",
+ pidx, nports);
+ return 0;
+ }
+
+ /* FW version >= 1.16.44.0 can determine E2C channel map using
+ * FW_PARAMS_PARAM_DEV_TPCHMAP API.
+ */
+ param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_TPCHMAP));
+ ret = t4_query_params_ns(adapter, adapter->mbox, adapter->pf,
+ 0, 1, &param, &val);
+ if (!ret)
+ return (val >> (8 * pidx)) & 0xff;
+
+ return 0;
+}
+
+/**
* t4_get_tp_ch_map - return TP ingress channels associated with a port
* @adapter: the adapter
* @pidx: the port index
@@ -9368,8 +9399,9 @@ int t4_init_sge_params(struct adapter *adapter)
*/
int t4_init_tp_params(struct adapter *adap, bool sleep_ok)
{
- int chan;
- u32 v;
+ u32 param, val, v;
+ int chan, ret;
+
v = t4_read_reg(adap, TP_TIMER_RESOLUTION_A);
adap->params.tp.tre = TIMERRESOLUTION_G(v);
@@ -9379,11 +9411,47 @@ int t4_init_tp_params(struct adapter *adap, bool sleep_ok)
for (chan = 0; chan < NCHAN; chan++)
adap->params.tp.tx_modq[chan] = chan;
- /* Cache the adapter's Compressed Filter Mode and global Incress
+ /* Cache the adapter's Compressed Filter Mode/Mask and global Ingress
* Configuration.
*/
- t4_tp_pio_read(adap, &adap->params.tp.vlan_pri_map, 1,
- TP_VLAN_PRI_MAP_A, sleep_ok);
+ param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FILTER) |
+ FW_PARAMS_PARAM_Y_V(FW_PARAM_DEV_FILTER_MODE_MASK));
+
+ /* Read current value */
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
+ &param, &val);
+ if (ret == 0) {
+ dev_info(adap->pdev_dev,
+ "Current filter mode/mask 0x%x:0x%x\n",
+ FW_PARAMS_PARAM_FILTER_MODE_G(val),
+ FW_PARAMS_PARAM_FILTER_MASK_G(val));
+ adap->params.tp.vlan_pri_map =
+ FW_PARAMS_PARAM_FILTER_MODE_G(val);
+ adap->params.tp.filter_mask =
+ FW_PARAMS_PARAM_FILTER_MASK_G(val);
+ } else {
+ dev_info(adap->pdev_dev,
+ "Failed to read filter mode/mask via fw api, using indirect-reg-read\n");
+
+ /* In case of an older fw (which doesn't expose the
+ * FW_PARAM_DEV_FILTER_MODE_MASK api) combined with a newer driver
+ * (which uses the fw api), fall back to the older method of reading
+ * the filter mode from the indirect register.
+ */
+ t4_tp_pio_read(adap, &adap->params.tp.vlan_pri_map, 1,
+ TP_VLAN_PRI_MAP_A, sleep_ok);
+
+ /* With the older-fw and newer-driver combination we might run
+ * into an issue when the user wants to use the hash filter region
+ * but the filter_mask is zero; in that case filter_mask validation
+ * is hard. To avoid it, we set the filter_mask equal to the filter
+ * mode, which behaves exactly like the older way of ignoring the
+ * filter mask validation.
+ */
+ adap->params.tp.filter_mask = adap->params.tp.vlan_pri_map;
+ }
+
t4_tp_pio_read(adap, &adap->params.tp.ingress_config, 1,
TP_INGRESS_CONFIG_A, sleep_ok);
@@ -9594,6 +9662,7 @@ int t4_init_portinfo(struct port_info *pi, int mbox,
pi->tx_chan = port;
pi->lport = port;
pi->rss_size = rss_size;
+ pi->rx_cchan = t4_get_tp_e2c_map(pi->adapter, port);
/* If fw supports returning the VIN as part of FW_VI_CMD,
* save the returned values.
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index eb222d40ddbf..a957a6e4d4c4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -1334,6 +1334,10 @@
#define TP_OUT_CONFIG_A 0x7d04
#define TP_GLOBAL_CONFIG_A 0x7d08
+#define ACTIVEFILTERCOUNTS_S 22
+#define ACTIVEFILTERCOUNTS_V(x) ((x) << ACTIVEFILTERCOUNTS_S)
+#define ACTIVEFILTERCOUNTS_F ACTIVEFILTERCOUNTS_V(1U)
+
#define TP_CMM_TCB_BASE_A 0x7d10
#define TP_CMM_MM_BASE_A 0x7d14
#define TP_CMM_TIMER_BASE_A 0x7d18
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index b2a618e72fcf..65313f6b5704 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -1221,6 +1221,23 @@ enum fw_params_mnem {
/*
* device parameters
*/
+
+#define FW_PARAMS_PARAM_FILTER_MODE_S 16
+#define FW_PARAMS_PARAM_FILTER_MODE_M 0xffff
+#define FW_PARAMS_PARAM_FILTER_MODE_V(x) \
+ ((x) << FW_PARAMS_PARAM_FILTER_MODE_S)
+#define FW_PARAMS_PARAM_FILTER_MODE_G(x) \
+ (((x) >> FW_PARAMS_PARAM_FILTER_MODE_S) & \
+ FW_PARAMS_PARAM_FILTER_MODE_M)
+
+#define FW_PARAMS_PARAM_FILTER_MASK_S 0
+#define FW_PARAMS_PARAM_FILTER_MASK_M 0xffff
+#define FW_PARAMS_PARAM_FILTER_MASK_V(x) \
+ ((x) << FW_PARAMS_PARAM_FILTER_MASK_S)
+#define FW_PARAMS_PARAM_FILTER_MASK_G(x) \
+ (((x) >> FW_PARAMS_PARAM_FILTER_MASK_S) & \
+ FW_PARAMS_PARAM_FILTER_MASK_M)
+
enum fw_params_param_dev {
FW_PARAMS_PARAM_DEV_CCLK = 0x00, /* chip core clock in khz */
FW_PARAMS_PARAM_DEV_PORTVEC = 0x01, /* the port vector */
@@ -1250,12 +1267,16 @@ enum fw_params_param_dev {
FW_PARAMS_PARAM_DEV_RI_FR_NSMR_TPTE_WR = 0x1C,
FW_PARAMS_PARAM_DEV_FILTER2_WR = 0x1D,
FW_PARAMS_PARAM_DEV_MPSBGMAP = 0x1E,
+ FW_PARAMS_PARAM_DEV_TPCHMAP = 0x1F,
FW_PARAMS_PARAM_DEV_HMA_SIZE = 0x20,
FW_PARAMS_PARAM_DEV_RDMA_WRITE_WITH_IMM = 0x21,
+ FW_PARAMS_PARAM_DEV_PPOD_EDRAM = 0x23,
FW_PARAMS_PARAM_DEV_RI_WRITE_CMPL_WR = 0x24,
FW_PARAMS_PARAM_DEV_OPAQUE_VIID_SMT_EXTN = 0x27,
+ FW_PARAMS_PARAM_DEV_HASHFILTER_WITH_OFLD = 0x28,
FW_PARAMS_PARAM_DEV_DBQ_TIMER = 0x29,
FW_PARAMS_PARAM_DEV_DBQ_TIMERTICK = 0x2A,
+ FW_PARAMS_PARAM_DEV_FILTER = 0x2E,
};
/*
@@ -1312,6 +1333,8 @@ enum fw_params_param_pfvf {
FW_PARAMS_PARAM_PFVF_RAWF_END = 0x37,
FW_PARAMS_PARAM_PFVF_NCRYPTO_LOOKASIDE = 0x39,
FW_PARAMS_PARAM_PFVF_PORT_CAPS32 = 0x3A,
+ FW_PARAMS_PARAM_PFVF_PPOD_EDRAM_START = 0x3B,
+ FW_PARAMS_PARAM_PFVF_PPOD_EDRAM_END = 0x3C,
FW_PARAMS_PARAM_PFVF_LINK_STATE = 0x40,
};
@@ -1347,6 +1370,11 @@ enum fw_params_param_dev_diag {
FW_PARAM_DEV_DIAG_MAXTMPTHRESH = 0x02,
};
+enum fw_params_param_dev_filter {
+ FW_PARAM_DEV_FILTER_VNIC_MODE = 0x00,
+ FW_PARAM_DEV_FILTER_MODE_MASK = 0x01,
+};
+
enum fw_params_param_dev_fwcache {
FW_PARAM_DEV_FWCACHE_FLUSH = 0x00,
FW_PARAM_DEV_FWCACHE_FLUSHINV = 0x01,
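FW_PARAM_DEV_FILTER_MODE_MASK returns both values packed in one 32-bit word, the filter mode in the upper 16 bits and the mask in the lower 16, which is exactly what the new _G() accessors above extract. A small worked sketch (the function name and sample value are illustrative; the macros come from t4fw_api.h above):

/* Worked example of the mode/mask packing behind
 * FW_PARAM_DEV_FILTER_MODE_MASK.
 */
static void example_unpack_filter_mode_mask(u32 val)
{
	u16 mode = FW_PARAMS_PARAM_FILTER_MODE_G(val);	/* bits 31:16 */
	u16 mask = FW_PARAMS_PARAM_FILTER_MASK_G(val);	/* bits 15:0  */

	/* e.g. val = 0x01234567 -> mode = 0x0123, mask = 0x4567 */
	pr_info("filter mode 0x%x mask 0x%x\n", mode, mask);
}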
diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
index e2919005ead3..21034536c9c5 100644
--- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
+++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
@@ -123,6 +123,9 @@ static int ppm_get_cpu_entries(struct cxgbi_ppm *ppm, unsigned int count,
unsigned int cpu;
int i;
+ if (!ppm->pool)
+ return -EINVAL;
+
cpu = get_cpu();
pool = per_cpu_ptr(ppm->pool, cpu);
spin_lock_bh(&pool->lock);
@@ -169,7 +172,9 @@ static int ppm_get_entries(struct cxgbi_ppm *ppm, unsigned int count,
}
ppm->next = i + count;
- if (ppm->next >= ppm->bmap_index_max)
+ if (ppm->max_index_in_edram && (ppm->next >= ppm->max_index_in_edram))
+ ppm->next = 0;
+ else if (ppm->next >= ppm->bmap_index_max)
ppm->next = 0;
spin_unlock_bh(&ppm->map_lock);
@@ -382,18 +387,36 @@ static struct cxgbi_ppm_pool *ppm_alloc_cpu_pool(unsigned int *total,
int cxgbi_ppm_init(void **ppm_pp, struct net_device *ndev,
struct pci_dev *pdev, void *lldev,
- struct cxgbi_tag_format *tformat,
- unsigned int ppmax,
- unsigned int llimit,
- unsigned int start,
- unsigned int reserve_factor)
+ struct cxgbi_tag_format *tformat, unsigned int iscsi_size,
+ unsigned int llimit, unsigned int start,
+ unsigned int reserve_factor, unsigned int iscsi_edram_start,
+ unsigned int iscsi_edram_size)
{
struct cxgbi_ppm *ppm = (struct cxgbi_ppm *)(*ppm_pp);
struct cxgbi_ppm_pool *pool = NULL;
- unsigned int ppmax_pool = 0;
unsigned int pool_index_max = 0;
- unsigned int alloc_sz;
+ unsigned int ppmax_pool = 0;
unsigned int ppod_bmap_size;
+ unsigned int alloc_sz;
+ unsigned int ppmax;
+
+ if (!iscsi_edram_start)
+ iscsi_edram_size = 0;
+
+ if (iscsi_edram_size &&
+ ((iscsi_edram_start + iscsi_edram_size) != start)) {
+ pr_err("iscsi ppod region not contiguous: EDRAM start 0x%x "
+ "size 0x%x DDR start 0x%x\n",
+ iscsi_edram_start, iscsi_edram_size, start);
+ return -EINVAL;
+ }
+
+ if (iscsi_edram_size) {
+ reserve_factor = 0;
+ start = iscsi_edram_start;
+ }
+
+ ppmax = (iscsi_edram_size + iscsi_size) >> PPOD_SIZE_SHIFT;
if (ppm) {
pr_info("ippm: %s, ppm 0x%p,0x%p already initialized, %u/%u.\n",
@@ -434,6 +457,14 @@ int cxgbi_ppm_init(void **ppm_pp, struct net_device *ndev,
__func__, ppmax, ppmax_pool, ppod_bmap_size, start,
end);
}
+ if (iscsi_edram_size) {
+ unsigned int first_ddr_idx =
+ iscsi_edram_size >> PPOD_SIZE_SHIFT;
+
+ ppm->max_index_in_edram = first_ddr_idx - 1;
+ bitmap_set(ppm->ppod_bmap, first_ddr_idx, 1);
+ pr_debug("reserved %u ppod in bitmap\n", first_ddr_idx);
+ }
spin_lock_init(&ppm->map_lock);
kref_init(&ppm->refcnt);
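cxgbi_ppm_init() above treats the eDRAM ppod window as the first part of the index space, so it must sit immediately before the DDR region, and the allocator wraps at max_index_in_edram. A worked sketch of the index arithmetic, assuming the usual 64-byte ppods (PPOD_SIZE_SHIFT == 6); the function name and the sizes in the comment are made up for illustration:

#define EX_PPOD_SIZE_SHIFT	6	/* assumption: 64-byte ppods */

static void example_ppod_split(unsigned int edram_start, unsigned int edram_size,
			       unsigned int ddr_start, unsigned int ddr_size)
{
	unsigned int ppmax, first_ddr_idx;

	/* The two windows must be back to back, as checked in cxgbi_ppm_init(). */
	if (edram_size && edram_start + edram_size != ddr_start)
		return;

	ppmax = (edram_size + ddr_size) >> EX_PPOD_SIZE_SHIFT;
	first_ddr_idx = edram_size >> EX_PPOD_SIZE_SHIFT;

	/* e.g. 64 KB of eDRAM -> indices 0..1023 live in eDRAM,
	 * so max_index_in_edram = first_ddr_idx - 1 = 1023.
	 */
	if (first_ddr_idx)
		pr_info("ppmax %u, eDRAM indices 0..%u\n",
			ppmax, first_ddr_idx - 1);
}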
diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.h b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.h
index a91ad766cef0..7b02c200dd1e 100644
--- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.h
+++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.h
@@ -143,6 +143,7 @@ struct cxgbi_ppm {
spinlock_t map_lock; /* ppm map lock */
unsigned int bmap_index_max;
unsigned int next;
+ unsigned int max_index_in_edram;
unsigned long *ppod_bmap;
struct cxgbi_ppod_data ppod_data[0];
};
@@ -324,9 +325,9 @@ int cxgbi_ppm_ppods_reserve(struct cxgbi_ppm *, unsigned short nr_pages,
unsigned long caller_data);
int cxgbi_ppm_init(void **ppm_pp, struct net_device *, struct pci_dev *,
void *lldev, struct cxgbi_tag_format *,
- unsigned int ppmax, unsigned int llimit,
- unsigned int start,
- unsigned int reserve_factor);
+ unsigned int iscsi_size, unsigned int llimit,
+ unsigned int start, unsigned int reserve_factor,
+ unsigned int edram_start, unsigned int edram_size);
int cxgbi_ppm_release(struct cxgbi_ppm *ppm);
void cxgbi_tagmask_check(unsigned int tagmask, struct cxgbi_tag_format *);
unsigned int cxgbi_tagmask_set(unsigned int ppmax);
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 055f77c70fa3..030fed65393e 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -1062,7 +1062,7 @@ static int ftgmac100_mii_probe(struct ftgmac100 *priv, phy_interface_t intf)
}
/* Indicate that we support PAUSE frames (see comment in
- * Documentation/networking/phy.txt)
+ * Documentation/networking/phy.rst)
*/
phy_support_asym_pause(phydev);
diff --git a/drivers/net/ethernet/freescale/dpaa2/Kconfig b/drivers/net/ethernet/freescale/dpaa2/Kconfig
index 8bd384720f80..fbef2829f3de 100644
--- a/drivers/net/ethernet/freescale/dpaa2/Kconfig
+++ b/drivers/net/ethernet/freescale/dpaa2/Kconfig
@@ -10,8 +10,7 @@ config FSL_DPAA2_ETH
config FSL_DPAA2_PTP_CLOCK
tristate "Freescale DPAA2 PTP Clock"
- depends on FSL_DPAA2_ETH
- imply PTP_1588_CLOCK
+ depends on FSL_DPAA2_ETH && PTP_1588_CLOCK_QORIQ
default y
help
This driver adds support for using the DPAA2 1588 timer module
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 7d2390e3df77..0acb11557ed1 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -555,7 +555,7 @@ static int build_sg_fd(struct dpaa2_eth_priv *priv,
/* Prepare the HW SGT structure */
sgt_buf_size = priv->tx_data_offset +
sizeof(struct dpaa2_sg_entry) * num_dma_bufs;
- sgt_buf = netdev_alloc_frag(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN);
+ sgt_buf = napi_alloc_frag(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN);
if (unlikely(!sgt_buf)) {
err = -ENOMEM;
goto sgt_buf_alloc_failed;
@@ -757,6 +757,7 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
u16 queue_mapping;
unsigned int needed_headroom;
u32 fd_len;
+ u8 prio = 0;
int err, i;
percpu_stats = this_cpu_ptr(priv->percpu_stats);
@@ -814,6 +815,18 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
* a queue affined to the same core that processed the Rx frame
*/
queue_mapping = skb_get_queue_mapping(skb);
+
+ if (net_dev->num_tc) {
+ prio = netdev_txq_to_tc(net_dev, queue_mapping);
+ /* Hardware interprets priority level 0 as being the highest,
+ * so we need to do a reverse mapping to the netdev tc index
+ */
+ prio = net_dev->num_tc - prio - 1;
+ /* We have only one FQ array entry for all Tx hardware queues
+ * with the same flow id (but different priority levels)
+ */
+ queue_mapping %= dpaa2_eth_queue_count(priv);
+ }
fq = &priv->fq[queue_mapping];
fd_len = dpaa2_fd_get_len(&fd);
@@ -824,7 +837,7 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
* the Tx confirmation callback for this frame
*/
for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
- err = priv->enqueue(priv, fq, &fd, 0);
+ err = priv->enqueue(priv, fq, &fd, prio);
if (err != -EBUSY)
break;
}
@@ -997,13 +1010,6 @@ static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
int i, j;
int new_count;
- /* This is the lazy seeding of Rx buffer pools.
- * dpaa2_add_bufs() is also used on the Rx hotpath and calls
- * napi_alloc_frag(). The trouble with that is that it in turn ends up
- * calling this_cpu_ptr(), which mandates execution in atomic context.
- * Rather than splitting up the code, do a one-off preempt disable.
- */
- preempt_disable();
for (j = 0; j < priv->num_channels; j++) {
for (i = 0; i < DPAA2_ETH_NUM_BUFS;
i += DPAA2_ETH_BUFS_PER_CMD) {
@@ -1011,12 +1017,10 @@ static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
priv->channel[j]->buf_count += new_count;
if (new_count < DPAA2_ETH_BUFS_PER_CMD) {
- preempt_enable();
return -ENOMEM;
}
}
}
- preempt_enable();
return 0;
}
@@ -1872,6 +1876,78 @@ static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n,
return n - drops;
}
+static int update_xps(struct dpaa2_eth_priv *priv)
+{
+ struct net_device *net_dev = priv->net_dev;
+ struct cpumask xps_mask;
+ struct dpaa2_eth_fq *fq;
+ int i, num_queues, netdev_queues;
+ int err = 0;
+
+ num_queues = dpaa2_eth_queue_count(priv);
+ netdev_queues = (net_dev->num_tc ? : 1) * num_queues;
+
+ /* The first <num_queues> entries in priv->fq array are Tx/Tx conf
+ * queues, so only process those
+ */
+ for (i = 0; i < netdev_queues; i++) {
+ fq = &priv->fq[i % num_queues];
+
+ cpumask_clear(&xps_mask);
+ cpumask_set_cpu(fq->target_cpu, &xps_mask);
+
+ err = netif_set_xps_queue(net_dev, &xps_mask, i);
+ if (err) {
+ netdev_warn_once(net_dev, "Error setting XPS queue\n");
+ break;
+ }
+ }
+
+ return err;
+}
+
+static int dpaa2_eth_setup_tc(struct net_device *net_dev,
+ enum tc_setup_type type, void *type_data)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ struct tc_mqprio_qopt *mqprio = type_data;
+ u8 num_tc, num_queues;
+ int i;
+
+ if (type != TC_SETUP_QDISC_MQPRIO)
+ return -EINVAL;
+
+ mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+ num_queues = dpaa2_eth_queue_count(priv);
+ num_tc = mqprio->num_tc;
+
+ if (num_tc == net_dev->num_tc)
+ return 0;
+
+ if (num_tc > dpaa2_eth_tc_count(priv)) {
+ netdev_err(net_dev, "Max %d traffic classes supported\n",
+ dpaa2_eth_tc_count(priv));
+ return -EINVAL;
+ }
+
+ if (!num_tc) {
+ netdev_reset_tc(net_dev);
+ netif_set_real_num_tx_queues(net_dev, num_queues);
+ goto out;
+ }
+
+ netdev_set_num_tc(net_dev, num_tc);
+ netif_set_real_num_tx_queues(net_dev, num_tc * num_queues);
+
+ for (i = 0; i < num_tc; i++)
+ netdev_set_tc_queue(net_dev, i, num_queues, i * num_queues);
+
+out:
+ update_xps(priv);
+
+ return 0;
+}
+
static const struct net_device_ops dpaa2_eth_ops = {
.ndo_open = dpaa2_eth_open,
.ndo_start_xmit = dpaa2_eth_tx,
@@ -1884,6 +1960,7 @@ static const struct net_device_ops dpaa2_eth_ops = {
.ndo_change_mtu = dpaa2_eth_change_mtu,
.ndo_bpf = dpaa2_eth_xdp,
.ndo_xdp_xmit = dpaa2_eth_xdp_xmit,
+ .ndo_setup_tc = dpaa2_eth_setup_tc,
};
static void cdan_cb(struct dpaa2_io_notification_ctx *ctx)
@@ -2138,10 +2215,9 @@ static struct dpaa2_eth_channel *get_affine_channel(struct dpaa2_eth_priv *priv,
static void set_fq_affinity(struct dpaa2_eth_priv *priv)
{
struct device *dev = priv->net_dev->dev.parent;
- struct cpumask xps_mask;
struct dpaa2_eth_fq *fq;
int rx_cpu, txc_cpu;
- int i, err;
+ int i;
/* For each FQ, pick one channel/CPU to deliver frames to.
* This may well change at runtime, either through irqbalance or
@@ -2160,17 +2236,6 @@ static void set_fq_affinity(struct dpaa2_eth_priv *priv)
break;
case DPAA2_TX_CONF_FQ:
fq->target_cpu = txc_cpu;
-
- /* Tell the stack to affine to txc_cpu the Tx queue
- * associated with the confirmation one
- */
- cpumask_clear(&xps_mask);
- cpumask_set_cpu(txc_cpu, &xps_mask);
- err = netif_set_xps_queue(priv->net_dev, &xps_mask,
- fq->flowid);
- if (err)
- dev_err(dev, "Error setting XPS queue\n");
-
txc_cpu = cpumask_next(txc_cpu, &priv->dpio_cpumask);
if (txc_cpu >= nr_cpu_ids)
txc_cpu = cpumask_first(&priv->dpio_cpumask);
@@ -2180,6 +2245,8 @@ static void set_fq_affinity(struct dpaa2_eth_priv *priv)
}
fq->channel = get_affine_channel(priv, fq->target_cpu);
}
+
+ update_xps(priv);
}
static void setup_fqs(struct dpaa2_eth_priv *priv)
@@ -2361,11 +2428,10 @@ static inline int dpaa2_eth_enqueue_qd(struct dpaa2_eth_priv *priv,
static inline int dpaa2_eth_enqueue_fq(struct dpaa2_eth_priv *priv,
struct dpaa2_eth_fq *fq,
- struct dpaa2_fd *fd,
- u8 prio __always_unused)
+ struct dpaa2_fd *fd, u8 prio)
{
return dpaa2_io_service_enqueue_fq(fq->channel->dpio,
- fq->tx_fqid, fd);
+ fq->tx_fqid[prio], fd);
}
static void set_enqueue_mode(struct dpaa2_eth_priv *priv)
@@ -2479,14 +2545,9 @@ static int setup_rx_flow(struct dpaa2_eth_priv *priv,
queue.destination.type = DPNI_DEST_DPCON;
queue.destination.priority = 1;
queue.user_context = (u64)(uintptr_t)fq;
- queue.flc.stash_control = 1;
- queue.flc.value &= 0xFFFFFFFFFFFFFFC0;
- /* 01 01 00 - data, annotation, flow context */
- queue.flc.value |= 0x14;
err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
DPNI_QUEUE_RX, 0, fq->flowid,
- DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST |
- DPNI_QUEUE_OPT_FLC,
+ DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
&queue);
if (err) {
dev_err(dev, "dpni_set_queue(RX) failed\n");
@@ -2526,17 +2587,21 @@ static int setup_tx_flow(struct dpaa2_eth_priv *priv,
struct device *dev = priv->net_dev->dev.parent;
struct dpni_queue queue;
struct dpni_queue_id qid;
- int err;
+ int i, err;
- err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
- DPNI_QUEUE_TX, 0, fq->flowid, &queue, &qid);
- if (err) {
- dev_err(dev, "dpni_get_queue(TX) failed\n");
- return err;
+ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
+ err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
+ DPNI_QUEUE_TX, i, fq->flowid,
+ &queue, &qid);
+ if (err) {
+ dev_err(dev, "dpni_get_queue(TX) failed\n");
+ return err;
+ }
+ fq->tx_fqid[i] = qid.fqid;
}
+ /* All Tx queues belonging to the same flowid have the same qdbin */
fq->tx_qdbin = qid.qdbin;
- fq->tx_fqid = qid.fqid;
err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
@@ -3236,7 +3301,7 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
dev = &dpni_dev->dev;
/* Net device */
- net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_TX_QUEUES);
+ net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_NETDEV_QUEUES);
if (!net_dev) {
dev_err(dev, "alloc_etherdev_mq() failed\n");
return -ENOMEM;
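dpaa2_eth_tx() above derives the hardware traffic class and flow id from the netdev queue: because the hardware treats priority 0 as the highest, the netdev tc index is reversed, and all traffic classes of a flow share a single FQ array entry. A tiny worked sketch of that mapping, with made-up queue counts and assuming the linear tc layout set up by dpaa2_eth_setup_tc() above:

/* Worked example of the Tx queue -> (HW prio, flow) mapping,
 * assuming num_tc = 4 traffic classes and num_queues = 8 flows per tc.
 */
static void example_dpaa2_tx_mapping(u16 queue_mapping)
{
	const u8 num_tc = 4;
	const u8 num_queues = 8;
	u8 tc, prio, flow;

	tc = queue_mapping / num_queues;	/* netdev tc of this txq   */
	prio = num_tc - tc - 1;			/* HW: 0 is highest prio   */
	flow = queue_mapping % num_queues;	/* shared FQ array entry   */

	/* e.g. queue_mapping = 19 -> tc 2, HW prio 1, flow 3 */
	pr_info("txq %u -> hw prio %u, flow %u\n", queue_mapping, prio, flow);
}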
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index e180d5a68c98..9af18c24221f 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -282,10 +282,13 @@ struct dpaa2_eth_ch_stats {
};
/* Maximum number of queues associated with a DPNI */
+#define DPAA2_ETH_MAX_TCS 8
#define DPAA2_ETH_MAX_RX_QUEUES 16
#define DPAA2_ETH_MAX_TX_QUEUES 16
#define DPAA2_ETH_MAX_QUEUES (DPAA2_ETH_MAX_RX_QUEUES + \
DPAA2_ETH_MAX_TX_QUEUES)
+#define DPAA2_ETH_MAX_NETDEV_QUEUES \
+ (DPAA2_ETH_MAX_TX_QUEUES * DPAA2_ETH_MAX_TCS)
#define DPAA2_ETH_MAX_DPCONS 16
@@ -299,8 +302,9 @@ struct dpaa2_eth_priv;
struct dpaa2_eth_fq {
u32 fqid;
u32 tx_qdbin;
- u32 tx_fqid;
+ u32 tx_fqid[DPAA2_ETH_MAX_TCS];
u16 flowid;
+ u8 tc;
int target_cpu;
u32 dq_frames;
u32 dq_bytes;
@@ -448,6 +452,9 @@ static inline int dpaa2_eth_cmp_dpni_ver(struct dpaa2_eth_priv *priv,
#define dpaa2_eth_fs_count(priv) \
((priv)->dpni_attrs.fs_entries)
+#define dpaa2_eth_tc_count(priv) \
+ ((priv)->dpni_attrs.num_tcs)
+
/* We have exactly one {Rx, Tx conf} queue per channel */
#define dpaa2_eth_queue_count(priv) \
((priv)->num_channels)
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
index 9b150db3b510..a9503aea527f 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
@@ -5,114 +5,58 @@
*/
#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/ptp_clock_kernel.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/msi.h>
#include <linux/fsl/mc.h>
+#include <linux/fsl/ptp_qoriq.h>
#include "dpaa2-ptp.h"
-struct ptp_dpaa2_priv {
- struct fsl_mc_device *ptp_mc_dev;
- struct ptp_clock *clock;
- struct ptp_clock_info caps;
- u32 freq_comp;
-};
-
-/* PTP clock operations */
-static int ptp_dpaa2_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+static int dpaa2_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
{
- struct ptp_dpaa2_priv *ptp_dpaa2 =
- container_of(ptp, struct ptp_dpaa2_priv, caps);
- struct fsl_mc_device *mc_dev = ptp_dpaa2->ptp_mc_dev;
- struct device *dev = &mc_dev->dev;
- u64 adj;
- u32 diff, tmr_add;
- int neg_adj = 0;
- int err = 0;
-
- if (ppb < 0) {
- neg_adj = 1;
- ppb = -ppb;
- }
-
- tmr_add = ptp_dpaa2->freq_comp;
- adj = tmr_add;
- adj *= ppb;
- diff = div_u64(adj, 1000000000ULL);
-
- tmr_add = neg_adj ? tmr_add - diff : tmr_add + diff;
+ struct ptp_qoriq *ptp_qoriq = container_of(ptp, struct ptp_qoriq, caps);
+ struct fsl_mc_device *mc_dev;
+ struct device *dev;
+ u32 mask = 0;
+ u32 bit;
+ int err;
- err = dprtc_set_freq_compensation(mc_dev->mc_io, 0,
- mc_dev->mc_handle, tmr_add);
- if (err)
- dev_err(dev, "dprtc_set_freq_compensation err %d\n", err);
- return err;
-}
+ dev = ptp_qoriq->dev;
+ mc_dev = to_fsl_mc_device(dev);
-static int ptp_dpaa2_adjtime(struct ptp_clock_info *ptp, s64 delta)
-{
- struct ptp_dpaa2_priv *ptp_dpaa2 =
- container_of(ptp, struct ptp_dpaa2_priv, caps);
- struct fsl_mc_device *mc_dev = ptp_dpaa2->ptp_mc_dev;
- struct device *dev = &mc_dev->dev;
- s64 now;
- int err = 0;
+ switch (rq->type) {
+ case PTP_CLK_REQ_PPS:
+ bit = DPRTC_EVENT_PPS;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
- err = dprtc_get_time(mc_dev->mc_io, 0, mc_dev->mc_handle, &now);
- if (err) {
- dev_err(dev, "dprtc_get_time err %d\n", err);
+ err = dprtc_get_irq_mask(mc_dev->mc_io, 0, mc_dev->mc_handle,
+ DPRTC_IRQ_INDEX, &mask);
+ if (err < 0) {
+ dev_err(dev, "dprtc_get_irq_mask(): %d\n", err);
return err;
}
- now += delta;
+ if (on)
+ mask |= bit;
+ else
+ mask &= ~bit;
- err = dprtc_set_time(mc_dev->mc_io, 0, mc_dev->mc_handle, now);
- if (err)
- dev_err(dev, "dprtc_set_time err %d\n", err);
- return err;
-}
-
-static int ptp_dpaa2_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
-{
- struct ptp_dpaa2_priv *ptp_dpaa2 =
- container_of(ptp, struct ptp_dpaa2_priv, caps);
- struct fsl_mc_device *mc_dev = ptp_dpaa2->ptp_mc_dev;
- struct device *dev = &mc_dev->dev;
- u64 ns;
- u32 remainder;
- int err = 0;
-
- err = dprtc_get_time(mc_dev->mc_io, 0, mc_dev->mc_handle, &ns);
- if (err) {
- dev_err(dev, "dprtc_get_time err %d\n", err);
+ err = dprtc_set_irq_mask(mc_dev->mc_io, 0, mc_dev->mc_handle,
+ DPRTC_IRQ_INDEX, mask);
+ if (err < 0) {
+ dev_err(dev, "dprtc_set_irq_mask(): %d\n", err);
return err;
}
- ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
- ts->tv_nsec = remainder;
- return err;
-}
-
-static int ptp_dpaa2_settime(struct ptp_clock_info *ptp,
- const struct timespec64 *ts)
-{
- struct ptp_dpaa2_priv *ptp_dpaa2 =
- container_of(ptp, struct ptp_dpaa2_priv, caps);
- struct fsl_mc_device *mc_dev = ptp_dpaa2->ptp_mc_dev;
- struct device *dev = &mc_dev->dev;
- u64 ns;
- int err = 0;
-
- ns = ts->tv_sec * 1000000000ULL;
- ns += ts->tv_nsec;
-
- err = dprtc_set_time(mc_dev->mc_io, 0, mc_dev->mc_handle, ns);
- if (err)
- dev_err(dev, "dprtc_set_time err %d\n", err);
- return err;
+ return 0;
}
-static const struct ptp_clock_info ptp_dpaa2_caps = {
+static const struct ptp_clock_info dpaa2_ptp_caps = {
.owner = THIS_MODULE,
.name = "DPAA2 PTP Clock",
.max_adj = 512000,
@@ -121,21 +65,58 @@ static const struct ptp_clock_info ptp_dpaa2_caps = {
.n_per_out = 3,
.n_pins = 0,
.pps = 1,
- .adjfreq = ptp_dpaa2_adjfreq,
- .adjtime = ptp_dpaa2_adjtime,
- .gettime64 = ptp_dpaa2_gettime,
- .settime64 = ptp_dpaa2_settime,
+ .adjfine = ptp_qoriq_adjfine,
+ .adjtime = ptp_qoriq_adjtime,
+ .gettime64 = ptp_qoriq_gettime,
+ .settime64 = ptp_qoriq_settime,
+ .enable = dpaa2_ptp_enable,
};
+static irqreturn_t dpaa2_ptp_irq_handler_thread(int irq, void *priv)
+{
+ struct ptp_qoriq *ptp_qoriq = priv;
+ struct ptp_clock_event event;
+ struct fsl_mc_device *mc_dev;
+ struct device *dev;
+ u32 status = 0;
+ int err;
+
+ dev = ptp_qoriq->dev;
+ mc_dev = to_fsl_mc_device(dev);
+
+ err = dprtc_get_irq_status(mc_dev->mc_io, 0, mc_dev->mc_handle,
+ DPRTC_IRQ_INDEX, &status);
+ if (unlikely(err)) {
+ dev_err(dev, "dprtc_get_irq_status err %d\n", err);
+ return IRQ_NONE;
+ }
+
+ if (status & DPRTC_EVENT_PPS) {
+ event.type = PTP_CLOCK_PPS;
+ ptp_clock_event(ptp_qoriq->clock, &event);
+ }
+
+ err = dprtc_clear_irq_status(mc_dev->mc_io, 0, mc_dev->mc_handle,
+ DPRTC_IRQ_INDEX, status);
+ if (unlikely(err)) {
+ dev_err(dev, "dprtc_clear_irq_status err %d\n", err);
+ return IRQ_NONE;
+ }
+
+ return IRQ_HANDLED;
+}
+
static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev)
{
struct device *dev = &mc_dev->dev;
- struct ptp_dpaa2_priv *ptp_dpaa2;
- u32 tmr_add = 0;
+ struct fsl_mc_device_irq *irq;
+ struct ptp_qoriq *ptp_qoriq;
+ struct device_node *node;
+ void __iomem *base;
int err;
- ptp_dpaa2 = devm_kzalloc(dev, sizeof(*ptp_dpaa2), GFP_KERNEL);
- if (!ptp_dpaa2)
+ ptp_qoriq = devm_kzalloc(dev, sizeof(*ptp_qoriq), GFP_KERNEL);
+ if (!ptp_qoriq)
return -ENOMEM;
err = fsl_mc_portal_allocate(mc_dev, 0, &mc_dev->mc_io);
@@ -154,30 +135,60 @@ static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev)
goto err_free_mcp;
}
- ptp_dpaa2->ptp_mc_dev = mc_dev;
+ ptp_qoriq->dev = dev;
- err = dprtc_get_freq_compensation(mc_dev->mc_io, 0,
- mc_dev->mc_handle, &tmr_add);
- if (err) {
- dev_err(dev, "dprtc_get_freq_compensation err %d\n", err);
+ node = of_find_compatible_node(NULL, NULL, "fsl,dpaa2-ptp");
+ if (!node) {
+ err = -ENODEV;
goto err_close;
}
- ptp_dpaa2->freq_comp = tmr_add;
- ptp_dpaa2->caps = ptp_dpaa2_caps;
+ dev->of_node = node;
- ptp_dpaa2->clock = ptp_clock_register(&ptp_dpaa2->caps, dev);
- if (IS_ERR(ptp_dpaa2->clock)) {
- err = PTR_ERR(ptp_dpaa2->clock);
+ base = of_iomap(node, 0);
+ if (!base) {
+ err = -ENOMEM;
goto err_close;
}
- dpaa2_phc_index = ptp_clock_index(ptp_dpaa2->clock);
+ err = fsl_mc_allocate_irqs(mc_dev);
+ if (err) {
+ dev_err(dev, "MC irqs allocation failed\n");
+ goto err_unmap;
+ }
+
+ irq = mc_dev->irqs[0];
+ ptp_qoriq->irq = irq->msi_desc->irq;
- dev_set_drvdata(dev, ptp_dpaa2);
+ err = devm_request_threaded_irq(dev, ptp_qoriq->irq, NULL,
+ dpaa2_ptp_irq_handler_thread,
+ IRQF_NO_SUSPEND | IRQF_ONESHOT,
+ dev_name(dev), ptp_qoriq);
+ if (err < 0) {
+ dev_err(dev, "devm_request_threaded_irq(): %d\n", err);
+ goto err_free_mc_irq;
+ }
+
+ err = dprtc_set_irq_enable(mc_dev->mc_io, 0, mc_dev->mc_handle,
+ DPRTC_IRQ_INDEX, 1);
+ if (err < 0) {
+ dev_err(dev, "dprtc_set_irq_enable(): %d\n", err);
+ goto err_free_mc_irq;
+ }
+
+ err = ptp_qoriq_init(ptp_qoriq, base, &dpaa2_ptp_caps);
+ if (err)
+ goto err_free_mc_irq;
+
+ dpaa2_phc_index = ptp_qoriq->phc_index;
+ dev_set_drvdata(dev, ptp_qoriq);
return 0;
+err_free_mc_irq:
+ fsl_mc_free_irqs(mc_dev);
+err_unmap:
+ iounmap(base);
err_close:
dprtc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
err_free_mcp:
@@ -188,12 +199,15 @@ err_exit:
static int dpaa2_ptp_remove(struct fsl_mc_device *mc_dev)
{
- struct ptp_dpaa2_priv *ptp_dpaa2;
struct device *dev = &mc_dev->dev;
+ struct ptp_qoriq *ptp_qoriq;
+
+ ptp_qoriq = dev_get_drvdata(dev);
- ptp_dpaa2 = dev_get_drvdata(dev);
- ptp_clock_unregister(ptp_dpaa2->clock);
+ dpaa2_phc_index = -1;
+ ptp_qoriq_free(ptp_qoriq);
+ fsl_mc_free_irqs(mc_dev);
dprtc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
fsl_mc_portal_free(mc_dev->mc_io);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h
index 9af4ac71f347..720cd50f5895 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h
@@ -17,22 +17,54 @@
#define DPRTC_CMDID_CLOSE DPRTC_CMD(0x800)
#define DPRTC_CMDID_OPEN DPRTC_CMD(0x810)
-#define DPRTC_CMDID_SET_FREQ_COMPENSATION DPRTC_CMD(0x1d1)
-#define DPRTC_CMDID_GET_FREQ_COMPENSATION DPRTC_CMD(0x1d2)
-#define DPRTC_CMDID_GET_TIME DPRTC_CMD(0x1d3)
-#define DPRTC_CMDID_SET_TIME DPRTC_CMD(0x1d4)
+#define DPRTC_CMDID_SET_IRQ_ENABLE DPRTC_CMD(0x012)
+#define DPRTC_CMDID_GET_IRQ_ENABLE DPRTC_CMD(0x013)
+#define DPRTC_CMDID_SET_IRQ_MASK DPRTC_CMD(0x014)
+#define DPRTC_CMDID_GET_IRQ_MASK DPRTC_CMD(0x015)
+#define DPRTC_CMDID_GET_IRQ_STATUS DPRTC_CMD(0x016)
+#define DPRTC_CMDID_CLEAR_IRQ_STATUS DPRTC_CMD(0x017)
#pragma pack(push, 1)
struct dprtc_cmd_open {
__le32 dprtc_id;
};
-struct dprtc_get_freq_compensation {
- __le32 freq_compensation;
+struct dprtc_cmd_get_irq {
+ __le32 pad;
+ u8 irq_index;
};
-struct dprtc_time {
- __le64 time;
+struct dprtc_cmd_set_irq_enable {
+ u8 en;
+ u8 pad[3];
+ u8 irq_index;
+};
+
+struct dprtc_rsp_get_irq_enable {
+ u8 en;
+};
+
+struct dprtc_cmd_set_irq_mask {
+ __le32 mask;
+ u8 irq_index;
+};
+
+struct dprtc_rsp_get_irq_mask {
+ __le32 mask;
+};
+
+struct dprtc_cmd_get_irq_status {
+ __le32 status;
+ u8 irq_index;
+};
+
+struct dprtc_rsp_get_irq_status {
+ __le32 status;
+};
+
+struct dprtc_cmd_clear_irq_status {
+ __le32 status;
+ u8 irq_index;
};
#pragma pack(pop)
diff --git a/drivers/net/ethernet/freescale/dpaa2/dprtc.c b/drivers/net/ethernet/freescale/dpaa2/dprtc.c
index c13e09bc7b9d..ed52a34fa6a1 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dprtc.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dprtc.c
@@ -74,121 +74,220 @@ int dprtc_close(struct fsl_mc_io *mc_io,
}
/**
- * dprtc_set_freq_compensation() - Sets a new frequency compensation value.
+ * dprtc_set_irq_enable() - Set overall interrupt state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRTC object
+ * @irq_index: The interrupt index to configure
+ * @en: Interrupt state - enable = 1, disable = 0
*
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPRTC object
- * @freq_compensation: The new frequency compensation value to set.
+ * Allows GPP software to control when interrupts are generated.
+ * Each interrupt can have up to 32 causes. The enable/disable controls the
+ * overall interrupt state: if the interrupt is disabled, none of its causes
+ * will assert an interrupt.
*
* Return: '0' on Success; Error code otherwise.
*/
-int dprtc_set_freq_compensation(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u32 freq_compensation)
+int dprtc_set_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 en)
{
- struct dprtc_get_freq_compensation *cmd_params;
+ struct dprtc_cmd_set_irq_enable *cmd_params;
struct fsl_mc_command cmd = { 0 };
- cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_FREQ_COMPENSATION,
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_IRQ_ENABLE,
cmd_flags,
token);
- cmd_params = (struct dprtc_get_freq_compensation *)cmd.params;
- cmd_params->freq_compensation = cpu_to_le32(freq_compensation);
+ cmd_params = (struct dprtc_cmd_set_irq_enable *)cmd.params;
+ cmd_params->irq_index = irq_index;
+ cmd_params->en = en;
return mc_send_command(mc_io, &cmd);
}
/**
- * dprtc_get_freq_compensation() - Retrieves the frequency compensation value
+ * dprtc_get_irq_enable() - Get overall interrupt state
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRTC object
+ * @irq_index: The interrupt index to configure
+ * @en: Returned interrupt state - enable = 1, disable = 0
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprtc_get_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 *en)
+{
+ struct dprtc_rsp_get_irq_enable *rsp_params;
+ struct dprtc_cmd_get_irq *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ_ENABLE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dprtc_cmd_get_irq *)cmd.params;
+ cmd_params->irq_index = irq_index;
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dprtc_rsp_get_irq_enable *)cmd.params;
+ *en = rsp_params->en;
+
+ return 0;
+}
+
+/**
+ * dprtc_set_irq_mask() - Set interrupt mask.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRTC object
+ * @irq_index: The interrupt index to configure
+ * @mask: Event mask to trigger interrupt;
+ * each bit:
+ * 0 = ignore event
+ * 1 = consider event for asserting IRQ
+ *
+ * Every interrupt can have up to 32 causes and the interrupt model supports
+ * masking/unmasking each cause independently.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprtc_set_irq_mask(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 mask)
+{
+ struct dprtc_cmd_set_irq_mask *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_IRQ_MASK,
+ cmd_flags,
+ token);
+ cmd_params = (struct dprtc_cmd_set_irq_mask *)cmd.params;
+ cmd_params->mask = cpu_to_le32(mask);
+ cmd_params->irq_index = irq_index;
+
+ return mc_send_command(mc_io, &cmd);
+}
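
Taken together with dprtc_set_irq_enable() above, this gives two levels of gating: the per-index enable switches the whole interrupt on or off, and the 32-bit mask selects which individual causes may assert it. A minimal stand-alone sketch of that gating logic (plain C, illustrative only; the function and variable names here are invented for the example and are not part of the driver):

#include <stdint.h>
#include <stdio.h>

/* Illustrative model (not driver code): an interrupt index asserts only
 * when it is enabled overall AND at least one pending cause survives the
 * per-cause mask.
 */
static int irq_would_assert(uint8_t enabled, uint32_t mask, uint32_t pending)
{
	return enabled && (mask & pending);
}

int main(void)
{
	uint32_t pps = 0x08000000;	/* same value as DPRTC_EVENT_PPS */

	printf("%d\n", irq_would_assert(0, pps, pps));	/* 0: globally disabled */
	printf("%d\n", irq_would_assert(1, 0, pps));	/* 0: cause masked out */
	printf("%d\n", irq_would_assert(1, pps, pps));	/* 1: enabled and unmasked */
	return 0;
}

dpaa2_ptp_enable() earlier in this patch performs exactly this kind of read-modify-write on the mask for the DPRTC_EVENT_PPS cause.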
+
+/**
+ * dprtc_get_irq_mask() - Get interrupt mask.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRTC object
+ * @irq_index: The interrupt index to configure
+ * @mask: Returned event mask to trigger interrupt
*
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPRTC object
- * @freq_compensation: Frequency compensation value
+ * Every interrupt can have up to 32 causes and the interrupt model supports
+ * masking/unmasking each cause independently.
*
* Return: '0' on Success; Error code otherwise.
*/
-int dprtc_get_freq_compensation(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u32 *freq_compensation)
+int dprtc_get_irq_mask(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 *mask)
{
- struct dprtc_get_freq_compensation *rsp_params;
+ struct dprtc_rsp_get_irq_mask *rsp_params;
+ struct dprtc_cmd_get_irq *cmd_params;
struct fsl_mc_command cmd = { 0 };
int err;
- cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_FREQ_COMPENSATION,
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ_MASK,
cmd_flags,
token);
+ cmd_params = (struct dprtc_cmd_get_irq *)cmd.params;
+ cmd_params->irq_index = irq_index;
err = mc_send_command(mc_io, &cmd);
if (err)
return err;
- rsp_params = (struct dprtc_get_freq_compensation *)cmd.params;
- *freq_compensation = le32_to_cpu(rsp_params->freq_compensation);
+ rsp_params = (struct dprtc_rsp_get_irq_mask *)cmd.params;
+ *mask = le32_to_cpu(rsp_params->mask);
return 0;
}
/**
- * dprtc_get_time() - Returns the current RTC time.
+ * dprtc_get_irq_status() - Get the current status of any pending interrupts.
*
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPRTC object
- * @time: Current RTC time.
+ * @irq_index: The interrupt index to configure
+ * @status: Returned interrupts status - one bit per cause:
+ * 0 = no interrupt pending
+ * 1 = interrupt pending
*
* Return: '0' on Success; Error code otherwise.
*/
-int dprtc_get_time(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- uint64_t *time)
+int dprtc_get_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 *status)
{
- struct dprtc_time *rsp_params;
+ struct dprtc_cmd_get_irq_status *cmd_params;
+ struct dprtc_rsp_get_irq_status *rsp_params;
struct fsl_mc_command cmd = { 0 };
int err;
- cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_TIME,
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ_STATUS,
cmd_flags,
token);
+ cmd_params = (struct dprtc_cmd_get_irq_status *)cmd.params;
+ cmd_params->status = cpu_to_le32(*status);
+ cmd_params->irq_index = irq_index;
err = mc_send_command(mc_io, &cmd);
if (err)
return err;
- rsp_params = (struct dprtc_time *)cmd.params;
- *time = le64_to_cpu(rsp_params->time);
+ rsp_params = (struct dprtc_rsp_get_irq_status *)cmd.params;
+ *status = le32_to_cpu(rsp_params->status);
return 0;
}
/**
- * dprtc_set_time() - Updates current RTC time.
+ * dprtc_clear_irq_status() - Clear a pending interrupt's status
*
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPRTC object
- * @time: New RTC time.
+ * @irq_index: The interrupt index to configure
+ * @status: Bits to clear (W1C) - one bit per cause:
+ * 0 = don't change
+ * 1 = clear status bit
*
* Return: '0' on Success; Error code otherwise.
*/
-int dprtc_set_time(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- uint64_t time)
+int dprtc_clear_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 status)
{
- struct dprtc_time *cmd_params;
+ struct dprtc_cmd_clear_irq_status *cmd_params;
struct fsl_mc_command cmd = { 0 };
- cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_TIME,
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CLEAR_IRQ_STATUS,
cmd_flags,
token);
- cmd_params = (struct dprtc_time *)cmd.params;
- cmd_params->time = cpu_to_le64(time);
+ cmd_params = (struct dprtc_cmd_clear_irq_status *)cmd.params;
+ cmd_params->irq_index = irq_index;
+ cmd_params->status = cpu_to_le32(status);
return mc_send_command(mc_io, &cmd);
}
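
As the dprtc_get_irq_status()/dprtc_clear_irq_status() kernel-doc above notes, the status word behaves as write-1-to-clear (W1C): the handler reads the pending causes, services them, then writes the same bits back to acknowledge. A tiny stand-alone model of W1C semantics (illustrative only, no MC portal involved):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Toy write-1-to-clear (W1C) register: writing 1 to a bit clears it,
 * writing 0 leaves it untouched.
 */
static uint32_t status_reg = 0x08000001;	/* two causes pending */

static uint32_t w1c_read(void)
{
	return status_reg;
}

static void w1c_clear(uint32_t bits)
{
	status_reg &= ~bits;
}

int main(void)
{
	uint32_t pending = w1c_read();

	/* ...service each pending cause here... */
	w1c_clear(pending);
	printf("remaining: 0x%08" PRIx32 "\n", w1c_read());	/* 00000000 */
	return 0;
}

dpaa2_ptp_irq_handler_thread() earlier in this patch follows the same read/handle/clear sequence against the real DPRTC object.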
diff --git a/drivers/net/ethernet/freescale/dpaa2/dprtc.h b/drivers/net/ethernet/freescale/dpaa2/dprtc.h
index fe19618d6cdf..be7914c1634d 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dprtc.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dprtc.h
@@ -13,6 +13,14 @@
struct fsl_mc_io;
+/**
+ * Number of IRQs
+ */
+#define DPRTC_MAX_IRQ_NUM 1
+#define DPRTC_IRQ_INDEX 0
+
+#define DPRTC_EVENT_PPS 0x08000000
+
int dprtc_open(struct fsl_mc_io *mc_io,
u32 cmd_flags,
int dprtc_id,
@@ -22,24 +30,40 @@ int dprtc_close(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token);
-int dprtc_set_freq_compensation(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u32 freq_compensation);
-
-int dprtc_get_freq_compensation(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u32 *freq_compensation);
-
-int dprtc_get_time(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- uint64_t *time);
-
-int dprtc_set_time(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- uint64_t time);
+int dprtc_set_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 en);
+
+int dprtc_get_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 *en);
+
+int dprtc_set_irq_mask(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 mask);
+
+int dprtc_get_irq_mask(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 *mask);
+
+int dprtc_get_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 *status);
+
+int dprtc_clear_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 status);
#endif /* __FSL_DPRTC_H */
diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig
index 8429f5c1d810..ed0d010c7cf2 100644
--- a/drivers/net/ethernet/freescale/enetc/Kconfig
+++ b/drivers/net/ethernet/freescale/enetc/Kconfig
@@ -29,3 +29,13 @@ config FSL_ENETC_PTP_CLOCK
packets using the SO_TIMESTAMPING API.
If compiled as module (M), the module name is fsl-enetc-ptp.
+
+config FSL_ENETC_HW_TIMESTAMPING
+ bool "ENETC hardware timestamping support"
+ depends on FSL_ENETC || FSL_ENETC_VF
+ help
+	  Enable hardware timestamping of Ethernet packets using the
+	  SO_TIMESTAMPING API. Because dynamic RX BD ring allocation is not
+	  yet supported, and extended RX BDs are too expensive to use when
+	  timestamping is not needed, this option switches the driver to
+	  extended RX BDs in order to support hardware timestamping.
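
For reference, hardware timestamping is turned on from user space through the standard SIOCSHWTSTAMP ioctl underneath the SO_TIMESTAMPING API mentioned in the help text. A minimal user-space sketch (illustrative only; "eth0" is an example interface name and error handling is trimmed):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg = { 0 };
	struct ifreq ifr = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	/* Request HW TX timestamps and timestamping of all RX packets. */
	cfg.tx_type = HWTSTAMP_TX_ON;
	cfg.rx_filter = HWTSTAMP_FILTER_ALL;

	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* example name */
	ifr.ifr_data = (void *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");

	close(fd);
	return 0;
}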
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 491475d87736..223709443ea4 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -13,7 +13,8 @@
#define ENETC_MAX_SKB_FRAGS 13
#define ENETC_TXBDS_MAX_NEEDED ENETC_TXBDS_NEEDED(ENETC_MAX_SKB_FRAGS + 1)
-static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb);
+static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
+ int active_offloads);
netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev)
{
@@ -33,7 +34,7 @@ netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev)
return NETDEV_TX_BUSY;
}
- count = enetc_map_tx_buffs(tx_ring, skb);
+ count = enetc_map_tx_buffs(tx_ring, skb, priv->active_offloads);
if (unlikely(!count))
goto drop_packet_err;
@@ -105,7 +106,8 @@ static void enetc_free_tx_skb(struct enetc_bdr *tx_ring,
}
}
-static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
+static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
+ int active_offloads)
{
struct enetc_tx_swbd *tx_swbd;
struct skb_frag_struct *frag;
@@ -137,7 +139,10 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
count++;
do_vlan = skb_vlan_tag_present(skb);
- do_tstamp = skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP;
+ do_tstamp = (active_offloads & ENETC_F_TX_TSTAMP) &&
+ (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP);
+ tx_swbd->do_tstamp = do_tstamp;
+ tx_swbd->check_wb = tx_swbd->do_tstamp;
if (do_vlan || do_tstamp)
flags |= ENETC_TXBD_FLAGS_EX;
@@ -299,24 +304,70 @@ static int enetc_bd_ready_count(struct enetc_bdr *tx_ring, int ci)
return pi >= ci ? pi - ci : tx_ring->bd_count - ci + pi;
}
+static void enetc_get_tx_tstamp(struct enetc_hw *hw, union enetc_tx_bd *txbd,
+ u64 *tstamp)
+{
+ u32 lo, hi, tstamp_lo;
+
+ lo = enetc_rd(hw, ENETC_SICTR0);
+ hi = enetc_rd(hw, ENETC_SICTR1);
+ tstamp_lo = le32_to_cpu(txbd->wb.tstamp);
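+	/* The BD holds only the low 32 bits of the free-running 1588
+	 * counter; rebuild the high half from the current counter reading,
+	 * stepping it back by one if the low word has wrapped since the
+	 * timestamp was captured.
+	 */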
+ if (lo <= tstamp_lo)
+ hi -= 1;
+ *tstamp = (u64)hi << 32 | tstamp_lo;
+}
+
+static void enetc_tstamp_tx(struct sk_buff *skb, u64 tstamp)
+{
+ struct skb_shared_hwtstamps shhwtstamps;
+
+ if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
+ skb_tstamp_tx(skb, &shhwtstamps);
+ }
+}
+
static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
{
struct net_device *ndev = tx_ring->ndev;
int tx_frm_cnt = 0, tx_byte_cnt = 0;
struct enetc_tx_swbd *tx_swbd;
int i, bds_to_clean;
+ bool do_tstamp;
+ u64 tstamp = 0;
i = tx_ring->next_to_clean;
tx_swbd = &tx_ring->tx_swbd[i];
bds_to_clean = enetc_bd_ready_count(tx_ring, i);
+ do_tstamp = false;
+
while (bds_to_clean && tx_frm_cnt < ENETC_DEFAULT_TX_WORK) {
bool is_eof = !!tx_swbd->skb;
+ if (unlikely(tx_swbd->check_wb)) {
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ union enetc_tx_bd *txbd;
+
+ txbd = ENETC_TXBD(*tx_ring, i);
+
+ if (txbd->flags & ENETC_TXBD_FLAGS_W &&
+ tx_swbd->do_tstamp) {
+ enetc_get_tx_tstamp(&priv->si->hw, txbd,
+ &tstamp);
+ do_tstamp = true;
+ }
+ }
+
if (likely(tx_swbd->dma))
enetc_unmap_tx_buff(tx_ring, tx_swbd);
if (is_eof) {
+ if (unlikely(do_tstamp)) {
+ enetc_tstamp_tx(tx_swbd->skb, tstamp);
+ do_tstamp = false;
+ }
napi_consume_skb(tx_swbd->skb, napi_budget);
tx_swbd->skb = NULL;
}
@@ -425,10 +476,38 @@ static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
return j;
}
+#ifdef CONFIG_FSL_ENETC_HW_TIMESTAMPING
+static void enetc_get_rx_tstamp(struct net_device *ndev,
+ union enetc_rx_bd *rxbd,
+ struct sk_buff *skb)
+{
+ struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+ u32 lo, hi, tstamp_lo;
+ u64 tstamp;
+
+ if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TSTMP) {
+ lo = enetc_rd(hw, ENETC_SICTR0);
+ hi = enetc_rd(hw, ENETC_SICTR1);
+ tstamp_lo = le32_to_cpu(rxbd->r.tstamp);
+ if (lo <= tstamp_lo)
+ hi -= 1;
+
+ tstamp = (u64)hi << 32 | tstamp_lo;
+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+ shhwtstamps->hwtstamp = ns_to_ktime(tstamp);
+ }
+}
+#endif
+
static void enetc_get_offloads(struct enetc_bdr *rx_ring,
union enetc_rx_bd *rxbd, struct sk_buff *skb)
{
- /* TODO: add tstamp, hashing */
+#ifdef CONFIG_FSL_ENETC_HW_TIMESTAMPING
+ struct enetc_ndev_priv *priv = netdev_priv(rx_ring->ndev);
+#endif
+ /* TODO: hashing */
if (rx_ring->ndev->features & NETIF_F_RXCSUM) {
u16 inet_csum = le16_to_cpu(rxbd->r.inet_csum);
@@ -442,6 +521,10 @@ static void enetc_get_offloads(struct enetc_bdr *rx_ring,
if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_VLAN)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
le16_to_cpu(rxbd->r.vlan_opt));
+#ifdef CONFIG_FSL_ENETC_HW_TIMESTAMPING
+ if (priv->active_offloads & ENETC_F_RX_TSTAMP)
+ enetc_get_rx_tstamp(rx_ring->ndev, rxbd, skb);
+#endif
}
static void enetc_process_skb(struct enetc_bdr *rx_ring,
@@ -1074,6 +1157,9 @@ static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
enetc_rxbdr_wr(hw, idx, ENETC_RBICIR0, ENETC_RBICIR0_ICEN | 0x1);
rbmr = ENETC_RBMR_EN;
+#ifdef CONFIG_FSL_ENETC_HW_TIMESTAMPING
+ rbmr |= ENETC_RBMR_BDS;
+#endif
if (rx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
rbmr |= ENETC_RBMR_VTE;
@@ -1341,6 +1427,62 @@ int enetc_close(struct net_device *ndev)
return 0;
}
+int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct tc_mqprio_qopt *mqprio = type_data;
+ struct enetc_bdr *tx_ring;
+ u8 num_tc;
+ int i;
+
+ if (type != TC_SETUP_QDISC_MQPRIO)
+ return -EOPNOTSUPP;
+
+ mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+ num_tc = mqprio->num_tc;
+
+ if (!num_tc) {
+ netdev_reset_tc(ndev);
+ netif_set_real_num_tx_queues(ndev, priv->num_tx_rings);
+
+ /* Reset all ring priorities to 0 */
+ for (i = 0; i < priv->num_tx_rings; i++) {
+ tx_ring = priv->tx_ring[i];
+ enetc_set_bdr_prio(&priv->si->hw, tx_ring->index, 0);
+ }
+
+ return 0;
+ }
+
+ /* Check if we have enough BD rings available to accommodate all TCs */
+ if (num_tc > priv->num_tx_rings) {
+ netdev_err(ndev, "Max %d traffic classes supported\n",
+ priv->num_tx_rings);
+ return -EINVAL;
+ }
+
+ /* For the moment, we use only one BD ring per TC.
+ *
+ * Configure num_tc BD rings with increasing priorities.
+ */
+ for (i = 0; i < num_tc; i++) {
+ tx_ring = priv->tx_ring[i];
+ enetc_set_bdr_prio(&priv->si->hw, tx_ring->index, i);
+ }
+
+ /* Reset the number of netdev queues based on the TC count */
+ netif_set_real_num_tx_queues(ndev, num_tc);
+
+ netdev_set_num_tc(ndev, num_tc);
+
+ /* Each TC is associated with one netdev queue */
+ for (i = 0; i < num_tc; i++)
+ netdev_set_tc_queue(ndev, i, 1, i);
+
+ return 0;
+}
+
struct net_device_stats *enetc_get_stats(struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
@@ -1396,6 +1538,70 @@ int enetc_set_features(struct net_device *ndev,
return 0;
}
+#ifdef CONFIG_FSL_ENETC_HW_TIMESTAMPING
+static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct hwtstamp_config config;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ priv->active_offloads &= ~ENETC_F_TX_TSTAMP;
+ break;
+ case HWTSTAMP_TX_ON:
+ priv->active_offloads |= ENETC_F_TX_TSTAMP;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ priv->active_offloads &= ~ENETC_F_RX_TSTAMP;
+ break;
+ default:
+ priv->active_offloads |= ENETC_F_RX_TSTAMP;
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ }
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
+static int enetc_hwtstamp_get(struct net_device *ndev, struct ifreq *ifr)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct hwtstamp_config config;
+
+ config.flags = 0;
+
+ if (priv->active_offloads & ENETC_F_TX_TSTAMP)
+ config.tx_type = HWTSTAMP_TX_ON;
+ else
+ config.tx_type = HWTSTAMP_TX_OFF;
+
+ config.rx_filter = (priv->active_offloads & ENETC_F_RX_TSTAMP) ?
+ HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+#endif
+
+int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
+{
+#ifdef CONFIG_FSL_ENETC_HW_TIMESTAMPING
+ if (cmd == SIOCSHWTSTAMP)
+ return enetc_hwtstamp_set(ndev, rq);
+ if (cmd == SIOCGHWTSTAMP)
+ return enetc_hwtstamp_get(ndev, rq);
+#endif
+ return -EINVAL;
+}
+
int enetc_alloc_msix(struct enetc_ndev_priv *priv)
{
struct pci_dev *pdev = priv->si->pdev;
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index b274135c5103..541b4e2073fe 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -21,7 +21,9 @@ struct enetc_tx_swbd {
struct sk_buff *skb;
dma_addr_t dma;
u16 len;
- u16 is_dma_page;
+ u8 is_dma_page:1;
+ u8 check_wb:1;
+ u8 do_tstamp:1;
};
#define ENETC_RX_MAXFRM_SIZE ENETC_MAC_MAXFRM_SIZE
@@ -167,6 +169,12 @@ struct enetc_cls_rule {
#define ENETC_MAX_BDR_INT 2 /* fixed to max # of available cpus */
+/* TODO: more hardware offloads */
+enum enetc_active_offloads {
+ ENETC_F_RX_TSTAMP = BIT(0),
+ ENETC_F_TX_TSTAMP = BIT(1),
+};
+
struct enetc_ndev_priv {
struct net_device *ndev;
struct device *dev; /* dma-mapping device */
@@ -178,6 +186,7 @@ struct enetc_ndev_priv {
u16 rx_bd_count, tx_bd_count;
u16 msg_enable;
+ int active_offloads;
struct enetc_bdr *tx_ring[16];
struct enetc_bdr *rx_ring[16];
@@ -200,6 +209,9 @@ struct enetc_msg_cmd_set_primary_mac {
#define ENETC_CBDR_TIMEOUT 1000 /* usecs */
+/* PTP driver exports */
+extern int enetc_phc_index;
+
/* SI common */
int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv);
void enetc_pci_remove(struct pci_dev *pdev);
@@ -216,6 +228,10 @@ netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev);
struct net_device_stats *enetc_get_stats(struct net_device *ndev);
int enetc_set_features(struct net_device *ndev,
netdev_features_t features);
+int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd);
+int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data);
+
/* ethtool */
void enetc_set_ethtool_ops(struct net_device *ndev);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
index b9519b6ad727..fcb52efec075 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -555,6 +555,35 @@ static void enetc_get_ringparam(struct net_device *ndev,
}
}
+static int enetc_get_ts_info(struct net_device *ndev,
+ struct ethtool_ts_info *info)
+{
+ int *phc_idx;
+
+ phc_idx = symbol_get(enetc_phc_index);
+ if (phc_idx) {
+ info->phc_index = *phc_idx;
+ symbol_put(enetc_phc_index);
+ } else {
+ info->phc_index = -1;
+ }
+
+#ifdef CONFIG_FSL_ENETC_HW_TIMESTAMPING
+ info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) |
+ (1 << HWTSTAMP_TX_ON);
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_ALL);
+#else
+ info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+#endif
+ return 0;
+}
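
The so_timestamping flags reported above correspond to what an application requests with setsockopt(SO_TIMESTAMPING). A minimal user-space sketch of asking for raw hardware timestamps on a socket (illustrative only; error handling trimmed):

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/net_tstamp.h>

#ifndef SO_TIMESTAMPING
#define SO_TIMESTAMPING 37	/* fallback for older libc headers */
#endif

int main(void)
{
	int flags = SOF_TIMESTAMPING_TX_HARDWARE |
		    SOF_TIMESTAMPING_RX_HARDWARE |
		    SOF_TIMESTAMPING_RAW_HARDWARE;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	/* Timestamps are later read back as SCM_TIMESTAMPING control
	 * messages via recvmsg() (and MSG_ERRQUEUE for TX).
	 */
	if (setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING,
		       &flags, sizeof(flags)) < 0)
		perror("SO_TIMESTAMPING");

	close(fd);
	return 0;
}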
+
static const struct ethtool_ops enetc_pf_ethtool_ops = {
.get_regs_len = enetc_get_reglen,
.get_regs = enetc_get_regs,
@@ -571,6 +600,7 @@ static const struct ethtool_ops enetc_pf_ethtool_ops = {
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
.get_link = ethtool_op_get_link,
+ .get_ts_info = enetc_get_ts_info,
};
static const struct ethtool_ops enetc_vf_ethtool_ops = {
@@ -586,6 +616,7 @@ static const struct ethtool_ops enetc_vf_ethtool_ops = {
.set_rxfh = enetc_set_rxfh,
.get_ringparam = enetc_get_ringparam,
.get_link = ethtool_op_get_link,
+ .get_ts_info = enetc_get_ts_info,
};
void enetc_set_ethtool_ops(struct net_device *ndev)
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
index df8eb8882d92..88276299f447 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -127,7 +127,7 @@ enum enetc_bdr_type {TX, RX};
#define ENETC_TBSR_BUSY BIT(0)
#define ENETC_TBMR_VIH BIT(9)
#define ENETC_TBMR_PRIO_MASK GENMASK(2, 0)
-#define ENETC_TBMR_PRIO_SET(val) val
+#define ENETC_TBMR_SET_PRIO(val) ((val) & ENETC_TBMR_PRIO_MASK)
#define ENETC_TBMR_EN BIT(31)
#define ENETC_TBSR 0x4
#define ENETC_TBBAR0 0x10
@@ -361,6 +361,12 @@ union enetc_tx_bd {
u8 e_flags;
u8 flags;
} ext; /* Tx BD extension */
+ struct {
+ __le32 tstamp;
+ u8 reserved[10];
+ u8 status;
+ u8 flags;
+ } wb; /* writeback descriptor */
};
#define ENETC_TXBD_FLAGS_L4CS BIT(0)
@@ -399,6 +405,9 @@ union enetc_rx_bd {
struct {
__le64 addr;
u8 reserved[8];
+#ifdef CONFIG_FSL_ENETC_HW_TIMESTAMPING
+ u8 reserved1[16];
+#endif
} w;
struct {
__le16 inet_csum;
@@ -413,6 +422,10 @@ union enetc_rx_bd {
};
__le32 lstatus;
};
+#ifdef CONFIG_FSL_ENETC_HW_TIMESTAMPING
+ __le32 tstamp;
+ u8 reserved[12];
+#endif
} r;
};
@@ -531,3 +544,13 @@ static inline void enetc_enable_txvlan(struct enetc_hw *hw, int si_idx,
val = (val & ~ENETC_TBMR_VIH) | (en ? ENETC_TBMR_VIH : 0);
enetc_txbdr_wr(hw, si_idx, ENETC_TBMR, val);
}
+
+static inline void enetc_set_bdr_prio(struct enetc_hw *hw, int bdr_idx,
+ int prio)
+{
+ u32 val = enetc_txbdr_rd(hw, bdr_idx, ENETC_TBMR);
+
+ val &= ~ENETC_TBMR_PRIO_MASK;
+ val |= ENETC_TBMR_SET_PRIO(prio);
+ enetc_txbdr_wr(hw, bdr_idx, ENETC_TBMR, val);
+}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index 78287c517095..258b3cb38a6f 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -702,6 +702,8 @@ static const struct net_device_ops enetc_ndev_ops = {
.ndo_set_vf_vlan = enetc_pf_set_vf_vlan,
.ndo_set_vf_spoofchk = enetc_pf_set_vf_spoofchk,
.ndo_set_features = enetc_pf_set_features,
+ .ndo_do_ioctl = enetc_ioctl,
+ .ndo_setup_tc = enetc_setup_tc,
};
static void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
index 8c1497e7d9c5..2fd2586e42bf 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
@@ -7,6 +7,9 @@
#include "enetc.h"
+int enetc_phc_index = -1;
+EXPORT_SYMBOL(enetc_phc_index);
+
static struct ptp_clock_info enetc_ptp_caps = {
.owner = THIS_MODULE,
.name = "ENETC PTP clock",
@@ -96,6 +99,7 @@ static int enetc_ptp_probe(struct pci_dev *pdev,
if (err)
goto err_no_clock;
+ enetc_phc_index = ptp_qoriq->phc_index;
pci_set_drvdata(pdev, ptp_qoriq);
return 0;
@@ -119,6 +123,7 @@ static void enetc_ptp_remove(struct pci_dev *pdev)
{
struct ptp_qoriq *ptp_qoriq = pci_get_drvdata(pdev);
+ enetc_phc_index = -1;
ptp_qoriq_free(ptp_qoriq);
kfree(ptp_qoriq);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
index 72c3ea887bcf..ebd21bf4cfa1 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
@@ -111,6 +111,8 @@ static const struct net_device_ops enetc_ndev_ops = {
.ndo_get_stats = enetc_get_stats,
.ndo_set_mac_address = enetc_vf_set_mac_addr,
.ndo_set_features = enetc_vf_set_features,
+ .ndo_do_ioctl = enetc_ioctl,
+ .ndo_setup_tc = enetc_setup_tc,
};
static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 38f10f7dcbc3..9d459ccf251d 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1689,10 +1689,10 @@ static void fec_get_mac(struct net_device *ndev)
*/
if (!is_valid_ether_addr(iap)) {
/* Report it and use a random ethernet address instead */
- netdev_err(ndev, "Invalid MAC address: %pM\n", iap);
+ dev_err(&fep->pdev->dev, "Invalid MAC address: %pM\n", iap);
eth_hw_addr_random(ndev);
- netdev_info(ndev, "Using random MAC address: %pM\n",
- ndev->dev_addr);
+ dev_info(&fep->pdev->dev, "Using random MAC address: %pM\n",
+ ndev->dev_addr);
return;
}
@@ -2446,30 +2446,31 @@ static int
fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
{
struct fec_enet_private *fep = netdev_priv(ndev);
+ struct device *dev = &fep->pdev->dev;
unsigned int cycle;
if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
return -EOPNOTSUPP;
if (ec->rx_max_coalesced_frames > 255) {
- pr_err("Rx coalesced frames exceed hardware limitation\n");
+ dev_err(dev, "Rx coalesced frames exceed hardware limitation\n");
return -EINVAL;
}
if (ec->tx_max_coalesced_frames > 255) {
- pr_err("Tx coalesced frame exceed hardware limitation\n");
+ dev_err(dev, "Tx coalesced frame exceed hardware limitation\n");
return -EINVAL;
}
cycle = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr);
if (cycle > 0xFFFF) {
- pr_err("Rx coalesced usec exceed hardware limitation\n");
+ dev_err(dev, "Rx coalesced usec exceed hardware limitation\n");
return -EINVAL;
}
cycle = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr);
if (cycle > 0xFFFF) {
- pr_err("Rx coalesced usec exceed hardware limitation\n");
+		dev_err(dev, "Tx coalesced usec exceed hardware limitation\n");
return -EINVAL;
}
@@ -3473,7 +3474,6 @@ fec_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev,
"Failed to enable phy regulator: %d\n", ret);
- clk_disable_unprepare(fep->clk_ipg);
goto failed_regulator;
}
} else {
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 7e892b1cbd3d..19e2365be7d8 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -617,7 +617,7 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx)
fep->ptp_clock = ptp_clock_register(&fep->ptp_caps, &pdev->dev);
if (IS_ERR(fep->ptp_clock)) {
fep->ptp_clock = NULL;
- pr_err("ptp_clock_register failed\n");
+ dev_err(&pdev->dev, "ptp_clock_register failed\n");
}
schedule_delayed_work(&fep->time_keep, HZ);
diff --git a/drivers/net/ethernet/freescale/fman/fman_keygen.c b/drivers/net/ethernet/freescale/fman/fman_keygen.c
index f54da3c684d0..e1bdfed16134 100644
--- a/drivers/net/ethernet/freescale/fman/fman_keygen.c
+++ b/drivers/net/ethernet/freescale/fman/fman_keygen.c
@@ -144,7 +144,8 @@
/* Hash Key extraction fields: */
#define DEFAULT_HASH_KEY_EXTRACT_FIELDS \
(KG_SCH_KN_IPSRC1 | KG_SCH_KN_IPDST1 | \
- KG_SCH_KN_L4PSRC | KG_SCH_KN_L4PDST)
+ KG_SCH_KN_L4PSRC | KG_SCH_KN_L4PDST | \
+ KG_SCH_KN_IPSEC_SPI)
/* Default values to be used as hash key in case IPv4 or L4 (TCP, UDP)
* don't exist in the frame
diff --git a/drivers/net/ethernet/google/Kconfig b/drivers/net/ethernet/google/Kconfig
new file mode 100644
index 000000000000..b8f04d052fda
--- /dev/null
+++ b/drivers/net/ethernet/google/Kconfig
@@ -0,0 +1,27 @@
+#
+# Google network device configuration
+#
+
+config NET_VENDOR_GOOGLE
+ bool "Google Devices"
+ default y
+ help
+ If you have a network (Ethernet) device belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Google devices. If you say Y, you will be asked
+ for your specific device in the following questions.
+
+if NET_VENDOR_GOOGLE
+
+config GVE
+ tristate "Google Virtual NIC (gVNIC) support"
+ depends on PCI_MSI
+ help
+	  This driver supports Google Virtual NIC (gVNIC).
+
+ To compile this driver as a module, choose M here.
+ The module will be called gve.
+
+endif #NET_VENDOR_GOOGLE
diff --git a/drivers/net/ethernet/google/Makefile b/drivers/net/ethernet/google/Makefile
new file mode 100644
index 000000000000..402cc3ba1639
--- /dev/null
+++ b/drivers/net/ethernet/google/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Google network device drivers.
+#
+
+obj-$(CONFIG_GVE) += gve/
diff --git a/drivers/net/ethernet/google/gve/Makefile b/drivers/net/ethernet/google/gve/Makefile
new file mode 100644
index 000000000000..3354ce40eb97
--- /dev/null
+++ b/drivers/net/ethernet/google/gve/Makefile
@@ -0,0 +1,4 @@
+# Makefile for the Google virtual Ethernet (gve) driver
+
+obj-$(CONFIG_GVE) += gve.o
+gve-objs := gve_main.o gve_tx.o gve_rx.o gve_ethtool.o gve_adminq.o
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
new file mode 100644
index 000000000000..92372dc43be8
--- /dev/null
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -0,0 +1,459 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
+ * Google virtual Ethernet (gve) driver
+ *
+ * Copyright (C) 2015-2019 Google, Inc.
+ */
+
+#ifndef _GVE_H_
+#define _GVE_H_
+
+#include <linux/dma-mapping.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/u64_stats_sync.h>
+#include "gve_desc.h"
+
+#ifndef PCI_VENDOR_ID_GOOGLE
+#define PCI_VENDOR_ID_GOOGLE 0x1ae0
+#endif
+
+#define PCI_DEV_ID_GVNIC 0x0042
+
+#define GVE_REGISTER_BAR 0
+#define GVE_DOORBELL_BAR 2
+
+/* Driver can alloc up to 2 segments for the header and 2 for the payload. */
+#define GVE_TX_MAX_IOVEC 4
+/* 1 for management, 1 for rx, 1 for tx */
+#define GVE_MIN_MSIX 3
+
+/* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */
+struct gve_rx_desc_queue {
+ struct gve_rx_desc *desc_ring; /* the descriptor ring */
+ dma_addr_t bus; /* the bus for the desc_ring */
+ u32 cnt; /* free-running total number of completed packets */
+ u32 fill_cnt; /* free-running total number of descriptors posted */
+ u32 mask; /* masks the cnt to the size of the ring */
+	u8 seqno; /* the next expected seqno for this desc */
+};
+
+/* The page info for a single slot in the RX data queue */
+struct gve_rx_slot_page_info {
+ struct page *page;
+ void *page_address;
+ u32 page_offset; /* offset to write to in page */
+};
+
+/* A list of pages registered with the device during setup and used by a queue
+ * as buffers
+ */
+struct gve_queue_page_list {
+ u32 id; /* unique id */
+ u32 num_entries;
+ struct page **pages; /* list of num_entries pages */
+ dma_addr_t *page_buses; /* the dma addrs of the pages */
+};
+
+/* Each slot in the data ring has a 1:1 mapping to a slot in the desc ring */
+struct gve_rx_data_queue {
+ struct gve_rx_data_slot *data_ring; /* read by NIC */
+ dma_addr_t data_bus; /* dma mapping of the slots */
+ struct gve_rx_slot_page_info *page_info; /* page info of the buffers */
+ struct gve_queue_page_list *qpl; /* qpl assigned to this queue */
+ u32 mask; /* masks the cnt to the size of the ring */
+ u32 cnt; /* free-running total number of completed packets */
+};
+
+struct gve_priv;
+
+/* An RX ring that contains a power-of-two sized desc and data ring. */
+struct gve_rx_ring {
+ struct gve_priv *gve;
+ struct gve_rx_desc_queue desc;
+ struct gve_rx_data_queue data;
+ u64 rbytes; /* free-running bytes received */
+ u64 rpackets; /* free-running packets received */
+ u32 q_num; /* queue index */
+ u32 ntfy_id; /* notification block index */
+ struct gve_queue_resources *q_resources; /* head and tail pointer idx */
+ dma_addr_t q_resources_bus; /* dma address for the queue resources */
+ struct u64_stats_sync statss; /* sync stats for 32bit archs */
+};
+
+/* A TX desc ring entry */
+union gve_tx_desc {
+ struct gve_tx_pkt_desc pkt; /* first desc for a packet */
+ struct gve_tx_seg_desc seg; /* subsequent descs for a packet */
+};
+
+/* Tracks the memory in the fifo occupied by a segment of a packet */
+struct gve_tx_iovec {
+ u32 iov_offset; /* offset into this segment */
+ u32 iov_len; /* length */
+ u32 iov_padding; /* padding associated with this segment */
+};
+
+/* Tracks the memory in the fifo occupied by the skb. Mapped 1:1 to a desc
+ * ring entry but only used for a pkt_desc not a seg_desc
+ */
+struct gve_tx_buffer_state {
+ struct sk_buff *skb; /* skb for this pkt */
+ struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */
+};
+
+/* A TX buffer - each queue has one */
+struct gve_tx_fifo {
+ void *base; /* address of base of FIFO */
+ u32 size; /* total size */
+ atomic_t available; /* how much space is still available */
+ u32 head; /* offset to write at */
+ struct gve_queue_page_list *qpl; /* QPL mapped into this FIFO */
+};
+
+/* A TX ring that contains a power-of-two sized desc ring and a FIFO buffer */
+struct gve_tx_ring {
+ /* Cacheline 0 -- Accessed & dirtied during transmit */
+ struct gve_tx_fifo tx_fifo;
+ u32 req; /* driver tracked head pointer */
+ u32 done; /* driver tracked tail pointer */
+
+ /* Cacheline 1 -- Accessed & dirtied during gve_clean_tx_done */
+ __be32 last_nic_done ____cacheline_aligned; /* NIC tail pointer */
+ u64 pkt_done; /* free-running - total packets completed */
+ u64 bytes_done; /* free-running - total bytes completed */
+
+ /* Cacheline 2 -- Read-mostly fields */
+ union gve_tx_desc *desc ____cacheline_aligned;
+ struct gve_tx_buffer_state *info; /* Maps 1:1 to a desc */
+ struct netdev_queue *netdev_txq;
+ struct gve_queue_resources *q_resources; /* head and tail pointer idx */
+ u32 mask; /* masks req and done down to queue size */
+
+ /* Slow-path fields */
+ u32 q_num ____cacheline_aligned; /* queue idx */
+ u32 stop_queue; /* count of queue stops */
+ u32 wake_queue; /* count of queue wakes */
+ u32 ntfy_id; /* notification block index */
+ dma_addr_t bus; /* dma address of the descr ring */
+ dma_addr_t q_resources_bus; /* dma address of the queue resources */
+ struct u64_stats_sync statss; /* sync stats for 32bit archs */
+} ____cacheline_aligned;
+
+/* Wraps the info for one irq including the napi struct and the queues
+ * associated with that irq.
+ */
+struct gve_notify_block {
+ __be32 irq_db_index; /* idx into Bar2 - set by device, must be 1st */
+ char name[IFNAMSIZ + 16]; /* name registered with the kernel */
+ struct napi_struct napi; /* kernel napi struct for this block */
+ struct gve_priv *priv;
+ struct gve_tx_ring *tx; /* tx rings on this block */
+ struct gve_rx_ring *rx; /* rx rings on this block */
+} ____cacheline_aligned;
+
+/* Tracks allowed and current queue settings */
+struct gve_queue_config {
+ u16 max_queues;
+ u16 num_queues; /* current */
+};
+
+/* Tracks the available and used qpl IDs */
+struct gve_qpl_config {
+ u32 qpl_map_size; /* map memory size */
+ unsigned long *qpl_id_map; /* bitmap of used qpl ids */
+};
+
+struct gve_priv {
+ struct net_device *dev;
+ struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */
+ struct gve_rx_ring *rx; /* array of rx_cfg.num_queues */
+ struct gve_queue_page_list *qpls; /* array of num qpls */
+ struct gve_notify_block *ntfy_blocks; /* array of num_ntfy_blks */
+ dma_addr_t ntfy_block_bus;
+ struct msix_entry *msix_vectors; /* array of num_ntfy_blks + 1 */
+ char mgmt_msix_name[IFNAMSIZ + 16];
+ u32 mgmt_msix_idx;
+ __be32 *counter_array; /* array of num_event_counters */
+ dma_addr_t counter_array_bus;
+
+ u16 num_event_counters;
+ u16 tx_desc_cnt; /* num desc per ring */
+ u16 rx_desc_cnt; /* num desc per ring */
+ u16 tx_pages_per_qpl; /* tx buffer length */
+ u16 rx_pages_per_qpl; /* rx buffer length */
+ u64 max_registered_pages;
+ u64 num_registered_pages; /* num pages registered with NIC */
+ u32 rx_copybreak; /* copy packets smaller than this */
+ u16 default_num_queues; /* default num queues to set up */
+
+ struct gve_queue_config tx_cfg;
+ struct gve_queue_config rx_cfg;
+ struct gve_qpl_config qpl_cfg; /* map used QPL ids */
+	u32 num_ntfy_blks; /* split between TX and RX so must be even */
+
+ struct gve_registers __iomem *reg_bar0; /* see gve_register.h */
+ __be32 __iomem *db_bar2; /* "array" of doorbells */
+ u32 msg_enable; /* level for netif* netdev print macros */
+ struct pci_dev *pdev;
+
+ /* metrics */
+ u32 tx_timeo_cnt;
+
+	/* Admin queue - see gve_adminq.h */
+ union gve_adminq_command *adminq;
+ dma_addr_t adminq_bus_addr;
+ u32 adminq_mask; /* masks prod_cnt to adminq size */
+ u32 adminq_prod_cnt; /* free-running count of AQ cmds executed */
+
+ struct workqueue_struct *gve_wq;
+ struct work_struct service_task;
+ unsigned long service_task_flags;
+ unsigned long state_flags;
+};
+
+enum gve_service_task_flags {
+ GVE_PRIV_FLAGS_DO_RESET = BIT(1),
+ GVE_PRIV_FLAGS_RESET_IN_PROGRESS = BIT(2),
+ GVE_PRIV_FLAGS_PROBE_IN_PROGRESS = BIT(3),
+};
+
+enum gve_state_flags {
+ GVE_PRIV_FLAGS_ADMIN_QUEUE_OK = BIT(1),
+ GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK = BIT(2),
+ GVE_PRIV_FLAGS_DEVICE_RINGS_OK = BIT(3),
+ GVE_PRIV_FLAGS_NAPI_ENABLED = BIT(4),
+};
+
+static inline bool gve_get_do_reset(struct gve_priv *priv)
+{
+ return test_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
+}
+
+static inline void gve_set_do_reset(struct gve_priv *priv)
+{
+ set_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
+}
+
+static inline void gve_clear_do_reset(struct gve_priv *priv)
+{
+ clear_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
+}
+
+static inline bool gve_get_reset_in_progress(struct gve_priv *priv)
+{
+ return test_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS,
+ &priv->service_task_flags);
+}
+
+static inline void gve_set_reset_in_progress(struct gve_priv *priv)
+{
+ set_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
+}
+
+static inline void gve_clear_reset_in_progress(struct gve_priv *priv)
+{
+ clear_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
+}
+
+static inline bool gve_get_probe_in_progress(struct gve_priv *priv)
+{
+ return test_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS,
+ &priv->service_task_flags);
+}
+
+static inline void gve_set_probe_in_progress(struct gve_priv *priv)
+{
+ set_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
+}
+
+static inline void gve_clear_probe_in_progress(struct gve_priv *priv)
+{
+ clear_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
+}
+
+static inline bool gve_get_admin_queue_ok(struct gve_priv *priv)
+{
+ return test_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
+}
+
+static inline void gve_set_admin_queue_ok(struct gve_priv *priv)
+{
+ set_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
+}
+
+static inline void gve_clear_admin_queue_ok(struct gve_priv *priv)
+{
+ clear_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
+}
+
+static inline bool gve_get_device_resources_ok(struct gve_priv *priv)
+{
+ return test_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
+}
+
+static inline void gve_set_device_resources_ok(struct gve_priv *priv)
+{
+ set_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
+}
+
+static inline void gve_clear_device_resources_ok(struct gve_priv *priv)
+{
+ clear_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
+}
+
+static inline bool gve_get_device_rings_ok(struct gve_priv *priv)
+{
+ return test_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
+}
+
+static inline void gve_set_device_rings_ok(struct gve_priv *priv)
+{
+ set_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
+}
+
+static inline void gve_clear_device_rings_ok(struct gve_priv *priv)
+{
+ clear_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
+}
+
+static inline bool gve_get_napi_enabled(struct gve_priv *priv)
+{
+ return test_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
+}
+
+static inline void gve_set_napi_enabled(struct gve_priv *priv)
+{
+ set_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
+}
+
+static inline void gve_clear_napi_enabled(struct gve_priv *priv)
+{
+ clear_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
+}
+
+/* Returns the address of the ntfy_blocks irq doorbell
+ */
+static inline __be32 __iomem *gve_irq_doorbell(struct gve_priv *priv,
+ struct gve_notify_block *block)
+{
+ return &priv->db_bar2[be32_to_cpu(block->irq_db_index)];
+}
+
+/* Returns the index into ntfy_blocks of the given tx ring's block
+ */
+static inline u32 gve_tx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
+{
+ return queue_idx;
+}
+
+/* Returns the index into ntfy_blocks of the given rx ring's block
+ */
+static inline u32 gve_rx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
+{
+ return (priv->num_ntfy_blks / 2) + queue_idx;
+}
+
+/* Returns the number of tx queue page lists
+ */
+static inline u32 gve_num_tx_qpls(struct gve_priv *priv)
+{
+ return priv->tx_cfg.num_queues;
+}
+
+/* Returns the number of rx queue page lists
+ */
+static inline u32 gve_num_rx_qpls(struct gve_priv *priv)
+{
+ return priv->rx_cfg.num_queues;
+}
+
+/* Returns a pointer to the next available tx qpl in the list of qpls
+ */
+static inline
+struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_priv *priv)
+{
+ int id = find_first_zero_bit(priv->qpl_cfg.qpl_id_map,
+ priv->qpl_cfg.qpl_map_size);
+
+ /* we are out of tx qpls */
+ if (id >= gve_num_tx_qpls(priv))
+ return NULL;
+
+ set_bit(id, priv->qpl_cfg.qpl_id_map);
+ return &priv->qpls[id];
+}
+
+/* Returns a pointer to the next available rx qpl in the list of qpls
+ */
+static inline
+struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_priv *priv)
+{
+ int id = find_next_zero_bit(priv->qpl_cfg.qpl_id_map,
+ priv->qpl_cfg.qpl_map_size,
+ gve_num_tx_qpls(priv));
+
+ /* we are out of rx qpls */
+ if (id == priv->qpl_cfg.qpl_map_size)
+ return NULL;
+
+ set_bit(id, priv->qpl_cfg.qpl_id_map);
+ return &priv->qpls[id];
+}
+
+/* Unassigns the qpl with the given id
+ */
+static inline void gve_unassign_qpl(struct gve_priv *priv, int id)
+{
+ clear_bit(id, priv->qpl_cfg.qpl_id_map);
+}
+
+/* Returns the correct dma direction for tx and rx qpls
+ */
+static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv,
+ int id)
+{
+ if (id < gve_num_tx_qpls(priv))
+ return DMA_TO_DEVICE;
+ else
+ return DMA_FROM_DEVICE;
+}
+
+/* Returns true if the max mtu allows page recycling */
+static inline bool gve_can_recycle_pages(struct net_device *dev)
+{
+ /* We can't recycle the pages if we can't fit a packet into half a
+ * page.
+ */
+ return dev->max_mtu <= PAGE_SIZE / 2;
+}
+
+/* buffers */
+int gve_alloc_page(struct device *dev, struct page **page, dma_addr_t *dma,
+ enum dma_data_direction);
+void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
+ enum dma_data_direction);
+/* tx handling */
+netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
+bool gve_tx_poll(struct gve_notify_block *block, int budget);
+int gve_tx_alloc_rings(struct gve_priv *priv);
+void gve_tx_free_rings(struct gve_priv *priv);
+__be32 gve_tx_load_event_counter(struct gve_priv *priv,
+ struct gve_tx_ring *tx);
+/* rx handling */
+void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx);
+bool gve_rx_poll(struct gve_notify_block *block, int budget);
+int gve_rx_alloc_rings(struct gve_priv *priv);
+void gve_rx_free_rings(struct gve_priv *priv);
+bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
+ netdev_features_t feat);
+/* Reset */
+void gve_schedule_reset(struct gve_priv *priv);
+int gve_reset(struct gve_priv *priv, bool attempt_teardown);
+int gve_adjust_queues(struct gve_priv *priv,
+ struct gve_queue_config new_rx_config,
+ struct gve_queue_config new_tx_config);
+/* exported by ethtool.c */
+extern const struct ethtool_ops gve_ethtool_ops;
+/* needed by ethtool */
+extern const char gve_version_str[];
+#endif /* _GVE_H_ */
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
new file mode 100644
index 000000000000..c3ba7baf0107
--- /dev/null
+++ b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -0,0 +1,387 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Google virtual Ethernet (gve) driver
+ *
+ * Copyright (C) 2015-2019 Google, Inc.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/pci.h>
+#include "gve.h"
+#include "gve_adminq.h"
+#include "gve_register.h"
+
+#define GVE_MAX_ADMINQ_RELEASE_CHECK 500
+#define GVE_ADMINQ_SLEEP_LEN 20
+#define GVE_MAX_ADMINQ_EVENT_COUNTER_CHECK 100
+
+int gve_adminq_alloc(struct device *dev, struct gve_priv *priv)
+{
+ priv->adminq = dma_alloc_coherent(dev, PAGE_SIZE,
+ &priv->adminq_bus_addr, GFP_KERNEL);
+ if (unlikely(!priv->adminq))
+ return -ENOMEM;
+
+ priv->adminq_mask = (PAGE_SIZE / sizeof(union gve_adminq_command)) - 1;
+ priv->adminq_prod_cnt = 0;
+
+ /* Setup Admin queue with the device */
+ iowrite32be(priv->adminq_bus_addr / PAGE_SIZE,
+ &priv->reg_bar0->adminq_pfn);
+
+ gve_set_admin_queue_ok(priv);
+ return 0;
+}
+
+void gve_adminq_release(struct gve_priv *priv)
+{
+ int i = 0;
+
+ /* Tell the device the adminq is leaving */
+ iowrite32be(0x0, &priv->reg_bar0->adminq_pfn);
+ while (ioread32be(&priv->reg_bar0->adminq_pfn)) {
+ /* If this is reached the device is unrecoverable and still
+ * holding memory. Continue looping to avoid memory corruption,
+ * but WARN so it is visible what is going on.
+ */
+ if (i == GVE_MAX_ADMINQ_RELEASE_CHECK)
+ WARN(1, "Unrecoverable platform error!");
+ i++;
+ msleep(GVE_ADMINQ_SLEEP_LEN);
+ }
+ gve_clear_device_rings_ok(priv);
+ gve_clear_device_resources_ok(priv);
+ gve_clear_admin_queue_ok(priv);
+}
+
+void gve_adminq_free(struct device *dev, struct gve_priv *priv)
+{
+ if (!gve_get_admin_queue_ok(priv))
+ return;
+ gve_adminq_release(priv);
+ dma_free_coherent(dev, PAGE_SIZE, priv->adminq, priv->adminq_bus_addr);
+ gve_clear_admin_queue_ok(priv);
+}
+
+static void gve_adminq_kick_cmd(struct gve_priv *priv, u32 prod_cnt)
+{
+ iowrite32be(prod_cnt, &priv->reg_bar0->adminq_doorbell);
+}
+
+static bool gve_adminq_wait_for_cmd(struct gve_priv *priv, u32 prod_cnt)
+{
+ int i;
+
+ for (i = 0; i < GVE_MAX_ADMINQ_EVENT_COUNTER_CHECK; i++) {
+ if (ioread32be(&priv->reg_bar0->adminq_event_counter)
+ == prod_cnt)
+ return true;
+ msleep(GVE_ADMINQ_SLEEP_LEN);
+ }
+
+ return false;
+}
+
+static int gve_adminq_parse_err(struct device *dev, u32 status)
+{
+ if (status != GVE_ADMINQ_COMMAND_PASSED &&
+ status != GVE_ADMINQ_COMMAND_UNSET)
+ dev_err(dev, "AQ command failed with status %d\n", status);
+
+ switch (status) {
+ case GVE_ADMINQ_COMMAND_PASSED:
+ return 0;
+ case GVE_ADMINQ_COMMAND_UNSET:
+ dev_err(dev, "parse_aq_err: err and status both unset, this should not be possible.\n");
+ return -EINVAL;
+ case GVE_ADMINQ_COMMAND_ERROR_ABORTED:
+ case GVE_ADMINQ_COMMAND_ERROR_CANCELLED:
+ case GVE_ADMINQ_COMMAND_ERROR_DATALOSS:
+ case GVE_ADMINQ_COMMAND_ERROR_FAILED_PRECONDITION:
+ case GVE_ADMINQ_COMMAND_ERROR_UNAVAILABLE:
+ return -EAGAIN;
+ case GVE_ADMINQ_COMMAND_ERROR_ALREADY_EXISTS:
+ case GVE_ADMINQ_COMMAND_ERROR_INTERNAL_ERROR:
+ case GVE_ADMINQ_COMMAND_ERROR_INVALID_ARGUMENT:
+ case GVE_ADMINQ_COMMAND_ERROR_NOT_FOUND:
+ case GVE_ADMINQ_COMMAND_ERROR_OUT_OF_RANGE:
+ case GVE_ADMINQ_COMMAND_ERROR_UNKNOWN_ERROR:
+ return -EINVAL;
+ case GVE_ADMINQ_COMMAND_ERROR_DEADLINE_EXCEEDED:
+ return -ETIME;
+ case GVE_ADMINQ_COMMAND_ERROR_PERMISSION_DENIED:
+ case GVE_ADMINQ_COMMAND_ERROR_UNAUTHENTICATED:
+ return -EACCES;
+ case GVE_ADMINQ_COMMAND_ERROR_RESOURCE_EXHAUSTED:
+ return -ENOMEM;
+ case GVE_ADMINQ_COMMAND_ERROR_UNIMPLEMENTED:
+ return -ENOTSUPP;
+ default:
+ dev_err(dev, "parse_aq_err: unknown status code %d\n", status);
+ return -EINVAL;
+ }
+}
+
+/* This function is not threadsafe - the caller is responsible for any
+ * necessary locks.
+ */
+int gve_adminq_execute_cmd(struct gve_priv *priv,
+ union gve_adminq_command *cmd_orig)
+{
+ union gve_adminq_command *cmd;
+ u32 status = 0;
+ u32 prod_cnt;
+
+ cmd = &priv->adminq[priv->adminq_prod_cnt & priv->adminq_mask];
+ priv->adminq_prod_cnt++;
+ prod_cnt = priv->adminq_prod_cnt;
+
+ memcpy(cmd, cmd_orig, sizeof(*cmd_orig));
+
+ gve_adminq_kick_cmd(priv, prod_cnt);
+ if (!gve_adminq_wait_for_cmd(priv, prod_cnt)) {
+ dev_err(&priv->pdev->dev, "AQ command timed out, need to reset AQ\n");
+ return -ENOTRECOVERABLE;
+ }
+
+ memcpy(cmd_orig, cmd, sizeof(*cmd));
+ status = be32_to_cpu(READ_ONCE(cmd->status));
+ return gve_adminq_parse_err(&priv->pdev->dev, status);
+}
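
gve_adminq_execute_cmd() picks the next command slot by masking a free-running producer count with adminq_mask, which gve_adminq_alloc() sets to the power-of-two queue size minus one. A small stand-alone model of that indexing scheme (illustrative only; the ring size of 64 is arbitrary):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 64	/* arbitrary power of two for the sketch */

int main(void)
{
	uint32_t mask = RING_SIZE - 1;	/* same idea as priv->adminq_mask */
	uint32_t prod_cnt;

	/* The producer count is never reset; masking folds it into a valid
	 * slot index on every use, wrapping naturally at the ring boundary.
	 */
	for (prod_cnt = 62; prod_cnt < 67; prod_cnt++)
		printf("cmd %" PRIu32 " -> slot %" PRIu32 "\n",
		       prod_cnt, prod_cnt & mask);
	return 0;
}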
+
+/* The device specifies that the management vector can either be the first irq
+ * or the last irq. ntfy_blk_msix_base_idx indicates the first irq assigned to
+ * the ntfy blks. If it is 0 then the management vector is last; if it is 1 then
+ * the management vector is first.
+ *
+ * gve arranges the msix vectors so that the management vector is last.
+ */
+#define GVE_NTFY_BLK_BASE_MSIX_IDX 0
+int gve_adminq_configure_device_resources(struct gve_priv *priv,
+ dma_addr_t counter_array_bus_addr,
+ u32 num_counters,
+ dma_addr_t db_array_bus_addr,
+ u32 num_ntfy_blks)
+{
+ union gve_adminq_command cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.opcode = cpu_to_be32(GVE_ADMINQ_CONFIGURE_DEVICE_RESOURCES);
+ cmd.configure_device_resources =
+ (struct gve_adminq_configure_device_resources) {
+ .counter_array = cpu_to_be64(counter_array_bus_addr),
+ .num_counters = cpu_to_be32(num_counters),
+ .irq_db_addr = cpu_to_be64(db_array_bus_addr),
+ .num_irq_dbs = cpu_to_be32(num_ntfy_blks),
+ .irq_db_stride = cpu_to_be32(sizeof(priv->ntfy_blocks[0])),
+ .ntfy_blk_msix_base_idx =
+ cpu_to_be32(GVE_NTFY_BLK_BASE_MSIX_IDX),
+ };
+
+ return gve_adminq_execute_cmd(priv, &cmd);
+}
+
+int gve_adminq_deconfigure_device_resources(struct gve_priv *priv)
+{
+ union gve_adminq_command cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.opcode = cpu_to_be32(GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES);
+
+ return gve_adminq_execute_cmd(priv, &cmd);
+}
+
+int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index)
+{
+ struct gve_tx_ring *tx = &priv->tx[queue_index];
+ union gve_adminq_command cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.opcode = cpu_to_be32(GVE_ADMINQ_CREATE_TX_QUEUE);
+ cmd.create_tx_queue = (struct gve_adminq_create_tx_queue) {
+ .queue_id = cpu_to_be32(queue_index),
+ .reserved = 0,
+ .queue_resources_addr = cpu_to_be64(tx->q_resources_bus),
+ .tx_ring_addr = cpu_to_be64(tx->bus),
+ .queue_page_list_id = cpu_to_be32(tx->tx_fifo.qpl->id),
+ .ntfy_id = cpu_to_be32(tx->ntfy_id),
+ };
+
+ return gve_adminq_execute_cmd(priv, &cmd);
+}
+
+int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
+{
+ struct gve_rx_ring *rx = &priv->rx[queue_index];
+ union gve_adminq_command cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.opcode = cpu_to_be32(GVE_ADMINQ_CREATE_RX_QUEUE);
+ cmd.create_rx_queue = (struct gve_adminq_create_rx_queue) {
+ .queue_id = cpu_to_be32(queue_index),
+ .index = cpu_to_be32(queue_index),
+ .reserved = 0,
+ .ntfy_id = cpu_to_be32(rx->ntfy_id),
+ .queue_resources_addr = cpu_to_be64(rx->q_resources_bus),
+ .rx_desc_ring_addr = cpu_to_be64(rx->desc.bus),
+ .rx_data_ring_addr = cpu_to_be64(rx->data.data_bus),
+ .queue_page_list_id = cpu_to_be32(rx->data.qpl->id),
+ };
+
+ return gve_adminq_execute_cmd(priv, &cmd);
+}
+
+int gve_adminq_destroy_tx_queue(struct gve_priv *priv, u32 queue_index)
+{
+ union gve_adminq_command cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESTROY_TX_QUEUE);
+ cmd.destroy_tx_queue = (struct gve_adminq_destroy_tx_queue) {
+ .queue_id = cpu_to_be32(queue_index),
+ };
+
+ return gve_adminq_execute_cmd(priv, &cmd);
+}
+
+int gve_adminq_destroy_rx_queue(struct gve_priv *priv, u32 queue_index)
+{
+ union gve_adminq_command cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESTROY_RX_QUEUE);
+ cmd.destroy_rx_queue = (struct gve_adminq_destroy_rx_queue) {
+ .queue_id = cpu_to_be32(queue_index),
+ };
+
+ return gve_adminq_execute_cmd(priv, &cmd);
+}
+
+int gve_adminq_describe_device(struct gve_priv *priv)
+{
+ struct gve_device_descriptor *descriptor;
+ union gve_adminq_command cmd;
+ dma_addr_t descriptor_bus;
+ int err = 0;
+ u8 *mac;
+ u16 mtu;
+
+ memset(&cmd, 0, sizeof(cmd));
+ descriptor = dma_alloc_coherent(&priv->pdev->dev, PAGE_SIZE,
+ &descriptor_bus, GFP_KERNEL);
+ if (!descriptor)
+ return -ENOMEM;
+ cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESCRIBE_DEVICE);
+ cmd.describe_device.device_descriptor_addr =
+ cpu_to_be64(descriptor_bus);
+ cmd.describe_device.device_descriptor_version =
+ cpu_to_be32(GVE_ADMINQ_DEVICE_DESCRIPTOR_VERSION);
+ cmd.describe_device.available_length = cpu_to_be32(PAGE_SIZE);
+
+ err = gve_adminq_execute_cmd(priv, &cmd);
+ if (err)
+ goto free_device_descriptor;
+
+ priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
+ if (priv->tx_desc_cnt * sizeof(priv->tx->desc[0]) < PAGE_SIZE) {
+ netif_err(priv, drv, priv->dev, "Tx desc count %d too low\n",
+ priv->tx_desc_cnt);
+ err = -EINVAL;
+ goto free_device_descriptor;
+ }
+ priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);
+ if (priv->rx_desc_cnt * sizeof(priv->rx->desc.desc_ring[0])
+ < PAGE_SIZE ||
+ priv->rx_desc_cnt * sizeof(priv->rx->data.data_ring[0])
+ < PAGE_SIZE) {
+ netif_err(priv, drv, priv->dev, "Rx desc count %d too low\n",
+ priv->rx_desc_cnt);
+ err = -EINVAL;
+ goto free_device_descriptor;
+ }
+ priv->max_registered_pages =
+ be64_to_cpu(descriptor->max_registered_pages);
+ mtu = be16_to_cpu(descriptor->mtu);
+ if (mtu < ETH_MIN_MTU) {
+ netif_err(priv, drv, priv->dev, "MTU %d below minimum MTU\n",
+ mtu);
+ err = -EINVAL;
+ goto free_device_descriptor;
+ }
+ priv->dev->max_mtu = mtu;
+ priv->num_event_counters = be16_to_cpu(descriptor->counters);
+ ether_addr_copy(priv->dev->dev_addr, descriptor->mac);
+ mac = descriptor->mac;
+ netif_info(priv, drv, priv->dev, "MAC addr: %pM\n", mac);
+ priv->tx_pages_per_qpl = be16_to_cpu(descriptor->tx_pages_per_qpl);
+ priv->rx_pages_per_qpl = be16_to_cpu(descriptor->rx_pages_per_qpl);
+ if (priv->rx_pages_per_qpl < priv->rx_desc_cnt) {
+ netif_err(priv, drv, priv->dev, "rx_pages_per_qpl cannot be smaller than rx_desc_cnt, setting rx_desc_cnt down to %d.\n",
+ priv->rx_pages_per_qpl);
+ priv->rx_desc_cnt = priv->rx_pages_per_qpl;
+ }
+ priv->default_num_queues = be16_to_cpu(descriptor->default_num_queues);
+
+free_device_descriptor:
+ dma_free_coherent(&priv->pdev->dev, PAGE_SIZE, descriptor,
+ descriptor_bus);
+ return err;
+}
+
+int gve_adminq_register_page_list(struct gve_priv *priv,
+ struct gve_queue_page_list *qpl)
+{
+ struct device *hdev = &priv->pdev->dev;
+ u32 num_entries = qpl->num_entries;
+ u32 size = num_entries * sizeof(qpl->page_buses[0]);
+ union gve_adminq_command cmd;
+ dma_addr_t page_list_bus;
+ __be64 *page_list;
+ int err;
+ int i;
+
+ memset(&cmd, 0, sizeof(cmd));
+ page_list = dma_alloc_coherent(hdev, size, &page_list_bus, GFP_KERNEL);
+ if (!page_list)
+ return -ENOMEM;
+
+ for (i = 0; i < num_entries; i++)
+ page_list[i] = cpu_to_be64(qpl->page_buses[i]);
+
+ cmd.opcode = cpu_to_be32(GVE_ADMINQ_REGISTER_PAGE_LIST);
+ cmd.reg_page_list = (struct gve_adminq_register_page_list) {
+ .page_list_id = cpu_to_be32(qpl->id),
+ .num_pages = cpu_to_be32(num_entries),
+ .page_address_list_addr = cpu_to_be64(page_list_bus),
+ };
+
+ err = gve_adminq_execute_cmd(priv, &cmd);
+ dma_free_coherent(hdev, size, page_list, page_list_bus);
+ return err;
+}
+
+int gve_adminq_unregister_page_list(struct gve_priv *priv, u32 page_list_id)
+{
+ union gve_adminq_command cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.opcode = cpu_to_be32(GVE_ADMINQ_UNREGISTER_PAGE_LIST);
+ cmd.unreg_page_list = (struct gve_adminq_unregister_page_list) {
+ .page_list_id = cpu_to_be32(page_list_id),
+ };
+
+ return gve_adminq_execute_cmd(priv, &cmd);
+}
+
+int gve_adminq_set_mtu(struct gve_priv *priv, u64 mtu)
+{
+ union gve_adminq_command cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.opcode = cpu_to_be32(GVE_ADMINQ_SET_DRIVER_PARAMETER);
+ cmd.set_driver_param = (struct gve_adminq_set_driver_parameter) {
+ .parameter_type = cpu_to_be32(GVE_SET_PARAM_MTU),
+ .parameter_value = cpu_to_be64(mtu),
+ };
+
+ return gve_adminq_execute_cmd(priv, &cmd);
+}
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.h b/drivers/net/ethernet/google/gve/gve_adminq.h
new file mode 100644
index 000000000000..4dfa06edc0f8
--- /dev/null
+++ b/drivers/net/ethernet/google/gve/gve_adminq.h
@@ -0,0 +1,217 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
+ * Google virtual Ethernet (gve) driver
+ *
+ * Copyright (C) 2015-2019 Google, Inc.
+ */
+
+#ifndef _GVE_ADMINQ_H
+#define _GVE_ADMINQ_H
+
+#include <linux/build_bug.h>
+
+/* Admin queue opcodes */
+enum gve_adminq_opcodes {
+ GVE_ADMINQ_DESCRIBE_DEVICE = 0x1,
+ GVE_ADMINQ_CONFIGURE_DEVICE_RESOURCES = 0x2,
+ GVE_ADMINQ_REGISTER_PAGE_LIST = 0x3,
+ GVE_ADMINQ_UNREGISTER_PAGE_LIST = 0x4,
+ GVE_ADMINQ_CREATE_TX_QUEUE = 0x5,
+ GVE_ADMINQ_CREATE_RX_QUEUE = 0x6,
+ GVE_ADMINQ_DESTROY_TX_QUEUE = 0x7,
+ GVE_ADMINQ_DESTROY_RX_QUEUE = 0x8,
+ GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES = 0x9,
+ GVE_ADMINQ_SET_DRIVER_PARAMETER = 0xB,
+};
+
+/* Admin queue status codes */
+enum gve_adminq_statuses {
+ GVE_ADMINQ_COMMAND_UNSET = 0x0,
+ GVE_ADMINQ_COMMAND_PASSED = 0x1,
+ GVE_ADMINQ_COMMAND_ERROR_ABORTED = 0xFFFFFFF0,
+ GVE_ADMINQ_COMMAND_ERROR_ALREADY_EXISTS = 0xFFFFFFF1,
+ GVE_ADMINQ_COMMAND_ERROR_CANCELLED = 0xFFFFFFF2,
+ GVE_ADMINQ_COMMAND_ERROR_DATALOSS = 0xFFFFFFF3,
+ GVE_ADMINQ_COMMAND_ERROR_DEADLINE_EXCEEDED = 0xFFFFFFF4,
+ GVE_ADMINQ_COMMAND_ERROR_FAILED_PRECONDITION = 0xFFFFFFF5,
+ GVE_ADMINQ_COMMAND_ERROR_INTERNAL_ERROR = 0xFFFFFFF6,
+ GVE_ADMINQ_COMMAND_ERROR_INVALID_ARGUMENT = 0xFFFFFFF7,
+ GVE_ADMINQ_COMMAND_ERROR_NOT_FOUND = 0xFFFFFFF8,
+ GVE_ADMINQ_COMMAND_ERROR_OUT_OF_RANGE = 0xFFFFFFF9,
+ GVE_ADMINQ_COMMAND_ERROR_PERMISSION_DENIED = 0xFFFFFFFA,
+ GVE_ADMINQ_COMMAND_ERROR_UNAUTHENTICATED = 0xFFFFFFFB,
+ GVE_ADMINQ_COMMAND_ERROR_RESOURCE_EXHAUSTED = 0xFFFFFFFC,
+ GVE_ADMINQ_COMMAND_ERROR_UNAVAILABLE = 0xFFFFFFFD,
+ GVE_ADMINQ_COMMAND_ERROR_UNIMPLEMENTED = 0xFFFFFFFE,
+ GVE_ADMINQ_COMMAND_ERROR_UNKNOWN_ERROR = 0xFFFFFFFF,
+};
+
+#define GVE_ADMINQ_DEVICE_DESCRIPTOR_VERSION 1
+
+/* All AdminQ command structs should be naturally packed. The static_assert
+ * calls make sure this is the case at compile time.
+ */
+
+struct gve_adminq_describe_device {
+ __be64 device_descriptor_addr;
+ __be32 device_descriptor_version;
+ __be32 available_length;
+};
+
+static_assert(sizeof(struct gve_adminq_describe_device) == 16);
+
+struct gve_device_descriptor {
+ __be64 max_registered_pages;
+ __be16 reserved1;
+ __be16 tx_queue_entries;
+ __be16 rx_queue_entries;
+ __be16 default_num_queues;
+ __be16 mtu;
+ __be16 counters;
+ __be16 tx_pages_per_qpl;
+ __be16 rx_pages_per_qpl;
+ u8 mac[ETH_ALEN];
+ __be16 num_device_options;
+ __be16 total_length;
+ u8 reserved2[6];
+};
+
+static_assert(sizeof(struct gve_device_descriptor) == 40);
+
+struct device_option {
+ __be32 option_id;
+ __be32 option_length;
+};
+
+static_assert(sizeof(struct device_option) == 8);
+
+struct gve_adminq_configure_device_resources {
+ __be64 counter_array;
+ __be64 irq_db_addr;
+ __be32 num_counters;
+ __be32 num_irq_dbs;
+ __be32 irq_db_stride;
+ __be32 ntfy_blk_msix_base_idx;
+};
+
+static_assert(sizeof(struct gve_adminq_configure_device_resources) == 32);
+
+struct gve_adminq_register_page_list {
+ __be32 page_list_id;
+ __be32 num_pages;
+ __be64 page_address_list_addr;
+};
+
+static_assert(sizeof(struct gve_adminq_register_page_list) == 16);
+
+struct gve_adminq_unregister_page_list {
+ __be32 page_list_id;
+};
+
+static_assert(sizeof(struct gve_adminq_unregister_page_list) == 4);
+
+struct gve_adminq_create_tx_queue {
+ __be32 queue_id;
+ __be32 reserved;
+ __be64 queue_resources_addr;
+ __be64 tx_ring_addr;
+ __be32 queue_page_list_id;
+ __be32 ntfy_id;
+};
+
+static_assert(sizeof(struct gve_adminq_create_tx_queue) == 32);
+
+struct gve_adminq_create_rx_queue {
+ __be32 queue_id;
+ __be32 index;
+ __be32 reserved;
+ __be32 ntfy_id;
+ __be64 queue_resources_addr;
+ __be64 rx_desc_ring_addr;
+ __be64 rx_data_ring_addr;
+ __be32 queue_page_list_id;
+ u8 padding[4];
+};
+
+static_assert(sizeof(struct gve_adminq_create_rx_queue) == 48);
+
+/* Queue resources that are shared with the device */
+struct gve_queue_resources {
+ union {
+ struct {
+ __be32 db_index; /* Device -> Guest */
+ __be32 counter_index; /* Device -> Guest */
+ };
+ u8 reserved[64];
+ };
+};
+
+static_assert(sizeof(struct gve_queue_resources) == 64);
+
+struct gve_adminq_destroy_tx_queue {
+ __be32 queue_id;
+};
+
+static_assert(sizeof(struct gve_adminq_destroy_tx_queue) == 4);
+
+struct gve_adminq_destroy_rx_queue {
+ __be32 queue_id;
+};
+
+static_assert(sizeof(struct gve_adminq_destroy_rx_queue) == 4);
+
+/* GVE Set Driver Parameter Types */
+enum gve_set_driver_param_types {
+ GVE_SET_PARAM_MTU = 0x1,
+};
+
+struct gve_adminq_set_driver_parameter {
+ __be32 parameter_type;
+ u8 reserved[4];
+ __be64 parameter_value;
+};
+
+static_assert(sizeof(struct gve_adminq_set_driver_parameter) == 16);
+
+union gve_adminq_command {
+ struct {
+ __be32 opcode;
+ __be32 status;
+ union {
+ struct gve_adminq_configure_device_resources
+ configure_device_resources;
+ struct gve_adminq_create_tx_queue create_tx_queue;
+ struct gve_adminq_create_rx_queue create_rx_queue;
+ struct gve_adminq_destroy_tx_queue destroy_tx_queue;
+ struct gve_adminq_destroy_rx_queue destroy_rx_queue;
+ struct gve_adminq_describe_device describe_device;
+ struct gve_adminq_register_page_list reg_page_list;
+ struct gve_adminq_unregister_page_list unreg_page_list;
+ struct gve_adminq_set_driver_parameter set_driver_param;
+ };
+ };
+ u8 reserved[64];
+};
+
+static_assert(sizeof(union gve_adminq_command) == 64);
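+/* Every admin queue command therefore occupies one fixed 64-byte slot: the
+ * opcode and status words are common to all command types and the trailing
+ * reserved[64] member pads each slot to the full slot size.
+ */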
+
+int gve_adminq_alloc(struct device *dev, struct gve_priv *priv);
+void gve_adminq_free(struct device *dev, struct gve_priv *priv);
+void gve_adminq_release(struct gve_priv *priv);
+int gve_adminq_execute_cmd(struct gve_priv *priv,
+ union gve_adminq_command *cmd_orig);
+int gve_adminq_describe_device(struct gve_priv *priv);
+int gve_adminq_configure_device_resources(struct gve_priv *priv,
+ dma_addr_t counter_array_bus_addr,
+ u32 num_counters,
+ dma_addr_t db_array_bus_addr,
+ u32 num_ntfy_blks);
+int gve_adminq_deconfigure_device_resources(struct gve_priv *priv);
+int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_id);
+int gve_adminq_destroy_tx_queue(struct gve_priv *priv, u32 queue_id);
+int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_id);
+int gve_adminq_destroy_rx_queue(struct gve_priv *priv, u32 queue_id);
+int gve_adminq_register_page_list(struct gve_priv *priv,
+ struct gve_queue_page_list *qpl);
+int gve_adminq_unregister_page_list(struct gve_priv *priv, u32 page_list_id);
+int gve_adminq_set_mtu(struct gve_priv *priv, u64 mtu);
+#endif /* _GVE_ADMINQ_H */
diff --git a/drivers/net/ethernet/google/gve/gve_desc.h b/drivers/net/ethernet/google/gve/gve_desc.h
new file mode 100644
index 000000000000..54779871d52e
--- /dev/null
+++ b/drivers/net/ethernet/google/gve/gve_desc.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
+ * Google virtual Ethernet (gve) driver
+ *
+ * Copyright (C) 2015-2019 Google, Inc.
+ */
+
+/* GVE Transmit Descriptor formats */
+
+#ifndef _GVE_DESC_H_
+#define _GVE_DESC_H_
+
+#include <linux/build_bug.h>
+
+/* A note on seg_addrs
+ *
+ * Base addresses encoded in seg_addr are not assumed to be physical
+ * addresses. The ring format assumes these come from some linear address
+ * space. This could be physical memory, kernel virtual memory, user virtual
+ * memory. gVNIC uses lists of registered pages. Each queue is assumed
+ * to be associated with a single such linear address space to ensure a
+ * consistent meaning for seg_addrs posted to its rings.
+ */
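+/* Illustrative example for the queue-page-list mode used by this driver: a
+ * seg_addr of (2 * PAGE_SIZE + 0x80) names byte 0x80 of the third page
+ * registered for that queue.
+ */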
+
+struct gve_tx_pkt_desc {
+ u8 type_flags; /* desc type is lower 4 bits, flags upper */
+ u8 l4_csum_offset; /* relative offset of L4 csum word */
+ u8 l4_hdr_offset; /* Offset of start of L4 headers in packet */
+ u8 desc_cnt; /* Total descriptors for this packet */
+ __be16 len; /* Total length of this packet (in bytes) */
+ __be16 seg_len; /* Length of this descriptor's segment */
+ __be64 seg_addr; /* Base address (see note) of this segment */
+} __packed;
+
+struct gve_tx_seg_desc {
+ u8 type_flags; /* type is lower 4 bits, flags upper */
+ u8 l3_offset; /* TSO: 2 byte units to start of IPH */
+ __be16 reserved;
+ __be16 mss; /* TSO MSS */
+ __be16 seg_len;
+ __be64 seg_addr;
+} __packed;
+
+/* GVE Transmit Descriptor Types */
+#define GVE_TXD_STD (0x0 << 4) /* Std with Host Address */
+#define GVE_TXD_TSO (0x1 << 4) /* TSO with Host Address */
+#define GVE_TXD_SEG (0x2 << 4) /* Seg with Host Address */
+
+/* GVE Transmit Descriptor Flags for Std Pkts */
+#define GVE_TXF_L4CSUM BIT(0) /* Need csum offload */
+#define GVE_TXF_TSTAMP BIT(2) /* Timestamp required */
+
+/* GVE Transmit Descriptor Flags for TSO Segs */
+#define GVE_TXSF_IPV6 BIT(1) /* IPv6 TSO */
+
+/* GVE Receive Packet Descriptor */
+/* The start of an ethernet packet comes 2 bytes into the rx buffer.
+ * gVNIC adds this padding so that both the DMA and the L3/4 protocol header
+ * accesses are aligned.
+ */
+#define GVE_RX_PAD 2
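+/* Example: with the 2 byte pad the 14 byte Ethernet header ends at offset 16,
+ * so the IP header begins on a naturally aligned boundary.
+ */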
+
+struct gve_rx_desc {
+ u8 padding[48];
+ __be32 rss_hash; /* Receive-side scaling hash (Toeplitz for gVNIC) */
+ __be16 mss;
+ __be16 reserved; /* Reserved to zero */
+ u8 hdr_len; /* Header length (L2-L4) including padding */
+ u8 hdr_off; /* 64-byte-scaled offset into RX_DATA entry */
+ __sum16 csum; /* 1's-complement partial checksum of L3+ bytes */
+ __be16 len; /* Length of the received packet */
+ __be16 flags_seq; /* Flags [15:3] and sequence number [2:0] (1-7) */
+} __packed;
+static_assert(sizeof(struct gve_rx_desc) == 64);
+
+/* As with the Tx ring format, the qpl_offset entries below are offsets into an
+ * ordered list of registered pages.
+ */
+struct gve_rx_data_slot {
+ /* byte offset into the rx registered segment of this slot */
+ __be64 qpl_offset;
+};
+
+/* GVE Receive Packet Descriptor Seq No */
+#define GVE_SEQNO(x) (be16_to_cpu(x) & 0x7)
+
+/* GVE Receive Packet Descriptor Flags */
+#define GVE_RXFLG(x) cpu_to_be16(1 << (3 + (x)))
+#define GVE_RXF_FRAG GVE_RXFLG(3) /* IP Fragment */
+#define GVE_RXF_IPV4 GVE_RXFLG(4) /* IPv4 */
+#define GVE_RXF_IPV6 GVE_RXFLG(5) /* IPv6 */
+#define GVE_RXF_TCP GVE_RXFLG(6) /* TCP Packet */
+#define GVE_RXF_UDP GVE_RXFLG(7) /* UDP Packet */
+#define GVE_RXF_ERR GVE_RXFLG(8) /* Packet Error Detected */
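+/* Example: GVE_RXF_TCP tests bit 9 (3 + 6) of flags_seq, consistent with the
+ * flags-in-[15:3] layout noted above; GVE_SEQNO() extracts the low three
+ * sequence bits.
+ */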
+
+/* GVE IRQ */
+#define GVE_IRQ_ACK BIT(31)
+#define GVE_IRQ_MASK BIT(30)
+#define GVE_IRQ_EVENT BIT(29)
+
+static inline bool gve_needs_rss(__be16 flag)
+{
+ if (flag & GVE_RXF_FRAG)
+ return false;
+ if (flag & (GVE_RXF_IPV4 | GVE_RXF_IPV6))
+ return true;
+ return false;
+}
+
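+/* Sequence numbers cycle 1, 2, ..., 7, 1, ...; zero is never produced, which
+ * matches the 1-7 range documented for flags_seq above.
+ */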
+static inline u8 gve_next_seqno(u8 seq)
+{
+ return (seq + 1) == 8 ? 1 : seq + 1;
+}
+#endif /* _GVE_DESC_H_ */
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
new file mode 100644
index 000000000000..26540b856541
--- /dev/null
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -0,0 +1,245 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Google virtual Ethernet (gve) driver
+ *
+ * Copyright (C) 2015-2019 Google, Inc.
+ */
+
+#include <linux/rtnetlink.h>
+#include "gve.h"
+
+static void gve_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *info)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+
+ strlcpy(info->driver, "gve", sizeof(info->driver));
+ strlcpy(info->version, gve_version_str, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(priv->pdev), sizeof(info->bus_info));
+}
+
+static void gve_set_msglevel(struct net_device *netdev, u32 value)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+
+ priv->msg_enable = value;
+}
+
+static u32 gve_get_msglevel(struct net_device *netdev)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+
+ return priv->msg_enable;
+}
+
+static const char gve_gstrings_main_stats[][ETH_GSTRING_LEN] = {
+ "rx_packets", "tx_packets", "rx_bytes", "tx_bytes",
+ "rx_dropped", "tx_dropped", "tx_timeouts",
+};
+
+#define GVE_MAIN_STATS_LEN ARRAY_SIZE(gve_gstrings_main_stats)
+#define NUM_GVE_TX_CNTS 5
+#define NUM_GVE_RX_CNTS 2
+
+static void gve_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+ char *s = (char *)data;
+ int i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ memcpy(s, *gve_gstrings_main_stats,
+ sizeof(gve_gstrings_main_stats));
+ s += sizeof(gve_gstrings_main_stats);
+ for (i = 0; i < priv->rx_cfg.num_queues; i++) {
+ snprintf(s, ETH_GSTRING_LEN, "rx_desc_cnt[%u]", i);
+ s += ETH_GSTRING_LEN;
+ snprintf(s, ETH_GSTRING_LEN, "rx_desc_fill_cnt[%u]", i);
+ s += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < priv->tx_cfg.num_queues; i++) {
+ snprintf(s, ETH_GSTRING_LEN, "tx_req[%u]", i);
+ s += ETH_GSTRING_LEN;
+ snprintf(s, ETH_GSTRING_LEN, "tx_done[%u]", i);
+ s += ETH_GSTRING_LEN;
+ snprintf(s, ETH_GSTRING_LEN, "tx_wake[%u]", i);
+ s += ETH_GSTRING_LEN;
+ snprintf(s, ETH_GSTRING_LEN, "tx_stop[%u]", i);
+ s += ETH_GSTRING_LEN;
+ snprintf(s, ETH_GSTRING_LEN, "tx_event_counter[%u]", i);
+ s += ETH_GSTRING_LEN;
+ }
+}
+
+static int gve_get_sset_count(struct net_device *netdev, int sset)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ return GVE_MAIN_STATS_LEN +
+ (priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS) +
+ (priv->tx_cfg.num_queues * NUM_GVE_TX_CNTS);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void
+gve_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+ u64 rx_pkts, rx_bytes, tx_pkts, tx_bytes;
+ unsigned int start;
+ int ring;
+ int i;
+
+ ASSERT_RTNL();
+
+ for (rx_pkts = 0, rx_bytes = 0, ring = 0;
+ ring < priv->rx_cfg.num_queues; ring++) {
+ if (priv->rx) {
+ do {
+ start =
+ u64_stats_fetch_begin(&priv->rx[ring].statss);
+ rx_pkts += priv->rx[ring].rpackets;
+ rx_bytes += priv->rx[ring].rbytes;
+ } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
+ start));
+ }
+ }
+ for (tx_pkts = 0, tx_bytes = 0, ring = 0;
+ ring < priv->tx_cfg.num_queues; ring++) {
+ if (priv->tx) {
+ do {
+ start =
+ u64_stats_fetch_begin(&priv->tx[ring].statss);
+ tx_pkts += priv->tx[ring].pkt_done;
+ tx_bytes += priv->tx[ring].bytes_done;
+ } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
+ start));
+ }
+ }
+
+ i = 0;
+ data[i++] = rx_pkts;
+ data[i++] = tx_pkts;
+ data[i++] = rx_bytes;
+ data[i++] = tx_bytes;
+ /* Skip rx_dropped and tx_dropped */
+ i += 2;
+ data[i++] = priv->tx_timeo_cnt;
+ i = GVE_MAIN_STATS_LEN;
+
+ /* walk RX rings */
+ if (priv->rx) {
+ for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
+ struct gve_rx_ring *rx = &priv->rx[ring];
+
+ data[i++] = rx->desc.cnt;
+ data[i++] = rx->desc.fill_cnt;
+ }
+ } else {
+ i += priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS;
+ }
+ /* walk TX rings */
+ if (priv->tx) {
+ for (ring = 0; ring < priv->tx_cfg.num_queues; ring++) {
+ struct gve_tx_ring *tx = &priv->tx[ring];
+
+ data[i++] = tx->req;
+ data[i++] = tx->done;
+ data[i++] = tx->wake_queue;
+ data[i++] = tx->stop_queue;
+ data[i++] = be32_to_cpu(gve_tx_load_event_counter(priv,
+ tx));
+ }
+ } else {
+ i += priv->tx_cfg.num_queues * NUM_GVE_TX_CNTS;
+ }
+}
+
+static void gve_get_channels(struct net_device *netdev,
+ struct ethtool_channels *cmd)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+
+ cmd->max_rx = priv->rx_cfg.max_queues;
+ cmd->max_tx = priv->tx_cfg.max_queues;
+ cmd->max_other = 0;
+ cmd->max_combined = 0;
+ cmd->rx_count = priv->rx_cfg.num_queues;
+ cmd->tx_count = priv->tx_cfg.num_queues;
+ cmd->other_count = 0;
+ cmd->combined_count = 0;
+}
+
+static int gve_set_channels(struct net_device *netdev,
+ struct ethtool_channels *cmd)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+ struct gve_queue_config new_tx_cfg = priv->tx_cfg;
+ struct gve_queue_config new_rx_cfg = priv->rx_cfg;
+ struct ethtool_channels old_settings;
+ int new_tx = cmd->tx_count;
+ int new_rx = cmd->rx_count;
+
+ gve_get_channels(netdev, &old_settings);
+
+ /* Changing combined is not allowed */
+ if (cmd->combined_count != old_settings.combined_count)
+ return -EINVAL;
+
+ if (!new_rx || !new_tx)
+ return -EINVAL;
+
+ if (!netif_carrier_ok(netdev)) {
+ priv->tx_cfg.num_queues = new_tx;
+ priv->rx_cfg.num_queues = new_rx;
+ return 0;
+ }
+
+ new_tx_cfg.num_queues = new_tx;
+ new_rx_cfg.num_queues = new_rx;
+
+ return gve_adjust_queues(priv, new_rx_cfg, new_tx_cfg);
+}
+
+static void gve_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *cmd)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+
+ cmd->rx_max_pending = priv->rx_desc_cnt;
+ cmd->tx_max_pending = priv->tx_desc_cnt;
+ cmd->rx_pending = priv->rx_desc_cnt;
+ cmd->tx_pending = priv->tx_desc_cnt;
+}
+
+static int gve_user_reset(struct net_device *netdev, u32 *flags)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+
+ if (*flags == ETH_RESET_ALL) {
+ *flags = 0;
+ return gve_reset(priv, true);
+ }
+
+ return -EOPNOTSUPP;
+}
+
+const struct ethtool_ops gve_ethtool_ops = {
+ .get_drvinfo = gve_get_drvinfo,
+ .get_strings = gve_get_strings,
+ .get_sset_count = gve_get_sset_count,
+ .get_ethtool_stats = gve_get_ethtool_stats,
+ .set_msglevel = gve_set_msglevel,
+ .get_msglevel = gve_get_msglevel,
+ .set_channels = gve_set_channels,
+ .get_channels = gve_get_channels,
+ .get_link = ethtool_op_get_link,
+ .get_ringparam = gve_get_ringparam,
+ .reset = gve_user_reset,
+};
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
new file mode 100644
index 000000000000..24f16e3368cd
--- /dev/null
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -0,0 +1,1232 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Google virtual Ethernet (gve) driver
+ *
+ * Copyright (C) 2015-2019 Google, Inc.
+ */
+
+#include <linux/cpumask.h>
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+#include <net/sch_generic.h>
+#include "gve.h"
+#include "gve_adminq.h"
+#include "gve_register.h"
+
+#define GVE_DEFAULT_RX_COPYBREAK (256)
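+/* Conventional copybreak threshold: receive payloads at or below this size
+ * are expected to be copied into a freshly allocated skb rather than handed
+ * up by page flipping (see the rx path).
+ */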
+
+#define DEFAULT_MSG_LEVEL (NETIF_MSG_DRV | NETIF_MSG_LINK)
+#define GVE_VERSION "1.0.0"
+#define GVE_VERSION_PREFIX "GVE-"
+
+const char gve_version_str[] = GVE_VERSION;
+static const char gve_version_prefix[] = GVE_VERSION_PREFIX;
+
+static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+ unsigned int start;
+ int ring;
+
+ if (priv->rx) {
+ for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
+ do {
+ start =
+ u64_stats_fetch_begin(&priv->rx[ring].statss);
+ s->rx_packets += priv->rx[ring].rpackets;
+ s->rx_bytes += priv->rx[ring].rbytes;
+ } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
+ start));
+ }
+ }
+ if (priv->tx) {
+ for (ring = 0; ring < priv->tx_cfg.num_queues; ring++) {
+ do {
+ start =
+ u64_stats_fetch_begin(&priv->tx[ring].statss);
+ s->tx_packets += priv->tx[ring].pkt_done;
+ s->tx_bytes += priv->tx[ring].bytes_done;
+ } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
+ start));
+ }
+ }
+}
+
+static int gve_alloc_counter_array(struct gve_priv *priv)
+{
+ priv->counter_array =
+ dma_alloc_coherent(&priv->pdev->dev,
+ priv->num_event_counters *
+ sizeof(*priv->counter_array),
+ &priv->counter_array_bus, GFP_KERNEL);
+ if (!priv->counter_array)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void gve_free_counter_array(struct gve_priv *priv)
+{
+ dma_free_coherent(&priv->pdev->dev,
+ priv->num_event_counters *
+ sizeof(*priv->counter_array),
+ priv->counter_array, priv->counter_array_bus);
+ priv->counter_array = NULL;
+}
+
+static irqreturn_t gve_mgmnt_intr(int irq, void *arg)
+{
+ struct gve_priv *priv = arg;
+
+ queue_work(priv->gve_wq, &priv->service_task);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t gve_intr(int irq, void *arg)
+{
+ struct gve_notify_block *block = arg;
+ struct gve_priv *priv = block->priv;
+
+ iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block));
+ napi_schedule_irqoff(&block->napi);
+ return IRQ_HANDLED;
+}
+
+static int gve_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct gve_notify_block *block;
+ __be32 __iomem *irq_doorbell;
+ bool reschedule = false;
+ struct gve_priv *priv;
+
+ block = container_of(napi, struct gve_notify_block, napi);
+ priv = block->priv;
+
+ if (block->tx)
+ reschedule |= gve_tx_poll(block, budget);
+ if (block->rx)
+ reschedule |= gve_rx_poll(block, budget);
+
+ if (reschedule)
+ return budget;
+
+ napi_complete(napi);
+ irq_doorbell = gve_irq_doorbell(priv, block);
+ iowrite32be(GVE_IRQ_ACK | GVE_IRQ_EVENT, irq_doorbell);
+
+ /* Double check we have no extra work.
+ * Ensure unmask synchronizes with checking for work.
+ */
+ dma_rmb();
+ if (block->tx)
+ reschedule |= gve_tx_poll(block, -1);
+ if (block->rx)
+ reschedule |= gve_rx_poll(block, -1);
+ if (reschedule && napi_reschedule(napi))
+ iowrite32be(GVE_IRQ_MASK, irq_doorbell);
+
+ return 0;
+}
+
+static int gve_alloc_notify_blocks(struct gve_priv *priv)
+{
+ int num_vecs_requested = priv->num_ntfy_blks + 1;
+ char *name = priv->dev->name;
+ unsigned int active_cpus;
+ int vecs_enabled;
+ int i, j;
+ int err;
+
+ priv->msix_vectors = kvzalloc(num_vecs_requested *
+ sizeof(*priv->msix_vectors), GFP_KERNEL);
+ if (!priv->msix_vectors)
+ return -ENOMEM;
+ for (i = 0; i < num_vecs_requested; i++)
+ priv->msix_vectors[i].entry = i;
+ vecs_enabled = pci_enable_msix_range(priv->pdev, priv->msix_vectors,
+ GVE_MIN_MSIX, num_vecs_requested);
+ if (vecs_enabled < 0) {
+ dev_err(&priv->pdev->dev, "Could not enable min msix %d/%d\n",
+ GVE_MIN_MSIX, vecs_enabled);
+ err = vecs_enabled;
+ goto abort_with_msix_vectors;
+ }
+ if (vecs_enabled != num_vecs_requested) {
+ int new_num_ntfy_blks = (vecs_enabled - 1) & ~0x1;
+ int vecs_per_type = new_num_ntfy_blks / 2;
+ int vecs_left = new_num_ntfy_blks % 2;
+
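+ /* One vector stays reserved for management; the remainder is rounded
+ * down to an even count so it can be split evenly between TX and RX
+ * notification blocks.
+ */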
+ priv->num_ntfy_blks = new_num_ntfy_blks;
+ priv->tx_cfg.max_queues = min_t(int, priv->tx_cfg.max_queues,
+ vecs_per_type);
+ priv->rx_cfg.max_queues = min_t(int, priv->rx_cfg.max_queues,
+ vecs_per_type + vecs_left);
+ dev_err(&priv->pdev->dev,
+ "Could not enable desired msix, only enabled %d, adjusting tx max queues to %d, and rx max queues to %d\n",
+ vecs_enabled, priv->tx_cfg.max_queues,
+ priv->rx_cfg.max_queues);
+ if (priv->tx_cfg.num_queues > priv->tx_cfg.max_queues)
+ priv->tx_cfg.num_queues = priv->tx_cfg.max_queues;
+ if (priv->rx_cfg.num_queues > priv->rx_cfg.max_queues)
+ priv->rx_cfg.num_queues = priv->rx_cfg.max_queues;
+ }
+ /* Half the notification blocks go to TX and half to RX */
+ active_cpus = min_t(int, priv->num_ntfy_blks / 2, num_online_cpus());
+
+ /* Setup Management Vector - the last vector */
+ snprintf(priv->mgmt_msix_name, sizeof(priv->mgmt_msix_name), "%s-mgmnt",
+ name);
+ err = request_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector,
+ gve_mgmnt_intr, 0, priv->mgmt_msix_name, priv);
+ if (err) {
+ dev_err(&priv->pdev->dev, "Did not receive management vector.\n");
+ goto abort_with_msix_enabled;
+ }
+ priv->ntfy_blocks =
+ dma_alloc_coherent(&priv->pdev->dev,
+ priv->num_ntfy_blks *
+ sizeof(*priv->ntfy_blocks),
+ &priv->ntfy_block_bus, GFP_KERNEL);
+ if (!priv->ntfy_blocks) {
+ err = -ENOMEM;
+ goto abort_with_mgmt_vector;
+ }
+ /* Setup the other blocks - the first n-1 vectors */
+ for (i = 0; i < priv->num_ntfy_blks; i++) {
+ struct gve_notify_block *block = &priv->ntfy_blocks[i];
+ int msix_idx = i;
+
+ snprintf(block->name, sizeof(block->name), "%s-ntfy-block.%d",
+ name, i);
+ block->priv = priv;
+ err = request_irq(priv->msix_vectors[msix_idx].vector,
+ gve_intr, 0, block->name, block);
+ if (err) {
+ dev_err(&priv->pdev->dev,
+ "Failed to receive msix vector %d\n", i);
+ goto abort_with_some_ntfy_blocks;
+ }
+ irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
+ get_cpu_mask(i % active_cpus));
+ }
+ return 0;
+abort_with_some_ntfy_blocks:
+ for (j = 0; j < i; j++) {
+ struct gve_notify_block *block = &priv->ntfy_blocks[j];
+ int msix_idx = j;
+
+ irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
+ NULL);
+ free_irq(priv->msix_vectors[msix_idx].vector, block);
+ }
+ dma_free_coherent(&priv->pdev->dev, priv->num_ntfy_blks *
+ sizeof(*priv->ntfy_blocks),
+ priv->ntfy_blocks, priv->ntfy_block_bus);
+ priv->ntfy_blocks = NULL;
+abort_with_mgmt_vector:
+ free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
+abort_with_msix_enabled:
+ pci_disable_msix(priv->pdev);
+abort_with_msix_vectors:
+ kfree(priv->msix_vectors);
+ priv->msix_vectors = NULL;
+ return err;
+}
+
+static void gve_free_notify_blocks(struct gve_priv *priv)
+{
+ int i;
+
+ /* Free the irqs */
+ for (i = 0; i < priv->num_ntfy_blks; i++) {
+ struct gve_notify_block *block = &priv->ntfy_blocks[i];
+ int msix_idx = i;
+
+ irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
+ NULL);
+ free_irq(priv->msix_vectors[msix_idx].vector, block);
+ }
+ dma_free_coherent(&priv->pdev->dev,
+ priv->num_ntfy_blks * sizeof(*priv->ntfy_blocks),
+ priv->ntfy_blocks, priv->ntfy_block_bus);
+ priv->ntfy_blocks = NULL;
+ free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
+ pci_disable_msix(priv->pdev);
+ kfree(priv->msix_vectors);
+ priv->msix_vectors = NULL;
+}
+
+static int gve_setup_device_resources(struct gve_priv *priv)
+{
+ int err;
+
+ err = gve_alloc_counter_array(priv);
+ if (err)
+ return err;
+ err = gve_alloc_notify_blocks(priv);
+ if (err)
+ goto abort_with_counter;
+ err = gve_adminq_configure_device_resources(priv,
+ priv->counter_array_bus,
+ priv->num_event_counters,
+ priv->ntfy_block_bus,
+ priv->num_ntfy_blks);
+ if (unlikely(err)) {
+ dev_err(&priv->pdev->dev,
+ "could not setup device_resources: err=%d\n", err);
+ err = -ENXIO;
+ goto abort_with_ntfy_blocks;
+ }
+ gve_set_device_resources_ok(priv);
+ return 0;
+abort_with_ntfy_blocks:
+ gve_free_notify_blocks(priv);
+abort_with_counter:
+ gve_free_counter_array(priv);
+ return err;
+}
+
+static void gve_trigger_reset(struct gve_priv *priv);
+
+static void gve_teardown_device_resources(struct gve_priv *priv)
+{
+ int err;
+
+ /* Tell device its resources are being freed */
+ if (gve_get_device_resources_ok(priv)) {
+ err = gve_adminq_deconfigure_device_resources(priv);
+ if (err) {
+ dev_err(&priv->pdev->dev,
+ "Could not deconfigure device resources: err=%d\n",
+ err);
+ gve_trigger_reset(priv);
+ }
+ }
+ gve_free_counter_array(priv);
+ gve_free_notify_blocks(priv);
+ gve_clear_device_resources_ok(priv);
+}
+
+static void gve_add_napi(struct gve_priv *priv, int ntfy_idx)
+{
+ struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
+
+ netif_napi_add(priv->dev, &block->napi, gve_napi_poll,
+ NAPI_POLL_WEIGHT);
+}
+
+static void gve_remove_napi(struct gve_priv *priv, int ntfy_idx)
+{
+ struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
+
+ netif_napi_del(&block->napi);
+}
+
+static int gve_register_qpls(struct gve_priv *priv)
+{
+ int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
+ int err;
+ int i;
+
+ for (i = 0; i < num_qpls; i++) {
+ err = gve_adminq_register_page_list(priv, &priv->qpls[i]);
+ if (err) {
+ netif_err(priv, drv, priv->dev,
+ "failed to register queue page list %d\n",
+ priv->qpls[i].id);
+ /* This failure will trigger a reset - no need to clean
+ * up
+ */
+ return err;
+ }
+ }
+ return 0;
+}
+
+static int gve_unregister_qpls(struct gve_priv *priv)
+{
+ int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
+ int err;
+ int i;
+
+ for (i = 0; i < num_qpls; i++) {
+ err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id);
+ /* This failure will trigger a reset - no need to clean up */
+ if (err) {
+ netif_err(priv, drv, priv->dev,
+ "Failed to unregister queue page list %d\n",
+ priv->qpls[i].id);
+ return err;
+ }
+ }
+ return 0;
+}
+
+static int gve_create_rings(struct gve_priv *priv)
+{
+ int err;
+ int i;
+
+ for (i = 0; i < priv->tx_cfg.num_queues; i++) {
+ err = gve_adminq_create_tx_queue(priv, i);
+ if (err) {
+ netif_err(priv, drv, priv->dev, "failed to create tx queue %d\n",
+ i);
+ /* This failure will trigger a reset - no need to clean
+ * up
+ */
+ return err;
+ }
+ netif_dbg(priv, drv, priv->dev, "created tx queue %d\n", i);
+ }
+ for (i = 0; i < priv->rx_cfg.num_queues; i++) {
+ err = gve_adminq_create_rx_queue(priv, i);
+ if (err) {
+ netif_err(priv, drv, priv->dev, "failed to create rx queue %d\n",
+ i);
+ /* This failure will trigger a reset - no need to clean
+ * up
+ */
+ return err;
+ }
+ /* Rx data ring has been prefilled with packet buffers at
+ * queue allocation time.
+ * Write the doorbell to provide descriptor slots and packet
+ * buffers to the NIC.
+ */
+ gve_rx_write_doorbell(priv, &priv->rx[i]);
+ netif_dbg(priv, drv, priv->dev, "created rx queue %d\n", i);
+ }
+
+ return 0;
+}
+
+static int gve_alloc_rings(struct gve_priv *priv)
+{
+ int ntfy_idx;
+ int err;
+ int i;
+
+ /* Setup tx rings */
+ priv->tx = kvzalloc(priv->tx_cfg.num_queues * sizeof(*priv->tx),
+ GFP_KERNEL);
+ if (!priv->tx)
+ return -ENOMEM;
+ err = gve_tx_alloc_rings(priv);
+ if (err)
+ goto free_tx;
+ /* Setup rx rings */
+ priv->rx = kvzalloc(priv->rx_cfg.num_queues * sizeof(*priv->rx),
+ GFP_KERNEL);
+ if (!priv->rx) {
+ err = -ENOMEM;
+ goto free_tx_queue;
+ }
+ err = gve_rx_alloc_rings(priv);
+ if (err)
+ goto free_rx;
+ /* Add tx napi & init sync stats */
+ for (i = 0; i < priv->tx_cfg.num_queues; i++) {
+ u64_stats_init(&priv->tx[i].statss);
+ ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
+ gve_add_napi(priv, ntfy_idx);
+ }
+ /* Add rx napi & init sync stats */
+ for (i = 0; i < priv->rx_cfg.num_queues; i++) {
+ u64_stats_init(&priv->rx[i].statss);
+ ntfy_idx = gve_rx_idx_to_ntfy(priv, i);
+ gve_add_napi(priv, ntfy_idx);
+ }
+
+ return 0;
+
+free_rx:
+ kfree(priv->rx);
+ priv->rx = NULL;
+free_tx_queue:
+ gve_tx_free_rings(priv);
+free_tx:
+ kfree(priv->tx);
+ priv->tx = NULL;
+ return err;
+}
+
+static int gve_destroy_rings(struct gve_priv *priv)
+{
+ int err;
+ int i;
+
+ for (i = 0; i < priv->tx_cfg.num_queues; i++) {
+ err = gve_adminq_destroy_tx_queue(priv, i);
+ if (err) {
+ netif_err(priv, drv, priv->dev,
+ "failed to destroy tx queue %d\n",
+ i);
+ /* This failure will trigger a reset - no need to clean
+ * up
+ */
+ return err;
+ }
+ netif_dbg(priv, drv, priv->dev, "destroyed tx queue %d\n", i);
+ }
+ for (i = 0; i < priv->rx_cfg.num_queues; i++) {
+ err = gve_adminq_destroy_rx_queue(priv, i);
+ if (err) {
+ netif_err(priv, drv, priv->dev,
+ "failed to destroy rx queue %d\n",
+ i);
+ /* This failure will trigger a reset - no need to clean
+ * up
+ */
+ return err;
+ }
+ netif_dbg(priv, drv, priv->dev, "destroyed rx queue %d\n", i);
+ }
+ return 0;
+}
+
+static void gve_free_rings(struct gve_priv *priv)
+{
+ int ntfy_idx;
+ int i;
+
+ if (priv->tx) {
+ for (i = 0; i < priv->tx_cfg.num_queues; i++) {
+ ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
+ gve_remove_napi(priv, ntfy_idx);
+ }
+ gve_tx_free_rings(priv);
+ kfree(priv->tx);
+ priv->tx = NULL;
+ }
+ if (priv->rx) {
+ for (i = 0; i < priv->rx_cfg.num_queues; i++) {
+ ntfy_idx = gve_rx_idx_to_ntfy(priv, i);
+ gve_remove_napi(priv, ntfy_idx);
+ }
+ gve_rx_free_rings(priv);
+ kfree(priv->rx);
+ priv->rx = NULL;
+ }
+}
+
+int gve_alloc_page(struct device *dev, struct page **page, dma_addr_t *dma,
+ enum dma_data_direction dir)
+{
+ *page = alloc_page(GFP_KERNEL);
+ if (!*page)
+ return -ENOMEM;
+ *dma = dma_map_page(dev, *page, 0, PAGE_SIZE, dir);
+ if (dma_mapping_error(dev, *dma)) {
+ put_page(*page);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id,
+ int pages)
+{
+ struct gve_queue_page_list *qpl = &priv->qpls[id];
+ int err;
+ int i;
+
+ if (pages + priv->num_registered_pages > priv->max_registered_pages) {
+ netif_err(priv, drv, priv->dev,
+ "Reached max number of registered pages %llu > %llu\n",
+ pages + priv->num_registered_pages,
+ priv->max_registered_pages);
+ return -EINVAL;
+ }
+
+ qpl->id = id;
+ qpl->num_entries = pages;
+ qpl->pages = kvzalloc(pages * sizeof(*qpl->pages), GFP_KERNEL);
+ /* caller handles clean up */
+ if (!qpl->pages)
+ return -ENOMEM;
+ qpl->page_buses = kvzalloc(pages * sizeof(*qpl->page_buses),
+ GFP_KERNEL);
+ /* caller handles clean up */
+ if (!qpl->page_buses)
+ return -ENOMEM;
+
+ for (i = 0; i < pages; i++) {
+ err = gve_alloc_page(&priv->pdev->dev, &qpl->pages[i],
+ &qpl->page_buses[i],
+ gve_qpl_dma_dir(priv, id));
+ /* caller handles clean up */
+ if (err)
+ return -ENOMEM;
+ }
+ priv->num_registered_pages += pages;
+
+ return 0;
+}
+
+void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
+ enum dma_data_direction dir)
+{
+ if (!dma_mapping_error(dev, dma))
+ dma_unmap_page(dev, dma, PAGE_SIZE, dir);
+ if (page)
+ put_page(page);
+}
+
+static void gve_free_queue_page_list(struct gve_priv *priv,
+ int id)
+{
+ struct gve_queue_page_list *qpl = &priv->qpls[id];
+ int i;
+
+ if (!qpl->pages)
+ return;
+ if (!qpl->page_buses)
+ goto free_pages;
+
+ for (i = 0; i < qpl->num_entries; i++)
+ gve_free_page(&priv->pdev->dev, qpl->pages[i],
+ qpl->page_buses[i], gve_qpl_dma_dir(priv, id));
+
+ kfree(qpl->page_buses);
+free_pages:
+ kfree(qpl->pages);
+ priv->num_registered_pages -= qpl->num_entries;
+}
+
+static int gve_alloc_qpls(struct gve_priv *priv)
+{
+ int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
+ int i, j;
+ int err;
+
+ priv->qpls = kvzalloc(num_qpls * sizeof(*priv->qpls), GFP_KERNEL);
+ if (!priv->qpls)
+ return -ENOMEM;
+
+ for (i = 0; i < gve_num_tx_qpls(priv); i++) {
+ err = gve_alloc_queue_page_list(priv, i,
+ priv->tx_pages_per_qpl);
+ if (err)
+ goto free_qpls;
+ }
+ for (; i < num_qpls; i++) {
+ err = gve_alloc_queue_page_list(priv, i,
+ priv->rx_pages_per_qpl);
+ if (err)
+ goto free_qpls;
+ }
+
+ priv->qpl_cfg.qpl_map_size = BITS_TO_LONGS(num_qpls) *
+ sizeof(unsigned long) * BITS_PER_BYTE;
+ priv->qpl_cfg.qpl_id_map = kvzalloc(BITS_TO_LONGS(num_qpls) *
+ sizeof(unsigned long), GFP_KERNEL);
+ if (!priv->qpl_cfg.qpl_id_map) {
+ err = -ENOMEM;
+ goto free_qpls;
+ }
+
+ return 0;
+
+free_qpls:
+ for (j = 0; j <= i && j < num_qpls; j++)
+ gve_free_queue_page_list(priv, j);
+ kfree(priv->qpls);
+ return err;
+}
+
+static void gve_free_qpls(struct gve_priv *priv)
+{
+ int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
+ int i;
+
+ kfree(priv->qpl_cfg.qpl_id_map);
+
+ for (i = 0; i < num_qpls; i++)
+ gve_free_queue_page_list(priv, i);
+
+ kfree(priv->qpls);
+}
+
+/* Use this to schedule a reset when the device is capable of continuing
+ * to handle other requests in its current state. If it is not, do a reset
+ * in thread instead.
+ */
+void gve_schedule_reset(struct gve_priv *priv)
+{
+ gve_set_do_reset(priv);
+ queue_work(priv->gve_wq, &priv->service_task);
+}
+
+static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up);
+static int gve_reset_recovery(struct gve_priv *priv, bool was_up);
+static void gve_turndown(struct gve_priv *priv);
+static void gve_turnup(struct gve_priv *priv);
+
+static int gve_open(struct net_device *dev)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+ int err;
+
+ err = gve_alloc_qpls(priv);
+ if (err)
+ return err;
+ err = gve_alloc_rings(priv);
+ if (err)
+ goto free_qpls;
+
+ err = netif_set_real_num_tx_queues(dev, priv->tx_cfg.num_queues);
+ if (err)
+ goto free_rings;
+ err = netif_set_real_num_rx_queues(dev, priv->rx_cfg.num_queues);
+ if (err)
+ goto free_rings;
+
+ err = gve_register_qpls(priv);
+ if (err)
+ goto reset;
+ err = gve_create_rings(priv);
+ if (err)
+ goto reset;
+ gve_set_device_rings_ok(priv);
+
+ gve_turnup(priv);
+ netif_carrier_on(dev);
+ return 0;
+
+free_rings:
+ gve_free_rings(priv);
+free_qpls:
+ gve_free_qpls(priv);
+ return err;
+
+reset:
+ /* This must have been called from a reset due to the rtnl lock
+ * so just return at this point.
+ */
+ if (gve_get_reset_in_progress(priv))
+ return err;
+ /* Otherwise reset before returning */
+ gve_reset_and_teardown(priv, true);
+ /* if this fails there is nothing we can do so just ignore the return */
+ gve_reset_recovery(priv, false);
+ /* return the original error */
+ return err;
+}
+
+static int gve_close(struct net_device *dev)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+ int err;
+
+ netif_carrier_off(dev);
+ if (gve_get_device_rings_ok(priv)) {
+ gve_turndown(priv);
+ err = gve_destroy_rings(priv);
+ if (err)
+ goto err;
+ err = gve_unregister_qpls(priv);
+ if (err)
+ goto err;
+ gve_clear_device_rings_ok(priv);
+ }
+
+ gve_free_rings(priv);
+ gve_free_qpls(priv);
+ return 0;
+
+err:
+ /* This must have been called from a reset due to the rtnl lock
+ * so just return at this point.
+ */
+ if (gve_get_reset_in_progress(priv))
+ return err;
+ /* Otherwise reset before returning */
+ gve_reset_and_teardown(priv, true);
+ return gve_reset_recovery(priv, false);
+}
+
+int gve_adjust_queues(struct gve_priv *priv,
+ struct gve_queue_config new_rx_config,
+ struct gve_queue_config new_tx_config)
+{
+ int err;
+
+ if (netif_carrier_ok(priv->dev)) {
+ /* To make this process as simple as possible we teardown the
+ * device, set the new configuration, and then bring the device
+ * up again.
+ */
+ err = gve_close(priv->dev);
+ /* we have already tried to reset in close,
+ * just fail at this point
+ */
+ if (err)
+ return err;
+ priv->tx_cfg = new_tx_config;
+ priv->rx_cfg = new_rx_config;
+
+ err = gve_open(priv->dev);
+ if (err)
+ goto err;
+
+ return 0;
+ }
+ /* Set the config for the next up. */
+ priv->tx_cfg = new_tx_config;
+ priv->rx_cfg = new_rx_config;
+
+ return 0;
+err:
+ netif_err(priv, drv, priv->dev,
+ "Adjust queues failed! !!! DISABLING ALL QUEUES !!!\n");
+ gve_turndown(priv);
+ return err;
+}
+
+static void gve_turndown(struct gve_priv *priv)
+{
+ int idx;
+
+ if (netif_carrier_ok(priv->dev))
+ netif_carrier_off(priv->dev);
+
+ if (!gve_get_napi_enabled(priv))
+ return;
+
+ /* Disable napi to prevent more work from coming in */
+ for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
+ int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
+ struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
+
+ napi_disable(&block->napi);
+ }
+ for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
+ int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
+ struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
+
+ napi_disable(&block->napi);
+ }
+
+ /* Stop tx queues */
+ netif_tx_disable(priv->dev);
+
+ gve_clear_napi_enabled(priv);
+}
+
+static void gve_turnup(struct gve_priv *priv)
+{
+ int idx;
+
+ /* Start the tx queues */
+ netif_tx_start_all_queues(priv->dev);
+
+ /* Enable napi and unmask interrupts for all queues */
+ for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
+ int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
+ struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
+
+ napi_enable(&block->napi);
+ iowrite32be(0, gve_irq_doorbell(priv, block));
+ }
+ for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
+ int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
+ struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
+
+ napi_enable(&block->napi);
+ iowrite32be(0, gve_irq_doorbell(priv, block));
+ }
+
+ gve_set_napi_enabled(priv);
+}
+
+static void gve_tx_timeout(struct net_device *dev)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+
+ gve_schedule_reset(priv);
+ priv->tx_timeo_cnt++;
+}
+
+static const struct net_device_ops gve_netdev_ops = {
+ .ndo_start_xmit = gve_tx,
+ .ndo_open = gve_open,
+ .ndo_stop = gve_close,
+ .ndo_get_stats64 = gve_get_stats,
+ .ndo_tx_timeout = gve_tx_timeout,
+};
+
+static void gve_handle_status(struct gve_priv *priv, u32 status)
+{
+ if (GVE_DEVICE_STATUS_RESET_MASK & status) {
+ dev_info(&priv->pdev->dev, "Device requested reset.\n");
+ gve_set_do_reset(priv);
+ }
+}
+
+static void gve_handle_reset(struct gve_priv *priv)
+{
+ /* A service task will be scheduled at the end of probe to catch any
+ * resets that need to happen, and we don't want to reset until
+ * probe is done.
+ */
+ if (gve_get_probe_in_progress(priv))
+ return;
+
+ if (gve_get_do_reset(priv)) {
+ rtnl_lock();
+ gve_reset(priv, false);
+ rtnl_unlock();
+ }
+}
+
+/* Handle NIC status register changes and reset requests */
+static void gve_service_task(struct work_struct *work)
+{
+ struct gve_priv *priv = container_of(work, struct gve_priv,
+ service_task);
+
+ gve_handle_status(priv,
+ ioread32be(&priv->reg_bar0->device_status));
+
+ gve_handle_reset(priv);
+}
+
+static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
+{
+ int num_ntfy;
+ int err;
+
+ /* Set up the adminq */
+ err = gve_adminq_alloc(&priv->pdev->dev, priv);
+ if (err) {
+ dev_err(&priv->pdev->dev,
+ "Failed to alloc admin queue: err=%d\n", err);
+ return err;
+ }
+
+ if (skip_describe_device)
+ goto setup_device;
+
+ /* Get the initial information we need from the device */
+ err = gve_adminq_describe_device(priv);
+ if (err) {
+ dev_err(&priv->pdev->dev,
+ "Could not get device information: err=%d\n", err);
+ goto err;
+ }
+ if (priv->dev->max_mtu > PAGE_SIZE) {
+ priv->dev->max_mtu = PAGE_SIZE;
+ err = gve_adminq_set_mtu(priv, priv->dev->mtu);
+ if (err) {
+ netif_err(priv, drv, priv->dev, "Could not set mtu\n");
+ goto err;
+ }
+ }
+ priv->dev->mtu = priv->dev->max_mtu;
+ num_ntfy = pci_msix_vec_count(priv->pdev);
+ if (num_ntfy <= 0) {
+ dev_err(&priv->pdev->dev,
+ "could not count MSI-x vectors: err=%d\n", num_ntfy);
+ err = num_ntfy;
+ goto err;
+ } else if (num_ntfy < GVE_MIN_MSIX) {
+ dev_err(&priv->pdev->dev, "gve needs at least %d MSI-x vectors, but only has %d\n",
+ GVE_MIN_MSIX, num_ntfy);
+ err = -EINVAL;
+ goto err;
+ }
+
+ priv->num_registered_pages = 0;
+ priv->rx_copybreak = GVE_DEFAULT_RX_COPYBREAK;
+ /* gvnic has one Notification Block per MSI-x vector, except for the
+ * management vector
+ */
+ priv->num_ntfy_blks = (num_ntfy - 1) & ~0x1;
+ priv->mgmt_msix_idx = priv->num_ntfy_blks;
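+ /* The notification block count is rounded down to an even number so TX
+ * and RX can each take half below, and the management vector sits just
+ * after the last notification block.
+ */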
+
+ priv->tx_cfg.max_queues =
+ min_t(int, priv->tx_cfg.max_queues, priv->num_ntfy_blks / 2);
+ priv->rx_cfg.max_queues =
+ min_t(int, priv->rx_cfg.max_queues, priv->num_ntfy_blks / 2);
+
+ priv->tx_cfg.num_queues = priv->tx_cfg.max_queues;
+ priv->rx_cfg.num_queues = priv->rx_cfg.max_queues;
+ if (priv->default_num_queues > 0) {
+ priv->tx_cfg.num_queues = min_t(int, priv->default_num_queues,
+ priv->tx_cfg.num_queues);
+ priv->rx_cfg.num_queues = min_t(int, priv->default_num_queues,
+ priv->rx_cfg.num_queues);
+ }
+
+ netif_info(priv, drv, priv->dev, "TX queues %d, RX queues %d\n",
+ priv->tx_cfg.num_queues, priv->rx_cfg.num_queues);
+ netif_info(priv, drv, priv->dev, "Max TX queues %d, Max RX queues %d\n",
+ priv->tx_cfg.max_queues, priv->rx_cfg.max_queues);
+
+setup_device:
+ err = gve_setup_device_resources(priv);
+ if (!err)
+ return 0;
+err:
+ gve_adminq_free(&priv->pdev->dev, priv);
+ return err;
+}
+
+static void gve_teardown_priv_resources(struct gve_priv *priv)
+{
+ gve_teardown_device_resources(priv);
+ gve_adminq_free(&priv->pdev->dev, priv);
+}
+
+static void gve_trigger_reset(struct gve_priv *priv)
+{
+ /* Reset the device by releasing the AQ */
+ gve_adminq_release(priv);
+}
+
+static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up)
+{
+ gve_trigger_reset(priv);
+ /* With the reset having already happened, close cannot fail */
+ if (was_up)
+ gve_close(priv->dev);
+ gve_teardown_priv_resources(priv);
+}
+
+static int gve_reset_recovery(struct gve_priv *priv, bool was_up)
+{
+ int err;
+
+ err = gve_init_priv(priv, true);
+ if (err)
+ goto err;
+ if (was_up) {
+ err = gve_open(priv->dev);
+ if (err)
+ goto err;
+ }
+ return 0;
+err:
+ dev_err(&priv->pdev->dev, "Reset failed! !!! DISABLING ALL QUEUES !!!\n");
+ gve_turndown(priv);
+ return err;
+}
+
+int gve_reset(struct gve_priv *priv, bool attempt_teardown)
+{
+ bool was_up = netif_carrier_ok(priv->dev);
+ int err;
+
+ dev_info(&priv->pdev->dev, "Performing reset\n");
+ gve_clear_do_reset(priv);
+ gve_set_reset_in_progress(priv);
+ /* If we aren't attempting to teardown normally, just go turndown and
+ * reset right away.
+ */
+ if (!attempt_teardown) {
+ gve_turndown(priv);
+ gve_reset_and_teardown(priv, was_up);
+ } else {
+ /* Otherwise attempt to close normally */
+ if (was_up) {
+ err = gve_close(priv->dev);
+ /* If that fails reset as we did above */
+ if (err)
+ gve_reset_and_teardown(priv, was_up);
+ }
+ /* Clean up any remaining resources */
+ gve_teardown_priv_resources(priv);
+ }
+
+ /* Set it all back up */
+ err = gve_reset_recovery(priv, was_up);
+ gve_clear_reset_in_progress(priv);
+ return err;
+}
+
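+/* The register BAR exposes driver_version as a single byte-wide register;
+ * the version string ("GVE-" prefix, then the version, terminated by '\n')
+ * is streamed through it one byte at a time.
+ */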
+static void gve_write_version(u8 __iomem *driver_version_register)
+{
+ const char *c = gve_version_prefix;
+
+ while (*c) {
+ writeb(*c, driver_version_register);
+ c++;
+ }
+
+ c = gve_version_str;
+ while (*c) {
+ writeb(*c, driver_version_register);
+ c++;
+ }
+ writeb('\n', driver_version_register);
+}
+
+static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ int max_tx_queues, max_rx_queues;
+ struct net_device *dev;
+ __be32 __iomem *db_bar;
+ struct gve_registers __iomem *reg_bar;
+ struct gve_priv *priv;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return -ENXIO;
+
+ err = pci_request_regions(pdev, "gvnic-cfg");
+ if (err)
+ goto abort_with_enabled;
+
+ pci_set_master(pdev);
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev, "Failed to set dma mask: err=%d\n", err);
+ goto abort_with_pci_region;
+ }
+
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev,
+ "Failed to set consistent dma mask: err=%d\n", err);
+ goto abort_with_pci_region;
+ }
+
+ reg_bar = pci_iomap(pdev, GVE_REGISTER_BAR, 0);
+ if (!reg_bar) {
+ dev_err(&pdev->dev, "Failed to map pci bar!\n");
+ err = -ENOMEM;
+ goto abort_with_pci_region;
+ }
+
+ db_bar = pci_iomap(pdev, GVE_DOORBELL_BAR, 0);
+ if (!db_bar) {
+ dev_err(&pdev->dev, "Failed to map doorbell bar!\n");
+ err = -ENOMEM;
+ goto abort_with_reg_bar;
+ }
+
+ gve_write_version(&reg_bar->driver_version);
+ /* Get max queues to alloc etherdev */
+ max_tx_queues = ioread32be(&reg_bar->max_tx_queues);
+ max_rx_queues = ioread32be(&reg_bar->max_rx_queues);
+ /* Alloc and setup the netdev and priv */
+ dev = alloc_etherdev_mqs(sizeof(*priv), max_tx_queues, max_rx_queues);
+ if (!dev) {
+ dev_err(&pdev->dev, "could not allocate netdev\n");
+ goto abort_with_db_bar;
+ }
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ pci_set_drvdata(pdev, dev);
+ dev->ethtool_ops = &gve_ethtool_ops;
+ dev->netdev_ops = &gve_netdev_ops;
+ /* advertise features */
+ dev->hw_features = NETIF_F_HIGHDMA;
+ dev->hw_features |= NETIF_F_SG;
+ dev->hw_features |= NETIF_F_HW_CSUM;
+ dev->hw_features |= NETIF_F_TSO;
+ dev->hw_features |= NETIF_F_TSO6;
+ dev->hw_features |= NETIF_F_TSO_ECN;
+ dev->hw_features |= NETIF_F_RXCSUM;
+ dev->hw_features |= NETIF_F_RXHASH;
+ dev->features = dev->hw_features;
+ dev->watchdog_timeo = 5 * HZ;
+ dev->min_mtu = ETH_MIN_MTU;
+ netif_carrier_off(dev);
+
+ priv = netdev_priv(dev);
+ priv->dev = dev;
+ priv->pdev = pdev;
+ priv->msg_enable = DEFAULT_MSG_LEVEL;
+ priv->reg_bar0 = reg_bar;
+ priv->db_bar2 = db_bar;
+ priv->service_task_flags = 0x0;
+ priv->state_flags = 0x0;
+
+ gve_set_probe_in_progress(priv);
+ priv->gve_wq = alloc_ordered_workqueue("gve", 0);
+ if (!priv->gve_wq) {
+ dev_err(&pdev->dev, "Could not allocate workqueue");
+ err = -ENOMEM;
+ goto abort_with_netdev;
+ }
+ INIT_WORK(&priv->service_task, gve_service_task);
+ priv->tx_cfg.max_queues = max_tx_queues;
+ priv->rx_cfg.max_queues = max_rx_queues;
+
+ err = gve_init_priv(priv, false);
+ if (err)
+ goto abort_with_wq;
+
+ err = register_netdev(dev);
+ if (err)
+ goto abort_with_wq;
+
+ dev_info(&pdev->dev, "GVE version %s\n", gve_version_str);
+ gve_clear_probe_in_progress(priv);
+ queue_work(priv->gve_wq, &priv->service_task);
+ return 0;
+
+abort_with_wq:
+ destroy_workqueue(priv->gve_wq);
+
+abort_with_netdev:
+ free_netdev(dev);
+
+abort_with_db_bar:
+ pci_iounmap(pdev, db_bar);
+
+abort_with_reg_bar:
+ pci_iounmap(pdev, reg_bar);
+
+abort_with_pci_region:
+ pci_release_regions(pdev);
+
+abort_with_enabled:
+ pci_disable_device(pdev);
+ return -ENXIO;
+}
+EXPORT_SYMBOL(gve_probe);
+
+static void gve_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct gve_priv *priv = netdev_priv(netdev);
+ __be32 __iomem *db_bar = priv->db_bar2;
+ void __iomem *reg_bar = priv->reg_bar0;
+
+ unregister_netdev(netdev);
+ gve_teardown_priv_resources(priv);
+ destroy_workqueue(priv->gve_wq);
+ free_netdev(netdev);
+ pci_iounmap(pdev, db_bar);
+ pci_iounmap(pdev, reg_bar);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+static const struct pci_device_id gve_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_GOOGLE, PCI_DEV_ID_GVNIC) },
+ { }
+};
+
+static struct pci_driver gvnic_driver = {
+ .name = "gvnic",
+ .id_table = gve_id_table,
+ .probe = gve_probe,
+ .remove = gve_remove,
+};
+
+module_pci_driver(gvnic_driver);
+
+MODULE_DEVICE_TABLE(pci, gve_id_table);
+MODULE_AUTHOR("Google, Inc.");
+MODULE_DESCRIPTION("gVNIC Driver");
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_VERSION(GVE_VERSION);
diff --git a/drivers/net/ethernet/google/gve/gve_register.h b/drivers/net/ethernet/google/gve/gve_register.h
new file mode 100644
index 000000000000..84ab8893aadd
--- /dev/null
+++ b/drivers/net/ethernet/google/gve/gve_register.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
+ * Google virtual Ethernet (gve) driver
+ *
+ * Copyright (C) 2015-2019 Google, Inc.
+ */
+
+#ifndef _GVE_REGISTER_H_
+#define _GVE_REGISTER_H_
+
+/* Fixed Configuration Registers */
+struct gve_registers {
+ __be32 device_status;
+ __be32 driver_status;
+ __be32 max_tx_queues;
+ __be32 max_rx_queues;
+ __be32 adminq_pfn;
+ __be32 adminq_doorbell;
+ __be32 adminq_event_counter;
+ u8 reserved[3];
+ u8 driver_version;
+};
+
+enum gve_device_status_flags {
+ GVE_DEVICE_STATUS_RESET_MASK = BIT(1),
+ GVE_DEVICE_STATUS_LINK_STATUS_MASK = BIT(2),
+};
+#endif /* _GVE_REGISTER_H_ */
diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
new file mode 100644
index 000000000000..c1aeabd1c594
--- /dev/null
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -0,0 +1,446 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Google virtual Ethernet (gve) driver
+ *
+ * Copyright (C) 2015-2019 Google, Inc.
+ */
+
+#include "gve.h"
+#include "gve_adminq.h"
+#include <linux/etherdevice.h>
+
+static void gve_rx_remove_from_block(struct gve_priv *priv, int queue_idx)
+{
+ struct gve_notify_block *block =
+ &priv->ntfy_blocks[gve_rx_idx_to_ntfy(priv, queue_idx)];
+
+ block->rx = NULL;
+}
+
+static void gve_rx_free_ring(struct gve_priv *priv, int idx)
+{
+ struct gve_rx_ring *rx = &priv->rx[idx];
+ struct device *dev = &priv->pdev->dev;
+ size_t bytes;
+ u32 slots;
+
+ gve_rx_remove_from_block(priv, idx);
+
+ bytes = sizeof(struct gve_rx_desc) * priv->rx_desc_cnt;
+ dma_free_coherent(dev, bytes, rx->desc.desc_ring, rx->desc.bus);
+ rx->desc.desc_ring = NULL;
+
+ dma_free_coherent(dev, sizeof(*rx->q_resources),
+ rx->q_resources, rx->q_resources_bus);
+ rx->q_resources = NULL;
+
+ gve_unassign_qpl(priv, rx->data.qpl->id);
+ rx->data.qpl = NULL;
+ kfree(rx->data.page_info);
+
+ slots = rx->data.mask + 1;
+ bytes = sizeof(*rx->data.data_ring) * slots;
+ dma_free_coherent(dev, bytes, rx->data.data_ring,
+ rx->data.data_bus);
+ rx->data.data_ring = NULL;
+ netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx);
+}
+
+static void gve_setup_rx_buffer(struct gve_rx_slot_page_info *page_info,
+ struct gve_rx_data_slot *slot,
+ dma_addr_t addr, struct page *page)
+{
+ page_info->page = page;
+ page_info->page_offset = 0;
+ page_info->page_address = page_address(page);
+ slot->qpl_offset = cpu_to_be64(addr);
+}
+
+static int gve_prefill_rx_pages(struct gve_rx_ring *rx)
+{
+ struct gve_priv *priv = rx->gve;
+ u32 slots;
+ int i;
+
+ /* Allocate one page per Rx queue slot. Each page is split into two
+ * packet buffers; when possible we "page flip" between the two.
+ */
+ slots = rx->data.mask + 1;
+
+ rx->data.page_info = kvzalloc(slots *
+ sizeof(*rx->data.page_info), GFP_KERNEL);
+ if (!rx->data.page_info)
+ return -ENOMEM;
+
+ rx->data.qpl = gve_assign_rx_qpl(priv);
+
+ for (i = 0; i < slots; i++) {
+ struct page *page = rx->data.qpl->pages[i];
+ dma_addr_t addr = i * PAGE_SIZE;
+
+ gve_setup_rx_buffer(&rx->data.page_info[i],
+ &rx->data.data_ring[i], addr, page);
+ }
+
+ return slots;
+}
+
+static void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx)
+{
+ u32 ntfy_idx = gve_rx_idx_to_ntfy(priv, queue_idx);
+ struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
+ struct gve_rx_ring *rx = &priv->rx[queue_idx];
+
+ block->rx = rx;
+ rx->ntfy_id = ntfy_idx;
+}
+
+static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
+{
+ struct gve_rx_ring *rx = &priv->rx[idx];
+ struct device *hdev = &priv->pdev->dev;
+ u32 slots, npages;
+ int filled_pages;
+ size_t bytes;
+ int err;
+
+ netif_dbg(priv, drv, priv->dev, "allocating rx ring\n");
+ /* Make sure everything is zeroed to start with */
+ memset(rx, 0, sizeof(*rx));
+
+ rx->gve = priv;
+ rx->q_num = idx;
+
+ slots = priv->rx_pages_per_qpl;
+ rx->data.mask = slots - 1;
+
+ /* alloc rx data ring */
+ bytes = sizeof(*rx->data.data_ring) * slots;
+ rx->data.data_ring = dma_alloc_coherent(hdev, bytes,
+ &rx->data.data_bus,
+ GFP_KERNEL);
+ if (!rx->data.data_ring)
+ return -ENOMEM;
+ filled_pages = gve_prefill_rx_pages(rx);
+ if (filled_pages < 0) {
+ err = -ENOMEM;
+ goto abort_with_slots;
+ }
+ rx->desc.fill_cnt = filled_pages;
+ /* Ensure data ring slots (packet buffers) are visible. */
+ dma_wmb();
+
+ /* Alloc gve_queue_resources */
+ rx->q_resources =
+ dma_alloc_coherent(hdev,
+ sizeof(*rx->q_resources),
+ &rx->q_resources_bus,
+ GFP_KERNEL);
+ if (!rx->q_resources) {
+ err = -ENOMEM;
+ goto abort_filled;
+ }
+ netif_dbg(priv, drv, priv->dev, "rx[%d]->data.data_bus=%lx\n", idx,
+ (unsigned long)rx->data.data_bus);
+
+ /* alloc rx desc ring */
+ bytes = sizeof(struct gve_rx_desc) * priv->rx_desc_cnt;
+ npages = bytes / PAGE_SIZE;
+ if (npages * PAGE_SIZE != bytes) {
+ err = -EIO;
+ goto abort_with_q_resources;
+ }
+
+ rx->desc.desc_ring = dma_alloc_coherent(hdev, bytes, &rx->desc.bus,
+ GFP_KERNEL);
+ if (!rx->desc.desc_ring) {
+ err = -ENOMEM;
+ goto abort_with_q_resources;
+ }
+ rx->desc.mask = slots - 1;
+ rx->desc.cnt = 0;
+ rx->desc.seqno = 1;
+ gve_rx_add_to_block(priv, idx);
+
+ return 0;
+
+abort_with_q_resources:
+ dma_free_coherent(hdev, sizeof(*rx->q_resources),
+ rx->q_resources, rx->q_resources_bus);
+ rx->q_resources = NULL;
+abort_filled:
+ kfree(rx->data.page_info);
+abort_with_slots:
+ bytes = sizeof(*rx->data.data_ring) * slots;
+ dma_free_coherent(hdev, bytes, rx->data.data_ring, rx->data.data_bus);
+ rx->data.data_ring = NULL;
+
+ return err;
+}
+
+int gve_rx_alloc_rings(struct gve_priv *priv)
+{
+ int err = 0;
+ int i;
+
+ for (i = 0; i < priv->rx_cfg.num_queues; i++) {
+ err = gve_rx_alloc_ring(priv, i);
+ if (err) {
+ netif_err(priv, drv, priv->dev,
+ "Failed to alloc rx ring=%d: err=%d\n",
+ i, err);
+ break;
+ }
+ }
+ /* Free any rings that were allocated before the error */
+ if (err) {
+ int j;
+
+ for (j = 0; j < i; j++)
+ gve_rx_free_ring(priv, j);
+ }
+ return err;
+}
+
+void gve_rx_free_rings(struct gve_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->rx_cfg.num_queues; i++)
+ gve_rx_free_ring(priv, i);
+}
+
+void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx)
+{
+ u32 db_idx = be32_to_cpu(rx->q_resources->db_index);
+
+ iowrite32be(rx->desc.fill_cnt, &priv->db_bar2[db_idx]);
+}
+
+static enum pkt_hash_types gve_rss_type(__be16 pkt_flags)
+{
+ if (likely(pkt_flags & (GVE_RXF_TCP | GVE_RXF_UDP)))
+ return PKT_HASH_TYPE_L4;
+ if (pkt_flags & (GVE_RXF_IPV4 | GVE_RXF_IPV6))
+ return PKT_HASH_TYPE_L3;
+ return PKT_HASH_TYPE_L2;
+}
+
+static struct sk_buff *gve_rx_copy(struct net_device *dev,
+ struct napi_struct *napi,
+ struct gve_rx_slot_page_info *page_info,
+ u16 len)
+{
+ struct sk_buff *skb = napi_alloc_skb(napi, len);
+ void *va = page_info->page_address + GVE_RX_PAD +
+ page_info->page_offset;
+
+ if (unlikely(!skb))
+ return NULL;
+
+ __skb_put(skb, len);
+
+ skb_copy_to_linear_data(skb, va, len);
+
+ skb->protocol = eth_type_trans(skb, dev);
+ return skb;
+}
+
+static struct sk_buff *gve_rx_add_frags(struct net_device *dev,
+ struct napi_struct *napi,
+ struct gve_rx_slot_page_info *page_info,
+ u16 len)
+{
+ struct sk_buff *skb = napi_get_frags(napi);
+
+ if (unlikely(!skb))
+ return NULL;
+
+ skb_add_rx_frag(skb, 0, page_info->page,
+ page_info->page_offset +
+ GVE_RX_PAD, len, PAGE_SIZE / 2);
+
+ return skb;
+}
+
+static void gve_rx_flip_buff(struct gve_rx_slot_page_info *page_info,
+ struct gve_rx_data_slot *data_ring)
+{
+ u64 addr = be64_to_cpu(data_ring->qpl_offset);
+
+ page_info->page_offset ^= PAGE_SIZE / 2;
+ addr ^= PAGE_SIZE / 2;
+ data_ring->qpl_offset = cpu_to_be64(addr);
+}
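The flip above works because each 4 KiB page is carved into two 2 KiB packet buffers, so toggling between them is a single XOR with PAGE_SIZE / 2 applied to both the in-page offset and the device-visible QPL offset. A minimal stand-alone sketch of the same toggle, with made-up values (not driver code):

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long qpl_offset = 3 * PAGE_SIZE;	/* first half of page 3 */
	unsigned long page_offset = 0;

	/* flip to the second half of the page */
	page_offset ^= PAGE_SIZE / 2;			/* 0     -> 2048  */
	qpl_offset ^= PAGE_SIZE / 2;			/* 12288 -> 14336 */
	printf("page_offset=%lu qpl_offset=%lu\n", page_offset, qpl_offset);

	/* flipping again returns to the first half */
	page_offset ^= PAGE_SIZE / 2;
	qpl_offset ^= PAGE_SIZE / 2;
	printf("page_offset=%lu qpl_offset=%lu\n", page_offset, qpl_offset);
	return 0;
}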
+
+static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc,
+ netdev_features_t feat)
+{
+ struct gve_rx_slot_page_info *page_info;
+ struct gve_priv *priv = rx->gve;
+ struct napi_struct *napi = &priv->ntfy_blocks[rx->ntfy_id].napi;
+ struct net_device *dev = priv->dev;
+ struct sk_buff *skb;
+ int pagecount;
+ u16 len;
+ u32 idx;
+
+ /* drop this packet */
+ if (unlikely(rx_desc->flags_seq & GVE_RXF_ERR))
+ return true;
+
+ len = be16_to_cpu(rx_desc->len) - GVE_RX_PAD;
+ idx = rx->data.cnt & rx->data.mask;
+ page_info = &rx->data.page_info[idx];
+
+ /* gvnic can only receive into registered segments. If the buffer
+ * can't be recycled, our only choice is to copy the data out of
+ * it so that we can return it to the device.
+ */
+
+ if (PAGE_SIZE == 4096) {
+ if (len <= priv->rx_copybreak) {
+ /* Just copy small packets */
+ skb = gve_rx_copy(dev, napi, page_info, len);
+ goto have_skb;
+ }
+ if (unlikely(!gve_can_recycle_pages(dev))) {
+ skb = gve_rx_copy(dev, napi, page_info, len);
+ goto have_skb;
+ }
+ pagecount = page_count(page_info->page);
+ if (pagecount == 1) {
+ /* No part of this page is used by any SKBs; we attach
+ * the page fragment to a new SKB and pass it up the
+ * stack.
+ */
+ skb = gve_rx_add_frags(dev, napi, page_info, len);
+ if (!skb)
+ return true;
+ /* Make sure the kernel stack can't release the page */
+ get_page(page_info->page);
+ /* "flip" to other packet buffer on this page */
+ gve_rx_flip_buff(page_info, &rx->data.data_ring[idx]);
+ } else if (pagecount >= 2) {
+ /* We have previously passed the other half of this
+ * page up the stack, but it has not yet been freed.
+ */
+ skb = gve_rx_copy(dev, napi, page_info, len);
+ } else {
+ WARN(pagecount < 1, "Pagecount should never be < 1");
+ return false;
+ }
+ } else {
+ skb = gve_rx_copy(dev, napi, page_info, len);
+ }
+
+have_skb:
+ /* If we failed to allocate an skb, drop the packet; this is not a
+ * reset-worthy failure.
+ */
+ if (!skb)
+ return true;
+
+ rx->data.cnt++;
+
+ if (likely(feat & NETIF_F_RXCSUM)) {
+ /* NIC passes up the partial sum */
+ if (rx_desc->csum)
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ else
+ skb->ip_summed = CHECKSUM_NONE;
+ skb->csum = csum_unfold(rx_desc->csum);
+ }
+
+ /* parse flags & pass relevant info up */
+ if (likely(feat & NETIF_F_RXHASH) &&
+ gve_needs_rss(rx_desc->flags_seq))
+ skb_set_hash(skb, be32_to_cpu(rx_desc->rss_hash),
+ gve_rss_type(rx_desc->flags_seq));
+
+ if (skb_is_nonlinear(skb))
+ napi_gro_frags(napi);
+ else
+ napi_gro_receive(napi, skb);
+ return true;
+}
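The branch structure above can be read as a small decision table. Below is a hedged sketch of the same policy as a free-standing helper; rx_choose_action() and its parameters are hypothetical names used for illustration, not part of the driver:

#include <stdbool.h>

enum rx_action { RX_COPY, RX_RECYCLE };

/* Sketch only: mirrors the decisions made inline in gve_rx() above.
 * RX_RECYCLE: attach the half-page as a frag and flip to the other half.
 * RX_COPY: memcpy into a fresh skb and leave the buffer with the device.
 */
static enum rx_action rx_choose_action(unsigned long page_size,
				       unsigned int pkt_len,
				       unsigned int rx_copybreak,
				       bool can_recycle, int page_refcount)
{
	if (page_size != 4096)		/* only 4 KiB pages are split in half */
		return RX_COPY;
	if (pkt_len <= rx_copybreak)	/* small packets are cheaper to copy */
		return RX_COPY;
	if (!can_recycle)		/* no spare QPL pages to hand out */
		return RX_COPY;
	if (page_refcount == 1)		/* both halves idle: pass one up, flip */
		return RX_RECYCLE;
	return RX_COPY;			/* other half still held by the stack */
}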
+
+static bool gve_rx_work_pending(struct gve_rx_ring *rx)
+{
+ struct gve_rx_desc *desc;
+ __be16 flags_seq;
+ u32 next_idx;
+
+ next_idx = rx->desc.cnt & rx->desc.mask;
+ desc = rx->desc.desc_ring + next_idx;
+
+ flags_seq = desc->flags_seq;
+ /* Make sure we have synchronized the seq no with the device */
+ smp_rmb();
+
+ return (GVE_SEQNO(flags_seq) == rx->desc.seqno);
+}
+
+bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
+ netdev_features_t feat)
+{
+ struct gve_priv *priv = rx->gve;
+ struct gve_rx_desc *desc;
+ u32 cnt = rx->desc.cnt;
+ u32 idx = cnt & rx->desc.mask;
+ u32 work_done = 0;
+ u64 bytes = 0;
+
+ desc = rx->desc.desc_ring + idx;
+ while ((GVE_SEQNO(desc->flags_seq) == rx->desc.seqno) &&
+ work_done < budget) {
+ netif_info(priv, rx_status, priv->dev,
+ "[%d] idx=%d desc=%p desc->flags_seq=0x%x\n",
+ rx->q_num, idx, desc, desc->flags_seq);
+ netif_info(priv, rx_status, priv->dev,
+ "[%d] seqno=%d rx->desc.seqno=%d\n",
+ rx->q_num, GVE_SEQNO(desc->flags_seq),
+ rx->desc.seqno);
+ bytes += be16_to_cpu(desc->len) - GVE_RX_PAD;
+ if (!gve_rx(rx, desc, feat))
+ gve_schedule_reset(priv);
+ cnt++;
+ idx = cnt & rx->desc.mask;
+ desc = rx->desc.desc_ring + idx;
+ rx->desc.seqno = gve_next_seqno(rx->desc.seqno);
+ work_done++;
+ }
+
+ if (!work_done)
+ return false;
+
+ u64_stats_update_begin(&rx->statss);
+ rx->rpackets += work_done;
+ rx->rbytes += bytes;
+ u64_stats_update_end(&rx->statss);
+ rx->desc.cnt = cnt;
+ rx->desc.fill_cnt += work_done;
+
+ /* restock desc ring slots */
+ dma_wmb(); /* Ensure descs are visible before ringing doorbell */
+ gve_rx_write_doorbell(priv, rx);
+ return gve_rx_work_pending(rx);
+}
+
+bool gve_rx_poll(struct gve_notify_block *block, int budget)
+{
+ struct gve_rx_ring *rx = block->rx;
+ netdev_features_t feat;
+ bool repoll = false;
+
+ feat = block->napi.dev->features;
+
+ /* If budget is 0, do all the work */
+ if (budget == 0)
+ budget = INT_MAX;
+
+ if (budget > 0)
+ repoll |= gve_clean_rx_done(rx, budget, feat);
+ else
+ repoll |= gve_rx_work_pending(rx);
+ return repoll;
+}
diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
new file mode 100644
index 000000000000..778b87b5a06c
--- /dev/null
+++ b/drivers/net/ethernet/google/gve/gve_tx.c
@@ -0,0 +1,584 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Google virtual Ethernet (gve) driver
+ *
+ * Copyright (C) 2015-2019 Google, Inc.
+ */
+
+#include "gve.h"
+#include "gve_adminq.h"
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/vmalloc.h>
+#include <linux/skbuff.h>
+
+static inline void gve_tx_put_doorbell(struct gve_priv *priv,
+ struct gve_queue_resources *q_resources,
+ u32 val)
+{
+ iowrite32be(val, &priv->db_bar2[be32_to_cpu(q_resources->db_index)]);
+}
+
+/* gvnic can only transmit from a Registered Segment.
+ * We copy skb payloads into the registered segment before writing Tx
+ * descriptors and ringing the Tx doorbell.
+ *
+ * gve_tx_fifo_* manages the Registered Segment as a FIFO - clients must
+ * free allocations in the order they were allocated.
+ */
+
+static int gve_tx_fifo_init(struct gve_priv *priv, struct gve_tx_fifo *fifo)
+{
+ fifo->base = vmap(fifo->qpl->pages, fifo->qpl->num_entries, VM_MAP,
+ PAGE_KERNEL);
+ if (unlikely(!fifo->base)) {
+ netif_err(priv, drv, priv->dev, "Failed to vmap fifo, qpl_id = %d\n",
+ fifo->qpl->id);
+ return -ENOMEM;
+ }
+
+ fifo->size = fifo->qpl->num_entries * PAGE_SIZE;
+ atomic_set(&fifo->available, fifo->size);
+ fifo->head = 0;
+ return 0;
+}
+
+static void gve_tx_fifo_release(struct gve_priv *priv, struct gve_tx_fifo *fifo)
+{
+ WARN(atomic_read(&fifo->available) != fifo->size,
+ "Releasing non-empty fifo");
+
+ vunmap(fifo->base);
+}
+
+static int gve_tx_fifo_pad_alloc_one_frag(struct gve_tx_fifo *fifo,
+ size_t bytes)
+{
+ return (fifo->head + bytes < fifo->size) ? 0 : fifo->size - fifo->head;
+}
+
+static bool gve_tx_fifo_can_alloc(struct gve_tx_fifo *fifo, size_t bytes)
+{
+ return atomic_read(&fifo->available) > bytes;
+}
+
+/* gve_tx_alloc_fifo - Allocate fragment(s) from Tx FIFO
+ * @fifo: FIFO to allocate from
+ * @bytes: Allocation size
+ * @iov: Scatter-gather elements to fill with allocation fragment base/len
+ *
+ * Returns number of valid elements in iov[] or negative on error.
+ *
+ * Allocations from a given FIFO must be externally synchronized but concurrent
+ * allocation and frees are allowed.
+ */
+static int gve_tx_alloc_fifo(struct gve_tx_fifo *fifo, size_t bytes,
+ struct gve_tx_iovec iov[2])
+{
+ size_t overflow, padding;
+ u32 aligned_head;
+ int nfrags = 0;
+
+ if (!bytes)
+ return 0;
+
+ /* This check happens before we know how much padding is needed to
+ * align to a cacheline boundary for the payload, but that is fine,
+ * because the FIFO head always starts aligned, and the FIFO's boundaries
+ * are aligned, so if there is space for the data, there is space for
+ * the padding to the next alignment.
+ */
+ WARN(!gve_tx_fifo_can_alloc(fifo, bytes),
+ "Reached %s when there's not enough space in the fifo", __func__);
+
+ nfrags++;
+
+ iov[0].iov_offset = fifo->head;
+ iov[0].iov_len = bytes;
+ fifo->head += bytes;
+
+ if (fifo->head > fifo->size) {
+ /* If the allocation did not fit in the tail fragment of the
+ * FIFO, also use the head fragment.
+ */
+ nfrags++;
+ overflow = fifo->head - fifo->size;
+ iov[0].iov_len -= overflow;
+ iov[1].iov_offset = 0; /* Start of fifo */
+ iov[1].iov_len = overflow;
+
+ fifo->head = overflow;
+ }
+
+ /* Re-align to a cacheline boundary */
+ aligned_head = L1_CACHE_ALIGN(fifo->head);
+ padding = aligned_head - fifo->head;
+ iov[nfrags - 1].iov_padding = padding;
+ atomic_sub(bytes + padding, &fifo->available);
+ fifo->head = aligned_head;
+
+ if (fifo->head == fifo->size)
+ fifo->head = 0;
+
+ return nfrags;
+}
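As a concrete usage sketch of the allocator above, with hypothetical sizes (a 16 KiB FIFO whose head sits at 15 KiB, 64-byte cache lines); this is illustration, not driver code:

struct gve_tx_iovec iov[2];
int nfrags;

/* Assume fifo->size == 16384 and fifo->head == 15360. */
nfrags = gve_tx_alloc_fifo(fifo, 2048, iov);
/* nfrags == 2:
 *   iov[0] = { .iov_offset = 15360, .iov_len = 1024 }   tail of the FIFO
 *   iov[1] = { .iov_offset = 0,     .iov_len = 1024 }   wrapped to the start
 * fifo->head ends up at 1024, which is already cache-line aligned, so no
 * padding is added and exactly 2048 bytes are subtracted from
 * fifo->available.
 */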
+
+/* gve_tx_free_fifo - Return space to Tx FIFO
+ * @fifo: FIFO to return fragments to
+ * @bytes: Bytes to free
+ */
+static void gve_tx_free_fifo(struct gve_tx_fifo *fifo, size_t bytes)
+{
+ atomic_add(bytes, &fifo->available);
+}
+
+static void gve_tx_remove_from_block(struct gve_priv *priv, int queue_idx)
+{
+ struct gve_notify_block *block =
+ &priv->ntfy_blocks[gve_tx_idx_to_ntfy(priv, queue_idx)];
+
+ block->tx = NULL;
+}
+
+static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
+ u32 to_do, bool try_to_wake);
+
+static void gve_tx_free_ring(struct gve_priv *priv, int idx)
+{
+ struct gve_tx_ring *tx = &priv->tx[idx];
+ struct device *hdev = &priv->pdev->dev;
+ size_t bytes;
+ u32 slots;
+
+ gve_tx_remove_from_block(priv, idx);
+ slots = tx->mask + 1;
+ gve_clean_tx_done(priv, tx, tx->req, false);
+ netdev_tx_reset_queue(tx->netdev_txq);
+
+ dma_free_coherent(hdev, sizeof(*tx->q_resources),
+ tx->q_resources, tx->q_resources_bus);
+ tx->q_resources = NULL;
+
+ gve_tx_fifo_release(priv, &tx->tx_fifo);
+ gve_unassign_qpl(priv, tx->tx_fifo.qpl->id);
+ tx->tx_fifo.qpl = NULL;
+
+ bytes = sizeof(*tx->desc) * slots;
+ dma_free_coherent(hdev, bytes, tx->desc, tx->bus);
+ tx->desc = NULL;
+
+ vfree(tx->info);
+ tx->info = NULL;
+
+ netif_dbg(priv, drv, priv->dev, "freed tx queue %d\n", idx);
+}
+
+static void gve_tx_add_to_block(struct gve_priv *priv, int queue_idx)
+{
+ int ntfy_idx = gve_tx_idx_to_ntfy(priv, queue_idx);
+ struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
+ struct gve_tx_ring *tx = &priv->tx[queue_idx];
+
+ block->tx = tx;
+ tx->ntfy_id = ntfy_idx;
+}
+
+static int gve_tx_alloc_ring(struct gve_priv *priv, int idx)
+{
+ struct gve_tx_ring *tx = &priv->tx[idx];
+ struct device *hdev = &priv->pdev->dev;
+ u32 slots = priv->tx_desc_cnt;
+ size_t bytes;
+
+ /* Make sure everything is zeroed to start */
+ memset(tx, 0, sizeof(*tx));
+ tx->q_num = idx;
+
+ tx->mask = slots - 1;
+
+ /* alloc metadata */
+ tx->info = vzalloc(sizeof(*tx->info) * slots);
+ if (!tx->info)
+ return -ENOMEM;
+
+ /* alloc tx queue */
+ bytes = sizeof(*tx->desc) * slots;
+ tx->desc = dma_alloc_coherent(hdev, bytes, &tx->bus, GFP_KERNEL);
+ if (!tx->desc)
+ goto abort_with_info;
+
+ tx->tx_fifo.qpl = gve_assign_tx_qpl(priv);
+
+ /* map Tx FIFO */
+ if (gve_tx_fifo_init(priv, &tx->tx_fifo))
+ goto abort_with_desc;
+
+ tx->q_resources =
+ dma_alloc_coherent(hdev,
+ sizeof(*tx->q_resources),
+ &tx->q_resources_bus,
+ GFP_KERNEL);
+ if (!tx->q_resources)
+ goto abort_with_fifo;
+
+ netif_dbg(priv, drv, priv->dev, "tx[%d]->bus=%lx\n", idx,
+ (unsigned long)tx->bus);
+ tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx);
+ gve_tx_add_to_block(priv, idx);
+
+ return 0;
+
+abort_with_fifo:
+ gve_tx_fifo_release(priv, &tx->tx_fifo);
+abort_with_desc:
+ dma_free_coherent(hdev, bytes, tx->desc, tx->bus);
+ tx->desc = NULL;
+abort_with_info:
+ vfree(tx->info);
+ tx->info = NULL;
+ return -ENOMEM;
+}
+
+int gve_tx_alloc_rings(struct gve_priv *priv)
+{
+ int err = 0;
+ int i;
+
+ for (i = 0; i < priv->tx_cfg.num_queues; i++) {
+ err = gve_tx_alloc_ring(priv, i);
+ if (err) {
+ netif_err(priv, drv, priv->dev,
+ "Failed to alloc tx ring=%d: err=%d\n",
+ i, err);
+ break;
+ }
+ }
+ /* Free any rings that were allocated before the error */
+ if (err) {
+ int j;
+
+ for (j = 0; j < i; j++)
+ gve_tx_free_ring(priv, j);
+ }
+ return err;
+}
+
+void gve_tx_free_rings(struct gve_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->tx_cfg.num_queues; i++)
+ gve_tx_free_ring(priv, i);
+}
+
+/* gve_tx_avail - Calculates the number of slots available in the ring
+ * @tx: tx ring to check
+ *
+ * Returns the number of slots available
+ *
+ * The capacity of the queue is mask + 1. We don't need to reserve an entry.
+ */
+static inline u32 gve_tx_avail(struct gve_tx_ring *tx)
+{
+ return tx->mask + 1 - (tx->req - tx->done);
+}
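Because tx->req and tx->done are free-running 32-bit counters, the unsigned difference stays correct even across wraparound. A hedged stand-alone check with a hypothetical 256-entry ring:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t mask = 255;			/* 256-entry ring (hypothetical) */
	uint32_t req = UINT32_MAX;		/* about to wrap past zero */
	uint32_t done = UINT32_MAX - 10;	/* 10 descriptors still in flight */

	assert(req - done == 10);		/* in-flight count */
	assert(mask + 1 - (req - done) == 246);	/* slots still available */

	req += 20;				/* wraps: req is now 19 */
	assert(req - done == 30);
	assert(mask + 1 - (req - done) == 226);
	return 0;
}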
+
+static inline int gve_skb_fifo_bytes_required(struct gve_tx_ring *tx,
+ struct sk_buff *skb)
+{
+ int pad_bytes, align_hdr_pad;
+ int bytes;
+ int hlen;
+
+ hlen = skb_is_gso(skb) ? skb_checksum_start_offset(skb) +
+ tcp_hdrlen(skb) : skb_headlen(skb);
+
+ pad_bytes = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo,
+ hlen);
+ /* We need to take into account the header alignment padding. */
+ align_hdr_pad = L1_CACHE_ALIGN(hlen) - hlen;
+ bytes = align_hdr_pad + pad_bytes + skb->len;
+
+ return bytes;
+}
+
+/* The most descriptors we could need are 3 - 1 for the headers, 1 for
+ * the beginning of the payload at the end of the FIFO, and 1 if the
+ * payload wraps to the beginning of the FIFO.
+ */
+#define MAX_TX_DESC_NEEDED 3
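One hedged worst case that needs all three descriptors: the copied headers always take one packet descriptor, and a payload that begins near the end of the FIFO and wraps to its start comes back from gve_tx_alloc_fifo() as two fragments, each needing its own segment descriptor.

/* Illustration only (hypothetical layout):
 *   headers (copied whole)                  -> 1 packet descriptor
 *   payload fragment in the FIFO tail       -> 1 segment descriptor
 *   payload fragment wrapped to the head    -> 1 segment descriptor
 * Total: 3 == MAX_TX_DESC_NEEDED
 */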
+
+/* Check if sufficient resources (descriptor ring space, FIFO space) are
+ * available to transmit the given number of bytes.
+ */
+static inline bool gve_can_tx(struct gve_tx_ring *tx, int bytes_required)
+{
+ return (gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED &&
+ gve_tx_fifo_can_alloc(&tx->tx_fifo, bytes_required));
+}
+
+/* Stops the queue if the skb cannot be transmitted. */
+static int gve_maybe_stop_tx(struct gve_tx_ring *tx, struct sk_buff *skb)
+{
+ int bytes_required;
+
+ bytes_required = gve_skb_fifo_bytes_required(tx, skb);
+ if (likely(gve_can_tx(tx, bytes_required)))
+ return 0;
+
+ /* No space, so stop the queue */
+ tx->stop_queue++;
+ netif_tx_stop_queue(tx->netdev_txq);
+ smp_mb(); /* sync with restarting queue in gve_clean_tx_done() */
+
+ /* Now check for resources again, in case gve_clean_tx_done() freed
+ * resources after we checked and we stopped the queue after
+ * gve_clean_tx_done() checked.
+ *
+ * gve_maybe_stop_tx()            gve_clean_tx_done()
+ *   nsegs/can_alloc test failed
+ *                                  gve_tx_free_fifo()
+ *                                  if (tx queue stopped)
+ *                                    netif_tx_queue_wake()
+ *   netif_tx_stop_queue()
+ *   Need to check again for space here!
+ */
+ if (likely(!gve_can_tx(tx, bytes_required)))
+ return -EBUSY;
+
+ netif_tx_start_queue(tx->netdev_txq);
+ tx->wake_queue++;
+ return 0;
+}
+
+static void gve_tx_fill_pkt_desc(union gve_tx_desc *pkt_desc,
+ struct sk_buff *skb, bool is_gso,
+ int l4_hdr_offset, u32 desc_cnt,
+ u16 hlen, u64 addr)
+{
+ /* l4_hdr_offset and csum_offset are in units of 16-bit words */
+ if (is_gso) {
+ pkt_desc->pkt.type_flags = GVE_TXD_TSO | GVE_TXF_L4CSUM;
+ pkt_desc->pkt.l4_csum_offset = skb->csum_offset >> 1;
+ pkt_desc->pkt.l4_hdr_offset = l4_hdr_offset >> 1;
+ } else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
+ pkt_desc->pkt.type_flags = GVE_TXD_STD | GVE_TXF_L4CSUM;
+ pkt_desc->pkt.l4_csum_offset = skb->csum_offset >> 1;
+ pkt_desc->pkt.l4_hdr_offset = l4_hdr_offset >> 1;
+ } else {
+ pkt_desc->pkt.type_flags = GVE_TXD_STD;
+ pkt_desc->pkt.l4_csum_offset = 0;
+ pkt_desc->pkt.l4_hdr_offset = 0;
+ }
+ pkt_desc->pkt.desc_cnt = desc_cnt;
+ pkt_desc->pkt.len = cpu_to_be16(skb->len);
+ pkt_desc->pkt.seg_len = cpu_to_be16(hlen);
+ pkt_desc->pkt.seg_addr = cpu_to_be64(addr);
+}
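The >> 1 conversions above express offsets in 16-bit words, as the comment at the top of the function notes. A hedged example with typical (hypothetical) IPv4/TCP values:

/* Plain IPv4/TCP skb, 14-byte Ethernet header, 20-byte IP header, no VLAN:
 *   l4_hdr_offset    = 14 + 20 = 34 bytes -> 34 >> 1 = 17 sixteen-bit words
 *   skb->csum_offset = 16 bytes (offset of the TCP checksum field)
 *                                          -> 16 >> 1 =  8 sixteen-bit words
 */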
+
+static void gve_tx_fill_seg_desc(union gve_tx_desc *seg_desc,
+ struct sk_buff *skb, bool is_gso,
+ u16 len, u64 addr)
+{
+ seg_desc->seg.type_flags = GVE_TXD_SEG;
+ if (is_gso) {
+ if (skb_is_gso_v6(skb))
+ seg_desc->seg.type_flags |= GVE_TXSF_IPV6;
+ seg_desc->seg.l3_offset = skb_network_offset(skb) >> 1;
+ seg_desc->seg.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
+ }
+ seg_desc->seg.seg_len = cpu_to_be16(len);
+ seg_desc->seg.seg_addr = cpu_to_be64(addr);
+}
+
+static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb)
+{
+ int pad_bytes, hlen, hdr_nfrags, payload_nfrags, l4_hdr_offset;
+ union gve_tx_desc *pkt_desc, *seg_desc;
+ struct gve_tx_buffer_state *info;
+ bool is_gso = skb_is_gso(skb);
+ u32 idx = tx->req & tx->mask;
+ int payload_iov = 2;
+ int copy_offset;
+ u32 next_idx;
+ int i;
+
+ info = &tx->info[idx];
+ pkt_desc = &tx->desc[idx];
+
+ l4_hdr_offset = skb_checksum_start_offset(skb);
+ /* If the skb is gso, then we want the tcp header in the first segment;
+ * otherwise we want the linear portion of the skb (which will contain
+ * the checksum because skb->csum_start and skb->csum_offset are given
+ * relative to skb->head) in the first segment.
+ */
+ hlen = is_gso ? l4_hdr_offset + tcp_hdrlen(skb) :
+ skb_headlen(skb);
+
+ info->skb = skb;
+ /* We don't want to split the header, so if necessary, pad to the end
+ * of the fifo and then put the header at the beginning of the fifo.
+ */
+ pad_bytes = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo, hlen);
+ hdr_nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, hlen + pad_bytes,
+ &info->iov[0]);
+ WARN(!hdr_nfrags, "hdr_nfrags should never be 0!");
+ payload_nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, skb->len - hlen,
+ &info->iov[payload_iov]);
+
+ gve_tx_fill_pkt_desc(pkt_desc, skb, is_gso, l4_hdr_offset,
+ 1 + payload_nfrags, hlen,
+ info->iov[hdr_nfrags - 1].iov_offset);
+
+ skb_copy_bits(skb, 0,
+ tx->tx_fifo.base + info->iov[hdr_nfrags - 1].iov_offset,
+ hlen);
+ copy_offset = hlen;
+
+ for (i = payload_iov; i < payload_nfrags + payload_iov; i++) {
+ next_idx = (tx->req + 1 + i - payload_iov) & tx->mask;
+ seg_desc = &tx->desc[next_idx];
+
+ gve_tx_fill_seg_desc(seg_desc, skb, is_gso,
+ info->iov[i].iov_len,
+ info->iov[i].iov_offset);
+
+ skb_copy_bits(skb, copy_offset,
+ tx->tx_fifo.base + info->iov[i].iov_offset,
+ info->iov[i].iov_len);
+ copy_offset += info->iov[i].iov_len;
+ }
+
+ return 1 + payload_nfrags;
+}
+
+netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+ struct gve_tx_ring *tx;
+ int nsegs;
+
+ WARN(skb_get_queue_mapping(skb) >= priv->tx_cfg.num_queues,
+ "skb queue index out of range");
+ tx = &priv->tx[skb_get_queue_mapping(skb)];
+ if (unlikely(gve_maybe_stop_tx(tx, skb))) {
+ /* We need to ring the txq doorbell -- we have stopped the Tx
+ * queue for want of resources, but prior calls to gve_tx()
+ * may have added descriptors without ringing the doorbell.
+ */
+
+ /* Ensure tx descs from a prior gve_tx are visible before
+ * ringing doorbell.
+ */
+ dma_wmb();
+ gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
+ return NETDEV_TX_BUSY;
+ }
+ nsegs = gve_tx_add_skb(tx, skb);
+
+ netdev_tx_sent_queue(tx->netdev_txq, skb->len);
+ skb_tx_timestamp(skb);
+
+ /* give packets to NIC */
+ tx->req += nsegs;
+
+ if (!netif_xmit_stopped(tx->netdev_txq) && netdev_xmit_more())
+ return NETDEV_TX_OK;
+
+ /* Ensure tx descs are visible before ringing doorbell */
+ dma_wmb();
+ gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
+ return NETDEV_TX_OK;
+}
+
+#define GVE_TX_START_THRESH PAGE_SIZE
+
+static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
+ u32 to_do, bool try_to_wake)
+{
+ struct gve_tx_buffer_state *info;
+ u64 pkts = 0, bytes = 0;
+ size_t space_freed = 0;
+ struct sk_buff *skb;
+ int i, j;
+ u32 idx;
+
+ for (j = 0; j < to_do; j++) {
+ idx = tx->done & tx->mask;
+ netif_info(priv, tx_done, priv->dev,
+ "[%d] %s: idx=%d (req=%u done=%u)\n",
+ tx->q_num, __func__, idx, tx->req, tx->done);
+ info = &tx->info[idx];
+ skb = info->skb;
+
+ /* Mark as free */
+ if (skb) {
+ info->skb = NULL;
+ bytes += skb->len;
+ pkts++;
+ dev_consume_skb_any(skb);
+ /* FIFO free */
+ for (i = 0; i < ARRAY_SIZE(info->iov); i++) {
+ space_freed += info->iov[i].iov_len +
+ info->iov[i].iov_padding;
+ info->iov[i].iov_len = 0;
+ info->iov[i].iov_padding = 0;
+ }
+ }
+ tx->done++;
+ }
+
+ gve_tx_free_fifo(&tx->tx_fifo, space_freed);
+ u64_stats_update_begin(&tx->statss);
+ tx->bytes_done += bytes;
+ tx->pkt_done += pkts;
+ u64_stats_update_end(&tx->statss);
+ netdev_tx_completed_queue(tx->netdev_txq, pkts, bytes);
+
+ /* start the queue if we've stopped it */
+#ifndef CONFIG_BQL
+ /* Make sure that the doorbells are synced */
+ smp_mb();
+#endif
+ if (try_to_wake && netif_tx_queue_stopped(tx->netdev_txq) &&
+ likely(gve_can_tx(tx, GVE_TX_START_THRESH))) {
+ tx->wake_queue++;
+ netif_tx_wake_queue(tx->netdev_txq);
+ }
+
+ return pkts;
+}
+
+__be32 gve_tx_load_event_counter(struct gve_priv *priv,
+ struct gve_tx_ring *tx)
+{
+ u32 counter_index = be32_to_cpu((tx->q_resources->counter_index));
+
+ return READ_ONCE(priv->counter_array[counter_index]);
+}
+
+bool gve_tx_poll(struct gve_notify_block *block, int budget)
+{
+ struct gve_priv *priv = block->priv;
+ struct gve_tx_ring *tx = block->tx;
+ bool repoll = false;
+ u32 nic_done;
+ u32 to_do;
+
+ /* If budget is 0, do all the work */
+ if (budget == 0)
+ budget = INT_MAX;
+
+ /* Find out how much work there is to be done */
+ tx->last_nic_done = gve_tx_load_event_counter(priv, tx);
+ nic_done = be32_to_cpu(tx->last_nic_done);
+ if (budget > 0) {
+ /* Do as much of the pending work as the budget allows */
+ to_do = min_t(u32, (nic_done - tx->done), budget);
+ gve_clean_tx_done(priv, tx, to_do, true);
+ }
+ /* If we still have work we want to repoll */
+ repoll |= (nic_done != tx->done);
+ return repoll;
+}
diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig
index a0d780c14e60..3892a2062404 100644
--- a/drivers/net/ethernet/hisilicon/Kconfig
+++ b/drivers/net/ethernet/hisilicon/Kconfig
@@ -46,6 +46,16 @@ config HIP04_ETH
If you wish to compile a kernel for a hardware with hisilicon p04 SoC and
want to use the internal ethernet then you should answer Y to this.
+config HI13X1_GMAC
+ bool "Hisilicon HI13X1 Network Device Support"
+ depends on HIP04_ETH
+ help
+ If you wish to compile a kernel for hardware with the Hisilicon HI13X1 GMAC
+ then you should answer Y to this. This makes this driver suitable for use
+ on certain boards such as the HI13X1.
+
+ If you are unsure, say N.
+
config HNS_MDIO
tristate
select PHYLIB
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index e1f2978506fd..625635771b83 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -16,6 +16,8 @@
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
+#define SC_PPE_RESET_DREQ 0x026C
+
#define PPE_CFG_RX_ADDR 0x100
#define PPE_CFG_POOL_GRP 0x300
#define PPE_CFG_RX_BUF_SIZE 0x400
@@ -33,10 +35,23 @@
#define GE_MODE_CHANGE_REG 0x1b4
#define GE_RECV_CONTROL_REG 0x1e0
#define GE_STATION_MAC_ADDRESS 0x210
-#define PPE_CFG_CPU_ADD_ADDR 0x580
-#define PPE_CFG_MAX_FRAME_LEN_REG 0x408
+
#define PPE_CFG_BUS_CTRL_REG 0x424
#define PPE_CFG_RX_CTRL_REG 0x428
+
+#if defined(CONFIG_HI13X1_GMAC)
+#define PPE_CFG_CPU_ADD_ADDR 0x6D0
+#define PPE_CFG_MAX_FRAME_LEN_REG 0x500
+#define PPE_CFG_RX_PKT_MODE_REG 0x504
+#define PPE_CFG_QOS_VMID_GEN 0x520
+#define PPE_CFG_RX_PKT_INT 0x740
+#define PPE_INTEN 0x700
+#define PPE_INTSTS 0x708
+#define PPE_RINT 0x704
+#define PPE_CFG_STS_MODE 0x880
+#else
+#define PPE_CFG_CPU_ADD_ADDR 0x580
+#define PPE_CFG_MAX_FRAME_LEN_REG 0x408
#define PPE_CFG_RX_PKT_MODE_REG 0x438
#define PPE_CFG_QOS_VMID_GEN 0x500
#define PPE_CFG_RX_PKT_INT 0x538
@@ -44,8 +59,12 @@
#define PPE_INTSTS 0x608
#define PPE_RINT 0x604
#define PPE_CFG_STS_MODE 0x700
+#endif /* CONFIG_HI13X1_GMAC */
+
#define PPE_HIS_RX_PKT_CNT 0x804
+#define RESET_DREQ_ALL 0xffffffff
+
/* REG_INTERRUPT */
#define RCV_INT BIT(10)
#define RCV_NOBUF BIT(8)
@@ -57,8 +76,15 @@
/* TX descriptor config */
#define TX_FREE_MEM BIT(0)
#define TX_READ_ALLOC_L3 BIT(1)
-#define TX_FINISH_CACHE_INV BIT(2)
+#if defined(CONFIG_HI13X1_GMAC)
+#define TX_CLEAR_WB BIT(7)
+#define TX_RELEASE_TO_PPE BIT(4)
+#define TX_FINISH_CACHE_INV BIT(6)
+#define TX_POOL_SHIFT 16
+#else
#define TX_CLEAR_WB BIT(4)
+#define TX_FINISH_CACHE_INV BIT(2)
+#endif
#define TX_L3_CHECKSUM BIT(5)
#define TX_LOOP_BACK BIT(11)
@@ -93,18 +119,35 @@
#define GE_RX_PORT_EN BIT(1)
#define GE_TX_PORT_EN BIT(2)
-#define PPE_CFG_STS_RX_PKT_CNT_RC BIT(12)
-
#define PPE_CFG_RX_PKT_ALIGN BIT(18)
-#define PPE_CFG_QOS_VMID_MODE BIT(14)
+
+#if defined(CONFIG_HI13X1_GMAC)
+#define PPE_CFG_QOS_VMID_GRP_SHIFT 4
+#define PPE_CFG_RX_CTRL_ALIGN_SHIFT 7
+#define PPE_CFG_STS_RX_PKT_CNT_RC BIT(0)
+#define PPE_CFG_QOS_VMID_MODE BIT(15)
+#define PPE_CFG_BUS_LOCAL_REL (BIT(9) | BIT(15) | BIT(19) | BIT(23))
+
+/* buf unit size is cache_line_size, which is 64, so the shift is 6 */
+#define PPE_BUF_SIZE_SHIFT 6
+#define PPE_TX_BUF_HOLD BIT(31)
+#define CACHE_LINE_MASK 0x3F
+#else
#define PPE_CFG_QOS_VMID_GRP_SHIFT 8
+#define PPE_CFG_RX_CTRL_ALIGN_SHIFT 11
+#define PPE_CFG_STS_RX_PKT_CNT_RC BIT(12)
+#define PPE_CFG_QOS_VMID_MODE BIT(14)
+#define PPE_CFG_BUS_LOCAL_REL BIT(14)
+
+/* buf unit size is 1 byte, so the shift is 0 */
+#define PPE_BUF_SIZE_SHIFT 0
+#define PPE_TX_BUF_HOLD 0
+#endif /* CONFIG_HI13X1_GMAC */
#define PPE_CFG_RX_FIFO_FSFU BIT(11)
#define PPE_CFG_RX_DEPTH_SHIFT 16
#define PPE_CFG_RX_START_SHIFT 0
-#define PPE_CFG_RX_CTRL_ALIGN_SHIFT 11
-#define PPE_CFG_BUS_LOCAL_REL BIT(14)
#define PPE_CFG_BUS_BIG_ENDIEN BIT(0)
#define RX_DESC_NUM 128
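A hedged sketch of what these per-variant constants mean for the value the driver writes for a buffer address (the actual writes appear in hip04_set_xmit_desc() and hip04_set_recv_desc() further down); the address is hypothetical:

/* 64-byte-aligned DMA address 0x3a001000:
 *   HI13X1: val = (0x3a001000 >> PPE_BUF_SIZE_SHIFT) | PPE_TX_BUF_HOLD
 *               = (0x3a001000 >> 6) | BIT(31)
 *           i.e. the address is programmed in cache-line units and bit 31
 *           asks the PPE to hold the buffer.
 *   legacy: val = 0x3a001000 >> 0, i.e. the address is programmed in bytes
 *           and there is no hold bit.
 */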
@@ -128,26 +171,50 @@
#define HIP04_MIN_TX_COALESCE_FRAMES 100
struct tx_desc {
+#if defined(CONFIG_HI13X1_GMAC)
+ u32 reserved1[2];
+ u32 send_addr;
+ u16 send_size;
+ u16 data_offset;
+ u32 reserved2[7];
+ u32 cfg;
+ u32 wb_addr;
+ u32 reserved3[3];
+#else
u32 send_addr;
u32 send_size;
u32 next_addr;
u32 cfg;
u32 wb_addr;
+#endif
} __aligned(64);
struct rx_desc {
+#if defined(CONFIG_HI13X1_GMAC)
+ u32 reserved1[3];
+ u16 pkt_len;
+ u16 reserved_16;
+ u32 reserved2[6];
+ u32 pkt_err;
+ u32 reserved3[5];
+#else
u16 reserved_16;
u16 pkt_len;
u32 reserve1[3];
u32 pkt_err;
u32 reserve2[4];
+#endif
};
struct hip04_priv {
void __iomem *base;
+#if defined(CONFIG_HI13X1_GMAC)
+ void __iomem *sysctrl_base;
+#endif
int phy_mode;
int chan;
unsigned int port;
+ unsigned int group;
unsigned int speed;
unsigned int duplex;
unsigned int reg_inten;
@@ -221,6 +288,13 @@ static void hip04_config_port(struct net_device *ndev, u32 speed, u32 duplex)
writel_relaxed(val, priv->base + GE_MODE_CHANGE_REG);
}
+static void hip04_reset_dreq(struct hip04_priv *priv)
+{
+#if defined(CONFIG_HI13X1_GMAC)
+ writel_relaxed(RESET_DREQ_ALL, priv->sysctrl_base + SC_PPE_RESET_DREQ);
+#endif
+}
+
static void hip04_reset_ppe(struct hip04_priv *priv)
{
u32 val, tmp, timeout = 0;
@@ -241,14 +315,14 @@ static void hip04_config_fifo(struct hip04_priv *priv)
val |= PPE_CFG_STS_RX_PKT_CNT_RC;
writel_relaxed(val, priv->base + PPE_CFG_STS_MODE);
- val = BIT(priv->port);
+ val = BIT(priv->group);
regmap_write(priv->map, priv->port * 4 + PPE_CFG_POOL_GRP, val);
- val = priv->port << PPE_CFG_QOS_VMID_GRP_SHIFT;
+ val = priv->group << PPE_CFG_QOS_VMID_GRP_SHIFT;
val |= PPE_CFG_QOS_VMID_MODE;
writel_relaxed(val, priv->base + PPE_CFG_QOS_VMID_GEN);
- val = RX_BUF_SIZE;
+ val = RX_BUF_SIZE >> PPE_BUF_SIZE_SHIFT;
regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_BUF_SIZE, val);
val = RX_DESC_NUM << PPE_CFG_RX_DEPTH_SHIFT;
@@ -285,8 +359,10 @@ static void hip04_config_fifo(struct hip04_priv *priv)
val |= GE_RX_STRIP_PAD | GE_RX_PAD_EN;
writel_relaxed(val, priv->base + GE_RECV_CONTROL_REG);
+#ifndef CONFIG_HI13X1_GMAC
val = GE_AUTO_NEG_CTL;
writel_relaxed(val, priv->base + GE_TX_LOCAL_PAGE_REG);
+#endif
}
static void hip04_mac_enable(struct net_device *ndev)
@@ -329,12 +405,18 @@ static void hip04_mac_disable(struct net_device *ndev)
static void hip04_set_xmit_desc(struct hip04_priv *priv, dma_addr_t phys)
{
- writel(phys, priv->base + PPE_CFG_CPU_ADD_ADDR);
+ u32 val;
+
+ val = phys >> PPE_BUF_SIZE_SHIFT | PPE_TX_BUF_HOLD;
+ writel(val, priv->base + PPE_CFG_CPU_ADD_ADDR);
}
static void hip04_set_recv_desc(struct hip04_priv *priv, dma_addr_t phys)
{
- regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_ADDR, phys);
+ u32 val;
+
+ val = phys >> PPE_BUF_SIZE_SHIFT;
+ regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_ADDR, val);
}
static u32 hip04_recv_cnt(struct hip04_priv *priv)
@@ -442,11 +524,20 @@ hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
priv->tx_skb[tx_head] = skb;
priv->tx_phys[tx_head] = phys;
- desc->send_addr = cpu_to_be32(phys);
- desc->send_size = cpu_to_be32(skb->len);
- desc->cfg = cpu_to_be32(TX_CLEAR_WB | TX_FINISH_CACHE_INV);
+
+ desc->send_size = (__force u32)cpu_to_be32(skb->len);
+#if defined(CONFIG_HI13X1_GMAC)
+ desc->cfg = (__force u32)cpu_to_be32(TX_CLEAR_WB | TX_FINISH_CACHE_INV
+ | TX_RELEASE_TO_PPE | priv->port << TX_POOL_SHIFT);
+ desc->data_offset = (__force u32)cpu_to_be32(phys & CACHE_LINE_MASK);
+ desc->send_addr = (__force u32)cpu_to_be32(phys & ~CACHE_LINE_MASK);
+#else
+ desc->cfg = (__force u32)cpu_to_be32(TX_CLEAR_WB | TX_FINISH_CACHE_INV);
+ desc->send_addr = (__force u32)cpu_to_be32(phys);
+#endif
phys = priv->tx_desc_dma + tx_head * sizeof(struct tx_desc);
- desc->wb_addr = cpu_to_be32(phys);
+ desc->wb_addr = (__force u32)cpu_to_be32(phys +
+ offsetof(struct tx_desc, send_addr));
skb_tx_timestamp(skb);
hip04_set_xmit_desc(priv, phys);
@@ -507,8 +598,8 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
priv->rx_phys[priv->rx_head] = 0;
desc = (struct rx_desc *)skb->data;
- len = be16_to_cpu(desc->pkt_len);
- err = be32_to_cpu(desc->pkt_err);
+ len = be16_to_cpu((__force __be16)desc->pkt_len);
+ err = be32_to_cpu((__force __be32)desc->pkt_err);
if (0 == len) {
dev_kfree_skb_any(skb);
@@ -828,7 +919,16 @@ static int hip04_mac_probe(struct platform_device *pdev)
goto init_fail;
}
- ret = of_parse_phandle_with_fixed_args(node, "port-handle", 2, 0, &arg);
+#if defined(CONFIG_HI13X1_GMAC)
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ priv->sysctrl_base = devm_ioremap_resource(d, res);
+ if (IS_ERR(priv->sysctrl_base)) {
+ ret = PTR_ERR(priv->sysctrl_base);
+ goto init_fail;
+ }
+#endif
+
+ ret = of_parse_phandle_with_fixed_args(node, "port-handle", 3, 0, &arg);
if (ret < 0) {
dev_warn(d, "no port-handle\n");
goto init_fail;
@@ -836,6 +936,7 @@ static int hip04_mac_probe(struct platform_device *pdev)
priv->port = arg.args[0];
priv->chan = arg.args[1] * RX_DESC_NUM;
+ priv->group = arg.args[2];
hrtimer_init(&priv->tx_coalesce_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
@@ -896,6 +997,7 @@ static int hip04_mac_probe(struct platform_device *pdev)
ndev->irq = irq;
netif_napi_add(ndev, &priv->napi, hip04_rx_poll, NAPI_POLL_WEIGHT);
+ hip04_reset_dreq(priv);
hip04_reset_ppe(priv);
if (priv->phy_mode == PHY_INTERFACE_MODE_MII)
hip04_config_port(ndev, SPEED_100, DUPLEX_FULL);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index fe879c07ae3c..2235dd55fab2 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -2370,6 +2370,7 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6;
+ ndev->vlan_features |= NETIF_F_TSO | NETIF_F_TSO6;
ndev->max_mtu = MAC_MAX_MTU_V2 -
(ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
break;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
index 83e19c6b974e..8ad5292eebbe 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -69,7 +69,7 @@ enum hclge_mbx_vlan_cfg_subcode {
};
#define HCLGE_MBX_MAX_MSG_SIZE 16
-#define HCLGE_MBX_MAX_RESP_DATA_SIZE 16
+#define HCLGE_MBX_MAX_RESP_DATA_SIZE 8
#define HCLGE_MBX_RING_MAP_BASIC_MSG_NUM 3
#define HCLGE_MBX_RING_NODE_VARIABLE_NUM 3
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
index fa8b8506b120..908d4f45c06a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
@@ -16,21 +16,18 @@ static LIST_HEAD(hnae3_ae_dev_list);
*/
static DEFINE_MUTEX(hnae3_common_lock);
-static bool hnae3_client_match(enum hnae3_client_type client_type,
- enum hnae3_dev_type dev_type)
+static bool hnae3_client_match(enum hnae3_client_type client_type)
{
- if ((dev_type == HNAE3_DEV_KNIC) && (client_type == HNAE3_CLIENT_KNIC ||
- client_type == HNAE3_CLIENT_ROCE))
- return true;
-
- if (dev_type == HNAE3_DEV_UNIC && client_type == HNAE3_CLIENT_UNIC)
+ if (client_type == HNAE3_CLIENT_KNIC ||
+ client_type == HNAE3_CLIENT_ROCE)
return true;
return false;
}
void hnae3_set_client_init_flag(struct hnae3_client *client,
- struct hnae3_ae_dev *ae_dev, int inited)
+ struct hnae3_ae_dev *ae_dev,
+ unsigned int inited)
{
if (!client || !ae_dev)
return;
@@ -39,9 +36,6 @@ void hnae3_set_client_init_flag(struct hnae3_client *client,
case HNAE3_CLIENT_KNIC:
hnae3_set_bit(ae_dev->flag, HNAE3_KNIC_CLIENT_INITED_B, inited);
break;
- case HNAE3_CLIENT_UNIC:
- hnae3_set_bit(ae_dev->flag, HNAE3_UNIC_CLIENT_INITED_B, inited);
- break;
case HNAE3_CLIENT_ROCE:
hnae3_set_bit(ae_dev->flag, HNAE3_ROCE_CLIENT_INITED_B, inited);
break;
@@ -61,10 +55,6 @@ static int hnae3_get_client_init_flag(struct hnae3_client *client,
inited = hnae3_get_bit(ae_dev->flag,
HNAE3_KNIC_CLIENT_INITED_B);
break;
- case HNAE3_CLIENT_UNIC:
- inited = hnae3_get_bit(ae_dev->flag,
- HNAE3_UNIC_CLIENT_INITED_B);
- break;
case HNAE3_CLIENT_ROCE:
inited = hnae3_get_bit(ae_dev->flag,
HNAE3_ROCE_CLIENT_INITED_B);
@@ -82,7 +72,7 @@ static int hnae3_init_client_instance(struct hnae3_client *client,
int ret;
/* check if this client matches the type of ae_dev */
- if (!(hnae3_client_match(client->type, ae_dev->dev_type) &&
+ if (!(hnae3_client_match(client->type) &&
hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))) {
return 0;
}
@@ -99,7 +89,7 @@ static void hnae3_uninit_client_instance(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev)
{
/* check if this client matches the type of ae_dev */
- if (!(hnae3_client_match(client->type, ae_dev->dev_type) &&
+ if (!(hnae3_client_match(client->type) &&
hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B)))
return;
@@ -251,6 +241,7 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo)
ae_algo->ops->uninit_ae_dev(ae_dev);
hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
+ ae_dev->ops = NULL;
}
list_del(&ae_algo->node);
@@ -351,6 +342,7 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev)
ae_algo->ops->uninit_ae_dev(ae_dev);
hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
+ ae_dev->ops = NULL;
}
list_del(&ae_dev->node);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index ad21b0ef1946..48c7b70fc2c4 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -102,15 +102,9 @@ enum hnae3_loop {
enum hnae3_client_type {
HNAE3_CLIENT_KNIC,
- HNAE3_CLIENT_UNIC,
HNAE3_CLIENT_ROCE,
};
-enum hnae3_dev_type {
- HNAE3_DEV_KNIC,
- HNAE3_DEV_UNIC,
-};
-
/* mac media type */
enum hnae3_media_type {
HNAE3_MEDIA_TYPE_UNKNOWN,
@@ -154,7 +148,6 @@ enum hnae3_reset_type {
HNAE3_VF_FULL_RESET,
HNAE3_FLR_RESET,
HNAE3_FUNC_RESET,
- HNAE3_CORE_RESET,
HNAE3_GLOBAL_RESET,
HNAE3_IMP_RESET,
HNAE3_UNKNOWN_RESET,
@@ -220,8 +213,7 @@ struct hnae3_ae_dev {
const struct hnae3_ae_ops *ops;
struct list_head node;
u32 flag;
- u8 override_pci_need_reset; /* fix to stop multiple reset happening */
- enum hnae3_dev_type dev_type;
+ unsigned long hw_err_reset_req;
enum hnae3_reset_type reset_type;
void *priv;
};
@@ -271,6 +263,8 @@ struct hnae3_ae_dev {
* get auto autonegotiation of pause frame use
* restart_autoneg()
* restart autonegotiation
+ * halt_autoneg()
+ * halt/resume autonegotiation when autonegotiation is on
* get_coalesce_usecs()
* get usecs to delay a TX interrupt after a packet is sent
* get_rx_max_coalesced_frames()
@@ -339,10 +333,14 @@ struct hnae3_ae_dev {
* Set vlan filter config of Ports
* set_vf_vlan_filter()
* Set vlan filter config of vf
+ * restore_vlan_table()
+ * Restore vlan filter entries after reset
* enable_hw_strip_rxvtag()
* Enable/disable hardware strip vlan tag of packets received
* set_gro_en
* Enable/disable HW GRO
+ * add_arfs_entry
+ * Check the 5-tuple of a flow and create a flow director rule
*/
struct hnae3_ae_ops {
int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev);
@@ -386,6 +384,7 @@ struct hnae3_ae_ops {
int (*set_autoneg)(struct hnae3_handle *handle, bool enable);
int (*get_autoneg)(struct hnae3_handle *handle);
int (*restart_autoneg)(struct hnae3_handle *handle);
+ int (*halt_autoneg)(struct hnae3_handle *handle, bool halt);
void (*get_coalesce_usecs)(struct hnae3_handle *handle,
u32 *tx_usecs, u32 *rx_usecs);
@@ -463,6 +462,8 @@ struct hnae3_ae_ops {
u16 vlan, u8 qos, __be16 proto);
int (*enable_hw_strip_rxvtag)(struct hnae3_handle *handle, bool enable);
void (*reset_event)(struct pci_dev *pdev, struct hnae3_handle *handle);
+ enum hnae3_reset_type (*get_reset_level)(struct hnae3_ae_dev *ae_dev,
+ unsigned long *addr);
void (*set_default_reset_request)(struct hnae3_ae_dev *ae_dev,
enum hnae3_reset_type rst_type);
void (*get_channels)(struct hnae3_handle *handle,
@@ -492,7 +493,9 @@ struct hnae3_ae_ops {
struct ethtool_rxnfc *cmd, u32 *rule_locs);
int (*restore_fd_rules)(struct hnae3_handle *handle);
void (*enable_fd)(struct hnae3_handle *handle, bool enable);
- int (*dbg_run_cmd)(struct hnae3_handle *handle, char *cmd_buf);
+ int (*add_arfs_entry)(struct hnae3_handle *handle, u16 queue_id,
+ u16 flow_id, struct flow_keys *fkeys);
+ int (*dbg_run_cmd)(struct hnae3_handle *handle, const char *cmd_buf);
pci_ers_result_t (*handle_hw_ras_error)(struct hnae3_ae_dev *ae_dev);
bool (*get_hw_reset_stat)(struct hnae3_handle *handle);
bool (*ae_dev_resetting)(struct hnae3_handle *handle);
@@ -502,6 +505,7 @@ struct hnae3_ae_ops {
void (*set_timer_task)(struct hnae3_handle *handle, bool enable);
int (*mac_connect_phy)(struct hnae3_handle *handle);
void (*mac_disconnect_phy)(struct hnae3_handle *handle);
+ void (*restore_vlan_table)(struct hnae3_handle *handle);
};
struct hnae3_dcb_ops {
@@ -643,5 +647,6 @@ void hnae3_unregister_client(struct hnae3_client *client);
int hnae3_register_client(struct hnae3_client *client);
void hnae3_set_client_init_flag(struct hnae3_client *client,
- struct hnae3_ae_dev *ae_dev, int inited);
+ struct hnae3_ae_dev *ae_dev,
+ unsigned int inited);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
index b6fabbbdfd5b..d2ec4c573bf8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
@@ -4,8 +4,7 @@
#include "hnae3.h"
#include "hns3_enet.h"
-static
-int hns3_dcbnl_ieee_getets(struct net_device *ndev, struct ieee_ets *ets)
+static int hns3_dcbnl_ieee_getets(struct net_device *ndev, struct ieee_ets *ets)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
@@ -18,8 +17,7 @@ int hns3_dcbnl_ieee_getets(struct net_device *ndev, struct ieee_ets *ets)
return -EOPNOTSUPP;
}
-static
-int hns3_dcbnl_ieee_setets(struct net_device *ndev, struct ieee_ets *ets)
+static int hns3_dcbnl_ieee_setets(struct net_device *ndev, struct ieee_ets *ets)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
@@ -32,8 +30,7 @@ int hns3_dcbnl_ieee_setets(struct net_device *ndev, struct ieee_ets *ets)
return -EOPNOTSUPP;
}
-static
-int hns3_dcbnl_ieee_getpfc(struct net_device *ndev, struct ieee_pfc *pfc)
+static int hns3_dcbnl_ieee_getpfc(struct net_device *ndev, struct ieee_pfc *pfc)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
@@ -46,8 +43,7 @@ int hns3_dcbnl_ieee_getpfc(struct net_device *ndev, struct ieee_pfc *pfc)
return -EOPNOTSUPP;
}
-static
-int hns3_dcbnl_ieee_setpfc(struct net_device *ndev, struct ieee_pfc *pfc)
+static int hns3_dcbnl_ieee_setpfc(struct net_device *ndev, struct ieee_pfc *pfc)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index fc4917ac44be..a4b937286f55 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -11,7 +11,8 @@
static struct dentry *hns3_dbgfs_root;
-static int hns3_dbg_queue_info(struct hnae3_handle *h, char *cmd_buf)
+static int hns3_dbg_queue_info(struct hnae3_handle *h,
+ const char *cmd_buf)
{
struct hns3_nic_priv *priv = h->priv;
struct hns3_nic_ring_data *ring_data;
@@ -155,7 +156,7 @@ static int hns3_dbg_queue_map(struct hnae3_handle *h)
return 0;
}
-static int hns3_dbg_bd_info(struct hnae3_handle *h, char *cmd_buf)
+static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf)
{
struct hns3_nic_priv *priv = h->priv;
struct hns3_nic_ring_data *ring_data;
@@ -252,6 +253,7 @@ static void hns3_dbg_help(struct hnae3_handle *h)
dev_info(&h->pdev->dev, "dump qos buf cfg\n");
dev_info(&h->pdev->dev, "dump mng tbl\n");
dev_info(&h->pdev->dev, "dump reset info\n");
+ dev_info(&h->pdev->dev, "dump m7 info\n");
dev_info(&h->pdev->dev, "dump ncl_config <offset> <length>(in hex)\n");
dev_info(&h->pdev->dev, "dump mac tnl status\n");
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index f326805543a4..310afa708831 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -4,6 +4,9 @@
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
+#ifdef CONFIG_RFS_ACCEL
+#include <linux/cpu_rmap.h>
+#endif
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
@@ -14,6 +17,7 @@
#include <linux/sctp.h>
#include <linux/vermagic.h>
#include <net/gre.h>
+#include <net/ip6_checksum.h>
#include <net/pkt_cls.h>
#include <net/tcp.h>
#include <net/vxlan.h>
@@ -24,8 +28,7 @@
#define hns3_set_field(origin, shift, val) ((origin) |= ((val) << (shift)))
#define hns3_tx_bd_count(S) DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE)
-static void hns3_clear_all_ring(struct hnae3_handle *h);
-static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h);
+static void hns3_clear_all_ring(struct hnae3_handle *h, bool force);
static void hns3_remove_hw_addr(struct net_device *netdev);
static const char hns3_driver_name[] = "hns3";
@@ -79,23 +82,6 @@ static irqreturn_t hns3_irq_handle(int irq, void *vector)
return IRQ_HANDLED;
}
-/* This callback function is used to set affinity changes to the irq affinity
- * masks when the irq_set_affinity_notifier function is used.
- */
-static void hns3_nic_irq_affinity_notify(struct irq_affinity_notify *notify,
- const cpumask_t *mask)
-{
- struct hns3_enet_tqp_vector *tqp_vectors =
- container_of(notify, struct hns3_enet_tqp_vector,
- affinity_notify);
-
- tqp_vectors->affinity_mask = *mask;
-}
-
-static void hns3_nic_irq_affinity_release(struct kref *ref)
-{
-}
-
static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
{
struct hns3_enet_tqp_vector *tqp_vectors;
@@ -107,8 +93,7 @@ static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
continue;
- /* clear the affinity notifier and affinity mask */
- irq_set_affinity_notifier(tqp_vectors->vector_irq, NULL);
+ /* clear the affinity mask */
irq_set_affinity_hint(tqp_vectors->vector_irq, NULL);
/* release the irq resource */
@@ -153,20 +138,14 @@ static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';
ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
- tqp_vectors->name,
- tqp_vectors);
+ tqp_vectors->name, tqp_vectors);
if (ret) {
netdev_err(priv->netdev, "request irq(%d) fail\n",
tqp_vectors->vector_irq);
+ hns3_nic_uninit_irq(priv);
return ret;
}
- tqp_vectors->affinity_notify.notify =
- hns3_nic_irq_affinity_notify;
- tqp_vectors->affinity_notify.release =
- hns3_nic_irq_affinity_release;
- irq_set_affinity_notifier(tqp_vectors->vector_irq,
- &tqp_vectors->affinity_notify);
irq_set_affinity_hint(tqp_vectors->vector_irq,
&tqp_vectors->affinity_mask);
@@ -297,8 +276,7 @@ static int hns3_nic_set_real_num_queue(struct net_device *netdev)
ret = netif_set_real_num_tx_queues(netdev, queue_size);
if (ret) {
netdev_err(netdev,
- "netif_set_real_num_tx_queues fail, ret=%d!\n",
- ret);
+ "netif_set_real_num_tx_queues fail, ret=%d!\n", ret);
return ret;
}
@@ -340,6 +318,40 @@ static void hns3_tqp_disable(struct hnae3_queue *tqp)
hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
}
+static void hns3_free_rx_cpu_rmap(struct net_device *netdev)
+{
+#ifdef CONFIG_RFS_ACCEL
+ free_irq_cpu_rmap(netdev->rx_cpu_rmap);
+ netdev->rx_cpu_rmap = NULL;
+#endif
+}
+
+static int hns3_set_rx_cpu_rmap(struct net_device *netdev)
+{
+#ifdef CONFIG_RFS_ACCEL
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hns3_enet_tqp_vector *tqp_vector;
+ int i, ret;
+
+ if (!netdev->rx_cpu_rmap) {
+ netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->vector_num);
+ if (!netdev->rx_cpu_rmap)
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < priv->vector_num; i++) {
+ tqp_vector = &priv->tqp_vector[i];
+ ret = irq_cpu_rmap_add(netdev->rx_cpu_rmap,
+ tqp_vector->vector_irq);
+ if (ret) {
+ hns3_free_rx_cpu_rmap(netdev);
+ return ret;
+ }
+ }
+#endif
+ return 0;
+}
+
static int hns3_nic_net_up(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
@@ -351,11 +363,16 @@ static int hns3_nic_net_up(struct net_device *netdev)
if (ret)
return ret;
+ /* the device can work without cpu rmap, only aRFS needs it */
+ ret = hns3_set_rx_cpu_rmap(netdev);
+ if (ret)
+ netdev_warn(netdev, "set rx cpu rmap fail, ret=%d!\n", ret);
+
/* get irq resource for all vectors */
ret = hns3_nic_init_irq(priv);
if (ret) {
- netdev_err(netdev, "hns init irq failed! ret=%d\n", ret);
- return ret;
+ netdev_err(netdev, "init irq failed! ret=%d\n", ret);
+ goto free_rmap;
}
clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
@@ -384,7 +401,8 @@ out_start_err:
hns3_vector_disable(&priv->tqp_vector[j]);
hns3_nic_uninit_irq(priv);
-
+free_rmap:
+ hns3_free_rx_cpu_rmap(netdev);
return ret;
}
@@ -429,16 +447,13 @@ static int hns3_nic_net_open(struct net_device *netdev)
ret = hns3_nic_net_up(netdev);
if (ret) {
- netdev_err(netdev,
- "hns net up fail, ret=%d!\n", ret);
+ netdev_err(netdev, "net up fail, ret=%d!\n", ret);
return ret;
}
kinfo = &h->kinfo;
- for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
- netdev_set_prio_tc_map(netdev, i,
- kinfo->prio_tc[i]);
- }
+ for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
+ netdev_set_prio_tc_map(netdev, i, kinfo->prio_tc[i]);
if (h->ae_algo->ops->set_timer_task)
h->ae_algo->ops->set_timer_task(priv->ae_handle, true);
@@ -447,6 +462,20 @@ static int hns3_nic_net_open(struct net_device *netdev)
return 0;
}
+static void hns3_reset_tx_queue(struct hnae3_handle *h)
+{
+ struct net_device *ndev = h->kinfo.netdev;
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct netdev_queue *dev_queue;
+ u32 i;
+
+ for (i = 0; i < h->kinfo.num_tqps; i++) {
+ dev_queue = netdev_get_tx_queue(ndev,
+ priv->ring_data[i].queue_index);
+ netdev_tx_reset_queue(dev_queue);
+ }
+}
+
static void hns3_nic_net_down(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
@@ -467,10 +496,19 @@ static void hns3_nic_net_down(struct net_device *netdev)
if (ops->stop)
ops->stop(priv->ae_handle);
+ hns3_free_rx_cpu_rmap(netdev);
+
/* free irq resources */
hns3_nic_uninit_irq(priv);
- hns3_clear_all_ring(priv->ae_handle);
+ /* delay ring buffer clearing to hns3_reset_notify_uninit_enet
+ * during the reset process, because the driver may not be able
+ * to disable the ring through firmware when downing the netdev.
+ */
+ if (!hns3_nic_resetting(netdev))
+ hns3_clear_all_ring(priv->ae_handle, false);
+
+ hns3_reset_tx_queue(priv->ae_handle);
}
static int hns3_nic_net_stop(struct net_device *netdev)
@@ -641,7 +679,7 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
if (l3.v4->version == 4)
l3.v4->check = 0;
- /* tunnel packet.*/
+ /* tunnel packet */
if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
SKB_GSO_GRE_CSUM |
SKB_GSO_UDP_TUNNEL |
@@ -666,11 +704,11 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
l3.v4->check = 0;
}
- /* normal or tunnel packet*/
+ /* normal or tunnel packet */
l4_offset = l4.hdr - skb->data;
hdr_len = (l4.tcp->doff << 2) + l4_offset;
- /* remove payload length from inner pseudo checksum when tso*/
+ /* remove payload length from inner pseudo checksum when tso */
l4_paylen = skb->len - l4_offset;
csum_replace_by_diff(&l4.tcp->check,
(__force __wsum)htonl(l4_paylen));
@@ -778,7 +816,7 @@ static void hns3_set_outer_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_S, l3_len >> 2);
il2_hdr = skb_inner_mac_header(skb);
- /* compute OL4 header size, defined in 4 Bytes. */
+ /* compute OL4 header size, defined in 4 Bytes */
l4_len = il2_hdr - l4.hdr;
hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_S, l4_len >> 2);
@@ -913,8 +951,9 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
{
/* Config bd buffer end */
- hns3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
- hns3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
+ if (!!frag_end)
+ hns3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, 1U);
+ hns3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1U);
}
static int hns3_fill_desc_vtags(struct sk_buff *skb,
@@ -988,7 +1027,8 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb,
}
static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
- int size, int frag_end, enum hns_desc_type type)
+ unsigned int size, int frag_end,
+ enum hns_desc_type type)
{
struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
struct hns3_desc *desc = &ring->desc[ring->next_to_use];
@@ -1038,8 +1078,7 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
/* Set txbd */
desc->tx.ol_type_vlan_len_msec =
cpu_to_le32(ol_type_vlan_len_msec);
- desc->tx.type_cs_vlan_tso_len =
- cpu_to_le32(type_cs_vlan_tso);
+ desc->tx.type_cs_vlan_tso_len = cpu_to_le32(type_cs_vlan_tso);
desc->tx.paylen = cpu_to_le32(paylen);
desc->tx.mss = cpu_to_le16(mss);
desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
@@ -1086,19 +1125,19 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
desc_cb->priv = priv;
desc_cb->dma = dma + HNS3_MAX_BD_SIZE * k;
desc_cb->type = (type == DESC_TYPE_SKB && !k) ?
- DESC_TYPE_SKB : DESC_TYPE_PAGE;
+ DESC_TYPE_SKB : DESC_TYPE_PAGE;
/* now, fill the descriptor */
desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k);
desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ?
- (u16)sizeoflast : (u16)HNS3_MAX_BD_SIZE);
+ (u16)sizeoflast : (u16)HNS3_MAX_BD_SIZE);
hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri,
frag_end && (k == frag_buf_num - 1) ?
1 : 0);
desc->tx.bdtp_fe_sc_vld_ra_ri =
cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
- /* move ring pointer to next.*/
+ /* move ring pointer to next */
ring_ptr_move_fw(ring, next_to_use);
desc_cb = &ring->desc_cb[ring->next_to_use];
@@ -1452,12 +1491,10 @@ static void hns3_nic_get_stats64(struct net_device *netdev,
start = u64_stats_fetch_begin_irq(&ring->syncp);
rx_bytes += ring->stats.rx_bytes;
rx_pkts += ring->stats.rx_pkts;
- rx_drop += ring->stats.non_vld_descs;
rx_drop += ring->stats.l2_err;
- rx_errors += ring->stats.non_vld_descs;
rx_errors += ring->stats.l2_err;
+ rx_errors += ring->stats.l3l4_csum_err;
rx_crc_errors += ring->stats.l2_err;
- rx_crc_errors += ring->stats.l3l4_csum_err;
rx_multicast += ring->stats.rx_multicast;
rx_length_errors += ring->stats.err_pkt_len;
} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
@@ -1493,12 +1530,12 @@ static void hns3_nic_get_stats64(struct net_device *netdev,
static int hns3_setup_tc(struct net_device *netdev, void *type_data)
{
struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
- struct hnae3_handle *h = hns3_get_handle(netdev);
- struct hnae3_knic_private_info *kinfo = &h->kinfo;
u8 *prio_tc = mqprio_qopt->qopt.prio_tc_map;
+ struct hnae3_knic_private_info *kinfo;
u8 tc = mqprio_qopt->qopt.num_tc;
u16 mode = mqprio_qopt->mode;
u8 hw = mqprio_qopt->qopt.hw;
+ struct hnae3_handle *h;
if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS &&
mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0)))
@@ -1510,6 +1547,9 @@ static int hns3_setup_tc(struct net_device *netdev, void *type_data)
if (!netdev)
return -EINVAL;
+ h = hns3_get_handle(netdev);
+ kinfo = &h->kinfo;
+
return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ?
kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP;
}
@@ -1527,15 +1567,11 @@ static int hns3_vlan_rx_add_vid(struct net_device *netdev,
__be16 proto, u16 vid)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- struct hns3_nic_priv *priv = netdev_priv(netdev);
int ret = -EIO;
if (h->ae_algo->ops->set_vlan_filter)
ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false);
- if (!ret)
- set_bit(vid, priv->active_vlans);
-
return ret;
}
@@ -1543,33 +1579,11 @@ static int hns3_vlan_rx_kill_vid(struct net_device *netdev,
__be16 proto, u16 vid)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- struct hns3_nic_priv *priv = netdev_priv(netdev);
int ret = -EIO;
if (h->ae_algo->ops->set_vlan_filter)
ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true);
- if (!ret)
- clear_bit(vid, priv->active_vlans);
-
- return ret;
-}
-
-static int hns3_restore_vlan(struct net_device *netdev)
-{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- int ret = 0;
- u16 vid;
-
- for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
- ret = hns3_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
- if (ret) {
- netdev_err(netdev, "Restore vlan: %d filter, ret:%d\n",
- vid, ret);
- return ret;
- }
- }
-
return ret;
}
@@ -1581,7 +1595,7 @@ static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
if (h->ae_algo->ops->set_vf_vlan_filter)
ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan,
- qos, vlan_proto);
+ qos, vlan_proto);
return ret;
}
@@ -1722,6 +1736,32 @@ static void hns3_nic_net_timeout(struct net_device *ndev)
h->ae_algo->ops->reset_event(h->pdev, h);
}
+#ifdef CONFIG_RFS_ACCEL
+static int hns3_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
+ u16 rxq_index, u32 flow_id)
+{
+ struct hnae3_handle *h = hns3_get_handle(dev);
+ struct flow_keys fkeys;
+
+ if (!h->ae_algo->ops->add_arfs_entry)
+ return -EOPNOTSUPP;
+
+ if (skb->encapsulation)
+ return -EPROTONOSUPPORT;
+
+ if (!skb_flow_dissect_flow_keys(skb, &fkeys, 0))
+ return -EPROTONOSUPPORT;
+
+ if ((fkeys.basic.n_proto != htons(ETH_P_IP) &&
+ fkeys.basic.n_proto != htons(ETH_P_IPV6)) ||
+ (fkeys.basic.ip_proto != IPPROTO_TCP &&
+ fkeys.basic.ip_proto != IPPROTO_UDP))
+ return -EPROTONOSUPPORT;
+
+ return h->ae_algo->ops->add_arfs_entry(h, rxq_index, flow_id, &fkeys);
+}
+#endif
+
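The new .ndo_rx_flow_steer hook only takes effect once the RX IRQ affinity map has been published to the core RFS code; the hns3_free_rx_cpu_rmap() call added to hns3_nic_net_down() above tears that map down. Below is a minimal sketch of the setup side using the generic cpu_rmap helpers; the function name and the nvec/irqs parameters are illustrative stand-ins, not the driver's actual setup routine.

#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#include <linux/netdevice.h>

/* Illustrative only: publish one reverse-map entry per RX vector IRQ so
 * that RFS can steer a flow to the queue whose IRQ is affine to the CPU
 * currently processing it.
 */
static int example_set_rx_cpu_rmap(struct net_device *netdev,
				   unsigned int nvec, const int *irqs)
{
	unsigned int i;
	int ret;

	netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(nvec);
	if (!netdev->rx_cpu_rmap)
		return -ENOMEM;

	for (i = 0; i < nvec; i++) {
		ret = irq_cpu_rmap_add(netdev->rx_cpu_rmap, irqs[i]);
		if (ret) {
			free_irq_cpu_rmap(netdev->rx_cpu_rmap);
			netdev->rx_cpu_rmap = NULL;
			return ret;
		}
	}
	return 0;
}
#endif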
static const struct net_device_ops hns3_nic_netdev_ops = {
.ndo_open = hns3_nic_net_open,
.ndo_stop = hns3_nic_net_stop,
@@ -1737,6 +1777,10 @@ static const struct net_device_ops hns3_nic_netdev_ops = {
.ndo_vlan_rx_add_vid = hns3_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = hns3_vlan_rx_kill_vid,
.ndo_set_vf_vlan = hns3_ndo_set_vf_vlan,
+#ifdef CONFIG_RFS_ACCEL
+ .ndo_rx_flow_steer = hns3_rx_flow_steer,
+#endif
+
};
bool hns3_is_phys_func(struct pci_dev *pdev)
@@ -1802,8 +1846,7 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct hnae3_ae_dev *ae_dev;
int ret;
- ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev),
- GFP_KERNEL);
+ ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev), GFP_KERNEL);
if (!ae_dev) {
ret = -ENOMEM;
return ret;
@@ -1811,7 +1854,6 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ae_dev->pdev = pdev;
ae_dev->flag = ent->driver_data;
- ae_dev->dev_type = HNAE3_DEV_KNIC;
ae_dev->reset_type = HNAE3_NONE_RESET;
hns3_get_dev_capability(pdev, ae_dev);
pci_set_drvdata(pdev, ae_dev);
@@ -1895,9 +1937,9 @@ static pci_ers_result_t hns3_error_detected(struct pci_dev *pdev,
if (state == pci_channel_io_perm_failure)
return PCI_ERS_RESULT_DISCONNECT;
- if (!ae_dev) {
+ if (!ae_dev || !ae_dev->ops) {
dev_err(&pdev->dev,
- "Can't recover - error happened during device init\n");
+ "Can't recover - error happened before device initialized\n");
return PCI_ERS_RESULT_NONE;
}
@@ -1912,14 +1954,23 @@ static pci_ers_result_t hns3_error_detected(struct pci_dev *pdev,
static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+ const struct hnae3_ae_ops *ops;
+ enum hnae3_reset_type reset_type;
struct device *dev = &pdev->dev;
- dev_info(dev, "requesting reset due to PCI error\n");
+ if (!ae_dev || !ae_dev->ops)
+ return PCI_ERS_RESULT_NONE;
+ ops = ae_dev->ops;
/* request the reset */
- if (ae_dev->ops->reset_event) {
- if (!ae_dev->override_pci_need_reset)
- ae_dev->ops->reset_event(pdev, NULL);
+ if (ops->reset_event) {
+ if (ae_dev->hw_err_reset_req) {
+ reset_type = ops->get_reset_level(ae_dev,
+ &ae_dev->hw_err_reset_req);
+ ops->set_default_reset_request(ae_dev, reset_type);
+ dev_info(dev, "requesting reset due to PCI error\n");
+ ops->reset_event(pdev, NULL);
+ }
return PCI_ERS_RESULT_RECOVERED;
}
@@ -2168,7 +2219,7 @@ out_buffer_fail:
return ret;
}
-/* detach a in-used buffer and replace with a reserved one */
+/* detach an in-use buffer and replace it with a reserved one */
static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
struct hns3_desc_cb *res_cb)
{
@@ -2181,8 +2232,8 @@ static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
{
ring->desc_cb[i].reuse_flag = 0;
- ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
- + ring->desc_cb[i].page_offset);
+ ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
+ ring->desc_cb[i].page_offset);
ring->desc[i].rx.bd_base_info = 0;
}
@@ -2284,8 +2335,8 @@ static int hns3_desc_unused(struct hns3_enet_ring *ring)
return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
}
-static void
-hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleand_count)
+static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
+ int cleand_count)
{
struct hns3_desc_cb *desc_cb;
struct hns3_desc_cb res_cbs;
@@ -2338,7 +2389,7 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
/* Avoid re-using remote pages, or the stack is still using the page
* when page_offset rollback to zero, flag default unreuse
*/
- if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()) ||
+ if (unlikely(page_to_nid(desc_cb->priv) != numa_mem_id()) ||
(!desc_cb->page_offset && page_count(desc_cb->priv) > 1))
return;
@@ -2347,7 +2398,7 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
if (desc_cb->page_offset + truesize <= hnae3_page_size(ring)) {
desc_cb->reuse_flag = 1;
- /* Bump ref count on page before it is given*/
+ /* Bump ref count on page before it is given */
get_page(desc_cb->priv);
} else if (page_count(desc_cb->priv) == 1) {
desc_cb->reuse_flag = 1;
@@ -2356,13 +2407,13 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
}
}
-static int hns3_gro_complete(struct sk_buff *skb)
+static int hns3_gro_complete(struct sk_buff *skb, u32 l234info)
{
__be16 type = skb->protocol;
struct tcphdr *th;
int depth = 0;
- while (type == htons(ETH_P_8021Q)) {
+ while (eth_type_vlan(type)) {
struct vlan_hdr *vh;
if ((depth + VLAN_HLEN) > skb_headlen(skb))
@@ -2373,10 +2424,24 @@ static int hns3_gro_complete(struct sk_buff *skb)
depth += VLAN_HLEN;
}
+ skb_set_network_header(skb, depth);
+
if (type == htons(ETH_P_IP)) {
+ const struct iphdr *iph = ip_hdr(skb);
+
depth += sizeof(struct iphdr);
+ skb_set_transport_header(skb, depth);
+ th = tcp_hdr(skb);
+ th->check = ~tcp_v4_check(skb->len - depth, iph->saddr,
+ iph->daddr, 0);
} else if (type == htons(ETH_P_IPV6)) {
+ const struct ipv6hdr *iph = ipv6_hdr(skb);
+
depth += sizeof(struct ipv6hdr);
+ skb_set_transport_header(skb, depth);
+ th = tcp_hdr(skb);
+ th->check = ~tcp_v6_check(skb->len - depth, &iph->saddr,
+ &iph->daddr, 0);
} else {
netdev_err(skb->dev,
"Error: FW GRO supports only IPv4/IPv6, not 0x%04x, depth: %d\n",
@@ -2384,13 +2449,16 @@ static int hns3_gro_complete(struct sk_buff *skb)
return -EFAULT;
}
- th = (struct tcphdr *)(skb->data + depth);
skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
if (th->cwr)
skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
+ if (l234info & BIT(HNS3_RXD_GRO_FIXID_B))
+ skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;
+ skb->csum_start = (unsigned char *)th - skb->head;
+ skb->csum_offset = offsetof(struct tcphdr, check);
+ skb->ip_summed = CHECKSUM_PARTIAL;
return 0;
}
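Switching hns3_gro_complete() from CHECKSUM_UNNECESSARY to CHECKSUM_PARTIAL means th->check must be seeded with ~tcp_v4_check()/~tcp_v6_check(), which works out to the plain ones'-complement sum of the pseudo header; the stack or hardware later folds the segment bytes in and complements the result once. A standalone sketch of that IPv4 pseudo-header seed follows; the addresses and length are made up for the example.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

/* Ones'-complement sum over big-endian 16-bit words, folded to 16 bits. */
static uint16_t csum16(const uint8_t *data, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += ((uint32_t)data[i] << 8) | data[i + 1];
	if (len & 1)
		sum += (uint32_t)data[len - 1] << 8;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

int main(void)
{
	uint8_t ph[12];
	struct in_addr saddr, daddr;
	uint16_t tcp_len = 1400;	/* TCP header + payload bytes */

	inet_pton(AF_INET, "192.0.2.1", &saddr);
	inet_pton(AF_INET, "192.0.2.2", &daddr);

	memcpy(&ph[0], &saddr, 4);
	memcpy(&ph[4], &daddr, 4);
	ph[8] = 0;
	ph[9] = 6;			/* IPPROTO_TCP */
	ph[10] = tcp_len >> 8;
	ph[11] = tcp_len & 0xff;

	/* Seed stored in th->check for CHECKSUM_PARTIAL: the uncomplemented
	 * pseudo-header sum; the final TCP checksum is the complement once
	 * the segment bytes are folded in as well.
	 */
	printf("pseudo-header seed: 0x%04x\n", csum16(ph, sizeof(ph)));
	return 0;
}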
@@ -2508,7 +2576,7 @@ static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
}
}
-static int hns3_alloc_skb(struct hns3_enet_ring *ring, int length,
+static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
unsigned char *va)
{
#define HNS3_NEED_ADD_FRAG 1
@@ -2537,7 +2605,7 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, int length,
memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
/* We can reuse buffer as-is, just make sure it is local */
- if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
+ if (likely(page_to_nid(desc_cb->priv) == numa_mem_id()))
desc_cb->reuse_flag = 1;
else /* This page cannot be reused so discard it */
put_page(desc_cb->priv);
@@ -2574,7 +2642,7 @@ static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
*/
if (pending) {
pre_bd = (ring->next_to_clean - 1 + ring->desc_num) %
- ring->desc_num;
+ ring->desc_num;
pre_desc = &ring->desc[pre_bd];
bd_base_info = le32_to_cpu(pre_desc->rx.bd_base_info);
} else {
@@ -2628,21 +2696,22 @@ static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring,
struct sk_buff *skb, u32 l234info,
u32 bd_base_info, u32 ol_info)
{
- u16 gro_count;
u32 l3_type;
- gro_count = hnae3_get_field(l234info, HNS3_RXD_GRO_COUNT_M,
- HNS3_RXD_GRO_COUNT_S);
+ skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info,
+ HNS3_RXD_GRO_SIZE_M,
+ HNS3_RXD_GRO_SIZE_S);
/* if there is no HW GRO, do not set gro params */
- if (!gro_count) {
+ if (!skb_shinfo(skb)->gso_size) {
hns3_rx_checksum(ring, skb, l234info, bd_base_info, ol_info);
return 0;
}
- NAPI_GRO_CB(skb)->count = gro_count;
+ NAPI_GRO_CB(skb)->count = hnae3_get_field(l234info,
+ HNS3_RXD_GRO_COUNT_M,
+ HNS3_RXD_GRO_COUNT_S);
- l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
- HNS3_RXD_L3ID_S);
+ l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S);
if (l3_type == HNS3_L3_TYPE_IPV4)
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
else if (l3_type == HNS3_L3_TYPE_IPV6)
@@ -2650,11 +2719,7 @@ static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring,
else
return -EFAULT;
- skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info,
- HNS3_RXD_GRO_SIZE_M,
- HNS3_RXD_GRO_SIZE_S);
-
- return hns3_gro_complete(skb);
+ return hns3_gro_complete(skb, l234info);
}
static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
@@ -2703,14 +2768,6 @@ static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb)
vlan_tag);
}
- if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.non_vld_descs++;
- u64_stats_update_end(&ring->syncp);
-
- return -EINVAL;
- }
-
if (unlikely(!desc->rx.pkt_len || (l234info & (BIT(HNS3_RXD_TRUNCAT_B) |
BIT(HNS3_RXD_L2E_B))))) {
u64_stats_update_begin(&ring->syncp);
@@ -2762,8 +2819,8 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
struct sk_buff *skb = ring->skb;
struct hns3_desc_cb *desc_cb;
struct hns3_desc *desc;
+ unsigned int length;
u32 bd_base_info;
- int length;
int ret;
desc = &ring->desc[ring->next_to_clean];
@@ -2828,14 +2885,14 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
return ret;
}
+ skb_record_rx_queue(skb, ring->tqp->tqp_index);
*out_skb = skb;
return 0;
}
-int hns3_clean_rx_ring(
- struct hns3_enet_ring *ring, int budget,
- void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
+int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
+ void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
{
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
int recv_pkts, recv_bds, clean_count, err;
@@ -2887,42 +2944,25 @@ int hns3_clean_rx_ring(
out:
/* Make all data has been write before submit */
if (clean_count + unused_count > 0)
- hns3_nic_alloc_rx_buffers(ring,
- clean_count + unused_count);
+ hns3_nic_alloc_rx_buffers(ring, clean_count + unused_count);
return recv_pkts;
}
-static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
+static bool hns3_get_new_flow_lvl(struct hns3_enet_ring_group *ring_group)
{
- struct hns3_enet_tqp_vector *tqp_vector =
- ring_group->ring->tqp_vector;
+#define HNS3_RX_LOW_BYTE_RATE 10000
+#define HNS3_RX_MID_BYTE_RATE 20000
+#define HNS3_RX_ULTRA_PACKET_RATE 40
+
enum hns3_flow_level_range new_flow_level;
- int packets_per_msecs;
- int bytes_per_msecs;
+ struct hns3_enet_tqp_vector *tqp_vector;
+ int packets_per_msecs, bytes_per_msecs;
u32 time_passed_ms;
- u16 new_int_gl;
-
- if (!tqp_vector->last_jiffies)
- return false;
-
- if (ring_group->total_packets == 0) {
- ring_group->coal.int_gl = HNS3_INT_GL_50K;
- ring_group->coal.flow_level = HNS3_FLOW_LOW;
- return true;
- }
- /* Simple throttlerate management
- * 0-10MB/s lower (50000 ints/s)
- * 10-20MB/s middle (20000 ints/s)
- * 20-1249MB/s high (18000 ints/s)
- * > 40000pps ultra (8000 ints/s)
- */
- new_flow_level = ring_group->coal.flow_level;
- new_int_gl = ring_group->coal.int_gl;
+ tqp_vector = ring_group->ring->tqp_vector;
time_passed_ms =
jiffies_to_msecs(jiffies - tqp_vector->last_jiffies);
-
if (!time_passed_ms)
return false;
@@ -2932,9 +2972,14 @@ static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
do_div(ring_group->total_bytes, time_passed_ms);
bytes_per_msecs = ring_group->total_bytes;
-#define HNS3_RX_LOW_BYTE_RATE 10000
-#define HNS3_RX_MID_BYTE_RATE 20000
+ new_flow_level = ring_group->coal.flow_level;
+ /* Simple throttle rate management
+ * 0-10MB/s lower (50000 ints/s)
+ * 10-20MB/s middle (20000 ints/s)
+ * 20-1249MB/s high (18000 ints/s)
+ * > 40000pps ultra (8000 ints/s)
+ */
switch (new_flow_level) {
case HNS3_FLOW_LOW:
if (bytes_per_msecs > HNS3_RX_LOW_BYTE_RATE)
@@ -2954,13 +2999,40 @@ static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
break;
}
-#define HNS3_RX_ULTRA_PACKET_RATE 40
-
if (packets_per_msecs > HNS3_RX_ULTRA_PACKET_RATE &&
&tqp_vector->rx_group == ring_group)
new_flow_level = HNS3_FLOW_ULTRA;
- switch (new_flow_level) {
+ ring_group->total_bytes = 0;
+ ring_group->total_packets = 0;
+ ring_group->coal.flow_level = new_flow_level;
+
+ return true;
+}
+
+static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
+{
+ struct hns3_enet_tqp_vector *tqp_vector;
+ u16 new_int_gl;
+
+ if (!ring_group->ring)
+ return false;
+
+ tqp_vector = ring_group->ring->tqp_vector;
+ if (!tqp_vector->last_jiffies)
+ return false;
+
+ if (ring_group->total_packets == 0) {
+ ring_group->coal.int_gl = HNS3_INT_GL_50K;
+ ring_group->coal.flow_level = HNS3_FLOW_LOW;
+ return true;
+ }
+
+ if (!hns3_get_new_flow_lvl(ring_group))
+ return false;
+
+ new_int_gl = ring_group->coal.int_gl;
+ switch (ring_group->coal.flow_level) {
case HNS3_FLOW_LOW:
new_int_gl = HNS3_INT_GL_50K;
break;
@@ -2977,9 +3049,6 @@ static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
break;
}
- ring_group->total_bytes = 0;
- ring_group->total_packets = 0;
- ring_group->coal.flow_level = new_flow_level;
if (new_int_gl != ring_group->coal.int_gl) {
ring_group->coal.int_gl = new_int_gl;
return true;
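The refactor above splits the coalescing update into a flow-level estimator (hns3_get_new_flow_lvl) and a GL selector (hns3_get_new_int_gl). A standalone sketch of the same idea follows; the LOW/MID byte-rate thresholds, the ULTRA packet-rate threshold and the interrupts-per-second figures come from the driver's constants and comment, while the MID and HIGH/ULTRA transitions are elided in the hunk above, so the ones below are an illustrative guess that follows the same pattern (the sketch also ignores the rx-group-only check for ULTRA).

#include <stdio.h>

enum flow_level { FLOW_LOW, FLOW_MID, FLOW_HIGH, FLOW_ULTRA };

#define LOW_BYTE_RATE		10000	/* bytes per millisecond (~10 MB/s) */
#define MID_BYTE_RATE		20000
#define ULTRA_PACKET_RATE	40	/* packets per millisecond */

/* Nominal interrupt rates per level, per the driver comment. */
static const unsigned int ints_per_sec[] = { 50000, 20000, 18000, 8000 };

static enum flow_level next_level(enum flow_level cur,
				  unsigned int bytes_per_ms,
				  unsigned int pkts_per_ms)
{
	enum flow_level lvl = cur;

	switch (lvl) {
	case FLOW_LOW:
		if (bytes_per_ms > LOW_BYTE_RATE)
			lvl = FLOW_MID;
		break;
	case FLOW_MID:
		if (bytes_per_ms > MID_BYTE_RATE)
			lvl = FLOW_HIGH;
		else if (bytes_per_ms <= LOW_BYTE_RATE)
			lvl = FLOW_LOW;
		break;
	case FLOW_HIGH:
	case FLOW_ULTRA:
		if (bytes_per_ms <= MID_BYTE_RATE)
			lvl = FLOW_MID;
		break;
	}

	if (pkts_per_ms > ULTRA_PACKET_RATE)
		lvl = FLOW_ULTRA;
	return lvl;
}

int main(void)
{
	enum flow_level lvl = FLOW_LOW;

	lvl = next_level(lvl, 15000, 20);	/* ~15 MB/s, 20 kpps */
	printf("level %d -> %u ints/s\n", lvl, ints_per_sec[lvl]);
	return 0;
}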
@@ -3280,6 +3349,7 @@ static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
if (!vector)
return -ENOMEM;
+ /* save the actual available vector number */
vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector);
priv->vector_num = vector_num;
@@ -3331,8 +3401,6 @@ static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
if (tqp_vector->irq_init_flag == HNS3_VECTOR_INITED) {
- irq_set_affinity_notifier(tqp_vector->vector_irq,
- NULL);
irq_set_affinity_hint(tqp_vector->vector_irq, NULL);
free_irq(tqp_vector->vector_irq, tqp_vector);
tqp_vector->irq_init_flag = HNS3_VECTOR_NOT_INITED;
@@ -3364,7 +3432,7 @@ static int hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
}
static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
- int ring_type)
+ unsigned int ring_type)
{
struct hns3_nic_ring_data *ring_data = priv->ring_data;
int queue_num = priv->ae_handle->kinfo.num_tqps;
@@ -3550,8 +3618,7 @@ static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
struct hnae3_queue *q = ring->tqp;
if (!HNAE3_IS_TX_RING(ring)) {
- hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG,
- (u32)dma);
+ hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG, (u32)dma);
hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
(u32)((dma >> 31) >> 1));
@@ -3851,6 +3918,8 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
hns3_client_stop(handle);
+ hns3_uninit_phy(netdev);
+
if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
netdev_warn(netdev, "already uninitialized\n");
goto out_netdev_free;
@@ -3858,9 +3927,7 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
hns3_del_all_fd_rules(netdev, true);
- hns3_force_clear_all_rx_ring(handle);
-
- hns3_uninit_phy(netdev);
+ hns3_clear_all_ring(handle, true);
hns3_nic_uninit_vector_data(priv);
@@ -3997,8 +4064,7 @@ static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
ret);
return ret;
}
- hns3_replace_buffer(ring, ring->next_to_use,
- &res_cbs);
+ hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
}
ring_ptr_move_fw(ring, next_to_use);
}
@@ -4030,40 +4096,26 @@ static void hns3_force_clear_rx_ring(struct hns3_enet_ring *ring)
}
}
-static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h)
+static void hns3_clear_all_ring(struct hnae3_handle *h, bool force)
{
struct net_device *ndev = h->kinfo.netdev;
struct hns3_nic_priv *priv = netdev_priv(ndev);
- struct hns3_enet_ring *ring;
u32 i;
for (i = 0; i < h->kinfo.num_tqps; i++) {
- ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
- hns3_force_clear_rx_ring(ring);
- }
-}
-
-static void hns3_clear_all_ring(struct hnae3_handle *h)
-{
- struct net_device *ndev = h->kinfo.netdev;
- struct hns3_nic_priv *priv = netdev_priv(ndev);
- u32 i;
-
- for (i = 0; i < h->kinfo.num_tqps; i++) {
- struct netdev_queue *dev_queue;
struct hns3_enet_ring *ring;
ring = priv->ring_data[i].ring;
hns3_clear_tx_ring(ring);
- dev_queue = netdev_get_tx_queue(ndev,
- priv->ring_data[i].queue_index);
- netdev_tx_reset_queue(dev_queue);
ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
/* Continue to clear other rings even if clearing some
* rings failed.
*/
- hns3_clear_rx_ring(ring);
+ if (force)
+ hns3_force_clear_rx_ring(ring);
+ else
+ hns3_clear_rx_ring(ring);
}
}
@@ -4173,7 +4225,7 @@ static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
if (ret) {
set_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
netdev_err(kinfo->netdev,
- "hns net up fail, ret=%d!\n", ret);
+ "net up fail, ret=%d!\n", ret);
return ret;
}
}
@@ -4251,12 +4303,8 @@ static int hns3_reset_notify_restore_enet(struct hnae3_handle *handle)
vlan_filter_enable = netdev->flags & IFF_PROMISC ? false : true;
hns3_enable_vlan_filter(netdev, vlan_filter_enable);
- /* Hardware table is only clear when pf resets */
- if (!(handle->flags & HNAE3_SUPPORT_VF)) {
- ret = hns3_restore_vlan(netdev);
- if (ret)
- return ret;
- }
+ if (handle->ae_algo->ops->restore_vlan_table)
+ handle->ae_algo->ops->restore_vlan_table(handle);
return hns3_restore_fd_rules(netdev);
}
@@ -4272,7 +4320,8 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
return 0;
}
- hns3_force_clear_all_rx_ring(handle);
+ hns3_clear_all_ring(handle, true);
+ hns3_reset_tx_queue(priv->ae_handle);
hns3_nic_uninit_vector_data(priv);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index c14480f9b625..848b866761df 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -145,7 +145,7 @@ enum hns3_nic_state {
#define HNS3_RXD_TSIND_M (0x7 << HNS3_RXD_TSIND_S)
#define HNS3_RXD_LKBK_B 15
#define HNS3_RXD_GRO_SIZE_S 16
-#define HNS3_RXD_GRO_SIZE_M (0x3ff << HNS3_RXD_GRO_SIZE_S)
+#define HNS3_RXD_GRO_SIZE_M (0x3fff << HNS3_RXD_GRO_SIZE_S)
#define HNS3_TXD_L3T_S 0
#define HNS3_TXD_L3T_M (0x3 << HNS3_TXD_L3T_S)
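The GRO size field widened here is a plain mask-and-shift bitfield; with the old 10-bit mask any hardware-reported GRO size above 1023 bytes would be silently truncated. A self-contained illustration of the difference, with the shift and masks copied from this header and a helper modeled on the driver's hnae3_get_field pattern:

#include <stdint.h>
#include <stdio.h>

#define GRO_SIZE_S	16
#define GRO_SIZE_M_OLD	(0x3ffu << GRO_SIZE_S)	/* 10-bit field: truncates */
#define GRO_SIZE_M_NEW	(0x3fffu << GRO_SIZE_S)	/* 14-bit field */

static uint32_t get_field(uint32_t value, uint32_t mask, uint32_t shift)
{
	return (value & mask) >> shift;
}

int main(void)
{
	uint32_t bd_base_info = 5000u << GRO_SIZE_S;	/* GRO size > 1023 */

	printf("old mask reads: %u\n",
	       get_field(bd_base_info, GRO_SIZE_M_OLD, GRO_SIZE_S));
	printf("new mask reads: %u\n",
	       get_field(bd_base_info, GRO_SIZE_M_NEW, GRO_SIZE_S));
	return 0;
}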
@@ -384,7 +384,6 @@ struct ring_stats {
u64 rx_err_cnt;
u64 reuse_pg_cnt;
u64 err_pkt_len;
- u64 non_vld_descs;
u64 err_bd_num;
u64 l2_err;
u64 l3l4_csum_err;
@@ -417,7 +416,7 @@ struct hns3_enet_ring {
*/
int next_to_clean;
- int pull_len; /* head length for current packet */
+ u32 pull_len; /* head length for current packet */
u32 frag_num;
unsigned char *va; /* first buffer address for current packet */
@@ -446,25 +445,6 @@ enum hns3_flow_level_range {
HNS3_FLOW_ULTRA = 3,
};
-enum hns3_link_mode_bits {
- HNS3_LM_FIBRE_BIT = BIT(0),
- HNS3_LM_AUTONEG_BIT = BIT(1),
- HNS3_LM_TP_BIT = BIT(2),
- HNS3_LM_PAUSE_BIT = BIT(3),
- HNS3_LM_BACKPLANE_BIT = BIT(4),
- HNS3_LM_10BASET_HALF_BIT = BIT(5),
- HNS3_LM_10BASET_FULL_BIT = BIT(6),
- HNS3_LM_100BASET_HALF_BIT = BIT(7),
- HNS3_LM_100BASET_FULL_BIT = BIT(8),
- HNS3_LM_1000BASET_FULL_BIT = BIT(9),
- HNS3_LM_10000BASEKR_FULL_BIT = BIT(10),
- HNS3_LM_25000BASEKR_FULL_BIT = BIT(11),
- HNS3_LM_40000BASELR4_FULL_BIT = BIT(12),
- HNS3_LM_50000BASEKR2_FULL_BIT = BIT(13),
- HNS3_LM_100000BASEKR4_FULL_BIT = BIT(14),
- HNS3_LM_COUNT = 15
-};
-
#define HNS3_INT_GL_MAX 0x1FE0
#define HNS3_INT_GL_50K 0x0014
#define HNS3_INT_GL_20K 0x0032
@@ -550,7 +530,6 @@ struct hns3_nic_priv {
struct notifier_block notifier_block;
/* Vxlan/Geneve information */
struct hns3_udp_tunnel udp_tnl[HNS3_UDP_TNL_MAX];
- unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
struct hns3_enet_coalesce tx_coal;
struct hns3_enet_coalesce rx_coal;
};
@@ -631,7 +610,7 @@ static inline bool hns3_nic_resetting(struct net_device *netdev)
#define hnae3_buf_size(_ring) ((_ring)->buf_size)
#define hnae3_page_order(_ring) (get_order(hnae3_buf_size(_ring)))
-#define hnae3_page_size(_ring) (PAGE_SIZE << hnae3_page_order(_ring))
+#define hnae3_page_size(_ring) (PAGE_SIZE << (u32)hnae3_page_order(_ring))
/* iterator for handling rings in ring group */
#define hns3_for_each_ring(pos, head) \
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index d1588ea6132c..5bff98a9b0dc 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -44,7 +44,6 @@ static const struct hns3_stats hns3_rxq_stats[] = {
HNS3_TQP_STAT("errors", rx_err_cnt),
HNS3_TQP_STAT("reuse_pg_cnt", reuse_pg_cnt),
HNS3_TQP_STAT("err_pkt_len", err_pkt_len),
- HNS3_TQP_STAT("non_vld_descs", non_vld_descs),
HNS3_TQP_STAT("err_bd_num", err_bd_num),
HNS3_TQP_STAT("l2_err", l2_err),
HNS3_TQP_STAT("l3l4_csum_err", l3l4_csum_err),
@@ -60,6 +59,7 @@ static const struct hns3_stats hns3_rxq_stats[] = {
#define HNS3_NIC_LB_TEST_PKT_NUM 1
#define HNS3_NIC_LB_TEST_RING_ID 0
#define HNS3_NIC_LB_TEST_PACKET_SIZE 128
+#define HNS3_NIC_LB_SETUP_USEC 10000
/* Nic loopback test err */
#define HNS3_NIC_LB_TEST_NO_MEM_ERR 1
@@ -117,7 +117,7 @@ static int hns3_lp_up(struct net_device *ndev, enum hnae3_loop loop_mode)
return ret;
ret = hns3_lp_setup(ndev, loop_mode, true);
- usleep_range(10000, 20000);
+ usleep_range(HNS3_NIC_LB_SETUP_USEC, HNS3_NIC_LB_SETUP_USEC * 2);
return ret;
}
@@ -132,7 +132,7 @@ static int hns3_lp_down(struct net_device *ndev, enum hnae3_loop loop_mode)
return ret;
}
- usleep_range(10000, 20000);
+ usleep_range(HNS3_NIC_LB_SETUP_USEC, HNS3_NIC_LB_SETUP_USEC * 2);
return 0;
}
@@ -149,6 +149,12 @@ static void hns3_lp_setup_skb(struct sk_buff *skb)
packet = skb_put(skb, HNS3_NIC_LB_TEST_PACKET_SIZE);
memcpy(ethh->h_dest, ndev->dev_addr, ETH_ALEN);
+
+ /* The dst mac addr of the loopback packet is the same as the host's
+ * mac addr, so the SSU component may loop the packet back to the host
+ * before it reaches the mac or serdes, which would defeat the purpose
+ * of the mac or serdes selftest.
+ */
ethh->h_dest[5] += 0x1f;
eth_zero_addr(ethh->h_source);
ethh->h_proto = htons(ETH_P_ARP);
@@ -243,11 +249,13 @@ static int hns3_lp_run_test(struct net_device *ndev, enum hnae3_loop mode)
skb_get(skb);
tx_ret = hns3_nic_net_xmit(skb, ndev);
- if (tx_ret == NETDEV_TX_OK)
+ if (tx_ret == NETDEV_TX_OK) {
good_cnt++;
- else
+ } else {
+ kfree_skb(skb);
netdev_err(ndev, "hns3_lb_run_test xmit failed: %d\n",
tx_ret);
+ }
}
if (good_cnt != HNS3_NIC_LB_TEST_PKT_NUM) {
ret_val = HNS3_NIC_LB_TEST_TX_CNT_ERR;
@@ -327,6 +335,13 @@ static void hns3_self_test(struct net_device *ndev,
h->ae_algo->ops->enable_vlan_filter(h, false);
#endif
+ /* Tell the firmware to stop mac autoneg before the loopback test
+ * starts, otherwise the loopback test may fail while the port is
+ * still negotiating.
+ */
+ if (h->ae_algo->ops->halt_autoneg)
+ h->ae_algo->ops->halt_autoneg(h, true);
+
set_bit(HNS3_NIC_STATE_TESTING, &priv->state);
for (i = 0; i < HNS3_SELF_TEST_TYPE_NUM; i++) {
@@ -349,6 +364,9 @@ static void hns3_self_test(struct net_device *ndev,
clear_bit(HNS3_NIC_STATE_TESTING, &priv->state);
+ if (h->ae_algo->ops->halt_autoneg)
+ h->ae_algo->ops->halt_autoneg(h, false);
+
#if IS_ENABLED(CONFIG_VLAN_8021Q)
if (dis_vlan_filter)
h->ae_algo->ops->enable_vlan_filter(h, true);
@@ -435,7 +453,7 @@ static void hns3_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
switch (stringset) {
case ETH_SS_STATS:
buff = hns3_get_strings_tqps(h, buff);
- h->ae_algo->ops->get_strings(h, stringset, (u8 *)buff);
+ ops->get_strings(h, stringset, (u8 *)buff);
break;
case ETH_SS_TEST:
ops->get_strings(h, stringset, data);
@@ -510,6 +528,11 @@ static void hns3_get_drvinfo(struct net_device *netdev,
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = priv->ae_handle;
+ if (!h->ae_algo->ops->get_fw_version) {
+ netdev_err(netdev, "could not get fw version!\n");
+ return;
+ }
+
strncpy(drvinfo->version, hns3_driver_version,
sizeof(drvinfo->version));
drvinfo->version[sizeof(drvinfo->version) - 1] = '\0';
@@ -530,7 +553,7 @@ static u32 hns3_get_link(struct net_device *netdev)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- if (h->ae_algo && h->ae_algo->ops && h->ae_algo->ops->get_status)
+ if (h->ae_algo->ops->get_status)
return h->ae_algo->ops->get_status(h);
else
return 0;
@@ -560,7 +583,7 @@ static void hns3_get_pauseparam(struct net_device *netdev,
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- if (h->ae_algo && h->ae_algo->ops && h->ae_algo->ops->get_pauseparam)
+ if (h->ae_algo->ops->get_pauseparam)
h->ae_algo->ops->get_pauseparam(h, &param->autoneg,
&param->rx_pause, &param->tx_pause);
}
@@ -610,9 +633,6 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
u8 media_type;
u8 link_stat;
- if (!h->ae_algo || !h->ae_algo->ops)
- return -EOPNOTSUPP;
-
ops = h->ae_algo->ops;
if (ops->get_media_type)
ops->get_media_type(h, &media_type, &module_type);
@@ -740,8 +760,7 @@ static u32 hns3_get_rss_key_size(struct net_device *netdev)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- if (!h->ae_algo || !h->ae_algo->ops ||
- !h->ae_algo->ops->get_rss_key_size)
+ if (!h->ae_algo->ops->get_rss_key_size)
return 0;
return h->ae_algo->ops->get_rss_key_size(h);
@@ -751,8 +770,7 @@ static u32 hns3_get_rss_indir_size(struct net_device *netdev)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- if (!h->ae_algo || !h->ae_algo->ops ||
- !h->ae_algo->ops->get_rss_indir_size)
+ if (!h->ae_algo->ops->get_rss_indir_size)
return 0;
return h->ae_algo->ops->get_rss_indir_size(h);
@@ -763,7 +781,7 @@ static int hns3_get_rss(struct net_device *netdev, u32 *indir, u8 *key,
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->get_rss)
+ if (!h->ae_algo->ops->get_rss)
return -EOPNOTSUPP;
return h->ae_algo->ops->get_rss(h, indir, key, hfunc);
@@ -774,7 +792,7 @@ static int hns3_set_rss(struct net_device *netdev, const u32 *indir,
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_rss)
+ if (!h->ae_algo->ops->set_rss)
return -EOPNOTSUPP;
if ((h->pdev->revision == 0x20 &&
@@ -799,9 +817,6 @@ static int hns3_get_rxnfc(struct net_device *netdev,
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- if (!h->ae_algo || !h->ae_algo->ops)
- return -EOPNOTSUPP;
-
switch (cmd->cmd) {
case ETHTOOL_GRXRINGS:
cmd->data = h->kinfo.num_tqps;
@@ -915,9 +930,6 @@ static int hns3_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- if (!h->ae_algo || !h->ae_algo->ops)
- return -EOPNOTSUPP;
-
switch (cmd->cmd) {
case ETHTOOL_SRXFH:
if (h->ae_algo->ops->set_rss_tuple)
@@ -1193,7 +1205,7 @@ static int hns3_set_phys_id(struct net_device *netdev,
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_led_id)
+ if (!h->ae_algo->ops->set_led_id)
return -EOPNOTSUPP;
return h->ae_algo->ops->set_led_id(h, state);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index fbd904e3077c..22f6acd45d9a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -110,8 +110,7 @@ static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring)
hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG,
upper_32_bits(dma));
hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG,
- (ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S) |
- HCLGE_NIC_CMQ_ENABLE);
+ ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S);
hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0);
hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0);
} else {
@@ -120,8 +119,7 @@ static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring)
hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_H_REG,
upper_32_bits(dma));
hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG,
- (ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S) |
- HCLGE_NIC_CMQ_ENABLE);
+ ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S);
hclge_write_dev(hw, HCLGE_NIC_CRQ_HEAD_REG, 0);
hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0);
}
@@ -175,7 +173,11 @@ static bool hclge_is_special_opcode(u16 opcode)
HCLGE_OPC_STATS_MAC,
HCLGE_OPC_STATS_MAC_ALL,
HCLGE_OPC_QUERY_32_BIT_REG,
- HCLGE_OPC_QUERY_64_BIT_REG};
+ HCLGE_OPC_QUERY_64_BIT_REG,
+ HCLGE_QUERY_CLEAR_MPF_RAS_INT,
+ HCLGE_QUERY_CLEAR_PF_RAS_INT,
+ HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT,
+ HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT};
int i;
for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) {
@@ -186,12 +188,43 @@ static bool hclge_is_special_opcode(u16 opcode)
return false;
}
+static int hclge_cmd_convert_err_code(u16 desc_ret)
+{
+ switch (desc_ret) {
+ case HCLGE_CMD_EXEC_SUCCESS:
+ return 0;
+ case HCLGE_CMD_NO_AUTH:
+ return -EPERM;
+ case HCLGE_CMD_NOT_SUPPORTED:
+ return -EOPNOTSUPP;
+ case HCLGE_CMD_QUEUE_FULL:
+ return -EXFULL;
+ case HCLGE_CMD_NEXT_ERR:
+ return -ENOSR;
+ case HCLGE_CMD_UNEXE_ERR:
+ return -ENOTBLK;
+ case HCLGE_CMD_PARA_ERR:
+ return -EINVAL;
+ case HCLGE_CMD_RESULT_ERR:
+ return -ERANGE;
+ case HCLGE_CMD_TIMEOUT:
+ return -ETIME;
+ case HCLGE_CMD_HILINK_ERR:
+ return -ENOLINK;
+ case HCLGE_CMD_QUEUE_ILLEGAL:
+ return -ENXIO;
+ case HCLGE_CMD_INVALID:
+ return -EBADR;
+ default:
+ return -EIO;
+ }
+}
+
static int hclge_cmd_check_retval(struct hclge_hw *hw, struct hclge_desc *desc,
int num, int ntc)
{
u16 opcode, desc_ret;
int handle;
- int retval;
opcode = le16_to_cpu(desc[0].opcode);
for (handle = 0; handle < num; handle++) {
@@ -205,17 +238,9 @@ static int hclge_cmd_check_retval(struct hclge_hw *hw, struct hclge_desc *desc,
else
desc_ret = le16_to_cpu(desc[0].retval);
- if (desc_ret == HCLGE_CMD_EXEC_SUCCESS)
- retval = 0;
- else if (desc_ret == HCLGE_CMD_NO_AUTH)
- retval = -EPERM;
- else if (desc_ret == HCLGE_CMD_NOT_SUPPORTED)
- retval = -EOPNOTSUPP;
- else
- retval = -EIO;
hw->cmq.last_status = desc_ret;
- return retval;
+ return hclge_cmd_convert_err_code(desc_ret);
}
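hclge_cmd_convert_err_code() replaces the old three-way mapping with a distinct errno per firmware return code, so callers can now react differently to, say, a full command queue versus an unsupported opcode. A hypothetical userspace-style caller illustrating the point; fake_send() and the retry policy are made up for the example and are not part of the driver.

#include <errno.h>
#include <stdio.h>

static int fake_send(void)
{
	return -EOPNOTSUPP;	/* stand-in for a command send routine */
}

static int issue_cmd_with_retry(int (*send)(void), int max_retries)
{
	int i, ret = -EIO;

	for (i = 0; i < max_retries; i++) {
		ret = send();
		if (ret != -EBUSY && ret != -ETIME)
			break;	/* only transient failures are worth retrying */
	}
	return ret;
}

int main(void)
{
	int ret = issue_cmd_with_retry(fake_send, 3);

	if (ret == -EOPNOTSUPP)
		fprintf(stderr, "command not supported by firmware\n");
	return ret ? 1 : 0;
}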
/**
@@ -230,6 +255,7 @@ static int hclge_cmd_check_retval(struct hclge_hw *hw, struct hclge_desc *desc,
int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
{
struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
+ struct hclge_cmq_ring *csq = &hw->cmq.csq;
struct hclge_desc *desc_to_use;
bool complete = false;
u32 timeout = 0;
@@ -239,8 +265,16 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
spin_lock_bh(&hw->cmq.csq.lock);
- if (num > hclge_ring_space(&hw->cmq.csq) ||
- test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
+ if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
+ spin_unlock_bh(&hw->cmq.csq.lock);
+ return -EBUSY;
+ }
+
+ if (num > hclge_ring_space(&hw->cmq.csq)) {
+ /* If the CMDQ ring is full, the SW HEAD and HW HEAD may differ;
+ * update the SW HEAD pointer csq->next_to_clean from the HW HEAD
+ * register.
+ */
+ csq->next_to_clean = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);
spin_unlock_bh(&hw->cmq.csq.lock);
return -EBUSY;
}
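The new full-queue branch exists because the software head pointer can go stale while the firmware keeps consuming descriptors; re-reading the hardware head register makes the freed slots visible again without waiting for the normal completion path. A generic sketch of the ring-space arithmetic, equivalent to the usual formula for a ring that keeps one slot free (not the driver's exact hclge_ring_space() helper):

#include <stdio.h>

/* Free slots in a ring given head (next_to_clean) and tail (next_to_use),
 * keeping one slot empty to distinguish full from empty.
 */
static int ring_space(int head, int tail, int desc_num)
{
	return (head - tail - 1 + desc_num) % desc_num;
}

int main(void)
{
	int desc_num = 1024, sw_head = 10, tail = 9, hw_head = 500;

	printf("space with stale SW head: %d\n",
	       ring_space(sw_head, tail, desc_num));
	/* If the ring looks full, re-read the HW head register and retry. */
	printf("space after resync:       %d\n",
	       ring_space(hw_head, tail, desc_num));
	return 0;
}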
@@ -278,7 +312,7 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
}
if (!complete) {
- retval = -EAGAIN;
+ retval = -EBADE;
} else {
retval = hclge_cmd_check_retval(hw, desc, num, ntc);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index d79a209b80f6..96840d8f3e24 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -41,6 +41,14 @@ enum hclge_cmd_return_status {
HCLGE_CMD_NO_AUTH = 1,
HCLGE_CMD_NOT_SUPPORTED = 2,
HCLGE_CMD_QUEUE_FULL = 3,
+ HCLGE_CMD_NEXT_ERR = 4,
+ HCLGE_CMD_UNEXE_ERR = 5,
+ HCLGE_CMD_PARA_ERR = 6,
+ HCLGE_CMD_RESULT_ERR = 7,
+ HCLGE_CMD_TIMEOUT = 8,
+ HCLGE_CMD_HILINK_ERR = 9,
+ HCLGE_CMD_QUEUE_ILLEGAL = 10,
+ HCLGE_CMD_INVALID = 11,
};
enum hclge_cmd_status {
@@ -180,6 +188,9 @@ enum hclge_opcode_type {
HCLGE_OPC_CFG_COM_TQP_QUEUE = 0x0B20,
HCLGE_OPC_RESET_TQP_QUEUE = 0x0B22,
+ /* PPU commands */
+ HCLGE_OPC_PPU_PF_OTHER_INT_DFX = 0x0B4A,
+
/* TSO command */
HCLGE_OPC_TSO_GENERIC_CONFIG = 0x0C01,
HCLGE_OPC_GRO_GENERIC_CONFIG = 0x0C10,
@@ -243,6 +254,9 @@ enum hclge_opcode_type {
/* NCL config command */
HCLGE_OPC_QUERY_NCL_CONFIG = 0x7011,
+ /* M7 stats command */
+ HCLGE_OPC_M7_STATS_BD = 0x7012,
+ HCLGE_OPC_M7_STATS_INFO = 0x7013,
/* SFP command */
HCLGE_OPC_GET_SFP_INFO = 0x7104,
@@ -265,6 +279,8 @@ enum hclge_opcode_type {
HCLGE_CONFIG_ROCEE_RAS_INT_EN = 0x1580,
HCLGE_QUERY_CLEAR_ROCEE_RAS_INT = 0x1581,
HCLGE_ROCEE_PF_RAS_INT_CMD = 0x1584,
+ HCLGE_QUERY_ROCEE_ECC_RAS_INFO_CMD = 0x1585,
+ HCLGE_QUERY_ROCEE_AXI_RAS_INFO_CMD = 0x1586,
HCLGE_IGU_EGU_TNL_INT_EN = 0x1803,
HCLGE_IGU_COMMON_INT_EN = 0x1806,
HCLGE_TM_QCN_MEM_INT_CFG = 0x1A14,
@@ -641,6 +657,11 @@ enum hclge_mac_vlan_tbl_opcode {
HCLGE_MAC_VLAN_LKUP, /* Lookup a entry through mac_vlan key */
};
+enum hclge_mac_vlan_add_resp_code {
+ HCLGE_ADD_UC_OVERFLOW = 2, /* ADD failed for UC overflow */
+ HCLGE_ADD_MC_OVERFLOW, /* ADD failed for MC overflow */
+};
+
#define HCLGE_MAC_VLAN_BIT0_EN_B 0
#define HCLGE_MAC_VLAN_BIT1_EN_B 1
#define HCLGE_MAC_EPORT_SW_EN_B 12
@@ -674,7 +695,6 @@ struct hclge_umv_spc_alc_cmd {
#define HCLGE_MAC_MGR_MASK_VLAN_B BIT(0)
#define HCLGE_MAC_MGR_MASK_MAC_B BIT(1)
#define HCLGE_MAC_MGR_MASK_ETHERTYPE_B BIT(2)
-#define HCLGE_MAC_ETHERTYPE_LLDP 0x88cc
struct hclge_mac_mgr_tbl_entry_cmd {
u8 flags;
@@ -872,7 +892,7 @@ struct hclge_serdes_lb_cmd {
#define HCLGE_TOTAL_PKT_BUF 0x108000 /* 1.03125M bytes */
#define HCLGE_DEFAULT_DV 0xA000 /* 40k byte */
#define HCLGE_DEFAULT_NON_DCB_DV 0x7800 /* 30K byte */
-#define HCLGE_NON_DCB_ADDITIONAL_BUF 0x200 /* 512 byte */
+#define HCLGE_NON_DCB_ADDITIONAL_BUF 0x1400 /* 5120 byte */
#define HCLGE_TYPE_CRQ 0
#define HCLGE_TYPE_CSQ 1
@@ -970,6 +990,25 @@ struct hclge_fd_ad_config_cmd {
u8 rsv2[8];
};
+struct hclge_get_m7_bd_cmd {
+ __le32 bd_num;
+ u8 rsv[20];
+};
+
+struct hclge_query_ppu_pf_other_int_dfx_cmd {
+ __le16 over_8bd_no_fe_qid;
+ __le16 over_8bd_no_fe_vf_id;
+ __le16 tso_mss_cmp_min_err_qid;
+ __le16 tso_mss_cmp_min_err_vf_id;
+ __le16 tso_mss_cmp_max_err_qid;
+ __le16 tso_mss_cmp_max_err_vf_id;
+ __le16 tx_rd_fbd_poison_qid;
+ __le16 tx_rd_fbd_poison_vf_id;
+ __le16 rx_rd_fbd_poison_qid;
+ __le16 rx_rd_fbd_poison_vf_id;
+ u8 rsv[4];
+};
+
int hclge_cmd_init(struct hclge_dev *hdev);
static inline void hclge_write_reg(void __iomem *base, u32 reg, u32 value)
{
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
index 1161361a973b..bac4ce13f6ae 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
@@ -325,6 +325,8 @@ static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
hdev->tm_info.hw_pfc_map = pfc_map;
hdev->tm_info.pfc_en = pfc->pfc_en;
+ hclge_tm_pfc_info_update(hdev);
+
return hclge_pause_setup_hw(hdev, false);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index a9ffb57c4607..ab625c757a95 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -61,9 +61,11 @@ static int hclge_dbg_cmd_send(struct hclge_dev *hdev,
static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
struct hclge_dbg_dfx_message *dfx_message,
- char *cmd_buf, int msg_num, int offset,
- enum hclge_opcode_type cmd)
+ const char *cmd_buf, int msg_num,
+ int offset, enum hclge_opcode_type cmd)
{
+#define BD_DATA_NUM 6
+
struct hclge_desc *desc_src;
struct hclge_desc *desc;
int bd_num, buf_len;
@@ -92,14 +94,16 @@ static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
return;
}
- max = (bd_num * 6) <= msg_num ? (bd_num * 6) : msg_num;
+ max = (bd_num * BD_DATA_NUM) <= msg_num ?
+ (bd_num * BD_DATA_NUM) : msg_num;
desc = desc_src;
for (i = 0; i < max; i++) {
- (((i / 6) > 0) && ((i % 6) == 0)) ? desc++ : desc;
+ ((i > 0) && ((i % BD_DATA_NUM) == 0)) ? desc++ : desc;
if (dfx_message->flag)
dev_info(&hdev->pdev->dev, "%s: 0x%x\n",
- dfx_message->message, desc->data[i % 6]);
+ dfx_message->message,
+ desc->data[i % BD_DATA_NUM]);
dfx_message++;
}
@@ -107,7 +111,7 @@ static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
kfree(desc_src);
}
-static void hclge_dbg_dump_dcb(struct hclge_dev *hdev, char *cmd_buf)
+static void hclge_dbg_dump_dcb(struct hclge_dev *hdev, const char *cmd_buf)
{
struct device *dev = &hdev->pdev->dev;
struct hclge_dbg_bitmap_cmd *bitmap;
@@ -207,7 +211,7 @@ static void hclge_dbg_dump_dcb(struct hclge_dev *hdev, char *cmd_buf)
dev_info(dev, "IGU_TX_PRI_MAP_TC_CFG: 0x%x\n", desc[0].data[5]);
}
-static void hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev, char *cmd_buf)
+static void hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev, const char *cmd_buf)
{
int msg_num;
@@ -395,7 +399,7 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
if (ret)
goto err_tm_pg_cmd_send;
- dev_info(&hdev->pdev->dev, "PRI_SCH pg_id: %u\n", desc.data[0]);
+ dev_info(&hdev->pdev->dev, "PRI_SCH pri_id: %u\n", desc.data[0]);
cmd = HCLGE_OPC_TM_QS_SCH_MODE_CFG;
hclge_cmd_setup_basic_desc(&desc, cmd, true);
@@ -403,7 +407,7 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
if (ret)
goto err_tm_pg_cmd_send;
- dev_info(&hdev->pdev->dev, "QS_SCH pg_id: %u\n", desc.data[0]);
+ dev_info(&hdev->pdev->dev, "QS_SCH qs_id: %u\n", desc.data[0]);
cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING;
hclge_cmd_setup_basic_desc(&desc, cmd, true);
@@ -412,9 +416,9 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
goto err_tm_pg_cmd_send;
bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;
- dev_info(&hdev->pdev->dev, "BP_TO_QSET pg_id: %u\n",
+ dev_info(&hdev->pdev->dev, "BP_TO_QSET tc_id: %u\n",
bp_to_qs_map_cmd->tc_id);
- dev_info(&hdev->pdev->dev, "BP_TO_QSET pg_shapping: 0x%x\n",
+ dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_group_id: 0x%x\n",
bp_to_qs_map_cmd->qs_group_id);
dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_bit_map: 0x%x\n",
bp_to_qs_map_cmd->qs_bit_map);
@@ -473,7 +477,7 @@ static void hclge_dbg_dump_tm(struct hclge_dev *hdev)
nq_to_qs_map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
dev_info(&hdev->pdev->dev, "NQ_TO_QS nq_id: %u\n", nq_to_qs_map->nq_id);
- dev_info(&hdev->pdev->dev, "NQ_TO_QS qset_id: %u\n",
+ dev_info(&hdev->pdev->dev, "NQ_TO_QS qset_id: 0x%x\n",
nq_to_qs_map->qset_id);
cmd = HCLGE_OPC_TM_PG_WEIGHT;
@@ -537,7 +541,8 @@ err_tm_cmd_send:
cmd, ret);
}
-static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev, char *cmd_buf)
+static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev,
+ const char *cmd_buf)
{
struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
struct hclge_nq_to_qs_link_cmd *nq_to_qs_map;
@@ -921,11 +926,67 @@ static void hclge_dbg_dump_rst_info(struct hclge_dev *hdev)
hdev->rst_stats.reset_cnt);
}
+void hclge_dbg_get_m7_stats_info(struct hclge_dev *hdev)
+{
+ struct hclge_desc *desc_src, *desc_tmp;
+ struct hclge_get_m7_bd_cmd *req;
+ struct hclge_desc desc;
+ u32 bd_num, buf_len;
+ int ret, i;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_M7_STATS_BD, true);
+
+ req = (struct hclge_get_m7_bd_cmd *)desc.data;
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "get firmware statistics bd number failed, ret=%d\n",
+ ret);
+ return;
+ }
+
+ bd_num = le32_to_cpu(req->bd_num);
+
+ buf_len = sizeof(struct hclge_desc) * bd_num;
+ desc_src = kzalloc(buf_len, GFP_KERNEL);
+ if (!desc_src) {
+ dev_err(&hdev->pdev->dev,
+ "allocate desc for get_m7_stats failed\n");
+ return;
+ }
+
+ desc_tmp = desc_src;
+ ret = hclge_dbg_cmd_send(hdev, desc_tmp, 0, bd_num,
+ HCLGE_OPC_M7_STATS_INFO);
+ if (ret) {
+ kfree(desc_src);
+ dev_err(&hdev->pdev->dev,
+ "get firmware statistics failed, ret=%d\n", ret);
+ return;
+ }
+
+ for (i = 0; i < bd_num; i++) {
+ dev_info(&hdev->pdev->dev, "0x%08x 0x%08x 0x%08x\n",
+ le32_to_cpu(desc_tmp->data[0]),
+ le32_to_cpu(desc_tmp->data[1]),
+ le32_to_cpu(desc_tmp->data[2]));
+ dev_info(&hdev->pdev->dev, "0x%08x 0x%08x 0x%08x\n",
+ le32_to_cpu(desc_tmp->data[3]),
+ le32_to_cpu(desc_tmp->data[4]),
+ le32_to_cpu(desc_tmp->data[5]));
+
+ desc_tmp++;
+ }
+
+ kfree(desc_src);
+}
+
/* hclge_dbg_dump_ncl_config: print specified range of NCL_CONFIG file
* @hdev: pointer to struct hclge_dev
* @cmd_buf: string that contains offset and length
*/
-static void hclge_dbg_dump_ncl_config(struct hclge_dev *hdev, char *cmd_buf)
+static void hclge_dbg_dump_ncl_config(struct hclge_dev *hdev,
+ const char *cmd_buf)
{
#define HCLGE_MAX_NCL_CONFIG_OFFSET 4096
#define HCLGE_MAX_NCL_CONFIG_LENGTH (20 + 24 * 4)
@@ -998,13 +1059,13 @@ static void hclge_dbg_dump_mac_tnl_status(struct hclge_dev *hdev)
while (kfifo_get(&hdev->mac_tnl_log, &stats)) {
rem_nsec = do_div(stats.time, HCLGE_BILLION_NANO_SECONDS);
- dev_info(&hdev->pdev->dev, "[%07lu.%03lu]status = 0x%x\n",
+ dev_info(&hdev->pdev->dev, "[%07lu.%03lu] status = 0x%x\n",
(unsigned long)stats.time, rem_nsec / 1000,
stats.status);
}
}
-int hclge_dbg_run_cmd(struct hnae3_handle *handle, char *cmd_buf)
+int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
@@ -1029,6 +1090,8 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, char *cmd_buf)
hclge_dbg_dump_reg_cmd(hdev, cmd_buf);
} else if (strncmp(cmd_buf, "dump reset info", 15) == 0) {
hclge_dbg_dump_rst_info(hdev);
+ } else if (strncmp(cmd_buf, "dump m7 info", 12) == 0) {
+ hclge_dbg_get_m7_stats_info(hdev);
} else if (strncmp(cmd_buf, "dump ncl_config", 15) == 0) {
hclge_dbg_dump_ncl_config(hdev,
&cmd_buf[sizeof("dump ncl_config")]);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index 4ac80634c984..0a7243825e7b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -87,25 +87,25 @@ static const struct hclge_hw_error hclge_msix_sram_ecc_int[] = {
static const struct hclge_hw_error hclge_igu_int[] = {
{ .int_msk = BIT(0), .msg = "igu_rx_buf0_ecc_mbit_err",
- .reset_level = HNAE3_CORE_RESET },
+ .reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(2), .msg = "igu_rx_buf1_ecc_mbit_err",
- .reset_level = HNAE3_CORE_RESET },
+ .reset_level = HNAE3_GLOBAL_RESET },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_igu_egu_tnl_int[] = {
{ .int_msk = BIT(0), .msg = "rx_buf_overflow",
- .reset_level = HNAE3_CORE_RESET },
+ .reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(1), .msg = "rx_stp_fifo_overflow",
- .reset_level = HNAE3_CORE_RESET },
+ .reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(2), .msg = "rx_stp_fifo_undeflow",
- .reset_level = HNAE3_CORE_RESET },
+ .reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(3), .msg = "tx_buf_overflow",
- .reset_level = HNAE3_CORE_RESET },
+ .reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(4), .msg = "tx_buf_underrun",
- .reset_level = HNAE3_CORE_RESET },
+ .reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(5), .msg = "rx_stp_buf_overflow",
- .reset_level = HNAE3_CORE_RESET },
+ .reset_level = HNAE3_GLOBAL_RESET },
{ /* sentinel */ }
};
@@ -413,13 +413,13 @@ static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st2[] = {
static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st3[] = {
{ .int_msk = BIT(4), .msg = "gro_bd_ecc_mbit_err",
- .reset_level = HNAE3_CORE_RESET },
+ .reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(5), .msg = "gro_context_ecc_mbit_err",
- .reset_level = HNAE3_CORE_RESET },
+ .reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(6), .msg = "rx_stash_cfg_ecc_mbit_err",
- .reset_level = HNAE3_CORE_RESET },
+ .reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(7), .msg = "axi_rd_fbd_ecc_mbit_err",
- .reset_level = HNAE3_CORE_RESET },
+ .reset_level = HNAE3_GLOBAL_RESET },
{ /* sentinel */ }
};
@@ -631,29 +631,20 @@ static const struct hclge_hw_error hclge_rocee_qmm_ovf_err_int[] = {
{ /* sentinel */ }
};
-static enum hnae3_reset_type hclge_log_error(struct device *dev, char *reg,
- const struct hclge_hw_error *err,
- u32 err_sts)
+static void hclge_log_error(struct device *dev, char *reg,
+ const struct hclge_hw_error *err,
+ u32 err_sts, unsigned long *reset_requests)
{
- enum hnae3_reset_type reset_level = HNAE3_FUNC_RESET;
- bool need_reset = false;
-
while (err->msg) {
if (err->int_msk & err_sts) {
dev_warn(dev, "%s %s found [error status=0x%x]\n",
reg, err->msg, err_sts);
- if (err->reset_level != HNAE3_NONE_RESET &&
- err->reset_level >= reset_level) {
- reset_level = err->reset_level;
- need_reset = true;
- }
+ if (err->reset_level &&
+ err->reset_level != HNAE3_NONE_RESET)
+ set_bit(err->reset_level, reset_requests);
}
err++;
}
- if (need_reset)
- return reset_level;
- else
- return HNAE3_NONE_RESET;
}
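Instead of returning a single reset level per register dump, hclge_log_error() now ORs each error's level into a shared request bitmap, and the caller later services the most severe bit that is set. A self-contained sketch of that pattern; the level names and their ordering here are illustrative, not the kernel's hnae3_reset_type values.

#include <stdio.h>

/* Reset levels ordered by severity (illustrative names). */
enum reset_level { RESET_NONE, RESET_FUNC, RESET_GLOBAL, RESET_IMP, RESET_MAX };

static void request_reset(unsigned long *requests, enum reset_level lvl)
{
	if (lvl != RESET_NONE)
		*requests |= 1UL << lvl;	/* like set_bit(lvl, requests) */
}

static enum reset_level highest_pending(unsigned long requests)
{
	int lvl;

	for (lvl = RESET_MAX - 1; lvl > RESET_NONE; lvl--)
		if (requests & (1UL << lvl))
			return lvl;
	return RESET_NONE;
}

int main(void)
{
	unsigned long requests = 0;

	request_reset(&requests, RESET_FUNC);
	request_reset(&requests, RESET_GLOBAL);
	printf("serviced level: %d\n", highest_pending(requests));
	return 0;
}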
/* hclge_cmd_query_error: read the error information
@@ -673,19 +664,19 @@ static int hclge_cmd_query_error(struct hclge_dev *hdev,
enum hclge_err_int_type int_type)
{
struct device *dev = &hdev->pdev->dev;
- int num = 1;
+ int desc_num = 1;
int ret;
hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
if (flag) {
desc[0].flag |= cpu_to_le16(flag);
hclge_cmd_setup_basic_desc(&desc[1], cmd, true);
- num = 2;
+ desc_num = 2;
}
if (w_num)
desc[0].data[w_num] = cpu_to_le32(int_type);
- ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], desc_num);
if (ret)
dev_err(dev, "query error cmd failed (%d)\n", ret);
@@ -941,7 +932,7 @@ static int hclge_config_ppu_error_interrupts(struct hclge_dev *hdev, u32 cmd,
{
struct device *dev = &hdev->pdev->dev;
struct hclge_desc desc[2];
- int num = 1;
+ int desc_num = 1;
int ret;
/* configure PPU error interrupts */
@@ -960,7 +951,7 @@ static int hclge_config_ppu_error_interrupts(struct hclge_dev *hdev, u32 cmd,
desc[1].data[1] = HCLGE_PPU_MPF_ABNORMAL_INT1_EN_MASK;
desc[1].data[2] = HCLGE_PPU_MPF_ABNORMAL_INT2_EN_MASK;
desc[1].data[3] |= HCLGE_PPU_MPF_ABNORMAL_INT3_EN_MASK;
- num = 2;
+ desc_num = 2;
} else if (cmd == HCLGE_PPU_MPF_OTHER_INT_CMD) {
hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
if (en)
@@ -978,7 +969,7 @@ static int hclge_config_ppu_error_interrupts(struct hclge_dev *hdev, u32 cmd,
return -EINVAL;
}
- ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], desc_num);
return ret;
}
@@ -1069,12 +1060,51 @@ static int hclge_config_ssu_hw_err_int(struct hclge_dev *hdev, bool en)
return ret;
}
-#define HCLGE_SET_DEFAULT_RESET_REQUEST(reset_type) \
- do { \
- if (ae_dev->ops->set_default_reset_request) \
- ae_dev->ops->set_default_reset_request(ae_dev, \
- reset_type); \
- } while (0)
+/* hclge_query_bd_num: query number of buffer descriptors
+ * @hdev: pointer to struct hclge_dev
+ * @is_ras: true for ras, false for msix
+ * @mpf_bd_num: number of main PF interrupt buffer descriptors
+ * @pf_bd_num: number of non-main PF interrupt buffer descriptors
+ *
+ * This function queries the number of mpf and pf buffer descriptors.
+ */
+static int hclge_query_bd_num(struct hclge_dev *hdev, bool is_ras,
+ int *mpf_bd_num, int *pf_bd_num)
+{
+ struct device *dev = &hdev->pdev->dev;
+ u32 mpf_min_bd_num, pf_min_bd_num;
+ enum hclge_opcode_type opcode;
+ struct hclge_desc desc_bd;
+ int ret;
+
+ if (is_ras) {
+ opcode = HCLGE_QUERY_RAS_INT_STS_BD_NUM;
+ mpf_min_bd_num = HCLGE_MPF_RAS_INT_MIN_BD_NUM;
+ pf_min_bd_num = HCLGE_PF_RAS_INT_MIN_BD_NUM;
+ } else {
+ opcode = HCLGE_QUERY_MSIX_INT_STS_BD_NUM;
+ mpf_min_bd_num = HCLGE_MPF_MSIX_INT_MIN_BD_NUM;
+ pf_min_bd_num = HCLGE_PF_MSIX_INT_MIN_BD_NUM;
+ }
+
+ hclge_cmd_setup_basic_desc(&desc_bd, opcode, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc_bd, 1);
+ if (ret) {
+ dev_err(dev, "fail(%d) to query msix int status bd num\n",
+ ret);
+ return ret;
+ }
+
+ *mpf_bd_num = le32_to_cpu(desc_bd.data[0]);
+ *pf_bd_num = le32_to_cpu(desc_bd.data[1]);
+ if (*mpf_bd_num < mpf_min_bd_num || *pf_bd_num < pf_min_bd_num) {
+ dev_err(dev, "Invalid bd num: mpf(%d), pf(%d)\n",
+ *mpf_bd_num, *pf_bd_num);
+ return -EINVAL;
+ }
+
+ return 0;
+}
/* hclge_handle_mpf_ras_error: handle all main PF RAS errors
* @hdev: pointer to struct hclge_dev
@@ -1089,7 +1119,6 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
int num)
{
struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
- enum hnae3_reset_type reset_level;
struct device *dev = &hdev->pdev->dev;
__le32 *desc_data;
u32 status;
@@ -1098,8 +1127,6 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
/* query all main PF RAS errors */
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_MPF_RAS_INT,
true);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
-
ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
if (ret) {
dev_err(dev, "query all mpf ras int cmd failed (%d)\n", ret);
@@ -1108,95 +1135,74 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
/* log HNS common errors */
status = le32_to_cpu(desc[0].data[0]);
- if (status) {
- reset_level = hclge_log_error(dev, "IMP_TCM_ECC_INT_STS",
- &hclge_imp_tcm_ecc_int[0],
- status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
- }
+ if (status)
+ hclge_log_error(dev, "IMP_TCM_ECC_INT_STS",
+ &hclge_imp_tcm_ecc_int[0], status,
+ &ae_dev->hw_err_reset_req);
status = le32_to_cpu(desc[0].data[1]);
- if (status) {
- reset_level = hclge_log_error(dev, "CMDQ_MEM_ECC_INT_STS",
- &hclge_cmdq_nic_mem_ecc_int[0],
- status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
- }
+ if (status)
+ hclge_log_error(dev, "CMDQ_MEM_ECC_INT_STS",
+ &hclge_cmdq_nic_mem_ecc_int[0], status,
+ &ae_dev->hw_err_reset_req);
- if ((le32_to_cpu(desc[0].data[2])) & BIT(0)) {
+ if ((le32_to_cpu(desc[0].data[2])) & BIT(0))
dev_warn(dev, "imp_rd_data_poison_err found\n");
- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_NONE_RESET);
- }
status = le32_to_cpu(desc[0].data[3]);
- if (status) {
- reset_level = hclge_log_error(dev, "TQP_INT_ECC_INT_STS",
- &hclge_tqp_int_ecc_int[0],
- status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
- }
+ if (status)
+ hclge_log_error(dev, "TQP_INT_ECC_INT_STS",
+ &hclge_tqp_int_ecc_int[0], status,
+ &ae_dev->hw_err_reset_req);
status = le32_to_cpu(desc[0].data[4]);
- if (status) {
- reset_level = hclge_log_error(dev, "MSIX_ECC_INT_STS",
- &hclge_msix_sram_ecc_int[0],
- status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
- }
+ if (status)
+ hclge_log_error(dev, "MSIX_ECC_INT_STS",
+ &hclge_msix_sram_ecc_int[0], status,
+ &ae_dev->hw_err_reset_req);
/* log SSU(Storage Switch Unit) errors */
desc_data = (__le32 *)&desc[2];
status = le32_to_cpu(*(desc_data + 2));
- if (status) {
- reset_level = hclge_log_error(dev, "SSU_ECC_MULTI_BIT_INT_0",
- &hclge_ssu_mem_ecc_err_int[0],
- status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
- }
+ if (status)
+ hclge_log_error(dev, "SSU_ECC_MULTI_BIT_INT_0",
+ &hclge_ssu_mem_ecc_err_int[0], status,
+ &ae_dev->hw_err_reset_req);
status = le32_to_cpu(*(desc_data + 3)) & BIT(0);
if (status) {
dev_warn(dev, "SSU_ECC_MULTI_BIT_INT_1 ssu_mem32_ecc_mbit_err found [error status=0x%x]\n",
status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
+ set_bit(HNAE3_GLOBAL_RESET, &ae_dev->hw_err_reset_req);
}
status = le32_to_cpu(*(desc_data + 4)) & HCLGE_SSU_COMMON_ERR_INT_MASK;
- if (status) {
- reset_level = hclge_log_error(dev, "SSU_COMMON_ERR_INT",
- &hclge_ssu_com_err_int[0],
- status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
- }
+ if (status)
+ hclge_log_error(dev, "SSU_COMMON_ERR_INT",
+ &hclge_ssu_com_err_int[0], status,
+ &ae_dev->hw_err_reset_req);
/* log IGU(Ingress Unit) errors */
desc_data = (__le32 *)&desc[3];
status = le32_to_cpu(*desc_data) & HCLGE_IGU_INT_MASK;
- if (status) {
- reset_level = hclge_log_error(dev, "IGU_INT_STS",
- &hclge_igu_int[0], status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
- }
+ if (status)
+ hclge_log_error(dev, "IGU_INT_STS",
+ &hclge_igu_int[0], status,
+ &ae_dev->hw_err_reset_req);
/* log PPP(Programmable Packet Process) errors */
desc_data = (__le32 *)&desc[4];
status = le32_to_cpu(*(desc_data + 1));
- if (status) {
- reset_level =
- hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST1",
- &hclge_ppp_mpf_abnormal_int_st1[0],
- status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
- }
+ if (status)
+ hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST1",
+ &hclge_ppp_mpf_abnormal_int_st1[0], status,
+ &ae_dev->hw_err_reset_req);
status = le32_to_cpu(*(desc_data + 3)) & HCLGE_PPP_MPF_INT_ST3_MASK;
- if (status) {
- reset_level =
- hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST3",
- &hclge_ppp_mpf_abnormal_int_st3[0],
- status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
- }
+ if (status)
+ hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST3",
+ &hclge_ppp_mpf_abnormal_int_st3[0], status,
+ &ae_dev->hw_err_reset_req);
/* log PPU(RCB) errors */
desc_data = (__le32 *)&desc[5];
@@ -1204,66 +1210,53 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
if (status) {
dev_warn(dev, "PPU_MPF_ABNORMAL_INT_ST1 %s found\n",
"rpu_rx_pkt_ecc_mbit_err");
- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
+ set_bit(HNAE3_GLOBAL_RESET, &ae_dev->hw_err_reset_req);
}
status = le32_to_cpu(*(desc_data + 2));
- if (status) {
- reset_level =
- hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2",
- &hclge_ppu_mpf_abnormal_int_st2[0],
- status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
- }
+ if (status)
+ hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2",
+ &hclge_ppu_mpf_abnormal_int_st2[0], status,
+ &ae_dev->hw_err_reset_req);
status = le32_to_cpu(*(desc_data + 3)) & HCLGE_PPU_MPF_INT_ST3_MASK;
- if (status) {
- reset_level =
- hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST3",
- &hclge_ppu_mpf_abnormal_int_st3[0],
- status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
- }
+ if (status)
+ hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST3",
+ &hclge_ppu_mpf_abnormal_int_st3[0], status,
+ &ae_dev->hw_err_reset_req);
/* log TM(Traffic Manager) errors */
desc_data = (__le32 *)&desc[6];
status = le32_to_cpu(*desc_data);
- if (status) {
- reset_level = hclge_log_error(dev, "TM_SCH_RINT",
- &hclge_tm_sch_rint[0], status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
- }
+ if (status)
+ hclge_log_error(dev, "TM_SCH_RINT",
+ &hclge_tm_sch_rint[0], status,
+ &ae_dev->hw_err_reset_req);
/* log QCN(Quantized Congestion Control) errors */
desc_data = (__le32 *)&desc[7];
status = le32_to_cpu(*desc_data) & HCLGE_QCN_FIFO_INT_MASK;
- if (status) {
- reset_level = hclge_log_error(dev, "QCN_FIFO_RINT",
- &hclge_qcn_fifo_rint[0], status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
- }
+ if (status)
+ hclge_log_error(dev, "QCN_FIFO_RINT",
+ &hclge_qcn_fifo_rint[0], status,
+ &ae_dev->hw_err_reset_req);
status = le32_to_cpu(*(desc_data + 1)) & HCLGE_QCN_ECC_INT_MASK;
- if (status) {
- reset_level = hclge_log_error(dev, "QCN_ECC_RINT",
- &hclge_qcn_ecc_rint[0],
- status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
- }
+ if (status)
+ hclge_log_error(dev, "QCN_ECC_RINT",
+ &hclge_qcn_ecc_rint[0], status,
+ &ae_dev->hw_err_reset_req);
/* log NCSI errors */
desc_data = (__le32 *)&desc[9];
status = le32_to_cpu(*desc_data) & HCLGE_NCSI_ECC_INT_MASK;
- if (status) {
- reset_level = hclge_log_error(dev, "NCSI_ECC_INT_RPT",
- &hclge_ncsi_err_int[0], status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
- }
+ if (status)
+ hclge_log_error(dev, "NCSI_ECC_INT_RPT",
+ &hclge_ncsi_err_int[0], status,
+ &ae_dev->hw_err_reset_req);
/* clear all main PF RAS errors */
hclge_cmd_reuse_desc(&desc[0], false);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
-
ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
if (ret)
dev_err(dev, "clear all mpf ras int cmd failed (%d)\n", ret);
@@ -1285,7 +1278,6 @@ static int hclge_handle_pf_ras_error(struct hclge_dev *hdev,
{
struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
struct device *dev = &hdev->pdev->dev;
- enum hnae3_reset_type reset_level;
__le32 *desc_data;
u32 status;
int ret;
@@ -1293,8 +1285,6 @@ static int hclge_handle_pf_ras_error(struct hclge_dev *hdev,
/* query all PF RAS errors */
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_PF_RAS_INT,
true);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
-
ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
if (ret) {
dev_err(dev, "query all pf ras int cmd failed (%d)\n", ret);
@@ -1303,53 +1293,41 @@ static int hclge_handle_pf_ras_error(struct hclge_dev *hdev,
/* log SSU(Storage Switch Unit) errors */
status = le32_to_cpu(desc[0].data[0]);
- if (status) {
- reset_level = hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT",
- &hclge_ssu_port_based_err_int[0],
- status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
- }
+ if (status)
+ hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT",
+ &hclge_ssu_port_based_err_int[0], status,
+ &ae_dev->hw_err_reset_req);
status = le32_to_cpu(desc[0].data[1]);
- if (status) {
- reset_level = hclge_log_error(dev, "SSU_FIFO_OVERFLOW_INT",
- &hclge_ssu_fifo_overflow_int[0],
- status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
- }
+ if (status)
+ hclge_log_error(dev, "SSU_FIFO_OVERFLOW_INT",
+ &hclge_ssu_fifo_overflow_int[0], status,
+ &ae_dev->hw_err_reset_req);
status = le32_to_cpu(desc[0].data[2]);
- if (status) {
- reset_level = hclge_log_error(dev, "SSU_ETS_TCG_INT",
- &hclge_ssu_ets_tcg_int[0],
- status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
- }
+ if (status)
+ hclge_log_error(dev, "SSU_ETS_TCG_INT",
+ &hclge_ssu_ets_tcg_int[0], status,
+ &ae_dev->hw_err_reset_req);
/* log IGU(Ingress Unit) EGU(Egress Unit) TNL errors */
desc_data = (__le32 *)&desc[1];
status = le32_to_cpu(*desc_data) & HCLGE_IGU_EGU_TNL_INT_MASK;
- if (status) {
- reset_level = hclge_log_error(dev, "IGU_EGU_TNL_INT_STS",
- &hclge_igu_egu_tnl_int[0],
- status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
- }
+ if (status)
+ hclge_log_error(dev, "IGU_EGU_TNL_INT_STS",
+ &hclge_igu_egu_tnl_int[0], status,
+ &ae_dev->hw_err_reset_req);
/* log PPU(RCB) errors */
desc_data = (__le32 *)&desc[3];
status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_INT_RAS_MASK;
- if (status) {
- reset_level = hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST0",
- &hclge_ppu_pf_abnormal_int[0],
- status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
- }
+ if (status)
+ hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST0",
+ &hclge_ppu_pf_abnormal_int[0], status,
+ &ae_dev->hw_err_reset_req);
/* clear all PF RAS errors */
hclge_cmd_reuse_desc(&desc[0], false);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
-
ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
if (ret)
dev_err(dev, "clear all pf ras int cmd failed (%d)\n", ret);
@@ -1359,24 +1337,16 @@ static int hclge_handle_pf_ras_error(struct hclge_dev *hdev,
static int hclge_handle_all_ras_errors(struct hclge_dev *hdev)
{
- struct device *dev = &hdev->pdev->dev;
u32 mpf_bd_num, pf_bd_num, bd_num;
- struct hclge_desc desc_bd;
struct hclge_desc *desc;
int ret;
/* query the number of registers in the RAS int status */
- hclge_cmd_setup_basic_desc(&desc_bd, HCLGE_QUERY_RAS_INT_STS_BD_NUM,
- true);
- ret = hclge_cmd_send(&hdev->hw, &desc_bd, 1);
- if (ret) {
- dev_err(dev, "fail(%d) to query ras int status bd num\n", ret);
+ ret = hclge_query_bd_num(hdev, true, &mpf_bd_num, &pf_bd_num);
+ if (ret)
return ret;
- }
- mpf_bd_num = le32_to_cpu(desc_bd.data[0]);
- pf_bd_num = le32_to_cpu(desc_bd.data[1]);
- bd_num = max_t(u32, mpf_bd_num, pf_bd_num);
+ bd_num = max_t(u32, mpf_bd_num, pf_bd_num);
desc = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
if (!desc)
return -ENOMEM;
@@ -1396,6 +1366,66 @@ static int hclge_handle_all_ras_errors(struct hclge_dev *hdev)
return ret;
}
+static int hclge_log_rocee_axi_error(struct hclge_dev *hdev)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc[3];
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_ROCEE_AXI_RAS_INFO_CMD,
+ true);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_QUERY_ROCEE_AXI_RAS_INFO_CMD,
+ true);
+ hclge_cmd_setup_basic_desc(&desc[2], HCLGE_QUERY_ROCEE_AXI_RAS_INFO_CMD,
+ true);
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], 3);
+ if (ret) {
+ dev_err(dev, "failed(%d) to query ROCEE AXI error sts\n", ret);
+ return ret;
+ }
+
+ dev_info(dev, "AXI1: %08X %08X %08X %08X %08X %08X\n",
+ le32_to_cpu(desc[0].data[0]), le32_to_cpu(desc[0].data[1]),
+ le32_to_cpu(desc[0].data[2]), le32_to_cpu(desc[0].data[3]),
+ le32_to_cpu(desc[0].data[4]), le32_to_cpu(desc[0].data[5]));
+ dev_info(dev, "AXI2: %08X %08X %08X %08X %08X %08X\n",
+ le32_to_cpu(desc[1].data[0]), le32_to_cpu(desc[1].data[1]),
+ le32_to_cpu(desc[1].data[2]), le32_to_cpu(desc[1].data[3]),
+ le32_to_cpu(desc[1].data[4]), le32_to_cpu(desc[1].data[5]));
+ dev_info(dev, "AXI3: %08X %08X %08X %08X\n",
+ le32_to_cpu(desc[2].data[0]), le32_to_cpu(desc[2].data[1]),
+ le32_to_cpu(desc[2].data[2]), le32_to_cpu(desc[2].data[3]));
+
+ return 0;
+}
+
+static int hclge_log_rocee_ecc_error(struct hclge_dev *hdev)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc[2];
+ int ret;
+
+ ret = hclge_cmd_query_error(hdev, &desc[0],
+ HCLGE_QUERY_ROCEE_ECC_RAS_INFO_CMD,
+ HCLGE_CMD_FLAG_NEXT, 0, 0);
+ if (ret) {
+ dev_err(dev, "failed(%d) to query ROCEE ECC error sts\n", ret);
+ return ret;
+ }
+
+ dev_info(dev, "ECC1: %08X %08X %08X %08X %08X %08X\n",
+ le32_to_cpu(desc[0].data[0]), le32_to_cpu(desc[0].data[1]),
+ le32_to_cpu(desc[0].data[2]), le32_to_cpu(desc[0].data[3]),
+ le32_to_cpu(desc[0].data[4]), le32_to_cpu(desc[0].data[5]));
+ dev_info(dev, "ECC2: %08X %08X %08X\n", le32_to_cpu(desc[1].data[0]),
+ le32_to_cpu(desc[1].data[1]), le32_to_cpu(desc[1].data[2]));
+
+ return 0;
+}
+
static int hclge_log_rocee_ovf_error(struct hclge_dev *hdev)
{
struct device *dev = &hdev->pdev->dev;
@@ -1403,8 +1433,7 @@ static int hclge_log_rocee_ovf_error(struct hclge_dev *hdev)
int ret;
/* read overflow error status */
- ret = hclge_cmd_query_error(hdev, &desc[0],
- HCLGE_ROCEE_PF_RAS_INT_CMD,
+ ret = hclge_cmd_query_error(hdev, &desc[0], HCLGE_ROCEE_PF_RAS_INT_CMD,
0, 0, 0);
if (ret) {
dev_err(dev, "failed(%d) to query ROCEE OVF error sts\n", ret);
@@ -1464,19 +1493,27 @@ hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
status = le32_to_cpu(desc[0].data[0]);
- if (status & HCLGE_ROCEE_RERR_INT_MASK) {
- dev_warn(dev, "ROCEE RAS AXI rresp error\n");
- reset_type = HNAE3_FUNC_RESET;
- }
+ if (status & HCLGE_ROCEE_AXI_ERR_INT_MASK) {
+ if (status & HCLGE_ROCEE_RERR_INT_MASK)
+ dev_warn(dev, "ROCEE RAS AXI rresp error\n");
+
+ if (status & HCLGE_ROCEE_BERR_INT_MASK)
+ dev_warn(dev, "ROCEE RAS AXI bresp error\n");
- if (status & HCLGE_ROCEE_BERR_INT_MASK) {
- dev_warn(dev, "ROCEE RAS AXI bresp error\n");
reset_type = HNAE3_FUNC_RESET;
+
+ ret = hclge_log_rocee_axi_error(hdev);
+ if (ret)
+ return HNAE3_GLOBAL_RESET;
}
if (status & HCLGE_ROCEE_ECC_INT_MASK) {
dev_warn(dev, "ROCEE RAS 2bit ECC error\n");
reset_type = HNAE3_GLOBAL_RESET;
+
+ ret = hclge_log_rocee_ecc_error(hdev);
+ if (ret)
+ return HNAE3_GLOBAL_RESET;
}
if (status & HCLGE_ROCEE_OVF_INT_MASK) {
@@ -1486,7 +1523,6 @@ hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
/* reset everything for now */
return HNAE3_GLOBAL_RESET;
}
- reset_type = HNAE3_FUNC_RESET;
}
/* clear error status */
@@ -1501,7 +1537,7 @@ hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
return reset_type;
}
-static int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en)
+int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en)
{
struct device *dev = &hdev->pdev->dev;
struct hclge_desc desc;
@@ -1539,7 +1575,7 @@ static void hclge_handle_rocee_ras_error(struct hnae3_ae_dev *ae_dev)
reset_type = hclge_log_and_clear_rocee_ras_error(hdev);
if (reset_type != HNAE3_NONE_RESET)
- HCLGE_SET_DEFAULT_RESET_REQUEST(reset_type);
+ set_bit(reset_type, &ae_dev->hw_err_reset_req);
}
static const struct hclge_hw_blk hw_blk[] = {
@@ -1574,10 +1610,9 @@ static const struct hclge_hw_blk hw_blk[] = {
{ /* sentinel */ }
};
-int hclge_hw_error_set_state(struct hclge_dev *hdev, bool state)
+int hclge_config_nic_hw_error(struct hclge_dev *hdev, bool state)
{
const struct hclge_hw_blk *module = hw_blk;
- struct device *dev = &hdev->pdev->dev;
int ret = 0;
while (module->name) {
@@ -1589,10 +1624,6 @@ int hclge_hw_error_set_state(struct hclge_dev *hdev, bool state)
module++;
}
- ret = hclge_config_rocee_ras_interrupt(hdev, state);
- if (ret)
- dev_err(dev, "fail(%d) to configure ROCEE err int\n", ret);
-
return ret;
}
@@ -1602,165 +1633,281 @@ pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev)
struct device *dev = &hdev->pdev->dev;
u32 status;
+ if (!test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state)) {
+ dev_err(dev,
+ "Can't recover - RAS error reported during dev init\n");
+ return PCI_ERS_RESULT_NONE;
+ }
+
status = hclge_read_dev(&hdev->hw, HCLGE_RAS_PF_OTHER_INT_STS_REG);
+ if (status & HCLGE_RAS_REG_NFE_MASK ||
+ status & HCLGE_RAS_REG_ROCEE_ERR_MASK)
+ ae_dev->hw_err_reset_req = 0;
+ else
+ goto out;
+
/* Handling Non-fatal HNS RAS errors */
if (status & HCLGE_RAS_REG_NFE_MASK) {
dev_warn(dev,
"HNS Non-Fatal RAS error(status=0x%x) identified\n",
status);
hclge_handle_all_ras_errors(hdev);
- } else {
- if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
- hdev->pdev->revision < 0x21) {
- ae_dev->override_pci_need_reset = 1;
- return PCI_ERS_RESULT_RECOVERED;
- }
}
- if (status & HCLGE_RAS_REG_ROCEE_ERR_MASK) {
- dev_warn(dev, "ROCEE uncorrected RAS error identified\n");
+ /* Handling Non-fatal Rocee RAS errors */
+ if (hdev->pdev->revision >= 0x21 &&
+ status & HCLGE_RAS_REG_ROCEE_ERR_MASK) {
+ dev_warn(dev, "ROCEE Non-Fatal RAS error identified\n");
hclge_handle_rocee_ras_error(ae_dev);
}
- if (status & HCLGE_RAS_REG_NFE_MASK ||
- status & HCLGE_RAS_REG_ROCEE_ERR_MASK) {
- ae_dev->override_pci_need_reset = 0;
+ if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ goto out;
+
+ if (ae_dev->hw_err_reset_req)
return PCI_ERS_RESULT_NEED_RESET;
- }
- ae_dev->override_pci_need_reset = 1;
+out:
return PCI_ERS_RESULT_RECOVERED;
}
-int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
- unsigned long *reset_requests)
+static int hclge_clear_hw_msix_error(struct hclge_dev *hdev,
+ struct hclge_desc *desc, bool is_mpf,
+ u32 bd_num)
+{
+ if (is_mpf)
+ desc[0].opcode =
+ cpu_to_le16(HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT);
+ else
+ desc[0].opcode = cpu_to_le16(HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT);
+
+ desc[0].flag = cpu_to_le16(HCLGE_CMD_FLAG_NO_INTR | HCLGE_CMD_FLAG_IN);
+
+ return hclge_cmd_send(&hdev->hw, &desc[0], bd_num);
+}
+
+/* hclge_query_over_8bd_err_info: query information about over_8bd_nfe_err
+ * @hdev: pointer to struct hclge_dev
+ * @vf_id: Index of the virtual function with error
+ * @q_id: Physical index of the queue with error
+ *
+ * This function gets the specific index of the queue and of the function
+ * that caused over_8bd_nfe_err, by command. If vf_id is 0, it means the
+ * error was caused by the PF instead of a VF.
+ */
+static int hclge_query_over_8bd_err_info(struct hclge_dev *hdev, u16 *vf_id,
+ u16 *q_id)
+{
+ struct hclge_query_ppu_pf_other_int_dfx_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PPU_PF_OTHER_INT_DFX, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ return ret;
+
+ req = (struct hclge_query_ppu_pf_other_int_dfx_cmd *)desc.data;
+ *vf_id = le16_to_cpu(req->over_8bd_no_fe_vf_id);
+ *q_id = le16_to_cpu(req->over_8bd_no_fe_qid);
+
+ return 0;
+}
+
+/* hclge_handle_over_8bd_err: handle MSI-X error named over_8bd_nfe_err
+ * @hdev: pointer to struct hclge_dev
+ * @reset_requests: reset level that we need to trigger later
+ *
+ * over_8bd_nfe_err is a special MSI-X error because it may be caused by a
+ * VF; in that case, a VF reset needs to be triggered. Otherwise, a PF reset
+ * is needed.
+ */
+static void hclge_handle_over_8bd_err(struct hclge_dev *hdev,
+ unsigned long *reset_requests)
{
- struct hclge_mac_tnl_stats mac_tnl_stats;
struct device *dev = &hdev->pdev->dev;
- u32 mpf_bd_num, pf_bd_num, bd_num;
- enum hnae3_reset_type reset_level;
- struct hclge_desc desc_bd;
- struct hclge_desc *desc;
- __le32 *desc_data;
- u32 status;
+ u16 vf_id;
+ u16 q_id;
int ret;
- /* query the number of bds for the MSIx int status */
- hclge_cmd_setup_basic_desc(&desc_bd, HCLGE_QUERY_MSIX_INT_STS_BD_NUM,
- true);
- ret = hclge_cmd_send(&hdev->hw, &desc_bd, 1);
+ ret = hclge_query_over_8bd_err_info(hdev, &vf_id, &q_id);
if (ret) {
- dev_err(dev, "fail(%d) to query msix int status bd num\n",
+ dev_err(dev, "fail(%d) to query over_8bd_no_fe info\n",
ret);
- return ret;
+ return;
}
- mpf_bd_num = le32_to_cpu(desc_bd.data[0]);
- pf_bd_num = le32_to_cpu(desc_bd.data[1]);
- bd_num = max_t(u32, mpf_bd_num, pf_bd_num);
+ dev_warn(dev, "PPU_PF_ABNORMAL_INT_ST over_8bd_no_fe found, vf_id(%d), queue_id(%d)\n",
+ vf_id, q_id);
- desc = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
- if (!desc)
- goto out;
+ if (vf_id) {
+ if (vf_id >= hdev->num_alloc_vport) {
+ dev_err(dev, "invalid vf id(%d)\n", vf_id);
+ return;
+ }
+
+ /* If another reset whose level is higher than
+ * HNAE3_VF_FUNC_RESET needs to be triggered, there is
+ * no need to trigger a VF reset here.
+ */
+ if (*reset_requests != 0)
+ return;
+ ret = hclge_inform_reset_assert_to_vf(&hdev->vport[vf_id]);
+ if (ret)
+ dev_warn(dev, "inform reset to vf(%d) failed %d!\n",
+ hdev->vport->vport_id, ret);
+ } else {
+ set_bit(HNAE3_FUNC_RESET, reset_requests);
+ }
+}
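
To make the two branches above concrete, a hypothetical walk-through (the values are invented for illustration, not taken from the patch):

/* Example (hypothetical values):
 *   - DFX query returns vf_id = 3, q_id = 17 and *reset_requests == 0:
 *     the PF sends a reset-assert mailbox message to vport 3 and does
 *     not touch *reset_requests.
 *   - DFX query returns vf_id = 0 (the PF's own queue):
 *     HNAE3_FUNC_RESET is set in *reset_requests so the PF resets itself.
 *   - vf_id != 0 but a higher-level reset is already requested:
 *     nothing is done here; the bigger reset covers the VF anyway.
 */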
+
+/* hclge_handle_mpf_msix_error: handle all main PF MSI-X errors
+ * @hdev: pointer to struct hclge_dev
+ * @desc: descriptor for describing the command
+ * @mpf_bd_num: number of extended command structures
+ * @reset_requests: record of the reset level that we need
+ *
+ * This function handles all the main PF MSI-X errors reported in the hw
+ * registers, queried by command.
+ */
+static int hclge_handle_mpf_msix_error(struct hclge_dev *hdev,
+ struct hclge_desc *desc,
+ int mpf_bd_num,
+ unsigned long *reset_requests)
+{
+ struct device *dev = &hdev->pdev->dev;
+ __le32 *desc_data;
+ u32 status;
+ int ret;
/* query all main PF MSIx errors */
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT,
true);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
-
ret = hclge_cmd_send(&hdev->hw, &desc[0], mpf_bd_num);
if (ret) {
- dev_err(dev, "query all mpf msix int cmd failed (%d)\n",
- ret);
- goto msi_error;
+ dev_err(dev, "query all mpf msix int cmd failed (%d)\n", ret);
+ return ret;
}
/* log MAC errors */
desc_data = (__le32 *)&desc[1];
status = le32_to_cpu(*desc_data);
- if (status) {
- reset_level = hclge_log_error(dev, "MAC_AFIFO_TNL_INT_R",
- &hclge_mac_afifo_tnl_int[0],
- status);
- set_bit(reset_level, reset_requests);
- }
+ if (status)
+ hclge_log_error(dev, "MAC_AFIFO_TNL_INT_R",
+ &hclge_mac_afifo_tnl_int[0], status,
+ reset_requests);
/* log PPU(RCB) MPF errors */
desc_data = (__le32 *)&desc[5];
status = le32_to_cpu(*(desc_data + 2)) &
HCLGE_PPU_MPF_INT_ST2_MSIX_MASK;
- if (status) {
- reset_level =
- hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2",
- &hclge_ppu_mpf_abnormal_int_st2[0],
- status);
- set_bit(reset_level, reset_requests);
- }
+ if (status)
+ dev_warn(dev, "PPU_MPF_ABNORMAL_INT_ST2 rx_q_search_miss found [dfx status=0x%x\n]",
+ status);
/* clear all main PF MSIx errors */
- hclge_cmd_reuse_desc(&desc[0], false);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ ret = hclge_clear_hw_msix_error(hdev, desc, true, mpf_bd_num);
+ if (ret)
+ dev_err(dev, "clear all mpf msix int cmd failed (%d)\n", ret);
- ret = hclge_cmd_send(&hdev->hw, &desc[0], mpf_bd_num);
- if (ret) {
- dev_err(dev, "clear all mpf msix int cmd failed (%d)\n",
- ret);
- goto msi_error;
- }
+ return ret;
+}
+
+/* hclge_handle_pf_msix_error: handle all PF MSI-X errors
+ * @hdev: pointer to struct hclge_dev
+ * @desc: descriptor for describing the command
+ * @pf_bd_num: number of extended command structures
+ * @reset_requests: record of the reset level that we need
+ *
+ * This function handles all the PF MSI-X errors reported in the hw
+ * registers, queried by command.
+ */
+static int hclge_handle_pf_msix_error(struct hclge_dev *hdev,
+ struct hclge_desc *desc,
+ int pf_bd_num,
+ unsigned long *reset_requests)
+{
+ struct device *dev = &hdev->pdev->dev;
+ __le32 *desc_data;
+ u32 status;
+ int ret;
/* query all PF MSIx errors */
- memset(desc, 0, bd_num * sizeof(struct hclge_desc));
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT,
true);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
-
ret = hclge_cmd_send(&hdev->hw, &desc[0], pf_bd_num);
if (ret) {
- dev_err(dev, "query all pf msix int cmd failed (%d)\n",
- ret);
- goto msi_error;
+ dev_err(dev, "query all pf msix int cmd failed (%d)\n", ret);
+ return ret;
}
/* log SSU PF errors */
status = le32_to_cpu(desc[0].data[0]) & HCLGE_SSU_PORT_INT_MSIX_MASK;
- if (status) {
- reset_level = hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT",
- &hclge_ssu_port_based_pf_int[0],
- status);
- set_bit(reset_level, reset_requests);
- }
+ if (status)
+ hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT",
+ &hclge_ssu_port_based_pf_int[0],
+ status, reset_requests);
/* read and log PPP PF errors */
desc_data = (__le32 *)&desc[2];
status = le32_to_cpu(*desc_data);
- if (status) {
- reset_level = hclge_log_error(dev, "PPP_PF_ABNORMAL_INT_ST0",
- &hclge_ppp_pf_abnormal_int[0],
- status);
- set_bit(reset_level, reset_requests);
- }
+ if (status)
+ hclge_log_error(dev, "PPP_PF_ABNORMAL_INT_ST0",
+ &hclge_ppp_pf_abnormal_int[0],
+ status, reset_requests);
/* log PPU(RCB) PF errors */
desc_data = (__le32 *)&desc[3];
status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_INT_MSIX_MASK;
- if (status) {
- reset_level = hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST",
- &hclge_ppu_pf_abnormal_int[0],
- status);
- set_bit(reset_level, reset_requests);
- }
+ if (status)
+ hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST",
+ &hclge_ppu_pf_abnormal_int[0],
+ status, reset_requests);
+
+ status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_OVER_8BD_ERR_MASK;
+ if (status)
+ hclge_handle_over_8bd_err(hdev, reset_requests);
/* clear all PF MSIx errors */
- hclge_cmd_reuse_desc(&desc[0], false);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ ret = hclge_clear_hw_msix_error(hdev, desc, false, pf_bd_num);
+ if (ret)
+ dev_err(dev, "clear all pf msix int cmd failed (%d)\n", ret);
- ret = hclge_cmd_send(&hdev->hw, &desc[0], pf_bd_num);
- if (ret) {
- dev_err(dev, "clear all pf msix int cmd failed (%d)\n",
- ret);
+ return ret;
+}
+
+static int hclge_handle_all_hw_msix_error(struct hclge_dev *hdev,
+ unsigned long *reset_requests)
+{
+ struct hclge_mac_tnl_stats mac_tnl_stats;
+ struct device *dev = &hdev->pdev->dev;
+ u32 mpf_bd_num, pf_bd_num, bd_num;
+ struct hclge_desc *desc;
+ u32 status;
+ int ret;
+
+ /* query the number of bds for the MSIx int status */
+ ret = hclge_query_bd_num(hdev, false, &mpf_bd_num, &pf_bd_num);
+ if (ret)
+ goto out;
+
+ bd_num = max_t(u32, mpf_bd_num, pf_bd_num);
+ desc = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
+ if (!desc) {
+ ret = -ENOMEM;
+ goto out;
}
+ ret = hclge_handle_mpf_msix_error(hdev, desc, mpf_bd_num,
+ reset_requests);
+ if (ret)
+ goto msi_error;
+
+ memset(desc, 0, bd_num * sizeof(struct hclge_desc));
+ ret = hclge_handle_pf_msix_error(hdev, desc, pf_bd_num, reset_requests);
+ if (ret)
+ goto msi_error;
+
/* query and clear mac tnl interruptions */
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_MAC_TNL_INT,
true);
@@ -1783,7 +1930,6 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
ret = hclge_clear_mac_tnl_int(hdev);
if (ret)
dev_err(dev, "clear mac tnl int failed (%d)\n", ret);
- set_bit(HNAE3_NONE_RESET, reset_requests);
}
msi_error:
@@ -1791,3 +1937,70 @@ msi_error:
out:
return ret;
}
+
+int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
+ unsigned long *reset_requests)
+{
+ struct device *dev = &hdev->pdev->dev;
+
+ if (!test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state)) {
+ dev_err(dev,
+ "Can't handle - MSIx error reported during dev init\n");
+ return 0;
+ }
+
+ return hclge_handle_all_hw_msix_error(hdev, reset_requests);
+}
+
+void hclge_handle_all_hns_hw_errors(struct hnae3_ae_dev *ae_dev)
+{
+#define HCLGE_DESC_NO_DATA_LEN 8
+
+ struct hclge_dev *hdev = ae_dev->priv;
+ struct device *dev = &hdev->pdev->dev;
+ u32 mpf_bd_num, pf_bd_num, bd_num;
+ struct hclge_desc *desc;
+ u32 status;
+ int ret;
+
+ ae_dev->hw_err_reset_req = 0;
+ status = hclge_read_dev(&hdev->hw, HCLGE_RAS_PF_OTHER_INT_STS_REG);
+
+ /* query the number of bds for the MSIx int status */
+ ret = hclge_query_bd_num(hdev, false, &mpf_bd_num, &pf_bd_num);
+ if (ret)
+ return;
+
+ bd_num = max_t(u32, mpf_bd_num, pf_bd_num);
+ desc = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
+ if (!desc)
+ return;
+
+ /* Clear HNS hw errors reported through msix */
+ memset(&desc[0].data[0], 0xFF, mpf_bd_num * sizeof(struct hclge_desc) -
+ HCLGE_DESC_NO_DATA_LEN);
+ ret = hclge_clear_hw_msix_error(hdev, desc, true, mpf_bd_num);
+ if (ret) {
+ dev_err(dev, "fail(%d) to clear mpf msix int during init\n",
+ ret);
+ goto msi_error;
+ }
+
+ memset(&desc[0].data[0], 0xFF, pf_bd_num * sizeof(struct hclge_desc) -
+ HCLGE_DESC_NO_DATA_LEN);
+ ret = hclge_clear_hw_msix_error(hdev, desc, false, pf_bd_num);
+ if (ret) {
+ dev_err(dev, "fail(%d) to clear pf msix int during init\n",
+ ret);
+ goto msi_error;
+ }
+
+ /* Handle Non-fatal HNS RAS errors */
+ if (status & HCLGE_RAS_REG_NFE_MASK) {
+ dev_warn(dev, "HNS hw error(RAS) identified during init\n");
+ hclge_handle_all_ras_errors(hdev);
+ }
+
+msi_error:
+ kfree(desc);
+}
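
A note on the memset length above, assuming the usual command-descriptor layout (an 8-byte opcode/flag/retval/rsv header followed by six __le32 data words, i.e. sizeof(struct hclge_desc) == 32; the struct itself is defined elsewhere, so this is stated as an assumption):

/* With HCLGE_DESC_NO_DATA_LEN == 8:
 *
 *   bd_num * sizeof(struct hclge_desc) - HCLGE_DESC_NO_DATA_LEN
 *   == bd_num * 32 - 8
 *
 * starting at &desc[0].data[0] (offset 8) reaches exactly the end of
 * desc[bd_num - 1]; every data word is preset to 0xFF before
 * hclge_clear_hw_msix_error() rewrites desc[0]'s opcode and flag.
 */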
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
index 9645590c9294..7ea8bb28a0cb 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
@@ -6,6 +6,11 @@
#include "hclge_main.h"
+#define HCLGE_MPF_RAS_INT_MIN_BD_NUM 10
+#define HCLGE_PF_RAS_INT_MIN_BD_NUM 4
+#define HCLGE_MPF_MSIX_INT_MIN_BD_NUM 10
+#define HCLGE_PF_MSIX_INT_MIN_BD_NUM 4
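
The new *_MIN_BD_NUM limits above are consumed by the hclge_query_bd_num() helper added elsewhere in this patch; its body is not shown in this hunk, so the following is only a plausible sketch of how such a helper could validate the firmware-reported BD counts (the exact checks and opcode selection are assumptions):

static int hclge_query_bd_num(struct hclge_dev *hdev, bool is_ras,
			      u32 *mpf_bd_num, u32 *pf_bd_num)
{
	u32 mpf_min_bd_num, pf_min_bd_num;
	enum hclge_opcode_type opcode;
	struct hclge_desc desc_bd;
	int ret;

	if (is_ras) {
		opcode = HCLGE_QUERY_RAS_INT_STS_BD_NUM;
		mpf_min_bd_num = HCLGE_MPF_RAS_INT_MIN_BD_NUM;
		pf_min_bd_num = HCLGE_PF_RAS_INT_MIN_BD_NUM;
	} else {
		opcode = HCLGE_QUERY_MSIX_INT_STS_BD_NUM;
		mpf_min_bd_num = HCLGE_MPF_MSIX_INT_MIN_BD_NUM;
		pf_min_bd_num = HCLGE_PF_MSIX_INT_MIN_BD_NUM;
	}

	/* one descriptor is enough to read both BD counters */
	hclge_cmd_setup_basic_desc(&desc_bd, opcode, true);
	ret = hclge_cmd_send(&hdev->hw, &desc_bd, 1);
	if (ret)
		return ret;

	*mpf_bd_num = le32_to_cpu(desc_bd.data[0]);
	*pf_bd_num = le32_to_cpu(desc_bd.data[1]);

	/* reject counts smaller than the layout the parsers expect */
	if (*mpf_bd_num < mpf_min_bd_num || *pf_bd_num < pf_min_bd_num)
		return -EINVAL;

	return 0;
}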
+
#define HCLGE_RAS_PF_OTHER_INT_STS_REG 0x20B00
#define HCLGE_RAS_REG_NFE_MASK 0xFF00
#define HCLGE_RAS_REG_ROCEE_ERR_MASK 0x3000000
@@ -47,9 +52,9 @@
#define HCLGE_NCSI_ERR_INT_TYPE 0x9
#define HCLGE_MAC_COMMON_ERR_INT_EN 0x107FF
#define HCLGE_MAC_COMMON_ERR_INT_EN_MASK 0x107FF
-#define HCLGE_MAC_TNL_INT_EN GENMASK(7, 0)
-#define HCLGE_MAC_TNL_INT_EN_MASK GENMASK(7, 0)
-#define HCLGE_MAC_TNL_INT_CLR GENMASK(7, 0)
+#define HCLGE_MAC_TNL_INT_EN GENMASK(9, 0)
+#define HCLGE_MAC_TNL_INT_EN_MASK GENMASK(9, 0)
+#define HCLGE_MAC_TNL_INT_CLR GENMASK(9, 0)
#define HCLGE_PPU_MPF_ABNORMAL_INT0_EN GENMASK(31, 0)
#define HCLGE_PPU_MPF_ABNORMAL_INT0_EN_MASK GENMASK(31, 0)
#define HCLGE_PPU_MPF_ABNORMAL_INT1_EN GENMASK(31, 0)
@@ -81,9 +86,10 @@
#define HCLGE_IGU_EGU_TNL_INT_MASK GENMASK(5, 0)
#define HCLGE_PPP_MPF_INT_ST3_MASK GENMASK(5, 0)
#define HCLGE_PPU_MPF_INT_ST3_MASK GENMASK(7, 0)
-#define HCLGE_PPU_MPF_INT_ST2_MSIX_MASK GENMASK(29, 28)
+#define HCLGE_PPU_MPF_INT_ST2_MSIX_MASK BIT(29)
#define HCLGE_PPU_PF_INT_RAS_MASK 0x18
-#define HCLGE_PPU_PF_INT_MSIX_MASK 0x27
+#define HCLGE_PPU_PF_INT_MSIX_MASK 0x26
+#define HCLGE_PPU_PF_OVER_8BD_ERR_MASK 0x01
#define HCLGE_QCN_FIFO_INT_MASK GENMASK(17, 0)
#define HCLGE_QCN_ECC_INT_MASK GENMASK(21, 0)
#define HCLGE_NCSI_ECC_INT_MASK GENMASK(1, 0)
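
For reference, the widened and split masks above evaluate to the following plain constants; this is just the GENMASK()/BIT() arithmetic spelled out, not anything the patch itself defines:

/* GENMASK(9, 0)  == 0x3FF        (was GENMASK(7, 0)   == 0xFF)
 * BIT(29)        == 0x20000000   (was GENMASK(29, 28) == 0x30000000)
 * 0x26 | 0x01    == 0x27         (the old HCLGE_PPU_PF_INT_MSIX_MASK,
 *                                 now split so over_8bd_nfe_err is
 *                                 handled on its own)
 */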
@@ -94,6 +100,7 @@
#define HCLGE_ROCEE_RAS_CE_INT_EN_MASK 0x1
#define HCLGE_ROCEE_RERR_INT_MASK BIT(0)
#define HCLGE_ROCEE_BERR_INT_MASK BIT(1)
+#define HCLGE_ROCEE_AXI_ERR_INT_MASK GENMASK(1, 0)
#define HCLGE_ROCEE_ECC_INT_MASK BIT(2)
#define HCLGE_ROCEE_OVF_INT_MASK BIT(3)
#define HCLGE_ROCEE_OVF_ERR_INT_MASK 0x10000
@@ -119,7 +126,9 @@ struct hclge_hw_error {
};
int hclge_config_mac_tnl_int(struct hclge_dev *hdev, bool en);
-int hclge_hw_error_set_state(struct hclge_dev *hdev, bool state);
+int hclge_config_nic_hw_error(struct hclge_dev *hdev, bool state);
+int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en);
+void hclge_handle_all_hns_hw_errors(struct hnae3_ae_dev *ae_dev);
pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev);
int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
unsigned long *reset_requests);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index d3b1f8cb1155..3fde5471e1c0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -27,14 +27,26 @@
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
-#define HCLGE_BUF_SIZE_UNIT 256
+#define HCLGE_BUF_SIZE_UNIT 256U
+#define HCLGE_BUF_MUL_BY 2
+#define HCLGE_BUF_DIV_BY 2
+#define NEED_RESERVE_TC_NUM 2
+#define BUF_MAX_PERCENT 100
+#define BUF_RESERVE_PERCENT 90
+
+#define HCLGE_RESET_MAX_FAIL_CNT 5
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
+static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
u16 *allocated_size, bool is_alloc);
+static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
+static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
+static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
+ unsigned long *addr);
static struct hnae3_ae_algo ae_algo;
@@ -290,7 +302,7 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = {
static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
{
.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
- .ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
+ .ethter_type = cpu_to_le16(ETH_P_LLDP),
.mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
.mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
.i_port_bitmap = 0x1,
@@ -437,8 +449,7 @@ static int hclge_tqps_update_stats(struct hnae3_handle *handle)
queue = handle->kinfo.tqp[i];
tqp = container_of(queue, struct hclge_tqp, q);
/* command : HCLGE_OPC_QUERY_IGU_STAT */
- hclge_cmd_setup_basic_desc(&desc[0],
- HCLGE_OPC_QUERY_RX_STATUS,
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS,
true);
desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
@@ -446,7 +457,7 @@ static int hclge_tqps_update_stats(struct hnae3_handle *handle)
if (ret) {
dev_err(&hdev->pdev->dev,
"Query tqp stat fail, status = %d,queue = %d\n",
- ret, i);
+ ret, i);
return ret;
}
tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
@@ -500,6 +511,7 @@ static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ /* each tqp has one TX queue and one RX queue */
return kinfo->num_tqps * (2);
}
@@ -528,7 +540,7 @@ static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
return buff;
}
-static u64 *hclge_comm_get_stats(void *comm_stats,
+static u64 *hclge_comm_get_stats(const void *comm_stats,
const struct hclge_comm_stats_str strs[],
int size, u64 *data)
{
@@ -552,8 +564,7 @@ static u8 *hclge_comm_get_strings(u32 stringset,
return buff;
for (i = 0; i < size; i++) {
- snprintf(buff, ETH_GSTRING_LEN,
- strs[i].desc);
+ snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
buff = buff + ETH_GSTRING_LEN;
}
@@ -644,8 +655,7 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
return count;
}
-static void hclge_get_strings(struct hnae3_handle *handle,
- u32 stringset,
+static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
u8 *data)
{
u8 *p = (char *)data;
@@ -653,21 +663,17 @@ static void hclge_get_strings(struct hnae3_handle *handle,
if (stringset == ETH_SS_STATS) {
size = ARRAY_SIZE(g_mac_stats_string);
- p = hclge_comm_get_strings(stringset,
- g_mac_stats_string,
- size,
- p);
+ p = hclge_comm_get_strings(stringset, g_mac_stats_string,
+ size, p);
p = hclge_tqps_get_strings(handle, p);
} else if (stringset == ETH_SS_TEST) {
if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
- memcpy(p,
- hns3_nic_test_strs[HNAE3_LOOP_APP],
+ memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
- memcpy(p,
- hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
+ memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
@@ -678,8 +684,7 @@ static void hclge_get_strings(struct hnae3_handle *handle,
p += ETH_GSTRING_LEN;
}
if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
- memcpy(p,
- hns3_nic_test_strs[HNAE3_LOOP_PHY],
+ memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
@@ -692,10 +697,8 @@ static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
struct hclge_dev *hdev = vport->back;
u64 *p;
- p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
- g_mac_stats_string,
- ARRAY_SIZE(g_mac_stats_string),
- data);
+ p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats, g_mac_stats_string,
+ ARRAY_SIZE(g_mac_stats_string), data);
p = hclge_tqps_get_stats(handle, p);
}
@@ -726,6 +729,8 @@ static int hclge_parse_func_status(struct hclge_dev *hdev,
static int hclge_query_function_status(struct hclge_dev *hdev)
{
+#define HCLGE_QUERY_MAX_CNT 5
+
struct hclge_func_status_cmd *req;
struct hclge_desc desc;
int timeout = 0;
@@ -738,9 +743,7 @@ static int hclge_query_function_status(struct hclge_dev *hdev)
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
- "query function status failed %d.\n",
- ret);
-
+ "query function status failed %d.\n", ret);
return ret;
}
@@ -748,7 +751,7 @@ static int hclge_query_function_status(struct hclge_dev *hdev)
if (req->pf_state)
break;
usleep_range(1000, 2000);
- } while (timeout++ < 5);
+ } while (timeout++ < HCLGE_QUERY_MAX_CNT);
ret = hclge_parse_func_status(hdev, req);
@@ -800,7 +803,7 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
/* PF should have NIC vectors and Roce vectors,
* NIC vectors are queued before Roce vectors.
*/
- hdev->num_msi = hdev->num_roce_msi +
+ hdev->num_msi = hdev->num_roce_msi +
hdev->roce_base_msix_offset;
} else {
hdev->num_msi =
@@ -1058,6 +1061,7 @@ static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
}
static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
@@ -1076,7 +1080,7 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
struct hclge_cfg_param_cmd *req;
u64 mac_addr_tmp_high;
u64 mac_addr_tmp;
- int i;
+ unsigned int i;
req = (struct hclge_cfg_param_cmd *)desc[0].data;
@@ -1138,7 +1142,8 @@ static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
struct hclge_cfg_param_cmd *req;
- int i, ret;
+ unsigned int i;
+ int ret;
for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
u32 offset = 0;
@@ -1204,7 +1209,8 @@ static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
static int hclge_configure(struct hclge_dev *hdev)
{
struct hclge_cfg cfg;
- int ret, i;
+ unsigned int i;
+ int ret;
ret = hclge_get_cfg(hdev, &cfg);
if (ret) {
@@ -1226,8 +1232,10 @@ static int hclge_configure(struct hclge_dev *hdev)
hdev->tm_info.hw_pfc_map = 0;
hdev->wanted_umv_size = cfg.umv_space;
- if (hnae3_dev_fd_supported(hdev))
+ if (hnae3_dev_fd_supported(hdev)) {
hdev->fd_en = true;
+ hdev->fd_active_type = HCLGE_FD_RULE_NONE;
+ }
ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
if (ret) {
@@ -1265,8 +1273,8 @@ static int hclge_configure(struct hclge_dev *hdev)
return ret;
}
-static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
- int tso_mss_max)
+static int hclge_config_tso(struct hclge_dev *hdev, unsigned int tso_mss_min,
+ unsigned int tso_mss_max)
{
struct hclge_cfg_tso_status_cmd *req;
struct hclge_desc desc;
@@ -1352,8 +1360,9 @@ static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
req = (struct hclge_tqp_map_cmd *)desc.data;
req->tqp_id = cpu_to_le16(tqp_pid);
req->tqp_vf = func_id;
- req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
- 1 << HCLGE_TQP_MAP_EN_B;
+ req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
+ if (!is_pf)
+ req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
req->tqp_vid = cpu_to_le16(tqp_vid);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -1457,11 +1466,6 @@ static int hclge_map_tqp(struct hclge_dev *hdev)
return 0;
}
-static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
-{
- /* this would be initialized later */
-}
-
static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
struct hnae3_handle *nic = &vport->nic;
@@ -1472,20 +1476,12 @@ static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
nic->ae_algo = &ae_algo;
nic->numa_node_mask = hdev->numa_node_mask;
- if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
- ret = hclge_knic_setup(vport, num_tqps,
- hdev->num_tx_desc, hdev->num_rx_desc);
-
- if (ret) {
- dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
- ret);
- return ret;
- }
- } else {
- hclge_unic_setup(vport, num_tqps);
- }
+ ret = hclge_knic_setup(vport, num_tqps,
+ hdev->num_tx_desc, hdev->num_rx_desc);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
- return 0;
+ return ret;
}
static int hclge_alloc_vport(struct hclge_dev *hdev)
@@ -1591,7 +1587,8 @@ static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
static u32 hclge_get_tc_num(struct hclge_dev *hdev)
{
- int i, cnt = 0;
+ unsigned int i;
+ u32 cnt = 0;
for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
if (hdev->hw_tc_map & BIT(i))
@@ -1604,7 +1601,8 @@ static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
struct hclge_pkt_buf_alloc *buf_alloc)
{
struct hclge_priv_buf *priv;
- int i, cnt = 0;
+ unsigned int i;
+ int cnt = 0;
for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
priv = &buf_alloc->priv_buf[i];
@@ -1621,7 +1619,8 @@ static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
struct hclge_pkt_buf_alloc *buf_alloc)
{
struct hclge_priv_buf *priv;
- int i, cnt = 0;
+ unsigned int i;
+ int cnt = 0;
for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
priv = &buf_alloc->priv_buf[i];
@@ -1671,7 +1670,8 @@ static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
if (hnae3_dev_dcb_supported(hdev))
- shared_buf_min = 2 * aligned_mps + hdev->dv_buf_size;
+ shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
+ hdev->dv_buf_size;
else
shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
+ hdev->dv_buf_size;
@@ -1689,7 +1689,8 @@ static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
if (hnae3_dev_dcb_supported(hdev)) {
buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
- - roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
+ - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
+ HCLGE_BUF_SIZE_UNIT);
} else {
buf_alloc->s_buf.self.high = aligned_mps +
HCLGE_NON_DCB_ADDITIONAL_BUF;
@@ -1697,14 +1698,18 @@ static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
}
if (hnae3_dev_dcb_supported(hdev)) {
+ hi_thrd = shared_buf - hdev->dv_buf_size;
+
+ if (tc_num <= NEED_RESERVE_TC_NUM)
+ hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
+ / BUF_MAX_PERCENT;
+
if (tc_num)
- hi_thrd = (shared_buf - hdev->dv_buf_size) / tc_num;
- else
- hi_thrd = shared_buf - hdev->dv_buf_size;
+ hi_thrd = hi_thrd / tc_num;
- hi_thrd = max_t(u32, hi_thrd, 2 * aligned_mps);
+ hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
- lo_thrd = hi_thrd - aligned_mps / 2;
+ lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
} else {
hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
lo_thrd = aligned_mps;
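
A worked example of the DCB branch above, with purely hypothetical inputs:

/* Hypothetical: shared_buf = 12288, dv_buf_size = 2048,
 * aligned_mps = 1536, tc_num = 1 (<= NEED_RESERVE_TC_NUM):
 *
 *   hi_thrd = 12288 - 2048          = 10240
 *   hi_thrd = 10240 * 90 / 100      =  9216
 *   hi_thrd = 9216 / 1              =  9216
 *   hi_thrd = max(9216, 2 * 1536)   =  9216
 *   hi_thrd = rounddown(9216, 256)  =  9216
 *   lo_thrd = 9216 - 1536 / 2       =  8448
 */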
@@ -1749,7 +1754,7 @@ static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
{
u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
- int i;
+ unsigned int i;
for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
@@ -1765,12 +1770,13 @@ static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
priv->enable = 1;
if (hdev->tm_info.hw_pfc_map & BIT(i)) {
- priv->wl.low = max ? aligned_mps : 256;
+ priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
priv->wl.high = roundup(priv->wl.low + aligned_mps,
HCLGE_BUF_SIZE_UNIT);
} else {
priv->wl.low = 0;
- priv->wl.high = max ? (aligned_mps * 2) : aligned_mps;
+ priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
+ aligned_mps;
}
priv->buf_size = priv->wl.high + hdev->dv_buf_size;
@@ -1789,9 +1795,10 @@ static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
/* let the last to be cleared first */
for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
+ unsigned int mask = BIT((unsigned int)i);
- if (hdev->hw_tc_map & BIT(i) &&
- !(hdev->tm_info.hw_pfc_map & BIT(i))) {
+ if (hdev->hw_tc_map & mask &&
+ !(hdev->tm_info.hw_pfc_map & mask)) {
/* Clear the no pfc TC private buffer */
priv->wl.low = 0;
priv->wl.high = 0;
@@ -1818,9 +1825,10 @@ static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
/* let the last to be cleared first */
for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
+ unsigned int mask = BIT((unsigned int)i);
- if (hdev->hw_tc_map & BIT(i) &&
- hdev->tm_info.hw_pfc_map & BIT(i)) {
+ if (hdev->hw_tc_map & mask &&
+ hdev->tm_info.hw_pfc_map & mask) {
/* Reduce the number of pfc TC with private buffer */
priv->wl.low = 0;
priv->enable = 0;
@@ -1837,6 +1845,55 @@ static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}
+static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
+ struct hclge_pkt_buf_alloc *buf_alloc)
+{
+#define COMPENSATE_BUFFER 0x3C00
+#define COMPENSATE_HALF_MPS_NUM 5
+#define PRIV_WL_GAP 0x1800
+
+ u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
+ u32 tc_num = hclge_get_tc_num(hdev);
+ u32 half_mps = hdev->mps >> 1;
+ u32 min_rx_priv;
+ unsigned int i;
+
+ if (tc_num)
+ rx_priv = rx_priv / tc_num;
+
+ if (tc_num <= NEED_RESERVE_TC_NUM)
+ rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
+
+ min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
+ COMPENSATE_HALF_MPS_NUM * half_mps;
+ min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
+ rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
+
+ if (rx_priv < min_rx_priv)
+ return false;
+
+ for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
+ struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
+
+ priv->enable = 0;
+ priv->wl.low = 0;
+ priv->wl.high = 0;
+ priv->buf_size = 0;
+
+ if (!(hdev->hw_tc_map & BIT(i)))
+ continue;
+
+ priv->enable = 1;
+ priv->buf_size = rx_priv;
+ priv->wl.high = rx_priv - hdev->dv_buf_size;
+ priv->wl.low = priv->wl.high - PRIV_WL_GAP;
+ }
+
+ buf_alloc->s_buf.buf_size = 0;
+
+ return true;
+}
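
The minimum-private-buffer check works out as follows for one hypothetical configuration (the dv_buf_size and mps values are illustrative, not from the patch):

/* Hypothetical: dv_buf_size = 8192, mps = 1536 -> half_mps = 768:
 *
 *   min_rx_priv = 8192 + 0x3C00 + 5 * 768
 *               = 8192 + 15360 + 3840   = 27392
 *   round_up(27392, 256)                = 27392
 *
 * Each enabled TC then gets priv->buf_size = rx_priv,
 * wl.high = rx_priv - dv_buf_size and wl.low = wl.high - 0x1800,
 * and this scheme is only used when rx_priv >= min_rx_priv.
 */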
+
/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
* @hdev: pointer to struct hclge_dev
* @buf_alloc: pointer to buffer calculation data
@@ -1856,6 +1913,9 @@ static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
return 0;
}
+ if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
+ return 0;
+
if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
return 0;
@@ -2153,7 +2213,6 @@ static int hclge_init_msi(struct hclge_dev *hdev)
static u8 hclge_check_speed_dup(u8 duplex, int speed)
{
-
if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
duplex = HCLGE_MAC_FULL;
@@ -2171,7 +2230,8 @@ static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
- hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
+ if (duplex)
+ hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
switch (speed) {
case HCLGE_MAC_SPEED_10M:
@@ -2261,7 +2321,8 @@ static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
req = (struct hclge_config_auto_neg_cmd *)desc.data;
- hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
+ if (enable)
+ hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
req->cfg_an_cmd_flag = cpu_to_le32(flag);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -2316,6 +2377,17 @@ static int hclge_restart_autoneg(struct hnae3_handle *handle)
return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
}
+static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
+ return hclge_set_autoneg_en(hdev, !halt);
+
+ return 0;
+}
+
static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
{
struct hclge_config_fec_cmd *req;
@@ -2389,6 +2461,15 @@ static int hclge_mac_init(struct hclge_dev *hdev)
return ret;
}
+ if (hdev->hw.mac.support_autoneg) {
+ ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Config mac autoneg fail ret=%d\n", ret);
+ return ret;
+ }
+ }
+
mac->link = 0;
if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
@@ -2423,7 +2504,8 @@ static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
static void hclge_reset_task_schedule(struct hclge_dev *hdev)
{
- if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
+ if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
+ !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
schedule_work(&hdev->rst_service_task);
}
@@ -2458,7 +2540,7 @@ static int hclge_get_mac_link_status(struct hclge_dev *hdev)
static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
{
- int mac_state;
+ unsigned int mac_state;
int link_stat;
if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
@@ -2508,6 +2590,9 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
static void hclge_update_port_capability(struct hclge_mac *mac)
{
+ /* update fec ability by speed */
+ hclge_convert_setting_fec(mac);
+
/* firmware can not identify back plane type, the media type
* read from configuration can help deal it
*/
@@ -2529,7 +2614,7 @@ static void hclge_update_port_capability(struct hclge_mac *mac)
static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
{
- struct hclge_sfp_info_cmd *resp = NULL;
+ struct hclge_sfp_info_cmd *resp;
struct hclge_desc desc;
int ret;
@@ -2580,6 +2665,11 @@ static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
mac->speed_ability = le32_to_cpu(resp->speed_ability);
mac->autoneg = resp->autoneg;
mac->support_autoneg = resp->autoneg_ability;
+ mac->speed_type = QUERY_ACTIVE_SPEED;
+ if (!resp->active_fec)
+ mac->fec_mode = 0;
+ else
+ mac->fec_mode = BIT(resp->active_fec);
} else {
mac->speed_type = QUERY_SFP_SPEED;
}
@@ -2645,6 +2735,7 @@ static void hclge_service_timer(struct timer_list *t)
mod_timer(&hdev->service_timer, jiffies + HZ);
hdev->hw_stats.stats_timer++;
+ hdev->fd_arfs_expire_timer++;
hclge_task_schedule(hdev);
}
@@ -2693,19 +2784,11 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
return HCLGE_VECTOR0_EVENT_RST;
}
- if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
- dev_info(&hdev->pdev->dev, "core reset interrupt\n");
- set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
- set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
- *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
- hdev->rst_stats.core_rst_cnt++;
- return HCLGE_VECTOR0_EVENT_RST;
- }
-
/* check for vector0 msix event source */
if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
- dev_dbg(&hdev->pdev->dev, "received event 0x%x\n",
- msix_src_reg);
+ dev_info(&hdev->pdev->dev, "received event 0x%x\n",
+ msix_src_reg);
+ *clearval = msix_src_reg;
return HCLGE_VECTOR0_EVENT_ERR;
}
@@ -2717,8 +2800,11 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
}
/* print other vector0 event source */
- dev_dbg(&hdev->pdev->dev, "cmdq_src_reg:0x%x, msix_src_reg:0x%x\n",
- cmdq_src_reg, msix_src_reg);
+ dev_info(&hdev->pdev->dev,
+ "CMDQ INT status:0x%x, other INT status:0x%x\n",
+ cmdq_src_reg, msix_src_reg);
+ *clearval = msix_src_reg;
+
return HCLGE_VECTOR0_EVENT_OTHER;
}
@@ -2754,8 +2840,8 @@ static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
{
struct hclge_dev *hdev = data;
+ u32 clearval = 0;
u32 event_cause;
- u32 clearval;
hclge_enable_vector(&hdev->misc_vector, false);
event_cause = hclge_check_event_cause(hdev, &clearval);
@@ -2797,7 +2883,8 @@ static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
}
/* clear the source of interrupt if it is not cause by reset */
- if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
+ if (!clearval ||
+ event_cause == HCLGE_VECTOR0_EVENT_MBX) {
hclge_clear_event_cause(hdev, event_cause, clearval);
hclge_enable_vector(&hdev->misc_vector, true);
}
@@ -2861,6 +2948,9 @@ int hclge_notify_client(struct hclge_dev *hdev,
struct hnae3_client *client = hdev->nic_client;
u16 i;
+ if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
+ return 0;
+
if (!client->ops->reset_notify)
return -EOPNOTSUPP;
@@ -2886,7 +2976,7 @@ static int hclge_notify_roce_client(struct hclge_dev *hdev,
int ret = 0;
u16 i;
- if (!client)
+ if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
return 0;
if (!client->ops->reset_notify)
@@ -2923,10 +3013,6 @@ static int hclge_reset_wait(struct hclge_dev *hdev)
reg = HCLGE_GLOBAL_RESET_REG;
reg_bit = HCLGE_GLOBAL_RESET_BIT;
break;
- case HNAE3_CORE_RESET:
- reg = HCLGE_GLOBAL_RESET_REG;
- reg_bit = HCLGE_CORE_RESET_BIT;
- break;
case HNAE3_FUNC_RESET:
reg = HCLGE_FUN_RST_ING;
reg_bit = HCLGE_FUN_RST_ING_B;
@@ -3058,12 +3144,6 @@ static void hclge_do_reset(struct hclge_dev *hdev)
hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
dev_info(&pdev->dev, "Global Reset requested\n");
break;
- case HNAE3_CORE_RESET:
- val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
- hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
- hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
- dev_info(&pdev->dev, "Core Reset requested\n");
- break;
case HNAE3_FUNC_RESET:
dev_info(&pdev->dev, "PF Reset requested\n");
/* schedule again to check later */
@@ -3083,10 +3163,11 @@ static void hclge_do_reset(struct hclge_dev *hdev)
}
}
-static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
+static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
unsigned long *addr)
{
enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
+ struct hclge_dev *hdev = ae_dev->priv;
/* first, resolve any unknown reset type to the known type(s) */
if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
@@ -3110,16 +3191,10 @@ static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
rst_level = HNAE3_IMP_RESET;
clear_bit(HNAE3_IMP_RESET, addr);
clear_bit(HNAE3_GLOBAL_RESET, addr);
- clear_bit(HNAE3_CORE_RESET, addr);
clear_bit(HNAE3_FUNC_RESET, addr);
} else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
rst_level = HNAE3_GLOBAL_RESET;
clear_bit(HNAE3_GLOBAL_RESET, addr);
- clear_bit(HNAE3_CORE_RESET, addr);
- clear_bit(HNAE3_FUNC_RESET, addr);
- } else if (test_bit(HNAE3_CORE_RESET, addr)) {
- rst_level = HNAE3_CORE_RESET;
- clear_bit(HNAE3_CORE_RESET, addr);
clear_bit(HNAE3_FUNC_RESET, addr);
} else if (test_bit(HNAE3_FUNC_RESET, addr)) {
rst_level = HNAE3_FUNC_RESET;
@@ -3147,9 +3222,6 @@ static void hclge_clear_reset_cause(struct hclge_dev *hdev)
case HNAE3_GLOBAL_RESET:
clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
break;
- case HNAE3_CORE_RESET:
- clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
- break;
default:
break;
}
@@ -3180,6 +3252,8 @@ static int hclge_reset_prepare_down(struct hclge_dev *hdev)
static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
{
+#define HCLGE_RESET_SYNC_TIME 100
+
u32 reg_val;
int ret = 0;
@@ -3188,7 +3262,7 @@ static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
/* There is no mechanism for PF to know if VF has stopped IO
* for now, just wait 100 ms for VF to stop IO
*/
- msleep(100);
+ msleep(HCLGE_RESET_SYNC_TIME);
ret = hclge_func_reset_cmd(hdev, 0);
if (ret) {
dev_err(&hdev->pdev->dev,
@@ -3208,7 +3282,7 @@ static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
/* There is no mechanism for PF to know if VF has stopped IO
* for now, just wait 100 ms for VF to stop IO
*/
- msleep(100);
+ msleep(HCLGE_RESET_SYNC_TIME);
set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
hdev->rst_stats.flr_rst_cnt++;
@@ -3222,6 +3296,10 @@ static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
break;
}
+ /* inform hardware that preparatory work is done */
+ msleep(HCLGE_RESET_SYNC_TIME);
+ hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG,
+ HCLGE_NIC_CMQ_ENABLE);
dev_info(&hdev->pdev->dev, "prepare wait ok\n");
return ret;
@@ -3230,7 +3308,6 @@ static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
{
#define MAX_RESET_FAIL_CNT 5
-#define RESET_UPGRADE_DELAY_SEC 10
if (hdev->reset_pending) {
dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
@@ -3254,8 +3331,9 @@ static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
hclge_clear_reset_cause(hdev);
+ set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
mod_timer(&hdev->reset_timer,
- jiffies + RESET_UPGRADE_DELAY_SEC * HZ);
+ jiffies + HCLGE_RESET_INTERVAL);
return false;
}
@@ -3282,6 +3360,25 @@ static int hclge_reset_prepare_up(struct hclge_dev *hdev)
return ret;
}
+static int hclge_reset_stack(struct hclge_dev *hdev)
+{
+ int ret;
+
+ ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
+ if (ret)
+ return ret;
+
+ ret = hclge_reset_ae_dev(hdev->ae_dev);
+ if (ret)
+ return ret;
+
+ ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
+ if (ret)
+ return ret;
+
+ return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
+}
+
static void hclge_reset(struct hclge_dev *hdev)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
@@ -3325,19 +3422,8 @@ static void hclge_reset(struct hclge_dev *hdev)
goto err_reset;
rtnl_lock();
- ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
- if (ret)
- goto err_reset_lock;
- ret = hclge_reset_ae_dev(hdev->ae_dev);
- if (ret)
- goto err_reset_lock;
-
- ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
- if (ret)
- goto err_reset_lock;
-
- ret = hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
+ ret = hclge_reset_stack(hdev);
if (ret)
goto err_reset_lock;
@@ -3347,16 +3433,23 @@ static void hclge_reset(struct hclge_dev *hdev)
if (ret)
goto err_reset_lock;
+ rtnl_unlock();
+
+ ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
+ /* ignore a RoCE notify error only once the reset has already
+ * failed HCLGE_RESET_MAX_FAIL_CNT - 1 times
+ */
+ if (ret && hdev->reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
+ goto err_reset;
+
+ rtnl_lock();
+
ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
if (ret)
goto err_reset_lock;
rtnl_unlock();
- ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
- if (ret)
- goto err_reset;
-
ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
if (ret)
goto err_reset;
@@ -3399,11 +3492,12 @@ static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
if (!handle)
handle = &hdev->vport[0].nic;
- if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ)))
+ if (time_before(jiffies, (hdev->last_reset_time +
+ HCLGE_RESET_INTERVAL)))
return;
else if (hdev->default_reset_request)
hdev->reset_level =
- hclge_get_reset_level(hdev,
+ hclge_get_reset_level(ae_dev,
&hdev->default_reset_request);
else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
hdev->reset_level = HNAE3_FUNC_RESET;
@@ -3432,13 +3526,14 @@ static void hclge_reset_timer(struct timer_list *t)
struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
dev_info(&hdev->pdev->dev,
- "triggering global reset in reset timer\n");
- set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
+ "triggering reset in reset timer\n");
hclge_reset_event(hdev->pdev, NULL);
}
static void hclge_reset_subtask(struct hclge_dev *hdev)
{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+
/* check if there is any ongoing reset in the hardware. This status can
* be checked from reset_pending. If there is then, we need to wait for
* hardware to complete reset.
@@ -3449,12 +3544,12 @@ static void hclge_reset_subtask(struct hclge_dev *hdev)
* now.
*/
hdev->last_reset_time = jiffies;
- hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
+ hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
if (hdev->reset_type != HNAE3_NONE_RESET)
hclge_reset(hdev);
/* check if we got any *new* reset requests to be honored */
- hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
+ hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
if (hdev->reset_type != HNAE3_NONE_RESET)
hclge_do_reset(hdev);
@@ -3521,6 +3616,11 @@ static void hclge_service_task(struct work_struct *work)
hclge_update_port_info(hdev);
hclge_update_link_status(hdev);
hclge_update_vport_alive(hdev);
+ hclge_sync_vlan_filter(hdev);
+ if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) {
+ hclge_rfs_filter_expire(hdev);
+ hdev->fd_arfs_expire_timer = 0;
+ }
hclge_service_complete(hdev);
}
@@ -3614,29 +3714,28 @@ static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
const u8 hfunc, const u8 *key)
{
struct hclge_rss_config_cmd *req;
+ unsigned int key_offset = 0;
struct hclge_desc desc;
- int key_offset;
+ int key_counts;
int key_size;
int ret;
+ key_counts = HCLGE_RSS_KEY_SIZE;
req = (struct hclge_rss_config_cmd *)desc.data;
- for (key_offset = 0; key_offset < 3; key_offset++) {
+ while (key_counts) {
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
false);
req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
- if (key_offset == 2)
- key_size =
- HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
- else
- key_size = HCLGE_RSS_HASH_KEY_NUM;
-
+ key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
memcpy(req->hash_key,
key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
+ key_counts -= key_size;
+ key_offset++;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
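
The rewritten key loop above feeds the RSS hash key to the firmware in descriptor-sized chunks and lets min() size the final partial chunk, instead of hard-coding three descriptors with a special last-chunk length. Below is a minimal userspace sketch of the same chunking arithmetic; the 40-byte key and 16-byte chunk size are illustrative stand-ins for HCLGE_RSS_KEY_SIZE and HCLGE_RSS_HASH_KEY_NUM, not values taken from this hunk.

    #include <stdio.h>
    #include <string.h>

    #define KEY_SIZE 40     /* total RSS key bytes (illustrative) */
    #define CHUNK    16     /* bytes carried per command descriptor (illustrative) */

    static int min_int(int a, int b) { return a < b ? a : b; }

    int main(void)
    {
        unsigned char key[KEY_SIZE];
        int remaining = KEY_SIZE;
        int offset = 0;

        memset(key, 0xab, sizeof(key));

        /* same shape as the driver loop: consume the key chunk by chunk,
         * letting min() size the final partial chunk automatically
         */
        while (remaining) {
            int len = min_int(CHUNK, remaining);

            printf("descriptor %d: copy key[%d..%d] (%d bytes)\n",
                   offset, offset * CHUNK, offset * CHUNK + len - 1, len);
            remaining -= len;
            offset++;
        }
        return 0;
    }

With these numbers the loop emits three descriptors carrying 16, 16 and 8 bytes, which is exactly the layout the old code special-cased by hand.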
@@ -3995,13 +4094,14 @@ int hclge_rss_init_hw(struct hclge_dev *hdev)
struct hclge_vport *vport = hdev->vport;
u8 *rss_indir = vport[0].rss_indirection_tbl;
u16 rss_size = vport[0].alloc_rss_size;
+ u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
+ u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
u8 *key = vport[0].rss_hash_key;
u8 hfunc = vport[0].rss_algo;
- u16 tc_offset[HCLGE_MAX_TC_NUM];
u16 tc_valid[HCLGE_MAX_TC_NUM];
- u16 tc_size[HCLGE_MAX_TC_NUM];
u16 roundup_size;
- int i, ret;
+ unsigned int i;
+ int ret;
ret = hclge_set_rss_indir_table(hdev, rss_indir);
if (ret)
@@ -4156,8 +4256,7 @@ int hclge_bind_ring_with_vector(struct hclge_vport *vport,
return 0;
}
-static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
- int vector,
+static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
struct hnae3_ring_chain_node *ring_chain)
{
struct hclge_vport *vport = hclge_get_vport(handle);
@@ -4174,8 +4273,7 @@ static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
}
-static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
- int vector,
+static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
struct hnae3_ring_chain_node *ring_chain)
{
struct hclge_vport *vport = hclge_get_vport(handle);
@@ -4196,8 +4294,7 @@ static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
if (ret)
dev_err(&handle->pdev->dev,
"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
- vector_id,
- ret);
+ vector_id, ret);
return ret;
}
@@ -4503,19 +4600,19 @@ static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
case 0:
return false;
case BIT(INNER_DST_MAC):
- for (i = 0; i < 6; i++) {
- calc_x(key_x[5 - i], rule->tuples.dst_mac[i],
+ for (i = 0; i < ETH_ALEN; i++) {
+ calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
rule->tuples_mask.dst_mac[i]);
- calc_y(key_y[5 - i], rule->tuples.dst_mac[i],
+ calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
rule->tuples_mask.dst_mac[i]);
}
return true;
case BIT(INNER_SRC_MAC):
- for (i = 0; i < 6; i++) {
- calc_x(key_x[5 - i], rule->tuples.src_mac[i],
+ for (i = 0; i < ETH_ALEN; i++) {
+ calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
rule->tuples.src_mac[i]);
- calc_y(key_y[5 - i], rule->tuples.src_mac[i],
+ calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
rule->tuples.src_mac[i]);
}
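
The tuple conversion above now walks ETH_ALEN bytes and writes them into the key in reverse order, deriving an x/y pair per byte through the driver's calc_x()/calc_y() macros, which implement the TCAM match encoding described in the header file. The sketch below shows one common form of that encoding (x = ~value & mask, y = value & mask) purely as an illustration of the idea; it is an assumption, not a copy of the driver's macros.

    #include <stdio.h>
    #include <stdint.h>

    /* One conventional TCAM x/y encoding: a mask bit of 0 yields x = y = 0
     * ("always match"); a mask bit of 1 sets exactly one of x/y, selecting
     * whether the packet bit must be 0 or 1.
     */
    static void calc_xy(uint8_t val, uint8_t mask, uint8_t *x, uint8_t *y)
    {
        *x = (uint8_t)(~val & mask);
        *y = (uint8_t)(val & mask);
    }

    int main(void)
    {
        const uint8_t mac[6]  = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };
        const uint8_t mask[6] = { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 }; /* match OUI only */
        uint8_t key_x[6], key_y[6];
        int i;

        /* reverse the byte order while filling the key, as the driver loop does */
        for (i = 0; i < 6; i++)
            calc_xy(mac[i], mask[i], &key_x[5 - i], &key_y[5 - i]);

        for (i = 0; i < 6; i++)
            printf("key[%d]: x=%02x y=%02x\n", i, key_x[i], key_y[i]);
        return 0;
    }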
@@ -4551,19 +4648,19 @@ static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
return true;
case BIT(INNER_SRC_IP):
- calc_x(tmp_x_l, rule->tuples.src_ip[3],
- rule->tuples_mask.src_ip[3]);
- calc_y(tmp_y_l, rule->tuples.src_ip[3],
- rule->tuples_mask.src_ip[3]);
+ calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
+ rule->tuples_mask.src_ip[IPV4_INDEX]);
+ calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
+ rule->tuples_mask.src_ip[IPV4_INDEX]);
*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
return true;
case BIT(INNER_DST_IP):
- calc_x(tmp_x_l, rule->tuples.dst_ip[3],
- rule->tuples_mask.dst_ip[3]);
- calc_y(tmp_y_l, rule->tuples.dst_ip[3],
- rule->tuples_mask.dst_ip[3]);
+ calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
+ rule->tuples_mask.dst_ip[IPV4_INDEX]);
+ calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
+ rule->tuples_mask.dst_ip[IPV4_INDEX]);
*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
@@ -4617,7 +4714,7 @@ static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
{
u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
u8 cur_pos = 0, tuple_size, shift_bits;
- int i;
+ unsigned int i;
for (i = 0; i < MAX_META_DATA; i++) {
tuple_size = meta_data_key_info[i].key_length;
@@ -4659,7 +4756,8 @@ static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
u8 *cur_key_x, *cur_key_y;
- int i, ret, tuple_size;
+ unsigned int i;
+ int ret, tuple_size;
u8 meta_data_region;
memset(key_x, 0, sizeof(key_x));
@@ -4812,6 +4910,7 @@ static int hclge_fd_check_spec(struct hclge_dev *hdev,
*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
BIT(INNER_IP_TOS);
+ /* check whether the src/dst ip address is used */
if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
!tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
*unused |= BIT(INNER_SRC_IP);
@@ -4836,6 +4935,7 @@ static int hclge_fd_check_spec(struct hclge_dev *hdev,
BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
BIT(INNER_DST_PORT);
+ /* check whether the src/dst ip address is used */
if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
!usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
*unused |= BIT(INNER_SRC_IP);
@@ -4906,14 +5006,18 @@ static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
struct hclge_fd_rule *rule = NULL;
struct hlist_node *node2;
+ spin_lock_bh(&hdev->fd_rule_lock);
hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
if (rule->location >= location)
break;
}
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
return rule && rule->location == location;
}
+/* the caller must hold fd_rule_lock */
static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
struct hclge_fd_rule *new_rule,
u16 location,
@@ -4937,9 +5041,13 @@ static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
kfree(rule);
hdev->hclge_fd_rule_num--;
- if (!is_add)
- return 0;
+ if (!is_add) {
+ if (!hdev->hclge_fd_rule_num)
+ hdev->fd_active_type = HCLGE_FD_RULE_NONE;
+ clear_bit(location, hdev->fd_bmap);
+ return 0;
+ }
} else if (!is_add) {
dev_err(&hdev->pdev->dev,
"delete fail, rule %d is inexistent\n",
@@ -4954,7 +5062,9 @@ static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
else
hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
+ set_bit(location, hdev->fd_bmap);
hdev->hclge_fd_rule_num++;
+ hdev->fd_active_type = new_rule->rule_type;
return 0;
}
@@ -4969,14 +5079,14 @@ static int hclge_fd_get_tuple(struct hclge_dev *hdev,
case SCTP_V4_FLOW:
case TCP_V4_FLOW:
case UDP_V4_FLOW:
- rule->tuples.src_ip[3] =
+ rule->tuples.src_ip[IPV4_INDEX] =
be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
- rule->tuples_mask.src_ip[3] =
+ rule->tuples_mask.src_ip[IPV4_INDEX] =
be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
- rule->tuples.dst_ip[3] =
+ rule->tuples.dst_ip[IPV4_INDEX] =
be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
- rule->tuples_mask.dst_ip[3] =
+ rule->tuples_mask.dst_ip[IPV4_INDEX] =
be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
@@ -4995,14 +5105,14 @@ static int hclge_fd_get_tuple(struct hclge_dev *hdev,
break;
case IP_USER_FLOW:
- rule->tuples.src_ip[3] =
+ rule->tuples.src_ip[IPV4_INDEX] =
be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
- rule->tuples_mask.src_ip[3] =
+ rule->tuples_mask.src_ip[IPV4_INDEX] =
be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
- rule->tuples.dst_ip[3] =
+ rule->tuples.dst_ip[IPV4_INDEX] =
be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
- rule->tuples_mask.dst_ip[3] =
+ rule->tuples_mask.dst_ip[IPV4_INDEX] =
be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
@@ -5019,14 +5129,14 @@ static int hclge_fd_get_tuple(struct hclge_dev *hdev,
case TCP_V6_FLOW:
case UDP_V6_FLOW:
be32_to_cpu_array(rule->tuples.src_ip,
- fs->h_u.tcp_ip6_spec.ip6src, 4);
+ fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
be32_to_cpu_array(rule->tuples_mask.src_ip,
- fs->m_u.tcp_ip6_spec.ip6src, 4);
+ fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
be32_to_cpu_array(rule->tuples.dst_ip,
- fs->h_u.tcp_ip6_spec.ip6dst, 4);
+ fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
be32_to_cpu_array(rule->tuples_mask.dst_ip,
- fs->m_u.tcp_ip6_spec.ip6dst, 4);
+ fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
rule->tuples_mask.src_port =
@@ -5042,14 +5152,14 @@ static int hclge_fd_get_tuple(struct hclge_dev *hdev,
break;
case IPV6_USER_FLOW:
be32_to_cpu_array(rule->tuples.src_ip,
- fs->h_u.usr_ip6_spec.ip6src, 4);
+ fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
be32_to_cpu_array(rule->tuples_mask.src_ip,
- fs->m_u.usr_ip6_spec.ip6src, 4);
+ fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
be32_to_cpu_array(rule->tuples.dst_ip,
- fs->h_u.usr_ip6_spec.ip6dst, 4);
+ fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
be32_to_cpu_array(rule->tuples_mask.dst_ip,
- fs->m_u.usr_ip6_spec.ip6dst, 4);
+ fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
@@ -5112,6 +5222,36 @@ static int hclge_fd_get_tuple(struct hclge_dev *hdev,
return 0;
}
+/* the caller must hold fd_rule_lock */
+static int hclge_fd_config_rule(struct hclge_dev *hdev,
+ struct hclge_fd_rule *rule)
+{
+ int ret;
+
+ if (!rule) {
+ dev_err(&hdev->pdev->dev,
+ "The flow director rule is NULL\n");
+ return -EINVAL;
+ }
+
+ /* it never fails here, so there is no need to check the return value */
+ hclge_fd_update_rule_list(hdev, rule, rule->location, true);
+
+ ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
+ if (ret)
+ goto clear_rule;
+
+ ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
+ if (ret)
+ goto clear_rule;
+
+ return 0;
+
+clear_rule:
+ hclge_fd_update_rule_list(hdev, rule, rule->location, false);
+ return ret;
+}
+
static int hclge_add_fd_entry(struct hnae3_handle *handle,
struct ethtool_rxnfc *cmd)
{
@@ -5174,8 +5314,10 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
return -ENOMEM;
ret = hclge_fd_get_tuple(hdev, fs, rule);
- if (ret)
- goto free_rule;
+ if (ret) {
+ kfree(rule);
+ return ret;
+ }
rule->flow_type = fs->flow_type;
@@ -5184,24 +5326,19 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
rule->vf_id = dst_vport_id;
rule->queue_id = q_index;
rule->action = action;
+ rule->rule_type = HCLGE_FD_EP_ACTIVE;
- ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
- if (ret)
- goto free_rule;
+ /* when the user configures a rule via ethtool, clear all aRFS
+ * rules to avoid rule conflicts
+ */
+ hclge_clear_arfs_rules(handle);
- ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
- if (ret)
- goto free_rule;
+ spin_lock_bh(&hdev->fd_rule_lock);
+ ret = hclge_fd_config_rule(hdev, rule);
- ret = hclge_fd_update_rule_list(hdev, rule, fs->location, true);
- if (ret)
- goto free_rule;
+ spin_unlock_bh(&hdev->fd_rule_lock);
return ret;
-
-free_rule:
- kfree(rule);
- return ret;
}
static int hclge_del_fd_entry(struct hnae3_handle *handle,
@@ -5222,18 +5359,21 @@ static int hclge_del_fd_entry(struct hnae3_handle *handle,
if (!hclge_fd_rule_exist(hdev, fs->location)) {
dev_err(&hdev->pdev->dev,
- "Delete fail, rule %d is inexistent\n",
- fs->location);
+ "Delete fail, rule %d is inexistent\n", fs->location);
return -ENOENT;
}
- ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
- fs->location, NULL, false);
+ ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
+ NULL, false);
if (ret)
return ret;
- return hclge_fd_update_rule_list(hdev, NULL, fs->location,
- false);
+ spin_lock_bh(&hdev->fd_rule_lock);
+ ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
+
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
+ return ret;
}
static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
@@ -5243,25 +5383,30 @@ static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
struct hclge_dev *hdev = vport->back;
struct hclge_fd_rule *rule;
struct hlist_node *node;
+ u16 location;
if (!hnae3_dev_fd_supported(hdev))
return;
+ spin_lock_bh(&hdev->fd_rule_lock);
+ for_each_set_bit(location, hdev->fd_bmap,
+ hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
+ hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
+ NULL, false);
+
if (clear_list) {
hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
rule_node) {
- hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
- rule->location, NULL, false);
hlist_del(&rule->rule_node);
kfree(rule);
- hdev->hclge_fd_rule_num--;
}
- } else {
- hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
- rule_node)
- hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
- rule->location, NULL, false);
+ hdev->fd_active_type = HCLGE_FD_RULE_NONE;
+ hdev->hclge_fd_rule_num = 0;
+ bitmap_zero(hdev->fd_bmap,
+ hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
}
+
+ spin_unlock_bh(&hdev->fd_rule_lock);
}
static int hclge_restore_fd_entries(struct hnae3_handle *handle)
@@ -5283,6 +5428,7 @@ static int hclge_restore_fd_entries(struct hnae3_handle *handle)
if (!hdev->fd_en)
return 0;
+ spin_lock_bh(&hdev->fd_rule_lock);
hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
if (!ret)
@@ -5292,11 +5438,18 @@ static int hclge_restore_fd_entries(struct hnae3_handle *handle)
dev_warn(&hdev->pdev->dev,
"Restore rule %d failed, remove it\n",
rule->location);
+ clear_bit(rule->location, hdev->fd_bmap);
hlist_del(&rule->rule_node);
kfree(rule);
hdev->hclge_fd_rule_num--;
}
}
+
+ if (hdev->hclge_fd_rule_num)
+ hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
+
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
return 0;
}
@@ -5329,13 +5482,18 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
+ spin_lock_bh(&hdev->fd_rule_lock);
+
hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
if (rule->location >= fs->location)
break;
}
- if (!rule || fs->location != rule->location)
+ if (!rule || fs->location != rule->location) {
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
return -ENOENT;
+ }
fs->flow_type = rule->flow_type;
switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
@@ -5343,16 +5501,16 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
case TCP_V4_FLOW:
case UDP_V4_FLOW:
fs->h_u.tcp_ip4_spec.ip4src =
- cpu_to_be32(rule->tuples.src_ip[3]);
+ cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
fs->m_u.tcp_ip4_spec.ip4src =
- rule->unused_tuple & BIT(INNER_SRC_IP) ?
- 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
+ rule->unused_tuple & BIT(INNER_SRC_IP) ?
+ 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
fs->h_u.tcp_ip4_spec.ip4dst =
- cpu_to_be32(rule->tuples.dst_ip[3]);
+ cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
fs->m_u.tcp_ip4_spec.ip4dst =
- rule->unused_tuple & BIT(INNER_DST_IP) ?
- 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
+ rule->unused_tuple & BIT(INNER_DST_IP) ?
+ 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
fs->m_u.tcp_ip4_spec.psrc =
@@ -5372,16 +5530,16 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
break;
case IP_USER_FLOW:
fs->h_u.usr_ip4_spec.ip4src =
- cpu_to_be32(rule->tuples.src_ip[3]);
+ cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
fs->m_u.tcp_ip4_spec.ip4src =
- rule->unused_tuple & BIT(INNER_SRC_IP) ?
- 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
+ rule->unused_tuple & BIT(INNER_SRC_IP) ?
+ 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
fs->h_u.usr_ip4_spec.ip4dst =
- cpu_to_be32(rule->tuples.dst_ip[3]);
+ cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
fs->m_u.usr_ip4_spec.ip4dst =
- rule->unused_tuple & BIT(INNER_DST_IP) ?
- 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
+ rule->unused_tuple & BIT(INNER_DST_IP) ?
+ 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
fs->m_u.usr_ip4_spec.tos =
@@ -5400,20 +5558,22 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
case TCP_V6_FLOW:
case UDP_V6_FLOW:
cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
- rule->tuples.src_ip, 4);
+ rule->tuples.src_ip, IPV6_SIZE);
if (rule->unused_tuple & BIT(INNER_SRC_IP))
- memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(int) * 4);
+ memset(fs->m_u.tcp_ip6_spec.ip6src, 0,
+ sizeof(int) * IPV6_SIZE);
else
cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
- rule->tuples_mask.src_ip, 4);
+ rule->tuples_mask.src_ip, IPV6_SIZE);
cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
- rule->tuples.dst_ip, 4);
+ rule->tuples.dst_ip, IPV6_SIZE);
if (rule->unused_tuple & BIT(INNER_DST_IP))
- memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(int) * 4);
+ memset(fs->m_u.tcp_ip6_spec.ip6dst, 0,
+ sizeof(int) * IPV6_SIZE);
else
cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
- rule->tuples_mask.dst_ip, 4);
+ rule->tuples_mask.dst_ip, IPV6_SIZE);
fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
fs->m_u.tcp_ip6_spec.psrc =
@@ -5428,20 +5588,22 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
break;
case IPV6_USER_FLOW:
cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
- rule->tuples.src_ip, 4);
+ rule->tuples.src_ip, IPV6_SIZE);
if (rule->unused_tuple & BIT(INNER_SRC_IP))
- memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(int) * 4);
+ memset(fs->m_u.usr_ip6_spec.ip6src, 0,
+ sizeof(int) * IPV6_SIZE);
else
cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
- rule->tuples_mask.src_ip, 4);
+ rule->tuples_mask.src_ip, IPV6_SIZE);
cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
- rule->tuples.dst_ip, 4);
+ rule->tuples.dst_ip, IPV6_SIZE);
if (rule->unused_tuple & BIT(INNER_DST_IP))
- memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(int) * 4);
+ memset(fs->m_u.usr_ip6_spec.ip6dst, 0,
+ sizeof(int) * IPV6_SIZE);
else
cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
- rule->tuples_mask.dst_ip, 4);
+ rule->tuples_mask.dst_ip, IPV6_SIZE);
fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
fs->m_u.usr_ip6_spec.l4_proto =
@@ -5474,6 +5636,7 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
break;
default:
+ spin_unlock_bh(&hdev->fd_rule_lock);
return -EOPNOTSUPP;
}
@@ -5505,6 +5668,8 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
fs->ring_cookie |= vf_id;
}
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
return 0;
}
@@ -5522,20 +5687,208 @@ static int hclge_get_all_rules(struct hnae3_handle *handle,
cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
+ spin_lock_bh(&hdev->fd_rule_lock);
hlist_for_each_entry_safe(rule, node2,
&hdev->fd_rule_list, rule_node) {
- if (cnt == cmd->rule_cnt)
+ if (cnt == cmd->rule_cnt) {
+ spin_unlock_bh(&hdev->fd_rule_lock);
return -EMSGSIZE;
+ }
rule_locs[cnt] = rule->location;
cnt++;
}
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
cmd->rule_cnt = cnt;
return 0;
}
+static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
+ struct hclge_fd_rule_tuples *tuples)
+{
+ tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
+ tuples->ip_proto = fkeys->basic.ip_proto;
+ tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
+
+ if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
+ tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
+ tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
+ } else {
+ memcpy(tuples->src_ip,
+ fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
+ sizeof(tuples->src_ip));
+ memcpy(tuples->dst_ip,
+ fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
+ sizeof(tuples->dst_ip));
+ }
+}
+
+/* traverse all rules and check whether an existing rule has the same tuples */
+static struct hclge_fd_rule *
+hclge_fd_search_flow_keys(struct hclge_dev *hdev,
+ const struct hclge_fd_rule_tuples *tuples)
+{
+ struct hclge_fd_rule *rule = NULL;
+ struct hlist_node *node;
+
+ hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
+ if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
+ return rule;
+ }
+
+ return NULL;
+}
+
+static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
+ struct hclge_fd_rule *rule)
+{
+ rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
+ BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
+ BIT(INNER_SRC_PORT);
+ rule->action = 0;
+ rule->vf_id = 0;
+ rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
+ if (tuples->ether_proto == ETH_P_IP) {
+ if (tuples->ip_proto == IPPROTO_TCP)
+ rule->flow_type = TCP_V4_FLOW;
+ else
+ rule->flow_type = UDP_V4_FLOW;
+ } else {
+ if (tuples->ip_proto == IPPROTO_TCP)
+ rule->flow_type = TCP_V6_FLOW;
+ else
+ rule->flow_type = UDP_V6_FLOW;
+ }
+ memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
+ memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
+}
+
+static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
+ u16 flow_id, struct flow_keys *fkeys)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_fd_rule_tuples new_tuples;
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_fd_rule *rule;
+ u16 tmp_queue_id;
+ u16 bit_id;
+ int ret;
+
+ if (!hnae3_dev_fd_supported(hdev))
+ return -EOPNOTSUPP;
+
+ memset(&new_tuples, 0, sizeof(new_tuples));
+ hclge_fd_get_flow_tuples(fkeys, &new_tuples);
+
+ spin_lock_bh(&hdev->fd_rule_lock);
+
+ /* when an fd rule added by the user already exists, aRFS
+ * must not take effect
+ */
+ if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
+ return -EOPNOTSUPP;
+ }
+
+ /* check whether a flow director filter already exists for this flow:
+ * if not, create a new filter for it;
+ * if a filter exists with a different queue id, modify the filter;
+ * if a filter exists with the same queue id, do nothing
+ */
+ rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
+ if (!rule) {
+ bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
+ if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
+ return -ENOSPC;
+ }
+
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule) {
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
+ return -ENOMEM;
+ }
+
+ set_bit(bit_id, hdev->fd_bmap);
+ rule->location = bit_id;
+ rule->flow_id = flow_id;
+ rule->queue_id = queue_id;
+ hclge_fd_build_arfs_rule(&new_tuples, rule);
+ ret = hclge_fd_config_rule(hdev, rule);
+
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
+ if (ret)
+ return ret;
+
+ return rule->location;
+ }
+
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
+ if (rule->queue_id == queue_id)
+ return rule->location;
+
+ tmp_queue_id = rule->queue_id;
+ rule->queue_id = queue_id;
+ ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
+ if (ret) {
+ rule->queue_id = tmp_queue_id;
+ return ret;
+ }
+
+ return rule->location;
+}
+
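
hclge_add_fd_entry_by_arfs() above follows the decision spelled out in its comment: look the tuples up in the rule list, allocate a free bitmap slot and program a new rule when nothing matches, rewrite only the action when a rule exists but steers to a different queue, and do nothing when the queue already matches. The following is a condensed, self-contained model of that decision tree with the hardware call stubbed out; the names and structures are simplified stand-ins, not the driver's.

    #include <stdio.h>
    #include <stdbool.h>

    /* simplified stand-ins for the driver's rule table and hardware call */
    struct flow_rule {
        bool in_use;
        unsigned int queue_id;
        unsigned int tuple_hash;        /* stands in for the full tuple compare */
    };

    #define MAX_RULES 8
    static struct flow_rule rules[MAX_RULES];

    static int program_rule(struct flow_rule *rule) { (void)rule; return 0; }

    /* returns the rule location on success, -1 on failure */
    static int steer_flow(unsigned int tuple_hash, unsigned int queue_id)
    {
        struct flow_rule *rule = NULL;
        unsigned int old;
        int i;

        for (i = 0; i < MAX_RULES; i++)
            if (rules[i].in_use && rules[i].tuple_hash == tuple_hash)
                rule = &rules[i];

        if (!rule) {                            /* no rule yet: take a free slot */
            for (i = 0; i < MAX_RULES; i++)
                if (!rules[i].in_use)
                    break;
            if (i == MAX_RULES)
                return -1;                      /* table full */

            rules[i] = (struct flow_rule){ true, queue_id, tuple_hash };
            if (program_rule(&rules[i])) {      /* roll back on failure */
                rules[i].in_use = false;
                return -1;
            }
            return i;
        }

        if (rule->queue_id == queue_id)         /* already steered there */
            return (int)(rule - rules);

        old = rule->queue_id;                   /* only the action changes */
        rule->queue_id = queue_id;
        if (program_rule(rule)) {
            rule->queue_id = old;
            return -1;
        }
        return (int)(rule - rules);
    }

    int main(void)
    {
        printf("first add  -> location %d\n", steer_flow(0x1234, 3));
        printf("same queue -> location %d\n", steer_flow(0x1234, 3));
        printf("new queue  -> location %d\n", steer_flow(0x1234, 5));
        return 0;
    }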
+static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
+{
+#ifdef CONFIG_RFS_ACCEL
+ struct hnae3_handle *handle = &hdev->vport[0].nic;
+ struct hclge_fd_rule *rule;
+ struct hlist_node *node;
+ HLIST_HEAD(del_list);
+
+ spin_lock_bh(&hdev->fd_rule_lock);
+ if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
+ spin_unlock_bh(&hdev->fd_rule_lock);
+ return;
+ }
+ hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
+ if (rps_may_expire_flow(handle->netdev, rule->queue_id,
+ rule->flow_id, rule->location)) {
+ hlist_del_init(&rule->rule_node);
+ hlist_add_head(&rule->rule_node, &del_list);
+ hdev->hclge_fd_rule_num--;
+ clear_bit(rule->location, hdev->fd_bmap);
+ }
+ }
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
+ hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
+ hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
+ rule->location, NULL, false);
+ kfree(rule);
+ }
+#endif
+}
+
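
hclge_rfs_filter_expire() above uses a two-phase pattern: expired rules are only unlinked onto a private del_list while fd_rule_lock is held, and the firmware delete commands plus kfree() run after the lock is dropped, keeping the spinlock hold time short. Here is a minimal sketch of the same collect-then-process idiom on a plain singly linked list, with the expiry check and the hardware call replaced by trivial stand-ins.

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdbool.h>

    struct entry {
        int id;
        struct entry *next;
    };

    /* stand-ins for rps_may_expire_flow() and the TCAM delete command */
    static bool may_expire(const struct entry *e) { return e->id % 2 == 0; }
    static void hw_delete(const struct entry *e) { printf("hw delete %d\n", e->id); }

    int main(void)
    {
        struct entry *list = NULL, *del_list = NULL, **pp, *e;
        int i;

        for (i = 0; i < 5; i++) {               /* build a small rule list */
            e = malloc(sizeof(*e));
            e->id = i;
            e->next = list;
            list = e;
        }

        /* phase 1 (done under the spinlock in the driver): unlink expired
         * entries onto a private deletion list, touching only list pointers
         */
        for (pp = &list; (e = *pp) != NULL; ) {
            if (may_expire(e)) {
                *pp = e->next;
                e->next = del_list;
                del_list = e;
            } else {
                pp = &e->next;
            }
        }

        /* phase 2 (after unlock): issue the slow hardware commands and free */
        while ((e = del_list) != NULL) {
            del_list = e->next;
            hw_delete(e);
            free(e);
        }

        while ((e = list) != NULL) {            /* cleanup for the demo */
            list = e->next;
            free(e);
        }
        return 0;
    }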
+static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
+{
+#ifdef CONFIG_RFS_ACCEL
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
+ hclge_del_all_fd_entries(handle, true);
+#endif
+}
+
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
{
struct hclge_vport *vport = hclge_get_vport(handle);
@@ -5565,10 +5918,12 @@ static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
+ bool clear;
hdev->fd_en = enable;
+ clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE ? true : false;
if (!enable)
- hclge_del_all_fd_entries(handle, false);
+ hclge_del_all_fd_entries(handle, clear);
else
hclge_restore_fd_entries(handle);
}
@@ -5582,20 +5937,20 @@ static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
- hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
- hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
- hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
- hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
- hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
- hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
- hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
- hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
- hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
- hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
- hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
- hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
- hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
- hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
+
+ if (enable) {
+ hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
+ hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
+ hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
+ hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
+ hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
+ hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
+ hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
+ hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
+ hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
+ hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
+ }
+
req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -5726,7 +6081,7 @@ static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
return -EBUSY;
}
-static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
+static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
int stream_id, bool enable)
{
struct hclge_desc desc;
@@ -5737,7 +6092,8 @@ static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
req->stream_id = cpu_to_le16(stream_id);
- req->enable |= enable << HCLGE_TQP_ENABLE_B;
+ if (enable)
+ req->enable |= 1U << HCLGE_TQP_ENABLE_B;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
@@ -5838,6 +6194,8 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
set_bit(HCLGE_STATE_DOWN, &hdev->state);
+ hclge_clear_arfs_rules(handle);
+
/* If it is not PF reset, the firmware will disable the MAC,
* so it only need to stop phy here.
*/
@@ -5903,11 +6261,11 @@ static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
if (op == HCLGE_MAC_VLAN_ADD) {
if ((!resp_code) || (resp_code == 1)) {
return_status = 0;
- } else if (resp_code == 2) {
+ } else if (resp_code == HCLGE_ADD_UC_OVERFLOW) {
return_status = -ENOSPC;
dev_err(&hdev->pdev->dev,
"add mac addr failed for uc_overflow.\n");
- } else if (resp_code == 3) {
+ } else if (resp_code == HCLGE_ADD_MC_OVERFLOW) {
return_status = -ENOSPC;
dev_err(&hdev->pdev->dev,
"add mac addr failed for mc_overflow.\n");
@@ -5952,13 +6310,15 @@ static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
{
- int word_num;
- int bit_num;
+#define HCLGE_VF_NUM_IN_FIRST_DESC 192
+
+ unsigned int word_num;
+ unsigned int bit_num;
if (vfid > 255 || vfid < 0)
return -EIO;
- if (vfid >= 0 && vfid <= 191) {
+ if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
word_num = vfid / 32;
bit_num = vfid % 32;
if (clr)
@@ -5966,7 +6326,7 @@ static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
else
desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
} else {
- word_num = (vfid - 192) / 32;
+ word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
bit_num = vfid % 32;
if (clr)
desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
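
hclge_update_desc_vfid() above replaces the magic 192 with HCLGE_VF_NUM_IN_FIRST_DESC: function ids 0-191 land in desc[1] and 192-255 in desc[2], with the word and bit picked by simple division and modulo. A quick standalone check of that index arithmetic, assuming only the 192-per-descriptor split shown in the hunk:

    #include <stdio.h>

    #define VF_NUM_IN_FIRST_DESC 192   /* function ids carried by desc[1] */

    int main(void)
    {
        const int vfids[] = { 0, 31, 191, 192, 200, 255 };
        unsigned int i;

        for (i = 0; i < sizeof(vfids) / sizeof(vfids[0]); i++) {
            int vfid = vfids[i];
            int desc, word, bit;

            if (vfid < VF_NUM_IN_FIRST_DESC) {
                desc = 1;
                word = vfid / 32;
            } else {
                desc = 2;
                word = (vfid - VF_NUM_IN_FIRST_DESC) / 32;
            }
            bit = vfid % 32;
            printf("vfid %3d -> desc[%d].data[%d], bit %d\n",
                   vfid, desc, word, bit);
        }
        return 0;
    }

For example vfid 200 maps to desc[2].data[0], bit 8, matching the "(vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32" expression above.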
@@ -6149,6 +6509,10 @@ static int hclge_init_umv_space(struct hclge_dev *hdev)
mutex_init(&hdev->umv_mutex);
hdev->max_umv_size = allocated_size;
+ /* divide max_umv_size by (hdev->num_req_vfs + 2), so that a pool of
+ * unicast mac vlan table entries is preserved to be shared by the pf
+ * and its vfs.
+ */
hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
hdev->share_umv_size = hdev->priv_umv_size +
hdev->max_umv_size % (hdev->num_req_vfs + 2);
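
One way to read the "+ 2" above: dividing by num_req_vfs + 2 gives each function (PF plus VFs) a private slice and leaves one extra slice over, which together with the division remainder forms the pool shared by the PF and its VFs. A tiny worked example with made-up numbers (the real max_umv_size is assigned by firmware):

    #include <stdio.h>

    int main(void)
    {
        unsigned int max_umv_size = 256;        /* illustrative, firmware-assigned */
        unsigned int num_req_vfs = 6;           /* illustrative */
        unsigned int funcs = num_req_vfs + 2;   /* VFs + PF + one slice kept shared */

        unsigned int priv  = max_umv_size / funcs;
        unsigned int share = priv + max_umv_size % funcs;

        /* each function gets 'priv' private entries; the unassigned slice
         * plus the remainder becomes the shared pool
         */
        printf("priv_umv_size = %u, share_umv_size = %u\n", priv, share);
        return 0;
    }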
@@ -6181,7 +6545,9 @@ static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
req = (struct hclge_umv_spc_alc_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
- hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc);
+ if (!is_alloc)
+ hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, 1);
+
req->space_size = cpu_to_le32(space_size);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -6270,8 +6636,7 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport,
is_multicast_ether_addr(addr)) {
dev_err(&hdev->pdev->dev,
"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
- addr,
- is_zero_ether_addr(addr),
+ addr, is_zero_ether_addr(addr),
is_broadcast_ether_addr(addr),
is_multicast_ether_addr(addr));
return -EINVAL;
@@ -6338,9 +6703,8 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport,
if (is_zero_ether_addr(addr) ||
is_broadcast_ether_addr(addr) ||
is_multicast_ether_addr(addr)) {
- dev_dbg(&hdev->pdev->dev,
- "Remove mac err! invalid mac:%pM.\n",
- addr);
+ dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
+ addr);
return -EINVAL;
}
@@ -6381,18 +6745,16 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport,
hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
hclge_prepare_mac_addr(&req, addr, true);
status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
- if (!status) {
- /* This mac addr exist, update VFID for it */
- hclge_update_desc_vfid(desc, vport->vport_id, false);
- status = hclge_add_mac_vlan_tbl(vport, &req, desc);
- } else {
+ if (status) {
/* This mac addr do not exist, add new entry for it */
memset(desc[0].data, 0, sizeof(desc[0].data));
memset(desc[1].data, 0, sizeof(desc[0].data));
memset(desc[2].data, 0, sizeof(desc[0].data));
- hclge_update_desc_vfid(desc, vport->vport_id, false);
- status = hclge_add_mac_vlan_tbl(vport, &req, desc);
}
+ status = hclge_update_desc_vfid(desc, vport->vport_id, false);
+ if (status)
+ return status;
+ status = hclge_add_mac_vlan_tbl(vport, &req, desc);
if (status == -ENOSPC)
dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
@@ -6430,7 +6792,9 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport,
status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
if (!status) {
/* This mac addr exist, remove this handle's VFID for it */
- hclge_update_desc_vfid(desc, vport->vport_id, true);
+ status = hclge_update_desc_vfid(desc, vport->vport_id, true);
+ if (status)
+ return status;
if (hclge_is_all_function_id_zero(desc))
/* All the vfid is zero, so need to delete this entry */
@@ -6759,7 +7123,7 @@ static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
}
-static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
+static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
bool is_kill, u16 vlan, u8 qos,
__be16 proto)
{
@@ -6771,6 +7135,12 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
u8 vf_byte_off;
int ret;
+ /* if the vf vlan table is full, the firmware disables the vf vlan
+ * filter, so adding a new vlan id to it is useless and unnecessary
+ */
+ if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill)
+ return 0;
+
hclge_cmd_setup_basic_desc(&desc[0],
HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
hclge_cmd_setup_basic_desc(&desc[1],
@@ -6806,6 +7176,7 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
return 0;
if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
+ set_bit(vfid, hdev->vf_vlan_full);
dev_warn(&hdev->pdev->dev,
"vf vlan table is full, vf vlan filter is disabled\n");
return 0;
@@ -6819,12 +7190,13 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
if (!req0->resp_code)
return 0;
- if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
- dev_warn(&hdev->pdev->dev,
- "vlan %d filter is not in vf vlan table\n",
- vlan);
+ /* the vf vlan filter is disabled once the vf vlan table is full,
+ * so new vlan ids are never added to the table. Just return 0
+ * without a warning, to avoid flooding the log with verbose
+ * messages when unloading.
+ */
+ if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
return 0;
- }
dev_err(&hdev->pdev->dev,
"Kill vf vlan filter fail, ret =%d.\n",
@@ -7140,10 +7512,6 @@ static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
{
struct hclge_vport_vlan_cfg *vlan;
- /* vlan 0 is reserved */
- if (!vlan_id)
- return;
-
vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
if (!vlan)
return;
@@ -7238,6 +7606,43 @@ void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
mutex_unlock(&hdev->vport_cfg_mutex);
}
+static void hclge_restore_vlan_table(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_vport_vlan_cfg *vlan, *tmp;
+ struct hclge_dev *hdev = vport->back;
+ u16 vlan_proto, qos;
+ u16 state, vlan_id;
+ int i;
+
+ mutex_lock(&hdev->vport_cfg_mutex);
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ vport = &hdev->vport[i];
+ vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
+ vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
+ qos = vport->port_base_vlan_cfg.vlan_info.qos;
+ state = vport->port_base_vlan_cfg.state;
+
+ if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
+ hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
+ vport->vport_id, vlan_id, qos,
+ false);
+ continue;
+ }
+
+ list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
+ if (vlan->hd_tbl_status)
+ hclge_set_vlan_filter_hw(hdev,
+ htons(ETH_P_8021Q),
+ vport->vport_id,
+ vlan->vlan_id, 0,
+ false);
+ }
+ }
+
+ mutex_unlock(&hdev->vport_cfg_mutex);
+}
+
int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
struct hclge_vport *vport = hclge_get_vport(handle);
@@ -7415,11 +7820,20 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
bool writen_to_tbl = false;
int ret = 0;
- /* when port based VLAN enabled, we use port based VLAN as the VLAN
- * filter entry. In this case, we don't update VLAN filter table
- * when user add new VLAN or remove exist VLAN, just update the vport
- * VLAN list. The VLAN id in VLAN list won't be writen in VLAN filter
- * table until port based VLAN disabled
+ /* When the device is resetting, the firmware cannot handle the
+ * mailbox. Just record the vlan id and remove it after the
+ * reset has finished.
+ */
+ if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
+ set_bit(vlan_id, vport->vlan_del_fail_bmap);
+ return -EBUSY;
+ }
+
+ /* When port based vlan is enabled, it is used as the vlan filter
+ * entry. In this case the vlan filter table is not updated when the
+ * user adds or removes a vlan; only the vport vlan list is. The
+ * vlan ids in that list are written to the vlan filter table only
+ * when port based vlan is disabled.
*/
if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
@@ -7427,16 +7841,53 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
writen_to_tbl = true;
}
- if (ret)
- return ret;
+ if (!ret) {
+ if (is_kill)
+ hclge_rm_vport_vlan_table(vport, vlan_id, false);
+ else
+ hclge_add_vport_vlan_table(vport, vlan_id,
+ writen_to_tbl);
+ } else if (is_kill) {
+ /* When removing the hw vlan filter failed, record the vlan id
+ * and try to remove it from hw later, to stay consistent with
+ * the stack
+ */
+ set_bit(vlan_id, vport->vlan_del_fail_bmap);
+ }
+ return ret;
+}
- if (is_kill)
- hclge_rm_vport_vlan_table(vport, vlan_id, false);
- else
- hclge_add_vport_vlan_table(vport, vlan_id,
- writen_to_tbl);
+static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
+{
+#define HCLGE_MAX_SYNC_COUNT 60
- return 0;
+ int i, ret, sync_cnt = 0;
+ u16 vlan_id;
+
+ /* start from vport 1; the PF is always alive */
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ struct hclge_vport *vport = &hdev->vport[i];
+
+ vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
+ VLAN_N_VID);
+ while (vlan_id != VLAN_N_VID) {
+ ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
+ vport->vport_id, vlan_id,
+ 0, true);
+ if (ret && ret != -EINVAL)
+ return;
+
+ clear_bit(vlan_id, vport->vlan_del_fail_bmap);
+ hclge_rm_vport_vlan_table(vport, vlan_id, false);
+
+ sync_cnt++;
+ if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
+ return;
+
+ vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
+ VLAN_N_VID);
+ }
+ }
}
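
hclge_sync_vlan_filter() above drains each vport's vlan_del_fail_bmap from the periodic service task, retrying the hardware delete for every vlan id whose removal failed or was deferred during reset, and caps the work per pass at HCLGE_MAX_SYNC_COUNT. A simplified model of that bitmap-drain loop, with find_first_bit()/clear_bit() replaced by trivial stand-ins and the hardware call stubbed:

    #include <stdio.h>

    #define N_VID    64     /* small bitmap for the demo; the driver uses VLAN_N_VID */
    #define MAX_SYNC 4      /* retry budget per pass, like HCLGE_MAX_SYNC_COUNT */

    static unsigned long long fail_bmap = (1ULL << 5) | (1ULL << 10) | (1ULL << 42);

    static int find_first(unsigned long long bmap)
    {
        int i;

        for (i = 0; i < N_VID; i++)
            if (bmap & (1ULL << i))
                return i;
        return N_VID;
    }

    /* stand-in for hclge_set_vlan_filter_hw(..., is_kill = true) */
    static int hw_kill_vlan(int vid) { printf("kill vlan %d\n", vid); return 0; }

    int main(void)
    {
        int sync_cnt = 0;
        int vid = find_first(fail_bmap);

        while (vid != N_VID) {
            if (hw_kill_vlan(vid))
                break;                          /* try again on the next pass */

            fail_bmap &= ~(1ULL << vid);        /* clear_bit() */

            if (++sync_cnt >= MAX_SYNC)
                break;                          /* bounded work per service tick */

            vid = find_first(fail_bmap);
        }
        return 0;
    }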
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
@@ -7463,7 +7914,7 @@ static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
{
struct hclge_dev *hdev = vport->back;
- int i, max_frm_size, ret = 0;
+ int i, max_frm_size, ret;
max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
@@ -7523,7 +7974,8 @@ static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
- hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
+ if (enable)
+ hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
@@ -7574,7 +8026,7 @@ int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
int reset_try_times = 0;
int reset_status;
u16 queue_gid;
- int ret = 0;
+ int ret;
queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
@@ -7591,7 +8043,6 @@ int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
return ret;
}
- reset_try_times = 0;
while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
/* Wait for tqp hw reset */
msleep(20);
@@ -7630,7 +8081,6 @@ void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
return;
}
- reset_try_times = 0;
while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
/* Wait for tqp hw reset */
msleep(20);
@@ -7700,7 +8150,7 @@ int hclge_cfg_flowctrl(struct hclge_dev *hdev)
{
struct phy_device *phydev = hdev->hw.mac.phydev;
u16 remote_advertising = 0;
- u16 local_advertising = 0;
+ u16 local_advertising;
u32 rx_pause, tx_pause;
u8 flowctl;
@@ -7733,8 +8183,9 @@ static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
+ struct phy_device *phydev = hdev->hw.mac.phydev;
- *auto_neg = hclge_get_autoneg(handle);
+ *auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
*rx_en = 0;
@@ -7765,11 +8216,13 @@ static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
struct phy_device *phydev = hdev->hw.mac.phydev;
u32 fc_autoneg;
- fc_autoneg = hclge_get_autoneg(handle);
- if (auto_neg != fc_autoneg) {
- dev_info(&hdev->pdev->dev,
- "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
- return -EOPNOTSUPP;
+ if (phydev) {
+ fc_autoneg = hclge_get_autoneg(handle);
+ if (auto_neg != fc_autoneg) {
+ dev_info(&hdev->pdev->dev,
+ "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
+ return -EOPNOTSUPP;
+ }
}
if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
@@ -7780,16 +8233,13 @@ static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
- if (!fc_autoneg)
+ if (!auto_neg)
return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
if (phydev)
return phy_start_aneg(phydev);
- if (hdev->pdev->revision == 0x20)
- return -EOPNOTSUPP;
-
- return hclge_restart_autoneg(handle);
+ return -EOPNOTSUPP;
}
static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
@@ -7825,7 +8275,8 @@ static void hclge_get_mdix_mode(struct hnae3_handle *handle,
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
struct phy_device *phydev = hdev->hw.mac.phydev;
- int mdix_ctrl, mdix, retval, is_resolved;
+ int mdix_ctrl, mdix, is_resolved;
+ unsigned int retval;
if (!phydev) {
*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
@@ -7894,6 +8345,102 @@ static void hclge_info_show(struct hclge_dev *hdev)
dev_info(dev, "PF info end.\n");
}
+static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
+ struct hclge_vport *vport)
+{
+ struct hnae3_client *client = vport->nic.client;
+ struct hclge_dev *hdev = ae_dev->priv;
+ int rst_cnt;
+ int ret;
+
+ rst_cnt = hdev->rst_stats.reset_cnt;
+ ret = client->ops->init_instance(&vport->nic);
+ if (ret)
+ return ret;
+
+ set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
+ if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
+ rst_cnt != hdev->rst_stats.reset_cnt) {
+ ret = -EBUSY;
+ goto init_nic_err;
+ }
+
+ /* Enable nic hw error interrupts */
+ ret = hclge_config_nic_hw_error(hdev, true);
+ if (ret) {
+ dev_err(&ae_dev->pdev->dev,
+ "fail(%d) to enable hw error interrupts\n", ret);
+ goto init_nic_err;
+ }
+
+ hnae3_set_client_init_flag(client, ae_dev, 1);
+
+ if (netif_msg_drv(&hdev->vport->nic))
+ hclge_info_show(hdev);
+
+ return ret;
+
+init_nic_err:
+ clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
+ while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ msleep(HCLGE_WAIT_RESET_DONE);
+
+ client->ops->uninit_instance(&vport->nic, 0);
+
+ return ret;
+}
+
+static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
+ struct hclge_vport *vport)
+{
+ struct hnae3_client *client = vport->roce.client;
+ struct hclge_dev *hdev = ae_dev->priv;
+ int rst_cnt;
+ int ret;
+
+ if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
+ !hdev->nic_client)
+ return 0;
+
+ client = hdev->roce_client;
+ ret = hclge_init_roce_base_info(vport);
+ if (ret)
+ return ret;
+
+ rst_cnt = hdev->rst_stats.reset_cnt;
+ ret = client->ops->init_instance(&vport->roce);
+ if (ret)
+ return ret;
+
+ set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
+ if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
+ rst_cnt != hdev->rst_stats.reset_cnt) {
+ ret = -EBUSY;
+ goto init_roce_err;
+ }
+
+ /* Enable roce ras interrupts */
+ ret = hclge_config_rocee_ras_interrupt(hdev, true);
+ if (ret) {
+ dev_err(&ae_dev->pdev->dev,
+ "fail(%d) to enable roce ras interrupts\n", ret);
+ goto init_roce_err;
+ }
+
+ hnae3_set_client_init_flag(client, ae_dev, 1);
+
+ return 0;
+
+init_roce_err:
+ clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
+ while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ msleep(HCLGE_WAIT_RESET_DONE);
+
+ hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
+
+ return ret;
+}
+
static int hclge_init_client_instance(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev)
{
@@ -7909,41 +8456,13 @@ static int hclge_init_client_instance(struct hnae3_client *client,
hdev->nic_client = client;
vport->nic.client = client;
- ret = client->ops->init_instance(&vport->nic);
+ ret = hclge_init_nic_client_instance(ae_dev, vport);
if (ret)
goto clear_nic;
- hnae3_set_client_init_flag(client, ae_dev, 1);
-
- if (netif_msg_drv(&hdev->vport->nic))
- hclge_info_show(hdev);
-
- if (hdev->roce_client &&
- hnae3_dev_roce_supported(hdev)) {
- struct hnae3_client *rc = hdev->roce_client;
-
- ret = hclge_init_roce_base_info(vport);
- if (ret)
- goto clear_roce;
-
- ret = rc->ops->init_instance(&vport->roce);
- if (ret)
- goto clear_roce;
-
- hnae3_set_client_init_flag(hdev->roce_client,
- ae_dev, 1);
- }
-
- break;
- case HNAE3_CLIENT_UNIC:
- hdev->nic_client = client;
- vport->nic.client = client;
-
- ret = client->ops->init_instance(&vport->nic);
+ ret = hclge_init_roce_client_instance(ae_dev, vport);
if (ret)
- goto clear_nic;
-
- hnae3_set_client_init_flag(client, ae_dev, 1);
+ goto clear_roce;
break;
case HNAE3_CLIENT_ROCE:
@@ -7952,17 +8471,9 @@ static int hclge_init_client_instance(struct hnae3_client *client,
vport->roce.client = client;
}
- if (hdev->roce_client && hdev->nic_client) {
- ret = hclge_init_roce_base_info(vport);
- if (ret)
- goto clear_roce;
-
- ret = client->ops->init_instance(&vport->roce);
- if (ret)
- goto clear_roce;
-
- hnae3_set_client_init_flag(client, ae_dev, 1);
- }
+ ret = hclge_init_roce_client_instance(ae_dev, vport);
+ if (ret)
+ goto clear_roce;
break;
default:
@@ -7970,7 +8481,7 @@ static int hclge_init_client_instance(struct hnae3_client *client,
}
}
- return 0;
+ return ret;
clear_nic:
hdev->nic_client = NULL;
@@ -7992,6 +8503,10 @@ static void hclge_uninit_client_instance(struct hnae3_client *client,
for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
vport = &hdev->vport[i];
if (hdev->roce_client) {
+ clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
+ while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ msleep(HCLGE_WAIT_RESET_DONE);
+
hdev->roce_client->ops->uninit_instance(&vport->roce,
0);
hdev->roce_client = NULL;
@@ -8000,6 +8515,10 @@ static void hclge_uninit_client_instance(struct hnae3_client *client,
if (client->type == HNAE3_CLIENT_ROCE)
return;
if (hdev->nic_client && client->ops->uninit_instance) {
+ clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
+ while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ msleep(HCLGE_WAIT_RESET_DONE);
+
client->ops->uninit_instance(&vport->nic, 0);
hdev->nic_client = NULL;
vport->nic.client = NULL;
@@ -8081,6 +8600,7 @@ static void hclge_state_init(struct hclge_dev *hdev)
static void hclge_state_uninit(struct hclge_dev *hdev)
{
set_bit(HCLGE_STATE_DOWN, &hdev->state);
+ set_bit(HCLGE_STATE_REMOVING, &hdev->state);
if (hdev->service_timer.function)
del_timer_sync(&hdev->service_timer);
@@ -8122,6 +8642,23 @@ static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
}
+static void hclge_clear_resetting_state(struct hclge_dev *hdev)
+{
+ u16 i;
+
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ struct hclge_vport *vport = &hdev->vport[i];
+ int ret;
+
+ /* Send cmd to clear VF's FUNC_RST_ING */
+ ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
+ if (ret)
+ dev_warn(&hdev->pdev->dev,
+ "clear vf(%d) rst failed %d!\n",
+ vport->vport_id, ret);
+ }
+}
+
static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
struct pci_dev *pdev = ae_dev->pdev;
@@ -8143,6 +8680,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
mutex_init(&hdev->vport_lock);
mutex_init(&hdev->vport_cfg_mutex);
+ spin_lock_init(&hdev->fd_rule_lock);
ret = hclge_pci_init(hdev);
if (ret) {
@@ -8270,13 +8808,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
goto err_mdiobus_unreg;
}
- ret = hclge_hw_error_set_state(hdev, true);
- if (ret) {
- dev_err(&pdev->dev,
- "fail(%d) to enable hw error interrupts\n", ret);
- goto err_mdiobus_unreg;
- }
-
INIT_KFIFO(hdev->mac_tnl_log);
hclge_dcb_ops_set(hdev);
@@ -8288,6 +8819,22 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
hclge_clear_all_event_cause(hdev);
+ hclge_clear_resetting_state(hdev);
+
+ /* Log and clear the hw errors that have already occurred */
+ hclge_handle_all_hns_hw_errors(ae_dev);
+
+ /* request a delayed reset for error recovery, because an immediate
+ * global reset on one PF would affect the pending initialization
+ * of the other PFs
+ */
+ if (ae_dev->hw_err_reset_req) {
+ enum hnae3_reset_type reset_level;
+
+ reset_level = hclge_get_reset_level(ae_dev,
+ &ae_dev->hw_err_reset_req);
+ hclge_set_def_reset_request(ae_dev, reset_level);
+ mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
+ }
/* Enable MISC vector(vector0) */
hclge_enable_vector(&hdev->misc_vector, true);
@@ -8342,6 +8889,7 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_stats_clear(hdev);
memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
+ memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
ret = hclge_cmd_init(hdev);
if (ret) {
@@ -8393,21 +8941,31 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
ret = hclge_init_fd_config(hdev);
if (ret) {
- dev_err(&pdev->dev,
- "fd table init fail, ret=%d\n", ret);
+ dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
return ret;
}
/* Re-enable the hw error interrupts because
- * the interrupts get disabled on core/global reset.
+ * the interrupts get disabled on global reset.
*/
- ret = hclge_hw_error_set_state(hdev, true);
+ ret = hclge_config_nic_hw_error(hdev, true);
if (ret) {
dev_err(&pdev->dev,
- "fail(%d) to re-enable HNS hw error interrupts\n", ret);
+ "fail(%d) to re-enable NIC hw error interrupts\n",
+ ret);
return ret;
}
+ if (hdev->roce_client) {
+ ret = hclge_config_rocee_ras_interrupt(hdev, true);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "fail(%d) to re-enable roce ras interrupts\n",
+ ret);
+ return ret;
+ }
+ }
+
hclge_reset_vport_state(hdev);
dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
@@ -8432,8 +8990,11 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_enable_vector(&hdev->misc_vector, false);
synchronize_irq(hdev->misc_vector.vector_irq);
+ /* Disable all hw interrupts */
hclge_config_mac_tnl_int(hdev, false);
- hclge_hw_error_set_state(hdev, false);
+ hclge_config_nic_hw_error(hdev, false);
+ hclge_config_rocee_ras_interrupt(hdev, false);
+
hclge_cmd_uninit(hdev);
hclge_misc_irq_uninit(hdev);
hclge_pci_uninit(hdev);
@@ -8478,15 +9039,16 @@ static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+ u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
struct hclge_dev *hdev = vport->back;
+ u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
int cur_rss_size = kinfo->rss_size;
int cur_tqps = kinfo->num_tqps;
- u16 tc_offset[HCLGE_MAX_TC_NUM];
u16 tc_valid[HCLGE_MAX_TC_NUM];
- u16 tc_size[HCLGE_MAX_TC_NUM];
u16 roundup_size;
u32 *rss_indir;
- int ret, i;
+ unsigned int i;
+ int ret;
kinfo->req_rss_size = new_tqps_num;
@@ -8571,10 +9133,12 @@ static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
void *data)
{
#define HCLGE_32_BIT_REG_RTN_DATANUM 8
+#define HCLGE_32_BIT_DESC_NODATA_LEN 2
struct hclge_desc *desc;
u32 *reg_val = data;
__le32 *desc_data;
+ int nodata_num;
int cmd_num;
int i, k, n;
int ret;
@@ -8582,7 +9146,9 @@ static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
if (regs_num == 0)
return 0;
- cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
+ nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
+ cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
+ HCLGE_32_BIT_REG_RTN_DATANUM);
desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
if (!desc)
return -ENOMEM;
@@ -8599,7 +9165,7 @@ static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
for (i = 0; i < cmd_num; i++) {
if (i == 0) {
desc_data = (__le32 *)(&desc[i].data[0]);
- n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
+ n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
} else {
desc_data = (__le32 *)(&desc[i]);
n = HCLGE_32_BIT_REG_RTN_DATANUM;
@@ -8621,10 +9187,12 @@ static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
void *data)
{
#define HCLGE_64_BIT_REG_RTN_DATANUM 4
+#define HCLGE_64_BIT_DESC_NODATA_LEN 1
struct hclge_desc *desc;
u64 *reg_val = data;
__le64 *desc_data;
+ int nodata_len;
int cmd_num;
int i, k, n;
int ret;
@@ -8632,7 +9200,9 @@ static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
if (regs_num == 0)
return 0;
- cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
+ nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
+ cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
+ HCLGE_64_BIT_REG_RTN_DATANUM);
desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
if (!desc)
return -ENOMEM;
@@ -8649,7 +9219,7 @@ static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
for (i = 0; i < cmd_num; i++) {
if (i == 0) {
desc_data = (__le64 *)(&desc[i].data[0]);
- n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
+ n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
} else {
desc_data = (__le64 *)(&desc[i]);
n = HCLGE_64_BIT_REG_RTN_DATANUM;
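
Both register-dump helpers now name the first-descriptor overhead (two 32-bit slots and one 64-bit slot that carry no register data, presumably reserved for metadata) instead of the bare "+ 2" and "+ 1". A quick check of the descriptor-count arithmetic with illustrative register counts:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        /* 32-bit dump: 8 values per descriptor, first descriptor loses 2 slots */
        unsigned int regs32 = 100, per32 = 8, nodata32 = 2;
        /* 64-bit dump: 4 values per descriptor, first descriptor loses 1 slot */
        unsigned int regs64 = 100, per64 = 4, nodata64 = 1;

        printf("32-bit: %u descriptors\n", DIV_ROUND_UP(regs32 + nodata32, per32));
        printf("64-bit: %u descriptors\n", DIV_ROUND_UP(regs64 + nodata64, per64));
        return 0;
    }

For 100 registers this yields 13 and 26 descriptors: the first descriptor holds only 6 (or 3) values, and the rounded-up division accounts for that by padding the register count with the unused-slot count.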
@@ -8876,6 +9446,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.set_autoneg = hclge_set_autoneg,
.get_autoneg = hclge_get_autoneg,
.restart_autoneg = hclge_restart_autoneg,
+ .halt_autoneg = hclge_halt_autoneg,
.get_pauseparam = hclge_get_pauseparam,
.set_pauseparam = hclge_set_pauseparam,
.set_mtu = hclge_set_mtu,
@@ -8892,6 +9463,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
.reset_event = hclge_reset_event,
+ .get_reset_level = hclge_get_reset_level,
.set_default_reset_request = hclge_set_def_reset_request,
.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
.set_channels = hclge_set_channels,
@@ -8908,6 +9480,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.get_fd_all_rules = hclge_get_all_rules,
.restore_fd_rules = hclge_restore_fd_entries,
.enable_fd = hclge_enable_fd,
+ .add_arfs_entry = hclge_add_fd_entry_by_arfs,
.dbg_run_cmd = hclge_dbg_run_cmd,
.handle_hw_ras_error = hclge_handle_hw_ras_error,
.get_hw_reset_stat = hclge_get_hw_reset_stat,
@@ -8918,6 +9491,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.set_timer_task = hclge_set_timer_task,
.mac_connect_phy = hclge_mac_connect_phy,
.mac_disconnect_phy = hclge_mac_disconnect_phy,
+ .restore_vlan_table = hclge_restore_vlan_table,
};
static struct hnae3_ae_algo ae_algo = {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index dd06b11187b0..6a12285f4c76 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -201,6 +201,8 @@ enum HCLGE_DEV_STATE {
HCLGE_STATE_DOWN,
HCLGE_STATE_DISABLED,
HCLGE_STATE_REMOVING,
+ HCLGE_STATE_NIC_REGISTERED,
+ HCLGE_STATE_ROCE_REGISTERED,
HCLGE_STATE_SERVICE_INITED,
HCLGE_STATE_SERVICE_SCHED,
HCLGE_STATE_RST_SERVICE_SCHED,
@@ -472,6 +474,7 @@ enum HCLGE_FD_KEY_TYPE {
enum HCLGE_FD_STAGE {
HCLGE_FD_STAGE_1,
HCLGE_FD_STAGE_2,
+ MAX_STAGE_NUM,
};
/* OUTER_XXX indicates tuples in tunnel header of tunnel packet
@@ -526,7 +529,7 @@ enum HCLGE_FD_META_DATA {
struct key_info {
u8 key_type;
- u8 key_length;
+ u8 key_length; /* in bits */
};
static const struct key_info meta_data_key_info[] = {
@@ -578,6 +581,16 @@ static const struct key_info tuple_key_info[] = {
#define MAX_KEY_BYTES (MAX_KEY_DWORDS * 4)
#define MAX_META_DATA_LENGTH 32
+/* assigned by firmware; the real filter number for each pf may be smaller */
+#define MAX_FD_FILTER_NUM 4096
+#define HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL 5
+
+enum HCLGE_FD_ACTIVE_RULE_TYPE {
+ HCLGE_FD_RULE_NONE,
+ HCLGE_FD_ARFS_ACTIVE,
+ HCLGE_FD_EP_ACTIVE,
+};
+
enum HCLGE_FD_PACKET_TYPE {
NIC_PACKET,
ROCE_PACKET,
@@ -600,18 +613,23 @@ struct hclge_fd_key_cfg {
struct hclge_fd_cfg {
u8 fd_mode;
- u16 max_key_length;
+ u16 max_key_length; /* in bits */
u32 proto_support;
- u32 rule_num[2]; /* rule entry number */
- u16 cnt_num[2]; /* rule hit counter number */
- struct hclge_fd_key_cfg key_cfg[2];
+ u32 rule_num[MAX_STAGE_NUM]; /* rule entry number */
+ u16 cnt_num[MAX_STAGE_NUM]; /* rule hit counter number */
+ struct hclge_fd_key_cfg key_cfg[MAX_STAGE_NUM];
};
+#define IPV4_INDEX 3
+#define IPV6_SIZE 4
struct hclge_fd_rule_tuples {
- u8 src_mac[6];
- u8 dst_mac[6];
- u32 src_ip[4];
- u32 dst_ip[4];
+ u8 src_mac[ETH_ALEN];
+ u8 dst_mac[ETH_ALEN];
+ /* Large enough to hold either an ipv4 or an ipv6 address.
+ * An ipv4 address is stored in src/dst_ip[3] (IPV4_INDEX).
+ */
+ u32 src_ip[IPV6_SIZE];
+ u32 dst_ip[IPV6_SIZE];
u16 src_port;
u16 dst_port;
u16 vlan_tag1;
@@ -630,6 +648,8 @@ struct hclge_fd_rule {
u16 vf_id;
u16 queue_id;
u16 location;
+ u16 flow_id; /* only used for arfs */
+ enum HCLGE_FD_ACTIVE_RULE_TYPE rule_type;
};
struct hclge_fd_ad_data {
@@ -679,6 +699,20 @@ struct hclge_mac_tnl_stats {
u32 status;
};
+#define HCLGE_RESET_INTERVAL (10 * HZ)
+#define HCLGE_WAIT_RESET_DONE 100
+
+#pragma pack(1)
+struct hclge_vf_vlan_cfg {
+ u8 mbx_cmd;
+ u8 subcode;
+ u8 is_kill;
+ u16 vlan;
+ u16 proto;
+};
+
+#pragma pack()
+
/* For each bit of TCAM entry, it uses a pair of 'x' and
* 'y' to indicate which value to match, like below:
* ----------------------------------
@@ -806,10 +840,15 @@ struct hclge_dev {
struct hclge_vlan_type_cfg vlan_type_cfg;
unsigned long vlan_table[VLAN_N_VID][BITS_TO_LONGS(HCLGE_VPORT_NUM)];
+ unsigned long vf_vlan_full[BITS_TO_LONGS(HCLGE_VPORT_NUM)];
struct hclge_fd_cfg fd_cfg;
struct hlist_head fd_rule_list;
+ spinlock_t fd_rule_lock; /* protect fd_rule_list and fd_bmap */
u16 hclge_fd_rule_num;
+ u16 fd_arfs_expire_timer;
+ unsigned long fd_bmap[BITS_TO_LONGS(MAX_FD_FILTER_NUM)];
+ enum HCLGE_FD_ACTIVE_RULE_TYPE fd_active_type;
u8 fd_en;
u16 wanted_umv_size;
@@ -891,13 +930,14 @@ struct hclge_vport {
u32 bw_limit; /* VSI BW Limit (0 = disabled) */
u8 dwrr;
+ unsigned long vlan_del_fail_bmap[BITS_TO_LONGS(VLAN_N_VID)];
struct hclge_port_base_vlan_config port_base_vlan_cfg;
struct hclge_tx_vtag_cfg txvlan_cfg;
struct hclge_rx_vtag_cfg rxvlan_cfg;
u16 used_umv_num;
- int vport_id;
+ u16 vport_id;
struct hclge_dev *back; /* Back reference to associated dev */
struct hnae3_handle nic;
struct hnae3_handle roce;
@@ -959,7 +999,7 @@ int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id);
int hclge_vport_start(struct hclge_vport *vport);
void hclge_vport_stop(struct hclge_vport *vport);
int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu);
-int hclge_dbg_run_cmd(struct hnae3_handle *handle, char *cmd_buf);
+int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf);
u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id);
int hclge_notify_client(struct hclge_dev *hdev,
enum hnae3_reset_notify_type type);
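The #pragma pack(1) around struct hclge_vf_vlan_cfg makes the struct overlay the raw mailbox message byte-for-byte, which is what lets hclge_set_vf_vlan_cfg() read msg_cmd->vlan and msg_cmd->proto instead of memcpy'ing from msg[3] and msg[5]. The standalone sketch below only checks that layout with offsetof(); mbx_cmd and subcode correspond to msg[0] and msg[1], and the asserted offsets mirror the old open-coded indices rather than anything new.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#pragma pack(1)
struct hclge_vf_vlan_cfg {
	uint8_t  mbx_cmd;	/* msg[0] */
	uint8_t  subcode;	/* msg[1] */
	uint8_t  is_kill;	/* msg[2] */
	uint16_t vlan;		/* msg[3..4] */
	uint16_t proto;		/* msg[5..6] */
};
#pragma pack()

int main(void)
{
	/* With packing, the fields sit exactly where the old code
	 * copied them from: is_kill at byte 2, vlan at byte 3,
	 * proto at byte 5.
	 */
	assert(offsetof(struct hclge_vf_vlan_cfg, is_kill) == 2);
	assert(offsetof(struct hclge_vf_vlan_cfg, vlan) == 3);
	assert(offsetof(struct hclge_vf_vlan_cfg, proto) == 5);
	printf("packed size = %zu bytes\n",
	       sizeof(struct hclge_vf_vlan_cfg));	/* 7 */
	return 0;
}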
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 0e04e63f2a94..a38ac7cfe16b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -29,6 +29,10 @@ static int hclge_gen_resp_to_vf(struct hclge_vport *vport,
"PF fail to gen resp to VF len %d exceeds max len %d\n",
resp_data_len,
HCLGE_MBX_MAX_RESP_DATA_SIZE);
+ /* If resp_data_len is too long, set the value to max length
+ * and return the msg to VF
+ */
+ resp_data_len = HCLGE_MBX_MAX_RESP_DATA_SIZE;
}
hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false);
@@ -93,7 +97,7 @@ int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport)
else if (hdev->reset_type == HNAE3_FLR_RESET)
reset_type = HNAE3_VF_FULL_RESET;
else
- return -EINVAL;
+ reset_type = HNAE3_VF_FUNC_RESET;
memcpy(&msg_data[0], &reset_type, sizeof(u16));
@@ -192,12 +196,10 @@ static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en,
return ret;
ret = hclge_bind_ring_with_vector(vport, vector_id, en, &ring_chain);
- if (ret)
- return ret;
hclge_free_vector_ring_chain(&ring_chain);
- return 0;
+ return ret;
}
static int hclge_set_vf_promisc_mode(struct hclge_vport *vport,
@@ -308,21 +310,23 @@ int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
+ struct hclge_vf_vlan_cfg *msg_cmd;
int status = 0;
- if (mbx_req->msg[1] == HCLGE_MBX_VLAN_FILTER) {
+ msg_cmd = (struct hclge_vf_vlan_cfg *)mbx_req->msg;
+ if (msg_cmd->subcode == HCLGE_MBX_VLAN_FILTER) {
struct hnae3_handle *handle = &vport->nic;
u16 vlan, proto;
bool is_kill;
- is_kill = !!mbx_req->msg[2];
- memcpy(&vlan, &mbx_req->msg[3], sizeof(vlan));
- memcpy(&proto, &mbx_req->msg[5], sizeof(proto));
+ is_kill = !!msg_cmd->is_kill;
+ vlan = msg_cmd->vlan;
+ proto = msg_cmd->proto;
status = hclge_set_vlan_filter(handle, cpu_to_be16(proto),
vlan, is_kill);
- } else if (mbx_req->msg[1] == HCLGE_MBX_VLAN_RX_OFF_CFG) {
+ } else if (msg_cmd->subcode == HCLGE_MBX_VLAN_RX_OFF_CFG) {
struct hnae3_handle *handle = &vport->nic;
- bool en = mbx_req->msg[2] ? true : false;
+ bool en = msg_cmd->is_kill ? true : false;
status = hclge_en_hw_strip_rxvtag(handle, en);
} else if (mbx_req->msg[1] == HCLGE_MBX_PORT_BASE_VLAN_CFG) {
@@ -365,13 +369,14 @@ static int hclge_get_vf_tcinfo(struct hclge_vport *vport,
{
struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
u8 vf_tc_map = 0;
- int i, ret;
+ unsigned int i;
+ int ret;
for (i = 0; i < kinfo->num_tc; i++)
vf_tc_map |= BIT(i);
ret = hclge_gen_resp_to_vf(vport, mbx_req, 0, &vf_tc_map,
- sizeof(u8));
+ sizeof(vf_tc_map));
return ret;
}
@@ -553,7 +558,8 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
struct hclge_mbx_vf_to_pf_cmd *req;
struct hclge_vport *vport;
struct hclge_desc *desc;
- int ret, flag;
+ unsigned int flag;
+ int ret;
/* handle all the mailbox requests in the queue */
while (!hclge_cmd_crq_empty(&hdev->hw)) {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index 1e8134892d77..abb1b438564e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -55,9 +55,9 @@ static int hclge_mdio_write(struct mii_bus *bus, int phyid, int regnum,
mdio_cmd = (struct hclge_mdio_cfg_cmd *)desc.data;
hnae3_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M,
- HCLGE_MDIO_PHYID_S, phyid);
+ HCLGE_MDIO_PHYID_S, (u32)phyid);
hnae3_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M,
- HCLGE_MDIO_PHYREG_S, regnum);
+ HCLGE_MDIO_PHYREG_S, (u32)regnum);
hnae3_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1);
hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M,
@@ -93,9 +93,9 @@ static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum)
mdio_cmd = (struct hclge_mdio_cfg_cmd *)desc.data;
hnae3_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M,
- HCLGE_MDIO_PHYID_S, phyid);
+ HCLGE_MDIO_PHYID_S, (u32)phyid);
hnae3_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M,
- HCLGE_MDIO_PHYREG_S, regnum);
+ HCLGE_MDIO_PHYREG_S, (u32)regnum);
hnae3_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1);
hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M,
@@ -224,6 +224,13 @@ int hclge_mac_connect_phy(struct hnae3_handle *handle)
linkmode_and(phydev->supported, phydev->supported, mask);
linkmode_copy(phydev->advertising, phydev->supported);
+ /* The supported flags are Pause and Asym Pause, but the default
+ * advertising should be rx on, tx on, so Asym Pause needs to be
+ * cleared from the advertising flags.
+ */
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ phydev->advertising);
+
return 0;
}
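The comment added to hclge_mac_connect_phy() explains why Asym Pause is cleared from the advertisement: when only the symmetric Pause bit is advertised on both ends, standard pause resolution ends up with both rx and tx flow control enabled. Below is a hedged user-space sketch of that resolution, essentially the table that mii_resolve_flowctrl_fdx() encodes in the kernel; it is illustrative and not the driver's code.

#include <stdbool.h>
#include <stdio.h>

struct pause_adv {
	bool pause;	/* symmetric pause advertised */
	bool asym;	/* asymmetric pause advertised */
};

/* IEEE 802.3 Annex 28B style pause resolution for a full-duplex link. */
static void resolve_pause(struct pause_adv lcl, struct pause_adv rmt,
			  bool *rx_en, bool *tx_en)
{
	*rx_en = false;
	*tx_en = false;

	if (lcl.pause && rmt.pause) {
		*rx_en = true;		/* symmetric: pause both ways */
		*tx_en = true;
	} else if (lcl.asym && rmt.asym) {
		if (lcl.pause)
			*rx_en = true;	/* we pause the peer only */
		else if (rmt.pause)
			*tx_en = true;	/* we honour the peer's pause only */
	}
}

int main(void)
{
	bool rx, tx;

	/* Advertising Pause without Asym Pause, as the patch does. */
	resolve_pause((struct pause_adv){ .pause = true },
		      (struct pause_adv){ .pause = true, .asym = true },
		      &rx, &tx);
	printf("rx pause %d, tx pause %d\n", rx, tx);	/* 1, 1 */
	return 0;
}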
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index a7bbb6d3091a..3f41fa2bc414 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -43,18 +43,23 @@ enum hclge_shaper_level {
static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
u8 *ir_b, u8 *ir_u, u8 *ir_s)
{
+#define DIVISOR_CLK (1000 * 8)
+#define DIVISOR_IR_B_126 (126 * DIVISOR_CLK)
+
const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = {
6 * 256, /* Priority level */
6 * 32, /* Priority group level */
6 * 8, /* Port level */
6 * 256 /* Qset level */
};
- u8 ir_u_calc = 0, ir_s_calc = 0;
+ u8 ir_u_calc = 0;
+ u8 ir_s_calc = 0;
u32 ir_calc;
u32 tick;
/* Calc tick */
- if (shaper_level >= HCLGE_SHAPER_LVL_CNT)
+ if (shaper_level >= HCLGE_SHAPER_LVL_CNT ||
+ ir > HCLGE_ETHER_MAX_RATE)
return -EINVAL;
tick = tick_array[shaper_level];
@@ -66,7 +71,7 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
* ir_calc = ---------------- * 1000
* tick * 1
*/
- ir_calc = (1008000 + (tick >> 1) - 1) / tick;
+ ir_calc = (DIVISOR_IR_B_126 + (tick >> 1) - 1) / tick;
if (ir_calc == ir) {
*ir_b = 126;
@@ -78,27 +83,28 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
/* Increasing the denominator to select ir_s value */
while (ir_calc > ir) {
ir_s_calc++;
- ir_calc = 1008000 / (tick * (1 << ir_s_calc));
+ ir_calc = DIVISOR_IR_B_126 / (tick * (1 << ir_s_calc));
}
if (ir_calc == ir)
*ir_b = 126;
else
- *ir_b = (ir * tick * (1 << ir_s_calc) + 4000) / 8000;
+ *ir_b = (ir * tick * (1 << ir_s_calc) +
+ (DIVISOR_CLK >> 1)) / DIVISOR_CLK;
} else {
/* Increasing the numerator to select ir_u value */
u32 numerator;
while (ir_calc < ir) {
ir_u_calc++;
- numerator = 1008000 * (1 << ir_u_calc);
+ numerator = DIVISOR_IR_B_126 * (1 << ir_u_calc);
ir_calc = (numerator + (tick >> 1)) / tick;
}
if (ir_calc == ir) {
*ir_b = 126;
} else {
- u32 denominator = (8000 * (1 << --ir_u_calc));
+ u32 denominator = (DIVISOR_CLK * (1 << --ir_u_calc));
*ir_b = (ir * tick + (denominator >> 1)) / denominator;
}
}
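Replacing 1008000 with DIVISOR_IR_B_126 (126 * DIVISOR_CLK) makes the shaper search easier to read: the modelled rate is roughly ir_b * 2^ir_u * DIVISOR_CLK / (tick * 2^ir_s), so the code first checks whether ir_b = 126 alone hits the target, then grows the denominator shift (ir_s) for slower rates or the numerator shift (ir_u) for faster ones. The standalone sketch below re-implements that search outside the kernel purely as a worked example; the tick constant and sample rate in main() are illustrative.

#include <stdint.h>
#include <stdio.h>

#define DIVISOR_CLK		(1000 * 8)
#define DIVISOR_IR_B_126	(126 * DIVISOR_CLK)

/* ir is the target rate in Mbit/s, tick the per-level constant
 * (e.g. 6 * 8 at port level in the driver's table).
 */
static void shaper_para_calc(uint32_t ir, uint16_t tick,
			     uint8_t *ir_b, uint8_t *ir_u, uint8_t *ir_s)
{
	uint8_t ir_u_calc = 0, ir_s_calc = 0;
	uint32_t ir_calc = (DIVISOR_IR_B_126 + (tick >> 1) - 1) / tick;

	*ir_u = 0;
	*ir_s = 0;

	if (ir_calc == ir) {
		*ir_b = 126;
		return;
	}

	if (ir_calc > ir) {		/* slow rate: grow the denominator */
		while (ir_calc > ir) {
			ir_s_calc++;
			ir_calc = DIVISOR_IR_B_126 / (tick * (1u << ir_s_calc));
		}
		*ir_b = (ir_calc == ir) ? 126 :
			(ir * tick * (1u << ir_s_calc) +
			 (DIVISOR_CLK >> 1)) / DIVISOR_CLK;
		*ir_s = ir_s_calc;
	} else {			/* fast rate: grow the numerator */
		uint32_t numerator;

		while (ir_calc < ir) {
			ir_u_calc++;
			numerator = DIVISOR_IR_B_126 * (1u << ir_u_calc);
			ir_calc = (numerator + (tick >> 1)) / tick;
		}
		if (ir_calc == ir) {
			*ir_b = 126;
		} else {
			uint32_t denominator = DIVISOR_CLK * (1u << --ir_u_calc);

			*ir_b = (ir * tick + (denominator >> 1)) / denominator;
		}
		*ir_u = ir_u_calc;
	}
}

int main(void)
{
	uint8_t b, u, s;

	shaper_para_calc(10000, 6 * 8, &b, &u, &s);	/* 10G at port level */
	printf("ir_b=%u ir_u=%u ir_s=%u\n", (unsigned)b, (unsigned)u,
	       (unsigned)s);
	return 0;
}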
@@ -119,14 +125,13 @@ static int hclge_pfc_stats_get(struct hclge_dev *hdev,
opcode == HCLGE_OPC_QUERY_PFC_TX_PKT_CNT))
return -EINVAL;
- for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM; i++) {
+ for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM - 1; i++) {
hclge_cmd_setup_basic_desc(&desc[i], opcode, true);
- if (i != (HCLGE_TM_PFC_PKT_GET_CMD_NUM - 1))
- desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
- else
- desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
}
+ hclge_cmd_setup_basic_desc(&desc[i], opcode, true);
+
ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_TM_PFC_PKT_GET_CMD_NUM);
if (ret)
return ret;
@@ -219,8 +224,7 @@ int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr)
trans_gap = pause_param->pause_trans_gap;
trans_time = le16_to_cpu(pause_param->pause_trans_time);
- return hclge_pause_param_cfg(hdev, mac_addr, trans_gap,
- trans_time);
+ return hclge_pause_param_cfg(hdev, mac_addr, trans_gap, trans_time);
}
static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
@@ -361,29 +365,36 @@ static int hclge_tm_qs_weight_cfg(struct hclge_dev *hdev, u16 qs_id,
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
+static u32 hclge_tm_get_shapping_para(u8 ir_b, u8 ir_u, u8 ir_s,
+ u8 bs_b, u8 bs_s)
+{
+ u32 shapping_para = 0;
+
+ hclge_tm_set_field(shapping_para, IR_B, ir_b);
+ hclge_tm_set_field(shapping_para, IR_U, ir_u);
+ hclge_tm_set_field(shapping_para, IR_S, ir_s);
+ hclge_tm_set_field(shapping_para, BS_B, bs_b);
+ hclge_tm_set_field(shapping_para, BS_S, bs_s);
+
+ return shapping_para;
+}
+
static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
enum hclge_shap_bucket bucket, u8 pg_id,
- u8 ir_b, u8 ir_u, u8 ir_s, u8 bs_b, u8 bs_s)
+ u32 shapping_para)
{
struct hclge_pg_shapping_cmd *shap_cfg_cmd;
enum hclge_opcode_type opcode;
struct hclge_desc desc;
- u32 shapping_para = 0;
opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING :
- HCLGE_OPC_TM_PG_C_SHAPPING;
+ HCLGE_OPC_TM_PG_C_SHAPPING;
hclge_cmd_setup_basic_desc(&desc, opcode, false);
shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
shap_cfg_cmd->pg_id = pg_id;
- hclge_tm_set_field(shapping_para, IR_B, ir_b);
- hclge_tm_set_field(shapping_para, IR_U, ir_u);
- hclge_tm_set_field(shapping_para, IR_S, ir_s);
- hclge_tm_set_field(shapping_para, BS_B, bs_b);
- hclge_tm_set_field(shapping_para, BS_S, bs_s);
-
shap_cfg_cmd->pg_shapping_para = cpu_to_le32(shapping_para);
return hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -397,7 +408,7 @@ static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
u8 ir_u, ir_b, ir_s;
int ret;
- ret = hclge_shaper_para_calc(HCLGE_ETHER_MAX_RATE,
+ ret = hclge_shaper_para_calc(hdev->hw.mac.speed,
HCLGE_SHAPER_LVL_PORT,
&ir_b, &ir_u, &ir_s);
if (ret)
@@ -406,11 +417,9 @@ static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, false);
shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;
- hclge_tm_set_field(shapping_para, IR_B, ir_b);
- hclge_tm_set_field(shapping_para, IR_U, ir_u);
- hclge_tm_set_field(shapping_para, IR_S, ir_s);
- hclge_tm_set_field(shapping_para, BS_B, HCLGE_SHAPER_BS_U_DEF);
- hclge_tm_set_field(shapping_para, BS_S, HCLGE_SHAPER_BS_S_DEF);
+ shapping_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
+ HCLGE_SHAPER_BS_U_DEF,
+ HCLGE_SHAPER_BS_S_DEF);
shap_cfg_cmd->port_shapping_para = cpu_to_le32(shapping_para);
@@ -419,16 +428,14 @@ static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
enum hclge_shap_bucket bucket, u8 pri_id,
- u8 ir_b, u8 ir_u, u8 ir_s,
- u8 bs_b, u8 bs_s)
+ u32 shapping_para)
{
struct hclge_pri_shapping_cmd *shap_cfg_cmd;
enum hclge_opcode_type opcode;
struct hclge_desc desc;
- u32 shapping_para = 0;
opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING :
- HCLGE_OPC_TM_PRI_C_SHAPPING;
+ HCLGE_OPC_TM_PRI_C_SHAPPING;
hclge_cmd_setup_basic_desc(&desc, opcode, false);
@@ -436,12 +443,6 @@ static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
shap_cfg_cmd->pri_id = pri_id;
- hclge_tm_set_field(shapping_para, IR_B, ir_b);
- hclge_tm_set_field(shapping_para, IR_U, ir_u);
- hclge_tm_set_field(shapping_para, IR_S, ir_s);
- hclge_tm_set_field(shapping_para, BS_B, bs_b);
- hclge_tm_set_field(shapping_para, BS_S, bs_s);
-
shap_cfg_cmd->pri_shapping_para = cpu_to_le32(shapping_para);
return hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -531,6 +532,7 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
max_rss_size = min_t(u16, hdev->rss_size_max,
vport->alloc_tqps / kinfo->num_tc);
+ /* Set to user value, no larger than max_rss_size. */
if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
kinfo->req_rss_size <= max_rss_size) {
dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n",
@@ -538,6 +540,7 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
kinfo->rss_size = kinfo->req_rss_size;
} else if (kinfo->rss_size > max_rss_size ||
(!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) {
+ /* Set to the maximum specification value (max_rss_size). */
dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n",
kinfo->rss_size, max_rss_size);
kinfo->rss_size = max_rss_size;
@@ -595,8 +598,10 @@ static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
hdev->tm_info.prio_tc[i] =
(i >= hdev->tm_info.num_tc) ? 0 : i;
- /* DCB is enabled if we have more than 1 TC */
- if (hdev->tm_info.num_tc > 1)
+ /* DCB is enabled if we have more than 1 TC or pfc_en is
+ * non-zero.
+ */
+ if (hdev->tm_info.num_tc > 1 || hdev->tm_info.pfc_en)
hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
else
hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
@@ -604,12 +609,14 @@ static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
{
+#define BW_PERCENT 100
+
u8 i;
for (i = 0; i < hdev->tm_info.num_pg; i++) {
int k;
- hdev->tm_info.pg_dwrr[i] = i ? 0 : 100;
+ hdev->tm_info.pg_dwrr[i] = i ? 0 : BW_PERCENT;
hdev->tm_info.pg_info[i].pg_id = i;
hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR;
@@ -621,7 +628,7 @@ static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
for (k = 0; k < hdev->tm_info.num_tc; k++)
- hdev->tm_info.pg_info[i].tc_dwrr[k] = 100;
+ hdev->tm_info.pg_info[i].tc_dwrr[k] = BW_PERCENT;
}
}
@@ -682,6 +689,7 @@ static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
{
u8 ir_u, ir_b, ir_s;
+ u32 shaper_para;
int ret;
u32 i;
@@ -699,18 +707,21 @@ static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
if (ret)
return ret;
+ shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
+ HCLGE_SHAPER_BS_U_DEF,
+ HCLGE_SHAPER_BS_S_DEF);
ret = hclge_tm_pg_shapping_cfg(hdev,
HCLGE_TM_SHAP_C_BUCKET, i,
- 0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
- HCLGE_SHAPER_BS_S_DEF);
+ shaper_para);
if (ret)
return ret;
+ shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
+ HCLGE_SHAPER_BS_U_DEF,
+ HCLGE_SHAPER_BS_S_DEF);
ret = hclge_tm_pg_shapping_cfg(hdev,
HCLGE_TM_SHAP_P_BUCKET, i,
- ir_b, ir_u, ir_s,
- HCLGE_SHAPER_BS_U_DEF,
- HCLGE_SHAPER_BS_S_DEF);
+ shaper_para);
if (ret)
return ret;
}
@@ -730,8 +741,7 @@ static int hclge_tm_pg_dwrr_cfg(struct hclge_dev *hdev)
/* pg to prio */
for (i = 0; i < hdev->tm_info.num_pg; i++) {
/* Cfg dwrr */
- ret = hclge_tm_pg_weight_cfg(hdev, i,
- hdev->tm_info.pg_dwrr[i]);
+ ret = hclge_tm_pg_weight_cfg(hdev, i, hdev->tm_info.pg_dwrr[i]);
if (ret)
return ret;
}
@@ -811,6 +821,7 @@ static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
{
u8 ir_u, ir_b, ir_s;
+ u32 shaper_para;
int ret;
u32 i;
@@ -822,17 +833,19 @@ static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
if (ret)
return ret;
- ret = hclge_tm_pri_shapping_cfg(
- hdev, HCLGE_TM_SHAP_C_BUCKET, i,
- 0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
- HCLGE_SHAPER_BS_S_DEF);
+ shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
+ HCLGE_SHAPER_BS_U_DEF,
+ HCLGE_SHAPER_BS_S_DEF);
+ ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, i,
+ shaper_para);
if (ret)
return ret;
- ret = hclge_tm_pri_shapping_cfg(
- hdev, HCLGE_TM_SHAP_P_BUCKET, i,
- ir_b, ir_u, ir_s, HCLGE_SHAPER_BS_U_DEF,
- HCLGE_SHAPER_BS_S_DEF);
+ shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
+ HCLGE_SHAPER_BS_U_DEF,
+ HCLGE_SHAPER_BS_S_DEF);
+ ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, i,
+ shaper_para);
if (ret)
return ret;
}
@@ -844,6 +857,7 @@ static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
{
struct hclge_dev *hdev = vport->back;
u8 ir_u, ir_b, ir_s;
+ u32 shaper_para;
int ret;
ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF,
@@ -851,18 +865,19 @@ static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
if (ret)
return ret;
+ shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
+ HCLGE_SHAPER_BS_U_DEF,
+ HCLGE_SHAPER_BS_S_DEF);
ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET,
- vport->vport_id,
- 0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
- HCLGE_SHAPER_BS_S_DEF);
+ vport->vport_id, shaper_para);
if (ret)
return ret;
+ shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
+ HCLGE_SHAPER_BS_U_DEF,
+ HCLGE_SHAPER_BS_S_DEF);
ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET,
- vport->vport_id,
- ir_b, ir_u, ir_s,
- HCLGE_SHAPER_BS_U_DEF,
- HCLGE_SHAPER_BS_S_DEF);
+ vport->vport_id, shaper_para);
if (ret)
return ret;
@@ -964,7 +979,7 @@ static int hclge_tm_ets_tc_dwrr_cfg(struct hclge_dev *hdev)
struct hclge_ets_tc_weight_cmd *ets_weight;
struct hclge_desc desc;
- int i;
+ unsigned int i;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, false);
ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;
@@ -1124,6 +1139,9 @@ static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport)
int ret;
u8 i;
+ if (vport->vport_id >= HNAE3_MAX_TC)
+ return -EINVAL;
+
ret = hclge_tm_pri_schd_mode_cfg(hdev, vport->vport_id);
if (ret)
return ret;
@@ -1212,8 +1230,8 @@ static int hclge_pause_param_setup_hw(struct hclge_dev *hdev)
struct hclge_mac *mac = &hdev->hw.mac;
return hclge_pause_param_cfg(hdev, mac->mac_addr,
- HCLGE_DEFAULT_PAUSE_TRANS_GAP,
- HCLGE_DEFAULT_PAUSE_TRANS_TIME);
+ HCLGE_DEFAULT_PAUSE_TRANS_GAP,
+ HCLGE_DEFAULT_PAUSE_TRANS_TIME);
}
static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
@@ -1358,7 +1376,8 @@ void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
{
- u8 i, bit_map = 0;
+ u8 bit_map = 0;
+ u8 i;
hdev->tm_info.num_tc = num_tc;
@@ -1375,6 +1394,19 @@ void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
hclge_tm_schd_info_init(hdev);
}
+void hclge_tm_pfc_info_update(struct hclge_dev *hdev)
+{
+ /* DCB is enabled if we have more than 1 TC or pfc_en is
+ * non-zero.
+ */
+ if (hdev->tm_info.num_tc > 1 || hdev->tm_info.pfc_en)
+ hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
+ else
+ hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
+
+ hclge_pfc_info_init(hdev);
+}
+
int hclge_tm_init_hw(struct hclge_dev *hdev, bool init)
{
int ret;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index f60e540c7a62..818610988d34 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -12,7 +12,7 @@
#define HCLGE_TM_PORT_BASE_MODE_MSK BIT(0)
-#define HCLGE_DEFAULT_PAUSE_TRANS_GAP 0xFF
+#define HCLGE_DEFAULT_PAUSE_TRANS_GAP 0x7F
#define HCLGE_DEFAULT_PAUSE_TRANS_TIME 0xFFFF
/* SP or DWRR */
@@ -147,6 +147,7 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init);
int hclge_tm_schd_setup_hw(struct hclge_dev *hdev);
void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc);
void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc);
+void hclge_tm_pfc_info_update(struct hclge_dev *hdev);
int hclge_tm_dwrr_cfg(struct hclge_dev *hdev);
int hclge_tm_init_hw(struct hclge_dev *hdev, bool init);
int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
index 6193f8fa7cf3..53804d95ea90 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
@@ -6,4 +6,4 @@
ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o
-hclgevf-objs = hclgevf_main.o hclgevf_cmd.o hclgevf_mbx.o
\ No newline at end of file
+hclgevf-objs = hclgevf_main.o hclgevf_cmd.o hclgevf_mbx.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
index 71f356fc2446..652b796044e3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
@@ -98,7 +98,6 @@ static void hclgevf_cmd_config_regs(struct hclgevf_cmq_ring *ring)
hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val);
reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
- reg_val |= HCLGEVF_NIC_CMQ_ENABLE;
hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, reg_val);
hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
@@ -110,7 +109,6 @@ static void hclgevf_cmd_config_regs(struct hclgevf_cmq_ring *ring)
hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, reg_val);
reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
- reg_val |= HCLGEVF_NIC_CMQ_ENABLE;
hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, reg_val);
hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0);
@@ -179,6 +177,38 @@ void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc,
desc->flag &= cpu_to_le16(~HCLGEVF_CMD_FLAG_WR);
}
+static int hclgevf_cmd_convert_err_code(u16 desc_ret)
+{
+ switch (desc_ret) {
+ case HCLGEVF_CMD_EXEC_SUCCESS:
+ return 0;
+ case HCLGEVF_CMD_NO_AUTH:
+ return -EPERM;
+ case HCLGEVF_CMD_NOT_SUPPORTED:
+ return -EOPNOTSUPP;
+ case HCLGEVF_CMD_QUEUE_FULL:
+ return -EXFULL;
+ case HCLGEVF_CMD_NEXT_ERR:
+ return -ENOSR;
+ case HCLGEVF_CMD_UNEXE_ERR:
+ return -ENOTBLK;
+ case HCLGEVF_CMD_PARA_ERR:
+ return -EINVAL;
+ case HCLGEVF_CMD_RESULT_ERR:
+ return -ERANGE;
+ case HCLGEVF_CMD_TIMEOUT:
+ return -ETIME;
+ case HCLGEVF_CMD_HILINK_ERR:
+ return -ENOLINK;
+ case HCLGEVF_CMD_QUEUE_ILLEGAL:
+ return -ENXIO;
+ case HCLGEVF_CMD_INVALID:
+ return -EBADR;
+ default:
+ return -EIO;
+ }
+}
+
/* hclgevf_cmd_send - send command to command queue
* @hw: pointer to the hw struct
* @desc: prefilled descriptor for describing the command
@@ -190,6 +220,7 @@ void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc,
int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num)
{
struct hclgevf_dev *hdev = (struct hclgevf_dev *)hw->hdev;
+ struct hclgevf_cmq_ring *csq = &hw->cmq.csq;
struct hclgevf_desc *desc_to_use;
bool complete = false;
u32 timeout = 0;
@@ -201,8 +232,17 @@ int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num)
spin_lock_bh(&hw->cmq.csq.lock);
- if (num > hclgevf_ring_space(&hw->cmq.csq) ||
- test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) {
+ if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) {
+ spin_unlock_bh(&hw->cmq.csq.lock);
+ return -EBUSY;
+ }
+
+ if (num > hclgevf_ring_space(&hw->cmq.csq)) {
+ /* If the CMDQ ring is full, the SW HEAD and HW HEAD may differ,
+ * so the SW HEAD pointer csq->next_to_clean needs to be updated.
+ */
+ csq->next_to_clean = hclgevf_read_dev(hw,
+ HCLGEVF_NIC_CSQ_HEAD_REG);
spin_unlock_bh(&hw->cmq.csq.lock);
return -EBUSY;
}
@@ -251,11 +291,7 @@ int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num)
else
retval = le16_to_cpu(desc[0].retval);
- if ((enum hclgevf_cmd_return_status)retval ==
- HCLGEVF_CMD_EXEC_SUCCESS)
- status = 0;
- else
- status = -EIO;
+ status = hclgevf_cmd_convert_err_code(retval);
hw->cmq.last_status = (enum hclgevf_cmd_status)retval;
ntc++;
handle++;
@@ -265,14 +301,13 @@ int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num)
}
if (!complete)
- status = -EAGAIN;
+ status = -EBADE;
/* Clean the command send queue */
handle = hclgevf_cmd_csq_clean(hw);
- if (handle != num) {
+ if (handle != num)
dev_warn(&hdev->pdev->dev,
"cleaned %d, need to clean %d\n", handle, num);
- }
spin_unlock_bh(&hw->cmq.csq.lock);
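The new full-queue branch in hclgevf_cmd_send() exists because free space is derived from the software head (next_to_clean), which only moves when completed descriptors are cleaned; re-reading the hardware HEAD register resynchronises it so a ring the firmware has already drained is not reported as full forever. The user-space model below shows that space calculation; hclgevf_ring_space() itself is not part of this hunk, so the formula here is the usual head/tail ring arithmetic and should be read as illustrative.

#include <stdint.h>
#include <stdio.h>

struct cmq_ring {
	uint16_t desc_num;	/* total descriptors in the ring */
	uint16_t next_to_use;	/* SW tail: where the next cmd is written */
	uint16_t next_to_clean;	/* SW head: last position known cleaned */
};

/* Free slots, keeping one descriptor as the full/empty guard. */
static uint16_t ring_space(const struct cmq_ring *ring)
{
	uint16_t used = (ring->next_to_use - ring->next_to_clean +
			 ring->desc_num) % ring->desc_num;

	return ring->desc_num - used - 1;
}

int main(void)
{
	struct cmq_ring ring = {
		.desc_num = 1024, .next_to_use = 1023, .next_to_clean = 0,
	};

	printf("space before resync: %u\n", (unsigned)ring_space(&ring)); /* 0 */

	/* Firmware has consumed everything: HW HEAD == next_to_use.
	 * Copying it into next_to_clean (what the patch does by reading
	 * HCLGEVF_NIC_CSQ_HEAD_REG) makes the free space visible again.
	 */
	ring.next_to_clean = 1023;
	printf("space after resync:  %u\n", (unsigned)ring_space(&ring)); /* 1023 */
	return 0;
}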
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
index 47030b42341f..127a434a56f3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
@@ -46,9 +46,17 @@ struct hclgevf_cmq_ring {
enum hclgevf_cmd_return_status {
HCLGEVF_CMD_EXEC_SUCCESS = 0,
- HCLGEVF_CMD_NO_AUTH = 1,
- HCLGEVF_CMD_NOT_EXEC = 2,
- HCLGEVF_CMD_QUEUE_FULL = 3,
+ HCLGEVF_CMD_NO_AUTH = 1,
+ HCLGEVF_CMD_NOT_SUPPORTED = 2,
+ HCLGEVF_CMD_QUEUE_FULL = 3,
+ HCLGEVF_CMD_NEXT_ERR = 4,
+ HCLGEVF_CMD_UNEXE_ERR = 5,
+ HCLGEVF_CMD_PARA_ERR = 6,
+ HCLGEVF_CMD_RESULT_ERR = 7,
+ HCLGEVF_CMD_TIMEOUT = 8,
+ HCLGEVF_CMD_HILINK_ERR = 9,
+ HCLGEVF_CMD_QUEUE_ILLEGAL = 10,
+ HCLGEVF_CMD_INVALID = 11,
};
enum hclgevf_cmd_status {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 5d53467ee2d2..a13a0e101c3b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -11,6 +11,8 @@
#define HCLGEVF_NAME "hclgevf"
+#define HCLGEVF_RESET_MAX_FAIL_CNT 5
+
static int hclgevf_reset_hdev(struct hclgevf_dev *hdev);
static struct hnae3_ae_algo ae_algovf;
@@ -83,8 +85,7 @@ static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG,
HCLGEVF_TQP_INTR_GL2_REG,
HCLGEVF_TQP_INTR_RL_REG};
-static inline struct hclgevf_dev *hclgevf_ae_get_hdev(
- struct hnae3_handle *handle)
+static struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle)
{
if (!handle->client)
return container_of(handle, struct hclgevf_dev, nic);
@@ -232,7 +233,7 @@ static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
int status;
status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0,
- true, &resp_msg, sizeof(u8));
+ true, &resp_msg, sizeof(resp_msg));
if (status) {
dev_err(&hdev->pdev->dev,
"VF request to get TC info from PF failed %d",
@@ -321,7 +322,8 @@ static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id)
memcpy(&msg_data[0], &queue_id, sizeof(queue_id));
ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QID_IN_PF, 0, msg_data,
- 2, true, resp_data, 2);
+ sizeof(msg_data), true, resp_data,
+ sizeof(resp_data));
if (!ret)
qid_in_pf = *(u16 *)resp_data;
@@ -382,7 +384,7 @@ static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
struct hnae3_handle *nic = &hdev->nic;
struct hnae3_knic_private_info *kinfo;
u16 new_tqps = hdev->num_tqps;
- int i;
+ unsigned int i;
kinfo = &nic->kinfo;
kinfo->num_tc = 0;
@@ -418,7 +420,7 @@ static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
u8 resp_msg;
status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL,
- 0, false, &resp_msg, sizeof(u8));
+ 0, false, &resp_msg, sizeof(resp_msg));
if (status)
dev_err(&hdev->pdev->dev,
"VF failed to fetch link status(%d) from PF", status);
@@ -453,11 +455,13 @@ static void hclgevf_update_link_mode(struct hclgevf_dev *hdev)
u8 resp_msg;
send_msg = HCLGEVF_ADVERTISING;
- hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0, &send_msg,
- sizeof(u8), false, &resp_msg, sizeof(u8));
+ hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0,
+ &send_msg, sizeof(send_msg), false,
+ &resp_msg, sizeof(resp_msg));
send_msg = HCLGEVF_SUPPORTED;
- hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0, &send_msg,
- sizeof(u8), false, &resp_msg, sizeof(u8));
+ hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0,
+ &send_msg, sizeof(send_msg), false,
+ &resp_msg, sizeof(resp_msg));
}
static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
@@ -470,12 +474,6 @@ static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
nic->numa_node_mask = hdev->numa_node_mask;
nic->flags |= HNAE3_SUPPORT_VF;
- if (hdev->ae_dev->dev_type != HNAE3_DEV_KNIC) {
- dev_err(&hdev->pdev->dev, "unsupported device type %d\n",
- hdev->ae_dev->dev_type);
- return -EINVAL;
- }
-
ret = hclgevf_knic_setup(hdev);
if (ret)
dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
@@ -544,14 +542,16 @@ static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev,
const u8 hfunc, const u8 *key)
{
struct hclgevf_rss_config_cmd *req;
+ unsigned int key_offset = 0;
struct hclgevf_desc desc;
- int key_offset;
+ int key_counts;
int key_size;
int ret;
+ key_counts = HCLGEVF_RSS_KEY_SIZE;
req = (struct hclgevf_rss_config_cmd *)desc.data;
- for (key_offset = 0; key_offset < 3; key_offset++) {
+ while (key_counts) {
hclgevf_cmd_setup_basic_desc(&desc,
HCLGEVF_OPC_RSS_GENERIC_CONFIG,
false);
@@ -560,15 +560,12 @@ static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev,
req->hash_config |=
(key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B);
- if (key_offset == 2)
- key_size =
- HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2;
- else
- key_size = HCLGEVF_RSS_HASH_KEY_NUM;
-
+ key_size = min(HCLGEVF_RSS_HASH_KEY_NUM, key_counts);
memcpy(req->hash_key,
key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size);
+ key_counts -= key_size;
+ key_offset++;
ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
@@ -631,7 +628,7 @@ static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
struct hclgevf_desc desc;
u16 roundup_size;
int status;
- int i;
+ unsigned int i;
req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;
@@ -997,6 +994,8 @@ static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
u8 type;
req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
+ type = en ? HCLGE_MBX_MAP_RING_TO_VECTOR :
+ HCLGE_MBX_UNMAP_RING_TO_VECTOR;
for (node = ring_chain; node; node = node->next) {
int idx_offset = HCLGE_MBX_RING_MAP_BASIC_MSG_NUM +
@@ -1006,9 +1005,6 @@ static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
hclgevf_cmd_setup_basic_desc(&desc,
HCLGEVF_OPC_MBX_VF_TO_PF,
false);
- type = en ?
- HCLGE_MBX_MAP_RING_TO_VECTOR :
- HCLGE_MBX_UNMAP_RING_TO_VECTOR;
req->msg[0] = type;
req->msg[1] = vector_id;
}
@@ -1134,7 +1130,7 @@ static int hclgevf_set_promisc_mode(struct hclgevf_dev *hdev, bool en_bc_pmc)
return hclgevf_cmd_set_promisc_mode(hdev, en_bc_pmc);
}
-static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
+static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, unsigned int tqp_id,
int stream_id, bool enable)
{
struct hclgevf_cfg_com_tqp_queue_cmd *req;
@@ -1147,7 +1143,8 @@ static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
false);
req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
req->stream_id = cpu_to_le16(stream_id);
- req->enable |= enable << HCLGEVF_TQP_ENABLE_B;
+ if (enable)
+ req->enable |= 1U << HCLGEVF_TQP_ENABLE_B;
status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
if (status)
@@ -1193,7 +1190,7 @@ static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
HCLGE_MBX_MAC_VLAN_UC_MODIFY;
status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
- subcode, msg_data, ETH_ALEN * 2,
+ subcode, msg_data, sizeof(msg_data),
true, NULL, 0);
if (!status)
ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);
@@ -1248,19 +1245,61 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
#define HCLGEVF_VLAN_MBX_MSG_LEN 5
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN];
+ int ret;
- if (vlan_id > 4095)
+ if (vlan_id > HCLGEVF_MAX_VLAN_ID)
return -EINVAL;
if (proto != htons(ETH_P_8021Q))
return -EPROTONOSUPPORT;
+ /* When the device is resetting, the firmware is unable to handle
+ * the mailbox. Just record the vlan id, and remove it after the
+ * reset has finished.
+ */
+ if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) && is_kill) {
+ set_bit(vlan_id, hdev->vlan_del_fail_bmap);
+ return -EBUSY;
+ }
+
msg_data[0] = is_kill;
memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
memcpy(&msg_data[3], &proto, sizeof(proto));
- return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
- HCLGE_MBX_VLAN_FILTER, msg_data,
- HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0);
+ ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
+ HCLGE_MBX_VLAN_FILTER, msg_data,
+ HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0);
+
+ /* When removing the hw vlan filter fails, record the vlan id,
+ * and try to remove it from hw later, to stay consistent
+ * with the stack.
+ */
+ if (is_kill && ret)
+ set_bit(vlan_id, hdev->vlan_del_fail_bmap);
+
+ return ret;
+}
+
+static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev)
+{
+#define HCLGEVF_MAX_SYNC_COUNT 60
+ struct hnae3_handle *handle = &hdev->nic;
+ int ret, sync_cnt = 0;
+ u16 vlan_id;
+
+ vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
+ while (vlan_id != VLAN_N_VID) {
+ ret = hclgevf_set_vlan_filter(handle, htons(ETH_P_8021Q),
+ vlan_id, true);
+ if (ret)
+ return;
+
+ clear_bit(vlan_id, hdev->vlan_del_fail_bmap);
+ sync_cnt++;
+ if (sync_cnt >= HCLGEVF_MAX_SYNC_COUNT)
+ return;
+
+ vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
+ }
}
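hclgevf_sync_vlan_filter() is the retry half of the pattern introduced above: deletions that failed (or arrived during a reset) are parked in vlan_del_fail_bmap and replayed later from the periodic service task, bounded per pass by HCLGEVF_MAX_SYNC_COUNT. The user-space model below mirrors that walk with a plain array standing in for the kernel bitmap helpers (set_bit/clear_bit/find_first_bit); the fake delete_vlan_from_hw() and its failure pattern are illustrative only.

#include <stdbool.h>
#include <stdio.h>

#define VLAN_N_VID	4096
#define MAX_SYNC_COUNT	60

static bool pending[VLAN_N_VID];	/* stands in for vlan_del_fail_bmap */

/* Fake hardware call: pretend some deletions still fail. */
static int delete_vlan_from_hw(int vlan_id)
{
	return (vlan_id % 3 == 0) ? -1 : 0;
}

/* One pass of the periodic sync task: retry parked deletions, stop on
 * the first failure or once the per-pass budget is spent.
 */
static void sync_vlan_filter(void)
{
	int synced = 0;

	for (int vlan_id = 0; vlan_id < VLAN_N_VID; vlan_id++) {
		if (!pending[vlan_id])
			continue;
		if (delete_vlan_from_hw(vlan_id))
			return;			/* try again next pass */
		pending[vlan_id] = false;
		if (++synced >= MAX_SYNC_COUNT)
			return;
	}
}

int main(void)
{
	pending[10] = pending[33] = pending[100] = true;
	sync_vlan_filter();
	for (int v = 0; v < VLAN_N_VID; v++)
		if (pending[v])
			printf("vlan %d still pending\n", v);
	return 0;
}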
static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
@@ -1280,7 +1319,7 @@ static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
u8 msg_data[2];
int ret;
- memcpy(&msg_data[0], &queue_id, sizeof(queue_id));
+ memcpy(msg_data, &queue_id, sizeof(queue_id));
/* disable vf queue before send queue reset msg to PF */
ret = hclgevf_tqp_enable(hdev, queue_id, 0, false);
@@ -1288,7 +1327,7 @@ static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
return ret;
return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data,
- 2, true, NULL, 0);
+ sizeof(msg_data), true, NULL, 0);
}
static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
@@ -1306,6 +1345,10 @@ static int hclgevf_notify_client(struct hclgevf_dev *hdev,
struct hnae3_handle *handle = &hdev->nic;
int ret;
+ if (!test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state) ||
+ !client)
+ return 0;
+
if (!client->ops->reset_notify)
return -EOPNOTSUPP;
@@ -1410,6 +1453,8 @@ static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
{
+#define HCLGEVF_RESET_SYNC_TIME 100
+
int ret = 0;
switch (hdev->reset_type) {
@@ -1427,13 +1472,34 @@ static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
}
set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
-
+ /* inform hardware that preparatory work is done */
+ msleep(HCLGEVF_RESET_SYNC_TIME);
+ hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG,
+ HCLGEVF_NIC_CMQ_ENABLE);
dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done, ret:%d\n",
hdev->reset_type, ret);
return ret;
}
+static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
+{
+ hdev->rst_stats.rst_fail_cnt++;
+ dev_err(&hdev->pdev->dev, "failed to reset VF(%d)\n",
+ hdev->rst_stats.rst_fail_cnt);
+
+ if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT)
+ set_bit(hdev->reset_type, &hdev->reset_pending);
+
+ if (hclgevf_is_reset_pending(hdev)) {
+ set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
+ hclgevf_reset_task_schedule(hdev);
+ } else {
+ hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG,
+ HCLGEVF_NIC_CMQ_ENABLE);
+ }
+}
+
static int hclgevf_reset(struct hclgevf_dev *hdev)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
@@ -1490,19 +1556,13 @@ static int hclgevf_reset(struct hclgevf_dev *hdev)
hdev->last_reset_time = jiffies;
ae_dev->reset_type = HNAE3_NONE_RESET;
hdev->rst_stats.rst_done_cnt++;
+ hdev->rst_stats.rst_fail_cnt = 0;
return ret;
err_reset_lock:
rtnl_unlock();
err_reset:
- /* When VF reset failed, only the higher level reset asserted by PF
- * can restore it, so re-initialize the command queue to receive
- * this higher reset event.
- */
- hclgevf_cmd_init(hdev);
- dev_err(&hdev->pdev->dev, "failed to reset VF\n");
- if (hclgevf_is_reset_pending(hdev))
- hclgevf_reset_task_schedule(hdev);
+ hclgevf_reset_err_handle(hdev);
return ret;
}
@@ -1612,7 +1672,8 @@ static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
{
- if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state)) {
+ if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state) &&
+ !test_bit(HCLGEVF_STATE_REMOVING, &hdev->state)) {
set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);
schedule_work(&hdev->rst_service_task);
}
@@ -1648,7 +1709,8 @@ static void hclgevf_service_timer(struct timer_list *t)
{
struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer);
- mod_timer(&hdev->service_timer, jiffies + 5 * HZ);
+ mod_timer(&hdev->service_timer, jiffies +
+ HCLGEVF_GENERAL_TASK_INTERVAL * HZ);
hdev->stats_timer++;
hclgevf_task_schedule(hdev);
@@ -1668,9 +1730,9 @@ static void hclgevf_reset_service_task(struct work_struct *work)
if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
&hdev->reset_state)) {
/* PF has intimated that it is about to reset the hardware.
- * We now have to poll & check if harware has actually completed
- * the reset sequence. On hardware reset completion, VF needs to
- * reset the client and ae device.
+ * We now have to poll & check if hardware has actually
+ * completed the reset sequence. On hardware reset completion,
+ * VF needs to reset the client and ae device.
*/
hdev->reset_attempts = 0;
@@ -1686,7 +1748,7 @@ static void hclgevf_reset_service_task(struct work_struct *work)
} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
&hdev->reset_state)) {
/* we could be here when either of the below happens:
- * 1. reset was initiated due to watchdog timeout due to
+ * 1. reset was initiated due to watchdog timeout caused by
* a. IMP was earlier reset and our TX got choked down,
* which resulted in the watchdog reacting and inducing a
* VF reset. This also means our cmdq would be unreliable.
@@ -1748,7 +1810,8 @@ static void hclgevf_keep_alive_timer(struct timer_list *t)
struct hclgevf_dev *hdev = from_timer(hdev, t, keep_alive_timer);
schedule_work(&hdev->keep_alive_task);
- mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ);
+ mod_timer(&hdev->keep_alive_timer, jiffies +
+ HCLGEVF_KEEP_ALIVE_TASK_INTERVAL * HZ);
}
static void hclgevf_keep_alive_task(struct work_struct *work)
@@ -1763,7 +1826,7 @@ static void hclgevf_keep_alive_task(struct work_struct *work)
return;
ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_KEEP_ALIVE, 0, NULL,
- 0, false, &respmsg, sizeof(u8));
+ 0, false, &respmsg, sizeof(respmsg));
if (ret)
dev_err(&hdev->pdev->dev,
"VF sends keep alive cmd failed(=%d)\n", ret);
@@ -1789,6 +1852,8 @@ static void hclgevf_service_task(struct work_struct *work)
hclgevf_update_link_mode(hdev);
+ hclgevf_sync_vlan_filter(hdev);
+
hclgevf_deferred_task_schedule(hdev);
clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
@@ -1995,7 +2060,7 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
}
- /* Initialize RSS indirect table for each vport */
+ /* Initialize RSS indirect table */
for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max;
@@ -2008,9 +2073,6 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
{
- /* other vlan config(like, VLAN TX/RX offload) would also be added
- * here later
- */
return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
false);
}
@@ -2032,7 +2094,6 @@ static int hclgevf_ae_start(struct hnae3_handle *handle)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- /* reset tqp stats */
hclgevf_reset_tqp_stats(handle);
hclgevf_request_link_info(hdev);
@@ -2056,7 +2117,6 @@ static void hclgevf_ae_stop(struct hnae3_handle *handle)
if (hclgevf_reset_tqp(handle, i))
break;
- /* reset tqp stats */
hclgevf_reset_tqp_stats(handle);
hclgevf_update_link_status(hdev, 0);
}
@@ -2080,7 +2140,8 @@ static int hclgevf_client_start(struct hnae3_handle *handle)
if (ret)
return ret;
- mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ);
+ mod_timer(&hdev->keep_alive_timer, jiffies +
+ HCLGEVF_KEEP_ALIVE_TASK_INTERVAL * HZ);
return 0;
}
@@ -2123,6 +2184,7 @@ static void hclgevf_state_init(struct hclgevf_dev *hdev)
static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
{
set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
+ set_bit(HCLGEVF_STATE_REMOVING, &hdev->state);
if (hdev->keep_alive_timer.function)
del_timer_sync(&hdev->keep_alive_timer);
@@ -2249,49 +2311,68 @@ static void hclgevf_info_show(struct hclgevf_dev *hdev)
dev_info(dev, "VF info end.\n");
}
-static int hclgevf_init_client_instance(struct hnae3_client *client,
- struct hnae3_ae_dev *ae_dev)
+static int hclgevf_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
+ struct hnae3_client *client)
{
struct hclgevf_dev *hdev = ae_dev->priv;
int ret;
- switch (client->type) {
- case HNAE3_CLIENT_KNIC:
- hdev->nic_client = client;
- hdev->nic.client = client;
+ ret = client->ops->init_instance(&hdev->nic);
+ if (ret)
+ return ret;
- ret = client->ops->init_instance(&hdev->nic);
- if (ret)
- goto clear_nic;
+ set_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);
+ hnae3_set_client_init_flag(client, ae_dev, 1);
- hnae3_set_client_init_flag(client, ae_dev, 1);
+ if (netif_msg_drv(&hdev->nic))
+ hclgevf_info_show(hdev);
- if (netif_msg_drv(&hdev->nic))
- hclgevf_info_show(hdev);
+ return 0;
+}
- if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
- struct hnae3_client *rc = hdev->roce_client;
+static int hclgevf_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
+ struct hnae3_client *client)
+{
+ struct hclgevf_dev *hdev = ae_dev->priv;
+ int ret;
- ret = hclgevf_init_roce_base_info(hdev);
- if (ret)
- goto clear_roce;
- ret = rc->ops->init_instance(&hdev->roce);
- if (ret)
- goto clear_roce;
+ if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
+ !hdev->nic_client)
+ return 0;
- hnae3_set_client_init_flag(hdev->roce_client, ae_dev,
- 1);
- }
- break;
- case HNAE3_CLIENT_UNIC:
+ ret = hclgevf_init_roce_base_info(hdev);
+ if (ret)
+ return ret;
+
+ ret = client->ops->init_instance(&hdev->roce);
+ if (ret)
+ return ret;
+
+ hnae3_set_client_init_flag(client, ae_dev, 1);
+
+ return 0;
+}
+
+static int hclgevf_init_client_instance(struct hnae3_client *client,
+ struct hnae3_ae_dev *ae_dev)
+{
+ struct hclgevf_dev *hdev = ae_dev->priv;
+ int ret;
+
+ switch (client->type) {
+ case HNAE3_CLIENT_KNIC:
hdev->nic_client = client;
hdev->nic.client = client;
- ret = client->ops->init_instance(&hdev->nic);
+ ret = hclgevf_init_nic_client_instance(ae_dev, client);
if (ret)
goto clear_nic;
- hnae3_set_client_init_flag(client, ae_dev, 1);
+ ret = hclgevf_init_roce_client_instance(ae_dev,
+ hdev->roce_client);
+ if (ret)
+ goto clear_roce;
+
break;
case HNAE3_CLIENT_ROCE:
if (hnae3_dev_roce_supported(hdev)) {
@@ -2299,17 +2380,10 @@ static int hclgevf_init_client_instance(struct hnae3_client *client,
hdev->roce.client = client;
}
- if (hdev->roce_client && hdev->nic_client) {
- ret = hclgevf_init_roce_base_info(hdev);
- if (ret)
- goto clear_roce;
-
- ret = client->ops->init_instance(&hdev->roce);
- if (ret)
- goto clear_roce;
- }
+ ret = hclgevf_init_roce_client_instance(ae_dev, client);
+ if (ret)
+ goto clear_roce;
- hnae3_set_client_init_flag(client, ae_dev, 1);
break;
default:
return -EINVAL;
@@ -2342,6 +2416,8 @@ static void hclgevf_uninit_client_instance(struct hnae3_client *client,
/* un-init nic/unic, if this was not called by roce client */
if (client->ops->uninit_instance && hdev->nic_client &&
client->type != HNAE3_CLIENT_ROCE) {
+ clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);
+
client->ops->uninit_instance(&hdev->nic, 0);
hdev->nic_client = NULL;
hdev->nic.client = NULL;
@@ -2512,6 +2588,12 @@ static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
return ret;
}
+ if (pdev->revision >= 0x21) {
+ ret = hclgevf_set_promisc_mode(hdev, true);
+ if (ret)
+ return ret;
+ }
+
dev_info(&hdev->pdev->dev, "Reset done\n");
return 0;
@@ -2591,9 +2673,11 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
* firmware makes sure broadcast packets can be accepted.
* For revision 0x21, default to enable broadcast promisc mode.
*/
- ret = hclgevf_set_promisc_mode(hdev, true);
- if (ret)
- goto err_config;
+ if (pdev->revision >= 0x21) {
+ ret = hclgevf_set_promisc_mode(hdev, true);
+ if (ret)
+ goto err_config;
+ }
/* Initialize RSS for this VF */
ret = hclgevf_rss_init_hw(hdev);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index cc52f54f8c08..5a9e30998a8f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -4,6 +4,7 @@
#ifndef __HCLGEVF_MAIN_H
#define __HCLGEVF_MAIN_H
#include <linux/fs.h>
+#include <linux/if_vlan.h>
#include <linux/types.h>
#include "hclge_mbx.h"
#include "hclgevf_cmd.h"
@@ -12,9 +13,12 @@
#define HCLGEVF_MOD_VERSION "1.0"
#define HCLGEVF_DRIVER_NAME "hclgevf"
+#define HCLGEVF_MAX_VLAN_ID 4095
#define HCLGEVF_MISC_VECTOR_NUM 0
#define HCLGEVF_INVALID_VPORT 0xffff
+#define HCLGEVF_GENERAL_TASK_INTERVAL 5
+#define HCLGEVF_KEEP_ALIVE_TASK_INTERVAL 2
/* This number actually depends upon the total number of VFs
* created by the physical function. But the maximum number of
@@ -130,6 +134,8 @@ enum hclgevf_states {
HCLGEVF_STATE_DOWN,
HCLGEVF_STATE_DISABLED,
HCLGEVF_STATE_IRQ_INITED,
+ HCLGEVF_STATE_REMOVING,
+ HCLGEVF_STATE_NIC_REGISTERED,
/* task states */
HCLGEVF_STATE_SERVICE_SCHED,
HCLGEVF_STATE_RST_SERVICE_SCHED,
@@ -220,6 +226,7 @@ struct hclgevf_rst_stats {
u32 vf_rst_cnt; /* the number of VF reset */
u32 rst_done_cnt; /* the number of reset completed */
u32 hw_rst_done_cnt; /* the number of HW reset completed */
+ u32 rst_fail_cnt; /* the number of VF reset fail */
};
struct hclgevf_dev {
@@ -265,6 +272,8 @@ struct hclgevf_dev {
u16 *vector_status;
int *vector_irq;
+ unsigned long vlan_del_fail_bmap[BITS_TO_LONGS(VLAN_N_VID)];
+
bool mbx_event_pending;
struct hclgevf_mbx_resp_status mbx_resp; /* mailbox response */
struct hclgevf_mbx_arq_ring arq; /* mailbox async rx queue */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
index 30f2e9352cf3..f60b80bd605e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -102,7 +102,8 @@ int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode,
~HCLGE_MBX_NEED_RESP_BIT;
req->msg[0] = code;
req->msg[1] = subcode;
- memcpy(&req->msg[2], msg_data, msg_len);
+ if (msg_data)
+ memcpy(&req->msg[2], msg_data, msg_len);
/* synchronous send */
if (need_resp) {
diff --git a/drivers/net/ethernet/huawei/hinic/Makefile b/drivers/net/ethernet/huawei/hinic/Makefile
index 99de5b6607d5..fe88ab88cacc 100644
--- a/drivers/net/ethernet/huawei/hinic/Makefile
+++ b/drivers/net/ethernet/huawei/hinic/Makefile
@@ -4,4 +4,4 @@ obj-$(CONFIG_HINIC) += hinic.o
hinic-y := hinic_main.o hinic_tx.o hinic_rx.o hinic_port.o hinic_hw_dev.o \
hinic_hw_io.o hinic_hw_qp.o hinic_hw_cmdq.o hinic_hw_wq.o \
hinic_hw_mgmt.o hinic_hw_api_cmd.o hinic_hw_eqs.o hinic_hw_if.o \
- hinic_common.o
+ hinic_common.o hinic_ethtool.o
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_dev.h
index 353276fdcaed..a209b14160cc 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_dev.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_dev.h
@@ -22,6 +22,7 @@
enum hinic_flags {
HINIC_LINK_UP = BIT(0),
HINIC_INTF_UP = BIT(1),
+ HINIC_RSS_ENABLE = BIT(2),
};
struct hinic_rx_mode_work {
@@ -29,6 +30,23 @@ struct hinic_rx_mode_work {
u32 rx_mode;
};
+struct hinic_rss_type {
+ u8 tcp_ipv6_ext;
+ u8 ipv6_ext;
+ u8 tcp_ipv6;
+ u8 ipv6;
+ u8 tcp_ipv4;
+ u8 ipv4;
+ u8 udp_ipv6;
+ u8 udp_ipv4;
+};
+
+enum hinic_rss_hash_type {
+ HINIC_RSS_HASH_ENGINE_TYPE_XOR,
+ HINIC_RSS_HASH_ENGINE_TYPE_TOEP,
+ HINIC_RSS_HASH_ENGINE_TYPE_MAX,
+};
+
struct hinic_dev {
struct net_device *netdev;
struct hinic_hwdev *hwdev;
@@ -36,6 +54,8 @@ struct hinic_dev {
u32 msg_enable;
unsigned int tx_weight;
unsigned int rx_weight;
+ u16 num_qps;
+ u16 max_qps;
unsigned int flags;
@@ -50,6 +70,14 @@ struct hinic_dev {
struct hinic_txq_stats tx_stats;
struct hinic_rxq_stats rx_stats;
+
+ u8 rss_tmpl_idx;
+ u8 rss_hash_engine;
+ u16 num_rss;
+ u16 rss_limit;
+ struct hinic_rss_type rss_type;
+ u8 *rss_hkey_user;
+ s32 *rss_indir_user;
};
#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
new file mode 100644
index 000000000000..60ec48fe4144
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
@@ -0,0 +1,762 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/ethtool.h>
+#include <linux/vmalloc.h>
+
+#include "hinic_hw_qp.h"
+#include "hinic_hw_dev.h"
+#include "hinic_port.h"
+#include "hinic_tx.h"
+#include "hinic_rx.h"
+#include "hinic_dev.h"
+
+static void set_link_speed(struct ethtool_link_ksettings *link_ksettings,
+ enum hinic_speed speed)
+{
+ switch (speed) {
+ case HINIC_SPEED_10MB_LINK:
+ link_ksettings->base.speed = SPEED_10;
+ break;
+
+ case HINIC_SPEED_100MB_LINK:
+ link_ksettings->base.speed = SPEED_100;
+ break;
+
+ case HINIC_SPEED_1000MB_LINK:
+ link_ksettings->base.speed = SPEED_1000;
+ break;
+
+ case HINIC_SPEED_10GB_LINK:
+ link_ksettings->base.speed = SPEED_10000;
+ break;
+
+ case HINIC_SPEED_25GB_LINK:
+ link_ksettings->base.speed = SPEED_25000;
+ break;
+
+ case HINIC_SPEED_40GB_LINK:
+ link_ksettings->base.speed = SPEED_40000;
+ break;
+
+ case HINIC_SPEED_100GB_LINK:
+ link_ksettings->base.speed = SPEED_100000;
+ break;
+
+ default:
+ link_ksettings->base.speed = SPEED_UNKNOWN;
+ break;
+ }
+}
+
+static int hinic_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings
+ *link_ksettings)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ enum hinic_port_link_state link_state;
+ struct hinic_port_cap port_cap;
+ int err;
+
+ ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
+ ethtool_link_ksettings_add_link_mode(link_ksettings, supported,
+ Autoneg);
+
+ link_ksettings->base.speed = SPEED_UNKNOWN;
+ link_ksettings->base.autoneg = AUTONEG_DISABLE;
+ link_ksettings->base.duplex = DUPLEX_UNKNOWN;
+
+ err = hinic_port_get_cap(nic_dev, &port_cap);
+ if (err)
+ return err;
+
+ err = hinic_port_link_state(nic_dev, &link_state);
+ if (err)
+ return err;
+
+ if (link_state != HINIC_LINK_STATE_UP)
+ return err;
+
+ set_link_speed(link_ksettings, port_cap.speed);
+
+ if (!!(port_cap.autoneg_cap & HINIC_AUTONEG_SUPPORTED))
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ advertising, Autoneg);
+
+ if (port_cap.autoneg_state == HINIC_AUTONEG_ACTIVE)
+ link_ksettings->base.autoneg = AUTONEG_ENABLE;
+
+ link_ksettings->base.duplex = (port_cap.duplex == HINIC_DUPLEX_FULL) ?
+ DUPLEX_FULL : DUPLEX_HALF;
+ return 0;
+}
+
+static void hinic_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *info)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ u8 mgmt_ver[HINIC_MGMT_VERSION_MAX_LEN] = {0};
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic_hwif *hwif = hwdev->hwif;
+ int err;
+
+ strlcpy(info->driver, HINIC_DRV_NAME, sizeof(info->driver));
+ strlcpy(info->bus_info, pci_name(hwif->pdev), sizeof(info->bus_info));
+
+ err = hinic_get_mgmt_version(nic_dev, mgmt_ver);
+ if (err)
+ return;
+
+ snprintf(info->fw_version, sizeof(info->fw_version), "%s", mgmt_ver);
+}
+
+static void hinic_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ ring->rx_max_pending = HINIC_RQ_DEPTH;
+ ring->tx_max_pending = HINIC_SQ_DEPTH;
+ ring->rx_pending = HINIC_RQ_DEPTH;
+ ring->tx_pending = HINIC_SQ_DEPTH;
+}
+
+static void hinic_get_channels(struct net_device *netdev,
+ struct ethtool_channels *channels)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+
+ channels->max_rx = hwdev->nic_cap.max_qps;
+ channels->max_tx = hwdev->nic_cap.max_qps;
+ channels->max_other = 0;
+ channels->max_combined = 0;
+ channels->rx_count = hinic_hwdev_num_qps(hwdev);
+ channels->tx_count = hinic_hwdev_num_qps(hwdev);
+ channels->other_count = 0;
+ channels->combined_count = 0;
+}
+
+static int hinic_get_rss_hash_opts(struct hinic_dev *nic_dev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct hinic_rss_type rss_type = { 0 };
+ int err;
+
+ cmd->data = 0;
+
+ if (!(nic_dev->flags & HINIC_RSS_ENABLE))
+ return 0;
+
+ err = hinic_get_rss_type(nic_dev, nic_dev->rss_tmpl_idx,
+ &rss_type);
+ if (err)
+ return err;
+
+ cmd->data = RXH_IP_SRC | RXH_IP_DST;
+ switch (cmd->flow_type) {
+ case TCP_V4_FLOW:
+ if (rss_type.tcp_ipv4)
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ case TCP_V6_FLOW:
+ if (rss_type.tcp_ipv6)
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ case UDP_V4_FLOW:
+ if (rss_type.udp_ipv4)
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ case UDP_V6_FLOW:
+ if (rss_type.udp_ipv6)
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ break;
+ default:
+ cmd->data = 0;
+ return -EINVAL;
+ }
+
+ return 0;
+}
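hinic_get_rss_hash_opts() services the ETHTOOL_GRXFH request, reporting which header fields feed the RSS hash as a mask of RXH_* bits. For context, a hedged user-space sketch of the matching query through the standard SIOCETHTOOL/struct ethtool_rxnfc interface follows; the interface name is a placeholder and error handling is minimal.

#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct ethtool_rxnfc nfc = {
		.cmd = ETHTOOL_GRXFH,
		.flow_type = TCP_V4_FLOW,
	};
	struct ifreq ifr = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder name */
	ifr.ifr_data = (void *)&nfc;

	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("tcp4 hash fields: ip %s, ports %s\n",
		       (nfc.data & (RXH_IP_SRC | RXH_IP_DST)) ? "yes" : "no",
		       (nfc.data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) ? "yes" : "no");
	else
		perror("ETHTOOL_GRXFH");

	close(fd);
	return 0;
}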
+
+static int set_l4_rss_hash_ops(struct ethtool_rxnfc *cmd,
+ struct hinic_rss_type *rss_type)
+{
+ u8 rss_l4_en = 0;
+
+ switch (cmd->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ rss_l4_en = 0;
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ rss_l4_en = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (cmd->flow_type) {
+ case TCP_V4_FLOW:
+ rss_type->tcp_ipv4 = rss_l4_en;
+ break;
+ case TCP_V6_FLOW:
+ rss_type->tcp_ipv6 = rss_l4_en;
+ break;
+ case UDP_V4_FLOW:
+ rss_type->udp_ipv4 = rss_l4_en;
+ break;
+ case UDP_V6_FLOW:
+ rss_type->udp_ipv6 = rss_l4_en;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hinic_set_rss_hash_opts(struct hinic_dev *nic_dev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct hinic_rss_type *rss_type = &nic_dev->rss_type;
+ int err;
+
+ if (!(nic_dev->flags & HINIC_RSS_ENABLE)) {
+ cmd->data = 0;
+ return -EOPNOTSUPP;
+ }
+
+ /* RSS does not support anything other than hashing
+ * to queues on src and dst IPs and ports
+ */
+ if (cmd->data & ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 |
+ RXH_L4_B_2_3))
+ return -EINVAL;
+
+ /* We need at least the IP SRC and DEST fields for hashing */
+ if (!(cmd->data & RXH_IP_SRC) || !(cmd->data & RXH_IP_DST))
+ return -EINVAL;
+
+ err = hinic_get_rss_type(nic_dev,
+ nic_dev->rss_tmpl_idx, rss_type);
+ if (err)
+ return -EFAULT;
+
+ switch (cmd->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ err = set_l4_rss_hash_ops(cmd, rss_type);
+ if (err)
+ return err;
+ break;
+ case IPV4_FLOW:
+ rss_type->ipv4 = 1;
+ break;
+ case IPV6_FLOW:
+ rss_type->ipv6 = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ err = hinic_set_rss_type(nic_dev, nic_dev->rss_tmpl_idx,
+ *rss_type);
+ if (err)
+ return -EFAULT;
+
+ return 0;
+}
+
+static int __set_rss_rxfh(struct net_device *netdev,
+ const u32 *indir, const u8 *key)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ int err;
+
+ if (indir) {
+ if (!nic_dev->rss_indir_user) {
+ nic_dev->rss_indir_user =
+ kzalloc(sizeof(u32) * HINIC_RSS_INDIR_SIZE,
+ GFP_KERNEL);
+ if (!nic_dev->rss_indir_user)
+ return -ENOMEM;
+ }
+
+ memcpy(nic_dev->rss_indir_user, indir,
+ sizeof(u32) * HINIC_RSS_INDIR_SIZE);
+
+ err = hinic_rss_set_indir_tbl(nic_dev,
+ nic_dev->rss_tmpl_idx, indir);
+ if (err)
+ return -EFAULT;
+ }
+
+ if (key) {
+ if (!nic_dev->rss_hkey_user) {
+ nic_dev->rss_hkey_user =
+ kzalloc(HINIC_RSS_KEY_SIZE * 2, GFP_KERNEL);
+
+ if (!nic_dev->rss_hkey_user)
+ return -ENOMEM;
+ }
+
+ memcpy(nic_dev->rss_hkey_user, key, HINIC_RSS_KEY_SIZE);
+
+ err = hinic_rss_set_template_tbl(nic_dev,
+ nic_dev->rss_tmpl_idx, key);
+ if (err)
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int hinic_get_rxnfc(struct net_device *netdev,
+ struct ethtool_rxnfc *cmd, u32 *rule_locs)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ int err = 0;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = nic_dev->num_qps;
+ break;
+ case ETHTOOL_GRXFH:
+ err = hinic_get_rss_hash_opts(nic_dev, cmd);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int hinic_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ int err = 0;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXFH:
+ err = hinic_set_rss_hash_opts(nic_dev, cmd);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int hinic_get_rxfh(struct net_device *netdev,
+ u32 *indir, u8 *key, u8 *hfunc)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ u8 hash_engine_type = 0;
+ int err = 0;
+
+ if (!(nic_dev->flags & HINIC_RSS_ENABLE))
+ return -EOPNOTSUPP;
+
+ if (hfunc) {
+ err = hinic_rss_get_hash_engine(nic_dev,
+ nic_dev->rss_tmpl_idx,
+ &hash_engine_type);
+ if (err)
+ return -EFAULT;
+
+ *hfunc = hash_engine_type ? ETH_RSS_HASH_TOP : ETH_RSS_HASH_XOR;
+ }
+
+ if (indir) {
+ err = hinic_rss_get_indir_tbl(nic_dev,
+ nic_dev->rss_tmpl_idx, indir);
+ if (err)
+ return -EFAULT;
+ }
+
+ if (key)
+ err = hinic_rss_get_template_tbl(nic_dev,
+ nic_dev->rss_tmpl_idx, key);
+
+ return err;
+}
+
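+/* ethtool -X: optionally switch the hash engine (Toeplitz or XOR), then
+ * update the indirection table and/or hash key.
+ */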
+static int hinic_set_rxfh(struct net_device *netdev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ int err = 0;
+
+ if (!(nic_dev->flags & HINIC_RSS_ENABLE))
+ return -EOPNOTSUPP;
+
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE) {
+ if (hfunc != ETH_RSS_HASH_TOP && hfunc != ETH_RSS_HASH_XOR)
+ return -EOPNOTSUPP;
+
+ nic_dev->rss_hash_engine = (hfunc == ETH_RSS_HASH_XOR) ?
+ HINIC_RSS_HASH_ENGINE_TYPE_XOR :
+ HINIC_RSS_HASH_ENGINE_TYPE_TOEP;
+ err = hinic_rss_set_hash_engine
+ (nic_dev, nic_dev->rss_tmpl_idx,
+ nic_dev->rss_hash_engine);
+ if (err)
+ return -EFAULT;
+ }
+
+ err = __set_rss_rxfh(netdev, indir, key);
+
+ return err;
+}
+
+static u32 hinic_get_rxfh_key_size(struct net_device *netdev)
+{
+ return HINIC_RSS_KEY_SIZE;
+}
+
+static u32 hinic_get_rxfh_indir_size(struct net_device *netdev)
+{
+ return HINIC_RSS_INDIR_SIZE;
+}
+
+#define ARRAY_LEN(arr) ((int)((int)sizeof(arr) / (int)sizeof(arr[0])))
+
+#define HINIC_FUNC_STAT(_stat_item) { \
+ .name = #_stat_item, \
+ .size = FIELD_SIZEOF(struct hinic_vport_stats, _stat_item), \
+ .offset = offsetof(struct hinic_vport_stats, _stat_item) \
+}
+
+static struct hinic_stats hinic_function_stats[] = {
+ HINIC_FUNC_STAT(tx_unicast_pkts_vport),
+ HINIC_FUNC_STAT(tx_unicast_bytes_vport),
+ HINIC_FUNC_STAT(tx_multicast_pkts_vport),
+ HINIC_FUNC_STAT(tx_multicast_bytes_vport),
+ HINIC_FUNC_STAT(tx_broadcast_pkts_vport),
+ HINIC_FUNC_STAT(tx_broadcast_bytes_vport),
+
+ HINIC_FUNC_STAT(rx_unicast_pkts_vport),
+ HINIC_FUNC_STAT(rx_unicast_bytes_vport),
+ HINIC_FUNC_STAT(rx_multicast_pkts_vport),
+ HINIC_FUNC_STAT(rx_multicast_bytes_vport),
+ HINIC_FUNC_STAT(rx_broadcast_pkts_vport),
+ HINIC_FUNC_STAT(rx_broadcast_bytes_vport),
+
+ HINIC_FUNC_STAT(tx_discard_vport),
+ HINIC_FUNC_STAT(rx_discard_vport),
+ HINIC_FUNC_STAT(tx_err_vport),
+ HINIC_FUNC_STAT(rx_err_vport),
+};
+
+#define HINIC_PORT_STAT(_stat_item) { \
+ .name = #_stat_item, \
+ .size = FIELD_SIZEOF(struct hinic_phy_port_stats, _stat_item), \
+ .offset = offsetof(struct hinic_phy_port_stats, _stat_item) \
+}
+
+static struct hinic_stats hinic_port_stats[] = {
+ HINIC_PORT_STAT(mac_rx_total_pkt_num),
+ HINIC_PORT_STAT(mac_rx_total_oct_num),
+ HINIC_PORT_STAT(mac_rx_bad_pkt_num),
+ HINIC_PORT_STAT(mac_rx_bad_oct_num),
+ HINIC_PORT_STAT(mac_rx_good_pkt_num),
+ HINIC_PORT_STAT(mac_rx_good_oct_num),
+ HINIC_PORT_STAT(mac_rx_uni_pkt_num),
+ HINIC_PORT_STAT(mac_rx_multi_pkt_num),
+ HINIC_PORT_STAT(mac_rx_broad_pkt_num),
+ HINIC_PORT_STAT(mac_tx_total_pkt_num),
+ HINIC_PORT_STAT(mac_tx_total_oct_num),
+ HINIC_PORT_STAT(mac_tx_bad_pkt_num),
+ HINIC_PORT_STAT(mac_tx_bad_oct_num),
+ HINIC_PORT_STAT(mac_tx_good_pkt_num),
+ HINIC_PORT_STAT(mac_tx_good_oct_num),
+ HINIC_PORT_STAT(mac_tx_uni_pkt_num),
+ HINIC_PORT_STAT(mac_tx_multi_pkt_num),
+ HINIC_PORT_STAT(mac_tx_broad_pkt_num),
+ HINIC_PORT_STAT(mac_rx_fragment_pkt_num),
+ HINIC_PORT_STAT(mac_rx_undersize_pkt_num),
+ HINIC_PORT_STAT(mac_rx_undermin_pkt_num),
+ HINIC_PORT_STAT(mac_rx_64_oct_pkt_num),
+ HINIC_PORT_STAT(mac_rx_65_127_oct_pkt_num),
+ HINIC_PORT_STAT(mac_rx_128_255_oct_pkt_num),
+ HINIC_PORT_STAT(mac_rx_256_511_oct_pkt_num),
+ HINIC_PORT_STAT(mac_rx_512_1023_oct_pkt_num),
+ HINIC_PORT_STAT(mac_rx_1024_1518_oct_pkt_num),
+ HINIC_PORT_STAT(mac_rx_1519_2047_oct_pkt_num),
+ HINIC_PORT_STAT(mac_rx_2048_4095_oct_pkt_num),
+ HINIC_PORT_STAT(mac_rx_4096_8191_oct_pkt_num),
+ HINIC_PORT_STAT(mac_rx_8192_9216_oct_pkt_num),
+ HINIC_PORT_STAT(mac_rx_9217_12287_oct_pkt_num),
+ HINIC_PORT_STAT(mac_rx_12288_16383_oct_pkt_num),
+ HINIC_PORT_STAT(mac_rx_1519_max_good_pkt_num),
+ HINIC_PORT_STAT(mac_rx_1519_max_bad_pkt_num),
+ HINIC_PORT_STAT(mac_rx_oversize_pkt_num),
+ HINIC_PORT_STAT(mac_rx_jabber_pkt_num),
+ HINIC_PORT_STAT(mac_rx_pause_num),
+ HINIC_PORT_STAT(mac_rx_pfc_pkt_num),
+ HINIC_PORT_STAT(mac_rx_pfc_pri0_pkt_num),
+ HINIC_PORT_STAT(mac_rx_pfc_pri1_pkt_num),
+ HINIC_PORT_STAT(mac_rx_pfc_pri2_pkt_num),
+ HINIC_PORT_STAT(mac_rx_pfc_pri3_pkt_num),
+ HINIC_PORT_STAT(mac_rx_pfc_pri4_pkt_num),
+ HINIC_PORT_STAT(mac_rx_pfc_pri5_pkt_num),
+ HINIC_PORT_STAT(mac_rx_pfc_pri6_pkt_num),
+ HINIC_PORT_STAT(mac_rx_pfc_pri7_pkt_num),
+ HINIC_PORT_STAT(mac_rx_control_pkt_num),
+ HINIC_PORT_STAT(mac_rx_sym_err_pkt_num),
+ HINIC_PORT_STAT(mac_rx_fcs_err_pkt_num),
+ HINIC_PORT_STAT(mac_rx_send_app_good_pkt_num),
+ HINIC_PORT_STAT(mac_rx_send_app_bad_pkt_num),
+ HINIC_PORT_STAT(mac_tx_fragment_pkt_num),
+ HINIC_PORT_STAT(mac_tx_undersize_pkt_num),
+ HINIC_PORT_STAT(mac_tx_undermin_pkt_num),
+ HINIC_PORT_STAT(mac_tx_64_oct_pkt_num),
+ HINIC_PORT_STAT(mac_tx_65_127_oct_pkt_num),
+ HINIC_PORT_STAT(mac_tx_128_255_oct_pkt_num),
+ HINIC_PORT_STAT(mac_tx_256_511_oct_pkt_num),
+ HINIC_PORT_STAT(mac_tx_512_1023_oct_pkt_num),
+ HINIC_PORT_STAT(mac_tx_1024_1518_oct_pkt_num),
+ HINIC_PORT_STAT(mac_tx_1519_2047_oct_pkt_num),
+ HINIC_PORT_STAT(mac_tx_2048_4095_oct_pkt_num),
+ HINIC_PORT_STAT(mac_tx_4096_8191_oct_pkt_num),
+ HINIC_PORT_STAT(mac_tx_8192_9216_oct_pkt_num),
+ HINIC_PORT_STAT(mac_tx_9217_12287_oct_pkt_num),
+ HINIC_PORT_STAT(mac_tx_12288_16383_oct_pkt_num),
+ HINIC_PORT_STAT(mac_tx_1519_max_good_pkt_num),
+ HINIC_PORT_STAT(mac_tx_1519_max_bad_pkt_num),
+ HINIC_PORT_STAT(mac_tx_oversize_pkt_num),
+ HINIC_PORT_STAT(mac_tx_jabber_pkt_num),
+ HINIC_PORT_STAT(mac_tx_pause_num),
+ HINIC_PORT_STAT(mac_tx_pfc_pkt_num),
+ HINIC_PORT_STAT(mac_tx_pfc_pri0_pkt_num),
+ HINIC_PORT_STAT(mac_tx_pfc_pri1_pkt_num),
+ HINIC_PORT_STAT(mac_tx_pfc_pri2_pkt_num),
+ HINIC_PORT_STAT(mac_tx_pfc_pri3_pkt_num),
+ HINIC_PORT_STAT(mac_tx_pfc_pri4_pkt_num),
+ HINIC_PORT_STAT(mac_tx_pfc_pri5_pkt_num),
+ HINIC_PORT_STAT(mac_tx_pfc_pri6_pkt_num),
+ HINIC_PORT_STAT(mac_tx_pfc_pri7_pkt_num),
+ HINIC_PORT_STAT(mac_tx_control_pkt_num),
+ HINIC_PORT_STAT(mac_tx_err_all_pkt_num),
+ HINIC_PORT_STAT(mac_tx_from_app_good_pkt_num),
+ HINIC_PORT_STAT(mac_tx_from_app_bad_pkt_num),
+};
+
+#define HINIC_TXQ_STAT(_stat_item) { \
+ .name = "txq%d_"#_stat_item, \
+ .size = FIELD_SIZEOF(struct hinic_txq_stats, _stat_item), \
+ .offset = offsetof(struct hinic_txq_stats, _stat_item) \
+}
+
+static struct hinic_stats hinic_tx_queue_stats[] = {
+ HINIC_TXQ_STAT(pkts),
+ HINIC_TXQ_STAT(bytes),
+ HINIC_TXQ_STAT(tx_busy),
+ HINIC_TXQ_STAT(tx_wake),
+ HINIC_TXQ_STAT(tx_dropped),
+ HINIC_TXQ_STAT(big_frags_pkts),
+};
+
+#define HINIC_RXQ_STAT(_stat_item) { \
+ .name = "rxq%d_"#_stat_item, \
+ .size = FIELD_SIZEOF(struct hinic_rxq_stats, _stat_item), \
+ .offset = offsetof(struct hinic_rxq_stats, _stat_item) \
+}
+
+static struct hinic_stats hinic_rx_queue_stats[] = {
+ HINIC_RXQ_STAT(pkts),
+ HINIC_RXQ_STAT(bytes),
+ HINIC_RXQ_STAT(errors),
+ HINIC_RXQ_STAT(csum_errors),
+ HINIC_RXQ_STAT(other_errors),
+};
+
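+/* Copy the per-queue driver counters into the ethtool data array; the
+ * layout must match the string order produced by hinic_get_strings().
+ */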
+static void get_drv_queue_stats(struct hinic_dev *nic_dev, u64 *data)
+{
+ struct hinic_txq_stats txq_stats;
+ struct hinic_rxq_stats rxq_stats;
+ u16 i = 0, j = 0, qid = 0;
+ char *p;
+
+ for (qid = 0; qid < nic_dev->num_qps; qid++) {
+ if (!nic_dev->txqs)
+ break;
+
+ hinic_txq_get_stats(&nic_dev->txqs[qid], &txq_stats);
+ for (j = 0; j < ARRAY_LEN(hinic_tx_queue_stats); j++, i++) {
+ p = (char *)&txq_stats +
+ hinic_tx_queue_stats[j].offset;
+ data[i] = (hinic_tx_queue_stats[j].size ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
+ }
+
+ for (qid = 0; qid < nic_dev->num_qps; qid++) {
+ if (!nic_dev->rxqs)
+ break;
+
+ hinic_rxq_get_stats(&nic_dev->rxqs[qid], &rxq_stats);
+ for (j = 0; j < ARRAY_LEN(hinic_rx_queue_stats); j++, i++) {
+ p = (char *)&rxq_stats +
+ hinic_rx_queue_stats[j].offset;
+ data[i] = (hinic_rx_queue_stats[j].size ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
+ }
+}
+
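+/* ethtool -S: vport (function) stats first, then MAC port stats, then the
+ * per-queue driver stats, matching the order in hinic_get_strings().
+ */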
+static void hinic_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_vport_stats vport_stats = {0};
+ struct hinic_phy_port_stats *port_stats;
+ u16 i = 0, j = 0;
+ char *p;
+ int err;
+
+ err = hinic_get_vport_stats(nic_dev, &vport_stats);
+ if (err)
+ netif_err(nic_dev, drv, netdev,
+ "Failed to get vport stats from firmware\n");
+
+ for (j = 0; j < ARRAY_LEN(hinic_function_stats); j++, i++) {
+ p = (char *)&vport_stats + hinic_function_stats[j].offset;
+ data[i] = (hinic_function_stats[j].size ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
+
+ port_stats = kzalloc(sizeof(*port_stats), GFP_KERNEL);
+ if (!port_stats) {
+ memset(&data[i], 0,
+ ARRAY_LEN(hinic_port_stats) * sizeof(*data));
+ i += ARRAY_LEN(hinic_port_stats);
+ goto get_drv_stats;
+ }
+
+ err = hinic_get_phy_port_stats(nic_dev, port_stats);
+ if (err)
+ netif_err(nic_dev, drv, netdev,
+ "Failed to get port stats from firmware\n");
+
+ for (j = 0; j < ARRAY_LEN(hinic_port_stats); j++, i++) {
+ p = (char *)port_stats + hinic_port_stats[j].offset;
+ data[i] = (hinic_port_stats[j].size ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
+
+ kfree(port_stats);
+
+get_drv_stats:
+ get_drv_queue_stats(nic_dev, data + i);
+}
+
+static int hinic_get_sset_count(struct net_device *netdev, int sset)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ int count, q_num;
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ q_num = nic_dev->num_qps;
+ count = ARRAY_LEN(hinic_function_stats) +
+ (ARRAY_LEN(hinic_tx_queue_stats) +
+ ARRAY_LEN(hinic_rx_queue_stats)) * q_num;
+
+ count += ARRAY_LEN(hinic_port_stats);
+
+ return count;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void hinic_get_strings(struct net_device *netdev,
+ u32 stringset, u8 *data)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ char *p = (char *)data;
+ u16 i, j;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < ARRAY_LEN(hinic_function_stats); i++) {
+ memcpy(p, hinic_function_stats[i].name,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+
+ for (i = 0; i < ARRAY_LEN(hinic_port_stats); i++) {
+ memcpy(p, hinic_port_stats[i].name,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+
+ for (i = 0; i < nic_dev->num_qps; i++) {
+ for (j = 0; j < ARRAY_LEN(hinic_tx_queue_stats); j++) {
+ sprintf(p, hinic_tx_queue_stats[j].name, i);
+ p += ETH_GSTRING_LEN;
+ }
+ }
+
+ for (i = 0; i < nic_dev->num_qps; i++) {
+ for (j = 0; j < ARRAY_LEN(hinic_rx_queue_stats); j++) {
+ sprintf(p, hinic_rx_queue_stats[j].name, i);
+ p += ETH_GSTRING_LEN;
+ }
+ }
+
+ return;
+ default:
+ return;
+ }
+}
+
+static const struct ethtool_ops hinic_ethtool_ops = {
+ .get_link_ksettings = hinic_get_link_ksettings,
+ .get_drvinfo = hinic_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_ringparam = hinic_get_ringparam,
+ .get_channels = hinic_get_channels,
+ .get_rxnfc = hinic_get_rxnfc,
+ .set_rxnfc = hinic_set_rxnfc,
+ .get_rxfh_key_size = hinic_get_rxfh_key_size,
+ .get_rxfh_indir_size = hinic_get_rxfh_indir_size,
+ .get_rxfh = hinic_get_rxfh,
+ .set_rxfh = hinic_set_rxfh,
+ .get_sset_count = hinic_get_sset_count,
+ .get_ethtool_stats = hinic_get_ethtool_stats,
+ .get_strings = hinic_get_strings,
+};
+
+void hinic_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &hinic_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
index 408705687de6..6f2cf569a283 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
@@ -89,9 +89,6 @@ static int get_capability(struct hinic_hwdev *hwdev,
if (nic_cap->num_qps > HINIC_Q_CTXT_MAX)
nic_cap->num_qps = HINIC_Q_CTXT_MAX;
- /* num_qps must be power of 2 */
- nic_cap->num_qps = BIT(fls(nic_cap->num_qps) - 1);
-
nic_cap->max_qps = dev_cap->max_sqs + 1;
if (nic_cap->max_qps != (dev_cap->max_rqs + 1))
return -EFAULT;
@@ -304,6 +301,8 @@ static int set_hw_ioctxt(struct hinic_hwdev *hwdev, unsigned int rq_depth,
hw_ioctxt.set_cmdq_depth = HW_IOCTXT_SET_CMDQ_DEPTH_DEFAULT;
hw_ioctxt.cmdq_depth = 0;
+ hw_ioctxt.lro_en = 1;
+
hw_ioctxt.rq_depth = ilog2(rq_depth);
hw_ioctxt.rx_buf_sz_idx = HINIC_RX_BUF_SZ_IDX;
@@ -872,6 +871,13 @@ void hinic_free_hwdev(struct hinic_hwdev *hwdev)
hinic_free_hwif(hwdev->hwif);
}
+int hinic_hwdev_max_num_qps(struct hinic_hwdev *hwdev)
+{
+ struct hinic_cap *nic_cap = &hwdev->nic_cap;
+
+ return nic_cap->max_qps;
+}
+
/**
* hinic_hwdev_num_qps - return the number QPs available for use
* @hwdev: the NIC HW device
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
index a0a5b7434ad7..b069045de416 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
@@ -41,21 +41,73 @@ enum hinic_port_cmd {
HINIC_PORT_CMD_GET_LINK_STATE = 24,
+ HINIC_PORT_CMD_SET_LRO = 25,
+
HINIC_PORT_CMD_SET_RX_CSUM = 26,
+ HINIC_PORT_CMD_SET_RX_VLAN_OFFLOAD = 27,
+
+ HINIC_PORT_CMD_GET_PORT_STATISTICS = 28,
+
+ HINIC_PORT_CMD_CLEAR_PORT_STATISTICS = 29,
+
+ HINIC_PORT_CMD_GET_VPORT_STAT = 30,
+
+ HINIC_PORT_CMD_CLEAN_VPORT_STAT = 31,
+
+ HINIC_PORT_CMD_GET_RSS_TEMPLATE_INDIR_TBL = 37,
+
HINIC_PORT_CMD_SET_PORT_STATE = 41,
+ HINIC_PORT_CMD_SET_RSS_TEMPLATE_TBL = 43,
+
+ HINIC_PORT_CMD_GET_RSS_TEMPLATE_TBL = 44,
+
+ HINIC_PORT_CMD_SET_RSS_HASH_ENGINE = 45,
+
+ HINIC_PORT_CMD_GET_RSS_HASH_ENGINE = 46,
+
+ HINIC_PORT_CMD_GET_RSS_CTX_TBL = 47,
+
+ HINIC_PORT_CMD_SET_RSS_CTX_TBL = 48,
+
+ HINIC_PORT_CMD_RSS_TEMP_MGR = 49,
+
+ HINIC_PORT_CMD_RSS_CFG = 66,
+
HINIC_PORT_CMD_FWCTXT_INIT = 69,
+ HINIC_PORT_CMD_GET_MGMT_VERSION = 88,
+
HINIC_PORT_CMD_SET_FUNC_STATE = 93,
HINIC_PORT_CMD_GET_GLOBAL_QPN = 102,
HINIC_PORT_CMD_SET_TSO = 112,
+ HINIC_PORT_CMD_SET_RQ_IQ_MAP = 115,
+
HINIC_PORT_CMD_GET_CAP = 170,
+
+ HINIC_PORT_CMD_SET_LRO_TIMER = 244,
};
+enum hinic_ucode_cmd {
+ HINIC_UCODE_CMD_MODIFY_QUEUE_CONTEXT = 0,
+ HINIC_UCODE_CMD_CLEAN_QUEUE_CONTEXT,
+ HINIC_UCODE_CMD_ARM_SQ,
+ HINIC_UCODE_CMD_ARM_RQ,
+ HINIC_UCODE_CMD_SET_RSS_INDIR_TABLE,
+ HINIC_UCODE_CMD_SET_RSS_CONTEXT_TABLE,
+ HINIC_UCODE_CMD_GET_RSS_INDIR_TABLE,
+ HINIC_UCODE_CMD_GET_RSS_CONTEXT_TABLE,
+ HINIC_UCODE_CMD_SET_IQ_ENABLE,
+ HINIC_UCODE_CMD_SET_RQ_FLUSH = 10
+};
+
+#define NIC_RSS_CMD_TEMP_ALLOC 0x01
+#define NIC_RSS_CMD_TEMP_FREE 0x02
+
enum hinic_mgmt_msg_cmd {
HINIC_MGMT_MSG_CMD_BASE = 160,
@@ -97,7 +149,7 @@ struct hinic_cmd_hw_ioctxt {
u8 set_cmdq_depth;
u8 cmdq_depth;
- u8 rsvd2;
+ u8 lro_en;
u8 rsvd3;
u8 rsvd4;
u8 rsvd5;
@@ -215,6 +267,8 @@ struct hinic_hwdev *hinic_init_hwdev(struct pci_dev *pdev);
void hinic_free_hwdev(struct hinic_hwdev *hwdev);
+int hinic_hwdev_max_num_qps(struct hinic_hwdev *hwdev);
+
int hinic_hwdev_num_qps(struct hinic_hwdev *hwdev);
struct hinic_sq *hinic_hwdev_get_sq(struct hinic_hwdev *hwdev, int i);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
index 2d07bdd17432..d66f86fa3f46 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
@@ -36,6 +36,7 @@
enum io_cmd {
IO_CMD_MODIFY_QUEUE_CTXT = 0,
+ IO_CMD_CLEAN_QUEUE_CTXT,
};
static void init_db_area_idx(struct hinic_free_db_area *free_db_area)
@@ -201,6 +202,59 @@ static int write_qp_ctxts(struct hinic_func_to_io *func_to_io, u16 base_qpn,
write_rq_ctxts(func_to_io, base_qpn, num_qps));
}
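+/* Zero the TSO/LRO area of the SQ or RQ queue contexts via a command
+ * queue request.
+ */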
+static int hinic_clean_queue_offload_ctxt(struct hinic_func_to_io *func_to_io,
+ enum hinic_qp_ctxt_type ctxt_type)
+{
+ struct hinic_hwif *hwif = func_to_io->hwif;
+ struct hinic_clean_queue_ctxt *ctxt_block;
+ struct pci_dev *pdev = hwif->pdev;
+ struct hinic_cmdq_buf cmdq_buf;
+ u64 out_param = 0;
+ int err;
+
+ err = hinic_alloc_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to allocate cmdq buf\n");
+ return err;
+ }
+
+ ctxt_block = cmdq_buf.buf;
+ ctxt_block->cmdq_hdr.num_queues = func_to_io->max_qps;
+ ctxt_block->cmdq_hdr.queue_type = ctxt_type;
+ ctxt_block->cmdq_hdr.addr_offset = 0;
+
+ /* TSO/LRO ctxt size: 0x0:0B; 0x1:160B; 0x2:200B; 0x3:240B */
+ ctxt_block->ctxt_size = 0x3;
+
+ hinic_cpu_to_be32(ctxt_block, sizeof(*ctxt_block));
+
+ cmdq_buf.size = sizeof(*ctxt_block);
+
+ err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC,
+ IO_CMD_CLEAN_QUEUE_CTXT,
+ &cmdq_buf, &out_param);
+
+ if (err || out_param) {
+ dev_err(&pdev->dev, "Failed to clean offload ctxts, err: %d, out_param: 0x%llx\n",
+ err, out_param);
+
+ err = -EFAULT;
+ }
+
+ hinic_free_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
+
+ return err;
+}
+
+static int hinic_clean_qp_offload_ctxt(struct hinic_func_to_io *func_to_io)
+{
+ /* clean LRO/TSO context space */
+ return (hinic_clean_queue_offload_ctxt(func_to_io,
+ HINIC_QP_CTXT_TYPE_SQ) ||
+ hinic_clean_queue_offload_ctxt(func_to_io,
+ HINIC_QP_CTXT_TYPE_RQ));
+}
+
/**
* init_qp - Initialize a Queue Pair
* @func_to_io: func to io channel that holds the IO components
@@ -372,6 +426,12 @@ int hinic_io_create_qps(struct hinic_func_to_io *func_to_io,
goto err_write_qp_ctxts;
}
+ err = hinic_clean_qp_offload_ctxt(func_to_io);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to clean QP contexts space\n");
+ goto err_write_qp_ctxts;
+ }
+
return 0;
err_write_qp_ctxts:
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp_ctxt.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp_ctxt.h
index 1856fdcc1e32..00900a6640ad 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp_ctxt.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp_ctxt.h
@@ -192,6 +192,11 @@ struct hinic_rq_ctxt {
u32 wq_block_lo_pfn;
};
+struct hinic_clean_queue_ctxt {
+ struct hinic_qp_ctxt_header cmdq_hdr;
+ u32 ctxt_size;
+};
+
struct hinic_sq_ctxt_block {
struct hinic_qp_ctxt_header hdr;
struct hinic_sq_ctxt sq_ctxt[HINIC_Q_CTXT_MAX];
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h
index 8991c9a5ef04..f4b6d2c1061f 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h
@@ -210,6 +210,57 @@
#define HINIC_MSS_DEFAULT 0x3E00
#define HINIC_MSS_MIN 0x50
+#define RQ_CQE_STATUS_NUM_LRO_SHIFT 16
+#define RQ_CQE_STATUS_NUM_LRO_MASK 0xFFU
+
+#define RQ_CQE_STATUS_GET(val, member) (((val) >> \
+ RQ_CQE_STATUS_##member##_SHIFT) & \
+ RQ_CQE_STATUS_##member##_MASK)
+
+#define HINIC_GET_RX_NUM_LRO(status) \
+ RQ_CQE_STATUS_GET(status, NUM_LRO)
+
+#define RQ_CQE_OFFOLAD_TYPE_PKT_TYPE_SHIFT 0
+#define RQ_CQE_OFFOLAD_TYPE_PKT_TYPE_MASK 0xFFFU
+#define RQ_CQE_OFFOLAD_TYPE_VLAN_EN_SHIFT 21
+#define RQ_CQE_OFFOLAD_TYPE_VLAN_EN_MASK 0x1U
+
+#define RQ_CQE_OFFOLAD_TYPE_GET(val, member) (((val) >> \
+ RQ_CQE_OFFOLAD_TYPE_##member##_SHIFT) & \
+ RQ_CQE_OFFOLAD_TYPE_##member##_MASK)
+
+#define HINIC_GET_RX_PKT_TYPE(offload_type) \
+ RQ_CQE_OFFOLAD_TYPE_GET(offload_type, PKT_TYPE)
+
+#define HINIC_GET_RX_VLAN_OFFLOAD_EN(offload_type) \
+ RQ_CQE_OFFOLAD_TYPE_GET(offload_type, VLAN_EN)
+
+#define RQ_CQE_SGE_VLAN_MASK 0xFFFFU
+#define RQ_CQE_SGE_VLAN_SHIFT 0
+
+#define RQ_CQE_SGE_GET(val, member) (((val) >> \
+ RQ_CQE_SGE_##member##_SHIFT) & \
+ RQ_CQE_SGE_##member##_MASK)
+
+#define HINIC_GET_RX_VLAN_TAG(vlan_len) \
+ RQ_CQE_SGE_GET(vlan_len, VLAN)
+
+#define HINIC_RSS_TYPE_VALID_SHIFT 23
+#define HINIC_RSS_TYPE_TCP_IPV6_EXT_SHIFT 24
+#define HINIC_RSS_TYPE_IPV6_EXT_SHIFT 25
+#define HINIC_RSS_TYPE_TCP_IPV6_SHIFT 26
+#define HINIC_RSS_TYPE_IPV6_SHIFT 27
+#define HINIC_RSS_TYPE_TCP_IPV4_SHIFT 28
+#define HINIC_RSS_TYPE_IPV4_SHIFT 29
+#define HINIC_RSS_TYPE_UDP_IPV6_SHIFT 30
+#define HINIC_RSS_TYPE_UDP_IPV4_SHIFT 31
+
+#define HINIC_RSS_TYPE_SET(val, member) \
+ (((u32)(val) & 0x1) << HINIC_RSS_TYPE_##member##_SHIFT)
+
+#define HINIC_RSS_TYPE_GET(val, member) \
+ (((u32)(val) >> HINIC_RSS_TYPE_##member##_SHIFT) & 0x1)
+
enum hinic_l4offload_type {
HINIC_L4_OFF_DISABLE = 0,
HINIC_TCP_OFFLOAD_ENABLE = 1,
@@ -363,7 +414,7 @@ struct hinic_rq_cqe {
u32 status;
u32 len;
- u32 rsvd2;
+ u32 offload_type;
u32 rsvd3;
u32 rsvd4;
u32 rsvd5;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index b695d29d364c..2411ad270c98 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -53,6 +53,10 @@ MODULE_PARM_DESC(rx_weight, "Number Rx packets for NAPI budget (default=64)");
NETIF_MSG_IFUP | \
NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR)
+#define HINIC_LRO_MAX_WQE_NUM_DEFAULT 8
+
+#define HINIC_LRO_RX_TIMER_DEFAULT 16
+
#define VLAN_BITMAP_SIZE(nic_dev) (ALIGN(VLAN_N_VID, 8) / 8)
#define work_to_rx_mode_work(work) \
@@ -63,137 +67,9 @@ MODULE_PARM_DESC(rx_weight, "Number Rx packets for NAPI budget (default=64)");
static int change_mac_addr(struct net_device *netdev, const u8 *addr);
-static void set_link_speed(struct ethtool_link_ksettings *link_ksettings,
- enum hinic_speed speed)
-{
- switch (speed) {
- case HINIC_SPEED_10MB_LINK:
- link_ksettings->base.speed = SPEED_10;
- break;
-
- case HINIC_SPEED_100MB_LINK:
- link_ksettings->base.speed = SPEED_100;
- break;
-
- case HINIC_SPEED_1000MB_LINK:
- link_ksettings->base.speed = SPEED_1000;
- break;
-
- case HINIC_SPEED_10GB_LINK:
- link_ksettings->base.speed = SPEED_10000;
- break;
-
- case HINIC_SPEED_25GB_LINK:
- link_ksettings->base.speed = SPEED_25000;
- break;
-
- case HINIC_SPEED_40GB_LINK:
- link_ksettings->base.speed = SPEED_40000;
- break;
-
- case HINIC_SPEED_100GB_LINK:
- link_ksettings->base.speed = SPEED_100000;
- break;
-
- default:
- link_ksettings->base.speed = SPEED_UNKNOWN;
- break;
- }
-}
-
-static int hinic_get_link_ksettings(struct net_device *netdev,
- struct ethtool_link_ksettings
- *link_ksettings)
-{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- enum hinic_port_link_state link_state;
- struct hinic_port_cap port_cap;
- int err;
-
- ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
- ethtool_link_ksettings_add_link_mode(link_ksettings, supported,
- Autoneg);
-
- link_ksettings->base.speed = SPEED_UNKNOWN;
- link_ksettings->base.autoneg = AUTONEG_DISABLE;
- link_ksettings->base.duplex = DUPLEX_UNKNOWN;
-
- err = hinic_port_get_cap(nic_dev, &port_cap);
- if (err) {
- netif_err(nic_dev, drv, netdev,
- "Failed to get port capabilities\n");
- return err;
- }
-
- err = hinic_port_link_state(nic_dev, &link_state);
- if (err) {
- netif_err(nic_dev, drv, netdev,
- "Failed to get port link state\n");
- return err;
- }
-
- if (link_state != HINIC_LINK_STATE_UP) {
- netif_info(nic_dev, drv, netdev, "No link\n");
- return err;
- }
-
- set_link_speed(link_ksettings, port_cap.speed);
-
- if (!!(port_cap.autoneg_cap & HINIC_AUTONEG_SUPPORTED))
- ethtool_link_ksettings_add_link_mode(link_ksettings,
- advertising, Autoneg);
-
- if (port_cap.autoneg_state == HINIC_AUTONEG_ACTIVE)
- link_ksettings->base.autoneg = AUTONEG_ENABLE;
-
- link_ksettings->base.duplex = (port_cap.duplex == HINIC_DUPLEX_FULL) ?
- DUPLEX_FULL : DUPLEX_HALF;
- return 0;
-}
-
-static void hinic_get_drvinfo(struct net_device *netdev,
- struct ethtool_drvinfo *info)
-{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_hwif *hwif = hwdev->hwif;
-
- strlcpy(info->driver, HINIC_DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(hwif->pdev), sizeof(info->bus_info));
-}
-
-static void hinic_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
-{
- ring->rx_max_pending = HINIC_RQ_DEPTH;
- ring->tx_max_pending = HINIC_SQ_DEPTH;
- ring->rx_pending = HINIC_RQ_DEPTH;
- ring->tx_pending = HINIC_SQ_DEPTH;
-}
-
-static void hinic_get_channels(struct net_device *netdev,
- struct ethtool_channels *channels)
-{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
-
- channels->max_rx = hwdev->nic_cap.max_qps;
- channels->max_tx = hwdev->nic_cap.max_qps;
- channels->max_other = 0;
- channels->max_combined = 0;
- channels->rx_count = hinic_hwdev_num_qps(hwdev);
- channels->tx_count = hinic_hwdev_num_qps(hwdev);
- channels->other_count = 0;
- channels->combined_count = 0;
-}
-
-static const struct ethtool_ops hinic_ethtool_ops = {
- .get_link_ksettings = hinic_get_link_ksettings,
- .get_drvinfo = hinic_get_drvinfo,
- .get_link = ethtool_op_get_link,
- .get_ringparam = hinic_get_ringparam,
- .get_channels = hinic_get_channels,
-};
+static int set_features(struct hinic_dev *nic_dev,
+ netdev_features_t pre_features,
+ netdev_features_t features, bool force_change);
static void update_rx_stats(struct hinic_dev *nic_dev, struct hinic_rxq *rxq)
{
@@ -207,6 +83,9 @@ static void update_rx_stats(struct hinic_dev *nic_dev, struct hinic_rxq *rxq)
u64_stats_update_begin(&nic_rx_stats->syncp);
nic_rx_stats->bytes += rx_stats.bytes;
nic_rx_stats->pkts += rx_stats.pkts;
+ nic_rx_stats->errors += rx_stats.errors;
+ nic_rx_stats->csum_errors += rx_stats.csum_errors;
+ nic_rx_stats->other_errors += rx_stats.other_errors;
u64_stats_update_end(&nic_rx_stats->syncp);
hinic_rxq_clean_stats(rxq);
@@ -227,6 +106,7 @@ static void update_tx_stats(struct hinic_dev *nic_dev, struct hinic_txq *txq)
nic_tx_stats->tx_busy += tx_stats.tx_busy;
nic_tx_stats->tx_wake += tx_stats.tx_wake;
nic_tx_stats->tx_dropped += tx_stats.tx_dropped;
+ nic_tx_stats->big_frags_pkts += tx_stats.big_frags_pkts;
u64_stats_update_end(&nic_tx_stats->syncp);
hinic_txq_clean_stats(txq);
@@ -363,11 +243,135 @@ static void free_rxqs(struct hinic_dev *nic_dev)
nic_dev->rxqs = NULL;
}
+static int hinic_configure_max_qnum(struct hinic_dev *nic_dev)
+{
+ int err;
+
+ err = hinic_set_max_qnum(nic_dev, nic_dev->hwdev->nic_cap.max_qps);
+ if (err)
+ return err;
+
+ return 0;
+}
+
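+/* Program a random default hash key, an evenly spread indirection table,
+ * the RSS type and the hash engine into the firmware template, then
+ * enable RSS.
+ */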
+static int hinic_rss_init(struct hinic_dev *nic_dev)
+{
+ u8 default_rss_key[HINIC_RSS_KEY_SIZE];
+ u8 tmpl_idx = nic_dev->rss_tmpl_idx;
+ u32 *indir_tbl;
+ int err, i;
+
+ indir_tbl = kcalloc(HINIC_RSS_INDIR_SIZE, sizeof(u32), GFP_KERNEL);
+ if (!indir_tbl)
+ return -ENOMEM;
+
+ netdev_rss_key_fill(default_rss_key, sizeof(default_rss_key));
+ for (i = 0; i < HINIC_RSS_INDIR_SIZE; i++)
+ indir_tbl[i] = ethtool_rxfh_indir_default(i, nic_dev->num_rss);
+
+ err = hinic_rss_set_template_tbl(nic_dev, tmpl_idx, default_rss_key);
+ if (err)
+ goto out;
+
+ err = hinic_rss_set_indir_tbl(nic_dev, tmpl_idx, indir_tbl);
+ if (err)
+ goto out;
+
+ err = hinic_set_rss_type(nic_dev, tmpl_idx, nic_dev->rss_type);
+ if (err)
+ goto out;
+
+ err = hinic_rss_set_hash_engine(nic_dev, tmpl_idx,
+ nic_dev->rss_hash_engine);
+ if (err)
+ goto out;
+
+ err = hinic_rss_cfg(nic_dev, 1, tmpl_idx);
+ if (err)
+ goto out;
+
+out:
+ kfree(indir_tbl);
+ return err;
+}
+
+static void hinic_rss_deinit(struct hinic_dev *nic_dev)
+{
+ hinic_rss_cfg(nic_dev, 0, nic_dev->rss_tmpl_idx);
+}
+
+static void hinic_init_rss_parameters(struct hinic_dev *nic_dev)
+{
+ nic_dev->rss_hash_engine = HINIC_RSS_HASH_ENGINE_TYPE_XOR;
+ nic_dev->rss_type.tcp_ipv6_ext = 1;
+ nic_dev->rss_type.ipv6_ext = 1;
+ nic_dev->rss_type.tcp_ipv6 = 1;
+ nic_dev->rss_type.ipv6 = 1;
+ nic_dev->rss_type.tcp_ipv4 = 1;
+ nic_dev->rss_type.ipv4 = 1;
+ nic_dev->rss_type.udp_ipv6 = 1;
+ nic_dev->rss_type.udp_ipv4 = 1;
+}
+
+static void hinic_enable_rss(struct hinic_dev *nic_dev)
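+/* Choose the number of queue pairs used for RSS: bounded by the device
+ * maximum and the number of online CPUs on the device's NUMA node (all
+ * online CPUs if none are local), falling back to a single queue when no
+ * RSS template can be allocated.
+ */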
+{
+ struct net_device *netdev = nic_dev->netdev;
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic_hwif *hwif = hwdev->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+ int i, node, err = 0;
+ u16 num_cpus = 0;
+
+ nic_dev->max_qps = hinic_hwdev_max_num_qps(hwdev);
+ if (nic_dev->max_qps <= 1) {
+ nic_dev->flags &= ~HINIC_RSS_ENABLE;
+ nic_dev->rss_limit = nic_dev->max_qps;
+ nic_dev->num_qps = nic_dev->max_qps;
+ nic_dev->num_rss = nic_dev->max_qps;
+
+ return;
+ }
+
+ err = hinic_rss_template_alloc(nic_dev, &nic_dev->rss_tmpl_idx);
+ if (err) {
+ netif_err(nic_dev, drv, netdev,
+ "Failed to alloc tmpl_idx for rss, can't enable rss for this function\n");
+ nic_dev->flags &= ~HINIC_RSS_ENABLE;
+ nic_dev->max_qps = 1;
+ nic_dev->rss_limit = nic_dev->max_qps;
+ nic_dev->num_qps = nic_dev->max_qps;
+ nic_dev->num_rss = nic_dev->max_qps;
+
+ return;
+ }
+
+ nic_dev->flags |= HINIC_RSS_ENABLE;
+
+ for (i = 0; i < num_online_cpus(); i++) {
+ node = cpu_to_node(i);
+ if (node == dev_to_node(&pdev->dev))
+ num_cpus++;
+ }
+
+ if (!num_cpus)
+ num_cpus = num_online_cpus();
+
+ nic_dev->num_qps = min_t(u16, nic_dev->max_qps, num_cpus);
+
+ nic_dev->rss_limit = nic_dev->num_qps;
+ nic_dev->num_rss = nic_dev->num_qps;
+
+ hinic_init_rss_parameters(nic_dev);
+ err = hinic_rss_init(nic_dev);
+ if (err)
+ netif_err(nic_dev, drv, netdev, "Failed to init rss\n");
+}
+
static int hinic_open(struct net_device *netdev)
{
struct hinic_dev *nic_dev = netdev_priv(netdev);
enum hinic_port_link_state link_state;
- int err, ret, num_qps;
+ int err, ret;
if (!(nic_dev->flags & HINIC_INTF_UP)) {
err = hinic_hwdev_ifup(nic_dev->hwdev);
@@ -392,9 +396,17 @@ static int hinic_open(struct net_device *netdev)
goto err_create_rxqs;
}
- num_qps = hinic_hwdev_num_qps(nic_dev->hwdev);
- netif_set_real_num_tx_queues(netdev, num_qps);
- netif_set_real_num_rx_queues(netdev, num_qps);
+ hinic_enable_rss(nic_dev);
+
+ err = hinic_configure_max_qnum(nic_dev);
+ if (err) {
+ netif_err(nic_dev, drv, nic_dev->netdev,
+ "Failed to configure the maximum number of queues\n");
+ goto err_port_state;
+ }
+
+ netif_set_real_num_tx_queues(netdev, nic_dev->num_qps);
+ netif_set_real_num_rx_queues(netdev, nic_dev->num_qps);
err = hinic_port_set_state(nic_dev, HINIC_PORT_ENABLE);
if (err) {
@@ -450,9 +462,12 @@ err_func_port_state:
if (ret)
netif_warn(nic_dev, drv, netdev,
"Failed to revert port state\n");
-
err_port_state:
free_rxqs(nic_dev);
+ if (nic_dev->flags & HINIC_RSS_ENABLE) {
+ hinic_rss_deinit(nic_dev);
+ hinic_rss_template_free(nic_dev, nic_dev->rss_tmpl_idx);
+ }
err_create_rxqs:
free_txqs(nic_dev);
@@ -496,6 +511,11 @@ static int hinic_close(struct net_device *netdev)
return err;
}
+ if (nic_dev->flags & HINIC_RSS_ENABLE) {
+ hinic_rss_deinit(nic_dev);
+ hinic_rss_template_free(nic_dev, nic_dev->rss_tmpl_idx);
+ }
+
free_rxqs(nic_dev);
free_txqs(nic_dev);
@@ -715,7 +735,6 @@ static void set_rx_mode(struct work_struct *work)
{
struct hinic_rx_mode_work *rx_mode_work = work_to_rx_mode_work(work);
struct hinic_dev *nic_dev = rx_mode_work_to_nic_dev(rx_mode_work);
- struct netdev_hw_addr *ha;
netif_info(nic_dev, drv, nic_dev->netdev, "set rx mode work\n");
@@ -723,9 +742,6 @@ static void set_rx_mode(struct work_struct *work)
__dev_uc_sync(nic_dev->netdev, add_mac_addr, remove_mac_addr);
__dev_mc_sync(nic_dev->netdev, add_mac_addr, remove_mac_addr);
-
- netdev_for_each_mc_addr(ha, nic_dev->netdev)
- add_mac_addr(nic_dev->netdev, ha->addr);
}
static void hinic_set_rx_mode(struct net_device *netdev)
@@ -776,12 +792,36 @@ static void hinic_get_stats64(struct net_device *netdev,
stats->rx_bytes = nic_rx_stats->bytes;
stats->rx_packets = nic_rx_stats->pkts;
+ stats->rx_errors = nic_rx_stats->errors;
stats->tx_bytes = nic_tx_stats->bytes;
stats->tx_packets = nic_tx_stats->pkts;
stats->tx_errors = nic_tx_stats->tx_dropped;
}
+static int hinic_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+
+ return set_features(nic_dev, nic_dev->netdev->features,
+ features, false);
+}
+
+static netdev_features_t hinic_fix_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+
+ /* If Rx checksum is disabled, then LRO should also be disabled */
+ if (!(features & NETIF_F_RXCSUM)) {
+ netif_info(nic_dev, drv, netdev, "disabling LRO as RXCSUM is off\n");
+ features &= ~NETIF_F_LRO;
+ }
+
+ return features;
+}
+
static const struct net_device_ops hinic_netdev_ops = {
.ndo_open = hinic_open,
.ndo_stop = hinic_close,
@@ -794,13 +834,16 @@ static const struct net_device_ops hinic_netdev_ops = {
.ndo_start_xmit = hinic_xmit_frame,
.ndo_tx_timeout = hinic_tx_timeout,
.ndo_get_stats64 = hinic_get_stats64,
+ .ndo_fix_features = hinic_fix_features,
+ .ndo_set_features = hinic_set_features,
};
static void netdev_features_init(struct net_device *netdev)
{
netdev->hw_features = NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM |
NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |
- NETIF_F_RXCSUM;
+ NETIF_F_RXCSUM | NETIF_F_LRO |
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
netdev->vlan_features = netdev->hw_features;
@@ -873,6 +916,18 @@ static int set_features(struct hinic_dev *nic_dev,
if (changed & NETIF_F_RXCSUM)
err = hinic_set_rx_csum_offload(nic_dev, csum_en);
+ if (changed & NETIF_F_LRO) {
+ err = hinic_set_rx_lro_state(nic_dev,
+ !!(features & NETIF_F_LRO),
+ HINIC_LRO_RX_TIMER_DEFAULT,
+ HINIC_LRO_MAX_WQE_NUM_DEFAULT);
+ }
+
+ if (changed & NETIF_F_HW_VLAN_CTAG_RX)
+ err = hinic_set_rx_vlan_offload(nic_dev,
+ !!(features &
+ NETIF_F_HW_VLAN_CTAG_RX));
+
return err;
}
@@ -912,8 +967,8 @@ static int nic_dev_init(struct pci_dev *pdev)
goto err_alloc_etherdev;
}
+ hinic_set_ethtool_ops(netdev);
netdev->netdev_ops = &hinic_netdev_ops;
- netdev->ethtool_ops = &hinic_ethtool_ops;
netdev->max_mtu = ETH_MAX_MTU;
nic_dev = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.c b/drivers/net/ethernet/huawei/hinic/hinic_port.c
index 4b3b7d39e437..1e389a004e50 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_port.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_port.c
@@ -430,3 +430,641 @@ int hinic_set_rx_csum_offload(struct hinic_dev *nic_dev, u32 en)
return 0;
}
+
+int hinic_set_rx_vlan_offload(struct hinic_dev *nic_dev, u8 en)
+{
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic_vlan_cfg vlan_cfg;
+ struct hinic_hwif *hwif;
+ struct pci_dev *pdev;
+ u16 out_size;
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ hwif = hwdev->hwif;
+ pdev = hwif->pdev;
+ vlan_cfg.func_id = HINIC_HWIF_FUNC_IDX(hwif);
+ vlan_cfg.vlan_rx_offload = en;
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_RX_VLAN_OFFLOAD,
+ &vlan_cfg, sizeof(vlan_cfg),
+ &vlan_cfg, &out_size);
+ if (err || !out_size || vlan_cfg.status) {
+ dev_err(&pdev->dev,
+ "Failed to set rx vlan offload, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, vlan_cfg.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hinic_set_max_qnum(struct hinic_dev *nic_dev, u8 num_rqs)
+{
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic_hwif *hwif = hwdev->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+ struct hinic_rq_num rq_num = { 0 };
+ u16 out_size = sizeof(rq_num);
+ int err;
+
+ rq_num.func_id = HINIC_HWIF_FUNC_IDX(hwif);
+ rq_num.num_rqs = num_rqs;
+ rq_num.rq_depth = ilog2(HINIC_SQ_DEPTH);
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_RQ_IQ_MAP,
+ &rq_num, sizeof(rq_num),
+ &rq_num, &out_size);
+ if (err || !out_size || rq_num.status) {
+ dev_err(&pdev->dev,
+ "Failed to rxq number, ret = %d\n",
+ rq_num.status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
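+/* Enable or disable hardware LRO aggregation for IPv4/IPv6 and set the
+ * LRO aggregation limit (max WQE number per aggregated packet).
+ */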
+static int hinic_set_rx_lro(struct hinic_dev *nic_dev, u8 ipv4_en, u8 ipv6_en,
+ u8 max_wqe_num)
+{
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic_hwif *hwif = hwdev->hwif;
+ struct hinic_lro_config lro_cfg = { 0 };
+ struct pci_dev *pdev = hwif->pdev;
+ u16 out_size = sizeof(lro_cfg);
+ int err;
+
+ lro_cfg.func_id = HINIC_HWIF_FUNC_IDX(hwif);
+ lro_cfg.lro_ipv4_en = ipv4_en;
+ lro_cfg.lro_ipv6_en = ipv6_en;
+ lro_cfg.lro_max_wqe_num = max_wqe_num;
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_LRO,
+ &lro_cfg, sizeof(lro_cfg),
+ &lro_cfg, &out_size);
+ if (err || !out_size || lro_cfg.status) {
+ dev_err(&pdev->dev,
+ "Failed to set lro offload, ret = %d\n",
+ lro_cfg.status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hinic_set_rx_lro_timer(struct hinic_dev *nic_dev, u32 timer_value)
+{
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic_lro_timer lro_timer = { 0 };
+ struct hinic_hwif *hwif = hwdev->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+ u16 out_size = sizeof(lro_timer);
+ int err;
+
+ lro_timer.status = 0;
+ lro_timer.type = 0;
+ lro_timer.enable = 1;
+ lro_timer.timer = timer_value;
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_LRO_TIMER,
+ &lro_timer, sizeof(lro_timer),
+ &lro_timer, &out_size);
+ if (lro_timer.status == 0xFF) {
+ /* status 0xFF means the firmware does not support this command; treat it as OK */
+ lro_timer.status = 0;
+ dev_dbg(&pdev->dev,
+ "Setting the LRO timer is not supported by the current firmware version, the 1ms default will be used\n");
+ }
+
+ if (err || !out_size || lro_timer.status) {
+ dev_err(&pdev->dev,
+ "Failed to set lro timer, ret = %d\n",
+ lro_timer.status);
+
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hinic_set_rx_lro_state(struct hinic_dev *nic_dev, u8 lro_en,
+ u32 lro_timer, u32 wqe_num)
+{
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ u8 ipv4_en;
+ u8 ipv6_en;
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ ipv4_en = lro_en ? 1 : 0;
+ ipv6_en = lro_en ? 1 : 0;
+
+ err = hinic_set_rx_lro(nic_dev, ipv4_en, ipv6_en, (u8)wqe_num);
+ if (err)
+ return err;
+
+ err = hinic_set_rx_lro_timer(nic_dev, lro_timer);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int hinic_rss_set_indir_tbl(struct hinic_dev *nic_dev, u32 tmpl_idx,
+ const u32 *indir_table)
+{
+ struct hinic_rss_indirect_tbl *indir_tbl;
+ struct hinic_func_to_io *func_to_io;
+ struct hinic_cmdq_buf cmd_buf;
+ struct hinic_hwdev *hwdev;
+ struct hinic_hwif *hwif;
+ struct pci_dev *pdev;
+ u32 indir_size;
+ u64 out_param;
+ int err, i;
+ u32 *temp;
+
+ hwdev = nic_dev->hwdev;
+ func_to_io = &hwdev->func_to_io;
+ hwif = hwdev->hwif;
+ pdev = hwif->pdev;
+
+ err = hinic_alloc_cmdq_buf(&func_to_io->cmdqs, &cmd_buf);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to allocate cmdq buf\n");
+ return err;
+ }
+
+ cmd_buf.size = sizeof(*indir_tbl);
+
+ indir_tbl = cmd_buf.buf;
+ indir_tbl->group_index = cpu_to_be32(tmpl_idx);
+
+ for (i = 0; i < HINIC_RSS_INDIR_SIZE; i++) {
+ indir_tbl->entry[i] = indir_table[i];
+
+ if (0x3 == (i & 0x3)) {
+ temp = (u32 *)&indir_tbl->entry[i - 3];
+ *temp = cpu_to_be32(*temp);
+ }
+ }
+
+ /* write the RSS indirection table via the command queue, in two halves */
+ indir_size = HINIC_RSS_INDIR_SIZE / 2;
+ indir_tbl->offset = 0;
+ indir_tbl->size = cpu_to_be32(indir_size);
+
+ err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC,
+ HINIC_UCODE_CMD_SET_RSS_INDIR_TABLE,
+ &cmd_buf, &out_param);
+ if (err || out_param != 0) {
+ dev_err(&pdev->dev, "Failed to set rss indir table\n");
+ err = -EFAULT;
+ goto free_buf;
+ }
+
+ indir_tbl->offset = cpu_to_be32(indir_size);
+ indir_tbl->size = cpu_to_be32(indir_size);
+ memcpy(&indir_tbl->entry[0], &indir_tbl->entry[indir_size], indir_size);
+
+ err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC,
+ HINIC_UCODE_CMD_SET_RSS_INDIR_TABLE,
+ &cmd_buf, &out_param);
+ if (err || out_param != 0) {
+ dev_err(&pdev->dev, "Failed to set rss indir table\n");
+ err = -EFAULT;
+ }
+
+free_buf:
+ hinic_free_cmdq_buf(&func_to_io->cmdqs, &cmd_buf);
+
+ return err;
+}
+
+int hinic_rss_get_indir_tbl(struct hinic_dev *nic_dev, u32 tmpl_idx,
+ u32 *indir_table)
+{
+ struct hinic_rss_indir_table rss_cfg = { 0 };
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic_hwif *hwif = hwdev->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+ u16 out_size = sizeof(rss_cfg);
+ int err = 0, i;
+
+ rss_cfg.func_id = HINIC_HWIF_FUNC_IDX(hwif);
+ rss_cfg.template_id = tmpl_idx;
+
+ err = hinic_port_msg_cmd(hwdev,
+ HINIC_PORT_CMD_GET_RSS_TEMPLATE_INDIR_TBL,
+ &rss_cfg, sizeof(rss_cfg), &rss_cfg,
+ &out_size);
+ if (err || !out_size || rss_cfg.status) {
+ dev_err(&pdev->dev, "Failed to get indir table, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, rss_cfg.status, out_size);
+ return -EINVAL;
+ }
+
+ hinic_be32_to_cpu(rss_cfg.indir, HINIC_RSS_INDIR_SIZE);
+ for (i = 0; i < HINIC_RSS_INDIR_SIZE; i++)
+ indir_table[i] = rss_cfg.indir[i];
+
+ return 0;
+}
+
+int hinic_set_rss_type(struct hinic_dev *nic_dev, u32 tmpl_idx,
+ struct hinic_rss_type rss_type)
+{
+ struct hinic_rss_context_tbl *ctx_tbl;
+ struct hinic_func_to_io *func_to_io;
+ struct hinic_cmdq_buf cmd_buf;
+ struct hinic_hwdev *hwdev;
+ struct hinic_hwif *hwif;
+ struct pci_dev *pdev;
+ u64 out_param;
+ u32 ctx = 0;
+ int err;
+
+ hwdev = nic_dev->hwdev;
+ func_to_io = &hwdev->func_to_io;
+ hwif = hwdev->hwif;
+ pdev = hwif->pdev;
+
+ err = hinic_alloc_cmdq_buf(&func_to_io->cmdqs, &cmd_buf);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to allocate cmd buf\n");
+ return -ENOMEM;
+ }
+
+ ctx |= HINIC_RSS_TYPE_SET(1, VALID) |
+ HINIC_RSS_TYPE_SET(rss_type.ipv4, IPV4) |
+ HINIC_RSS_TYPE_SET(rss_type.ipv6, IPV6) |
+ HINIC_RSS_TYPE_SET(rss_type.ipv6_ext, IPV6_EXT) |
+ HINIC_RSS_TYPE_SET(rss_type.tcp_ipv4, TCP_IPV4) |
+ HINIC_RSS_TYPE_SET(rss_type.tcp_ipv6, TCP_IPV6) |
+ HINIC_RSS_TYPE_SET(rss_type.tcp_ipv6_ext, TCP_IPV6_EXT) |
+ HINIC_RSS_TYPE_SET(rss_type.udp_ipv4, UDP_IPV4) |
+ HINIC_RSS_TYPE_SET(rss_type.udp_ipv6, UDP_IPV6);
+
+ cmd_buf.size = sizeof(struct hinic_rss_context_tbl);
+
+ ctx_tbl = (struct hinic_rss_context_tbl *)cmd_buf.buf;
+ ctx_tbl->group_index = cpu_to_be32(tmpl_idx);
+ ctx_tbl->offset = 0;
+ ctx_tbl->size = sizeof(u32);
+ ctx_tbl->size = cpu_to_be32(ctx_tbl->size);
+ ctx_tbl->rsvd = 0;
+ ctx_tbl->ctx = cpu_to_be32(ctx);
+
+ /* write the RSS context (hash type) table via the command queue */
+ err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC,
+ HINIC_UCODE_CMD_SET_RSS_CONTEXT_TABLE,
+ &cmd_buf, &out_param);
+
+ hinic_free_cmdq_buf(&func_to_io->cmdqs, &cmd_buf);
+
+ if (err || out_param != 0) {
+ dev_err(&pdev->dev, "Failed to set rss context table, err: %d\n",
+ err);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int hinic_get_rss_type(struct hinic_dev *nic_dev, u32 tmpl_idx,
+ struct hinic_rss_type *rss_type)
+{
+ struct hinic_rss_context_table ctx_tbl = { 0 };
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic_hwif *hwif;
+ struct pci_dev *pdev;
+ u16 out_size = sizeof(ctx_tbl);
+ int err;
+
+ if (!hwdev || !rss_type)
+ return -EINVAL;
+
+ hwif = hwdev->hwif;
+ pdev = hwif->pdev;
+
+ ctx_tbl.func_id = HINIC_HWIF_FUNC_IDX(hwif);
+ ctx_tbl.template_id = tmpl_idx;
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_RSS_CTX_TBL,
+ &ctx_tbl, sizeof(ctx_tbl),
+ &ctx_tbl, &out_size);
+ if (err || !out_size || ctx_tbl.status) {
+ dev_err(&pdev->dev, "Failed to get hash type, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, ctx_tbl.status, out_size);
+ return -EINVAL;
+ }
+
+ rss_type->ipv4 = HINIC_RSS_TYPE_GET(ctx_tbl.context, IPV4);
+ rss_type->ipv6 = HINIC_RSS_TYPE_GET(ctx_tbl.context, IPV6);
+ rss_type->ipv6_ext = HINIC_RSS_TYPE_GET(ctx_tbl.context, IPV6_EXT);
+ rss_type->tcp_ipv4 = HINIC_RSS_TYPE_GET(ctx_tbl.context, TCP_IPV4);
+ rss_type->tcp_ipv6 = HINIC_RSS_TYPE_GET(ctx_tbl.context, TCP_IPV6);
+ rss_type->tcp_ipv6_ext = HINIC_RSS_TYPE_GET(ctx_tbl.context,
+ TCP_IPV6_EXT);
+ rss_type->udp_ipv4 = HINIC_RSS_TYPE_GET(ctx_tbl.context, UDP_IPV4);
+ rss_type->udp_ipv6 = HINIC_RSS_TYPE_GET(ctx_tbl.context, UDP_IPV6);
+
+ return 0;
+}
+
+int hinic_rss_set_template_tbl(struct hinic_dev *nic_dev, u32 template_id,
+ const u8 *temp)
+{
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic_hwif *hwif = hwdev->hwif;
+ struct hinic_rss_key rss_key = { 0 };
+ struct pci_dev *pdev = hwif->pdev;
+ u16 out_size;
+ int err;
+
+ rss_key.func_id = HINIC_HWIF_FUNC_IDX(hwif);
+ rss_key.template_id = template_id;
+ memcpy(rss_key.key, temp, HINIC_RSS_KEY_SIZE);
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_RSS_TEMPLATE_TBL,
+ &rss_key, sizeof(rss_key),
+ &rss_key, &out_size);
+ if (err || !out_size || rss_key.status) {
+ dev_err(&pdev->dev,
+ "Failed to set rss hash key, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, rss_key.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hinic_rss_get_template_tbl(struct hinic_dev *nic_dev, u32 tmpl_idx,
+ u8 *temp)
+{
+ struct hinic_rss_template_key temp_key = { 0 };
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic_hwif *hwif;
+ struct pci_dev *pdev;
+ u16 out_size = sizeof(temp_key);
+ int err;
+
+ if (!hwdev || !temp)
+ return -EINVAL;
+
+ hwif = hwdev->hwif;
+ pdev = hwif->pdev;
+
+ temp_key.func_id = HINIC_HWIF_FUNC_IDX(hwif);
+ temp_key.template_id = tmpl_idx;
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_RSS_TEMPLATE_TBL,
+ &temp_key, sizeof(temp_key),
+ &temp_key, &out_size);
+ if (err || !out_size || temp_key.status) {
+ dev_err(&pdev->dev, "Failed to set hash key, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, temp_key.status, out_size);
+ return -EINVAL;
+ }
+
+ memcpy(temp, temp_key.key, HINIC_RSS_KEY_SIZE);
+
+ return 0;
+}
+
+int hinic_rss_set_hash_engine(struct hinic_dev *nic_dev, u8 template_id,
+ u8 type)
+{
+ struct hinic_rss_engine_type rss_engine = { 0 };
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic_hwif *hwif = hwdev->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+ u16 out_size;
+ int err;
+
+ rss_engine.func_id = HINIC_HWIF_FUNC_IDX(hwif);
+ rss_engine.hash_engine = type;
+ rss_engine.template_id = template_id;
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_RSS_HASH_ENGINE,
+ &rss_engine, sizeof(rss_engine),
+ &rss_engine, &out_size);
+ if (err || !out_size || rss_engine.status) {
+ dev_err(&pdev->dev,
+ "Failed to set hash engine, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, rss_engine.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hinic_rss_get_hash_engine(struct hinic_dev *nic_dev, u8 tmpl_idx, u8 *type)
+{
+ struct hinic_rss_engine_type hash_type = { 0 };
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic_hwif *hwif;
+ struct pci_dev *pdev;
+ u16 out_size = sizeof(hash_type);
+ int err;
+
+ if (!hwdev || !type)
+ return -EINVAL;
+
+ hwif = hwdev->hwif;
+ pdev = hwif->pdev;
+
+ hash_type.func_id = HINIC_HWIF_FUNC_IDX(hwif);
+ hash_type.template_id = tmpl_idx;
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_RSS_HASH_ENGINE,
+ &hash_type, sizeof(hash_type),
+ &hash_type, &out_size);
+ if (err || !out_size || hash_type.status) {
+ dev_err(&pdev->dev, "Failed to get hash engine, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, hash_type.status, out_size);
+ return -EINVAL;
+ }
+
+ *type = hash_type.hash_engine;
+ return 0;
+}
+
+int hinic_rss_cfg(struct hinic_dev *nic_dev, u8 rss_en, u8 template_id)
+{
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic_rss_config rss_cfg = { 0 };
+ struct hinic_hwif *hwif = hwdev->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+ u16 out_size;
+ int err;
+
+ rss_cfg.func_id = HINIC_HWIF_FUNC_IDX(hwif);
+ rss_cfg.rss_en = rss_en;
+ rss_cfg.template_id = template_id;
+ rss_cfg.rq_priority_number = 0;
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_RSS_CFG,
+ &rss_cfg, sizeof(rss_cfg),
+ &rss_cfg, &out_size);
+ if (err || !out_size || rss_cfg.status) {
+ dev_err(&pdev->dev,
+ "Failed to set rss cfg, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, rss_cfg.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hinic_rss_template_alloc(struct hinic_dev *nic_dev, u8 *tmpl_idx)
+{
+ struct hinic_rss_template_mgmt template_mgmt = { 0 };
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic_hwif *hwif = hwdev->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+ u16 out_size;
+ int err;
+
+ template_mgmt.func_id = HINIC_HWIF_FUNC_IDX(hwif);
+ template_mgmt.cmd = NIC_RSS_CMD_TEMP_ALLOC;
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_RSS_TEMP_MGR,
+ &template_mgmt, sizeof(template_mgmt),
+ &template_mgmt, &out_size);
+ if (err || !out_size || template_mgmt.status) {
+ dev_err(&pdev->dev, "Failed to alloc rss template, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, template_mgmt.status, out_size);
+ return -EINVAL;
+ }
+
+ *tmpl_idx = template_mgmt.template_id;
+
+ return 0;
+}
+
+int hinic_rss_template_free(struct hinic_dev *nic_dev, u8 tmpl_idx)
+{
+ struct hinic_rss_template_mgmt template_mgmt = { 0 };
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic_hwif *hwif = hwdev->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+ u16 out_size;
+ int err;
+
+ template_mgmt.func_id = HINIC_HWIF_FUNC_IDX(hwif);
+ template_mgmt.template_id = tmpl_idx;
+ template_mgmt.cmd = NIC_RSS_CMD_TEMP_FREE;
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_RSS_TEMP_MGR,
+ &template_mgmt, sizeof(template_mgmt),
+ &template_mgmt, &out_size);
+ if (err || !out_size || template_mgmt.status) {
+ dev_err(&pdev->dev, "Failed to free rss template, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, template_mgmt.status, out_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hinic_get_vport_stats(struct hinic_dev *nic_dev,
+ struct hinic_vport_stats *stats)
+{
+ struct hinic_cmd_vport_stats vport_stats = { 0 };
+ struct hinic_port_stats_info stats_info = { 0 };
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic_hwif *hwif = hwdev->hwif;
+ u16 out_size = sizeof(vport_stats);
+ struct pci_dev *pdev = hwif->pdev;
+ int err;
+
+ stats_info.stats_version = HINIC_PORT_STATS_VERSION;
+ stats_info.func_id = HINIC_HWIF_FUNC_IDX(hwif);
+ stats_info.stats_size = sizeof(vport_stats);
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_VPORT_STAT,
+ &stats_info, sizeof(stats_info),
+ &vport_stats, &out_size);
+ if (err || !out_size || vport_stats.status) {
+ dev_err(&pdev->dev,
+ "Failed to get function statistics, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, vport_stats.status, out_size);
+ return -EFAULT;
+ }
+
+ memcpy(stats, &vport_stats.stats, sizeof(*stats));
+ return 0;
+}
+
+int hinic_get_phy_port_stats(struct hinic_dev *nic_dev,
+ struct hinic_phy_port_stats *stats)
+{
+ struct hinic_port_stats_info stats_info = { 0 };
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic_hwif *hwif = hwdev->hwif;
+ struct hinic_port_stats *port_stats;
+ u16 out_size = sizeof(*port_stats);
+ struct pci_dev *pdev = hwif->pdev;
+ int err;
+
+ port_stats = kzalloc(sizeof(*port_stats), GFP_KERNEL);
+ if (!port_stats)
+ return -ENOMEM;
+
+ stats_info.stats_version = HINIC_PORT_STATS_VERSION;
+ stats_info.stats_size = sizeof(*port_stats);
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_PORT_STATISTICS,
+ &stats_info, sizeof(stats_info),
+ port_stats, &out_size);
+ if (err || !out_size || port_stats->status) {
+ dev_err(&pdev->dev,
+ "Failed to get port statistics, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, port_stats->status, out_size);
+ err = -EINVAL;
+ goto out;
+ }
+
+ memcpy(stats, &port_stats->stats, sizeof(*stats));
+
+out:
+ kfree(port_stats);
+
+ return err;
+}
+
+int hinic_get_mgmt_version(struct hinic_dev *nic_dev, u8 *mgmt_ver)
+{
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic_version_info up_ver = {0};
+ struct hinic_hwif *hwif;
+ struct pci_dev *pdev;
+ u16 out_size;
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ hwif = hwdev->hwif;
+ pdev = hwif->pdev;
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_MGMT_VERSION,
+ &up_ver, sizeof(up_ver), &up_ver,
+ &out_size);
+ if (err || !out_size || up_ver.status) {
+ dev_err(&pdev->dev,
+ "Failed to get mgmt version, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, up_ver.status, out_size);
+ return -EINVAL;
+ }
+
+ snprintf(mgmt_ver, HINIC_MGMT_VERSION_MAX_LEN, "%s", up_ver.ver);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.h b/drivers/net/ethernet/huawei/hinic/hinic_port.h
index c562afd206be..44772fd47fc1 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_port.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_port.h
@@ -13,6 +13,22 @@
#include "hinic_dev.h"
+#define HINIC_RSS_KEY_SIZE 40
+#define HINIC_RSS_INDIR_SIZE 256
+#define HINIC_PORT_STATS_VERSION 0
+#define HINIC_FW_VERSION_NAME 16
+#define HINIC_COMPILE_TIME_LEN 20
+#define HINIC_MGMT_VERSION_MAX_LEN 32
+
+struct hinic_version_info {
+ u8 status;
+ u8 version;
+ u8 rsvd[6];
+
+ u8 ver[HINIC_FW_VERSION_NAME];
+ u8 time[HINIC_COMPILE_TIME_LEN];
+};
+
enum hinic_rx_mode {
HINIC_RX_MODE_UC = BIT(0),
HINIC_RX_MODE_MC = BIT(1),
@@ -183,6 +199,313 @@ struct hinic_checksum_offload {
u16 rsvd1;
u32 rx_csum_offload;
};
+
+struct hinic_rq_num {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1[33];
+ u32 num_rqs;
+ u32 rq_depth;
+};
+
+struct hinic_lro_config {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+ u8 lro_ipv4_en;
+ u8 lro_ipv6_en;
+ u8 lro_max_wqe_num;
+ u8 resv2[13];
+};
+
+struct hinic_lro_timer {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u8 type; /* 0: set timer value, 1: get timer value */
+ u8 enable; /* must be 1 when setting the LRO timer */
+ u16 rsvd1;
+ u32 timer;
+};
+
+struct hinic_vlan_cfg {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u8 vlan_rx_offload;
+ u8 rsvd1[5];
+};
+
+struct hinic_rss_template_mgmt {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u8 cmd;
+ u8 template_id;
+ u8 rsvd1[4];
+};
+
+struct hinic_rss_template_key {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u8 template_id;
+ u8 rsvd1;
+ u8 key[HINIC_RSS_KEY_SIZE];
+};
+
+struct hinic_rss_context_tbl {
+ u32 group_index;
+ u32 offset;
+ u32 size;
+ u32 rsvd;
+ u32 ctx;
+};
+
+struct hinic_rss_context_table {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u8 template_id;
+ u8 rsvd1;
+ u32 context;
+};
+
+struct hinic_rss_indirect_tbl {
+ u32 group_index;
+ u32 offset;
+ u32 size;
+ u32 rsvd;
+ u8 entry[HINIC_RSS_INDIR_SIZE];
+};
+
+struct hinic_rss_indir_table {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u8 template_id;
+ u8 rsvd1;
+ u8 indir[HINIC_RSS_INDIR_SIZE];
+};
+
+struct hinic_rss_key {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u8 template_id;
+ u8 rsvd1;
+ u8 key[HINIC_RSS_KEY_SIZE];
+};
+
+struct hinic_rss_engine_type {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u8 template_id;
+ u8 hash_engine;
+ u8 rsvd1[4];
+};
+
+struct hinic_rss_config {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u8 rss_en;
+ u8 template_id;
+ u8 rq_priority_number;
+ u8 rsvd1[11];
+};
+
+struct hinic_stats {
+ char name[ETH_GSTRING_LEN];
+ u32 size;
+ int offset;
+};
+
+struct hinic_vport_stats {
+ u64 tx_unicast_pkts_vport;
+ u64 tx_unicast_bytes_vport;
+ u64 tx_multicast_pkts_vport;
+ u64 tx_multicast_bytes_vport;
+ u64 tx_broadcast_pkts_vport;
+ u64 tx_broadcast_bytes_vport;
+
+ u64 rx_unicast_pkts_vport;
+ u64 rx_unicast_bytes_vport;
+ u64 rx_multicast_pkts_vport;
+ u64 rx_multicast_bytes_vport;
+ u64 rx_broadcast_pkts_vport;
+ u64 rx_broadcast_bytes_vport;
+
+ u64 tx_discard_vport;
+ u64 rx_discard_vport;
+ u64 tx_err_vport;
+ u64 rx_err_vport;
+};
+
+struct hinic_phy_port_stats {
+ u64 mac_rx_total_pkt_num;
+ u64 mac_rx_total_oct_num;
+ u64 mac_rx_bad_pkt_num;
+ u64 mac_rx_bad_oct_num;
+ u64 mac_rx_good_pkt_num;
+ u64 mac_rx_good_oct_num;
+ u64 mac_rx_uni_pkt_num;
+ u64 mac_rx_multi_pkt_num;
+ u64 mac_rx_broad_pkt_num;
+
+ u64 mac_tx_total_pkt_num;
+ u64 mac_tx_total_oct_num;
+ u64 mac_tx_bad_pkt_num;
+ u64 mac_tx_bad_oct_num;
+ u64 mac_tx_good_pkt_num;
+ u64 mac_tx_good_oct_num;
+ u64 mac_tx_uni_pkt_num;
+ u64 mac_tx_multi_pkt_num;
+ u64 mac_tx_broad_pkt_num;
+
+ u64 mac_rx_fragment_pkt_num;
+ u64 mac_rx_undersize_pkt_num;
+ u64 mac_rx_undermin_pkt_num;
+ u64 mac_rx_64_oct_pkt_num;
+ u64 mac_rx_65_127_oct_pkt_num;
+ u64 mac_rx_128_255_oct_pkt_num;
+ u64 mac_rx_256_511_oct_pkt_num;
+ u64 mac_rx_512_1023_oct_pkt_num;
+ u64 mac_rx_1024_1518_oct_pkt_num;
+ u64 mac_rx_1519_2047_oct_pkt_num;
+ u64 mac_rx_2048_4095_oct_pkt_num;
+ u64 mac_rx_4096_8191_oct_pkt_num;
+ u64 mac_rx_8192_9216_oct_pkt_num;
+ u64 mac_rx_9217_12287_oct_pkt_num;
+ u64 mac_rx_12288_16383_oct_pkt_num;
+ u64 mac_rx_1519_max_bad_pkt_num;
+ u64 mac_rx_1519_max_good_pkt_num;
+ u64 mac_rx_oversize_pkt_num;
+ u64 mac_rx_jabber_pkt_num;
+
+ u64 mac_rx_pause_num;
+ u64 mac_rx_pfc_pkt_num;
+ u64 mac_rx_pfc_pri0_pkt_num;
+ u64 mac_rx_pfc_pri1_pkt_num;
+ u64 mac_rx_pfc_pri2_pkt_num;
+ u64 mac_rx_pfc_pri3_pkt_num;
+ u64 mac_rx_pfc_pri4_pkt_num;
+ u64 mac_rx_pfc_pri5_pkt_num;
+ u64 mac_rx_pfc_pri6_pkt_num;
+ u64 mac_rx_pfc_pri7_pkt_num;
+ u64 mac_rx_control_pkt_num;
+ u64 mac_rx_y1731_pkt_num;
+ u64 mac_rx_sym_err_pkt_num;
+ u64 mac_rx_fcs_err_pkt_num;
+ u64 mac_rx_send_app_good_pkt_num;
+ u64 mac_rx_send_app_bad_pkt_num;
+
+ u64 mac_tx_fragment_pkt_num;
+ u64 mac_tx_undersize_pkt_num;
+ u64 mac_tx_undermin_pkt_num;
+ u64 mac_tx_64_oct_pkt_num;
+ u64 mac_tx_65_127_oct_pkt_num;
+ u64 mac_tx_128_255_oct_pkt_num;
+ u64 mac_tx_256_511_oct_pkt_num;
+ u64 mac_tx_512_1023_oct_pkt_num;
+ u64 mac_tx_1024_1518_oct_pkt_num;
+ u64 mac_tx_1519_2047_oct_pkt_num;
+ u64 mac_tx_2048_4095_oct_pkt_num;
+ u64 mac_tx_4096_8191_oct_pkt_num;
+ u64 mac_tx_8192_9216_oct_pkt_num;
+ u64 mac_tx_9217_12287_oct_pkt_num;
+ u64 mac_tx_12288_16383_oct_pkt_num;
+ u64 mac_tx_1519_max_bad_pkt_num;
+ u64 mac_tx_1519_max_good_pkt_num;
+ u64 mac_tx_oversize_pkt_num;
+ u64 mac_tx_jabber_pkt_num;
+
+ u64 mac_tx_pause_num;
+ u64 mac_tx_pfc_pkt_num;
+ u64 mac_tx_pfc_pri0_pkt_num;
+ u64 mac_tx_pfc_pri1_pkt_num;
+ u64 mac_tx_pfc_pri2_pkt_num;
+ u64 mac_tx_pfc_pri3_pkt_num;
+ u64 mac_tx_pfc_pri4_pkt_num;
+ u64 mac_tx_pfc_pri5_pkt_num;
+ u64 mac_tx_pfc_pri6_pkt_num;
+ u64 mac_tx_pfc_pri7_pkt_num;
+ u64 mac_tx_control_pkt_num;
+ u64 mac_tx_y1731_pkt_num;
+ u64 mac_tx_1588_pkt_num;
+ u64 mac_tx_err_all_pkt_num;
+ u64 mac_tx_from_app_good_pkt_num;
+ u64 mac_tx_from_app_bad_pkt_num;
+
+ u64 mac_rx_higig2_ext_pkt_num;
+ u64 mac_rx_higig2_message_pkt_num;
+ u64 mac_rx_higig2_error_pkt_num;
+ u64 mac_rx_higig2_cpu_ctrl_pkt_num;
+ u64 mac_rx_higig2_unicast_pkt_num;
+ u64 mac_rx_higig2_broadcast_pkt_num;
+ u64 mac_rx_higig2_l2_multicast_pkt_num;
+ u64 mac_rx_higig2_l3_multicast_pkt_num;
+
+ u64 mac_tx_higig2_message_pkt_num;
+ u64 mac_tx_higig2_ext_pkt_num;
+ u64 mac_tx_higig2_cpu_ctrl_pkt_num;
+ u64 mac_tx_higig2_unicast_pkt_num;
+ u64 mac_tx_higig2_broadcast_pkt_num;
+ u64 mac_tx_higig2_l2_multicast_pkt_num;
+ u64 mac_tx_higig2_l3_multicast_pkt_num;
+};
+
+struct hinic_port_stats_info {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+ u32 stats_version;
+ u32 stats_size;
+};
+
+struct hinic_port_stats {
+ u8 status;
+ u8 version;
+ u8 rsvd[6];
+
+ struct hinic_phy_port_stats stats;
+};
+
+struct hinic_cmd_vport_stats {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ struct hinic_vport_stats stats;
+};
+
int hinic_port_add_mac(struct hinic_dev *nic_dev, const u8 *addr,
u16 vlan_id);
@@ -211,7 +534,55 @@ int hinic_port_set_func_state(struct hinic_dev *nic_dev,
int hinic_port_get_cap(struct hinic_dev *nic_dev,
struct hinic_port_cap *port_cap);
+int hinic_set_max_qnum(struct hinic_dev *nic_dev, u8 num_rqs);
+
int hinic_port_set_tso(struct hinic_dev *nic_dev, enum hinic_tso_state state);
int hinic_set_rx_csum_offload(struct hinic_dev *nic_dev, u32 en);
+
+int hinic_set_rx_lro_state(struct hinic_dev *nic_dev, u8 lro_en,
+ u32 lro_timer, u32 wqe_num);
+
+int hinic_set_rss_type(struct hinic_dev *nic_dev, u32 tmpl_idx,
+ struct hinic_rss_type rss_type);
+
+int hinic_rss_set_indir_tbl(struct hinic_dev *nic_dev, u32 tmpl_idx,
+ const u32 *indir_table);
+
+int hinic_rss_set_template_tbl(struct hinic_dev *nic_dev, u32 template_id,
+ const u8 *temp);
+
+int hinic_rss_set_hash_engine(struct hinic_dev *nic_dev, u8 template_id,
+ u8 type);
+
+int hinic_rss_cfg(struct hinic_dev *nic_dev, u8 rss_en, u8 template_id);
+
+int hinic_rss_template_alloc(struct hinic_dev *nic_dev, u8 *tmpl_idx);
+
+int hinic_rss_template_free(struct hinic_dev *nic_dev, u8 tmpl_idx);
+
+void hinic_set_ethtool_ops(struct net_device *netdev);
+
+int hinic_get_rss_type(struct hinic_dev *nic_dev, u32 tmpl_idx,
+ struct hinic_rss_type *rss_type);
+
+int hinic_rss_get_indir_tbl(struct hinic_dev *nic_dev, u32 tmpl_idx,
+ u32 *indir_table);
+
+int hinic_rss_get_template_tbl(struct hinic_dev *nic_dev, u32 tmpl_idx,
+ u8 *temp);
+
+int hinic_rss_get_hash_engine(struct hinic_dev *nic_dev, u8 tmpl_idx,
+ u8 *type);
+
+int hinic_get_phy_port_stats(struct hinic_dev *nic_dev,
+ struct hinic_phy_port_stats *stats);
+
+int hinic_get_vport_stats(struct hinic_dev *nic_dev,
+ struct hinic_vport_stats *stats);
+
+int hinic_set_rx_vlan_offload(struct hinic_dev *nic_dev, u8 en);
+
+int hinic_get_mgmt_version(struct hinic_dev *nic_dev, u8 *mgmt_ver);
+
#endif
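
The RSS helpers declared above program a HINIC_RSS_KEY_SIZE-byte hash key and a HINIC_RSS_INDIR_SIZE-entry indirection table. As a minimal, driver-independent sketch (plain user-space C rather than the kernel API; the round-robin fill and the 4-queue count are illustrative assumptions, only the 256-entry table size comes from the header above), this is the usual way such a table is filled so that hash values spread evenly across the active receive queues:

#include <stdio.h>

#define RSS_INDIR_SIZE 256	/* matches HINIC_RSS_INDIR_SIZE above */

/* Fill the indirection table round-robin over num_rxqs queues; the low
 * bits of the RSS hash later index this table and the entry selects the
 * receive queue. */
static void fill_indir_tbl(unsigned char *indir, unsigned int num_rxqs)
{
	unsigned int i;

	for (i = 0; i < RSS_INDIR_SIZE; i++)
		indir[i] = i % num_rxqs;
}

int main(void)
{
	unsigned char indir[RSS_INDIR_SIZE];
	unsigned int i;

	fill_indir_tbl(indir, 4);		/* assume 4 RX queues */
	for (i = 0; i < 8; i++)			/* prints 0 1 2 3 0 1 2 3 */
		printf("slot %u -> rxq %u\n", i, indir[i]);
	return 0;
}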
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
index 0850ea83d6c1..56ea6d692f1c 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
@@ -18,6 +18,7 @@
#include <linux/dma-mapping.h>
#include <linux/prefetch.h>
#include <linux/cpumask.h>
+#include <linux/if_vlan.h>
#include <asm/barrier.h>
#include "hinic_common.h"
@@ -36,6 +37,15 @@
#define RX_IRQ_NO_RESEND_TIMER 0
#define HINIC_RX_BUFFER_WRITE 16
+#define HINIC_RX_IPV6_PKT 7
+#define LRO_PKT_HDR_LEN_IPV4 66
+#define LRO_PKT_HDR_LEN_IPV6 86
+#define LRO_REPLENISH_THLD 256
+
+#define LRO_PKT_HDR_LEN(cqe) \
+ (HINIC_GET_RX_PKT_TYPE(be32_to_cpu((cqe)->offload_type)) == \
+ HINIC_RX_IPV6_PKT ? LRO_PKT_HDR_LEN_IPV6 : LRO_PKT_HDR_LEN_IPV4)
+
/**
* hinic_rxq_clean_stats - Clean the statistics of specific queue
* @rxq: Logical Rx Queue
@@ -47,6 +57,9 @@ void hinic_rxq_clean_stats(struct hinic_rxq *rxq)
u64_stats_update_begin(&rxq_stats->syncp);
rxq_stats->pkts = 0;
rxq_stats->bytes = 0;
+ rxq_stats->errors = 0;
+ rxq_stats->csum_errors = 0;
+ rxq_stats->other_errors = 0;
u64_stats_update_end(&rxq_stats->syncp);
}
@@ -65,6 +78,10 @@ void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats)
start = u64_stats_fetch_begin(&rxq_stats->syncp);
stats->pkts = rxq_stats->pkts;
stats->bytes = rxq_stats->bytes;
+ stats->errors = rxq_stats->csum_errors +
+ rxq_stats->other_errors;
+ stats->csum_errors = rxq_stats->csum_errors;
+ stats->other_errors = rxq_stats->other_errors;
} while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
u64_stats_update_end(&stats->syncp);
}
@@ -81,27 +98,25 @@ static void rxq_stats_init(struct hinic_rxq *rxq)
hinic_rxq_clean_stats(rxq);
}
-static void rx_csum(struct hinic_rxq *rxq, u16 cons_idx,
+static void rx_csum(struct hinic_rxq *rxq, u32 status,
struct sk_buff *skb)
{
struct net_device *netdev = rxq->netdev;
- struct hinic_rq_cqe *cqe;
- struct hinic_rq *rq;
u32 csum_err;
- u32 status;
- rq = rxq->rq;
- cqe = rq->cqe[cons_idx];
- status = be32_to_cpu(cqe->status);
csum_err = HINIC_RQ_CQE_STATUS_GET(status, CSUM_ERR);
if (!(netdev->features & NETIF_F_RXCSUM))
return;
- if (!csum_err)
+ if (!csum_err) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
- else
+ } else {
+ if (!(csum_err & (HINIC_RX_CSUM_HW_CHECK_NONE |
+ HINIC_RX_CSUM_IPSU_OTHER_ERR)))
+ rxq->rxq_stats.csum_errors++;
skb->ip_summed = CHECKSUM_NONE;
+ }
}
/**
* rx_alloc_skb - allocate skb and map it to dma address
@@ -311,13 +326,21 @@ static int rx_recv_jumbo_pkt(struct hinic_rxq *rxq, struct sk_buff *head_skb,
static int rxq_recv(struct hinic_rxq *rxq, int budget)
{
struct hinic_qp *qp = container_of(rxq->rq, struct hinic_qp, rq);
+ struct net_device *netdev = rxq->netdev;
u64 pkt_len = 0, rx_bytes = 0;
+ struct hinic_rq *rq = rxq->rq;
struct hinic_rq_wqe *rq_wqe;
unsigned int free_wqebbs;
+ struct hinic_rq_cqe *cqe;
int num_wqes, pkts = 0;
struct hinic_sge sge;
+ unsigned int status;
struct sk_buff *skb;
- u16 ci;
+ u32 offload_type;
+ u16 ci, num_lro;
+ u16 num_wqe = 0;
+ u32 vlan_len;
+ u16 vid;
while (pkts < budget) {
num_wqes = 0;
@@ -327,11 +350,13 @@ static int rxq_recv(struct hinic_rxq *rxq, int budget)
if (!rq_wqe)
break;
+ cqe = rq->cqe[ci];
+ status = be32_to_cpu(cqe->status);
hinic_rq_get_sge(rxq->rq, rq_wqe, ci, &sge);
rx_unmap_skb(rxq, hinic_sge_to_dma(&sge));
- rx_csum(rxq, ci, skb);
+ rx_csum(rxq, status, skb);
prefetch(skb->data);
@@ -345,9 +370,17 @@ static int rxq_recv(struct hinic_rxq *rxq, int budget)
HINIC_RX_BUF_SZ, ci);
}
- hinic_rq_put_wqe(rxq->rq, ci,
+ hinic_rq_put_wqe(rq, ci,
(num_wqes + 1) * HINIC_RQ_WQE_SIZE);
+ offload_type = be32_to_cpu(cqe->offload_type);
+ vlan_len = be32_to_cpu(cqe->len);
+ if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ HINIC_GET_RX_VLAN_OFFLOAD_EN(offload_type)) {
+ vid = HINIC_GET_RX_VLAN_TAG(vlan_len);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
+ }
+
skb_record_rx_queue(skb, qp->q_id);
skb->protocol = eth_type_trans(skb, rxq->netdev);
@@ -355,6 +388,21 @@ static int rxq_recv(struct hinic_rxq *rxq, int budget)
pkts++;
rx_bytes += pkt_len;
+
+ num_lro = HINIC_GET_RX_NUM_LRO(status);
+ if (num_lro) {
+ rx_bytes += ((num_lro - 1) *
+ LRO_PKT_HDR_LEN(cqe));
+
+ num_wqe +=
+ (u16)(pkt_len >> rxq->rx_buff_shift) +
+ ((pkt_len & (rxq->buf_len - 1)) ? 1 : 0);
+ }
+
+ cqe->status = 0;
+
+ if (num_wqe >= LRO_REPLENISH_THLD)
+ break;
}
free_wqebbs = hinic_get_rq_free_wqebbs(rxq->rq);
@@ -469,20 +517,20 @@ int hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq,
struct net_device *netdev)
{
struct hinic_qp *qp = container_of(rq, struct hinic_qp, rq);
- int err, pkts, irqname_len;
+ int err, pkts;
rxq->netdev = netdev;
rxq->rq = rq;
+ rxq->buf_len = HINIC_RX_BUF_SZ;
+ rxq->rx_buff_shift = ilog2(HINIC_RX_BUF_SZ);
rxq_stats_init(rxq);
- irqname_len = snprintf(NULL, 0, "hinic_rxq%d", qp->q_id) + 1;
- rxq->irq_name = devm_kzalloc(&netdev->dev, irqname_len, GFP_KERNEL);
+ rxq->irq_name = devm_kasprintf(&netdev->dev, GFP_KERNEL,
+ "hinic_rxq%d", qp->q_id);
if (!rxq->irq_name)
return -ENOMEM;
- sprintf(rxq->irq_name, "hinic_rxq%d", qp->q_id);
-
pkts = rx_alloc_pkts(rxq);
if (!pkts) {
err = -ENOMEM;
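
In the LRO branch of rxq_recv() above, the number of receive buffers consumed by an aggregated packet is computed as (pkt_len >> rx_buff_shift) plus one when the masked remainder is non-zero. That equals ceil(pkt_len / buf_len) only because the buffer length is a power of two and rx_buff_shift is set to ilog2(HINIC_RX_BUF_SZ) in hinic_init_rxq(). A small stand-alone check of the identity (plain C; the 2048-byte buffer size is chosen here only for illustration):

#include <assert.h>
#include <stdio.h>

/* ceil(pkt_len / buf_len) computed as in rxq_recv(), assuming buf_len is
 * a power of two and shift == log2(buf_len). */
static unsigned int bufs_used(unsigned long pkt_len, unsigned int shift,
			      unsigned int buf_len)
{
	return (pkt_len >> shift) + ((pkt_len & (buf_len - 1)) ? 1 : 0);
}

int main(void)
{
	const unsigned int buf_len = 2048, shift = 11;	/* 2048 == 1 << 11 */
	unsigned long len;

	for (len = 1; len <= 10000; len++)
		assert(bufs_used(len, shift, buf_len) ==
		       (len + buf_len - 1) / buf_len);

	printf("9000-byte LRO aggregate -> %u buffers of %u bytes\n",
	       bufs_used(9000, shift, buf_len), buf_len);
	return 0;
}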
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.h b/drivers/net/ethernet/huawei/hinic/hinic_rx.h
index bc797498a87f..507dcbae9085 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.h
@@ -21,7 +21,10 @@
struct hinic_rxq_stats {
u64 pkts;
u64 bytes;
-
+ u64 errors;
+ u64 csum_errors;
+ u64 other_errors;
+ u64 alloc_skb_err;
struct u64_stats_sync syncp;
};
@@ -32,6 +35,8 @@ struct hinic_rxq {
struct hinic_rxq_stats rxq_stats;
char *irq_name;
+ u16 buf_len;
+ u32 rx_buff_shift;
struct napi_struct napi;
};
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
index b9fd8d720349..9c78251f9c39 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
@@ -83,6 +83,7 @@ void hinic_txq_clean_stats(struct hinic_txq *txq)
txq_stats->tx_busy = 0;
txq_stats->tx_wake = 0;
txq_stats->tx_dropped = 0;
+ txq_stats->big_frags_pkts = 0;
u64_stats_update_end(&txq_stats->syncp);
}
@@ -104,6 +105,7 @@ void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats)
stats->tx_busy = txq_stats->tx_busy;
stats->tx_wake = txq_stats->tx_wake;
stats->tx_dropped = txq_stats->tx_dropped;
+ stats->big_frags_pkts = txq_stats->big_frags_pkts;
} while (u64_stats_fetch_retry(&txq_stats->syncp, start));
u64_stats_update_end(&stats->syncp);
}
@@ -405,10 +407,20 @@ static int offload_csum(struct hinic_sq_task *task, u32 *queue_info,
return 1;
}
+static void offload_vlan(struct hinic_sq_task *task, u32 *queue_info,
+ u16 vlan_tag, u16 vlan_pri)
+{
+ task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(vlan_tag, VLAN_TAG) |
+ HINIC_SQ_TASK_INFO0_SET(1U, VLAN_OFFLOAD);
+
+ *queue_info |= HINIC_SQ_CTRL_SET(vlan_pri, QUEUE_INFO_PRI);
+}
+
static int hinic_tx_offload(struct sk_buff *skb, struct hinic_sq_task *task,
u32 *queue_info)
{
enum hinic_offload_type offload = 0;
+ u16 vlan_tag;
int enabled;
enabled = offload_tso(task, queue_info, skb);
@@ -422,6 +434,13 @@ static int hinic_tx_offload(struct sk_buff *skb, struct hinic_sq_task *task,
return -EPROTONOSUPPORT;
}
+ if (unlikely(skb_vlan_tag_present(skb))) {
+ vlan_tag = skb_vlan_tag_get(skb);
+ offload_vlan(task, queue_info, vlan_tag,
+ vlan_tag >> VLAN_PRIO_SHIFT);
+ offload |= TX_OFFLOAD_VLAN;
+ }
+
if (offload)
hinic_task_set_l2hdr(task, skb_network_offset(skb));
@@ -464,6 +483,12 @@ netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
}
nr_sges = skb_shinfo(skb)->nr_frags + 1;
+ if (nr_sges > 17) {
+ u64_stats_update_begin(&txq->txq_stats.syncp);
+ txq->txq_stats.big_frags_pkts++;
+ u64_stats_update_end(&txq->txq_stats.syncp);
+ }
+
if (nr_sges > txq->max_sges) {
netdev_err(netdev, "Too many Tx sges\n");
goto skb_error;
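
offload_vlan() above splits the 16-bit VLAN TCI returned by skb_vlan_tag_get() into the tag written to the task descriptor and the priority written to queue_info, using vlan_tag >> VLAN_PRIO_SHIFT. A minimal user-space reminder of that TCI layout (the two constants mirror what <linux/if_vlan.h> defines; the sample TCI value is invented):

#include <stdio.h>

#define VLAN_PRIO_SHIFT	13		/* PCP sits in the top 3 bits */
#define VLAN_VID_MASK	0x0fff		/* low 12 bits carry the VLAN ID */

int main(void)
{
	unsigned int tci = (5u << VLAN_PRIO_SHIFT) | 100;	/* prio 5, VID 100 */

	printf("prio=%u vid=%u\n",
	       tci >> VLAN_PRIO_SHIFT,		/* as in offload_vlan() */
	       tci & VLAN_VID_MASK);
	return 0;
}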
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.h b/drivers/net/ethernet/huawei/hinic/hinic_tx.h
index ca5f537fc383..f158b7db7fb8 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.h
@@ -21,6 +21,7 @@ struct hinic_txq_stats {
u64 tx_busy;
u64 tx_wake;
u64 tx_dropped;
+ u64 big_frags_pkts;
struct u64_stats_sync syncp;
};
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 551de8c2fef2..f703fa58458e 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -3019,7 +3019,7 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
* applicable for weak-ordered memory model archs,
* such as IA-64).
*/
- wmb();
+ dma_wmb();
tx_ring->next_to_use = i;
}
@@ -4540,7 +4540,7 @@ e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
* applicable for weak-ordered memory model archs,
* such as IA-64).
*/
- wmb();
+ dma_wmb();
writel(i, adapter->hw.hw_addr + rx_ring->rdt);
}
}
@@ -4655,7 +4655,7 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
* applicable for weak-ordered memory model archs,
* such as IA-64).
*/
- wmb();
+ dma_wmb();
writel(i, hw->hw_addr + rx_ring->rdt);
}
}
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index f86d55657959..4b103cca8a39 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -680,7 +680,7 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
ew32(TCTL, E1000_TCTL_PSP);
e1e_flush();
- usleep_range(10000, 20000);
+ usleep_range(10000, 11000);
ctrl = er32(CTRL);
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index b9309302c29e..2c1bab377b2a 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -959,7 +959,7 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
ew32(TCTL, tctl);
e1e_flush();
- usleep_range(10000, 20000);
+ usleep_range(10000, 11000);
/* Must acquire the MDIO ownership before MAC reset.
* Ownership defaults to firmware after a reset.
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index fd550dee4982..63c3c79380a1 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -222,6 +222,9 @@
#define E1000_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */
#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Master Req status */
+/* PCIm function state */
+#define E1000_STATUS_PCIM_STATE 0x40000000
+
#define HALF_DUPLEX 1
#define FULL_DUPLEX 2
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index be13227f1697..34cd67951aec 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -186,12 +186,13 @@ struct e1000_phy_regs {
/* board specific private data structure */
struct e1000_adapter {
- struct timer_list watchdog_timer;
struct timer_list phy_info_timer;
struct timer_list blink_timer;
struct work_struct reset_task;
- struct work_struct watchdog_task;
+ struct delayed_work watchdog_task;
+
+ struct workqueue_struct *e1000_workqueue;
const struct e1000_info *ei;
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 02ebf208f48b..08342698386d 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -1014,7 +1014,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
/* Disable all the interrupts */
ew32(IMC, 0xFFFFFFFF);
e1e_flush();
- usleep_range(10000, 20000);
+ usleep_range(10000, 11000);
/* Test each interrupt */
for (i = 0; i < 10; i++) {
@@ -1046,7 +1046,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
ew32(IMC, mask);
ew32(ICS, mask);
e1e_flush();
- usleep_range(10000, 20000);
+ usleep_range(10000, 11000);
if (adapter->test_icr & mask) {
*data = 3;
@@ -1064,7 +1064,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
ew32(IMS, mask);
ew32(ICS, mask);
e1e_flush();
- usleep_range(10000, 20000);
+ usleep_range(10000, 11000);
if (!(adapter->test_icr & mask)) {
*data = 4;
@@ -1082,7 +1082,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
ew32(IMC, ~mask & 0x00007FFF);
ew32(ICS, ~mask & 0x00007FFF);
e1e_flush();
- usleep_range(10000, 20000);
+ usleep_range(10000, 11000);
if (adapter->test_icr) {
*data = 5;
@@ -1094,7 +1094,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
/* Disable all the interrupts */
ew32(IMC, 0xFFFFFFFF);
e1e_flush();
- usleep_range(10000, 20000);
+ usleep_range(10000, 11000);
/* Unhook test interrupt handler */
free_irq(irq, netdev);
@@ -1470,7 +1470,7 @@ static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter)
*/
ew32(SCTL, E1000_SCTL_ENABLE_SERDES_LOOPBACK);
e1e_flush();
- usleep_range(10000, 20000);
+ usleep_range(10000, 11000);
return 0;
}
@@ -1584,7 +1584,7 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
hw->phy.media_type == e1000_media_type_internal_serdes) {
ew32(SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
e1e_flush();
- usleep_range(10000, 20000);
+ usleep_range(10000, 11000);
break;
}
/* Fall Through */
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index cdae0efde8e6..395b05701480 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -271,7 +271,7 @@ static void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
u16 count = 20;
do {
- usleep_range(5000, 10000);
+ usleep_range(5000, 6000);
} while (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LPCD) && count--);
msleep(30);
@@ -405,7 +405,7 @@ out:
/* Ungate automatic PHY configuration on non-managed 82579 */
if ((hw->mac.type == e1000_pch2lan) &&
!(fwsm & E1000_ICH_FWSM_FW_VALID)) {
- usleep_range(10000, 20000);
+ usleep_range(10000, 11000);
e1000_gate_hw_phy_config_ich8lan(hw, false);
}
@@ -531,7 +531,7 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
phy->id = 0;
while ((e1000_phy_unknown == e1000e_get_phy_type_from_id(phy->id)) &&
(i++ < 100)) {
- usleep_range(1000, 2000);
+ usleep_range(1000, 1100);
ret_val = e1000e_get_phy_id(hw);
if (ret_val)
return ret_val;
@@ -1244,7 +1244,7 @@ static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
goto out;
}
- usleep_range(10000, 20000);
+ usleep_range(10000, 11000);
}
e_dbg("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
@@ -1999,7 +1999,7 @@ static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
while ((blocked = !(er32(FWSM) & E1000_ICH_FWSM_RSPCIPHY)) &&
(i++ < 30))
- usleep_range(10000, 20000);
+ usleep_range(10000, 11000);
return blocked ? E1000_BLK_PHY_RESET : 0;
}
@@ -2818,7 +2818,7 @@ static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
return 0;
/* Allow time for h/w to get to quiescent state after reset */
- usleep_range(10000, 20000);
+ usleep_range(10000, 11000);
/* Perform any necessary post-reset workarounds */
switch (hw->mac.type) {
@@ -2854,7 +2854,7 @@ static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
if (hw->mac.type == e1000_pch2lan) {
/* Ungate automatic PHY configuration on non-managed 82579 */
if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
- usleep_range(10000, 20000);
+ usleep_range(10000, 11000);
e1000_gate_hw_phy_config_ich8lan(hw, false);
}
@@ -3875,7 +3875,7 @@ release:
*/
if (!ret_val) {
nvm->ops.reload(hw);
- usleep_range(10000, 20000);
+ usleep_range(10000, 11000);
}
out:
@@ -4026,7 +4026,7 @@ release:
*/
if (!ret_val) {
nvm->ops.reload(hw);
- usleep_range(10000, 20000);
+ usleep_range(10000, 11000);
}
out:
@@ -4650,7 +4650,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
ew32(TCTL, E1000_TCTL_PSP);
e1e_flush();
- usleep_range(10000, 20000);
+ usleep_range(10000, 11000);
/* Workaround for ICH8 bit corruption issue in FIFO memory */
if (hw->mac.type == e1000_ich8lan) {
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index 4abd55d646c5..e531976f8a67 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -797,7 +797,7 @@ static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
* milliseconds even if the other end is doing it in SW).
*/
for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) {
- usleep_range(10000, 20000);
+ usleep_range(10000, 11000);
status = er32(STATUS);
if (status & E1000_STATUS_LU)
break;
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 0e09bede42a2..e4baa13b3cda 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1780,7 +1780,8 @@ static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data)
}
/* guard against interrupt when we're going down */
if (!test_bit(__E1000_DOWN, &adapter->state))
- mod_timer(&adapter->watchdog_timer, jiffies + 1);
+ queue_delayed_work(adapter->e1000_workqueue,
+ &adapter->watchdog_task, 1);
}
/* Reset on uncorrectable ECC error */
@@ -1860,7 +1861,8 @@ static irqreturn_t e1000_intr(int __always_unused irq, void *data)
}
/* guard against interrupt when we're going down */
if (!test_bit(__E1000_DOWN, &adapter->state))
- mod_timer(&adapter->watchdog_timer, jiffies + 1);
+ queue_delayed_work(adapter->e1000_workqueue,
+ &adapter->watchdog_task, 1);
}
/* Reset on uncorrectable ECC error */
@@ -1905,7 +1907,8 @@ static irqreturn_t e1000_msix_other(int __always_unused irq, void *data)
hw->mac.get_link_status = true;
/* guard against interrupt when we're going down */
if (!test_bit(__E1000_DOWN, &adapter->state))
- mod_timer(&adapter->watchdog_timer, jiffies + 1);
+ queue_delayed_work(adapter->e1000_workqueue,
+ &adapter->watchdog_task, 1);
}
if (!test_bit(__E1000_DOWN, &adapter->state))
@@ -3208,7 +3211,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
ew32(RCTL, rctl & ~E1000_RCTL_EN);
e1e_flush();
- usleep_range(10000, 20000);
+ usleep_range(10000, 11000);
if (adapter->flags2 & FLAG2_DMA_BURST) {
/* set the writeback threshold (only takes effect if the RDTR
@@ -4046,12 +4049,12 @@ void e1000e_reset(struct e1000_adapter *adapter)
case e1000_pch_lpt:
case e1000_pch_spt:
case e1000_pch_cnp:
- fc->refresh_time = 0x0400;
+ fc->refresh_time = 0xFFFF;
+ fc->pause_time = 0xFFFF;
if (adapter->netdev->mtu <= ETH_DATA_LEN) {
fc->high_water = 0x05C20;
fc->low_water = 0x05048;
- fc->pause_time = 0x0650;
break;
}
@@ -4208,7 +4211,7 @@ void e1000e_up(struct e1000_adapter *adapter)
e1000_configure_msix(adapter);
e1000_irq_enable(adapter);
- netif_start_queue(adapter->netdev);
+ /* Tx queue started by watchdog timer when link is up */
e1000e_trigger_lsc(adapter);
}
@@ -4272,13 +4275,12 @@ void e1000e_down(struct e1000_adapter *adapter, bool reset)
/* flush both disables and wait for them to finish */
e1e_flush();
- usleep_range(10000, 20000);
+ usleep_range(10000, 11000);
e1000_irq_disable(adapter);
napi_synchronize(&adapter->napi);
- del_timer_sync(&adapter->watchdog_timer);
del_timer_sync(&adapter->phy_info_timer);
spin_lock(&adapter->stats64_lock);
@@ -4310,7 +4312,7 @@ void e1000e_reinit_locked(struct e1000_adapter *adapter)
{
might_sleep();
while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
- usleep_range(1000, 2000);
+ usleep_range(1000, 1100);
e1000e_down(adapter, true);
e1000e_up(adapter);
clear_bit(__E1000_RESETTING, &adapter->state);
@@ -4606,6 +4608,7 @@ int e1000e_open(struct net_device *netdev)
pm_runtime_get_sync(&pdev->dev);
netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
/* allocate transmit descriptors */
err = e1000e_setup_tx_resources(adapter->tx_ring);
@@ -4666,7 +4669,6 @@ int e1000e_open(struct net_device *netdev)
e1000_irq_enable(adapter);
adapter->tx_hang_recheck = false;
- netif_start_queue(netdev);
hw->mac.get_link_status = true;
pm_runtime_put(&pdev->dev);
@@ -4707,7 +4709,7 @@ int e1000e_close(struct net_device *netdev)
int count = E1000_CHECK_RESET_COUNT;
while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
- usleep_range(10000, 20000);
+ usleep_range(10000, 11000);
WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
@@ -5150,31 +5152,18 @@ static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
}
}
-/**
- * e1000_watchdog - Timer Call-back
- * @data: pointer to adapter cast into an unsigned long
- **/
-static void e1000_watchdog(struct timer_list *t)
-{
- struct e1000_adapter *adapter = from_timer(adapter, t, watchdog_timer);
-
- /* Do the rest outside of interrupt context */
- schedule_work(&adapter->watchdog_task);
-
- /* TODO: make this use queue_delayed_work() */
-}
-
static void e1000_watchdog_task(struct work_struct *work)
{
struct e1000_adapter *adapter = container_of(work,
struct e1000_adapter,
- watchdog_task);
+ watchdog_task.work);
struct net_device *netdev = adapter->netdev;
struct e1000_mac_info *mac = &adapter->hw.mac;
struct e1000_phy_info *phy = &adapter->hw.phy;
struct e1000_ring *tx_ring = adapter->tx_ring;
+ u32 dmoff_exit_timeout = 100, tries = 0;
struct e1000_hw *hw = &adapter->hw;
- u32 link, tctl;
+ u32 link, tctl, pcim_state;
if (test_bit(__E1000_DOWN, &adapter->state))
return;
@@ -5199,6 +5188,21 @@ static void e1000_watchdog_task(struct work_struct *work)
/* Cancel scheduled suspend requests. */
pm_runtime_resume(netdev->dev.parent);
+ /* Checking if MAC is in DMoff state */
+ pcim_state = er32(STATUS);
+ while (pcim_state & E1000_STATUS_PCIM_STATE) {
+ if (tries++ == dmoff_exit_timeout) {
+ e_dbg("Error in exiting dmoff\n");
+ break;
+ }
+ usleep_range(10000, 20000);
+ pcim_state = er32(STATUS);
+
+ /* Checking if MAC exited DMoff state */
+ if (!(pcim_state & E1000_STATUS_PCIM_STATE))
+ e1000_phy_hw_reset(&adapter->hw);
+ }
+
/* update snapshot of PHY registers on LSC */
e1000_phy_read_status(adapter);
mac->ops.get_link_up_info(&adapter->hw,
@@ -5288,6 +5292,7 @@ static void e1000_watchdog_task(struct work_struct *work)
if (phy->ops.cfg_on_link_up)
phy->ops.cfg_on_link_up(hw);
+ netif_wake_queue(netdev);
netif_carrier_on(netdev);
if (!test_bit(__E1000_DOWN, &adapter->state))
@@ -5301,6 +5306,7 @@ static void e1000_watchdog_task(struct work_struct *work)
/* Link status message must follow this format */
pr_info("%s NIC Link is Down\n", adapter->netdev->name);
netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
if (!test_bit(__E1000_DOWN, &adapter->state))
mod_timer(&adapter->phy_info_timer,
round_jiffies(jiffies + 2 * HZ));
@@ -5308,13 +5314,8 @@ static void e1000_watchdog_task(struct work_struct *work)
/* 8000ES2LAN requires a Rx packet buffer work-around
* on link down event; reset the controller to flush
* the Rx packet buffer.
- *
- * If the link is lost the controller stops DMA, but
- * if there is queued Tx work it cannot be done. So
- * reset the controller to flush the Tx packet buffers.
*/
- if ((adapter->flags & FLAG_RX_NEEDS_RESTART) ||
- e1000_desc_unused(tx_ring) + 1 < tx_ring->count)
+ if (adapter->flags & FLAG_RX_NEEDS_RESTART)
adapter->flags |= FLAG_RESTART_NOW;
else
pm_schedule_suspend(netdev->dev.parent,
@@ -5337,6 +5338,14 @@ link_up:
adapter->gotc_old = adapter->stats.gotc;
spin_unlock(&adapter->stats64_lock);
+ /* If the link is lost the controller stops DMA, but
+ * if there is queued Tx work it cannot be done. So
+ * reset the controller to flush the Tx packet buffers.
+ */
+ if (!netif_carrier_ok(netdev) &&
+ (e1000_desc_unused(tx_ring) + 1 < tx_ring->count))
+ adapter->flags |= FLAG_RESTART_NOW;
+
/* If reset is necessary, do it outside of interrupt context. */
if (adapter->flags & FLAG_RESTART_NOW) {
schedule_work(&adapter->reset_task);
@@ -5395,8 +5404,9 @@ link_up:
/* Reset the timer */
if (!test_bit(__E1000_DOWN, &adapter->state))
- mod_timer(&adapter->watchdog_timer,
- round_jiffies(jiffies + 2 * HZ));
+ queue_delayed_work(adapter->e1000_workqueue,
+ &adapter->watchdog_task,
+ round_jiffies(2 * HZ));
}
#define E1000_TX_FLAGS_CSUM 0x00000001
@@ -6016,7 +6026,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
}
while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
- usleep_range(1000, 2000);
+ usleep_range(1000, 1100);
/* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */
adapter->max_frame_size = max_frame;
e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
@@ -6296,7 +6306,7 @@ static int e1000e_pm_freeze(struct device *dev)
int count = E1000_CHECK_RESET_COUNT;
while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
- usleep_range(10000, 20000);
+ usleep_range(10000, 11000);
WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
@@ -6711,7 +6721,7 @@ static int e1000e_pm_runtime_suspend(struct device *dev)
int count = E1000_CHECK_RESET_COUNT;
while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
- usleep_range(10000, 20000);
+ usleep_range(10000, 11000);
WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
@@ -7251,11 +7261,21 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_eeprom;
}
- timer_setup(&adapter->watchdog_timer, e1000_watchdog, 0);
+ adapter->e1000_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
+ e1000e_driver_name);
+
+ if (!adapter->e1000_workqueue) {
+ err = -ENOMEM;
+ goto err_workqueue;
+ }
+
+ INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog_task);
+ queue_delayed_work(adapter->e1000_workqueue, &adapter->watchdog_task,
+ 0);
+
timer_setup(&adapter->phy_info_timer, e1000_update_phy_info, 0);
INIT_WORK(&adapter->reset_task, e1000_reset_task);
- INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang);
@@ -7349,6 +7369,9 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
err_register:
+ flush_workqueue(adapter->e1000_workqueue);
+ destroy_workqueue(adapter->e1000_workqueue);
+err_workqueue:
if (!(adapter->flags & FLAG_HAS_AMT))
e1000e_release_hw_control(adapter);
err_eeprom:
@@ -7395,15 +7418,17 @@ static void e1000_remove(struct pci_dev *pdev)
*/
if (!down)
set_bit(__E1000_DOWN, &adapter->state);
- del_timer_sync(&adapter->watchdog_timer);
del_timer_sync(&adapter->phy_info_timer);
cancel_work_sync(&adapter->reset_task);
- cancel_work_sync(&adapter->watchdog_task);
cancel_work_sync(&adapter->downshift_task);
cancel_work_sync(&adapter->update_phy_task);
cancel_work_sync(&adapter->print_hang_task);
+ cancel_delayed_work(&adapter->watchdog_task);
+ flush_workqueue(adapter->e1000_workqueue);
+ destroy_workqueue(adapter->e1000_workqueue);
+
if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) {
cancel_work_sync(&adapter->tx_hwtstamp_work);
if (adapter->tx_hwtstamp_skb) {
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c
index 937f9af22d26..e609f4df86f4 100644
--- a/drivers/net/ethernet/intel/e1000e/nvm.c
+++ b/drivers/net/ethernet/intel/e1000e/nvm.c
@@ -392,7 +392,7 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
break;
}
}
- usleep_range(10000, 20000);
+ usleep_range(10000, 11000);
nvm->ops.release(hw);
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 7ce42040b851..84bd06901014 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -27,6 +27,7 @@
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
+#include <linux/if_macvlan.h>
#include <linux/if_bridge.h>
#include <linux/clocksource.h>
#include <linux/net_tstamp.h>
@@ -295,8 +296,6 @@ struct i40e_cloud_filter {
u8 tunnel_type;
};
-#define I40E_ETH_P_LLDP 0x88cc
-
#define I40E_DCB_PRIO_TYPE_STRICT 0
#define I40E_DCB_PRIO_TYPE_ETS 1
#define I40E_DCB_STRICT_PRIO_CREDITS 127
@@ -414,6 +413,11 @@ struct i40e_flex_pit {
u8 pit_index;
};
+struct i40e_fwd_adapter {
+ struct net_device *netdev;
+ int bit_no;
+};
+
struct i40e_channel {
struct list_head list;
bool initialized;
@@ -428,11 +432,25 @@ struct i40e_channel {
struct i40e_aqc_vsi_properties_data info;
u64 max_tx_rate;
+ struct i40e_fwd_adapter *fwd;
/* track this channel belongs to which VSI */
struct i40e_vsi *parent_vsi;
};
+static inline bool i40e_is_channel_macvlan(struct i40e_channel *ch)
+{
+ return !!ch->fwd;
+}
+
+static inline u8 *i40e_channel_mac(struct i40e_channel *ch)
+{
+ if (i40e_is_channel_macvlan(ch))
+ return ch->fwd->netdev->dev_addr;
+ else
+ return NULL;
+}
+
/* struct that defines the Ethernet device */
struct i40e_pf {
struct pci_dev *pdev;
@@ -777,7 +795,8 @@ struct i40e_vsi {
u16 alloc_queue_pairs; /* Allocated Tx/Rx queues */
u16 req_queue_pairs; /* User requested queue pairs */
u16 num_queue_pairs; /* Used tx and rx pairs */
- u16 num_desc;
+ u16 num_tx_desc;
+ u16 num_rx_desc;
enum i40e_vsi_type type; /* VSI type, e.g., LAN, FCoE, etc */
s16 vf_id; /* Virtual function ID for SRIOV VSIs */
@@ -814,6 +833,13 @@ struct i40e_vsi {
struct list_head ch_list;
u16 tc_seid_map[I40E_MAX_TRAFFIC_CLASS];
+ /* macvlan fields */
+#define I40E_MAX_MACVLANS 128 /* Max HW vectors - 1 on FVL */
+#define I40E_MIN_MACVLAN_VECTORS 2 /* Min vectors to enable macvlans */
+ DECLARE_BITMAP(fwd_bitmask, I40E_MAX_MACVLANS);
+ struct list_head macvlan_list;
+ int macvlan_cnt;
+
void *priv; /* client driver data reference. */
/* VSI specific handlers */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 243dcd4bec19..814acbe79ffd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -675,7 +675,7 @@ static u16 i40e_clean_asq(struct i40e_hw *hw)
desc = I40E_ADMINQ_DESC(*asq, ntc);
details = I40E_ADMINQ_DETAILS(*asq, ntc);
while (rd32(hw, hw->aq.asq.head) != ntc) {
- i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ i40e_debug(hw, I40E_DEBUG_AQ_COMMAND,
"ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
if (details->callback) {
@@ -835,7 +835,7 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
}
/* bump the tail */
- i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
+ i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "AQTX: desc and buffer:\n");
i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
buff, buff_size);
(hw->aq.asq.next_to_use)++;
@@ -886,7 +886,7 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
}
- i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ i40e_debug(hw, I40E_DEBUG_AQ_COMMAND,
"AQTX: desc and buffer writeback:\n");
i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);
@@ -995,7 +995,7 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
e->msg_len);
- i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
+ i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "AQRX: desc and buffer:\n");
i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
hw->aq.arq_buf_size);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index ecb1adaa54ec..906cf68d3453 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -281,47 +281,49 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
void *buffer, u16 buf_len)
{
struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
+ u32 effective_mask = hw->debug_mask & mask;
+ char prefix[27];
u16 len;
u8 *buf = (u8 *)buffer;
- if ((!(mask & hw->debug_mask)) || (desc == NULL))
+ if (!effective_mask || !desc)
return;
len = le16_to_cpu(aq_desc->datalen);
- i40e_debug(hw, mask,
+ i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
"AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
le16_to_cpu(aq_desc->opcode),
le16_to_cpu(aq_desc->flags),
le16_to_cpu(aq_desc->datalen),
le16_to_cpu(aq_desc->retval));
- i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
+ i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
+ "\tcookie (h,l) 0x%08X 0x%08X\n",
le32_to_cpu(aq_desc->cookie_high),
le32_to_cpu(aq_desc->cookie_low));
- i40e_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n",
+ i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
+ "\tparam (0,1) 0x%08X 0x%08X\n",
le32_to_cpu(aq_desc->params.internal.param0),
le32_to_cpu(aq_desc->params.internal.param1));
- i40e_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n",
+ i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
+ "\taddr (h,l) 0x%08X 0x%08X\n",
le32_to_cpu(aq_desc->params.external.addr_high),
le32_to_cpu(aq_desc->params.external.addr_low));
- if ((buffer != NULL) && (aq_desc->datalen != 0)) {
+ if (buffer && buf_len != 0 && len != 0 &&
+ (effective_mask & I40E_DEBUG_AQ_DESC_BUFFER)) {
i40e_debug(hw, mask, "AQ CMD Buffer:\n");
if (buf_len < len)
len = buf_len;
- /* write the full 16-byte chunks */
- if (hw->debug_mask & mask) {
- char prefix[27];
-
- snprintf(prefix, sizeof(prefix),
- "i40e %02x:%02x.%x: \t0x",
- hw->bus.bus_id,
- hw->bus.device,
- hw->bus.func);
-
- print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET,
- 16, 1, buf, len, false);
- }
+
+ snprintf(prefix, sizeof(prefix),
+ "i40e %02x:%02x.%x: \t0x",
+ hw->bus.bus_id,
+ hw->bus.device,
+ hw->bus.func);
+
+ print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET,
+ 16, 1, buf, len, false);
}
}
@@ -1859,8 +1861,7 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE)
hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU;
- if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
- hw->aq.api_min_ver >= 7) {
+ if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
__le32 tmp;
memcpy(&tmp, resp->link_type, sizeof(tmp));
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 7ea4f09229e4..55d20acfcf70 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -333,8 +333,9 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
" seid = %d, id = %d, uplink_seid = %d\n",
vsi->seid, vsi->id, vsi->uplink_seid);
dev_info(&pf->pdev->dev,
- " base_queue = %d, num_queue_pairs = %d, num_desc = %d\n",
- vsi->base_queue, vsi->num_queue_pairs, vsi->num_desc);
+ " base_queue = %d, num_queue_pairs = %d, num_tx_desc = %d, num_rx_desc = %d\n",
+ vsi->base_queue, vsi->num_queue_pairs, vsi->num_tx_desc,
+ vsi->num_rx_desc);
dev_info(&pf->pdev->dev, " type = %i\n", vsi->type);
if (vsi->type == I40E_VSI_SRIOV)
dev_info(&pf->pdev->dev, " VF ID = %i\n", vsi->vf_id);
@@ -1330,7 +1331,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
}
ret = i40e_aq_add_rem_control_packet_filter(&pf->hw,
pf->hw.mac.addr,
- I40E_ETH_P_LLDP, 0,
+ ETH_P_LLDP, 0,
pf->vsi[pf->lan_vsi]->seid,
0, true, NULL, NULL);
if (ret) {
@@ -1348,7 +1349,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
ret = i40e_aq_add_rem_control_packet_filter(&pf->hw,
pf->hw.mac.addr,
- I40E_ETH_P_LLDP, 0,
+ ETH_P_LLDP, 0,
pf->vsi[pf->lan_vsi]->seid,
0, false, NULL, NULL);
if (ret) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 7545b21bee3c..527eb52c5401 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -1982,6 +1982,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
if (i40e_enabled_xdp_vsi(vsi))
vsi->xdp_rings[i]->count = new_tx_count;
}
+ vsi->num_tx_desc = new_tx_count;
+ vsi->num_rx_desc = new_rx_count;
goto done;
}
@@ -2118,6 +2120,8 @@ rx_unwind:
rx_rings = NULL;
}
+ vsi->num_tx_desc = new_tx_count;
+ vsi->num_rx_desc = new_rx_count;
i40e_up(vsi);
free_tx:
@@ -4852,9 +4856,12 @@ static u32 i40e_get_priv_flags(struct net_device *dev)
static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
{
struct i40e_netdev_priv *np = netdev_priv(dev);
+ u64 orig_flags, new_flags, changed_flags;
+ enum i40e_admin_queue_err adq_err;
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
- u64 orig_flags, new_flags, changed_flags;
+ bool is_reset_needed;
+ i40e_status status;
u32 i, j;
orig_flags = READ_ONCE(pf->flags);
@@ -4898,6 +4905,10 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
flags_complete:
changed_flags = orig_flags ^ new_flags;
+ is_reset_needed = !!(changed_flags & (I40E_FLAG_VEB_STATS_ENABLED |
+ I40E_FLAG_LEGACY_RX | I40E_FLAG_SOURCE_PRUNING_DISABLED |
+ I40E_FLAG_DISABLE_FW_LLDP));
+
/* Before we finalize any flag changes, we need to perform some
* checks to ensure that the changes are supported and safe.
*/
@@ -4932,13 +4943,6 @@ flags_complete:
return -EOPNOTSUPP;
}
- /* Now that we've checked to ensure that the new flags are valid, load
- * them into place. Since we only modify flags either (a) during
- * initialization or (b) while holding the RTNL lock, we don't need
- * anything fancy here.
- */
- pf->flags = new_flags;
-
/* Process any additional changes needed as a result of flag changes.
* The changed_flags value reflects the list of bits that were
* changed in the code above.
@@ -4946,7 +4950,7 @@ flags_complete:
/* Flush current ATR settings if ATR was disabled */
if ((changed_flags & I40E_FLAG_FD_ATR_ENABLED) &&
- !(pf->flags & I40E_FLAG_FD_ATR_ENABLED)) {
+ !(new_flags & I40E_FLAG_FD_ATR_ENABLED)) {
set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
}
@@ -4955,7 +4959,7 @@ flags_complete:
u16 sw_flags = 0, valid_flags = 0;
int ret;
- if (!(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT))
+ if (!(new_flags & I40E_FLAG_TRUE_PROMISC_SUPPORT))
sw_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
ret = i40e_aq_set_switch_config(&pf->hw, sw_flags, valid_flags,
@@ -4974,13 +4978,13 @@ flags_complete:
(changed_flags & I40E_FLAG_BASE_R_FEC)) {
u8 fec_cfg = 0;
- if (pf->flags & I40E_FLAG_RS_FEC &&
- pf->flags & I40E_FLAG_BASE_R_FEC) {
+ if (new_flags & I40E_FLAG_RS_FEC &&
+ new_flags & I40E_FLAG_BASE_R_FEC) {
fec_cfg = I40E_AQ_SET_FEC_AUTO;
- } else if (pf->flags & I40E_FLAG_RS_FEC) {
+ } else if (new_flags & I40E_FLAG_RS_FEC) {
fec_cfg = (I40E_AQ_SET_FEC_REQUEST_RS |
I40E_AQ_SET_FEC_ABILITY_RS);
- } else if (pf->flags & I40E_FLAG_BASE_R_FEC) {
+ } else if (new_flags & I40E_FLAG_BASE_R_FEC) {
fec_cfg = (I40E_AQ_SET_FEC_REQUEST_KR |
I40E_AQ_SET_FEC_ABILITY_KR);
}
@@ -4988,14 +4992,14 @@ flags_complete:
dev_warn(&pf->pdev->dev, "Cannot change FEC config\n");
}
- if ((changed_flags & pf->flags &
+ if ((changed_flags & new_flags &
I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED) &&
- (pf->flags & I40E_FLAG_MFP_ENABLED))
+ (new_flags & I40E_FLAG_MFP_ENABLED))
dev_warn(&pf->pdev->dev,
"Turning on link-down-on-close flag may affect other partitions\n");
if (changed_flags & I40E_FLAG_DISABLE_FW_LLDP) {
- if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) {
+ if (new_flags & I40E_FLAG_DISABLE_FW_LLDP) {
struct i40e_dcbx_config *dcbcfg;
i40e_aq_stop_lldp(&pf->hw, true, false, NULL);
@@ -5013,17 +5017,43 @@ flags_complete:
dcbcfg->pfc.willing = 1;
dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
} else {
- i40e_aq_start_lldp(&pf->hw, false, NULL);
+ status = i40e_aq_start_lldp(&pf->hw, false, NULL);
+ if (status) {
+ adq_err = pf->hw.aq.asq_last_status;
+ switch (adq_err) {
+ case I40E_AQ_RC_EEXIST:
+ dev_warn(&pf->pdev->dev,
+ "FW LLDP agent is already running\n");
+ is_reset_needed = false;
+ break;
+ case I40E_AQ_RC_EPERM:
+ dev_warn(&pf->pdev->dev,
+ "Device configuration forbids SW from starting the LLDP agent.\n");
+ return -EINVAL;
+ default:
+ dev_warn(&pf->pdev->dev,
+ "Starting FW LLDP agent failed: error: %s, %s\n",
+ i40e_stat_str(&pf->hw,
+ status),
+ i40e_aq_str(&pf->hw,
+ adq_err));
+ return -EINVAL;
+ }
+ }
}
}
+ /* Now that we've checked to ensure that the new flags are valid, load
+ * them into place. Since we only modify flags either (a) during
+ * initialization or (b) while holding the RTNL lock, we don't need
+ * anything fancy here.
+ */
+ pf->flags = new_flags;
+
/* Issue reset to cause things to take effect, as additional bits
* are added we will need to create a mask of bits requiring reset
*/
- if (changed_flags & (I40E_FLAG_VEB_STATS_ENABLED |
- I40E_FLAG_LEGACY_RX |
- I40E_FLAG_SOURCE_PRUNING_DISABLED |
- I40E_FLAG_DISABLE_FW_LLDP))
+ if (is_reset_needed)
i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED), true);
return 0;
@@ -5181,6 +5211,16 @@ static int i40e_get_module_eeprom(struct net_device *netdev,
return 0;
}
+static int i40e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
+{
+ return -EOPNOTSUPP;
+}
+
+static int i40e_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
+{
+ return -EOPNOTSUPP;
+}
+
static const struct ethtool_ops i40e_ethtool_recovery_mode_ops = {
.set_eeprom = i40e_set_eeprom,
.get_eeprom_len = i40e_get_eeprom_len,
@@ -5208,6 +5248,8 @@ static const struct ethtool_ops i40e_ethtool_ops = {
.set_rxnfc = i40e_set_rxnfc,
.self_test = i40e_diag_test,
.get_strings = i40e_get_strings,
+ .get_eee = i40e_get_eee,
+ .set_eee = i40e_set_eee,
.set_phys_id = i40e_set_phys_id,
.get_sset_count = i40e_get_sset_count,
.get_ethtool_stats = i40e_get_ethtool_stats,
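
The i40e_set_priv_flags() change above follows a validate-then-commit pattern: the changed bits are derived as orig_flags ^ new_flags, every check and side effect is evaluated against new_flags, and pf->flags is only assigned once nothing can fail any more, after which a reset is issued only when one of the reset-requiring bits actually changed. A stripped-down user-space sketch of that flow (the flag names and the single validity rule are invented for illustration):

#include <stdio.h>

#define FLAG_A (1u << 0)
#define FLAG_B (1u << 1)	/* pretend B is only valid together with A */

static unsigned int dev_flags = FLAG_A;

static int set_priv_flags(unsigned int new_flags)
{
	unsigned int changed = dev_flags ^ new_flags;	/* bits that differ */

	/* validate against the requested state, not the current one */
	if ((new_flags & FLAG_B) && !(new_flags & FLAG_A))
		return -1;			/* reject; dev_flags untouched */

	/* ...side effects keyed off 'changed' and 'new_flags' go here... */

	dev_flags = new_flags;			/* commit only at the very end */
	return changed ? 1 : 0;			/* non-zero: something changed */
}

int main(void)
{
	int ret;

	ret = set_priv_flags(FLAG_A | FLAG_B);
	printf("set A|B -> %d, flags now %#x\n", ret, dev_flags);

	ret = set_priv_flags(FLAG_B);
	printf("set B   -> %d, flags still %#x\n", ret, dev_flags);
	return 0;
}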
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 320562b39686..9ebbe3da61bb 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -32,7 +32,7 @@ static const char i40e_driver_string[] =
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
const char i40e_driver_version_str[] = DRV_VERSION;
-static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";
+static const char i40e_copyright[] = "Copyright (c) 2013 - 2019 Intel Corporation.";
/* a bit of forward declarations */
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
@@ -636,9 +636,6 @@ void i40e_update_eth_stats(struct i40e_vsi *vsi)
i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
vsi->stat_offsets_loaded,
&oes->rx_unknown_protocol, &es->rx_unknown_protocol);
- i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
- vsi->stat_offsets_loaded,
- &oes->tx_errors, &es->tx_errors);
i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
I40E_GLV_GORCL(stat_idx),
@@ -5864,8 +5861,10 @@ static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid,
return -ENOENT;
}
- /* Success, update channel */
- ch->enabled_tc = enabled_tc;
+ /* Success, update channel, set enabled_tc only if the channel
+ * is not a macvlan
+ */
+ ch->enabled_tc = !i40e_is_channel_macvlan(ch) && enabled_tc;
ch->seid = ctxt.seid;
ch->vsi_number = ctxt.vsi_number;
ch->stat_counter_idx = cpu_to_le16(ctxt.info.stat_counter_idx);
@@ -6413,6 +6412,50 @@ static int i40e_resume_port_tx(struct i40e_pf *pf)
}
/**
+ * i40e_update_dcb_config
+ * @hw: pointer to the HW struct
+ * @enable_mib_change: enable MIB change event
+ *
+ * Update DCB configuration from the firmware
+ **/
+static enum i40e_status_code
+i40e_update_dcb_config(struct i40e_hw *hw, bool enable_mib_change)
+{
+ struct i40e_lldp_variables lldp_cfg;
+ i40e_status ret;
+
+ if (!hw->func_caps.dcb)
+ return I40E_NOT_SUPPORTED;
+
+ /* Read LLDP NVM area */
+ ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
+ if (ret)
+ return I40E_ERR_NOT_READY;
+
+ /* Get DCBX status */
+ ret = i40e_get_dcbx_status(hw, &hw->dcbx_status);
+ if (ret)
+ return ret;
+
+ /* Check the DCBX Status */
+ if (hw->dcbx_status == I40E_DCBX_STATUS_DONE ||
+ hw->dcbx_status == I40E_DCBX_STATUS_IN_PROGRESS) {
+ /* Get current DCBX configuration */
+ ret = i40e_get_dcb_config(hw);
+ if (ret)
+ return ret;
+ } else if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) {
+ return I40E_ERR_NOT_READY;
+ }
+
+ /* Configure the LLDP MIB change event */
+ if (enable_mib_change)
+ ret = i40e_aq_cfg_lldp_mib_change_event(hw, true, NULL);
+
+ return ret;
+}
+
+/**
* i40e_init_pf_dcb - Initialize DCB configuration
* @pf: PF being configured
*
@@ -6428,11 +6471,13 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
* Also do not enable DCBx if FW LLDP agent is disabled
*/
if ((pf->hw_features & I40E_HW_NO_DCB_SUPPORT) ||
- (pf->flags & I40E_FLAG_DISABLE_FW_LLDP))
+ (pf->flags & I40E_FLAG_DISABLE_FW_LLDP)) {
+ dev_info(&pf->pdev->dev, "DCB is not supported or FW LLDP is disabled\n");
+ err = I40E_NOT_SUPPORTED;
goto out;
+ }
- /* Get the initial DCB configuration */
- err = i40e_init_dcb(hw, true);
+ err = i40e_update_dcb_config(hw, true);
if (!err) {
/* Device/Function is not DCBX capable */
if ((!hw->func_caps.dcb) ||
@@ -6869,6 +6914,489 @@ static void i40e_vsi_set_default_tc_config(struct i40e_vsi *vsi)
}
/**
+ * i40e_del_macvlan_filter
+ * @hw: pointer to the HW structure
+ * @seid: seid of the channel VSI
+ * @macaddr: the mac address to apply as a filter
+ * @aq_err: store the admin Q error
+ *
+ * This function deletes a mac filter on the channel VSI which serves as the
+ * macvlan. Returns 0 on success.
+ **/
+static i40e_status i40e_del_macvlan_filter(struct i40e_hw *hw, u16 seid,
+ const u8 *macaddr, int *aq_err)
+{
+ struct i40e_aqc_remove_macvlan_element_data element;
+ i40e_status status;
+
+ memset(&element, 0, sizeof(element));
+ ether_addr_copy(element.mac_addr, macaddr);
+ element.vlan_tag = 0;
+ element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
+ status = i40e_aq_remove_macvlan(hw, seid, &element, 1, NULL);
+ *aq_err = hw->aq.asq_last_status;
+
+ return status;
+}
+
+/**
+ * i40e_add_macvlan_filter
+ * @hw: pointer to the HW structure
+ * @seid: seid of the channel VSI
+ * @macaddr: the mac address to apply as a filter
+ * @aq_err: store the admin Q error
+ *
+ * This function adds a mac filter on the channel VSI which serves as the
+ * macvlan. Returns 0 on success.
+ **/
+static i40e_status i40e_add_macvlan_filter(struct i40e_hw *hw, u16 seid,
+ const u8 *macaddr, int *aq_err)
+{
+ struct i40e_aqc_add_macvlan_element_data element;
+ i40e_status status;
+ u16 cmd_flags = 0;
+
+ ether_addr_copy(element.mac_addr, macaddr);
+ element.vlan_tag = 0;
+ element.queue_number = 0;
+ element.match_method = I40E_AQC_MM_ERR_NO_RES;
+ cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
+ element.flags = cpu_to_le16(cmd_flags);
+ status = i40e_aq_add_macvlan(hw, seid, &element, 1, NULL);
+ *aq_err = hw->aq.asq_last_status;
+
+ return status;
+}
+
+/**
+ * i40e_reset_ch_rings - Reset the queue contexts in a channel
+ * @vsi: the VSI we want to access
+ * @ch: the channel we want to access
+ */
+static void i40e_reset_ch_rings(struct i40e_vsi *vsi, struct i40e_channel *ch)
+{
+ struct i40e_ring *tx_ring, *rx_ring;
+ u16 pf_q;
+ int i;
+
+ for (i = 0; i < ch->num_queue_pairs; i++) {
+ pf_q = ch->base_queue + i;
+ tx_ring = vsi->tx_rings[pf_q];
+ tx_ring->ch = NULL;
+ rx_ring = vsi->rx_rings[pf_q];
+ rx_ring->ch = NULL;
+ }
+}
+
+/**
+ * i40e_free_macvlan_channels
+ * @vsi: the VSI we want to access
+ *
+ * This function frees the Qs of the channel VSI from
+ * the stack and also deletes the channel VSIs which
+ * serve as macvlans.
+ */
+static void i40e_free_macvlan_channels(struct i40e_vsi *vsi)
+{
+ struct i40e_channel *ch, *ch_tmp;
+ int ret;
+
+ if (list_empty(&vsi->macvlan_list))
+ return;
+
+ list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
+ struct i40e_vsi *parent_vsi;
+
+ if (i40e_is_channel_macvlan(ch)) {
+ i40e_reset_ch_rings(vsi, ch);
+ clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
+ netdev_unbind_sb_channel(vsi->netdev, ch->fwd->netdev);
+ netdev_set_sb_channel(ch->fwd->netdev, 0);
+ kfree(ch->fwd);
+ ch->fwd = NULL;
+ }
+
+ list_del(&ch->list);
+ parent_vsi = ch->parent_vsi;
+ if (!parent_vsi || !ch->initialized) {
+ kfree(ch);
+ continue;
+ }
+
+ /* remove the VSI */
+ ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid,
+ NULL);
+ if (ret)
+ dev_err(&vsi->back->pdev->dev,
+ "unable to remove channel (%d) for parent VSI(%d)\n",
+ ch->seid, parent_vsi->seid);
+ kfree(ch);
+ }
+ vsi->macvlan_cnt = 0;
+}
+
+/**
+ * i40e_fwd_ring_up - bring the macvlan device up
+ * @vsi: the VSI we want to access
+ * @vdev: macvlan netdevice
+ * @fwd: the private fwd structure
+ */
+static int i40e_fwd_ring_up(struct i40e_vsi *vsi, struct net_device *vdev,
+ struct i40e_fwd_adapter *fwd)
+{
+ int ret = 0, num_tc = 1, i, aq_err;
+ struct i40e_channel *ch, *ch_tmp;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+
+ if (list_empty(&vsi->macvlan_list))
+ return -EINVAL;
+
+ /* Go through the list and find an available channel */
+ list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
+ if (!i40e_is_channel_macvlan(ch)) {
+ ch->fwd = fwd;
+ /* record configuration for macvlan interface in vdev */
+ for (i = 0; i < num_tc; i++)
+ netdev_bind_sb_channel_queue(vsi->netdev, vdev,
+ i,
+ ch->num_queue_pairs,
+ ch->base_queue);
+ for (i = 0; i < ch->num_queue_pairs; i++) {
+ struct i40e_ring *tx_ring, *rx_ring;
+ u16 pf_q;
+
+ pf_q = ch->base_queue + i;
+
+ /* Get to TX ring ptr */
+ tx_ring = vsi->tx_rings[pf_q];
+ tx_ring->ch = ch;
+
+ /* Get the RX ring ptr */
+ rx_ring = vsi->rx_rings[pf_q];
+ rx_ring->ch = ch;
+ }
+ break;
+ }
+ }
+
+ /* Guarantee all rings are updated before we update the
+ * MAC address filter.
+ */
+ wmb();
+
+ /* Add a mac filter */
+ ret = i40e_add_macvlan_filter(hw, ch->seid, vdev->dev_addr, &aq_err);
+ if (ret) {
+ /* if we cannot add the MAC rule then disable the offload */
+ macvlan_release_l2fw_offload(vdev);
+ for (i = 0; i < ch->num_queue_pairs; i++) {
+ struct i40e_ring *rx_ring;
+ u16 pf_q;
+
+ pf_q = ch->base_queue + i;
+ rx_ring = vsi->rx_rings[pf_q];
+ rx_ring->netdev = NULL;
+ }
+ dev_info(&pf->pdev->dev,
+ "Error adding mac filter on macvlan err %s, aq_err %s\n",
+ i40e_stat_str(hw, ret),
+ i40e_aq_str(hw, aq_err));
+ netdev_err(vdev, "L2fwd offload disabled due to L2 filter error\n");
+ }
+
+ return ret;
+}
+
+/**
+ * i40e_setup_macvlans - create the channels which will be macvlans
+ * @vsi: the VSI we want to access
+ * @macvlan_cnt: no. of macvlans to be set up
+ * @qcnt: no. of Qs per macvlan
+ * @vdev: macvlan netdevice
+ */
+static int i40e_setup_macvlans(struct i40e_vsi *vsi, u16 macvlan_cnt, u16 qcnt,
+ struct net_device *vdev)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_vsi_context ctxt;
+ u16 sections, qmap, num_qps;
+ struct i40e_channel *ch;
+ int i, pow, ret = 0;
+ u8 offset = 0;
+
+ if (vsi->type != I40E_VSI_MAIN || !macvlan_cnt)
+ return -EINVAL;
+
+ num_qps = vsi->num_queue_pairs - (macvlan_cnt * qcnt);
+
+ /* find the next higher power-of-2 of num queue pairs */
+ pow = fls(roundup_pow_of_two(num_qps) - 1);
+
+ qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
+ (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
+
+ /* Setup context bits for the main VSI */
+ sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
+ sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
+ memset(&ctxt, 0, sizeof(ctxt));
+ ctxt.seid = vsi->seid;
+ ctxt.pf_num = vsi->back->hw.pf_id;
+ ctxt.vf_num = 0;
+ ctxt.uplink_seid = vsi->uplink_seid;
+ ctxt.info = vsi->info;
+ ctxt.info.tc_mapping[0] = cpu_to_le16(qmap);
+ ctxt.info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
+ ctxt.info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
+ ctxt.info.valid_sections |= cpu_to_le16(sections);
+
+ /* Reconfigure RSS for main VSI with new max queue count */
+ vsi->rss_size = max_t(u16, num_qps, qcnt);
+ ret = i40e_vsi_config_rss(vsi);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Failed to reconfig RSS for num_queues (%u)\n",
+ vsi->rss_size);
+ return ret;
+ }
+ vsi->reconfig_rss = true;
+ dev_dbg(&vsi->back->pdev->dev,
+ "Reconfigured RSS with num_queues (%u)\n", vsi->rss_size);
+ vsi->next_base_queue = num_qps;
+ vsi->cnt_q_avail = vsi->num_queue_pairs - num_qps;
+
+ /* Update the VSI after updating the VSI queue-mapping
+ * information
+ */
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Update vsi tc config failed, err %s aq_err %s\n",
+ i40e_stat_str(hw, ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ return ret;
+ }
+ /* update the local VSI info with updated queue map */
+ i40e_vsi_update_queue_map(vsi, &ctxt);
+ vsi->info.valid_sections = 0;
+
+ /* Create channels for macvlans */
+ INIT_LIST_HEAD(&vsi->macvlan_list);
+ for (i = 0; i < macvlan_cnt; i++) {
+ ch = kzalloc(sizeof(*ch), GFP_KERNEL);
+ if (!ch) {
+ ret = -ENOMEM;
+ goto err_free;
+ }
+ INIT_LIST_HEAD(&ch->list);
+ ch->num_queue_pairs = qcnt;
+ if (!i40e_setup_channel(pf, vsi, ch)) {
+ ret = -EINVAL;
+ goto err_free;
+ }
+ ch->parent_vsi = vsi;
+ vsi->cnt_q_avail -= ch->num_queue_pairs;
+ vsi->macvlan_cnt++;
+ list_add_tail(&ch->list, &vsi->macvlan_list);
+ }
+
+ return ret;
+
+err_free:
+ dev_info(&pf->pdev->dev, "Failed to setup macvlans\n");
+ i40e_free_macvlan_channels(vsi);
+
+ return ret;
+}
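The TC queue map built above packs a queue offset and a power-of-two queue count into a single 16-bit field; the offset/number shift values (0 and 9) match the I40E_AQ_VSI_TC_QUE_* definitions that appear elsewhere in this series. A standalone sketch of that arithmetic, not driver code, with roundup_pow_of_two()/fls() re-implemented purely for illustration:

#include <stdio.h>
#include <stdint.h>

/* Shift values as used by the driver code above. */
#define TC_QUE_OFFSET_SHIFT	0
#define TC_QUE_NUMBER_SHIFT	9

/* Userspace stand-in for the kernel's roundup_pow_of_two(). */
static unsigned int roundup_p2(unsigned int x)
{
	unsigned int p = 1;

	while (p < x)
		p <<= 1;
	return p;
}

/* Userspace stand-in for the kernel's fls(): index of the highest set bit. */
static int fls_sim(unsigned int x)
{
	int bit = 0;

	while (x) {
		bit++;
		x >>= 1;
	}
	return bit;
}

int main(void)
{
	unsigned int num_qps = 48;	/* queues left to the main VSI */
	/* fls(roundup_pow_of_two(n) - 1) gives log2 of the next power of two */
	int pow = fls_sim(roundup_p2(num_qps) - 1);
	uint16_t qmap = (0 << TC_QUE_OFFSET_SHIFT) | (pow << TC_QUE_NUMBER_SHIFT);

	/* For num_qps = 48: roundup -> 64, fls(63) = 6, so qmap encodes 2^6 queues */
	printf("pow=%d qmap=0x%04x\n", pow, (unsigned int)qmap);
	return 0;
}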
+
+/**
+ * i40e_fwd_add - configure macvlans
+ * @netdev: net device to configure
+ * @vdev: macvlan netdevice
+ **/
+static void *i40e_fwd_add(struct net_device *netdev, struct net_device *vdev)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ u16 q_per_macvlan = 0, macvlan_cnt = 0, vectors;
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_fwd_adapter *fwd;
+ int avail_macvlan, ret;
+
+ if ((pf->flags & I40E_FLAG_DCB_ENABLED)) {
+ netdev_info(netdev, "Macvlans are not supported when DCB is enabled\n");
+ return ERR_PTR(-EINVAL);
+ }
+ if ((pf->flags & I40E_FLAG_TC_MQPRIO)) {
+ netdev_info(netdev, "Macvlans are not supported when HW TC offload is on\n");
+ return ERR_PTR(-EINVAL);
+ }
+ if (pf->num_lan_msix < I40E_MIN_MACVLAN_VECTORS) {
+ netdev_info(netdev, "Not enough vectors available to support macvlans\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* The macvlan device has to be a single Q device so that the
+ * tc_to_txq field can be reused to pick the tx queue.
+ */
+ if (netif_is_multiqueue(vdev))
+ return ERR_PTR(-ERANGE);
+
+ if (!vsi->macvlan_cnt) {
+ /* reserve bit 0 for the pf device */
+ set_bit(0, vsi->fwd_bitmask);
+
+ /* Try to reserve as many queues as possible for macvlans. First
+ * reserve 3/4 of the max vectors, then half, then a quarter, and
+ * calculate the queues per macvlan as you go.
+ */
+ vectors = pf->num_lan_msix;
+ if (vectors <= I40E_MAX_MACVLANS && vectors > 64) {
+ /* allocate 4 Qs per macvlan and 32 Qs to the PF */
+ q_per_macvlan = 4;
+ macvlan_cnt = (vectors - 32) / 4;
+ } else if (vectors <= 64 && vectors > 32) {
+ /* allocate 2 Qs per macvlan and 16 Qs to the PF */
+ q_per_macvlan = 2;
+ macvlan_cnt = (vectors - 16) / 2;
+ } else if (vectors <= 32 && vectors > 16) {
+ /* allocate 1 Q per macvlan and 16 Qs to the PF */
+ q_per_macvlan = 1;
+ macvlan_cnt = vectors - 16;
+ } else if (vectors <= 16 && vectors > 8) {
+ /* allocate 1 Q per macvlan and 8 Qs to the PF */
+ q_per_macvlan = 1;
+ macvlan_cnt = vectors - 8;
+ } else {
+ /* allocate 1 Q per macvlan and 1 Q to the PF */
+ q_per_macvlan = 1;
+ macvlan_cnt = vectors - 1;
+ }
+
+ if (macvlan_cnt == 0)
+ return ERR_PTR(-EBUSY);
+
+ /* Quiesce VSI queues */
+ i40e_quiesce_vsi(vsi);
+
+ /* sets up the macvlans but does not "enable" them */
+ ret = i40e_setup_macvlans(vsi, macvlan_cnt, q_per_macvlan,
+ vdev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ /* Unquiesce VSI */
+ i40e_unquiesce_vsi(vsi);
+ }
+ avail_macvlan = find_first_zero_bit(vsi->fwd_bitmask,
+ vsi->macvlan_cnt);
+ if (avail_macvlan >= I40E_MAX_MACVLANS)
+ return ERR_PTR(-EBUSY);
+
+ /* create the fwd struct */
+ fwd = kzalloc(sizeof(*fwd), GFP_KERNEL);
+ if (!fwd)
+ return ERR_PTR(-ENOMEM);
+
+ set_bit(avail_macvlan, vsi->fwd_bitmask);
+ fwd->bit_no = avail_macvlan;
+ netdev_set_sb_channel(vdev, avail_macvlan);
+ fwd->netdev = vdev;
+
+ if (!netif_running(netdev))
+ return fwd;
+
+ /* Set fwd ring up */
+ ret = i40e_fwd_ring_up(vsi, vdev, fwd);
+ if (ret) {
+ /* unbind the queues and drop the subordinate channel config */
+ netdev_unbind_sb_channel(netdev, vdev);
+ netdev_set_sb_channel(vdev, 0);
+
+ kfree(fwd);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return fwd;
+}
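The if/else ladder in i40e_fwd_add() above splits the available MSI-X vectors between the PF and the macvlan channels. A standalone sketch of the same arithmetic with a few worked inputs (MAX_MACVLANS is a stand-in; the driver's I40E_MAX_MACVLANS only needs to exceed 64 for the first branch to be reachable):

#include <stdio.h>

#define MAX_MACVLANS	128	/* illustrative stand-in for I40E_MAX_MACVLANS */

static void split(unsigned int vectors, unsigned int *q_per_macvlan,
		  unsigned int *macvlan_cnt)
{
	if (vectors <= MAX_MACVLANS && vectors > 64) {
		*q_per_macvlan = 4;	/* 4 queues per macvlan, 32 to the PF */
		*macvlan_cnt = (vectors - 32) / 4;
	} else if (vectors <= 64 && vectors > 32) {
		*q_per_macvlan = 2;	/* 2 queues per macvlan, 16 to the PF */
		*macvlan_cnt = (vectors - 16) / 2;
	} else if (vectors <= 32 && vectors > 16) {
		*q_per_macvlan = 1;	/* 1 queue per macvlan, 16 to the PF */
		*macvlan_cnt = vectors - 16;
	} else if (vectors <= 16 && vectors > 8) {
		*q_per_macvlan = 1;	/* 1 queue per macvlan, 8 to the PF */
		*macvlan_cnt = vectors - 8;
	} else {
		*q_per_macvlan = 1;	/* 1 queue per macvlan, 1 to the PF */
		*macvlan_cnt = vectors ? vectors - 1 : 0;
	}
}

int main(void)
{
	unsigned int q, cnt, v[] = { 96, 48, 24, 12 };

	for (unsigned int i = 0; i < sizeof(v) / sizeof(v[0]); i++) {
		split(v[i], &q, &cnt);
		/* e.g. 96 vectors -> 16 macvlans x 4 queues; 48 -> 16 x 2 */
		printf("vectors=%u -> %u macvlans, %u queue(s) each\n",
		       v[i], cnt, q);
	}
	return 0;
}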
+
+/**
+ * i40e_del_all_macvlans - Delete all the mac filters on the channels
+ * @vsi: the VSI we want to access
+ */
+static void i40e_del_all_macvlans(struct i40e_vsi *vsi)
+{
+ struct i40e_channel *ch, *ch_tmp;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ int aq_err, ret = 0;
+
+ if (list_empty(&vsi->macvlan_list))
+ return;
+
+ list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
+ if (i40e_is_channel_macvlan(ch)) {
+ ret = i40e_del_macvlan_filter(hw, ch->seid,
+ i40e_channel_mac(ch),
+ &aq_err);
+ if (!ret) {
+ /* Reset queue contexts */
+ i40e_reset_ch_rings(vsi, ch);
+ clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
+ netdev_unbind_sb_channel(vsi->netdev,
+ ch->fwd->netdev);
+ netdev_set_sb_channel(ch->fwd->netdev, 0);
+ kfree(ch->fwd);
+ ch->fwd = NULL;
+ }
+ }
+ }
+}
+
+/**
+ * i40e_fwd_del - delete macvlan interfaces
+ * @netdev: net device to configure
+ * @vdev: macvlan netdevice
+ */
+static void i40e_fwd_del(struct net_device *netdev, void *vdev)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_fwd_adapter *fwd = vdev;
+ struct i40e_channel *ch, *ch_tmp;
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ int aq_err, ret = 0;
+
+ /* Find the channel associated with the macvlan and del mac filter */
+ list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
+ if (i40e_is_channel_macvlan(ch) &&
+ ether_addr_equal(i40e_channel_mac(ch),
+ fwd->netdev->dev_addr)) {
+ ret = i40e_del_macvlan_filter(hw, ch->seid,
+ i40e_channel_mac(ch),
+ &aq_err);
+ if (!ret) {
+ /* Reset queue contexts */
+ i40e_reset_ch_rings(vsi, ch);
+ clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
+ netdev_unbind_sb_channel(netdev, fwd->netdev);
+ netdev_set_sb_channel(fwd->netdev, 0);
+ kfree(ch->fwd);
+ ch->fwd = NULL;
+ } else {
+ dev_info(&pf->pdev->dev,
+ "Error deleting mac filter on macvlan err %s, aq_err %s\n",
+ i40e_stat_str(hw, ret),
+ i40e_aq_str(hw, aq_err));
+ }
+ break;
+ }
+ }
+}
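Between them, i40e_fwd_add() and i40e_fwd_del()/i40e_del_all_macvlans() manage vsi->fwd_bitmask as a simple slot allocator: find_first_zero_bit()/set_bit() claim a subordinate channel slot and clear_bit() returns it when the macvlan goes away. A tiny userspace sketch of that pattern, with plain bit operations standing in for the kernel bitmap helpers and an illustrative slot count:

#include <stdio.h>

#define MAX_SLOTS	64		/* stands in for I40E_MAX_MACVLANS */

static unsigned long long slots;	/* stands in for vsi->fwd_bitmask */

/* Return the first free slot, or -1 when the bitmap is exhausted. */
static int claim_slot(void)
{
	for (int bit = 0; bit < MAX_SLOTS; bit++) {
		if (!(slots & (1ULL << bit))) {
			slots |= 1ULL << bit;	/* set_bit() */
			return bit;
		}
	}
	return -1;			/* like returning -EBUSY above */
}

static void release_slot(int bit)
{
	slots &= ~(1ULL << bit);	/* clear_bit() */
}

int main(void)
{
	slots |= 1ULL;			/* bit 0 reserved for the PF itself */

	int a = claim_slot();		/* -> 1 */
	int b = claim_slot();		/* -> 2 */

	release_slot(a);		/* macvlan a deleted */
	printf("a=%d b=%d next=%d\n", a, b, claim_slot());	/* next reuses 1 */
	return 0;
}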
+
+/**
* i40e_setup_tc - configure multiple traffic classes
* @netdev: net device to configure
* @type_data: tc offload data
@@ -6963,6 +7491,10 @@ config_tc:
vsi->seid);
need_reset = true;
goto exit;
+ } else {
+ dev_info(&vsi->back->pdev->dev,
+ "Setup channel (id:%u) utilizing num_queues %d\n",
+ vsi->seid, vsi->tc_config.tc_info[0].qcount);
}
if (pf->flags & I40E_FLAG_TC_MQPRIO) {
@@ -7227,15 +7759,15 @@ int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
/**
* i40e_parse_cls_flower - Parse tc flower filters provided by kernel
* @vsi: Pointer to VSI
- * @cls_flower: Pointer to struct tc_cls_flower_offload
+ * @cls_flower: Pointer to struct flow_cls_offload
* @filter: Pointer to cloud filter structure
*
**/
static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
- struct tc_cls_flower_offload *f,
+ struct flow_cls_offload *f,
struct i40e_cloud_filter *filter)
{
- struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
struct flow_dissector *dissector = rule->match.dissector;
u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0;
struct i40e_pf *pf = vsi->back;
@@ -7469,11 +8001,11 @@ static int i40e_handle_tclass(struct i40e_vsi *vsi, u32 tc,
/**
* i40e_configure_clsflower - Configure tc flower filters
* @vsi: Pointer to VSI
- * @cls_flower: Pointer to struct tc_cls_flower_offload
+ * @cls_flower: Pointer to struct flow_cls_offload
*
**/
static int i40e_configure_clsflower(struct i40e_vsi *vsi,
- struct tc_cls_flower_offload *cls_flower)
+ struct flow_cls_offload *cls_flower)
{
int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
struct i40e_cloud_filter *filter = NULL;
@@ -7565,11 +8097,11 @@ static struct i40e_cloud_filter *i40e_find_cloud_filter(struct i40e_vsi *vsi,
/**
* i40e_delete_clsflower - Remove tc flower filters
* @vsi: Pointer to VSI
- * @cls_flower: Pointer to struct tc_cls_flower_offload
+ * @cls_flower: Pointer to struct flow_cls_offload
*
**/
static int i40e_delete_clsflower(struct i40e_vsi *vsi,
- struct tc_cls_flower_offload *cls_flower)
+ struct flow_cls_offload *cls_flower)
{
struct i40e_cloud_filter *filter = NULL;
struct i40e_pf *pf = vsi->back;
@@ -7612,16 +8144,16 @@ static int i40e_delete_clsflower(struct i40e_vsi *vsi,
* @type_data: offload data
**/
static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np,
- struct tc_cls_flower_offload *cls_flower)
+ struct flow_cls_offload *cls_flower)
{
struct i40e_vsi *vsi = np->vsi;
switch (cls_flower->command) {
- case TC_CLSFLOWER_REPLACE:
+ case FLOW_CLS_REPLACE:
return i40e_configure_clsflower(vsi, cls_flower);
- case TC_CLSFLOWER_DESTROY:
+ case FLOW_CLS_DESTROY:
return i40e_delete_clsflower(vsi, cls_flower);
- case TC_CLSFLOWER_STATS:
+ case FLOW_CLS_STATS:
return -EOPNOTSUPP;
default:
return -EOPNOTSUPP;
@@ -7645,34 +8177,21 @@ static int i40e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
}
}
-static int i40e_setup_tc_block(struct net_device *dev,
- struct tc_block_offload *f)
-{
- struct i40e_netdev_priv *np = netdev_priv(dev);
-
- if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
- return -EOPNOTSUPP;
-
- switch (f->command) {
- case TC_BLOCK_BIND:
- return tcf_block_cb_register(f->block, i40e_setup_tc_block_cb,
- np, np, f->extack);
- case TC_BLOCK_UNBIND:
- tcf_block_cb_unregister(f->block, i40e_setup_tc_block_cb, np);
- return 0;
- default:
- return -EOPNOTSUPP;
- }
-}
+static LIST_HEAD(i40e_block_cb_list);
static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type,
void *type_data)
{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+
switch (type) {
case TC_SETUP_QDISC_MQPRIO:
return i40e_setup_tc(netdev, type_data);
case TC_SETUP_BLOCK:
- return i40e_setup_tc_block(netdev, type_data);
+ return flow_block_cb_setup_simple(type_data,
+ &i40e_block_cb_list,
+ i40e_setup_tc_block_cb,
+ np, np, true);
default:
return -EOPNOTSUPP;
}
@@ -8570,7 +9089,7 @@ static void i40e_link_event(struct i40e_pf *pf)
/* Notify the base of the switch tree connected to
* the link. Floating VEBs are not notified.
*/
- if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
+ if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb])
i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
else
i40e_vsi_link_event(vsi, new_link);
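The pf->lan_veb tests in this and the later hunks move from comparing against a "no VEB" sentinel to an explicit upper-bound check. Since lan_veb indexes the fixed-size pf->veb[] array, testing it against I40E_MAX_VEB rejects both the sentinel and any other out-of-range value before the array is dereferenced. A minimal sketch of the two styles (array size and sentinel are placeholders, not values taken from the driver headers):

#include <stdbool.h>
#include <stddef.h>

#define MAX_VEB	16		/* placeholder for I40E_MAX_VEB */
#define NO_VEB	0xFFFF		/* placeholder sentinel, "no VEB assigned" */

struct veb { int seid; };

static struct veb *veb_table[MAX_VEB];

/* Old style: only the sentinel is rejected; a corrupt index would still
 * walk past the end of veb_table[].
 */
static bool veb_valid_sentinel(unsigned int idx)
{
	return idx != NO_VEB && veb_table[idx] != NULL;
}

/* New style: any index outside the table is rejected before use. */
static bool veb_valid_bounds(unsigned int idx)
{
	return idx < MAX_VEB && veb_table[idx] != NULL;
}

int main(void)
{
	static struct veb lan = { .seid = 42 };

	veb_table[3] = &lan;
	return (veb_valid_bounds(3) && veb_valid_sentinel(3) &&
		!veb_valid_bounds(NO_VEB)) ? 0 : 1;
}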
@@ -10031,8 +10550,12 @@ static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
switch (vsi->type) {
case I40E_VSI_MAIN:
vsi->alloc_queue_pairs = pf->num_lan_qps;
- vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
- I40E_REQ_DESCRIPTOR_MULTIPLE);
+ if (!vsi->num_tx_desc)
+ vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
+ I40E_REQ_DESCRIPTOR_MULTIPLE);
+ if (!vsi->num_rx_desc)
+ vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
+ I40E_REQ_DESCRIPTOR_MULTIPLE);
if (pf->flags & I40E_FLAG_MSIX_ENABLED)
vsi->num_q_vectors = pf->num_lan_msix;
else
@@ -10042,22 +10565,32 @@ static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
case I40E_VSI_FDIR:
vsi->alloc_queue_pairs = 1;
- vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
- I40E_REQ_DESCRIPTOR_MULTIPLE);
+ vsi->num_tx_desc = ALIGN(I40E_FDIR_RING_COUNT,
+ I40E_REQ_DESCRIPTOR_MULTIPLE);
+ vsi->num_rx_desc = ALIGN(I40E_FDIR_RING_COUNT,
+ I40E_REQ_DESCRIPTOR_MULTIPLE);
vsi->num_q_vectors = pf->num_fdsb_msix;
break;
case I40E_VSI_VMDQ2:
vsi->alloc_queue_pairs = pf->num_vmdq_qps;
- vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
- I40E_REQ_DESCRIPTOR_MULTIPLE);
+ if (!vsi->num_tx_desc)
+ vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
+ I40E_REQ_DESCRIPTOR_MULTIPLE);
+ if (!vsi->num_rx_desc)
+ vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
+ I40E_REQ_DESCRIPTOR_MULTIPLE);
vsi->num_q_vectors = pf->num_vmdq_msix;
break;
case I40E_VSI_SRIOV:
vsi->alloc_queue_pairs = pf->num_vf_qps;
- vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
- I40E_REQ_DESCRIPTOR_MULTIPLE);
+ if (!vsi->num_tx_desc)
+ vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
+ I40E_REQ_DESCRIPTOR_MULTIPLE);
+ if (!vsi->num_rx_desc)
+ vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
+ I40E_REQ_DESCRIPTOR_MULTIPLE);
break;
default:
@@ -10333,7 +10866,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
ring->vsi = vsi;
ring->netdev = vsi->netdev;
ring->dev = &pf->pdev->dev;
- ring->count = vsi->num_desc;
+ ring->count = vsi->num_tx_desc;
ring->size = 0;
ring->dcb_tc = 0;
if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
@@ -10350,7 +10883,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
ring->vsi = vsi;
ring->netdev = NULL;
ring->dev = &pf->pdev->dev;
- ring->count = vsi->num_desc;
+ ring->count = vsi->num_tx_desc;
ring->size = 0;
ring->dcb_tc = 0;
if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
@@ -10366,7 +10899,7 @@ setup_rx:
ring->vsi = vsi;
ring->netdev = vsi->netdev;
ring->dev = &pf->pdev->dev;
- ring->count = vsi->num_desc;
+ ring->count = vsi->num_rx_desc;
ring->size = 0;
ring->dcb_tc = 0;
ring->itr_setting = pf->rx_itr_default;
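Splitting num_desc into num_tx_desc and num_rx_desc lets the Tx and Rx ring lengths be tracked separately, and the "if (!vsi->num_*_desc)" guards only apply the default when no count has been set yet, so a previously configured ring size survives a VSI rebuild. The default itself is just the base descriptor count rounded up to the required multiple; a quick sketch of that rounding (the two constants are assumed values for illustration, not read from i40e.h):

#include <stdio.h>

/* Assumed values for illustration; the driver takes them from i40e.h. */
#define DEFAULT_NUM_DESCRIPTORS		512
#define REQ_DESCRIPTOR_MULTIPLE		32

/* Round x up to the next multiple of a (a must be a power of two),
 * mirroring the kernel's ALIGN() macro.
 */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int tx_desc = 0;	/* "not configured yet" */

	if (!tx_desc)
		tx_desc = ALIGN_UP(DEFAULT_NUM_DESCRIPTORS,
				   REQ_DESCRIPTOR_MULTIPLE);

	/* 512 is already a multiple of 32, so the default stays 512 */
	printf("tx ring length = %u descriptors\n", tx_desc);
	return 0;
}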
@@ -11604,6 +12137,9 @@ static int i40e_set_features(struct net_device *netdev,
return -EINVAL;
}
+ if (!(features & NETIF_F_HW_L2FW_DOFFLOAD) && vsi->macvlan_cnt)
+ i40e_del_all_macvlans(vsi);
+
need_reset = i40e_set_ntuple(pf, features);
if (need_reset)
@@ -12348,6 +12884,8 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_bpf = i40e_xdp,
.ndo_xdp_xmit = i40e_xdp_xmit,
.ndo_xsk_async_xmit = i40e_xsk_async_xmit,
+ .ndo_dfwd_add_station = i40e_fwd_add,
+ .ndo_dfwd_del_station = i40e_fwd_del,
};
/**
@@ -12407,6 +12945,9 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
/* record features VLANs can make use of */
netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
+ /* enable macvlan offloads */
+ netdev->hw_features |= NETIF_F_HW_L2FW_DOFFLOAD;
+
hw_features = hw_enc_features |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX;
@@ -12519,7 +13060,7 @@ int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
struct i40e_pf *pf = vsi->back;
/* Uplink is not a bridge so default to VEB */
- if (vsi->veb_idx == I40E_NO_VEB)
+ if (vsi->veb_idx >= I40E_MAX_VEB)
return 1;
veb = pf->veb[vsi->veb_idx];
@@ -13577,7 +14118,7 @@ static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
/* Main VEB? */
if (uplink_seid != pf->mac_seid)
break;
- if (pf->lan_veb == I40E_NO_VEB) {
+ if (pf->lan_veb >= I40E_MAX_VEB) {
int v;
/* find existing or else empty VEB */
@@ -13587,13 +14128,15 @@ static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
break;
}
}
- if (pf->lan_veb == I40E_NO_VEB) {
+ if (pf->lan_veb >= I40E_MAX_VEB) {
v = i40e_veb_mem_alloc(pf);
if (v < 0)
break;
pf->lan_veb = v;
}
}
+ if (pf->lan_veb >= I40E_MAX_VEB)
+ break;
pf->veb[pf->lan_veb]->seid = seid;
pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
@@ -13747,7 +14290,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
/* Set up the PF VSI associated with the PF's main VSI
* that is already in the HW switch
*/
- if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
+ if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb])
uplink_seid = pf->veb[pf->lan_veb]->seid;
else
uplink_seid = pf->mac_seid;
@@ -14203,7 +14746,17 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
I40E_MAX_CSR_SPACE);
-
+ /* We believe the highest register to read is
+ * I40E_GLGEN_STAT_CLEAR, so check that the BAR is at
+ * least that large before mapping it, to prevent a
+ * kernel panic.
+ */
+ if (pf->ioremap_len < I40E_GLGEN_STAT_CLEAR) {
+ dev_err(&pdev->dev, "Cannot map registers, bar size 0x%X too small, aborting\n",
+ pf->ioremap_len);
+ err = -ENOMEM;
+ goto err_ioremap;
+ }
hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len);
if (!hw->hw_addr) {
err = -EIO;
@@ -14388,6 +14941,11 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, pf);
pci_save_state(pdev);
+ dev_info(&pdev->dev,
+ (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) ?
+ "FW LLDP is disabled\n" :
+ "FW LLDP is enabled\n");
+
/* Enable FW to write default DCB config on link-up */
i40e_aq_set_dcb_parameters(hw, true, NULL);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 882627073dce..eac88bcc6c06 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -350,6 +350,10 @@ i40e_virtchnl_link_speed(enum i40e_aq_link_speed link_speed)
return VIRTCHNL_LINK_SPEED_100MB;
case I40E_LINK_SPEED_1GB:
return VIRTCHNL_LINK_SPEED_1GB;
+ case I40E_LINK_SPEED_2_5GB:
+ return VIRTCHNL_LINK_SPEED_2_5GB;
+ case I40E_LINK_SPEED_5GB:
+ return VIRTCHNL_LINK_SPEED_5GB;
case I40E_LINK_SPEED_10GB:
return VIRTCHNL_LINK_SPEED_10GB;
case I40E_LINK_SPEED_40GB:
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 439c35f0c581..11394a52e21c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -140,8 +140,7 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
* @ptp: The PTP clock structure
* @delta: Offset in nanoseconds to adjust the PHC time by
*
- * Adjust the frequency of the PHC by the indicated parts per billion from the
- * base frequency.
+ * Adjust the current clock time by a delta specified in nanoseconds.
**/
static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 20a283702c9f..2a2fe3ec7926 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -774,7 +774,7 @@ void i40e_detect_recover_hung(struct i40e_vsi *vsi)
static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
struct i40e_ring *tx_ring, int napi_budget)
{
- u16 i = tx_ring->next_to_clean;
+ int i = tx_ring->next_to_clean;
struct i40e_tx_buffer *tx_buf;
struct i40e_tx_desc *tx_head;
struct i40e_tx_desc *tx_desc;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 479bc60c8f71..02b09a8ad54c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -440,7 +440,7 @@ static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
struct virtchnl_iwarp_qv_info *qv_info;
u32 v_idx, i, reg_idx, reg;
u32 next_q_idx, next_q_type;
- u32 msix_vf, size;
+ u32 msix_vf;
int ret = 0;
msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
@@ -454,11 +454,10 @@ static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
goto err_out;
}
- size = sizeof(struct virtchnl_iwarp_qvlist_info) +
- (sizeof(struct virtchnl_iwarp_qv_info) *
- (qvlist_info->num_vectors - 1));
kfree(vf->qvlist_info);
- vf->qvlist_info = kzalloc(size, GFP_KERNEL);
+ vf->qvlist_info = kzalloc(struct_size(vf->qvlist_info, qv_info,
+ qvlist_info->num_vectors - 1),
+ GFP_KERNEL);
if (!vf->qvlist_info) {
ret = -ENOMEM;
goto err_out;
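The struct_size() conversion above replaces the open-coded "sizeof(header) + n * sizeof(element)" with the overflow-checked helper from <linux/overflow.h>. The "num_vectors - 1" is kept because the old calculation also subtracted one, presumably since the virtchnl structure ends in a one-element array and the header therefore already carries the first element. Roughly, the helper reduces to the following (a plain C model with illustrative field layouts, minus the saturation-on-overflow the kernel version adds):

#include <stdio.h>
#include <stddef.h>

struct qv_info {
	unsigned int v_idx;
	unsigned int ceq_idx;
	unsigned int aeq_idx;
};

/* Model of a virtchnl-style header that ends in a one-element array. */
struct qvlist_info {
	unsigned int num_vectors;
	struct qv_info qv_info[1];
};

/* What struct_size(p, qv_info, n) boils down to, without the kernel
 * helper's saturation on overflow.
 */
#define STRUCT_SIZE(p, member, n) \
	(sizeof(*(p)) + (n) * sizeof((p)->member[0]))

int main(void)
{
	struct qvlist_info *info = NULL;
	unsigned int num_vectors = 4;

	/* One element lives in the header, so only n - 1 extra are needed. */
	size_t len = STRUCT_SIZE(info, qv_info, num_vectors - 1);

	printf("allocation size for %u vectors: %zu bytes\n",
	       num_vectors, len);
	return 0;
}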
@@ -470,14 +469,15 @@ static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
qv_info = &qvlist_info->qv_info[i];
if (!qv_info)
continue;
- v_idx = qv_info->v_idx;
/* Validate vector id belongs to this vf */
- if (!i40e_vc_isvalid_vector_id(vf, v_idx)) {
+ if (!i40e_vc_isvalid_vector_id(vf, qv_info->v_idx)) {
ret = -EINVAL;
goto err_free;
}
+ v_idx = qv_info->v_idx;
+
vf->qvlist_info->qv_info[i] = *qv_info;
reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
@@ -1845,7 +1845,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
i40e_status aq_ret = 0;
struct i40e_vsi *vsi;
int num_vsis = 1;
- int len = 0;
+ size_t len = 0;
int ret;
if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
@@ -1853,9 +1853,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
goto err;
}
- len = (sizeof(struct virtchnl_vf_resource) +
- sizeof(struct virtchnl_vsi_resource) * num_vsis);
-
+ len = struct_size(vfres, vsi_res, num_vsis);
vfres = kzalloc(len, GFP_KERNEL);
if (!vfres) {
aq_ret = I40E_ERR_NO_MEMORY;
@@ -2135,8 +2133,13 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
}
}
- if (vf->adq_enabled)
+ if (vf->adq_enabled) {
+ if (idx >= ARRAY_SIZE(vf->ch)) {
+ aq_ret = I40E_ERR_NO_AVAILABLE_VSI;
+ goto error_param;
+ }
vsi_id = vf->ch[idx].vsi_id;
+ }
if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
&qpi->rxq) ||
@@ -2152,6 +2155,10 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
* to its appropriate VSIs based on TC mapping
**/
if (vf->adq_enabled) {
+ if (idx >= ARRAY_SIZE(vf->ch)) {
+ aq_ret = I40E_ERR_NO_AVAILABLE_VSI;
+ goto error_param;
+ }
if (j == (vf->ch[idx].num_qps - 1)) {
idx++;
j = 0; /* resetting the queue count */
@@ -2318,7 +2325,6 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
struct virtchnl_queue_select *vqs =
(struct virtchnl_queue_select *)msg;
struct i40e_pf *pf = vf->pf;
- u16 vsi_id = vqs->vsi_id;
i40e_status aq_ret = 0;
int i;
@@ -2327,7 +2333,7 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
goto error_param;
}
- if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
+ if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -2427,18 +2433,14 @@ static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg)
{
struct virtchnl_vf_res_request *vfres =
(struct virtchnl_vf_res_request *)msg;
- int req_pairs = vfres->num_queue_pairs;
- int cur_pairs = vf->num_queue_pairs;
+ u16 req_pairs = vfres->num_queue_pairs;
+ u8 cur_pairs = vf->num_queue_pairs;
struct i40e_pf *pf = vf->pf;
if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
return -EINVAL;
- if (req_pairs <= 0) {
- dev_err(&pf->pdev->dev,
- "VF %d tried to request %d queues. Ignoring.\n",
- vf->vf_id, req_pairs);
- } else if (req_pairs > I40E_MAX_VF_QUEUES) {
+ if (req_pairs > I40E_MAX_VF_QUEUES) {
dev_err(&pf->pdev->dev,
"VF %d tried to request more than %d queues.\n",
vf->vf_id,
@@ -2509,7 +2511,7 @@ error_param:
* MAC filters: 16 for multicast, 1 for MAC, 1 for broadcast
*/
#define I40E_VC_MAX_MAC_ADDR_PER_VF (16 + 1 + 1)
-#define I40E_VC_MAX_VLAN_PER_VF 8
+#define I40E_VC_MAX_VLAN_PER_VF 16
/**
* i40e_check_vf_permission
@@ -2587,12 +2589,11 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
(struct virtchnl_ether_addr_list *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
- u16 vsi_id = al->vsi_id;
i40e_status ret = 0;
int i;
if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
- !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
+ !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -2657,12 +2658,11 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
(struct virtchnl_ether_addr_list *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
- u16 vsi_id = al->vsi_id;
i40e_status ret = 0;
int i;
if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
- !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
+ !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -2726,7 +2726,6 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg)
(struct virtchnl_vlan_filter_list *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
- u16 vsi_id = vfl->vsi_id;
i40e_status aq_ret = 0;
int i;
@@ -2737,7 +2736,7 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg)
goto error_param;
}
if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
- !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
+ !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -2798,12 +2797,11 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg)
(struct virtchnl_vlan_filter_list *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
- u16 vsi_id = vfl->vsi_id;
i40e_status aq_ret = 0;
int i;
if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
- !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
+ !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -2920,11 +2918,10 @@ static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg)
(struct virtchnl_rss_key *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
- u16 vsi_id = vrk->vsi_id;
i40e_status aq_ret = 0;
if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
- !i40e_vc_isvalid_vsi_id(vf, vsi_id) ||
+ !i40e_vc_isvalid_vsi_id(vf, vrk->vsi_id) ||
(vrk->key_len != I40E_HKEY_ARRAY_SIZE)) {
aq_ret = I40E_ERR_PARAM;
goto err;
@@ -2951,16 +2948,22 @@ static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg)
(struct virtchnl_rss_lut *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
- u16 vsi_id = vrl->vsi_id;
i40e_status aq_ret = 0;
+ u16 i;
if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
- !i40e_vc_isvalid_vsi_id(vf, vsi_id) ||
+ !i40e_vc_isvalid_vsi_id(vf, vrl->vsi_id) ||
(vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) {
aq_ret = I40E_ERR_PARAM;
goto err;
}
+ for (i = 0; i < vrl->lut_entries; i++)
+ if (vrl->lut[i] >= vf->num_queue_pairs) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+
vsi = pf->vsi[vf->lan_vsi_idx];
aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE);
/* send the response to the VF */
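The new loop above rejects any RSS LUT entry that points past the VF's queue count before the table is programmed, so a buggy or malicious VF cannot steer hashes to queues it does not own. The same bounds check in isolation (a standalone sketch; sizes are illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Validate an RSS indirection table: every entry must name a queue the
 * requester actually owns.
 */
static bool lut_is_valid(const uint8_t *lut, unsigned int entries,
			 unsigned int num_queue_pairs)
{
	for (unsigned int i = 0; i < entries; i++)
		if (lut[i] >= num_queue_pairs)
			return false;	/* maps to I40E_ERR_PARAM above */
	return true;
}

int main(void)
{
	uint8_t good[4] = { 0, 1, 2, 3 };
	uint8_t bad[4]  = { 0, 1, 7, 3 };	/* 7 >= 4 queue pairs */

	printf("good: %d, bad: %d\n",
	       lut_is_valid(good, 4, 4), lut_is_valid(bad, 4, 4));
	return 0;
}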
@@ -3041,14 +3044,15 @@ err:
**/
static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
{
- struct i40e_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
i40e_status aq_ret = 0;
+ struct i40e_vsi *vsi;
if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
goto err;
}
+ vsi = vf->pf->vsi[vf->lan_vsi_idx];
i40e_vlan_stripping_enable(vsi);
/* send the response to the VF */
@@ -3066,14 +3070,15 @@ err:
**/
static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
{
- struct i40e_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
i40e_status aq_ret = 0;
+ struct i40e_vsi *vsi;
if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
goto err;
}
+ vsi = vf->pf->vsi[vf->lan_vsi_idx];
i40e_vlan_stripping_disable(vsi);
/* send the response to the VF */
@@ -3531,8 +3536,9 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
(struct virtchnl_tc_info *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_link_status *ls = &pf->hw.phy.link_info;
- int i, adq_request_qps = 0, speed = 0;
+ int i, adq_request_qps = 0;
i40e_status aq_ret = 0;
+ u64 speed = 0;
if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
@@ -3558,8 +3564,8 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
/* max number of traffic classes for VF currently capped at 4 */
if (!tci->num_tc || tci->num_tc > I40E_MAX_VF_VSI) {
dev_err(&pf->pdev->dev,
- "VF %d trying to set %u TCs, valid range 1-4 TCs per VF\n",
- vf->vf_id, tci->num_tc);
+ "VF %d trying to set %u TCs, valid range 1-%u TCs per VF\n",
+ vf->vf_id, tci->num_tc, I40E_MAX_VF_VSI);
aq_ret = I40E_ERR_PARAM;
goto err;
}
@@ -3569,8 +3575,9 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
if (!tci->list[i].count ||
tci->list[i].count > I40E_DEFAULT_QUEUES_PER_VF) {
dev_err(&pf->pdev->dev,
- "VF %d: TC %d trying to set %u queues, valid range 1-4 queues per TC\n",
- vf->vf_id, i, tci->list[i].count);
+ "VF %d: TC %d trying to set %u queues, valid range 1-%u queues per TC\n",
+ vf->vf_id, i, tci->list[i].count,
+ I40E_DEFAULT_QUEUES_PER_VF);
aq_ret = I40E_ERR_PARAM;
goto err;
}
@@ -3730,19 +3737,6 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
/* perform basic checks on the msg */
ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
- /* perform additional checks specific to this driver */
- if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_KEY) {
- struct virtchnl_rss_key *vrk = (struct virtchnl_rss_key *)msg;
-
- if (vrk->key_len != I40E_HKEY_ARRAY_SIZE)
- ret = -EINVAL;
- } else if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_LUT) {
- struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
-
- if (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)
- ret = -EINVAL;
- }
-
if (ret) {
i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n",
@@ -3943,6 +3937,11 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
int bkt;
u8 i;
+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+ dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+ return -EAGAIN;
+ }
+
/* validate the request */
ret = i40e_validate_vf(pf, vf_id);
if (ret)
@@ -3967,11 +3966,6 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
goto error_param;
}
- if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
- dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
- return -EAGAIN;
- }
-
if (is_multicast_ether_addr(mac)) {
dev_err(&pf->pdev->dev,
"Invalid Ethernet address %pM for VF %d\n", mac, vf_id);
@@ -4302,10 +4296,8 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
vf = &pf->vf[vf_id];
/* first vsi is always the LAN vsi */
vsi = pf->vsi[vf->lan_vsi_idx];
- if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
- dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
- vf_id);
- ret = -EAGAIN;
+ if (!vsi) {
+ ret = -ENOENT;
goto error_param;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 1b17486543ac..32bad014d76c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -215,6 +215,7 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
break;
default:
bpf_warn_invalid_xdp_action(act);
+ /* fall through */
case XDP_ABORTED:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
/* fallthrough -- handle aborts by dropping packet */
@@ -640,8 +641,8 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
struct i40e_tx_desc *tx_desc = NULL;
struct i40e_tx_buffer *tx_bi;
bool work_done = true;
+ struct xdp_desc desc;
dma_addr_t dma;
- u32 len;
while (budget-- > 0) {
if (!unlikely(I40E_DESC_UNUSED(xdp_ring))) {
@@ -650,21 +651,23 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
break;
}
- if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &dma, &len))
+ if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &desc))
break;
- dma_sync_single_for_device(xdp_ring->dev, dma, len,
+ dma = xdp_umem_get_dma(xdp_ring->xsk_umem, desc.addr);
+
+ dma_sync_single_for_device(xdp_ring->dev, dma, desc.len,
DMA_BIDIRECTIONAL);
tx_bi = &xdp_ring->tx_bi[xdp_ring->next_to_use];
- tx_bi->bytecount = len;
+ tx_bi->bytecount = desc.len;
tx_desc = I40E_TX_DESC(xdp_ring, xdp_ring->next_to_use);
tx_desc->buffer_addr = cpu_to_le64(dma);
tx_desc->cmd_type_offset_bsz =
build_ctob(I40E_TX_DESC_CMD_ICRC
| I40E_TX_DESC_CMD_EOP,
- 0, len, 0);
+ 0, desc.len, 0);
xdp_ring->next_to_use++;
if (xdp_ring->next_to_use == xdp_ring->count)
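With this change the zero-copy Tx path pulls a whole descriptor from the umem via xsk_umem_consume_tx() and derives the DMA address itself with xdp_umem_get_dma(), instead of receiving a separate dma/len pair. For reference, the descriptor the driver now consumes is the small fixed layout below (as defined in the AF_XDP UAPI, include/uapi/linux/if_xdp.h, around this kernel version; shown only to make the desc.addr/desc.len usage above concrete):

#include <linux/types.h>

/* AF_XDP Rx/Tx ring descriptor: an offset into the umem plus a length. */
struct xdp_desc {
	__u64 addr;	/* offset of the frame within the umem */
	__u32 len;	/* frame length in bytes */
	__u32 options;	/* reserved, must be zero at this point */
};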
diff --git a/drivers/net/ethernet/intel/iavf/Makefile b/drivers/net/ethernet/intel/iavf/Makefile
index 9cbb5743ed12..c997063ed728 100644
--- a/drivers/net/ethernet/intel/iavf/Makefile
+++ b/drivers/net/ethernet/intel/iavf/Makefile
@@ -12,4 +12,4 @@ subdir-ccflags-y += -I$(src)
obj-$(CONFIG_IAVF) += iavf.o
iavf-objs := iavf_main.o iavf_ethtool.o iavf_virtchnl.o \
- iavf_txrx.o iavf_common.o i40e_adminq.o iavf_client.o
+ iavf_txrx.o iavf_common.o iavf_adminq.o iavf_client.o
diff --git a/drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h
deleted file mode 100644
index e5ae4a1c0cff..000000000000
--- a/drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h
+++ /dev/null
@@ -1,530 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
-
-#ifndef _I40E_ADMINQ_CMD_H_
-#define _I40E_ADMINQ_CMD_H_
-
-/* This header file defines the i40e Admin Queue commands and is shared between
- * i40e Firmware and Software. Do not change the names in this file to IAVF
- * because this file should be diff-able against the i40e version, even
- * though many parts have been removed in this VF version.
- *
- * This file needs to comply with the Linux Kernel coding style.
- */
-
-#define I40E_FW_API_VERSION_MAJOR 0x0001
-#define I40E_FW_API_VERSION_MINOR_X722 0x0005
-#define I40E_FW_API_VERSION_MINOR_X710 0x0008
-
-#define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? \
- I40E_FW_API_VERSION_MINOR_X710 : \
- I40E_FW_API_VERSION_MINOR_X722)
-
-/* API version 1.7 implements additional link and PHY-specific APIs */
-#define I40E_MINOR_VER_GET_LINK_INFO_XL710 0x0007
-
-struct i40e_aq_desc {
- __le16 flags;
- __le16 opcode;
- __le16 datalen;
- __le16 retval;
- __le32 cookie_high;
- __le32 cookie_low;
- union {
- struct {
- __le32 param0;
- __le32 param1;
- __le32 param2;
- __le32 param3;
- } internal;
- struct {
- __le32 param0;
- __le32 param1;
- __le32 addr_high;
- __le32 addr_low;
- } external;
- u8 raw[16];
- } params;
-};
-
-/* Flags sub-structure
- * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 |
- * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE |
- */
-
-/* command flags and offsets*/
-#define I40E_AQ_FLAG_DD_SHIFT 0
-#define I40E_AQ_FLAG_CMP_SHIFT 1
-#define I40E_AQ_FLAG_ERR_SHIFT 2
-#define I40E_AQ_FLAG_VFE_SHIFT 3
-#define I40E_AQ_FLAG_LB_SHIFT 9
-#define I40E_AQ_FLAG_RD_SHIFT 10
-#define I40E_AQ_FLAG_VFC_SHIFT 11
-#define I40E_AQ_FLAG_BUF_SHIFT 12
-#define I40E_AQ_FLAG_SI_SHIFT 13
-#define I40E_AQ_FLAG_EI_SHIFT 14
-#define I40E_AQ_FLAG_FE_SHIFT 15
-
-#define I40E_AQ_FLAG_DD BIT(I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */
-#define I40E_AQ_FLAG_CMP BIT(I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */
-#define I40E_AQ_FLAG_ERR BIT(I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */
-#define I40E_AQ_FLAG_VFE BIT(I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */
-#define I40E_AQ_FLAG_LB BIT(I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */
-#define I40E_AQ_FLAG_RD BIT(I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */
-#define I40E_AQ_FLAG_VFC BIT(I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */
-#define I40E_AQ_FLAG_BUF BIT(I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
-#define I40E_AQ_FLAG_SI BIT(I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */
-#define I40E_AQ_FLAG_EI BIT(I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */
-#define I40E_AQ_FLAG_FE BIT(I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */
-
-/* error codes */
-enum i40e_admin_queue_err {
- I40E_AQ_RC_OK = 0, /* success */
- I40E_AQ_RC_EPERM = 1, /* Operation not permitted */
- I40E_AQ_RC_ENOENT = 2, /* No such element */
- I40E_AQ_RC_ESRCH = 3, /* Bad opcode */
- I40E_AQ_RC_EINTR = 4, /* operation interrupted */
- I40E_AQ_RC_EIO = 5, /* I/O error */
- I40E_AQ_RC_ENXIO = 6, /* No such resource */
- I40E_AQ_RC_E2BIG = 7, /* Arg too long */
- I40E_AQ_RC_EAGAIN = 8, /* Try again */
- I40E_AQ_RC_ENOMEM = 9, /* Out of memory */
- I40E_AQ_RC_EACCES = 10, /* Permission denied */
- I40E_AQ_RC_EFAULT = 11, /* Bad address */
- I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */
- I40E_AQ_RC_EEXIST = 13, /* object already exists */
- I40E_AQ_RC_EINVAL = 14, /* Invalid argument */
- I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */
- I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */
- I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */
- I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */
- I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */
- I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */
- I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */
- I40E_AQ_RC_EFBIG = 22, /* File too large */
-};
-
-/* Admin Queue command opcodes */
-enum i40e_admin_queue_opc {
- /* aq commands */
- i40e_aqc_opc_get_version = 0x0001,
- i40e_aqc_opc_driver_version = 0x0002,
- i40e_aqc_opc_queue_shutdown = 0x0003,
- i40e_aqc_opc_set_pf_context = 0x0004,
-
- /* resource ownership */
- i40e_aqc_opc_request_resource = 0x0008,
- i40e_aqc_opc_release_resource = 0x0009,
-
- i40e_aqc_opc_list_func_capabilities = 0x000A,
- i40e_aqc_opc_list_dev_capabilities = 0x000B,
-
- /* Proxy commands */
- i40e_aqc_opc_set_proxy_config = 0x0104,
- i40e_aqc_opc_set_ns_proxy_table_entry = 0x0105,
-
- /* LAA */
- i40e_aqc_opc_mac_address_read = 0x0107,
- i40e_aqc_opc_mac_address_write = 0x0108,
-
- /* PXE */
- i40e_aqc_opc_clear_pxe_mode = 0x0110,
-
- /* WoL commands */
- i40e_aqc_opc_set_wol_filter = 0x0120,
- i40e_aqc_opc_get_wake_reason = 0x0121,
-
- /* internal switch commands */
- i40e_aqc_opc_get_switch_config = 0x0200,
- i40e_aqc_opc_add_statistics = 0x0201,
- i40e_aqc_opc_remove_statistics = 0x0202,
- i40e_aqc_opc_set_port_parameters = 0x0203,
- i40e_aqc_opc_get_switch_resource_alloc = 0x0204,
- i40e_aqc_opc_set_switch_config = 0x0205,
- i40e_aqc_opc_rx_ctl_reg_read = 0x0206,
- i40e_aqc_opc_rx_ctl_reg_write = 0x0207,
-
- i40e_aqc_opc_add_vsi = 0x0210,
- i40e_aqc_opc_update_vsi_parameters = 0x0211,
- i40e_aqc_opc_get_vsi_parameters = 0x0212,
-
- i40e_aqc_opc_add_pv = 0x0220,
- i40e_aqc_opc_update_pv_parameters = 0x0221,
- i40e_aqc_opc_get_pv_parameters = 0x0222,
-
- i40e_aqc_opc_add_veb = 0x0230,
- i40e_aqc_opc_update_veb_parameters = 0x0231,
- i40e_aqc_opc_get_veb_parameters = 0x0232,
-
- i40e_aqc_opc_delete_element = 0x0243,
-
- i40e_aqc_opc_add_macvlan = 0x0250,
- i40e_aqc_opc_remove_macvlan = 0x0251,
- i40e_aqc_opc_add_vlan = 0x0252,
- i40e_aqc_opc_remove_vlan = 0x0253,
- i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254,
- i40e_aqc_opc_add_tag = 0x0255,
- i40e_aqc_opc_remove_tag = 0x0256,
- i40e_aqc_opc_add_multicast_etag = 0x0257,
- i40e_aqc_opc_remove_multicast_etag = 0x0258,
- i40e_aqc_opc_update_tag = 0x0259,
- i40e_aqc_opc_add_control_packet_filter = 0x025A,
- i40e_aqc_opc_remove_control_packet_filter = 0x025B,
- i40e_aqc_opc_add_cloud_filters = 0x025C,
- i40e_aqc_opc_remove_cloud_filters = 0x025D,
- i40e_aqc_opc_clear_wol_switch_filters = 0x025E,
-
- i40e_aqc_opc_add_mirror_rule = 0x0260,
- i40e_aqc_opc_delete_mirror_rule = 0x0261,
-
- /* Dynamic Device Personalization */
- i40e_aqc_opc_write_personalization_profile = 0x0270,
- i40e_aqc_opc_get_personalization_profile_list = 0x0271,
-
- /* DCB commands */
- i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
- i40e_aqc_opc_dcb_updated = 0x0302,
- i40e_aqc_opc_set_dcb_parameters = 0x0303,
-
- /* TX scheduler */
- i40e_aqc_opc_configure_vsi_bw_limit = 0x0400,
- i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406,
- i40e_aqc_opc_configure_vsi_tc_bw = 0x0407,
- i40e_aqc_opc_query_vsi_bw_config = 0x0408,
- i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A,
- i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410,
-
- i40e_aqc_opc_enable_switching_comp_ets = 0x0413,
- i40e_aqc_opc_modify_switching_comp_ets = 0x0414,
- i40e_aqc_opc_disable_switching_comp_ets = 0x0415,
- i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416,
- i40e_aqc_opc_configure_switching_comp_bw_config = 0x0417,
- i40e_aqc_opc_query_switching_comp_ets_config = 0x0418,
- i40e_aqc_opc_query_port_ets_config = 0x0419,
- i40e_aqc_opc_query_switching_comp_bw_config = 0x041A,
- i40e_aqc_opc_suspend_port_tx = 0x041B,
- i40e_aqc_opc_resume_port_tx = 0x041C,
- i40e_aqc_opc_configure_partition_bw = 0x041D,
- /* hmc */
- i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
- i40e_aqc_opc_set_hmc_resource_profile = 0x0501,
-
- /* phy commands*/
- i40e_aqc_opc_get_phy_abilities = 0x0600,
- i40e_aqc_opc_set_phy_config = 0x0601,
- i40e_aqc_opc_set_mac_config = 0x0603,
- i40e_aqc_opc_set_link_restart_an = 0x0605,
- i40e_aqc_opc_get_link_status = 0x0607,
- i40e_aqc_opc_set_phy_int_mask = 0x0613,
- i40e_aqc_opc_get_local_advt_reg = 0x0614,
- i40e_aqc_opc_set_local_advt_reg = 0x0615,
- i40e_aqc_opc_get_partner_advt = 0x0616,
- i40e_aqc_opc_set_lb_modes = 0x0618,
- i40e_aqc_opc_get_phy_wol_caps = 0x0621,
- i40e_aqc_opc_set_phy_debug = 0x0622,
- i40e_aqc_opc_upload_ext_phy_fm = 0x0625,
- i40e_aqc_opc_run_phy_activity = 0x0626,
- i40e_aqc_opc_set_phy_register = 0x0628,
- i40e_aqc_opc_get_phy_register = 0x0629,
-
- /* NVM commands */
- i40e_aqc_opc_nvm_read = 0x0701,
- i40e_aqc_opc_nvm_erase = 0x0702,
- i40e_aqc_opc_nvm_update = 0x0703,
- i40e_aqc_opc_nvm_config_read = 0x0704,
- i40e_aqc_opc_nvm_config_write = 0x0705,
- i40e_aqc_opc_oem_post_update = 0x0720,
- i40e_aqc_opc_thermal_sensor = 0x0721,
-
- /* virtualization commands */
- i40e_aqc_opc_send_msg_to_pf = 0x0801,
- i40e_aqc_opc_send_msg_to_vf = 0x0802,
- i40e_aqc_opc_send_msg_to_peer = 0x0803,
-
- /* alternate structure */
- i40e_aqc_opc_alternate_write = 0x0900,
- i40e_aqc_opc_alternate_write_indirect = 0x0901,
- i40e_aqc_opc_alternate_read = 0x0902,
- i40e_aqc_opc_alternate_read_indirect = 0x0903,
- i40e_aqc_opc_alternate_write_done = 0x0904,
- i40e_aqc_opc_alternate_set_mode = 0x0905,
- i40e_aqc_opc_alternate_clear_port = 0x0906,
-
- /* LLDP commands */
- i40e_aqc_opc_lldp_get_mib = 0x0A00,
- i40e_aqc_opc_lldp_update_mib = 0x0A01,
- i40e_aqc_opc_lldp_add_tlv = 0x0A02,
- i40e_aqc_opc_lldp_update_tlv = 0x0A03,
- i40e_aqc_opc_lldp_delete_tlv = 0x0A04,
- i40e_aqc_opc_lldp_stop = 0x0A05,
- i40e_aqc_opc_lldp_start = 0x0A06,
-
- /* Tunnel commands */
- i40e_aqc_opc_add_udp_tunnel = 0x0B00,
- i40e_aqc_opc_del_udp_tunnel = 0x0B01,
- i40e_aqc_opc_set_rss_key = 0x0B02,
- i40e_aqc_opc_set_rss_lut = 0x0B03,
- i40e_aqc_opc_get_rss_key = 0x0B04,
- i40e_aqc_opc_get_rss_lut = 0x0B05,
-
- /* Async Events */
- i40e_aqc_opc_event_lan_overflow = 0x1001,
-
- /* OEM commands */
- i40e_aqc_opc_oem_parameter_change = 0xFE00,
- i40e_aqc_opc_oem_device_status_change = 0xFE01,
- i40e_aqc_opc_oem_ocsd_initialize = 0xFE02,
- i40e_aqc_opc_oem_ocbb_initialize = 0xFE03,
-
- /* debug commands */
- i40e_aqc_opc_debug_read_reg = 0xFF03,
- i40e_aqc_opc_debug_write_reg = 0xFF04,
- i40e_aqc_opc_debug_modify_reg = 0xFF07,
- i40e_aqc_opc_debug_dump_internals = 0xFF08,
-};
-
-/* command structures and indirect data structures */
-
-/* Structure naming conventions:
- * - no suffix for direct command descriptor structures
- * - _data for indirect sent data
- * - _resp for indirect return data (data which is both will use _data)
- * - _completion for direct return data
- * - _element_ for repeated elements (may also be _data or _resp)
- *
- * Command structures are expected to overlay the params.raw member of the basic
- * descriptor, and as such cannot exceed 16 bytes in length.
- */
-
-/* This macro is used to generate a compilation error if a structure
- * is not exactly the correct length. It gives a divide by zero error if the
- * structure is not of the correct size, otherwise it creates an enum that is
- * never used.
- */
-#define I40E_CHECK_STRUCT_LEN(n, X) enum i40e_static_assert_enum_##X \
- { i40e_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
-
-/* This macro is used extensively to ensure that command structures are 16
- * bytes in length as they have to map to the raw array of that size.
- */
-#define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X)
-
-/* Queue Shutdown (direct 0x0003) */
-struct i40e_aqc_queue_shutdown {
- __le32 driver_unloading;
-#define I40E_AQ_DRIVER_UNLOADING 0x1
- u8 reserved[12];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown);
-
-struct i40e_aqc_vsi_properties_data {
- /* first 96 byte are written by SW */
- __le16 valid_sections;
-#define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001
-#define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002
-#define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004
-#define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008
-#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010
-#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020
-#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040
-#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080
-#define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100
-#define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200
- /* switch section */
- __le16 switch_id; /* 12bit id combined with flags below */
-#define I40E_AQ_VSI_SW_ID_SHIFT 0x0000
-#define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT)
-#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000
-#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000
-#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000
- u8 sw_reserved[2];
- /* security section */
- u8 sec_flags;
-#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01
-#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02
-#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04
- u8 sec_reserved;
- /* VLAN section */
- __le16 pvid; /* VLANS include priority bits */
- __le16 fcoe_pvid;
- u8 port_vlan_flags;
-#define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00
-#define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \
- I40E_AQ_VSI_PVLAN_MODE_SHIFT)
-#define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01
-#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02
-#define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03
-#define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04
-#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03
-#define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \
- I40E_AQ_VSI_PVLAN_EMOD_SHIFT)
-#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0
-#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08
-#define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10
-#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18
- u8 pvlan_reserved[3];
- /* ingress egress up sections */
- __le32 ingress_table; /* bitmap, 3 bits per up */
-#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0
-#define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP0_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3
-#define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP1_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6
-#define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP2_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9
-#define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP3_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12
-#define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP4_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15
-#define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP5_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18
-#define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP6_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21
-#define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP7_SHIFT)
- __le32 egress_table; /* same defines as for ingress table */
- /* cascaded PV section */
- __le16 cas_pv_tag;
- u8 cas_pv_flags;
-#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00
-#define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \
- I40E_AQ_VSI_CAS_PV_TAGX_SHIFT)
-#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00
-#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01
-#define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02
-#define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10
-#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20
-#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40
- u8 cas_pv_reserved;
- /* queue mapping section */
- __le16 mapping_flags;
-#define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0
-#define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1
- __le16 queue_mapping[16];
-#define I40E_AQ_VSI_QUEUE_SHIFT 0x0
-#define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT)
- __le16 tc_mapping[8];
-#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0
-#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \
- I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
-#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9
-#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \
- I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
- /* queueing option section */
- u8 queueing_opt_flags;
-#define I40E_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA 0x04
-#define I40E_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA 0x08
-#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10
-#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20
-#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_PF 0x00
-#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI 0x40
- u8 queueing_opt_reserved[3];
- /* scheduler section */
- u8 up_enable_bits;
- u8 sched_reserved;
- /* outer up section */
- __le32 outer_up_table; /* same structure and defines as ingress tbl */
- u8 cmd_reserved[8];
- /* last 32 bytes are written by FW */
- __le16 qs_handle[8];
-#define I40E_AQ_VSI_QS_HANDLE_INVALID 0xFFFF
- __le16 stat_counter_idx;
- __le16 sched_id;
- u8 resp_reserved[12];
-};
-
-I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data);
-
-/* Get VEB Parameters (direct 0x0232)
- * uses i40e_aqc_switch_seid for the descriptor
- */
-struct i40e_aqc_get_veb_parameters_completion {
- __le16 seid;
- __le16 switch_id;
- __le16 veb_flags; /* only the first/last flags from 0x0230 is valid */
- __le16 statistic_index;
- __le16 vebs_used;
- __le16 vebs_free;
- u8 reserved[4];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion);
-
-#define I40E_LINK_SPEED_100MB_SHIFT 0x1
-#define I40E_LINK_SPEED_1000MB_SHIFT 0x2
-#define I40E_LINK_SPEED_10GB_SHIFT 0x3
-#define I40E_LINK_SPEED_40GB_SHIFT 0x4
-#define I40E_LINK_SPEED_20GB_SHIFT 0x5
-#define I40E_LINK_SPEED_25GB_SHIFT 0x6
-
-enum i40e_aq_link_speed {
- I40E_LINK_SPEED_UNKNOWN = 0,
- I40E_LINK_SPEED_100MB = BIT(I40E_LINK_SPEED_100MB_SHIFT),
- I40E_LINK_SPEED_1GB = BIT(I40E_LINK_SPEED_1000MB_SHIFT),
- I40E_LINK_SPEED_10GB = BIT(I40E_LINK_SPEED_10GB_SHIFT),
- I40E_LINK_SPEED_40GB = BIT(I40E_LINK_SPEED_40GB_SHIFT),
- I40E_LINK_SPEED_20GB = BIT(I40E_LINK_SPEED_20GB_SHIFT),
- I40E_LINK_SPEED_25GB = BIT(I40E_LINK_SPEED_25GB_SHIFT),
-};
-
-/* Send to PF command (indirect 0x0801) id is only used by PF
- * Send to VF command (indirect 0x0802) id is only used by PF
- * Send to Peer PF command (indirect 0x0803)
- */
-struct i40e_aqc_pf_vf_message {
- __le32 id;
- u8 reserved[4];
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message);
-
-struct i40e_aqc_get_set_rss_key {
-#define I40E_AQC_SET_RSS_KEY_VSI_VALID BIT(15)
-#define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0
-#define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \
- I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT)
- __le16 vsi_id;
- u8 reserved[6];
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_key);
-
-struct i40e_aqc_get_set_rss_key_data {
- u8 standard_rss_key[0x28];
- u8 extended_hash_key[0xc];
-};
-
-I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data);
-
-struct i40e_aqc_get_set_rss_lut {
-#define I40E_AQC_SET_RSS_LUT_VSI_VALID BIT(15)
-#define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0
-#define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \
- I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT)
- __le16 vsi_id;
-#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0
-#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK \
- BIT(I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
-
-#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0
-#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1
- __le16 flags;
- u8 reserved[4];
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_lut);
-#endif /* _I40E_ADMINQ_CMD_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index 272d76b733aa..9fc635d816d2 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -109,7 +109,7 @@ struct iavf_q_vector {
/* Helper macros to switch between ints/sec and what the register uses.
* And yes, it's the same math going both ways. The lowest value
- * supported by all of the i40e hardware is 8.
+ * supported by all of the iavf hardware is 8.
*/
#define EITR_INTS_PER_SEC_TO_REG(_eitr) \
((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8)
@@ -171,6 +171,7 @@ enum iavf_state_t {
__IAVF_INIT_GET_RESOURCES, /* aq msg sent, awaiting reply */
__IAVF_INIT_SW, /* got resources, setting up structs */
__IAVF_RESETTING, /* in reset */
+ __IAVF_COMM_FAILED, /* communication with PF failed */
/* Below here, watchdog is running */
__IAVF_DOWN, /* ready, can be opened */
__IAVF_DOWN_PENDING, /* descending, waiting for watchdog */
@@ -216,7 +217,6 @@ struct iavf_cloud_filter {
/* board specific private data structure */
struct iavf_adapter {
- struct timer_list watchdog_timer;
struct work_struct reset_task;
struct work_struct adminq_task;
struct delayed_work client_task;
@@ -244,7 +244,7 @@ struct iavf_adapter {
int num_iwarp_msix;
int iwarp_base_vector;
u32 client_pending;
- struct i40e_client_instance *cinst;
+ struct iavf_client_instance *cinst;
struct msix_entry *msix_entries;
u32 flags;
@@ -303,7 +303,7 @@ struct iavf_adapter {
enum iavf_state_t state;
unsigned long crit_section;
- struct work_struct watchdog_task;
+ struct delayed_work watchdog_task;
bool netdev_registered;
bool link_up;
enum virtchnl_link_speed link_speed;
@@ -351,7 +351,7 @@ struct iavf_adapter {
/* Ethtool Private Flags */
/* lan device, used by client interface */
-struct i40e_device {
+struct iavf_device {
struct list_head list;
struct iavf_adapter *vf;
};
@@ -359,6 +359,7 @@ struct i40e_device {
/* needed by iavf_ethtool.c */
extern char iavf_driver_name[];
extern const char iavf_driver_version[];
+extern struct workqueue_struct *iavf_wq;
int iavf_up(struct iavf_adapter *adapter);
void iavf_down(struct iavf_adapter *adapter);
@@ -402,7 +403,7 @@ void iavf_enable_vlan_stripping(struct iavf_adapter *adapter);
void iavf_disable_vlan_stripping(struct iavf_adapter *adapter);
void iavf_virtchnl_completion(struct iavf_adapter *adapter,
enum virtchnl_ops v_opcode,
- iavf_status v_retval, u8 *msg, u16 msglen);
+ enum iavf_status v_retval, u8 *msg, u16 msglen);
int iavf_config_rss(struct iavf_adapter *adapter);
int iavf_lan_add_device(struct iavf_adapter *adapter);
int iavf_lan_del_device(struct iavf_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/iavf/i40e_adminq.c b/drivers/net/ethernet/intel/iavf/iavf_adminq.c
index fca1ecfd9f71..9fa3fa99b4c2 100644
--- a/drivers/net/ethernet/intel/iavf/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_adminq.c
@@ -4,16 +4,16 @@
#include "iavf_status.h"
#include "iavf_type.h"
#include "iavf_register.h"
-#include "i40e_adminq.h"
+#include "iavf_adminq.h"
#include "iavf_prototype.h"
/**
- * i40e_adminq_init_regs - Initialize AdminQ registers
+ * iavf_adminq_init_regs - Initialize AdminQ registers
* @hw: pointer to the hardware structure
*
* This assumes the alloc_asq and alloc_arq functions have already been called
**/
-static void i40e_adminq_init_regs(struct iavf_hw *hw)
+static void iavf_adminq_init_regs(struct iavf_hw *hw)
{
/* set head and tail registers in our local struct */
hw->aq.asq.tail = IAVF_VF_ATQT1;
@@ -29,24 +29,24 @@ static void i40e_adminq_init_regs(struct iavf_hw *hw)
}
/**
- * i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
+ * iavf_alloc_adminq_asq_ring - Allocate Admin Queue send rings
* @hw: pointer to the hardware structure
**/
-static iavf_status i40e_alloc_adminq_asq_ring(struct iavf_hw *hw)
+static enum iavf_status iavf_alloc_adminq_asq_ring(struct iavf_hw *hw)
{
- iavf_status ret_code;
+ enum iavf_status ret_code;
ret_code = iavf_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
- i40e_mem_atq_ring,
+ iavf_mem_atq_ring,
(hw->aq.num_asq_entries *
- sizeof(struct i40e_aq_desc)),
+ sizeof(struct iavf_aq_desc)),
IAVF_ADMINQ_DESC_ALIGNMENT);
if (ret_code)
return ret_code;
ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
(hw->aq.num_asq_entries *
- sizeof(struct i40e_asq_cmd_details)));
+ sizeof(struct iavf_asq_cmd_details)));
if (ret_code) {
iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
return ret_code;
@@ -56,55 +56,55 @@ static iavf_status i40e_alloc_adminq_asq_ring(struct iavf_hw *hw)
}
/**
- * i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
+ * iavf_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
* @hw: pointer to the hardware structure
**/
-static iavf_status i40e_alloc_adminq_arq_ring(struct iavf_hw *hw)
+static enum iavf_status iavf_alloc_adminq_arq_ring(struct iavf_hw *hw)
{
- iavf_status ret_code;
+ enum iavf_status ret_code;
ret_code = iavf_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
- i40e_mem_arq_ring,
+ iavf_mem_arq_ring,
(hw->aq.num_arq_entries *
- sizeof(struct i40e_aq_desc)),
+ sizeof(struct iavf_aq_desc)),
IAVF_ADMINQ_DESC_ALIGNMENT);
return ret_code;
}
/**
- * i40e_free_adminq_asq - Free Admin Queue send rings
+ * iavf_free_adminq_asq - Free Admin Queue send rings
* @hw: pointer to the hardware structure
*
* This assumes the posted send buffers have already been cleaned
* and de-allocated
**/
-static void i40e_free_adminq_asq(struct iavf_hw *hw)
+static void iavf_free_adminq_asq(struct iavf_hw *hw)
{
iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}
/**
- * i40e_free_adminq_arq - Free Admin Queue receive rings
+ * iavf_free_adminq_arq - Free Admin Queue receive rings
* @hw: pointer to the hardware structure
*
* This assumes the posted receive buffers have already been cleaned
* and de-allocated
**/
-static void i40e_free_adminq_arq(struct iavf_hw *hw)
+static void iavf_free_adminq_arq(struct iavf_hw *hw)
{
iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}
/**
- * i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
+ * iavf_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
* @hw: pointer to the hardware structure
**/
-static iavf_status i40e_alloc_arq_bufs(struct iavf_hw *hw)
+static enum iavf_status iavf_alloc_arq_bufs(struct iavf_hw *hw)
{
- struct i40e_aq_desc *desc;
+ struct iavf_aq_desc *desc;
struct iavf_dma_mem *bi;
- iavf_status ret_code;
+ enum iavf_status ret_code;
int i;
/* We'll be allocating the buffer info memory first, then we can
@@ -123,7 +123,7 @@ static iavf_status i40e_alloc_arq_bufs(struct iavf_hw *hw)
for (i = 0; i < hw->aq.num_arq_entries; i++) {
bi = &hw->aq.arq.r.arq_bi[i];
ret_code = iavf_allocate_dma_mem(hw, bi,
- i40e_mem_arq_buf,
+ iavf_mem_arq_buf,
hw->aq.arq_buf_size,
IAVF_ADMINQ_DESC_ALIGNMENT);
if (ret_code)
@@ -132,9 +132,9 @@ static iavf_status i40e_alloc_arq_bufs(struct iavf_hw *hw)
/* now configure the descriptors for use */
desc = IAVF_ADMINQ_DESC(hw->aq.arq, i);
- desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
- if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
- desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
+ desc->flags = cpu_to_le16(IAVF_AQ_FLAG_BUF);
+ if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
+ desc->flags |= cpu_to_le16(IAVF_AQ_FLAG_LB);
desc->opcode = 0;
 /* This is in accordance with Admin queue design; there is no
* register for buffer size configuration
@@ -165,13 +165,13 @@ unwind_alloc_arq_bufs:
}
/**
- * i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
+ * iavf_alloc_asq_bufs - Allocate empty buffer structs for the send queue
* @hw: pointer to the hardware structure
**/
-static iavf_status i40e_alloc_asq_bufs(struct iavf_hw *hw)
+static enum iavf_status iavf_alloc_asq_bufs(struct iavf_hw *hw)
{
struct iavf_dma_mem *bi;
- iavf_status ret_code;
+ enum iavf_status ret_code;
int i;
/* No mapped memory needed yet, just the buffer info structures */
@@ -186,7 +186,7 @@ static iavf_status i40e_alloc_asq_bufs(struct iavf_hw *hw)
for (i = 0; i < hw->aq.num_asq_entries; i++) {
bi = &hw->aq.asq.r.asq_bi[i];
ret_code = iavf_allocate_dma_mem(hw, bi,
- i40e_mem_asq_buf,
+ iavf_mem_asq_buf,
hw->aq.asq_buf_size,
IAVF_ADMINQ_DESC_ALIGNMENT);
if (ret_code)
@@ -206,10 +206,10 @@ unwind_alloc_asq_bufs:
}
/**
- * i40e_free_arq_bufs - Free receive queue buffer info elements
+ * iavf_free_arq_bufs - Free receive queue buffer info elements
* @hw: pointer to the hardware structure
**/
-static void i40e_free_arq_bufs(struct iavf_hw *hw)
+static void iavf_free_arq_bufs(struct iavf_hw *hw)
{
int i;
@@ -225,10 +225,10 @@ static void i40e_free_arq_bufs(struct iavf_hw *hw)
}
/**
- * i40e_free_asq_bufs - Free send queue buffer info elements
+ * iavf_free_asq_bufs - Free send queue buffer info elements
* @hw: pointer to the hardware structure
**/
-static void i40e_free_asq_bufs(struct iavf_hw *hw)
+static void iavf_free_asq_bufs(struct iavf_hw *hw)
{
int i;
@@ -248,14 +248,14 @@ static void i40e_free_asq_bufs(struct iavf_hw *hw)
}
/**
- * i40e_config_asq_regs - configure ASQ registers
+ * iavf_config_asq_regs - configure ASQ registers
* @hw: pointer to the hardware structure
*
* Configure base address and length registers for the transmit queue
**/
-static iavf_status i40e_config_asq_regs(struct iavf_hw *hw)
+static enum iavf_status iavf_config_asq_regs(struct iavf_hw *hw)
{
- iavf_status ret_code = 0;
+ enum iavf_status ret_code = 0;
u32 reg = 0;
/* Clear Head and Tail */
@@ -271,20 +271,20 @@ static iavf_status i40e_config_asq_regs(struct iavf_hw *hw)
/* Check one register to verify that config was applied */
reg = rd32(hw, hw->aq.asq.bal);
if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
- ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+ ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;
return ret_code;
}
/**
- * i40e_config_arq_regs - ARQ register configuration
+ * iavf_config_arq_regs - ARQ register configuration
* @hw: pointer to the hardware structure
*
 * Configure base address and length registers for the receive (event) queue
**/
-static iavf_status i40e_config_arq_regs(struct iavf_hw *hw)
+static enum iavf_status iavf_config_arq_regs(struct iavf_hw *hw)
{
- iavf_status ret_code = 0;
+ enum iavf_status ret_code = 0;
u32 reg = 0;
/* Clear Head and Tail */
@@ -303,13 +303,13 @@ static iavf_status i40e_config_arq_regs(struct iavf_hw *hw)
/* Check one register to verify that config was applied */
reg = rd32(hw, hw->aq.arq.bal);
if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
- ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+ ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;
return ret_code;
}
/**
- * i40e_init_asq - main initialization routine for ASQ
+ * iavf_init_asq - main initialization routine for ASQ
* @hw: pointer to the hardware structure
*
* This is the main initialization routine for the Admin Send Queue
@@ -321,20 +321,20 @@ static iavf_status i40e_config_arq_regs(struct iavf_hw *hw)
* Do *NOT* hold the lock when calling this as the memory allocation routines
* called are not going to be atomic context safe
**/
-static iavf_status i40e_init_asq(struct iavf_hw *hw)
+static enum iavf_status iavf_init_asq(struct iavf_hw *hw)
{
- iavf_status ret_code = 0;
+ enum iavf_status ret_code = 0;
if (hw->aq.asq.count > 0) {
/* queue already initialized */
- ret_code = I40E_ERR_NOT_READY;
+ ret_code = IAVF_ERR_NOT_READY;
goto init_adminq_exit;
}
/* verify input for valid configuration */
if ((hw->aq.num_asq_entries == 0) ||
(hw->aq.asq_buf_size == 0)) {
- ret_code = I40E_ERR_CONFIG;
+ ret_code = IAVF_ERR_CONFIG;
goto init_adminq_exit;
}
@@ -342,17 +342,17 @@ static iavf_status i40e_init_asq(struct iavf_hw *hw)
hw->aq.asq.next_to_clean = 0;
/* allocate the ring memory */
- ret_code = i40e_alloc_adminq_asq_ring(hw);
+ ret_code = iavf_alloc_adminq_asq_ring(hw);
if (ret_code)
goto init_adminq_exit;
/* allocate buffers in the rings */
- ret_code = i40e_alloc_asq_bufs(hw);
+ ret_code = iavf_alloc_asq_bufs(hw);
if (ret_code)
goto init_adminq_free_rings;
/* initialize base registers */
- ret_code = i40e_config_asq_regs(hw);
+ ret_code = iavf_config_asq_regs(hw);
if (ret_code)
goto init_adminq_free_rings;
@@ -361,14 +361,14 @@ static iavf_status i40e_init_asq(struct iavf_hw *hw)
goto init_adminq_exit;
init_adminq_free_rings:
- i40e_free_adminq_asq(hw);
+ iavf_free_adminq_asq(hw);
init_adminq_exit:
return ret_code;
}
/**
- * i40e_init_arq - initialize ARQ
+ * iavf_init_arq - initialize ARQ
* @hw: pointer to the hardware structure
*
* The main initialization routine for the Admin Receive (Event) Queue.
@@ -380,20 +380,20 @@ init_adminq_exit:
* Do *NOT* hold the lock when calling this as the memory allocation routines
* called are not going to be atomic context safe
**/
-static iavf_status i40e_init_arq(struct iavf_hw *hw)
+static enum iavf_status iavf_init_arq(struct iavf_hw *hw)
{
- iavf_status ret_code = 0;
+ enum iavf_status ret_code = 0;
if (hw->aq.arq.count > 0) {
/* queue already initialized */
- ret_code = I40E_ERR_NOT_READY;
+ ret_code = IAVF_ERR_NOT_READY;
goto init_adminq_exit;
}
/* verify input for valid configuration */
if ((hw->aq.num_arq_entries == 0) ||
(hw->aq.arq_buf_size == 0)) {
- ret_code = I40E_ERR_CONFIG;
+ ret_code = IAVF_ERR_CONFIG;
goto init_adminq_exit;
}
@@ -401,17 +401,17 @@ static iavf_status i40e_init_arq(struct iavf_hw *hw)
hw->aq.arq.next_to_clean = 0;
/* allocate the ring memory */
- ret_code = i40e_alloc_adminq_arq_ring(hw);
+ ret_code = iavf_alloc_adminq_arq_ring(hw);
if (ret_code)
goto init_adminq_exit;
/* allocate buffers in the rings */
- ret_code = i40e_alloc_arq_bufs(hw);
+ ret_code = iavf_alloc_arq_bufs(hw);
if (ret_code)
goto init_adminq_free_rings;
/* initialize base registers */
- ret_code = i40e_config_arq_regs(hw);
+ ret_code = iavf_config_arq_regs(hw);
if (ret_code)
goto init_adminq_free_rings;
@@ -420,26 +420,26 @@ static iavf_status i40e_init_arq(struct iavf_hw *hw)
goto init_adminq_exit;
init_adminq_free_rings:
- i40e_free_adminq_arq(hw);
+ iavf_free_adminq_arq(hw);
init_adminq_exit:
return ret_code;
}
/**
- * i40e_shutdown_asq - shutdown the ASQ
+ * iavf_shutdown_asq - shutdown the ASQ
* @hw: pointer to the hardware structure
*
* The main shutdown routine for the Admin Send Queue
**/
-static iavf_status i40e_shutdown_asq(struct iavf_hw *hw)
+static enum iavf_status iavf_shutdown_asq(struct iavf_hw *hw)
{
- iavf_status ret_code = 0;
+ enum iavf_status ret_code = 0;
mutex_lock(&hw->aq.asq_mutex);
if (hw->aq.asq.count == 0) {
- ret_code = I40E_ERR_NOT_READY;
+ ret_code = IAVF_ERR_NOT_READY;
goto shutdown_asq_out;
}
@@ -453,7 +453,7 @@ static iavf_status i40e_shutdown_asq(struct iavf_hw *hw)
hw->aq.asq.count = 0; /* to indicate uninitialized queue */
/* free ring buffers */
- i40e_free_asq_bufs(hw);
+ iavf_free_asq_bufs(hw);
shutdown_asq_out:
mutex_unlock(&hw->aq.asq_mutex);
@@ -461,19 +461,19 @@ shutdown_asq_out:
}
/**
- * i40e_shutdown_arq - shutdown ARQ
+ * iavf_shutdown_arq - shutdown ARQ
* @hw: pointer to the hardware structure
*
* The main shutdown routine for the Admin Receive Queue
**/
-static iavf_status i40e_shutdown_arq(struct iavf_hw *hw)
+static enum iavf_status iavf_shutdown_arq(struct iavf_hw *hw)
{
- iavf_status ret_code = 0;
+ enum iavf_status ret_code = 0;
mutex_lock(&hw->aq.arq_mutex);
if (hw->aq.arq.count == 0) {
- ret_code = I40E_ERR_NOT_READY;
+ ret_code = IAVF_ERR_NOT_READY;
goto shutdown_arq_out;
}
@@ -487,7 +487,7 @@ static iavf_status i40e_shutdown_arq(struct iavf_hw *hw)
hw->aq.arq.count = 0; /* to indicate uninitialized queue */
/* free ring buffers */
- i40e_free_arq_bufs(hw);
+ iavf_free_arq_bufs(hw);
shutdown_arq_out:
mutex_unlock(&hw->aq.arq_mutex);
@@ -505,32 +505,32 @@ shutdown_arq_out:
* - hw->aq.arq_buf_size
* - hw->aq.asq_buf_size
**/
-iavf_status iavf_init_adminq(struct iavf_hw *hw)
+enum iavf_status iavf_init_adminq(struct iavf_hw *hw)
{
- iavf_status ret_code;
+ enum iavf_status ret_code;
/* verify input for valid configuration */
if ((hw->aq.num_arq_entries == 0) ||
(hw->aq.num_asq_entries == 0) ||
(hw->aq.arq_buf_size == 0) ||
(hw->aq.asq_buf_size == 0)) {
- ret_code = I40E_ERR_CONFIG;
+ ret_code = IAVF_ERR_CONFIG;
goto init_adminq_exit;
}
/* Set up register offsets */
- i40e_adminq_init_regs(hw);
+ iavf_adminq_init_regs(hw);
/* setup ASQ command write back timeout */
- hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;
+ hw->aq.asq_cmd_timeout = IAVF_ASQ_CMD_TIMEOUT;
/* allocate the ASQ */
- ret_code = i40e_init_asq(hw);
+ ret_code = iavf_init_asq(hw);
if (ret_code)
goto init_adminq_destroy_locks;
/* allocate the ARQ */
- ret_code = i40e_init_arq(hw);
+ ret_code = iavf_init_arq(hw);
if (ret_code)
goto init_adminq_free_asq;
@@ -538,7 +538,7 @@ iavf_status iavf_init_adminq(struct iavf_hw *hw)
goto init_adminq_exit;
init_adminq_free_asq:
- i40e_shutdown_asq(hw);
+ iavf_shutdown_asq(hw);
init_adminq_destroy_locks:
init_adminq_exit:
@@ -549,53 +549,53 @@ init_adminq_exit:
* iavf_shutdown_adminq - shutdown routine for the Admin Queue
* @hw: pointer to the hardware structure
**/
-iavf_status iavf_shutdown_adminq(struct iavf_hw *hw)
+enum iavf_status iavf_shutdown_adminq(struct iavf_hw *hw)
{
- iavf_status ret_code = 0;
+ enum iavf_status ret_code = 0;
if (iavf_check_asq_alive(hw))
iavf_aq_queue_shutdown(hw, true);
- i40e_shutdown_asq(hw);
- i40e_shutdown_arq(hw);
+ iavf_shutdown_asq(hw);
+ iavf_shutdown_arq(hw);
return ret_code;
}
/**
- * i40e_clean_asq - cleans Admin send queue
+ * iavf_clean_asq - cleans Admin send queue
* @hw: pointer to the hardware structure
*
* returns the number of free desc
**/
-static u16 i40e_clean_asq(struct iavf_hw *hw)
+static u16 iavf_clean_asq(struct iavf_hw *hw)
{
struct iavf_adminq_ring *asq = &hw->aq.asq;
- struct i40e_asq_cmd_details *details;
+ struct iavf_asq_cmd_details *details;
u16 ntc = asq->next_to_clean;
- struct i40e_aq_desc desc_cb;
- struct i40e_aq_desc *desc;
+ struct iavf_aq_desc desc_cb;
+ struct iavf_aq_desc *desc;
desc = IAVF_ADMINQ_DESC(*asq, ntc);
- details = I40E_ADMINQ_DETAILS(*asq, ntc);
+ details = IAVF_ADMINQ_DETAILS(*asq, ntc);
while (rd32(hw, hw->aq.asq.head) != ntc) {
iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
"ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
if (details->callback) {
- I40E_ADMINQ_CALLBACK cb_func =
- (I40E_ADMINQ_CALLBACK)details->callback;
+ IAVF_ADMINQ_CALLBACK cb_func =
+ (IAVF_ADMINQ_CALLBACK)details->callback;
desc_cb = *desc;
cb_func(hw, &desc_cb);
}
- memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
+ memset((void *)desc, 0, sizeof(struct iavf_aq_desc));
memset((void *)details, 0,
- sizeof(struct i40e_asq_cmd_details));
+ sizeof(struct iavf_asq_cmd_details));
ntc++;
if (ntc == asq->count)
ntc = 0;
desc = IAVF_ADMINQ_DESC(*asq, ntc);
- details = I40E_ADMINQ_DETAILS(*asq, ntc);
+ details = IAVF_ADMINQ_DETAILS(*asq, ntc);
}
asq->next_to_clean = ntc;
@@ -629,16 +629,17 @@ bool iavf_asq_done(struct iavf_hw *hw)
* This is the main send command driver routine for the Admin Queue send
 * queue. It runs the queue, cleans the queue, etc.
**/
-iavf_status iavf_asq_send_command(struct iavf_hw *hw, struct i40e_aq_desc *desc,
- void *buff, /* can be NULL */
- u16 buff_size,
- struct i40e_asq_cmd_details *cmd_details)
+enum iavf_status iavf_asq_send_command(struct iavf_hw *hw,
+ struct iavf_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct iavf_asq_cmd_details *cmd_details)
{
struct iavf_dma_mem *dma_buff = NULL;
- struct i40e_asq_cmd_details *details;
- struct i40e_aq_desc *desc_on_ring;
+ struct iavf_asq_cmd_details *details;
+ struct iavf_aq_desc *desc_on_ring;
bool cmd_completed = false;
- iavf_status status = 0;
+ enum iavf_status status = 0;
u16 retval = 0;
u32 val = 0;
@@ -647,21 +648,21 @@ iavf_status iavf_asq_send_command(struct iavf_hw *hw, struct i40e_aq_desc *desc,
if (hw->aq.asq.count == 0) {
iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
"AQTX: Admin queue not initialized.\n");
- status = I40E_ERR_QUEUE_EMPTY;
+ status = IAVF_ERR_QUEUE_EMPTY;
goto asq_send_command_error;
}
- hw->aq.asq_last_status = I40E_AQ_RC_OK;
+ hw->aq.asq_last_status = IAVF_AQ_RC_OK;
val = rd32(hw, hw->aq.asq.head);
if (val >= hw->aq.num_asq_entries) {
iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
"AQTX: head overrun at %d\n", val);
- status = I40E_ERR_QUEUE_EMPTY;
+ status = IAVF_ERR_QUEUE_EMPTY;
goto asq_send_command_error;
}
- details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
+ details = IAVF_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
if (cmd_details) {
*details = *cmd_details;
@@ -676,7 +677,7 @@ iavf_status iavf_asq_send_command(struct iavf_hw *hw, struct i40e_aq_desc *desc,
cpu_to_le32(lower_32_bits(details->cookie));
}
} else {
- memset(details, 0, sizeof(struct i40e_asq_cmd_details));
+ memset(details, 0, sizeof(struct iavf_asq_cmd_details));
}
/* clear requested flags and then set additional flags if defined */
@@ -688,7 +689,7 @@ iavf_status iavf_asq_send_command(struct iavf_hw *hw, struct i40e_aq_desc *desc,
IAVF_DEBUG_AQ_MESSAGE,
"AQTX: Invalid buffer size: %d.\n",
buff_size);
- status = I40E_ERR_INVALID_SIZE;
+ status = IAVF_ERR_INVALID_SIZE;
goto asq_send_command_error;
}
@@ -696,7 +697,7 @@ iavf_status iavf_asq_send_command(struct iavf_hw *hw, struct i40e_aq_desc *desc,
iavf_debug(hw,
IAVF_DEBUG_AQ_MESSAGE,
"AQTX: Async flag not set along with postpone flag");
- status = I40E_ERR_PARAM;
+ status = IAVF_ERR_PARAM;
goto asq_send_command_error;
}
@@ -707,11 +708,11 @@ iavf_status iavf_asq_send_command(struct iavf_hw *hw, struct i40e_aq_desc *desc,
/* the clean function called here could be called in a separate thread
* in case of asynchronous completions
*/
- if (i40e_clean_asq(hw) == 0) {
+ if (iavf_clean_asq(hw) == 0) {
iavf_debug(hw,
IAVF_DEBUG_AQ_MESSAGE,
"AQTX: Error queue is full.\n");
- status = I40E_ERR_ADMIN_QUEUE_FULL;
+ status = IAVF_ERR_ADMIN_QUEUE_FULL;
goto asq_send_command_error;
}
@@ -780,13 +781,13 @@ iavf_status iavf_asq_send_command(struct iavf_hw *hw, struct i40e_aq_desc *desc,
retval &= 0xff;
}
cmd_completed = true;
- if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
+ if ((enum iavf_admin_queue_err)retval == IAVF_AQ_RC_OK)
status = 0;
- else if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_EBUSY)
- status = I40E_ERR_NOT_READY;
+ else if ((enum iavf_admin_queue_err)retval == IAVF_AQ_RC_EBUSY)
+ status = IAVF_ERR_NOT_READY;
else
- status = I40E_ERR_ADMIN_QUEUE_ERROR;
- hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
+ status = IAVF_ERR_ADMIN_QUEUE_ERROR;
+ hw->aq.asq_last_status = (enum iavf_admin_queue_err)retval;
}
iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
@@ -803,11 +804,11 @@ iavf_status iavf_asq_send_command(struct iavf_hw *hw, struct i40e_aq_desc *desc,
if (rd32(hw, hw->aq.asq.len) & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
"AQTX: AQ Critical error.\n");
- status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
+ status = IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
} else {
iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
"AQTX: Writeback timeout.\n");
- status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
+ status = IAVF_ERR_ADMIN_QUEUE_TIMEOUT;
}
}
@@ -823,12 +824,12 @@ asq_send_command_error:
*
* Fill the desc with default values
**/
-void iavf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, u16 opcode)
+void iavf_fill_default_direct_cmd_desc(struct iavf_aq_desc *desc, u16 opcode)
{
/* zero out the desc */
- memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
+ memset((void *)desc, 0, sizeof(struct iavf_aq_desc));
desc->opcode = cpu_to_le16(opcode);
- desc->flags = cpu_to_le16(I40E_AQ_FLAG_SI);
+ desc->flags = cpu_to_le16(IAVF_AQ_FLAG_SI);
}
/**
@@ -841,13 +842,13 @@ void iavf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, u16 opcode)
* the contents through e. It can also return how many events are
* left to process through 'pending'
**/
-iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
- struct i40e_arq_event_info *e,
- u16 *pending)
+enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
+ struct iavf_arq_event_info *e,
+ u16 *pending)
{
u16 ntc = hw->aq.arq.next_to_clean;
- struct i40e_aq_desc *desc;
- iavf_status ret_code = 0;
+ struct iavf_aq_desc *desc;
+ enum iavf_status ret_code = 0;
struct iavf_dma_mem *bi;
u16 desc_idx;
u16 datalen;
@@ -863,7 +864,7 @@ iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
if (hw->aq.arq.count == 0) {
iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
"AQRX: Admin queue not initialized.\n");
- ret_code = I40E_ERR_QUEUE_EMPTY;
+ ret_code = IAVF_ERR_QUEUE_EMPTY;
goto clean_arq_element_err;
}
@@ -871,7 +872,7 @@ iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
ntu = rd32(hw, hw->aq.arq.head) & IAVF_VF_ARQH1_ARQH_MASK;
if (ntu == ntc) {
/* nothing to do - shouldn't need to update ring's values */
- ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
+ ret_code = IAVF_ERR_ADMIN_QUEUE_NO_WORK;
goto clean_arq_element_out;
}
@@ -880,10 +881,10 @@ iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
desc_idx = ntc;
hw->aq.arq_last_status =
- (enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
+ (enum iavf_admin_queue_err)le16_to_cpu(desc->retval);
flags = le16_to_cpu(desc->flags);
- if (flags & I40E_AQ_FLAG_ERR) {
- ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+ if (flags & IAVF_AQ_FLAG_ERR) {
+ ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;
iavf_debug(hw,
IAVF_DEBUG_AQ_MESSAGE,
"AQRX: Event received with error 0x%X.\n",
@@ -906,11 +907,11 @@ iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
* size
*/
bi = &hw->aq.arq.r.arq_bi[ntc];
- memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
+ memset((void *)desc, 0, sizeof(struct iavf_aq_desc));
- desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
- if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
- desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
+ desc->flags = cpu_to_le16(IAVF_AQ_FLAG_BUF);
+ if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
+ desc->flags |= cpu_to_le16(IAVF_AQ_FLAG_LB);
desc->datalen = cpu_to_le16((u16)bi->size);
desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
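For orientation, the renamed descriptor and send helpers above compose in the usual fill-then-send pattern. A minimal usage sketch, not taken from the driver (the wrapper name and the caller-supplied opcode are hypothetical; the helpers and types are the renamed symbols in this file):

/* Hypothetical sketch: issue a direct (bufferless) admin command.
 * Declarations come from iavf_adminq.h / iavf_prototype.h.
 */
static enum iavf_status example_send_direct_cmd(struct iavf_hw *hw, u16 opcode)
{
	struct iavf_aq_desc desc;

	/* Zero the descriptor, then set the opcode and the SI flag */
	iavf_fill_default_direct_cmd_desc(&desc, opcode);

	/* Direct command: no indirect buffer and no completion details.
	 * iavf_asq_send_command() maps the firmware return code to an
	 * enum iavf_status and latches it in hw->aq.asq_last_status.
	 */
	return iavf_asq_send_command(hw, &desc, NULL, 0, NULL);
}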
diff --git a/drivers/net/ethernet/intel/iavf/i40e_adminq.h b/drivers/net/ethernet/intel/iavf/iavf_adminq.h
index ee983889eab0..baf2fe26f302 100644
--- a/drivers/net/ethernet/intel/iavf/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_adminq.h
@@ -6,10 +6,10 @@
#include "iavf_osdep.h"
#include "iavf_status.h"
-#include "i40e_adminq_cmd.h"
+#include "iavf_adminq_cmd.h"
#define IAVF_ADMINQ_DESC(R, i) \
- (&(((struct i40e_aq_desc *)((R).desc_buf.va))[i]))
+ (&(((struct iavf_aq_desc *)((R).desc_buf.va))[i]))
#define IAVF_ADMINQ_DESC_ALIGNMENT 4096
@@ -39,22 +39,22 @@ struct iavf_adminq_ring {
};
/* ASQ transaction details */
-struct i40e_asq_cmd_details {
- void *callback; /* cast from type I40E_ADMINQ_CALLBACK */
+struct iavf_asq_cmd_details {
+ void *callback; /* cast from type IAVF_ADMINQ_CALLBACK */
u64 cookie;
u16 flags_ena;
u16 flags_dis;
bool async;
bool postpone;
- struct i40e_aq_desc *wb_desc;
+ struct iavf_aq_desc *wb_desc;
};
-#define I40E_ADMINQ_DETAILS(R, i) \
- (&(((struct i40e_asq_cmd_details *)((R).cmd_buf.va))[i]))
+#define IAVF_ADMINQ_DETAILS(R, i) \
+ (&(((struct iavf_asq_cmd_details *)((R).cmd_buf.va))[i]))
/* ARQ event information */
-struct i40e_arq_event_info {
- struct i40e_aq_desc desc;
+struct iavf_arq_event_info {
+ struct iavf_aq_desc desc;
u16 msg_len;
u16 buf_len;
u8 *msg_buf;
@@ -79,45 +79,45 @@ struct iavf_adminq_info {
struct mutex arq_mutex; /* Receive queue lock */
/* last status values on send and receive queues */
- enum i40e_admin_queue_err asq_last_status;
- enum i40e_admin_queue_err arq_last_status;
+ enum iavf_admin_queue_err asq_last_status;
+ enum iavf_admin_queue_err arq_last_status;
};
/**
- * i40e_aq_rc_to_posix - convert errors to user-land codes
+ * iavf_aq_rc_to_posix - convert errors to user-land codes
* aq_ret: AdminQ handler error code can override aq_rc
* aq_rc: AdminQ firmware error code to convert
**/
-static inline int i40e_aq_rc_to_posix(int aq_ret, int aq_rc)
+static inline int iavf_aq_rc_to_posix(int aq_ret, int aq_rc)
{
int aq_to_posix[] = {
- 0, /* I40E_AQ_RC_OK */
- -EPERM, /* I40E_AQ_RC_EPERM */
- -ENOENT, /* I40E_AQ_RC_ENOENT */
- -ESRCH, /* I40E_AQ_RC_ESRCH */
- -EINTR, /* I40E_AQ_RC_EINTR */
- -EIO, /* I40E_AQ_RC_EIO */
- -ENXIO, /* I40E_AQ_RC_ENXIO */
- -E2BIG, /* I40E_AQ_RC_E2BIG */
- -EAGAIN, /* I40E_AQ_RC_EAGAIN */
- -ENOMEM, /* I40E_AQ_RC_ENOMEM */
- -EACCES, /* I40E_AQ_RC_EACCES */
- -EFAULT, /* I40E_AQ_RC_EFAULT */
- -EBUSY, /* I40E_AQ_RC_EBUSY */
- -EEXIST, /* I40E_AQ_RC_EEXIST */
- -EINVAL, /* I40E_AQ_RC_EINVAL */
- -ENOTTY, /* I40E_AQ_RC_ENOTTY */
- -ENOSPC, /* I40E_AQ_RC_ENOSPC */
- -ENOSYS, /* I40E_AQ_RC_ENOSYS */
- -ERANGE, /* I40E_AQ_RC_ERANGE */
- -EPIPE, /* I40E_AQ_RC_EFLUSHED */
- -ESPIPE, /* I40E_AQ_RC_BAD_ADDR */
- -EROFS, /* I40E_AQ_RC_EMODE */
- -EFBIG, /* I40E_AQ_RC_EFBIG */
+ 0, /* IAVF_AQ_RC_OK */
+ -EPERM, /* IAVF_AQ_RC_EPERM */
+ -ENOENT, /* IAVF_AQ_RC_ENOENT */
+ -ESRCH, /* IAVF_AQ_RC_ESRCH */
+ -EINTR, /* IAVF_AQ_RC_EINTR */
+ -EIO, /* IAVF_AQ_RC_EIO */
+ -ENXIO, /* IAVF_AQ_RC_ENXIO */
+ -E2BIG, /* IAVF_AQ_RC_E2BIG */
+ -EAGAIN, /* IAVF_AQ_RC_EAGAIN */
+ -ENOMEM, /* IAVF_AQ_RC_ENOMEM */
+ -EACCES, /* IAVF_AQ_RC_EACCES */
+ -EFAULT, /* IAVF_AQ_RC_EFAULT */
+ -EBUSY, /* IAVF_AQ_RC_EBUSY */
+ -EEXIST, /* IAVF_AQ_RC_EEXIST */
+ -EINVAL, /* IAVF_AQ_RC_EINVAL */
+ -ENOTTY, /* IAVF_AQ_RC_ENOTTY */
+ -ENOSPC, /* IAVF_AQ_RC_ENOSPC */
+ -ENOSYS, /* IAVF_AQ_RC_ENOSYS */
+ -ERANGE, /* IAVF_AQ_RC_ERANGE */
+ -EPIPE, /* IAVF_AQ_RC_EFLUSHED */
+ -ESPIPE, /* IAVF_AQ_RC_BAD_ADDR */
+ -EROFS, /* IAVF_AQ_RC_EMODE */
+ -EFBIG, /* IAVF_AQ_RC_EFBIG */
};
/* aq_rc is invalid if AQ timed out */
- if (aq_ret == I40E_ERR_ADMIN_QUEUE_TIMEOUT)
+ if (aq_ret == IAVF_ERR_ADMIN_QUEUE_TIMEOUT)
return -EAGAIN;
if (!((u32)aq_rc < (sizeof(aq_to_posix) / sizeof((aq_to_posix)[0]))))
@@ -127,9 +127,9 @@ static inline int i40e_aq_rc_to_posix(int aq_ret, int aq_rc)
}
/* general information */
-#define I40E_AQ_LARGE_BUF 512
-#define I40E_ASQ_CMD_TIMEOUT 250000 /* usecs */
+#define IAVF_AQ_LARGE_BUF 512
+#define IAVF_ASQ_CMD_TIMEOUT 250000 /* usecs */
-void iavf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, u16 opcode);
+void iavf_fill_default_direct_cmd_desc(struct iavf_aq_desc *desc, u16 opcode);
#endif /* _IAVF_ADMINQ_H_ */
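A usage note on the renamed iavf_aq_rc_to_posix() helper: callers pass the driver-level status plus the firmware return code and get an errno back, with -EAGAIN preferred on an AQ timeout. A hypothetical caller (only the wrapper name is invented):

/* Illustrative only: translate an AdminQ send result into a kernel errno. */
static int example_aq_to_errno(struct iavf_hw *hw, enum iavf_status aq_ret)
{
	return iavf_aq_rc_to_posix(aq_ret, hw->aq.asq_last_status);
}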
diff --git a/drivers/net/ethernet/intel/iavf/iavf_adminq_cmd.h b/drivers/net/ethernet/intel/iavf/iavf_adminq_cmd.h
new file mode 100644
index 000000000000..bc512308557b
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/iavf_adminq_cmd.h
@@ -0,0 +1,528 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#ifndef _IAVF_ADMINQ_CMD_H_
+#define _IAVF_ADMINQ_CMD_H_
+
+/* This header file defines the iavf Admin Queue commands and is shared between
+ * iavf Firmware and Software.
+ *
+ * This file needs to comply with the Linux Kernel coding style.
+ */
+
+#define IAVF_FW_API_VERSION_MAJOR 0x0001
+#define IAVF_FW_API_VERSION_MINOR_X722 0x0005
+#define IAVF_FW_API_VERSION_MINOR_X710 0x0008
+
+#define IAVF_FW_MINOR_VERSION(_h) ((_h)->mac.type == IAVF_MAC_XL710 ? \
+ IAVF_FW_API_VERSION_MINOR_X710 : \
+ IAVF_FW_API_VERSION_MINOR_X722)
+
+/* API version 1.7 implements additional link and PHY-specific APIs */
+#define IAVF_MINOR_VER_GET_LINK_INFO_XL710 0x0007
+
+struct iavf_aq_desc {
+ __le16 flags;
+ __le16 opcode;
+ __le16 datalen;
+ __le16 retval;
+ __le32 cookie_high;
+ __le32 cookie_low;
+ union {
+ struct {
+ __le32 param0;
+ __le32 param1;
+ __le32 param2;
+ __le32 param3;
+ } internal;
+ struct {
+ __le32 param0;
+ __le32 param1;
+ __le32 addr_high;
+ __le32 addr_low;
+ } external;
+ u8 raw[16];
+ } params;
+};
+
+/* Flags sub-structure
+ * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 |
+ * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE |
+ */
+
+/* command flags and offsets */
+#define IAVF_AQ_FLAG_DD_SHIFT 0
+#define IAVF_AQ_FLAG_CMP_SHIFT 1
+#define IAVF_AQ_FLAG_ERR_SHIFT 2
+#define IAVF_AQ_FLAG_VFE_SHIFT 3
+#define IAVF_AQ_FLAG_LB_SHIFT 9
+#define IAVF_AQ_FLAG_RD_SHIFT 10
+#define IAVF_AQ_FLAG_VFC_SHIFT 11
+#define IAVF_AQ_FLAG_BUF_SHIFT 12
+#define IAVF_AQ_FLAG_SI_SHIFT 13
+#define IAVF_AQ_FLAG_EI_SHIFT 14
+#define IAVF_AQ_FLAG_FE_SHIFT 15
+
+#define IAVF_AQ_FLAG_DD BIT(IAVF_AQ_FLAG_DD_SHIFT) /* 0x1 */
+#define IAVF_AQ_FLAG_CMP BIT(IAVF_AQ_FLAG_CMP_SHIFT) /* 0x2 */
+#define IAVF_AQ_FLAG_ERR BIT(IAVF_AQ_FLAG_ERR_SHIFT) /* 0x4 */
+#define IAVF_AQ_FLAG_VFE BIT(IAVF_AQ_FLAG_VFE_SHIFT) /* 0x8 */
+#define IAVF_AQ_FLAG_LB BIT(IAVF_AQ_FLAG_LB_SHIFT) /* 0x200 */
+#define IAVF_AQ_FLAG_RD BIT(IAVF_AQ_FLAG_RD_SHIFT) /* 0x400 */
+#define IAVF_AQ_FLAG_VFC BIT(IAVF_AQ_FLAG_VFC_SHIFT) /* 0x800 */
+#define IAVF_AQ_FLAG_BUF BIT(IAVF_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
+#define IAVF_AQ_FLAG_SI BIT(IAVF_AQ_FLAG_SI_SHIFT) /* 0x2000 */
+#define IAVF_AQ_FLAG_EI BIT(IAVF_AQ_FLAG_EI_SHIFT) /* 0x4000 */
+#define IAVF_AQ_FLAG_FE BIT(IAVF_AQ_FLAG_FE_SHIFT) /* 0x8000 */
+
+/* error codes */
+enum iavf_admin_queue_err {
+ IAVF_AQ_RC_OK = 0, /* success */
+ IAVF_AQ_RC_EPERM = 1, /* Operation not permitted */
+ IAVF_AQ_RC_ENOENT = 2, /* No such element */
+ IAVF_AQ_RC_ESRCH = 3, /* Bad opcode */
+ IAVF_AQ_RC_EINTR = 4, /* operation interrupted */
+ IAVF_AQ_RC_EIO = 5, /* I/O error */
+ IAVF_AQ_RC_ENXIO = 6, /* No such resource */
+ IAVF_AQ_RC_E2BIG = 7, /* Arg too long */
+ IAVF_AQ_RC_EAGAIN = 8, /* Try again */
+ IAVF_AQ_RC_ENOMEM = 9, /* Out of memory */
+ IAVF_AQ_RC_EACCES = 10, /* Permission denied */
+ IAVF_AQ_RC_EFAULT = 11, /* Bad address */
+ IAVF_AQ_RC_EBUSY = 12, /* Device or resource busy */
+ IAVF_AQ_RC_EEXIST = 13, /* object already exists */
+ IAVF_AQ_RC_EINVAL = 14, /* Invalid argument */
+ IAVF_AQ_RC_ENOTTY = 15, /* Not a typewriter */
+ IAVF_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */
+ IAVF_AQ_RC_ENOSYS = 17, /* Function not implemented */
+ IAVF_AQ_RC_ERANGE = 18, /* Parameter out of range */
+ IAVF_AQ_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */
+ IAVF_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */
+ IAVF_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */
+ IAVF_AQ_RC_EFBIG = 22, /* File too large */
+};
+
+/* Admin Queue command opcodes */
+enum iavf_admin_queue_opc {
+ /* aq commands */
+ iavf_aqc_opc_get_version = 0x0001,
+ iavf_aqc_opc_driver_version = 0x0002,
+ iavf_aqc_opc_queue_shutdown = 0x0003,
+ iavf_aqc_opc_set_pf_context = 0x0004,
+
+ /* resource ownership */
+ iavf_aqc_opc_request_resource = 0x0008,
+ iavf_aqc_opc_release_resource = 0x0009,
+
+ iavf_aqc_opc_list_func_capabilities = 0x000A,
+ iavf_aqc_opc_list_dev_capabilities = 0x000B,
+
+ /* Proxy commands */
+ iavf_aqc_opc_set_proxy_config = 0x0104,
+ iavf_aqc_opc_set_ns_proxy_table_entry = 0x0105,
+
+ /* LAA */
+ iavf_aqc_opc_mac_address_read = 0x0107,
+ iavf_aqc_opc_mac_address_write = 0x0108,
+
+ /* PXE */
+ iavf_aqc_opc_clear_pxe_mode = 0x0110,
+
+ /* WoL commands */
+ iavf_aqc_opc_set_wol_filter = 0x0120,
+ iavf_aqc_opc_get_wake_reason = 0x0121,
+
+ /* internal switch commands */
+ iavf_aqc_opc_get_switch_config = 0x0200,
+ iavf_aqc_opc_add_statistics = 0x0201,
+ iavf_aqc_opc_remove_statistics = 0x0202,
+ iavf_aqc_opc_set_port_parameters = 0x0203,
+ iavf_aqc_opc_get_switch_resource_alloc = 0x0204,
+ iavf_aqc_opc_set_switch_config = 0x0205,
+ iavf_aqc_opc_rx_ctl_reg_read = 0x0206,
+ iavf_aqc_opc_rx_ctl_reg_write = 0x0207,
+
+ iavf_aqc_opc_add_vsi = 0x0210,
+ iavf_aqc_opc_update_vsi_parameters = 0x0211,
+ iavf_aqc_opc_get_vsi_parameters = 0x0212,
+
+ iavf_aqc_opc_add_pv = 0x0220,
+ iavf_aqc_opc_update_pv_parameters = 0x0221,
+ iavf_aqc_opc_get_pv_parameters = 0x0222,
+
+ iavf_aqc_opc_add_veb = 0x0230,
+ iavf_aqc_opc_update_veb_parameters = 0x0231,
+ iavf_aqc_opc_get_veb_parameters = 0x0232,
+
+ iavf_aqc_opc_delete_element = 0x0243,
+
+ iavf_aqc_opc_add_macvlan = 0x0250,
+ iavf_aqc_opc_remove_macvlan = 0x0251,
+ iavf_aqc_opc_add_vlan = 0x0252,
+ iavf_aqc_opc_remove_vlan = 0x0253,
+ iavf_aqc_opc_set_vsi_promiscuous_modes = 0x0254,
+ iavf_aqc_opc_add_tag = 0x0255,
+ iavf_aqc_opc_remove_tag = 0x0256,
+ iavf_aqc_opc_add_multicast_etag = 0x0257,
+ iavf_aqc_opc_remove_multicast_etag = 0x0258,
+ iavf_aqc_opc_update_tag = 0x0259,
+ iavf_aqc_opc_add_control_packet_filter = 0x025A,
+ iavf_aqc_opc_remove_control_packet_filter = 0x025B,
+ iavf_aqc_opc_add_cloud_filters = 0x025C,
+ iavf_aqc_opc_remove_cloud_filters = 0x025D,
+ iavf_aqc_opc_clear_wol_switch_filters = 0x025E,
+
+ iavf_aqc_opc_add_mirror_rule = 0x0260,
+ iavf_aqc_opc_delete_mirror_rule = 0x0261,
+
+ /* Dynamic Device Personalization */
+ iavf_aqc_opc_write_personalization_profile = 0x0270,
+ iavf_aqc_opc_get_personalization_profile_list = 0x0271,
+
+ /* DCB commands */
+ iavf_aqc_opc_dcb_ignore_pfc = 0x0301,
+ iavf_aqc_opc_dcb_updated = 0x0302,
+ iavf_aqc_opc_set_dcb_parameters = 0x0303,
+
+ /* TX scheduler */
+ iavf_aqc_opc_configure_vsi_bw_limit = 0x0400,
+ iavf_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406,
+ iavf_aqc_opc_configure_vsi_tc_bw = 0x0407,
+ iavf_aqc_opc_query_vsi_bw_config = 0x0408,
+ iavf_aqc_opc_query_vsi_ets_sla_config = 0x040A,
+ iavf_aqc_opc_configure_switching_comp_bw_limit = 0x0410,
+
+ iavf_aqc_opc_enable_switching_comp_ets = 0x0413,
+ iavf_aqc_opc_modify_switching_comp_ets = 0x0414,
+ iavf_aqc_opc_disable_switching_comp_ets = 0x0415,
+ iavf_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416,
+ iavf_aqc_opc_configure_switching_comp_bw_config = 0x0417,
+ iavf_aqc_opc_query_switching_comp_ets_config = 0x0418,
+ iavf_aqc_opc_query_port_ets_config = 0x0419,
+ iavf_aqc_opc_query_switching_comp_bw_config = 0x041A,
+ iavf_aqc_opc_suspend_port_tx = 0x041B,
+ iavf_aqc_opc_resume_port_tx = 0x041C,
+ iavf_aqc_opc_configure_partition_bw = 0x041D,
+ /* hmc */
+ iavf_aqc_opc_query_hmc_resource_profile = 0x0500,
+ iavf_aqc_opc_set_hmc_resource_profile = 0x0501,
+
+ /* phy commands */
+ iavf_aqc_opc_get_phy_abilities = 0x0600,
+ iavf_aqc_opc_set_phy_config = 0x0601,
+ iavf_aqc_opc_set_mac_config = 0x0603,
+ iavf_aqc_opc_set_link_restart_an = 0x0605,
+ iavf_aqc_opc_get_link_status = 0x0607,
+ iavf_aqc_opc_set_phy_int_mask = 0x0613,
+ iavf_aqc_opc_get_local_advt_reg = 0x0614,
+ iavf_aqc_opc_set_local_advt_reg = 0x0615,
+ iavf_aqc_opc_get_partner_advt = 0x0616,
+ iavf_aqc_opc_set_lb_modes = 0x0618,
+ iavf_aqc_opc_get_phy_wol_caps = 0x0621,
+ iavf_aqc_opc_set_phy_debug = 0x0622,
+ iavf_aqc_opc_upload_ext_phy_fm = 0x0625,
+ iavf_aqc_opc_run_phy_activity = 0x0626,
+ iavf_aqc_opc_set_phy_register = 0x0628,
+ iavf_aqc_opc_get_phy_register = 0x0629,
+
+ /* NVM commands */
+ iavf_aqc_opc_nvm_read = 0x0701,
+ iavf_aqc_opc_nvm_erase = 0x0702,
+ iavf_aqc_opc_nvm_update = 0x0703,
+ iavf_aqc_opc_nvm_config_read = 0x0704,
+ iavf_aqc_opc_nvm_config_write = 0x0705,
+ iavf_aqc_opc_oem_post_update = 0x0720,
+ iavf_aqc_opc_thermal_sensor = 0x0721,
+
+ /* virtualization commands */
+ iavf_aqc_opc_send_msg_to_pf = 0x0801,
+ iavf_aqc_opc_send_msg_to_vf = 0x0802,
+ iavf_aqc_opc_send_msg_to_peer = 0x0803,
+
+ /* alternate structure */
+ iavf_aqc_opc_alternate_write = 0x0900,
+ iavf_aqc_opc_alternate_write_indirect = 0x0901,
+ iavf_aqc_opc_alternate_read = 0x0902,
+ iavf_aqc_opc_alternate_read_indirect = 0x0903,
+ iavf_aqc_opc_alternate_write_done = 0x0904,
+ iavf_aqc_opc_alternate_set_mode = 0x0905,
+ iavf_aqc_opc_alternate_clear_port = 0x0906,
+
+ /* LLDP commands */
+ iavf_aqc_opc_lldp_get_mib = 0x0A00,
+ iavf_aqc_opc_lldp_update_mib = 0x0A01,
+ iavf_aqc_opc_lldp_add_tlv = 0x0A02,
+ iavf_aqc_opc_lldp_update_tlv = 0x0A03,
+ iavf_aqc_opc_lldp_delete_tlv = 0x0A04,
+ iavf_aqc_opc_lldp_stop = 0x0A05,
+ iavf_aqc_opc_lldp_start = 0x0A06,
+
+ /* Tunnel commands */
+ iavf_aqc_opc_add_udp_tunnel = 0x0B00,
+ iavf_aqc_opc_del_udp_tunnel = 0x0B01,
+ iavf_aqc_opc_set_rss_key = 0x0B02,
+ iavf_aqc_opc_set_rss_lut = 0x0B03,
+ iavf_aqc_opc_get_rss_key = 0x0B04,
+ iavf_aqc_opc_get_rss_lut = 0x0B05,
+
+ /* Async Events */
+ iavf_aqc_opc_event_lan_overflow = 0x1001,
+
+ /* OEM commands */
+ iavf_aqc_opc_oem_parameter_change = 0xFE00,
+ iavf_aqc_opc_oem_device_status_change = 0xFE01,
+ iavf_aqc_opc_oem_ocsd_initialize = 0xFE02,
+ iavf_aqc_opc_oem_ocbb_initialize = 0xFE03,
+
+ /* debug commands */
+ iavf_aqc_opc_debug_read_reg = 0xFF03,
+ iavf_aqc_opc_debug_write_reg = 0xFF04,
+ iavf_aqc_opc_debug_modify_reg = 0xFF07,
+ iavf_aqc_opc_debug_dump_internals = 0xFF08,
+};
+
+/* command structures and indirect data structures */
+
+/* Structure naming conventions:
+ * - no suffix for direct command descriptor structures
+ * - _data for indirect sent data
+ * - _resp for indirect return data (data which is both will use _data)
+ * - _completion for direct return data
+ * - _element_ for repeated elements (may also be _data or _resp)
+ *
+ * Command structures are expected to overlay the params.raw member of the basic
+ * descriptor, and as such cannot exceed 16 bytes in length.
+ */
+
+/* This macro is used to generate a compilation error if a structure
+ * is not exactly the correct length. It gives a divide by zero error if the
+ * structure is not of the correct size; otherwise it creates an enum that is
+ * never used.
+ */
+#define IAVF_CHECK_STRUCT_LEN(n, X) enum iavf_static_assert_enum_##X \
+ { iavf_static_assert_##X = (n) / ((sizeof(struct X) == (n)) ? 1 : 0) }
+
+/* This macro is used extensively to ensure that command structures are 16
+ * bytes in length as they have to map to the raw array of that size.
+ */
+#define IAVF_CHECK_CMD_LENGTH(X) IAVF_CHECK_STRUCT_LEN(16, X)
+
+/* Queue Shutdown (direct 0x0003) */
+struct iavf_aqc_queue_shutdown {
+ __le32 driver_unloading;
+#define IAVF_AQ_DRIVER_UNLOADING 0x1
+ u8 reserved[12];
+};
+
+IAVF_CHECK_CMD_LENGTH(iavf_aqc_queue_shutdown);
+
+struct iavf_aqc_vsi_properties_data {
+ /* first 96 bytes are written by SW */
+ __le16 valid_sections;
+#define IAVF_AQ_VSI_PROP_SWITCH_VALID 0x0001
+#define IAVF_AQ_VSI_PROP_SECURITY_VALID 0x0002
+#define IAVF_AQ_VSI_PROP_VLAN_VALID 0x0004
+#define IAVF_AQ_VSI_PROP_CAS_PV_VALID 0x0008
+#define IAVF_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010
+#define IAVF_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020
+#define IAVF_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040
+#define IAVF_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080
+#define IAVF_AQ_VSI_PROP_OUTER_UP_VALID 0x0100
+#define IAVF_AQ_VSI_PROP_SCHED_VALID 0x0200
+ /* switch section */
+ __le16 switch_id; /* 12bit id combined with flags below */
+#define IAVF_AQ_VSI_SW_ID_SHIFT 0x0000
+#define IAVF_AQ_VSI_SW_ID_MASK (0xFFF << IAVF_AQ_VSI_SW_ID_SHIFT)
+#define IAVF_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000
+#define IAVF_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000
+#define IAVF_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000
+ u8 sw_reserved[2];
+ /* security section */
+ u8 sec_flags;
+#define IAVF_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01
+#define IAVF_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02
+#define IAVF_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04
+ u8 sec_reserved;
+ /* VLAN section */
+ __le16 pvid; /* VLANS include priority bits */
+ __le16 fcoe_pvid;
+ u8 port_vlan_flags;
+#define IAVF_AQ_VSI_PVLAN_MODE_SHIFT 0x00
+#define IAVF_AQ_VSI_PVLAN_MODE_MASK (0x03 << \
+ IAVF_AQ_VSI_PVLAN_MODE_SHIFT)
+#define IAVF_AQ_VSI_PVLAN_MODE_TAGGED 0x01
+#define IAVF_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02
+#define IAVF_AQ_VSI_PVLAN_MODE_ALL 0x03
+#define IAVF_AQ_VSI_PVLAN_INSERT_PVID 0x04
+#define IAVF_AQ_VSI_PVLAN_EMOD_SHIFT 0x03
+#define IAVF_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \
+ IAVF_AQ_VSI_PVLAN_EMOD_SHIFT)
+#define IAVF_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0
+#define IAVF_AQ_VSI_PVLAN_EMOD_STR_UP 0x08
+#define IAVF_AQ_VSI_PVLAN_EMOD_STR 0x10
+#define IAVF_AQ_VSI_PVLAN_EMOD_NOTHING 0x18
+ u8 pvlan_reserved[3];
+ /* ingress egress up sections */
+ __le32 ingress_table; /* bitmap, 3 bits per up */
+#define IAVF_AQ_VSI_UP_TABLE_UP0_SHIFT 0
+#define IAVF_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \
+ IAVF_AQ_VSI_UP_TABLE_UP0_SHIFT)
+#define IAVF_AQ_VSI_UP_TABLE_UP1_SHIFT 3
+#define IAVF_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \
+ IAVF_AQ_VSI_UP_TABLE_UP1_SHIFT)
+#define IAVF_AQ_VSI_UP_TABLE_UP2_SHIFT 6
+#define IAVF_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \
+ IAVF_AQ_VSI_UP_TABLE_UP2_SHIFT)
+#define IAVF_AQ_VSI_UP_TABLE_UP3_SHIFT 9
+#define IAVF_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \
+ IAVF_AQ_VSI_UP_TABLE_UP3_SHIFT)
+#define IAVF_AQ_VSI_UP_TABLE_UP4_SHIFT 12
+#define IAVF_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \
+ IAVF_AQ_VSI_UP_TABLE_UP4_SHIFT)
+#define IAVF_AQ_VSI_UP_TABLE_UP5_SHIFT 15
+#define IAVF_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \
+ IAVF_AQ_VSI_UP_TABLE_UP5_SHIFT)
+#define IAVF_AQ_VSI_UP_TABLE_UP6_SHIFT 18
+#define IAVF_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \
+ IAVF_AQ_VSI_UP_TABLE_UP6_SHIFT)
+#define IAVF_AQ_VSI_UP_TABLE_UP7_SHIFT 21
+#define IAVF_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \
+ IAVF_AQ_VSI_UP_TABLE_UP7_SHIFT)
+ __le32 egress_table; /* same defines as for ingress table */
+ /* cascaded PV section */
+ __le16 cas_pv_tag;
+ u8 cas_pv_flags;
+#define IAVF_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00
+#define IAVF_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \
+ IAVF_AQ_VSI_CAS_PV_TAGX_SHIFT)
+#define IAVF_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00
+#define IAVF_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01
+#define IAVF_AQ_VSI_CAS_PV_TAGX_COPY 0x02
+#define IAVF_AQ_VSI_CAS_PV_INSERT_TAG 0x10
+#define IAVF_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20
+#define IAVF_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40
+ u8 cas_pv_reserved;
+ /* queue mapping section */
+ __le16 mapping_flags;
+#define IAVF_AQ_VSI_QUE_MAP_CONTIG 0x0
+#define IAVF_AQ_VSI_QUE_MAP_NONCONTIG 0x1
+ __le16 queue_mapping[16];
+#define IAVF_AQ_VSI_QUEUE_SHIFT 0x0
+#define IAVF_AQ_VSI_QUEUE_MASK (0x7FF << IAVF_AQ_VSI_QUEUE_SHIFT)
+ __le16 tc_mapping[8];
+#define IAVF_AQ_VSI_TC_QUE_OFFSET_SHIFT 0
+#define IAVF_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \
+ IAVF_AQ_VSI_TC_QUE_OFFSET_SHIFT)
+#define IAVF_AQ_VSI_TC_QUE_NUMBER_SHIFT 9
+#define IAVF_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \
+ IAVF_AQ_VSI_TC_QUE_NUMBER_SHIFT)
+ /* queueing option section */
+ u8 queueing_opt_flags;
+#define IAVF_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA 0x04
+#define IAVF_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA 0x08
+#define IAVF_AQ_VSI_QUE_OPT_TCP_ENA 0x10
+#define IAVF_AQ_VSI_QUE_OPT_FCOE_ENA 0x20
+#define IAVF_AQ_VSI_QUE_OPT_RSS_LUT_PF 0x00
+#define IAVF_AQ_VSI_QUE_OPT_RSS_LUT_VSI 0x40
+ u8 queueing_opt_reserved[3];
+ /* scheduler section */
+ u8 up_enable_bits;
+ u8 sched_reserved;
+ /* outer up section */
+ __le32 outer_up_table; /* same structure and defines as ingress tbl */
+ u8 cmd_reserved[8];
+ /* last 32 bytes are written by FW */
+ __le16 qs_handle[8];
+#define IAVF_AQ_VSI_QS_HANDLE_INVALID 0xFFFF
+ __le16 stat_counter_idx;
+ __le16 sched_id;
+ u8 resp_reserved[12];
+};
+
+IAVF_CHECK_STRUCT_LEN(128, iavf_aqc_vsi_properties_data);
+
+/* Get VEB Parameters (direct 0x0232)
+ * uses iavf_aqc_switch_seid for the descriptor
+ */
+struct iavf_aqc_get_veb_parameters_completion {
+ __le16 seid;
+ __le16 switch_id;
+ __le16 veb_flags; /* only the first/last flags from 0x0230 are valid */
+ __le16 statistic_index;
+ __le16 vebs_used;
+ __le16 vebs_free;
+ u8 reserved[4];
+};
+
+IAVF_CHECK_CMD_LENGTH(iavf_aqc_get_veb_parameters_completion);
+
+#define IAVF_LINK_SPEED_100MB_SHIFT 0x1
+#define IAVF_LINK_SPEED_1000MB_SHIFT 0x2
+#define IAVF_LINK_SPEED_10GB_SHIFT 0x3
+#define IAVF_LINK_SPEED_40GB_SHIFT 0x4
+#define IAVF_LINK_SPEED_20GB_SHIFT 0x5
+#define IAVF_LINK_SPEED_25GB_SHIFT 0x6
+
+enum iavf_aq_link_speed {
+ IAVF_LINK_SPEED_UNKNOWN = 0,
+ IAVF_LINK_SPEED_100MB = BIT(IAVF_LINK_SPEED_100MB_SHIFT),
+ IAVF_LINK_SPEED_1GB = BIT(IAVF_LINK_SPEED_1000MB_SHIFT),
+ IAVF_LINK_SPEED_10GB = BIT(IAVF_LINK_SPEED_10GB_SHIFT),
+ IAVF_LINK_SPEED_40GB = BIT(IAVF_LINK_SPEED_40GB_SHIFT),
+ IAVF_LINK_SPEED_20GB = BIT(IAVF_LINK_SPEED_20GB_SHIFT),
+ IAVF_LINK_SPEED_25GB = BIT(IAVF_LINK_SPEED_25GB_SHIFT),
+};
+
+/* Send to PF command (indirect 0x0801) id is only used by PF
+ * Send to VF command (indirect 0x0802) id is only used by PF
+ * Send to Peer PF command (indirect 0x0803)
+ */
+struct iavf_aqc_pf_vf_message {
+ __le32 id;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IAVF_CHECK_CMD_LENGTH(iavf_aqc_pf_vf_message);
+
+struct iavf_aqc_get_set_rss_key {
+#define IAVF_AQC_SET_RSS_KEY_VSI_VALID BIT(15)
+#define IAVF_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0
+#define IAVF_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \
+ IAVF_AQC_SET_RSS_KEY_VSI_ID_SHIFT)
+ __le16 vsi_id;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IAVF_CHECK_CMD_LENGTH(iavf_aqc_get_set_rss_key);
+
+struct iavf_aqc_get_set_rss_key_data {
+ u8 standard_rss_key[0x28];
+ u8 extended_hash_key[0xc];
+};
+
+IAVF_CHECK_STRUCT_LEN(0x34, iavf_aqc_get_set_rss_key_data);
+
+struct iavf_aqc_get_set_rss_lut {
+#define IAVF_AQC_SET_RSS_LUT_VSI_VALID BIT(15)
+#define IAVF_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0
+#define IAVF_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \
+ IAVF_AQC_SET_RSS_LUT_VSI_ID_SHIFT)
+ __le16 vsi_id;
+#define IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0
+#define IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_MASK \
+ BIT(IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
+
+#define IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0
+#define IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1
+ __le16 flags;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IAVF_CHECK_CMD_LENGTH(iavf_aqc_get_set_rss_lut);
+#endif /* _IAVF_ADMINQ_CMD_H_ */
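A note on the IAVF_CHECK_STRUCT_LEN()/IAVF_CHECK_CMD_LENGTH() macros introduced above: a size mismatch turns the enum initializer into a constant division by zero, so the build fails instead of silently shipping a descriptor overlay of the wrong size. A hypothetical struct (not part of the header) shows the mechanism:

/* Hypothetical 16-byte command overlay: four __le32 fields fill params.raw. */
struct example_aqc_direct_cmd {
	__le32 param0;
	__le32 param1;
	__le32 addr_high;
	__le32 addr_low;
};

/* Expands to an enum whose initializer is 16 / 1 when sizeof() matches,
 * and 16 / 0 (a compile-time error) when it does not.
 */
IAVF_CHECK_CMD_LENGTH(example_aqc_direct_cmd);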
diff --git a/drivers/net/ethernet/intel/iavf/iavf_alloc.h b/drivers/net/ethernet/intel/iavf/iavf_alloc.h
index bf2753146f30..2711573c14ec 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_alloc.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_alloc.h
@@ -20,12 +20,15 @@ enum iavf_memory_type {
};
/* prototype for functions used for dynamic memory allocation */
-iavf_status iavf_allocate_dma_mem(struct iavf_hw *hw, struct iavf_dma_mem *mem,
- enum iavf_memory_type type,
- u64 size, u32 alignment);
-iavf_status iavf_free_dma_mem(struct iavf_hw *hw, struct iavf_dma_mem *mem);
-iavf_status iavf_allocate_virt_mem(struct iavf_hw *hw,
- struct iavf_virt_mem *mem, u32 size);
-iavf_status iavf_free_virt_mem(struct iavf_hw *hw, struct iavf_virt_mem *mem);
+enum iavf_status iavf_allocate_dma_mem(struct iavf_hw *hw,
+ struct iavf_dma_mem *mem,
+ enum iavf_memory_type type,
+ u64 size, u32 alignment);
+enum iavf_status iavf_free_dma_mem(struct iavf_hw *hw,
+ struct iavf_dma_mem *mem);
+enum iavf_status iavf_allocate_virt_mem(struct iavf_hw *hw,
+ struct iavf_virt_mem *mem, u32 size);
+enum iavf_status iavf_free_virt_mem(struct iavf_hw *hw,
+ struct iavf_virt_mem *mem);
#endif /* _IAVF_ALLOC_H_ */
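With the prototypes now returning enum iavf_status explicitly, callers check them like any other status code. A sketch under the assumption that iavf_mem_arq_buf and IAVF_ADMINQ_DESC_ALIGNMENT are used as in iavf_adminq.c; the wrapper itself is hypothetical:

/* Illustrative only: allocate one descriptor-sized DMA buffer, use it, free it. */
static enum iavf_status example_alloc_one_buf(struct iavf_hw *hw,
					      struct iavf_dma_mem *mem)
{
	enum iavf_status ret;

	ret = iavf_allocate_dma_mem(hw, mem, iavf_mem_arq_buf,
				    sizeof(struct iavf_aq_desc),
				    IAVF_ADMINQ_DESC_ALIGNMENT);
	if (ret)
		return ret;

	/* ... mem->va (kernel address) and mem->pa (bus address) are valid ... */

	return iavf_free_dma_mem(hw, mem);
}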
diff --git a/drivers/net/ethernet/intel/iavf/iavf_client.c b/drivers/net/ethernet/intel/iavf/iavf_client.c
index aea45364fd1c..0c77e4171808 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_client.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_client.c
@@ -10,19 +10,19 @@
static
const char iavf_client_interface_version_str[] = IAVF_CLIENT_VERSION_STR;
-static struct i40e_client *vf_registered_client;
-static LIST_HEAD(i40e_devices);
+static struct iavf_client *vf_registered_client;
+static LIST_HEAD(iavf_devices);
static DEFINE_MUTEX(iavf_device_mutex);
-static u32 iavf_client_virtchnl_send(struct i40e_info *ldev,
- struct i40e_client *client,
+static u32 iavf_client_virtchnl_send(struct iavf_info *ldev,
+ struct iavf_client *client,
u8 *msg, u16 len);
-static int iavf_client_setup_qvlist(struct i40e_info *ldev,
- struct i40e_client *client,
- struct i40e_qvlist_info *qvlist_info);
+static int iavf_client_setup_qvlist(struct iavf_info *ldev,
+ struct iavf_client *client,
+ struct iavf_qvlist_info *qvlist_info);
-static struct i40e_ops iavf_lan_ops = {
+static struct iavf_ops iavf_lan_ops = {
.virtchnl_send = iavf_client_virtchnl_send,
.setup_qvlist = iavf_client_setup_qvlist,
};
@@ -33,11 +33,11 @@ static struct i40e_ops iavf_lan_ops = {
* @params: client param struct
**/
static
-void iavf_client_get_params(struct iavf_vsi *vsi, struct i40e_params *params)
+void iavf_client_get_params(struct iavf_vsi *vsi, struct iavf_params *params)
{
int i;
- memset(params, 0, sizeof(struct i40e_params));
+ memset(params, 0, sizeof(struct iavf_params));
params->mtu = vsi->netdev->mtu;
params->link_up = vsi->back->link_up;
@@ -57,7 +57,7 @@ void iavf_client_get_params(struct iavf_vsi *vsi, struct i40e_params *params)
**/
void iavf_notify_client_message(struct iavf_vsi *vsi, u8 *msg, u16 len)
{
- struct i40e_client_instance *cinst;
+ struct iavf_client_instance *cinst;
if (!vsi)
return;
@@ -81,8 +81,8 @@ void iavf_notify_client_message(struct iavf_vsi *vsi, u8 *msg, u16 len)
**/
void iavf_notify_client_l2_params(struct iavf_vsi *vsi)
{
- struct i40e_client_instance *cinst;
- struct i40e_params params;
+ struct iavf_client_instance *cinst;
+ struct iavf_params params;
if (!vsi)
return;
@@ -110,7 +110,7 @@ void iavf_notify_client_l2_params(struct iavf_vsi *vsi)
void iavf_notify_client_open(struct iavf_vsi *vsi)
{
struct iavf_adapter *adapter = vsi->back;
- struct i40e_client_instance *cinst = adapter->cinst;
+ struct iavf_client_instance *cinst = adapter->cinst;
int ret;
if (!cinst || !cinst->client || !cinst->client->ops ||
@@ -119,10 +119,10 @@ void iavf_notify_client_open(struct iavf_vsi *vsi)
"Cannot locate client instance open function\n");
return;
}
- if (!(test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state))) {
+ if (!(test_bit(__IAVF_CLIENT_INSTANCE_OPENED, &cinst->state))) {
ret = cinst->client->ops->open(&cinst->lan_info, cinst->client);
if (!ret)
- set_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state);
+ set_bit(__IAVF_CLIENT_INSTANCE_OPENED, &cinst->state);
}
}
@@ -132,17 +132,17 @@ void iavf_notify_client_open(struct iavf_vsi *vsi)
*
* Return 0 on success or < 0 on error
**/
-static int iavf_client_release_qvlist(struct i40e_info *ldev)
+static int iavf_client_release_qvlist(struct iavf_info *ldev)
{
struct iavf_adapter *adapter = ldev->vf;
- iavf_status err;
+ enum iavf_status err;
if (adapter->aq_required)
return -EAGAIN;
err = iavf_aq_send_msg_to_pf(&adapter->hw,
VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
- I40E_SUCCESS, NULL, 0, NULL);
+ IAVF_SUCCESS, NULL, 0, NULL);
if (err)
dev_err(&adapter->pdev->dev,
@@ -162,7 +162,7 @@ static int iavf_client_release_qvlist(struct i40e_info *ldev)
void iavf_notify_client_close(struct iavf_vsi *vsi, bool reset)
{
struct iavf_adapter *adapter = vsi->back;
- struct i40e_client_instance *cinst = adapter->cinst;
+ struct iavf_client_instance *cinst = adapter->cinst;
if (!cinst || !cinst->client || !cinst->client->ops ||
!cinst->client->ops->close) {
@@ -172,7 +172,7 @@ void iavf_notify_client_close(struct iavf_vsi *vsi, bool reset)
}
cinst->client->ops->close(&cinst->lan_info, cinst->client, reset);
iavf_client_release_qvlist(&cinst->lan_info);
- clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state);
+ clear_bit(__IAVF_CLIENT_INSTANCE_OPENED, &cinst->state);
}
/**
@@ -181,13 +181,13 @@ void iavf_notify_client_close(struct iavf_vsi *vsi, bool reset)
*
* Returns cinst ptr on success, NULL on failure
**/
-static struct i40e_client_instance *
+static struct iavf_client_instance *
iavf_client_add_instance(struct iavf_adapter *adapter)
{
- struct i40e_client_instance *cinst = NULL;
+ struct iavf_client_instance *cinst = NULL;
struct iavf_vsi *vsi = &adapter->vsi;
struct netdev_hw_addr *mac = NULL;
- struct i40e_params params;
+ struct iavf_params params;
if (!vf_registered_client)
goto out;
@@ -205,7 +205,7 @@ iavf_client_add_instance(struct iavf_adapter *adapter)
cinst->lan_info.netdev = vsi->netdev;
cinst->lan_info.pcidev = adapter->pdev;
cinst->lan_info.fid = 0;
- cinst->lan_info.ftype = I40E_CLIENT_FTYPE_VF;
+ cinst->lan_info.ftype = IAVF_CLIENT_FTYPE_VF;
cinst->lan_info.hw_addr = adapter->hw.hw_addr;
cinst->lan_info.ops = &iavf_lan_ops;
cinst->lan_info.version.major = IAVF_CLIENT_VERSION_MAJOR;
@@ -213,7 +213,7 @@ iavf_client_add_instance(struct iavf_adapter *adapter)
cinst->lan_info.version.build = IAVF_CLIENT_VERSION_BUILD;
iavf_client_get_params(vsi, &params);
cinst->lan_info.params = params;
- set_bit(__I40E_CLIENT_INSTANCE_NONE, &cinst->state);
+ set_bit(__IAVF_CLIENT_INSTANCE_NONE, &cinst->state);
cinst->lan_info.msix_count = adapter->num_iwarp_msix;
cinst->lan_info.msix_entries =
@@ -250,8 +250,8 @@ void iavf_client_del_instance(struct iavf_adapter *adapter)
**/
void iavf_client_subtask(struct iavf_adapter *adapter)
{
- struct i40e_client *client = vf_registered_client;
- struct i40e_client_instance *cinst;
+ struct iavf_client *client = vf_registered_client;
+ struct iavf_client_instance *cinst;
int ret = 0;
if (adapter->state < __IAVF_DOWN)
@@ -269,13 +269,13 @@ void iavf_client_subtask(struct iavf_adapter *adapter)
dev_info(&adapter->pdev->dev, "Added instance of Client %s\n",
client->name);
- if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state)) {
+ if (!test_bit(__IAVF_CLIENT_INSTANCE_OPENED, &cinst->state)) {
/* Send an Open request to the client */
if (client->ops && client->ops->open)
ret = client->ops->open(&cinst->lan_info, client);
if (!ret)
- set_bit(__I40E_CLIENT_INSTANCE_OPENED,
+ set_bit(__IAVF_CLIENT_INSTANCE_OPENED,
&cinst->state);
else
/* remove client instance */
@@ -291,11 +291,11 @@ void iavf_client_subtask(struct iavf_adapter *adapter)
**/
int iavf_lan_add_device(struct iavf_adapter *adapter)
{
- struct i40e_device *ldev;
+ struct iavf_device *ldev;
int ret = 0;
mutex_lock(&iavf_device_mutex);
- list_for_each_entry(ldev, &i40e_devices, list) {
+ list_for_each_entry(ldev, &iavf_devices, list) {
if (ldev->vf == adapter) {
ret = -EEXIST;
goto out;
@@ -308,7 +308,7 @@ int iavf_lan_add_device(struct iavf_adapter *adapter)
}
ldev->vf = adapter;
INIT_LIST_HEAD(&ldev->list);
- list_add(&ldev->list, &i40e_devices);
+ list_add(&ldev->list, &iavf_devices);
dev_info(&adapter->pdev->dev, "Added LAN device bus=0x%02x dev=0x%02x func=0x%02x\n",
adapter->hw.bus.bus_id, adapter->hw.bus.device,
adapter->hw.bus.func);
@@ -331,11 +331,11 @@ out:
**/
int iavf_lan_del_device(struct iavf_adapter *adapter)
{
- struct i40e_device *ldev, *tmp;
+ struct iavf_device *ldev, *tmp;
int ret = -ENODEV;
mutex_lock(&iavf_device_mutex);
- list_for_each_entry_safe(ldev, tmp, &i40e_devices, list) {
+ list_for_each_entry_safe(ldev, tmp, &iavf_devices, list) {
if (ldev->vf == adapter) {
dev_info(&adapter->pdev->dev,
"Deleted LAN device bus=0x%02x dev=0x%02x func=0x%02x\n",
@@ -357,24 +357,24 @@ int iavf_lan_del_device(struct iavf_adapter *adapter)
* @client: pointer to the registered client
*
**/
-static void iavf_client_release(struct i40e_client *client)
+static void iavf_client_release(struct iavf_client *client)
{
- struct i40e_client_instance *cinst;
- struct i40e_device *ldev;
+ struct iavf_client_instance *cinst;
+ struct iavf_device *ldev;
struct iavf_adapter *adapter;
mutex_lock(&iavf_device_mutex);
- list_for_each_entry(ldev, &i40e_devices, list) {
+ list_for_each_entry(ldev, &iavf_devices, list) {
adapter = ldev->vf;
cinst = adapter->cinst;
if (!cinst)
continue;
- if (test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state)) {
+ if (test_bit(__IAVF_CLIENT_INSTANCE_OPENED, &cinst->state)) {
if (client->ops && client->ops->close)
client->ops->close(&cinst->lan_info, client,
false);
iavf_client_release_qvlist(&cinst->lan_info);
- clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state);
+ clear_bit(__IAVF_CLIENT_INSTANCE_OPENED, &cinst->state);
dev_warn(&adapter->pdev->dev,
"Client %s instance closed\n", client->name);
@@ -392,13 +392,13 @@ static void iavf_client_release(struct i40e_client *client)
* @client: pointer to the registered client
*
**/
-static void iavf_client_prepare(struct i40e_client *client)
+static void iavf_client_prepare(struct iavf_client *client)
{
- struct i40e_device *ldev;
+ struct iavf_device *ldev;
struct iavf_adapter *adapter;
mutex_lock(&iavf_device_mutex);
- list_for_each_entry(ldev, &i40e_devices, list) {
+ list_for_each_entry(ldev, &iavf_devices, list) {
adapter = ldev->vf;
/* Signal the watchdog to service the client */
adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
@@ -415,18 +415,18 @@ static void iavf_client_prepare(struct i40e_client *client)
*
* Return 0 on success or < 0 on error
**/
-static u32 iavf_client_virtchnl_send(struct i40e_info *ldev,
- struct i40e_client *client,
+static u32 iavf_client_virtchnl_send(struct iavf_info *ldev,
+ struct iavf_client *client,
u8 *msg, u16 len)
{
struct iavf_adapter *adapter = ldev->vf;
- iavf_status err;
+ enum iavf_status err;
if (adapter->aq_required)
return -EAGAIN;
err = iavf_aq_send_msg_to_pf(&adapter->hw, VIRTCHNL_OP_IWARP,
- I40E_SUCCESS, msg, len, NULL);
+ IAVF_SUCCESS, msg, len, NULL);
if (err)
dev_err(&adapter->pdev->dev, "Unable to send iWarp message to PF, error %d, aq status %d\n",
err, adapter->hw.aq.asq_last_status);
@@ -442,16 +442,16 @@ static u32 iavf_client_virtchnl_send(struct i40e_info *ldev,
*
* Return 0 on success or < 0 on error
**/
-static int iavf_client_setup_qvlist(struct i40e_info *ldev,
- struct i40e_client *client,
- struct i40e_qvlist_info *qvlist_info)
+static int iavf_client_setup_qvlist(struct iavf_info *ldev,
+ struct iavf_client *client,
+ struct iavf_qvlist_info *qvlist_info)
{
struct virtchnl_iwarp_qvlist_info *v_qvlist_info;
struct iavf_adapter *adapter = ldev->vf;
- struct i40e_qv_info *qv_info;
- iavf_status err;
+ struct iavf_qv_info *qv_info;
+ enum iavf_status err;
u32 v_idx, i;
- u32 msg_size;
+ size_t msg_size;
if (adapter->aq_required)
return -EAGAIN;
@@ -469,13 +469,12 @@ static int iavf_client_setup_qvlist(struct i40e_info *ldev,
}
v_qvlist_info = (struct virtchnl_iwarp_qvlist_info *)qvlist_info;
- msg_size = sizeof(struct virtchnl_iwarp_qvlist_info) +
- (sizeof(struct virtchnl_iwarp_qv_info) *
- (v_qvlist_info->num_vectors - 1));
+ msg_size = struct_size(v_qvlist_info, qv_info,
+ v_qvlist_info->num_vectors - 1);
adapter->client_pending |= BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP);
err = iavf_aq_send_msg_to_pf(&adapter->hw,
- VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP, I40E_SUCCESS,
+ VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP, IAVF_SUCCESS,
(u8 *)v_qvlist_info, msg_size, NULL);
if (err) {
@@ -499,12 +498,12 @@ out:
}
/**
- * iavf_register_client - Register a i40e client driver with the L2 driver
- * @client: pointer to the i40e_client struct
+ * iavf_register_client - Register an iavf client driver with the L2 driver
+ * @client: pointer to the iavf_client struct
*
* Returns 0 on success or non-0 on error
**/
-int iavf_register_client(struct i40e_client *client)
+int iavf_register_client(struct iavf_client *client)
{
int ret = 0;
@@ -550,12 +549,12 @@ out:
EXPORT_SYMBOL(iavf_register_client);
/**
- * iavf_unregister_client - Unregister a i40e client driver with the L2 driver
- * @client: pointer to the i40e_client struct
+ * iavf_unregister_client - Unregister an iavf client driver with the L2 driver
+ * @client: pointer to the iavf_client struct
*
* Returns 0 on success or non-0 on error
**/
-int iavf_unregister_client(struct i40e_client *client)
+int iavf_unregister_client(struct iavf_client *client)
{
int ret = 0;
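Besides the rename, iavf_client_setup_qvlist() above switches the open-coded message-size arithmetic to struct_size() from <linux/overflow.h>, which saturates rather than overflowing. Roughly equivalent, as a sketch (the helper name is hypothetical; the count is num_vectors - 1 because qv_info is declared as a one-element trailing array):

#include <linux/overflow.h>

/* Equivalent of the removed arithmetic,
 * sizeof(*info) + (info->num_vectors - 1) * sizeof(info->qv_info[0]),
 * but clamped to SIZE_MAX if the multiplication would overflow.
 */
static size_t example_qvlist_msg_size(struct virtchnl_iwarp_qvlist_info *info)
{
	return struct_size(info, qv_info, info->num_vectors - 1);
}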
diff --git a/drivers/net/ethernet/intel/iavf/iavf_client.h b/drivers/net/ethernet/intel/iavf/iavf_client.h
index e216fc9dfd81..9a7cf39ea75a 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_client.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_client.h
@@ -17,86 +17,86 @@
__stringify(IAVF_CLIENT_VERSION_MINOR) "." \
__stringify(IAVF_CLIENT_VERSION_BUILD)
-struct i40e_client_version {
+struct iavf_client_version {
u8 major;
u8 minor;
u8 build;
u8 rsvd;
};
-enum i40e_client_state {
- __I40E_CLIENT_NULL,
- __I40E_CLIENT_REGISTERED
+enum iavf_client_state {
+ __IAVF_CLIENT_NULL,
+ __IAVF_CLIENT_REGISTERED
};
-enum i40e_client_instance_state {
- __I40E_CLIENT_INSTANCE_NONE,
- __I40E_CLIENT_INSTANCE_OPENED,
+enum iavf_client_instance_state {
+ __IAVF_CLIENT_INSTANCE_NONE,
+ __IAVF_CLIENT_INSTANCE_OPENED,
};
-struct i40e_ops;
-struct i40e_client;
+struct iavf_ops;
+struct iavf_client;
/* HW does not define a type value for AEQ; only for RX/TX and CEQ.
* In order for us to keep the interface simple, SW will define a
* unique type value for AEQ.
*/
-#define I40E_QUEUE_TYPE_PE_AEQ 0x80
-#define I40E_QUEUE_INVALID_IDX 0xFFFF
+#define IAVF_QUEUE_TYPE_PE_AEQ 0x80
+#define IAVF_QUEUE_INVALID_IDX 0xFFFF
-struct i40e_qv_info {
+struct iavf_qv_info {
u32 v_idx; /* msix_vector */
u16 ceq_idx;
u16 aeq_idx;
u8 itr_idx;
};
-struct i40e_qvlist_info {
+struct iavf_qvlist_info {
u32 num_vectors;
- struct i40e_qv_info qv_info[1];
+ struct iavf_qv_info qv_info[1];
};
-#define I40E_CLIENT_MSIX_ALL 0xFFFFFFFF
+#define IAVF_CLIENT_MSIX_ALL 0xFFFFFFFF
/* set of LAN parameters useful for clients managed by LAN */
/* Struct to hold per priority info */
-struct i40e_prio_qos_params {
+struct iavf_prio_qos_params {
u16 qs_handle; /* qs handle for prio */
u8 tc; /* TC mapped to prio */
u8 reserved;
};
-#define I40E_CLIENT_MAX_USER_PRIORITY 8
+#define IAVF_CLIENT_MAX_USER_PRIORITY 8
/* Struct to hold Client QoS */
-struct i40e_qos_params {
- struct i40e_prio_qos_params prio_qos[I40E_CLIENT_MAX_USER_PRIORITY];
+struct iavf_qos_params {
+ struct iavf_prio_qos_params prio_qos[IAVF_CLIENT_MAX_USER_PRIORITY];
};
-struct i40e_params {
- struct i40e_qos_params qos;
+struct iavf_params {
+ struct iavf_qos_params qos;
u16 mtu;
u16 link_up; /* boolean */
};
/* Structure to hold LAN device info for a client device */
-struct i40e_info {
- struct i40e_client_version version;
+struct iavf_info {
+ struct iavf_client_version version;
u8 lanmac[6];
struct net_device *netdev;
struct pci_dev *pcidev;
u8 __iomem *hw_addr;
u8 fid; /* function id, PF id or VF id */
-#define I40E_CLIENT_FTYPE_PF 0
-#define I40E_CLIENT_FTYPE_VF 1
+#define IAVF_CLIENT_FTYPE_PF 0
+#define IAVF_CLIENT_FTYPE_VF 1
u8 ftype; /* function type, PF or VF */
void *vf; /* cast to iavf_adapter */
/* All L2 params that could change during the life span of the device
* and needs to be communicated to the client when they change
*/
- struct i40e_params params;
- struct i40e_ops *ops;
+ struct iavf_params params;
+ struct iavf_ops *ops;
u16 msix_count; /* number of msix vectors*/
/* Array down below will be dynamically allocated based on msix_count */
@@ -104,66 +104,66 @@ struct i40e_info {
u16 itr_index; /* Which ITR index the PE driver is suppose to use */
};
-struct i40e_ops {
+struct iavf_ops {
/* setup_q_vector_list enables queues with a particular vector */
- int (*setup_qvlist)(struct i40e_info *ldev, struct i40e_client *client,
- struct i40e_qvlist_info *qv_info);
+ int (*setup_qvlist)(struct iavf_info *ldev, struct iavf_client *client,
+ struct iavf_qvlist_info *qv_info);
- u32 (*virtchnl_send)(struct i40e_info *ldev, struct i40e_client *client,
+ u32 (*virtchnl_send)(struct iavf_info *ldev, struct iavf_client *client,
u8 *msg, u16 len);
/* If the PE Engine is unresponsive, RDMA driver can request a reset.*/
- void (*request_reset)(struct i40e_info *ldev,
- struct i40e_client *client);
+ void (*request_reset)(struct iavf_info *ldev,
+ struct iavf_client *client);
};
-struct i40e_client_ops {
+struct iavf_client_ops {
/* Should be called from register_client() or whenever the driver is
* ready to create a specific client instance.
*/
- int (*open)(struct i40e_info *ldev, struct i40e_client *client);
+ int (*open)(struct iavf_info *ldev, struct iavf_client *client);
/* Should be closed when netdev is unavailable or when unregister
* call comes in. If the close happens due to a reset, set the reset
* bit to true.
*/
- void (*close)(struct i40e_info *ldev, struct i40e_client *client,
+ void (*close)(struct iavf_info *ldev, struct iavf_client *client,
bool reset);
/* called when some l2 managed parameters changes - mss */
- void (*l2_param_change)(struct i40e_info *ldev,
- struct i40e_client *client,
- struct i40e_params *params);
+ void (*l2_param_change)(struct iavf_info *ldev,
+ struct iavf_client *client,
+ struct iavf_params *params);
/* called when a message is received from the PF */
- int (*virtchnl_receive)(struct i40e_info *ldev,
- struct i40e_client *client,
+ int (*virtchnl_receive)(struct iavf_info *ldev,
+ struct iavf_client *client,
u8 *msg, u16 len);
};
/* Client device */
-struct i40e_client_instance {
+struct iavf_client_instance {
struct list_head list;
- struct i40e_info lan_info;
- struct i40e_client *client;
+ struct iavf_info lan_info;
+ struct iavf_client *client;
unsigned long state;
};
-struct i40e_client {
+struct iavf_client {
struct list_head list; /* list of registered clients */
char name[IAVF_CLIENT_STR_LENGTH];
- struct i40e_client_version version;
+ struct iavf_client_version version;
unsigned long state; /* client state */
atomic_t ref_cnt; /* Count of all the client devices of this kind */
u32 flags;
-#define I40E_CLIENT_FLAGS_LAUNCH_ON_PROBE BIT(0)
-#define I40E_TX_FLAGS_NOTIFY_OTHER_EVENTS BIT(2)
+#define IAVF_CLIENT_FLAGS_LAUNCH_ON_PROBE BIT(0)
+#define IAVF_TX_FLAGS_NOTIFY_OTHER_EVENTS BIT(2)
u8 type;
-#define I40E_CLIENT_IWARP 0
- struct i40e_client_ops *ops; /* client ops provided by the client */
+#define IAVF_CLIENT_IWARP 0
+ struct iavf_client_ops *ops; /* client ops provided by the client */
};
/* used by clients */
-int iavf_register_client(struct i40e_client *client);
-int iavf_unregister_client(struct i40e_client *client);
+int iavf_register_client(struct iavf_client *client);
+int iavf_unregister_client(struct iavf_client *client);
#endif /* _IAVF_CLIENT_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_common.c b/drivers/net/ethernet/intel/iavf/iavf_common.c
index 768369c89e77..8547fc8fdfd6 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_common.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_common.c
@@ -2,7 +2,7 @@
/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include "iavf_type.h"
-#include "i40e_adminq.h"
+#include "iavf_adminq.h"
#include "iavf_prototype.h"
#include <linux/avf/virtchnl.h>
@@ -13,9 +13,9 @@
* This function sets the mac type of the adapter based on the
* vendor ID and device ID stored in the hw structure.
**/
-iavf_status iavf_set_mac_type(struct iavf_hw *hw)
+enum iavf_status iavf_set_mac_type(struct iavf_hw *hw)
{
- iavf_status status = 0;
+ enum iavf_status status = 0;
if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
switch (hw->device_id) {
@@ -32,7 +32,7 @@ iavf_status iavf_set_mac_type(struct iavf_hw *hw)
break;
}
} else {
- status = I40E_ERR_DEVICE_NOT_SUPPORTED;
+ status = IAVF_ERR_DEVICE_NOT_SUPPORTED;
}
hw_dbg(hw, "found mac: %d, returns: %d\n", hw->mac.type, status);
@@ -44,55 +44,55 @@ iavf_status iavf_set_mac_type(struct iavf_hw *hw)
* @hw: pointer to the HW structure
* @aq_err: the AQ error code to convert
**/
-const char *iavf_aq_str(struct iavf_hw *hw, enum i40e_admin_queue_err aq_err)
+const char *iavf_aq_str(struct iavf_hw *hw, enum iavf_admin_queue_err aq_err)
{
switch (aq_err) {
- case I40E_AQ_RC_OK:
+ case IAVF_AQ_RC_OK:
return "OK";
- case I40E_AQ_RC_EPERM:
- return "I40E_AQ_RC_EPERM";
- case I40E_AQ_RC_ENOENT:
- return "I40E_AQ_RC_ENOENT";
- case I40E_AQ_RC_ESRCH:
- return "I40E_AQ_RC_ESRCH";
- case I40E_AQ_RC_EINTR:
- return "I40E_AQ_RC_EINTR";
- case I40E_AQ_RC_EIO:
- return "I40E_AQ_RC_EIO";
- case I40E_AQ_RC_ENXIO:
- return "I40E_AQ_RC_ENXIO";
- case I40E_AQ_RC_E2BIG:
- return "I40E_AQ_RC_E2BIG";
- case I40E_AQ_RC_EAGAIN:
- return "I40E_AQ_RC_EAGAIN";
- case I40E_AQ_RC_ENOMEM:
- return "I40E_AQ_RC_ENOMEM";
- case I40E_AQ_RC_EACCES:
- return "I40E_AQ_RC_EACCES";
- case I40E_AQ_RC_EFAULT:
- return "I40E_AQ_RC_EFAULT";
- case I40E_AQ_RC_EBUSY:
- return "I40E_AQ_RC_EBUSY";
- case I40E_AQ_RC_EEXIST:
- return "I40E_AQ_RC_EEXIST";
- case I40E_AQ_RC_EINVAL:
- return "I40E_AQ_RC_EINVAL";
- case I40E_AQ_RC_ENOTTY:
- return "I40E_AQ_RC_ENOTTY";
- case I40E_AQ_RC_ENOSPC:
- return "I40E_AQ_RC_ENOSPC";
- case I40E_AQ_RC_ENOSYS:
- return "I40E_AQ_RC_ENOSYS";
- case I40E_AQ_RC_ERANGE:
- return "I40E_AQ_RC_ERANGE";
- case I40E_AQ_RC_EFLUSHED:
- return "I40E_AQ_RC_EFLUSHED";
- case I40E_AQ_RC_BAD_ADDR:
- return "I40E_AQ_RC_BAD_ADDR";
- case I40E_AQ_RC_EMODE:
- return "I40E_AQ_RC_EMODE";
- case I40E_AQ_RC_EFBIG:
- return "I40E_AQ_RC_EFBIG";
+ case IAVF_AQ_RC_EPERM:
+ return "IAVF_AQ_RC_EPERM";
+ case IAVF_AQ_RC_ENOENT:
+ return "IAVF_AQ_RC_ENOENT";
+ case IAVF_AQ_RC_ESRCH:
+ return "IAVF_AQ_RC_ESRCH";
+ case IAVF_AQ_RC_EINTR:
+ return "IAVF_AQ_RC_EINTR";
+ case IAVF_AQ_RC_EIO:
+ return "IAVF_AQ_RC_EIO";
+ case IAVF_AQ_RC_ENXIO:
+ return "IAVF_AQ_RC_ENXIO";
+ case IAVF_AQ_RC_E2BIG:
+ return "IAVF_AQ_RC_E2BIG";
+ case IAVF_AQ_RC_EAGAIN:
+ return "IAVF_AQ_RC_EAGAIN";
+ case IAVF_AQ_RC_ENOMEM:
+ return "IAVF_AQ_RC_ENOMEM";
+ case IAVF_AQ_RC_EACCES:
+ return "IAVF_AQ_RC_EACCES";
+ case IAVF_AQ_RC_EFAULT:
+ return "IAVF_AQ_RC_EFAULT";
+ case IAVF_AQ_RC_EBUSY:
+ return "IAVF_AQ_RC_EBUSY";
+ case IAVF_AQ_RC_EEXIST:
+ return "IAVF_AQ_RC_EEXIST";
+ case IAVF_AQ_RC_EINVAL:
+ return "IAVF_AQ_RC_EINVAL";
+ case IAVF_AQ_RC_ENOTTY:
+ return "IAVF_AQ_RC_ENOTTY";
+ case IAVF_AQ_RC_ENOSPC:
+ return "IAVF_AQ_RC_ENOSPC";
+ case IAVF_AQ_RC_ENOSYS:
+ return "IAVF_AQ_RC_ENOSYS";
+ case IAVF_AQ_RC_ERANGE:
+ return "IAVF_AQ_RC_ERANGE";
+ case IAVF_AQ_RC_EFLUSHED:
+ return "IAVF_AQ_RC_EFLUSHED";
+ case IAVF_AQ_RC_BAD_ADDR:
+ return "IAVF_AQ_RC_BAD_ADDR";
+ case IAVF_AQ_RC_EMODE:
+ return "IAVF_AQ_RC_EMODE";
+ case IAVF_AQ_RC_EFBIG:
+ return "IAVF_AQ_RC_EFBIG";
}
snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
@@ -104,143 +104,143 @@ const char *iavf_aq_str(struct iavf_hw *hw, enum i40e_admin_queue_err aq_err)
* @hw: pointer to the HW structure
* @stat_err: the status error code to convert
**/
-const char *iavf_stat_str(struct iavf_hw *hw, iavf_status stat_err)
+const char *iavf_stat_str(struct iavf_hw *hw, enum iavf_status stat_err)
{
switch (stat_err) {
case 0:
return "OK";
- case I40E_ERR_NVM:
- return "I40E_ERR_NVM";
- case I40E_ERR_NVM_CHECKSUM:
- return "I40E_ERR_NVM_CHECKSUM";
- case I40E_ERR_PHY:
- return "I40E_ERR_PHY";
- case I40E_ERR_CONFIG:
- return "I40E_ERR_CONFIG";
- case I40E_ERR_PARAM:
- return "I40E_ERR_PARAM";
- case I40E_ERR_MAC_TYPE:
- return "I40E_ERR_MAC_TYPE";
- case I40E_ERR_UNKNOWN_PHY:
- return "I40E_ERR_UNKNOWN_PHY";
- case I40E_ERR_LINK_SETUP:
- return "I40E_ERR_LINK_SETUP";
- case I40E_ERR_ADAPTER_STOPPED:
- return "I40E_ERR_ADAPTER_STOPPED";
- case I40E_ERR_INVALID_MAC_ADDR:
- return "I40E_ERR_INVALID_MAC_ADDR";
- case I40E_ERR_DEVICE_NOT_SUPPORTED:
- return "I40E_ERR_DEVICE_NOT_SUPPORTED";
- case I40E_ERR_MASTER_REQUESTS_PENDING:
- return "I40E_ERR_MASTER_REQUESTS_PENDING";
- case I40E_ERR_INVALID_LINK_SETTINGS:
- return "I40E_ERR_INVALID_LINK_SETTINGS";
- case I40E_ERR_AUTONEG_NOT_COMPLETE:
- return "I40E_ERR_AUTONEG_NOT_COMPLETE";
- case I40E_ERR_RESET_FAILED:
- return "I40E_ERR_RESET_FAILED";
- case I40E_ERR_SWFW_SYNC:
- return "I40E_ERR_SWFW_SYNC";
- case I40E_ERR_NO_AVAILABLE_VSI:
- return "I40E_ERR_NO_AVAILABLE_VSI";
- case I40E_ERR_NO_MEMORY:
- return "I40E_ERR_NO_MEMORY";
- case I40E_ERR_BAD_PTR:
- return "I40E_ERR_BAD_PTR";
- case I40E_ERR_RING_FULL:
- return "I40E_ERR_RING_FULL";
- case I40E_ERR_INVALID_PD_ID:
- return "I40E_ERR_INVALID_PD_ID";
- case I40E_ERR_INVALID_QP_ID:
- return "I40E_ERR_INVALID_QP_ID";
- case I40E_ERR_INVALID_CQ_ID:
- return "I40E_ERR_INVALID_CQ_ID";
- case I40E_ERR_INVALID_CEQ_ID:
- return "I40E_ERR_INVALID_CEQ_ID";
- case I40E_ERR_INVALID_AEQ_ID:
- return "I40E_ERR_INVALID_AEQ_ID";
- case I40E_ERR_INVALID_SIZE:
- return "I40E_ERR_INVALID_SIZE";
- case I40E_ERR_INVALID_ARP_INDEX:
- return "I40E_ERR_INVALID_ARP_INDEX";
- case I40E_ERR_INVALID_FPM_FUNC_ID:
- return "I40E_ERR_INVALID_FPM_FUNC_ID";
- case I40E_ERR_QP_INVALID_MSG_SIZE:
- return "I40E_ERR_QP_INVALID_MSG_SIZE";
- case I40E_ERR_QP_TOOMANY_WRS_POSTED:
- return "I40E_ERR_QP_TOOMANY_WRS_POSTED";
- case I40E_ERR_INVALID_FRAG_COUNT:
- return "I40E_ERR_INVALID_FRAG_COUNT";
- case I40E_ERR_QUEUE_EMPTY:
- return "I40E_ERR_QUEUE_EMPTY";
- case I40E_ERR_INVALID_ALIGNMENT:
- return "I40E_ERR_INVALID_ALIGNMENT";
- case I40E_ERR_FLUSHED_QUEUE:
- return "I40E_ERR_FLUSHED_QUEUE";
- case I40E_ERR_INVALID_PUSH_PAGE_INDEX:
- return "I40E_ERR_INVALID_PUSH_PAGE_INDEX";
- case I40E_ERR_INVALID_IMM_DATA_SIZE:
- return "I40E_ERR_INVALID_IMM_DATA_SIZE";
- case I40E_ERR_TIMEOUT:
- return "I40E_ERR_TIMEOUT";
- case I40E_ERR_OPCODE_MISMATCH:
- return "I40E_ERR_OPCODE_MISMATCH";
- case I40E_ERR_CQP_COMPL_ERROR:
- return "I40E_ERR_CQP_COMPL_ERROR";
- case I40E_ERR_INVALID_VF_ID:
- return "I40E_ERR_INVALID_VF_ID";
- case I40E_ERR_INVALID_HMCFN_ID:
- return "I40E_ERR_INVALID_HMCFN_ID";
- case I40E_ERR_BACKING_PAGE_ERROR:
- return "I40E_ERR_BACKING_PAGE_ERROR";
- case I40E_ERR_NO_PBLCHUNKS_AVAILABLE:
- return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE";
- case I40E_ERR_INVALID_PBLE_INDEX:
- return "I40E_ERR_INVALID_PBLE_INDEX";
- case I40E_ERR_INVALID_SD_INDEX:
- return "I40E_ERR_INVALID_SD_INDEX";
- case I40E_ERR_INVALID_PAGE_DESC_INDEX:
- return "I40E_ERR_INVALID_PAGE_DESC_INDEX";
- case I40E_ERR_INVALID_SD_TYPE:
- return "I40E_ERR_INVALID_SD_TYPE";
- case I40E_ERR_MEMCPY_FAILED:
- return "I40E_ERR_MEMCPY_FAILED";
- case I40E_ERR_INVALID_HMC_OBJ_INDEX:
- return "I40E_ERR_INVALID_HMC_OBJ_INDEX";
- case I40E_ERR_INVALID_HMC_OBJ_COUNT:
- return "I40E_ERR_INVALID_HMC_OBJ_COUNT";
- case I40E_ERR_INVALID_SRQ_ARM_LIMIT:
- return "I40E_ERR_INVALID_SRQ_ARM_LIMIT";
- case I40E_ERR_SRQ_ENABLED:
- return "I40E_ERR_SRQ_ENABLED";
- case I40E_ERR_ADMIN_QUEUE_ERROR:
- return "I40E_ERR_ADMIN_QUEUE_ERROR";
- case I40E_ERR_ADMIN_QUEUE_TIMEOUT:
- return "I40E_ERR_ADMIN_QUEUE_TIMEOUT";
- case I40E_ERR_BUF_TOO_SHORT:
- return "I40E_ERR_BUF_TOO_SHORT";
- case I40E_ERR_ADMIN_QUEUE_FULL:
- return "I40E_ERR_ADMIN_QUEUE_FULL";
- case I40E_ERR_ADMIN_QUEUE_NO_WORK:
- return "I40E_ERR_ADMIN_QUEUE_NO_WORK";
- case I40E_ERR_BAD_IWARP_CQE:
- return "I40E_ERR_BAD_IWARP_CQE";
- case I40E_ERR_NVM_BLANK_MODE:
- return "I40E_ERR_NVM_BLANK_MODE";
- case I40E_ERR_NOT_IMPLEMENTED:
- return "I40E_ERR_NOT_IMPLEMENTED";
- case I40E_ERR_PE_DOORBELL_NOT_ENABLED:
- return "I40E_ERR_PE_DOORBELL_NOT_ENABLED";
- case I40E_ERR_DIAG_TEST_FAILED:
- return "I40E_ERR_DIAG_TEST_FAILED";
- case I40E_ERR_NOT_READY:
- return "I40E_ERR_NOT_READY";
- case I40E_NOT_SUPPORTED:
- return "I40E_NOT_SUPPORTED";
- case I40E_ERR_FIRMWARE_API_VERSION:
- return "I40E_ERR_FIRMWARE_API_VERSION";
- case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
- return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR";
+ case IAVF_ERR_NVM:
+ return "IAVF_ERR_NVM";
+ case IAVF_ERR_NVM_CHECKSUM:
+ return "IAVF_ERR_NVM_CHECKSUM";
+ case IAVF_ERR_PHY:
+ return "IAVF_ERR_PHY";
+ case IAVF_ERR_CONFIG:
+ return "IAVF_ERR_CONFIG";
+ case IAVF_ERR_PARAM:
+ return "IAVF_ERR_PARAM";
+ case IAVF_ERR_MAC_TYPE:
+ return "IAVF_ERR_MAC_TYPE";
+ case IAVF_ERR_UNKNOWN_PHY:
+ return "IAVF_ERR_UNKNOWN_PHY";
+ case IAVF_ERR_LINK_SETUP:
+ return "IAVF_ERR_LINK_SETUP";
+ case IAVF_ERR_ADAPTER_STOPPED:
+ return "IAVF_ERR_ADAPTER_STOPPED";
+ case IAVF_ERR_INVALID_MAC_ADDR:
+ return "IAVF_ERR_INVALID_MAC_ADDR";
+ case IAVF_ERR_DEVICE_NOT_SUPPORTED:
+ return "IAVF_ERR_DEVICE_NOT_SUPPORTED";
+ case IAVF_ERR_MASTER_REQUESTS_PENDING:
+ return "IAVF_ERR_MASTER_REQUESTS_PENDING";
+ case IAVF_ERR_INVALID_LINK_SETTINGS:
+ return "IAVF_ERR_INVALID_LINK_SETTINGS";
+ case IAVF_ERR_AUTONEG_NOT_COMPLETE:
+ return "IAVF_ERR_AUTONEG_NOT_COMPLETE";
+ case IAVF_ERR_RESET_FAILED:
+ return "IAVF_ERR_RESET_FAILED";
+ case IAVF_ERR_SWFW_SYNC:
+ return "IAVF_ERR_SWFW_SYNC";
+ case IAVF_ERR_NO_AVAILABLE_VSI:
+ return "IAVF_ERR_NO_AVAILABLE_VSI";
+ case IAVF_ERR_NO_MEMORY:
+ return "IAVF_ERR_NO_MEMORY";
+ case IAVF_ERR_BAD_PTR:
+ return "IAVF_ERR_BAD_PTR";
+ case IAVF_ERR_RING_FULL:
+ return "IAVF_ERR_RING_FULL";
+ case IAVF_ERR_INVALID_PD_ID:
+ return "IAVF_ERR_INVALID_PD_ID";
+ case IAVF_ERR_INVALID_QP_ID:
+ return "IAVF_ERR_INVALID_QP_ID";
+ case IAVF_ERR_INVALID_CQ_ID:
+ return "IAVF_ERR_INVALID_CQ_ID";
+ case IAVF_ERR_INVALID_CEQ_ID:
+ return "IAVF_ERR_INVALID_CEQ_ID";
+ case IAVF_ERR_INVALID_AEQ_ID:
+ return "IAVF_ERR_INVALID_AEQ_ID";
+ case IAVF_ERR_INVALID_SIZE:
+ return "IAVF_ERR_INVALID_SIZE";
+ case IAVF_ERR_INVALID_ARP_INDEX:
+ return "IAVF_ERR_INVALID_ARP_INDEX";
+ case IAVF_ERR_INVALID_FPM_FUNC_ID:
+ return "IAVF_ERR_INVALID_FPM_FUNC_ID";
+ case IAVF_ERR_QP_INVALID_MSG_SIZE:
+ return "IAVF_ERR_QP_INVALID_MSG_SIZE";
+ case IAVF_ERR_QP_TOOMANY_WRS_POSTED:
+ return "IAVF_ERR_QP_TOOMANY_WRS_POSTED";
+ case IAVF_ERR_INVALID_FRAG_COUNT:
+ return "IAVF_ERR_INVALID_FRAG_COUNT";
+ case IAVF_ERR_QUEUE_EMPTY:
+ return "IAVF_ERR_QUEUE_EMPTY";
+ case IAVF_ERR_INVALID_ALIGNMENT:
+ return "IAVF_ERR_INVALID_ALIGNMENT";
+ case IAVF_ERR_FLUSHED_QUEUE:
+ return "IAVF_ERR_FLUSHED_QUEUE";
+ case IAVF_ERR_INVALID_PUSH_PAGE_INDEX:
+ return "IAVF_ERR_INVALID_PUSH_PAGE_INDEX";
+ case IAVF_ERR_INVALID_IMM_DATA_SIZE:
+ return "IAVF_ERR_INVALID_IMM_DATA_SIZE";
+ case IAVF_ERR_TIMEOUT:
+ return "IAVF_ERR_TIMEOUT";
+ case IAVF_ERR_OPCODE_MISMATCH:
+ return "IAVF_ERR_OPCODE_MISMATCH";
+ case IAVF_ERR_CQP_COMPL_ERROR:
+ return "IAVF_ERR_CQP_COMPL_ERROR";
+ case IAVF_ERR_INVALID_VF_ID:
+ return "IAVF_ERR_INVALID_VF_ID";
+ case IAVF_ERR_INVALID_HMCFN_ID:
+ return "IAVF_ERR_INVALID_HMCFN_ID";
+ case IAVF_ERR_BACKING_PAGE_ERROR:
+ return "IAVF_ERR_BACKING_PAGE_ERROR";
+ case IAVF_ERR_NO_PBLCHUNKS_AVAILABLE:
+ return "IAVF_ERR_NO_PBLCHUNKS_AVAILABLE";
+ case IAVF_ERR_INVALID_PBLE_INDEX:
+ return "IAVF_ERR_INVALID_PBLE_INDEX";
+ case IAVF_ERR_INVALID_SD_INDEX:
+ return "IAVF_ERR_INVALID_SD_INDEX";
+ case IAVF_ERR_INVALID_PAGE_DESC_INDEX:
+ return "IAVF_ERR_INVALID_PAGE_DESC_INDEX";
+ case IAVF_ERR_INVALID_SD_TYPE:
+ return "IAVF_ERR_INVALID_SD_TYPE";
+ case IAVF_ERR_MEMCPY_FAILED:
+ return "IAVF_ERR_MEMCPY_FAILED";
+ case IAVF_ERR_INVALID_HMC_OBJ_INDEX:
+ return "IAVF_ERR_INVALID_HMC_OBJ_INDEX";
+ case IAVF_ERR_INVALID_HMC_OBJ_COUNT:
+ return "IAVF_ERR_INVALID_HMC_OBJ_COUNT";
+ case IAVF_ERR_INVALID_SRQ_ARM_LIMIT:
+ return "IAVF_ERR_INVALID_SRQ_ARM_LIMIT";
+ case IAVF_ERR_SRQ_ENABLED:
+ return "IAVF_ERR_SRQ_ENABLED";
+ case IAVF_ERR_ADMIN_QUEUE_ERROR:
+ return "IAVF_ERR_ADMIN_QUEUE_ERROR";
+ case IAVF_ERR_ADMIN_QUEUE_TIMEOUT:
+ return "IAVF_ERR_ADMIN_QUEUE_TIMEOUT";
+ case IAVF_ERR_BUF_TOO_SHORT:
+ return "IAVF_ERR_BUF_TOO_SHORT";
+ case IAVF_ERR_ADMIN_QUEUE_FULL:
+ return "IAVF_ERR_ADMIN_QUEUE_FULL";
+ case IAVF_ERR_ADMIN_QUEUE_NO_WORK:
+ return "IAVF_ERR_ADMIN_QUEUE_NO_WORK";
+ case IAVF_ERR_BAD_IWARP_CQE:
+ return "IAVF_ERR_BAD_IWARP_CQE";
+ case IAVF_ERR_NVM_BLANK_MODE:
+ return "IAVF_ERR_NVM_BLANK_MODE";
+ case IAVF_ERR_NOT_IMPLEMENTED:
+ return "IAVF_ERR_NOT_IMPLEMENTED";
+ case IAVF_ERR_PE_DOORBELL_NOT_ENABLED:
+ return "IAVF_ERR_PE_DOORBELL_NOT_ENABLED";
+ case IAVF_ERR_DIAG_TEST_FAILED:
+ return "IAVF_ERR_DIAG_TEST_FAILED";
+ case IAVF_ERR_NOT_READY:
+ return "IAVF_ERR_NOT_READY";
+ case IAVF_NOT_SUPPORTED:
+ return "IAVF_NOT_SUPPORTED";
+ case IAVF_ERR_FIRMWARE_API_VERSION:
+ return "IAVF_ERR_FIRMWARE_API_VERSION";
+ case IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
+ return "IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR";
}
snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
@@ -260,7 +260,7 @@ const char *iavf_stat_str(struct iavf_hw *hw, iavf_status stat_err)
void iavf_debug_aq(struct iavf_hw *hw, enum iavf_debug_mask mask, void *desc,
void *buffer, u16 buf_len)
{
- struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
+ struct iavf_aq_desc *aq_desc = (struct iavf_aq_desc *)desc;
u8 *buf = (u8 *)buffer;
if ((!(mask & hw->debug_mask)) || !desc)
@@ -327,17 +327,17 @@ bool iavf_check_asq_alive(struct iavf_hw *hw)
* Tell the Firmware that we're shutting down the AdminQ and whether
* or not the driver is unloading as well.
**/
-iavf_status iavf_aq_queue_shutdown(struct iavf_hw *hw, bool unloading)
+enum iavf_status iavf_aq_queue_shutdown(struct iavf_hw *hw, bool unloading)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_queue_shutdown *cmd =
- (struct i40e_aqc_queue_shutdown *)&desc.params.raw;
- iavf_status status;
+ struct iavf_aq_desc desc;
+ struct iavf_aqc_queue_shutdown *cmd =
+ (struct iavf_aqc_queue_shutdown *)&desc.params.raw;
+ enum iavf_status status;
- iavf_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_queue_shutdown);
+ iavf_fill_default_direct_cmd_desc(&desc, iavf_aqc_opc_queue_shutdown);
if (unloading)
- cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
+ cmd->driver_unloading = cpu_to_le32(IAVF_AQ_DRIVER_UNLOADING);
status = iavf_asq_send_command(hw, &desc, NULL, 0, NULL);
return status;
@@ -354,43 +354,43 @@ iavf_status iavf_aq_queue_shutdown(struct iavf_hw *hw, bool unloading)
*
* Internal function to get or set RSS look up table
**/
-static iavf_status iavf_aq_get_set_rss_lut(struct iavf_hw *hw,
- u16 vsi_id, bool pf_lut,
- u8 *lut, u16 lut_size,
- bool set)
+static enum iavf_status iavf_aq_get_set_rss_lut(struct iavf_hw *hw,
+ u16 vsi_id, bool pf_lut,
+ u8 *lut, u16 lut_size,
+ bool set)
{
- iavf_status status;
- struct i40e_aq_desc desc;
- struct i40e_aqc_get_set_rss_lut *cmd_resp =
- (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;
+ enum iavf_status status;
+ struct iavf_aq_desc desc;
+ struct iavf_aqc_get_set_rss_lut *cmd_resp =
+ (struct iavf_aqc_get_set_rss_lut *)&desc.params.raw;
if (set)
iavf_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_set_rss_lut);
+ iavf_aqc_opc_set_rss_lut);
else
iavf_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_get_rss_lut);
+ iavf_aqc_opc_get_rss_lut);
/* Indirect command */
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_RD);
cmd_resp->vsi_id =
cpu_to_le16((u16)((vsi_id <<
- I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
- I40E_AQC_SET_RSS_LUT_VSI_ID_MASK));
- cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID);
+ IAVF_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
+ IAVF_AQC_SET_RSS_LUT_VSI_ID_MASK));
+ cmd_resp->vsi_id |= cpu_to_le16((u16)IAVF_AQC_SET_RSS_LUT_VSI_VALID);
if (pf_lut)
cmd_resp->flags |= cpu_to_le16((u16)
- ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
- I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
- I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+ ((IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
+ IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
+ IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
else
cmd_resp->flags |= cpu_to_le16((u16)
- ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
- I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
- I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+ ((IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
+ IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
+ IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
status = iavf_asq_send_command(hw, &desc, lut, lut_size, NULL);
@@ -407,8 +407,8 @@ static iavf_status iavf_aq_get_set_rss_lut(struct iavf_hw *hw,
*
* get the RSS lookup table, PF or VSI type
**/
-iavf_status iavf_aq_get_rss_lut(struct iavf_hw *hw, u16 vsi_id,
- bool pf_lut, u8 *lut, u16 lut_size)
+enum iavf_status iavf_aq_get_rss_lut(struct iavf_hw *hw, u16 vsi_id,
+ bool pf_lut, u8 *lut, u16 lut_size)
{
return iavf_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
false);
@@ -424,8 +424,8 @@ iavf_status iavf_aq_get_rss_lut(struct iavf_hw *hw, u16 vsi_id,
*
* set the RSS lookup table, PF or VSI type
**/
-iavf_status iavf_aq_set_rss_lut(struct iavf_hw *hw, u16 vsi_id,
- bool pf_lut, u8 *lut, u16 lut_size)
+enum iavf_status iavf_aq_set_rss_lut(struct iavf_hw *hw, u16 vsi_id,
+ bool pf_lut, u8 *lut, u16 lut_size)
{
return iavf_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
}
@@ -439,33 +439,33 @@ iavf_status iavf_aq_set_rss_lut(struct iavf_hw *hw, u16 vsi_id,
*
* get the RSS key per VSI
**/
-static
+static enum
iavf_status iavf_aq_get_set_rss_key(struct iavf_hw *hw, u16 vsi_id,
- struct i40e_aqc_get_set_rss_key_data *key,
+ struct iavf_aqc_get_set_rss_key_data *key,
bool set)
{
- iavf_status status;
- struct i40e_aq_desc desc;
- struct i40e_aqc_get_set_rss_key *cmd_resp =
- (struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
- u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);
+ enum iavf_status status;
+ struct iavf_aq_desc desc;
+ struct iavf_aqc_get_set_rss_key *cmd_resp =
+ (struct iavf_aqc_get_set_rss_key *)&desc.params.raw;
+ u16 key_size = sizeof(struct iavf_aqc_get_set_rss_key_data);
if (set)
iavf_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_set_rss_key);
+ iavf_aqc_opc_set_rss_key);
else
iavf_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_get_rss_key);
+ iavf_aqc_opc_get_rss_key);
/* Indirect command */
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_RD);
cmd_resp->vsi_id =
cpu_to_le16((u16)((vsi_id <<
- I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
- I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
- cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);
+ IAVF_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
+ IAVF_AQC_SET_RSS_KEY_VSI_ID_MASK));
+ cmd_resp->vsi_id |= cpu_to_le16((u16)IAVF_AQC_SET_RSS_KEY_VSI_VALID);
status = iavf_asq_send_command(hw, &desc, key, key_size, NULL);
@@ -479,8 +479,8 @@ iavf_status iavf_aq_get_set_rss_key(struct iavf_hw *hw, u16 vsi_id,
* @key: pointer to key info struct
*
**/
-iavf_status iavf_aq_get_rss_key(struct iavf_hw *hw, u16 vsi_id,
- struct i40e_aqc_get_set_rss_key_data *key)
+enum iavf_status iavf_aq_get_rss_key(struct iavf_hw *hw, u16 vsi_id,
+ struct iavf_aqc_get_set_rss_key_data *key)
{
return iavf_aq_get_set_rss_key(hw, vsi_id, key, false);
}
@@ -493,8 +493,8 @@ iavf_status iavf_aq_get_rss_key(struct iavf_hw *hw, u16 vsi_id,
*
* set the RSS key per VSI
**/
-iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw, u16 vsi_id,
- struct i40e_aqc_get_set_rss_key_data *key)
+enum iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw, u16 vsi_id,
+ struct iavf_aqc_get_set_rss_key_data *key)
{
return iavf_aq_get_set_rss_key(hw, vsi_id, key, true);
}
@@ -515,7 +515,7 @@ iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw, u16 vsi_id,
* IF NOT iavf_ptype_lookup[ptype].known
* THEN
* Packet is unknown
- * ELSE IF iavf_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP
+ * ELSE IF iavf_ptype_lookup[ptype].outer_ip == IAVF_RX_PTYPE_OUTER_IP
* Use the rest of the fields to look at the tunnels, inner protocols, etc
* ELSE
* Use the enum iavf_rx_l2_ptype to decode the packet type
@@ -877,24 +877,25 @@ struct iavf_rx_ptype_decoded iavf_ptype_lookup[] = {
* is sent asynchronously, i.e. iavf_asq_send_command() does not wait for
* completion before returning.
**/
-iavf_status iavf_aq_send_msg_to_pf(struct iavf_hw *hw,
- enum virtchnl_ops v_opcode,
- iavf_status v_retval, u8 *msg, u16 msglen,
- struct i40e_asq_cmd_details *cmd_details)
+enum iavf_status iavf_aq_send_msg_to_pf(struct iavf_hw *hw,
+ enum virtchnl_ops v_opcode,
+ enum iavf_status v_retval,
+ u8 *msg, u16 msglen,
+ struct iavf_asq_cmd_details *cmd_details)
{
- struct i40e_asq_cmd_details details;
- struct i40e_aq_desc desc;
- iavf_status status;
+ struct iavf_asq_cmd_details details;
+ struct iavf_aq_desc desc;
+ enum iavf_status status;
- iavf_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_pf);
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI);
+ iavf_fill_default_direct_cmd_desc(&desc, iavf_aqc_opc_send_msg_to_pf);
+ desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_SI);
desc.cookie_high = cpu_to_le32(v_opcode);
desc.cookie_low = cpu_to_le32(v_retval);
if (msglen) {
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF
- | I40E_AQ_FLAG_RD));
- if (msglen > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)(IAVF_AQ_FLAG_BUF
+ | IAVF_AQ_FLAG_RD));
+ if (msglen > IAVF_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_LB);
desc.datalen = cpu_to_le16(msglen);
}
if (!cmd_details) {
@@ -948,7 +949,7 @@ void iavf_vf_parse_hw_config(struct iavf_hw *hw,
* as none will be forthcoming. Immediately after calling this function,
* the admin queue should be shut down and (optionally) reinitialized.
**/
-iavf_status iavf_vf_reset(struct iavf_hw *hw)
+enum iavf_status iavf_vf_reset(struct iavf_hw *hw)
{
return iavf_aq_send_msg_to_pf(hw, VIRTCHNL_OP_RESET_VF,
0, NULL, 0, NULL);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index 9f87304109fe..dad3eec8ccd8 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -280,10 +280,10 @@ static int iavf_get_link_ksettings(struct net_device *netdev,
cmd->base.port = PORT_NONE;
/* Set speed and duplex */
switch (adapter->link_speed) {
- case I40E_LINK_SPEED_40GB:
+ case IAVF_LINK_SPEED_40GB:
cmd->base.speed = SPEED_40000;
break;
- case I40E_LINK_SPEED_25GB:
+ case IAVF_LINK_SPEED_25GB:
#ifdef SPEED_25000
cmd->base.speed = SPEED_25000;
#else
@@ -291,16 +291,16 @@ static int iavf_get_link_ksettings(struct net_device *netdev,
"Speed is 25G, display not supported by this version of ethtool.\n");
#endif
break;
- case I40E_LINK_SPEED_20GB:
+ case IAVF_LINK_SPEED_20GB:
cmd->base.speed = SPEED_20000;
break;
- case I40E_LINK_SPEED_10GB:
+ case IAVF_LINK_SPEED_10GB:
cmd->base.speed = SPEED_10000;
break;
- case I40E_LINK_SPEED_1GB:
+ case IAVF_LINK_SPEED_1GB:
cmd->base.speed = SPEED_1000;
break;
- case I40E_LINK_SPEED_100MB:
+ case IAVF_LINK_SPEED_100MB:
cmd->base.speed = SPEED_100;
break;
default:
@@ -510,7 +510,7 @@ static int iavf_set_priv_flags(struct net_device *netdev, u32 flags)
if (changed_flags & IAVF_FLAG_LEGACY_RX) {
if (netif_running(netdev)) {
adapter->flags |= IAVF_FLAG_RESET_NEEDED;
- schedule_work(&adapter->reset_task);
+ queue_work(iavf_wq, &adapter->reset_task);
}
}
@@ -622,7 +622,7 @@ static int iavf_set_ringparam(struct net_device *netdev,
if (netif_running(netdev)) {
adapter->flags |= IAVF_FLAG_RESET_NEEDED;
- schedule_work(&adapter->reset_task);
+ queue_work(iavf_wq, &adapter->reset_task);
}
return 0;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 4569d69a2b55..9d2b50964a08 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -14,6 +14,8 @@
static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter);
static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter);
static int iavf_close(struct net_device *netdev);
+static int iavf_init_get_resources(struct iavf_adapter *adapter);
+static int iavf_check_reset_complete(struct iavf_hw *hw);
char iavf_driver_name[] = "iavf";
static const char iavf_driver_string[] =
@@ -57,7 +59,8 @@ MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver")
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
-static struct workqueue_struct *iavf_wq;
+static const struct net_device_ops iavf_netdev_ops;
+struct workqueue_struct *iavf_wq;
/**
* iavf_allocate_dma_mem_d - OS specific memory alloc for shared code
@@ -66,14 +69,14 @@ static struct workqueue_struct *iavf_wq;
* @size: size of memory requested
* @alignment: what to align the allocation to
**/
-iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw,
- struct iavf_dma_mem *mem,
- u64 size, u32 alignment)
+enum iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw,
+ struct iavf_dma_mem *mem,
+ u64 size, u32 alignment)
{
struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;
if (!mem)
- return I40E_ERR_PARAM;
+ return IAVF_ERR_PARAM;
mem->size = ALIGN(size, alignment);
mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
@@ -81,7 +84,7 @@ iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw,
if (mem->va)
return 0;
else
- return I40E_ERR_NO_MEMORY;
+ return IAVF_ERR_NO_MEMORY;
}
/**
@@ -89,12 +92,13 @@ iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw,
* @hw: pointer to the HW structure
* @mem: ptr to mem struct to free
**/
-iavf_status iavf_free_dma_mem_d(struct iavf_hw *hw, struct iavf_dma_mem *mem)
+enum iavf_status iavf_free_dma_mem_d(struct iavf_hw *hw,
+ struct iavf_dma_mem *mem)
{
struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;
if (!mem || !mem->va)
- return I40E_ERR_PARAM;
+ return IAVF_ERR_PARAM;
dma_free_coherent(&adapter->pdev->dev, mem->size,
mem->va, (dma_addr_t)mem->pa);
return 0;
@@ -106,11 +110,11 @@ iavf_status iavf_free_dma_mem_d(struct iavf_hw *hw, struct iavf_dma_mem *mem)
* @mem: ptr to mem struct to fill out
* @size: size of memory requested
**/
-iavf_status iavf_allocate_virt_mem_d(struct iavf_hw *hw,
- struct iavf_virt_mem *mem, u32 size)
+enum iavf_status iavf_allocate_virt_mem_d(struct iavf_hw *hw,
+ struct iavf_virt_mem *mem, u32 size)
{
if (!mem)
- return I40E_ERR_PARAM;
+ return IAVF_ERR_PARAM;
mem->size = size;
mem->va = kzalloc(size, GFP_KERNEL);
@@ -118,7 +122,7 @@ iavf_status iavf_allocate_virt_mem_d(struct iavf_hw *hw,
if (mem->va)
return 0;
else
- return I40E_ERR_NO_MEMORY;
+ return IAVF_ERR_NO_MEMORY;
}
/**
@@ -126,10 +130,11 @@ iavf_status iavf_allocate_virt_mem_d(struct iavf_hw *hw,
* @hw: pointer to the HW structure
* @mem: ptr to mem struct to free
**/
-iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw, struct iavf_virt_mem *mem)
+enum iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw,
+ struct iavf_virt_mem *mem)
{
if (!mem)
- return I40E_ERR_PARAM;
+ return IAVF_ERR_PARAM;
/* it's ok to kfree a NULL pointer */
kfree(mem->va);
@@ -168,7 +173,7 @@ void iavf_schedule_reset(struct iavf_adapter *adapter)
if (!(adapter->flags &
(IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) {
adapter->flags |= IAVF_FLAG_RESET_NEEDED;
- schedule_work(&adapter->reset_task);
+ queue_work(iavf_wq, &adapter->reset_task);
}
}
@@ -287,7 +292,7 @@ static irqreturn_t iavf_msix_aq(int irq, void *data)
rd32(hw, IAVF_VFINT_ICR0_ENA1);
/* schedule work on the private workqueue */
- schedule_work(&adapter->adminq_task);
+ queue_work(iavf_wq, &adapter->adminq_task);
return IRQ_HANDLED;
}
@@ -657,14 +662,13 @@ iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter, u16 vlan)
f = iavf_find_vlan(adapter, vlan);
if (!f) {
- f = kzalloc(sizeof(*f), GFP_KERNEL);
+ f = kzalloc(sizeof(*f), GFP_ATOMIC);
if (!f)
goto clearout;
f->vlan = vlan;
- INIT_LIST_HEAD(&f->list);
- list_add(&f->list, &adapter->vlan_filter_list);
+ list_add_tail(&f->list, &adapter->vlan_filter_list);
f->add = true;
adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
}
@@ -979,7 +983,7 @@ static void iavf_up_complete(struct iavf_adapter *adapter)
adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES;
if (CLIENT_ENABLED(adapter))
adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_OPEN;
- mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
+ mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
}
/**
@@ -1043,7 +1047,7 @@ void iavf_down(struct iavf_adapter *adapter)
adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
}
- mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
+ mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
}
/**
@@ -1227,8 +1231,8 @@ out:
**/
static int iavf_config_rss_aq(struct iavf_adapter *adapter)
{
- struct i40e_aqc_get_set_rss_key_data *rss_key =
- (struct i40e_aqc_get_set_rss_key_data *)adapter->rss_key;
+ struct iavf_aqc_get_set_rss_key_data *rss_key =
+ (struct iavf_aqc_get_set_rss_key_data *)adapter->rss_key;
struct iavf_hw *hw = &adapter->hw;
int ret = 0;
@@ -1532,136 +1536,66 @@ err:
}
/**
- * iavf_watchdog_timer - Periodic call-back timer
- * @data: pointer to adapter disguised as unsigned long
- **/
-static void iavf_watchdog_timer(struct timer_list *t)
-{
- struct iavf_adapter *adapter = from_timer(adapter, t,
- watchdog_timer);
-
- schedule_work(&adapter->watchdog_task);
- /* timer will be rescheduled in watchdog task */
-}
-
-/**
- * iavf_watchdog_task - Periodic call-back task
- * @work: pointer to work_struct
+ * iavf_process_aq_command - process aq_required flags
+ * and send the corresponding aq command
+ * @adapter: pointer to iavf adapter structure
+ *
+ * Returns 0 on success
+ * Returns an error code if no command was sent
+ * or if the command failed.
**/
-static void iavf_watchdog_task(struct work_struct *work)
+static int iavf_process_aq_command(struct iavf_adapter *adapter)
{
- struct iavf_adapter *adapter = container_of(work,
- struct iavf_adapter,
- watchdog_task);
- struct iavf_hw *hw = &adapter->hw;
- u32 reg_val;
-
- if (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section))
- goto restart_watchdog;
-
- if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) {
- reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
- IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
- if ((reg_val == VIRTCHNL_VFR_VFACTIVE) ||
- (reg_val == VIRTCHNL_VFR_COMPLETED)) {
- /* A chance for redemption! */
- dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n");
- adapter->state = __IAVF_STARTUP;
- adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
- schedule_delayed_work(&adapter->init_task, 10);
- clear_bit(__IAVF_IN_CRITICAL_TASK,
- &adapter->crit_section);
- /* Don't reschedule the watchdog, since we've restarted
- * the init task. When init_task contacts the PF and
- * gets everything set up again, it'll restart the
- * watchdog for us. Down, boy. Sit. Stay. Woof.
- */
- return;
- }
- adapter->aq_required = 0;
- adapter->current_op = VIRTCHNL_OP_UNKNOWN;
- goto watchdog_done;
- }
-
- if ((adapter->state < __IAVF_DOWN) ||
- (adapter->flags & IAVF_FLAG_RESET_PENDING))
- goto watchdog_done;
-
- /* check for reset */
- reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK;
- if (!(adapter->flags & IAVF_FLAG_RESET_PENDING) && !reg_val) {
- adapter->state = __IAVF_RESETTING;
- adapter->flags |= IAVF_FLAG_RESET_PENDING;
- dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
- schedule_work(&adapter->reset_task);
- adapter->aq_required = 0;
- adapter->current_op = VIRTCHNL_OP_UNKNOWN;
- goto watchdog_done;
- }
-
- /* Process admin queue tasks. After init, everything gets done
- * here so we don't race on the admin queue.
- */
- if (adapter->current_op) {
- if (!iavf_asq_done(hw)) {
- dev_dbg(&adapter->pdev->dev, "Admin queue timeout\n");
- iavf_send_api_ver(adapter);
- }
- goto watchdog_done;
- }
- if (adapter->aq_required & IAVF_FLAG_AQ_GET_CONFIG) {
- iavf_send_vf_config_msg(adapter);
- goto watchdog_done;
- }
-
+ if (adapter->aq_required & IAVF_FLAG_AQ_GET_CONFIG)
+ return iavf_send_vf_config_msg(adapter);
if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES) {
iavf_disable_queues(adapter);
- goto watchdog_done;
+ return 0;
}
if (adapter->aq_required & IAVF_FLAG_AQ_MAP_VECTORS) {
iavf_map_queues(adapter);
- goto watchdog_done;
+ return 0;
}
if (adapter->aq_required & IAVF_FLAG_AQ_ADD_MAC_FILTER) {
iavf_add_ether_addrs(adapter);
- goto watchdog_done;
+ return 0;
}
if (adapter->aq_required & IAVF_FLAG_AQ_ADD_VLAN_FILTER) {
iavf_add_vlans(adapter);
- goto watchdog_done;
+ return 0;
}
if (adapter->aq_required & IAVF_FLAG_AQ_DEL_MAC_FILTER) {
iavf_del_ether_addrs(adapter);
- goto watchdog_done;
+ return 0;
}
if (adapter->aq_required & IAVF_FLAG_AQ_DEL_VLAN_FILTER) {
iavf_del_vlans(adapter);
- goto watchdog_done;
+ return 0;
}
if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) {
iavf_enable_vlan_stripping(adapter);
- goto watchdog_done;
+ return 0;
}
if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) {
iavf_disable_vlan_stripping(adapter);
- goto watchdog_done;
+ return 0;
}
if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES) {
iavf_configure_queues(adapter);
- goto watchdog_done;
+ return 0;
}
if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_QUEUES) {
iavf_enable_queues(adapter);
- goto watchdog_done;
+ return 0;
}
if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_RSS) {
@@ -1669,81 +1603,414 @@ static void iavf_watchdog_task(struct work_struct *work)
* PF, so we don't have to set current_op as we will
* not get a response through the ARQ.
*/
- iavf_init_rss(adapter);
adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_RSS;
- goto watchdog_done;
+ return 0;
}
if (adapter->aq_required & IAVF_FLAG_AQ_GET_HENA) {
iavf_get_hena(adapter);
- goto watchdog_done;
+ return 0;
}
if (adapter->aq_required & IAVF_FLAG_AQ_SET_HENA) {
iavf_set_hena(adapter);
- goto watchdog_done;
+ return 0;
}
if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_KEY) {
iavf_set_rss_key(adapter);
- goto watchdog_done;
+ return 0;
}
if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_LUT) {
iavf_set_rss_lut(adapter);
- goto watchdog_done;
+ return 0;
}
if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_PROMISC) {
iavf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC |
FLAG_VF_MULTICAST_PROMISC);
- goto watchdog_done;
+ return 0;
}
if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_ALLMULTI) {
iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC);
- goto watchdog_done;
+ return 0;
}
if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) &&
(adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) {
iavf_set_promiscuous(adapter, 0);
- goto watchdog_done;
+ return 0;
}
if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CHANNELS) {
iavf_enable_channels(adapter);
- goto watchdog_done;
+ return 0;
}
if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CHANNELS) {
iavf_disable_channels(adapter);
- goto watchdog_done;
+ return 0;
}
-
if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) {
iavf_add_cloud_filter(adapter);
- goto watchdog_done;
+ return 0;
}
if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) {
iavf_del_cloud_filter(adapter);
+ return 0;
+ }
+ if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) {
+ iavf_del_cloud_filter(adapter);
+ return 0;
+ }
+ if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) {
+ iavf_add_cloud_filter(adapter);
+ return 0;
+ }
+ return -EAGAIN;
+}
+
+/**
+ * iavf_startup - first step of driver startup
+ * @adapter: board private structure
+ *
+ * Function process __IAVF_STARTUP driver state.
+ * When success the state is changed to __IAVF_INIT_VERSION_CHECK
+ * when fails it returns -EAGAIN
+ **/
+static int iavf_startup(struct iavf_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct iavf_hw *hw = &adapter->hw;
+ int err;
+
+ WARN_ON(adapter->state != __IAVF_STARTUP);
+
+ /* driver loaded, probe complete */
+ adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
+ adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
+ err = iavf_set_mac_type(hw);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", err);
+ goto err;
+ }
+
+ err = iavf_check_reset_complete(hw);
+ if (err) {
+ dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
+ err);
+ goto err;
+ }
+ hw->aq.num_arq_entries = IAVF_AQ_LEN;
+ hw->aq.num_asq_entries = IAVF_AQ_LEN;
+ hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
+ hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
+
+ err = iavf_init_adminq(hw);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n", err);
+ goto err;
+ }
+ err = iavf_send_api_ver(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err);
+ iavf_shutdown_adminq(hw);
+ goto err;
+ }
+ adapter->state = __IAVF_INIT_VERSION_CHECK;
+err:
+ return err;
+}
+
+/**
+ * iavf_init_version_check - second step of driver startup
+ * @adapter: board private structure
+ *
+ * Function processes the __IAVF_INIT_VERSION_CHECK driver state.
+ * On success the state is changed to __IAVF_INIT_GET_RESOURCES;
+ * on failure it returns -EAGAIN.
+ **/
+static int iavf_init_version_check(struct iavf_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct iavf_hw *hw = &adapter->hw;
+ int err = -EAGAIN;
+
+ WARN_ON(adapter->state != __IAVF_INIT_VERSION_CHECK);
+
+ if (!iavf_asq_done(hw)) {
+ dev_err(&pdev->dev, "Admin queue command never completed\n");
+ iavf_shutdown_adminq(hw);
+ adapter->state = __IAVF_STARTUP;
+ goto err;
+ }
+
+ /* aq msg sent, awaiting reply */
+ err = iavf_verify_api_ver(adapter);
+ if (err) {
+ if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK)
+ err = iavf_send_api_ver(adapter);
+ else
+ dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
+ adapter->pf_version.major,
+ adapter->pf_version.minor,
+ VIRTCHNL_VERSION_MAJOR,
+ VIRTCHNL_VERSION_MINOR);
+ goto err;
+ }
+ err = iavf_send_vf_config_msg(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Unable to send config request (%d)\n",
+ err);
+ goto err;
+ }
+ adapter->state = __IAVF_INIT_GET_RESOURCES;
+
+err:
+ return err;
+}
+
+/**
+ * iavf_init_get_resources - third step of driver startup
+ * @adapter: board private structure
+ *
+ * Function processes the __IAVF_INIT_GET_RESOURCES driver state and
+ * finishes the driver initialization procedure.
+ * On success the state is changed to __IAVF_DOWN;
+ * on failure it returns -EAGAIN.
+ **/
+static int iavf_init_get_resources(struct iavf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ struct iavf_hw *hw = &adapter->hw;
+ int err = 0, bufsz;
+
+ WARN_ON(adapter->state != __IAVF_INIT_GET_RESOURCES);
+ /* aq msg sent, awaiting reply */
+ if (!adapter->vf_res) {
+ bufsz = sizeof(struct virtchnl_vf_resource) +
+ (IAVF_MAX_VF_VSI *
+ sizeof(struct virtchnl_vsi_resource));
+ adapter->vf_res = kzalloc(bufsz, GFP_KERNEL);
+ if (!adapter->vf_res)
+ goto err;
+ }
+ err = iavf_get_vf_config(adapter);
+ if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK) {
+ err = iavf_send_vf_config_msg(adapter);
+ goto err;
+ } else if (err == IAVF_ERR_PARAM) {
+ /* We only get ERR_PARAM if the device is in a very bad
+ * state or if we've been disabled for previous bad
+ * behavior. Either way, we're done now.
+ */
+ iavf_shutdown_adminq(hw);
+ dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n");
+ return 0;
+ }
+ if (err) {
+ dev_err(&pdev->dev, "Unable to get VF config (%d)\n", err);
+ goto err_alloc;
+ }
+
+ if (iavf_process_config(adapter))
+ goto err_alloc;
+ adapter->current_op = VIRTCHNL_OP_UNKNOWN;
+
+ adapter->flags |= IAVF_FLAG_RX_CSUM_ENABLED;
+
+ netdev->netdev_ops = &iavf_netdev_ops;
+ iavf_set_ethtool_ops(netdev);
+ netdev->watchdog_timeo = 5 * HZ;
+
+ /* MTU range: 68 - 9710 */
+ netdev->min_mtu = ETH_MIN_MTU;
+ netdev->max_mtu = IAVF_MAX_RXBUFFER - IAVF_PACKET_HDR_PAD;
+
+ if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
+ dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
+ adapter->hw.mac.addr);
+ eth_hw_addr_random(netdev);
+ ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
+ } else {
+ adapter->flags |= IAVF_FLAG_ADDR_SET_BY_PF;
+ ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
+ ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
+ }
+
+ adapter->tx_desc_count = IAVF_DEFAULT_TXD;
+ adapter->rx_desc_count = IAVF_DEFAULT_RXD;
+ err = iavf_init_interrupt_scheme(adapter);
+ if (err)
+ goto err_sw_init;
+ iavf_map_rings_to_vectors(adapter);
+ if (adapter->vf_res->vf_cap_flags &
+ VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
+ adapter->flags |= IAVF_FLAG_WB_ON_ITR_CAPABLE;
+
+ err = iavf_request_misc_irq(adapter);
+ if (err)
+ goto err_sw_init;
+
+ netif_carrier_off(netdev);
+ adapter->link_up = false;
+
+	/* set the semaphore to prevent any callbacks after device registration
+	 * until the driver state is set to __IAVF_DOWN
+	 */
+ rtnl_lock();
+ if (!adapter->netdev_registered) {
+ err = register_netdevice(netdev);
+ if (err) {
+ rtnl_unlock();
+ goto err_register;
+ }
+ }
+
+ adapter->netdev_registered = true;
+
+ netif_tx_stop_all_queues(netdev);
+ if (CLIENT_ALLOWED(adapter)) {
+ err = iavf_lan_add_device(adapter);
+ if (err) {
+ rtnl_unlock();
+ dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n",
+ err);
+ }
+ }
+ dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
+ if (netdev->features & NETIF_F_GRO)
+ dev_info(&pdev->dev, "GRO is enabled\n");
+
+ adapter->state = __IAVF_DOWN;
+ set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
+ rtnl_unlock();
+
+ iavf_misc_irq_enable(adapter);
+ wake_up(&adapter->down_waitqueue);
+
+ adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL);
+ adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL);
+ if (!adapter->rss_key || !adapter->rss_lut)
+ goto err_mem;
+ if (RSS_AQ(adapter))
+ adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
+ else
+ iavf_init_rss(adapter);
+
+ return err;
+err_mem:
+ iavf_free_rss(adapter);
+err_register:
+ iavf_free_misc_irq(adapter);
+err_sw_init:
+ iavf_reset_interrupt_capability(adapter);
+err_alloc:
+ kfree(adapter->vf_res);
+ adapter->vf_res = NULL;
+err:
+ return err;
+}
+
+/**
+ * iavf_watchdog_task - Periodic call-back task
+ * @work: pointer to work_struct
+ **/
+static void iavf_watchdog_task(struct work_struct *work)
+{
+ struct iavf_adapter *adapter = container_of(work,
+ struct iavf_adapter,
+ watchdog_task.work);
+ struct iavf_hw *hw = &adapter->hw;
+ u32 reg_val;
+
+ if (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section))
+ goto restart_watchdog;
+
+ if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
+ adapter->state = __IAVF_COMM_FAILED;
+
+ switch (adapter->state) {
+ case __IAVF_COMM_FAILED:
+ reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
+ IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
+ if (reg_val == VIRTCHNL_VFR_VFACTIVE ||
+ reg_val == VIRTCHNL_VFR_COMPLETED) {
+ /* A chance for redemption! */
+ dev_err(&adapter->pdev->dev,
+ "Hardware came out of reset. Attempting reinit.\n");
+ adapter->state = __IAVF_STARTUP;
+ adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
+ queue_delayed_work(iavf_wq, &adapter->init_task, 10);
+ clear_bit(__IAVF_IN_CRITICAL_TASK,
+ &adapter->crit_section);
+ /* Don't reschedule the watchdog, since we've restarted
+ * the init task. When init_task contacts the PF and
+ * gets everything set up again, it'll restart the
+ * watchdog for us. Down, boy. Sit. Stay. Woof.
+ */
+ return;
+ }
+ adapter->aq_required = 0;
+ adapter->current_op = VIRTCHNL_OP_UNKNOWN;
+ clear_bit(__IAVF_IN_CRITICAL_TASK,
+ &adapter->crit_section);
+ queue_delayed_work(iavf_wq,
+ &adapter->watchdog_task,
+ msecs_to_jiffies(10));
goto watchdog_done;
+ case __IAVF_RESETTING:
+ clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
+ queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
+ return;
+ case __IAVF_DOWN:
+ case __IAVF_DOWN_PENDING:
+ case __IAVF_TESTING:
+ case __IAVF_RUNNING:
+ if (adapter->current_op) {
+ if (!iavf_asq_done(hw)) {
+ dev_dbg(&adapter->pdev->dev,
+ "Admin queue timeout\n");
+ iavf_send_api_ver(adapter);
+ }
+ } else {
+ if (!iavf_process_aq_command(adapter) &&
+ adapter->state == __IAVF_RUNNING)
+ iavf_request_stats(adapter);
+ }
+ break;
+ case __IAVF_REMOVE:
+ clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
+ return;
+ default:
+ goto restart_watchdog;
}
- schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5));
+ /* check for hw reset */
+ reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK;
+ if (!reg_val) {
+ adapter->state = __IAVF_RESETTING;
+ adapter->flags |= IAVF_FLAG_RESET_PENDING;
+ adapter->aq_required = 0;
+ adapter->current_op = VIRTCHNL_OP_UNKNOWN;
+ dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
+ queue_work(iavf_wq, &adapter->reset_task);
+ goto watchdog_done;
+ }
- if (adapter->state == __IAVF_RUNNING)
- iavf_request_stats(adapter);
+ schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5));
watchdog_done:
- if (adapter->state == __IAVF_RUNNING)
+ if (adapter->state == __IAVF_RUNNING ||
+ adapter->state == __IAVF_COMM_FAILED)
iavf_detect_recover_hung(&adapter->vsi);
clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
restart_watchdog:
- if (adapter->state == __IAVF_REMOVE)
- return;
if (adapter->aq_required)
- mod_timer(&adapter->watchdog_timer,
- jiffies + msecs_to_jiffies(20));
+ queue_delayed_work(iavf_wq, &adapter->watchdog_task,
+ msecs_to_jiffies(20));
else
- mod_timer(&adapter->watchdog_timer, jiffies + (HZ * 2));
- schedule_work(&adapter->adminq_task);
+ queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
+ queue_work(iavf_wq, &adapter->adminq_task);
}
static void iavf_disable_vf(struct iavf_adapter *adapter)
@@ -1967,7 +2234,7 @@ continue_reset:
adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
iavf_misc_irq_enable(adapter);
- mod_timer(&adapter->watchdog_timer, jiffies + 2);
+ mod_delayed_work(iavf_wq, &adapter->watchdog_task, 2);
/* We were running when the reset started, so we need to restore some
* state here.
@@ -2020,9 +2287,9 @@ static void iavf_adminq_task(struct work_struct *work)
struct iavf_adapter *adapter =
container_of(work, struct iavf_adapter, adminq_task);
struct iavf_hw *hw = &adapter->hw;
- struct i40e_arq_event_info event;
+ struct iavf_arq_event_info event;
enum virtchnl_ops v_op;
- iavf_status ret, v_ret;
+ enum iavf_status ret, v_ret;
u32 val, oldval;
u16 pending;
@@ -2037,7 +2304,7 @@ static void iavf_adminq_task(struct work_struct *work)
do {
ret = iavf_clean_arq_element(hw, &event, &pending);
v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
- v_ret = (iavf_status)le32_to_cpu(event.desc.cookie_low);
+ v_ret = (enum iavf_status)le32_to_cpu(event.desc.cookie_low);
if (ret || !v_op)
break; /* No event to process or error cleaning ARQ */
@@ -2239,22 +2506,22 @@ static int iavf_validate_tx_bandwidth(struct iavf_adapter *adapter,
int speed = 0, ret = 0;
switch (adapter->link_speed) {
- case I40E_LINK_SPEED_40GB:
+ case IAVF_LINK_SPEED_40GB:
speed = 40000;
break;
- case I40E_LINK_SPEED_25GB:
+ case IAVF_LINK_SPEED_25GB:
speed = 25000;
break;
- case I40E_LINK_SPEED_20GB:
+ case IAVF_LINK_SPEED_20GB:
speed = 20000;
break;
- case I40E_LINK_SPEED_10GB:
+ case IAVF_LINK_SPEED_10GB:
speed = 10000;
break;
- case I40E_LINK_SPEED_1GB:
+ case IAVF_LINK_SPEED_1GB:
speed = 1000;
break;
- case I40E_LINK_SPEED_100MB:
+ case IAVF_LINK_SPEED_100MB:
speed = 100;
break;
default:
@@ -2432,14 +2699,14 @@ exit:
/**
* iavf_parse_cls_flower - Parse tc flower filters provided by kernel
* @adapter: board private structure
- * @cls_flower: pointer to struct tc_cls_flower_offload
+ * @cls_flower: pointer to struct flow_cls_offload
* @filter: pointer to cloud filter structure
*/
static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
- struct tc_cls_flower_offload *f,
+ struct flow_cls_offload *f,
struct iavf_cloud_filter *filter)
{
- struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
struct flow_dissector *dissector = rule->match.dissector;
u16 n_proto_mask = 0;
u16 n_proto_key = 0;
@@ -2508,7 +2775,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
} else {
dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n",
match.mask->dst);
- return I40E_ERR_CONFIG;
+ return IAVF_ERR_CONFIG;
}
}
@@ -2518,7 +2785,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
} else {
dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n",
match.mask->src);
- return I40E_ERR_CONFIG;
+ return IAVF_ERR_CONFIG;
}
}
@@ -2553,7 +2820,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
} else {
dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n",
match.mask->vlan_id);
- return I40E_ERR_CONFIG;
+ return IAVF_ERR_CONFIG;
}
}
vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff);
@@ -2577,7 +2844,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
} else {
dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n",
be32_to_cpu(match.mask->dst));
- return I40E_ERR_CONFIG;
+ return IAVF_ERR_CONFIG;
}
}
@@ -2587,13 +2854,13 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
} else {
dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n",
be32_to_cpu(match.mask->dst));
- return I40E_ERR_CONFIG;
+ return IAVF_ERR_CONFIG;
}
}
if (field_flags & IAVF_CLOUD_FIELD_TEN_ID) {
dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n");
- return I40E_ERR_CONFIG;
+ return IAVF_ERR_CONFIG;
}
if (match.key->dst) {
vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff);
@@ -2614,7 +2881,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
if (ipv6_addr_any(&match.mask->dst)) {
dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n",
IPV6_ADDR_ANY);
- return I40E_ERR_CONFIG;
+ return IAVF_ERR_CONFIG;
}
/* src and dest IPv6 address should not be LOOPBACK
@@ -2624,7 +2891,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
ipv6_addr_loopback(&match.key->src)) {
dev_err(&adapter->pdev->dev,
"ipv6 addr should not be loopback\n");
- return I40E_ERR_CONFIG;
+ return IAVF_ERR_CONFIG;
}
if (!ipv6_addr_any(&match.mask->dst) ||
!ipv6_addr_any(&match.mask->src))
@@ -2649,7 +2916,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
} else {
dev_err(&adapter->pdev->dev, "Bad src port mask %u\n",
be16_to_cpu(match.mask->src));
- return I40E_ERR_CONFIG;
+ return IAVF_ERR_CONFIG;
}
}
@@ -2659,7 +2926,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
} else {
dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n",
be16_to_cpu(match.mask->dst));
- return I40E_ERR_CONFIG;
+ return IAVF_ERR_CONFIG;
}
}
if (match.key->dst) {
@@ -2704,10 +2971,10 @@ static int iavf_handle_tclass(struct iavf_adapter *adapter, u32 tc,
/**
* iavf_configure_clsflower - Add tc flower filters
* @adapter: board private structure
- * @cls_flower: Pointer to struct tc_cls_flower_offload
+ * @cls_flower: Pointer to struct flow_cls_offload
*/
static int iavf_configure_clsflower(struct iavf_adapter *adapter,
- struct tc_cls_flower_offload *cls_flower)
+ struct flow_cls_offload *cls_flower)
{
int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
struct iavf_cloud_filter *filter = NULL;
@@ -2783,10 +3050,10 @@ static struct iavf_cloud_filter *iavf_find_cf(struct iavf_adapter *adapter,
/**
* iavf_delete_clsflower - Remove tc flower filters
* @adapter: board private structure
- * @cls_flower: Pointer to struct tc_cls_flower_offload
+ * @cls_flower: Pointer to struct flow_cls_offload
*/
static int iavf_delete_clsflower(struct iavf_adapter *adapter,
- struct tc_cls_flower_offload *cls_flower)
+ struct flow_cls_offload *cls_flower)
{
struct iavf_cloud_filter *filter = NULL;
int err = 0;
@@ -2810,17 +3077,17 @@ static int iavf_delete_clsflower(struct iavf_adapter *adapter,
* @type_data: offload data
*/
static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter,
- struct tc_cls_flower_offload *cls_flower)
+ struct flow_cls_offload *cls_flower)
{
if (cls_flower->common.chain_index)
return -EOPNOTSUPP;
switch (cls_flower->command) {
- case TC_CLSFLOWER_REPLACE:
+ case FLOW_CLS_REPLACE:
return iavf_configure_clsflower(adapter, cls_flower);
- case TC_CLSFLOWER_DESTROY:
+ case FLOW_CLS_DESTROY:
return iavf_delete_clsflower(adapter, cls_flower);
- case TC_CLSFLOWER_STATS:
+ case FLOW_CLS_STATS:
return -EOPNOTSUPP;
default:
return -EOPNOTSUPP;
@@ -2846,34 +3113,7 @@ static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
}
}
-/**
- * iavf_setup_tc_block - register callbacks for tc
- * @netdev: network interface device structure
- * @f: tc offload data
- *
- * This function registers block callbacks for tc
- * offloads
- **/
-static int iavf_setup_tc_block(struct net_device *dev,
- struct tc_block_offload *f)
-{
- struct iavf_adapter *adapter = netdev_priv(dev);
-
- if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
- return -EOPNOTSUPP;
-
- switch (f->command) {
- case TC_BLOCK_BIND:
- return tcf_block_cb_register(f->block, iavf_setup_tc_block_cb,
- adapter, adapter, f->extack);
- case TC_BLOCK_UNBIND:
- tcf_block_cb_unregister(f->block, iavf_setup_tc_block_cb,
- adapter);
- return 0;
- default:
- return -EOPNOTSUPP;
- }
-}
+static LIST_HEAD(iavf_block_cb_list);
/**
* iavf_setup_tc - configure multiple traffic classes
@@ -2889,11 +3129,16 @@ static int iavf_setup_tc_block(struct net_device *dev,
static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type,
void *type_data)
{
+ struct iavf_adapter *adapter = netdev_priv(netdev);
+
switch (type) {
case TC_SETUP_QDISC_MQPRIO:
return __iavf_setup_tc(netdev, type_data);
case TC_SETUP_BLOCK:
- return iavf_setup_tc_block(netdev, type_data);
+ return flow_block_cb_setup_simple(type_data,
+ &iavf_block_cb_list,
+ iavf_setup_tc_block_cb,
+ adapter, adapter, true);
default:
return -EOPNOTSUPP;
}
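
The hunks above replace the driver-private iavf_setup_tc_block() with the shared flow_block_cb_setup_simple() helper plus a per-driver block callback list. A minimal sketch of the same wiring for a hypothetical driver follows; the foo_* names and struct foo_priv are illustrative, and only the helper call mirrors the iavf conversion shown here.

/* Sketch only: another driver's ndo_setup_tc using the same helper.
 * Per-driver callback list, callback, ident, priv, ingress-only flag,
 * exactly as in the iavf hunk above.
 */
static LIST_HEAD(foo_block_cb_list);

static int foo_setup_tc(struct net_device *netdev, enum tc_setup_type type,
			void *type_data)
{
	struct foo_priv *priv = netdev_priv(netdev);

	if (type != TC_SETUP_BLOCK)
		return -EOPNOTSUPP;

	return flow_block_cb_setup_simple(type_data, &foo_block_cb_list,
					  foo_setup_tc_block_cb,
					  priv, priv, true);
}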
@@ -2908,7 +3153,7 @@ static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type,
* The open entry point is called when a network interface is made
* active by the system (IFF_UP). At this point all resources needed
* for transmit and receive operations are allocated, the interrupt
- * handler is registered with the OS, the watchdog timer is started,
+ * handler is registered with the OS, the watchdog is started,
* and the stack is notified that the interface is ready.
**/
static int iavf_open(struct net_device *netdev)
@@ -3020,7 +3265,7 @@ static int iavf_close(struct net_device *netdev)
status = wait_event_timeout(adapter->down_waitqueue,
adapter->state == __IAVF_DOWN,
- msecs_to_jiffies(200));
+ msecs_to_jiffies(500));
if (!status)
netdev_warn(netdev, "Device resources not yet released\n");
return 0;
@@ -3043,7 +3288,7 @@ static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
}
adapter->flags |= IAVF_FLAG_RESET_NEEDED;
- schedule_work(&adapter->reset_task);
+ queue_work(iavf_wq, &adapter->reset_task);
return 0;
}
@@ -3348,217 +3593,41 @@ int iavf_process_config(struct iavf_adapter *adapter)
static void iavf_init_task(struct work_struct *work)
{
struct iavf_adapter *adapter = container_of(work,
- struct iavf_adapter,
- init_task.work);
- struct net_device *netdev = adapter->netdev;
+ struct iavf_adapter,
+ init_task.work);
struct iavf_hw *hw = &adapter->hw;
- struct pci_dev *pdev = adapter->pdev;
- int err, bufsz;
switch (adapter->state) {
case __IAVF_STARTUP:
- /* driver loaded, probe complete */
- adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
- adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
- err = iavf_set_mac_type(hw);
- if (err) {
- dev_err(&pdev->dev, "Failed to set MAC type (%d)\n",
- err);
- goto err;
- }
- err = iavf_check_reset_complete(hw);
- if (err) {
- dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
- err);
- goto err;
- }
- hw->aq.num_arq_entries = IAVF_AQ_LEN;
- hw->aq.num_asq_entries = IAVF_AQ_LEN;
- hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
- hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
-
- err = iavf_init_adminq(hw);
- if (err) {
- dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n",
- err);
- goto err;
- }
- err = iavf_send_api_ver(adapter);
- if (err) {
- dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err);
- iavf_shutdown_adminq(hw);
- goto err;
- }
- adapter->state = __IAVF_INIT_VERSION_CHECK;
- goto restart;
+ if (iavf_startup(adapter) < 0)
+ goto init_failed;
+ break;
case __IAVF_INIT_VERSION_CHECK:
- if (!iavf_asq_done(hw)) {
- dev_err(&pdev->dev, "Admin queue command never completed\n");
- iavf_shutdown_adminq(hw);
- adapter->state = __IAVF_STARTUP;
- goto err;
- }
-
- /* aq msg sent, awaiting reply */
- err = iavf_verify_api_ver(adapter);
- if (err) {
- if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
- err = iavf_send_api_ver(adapter);
- else
- dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
- adapter->pf_version.major,
- adapter->pf_version.minor,
- VIRTCHNL_VERSION_MAJOR,
- VIRTCHNL_VERSION_MINOR);
- goto err;
- }
- err = iavf_send_vf_config_msg(adapter);
- if (err) {
- dev_err(&pdev->dev, "Unable to send config request (%d)\n",
- err);
- goto err;
- }
- adapter->state = __IAVF_INIT_GET_RESOURCES;
- goto restart;
- case __IAVF_INIT_GET_RESOURCES:
- /* aq msg sent, awaiting reply */
- if (!adapter->vf_res) {
- bufsz = sizeof(struct virtchnl_vf_resource) +
- (IAVF_MAX_VF_VSI *
- sizeof(struct virtchnl_vsi_resource));
- adapter->vf_res = kzalloc(bufsz, GFP_KERNEL);
- if (!adapter->vf_res)
- goto err;
- }
- err = iavf_get_vf_config(adapter);
- if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
- err = iavf_send_vf_config_msg(adapter);
- goto err;
- } else if (err == I40E_ERR_PARAM) {
- /* We only get ERR_PARAM if the device is in a very bad
- * state or if we've been disabled for previous bad
- * behavior. Either way, we're done now.
- */
- iavf_shutdown_adminq(hw);
- dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n");
- return;
- }
- if (err) {
- dev_err(&pdev->dev, "Unable to get VF config (%d)\n",
- err);
- goto err_alloc;
- }
- adapter->state = __IAVF_INIT_SW;
+ if (iavf_init_version_check(adapter) < 0)
+ goto init_failed;
break;
+ case __IAVF_INIT_GET_RESOURCES:
+ if (iavf_init_get_resources(adapter) < 0)
+ goto init_failed;
+ return;
default:
- goto err_alloc;
- }
-
- if (iavf_process_config(adapter))
- goto err_alloc;
- adapter->current_op = VIRTCHNL_OP_UNKNOWN;
-
- adapter->flags |= IAVF_FLAG_RX_CSUM_ENABLED;
-
- netdev->netdev_ops = &iavf_netdev_ops;
- iavf_set_ethtool_ops(netdev);
- netdev->watchdog_timeo = 5 * HZ;
-
- /* MTU range: 68 - 9710 */
- netdev->min_mtu = ETH_MIN_MTU;
- netdev->max_mtu = IAVF_MAX_RXBUFFER - IAVF_PACKET_HDR_PAD;
-
- if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
- dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
- adapter->hw.mac.addr);
- eth_hw_addr_random(netdev);
- ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
- } else {
- adapter->flags |= IAVF_FLAG_ADDR_SET_BY_PF;
- ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
- ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
- }
-
- timer_setup(&adapter->watchdog_timer, iavf_watchdog_timer, 0);
- mod_timer(&adapter->watchdog_timer, jiffies + 1);
-
- adapter->tx_desc_count = IAVF_DEFAULT_TXD;
- adapter->rx_desc_count = IAVF_DEFAULT_RXD;
- err = iavf_init_interrupt_scheme(adapter);
- if (err)
- goto err_sw_init;
- iavf_map_rings_to_vectors(adapter);
- if (adapter->vf_res->vf_cap_flags &
- VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
- adapter->flags |= IAVF_FLAG_WB_ON_ITR_CAPABLE;
-
- err = iavf_request_misc_irq(adapter);
- if (err)
- goto err_sw_init;
-
- netif_carrier_off(netdev);
- adapter->link_up = false;
-
- if (!adapter->netdev_registered) {
- err = register_netdev(netdev);
- if (err)
- goto err_register;
- }
-
- adapter->netdev_registered = true;
-
- netif_tx_stop_all_queues(netdev);
- if (CLIENT_ALLOWED(adapter)) {
- err = iavf_lan_add_device(adapter);
- if (err)
- dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n",
- err);
+ goto init_failed;
}
- dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
- if (netdev->features & NETIF_F_GRO)
- dev_info(&pdev->dev, "GRO is enabled\n");
-
- adapter->state = __IAVF_DOWN;
- set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
- iavf_misc_irq_enable(adapter);
- wake_up(&adapter->down_waitqueue);
-
- adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL);
- adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL);
- if (!adapter->rss_key || !adapter->rss_lut)
- goto err_mem;
-
- if (RSS_AQ(adapter)) {
- adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
- mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
- } else {
- iavf_init_rss(adapter);
- }
- return;
-restart:
- schedule_delayed_work(&adapter->init_task, msecs_to_jiffies(30));
+ queue_delayed_work(iavf_wq, &adapter->init_task,
+ msecs_to_jiffies(30));
return;
-err_mem:
- iavf_free_rss(adapter);
-err_register:
- iavf_free_misc_irq(adapter);
-err_sw_init:
- iavf_reset_interrupt_capability(adapter);
-err_alloc:
- kfree(adapter->vf_res);
- adapter->vf_res = NULL;
-err:
- /* Things went into the weeds, so try again later */
+init_failed:
if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
- dev_err(&pdev->dev, "Failed to communicate with PF; waiting before retry\n");
+ dev_err(&adapter->pdev->dev,
+ "Failed to communicate with PF; waiting before retry\n");
adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
iavf_shutdown_adminq(hw);
adapter->state = __IAVF_STARTUP;
- schedule_delayed_work(&adapter->init_task, HZ * 5);
+ queue_delayed_work(iavf_wq, &adapter->init_task, HZ * 5);
return;
}
- schedule_delayed_work(&adapter->init_task, HZ);
+ queue_delayed_work(iavf_wq, &adapter->init_task, HZ);
}
/**
@@ -3683,11 +3752,11 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_WORK(&adapter->reset_task, iavf_reset_task);
INIT_WORK(&adapter->adminq_task, iavf_adminq_task);
- INIT_WORK(&adapter->watchdog_task, iavf_watchdog_task);
+ INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task);
INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task);
INIT_DELAYED_WORK(&adapter->init_task, iavf_init_task);
- schedule_delayed_work(&adapter->init_task,
- msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
+ queue_delayed_work(iavf_wq, &adapter->init_task,
+ msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
/* Setup the wait queue for indicating transition to down status */
init_waitqueue_head(&adapter->down_waitqueue);
@@ -3783,7 +3852,7 @@ static int iavf_resume(struct pci_dev *pdev)
return err;
}
- schedule_work(&adapter->reset_task);
+ queue_work(iavf_wq, &adapter->reset_task);
netif_device_attach(netdev);
@@ -3843,8 +3912,7 @@ static void iavf_remove(struct pci_dev *pdev)
iavf_reset_interrupt_capability(adapter);
iavf_free_q_vectors(adapter);
- if (adapter->watchdog_timer.function)
- del_timer_sync(&adapter->watchdog_timer);
+ cancel_delayed_work_sync(&adapter->watchdog_task);
cancel_work_sync(&adapter->adminq_task);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_osdep.h b/drivers/net/ethernet/intel/iavf/iavf_osdep.h
index e6e0b0328706..a452ce90679a 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_osdep.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_osdep.h
@@ -44,9 +44,12 @@ struct iavf_virt_mem {
#define iavf_allocate_virt_mem(h, m, s) iavf_allocate_virt_mem_d(h, m, s)
#define iavf_free_virt_mem(h, m) iavf_free_virt_mem_d(h, m)
-#define iavf_debug(h, m, s, ...) iavf_debug_d(h, m, s, ##__VA_ARGS__)
-extern void iavf_debug_d(void *hw, u32 mask, char *fmt_str, ...)
- __attribute__ ((format(gnu_printf, 3, 4)));
+#define iavf_debug(h, m, s, ...) \
+do { \
+ if (((m) & (h)->debug_mask)) \
+ pr_info("iavf %02x:%02x.%x " s, \
+ (h)->bus.bus_id, (h)->bus.device, \
+ (h)->bus.func, ##__VA_ARGS__); \
+} while (0)
-typedef enum iavf_status_code iavf_status;
#endif /* _IAVF_OSDEP_H_ */
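
The new iavf_debug() above follows the common mask-gated debug macro pattern: a do { } while (0) wrapper so the macro acts as a single statement inside un-braced if/else, and a cheap mask test before the print. A small self-contained sketch of that pattern, with made-up DBG_* names rather than the driver's masks:

#include <stdio.h>

#define DBG_RX 0x1u
#define DBG_TX 0x2u

static unsigned int dbg_mask = DBG_RX;

/* Disabled categories cost one integer compare; the do/while keeps the
 * macro safe in any statement position.
 */
#define drv_debug(mask, fmt, ...) \
do { \
	if ((mask) & dbg_mask) \
		printf("drv: " fmt, ##__VA_ARGS__); \
} while (0)

int main(void)
{
	drv_debug(DBG_RX, "rx ring %d cleaned\n", 0);	/* printed */
	drv_debug(DBG_TX, "tx ring %d cleaned\n", 0);	/* filtered out */
	return 0;
}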
diff --git a/drivers/net/ethernet/intel/iavf/iavf_prototype.h b/drivers/net/ethernet/intel/iavf/iavf_prototype.h
index d6685103af39..edebfbbcffdc 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_prototype.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_prototype.h
@@ -16,39 +16,40 @@
*/
/* adminq functions */
-iavf_status iavf_init_adminq(struct iavf_hw *hw);
-iavf_status iavf_shutdown_adminq(struct iavf_hw *hw);
-void i40e_adminq_init_ring_data(struct iavf_hw *hw);
-iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
- struct i40e_arq_event_info *e,
- u16 *events_pending);
-iavf_status iavf_asq_send_command(struct iavf_hw *hw, struct i40e_aq_desc *desc,
- void *buff, /* can be NULL */
- u16 buff_size,
- struct i40e_asq_cmd_details *cmd_details);
+enum iavf_status iavf_init_adminq(struct iavf_hw *hw);
+enum iavf_status iavf_shutdown_adminq(struct iavf_hw *hw);
+void iavf_adminq_init_ring_data(struct iavf_hw *hw);
+enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
+ struct iavf_arq_event_info *e,
+ u16 *events_pending);
+enum iavf_status iavf_asq_send_command(struct iavf_hw *hw,
+ struct iavf_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct iavf_asq_cmd_details *cmd_details);
bool iavf_asq_done(struct iavf_hw *hw);
/* debug function for adminq */
void iavf_debug_aq(struct iavf_hw *hw, enum iavf_debug_mask mask,
void *desc, void *buffer, u16 buf_len);
-void i40e_idle_aq(struct iavf_hw *hw);
+void iavf_idle_aq(struct iavf_hw *hw);
void iavf_resume_aq(struct iavf_hw *hw);
bool iavf_check_asq_alive(struct iavf_hw *hw);
-iavf_status iavf_aq_queue_shutdown(struct iavf_hw *hw, bool unloading);
-const char *iavf_aq_str(struct iavf_hw *hw, enum i40e_admin_queue_err aq_err);
-const char *iavf_stat_str(struct iavf_hw *hw, iavf_status stat_err);
+enum iavf_status iavf_aq_queue_shutdown(struct iavf_hw *hw, bool unloading);
+const char *iavf_aq_str(struct iavf_hw *hw, enum iavf_admin_queue_err aq_err);
+const char *iavf_stat_str(struct iavf_hw *hw, enum iavf_status stat_err);
-iavf_status iavf_aq_get_rss_lut(struct iavf_hw *hw, u16 seid,
- bool pf_lut, u8 *lut, u16 lut_size);
-iavf_status iavf_aq_set_rss_lut(struct iavf_hw *hw, u16 seid,
- bool pf_lut, u8 *lut, u16 lut_size);
-iavf_status iavf_aq_get_rss_key(struct iavf_hw *hw, u16 seid,
- struct i40e_aqc_get_set_rss_key_data *key);
-iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw, u16 seid,
- struct i40e_aqc_get_set_rss_key_data *key);
+enum iavf_status iavf_aq_get_rss_lut(struct iavf_hw *hw, u16 seid,
+ bool pf_lut, u8 *lut, u16 lut_size);
+enum iavf_status iavf_aq_set_rss_lut(struct iavf_hw *hw, u16 seid,
+ bool pf_lut, u8 *lut, u16 lut_size);
+enum iavf_status iavf_aq_get_rss_key(struct iavf_hw *hw, u16 seid,
+ struct iavf_aqc_get_set_rss_key_data *key);
+enum iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw, u16 seid,
+ struct iavf_aqc_get_set_rss_key_data *key);
-iavf_status iavf_set_mac_type(struct iavf_hw *hw);
+enum iavf_status iavf_set_mac_type(struct iavf_hw *hw);
extern struct iavf_rx_ptype_decoded iavf_ptype_lookup[];
@@ -59,9 +60,10 @@ static inline struct iavf_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
void iavf_vf_parse_hw_config(struct iavf_hw *hw,
struct virtchnl_vf_resource *msg);
-iavf_status iavf_vf_reset(struct iavf_hw *hw);
-iavf_status iavf_aq_send_msg_to_pf(struct iavf_hw *hw,
- enum virtchnl_ops v_opcode,
- iavf_status v_retval, u8 *msg, u16 msglen,
- struct i40e_asq_cmd_details *cmd_details);
+enum iavf_status iavf_vf_reset(struct iavf_hw *hw);
+enum iavf_status iavf_aq_send_msg_to_pf(struct iavf_hw *hw,
+ enum virtchnl_ops v_opcode,
+ enum iavf_status v_retval,
+ u8 *msg, u16 msglen,
+ struct iavf_asq_cmd_details *cmd_details);
#endif /* _IAVF_PROTOTYPE_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_status.h b/drivers/net/ethernet/intel/iavf/iavf_status.h
index 46742fab7b8c..46e3d1f6b604 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_status.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_status.h
@@ -5,74 +5,74 @@
#define _IAVF_STATUS_H_
/* Error Codes */
-enum iavf_status_code {
- I40E_SUCCESS = 0,
- I40E_ERR_NVM = -1,
- I40E_ERR_NVM_CHECKSUM = -2,
- I40E_ERR_PHY = -3,
- I40E_ERR_CONFIG = -4,
- I40E_ERR_PARAM = -5,
- I40E_ERR_MAC_TYPE = -6,
- I40E_ERR_UNKNOWN_PHY = -7,
- I40E_ERR_LINK_SETUP = -8,
- I40E_ERR_ADAPTER_STOPPED = -9,
- I40E_ERR_INVALID_MAC_ADDR = -10,
- I40E_ERR_DEVICE_NOT_SUPPORTED = -11,
- I40E_ERR_MASTER_REQUESTS_PENDING = -12,
- I40E_ERR_INVALID_LINK_SETTINGS = -13,
- I40E_ERR_AUTONEG_NOT_COMPLETE = -14,
- I40E_ERR_RESET_FAILED = -15,
- I40E_ERR_SWFW_SYNC = -16,
- I40E_ERR_NO_AVAILABLE_VSI = -17,
- I40E_ERR_NO_MEMORY = -18,
- I40E_ERR_BAD_PTR = -19,
- I40E_ERR_RING_FULL = -20,
- I40E_ERR_INVALID_PD_ID = -21,
- I40E_ERR_INVALID_QP_ID = -22,
- I40E_ERR_INVALID_CQ_ID = -23,
- I40E_ERR_INVALID_CEQ_ID = -24,
- I40E_ERR_INVALID_AEQ_ID = -25,
- I40E_ERR_INVALID_SIZE = -26,
- I40E_ERR_INVALID_ARP_INDEX = -27,
- I40E_ERR_INVALID_FPM_FUNC_ID = -28,
- I40E_ERR_QP_INVALID_MSG_SIZE = -29,
- I40E_ERR_QP_TOOMANY_WRS_POSTED = -30,
- I40E_ERR_INVALID_FRAG_COUNT = -31,
- I40E_ERR_QUEUE_EMPTY = -32,
- I40E_ERR_INVALID_ALIGNMENT = -33,
- I40E_ERR_FLUSHED_QUEUE = -34,
- I40E_ERR_INVALID_PUSH_PAGE_INDEX = -35,
- I40E_ERR_INVALID_IMM_DATA_SIZE = -36,
- I40E_ERR_TIMEOUT = -37,
- I40E_ERR_OPCODE_MISMATCH = -38,
- I40E_ERR_CQP_COMPL_ERROR = -39,
- I40E_ERR_INVALID_VF_ID = -40,
- I40E_ERR_INVALID_HMCFN_ID = -41,
- I40E_ERR_BACKING_PAGE_ERROR = -42,
- I40E_ERR_NO_PBLCHUNKS_AVAILABLE = -43,
- I40E_ERR_INVALID_PBLE_INDEX = -44,
- I40E_ERR_INVALID_SD_INDEX = -45,
- I40E_ERR_INVALID_PAGE_DESC_INDEX = -46,
- I40E_ERR_INVALID_SD_TYPE = -47,
- I40E_ERR_MEMCPY_FAILED = -48,
- I40E_ERR_INVALID_HMC_OBJ_INDEX = -49,
- I40E_ERR_INVALID_HMC_OBJ_COUNT = -50,
- I40E_ERR_INVALID_SRQ_ARM_LIMIT = -51,
- I40E_ERR_SRQ_ENABLED = -52,
- I40E_ERR_ADMIN_QUEUE_ERROR = -53,
- I40E_ERR_ADMIN_QUEUE_TIMEOUT = -54,
- I40E_ERR_BUF_TOO_SHORT = -55,
- I40E_ERR_ADMIN_QUEUE_FULL = -56,
- I40E_ERR_ADMIN_QUEUE_NO_WORK = -57,
- I40E_ERR_BAD_IWARP_CQE = -58,
- I40E_ERR_NVM_BLANK_MODE = -59,
- I40E_ERR_NOT_IMPLEMENTED = -60,
- I40E_ERR_PE_DOORBELL_NOT_ENABLED = -61,
- I40E_ERR_DIAG_TEST_FAILED = -62,
- I40E_ERR_NOT_READY = -63,
- I40E_NOT_SUPPORTED = -64,
- I40E_ERR_FIRMWARE_API_VERSION = -65,
- I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR = -66,
+enum iavf_status {
+ IAVF_SUCCESS = 0,
+ IAVF_ERR_NVM = -1,
+ IAVF_ERR_NVM_CHECKSUM = -2,
+ IAVF_ERR_PHY = -3,
+ IAVF_ERR_CONFIG = -4,
+ IAVF_ERR_PARAM = -5,
+ IAVF_ERR_MAC_TYPE = -6,
+ IAVF_ERR_UNKNOWN_PHY = -7,
+ IAVF_ERR_LINK_SETUP = -8,
+ IAVF_ERR_ADAPTER_STOPPED = -9,
+ IAVF_ERR_INVALID_MAC_ADDR = -10,
+ IAVF_ERR_DEVICE_NOT_SUPPORTED = -11,
+ IAVF_ERR_MASTER_REQUESTS_PENDING = -12,
+ IAVF_ERR_INVALID_LINK_SETTINGS = -13,
+ IAVF_ERR_AUTONEG_NOT_COMPLETE = -14,
+ IAVF_ERR_RESET_FAILED = -15,
+ IAVF_ERR_SWFW_SYNC = -16,
+ IAVF_ERR_NO_AVAILABLE_VSI = -17,
+ IAVF_ERR_NO_MEMORY = -18,
+ IAVF_ERR_BAD_PTR = -19,
+ IAVF_ERR_RING_FULL = -20,
+ IAVF_ERR_INVALID_PD_ID = -21,
+ IAVF_ERR_INVALID_QP_ID = -22,
+ IAVF_ERR_INVALID_CQ_ID = -23,
+ IAVF_ERR_INVALID_CEQ_ID = -24,
+ IAVF_ERR_INVALID_AEQ_ID = -25,
+ IAVF_ERR_INVALID_SIZE = -26,
+ IAVF_ERR_INVALID_ARP_INDEX = -27,
+ IAVF_ERR_INVALID_FPM_FUNC_ID = -28,
+ IAVF_ERR_QP_INVALID_MSG_SIZE = -29,
+ IAVF_ERR_QP_TOOMANY_WRS_POSTED = -30,
+ IAVF_ERR_INVALID_FRAG_COUNT = -31,
+ IAVF_ERR_QUEUE_EMPTY = -32,
+ IAVF_ERR_INVALID_ALIGNMENT = -33,
+ IAVF_ERR_FLUSHED_QUEUE = -34,
+ IAVF_ERR_INVALID_PUSH_PAGE_INDEX = -35,
+ IAVF_ERR_INVALID_IMM_DATA_SIZE = -36,
+ IAVF_ERR_TIMEOUT = -37,
+ IAVF_ERR_OPCODE_MISMATCH = -38,
+ IAVF_ERR_CQP_COMPL_ERROR = -39,
+ IAVF_ERR_INVALID_VF_ID = -40,
+ IAVF_ERR_INVALID_HMCFN_ID = -41,
+ IAVF_ERR_BACKING_PAGE_ERROR = -42,
+ IAVF_ERR_NO_PBLCHUNKS_AVAILABLE = -43,
+ IAVF_ERR_INVALID_PBLE_INDEX = -44,
+ IAVF_ERR_INVALID_SD_INDEX = -45,
+ IAVF_ERR_INVALID_PAGE_DESC_INDEX = -46,
+ IAVF_ERR_INVALID_SD_TYPE = -47,
+ IAVF_ERR_MEMCPY_FAILED = -48,
+ IAVF_ERR_INVALID_HMC_OBJ_INDEX = -49,
+ IAVF_ERR_INVALID_HMC_OBJ_COUNT = -50,
+ IAVF_ERR_INVALID_SRQ_ARM_LIMIT = -51,
+ IAVF_ERR_SRQ_ENABLED = -52,
+ IAVF_ERR_ADMIN_QUEUE_ERROR = -53,
+ IAVF_ERR_ADMIN_QUEUE_TIMEOUT = -54,
+ IAVF_ERR_BUF_TOO_SHORT = -55,
+ IAVF_ERR_ADMIN_QUEUE_FULL = -56,
+ IAVF_ERR_ADMIN_QUEUE_NO_WORK = -57,
+ IAVF_ERR_BAD_IWARP_CQE = -58,
+ IAVF_ERR_NVM_BLANK_MODE = -59,
+ IAVF_ERR_NOT_IMPLEMENTED = -60,
+ IAVF_ERR_PE_DOORBELL_NOT_ENABLED = -61,
+ IAVF_ERR_DIAG_TEST_FAILED = -62,
+ IAVF_ERR_NOT_READY = -63,
+ IAVF_NOT_SUPPORTED = -64,
+ IAVF_ERR_FIRMWARE_API_VERSION = -65,
+ IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR = -66,
};
#endif /* _IAVF_STATUS_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_trace.h b/drivers/net/ethernet/intel/iavf/iavf_trace.h
index 1474f5539751..1058e68a02b4 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_trace.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_trace.h
@@ -17,8 +17,8 @@
/* See trace-events-sample.h for a detailed description of why this
* guard clause is different from most normal include files.
*/
-#if !defined(_I40E_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
-#define _I40E_TRACE_H_
+#if !defined(_IAVF_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _IAVF_TRACE_H_
#include <linux/tracepoint.h>
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index 06d1509d57f7..0cca1b589b56 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -190,7 +190,7 @@ void iavf_detect_recover_hung(struct iavf_vsi *vsi)
static bool iavf_clean_tx_irq(struct iavf_vsi *vsi,
struct iavf_ring *tx_ring, int napi_budget)
{
- u16 i = tx_ring->next_to_clean;
+ int i = tx_ring->next_to_clean;
struct iavf_tx_buffer *tx_buf;
struct iavf_tx_desc *tx_desc;
unsigned int total_bytes = 0, total_packets = 0;
@@ -379,19 +379,19 @@ static inline unsigned int iavf_itr_divisor(struct iavf_q_vector *q_vector)
unsigned int divisor;
switch (q_vector->adapter->link_speed) {
- case I40E_LINK_SPEED_40GB:
+ case IAVF_LINK_SPEED_40GB:
divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 1024;
break;
- case I40E_LINK_SPEED_25GB:
- case I40E_LINK_SPEED_20GB:
+ case IAVF_LINK_SPEED_25GB:
+ case IAVF_LINK_SPEED_20GB:
divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 512;
break;
default:
- case I40E_LINK_SPEED_10GB:
+ case IAVF_LINK_SPEED_10GB:
divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 256;
break;
- case I40E_LINK_SPEED_1GB:
- case I40E_LINK_SPEED_100MB:
+ case IAVF_LINK_SPEED_1GB:
+ case IAVF_LINK_SPEED_100MB:
divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 32;
break;
}
@@ -1236,6 +1236,9 @@ static void iavf_add_rx_frag(struct iavf_ring *rx_ring,
unsigned int truesize = SKB_DATA_ALIGN(size + iavf_rx_offset(rx_ring));
#endif
+ if (!size)
+ return;
+
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
rx_buffer->page_offset, size, truesize);
@@ -1260,6 +1263,9 @@ static struct iavf_rx_buffer *iavf_get_rx_buffer(struct iavf_ring *rx_ring,
{
struct iavf_rx_buffer *rx_buffer;
+ if (!size)
+ return NULL;
+
rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
prefetchw(rx_buffer->page);
@@ -1290,7 +1296,7 @@ static struct sk_buff *iavf_construct_skb(struct iavf_ring *rx_ring,
struct iavf_rx_buffer *rx_buffer,
unsigned int size)
{
- void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+ void *va;
#if (PAGE_SIZE < 8192)
unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
#else
@@ -1299,7 +1305,10 @@ static struct sk_buff *iavf_construct_skb(struct iavf_ring *rx_ring,
unsigned int headlen;
struct sk_buff *skb;
+ if (!rx_buffer)
+ return NULL;
/* prefetch first cache line of first page */
+ va = page_address(rx_buffer->page) + rx_buffer->page_offset;
prefetch(va);
#if L1_CACHE_BYTES < 128
prefetch(va + L1_CACHE_BYTES);
@@ -1354,7 +1363,7 @@ static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
struct iavf_rx_buffer *rx_buffer,
unsigned int size)
{
- void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+ void *va;
#if (PAGE_SIZE < 8192)
unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
#else
@@ -1363,7 +1372,10 @@ static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
#endif
struct sk_buff *skb;
+ if (!rx_buffer)
+ return NULL;
/* prefetch first cache line of first page */
+ va = page_address(rx_buffer->page) + rx_buffer->page_offset;
prefetch(va);
#if L1_CACHE_BYTES < 128
prefetch(va + L1_CACHE_BYTES);
@@ -1398,6 +1410,9 @@ static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
static void iavf_put_rx_buffer(struct iavf_ring *rx_ring,
struct iavf_rx_buffer *rx_buffer)
{
+ if (!rx_buffer)
+ return;
+
if (iavf_can_reuse_rx_page(rx_buffer)) {
/* hand second half of page back to the ring */
iavf_reuse_rx_page(rx_ring, rx_buffer);
@@ -1496,11 +1511,12 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
* verified the descriptor has been written back.
*/
dma_rmb();
+#define IAVF_RXD_DD BIT(IAVF_RX_DESC_STATUS_DD_SHIFT)
+ if (!iavf_test_staterr(rx_desc, IAVF_RXD_DD))
+ break;
size = (qword & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
IAVF_RXD_QW1_LENGTH_PBUF_SHIFT;
- if (!size)
- break;
iavf_trace(clean_rx_irq, rx_ring, rx_desc, skb);
rx_buffer = iavf_get_rx_buffer(rx_ring, size);
@@ -1516,7 +1532,8 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
/* exit if we failed to retrieve a buffer */
if (!skb) {
rx_ring->rx_stats.alloc_buff_failed++;
- rx_buffer->pagecnt_bias++;
+ if (rx_buffer)
+ rx_buffer->pagecnt_bias++;
break;
}
diff --git a/drivers/net/ethernet/intel/iavf/iavf_type.h b/drivers/net/ethernet/intel/iavf/iavf_type.h
index ca89583613fb..7190a40c540c 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_type.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_type.h
@@ -7,7 +7,7 @@
#include "iavf_status.h"
#include "iavf_osdep.h"
#include "iavf_register.h"
-#include "i40e_adminq.h"
+#include "iavf_adminq.h"
#include "iavf_devids.h"
#define IAVF_RXQ_CTX_DBUFF_SHIFT 7
@@ -21,7 +21,7 @@
/* forward declaration */
struct iavf_hw;
-typedef void (*I40E_ADMINQ_CALLBACK)(struct iavf_hw *, struct i40e_aq_desc *);
+typedef void (*IAVF_ADMINQ_CALLBACK)(struct iavf_hw *, struct iavf_aq_desc *);
/* Data type manipulation macros. */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index e64751da0921..d49d58a6de80 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -22,7 +22,7 @@ static int iavf_send_pf_msg(struct iavf_adapter *adapter,
enum virtchnl_ops op, u8 *msg, u16 len)
{
struct iavf_hw *hw = &adapter->hw;
- iavf_status err;
+ enum iavf_status err;
if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
return 0; /* nothing to see here, move along */
@@ -41,7 +41,7 @@ static int iavf_send_pf_msg(struct iavf_adapter *adapter,
*
* Send API version admin queue message to the PF. The reply is not checked
* in this function. Returns 0 if the message was successfully
- * sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
+ * sent, or one of the IAVF_ADMIN_QUEUE_ERROR_ statuses if not.
**/
int iavf_send_api_ver(struct iavf_adapter *adapter)
{
@@ -60,16 +60,16 @@ int iavf_send_api_ver(struct iavf_adapter *adapter)
*
* Compare API versions with the PF. Must be called after admin queue is
* initialized. Returns 0 if API versions match, -EIO if they do not,
- * I40E_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty, and any errors
+ * IAVF_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty, and any errors
* from the firmware are propagated.
**/
int iavf_verify_api_ver(struct iavf_adapter *adapter)
{
struct virtchnl_version_info *pf_vvi;
struct iavf_hw *hw = &adapter->hw;
- struct i40e_arq_event_info event;
+ struct iavf_arq_event_info event;
enum virtchnl_ops op;
- iavf_status err;
+ enum iavf_status err;
event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
@@ -92,7 +92,7 @@ int iavf_verify_api_ver(struct iavf_adapter *adapter)
}
- err = (iavf_status)le32_to_cpu(event.desc.cookie_low);
+ err = (enum iavf_status)le32_to_cpu(event.desc.cookie_low);
if (err)
goto out_alloc;
@@ -123,7 +123,7 @@ out:
*
* Send VF configuration request admin queue message to the PF. The reply
* is not checked in this function. Returns 0 if the message was
- * successfully sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
+ * successfully sent, or one of the IAVF_ADMIN_QUEUE_ERROR_ statuses if not.
**/
int iavf_send_vf_config_msg(struct iavf_adapter *adapter)
{
@@ -189,9 +189,9 @@ static void iavf_validate_num_queues(struct iavf_adapter *adapter)
int iavf_get_vf_config(struct iavf_adapter *adapter)
{
struct iavf_hw *hw = &adapter->hw;
- struct i40e_arq_event_info event;
+ struct iavf_arq_event_info event;
enum virtchnl_ops op;
- iavf_status err;
+ enum iavf_status err;
u16 len;
len = sizeof(struct virtchnl_vf_resource) +
@@ -216,7 +216,7 @@ int iavf_get_vf_config(struct iavf_adapter *adapter)
break;
}
- err = (iavf_status)le32_to_cpu(event.desc.cookie_low);
+ err = (enum iavf_status)le32_to_cpu(event.desc.cookie_low);
memcpy(adapter->vf_res, event.msg_buf, min(event.msg_len, len));
/* some PFs send more queues than we should have so validate that
@@ -242,7 +242,8 @@ void iavf_configure_queues(struct iavf_adapter *adapter)
struct virtchnl_vsi_queue_config_info *vqci;
struct virtchnl_queue_pair_info *vqpi;
int pairs = adapter->num_active_queues;
- int i, len, max_frame = IAVF_MAX_RXBUFFER;
+ int i, max_frame = IAVF_MAX_RXBUFFER;
+ size_t len;
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
@@ -251,8 +252,7 @@ void iavf_configure_queues(struct iavf_adapter *adapter)
return;
}
adapter->current_op = VIRTCHNL_OP_CONFIG_VSI_QUEUES;
- len = sizeof(struct virtchnl_vsi_queue_config_info) +
- (sizeof(struct virtchnl_queue_pair_info) * pairs);
+ len = struct_size(vqci, qpair, pairs);
vqci = kzalloc(len, GFP_KERNEL);
if (!vqci)
return;
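
The iavf_configure_queues() hunk above swaps the open-coded header-plus-N-elements length for struct_size(vqci, qpair, pairs), the kernel helper for sizing a struct with a trailing flexible array while saturating instead of wrapping on overflow. A stand-alone sketch of the arithmetic being replaced, using illustrative structures rather than the virtchnl ones:

#include <stdio.h>
#include <stdlib.h>

struct addr { unsigned char mac[6]; };
struct addr_list {
	unsigned int count;
	struct addr list[];		/* flexible array member */
};

int main(void)
{
	unsigned int count = 4;
	/* Open-coded form the driver used to compute by hand; in the
	 * kernel, struct_size(al, list, count) yields the same value
	 * with overflow checking.
	 */
	size_t len = sizeof(struct addr_list) + count * sizeof(struct addr);
	struct addr_list *al = calloc(1, len);

	if (!al)
		return 1;
	al->count = count;
	printf("allocated %zu bytes for %u entries\n", len, count);
	free(al);
	return 0;
}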
@@ -351,8 +351,9 @@ void iavf_map_queues(struct iavf_adapter *adapter)
{
struct virtchnl_irq_map_info *vimi;
struct virtchnl_vector_map *vecmap;
- int v_idx, q_vectors, len;
struct iavf_q_vector *q_vector;
+ int v_idx, q_vectors;
+ size_t len;
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
@@ -364,9 +365,7 @@ void iavf_map_queues(struct iavf_adapter *adapter)
q_vectors = adapter->num_msix_vectors - NONQ_VECS;
- len = sizeof(struct virtchnl_irq_map_info) +
- (adapter->num_msix_vectors *
- sizeof(struct virtchnl_vector_map));
+ len = struct_size(vimi, vecmap, adapter->num_msix_vectors);
vimi = kzalloc(len, GFP_KERNEL);
if (!vimi)
return;
@@ -416,7 +415,7 @@ int iavf_request_queues(struct iavf_adapter *adapter, int num)
return -EBUSY;
}
- vfres.num_queue_pairs = num;
+ vfres.num_queue_pairs = min_t(int, num, num_online_cpus());
adapter->current_op = VIRTCHNL_OP_REQUEST_QUEUES;
adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
@@ -433,9 +432,10 @@ int iavf_request_queues(struct iavf_adapter *adapter, int num)
void iavf_add_ether_addrs(struct iavf_adapter *adapter)
{
struct virtchnl_ether_addr_list *veal;
- int len, i = 0, count = 0;
struct iavf_mac_filter *f;
+ int i = 0, count = 0;
bool more = false;
+ size_t len;
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
@@ -457,15 +457,13 @@ void iavf_add_ether_addrs(struct iavf_adapter *adapter)
}
adapter->current_op = VIRTCHNL_OP_ADD_ETH_ADDR;
- len = sizeof(struct virtchnl_ether_addr_list) +
- (count * sizeof(struct virtchnl_ether_addr));
+ len = struct_size(veal, list, count);
if (len > IAVF_MAX_AQ_BUF_SIZE) {
dev_warn(&adapter->pdev->dev, "Too many add MAC changes in one request\n");
count = (IAVF_MAX_AQ_BUF_SIZE -
sizeof(struct virtchnl_ether_addr_list)) /
sizeof(struct virtchnl_ether_addr);
- len = sizeof(struct virtchnl_ether_addr_list) +
- (count * sizeof(struct virtchnl_ether_addr));
+ len = struct_size(veal, list, count);
more = true;
}
@@ -505,8 +503,9 @@ void iavf_del_ether_addrs(struct iavf_adapter *adapter)
{
struct virtchnl_ether_addr_list *veal;
struct iavf_mac_filter *f, *ftmp;
- int len, i = 0, count = 0;
+ int i = 0, count = 0;
bool more = false;
+ size_t len;
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
@@ -528,15 +527,13 @@ void iavf_del_ether_addrs(struct iavf_adapter *adapter)
}
adapter->current_op = VIRTCHNL_OP_DEL_ETH_ADDR;
- len = sizeof(struct virtchnl_ether_addr_list) +
- (count * sizeof(struct virtchnl_ether_addr));
+ len = struct_size(veal, list, count);
if (len > IAVF_MAX_AQ_BUF_SIZE) {
dev_warn(&adapter->pdev->dev, "Too many delete MAC changes in one request\n");
count = (IAVF_MAX_AQ_BUF_SIZE -
sizeof(struct virtchnl_ether_addr_list)) /
sizeof(struct virtchnl_ether_addr);
- len = sizeof(struct virtchnl_ether_addr_list) +
- (count * sizeof(struct virtchnl_ether_addr));
+ len = struct_size(veal, list, count);
more = true;
}
veal = kzalloc(len, GFP_ATOMIC);
@@ -938,22 +935,22 @@ static void iavf_print_link_message(struct iavf_adapter *adapter)
}
switch (adapter->link_speed) {
- case I40E_LINK_SPEED_40GB:
+ case IAVF_LINK_SPEED_40GB:
speed = "40 G";
break;
- case I40E_LINK_SPEED_25GB:
+ case IAVF_LINK_SPEED_25GB:
speed = "25 G";
break;
- case I40E_LINK_SPEED_20GB:
+ case IAVF_LINK_SPEED_20GB:
speed = "20 G";
break;
- case I40E_LINK_SPEED_10GB:
+ case IAVF_LINK_SPEED_10GB:
speed = "10 G";
break;
- case I40E_LINK_SPEED_1GB:
+ case IAVF_LINK_SPEED_1GB:
speed = "1000 M";
break;
- case I40E_LINK_SPEED_100MB:
+ case IAVF_LINK_SPEED_100MB:
speed = "100 M";
break;
default:
@@ -973,7 +970,7 @@ static void iavf_print_link_message(struct iavf_adapter *adapter)
void iavf_enable_channels(struct iavf_adapter *adapter)
{
struct virtchnl_tc_info *vti = NULL;
- u16 len;
+ size_t len;
int i;
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
@@ -983,9 +980,7 @@ void iavf_enable_channels(struct iavf_adapter *adapter)
return;
}
- len = (adapter->num_tc * sizeof(struct virtchnl_channel_info)) +
- sizeof(struct virtchnl_tc_info);
-
+ len = struct_size(vti, list, adapter->num_tc - 1);
vti = kzalloc(len, GFP_KERNEL);
if (!vti)
return;
@@ -1184,8 +1179,8 @@ void iavf_request_reset(struct iavf_adapter *adapter)
* This function handles the reply messages.
**/
void iavf_virtchnl_completion(struct iavf_adapter *adapter,
- enum virtchnl_ops v_opcode, iavf_status v_retval,
- u8 *msg, u16 msglen)
+ enum virtchnl_ops v_opcode,
+ enum iavf_status v_retval, u8 *msg, u16 msglen)
{
struct net_device *netdev = adapter->netdev;
@@ -1238,7 +1233,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
if (!(adapter->flags & IAVF_FLAG_RESET_PENDING)) {
adapter->flags |= IAVF_FLAG_RESET_PENDING;
dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
- schedule_work(&adapter->reset_task);
+ queue_work(iavf_wq, &adapter->reset_task);
}
break;
default:
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 792e6e42030e..9ee6b55553c0 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -44,15 +44,22 @@
extern const char ice_drv_ver[];
#define ICE_BAR0 0
#define ICE_REQ_DESC_MULTIPLE 32
-#define ICE_MIN_NUM_DESC ICE_REQ_DESC_MULTIPLE
+#define ICE_MIN_NUM_DESC 64
#define ICE_MAX_NUM_DESC 8160
-/* set default number of Rx/Tx descriptors to the minimum between
- * ICE_MAX_NUM_DESC and the number of descriptors to fill up an entire page
+#define ICE_DFLT_MIN_RX_DESC 512
+/* if the default number of Rx descriptors between ICE_MAX_NUM_DESC and the
+ * number of descriptors to fill up an entire page is greater than or equal to
+ * ICE_DFLT_MIN_RX_DESC set it based on page size, otherwise set it to
+ * ICE_DFLT_MIN_RX_DESC
+ */
+#define ICE_DFLT_NUM_RX_DESC \
+ min_t(u16, ICE_MAX_NUM_DESC, \
+ max_t(u16, ALIGN(PAGE_SIZE / sizeof(union ice_32byte_rx_desc), \
+ ICE_REQ_DESC_MULTIPLE), \
+ ICE_DFLT_MIN_RX_DESC))
+/* set default number of Tx descriptors to the minimum between ICE_MAX_NUM_DESC
+ * and the number of descriptors to fill up an entire page
*/
-#define ICE_DFLT_NUM_RX_DESC min_t(u16, ICE_MAX_NUM_DESC, \
- ALIGN(PAGE_SIZE / \
- sizeof(union ice_32byte_rx_desc), \
- ICE_REQ_DESC_MULTIPLE))
#define ICE_DFLT_NUM_TX_DESC min_t(u16, ICE_MAX_NUM_DESC, \
ALIGN(PAGE_SIZE / \
sizeof(struct ice_tx_desc), \
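
To make the new ICE_DFLT_NUM_RX_DESC concrete: with 4 KiB pages and the 32-byte Rx descriptor the macro divides by, one page holds 128 descriptors, which is below the new 512 floor, so the default becomes 512, still well under ICE_MAX_NUM_DESC. A small stand-alone check of that arithmetic, with illustrative macro names rather than the driver's:

#include <stdio.h>

#define PAGE_SZ		4096u
#define RX_DESC_SZ	32u
#define REQ_MULTIPLE	32u
#define MAX_NUM_DESC	8160u
#define DFLT_MIN_RX	512u

#define ALIGN_UP(x, a)	(((x) + (a) - 1) / (a) * (a))
#define MAX_U(a, b)	((a) > (b) ? (a) : (b))
#define MIN_U(a, b)	((a) < (b) ? (a) : (b))

int main(void)
{
	unsigned int per_page = ALIGN_UP(PAGE_SZ / RX_DESC_SZ, REQ_MULTIPLE);
	unsigned int dflt = MIN_U(MAX_NUM_DESC, MAX_U(per_page, DFLT_MIN_RX));

	/* prints 128 and 512: the new floor wins on 4K pages */
	printf("descriptors per page = %u, default Rx descriptors = %u\n",
	       per_page, dflt);
	return 0;
}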
@@ -160,7 +167,7 @@ struct ice_tc_cfg {
struct ice_res_tracker {
u16 num_entries;
- u16 search_hint;
+ u16 end;
u16 list[1];
};
@@ -182,6 +189,7 @@ struct ice_sw {
};
enum ice_state {
+ __ICE_TESTING,
__ICE_DOWN,
__ICE_NEEDS_RESTART,
__ICE_PREPARED_FOR_RESET, /* set by driver when prepared */
@@ -244,8 +252,7 @@ struct ice_vsi {
u32 rx_buf_failed;
u32 rx_page_failed;
int num_q_vectors;
- int sw_base_vector; /* Irq base for OS reserved vectors */
- int hw_base_vector; /* HW (absolute) index of a vector */
+ int base_vector; /* IRQ base for OS reserved vectors */
enum ice_vsi_type type;
u16 vsi_num; /* HW (absolute) index of this VSI */
u16 idx; /* software index in pf->vsi[] */
@@ -277,10 +284,10 @@ struct ice_vsi {
struct list_head tmp_sync_list; /* MAC filters to be synced */
struct list_head tmp_unsync_list; /* MAC filters to be unsynced */
- u8 irqs_ready;
- u8 current_isup; /* Sync 'link up' logging */
- u8 stat_offsets_loaded;
- u8 vlan_ena;
+ u8 irqs_ready:1;
+ u8 current_isup:1; /* Sync 'link up' logging */
+ u8 stat_offsets_loaded:1;
+ u8 vlan_ena:1;
/* queue information */
u8 tx_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */
@@ -330,7 +337,7 @@ enum ice_pf_flags {
ICE_FLAG_DCB_CAPABLE,
ICE_FLAG_DCB_ENA,
ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA,
- ICE_FLAG_DISABLE_FW_LLDP,
+ ICE_FLAG_ENABLE_FW_LLDP,
ICE_FLAG_ETHTOOL_CTXT, /* set when ethtool holds RTNL lock */
ICE_PF_FLAGS_NBITS /* must be last */
};
@@ -340,10 +347,12 @@ struct ice_pf {
/* OS reserved IRQ details */
struct msix_entry *msix_entries;
- struct ice_res_tracker *sw_irq_tracker;
-
- /* HW reserved Interrupts for this PF */
- struct ice_res_tracker *hw_irq_tracker;
+ struct ice_res_tracker *irq_tracker;
+ /* First MSIX vector used by SR-IOV VFs. Calculated by subtracting the
+ * number of MSIX vectors needed for all SR-IOV VFs from the number of
+ * MSIX vectors allowed on this PF.
+ */
+ u16 sriov_base_vector;
struct ice_vsi **vsi; /* VSIs created by the driver */
struct ice_sw *first_sw; /* first switch created by firmware */
@@ -365,10 +374,8 @@ struct ice_pf {
struct mutex sw_mutex; /* lock for protecting VSI alloc flow */
u32 msg_enable;
u32 hw_csum_rx_error;
- u32 sw_oicr_idx; /* Other interrupt cause SW vector index */
+ u32 oicr_idx; /* Other interrupt cause MSIX vector index */
u32 num_avail_sw_msix; /* remaining MSIX SW vectors left unclaimed */
- u32 hw_oicr_idx; /* Other interrupt cause vector HW index */
- u32 num_avail_hw_msix; /* remaining HW MSIX vectors left unclaimed */
u32 num_lan_msix; /* Total MSIX vectors for base driver */
u16 num_lan_tx; /* num LAN Tx queues setup */
u16 num_lan_rx; /* num LAN Rx queues setup */
@@ -384,7 +391,7 @@ struct ice_pf {
struct ice_hw_port_stats stats;
struct ice_hw_port_stats stats_prev;
struct ice_hw hw;
- u8 stat_prev_loaded; /* has previous stats been loaded */
+ u8 stat_prev_loaded:1; /* has previous stats been loaded */
#ifdef CONFIG_DCB
u16 dcbx_cap;
#endif /* CONFIG_DCB */
@@ -392,6 +399,7 @@ struct ice_pf {
unsigned long tx_timeout_last_recovery;
u32 tx_timeout_recovery_level;
char int_name[ICE_INT_NAME_STR_LEN];
+ u32 sw_int_count;
};
struct ice_netdev_priv {
@@ -409,7 +417,7 @@ ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi,
struct ice_q_vector *q_vector)
{
u32 vector = (vsi && q_vector) ? q_vector->reg_idx :
- ((struct ice_pf *)hw->back)->hw_oicr_idx;
+ ((struct ice_pf *)hw->back)->oicr_idx;
int itr = ICE_ITR_NONE;
u32 val;
@@ -444,17 +452,22 @@ ice_find_vsi_by_type(struct ice_pf *pf, enum ice_vsi_type type)
return NULL;
}
+int ice_vsi_setup_tx_rings(struct ice_vsi *vsi);
+int ice_vsi_setup_rx_rings(struct ice_vsi *vsi);
void ice_set_ethtool_ops(struct net_device *netdev);
int ice_up(struct ice_vsi *vsi);
int ice_down(struct ice_vsi *vsi);
+int ice_vsi_cfg(struct ice_vsi *vsi);
+struct ice_vsi *ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi);
int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size);
void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
-void ice_napi_del(struct ice_vsi *vsi);
#ifdef CONFIG_DCB
int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked);
void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked);
#endif /* CONFIG_DCB */
+int ice_open(struct net_device *netdev);
+int ice_stop(struct net_device *netdev);
#endif /* _ICE_H_ */
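
Several of the ice.h hunks above turn standalone u8 flags into single-bit fields (u8 irqs_ready:1 and friends), letting related booleans share one byte. A tiny stand-alone illustration of the space effect, with placeholder structures rather than struct ice_vsi itself:

#include <stdio.h>

struct flags_old {
	unsigned char irqs_ready, current_isup,
		      stat_offsets_loaded, vlan_ena;
};

struct flags_new {
	unsigned char irqs_ready:1, current_isup:1,
		      stat_offsets_loaded:1, vlan_ena:1;
};

int main(void)
{
	/* prints 4 bytes vs 1 byte */
	printf("old: %zu bytes, new: %zu byte(s)\n",
	       sizeof(struct flags_old), sizeof(struct flags_new));
	return 0;
}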
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 6ef083002f5b..765e3c2ed045 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -35,8 +35,8 @@ struct ice_aqc_get_ver {
/* Queue Shutdown (direct 0x0003) */
struct ice_aqc_q_shutdown {
-#define ICE_AQC_DRIVER_UNLOADING BIT(0)
__le32 driver_unloading;
+#define ICE_AQC_DRIVER_UNLOADING BIT(0)
u8 reserved[12];
};
@@ -120,11 +120,9 @@ struct ice_aqc_manage_mac_read {
#define ICE_AQC_MAN_MAC_WOL_ADDR_VALID BIT(7)
#define ICE_AQC_MAN_MAC_READ_S 4
#define ICE_AQC_MAN_MAC_READ_M (0xF << ICE_AQC_MAN_MAC_READ_S)
- u8 lport_num;
- u8 lport_num_valid;
-#define ICE_AQC_MAN_MAC_PORT_NUM_IS_VALID BIT(0)
+ u8 rsvd[2];
u8 num_addr; /* Used in response */
- u8 reserved[3];
+ u8 rsvd1[3];
__le32 addr_high;
__le32 addr_low;
};
@@ -140,7 +138,7 @@ struct ice_aqc_manage_mac_read_resp {
/* Manage MAC address, write command - direct (0x0108) */
struct ice_aqc_manage_mac_write {
- u8 port_num;
+ u8 rsvd;
u8 flags;
#define ICE_AQC_MAN_MAC_WR_MC_MAG_EN BIT(0)
#define ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP BIT(1)
@@ -920,6 +918,8 @@ struct ice_aqc_get_phy_caps_data {
#define ICE_AQC_PHY_EN_LINK BIT(3)
#define ICE_AQC_PHY_AN_MODE BIT(4)
#define ICE_AQC_GET_PHY_EN_MOD_QUAL BIT(5)
+#define ICE_AQC_PHY_EN_AUTO_FEC BIT(7)
+#define ICE_AQC_PHY_CAPS_MASK ICE_M(0xff, 0)
u8 low_power_ctrl;
#define ICE_AQC_PHY_EN_D3COLD_LOW_POWER_AUTONEG BIT(0)
__le16 eee_cap;
@@ -932,6 +932,7 @@ struct ice_aqc_get_phy_caps_data {
#define ICE_AQC_PHY_EEE_EN_40GBASE_KR4 BIT(6)
__le16 eeer_value;
u8 phy_id_oui[4]; /* PHY/Module ID connected on the port */
+ u8 phy_fw_ver[8];
u8 link_fec_options;
#define ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN BIT(0)
#define ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ BIT(1)
@@ -940,6 +941,8 @@ struct ice_aqc_get_phy_caps_data {
#define ICE_AQC_PHY_FEC_25G_RS_544_REQ BIT(4)
#define ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN BIT(6)
#define ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN BIT(7)
+#define ICE_AQC_PHY_FEC_MASK ICE_M(0xdf, 0)
+ u8 rsvd1; /* Byte 35 reserved */
u8 extended_compliance_code;
#define ICE_MODULE_TYPE_TOTAL_BYTE 3
u8 module_type[ICE_MODULE_TYPE_TOTAL_BYTE];
@@ -954,13 +957,14 @@ struct ice_aqc_get_phy_caps_data {
#define ICE_AQC_MOD_TYPE_BYTE2_SFP_PLUS 0xA0
#define ICE_AQC_MOD_TYPE_BYTE2_QSFP_PLUS 0x86
u8 qualified_module_count;
+ u8 rsvd2[7]; /* Bytes 47:41 reserved */
#define ICE_AQC_QUAL_MOD_COUNT_MAX 16
struct {
u8 v_oui[3];
- u8 rsvd1;
+ u8 rsvd3;
u8 v_part[16];
__le32 v_rev;
- __le64 rsvd8;
+ __le64 rsvd4;
} qual_modules[ICE_AQC_QUAL_MOD_COUNT_MAX];
};
@@ -1062,6 +1066,7 @@ struct ice_aqc_get_link_status_data {
#define ICE_AQ_LINK_25G_KR_FEC_EN BIT(0)
#define ICE_AQ_LINK_25G_RS_528_FEC_EN BIT(1)
#define ICE_AQ_LINK_25G_RS_544_FEC_EN BIT(2)
+#define ICE_AQ_FEC_MASK ICE_M(0x7, 0)
/* Pacing Config */
#define ICE_AQ_CFG_PACING_S 3
#define ICE_AQ_CFG_PACING_M (0xF << ICE_AQ_CFG_PACING_S)
@@ -1112,6 +1117,14 @@ struct ice_aqc_set_event_mask {
u8 reserved1[6];
};
+/* Set MAC Loopback command (direct 0x0620) */
+struct ice_aqc_set_mac_lb {
+ u8 lb_mode;
+#define ICE_AQ_MAC_LB_EN BIT(0)
+#define ICE_AQ_MAC_LB_OSC_CLK BIT(1)
+ u8 reserved[15];
+};
+
/* Set Port Identification LED (direct, 0x06E9) */
struct ice_aqc_set_port_id_led {
u8 lport_num;
@@ -1145,6 +1158,17 @@ struct ice_aqc_nvm {
__le32 addr_low;
};
+/* NVM Checksum Command (direct, 0x0706) */
+struct ice_aqc_nvm_checksum {
+ u8 flags;
+#define ICE_AQC_NVM_CHECKSUM_VERIFY BIT(0)
+#define ICE_AQC_NVM_CHECKSUM_RECALC BIT(1)
+ u8 rsvd;
+ __le16 checksum; /* Used only by response */
+#define ICE_AQC_NVM_CHECKSUM_CORRECT 0xBABA
+ u8 rsvd2[12];
+};
+
/**
* Send to PF command (indirect 0x0801) ID is only used by PF
*
@@ -1249,7 +1273,7 @@ struct ice_aqc_get_cee_dcb_cfg_resp {
};
/* Set Local LLDP MIB (indirect 0x0A08)
- * Used to replace the local MIB of a given LLDP agent. e.g. DCBx
+ * Used to replace the local MIB of a given LLDP agent. e.g. DCBX
*/
struct ice_aqc_lldp_set_local_mib {
u8 type;
@@ -1266,7 +1290,7 @@ struct ice_aqc_lldp_set_local_mib {
};
/* Stop/Start LLDP Agent (direct 0x0A09)
- * Used for stopping/starting specific LLDP agent. e.g. DCBx.
+ * Used for stopping/starting specific LLDP agent. e.g. DCBX.
* The same structure is used for the response, with the command field
* being used as the status field.
*/
@@ -1539,6 +1563,7 @@ struct ice_aq_desc {
struct ice_aqc_query_txsched_res query_sched_res;
struct ice_aqc_query_port_ets port_ets;
struct ice_aqc_nvm nvm;
+ struct ice_aqc_nvm_checksum nvm_checksum;
struct ice_aqc_pf_vf_msg virt;
struct ice_aqc_lldp_get_mib lldp_get_mib;
struct ice_aqc_lldp_set_mib_change lldp_set_event;
@@ -1554,6 +1579,7 @@ struct ice_aq_desc {
struct ice_aqc_add_update_free_vsi_resp add_update_free_vsi_res;
struct ice_aqc_fw_logging fw_logging;
struct ice_aqc_get_clear_fw_log get_clear_fw_log;
+ struct ice_aqc_set_mac_lb set_mac_lb;
struct ice_aqc_alloc_free_res_cmd sw_res_ctrl;
struct ice_aqc_set_event_mask set_event_mask;
struct ice_aqc_get_link_status get_link_status;
@@ -1642,10 +1668,12 @@ enum ice_adminq_opc {
ice_aqc_opc_restart_an = 0x0605,
ice_aqc_opc_get_link_status = 0x0607,
ice_aqc_opc_set_event_mask = 0x0613,
+ ice_aqc_opc_set_mac_lb = 0x0620,
ice_aqc_opc_set_port_id_led = 0x06E9,
/* NVM commands */
ice_aqc_opc_nvm_read = 0x0701,
+ ice_aqc_opc_nvm_checksum = 0x0706,
/* PF/VF mailbox commands */
ice_mbx_opc_send_msg_to_pf = 0x0801,
@@ -1671,6 +1699,7 @@ enum ice_adminq_opc {
/* debug commands */
ice_aqc_opc_fw_logging = 0xFF09,
+ ice_aqc_opc_fw_logging_info = 0xFF10,
};
#endif /* _ICE_ADMINQ_CMD_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index da7878529929..2e0731c1e1a3 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -51,9 +51,6 @@ static enum ice_status ice_set_mac_type(struct ice_hw *hw)
*/
void ice_dev_onetime_setup(struct ice_hw *hw)
{
- /* configure Rx - set non pxe mode */
- wr32(hw, GLLAN_RCTL_0, 0x1);
-
#define MBX_PF_VT_PFALLOC 0x00231E80
/* set VFs per PF */
wr32(hw, MBX_PF_VT_PFALLOC, rd32(hw, PF_VT_PFALLOC_HIF));
@@ -307,6 +304,8 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
hw_link_info->an_info = link_data.an_info;
hw_link_info->ext_info = link_data.ext_info;
hw_link_info->max_frame_size = le16_to_cpu(link_data.max_frame_size);
+ hw_link_info->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
+ hw_link_info->topo_media_conflict = link_data.topo_media_conflict;
hw_link_info->pacing = link_data.cfg & ICE_AQ_CFG_PACING_M;
/* update fc info */
@@ -476,6 +475,49 @@ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
ICE_FW_LOG_DESC_SIZE(ICE_AQC_FW_LOG_ID_MAX)
/**
+ * ice_get_fw_log_cfg - get FW logging configuration
+ * @hw: pointer to the HW struct
+ */
+static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw)
+{
+ struct ice_aqc_fw_logging_data *config;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+ u16 size;
+
+ size = ICE_FW_LOG_DESC_SIZE_MAX;
+ config = devm_kzalloc(ice_hw_to_dev(hw), size, GFP_KERNEL);
+ if (!config)
+ return ICE_ERR_NO_MEMORY;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info);
+
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ status = ice_aq_send_cmd(hw, &desc, config, size, NULL);
+ if (!status) {
+ u16 i;
+
+ /* Save FW logging information into the HW structure */
+ for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
+ u16 v, m, flgs;
+
+ v = le16_to_cpu(config->entry[i]);
+ m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
+ flgs = (v & ICE_AQC_FW_LOG_EN_M) >> ICE_AQC_FW_LOG_EN_S;
+
+ if (m < ICE_AQC_FW_LOG_ID_MAX)
+ hw->fw_log.evnts[m].cur = flgs;
+ }
+ }
+
+ devm_kfree(ice_hw_to_dev(hw), config);
+
+ return status;
+}
+
+/**
* ice_cfg_fw_log - configure FW logging
* @hw: pointer to the HW struct
* @enable: enable certain FW logging events if true, disable all if false
@@ -529,6 +571,11 @@ static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
(!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq)))
return 0;
+ /* Get current FW log settings */
+ status = ice_get_fw_log_cfg(hw);
+ if (status)
+ return status;
+
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging);
cmd = &desc.params.fw_logging;
@@ -634,17 +681,17 @@ out:
*/
void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
{
- ice_debug(hw, ICE_DBG_AQ_MSG, "[ FW Log Msg Start ]\n");
- ice_debug_array(hw, ICE_DBG_AQ_MSG, 16, 1, (u8 *)buf,
+ ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg Start ]\n");
+ ice_debug_array(hw, ICE_DBG_FW_LOG, 16, 1, (u8 *)buf,
le16_to_cpu(desc->datalen));
- ice_debug(hw, ICE_DBG_AQ_MSG, "[ FW Log Msg End ]\n");
+ ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg End ]\n");
}
/**
* ice_get_itr_intrl_gran - determine int/intrl granularity
* @hw: pointer to the HW struct
*
- * Determines the itr/intrl granularities based on the maximum aggregate
+ * Determines the ITR/intrl granularities based on the maximum aggregate
* bandwidth according to the device's configuration during power-on.
*/
static void ice_get_itr_intrl_gran(struct ice_hw *hw)
@@ -815,6 +862,10 @@ err_unroll_cqinit:
/**
* ice_deinit_hw - unroll initialization operations done by ice_init_hw
* @hw: pointer to the hardware structure
+ *
+ * This should be called only during nominal operation, not as a result of
+ * ice_init_hw() failing since ice_init_hw() will take care of unrolling
+ * applicable initializations if it fails for any reason.
*/
void ice_deinit_hw(struct ice_hw *hw)
{
@@ -1447,6 +1498,7 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
struct ice_hw_func_caps *func_p = NULL;
struct ice_hw_dev_caps *dev_p = NULL;
struct ice_hw_common_caps *caps;
+ char const *prefix;
u32 i;
if (!buf)
@@ -1457,9 +1509,11 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
if (opc == ice_aqc_opc_list_dev_caps) {
dev_p = &hw->dev_caps;
caps = &dev_p->common_cap;
+ prefix = "dev cap";
} else if (opc == ice_aqc_opc_list_func_caps) {
func_p = &hw->func_caps;
caps = &func_p->common_cap;
+ prefix = "func cap";
} else {
ice_debug(hw, ICE_DBG_INIT, "wrong opcode\n");
return;
@@ -1475,28 +1529,29 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
case ICE_AQC_CAPS_VALID_FUNCTIONS:
caps->valid_functions = number;
ice_debug(hw, ICE_DBG_INIT,
- "HW caps: Valid Functions = %d\n",
+ "%s: valid functions = %d\n", prefix,
caps->valid_functions);
break;
case ICE_AQC_CAPS_SRIOV:
caps->sr_iov_1_1 = (number == 1);
ice_debug(hw, ICE_DBG_INIT,
- "HW caps: SR-IOV = %d\n", caps->sr_iov_1_1);
+ "%s: SR-IOV = %d\n", prefix,
+ caps->sr_iov_1_1);
break;
case ICE_AQC_CAPS_VF:
if (dev_p) {
dev_p->num_vfs_exposed = number;
ice_debug(hw, ICE_DBG_INIT,
- "HW caps: VFs exposed = %d\n",
+ "%s: VFs exposed = %d\n", prefix,
dev_p->num_vfs_exposed);
} else if (func_p) {
func_p->num_allocd_vfs = number;
func_p->vf_base_id = logical_id;
ice_debug(hw, ICE_DBG_INIT,
- "HW caps: VFs allocated = %d\n",
+ "%s: VFs allocated = %d\n", prefix,
func_p->num_allocd_vfs);
ice_debug(hw, ICE_DBG_INIT,
- "HW caps: VF base_id = %d\n",
+ "%s: VF base_id = %d\n", prefix,
func_p->vf_base_id);
}
break;
@@ -1504,69 +1559,69 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
if (dev_p) {
dev_p->num_vsi_allocd_to_host = number;
ice_debug(hw, ICE_DBG_INIT,
- "HW caps: Dev.VSI cnt = %d\n",
+ "%s: num VSI alloc to host = %d\n",
+ prefix,
dev_p->num_vsi_allocd_to_host);
} else if (func_p) {
func_p->guar_num_vsi =
ice_get_num_per_func(hw, ICE_MAX_VSI);
ice_debug(hw, ICE_DBG_INIT,
- "HW caps: Func.VSI cnt = %d\n",
- number);
+ "%s: num guaranteed VSI (fw) = %d\n",
+ prefix, number);
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: num guaranteed VSI = %d\n",
+ prefix, func_p->guar_num_vsi);
}
break;
case ICE_AQC_CAPS_RSS:
caps->rss_table_size = number;
caps->rss_table_entry_width = logical_id;
ice_debug(hw, ICE_DBG_INIT,
- "HW caps: RSS table size = %d\n",
+ "%s: RSS table size = %d\n", prefix,
caps->rss_table_size);
ice_debug(hw, ICE_DBG_INIT,
- "HW caps: RSS table width = %d\n",
+ "%s: RSS table width = %d\n", prefix,
caps->rss_table_entry_width);
break;
case ICE_AQC_CAPS_RXQS:
caps->num_rxq = number;
caps->rxq_first_id = phys_id;
ice_debug(hw, ICE_DBG_INIT,
- "HW caps: Num Rx Qs = %d\n", caps->num_rxq);
+ "%s: num Rx queues = %d\n", prefix,
+ caps->num_rxq);
ice_debug(hw, ICE_DBG_INIT,
- "HW caps: Rx first queue ID = %d\n",
+ "%s: Rx first queue ID = %d\n", prefix,
caps->rxq_first_id);
break;
case ICE_AQC_CAPS_TXQS:
caps->num_txq = number;
caps->txq_first_id = phys_id;
ice_debug(hw, ICE_DBG_INIT,
- "HW caps: Num Tx Qs = %d\n", caps->num_txq);
+ "%s: num Tx queues = %d\n", prefix,
+ caps->num_txq);
ice_debug(hw, ICE_DBG_INIT,
- "HW caps: Tx first queue ID = %d\n",
+ "%s: Tx first queue ID = %d\n", prefix,
caps->txq_first_id);
break;
case ICE_AQC_CAPS_MSIX:
caps->num_msix_vectors = number;
caps->msix_vector_first_id = phys_id;
ice_debug(hw, ICE_DBG_INIT,
- "HW caps: MSIX vector count = %d\n",
+ "%s: MSIX vector count = %d\n", prefix,
caps->num_msix_vectors);
ice_debug(hw, ICE_DBG_INIT,
- "HW caps: MSIX first vector index = %d\n",
+ "%s: MSIX first vector index = %d\n", prefix,
caps->msix_vector_first_id);
break;
case ICE_AQC_CAPS_MAX_MTU:
caps->max_mtu = number;
- if (dev_p)
- ice_debug(hw, ICE_DBG_INIT,
- "HW caps: Dev.MaxMTU = %d\n",
- caps->max_mtu);
- else if (func_p)
- ice_debug(hw, ICE_DBG_INIT,
- "HW caps: func.MaxMTU = %d\n",
- caps->max_mtu);
+ ice_debug(hw, ICE_DBG_INIT, "%s: max MTU = %d\n",
+ prefix, caps->max_mtu);
break;
default:
ice_debug(hw, ICE_DBG_INIT,
- "HW caps: Unknown capability[%d]: 0x%x\n", i,
- cap);
+ "%s: unknown capability[%d]: 0x%x\n", prefix,
+ i, cap);
break;
}
}
@@ -1947,36 +2002,37 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
*/
enum ice_status ice_update_link_info(struct ice_port_info *pi)
{
- struct ice_aqc_get_phy_caps_data *pcaps;
- struct ice_phy_info *phy_info;
+ struct ice_link_status *li;
enum ice_status status;
- struct ice_hw *hw;
if (!pi)
return ICE_ERR_PARAM;
- hw = pi->hw;
-
- pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
- if (!pcaps)
- return ICE_ERR_NO_MEMORY;
+ li = &pi->phy.link_info;
- phy_info = &pi->phy;
status = ice_aq_get_link_info(pi, true, NULL, NULL);
if (status)
- goto out;
+ return status;
+
+ if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
+ struct ice_aqc_get_phy_caps_data *pcaps;
+ struct ice_hw *hw;
+
+ hw = pi->hw;
+ pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps),
+ GFP_KERNEL);
+ if (!pcaps)
+ return ICE_ERR_NO_MEMORY;
- if (phy_info->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG,
pcaps, NULL);
- if (status)
- goto out;
+ if (!status)
+ memcpy(li->module_type, &pcaps->module_type,
+ sizeof(li->module_type));
- memcpy(phy_info->link_info.module_type, &pcaps->module_type,
- sizeof(phy_info->link_info.module_type));
+ devm_kfree(ice_hw_to_dev(hw), pcaps);
}
-out:
- devm_kfree(ice_hw_to_dev(hw), pcaps);
+
return status;
}
@@ -2081,6 +2137,74 @@ out:
}
/**
+ * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
+ * @caps: PHY ability structure to copy data from
+ * @cfg: PHY configuration structure to copy data to
+ *
+ * Helper function to copy AQC PHY get ability data to PHY set configuration
+ * data structure
+ */
+void
+ice_copy_phy_caps_to_cfg(struct ice_aqc_get_phy_caps_data *caps,
+ struct ice_aqc_set_phy_cfg_data *cfg)
+{
+ if (!caps || !cfg)
+ return;
+
+ cfg->phy_type_low = caps->phy_type_low;
+ cfg->phy_type_high = caps->phy_type_high;
+ cfg->caps = caps->caps;
+ cfg->low_power_ctrl = caps->low_power_ctrl;
+ cfg->eee_cap = caps->eee_cap;
+ cfg->eeer_value = caps->eeer_value;
+ cfg->link_fec_opt = caps->link_fec_options;
+}
+
+/**
+ * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
+ * @cfg: PHY configuration data to set FEC mode
+ * @fec: FEC mode to configure
+ *
+ * Caller should copy ice_aqc_get_phy_caps_data.caps ICE_AQC_PHY_EN_AUTO_FEC
+ * (bit 7) and ice_aqc_get_phy_caps_data.link_fec_options to cfg.caps
+ * ICE_AQ_PHY_ENA_AUTO_FEC (bit 7) and cfg.link_fec_options before calling.
+ */
+void
+ice_cfg_phy_fec(struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fec_mode fec)
+{
+ switch (fec) {
+ case ICE_FEC_BASER:
+ /* Clear auto FEC and RS bits, and AND BASE-R ability
+ * bits and OR request bits.
+ */
+ cfg->caps &= ~ICE_AQC_PHY_EN_AUTO_FEC;
+ cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
+ ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
+ cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
+ ICE_AQC_PHY_FEC_25G_KR_REQ;
+ break;
+ case ICE_FEC_RS:
+ /* Clear auto FEC and BASE-R bits, and AND RS ability
+ * bits and OR request bits.
+ */
+ cfg->caps &= ~ICE_AQC_PHY_EN_AUTO_FEC;
+ cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
+ cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
+ ICE_AQC_PHY_FEC_25G_RS_544_REQ;
+ break;
+ case ICE_FEC_NONE:
+ /* Clear auto FEC and all FEC option bits. */
+ cfg->caps &= ~ICE_AQC_PHY_EN_AUTO_FEC;
+ cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
+ break;
+ case ICE_FEC_AUTO:
+ /* AND auto FEC bit, and all caps bits. */
+ cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
+ break;
+ }
+}
+
+/**
* ice_get_link_status - get status of the HW network link
* @pi: port information structure
* @link_up: pointer to bool (true/false = linkup/linkdown)
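The two helpers above are meant to be used together: copy the firmware-reported abilities into a set-config structure, then overlay the requested FEC mode. A minimal sketch of a caller, assuming the pattern used by the ethtool FEC handler later in this patch; the function name example_force_rs_fec() and the stack buffers are illustrative only (the driver normally allocates these structures with devm_kzalloc()):

/* Illustrative sketch only -- not part of this patch. */
static int example_force_rs_fec(struct ice_port_info *pi)
{
	struct ice_aqc_get_phy_caps_data caps = { 0 };
	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
	enum ice_status status;

	/* Read the currently active SW configuration from firmware */
	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG,
				     &caps, NULL);
	if (status)
		return -EAGAIN;

	/* Seed the config from the reported abilities ... */
	ice_copy_phy_caps_to_cfg(&caps, &cfg);

	/* ... then overlay the requested FEC mode and push it to firmware */
	ice_cfg_phy_fec(&cfg, ICE_FEC_RS);
	status = ice_aq_set_phy_cfg(pi->hw, pi->lport, &cfg, NULL);

	return status ? -EAGAIN : 0;
}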
@@ -2169,6 +2293,29 @@ ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
}
/**
+ * ice_aq_set_mac_loopback
+ * @hw: pointer to the HW struct
+ * @ena_lpbk: Enable or Disable loopback
+ * @cd: pointer to command details structure or NULL
+ *
+ * Enable/disable loopback on a given port
+ */
+enum ice_status
+ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_set_mac_lb *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.set_mac_lb;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
+ if (ena_lpbk)
+ cmd->lb_mode = ICE_AQ_MAC_LB_EN;
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
+
+/**
* ice_aq_set_port_id_led
* @pi: pointer to the port information
* @is_orig_mode: is this LED set to original mode (by the net-list)
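For reference, the ethtool loopback self-test added later in this patch brackets its frame exchange with this new wrapper roughly as follows (condensed from ice_loopback_test(); error paths trimmed):

	/* Enable MAC loopback in firmware before sending test frames */
	if (ice_aq_set_mac_loopback(&pf->hw, true, NULL))
		goto lbtest_mac_dis;

	/* ... transmit test frames and count the ones received back ... */

	/* Always disable loopback once the test is complete */
	if (ice_aq_set_mac_loopback(&pf->hw, false, NULL))
		netdev_err(netdev, "Could not disable MAC loopback\n");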
@@ -2552,7 +2699,7 @@ do_aq:
ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
vmvf_num, hw->adminq.sq_last_status);
else
- ice_debug(hw, ICE_DBG_SCHED, "disable Q %d failed %d\n",
+ ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
le16_to_cpu(qg_list[0].q_id[0]),
hw->adminq.sq_last_status);
}
@@ -2924,7 +3071,6 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
return ICE_ERR_CFG;
-
if (!num_queues) {
/* if queue is disabled already yet the disable queue command
* has to be sent to complete the VF reset, then call
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index f1ddebf45231..d1f8353fe6bb 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -9,6 +9,8 @@
#include "ice_switch.h"
#include <linux/avf/virtchnl.h>
+enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw);
+
void
ice_debug_cq(struct ice_hw *hw, u32 mask, void *desc, void *buf, u16 buf_len);
enum ice_status ice_init_hw(struct ice_hw *hw);
@@ -84,7 +86,11 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
enum ice_status
ice_set_fc(struct ice_port_info *pi, u8 *aq_failures,
bool ena_auto_link_update);
-
+void
+ice_cfg_phy_fec(struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fec_mode fec);
+void
+ice_copy_phy_caps_to_cfg(struct ice_aqc_get_phy_caps_data *caps,
+ struct ice_aqc_set_phy_cfg_data *cfg);
enum ice_status
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
struct ice_sq_cd *cd);
@@ -95,6 +101,9 @@ enum ice_status
ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
struct ice_sq_cd *cd);
enum ice_status
+ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd);
+
+enum ice_status
ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
struct ice_sq_cd *cd);
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index cc8cb5fdcdc1..e91ac4df0242 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -439,7 +439,7 @@ do { \
/* free the buffer info list */ \
if ((qi)->ring.cmd_buf) \
devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf); \
- /* free dma head */ \
+ /* free DMA head */ \
devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head); \
} while (0)
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h b/drivers/net/ethernet/intel/ice/ice_controlq.h
index e0585394d984..44945c2165d8 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.h
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.h
@@ -35,7 +35,7 @@ enum ice_ctl_q {
#define ICE_CTL_Q_SQ_CMD_TIMEOUT 250 /* msecs */
struct ice_ctl_q_ring {
- void *dma_head; /* Virtual address to dma head */
+ void *dma_head; /* Virtual address to DMA head */
struct ice_dma_mem desc_buf; /* descriptor ring memory */
void *cmd_buf; /* command buffer memory */
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c
index 8bbf48e04a1c..c2002ded65f6 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.c
@@ -82,12 +82,14 @@ ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update,
* @hw: pointer to the HW struct
* @shutdown_lldp_agent: True if LLDP Agent needs to be Shutdown
* False if LLDP Agent needs to be Stopped
+ * @persist: True if Stop/Shutdown of LLDP Agent needs to be persistent across
+ * reboots
* @cd: pointer to command details structure or NULL
*
* Stop or Shutdown the embedded LLDP Agent (0x0A05)
*/
enum ice_status
-ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent,
+ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, bool persist,
struct ice_sq_cd *cd)
{
struct ice_aqc_lldp_stop *cmd;
@@ -100,17 +102,22 @@ ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent,
if (shutdown_lldp_agent)
cmd->command |= ICE_AQ_LLDP_AGENT_SHUTDOWN;
+ if (persist)
+ cmd->command |= ICE_AQ_LLDP_AGENT_PERSIST_DIS;
+
return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
/**
* ice_aq_start_lldp
* @hw: pointer to the HW struct
+ * @persist: True if Start of LLDP Agent needs to be persistent across reboots
* @cd: pointer to command details structure or NULL
*
* Start the embedded LLDP Agent on all ports. (0x0A06)
*/
-enum ice_status ice_aq_start_lldp(struct ice_hw *hw, struct ice_sq_cd *cd)
+enum ice_status
+ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd)
{
struct ice_aqc_lldp_start *cmd;
struct ice_aq_desc desc;
@@ -121,6 +128,9 @@ enum ice_status ice_aq_start_lldp(struct ice_hw *hw, struct ice_sq_cd *cd)
cmd->command = ICE_AQ_LLDP_AGENT_START;
+ if (persist)
+ cmd->command |= ICE_AQ_LLDP_AGENT_PERSIST_ENA;
+
return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
@@ -163,7 +173,7 @@ ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
*
* Get the DCBX status from the Firmware
*/
-u8 ice_get_dcbx_status(struct ice_hw *hw)
+static u8 ice_get_dcbx_status(struct ice_hw *hw)
{
u32 reg;
@@ -614,7 +624,8 @@ ice_parse_org_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
*
* Parse DCB configuration from the LLDPDU
*/
-enum ice_status ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg)
+static enum ice_status
+ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg)
{
struct ice_lldp_org_tlv *tlv;
enum ice_status ret = 0;
@@ -658,13 +669,13 @@ enum ice_status ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg)
/**
* ice_aq_get_dcb_cfg
* @hw: pointer to the HW struct
- * @mib_type: mib type for the query
+ * @mib_type: MIB type for the query
* @bridgetype: bridge type for the query (remote)
* @dcbcfg: store for LLDPDU data
*
* Query DCB configuration from the firmware
*/
-static enum ice_status
+enum ice_status
ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype,
struct ice_dcbx_cfg *dcbcfg)
{
@@ -689,13 +700,13 @@ ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype,
}
/**
- * ice_aq_start_stop_dcbx - Start/Stop DCBx service in FW
+ * ice_aq_start_stop_dcbx - Start/Stop DCBX service in FW
* @hw: pointer to the HW struct
- * @start_dcbx_agent: True if DCBx Agent needs to be started
- * False if DCBx Agent needs to be stopped
- * @dcbx_agent_status: FW indicates back the DCBx agent status
- * True if DCBx Agent is active
- * False if DCBx Agent is stopped
+ * @start_dcbx_agent: True if DCBX Agent needs to be started
+ * False if DCBX Agent needs to be stopped
+ * @dcbx_agent_status: FW indicates back the DCBX agent status
+ * True if DCBX Agent is active
+ * False if DCBX Agent is stopped
* @cd: pointer to command details structure or NULL
*
* Start/Stop the embedded dcbx Agent. In case that this wrapper function
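With the new persist flag, a caller can make the requested agent state survive reboots. A condensed sketch, taken from the ethtool enable-fw-lldp handler updated later in this patch (status handling trimmed):

	/* Stop the FW LLDP agent; both shutdown and persist are requested */
	status = ice_aq_stop_lldp(&pf->hw, true, true, NULL);

	/* ... or restart it, again persistently across reboots */
	status = ice_aq_start_lldp(&pf->hw, true, NULL);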
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.h b/drivers/net/ethernet/intel/ice/ice_dcb.h
index e7d4416e3a66..522e1452abe2 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.h
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.h
@@ -120,8 +120,9 @@ struct ice_cee_app_prio {
u8 prio_map;
} __packed;
-u8 ice_get_dcbx_status(struct ice_hw *hw);
-enum ice_status ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg);
+enum ice_status
+ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype,
+ struct ice_dcbx_cfg *dcbcfg);
enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi);
enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi);
enum ice_status ice_init_dcb(struct ice_hw *hw);
@@ -131,9 +132,10 @@ ice_query_port_ets(struct ice_port_info *pi,
struct ice_sq_cd *cmd_details);
#ifdef CONFIG_DCB
enum ice_status
-ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent,
+ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, bool persist,
struct ice_sq_cd *cd);
-enum ice_status ice_aq_start_lldp(struct ice_hw *hw, struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd);
enum ice_status
ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent,
bool *dcbx_agent_status, struct ice_sq_cd *cd);
@@ -144,6 +146,7 @@ ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update,
static inline enum ice_status
ice_aq_stop_lldp(struct ice_hw __always_unused *hw,
bool __always_unused shutdown_lldp_agent,
+ bool __always_unused persist,
struct ice_sq_cd __always_unused *cd)
{
return 0;
@@ -151,6 +154,7 @@ ice_aq_stop_lldp(struct ice_hw __always_unused *hw,
static inline enum ice_status
ice_aq_start_lldp(struct ice_hw __always_unused *hw,
+ bool __always_unused persist,
struct ice_sq_cd __always_unused *cd)
{
return 0;
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index 3e81af1884fc..fe88b127ca42 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -120,12 +120,14 @@ static void ice_pf_dcb_recfg(struct ice_pf *pf)
tc_map = ICE_DFLT_TRAFFIC_CLASS;
ret = ice_vsi_cfg_tc(pf->vsi[v], tc_map);
- if (ret)
+ if (ret) {
dev_err(&pf->pdev->dev,
"Failed to config TC for VSI index: %d\n",
pf->vsi[v]->idx);
- else
- ice_vsi_map_rings_to_vectors(pf->vsi[v]);
+ continue;
+ }
+
+ ice_vsi_map_rings_to_vectors(pf->vsi[v]);
}
}
@@ -133,8 +135,10 @@ static void ice_pf_dcb_recfg(struct ice_pf *pf)
* ice_pf_dcb_cfg - Apply new DCB configuration
* @pf: pointer to the PF struct
* @new_cfg: DCBX config to apply
+ * @locked: is the RTNL held
*/
-static int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg)
+static
+int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
{
struct ice_dcbx_cfg *old_cfg, *curr_cfg;
struct ice_aqc_port_ets_elem buf = { 0 };
@@ -163,7 +167,8 @@ static int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg)
/* avoid race conditions by holding the lock while disabling and
* re-enabling the VSI
*/
- rtnl_lock();
+ if (!locked)
+ rtnl_lock();
ice_pf_dis_all_vsi(pf, true);
memcpy(curr_cfg, new_cfg, sizeof(*curr_cfg));
@@ -192,7 +197,8 @@ static int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg)
out:
ice_pf_ena_all_vsi(pf, true);
- rtnl_unlock();
+ if (!locked)
+ rtnl_unlock();
devm_kfree(&pf->pdev->dev, old_cfg);
return ret;
}
@@ -271,15 +277,16 @@ dcb_error:
prev_cfg->etscfg.tcbwtable[0] = ICE_TC_MAX_BW;
prev_cfg->etscfg.tsatable[0] = ICE_IEEE_TSA_ETS;
memcpy(&prev_cfg->etsrec, &prev_cfg->etscfg, sizeof(prev_cfg->etsrec));
- ice_pf_dcb_cfg(pf, prev_cfg);
+ ice_pf_dcb_cfg(pf, prev_cfg, false);
devm_kfree(&pf->pdev->dev, prev_cfg);
}
/**
* ice_dcb_init_cfg - set the initial DCB config in SW
- * @pf: pf to apply config to
+ * @pf: PF to apply config to
+ * @locked: Is the RTNL held
*/
-static int ice_dcb_init_cfg(struct ice_pf *pf)
+static int ice_dcb_init_cfg(struct ice_pf *pf, bool locked)
{
struct ice_dcbx_cfg *newcfg;
struct ice_port_info *pi;
@@ -294,7 +301,7 @@ static int ice_dcb_init_cfg(struct ice_pf *pf)
memset(&pi->local_dcbx_cfg, 0, sizeof(*newcfg));
dev_info(&pf->pdev->dev, "Configuring initial DCB values\n");
- if (ice_pf_dcb_cfg(pf, newcfg))
+ if (ice_pf_dcb_cfg(pf, newcfg, locked))
ret = -EINVAL;
devm_kfree(&pf->pdev->dev, newcfg);
@@ -304,9 +311,10 @@ static int ice_dcb_init_cfg(struct ice_pf *pf)
/**
* ice_dcb_sw_default_config - Apply a default DCB config
- * @pf: pf to apply config to
+ * @pf: PF to apply config to
+ * @locked: was this function called with RTNL held
*/
-static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf)
+static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool locked)
{
struct ice_aqc_port_ets_elem buf = { 0 };
struct ice_dcbx_cfg *dcbcfg;
@@ -338,7 +346,7 @@ static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf)
dcbcfg->app[0].priority = 3;
dcbcfg->app[0].prot_id = ICE_APP_PROT_ID_FCOE;
- ret = ice_pf_dcb_cfg(pf, dcbcfg);
+ ret = ice_pf_dcb_cfg(pf, dcbcfg, locked);
devm_kfree(&pf->pdev->dev, dcbcfg);
if (ret)
return ret;
@@ -348,9 +356,10 @@ static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf)
/**
* ice_init_pf_dcb - initialize DCB for a PF
- * @pf: pf to initiialize DCB for
+ * @pf: PF to initialize DCB for
+ * @locked: Was function called with RTNL held
*/
-int ice_init_pf_dcb(struct ice_pf *pf)
+int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
{
struct device *dev = &pf->pdev->dev;
struct ice_port_info *port_info;
@@ -360,33 +369,10 @@ int ice_init_pf_dcb(struct ice_pf *pf)
port_info = hw->port_info;
- /* check if device is DCB capable */
- if (!hw->func_caps.common_cap.dcb) {
- dev_dbg(dev, "DCB not supported\n");
- return -EOPNOTSUPP;
- }
-
- /* Best effort to put DCBx and LLDP into a good state */
- port_info->dcbx_status = ice_get_dcbx_status(hw);
- if (port_info->dcbx_status != ICE_DCBX_STATUS_DONE &&
- port_info->dcbx_status != ICE_DCBX_STATUS_IN_PROGRESS) {
- bool dcbx_status;
-
- /* Attempt to start LLDP engine. Ignore errors
- * as this will error if it is already started
- */
- ice_aq_start_lldp(hw, NULL);
-
- /* Attempt to start DCBX. Ignore errors as this
- * will error if it is already started
- */
- ice_aq_start_stop_dcbx(hw, true, &dcbx_status, NULL);
- }
-
err = ice_init_dcb(hw);
if (err) {
- /* FW LLDP not in usable state, default to SW DCBx/LLDP */
- dev_info(&pf->pdev->dev, "FW LLDP not in usable state\n");
+ /* FW LLDP is not active, default to SW DCBX/LLDP */
+ dev_info(&pf->pdev->dev, "FW LLDP is not active\n");
hw->port_info->dcbx_status = ICE_DCBX_STATUS_NOT_STARTED;
hw->port_info->is_sw_lldp = true;
}
@@ -398,15 +384,16 @@ int ice_init_pf_dcb(struct ice_pf *pf)
if (port_info->is_sw_lldp) {
sw_default = 1;
dev_info(&pf->pdev->dev, "DCBx/LLDP in SW mode.\n");
+ clear_bit(ICE_FLAG_ENABLE_FW_LLDP, pf->flags);
+ } else {
+ set_bit(ICE_FLAG_ENABLE_FW_LLDP, pf->flags);
}
- if (port_info->dcbx_status == ICE_DCBX_STATUS_NOT_STARTED) {
- sw_default = 1;
+ if (port_info->dcbx_status == ICE_DCBX_STATUS_NOT_STARTED)
dev_info(&pf->pdev->dev, "DCBX not started\n");
- }
if (sw_default) {
- err = ice_dcb_sw_dflt_cfg(pf);
+ err = ice_dcb_sw_dflt_cfg(pf, locked);
if (err) {
dev_err(&pf->pdev->dev,
"Failed to set local DCB config %d\n", err);
@@ -425,7 +412,7 @@ int ice_init_pf_dcb(struct ice_pf *pf)
set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
- err = ice_dcb_init_cfg(pf);
+ err = ice_dcb_init_cfg(pf, locked);
if (err)
goto dcb_init_err;
@@ -515,6 +502,55 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_ring *tx_ring,
}
/**
+ * ice_dcb_need_recfg - Check if DCB needs reconfig
+ * @pf: board private structure
+ * @old_cfg: current DCB config
+ * @new_cfg: new DCB config
+ */
+static bool ice_dcb_need_recfg(struct ice_pf *pf, struct ice_dcbx_cfg *old_cfg,
+ struct ice_dcbx_cfg *new_cfg)
+{
+ bool need_reconfig = false;
+
+ /* Check if ETS configuration has changed */
+ if (memcmp(&new_cfg->etscfg, &old_cfg->etscfg,
+ sizeof(new_cfg->etscfg))) {
+ /* If Priority Table has changed reconfig is needed */
+ if (memcmp(&new_cfg->etscfg.prio_table,
+ &old_cfg->etscfg.prio_table,
+ sizeof(new_cfg->etscfg.prio_table))) {
+ need_reconfig = true;
+ dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
+ }
+
+ if (memcmp(&new_cfg->etscfg.tcbwtable,
+ &old_cfg->etscfg.tcbwtable,
+ sizeof(new_cfg->etscfg.tcbwtable)))
+ dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
+
+ if (memcmp(&new_cfg->etscfg.tsatable,
+ &old_cfg->etscfg.tsatable,
+ sizeof(new_cfg->etscfg.tsatable)))
+ dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
+ }
+
+ /* Check if PFC configuration has changed */
+ if (memcmp(&new_cfg->pfc, &old_cfg->pfc, sizeof(new_cfg->pfc))) {
+ need_reconfig = true;
+ dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
+ }
+
+ /* Check if APP Table has changed */
+ if (memcmp(&new_cfg->app, &old_cfg->app, sizeof(new_cfg->app))) {
+ need_reconfig = true;
+ dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
+ }
+
+ dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
+ return need_reconfig;
+}
+
+/**
* ice_dcb_process_lldp_set_mib_change - Process MIB change
* @pf: ptr to ice_pf
* @event: pointer to the admin queue receive event
@@ -523,29 +559,95 @@ void
ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
struct ice_rq_event_info *event)
{
- if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) {
- struct ice_dcbx_cfg *dcbcfg, *prev_cfg;
- int err;
-
- prev_cfg = &pf->hw.port_info->local_dcbx_cfg;
- dcbcfg = devm_kmemdup(&pf->pdev->dev, prev_cfg,
- sizeof(*dcbcfg), GFP_KERNEL);
- if (!dcbcfg)
+ struct ice_aqc_port_ets_elem buf = { 0 };
+ struct ice_aqc_lldp_get_mib *mib;
+ struct ice_dcbx_cfg tmp_dcbx_cfg;
+ bool need_reconfig = false;
+ struct ice_port_info *pi;
+ u8 type;
+ int ret;
+
+ /* Not DCB capable or capability disabled */
+ if (!(test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags)))
+ return;
+
+ if (pf->dcbx_cap & DCB_CAP_DCBX_HOST) {
+ dev_dbg(&pf->pdev->dev,
+ "MIB Change Event in HOST mode\n");
+ return;
+ }
+
+ pi = pf->hw.port_info;
+ mib = (struct ice_aqc_lldp_get_mib *)&event->desc.params.raw;
+ /* Ignore if event is not for Nearest Bridge */
+ type = ((mib->type >> ICE_AQ_LLDP_BRID_TYPE_S) &
+ ICE_AQ_LLDP_BRID_TYPE_M);
+ dev_dbg(&pf->pdev->dev, "LLDP event MIB bridge type 0x%x\n", type);
+ if (type != ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID)
+ return;
+
+ /* Check MIB Type and return if event for Remote MIB update */
+ type = mib->type & ICE_AQ_LLDP_MIB_TYPE_M;
+ dev_dbg(&pf->pdev->dev,
+		"LLDP event MIB type %s\n", type ? "remote" : "local");
+ if (type == ICE_AQ_LLDP_MIB_REMOTE) {
+ /* Update the remote cached instance and return */
+ ret = ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_REMOTE,
+ ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID,
+ &pi->remote_dcbx_cfg);
+ if (ret) {
+ dev_err(&pf->pdev->dev, "Failed to get remote DCB config\n");
return;
+ }
+ }
- err = ice_lldp_to_dcb_cfg(event->msg_buf, dcbcfg);
- if (!err)
- ice_pf_dcb_cfg(pf, dcbcfg);
+ /* store the old configuration */
+ tmp_dcbx_cfg = pf->hw.port_info->local_dcbx_cfg;
- devm_kfree(&pf->pdev->dev, dcbcfg);
+ /* Reset the old DCBX configuration data */
+ memset(&pi->local_dcbx_cfg, 0, sizeof(pi->local_dcbx_cfg));
- /* Get updated DCBx data from firmware */
- err = ice_get_dcb_cfg(pf->hw.port_info);
- if (err)
- dev_err(&pf->pdev->dev,
- "Failed to get DCB config\n");
- } else {
+ /* Get updated DCBX data from firmware */
+ ret = ice_get_dcb_cfg(pf->hw.port_info);
+ if (ret) {
+ dev_err(&pf->pdev->dev, "Failed to get DCB config\n");
+ return;
+ }
+
+ /* No change detected in DCBX configs */
+ if (!memcmp(&tmp_dcbx_cfg, &pi->local_dcbx_cfg, sizeof(tmp_dcbx_cfg))) {
dev_dbg(&pf->pdev->dev,
- "MIB Change Event in HOST mode\n");
+ "No change detected in DCBX configuration.\n");
+ return;
+ }
+
+ need_reconfig = ice_dcb_need_recfg(pf, &tmp_dcbx_cfg,
+ &pi->local_dcbx_cfg);
+ if (!need_reconfig)
+ return;
+
+ /* Enable DCB tagging only when more than one TC */
+ if (ice_dcb_get_num_tc(&pi->local_dcbx_cfg) > 1) {
+ dev_dbg(&pf->pdev->dev, "DCB tagging enabled (num TC > 1)\n");
+ set_bit(ICE_FLAG_DCB_ENA, pf->flags);
+ } else {
+ dev_dbg(&pf->pdev->dev, "DCB tagging disabled (num TC = 1)\n");
+ clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
}
+
+ rtnl_lock();
+ ice_pf_dis_all_vsi(pf, true);
+
+ ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
+ if (ret) {
+ dev_err(&pf->pdev->dev, "Query Port ETS failed\n");
+ rtnl_unlock();
+ return;
+ }
+
+ /* changes in configuration update VSI */
+ ice_pf_dcb_recfg(pf);
+
+ ice_pf_ena_all_vsi(pf, true);
+ rtnl_unlock();
}
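The locked parameter threaded through ice_init_pf_dcb(), ice_dcb_sw_dflt_cfg(), ice_dcb_init_cfg() and ice_pf_dcb_cfg() tells the DCB code whether the caller already holds the RTNL, so rtnl_lock() is only taken when needed. A minimal sketch of the two kinds of call sites (the probe-path call is an assumption; the ethtool call appears later in this patch):

	/* Probe/reset paths do not hold the RTNL, so let DCB take it itself */
	err = ice_init_pf_dcb(pf, false);

	/* ethtool's set_priv_flags handler already runs under the RTNL, so it
	 * must pass locked = true to avoid a recursive rtnl_lock().
	 */
	err = ice_init_pf_dcb(pf, true);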
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
index ca7b76faa03c..819081053ff5 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
@@ -14,7 +14,7 @@ void ice_dcb_rebuild(struct ice_pf *pf);
u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg);
u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg);
void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi);
-int ice_init_pf_dcb(struct ice_pf *pf);
+int ice_init_pf_dcb(struct ice_pf *pf, bool locked);
void ice_update_dcb_stats(struct ice_pf *pf);
int
ice_tx_prepare_vlan_flags_dcb(struct ice_ring *tx_ring,
@@ -40,7 +40,8 @@ static inline u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg __always_unused *dcbcfg)
return 1;
}
-static inline int ice_init_pf_dcb(struct ice_pf *pf)
+static inline int
+ice_init_pf_dcb(struct ice_pf *pf, bool __always_unused locked)
{
dev_dbg(&pf->pdev->dev, "DCB not supported\n");
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 1341fde8d53f..52083a63dee6 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -45,22 +45,40 @@ static int ice_q_stats_len(struct net_device *netdev)
ICE_VSI_STATS_LEN + ice_q_stats_len(n))
static const struct ice_stats ice_gstrings_vsi_stats[] = {
- ICE_VSI_STAT("tx_unicast", eth_stats.tx_unicast),
ICE_VSI_STAT("rx_unicast", eth_stats.rx_unicast),
- ICE_VSI_STAT("tx_multicast", eth_stats.tx_multicast),
+ ICE_VSI_STAT("tx_unicast", eth_stats.tx_unicast),
ICE_VSI_STAT("rx_multicast", eth_stats.rx_multicast),
- ICE_VSI_STAT("tx_broadcast", eth_stats.tx_broadcast),
+ ICE_VSI_STAT("tx_multicast", eth_stats.tx_multicast),
ICE_VSI_STAT("rx_broadcast", eth_stats.rx_broadcast),
- ICE_VSI_STAT("tx_bytes", eth_stats.tx_bytes),
+ ICE_VSI_STAT("tx_broadcast", eth_stats.tx_broadcast),
ICE_VSI_STAT("rx_bytes", eth_stats.rx_bytes),
- ICE_VSI_STAT("rx_discards", eth_stats.rx_discards),
- ICE_VSI_STAT("tx_errors", eth_stats.tx_errors),
- ICE_VSI_STAT("tx_linearize", tx_linearize),
+ ICE_VSI_STAT("tx_bytes", eth_stats.tx_bytes),
+ ICE_VSI_STAT("rx_dropped", eth_stats.rx_discards),
ICE_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol),
ICE_VSI_STAT("rx_alloc_fail", rx_buf_failed),
ICE_VSI_STAT("rx_pg_alloc_fail", rx_page_failed),
+ ICE_VSI_STAT("tx_errors", eth_stats.tx_errors),
+ ICE_VSI_STAT("tx_linearize", tx_linearize),
+};
+
+enum ice_ethtool_test_id {
+ ICE_ETH_TEST_REG = 0,
+ ICE_ETH_TEST_EEPROM,
+ ICE_ETH_TEST_INTR,
+ ICE_ETH_TEST_LOOP,
+ ICE_ETH_TEST_LINK,
};
+static const char ice_gstrings_test[][ETH_GSTRING_LEN] = {
+ "Register test (offline)",
+ "EEPROM test (offline)",
+ "Interrupt test (offline)",
+ "Loopback test (offline)",
+ "Link test (on/offline)",
+};
+
+#define ICE_TEST_LEN (sizeof(ice_gstrings_test) / ETH_GSTRING_LEN)
+
/* These PF_STATs might look like duplicates of some NETDEV_STATs,
* but they aren't. This device is capable of supporting multiple
* VSIs/netdevs on a single PF. The NETDEV_STATs are for individual
@@ -71,45 +89,45 @@ static const struct ice_stats ice_gstrings_vsi_stats[] = {
* is queried on the base PF netdev.
*/
static const struct ice_stats ice_gstrings_pf_stats[] = {
- ICE_PF_STAT("port.tx_bytes", stats.eth.tx_bytes),
- ICE_PF_STAT("port.rx_bytes", stats.eth.rx_bytes),
- ICE_PF_STAT("port.tx_unicast", stats.eth.tx_unicast),
- ICE_PF_STAT("port.rx_unicast", stats.eth.rx_unicast),
- ICE_PF_STAT("port.tx_multicast", stats.eth.tx_multicast),
- ICE_PF_STAT("port.rx_multicast", stats.eth.rx_multicast),
- ICE_PF_STAT("port.tx_broadcast", stats.eth.tx_broadcast),
- ICE_PF_STAT("port.rx_broadcast", stats.eth.rx_broadcast),
- ICE_PF_STAT("port.tx_errors", stats.eth.tx_errors),
- ICE_PF_STAT("port.tx_size_64", stats.tx_size_64),
- ICE_PF_STAT("port.rx_size_64", stats.rx_size_64),
- ICE_PF_STAT("port.tx_size_127", stats.tx_size_127),
- ICE_PF_STAT("port.rx_size_127", stats.rx_size_127),
- ICE_PF_STAT("port.tx_size_255", stats.tx_size_255),
- ICE_PF_STAT("port.rx_size_255", stats.rx_size_255),
- ICE_PF_STAT("port.tx_size_511", stats.tx_size_511),
- ICE_PF_STAT("port.rx_size_511", stats.rx_size_511),
- ICE_PF_STAT("port.tx_size_1023", stats.tx_size_1023),
- ICE_PF_STAT("port.rx_size_1023", stats.rx_size_1023),
- ICE_PF_STAT("port.tx_size_1522", stats.tx_size_1522),
- ICE_PF_STAT("port.rx_size_1522", stats.rx_size_1522),
- ICE_PF_STAT("port.tx_size_big", stats.tx_size_big),
- ICE_PF_STAT("port.rx_size_big", stats.rx_size_big),
- ICE_PF_STAT("port.link_xon_tx", stats.link_xon_tx),
- ICE_PF_STAT("port.link_xon_rx", stats.link_xon_rx),
- ICE_PF_STAT("port.link_xoff_tx", stats.link_xoff_tx),
- ICE_PF_STAT("port.link_xoff_rx", stats.link_xoff_rx),
- ICE_PF_STAT("port.tx_dropped_link_down", stats.tx_dropped_link_down),
- ICE_PF_STAT("port.rx_undersize", stats.rx_undersize),
- ICE_PF_STAT("port.rx_fragments", stats.rx_fragments),
- ICE_PF_STAT("port.rx_oversize", stats.rx_oversize),
- ICE_PF_STAT("port.rx_jabber", stats.rx_jabber),
- ICE_PF_STAT("port.rx_csum_bad", hw_csum_rx_error),
- ICE_PF_STAT("port.rx_length_errors", stats.rx_len_errors),
- ICE_PF_STAT("port.rx_dropped", stats.eth.rx_discards),
- ICE_PF_STAT("port.rx_crc_errors", stats.crc_errors),
- ICE_PF_STAT("port.illegal_bytes", stats.illegal_bytes),
- ICE_PF_STAT("port.mac_local_faults", stats.mac_local_faults),
- ICE_PF_STAT("port.mac_remote_faults", stats.mac_remote_faults),
+ ICE_PF_STAT("rx_bytes.nic", stats.eth.rx_bytes),
+ ICE_PF_STAT("tx_bytes.nic", stats.eth.tx_bytes),
+ ICE_PF_STAT("rx_unicast.nic", stats.eth.rx_unicast),
+ ICE_PF_STAT("tx_unicast.nic", stats.eth.tx_unicast),
+ ICE_PF_STAT("rx_multicast.nic", stats.eth.rx_multicast),
+ ICE_PF_STAT("tx_multicast.nic", stats.eth.tx_multicast),
+ ICE_PF_STAT("rx_broadcast.nic", stats.eth.rx_broadcast),
+ ICE_PF_STAT("tx_broadcast.nic", stats.eth.tx_broadcast),
+ ICE_PF_STAT("tx_errors.nic", stats.eth.tx_errors),
+ ICE_PF_STAT("rx_size_64.nic", stats.rx_size_64),
+ ICE_PF_STAT("tx_size_64.nic", stats.tx_size_64),
+ ICE_PF_STAT("rx_size_127.nic", stats.rx_size_127),
+ ICE_PF_STAT("tx_size_127.nic", stats.tx_size_127),
+ ICE_PF_STAT("rx_size_255.nic", stats.rx_size_255),
+ ICE_PF_STAT("tx_size_255.nic", stats.tx_size_255),
+ ICE_PF_STAT("rx_size_511.nic", stats.rx_size_511),
+ ICE_PF_STAT("tx_size_511.nic", stats.tx_size_511),
+ ICE_PF_STAT("rx_size_1023.nic", stats.rx_size_1023),
+ ICE_PF_STAT("tx_size_1023.nic", stats.tx_size_1023),
+ ICE_PF_STAT("rx_size_1522.nic", stats.rx_size_1522),
+ ICE_PF_STAT("tx_size_1522.nic", stats.tx_size_1522),
+ ICE_PF_STAT("rx_size_big.nic", stats.rx_size_big),
+ ICE_PF_STAT("tx_size_big.nic", stats.tx_size_big),
+ ICE_PF_STAT("link_xon_rx.nic", stats.link_xon_rx),
+ ICE_PF_STAT("link_xon_tx.nic", stats.link_xon_tx),
+ ICE_PF_STAT("link_xoff_rx.nic", stats.link_xoff_rx),
+ ICE_PF_STAT("link_xoff_tx.nic", stats.link_xoff_tx),
+ ICE_PF_STAT("tx_dropped_link_down.nic", stats.tx_dropped_link_down),
+ ICE_PF_STAT("rx_undersize.nic", stats.rx_undersize),
+ ICE_PF_STAT("rx_fragments.nic", stats.rx_fragments),
+ ICE_PF_STAT("rx_oversize.nic", stats.rx_oversize),
+ ICE_PF_STAT("rx_jabber.nic", stats.rx_jabber),
+ ICE_PF_STAT("rx_csum_bad.nic", hw_csum_rx_error),
+ ICE_PF_STAT("rx_length_errors.nic", stats.rx_len_errors),
+ ICE_PF_STAT("rx_dropped.nic", stats.eth.rx_discards),
+ ICE_PF_STAT("rx_crc_errors.nic", stats.crc_errors),
+ ICE_PF_STAT("illegal_bytes.nic", stats.illegal_bytes),
+ ICE_PF_STAT("mac_local_faults.nic", stats.mac_local_faults),
+ ICE_PF_STAT("mac_remote_faults.nic", stats.mac_remote_faults),
};
static const u32 ice_regs_dump_list[] = {
@@ -120,6 +138,9 @@ static const u32 ice_regs_dump_list[] = {
QINT_RQCTL(0),
PFINT_OICR_ENA,
QRX_ITR(0),
+ PF0INT_ITR_0(0),
+ PF0INT_ITR_1(0),
+ PF0INT_ITR_2(0),
};
struct ice_priv_flag {
@@ -134,7 +155,7 @@ struct ice_priv_flag {
static const struct ice_priv_flag ice_gstrings_priv_flags[] = {
ICE_PRIV_FLAG("link-down-on-close", ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA),
- ICE_PRIV_FLAG("disable-fw-lldp", ICE_FLAG_DISABLE_FW_LLDP),
+ ICE_PRIV_FLAG("enable-fw-lldp", ICE_FLAG_ENABLE_FW_LLDP),
};
#define ICE_PRIV_FLAG_ARRAY_SIZE ARRAY_SIZE(ice_gstrings_priv_flags)
@@ -278,6 +299,571 @@ out:
return ret;
}
+/**
+ * ice_active_vfs - check if there are any active VFs
+ * @pf: board private structure
+ *
+ * Returns true if an active VF is found, otherwise returns false
+ */
+static bool ice_active_vfs(struct ice_pf *pf)
+{
+ struct ice_vf *vf = pf->vf;
+ int i;
+
+ for (i = 0; i < pf->num_alloc_vfs; i++, vf++)
+ if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
+ return true;
+ return false;
+}
+
+/**
+ * ice_link_test - perform a link test on a given net_device
+ * @netdev: network interface device structure
+ *
+ * This function performs one of the self-tests required by ethtool.
+ * Returns 0 on success, non-zero on failure.
+ */
+static u64 ice_link_test(struct net_device *netdev)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ enum ice_status status;
+ bool link_up = false;
+
+ netdev_info(netdev, "link test\n");
+ status = ice_get_link_status(np->vsi->port_info, &link_up);
+ if (status) {
+ netdev_err(netdev, "link query error, status = %d\n", status);
+ return 1;
+ }
+
+ if (!link_up)
+ return 2;
+
+ return 0;
+}
+
+/**
+ * ice_eeprom_test - perform an EEPROM test on a given net_device
+ * @netdev: network interface device structure
+ *
+ * This function performs one of the self-tests required by ethtool.
+ * Returns 0 on success, non-zero on failure.
+ */
+static u64 ice_eeprom_test(struct net_device *netdev)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_pf *pf = np->vsi->back;
+
+ netdev_info(netdev, "EEPROM test\n");
+ return !!(ice_nvm_validate_checksum(&pf->hw));
+}
+
+/**
+ * ice_reg_pattern_test
+ * @hw: pointer to the HW struct
+ * @reg: reg to be tested
+ * @mask: bits to be touched
+ */
+static int ice_reg_pattern_test(struct ice_hw *hw, u32 reg, u32 mask)
+{
+ struct ice_pf *pf = (struct ice_pf *)hw->back;
+ static const u32 patterns[] = {
+ 0x5A5A5A5A, 0xA5A5A5A5,
+ 0x00000000, 0xFFFFFFFF
+ };
+ u32 val, orig_val;
+ int i;
+
+ orig_val = rd32(hw, reg);
+ for (i = 0; i < ARRAY_SIZE(patterns); ++i) {
+ u32 pattern = patterns[i] & mask;
+
+ wr32(hw, reg, pattern);
+ val = rd32(hw, reg);
+ if (val == pattern)
+ continue;
+ dev_err(&pf->pdev->dev,
+ "%s: reg pattern test failed - reg 0x%08x pat 0x%08x val 0x%08x\n"
+ , __func__, reg, pattern, val);
+ return 1;
+ }
+
+ wr32(hw, reg, orig_val);
+ val = rd32(hw, reg);
+ if (val != orig_val) {
+ dev_err(&pf->pdev->dev,
+ "%s: reg restore test failed - reg 0x%08x orig 0x%08x val 0x%08x\n"
+ , __func__, reg, orig_val, val);
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_reg_test - perform a register test on a given net_device
+ * @netdev: network interface device structure
+ *
+ * This function performs one of the self-tests required by ethtool.
+ * Returns 0 on success, non-zero on failure.
+ */
+static u64 ice_reg_test(struct net_device *netdev)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_hw *hw = np->vsi->port_info->hw;
+ u32 int_elements = hw->func_caps.common_cap.num_msix_vectors ?
+ hw->func_caps.common_cap.num_msix_vectors - 1 : 1;
+ struct ice_diag_reg_test_info {
+ u32 address;
+ u32 mask;
+ u32 elem_num;
+ u32 elem_size;
+ } ice_reg_list[] = {
+ {GLINT_ITR(0, 0), 0x00000fff, int_elements,
+ GLINT_ITR(0, 1) - GLINT_ITR(0, 0)},
+ {GLINT_ITR(1, 0), 0x00000fff, int_elements,
+ GLINT_ITR(1, 1) - GLINT_ITR(1, 0)},
+		{GLINT_ITR(2, 0), 0x00000fff, int_elements,
+ GLINT_ITR(2, 1) - GLINT_ITR(2, 0)},
+ {GLINT_CTL, 0xffff0001, 1, 0}
+ };
+ int i;
+
+ netdev_dbg(netdev, "Register test\n");
+ for (i = 0; i < ARRAY_SIZE(ice_reg_list); ++i) {
+ u32 j;
+
+ for (j = 0; j < ice_reg_list[i].elem_num; ++j) {
+ u32 mask = ice_reg_list[i].mask;
+ u32 reg = ice_reg_list[i].address +
+ (j * ice_reg_list[i].elem_size);
+
+ /* bail on failure (non-zero return) */
+ if (ice_reg_pattern_test(hw, reg, mask))
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ice_lbtest_prepare_rings - configure Tx/Rx test rings
+ * @vsi: pointer to the VSI structure
+ *
+ * Function configures rings of a VSI for loopback test without
+ * enabling interrupts or informing the kernel about new queues.
+ *
+ * Returns 0 on success, negative on failure.
+ */
+static int ice_lbtest_prepare_rings(struct ice_vsi *vsi)
+{
+ int status;
+
+ status = ice_vsi_setup_tx_rings(vsi);
+ if (status)
+ goto err_setup_tx_ring;
+
+ status = ice_vsi_setup_rx_rings(vsi);
+ if (status)
+ goto err_setup_rx_ring;
+
+ status = ice_vsi_cfg(vsi);
+ if (status)
+ goto err_setup_rx_ring;
+
+ status = ice_vsi_start_rx_rings(vsi);
+ if (status)
+ goto err_start_rx_ring;
+
+ return status;
+
+err_start_rx_ring:
+ ice_vsi_free_rx_rings(vsi);
+err_setup_rx_ring:
+ ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
+err_setup_tx_ring:
+ ice_vsi_free_tx_rings(vsi);
+
+ return status;
+}
+
+/**
+ * ice_lbtest_disable_rings - disable Tx/Rx test rings after loopback test
+ * @vsi: pointer to the VSI structure
+ *
+ * Function stops and frees VSI rings after a loopback test.
+ * Returns 0 on success, negative on failure.
+ */
+static int ice_lbtest_disable_rings(struct ice_vsi *vsi)
+{
+ int status;
+
+ status = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
+ if (status)
+ netdev_err(vsi->netdev, "Failed to stop Tx rings, VSI %d error %d\n",
+ vsi->vsi_num, status);
+
+ status = ice_vsi_stop_rx_rings(vsi);
+ if (status)
+ netdev_err(vsi->netdev, "Failed to stop Rx rings, VSI %d error %d\n",
+ vsi->vsi_num, status);
+
+ ice_vsi_free_tx_rings(vsi);
+ ice_vsi_free_rx_rings(vsi);
+
+ return status;
+}
+
+/**
+ * ice_lbtest_create_frame - create test packet
+ * @pf: pointer to the PF structure
+ * @ret_data: allocated frame buffer
+ * @size: size of the packet data
+ *
+ * Function allocates a frame with a test pattern on specific offsets.
+ * Returns 0 on success, non-zero on failure.
+ */
+static int ice_lbtest_create_frame(struct ice_pf *pf, u8 **ret_data, u16 size)
+{
+ u8 *data;
+
+ if (!pf)
+ return -EINVAL;
+
+ data = devm_kzalloc(&pf->pdev->dev, size, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ /* Since the ethernet test frame should always be at least
+ * 64 bytes long, fill some octets in the payload with test data.
+ */
+ memset(data, 0xFF, size);
+ data[32] = 0xDE;
+ data[42] = 0xAD;
+ data[44] = 0xBE;
+ data[46] = 0xEF;
+
+ *ret_data = data;
+
+ return 0;
+}
+
+/**
+ * ice_lbtest_check_frame - verify received loopback frame
+ * @frame: pointer to the raw packet data
+ *
+ * Function verifies received test frame with a pattern.
+ * Returns true if frame matches the pattern, false otherwise.
+ */
+static bool ice_lbtest_check_frame(u8 *frame)
+{
+ /* Validate bytes of a frame under offsets chosen earlier */
+ if (frame[32] == 0xDE &&
+ frame[42] == 0xAD &&
+ frame[44] == 0xBE &&
+ frame[46] == 0xEF &&
+ frame[48] == 0xFF)
+ return true;
+
+ return false;
+}
+
+/**
+ * ice_diag_send - send test frames to the test ring
+ * @tx_ring: pointer to the transmit ring
+ * @data: pointer to the raw packet data
+ * @size: size of the packet to send
+ *
+ * Function sends loopback packets on a test Tx ring.
+ */
+static int ice_diag_send(struct ice_ring *tx_ring, u8 *data, u16 size)
+{
+ struct ice_tx_desc *tx_desc;
+ struct ice_tx_buf *tx_buf;
+ dma_addr_t dma;
+ u64 td_cmd;
+
+ tx_desc = ICE_TX_DESC(tx_ring, tx_ring->next_to_use);
+ tx_buf = &tx_ring->tx_buf[tx_ring->next_to_use];
+
+ dma = dma_map_single(tx_ring->dev, data, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(tx_ring->dev, dma))
+ return -EINVAL;
+
+ tx_desc->buf_addr = cpu_to_le64(dma);
+
+ /* These flags are required for a descriptor to be pushed out */
+ td_cmd = (u64)(ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS);
+ tx_desc->cmd_type_offset_bsz =
+ cpu_to_le64(ICE_TX_DESC_DTYPE_DATA |
+ (td_cmd << ICE_TXD_QW1_CMD_S) |
+ ((u64)0 << ICE_TXD_QW1_OFFSET_S) |
+ ((u64)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
+ ((u64)0 << ICE_TXD_QW1_L2TAG1_S));
+
+ tx_buf->next_to_watch = tx_desc;
+
+ /* Force memory write to complete before letting h/w know
+ * there are new descriptors to fetch.
+ */
+ wmb();
+
+ tx_ring->next_to_use++;
+ if (tx_ring->next_to_use >= tx_ring->count)
+ tx_ring->next_to_use = 0;
+
+ writel_relaxed(tx_ring->next_to_use, tx_ring->tail);
+
+ /* Wait until the packets get transmitted to the receive queue. */
+ usleep_range(1000, 2000);
+ dma_unmap_single(tx_ring->dev, dma, size, DMA_TO_DEVICE);
+
+ return 0;
+}
+
+#define ICE_LB_FRAME_SIZE 64
+/**
+ * ice_lbtest_receive_frames - receive and verify test frames
+ * @rx_ring: pointer to the receive ring
+ *
+ * Function receives loopback packets and verifies their correctness.
+ * Returns number of received valid frames.
+ */
+static int ice_lbtest_receive_frames(struct ice_ring *rx_ring)
+{
+ struct ice_rx_buf *rx_buf;
+ int valid_frames, i;
+ u8 *received_buf;
+
+ valid_frames = 0;
+
+ for (i = 0; i < rx_ring->count; i++) {
+ union ice_32b_rx_flex_desc *rx_desc;
+
+ rx_desc = ICE_RX_DESC(rx_ring, i);
+
+ if (!(rx_desc->wb.status_error0 &
+ cpu_to_le16(ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS)))
+ continue;
+
+ rx_buf = &rx_ring->rx_buf[i];
+ received_buf = page_address(rx_buf->page);
+
+ if (ice_lbtest_check_frame(received_buf))
+ valid_frames++;
+ }
+
+ return valid_frames;
+}
+
+/**
+ * ice_loopback_test - perform a loopback test on a given net_device
+ * @netdev: network interface device structure
+ *
+ * This function performs one of the self-tests required by ethtool.
+ * Returns 0 on success, non-zero on failure.
+ */
+static u64 ice_loopback_test(struct net_device *netdev)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *orig_vsi = np->vsi, *test_vsi;
+ struct ice_pf *pf = orig_vsi->back;
+ struct ice_ring *tx_ring, *rx_ring;
+ u8 broadcast[ETH_ALEN], ret = 0;
+ int num_frames, valid_frames;
+ LIST_HEAD(tmp_list);
+ u8 *tx_frame;
+ int i;
+
+ netdev_info(netdev, "loopback test\n");
+
+ test_vsi = ice_lb_vsi_setup(pf, pf->hw.port_info);
+ if (!test_vsi) {
+		netdev_err(netdev, "Failed to create a VSI for the loopback test\n");
+ return 1;
+ }
+
+ test_vsi->netdev = netdev;
+ tx_ring = test_vsi->tx_rings[0];
+ rx_ring = test_vsi->rx_rings[0];
+
+ if (ice_lbtest_prepare_rings(test_vsi)) {
+ ret = 2;
+ goto lbtest_vsi_close;
+ }
+
+ if (ice_alloc_rx_bufs(rx_ring, rx_ring->count)) {
+ ret = 3;
+ goto lbtest_rings_dis;
+ }
+
+ /* Enable MAC loopback in firmware */
+ if (ice_aq_set_mac_loopback(&pf->hw, true, NULL)) {
+ ret = 4;
+ goto lbtest_mac_dis;
+ }
+
+ /* Test VSI needs to receive broadcast packets */
+ eth_broadcast_addr(broadcast);
+ if (ice_add_mac_to_list(test_vsi, &tmp_list, broadcast)) {
+ ret = 5;
+ goto lbtest_mac_dis;
+ }
+
+ if (ice_add_mac(&pf->hw, &tmp_list)) {
+ ret = 6;
+ goto free_mac_list;
+ }
+
+ if (ice_lbtest_create_frame(pf, &tx_frame, ICE_LB_FRAME_SIZE)) {
+ ret = 7;
+ goto remove_mac_filters;
+ }
+
+ num_frames = min_t(int, tx_ring->count, 32);
+ for (i = 0; i < num_frames; i++) {
+ if (ice_diag_send(tx_ring, tx_frame, ICE_LB_FRAME_SIZE)) {
+ ret = 8;
+ goto lbtest_free_frame;
+ }
+ }
+
+ valid_frames = ice_lbtest_receive_frames(rx_ring);
+ if (!valid_frames)
+ ret = 9;
+ else if (valid_frames != num_frames)
+ ret = 10;
+
+lbtest_free_frame:
+ devm_kfree(&pf->pdev->dev, tx_frame);
+remove_mac_filters:
+ if (ice_remove_mac(&pf->hw, &tmp_list))
+		netdev_err(netdev, "Could not remove MAC filter for the test VSI\n");
+free_mac_list:
+ ice_free_fltr_list(&pf->pdev->dev, &tmp_list);
+lbtest_mac_dis:
+ /* Disable MAC loopback after the test is completed. */
+ if (ice_aq_set_mac_loopback(&pf->hw, false, NULL))
+ netdev_err(netdev, "Could not disable MAC loopback\n");
+lbtest_rings_dis:
+ if (ice_lbtest_disable_rings(test_vsi))
+ netdev_err(netdev, "Could not disable test rings\n");
+lbtest_vsi_close:
+ test_vsi->netdev = NULL;
+ if (ice_vsi_release(test_vsi))
+		netdev_err(netdev, "Failed to remove the test VSI\n");
+
+ return ret;
+}
+
+/**
+ * ice_intr_test - perform an interrupt test on a given net_device
+ * @netdev: network interface device structure
+ *
+ * This function performs one of the self-tests required by ethtool.
+ * Returns 0 on success, non-zero on failure.
+ */
+static u64 ice_intr_test(struct net_device *netdev)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_pf *pf = np->vsi->back;
+ u16 swic_old = pf->sw_int_count;
+
+ netdev_info(netdev, "interrupt test\n");
+
+ wr32(&pf->hw, GLINT_DYN_CTL(pf->oicr_idx),
+ GLINT_DYN_CTL_SW_ITR_INDX_M |
+ GLINT_DYN_CTL_INTENA_MSK_M |
+ GLINT_DYN_CTL_SWINT_TRIG_M);
+
+ usleep_range(1000, 2000);
+ return (swic_old == pf->sw_int_count);
+}
+
+/**
+ * ice_self_test - handler function for performing a self-test by ethtool
+ * @netdev: network interface device structure
+ * @eth_test: ethtool_test structure
+ * @data: required by ethtool.self_test
+ *
+ * This function is called after invoking 'ethtool -t devname' command where
+ * devname is the name of the network device on which ethtool should operate.
+ * It performs a set of self-tests to check if a device works properly.
+ */
+static void
+ice_self_test(struct net_device *netdev, struct ethtool_test *eth_test,
+ u64 *data)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ bool if_running = netif_running(netdev);
+ struct ice_pf *pf = np->vsi->back;
+
+ if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+ netdev_info(netdev, "offline testing starting\n");
+
+ set_bit(__ICE_TESTING, pf->state);
+
+ if (ice_active_vfs(pf)) {
+ dev_warn(&pf->pdev->dev,
+ "Please take active VFs and Netqueues offline and restart the adapter before running NIC diagnostics\n");
+ data[ICE_ETH_TEST_REG] = 1;
+ data[ICE_ETH_TEST_EEPROM] = 1;
+ data[ICE_ETH_TEST_INTR] = 1;
+ data[ICE_ETH_TEST_LOOP] = 1;
+ data[ICE_ETH_TEST_LINK] = 1;
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ clear_bit(__ICE_TESTING, pf->state);
+ goto skip_ol_tests;
+ }
+ /* If the device is online then take it offline */
+ if (if_running)
+ /* indicate we're in test mode */
+ ice_stop(netdev);
+
+ data[ICE_ETH_TEST_LINK] = ice_link_test(netdev);
+ data[ICE_ETH_TEST_EEPROM] = ice_eeprom_test(netdev);
+ data[ICE_ETH_TEST_INTR] = ice_intr_test(netdev);
+ data[ICE_ETH_TEST_LOOP] = ice_loopback_test(netdev);
+ data[ICE_ETH_TEST_REG] = ice_reg_test(netdev);
+
+ if (data[ICE_ETH_TEST_LINK] ||
+ data[ICE_ETH_TEST_EEPROM] ||
+ data[ICE_ETH_TEST_LOOP] ||
+ data[ICE_ETH_TEST_INTR] ||
+ data[ICE_ETH_TEST_REG])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ clear_bit(__ICE_TESTING, pf->state);
+
+ if (if_running) {
+ int status = ice_open(netdev);
+
+ if (status) {
+ dev_err(&pf->pdev->dev,
+					"Could not open device %s, err %d\n",
+ pf->int_name, status);
+ }
+ }
+ } else {
+ /* Online tests */
+ netdev_info(netdev, "online testing starting\n");
+
+ data[ICE_ETH_TEST_LINK] = ice_link_test(netdev);
+ if (data[ICE_ETH_TEST_LINK])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		/* Offline-only tests, not run online; pass by default */
+ data[ICE_ETH_TEST_REG] = 0;
+ data[ICE_ETH_TEST_EEPROM] = 0;
+ data[ICE_ETH_TEST_INTR] = 0;
+ data[ICE_ETH_TEST_LOOP] = 0;
+ }
+
+skip_ol_tests:
+ netdev_info(netdev, "testing finished\n");
+}
+
static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
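The self-test machinery above only becomes reachable once it is plugged into the driver's struct ethtool_ops; that wiring is outside the hunks quoted here, so the sketch below is an assumption (example_ice_ethtool_ops is a hypothetical name). With it in place, 'ethtool -t <iface> offline' runs the register, EEPROM, interrupt, loopback and link tests and reports results in the ice_gstrings_test order:

static const struct ethtool_ops example_ice_ethtool_ops = {
	/* existing callbacks elided */
	.get_strings	= ice_get_strings,
	.get_sset_count	= ice_get_sset_count,
	.self_test	= ice_self_test,
};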
@@ -295,17 +881,17 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
ice_for_each_alloc_txq(vsi, i) {
snprintf(p, ETH_GSTRING_LEN,
- "tx-queue-%u.tx_packets", i);
+ "tx_queue_%u_packets", i);
p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN, "tx-queue-%u.tx_bytes", i);
+ snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_bytes", i);
p += ETH_GSTRING_LEN;
}
ice_for_each_alloc_rxq(vsi, i) {
snprintf(p, ETH_GSTRING_LEN,
- "rx-queue-%u.rx_packets", i);
+ "rx_queue_%u_packets", i);
p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN, "rx-queue-%u.rx_bytes", i);
+ snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_bytes", i);
p += ETH_GSTRING_LEN;
}
@@ -320,21 +906,24 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
snprintf(p, ETH_GSTRING_LEN,
- "port.tx-priority-%u-xon", i);
+ "tx_priority_%u_xon.nic", i);
p += ETH_GSTRING_LEN;
snprintf(p, ETH_GSTRING_LEN,
- "port.tx-priority-%u-xoff", i);
+ "tx_priority_%u_xoff.nic", i);
p += ETH_GSTRING_LEN;
}
for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
snprintf(p, ETH_GSTRING_LEN,
- "port.rx-priority-%u-xon", i);
+ "rx_priority_%u_xon.nic", i);
p += ETH_GSTRING_LEN;
snprintf(p, ETH_GSTRING_LEN,
- "port.rx-priority-%u-xoff", i);
+ "rx_priority_%u_xoff.nic", i);
p += ETH_GSTRING_LEN;
}
break;
+ case ETH_SS_TEST:
+ memcpy(data, ice_gstrings_test, ICE_TEST_LEN * ETH_GSTRING_LEN);
+ break;
case ETH_SS_PRIV_FLAGS:
for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++) {
snprintf(p, ETH_GSTRING_LEN, "%s",
@@ -371,6 +960,185 @@ ice_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state)
}
/**
+ * ice_set_fec_cfg - Set link FEC options
+ * @netdev: network interface device structure
+ * @req_fec: FEC mode to configure
+ */
+static int ice_set_fec_cfg(struct net_device *netdev, enum ice_fec_mode req_fec)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_aqc_set_phy_cfg_data config = { 0 };
+ struct ice_aqc_get_phy_caps_data *caps;
+ struct ice_vsi *vsi = np->vsi;
+ u8 sw_cfg_caps, sw_cfg_fec;
+ struct ice_port_info *pi;
+ enum ice_status status;
+ int err = 0;
+
+ pi = vsi->port_info;
+ if (!pi)
+ return -EOPNOTSUPP;
+
+ /* Changing the FEC parameters is not supported if not the PF VSI */
+ if (vsi->type != ICE_VSI_PF) {
+ netdev_info(netdev, "Changing FEC parameters only supported for PF VSI\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* Get last SW configuration */
+ caps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*caps), GFP_KERNEL);
+ if (!caps)
+ return -ENOMEM;
+
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG,
+ caps, NULL);
+ if (status) {
+ err = -EAGAIN;
+ goto done;
+ }
+
+ /* Copy SW configuration returned from PHY caps to PHY config */
+ ice_copy_phy_caps_to_cfg(caps, &config);
+ sw_cfg_caps = caps->caps;
+ sw_cfg_fec = caps->link_fec_options;
+
+	/* Get topology caps, then copy PHY FEC topology caps to PHY config */
+ memset(caps, 0, sizeof(*caps));
+
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
+ caps, NULL);
+ if (status) {
+ err = -EAGAIN;
+ goto done;
+ }
+
+ config.caps |= (caps->caps & ICE_AQC_PHY_EN_AUTO_FEC);
+ config.link_fec_opt = caps->link_fec_options;
+
+ ice_cfg_phy_fec(&config, req_fec);
+
+ /* If FEC mode has changed, then set PHY configuration and enable AN. */
+ if ((config.caps & ICE_AQ_PHY_ENA_AUTO_FEC) !=
+ (sw_cfg_caps & ICE_AQC_PHY_EN_AUTO_FEC) ||
+ config.link_fec_opt != sw_cfg_fec) {
+ if (caps->caps & ICE_AQC_PHY_AN_MODE)
+ config.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
+
+ status = ice_aq_set_phy_cfg(pi->hw, pi->lport, &config, NULL);
+
+ if (status)
+ err = -EAGAIN;
+ }
+
+done:
+ devm_kfree(&vsi->back->pdev->dev, caps);
+ return err;
+}
+
+/**
+ * ice_set_fecparam - Set FEC link options
+ * @netdev: network interface device structure
+ * @fecparam: Ethtool structure to retrieve FEC parameters
+ */
+static int
+ice_set_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ enum ice_fec_mode fec;
+
+ switch (fecparam->fec) {
+ case ETHTOOL_FEC_AUTO:
+ fec = ICE_FEC_AUTO;
+ break;
+ case ETHTOOL_FEC_RS:
+ fec = ICE_FEC_RS;
+ break;
+ case ETHTOOL_FEC_BASER:
+ fec = ICE_FEC_BASER;
+ break;
+ case ETHTOOL_FEC_OFF:
+ case ETHTOOL_FEC_NONE:
+ fec = ICE_FEC_NONE;
+ break;
+ default:
+ dev_warn(&vsi->back->pdev->dev, "Unsupported FEC mode: %d\n",
+ fecparam->fec);
+ return -EINVAL;
+ }
+
+ return ice_set_fec_cfg(netdev, fec);
+}
+
+/**
+ * ice_get_fecparam - Get link FEC options
+ * @netdev: network interface device structure
+ * @fecparam: Ethtool structure to retrieve FEC parameters
+ */
+static int
+ice_get_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_aqc_get_phy_caps_data *caps;
+ struct ice_link_status *link_info;
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_port_info *pi;
+ enum ice_status status;
+ int err = 0;
+
+ pi = vsi->port_info;
+
+ if (!pi)
+ return -EOPNOTSUPP;
+ link_info = &pi->phy.link_info;
+
+ /* Set FEC mode based on negotiated link info */
+ switch (link_info->fec_info) {
+ case ICE_AQ_LINK_25G_KR_FEC_EN:
+ fecparam->active_fec = ETHTOOL_FEC_BASER;
+ break;
+ case ICE_AQ_LINK_25G_RS_528_FEC_EN:
+ /* fall through */
+ case ICE_AQ_LINK_25G_RS_544_FEC_EN:
+ fecparam->active_fec = ETHTOOL_FEC_RS;
+ break;
+ default:
+ fecparam->active_fec = ETHTOOL_FEC_OFF;
+ break;
+ }
+
+ caps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*caps), GFP_KERNEL);
+ if (!caps)
+ return -ENOMEM;
+
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
+ caps, NULL);
+ if (status) {
+ err = -EAGAIN;
+ goto done;
+ }
+
+ /* Set supported/configured FEC modes based on PHY capability */
+ if (caps->caps & ICE_AQC_PHY_EN_AUTO_FEC)
+ fecparam->fec |= ETHTOOL_FEC_AUTO;
+ if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN ||
+ caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
+ caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN ||
+ caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
+ fecparam->fec |= ETHTOOL_FEC_BASER;
+ if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
+ caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ ||
+ caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)
+ fecparam->fec |= ETHTOOL_FEC_RS;
+ if (caps->link_fec_options == 0)
+ fecparam->fec |= ETHTOOL_FEC_OFF;
+
+done:
+ devm_kfree(&vsi->back->pdev->dev, caps);
+ return err;
+}
+
+/**
* ice_get_priv_flags - report device private flags
* @netdev: network interface device structure
*
@@ -433,10 +1201,11 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
bitmap_xor(change_flags, pf->flags, orig_flags, ICE_PF_FLAGS_NBITS);
- if (test_bit(ICE_FLAG_DISABLE_FW_LLDP, change_flags)) {
- if (test_bit(ICE_FLAG_DISABLE_FW_LLDP, pf->flags)) {
+ if (test_bit(ICE_FLAG_ENABLE_FW_LLDP, change_flags)) {
+ if (!test_bit(ICE_FLAG_ENABLE_FW_LLDP, pf->flags)) {
enum ice_status status;
+ /* Disable FW LLDP engine */
status = ice_aq_cfg_lldp_mib_change(&pf->hw, false,
NULL);
/* If unregistering for LLDP events fails, this is
@@ -450,7 +1219,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
/* The AQ call to stop the FW LLDP agent will generate
* an error if the agent is already stopped.
*/
- status = ice_aq_stop_lldp(&pf->hw, true, NULL);
+ status = ice_aq_stop_lldp(&pf->hw, true, true, NULL);
if (status)
dev_warn(&pf->pdev->dev,
"Fail to stop LLDP agent\n");
@@ -458,9 +1227,14 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
* will likely not need DCB, so failure to init is
* not a concern of ethtool
*/
- status = ice_init_pf_dcb(pf);
+ status = ice_init_pf_dcb(pf, true);
if (status)
dev_warn(&pf->pdev->dev, "Fail to init DCB\n");
+
+ /* Forward LLDP packets to default VSI so that they
+ * are passed up the stack
+ */
+ ice_cfg_sw_lldp(vsi, false, true);
} else {
enum ice_status status;
bool dcbx_agent_status;
@@ -468,12 +1242,12 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
/* AQ command to start FW LLDP agent will return an
* error if the agent is already started
*/
- status = ice_aq_start_lldp(&pf->hw, NULL);
+ status = ice_aq_start_lldp(&pf->hw, true, NULL);
if (status)
dev_warn(&pf->pdev->dev,
"Fail to start LLDP Agent\n");
- /* AQ command to start FW DCBx agent will fail if
+ /* AQ command to start FW DCBX agent will fail if
* the agent is already started
*/
status = ice_aq_start_stop_dcbx(&pf->hw, true,
@@ -491,15 +1265,14 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
* registration/init failed but do not return error
* state to ethtool
*/
- status = ice_aq_cfg_lldp_mib_change(&pf->hw, false,
- NULL);
- if (status)
- dev_dbg(&pf->pdev->dev,
- "Fail to reg for MIB change\n");
-
- status = ice_init_pf_dcb(pf);
+ status = ice_init_pf_dcb(pf, true);
if (status)
dev_dbg(&pf->pdev->dev, "Fail to init DCB\n");
+
+ /* Remove rule to direct LLDP packets to default VSI.
+ * The FW LLDP engine will now be consuming them.
+ */
+ ice_cfg_sw_lldp(vsi, false, false);
}
}
clear_bit(ICE_FLAG_ETHTOOL_CTXT, pf->flags);
@@ -529,6 +1302,8 @@ static int ice_get_sset_count(struct net_device *netdev, int sset)
* not safe.
*/
return ICE_ALL_STATS_LEN(netdev);
+ case ETH_SS_TEST:
+ return ICE_TEST_LEN;
case ETH_SS_PRIV_FLAGS:
return ICE_PRIV_FLAG_ARRAY_SIZE;
default:
@@ -628,7 +1403,8 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
phy_types_low & ICE_PHY_TYPE_LOW_100M_SGMII) {
ethtool_link_ksettings_add_link_mode(ks, supported,
100baseT_Full);
- if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100MB)
+ if (!hw_link_info->req_speeds ||
+ hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100MB)
ethtool_link_ksettings_add_link_mode(ks, advertising,
100baseT_Full);
}
@@ -636,14 +1412,16 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
phy_types_low & ICE_PHY_TYPE_LOW_1G_SGMII) {
ethtool_link_ksettings_add_link_mode(ks, supported,
1000baseT_Full);
- if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_1000MB)
+ if (!hw_link_info->req_speeds ||
+ hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_1000MB)
ethtool_link_ksettings_add_link_mode(ks, advertising,
1000baseT_Full);
}
if (phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_KX) {
ethtool_link_ksettings_add_link_mode(ks, supported,
1000baseKX_Full);
- if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_1000MB)
+ if (!hw_link_info->req_speeds ||
+ hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_1000MB)
ethtool_link_ksettings_add_link_mode(ks, advertising,
1000baseKX_Full);
}
@@ -651,14 +1429,16 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_LX) {
ethtool_link_ksettings_add_link_mode(ks, supported,
1000baseX_Full);
- if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_1000MB)
+ if (!hw_link_info->req_speeds ||
+ hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_1000MB)
ethtool_link_ksettings_add_link_mode(ks, advertising,
1000baseX_Full);
}
if (phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_T) {
ethtool_link_ksettings_add_link_mode(ks, supported,
2500baseT_Full);
- if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_2500MB)
+ if (!hw_link_info->req_speeds ||
+ hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_2500MB)
ethtool_link_ksettings_add_link_mode(ks, advertising,
2500baseT_Full);
}
@@ -666,7 +1446,8 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_KX) {
ethtool_link_ksettings_add_link_mode(ks, supported,
2500baseX_Full);
- if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_2500MB)
+ if (!hw_link_info->req_speeds ||
+ hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_2500MB)
ethtool_link_ksettings_add_link_mode(ks, advertising,
2500baseX_Full);
}
@@ -674,7 +1455,8 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_KR) {
ethtool_link_ksettings_add_link_mode(ks, supported,
5000baseT_Full);
- if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_5GB)
+ if (!hw_link_info->req_speeds ||
+ hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_5GB)
ethtool_link_ksettings_add_link_mode(ks, advertising,
5000baseT_Full);
}
@@ -684,28 +1466,32 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
phy_types_low & ICE_PHY_TYPE_LOW_10G_SFI_C2C) {
ethtool_link_ksettings_add_link_mode(ks, supported,
10000baseT_Full);
- if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB)
+ if (!hw_link_info->req_speeds ||
+ hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB)
ethtool_link_ksettings_add_link_mode(ks, advertising,
10000baseT_Full);
}
if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_KR_CR1) {
ethtool_link_ksettings_add_link_mode(ks, supported,
10000baseKR_Full);
- if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB)
+ if (!hw_link_info->req_speeds ||
+ hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB)
ethtool_link_ksettings_add_link_mode(ks, advertising,
10000baseKR_Full);
}
if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_SR) {
ethtool_link_ksettings_add_link_mode(ks, supported,
10000baseSR_Full);
- if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB)
+ if (!hw_link_info->req_speeds ||
+ hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB)
ethtool_link_ksettings_add_link_mode(ks, advertising,
10000baseSR_Full);
}
if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_LR) {
ethtool_link_ksettings_add_link_mode(ks, supported,
10000baseLR_Full);
- if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB)
+ if (!hw_link_info->req_speeds ||
+ hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB)
ethtool_link_ksettings_add_link_mode(ks, advertising,
10000baseLR_Full);
}
@@ -717,7 +1503,8 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
phy_types_low & ICE_PHY_TYPE_LOW_25G_AUI_C2C) {
ethtool_link_ksettings_add_link_mode(ks, supported,
25000baseCR_Full);
- if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_25GB)
+ if (!hw_link_info->req_speeds ||
+ hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_25GB)
ethtool_link_ksettings_add_link_mode(ks, advertising,
25000baseCR_Full);
}
@@ -725,7 +1512,8 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_LR) {
ethtool_link_ksettings_add_link_mode(ks, supported,
25000baseSR_Full);
- if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_25GB)
+ if (!hw_link_info->req_speeds ||
+ hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_25GB)
ethtool_link_ksettings_add_link_mode(ks, advertising,
25000baseSR_Full);
}
@@ -734,14 +1522,16 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR1) {
ethtool_link_ksettings_add_link_mode(ks, supported,
25000baseKR_Full);
- if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_25GB)
+ if (!hw_link_info->req_speeds ||
+ hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_25GB)
ethtool_link_ksettings_add_link_mode(ks, advertising,
25000baseKR_Full);
}
if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_KR4) {
ethtool_link_ksettings_add_link_mode(ks, supported,
40000baseKR4_Full);
- if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB)
+ if (!hw_link_info->req_speeds ||
+ hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB)
ethtool_link_ksettings_add_link_mode(ks, advertising,
40000baseKR4_Full);
}
@@ -750,21 +1540,24 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
phy_types_low & ICE_PHY_TYPE_LOW_40G_XLAUI) {
ethtool_link_ksettings_add_link_mode(ks, supported,
40000baseCR4_Full);
- if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB)
+ if (!hw_link_info->req_speeds ||
+ hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB)
ethtool_link_ksettings_add_link_mode(ks, advertising,
40000baseCR4_Full);
}
if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_SR4) {
ethtool_link_ksettings_add_link_mode(ks, supported,
40000baseSR4_Full);
- if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB)
+ if (!hw_link_info->req_speeds ||
+ hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB)
ethtool_link_ksettings_add_link_mode(ks, advertising,
40000baseSR4_Full);
}
if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_LR4) {
ethtool_link_ksettings_add_link_mode(ks, supported,
40000baseLR4_Full);
- if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB)
+ if (!hw_link_info->req_speeds ||
+ hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB)
ethtool_link_ksettings_add_link_mode(ks, advertising,
40000baseLR4_Full);
}
@@ -779,7 +1572,8 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
phy_types_low & ICE_PHY_TYPE_LOW_50G_AUI1) {
ethtool_link_ksettings_add_link_mode(ks, supported,
50000baseCR2_Full);
- if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_50GB)
+ if (!hw_link_info->req_speeds ||
+ hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_50GB)
ethtool_link_ksettings_add_link_mode(ks, advertising,
50000baseCR2_Full);
}
@@ -787,7 +1581,8 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4) {
ethtool_link_ksettings_add_link_mode(ks, supported,
50000baseKR2_Full);
- if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_50GB)
+ if (!hw_link_info->req_speeds ||
+ hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_50GB)
ethtool_link_ksettings_add_link_mode(ks, advertising,
50000baseKR2_Full);
}
@@ -797,7 +1592,8 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_LR) {
ethtool_link_ksettings_add_link_mode(ks, supported,
50000baseSR2_Full);
- if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_50GB)
+ if (!hw_link_info->req_speeds ||
+ hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_50GB)
ethtool_link_ksettings_add_link_mode(ks, advertising,
50000baseSR2_Full);
}
@@ -814,7 +1610,8 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
phy_types_high & ICE_PHY_TYPE_HIGH_100G_AUI2) {
ethtool_link_ksettings_add_link_mode(ks, supported,
100000baseCR4_Full);
- if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB)
+ if (!hw_link_info->req_speeds ||
+ hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB)
need_add_adv_mode = true;
}
if (need_add_adv_mode) {
@@ -826,7 +1623,8 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_SR2) {
ethtool_link_ksettings_add_link_mode(ks, supported,
100000baseSR4_Full);
- if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB)
+ if (!hw_link_info->req_speeds ||
+ hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB)
need_add_adv_mode = true;
}
if (need_add_adv_mode) {
@@ -838,7 +1636,8 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_DR) {
ethtool_link_ksettings_add_link_mode(ks, supported,
100000baseLR4_ER4_Full);
- if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB)
+ if (!hw_link_info->req_speeds ||
+ hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB)
need_add_adv_mode = true;
}
if (need_add_adv_mode) {
@@ -851,7 +1650,8 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
phy_types_high & ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4) {
ethtool_link_ksettings_add_link_mode(ks, supported,
100000baseKR4_Full);
- if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB)
+ if (!hw_link_info->req_speeds ||
+ hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB)
need_add_adv_mode = true;
}
if (need_add_adv_mode)
@@ -1275,6 +2075,7 @@ ice_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *ks)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_aqc_get_phy_caps_data *caps;
struct ice_link_status *hw_link_info;
struct ice_vsi *vsi = np->vsi;
@@ -1345,6 +2146,40 @@ ice_get_link_ksettings(struct net_device *netdev,
break;
}
+ caps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*caps), GFP_KERNEL);
+ if (!caps)
+ goto done;
+
+ if (ice_aq_get_phy_caps(vsi->port_info, false, ICE_AQC_REPORT_TOPO_CAP,
+ caps, NULL))
+ netdev_info(netdev, "Get phy capability failed.\n");
+
+ /* Set supported FEC modes based on PHY capability */
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE);
+
+ if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN ||
+ caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN)
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
+ if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
+
+ if (ice_aq_get_phy_caps(vsi->port_info, false, ICE_AQC_REPORT_SW_CFG,
+ caps, NULL))
+ netdev_info(netdev, "Get phy capability failed.\n");
+
+ /* Set advertised FEC modes based on PHY capability */
+ ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE);
+
+ if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
+ caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ FEC_BASER);
+ if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
+ caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
+ ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
+
+done:
+ devm_kfree(&vsi->back->pdev->dev, caps);
return 0;
}
@@ -2371,8 +3206,7 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
if (ec->rx_coalesce_usecs_high != rc->ring->q_vector->intrl) {
rc->ring->q_vector->intrl = ec->rx_coalesce_usecs_high;
- wr32(&pf->hw, GLINT_RATE(vsi->hw_base_vector +
- rc->ring->q_vector->v_idx),
+ wr32(&pf->hw, GLINT_RATE(rc->ring->q_vector->reg_idx),
ice_intrl_usec_to_reg(ec->rx_coalesce_usecs_high,
pf->hw.intrl_gran));
}
@@ -2533,6 +3367,7 @@ static const struct ethtool_ops ice_ethtool_ops = {
.get_regs = ice_get_regs,
.get_msglevel = ice_get_msglevel,
.set_msglevel = ice_set_msglevel,
+ .self_test = ice_self_test,
.get_link = ethtool_op_get_link,
.get_eeprom_len = ice_get_eeprom_len,
.get_eeprom = ice_get_eeprom,
@@ -2557,6 +3392,8 @@ static const struct ethtool_ops ice_ethtool_ops = {
.get_ts_info = ethtool_op_get_ts_info,
.get_per_queue_coalesce = ice_get_per_q_coalesce,
.set_per_queue_coalesce = ice_set_per_q_coalesce,
+ .get_fecparam = ice_get_fecparam,
+ .set_fecparam = ice_set_fecparam,
};
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index ec25f26069b0..6c5ce05742b1 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -6,6 +6,9 @@
#ifndef _ICE_HW_AUTOGEN_H_
#define _ICE_HW_AUTOGEN_H_
+#define PF0INT_ITR_0(_i) (0x03000004 + ((_i) * 4096))
+#define PF0INT_ITR_1(_i) (0x03000008 + ((_i) * 4096))
+#define PF0INT_ITR_2(_i) (0x0300000C + ((_i) * 4096))
#define QTX_COMM_DBELL(_DBQM) (0x002C0000 + ((_DBQM) * 4))
#define QTX_COMM_HEAD(_DBQM) (0x000E0000 + ((_DBQM) * 4))
#define QTX_COMM_HEAD_HEAD_S 0
@@ -155,6 +158,7 @@
#define PFINT_OICR_HMC_ERR_M BIT(26)
#define PFINT_OICR_PE_CRITERR_M BIT(28)
#define PFINT_OICR_VFLR_M BIT(29)
+#define PFINT_OICR_SWINT_M BIT(31)
#define PFINT_OICR_CTL 0x0016CA80
#define PFINT_OICR_CTL_MSIX_INDX_M ICE_M(0x7FF, 0)
#define PFINT_OICR_CTL_ITR_INDX_S 11
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index fbf1eba0cc2a..a19f5920733b 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -137,6 +137,8 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
* for PF or EMP this field should be set to zero
*/
switch (vsi->type) {
+ case ICE_VSI_LB:
+ /* fall through */
case ICE_VSI_PF:
tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
break;
@@ -251,6 +253,10 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
if (!vsi->rx_rings)
goto err_rxrings;
+ /* There is no need to allocate q_vectors for a loopback VSI. */
+ if (vsi->type == ICE_VSI_LB)
+ return 0;
+
/* allocate memory for q_vector pointers */
vsi->q_vectors = devm_kcalloc(&pf->pdev->dev, vsi->num_q_vectors,
sizeof(*vsi->q_vectors), GFP_KERNEL);
@@ -275,6 +281,8 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
{
switch (vsi->type) {
case ICE_VSI_PF:
+ /* fall through */
+ case ICE_VSI_LB:
vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC;
vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC;
break;
@@ -313,10 +321,14 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
vsi->alloc_rxq = vf->num_vf_qs;
/* pf->num_vf_msix includes (VF miscellaneous vector +
* data queue interrupts). Since vsi->num_q_vectors is number
- * of queues vectors, subtract 1 from the original vector
- * count
+ * of queue vectors, subtract 1 (ICE_NONQ_VECS_VF) from the
+ * original vector count
*/
- vsi->num_q_vectors = pf->num_vf_msix - 1;
+ vsi->num_q_vectors = pf->num_vf_msix - ICE_NONQ_VECS_VF;
+ break;
+ case ICE_VSI_LB:
+ vsi->alloc_txq = 1;
+ vsi->alloc_rxq = 1;
break;
default:
dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
@@ -516,6 +528,10 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type, u16 vf_id)
if (ice_vsi_alloc_arrays(vsi))
goto err_rings;
break;
+ case ICE_VSI_LB:
+ if (ice_vsi_alloc_arrays(vsi))
+ goto err_rings;
+ break;
default:
dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
goto unlock_pf;
@@ -732,6 +748,8 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
BIT(cap->rss_table_entry_width));
vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI;
break;
+ case ICE_VSI_LB:
+ break;
default:
dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n",
vsi->type);
@@ -924,6 +942,9 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
break;
+ case ICE_VSI_LB:
+ dev_dbg(&pf->pdev->dev, "Unsupported VSI type %d\n", vsi->type);
+ return;
default:
dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
return;
@@ -955,6 +976,8 @@ static int ice_vsi_init(struct ice_vsi *vsi)
ctxt->info = vsi->info;
switch (vsi->type) {
+ case ICE_VSI_LB:
+ /* fall through */
case ICE_VSI_PF:
ctxt->flags = ICE_AQ_VSI_TYPE_PF;
break;
@@ -1145,61 +1168,32 @@ err_out:
static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
- int num_q_vectors = 0;
+ u16 num_q_vectors;
+
+ /* SRIOV doesn't grab irq_tracker entries for each VSI */
+ if (vsi->type == ICE_VSI_VF)
+ return 0;
- if (vsi->sw_base_vector || vsi->hw_base_vector) {
- dev_dbg(&pf->pdev->dev, "VSI %d has non-zero HW base vector %d or SW base vector %d\n",
- vsi->vsi_num, vsi->hw_base_vector, vsi->sw_base_vector);
+ if (vsi->base_vector) {
+ dev_dbg(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
+ vsi->vsi_num, vsi->base_vector);
return -EEXIST;
}
if (!test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
return -ENOENT;
- switch (vsi->type) {
- case ICE_VSI_PF:
- num_q_vectors = vsi->num_q_vectors;
- /* reserve slots from OS requested IRQs */
- vsi->sw_base_vector = ice_get_res(pf, pf->sw_irq_tracker,
- num_q_vectors, vsi->idx);
- if (vsi->sw_base_vector < 0) {
- dev_err(&pf->pdev->dev,
- "Failed to get tracking for %d SW vectors for VSI %d, err=%d\n",
- num_q_vectors, vsi->vsi_num,
- vsi->sw_base_vector);
- return -ENOENT;
- }
- pf->num_avail_sw_msix -= num_q_vectors;
-
- /* reserve slots from HW interrupts */
- vsi->hw_base_vector = ice_get_res(pf, pf->hw_irq_tracker,
- num_q_vectors, vsi->idx);
- break;
- case ICE_VSI_VF:
- /* take VF misc vector and data vectors into account */
- num_q_vectors = pf->num_vf_msix;
- /* For VF VSI, reserve slots only from HW interrupts */
- vsi->hw_base_vector = ice_get_res(pf, pf->hw_irq_tracker,
- num_q_vectors, vsi->idx);
- break;
- default:
- dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
- break;
- }
-
- if (vsi->hw_base_vector < 0) {
+ num_q_vectors = vsi->num_q_vectors;
+ /* reserve slots from OS requested IRQs */
+ vsi->base_vector = ice_get_res(pf, pf->irq_tracker, num_q_vectors,
+ vsi->idx);
+ if (vsi->base_vector < 0) {
dev_err(&pf->pdev->dev,
- "Failed to get tracking for %d HW vectors for VSI %d, err=%d\n",
- num_q_vectors, vsi->vsi_num, vsi->hw_base_vector);
- if (vsi->type != ICE_VSI_VF) {
- ice_free_res(pf->sw_irq_tracker,
- vsi->sw_base_vector, vsi->idx);
- pf->num_avail_sw_msix += num_q_vectors;
- }
+ "Failed to get tracking for %d vectors for VSI %d, err=%d\n",
+ num_q_vectors, vsi->vsi_num, vsi->base_vector);
return -ENOENT;
}
-
- pf->num_avail_hw_msix -= num_q_vectors;
+ pf->num_avail_sw_msix -= num_q_vectors;
return 0;
}
@@ -1842,8 +1836,73 @@ ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector)
}
/**
+ * ice_cfg_txq_interrupt - configure interrupt on Tx queue
+ * @vsi: the VSI being configured
+ * @txq: Tx queue being mapped to MSI-X vector
+ * @msix_idx: MSI-X vector index within the function
+ * @itr_idx: ITR index of the interrupt cause
+ *
+ * Configure interrupt on Tx queue by associating Tx queue to MSI-X vector
+ * within the function space.
+ */
+#ifdef CONFIG_PCI_IOV
+void
+ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx)
+#else
+static void
+ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx)
+#endif /* CONFIG_PCI_IOV */
+{
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ u32 val;
+
+ itr_idx = (itr_idx << QINT_TQCTL_ITR_INDX_S) & QINT_TQCTL_ITR_INDX_M;
+
+ val = QINT_TQCTL_CAUSE_ENA_M | itr_idx |
+ ((msix_idx << QINT_TQCTL_MSIX_INDX_S) & QINT_TQCTL_MSIX_INDX_M);
+
+ wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
+}
+
+/**
+ * ice_cfg_rxq_interrupt - configure interrupt on Rx queue
+ * @vsi: the VSI being configured
+ * @rxq: Rx queue being mapped to MSI-X vector
+ * @msix_idx: MSI-X vector index within the function
+ * @itr_idx: ITR index of the interrupt cause
+ *
+ * Configure interrupt on Rx queue by associating Rx queue to MSI-X vector
+ * within the function space.
+ */
+#ifdef CONFIG_PCI_IOV
+void
+ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx)
+#else
+static void
+ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx)
+#endif /* CONFIG_PCI_IOV */
+{
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ u32 val;
+
+ itr_idx = (itr_idx << QINT_RQCTL_ITR_INDX_S) & QINT_RQCTL_ITR_INDX_M;
+
+ val = QINT_RQCTL_CAUSE_ENA_M | itr_idx |
+ ((msix_idx << QINT_RQCTL_MSIX_INDX_S) & QINT_RQCTL_MSIX_INDX_M);
+
+ wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);
+
+ ice_flush(hw);
+}
+
+/**
* ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW
* @vsi: the VSI being configured
+ *
+ * This configures MSIX mode interrupts for the PF VSI, and should not be used
+ * for the VF VSI.
*/
void ice_vsi_cfg_msix(struct ice_vsi *vsi)
{
@@ -1873,43 +1932,17 @@ void ice_vsi_cfg_msix(struct ice_vsi *vsi)
* tracked for this PF.
*/
for (q = 0; q < q_vector->num_ring_tx; q++) {
- int itr_idx = (q_vector->tx.itr_idx <<
- QINT_TQCTL_ITR_INDX_S) &
- QINT_TQCTL_ITR_INDX_M;
- u32 val;
-
- if (vsi->type == ICE_VSI_VF)
- val = QINT_TQCTL_CAUSE_ENA_M | itr_idx |
- (((i + 1) << QINT_TQCTL_MSIX_INDX_S) &
- QINT_TQCTL_MSIX_INDX_M);
- else
- val = QINT_TQCTL_CAUSE_ENA_M | itr_idx |
- ((reg_idx << QINT_TQCTL_MSIX_INDX_S) &
- QINT_TQCTL_MSIX_INDX_M);
- wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
+ ice_cfg_txq_interrupt(vsi, txq, reg_idx,
+ q_vector->tx.itr_idx);
txq++;
}
for (q = 0; q < q_vector->num_ring_rx; q++) {
- int itr_idx = (q_vector->rx.itr_idx <<
- QINT_RQCTL_ITR_INDX_S) &
- QINT_RQCTL_ITR_INDX_M;
- u32 val;
-
- if (vsi->type == ICE_VSI_VF)
- val = QINT_RQCTL_CAUSE_ENA_M | itr_idx |
- (((i + 1) << QINT_RQCTL_MSIX_INDX_S) &
- QINT_RQCTL_MSIX_INDX_M);
- else
- val = QINT_RQCTL_CAUSE_ENA_M | itr_idx |
- ((reg_idx << QINT_RQCTL_MSIX_INDX_S) &
- QINT_RQCTL_MSIX_INDX_M);
- wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);
+ ice_cfg_rxq_interrupt(vsi, rxq, reg_idx,
+ q_vector->rx.itr_idx);
rxq++;
}
}
-
- ice_flush(hw);
}
/**
@@ -2024,6 +2057,19 @@ int ice_vsi_stop_rx_rings(struct ice_vsi *vsi)
}
/**
+ * ice_trigger_sw_intr - trigger a software interrupt
+ * @hw: pointer to the HW structure
+ * @q_vector: interrupt vector to trigger the software interrupt for
+ */
+void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector)
+{
+ wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx),
+ (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) |
+ GLINT_DYN_CTL_SWINT_TRIG_M |
+ GLINT_DYN_CTL_INTENA_M);
+}
+
+/**
* ice_vsi_stop_tx_rings - Disable Tx rings
* @vsi: the VSI being configured
* @rst_src: reset source
@@ -2070,8 +2116,9 @@ ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
break;
for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) {
- if (!rings || !rings[q_idx] ||
- !rings[q_idx]->q_vector) {
+ struct ice_q_vector *q_vector;
+
+ if (!rings || !rings[q_idx]) {
err = -EINVAL;
goto err_out;
}
@@ -2091,9 +2138,10 @@ ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
/* trigger a software interrupt for the vector
* associated to the queue to schedule NAPI handler
*/
- wr32(hw, GLINT_DYN_CTL(rings[i]->q_vector->reg_idx),
- GLINT_DYN_CTL_SWINT_TRIG_M |
- GLINT_DYN_CTL_INTENA_MSK_M);
+ q_vector = rings[i]->q_vector;
+ if (q_vector)
+ ice_trigger_sw_intr(hw, q_vector);
+
q_idx++;
}
status = ice_dis_vsi_txq(vsi->port_info, vsi->idx, tc,
@@ -2234,7 +2282,14 @@ ice_vsi_set_q_vectors_reg_idx(struct ice_vsi *vsi)
goto clear_reg_idx;
}
- q_vector->reg_idx = q_vector->v_idx + vsi->hw_base_vector;
+ if (vsi->type == ICE_VSI_VF) {
+ struct ice_vf *vf = &vsi->back->vf[vsi->vf_id];
+
+ q_vector->reg_idx = ice_calc_vf_reg_idx(vf, q_vector);
+ } else {
+ q_vector->reg_idx =
+ q_vector->v_idx + vsi->base_vector;
+ }
}
return 0;
@@ -2291,6 +2346,54 @@ ice_vsi_add_rem_eth_mac(struct ice_vsi *vsi, bool add_rule)
}
/**
+ * ice_cfg_sw_lldp - Config switch rules for LLDP packet handling
+ * @vsi: the VSI being configured
+ * @tx: bool to determine Tx or Rx rule
+ * @create: bool to determine create or remove rule
+ */
+void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
+{
+ struct ice_fltr_list_entry *list;
+ struct ice_pf *pf = vsi->back;
+ LIST_HEAD(tmp_add_list);
+ enum ice_status status;
+
+ list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL);
+ if (!list)
+ return;
+
+ list->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE;
+ list->fltr_info.vsi_handle = vsi->idx;
+ list->fltr_info.l_data.ethertype_mac.ethertype = ETH_P_LLDP;
+
+ if (tx) {
+ list->fltr_info.fltr_act = ICE_DROP_PACKET;
+ list->fltr_info.flag = ICE_FLTR_TX;
+ list->fltr_info.src_id = ICE_SRC_ID_VSI;
+ } else {
+ list->fltr_info.fltr_act = ICE_FWD_TO_VSI;
+ list->fltr_info.flag = ICE_FLTR_RX;
+ list->fltr_info.src_id = ICE_SRC_ID_LPORT;
+ }
+
+ INIT_LIST_HEAD(&list->list_entry);
+ list_add(&list->list_entry, &tmp_add_list);
+
+ if (create)
+ status = ice_add_eth_mac(&pf->hw, &tmp_add_list);
+ else
+ status = ice_remove_eth_mac(&pf->hw, &tmp_add_list);
+
+ if (status)
+ dev_err(&pf->pdev->dev,
+ "Fail %s %s LLDP rule on VSI %i error: %d\n",
+ create ? "adding" : "removing", tx ? "TX" : "RX",
+ vsi->vsi_num, status);
+
+ ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
+}
+
+/**
* ice_vsi_setup - Set up a VSI by a given type
* @pf: board private structure
* @pi: pointer to the port_info instance
@@ -2310,6 +2413,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct device *dev = &pf->pdev->dev;
+ enum ice_status status;
struct ice_vsi *vsi;
int ret, i;
@@ -2389,23 +2493,24 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
if (ret)
goto unroll_alloc_q_vector;
- /* Setup Vector base only during VF init phase or when VF asks
- * for more vectors than assigned number. In all other cases,
- * assign hw_base_vector to the value given earlier.
- */
- if (test_bit(ICE_VF_STATE_CFG_INTR, pf->vf[vf_id].vf_states)) {
- ret = ice_vsi_setup_vector_base(vsi);
- if (ret)
- goto unroll_vector_base;
- } else {
- vsi->hw_base_vector = pf->vf[vf_id].first_vector_idx;
- }
ret = ice_vsi_set_q_vectors_reg_idx(vsi);
if (ret)
goto unroll_vector_base;
pf->q_left_tx -= vsi->alloc_txq;
pf->q_left_rx -= vsi->alloc_rxq;
+
+ /* Do not exit if configuring RSS had an issue; at least
+ * traffic can still be received on the first queue. Hence there
+ * is no need to capture the return value
+ */
+ if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
+ ice_vsi_cfg_rss_lut_key(vsi);
+ break;
+ case ICE_VSI_LB:
+ ret = ice_vsi_alloc_rings(vsi);
+ if (ret)
+ goto unroll_vsi_init;
break;
default:
/* clean up the resources and exit */
@@ -2416,12 +2521,12 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
for (i = 0; i < vsi->tc_cfg.numtc; i++)
max_txqs[i] = pf->num_lan_tx;
- ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
- max_txqs);
- if (ret) {
+ status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
+ max_txqs);
+ if (status) {
dev_err(&pf->pdev->dev,
"VSI %d failed lan queue config, error %d\n",
- vsi->vsi_num, ret);
+ vsi->vsi_num, status);
goto unroll_vector_base;
}
@@ -2430,19 +2535,28 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
* out PAUSE or PFC frames. If enabled, FW can still send FC frames.
* The rule is added once for PF VSI in order to create appropriate
* recipe, since VSI/VSI list is ignored with drop action...
+ * Also add rules to handle LLDP Tx and Rx packets. Tx LLDP packets
+ * need to be dropped so that VFs cannot send LLDP packets to reconfig
+ * DCB settings in the HW. Also, if the FW DCBX engine is not running
+ * then Rx LLDP packets need to be redirected up the stack.
*/
- if (vsi->type == ICE_VSI_PF)
+ if (vsi->type == ICE_VSI_PF) {
ice_vsi_add_rem_eth_mac(vsi, true);
+ /* Tx LLDP packets */
+ ice_cfg_sw_lldp(vsi, true, true);
+
+ /* Rx LLDP packets */
+ if (!test_bit(ICE_FLAG_ENABLE_FW_LLDP, pf->flags))
+ ice_cfg_sw_lldp(vsi, false, true);
+ }
+
return vsi;
unroll_vector_base:
/* reclaim SW interrupts back to the common pool */
- ice_free_res(pf->sw_irq_tracker, vsi->sw_base_vector, vsi->idx);
+ ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
pf->num_avail_sw_msix += vsi->num_q_vectors;
- /* reclaim HW interrupt back to the common pool */
- ice_free_res(pf->hw_irq_tracker, vsi->hw_base_vector, vsi->idx);
- pf->num_avail_hw_msix += vsi->num_q_vectors;
unroll_alloc_q_vector:
ice_vsi_free_q_vectors(vsi);
unroll_vsi_init:
@@ -2463,17 +2577,17 @@ unroll_get_qs:
static void ice_vsi_release_msix(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
- u16 vector = vsi->hw_base_vector;
struct ice_hw *hw = &pf->hw;
u32 txq = 0;
u32 rxq = 0;
int i, q;
- for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
+ for (i = 0; i < vsi->num_q_vectors; i++) {
struct ice_q_vector *q_vector = vsi->q_vectors[i];
+ u16 reg_idx = q_vector->reg_idx;
- wr32(hw, GLINT_ITR(ICE_IDX_ITR0, vector), 0);
- wr32(hw, GLINT_ITR(ICE_IDX_ITR1, vector), 0);
+ wr32(hw, GLINT_ITR(ICE_IDX_ITR0, reg_idx), 0);
+ wr32(hw, GLINT_ITR(ICE_IDX_ITR1, reg_idx), 0);
for (q = 0; q < q_vector->num_ring_tx; q++) {
wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0);
txq++;
@@ -2495,7 +2609,7 @@ static void ice_vsi_release_msix(struct ice_vsi *vsi)
void ice_vsi_free_irq(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
- int base = vsi->sw_base_vector;
+ int base = vsi->base_vector;
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
int i;
@@ -2591,11 +2705,11 @@ int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id)
int count = 0;
int i;
- if (!res || index >= res->num_entries)
+ if (!res || index >= res->end)
return -EINVAL;
id |= ICE_RES_VALID_BIT;
- for (i = index; i < res->num_entries && res->list[i] == id; i++) {
+ for (i = index; i < res->end && res->list[i] == id; i++) {
res->list[i] = 0;
count++;
}
@@ -2613,10 +2727,9 @@ int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id)
*/
static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id)
{
- int start = res->search_hint;
- int end = start;
+ int start = 0, end = 0;
- if ((start + needed) > res->num_entries)
+ if (needed > res->end)
return -ENOMEM;
id |= ICE_RES_VALID_BIT;
@@ -2625,7 +2738,7 @@ static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id)
/* skip already allocated entries */
if (res->list[end++] & ICE_RES_VALID_BIT) {
start = end;
- if ((start + needed) > res->num_entries)
+ if ((start + needed) > res->end)
break;
}
@@ -2636,13 +2749,9 @@ static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id)
while (i != end)
res->list[i++] = id;
- if (end == res->num_entries)
- end = 0;
-
- res->search_hint = end;
return start;
}
- } while (1);
+ } while (end < res->end);
return -ENOMEM;
}
@@ -2654,16 +2763,11 @@ static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id)
* @needed: size of the block needed
* @id: identifier to track owner
*
- * Returns the base item index of the block, or -ENOMEM for error
- * The search_hint trick and lack of advanced fit-finding only works
- * because we're highly likely to have all the same sized requests.
- * Linear search time and any fragmentation should be minimal.
+ * Returns the base item index of the block, or negative for error
*/
int
ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id)
{
- int ret;
-
if (!res || !pf)
return -EINVAL;
@@ -2674,16 +2778,7 @@ ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id)
return -EINVAL;
}
- /* search based on search_hint */
- ret = ice_search_res(res, needed, id);
-
- if (ret < 0) {
- /* previous search failed. Reset search hint and try again */
- res->search_hint = 0;
- ret = ice_search_res(res, needed, id);
- }
-
- return ret;
+ return ice_search_res(res, needed, id);
}
/**
@@ -2692,7 +2787,7 @@ ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id)
*/
void ice_vsi_dis_irq(struct ice_vsi *vsi)
{
- int base = vsi->sw_base_vector;
+ int base = vsi->base_vector;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
u32 val;
@@ -2738,6 +2833,21 @@ void ice_vsi_dis_irq(struct ice_vsi *vsi)
}
/**
+ * ice_napi_del - Remove NAPI handler for the VSI
+ * @vsi: VSI for which NAPI handler is to be removed
+ */
+void ice_napi_del(struct ice_vsi *vsi)
+{
+ int v_idx;
+
+ if (!vsi->netdev)
+ return;
+
+ ice_for_each_q_vector(vsi, v_idx)
+ netif_napi_del(&vsi->q_vectors[v_idx]->napi);
+}
+
+/**
* ice_vsi_release - Delete a VSI and free its resources
* @vsi: the VSI being removed
*
@@ -2745,60 +2855,61 @@ void ice_vsi_dis_irq(struct ice_vsi *vsi)
*/
int ice_vsi_release(struct ice_vsi *vsi)
{
- struct ice_vf *vf = NULL;
struct ice_pf *pf;
if (!vsi->back)
return -ENODEV;
pf = vsi->back;
- if (vsi->type == ICE_VSI_VF)
- vf = &pf->vf[vsi->vf_id];
- /* do not unregister and free netdevs while driver is in the reset
- * recovery pending state. Since reset/rebuild happens through PF
- * service task workqueue, its not a good idea to unregister netdev
- * that is associated to the PF that is running the work queue items
- * currently. This is done to avoid check_flush_dependency() warning
- * on this wq
+ /* do not unregister while driver is in the reset recovery pending
+ * state. Since reset/rebuild happens through PF service task workqueue,
+ * it's not a good idea to unregister netdev that is associated to the
+ * PF that is running the work queue items currently. This is done to
+ * avoid check_flush_dependency() warning on this wq
*/
- if (vsi->netdev && !ice_is_reset_in_progress(pf->state)) {
- ice_napi_del(vsi);
+ if (vsi->netdev && !ice_is_reset_in_progress(pf->state))
unregister_netdev(vsi->netdev);
- free_netdev(vsi->netdev);
- vsi->netdev = NULL;
- }
if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
ice_rss_clean(vsi);
/* Disable VSI and free resources */
- ice_vsi_dis_irq(vsi);
+ if (vsi->type != ICE_VSI_LB)
+ ice_vsi_dis_irq(vsi);
ice_vsi_close(vsi);
- /* reclaim interrupt vectors back to PF */
+ /* SR-IOV determines needed MSIX resources all at once instead of per
+ * VSI, since when VFs are spawned we know how many VFs there are and how
+ * many interrupts each VF needs. SR-IOV MSIX resources are also
+ * cleared in the same manner.
+ */
if (vsi->type != ICE_VSI_VF) {
/* reclaim SW interrupts back to the common pool */
- ice_free_res(pf->sw_irq_tracker, vsi->sw_base_vector, vsi->idx);
+ ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
pf->num_avail_sw_msix += vsi->num_q_vectors;
- /* reclaim HW interrupts back to the common pool */
- ice_free_res(pf->hw_irq_tracker, vsi->hw_base_vector, vsi->idx);
- pf->num_avail_hw_msix += vsi->num_q_vectors;
- } else if (test_bit(ICE_VF_STATE_CFG_INTR, vf->vf_states)) {
- /* Reclaim VF resources back only while freeing all VFs or
- * vector reassignment is requested
- */
- ice_free_res(pf->hw_irq_tracker, vf->first_vector_idx,
- vsi->idx);
- pf->num_avail_hw_msix += pf->num_vf_msix;
}
- if (vsi->type == ICE_VSI_PF)
+ if (vsi->type == ICE_VSI_PF) {
ice_vsi_add_rem_eth_mac(vsi, false);
+ ice_cfg_sw_lldp(vsi, true, false);
+ /* The Rx rule only exists, and so only needs removing, if
+ * the LLDP FW engine is currently stopped
+ */
+ if (!test_bit(ICE_FLAG_ENABLE_FW_LLDP, pf->flags))
+ ice_cfg_sw_lldp(vsi, false, false);
+ }
ice_remove_vsi_fltr(&pf->hw, vsi->idx);
ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
ice_vsi_delete(vsi);
ice_vsi_free_q_vectors(vsi);
+
+ /* make sure unregister_netdev() was called by checking __ICE_DOWN */
+ if (vsi->netdev && test_bit(__ICE_DOWN, vsi->state)) {
+ free_netdev(vsi->netdev);
+ vsi->netdev = NULL;
+ }
+
ice_vsi_clear_rings(vsi);
ice_vsi_put_qs(vsi);
@@ -2825,6 +2936,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct ice_vf *vf = NULL;
+ enum ice_status status;
struct ice_pf *pf;
int ret, i;
@@ -2838,24 +2950,17 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
ice_vsi_free_q_vectors(vsi);
+ /* SR-IOV determines needed MSIX resources all at once instead of per
+ * VSI, since when VFs are spawned we know how many VFs there are and how
+ * many interrupts each VF needs. SR-IOV MSIX resources are also
+ * cleared in the same manner.
+ */
if (vsi->type != ICE_VSI_VF) {
/* reclaim SW interrupts back to the common pool */
- ice_free_res(pf->sw_irq_tracker, vsi->sw_base_vector, vsi->idx);
+ ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
pf->num_avail_sw_msix += vsi->num_q_vectors;
- vsi->sw_base_vector = 0;
- /* reclaim HW interrupts back to the common pool */
- ice_free_res(pf->hw_irq_tracker, vsi->hw_base_vector,
- vsi->idx);
- pf->num_avail_hw_msix += vsi->num_q_vectors;
- } else {
- /* Reclaim VF resources back to the common pool for reset and
- * and rebuild, with vector reassignment
- */
- ice_free_res(pf->hw_irq_tracker, vf->first_vector_idx,
- vsi->idx);
- pf->num_avail_hw_msix += pf->num_vf_msix;
+ vsi->base_vector = 0;
}
- vsi->hw_base_vector = 0;
ice_vsi_clear_rings(vsi);
ice_vsi_free_arrays(vsi);
@@ -2881,10 +2986,6 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
if (ret)
goto err_rings;
- ret = ice_vsi_setup_vector_base(vsi);
- if (ret)
- goto err_vectors;
-
ret = ice_vsi_set_q_vectors_reg_idx(vsi);
if (ret)
goto err_vectors;
@@ -2929,12 +3030,12 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
for (i = 0; i < vsi->tc_cfg.numtc; i++)
max_txqs[i] = pf->num_lan_tx;
- ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
- max_txqs);
- if (ret) {
+ status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
+ max_txqs);
+ if (status) {
dev_err(&pf->pdev->dev,
"VSI %d failed lan queue config, error %d\n",
- vsi->vsi_num, ret);
+ vsi->vsi_num, status);
goto err_vectors;
}
return 0;
@@ -2956,7 +3057,7 @@ err_vsi:
/**
* ice_is_reset_in_progress - check for a reset in progress
- * @state: pf state field
+ * @state: PF state field
*/
bool ice_is_reset_in_progress(unsigned long *state)
{
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index a91d3553cc89..6e43ef03bfc3 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -19,6 +19,14 @@ int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi);
void ice_vsi_cfg_msix(struct ice_vsi *vsi);
+#ifdef CONFIG_PCI_IOV
+void
+ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx);
+
+void
+ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx);
+#endif /* CONFIG_PCI_IOV */
+
int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid);
int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid);
@@ -37,6 +45,8 @@ ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc);
+void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create);
+
void ice_vsi_delete(struct ice_vsi *vsi);
int ice_vsi_clear(struct ice_vsi *vsi);
@@ -49,6 +59,8 @@ struct ice_vsi *
ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
enum ice_vsi_type type, u16 vf_id);
+void ice_napi_del(struct ice_vsi *vsi);
+
int ice_vsi_release(struct ice_vsi *vsi);
void ice_vsi_close(struct ice_vsi *vsi);
@@ -64,6 +76,8 @@ bool ice_is_reset_in_progress(unsigned long *state);
void ice_vsi_free_q_vectors(struct ice_vsi *vsi);
+void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector);
+
void ice_vsi_put_qs(struct ice_vsi *vsi);
#ifdef CONFIG_DCB
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 7843abf4d44d..28ec0d57941d 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -61,9 +61,10 @@ static u32 ice_get_tx_pending(struct ice_ring *ring)
static void ice_check_for_hang_subtask(struct ice_pf *pf)
{
struct ice_vsi *vsi = NULL;
+ struct ice_hw *hw;
unsigned int i;
- u32 v, v_idx;
int packets;
+ u32 v;
ice_for_each_vsi(pf, v)
if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
@@ -77,12 +78,12 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf)
if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
return;
+ hw = &vsi->back->hw;
+
for (i = 0; i < vsi->num_txq; i++) {
struct ice_ring *tx_ring = vsi->tx_rings[i];
if (tx_ring && tx_ring->desc) {
- int itr = ICE_ITR_NONE;
-
/* If packet counter has not changed the queue is
* likely stalled, so force an interrupt for this
* queue.
@@ -93,12 +94,7 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf)
packets = tx_ring->stats.pkts & INT_MAX;
if (tx_ring->tx_stats.prev_pkt == packets) {
/* Trigger sw interrupt to revive the queue */
- v_idx = tx_ring->q_vector->v_idx;
- wr32(&vsi->back->hw,
- GLINT_DYN_CTL(vsi->hw_base_vector + v_idx),
- (itr << GLINT_DYN_CTL_ITR_INDX_S) |
- GLINT_DYN_CTL_SWINT_TRIG_M |
- GLINT_DYN_CTL_INTENA_MSK_M);
+ ice_trigger_sw_intr(hw, tx_ring->q_vector);
continue;
}
@@ -113,6 +109,67 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf)
}
/**
+ * ice_init_mac_fltr - Set initial MAC filters
+ * @pf: board private structure
+ *
+ * Set initial set of MAC filters for PF VSI; configure filters for permanent
+ * address and broadcast address. If an error is encountered, netdevice will be
+ * unregistered.
+ */
+static int ice_init_mac_fltr(struct ice_pf *pf)
+{
+ LIST_HEAD(tmp_add_list);
+ u8 broadcast[ETH_ALEN];
+ struct ice_vsi *vsi;
+ int status;
+
+ vsi = ice_find_vsi_by_type(pf, ICE_VSI_PF);
+ if (!vsi)
+ return -EINVAL;
+
+ /* To add a MAC filter, first add the MAC to a list and then
+ * pass the list to ice_add_mac.
+ */
+
+ /* Add a unicast MAC filter so the VSI can get its packets */
+ status = ice_add_mac_to_list(vsi, &tmp_add_list,
+ vsi->port_info->mac.perm_addr);
+ if (status)
+ goto unregister;
+
+ /* VSI needs to receive broadcast traffic, so add the broadcast
+ * MAC address to the list as well.
+ */
+ eth_broadcast_addr(broadcast);
+ status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast);
+ if (status)
+ goto free_mac_list;
+
+ /* Program MAC filters for entries in tmp_add_list */
+ status = ice_add_mac(&pf->hw, &tmp_add_list);
+ if (status)
+ status = -ENOMEM;
+
+free_mac_list:
+ ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
+
+unregister:
+ /* We aren't useful with no MAC filters, so unregister if we
+ * had an error
+ */
+ if (status && vsi->netdev->reg_state == NETREG_REGISTERED) {
+ dev_err(&pf->pdev->dev,
+ "Could not add MAC filters error %d. Unregistering device\n",
+ status);
+ unregister_netdev(vsi->netdev);
+ free_netdev(vsi->netdev);
+ vsi->netdev = NULL;
+ }
+
+ return status;
+}
+
+/**
* ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
* @netdev: the net device on which the sync is happening
* @addr: MAC address to sync
@@ -567,7 +624,11 @@ static void ice_reset_subtask(struct ice_pf *pf)
*/
void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
{
+ struct ice_aqc_get_phy_caps_data *caps;
+ enum ice_status status;
+ const char *fec_req;
const char *speed;
+ const char *fec;
const char *fc;
if (!vsi)
@@ -584,6 +645,12 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
}
switch (vsi->port_info->phy.link_info.link_speed) {
+ case ICE_AQ_LINK_SPEED_100GB:
+ speed = "100 G";
+ break;
+ case ICE_AQ_LINK_SPEED_50GB:
+ speed = "50 G";
+ break;
case ICE_AQ_LINK_SPEED_40GB:
speed = "40 G";
break;
@@ -615,13 +682,13 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
switch (vsi->port_info->fc.current_mode) {
case ICE_FC_FULL:
- fc = "RX/TX";
+ fc = "Rx/Tx";
break;
case ICE_FC_TX_PAUSE:
- fc = "TX";
+ fc = "Tx";
break;
case ICE_FC_RX_PAUSE:
- fc = "RX";
+ fc = "Rx";
break;
case ICE_FC_NONE:
fc = "None";
@@ -631,8 +698,47 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
break;
}
- netdev_info(vsi->netdev, "NIC Link is up %sbps, Flow Control: %s\n",
- speed, fc);
+ /* Get FEC mode based on negotiated link info */
+ switch (vsi->port_info->phy.link_info.fec_info) {
+ case ICE_AQ_LINK_25G_RS_528_FEC_EN:
+ /* fall through */
+ case ICE_AQ_LINK_25G_RS_544_FEC_EN:
+ fec = "RS-FEC";
+ break;
+ case ICE_AQ_LINK_25G_KR_FEC_EN:
+ fec = "FC-FEC/BASE-R";
+ break;
+ default:
+ fec = "NONE";
+ break;
+ }
+
+ /* Get FEC mode requested based on PHY caps last SW configuration */
+ caps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*caps), GFP_KERNEL);
+ if (!caps) {
+ fec_req = "Unknown";
+ goto done;
+ }
+
+ status = ice_aq_get_phy_caps(vsi->port_info, false,
+ ICE_AQC_REPORT_SW_CFG, caps, NULL);
+ if (status)
+ netdev_info(vsi->netdev, "Get phy capability failed.\n");
+
+ if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
+ caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
+ fec_req = "RS-FEC";
+ else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
+ caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
+ fec_req = "FC-FEC/BASE-R";
+ else
+ fec_req = "NONE";
+
+ devm_kfree(&vsi->back->pdev->dev, caps);
+
+done:
+ netdev_info(vsi->netdev, "NIC Link is up %sbps, Requested FEC: %s, FEC: %s, Flow Control: %s\n",
+ speed, fec_req, fec, fc);
}
/**
@@ -664,7 +770,7 @@ static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
/**
* ice_link_event - process the link event
- * @pf: pf that the link event is associated with
+ * @pf: PF that the link event is associated with
* @pi: port_info for the port that the link event is associated with
* @link_up: true if the physical link is up and false if it is down
* @link_speed: current link speed received from the link event
@@ -774,7 +880,7 @@ static int ice_init_link_events(struct ice_port_info *pi)
/**
* ice_handle_link_event - handle link event via ARQ
- * @pf: pf that the link event is associated with
+ * @pf: PF that the link event is associated with
* @event: event structure containing link status info
*/
static int
@@ -1161,16 +1267,16 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
}
}
- /* see if one of the VFs needs to be reset */
- for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
+ /* check to see if one of the VFs caused the MDD */
+ for (i = 0; i < pf->num_alloc_vfs; i++) {
struct ice_vf *vf = &pf->vf[i];
- mdd_detected = false;
+ bool vf_mdd_detected = false;
reg = rd32(hw, VP_MDET_TX_PQM(i));
if (reg & VP_MDET_TX_PQM_VALID_M) {
wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF);
- mdd_detected = true;
+ vf_mdd_detected = true;
dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
i);
}
@@ -1178,7 +1284,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
reg = rd32(hw, VP_MDET_TX_TCLAN(i));
if (reg & VP_MDET_TX_TCLAN_VALID_M) {
wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF);
- mdd_detected = true;
+ vf_mdd_detected = true;
dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
i);
}
@@ -1186,7 +1292,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
reg = rd32(hw, VP_MDET_TX_TDPU(i));
if (reg & VP_MDET_TX_TDPU_VALID_M) {
wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF);
- mdd_detected = true;
+ vf_mdd_detected = true;
dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
i);
}
@@ -1194,19 +1300,18 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
reg = rd32(hw, VP_MDET_RX(i));
if (reg & VP_MDET_RX_VALID_M) {
wr32(hw, VP_MDET_RX(i), 0xFFFF);
- mdd_detected = true;
+ vf_mdd_detected = true;
dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
i);
}
- if (mdd_detected) {
+ if (vf_mdd_detected) {
vf->num_mdd_events++;
- dev_info(&pf->pdev->dev,
- "Use PF Control I/F to re-enable the VF\n");
- set_bit(ICE_VF_STATE_DIS, vf->vf_states);
+ if (vf->num_mdd_events > 1)
+ dev_info(&pf->pdev->dev, "VF %d has had %llu MDD events since last boot\n",
+ i, vf->num_mdd_events);
}
}
-
}
/**
@@ -1327,7 +1432,7 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
{
int q_vectors = vsi->num_q_vectors;
struct ice_pf *pf = vsi->back;
- int base = vsi->sw_base_vector;
+ int base = vsi->base_vector;
int rx_int_idx = 0;
int tx_int_idx = 0;
int vector, err;
@@ -1408,7 +1513,7 @@ static void ice_ena_misc_vector(struct ice_pf *pf)
wr32(hw, PFINT_OICR_ENA, val);
/* SW_ITR_IDX = 0, but don't change INTENA */
- wr32(hw, GLINT_DYN_CTL(pf->hw_oicr_idx),
+ wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
}
@@ -1430,6 +1535,11 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
oicr = rd32(hw, PFINT_OICR);
ena_mask = rd32(hw, PFINT_OICR_ENA);
+ if (oicr & PFINT_OICR_SWINT_M) {
+ ena_mask &= ~PFINT_OICR_SWINT_M;
+ pf->sw_int_count++;
+ }
+
if (oicr & PFINT_OICR_MAL_DETECT_M) {
ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
set_bit(__ICE_MDD_EVENT_PENDING, pf->state);
@@ -1556,15 +1666,13 @@ static void ice_free_irq_msix_misc(struct ice_pf *pf)
ice_flush(hw);
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags) && pf->msix_entries) {
- synchronize_irq(pf->msix_entries[pf->sw_oicr_idx].vector);
+ synchronize_irq(pf->msix_entries[pf->oicr_idx].vector);
devm_free_irq(&pf->pdev->dev,
- pf->msix_entries[pf->sw_oicr_idx].vector, pf);
+ pf->msix_entries[pf->oicr_idx].vector, pf);
}
pf->num_avail_sw_msix += 1;
- ice_free_res(pf->sw_irq_tracker, pf->sw_oicr_idx, ICE_RES_MISC_VEC_ID);
- pf->num_avail_hw_msix += 1;
- ice_free_res(pf->hw_irq_tracker, pf->hw_oicr_idx, ICE_RES_MISC_VEC_ID);
+ ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID);
}
/**
@@ -1618,43 +1726,31 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf)
if (ice_is_reset_in_progress(pf->state))
goto skip_req_irq;
- /* reserve one vector in sw_irq_tracker for misc interrupts */
- oicr_idx = ice_get_res(pf, pf->sw_irq_tracker, 1, ICE_RES_MISC_VEC_ID);
+ /* reserve one vector in irq_tracker for misc interrupts */
+ oicr_idx = ice_get_res(pf, pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
if (oicr_idx < 0)
return oicr_idx;
pf->num_avail_sw_msix -= 1;
- pf->sw_oicr_idx = oicr_idx;
-
- /* reserve one vector in hw_irq_tracker for misc interrupts */
- oicr_idx = ice_get_res(pf, pf->hw_irq_tracker, 1, ICE_RES_MISC_VEC_ID);
- if (oicr_idx < 0) {
- ice_free_res(pf->sw_irq_tracker, 1, ICE_RES_MISC_VEC_ID);
- pf->num_avail_sw_msix += 1;
- return oicr_idx;
- }
- pf->num_avail_hw_msix -= 1;
- pf->hw_oicr_idx = oicr_idx;
+ pf->oicr_idx = oicr_idx;
err = devm_request_irq(&pf->pdev->dev,
- pf->msix_entries[pf->sw_oicr_idx].vector,
+ pf->msix_entries[pf->oicr_idx].vector,
ice_misc_intr, 0, pf->int_name, pf);
if (err) {
dev_err(&pf->pdev->dev,
"devm_request_irq for %s failed: %d\n",
pf->int_name, err);
- ice_free_res(pf->sw_irq_tracker, 1, ICE_RES_MISC_VEC_ID);
+ ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
pf->num_avail_sw_msix += 1;
- ice_free_res(pf->hw_irq_tracker, 1, ICE_RES_MISC_VEC_ID);
- pf->num_avail_hw_msix += 1;
return err;
}
skip_req_irq:
ice_ena_misc_vector(pf);
- ice_ena_ctrlq_interrupts(hw, pf->hw_oicr_idx);
- wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->hw_oicr_idx),
+ ice_ena_ctrlq_interrupts(hw, pf->oicr_idx);
+ wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_idx),
ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S);
ice_flush(hw);
@@ -1664,21 +1760,6 @@ skip_req_irq:
}
/**
- * ice_napi_del - Remove NAPI handler for the VSI
- * @vsi: VSI for which NAPI handler is to be removed
- */
-void ice_napi_del(struct ice_vsi *vsi)
-{
- int v_idx;
-
- if (!vsi->netdev)
- return;
-
- ice_for_each_q_vector(vsi, v_idx)
- netif_napi_del(&vsi->q_vectors[v_idx]->napi);
-}
-
-/**
* ice_napi_add - register NAPI handler for the VSI
* @vsi: VSI for which NAPI handler is to be registered
*
@@ -1803,8 +1884,8 @@ void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
* @pf: board private structure
* @pi: pointer to the port_info instance
*
- * Returns pointer to the successfully allocated VSI sw struct on success,
- * otherwise returns NULL on failure.
+ * Returns pointer to the successfully allocated VSI software struct
+ * on success, otherwise returns NULL on failure.
*/
static struct ice_vsi *
ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
@@ -1813,6 +1894,20 @@ ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
}
/**
+ * ice_lb_vsi_setup - Set up a loopback VSI
+ * @pf: board private structure
+ * @pi: pointer to the port_info instance
+ *
+ * Returns pointer to the successfully allocated VSI software struct
+ * on success, otherwise returns NULL on failure.
+ */
+struct ice_vsi *
+ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
+{
+ return ice_vsi_setup(pf, pi, ICE_VSI_LB, ICE_INVAL_VFID);
+}
+
+/**
* ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload
* @netdev: network interface to be adjusted
* @proto: unused protocol
@@ -1900,8 +1995,6 @@ ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto,
*/
static int ice_setup_pf_sw(struct ice_pf *pf)
{
- LIST_HEAD(tmp_add_list);
- u8 broadcast[ETH_ALEN];
struct ice_vsi *vsi;
int status = 0;
@@ -1926,38 +2019,12 @@ static int ice_setup_pf_sw(struct ice_pf *pf)
*/
ice_napi_add(vsi);
- /* To add a MAC filter, first add the MAC to a list and then
- * pass the list to ice_add_mac.
- */
-
- /* Add a unicast MAC filter so the VSI can get its packets */
- status = ice_add_mac_to_list(vsi, &tmp_add_list,
- vsi->port_info->mac.perm_addr);
+ status = ice_init_mac_fltr(pf);
if (status)
goto unroll_napi_add;
- /* VSI needs to receive broadcast traffic, so add the broadcast
- * MAC address to the list as well.
- */
- eth_broadcast_addr(broadcast);
- status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast);
- if (status)
- goto free_mac_list;
-
- /* program MAC filters for entries in tmp_add_list */
- status = ice_add_mac(&pf->hw, &tmp_add_list);
- if (status) {
- dev_err(&pf->pdev->dev, "Could not add MAC filters\n");
- status = -ENOMEM;
- goto free_mac_list;
- }
-
- ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
return status;
-free_mac_list:
- ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
-
unroll_napi_add:
if (vsi) {
ice_napi_del(vsi);
@@ -2149,14 +2216,9 @@ static void ice_clear_interrupt_scheme(struct ice_pf *pf)
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
ice_dis_msix(pf);
- if (pf->sw_irq_tracker) {
- devm_kfree(&pf->pdev->dev, pf->sw_irq_tracker);
- pf->sw_irq_tracker = NULL;
- }
-
- if (pf->hw_irq_tracker) {
- devm_kfree(&pf->pdev->dev, pf->hw_irq_tracker);
- pf->hw_irq_tracker = NULL;
+ if (pf->irq_tracker) {
+ devm_kfree(&pf->pdev->dev, pf->irq_tracker);
+ pf->irq_tracker = NULL;
}
}
@@ -2166,7 +2228,7 @@ static void ice_clear_interrupt_scheme(struct ice_pf *pf)
*/
static int ice_init_interrupt_scheme(struct ice_pf *pf)
{
- int vectors = 0, hw_vectors = 0;
+ int vectors;
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
vectors = ice_ena_msix_range(pf);
@@ -2177,31 +2239,18 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf)
return vectors;
/* set up vector assignment tracking */
- pf->sw_irq_tracker =
- devm_kzalloc(&pf->pdev->dev, sizeof(*pf->sw_irq_tracker) +
+ pf->irq_tracker =
+ devm_kzalloc(&pf->pdev->dev, sizeof(*pf->irq_tracker) +
(sizeof(u16) * vectors), GFP_KERNEL);
- if (!pf->sw_irq_tracker) {
+ if (!pf->irq_tracker) {
ice_dis_msix(pf);
return -ENOMEM;
}
/* populate SW interrupts pool with number of OS granted IRQs. */
pf->num_avail_sw_msix = vectors;
- pf->sw_irq_tracker->num_entries = vectors;
-
- /* set up HW vector assignment tracking */
- hw_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
- pf->hw_irq_tracker =
- devm_kzalloc(&pf->pdev->dev, sizeof(*pf->hw_irq_tracker) +
- (sizeof(u16) * hw_vectors), GFP_KERNEL);
- if (!pf->hw_irq_tracker) {
- ice_clear_interrupt_scheme(pf);
- return -ENOMEM;
- }
-
- /* populate HW interrupts pool with number of HW supported irqs. */
- pf->num_avail_hw_msix = hw_vectors;
- pf->hw_irq_tracker->num_entries = hw_vectors;
+ pf->irq_tracker->num_entries = vectors;
+ pf->irq_tracker->end = pf->irq_tracker->num_entries;
return 0;
}
@@ -2252,7 +2301,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
if (!pf)
return -ENOMEM;
- /* set up for high or low dma */
+ /* set up for high or low DMA */
err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
if (err)
err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
@@ -2302,7 +2351,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
ice_init_pf(pf);
- err = ice_init_pf_dcb(pf);
+ err = ice_init_pf_dcb(pf, false);
if (err) {
clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
@@ -2368,7 +2417,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
err = ice_setup_pf_sw(pf);
if (err) {
- dev_err(dev, "probe failed due to setup pf switch:%d\n", err);
+ dev_err(dev, "probe failed due to setup PF switch:%d\n", err);
goto err_alloc_sw_unroll;
}
@@ -2625,7 +2674,7 @@ static int __init ice_module_init(void)
status = pci_register_driver(&ice_driver);
if (status) {
- pr_err("failed to register pci driver, err %d\n", status);
+ pr_err("failed to register PCI driver, err %d\n", status);
destroy_workqueue(ice_wq);
}
@@ -2725,21 +2774,21 @@ free_lists:
ice_free_fltr_list(&pf->pdev->dev, &a_mac_list);
if (err) {
- netdev_err(netdev, "can't set mac %pM. filter update failed\n",
+ netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
mac);
return err;
}
/* change the netdev's MAC address */
memcpy(netdev->dev_addr, mac, netdev->addr_len);
- netdev_dbg(vsi->netdev, "updated mac address to %pM\n",
+ netdev_dbg(vsi->netdev, "updated MAC address to %pM\n",
netdev->dev_addr);
/* write new MAC address to the firmware */
flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
status = ice_aq_manage_mac_write(hw, mac, flags, NULL);
if (status) {
- netdev_err(netdev, "can't set mac %pM. write to firmware failed.\n",
+ netdev_err(netdev, "can't set MAC %pM. write to firmware failed.\n",
mac);
}
return 0;
@@ -2876,6 +2925,13 @@ ice_set_features(struct net_device *netdev, netdev_features_t features)
(netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
ret = ice_vsi_manage_vlan_insertion(vsi);
+ if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
+ !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
+ ret = ice_cfg_vlan_pruning(vsi, true, false);
+ else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
+ (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
+ ret = ice_cfg_vlan_pruning(vsi, false, false);
+
return ret;
}
@@ -2901,7 +2957,7 @@ static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
*
* Return 0 on success and negative value on error
*/
-static int ice_vsi_cfg(struct ice_vsi *vsi)
+int ice_vsi_cfg(struct ice_vsi *vsi)
{
int err;
@@ -2933,7 +2989,7 @@ static void ice_napi_enable_all(struct ice_vsi *vsi)
if (!vsi->netdev)
return;
- ice_for_each_q_vector(vsi, q_idx) {
+ ice_for_each_q_vector(vsi, q_idx) {
struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
if (q_vector->rx.ring || q_vector->tx.ring)
@@ -3456,7 +3512,7 @@ int ice_down(struct ice_vsi *vsi)
*
* Return 0 on success, negative on failure
*/
-static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
+int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
{
int i, err = 0;
@@ -3482,7 +3538,7 @@ static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
*
* Return 0 on success, negative on failure
*/
-static int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
+int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
{
int i, err = 0;
@@ -3658,7 +3714,7 @@ static int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked)
}
/**
- * ice_vsi_rebuild_all - rebuild all VSIs in pf
+ * ice_vsi_rebuild_all - rebuild all VSIs in PF
* @pf: the PF
*/
static int ice_vsi_rebuild_all(struct ice_pf *pf)
@@ -3728,7 +3784,7 @@ static int ice_vsi_replay_all(struct ice_pf *pf)
/**
* ice_rebuild - rebuild after reset
- * @pf: pf to rebuild
+ * @pf: PF to rebuild
*/
static void ice_rebuild(struct ice_pf *pf)
{
@@ -3740,7 +3796,7 @@ static void ice_rebuild(struct ice_pf *pf)
if (test_bit(__ICE_DOWN, pf->state))
goto clear_recovery;
- dev_dbg(dev, "rebuilding pf\n");
+ dev_dbg(dev, "rebuilding PF\n");
ret = ice_init_all_ctrlq(hw);
if (ret) {
@@ -3768,12 +3824,6 @@ static void ice_rebuild(struct ice_pf *pf)
ice_dcb_rebuild(pf);
- /* reset search_hint of irq_trackers to 0 since interrupts are
- * reclaimed and could be allocated from beginning during VSI rebuild
- */
- pf->sw_irq_tracker->search_hint = 0;
- pf->hw_irq_tracker->search_hint = 0;
-
err = ice_vsi_rebuild_all(pf);
if (err) {
dev_err(dev, "ice_vsi_rebuild_all failed\n");
@@ -3857,16 +3907,16 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
u8 count = 0;
if (new_mtu == netdev->mtu) {
- netdev_warn(netdev, "mtu is already %u\n", netdev->mtu);
+ netdev_warn(netdev, "MTU is already %u\n", netdev->mtu);
return 0;
}
if (new_mtu < netdev->min_mtu) {
- netdev_err(netdev, "new mtu invalid. min_mtu is %d\n",
+ netdev_err(netdev, "new MTU invalid. min_mtu is %d\n",
netdev->min_mtu);
return -EINVAL;
} else if (new_mtu > netdev->max_mtu) {
- netdev_err(netdev, "new mtu invalid. max_mtu is %d\n",
+ netdev_err(netdev, "new MTU invalid. max_mtu is %d\n",
netdev->min_mtu);
return -EINVAL;
}
@@ -3882,7 +3932,7 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
} while (count < 100);
if (count == 100) {
- netdev_err(netdev, "can't change mtu. Device is busy\n");
+ netdev_err(netdev, "can't change MTU. Device is busy\n");
return -EBUSY;
}
@@ -3894,18 +3944,18 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
err = ice_down(vsi);
if (err) {
- netdev_err(netdev, "change mtu if_up err %d\n", err);
+ netdev_err(netdev, "change MTU if_up err %d\n", err);
return err;
}
err = ice_up(vsi);
if (err) {
- netdev_err(netdev, "change mtu if_up err %d\n", err);
+ netdev_err(netdev, "change MTU if_up err %d\n", err);
return err;
}
}
- netdev_dbg(netdev, "changed mtu to %d\n", new_mtu);
+ netdev_info(netdev, "changed MTU to %d\n", new_mtu);
return 0;
}
@@ -4241,7 +4291,7 @@ static void ice_tx_timeout(struct net_device *netdev)
*
* Returns 0 on success, negative value on failure
*/
-static int ice_open(struct net_device *netdev)
+int ice_open(struct net_device *netdev)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
@@ -4278,7 +4328,7 @@ static int ice_open(struct net_device *netdev)
*
* Returns success only - not allowed to fail
*/
-static int ice_stop(struct net_device *netdev)
+int ice_stop(struct net_device *netdev)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index 62571d33d0d6..bcb431f1bd92 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -119,7 +119,7 @@ ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
status = ice_read_sr_aq(hw, offset, 1, data, true);
if (!status)
- *data = le16_to_cpu(*(__le16 *)data);
+ *data = le16_to_cpu(*(__force __le16 *)data);
return status;
}
@@ -174,7 +174,7 @@ ice_read_sr_buf_aq(struct ice_hw *hw, u16 offset, u16 *words, u16 *data)
} while (words_read < *words);
for (i = 0; i < *words; i++)
- data[i] = le16_to_cpu(((__le16 *)data)[i]);
+ data[i] = le16_to_cpu(((__force __le16 *)data)[i]);
read_nvm_buf_aq_exit:
*words = words_read;
@@ -316,3 +316,34 @@ ice_read_sr_buf(struct ice_hw *hw, u16 offset, u16 *words, u16 *data)
return status;
}
+
+/**
+ * ice_nvm_validate_checksum
+ * @hw: pointer to the HW struct
+ *
+ * Verify NVM PFA checksum validity (0x0706)
+ */
+enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw)
+{
+ struct ice_aqc_nvm_checksum *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ status = ice_acquire_nvm(hw, ICE_RES_READ);
+ if (status)
+ return status;
+
+ cmd = &desc.params.nvm_checksum;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_checksum);
+ cmd->flags = ICE_AQC_NVM_CHECKSUM_VERIFY;
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+ ice_release_nvm(hw);
+
+ if (!status)
+ if (le16_to_cpu(cmd->checksum) != ICE_AQC_NVM_CHECKSUM_CORRECT)
+ status = ICE_ERR_NVM_CHECKSUM;
+
+ return status;
+}
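/* Hypothetical caller sketch, assuming an init-time hw/dev pair; the helper
 * name is made up and only shows how ICE_ERR_NVM_CHECKSUM from the new
 * ice_nvm_validate_checksum() might be surfaced.
 */
static int ice_example_verify_nvm(struct ice_hw *hw, struct device *dev)
{
	enum ice_status status = ice_nvm_validate_checksum(hw);

	if (status == ICE_ERR_NVM_CHECKSUM)
		dev_err(dev, "NVM PFA checksum verification failed\n");

	return status ? -EIO : 0;
}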
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index 8d49f83be7a5..2a232504379d 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -683,10 +683,10 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
u16 i, num_groups_added = 0;
enum ice_status status = 0;
struct ice_hw *hw = pi->hw;
- u16 buf_size;
+ size_t buf_size;
u32 teid;
- buf_size = sizeof(*buf) + sizeof(*buf->generic) * (num_nodes - 1);
+ buf_size = struct_size(buf, generic, num_nodes - 1);
buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
if (!buf)
return ICE_ERR_NO_MEMORY;
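/* For reference, struct_size() from <linux/overflow.h> evaluates to the same
 * size the removed expression computed, while saturating instead of wrapping
 * on overflow:
 *
 *	struct_size(buf, generic, num_nodes - 1)
 *		== sizeof(*buf) + (num_nodes - 1) * sizeof(*buf->generic)
 *
 * so the devm_kzalloc() above is unchanged for in-range num_nodes values.
 */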
diff --git a/drivers/net/ethernet/intel/ice/ice_status.h b/drivers/net/ethernet/intel/ice/ice_status.h
index 17afe6acb18a..c01597885629 100644
--- a/drivers/net/ethernet/intel/ice/ice_status.h
+++ b/drivers/net/ethernet/intel/ice/ice_status.h
@@ -26,6 +26,7 @@ enum ice_status {
ICE_ERR_IN_USE = -16,
ICE_ERR_MAX_LIMIT = -17,
ICE_ERR_RESET_ONGOING = -18,
+ ICE_ERR_NVM_CHECKSUM = -51,
ICE_ERR_BUF_TOO_SHORT = -52,
ICE_ERR_NVM_BLANK_MODE = -53,
ICE_ERR_AQ_ERROR = -100,
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 9f1f595ae7e6..8271fd651725 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -799,7 +799,7 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
daddr = f_info->l_data.ethertype_mac.mac_addr;
/* fall-through */
case ICE_SW_LKUP_ETHERTYPE:
- off = (__be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
+ off = (__force __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
*off = cpu_to_be16(f_info->l_data.ethertype_mac.ethertype);
break;
case ICE_SW_LKUP_MAC_VLAN:
@@ -829,7 +829,7 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
ether_addr_copy(eth_hdr + ICE_ETH_DA_OFFSET, daddr);
if (!(vlan_id > ICE_MAX_VLAN_ID)) {
- off = (__be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
+ off = (__force __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
*off = cpu_to_be16(vlan_id);
}
@@ -1973,6 +1973,10 @@ ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
* ice_add_eth_mac - Add ethertype and MAC based filter rule
* @hw: pointer to the hardware structure
* @em_list: list of ether type MAC filter, MAC is optional
+ *
+ * This function requires the caller to populate the entries in
+ * the filter list with the necessary fields (including flags to
+ * indicate Tx or Rx rules).
*/
enum ice_status
ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list)
@@ -1990,7 +1994,6 @@ ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list)
l_type != ICE_SW_LKUP_ETHERTYPE)
return ICE_ERR_PARAM;
- em_list_itr->fltr_info.flag = ICE_FLTR_TX;
em_list_itr->status = ice_add_rule_internal(hw, l_type,
em_list_itr);
if (em_list_itr->status)
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index 732b0b9b2e15..cb123fbe30be 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -8,9 +8,11 @@
#define ICE_SW_CFG_MAX_BUF_LEN 2048
#define ICE_DFLT_VSI_INVAL 0xff
+#define ICE_FLTR_RX BIT(0)
+#define ICE_FLTR_TX BIT(1)
+#define ICE_FLTR_TX_RX (ICE_FLTR_RX | ICE_FLTR_TX)
#define ICE_VSI_INVAL_ID 0xffff
#define ICE_INVAL_Q_HANDLE 0xFFFF
-#define ICE_INVAL_Q_HANDLE 0xFFFF
/* VSI queue context structure */
struct ice_q_ctx {
@@ -69,9 +71,6 @@ struct ice_fltr_info {
/* rule ID returned by firmware once filter rule is created */
u16 fltr_rule_id;
u16 flag;
-#define ICE_FLTR_RX BIT(0)
-#define ICE_FLTR_TX BIT(1)
-#define ICE_FLTR_TX_RX (ICE_FLTR_RX | ICE_FLTR_TX)
/* Source VSI for LOOKUP_TX or source port for LOOKUP_RX */
u16 src;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 2364eaf33d23..3c83230434b6 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -55,7 +55,7 @@ void ice_clean_tx_ring(struct ice_ring *tx_ring)
if (!tx_ring->tx_buf)
return;
- /* Free all the Tx ring sk_bufss */
+ /* Free all the Tx ring sk_buffs */
for (i = 0; i < tx_ring->count; i++)
ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);
@@ -1101,7 +1101,7 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
* ice_adjust_itr_by_size_and_speed - Adjust ITR based on current traffic
* @port_info: port_info structure containing the current link speed
* @avg_pkt_size: average size of Tx or Rx packets based on clean routine
- * @itr: itr value to update
+ * @itr: ITR value to update
*
* Calculate how big of an increment should be applied to the ITR value passed
* in based on wmem_default, SKB overhead, Ethernet overhead, and the current
@@ -1316,7 +1316,7 @@ clear_counts:
*/
static u32 ice_buildreg_itr(u16 itr_idx, u16 itr)
{
- /* The itr value is reported in microseconds, and the register value is
+ /* The ITR value is reported in microseconds, and the register value is
* recorded in 2 microsecond units. For this reason we only need to
* shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this
* granularity as a shift instead of division. The mask makes sure the
@@ -1645,7 +1645,7 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
return;
dma_error:
- /* clear dma mappings for failed tx_buf map */
+ /* clear DMA mappings for failed tx_buf map */
for (;;) {
tx_buf = &tx_ring->tx_buf[i];
ice_unmap_and_free_tx_buf(tx_ring, tx_buf);
@@ -1874,10 +1874,10 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
cd_mss = skb_shinfo(skb)->gso_size;
/* record cdesc_qw1 with TSO parameters */
- off->cd_qw1 |= ICE_TX_DESC_DTYPE_CTX |
- (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) |
- (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
- (cd_mss << ICE_TXD_CTX_QW1_MSS_S);
+ off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
+ (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) |
+ (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
+ (cd_mss << ICE_TXD_CTX_QW1_MSS_S));
first->tx_flags |= ICE_TX_FLAGS_TSO;
return 1;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index 66e05032ee56..ec76aba347b9 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -58,19 +58,19 @@ struct ice_tx_buf {
unsigned int bytecount;
unsigned short gso_segs;
u32 tx_flags;
- DEFINE_DMA_UNMAP_ADDR(dma);
DEFINE_DMA_UNMAP_LEN(len);
+ DEFINE_DMA_UNMAP_ADDR(dma);
};
struct ice_tx_offload_params {
- u8 header_len;
+ u64 cd_qw1;
+ struct ice_ring *tx_ring;
u32 td_cmd;
u32 td_offset;
u32 td_l2tag1;
- u16 cd_l2tag2;
u32 cd_tunnel_params;
- u64 cd_qw1;
- struct ice_ring *tx_ring;
+ u16 cd_l2tag2;
+ u8 header_len;
};
struct ice_rx_buf {
@@ -150,6 +150,7 @@ enum ice_rx_dtype {
/* descriptor ring, associated with a VSI */
struct ice_ring {
+ /* CL1 - 1st cacheline starts here */
struct ice_ring *next; /* pointer to next ring in q_vector */
void *desc; /* Descriptor ring memory */
struct device *dev; /* Used for DMA mapping */
@@ -161,11 +162,11 @@ struct ice_ring {
struct ice_tx_buf *tx_buf;
struct ice_rx_buf *rx_buf;
};
+ /* CL2 - 2nd cacheline starts here */
u16 q_index; /* Queue number of ring */
- u32 txq_teid; /* Added Tx queue TEID */
-#ifdef CONFIG_DCB
- u8 dcb_tc; /* Traffic class of ring */
-#endif /* CONFIG_DCB */
+ u16 q_handle; /* Queue handle per TC */
+
+ u8 ring_active:1; /* is ring online or not */
u16 count; /* Number of descriptors */
u16 reg_idx; /* HW register index of the ring */
@@ -173,8 +174,7 @@ struct ice_ring {
/* used in interrupt processing */
u16 next_to_use;
u16 next_to_clean;
-
- u8 ring_active; /* is ring online or not */
+ u16 next_to_alloc;
/* stats structs */
struct ice_q_stats stats;
@@ -184,10 +184,17 @@ struct ice_ring {
struct ice_rxq_stats rx_stats;
};
- unsigned int size; /* length of descriptor ring in bytes */
- dma_addr_t dma; /* physical address of ring */
struct rcu_head rcu; /* to avoid race on free */
- u16 next_to_alloc;
+ /* CLX - the below items are only accessed infrequently and should be
+ * in their own cache line if possible
+ */
+ dma_addr_t dma; /* physical address of ring */
+ unsigned int size; /* length of descriptor ring in bytes */
+ u32 txq_teid; /* Added Tx queue TEID */
+ u16 rx_buf_len;
+#ifdef CONFIG_DCB
+ u8 dcb_tc; /* Traffic class of ring */
+#endif /* CONFIG_DCB */
} ____cacheline_internodealigned_in_smp;
struct ice_ring_container {
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index a862af4cbf78..24bbef8bbe69 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -23,6 +23,7 @@ static inline bool ice_is_tc_ena(u8 bitmap, u8 tc)
/* debug masks - set these bits in hw->debug_mask to control output */
#define ICE_DBG_INIT BIT_ULL(1)
+#define ICE_DBG_FW_LOG BIT_ULL(3)
#define ICE_DBG_LINK BIT_ULL(4)
#define ICE_DBG_PHY BIT_ULL(5)
#define ICE_DBG_QCTX BIT_ULL(6)
@@ -61,6 +62,13 @@ enum ice_fc_mode {
ICE_FC_DFLT
};
+enum ice_fec_mode {
+ ICE_FEC_NONE = 0,
+ ICE_FEC_RS,
+ ICE_FEC_BASER,
+ ICE_FEC_AUTO
+};
+
enum ice_set_fc_aq_failures {
ICE_SET_FC_AQ_FAIL_NONE = 0,
ICE_SET_FC_AQ_FAIL_GET,
@@ -86,12 +94,14 @@ enum ice_media_type {
enum ice_vsi_type {
ICE_VSI_PF = 0,
ICE_VSI_VF,
+ ICE_VSI_LB = 6,
};
struct ice_link_status {
/* Refer to ice_aq_phy_type for bits definition */
u64 phy_type_low;
u64 phy_type_high;
+ u8 topo_media_conflict;
u16 max_frame_size;
u16 link_speed;
u16 req_speeds;
@@ -99,6 +109,7 @@ struct ice_link_status {
u8 link_info;
u8 an_info;
u8 ext_info;
+ u8 fec_info;
u8 pacing;
/* Refer to #define from module_type[ICE_MODULE_TYPE_TOTAL_BYTE] of
* ice_aqc_get_phy_caps structure
@@ -423,7 +434,7 @@ struct ice_hw {
struct ice_fw_log_cfg fw_log;
/* Device max aggregate bandwidths corresponding to the GL_PWR_MODE_CTL
- * register. Used for determining the itr/intrl granularity during
+ * register. Used for determining the ITR/intrl granularity during
* initialization.
*/
#define ICE_MAX_AGG_BW_200G 0x0
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index a805cbdd69be..5d24b539648f 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -103,7 +103,7 @@ ice_set_pfe_link_forced(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
u16 link_speed;
if (link_up)
- link_speed = ICE_AQ_LINK_SPEED_40GB;
+ link_speed = ICE_AQ_LINK_SPEED_100GB;
else
link_speed = ICE_AQ_LINK_SPEED_UNKNOWN;
@@ -141,32 +141,20 @@ static void ice_vc_notify_vf_link_state(struct ice_vf *vf)
}
/**
- * ice_get_vf_vector - get VF interrupt vector register offset
- * @vf_msix: number of MSIx vector per VF on a PF
- * @vf_id: VF identifier
- * @i: index of MSIx vector
- */
-static u32 ice_get_vf_vector(int vf_msix, int vf_id, int i)
-{
- return ((i == 0) ? VFINT_DYN_CTLN(vf_id) :
- VFINT_DYN_CTLN(((vf_msix - 1) * (vf_id)) + (i - 1)));
-}
-
-/**
* ice_free_vf_res - Free a VF's resources
* @vf: pointer to the VF info
*/
static void ice_free_vf_res(struct ice_vf *vf)
{
struct ice_pf *pf = vf->pf;
- int i, pf_vf_msix;
+ int i, last_vector_idx;
/* First, disable VF's configuration API to prevent OS from
* accessing the VF's VSI after it's freed or invalidated.
*/
clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
- /* free vsi & disconnect it from the parent uplink */
+ /* free VSI and disconnect it from the parent uplink */
if (vf->lan_vsi_idx) {
ice_vsi_release(pf->vsi[vf->lan_vsi_idx]);
vf->lan_vsi_idx = 0;
@@ -174,13 +162,10 @@ static void ice_free_vf_res(struct ice_vf *vf)
vf->num_mac = 0;
}
- pf_vf_msix = pf->num_vf_msix;
+ last_vector_idx = vf->first_vector_idx + pf->num_vf_msix - 1;
/* Disable interrupts so that VF starts in a known state */
- for (i = 0; i < pf_vf_msix; i++) {
- u32 reg_idx;
-
- reg_idx = ice_get_vf_vector(pf_vf_msix, vf->vf_id, i);
- wr32(&pf->hw, reg_idx, VFINT_DYN_CTLN_CLEARPBA_M);
+ for (i = vf->first_vector_idx; i <= last_vector_idx; i++) {
+ wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M);
ice_flush(&pf->hw);
}
/* reset some of the state variables keeping track of the resources */
@@ -205,8 +190,7 @@ static void ice_dis_vf_mappings(struct ice_vf *vf)
wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);
- first = vf->first_vector_idx +
- hw->func_caps.common_cap.msix_vector_first_id;
+ first = vf->first_vector_idx;
last = first + pf->num_vf_msix - 1;
for (v = first; v <= last; v++) {
u32 reg;
@@ -232,6 +216,42 @@ static void ice_dis_vf_mappings(struct ice_vf *vf)
}
/**
+ * ice_sriov_free_msix_res - Reset/free any used MSIX resources
+ * @pf: pointer to the PF structure
+ *
+ * If MSIX entries from the pf->irq_tracker were needed then we need to
+ * reset the irq_tracker->end and give back the entries we needed to
+ * num_avail_sw_msix.
+ *
+ * If no MSIX entries were taken from the pf->irq_tracker then just clear
+ * the pf->sriov_base_vector.
+ *
+ * Returns 0 on success, and -EINVAL on error.
+ */
+static int ice_sriov_free_msix_res(struct ice_pf *pf)
+{
+ struct ice_res_tracker *res;
+
+ if (!pf)
+ return -EINVAL;
+
+ res = pf->irq_tracker;
+ if (!res)
+ return -EINVAL;
+
+ /* give back irq_tracker resources used */
+ if (pf->sriov_base_vector < res->num_entries) {
+ res->end = res->num_entries;
+ pf->num_avail_sw_msix +=
+ res->num_entries - pf->sriov_base_vector;
+ }
+
+ pf->sriov_base_vector = 0;
+
+ return 0;
+}
+
+/**
* ice_free_vfs - Free all VFs
* @pf: pointer to the PF structure
*/
@@ -246,15 +266,6 @@ void ice_free_vfs(struct ice_pf *pf)
while (test_and_set_bit(__ICE_VF_DIS, pf->state))
usleep_range(1000, 2000);
- /* Disable IOV before freeing resources. This lets any VF drivers
- * running in the host get themselves cleaned up before we yank
- * the carpet out from underneath their feet.
- */
- if (!pci_vfs_assigned(pf->pdev))
- pci_disable_sriov(pf->pdev);
- else
- dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
-
/* Avoid wait time by stopping all VFs at the same time */
for (i = 0; i < pf->num_alloc_vfs; i++) {
struct ice_vsi *vsi;
@@ -270,6 +281,15 @@ void ice_free_vfs(struct ice_pf *pf)
clear_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states);
}
+ /* Disable IOV before freeing resources. This lets any VF drivers
+ * running in the host get themselves cleaned up before we yank
+ * the carpet out from underneath their feet.
+ */
+ if (!pci_vfs_assigned(pf->pdev))
+ pci_disable_sriov(pf->pdev);
+ else
+ dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
+
tmp = pf->num_alloc_vfs;
pf->num_vf_qps = 0;
pf->num_alloc_vfs = 0;
@@ -288,6 +308,10 @@ void ice_free_vfs(struct ice_pf *pf)
}
}
+ if (ice_sriov_free_msix_res(pf))
+ dev_err(&pf->pdev->dev,
+ "Failed to free MSIX resources used by SR-IOV\n");
+
devm_kfree(&pf->pdev->dev, pf->vf);
pf->vf = NULL;
@@ -457,6 +481,22 @@ ice_vf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, u16 vf_id)
}
/**
+ * ice_calc_vf_first_vector_idx - Calculate absolute MSIX vector index in HW
+ * @pf: pointer to PF structure
+ * @vf: pointer to VF that the first MSIX vector index is being calculated for
+ *
+ * This returns the first MSIX vector index in HW that is used by this VF, and
+ * this will always be the OICR index in the AVF driver, so any functionality
+ * using vf->first_vector_idx for queue configuration will have to increment by
+ * 1 to avoid meddling with the OICR index.
+ */
+static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
+{
+ return pf->hw.func_caps.common_cap.msix_vector_first_id +
+ pf->sriov_base_vector + vf->vf_id * pf->num_vf_msix;
+}
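/* Worked example with hypothetical numbers (none come from this patch):
 * with msix_vector_first_id = 0, sriov_base_vector = 96 and
 * pf->num_vf_msix = 5, VF 0 starts at absolute vector 96 (its OICR),
 * VF 1 at 101 and VF 2 at 106; each VF's queue vectors then follow at +1.
 */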
+
+/**
* ice_alloc_vsi_res - Setup VF VSI and its resources
* @vf: pointer to the VF structure
*
@@ -470,8 +510,10 @@ static int ice_alloc_vsi_res(struct ice_vf *vf)
struct ice_vsi *vsi;
int status = 0;
- vsi = ice_vf_vsi_setup(pf, pf->hw.port_info, vf->vf_id);
+ /* first vector index is the VFs OICR index */
+ vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);
+ vsi = ice_vf_vsi_setup(pf, pf->hw.port_info, vf->vf_id);
if (!vsi) {
dev_err(&pf->pdev->dev, "Failed to create VF VSI\n");
return -ENOMEM;
@@ -480,14 +522,6 @@ static int ice_alloc_vsi_res(struct ice_vf *vf)
vf->lan_vsi_idx = vsi->idx;
vf->lan_vsi_num = vsi->vsi_num;
- /* first vector index is the VFs OICR index */
- vf->first_vector_idx = vsi->hw_base_vector;
- /* Since hw_base_vector holds the vector where data queue interrupts
- * starts, increment by 1 since VFs allocated vectors include OICR intr
- * as well.
- */
- vsi->hw_base_vector += 1;
-
/* Check if port VLAN exist before, and restore it accordingly */
if (vf->port_vlan_id) {
ice_vsi_manage_pvid(vsi, vf->port_vlan_id, true);
@@ -580,8 +614,7 @@ static void ice_ena_vf_mappings(struct ice_vf *vf)
hw = &pf->hw;
vsi = pf->vsi[vf->lan_vsi_idx];
- first = vf->first_vector_idx +
- hw->func_caps.common_cap.msix_vector_first_id;
+ first = vf->first_vector_idx;
last = (first + pf->num_vf_msix) - 1;
abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
@@ -687,6 +720,97 @@ ice_determine_res(struct ice_pf *pf, u16 avail_res, u16 max_res, u16 min_res)
}
/**
+ * ice_calc_vf_reg_idx - Calculate the VF's register index in the PF space
+ * @vf: VF to calculate the register index for
+ * @q_vector: a q_vector associated to the VF
+ */
+int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
+{
+ struct ice_pf *pf;
+
+ if (!vf || !q_vector)
+ return -EINVAL;
+
+ pf = vf->pf;
+
+ /* always add one to account for the OICR being the first MSIX */
+ return pf->sriov_base_vector + pf->num_vf_msix * vf->vf_id +
+ q_vector->v_idx + 1;
+}
+
+/**
+ * ice_get_max_valid_res_idx - Get the max valid resource index
+ * @res: pointer to the resource to find the max valid index for
+ *
+ * Start from the end of the ice_res_tracker and return right when we find the
+ * first res->list entry with the ICE_RES_VALID_BIT set. This function is only
+ * valid for SR-IOV because it is the only consumer that manipulates the
+ * res->end and this is always called when res->end is set to res->num_entries.
+ */
+static int ice_get_max_valid_res_idx(struct ice_res_tracker *res)
+{
+ int i;
+
+ if (!res)
+ return -EINVAL;
+
+ for (i = res->num_entries - 1; i >= 0; i--)
+ if (res->list[i] & ICE_RES_VALID_BIT)
+ return i;
+
+ return 0;
+}
+
+/**
+ * ice_sriov_set_msix_res - Set any used MSIX resources
+ * @pf: pointer to PF structure
+ * @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs
+ *
+ * This function allows SR-IOV resources to be taken from the end of the PF's
+ * allowed HW MSIX vectors so in many cases the irq_tracker will not
+ * be needed. In these cases we just set the pf->sriov_base_vector and return
+ * success.
+ *
+ * If SR-IOV needs to use any pf->irq_tracker entries it updates the
+ * irq_tracker->end based on the first entry needed for SR-IOV. This makes it
+ * so any calls to ice_get_res() using the irq_tracker will not try to use
+ * resources at or beyond the newly set value.
+ *
+ * Return 0 on success, and -EINVAL when there are not enough MSIX vectors
+ * in the PF's space available for SR-IOV.
+ */
+static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
+{
+ int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
+ u16 pf_total_msix_vectors =
+ pf->hw.func_caps.common_cap.num_msix_vectors;
+ struct ice_res_tracker *res = pf->irq_tracker;
+ int sriov_base_vector;
+
+ if (max_valid_res_idx < 0)
+ return max_valid_res_idx;
+
+ sriov_base_vector = pf_total_msix_vectors - num_msix_needed;
+
+ /* make sure we only grab irq_tracker entries from the list end and
+ * that we have enough available MSIX vectors
+ */
+ if (sriov_base_vector <= max_valid_res_idx)
+ return -EINVAL;
+
+ pf->sriov_base_vector = sriov_base_vector;
+
+ /* dip into irq_tracker entries and update used resources */
+ if (num_msix_needed > (pf_total_msix_vectors - res->num_entries)) {
+ pf->num_avail_sw_msix -=
+ res->num_entries - pf->sriov_base_vector;
+ res->end = pf->sriov_base_vector;
+ }
+
+ return 0;
+}
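/* Worked example with hypothetical numbers: with 1024 function-wide MSIX
 * vectors and res->num_entries = 512 OS-granted entries, asking for
 * num_msix_needed = 320 puts sriov_base_vector at 704, beyond the tracker,
 * so res->end stays at num_entries. Asking for 600 instead puts the base at
 * 424 (assuming the highest in-use tracker entry sits below 424), pulls
 * res->end back to 424 and shrinks num_avail_sw_msix by 512 - 424 = 88.
 */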
+
+/**
* ice_check_avail_res - check if vectors and queues are available
* @pf: pointer to the PF structure
*
@@ -696,11 +820,16 @@ ice_determine_res(struct ice_pf *pf, u16 avail_res, u16 max_res, u16 min_res)
*/
static int ice_check_avail_res(struct ice_pf *pf)
{
- u16 num_msix, num_txq, num_rxq;
+ int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
+ u16 num_msix, num_txq, num_rxq, num_avail_msix;
- if (!pf->num_alloc_vfs)
+ if (!pf->num_alloc_vfs || max_valid_res_idx < 0)
return -EINVAL;
+ /* add 1 to max_valid_res_idx to account for it being 0-based */
+ num_avail_msix = pf->hw.func_caps.common_cap.num_msix_vectors -
+ (max_valid_res_idx + 1);
+
/* Grab from HW interrupts common pool
* Note: By the time the user decides it needs more vectors in a VF
* its already too late since one must decide this prior to creating the
@@ -717,11 +846,11 @@ static int ice_check_avail_res(struct ice_pf *pf)
* grab default interrupt vectors (5 as supported by AVF driver).
*/
if (pf->num_alloc_vfs <= 16) {
- num_msix = ice_determine_res(pf, pf->num_avail_hw_msix,
+ num_msix = ice_determine_res(pf, num_avail_msix,
ICE_MAX_INTR_PER_VF,
ICE_MIN_INTR_PER_VF);
} else if (pf->num_alloc_vfs <= ICE_MAX_VF_COUNT) {
- num_msix = ice_determine_res(pf, pf->num_avail_hw_msix,
+ num_msix = ice_determine_res(pf, num_avail_msix,
ICE_DFLT_INTR_PER_VF,
ICE_MIN_INTR_PER_VF);
} else {
@@ -750,6 +879,9 @@ static int ice_check_avail_res(struct ice_pf *pf)
if (!num_txq || !num_rxq)
return -EIO;
+ if (ice_sriov_set_msix_res(pf, num_msix * pf->num_alloc_vfs))
+ return -EINVAL;
+
/* since AVF driver works with only queue pairs which means, it expects
* to have equal number of Rx and Tx queues, so take the minimum of
* available Tx or Rx queues
@@ -938,6 +1070,10 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
vf->num_vf_qs = 0;
}
+ if (ice_sriov_free_msix_res(pf))
+ dev_err(&pf->pdev->dev,
+ "Failed to free MSIX resources used by SR-IOV\n");
+
if (ice_check_avail_res(pf)) {
dev_err(&pf->pdev->dev,
"Cannot allocate VF resources, try with fewer number of VFs\n");
@@ -1119,7 +1255,7 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
int i, ret;
/* Disable global interrupt 0 so we don't try to handle the VFLR. */
- wr32(hw, GLINT_DYN_CTL(pf->hw_oicr_idx),
+ wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
ice_flush(hw);
@@ -1134,7 +1270,7 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
GFP_KERNEL);
if (!vfs) {
ret = -ENOMEM;
- goto err_unroll_sriov;
+ goto err_pci_disable_sriov;
}
pf->vf = vfs;
@@ -1154,12 +1290,19 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
pf->num_alloc_vfs = num_alloc_vfs;
/* VF resources get allocated during reset */
- if (!ice_reset_all_vfs(pf, true))
+ if (!ice_reset_all_vfs(pf, true)) {
+ ret = -EIO;
goto err_unroll_sriov;
+ }
goto err_unroll_intr;
err_unroll_sriov:
+ pf->vf = NULL;
+ devm_kfree(&pf->pdev->dev, vfs);
+ vfs = NULL;
+ pf->num_alloc_vfs = 0;
+err_pci_disable_sriov:
pci_disable_sriov(pf->pdev);
err_unroll_intr:
/* rearm interrupts here */
@@ -1168,8 +1311,8 @@ err_unroll_intr:
}
/**
- * ice_pf_state_is_nominal - checks the pf for nominal state
- * @pf: pointer to pf to check
+ * ice_pf_state_is_nominal - checks the PF for nominal state
+ * @pf: pointer to PF to check
*
* Check the PF's state for a collection of bits that would indicate
* the PF is in a state that would inhibit normal operation for
@@ -1496,7 +1639,7 @@ static void ice_vc_reset_vf_msg(struct ice_vf *vf)
/**
* ice_find_vsi_from_id
- * @pf: the pf structure to search for the VSI
+ * @pf: the PF structure to search for the VSI
* @id: ID of the VSI it is searching for
*
* searches for the VSI with the given ID
@@ -1807,28 +1950,37 @@ error_param:
static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
{
enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct virtchnl_irq_map_info *irqmap_info =
- (struct virtchnl_irq_map_info *)msg;
+ struct virtchnl_irq_map_info *irqmap_info;
u16 vsi_id, vsi_q_id, vector_id;
struct virtchnl_vector_map *map;
- struct ice_vsi *vsi = NULL;
struct ice_pf *pf = vf->pf;
+ u16 num_q_vectors_mapped;
+ struct ice_vsi *vsi;
unsigned long qmap;
- u16 num_q_vectors;
int i;
- num_q_vectors = irqmap_info->num_vectors - ICE_NONQ_VECS_VF;
+ irqmap_info = (struct virtchnl_irq_map_info *)msg;
+ num_q_vectors_mapped = irqmap_info->num_vectors;
+
vsi = pf->vsi[vf->lan_vsi_idx];
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+ /* Check to make sure number of VF vectors mapped is not greater than
+ * number of VF vectors originally allocated, and check that
+ * there is actually at least a single VF queue vector mapped
+ */
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
- !vsi || vsi->num_q_vectors < num_q_vectors ||
- irqmap_info->num_vectors == 0) {
+ pf->num_vf_msix < num_q_vectors_mapped ||
+ !irqmap_info->num_vectors) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- for (i = 0; i < num_q_vectors; i++) {
- struct ice_q_vector *q_vector = vsi->q_vectors[i];
+ for (i = 0; i < num_q_vectors_mapped; i++) {
+ struct ice_q_vector *q_vector;
map = &irqmap_info->vecmap[i];
@@ -1836,7 +1988,21 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
vsi_id = map->vsi_id;
/* validate msg params */
if (!(vector_id < pf->hw.func_caps.common_cap
- .num_msix_vectors) || !ice_vc_isvalid_vsi_id(vf, vsi_id)) {
+ .num_msix_vectors) || !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
+ (!vector_id && (map->rxq_map || map->txq_map))) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* No need to map VF miscellaneous or rogue vector */
+ if (!vector_id)
+ continue;
+
+ /* Subtract the non-queue vector from the vector_id passed by the VF
+ * to get the actual VSI queue vector array index
+ */
+ q_vector = vsi->q_vectors[vector_id - ICE_NONQ_VECS_VF];
+ if (!q_vector) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
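/* Sketch of the resulting index mapping, assuming ICE_NONQ_VECS_VF == 1
 * (one non-queue/OICR vector per VF): a VF-relative vector_id of 0 is
 * skipped above, and vector_id N with N >= 1 selects vsi->q_vectors[N - 1].
 */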
@@ -1852,6 +2018,8 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
q_vector->num_ring_rx++;
q_vector->rx.itr_idx = map->rxitr_idx;
vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
+ ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id,
+ q_vector->rx.itr_idx);
}
qmap = map->txq_map;
@@ -1864,11 +2032,11 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
q_vector->num_ring_tx++;
q_vector->tx.itr_idx = map->txitr_idx;
vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
+ ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id,
+ q_vector->tx.itr_idx);
}
}
- if (vsi)
- ice_vsi_cfg_msix(vsi);
error_param:
/* send the response to the VF */
return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret,
@@ -1903,9 +2071,8 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
}
vsi = pf->vsi[vf->lan_vsi_idx];
- if (!vsi) {
+ if (!vsi)
goto error_param;
- }
if (qci->num_queue_pairs > ICE_MAX_BASE_QS_PER_VF) {
dev_err(&pf->pdev->dev,
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
index 3725aea16840..c3ca522c245a 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
@@ -49,29 +49,34 @@ struct ice_vf {
struct ice_pf *pf;
s16 vf_id; /* VF ID in the PF space */
- u32 driver_caps; /* reported by VF driver */
+ u16 lan_vsi_idx; /* index into PF struct */
int first_vector_idx; /* first vector index of this VF */
struct ice_sw *vf_sw_id; /* switch ID the VF VSIs connect to */
struct virtchnl_version_info vf_ver;
+ u32 driver_caps; /* reported by VF driver */
struct virtchnl_ether_addr dflt_lan_addr;
u16 port_vlan_id;
- u8 pf_set_mac; /* VF MAC address set by VMM admin */
- u8 trusted;
- u16 lan_vsi_idx; /* index into PF struct */
+ u8 pf_set_mac:1; /* VF MAC address set by VMM admin */
+ u8 trusted:1;
+ u8 spoofchk:1;
+ u8 link_forced:1;
+ u8 link_up:1; /* only valid if VF link is forced */
+ /* VSI indices - actual VSI pointers are maintained in the PF structure
+ * When assigned, these will be non-zero, because VSI 0 is always
+ * the main LAN VSI for the PF.
+ */
u16 lan_vsi_num; /* ID as used by firmware */
+ unsigned int tx_rate; /* Tx bandwidth limit in Mbps */
+ DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */
+
u64 num_mdd_events; /* number of MDD events detected */
u64 num_inval_msgs; /* number of continuous invalid msgs */
u64 num_valid_msgs; /* number of valid msgs detected */
unsigned long vf_caps; /* VF's adv. capabilities */
- DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */
- unsigned int tx_rate; /* Tx bandwidth limit in Mbps */
- u8 link_forced;
- u8 link_up; /* only valid if VF link is forced */
- u8 spoofchk;
+ u8 num_req_qs; /* num of queue pairs requested by VF */
u16 num_mac;
u16 num_vlan;
u16 num_vf_qs; /* num of queue configured per VF */
- u8 num_req_qs; /* num of queue pairs requested by VF */
};
#ifdef CONFIG_PCI_IOV
@@ -96,6 +101,8 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted);
int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state);
int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena);
+
+int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector);
#else /* CONFIG_PCI_IOV */
#define ice_process_vflr_event(pf) do {} while (0)
#define ice_free_vfs(pf) do {} while (0)
@@ -161,5 +168,11 @@ ice_set_vf_link_state(struct net_device __always_unused *netdev,
return -EOPNOTSUPP;
}
+static inline int
+ice_calc_vf_reg_idx(struct ice_vf __always_unused *vf,
+ struct ice_q_vector __always_unused *q_vector)
+{
+ return 0;
+}
#endif /* CONFIG_PCI_IOV */
#endif /* _ICE_VIRTCHNL_PF_H_ */
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index bafdcf70a353..3ec2ce0725d5 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -638,7 +638,7 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
dev_spec->sgmii_active = true;
break;
}
- /* fall through for I2C based SGMII */
+ /* fall through - for I2C based SGMII */
case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
/* read media type from SFP EEPROM */
ret_val = igb_set_sfp_media_type_82575(hw);
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index 0ad737d2f289..9cb49980ec2d 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -409,6 +409,8 @@ do { \
#define E1000_I210_TQAVCC(_n) (0x3004 + ((_n) * 0x40))
#define E1000_I210_TQAVHC(_n) (0x300C + ((_n) * 0x40))
+#define E1000_I210_RR2DCDELAY 0x5BF4
+
#define E1000_INVM_DATA_REG(_n) (0x12120 + 4*(_n))
#define E1000_INVM_SIZE 64 /* Number of INVM Data Registers */
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index c645d9e648e0..3182b059bf55 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -448,7 +448,7 @@ static void igb_set_msglevel(struct net_device *netdev, u32 data)
static int igb_get_regs_len(struct net_device *netdev)
{
-#define IGB_REGS_LEN 739
+#define IGB_REGS_LEN 740
return IGB_REGS_LEN * sizeof(u32);
}
@@ -675,41 +675,44 @@ static void igb_get_regs(struct net_device *netdev,
regs_buff[554] = adapter->stats.b2ogprc;
}
- if (hw->mac.type != e1000_82576)
- return;
- for (i = 0; i < 12; i++)
- regs_buff[555 + i] = rd32(E1000_SRRCTL(i + 4));
- for (i = 0; i < 4; i++)
- regs_buff[567 + i] = rd32(E1000_PSRTYPE(i + 4));
- for (i = 0; i < 12; i++)
- regs_buff[571 + i] = rd32(E1000_RDBAL(i + 4));
- for (i = 0; i < 12; i++)
- regs_buff[583 + i] = rd32(E1000_RDBAH(i + 4));
- for (i = 0; i < 12; i++)
- regs_buff[595 + i] = rd32(E1000_RDLEN(i + 4));
- for (i = 0; i < 12; i++)
- regs_buff[607 + i] = rd32(E1000_RDH(i + 4));
- for (i = 0; i < 12; i++)
- regs_buff[619 + i] = rd32(E1000_RDT(i + 4));
- for (i = 0; i < 12; i++)
- regs_buff[631 + i] = rd32(E1000_RXDCTL(i + 4));
-
- for (i = 0; i < 12; i++)
- regs_buff[643 + i] = rd32(E1000_TDBAL(i + 4));
- for (i = 0; i < 12; i++)
- regs_buff[655 + i] = rd32(E1000_TDBAH(i + 4));
- for (i = 0; i < 12; i++)
- regs_buff[667 + i] = rd32(E1000_TDLEN(i + 4));
- for (i = 0; i < 12; i++)
- regs_buff[679 + i] = rd32(E1000_TDH(i + 4));
- for (i = 0; i < 12; i++)
- regs_buff[691 + i] = rd32(E1000_TDT(i + 4));
- for (i = 0; i < 12; i++)
- regs_buff[703 + i] = rd32(E1000_TXDCTL(i + 4));
- for (i = 0; i < 12; i++)
- regs_buff[715 + i] = rd32(E1000_TDWBAL(i + 4));
- for (i = 0; i < 12; i++)
- regs_buff[727 + i] = rd32(E1000_TDWBAH(i + 4));
+ if (hw->mac.type == e1000_82576) {
+ for (i = 0; i < 12; i++)
+ regs_buff[555 + i] = rd32(E1000_SRRCTL(i + 4));
+ for (i = 0; i < 4; i++)
+ regs_buff[567 + i] = rd32(E1000_PSRTYPE(i + 4));
+ for (i = 0; i < 12; i++)
+ regs_buff[571 + i] = rd32(E1000_RDBAL(i + 4));
+ for (i = 0; i < 12; i++)
+ regs_buff[583 + i] = rd32(E1000_RDBAH(i + 4));
+ for (i = 0; i < 12; i++)
+ regs_buff[595 + i] = rd32(E1000_RDLEN(i + 4));
+ for (i = 0; i < 12; i++)
+ regs_buff[607 + i] = rd32(E1000_RDH(i + 4));
+ for (i = 0; i < 12; i++)
+ regs_buff[619 + i] = rd32(E1000_RDT(i + 4));
+ for (i = 0; i < 12; i++)
+ regs_buff[631 + i] = rd32(E1000_RXDCTL(i + 4));
+
+ for (i = 0; i < 12; i++)
+ regs_buff[643 + i] = rd32(E1000_TDBAL(i + 4));
+ for (i = 0; i < 12; i++)
+ regs_buff[655 + i] = rd32(E1000_TDBAH(i + 4));
+ for (i = 0; i < 12; i++)
+ regs_buff[667 + i] = rd32(E1000_TDLEN(i + 4));
+ for (i = 0; i < 12; i++)
+ regs_buff[679 + i] = rd32(E1000_TDH(i + 4));
+ for (i = 0; i < 12; i++)
+ regs_buff[691 + i] = rd32(E1000_TDT(i + 4));
+ for (i = 0; i < 12; i++)
+ regs_buff[703 + i] = rd32(E1000_TXDCTL(i + 4));
+ for (i = 0; i < 12; i++)
+ regs_buff[715 + i] = rd32(E1000_TDWBAL(i + 4));
+ for (i = 0; i < 12; i++)
+ regs_buff[727 + i] = rd32(E1000_TDWBAH(i + 4));
+ }
+
+ if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211)
+ regs_buff[739] = rd32(E1000_I210_RR2DCDELAY);
}
static int igb_get_eeprom_len(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 39f33afc479c..b4df3e319467 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -753,6 +753,7 @@ u32 igb_rd32(struct e1000_hw *hw, u32 reg)
struct net_device *netdev = igb->netdev;
hw->hw_addr = NULL;
netdev_err(netdev, "PCIe link lost\n");
+ WARN(1, "igb: Failed to read reg 0x%x!\n", reg);
}
return value;
@@ -2577,11 +2578,11 @@ static int igb_offload_cbs(struct igb_adapter *adapter,
#define VLAN_PRIO_FULL_MASK (0x07)
static int igb_parse_cls_flower(struct igb_adapter *adapter,
- struct tc_cls_flower_offload *f,
+ struct flow_cls_offload *f,
int traffic_class,
struct igb_nfc_filter *input)
{
- struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
struct flow_dissector *dissector = rule->match.dissector;
struct netlink_ext_ack *extack = f->common.extack;
@@ -2659,7 +2660,7 @@ static int igb_parse_cls_flower(struct igb_adapter *adapter,
}
static int igb_configure_clsflower(struct igb_adapter *adapter,
- struct tc_cls_flower_offload *cls_flower)
+ struct flow_cls_offload *cls_flower)
{
struct netlink_ext_ack *extack = cls_flower->common.extack;
struct igb_nfc_filter *filter, *f;
@@ -2721,7 +2722,7 @@ err_parse:
}
static int igb_delete_clsflower(struct igb_adapter *adapter,
- struct tc_cls_flower_offload *cls_flower)
+ struct flow_cls_offload *cls_flower)
{
struct igb_nfc_filter *filter;
int err;
@@ -2751,14 +2752,14 @@ out:
}
static int igb_setup_tc_cls_flower(struct igb_adapter *adapter,
- struct tc_cls_flower_offload *cls_flower)
+ struct flow_cls_offload *cls_flower)
{
switch (cls_flower->command) {
- case TC_CLSFLOWER_REPLACE:
+ case FLOW_CLS_REPLACE:
return igb_configure_clsflower(adapter, cls_flower);
- case TC_CLSFLOWER_DESTROY:
+ case FLOW_CLS_DESTROY:
return igb_delete_clsflower(adapter, cls_flower);
- case TC_CLSFLOWER_STATS:
+ case FLOW_CLS_STATS:
return -EOPNOTSUPP;
default:
return -EOPNOTSUPP;
@@ -2782,25 +2783,6 @@ static int igb_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
}
}
-static int igb_setup_tc_block(struct igb_adapter *adapter,
- struct tc_block_offload *f)
-{
- if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
- return -EOPNOTSUPP;
-
- switch (f->command) {
- case TC_BLOCK_BIND:
- return tcf_block_cb_register(f->block, igb_setup_tc_block_cb,
- adapter, adapter, f->extack);
- case TC_BLOCK_UNBIND:
- tcf_block_cb_unregister(f->block, igb_setup_tc_block_cb,
- adapter);
- return 0;
- default:
- return -EOPNOTSUPP;
- }
-}
-
static int igb_offload_txtime(struct igb_adapter *adapter,
struct tc_etf_qopt_offload *qopt)
{
@@ -2824,6 +2806,8 @@ static int igb_offload_txtime(struct igb_adapter *adapter,
return 0;
}
+static LIST_HEAD(igb_block_cb_list);
+
static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
@@ -2833,7 +2817,11 @@ static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type,
case TC_SETUP_QDISC_CBS:
return igb_offload_cbs(adapter, type_data);
case TC_SETUP_BLOCK:
- return igb_setup_tc_block(adapter, type_data);
+ return flow_block_cb_setup_simple(type_data,
+ &igb_block_cb_list,
+ igb_setup_tc_block_cb,
+ adapter, adapter, true);
+
case TC_SETUP_QDISC_ETF:
return igb_offload_txtime(adapter, type_data);
@@ -5687,6 +5675,7 @@ static void igb_tx_ctxtdesc(struct igb_ring *tx_ring,
*/
if (tx_ring->launchtime_enable) {
ts = ns_to_timespec64(first->skb->tstamp);
+ first->skb->tstamp = 0;
context_desc->seqnum_seed = cpu_to_le32(ts.tv_nsec / 32);
} else {
context_desc->seqnum_seed = 0;
@@ -6695,7 +6684,7 @@ static int __igb_notify_dca(struct device *dev, void *data)
igb_setup_dca(adapter);
break;
}
- /* Fall Through since DCA is disabled. */
+ /* Fall Through - since DCA is disabled. */
case DCA_PROVIDER_REMOVE:
if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
/* without this a class_device is left
diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c
index 51a8b8769c67..59258d791106 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.c
+++ b/drivers/net/ethernet/intel/igc/igc_base.c
@@ -10,50 +10,6 @@
#include "igc.h"
/**
- * igc_set_pcie_completion_timeout - set pci-e completion timeout
- * @hw: pointer to the HW structure
- */
-static s32 igc_set_pcie_completion_timeout(struct igc_hw *hw)
-{
- u32 gcr = rd32(IGC_GCR);
- u16 pcie_devctl2;
- s32 ret_val = 0;
-
- /* only take action if timeout value is defaulted to 0 */
- if (gcr & IGC_GCR_CMPL_TMOUT_MASK)
- goto out;
-
- /* if capabilities version is type 1 we can write the
- * timeout of 10ms to 200ms through the GCR register
- */
- if (!(gcr & IGC_GCR_CAP_VER2)) {
- gcr |= IGC_GCR_CMPL_TMOUT_10ms;
- goto out;
- }
-
- /* for version 2 capabilities we need to write the config space
- * directly in order to set the completion timeout value for
- * 16ms to 55ms
- */
- ret_val = igc_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
- &pcie_devctl2);
- if (ret_val)
- goto out;
-
- pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms;
-
- ret_val = igc_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
- &pcie_devctl2);
-out:
- /* disable completion timeout resend */
- gcr &= ~IGC_GCR_CMPL_TMOUT_RESEND;
-
- wr32(IGC_GCR, gcr);
-
- return ret_val;
-}
-
-/**
* igc_reset_hw_base - Reset hardware
* @hw: pointer to the HW structure
*
@@ -72,11 +28,6 @@ static s32 igc_reset_hw_base(struct igc_hw *hw)
if (ret_val)
hw_dbg("PCI-E Master disable polling has failed.\n");
- /* set the completion timeout for interface */
- ret_val = igc_set_pcie_completion_timeout(hw);
- if (ret_val)
- hw_dbg("PCI-E Set completion timeout has failed.\n");
-
hw_dbg("Masking off all interrupts\n");
wr32(IGC_IMC, 0xffffffff);
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index a9a30268de59..fc0ccfe38a20 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -5,8 +5,8 @@
#define _IGC_DEFINES_H_
/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
-#define REQ_TX_DESCRIPTOR_MULTIPLE 8
-#define REQ_RX_DESCRIPTOR_MULTIPLE 8
+#define REQ_TX_DESCRIPTOR_MULTIPLE 8
+#define REQ_RX_DESCRIPTOR_MULTIPLE 8
#define IGC_CTRL_EXT_DRV_LOAD 0x10000000 /* Drv loaded bit for FW */
@@ -29,12 +29,6 @@
/* Status of Master requests. */
#define IGC_STATUS_GIO_MASTER_ENABLE 0x00080000
-/* PCI Express Control */
-#define IGC_GCR_CMPL_TMOUT_MASK 0x0000F000
-#define IGC_GCR_CMPL_TMOUT_10ms 0x00001000
-#define IGC_GCR_CMPL_TMOUT_RESEND 0x00010000
-#define IGC_GCR_CAP_VER2 0x00040000
-
/* Receive Address
* Number of high/low register pairs in the RAR. The RAR (Receive Address
* Registers) holds the directed and multicast addresses that we monitor.
@@ -72,6 +66,9 @@
#define IGC_CONNSW_AUTOSENSE_EN 0x1
+/* As per the EAS the maximum supported size is 9.5KB (9728 bytes) */
+#define MAX_JUMBO_FRAME_SIZE 0x2600
+
/* PBA constants */
#define IGC_PBA_34K 0x0022
@@ -264,9 +261,6 @@
#define IGC_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */
#define IGC_TCTL_MULR 0x10000000 /* Multiple request support */
-#define IGC_CT_SHIFT 4
-#define IGC_COLLISION_THRESHOLD 15
-
/* Flow Control Constants */
#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001
#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100
@@ -398,7 +392,7 @@
#define IGC_MDIC_ERROR 0x40000000
#define IGC_MDIC_DEST 0x80000000
-#define IGC_N0_QUEUE -1
+#define IGC_N0_QUEUE -1
#define IGC_MAX_MAC_HDR_LEN 127
#define IGC_MAX_NETWORK_HDR_LEN 511
diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h
index 7c88b7bd4799..1039a224ac80 100644
--- a/drivers/net/ethernet/intel/igc/igc_hw.h
+++ b/drivers/net/ethernet/intel/igc/igc_hw.h
@@ -114,11 +114,8 @@ struct igc_nvm_operations {
struct igc_phy_operations {
s32 (*acquire)(struct igc_hw *hw);
- s32 (*check_polarity)(struct igc_hw *hw);
s32 (*check_reset_block)(struct igc_hw *hw);
s32 (*force_speed_duplex)(struct igc_hw *hw);
- s32 (*get_cfg_done)(struct igc_hw *hw);
- s32 (*get_cable_length)(struct igc_hw *hw);
s32 (*get_phy_info)(struct igc_hw *hw);
s32 (*read_reg)(struct igc_hw *hw, u32 address, u16 *data);
void (*release)(struct igc_hw *hw);
diff --git a/drivers/net/ethernet/intel/igc/igc_mac.c b/drivers/net/ethernet/intel/igc/igc_mac.c
index f7683d3ae47c..ba4646737288 100644
--- a/drivers/net/ethernet/intel/igc/igc_mac.c
+++ b/drivers/net/ethernet/intel/igc/igc_mac.c
@@ -8,7 +8,6 @@
#include "igc_hw.h"
/* forward declaration */
-static s32 igc_set_default_fc(struct igc_hw *hw);
static s32 igc_set_fc_watermarks(struct igc_hw *hw);
/**
@@ -96,13 +95,10 @@ s32 igc_setup_link(struct igc_hw *hw)
goto out;
/* If requested flow control is set to default, set flow control
- * based on the EEPROM flow control settings.
+ * to both 'rx' and 'tx' pause frames.
*/
- if (hw->fc.requested_mode == igc_fc_default) {
- ret_val = igc_set_default_fc(hw);
- if (ret_val)
- goto out;
- }
+ if (hw->fc.requested_mode == igc_fc_default)
+ hw->fc.requested_mode = igc_fc_full;
/* We want to save off the original Flow Control configuration just
* in case we get disconnected and then reconnected into a different
@@ -136,19 +132,6 @@ out:
}
/**
- * igc_set_default_fc - Set flow control default values
- * @hw: pointer to the HW structure
- *
- * Read the EEPROM for the default values for flow control and store the
- * values.
- */
-static s32 igc_set_default_fc(struct igc_hw *hw)
-{
- hw->fc.requested_mode = igc_fc_full;
- return 0;
-}
-
-/**
* igc_force_mac_fc - Force the MAC's flow control settings
* @hw: pointer to the HW structure
*
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 34fa0e60a780..93f3b4e6185b 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -72,6 +72,27 @@ void igc_reset(struct igc_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
struct igc_hw *hw = &adapter->hw;
+ struct igc_fc_info *fc = &hw->fc;
+ u32 pba, hwm;
+
+ /* Repartition PBA for greater than 9k MTU if required */
+ pba = IGC_PBA_34K;
+
+ /* flow control settings
+ * The high water mark must be low enough to fit one full frame
+ * after transmitting the pause frame. As such we must have enough
+ * space to allow for us to complete our current transmit and then
+ * receive the frame that is in progress from the link partner.
+ * Set it to:
+ * - the full Rx FIFO size minus one full Tx plus one full Rx frame
+ */
+ hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE);
+
+ fc->high_water = hwm & 0xFFFFFFF0; /* 16-byte granularity */
+ fc->low_water = fc->high_water - 16;
+ fc->pause_time = 0xFFFF;
+ fc->send_xon = 1;
+ fc->current_mode = fc->requested_mode;
hw->mac.ops.reset_hw(hw);
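/* Worked example for the watermarks above, assuming a max_frame_size of
 * 1522 bytes (a hypothetical standard-MTU value, not one set by this patch):
 * pba = IGC_PBA_34K -> 34 * 1024 = 34816 bytes of packet buffer, so
 * hwm = 34816 - (1522 + 9728) = 23566, high_water = 23566 & 0xFFFFFFF0
 * = 23552 and low_water = 23552 - 16 = 23536.
 */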
@@ -3934,6 +3955,7 @@ u32 igc_rd32(struct igc_hw *hw, u32 reg)
hw->hw_addr = NULL;
netif_device_detach(netdev);
netdev_err(netdev, "PCIe link lost, device now detached\n");
+ WARN(1, "igc: Failed to read reg 0x%x!\n", reg);
}
return value;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 08d85e336bd4..39e73ad60352 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -50,8 +50,6 @@
#define IXGBE_MAX_RXD 4096
#define IXGBE_MIN_RXD 64
-#define IXGBE_ETH_P_LLDP 0x88CC
-
/* flow control */
#define IXGBE_MIN_FCRTL 0x40
#define IXGBE_MAX_FCRTL 0x7FF80
@@ -635,6 +633,7 @@ struct ixgbe_adapter {
/* XDP */
int num_xdp_queues;
struct ixgbe_ring *xdp_ring[MAX_XDP_QUEUES];
+ unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled rings */
/* TX */
struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;
@@ -774,11 +773,6 @@ struct ixgbe_adapter {
#ifdef CONFIG_IXGBE_IPSEC
struct ixgbe_ipsec *ipsec;
#endif /* CONFIG_IXGBE_IPSEC */
-
- /* AF_XDP zero-copy */
- struct xdp_umem **xsk_umems;
- u16 num_xsk_umems_used;
- u16 num_xsk_umems;
};
static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)
@@ -1039,4 +1033,10 @@ static inline int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter,
static inline int ixgbe_ipsec_vf_del_sa(struct ixgbe_adapter *adapter,
u32 *mbuf, u32 vf) { return -EACCES; }
#endif /* CONFIG_IXGBE_IPSEC */
+
+static inline bool ixgbe_enabled_xdp_adapter(struct ixgbe_adapter *adapter)
+{
+ return !!adapter->xdp_prog;
+}
+
#endif /* _IXGBE_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index acba067cc15a..7c52ae8ac005 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -3226,7 +3226,8 @@ static int ixgbe_get_module_info(struct net_device *dev,
page_swap = true;
}
- if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) {
+ if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap ||
+ !(addr_mode & IXGBE_SFF_DDM_IMPLEMENTED)) {
/* We have a SFP, but it does not support SFF-8472 */
modinfo->type = ETH_MODULE_SFF_8079;
modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
index ff85ce5791a3..31629fc7e820 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
@@ -842,6 +842,9 @@ void ixgbe_ipsec_vf_clear(struct ixgbe_adapter *adapter, u32 vf)
struct ixgbe_ipsec *ipsec = adapter->ipsec;
int i;
+ if (!ipsec)
+ return;
+
/* search rx sa table */
for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT && ipsec->num_rx_sa; i++) {
if (!ipsec->rx_tbl[i].used)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 57fd9ee6de66..cbaf712d6529 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -6288,6 +6288,10 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
if (ixgbe_init_rss_key(adapter))
return -ENOMEM;
+ adapter->af_xdp_zc_qps = bitmap_zalloc(MAX_XDP_QUEUES, GFP_KERNEL);
+ if (!adapter->af_xdp_zc_qps)
+ return -ENOMEM;
+
/* Set MAC specific capability flags and exceptions */
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
@@ -9603,27 +9607,6 @@ static int ixgbe_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
}
}
-static int ixgbe_setup_tc_block(struct net_device *dev,
- struct tc_block_offload *f)
-{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
-
- if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
- return -EOPNOTSUPP;
-
- switch (f->command) {
- case TC_BLOCK_BIND:
- return tcf_block_cb_register(f->block, ixgbe_setup_tc_block_cb,
- adapter, adapter, f->extack);
- case TC_BLOCK_UNBIND:
- tcf_block_cb_unregister(f->block, ixgbe_setup_tc_block_cb,
- adapter);
- return 0;
- default:
- return -EOPNOTSUPP;
- }
-}
-
static int ixgbe_setup_tc_mqprio(struct net_device *dev,
struct tc_mqprio_qopt *mqprio)
{
@@ -9631,12 +9614,19 @@ static int ixgbe_setup_tc_mqprio(struct net_device *dev,
return ixgbe_setup_tc(dev, mqprio->num_tc);
}
+static LIST_HEAD(ixgbe_block_cb_list);
+
static int __ixgbe_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+
switch (type) {
case TC_SETUP_BLOCK:
- return ixgbe_setup_tc_block(dev, type_data);
+ return flow_block_cb_setup_simple(type_data,
+ &ixgbe_block_cb_list,
+ ixgbe_setup_tc_block_cb,
+ adapter, adapter, true);
case TC_SETUP_QDISC_MQPRIO:
return ixgbe_setup_tc_mqprio(dev, type_data);
default:
@@ -11161,6 +11151,7 @@ err_sw_init:
kfree(adapter->jump_tables[0]);
kfree(adapter->mac_table);
kfree(adapter->rss_key);
+ bitmap_free(adapter->af_xdp_zc_qps);
err_ioremap:
disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
free_netdev(netdev);
@@ -11249,6 +11240,7 @@ static void ixgbe_remove(struct pci_dev *pdev)
kfree(adapter->mac_table);
kfree(adapter->rss_key);
+ bitmap_free(adapter->af_xdp_zc_qps);
disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
free_netdev(netdev);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index 214b01085718..6544c4539c0d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -45,6 +45,7 @@
#define IXGBE_SFF_SOFT_RS_SELECT_10G 0x8
#define IXGBE_SFF_SOFT_RS_SELECT_1G 0x0
#define IXGBE_SFF_ADDRESSING_MODE 0x4
+#define IXGBE_SFF_DDM_IMPLEMENTED 0x40
#define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE 0x1
#define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE 0x8
#define IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE 0x23
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index d81a50dc9535..0be13a90ff79 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -72,13 +72,13 @@
#define IXGBE_INCPER_SHIFT_82599 24
#define IXGBE_OVERFLOW_PERIOD (HZ * 30)
-#define IXGBE_PTP_TX_TIMEOUT (HZ * 15)
+#define IXGBE_PTP_TX_TIMEOUT (HZ)
-/* half of a one second clock period, for use with PPS signal. We have to use
- * this instead of something pre-defined like IXGBE_PTP_PPS_HALF_SECOND, in
- * order to force at least 64bits of precision for shifting
+/* We use our own definitions instead of NSEC_PER_SEC because we want to mark
+ * the value as a ULL to force precision when bit shifting.
*/
-#define IXGBE_PTP_PPS_HALF_SECOND 500000000ULL
+#define NS_PER_SEC 1000000000ULL
+#define NS_PER_HALF_SEC 500000000ULL
/* In contrast, the X550 controller has two registers, SYSTIMEH and SYSTIMEL
* which contain measurements of seconds and nanoseconds respectively. This
@@ -141,23 +141,26 @@
#define MAX_TIMADJ 0x7FFFFFFF
/**
- * ixgbe_ptp_setup_sdp_x540
+ * ixgbe_ptp_setup_sdp_X540
* @adapter: private adapter structure
*
* this function enables or disables the clock out feature on SDP0 for
- * the X540 device. It will create a 1second periodic output that can
+ * the X540 device. It will create a 1 second periodic output that can
* be used as the PPS (via an interrupt).
*
- * It calculates when the systime will be on an exact second, and then
- * aligns the start of the PPS signal to that value. The shift is
- * necessary because it can change based on the link speed.
+ * It calculates when the system time will be on an exact second, and then
+ * aligns the start of the PPS signal to that value.
+ *
+ * This works by using the cycle counter shift and mult values in reverse, and
+ * assumes that the values we're shifting will not overflow.
*/
-static void ixgbe_ptp_setup_sdp_x540(struct ixgbe_adapter *adapter)
+static void ixgbe_ptp_setup_sdp_X540(struct ixgbe_adapter *adapter)
{
+ struct cyclecounter *cc = &adapter->hw_cc;
struct ixgbe_hw *hw = &adapter->hw;
- int shift = adapter->hw_cc.shift;
u32 esdp, tsauxc, clktiml, clktimh, trgttiml, trgttimh, rem;
- u64 ns = 0, clock_edge = 0;
+ u64 ns = 0, clock_edge = 0, clock_period;
+ unsigned long flags;
/* disable the pin first */
IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, 0x0);
@@ -177,26 +180,33 @@ static void ixgbe_ptp_setup_sdp_x540(struct ixgbe_adapter *adapter)
/* enable the Clock Out feature on SDP0, and allow
* interrupts to occur when the pin changes
*/
- tsauxc = IXGBE_TSAUXC_EN_CLK |
- IXGBE_TSAUXC_SYNCLK |
- IXGBE_TSAUXC_SDP0_INT;
+ tsauxc = (IXGBE_TSAUXC_EN_CLK |
+ IXGBE_TSAUXC_SYNCLK |
+ IXGBE_TSAUXC_SDP0_INT);
- /* clock period (or pulse length) */
- clktiml = (u32)(IXGBE_PTP_PPS_HALF_SECOND << shift);
- clktimh = (u32)((IXGBE_PTP_PPS_HALF_SECOND << shift) >> 32);
-
- /* Account for the cyclecounter wrap-around value by
- * using the converted ns value of the current time to
- * check for when the next aligned second would occur.
+ /* Determine the clock time period to use. This assumes that the
+ * cycle counter shift is small enough to avoid overflow.
*/
- clock_edge |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIML);
- clock_edge |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) << 32;
- ns = timecounter_cyc2time(&adapter->hw_tc, clock_edge);
+ clock_period = div_u64((NS_PER_HALF_SEC << cc->shift), cc->mult);
+ clktiml = (u32)(clock_period);
+ clktimh = (u32)(clock_period >> 32);
- div_u64_rem(ns, IXGBE_PTP_PPS_HALF_SECOND, &rem);
- clock_edge += ((IXGBE_PTP_PPS_HALF_SECOND - (u64)rem) << shift);
+ /* Read the current clock time, and save the cycle counter value */
+ spin_lock_irqsave(&adapter->tmreg_lock, flags);
+ ns = timecounter_read(&adapter->hw_tc);
+ clock_edge = adapter->hw_tc.cycle_last;
+ spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+
+ /* Figure out how far we are past the last full second */
+ div_u64_rem(ns, NS_PER_SEC, &rem);
+
+ /* Figure out how many nanoseconds to add to round the clock edge up
+ * to the next full second
+ */
+ rem = (NS_PER_SEC - rem);
- /* specify the initial clock start time */
+ /* Adjust the clock edge to align with the next full second. */
+ clock_edge += div_u64(((u64)rem << cc->shift), cc->mult);
trgttiml = (u32)clock_edge;
trgttimh = (u32)(clock_edge >> 32);
@@ -212,8 +222,100 @@ static void ixgbe_ptp_setup_sdp_x540(struct ixgbe_adapter *adapter)
}
/**
+ * ixgbe_ptp_setup_sdp_X550
+ * @adapter: private adapter structure
+ *
+ * Enable or disable a clock output signal on SDP 0 for X550 hardware.
+ *
+ * Use the target time feature to align the output signal on the next full
+ * second.
+ *
+ * This works by using the cycle counter shift and mult values in reverse, and
+ * assumes that the values we're shifting will not overflow.
+ */
+static void ixgbe_ptp_setup_sdp_X550(struct ixgbe_adapter *adapter)
+{
+ u32 esdp, tsauxc, freqout, trgttiml, trgttimh, rem, tssdp;
+ struct cyclecounter *cc = &adapter->hw_cc;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u64 ns = 0, clock_edge = 0;
+ struct timespec64 ts;
+ unsigned long flags;
+
+ /* disable the pin first */
+ IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, 0x0);
+ IXGBE_WRITE_FLUSH(hw);
+
+ if (!(adapter->flags2 & IXGBE_FLAG2_PTP_PPS_ENABLED))
+ return;
+
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+
+ /* enable the SDP0 pin as output, and connected to the
+ * native function for Timesync (ClockOut)
+ */
+ esdp |= IXGBE_ESDP_SDP0_DIR |
+ IXGBE_ESDP_SDP0_NATIVE;
+
+ /* enable the Clock Out feature on SDP0, and use Target Time 0 to
+ * enable generation of interrupts on the clock change.
+ */
+#define IXGBE_TSAUXC_DIS_TS_CLEAR 0x40000000
+ tsauxc = (IXGBE_TSAUXC_EN_CLK | IXGBE_TSAUXC_ST0 |
+ IXGBE_TSAUXC_EN_TT0 | IXGBE_TSAUXC_SDP0_INT |
+ IXGBE_TSAUXC_DIS_TS_CLEAR);
+
+ tssdp = (IXGBE_TSSDP_TS_SDP0_EN |
+ IXGBE_TSSDP_TS_SDP0_CLK0);
+
+ /* Determine the clock time period to use. This assumes that the
+ * cycle counter shift is small enough to avoid overflowing a 32bit
+ * value.
+ */
+ freqout = div_u64(NS_PER_HALF_SEC << cc->shift, cc->mult);
+
+ /* Read the current clock time, and save the cycle counter value */
+ spin_lock_irqsave(&adapter->tmreg_lock, flags);
+ ns = timecounter_read(&adapter->hw_tc);
+ clock_edge = adapter->hw_tc.cycle_last;
+ spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+
+ /* Figure out how far past the next second we are */
+ div_u64_rem(ns, NS_PER_SEC, &rem);
+
+ /* Figure out how many nanoseconds to add to round the clock edge up
+ * to the next full second
+ */
+ rem = (NS_PER_SEC - rem);
+
+ /* Adjust the clock edge to align with the next full second. */
+ clock_edge += div_u64(((u64)rem << cc->shift), cc->mult);
+
+ /* X550 hardware stores the time in 32bits of 'billions of cycles' and
+ * 32bits of 'cycles'. There's no guarantee that cycles represents
+ * nanoseconds. However, we can use the math from a timespec64 to
+ * convert into the hardware representation.
+ *
+ * See ixgbe_ptp_read_X550() for more details.
+ */
+ ts = ns_to_timespec64(clock_edge);
+ trgttiml = (u32)ts.tv_nsec;
+ trgttimh = (u32)ts.tv_sec;
+
+ IXGBE_WRITE_REG(hw, IXGBE_FREQOUT0, freqout);
+ IXGBE_WRITE_REG(hw, IXGBE_TRGTTIML0, trgttiml);
+ IXGBE_WRITE_REG(hw, IXGBE_TRGTTIMH0, trgttimh);
+
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ IXGBE_WRITE_REG(hw, IXGBE_TSSDP, tssdp);
+ IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc);
+
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
* ixgbe_ptp_read_X550 - read cycle counter value
- * @hw_cc: cyclecounter structure
+ * @cc: cyclecounter structure
*
* This function reads SYSTIME registers. It is called by the cyclecounter
* structure to convert from internal representation into nanoseconds. We need
@@ -221,10 +323,10 @@ static void ixgbe_ptp_setup_sdp_x540(struct ixgbe_adapter *adapter)
* result of SYSTIME is 32bits of "billions of cycles" and 32 bits of
* "cycles", rather than seconds and nanoseconds.
*/
-static u64 ixgbe_ptp_read_X550(const struct cyclecounter *hw_cc)
+static u64 ixgbe_ptp_read_X550(const struct cyclecounter *cc)
{
struct ixgbe_adapter *adapter =
- container_of(hw_cc, struct ixgbe_adapter, hw_cc);
+ container_of(cc, struct ixgbe_adapter, hw_cc);
struct ixgbe_hw *hw = &adapter->hw;
struct timespec64 ts;
@@ -838,6 +940,15 @@ void ixgbe_ptp_rx_rgtstamp(struct ixgbe_q_vector *q_vector,
ixgbe_ptp_convert_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
}
+/**
+ * ixgbe_ptp_get_ts_config - get current hardware timestamping configuration
+ * @adapter: pointer to adapter structure
+ * @ifr: ioctl data
+ *
+ * This function returns the current timestamping settings. Rather than
+ * attempt to deconstruct registers to fill in the values, simply keep a copy
+ * of the old settings around, and return a copy when requested.
+ */
int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
{
struct hwtstamp_config *config = &adapter->tstamp_config;
@@ -1253,7 +1364,7 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter)
adapter->ptp_caps.gettimex64 = ixgbe_ptp_gettimex;
adapter->ptp_caps.settime64 = ixgbe_ptp_settime;
adapter->ptp_caps.enable = ixgbe_ptp_feature_enable;
- adapter->ptp_setup_sdp = ixgbe_ptp_setup_sdp_x540;
+ adapter->ptp_setup_sdp = ixgbe_ptp_setup_sdp_X540;
break;
case ixgbe_mac_82599EB:
snprintf(adapter->ptp_caps.name,
@@ -1280,13 +1391,13 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter)
adapter->ptp_caps.n_alarm = 0;
adapter->ptp_caps.n_ext_ts = 0;
adapter->ptp_caps.n_per_out = 0;
- adapter->ptp_caps.pps = 0;
+ adapter->ptp_caps.pps = 1;
adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_X550;
adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime;
adapter->ptp_caps.gettimex64 = ixgbe_ptp_gettimex;
adapter->ptp_caps.settime64 = ixgbe_ptp_settime;
adapter->ptp_caps.enable = ixgbe_ptp_feature_enable;
- adapter->ptp_setup_sdp = NULL;
+ adapter->ptp_setup_sdp = ixgbe_ptp_setup_sdp_X550;
break;
default:
adapter->ptp_clock = NULL;
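
The reworked SDP code above converts nanosecond intervals back into raw cyclecounter units by applying the shift/mult calibration in reverse (cycles ≈ (ns << shift) / mult) and rounds the last timecounter reading up to the next full second before programming the target-time registers. A standalone sketch of just that arithmetic follows; it is illustrative only, and the calibration values are made up, merely standing in for adapter->hw_cc.mult/shift.

#include <stdint.h>
#include <stdio.h>

#define NS_PER_SEC	1000000000ULL
#define NS_PER_HALF_SEC	 500000000ULL

/* ns -> cycles: inverse of the cyclecounter's (cycles * mult) >> shift.
 * Only safe for small ns values (here < 1 s), as the driver comments note.
 */
static uint64_t ns_to_cycles(uint64_t ns, uint32_t mult, uint32_t shift)
{
	return (ns << shift) / mult;
}

int main(void)
{
	uint32_t mult = 1U << 31, shift = 31;	/* exactly 1 cycle per ns, for simplicity */
	uint64_t ns = 1234567890123ULL;		/* as read via timecounter_read() */
	uint64_t cycle_last = 987654321ULL;	/* cycle counter value at that reading */

	/* pulse length: half a second, expressed in cycles (CLKTIM/FREQOUT) */
	uint64_t clock_period = ns_to_cycles(NS_PER_HALF_SEC, mult, shift);

	/* nanoseconds left until the next full second, then the matching
	 * cycle value to program as the first target time
	 */
	uint64_t rem = NS_PER_SEC - (ns % NS_PER_SEC);
	uint64_t clock_edge = cycle_last + ns_to_cycles(rem, mult, shift);

	printf("period=%llu cycles, first edge at cycle %llu\n",
	       (unsigned long long)clock_period,
	       (unsigned long long)clock_edge);
	return 0;
}
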
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 345701af7749..537dfff585e0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -1645,7 +1645,7 @@ int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP),
(IXGBE_ETQF_FILTER_EN |
IXGBE_ETQF_TX_ANTISPOOF |
- IXGBE_ETH_P_LLDP));
+ ETH_P_LLDP));
IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FC),
(IXGBE_ETQF_FILTER_EN |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 84f2dba39e36..2be1c4c72435 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -1067,6 +1067,7 @@ struct ixgbe_nvm_version {
#define IXGBE_AUXSTMPL1 0x08C44 /* Auxiliary Time Stamp 1 register Low - RO */
#define IXGBE_AUXSTMPH1 0x08C48 /* Auxiliary Time Stamp 1 register High - RO */
#define IXGBE_TSIM 0x08C68 /* TimeSync Interrupt Mask Register - RW */
+#define IXGBE_TSSDP 0x0003C /* TimeSync SDP Configuration Register - RW */
/* Diagnostic Registers */
#define IXGBE_RDSTATCTL 0x02C20
@@ -2240,11 +2241,18 @@ enum {
#define IXGBE_RXDCTL_RLPML_EN 0x00008000
#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */
-#define IXGBE_TSAUXC_EN_CLK 0x00000004
-#define IXGBE_TSAUXC_SYNCLK 0x00000008
-#define IXGBE_TSAUXC_SDP0_INT 0x00000040
+#define IXGBE_TSAUXC_EN_CLK 0x00000004
+#define IXGBE_TSAUXC_SYNCLK 0x00000008
+#define IXGBE_TSAUXC_SDP0_INT 0x00000040
+#define IXGBE_TSAUXC_EN_TT0 0x00000001
+#define IXGBE_TSAUXC_EN_TT1 0x00000002
+#define IXGBE_TSAUXC_ST0 0x00000010
#define IXGBE_TSAUXC_DISABLE_SYSTIME 0x80000000
+#define IXGBE_TSSDP_TS_SDP0_SEL_MASK 0x000000C0
+#define IXGBE_TSSDP_TS_SDP0_CLK0 0x00000080
+#define IXGBE_TSSDP_TS_SDP0_EN 0x00000100
+
#define IXGBE_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */
#define IXGBE_TSYNCTXCTL_ENABLED 0x00000010 /* Tx timestamping enabled */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index bfe95ce0bd7f..6b609553329f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -14,57 +14,10 @@ struct xdp_umem *ixgbe_xsk_umem(struct ixgbe_adapter *adapter,
bool xdp_on = READ_ONCE(adapter->xdp_prog);
int qid = ring->ring_idx;
- if (!adapter->xsk_umems || !adapter->xsk_umems[qid] ||
- qid >= adapter->num_xsk_umems || !xdp_on)
+ if (!xdp_on || !test_bit(qid, adapter->af_xdp_zc_qps))
return NULL;
- return adapter->xsk_umems[qid];
-}
-
-static int ixgbe_alloc_xsk_umems(struct ixgbe_adapter *adapter)
-{
- if (adapter->xsk_umems)
- return 0;
-
- adapter->num_xsk_umems_used = 0;
- adapter->num_xsk_umems = adapter->num_rx_queues;
- adapter->xsk_umems = kcalloc(adapter->num_xsk_umems,
- sizeof(*adapter->xsk_umems),
- GFP_KERNEL);
- if (!adapter->xsk_umems) {
- adapter->num_xsk_umems = 0;
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static int ixgbe_add_xsk_umem(struct ixgbe_adapter *adapter,
- struct xdp_umem *umem,
- u16 qid)
-{
- int err;
-
- err = ixgbe_alloc_xsk_umems(adapter);
- if (err)
- return err;
-
- adapter->xsk_umems[qid] = umem;
- adapter->num_xsk_umems_used++;
-
- return 0;
-}
-
-static void ixgbe_remove_xsk_umem(struct ixgbe_adapter *adapter, u16 qid)
-{
- adapter->xsk_umems[qid] = NULL;
- adapter->num_xsk_umems_used--;
-
- if (adapter->num_xsk_umems == 0) {
- kfree(adapter->xsk_umems);
- adapter->xsk_umems = NULL;
- adapter->num_xsk_umems = 0;
- }
+ return xdp_get_umem_from_qid(adapter->netdev, qid);
}
static int ixgbe_xsk_umem_dma_map(struct ixgbe_adapter *adapter,
@@ -113,6 +66,7 @@ static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter,
struct xdp_umem *umem,
u16 qid)
{
+ struct net_device *netdev = adapter->netdev;
struct xdp_umem_fq_reuse *reuseq;
bool if_running;
int err;
@@ -120,12 +74,9 @@ static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter,
if (qid >= adapter->num_rx_queues)
return -EINVAL;
- if (adapter->xsk_umems) {
- if (qid >= adapter->num_xsk_umems)
- return -EINVAL;
- if (adapter->xsk_umems[qid])
- return -EBUSY;
- }
+ if (qid >= netdev->real_num_rx_queues ||
+ qid >= netdev->real_num_tx_queues)
+ return -EINVAL;
reuseq = xsk_reuseq_prepare(adapter->rx_ring[0]->count);
if (!reuseq)
@@ -138,14 +89,12 @@ static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter,
return err;
if_running = netif_running(adapter->netdev) &&
- READ_ONCE(adapter->xdp_prog);
+ ixgbe_enabled_xdp_adapter(adapter);
if (if_running)
ixgbe_txrx_ring_disable(adapter, qid);
- err = ixgbe_add_xsk_umem(adapter, umem, qid);
- if (err)
- return err;
+ set_bit(qid, adapter->af_xdp_zc_qps);
if (if_running) {
ixgbe_txrx_ring_enable(adapter, qid);
@@ -161,20 +110,21 @@ static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter,
static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid)
{
+ struct xdp_umem *umem;
bool if_running;
- if (!adapter->xsk_umems || qid >= adapter->num_xsk_umems ||
- !adapter->xsk_umems[qid])
+ umem = xdp_get_umem_from_qid(adapter->netdev, qid);
+ if (!umem)
return -EINVAL;
if_running = netif_running(adapter->netdev) &&
- READ_ONCE(adapter->xdp_prog);
+ ixgbe_enabled_xdp_adapter(adapter);
if (if_running)
ixgbe_txrx_ring_disable(adapter, qid);
- ixgbe_xsk_umem_dma_unmap(adapter, adapter->xsk_umems[qid]);
- ixgbe_remove_xsk_umem(adapter, qid);
+ clear_bit(qid, adapter->af_xdp_zc_qps);
+ ixgbe_xsk_umem_dma_unmap(adapter, umem);
if (if_running)
ixgbe_txrx_ring_enable(adapter, qid);
@@ -621,8 +571,9 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
union ixgbe_adv_tx_desc *tx_desc = NULL;
struct ixgbe_tx_buffer *tx_bi;
bool work_done = true;
- u32 len, cmd_type;
+ struct xdp_desc desc;
dma_addr_t dma;
+ u32 cmd_type;
while (budget-- > 0) {
if (unlikely(!ixgbe_desc_unused(xdp_ring)) ||
@@ -631,15 +582,18 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
break;
}
- if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &dma, &len))
+ if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &desc))
break;
- dma_sync_single_for_device(xdp_ring->dev, dma, len,
+ dma = xdp_umem_get_dma(xdp_ring->xsk_umem, desc.addr);
+
+ dma_sync_single_for_device(xdp_ring->dev, dma, desc.len,
DMA_BIDIRECTIONAL);
tx_bi = &xdp_ring->tx_buffer_info[xdp_ring->next_to_use];
- tx_bi->bytecount = len;
+ tx_bi->bytecount = desc.len;
tx_bi->xdpf = NULL;
+ tx_bi->gso_segs = 1;
tx_desc = IXGBE_TX_DESC(xdp_ring, xdp_ring->next_to_use);
tx_desc->read.buffer_addr = cpu_to_le64(dma);
@@ -648,10 +602,10 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
cmd_type = IXGBE_ADVTXD_DTYP_DATA |
IXGBE_ADVTXD_DCMD_DEXT |
IXGBE_ADVTXD_DCMD_IFCS;
- cmd_type |= len | IXGBE_TXD_CMD;
+ cmd_type |= desc.len | IXGBE_TXD_CMD;
tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
tx_desc->read.olinfo_status =
- cpu_to_le32(len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+ cpu_to_le32(desc.len << IXGBE_ADVTXD_PAYLEN_SHIFT);
xdp_ring->next_to_use++;
if (xdp_ring->next_to_use == xdp_ring->count)
@@ -704,7 +658,6 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
xsk_frames++;
tx_bi->xdpf = NULL;
- total_bytes += tx_bi->bytecount;
tx_bi++;
tx_desc++;
@@ -753,7 +706,7 @@ int ixgbe_xsk_async_xmit(struct net_device *dev, u32 qid)
if (qid >= adapter->num_xdp_queues)
return -ENXIO;
- if (!adapter->xsk_umems || !adapter->xsk_umems[qid])
+ if (!adapter->xdp_ring[qid]->xsk_umem)
return -ENXIO;
ring = adapter->xdp_ring[qid];
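
With this change the driver no longer keeps its own xsk_umems[] array: it only records a "zero-copy enabled" bit per queue id in af_xdp_zc_qps and asks the core for the umem via xdp_get_umem_from_qid() when it needs it. A toy standalone model of that split is sketched below; the names are hypothetical, and the real set_bit()/clear_bit()/test_bit() are the kernel's bitops rather than these helpers.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_QUEUES 64

/* state the core stack owns: which umem is bound to which queue id */
static void *core_umem_by_qid[MAX_QUEUES];

static void *get_umem_from_qid(unsigned int qid)
{
	return qid < MAX_QUEUES ? core_umem_by_qid[qid] : NULL;
}

/* state the driver owns: one bit per queue */
static uint64_t zc_qps[MAX_QUEUES / 64];

static void set_qid(unsigned int qid)   { zc_qps[qid / 64] |=  1ULL << (qid % 64); }
static void clear_qid(unsigned int qid) { zc_qps[qid / 64] &= ~(1ULL << (qid % 64)); }
static bool test_qid(unsigned int qid)  { return zc_qps[qid / 64] & (1ULL << (qid % 64)); }

/* datapath check, mirroring ixgbe_xsk_umem(): XDP on + bit set -> umem */
static void *umem_for_ring(bool xdp_on, unsigned int qid)
{
	if (!xdp_on || !test_qid(qid))
		return NULL;
	return get_umem_from_qid(qid);
}

int main(void)
{
	int dummy;

	core_umem_by_qid[3] = &dummy;	/* core bound a umem to queue 3 */
	set_qid(3);			/* enable path sets the bit */
	printf("queue 3: %p\n", umem_for_ring(true, 3));
	clear_qid(3);			/* disable path clears it */
	printf("queue 3: %p\n", umem_for_ring(true, 3));
	return 0;
}
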
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index 5399787e07af..54459b69c948 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -85,22 +85,16 @@ static int ixgbevf_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
- struct ixgbe_hw *hw = &adapter->hw;
- u32 link_speed = 0;
- bool link_up;
ethtool_link_ksettings_zero_link_mode(cmd, supported);
ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full);
cmd->base.autoneg = AUTONEG_DISABLE;
cmd->base.port = -1;
- hw->mac.get_link_status = 1;
- hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
-
- if (link_up) {
+ if (adapter->link_up) {
__u32 speed = SPEED_10000;
- switch (link_speed) {
+ switch (adapter->link_speed) {
case IXGBE_LINK_SPEED_10GB_FULL:
speed = SPEED_10000;
break;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index d189ed247665..d2b41f9f87f8 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1423,6 +1423,9 @@ static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
*/
/* what was last interrupt timeslice? */
timepassed_us = q_vector->itr >> 2;
+ if (timepassed_us == 0)
+ return;
+
bytes_perint = bytes / timepassed_us; /* bytes/usec */
switch (itr_setting) {
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index cd3b81300cc7..d5ce49636548 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -508,9 +508,8 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr);
}
- ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, IXGBE_VFMAILBOX_SIZE);
-
- return 0;
+ return ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
+ IXGBE_VFMAILBOX_SIZE);
}
/**
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index c5dac6bd2be4..f660cc2b8258 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -64,7 +64,7 @@
struct orion_mdio_dev {
void __iomem *regs;
- struct clk *clk[3];
+ struct clk *clk[4];
/*
* If we have access to the error interrupt pin (which is
* somewhat misnamed as it not only reflects internal errors
@@ -321,11 +321,19 @@ static int orion_mdio_probe(struct platform_device *pdev)
for (i = 0; i < ARRAY_SIZE(dev->clk); i++) {
dev->clk[i] = of_clk_get(pdev->dev.of_node, i);
+ if (PTR_ERR(dev->clk[i]) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto out_clk;
+ }
if (IS_ERR(dev->clk[i]))
break;
clk_prepare_enable(dev->clk[i]);
}
+ if (!IS_ERR(of_clk_get(pdev->dev.of_node, ARRAY_SIZE(dev->clk))))
+ dev_warn(&pdev->dev, "unsupported number of clocks, limiting to the first "
+ __stringify(ARRAY_SIZE(dev->clk)) "\n");
+
dev->err_interrupt = platform_get_irq(pdev, 0);
if (dev->err_interrupt > 0 &&
resource_size(r) < MVMDIO_ERR_INT_MASK + 4) {
@@ -362,6 +370,7 @@ out_mdio:
if (dev->err_interrupt > 0)
writel(0, dev->regs + MVMDIO_ERR_INT_MASK);
+out_clk:
for (i = 0; i < ARRAY_SIZE(dev->clk); i++) {
if (IS_ERR(dev->clk[i]))
break;
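
Two things change in mvmdio: the clk[] array grows from three to four entries, and -EPROBE_DEFER from of_clk_get() is now propagated through the new out_clk label instead of being swallowed by the IS_ERR() break, so the cleanup path that releases the already-enabled clocks runs before deferring. The shape of that unwind, reduced to a self-contained sketch with fake helpers (EPROBE_DEFER is the kernel's private errno value and does not exist in userspace errno.h):

#include <stdio.h>

#define EPROBE_DEFER	517		/* kernel-private errno, defined here for the sketch */
#define N_CLK		4
#define DEFER_PTR	((void *)-1)	/* stand-in for ERR_PTR(-EPROBE_DEFER) */

/* pretend clock tree: clock 2's provider has not probed yet, clock 3 is absent */
static void *fake_clk_get(int i)
{
	static int state[N_CLK] = { 1, 1, -1, 0 };	/* 1 = ok, -1 = defer, 0 = absent */

	if (state[i] < 0)
		return DEFER_PTR;
	return state[i] ? &state[i] : NULL;
}

static int grab_clocks(void *clk[N_CLK])
{
	int i, j;

	for (i = 0; i < N_CLK; i++) {
		clk[i] = fake_clk_get(i);
		if (clk[i] == DEFER_PTR)
			goto out_clk;	/* provider not ready: undo and retry later */
		if (!clk[i])
			break;		/* fewer clocks than slots is fine */
		printf("enable clk %d\n", i);
	}
	return 0;

out_clk:
	for (j = 0; j < i; j++)
		printf("disable clk %d\n", j);
	return -EPROBE_DEFER;
}

int main(void)
{
	void *clk[N_CLK];

	return grab_clocks(clk) == -EPROBE_DEFER ? 0 : 1;
}
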
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 269bd73be1a0..895bfed26a8a 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -437,6 +437,7 @@ struct mvneta_port {
struct device_node *dn;
unsigned int tx_csum_limit;
struct phylink *phylink;
+ struct phylink_config phylink_config;
struct phy *comphy;
struct mvneta_bm *bm_priv;
@@ -1118,7 +1119,7 @@ static void mvneta_bm_update_mtu(struct mvneta_port *pp, int mtu)
SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(bm_pool->pkt_size));
/* Fill entire long pool */
- num = hwbm_pool_add(hwbm_pool, hwbm_pool->size, GFP_ATOMIC);
+ num = hwbm_pool_add(hwbm_pool, hwbm_pool->size);
if (num != hwbm_pool->size) {
WARN(1, "pool %d: %d of %d allocated\n",
bm_pool->id, num, hwbm_pool->size);
@@ -3356,9 +3357,11 @@ static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
return 0;
}
-static void mvneta_validate(struct net_device *ndev, unsigned long *supported,
+static void mvneta_validate(struct phylink_config *config,
+ unsigned long *supported,
struct phylink_link_state *state)
{
+ struct net_device *ndev = to_net_dev(config->dev);
struct mvneta_port *pp = netdev_priv(ndev);
__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
@@ -3408,9 +3411,10 @@ static void mvneta_validate(struct net_device *ndev, unsigned long *supported,
phylink_helper_basex_speed(state);
}
-static int mvneta_mac_link_state(struct net_device *ndev,
+static int mvneta_mac_link_state(struct phylink_config *config,
struct phylink_link_state *state)
{
+ struct net_device *ndev = to_net_dev(config->dev);
struct mvneta_port *pp = netdev_priv(ndev);
u32 gmac_stat;
@@ -3438,8 +3442,9 @@ static int mvneta_mac_link_state(struct net_device *ndev,
return 1;
}
-static void mvneta_mac_an_restart(struct net_device *ndev)
+static void mvneta_mac_an_restart(struct phylink_config *config)
{
+ struct net_device *ndev = to_net_dev(config->dev);
struct mvneta_port *pp = netdev_priv(ndev);
u32 gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
@@ -3449,9 +3454,10 @@ static void mvneta_mac_an_restart(struct net_device *ndev)
gmac_an & ~MVNETA_GMAC_INBAND_RESTART_AN);
}
-static void mvneta_mac_config(struct net_device *ndev, unsigned int mode,
- const struct phylink_link_state *state)
+static void mvneta_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
{
+ struct net_device *ndev = to_net_dev(config->dev);
struct mvneta_port *pp = netdev_priv(ndev);
u32 new_ctrl0, gmac_ctrl0 = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
u32 new_ctrl2, gmac_ctrl2 = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
@@ -3581,9 +3587,10 @@ static void mvneta_set_eee(struct mvneta_port *pp, bool enable)
mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi_ctl1);
}
-static void mvneta_mac_link_down(struct net_device *ndev, unsigned int mode,
- phy_interface_t interface)
+static void mvneta_mac_link_down(struct phylink_config *config,
+ unsigned int mode, phy_interface_t interface)
{
+ struct net_device *ndev = to_net_dev(config->dev);
struct mvneta_port *pp = netdev_priv(ndev);
u32 val;
@@ -3600,10 +3607,11 @@ static void mvneta_mac_link_down(struct net_device *ndev, unsigned int mode,
mvneta_set_eee(pp, false);
}
-static void mvneta_mac_link_up(struct net_device *ndev, unsigned int mode,
+static void mvneta_mac_link_up(struct phylink_config *config, unsigned int mode,
phy_interface_t interface,
struct phy_device *phy)
{
+ struct net_device *ndev = to_net_dev(config->dev);
struct mvneta_port *pp = netdev_priv(ndev);
u32 val;
@@ -4500,8 +4508,14 @@ static int mvneta_probe(struct platform_device *pdev)
comphy = NULL;
}
- phylink = phylink_create(dev, pdev->dev.fwnode, phy_mode,
- &mvneta_phylink_ops);
+ pp = netdev_priv(dev);
+ spin_lock_init(&pp->lock);
+
+ pp->phylink_config.dev = &dev->dev;
+ pp->phylink_config.type = PHYLINK_NETDEV;
+
+ phylink = phylink_create(&pp->phylink_config, pdev->dev.fwnode,
+ phy_mode, &mvneta_phylink_ops);
if (IS_ERR(phylink)) {
err = PTR_ERR(phylink);
goto err_free_irq;
@@ -4513,8 +4527,6 @@ static int mvneta_probe(struct platform_device *pdev)
dev->ethtool_ops = &mvneta_eth_tool_ops;
- pp = netdev_priv(dev);
- spin_lock_init(&pp->lock);
pp->phylink = phylink;
pp->comphy = comphy;
pp->phy_interface = phy_mode;
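
The mvneta conversion follows the updated phylink API: the MAC callbacks now receive a struct phylink_config * rather than a net_device *, so the driver embeds a phylink_config in its private struct and recovers its own context from config->dev (via to_net_dev() and netdev_priv()). The underlying C idiom is pointer recovery from an embedded member; here is a self-contained illustration with invented names, not the phylink structures themselves.

#include <stddef.h>
#include <stdio.h>

/* the "handle" a framework hands back to driver callbacks */
struct cb_config {
	const char *name;
};

/* the driver's private state embeds that handle */
struct port_priv {
	int id;
	struct cb_config cfg;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* a callback that only gets the embedded handle back */
static void link_up_cb(struct cb_config *cfg)
{
	struct port_priv *pp = container_of(cfg, struct port_priv, cfg);

	printf("link up on port %d (%s)\n", pp->id, cfg->name);
}

int main(void)
{
	struct port_priv pp = { .id = 3, .cfg = { .name = "eth3" } };

	link_up_cb(&pp.cfg);	/* the framework would store and pass &pp.cfg */
	return 0;
}
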
diff --git a/drivers/net/ethernet/marvell/mvneta_bm.c b/drivers/net/ethernet/marvell/mvneta_bm.c
index de468e1bdba9..82ee2bcca6fd 100644
--- a/drivers/net/ethernet/marvell/mvneta_bm.c
+++ b/drivers/net/ethernet/marvell/mvneta_bm.c
@@ -190,7 +190,7 @@ struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id,
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
hwbm_pool->construct = mvneta_bm_construct;
hwbm_pool->priv = new_pool;
- spin_lock_init(&hwbm_pool->lock);
+ mutex_init(&hwbm_pool->buf_lock);
/* Create new pool */
err = mvneta_bm_pool_create(priv, new_pool);
@@ -201,7 +201,7 @@ struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id,
}
/* Allocate buffers for this pool */
- num = hwbm_pool_add(hwbm_pool, hwbm_pool->size, GFP_ATOMIC);
+ num = hwbm_pool_add(hwbm_pool, hwbm_pool->size);
if (num != hwbm_pool->size) {
WARN(1, "pool %d: %d of %d allocated\n",
new_pool->id, num, hwbm_pool->size);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
index 6171270a016c..4d9564ba68f6 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
@@ -148,6 +148,8 @@
#define MVPP22_CLS_C2_ATTR2 0x1b6c
#define MVPP22_CLS_C2_ATTR2_RSS_EN BIT(30)
#define MVPP22_CLS_C2_ATTR3 0x1b70
+#define MVPP22_CLS_C2_TCAM_CTRL 0x1b90
+#define MVPP22_CLS_C2_TCAM_BYPASS_FIFO BIT(0)
/* Descriptor Manager Top Registers */
#define MVPP2_RXQ_NUM_REG 0x2040
@@ -327,8 +329,26 @@
#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK 0xff00
#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT 8
+/* Packet Processor per-port counters */
+#define MVPP2_OVERRUN_ETH_DROP 0x7000
+#define MVPP2_CLS_ETH_DROP 0x7020
+
/* Hit counters registers */
#define MVPP2_CTRS_IDX 0x7040
+#define MVPP22_CTRS_TX_CTR(port, txq) ((txq) | ((port) << 3) | BIT(7))
+#define MVPP2_TX_DESC_ENQ_CTR 0x7100
+#define MVPP2_TX_DESC_ENQ_TO_DDR_CTR 0x7104
+#define MVPP2_TX_BUFF_ENQ_TO_DDR_CTR 0x7108
+#define MVPP2_TX_DESC_ENQ_HW_FWD_CTR 0x710c
+#define MVPP2_RX_DESC_ENQ_CTR 0x7120
+#define MVPP2_TX_PKTS_DEQ_CTR 0x7130
+#define MVPP2_TX_PKTS_FULL_QUEUE_DROP_CTR 0x7200
+#define MVPP2_TX_PKTS_EARLY_DROP_CTR 0x7204
+#define MVPP2_TX_PKTS_BM_DROP_CTR 0x7208
+#define MVPP2_TX_PKTS_BM_MC_DROP_CTR 0x720c
+#define MVPP2_RX_PKTS_FULL_QUEUE_DROP_CTR 0x7220
+#define MVPP2_RX_PKTS_EARLY_DROP_CTR 0x7224
+#define MVPP2_RX_PKTS_BM_DROP_CTR 0x7228
#define MVPP2_CLS_DEC_TBL_HIT_CTR 0x7700
#define MVPP2_CLS_FLOW_TBL_HIT_CTR 0x7704
@@ -624,6 +644,7 @@
#define MVPP2_N_RFS_RULES (MVPP2_N_RFS_ENTRIES_PER_FLOW * 7)
/* RSS constants */
+#define MVPP22_N_RSS_TABLES 8
#define MVPP22_RSS_TABLE_ENTRIES 32
/* IPv6 max L3 address size */
@@ -725,6 +746,10 @@ enum mvpp2_prs_l3_cast {
/* Definitions */
struct mvpp2_dbgfs_entries;
+struct mvpp2_rss_table {
+ u32 indir[MVPP22_RSS_TABLE_ENTRIES];
+};
+
/* Shared Packet Processor resources */
struct mvpp2 {
/* Shared registers' base addresses */
@@ -788,6 +813,9 @@ struct mvpp2 {
/* Debugfs entries private data */
struct mvpp2_dbgfs_entries *dbgfs_entries;
+
+ /* RSS Indirection tables */
+ struct mvpp2_rss_table *rss_tables[MVPP22_N_RSS_TABLES];
};
struct mvpp2_pcpu_stats {
@@ -905,6 +933,7 @@ struct mvpp2_port {
phy_interface_t phy_interface;
struct phylink *phylink;
+ struct phylink_config phylink_config;
struct phy *comphy;
struct mvpp2_bm_pool *pool_long;
@@ -919,12 +948,14 @@ struct mvpp2_port {
u32 tx_time_coal;
- /* RSS indirection table */
- u32 indir[MVPP22_RSS_TABLE_ENTRIES];
-
/* List of steering rules active on that port */
- struct mvpp2_ethtool_fs *rfs_rules[MVPP2_N_RFS_RULES];
+ struct mvpp2_ethtool_fs *rfs_rules[MVPP2_N_RFS_ENTRIES_PER_FLOW];
int n_rfs_rules;
+
+ /* Each port has its own view of the rss contexts, so that it can number
+ * them from 0
+ */
+ int rss_ctx[MVPP22_N_RSS_TABLES];
};
/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
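
The header now keeps a pool of up to eight RSS indirection tables shared by the whole packet processor (priv->rss_tables[]), while each port only stores a translation from its local context numbers to global table ids (port->rss_ctx[], with slot 0 as the port's default context). A minimal model of that two-level lookup follows, with simplified types rather than the driver's structures.

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

#define N_TABLES	8
#define TABLE_ENTRIES	32

struct rss_table { uint32_t indir[TABLE_ENTRIES]; };

/* pool shared by the whole packet processor */
static struct rss_table *tables[N_TABLES];

/* per-port view: local context number -> global table id, -1 = unused */
struct port { int rss_ctx[N_TABLES]; };

static int global_table_create(void)
{
	for (int g = 0; g < N_TABLES; g++) {
		if (tables[g])
			continue;
		tables[g] = calloc(1, sizeof(*tables[g]));
		return tables[g] ? g : -ENOMEM;
	}
	return -EINVAL;		/* pool exhausted */
}

/* create a context for @port in @slot; slot 0 is the default context */
static int port_ctx_create(struct port *port, int slot)
{
	int g = global_table_create();

	if (g < 0)
		return g;
	port->rss_ctx[slot] = g;
	return slot;		/* what the port (and ethtool) sees */
}

/* translate before touching hardware, as mvpp22_rss_ctx() does */
static struct rss_table *port_ctx_table(const struct port *port, int local)
{
	if (local < 0 || local >= N_TABLES)
		return NULL;
	int g = port->rss_ctx[local];

	return (g >= 0 && g < N_TABLES) ? tables[g] : NULL;
}

int main(void)
{
	struct port p;
	int local;

	for (int i = 0; i < N_TABLES; i++)
		p.rss_ctx[i] = -1;

	local = port_ctx_create(&p, 0);
	if (local < 0)
		return 1;
	return port_ctx_table(&p, local) ? 0 : 1;
}
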
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
index a57d17ab91f0..35478cba2aa5 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
@@ -44,17 +44,17 @@ static const struct mvpp2_cls_flow cls_flows[MVPP2_N_PRS_FLOWS] = {
/* TCP over IPv4 flows, Not fragmented, with vlan tag */
MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_TAG,
- MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED,
MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_TAG,
- MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED,
MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_TAG,
- MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED,
MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
@@ -79,17 +79,17 @@ static const struct mvpp2_cls_flow cls_flows[MVPP2_N_PRS_FLOWS] = {
/* TCP over IPv4 flows, fragmented, with vlan tag */
MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG,
- MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG,
- MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG,
- MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
@@ -114,17 +114,17 @@ static const struct mvpp2_cls_flow cls_flows[MVPP2_N_PRS_FLOWS] = {
/* UDP over IPv4 flows, Not fragmented, with vlan tag */
MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_TAG,
- MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED,
MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_TAG,
- MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED,
MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_TAG,
- MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED,
MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
@@ -149,17 +149,17 @@ static const struct mvpp2_cls_flow cls_flows[MVPP2_N_PRS_FLOWS] = {
/* UDP over IPv4 flows, fragmented, with vlan tag */
MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG,
- MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG,
- MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG,
- MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
@@ -178,12 +178,12 @@ static const struct mvpp2_cls_flow cls_flows[MVPP2_N_PRS_FLOWS] = {
/* TCP over IPv6 flows, not fragmented, with vlan tag */
MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_NF_TAG,
- MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_TAGGED,
MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_NF_TAG,
- MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_TAGGED,
MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
@@ -202,13 +202,13 @@ static const struct mvpp2_cls_flow cls_flows[MVPP2_N_PRS_FLOWS] = {
/* TCP over IPv6 flows, fragmented, with vlan tag */
MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_FRAG_TAG,
- MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED,
MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_IP_FRAG_TRUE |
MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_FRAG_TAG,
- MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED,
MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_IP_FRAG_TRUE |
MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
@@ -228,12 +228,12 @@ static const struct mvpp2_cls_flow cls_flows[MVPP2_N_PRS_FLOWS] = {
/* UDP over IPv6 flows, not fragmented, with vlan tag */
MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_NF_TAG,
- MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_TAGGED,
MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_NF_TAG,
- MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_TAGGED,
MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
@@ -252,13 +252,13 @@ static const struct mvpp2_cls_flow cls_flows[MVPP2_N_PRS_FLOWS] = {
/* UDP over IPv6 flows, fragmented, with vlan tag */
MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_FRAG_TAG,
- MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED,
MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_IP_FRAG_TRUE |
MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_FRAG_TAG,
- MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED,
MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_IP_FRAG_TRUE |
MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
@@ -279,15 +279,15 @@ static const struct mvpp2_cls_flow cls_flows[MVPP2_N_PRS_FLOWS] = {
/* IPv4 flows, with vlan tag */
MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_TAG,
- MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
MVPP2_PRS_RI_L3_IP4,
MVPP2_PRS_RI_L3_PROTO_MASK),
MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_TAG,
- MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
MVPP2_PRS_RI_L3_IP4_OPT,
MVPP2_PRS_RI_L3_PROTO_MASK),
MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_TAG,
- MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
MVPP2_PRS_RI_L3_IP4_OTHER,
MVPP2_PRS_RI_L3_PROTO_MASK),
@@ -303,11 +303,11 @@ static const struct mvpp2_cls_flow cls_flows[MVPP2_N_PRS_FLOWS] = {
/* IPv6 flows, with vlan tag */
MVPP2_DEF_FLOW(MVPP22_FLOW_IP6, MVPP2_FL_IP6_TAG,
- MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED,
MVPP2_PRS_RI_L3_IP6,
MVPP2_PRS_RI_L3_PROTO_MASK),
MVPP2_DEF_FLOW(MVPP22_FLOW_IP6, MVPP2_FL_IP6_TAG,
- MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED,
MVPP2_PRS_RI_L3_IP6,
MVPP2_PRS_RI_L3_PROTO_MASK),
@@ -548,6 +548,8 @@ void mvpp2_cls_c2_read(struct mvpp2 *priv, int index,
static int mvpp2_cls_ethtool_flow_to_type(int flow_type)
{
switch (flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS)) {
+ case ETHER_FLOW:
+ return MVPP22_FLOW_ETHERNET;
case TCP_V4_FLOW:
return MVPP22_FLOW_TCP4;
case TCP_V6_FLOW:
@@ -596,7 +598,7 @@ static void mvpp2_cls_flow_init(struct mvpp2 *priv,
mvpp2_cls_flow_eng_set(&fe, MVPP22_CLS_ENGINE_C2);
mvpp2_cls_flow_port_id_sel(&fe, true);
- mvpp2_cls_flow_lu_type_set(&fe, MVPP22_FLOW_ETHERNET);
+ mvpp2_cls_flow_lu_type_set(&fe, MVPP22_CLS_LU_TYPE_ALL);
/* Add all ports */
for (i = 0; i < MVPP2_MAX_PORTS; i++)
@@ -655,6 +657,9 @@ static int mvpp2_flow_set_hek_fields(struct mvpp2_cls_flow_entry *fe,
case MVPP22_CLS_HEK_OPT_VLAN:
field_id = MVPP22_CLS_FIELD_VLAN;
break;
+ case MVPP22_CLS_HEK_OPT_VLAN_PRI:
+ field_id = MVPP22_CLS_FIELD_VLAN_PRI;
+ break;
case MVPP22_CLS_HEK_OPT_IP4SA:
field_id = MVPP22_CLS_FIELD_IP4SA;
break;
@@ -689,6 +694,10 @@ static int mvpp2_cls_hek_field_size(u32 field)
switch (field) {
case MVPP22_CLS_HEK_OPT_MAC_DA:
return 48;
+ case MVPP22_CLS_HEK_OPT_VLAN:
+ return 12;
+ case MVPP22_CLS_HEK_OPT_VLAN_PRI:
+ return 3;
case MVPP22_CLS_HEK_OPT_IP4SA:
case MVPP22_CLS_HEK_OPT_IP4DA:
return 32;
@@ -777,6 +786,9 @@ u16 mvpp2_flow_get_hek_fields(struct mvpp2_cls_flow_entry *fe)
case MVPP22_CLS_FIELD_VLAN:
hash_opts |= MVPP22_CLS_HEK_OPT_VLAN;
break;
+ case MVPP22_CLS_FIELD_VLAN_PRI:
+ hash_opts |= MVPP22_CLS_HEK_OPT_VLAN_PRI;
+ break;
case MVPP22_CLS_FIELD_L3_PROTO:
hash_opts |= MVPP22_CLS_HEK_OPT_L3_PROTO;
break;
@@ -861,7 +873,7 @@ static void mvpp2_port_c2_cls_init(struct mvpp2_port *port)
/* Match on Lookup Type */
c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_LU_TYPE(MVPP2_CLS_LU_TYPE_MASK));
- c2.tcam[4] |= MVPP22_CLS_C2_LU_TYPE(MVPP22_FLOW_ETHERNET);
+ c2.tcam[4] |= MVPP22_CLS_C2_LU_TYPE(MVPP22_CLS_LU_TYPE_ALL);
/* Update RSS status after matching this entry */
c2.act = MVPP22_CLS_C2_ACT_RSS_EN(MVPP22_C2_UPD_LOCK);
@@ -923,6 +935,12 @@ void mvpp2_cls_init(struct mvpp2 *priv)
mvpp2_cls_c2_write(priv, &c2);
}
+ /* Disable the FIFO stages in C2 engine, which are only used in BIST
+ * mode
+ */
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_CTRL,
+ MVPP22_CLS_C2_TCAM_BYPASS_FIFO);
+
mvpp2_cls_port_init_flows(priv);
}
@@ -963,12 +981,22 @@ u32 mvpp2_cls_c2_hit_count(struct mvpp2 *priv, int c2_index)
return mvpp2_read(priv, MVPP22_CLS_C2_HIT_CTR);
}
-static void mvpp2_rss_port_c2_enable(struct mvpp2_port *port)
+static void mvpp2_rss_port_c2_enable(struct mvpp2_port *port, u32 ctx)
{
struct mvpp2_cls_c2_entry c2;
+ u8 qh, ql;
mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);
+ /* The RxQ number is used to select the RSS table. In that case, we set
+ * it to be the ctx number.
+ */
+ qh = (ctx >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
+ ql = ctx & MVPP22_CLS_C2_ATTR0_QLOW_MASK;
+
+ c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) |
+ MVPP22_CLS_C2_ATTR0_QLOW(ql);
+
c2.attr[2] |= MVPP22_CLS_C2_ATTR2_RSS_EN;
mvpp2_cls_c2_write(port->priv, &c2);
@@ -977,22 +1005,45 @@ static void mvpp2_rss_port_c2_enable(struct mvpp2_port *port)
static void mvpp2_rss_port_c2_disable(struct mvpp2_port *port)
{
struct mvpp2_cls_c2_entry c2;
+ u8 qh, ql;
mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);
+ /* Reset the default destination RxQ to the port's first rx queue. */
+ qh = (port->first_rxq >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
+ ql = port->first_rxq & MVPP22_CLS_C2_ATTR0_QLOW_MASK;
+
+ c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) |
+ MVPP22_CLS_C2_ATTR0_QLOW(ql);
+
c2.attr[2] &= ~MVPP22_CLS_C2_ATTR2_RSS_EN;
mvpp2_cls_c2_write(port->priv, &c2);
}
-void mvpp22_port_rss_enable(struct mvpp2_port *port)
+static inline int mvpp22_rss_ctx(struct mvpp2_port *port, int port_rss_ctx)
+{
+ return port->rss_ctx[port_rss_ctx];
+}
+
+int mvpp22_port_rss_enable(struct mvpp2_port *port)
{
- mvpp2_rss_port_c2_enable(port);
+ if (mvpp22_rss_ctx(port, 0) < 0)
+ return -EINVAL;
+
+ mvpp2_rss_port_c2_enable(port, mvpp22_rss_ctx(port, 0));
+
+ return 0;
}
-void mvpp22_port_rss_disable(struct mvpp2_port *port)
+int mvpp22_port_rss_disable(struct mvpp2_port *port)
{
+ if (mvpp22_rss_ctx(port, 0) < 0)
+ return -EINVAL;
+
mvpp2_rss_port_c2_disable(port);
+
+ return 0;
}
static void mvpp22_port_c2_lookup_disable(struct mvpp2_port *port, int entry)
@@ -1029,7 +1080,7 @@ static int mvpp2_port_c2_tcam_rule_add(struct mvpp2_port *port,
struct flow_action_entry *act;
struct mvpp2_cls_c2_entry c2;
u8 qh, ql, pmap;
- int index;
+ int index, ctx;
memset(&c2, 0, sizeof(c2));
@@ -1042,13 +1093,13 @@ static int mvpp2_port_c2_tcam_rule_add(struct mvpp2_port *port,
rule->c2_index = c2.index;
- c2.tcam[0] = (rule->c2_tcam & 0xffff) |
+ c2.tcam[3] = (rule->c2_tcam & 0xffff) |
((rule->c2_tcam_mask & 0xffff) << 16);
- c2.tcam[1] = ((rule->c2_tcam >> 16) & 0xffff) |
+ c2.tcam[2] = ((rule->c2_tcam >> 16) & 0xffff) |
(((rule->c2_tcam_mask >> 16) & 0xffff) << 16);
- c2.tcam[2] = ((rule->c2_tcam >> 32) & 0xffff) |
+ c2.tcam[1] = ((rule->c2_tcam >> 32) & 0xffff) |
(((rule->c2_tcam_mask >> 32) & 0xffff) << 16);
- c2.tcam[3] = ((rule->c2_tcam >> 48) & 0xffff) |
+ c2.tcam[0] = ((rule->c2_tcam >> 48) & 0xffff) |
(((rule->c2_tcam_mask >> 48) & 0xffff) << 16);
pmap = BIT(port->id);
@@ -1069,14 +1120,36 @@ static int mvpp2_port_c2_tcam_rule_add(struct mvpp2_port *port,
*/
c2.act = MVPP22_CLS_C2_ACT_COLOR(MVPP22_C2_COL_NO_UPD_LOCK);
+ /* Update RSS status after matching this entry */
+ if (act->queue.ctx)
+ c2.attr[2] |= MVPP22_CLS_C2_ATTR2_RSS_EN;
+
+ /* Always lock the RSS_EN decision. We might have high prio
+ * rules steering to an RXQ, and a lower one steering to RSS;
+ * we don't want the low prio RSS rule overwriting this flag.
+ */
+ c2.act = MVPP22_CLS_C2_ACT_RSS_EN(MVPP22_C2_UPD_LOCK);
+
/* Mark packet as "forwarded to software", needed for RSS */
c2.act |= MVPP22_CLS_C2_ACT_FWD(MVPP22_C2_FWD_SW_LOCK);
c2.act |= MVPP22_CLS_C2_ACT_QHIGH(MVPP22_C2_UPD_LOCK) |
MVPP22_CLS_C2_ACT_QLOW(MVPP22_C2_UPD_LOCK);
- qh = ((act->queue.index + port->first_rxq) >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
- ql = (act->queue.index + port->first_rxq) & MVPP22_CLS_C2_ATTR0_QLOW_MASK;
+ if (act->queue.ctx) {
+ /* Get the global ctx number */
+ ctx = mvpp22_rss_ctx(port, act->queue.ctx);
+ if (ctx < 0)
+ return -EINVAL;
+
+ qh = (ctx >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
+ ql = ctx & MVPP22_CLS_C2_ATTR0_QLOW_MASK;
+ } else {
+ qh = ((act->queue.index + port->first_rxq) >> 3) &
+ MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
+ ql = (act->queue.index + port->first_rxq) &
+ MVPP22_CLS_C2_ATTR0_QLOW_MASK;
+ }
c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) |
MVPP22_CLS_C2_ATTR0_QLOW(ql);
@@ -1140,6 +1213,9 @@ static int mvpp2_port_flt_rfs_rule_insert(struct mvpp2_port *port,
if (!flow)
return 0;
+ if ((rule->hek_fields & flow->supported_hash_opts) != rule->hek_fields)
+ continue;
+
index = MVPP2_CLS_FLT_C2_RFS(port->id, flow->flow_id, rule->loc);
mvpp2_cls_flow_read(priv, index, &fe);
@@ -1158,7 +1234,44 @@ static int mvpp2_port_flt_rfs_rule_insert(struct mvpp2_port *port,
static int mvpp2_cls_c2_build_match(struct mvpp2_rfs_rule *rule)
{
struct flow_rule *flow = rule->flow;
- int offs = 64;
+ int offs = 0;
+
+ /* The order of insertion in C2 tcam must match the order in which
+ * the fields are found in the header
+ */
+ if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
+
+ flow_rule_match_vlan(flow, &match);
+ if (match.mask->vlan_id) {
+ rule->hek_fields |= MVPP22_CLS_HEK_OPT_VLAN;
+
+ rule->c2_tcam |= ((u64)match.key->vlan_id) << offs;
+ rule->c2_tcam_mask |= ((u64)match.mask->vlan_id) << offs;
+
+ /* Don't update the offset yet */
+ }
+
+ if (match.mask->vlan_priority) {
+ rule->hek_fields |= MVPP22_CLS_HEK_OPT_VLAN_PRI;
+
+ /* VLAN pri is always at offset 13 relative to the
+ * current offset
+ */
+ rule->c2_tcam |= ((u64)match.key->vlan_priority) <<
+ (offs + 13);
+ rule->c2_tcam_mask |= ((u64)match.mask->vlan_priority) <<
+ (offs + 13);
+ }
+
+ if (match.mask->vlan_dei)
+ return -EOPNOTSUPP;
+
+ /* vlan id and prio always seem to take a full 16-bit slot in
+ * the Header Extracted Key.
+ */
+ offs += 16;
+ }
if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
struct flow_match_ports match;
@@ -1166,18 +1279,18 @@ static int mvpp2_cls_c2_build_match(struct mvpp2_rfs_rule *rule)
flow_rule_match_ports(flow, &match);
if (match.mask->src) {
rule->hek_fields |= MVPP22_CLS_HEK_OPT_L4SIP;
- offs -= mvpp2_cls_hek_field_size(MVPP22_CLS_HEK_OPT_L4SIP);
rule->c2_tcam |= ((u64)ntohs(match.key->src)) << offs;
rule->c2_tcam_mask |= ((u64)ntohs(match.mask->src)) << offs;
+ offs += mvpp2_cls_hek_field_size(MVPP22_CLS_HEK_OPT_L4SIP);
}
if (match.mask->dst) {
rule->hek_fields |= MVPP22_CLS_HEK_OPT_L4DIP;
- offs -= mvpp2_cls_hek_field_size(MVPP22_CLS_HEK_OPT_L4DIP);
rule->c2_tcam |= ((u64)ntohs(match.key->dst)) << offs;
rule->c2_tcam_mask |= ((u64)ntohs(match.mask->dst)) << offs;
+ offs += mvpp2_cls_hek_field_size(MVPP22_CLS_HEK_OPT_L4DIP);
}
}
@@ -1196,6 +1309,13 @@ static int mvpp2_cls_rfs_parse_rule(struct mvpp2_rfs_rule *rule)
if (act->id != FLOW_ACTION_QUEUE && act->id != FLOW_ACTION_DROP)
return -EOPNOTSUPP;
+ /* When both an RSS context and a queue index are set, the index
+ * is considered as an offset to be added to the indirection table
+ * entries. We don't support this, so reject this rule.
+ */
+ if (act->queue.ctx && act->queue.index)
+ return -EOPNOTSUPP;
+
/* For now, only use the C2 engine which has a HEK size limited to 64
* bits for TCAM matching.
*/
@@ -1212,7 +1332,7 @@ int mvpp2_ethtool_cls_rule_get(struct mvpp2_port *port,
{
struct mvpp2_ethtool_fs *efs;
- if (rxnfc->fs.location >= MVPP2_N_RFS_RULES)
+ if (rxnfc->fs.location >= MVPP2_N_RFS_ENTRIES_PER_FLOW)
return -EINVAL;
efs = port->rfs_rules[rxnfc->fs.location];
@@ -1232,8 +1352,7 @@ int mvpp2_ethtool_cls_rule_ins(struct mvpp2_port *port,
struct mvpp2_ethtool_fs *efs, *old_efs;
int ret = 0;
- if (info->fs.location >= 4 ||
- info->fs.location < 0)
+ if (info->fs.location >= MVPP2_N_RFS_ENTRIES_PER_FLOW)
return -EINVAL;
efs = kzalloc(sizeof(*efs), GFP_KERNEL);
@@ -1242,6 +1361,12 @@ int mvpp2_ethtool_cls_rule_ins(struct mvpp2_port *port,
input.fs = &info->fs;
+ /* We need to manually set the rss_ctx, since this info isn't present
+ * in info->fs
+ */
+ if (info->fs.flow_type & FLOW_RSS)
+ input.rss_ctx = info->rss_context;
+
ethtool_rule = ethtool_rx_flow_rule_create(&input);
if (IS_ERR(ethtool_rule)) {
ret = PTR_ERR(ethtool_rule);
@@ -1250,6 +1375,10 @@ int mvpp2_ethtool_cls_rule_ins(struct mvpp2_port *port,
efs->rule.flow = ethtool_rule->rule;
efs->rule.flow_type = mvpp2_cls_ethtool_flow_to_type(info->fs.flow_type);
+ if (efs->rule.flow_type < 0) {
+ ret = efs->rule.flow_type;
+ goto clean_rule;
+ }
ret = mvpp2_cls_rfs_parse_rule(&efs->rule);
if (ret)
@@ -1328,19 +1457,160 @@ static inline u32 mvpp22_rxfh_indir(struct mvpp2_port *port, u32 rxq)
return port->first_rxq + ((rxq * nrxqs + rxq / cpus) % port->nrxqs);
}
-void mvpp22_rss_fill_table(struct mvpp2_port *port, u32 table)
+static void mvpp22_rss_fill_table(struct mvpp2_port *port,
+ struct mvpp2_rss_table *table,
+ u32 rss_ctx)
{
struct mvpp2 *priv = port->priv;
int i;
for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++) {
- u32 sel = MVPP22_RSS_INDEX_TABLE(table) |
+ u32 sel = MVPP22_RSS_INDEX_TABLE(rss_ctx) |
MVPP22_RSS_INDEX_TABLE_ENTRY(i);
mvpp2_write(priv, MVPP22_RSS_INDEX, sel);
mvpp2_write(priv, MVPP22_RSS_TABLE_ENTRY,
- mvpp22_rxfh_indir(port, port->indir[i]));
+ mvpp22_rxfh_indir(port, table->indir[i]));
+ }
+}
+
+static int mvpp22_rss_context_create(struct mvpp2_port *port, u32 *rss_ctx)
+{
+ struct mvpp2 *priv = port->priv;
+ u32 ctx;
+
+ /* Find the first free RSS table */
+ for (ctx = 0; ctx < MVPP22_N_RSS_TABLES; ctx++) {
+ if (!priv->rss_tables[ctx])
+ break;
+ }
+
+ if (ctx == MVPP22_N_RSS_TABLES)
+ return -EINVAL;
+
+ priv->rss_tables[ctx] = kzalloc(sizeof(*priv->rss_tables[ctx]),
+ GFP_KERNEL);
+ if (!priv->rss_tables[ctx])
+ return -ENOMEM;
+
+ *rss_ctx = ctx;
+
+ /* Set the table width: replace the whole classifier Rx queue number
+ * with the ones configured in RSS table entries.
+ */
+ mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_TABLE(ctx));
+ mvpp2_write(priv, MVPP22_RSS_WIDTH, 8);
+
+ mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_QUEUE(ctx));
+ mvpp2_write(priv, MVPP22_RXQ2RSS_TABLE, MVPP22_RSS_TABLE_POINTER(ctx));
+
+ return 0;
+}
+
+int mvpp22_port_rss_ctx_create(struct mvpp2_port *port, u32 *port_ctx)
+{
+ u32 rss_ctx;
+ int ret, i;
+
+ ret = mvpp22_rss_context_create(port, &rss_ctx);
+ if (ret)
+ return ret;
+
+ /* Find the first available context number in the port, starting from 1.
+ * Context 0 on each port is reserved for the default context.
+ */
+ for (i = 1; i < MVPP22_N_RSS_TABLES; i++) {
+ if (port->rss_ctx[i] < 0)
+ break;
+ }
+
+ if (i == MVPP22_N_RSS_TABLES)
+ return -EINVAL;
+
+ port->rss_ctx[i] = rss_ctx;
+ *port_ctx = i;
+
+ return 0;
+}
+
+static struct mvpp2_rss_table *mvpp22_rss_table_get(struct mvpp2 *priv,
+ int rss_ctx)
+{
+ if (rss_ctx < 0 || rss_ctx >= MVPP22_N_RSS_TABLES)
+ return NULL;
+
+ return priv->rss_tables[rss_ctx];
+}
+
+int mvpp22_port_rss_ctx_delete(struct mvpp2_port *port, u32 port_ctx)
+{
+ struct mvpp2 *priv = port->priv;
+ struct ethtool_rxnfc *rxnfc;
+ int i, rss_ctx, ret;
+
+ rss_ctx = mvpp22_rss_ctx(port, port_ctx);
+
+ if (rss_ctx < 0 || rss_ctx >= MVPP22_N_RSS_TABLES)
+ return -EINVAL;
+
+ /* Invalidate any active classification rules that use this context */
+ for (i = 0; i < MVPP2_N_RFS_ENTRIES_PER_FLOW; i++) {
+ if (!port->rfs_rules[i])
+ continue;
+
+ rxnfc = &port->rfs_rules[i]->rxnfc;
+ if (!(rxnfc->fs.flow_type & FLOW_RSS) ||
+ rxnfc->rss_context != port_ctx)
+ continue;
+
+ ret = mvpp2_ethtool_cls_rule_del(port, rxnfc);
+ if (ret) {
+ netdev_warn(port->dev,
+ "couldn't remove classification rule %d associated to this context",
+ rxnfc->fs.location);
+ }
}
+
+ kfree(priv->rss_tables[rss_ctx]);
+
+ priv->rss_tables[rss_ctx] = NULL;
+ port->rss_ctx[port_ctx] = -1;
+
+ return 0;
+}
+
+int mvpp22_port_rss_ctx_indir_set(struct mvpp2_port *port, u32 port_ctx,
+ const u32 *indir)
+{
+ int rss_ctx = mvpp22_rss_ctx(port, port_ctx);
+ struct mvpp2_rss_table *rss_table = mvpp22_rss_table_get(port->priv,
+ rss_ctx);
+
+ if (!rss_table)
+ return -EINVAL;
+
+ memcpy(rss_table->indir, indir,
+ MVPP22_RSS_TABLE_ENTRIES * sizeof(rss_table->indir[0]));
+
+ mvpp22_rss_fill_table(port, rss_table, rss_ctx);
+
+ return 0;
+}
+
+int mvpp22_port_rss_ctx_indir_get(struct mvpp2_port *port, u32 port_ctx,
+ u32 *indir)
+{
+ int rss_ctx = mvpp22_rss_ctx(port, port_ctx);
+ struct mvpp2_rss_table *rss_table = mvpp22_rss_table_get(port->priv,
+ rss_ctx);
+
+ if (!rss_table)
+ return -EINVAL;
+
+ memcpy(indir, rss_table->indir,
+ MVPP22_RSS_TABLE_ENTRIES * sizeof(rss_table->indir[0]));
+
+ return 0;
}
int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info)
@@ -1424,32 +1694,32 @@ int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info)
return 0;
}
-void mvpp22_port_rss_init(struct mvpp2_port *port)
+int mvpp22_port_rss_init(struct mvpp2_port *port)
{
- struct mvpp2 *priv = port->priv;
- int i;
+ struct mvpp2_rss_table *table;
+ u32 context = 0;
+ int i, ret;
- /* Set the table width: replace the whole classifier Rx queue number
- * with the ones configured in RSS table entries.
- */
- mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_TABLE(port->id));
- mvpp2_write(priv, MVPP22_RSS_WIDTH, 8);
+ for (i = 0; i < MVPP22_N_RSS_TABLES; i++)
+ port->rss_ctx[i] = -1;
- /* The default RxQ is used as a key to select the RSS table to use.
- * We use one RSS table per port.
- */
- mvpp2_write(priv, MVPP22_RSS_INDEX,
- MVPP22_RSS_INDEX_QUEUE(port->first_rxq));
- mvpp2_write(priv, MVPP22_RXQ2RSS_TABLE,
- MVPP22_RSS_TABLE_POINTER(port->id));
+ ret = mvpp22_rss_context_create(port, &context);
+ if (ret)
+ return ret;
+
+ table = mvpp22_rss_table_get(port->priv, context);
+ if (!table)
+ return -EINVAL;
+
+ port->rss_ctx[0] = context;
/* Configure the first table to evenly distribute the packets across
* real Rx Queues. The table entries map a hash to a port Rx Queue.
*/
for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++)
- port->indir[i] = ethtool_rxfh_indir_default(i, port->nrxqs);
+ table->indir[i] = ethtool_rxfh_indir_default(i, port->nrxqs);
- mvpp22_rss_fill_table(port, port->id);
+ mvpp22_rss_fill_table(port, table, mvpp22_rss_ctx(port, 0));
/* Configure default flows */
mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_IP4, MVPP22_CLS_HEK_IP4_2T);
@@ -1458,4 +1728,6 @@ void mvpp22_port_rss_init(struct mvpp2_port *port)
mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_TCP6, MVPP22_CLS_HEK_IP6_5T);
mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_UDP4, MVPP22_CLS_HEK_IP4_5T);
mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_UDP6, MVPP22_CLS_HEK_IP6_5T);
+
+ return 0;
}
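
Two packing fixes stand out in mvpp2_cls.c: HEK match fields are now laid into the 64-bit c2_tcam value at ascending offsets starting from 0 (following header order), and the four C2 TCAM words are filled in the opposite order, so key bits 0-15 land in tcam[3] and bits 48-63 in tcam[0], each word carrying 16 key bits in its low half and the matching 16 mask bits in its high half. A standalone sketch of that layout, using made-up field values:

#include <stdint.h>
#include <stdio.h>

/* pack a 64-bit key/mask pair into four C2 TCAM words:
 * word[3] holds key/mask bits 0..15, word[0] holds bits 48..63
 */
static void pack_c2_tcam(uint64_t key, uint64_t mask, uint32_t word[4])
{
	for (int i = 0; i < 4; i++) {
		int shift = 16 * (3 - i);

		word[i] = (uint32_t)((key  >> shift) & 0xffff) |
			  (uint32_t)(((mask >> shift) & 0xffff) << 16);
	}
}

int main(void)
{
	uint64_t key = 0, mask = 0;
	uint32_t word[4];
	int offs = 0;

	/* header order: VLAN id (12 bits in a 16-bit slot), then an L4 port */
	key  |= (uint64_t)100    << offs;	/* vlan id 100 */
	mask |= (uint64_t)0xfff  << offs;
	offs += 16;

	key  |= (uint64_t)1234   << offs;	/* L4 source port */
	mask |= (uint64_t)0xffff << offs;
	offs += 16;

	pack_c2_tcam(key, mask, word);
	for (int i = 0; i < 4; i++)
		printf("tcam[%d] = 0x%08x\n", i, word[i]);
	return 0;
}
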
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
index 56b617375a65..8867f25afab4 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
@@ -33,15 +33,16 @@ enum mvpp2_cls_engine {
};
#define MVPP22_CLS_HEK_OPT_MAC_DA BIT(0)
-#define MVPP22_CLS_HEK_OPT_VLAN BIT(1)
-#define MVPP22_CLS_HEK_OPT_L3_PROTO BIT(2)
-#define MVPP22_CLS_HEK_OPT_IP4SA BIT(3)
-#define MVPP22_CLS_HEK_OPT_IP4DA BIT(4)
-#define MVPP22_CLS_HEK_OPT_IP6SA BIT(5)
-#define MVPP22_CLS_HEK_OPT_IP6DA BIT(6)
-#define MVPP22_CLS_HEK_OPT_L4SIP BIT(7)
-#define MVPP22_CLS_HEK_OPT_L4DIP BIT(8)
-#define MVPP22_CLS_HEK_N_FIELDS 9
+#define MVPP22_CLS_HEK_OPT_VLAN_PRI BIT(1)
+#define MVPP22_CLS_HEK_OPT_VLAN BIT(2)
+#define MVPP22_CLS_HEK_OPT_L3_PROTO BIT(3)
+#define MVPP22_CLS_HEK_OPT_IP4SA BIT(4)
+#define MVPP22_CLS_HEK_OPT_IP4DA BIT(5)
+#define MVPP22_CLS_HEK_OPT_IP6SA BIT(6)
+#define MVPP22_CLS_HEK_OPT_IP6DA BIT(7)
+#define MVPP22_CLS_HEK_OPT_L4SIP BIT(8)
+#define MVPP22_CLS_HEK_OPT_L4DIP BIT(9)
+#define MVPP22_CLS_HEK_N_FIELDS 10
#define MVPP22_CLS_HEK_L4_OPTS (MVPP22_CLS_HEK_OPT_L4SIP | \
MVPP22_CLS_HEK_OPT_L4DIP)
@@ -59,8 +60,12 @@ enum mvpp2_cls_engine {
#define MVPP22_CLS_HEK_IP6_5T (MVPP22_CLS_HEK_IP6_2T | \
MVPP22_CLS_HEK_L4_OPTS)
+#define MVPP22_CLS_HEK_TAGGED (MVPP22_CLS_HEK_OPT_VLAN | \
+ MVPP22_CLS_HEK_OPT_VLAN_PRI)
+
enum mvpp2_cls_field_id {
MVPP22_CLS_FIELD_MAC_DA = 0x03,
+ MVPP22_CLS_FIELD_VLAN_PRI = 0x05,
MVPP22_CLS_FIELD_VLAN = 0x06,
MVPP22_CLS_FIELD_L3_PROTO = 0x0f,
MVPP22_CLS_FIELD_IP4SA = 0x10,
@@ -180,6 +185,11 @@ enum mvpp2_prs_flow {
/* LU Type defined for all engines, and specified in the flow table */
#define MVPP2_CLS_LU_TYPE_MASK 0x3f
+enum mvpp2_cls_lu_type {
+ /* rule->loc is used as a lu-type for the entries 0 - 62. */
+ MVPP22_CLS_LU_TYPE_ALL = 63,
+};
+
#define MVPP2_N_FLOWS (MVPP2_FL_LAST - MVPP2_FL_START)
struct mvpp2_cls_flow {
@@ -249,11 +259,18 @@ struct mvpp2_cls_lookup_entry {
u32 data;
};
-void mvpp22_rss_fill_table(struct mvpp2_port *port, u32 table);
-void mvpp22_port_rss_init(struct mvpp2_port *port);
+int mvpp22_port_rss_init(struct mvpp2_port *port);
+
+int mvpp22_port_rss_enable(struct mvpp2_port *port);
+int mvpp22_port_rss_disable(struct mvpp2_port *port);
+
+int mvpp22_port_rss_ctx_create(struct mvpp2_port *port, u32 *rss_ctx);
+int mvpp22_port_rss_ctx_delete(struct mvpp2_port *port, u32 rss_ctx);
-void mvpp22_port_rss_enable(struct mvpp2_port *port);
-void mvpp22_port_rss_disable(struct mvpp2_port *port);
+int mvpp22_port_rss_ctx_indir_set(struct mvpp2_port *port, u32 rss_ctx,
+ const u32 *indir);
+int mvpp22_port_rss_ctx_indir_get(struct mvpp2_port *port, u32 rss_ctx,
+ u32 *indir);
int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info);
int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index d8e5241097a9..c51f1d5b550b 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -56,9 +56,9 @@ static struct {
/* The prototype is added here to be used in start_dev when using ACPI. This
* will be removed once phylink is used for all modes (dt+ACPI).
*/
-static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
+static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state);
-static void mvpp2_mac_link_up(struct net_device *dev, unsigned int mode,
+static void mvpp2_mac_link_up(struct phylink_config *config, unsigned int mode,
phy_interface_t interface, struct phy_device *phy);
/* Queue modes */
@@ -1258,6 +1258,17 @@ static u64 mvpp2_read_count(struct mvpp2_port *port,
return val;
}
+/* Some counters are accessed indirectly by first writing an index to
+ * MVPP2_CTRS_IDX. The index can represent various resources depending on the
+ * register we access: it can be a hit counter for some classification tables,
+ * or a counter specific to an rxq, a txq or a buffer pool.
+ */
+static u32 mvpp2_read_index(struct mvpp2 *priv, u32 index, u32 reg)
+{
+ mvpp2_write(priv, MVPP2_CTRS_IDX, index);
+ return mvpp2_read(priv, reg);
+}
+
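Editor's note (not part of the patch): a minimal usage sketch of the indexed-counter access described above; it assumes the MVPP22_CTRS_TX_CTR() helper used later in this patch encodes the port/queue pair into the index written to MVPP2_CTRS_IDX.

/* Select the TX-queue resource, then read one of its counters. */
static u64 example_read_txq_deq_counter(struct mvpp2 *priv, int port_id,
					int txq)
{
	return mvpp2_read_index(priv, MVPP22_CTRS_TX_CTR(port_id, txq),
				MVPP2_TX_PKTS_DEQ_CTR);
}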
/* Due to the fact that software statistics and hardware statistics are, by
* design, incremented at different moments in the chain of packet processing,
* it is very likely that incoming packets could have been dropped after being
@@ -1267,7 +1278,7 @@ static u64 mvpp2_read_count(struct mvpp2_port *port,
* Hence, statistics gathered from userspace with ifconfig (software) and
* ethtool (hardware) cannot be compared.
*/
-static const struct mvpp2_ethtool_counter mvpp2_ethtool_regs[] = {
+static const struct mvpp2_ethtool_counter mvpp2_ethtool_mib_regs[] = {
{ MVPP2_MIB_GOOD_OCTETS_RCVD, "good_octets_received", true },
{ MVPP2_MIB_BAD_OCTETS_RCVD, "bad_octets_received" },
{ MVPP2_MIB_CRC_ERRORS_SENT, "crc_errors_sent" },
@@ -1297,31 +1308,114 @@ static const struct mvpp2_ethtool_counter mvpp2_ethtool_regs[] = {
{ MVPP2_MIB_LATE_COLLISION, "late_collision" },
};
+static const struct mvpp2_ethtool_counter mvpp2_ethtool_port_regs[] = {
+ { MVPP2_OVERRUN_ETH_DROP, "rx_fifo_or_parser_overrun_drops" },
+ { MVPP2_CLS_ETH_DROP, "rx_classifier_drops" },
+};
+
+static const struct mvpp2_ethtool_counter mvpp2_ethtool_txq_regs[] = {
+ { MVPP2_TX_DESC_ENQ_CTR, "txq_%d_desc_enqueue" },
+ { MVPP2_TX_DESC_ENQ_TO_DDR_CTR, "txq_%d_desc_enqueue_to_ddr" },
+ { MVPP2_TX_BUFF_ENQ_TO_DDR_CTR, "txq_%d_buff_enqueue_to_ddr" },
+ { MVPP2_TX_DESC_ENQ_HW_FWD_CTR, "txq_%d_desc_hardware_forwarded" },
+ { MVPP2_TX_PKTS_DEQ_CTR, "txq_%d_packets_dequeued" },
+ { MVPP2_TX_PKTS_FULL_QUEUE_DROP_CTR, "txq_%d_queue_full_drops" },
+ { MVPP2_TX_PKTS_EARLY_DROP_CTR, "txq_%d_packets_early_drops" },
+ { MVPP2_TX_PKTS_BM_DROP_CTR, "txq_%d_packets_bm_drops" },
+ { MVPP2_TX_PKTS_BM_MC_DROP_CTR, "txq_%d_packets_rep_bm_drops" },
+};
+
+static const struct mvpp2_ethtool_counter mvpp2_ethtool_rxq_regs[] = {
+ { MVPP2_RX_DESC_ENQ_CTR, "rxq_%d_desc_enqueue" },
+ { MVPP2_RX_PKTS_FULL_QUEUE_DROP_CTR, "rxq_%d_queue_full_drops" },
+ { MVPP2_RX_PKTS_EARLY_DROP_CTR, "rxq_%d_packets_early_drops" },
+ { MVPP2_RX_PKTS_BM_DROP_CTR, "rxq_%d_packets_bm_drops" },
+};
+
+#define MVPP2_N_ETHTOOL_STATS(ntxqs, nrxqs) (ARRAY_SIZE(mvpp2_ethtool_mib_regs) + \
+ ARRAY_SIZE(mvpp2_ethtool_port_regs) + \
+ (ARRAY_SIZE(mvpp2_ethtool_txq_regs) * (ntxqs)) + \
+ (ARRAY_SIZE(mvpp2_ethtool_rxq_regs) * (nrxqs)))
+
static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset,
u8 *data)
{
- if (sset == ETH_SS_STATS) {
- int i;
+ struct mvpp2_port *port = netdev_priv(netdev);
+ int i, q;
- for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
- strscpy(data + i * ETH_GSTRING_LEN,
- mvpp2_ethtool_regs[i].string, ETH_GSTRING_LEN);
+ if (sset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++) {
+ strscpy(data, mvpp2_ethtool_mib_regs[i].string,
+ ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++) {
+ strscpy(data, mvpp2_ethtool_port_regs[i].string,
+ ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+
+ for (q = 0; q < port->ntxqs; q++) {
+ for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++) {
+ snprintf(data, ETH_GSTRING_LEN,
+ mvpp2_ethtool_txq_regs[i].string, q);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+
+ for (q = 0; q < port->nrxqs; q++) {
+ for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++) {
+ snprintf(data, ETH_GSTRING_LEN,
+ mvpp2_ethtool_rxq_regs[i].string,
+ q);
+ data += ETH_GSTRING_LEN;
+ }
}
}
+static void mvpp2_read_stats(struct mvpp2_port *port)
+{
+ u64 *pstats;
+ int i, q;
+
+ pstats = port->ethtool_stats;
+
+ for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++)
+ *pstats++ += mvpp2_read_count(port, &mvpp2_ethtool_mib_regs[i]);
+
+ for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++)
+ *pstats++ += mvpp2_read(port->priv,
+ mvpp2_ethtool_port_regs[i].offset +
+ 4 * port->id);
+
+ for (q = 0; q < port->ntxqs; q++)
+ for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++)
+ *pstats++ += mvpp2_read_index(port->priv,
+ MVPP22_CTRS_TX_CTR(port->id, q),
+ mvpp2_ethtool_txq_regs[i].offset);
+
+ /* Rxqs are numbered from 0 from the user standpoint, but not from the
+ * driver's. We need to add the port->first_rxq offset.
+ */
+ for (q = 0; q < port->nrxqs; q++)
+ for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++)
+ *pstats++ += mvpp2_read_index(port->priv,
+ port->first_rxq + q,
+ mvpp2_ethtool_rxq_regs[i].offset);
+}
+
static void mvpp2_gather_hw_statistics(struct work_struct *work)
{
struct delayed_work *del_work = to_delayed_work(work);
struct mvpp2_port *port = container_of(del_work, struct mvpp2_port,
stats_work);
- u64 *pstats;
- int i;
mutex_lock(&port->gather_stats_lock);
- pstats = port->ethtool_stats;
- for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
- *pstats++ += mvpp2_read_count(port, &mvpp2_ethtool_regs[i]);
+ mvpp2_read_stats(port);
/* No need to read again the counters right after this function if it
* was called asynchronously by the user (ie. use of ethtool).
@@ -1345,27 +1439,24 @@ static void mvpp2_ethtool_get_stats(struct net_device *dev,
mutex_lock(&port->gather_stats_lock);
memcpy(data, port->ethtool_stats,
- sizeof(u64) * ARRAY_SIZE(mvpp2_ethtool_regs));
+ sizeof(u64) * MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs));
mutex_unlock(&port->gather_stats_lock);
}
static int mvpp2_ethtool_get_sset_count(struct net_device *dev, int sset)
{
+ struct mvpp2_port *port = netdev_priv(dev);
+
if (sset == ETH_SS_STATS)
- return ARRAY_SIZE(mvpp2_ethtool_regs);
+ return MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs);
return -EOPNOTSUPP;
}
static void mvpp2_mac_reset_assert(struct mvpp2_port *port)
{
- unsigned int i;
u32 val;
- /* Read the GOP statistics to reset the hardware counters */
- for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
- mvpp2_read_count(port, &mvpp2_ethtool_regs[i]);
-
val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) |
MVPP2_GMAC_PORT_RESET_MASK;
writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
@@ -3237,9 +3328,9 @@ static void mvpp2_start_dev(struct mvpp2_port *port)
struct phylink_link_state state = {
.interface = port->phy_interface,
};
- mvpp2_mac_config(port->dev, MLO_AN_INBAND, &state);
- mvpp2_mac_link_up(port->dev, MLO_AN_INBAND, port->phy_interface,
- NULL);
+ mvpp2_mac_config(&port->phylink_config, MLO_AN_INBAND, &state);
+ mvpp2_mac_link_up(&port->phylink_config, MLO_AN_INBAND,
+ port->phy_interface, NULL);
}
netif_tx_start_all_queues(port->dev);
@@ -3954,7 +4045,7 @@ static int mvpp2_ethtool_get_rxnfc(struct net_device *dev,
ret = mvpp2_ethtool_cls_rule_get(port, info);
break;
case ETHTOOL_GRXCLSRLALL:
- for (i = 0; i < MVPP2_N_RFS_RULES; i++) {
+ for (i = 0; i < MVPP2_N_RFS_ENTRIES_PER_FLOW; i++) {
if (port->rfs_rules[i])
rules[loc++] = i;
}
@@ -4000,24 +4091,25 @@ static int mvpp2_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
u8 *hfunc)
{
struct mvpp2_port *port = netdev_priv(dev);
+ int ret = 0;
if (!mvpp22_rss_is_supported())
return -EOPNOTSUPP;
if (indir)
- memcpy(indir, port->indir,
- ARRAY_SIZE(port->indir) * sizeof(port->indir[0]));
+ ret = mvpp22_port_rss_ctx_indir_get(port, 0, indir);
if (hfunc)
*hfunc = ETH_RSS_HASH_CRC32;
- return 0;
+ return ret;
}
static int mvpp2_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
const u8 *key, const u8 hfunc)
{
struct mvpp2_port *port = netdev_priv(dev);
+ int ret = 0;
if (!mvpp22_rss_is_supported())
return -EOPNOTSUPP;
@@ -4028,15 +4120,58 @@ static int mvpp2_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
if (key)
return -EOPNOTSUPP;
- if (indir) {
- memcpy(port->indir, indir,
- ARRAY_SIZE(port->indir) * sizeof(port->indir[0]));
- mvpp22_rss_fill_table(port, port->id);
- }
+ if (indir)
+ ret = mvpp22_port_rss_ctx_indir_set(port, 0, indir);
- return 0;
+ return ret;
+}
+
+static int mvpp2_ethtool_get_rxfh_context(struct net_device *dev, u32 *indir,
+ u8 *key, u8 *hfunc, u32 rss_context)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ int ret = 0;
+
+ if (!mvpp22_rss_is_supported())
+ return -EOPNOTSUPP;
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_CRC32;
+
+ if (indir)
+ ret = mvpp22_port_rss_ctx_indir_get(port, rss_context, indir);
+
+ return ret;
}
+static int mvpp2_ethtool_set_rxfh_context(struct net_device *dev,
+ const u32 *indir, const u8 *key,
+ const u8 hfunc, u32 *rss_context,
+ bool delete)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ int ret;
+
+ if (!mvpp22_rss_is_supported())
+ return -EOPNOTSUPP;
+
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32)
+ return -EOPNOTSUPP;
+
+ if (key)
+ return -EOPNOTSUPP;
+
+ if (delete)
+ return mvpp22_port_rss_ctx_delete(port, *rss_context);
+
+ if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
+ ret = mvpp22_port_rss_ctx_create(port, rss_context);
+ if (ret)
+ return ret;
+ }
+
+ return mvpp22_port_rss_ctx_indir_set(port, *rss_context, indir);
+}
/* Device ops */
static const struct net_device_ops mvpp2_netdev_ops = {
@@ -4073,7 +4208,8 @@ static const struct ethtool_ops mvpp2_eth_tool_ops = {
.get_rxfh_indir_size = mvpp2_ethtool_get_rxfh_indir_size,
.get_rxfh = mvpp2_ethtool_get_rxfh,
.set_rxfh = mvpp2_ethtool_set_rxfh,
-
+ .get_rxfh_context = mvpp2_ethtool_get_rxfh_context,
+ .set_rxfh_context = mvpp2_ethtool_set_rxfh_context,
};
/* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that
@@ -4327,6 +4463,11 @@ static int mvpp2_port_init(struct mvpp2_port *port)
if (err)
goto err_free_percpu;
+ /* Clear all port stats */
+ mvpp2_read_stats(port);
+ memset(port->ethtool_stats, 0,
+ MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs) * sizeof(u64));
+
return 0;
err_free_percpu:
@@ -4416,11 +4557,12 @@ static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
eth_hw_addr_random(dev);
}
-static void mvpp2_phylink_validate(struct net_device *dev,
+static void mvpp2_phylink_validate(struct phylink_config *config,
unsigned long *supported,
struct phylink_link_state *state)
{
- struct mvpp2_port *port = netdev_priv(dev);
+ struct mvpp2_port *port = container_of(config, struct mvpp2_port,
+ phylink_config);
__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
/* Invalid combinations */
@@ -4544,10 +4686,11 @@ static void mvpp2_gmac_link_state(struct mvpp2_port *port,
state->pause |= MLO_PAUSE_TX;
}
-static int mvpp2_phylink_mac_link_state(struct net_device *dev,
+static int mvpp2_phylink_mac_link_state(struct phylink_config *config,
struct phylink_link_state *state)
{
- struct mvpp2_port *port = netdev_priv(dev);
+ struct mvpp2_port *port = container_of(config, struct mvpp2_port,
+ phylink_config);
if (port->priv->hw_version == MVPP22 && port->gop_id == 0) {
u32 mode = readl(port->base + MVPP22_XLG_CTRL3_REG);
@@ -4563,9 +4706,10 @@ static int mvpp2_phylink_mac_link_state(struct net_device *dev,
return 1;
}
-static void mvpp2_mac_an_restart(struct net_device *dev)
+static void mvpp2_mac_an_restart(struct phylink_config *config)
{
- struct mvpp2_port *port = netdev_priv(dev);
+ struct mvpp2_port *port = container_of(config, struct mvpp2_port,
+ phylink_config);
u32 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
writel(val | MVPP2_GMAC_IN_BAND_RESTART_AN,
@@ -4750,9 +4894,10 @@ static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
}
}
-static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
+static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state)
{
+ struct net_device *dev = to_net_dev(config->dev);
struct mvpp2_port *port = netdev_priv(dev);
bool change_interface = port->phy_interface != state->interface;
@@ -4792,9 +4937,10 @@ static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
mvpp2_port_enable(port);
}
-static void mvpp2_mac_link_up(struct net_device *dev, unsigned int mode,
+static void mvpp2_mac_link_up(struct phylink_config *config, unsigned int mode,
phy_interface_t interface, struct phy_device *phy)
{
+ struct net_device *dev = to_net_dev(config->dev);
struct mvpp2_port *port = netdev_priv(dev);
u32 val;
@@ -4819,9 +4965,10 @@ static void mvpp2_mac_link_up(struct net_device *dev, unsigned int mode,
netif_tx_wake_all_queues(dev);
}
-static void mvpp2_mac_link_down(struct net_device *dev, unsigned int mode,
- phy_interface_t interface)
+static void mvpp2_mac_link_down(struct phylink_config *config,
+ unsigned int mode, phy_interface_t interface)
{
+ struct net_device *dev = to_net_dev(config->dev);
struct mvpp2_port *port = netdev_priv(dev);
u32 val;
@@ -5002,7 +5149,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
}
port->ethtool_stats = devm_kcalloc(&pdev->dev,
- ARRAY_SIZE(mvpp2_ethtool_regs),
+ MVPP2_N_ETHTOOL_STATS(ntxqs, nrxqs),
sizeof(u64), GFP_KERNEL);
if (!port->ethtool_stats) {
err = -ENOMEM;
@@ -5078,8 +5225,11 @@ static int mvpp2_port_probe(struct platform_device *pdev,
/* Phylink isn't used w/ ACPI as of now */
if (port_node) {
- phylink = phylink_create(dev, port_fwnode, phy_mode,
- &mvpp2_phylink_ops);
+ port->phylink_config.dev = &dev->dev;
+ port->phylink_config.type = PHYLINK_NETDEV;
+
+ phylink = phylink_create(&port->phylink_config, port_fwnode,
+ phy_mode, &mvpp2_phylink_ops);
if (IS_ERR(phylink)) {
err = PTR_ERR(phylink);
goto err_free_port_pcpu;
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
index ae2240074d8e..5692c6087bbb 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
@@ -312,7 +312,8 @@ static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
}
/* Set value */
- pe->sram[MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_SHIFT_OFFS)] = shift & MVPP2_PRS_SRAM_SHIFT_MASK;
+ pe->sram[MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_SHIFT_OFFS)] |=
+ shift & MVPP2_PRS_SRAM_SHIFT_MASK;
/* Reset and set operation */
mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
diff --git a/drivers/net/ethernet/mediatek/Makefile b/drivers/net/ethernet/mediatek/Makefile
index d41a2414c575..2d8362f9341b 100644
--- a/drivers/net/ethernet/mediatek/Makefile
+++ b/drivers/net/ethernet/mediatek/Makefile
@@ -3,4 +3,5 @@
# Makefile for the Mediatek SoCs built-in ethernet macs
#
-obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth_soc.o
+obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o
+mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_path.c b/drivers/net/ethernet/mediatek/mtk_eth_path.c
new file mode 100644
index 000000000000..7f05880cf9ef
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_eth_path.c
@@ -0,0 +1,352 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018-2019 MediaTek Inc.
+
+/* A library for configuring the path from GMAC/GDM to the target PHY
+ *
+ * Author: Sean Wang <sean.wang@mediatek.com>
+ *
+ */
+
+#include <linux/phy.h>
+#include <linux/regmap.h>
+
+#include "mtk_eth_soc.h"
+
+struct mtk_eth_muxc {
+ const char *name;
+ int cap_bit;
+ int (*set_path)(struct mtk_eth *eth, int path);
+};
+
+static const char *mtk_eth_path_name(int path)
+{
+ switch (path) {
+ case MTK_ETH_PATH_GMAC1_RGMII:
+ return "gmac1_rgmii";
+ case MTK_ETH_PATH_GMAC1_TRGMII:
+ return "gmac1_trgmii";
+ case MTK_ETH_PATH_GMAC1_SGMII:
+ return "gmac1_sgmii";
+ case MTK_ETH_PATH_GMAC2_RGMII:
+ return "gmac2_rgmii";
+ case MTK_ETH_PATH_GMAC2_SGMII:
+ return "gmac2_sgmii";
+ case MTK_ETH_PATH_GMAC2_GEPHY:
+ return "gmac2_gephy";
+ case MTK_ETH_PATH_GDM1_ESW:
+ return "gdm1_esw";
+ default:
+ return "unknown path";
+ }
+}
+
+static int set_mux_gdm1_to_gmac1_esw(struct mtk_eth *eth, int path)
+{
+ bool updated = true;
+ u32 val, mask, set;
+
+ switch (path) {
+ case MTK_ETH_PATH_GMAC1_SGMII:
+ mask = ~(u32)MTK_MUX_TO_ESW;
+ set = 0;
+ break;
+ case MTK_ETH_PATH_GDM1_ESW:
+ mask = ~(u32)MTK_MUX_TO_ESW;
+ set = MTK_MUX_TO_ESW;
+ break;
+ default:
+ updated = false;
+ break;
+ }
+
+ if (updated) {
+ val = mtk_r32(eth, MTK_MAC_MISC);
+ val = (val & mask) | set;
+ mtk_w32(eth, val, MTK_MAC_MISC);
+ }
+
+ dev_dbg(eth->dev, "path %s in %s updated = %d\n",
+ mtk_eth_path_name(path), __func__, updated);
+
+ return 0;
+}
+
+static int set_mux_gmac2_gmac0_to_gephy(struct mtk_eth *eth, int path)
+{
+ unsigned int val = 0;
+ bool updated = true;
+
+ switch (path) {
+ case MTK_ETH_PATH_GMAC2_GEPHY:
+ val = ~(u32)GEPHY_MAC_SEL;
+ break;
+ default:
+ updated = false;
+ break;
+ }
+
+ if (updated)
+ regmap_update_bits(eth->infra, INFRA_MISC2, GEPHY_MAC_SEL, val);
+
+ dev_dbg(eth->dev, "path %s in %s updated = %d\n",
+ mtk_eth_path_name(path), __func__, updated);
+
+ return 0;
+}
+
+static int set_mux_u3_gmac2_to_qphy(struct mtk_eth *eth, int path)
+{
+ unsigned int val = 0;
+ bool updated = true;
+
+ switch (path) {
+ case MTK_ETH_PATH_GMAC2_SGMII:
+ val = CO_QPHY_SEL;
+ break;
+ default:
+ updated = false;
+ break;
+ }
+
+ if (updated)
+ regmap_update_bits(eth->infra, INFRA_MISC2, CO_QPHY_SEL, val);
+
+ dev_dbg(eth->dev, "path %s in %s updated = %d\n",
+ mtk_eth_path_name(path), __func__, updated);
+
+ return 0;
+}
+
+static int set_mux_gmac1_gmac2_to_sgmii_rgmii(struct mtk_eth *eth, int path)
+{
+ unsigned int val = 0;
+ bool updated = true;
+
+ switch (path) {
+ case MTK_ETH_PATH_GMAC1_SGMII:
+ val = SYSCFG0_SGMII_GMAC1;
+ break;
+ case MTK_ETH_PATH_GMAC2_SGMII:
+ val = SYSCFG0_SGMII_GMAC2;
+ break;
+ case MTK_ETH_PATH_GMAC1_RGMII:
+ case MTK_ETH_PATH_GMAC2_RGMII:
+ regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
+ val &= SYSCFG0_SGMII_MASK;
+
+ if ((path == MTK_ETH_PATH_GMAC1_RGMII && val == SYSCFG0_SGMII_GMAC1) ||
+ (path == MTK_ETH_PATH_GMAC2_RGMII && val == SYSCFG0_SGMII_GMAC2))
+ val = 0;
+ else
+ updated = false;
+ break;
+ default:
+ updated = false;
+ break;
+ }
+
+ if (updated)
+ regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
+ SYSCFG0_SGMII_MASK, val);
+
+ dev_dbg(eth->dev, "path %s in %s updated = %d\n",
+ mtk_eth_path_name(path), __func__, updated);
+
+ return 0;
+}
+
+static int set_mux_gmac12_to_gephy_sgmii(struct mtk_eth *eth, int path)
+{
+ unsigned int val = 0;
+ bool updated = true;
+
+ regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
+
+ switch (path) {
+ case MTK_ETH_PATH_GMAC1_SGMII:
+ val |= SYSCFG0_SGMII_GMAC1_V2;
+ break;
+ case MTK_ETH_PATH_GMAC2_GEPHY:
+ val &= ~(u32)SYSCFG0_SGMII_GMAC2_V2;
+ break;
+ case MTK_ETH_PATH_GMAC2_SGMII:
+ val |= SYSCFG0_SGMII_GMAC2_V2;
+ break;
+ default:
+ updated = false;
+ }
+
+ if (updated)
+ regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
+ SYSCFG0_SGMII_MASK, val);
+
+ dev_dbg(eth->dev, "path %s in %s updated = %d\n",
+ mtk_eth_path_name(path), __func__, updated);
+
+ return 0;
+}
+
+static const struct mtk_eth_muxc mtk_eth_muxc[] = {
+ {
+ .name = "mux_gdm1_to_gmac1_esw",
+ .cap_bit = MTK_ETH_MUX_GDM1_TO_GMAC1_ESW,
+ .set_path = set_mux_gdm1_to_gmac1_esw,
+ }, {
+ .name = "mux_gmac2_gmac0_to_gephy",
+ .cap_bit = MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY,
+ .set_path = set_mux_gmac2_gmac0_to_gephy,
+ }, {
+ .name = "mux_u3_gmac2_to_qphy",
+ .cap_bit = MTK_ETH_MUX_U3_GMAC2_TO_QPHY,
+ .set_path = set_mux_u3_gmac2_to_qphy,
+ }, {
+ .name = "mux_gmac1_gmac2_to_sgmii_rgmii",
+ .cap_bit = MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII,
+ .set_path = set_mux_gmac1_gmac2_to_sgmii_rgmii,
+ }, {
+ .name = "mux_gmac12_to_gephy_sgmii",
+ .cap_bit = MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII,
+ .set_path = set_mux_gmac12_to_gephy_sgmii,
+ },
+};
+
+static int mtk_eth_mux_setup(struct mtk_eth *eth, int path)
+{
+ int i, err = 0;
+
+ if (!MTK_HAS_CAPS(eth->soc->caps, path)) {
+ dev_err(eth->dev, "path %s isn't supported on the SoC\n",
+ mtk_eth_path_name(path));
+ return -EINVAL;
+ }
+
+ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_MUX))
+ return 0;
+
+ /* Setup MUX in path fabric */
+ for (i = 0; i < ARRAY_SIZE(mtk_eth_muxc); i++) {
+ if (MTK_HAS_CAPS(eth->soc->caps, mtk_eth_muxc[i].cap_bit)) {
+ err = mtk_eth_muxc[i].set_path(eth, path);
+ if (err)
+ goto out;
+ } else {
+ dev_dbg(eth->dev, "mux %s isn't present on the SoC\n",
+ mtk_eth_muxc[i].name);
+ }
+ }
+
+out:
+ return err;
+}
+
+static int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id)
+{
+ unsigned int val = 0;
+ int sid, err, path;
+
+ path = (mac_id == 0) ? MTK_ETH_PATH_GMAC1_SGMII :
+ MTK_ETH_PATH_GMAC2_SGMII;
+
+ /* Setup proper MUXes along the path */
+ err = mtk_eth_mux_setup(eth, path);
+ if (err)
+ return err;
+
+ /* The path from GMAC to SGMII will be enabled once the SGMIISYS setup
+ * is done.
+ */
+ regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
+
+ regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
+ SYSCFG0_SGMII_MASK, ~(u32)SYSCFG0_SGMII_MASK);
+
+ /* Decide how GMAC and SGMIISYS are mapped */
+ sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ? 0 : mac_id;
+
+ /* Setup SGMIISYS with the determined property */
+ if (MTK_HAS_FLAGS(eth->sgmii->flags[sid], MTK_SGMII_PHYSPEED_AN))
+ err = mtk_sgmii_setup_mode_an(eth->sgmii, sid);
+ else
+ err = mtk_sgmii_setup_mode_force(eth->sgmii, sid);
+
+ if (err)
+ return err;
+
+ regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
+ SYSCFG0_SGMII_MASK, val);
+
+ return 0;
+}
+
+static int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id)
+{
+ int err, path = 0;
+
+ if (mac_id == 1)
+ path = MTK_ETH_PATH_GMAC2_GEPHY;
+
+ if (!path)
+ return -EINVAL;
+
+ /* Setup proper MUXes along the path */
+ err = mtk_eth_mux_setup(eth, path);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id)
+{
+ int err, path;
+
+ path = (mac_id == 0) ? MTK_ETH_PATH_GMAC1_RGMII :
+ MTK_ETH_PATH_GMAC2_RGMII;
+
+ /* Setup proper MUXes along the path */
+ err = mtk_eth_mux_setup(eth, path);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int mtk_setup_hw_path(struct mtk_eth *eth, int mac_id, int phymode)
+{
+ int err;
+
+ switch (phymode) {
+ case PHY_INTERFACE_MODE_TRGMII:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_MII:
+ case PHY_INTERFACE_MODE_REVMII:
+ case PHY_INTERFACE_MODE_RMII:
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
+ err = mtk_gmac_rgmii_path_setup(eth, mac_id);
+ if (err)
+ return err;
+ }
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
+ err = mtk_gmac_sgmii_path_setup(eth, mac_id);
+ if (err)
+ return err;
+ }
+ break;
+ case PHY_INTERFACE_MODE_GMII:
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
+ err = mtk_gmac_gephy_path_setup(eth, mac_id);
+ if (err)
+ return err;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 6cfffb64cd51..b20b3a5a1ebb 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -48,8 +48,10 @@ static const struct mtk_ethtool_stats {
};
static const char * const mtk_clks_source_name[] = {
- "ethif", "esw", "gp0", "gp1", "gp2", "trgpll", "sgmii_tx250m",
- "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb", "sgmii_ck", "eth2pll"
+ "ethif", "sgmiitop", "esw", "gp0", "gp1", "gp2", "fe", "trgpll",
+ "sgmii_tx250m", "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb",
+ "sgmii2_tx250m", "sgmii2_rx250m", "sgmii2_cdr_ref", "sgmii2_cdr_fb",
+ "sgmii_ck", "eth2pll",
};
void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
@@ -132,6 +134,31 @@ static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
return _mtk_mdio_read(eth, phy_addr, phy_reg);
}
+static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
+ phy_interface_t interface)
+{
+ u32 val;
+
+ /* Check DDR memory type.
+ * Currently TRGMII mode with DDR2 memory is not supported.
+ */
+ regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val);
+ if (interface == PHY_INTERFACE_MODE_TRGMII &&
+ val & SYSCFG_DRAM_TYPE_DDR2) {
+ dev_err(eth->dev,
+ "TRGMII mode with DDR2 memory is not supported!\n");
+ return -EOPNOTSUPP;
+ }
+
+ val = (interface == PHY_INTERFACE_MODE_TRGMII) ?
+ ETHSYS_TRGMII_MT7621_DDR_PLL : 0;
+
+ regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
+ ETHSYS_TRGMII_MT7621_MASK, val);
+
+ return 0;
+}
+
static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, int speed)
{
u32 val;
@@ -159,47 +186,6 @@ static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, int speed)
mtk_w32(eth, val, TRGMII_TCK_CTRL);
}
-static void mtk_gmac_sgmii_hw_setup(struct mtk_eth *eth, int mac_id)
-{
- u32 val;
-
- /* Setup the link timer and QPHY power up inside SGMIISYS */
- regmap_write(eth->sgmiisys, SGMSYS_PCS_LINK_TIMER,
- SGMII_LINK_TIMER_DEFAULT);
-
- regmap_read(eth->sgmiisys, SGMSYS_SGMII_MODE, &val);
- val |= SGMII_REMOTE_FAULT_DIS;
- regmap_write(eth->sgmiisys, SGMSYS_SGMII_MODE, val);
-
- regmap_read(eth->sgmiisys, SGMSYS_PCS_CONTROL_1, &val);
- val |= SGMII_AN_RESTART;
- regmap_write(eth->sgmiisys, SGMSYS_PCS_CONTROL_1, val);
-
- regmap_read(eth->sgmiisys, SGMSYS_QPHY_PWR_STATE_CTRL, &val);
- val &= ~SGMII_PHYA_PWD;
- regmap_write(eth->sgmiisys, SGMSYS_QPHY_PWR_STATE_CTRL, val);
-
- /* Determine MUX for which GMAC uses the SGMII interface */
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_DUAL_GMAC_SHARED_SGMII)) {
- regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
- val &= ~SYSCFG0_SGMII_MASK;
- val |= !mac_id ? SYSCFG0_SGMII_GMAC1 : SYSCFG0_SGMII_GMAC2;
- regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
-
- dev_info(eth->dev, "setup shared sgmii for gmac=%d\n",
- mac_id);
- }
-
- /* Setup the GMAC1 going through SGMII path when SoC also support
- * ESW on GMAC1
- */
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_GMAC1_ESW | MTK_GMAC1_SGMII) &&
- !mac_id) {
- mtk_w32(eth, 0, MTK_MAC_MISC);
- dev_info(eth->dev, "setup gmac1 going through sgmii");
- }
-}
-
static void mtk_phy_link_adjust(struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
@@ -222,9 +208,17 @@ static void mtk_phy_link_adjust(struct net_device *dev)
break;
}
- if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII) &&
- !mac->id && !mac->trgmii)
- mtk_gmac0_rgmii_adjust(mac->hw, dev->phydev->speed);
+ if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII) && !mac->id) {
+ if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII_MT7621_CLK)) {
+ if (mt7621_gmac0_rgmii_adjust(mac->hw,
+ dev->phydev->interface))
+ return;
+ } else {
+ if (!mac->trgmii)
+ mtk_gmac0_rgmii_adjust(mac->hw,
+ dev->phydev->speed);
+ }
+ }
if (dev->phydev->link)
mcr |= MAC_MCR_FORCE_LINK;
@@ -289,6 +283,7 @@ static int mtk_phy_connect(struct net_device *dev)
struct mtk_eth *eth;
struct device_node *np;
u32 val;
+ int err;
eth = mac->hw;
np = of_parse_phandle(mac->of_node, "phy-handle", 0);
@@ -298,6 +293,10 @@ static int mtk_phy_connect(struct net_device *dev)
if (!np)
return -ENODEV;
+ err = mtk_setup_hw_path(eth, mac->id, of_get_phy_mode(np));
+ if (err)
+ goto err_phy;
+
mac->ge_mode = 0;
switch (of_get_phy_mode(np)) {
case PHY_INTERFACE_MODE_TRGMII:
@@ -306,12 +305,10 @@ static int mtk_phy_connect(struct net_device *dev)
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII:
- break;
case PHY_INTERFACE_MODE_SGMII:
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII))
- mtk_gmac_sgmii_hw_setup(eth, mac->id);
break;
case PHY_INTERFACE_MODE_MII:
+ case PHY_INTERFACE_MODE_GMII:
mac->ge_mode = 1;
break;
case PHY_INTERFACE_MODE_REVMII:
@@ -2477,16 +2474,28 @@ static int mtk_probe(struct platform_device *pdev)
return PTR_ERR(eth->ethsys);
}
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
- eth->sgmiisys =
- syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
- "mediatek,sgmiisys");
- if (IS_ERR(eth->sgmiisys)) {
- dev_err(&pdev->dev, "no sgmiisys regmap found\n");
- return PTR_ERR(eth->sgmiisys);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) {
+ eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "mediatek,infracfg");
+ if (IS_ERR(eth->infra)) {
+ dev_err(&pdev->dev, "no infracfg regmap found\n");
+ return PTR_ERR(eth->infra);
}
}
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
+ eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii),
+ GFP_KERNEL);
+ if (!eth->sgmii)
+ return -ENOMEM;
+
+ err = mtk_sgmii_init(eth->sgmii, pdev->dev.of_node,
+ eth->soc->ana_rgc3);
+
+ if (err)
+ return err;
+ }
+
if (eth->soc->required_pctl) {
eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
"mediatek,pctl");
@@ -2625,34 +2634,43 @@ static int mtk_remove(struct platform_device *pdev)
}
static const struct mtk_soc_data mt2701_data = {
- .caps = MTK_GMAC1_TRGMII | MTK_HWLRO,
+ .caps = MT7623_CAPS | MTK_HWLRO,
.required_clks = MT7623_CLKS_BITMAP,
.required_pctl = true,
};
static const struct mtk_soc_data mt7621_data = {
- .caps = MTK_SHARED_INT,
+ .caps = MT7621_CAPS,
.required_clks = MT7621_CLKS_BITMAP,
.required_pctl = false,
};
static const struct mtk_soc_data mt7622_data = {
- .caps = MTK_DUAL_GMAC_SHARED_SGMII | MTK_GMAC1_ESW | MTK_HWLRO,
+ .ana_rgc3 = 0x2028,
+ .caps = MT7622_CAPS | MTK_HWLRO,
.required_clks = MT7622_CLKS_BITMAP,
.required_pctl = false,
};
static const struct mtk_soc_data mt7623_data = {
- .caps = MTK_GMAC1_TRGMII | MTK_HWLRO,
+ .caps = MT7623_CAPS | MTK_HWLRO,
.required_clks = MT7623_CLKS_BITMAP,
.required_pctl = true,
};
+static const struct mtk_soc_data mt7629_data = {
+ .ana_rgc3 = 0x128,
+ .caps = MT7629_CAPS | MTK_HWLRO,
+ .required_clks = MT7629_CLKS_BITMAP,
+ .required_pctl = false,
+};
+
const struct of_device_id of_mtk_match[] = {
{ .compatible = "mediatek,mt2701-eth", .data = &mt2701_data},
{ .compatible = "mediatek,mt7621-eth", .data = &mt7621_data},
{ .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
{ .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
+ { .compatible = "mediatek,mt7629-eth", .data = &mt7629_data},
{},
};
MODULE_DEVICE_TABLE(of, of_mtk_match);
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index baa85d5601e7..c6be599ed94d 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -9,6 +9,10 @@
#ifndef MTK_ETH_H
#define MTK_ETH_H
+#include <linux/dma-mapping.h>
+#include <linux/netdevice.h>
+#include <linux/of_net.h>
+#include <linux/u64_stats_sync.h>
#include <linux/refcount.h>
#define MTK_QDMA_PAGE_SIZE 2048
@@ -359,17 +363,27 @@
#define MT7622_ETH 7622
#define MT7621_ETH 7621
+/* ethernet system control register */
+#define ETHSYS_SYSCFG 0x10
+#define SYSCFG_DRAM_TYPE_DDR2 BIT(4)
+
/* ethernet subsystem config register */
#define ETHSYS_SYSCFG0 0x14
#define SYSCFG0_GE_MASK 0x3
#define SYSCFG0_GE_MODE(x, y) (x << (12 + (y * 2)))
-#define SYSCFG0_SGMII_MASK (3 << 8)
-#define SYSCFG0_SGMII_GMAC1 ((2 << 8) & GENMASK(9, 8))
-#define SYSCFG0_SGMII_GMAC2 ((3 << 8) & GENMASK(9, 8))
+#define SYSCFG0_SGMII_MASK GENMASK(9, 8)
+#define SYSCFG0_SGMII_GMAC1 ((2 << 8) & SYSCFG0_SGMII_MASK)
+#define SYSCFG0_SGMII_GMAC2 ((3 << 8) & SYSCFG0_SGMII_MASK)
+#define SYSCFG0_SGMII_GMAC1_V2 BIT(9)
+#define SYSCFG0_SGMII_GMAC2_V2 BIT(8)
+
/* ethernet subsystem clock register */
#define ETHSYS_CLKCFG0 0x2c
#define ETHSYS_TRGMII_CLK_SEL362_5 BIT(11)
+#define ETHSYS_TRGMII_MT7621_MASK (BIT(5) | BIT(6))
+#define ETHSYS_TRGMII_MT7621_APLL BIT(6)
+#define ETHSYS_TRGMII_MT7621_DDR_PLL BIT(5)
/* ethernet reset control register */
#define ETHSYS_RSTCTRL 0x34
@@ -393,6 +407,11 @@
#define SGMSYS_QPHY_PWR_STATE_CTRL 0xe8
#define SGMII_PHYA_PWD BIT(4)
+/* Infrasys subsystem config registers */
+#define INFRA_MISC2 0x70c
+#define CO_QPHY_SEL BIT(0)
+#define GEPHY_MAC_SEL BIT(1)
+
struct mtk_rx_dma {
unsigned int rxd1;
unsigned int rxd2;
@@ -457,15 +476,21 @@ enum mtk_tx_flags {
*/
enum mtk_clks_map {
MTK_CLK_ETHIF,
+ MTK_CLK_SGMIITOP,
MTK_CLK_ESW,
MTK_CLK_GP0,
MTK_CLK_GP1,
MTK_CLK_GP2,
+ MTK_CLK_FE,
MTK_CLK_TRGPLL,
MTK_CLK_SGMII_TX_250M,
MTK_CLK_SGMII_RX_250M,
MTK_CLK_SGMII_CDR_REF,
MTK_CLK_SGMII_CDR_FB,
+ MTK_CLK_SGMII2_TX_250M,
+ MTK_CLK_SGMII2_RX_250M,
+ MTK_CLK_SGMII2_CDR_REF,
+ MTK_CLK_SGMII2_CDR_FB,
MTK_CLK_SGMII_CK,
MTK_CLK_ETH2PLL,
MTK_CLK_MAX
@@ -484,6 +509,19 @@ enum mtk_clks_map {
BIT(MTK_CLK_SGMII_CK) | \
BIT(MTK_CLK_ETH2PLL))
#define MT7621_CLKS_BITMAP (0)
+#define MT7629_CLKS_BITMAP (BIT(MTK_CLK_ETHIF) | BIT(MTK_CLK_ESW) | \
+ BIT(MTK_CLK_GP0) | BIT(MTK_CLK_GP1) | \
+ BIT(MTK_CLK_GP2) | BIT(MTK_CLK_FE) | \
+ BIT(MTK_CLK_SGMII_TX_250M) | \
+ BIT(MTK_CLK_SGMII_RX_250M) | \
+ BIT(MTK_CLK_SGMII_CDR_REF) | \
+ BIT(MTK_CLK_SGMII_CDR_FB) | \
+ BIT(MTK_CLK_SGMII2_TX_250M) | \
+ BIT(MTK_CLK_SGMII2_RX_250M) | \
+ BIT(MTK_CLK_SGMII2_CDR_REF) | \
+ BIT(MTK_CLK_SGMII2_CDR_FB) | \
+ BIT(MTK_CLK_SGMII_CK) | \
+ BIT(MTK_CLK_ETH2PLL) | BIT(MTK_CLK_SGMIITOP))
enum mtk_dev_state {
MTK_HW_INIT,
@@ -554,21 +592,120 @@ struct mtk_rx_ring {
u32 crx_idx_reg;
};
-#define MTK_TRGMII BIT(0)
-#define MTK_GMAC1_TRGMII (BIT(1) | MTK_TRGMII)
-#define MTK_ESW BIT(4)
-#define MTK_GMAC1_ESW (BIT(5) | MTK_ESW)
-#define MTK_SGMII BIT(8)
-#define MTK_GMAC1_SGMII (BIT(9) | MTK_SGMII)
-#define MTK_GMAC2_SGMII (BIT(10) | MTK_SGMII)
-#define MTK_DUAL_GMAC_SHARED_SGMII (BIT(11) | MTK_GMAC1_SGMII | \
- MTK_GMAC2_SGMII)
-#define MTK_HWLRO BIT(12)
-#define MTK_SHARED_INT BIT(13)
+enum mtk_eth_capabilities {
+ MTK_RGMII_BIT = 0,
+ MTK_TRGMII_BIT,
+ MTK_SGMII_BIT,
+ MTK_ESW_BIT,
+ MTK_GEPHY_BIT,
+ MTK_MUX_BIT,
+ MTK_INFRA_BIT,
+ MTK_SHARED_SGMII_BIT,
+ MTK_HWLRO_BIT,
+ MTK_SHARED_INT_BIT,
+ MTK_TRGMII_MT7621_CLK_BIT,
+
+ /* MUX BITS */
+ MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT,
+ MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY_BIT,
+ MTK_ETH_MUX_U3_GMAC2_TO_QPHY_BIT,
+ MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII_BIT,
+ MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII_BIT,
+
+ /* PATH BITS */
+ MTK_ETH_PATH_GMAC1_RGMII_BIT,
+ MTK_ETH_PATH_GMAC1_TRGMII_BIT,
+ MTK_ETH_PATH_GMAC1_SGMII_BIT,
+ MTK_ETH_PATH_GMAC2_RGMII_BIT,
+ MTK_ETH_PATH_GMAC2_SGMII_BIT,
+ MTK_ETH_PATH_GMAC2_GEPHY_BIT,
+ MTK_ETH_PATH_GDM1_ESW_BIT,
+};
+
+/* Supported hardware group on SoCs */
+#define MTK_RGMII BIT(MTK_RGMII_BIT)
+#define MTK_TRGMII BIT(MTK_TRGMII_BIT)
+#define MTK_SGMII BIT(MTK_SGMII_BIT)
+#define MTK_ESW BIT(MTK_ESW_BIT)
+#define MTK_GEPHY BIT(MTK_GEPHY_BIT)
+#define MTK_MUX BIT(MTK_MUX_BIT)
+#define MTK_INFRA BIT(MTK_INFRA_BIT)
+#define MTK_SHARED_SGMII BIT(MTK_SHARED_SGMII_BIT)
+#define MTK_HWLRO BIT(MTK_HWLRO_BIT)
+#define MTK_SHARED_INT BIT(MTK_SHARED_INT_BIT)
+#define MTK_TRGMII_MT7621_CLK BIT(MTK_TRGMII_MT7621_CLK_BIT)
+
+#define MTK_ETH_MUX_GDM1_TO_GMAC1_ESW \
+ BIT(MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT)
+#define MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY \
+ BIT(MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY_BIT)
+#define MTK_ETH_MUX_U3_GMAC2_TO_QPHY \
+ BIT(MTK_ETH_MUX_U3_GMAC2_TO_QPHY_BIT)
+#define MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII \
+ BIT(MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII_BIT)
+#define MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII \
+ BIT(MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII_BIT)
+
+/* Supported path present on SoCs */
+#define MTK_ETH_PATH_GMAC1_RGMII BIT(MTK_ETH_PATH_GMAC1_RGMII_BIT)
+#define MTK_ETH_PATH_GMAC1_TRGMII BIT(MTK_ETH_PATH_GMAC1_TRGMII_BIT)
+#define MTK_ETH_PATH_GMAC1_SGMII BIT(MTK_ETH_PATH_GMAC1_SGMII_BIT)
+#define MTK_ETH_PATH_GMAC2_RGMII BIT(MTK_ETH_PATH_GMAC2_RGMII_BIT)
+#define MTK_ETH_PATH_GMAC2_SGMII BIT(MTK_ETH_PATH_GMAC2_SGMII_BIT)
+#define MTK_ETH_PATH_GMAC2_GEPHY BIT(MTK_ETH_PATH_GMAC2_GEPHY_BIT)
+#define MTK_ETH_PATH_GDM1_ESW BIT(MTK_ETH_PATH_GDM1_ESW_BIT)
+
+#define MTK_GMAC1_RGMII (MTK_ETH_PATH_GMAC1_RGMII | MTK_RGMII)
+#define MTK_GMAC1_TRGMII (MTK_ETH_PATH_GMAC1_TRGMII | MTK_TRGMII)
+#define MTK_GMAC1_SGMII (MTK_ETH_PATH_GMAC1_SGMII | MTK_SGMII)
+#define MTK_GMAC2_RGMII (MTK_ETH_PATH_GMAC2_RGMII | MTK_RGMII)
+#define MTK_GMAC2_SGMII (MTK_ETH_PATH_GMAC2_SGMII | MTK_SGMII)
+#define MTK_GMAC2_GEPHY (MTK_ETH_PATH_GMAC2_GEPHY | MTK_GEPHY)
+#define MTK_GDM1_ESW (MTK_ETH_PATH_GDM1_ESW | MTK_ESW)
+
+/* MUXes present on SoCs */
+/* 0: GDM1 -> GMAC1, 1: GDM1 -> ESW */
+#define MTK_MUX_GDM1_TO_GMAC1_ESW (MTK_ETH_MUX_GDM1_TO_GMAC1_ESW | MTK_MUX)
+
+/* 0: GMAC2 -> GEPHY, 1: GMAC0 -> GEPHY */
+#define MTK_MUX_GMAC2_GMAC0_TO_GEPHY \
+ (MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY | MTK_MUX | MTK_INFRA)
+
+/* 0: U3 -> QPHY, 1: GMAC2 -> QPHY */
+#define MTK_MUX_U3_GMAC2_TO_QPHY \
+ (MTK_ETH_MUX_U3_GMAC2_TO_QPHY | MTK_MUX | MTK_INFRA)
+
+/* 2: GMAC1 -> SGMII, 3: GMAC2 -> SGMII */
+#define MTK_MUX_GMAC1_GMAC2_TO_SGMII_RGMII \
+ (MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII | MTK_MUX | \
+ MTK_SHARED_SGMII)
+
+/* 0: GMACx -> GEPHY, 1: GMACx -> SGMII where x is 1 or 2 */
+#define MTK_MUX_GMAC12_TO_GEPHY_SGMII \
+ (MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII | MTK_MUX)
+
#define MTK_HAS_CAPS(caps, _x) (((caps) & (_x)) == (_x))
+#define MT7621_CAPS (MTK_GMAC1_RGMII | MTK_GMAC1_TRGMII | \
+ MTK_GMAC2_RGMII | MTK_SHARED_INT | MTK_TRGMII_MT7621_CLK)
+
+#define MT7622_CAPS (MTK_GMAC1_RGMII | MTK_GMAC1_SGMII | MTK_GMAC2_RGMII | \
+ MTK_GMAC2_SGMII | MTK_GDM1_ESW | \
+ MTK_MUX_GDM1_TO_GMAC1_ESW | \
+ MTK_MUX_GMAC1_GMAC2_TO_SGMII_RGMII)
+
+#define MT7623_CAPS (MTK_GMAC1_RGMII | MTK_GMAC1_TRGMII | MTK_GMAC2_RGMII)
+
+#define MT7629_CAPS (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | MTK_GMAC2_GEPHY | \
+ MTK_GDM1_ESW | MTK_MUX_GDM1_TO_GMAC1_ESW | \
+ MTK_MUX_GMAC2_GMAC0_TO_GEPHY | \
+ MTK_MUX_U3_GMAC2_TO_QPHY | \
+ MTK_MUX_GMAC12_TO_GEPHY_SGMII)
+
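Editor's note (not part of the patch): a minimal sketch of how the composite capabilities above interact with MTK_HAS_CAPS(), which only matches when every requested bit is present.

/* MT7622_CAPS contains MTK_GMAC1_SGMII (path bit plus the MTK_SGMII group
 * bit), so this returns true for MT7622 and false for MT7623, which has no
 * SGMII capability.
 */
static inline bool example_soc_has_gmac1_sgmii(u32 soc_caps)
{
	return MTK_HAS_CAPS(soc_caps, MTK_GMAC1_SGMII);
}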
/* struct mtk_eth_data - This is the structure holding all differences
* among various plaforms
+ * @ana_rgc3: The offset of register ANA_RGC3 relative to the
+ * sgmiisys syscon
* @caps Flags shown the extra capability for the SoC
* @required_clks Flags shown the bitmap for required clocks on
* the target SoC
@@ -576,6 +713,7 @@ struct mtk_rx_ring {
* the extra setup for those pins used by GMAC.
*/
struct mtk_soc_data {
+ u32 ana_rgc3;
u32 caps;
u32 required_clks;
bool required_pctl;
@@ -584,6 +722,26 @@ struct mtk_soc_data {
/* currently no SoC has more than 2 macs */
#define MTK_MAX_DEVS 2
+#define MTK_SGMII_PHYSPEED_AN BIT(31)
+#define MTK_SGMII_PHYSPEED_MASK GENMASK(2, 0)
+#define MTK_SGMII_PHYSPEED_1000 BIT(0)
+#define MTK_SGMII_PHYSPEED_2500 BIT(1)
+#define MTK_HAS_FLAGS(flags, _x) (((flags) & (_x)) == (_x))
+
+/* struct mtk_sgmii - This is the structure holding sgmii regmap and its
+ * characteristics
+ * @regmap: The register map pointing at the range used to setup
+ * SGMII modes
+ * @flags: Flags indicating which mode the SGMII instance should run in
+ * @ana_rgc3: The offset of register ANA_RGC3 relative to the regmap
+ */
+
+struct mtk_sgmii {
+ struct regmap *regmap[MTK_MAX_DEVS];
+ u32 flags[MTK_MAX_DEVS];
+ u32 ana_rgc3;
+};
+
/* struct mtk_eth - This is the main datasructure for holding the state
* of the driver
* @dev: The device pointer
@@ -599,8 +757,8 @@ struct mtk_soc_data {
* @msg_enable: Ethtool msg level
* @ethsys: The register map pointing at the range used to setup
* MII modes
- * @sgmiisys: The register map pointing at the range used to setup
- * SGMII modes
+ * @infra: The register map pointing at the range used to setup
+ * SGMII and GePHY path
* @pctl: The register map pointing at the range used to setup
* GMAC port drive/slew values
* @dma_refcnt: track how many netdevs are using the DMA engine
@@ -632,7 +790,8 @@ struct mtk_eth {
u32 msg_enable;
unsigned long sysclk;
struct regmap *ethsys;
- struct regmap *sgmiisys;
+ struct regmap *infra;
+ struct mtk_sgmii *sgmii;
struct regmap *pctl;
bool hwlro;
refcount_t dma_refcnt;
@@ -683,4 +842,10 @@ void mtk_stats_update_mac(struct mtk_mac *mac);
void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg);
u32 mtk_r32(struct mtk_eth *eth, unsigned reg);
+int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *np,
+ u32 ana_rgc3);
+int mtk_sgmii_setup_mode_an(struct mtk_sgmii *ss, int id);
+int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, int id);
+int mtk_setup_hw_path(struct mtk_eth *eth, int mac_id, int phymode);
+
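Editor's note (not part of the patch): a minimal sketch of how the MTK_SGMII_PHYSPEED flags and struct mtk_sgmii declared above are consumed, mirroring the AN/forced-mode selection done in mtk_eth_path.c.

/* Bring up one SGMII instance in either autonegotiation or forced mode. */
static inline int example_sgmii_bringup(struct mtk_sgmii *ss, int id)
{
	if (MTK_HAS_FLAGS(ss->flags[id], MTK_SGMII_PHYSPEED_AN))
		return mtk_sgmii_setup_mode_an(ss, id);

	return mtk_sgmii_setup_mode_force(ss, id);
}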
#endif /* MTK_ETH_H */
diff --git a/drivers/net/ethernet/mediatek/mtk_sgmii.c b/drivers/net/ethernet/mediatek/mtk_sgmii.c
new file mode 100644
index 000000000000..136f90ce5a65
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_sgmii.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018-2019 MediaTek Inc.
+
+/* A library for MediaTek SGMII circuit
+ *
+ * Author: Sean Wang <sean.wang@mediatek.com>
+ *
+ */
+
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+
+#include "mtk_eth_soc.h"
+
+int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *r, u32 ana_rgc3)
+{
+ struct device_node *np;
+ const char *str;
+ int i, err;
+
+ ss->ana_rgc3 = ana_rgc3;
+
+ for (i = 0; i < MTK_MAX_DEVS; i++) {
+ np = of_parse_phandle(r, "mediatek,sgmiisys", i);
+ if (!np)
+ break;
+
+ ss->regmap[i] = syscon_node_to_regmap(np);
+ if (IS_ERR(ss->regmap[i]))
+ return PTR_ERR(ss->regmap[i]);
+
+ err = of_property_read_string(np, "mediatek,physpeed", &str);
+ if (err)
+ return err;
+
+ if (!strcmp(str, "2500"))
+ ss->flags[i] |= MTK_SGMII_PHYSPEED_2500;
+ else if (!strcmp(str, "1000"))
+ ss->flags[i] |= MTK_SGMII_PHYSPEED_1000;
+ else if (!strcmp(str, "auto"))
+ ss->flags[i] |= MTK_SGMII_PHYSPEED_AN;
+ else
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int mtk_sgmii_setup_mode_an(struct mtk_sgmii *ss, int id)
+{
+ unsigned int val;
+
+ if (!ss->regmap[id])
+ return -EINVAL;
+
+ /* Setup the link timer and QPHY power up inside SGMIISYS */
+ regmap_write(ss->regmap[id], SGMSYS_PCS_LINK_TIMER,
+ SGMII_LINK_TIMER_DEFAULT);
+
+ regmap_read(ss->regmap[id], SGMSYS_SGMII_MODE, &val);
+ val |= SGMII_REMOTE_FAULT_DIS;
+ regmap_write(ss->regmap[id], SGMSYS_SGMII_MODE, val);
+
+ regmap_read(ss->regmap[id], SGMSYS_PCS_CONTROL_1, &val);
+ val |= SGMII_AN_RESTART;
+ regmap_write(ss->regmap[id], SGMSYS_PCS_CONTROL_1, val);
+
+ regmap_read(ss->regmap[id], SGMSYS_QPHY_PWR_STATE_CTRL, &val);
+ val &= ~SGMII_PHYA_PWD;
+ regmap_write(ss->regmap[id], SGMSYS_QPHY_PWR_STATE_CTRL, val);
+
+ return 0;
+}
+
+int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, int id)
+{
+ unsigned int val;
+ int mode;
+
+ if (!ss->regmap[id])
+ return -EINVAL;
+
+ regmap_read(ss->regmap[id], ss->ana_rgc3, &val);
+ val &= ~GENMASK(3, 2);
+ mode = ss->flags[id] & MTK_SGMII_PHYSPEED_MASK;
+ val |= (mode == MTK_SGMII_PHYSPEED_1000) ? 0 : BIT(2);
+ regmap_write(ss->regmap[id], ss->ana_rgc3, val);
+
+ /* Disable SGMII AN */
+ regmap_read(ss->regmap[id], SGMSYS_PCS_CONTROL_1, &val);
+ val &= ~BIT(12);
+ regmap_write(ss->regmap[id], SGMSYS_PCS_CONTROL_1, val);
+
+ /* SGMII force mode setting */
+ val = 0x31120019;
+ regmap_write(ss->regmap[id], SGMSYS_SGMII_MODE, val);
+
+ /* Release PHYA power down state */
+ regmap_read(ss->regmap[id], SGMSYS_QPHY_PWR_STATE_CTRL, &val);
+ val &= ~SGMII_PHYA_PWD;
+ regmap_write(ss->regmap[id], SGMSYS_QPHY_PWR_STATE_CTRL, val);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 2391e3cfb56b..37fef8cd25e3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -34,6 +34,7 @@ config MLX5_CORE_EN
depends on NETDEVICES && ETHERNET && INET && PCI && MLX5_CORE
depends on IPV6=y || IPV6=n || MLX5_CORE=m
select PAGE_POOL
+ select DIMLIB
default n
---help---
Ethernet support in Mellanox Technologies ConnectX-4 NIC.
@@ -96,26 +97,60 @@ config MLX5_CORE_IPOIB
---help---
MLX5 IPoIB offloads & acceleration support.
+config MLX5_FPGA_IPSEC
+ bool "Mellanox Technologies IPsec Innova support"
+ depends on MLX5_CORE
+ depends on MLX5_FPGA
+ default n
+ help
+ Build IPsec support for the Innova family of network cards by Mellanox
+ Technologies. Innova network cards are comprised of a ConnectX chip
+ and an FPGA chip on one board. If you select this option, the
+ mlx5_core driver will include the Innova FPGA core and allow building
+ sandbox-specific client drivers.
+
config MLX5_EN_IPSEC
bool "IPSec XFRM cryptography-offload accelaration"
- depends on MLX5_ACCEL
depends on MLX5_CORE_EN
depends on XFRM_OFFLOAD
depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
+ depends on MLX5_FPGA_IPSEC
default n
- ---help---
+ help
Build support for IPsec cryptography-offload accelaration in the NIC.
Note: Support for hardware with this capability needs to be selected
for this option to become available.
-config MLX5_EN_TLS
- bool "TLS cryptography-offload accelaration"
+config MLX5_FPGA_TLS
+ bool "Mellanox Technologies TLS Innova support"
+ depends on TLS_DEVICE
+ depends on TLS=y || MLX5_CORE=m
+ depends on MLX5_FPGA
+ default n
+ help
+ Build TLS support for the Innova family of network cards by Mellanox
+ Technologies. Innova network cards are comprised of a ConnectX chip
+ and an FPGA chip on one board. If you select this option, the
+ mlx5_core driver will include the Innova FPGA core and allow building
+ sandbox-specific client drivers.
+
+config MLX5_TLS
+ bool "Mellanox Technologies TLS Connect-X support"
depends on MLX5_CORE_EN
depends on TLS_DEVICE
depends on TLS=y || MLX5_CORE=m
- depends on MLX5_ACCEL
+ select MLX5_ACCEL
default n
- ---help---
- Build support for TLS cryptography-offload accelaration in the NIC.
- Note: Support for hardware with this capability needs to be selected
- for this option to become available.
+ help
+ Build TLS support for the Connect-X family of network cards by Mellanox
+ Technologies.
+
+config MLX5_EN_TLS
+ bool "TLS cryptography-offload acceleration"
+ depends on MLX5_CORE_EN
+ depends on MLX5_FPGA_TLS || MLX5_TLS
+ default y
+ help
+ Build support for TLS cryptography-offload acceleration in the NIC.
+ Note: Support for hardware with this capability needs to be selected
+ for this option to become available.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 243368dc23db..57d2cc666fe3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -13,9 +13,10 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o
#
mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
health.o mcg.o cq.o alloc.o qp.o port.o mr.o pd.o \
- transobj.o vport.o sriov.o fs_cmd.o fs_core.o \
+ transobj.o vport.o sriov.o fs_cmd.o fs_core.o pci_irq.o \
fs_counters.o rl.o lag.o dev.o events.o wq.o lib/gid.o \
- lib/devcom.o diag/fs_tracepoint.o diag/fw_tracer.o
+ lib/devcom.o lib/pci_vsc.o diag/fs_tracepoint.o \
+ diag/fw_tracer.o diag/crdump.o devlink.o
#
# Netdev basic
@@ -23,7 +24,7 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
en_tx.o en_rx.o en_dim.o en_txrx.o en/xdp.o en_stats.o \
en_selftest.o en/port.o en/monitor_stats.o en/reporter_tx.o \
- en/params.o
+ en/params.o en/xsk/umem.o en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o
#
# Netdev extra
@@ -31,12 +32,15 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
mlx5_core-$(CONFIG_MLX5_EN_ARFS) += en_arfs.o
mlx5_core-$(CONFIG_MLX5_EN_RXNFC) += en_fs_ethtool.o
mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o en/port_buffer.o
-mlx5_core-$(CONFIG_MLX5_ESWITCH) += en_rep.o en_tc.o en/tc_tun.o lib/port_tun.o lag_mp.o
+mlx5_core-$(CONFIG_MLX5_ESWITCH) += en_rep.o en_tc.o en/tc_tun.o lib/port_tun.o lag_mp.o \
+ lib/geneve.o en/tc_tun_vxlan.o en/tc_tun_gre.o \
+ en/tc_tun_geneve.o
#
# Core extra
#
-mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o ecpf.o rdma.o
+mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o eswitch_offloads_termtbl.o \
+ ecpf.o rdma.o
mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
mlx5_core-$(CONFIG_VXLAN) += lib/vxlan.o
mlx5_core-$(CONFIG_PTP_1588_CLOCK) += lib/clock.o
@@ -49,12 +53,14 @@ mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o ipoib/ipoib
#
# Accelerations & FPGA
#
-mlx5_core-$(CONFIG_MLX5_ACCEL) += accel/ipsec.o accel/tls.o
+mlx5_core-$(CONFIG_MLX5_FPGA_IPSEC) += fpga/ipsec.o
+mlx5_core-$(CONFIG_MLX5_FPGA_TLS) += fpga/tls.o
+mlx5_core-$(CONFIG_MLX5_ACCEL) += lib/crypto.o accel/tls.o accel/ipsec.o
-mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o \
- fpga/ipsec.o fpga/tls.o
+mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o
mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \
en_accel/ipsec_stats.o
-mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o en_accel/tls_rxtx.o en_accel/tls_stats.o
+mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o en_accel/tls_rxtx.o en_accel/tls_stats.o \
+ en_accel/ktls.o en_accel/ktls_tx.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c
index 9f1b1939716a..eddc34e4a762 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c
@@ -31,6 +31,8 @@
*
*/
+#ifdef CONFIG_MLX5_FPGA_IPSEC
+
#include <linux/mlx5/device.h>
#include "accel/ipsec.h"
@@ -74,6 +76,11 @@ int mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev)
return mlx5_fpga_ipsec_init(mdev);
}
+void mlx5_accel_ipsec_build_fs_cmds(void)
+{
+ mlx5_fpga_ipsec_build_fs_cmds();
+}
+
void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev)
{
mlx5_fpga_ipsec_cleanup(mdev);
@@ -107,3 +114,5 @@ int mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
return mlx5_fpga_esp_modify_xfrm(xfrm, attrs);
}
EXPORT_SYMBOL_GPL(mlx5_accel_esp_modify_xfrm);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h
index 024dbd22a89b..530e428d46ab 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h
@@ -37,7 +37,7 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/accel.h>
-#ifdef CONFIG_MLX5_ACCEL
+#ifdef CONFIG_MLX5_FPGA_IPSEC
#define MLX5_IPSEC_DEV(mdev) (mlx5_accel_ipsec_device_caps(mdev) & \
MLX5_ACCEL_IPSEC_CAP_DEVICE)
@@ -54,6 +54,7 @@ void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
void mlx5_accel_esp_free_hw_context(void *context);
int mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev);
+void mlx5_accel_ipsec_build_fs_cmds(void);
void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev);
#else
@@ -79,6 +80,10 @@ static inline int mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev)
return 0;
}
+static inline void mlx5_accel_ipsec_build_fs_cmds(void)
+{
+}
+
static inline void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev)
{
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c
index da7bd26368f9..cab708af3422 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c
@@ -35,6 +35,9 @@
#include "accel/tls.h"
#include "mlx5_core.h"
+#include "lib/mlx5.h"
+
+#ifdef CONFIG_MLX5_FPGA_TLS
#include "fpga/tls.h"
int mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
@@ -61,7 +64,8 @@ int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev)
{
- return mlx5_fpga_is_tls_device(mdev);
+ return mlx5_fpga_is_tls_device(mdev) ||
+ mlx5_accel_is_ktls_device(mdev);
}
u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev)
@@ -78,3 +82,42 @@ void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev)
{
mlx5_fpga_tls_cleanup(mdev);
}
+#endif
+
+#ifdef CONFIG_MLX5_TLS
+int mlx5_ktls_create_key(struct mlx5_core_dev *mdev,
+ struct tls_crypto_info *crypto_info,
+ u32 *p_key_id)
+{
+ u32 sz_bytes;
+ void *key;
+
+ switch (crypto_info->cipher_type) {
+ case TLS_CIPHER_AES_GCM_128: {
+ struct tls12_crypto_info_aes_gcm_128 *info =
+ (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
+
+ key = info->key;
+ sz_bytes = sizeof(info->key);
+ break;
+ }
+ case TLS_CIPHER_AES_GCM_256: {
+ struct tls12_crypto_info_aes_gcm_256 *info =
+ (struct tls12_crypto_info_aes_gcm_256 *)crypto_info;
+
+ key = info->key;
+ sz_bytes = sizeof(info->key);
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+
+ return mlx5_create_encryption_key(mdev, key, sz_bytes, p_key_id);
+}
+
+void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id)
+{
+ mlx5_destroy_encryption_key(mdev, key_id);
+}
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h
index def4093ebfae..879321b21616 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h
@@ -37,8 +37,51 @@
#include <linux/mlx5/driver.h>
#include <linux/tls.h>
-#ifdef CONFIG_MLX5_ACCEL
+#ifdef CONFIG_MLX5_TLS
+int mlx5_ktls_create_key(struct mlx5_core_dev *mdev,
+ struct tls_crypto_info *crypto_info,
+ u32 *p_key_id);
+void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id);
+static inline bool mlx5_accel_is_ktls_device(struct mlx5_core_dev *mdev)
+{
+ if (!MLX5_CAP_GEN(mdev, tls))
+ return false;
+
+ if (!MLX5_CAP_GEN(mdev, log_max_dek))
+ return false;
+
+ return MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_128);
+}
+
+static inline bool mlx5e_ktls_type_check(struct mlx5_core_dev *mdev,
+ struct tls_crypto_info *crypto_info)
+{
+ switch (crypto_info->cipher_type) {
+ case TLS_CIPHER_AES_GCM_128:
+ if (crypto_info->version == TLS_1_2_VERSION)
+ return MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_128);
+ break;
+ }
+
+ return false;
+}
+#else
+static inline int
+mlx5_ktls_create_key(struct mlx5_core_dev *mdev,
+ struct tls_crypto_info *crypto_info,
+ u32 *p_key_id) { return -ENOTSUPP; }
+static inline void
+mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id) {}
+
+static inline bool
+mlx5_accel_is_ktls_device(struct mlx5_core_dev *mdev) { return false; }
+static inline bool
+mlx5e_ktls_type_check(struct mlx5_core_dev *mdev,
+ struct tls_crypto_info *crypto_info) { return false; }
+#endif
+
+#ifdef CONFIG_MLX5_FPGA_TLS
enum {
MLX5_ACCEL_TLS_TX = BIT(0),
MLX5_ACCEL_TLS_RX = BIT(1),
@@ -84,11 +127,13 @@ static inline void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
bool direction_sx) { }
static inline int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle,
u32 seq, u64 rcd_sn) { return 0; }
-static inline bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev) { return false; }
+static inline bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev)
+{
+ return mlx5_accel_is_ktls_device(mdev);
+}
static inline u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev) { return 0; }
static inline int mlx5_accel_tls_init(struct mlx5_core_dev *mdev) { return 0; }
static inline void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev) { }
-
#endif
#endif /* __MLX5_ACCEL_TLS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index e94686c42000..8cdd7e66f8df 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -316,7 +316,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_DESTROY_GENERAL_OBJECT:
case MLX5_CMD_OP_DEALLOC_MEMIC:
case MLX5_CMD_OP_PAGE_FAULT_RESUME:
- case MLX5_CMD_OP_QUERY_HOST_PARAMS:
+ case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS:
return MLX5_CMD_STAT_OK;
case MLX5_CMD_OP_QUERY_HCA_CAP:
@@ -632,7 +632,7 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(QUERY_MODIFY_HEADER_CONTEXT);
MLX5_COMMAND_STR_CASE(ALLOC_MEMIC);
MLX5_COMMAND_STR_CASE(DEALLOC_MEMIC);
- MLX5_COMMAND_STR_CASE(QUERY_HOST_PARAMS);
+ MLX5_COMMAND_STR_CASE(QUERY_ESW_FUNCTIONS);
MLX5_COMMAND_STR_CASE(CREATE_UCTX);
MLX5_COMMAND_STR_CASE(DESTROY_UCTX);
MLX5_COMMAND_STR_CASE(CREATE_UMEM);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
index 713a17ee3751..818edc63e428 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -58,7 +58,7 @@ void mlx5_cq_tasklet_cb(unsigned long data)
list_for_each_entry_safe(mcq, temp, &ctx->process_list,
tasklet_ctx.list) {
list_del_init(&mcq->tasklet_ctx.list);
- mcq->tasklet_ctx.comp(mcq);
+ mcq->tasklet_ctx.comp(mcq, NULL);
mlx5_cq_put(mcq);
if (time_after(jiffies, end))
break;
@@ -68,7 +68,8 @@ void mlx5_cq_tasklet_cb(unsigned long data)
tasklet_schedule(&ctx->task);
}
-static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq)
+static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq,
+ struct mlx5_eqe *eqe)
{
unsigned long flags;
struct mlx5_eq_tasklet *tasklet_ctx = cq->tasklet_ctx.priv;
@@ -87,11 +88,10 @@ static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq)
}
int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
- u32 *in, int inlen)
+ u32 *in, int inlen, u32 *out, int outlen)
{
int eqn = MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context), c_eqn);
u32 dout[MLX5_ST_SZ_DW(destroy_cq_out)];
- u32 out[MLX5_ST_SZ_DW(create_cq_out)];
u32 din[MLX5_ST_SZ_DW(destroy_cq_in)];
struct mlx5_eq_comp *eq;
int err;
@@ -100,9 +100,9 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
if (IS_ERR(eq))
return PTR_ERR(eq);
- memset(out, 0, sizeof(out));
+ memset(out, 0, outlen);
MLX5_SET(create_cq_in, in, opcode, MLX5_CMD_OP_CREATE_CQ);
- err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+ err = mlx5_cmd_exec(dev, in, inlen, out, outlen);
if (err)
return err;
@@ -158,13 +158,8 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
u32 in[MLX5_ST_SZ_DW(destroy_cq_in)] = {0};
int err;
- err = mlx5_eq_del_cq(mlx5_get_async_eq(dev), cq);
- if (err)
- return err;
-
- err = mlx5_eq_del_cq(&cq->eq->core, cq);
- if (err)
- return err;
+ mlx5_eq_del_cq(mlx5_get_async_eq(dev), cq);
+ mlx5_eq_del_cq(&cq->eq->core, cq);
MLX5_SET(destroy_cq_in, in, opcode, MLX5_CMD_OP_DESTROY_CQ);
MLX5_SET(destroy_cq_in, in, cqn, cq->cqn);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index f6b1da99e6c2..5bb6a26ea267 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -311,13 +311,20 @@ static u32 mlx5_gen_pci_id(struct mlx5_core_dev *dev)
/* Must be called with intf_mutex held */
struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
{
- u32 pci_id = mlx5_gen_pci_id(dev);
struct mlx5_core_dev *res = NULL;
struct mlx5_core_dev *tmp_dev;
struct mlx5_priv *priv;
+ u32 pci_id;
+ if (!mlx5_core_is_pf(dev))
+ return NULL;
+
+ pci_id = mlx5_gen_pci_id(dev);
list_for_each_entry(priv, &mlx5_dev_list, dev_list) {
tmp_dev = container_of(priv, struct mlx5_core_dev, priv);
+ if (!mlx5_core_is_pf(tmp_dev))
+ continue;
+
if ((dev != tmp_dev) && (mlx5_gen_pci_id(tmp_dev) == pci_id)) {
res = tmp_dev;
break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
new file mode 100644
index 000000000000..a400f4430c28
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Mellanox Technologies */
+
+#include <devlink.h>
+
+#include "mlx5_core.h"
+#include "eswitch.h"
+
+static int mlx5_devlink_flash_update(struct devlink *devlink,
+ const char *file_name,
+ const char *component,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ const struct firmware *fw;
+ int err;
+
+ if (component)
+ return -EOPNOTSUPP;
+
+ err = request_firmware_direct(&fw, file_name, &dev->pdev->dev);
+ if (err)
+ return err;
+
+ return mlx5_firmware_flash(dev, fw, extack);
+}
+
+static u8 mlx5_fw_ver_major(u32 version)
+{
+ return (version >> 24) & 0xff;
+}
+
+static u8 mlx5_fw_ver_minor(u32 version)
+{
+ return (version >> 16) & 0xff;
+}
+
+static u16 mlx5_fw_ver_subminor(u32 version)
+{
+ return version & 0xffff;
+}
+
+#define DEVLINK_FW_STRING_LEN 32
+
+static int
+mlx5_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ char version_str[DEVLINK_FW_STRING_LEN];
+ u32 running_fw, stored_fw;
+ int err;
+
+ err = devlink_info_driver_name_put(req, DRIVER_NAME);
+ if (err)
+ return err;
+
+ err = devlink_info_version_fixed_put(req, "fw.psid", dev->board_id);
+ if (err)
+ return err;
+
+ err = mlx5_fw_version_query(dev, &running_fw, &stored_fw);
+ if (err)
+ return err;
+
+ snprintf(version_str, sizeof(version_str), "%d.%d.%04d",
+ mlx5_fw_ver_major(running_fw), mlx5_fw_ver_minor(running_fw),
+ mlx5_fw_ver_subminor(running_fw));
+ err = devlink_info_version_running_put(req, "fw.version", version_str);
+ if (err)
+ return err;
+
+ /* no pending version, return running (stored) version */
+ if (stored_fw == 0)
+ stored_fw = running_fw;
+
+ snprintf(version_str, sizeof(version_str), "%d.%d.%04d",
+ mlx5_fw_ver_major(stored_fw), mlx5_fw_ver_minor(stored_fw),
+ mlx5_fw_ver_subminor(stored_fw));
+ err = devlink_info_version_stored_put(req, "fw.version", version_str);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static const struct devlink_ops mlx5_devlink_ops = {
+#ifdef CONFIG_MLX5_ESWITCH
+ .eswitch_mode_set = mlx5_devlink_eswitch_mode_set,
+ .eswitch_mode_get = mlx5_devlink_eswitch_mode_get,
+ .eswitch_inline_mode_set = mlx5_devlink_eswitch_inline_mode_set,
+ .eswitch_inline_mode_get = mlx5_devlink_eswitch_inline_mode_get,
+ .eswitch_encap_mode_set = mlx5_devlink_eswitch_encap_mode_set,
+ .eswitch_encap_mode_get = mlx5_devlink_eswitch_encap_mode_get,
+#endif
+ .flash_update = mlx5_devlink_flash_update,
+ .info_get = mlx5_devlink_info_get,
+};
+
+struct devlink *mlx5_devlink_alloc(void)
+{
+ return devlink_alloc(&mlx5_devlink_ops, sizeof(struct mlx5_core_dev));
+}
+
+void mlx5_devlink_free(struct devlink *devlink)
+{
+ devlink_free(devlink);
+}
+
+int mlx5_devlink_register(struct devlink *devlink, struct device *dev)
+{
+ return devlink_register(devlink, dev);
+}
+
+void mlx5_devlink_unregister(struct devlink *devlink)
+{
+ devlink_unregister(devlink);
+}
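
A rough sketch of the call order these wrappers imply at probe time; the function name, the error handling and the elided device initialization are illustrative, not taken from the driver.

static struct devlink *mlx5_devlink_probe_sketch(struct device *device)
{
	struct devlink *devlink;
	struct mlx5_core_dev *dev;

	devlink = mlx5_devlink_alloc();
	if (!devlink)
		return ERR_PTR(-ENOMEM);

	/* The priv area is sized as struct mlx5_core_dev by the alloc above. */
	dev = devlink_priv(devlink);
	(void)dev;	/* ... hardware and driver initialization goes here ... */

	if (mlx5_devlink_register(devlink, device)) {
		mlx5_devlink_free(devlink);
		return ERR_PTR(-ENODEV);
	}
	return devlink;
}
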
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
new file mode 100644
index 000000000000..d0ba03774ddf
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2019, Mellanox Technologies */
+
+#ifndef __MLX5_DEVLINK_H__
+#define __MLX5_DEVLINK_H__
+
+#include <net/devlink.h>
+
+struct devlink *mlx5_devlink_alloc(void);
+void mlx5_devlink_free(struct devlink *devlink);
+int mlx5_devlink_register(struct devlink *devlink, struct device *dev);
+void mlx5_devlink_unregister(struct devlink *devlink);
+
+#endif /* __MLX5_DEVLINK_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/crdump.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/crdump.c
new file mode 100644
index 000000000000..28d02749d3c4
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/crdump.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Mellanox Technologies */
+
+#include <linux/mlx5/driver.h>
+#include "mlx5_core.h"
+#include "lib/pci_vsc.h"
+#include "lib/mlx5.h"
+
+#define BAD_ACCESS 0xBADACCE5
+#define MLX5_PROTECTED_CR_SCAN_CRSPACE 0x7
+
+static bool mlx5_crdump_enabled(struct mlx5_core_dev *dev)
+{
+ return !!dev->priv.health.crdump_size;
+}
+
+static int mlx5_crdump_fill(struct mlx5_core_dev *dev, u32 *cr_data)
+{
+ u32 crdump_size = dev->priv.health.crdump_size;
+ int i, ret;
+
+ for (i = 0; i < (crdump_size / 4); i++)
+ cr_data[i] = BAD_ACCESS;
+
+ ret = mlx5_vsc_gw_read_block_fast(dev, cr_data, crdump_size);
+ if (ret <= 0) {
+ if (ret == 0)
+ return -EIO;
+ return ret;
+ }
+
+ if (crdump_size != ret) {
+ mlx5_core_warn(dev, "failed to read full dump, read %d out of %u\n",
+ ret, crdump_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int mlx5_crdump_collect(struct mlx5_core_dev *dev, u32 *cr_data)
+{
+ int ret;
+
+ if (!mlx5_crdump_enabled(dev))
+ return -ENODEV;
+
+ ret = mlx5_vsc_gw_lock(dev);
+ if (ret) {
+ mlx5_core_warn(dev, "crdump: failed to lock vsc gw err %d\n",
+ ret);
+ return ret;
+ }
+ /* Verify no other PF is running cr-dump or sw reset */
+ ret = mlx5_vsc_sem_set_space(dev, MLX5_SEMAPHORE_SW_RESET,
+ MLX5_VSC_LOCK);
+ if (ret) {
+ mlx5_core_warn(dev, "Failed to lock SW reset semaphore\n");
+ goto unlock_gw;
+ }
+
+ ret = mlx5_vsc_gw_set_space(dev, MLX5_VSC_SPACE_SCAN_CRSPACE, NULL);
+ if (ret)
+ goto unlock_sem;
+
+ ret = mlx5_crdump_fill(dev, cr_data);
+
+unlock_sem:
+ mlx5_vsc_sem_set_space(dev, MLX5_SEMAPHORE_SW_RESET, MLX5_VSC_UNLOCK);
+unlock_gw:
+ mlx5_vsc_gw_unlock(dev);
+ return ret;
+}
+
+int mlx5_crdump_enable(struct mlx5_core_dev *dev)
+{
+ struct mlx5_priv *priv = &dev->priv;
+ u32 space_size;
+ int ret;
+
+ if (!mlx5_core_is_pf(dev) || !mlx5_vsc_accessible(dev) ||
+ mlx5_crdump_enabled(dev))
+ return 0;
+
+ ret = mlx5_vsc_gw_lock(dev);
+ if (ret)
+ return ret;
+
+ /* Check if space is supported and get space size */
+ ret = mlx5_vsc_gw_set_space(dev, MLX5_VSC_SPACE_SCAN_CRSPACE,
+ &space_size);
+ if (ret) {
+ /* Unlock and mask error since space is not supported */
+ mlx5_vsc_gw_unlock(dev);
+ return 0;
+ }
+
+ if (!space_size) {
+ mlx5_core_warn(dev, "Invalid Crspace size, zero\n");
+ mlx5_vsc_gw_unlock(dev);
+ return -EINVAL;
+ }
+
+ ret = mlx5_vsc_gw_unlock(dev);
+ if (ret)
+ return ret;
+
+ priv->health.crdump_size = space_size;
+ return 0;
+}
+
+void mlx5_crdump_disable(struct mlx5_core_dev *dev)
+{
+ dev->priv.health.crdump_size = 0;
+}
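
A hedged example of a consumer of the collect path: the buffer must be crdump_size bytes, which is only non-zero after mlx5_crdump_enable() succeeded. The wrapper name and the allocation policy are illustrative.

static u32 *mlx5_crdump_read_sketch(struct mlx5_core_dev *dev)
{
	u32 crdump_size = dev->priv.health.crdump_size;
	u32 *cr_data;

	if (!crdump_size)
		return NULL;	/* crdump not enabled on this function */

	cr_data = kvmalloc(crdump_size, GFP_KERNEL);
	if (!cr_data)
		return NULL;

	if (mlx5_crdump_collect(dev, cr_data)) {
		kvfree(cr_data);
		return NULL;
	}
	return cr_data;		/* caller kvfree()s after consuming the dump */
}
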
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
index a4cf123e3f17..ddf1b87f1bc0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
@@ -187,6 +187,7 @@ TRACE_EVENT(mlx5_fs_set_fte,
__field(u32, index)
__field(u32, action)
__field(u32, flow_tag)
+ __field(u32, flow_source)
__field(u8, mask_enable)
__field(int, new_fte)
__array(u32, mask_outer, MLX5_ST_SZ_DW(fte_match_set_lyr_2_4))
@@ -204,7 +205,8 @@ TRACE_EVENT(mlx5_fs_set_fte,
__entry->index = fte->index;
__entry->action = fte->action.action;
__entry->mask_enable = __entry->fg->mask.match_criteria_enable;
- __entry->flow_tag = fte->action.flow_tag;
+ __entry->flow_tag = fte->flow_context.flow_tag;
+ __entry->flow_source = fte->flow_context.flow_source;
memcpy(__entry->mask_outer,
MLX5_ADDR_OF(fte_match_param,
&__entry->fg->mask.match_criteria,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
index 6999f4486e9e..8a4930c8bf62 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
@@ -243,6 +243,19 @@ free_strings_db:
return -ENOMEM;
}
+static void
+mlx5_fw_tracer_init_saved_traces_array(struct mlx5_fw_tracer *tracer)
+{
+ tracer->st_arr.saved_traces_index = 0;
+ mutex_init(&tracer->st_arr.lock);
+}
+
+static void
+mlx5_fw_tracer_clean_saved_traces_array(struct mlx5_fw_tracer *tracer)
+{
+ mutex_destroy(&tracer->st_arr.lock);
+}
+
static void mlx5_tracer_read_strings_db(struct work_struct *work)
{
struct mlx5_fw_tracer *tracer = container_of(work, struct mlx5_fw_tracer,
@@ -522,6 +535,24 @@ static void mlx5_fw_tracer_clean_ready_list(struct mlx5_fw_tracer *tracer)
list_del(&str_frmt->list);
}
+static void mlx5_fw_tracer_save_trace(struct mlx5_fw_tracer *tracer,
+ u64 timestamp, bool lost,
+ u8 event_id, char *msg)
+{
+ struct mlx5_fw_trace_data *trace_data;
+
+ mutex_lock(&tracer->st_arr.lock);
+ trace_data = &tracer->st_arr.straces[tracer->st_arr.saved_traces_index];
+ trace_data->timestamp = timestamp;
+ trace_data->lost = lost;
+ trace_data->event_id = event_id;
+ strncpy(trace_data->msg, msg, TRACE_STR_MSG);
+
+ tracer->st_arr.saved_traces_index =
+ (tracer->st_arr.saved_traces_index + 1) & (SAVED_TRACES_NUM - 1);
+ mutex_unlock(&tracer->st_arr.lock);
+}
+
static void mlx5_tracer_print_trace(struct tracer_string_format *str_frmt,
struct mlx5_core_dev *dev,
u64 trace_timestamp)
@@ -540,6 +571,9 @@ static void mlx5_tracer_print_trace(struct tracer_string_format *str_frmt,
trace_mlx5_fw(dev->tracer, trace_timestamp, str_frmt->lost,
str_frmt->event_id, tmp);
+ mlx5_fw_tracer_save_trace(dev->tracer, trace_timestamp,
+ str_frmt->lost, str_frmt->event_id, tmp);
+
/* remove it from hash */
mlx5_tracer_clean_message(str_frmt);
}
@@ -786,6 +820,109 @@ static void mlx5_fw_tracer_ownership_change(struct work_struct *work)
mlx5_fw_tracer_start(tracer);
}
+static int mlx5_fw_tracer_set_core_dump_reg(struct mlx5_core_dev *dev,
+ u32 *in, int size_in)
+{
+ u32 out[MLX5_ST_SZ_DW(core_dump_reg)] = {};
+
+ if (!MLX5_CAP_DEBUG(dev, core_dump_general) &&
+ !MLX5_CAP_DEBUG(dev, core_dump_qp))
+ return -EOPNOTSUPP;
+
+ return mlx5_core_access_reg(dev, in, size_in, out, sizeof(out),
+ MLX5_REG_CORE_DUMP, 0, 1);
+}
+
+int mlx5_fw_tracer_trigger_core_dump_general(struct mlx5_core_dev *dev)
+{
+ struct mlx5_fw_tracer *tracer = dev->tracer;
+ u32 in[MLX5_ST_SZ_DW(core_dump_reg)] = {};
+ int err;
+
+ if (!MLX5_CAP_DEBUG(dev, core_dump_general) || !tracer)
+ return -EOPNOTSUPP;
+ if (!tracer->owner)
+ return -EPERM;
+
+ MLX5_SET(core_dump_reg, in, core_dump_type, 0x0);
+
+ err = mlx5_fw_tracer_set_core_dump_reg(dev, in, sizeof(in));
+ if (err)
+ return err;
+ queue_work(tracer->work_queue, &tracer->handle_traces_work);
+ flush_workqueue(tracer->work_queue);
+ return 0;
+}
+
+static int
+mlx5_devlink_fmsg_fill_trace(struct devlink_fmsg *fmsg,
+ struct mlx5_fw_trace_data *trace_data)
+{
+ int err;
+
+ err = devlink_fmsg_obj_nest_start(fmsg);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u64_pair_put(fmsg, "timestamp", trace_data->timestamp);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_bool_pair_put(fmsg, "lost", trace_data->lost);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u8_pair_put(fmsg, "event_id", trace_data->event_id);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_string_pair_put(fmsg, "msg", trace_data->msg);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_obj_nest_end(fmsg);
+ if (err)
+ return err;
+ return 0;
+}
+
+int mlx5_fw_tracer_get_saved_traces_objects(struct mlx5_fw_tracer *tracer,
+ struct devlink_fmsg *fmsg)
+{
+ struct mlx5_fw_trace_data *straces = tracer->st_arr.straces;
+ u32 index, start_index, end_index;
+ u32 saved_traces_index;
+ int err;
+
+ if (!straces[0].timestamp)
+ return -ENOMSG;
+
+ mutex_lock(&tracer->st_arr.lock);
+ saved_traces_index = tracer->st_arr.saved_traces_index;
+ if (straces[saved_traces_index].timestamp)
+ start_index = saved_traces_index;
+ else
+ start_index = 0;
+ end_index = (saved_traces_index - 1) & (SAVED_TRACES_NUM - 1);
+
+ err = devlink_fmsg_arr_pair_nest_start(fmsg, "dump fw traces");
+ if (err)
+ goto unlock;
+ index = start_index;
+ while (index != end_index) {
+ err = mlx5_devlink_fmsg_fill_trace(fmsg, &straces[index]);
+ if (err)
+ goto unlock;
+
+ index = (index + 1) & (SAVED_TRACES_NUM - 1);
+ }
+
+ err = devlink_fmsg_arr_pair_nest_end(fmsg);
+unlock:
+ mutex_unlock(&tracer->st_arr.lock);
+ return err;
+}
+
/* Create software resources (Buffers, etc ..) */
struct mlx5_fw_tracer *mlx5_fw_tracer_create(struct mlx5_core_dev *dev)
{
@@ -833,6 +970,7 @@ struct mlx5_fw_tracer *mlx5_fw_tracer_create(struct mlx5_core_dev *dev)
goto free_log_buf;
}
+ mlx5_fw_tracer_init_saved_traces_array(tracer);
mlx5_core_dbg(dev, "FWTracer: Tracer created\n");
return tracer;
@@ -917,6 +1055,7 @@ void mlx5_fw_tracer_destroy(struct mlx5_fw_tracer *tracer)
cancel_work_sync(&tracer->read_fw_strings_work);
mlx5_fw_tracer_clean_ready_list(tracer);
mlx5_fw_tracer_clean_print_hash(tracer);
+ mlx5_fw_tracer_clean_saved_traces_array(tracer);
mlx5_fw_tracer_free_strings_db(tracer);
mlx5_fw_tracer_destroy_log_buf(tracer);
flush_workqueue(tracer->work_queue);
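
The saved-traces array added above is used as a ring: both the writer and the devlink reader advance indices with "& (SAVED_TRACES_NUM - 1)", which only wraps correctly because SAVED_TRACES_NUM (8192) is a power of two. A one-line sketch of the wrap, with an illustrative helper name:

static u32 mlx5_fw_tracer_next_strace(u32 index)
{
	/* Equivalent to (index + 1) % SAVED_TRACES_NUM for power-of-two
	 * sizes: 8190 -> 8191 -> 0.
	 */
	return (index + 1) & (SAVED_TRACES_NUM - 1);
}
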
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h
index a8b8747f2b61..40601fba80ba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h
@@ -46,6 +46,9 @@
#define TRACER_BLOCK_SIZE_BYTE 256
#define TRACES_PER_BLOCK 32
+#define TRACE_STR_MSG 256
+#define SAVED_TRACES_NUM 8192
+
#define TRACER_MAX_PARAMS 7
#define MESSAGE_HASH_BITS 6
#define MESSAGE_HASH_SIZE BIT(MESSAGE_HASH_BITS)
@@ -53,6 +56,13 @@
#define MASK_52_7 (0x1FFFFFFFFFFF80)
#define MASK_6_0 (0x7F)
+struct mlx5_fw_trace_data {
+ u64 timestamp;
+ bool lost;
+ u8 event_id;
+ char msg[TRACE_STR_MSG];
+};
+
struct mlx5_fw_tracer {
struct mlx5_core_dev *dev;
struct mlx5_nb nb;
@@ -83,6 +93,13 @@ struct mlx5_fw_tracer {
u32 consumer_index;
} buff;
+ /* Saved Traces Array */
+ struct {
+ struct mlx5_fw_trace_data straces[SAVED_TRACES_NUM];
+ u32 saved_traces_index;
+ struct mutex lock; /* Protect st_arr access */
+ } st_arr;
+
u64 last_timestamp;
struct work_struct handle_traces_work;
struct hlist_head hash[MESSAGE_HASH_SIZE];
@@ -171,5 +188,8 @@ struct mlx5_fw_tracer *mlx5_fw_tracer_create(struct mlx5_core_dev *dev);
int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer);
void mlx5_fw_tracer_cleanup(struct mlx5_fw_tracer *tracer);
void mlx5_fw_tracer_destroy(struct mlx5_fw_tracer *tracer);
+int mlx5_fw_tracer_trigger_core_dump_general(struct mlx5_core_dev *dev);
+int mlx5_fw_tracer_get_saved_traces_objects(struct mlx5_fw_tracer *tracer,
+ struct devlink_fmsg *fmsg);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
index 0ccd6d40baf7..d2228e37450f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
@@ -83,30 +83,3 @@ void mlx5_ec_cleanup(struct mlx5_core_dev *dev)
mlx5_peer_pf_cleanup(dev);
}
-
-static int mlx5_query_host_params_context(struct mlx5_core_dev *dev,
- u32 *out, int outlen)
-{
- u32 in[MLX5_ST_SZ_DW(query_host_params_in)] = {};
-
- MLX5_SET(query_host_params_in, in, opcode,
- MLX5_CMD_OP_QUERY_HOST_PARAMS);
-
- return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
-}
-
-int mlx5_query_host_params_num_vfs(struct mlx5_core_dev *dev, int *num_vf)
-{
- u32 out[MLX5_ST_SZ_DW(query_host_params_out)] = {};
- int err;
-
- err = mlx5_query_host_params_context(dev, out, sizeof(out));
- if (err)
- return err;
-
- *num_vf = MLX5_GET(query_host_params_out, out,
- host_params_context.host_num_of_vfs);
- mlx5_core_dbg(dev, "host_num_of_vfs %d\n", *num_vf);
-
- return 0;
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.h b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.h
index 346372df218f..d3d7a00a02ac 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.h
@@ -16,7 +16,6 @@ enum {
bool mlx5_read_embedded_cpu(struct mlx5_core_dev *dev);
int mlx5_ec_init(struct mlx5_core_dev *dev);
void mlx5_ec_cleanup(struct mlx5_core_dev *dev);
-int mlx5_query_host_params_num_vfs(struct mlx5_core_dev *dev, int *num_vf);
#else /* CONFIG_MLX5_ESWITCH */
@@ -24,9 +23,6 @@ static inline bool
mlx5_read_embedded_cpu(struct mlx5_core_dev *dev) { return false; }
static inline int mlx5_ec_init(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_ec_cleanup(struct mlx5_core_dev *dev) {}
-static inline int
-mlx5_query_host_params_num_vfs(struct mlx5_core_dev *dev, int *num_vf)
-{ return -EOPNOTSUPP; }
#endif /* CONFIG_MLX5_ESWITCH */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index cc6797e24571..263558875f20 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -48,7 +48,7 @@
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/xdp.h>
-#include <linux/net_dim.h>
+#include <linux/dim.h>
#include <linux/bits.h>
#include "wq.h"
#include "mlx5_core.h"
@@ -137,6 +137,7 @@ struct page_pool;
#define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE >> 1)
#define MLX5E_MAX_NUM_SQS (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
#define MLX5E_TX_CQ_POLL_BUDGET 128
+#define MLX5E_TX_XSK_POLL_BUDGET 64
#define MLX5E_SQ_RECOVER_MIN_INTERVAL 500 /* msecs */
#define MLX5E_UMR_WQE_INLINE_SZ \
@@ -155,6 +156,11 @@ do { \
##__VA_ARGS__); \
} while (0)
+enum mlx5e_rq_group {
+ MLX5E_RQ_GROUP_REGULAR,
+ MLX5E_RQ_GROUP_XSK,
+ MLX5E_NUM_RQ_GROUPS /* Keep last. */
+};
static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size)
{
@@ -179,7 +185,8 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
/* Use this function to get max num channels after netdev was created */
static inline int mlx5e_get_netdev_max_channels(struct net_device *netdev)
{
- return min_t(unsigned int, netdev->num_rx_queues,
+ return min_t(unsigned int,
+ netdev->num_rx_queues / MLX5E_NUM_RQ_GROUPS,
netdev->num_tx_queues);
}
@@ -202,7 +209,10 @@ struct mlx5e_umr_wqe {
struct mlx5_wqe_ctrl_seg ctrl;
struct mlx5_wqe_umr_ctrl_seg uctrl;
struct mlx5_mkey_seg mkc;
- struct mlx5_mtt inline_mtts[0];
+ union {
+ struct mlx5_mtt inline_mtts[0];
+ u8 tls_static_params_ctx[0];
+ };
};
extern const char mlx5e_self_tests[][ETH_GSTRING_LEN];
@@ -238,9 +248,9 @@ struct mlx5e_params {
u16 num_channels;
u8 num_tc;
bool rx_cqe_compress_def;
- struct net_dim_cq_moder rx_cq_moderation;
- struct net_dim_cq_moder tx_cq_moderation;
bool tunneled_offload_en;
+ struct dim_cq_moder rx_cq_moderation;
+ struct dim_cq_moder tx_cq_moderation;
bool lro_en;
u8 tx_min_inline_mode;
bool vlan_strip_disable;
@@ -250,6 +260,7 @@ struct mlx5e_params {
u32 lro_timeout;
u32 pflags;
struct bpf_prog *xdp_prog;
+ struct mlx5e_xsk *xsk;
unsigned int sw_mtu;
int hard_mtu;
};
@@ -325,6 +336,9 @@ struct mlx5e_tx_wqe_info {
u32 num_bytes;
u8 num_wqebbs;
u8 num_dma;
+#ifdef CONFIG_MLX5_EN_TLS
+ skb_frag_t *resync_dump_frag;
+#endif
};
enum mlx5e_dma_map_type {
@@ -348,6 +362,13 @@ enum {
struct mlx5e_sq_wqe_info {
u8 opcode;
+
+ /* Auxiliary data for different opcodes. */
+ union {
+ struct {
+ struct mlx5e_rq *rq;
+ } umr;
+ };
};
struct mlx5e_txqsq {
@@ -356,7 +377,7 @@ struct mlx5e_txqsq {
/* dirtied @completion */
u16 cc;
u32 dma_fifo_cc;
- struct net_dim dim; /* Adaptive Moderation */
+ struct dim dim; /* Adaptive Moderation */
/* dirtied @xmit */
u16 pc ____cacheline_aligned_in_smp;
@@ -375,6 +396,7 @@ struct mlx5e_txqsq {
void __iomem *uar_map;
struct netdev_queue *txq;
u32 sqn;
+ u16 stop_room;
u8 min_inline_mode;
struct device *pdev;
__be32 mkey_be;
@@ -392,14 +414,55 @@ struct mlx5e_txqsq {
} ____cacheline_aligned_in_smp;
struct mlx5e_dma_info {
- struct page *page;
- dma_addr_t addr;
+ dma_addr_t addr;
+ union {
+ struct page *page;
+ struct {
+ u64 handle;
+ void *data;
+ } xsk;
+ };
+};
+
+/* XDP packets can be transmitted in different ways. On completion, we need to
+ * distinguish between them to clean up things in a proper way.
+ */
+enum mlx5e_xdp_xmit_mode {
+ /* An xdp_frame was transmitted due to either XDP_REDIRECT from another
+ * device or XDP_TX from an XSK RQ. The frame has to be unmapped and
+ * returned.
+ */
+ MLX5E_XDP_XMIT_MODE_FRAME,
+
+ /* The xdp_frame was created in place as a result of XDP_TX from a
+ * regular RQ. No DMA remapping happened, and the page belongs to us.
+ */
+ MLX5E_XDP_XMIT_MODE_PAGE,
+
+ /* No xdp_frame was created at all, the transmit happened from a UMEM
+ * page. The UMEM Completion Ring producer pointer has to be increased.
+ */
+ MLX5E_XDP_XMIT_MODE_XSK,
};
struct mlx5e_xdp_info {
- struct xdp_frame *xdpf;
- dma_addr_t dma_addr;
- struct mlx5e_dma_info di;
+ enum mlx5e_xdp_xmit_mode mode;
+ union {
+ struct {
+ struct xdp_frame *xdpf;
+ dma_addr_t dma_addr;
+ } frame;
+ struct {
+ struct mlx5e_rq *rq;
+ struct mlx5e_dma_info di;
+ } page;
+ };
+};
+
+struct mlx5e_xdp_xmit_data {
+ dma_addr_t dma_addr;
+ void *data;
+ u32 len;
};
struct mlx5e_xdp_info_fifo {
@@ -425,8 +488,12 @@ struct mlx5e_xdp_mpwqe {
};
struct mlx5e_xdpsq;
-typedef bool (*mlx5e_fp_xmit_xdp_frame)(struct mlx5e_xdpsq*,
- struct mlx5e_xdp_info*);
+typedef int (*mlx5e_fp_xmit_xdp_frame_check)(struct mlx5e_xdpsq *);
+typedef bool (*mlx5e_fp_xmit_xdp_frame)(struct mlx5e_xdpsq *,
+ struct mlx5e_xdp_xmit_data *,
+ struct mlx5e_xdp_info *,
+ int);
+
struct mlx5e_xdpsq {
/* data path */
@@ -443,8 +510,10 @@ struct mlx5e_xdpsq {
struct mlx5e_cq cq;
/* read only */
+ struct xdp_umem *umem;
struct mlx5_wq_cyc wq;
struct mlx5e_xdpsq_stats *stats;
+ mlx5e_fp_xmit_xdp_frame_check xmit_xdp_frame_check;
mlx5e_fp_xmit_xdp_frame xmit_xdp_frame;
struct {
struct mlx5e_xdp_wqe_info *wqe_info;
@@ -487,12 +556,6 @@ struct mlx5e_icosq {
struct mlx5e_channel *channel;
} ____cacheline_aligned_in_smp;
-static inline bool
-mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n)
-{
- return (mlx5_wq_cyc_ctr2ix(wq, cc - pc) >= n) || (cc == pc);
-}
-
struct mlx5e_wqe_frag_info {
struct mlx5e_dma_info *di;
u32 offset;
@@ -571,9 +634,11 @@ struct mlx5e_rq {
u8 log_stride_sz;
u8 umr_in_progress;
u8 umr_last_bulk;
+ u8 umr_completed;
} mpwqe;
};
struct {
+ u16 umem_headroom;
u16 headroom;
u8 map_dir; /* dma map direction */
} buff;
@@ -596,14 +661,18 @@ struct mlx5e_rq {
int ix;
unsigned int hw_mtu;
- struct net_dim dim; /* Dynamic Interrupt Moderation */
+ struct dim dim; /* Dynamic Interrupt Moderation */
/* XDP */
struct bpf_prog *xdp_prog;
- struct mlx5e_xdpsq xdpsq;
+ struct mlx5e_xdpsq *xdpsq;
DECLARE_BITMAP(flags, 8);
struct page_pool *page_pool;
+ /* AF_XDP zero-copy */
+ struct zero_copy_allocator zca;
+ struct xdp_umem *umem;
+
/* control */
struct mlx5_wq_ctrl wq_ctrl;
__be32 mkey_be;
@@ -616,9 +685,15 @@ struct mlx5e_rq {
struct xdp_rxq_info xdp_rxq;
} ____cacheline_aligned_in_smp;
+enum mlx5e_channel_state {
+ MLX5E_CHANNEL_STATE_XSK,
+ MLX5E_CHANNEL_NUM_STATES
+};
+
struct mlx5e_channel {
/* data path */
struct mlx5e_rq rq;
+ struct mlx5e_xdpsq rq_xdpsq;
struct mlx5e_txqsq sq[MLX5E_MAX_NUM_TC];
struct mlx5e_icosq icosq; /* internal control operations */
bool xdp;
@@ -631,6 +706,13 @@ struct mlx5e_channel {
/* XDP_REDIRECT */
struct mlx5e_xdpsq xdpsq;
+ /* AF_XDP zero-copy */
+ struct mlx5e_rq xskrq;
+ struct mlx5e_xdpsq xsksq;
+ struct mlx5e_icosq xskicosq;
+ /* xskicosq can be accessed from any CPU - the spinlock protects it. */
+ spinlock_t xskicosq_lock;
+
/* data path - accessed per napi poll */
struct irq_desc *irq_desc;
struct mlx5e_ch_stats *stats;
@@ -639,6 +721,7 @@ struct mlx5e_channel {
struct mlx5e_priv *priv;
struct mlx5_core_dev *mdev;
struct hwtstamp_config *tstamp;
+ DECLARE_BITMAP(state, MLX5E_CHANNEL_NUM_STATES);
int ix;
int cpu;
cpumask_var_t xps_cpumask;
@@ -654,14 +737,17 @@ struct mlx5e_channel_stats {
struct mlx5e_ch_stats ch;
struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC];
struct mlx5e_rq_stats rq;
+ struct mlx5e_rq_stats xskrq;
struct mlx5e_xdpsq_stats rq_xdpsq;
struct mlx5e_xdpsq_stats xdpsq;
+ struct mlx5e_xdpsq_stats xsksq;
} ____cacheline_aligned_in_smp;
enum {
MLX5E_STATE_OPENED,
MLX5E_STATE_DESTROYING,
MLX5E_STATE_XDP_TX_ENABLED,
+ MLX5E_STATE_XDP_OPEN,
};
struct mlx5e_rqt {
@@ -694,6 +780,17 @@ struct mlx5e_modify_sq_param {
int rl_index;
};
+struct mlx5e_xsk {
+ /* UMEMs are stored separately from channels, because we don't want to
+ * lose them when channels are recreated. The kernel also stores UMEMs,
+ * but it doesn't distinguish between zero-copy and non-zero-copy UMEMs,
+ * so rely on our mechanism.
+ */
+ struct xdp_umem **umems;
+ u16 refcnt;
+ bool ever_used;
+};
+
struct mlx5e_priv {
/* priv data path fields - start */
struct mlx5e_txqsq *txq2sq[MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC];
@@ -714,6 +811,7 @@ struct mlx5e_priv {
struct mlx5e_tir indir_tir[MLX5E_NUM_INDIR_TIRS];
struct mlx5e_tir inner_indir_tir[MLX5E_NUM_INDIR_TIRS];
struct mlx5e_tir direct_tir[MLX5E_MAX_NUM_CHANNELS];
+ struct mlx5e_tir xsk_tir[MLX5E_MAX_NUM_CHANNELS];
struct mlx5e_rss_params rss_params;
u32 tx_rates[MLX5E_MAX_NUM_SQS];
@@ -750,6 +848,7 @@ struct mlx5e_priv {
struct mlx5e_tls *tls;
#endif
struct devlink_health_reporter *tx_reporter;
+ struct mlx5e_xsk xsk;
};
struct mlx5e_profile {
@@ -763,6 +862,7 @@ struct mlx5e_profile {
void (*cleanup_tx)(struct mlx5e_priv *priv);
void (*enable)(struct mlx5e_priv *priv);
void (*disable)(struct mlx5e_priv *priv);
+ int (*update_rx)(struct mlx5e_priv *priv);
void (*update_stats)(struct mlx5e_priv *priv);
void (*update_carrier)(struct mlx5e_priv *priv);
struct {
@@ -781,7 +881,7 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more);
void mlx5e_trigger_irq(struct mlx5e_icosq *sq);
-void mlx5e_completion_event(struct mlx5_core_cq *mcq);
+void mlx5e_completion_event(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe);
void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
int mlx5e_napi_poll(struct napi_struct *napi, int budget);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
@@ -793,11 +893,13 @@ bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
struct mlx5e_params *params);
void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info);
-void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
- bool recycle);
+void mlx5e_page_release_dynamic(struct mlx5e_rq *rq,
+ struct mlx5e_dma_info *dma_info,
+ bool recycle);
void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
+void mlx5e_poll_ico_cq(struct mlx5e_cq *cq);
bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq);
void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
@@ -853,6 +955,30 @@ void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_rss_params *rss_params,
void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen);
struct mlx5e_tirc_config mlx5e_tirc_get_default_config(enum mlx5e_traffic_types tt);
+struct mlx5e_xsk_param;
+
+struct mlx5e_rq_param;
+int mlx5e_open_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
+ struct mlx5e_rq_param *param, struct mlx5e_xsk_param *xsk,
+ struct xdp_umem *umem, struct mlx5e_rq *rq);
+int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time);
+void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
+void mlx5e_close_rq(struct mlx5e_rq *rq);
+
+struct mlx5e_sq_param;
+int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
+ struct mlx5e_sq_param *param, struct mlx5e_icosq *sq);
+void mlx5e_close_icosq(struct mlx5e_icosq *sq);
+int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
+ struct mlx5e_sq_param *param, struct xdp_umem *umem,
+ struct mlx5e_xdpsq *sq, bool is_redirect);
+void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq);
+
+struct mlx5e_cq_param;
+int mlx5e_open_cq(struct mlx5e_channel *c, struct dim_cq_moder moder,
+ struct mlx5e_cq_param *param, struct mlx5e_cq *cq);
+void mlx5e_close_cq(struct mlx5e_cq *cq);
+
int mlx5e_open_locked(struct net_device *netdev);
int mlx5e_close_locked(struct net_device *netdev);
@@ -898,102 +1024,6 @@ static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev)
MLX5_CAP_ETH(mdev, swp_csum) && MLX5_CAP_ETH(mdev, swp_lso);
}
-struct mlx5e_swp_spec {
- __be16 l3_proto;
- u8 l4_proto;
- u8 is_tun;
- __be16 tun_l3_proto;
- u8 tun_l4_proto;
-};
-
-static inline void
-mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
- struct mlx5e_swp_spec *swp_spec)
-{
- /* SWP offsets are in 2-bytes words */
- eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
- if (swp_spec->l3_proto == htons(ETH_P_IPV6))
- eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;
- if (swp_spec->l4_proto) {
- eseg->swp_outer_l4_offset = skb_transport_offset(skb) / 2;
- if (swp_spec->l4_proto == IPPROTO_UDP)
- eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L4_UDP;
- }
-
- if (swp_spec->is_tun) {
- eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
- if (swp_spec->tun_l3_proto == htons(ETH_P_IPV6))
- eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
- } else { /* typically for ipsec when xfrm mode != XFRM_MODE_TUNNEL */
- eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
- if (swp_spec->l3_proto == htons(ETH_P_IPV6))
- eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
- }
- switch (swp_spec->tun_l4_proto) {
- case IPPROTO_UDP:
- eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
- /* fall through */
- case IPPROTO_TCP:
- eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
- break;
- }
-}
-
-static inline void mlx5e_sq_fetch_wqe(struct mlx5e_txqsq *sq,
- struct mlx5e_tx_wqe **wqe,
- u16 *pi)
-{
- struct mlx5_wq_cyc *wq = &sq->wq;
-
- *pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
- *wqe = mlx5_wq_cyc_get_wqe(wq, *pi);
- memset(*wqe, 0, sizeof(**wqe));
-}
-
-static inline
-struct mlx5e_tx_wqe *mlx5e_post_nop(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
-{
- u16 pi = mlx5_wq_cyc_ctr2ix(wq, *pc);
- struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
- struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
-
- memset(cseg, 0, sizeof(*cseg));
-
- cseg->opmod_idx_opcode = cpu_to_be32((*pc << 8) | MLX5_OPCODE_NOP);
- cseg->qpn_ds = cpu_to_be32((sqn << 8) | 0x01);
-
- (*pc)++;
-
- return wqe;
-}
-
-static inline
-void mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc,
- void __iomem *uar_map,
- struct mlx5_wqe_ctrl_seg *ctrl)
-{
- ctrl->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
- /* ensure wqe is visible to device before updating doorbell record */
- dma_wmb();
-
- *wq->db = cpu_to_be32(pc);
-
- /* ensure doorbell record is visible to device before ringing the
- * doorbell
- */
- wmb();
-
- mlx5_write64((__be32 *)ctrl, uar_map);
-}
-
-static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
-{
- struct mlx5_core_cq *mcq;
-
- mcq = &cq->mcq;
- mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, cq->wq.cc);
-}
-
extern const struct ethtool_ops mlx5e_ethtool_ops;
#ifdef CONFIG_MLX5_CORE_EN_DCB
extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops;
@@ -1023,17 +1053,17 @@ int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv);
int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc);
void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc);
-int mlx5e_create_direct_rqts(struct mlx5e_priv *priv);
-void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv);
-int mlx5e_create_direct_tirs(struct mlx5e_priv *priv);
-void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv);
+int mlx5e_create_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
+void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
+int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
+void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);
-int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc,
- u32 underlay_qpn, u32 *tisn);
+int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn);
void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn);
int mlx5e_create_tises(struct mlx5e_priv *priv);
+int mlx5e_update_nic_rx(struct mlx5e_priv *priv);
void mlx5e_update_carrier(struct mlx5e_priv *priv);
int mlx5e_close(struct net_device *netdev);
int mlx5e_open(struct net_device *netdev);
@@ -1075,8 +1105,6 @@ u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv);
u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv);
int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
struct ethtool_ts_info *info);
-int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
- struct ethtool_flash *flash);
void mlx5e_ethtool_get_pauseparam(struct mlx5e_priv *priv,
struct ethtool_pauseparam *pauseparam);
int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv,
@@ -1097,6 +1125,7 @@ void mlx5e_detach_netdev(struct mlx5e_priv *priv);
void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv);
void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
+ struct mlx5e_xsk *xsk,
struct mlx5e_rss_params *rss_params,
struct mlx5e_params *params,
u16 max_channels, u16 mtu);
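
The mlx5e_xdp_xmit_mode comment earlier in this header explains that completions must distinguish how each descriptor was produced. A sketch of the resulting dispatch; the function name and the placeholder comments are illustrative, only the struct fields come from the header above.

static void mlx5e_xdpi_complete_sketch(struct mlx5e_xdp_info *xdpi)
{
	switch (xdpi->mode) {
	case MLX5E_XDP_XMIT_MODE_FRAME:
		/* unmap xdpi->frame.dma_addr and return xdpi->frame.xdpf */
		break;
	case MLX5E_XDP_XMIT_MODE_PAGE:
		/* recycle xdpi->page.di back into xdpi->page.rq's page pool */
		break;
	case MLX5E_XDP_XMIT_MODE_XSK:
		/* advance the UMEM completion ring producer */
		break;
	}
}
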
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index d3744bffbae3..79301d116667 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -3,65 +3,102 @@
#include "en/params.h"
-u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params)
+static inline bool mlx5e_rx_is_xdp(struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk)
{
- u16 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
- u16 linear_rq_headroom = params->xdp_prog ?
- XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM;
- u32 frag_sz;
+ return params->xdp_prog || xsk;
+}
+
+u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk)
+{
+ u16 headroom = NET_IP_ALIGN;
+
+ if (mlx5e_rx_is_xdp(params, xsk)) {
+ headroom += XDP_PACKET_HEADROOM;
+ if (xsk)
+ headroom += xsk->headroom;
+ } else {
+ headroom += MLX5_RX_HEADROOM;
+ }
+
+ return headroom;
+}
+
+u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk)
+{
+ u32 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
+ u16 linear_rq_headroom = mlx5e_get_linear_rq_headroom(params, xsk);
+ u32 frag_sz = linear_rq_headroom + hw_mtu;
- linear_rq_headroom += NET_IP_ALIGN;
+ /* AF_XDP doesn't build SKBs in place. */
+ if (!xsk)
+ frag_sz = MLX5_SKB_FRAG_SZ(frag_sz);
- frag_sz = MLX5_SKB_FRAG_SZ(linear_rq_headroom + hw_mtu);
+ /* XDP in mlx5e doesn't support multiple packets per page. */
+ if (mlx5e_rx_is_xdp(params, xsk))
+ frag_sz = max_t(u32, frag_sz, PAGE_SIZE);
- if (params->xdp_prog && frag_sz < PAGE_SIZE)
- frag_sz = PAGE_SIZE;
+ /* Even if we can go with a smaller fragment size, we must not put
+ * multiple packets into a single frame.
+ */
+ if (xsk)
+ frag_sz = max_t(u32, frag_sz, xsk->chunk_size);
return frag_sz;
}
-u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params)
+u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk)
{
- u32 linear_frag_sz = mlx5e_rx_get_linear_frag_sz(params);
+ u32 linear_frag_sz = mlx5e_rx_get_linear_frag_sz(params, xsk);
return MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(linear_frag_sz);
}
-bool mlx5e_rx_is_linear_skb(struct mlx5e_params *params)
+bool mlx5e_rx_is_linear_skb(struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk)
{
- u32 frag_sz = mlx5e_rx_get_linear_frag_sz(params);
+ /* AF_XDP allocates SKBs on XDP_PASS - ensure they don't occupy more
+ * than one page. For this, check both with and without xsk.
+ */
+ u32 linear_frag_sz = max(mlx5e_rx_get_linear_frag_sz(params, xsk),
+ mlx5e_rx_get_linear_frag_sz(params, NULL));
- return !params->lro_en && frag_sz <= PAGE_SIZE;
+ return !params->lro_en && linear_frag_sz <= PAGE_SIZE;
}
#define MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ ((BIT(__mlx5_bit_sz(wq, log_wqe_stride_size)) - 1) + \
MLX5_MPWQE_LOG_STRIDE_SZ_BASE)
bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params)
+ struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk)
{
- u32 frag_sz = mlx5e_rx_get_linear_frag_sz(params);
+ u32 linear_frag_sz = mlx5e_rx_get_linear_frag_sz(params, xsk);
s8 signed_log_num_strides_param;
u8 log_num_strides;
- if (!mlx5e_rx_is_linear_skb(params))
+ if (!mlx5e_rx_is_linear_skb(params, xsk))
return false;
- if (order_base_2(frag_sz) > MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ)
+ if (order_base_2(linear_frag_sz) > MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ)
return false;
if (MLX5_CAP_GEN(mdev, ext_stride_num_range))
return true;
- log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(frag_sz);
+ log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(linear_frag_sz);
signed_log_num_strides_param =
(s8)log_num_strides - MLX5_MPWQE_LOG_NUM_STRIDES_BASE;
return signed_log_num_strides_param >= 0;
}
-u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params)
+u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk)
{
- u8 log_pkts_per_wqe = mlx5e_mpwqe_log_pkts_per_wqe(params);
+ u8 log_pkts_per_wqe = mlx5e_mpwqe_log_pkts_per_wqe(params, xsk);
/* Numbers are unsigned, don't subtract to avoid underflow. */
if (params->log_rq_mtu_frames <
@@ -72,33 +109,30 @@ u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params)
}
u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params)
+ struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk)
{
- if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params))
- return order_base_2(mlx5e_rx_get_linear_frag_sz(params));
+ if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk))
+ return order_base_2(mlx5e_rx_get_linear_frag_sz(params, xsk));
return MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev);
}
u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params)
+ struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk)
{
return MLX5_MPWRQ_LOG_WQE_SZ -
- mlx5e_mpwqe_get_log_stride_size(mdev, params);
+ mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
}
u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params)
+ struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk)
{
- u16 linear_rq_headroom = params->xdp_prog ?
- XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM;
- bool is_linear_skb;
-
- linear_rq_headroom += NET_IP_ALIGN;
-
- is_linear_skb = (params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC) ?
- mlx5e_rx_is_linear_skb(params) :
- mlx5e_rx_mpwqe_is_linear_skb(mdev, params);
+ bool is_linear_skb = (params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC) ?
+ mlx5e_rx_is_linear_skb(params, xsk) :
+ mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk);
- return is_linear_skb ? linear_rq_headroom : 0;
+ return is_linear_skb ? mlx5e_get_linear_rq_headroom(params, xsk) : 0;
}
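
All of the calculations above take an optional struct mlx5e_xsk_param so the same code sizes both regular and zero-copy queues. A hedged sketch of a caller that checks whether a given xsk configuration still yields a linear receive layout before opening the queue; the function name and the -EINVAL policy are illustrative.

static int mlx5e_check_xsk_linear_sketch(struct mlx5_core_dev *mdev,
					 struct mlx5e_params *params,
					 struct mlx5e_xsk_param *xsk)
{
	bool linear = params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC ?
		      mlx5e_rx_is_linear_skb(params, xsk) :
		      mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk);

	return linear ? 0 : -EINVAL;
}
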
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
index b106a0236f36..bd882b5ee9a7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
@@ -6,17 +6,119 @@
#include "en.h"
-u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params);
-u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params);
-bool mlx5e_rx_is_linear_skb(struct mlx5e_params *params);
+struct mlx5e_xsk_param {
+ u16 headroom;
+ u16 chunk_size;
+};
+
+struct mlx5e_rq_param {
+ u32 rqc[MLX5_ST_SZ_DW(rqc)];
+ struct mlx5_wq_param wq;
+ struct mlx5e_rq_frags_info frags_info;
+};
+
+struct mlx5e_sq_param {
+ u32 sqc[MLX5_ST_SZ_DW(sqc)];
+ struct mlx5_wq_param wq;
+ bool is_mpw;
+};
+
+struct mlx5e_cq_param {
+ u32 cqc[MLX5_ST_SZ_DW(cqc)];
+ struct mlx5_wq_param wq;
+ u16 eq_ix;
+ u8 cq_period_mode;
+};
+
+struct mlx5e_channel_param {
+ struct mlx5e_rq_param rq;
+ struct mlx5e_sq_param sq;
+ struct mlx5e_sq_param xdp_sq;
+ struct mlx5e_sq_param icosq;
+ struct mlx5e_cq_param rx_cq;
+ struct mlx5e_cq_param tx_cq;
+ struct mlx5e_cq_param icosq_cq;
+};
+
+static inline bool mlx5e_qid_get_ch_if_in_group(struct mlx5e_params *params,
+ u16 qid,
+ enum mlx5e_rq_group group,
+ u16 *ix)
+{
+ int nch = params->num_channels;
+ int ch = qid - nch * group;
+
+ if (ch < 0 || ch >= nch)
+ return false;
+
+ *ix = ch;
+ return true;
+}
+
+static inline void mlx5e_qid_get_ch_and_group(struct mlx5e_params *params,
+ u16 qid,
+ u16 *ix,
+ enum mlx5e_rq_group *group)
+{
+ u16 nch = params->num_channels;
+
+ *ix = qid % nch;
+ *group = qid / nch;
+}
+
+static inline bool mlx5e_qid_validate(struct mlx5e_params *params, u64 qid)
+{
+ return qid < params->num_channels * MLX5E_NUM_RQ_GROUPS;
+}
+
+/* Parameter calculations */
+
+u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk);
+u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk);
+u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk);
+bool mlx5e_rx_is_linear_skb(struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk);
bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params);
-u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params);
+ struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk);
+u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk);
u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params);
+ struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk);
u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params);
+ struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk);
u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params);
+ struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk);
+
+/* Build queue parameters */
+
+void mlx5e_build_rq_param(struct mlx5e_priv *priv,
+ struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk,
+ struct mlx5e_rq_param *param);
+void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
+ struct mlx5e_sq_param *param);
+void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
+ struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk,
+ struct mlx5e_cq_param *param);
+void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
+ struct mlx5e_params *params,
+ struct mlx5e_cq_param *param);
+void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
+ u8 log_wq_size,
+ struct mlx5e_cq_param *param);
+void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
+ u8 log_wq_size,
+ struct mlx5e_sq_param *param);
+void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv,
+ struct mlx5e_params *params,
+ struct mlx5e_sq_param *param);
#endif /* __MLX5_EN_PARAMS_H__ */
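
The qid helpers above encode a queue id as group * num_channels + channel. For example, with num_channels == 8, qid 3 maps to channel 3 of MLX5E_RQ_GROUP_REGULAR, qid 11 maps to channel 3 of MLX5E_RQ_GROUP_XSK, and qid 16 fails mlx5e_qid_validate() because only 2 * 8 queue ids exist. The thin wrapper below is illustrative only.

static bool mlx5e_qid_to_xsk_channel_sketch(struct mlx5e_params *params,
					    u16 qid, u16 *ix)
{
	/* Returns true and sets *ix only when qid falls inside the XSK group. */
	return mlx5e_qid_get_ch_if_in_group(params, qid,
					    MLX5E_RQ_GROUP_XSK, ix);
}
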
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index 231e7cdfc6f7..a6a52806be45 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -3,8 +3,22 @@
#include <net/vxlan.h>
#include <net/gre.h>
-#include "lib/vxlan.h"
+#include <net/geneve.h>
#include "en/tc_tun.h"
+#include "en_tc.h"
+
+struct mlx5e_tc_tunnel *mlx5e_get_tc_tun(struct net_device *tunnel_dev)
+{
+ if (netif_is_vxlan(tunnel_dev))
+ return &vxlan_tunnel;
+ else if (netif_is_geneve(tunnel_dev))
+ return &geneve_tunnel;
+ else if (netif_is_gretap(tunnel_dev) ||
+ netif_is_ip6gretap(tunnel_dev))
+ return &gre_tunnel;
+ else
+ return NULL;
+}
static int get_route_and_out_devs(struct mlx5e_priv *priv,
struct net_device *dev,
@@ -34,7 +48,8 @@ static int get_route_and_out_devs(struct mlx5e_priv *priv,
*route_dev = dev;
if (is_vlan_dev(*route_dev))
*out_dev = uplink_dev;
- else if (mlx5e_eswitch_rep(dev))
+ else if (mlx5e_eswitch_rep(dev) &&
+ mlx5e_is_valid_eswitch_fwd_dev(priv, dev))
*out_dev = *route_dev;
else
return -EOPNOTSUPP;
@@ -142,63 +157,15 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
return 0;
}
-static int mlx5e_gen_vxlan_header(char buf[], struct ip_tunnel_key *tun_key)
-{
- __be32 tun_id = tunnel_id_to_key32(tun_key->tun_id);
- struct udphdr *udp = (struct udphdr *)(buf);
- struct vxlanhdr *vxh = (struct vxlanhdr *)
- ((char *)udp + sizeof(struct udphdr));
-
- udp->dest = tun_key->tp_dst;
- vxh->vx_flags = VXLAN_HF_VNI;
- vxh->vx_vni = vxlan_vni_field(tun_id);
-
- return 0;
-}
-
-static int mlx5e_gen_gre_header(char buf[], struct ip_tunnel_key *tun_key)
-{
- __be32 tun_id = tunnel_id_to_key32(tun_key->tun_id);
- int hdr_len;
- struct gre_base_hdr *greh = (struct gre_base_hdr *)(buf);
-
- /* the HW does not calculate GRE csum or sequences */
- if (tun_key->tun_flags & (TUNNEL_CSUM | TUNNEL_SEQ))
- return -EOPNOTSUPP;
-
- greh->protocol = htons(ETH_P_TEB);
-
- /* GRE key */
- hdr_len = gre_calc_hlen(tun_key->tun_flags);
- greh->flags = gre_tnl_flags_to_gre_flags(tun_key->tun_flags);
- if (tun_key->tun_flags & TUNNEL_KEY) {
- __be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4);
-
- *ptr = tun_id;
- }
-
- return 0;
-}
-
static int mlx5e_gen_ip_tunnel_header(char buf[], __u8 *ip_proto,
struct mlx5e_encap_entry *e)
{
- int err = 0;
- struct ip_tunnel_key *key = &e->tun_info.key;
-
- if (e->tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
- *ip_proto = IPPROTO_UDP;
- err = mlx5e_gen_vxlan_header(buf, key);
- } else if (e->tunnel_type == MLX5E_TC_TUNNEL_TYPE_GRETAP) {
- *ip_proto = IPPROTO_GRE;
- err = mlx5e_gen_gre_header(buf, key);
- } else {
- pr_warn("mlx5: Cannot generate tunnel header for tunnel type (%d)\n"
- , e->tunnel_type);
- err = -EOPNOTSUPP;
+ if (!e->tunnel) {
+ pr_warn("mlx5: Cannot generate tunnel header for this tunnel\n");
+ return -EOPNOTSUPP;
}
- return err;
+ return e->tunnel->generate_ip_tun_hdr(buf, ip_proto, e);
}
static char *gen_eth_tnl_hdr(char *buf, struct net_device *dev,
@@ -230,7 +197,7 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
struct mlx5e_encap_entry *e)
{
int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
- struct ip_tunnel_key *tun_key = &e->tun_info.key;
+ const struct ip_tunnel_key *tun_key = &e->tun_info->key;
struct net_device *out_dev, *route_dev;
struct neighbour *n = NULL;
struct flowi4 fl4 = {};
@@ -254,7 +221,7 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
ipv4_encap_size =
(is_vlan_dev(route_dev) ? VLAN_ETH_HLEN : ETH_HLEN) +
sizeof(struct iphdr) +
- e->tunnel_hlen;
+ e->tunnel->calc_hlen(e);
if (max_encap_size < ipv4_encap_size) {
mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
@@ -346,7 +313,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
struct mlx5e_encap_entry *e)
{
int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
- struct ip_tunnel_key *tun_key = &e->tun_info.key;
+ const struct ip_tunnel_key *tun_key = &e->tun_info->key;
struct net_device *out_dev, *route_dev;
struct neighbour *n = NULL;
struct flowi6 fl6 = {};
@@ -370,7 +337,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
ipv6_encap_size =
(is_vlan_dev(route_dev) ? VLAN_ETH_HLEN : ETH_HLEN) +
sizeof(struct ipv6hdr) +
- e->tunnel_hlen;
+ e->tunnel->calc_hlen(e);
if (max_encap_size < ipv6_encap_size) {
mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
@@ -456,27 +423,12 @@ out:
return err;
}
-int mlx5e_tc_tun_get_type(struct net_device *tunnel_dev)
-{
- if (netif_is_vxlan(tunnel_dev))
- return MLX5E_TC_TUNNEL_TYPE_VXLAN;
- else if (netif_is_gretap(tunnel_dev) ||
- netif_is_ip6gretap(tunnel_dev))
- return MLX5E_TC_TUNNEL_TYPE_GRETAP;
- else
- return MLX5E_TC_TUNNEL_TYPE_UNKNOWN;
-}
-
bool mlx5e_tc_tun_device_to_offload(struct mlx5e_priv *priv,
struct net_device *netdev)
{
- int tunnel_type = mlx5e_tc_tun_get_type(netdev);
+ struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(netdev);
- if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN &&
- MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
- return true;
- else if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_GRETAP &&
- MLX5_CAP_ESW(priv->mdev, nvgre_encap_decap))
+ if (tunnel && tunnel->can_offload(priv))
return true;
else
return false;
@@ -487,71 +439,87 @@ int mlx5e_tc_tun_init_encap_attr(struct net_device *tunnel_dev,
struct mlx5e_encap_entry *e,
struct netlink_ext_ack *extack)
{
- e->tunnel_type = mlx5e_tc_tun_get_type(tunnel_dev);
+ struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(tunnel_dev);
- if (e->tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
- int dst_port = be16_to_cpu(e->tun_info.key.tp_dst);
-
- if (!mlx5_vxlan_lookup_port(priv->mdev->vxlan, dst_port)) {
- NL_SET_ERR_MSG_MOD(extack,
- "vxlan udp dport was not registered with the HW");
- netdev_warn(priv->netdev,
- "%d isn't an offloaded vxlan udp dport\n",
- dst_port);
- return -EOPNOTSUPP;
- }
- e->reformat_type = MLX5_REFORMAT_TYPE_L2_TO_VXLAN;
- e->tunnel_hlen = VXLAN_HLEN;
- } else if (e->tunnel_type == MLX5E_TC_TUNNEL_TYPE_GRETAP) {
- e->reformat_type = MLX5_REFORMAT_TYPE_L2_TO_NVGRE;
- e->tunnel_hlen = gre_calc_hlen(e->tun_info.key.tun_flags);
- } else {
+ if (!tunnel) {
e->reformat_type = -1;
- e->tunnel_hlen = -1;
return -EOPNOTSUPP;
}
- return 0;
+
+ return tunnel->init_encap_attr(tunnel_dev, priv, e, extack);
}
-static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
- struct mlx5_flow_spec *spec,
- struct tc_cls_flower_offload *f,
- void *headers_c,
- void *headers_v)
+int mlx5e_tc_tun_parse(struct net_device *filter_dev,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct flow_cls_offload *f,
+ void *headers_c,
+ void *headers_v, u8 *match_level)
{
- struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
+ struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(filter_dev);
+ int err = 0;
+
+ if (!tunnel) {
+ netdev_warn(priv->netdev,
+ "decapsulation offload is not supported for %s net device\n",
+ mlx5e_netdev_kind(filter_dev));
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ *match_level = tunnel->match_level;
+
+ if (tunnel->parse_udp_ports) {
+ err = tunnel->parse_udp_ports(priv, spec, f,
+ headers_c, headers_v);
+ if (err)
+ goto out;
+ }
+
+ if (tunnel->parse_tunnel) {
+ err = tunnel->parse_tunnel(priv, spec, f,
+ headers_c, headers_v);
+ if (err)
+ goto out;
+ }
+
+out:
+ return err;
+}
+
+int mlx5e_tc_tun_parse_udp_ports(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct flow_cls_offload *f,
+ void *headers_c,
+ void *headers_v)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
struct netlink_ext_ack *extack = f->common.extack;
- void *misc_c = MLX5_ADDR_OF(fte_match_param,
- spec->match_criteria,
- misc_parameters);
- void *misc_v = MLX5_ADDR_OF(fte_match_param,
- spec->match_value,
- misc_parameters);
struct flow_match_ports enc_ports;
- flow_rule_match_enc_ports(rule, &enc_ports);
-
/* Full udp dst port must be given */
- if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS) ||
- memchr_inv(&enc_ports.mask->dst, 0xff, sizeof(enc_ports.mask->dst))) {
+
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
NL_SET_ERR_MSG_MOD(extack,
- "VXLAN decap filter must include enc_dst_port condition");
+ "UDP tunnel decap filter must include enc_dst_port condition");
netdev_warn(priv->netdev,
- "VXLAN decap filter must include enc_dst_port condition\n");
+ "UDP tunnel decap filter must include enc_dst_port condition\n");
return -EOPNOTSUPP;
}
- /* udp dst port must be knonwn as a VXLAN port */
- if (!mlx5_vxlan_lookup_port(priv->mdev->vxlan, be16_to_cpu(enc_ports.key->dst))) {
+ flow_rule_match_enc_ports(rule, &enc_ports);
+
+ if (memchr_inv(&enc_ports.mask->dst, 0xff,
+ sizeof(enc_ports.mask->dst))) {
NL_SET_ERR_MSG_MOD(extack,
- "Matched UDP port is not registered as a VXLAN port");
+ "UDP tunnel decap filter must match enc_dst_port fully");
netdev_warn(priv->netdev,
- "UDP port %d is not registered as a VXLAN port\n",
- be16_to_cpu(enc_ports.key->dst));
+ "UDP tunnel decap filter must match enc_dst_port fully\n");
return -EOPNOTSUPP;
}
- /* dst UDP port is valid here */
+ /* match on UDP protocol and dst port number */
+
MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
@@ -560,92 +528,15 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
ntohs(enc_ports.key->dst));
+ /* UDP src port on outer header is generated by HW,
+ * so it is probably a bad idea to request matching it.
+ * Nonetheless, it is allowed.
+ */
+
MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport,
ntohs(enc_ports.mask->src));
MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
ntohs(enc_ports.key->src));
- /* match on VNI */
- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
- struct flow_match_enc_keyid enc_keyid;
-
- flow_rule_match_enc_keyid(rule, &enc_keyid);
-
- MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
- be32_to_cpu(enc_keyid.mask->keyid));
- MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
- be32_to_cpu(enc_keyid.key->keyid));
- }
- return 0;
-}
-
-static int mlx5e_tc_tun_parse_gretap(struct mlx5e_priv *priv,
- struct mlx5_flow_spec *spec,
- struct tc_cls_flower_offload *f,
- void *outer_headers_c,
- void *outer_headers_v)
-{
- void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
- misc_parameters);
- void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
- misc_parameters);
- struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
-
- if (!MLX5_CAP_ESW(priv->mdev, nvgre_encap_decap)) {
- NL_SET_ERR_MSG_MOD(f->common.extack,
- "GRE HW offloading is not supported");
- netdev_warn(priv->netdev, "GRE HW offloading is not supported\n");
- return -EOPNOTSUPP;
- }
-
- MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol);
- MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
- ip_protocol, IPPROTO_GRE);
-
- /* gre protocol*/
- MLX5_SET_TO_ONES(fte_match_set_misc, misc_c, gre_protocol);
- MLX5_SET(fte_match_set_misc, misc_v, gre_protocol, ETH_P_TEB);
-
- /* gre key */
- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
- struct flow_match_enc_keyid enc_keyid;
-
- flow_rule_match_enc_keyid(rule, &enc_keyid);
- MLX5_SET(fte_match_set_misc, misc_c,
- gre_key.key, be32_to_cpu(enc_keyid.mask->keyid));
- MLX5_SET(fte_match_set_misc, misc_v,
- gre_key.key, be32_to_cpu(enc_keyid.key->keyid));
- }
-
return 0;
}
-
-int mlx5e_tc_tun_parse(struct net_device *filter_dev,
- struct mlx5e_priv *priv,
- struct mlx5_flow_spec *spec,
- struct tc_cls_flower_offload *f,
- void *headers_c,
- void *headers_v, u8 *match_level)
-{
- int tunnel_type;
- int err = 0;
-
- tunnel_type = mlx5e_tc_tun_get_type(filter_dev);
- if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
- *match_level = MLX5_MATCH_L4;
- err = mlx5e_tc_tun_parse_vxlan(priv, spec, f,
- headers_c, headers_v);
- } else if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_GRETAP) {
- *match_level = MLX5_MATCH_L3;
- err = mlx5e_tc_tun_parse_gretap(priv, spec, f,
- headers_c, headers_v);
- } else {
- netdev_warn(priv->netdev,
- "decapsulation offload is not supported for %s (kind: \"%s\")\n",
- netdev_name(filter_dev),
- mlx5e_netdev_kind(filter_dev));
-
- return -EOPNOTSUPP;
- }
- return err;
-}
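/* Editorial sketch (not part of the patch): the hunk above accepts a decap
 * filter only when the enc_dst_port mask is all-ones. memchr_inv() is a
 * kernel helper that returns NULL when every byte matches the given value;
 * the userspace stand-in below mimics just enough of it to show the
 * "fully masked" test. All values are illustrative.
 */
#include <stdio.h>
#include <stdint.h>

static const void *memchr_inv_sketch(const void *p, int c, size_t len)
{
	const unsigned char *b = p;

	for (size_t i = 0; i < len; i++)
		if (b[i] != (unsigned char)c)
			return b + i;	/* first byte that differs */
	return NULL;			/* every byte equals c */
}

int main(void)
{
	uint16_t full_mask = 0xffff, partial_mask = 0xff00;

	printf("full mask: %s\n",
	       memchr_inv_sketch(&full_mask, 0xff, sizeof(full_mask)) ?
	       "reject" : "accept");
	printf("partial mask: %s\n",
	       memchr_inv_sketch(&partial_mask, 0xff, sizeof(partial_mask)) ?
	       "reject" : "accept");
	return 0;
}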
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
index b63f15de899d..c362b9225dc2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
@@ -14,9 +14,41 @@
enum {
MLX5E_TC_TUNNEL_TYPE_UNKNOWN,
MLX5E_TC_TUNNEL_TYPE_VXLAN,
- MLX5E_TC_TUNNEL_TYPE_GRETAP
+ MLX5E_TC_TUNNEL_TYPE_GENEVE,
+ MLX5E_TC_TUNNEL_TYPE_GRETAP,
};
+struct mlx5e_tc_tunnel {
+ int tunnel_type;
+ enum mlx5_flow_match_level match_level;
+
+ bool (*can_offload)(struct mlx5e_priv *priv);
+ int (*calc_hlen)(struct mlx5e_encap_entry *e);
+ int (*init_encap_attr)(struct net_device *tunnel_dev,
+ struct mlx5e_priv *priv,
+ struct mlx5e_encap_entry *e,
+ struct netlink_ext_ack *extack);
+ int (*generate_ip_tun_hdr)(char buf[],
+ __u8 *ip_proto,
+ struct mlx5e_encap_entry *e);
+ int (*parse_udp_ports)(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct flow_cls_offload *f,
+ void *headers_c,
+ void *headers_v);
+ int (*parse_tunnel)(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct flow_cls_offload *f,
+ void *headers_c,
+ void *headers_v);
+};
+
+extern struct mlx5e_tc_tunnel vxlan_tunnel;
+extern struct mlx5e_tc_tunnel geneve_tunnel;
+extern struct mlx5e_tc_tunnel gre_tunnel;
+
+struct mlx5e_tc_tunnel *mlx5e_get_tc_tun(struct net_device *tunnel_dev);
+
int mlx5e_tc_tun_init_encap_attr(struct net_device *tunnel_dev,
struct mlx5e_priv *priv,
struct mlx5e_encap_entry *e,
@@ -30,15 +62,20 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
struct net_device *mirred_dev,
struct mlx5e_encap_entry *e);
-int mlx5e_tc_tun_get_type(struct net_device *tunnel_dev);
bool mlx5e_tc_tun_device_to_offload(struct mlx5e_priv *priv,
struct net_device *netdev);
int mlx5e_tc_tun_parse(struct net_device *filter_dev,
struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
- struct tc_cls_flower_offload *f,
+ struct flow_cls_offload *f,
void *headers_c,
void *headers_v, u8 *match_level);
+int mlx5e_tc_tun_parse_udp_ports(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct flow_cls_offload *f,
+ void *headers_c,
+ void *headers_v);
+
#endif //__MLX5_EN_TC_TUNNEL_H__
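/* Editorial sketch (not part of the patch): the header above replaces the old
 * per-type if/else in mlx5e_tc_tun_parse() with a table of per-tunnel ops
 * (vxlan_tunnel, geneve_tunnel, gre_tunnel) looked up via mlx5e_get_tc_tun().
 * The standalone model below shows only the dispatch idea; the names and
 * fields are simplified stand-ins, not the mlx5e definitions.
 */
#include <stdio.h>
#include <string.h>

struct tun_ops {
	const char *kind;
	int (*parse)(const char *filter);
};

static int parse_vxlan(const char *f) { printf("vxlan: %s\n", f); return 0; }
static int parse_gretap(const char *f) { printf("gretap: %s\n", f); return 0; }

static const struct tun_ops *get_tun_ops(const char *kind)
{
	static const struct tun_ops tbl[] = {
		{ .kind = "vxlan",  .parse = parse_vxlan },
		{ .kind = "gretap", .parse = parse_gretap },
	};

	for (size_t i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
		if (!strcmp(tbl[i].kind, kind))
			return &tbl[i];
	return NULL;	/* unknown kind: the driver returns -EOPNOTSUPP */
}

int main(void)
{
	const struct tun_ops *ops = get_tun_ops("vxlan");

	return ops ? ops->parse("decap filter") : 1;
}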
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
new file mode 100644
index 000000000000..951ea26d96bc
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
@@ -0,0 +1,335 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2018 Mellanox Technologies. */
+
+#include <net/geneve.h>
+#include "lib/geneve.h"
+#include "en/tc_tun.h"
+
+#define MLX5E_GENEVE_VER 0
+
+static bool mlx5e_tc_tun_can_offload_geneve(struct mlx5e_priv *priv)
+{
+ return !!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) & MLX5_FLEX_PROTO_GENEVE);
+}
+
+static int mlx5e_tc_tun_calc_hlen_geneve(struct mlx5e_encap_entry *e)
+{
+ return sizeof(struct udphdr) +
+ sizeof(struct genevehdr) +
+ e->tun_info->options_len;
+}
+
+static int mlx5e_tc_tun_check_udp_dport_geneve(struct mlx5e_priv *priv,
+ struct flow_cls_offload *f)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct netlink_ext_ack *extack = f->common.extack;
+ struct flow_match_ports enc_ports;
+
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS))
+ return -EOPNOTSUPP;
+
+ flow_rule_match_enc_ports(rule, &enc_ports);
+
+ /* Currently we support only default GENEVE
+ * port, so udp dst port must match.
+ */
+ if (be16_to_cpu(enc_ports.key->dst) != GENEVE_UDP_PORT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matched UDP dst port is not registered as a GENEVE port");
+ netdev_warn(priv->netdev,
+ "UDP port %d is not registered as a GENEVE port\n",
+ be16_to_cpu(enc_ports.key->dst));
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int mlx5e_tc_tun_parse_udp_ports_geneve(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct flow_cls_offload *f,
+ void *headers_c,
+ void *headers_v)
+{
+ int err;
+
+ err = mlx5e_tc_tun_parse_udp_ports(priv, spec, f, headers_c, headers_v);
+ if (err)
+ return err;
+
+ return mlx5e_tc_tun_check_udp_dport_geneve(priv, f);
+}
+
+static int mlx5e_tc_tun_init_encap_attr_geneve(struct net_device *tunnel_dev,
+ struct mlx5e_priv *priv,
+ struct mlx5e_encap_entry *e,
+ struct netlink_ext_ack *extack)
+{
+ e->tunnel = &geneve_tunnel;
+
+ /* The reformat type for GENEVE encap is similar to VXLAN:
+ * in both cases the HW inserts, at the same place in the packet,
+ * an encapsulation header that the SW provides.
+ */
+ e->reformat_type = MLX5_REFORMAT_TYPE_L2_TO_VXLAN;
+ return 0;
+}
+
+static void mlx5e_tunnel_id_to_vni(__be64 tun_id, __u8 *vni)
+{
+#ifdef __BIG_ENDIAN
+ vni[0] = (__force __u8)(tun_id >> 16);
+ vni[1] = (__force __u8)(tun_id >> 8);
+ vni[2] = (__force __u8)tun_id;
+#else
+ vni[0] = (__force __u8)((__force u64)tun_id >> 40);
+ vni[1] = (__force __u8)((__force u64)tun_id >> 48);
+ vni[2] = (__force __u8)((__force u64)tun_id >> 56);
+#endif
+}
+
+static int mlx5e_gen_ip_tunnel_header_geneve(char buf[],
+ __u8 *ip_proto,
+ struct mlx5e_encap_entry *e)
+{
+ const struct ip_tunnel_info *tun_info = e->tun_info;
+ struct udphdr *udp = (struct udphdr *)(buf);
+ struct genevehdr *geneveh;
+
+ geneveh = (struct genevehdr *)((char *)udp + sizeof(struct udphdr));
+
+ *ip_proto = IPPROTO_UDP;
+
+ udp->dest = tun_info->key.tp_dst;
+
+ memset(geneveh, 0, sizeof(*geneveh));
+ geneveh->ver = MLX5E_GENEVE_VER;
+ geneveh->opt_len = tun_info->options_len / 4;
+ geneveh->oam = !!(tun_info->key.tun_flags & TUNNEL_OAM);
+ geneveh->critical = !!(tun_info->key.tun_flags & TUNNEL_CRIT_OPT);
+ mlx5e_tunnel_id_to_vni(tun_info->key.tun_id, geneveh->vni);
+ geneveh->proto_type = htons(ETH_P_TEB);
+
+ if (tun_info->key.tun_flags & TUNNEL_GENEVE_OPT) {
+ if (!geneveh->opt_len)
+ return -EOPNOTSUPP;
+ ip_tunnel_info_opts_get(geneveh->options, tun_info);
+ }
+
+ return 0;
+}
+
+static int mlx5e_tc_tun_parse_geneve_vni(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct flow_cls_offload *f)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct netlink_ext_ack *extack = f->common.extack;
+ struct flow_match_enc_keyid enc_keyid;
+ void *misc_c, *misc_v;
+
+ misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
+ misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
+
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID))
+ return 0;
+
+ flow_rule_match_enc_keyid(rule, &enc_keyid);
+
+ if (!enc_keyid.mask->keyid)
+ return 0;
+
+ if (!MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, ft_field_support.outer_geneve_vni)) {
+ NL_SET_ERR_MSG_MOD(extack, "Matching on GENEVE VNI is not supported");
+ netdev_warn(priv->netdev, "Matching on GENEVE VNI is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ MLX5_SET(fte_match_set_misc, misc_c, geneve_vni, be32_to_cpu(enc_keyid.mask->keyid));
+ MLX5_SET(fte_match_set_misc, misc_v, geneve_vni, be32_to_cpu(enc_keyid.key->keyid));
+
+ return 0;
+}
+
+static int mlx5e_tc_tun_parse_geneve_options(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct flow_cls_offload *f)
+{
+ u8 max_tlv_option_data_len = MLX5_CAP_GEN(priv->mdev, max_geneve_tlv_option_data_len);
+ u8 max_tlv_options = MLX5_CAP_GEN(priv->mdev, max_geneve_tlv_options);
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct netlink_ext_ack *extack = f->common.extack;
+ void *misc_c, *misc_v, *misc_3_c, *misc_3_v;
+ struct geneve_opt *option_key, *option_mask;
+ __be32 opt_data_key = 0, opt_data_mask = 0;
+ struct flow_match_enc_opts enc_opts;
+ int res = 0;
+
+ misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
+ misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
+ misc_3_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_3);
+ misc_3_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_3);
+
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS))
+ return 0;
+
+ flow_rule_match_enc_opts(rule, &enc_opts);
+
+ if (memchr_inv(&enc_opts.mask->data, 0, sizeof(enc_opts.mask->data)) &&
+ !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
+ ft_field_support.geneve_tlv_option_0_data)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matching on GENEVE options is not supported");
+ netdev_warn(priv->netdev,
+ "Matching on GENEVE options is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* make sure that we're talking about GENEVE options */
+
+ if (enc_opts.key->dst_opt_type != TUNNEL_GENEVE_OPT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matching on GENEVE options: option type is not GENEVE");
+ netdev_warn(priv->netdev,
+ "Matching on GENEVE options: option type is not GENEVE\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (enc_opts.mask->len &&
+ !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
+ ft_field_support.outer_geneve_opt_len)) {
+ NL_SET_ERR_MSG_MOD(extack, "Matching on GENEVE options len is not supported");
+ netdev_warn(priv->netdev,
+ "Matching on GENEVE options len is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* max_geneve_tlv_option_data_len is expressed in multiples of 4 bytes
+ * and doesn't include the TLV option header. 'geneve_opt_len' is the
+ * total length of all options, including their headers, also in
+ * multiples of 4 bytes. The length reported by the dissector is in
+ * plain bytes.
+ */
+
+ if ((enc_opts.key->len / 4) > ((max_tlv_option_data_len + 1) * max_tlv_options)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matching on GENEVE options: unsupported options len");
+ netdev_warn(priv->netdev,
+ "Matching on GENEVE options: unsupported options len (len=%d)\n",
+ enc_opts.key->len);
+ return -EOPNOTSUPP;
+ }
+
+ MLX5_SET(fte_match_set_misc, misc_c, geneve_opt_len, enc_opts.mask->len / 4);
+ MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len, enc_opts.key->len / 4);
+
+ /* we support matching on one option only, so just get it */
+ option_key = (struct geneve_opt *)&enc_opts.key->data[0];
+ option_mask = (struct geneve_opt *)&enc_opts.mask->data[0];
+
+ if (option_key->length > max_tlv_option_data_len) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matching on GENEVE options: unsupported option len");
+ netdev_warn(priv->netdev,
+ "Matching on GENEVE options: unsupported option len (key=%d, mask=%d)\n",
+ option_key->length, option_mask->length);
+ return -EOPNOTSUPP;
+ }
+
+ /* data can't be all 0 - fail to offload such a rule */
+ if (!memchr_inv(option_key->opt_data, 0, option_key->length * 4)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matching on GENEVE options: can't match on 0 data field");
+ netdev_warn(priv->netdev,
+ "Matching on GENEVE options: can't match on 0 data field\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* add new GENEVE TLV options object */
+ res = mlx5_geneve_tlv_option_add(priv->mdev->geneve, option_key);
+ if (res) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matching on GENEVE options: failed creating TLV opt object");
+ netdev_warn(priv->netdev,
+ "Matching on GENEVE options: failed creating TLV opt object (class:type:len = 0x%x:0x%x:%d)\n",
+ be16_to_cpu(option_key->opt_class),
+ option_key->type, option_key->length);
+ return res;
+ }
+
+ /* In general, after creating the object, we would need to query it
+ * in order to check which option data field to set in misc3.
+ * But we support only geneve_tlv_option_0_data, so there is no
+ * point querying at this stage.
+ */
+
+ memcpy(&opt_data_key, option_key->opt_data, option_key->length * 4);
+ memcpy(&opt_data_mask, option_mask->opt_data, option_mask->length * 4);
+ MLX5_SET(fte_match_set_misc3, misc_3_v,
+ geneve_tlv_option_0_data, be32_to_cpu(opt_data_key));
+ MLX5_SET(fte_match_set_misc3, misc_3_c,
+ geneve_tlv_option_0_data, be32_to_cpu(opt_data_mask));
+
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_3;
+
+ return 0;
+}
+
+static int mlx5e_tc_tun_parse_geneve_params(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct flow_cls_offload *f)
+{
+ void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
+ void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
+ struct netlink_ext_ack *extack = f->common.extack;
+
+ /* match on OAM - packets with OAM bit on should NOT be offloaded */
+
+ if (!MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, ft_field_support.outer_geneve_oam)) {
+ NL_SET_ERR_MSG_MOD(extack, "Matching on GENEVE OAM is not supported");
+ netdev_warn(priv->netdev, "Matching on GENEVE OAM is not supported\n");
+ return -EOPNOTSUPP;
+ }
+ MLX5_SET_TO_ONES(fte_match_set_misc, misc_c, geneve_oam);
+ MLX5_SET(fte_match_set_misc, misc_v, geneve_oam, 0);
+
+ /* Match on GENEVE protocol. We support only Transparent Eth Bridge. */
+
+ if (MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
+ ft_field_support.outer_geneve_protocol_type)) {
+ MLX5_SET_TO_ONES(fte_match_set_misc, misc_c, geneve_protocol_type);
+ MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type, ETH_P_TEB);
+ }
+
+ return 0;
+}
+
+static int mlx5e_tc_tun_parse_geneve(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct flow_cls_offload *f,
+ void *headers_c,
+ void *headers_v)
+{
+ int err;
+
+ err = mlx5e_tc_tun_parse_geneve_params(priv, spec, f);
+ if (err)
+ return err;
+
+ err = mlx5e_tc_tun_parse_geneve_vni(priv, spec, f);
+ if (err)
+ return err;
+
+ return mlx5e_tc_tun_parse_geneve_options(priv, spec, f);
+}
+
+struct mlx5e_tc_tunnel geneve_tunnel = {
+ .tunnel_type = MLX5E_TC_TUNNEL_TYPE_GENEVE,
+ .match_level = MLX5_MATCH_L4,
+ .can_offload = mlx5e_tc_tun_can_offload_geneve,
+ .calc_hlen = mlx5e_tc_tun_calc_hlen_geneve,
+ .init_encap_attr = mlx5e_tc_tun_init_encap_attr_geneve,
+ .generate_ip_tun_hdr = mlx5e_gen_ip_tunnel_header_geneve,
+ .parse_udp_ports = mlx5e_tc_tun_parse_udp_ports_geneve,
+ .parse_tunnel = mlx5e_tc_tun_parse_geneve,
+};
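/* Editorial sketch (not part of the patch): the option-length check in
 * mlx5e_tc_tun_parse_geneve_options() works in 4-byte words.
 * max_geneve_tlv_option_data_len excludes the 4-byte TLV header, so each
 * option occupies at most (data_len + 1) words. The capability values below
 * are made up, only to show the arithmetic.
 */
#include <stdio.h>

int main(void)
{
	unsigned int max_data_words = 2;	/* per-option data, in 4-byte words */
	unsigned int max_options = 1;		/* options supported by the HW */
	unsigned int requested_bytes = 12;	/* length from the dissector, in bytes */

	unsigned int budget_words = (max_data_words + 1) * max_options;
	unsigned int requested_words = requested_bytes / 4;

	printf("budget: %u words, requested: %u words -> %s\n",
	       budget_words, requested_words,
	       requested_words > budget_words ? "reject" : "accept");
	return 0;
}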
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c
new file mode 100644
index 000000000000..58b13192df23
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2018 Mellanox Technologies. */
+
+#include <net/gre.h>
+#include "en/tc_tun.h"
+
+static bool mlx5e_tc_tun_can_offload_gretap(struct mlx5e_priv *priv)
+{
+ return !!MLX5_CAP_ESW(priv->mdev, nvgre_encap_decap);
+}
+
+static int mlx5e_tc_tun_calc_hlen_gretap(struct mlx5e_encap_entry *e)
+{
+ return gre_calc_hlen(e->tun_info->key.tun_flags);
+}
+
+static int mlx5e_tc_tun_init_encap_attr_gretap(struct net_device *tunnel_dev,
+ struct mlx5e_priv *priv,
+ struct mlx5e_encap_entry *e,
+ struct netlink_ext_ack *extack)
+{
+ e->tunnel = &gre_tunnel;
+ e->reformat_type = MLX5_REFORMAT_TYPE_L2_TO_NVGRE;
+ return 0;
+}
+
+static int mlx5e_gen_ip_tunnel_header_gretap(char buf[],
+ __u8 *ip_proto,
+ struct mlx5e_encap_entry *e)
+{
+ const struct ip_tunnel_key *tun_key = &e->tun_info->key;
+ struct gre_base_hdr *greh = (struct gre_base_hdr *)(buf);
+ __be32 tun_id = tunnel_id_to_key32(tun_key->tun_id);
+ int hdr_len;
+
+ *ip_proto = IPPROTO_GRE;
+
+ /* the HW does not calculate GRE csum or sequences */
+ if (tun_key->tun_flags & (TUNNEL_CSUM | TUNNEL_SEQ))
+ return -EOPNOTSUPP;
+
+ greh->protocol = htons(ETH_P_TEB);
+
+ /* GRE key */
+ hdr_len = mlx5e_tc_tun_calc_hlen_gretap(e);
+ greh->flags = gre_tnl_flags_to_gre_flags(tun_key->tun_flags);
+ if (tun_key->tun_flags & TUNNEL_KEY) {
+ __be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4);
+ *ptr = tun_id;
+ }
+
+ return 0;
+}
+
+static int mlx5e_tc_tun_parse_gretap(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct flow_cls_offload *f,
+ void *headers_c,
+ void *headers_v)
+{
+ void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
+ void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
+
+ /* gre protocol */
+ MLX5_SET_TO_ONES(fte_match_set_misc, misc_c, gre_protocol);
+ MLX5_SET(fte_match_set_misc, misc_v, gre_protocol, ETH_P_TEB);
+
+ /* gre key */
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
+ struct flow_match_enc_keyid enc_keyid;
+
+ flow_rule_match_enc_keyid(rule, &enc_keyid);
+ MLX5_SET(fte_match_set_misc, misc_c,
+ gre_key.key, be32_to_cpu(enc_keyid.mask->keyid));
+ MLX5_SET(fte_match_set_misc, misc_v,
+ gre_key.key, be32_to_cpu(enc_keyid.key->keyid));
+ }
+
+ return 0;
+}
+
+struct mlx5e_tc_tunnel gre_tunnel = {
+ .tunnel_type = MLX5E_TC_TUNNEL_TYPE_GRETAP,
+ .match_level = MLX5_MATCH_L3,
+ .can_offload = mlx5e_tc_tun_can_offload_gretap,
+ .calc_hlen = mlx5e_tc_tun_calc_hlen_gretap,
+ .init_encap_attr = mlx5e_tc_tun_init_encap_attr_gretap,
+ .generate_ip_tun_hdr = mlx5e_gen_ip_tunnel_header_gretap,
+ .parse_udp_ports = NULL,
+ .parse_tunnel = mlx5e_tc_tun_parse_gretap,
+};
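/* Editorial sketch (not part of the patch): mlx5e_gen_ip_tunnel_header_gretap()
 * rejects TUNNEL_CSUM and TUNNEL_SEQ, so when TUNNEL_KEY is set the header is
 * just the 4-byte base followed by the 4-byte key, which is why the key is
 * written at hdr_len - 4. The flag value below is a placeholder, not the
 * kernel's TUNNEL_KEY constant.
 */
#include <stdio.h>

#define TUN_KEY 0x1

static unsigned int gre_hlen_sketch(unsigned int flags)
{
	return 4 + ((flags & TUN_KEY) ? 4 : 0);	/* base header + optional key */
}

int main(void)
{
	unsigned int hdr_len = gre_hlen_sketch(TUN_KEY);

	printf("hdr_len=%u, key written at offset %u\n", hdr_len, hdr_len - 4);
	return 0;
}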
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
new file mode 100644
index 000000000000..37b176801bcc
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2018 Mellanox Technologies. */
+
+#include <net/vxlan.h>
+#include "lib/vxlan.h"
+#include "en/tc_tun.h"
+
+static bool mlx5e_tc_tun_can_offload_vxlan(struct mlx5e_priv *priv)
+{
+ return !!MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap);
+}
+
+static int mlx5e_tc_tun_calc_hlen_vxlan(struct mlx5e_encap_entry *e)
+{
+ return VXLAN_HLEN;
+}
+
+static int mlx5e_tc_tun_check_udp_dport_vxlan(struct mlx5e_priv *priv,
+ struct flow_cls_offload *f)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct netlink_ext_ack *extack = f->common.extack;
+ struct flow_match_ports enc_ports;
+
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS))
+ return -EOPNOTSUPP;
+
+ flow_rule_match_enc_ports(rule, &enc_ports);
+
+ /* check the UDP destination port validity */
+
+ if (!mlx5_vxlan_lookup_port(priv->mdev->vxlan,
+ be16_to_cpu(enc_ports.key->dst))) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matched UDP dst port is not registered as a VXLAN port");
+ netdev_warn(priv->netdev,
+ "UDP port %d is not registered as a VXLAN port\n",
+ be16_to_cpu(enc_ports.key->dst));
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int mlx5e_tc_tun_parse_udp_ports_vxlan(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct flow_cls_offload *f,
+ void *headers_c,
+ void *headers_v)
+{
+ int err = 0;
+
+ err = mlx5e_tc_tun_parse_udp_ports(priv, spec, f, headers_c, headers_v);
+ if (err)
+ return err;
+
+ return mlx5e_tc_tun_check_udp_dport_vxlan(priv, f);
+}
+
+static int mlx5e_tc_tun_init_encap_attr_vxlan(struct net_device *tunnel_dev,
+ struct mlx5e_priv *priv,
+ struct mlx5e_encap_entry *e,
+ struct netlink_ext_ack *extack)
+{
+ int dst_port = be16_to_cpu(e->tun_info->key.tp_dst);
+
+ e->tunnel = &vxlan_tunnel;
+
+ if (!mlx5_vxlan_lookup_port(priv->mdev->vxlan, dst_port)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "vxlan udp dport was not registered with the HW");
+ netdev_warn(priv->netdev,
+ "%d isn't an offloaded vxlan udp dport\n",
+ dst_port);
+ return -EOPNOTSUPP;
+ }
+
+ e->reformat_type = MLX5_REFORMAT_TYPE_L2_TO_VXLAN;
+ return 0;
+}
+
+static int mlx5e_gen_ip_tunnel_header_vxlan(char buf[],
+ __u8 *ip_proto,
+ struct mlx5e_encap_entry *e)
+{
+ const struct ip_tunnel_key *tun_key = &e->tun_info->key;
+ __be32 tun_id = tunnel_id_to_key32(tun_key->tun_id);
+ struct udphdr *udp = (struct udphdr *)(buf);
+ struct vxlanhdr *vxh;
+
+ vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));
+ *ip_proto = IPPROTO_UDP;
+
+ udp->dest = tun_key->tp_dst;
+ vxh->vx_flags = VXLAN_HF_VNI;
+ vxh->vx_vni = vxlan_vni_field(tun_id);
+
+ return 0;
+}
+
+static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct flow_cls_offload *f,
+ void *headers_c,
+ void *headers_v)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct netlink_ext_ack *extack = f->common.extack;
+ struct flow_match_enc_keyid enc_keyid;
+ void *misc_c, *misc_v;
+
+ misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
+ misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
+
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID))
+ return 0;
+
+ flow_rule_match_enc_keyid(rule, &enc_keyid);
+
+ if (!enc_keyid.mask->keyid)
+ return 0;
+
+ /* a match on the VNI was requested, so HW support for it is required */
+
+ if (!MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
+ ft_field_support.outer_vxlan_vni)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matching on VXLAN VNI is not supported");
+ netdev_warn(priv->netdev,
+ "Matching on VXLAN VNI is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
+ be32_to_cpu(enc_keyid.mask->keyid));
+ MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
+ be32_to_cpu(enc_keyid.key->keyid));
+
+ return 0;
+}
+
+struct mlx5e_tc_tunnel vxlan_tunnel = {
+ .tunnel_type = MLX5E_TC_TUNNEL_TYPE_VXLAN,
+ .match_level = MLX5_MATCH_L4,
+ .can_offload = mlx5e_tc_tun_can_offload_vxlan,
+ .calc_hlen = mlx5e_tc_tun_calc_hlen_vxlan,
+ .init_encap_attr = mlx5e_tc_tun_init_encap_attr_vxlan,
+ .generate_ip_tun_hdr = mlx5e_gen_ip_tunnel_header_vxlan,
+ .parse_udp_ports = mlx5e_tc_tun_parse_udp_ports_vxlan,
+ .parse_tunnel = mlx5e_tc_tun_parse_vxlan,
+};
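/* Editorial sketch (not part of the patch): the three .calc_hlen callbacks
 * only differ in which outer headers they count: VXLAN_HLEN covers the UDP
 * and VXLAN headers, GENEVE adds its options length, and GRE depends on the
 * tunnel flags. The 8-byte sizes below are the standard UDP/VXLAN/GENEVE
 * base header sizes, used here purely for illustration.
 */
#include <stdio.h>

int main(void)
{
	unsigned int udp = 8, vxlan = 8, geneve_base = 8, geneve_opts = 12;

	printf("vxlan tunnel hlen:  %u\n", udp + vxlan);
	printf("geneve tunnel hlen: %u\n", udp + geneve_base + geneve_opts);
	return 0;
}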
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
new file mode 100644
index 000000000000..ddfe19adb3d9
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -0,0 +1,208 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#ifndef __MLX5_EN_TXRX_H___
+#define __MLX5_EN_TXRX_H___
+
+#include "en.h"
+
+#define MLX5E_SQ_NOPS_ROOM MLX5_SEND_WQE_MAX_WQEBBS
+#define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\
+ MLX5E_SQ_NOPS_ROOM)
+
+#ifndef CONFIG_MLX5_EN_TLS
+#define MLX5E_SQ_TLS_ROOM (0)
+#else
+/* TLS offload requires additional stop_room for:
+ * - a resync SKB.
+ * kTLS offload requires additional stop_room for:
+ * - static params WQE,
+ * - progress params WQE, and
+ * - resync DUMP per frag.
+ */
+#define MLX5E_SQ_TLS_ROOM \
+ (MLX5_SEND_WQE_MAX_WQEBBS + \
+ MLX5E_KTLS_STATIC_WQEBBS + MLX5E_KTLS_PROGRESS_WQEBBS + \
+ MAX_SKB_FRAGS * MLX5E_KTLS_MAX_DUMP_WQEBBS)
+#endif
+
+#define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))
+
+static inline bool
+mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n)
+{
+ return (mlx5_wq_cyc_ctr2ix(wq, cc - pc) >= n) || (cc == pc);
+}
+
+static inline void *
+mlx5e_sq_fetch_wqe(struct mlx5e_txqsq *sq, size_t size, u16 *pi)
+{
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ void *wqe;
+
+ *pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+ wqe = mlx5_wq_cyc_get_wqe(wq, *pi);
+ memset(wqe, 0, size);
+
+ return wqe;
+}
+
+static inline struct mlx5e_tx_wqe *
+mlx5e_post_nop(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
+{
+ u16 pi = mlx5_wq_cyc_ctr2ix(wq, *pc);
+ struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+ struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
+
+ memset(cseg, 0, sizeof(*cseg));
+
+ cseg->opmod_idx_opcode = cpu_to_be32((*pc << 8) | MLX5_OPCODE_NOP);
+ cseg->qpn_ds = cpu_to_be32((sqn << 8) | 0x01);
+
+ (*pc)++;
+
+ return wqe;
+}
+
+static inline struct mlx5e_tx_wqe *
+mlx5e_post_nop_fence(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
+{
+ u16 pi = mlx5_wq_cyc_ctr2ix(wq, *pc);
+ struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+ struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
+
+ memset(cseg, 0, sizeof(*cseg));
+
+ cseg->opmod_idx_opcode = cpu_to_be32((*pc << 8) | MLX5_OPCODE_NOP);
+ cseg->qpn_ds = cpu_to_be32((sqn << 8) | 0x01);
+ cseg->fm_ce_se = MLX5_FENCE_MODE_INITIATOR_SMALL;
+
+ (*pc)++;
+
+ return wqe;
+}
+
+static inline void
+mlx5e_fill_sq_frag_edge(struct mlx5e_txqsq *sq, struct mlx5_wq_cyc *wq,
+ u16 pi, u16 nnops)
+{
+ struct mlx5e_tx_wqe_info *edge_wi, *wi = &sq->db.wqe_info[pi];
+
+ edge_wi = wi + nnops;
+
+ /* fill the sq frag edge with nops to avoid a wqe wrapping across two pages */
+ for (; wi < edge_wi; wi++) {
+ wi->skb = NULL;
+ wi->num_wqebbs = 1;
+ mlx5e_post_nop(wq, sq->sqn, &sq->pc);
+ }
+ sq->stats->nop += nnops;
+}
+
+static inline void
+mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc, void __iomem *uar_map,
+ struct mlx5_wqe_ctrl_seg *ctrl)
+{
+ ctrl->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
+ /* ensure wqe is visible to device before updating doorbell record */
+ dma_wmb();
+
+ *wq->db = cpu_to_be32(pc);
+
+ /* ensure doorbell record is visible to device before ringing the
+ * doorbell
+ */
+ wmb();
+
+ mlx5_write64((__be32 *)ctrl, uar_map);
+}
+
+static inline bool mlx5e_transport_inline_tx_wqe(struct mlx5e_tx_wqe *wqe)
+{
+ return !!wqe->ctrl.tisn;
+}
+
+static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
+{
+ struct mlx5_core_cq *mcq;
+
+ mcq = &cq->mcq;
+ mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, cq->wq.cc);
+}
+
+static inline struct mlx5e_sq_dma *
+mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i)
+{
+ return &sq->db.dma_fifo[i & sq->dma_fifo_mask];
+}
+
+static inline void
+mlx5e_dma_push(struct mlx5e_txqsq *sq, dma_addr_t addr, u32 size,
+ enum mlx5e_dma_map_type map_type)
+{
+ struct mlx5e_sq_dma *dma = mlx5e_dma_get(sq, sq->dma_fifo_pc++);
+
+ dma->addr = addr;
+ dma->size = size;
+ dma->type = map_type;
+}
+
+static inline void
+mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma)
+{
+ switch (dma->type) {
+ case MLX5E_DMA_MAP_SINGLE:
+ dma_unmap_single(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
+ break;
+ case MLX5E_DMA_MAP_PAGE:
+ dma_unmap_page(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
+ break;
+ default:
+ WARN_ONCE(true, "mlx5e_tx_dma_unmap unknown DMA type!\n");
+ }
+}
+
+/* SW parser related functions */
+
+struct mlx5e_swp_spec {
+ __be16 l3_proto;
+ u8 l4_proto;
+ u8 is_tun;
+ __be16 tun_l3_proto;
+ u8 tun_l4_proto;
+};
+
+static inline void
+mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
+ struct mlx5e_swp_spec *swp_spec)
+{
+ /* SWP offsets are in 2-bytes words */
+ eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
+ if (swp_spec->l3_proto == htons(ETH_P_IPV6))
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;
+ if (swp_spec->l4_proto) {
+ eseg->swp_outer_l4_offset = skb_transport_offset(skb) / 2;
+ if (swp_spec->l4_proto == IPPROTO_UDP)
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L4_UDP;
+ }
+
+ if (swp_spec->is_tun) {
+ eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
+ if (swp_spec->tun_l3_proto == htons(ETH_P_IPV6))
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
+ } else { /* typically for ipsec when xfrm mode != XFRM_MODE_TUNNEL */
+ eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
+ if (swp_spec->l3_proto == htons(ETH_P_IPV6))
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
+ }
+ switch (swp_spec->tun_l4_proto) {
+ case IPPROTO_UDP:
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
+ /* fall through */
+ case IPPROTO_TCP:
+ eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
+ break;
+ }
+}
+
+#endif
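/* Editorial sketch (not part of the patch): mlx5e_wqc_has_room_for() relies on
 * the counters being free-running u16s and the ring size being a power of two,
 * so (cc - pc) masked by (size - 1) equals the number of free slots except
 * when the ring is completely empty (cc == pc), which the extra test covers.
 * The ring size and counters below are arbitrary example values.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint16_t size = 8, mask = size - 1;
	uint16_t cc = 5, pc = 7;	/* two descriptors in flight */

	uint16_t free_slots = (uint16_t)(cc - pc) & mask;

	printf("free slots: %u, room for 4 WQEBBs: %s\n", free_slots,
	       (free_slots >= 4 || cc == pc) ? "yes" : "no");
	return 0;
}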
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index eb8ef78e5626..b0b982cf69bb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -31,11 +31,13 @@
*/
#include <linux/bpf_trace.h>
+#include <net/xdp_sock.h>
#include "en/xdp.h"
+#include "en/params.h"
-int mlx5e_xdp_max_mtu(struct mlx5e_params *params)
+int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk)
{
- int hr = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
+ int hr = mlx5e_get_linear_rq_headroom(params, xsk);
/* Let S := SKB_DATA_ALIGN(sizeof(struct skb_shared_info)).
* The condition checked in mlx5e_rx_is_linear_skb is:
@@ -54,25 +56,70 @@ int mlx5e_xdp_max_mtu(struct mlx5e_params *params)
}
static inline bool
-mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_dma_info *di,
- struct xdp_buff *xdp)
+mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
+ struct mlx5e_dma_info *di, struct xdp_buff *xdp)
{
+ struct mlx5e_xdp_xmit_data xdptxd;
struct mlx5e_xdp_info xdpi;
+ struct xdp_frame *xdpf;
+ dma_addr_t dma_addr;
- xdpi.xdpf = convert_to_xdp_frame(xdp);
- if (unlikely(!xdpi.xdpf))
+ xdpf = convert_to_xdp_frame(xdp);
+ if (unlikely(!xdpf))
return false;
- xdpi.dma_addr = di->addr + (xdpi.xdpf->data - (void *)xdpi.xdpf);
- dma_sync_single_for_device(sq->pdev, xdpi.dma_addr,
- xdpi.xdpf->len, PCI_DMA_TODEVICE);
- xdpi.di = *di;
- return sq->xmit_xdp_frame(sq, &xdpi);
+ xdptxd.data = xdpf->data;
+ xdptxd.len = xdpf->len;
+
+ if (xdp->rxq->mem.type == MEM_TYPE_ZERO_COPY) {
+ /* The xdp_buff was in the UMEM and was copied into a newly
+ * allocated page. The UMEM page was returned via the ZCA, and
+ * this new page has to be mapped at this point and has to be
+ * unmapped and returned via xdp_return_frame on completion.
+ */
+
+ /* Prevent double recycling of the UMEM page. Even if this
+ * function returns false, the xdp_buff shouldn't be recycled,
+ * as that was already done in xdp_convert_zc_to_xdp_frame.
+ */
+ __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */
+
+ xdpi.mode = MLX5E_XDP_XMIT_MODE_FRAME;
+
+ dma_addr = dma_map_single(sq->pdev, xdptxd.data, xdptxd.len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(sq->pdev, dma_addr)) {
+ xdp_return_frame(xdpf);
+ return false;
+ }
+
+ xdptxd.dma_addr = dma_addr;
+ xdpi.frame.xdpf = xdpf;
+ xdpi.frame.dma_addr = dma_addr;
+ } else {
+ /* The driver assumes that convert_to_xdp_frame returns an xdp_frame
+ * that points to the same memory region as the original
+ * xdp_buff. This allows the memory to be mapped only once and
+ * used in DMA_BIDIRECTIONAL mode.
+ */
+
+ xdpi.mode = MLX5E_XDP_XMIT_MODE_PAGE;
+
+ dma_addr = di->addr + (xdpf->data - (void *)xdpf);
+ dma_sync_single_for_device(sq->pdev, dma_addr, xdptxd.len,
+ DMA_TO_DEVICE);
+
+ xdptxd.dma_addr = dma_addr;
+ xdpi.page.rq = rq;
+ xdpi.page.di = *di;
+ }
+
+ return sq->xmit_xdp_frame(sq, &xdptxd, &xdpi, 0);
}
/* returns true if packet was consumed by xdp */
bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
- void *va, u16 *rx_headroom, u32 *len)
+ void *va, u16 *rx_headroom, u32 *len, bool xsk)
{
struct bpf_prog *prog = READ_ONCE(rq->xdp_prog);
struct xdp_buff xdp;
@@ -86,16 +133,20 @@ bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
xdp_set_data_meta_invalid(&xdp);
xdp.data_end = xdp.data + *len;
xdp.data_hard_start = va;
+ if (xsk)
+ xdp.handle = di->xsk.handle;
xdp.rxq = &rq->xdp_rxq;
act = bpf_prog_run_xdp(prog, &xdp);
+ if (xsk)
+ xdp.handle += xdp.data - xdp.data_hard_start;
switch (act) {
case XDP_PASS:
*rx_headroom = xdp.data - xdp.data_hard_start;
*len = xdp.data_end - xdp.data;
return false;
case XDP_TX:
- if (unlikely(!mlx5e_xmit_xdp_buff(&rq->xdpsq, di, &xdp)))
+ if (unlikely(!mlx5e_xmit_xdp_buff(rq->xdpsq, rq, di, &xdp)))
goto xdp_abort;
__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */
return true;
@@ -106,7 +157,8 @@ bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
goto xdp_abort;
__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags);
__set_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags);
- mlx5e_page_dma_unmap(rq, di);
+ if (!xsk)
+ mlx5e_page_dma_unmap(rq, di);
rq->stats->xdp_redirect++;
return true;
default:
@@ -160,7 +212,7 @@ static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq)
stats->mpwqe++;
}
-static void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq)
+void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq)
{
struct mlx5_wq_cyc *wq = &sq->wq;
struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;
@@ -183,32 +235,55 @@ static void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq)
session->wqe = NULL; /* Close session */
}
+enum {
+ MLX5E_XDP_CHECK_OK = 1,
+ MLX5E_XDP_CHECK_START_MPWQE = 2,
+};
+
+static int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq)
+{
+ if (unlikely(!sq->mpwqe.wqe)) {
+ if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc,
+ MLX5_SEND_WQE_MAX_WQEBBS))) {
+ /* SQ is full, ring doorbell */
+ mlx5e_xmit_xdp_doorbell(sq);
+ sq->stats->full++;
+ return -EBUSY;
+ }
+
+ return MLX5E_XDP_CHECK_START_MPWQE;
+ }
+
+ return MLX5E_XDP_CHECK_OK;
+}
+
static bool mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq,
- struct mlx5e_xdp_info *xdpi)
+ struct mlx5e_xdp_xmit_data *xdptxd,
+ struct mlx5e_xdp_info *xdpi,
+ int check_result)
{
struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;
struct mlx5e_xdpsq_stats *stats = sq->stats;
- struct xdp_frame *xdpf = xdpi->xdpf;
-
- if (unlikely(sq->hw_mtu < xdpf->len)) {
+ if (unlikely(xdptxd->len > sq->hw_mtu)) {
stats->err++;
return false;
}
- if (unlikely(!session->wqe)) {
- if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc,
- MLX5_SEND_WQE_MAX_WQEBBS))) {
- /* SQ is full, ring doorbell */
- mlx5e_xmit_xdp_doorbell(sq);
- stats->full++;
- return false;
- }
+ if (!check_result)
+ check_result = mlx5e_xmit_xdp_frame_check_mpwqe(sq);
+ if (unlikely(check_result < 0))
+ return false;
+ if (check_result == MLX5E_XDP_CHECK_START_MPWQE) {
+ /* Start the session when nothing can fail, so it's guaranteed
+ * that if there is an active session, it has at least one dseg,
+ * and it's safe to complete it at any time.
+ */
mlx5e_xdp_mpwqe_session_start(sq);
}
- mlx5e_xdp_mpwqe_add_dseg(sq, xdpi, stats);
+ mlx5e_xdp_mpwqe_add_dseg(sq, xdptxd, stats);
if (unlikely(session->complete ||
session->ds_count == session->max_ds_count))
@@ -219,7 +294,22 @@ static bool mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq,
return true;
}
-static bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi)
+static int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq)
+{
+ if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, 1))) {
+ /* SQ is full, ring doorbell */
+ mlx5e_xmit_xdp_doorbell(sq);
+ sq->stats->full++;
+ return -EBUSY;
+ }
+
+ return MLX5E_XDP_CHECK_OK;
+}
+
+static bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq,
+ struct mlx5e_xdp_xmit_data *xdptxd,
+ struct mlx5e_xdp_info *xdpi,
+ int check_result)
{
struct mlx5_wq_cyc *wq = &sq->wq;
u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
@@ -229,9 +319,8 @@ static bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *
struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
struct mlx5_wqe_data_seg *dseg = wqe->data;
- struct xdp_frame *xdpf = xdpi->xdpf;
- dma_addr_t dma_addr = xdpi->dma_addr;
- unsigned int dma_len = xdpf->len;
+ dma_addr_t dma_addr = xdptxd->dma_addr;
+ u32 dma_len = xdptxd->len;
struct mlx5e_xdpsq_stats *stats = sq->stats;
@@ -242,18 +331,16 @@ static bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *
return false;
}
- if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1))) {
- /* SQ is full, ring doorbell */
- mlx5e_xmit_xdp_doorbell(sq);
- stats->full++;
+ if (!check_result)
+ check_result = mlx5e_xmit_xdp_frame_check(sq);
+ if (unlikely(check_result < 0))
return false;
- }
cseg->fm_ce_se = 0;
/* copy the inline part if required */
if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
- memcpy(eseg->inline_hdr.start, xdpf->data, MLX5E_XDP_MIN_INLINE);
+ memcpy(eseg->inline_hdr.start, xdptxd->data, MLX5E_XDP_MIN_INLINE);
eseg->inline_hdr.sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE);
dma_len -= MLX5E_XDP_MIN_INLINE;
dma_addr += MLX5E_XDP_MIN_INLINE;
@@ -277,7 +364,7 @@ static bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *
static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
struct mlx5e_xdp_wqe_info *wi,
- struct mlx5e_rq *rq,
+ u32 *xsk_frames,
bool recycle)
{
struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo;
@@ -286,22 +373,32 @@ static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
for (i = 0; i < wi->num_pkts; i++) {
struct mlx5e_xdp_info xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo);
- if (rq) {
- /* XDP_TX */
- mlx5e_page_release(rq, &xdpi.di, recycle);
- } else {
- /* XDP_REDIRECT */
- dma_unmap_single(sq->pdev, xdpi.dma_addr,
- xdpi.xdpf->len, DMA_TO_DEVICE);
- xdp_return_frame(xdpi.xdpf);
+ switch (xdpi.mode) {
+ case MLX5E_XDP_XMIT_MODE_FRAME:
+ /* XDP_TX from the XSK RQ and XDP_REDIRECT */
+ dma_unmap_single(sq->pdev, xdpi.frame.dma_addr,
+ xdpi.frame.xdpf->len, DMA_TO_DEVICE);
+ xdp_return_frame(xdpi.frame.xdpf);
+ break;
+ case MLX5E_XDP_XMIT_MODE_PAGE:
+ /* XDP_TX from the regular RQ */
+ mlx5e_page_release_dynamic(xdpi.page.rq, &xdpi.page.di, recycle);
+ break;
+ case MLX5E_XDP_XMIT_MODE_XSK:
+ /* AF_XDP send */
+ (*xsk_frames)++;
+ break;
+ default:
+ WARN_ON_ONCE(true);
}
}
}
-bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
+bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
{
struct mlx5e_xdpsq *sq;
struct mlx5_cqe64 *cqe;
+ u32 xsk_frames = 0;
u16 sqcc;
int i;
@@ -343,10 +440,13 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
sqcc += wi->num_wqebbs;
- mlx5e_free_xdpsq_desc(sq, wi, rq, true);
+ mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, true);
} while (!last_wqe);
} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
+ if (xsk_frames)
+ xsk_umem_complete_tx(sq->umem, xsk_frames);
+
sq->stats->cqes += i;
mlx5_cqwq_update_db_record(&cq->wq);
@@ -358,8 +458,10 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
return (i == MLX5E_TX_CQ_POLL_BUDGET);
}
-void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq)
+void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq)
{
+ u32 xsk_frames = 0;
+
while (sq->cc != sq->pc) {
struct mlx5e_xdp_wqe_info *wi;
u16 ci;
@@ -369,8 +471,11 @@ void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq)
sq->cc += wi->num_wqebbs;
- mlx5e_free_xdpsq_desc(sq, wi, rq, false);
+ mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, false);
}
+
+ if (xsk_frames)
+ xsk_umem_complete_tx(sq->umem, xsk_frames);
}
int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
@@ -398,21 +503,27 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
for (i = 0; i < n; i++) {
struct xdp_frame *xdpf = frames[i];
+ struct mlx5e_xdp_xmit_data xdptxd;
struct mlx5e_xdp_info xdpi;
- xdpi.dma_addr = dma_map_single(sq->pdev, xdpf->data, xdpf->len,
- DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(sq->pdev, xdpi.dma_addr))) {
+ xdptxd.data = xdpf->data;
+ xdptxd.len = xdpf->len;
+ xdptxd.dma_addr = dma_map_single(sq->pdev, xdptxd.data,
+ xdptxd.len, DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(sq->pdev, xdptxd.dma_addr))) {
xdp_return_frame_rx_napi(xdpf);
drops++;
continue;
}
- xdpi.xdpf = xdpf;
+ xdpi.mode = MLX5E_XDP_XMIT_MODE_FRAME;
+ xdpi.frame.xdpf = xdpf;
+ xdpi.frame.dma_addr = xdptxd.dma_addr;
- if (unlikely(!sq->xmit_xdp_frame(sq, &xdpi))) {
- dma_unmap_single(sq->pdev, xdpi.dma_addr,
- xdpf->len, DMA_TO_DEVICE);
+ if (unlikely(!sq->xmit_xdp_frame(sq, &xdptxd, &xdpi, 0))) {
+ dma_unmap_single(sq->pdev, xdptxd.dma_addr,
+ xdptxd.len, DMA_TO_DEVICE);
xdp_return_frame_rx_napi(xdpf);
drops++;
}
@@ -429,7 +540,7 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq)
{
- struct mlx5e_xdpsq *xdpsq = &rq->xdpsq;
+ struct mlx5e_xdpsq *xdpsq = rq->xdpsq;
if (xdpsq->mpwqe.wqe)
mlx5e_xdp_mpwqe_complete(xdpsq);
@@ -444,6 +555,8 @@ void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq)
void mlx5e_set_xmit_fp(struct mlx5e_xdpsq *sq, bool is_mpw)
{
+ sq->xmit_xdp_frame_check = is_mpw ?
+ mlx5e_xmit_xdp_frame_check_mpwqe : mlx5e_xmit_xdp_frame_check;
sq->xmit_xdp_frame = is_mpw ?
mlx5e_xmit_xdp_frame_mpwqe : mlx5e_xmit_xdp_frame;
}
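/* Editorial sketch (not part of the patch): the xmit_xdp_frame callbacks now
 * take a check_result argument so a batching caller can run the room check
 * once and reuse the verdict, while passing 0 makes the callback check on its
 * own. A simplified standalone model of that contract:
 */
#include <stdio.h>

#define CHECK_OK 1

static int check_room(int free_slots)
{
	return free_slots > 0 ? CHECK_OK : -1;	/* -EBUSY in the driver */
}

static int xmit(int free_slots, int check_result)
{
	if (!check_result)		/* caller did not pre-check */
		check_result = check_room(free_slots);
	if (check_result < 0)
		return 0;		/* dropped */
	return 1;			/* sent */
}

int main(void)
{
	printf("pre-checked: %d\n", xmit(3, check_room(3)));
	printf("self-checked, full ring: %d\n", xmit(0, 0));
	return 0;
}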
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
index 8b537a4b0840..b90923932668 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
@@ -33,17 +33,20 @@
#define __MLX5_EN_XDP_H__
#include "en.h"
+#include "en/txrx.h"
#define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
#define MLX5E_XDP_TX_EMPTY_DS_COUNT \
(sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS)
#define MLX5E_XDP_TX_DS_COUNT (MLX5E_XDP_TX_EMPTY_DS_COUNT + 1 /* SG DS */)
-int mlx5e_xdp_max_mtu(struct mlx5e_params *params);
+struct mlx5e_xsk_param;
+int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk);
bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
- void *va, u16 *rx_headroom, u32 *len);
-bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq);
-void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq);
+ void *va, u16 *rx_headroom, u32 *len, bool xsk);
+void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq);
+bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq);
+void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq);
void mlx5e_set_xmit_fp(struct mlx5e_xdpsq *sq, bool is_mpw);
void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq);
int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
@@ -66,6 +69,21 @@ static inline bool mlx5e_xdp_tx_is_enabled(struct mlx5e_priv *priv)
return test_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
}
+static inline void mlx5e_xdp_set_open(struct mlx5e_priv *priv)
+{
+ set_bit(MLX5E_STATE_XDP_OPEN, &priv->state);
+}
+
+static inline void mlx5e_xdp_set_closed(struct mlx5e_priv *priv)
+{
+ clear_bit(MLX5E_STATE_XDP_OPEN, &priv->state);
+}
+
+static inline bool mlx5e_xdp_is_open(struct mlx5e_priv *priv)
+{
+ return test_bit(MLX5E_STATE_XDP_OPEN, &priv->state);
+}
+
static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq)
{
if (sq->doorbell_cseg) {
@@ -97,15 +115,14 @@ static inline void mlx5e_xdp_update_inline_state(struct mlx5e_xdpsq *sq)
}
static inline void
-mlx5e_xdp_mpwqe_add_dseg(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi,
+mlx5e_xdp_mpwqe_add_dseg(struct mlx5e_xdpsq *sq,
+ struct mlx5e_xdp_xmit_data *xdptxd,
struct mlx5e_xdpsq_stats *stats)
{
struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;
- dma_addr_t dma_addr = xdpi->dma_addr;
- struct xdp_frame *xdpf = xdpi->xdpf;
struct mlx5_wqe_data_seg *dseg =
(struct mlx5_wqe_data_seg *)session->wqe + session->ds_count;
- u16 dma_len = xdpf->len;
+ u32 dma_len = xdptxd->len;
session->pkt_count++;
@@ -124,7 +141,7 @@ mlx5e_xdp_mpwqe_add_dseg(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi,
}
inline_dseg->byte_count = cpu_to_be32(dma_len | MLX5_INLINE_SEG);
- memcpy(inline_dseg->data, xdpf->data, dma_len);
+ memcpy(inline_dseg->data, xdptxd->data, dma_len);
session->ds_count += ds_cnt;
stats->inlnw++;
@@ -132,7 +149,7 @@ mlx5e_xdp_mpwqe_add_dseg(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi,
}
no_inline:
- dseg->addr = cpu_to_be64(dma_addr);
+ dseg->addr = cpu_to_be64(xdptxd->dma_addr);
dseg->byte_count = cpu_to_be32(dma_len);
dseg->lkey = sq->mkey_be;
session->ds_count++;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/Makefile
new file mode 100644
index 000000000000..5ee42991900a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/Makefile
@@ -0,0 +1 @@
+subdir-ccflags-y += -I$(src)/../..
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
new file mode 100644
index 000000000000..6a55573ec8f2
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
@@ -0,0 +1,192 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#include "rx.h"
+#include "en/xdp.h"
+#include <net/xdp_sock.h>
+
+/* RX data path */
+
+bool mlx5e_xsk_pages_enough_umem(struct mlx5e_rq *rq, int count)
+{
+ /* Check in advance that we have enough frames, instead of allocating
+ * one-by-one, failing and moving frames to the Reuse Ring.
+ */
+ return xsk_umem_has_addrs_rq(rq->umem, count);
+}
+
+int mlx5e_xsk_page_alloc_umem(struct mlx5e_rq *rq,
+ struct mlx5e_dma_info *dma_info)
+{
+ struct xdp_umem *umem = rq->umem;
+ u64 handle;
+
+ if (!xsk_umem_peek_addr_rq(umem, &handle))
+ return -ENOMEM;
+
+ dma_info->xsk.handle = handle + rq->buff.umem_headroom;
+ dma_info->xsk.data = xdp_umem_get_data(umem, dma_info->xsk.handle);
+
+ /* No need to add headroom to the DMA address. In the striding RQ case,
+ * we just provide pages for UMR, and headroom is counted at the setup
+ * stage when creating a WQE. In the non-striding RQ case, headroom is
+ * accounted for in mlx5e_alloc_rx_wqe.
+ */
+ dma_info->addr = xdp_umem_get_dma(umem, handle);
+
+ xsk_umem_discard_addr_rq(umem);
+
+ dma_sync_single_for_device(rq->pdev, dma_info->addr, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+
+ return 0;
+}
+
+static inline void mlx5e_xsk_recycle_frame(struct mlx5e_rq *rq, u64 handle)
+{
+ xsk_umem_fq_reuse(rq->umem, handle & rq->umem->chunk_mask);
+}
+
+/* The XSK RQ uses pages from the UMEM, so they must not be released. They are
+ * returned to userspace if possible; if not, this function is called to reuse
+ * them in the driver.
+ */
+void mlx5e_xsk_page_release(struct mlx5e_rq *rq,
+ struct mlx5e_dma_info *dma_info)
+{
+ mlx5e_xsk_recycle_frame(rq, dma_info->xsk.handle);
+}
+
+/* Return a frame to the hardware to be filled in again. It is used by XDP when
+ * the XDP program returns XDP_TX or XDP_REDIRECT to a target other than an
+ * XSKMAP.
+ */
+void mlx5e_xsk_zca_free(struct zero_copy_allocator *zca, unsigned long handle)
+{
+ struct mlx5e_rq *rq = container_of(zca, struct mlx5e_rq, zca);
+
+ mlx5e_xsk_recycle_frame(rq, handle);
+}
+
+static struct sk_buff *mlx5e_xsk_construct_skb(struct mlx5e_rq *rq, void *data,
+ u32 cqe_bcnt)
+{
+ struct sk_buff *skb;
+
+ skb = napi_alloc_skb(rq->cq.napi, cqe_bcnt);
+ if (unlikely(!skb)) {
+ rq->stats->buff_alloc_err++;
+ return NULL;
+ }
+
+ skb_put_data(skb, data, cqe_bcnt);
+
+ return skb;
+}
+
+struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
+ struct mlx5e_mpw_info *wi,
+ u16 cqe_bcnt,
+ u32 head_offset,
+ u32 page_idx)
+{
+ struct mlx5e_dma_info *di = &wi->umr.dma_info[page_idx];
+ u16 rx_headroom = rq->buff.headroom - rq->buff.umem_headroom;
+ u32 cqe_bcnt32 = cqe_bcnt;
+ void *va, *data;
+ u32 frag_size;
+ bool consumed;
+
+ /* Check packet size. Note LRO doesn't use linear SKB */
+ if (unlikely(cqe_bcnt > rq->hw_mtu)) {
+ rq->stats->oversize_pkts_sw_drop++;
+ return NULL;
+ }
+
+ /* head_offset is not used in this function, because di->xsk.data and
+ * di->addr point directly to the necessary place. Furthermore, in the
+ * current implementation, one page = one packet = one frame, so
+ * head_offset should always be 0.
+ */
+ WARN_ON_ONCE(head_offset);
+
+ va = di->xsk.data;
+ data = va + rx_headroom;
+ frag_size = rq->buff.headroom + cqe_bcnt32;
+
+ dma_sync_single_for_cpu(rq->pdev, di->addr, frag_size, DMA_BIDIRECTIONAL);
+ prefetch(data);
+
+ rcu_read_lock();
+ consumed = mlx5e_xdp_handle(rq, di, va, &rx_headroom, &cqe_bcnt32, true);
+ rcu_read_unlock();
+
+ /* Possible flows:
+ * - XDP_REDIRECT to XSKMAP:
+ * The page is owned by userspace from now on.
+ * - XDP_TX and other XDP_REDIRECTs:
+ * The page was returned by ZCA and recycled.
+ * - XDP_DROP:
+ * Recycle the page.
+ * - XDP_PASS:
+ * Allocate an SKB, copy the data and recycle the page.
+ *
+ * Pages to be recycled go to the Reuse Ring on MPWQE deallocation. Its
+ * size is the same as the Driver RX Ring's size, and pages for WQEs are
+ * allocated first from the Reuse Ring, so it has enough space.
+ */
+
+ if (likely(consumed)) {
+ if (likely(__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)))
+ __set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
+ return NULL; /* page/packet was consumed by XDP */
+ }
+
+ /* XDP_PASS: copy the data from the UMEM to a new SKB and reuse the
+ * frame. On SKB allocation failure, NULL is returned.
+ */
+ return mlx5e_xsk_construct_skb(rq, data, cqe_bcnt32);
+}
+
+struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
+ struct mlx5_cqe64 *cqe,
+ struct mlx5e_wqe_frag_info *wi,
+ u32 cqe_bcnt)
+{
+ struct mlx5e_dma_info *di = wi->di;
+ u16 rx_headroom = rq->buff.headroom - rq->buff.umem_headroom;
+ void *va, *data;
+ bool consumed;
+ u32 frag_size;
+
+ /* wi->offset is not used in this function, because di->xsk.data and
+ * di->addr point directly to the necessary place. Furthermore, in the
+ * current implementation, one page = one packet = one frame, so
+ * wi->offset should always be 0.
+ */
+ WARN_ON_ONCE(wi->offset);
+
+ va = di->xsk.data;
+ data = va + rx_headroom;
+ frag_size = rq->buff.headroom + cqe_bcnt;
+
+ dma_sync_single_for_cpu(rq->pdev, di->addr, frag_size, DMA_BIDIRECTIONAL);
+ prefetch(data);
+
+ if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)) {
+ rq->stats->wqe_err++;
+ return NULL;
+ }
+
+ rcu_read_lock();
+ consumed = mlx5e_xdp_handle(rq, di, va, &rx_headroom, &cqe_bcnt, true);
+ rcu_read_unlock();
+
+ if (likely(consumed))
+ return NULL; /* page/packet was consumed by XDP */
+
+ /* XDP_PASS: copy the data from the UMEM to a new SKB. The frame reuse
+ * will be handled by mlx5e_put_rx_frag.
+ * On SKB allocation failure, NULL is returned.
+ */
+ return mlx5e_xsk_construct_skb(rq, data, cqe_bcnt);
+}
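/* Editorial sketch (not part of the patch): in the XSK RX path above the UMEM
 * headroom is already folded into di->xsk.data, so only the remaining driver
 * headroom is added before the packet data, while the DMA sync still covers
 * the full headroom plus the packet. The byte counts below are example values,
 * not the driver's defaults.
 */
#include <stdio.h>

int main(void)
{
	unsigned int total_headroom = 256, umem_headroom = 64, cqe_bcnt = 1500;

	unsigned int rx_headroom = total_headroom - umem_headroom;
	unsigned int frag_size = total_headroom + cqe_bcnt;	/* DMA sync length */

	printf("data offset from va: %u, dma sync length: %u\n",
	       rx_headroom, frag_size);
	return 0;
}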
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
new file mode 100644
index 000000000000..307b923a1361
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#ifndef __MLX5_EN_XSK_RX_H__
+#define __MLX5_EN_XSK_RX_H__
+
+#include "en.h"
+
+/* RX data path */
+
+bool mlx5e_xsk_pages_enough_umem(struct mlx5e_rq *rq, int count);
+int mlx5e_xsk_page_alloc_umem(struct mlx5e_rq *rq,
+ struct mlx5e_dma_info *dma_info);
+void mlx5e_xsk_page_release(struct mlx5e_rq *rq,
+ struct mlx5e_dma_info *dma_info);
+void mlx5e_xsk_zca_free(struct zero_copy_allocator *zca, unsigned long handle);
+struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
+ struct mlx5e_mpw_info *wi,
+ u16 cqe_bcnt,
+ u32 head_offset,
+ u32 page_idx);
+struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
+ struct mlx5_cqe64 *cqe,
+ struct mlx5e_wqe_frag_info *wi,
+ u32 cqe_bcnt);
+
+#endif /* __MLX5_EN_XSK_RX_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
new file mode 100644
index 000000000000..aaffa6f68dc0
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -0,0 +1,223 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#include "setup.h"
+#include "en/params.h"
+
+bool mlx5e_validate_xsk_param(struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk,
+ struct mlx5_core_dev *mdev)
+{
+ /* AF_XDP doesn't support frames larger than PAGE_SIZE, and the current
+ * mlx5e XDP implementation doesn't support multiple packets per page.
+ */
+ if (xsk->chunk_size != PAGE_SIZE)
+ return false;
+
+ /* The current MTU and XSK headroom must allow packets to fit in the frames. */
+ if (mlx5e_rx_get_linear_frag_sz(params, xsk) > xsk->chunk_size)
+ return false;
+
+ /* frag_sz is different for regular and XSK RQs, so ensure that linear
+ * SKB mode is possible.
+ */
+ switch (params->rq_wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ return mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk);
+ default: /* MLX5_WQ_TYPE_CYCLIC */
+ return mlx5e_rx_is_linear_skb(params, xsk);
+ }
+}
+
+static void mlx5e_build_xskicosq_param(struct mlx5e_priv *priv,
+ u8 log_wq_size,
+ struct mlx5e_sq_param *param)
+{
+ void *sqc = param->sqc;
+ void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+ mlx5e_build_sq_param_common(priv, param);
+
+ MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
+}
+
+static void mlx5e_build_xsk_cparam(struct mlx5e_priv *priv,
+ struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk,
+ struct mlx5e_channel_param *cparam)
+{
+ const u8 xskicosq_size = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
+
+ mlx5e_build_rq_param(priv, params, xsk, &cparam->rq);
+ mlx5e_build_xdpsq_param(priv, params, &cparam->xdp_sq);
+ mlx5e_build_xskicosq_param(priv, xskicosq_size, &cparam->icosq);
+ mlx5e_build_rx_cq_param(priv, params, xsk, &cparam->rx_cq);
+ mlx5e_build_tx_cq_param(priv, params, &cparam->tx_cq);
+ mlx5e_build_ico_cq_param(priv, xskicosq_size, &cparam->icosq_cq);
+}
+
+int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk, struct xdp_umem *umem,
+ struct mlx5e_channel *c)
+{
+ struct mlx5e_channel_param cparam = {};
+ struct dim_cq_moder icocq_moder = {};
+ int err;
+
+ if (!mlx5e_validate_xsk_param(params, xsk, priv->mdev))
+ return -EINVAL;
+
+ mlx5e_build_xsk_cparam(priv, params, xsk, &cparam);
+
+ err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam.rx_cq, &c->xskrq.cq);
+ if (unlikely(err))
+ return err;
+
+ err = mlx5e_open_rq(c, params, &cparam.rq, xsk, umem, &c->xskrq);
+ if (unlikely(err))
+ goto err_close_rx_cq;
+
+ err = mlx5e_open_cq(c, params->tx_cq_moderation, &cparam.tx_cq, &c->xsksq.cq);
+ if (unlikely(err))
+ goto err_close_rq;
+
+ /* Create a separate SQ, so that when the UMEM is disabled, this SQ can
+ * be closed safely and stop receiving CQEs. Otherwise, e.g. if the
+ * XDPSQ were used instead, we might run into trouble when the UMEM is
+ * disabled and then re-enabled while the SQ keeps receiving CQEs from
+ * the old UMEM.
+ */
+ err = mlx5e_open_xdpsq(c, params, &cparam.xdp_sq, umem, &c->xsksq, true);
+ if (unlikely(err))
+ goto err_close_tx_cq;
+
+ err = mlx5e_open_cq(c, icocq_moder, &cparam.icosq_cq, &c->xskicosq.cq);
+ if (unlikely(err))
+ goto err_close_sq;
+
+ /* Create a dedicated SQ for posting NOPs whenever we need an IRQ to be
+ * triggered and NAPI to be called on the correct CPU.
+ */
+ err = mlx5e_open_icosq(c, params, &cparam.icosq, &c->xskicosq);
+ if (unlikely(err))
+ goto err_close_icocq;
+
+ spin_lock_init(&c->xskicosq_lock);
+
+ set_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
+
+ return 0;
+
+err_close_icocq:
+ mlx5e_close_cq(&c->xskicosq.cq);
+
+err_close_sq:
+ mlx5e_close_xdpsq(&c->xsksq);
+
+err_close_tx_cq:
+ mlx5e_close_cq(&c->xsksq.cq);
+
+err_close_rq:
+ mlx5e_close_rq(&c->xskrq);
+
+err_close_rx_cq:
+ mlx5e_close_cq(&c->xskrq.cq);
+
+ return err;
+}
+
+void mlx5e_close_xsk(struct mlx5e_channel *c)
+{
+ clear_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
+ napi_synchronize(&c->napi);
+
+ mlx5e_close_rq(&c->xskrq);
+ mlx5e_close_cq(&c->xskrq.cq);
+ mlx5e_close_icosq(&c->xskicosq);
+ mlx5e_close_cq(&c->xskicosq.cq);
+ mlx5e_close_xdpsq(&c->xsksq);
+ mlx5e_close_cq(&c->xsksq.cq);
+}
+
+void mlx5e_activate_xsk(struct mlx5e_channel *c)
+{
+ set_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
+ /* TX queue is created active. */
+ mlx5e_trigger_irq(&c->xskicosq);
+}
+
+void mlx5e_deactivate_xsk(struct mlx5e_channel *c)
+{
+ mlx5e_deactivate_rq(&c->xskrq);
+ /* TX queue is disabled on close. */
+}
+
+static int mlx5e_redirect_xsk_rqt(struct mlx5e_priv *priv, u16 ix, u32 rqn)
+{
+ struct mlx5e_redirect_rqt_param direct_rrp = {
+ .is_rss = false,
+ {
+ .rqn = rqn,
+ },
+ };
+
+ u32 rqtn = priv->xsk_tir[ix].rqt.rqtn;
+
+ return mlx5e_redirect_rqt(priv, rqtn, 1, direct_rrp);
+}
+
+int mlx5e_xsk_redirect_rqt_to_channel(struct mlx5e_priv *priv, struct mlx5e_channel *c)
+{
+ return mlx5e_redirect_xsk_rqt(priv, c->ix, c->xskrq.rqn);
+}
+
+int mlx5e_xsk_redirect_rqt_to_drop(struct mlx5e_priv *priv, u16 ix)
+{
+ return mlx5e_redirect_xsk_rqt(priv, ix, priv->drop_rq.rqn);
+}
+
+int mlx5e_xsk_redirect_rqts_to_channels(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
+{
+ int err, i;
+
+ if (!priv->xsk.refcnt)
+ return 0;
+
+ for (i = 0; i < chs->num; i++) {
+ struct mlx5e_channel *c = chs->c[i];
+
+ if (!test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
+ continue;
+
+ err = mlx5e_xsk_redirect_rqt_to_channel(priv, c);
+ if (unlikely(err))
+ goto err_stop;
+ }
+
+ return 0;
+
+err_stop:
+ for (i--; i >= 0; i--) {
+ if (!test_bit(MLX5E_CHANNEL_STATE_XSK, chs->c[i]->state))
+ continue;
+
+ mlx5e_xsk_redirect_rqt_to_drop(priv, i);
+ }
+
+ return err;
+}
+
+void mlx5e_xsk_redirect_rqts_to_drop(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
+{
+ int i;
+
+ if (!priv->xsk.refcnt)
+ return;
+
+ for (i = 0; i < chs->num; i++) {
+ if (!test_bit(MLX5E_CHANNEL_STATE_XSK, chs->c[i]->state))
+ continue;
+
+ mlx5e_xsk_redirect_rqt_to_drop(priv, i);
+ }
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.h
new file mode 100644
index 000000000000..0dd11b81c046
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#ifndef __MLX5_EN_XSK_SETUP_H__
+#define __MLX5_EN_XSK_SETUP_H__
+
+#include "en.h"
+
+struct mlx5e_xsk_param;
+
+bool mlx5e_validate_xsk_param(struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk,
+ struct mlx5_core_dev *mdev);
+int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk, struct xdp_umem *umem,
+ struct mlx5e_channel *c);
+void mlx5e_close_xsk(struct mlx5e_channel *c);
+void mlx5e_activate_xsk(struct mlx5e_channel *c);
+void mlx5e_deactivate_xsk(struct mlx5e_channel *c);
+int mlx5e_xsk_redirect_rqt_to_channel(struct mlx5e_priv *priv, struct mlx5e_channel *c);
+int mlx5e_xsk_redirect_rqt_to_drop(struct mlx5e_priv *priv, u16 ix);
+int mlx5e_xsk_redirect_rqts_to_channels(struct mlx5e_priv *priv, struct mlx5e_channels *chs);
+void mlx5e_xsk_redirect_rqts_to_drop(struct mlx5e_priv *priv, struct mlx5e_channels *chs);
+
+#endif /* __MLX5_EN_XSK_SETUP_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
new file mode 100644
index 000000000000..35e188cf4ea4
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#include "tx.h"
+#include "umem.h"
+#include "en/xdp.h"
+#include "en/params.h"
+#include <net/xdp_sock.h>
+
+int mlx5e_xsk_async_xmit(struct net_device *dev, u32 qid)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_params *params = &priv->channels.params;
+ struct mlx5e_channel *c;
+ u16 ix;
+
+ if (unlikely(!mlx5e_xdp_is_open(priv)))
+ return -ENETDOWN;
+
+ if (unlikely(!mlx5e_qid_get_ch_if_in_group(params, qid, MLX5E_RQ_GROUP_XSK, &ix)))
+ return -EINVAL;
+
+ c = priv->channels.c[ix];
+
+ if (unlikely(!test_bit(MLX5E_CHANNEL_STATE_XSK, c->state)))
+ return -ENXIO;
+
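+ /* If NAPI is not already scheduled, post a NOP on the dedicated XSK
+ * ICOSQ to raise an interrupt and run NAPI on the channel's CPU.
+ */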
+ if (!napi_if_scheduled_mark_missed(&c->napi)) {
+ spin_lock(&c->xskicosq_lock);
+ mlx5e_trigger_irq(&c->xskicosq);
+ spin_unlock(&c->xskicosq_lock);
+ }
+
+ return 0;
+}
+
+/* When TX fails (because of the size of the packet), we need to get completions
+ * in order, so we post a NOP to get a CQE. Since AF_XDP doesn't distinguish
+ * between successful TX and errors, the handling in mlx5e_poll_xdpsq_cq is the
+ * same for both.
+ */
+static void mlx5e_xsk_tx_post_err(struct mlx5e_xdpsq *sq,
+ struct mlx5e_xdp_info *xdpi)
+{
+ u16 pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
+ struct mlx5e_xdp_wqe_info *wi = &sq->db.wqe_info[pi];
+ struct mlx5e_tx_wqe *nopwqe;
+
+ wi->num_wqebbs = 1;
+ wi->num_pkts = 1;
+
+ nopwqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
+ mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, xdpi);
+ sq->doorbell_cseg = &nopwqe->ctrl;
+}
+
+bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget)
+{
+ struct xdp_umem *umem = sq->umem;
+ struct mlx5e_xdp_info xdpi;
+ struct mlx5e_xdp_xmit_data xdptxd;
+ bool work_done = true;
+ bool flush = false;
+
+ xdpi.mode = MLX5E_XDP_XMIT_MODE_XSK;
+
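+ /* Consume up to @budget descriptors from the UMEM TX ring and hand them
+ * to the XDP SQ, stopping early if the SQ check fails or the ring runs
+ * empty.
+ */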
+ for (; budget; budget--) {
+ int check_result = sq->xmit_xdp_frame_check(sq);
+ struct xdp_desc desc;
+
+ if (unlikely(check_result < 0)) {
+ work_done = false;
+ break;
+ }
+
+ if (!xsk_umem_consume_tx(umem, &desc)) {
+ /* TX will get stuck until something wakes it up by
+ * triggering NAPI. Currently it's expected that the
+ * application calls sendto() when there are consumed but
+ * not yet completed frames.
+ */
+ break;
+ }
+
+ xdptxd.dma_addr = xdp_umem_get_dma(umem, desc.addr);
+ xdptxd.data = xdp_umem_get_data(umem, desc.addr);
+ xdptxd.len = desc.len;
+
+ dma_sync_single_for_device(sq->pdev, xdptxd.dma_addr,
+ xdptxd.len, DMA_BIDIRECTIONAL);
+
+ if (unlikely(!sq->xmit_xdp_frame(sq, &xdptxd, &xdpi, check_result))) {
+ if (sq->mpwqe.wqe)
+ mlx5e_xdp_mpwqe_complete(sq);
+
+ mlx5e_xsk_tx_post_err(sq, &xdpi);
+ }
+
+ flush = true;
+ }
+
+ if (flush) {
+ if (sq->mpwqe.wqe)
+ mlx5e_xdp_mpwqe_complete(sq);
+ mlx5e_xmit_xdp_doorbell(sq);
+
+ xsk_umem_consume_tx_done(umem);
+ }
+
+ return !(budget && work_done);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h
new file mode 100644
index 000000000000..7add18bf78d8
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#ifndef __MLX5_EN_XSK_TX_H__
+#define __MLX5_EN_XSK_TX_H__
+
+#include "en.h"
+
+/* TX data path */
+
+int mlx5e_xsk_async_xmit(struct net_device *dev, u32 qid);
+
+bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget);
+
+#endif /* __MLX5_EN_XSK_TX_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c
new file mode 100644
index 000000000000..4baaa5788320
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c
@@ -0,0 +1,267 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#include <net/xdp_sock.h>
+#include "umem.h"
+#include "setup.h"
+#include "en/params.h"
+
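+/* DMA-map every page of the UMEM for the device. On failure, unmap the pages
+ * mapped so far and return -ENOMEM.
+ */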
+static int mlx5e_xsk_map_umem(struct mlx5e_priv *priv,
+ struct xdp_umem *umem)
+{
+ struct device *dev = priv->mdev->device;
+ u32 i;
+
+ for (i = 0; i < umem->npgs; i++) {
+ dma_addr_t dma = dma_map_page(dev, umem->pgs[i], 0, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+
+ if (unlikely(dma_mapping_error(dev, dma)))
+ goto err_unmap;
+ umem->pages[i].dma = dma;
+ }
+
+ return 0;
+
+err_unmap:
+ while (i--) {
+ dma_unmap_page(dev, umem->pages[i].dma, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ umem->pages[i].dma = 0;
+ }
+
+ return -ENOMEM;
+}
+
+static void mlx5e_xsk_unmap_umem(struct mlx5e_priv *priv,
+ struct xdp_umem *umem)
+{
+ struct device *dev = priv->mdev->device;
+ u32 i;
+
+ for (i = 0; i < umem->npgs; i++) {
+ dma_unmap_page(dev, umem->pages[i].dma, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ umem->pages[i].dma = 0;
+ }
+}
+
+static int mlx5e_xsk_get_umems(struct mlx5e_xsk *xsk)
+{
+ if (!xsk->umems) {
+ xsk->umems = kcalloc(MLX5E_MAX_NUM_CHANNELS,
+ sizeof(*xsk->umems), GFP_KERNEL);
+ if (unlikely(!xsk->umems))
+ return -ENOMEM;
+ }
+
+ xsk->refcnt++;
+ xsk->ever_used = true;
+
+ return 0;
+}
+
+static void mlx5e_xsk_put_umems(struct mlx5e_xsk *xsk)
+{
+ if (!--xsk->refcnt) {
+ kfree(xsk->umems);
+ xsk->umems = NULL;
+ }
+}
+
+static int mlx5e_xsk_add_umem(struct mlx5e_xsk *xsk, struct xdp_umem *umem, u16 ix)
+{
+ int err;
+
+ err = mlx5e_xsk_get_umems(xsk);
+ if (unlikely(err))
+ return err;
+
+ xsk->umems[ix] = umem;
+ return 0;
+}
+
+static void mlx5e_xsk_remove_umem(struct mlx5e_xsk *xsk, u16 ix)
+{
+ xsk->umems[ix] = NULL;
+
+ mlx5e_xsk_put_umems(xsk);
+}
+
+static bool mlx5e_xsk_is_umem_sane(struct xdp_umem *umem)
+{
+ return umem->headroom <= 0xffff && umem->chunk_size_nohr <= 0xffff;
+}
+
+void mlx5e_build_xsk_param(struct xdp_umem *umem, struct mlx5e_xsk_param *xsk)
+{
+ xsk->headroom = umem->headroom;
+ xsk->chunk_size = umem->chunk_size_nohr + umem->headroom;
+}
+
+static int mlx5e_xsk_enable_locked(struct mlx5e_priv *priv,
+ struct xdp_umem *umem, u16 ix)
+{
+ struct mlx5e_params *params = &priv->channels.params;
+ struct mlx5e_xsk_param xsk;
+ struct mlx5e_channel *c;
+ int err;
+
+ if (unlikely(mlx5e_xsk_get_umem(&priv->channels.params, &priv->xsk, ix)))
+ return -EBUSY;
+
+ if (unlikely(!mlx5e_xsk_is_umem_sane(umem)))
+ return -EINVAL;
+
+ err = mlx5e_xsk_map_umem(priv, umem);
+ if (unlikely(err))
+ return err;
+
+ err = mlx5e_xsk_add_umem(&priv->xsk, umem, ix);
+ if (unlikely(err))
+ goto err_unmap_umem;
+
+ mlx5e_build_xsk_param(umem, &xsk);
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ /* XSK objects will be created on open. */
+ goto validate_closed;
+ }
+
+ if (!params->xdp_prog) {
+ /* XSK objects will be created when an XDP program is set,
+ * and the channels are reopened.
+ */
+ goto validate_closed;
+ }
+
+ c = priv->channels.c[ix];
+
+ err = mlx5e_open_xsk(priv, params, &xsk, umem, c);
+ if (unlikely(err))
+ goto err_remove_umem;
+
+ mlx5e_activate_xsk(c);
+
+ /* Don't wait for WQEs, because the newer xdpsock sample doesn't provide
+ * any Fill Ring entries at the setup stage.
+ */
+
+ err = mlx5e_xsk_redirect_rqt_to_channel(priv, priv->channels.c[ix]);
+ if (unlikely(err))
+ goto err_deactivate;
+
+ return 0;
+
+err_deactivate:
+ mlx5e_deactivate_xsk(c);
+ mlx5e_close_xsk(c);
+
+err_remove_umem:
+ mlx5e_xsk_remove_umem(&priv->xsk, ix);
+
+err_unmap_umem:
+ mlx5e_xsk_unmap_umem(priv, umem);
+
+ return err;
+
+validate_closed:
+ /* Check the configuration in advance, rather than fail at a later stage
+ * (in mlx5e_xdp_set or on open) and end up with no channels.
+ */
+ if (!mlx5e_validate_xsk_param(params, &xsk, priv->mdev)) {
+ err = -EINVAL;
+ goto err_remove_umem;
+ }
+
+ return 0;
+}
+
+static int mlx5e_xsk_disable_locked(struct mlx5e_priv *priv, u16 ix)
+{
+ struct xdp_umem *umem = mlx5e_xsk_get_umem(&priv->channels.params,
+ &priv->xsk, ix);
+ struct mlx5e_channel *c;
+
+ if (unlikely(!umem))
+ return -EINVAL;
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ goto remove_umem;
+
+ /* XSK RQ and SQ are only created if XDP program is set. */
+ if (!priv->channels.params.xdp_prog)
+ goto remove_umem;
+
+ c = priv->channels.c[ix];
+ mlx5e_xsk_redirect_rqt_to_drop(priv, ix);
+ mlx5e_deactivate_xsk(c);
+ mlx5e_close_xsk(c);
+
+remove_umem:
+ mlx5e_xsk_remove_umem(&priv->xsk, ix);
+ mlx5e_xsk_unmap_umem(priv, umem);
+
+ return 0;
+}
+
+static int mlx5e_xsk_enable_umem(struct mlx5e_priv *priv, struct xdp_umem *umem,
+ u16 ix)
+{
+ int err;
+
+ mutex_lock(&priv->state_lock);
+ err = mlx5e_xsk_enable_locked(priv, umem, ix);
+ mutex_unlock(&priv->state_lock);
+
+ return err;
+}
+
+static int mlx5e_xsk_disable_umem(struct mlx5e_priv *priv, u16 ix)
+{
+ int err;
+
+ mutex_lock(&priv->state_lock);
+ err = mlx5e_xsk_disable_locked(priv, ix);
+ mutex_unlock(&priv->state_lock);
+
+ return err;
+}
+
+int mlx5e_xsk_setup_umem(struct net_device *dev, struct xdp_umem *umem, u16 qid)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_params *params = &priv->channels.params;
+ u16 ix;
+
+ if (unlikely(!mlx5e_qid_get_ch_if_in_group(params, qid, MLX5E_RQ_GROUP_XSK, &ix)))
+ return -EINVAL;
+
+ return umem ? mlx5e_xsk_enable_umem(priv, umem, ix) :
+ mlx5e_xsk_disable_umem(priv, ix);
+}
+
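+/* Replace the UMEM's fill queue reuse ring with one sized for @nentries
+ * frames, freeing the old ring.
+ */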
+int mlx5e_xsk_resize_reuseq(struct xdp_umem *umem, u32 nentries)
+{
+ struct xdp_umem_fq_reuse *reuseq;
+
+ reuseq = xsk_reuseq_prepare(nentries);
+ if (unlikely(!reuseq))
+ return -ENOMEM;
+ xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq));
+
+ return 0;
+}
+
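+/* Return the lowest channel count that still covers every channel with an
+ * active UMEM: channels with index >= the returned value have no UMEM.
+ */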
+u16 mlx5e_xsk_first_unused_channel(struct mlx5e_params *params, struct mlx5e_xsk *xsk)
+{
+ u16 res = xsk->refcnt ? params->num_channels : 0;
+
+ while (res) {
+ if (mlx5e_xsk_get_umem(params, xsk, res - 1))
+ break;
+ --res;
+ }
+
+ return res;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.h
new file mode 100644
index 000000000000..25b4cbe58b54
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#ifndef __MLX5_EN_XSK_UMEM_H__
+#define __MLX5_EN_XSK_UMEM_H__
+
+#include "en.h"
+
+static inline struct xdp_umem *mlx5e_xsk_get_umem(struct mlx5e_params *params,
+ struct mlx5e_xsk *xsk, u16 ix)
+{
+ if (!xsk || !xsk->umems)
+ return NULL;
+
+ if (unlikely(ix >= params->num_channels))
+ return NULL;
+
+ return xsk->umems[ix];
+}
+
+struct mlx5e_xsk_param;
+void mlx5e_build_xsk_param(struct xdp_umem *umem, struct mlx5e_xsk_param *xsk);
+
+/* .ndo_bpf callback. */
+int mlx5e_xsk_setup_umem(struct net_device *dev, struct xdp_umem *umem, u16 qid);
+
+int mlx5e_xsk_resize_reuseq(struct xdp_umem *umem, u32 nentries);
+
+u16 mlx5e_xsk_first_unused_channel(struct mlx5e_params *params, struct mlx5e_xsk *xsk);
+
+#endif /* __MLX5_EN_XSK_UMEM_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
index 6da7c88742dc..3022463f2284 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -39,6 +39,7 @@
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/tls_rxtx.h"
#include "en.h"
+#include "en/txrx.h"
#if IS_ENABLED(CONFIG_GENEVE)
static inline bool mlx5_geneve_tx_allowed(struct mlx5_core_dev *mdev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
index ca47c0540904..db84500b024f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
@@ -39,6 +39,7 @@
#include <linux/skbuff.h>
#include <net/xfrm.h>
#include "en.h"
+#include "en/txrx.h"
struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
struct sk_buff *skb, u32 *cqe_bcnt);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
new file mode 100644
index 000000000000..d2ff74d52720
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2019 Mellanox Technologies.
+
+#include "en.h"
+#include "en_accel/ktls.h"
+
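+/* Create a TIS with tls_en set. The returned TISN is written into the ctrl
+ * segment of the connection's TX WQEs (see ktls_tx.c).
+ */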
+static int mlx5e_ktls_create_tis(struct mlx5_core_dev *mdev, u32 *tisn)
+{
+ u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
+ void *tisc;
+
+ tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
+
+ MLX5_SET(tisc, tisc, tls_en, 1);
+
+ return mlx5e_create_tis(mdev, in, tisn);
+}
+
+static int mlx5e_ktls_add(struct net_device *netdev, struct sock *sk,
+ enum tls_offload_ctx_dir direction,
+ struct tls_crypto_info *crypto_info,
+ u32 start_offload_tcp_sn)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_ktls_offload_context_tx *tx_priv;
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int err;
+
+ if (WARN_ON(direction != TLS_OFFLOAD_CTX_DIR_TX))
+ return -EINVAL;
+
+ if (WARN_ON(!mlx5e_ktls_type_check(mdev, crypto_info)))
+ return -EOPNOTSUPP;
+
+ tx_priv = kvzalloc(sizeof(*tx_priv), GFP_KERNEL);
+ if (!tx_priv)
+ return -ENOMEM;
+
+ tx_priv->expected_seq = start_offload_tcp_sn;
+ tx_priv->crypto_info = crypto_info;
+ mlx5e_set_ktls_tx_priv_ctx(tls_ctx, tx_priv);
+
+ /* The tc and underlay_qpn values are not in use for a TLS TIS. */
+ err = mlx5e_ktls_create_tis(mdev, &tx_priv->tisn);
+ if (err)
+ goto create_tis_fail;
+
+ err = mlx5_ktls_create_key(mdev, crypto_info, &tx_priv->key_id);
+ if (err)
+ goto encryption_key_create_fail;
+
+ mlx5e_ktls_tx_offload_set_pending(tx_priv);
+
+ return 0;
+
+encryption_key_create_fail:
+ mlx5e_destroy_tis(priv->mdev, tx_priv->tisn);
+create_tis_fail:
+ kvfree(tx_priv);
+ return err;
+}
+
+static void mlx5e_ktls_del(struct net_device *netdev,
+ struct tls_context *tls_ctx,
+ enum tls_offload_ctx_dir direction)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_ktls_offload_context_tx *tx_priv =
+ mlx5e_get_ktls_tx_priv_ctx(tls_ctx);
+
+ mlx5_ktls_destroy_key(priv->mdev, tx_priv->key_id);
+ mlx5e_destroy_tis(priv->mdev, tx_priv->tisn);
+ kvfree(tx_priv);
+}
+
+static const struct tlsdev_ops mlx5e_ktls_ops = {
+ .tls_dev_add = mlx5e_ktls_add,
+ .tls_dev_del = mlx5e_ktls_del,
+};
+
+void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv)
+{
+ struct net_device *netdev = priv->netdev;
+
+ if (!mlx5_accel_is_ktls_device(priv->mdev))
+ return;
+
+ netdev->hw_features |= NETIF_F_HW_TLS_TX;
+ netdev->features |= NETIF_F_HW_TLS_TX;
+
+ netdev->tlsdev_ops = &mlx5e_ktls_ops;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
new file mode 100644
index 000000000000..407da83474ef
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#ifndef __MLX5E_KTLS_H__
+#define __MLX5E_KTLS_H__
+
+#include "en.h"
+
+#ifdef CONFIG_MLX5_EN_TLS
+#include <net/tls.h>
+#include "accel/tls.h"
+
+#define MLX5E_KTLS_STATIC_UMR_WQE_SZ \
+ (sizeof(struct mlx5e_umr_wqe) + MLX5_ST_SZ_BYTES(tls_static_params))
+#define MLX5E_KTLS_STATIC_WQEBBS \
+ (DIV_ROUND_UP(MLX5E_KTLS_STATIC_UMR_WQE_SZ, MLX5_SEND_WQE_BB))
+
+#define MLX5E_KTLS_PROGRESS_WQE_SZ \
+ (sizeof(struct mlx5e_tx_wqe) + MLX5_ST_SZ_BYTES(tls_progress_params))
+#define MLX5E_KTLS_PROGRESS_WQEBBS \
+ (DIV_ROUND_UP(MLX5E_KTLS_PROGRESS_WQE_SZ, MLX5_SEND_WQE_BB))
+#define MLX5E_KTLS_MAX_DUMP_WQEBBS 2
+
+enum {
+ MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD = 0,
+ MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_OFFLOAD = 1,
+ MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_AUTHENTICATION = 2,
+};
+
+enum {
+ MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_START = 0,
+ MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_SEARCHING = 1,
+ MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING = 2,
+};
+
+struct mlx5e_ktls_offload_context_tx {
+ struct tls_offload_context_tx *tx_ctx;
+ struct tls_crypto_info *crypto_info;
+ u32 expected_seq;
+ u32 tisn;
+ u32 key_id;
+ bool ctx_post_pending;
+};
+
+struct mlx5e_ktls_offload_context_tx_shadow {
+ struct tls_offload_context_tx tx_ctx;
+ struct mlx5e_ktls_offload_context_tx *priv_tx;
+};
+
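+/* The shadow struct overlays the tls_offload_context_tx allocation, so the
+ * driver's priv_tx pointer is stored right after the generic tx_ctx (the
+ * BUILD_BUG_ONs below ensure it fits in TLS_OFFLOAD_CONTEXT_SIZE_TX).
+ */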
+static inline void
+mlx5e_set_ktls_tx_priv_ctx(struct tls_context *tls_ctx,
+ struct mlx5e_ktls_offload_context_tx *priv_tx)
+{
+ struct tls_offload_context_tx *tx_ctx = tls_offload_ctx_tx(tls_ctx);
+ struct mlx5e_ktls_offload_context_tx_shadow *shadow;
+
+ BUILD_BUG_ON(sizeof(*shadow) > TLS_OFFLOAD_CONTEXT_SIZE_TX);
+
+ shadow = (struct mlx5e_ktls_offload_context_tx_shadow *)tx_ctx;
+
+ shadow->priv_tx = priv_tx;
+ priv_tx->tx_ctx = tx_ctx;
+}
+
+static inline struct mlx5e_ktls_offload_context_tx *
+mlx5e_get_ktls_tx_priv_ctx(struct tls_context *tls_ctx)
+{
+ struct tls_offload_context_tx *tx_ctx = tls_offload_ctx_tx(tls_ctx);
+ struct mlx5e_ktls_offload_context_tx_shadow *shadow;
+
+ BUILD_BUG_ON(sizeof(*shadow) > TLS_OFFLOAD_CONTEXT_SIZE_TX);
+
+ shadow = (struct mlx5e_ktls_offload_context_tx_shadow *)tx_ctx;
+
+ return shadow->priv_tx;
+}
+
+void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv);
+void mlx5e_ktls_tx_offload_set_pending(struct mlx5e_ktls_offload_context_tx *priv_tx);
+
+struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
+ struct mlx5e_txqsq *sq,
+ struct sk_buff *skb,
+ struct mlx5e_tx_wqe **wqe, u16 *pi);
+void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
+ struct mlx5e_tx_wqe_info *wi,
+ struct mlx5e_sq_dma *dma);
+
+#else
+
+static inline void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv)
+{
+}
+
+#endif
+
+#endif /* __MLX5E_KTLS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
new file mode 100644
index 000000000000..5c08891806f0
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -0,0 +1,460 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2019 Mellanox Technologies.
+
+#include <linux/tls.h>
+#include "en.h"
+#include "en/txrx.h"
+#include "en_accel/ktls.h"
+
+enum {
+ MLX5E_STATIC_PARAMS_CONTEXT_TLS_1_2 = 0x2,
+};
+
+enum {
+ MLX5E_ENCRYPTION_STANDARD_TLS = 0x1,
+};
+
+#define EXTRACT_INFO_FIELDS do { \
+ salt = info->salt; \
+ rec_seq = info->rec_seq; \
+ salt_sz = sizeof(info->salt); \
+ rec_seq_sz = sizeof(info->rec_seq); \
+} while (0)
+
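+/* Fill the TLS static params context: the cipher salt becomes the GCM IV, the
+ * record sequence number seeds the initial record number, and the DEK index
+ * points at the connection's key.
+ */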
+static void
+fill_static_params_ctx(void *ctx, struct mlx5e_ktls_offload_context_tx *priv_tx)
+{
+ struct tls_crypto_info *crypto_info = priv_tx->crypto_info;
+ char *initial_rn, *gcm_iv;
+ u16 salt_sz, rec_seq_sz;
+ char *salt, *rec_seq;
+ u8 tls_version;
+
+ switch (crypto_info->cipher_type) {
+ case TLS_CIPHER_AES_GCM_128: {
+ struct tls12_crypto_info_aes_gcm_128 *info =
+ (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
+
+ EXTRACT_INFO_FIELDS;
+ break;
+ }
+ default:
+ WARN_ON(1);
+ return;
+ }
+
+ gcm_iv = MLX5_ADDR_OF(tls_static_params, ctx, gcm_iv);
+ initial_rn = MLX5_ADDR_OF(tls_static_params, ctx, initial_record_number);
+
+ memcpy(gcm_iv, salt, salt_sz);
+ memcpy(initial_rn, rec_seq, rec_seq_sz);
+
+ tls_version = MLX5E_STATIC_PARAMS_CONTEXT_TLS_1_2;
+
+ MLX5_SET(tls_static_params, ctx, tls_version, tls_version);
+ MLX5_SET(tls_static_params, ctx, const_1, 1);
+ MLX5_SET(tls_static_params, ctx, const_2, 2);
+ MLX5_SET(tls_static_params, ctx, encryption_standard,
+ MLX5E_ENCRYPTION_STANDARD_TLS);
+ MLX5_SET(tls_static_params, ctx, dek_index, priv_tx->key_id);
+}
+
+static void
+build_static_params(struct mlx5e_umr_wqe *wqe, u16 pc, u32 sqn,
+ struct mlx5e_ktls_offload_context_tx *priv_tx,
+ bool fence)
+{
+ struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
+ struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
+
+#define STATIC_PARAMS_DS_CNT \
+ DIV_ROUND_UP(MLX5E_KTLS_STATIC_UMR_WQE_SZ, MLX5_SEND_WQE_DS)
+
+ cseg->opmod_idx_opcode = cpu_to_be32((pc << 8) | MLX5_OPCODE_UMR |
+ (MLX5_OPC_MOD_TLS_TIS_STATIC_PARAMS << 24));
+ cseg->qpn_ds = cpu_to_be32((sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
+ STATIC_PARAMS_DS_CNT);
+ cseg->fm_ce_se = fence ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;
+ cseg->imm = cpu_to_be32(priv_tx->tisn);
+
+ ucseg->flags = MLX5_UMR_INLINE;
+ ucseg->bsf_octowords = cpu_to_be16(MLX5_ST_SZ_BYTES(tls_static_params) / 16);
+
+ fill_static_params_ctx(wqe->tls_static_params_ctx, priv_tx);
+}
+
+static void
+fill_progress_params_ctx(void *ctx, struct mlx5e_ktls_offload_context_tx *priv_tx)
+{
+ MLX5_SET(tls_progress_params, ctx, pd, priv_tx->tisn);
+ MLX5_SET(tls_progress_params, ctx, record_tracker_state,
+ MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_START);
+ MLX5_SET(tls_progress_params, ctx, auth_state,
+ MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD);
+}
+
+static void
+build_progress_params(struct mlx5e_tx_wqe *wqe, u16 pc, u32 sqn,
+ struct mlx5e_ktls_offload_context_tx *priv_tx,
+ bool fence)
+{
+ struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
+
+#define PROGRESS_PARAMS_DS_CNT \
+ DIV_ROUND_UP(MLX5E_KTLS_PROGRESS_WQE_SZ, MLX5_SEND_WQE_DS)
+
+ cseg->opmod_idx_opcode =
+ cpu_to_be32((pc << 8) | MLX5_OPCODE_SET_PSV |
+ (MLX5_OPC_MOD_TLS_TIS_PROGRESS_PARAMS << 24));
+ cseg->qpn_ds = cpu_to_be32((sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
+ PROGRESS_PARAMS_DS_CNT);
+ cseg->fm_ce_se = fence ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;
+
+ fill_progress_params_ctx(wqe->data, priv_tx);
+}
+
+static void tx_fill_wi(struct mlx5e_txqsq *sq,
+ u16 pi, u8 num_wqebbs,
+ skb_frag_t *resync_dump_frag)
+{
+ struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];
+
+ wi->skb = NULL;
+ wi->num_wqebbs = num_wqebbs;
+ wi->resync_dump_frag = resync_dump_frag;
+}
+
+void mlx5e_ktls_tx_offload_set_pending(struct mlx5e_ktls_offload_context_tx *priv_tx)
+{
+ priv_tx->ctx_post_pending = true;
+}
+
+static bool
+mlx5e_ktls_tx_offload_test_and_clear_pending(struct mlx5e_ktls_offload_context_tx *priv_tx)
+{
+ bool ret = priv_tx->ctx_post_pending;
+
+ priv_tx->ctx_post_pending = false;
+
+ return ret;
+}
+
+static void
+post_static_params(struct mlx5e_txqsq *sq,
+ struct mlx5e_ktls_offload_context_tx *priv_tx,
+ bool fence)
+{
+ struct mlx5e_umr_wqe *umr_wqe;
+ u16 pi;
+
+ umr_wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_STATIC_UMR_WQE_SZ, &pi);
+ build_static_params(umr_wqe, sq->pc, sq->sqn, priv_tx, fence);
+ tx_fill_wi(sq, pi, MLX5E_KTLS_STATIC_WQEBBS, NULL);
+ sq->pc += MLX5E_KTLS_STATIC_WQEBBS;
+}
+
+static void
+post_progress_params(struct mlx5e_txqsq *sq,
+ struct mlx5e_ktls_offload_context_tx *priv_tx,
+ bool fence)
+{
+ struct mlx5e_tx_wqe *wqe;
+ u16 pi;
+
+ wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_PROGRESS_WQE_SZ, &pi);
+ build_progress_params(wqe, sq->pc, sq->sqn, priv_tx, fence);
+ tx_fill_wi(sq, pi, MLX5E_KTLS_PROGRESS_WQEBBS, NULL);
+ sq->pc += MLX5E_KTLS_PROGRESS_WQEBBS;
+}
+
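+/* Post the static params WQE (unless skip_static_post) followed by the
+ * progress params WQE for this connection's TIS.
+ */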
+static void
+mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq,
+ struct mlx5e_ktls_offload_context_tx *priv_tx,
+ bool skip_static_post, bool fence_first_post)
+{
+ bool progress_fence = skip_static_post || !fence_first_post;
+
+ if (!skip_static_post)
+ post_static_params(sq, priv_tx, fence_first_post);
+
+ post_progress_params(sq, priv_tx, progress_fence);
+}
+
+struct tx_sync_info {
+ u64 rcd_sn;
+ s32 sync_len;
+ int nr_frags;
+ skb_frag_t *frags[MAX_SKB_FRAGS];
+};
+
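+/* Look up the TLS record that covers @tcp_seq and collect the frags that need
+ * to be DUMPed so the HW can resync; sync_len is the number of record bytes
+ * preceding @tcp_seq.
+ */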
+static bool tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
+ u32 tcp_seq, struct tx_sync_info *info)
+{
+ struct tls_offload_context_tx *tx_ctx = priv_tx->tx_ctx;
+ struct tls_record_info *record;
+ int remaining, i = 0;
+ unsigned long flags;
+ bool ret = true;
+
+ spin_lock_irqsave(&tx_ctx->lock, flags);
+ record = tls_get_record(tx_ctx, tcp_seq, &info->rcd_sn);
+
+ if (unlikely(!record)) {
+ ret = false;
+ goto out;
+ }
+
+ if (unlikely(tcp_seq < tls_record_start_seq(record))) {
+ if (!tls_record_is_start_marker(record))
+ ret = false;
+ goto out;
+ }
+
+ info->sync_len = tcp_seq - tls_record_start_seq(record);
+ remaining = info->sync_len;
+ while (remaining > 0) {
+ skb_frag_t *frag = &record->frags[i];
+
+ __skb_frag_ref(frag);
+ remaining -= skb_frag_size(frag);
+ info->frags[i++] = frag;
+ }
+ /* Trim the last frag by the part that will be sent with the original SKB. */
+ if (remaining < 0)
+ skb_frag_size_add(info->frags[i - 1], remaining);
+ info->nr_frags = i;
+out:
+ spin_unlock_irqrestore(&tx_ctx->lock, flags);
+ return ret;
+}
+
+static void
+tx_post_resync_params(struct mlx5e_txqsq *sq,
+ struct mlx5e_ktls_offload_context_tx *priv_tx,
+ u64 rcd_sn)
+{
+ struct tls_crypto_info *crypto_info = priv_tx->crypto_info;
+ __be64 rn_be = cpu_to_be64(rcd_sn);
+ bool skip_static_post;
+ u16 rec_seq_sz;
+ char *rec_seq;
+
+ switch (crypto_info->cipher_type) {
+ case TLS_CIPHER_AES_GCM_128: {
+ struct tls12_crypto_info_aes_gcm_128 *info =
+ (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
+
+ rec_seq = info->rec_seq;
+ rec_seq_sz = sizeof(info->rec_seq);
+ break;
+ }
+ default:
+ WARN_ON(1);
+ return;
+ }
+
+ skip_static_post = !memcmp(rec_seq, &rn_be, rec_seq_sz);
+ if (!skip_static_post)
+ memcpy(rec_seq, &rn_be, rec_seq_sz);
+
+ mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, skip_static_post, true);
+}
+
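+/* Post a DUMP WQE carrying the packet headers inline and one frag of the TLS
+ * record payload, as part of the resync flow.
+ */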
+static int
+tx_post_resync_dump(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ skb_frag_t *frag, u32 tisn, bool first)
+{
+ struct mlx5_wqe_ctrl_seg *cseg;
+ struct mlx5_wqe_eth_seg *eseg;
+ struct mlx5_wqe_data_seg *dseg;
+ struct mlx5e_tx_wqe *wqe;
+ dma_addr_t dma_addr = 0;
+ u16 ds_cnt, ds_cnt_inl;
+ u8 num_wqebbs;
+ u16 pi, ihs;
+ int fsz;
+
+ ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
+ ihs = eth_get_headlen(skb->dev, skb->data, skb_headlen(skb));
+ ds_cnt_inl = DIV_ROUND_UP(ihs - INL_HDR_START_SZ, MLX5_SEND_WQE_DS);
+ ds_cnt += ds_cnt_inl;
+ ds_cnt += 1; /* one frag */
+
+ wqe = mlx5e_sq_fetch_wqe(sq, sizeof(*wqe), &pi);
+
+ num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
+
+ cseg = &wqe->ctrl;
+ eseg = &wqe->eth;
+ dseg = wqe->data;
+
+ cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_DUMP);
+ cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
+ cseg->imm = cpu_to_be32(tisn);
+ cseg->fm_ce_se = first ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;
+
+ eseg->inline_hdr.sz = cpu_to_be16(ihs);
+ memcpy(eseg->inline_hdr.start, skb->data, ihs);
+ dseg += ds_cnt_inl;
+
+ fsz = skb_frag_size(frag);
+ dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
+ return -ENOMEM;
+
+ dseg->addr = cpu_to_be64(dma_addr);
+ dseg->lkey = sq->mkey_be;
+ dseg->byte_count = cpu_to_be32(fsz);
+ mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
+
+ tx_fill_wi(sq, pi, num_wqebbs, frag);
+ sq->pc += num_wqebbs;
+
+ WARN(num_wqebbs > MLX5E_KTLS_MAX_DUMP_WQEBBS,
+ "unexpected DUMP num_wqebbs, %d > %d",
+ num_wqebbs, MLX5E_KTLS_MAX_DUMP_WQEBBS);
+
+ return 0;
+}
+
+void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
+ struct mlx5e_tx_wqe_info *wi,
+ struct mlx5e_sq_dma *dma)
+{
+ struct mlx5e_sq_stats *stats = sq->stats;
+
+ mlx5e_tx_dma_unmap(sq->pdev, dma);
+ __skb_frag_unref(wi->resync_dump_frag);
+ stats->tls_dump_packets++;
+ stats->tls_dump_bytes += wi->num_bytes;
+}
+
+static void tx_post_fence_nop(struct mlx5e_txqsq *sq)
+{
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+
+ tx_fill_wi(sq, pi, 1, NULL);
+
+ mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc);
+}
+
+static struct sk_buff *
+mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
+ struct mlx5e_txqsq *sq,
+ struct sk_buff *skb,
+ u32 seq)
+{
+ struct mlx5e_sq_stats *stats = sq->stats;
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ struct tx_sync_info info = {};
+ u16 contig_wqebbs_room, pi;
+ u8 num_wqebbs;
+ int i;
+
+ if (!tx_sync_info_get(priv_tx, seq, &info)) {
+ /* We might get here if a retransmission reaches the driver
+ * after the relevant record has been acked. It should be safe
+ * to drop the packet in this case.
+ */
+ stats->tls_drop_no_sync_data++;
+ goto err_out;
+ }
+
+ if (unlikely(info.sync_len < 0)) {
+ u32 payload;
+ int headln;
+
+ headln = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ payload = skb->len - headln;
+ if (likely(payload <= -info.sync_len))
+ return skb;
+
+ stats->tls_drop_bypass_req++;
+ goto err_out;
+ }
+
+ stats->tls_ooo++;
+
+ num_wqebbs = MLX5E_KTLS_STATIC_WQEBBS + MLX5E_KTLS_PROGRESS_WQEBBS +
+ (info.nr_frags ? info.nr_frags * MLX5E_KTLS_MAX_DUMP_WQEBBS : 1);
+ pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+ contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
+ if (unlikely(contig_wqebbs_room < num_wqebbs))
+ mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
+
+ tx_post_resync_params(sq, priv_tx, info.rcd_sn);
+
+ for (i = 0; i < info.nr_frags; i++)
+ if (tx_post_resync_dump(sq, skb, info.frags[i],
+ priv_tx->tisn, !i))
+ goto err_out;
+
+ /* If no dump WQE was sent, we need to have a fence NOP WQE before the
+ * actual data xmit.
+ */
+ if (!info.nr_frags)
+ tx_post_fence_nop(sq);
+
+ return skb;
+
+err_out:
+ dev_kfree_skb_any(skb);
+ return NULL;
+}
+
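+/* kTLS TX hook: post the static/progress params for a new connection, handle
+ * out-of-order (e.g. retransmitted) packets via the resync flow, and tag the
+ * data WQE with the connection's TISN.
+ */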
+struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
+ struct mlx5e_txqsq *sq,
+ struct sk_buff *skb,
+ struct mlx5e_tx_wqe **wqe, u16 *pi)
+{
+ struct mlx5e_ktls_offload_context_tx *priv_tx;
+ struct mlx5e_sq_stats *stats = sq->stats;
+ struct mlx5_wqe_ctrl_seg *cseg;
+ struct tls_context *tls_ctx;
+ int datalen;
+ u32 seq;
+
+ if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
+ goto out;
+
+ datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ if (!datalen)
+ goto out;
+
+ tls_ctx = tls_get_ctx(skb->sk);
+ if (unlikely(tls_ctx->netdev != netdev))
+ goto err_out;
+
+ priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);
+
+ if (unlikely(mlx5e_ktls_tx_offload_test_and_clear_pending(priv_tx))) {
+ mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, false, false);
+ *wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
+ stats->tls_ctx++;
+ }
+
+ seq = ntohl(tcp_hdr(skb)->seq);
+ if (unlikely(priv_tx->expected_seq != seq)) {
+ skb = mlx5e_ktls_tx_handle_ooo(priv_tx, sq, skb, seq);
+ if (unlikely(!skb))
+ goto out;
+ *wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
+ }
+
+ priv_tx->expected_seq = seq + datalen;
+
+ cseg = &(*wqe)->ctrl;
+ cseg->imm = cpu_to_be32(priv_tx->tisn);
+
+ stats->tls_encrypted_packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
+ stats->tls_encrypted_bytes += datalen;
+
+out:
+ return skb;
+
+err_out:
+ dev_kfree_skb_any(skb);
+ return NULL;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
index e88340e196f7..fba561ffe1d4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
@@ -160,25 +160,31 @@ static void mlx5e_tls_del(struct net_device *netdev,
direction == TLS_OFFLOAD_CTX_DIR_TX);
}
-static void mlx5e_tls_resync_rx(struct net_device *netdev, struct sock *sk,
- u32 seq, u64 rcd_sn)
+static int mlx5e_tls_resync(struct net_device *netdev, struct sock *sk,
+ u32 seq, u8 *rcd_sn_data,
+ enum tls_offload_ctx_dir direction)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_tls_offload_context_rx *rx_ctx;
+ u64 rcd_sn = *(u64 *)rcd_sn_data;
+ if (WARN_ON_ONCE(direction != TLS_OFFLOAD_CTX_DIR_RX))
+ return -EINVAL;
rx_ctx = mlx5e_get_tls_rx_context(tls_ctx);
netdev_info(netdev, "resyncing seq %d rcd %lld\n", seq,
be64_to_cpu(rcd_sn));
mlx5_accel_tls_resync_rx(priv->mdev, rx_ctx->handle, seq, rcd_sn);
atomic64_inc(&priv->tls->sw_stats.rx_tls_resync_reply);
+
+ return 0;
}
static const struct tlsdev_ops mlx5e_tls_ops = {
.tls_dev_add = mlx5e_tls_add,
.tls_dev_del = mlx5e_tls_del,
- .tls_dev_resync_rx = mlx5e_tls_resync_rx,
+ .tls_dev_resync = mlx5e_tls_resync,
};
void mlx5e_tls_build_netdev(struct mlx5e_priv *priv)
@@ -186,6 +192,11 @@ void mlx5e_tls_build_netdev(struct mlx5e_priv *priv)
struct net_device *netdev = priv->netdev;
u32 caps;
+ if (mlx5_accel_is_ktls_device(priv->mdev)) {
+ mlx5e_ktls_build_netdev(priv);
+ return;
+ }
+
if (!mlx5_accel_is_tls_device(priv->mdev))
return;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h
index 3f5d72163b56..9015f3f7792d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h
@@ -33,8 +33,10 @@
#ifndef __MLX5E_TLS_H__
#define __MLX5E_TLS_H__
-#ifdef CONFIG_MLX5_EN_TLS
+#include "accel/tls.h"
+#include "en_accel/ktls.h"
+#ifdef CONFIG_MLX5_EN_TLS
#include <net/tls.h>
#include "en.h"
@@ -94,7 +96,12 @@ int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data);
#else
-static inline void mlx5e_tls_build_netdev(struct mlx5e_priv *priv) { }
+static inline void mlx5e_tls_build_netdev(struct mlx5e_priv *priv)
+{
+ if (mlx5_accel_is_ktls_device(priv->mdev))
+ mlx5e_ktls_build_netdev(priv);
+}
+
static inline int mlx5e_tls_init(struct mlx5e_priv *priv) { return 0; }
static inline void mlx5e_tls_cleanup(struct mlx5e_priv *priv) { }
static inline int mlx5e_tls_get_count(struct mlx5e_priv *priv) { return 0; }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
index 439bf5953885..71384ad1a443 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
@@ -248,7 +248,7 @@ mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context,
mlx5e_tls_complete_sync_skb(skb, nskb, tcp_seq, headln,
cpu_to_be64(info.rcd_sn));
mlx5e_sq_xmit(sq, nskb, *wqe, *pi, true);
- mlx5e_sq_fetch_wqe(sq, wqe, pi);
+ *wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
return skb;
err_out:
@@ -269,6 +269,11 @@ struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
int datalen;
u32 skb_seq;
+ if (MLX5_CAP_GEN(sq->channel->mdev, tls)) {
+ skb = mlx5e_ktls_handle_tx_skb(netdev, sq, skb, wqe, pi);
+ goto out;
+ }
+
if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h
index 311667ec71b8..90bc1f2384c8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h
@@ -38,6 +38,7 @@
#include <linux/skbuff.h>
#include "en.h"
+#include "en/txrx.h"
struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
struct mlx5e_txqsq *sq,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 554672edf8c3..8dd31b5c740c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -680,7 +680,7 @@ static void mlx5e_dcbnl_getpermhwaddr(struct net_device *netdev,
memset(perm_addr, 0xff, MAX_ADDR_LEN);
- mlx5_query_nic_vport_mac_address(priv->mdev, 0, perm_addr);
+ mlx5_query_mac_address(priv->mdev, perm_addr);
}
static void mlx5e_dcbnl_setpgtccfgtx(struct net_device *netdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
index d67adf70a97b..ca9cfbf57d8f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
@@ -30,22 +30,22 @@
* SOFTWARE.
*/
-#include <linux/net_dim.h>
+#include <linux/dim.h>
#include "en.h"
static void
-mlx5e_complete_dim_work(struct net_dim *dim, struct net_dim_cq_moder moder,
+mlx5e_complete_dim_work(struct dim *dim, struct dim_cq_moder moder,
struct mlx5_core_dev *mdev, struct mlx5_core_cq *mcq)
{
mlx5_core_modify_cq_moderation(mdev, mcq, moder.usec, moder.pkts);
- dim->state = NET_DIM_START_MEASURE;
+ dim->state = DIM_START_MEASURE;
}
void mlx5e_rx_dim_work(struct work_struct *work)
{
- struct net_dim *dim = container_of(work, struct net_dim, work);
+ struct dim *dim = container_of(work, struct dim, work);
struct mlx5e_rq *rq = container_of(dim, struct mlx5e_rq, dim);
- struct net_dim_cq_moder cur_moder =
+ struct dim_cq_moder cur_moder =
net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
mlx5e_complete_dim_work(dim, cur_moder, rq->mdev, &rq->cq.mcq);
@@ -53,9 +53,9 @@ void mlx5e_rx_dim_work(struct work_struct *work)
void mlx5e_tx_dim_work(struct work_struct *work)
{
- struct net_dim *dim = container_of(work, struct net_dim, work);
+ struct dim *dim = container_of(work, struct dim, work);
struct mlx5e_txqsq *sq = container_of(dim, struct mlx5e_txqsq, dim);
- struct net_dim_cq_moder cur_moder =
+ struct dim_cq_moder cur_moder =
net_dim_get_tx_moderation(dim->mode, dim->profile_ix);
mlx5e_complete_dim_work(dim, cur_moder, sq->cq.mdev, &sq->cq.mcq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index dd764e0471f2..126ec4181286 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -32,6 +32,7 @@
#include "en.h"
#include "en/port.h"
+#include "en/xsk/umem.h"
#include "lib/clock.h"
void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
@@ -46,7 +47,7 @@ void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
"%d.%d.%04d (%.16s)",
fw_rev_maj(mdev), fw_rev_min(mdev), fw_rev_sub(mdev),
mdev->board_id);
- strlcpy(drvinfo->bus_info, pci_name(mdev->pdev),
+ strlcpy(drvinfo->bus_info, dev_name(mdev->device),
sizeof(drvinfo->bus_info));
}
@@ -388,8 +389,17 @@ static int mlx5e_set_ringparam(struct net_device *dev,
void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,
struct ethtool_channels *ch)
{
+ mutex_lock(&priv->state_lock);
+
ch->max_combined = mlx5e_get_netdev_max_channels(priv->netdev);
ch->combined_count = priv->channels.params.num_channels;
+ if (priv->xsk.refcnt) {
+ /* The upper half are XSK queues. */
+ ch->max_combined *= 2;
+ ch->combined_count *= 2;
+ }
+
+ mutex_unlock(&priv->state_lock);
}
static void mlx5e_get_channels(struct net_device *dev,
@@ -403,6 +413,7 @@ static void mlx5e_get_channels(struct net_device *dev,
int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
struct ethtool_channels *ch)
{
+ struct mlx5e_params *cur_params = &priv->channels.params;
unsigned int count = ch->combined_count;
struct mlx5e_channels new_channels = {};
bool arfs_enabled;
@@ -414,16 +425,26 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
return -EINVAL;
}
- if (priv->channels.params.num_channels == count)
+ if (cur_params->num_channels == count)
return 0;
mutex_lock(&priv->state_lock);
+ /* Don't allow changing the number of channels if there is an active
+ * XSK, because the numbering of the XSK and regular RQs will change.
+ */
+ if (priv->xsk.refcnt) {
+ err = -EINVAL;
+ netdev_err(priv->netdev, "%s: AF_XDP is active, cannot change the number of channels\n",
+ __func__);
+ goto out;
+ }
+
new_channels.params = priv->channels.params;
new_channels.params.num_channels = count;
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- priv->channels.params = new_channels.params;
+ *cur_params = new_channels.params;
if (!netif_is_rxfh_configured(priv->netdev))
mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt,
MLX5E_INDIR_RQT_SIZE, count);
@@ -466,7 +487,7 @@ static int mlx5e_set_channels(struct net_device *dev,
int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
struct ethtool_coalesce *coal)
{
- struct net_dim_cq_moder *rx_moder, *tx_moder;
+ struct dim_cq_moder *rx_moder, *tx_moder;
if (!MLX5_CAP_GEN(priv->mdev, cq_moderation))
return -EOPNOTSUPP;
@@ -521,7 +542,7 @@ mlx5e_set_priv_channels_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesc
int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
struct ethtool_coalesce *coal)
{
- struct net_dim_cq_moder *rx_moder, *tx_moder;
+ struct dim_cq_moder *rx_moder, *tx_moder;
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_channels new_channels = {};
int err = 0;
@@ -1867,40 +1888,6 @@ static u32 mlx5e_get_priv_flags(struct net_device *netdev)
return priv->channels.params.pflags;
}
-int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
- struct ethtool_flash *flash)
-{
- struct mlx5_core_dev *mdev = priv->mdev;
- struct net_device *dev = priv->netdev;
- const struct firmware *fw;
- int err;
-
- if (flash->region != ETHTOOL_FLASH_ALL_REGIONS)
- return -EOPNOTSUPP;
-
- err = request_firmware_direct(&fw, flash->data, &dev->dev);
- if (err)
- return err;
-
- dev_hold(dev);
- rtnl_unlock();
-
- err = mlx5_firmware_flash(mdev, fw);
- release_firmware(fw);
-
- rtnl_lock();
- dev_put(dev);
- return err;
-}
-
-static int mlx5e_flash_device(struct net_device *dev,
- struct ethtool_flash *flash)
-{
- struct mlx5e_priv *priv = netdev_priv(dev);
-
- return mlx5e_ethtool_flash_device(priv, flash);
-}
-
#ifndef CONFIG_MLX5_EN_RXNFC
/* When CONFIG_MLX5_EN_RXNFC=n we only support ETHTOOL_GRXRINGS
* otherwise this function will be defined from en_fs_ethtool.c
@@ -1939,7 +1926,6 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
#ifdef CONFIG_MLX5_EN_RXNFC
.set_rxnfc = mlx5e_set_rxnfc,
#endif
- .flash_device = mlx5e_flash_device,
.get_tunable = mlx5e_get_tunable,
.set_tunable = mlx5e_set_tunable,
.get_pauseparam = mlx5e_get_pauseparam,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index 4421c10f58ae..ea3a490b569a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -32,6 +32,8 @@
#include <linux/mlx5/fs.h>
#include "en.h"
+#include "en/params.h"
+#include "en/xsk/umem.h"
struct mlx5e_ethtool_rule {
struct list_head list;
@@ -414,6 +416,14 @@ add_ethtool_flow_rule(struct mlx5e_priv *priv,
if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
} else {
+ struct mlx5e_params *params = &priv->channels.params;
+ enum mlx5e_rq_group group;
+ struct mlx5e_tir *tir;
+ u16 ix;
+
+ mlx5e_qid_get_ch_and_group(params, fs->ring_cookie, &ix, &group);
+ tir = group == MLX5E_RQ_GROUP_XSK ? priv->xsk_tir : priv->direct_tir;
+
dst = kzalloc(sizeof(*dst), GFP_KERNEL);
if (!dst) {
err = -ENOMEM;
@@ -421,12 +431,12 @@ add_ethtool_flow_rule(struct mlx5e_priv *priv,
}
dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
- dst->tir_num = priv->direct_tir[fs->ring_cookie].tirn;
+ dst->tir_num = tir[ix].tirn;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
}
spec->match_criteria_enable = (!outer_header_zero(spec->match_criteria));
- flow_act.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
+ spec->flow_context.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
rule = mlx5_add_flow_rules(ft, spec, &flow_act, dst, dst ? 1 : 0);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -600,9 +610,9 @@ static int validate_flow(struct mlx5e_priv *priv,
if (fs->location >= MAX_NUM_OF_ETHTOOL_RULES)
return -ENOSPC;
- if (fs->ring_cookie >= priv->channels.params.num_channels &&
- fs->ring_cookie != RX_CLS_FLOW_DISC)
- return -EINVAL;
+ if (fs->ring_cookie != RX_CLS_FLOW_DISC)
+ if (!mlx5e_qid_validate(&priv->channels.params, fs->ring_cookie))
+ return -EINVAL;
switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
case ETHER_FLOW:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index a8e8350b38aa..6d0ae87c8ded 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -38,8 +38,10 @@
#include <linux/bpf.h>
#include <linux/if_bridge.h>
#include <net/page_pool.h>
+#include <net/xdp_sock.h>
#include "eswitch.h"
#include "en.h"
+#include "en/txrx.h"
#include "en_tc.h"
#include "en_rep.h"
#include "en_accel/ipsec.h"
@@ -56,35 +58,11 @@
#include "en/monitor_stats.h"
#include "en/reporter.h"
#include "en/params.h"
+#include "en/xsk/umem.h"
+#include "en/xsk/setup.h"
+#include "en/xsk/rx.h"
+#include "en/xsk/tx.h"
-struct mlx5e_rq_param {
- u32 rqc[MLX5_ST_SZ_DW(rqc)];
- struct mlx5_wq_param wq;
- struct mlx5e_rq_frags_info frags_info;
-};
-
-struct mlx5e_sq_param {
- u32 sqc[MLX5_ST_SZ_DW(sqc)];
- struct mlx5_wq_param wq;
- bool is_mpw;
-};
-
-struct mlx5e_cq_param {
- u32 cqc[MLX5_ST_SZ_DW(cqc)];
- struct mlx5_wq_param wq;
- u16 eq_ix;
- u8 cq_period_mode;
-};
-
-struct mlx5e_channel_param {
- struct mlx5e_rq_param rq;
- struct mlx5e_sq_param sq;
- struct mlx5e_sq_param xdp_sq;
- struct mlx5e_sq_param icosq;
- struct mlx5e_cq_param rx_cq;
- struct mlx5e_cq_param tx_cq;
- struct mlx5e_cq_param icosq_cq;
-};
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
{
@@ -114,18 +92,31 @@ void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
- BIT(mlx5e_mpwqe_get_log_rq_size(params)) :
+ BIT(mlx5e_mpwqe_get_log_rq_size(params, NULL)) :
BIT(params->log_rq_mtu_frames),
- BIT(mlx5e_mpwqe_get_log_stride_size(mdev, params)),
+ BIT(mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL)),
MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
}
bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
struct mlx5e_params *params)
{
- return mlx5e_check_fragmented_striding_rq_cap(mdev) &&
- !MLX5_IPSEC_DEV(mdev) &&
- !(params->xdp_prog && !mlx5e_rx_mpwqe_is_linear_skb(mdev, params));
+ if (!mlx5e_check_fragmented_striding_rq_cap(mdev))
+ return false;
+
+ if (MLX5_IPSEC_DEV(mdev))
+ return false;
+
+ if (params->xdp_prog) {
+ /* XSK params are not considered here. If striding RQ is in use,
+ * and an XSK is being opened, mlx5e_rx_mpwqe_is_linear_skb will
+ * be called with the known XSK params.
+ */
+ if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
+ return false;
+ }
+
+ return true;
}
void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
@@ -394,6 +385,8 @@ static void mlx5e_free_di_list(struct mlx5e_rq *rq)
static int mlx5e_alloc_rq(struct mlx5e_channel *c,
struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk,
+ struct xdp_umem *umem,
struct mlx5e_rq_param *rqp,
struct mlx5e_rq *rq)
{
@@ -401,6 +394,8 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
struct mlx5_core_dev *mdev = c->mdev;
void *rqc = rqp->rqc;
void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
+ u32 num_xsk_frames = 0;
+ u32 rq_xdp_ix;
u32 pool_size;
int wq_sz;
int err;
@@ -417,7 +412,13 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
rq->ix = c->ix;
rq->mdev = mdev;
rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
- rq->stats = &c->priv->channel_stats[c->ix].rq;
+ rq->xdpsq = &c->rq_xdpsq;
+ rq->umem = umem;
+
+ if (rq->umem)
+ rq->stats = &c->priv->channel_stats[c->ix].xskrq;
+ else
+ rq->stats = &c->priv->channel_stats[c->ix].rq;
rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL;
if (IS_ERR(rq->xdp_prog)) {
@@ -426,12 +427,16 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
goto err_rq_wq_destroy;
}
- err = xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix);
+ rq_xdp_ix = rq->ix;
+ if (xsk)
+ rq_xdp_ix += params->num_channels * MLX5E_RQ_GROUP_XSK;
+ err = xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq_xdp_ix);
if (err < 0)
goto err_rq_wq_destroy;
rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
- rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params);
+ rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params, xsk);
+ rq->buff.umem_headroom = xsk ? xsk->headroom : 0;
pool_size = 1 << params->log_rq_mtu_frames;
switch (rq->wq_type) {
@@ -445,7 +450,12 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
- pool_size = MLX5_MPWRQ_PAGES_PER_WQE << mlx5e_mpwqe_get_log_rq_size(params);
+ if (xsk)
+ num_xsk_frames = wq_sz <<
+ mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
+
+ pool_size = MLX5_MPWRQ_PAGES_PER_WQE <<
+ mlx5e_mpwqe_get_log_rq_size(params, xsk);
rq->post_wqes = mlx5e_post_rx_mpwqes;
rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
@@ -464,12 +474,15 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
goto err_rq_wq_destroy;
}
- rq->mpwqe.skb_from_cqe_mpwrq =
- mlx5e_rx_mpwqe_is_linear_skb(mdev, params) ?
- mlx5e_skb_from_cqe_mpwrq_linear :
- mlx5e_skb_from_cqe_mpwrq_nonlinear;
- rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params);
- rq->mpwqe.num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params));
+ rq->mpwqe.skb_from_cqe_mpwrq = xsk ?
+ mlx5e_xsk_skb_from_cqe_mpwrq_linear :
+ mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ?
+ mlx5e_skb_from_cqe_mpwrq_linear :
+ mlx5e_skb_from_cqe_mpwrq_nonlinear;
+
+ rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
+ rq->mpwqe.num_strides =
+ BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk));
err = mlx5e_create_rq_umr_mkey(mdev, rq);
if (err)
@@ -490,6 +503,9 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
wq_sz = mlx5_wq_cyc_get_size(&rq->wqe.wq);
+ if (xsk)
+ num_xsk_frames = wq_sz << rq->wqe.info.log_num_frags;
+
rq->wqe.info = rqp->frags_info;
rq->wqe.frags =
kvzalloc_node(array_size(sizeof(*rq->wqe.frags),
@@ -503,6 +519,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
err = mlx5e_init_di_list(rq, wq_sz, c->cpu);
if (err)
goto err_free;
+
rq->post_wqes = mlx5e_post_rx_wqes;
rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
@@ -518,33 +535,49 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
goto err_free;
}
- rq->wqe.skb_from_cqe = mlx5e_rx_is_linear_skb(params) ?
- mlx5e_skb_from_cqe_linear :
- mlx5e_skb_from_cqe_nonlinear;
+ rq->wqe.skb_from_cqe = xsk ?
+ mlx5e_xsk_skb_from_cqe_linear :
+ mlx5e_rx_is_linear_skb(params, NULL) ?
+ mlx5e_skb_from_cqe_linear :
+ mlx5e_skb_from_cqe_nonlinear;
rq->mkey_be = c->mkey_be;
}
- /* Create a page_pool and register it with rxq */
- pp_params.order = 0;
- pp_params.flags = 0; /* No-internal DMA mapping in page_pool */
- pp_params.pool_size = pool_size;
- pp_params.nid = cpu_to_node(c->cpu);
- pp_params.dev = c->pdev;
- pp_params.dma_dir = rq->buff.map_dir;
-
- /* page_pool can be used even when there is no rq->xdp_prog,
- * given page_pool does not handle DMA mapping there is no
- * required state to clear. And page_pool gracefully handle
- * elevated refcnt.
- */
- rq->page_pool = page_pool_create(&pp_params);
- if (IS_ERR(rq->page_pool)) {
- err = PTR_ERR(rq->page_pool);
- rq->page_pool = NULL;
- goto err_free;
+ if (xsk) {
+ err = mlx5e_xsk_resize_reuseq(umem, num_xsk_frames);
+ if (unlikely(err)) {
+ mlx5_core_err(mdev, "Unable to allocate the Reuse Ring for %u frames\n",
+ num_xsk_frames);
+ goto err_free;
+ }
+
+ rq->zca.free = mlx5e_xsk_zca_free;
+ err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
+ MEM_TYPE_ZERO_COPY,
+ &rq->zca);
+ } else {
+ /* Create a page_pool and register it with rxq */
+ pp_params.order = 0;
+ pp_params.flags = 0; /* No-internal DMA mapping in page_pool */
+ pp_params.pool_size = pool_size;
+ pp_params.nid = cpu_to_node(c->cpu);
+ pp_params.dev = c->pdev;
+ pp_params.dma_dir = rq->buff.map_dir;
+
+ /* page_pool can be used even when there is no rq->xdp_prog,
+ * since page_pool does not handle DMA mapping, there is no
+ * required state to clear. And page_pool gracefully handles
+ * an elevated refcnt.
+ */
+ rq->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(rq->page_pool)) {
+ err = PTR_ERR(rq->page_pool);
+ rq->page_pool = NULL;
+ goto err_free;
+ }
+ err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
+ MEM_TYPE_PAGE_POOL, rq->page_pool);
}
- err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
- MEM_TYPE_PAGE_POOL, rq->page_pool);
if (err)
goto err_free;
@@ -584,11 +617,11 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
switch (params->rx_cq_moderation.cq_period_mode) {
case MLX5_CQ_PERIOD_MODE_START_FROM_CQE:
- rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE;
+ rq->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE;
break;
case MLX5_CQ_PERIOD_MODE_START_FROM_EQE:
default:
- rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ rq->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}
rq->page_cache.head = 0;
@@ -611,8 +644,7 @@ err_rq_wq_destroy:
if (rq->xdp_prog)
bpf_prog_put(rq->xdp_prog);
xdp_rxq_info_unreg(&rq->xdp_rxq);
- if (rq->page_pool)
- page_pool_destroy(rq->page_pool);
+ page_pool_destroy(rq->page_pool);
mlx5_wq_destroy(&rq->wq_ctrl);
return err;
@@ -625,10 +657,6 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
if (rq->xdp_prog)
bpf_prog_put(rq->xdp_prog);
- xdp_rxq_info_unreg(&rq->xdp_rxq);
- if (rq->page_pool)
- page_pool_destroy(rq->page_pool);
-
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
kvfree(rq->mpwqe.info);
@@ -643,8 +671,15 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
i = (i + 1) & (MLX5E_CACHE_SIZE - 1)) {
struct mlx5e_dma_info *dma_info = &rq->page_cache.page_cache[i];
- mlx5e_page_release(rq, dma_info, false);
+ /* With AF_XDP, page_cache is not used, so this loop is not
+ * entered, and it's safe to call mlx5e_page_release_dynamic
+ * directly.
+ */
+ mlx5e_page_release_dynamic(rq, dma_info, false);
}
+
+ xdp_rxq_info_unreg(&rq->xdp_rxq);
+ page_pool_destroy(rq->page_pool);
mlx5_wq_destroy(&rq->wq_ctrl);
}
@@ -778,7 +813,7 @@ static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
mlx5_core_destroy_rq(rq->mdev, rq->rqn);
}
-static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time)
+int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time)
{
unsigned long exp_time = jiffies + msecs_to_jiffies(wait_time);
struct mlx5e_channel *c = rq->channel;
@@ -836,14 +871,13 @@ static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
}
-static int mlx5e_open_rq(struct mlx5e_channel *c,
- struct mlx5e_params *params,
- struct mlx5e_rq_param *param,
- struct mlx5e_rq *rq)
+int mlx5e_open_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
+ struct mlx5e_rq_param *param, struct mlx5e_xsk_param *xsk,
+ struct xdp_umem *umem, struct mlx5e_rq *rq)
{
int err;
- err = mlx5e_alloc_rq(c, params, param, rq);
+ err = mlx5e_alloc_rq(c, params, xsk, umem, param, rq);
if (err)
return err;
@@ -881,13 +915,13 @@ static void mlx5e_activate_rq(struct mlx5e_rq *rq)
mlx5e_trigger_irq(&rq->channel->icosq);
}
-static void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
+void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
{
clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
}
-static void mlx5e_close_rq(struct mlx5e_rq *rq)
+void mlx5e_close_rq(struct mlx5e_rq *rq)
{
cancel_work_sync(&rq->dim.work);
mlx5e_destroy_rq(rq);
@@ -940,6 +974,7 @@ static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
struct mlx5e_params *params,
+ struct xdp_umem *umem,
struct mlx5e_sq_param *param,
struct mlx5e_xdpsq *sq,
bool is_redirect)
@@ -955,9 +990,13 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
sq->uar_map = mdev->mlx5e_res.bfreg.map;
sq->min_inline_mode = params->tx_min_inline_mode;
sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
- sq->stats = is_redirect ?
- &c->priv->channel_stats[c->ix].xdpsq :
- &c->priv->channel_stats[c->ix].rq_xdpsq;
+ sq->umem = umem;
+
+ sq->stats = sq->umem ?
+ &c->priv->channel_stats[c->ix].xsksq :
+ is_redirect ?
+ &c->priv->channel_stats[c->ix].xdpsq :
+ &c->priv->channel_stats[c->ix].rq_xdpsq;
param->wq.db_numa_node = cpu_to_node(c->cpu);
err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
@@ -1087,11 +1126,14 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
sq->uar_map = mdev->mlx5e_res.bfreg.map;
sq->min_inline_mode = params->tx_min_inline_mode;
sq->stats = &c->priv->channel_stats[c->ix].sq[tc];
+ sq->stop_room = MLX5E_SQ_STOP_ROOM;
INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
if (MLX5_IPSEC_DEV(c->priv->mdev))
set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
- if (mlx5_accel_is_tls_device(c->priv->mdev))
+ if (mlx5_accel_is_tls_device(c->priv->mdev)) {
set_bit(MLX5E_SQ_STATE_TLS, &sq->state);
+ sq->stop_room += MLX5E_SQ_TLS_ROOM;
+ }
param->wq.db_numa_node = cpu_to_node(c->cpu);
err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
@@ -1337,10 +1379,8 @@ static void mlx5e_tx_err_cqe_work(struct work_struct *recover_work)
mlx5e_tx_reporter_err_cqe(sq);
}
-static int mlx5e_open_icosq(struct mlx5e_channel *c,
- struct mlx5e_params *params,
- struct mlx5e_sq_param *param,
- struct mlx5e_icosq *sq)
+int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
+ struct mlx5e_sq_param *param, struct mlx5e_icosq *sq)
{
struct mlx5e_create_sq_param csp = {};
int err;
@@ -1366,7 +1406,7 @@ err_free_icosq:
return err;
}
-static void mlx5e_close_icosq(struct mlx5e_icosq *sq)
+void mlx5e_close_icosq(struct mlx5e_icosq *sq)
{
struct mlx5e_channel *c = sq->channel;
@@ -1377,16 +1417,14 @@ static void mlx5e_close_icosq(struct mlx5e_icosq *sq)
mlx5e_free_icosq(sq);
}
-static int mlx5e_open_xdpsq(struct mlx5e_channel *c,
- struct mlx5e_params *params,
- struct mlx5e_sq_param *param,
- struct mlx5e_xdpsq *sq,
- bool is_redirect)
+int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
+ struct mlx5e_sq_param *param, struct xdp_umem *umem,
+ struct mlx5e_xdpsq *sq, bool is_redirect)
{
struct mlx5e_create_sq_param csp = {};
int err;
- err = mlx5e_alloc_xdpsq(c, params, param, sq, is_redirect);
+ err = mlx5e_alloc_xdpsq(c, params, umem, param, sq, is_redirect);
if (err)
return err;
@@ -1440,7 +1478,7 @@ err_free_xdpsq:
return err;
}
-static void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq)
+void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq)
{
struct mlx5e_channel *c = sq->channel;
@@ -1448,7 +1486,7 @@ static void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq)
napi_synchronize(&c->napi);
mlx5e_destroy_sq(c->mdev, sq->sqn);
- mlx5e_free_xdpsq_descs(sq, rq);
+ mlx5e_free_xdpsq_descs(sq);
mlx5e_free_xdpsq(sq);
}
@@ -1518,6 +1556,7 @@ static void mlx5e_free_cq(struct mlx5e_cq *cq)
static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
{
+ u32 out[MLX5_ST_SZ_DW(create_cq_out)];
struct mlx5_core_dev *mdev = cq->mdev;
struct mlx5_core_cq *mcq = &cq->mcq;
@@ -1552,7 +1591,7 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
- err = mlx5_core_create_cq(mdev, mcq, in, inlen);
+ err = mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out));
kvfree(in);
@@ -1569,10 +1608,8 @@ static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
}
-static int mlx5e_open_cq(struct mlx5e_channel *c,
- struct net_dim_cq_moder moder,
- struct mlx5e_cq_param *param,
- struct mlx5e_cq *cq)
+int mlx5e_open_cq(struct mlx5e_channel *c, struct dim_cq_moder moder,
+ struct mlx5e_cq_param *param, struct mlx5e_cq *cq)
{
struct mlx5_core_dev *mdev = c->mdev;
int err;
@@ -1595,7 +1632,7 @@ err_free_cq:
return err;
}
-static void mlx5e_close_cq(struct mlx5e_cq *cq)
+void mlx5e_close_cq(struct mlx5e_cq *cq)
{
mlx5e_destroy_cq(cq);
mlx5e_free_cq(cq);
@@ -1769,49 +1806,16 @@ static void mlx5e_free_xps_cpumask(struct mlx5e_channel *c)
free_cpumask_var(c->xps_cpumask);
}
-static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
- struct mlx5e_params *params,
- struct mlx5e_channel_param *cparam,
- struct mlx5e_channel **cp)
+static int mlx5e_open_queues(struct mlx5e_channel *c,
+ struct mlx5e_params *params,
+ struct mlx5e_channel_param *cparam)
{
- int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(priv->mdev, ix));
- struct net_dim_cq_moder icocq_moder = {0, 0};
- struct net_device *netdev = priv->netdev;
- struct mlx5e_channel *c;
- unsigned int irq;
+ struct dim_cq_moder icocq_moder = {0, 0};
int err;
- int eqn;
-
- err = mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
- if (err)
- return err;
-
- c = kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
- if (!c)
- return -ENOMEM;
-
- c->priv = priv;
- c->mdev = priv->mdev;
- c->tstamp = &priv->tstamp;
- c->ix = ix;
- c->cpu = cpu;
- c->pdev = priv->mdev->device;
- c->netdev = priv->netdev;
- c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
- c->num_tc = params->num_tc;
- c->xdp = !!params->xdp_prog;
- c->stats = &priv->channel_stats[ix].ch;
- c->irq_desc = irq_to_desc(irq);
-
- err = mlx5e_alloc_xps_cpumask(c, params);
- if (err)
- goto err_free_channel;
-
- netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq_cq, &c->icosq.cq);
if (err)
- goto err_napi_del;
+ return err;
err = mlx5e_open_tx_cqs(c, params, cparam);
if (err)
@@ -1827,7 +1831,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
/* XDP SQ CQ params are same as normal TXQ sq CQ params */
err = c->xdp ? mlx5e_open_cq(c, params->tx_cq_moderation,
- &cparam->tx_cq, &c->rq.xdpsq.cq) : 0;
+ &cparam->tx_cq, &c->rq_xdpsq.cq) : 0;
if (err)
goto err_close_rx_cq;
@@ -1841,20 +1845,21 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
if (err)
goto err_close_icosq;
- err = c->xdp ? mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, &c->rq.xdpsq, false) : 0;
- if (err)
- goto err_close_sqs;
+ if (c->xdp) {
+ err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, NULL,
+ &c->rq_xdpsq, false);
+ if (err)
+ goto err_close_sqs;
+ }
- err = mlx5e_open_rq(c, params, &cparam->rq, &c->rq);
+ err = mlx5e_open_rq(c, params, &cparam->rq, NULL, NULL, &c->rq);
if (err)
goto err_close_xdp_sq;
- err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, &c->xdpsq, true);
+ err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, NULL, &c->xdpsq, true);
if (err)
goto err_close_rq;
- *cp = c;
-
return 0;
err_close_rq:
@@ -1862,7 +1867,7 @@ err_close_rq:
err_close_xdp_sq:
if (c->xdp)
- mlx5e_close_xdpsq(&c->rq.xdpsq, &c->rq);
+ mlx5e_close_xdpsq(&c->rq_xdpsq);
err_close_sqs:
mlx5e_close_sqs(c);
@@ -1872,8 +1877,9 @@ err_close_icosq:
err_disable_napi:
napi_disable(&c->napi);
+
if (c->xdp)
- mlx5e_close_cq(&c->rq.xdpsq.cq);
+ mlx5e_close_cq(&c->rq_xdpsq.cq);
err_close_rx_cq:
mlx5e_close_cq(&c->rq.cq);
@@ -1887,6 +1893,85 @@ err_close_tx_cqs:
err_close_icosq_cq:
mlx5e_close_cq(&c->icosq.cq);
+ return err;
+}
+
+static void mlx5e_close_queues(struct mlx5e_channel *c)
+{
+ mlx5e_close_xdpsq(&c->xdpsq);
+ mlx5e_close_rq(&c->rq);
+ if (c->xdp)
+ mlx5e_close_xdpsq(&c->rq_xdpsq);
+ mlx5e_close_sqs(c);
+ mlx5e_close_icosq(&c->icosq);
+ napi_disable(&c->napi);
+ if (c->xdp)
+ mlx5e_close_cq(&c->rq_xdpsq.cq);
+ mlx5e_close_cq(&c->rq.cq);
+ mlx5e_close_cq(&c->xdpsq.cq);
+ mlx5e_close_tx_cqs(c);
+ mlx5e_close_cq(&c->icosq.cq);
+}
+
+static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
+ struct mlx5e_params *params,
+ struct mlx5e_channel_param *cparam,
+ struct xdp_umem *umem,
+ struct mlx5e_channel **cp)
+{
+ int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(priv->mdev, ix));
+ struct net_device *netdev = priv->netdev;
+ struct mlx5e_xsk_param xsk;
+ struct mlx5e_channel *c;
+ unsigned int irq;
+ int err;
+ int eqn;
+
+ err = mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
+ if (err)
+ return err;
+
+ c = kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
+ if (!c)
+ return -ENOMEM;
+
+ c->priv = priv;
+ c->mdev = priv->mdev;
+ c->tstamp = &priv->tstamp;
+ c->ix = ix;
+ c->cpu = cpu;
+ c->pdev = priv->mdev->device;
+ c->netdev = priv->netdev;
+ c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
+ c->num_tc = params->num_tc;
+ c->xdp = !!params->xdp_prog;
+ c->stats = &priv->channel_stats[ix].ch;
+ c->irq_desc = irq_to_desc(irq);
+
+ err = mlx5e_alloc_xps_cpumask(c, params);
+ if (err)
+ goto err_free_channel;
+
+ netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
+
+ err = mlx5e_open_queues(c, params, cparam);
+ if (unlikely(err))
+ goto err_napi_del;
+
+ if (umem) {
+ mlx5e_build_xsk_param(umem, &xsk);
+ err = mlx5e_open_xsk(priv, params, &xsk, umem, c);
+ if (unlikely(err))
+ goto err_close_queues;
+ }
+
+ *cp = c;
+
+ return 0;
+
+err_close_queues:
+ mlx5e_close_queues(c);
+
err_napi_del:
netif_napi_del(&c->napi);
mlx5e_free_xps_cpumask(c);
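The hunk above factors channel bring-up into mlx5e_open_queues()/mlx5e_close_queues(), keeping the kernel's usual goto-unwind error handling on the open path. The standalone sketch below illustrates only that unwind idiom; the resource names (open_a, open_b, close_a) are hypothetical and are not part of this patch.

	#include <errno.h>
	#include <stdio.h>

	/* Hypothetical stand-ins for the per-resource open/close steps. */
	static int open_a(void) { return 0; }
	static int open_b(void) { return -ENOMEM; }	/* a later step that fails */
	static void close_a(void) { }

	static int open_all(void)
	{
		int err;

		err = open_a();
		if (err)
			return err;

		err = open_b();
		if (err)
			goto err_close_a;	/* unwind only what was already opened */

		return 0;

	err_close_a:
		close_a();
		return err;
	}

	int main(void)
	{
		printf("open_all() = %d\n", open_all());
		return 0;
	}

Each successfully opened resource gets a matching label that releases it in reverse order, which is exactly how the err_close_* labels in mlx5e_open_queues() are arranged.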
@@ -1905,12 +1990,18 @@ static void mlx5e_activate_channel(struct mlx5e_channel *c)
mlx5e_activate_txqsq(&c->sq[tc]);
mlx5e_activate_rq(&c->rq);
netif_set_xps_queue(c->netdev, c->xps_cpumask, c->ix);
+
+ if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
+ mlx5e_activate_xsk(c);
}
static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
{
int tc;
+ if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
+ mlx5e_deactivate_xsk(c);
+
mlx5e_deactivate_rq(&c->rq);
for (tc = 0; tc < c->num_tc; tc++)
mlx5e_deactivate_txqsq(&c->sq[tc]);
@@ -1918,19 +2009,9 @@ static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
static void mlx5e_close_channel(struct mlx5e_channel *c)
{
- mlx5e_close_xdpsq(&c->xdpsq, NULL);
- mlx5e_close_rq(&c->rq);
- if (c->xdp)
- mlx5e_close_xdpsq(&c->rq.xdpsq, &c->rq);
- mlx5e_close_sqs(c);
- mlx5e_close_icosq(&c->icosq);
- napi_disable(&c->napi);
- if (c->xdp)
- mlx5e_close_cq(&c->rq.xdpsq.cq);
- mlx5e_close_cq(&c->rq.cq);
- mlx5e_close_cq(&c->xdpsq.cq);
- mlx5e_close_tx_cqs(c);
- mlx5e_close_cq(&c->icosq.cq);
+ if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
+ mlx5e_close_xsk(c);
+ mlx5e_close_queues(c);
netif_napi_del(&c->napi);
mlx5e_free_xps_cpumask(c);
@@ -1941,6 +2022,7 @@ static void mlx5e_close_channel(struct mlx5e_channel *c)
static void mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk,
struct mlx5e_rq_frags_info *info)
{
u32 byte_count = MLX5E_SW2HW_MTU(params, params->sw_mtu);
@@ -1953,10 +2035,10 @@ static void mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
byte_count += MLX5E_METADATA_ETHER_LEN;
#endif
- if (mlx5e_rx_is_linear_skb(params)) {
+ if (mlx5e_rx_is_linear_skb(params, xsk)) {
int frag_stride;
- frag_stride = mlx5e_rx_get_linear_frag_sz(params);
+ frag_stride = mlx5e_rx_get_linear_frag_sz(params, xsk);
frag_stride = roundup_pow_of_two(frag_stride);
info->arr[0].frag_size = byte_count;
@@ -2014,9 +2096,10 @@ static u8 mlx5e_get_rq_log_wq_sz(void *rqc)
return MLX5_GET(wq, wq, log_wq_sz);
}
-static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
- struct mlx5e_params *params,
- struct mlx5e_rq_param *param)
+void mlx5e_build_rq_param(struct mlx5e_priv *priv,
+ struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk,
+ struct mlx5e_rq_param *param)
{
struct mlx5_core_dev *mdev = priv->mdev;
void *rqc = param->rqc;
@@ -2026,16 +2109,16 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
switch (params->rq_wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
MLX5_SET(wq, wq, log_wqe_num_of_strides,
- mlx5e_mpwqe_get_log_num_strides(mdev, params) -
+ mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk) -
MLX5_MPWQE_LOG_NUM_STRIDES_BASE);
MLX5_SET(wq, wq, log_wqe_stride_size,
- mlx5e_mpwqe_get_log_stride_size(mdev, params) -
+ mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk) -
MLX5_MPWQE_LOG_STRIDE_SZ_BASE);
- MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(params));
+ MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(params, xsk));
break;
default: /* MLX5_WQ_TYPE_CYCLIC */
MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames);
- mlx5e_build_rq_frags_info(mdev, params, &param->frags_info);
+ mlx5e_build_rq_frags_info(mdev, params, xsk, &param->frags_info);
ndsegs = param->frags_info.num_frags;
}
@@ -2066,8 +2149,8 @@ static void mlx5e_build_drop_rq_param(struct mlx5e_priv *priv,
param->wq.buf_numa_node = dev_to_node(mdev->device);
}
-static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
- struct mlx5e_sq_param *param)
+void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
+ struct mlx5e_sq_param *param)
{
void *sqc = param->sqc;
void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
@@ -2103,9 +2186,10 @@ static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_128_PAD);
}
-static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
- struct mlx5e_params *params,
- struct mlx5e_cq_param *param)
+void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
+ struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk,
+ struct mlx5e_cq_param *param)
{
struct mlx5_core_dev *mdev = priv->mdev;
void *cqc = param->cqc;
@@ -2113,8 +2197,8 @@ static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
switch (params->rq_wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- log_cq_size = mlx5e_mpwqe_get_log_rq_size(params) +
- mlx5e_mpwqe_get_log_num_strides(mdev, params);
+ log_cq_size = mlx5e_mpwqe_get_log_rq_size(params, xsk) +
+ mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
break;
default: /* MLX5_WQ_TYPE_CYCLIC */
log_cq_size = params->log_rq_mtu_frames;
@@ -2130,9 +2214,9 @@ static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
param->cq_period_mode = params->rx_cq_moderation.cq_period_mode;
}
-static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
- struct mlx5e_params *params,
- struct mlx5e_cq_param *param)
+void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
+ struct mlx5e_params *params,
+ struct mlx5e_cq_param *param)
{
void *cqc = param->cqc;
@@ -2142,9 +2226,9 @@ static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
param->cq_period_mode = params->tx_cq_moderation.cq_period_mode;
}
-static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
- u8 log_wq_size,
- struct mlx5e_cq_param *param)
+void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
+ u8 log_wq_size,
+ struct mlx5e_cq_param *param)
{
void *cqc = param->cqc;
@@ -2152,12 +2236,12 @@ static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
mlx5e_build_common_cq_param(priv, param);
- param->cq_period_mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ param->cq_period_mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}
-static void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
- u8 log_wq_size,
- struct mlx5e_sq_param *param)
+void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
+ u8 log_wq_size,
+ struct mlx5e_sq_param *param)
{
void *sqc = param->sqc;
void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
@@ -2168,9 +2252,9 @@ static void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq));
}
-static void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv,
- struct mlx5e_params *params,
- struct mlx5e_sq_param *param)
+void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv,
+ struct mlx5e_params *params,
+ struct mlx5e_sq_param *param)
{
void *sqc = param->sqc;
void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
@@ -2198,14 +2282,14 @@ static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
{
u8 icosq_log_wq_sz;
- mlx5e_build_rq_param(priv, params, &cparam->rq);
+ mlx5e_build_rq_param(priv, params, NULL, &cparam->rq);
icosq_log_wq_sz = mlx5e_build_icosq_log_wq_sz(params, &cparam->rq);
mlx5e_build_sq_param(priv, params, &cparam->sq);
mlx5e_build_xdpsq_param(priv, params, &cparam->xdp_sq);
mlx5e_build_icosq_param(priv, icosq_log_wq_sz, &cparam->icosq);
- mlx5e_build_rx_cq_param(priv, params, &cparam->rx_cq);
+ mlx5e_build_rx_cq_param(priv, params, NULL, &cparam->rx_cq);
mlx5e_build_tx_cq_param(priv, params, &cparam->tx_cq);
mlx5e_build_ico_cq_param(priv, icosq_log_wq_sz, &cparam->icosq_cq);
}
@@ -2226,7 +2310,12 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
mlx5e_build_channel_param(priv, &chs->params, cparam);
for (i = 0; i < chs->num; i++) {
- err = mlx5e_open_channel(priv, i, &chs->params, cparam, &chs->c[i]);
+ struct xdp_umem *umem = NULL;
+
+ if (chs->params.xdp_prog)
+ umem = mlx5e_xsk_get_umem(&chs->params, chs->params.xsk, i);
+
+ err = mlx5e_open_channel(priv, i, &chs->params, cparam, umem, &chs->c[i]);
if (err)
goto err_close_channels;
}
@@ -2268,6 +2357,10 @@ static int mlx5e_wait_channels_min_rx_wqes(struct mlx5e_channels *chs)
int timeout = err ? 0 : MLX5E_RQ_WQES_TIMEOUT;
err |= mlx5e_wait_for_min_rx_wqes(&chs->c[i]->rq, timeout);
+
+ /* Don't wait on the XSK RQ, because the newer xdpsock sample
+ * doesn't provide any Fill Ring entries at the setup stage.
+ */
}
return err ? -ETIMEDOUT : 0;
@@ -2340,35 +2433,35 @@ int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv)
return err;
}
-int mlx5e_create_direct_rqts(struct mlx5e_priv *priv)
+int mlx5e_create_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs)
{
- struct mlx5e_rqt *rqt;
+ const int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
int err;
int ix;
- for (ix = 0; ix < mlx5e_get_netdev_max_channels(priv->netdev); ix++) {
- rqt = &priv->direct_tir[ix].rqt;
- err = mlx5e_create_rqt(priv, 1 /*size */, rqt);
- if (err)
+ for (ix = 0; ix < max_nch; ix++) {
+ err = mlx5e_create_rqt(priv, 1 /*size */, &tirs[ix].rqt);
+ if (unlikely(err))
goto err_destroy_rqts;
}
return 0;
err_destroy_rqts:
- mlx5_core_warn(priv->mdev, "create direct rqts failed, %d\n", err);
+ mlx5_core_warn(priv->mdev, "create rqts failed, %d\n", err);
for (ix--; ix >= 0; ix--)
- mlx5e_destroy_rqt(priv, &priv->direct_tir[ix].rqt);
+ mlx5e_destroy_rqt(priv, &tirs[ix].rqt);
return err;
}
-void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv)
+void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs)
{
+ const int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
int i;
- for (i = 0; i < mlx5e_get_netdev_max_channels(priv->netdev); i++)
- mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
+ for (i = 0; i < max_nch; i++)
+ mlx5e_destroy_rqt(priv, &tirs[i].rqt);
}
static int mlx5e_rx_hash_fn(int hfunc)
@@ -2788,11 +2881,12 @@ static void mlx5e_build_tx2sq_maps(struct mlx5e_priv *priv)
void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
{
int num_txqs = priv->channels.num * priv->channels.params.num_tc;
+ int num_rxqs = priv->channels.num * MLX5E_NUM_RQ_GROUPS;
struct net_device *netdev = priv->netdev;
mlx5e_netdev_set_tcs(netdev);
netif_set_real_num_tx_queues(netdev, num_txqs);
- netif_set_real_num_rx_queues(netdev, priv->channels.num);
+ netif_set_real_num_rx_queues(netdev, num_rxqs);
mlx5e_build_tx2sq_maps(priv);
mlx5e_activate_channels(&priv->channels);
@@ -2804,10 +2898,14 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
mlx5e_wait_channels_min_rx_wqes(&priv->channels);
mlx5e_redirect_rqts_to_channels(priv, &priv->channels);
+
+ mlx5e_xsk_redirect_rqts_to_channels(priv, &priv->channels);
}
void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
{
+ mlx5e_xsk_redirect_rqts_to_drop(priv, &priv->channels);
+
mlx5e_redirect_rqts_to_drop(priv);
if (mlx5e_is_vport_rep(priv))
@@ -2847,7 +2945,7 @@ static void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
if (hw_modify)
hw_modify(priv);
- mlx5e_refresh_tirs(priv, false);
+ priv->profile->update_rx(priv);
mlx5e_activate_priv_channels(priv);
/* return carrier back if needed */
@@ -2886,15 +2984,18 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv)
int mlx5e_open_locked(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ bool is_xdp = priv->channels.params.xdp_prog;
int err;
set_bit(MLX5E_STATE_OPENED, &priv->state);
+ if (is_xdp)
+ mlx5e_xdp_set_open(priv);
err = mlx5e_open_channels(priv, &priv->channels);
if (err)
goto err_clear_state_opened_flag;
- mlx5e_refresh_tirs(priv, false);
+ priv->profile->update_rx(priv);
mlx5e_activate_priv_channels(priv);
if (priv->profile->update_carrier)
priv->profile->update_carrier(priv);
@@ -2903,6 +3004,8 @@ int mlx5e_open_locked(struct net_device *netdev)
return 0;
err_clear_state_opened_flag:
+ if (is_xdp)
+ mlx5e_xdp_set_closed(priv);
clear_bit(MLX5E_STATE_OPENED, &priv->state);
return err;
}
@@ -2934,6 +3037,8 @@ int mlx5e_close_locked(struct net_device *netdev)
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
return 0;
+ if (priv->channels.params.xdp_prog)
+ mlx5e_xdp_set_closed(priv);
clear_bit(MLX5E_STATE_OPENED, &priv->state);
netif_carrier_off(priv->netdev);
@@ -3045,20 +3150,19 @@ void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq)
mlx5e_free_cq(&drop_rq->cq);
}
-int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc,
- u32 underlay_qpn, u32 *tisn)
+int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn)
{
- u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
- MLX5_SET(tisc, tisc, prio, tc << 1);
- MLX5_SET(tisc, tisc, underlay_qpn, underlay_qpn);
MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.td.tdn);
+ if (MLX5_GET(tisc, tisc, tls_en))
+ MLX5_SET(tisc, tisc, pd, mdev->mlx5e_res.pdn);
+
if (mlx5_lag_is_lacp_owner(mdev))
MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1);
- return mlx5_core_create_tis(mdev, in, sizeof(in), tisn);
+ return mlx5_core_create_tis(mdev, in, MLX5_ST_SZ_BYTES(create_tis_in), tisn);
}
void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn)
@@ -3072,7 +3176,14 @@ int mlx5e_create_tises(struct mlx5e_priv *priv)
int tc;
for (tc = 0; tc < priv->profile->max_tc; tc++) {
- err = mlx5e_create_tis(priv->mdev, tc, 0, &priv->tisn[tc]);
+ u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
+ void *tisc;
+
+ tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
+
+ MLX5_SET(tisc, tisc, prio, tc << 1);
+
+ err = mlx5e_create_tis(priv->mdev, in, &priv->tisn[tc]);
if (err)
goto err_close_tises;
}
@@ -3190,13 +3301,13 @@ err_destroy_inner_tirs:
return err;
}
-int mlx5e_create_direct_tirs(struct mlx5e_priv *priv)
+int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs)
{
- int nch = mlx5e_get_netdev_max_channels(priv->netdev);
+ const int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
struct mlx5e_tir *tir;
void *tirc;
int inlen;
- int err;
+ int err = 0;
u32 *in;
int ix;
@@ -3205,25 +3316,24 @@ int mlx5e_create_direct_tirs(struct mlx5e_priv *priv)
if (!in)
return -ENOMEM;
- for (ix = 0; ix < nch; ix++) {
+ for (ix = 0; ix < max_nch; ix++) {
memset(in, 0, inlen);
- tir = &priv->direct_tir[ix];
+ tir = &tirs[ix];
tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
- mlx5e_build_direct_tir_ctx(priv, priv->direct_tir[ix].rqt.rqtn, tirc);
+ mlx5e_build_direct_tir_ctx(priv, tir->rqt.rqtn, tirc);
err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
- if (err)
+ if (unlikely(err))
goto err_destroy_ch_tirs;
}
- kvfree(in);
-
- return 0;
+ goto out;
err_destroy_ch_tirs:
- mlx5_core_warn(priv->mdev, "create direct tirs failed, %d\n", err);
+ mlx5_core_warn(priv->mdev, "create tirs failed, %d\n", err);
for (ix--; ix >= 0; ix--)
- mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[ix]);
+ mlx5e_destroy_tir(priv->mdev, &tirs[ix]);
+out:
kvfree(in);
return err;
@@ -3243,13 +3353,13 @@ void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc)
mlx5e_destroy_tir(priv->mdev, &priv->inner_indir_tir[i]);
}
-void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv)
+void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs)
{
- int nch = mlx5e_get_netdev_max_channels(priv->netdev);
+ const int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
int i;
- for (i = 0; i < nch; i++)
- mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[i]);
+ for (i = 0; i < max_nch; i++)
+ mlx5e_destroy_tir(priv->mdev, &tirs[i]);
}
static int mlx5e_modify_channels_scatter_fcs(struct mlx5e_channels *chs, bool enable)
@@ -3316,17 +3426,17 @@ out:
#ifdef CONFIG_MLX5_ESWITCH
static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
- struct tc_cls_flower_offload *cls_flower,
+ struct flow_cls_offload *cls_flower,
int flags)
{
switch (cls_flower->command) {
- case TC_CLSFLOWER_REPLACE:
+ case FLOW_CLS_REPLACE:
return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
flags);
- case TC_CLSFLOWER_DESTROY:
+ case FLOW_CLS_DESTROY:
return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
flags);
- case TC_CLSFLOWER_STATS:
+ case FLOW_CLS_STATS:
return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
flags);
default:
@@ -3347,36 +3457,22 @@ static int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
return -EOPNOTSUPP;
}
}
-
-static int mlx5e_setup_tc_block(struct net_device *dev,
- struct tc_block_offload *f)
-{
- struct mlx5e_priv *priv = netdev_priv(dev);
-
- if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
- return -EOPNOTSUPP;
-
- switch (f->command) {
- case TC_BLOCK_BIND:
- return tcf_block_cb_register(f->block, mlx5e_setup_tc_block_cb,
- priv, priv, f->extack);
- case TC_BLOCK_UNBIND:
- tcf_block_cb_unregister(f->block, mlx5e_setup_tc_block_cb,
- priv);
- return 0;
- default:
- return -EOPNOTSUPP;
- }
-}
#endif
+static LIST_HEAD(mlx5e_block_cb_list);
+
static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
switch (type) {
#ifdef CONFIG_MLX5_ESWITCH
case TC_SETUP_BLOCK:
- return mlx5e_setup_tc_block(dev, type_data);
+ return flow_block_cb_setup_simple(type_data,
+ &mlx5e_block_cb_list,
+ mlx5e_setup_tc_block_cb,
+ priv, priv, true);
#endif
case TC_SETUP_QDISC_MQPRIO:
return mlx5e_setup_tc_mqprio(dev, type_data);
@@ -3391,11 +3487,12 @@ void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s)
for (i = 0; i < mlx5e_get_netdev_max_channels(priv->netdev); i++) {
struct mlx5e_channel_stats *channel_stats = &priv->channel_stats[i];
+ struct mlx5e_rq_stats *xskrq_stats = &channel_stats->xskrq;
struct mlx5e_rq_stats *rq_stats = &channel_stats->rq;
int j;
- s->rx_packets += rq_stats->packets;
- s->rx_bytes += rq_stats->bytes;
+ s->rx_packets += rq_stats->packets + xskrq_stats->packets;
+ s->rx_bytes += rq_stats->bytes + xskrq_stats->bytes;
for (j = 0; j < priv->max_opened_tc; j++) {
struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];
@@ -3494,6 +3591,13 @@ static int set_feature_lro(struct net_device *netdev, bool enable)
mutex_lock(&priv->state_lock);
+ if (enable && priv->xsk.refcnt) {
+ netdev_warn(netdev, "LRO is incompatible with AF_XDP (%hu XSKs are active)\n",
+ priv->xsk.refcnt);
+ err = -EINVAL;
+ goto out;
+ }
+
old_params = &priv->channels.params;
if (enable && !MLX5E_GET_PFLAG(old_params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
netdev_warn(netdev, "can't set LRO with legacy RQ\n");
@@ -3507,8 +3611,8 @@ static int set_feature_lro(struct net_device *netdev, bool enable)
new_channels.params.lro_en = enable;
if (old_params->rq_wq_type != MLX5_WQ_TYPE_CYCLIC) {
- if (mlx5e_rx_mpwqe_is_linear_skb(mdev, old_params) ==
- mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_channels.params))
+ if (mlx5e_rx_mpwqe_is_linear_skb(mdev, old_params, NULL) ==
+ mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_channels.params, NULL))
reset = false;
}
@@ -3698,6 +3802,43 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
return features;
}
+static bool mlx5e_xsk_validate_mtu(struct net_device *netdev,
+ struct mlx5e_channels *chs,
+ struct mlx5e_params *new_params,
+ struct mlx5_core_dev *mdev)
+{
+ u16 ix;
+
+ for (ix = 0; ix < chs->params.num_channels; ix++) {
+ struct xdp_umem *umem = mlx5e_xsk_get_umem(&chs->params, chs->params.xsk, ix);
+ struct mlx5e_xsk_param xsk;
+
+ if (!umem)
+ continue;
+
+ mlx5e_build_xsk_param(umem, &xsk);
+
+ if (!mlx5e_validate_xsk_param(new_params, &xsk, mdev)) {
+ u32 hr = mlx5e_get_linear_rq_headroom(new_params, &xsk);
+ int max_mtu_frame, max_mtu_page, max_mtu;
+
+ /* Two criteria must be met:
+ * 1. HW MTU + all headrooms <= XSK frame size.
+ * 2. Size of SKBs allocated on XDP_PASS <= PAGE_SIZE.
+ */
+ max_mtu_frame = MLX5E_HW2SW_MTU(new_params, xsk.chunk_size - hr);
+ max_mtu_page = mlx5e_xdp_max_mtu(new_params, &xsk);
+ max_mtu = min(max_mtu_frame, max_mtu_page);
+
+ netdev_err(netdev, "MTU %d is too big for an XSK running on channel %hu. Try MTU <= %d\n",
+ new_params->sw_mtu, ix, max_mtu);
+ return false;
+ }
+ }
+
+ return true;
+}
+
int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
change_hw_mtu_cb set_mtu_cb)
{
@@ -3718,18 +3859,31 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
new_channels.params.sw_mtu = new_mtu;
if (params->xdp_prog &&
- !mlx5e_rx_is_linear_skb(&new_channels.params)) {
+ !mlx5e_rx_is_linear_skb(&new_channels.params, NULL)) {
netdev_err(netdev, "MTU(%d) > %d is not allowed while XDP enabled\n",
- new_mtu, mlx5e_xdp_max_mtu(params));
+ new_mtu, mlx5e_xdp_max_mtu(params, NULL));
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (priv->xsk.refcnt &&
+ !mlx5e_xsk_validate_mtu(netdev, &priv->channels,
+ &new_channels.params, priv->mdev)) {
err = -EINVAL;
goto out;
}
if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
- bool is_linear = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev, &new_channels.params);
- u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params);
- u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_channels.params);
+ bool is_linear = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev,
+ &new_channels.params,
+ NULL);
+ u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params, NULL);
+ u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_channels.params, NULL);
+
+ /* If XSK is active, XSK RQs are linear. */
+ is_linear |= priv->xsk.refcnt;
+ /* Always reset in linear mode - hw_mtu is used in data path. */
reset = reset && (is_linear || (ppw_old != ppw_new));
}
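The two criteria spelled out in mlx5e_xsk_validate_mtu() above bound the MTU by both the XSK frame (chunk) size and PAGE_SIZE. The standalone sketch below works the arithmetic for one hypothetical configuration; the chunk size, headroom and the L2 overhead subtracted by the HW2SW conversion are assumed example values, and the real mlx5e_xdp_max_mtu() additionally reserves tailroom for skb_shared_info, which is ignored here.

	#include <stdio.h>

	#define EXAMPLE_PAGE_SIZE   4096
	#define EXAMPLE_HW2SW_DELTA 14	/* assumed L2 overhead removed by the HW2SW conversion */

	int main(void)
	{
		int chunk_size = 2048;	/* assumed XSK frame (chunk) size */
		int headroom   = 256;	/* assumed total headroom */

		/* Criterion 1: HW MTU + all headrooms must fit in one XSK frame. */
		int max_mtu_frame = chunk_size - headroom - EXAMPLE_HW2SW_DELTA;

		/* Criterion 2: an SKB built on XDP_PASS must fit in one page. */
		int max_mtu_page = EXAMPLE_PAGE_SIZE - headroom - EXAMPLE_HW2SW_DELTA;

		int max_mtu = max_mtu_frame < max_mtu_page ? max_mtu_frame : max_mtu_page;

		printf("max usable MTU in this example: %d\n", max_mtu);
		return 0;
	}

With these example numbers the frame-size criterion is the tighter one, which is why the netdev_err() above reports min(max_mtu_frame, max_mtu_page) as the suggested upper bound.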
@@ -4162,16 +4316,29 @@ static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
new_channels.params = priv->channels.params;
new_channels.params.xdp_prog = prog;
- if (!mlx5e_rx_is_linear_skb(&new_channels.params)) {
+ /* No XSK params: AF_XDP can't be enabled yet at the point of setting
+ * the XDP program.
+ */
+ if (!mlx5e_rx_is_linear_skb(&new_channels.params, NULL)) {
netdev_warn(netdev, "XDP is not allowed with MTU(%d) > %d\n",
new_channels.params.sw_mtu,
- mlx5e_xdp_max_mtu(&new_channels.params));
+ mlx5e_xdp_max_mtu(&new_channels.params, NULL));
return -EINVAL;
}
return 0;
}
+static int mlx5e_xdp_update_state(struct mlx5e_priv *priv)
+{
+ if (priv->channels.params.xdp_prog)
+ mlx5e_xdp_set_open(priv);
+ else
+ mlx5e_xdp_set_closed(priv);
+
+ return 0;
+}
+
static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -4192,8 +4359,6 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
/* no need for full reset when exchanging programs */
reset = (!priv->channels.params.xdp_prog || !prog);
- if (was_opened && reset)
- mlx5e_close_locked(netdev);
if (was_opened && !reset) {
/* num_channels is invariant here, so we can take the
* batched reference right upfront.
@@ -4205,20 +4370,31 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
}
}
- /* exchange programs, extra prog reference we got from caller
- * as long as we don't fail from this point onwards.
- */
- old_prog = xchg(&priv->channels.params.xdp_prog, prog);
+ if (was_opened && reset) {
+ struct mlx5e_channels new_channels = {};
+
+ new_channels.params = priv->channels.params;
+ new_channels.params.xdp_prog = prog;
+ mlx5e_set_rq_type(priv->mdev, &new_channels.params);
+ old_prog = priv->channels.params.xdp_prog;
+
+ err = mlx5e_safe_switch_channels(priv, &new_channels, mlx5e_xdp_update_state);
+ if (err)
+ goto unlock;
+ } else {
+ /* exchange programs, extra prog reference we got from caller
+ * as long as we don't fail from this point onwards.
+ */
+ old_prog = xchg(&priv->channels.params.xdp_prog, prog);
+ }
+
if (old_prog)
bpf_prog_put(old_prog);
- if (reset) /* change RQ type according to priv->xdp_prog */
+ if (!was_opened && reset) /* change RQ type according to priv->xdp_prog */
mlx5e_set_rq_type(priv->mdev, &priv->channels.params);
- if (was_opened && reset)
- err = mlx5e_open_locked(netdev);
-
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state) || reset)
+ if (!was_opened || reset)
goto unlock;
/* exchanging programs w/o reset, we update ref counts on behalf
@@ -4226,19 +4402,29 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
*/
for (i = 0; i < priv->channels.num; i++) {
struct mlx5e_channel *c = priv->channels.c[i];
+ bool xsk_open = test_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
clear_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
+ if (xsk_open)
+ clear_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
napi_synchronize(&c->napi);
/* prevent mlx5e_poll_rx_cq from accessing rq->xdp_prog */
old_prog = xchg(&c->rq.xdp_prog, prog);
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ if (xsk_open) {
+ old_prog = xchg(&c->xskrq.xdp_prog, prog);
+ if (old_prog)
+ bpf_prog_put(old_prog);
+ }
set_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
+ if (xsk_open)
+ set_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
/* napi_schedule in case we have missed anything */
napi_schedule(&c->napi);
-
- if (old_prog)
- bpf_prog_put(old_prog);
}
unlock:
@@ -4269,6 +4455,9 @@ static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp)
case XDP_QUERY_PROG:
xdp->prog_id = mlx5e_xdp_query(dev);
return 0;
+ case XDP_SETUP_XSK_UMEM:
+ return mlx5e_xsk_setup_umem(dev, xdp->xsk.umem,
+ xdp->xsk.queue_id);
default:
return -EINVAL;
}
@@ -4351,6 +4540,7 @@ const struct net_device_ops mlx5e_netdev_ops = {
.ndo_tx_timeout = mlx5e_tx_timeout,
.ndo_bpf = mlx5e_xdp,
.ndo_xdp_xmit = mlx5e_xdp_xmit,
+ .ndo_xsk_async_xmit = mlx5e_xsk_async_xmit,
#ifdef CONFIG_MLX5_EN_ARFS
.ndo_rx_flow_steer = mlx5e_rx_flow_steer,
#endif
@@ -4420,9 +4610,9 @@ static bool slow_pci_heuristic(struct mlx5_core_dev *mdev)
link_speed > MLX5E_SLOW_PCI_RATIO * pci_bw;
}
-static struct net_dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode)
+static struct dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode)
{
- struct net_dim_cq_moder moder;
+ struct dim_cq_moder moder;
moder.cq_period_mode = cq_period_mode;
moder.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
@@ -4433,9 +4623,9 @@ static struct net_dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode)
return moder;
}
-static struct net_dim_cq_moder mlx5e_get_def_rx_moderation(u8 cq_period_mode)
+static struct dim_cq_moder mlx5e_get_def_rx_moderation(u8 cq_period_mode)
{
- struct net_dim_cq_moder moder;
+ struct dim_cq_moder moder;
moder.cq_period_mode = cq_period_mode;
moder.pkts = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
@@ -4449,8 +4639,8 @@ static struct net_dim_cq_moder mlx5e_get_def_rx_moderation(u8 cq_period_mode)
static u8 mlx5_to_net_dim_cq_period_mode(u8 cq_period_mode)
{
return cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE ?
- NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE :
- NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ DIM_CQ_PERIOD_MODE_START_FROM_CQE :
+ DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}
void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
@@ -4502,11 +4692,13 @@ void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
* - Striding RQ configuration is not possible/supported.
* - Slow PCI heuristic.
* - Legacy RQ would use linear SKB while Striding RQ would use non-linear.
+ *
+ * No XSK params: checking the availability of striding RQ in general.
*/
if (!slow_pci_heuristic(mdev) &&
mlx5e_striding_rq_possible(mdev, params) &&
- (mlx5e_rx_mpwqe_is_linear_skb(mdev, params) ||
- !mlx5e_rx_is_linear_skb(params)))
+ (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ||
+ !mlx5e_rx_is_linear_skb(params, NULL)))
MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, true);
mlx5e_set_rq_type(mdev, params);
mlx5e_init_rq_type_params(mdev, params);
@@ -4528,6 +4720,7 @@ void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
}
void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
+ struct mlx5e_xsk *xsk,
struct mlx5e_rss_params *rss_params,
struct mlx5e_params *params,
u16 max_channels, u16 mtu)
@@ -4563,9 +4756,11 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
/* HW LRO */
/* TODO: && MLX5_CAP_ETH(mdev, lro_cap) */
- if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
- if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params))
+ if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
+ /* No XSK params: checking the availability of striding RQ in general. */
+ if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
params->lro_en = !slow_pci_heuristic(mdev);
+ }
params->lro_timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
/* CQ moderation params */
@@ -4584,13 +4779,16 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
mlx5e_build_rss_params(rss_params, params->num_channels);
params->tunneled_offload_en =
mlx5e_tunnel_inner_ft_supported(mdev);
+
+ /* AF_XDP */
+ params->xsk = xsk;
}
static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- mlx5_query_nic_vport_mac_address(priv->mdev, 0, netdev->dev_addr);
+ mlx5_query_mac_address(priv->mdev, netdev->dev_addr);
if (is_zero_ether_addr(netdev->dev_addr) &&
!MLX5_CAP_GEN(priv->mdev, vport_group_manager)) {
eth_hw_addr_random(netdev);
@@ -4619,14 +4817,18 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->ethtool_ops = &mlx5e_ethtool_ops;
netdev->vlan_features |= NETIF_F_SG;
- netdev->vlan_features |= NETIF_F_IP_CSUM;
- netdev->vlan_features |= NETIF_F_IPV6_CSUM;
+ netdev->vlan_features |= NETIF_F_HW_CSUM;
netdev->vlan_features |= NETIF_F_GRO;
netdev->vlan_features |= NETIF_F_TSO;
netdev->vlan_features |= NETIF_F_TSO6;
netdev->vlan_features |= NETIF_F_RXCSUM;
netdev->vlan_features |= NETIF_F_RXHASH;
+ netdev->mpls_features |= NETIF_F_SG;
+ netdev->mpls_features |= NETIF_F_HW_CSUM;
+ netdev->mpls_features |= NETIF_F_TSO;
+ netdev->mpls_features |= NETIF_F_TSO6;
+
netdev->hw_enc_features |= NETIF_F_HW_VLAN_CTAG_TX;
netdev->hw_enc_features |= NETIF_F_HW_VLAN_CTAG_RX;
@@ -4642,8 +4844,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev) ||
MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) {
- netdev->hw_enc_features |= NETIF_F_IP_CSUM;
- netdev->hw_enc_features |= NETIF_F_IPV6_CSUM;
+ netdev->hw_enc_features |= NETIF_F_HW_CSUM;
netdev->hw_enc_features |= NETIF_F_TSO;
netdev->hw_enc_features |= NETIF_F_TSO6;
netdev->hw_enc_features |= NETIF_F_GSO_PARTIAL;
@@ -4756,7 +4957,7 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
if (err)
return err;
- mlx5e_build_nic_params(mdev, rss, &priv->channels.params,
+ mlx5e_build_nic_params(mdev, &priv->xsk, rss, &priv->channels.params,
mlx5e_get_netdev_max_channels(netdev),
netdev->mtu);
@@ -4798,7 +4999,7 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
if (err)
goto err_close_drop_rq;
- err = mlx5e_create_direct_rqts(priv);
+ err = mlx5e_create_direct_rqts(priv, priv->direct_tir);
if (err)
goto err_destroy_indirect_rqts;
@@ -4806,14 +5007,22 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
if (err)
goto err_destroy_direct_rqts;
- err = mlx5e_create_direct_tirs(priv);
+ err = mlx5e_create_direct_tirs(priv, priv->direct_tir);
if (err)
goto err_destroy_indirect_tirs;
+ err = mlx5e_create_direct_rqts(priv, priv->xsk_tir);
+ if (unlikely(err))
+ goto err_destroy_direct_tirs;
+
+ err = mlx5e_create_direct_tirs(priv, priv->xsk_tir);
+ if (unlikely(err))
+ goto err_destroy_xsk_rqts;
+
err = mlx5e_create_flow_steering(priv);
if (err) {
mlx5_core_warn(mdev, "create flow steering failed, %d\n", err);
- goto err_destroy_direct_tirs;
+ goto err_destroy_xsk_tirs;
}
err = mlx5e_tc_nic_init(priv);
@@ -4824,12 +5033,16 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
err_destroy_flow_steering:
mlx5e_destroy_flow_steering(priv);
+err_destroy_xsk_tirs:
+ mlx5e_destroy_direct_tirs(priv, priv->xsk_tir);
+err_destroy_xsk_rqts:
+ mlx5e_destroy_direct_rqts(priv, priv->xsk_tir);
err_destroy_direct_tirs:
- mlx5e_destroy_direct_tirs(priv);
+ mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
err_destroy_indirect_tirs:
mlx5e_destroy_indirect_tirs(priv, true);
err_destroy_direct_rqts:
- mlx5e_destroy_direct_rqts(priv);
+ mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
err_destroy_indirect_rqts:
mlx5e_destroy_rqt(priv, &priv->indir_rqt);
err_close_drop_rq:
@@ -4843,9 +5056,11 @@ static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
{
mlx5e_tc_nic_cleanup(priv);
mlx5e_destroy_flow_steering(priv);
- mlx5e_destroy_direct_tirs(priv);
+ mlx5e_destroy_direct_tirs(priv, priv->xsk_tir);
+ mlx5e_destroy_direct_rqts(priv, priv->xsk_tir);
+ mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
mlx5e_destroy_indirect_tirs(priv, true);
- mlx5e_destroy_direct_rqts(priv);
+ mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
mlx5e_destroy_rqt(priv, &priv->indir_rqt);
mlx5e_close_drop_rq(&priv->drop_rq);
mlx5e_destroy_q_counters(priv);
@@ -4927,6 +5142,11 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
mlx5_lag_remove(mdev);
}
+int mlx5e_update_nic_rx(struct mlx5e_priv *priv)
+{
+ return mlx5e_refresh_tirs(priv, false);
+}
+
static const struct mlx5e_profile mlx5e_nic_profile = {
.init = mlx5e_nic_init,
.cleanup = mlx5e_nic_cleanup,
@@ -4936,6 +5156,7 @@ static const struct mlx5e_profile mlx5e_nic_profile = {
.cleanup_tx = mlx5e_cleanup_nic_tx,
.enable = mlx5e_nic_enable,
.disable = mlx5e_nic_disable,
+ .update_rx = mlx5e_update_nic_rx,
.update_stats = mlx5e_update_ndo_stats,
.update_carrier = mlx5e_update_carrier,
.rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe,
@@ -4995,7 +5216,7 @@ struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
nch * profile->max_tc,
- nch);
+ nch * MLX5E_NUM_RQ_GROUPS);
if (!netdev) {
mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
return NULL;
@@ -5133,7 +5354,7 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev)
#ifdef CONFIG_MLX5_ESWITCH
if (MLX5_ESWITCH_MANAGER(mdev) &&
- mlx5_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) {
+ mlx5_eswitch_mode(mdev->priv.eswitch) == MLX5_ESWITCH_OFFLOADS) {
mlx5e_rep_register_vport_reps(mdev);
return mdev;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 2f406b161bcf..10ef90a7bddd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -37,6 +37,7 @@
#include <net/act_api.h>
#include <net/netevent.h>
#include <net/arp.h>
+#include <net/devlink.h>
#include "eswitch.h"
#include "en.h"
@@ -128,7 +129,7 @@ static void mlx5e_rep_get_strings(struct net_device *dev,
}
}
-static void mlx5e_vf_rep_update_hw_counters(struct mlx5e_priv *priv)
+static void mlx5e_rep_update_hw_counters(struct mlx5e_priv *priv)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
@@ -166,17 +167,6 @@ static void mlx5e_uplink_rep_update_hw_counters(struct mlx5e_priv *priv)
vport_stats->tx_bytes = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
}
-static void mlx5e_rep_update_hw_counters(struct mlx5e_priv *priv)
-{
- struct mlx5e_rep_priv *rpriv = priv->ppriv;
- struct mlx5_eswitch_rep *rep = rpriv->rep;
-
- if (rep->vport == MLX5_VPORT_UPLINK)
- mlx5e_uplink_rep_update_hw_counters(priv);
- else
- mlx5e_vf_rep_update_hw_counters(priv);
-}
-
static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv)
{
struct mlx5e_sw_stats *s = &priv->stats.sw;
@@ -203,7 +193,7 @@ static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
mutex_lock(&priv->state_lock);
mlx5e_rep_update_sw_counters(priv);
- mlx5e_rep_update_hw_counters(priv);
+ priv->profile->update_stats(priv);
mutex_unlock(&priv->state_lock);
for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
@@ -363,7 +353,7 @@ static int mlx5e_uplink_rep_set_link_ksettings(struct net_device *netdev,
return mlx5e_ethtool_set_link_ksettings(priv, link_ksettings);
}
-static const struct ethtool_ops mlx5e_vf_rep_ethtool_ops = {
+static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
.get_drvinfo = mlx5e_rep_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_strings = mlx5e_rep_get_strings,
@@ -402,30 +392,19 @@ static const struct ethtool_ops mlx5e_uplink_rep_ethtool_ops = {
static int mlx5e_rep_get_port_parent_id(struct net_device *dev,
struct netdev_phys_item_id *ppid)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct net_device *uplink_upper = NULL;
- struct mlx5e_priv *uplink_priv = NULL;
- struct net_device *uplink_dev;
-
- if (esw->mode == SRIOV_NONE)
- return -EOPNOTSUPP;
+ struct mlx5_eswitch *esw;
+ struct mlx5e_priv *priv;
+ u64 parent_id;
- uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
- if (uplink_dev) {
- uplink_upper = netdev_master_upper_dev_get(uplink_dev);
- uplink_priv = netdev_priv(uplink_dev);
- }
+ priv = netdev_priv(dev);
+ esw = priv->mdev->priv.eswitch;
- ppid->id_len = ETH_ALEN;
- if (uplink_upper && mlx5_lag_is_sriov(uplink_priv->mdev)) {
- ether_addr_copy(ppid->id, uplink_upper->dev_addr);
- } else {
- struct mlx5e_rep_priv *rpriv = priv->ppriv;
- struct mlx5_eswitch_rep *rep = rpriv->rep;
+ if (esw->mode == MLX5_ESWITCH_NONE)
+ return -EOPNOTSUPP;
- ether_addr_copy(ppid->id, rep->hw_id);
- }
+ parent_id = mlx5_query_nic_system_image_guid(priv->mdev);
+ ppid->id_len = sizeof(parent_id);
+ memcpy(ppid->id, &parent_id, sizeof(parent_id));
return 0;
}
@@ -436,7 +415,7 @@ static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
struct mlx5e_rep_sq *rep_sq, *tmp;
struct mlx5e_rep_priv *rpriv;
- if (esw->mode != SRIOV_OFFLOADS)
+ if (esw->mode != MLX5_ESWITCH_OFFLOADS)
return;
rpriv = mlx5e_rep_to_rep_priv(rep);
@@ -457,7 +436,7 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
int err;
int i;
- if (esw->mode != SRIOV_OFFLOADS)
+ if (esw->mode != MLX5_ESWITCH_OFFLOADS)
return 0;
rpriv = mlx5e_rep_to_rep_priv(rep);
@@ -677,7 +656,7 @@ static void mlx5e_rep_indr_clean_block_privs(struct mlx5e_rep_priv *rpriv)
static int
mlx5e_rep_indr_offload(struct net_device *netdev,
- struct tc_cls_flower_offload *flower,
+ struct flow_cls_offload *flower,
struct mlx5e_rep_indr_block_priv *indr_priv)
{
struct mlx5e_priv *priv = netdev_priv(indr_priv->rpriv->netdev);
@@ -685,13 +664,13 @@ mlx5e_rep_indr_offload(struct net_device *netdev,
int err = 0;
switch (flower->command) {
- case TC_CLSFLOWER_REPLACE:
+ case FLOW_CLS_REPLACE:
err = mlx5e_configure_flower(netdev, priv, flower, flags);
break;
- case TC_CLSFLOWER_DESTROY:
+ case FLOW_CLS_DESTROY:
err = mlx5e_delete_flower(netdev, priv, flower, flags);
break;
- case TC_CLSFLOWER_STATS:
+ case FLOW_CLS_STATS:
err = mlx5e_stats_flower(netdev, priv, flower, flags);
break;
default:
@@ -714,23 +693,39 @@ static int mlx5e_rep_indr_setup_block_cb(enum tc_setup_type type,
}
}
+static void mlx5e_rep_indr_tc_block_unbind(void *cb_priv)
+{
+ struct mlx5e_rep_indr_block_priv *indr_priv = cb_priv;
+
+ list_del(&indr_priv->list);
+ kfree(indr_priv);
+}
+
+static LIST_HEAD(mlx5e_block_cb_list);
+
static int
mlx5e_rep_indr_setup_tc_block(struct net_device *netdev,
struct mlx5e_rep_priv *rpriv,
- struct tc_block_offload *f)
+ struct flow_block_offload *f)
{
struct mlx5e_rep_indr_block_priv *indr_priv;
- int err = 0;
+ struct flow_block_cb *block_cb;
- if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
return -EOPNOTSUPP;
+ f->driver_block_list = &mlx5e_block_cb_list;
+
switch (f->command) {
- case TC_BLOCK_BIND:
+ case FLOW_BLOCK_BIND:
indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
if (indr_priv)
return -EEXIST;
+ if (flow_block_cb_is_busy(mlx5e_rep_indr_setup_block_cb,
+ indr_priv, &mlx5e_block_cb_list))
+ return -EBUSY;
+
indr_priv = kmalloc(sizeof(*indr_priv), GFP_KERNEL);
if (!indr_priv)
return -ENOMEM;
@@ -740,26 +735,32 @@ mlx5e_rep_indr_setup_tc_block(struct net_device *netdev,
list_add(&indr_priv->list,
&rpriv->uplink_priv.tc_indr_block_priv_list);
- err = tcf_block_cb_register(f->block,
- mlx5e_rep_indr_setup_block_cb,
- indr_priv, indr_priv, f->extack);
- if (err) {
+ block_cb = flow_block_cb_alloc(f->net,
+ mlx5e_rep_indr_setup_block_cb,
+ indr_priv, indr_priv,
+ mlx5e_rep_indr_tc_block_unbind);
+ if (IS_ERR(block_cb)) {
list_del(&indr_priv->list);
kfree(indr_priv);
+ return PTR_ERR(block_cb);
}
+ flow_block_cb_add(block_cb, f);
+ list_add_tail(&block_cb->driver_list, &mlx5e_block_cb_list);
- return err;
- case TC_BLOCK_UNBIND:
+ return 0;
+ case FLOW_BLOCK_UNBIND:
indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
if (!indr_priv)
return -ENOENT;
- tcf_block_cb_unregister(f->block,
- mlx5e_rep_indr_setup_block_cb,
- indr_priv);
- list_del(&indr_priv->list);
- kfree(indr_priv);
+ block_cb = flow_block_cb_lookup(f,
+ mlx5e_rep_indr_setup_block_cb,
+ indr_priv);
+ if (!block_cb)
+ return -ENOENT;
+ flow_block_cb_remove(block_cb, f);
+ list_del(&block_cb->driver_list);
return 0;
default:
return -EOPNOTSUPP;
@@ -1101,7 +1102,7 @@ void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
mlx5_tun_entropy_refcount_dec(tun_entropy, e->reformat_type);
}
-static int mlx5e_vf_rep_open(struct net_device *dev)
+static int mlx5e_rep_open(struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_rep_priv *rpriv = priv->ppriv;
@@ -1124,7 +1125,7 @@ unlock:
return err;
}
-static int mlx5e_vf_rep_close(struct net_device *dev)
+static int mlx5e_rep_close(struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_rep_priv *rpriv = priv->ppriv;
@@ -1141,42 +1142,18 @@ static int mlx5e_vf_rep_close(struct net_device *dev)
return ret;
}
-static int mlx5e_rep_get_phys_port_name(struct net_device *dev,
- char *buf, size_t len)
-{
- struct mlx5e_priv *priv = netdev_priv(dev);
- struct mlx5e_rep_priv *rpriv = priv->ppriv;
- struct mlx5_eswitch_rep *rep = rpriv->rep;
- unsigned int fn;
- int ret;
-
- fn = PCI_FUNC(priv->mdev->pdev->devfn);
- if (fn >= MLX5_MAX_PORTS)
- return -EOPNOTSUPP;
-
- if (rep->vport == MLX5_VPORT_UPLINK)
- ret = snprintf(buf, len, "p%d", fn);
- else
- ret = snprintf(buf, len, "pf%dvf%d", fn, rep->vport - 1);
-
- if (ret >= len)
- return -EOPNOTSUPP;
-
- return 0;
-}
-
static int
mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv,
- struct tc_cls_flower_offload *cls_flower, int flags)
+ struct flow_cls_offload *cls_flower, int flags)
{
switch (cls_flower->command) {
- case TC_CLSFLOWER_REPLACE:
+ case FLOW_CLS_REPLACE:
return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
flags);
- case TC_CLSFLOWER_DESTROY:
+ case FLOW_CLS_DESTROY:
return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
flags);
- case TC_CLSFLOWER_STATS:
+ case FLOW_CLS_STATS:
return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
flags);
default:
@@ -1198,32 +1175,16 @@ static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
}
}
-static int mlx5e_rep_setup_tc_block(struct net_device *dev,
- struct tc_block_offload *f)
-{
- struct mlx5e_priv *priv = netdev_priv(dev);
-
- if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
- return -EOPNOTSUPP;
-
- switch (f->command) {
- case TC_BLOCK_BIND:
- return tcf_block_cb_register(f->block, mlx5e_rep_setup_tc_cb,
- priv, priv, f->extack);
- case TC_BLOCK_UNBIND:
- tcf_block_cb_unregister(f->block, mlx5e_rep_setup_tc_cb, priv);
- return 0;
- default:
- return -EOPNOTSUPP;
- }
-}
-
static int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
switch (type) {
case TC_SETUP_BLOCK:
- return mlx5e_rep_setup_tc_block(dev, type_data);
+ return flow_block_cb_setup_simple(type_data, NULL,
+ mlx5e_rep_setup_tc_cb,
+ priv, priv, true);
default:
return -EOPNOTSUPP;
}
@@ -1276,7 +1237,7 @@ static int mlx5e_rep_get_offload_stats(int attr_id, const struct net_device *dev
}
static void
-mlx5e_vf_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
+mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
struct mlx5e_priv *priv = netdev_priv(dev);
@@ -1285,7 +1246,7 @@ mlx5e_vf_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
memcpy(stats, &priv->stats.vf_vport, sizeof(*stats));
}
-static int mlx5e_vf_rep_change_mtu(struct net_device *netdev, int new_mtu)
+static int mlx5e_rep_change_mtu(struct net_device *netdev, int new_mtu)
{
return mlx5e_change_mtu(netdev, new_mtu, NULL);
}
@@ -1318,17 +1279,24 @@ static int mlx5e_uplink_rep_set_vf_vlan(struct net_device *dev, int vf, u16 vlan
return 0;
}
-static const struct net_device_ops mlx5e_netdev_ops_vf_rep = {
- .ndo_open = mlx5e_vf_rep_open,
- .ndo_stop = mlx5e_vf_rep_close,
+static struct devlink_port *mlx5e_get_devlink_port(struct net_device *dev)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+
+ return &rpriv->dl_port;
+}
+
+static const struct net_device_ops mlx5e_netdev_ops_rep = {
+ .ndo_open = mlx5e_rep_open,
+ .ndo_stop = mlx5e_rep_close,
.ndo_start_xmit = mlx5e_xmit,
- .ndo_get_phys_port_name = mlx5e_rep_get_phys_port_name,
.ndo_setup_tc = mlx5e_rep_setup_tc,
- .ndo_get_stats64 = mlx5e_vf_rep_get_stats,
+ .ndo_get_devlink_port = mlx5e_get_devlink_port,
+ .ndo_get_stats64 = mlx5e_rep_get_stats,
.ndo_has_offload_stats = mlx5e_rep_has_offload_stats,
.ndo_get_offload_stats = mlx5e_rep_get_offload_stats,
- .ndo_change_mtu = mlx5e_vf_rep_change_mtu,
- .ndo_get_port_parent_id = mlx5e_rep_get_port_parent_id,
+ .ndo_change_mtu = mlx5e_rep_change_mtu,
};
static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = {
@@ -1336,8 +1304,8 @@ static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = {
.ndo_stop = mlx5e_close,
.ndo_start_xmit = mlx5e_xmit,
.ndo_set_mac_address = mlx5e_uplink_rep_set_mac,
- .ndo_get_phys_port_name = mlx5e_rep_get_phys_port_name,
.ndo_setup_tc = mlx5e_rep_setup_tc,
+ .ndo_get_devlink_port = mlx5e_get_devlink_port,
.ndo_get_stats64 = mlx5e_get_stats,
.ndo_has_offload_stats = mlx5e_rep_has_offload_stats,
.ndo_get_offload_stats = mlx5e_rep_get_offload_stats,
@@ -1350,13 +1318,12 @@ static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = {
.ndo_get_vf_config = mlx5e_get_vf_config,
.ndo_get_vf_stats = mlx5e_get_vf_stats,
.ndo_set_vf_vlan = mlx5e_uplink_rep_set_vf_vlan,
- .ndo_get_port_parent_id = mlx5e_rep_get_port_parent_id,
.ndo_set_features = mlx5e_set_features,
};
bool mlx5e_eswitch_rep(struct net_device *netdev)
{
- if (netdev->netdev_ops == &mlx5e_netdev_ops_vf_rep ||
+ if (netdev->netdev_ops == &mlx5e_netdev_ops_rep ||
netdev->netdev_ops == &mlx5e_netdev_ops_uplink_rep)
return true;
@@ -1412,16 +1379,16 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev)
SET_NETDEV_DEV(netdev, mdev->device);
netdev->netdev_ops = &mlx5e_netdev_ops_uplink_rep;
/* we want a persistent mac for the uplink rep */
- mlx5_query_nic_vport_mac_address(mdev, 0, netdev->dev_addr);
+ mlx5_query_mac_address(mdev, netdev->dev_addr);
netdev->ethtool_ops = &mlx5e_uplink_rep_ethtool_ops;
#ifdef CONFIG_MLX5_CORE_EN_DCB
if (MLX5_CAP_GEN(mdev, qos))
netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
#endif
} else {
- netdev->netdev_ops = &mlx5e_netdev_ops_vf_rep;
+ netdev->netdev_ops = &mlx5e_netdev_ops_rep;
eth_hw_addr_random(netdev);
- netdev->ethtool_ops = &mlx5e_vf_rep_ethtool_ops;
+ netdev->ethtool_ops = &mlx5e_rep_ethtool_ops;
}
netdev->watchdog_timeo = 15 * HZ;
@@ -1530,7 +1497,7 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
if (err)
goto err_close_drop_rq;
- err = mlx5e_create_direct_rqts(priv);
+ err = mlx5e_create_direct_rqts(priv, priv->direct_tir);
if (err)
goto err_destroy_indirect_rqts;
@@ -1538,7 +1505,7 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
if (err)
goto err_destroy_direct_rqts;
- err = mlx5e_create_direct_tirs(priv);
+ err = mlx5e_create_direct_tirs(priv, priv->direct_tir);
if (err)
goto err_destroy_indirect_tirs;
@@ -1555,11 +1522,11 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
err_destroy_ttc_table:
mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
err_destroy_direct_tirs:
- mlx5e_destroy_direct_tirs(priv);
+ mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
err_destroy_indirect_tirs:
mlx5e_destroy_indirect_tirs(priv, false);
err_destroy_direct_rqts:
- mlx5e_destroy_direct_rqts(priv);
+ mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
err_destroy_indirect_rqts:
mlx5e_destroy_rqt(priv, &priv->indir_rqt);
err_close_drop_rq:
@@ -1573,9 +1540,9 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
mlx5_del_flow_rules(rpriv->vport_rx_rule);
mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
- mlx5e_destroy_direct_tirs(priv);
+ mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
mlx5e_destroy_indirect_tirs(priv, false);
- mlx5e_destroy_direct_rqts(priv);
+ mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
mlx5e_destroy_rqt(priv, &priv->indir_rqt);
mlx5e_close_drop_rq(&priv->drop_rq);
}
@@ -1642,11 +1609,16 @@ static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
}
}
-static void mlx5e_vf_rep_enable(struct mlx5e_priv *priv)
+static void mlx5e_rep_enable(struct mlx5e_priv *priv)
{
mlx5e_set_netdev_mtu_boundaries(priv);
}
+static int mlx5e_update_rep_rx(struct mlx5e_priv *priv)
+{
+ return 0;
+}
+
static int uplink_rep_async_event(struct notifier_block *nb, unsigned long event, void *data)
{
struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, events_nb);
@@ -1714,15 +1686,16 @@ static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
mlx5_lag_remove(mdev);
}
-static const struct mlx5e_profile mlx5e_vf_rep_profile = {
+static const struct mlx5e_profile mlx5e_rep_profile = {
.init = mlx5e_init_rep,
.cleanup = mlx5e_cleanup_rep,
.init_rx = mlx5e_init_rep_rx,
.cleanup_rx = mlx5e_cleanup_rep_rx,
.init_tx = mlx5e_init_rep_tx,
.cleanup_tx = mlx5e_cleanup_rep_tx,
- .enable = mlx5e_vf_rep_enable,
- .update_stats = mlx5e_vf_rep_update_hw_counters,
+ .enable = mlx5e_rep_enable,
+ .update_rx = mlx5e_update_rep_rx,
+ .update_stats = mlx5e_rep_update_hw_counters,
.rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
.rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
.max_tc = 1,
@@ -1737,6 +1710,7 @@ static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
.cleanup_tx = mlx5e_cleanup_rep_tx,
.enable = mlx5e_uplink_rep_enable,
.disable = mlx5e_uplink_rep_disable,
+ .update_rx = mlx5e_update_rep_rx,
.update_stats = mlx5e_uplink_rep_update_hw_counters,
.update_carrier = mlx5e_update_carrier,
.rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
@@ -1744,6 +1718,55 @@ static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
.max_tc = MLX5E_MAX_NUM_TC,
};
+static bool
+is_devlink_port_supported(const struct mlx5_core_dev *dev,
+ const struct mlx5e_rep_priv *rpriv)
+{
+ return rpriv->rep->vport == MLX5_VPORT_UPLINK ||
+ rpriv->rep->vport == MLX5_VPORT_PF ||
+ mlx5_eswitch_is_vf_vport(dev->priv.eswitch, rpriv->rep->vport);
+}
+
+static int register_devlink_port(struct mlx5_core_dev *dev,
+ struct mlx5e_rep_priv *rpriv)
+{
+ struct devlink *devlink = priv_to_devlink(dev);
+ struct mlx5_eswitch_rep *rep = rpriv->rep;
+ struct netdev_phys_item_id ppid = {};
+ int ret;
+
+ if (!is_devlink_port_supported(dev, rpriv))
+ return 0;
+
+ ret = mlx5e_rep_get_port_parent_id(rpriv->netdev, &ppid);
+ if (ret)
+ return ret;
+
+ if (rep->vport == MLX5_VPORT_UPLINK)
+ devlink_port_attrs_set(&rpriv->dl_port,
+ DEVLINK_PORT_FLAVOUR_PHYSICAL,
+ PCI_FUNC(dev->pdev->devfn), false, 0,
+ &ppid.id[0], ppid.id_len);
+ else if (rep->vport == MLX5_VPORT_PF)
+ devlink_port_attrs_pci_pf_set(&rpriv->dl_port,
+ &ppid.id[0], ppid.id_len,
+ dev->pdev->devfn);
+ else if (mlx5_eswitch_is_vf_vport(dev->priv.eswitch, rpriv->rep->vport))
+ devlink_port_attrs_pci_vf_set(&rpriv->dl_port,
+ &ppid.id[0], ppid.id_len,
+ dev->pdev->devfn,
+ rep->vport - 1);
+
+ return devlink_port_register(devlink, &rpriv->dl_port, rep->vport);
+}
+
+static void unregister_devlink_port(struct mlx5_core_dev *dev,
+ struct mlx5e_rep_priv *rpriv)
+{
+ if (is_devlink_port_supported(dev, rpriv))
+ devlink_port_unregister(&rpriv->dl_port);
+}
+
/* e-Switch vport representors */
static int
mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
@@ -1761,7 +1784,8 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
rpriv->rep = rep;
nch = mlx5e_get_max_num_channels(dev);
- profile = (rep->vport == MLX5_VPORT_UPLINK) ? &mlx5e_uplink_rep_profile : &mlx5e_vf_rep_profile;
+ profile = (rep->vport == MLX5_VPORT_UPLINK) ?
+ &mlx5e_uplink_rep_profile : &mlx5e_rep_profile;
netdev = mlx5e_create_netdev(dev, profile, nch, rpriv);
if (!netdev) {
pr_warn("Failed to create representor netdev for vport %d\n",
@@ -1771,7 +1795,7 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
}
rpriv->netdev = netdev;
- rep->rep_if[REP_ETH].priv = rpriv;
+ rep->rep_data[REP_ETH].priv = rpriv;
INIT_LIST_HEAD(&rpriv->vport_sqs_list);
if (rep->vport == MLX5_VPORT_UPLINK) {
@@ -1794,15 +1818,27 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
goto err_detach_netdev;
}
+ err = register_devlink_port(dev, rpriv);
+ if (err) {
+ esw_warn(dev, "Failed to register devlink port %d\n",
+ rep->vport);
+ goto err_neigh_cleanup;
+ }
+
err = register_netdev(netdev);
if (err) {
pr_warn("Failed to register representor netdev for vport %d\n",
rep->vport);
- goto err_neigh_cleanup;
+ goto err_devlink_cleanup;
}
+ if (is_devlink_port_supported(dev, rpriv))
+ devlink_port_type_eth_set(&rpriv->dl_port, netdev);
return 0;
+err_devlink_cleanup:
+ unregister_devlink_port(dev, rpriv);
+
err_neigh_cleanup:
mlx5e_rep_neigh_cleanup(rpriv);
@@ -1825,9 +1861,13 @@ mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
struct net_device *netdev = rpriv->netdev;
struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *dev = priv->mdev;
void *ppriv = priv->ppriv;
+ if (is_devlink_port_supported(dev, rpriv))
+ devlink_port_type_clear(&rpriv->dl_port);
unregister_netdev(netdev);
+ unregister_devlink_port(dev, rpriv);
mlx5e_rep_neigh_cleanup(rpriv);
mlx5e_detach_netdev(priv);
if (rep->vport == MLX5_VPORT_UPLINK)
@@ -1845,16 +1885,17 @@ static void *mlx5e_vport_rep_get_proto_dev(struct mlx5_eswitch_rep *rep)
return rpriv->netdev;
}
+static const struct mlx5_eswitch_rep_ops rep_ops = {
+ .load = mlx5e_vport_rep_load,
+ .unload = mlx5e_vport_rep_unload,
+ .get_proto_dev = mlx5e_vport_rep_get_proto_dev
+};
+
void mlx5e_rep_register_vport_reps(struct mlx5_core_dev *mdev)
{
struct mlx5_eswitch *esw = mdev->priv.eswitch;
- struct mlx5_eswitch_rep_if rep_if = {};
-
- rep_if.load = mlx5e_vport_rep_load;
- rep_if.unload = mlx5e_vport_rep_unload;
- rep_if.get_proto_dev = mlx5e_vport_rep_get_proto_dev;
- mlx5_eswitch_register_vport_reps(esw, &rep_if, REP_ETH);
+ mlx5_eswitch_register_vport_reps(esw, &rep_ops, REP_ETH);
}
void mlx5e_rep_unregister_vport_reps(struct mlx5_core_dev *mdev)
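For context on the en_rep.c changes above: the per-rep .ndo_get_phys_port_name and .ndo_get_port_parent_id callbacks are dropped in favour of .ndo_get_devlink_port plus a devlink_port registered for every representor, so the core can derive the port name and parent ID from the registered port itself. A minimal sketch of such a callback; the foo_* names are placeholders, not anything from this patch:

	static struct devlink_port *foo_get_devlink_port(struct net_device *netdev)
	{
		struct foo_priv *priv = netdev_priv(netdev);

		/* dl_port was registered with devlink_port_register() and bound
		 * to this netdev via devlink_port_type_eth_set(); handing it
		 * back lets the core answer phys_port_name / port_parent_id
		 * queries on its own.
		 */
		return &priv->dl_port;
	}

This is presumably the role mlx5e_get_devlink_port (added elsewhere in this series) plays for the rep netdevs above.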
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
index 83b573b1abac..c56e6ee4350c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -86,12 +86,13 @@ struct mlx5e_rep_priv {
struct mlx5_flow_handle *vport_rx_rule;
struct list_head vport_sqs_list;
struct mlx5_rep_uplink_priv uplink_priv; /* valid for uplink rep */
+ struct devlink_port dl_port;
};
static inline
struct mlx5e_rep_priv *mlx5e_rep_to_rep_priv(struct mlx5_eswitch_rep *rep)
{
- return (struct mlx5e_rep_priv *)rep->rep_if[REP_ETH].priv;
+ return rep->rep_data[REP_ETH].priv;
}
struct mlx5e_neigh {
@@ -150,13 +151,12 @@ struct mlx5e_encap_entry {
struct hlist_node encap_hlist;
struct list_head flows;
u32 encap_id;
- struct ip_tunnel_info tun_info;
+ const struct ip_tunnel_info *tun_info;
unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
struct net_device *out_dev;
struct net_device *route_dev;
- int tunnel_type;
- int tunnel_hlen;
+ struct mlx5e_tc_tunnel *tunnel;
int reformat_type;
u8 flags;
char *encap_header;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 13133e7f088e..56a2f4666c47 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -34,6 +34,7 @@
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
+#include <linux/indirect_call_wrapper.h>
#include <net/ip6_checksum.h>
#include <net/page_pool.h>
#include <net/inet_ecn.h>
@@ -46,6 +47,7 @@
#include "en_accel/tls_rxtx.h"
#include "lib/clock.h"
#include "en/xdp.h"
+#include "en/xsk/rx.h"
static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config)
{
@@ -234,8 +236,8 @@ static inline bool mlx5e_rx_cache_get(struct mlx5e_rq *rq,
return true;
}
-static inline int mlx5e_page_alloc_mapped(struct mlx5e_rq *rq,
- struct mlx5e_dma_info *dma_info)
+static inline int mlx5e_page_alloc_pool(struct mlx5e_rq *rq,
+ struct mlx5e_dma_info *dma_info)
{
if (mlx5e_rx_cache_get(rq, dma_info))
return 0;
@@ -247,7 +249,7 @@ static inline int mlx5e_page_alloc_mapped(struct mlx5e_rq *rq,
dma_info->addr = dma_map_page(rq->pdev, dma_info->page, 0,
PAGE_SIZE, rq->buff.map_dir);
if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) {
- put_page(dma_info->page);
+ page_pool_recycle_direct(rq->page_pool, dma_info->page);
dma_info->page = NULL;
return -ENOMEM;
}
@@ -255,13 +257,23 @@ static inline int mlx5e_page_alloc_mapped(struct mlx5e_rq *rq,
return 0;
}
+static inline int mlx5e_page_alloc(struct mlx5e_rq *rq,
+ struct mlx5e_dma_info *dma_info)
+{
+ if (rq->umem)
+ return mlx5e_xsk_page_alloc_umem(rq, dma_info);
+ else
+ return mlx5e_page_alloc_pool(rq, dma_info);
+}
+
void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info)
{
dma_unmap_page(rq->pdev, dma_info->addr, PAGE_SIZE, rq->buff.map_dir);
}
-void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
- bool recycle)
+void mlx5e_page_release_dynamic(struct mlx5e_rq *rq,
+ struct mlx5e_dma_info *dma_info,
+ bool recycle)
{
if (likely(recycle)) {
if (mlx5e_rx_cache_put(rq, dma_info))
@@ -271,10 +283,25 @@ void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
page_pool_recycle_direct(rq->page_pool, dma_info->page);
} else {
mlx5e_page_dma_unmap(rq, dma_info);
+ page_pool_release_page(rq->page_pool, dma_info->page);
put_page(dma_info->page);
}
}
+static inline void mlx5e_page_release(struct mlx5e_rq *rq,
+ struct mlx5e_dma_info *dma_info,
+ bool recycle)
+{
+ if (rq->umem)
+ /* The `recycle` parameter is ignored, and the page is always
+ * put into the Reuse Ring, because there is no way to return
+ * the page to the userspace when the interface goes down.
+ */
+ mlx5e_xsk_page_release(rq, dma_info);
+ else
+ mlx5e_page_release_dynamic(rq, dma_info, recycle);
+}
+
static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq,
struct mlx5e_wqe_frag_info *frag)
{
@@ -286,7 +313,7 @@ static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq,
* offset) should just use the new one without replenishing again
* by themselves.
*/
- err = mlx5e_page_alloc_mapped(rq, frag->di);
+ err = mlx5e_page_alloc(rq, frag->di);
return err;
}
@@ -352,6 +379,13 @@ static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, u8 wqe_bulk)
int err;
int i;
+ if (rq->umem) {
+ int pages_desired = wqe_bulk << rq->wqe.info.log_num_frags;
+
+ if (unlikely(!mlx5e_xsk_pages_enough_umem(rq, pages_desired)))
+ return -ENOMEM;
+ }
+
for (i = 0; i < wqe_bulk; i++) {
struct mlx5e_rx_wqe_cyc *wqe = mlx5_wq_cyc_get_wqe(wq, ix + i);
@@ -399,11 +433,17 @@ mlx5e_copy_skb_header(struct device *pdev, struct sk_buff *skb,
static void
mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, bool recycle)
{
- const bool no_xdp_xmit =
- bitmap_empty(wi->xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE);
+ bool no_xdp_xmit;
struct mlx5e_dma_info *dma_info = wi->umr.dma_info;
int i;
+ /* A common case for AF_XDP. */
+ if (bitmap_full(wi->xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE))
+ return;
+
+ no_xdp_xmit = bitmap_empty(wi->xdp_xmit_bitmap,
+ MLX5_MPWRQ_PAGES_PER_WQE);
+
for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++)
if (no_xdp_xmit || !test_bit(i, wi->xdp_xmit_bitmap))
mlx5e_page_release(rq, &dma_info[i], recycle);
@@ -425,11 +465,6 @@ static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq, u8 n)
mlx5_wq_ll_update_db_record(wq);
}
-static inline u16 mlx5e_icosq_wrap_cnt(struct mlx5e_icosq *sq)
-{
- return mlx5_wq_cyc_get_ctr_wrap_cnt(&sq->wq, sq->pc);
-}
-
static inline void mlx5e_fill_icosq_frag_edge(struct mlx5e_icosq *sq,
struct mlx5_wq_cyc *wq,
u16 pi, u16 nnops)
@@ -457,6 +492,12 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
int err;
int i;
+ if (rq->umem &&
+ unlikely(!mlx5e_xsk_pages_enough_umem(rq, MLX5_MPWRQ_PAGES_PER_WQE))) {
+ err = -ENOMEM;
+ goto err;
+ }
+
pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
if (unlikely(contig_wqebbs_room < MLX5E_UMR_WQEBBS)) {
@@ -465,12 +506,10 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
}
umr_wqe = mlx5_wq_cyc_get_wqe(wq, pi);
- if (unlikely(mlx5e_icosq_wrap_cnt(sq) < 2))
- memcpy(umr_wqe, &rq->mpwqe.umr_wqe,
- offsetof(struct mlx5e_umr_wqe, inline_mtts));
+ memcpy(umr_wqe, &rq->mpwqe.umr_wqe, offsetof(struct mlx5e_umr_wqe, inline_mtts));
for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++, dma_info++) {
- err = mlx5e_page_alloc_mapped(rq, dma_info);
+ err = mlx5e_page_alloc(rq, dma_info);
if (unlikely(err))
goto err_unmap;
umr_wqe->inline_mtts[i].ptag = cpu_to_be64(dma_info->addr | MLX5_EN_WR);
@@ -485,6 +524,7 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
umr_wqe->uctrl.xlt_offset = cpu_to_be16(xlt_offset);
sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR;
+ sq->db.ico_wqe[pi].umr.rq = rq;
sq->pc += MLX5E_UMR_WQEBBS;
sq->doorbell_cseg = &umr_wqe->ctrl;
@@ -496,6 +536,8 @@ err_unmap:
dma_info--;
mlx5e_page_release(rq, dma_info, true);
}
+
+err:
rq->stats->buff_alloc_err++;
return err;
@@ -542,11 +584,10 @@ bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
return !!err;
}
-static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
+void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
{
struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq);
struct mlx5_cqe64 *cqe;
- u8 completed_umr = 0;
u16 sqcc;
int i;
@@ -587,7 +628,7 @@ static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
if (likely(wi->opcode == MLX5_OPCODE_UMR)) {
sqcc += MLX5E_UMR_WQEBBS;
- completed_umr++;
+ wi->umr.rq->mpwqe.umr_completed++;
} else if (likely(wi->opcode == MLX5_OPCODE_NOP)) {
sqcc++;
} else {
@@ -603,24 +644,25 @@ static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
sq->cc = sqcc;
mlx5_cqwq_update_db_record(&cq->wq);
-
- if (likely(completed_umr)) {
- mlx5e_post_rx_mpwqe(rq, completed_umr);
- rq->mpwqe.umr_in_progress -= completed_umr;
- }
}
bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
{
struct mlx5e_icosq *sq = &rq->channel->icosq;
struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
+ u8 umr_completed = rq->mpwqe.umr_completed;
+ int alloc_err = 0;
u8 missing, i;
u16 head;
if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
return false;
- mlx5e_poll_ico_cq(&sq->cq, rq);
+ if (umr_completed) {
+ mlx5e_post_rx_mpwqe(rq, umr_completed);
+ rq->mpwqe.umr_in_progress -= umr_completed;
+ rq->mpwqe.umr_completed = 0;
+ }
missing = mlx5_wq_ll_missing(wq) - rq->mpwqe.umr_in_progress;
@@ -634,7 +676,9 @@ bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
head = rq->mpwqe.actual_wq_head;
i = missing;
do {
- if (unlikely(mlx5e_alloc_rx_mpwqe(rq, head)))
+ alloc_err = mlx5e_alloc_rx_mpwqe(rq, head);
+
+ if (unlikely(alloc_err))
break;
head = mlx5_wq_ll_get_wqe_next_ix(wq, head);
} while (--i);
@@ -648,6 +692,12 @@ bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
rq->mpwqe.umr_in_progress += rq->mpwqe.umr_last_bulk;
rq->mpwqe.actual_wq_head = head;
+ /* If XSK Fill Ring doesn't have enough frames, busy poll by
+ * rescheduling the NAPI poll.
+ */
+ if (unlikely(alloc_err == -ENOMEM && rq->umem))
+ return true;
+
return false;
}
@@ -1016,7 +1066,7 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
}
rcu_read_lock();
- consumed = mlx5e_xdp_handle(rq, di, va, &rx_headroom, &cqe_bcnt);
+ consumed = mlx5e_xdp_handle(rq, di, va, &rx_headroom, &cqe_bcnt, false);
rcu_read_unlock();
if (consumed)
return NULL; /* page/packet was consumed by XDP */
@@ -1092,7 +1142,10 @@ void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
wi = get_frag(rq, ci);
cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
- skb = rq->wqe.skb_from_cqe(rq, cqe, wi, cqe_bcnt);
+ skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
+ mlx5e_skb_from_cqe_linear,
+ mlx5e_skb_from_cqe_nonlinear,
+ rq, cqe, wi, cqe_bcnt);
if (!skb) {
/* probably for XDP */
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
@@ -1230,7 +1283,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
prefetch(data);
rcu_read_lock();
- consumed = mlx5e_xdp_handle(rq, di, va, &rx_headroom, &cqe_bcnt32);
+ consumed = mlx5e_xdp_handle(rq, di, va, &rx_headroom, &cqe_bcnt32, false);
rcu_read_unlock();
if (consumed) {
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
@@ -1279,8 +1332,10 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
- skb = rq->mpwqe.skb_from_cqe_mpwrq(rq, wi, cqe_bcnt, head_offset,
- page_idx);
+ skb = INDIRECT_CALL_2(rq->mpwqe.skb_from_cqe_mpwrq,
+ mlx5e_skb_from_cqe_mpwrq_linear,
+ mlx5e_skb_from_cqe_mpwrq_nonlinear,
+ rq, wi, cqe_bcnt, head_offset, page_idx);
if (!skb)
goto mpwrq_cqe_out;
@@ -1327,7 +1382,8 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
mlx5_cqwq_pop(cqwq);
- rq->handle_rx_cqe(rq, cqe);
+ INDIRECT_CALL_2(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
+ mlx5e_handle_rx_cqe, rq, cqe);
} while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(cqwq)));
out:
@@ -1437,7 +1493,10 @@ void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
wi = get_frag(rq, ci);
cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
- skb = rq->wqe.skb_from_cqe(rq, cqe, wi, cqe_bcnt);
+ skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
+ mlx5e_skb_from_cqe_linear,
+ mlx5e_skb_from_cqe_nonlinear,
+ rq, cqe, wi, cqe_bcnt);
if (!skb)
goto wq_free_wqe;
@@ -1469,7 +1528,10 @@ void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
wi = get_frag(rq, ci);
cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
- skb = rq->wqe.skb_from_cqe(rq, cqe, wi, cqe_bcnt);
+ skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
+ mlx5e_skb_from_cqe_linear,
+ mlx5e_skb_from_cqe_nonlinear,
+ rq, cqe, wi, cqe_bcnt);
if (unlikely(!skb)) {
/* a DROP, save the page-reuse checks */
mlx5e_free_rx_wqe(rq, wi, true);
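The rx fast path above now routes its function-pointer dispatches (rq->wqe.skb_from_cqe, rq->mpwqe.skb_from_cqe_mpwrq, rq->handle_rx_cqe) through INDIRECT_CALL_2() from <linux/indirect_call_wrapper.h>: the macro compares the pointer against the listed candidates and turns a match into a direct call, sparing a retpoline on mitigated kernels. A reduced, stand-alone illustration of the pattern (handler names invented):

	#include <linux/indirect_call_wrapper.h>

	static int handle_fast(int x) { return x + 1; }
	static int handle_slow(int x) { return x * 2; }

	static int dispatch(int (*fn)(int), int x)
	{
		/* Direct call when fn is one of the two expected handlers,
		 * ordinary indirect call otherwise.
		 */
		return INDIRECT_CALL_2(fn, handle_fast, handle_slow, x);
	}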
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
index 4382ef85488c..840ec945ccba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
@@ -64,7 +64,7 @@ static int mlx5e_test_health_info(struct mlx5e_priv *priv)
{
struct mlx5_core_health *health = &priv->mdev->priv.health;
- return health->sick ? 1 : 0;
+ return health->fatal_error ? 1 : 0;
}
static int mlx5e_test_link_state(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 483d321d2151..539b4d3656da 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -48,8 +48,15 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nop) },
#ifdef CONFIG_MLX5_EN_TLS
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ctx) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_resync_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_no_sync_data) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_bypass_req) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_bytes) },
#endif
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
@@ -104,7 +111,33 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_aff_change) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_force_irq) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_eq_rearm) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_complete) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_unnecessary) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_unnecessary_inner) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_none) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_ecn_mark) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_removed_vlan_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_xdp_drop) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_xdp_redirect) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_wqe_err) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_mpwqe_filler_cqes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_mpwqe_filler_strides) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_oversize_pkts_sw_drop) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_buff_alloc_err) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_blks) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_pkts) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_congst_umr) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_arfs_err) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_xmit) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_mpwqe) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_inlnw) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_full) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_err) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_cqes) },
};
#define NUM_SW_COUNTERS ARRAY_SIZE(sw_stats_desc)
@@ -144,6 +177,8 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
&priv->channel_stats[i];
struct mlx5e_xdpsq_stats *xdpsq_red_stats = &channel_stats->xdpsq;
struct mlx5e_xdpsq_stats *xdpsq_stats = &channel_stats->rq_xdpsq;
+ struct mlx5e_xdpsq_stats *xsksq_stats = &channel_stats->xsksq;
+ struct mlx5e_rq_stats *xskrq_stats = &channel_stats->xskrq;
struct mlx5e_rq_stats *rq_stats = &channel_stats->rq;
struct mlx5e_ch_stats *ch_stats = &channel_stats->ch;
int j;
@@ -186,6 +221,7 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
s->ch_poll += ch_stats->poll;
s->ch_arm += ch_stats->arm;
s->ch_aff_change += ch_stats->aff_change;
+ s->ch_force_irq += ch_stats->force_irq;
s->ch_eq_rearm += ch_stats->eq_rearm;
/* xdp redirect */
s->tx_xdp_xmit += xdpsq_red_stats->xmit;
@@ -194,6 +230,32 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
s->tx_xdp_full += xdpsq_red_stats->full;
s->tx_xdp_err += xdpsq_red_stats->err;
s->tx_xdp_cqes += xdpsq_red_stats->cqes;
+ /* AF_XDP zero-copy */
+ s->rx_xsk_packets += xskrq_stats->packets;
+ s->rx_xsk_bytes += xskrq_stats->bytes;
+ s->rx_xsk_csum_complete += xskrq_stats->csum_complete;
+ s->rx_xsk_csum_unnecessary += xskrq_stats->csum_unnecessary;
+ s->rx_xsk_csum_unnecessary_inner += xskrq_stats->csum_unnecessary_inner;
+ s->rx_xsk_csum_none += xskrq_stats->csum_none;
+ s->rx_xsk_ecn_mark += xskrq_stats->ecn_mark;
+ s->rx_xsk_removed_vlan_packets += xskrq_stats->removed_vlan_packets;
+ s->rx_xsk_xdp_drop += xskrq_stats->xdp_drop;
+ s->rx_xsk_xdp_redirect += xskrq_stats->xdp_redirect;
+ s->rx_xsk_wqe_err += xskrq_stats->wqe_err;
+ s->rx_xsk_mpwqe_filler_cqes += xskrq_stats->mpwqe_filler_cqes;
+ s->rx_xsk_mpwqe_filler_strides += xskrq_stats->mpwqe_filler_strides;
+ s->rx_xsk_oversize_pkts_sw_drop += xskrq_stats->oversize_pkts_sw_drop;
+ s->rx_xsk_buff_alloc_err += xskrq_stats->buff_alloc_err;
+ s->rx_xsk_cqe_compress_blks += xskrq_stats->cqe_compress_blks;
+ s->rx_xsk_cqe_compress_pkts += xskrq_stats->cqe_compress_pkts;
+ s->rx_xsk_congst_umr += xskrq_stats->congst_umr;
+ s->rx_xsk_arfs_err += xskrq_stats->arfs_err;
+ s->tx_xsk_xmit += xsksq_stats->xmit;
+ s->tx_xsk_mpwqe += xsksq_stats->mpwqe;
+ s->tx_xsk_inlnw += xsksq_stats->inlnw;
+ s->tx_xsk_full += xsksq_stats->full;
+ s->tx_xsk_err += xsksq_stats->err;
+ s->tx_xsk_cqes += xsksq_stats->cqes;
for (j = 0; j < priv->max_opened_tc; j++) {
struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];
@@ -216,8 +278,15 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
s->tx_csum_none += sq_stats->csum_none;
s->tx_csum_partial += sq_stats->csum_partial;
#ifdef CONFIG_MLX5_EN_TLS
- s->tx_tls_ooo += sq_stats->tls_ooo;
- s->tx_tls_resync_bytes += sq_stats->tls_resync_bytes;
+ s->tx_tls_encrypted_packets += sq_stats->tls_encrypted_packets;
+ s->tx_tls_encrypted_bytes += sq_stats->tls_encrypted_bytes;
+ s->tx_tls_ctx += sq_stats->tls_ctx;
+ s->tx_tls_ooo += sq_stats->tls_ooo;
+ s->tx_tls_resync_bytes += sq_stats->tls_resync_bytes;
+ s->tx_tls_drop_no_sync_data += sq_stats->tls_drop_no_sync_data;
+ s->tx_tls_drop_bypass_req += sq_stats->tls_drop_bypass_req;
+ s->tx_tls_dump_bytes += sq_stats->tls_dump_bytes;
+ s->tx_tls_dump_packets += sq_stats->tls_dump_packets;
#endif
s->tx_cqes += sq_stats->cqes;
}
@@ -1238,6 +1307,16 @@ static const struct counter_desc sq_stats_desc[] = {
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
+#ifdef CONFIG_MLX5_EN_TLS
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ctx) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
+#endif
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
@@ -1266,11 +1345,43 @@ static const struct counter_desc xdpsq_stats_desc[] = {
{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
};
+static const struct counter_desc xskrq_stats_desc[] = {
+ { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, packets) },
+ { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, bytes) },
+ { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_complete) },
+ { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
+ { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
+ { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_none) },
+ { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, ecn_mark) },
+ { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
+ { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, xdp_drop) },
+ { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, xdp_redirect) },
+ { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, wqe_err) },
+ { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
+ { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
+ { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
+ { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
+ { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
+ { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
+ { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, congst_umr) },
+ { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, arfs_err) },
+};
+
+static const struct counter_desc xsksq_stats_desc[] = {
+ { MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
+ { MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
+ { MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
+ { MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, full) },
+ { MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, err) },
+ { MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
+};
+
static const struct counter_desc ch_stats_desc[] = {
{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, events) },
{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, poll) },
{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, arm) },
{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, aff_change) },
+ { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, force_irq) },
{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
};
@@ -1278,6 +1389,8 @@ static const struct counter_desc ch_stats_desc[] = {
#define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc)
#define NUM_XDPSQ_STATS ARRAY_SIZE(xdpsq_stats_desc)
#define NUM_RQ_XDPSQ_STATS ARRAY_SIZE(rq_xdpsq_stats_desc)
+#define NUM_XSKRQ_STATS ARRAY_SIZE(xskrq_stats_desc)
+#define NUM_XSKSQ_STATS ARRAY_SIZE(xsksq_stats_desc)
#define NUM_CH_STATS ARRAY_SIZE(ch_stats_desc)
static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv)
@@ -1288,13 +1401,16 @@ static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv)
(NUM_CH_STATS * max_nch) +
(NUM_SQ_STATS * max_nch * priv->max_opened_tc) +
(NUM_RQ_XDPSQ_STATS * max_nch) +
- (NUM_XDPSQ_STATS * max_nch);
+ (NUM_XDPSQ_STATS * max_nch) +
+ (NUM_XSKRQ_STATS * max_nch * priv->xsk.ever_used) +
+ (NUM_XSKSQ_STATS * max_nch * priv->xsk.ever_used);
}
static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
int idx)
{
int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
+ bool is_xsk = priv->xsk.ever_used;
int i, j, tc;
for (i = 0; i < max_nch; i++)
@@ -1306,6 +1422,9 @@ static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
for (j = 0; j < NUM_RQ_STATS; j++)
sprintf(data + (idx++) * ETH_GSTRING_LEN,
rq_stats_desc[j].format, i);
+ for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ xskrq_stats_desc[j].format, i);
for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
sprintf(data + (idx++) * ETH_GSTRING_LEN,
rq_xdpsq_stats_desc[j].format, i);
@@ -1318,10 +1437,14 @@ static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
sq_stats_desc[j].format,
priv->channel_tc2txq[i][tc]);
- for (i = 0; i < max_nch; i++)
+ for (i = 0; i < max_nch; i++) {
+ for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ xsksq_stats_desc[j].format, i);
for (j = 0; j < NUM_XDPSQ_STATS; j++)
sprintf(data + (idx++) * ETH_GSTRING_LEN,
xdpsq_stats_desc[j].format, i);
+ }
return idx;
}
@@ -1330,6 +1453,7 @@ static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data,
int idx)
{
int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
+ bool is_xsk = priv->xsk.ever_used;
int i, j, tc;
for (i = 0; i < max_nch; i++)
@@ -1343,6 +1467,10 @@ static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data,
data[idx++] =
MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq,
rq_stats_desc, j);
+ for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
+ data[idx++] =
+ MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xskrq,
+ xskrq_stats_desc, j);
for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
data[idx++] =
MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq_xdpsq,
@@ -1356,11 +1484,16 @@ static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data,
MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].sq[tc],
sq_stats_desc, j);
- for (i = 0; i < max_nch; i++)
+ for (i = 0; i < max_nch; i++) {
+ for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
+ data[idx++] =
+ MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xsksq,
+ xsksq_stats_desc, j);
for (j = 0; j < NUM_XDPSQ_STATS; j++)
data[idx++] =
MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xdpsq,
xdpsq_stats_desc, j);
+ }
return idx;
}
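All the TLS and XSK counters added above hook into the same counter_desc machinery: each descriptor pairs an ethtool name format (the %d is later filled with the channel index) with an offsetof() into the stats structure, so one generic loop can emit the strings and another can read the u64 values. A stripped-down sketch of that pattern, not the driver's exact macros (string and value fills are combined here for brevity):

	#include <linux/ethtool.h>
	#include <linux/kernel.h>

	struct counter_desc {
		char format[ETH_GSTRING_LEN];
		size_t offset;	/* byte offset of the u64 inside the stats struct */
	};

	#define DECLARE_XSKRQ_STAT(type, fld) \
		{ "rx%d_xsk_" #fld, offsetof(type, fld) }

	static int fill_channel_stats(u8 *names, u64 *vals, int idx,
				      const struct counter_desc *desc, int n,
				      const void *stats, int ch)
	{
		int j;

		for (j = 0; j < n; j++) {
			/* one ETH_GSTRING_LEN slot per counter, %d = channel index */
			sprintf(names + (idx + j) * ETH_GSTRING_LEN,
				desc[j].format, ch);
			/* the counter value is the u64 sitting at desc[j].offset */
			vals[idx + j] = *(const u64 *)((const char *)stats +
						       desc[j].offset);
		}
		return idx + n;
	}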
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index cdddcc46971b..76ac111e14d0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -46,6 +46,8 @@
#define MLX5E_DECLARE_TX_STAT(type, fld) "tx%d_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_XDPSQ_STAT(type, fld) "tx%d_xdp_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_RQ_XDPSQ_STAT(type, fld) "rx%d_xdp_tx_"#fld, offsetof(type, fld)
+#define MLX5E_DECLARE_XSKRQ_STAT(type, fld) "rx%d_xsk_"#fld, offsetof(type, fld)
+#define MLX5E_DECLARE_XSKSQ_STAT(type, fld) "tx%d_xsk_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_CH_STAT(type, fld) "ch%d_"#fld, offsetof(type, fld)
struct counter_desc {
@@ -116,12 +118,46 @@ struct mlx5e_sw_stats {
u64 ch_poll;
u64 ch_arm;
u64 ch_aff_change;
+ u64 ch_force_irq;
u64 ch_eq_rearm;
#ifdef CONFIG_MLX5_EN_TLS
+ u64 tx_tls_encrypted_packets;
+ u64 tx_tls_encrypted_bytes;
+ u64 tx_tls_ctx;
u64 tx_tls_ooo;
u64 tx_tls_resync_bytes;
+ u64 tx_tls_drop_no_sync_data;
+ u64 tx_tls_drop_bypass_req;
+ u64 tx_tls_dump_packets;
+ u64 tx_tls_dump_bytes;
#endif
+
+ u64 rx_xsk_packets;
+ u64 rx_xsk_bytes;
+ u64 rx_xsk_csum_complete;
+ u64 rx_xsk_csum_unnecessary;
+ u64 rx_xsk_csum_unnecessary_inner;
+ u64 rx_xsk_csum_none;
+ u64 rx_xsk_ecn_mark;
+ u64 rx_xsk_removed_vlan_packets;
+ u64 rx_xsk_xdp_drop;
+ u64 rx_xsk_xdp_redirect;
+ u64 rx_xsk_wqe_err;
+ u64 rx_xsk_mpwqe_filler_cqes;
+ u64 rx_xsk_mpwqe_filler_strides;
+ u64 rx_xsk_oversize_pkts_sw_drop;
+ u64 rx_xsk_buff_alloc_err;
+ u64 rx_xsk_cqe_compress_blks;
+ u64 rx_xsk_cqe_compress_pkts;
+ u64 rx_xsk_congst_umr;
+ u64 rx_xsk_arfs_err;
+ u64 tx_xsk_xmit;
+ u64 tx_xsk_mpwqe;
+ u64 tx_xsk_inlnw;
+ u64 tx_xsk_full;
+ u64 tx_xsk_err;
+ u64 tx_xsk_cqes;
};
struct mlx5e_qcounter_stats {
@@ -227,8 +263,15 @@ struct mlx5e_sq_stats {
u64 added_vlan_packets;
u64 nop;
#ifdef CONFIG_MLX5_EN_TLS
+ u64 tls_encrypted_packets;
+ u64 tls_encrypted_bytes;
+ u64 tls_ctx;
u64 tls_ooo;
u64 tls_resync_bytes;
+ u64 tls_drop_no_sync_data;
+ u64 tls_drop_bypass_req;
+ u64 tls_dump_packets;
+ u64 tls_dump_bytes;
#endif
/* less likely accessed in data path */
u64 csum_none;
@@ -256,6 +299,7 @@ struct mlx5e_ch_stats {
u64 poll;
u64 arm;
u64 aff_change;
+ u64 force_irq;
u64 eq_rearm;
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index e40c60d1631f..2d6436257f9d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -53,6 +53,7 @@
#include "en/port.h"
#include "en/tc_tun.h"
#include "lib/devcom.h"
+#include "lib/geneve.h"
struct mlx5_nic_flow_attr {
u32 action;
@@ -126,7 +127,7 @@ struct mlx5e_tc_flow {
};
struct mlx5e_tc_flow_parse_attr {
- struct ip_tunnel_info tun_info[MLX5_MAX_FLOW_FWD_VPORTS];
+ const struct ip_tunnel_info *tun_info[MLX5_MAX_FLOW_FWD_VPORTS];
struct net_device *filter_dev;
struct mlx5_flow_spec spec;
int num_mod_hdr_actions;
@@ -716,19 +717,22 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct netlink_ext_ack *extack)
{
+ struct mlx5_flow_context *flow_context = &parse_attr->spec.flow_context;
struct mlx5_nic_flow_attr *attr = flow->nic_attr;
struct mlx5_core_dev *dev = priv->mdev;
struct mlx5_flow_destination dest[2] = {};
struct mlx5_flow_act flow_act = {
.action = attr->action,
- .flow_tag = attr->flow_tag,
.reformat_id = 0,
- .flags = FLOW_ACT_HAS_TAG | FLOW_ACT_NO_APPEND,
+ .flags = FLOW_ACT_NO_APPEND,
};
struct mlx5_fc *counter = NULL;
bool table_created = false;
int err, dest_ix = 0;
+ flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
+ flow_context->flow_tag = attr->flow_tag;
+
if (flow->flags & MLX5E_TC_FLOW_HAIRPIN) {
err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack);
if (err) {
@@ -799,7 +803,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
}
if (attr->match_level != MLX5_MATCH_NONE)
- parse_attr->spec.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ parse_attr->spec.match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
flow->rule[0] = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
&flow_act, dest, dest_ix);
@@ -1063,6 +1067,19 @@ err_max_prio_chain:
return err;
}
+static bool mlx5_flow_has_geneve_opt(struct mlx5e_tc_flow *flow)
+{
+ struct mlx5_flow_spec *spec = &flow->esw_attr->parse_attr->spec;
+ void *headers_v = MLX5_ADDR_OF(fte_match_param,
+ spec->match_value,
+ misc_parameters_3);
+ u32 geneve_tlv_opt_0_data = MLX5_GET(fte_match_set_misc3,
+ headers_v,
+ geneve_tlv_option_0_data);
+
+ return !!geneve_tlv_opt_0_data;
+}
+
static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
@@ -1084,6 +1101,9 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
}
+ if (mlx5_flow_has_geneve_opt(flow))
+ mlx5_geneve_tlv_option_del(priv->mdev->geneve);
+
mlx5_eswitch_del_vlan_action(esw, attr);
for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
@@ -1330,7 +1350,7 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
static int parse_tunnel_attr(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
- struct tc_cls_flower_offload *f,
+ struct flow_cls_offload *f,
struct net_device *filter_dev, u8 *match_level)
{
struct netlink_ext_ack *extack = f->common.extack;
@@ -1338,8 +1358,7 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
outer_headers);
void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers);
- struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
- struct flow_match_control enc_control;
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
int err;
err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f,
@@ -1350,9 +1369,7 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
return err;
}
- flow_rule_match_enc_control(rule, &enc_control);
-
- if (enc_control.key->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
struct flow_match_ipv4_addrs match;
flow_rule_match_enc_ipv4_addrs(rule, &match);
@@ -1372,7 +1389,7 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
- } else if (enc_control.key->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+ } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
struct flow_match_ipv6_addrs match;
flow_rule_match_enc_ipv6_addrs(rule, &match);
@@ -1461,7 +1478,7 @@ static void *get_match_headers_value(u32 flags,
static int __parse_cls_flower(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
- struct tc_cls_flower_offload *f,
+ struct flow_cls_offload *f,
struct net_device *filter_dev,
u8 *match_level, u8 *tunnel_match_level)
{
@@ -1474,7 +1491,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
misc_parameters);
void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
misc_parameters);
- struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
struct flow_dissector *dissector = rule->match.dissector;
u16 addr_type = 0;
u8 ip_proto = 0;
@@ -1497,29 +1514,21 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
BIT(FLOW_DISSECTOR_KEY_TCP) |
BIT(FLOW_DISSECTOR_KEY_IP) |
- BIT(FLOW_DISSECTOR_KEY_ENC_IP))) {
+ BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
+ BIT(FLOW_DISSECTOR_KEY_ENC_OPTS))) {
NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
dissector->used_keys);
return -EOPNOTSUPP;
}
- if ((flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
- flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
- flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) &&
- flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
- struct flow_match_control match;
-
- flow_rule_match_enc_control(rule, &match);
- switch (match.key->addr_type) {
- case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
- case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
- if (parse_tunnel_attr(priv, spec, f, filter_dev, tunnel_match_level))
- return -EOPNOTSUPP;
- break;
- default:
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
+ flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) ||
+ flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
+ flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS) ||
+ flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS)) {
+ if (parse_tunnel_attr(priv, spec, f, filter_dev, tunnel_match_level))
return -EOPNOTSUPP;
- }
/* In decap flow, header pointers should point to the inner
* headers, outer header were already set by parse_tunnel_attr
@@ -1822,7 +1831,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
static int parse_cls_flower(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec,
- struct tc_cls_flower_offload *f,
+ struct flow_cls_offload *f,
struct net_device *filter_dev)
{
struct netlink_ext_ack *extack = f->common.extack;
@@ -2581,21 +2590,21 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
}
struct encap_key {
- struct ip_tunnel_key *ip_tun_key;
- int tunnel_type;
+ const struct ip_tunnel_key *ip_tun_key;
+ struct mlx5e_tc_tunnel *tc_tunnel;
};
static inline int cmp_encap_info(struct encap_key *a,
struct encap_key *b)
{
return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) ||
- a->tunnel_type != b->tunnel_type;
+ a->tc_tunnel->tunnel_type != b->tc_tunnel->tunnel_type;
}
static inline int hash_encap_info(struct encap_key *key)
{
return jhash(key->ip_tun_key, sizeof(*key->ip_tun_key),
- key->tunnel_type);
+ key->tc_tunnel->tunnel_type);
}
@@ -2625,7 +2634,7 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_esw_flow_attr *attr = flow->esw_attr;
struct mlx5e_tc_flow_parse_attr *parse_attr;
- struct ip_tunnel_info *tun_info;
+ const struct ip_tunnel_info *tun_info;
struct encap_key key, e_key;
struct mlx5e_encap_entry *e;
unsigned short family;
@@ -2634,17 +2643,17 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
int err = 0;
parse_attr = attr->parse_attr;
- tun_info = &parse_attr->tun_info[out_index];
+ tun_info = parse_attr->tun_info[out_index];
family = ip_tunnel_info_af(tun_info);
key.ip_tun_key = &tun_info->key;
- key.tunnel_type = mlx5e_tc_tun_get_type(mirred_dev);
+ key.tc_tunnel = mlx5e_get_tc_tun(mirred_dev);
hash_key = hash_encap_info(&key);
hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
encap_hlist, hash_key) {
- e_key.ip_tun_key = &e->tun_info.key;
- e_key.tunnel_type = e->tunnel_type;
+ e_key.ip_tun_key = &e->tun_info->key;
+ e_key.tc_tunnel = e->tunnel;
if (!cmp_encap_info(&e_key, &key)) {
found = true;
break;
@@ -2659,7 +2668,7 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
if (!e)
return -ENOMEM;
- e->tun_info = *tun_info;
+ e->tun_info = tun_info;
err = mlx5e_tc_tun_init_encap_attr(mirred_dev, priv, e, extack);
if (err)
goto out_err;
@@ -2793,6 +2802,16 @@ static int add_vlan_pop_action(struct mlx5e_priv *priv,
return err;
}
+bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
+ struct net_device *out_dev)
+{
+ if (is_merged_eswitch_dev(priv, out_dev))
+ return true;
+
+ return mlx5e_eswitch_rep(out_dev) &&
+ same_hw_devs(priv, netdev_priv(out_dev));
+}
+
static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
struct flow_action *flow_action,
struct mlx5e_tc_flow *flow,
@@ -2858,9 +2877,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
- if (netdev_port_same_parent_id(priv->netdev,
- out_dev) ||
- is_merged_eswitch_dev(priv, out_dev)) {
+ if (netdev_port_same_parent_id(priv->netdev, out_dev)) {
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
struct net_device *uplink_upper = netdev_master_upper_dev_get(uplink_dev);
@@ -2877,6 +2894,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
if (err)
return err;
}
+
if (is_vlan_dev(parse_attr->filter_dev)) {
err = add_vlan_pop_action(priv, attr,
&action);
@@ -2884,8 +2902,13 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
return err;
}
- if (!mlx5e_eswitch_rep(out_dev))
+ if (!mlx5e_is_valid_eswitch_fwd_dev(priv, out_dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "devices are not on same switch HW, can't offload forwarding");
+ pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
+ priv->netdev->name, out_dev->name);
return -EOPNOTSUPP;
+ }
out_priv = netdev_priv(out_dev);
rpriv = out_priv->ppriv;
@@ -2895,7 +2918,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
} else if (encap) {
parse_attr->mirred_ifindex[attr->out_count] =
out_dev->ifindex;
- parse_attr->tun_info[attr->out_count] = *info;
+ parse_attr->tun_info[attr->out_count] = info;
encap = false;
attr->dests[attr->out_count].flags |=
MLX5_ESW_DEST_ENCAP;
@@ -3092,7 +3115,7 @@ static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
static int
mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
- struct tc_cls_flower_offload *f, u16 flow_flags,
+ struct flow_cls_offload *f, u16 flow_flags,
struct mlx5e_tc_flow_parse_attr **__parse_attr,
struct mlx5e_tc_flow **__flow)
{
@@ -3126,7 +3149,7 @@ static void
mlx5e_flow_esw_attr_init(struct mlx5_esw_flow_attr *esw_attr,
struct mlx5e_priv *priv,
struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct tc_cls_flower_offload *f,
+ struct flow_cls_offload *f,
struct mlx5_eswitch_rep *in_rep,
struct mlx5_core_dev *in_mdev)
{
@@ -3148,13 +3171,13 @@ mlx5e_flow_esw_attr_init(struct mlx5_esw_flow_attr *esw_attr,
static struct mlx5e_tc_flow *
__mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
- struct tc_cls_flower_offload *f,
+ struct flow_cls_offload *f,
u16 flow_flags,
struct net_device *filter_dev,
struct mlx5_eswitch_rep *in_rep,
struct mlx5_core_dev *in_mdev)
{
- struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
struct netlink_ext_ack *extack = f->common.extack;
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5e_tc_flow *flow;
@@ -3198,7 +3221,7 @@ out:
return ERR_PTR(err);
}
-static int mlx5e_tc_add_fdb_peer_flow(struct tc_cls_flower_offload *f,
+static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f,
struct mlx5e_tc_flow *flow,
u16 flow_flags)
{
@@ -3250,7 +3273,7 @@ out:
static int
mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
- struct tc_cls_flower_offload *f,
+ struct flow_cls_offload *f,
u16 flow_flags,
struct net_device *filter_dev,
struct mlx5e_tc_flow **__flow)
@@ -3284,12 +3307,12 @@ out:
static int
mlx5e_add_nic_flow(struct mlx5e_priv *priv,
- struct tc_cls_flower_offload *f,
+ struct flow_cls_offload *f,
u16 flow_flags,
struct net_device *filter_dev,
struct mlx5e_tc_flow **__flow)
{
- struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
struct netlink_ext_ack *extack = f->common.extack;
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5e_tc_flow *flow;
@@ -3335,7 +3358,7 @@ out:
static int
mlx5e_tc_add_flow(struct mlx5e_priv *priv,
- struct tc_cls_flower_offload *f,
+ struct flow_cls_offload *f,
int flags,
struct net_device *filter_dev,
struct mlx5e_tc_flow **flow)
@@ -3349,7 +3372,7 @@ mlx5e_tc_add_flow(struct mlx5e_priv *priv,
if (!tc_can_offload_extack(priv->netdev, f->common.extack))
return -EOPNOTSUPP;
- if (esw && esw->mode == SRIOV_OFFLOADS)
+ if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS)
err = mlx5e_add_fdb_flow(priv, f, flow_flags,
filter_dev, flow);
else
@@ -3360,7 +3383,7 @@ mlx5e_tc_add_flow(struct mlx5e_priv *priv,
}
int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
- struct tc_cls_flower_offload *f, int flags)
+ struct flow_cls_offload *f, int flags)
{
struct netlink_ext_ack *extack = f->common.extack;
struct rhashtable *tc_ht = get_tc_ht(priv, flags);
@@ -3407,7 +3430,7 @@ static bool same_flow_direction(struct mlx5e_tc_flow *flow, int flags)
}
int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
- struct tc_cls_flower_offload *f, int flags)
+ struct flow_cls_offload *f, int flags)
{
struct rhashtable *tc_ht = get_tc_ht(priv, flags);
struct mlx5e_tc_flow *flow;
@@ -3426,7 +3449,7 @@ int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
}
int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
- struct tc_cls_flower_offload *f, int flags)
+ struct flow_cls_offload *f, int flags)
{
struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
struct rhashtable *tc_ht = get_tc_ht(priv, flags);
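Much of the en_tc.c churn above is the tc_cls_flower_offload -> flow_cls_offload rename plus switching from the enc_control key to direct flow_rule_match_key() tests. In isolation the pattern looks like this (parse_basic is a made-up name; the real parser extracts far more keys):

	#include <net/flow_offload.h>

	static int parse_basic(struct flow_cls_offload *f)
	{
		struct flow_rule *rule = flow_cls_offload_flow_rule(f);

		/* Only touch keys the filter actually used. */
		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
			struct flow_match_basic match;

			flow_rule_match_basic(rule, &match);
			/* match.key->n_proto / match.mask->n_proto etc. are
			 * now available for building the hardware match.
			 */
		}
		return 0;
	}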
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index f62e81902d27..3ab39275ca7d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -54,12 +54,12 @@ int mlx5e_tc_esw_init(struct rhashtable *tc_ht);
void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht);
int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
- struct tc_cls_flower_offload *f, int flags);
+ struct flow_cls_offload *f, int flags);
int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
- struct tc_cls_flower_offload *f, int flags);
+ struct flow_cls_offload *f, int flags);
int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
- struct tc_cls_flower_offload *f, int flags);
+ struct flow_cls_offload *f, int flags);
struct mlx5e_encap_entry;
void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
@@ -74,6 +74,9 @@ int mlx5e_tc_num_filters(struct mlx5e_priv *priv, int flags);
void mlx5e_tc_reoffload_flows_work(struct work_struct *work);
+bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
+ struct net_device *out_dev);
+
#else /* CONFIG_MLX5_ESWITCH */
static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; }
static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 701e5dc75bb0..600e92cb629a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -35,55 +35,12 @@
#include <net/geneve.h>
#include <net/dsfield.h>
#include "en.h"
+#include "en/txrx.h"
#include "ipoib/ipoib.h"
#include "en_accel/en_accel.h"
+#include "en_accel/ktls.h"
#include "lib/clock.h"
-#define MLX5E_SQ_NOPS_ROOM MLX5_SEND_WQE_MAX_WQEBBS
-
-#ifndef CONFIG_MLX5_EN_TLS
-#define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\
- MLX5E_SQ_NOPS_ROOM)
-#else
-/* TLS offload requires MLX5E_SQ_STOP_ROOM to have
- * enough room for a resync SKB, a normal SKB and a NOP
- */
-#define MLX5E_SQ_STOP_ROOM (2 * MLX5_SEND_WQE_MAX_WQEBBS +\
- MLX5E_SQ_NOPS_ROOM)
-#endif
-
-static inline void mlx5e_tx_dma_unmap(struct device *pdev,
- struct mlx5e_sq_dma *dma)
-{
- switch (dma->type) {
- case MLX5E_DMA_MAP_SINGLE:
- dma_unmap_single(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
- break;
- case MLX5E_DMA_MAP_PAGE:
- dma_unmap_page(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
- break;
- default:
- WARN_ONCE(true, "mlx5e_tx_dma_unmap unknown DMA type!\n");
- }
-}
-
-static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i)
-{
- return &sq->db.dma_fifo[i & sq->dma_fifo_mask];
-}
-
-static inline void mlx5e_dma_push(struct mlx5e_txqsq *sq,
- dma_addr_t addr,
- u32 size,
- enum mlx5e_dma_map_type map_type)
-{
- struct mlx5e_sq_dma *dma = mlx5e_dma_get(sq, sq->dma_fifo_pc++);
-
- dma->addr = addr;
- dma->size = size;
- dma->type = map_type;
-}
-
static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma)
{
int i;
@@ -277,23 +234,6 @@ dma_unmap_wqe_err:
return -ENOMEM;
}
-static inline void mlx5e_fill_sq_frag_edge(struct mlx5e_txqsq *sq,
- struct mlx5_wq_cyc *wq,
- u16 pi, u16 nnops)
-{
- struct mlx5e_tx_wqe_info *edge_wi, *wi = &sq->db.wqe_info[pi];
-
- edge_wi = wi + nnops;
-
- /* fill sq frag edge with nops to avoid wqe wrapping two pages */
- for (; wi < edge_wi; wi++) {
- wi->skb = NULL;
- wi->num_wqebbs = 1;
- mlx5e_post_nop(wq, sq->sqn, &sq->pc);
- }
- sq->stats->nop += nnops;
-}
-
static inline void
mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
u8 opcode, u16 ds_cnt, u8 num_wqebbs, u32 num_bytes, u8 num_dma,
@@ -301,6 +241,7 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
bool xmit_more)
{
struct mlx5_wq_cyc *wq = &sq->wq;
+ bool send_doorbell;
wi->num_bytes = num_bytes;
wi->num_dma = num_dma;
@@ -310,23 +251,21 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
- netdev_tx_sent_queue(sq->txq, num_bytes);
-
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
sq->pc += wi->num_wqebbs;
- if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, MLX5E_SQ_STOP_ROOM))) {
+ if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, sq->stop_room))) {
netif_tx_stop_queue(sq->txq);
sq->stats->stopped++;
}
- if (!xmit_more || netif_xmit_stopped(sq->txq))
+ send_doorbell = __netdev_tx_sent_queue(sq->txq, num_bytes,
+ xmit_more);
+ if (send_doorbell)
mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg);
}
-#define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))
-
netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more)
{
@@ -353,9 +292,12 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
stats->packets += skb_shinfo(skb)->gso_segs;
} else {
+ u8 mode = mlx5e_transport_inline_tx_wqe(wqe) ?
+ MLX5_INLINE_MODE_TCP_UDP : sq->min_inline_mode;
+
opcode = MLX5_OPCODE_SEND;
mss = 0;
- ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb);
+ ihs = mlx5e_calc_min_inline(mode, skb);
num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
stats->packets++;
}
@@ -380,11 +322,17 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
#ifdef CONFIG_MLX5_EN_IPSEC
struct mlx5_wqe_eth_seg cur_eth = wqe->eth;
#endif
+#ifdef CONFIG_MLX5_EN_TLS
+ struct mlx5_wqe_ctrl_seg cur_ctrl = wqe->ctrl;
+#endif
mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
- mlx5e_sq_fetch_wqe(sq, &wqe, &pi);
+ wqe = mlx5e_sq_fetch_wqe(sq, sizeof(*wqe), &pi);
#ifdef CONFIG_MLX5_EN_IPSEC
wqe->eth = cur_eth;
#endif
+#ifdef CONFIG_MLX5_EN_TLS
+ wqe->ctrl = cur_ctrl;
+#endif
}
/* fill wqe */
@@ -443,7 +391,7 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
u16 pi;
sq = priv->txq2sq[skb_get_queue_mapping(skb)];
- mlx5e_sq_fetch_wqe(sq, &wqe, &pi);
+ wqe = mlx5e_sq_fetch_wqe(sq, sizeof(*wqe), &pi);
/* might send skbs and update wqe and pi */
skb = mlx5e_accel_handle_tx(skb, sq, dev, &wqe, &pi);
@@ -531,8 +479,16 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
wi = &sq->db.wqe_info[ci];
skb = wi->skb;
- if (unlikely(!skb)) { /* nop */
- sqcc++;
+ if (unlikely(!skb)) {
+#ifdef CONFIG_MLX5_EN_TLS
+ if (wi->resync_dump_frag) {
+ struct mlx5e_sq_dma *dma =
+ mlx5e_dma_get(sq, dma_fifo_cc++);
+
+ mlx5e_ktls_tx_handle_resync_dump_comp(sq, wi, dma);
+ }
+#endif
+ sqcc += wi->num_wqebbs;
continue;
}
@@ -574,8 +530,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
netdev_tx_completed_queue(sq->txq, npkts, nbytes);
if (netif_tx_queue_stopped(sq->txq) &&
- mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc,
- MLX5E_SQ_STOP_ROOM) &&
+ mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room) &&
!test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) {
netif_tx_wake_queue(sq->txq);
stats->wake++;
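The xmit hunks above replace the unconditional netdev_tx_sent_queue() plus xmit_more check with __netdev_tx_sent_queue(), which does the BQL accounting and returns true only when the hardware actually has to be notified (last skb of a batch, or the queue stopped in the meantime). A rough sketch of how a driver's xmit tail can use it; the ring/doorbell details are invented for illustration:

	#include <linux/netdevice.h>
	#include <linux/io.h>

	struct foo_ring {
		u16 next_to_use;
		void __iomem *tail_doorbell;
	};

	static void foo_tx_tail(struct netdev_queue *txq, struct foo_ring *ring,
				unsigned int bytes, bool xmit_more)
	{
		/* BQL accounting; true means "ring the doorbell now". */
		if (__netdev_tx_sent_queue(txq, bytes, xmit_more))
			writel(ring->next_to_use, ring->tail_doorbell);
	}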
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index f9862bf75491..c50b6f0769c8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -33,6 +33,7 @@
#include <linux/irq.h>
#include "en.h"
#include "en/xdp.h"
+#include "en/xsk/tx.h"
static inline bool mlx5e_channel_no_affinity_change(struct mlx5e_channel *c)
{
@@ -48,26 +49,24 @@ static inline bool mlx5e_channel_no_affinity_change(struct mlx5e_channel *c)
static void mlx5e_handle_tx_dim(struct mlx5e_txqsq *sq)
{
struct mlx5e_sq_stats *stats = sq->stats;
- struct net_dim_sample dim_sample;
+ struct dim_sample dim_sample;
if (unlikely(!test_bit(MLX5E_SQ_STATE_AM, &sq->state)))
return;
- net_dim_sample(sq->cq.event_ctr, stats->packets, stats->bytes,
- &dim_sample);
+ dim_update_sample(sq->cq.event_ctr, stats->packets, stats->bytes, &dim_sample);
net_dim(&sq->dim, dim_sample);
}
static void mlx5e_handle_rx_dim(struct mlx5e_rq *rq)
{
struct mlx5e_rq_stats *stats = rq->stats;
- struct net_dim_sample dim_sample;
+ struct dim_sample dim_sample;
if (unlikely(!test_bit(MLX5E_RQ_STATE_AM, &rq->state)))
return;
- net_dim_sample(rq->cq.event_ctr, stats->packets, stats->bytes,
- &dim_sample);
+ dim_update_sample(rq->cq.event_ctr, stats->packets, stats->bytes, &dim_sample);
net_dim(&rq->dim, dim_sample);
}
@@ -87,7 +86,12 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
napi);
struct mlx5e_ch_stats *ch_stats = c->stats;
+ struct mlx5e_xdpsq *xsksq = &c->xsksq;
+ struct mlx5e_rq *xskrq = &c->xskrq;
struct mlx5e_rq *rq = &c->rq;
+ bool xsk_open = test_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
+ bool aff_change = false;
+ bool busy_xsk = false;
bool busy = false;
int work_done = 0;
int i;
@@ -97,22 +101,38 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
for (i = 0; i < c->num_tc; i++)
busy |= mlx5e_poll_tx_cq(&c->sq[i].cq, budget);
- busy |= mlx5e_poll_xdpsq_cq(&c->xdpsq.cq, NULL);
+ busy |= mlx5e_poll_xdpsq_cq(&c->xdpsq.cq);
if (c->xdp)
- busy |= mlx5e_poll_xdpsq_cq(&rq->xdpsq.cq, rq);
+ busy |= mlx5e_poll_xdpsq_cq(&c->rq_xdpsq.cq);
if (likely(budget)) { /* budget=0 means: don't poll rx rings */
- work_done = mlx5e_poll_rx_cq(&rq->cq, budget);
+ if (xsk_open)
+ work_done = mlx5e_poll_rx_cq(&xskrq->cq, budget);
+
+ if (likely(budget - work_done))
+ work_done += mlx5e_poll_rx_cq(&rq->cq, budget - work_done);
+
busy |= work_done == budget;
}
- busy |= c->rq.post_wqes(rq);
+ mlx5e_poll_ico_cq(&c->icosq.cq);
+
+ busy |= rq->post_wqes(rq);
+ if (xsk_open) {
+ mlx5e_poll_ico_cq(&c->xskicosq.cq);
+ busy |= mlx5e_poll_xdpsq_cq(&xsksq->cq);
+ busy_xsk |= mlx5e_xsk_tx(xsksq, MLX5E_TX_XSK_POLL_BUDGET);
+ busy_xsk |= xskrq->post_wqes(xskrq);
+ }
+
+ busy |= busy_xsk;
if (busy) {
if (likely(mlx5e_channel_no_affinity_change(c)))
return budget;
ch_stats->aff_change++;
+ aff_change = true;
if (budget && work_done == budget)
work_done--;
}
@@ -133,10 +153,22 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
mlx5e_cq_arm(&c->icosq.cq);
mlx5e_cq_arm(&c->xdpsq.cq);
+ if (xsk_open) {
+ mlx5e_handle_rx_dim(xskrq);
+ mlx5e_cq_arm(&c->xskicosq.cq);
+ mlx5e_cq_arm(&xsksq->cq);
+ mlx5e_cq_arm(&xskrq->cq);
+ }
+
+ if (unlikely(aff_change && busy_xsk)) {
+ mlx5e_trigger_irq(&c->icosq);
+ ch_stats->force_irq++;
+ }
+
return work_done;
}
-void mlx5e_completion_event(struct mlx5_core_cq *mcq)
+void mlx5e_completion_event(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe)
{
struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 23883d1fa22f..41f25ea2e8d9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -61,17 +61,21 @@ enum {
MLX5_EQ_DOORBEL_OFFSET = 0x40,
};
-struct mlx5_irq_info {
- cpumask_var_t mask;
- char name[MLX5_MAX_IRQ_NAME];
- void *context; /* dev_id provided to request_irq */
+/* The budget must be smaller than MLX5_NUM_SPARE_EQE to guarantee that we
+ * update the ci before we have polled all the entries in the EQ.
+ * MLX5_NUM_SPARE_EQE is used when sizing the EQ, so the budget is always
+ * smaller than the EQ size.
+ */
+enum {
+ MLX5_EQ_POLLING_BUDGET = 128,
};
+static_assert(MLX5_EQ_POLLING_BUDGET <= MLX5_NUM_SPARE_EQE);
+
struct mlx5_eq_table {
struct list_head comp_eqs_list;
- struct mlx5_eq pages_eq;
- struct mlx5_eq cmd_eq;
- struct mlx5_eq async_eq;
+ struct mlx5_eq_async pages_eq;
+ struct mlx5_eq_async cmd_eq;
+ struct mlx5_eq_async async_eq;
struct atomic_notifier_head nh[MLX5_EVENT_TYPE_MAX];
@@ -79,11 +83,8 @@ struct mlx5_eq_table {
struct mlx5_nb cq_err_nb;
struct mutex lock; /* sync async eqs creations */
- int num_comp_vectors;
- struct mlx5_irq_info *irq_info;
-#ifdef CONFIG_RFS_ACCEL
- struct cpu_rmap *rmap;
-#endif
+ int num_comp_eqs;
+ struct mlx5_irq_table *irq_table;
};
#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG) | \
@@ -124,16 +125,24 @@ static struct mlx5_core_cq *mlx5_eq_cq_get(struct mlx5_eq *eq, u32 cqn)
return cq;
}
-static irqreturn_t mlx5_eq_comp_int(int irq, void *eq_ptr)
+static int mlx5_eq_comp_int(struct notifier_block *nb,
+ __always_unused unsigned long action,
+ __always_unused void *data)
{
- struct mlx5_eq_comp *eq_comp = eq_ptr;
- struct mlx5_eq *eq = eq_ptr;
+ struct mlx5_eq_comp *eq_comp =
+ container_of(nb, struct mlx5_eq_comp, irq_nb);
+ struct mlx5_eq *eq = &eq_comp->core;
struct mlx5_eqe *eqe;
- int set_ci = 0;
+ int num_eqes = 0;
u32 cqn = -1;
- while ((eqe = next_eqe_sw(eq))) {
+ eqe = next_eqe_sw(eq);
+ if (!eqe)
+ goto out;
+
+ do {
struct mlx5_core_cq *cq;
+
/* Make sure we read EQ entry contents after we've
* checked the ownership bit.
*/
@@ -144,33 +153,23 @@ static irqreturn_t mlx5_eq_comp_int(int irq, void *eq_ptr)
cq = mlx5_eq_cq_get(eq, cqn);
if (likely(cq)) {
++cq->arm_sn;
- cq->comp(cq);
+ cq->comp(cq, eqe);
mlx5_cq_put(cq);
} else {
mlx5_core_warn(eq->dev, "Completion event for bogus CQ 0x%x\n", cqn);
}
++eq->cons_index;
- ++set_ci;
- /* The HCA will think the queue has overflowed if we
- * don't tell it we've been processing events. We
- * create our EQs with MLX5_NUM_SPARE_EQE extra
- * entries, so we must update our consumer index at
- * least that often.
- */
- if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) {
- eq_update_ci(eq, 0);
- set_ci = 0;
- }
- }
+ } while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq)));
+out:
eq_update_ci(eq, 1);
if (cqn != -1)
tasklet_schedule(&eq_comp->tasklet_ctx.task);
- return IRQ_HANDLED;
+ return 0;
}
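With this rework the completion EQ handler runs as a notifier callback and forwards the triggering EQE to the CQ owner (cq->comp(cq, eqe) above; mlx5e_completion_event() later in this patch gains the same extra parameter). Below is a minimal sketch of a CQ owner written against the new callback prototype, assuming the usual kernel headers; everything named example_* is hypothetical:

struct example_cq {
	struct mlx5_core_cq mcq;
	struct napi_struct *napi;
};

static void example_cq_comp(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe)
{
	struct example_cq *cq = container_of(mcq, struct example_cq, mcq);

	/* The triggering EQE is now passed through; handlers that do not
	 * need it (as here) can simply ignore it.
	 */
	napi_schedule(cq->napi);
}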
/* Some architectures don't latch interrupts when they are disabled, so using
@@ -184,25 +183,32 @@ u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq_comp *eq)
disable_irq(eq->core.irqn);
count_eqe = eq->core.cons_index;
- mlx5_eq_comp_int(eq->core.irqn, eq);
+ mlx5_eq_comp_int(&eq->irq_nb, 0, NULL);
count_eqe = eq->core.cons_index - count_eqe;
enable_irq(eq->core.irqn);
return count_eqe;
}
-static irqreturn_t mlx5_eq_async_int(int irq, void *eq_ptr)
+static int mlx5_eq_async_int(struct notifier_block *nb,
+ unsigned long action, void *data)
{
- struct mlx5_eq *eq = eq_ptr;
+ struct mlx5_eq_async *eq_async =
+ container_of(nb, struct mlx5_eq_async, irq_nb);
+ struct mlx5_eq *eq = &eq_async->core;
struct mlx5_eq_table *eqt;
struct mlx5_core_dev *dev;
struct mlx5_eqe *eqe;
- int set_ci = 0;
+ int num_eqes = 0;
dev = eq->dev;
eqt = dev->priv.eq_table;
- while ((eqe = next_eqe_sw(eq))) {
+ eqe = next_eqe_sw(eq);
+ if (!eqe)
+ goto out;
+
+ do {
/*
* Make sure we read EQ entry contents after we've
* checked the ownership bit.
@@ -217,23 +223,13 @@ static irqreturn_t mlx5_eq_async_int(int irq, void *eq_ptr)
atomic_notifier_call_chain(&eqt->nh[MLX5_EVENT_TYPE_NOTIFY_ANY], eqe->type, eqe);
++eq->cons_index;
- ++set_ci;
- /* The HCA will think the queue has overflowed if we
- * don't tell it we've been processing events. We
- * create our EQs with MLX5_NUM_SPARE_EQE extra
- * entries, so we must update our consumer index at
- * least that often.
- */
- if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) {
- eq_update_ci(eq, 0);
- set_ci = 0;
- }
- }
+ } while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq)));
+out:
eq_update_ci(eq, 1);
- return IRQ_HANDLED;
+ return 0;
}
static void init_eq_buf(struct mlx5_eq *eq)
@@ -248,22 +244,19 @@ static void init_eq_buf(struct mlx5_eq *eq)
}
static int
-create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, const char *name,
+create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
struct mlx5_eq_param *param)
{
- struct mlx5_eq_table *eq_table = dev->priv.eq_table;
struct mlx5_cq_table *cq_table = &eq->cq_table;
u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0};
struct mlx5_priv *priv = &dev->priv;
- u8 vecidx = param->index;
+ u8 vecidx = param->irq_index;
__be64 *pas;
void *eqc;
int inlen;
u32 *in;
int err;
-
- if (eq_table->irq_info[vecidx].context)
- return -EEXIST;
+ int i;
/* Init CQ table */
memset(cq_table, 0, sizeof(*cq_table));
@@ -291,10 +284,12 @@ create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, const char *name,
mlx5_fill_page_array(&eq->buf, pas);
MLX5_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ);
- if (!param->mask && MLX5_CAP_GEN(dev, log_max_uctx))
+ if (!param->mask[0] && MLX5_CAP_GEN(dev, log_max_uctx))
MLX5_SET(create_eq_in, in, uid, MLX5_SHARED_RESOURCE_UID);
- MLX5_SET64(create_eq_in, in, event_bitmask, param->mask);
+ for (i = 0; i < 4; i++)
+ MLX5_ARRAY_SET64(create_eq_in, in, event_bitmask, i,
+ param->mask[i]);
eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry);
MLX5_SET(eqc, eqc, log_eq_size, ilog2(eq->nent));
@@ -307,34 +302,19 @@ create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, const char *name,
if (err)
goto err_in;
- snprintf(eq_table->irq_info[vecidx].name, MLX5_MAX_IRQ_NAME, "%s@pci:%s",
- name, pci_name(dev->pdev));
- eq_table->irq_info[vecidx].context = param->context;
-
eq->vecidx = vecidx;
eq->eqn = MLX5_GET(create_eq_out, out, eq_number);
eq->irqn = pci_irq_vector(dev->pdev, vecidx);
eq->dev = dev;
eq->doorbell = priv->uar->map + MLX5_EQ_DOORBEL_OFFSET;
- err = request_irq(eq->irqn, param->handler, 0,
- eq_table->irq_info[vecidx].name, param->context);
- if (err)
- goto err_eq;
err = mlx5_debug_eq_add(dev, eq);
if (err)
- goto err_irq;
-
- /* EQs are created in ARMED state
- */
- eq_update_ci(eq, 1);
+ goto err_eq;
kvfree(in);
return 0;
-err_irq:
- free_irq(eq->irqn, eq);
-
err_eq:
mlx5_cmd_destroy_eq(dev, eq->eqn);
@@ -346,18 +326,48 @@ err_buf:
return err;
}
-static int destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
+/**
+ * mlx5_eq_enable - Enable EQ for receiving EQEs
+ * @dev: Device which owns the eq
+ * @eq: EQ to enable
+ * @nb: Notifier call block
+ *
+ * Must be called after the EQ is created in the device.
+ */
+int mlx5_eq_enable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
+ struct notifier_block *nb)
{
struct mlx5_eq_table *eq_table = dev->priv.eq_table;
- struct mlx5_irq_info *irq_info;
int err;
- irq_info = &eq_table->irq_info[eq->vecidx];
+ err = mlx5_irq_attach_nb(eq_table->irq_table, eq->vecidx, nb);
+ if (!err)
+ eq_update_ci(eq, 1);
- mlx5_debug_eq_remove(dev, eq);
+ return err;
+}
+EXPORT_SYMBOL(mlx5_eq_enable);
+
+/**
+ * mlx5_eq_disable - Disable EQ from receiving EQEs
+ * @dev: Device which owns the eq
+ * @eq: EQ to disable
+ * @nb: Notifier call block
+ *
+ * Must be called before the EQ is destroyed.
+ */
+void mlx5_eq_disable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
+ struct notifier_block *nb)
+{
+ struct mlx5_eq_table *eq_table = dev->priv.eq_table;
+
+ mlx5_irq_detach_nb(eq_table->irq_table, eq->vecidx, nb);
+}
+EXPORT_SYMBOL(mlx5_eq_disable);
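The kernel-doc above describes the new two-step contract: create the EQ, then enable it with a notifier block, which replaces the per-EQ request_irq() that this patch removes. Below is a minimal consumer sketch of that contract; the example_* names are hypothetical, mlx5_eq_create_generic(), mlx5_eq_enable() and mlx5_eq_disable() come from this patch, and mlx5_eq_destroy_generic() is assumed to be the matching destroy helper (not shown in this hunk):

struct example_eq {
	struct mlx5_eq *core;
	struct notifier_block irq_nb;
};

static int example_eq_handler(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	/* EQEs arrive through the notifier chain attached to the shared
	 * IRQ; 'data' is unused in this sketch.
	 */
	return 0;
}

static int example_eq_setup(struct mlx5_core_dev *dev, struct example_eq *eq)
{
	struct mlx5_eq_param param = {
		.irq_index = 0,		/* async EQs share irq index 0 */
		.nent = 64,
	};
	int err;

	param.mask[0] = 1ull << MLX5_EVENT_TYPE_CMD;	/* example event */

	eq->core = mlx5_eq_create_generic(dev, &param);
	if (IS_ERR(eq->core))
		return PTR_ERR(eq->core);

	eq->irq_nb.notifier_call = example_eq_handler;
	err = mlx5_eq_enable(dev, eq->core, &eq->irq_nb);
	if (err)
		mlx5_eq_destroy_generic(dev, eq->core);
	return err;
}

static void example_eq_teardown(struct mlx5_core_dev *dev,
				struct example_eq *eq)
{
	/* Disable (detach the notifier) before destroy, mirroring the
	 * order used for the async EQs in this patch.
	 */
	mlx5_eq_disable(dev, eq->core, &eq->irq_nb);
	mlx5_eq_destroy_generic(dev, eq->core);
}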
+
+static int destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
+{
+ int err;
- free_irq(eq->irqn, irq_info->context);
- irq_info->context = NULL;
+ mlx5_debug_eq_remove(dev, eq);
err = mlx5_cmd_destroy_eq(dev, eq->eqn);
if (err)
@@ -382,7 +392,7 @@ int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
return err;
}
-int mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
+void mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
{
struct mlx5_cq_table *table = &eq->cq_table;
struct mlx5_core_cq *tmp;
@@ -392,16 +402,14 @@ int mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
spin_unlock(&table->lock);
if (!tmp) {
- mlx5_core_warn(eq->dev, "cq 0x%x not found in eq 0x%x tree\n", eq->eqn, cq->cqn);
- return -ENOENT;
- }
-
- if (tmp != cq) {
- mlx5_core_warn(eq->dev, "corruption on cqn 0x%x in eq 0x%x\n", eq->eqn, cq->cqn);
- return -EINVAL;
+ mlx5_core_dbg(eq->dev, "cq 0x%x not found in eq 0x%x tree\n",
+ cq->cqn, eq->eqn);
+ return;
}
- return 0;
+ if (tmp != cq)
+ mlx5_core_dbg(eq->dev, "corruption on cqn 0x%x in eq 0x%x\n",
+ cq->cqn, eq->eqn);
}
int mlx5_eq_table_init(struct mlx5_core_dev *dev)
@@ -423,6 +431,7 @@ int mlx5_eq_table_init(struct mlx5_core_dev *dev)
for (i = 0; i < MLX5_EVENT_TYPE_MAX; i++)
ATOMIC_INIT_NOTIFIER_HEAD(&eq_table->nh[i]);
+ eq_table->irq_table = dev->priv.irq_table;
return 0;
kvfree_eq_table:
@@ -439,19 +448,20 @@ void mlx5_eq_table_cleanup(struct mlx5_core_dev *dev)
/* Async EQs */
-static int create_async_eq(struct mlx5_core_dev *dev, const char *name,
+static int create_async_eq(struct mlx5_core_dev *dev,
struct mlx5_eq *eq, struct mlx5_eq_param *param)
{
struct mlx5_eq_table *eq_table = dev->priv.eq_table;
int err;
mutex_lock(&eq_table->lock);
- if (param->index >= MLX5_EQ_MAX_ASYNC_EQS) {
- err = -ENOSPC;
+ /* Async EQs must share irq index 0 */
+ if (param->irq_index != 0) {
+ err = -EINVAL;
goto unlock;
}
- err = create_map_eq(dev, eq, name, param);
+ err = create_map_eq(dev, eq, param);
unlock:
mutex_unlock(&eq_table->lock);
return err;
@@ -480,7 +490,7 @@ static int cq_err_event_notifier(struct notifier_block *nb,
/* type == MLX5_EVENT_TYPE_CQ_ERROR */
eqt = mlx5_nb_cof(nb, struct mlx5_eq_table, cq_err_nb);
- eq = &eqt->async_eq;
+ eq = &eqt->async_eq.core;
eqe = data;
cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
@@ -493,14 +503,31 @@ static int cq_err_event_notifier(struct notifier_block *nb,
return NOTIFY_OK;
}
- cq->event(cq, type);
+ if (cq->event)
+ cq->event(cq, type);
mlx5_cq_put(cq);
return NOTIFY_OK;
}
-static u64 gather_async_events_mask(struct mlx5_core_dev *dev)
+static void gather_user_async_events(struct mlx5_core_dev *dev, u64 mask[4])
+{
+ __be64 *user_unaffiliated_events;
+ __be64 *user_affiliated_events;
+ int i;
+
+ user_affiliated_events =
+ MLX5_CAP_DEV_EVENT(dev, user_affiliated_events);
+ user_unaffiliated_events =
+ MLX5_CAP_DEV_EVENT(dev, user_unaffiliated_events);
+
+ for (i = 0; i < 4; i++)
+ mask[i] |= be64_to_cpu(user_affiliated_events[i] |
+ user_unaffiliated_events[i]);
+}
+
+static void gather_async_events_mask(struct mlx5_core_dev *dev, u64 mask[4])
{
u64 async_event_mask = MLX5_ASYNC_EVENT_MASK;
@@ -533,10 +560,14 @@ static u64 gather_async_events_mask(struct mlx5_core_dev *dev)
if (MLX5_CAP_GEN(dev, max_num_of_monitor_counters))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_MONITOR_COUNTER);
- if (mlx5_core_is_ecpf_esw_manager(dev))
- async_event_mask |= (1ull << MLX5_EVENT_TYPE_HOST_PARAMS_CHANGE);
+ if (mlx5_eswitch_is_funcs_handler(dev))
+ async_event_mask |=
+ (1ull << MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED);
- return async_event_mask;
+ mask[0] = async_event_mask;
+
+ if (MLX5_CAP_GEN(dev, event_cap))
+ gather_user_async_events(dev, mask);
}
static int create_async_eqs(struct mlx5_core_dev *dev)
@@ -548,55 +579,76 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
MLX5_NB_INIT(&table->cq_err_nb, cq_err_event_notifier, CQ_ERROR);
mlx5_eq_notifier_register(dev, &table->cq_err_nb);
+ table->cmd_eq.irq_nb.notifier_call = mlx5_eq_async_int;
param = (struct mlx5_eq_param) {
- .index = MLX5_EQ_CMD_IDX,
- .mask = 1ull << MLX5_EVENT_TYPE_CMD,
+ .irq_index = 0,
.nent = MLX5_NUM_CMD_EQE,
- .context = &table->cmd_eq,
- .handler = mlx5_eq_async_int,
};
- err = create_async_eq(dev, "mlx5_cmd_eq", &table->cmd_eq, &param);
+
+ param.mask[0] = 1ull << MLX5_EVENT_TYPE_CMD;
+ err = create_async_eq(dev, &table->cmd_eq.core, &param);
if (err) {
mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err);
goto err0;
}
-
+ err = mlx5_eq_enable(dev, &table->cmd_eq.core, &table->cmd_eq.irq_nb);
+ if (err) {
+ mlx5_core_warn(dev, "failed to enable cmd EQ %d\n", err);
+ goto err1;
+ }
mlx5_cmd_use_events(dev);
+ table->async_eq.irq_nb.notifier_call = mlx5_eq_async_int;
param = (struct mlx5_eq_param) {
- .index = MLX5_EQ_ASYNC_IDX,
- .mask = gather_async_events_mask(dev),
+ .irq_index = 0,
.nent = MLX5_NUM_ASYNC_EQE,
- .context = &table->async_eq,
- .handler = mlx5_eq_async_int,
};
- err = create_async_eq(dev, "mlx5_async_eq", &table->async_eq, &param);
+
+ gather_async_events_mask(dev, param.mask);
+ err = create_async_eq(dev, &table->async_eq.core, &param);
if (err) {
mlx5_core_warn(dev, "failed to create async EQ %d\n", err);
- goto err1;
+ goto err2;
+ }
+ err = mlx5_eq_enable(dev, &table->async_eq.core,
+ &table->async_eq.irq_nb);
+ if (err) {
+ mlx5_core_warn(dev, "failed to enable async EQ %d\n", err);
+ goto err3;
}
+ table->pages_eq.irq_nb.notifier_call = mlx5_eq_async_int;
param = (struct mlx5_eq_param) {
- .index = MLX5_EQ_PAGEREQ_IDX,
- .mask = 1 << MLX5_EVENT_TYPE_PAGE_REQUEST,
+ .irq_index = 0,
.nent = /* TODO: sriov max_vf + */ 1,
- .context = &table->pages_eq,
- .handler = mlx5_eq_async_int,
};
- err = create_async_eq(dev, "mlx5_pages_eq", &table->pages_eq, &param);
+
+ param.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_REQUEST;
+ err = create_async_eq(dev, &table->pages_eq.core, &param);
if (err) {
mlx5_core_warn(dev, "failed to create pages EQ %d\n", err);
- goto err2;
+ goto err4;
+ }
+ err = mlx5_eq_enable(dev, &table->pages_eq.core,
+ &table->pages_eq.irq_nb);
+ if (err) {
+ mlx5_core_warn(dev, "failed to enable pages EQ %d\n", err);
+ goto err5;
}
return err;
+err5:
+ destroy_async_eq(dev, &table->pages_eq.core);
+err4:
+ mlx5_eq_disable(dev, &table->async_eq.core, &table->async_eq.irq_nb);
+err3:
+ destroy_async_eq(dev, &table->async_eq.core);
err2:
- destroy_async_eq(dev, &table->async_eq);
-
-err1:
mlx5_cmd_use_polling(dev);
- destroy_async_eq(dev, &table->cmd_eq);
+ mlx5_eq_disable(dev, &table->cmd_eq.core, &table->cmd_eq.irq_nb);
+err1:
+ destroy_async_eq(dev, &table->cmd_eq.core);
err0:
mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
return err;
@@ -607,19 +659,22 @@ static void destroy_async_eqs(struct mlx5_core_dev *dev)
struct mlx5_eq_table *table = dev->priv.eq_table;
int err;
- err = destroy_async_eq(dev, &table->pages_eq);
+ mlx5_eq_disable(dev, &table->pages_eq.core, &table->pages_eq.irq_nb);
+ err = destroy_async_eq(dev, &table->pages_eq.core);
if (err)
mlx5_core_err(dev, "failed to destroy pages eq, err(%d)\n",
err);
- err = destroy_async_eq(dev, &table->async_eq);
+ mlx5_eq_disable(dev, &table->async_eq.core, &table->async_eq.irq_nb);
+ err = destroy_async_eq(dev, &table->async_eq.core);
if (err)
mlx5_core_err(dev, "failed to destroy async eq, err(%d)\n",
err);
mlx5_cmd_use_polling(dev);
- err = destroy_async_eq(dev, &table->cmd_eq);
+ mlx5_eq_disable(dev, &table->cmd_eq.core, &table->cmd_eq.irq_nb);
+ err = destroy_async_eq(dev, &table->cmd_eq.core);
if (err)
mlx5_core_err(dev, "failed to destroy command eq, err(%d)\n",
err);
@@ -629,24 +684,24 @@ static void destroy_async_eqs(struct mlx5_core_dev *dev)
struct mlx5_eq *mlx5_get_async_eq(struct mlx5_core_dev *dev)
{
- return &dev->priv.eq_table->async_eq;
+ return &dev->priv.eq_table->async_eq.core;
}
void mlx5_eq_synchronize_async_irq(struct mlx5_core_dev *dev)
{
- synchronize_irq(dev->priv.eq_table->async_eq.irqn);
+ synchronize_irq(dev->priv.eq_table->async_eq.core.irqn);
}
void mlx5_eq_synchronize_cmd_irq(struct mlx5_core_dev *dev)
{
- synchronize_irq(dev->priv.eq_table->cmd_eq.irqn);
+ synchronize_irq(dev->priv.eq_table->cmd_eq.core.irqn);
}
/* Generic EQ API for mlx5_core consumers
* Needed For RDMA ODP EQ for now
*/
struct mlx5_eq *
-mlx5_eq_create_generic(struct mlx5_core_dev *dev, const char *name,
+mlx5_eq_create_generic(struct mlx5_core_dev *dev,
struct mlx5_eq_param *param)
{
struct mlx5_eq *eq = kvzalloc(sizeof(*eq), GFP_KERNEL);
@@ -655,7 +710,7 @@ mlx5_eq_create_generic(struct mlx5_core_dev *dev, const char *name,
if (!eq)
return ERR_PTR(-ENOMEM);
- err = create_async_eq(dev, name, eq, param);
+ err = create_async_eq(dev, eq, param);
if (err) {
kvfree(eq);
eq = ERR_PTR(err);
@@ -713,84 +768,14 @@ void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm)
}
EXPORT_SYMBOL(mlx5_eq_update_ci);
-/* Completion EQs */
-
-static int set_comp_irq_affinity_hint(struct mlx5_core_dev *mdev, int i)
-{
- struct mlx5_priv *priv = &mdev->priv;
- int vecidx = MLX5_EQ_VEC_COMP_BASE + i;
- int irq = pci_irq_vector(mdev->pdev, vecidx);
- struct mlx5_irq_info *irq_info = &priv->eq_table->irq_info[vecidx];
-
- if (!zalloc_cpumask_var(&irq_info->mask, GFP_KERNEL)) {
- mlx5_core_warn(mdev, "zalloc_cpumask_var failed");
- return -ENOMEM;
- }
-
- cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node),
- irq_info->mask);
-
- if (IS_ENABLED(CONFIG_SMP) &&
- irq_set_affinity_hint(irq, irq_info->mask))
- mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq);
-
- return 0;
-}
-
-static void clear_comp_irq_affinity_hint(struct mlx5_core_dev *mdev, int i)
-{
- int vecidx = MLX5_EQ_VEC_COMP_BASE + i;
- struct mlx5_priv *priv = &mdev->priv;
- int irq = pci_irq_vector(mdev->pdev, vecidx);
- struct mlx5_irq_info *irq_info = &priv->eq_table->irq_info[vecidx];
-
- irq_set_affinity_hint(irq, NULL);
- free_cpumask_var(irq_info->mask);
-}
-
-static int set_comp_irq_affinity_hints(struct mlx5_core_dev *mdev)
-{
- int err;
- int i;
-
- for (i = 0; i < mdev->priv.eq_table->num_comp_vectors; i++) {
- err = set_comp_irq_affinity_hint(mdev, i);
- if (err)
- goto err_out;
- }
-
- return 0;
-
-err_out:
- for (i--; i >= 0; i--)
- clear_comp_irq_affinity_hint(mdev, i);
-
- return err;
-}
-
-static void clear_comp_irqs_affinity_hints(struct mlx5_core_dev *mdev)
-{
- int i;
-
- for (i = 0; i < mdev->priv.eq_table->num_comp_vectors; i++)
- clear_comp_irq_affinity_hint(mdev, i);
-}
-
static void destroy_comp_eqs(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
struct mlx5_eq_comp *eq, *n;
- clear_comp_irqs_affinity_hints(dev);
-
-#ifdef CONFIG_RFS_ACCEL
- if (table->rmap) {
- free_irq_cpu_rmap(table->rmap);
- table->rmap = NULL;
- }
-#endif
list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
list_del(&eq->list);
+ mlx5_eq_disable(dev, &eq->core, &eq->irq_nb);
if (destroy_unmap_eq(dev, &eq->core))
mlx5_core_warn(dev, "failed to destroy comp EQ 0x%x\n",
eq->core.eqn);
@@ -802,23 +787,17 @@ static void destroy_comp_eqs(struct mlx5_core_dev *dev)
static int create_comp_eqs(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
- char name[MLX5_MAX_IRQ_NAME];
struct mlx5_eq_comp *eq;
- int ncomp_vec;
+ int ncomp_eqs;
int nent;
int err;
int i;
INIT_LIST_HEAD(&table->comp_eqs_list);
- ncomp_vec = table->num_comp_vectors;
+ ncomp_eqs = table->num_comp_eqs;
nent = MLX5_COMP_EQ_SIZE;
-#ifdef CONFIG_RFS_ACCEL
- table->rmap = alloc_irq_cpu_rmap(ncomp_vec);
- if (!table->rmap)
- return -ENOMEM;
-#endif
- for (i = 0; i < ncomp_vec; i++) {
- int vecidx = i + MLX5_EQ_VEC_COMP_BASE;
+ for (i = 0; i < ncomp_eqs; i++) {
+ int vecidx = i + MLX5_IRQ_VEC_COMP_BASE;
struct mlx5_eq_param param = {};
eq = kzalloc(sizeof(*eq), GFP_KERNEL);
@@ -833,33 +812,28 @@ static int create_comp_eqs(struct mlx5_core_dev *dev)
tasklet_init(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb,
(unsigned long)&eq->tasklet_ctx);
-#ifdef CONFIG_RFS_ACCEL
- irq_cpu_rmap_add(table->rmap, pci_irq_vector(dev->pdev, vecidx));
-#endif
- snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i);
+ eq->irq_nb.notifier_call = mlx5_eq_comp_int;
param = (struct mlx5_eq_param) {
- .index = vecidx,
- .mask = 0,
+ .irq_index = vecidx,
.nent = nent,
- .context = &eq->core,
- .handler = mlx5_eq_comp_int
};
- err = create_map_eq(dev, &eq->core, name, &param);
+ err = create_map_eq(dev, &eq->core, &param);
+ if (err) {
+ kfree(eq);
+ goto clean;
+ }
+ err = mlx5_eq_enable(dev, &eq->core, &eq->irq_nb);
if (err) {
+ destroy_unmap_eq(dev, &eq->core);
kfree(eq);
goto clean;
}
+
mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->core.eqn);
/* add tail, to keep the list ordered, for mlx5_vector2eqn to work */
list_add_tail(&eq->list, &table->comp_eqs_list);
}
- err = set_comp_irq_affinity_hints(dev);
- if (err) {
- mlx5_core_err(dev, "Failed to alloc affinity hint cpumask\n");
- goto clean;
- }
-
return 0;
clean:
@@ -890,22 +864,24 @@ EXPORT_SYMBOL(mlx5_vector2eqn);
unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev)
{
- return dev->priv.eq_table->num_comp_vectors;
+ return dev->priv.eq_table->num_comp_eqs;
}
EXPORT_SYMBOL(mlx5_comp_vectors_count);
struct cpumask *
mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector)
{
- /* TODO: consider irq_get_affinity_mask(irq) */
- return dev->priv.eq_table->irq_info[vector + MLX5_EQ_VEC_COMP_BASE].mask;
+ int vecidx = vector + MLX5_IRQ_VEC_COMP_BASE;
+
+ return mlx5_irq_get_affinity_mask(dev->priv.eq_table->irq_table,
+ vecidx);
}
EXPORT_SYMBOL(mlx5_comp_irq_get_affinity_mask);
#ifdef CONFIG_RFS_ACCEL
struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev)
{
- return dev->priv.eq_table->rmap;
+ return mlx5_irq_get_rmap(dev->priv.eq_table->irq_table);
}
#endif
@@ -926,82 +902,19 @@ struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn)
void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
- int i, max_eqs;
-
- clear_comp_irqs_affinity_hints(dev);
-
-#ifdef CONFIG_RFS_ACCEL
- if (table->rmap) {
- free_irq_cpu_rmap(table->rmap);
- table->rmap = NULL;
- }
-#endif
mutex_lock(&table->lock); /* sync with create/destroy_async_eq */
- max_eqs = table->num_comp_vectors + MLX5_EQ_VEC_COMP_BASE;
- for (i = max_eqs - 1; i >= 0; i--) {
- if (!table->irq_info[i].context)
- continue;
- free_irq(pci_irq_vector(dev->pdev, i), table->irq_info[i].context);
- table->irq_info[i].context = NULL;
- }
+ mlx5_irq_table_destroy(dev);
mutex_unlock(&table->lock);
- pci_free_irq_vectors(dev->pdev);
-}
-
-static int alloc_irq_vectors(struct mlx5_core_dev *dev)
-{
- struct mlx5_priv *priv = &dev->priv;
- struct mlx5_eq_table *table = priv->eq_table;
- int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ?
- MLX5_CAP_GEN(dev, max_num_eqs) :
- 1 << MLX5_CAP_GEN(dev, log_max_eq);
- int nvec;
- int err;
-
- nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
- MLX5_EQ_VEC_COMP_BASE;
- nvec = min_t(int, nvec, num_eqs);
- if (nvec <= MLX5_EQ_VEC_COMP_BASE)
- return -ENOMEM;
-
- table->irq_info = kcalloc(nvec, sizeof(*table->irq_info), GFP_KERNEL);
- if (!table->irq_info)
- return -ENOMEM;
-
- nvec = pci_alloc_irq_vectors(dev->pdev, MLX5_EQ_VEC_COMP_BASE + 1,
- nvec, PCI_IRQ_MSIX);
- if (nvec < 0) {
- err = nvec;
- goto err_free_irq_info;
- }
-
- table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;
-
- return 0;
-
-err_free_irq_info:
- kfree(table->irq_info);
- return err;
-}
-
-static void free_irq_vectors(struct mlx5_core_dev *dev)
-{
- struct mlx5_priv *priv = &dev->priv;
-
- pci_free_irq_vectors(dev->pdev);
- kfree(priv->eq_table->irq_info);
}
int mlx5_eq_table_create(struct mlx5_core_dev *dev)
{
+ struct mlx5_eq_table *eq_table = dev->priv.eq_table;
int err;
- err = alloc_irq_vectors(dev);
- if (err) {
- mlx5_core_err(dev, "alloc irq vectors failed\n");
- return err;
- }
+ eq_table->num_comp_eqs =
+ mlx5_irq_get_num_comp(eq_table->irq_table);
err = create_async_eqs(dev);
if (err) {
@@ -1019,7 +932,6 @@ int mlx5_eq_table_create(struct mlx5_core_dev *dev)
err_comp_eqs:
destroy_async_eqs(dev);
err_async_eqs:
- free_irq_vectors(dev);
return err;
}
@@ -1027,7 +939,6 @@ void mlx5_eq_table_destroy(struct mlx5_core_dev *dev)
{
destroy_comp_eqs(dev);
destroy_async_eqs(dev);
- free_irq_vectors(dev);
}
int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
@@ -1039,6 +950,7 @@ int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
return atomic_notifier_chain_register(&eqt->nh[nb->event_type], &nb->nb);
}
+EXPORT_SYMBOL(mlx5_eq_notifier_register);
int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
{
@@ -1049,3 +961,4 @@ int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
return atomic_notifier_chain_unregister(&eqt->nh[nb->event_type], &nb->nb);
}
+EXPORT_SYMBOL(mlx5_eq_notifier_unregister);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 6a921e24cd5e..7281f8d6cba6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -134,6 +134,30 @@ static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}
+int mlx5_eswitch_modify_esw_vport_context(struct mlx5_eswitch *esw, u16 vport,
+ void *in, int inlen)
+{
+ return modify_esw_vport_context_cmd(esw->dev, vport, in, inlen);
+}
+
+static int query_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
+ void *out, int outlen)
+{
+ u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};
+
+ MLX5_SET(query_esw_vport_context_in, in, opcode,
+ MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
+ MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
+ MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
+}
+
+int mlx5_eswitch_query_esw_vport_context(struct mlx5_eswitch *esw, u16 vport,
+ void *out, int outlen)
+{
+ return query_esw_vport_context_cmd(esw->dev, vport, out, outlen);
+}
+
static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u16 vport,
u16 vlan, u8 qos, u8 set_flags)
{
@@ -473,7 +497,7 @@ static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
fdb_add:
/* SRIOV is enabled: Forward UC MAC to vport */
- if (esw->fdb_table.legacy.fdb && esw->mode == SRIOV_LEGACY)
+ if (esw->fdb_table.legacy.fdb && esw->mode == MLX5_ESWITCH_LEGACY)
vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM fr(%p)\n",
@@ -873,7 +897,7 @@ static void esw_vport_change_handle_locked(struct mlx5_vport *vport)
struct mlx5_eswitch *esw = dev->priv.eswitch;
u8 mac[ETH_ALEN];
- mlx5_query_nic_vport_mac_address(dev, vport->vport, mac);
+ mlx5_query_nic_vport_mac_address(dev, vport->vport, true, mac);
esw_debug(dev, "vport[%d] Context Changed: perm mac: %pM\n",
vport->vport, mac);
@@ -939,7 +963,7 @@ int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size));
root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS,
- vport->vport);
+ mlx5_eswitch_vport_num_to_index(esw, vport->vport));
if (!root_ns) {
esw_warn(dev, "Failed to get E-Switch egress flow namespace for vport (%d)\n", vport->vport);
return -EOPNOTSUPP;
@@ -1057,7 +1081,7 @@ int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));
root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
- vport->vport);
+ mlx5_eswitch_vport_num_to_index(esw, vport->vport));
if (!root_ns) {
esw_warn(dev, "Failed to get E-Switch ingress flow namespace for vport (%d)\n", vport->vport);
return -EOPNOTSUPP;
@@ -1168,6 +1192,8 @@ void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
vport->ingress.drop_rule = NULL;
vport->ingress.allow_rule = NULL;
+
+ esw_vport_del_ingress_acl_modify_metadata(esw, vport);
}
void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
@@ -1527,6 +1553,7 @@ static void esw_apply_vport_conf(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
u16 vport_num = vport->vport;
+ int flags;
if (esw->manager_vport == vport_num)
return;
@@ -1544,11 +1571,13 @@ static void esw_apply_vport_conf(struct mlx5_eswitch *esw,
vport->info.node_guid);
}
+ flags = (vport->info.vlan || vport->info.qos) ?
+ SET_VLAN_STRIP | SET_VLAN_INSERT : 0;
modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan, vport->info.qos,
- (vport->info.vlan || vport->info.qos));
+ flags);
/* Only legacy mode needs ACLs */
- if (esw->mode == SRIOV_LEGACY) {
+ if (esw->mode == MLX5_ESWITCH_LEGACY) {
esw_vport_ingress_config(esw, vport);
esw_vport_egress_config(esw, vport);
}
@@ -1600,7 +1629,7 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);
/* Create steering drop counters for ingress and egress ACLs */
- if (vport_num && esw->mode == SRIOV_LEGACY)
+ if (vport_num && esw->mode == MLX5_ESWITCH_LEGACY)
esw_vport_create_drop_counters(vport);
/* Restore old vport configuration */
@@ -1654,7 +1683,7 @@ static void esw_disable_vport(struct mlx5_eswitch *esw,
vport->enabled_events = 0;
esw_vport_disable_qos(esw, vport);
if (esw->manager_vport != vport_num &&
- esw->mode == SRIOV_LEGACY) {
+ esw->mode == MLX5_ESWITCH_LEGACY) {
mlx5_modify_vport_admin_state(esw->dev,
MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
vport_num, 1,
@@ -1686,54 +1715,91 @@ static int eswitch_vport_event(struct notifier_block *nb,
return NOTIFY_OK;
}
+/**
+ * mlx5_esw_query_functions - Returns raw output about functions state
+ * @dev: Pointer to device to query
+ *
+ * mlx5_esw_query_functions() allocates and returns the raw output of the
+ * QUERY_ESW_FUNCTIONS command on success; otherwise it returns an ERR_PTR.
+ * The caller must free the returned buffer with kvfree() when a valid
+ * pointer is returned.
+ */
+const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
+{
+ int outlen = MLX5_ST_SZ_BYTES(query_esw_functions_out);
+ u32 in[MLX5_ST_SZ_DW(query_esw_functions_in)] = {};
+ u32 *out;
+ int err;
+
+ out = kvzalloc(outlen, GFP_KERNEL);
+ if (!out)
+ return ERR_PTR(-ENOMEM);
+
+ MLX5_SET(query_esw_functions_in, in, opcode,
+ MLX5_CMD_OP_QUERY_ESW_FUNCTIONS);
+
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
+ if (!err)
+ return out;
+
+ kvfree(out);
+ return ERR_PTR(err);
+}
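As the kernel-doc above notes, the returned buffer belongs to the caller. Here is a short usage sketch, mirroring mlx5_eswitch_update_num_of_vfs() later in this patch; the wrapper name is hypothetical and the field read is just one example:

static int example_read_num_vfs(struct mlx5_core_dev *dev, u16 *num_vfs)
{
	const u32 *out;

	out = mlx5_esw_query_functions(dev);
	if (IS_ERR(out))
		return PTR_ERR(out);

	/* Pick any field of interest out of the raw command output ... */
	*num_vfs = MLX5_GET(query_esw_functions_out, out,
			    host_params_context.host_num_of_vfs);

	/* ... and release the buffer once done. */
	kvfree(out);
	return 0;
}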
+
+static void mlx5_eswitch_event_handlers_register(struct mlx5_eswitch *esw)
+{
+ MLX5_NB_INIT(&esw->nb, eswitch_vport_event, NIC_VPORT_CHANGE);
+ mlx5_eq_notifier_register(esw->dev, &esw->nb);
+
+ if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev)) {
+ MLX5_NB_INIT(&esw->esw_funcs.nb, mlx5_esw_funcs_changed_handler,
+ ESW_FUNCTIONS_CHANGED);
+ mlx5_eq_notifier_register(esw->dev, &esw->esw_funcs.nb);
+ }
+}
+
+static void mlx5_eswitch_event_handlers_unregister(struct mlx5_eswitch *esw)
+{
+ if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev))
+ mlx5_eq_notifier_unregister(esw->dev, &esw->esw_funcs.nb);
+
+ mlx5_eq_notifier_unregister(esw->dev, &esw->nb);
+
+ flush_workqueue(esw->work_queue);
+}
+
/* Public E-Switch API */
#define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev))
-int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
+int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode)
{
- int vf_nvports = 0, total_nvports = 0;
struct mlx5_vport *vport;
int err;
int i, enabled_events;
if (!ESW_ALLOWED(esw) ||
!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
- esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n");
+ esw_warn(esw->dev, "FDB is not supported, aborting ...\n");
return -EOPNOTSUPP;
}
if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support))
- esw_warn(esw->dev, "E-Switch ingress ACL is not supported by FW\n");
+ esw_warn(esw->dev, "ingress ACL is not supported by FW\n");
if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
- esw_warn(esw->dev, "E-Switch engress ACL is not supported by FW\n");
-
- esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d) mode (%d)\n", nvfs, mode);
-
- if (mode == SRIOV_OFFLOADS) {
- if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
- err = mlx5_query_host_params_num_vfs(esw->dev, &vf_nvports);
- if (err)
- return err;
- total_nvports = esw->total_vports;
- } else {
- vf_nvports = nvfs;
- total_nvports = nvfs + MLX5_SPECIAL_VPORTS(esw->dev);
- }
- }
+ esw_warn(esw->dev, "engress ACL is not supported by FW\n");
esw->mode = mode;
mlx5_lag_update(esw->dev);
- if (mode == SRIOV_LEGACY) {
+ if (mode == MLX5_ESWITCH_LEGACY) {
err = esw_create_legacy_table(esw);
if (err)
goto abort;
} else {
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
- err = esw_offloads_init(esw, vf_nvports, total_nvports);
+ err = esw_offloads_init(esw);
}
if (err)
@@ -1743,11 +1809,8 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
if (err)
esw_warn(esw->dev, "Failed to create eswitch TSAR");
- /* Don't enable vport events when in SRIOV_OFFLOADS mode, since:
- * 1. L2 table (MPFS) is programmed by PF/VF representors netdevs set_rx_mode
- * 2. FDB/Eswitch is programmed by user space tools
- */
- enabled_events = (mode == SRIOV_LEGACY) ? SRIOV_VPORT_EVENTS : 0;
+ enabled_events = (mode == MLX5_ESWITCH_LEGACY) ? SRIOV_VPORT_EVENTS :
+ UC_ADDR_CHANGE;
/* Enable PF vport */
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
@@ -1760,22 +1823,21 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
}
/* Enable VF vports */
- mlx5_esw_for_each_vf_vport(esw, i, vport, nvfs)
+ mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
esw_enable_vport(esw, vport, enabled_events);
- if (mode == SRIOV_LEGACY) {
- MLX5_NB_INIT(&esw->nb, eswitch_vport_event, NIC_VPORT_CHANGE);
- mlx5_eq_notifier_register(esw->dev, &esw->nb);
- }
+ mlx5_eswitch_event_handlers_register(esw);
+
+ esw_info(esw->dev, "Enable: mode(%s), nvfs(%d), active vports(%d)\n",
+ mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
+ esw->esw_funcs.num_vfs, esw->enabled_vports);
- esw_info(esw->dev, "SRIOV enabled: active vports(%d)\n",
- esw->enabled_vports);
return 0;
abort:
- esw->mode = SRIOV_NONE;
+ esw->mode = MLX5_ESWITCH_NONE;
- if (mode == SRIOV_OFFLOADS) {
+ if (mode == MLX5_ESWITCH_OFFLOADS) {
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
}
@@ -1783,23 +1845,22 @@ abort:
return err;
}
-void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
+void mlx5_eswitch_disable(struct mlx5_eswitch *esw)
{
struct esw_mc_addr *mc_promisc;
struct mlx5_vport *vport;
int old_mode;
int i;
- if (!ESW_ALLOWED(esw) || esw->mode == SRIOV_NONE)
+ if (!ESW_ALLOWED(esw) || esw->mode == MLX5_ESWITCH_NONE)
return;
- esw_info(esw->dev, "disable SRIOV: active vports(%d) mode(%d)\n",
- esw->enabled_vports, esw->mode);
+ esw_info(esw->dev, "Disable: mode(%s), nvfs(%d), active vports(%d)\n",
+ esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
+ esw->esw_funcs.num_vfs, esw->enabled_vports);
mc_promisc = &esw->mc_promisc;
-
- if (esw->mode == SRIOV_LEGACY)
- mlx5_eq_notifier_unregister(esw->dev, &esw->nb);
+ mlx5_eswitch_event_handlers_unregister(esw);
mlx5_esw_for_all_vports(esw, i, vport)
esw_disable_vport(esw, vport);
@@ -1809,17 +1870,17 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
esw_destroy_tsar(esw);
- if (esw->mode == SRIOV_LEGACY)
+ if (esw->mode == MLX5_ESWITCH_LEGACY)
esw_destroy_legacy_table(esw);
- else if (esw->mode == SRIOV_OFFLOADS)
+ else if (esw->mode == MLX5_ESWITCH_OFFLOADS)
esw_offloads_cleanup(esw);
old_mode = esw->mode;
- esw->mode = SRIOV_NONE;
+ esw->mode = MLX5_ESWITCH_NONE;
mlx5_lag_update(esw->dev);
- if (old_mode == SRIOV_OFFLOADS) {
+ if (old_mode == MLX5_ESWITCH_OFFLOADS) {
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
}
@@ -1827,14 +1888,16 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
int mlx5_eswitch_init(struct mlx5_core_dev *dev)
{
- int total_vports = MLX5_TOTAL_VPORTS(dev);
struct mlx5_eswitch *esw;
struct mlx5_vport *vport;
+ int total_vports;
int err, i;
if (!MLX5_VPORT_MANAGER(dev))
return 0;
+ total_vports = mlx5_eswitch_get_total_vports(dev);
+
esw_info(dev,
"Total vports %d, per vport: max uc(%d) max mc(%d)\n",
total_vports,
@@ -1847,6 +1910,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
esw->dev = dev;
esw->manager_vport = mlx5_eswitch_manager_vport(dev);
+ esw->first_host_vport = mlx5_eswitch_first_host_vport_num(dev);
esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq");
if (!esw->work_queue) {
@@ -1880,7 +1944,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
}
esw->enabled_vports = 0;
- esw->mode = SRIOV_NONE;
+ esw->mode = MLX5_ESWITCH_NONE;
esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE;
if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) &&
MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))
@@ -1950,7 +2014,7 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
ether_addr_copy(evport->info.mac, mac);
evport->info.node_guid = node_guid;
- if (evport->enabled && esw->mode == SRIOV_LEGACY)
+ if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY)
err = esw_vport_ingress_config(esw, evport);
unlock:
@@ -2034,7 +2098,7 @@ int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
evport->info.vlan = vlan;
evport->info.qos = qos;
- if (evport->enabled && esw->mode == SRIOV_LEGACY) {
+ if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY) {
err = esw_vport_ingress_config(esw, evport);
if (err)
goto unlock;
@@ -2076,7 +2140,7 @@ int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
mlx5_core_warn(esw->dev,
"Spoofchk in set while MAC is invalid, vport(%d)\n",
evport->vport);
- if (evport->enabled && esw->mode == SRIOV_LEGACY)
+ if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY)
err = esw_vport_ingress_config(esw, evport);
if (err)
evport->info.spoofchk = pschk;
@@ -2172,7 +2236,7 @@ int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting)
return -EPERM;
mutex_lock(&esw->state_lock);
- if (esw->mode != SRIOV_LEGACY) {
+ if (esw->mode != MLX5_ESWITCH_LEGACY) {
err = -EOPNOTSUPP;
goto out;
}
@@ -2195,7 +2259,7 @@ int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting)
return -EPERM;
mutex_lock(&esw->state_lock);
- if (esw->mode != SRIOV_LEGACY) {
+ if (esw->mode != MLX5_ESWITCH_LEGACY) {
err = -EOPNOTSUPP;
goto out;
}
@@ -2338,7 +2402,7 @@ static int mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
u64 bytes = 0;
int err = 0;
- if (!vport->enabled || esw->mode != SRIOV_LEGACY)
+ if (!vport->enabled || esw->mode != MLX5_ESWITCH_LEGACY)
return 0;
if (vport->egress.drop_counter)
@@ -2448,16 +2512,27 @@ free_out:
u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw)
{
- return ESW_ALLOWED(esw) ? esw->mode : SRIOV_NONE;
+ return ESW_ALLOWED(esw) ? esw->mode : MLX5_ESWITCH_NONE;
}
EXPORT_SYMBOL_GPL(mlx5_eswitch_mode);
+enum devlink_eswitch_encap_mode
+mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev)
+{
+ struct mlx5_eswitch *esw;
+
+ esw = dev->priv.eswitch;
+ return ESW_ALLOWED(esw) ? esw->offloads.encap :
+ DEVLINK_ESWITCH_ENCAP_MODE_NONE;
+}
+EXPORT_SYMBOL(mlx5_eswitch_get_encap_mode);
+
bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1)
{
- if ((dev0->priv.eswitch->mode == SRIOV_NONE &&
- dev1->priv.eswitch->mode == SRIOV_NONE) ||
- (dev0->priv.eswitch->mode == SRIOV_OFFLOADS &&
- dev1->priv.eswitch->mode == SRIOV_OFFLOADS))
+ if ((dev0->priv.eswitch->mode == MLX5_ESWITCH_NONE &&
+ dev1->priv.eswitch->mode == MLX5_ESWITCH_NONE) ||
+ (dev0->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS &&
+ dev1->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS))
return true;
return false;
@@ -2466,6 +2541,26 @@ bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1)
bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
struct mlx5_core_dev *dev1)
{
- return (dev0->priv.eswitch->mode == SRIOV_OFFLOADS &&
- dev1->priv.eswitch->mode == SRIOV_OFFLOADS);
+ return (dev0->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS &&
+ dev1->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS);
+}
+
+void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs)
+{
+ const u32 *out;
+
+ WARN_ON_ONCE(esw->mode != MLX5_ESWITCH_NONE);
+
+ if (!mlx5_core_is_ecpf_esw_manager(esw->dev)) {
+ esw->esw_funcs.num_vfs = num_vfs;
+ return;
+ }
+
+ out = mlx5_esw_query_functions(esw->dev);
+ if (IS_ERR(out))
+ return;
+
+ esw->esw_funcs.num_vfs = MLX5_GET(query_esw_functions_out, out,
+ host_params_context.host_num_of_vfs);
+ kvfree(out);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index d043d6f9797d..a38e8a3c7c9a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -68,6 +68,8 @@ struct vport_ingress {
struct mlx5_flow_group *allow_spoofchk_only_grp;
struct mlx5_flow_group *allow_untagged_only_grp;
struct mlx5_flow_group *drop_grp;
+ int modify_metadata_id;
+ struct mlx5_flow_handle *modify_metadata_rule;
struct mlx5_flow_handle *allow_rule;
struct mlx5_flow_handle *drop_rule;
struct mlx5_fc *drop_counter;
@@ -173,9 +175,12 @@ struct mlx5_esw_offload {
struct mutex peer_mutex;
DECLARE_HASHTABLE(encap_tbl, 8);
DECLARE_HASHTABLE(mod_hdr_tbl, 8);
+ DECLARE_HASHTABLE(termtbl_tbl, 8);
+ struct mutex termtbl_mutex; /* protects termtbl hash */
+ const struct mlx5_eswitch_rep_ops *rep_ops[NUM_REP_TYPES];
u8 inline_mode;
u64 num_flows;
- u8 encap;
+ enum devlink_eswitch_encap_mode encap;
};
/* E-Switch MC FDB table hash node */
@@ -190,11 +195,15 @@ struct mlx5_host_work {
struct mlx5_eswitch *esw;
};
-struct mlx5_host_info {
+struct mlx5_esw_functions {
struct mlx5_nb nb;
u16 num_vfs;
};
+enum {
+ MLX5_ESWITCH_VPORT_MATCH_METADATA = BIT(0),
+};
+
struct mlx5_eswitch {
struct mlx5_core_dev *dev;
struct mlx5_nb nb;
@@ -202,6 +211,7 @@ struct mlx5_eswitch {
struct hlist_head mc_table[MLX5_L2_ADDR_HASH_SIZE];
struct workqueue_struct *work_queue;
struct mlx5_vport *vports;
+ u32 flags;
int total_vports;
int enabled_vports;
/* Synchronize between vport change events
@@ -219,12 +229,12 @@ struct mlx5_eswitch {
int mode;
int nvports;
u16 manager_vport;
- struct mlx5_host_info host_info;
+ u16 first_host_vport;
+ struct mlx5_esw_functions esw_funcs;
};
void esw_offloads_cleanup(struct mlx5_eswitch *esw);
-int esw_offloads_init(struct mlx5_eswitch *esw, int vf_nvports,
- int total_nvports);
+int esw_offloads_init(struct mlx5_eswitch *esw);
void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
int esw_offloads_init_reps(struct mlx5_eswitch *esw);
void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
@@ -239,12 +249,14 @@ void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
struct mlx5_vport *vport);
void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
struct mlx5_vport *vport);
+void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport);
/* E-Switch API */
int mlx5_eswitch_init(struct mlx5_core_dev *dev);
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);
-int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode);
-void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw);
+int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode);
+void mlx5_eswitch_disable(struct mlx5_eswitch *esw);
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
u16 vport, u8 mac[ETH_ALEN]);
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
@@ -266,8 +278,32 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
struct ifla_vf_stats *vf_stats);
void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule);
+int mlx5_eswitch_modify_esw_vport_context(struct mlx5_eswitch *esw, u16 vport,
+ void *in, int inlen);
+int mlx5_eswitch_query_esw_vport_context(struct mlx5_eswitch *esw, u16 vport,
+ void *out, int outlen);
+
struct mlx5_flow_spec;
struct mlx5_esw_flow_attr;
+struct mlx5_termtbl_handle;
+
+bool
+mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_flow_spec *spec);
+
+struct mlx5_flow_handle *
+mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_esw_flow_attr *attr,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_flow_destination *dest,
+ int num_dest);
+
+void
+mlx5_eswitch_termtbl_put(struct mlx5_eswitch *esw,
+ struct mlx5_termtbl_handle *tt);
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
@@ -338,6 +374,7 @@ struct mlx5_esw_flow_attr {
struct mlx5_eswitch_rep *rep;
struct mlx5_core_dev *mdev;
u32 encap_id;
+ struct mlx5_termtbl_handle *termtbl;
} dests[MLX5_MAX_FLOW_FWD_VPORTS];
u32 mod_hdr_id;
u8 match_level;
@@ -355,10 +392,12 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode);
-int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode);
-int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap,
+int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode);
+int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
+ enum devlink_eswitch_encap_mode encap,
struct netlink_ext_ack *extack);
-int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap);
+int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
+ enum devlink_eswitch_encap_mode *encap);
void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);
int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
@@ -386,6 +425,8 @@ bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0,
bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
struct mlx5_core_dev *dev1);
+const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev);
+
#define MLX5_DEBUG_ESWITCH_MASK BIT(3)
#define esw_info(__dev, format, ...) \
@@ -404,6 +445,24 @@ static inline u16 mlx5_eswitch_manager_vport(struct mlx5_core_dev *dev)
MLX5_VPORT_ECPF : MLX5_VPORT_PF;
}
+static inline u16 mlx5_eswitch_first_host_vport_num(struct mlx5_core_dev *dev)
+{
+ return mlx5_core_is_ecpf_esw_manager(dev) ?
+ MLX5_VPORT_PF : MLX5_VPORT_FIRST_VF;
+}
+
+static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev)
+{
+ /* Ideally, any device that needs to process this event (i.e. any
+ * eswitch manager, whether PF or ECPF) would report the functions
+ * changed capability. However, some ECPF based devices do not set
+ * this capability, so also accept ECPF eswitch managers as handlers.
+ */
+ return MLX5_CAP_ESW(dev, esw_functions_changed) ||
+ mlx5_core_is_ecpf_esw_manager(dev);
+}
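Callers use this predicate to decide whether to listen for ESW_FUNCTIONS_CHANGED events, as mlx5_eswitch_event_handlers_register() does earlier in this patch. A condensed sketch of that pattern follows; the example_* names are hypothetical and the handler body is a placeholder:

static int example_funcs_changed(struct notifier_block *nb,
				 unsigned long type, void *data)
{
	/* 'data' is the raw EQE for the ESW_FUNCTIONS_CHANGED event. */
	return NOTIFY_OK;
}

static void example_register(struct mlx5_eswitch *esw, struct mlx5_nb *nb)
{
	if (!mlx5_eswitch_is_funcs_handler(esw->dev))
		return;

	MLX5_NB_INIT(nb, example_funcs_changed, ESW_FUNCTIONS_CHANGED);
	mlx5_eq_notifier_register(esw->dev, nb);
}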
+
static inline int mlx5_eswitch_uplink_idx(struct mlx5_eswitch *esw)
{
/* Uplink always locate at the last element of the array.*/
@@ -488,16 +547,47 @@ void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
#define mlx5_esw_for_each_vf_vport_num_reverse(esw, vport, nvfs) \
for ((vport) = (nvfs); (vport) >= MLX5_VPORT_FIRST_VF; (vport)--)
+/* Includes host PF (vport 0) if it's not esw manager. */
+#define mlx5_esw_for_each_host_func_rep(esw, i, rep, nvfs) \
+ for ((i) = (esw)->first_host_vport; \
+ (rep) = &(esw)->offloads.vport_reps[i], \
+ (i) <= (nvfs); (i)++)
+
+#define mlx5_esw_for_each_host_func_rep_reverse(esw, i, rep, nvfs) \
+ for ((i) = (nvfs); \
+ (rep) = &(esw)->offloads.vport_reps[i], \
+ (i) >= (esw)->first_host_vport; (i)--)
+
+#define mlx5_esw_for_each_host_func_vport(esw, vport, nvfs) \
+ for ((vport) = (esw)->first_host_vport; \
+ (vport) <= (nvfs); (vport)++)
+
+#define mlx5_esw_for_each_host_func_vport_reverse(esw, vport, nvfs) \
+ for ((vport) = (nvfs); \
+ (vport) >= (esw)->first_host_vport; (vport)--)
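A brief usage sketch of the new host-function iterators, mirroring how the _rep variant is used in esw_set_global_vlan_pop() elsewhere in this patch; the loop body is a placeholder and the IS_ERR check assumes the __must_check mlx5_eswitch_get_vport() returns an ERR_PTR for an invalid vport:

static void example_walk_host_func_vports(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *evport;
	u16 vport;

	/* Walk the host PF (when it is not the eswitch manager) and all
	 * currently enabled VFs.
	 */
	mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
		evport = mlx5_eswitch_get_vport(esw, vport);
		if (IS_ERR(evport))
			continue;
		/* per-vport work goes here */
	}
}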
+
struct mlx5_vport *__must_check
mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num);
+bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num);
+
+void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs);
+int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data);
+
#else /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
-static inline int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) { return 0; }
-static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) {}
+static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode) { return 0; }
+static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw) {}
static inline bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) { return true; }
+static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
+static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs) {}
#define FDB_MAX_CHAIN 1
#define FDB_SLOW_PATH_CHAIN (FDB_MAX_CHAIN + 1)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 47b446d30f71..8ed4497929b9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -41,7 +41,6 @@
#include "en.h"
#include "fs_core.h"
#include "lib/devcom.h"
-#include "ecpf.h"
#include "lib/eq.h"
/* There are two match-all miss flows, one for unicast dst mac and
@@ -89,6 +88,53 @@ u16 mlx5_eswitch_get_prio_range(struct mlx5_eswitch *esw)
return 1;
}
+static void
+mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_esw_flow_attr *attr)
+{
+ void *misc2;
+ void *misc;
+
+ /* Use metadata matching because vport is not represented by single
+ * VHCA in dual-port RoCE mode, and matching on source vport may fail.
+ */
+ if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
+ misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
+ MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
+ mlx5_eswitch_get_vport_metadata_for_match(attr->in_mdev->priv.eswitch,
+ attr->in_rep->vport));
+
+ misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
+ MLX5_SET_TO_ONES(fte_match_set_misc2, misc2, metadata_reg_c_0);
+
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
+ if (memchr_inv(misc, 0, MLX5_ST_SZ_BYTES(fte_match_set_misc)))
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
+ } else {
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
+ MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);
+
+ if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
+ MLX5_SET(fte_match_set_misc, misc,
+ source_eswitch_owner_vhca_id,
+ MLX5_CAP_GEN(attr->in_mdev, vhca_id));
+
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
+ MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
+ if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
+ MLX5_SET_TO_ONES(fte_match_set_misc, misc,
+ source_eswitch_owner_vhca_id);
+
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
+ }
+
+ if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) &&
+ attr->in_rep->vport == MLX5_VPORT_UPLINK)
+ spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
+}
+
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
@@ -100,9 +146,8 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_handle *rule;
struct mlx5_flow_table *fdb;
int j, i = 0;
- void *misc;
- if (esw->mode != SRIOV_OFFLOADS)
+ if (esw->mode != MLX5_ESWITCH_OFFLOADS)
return ERR_PTR(-EOPNOTSUPP);
flow_act.action = attr->action;
@@ -160,21 +205,8 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
i++;
}
- misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
- MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);
-
- if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
- MLX5_SET(fte_match_set_misc, misc,
- source_eswitch_owner_vhca_id,
- MLX5_CAP_GEN(attr->in_mdev, vhca_id));
+ mlx5_eswitch_set_rule_source_port(esw, spec, attr);
- misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
- MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
- if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
- MLX5_SET_TO_ONES(fte_match_set_misc, misc,
- source_eswitch_owner_vhca_id);
-
- spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
if (attr->tunnel_match_level != MLX5_MATCH_NONE)
spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
@@ -193,7 +225,11 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
goto err_esw_get;
}
- rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
+ if (mlx5_eswitch_termtbl_required(esw, &flow_act, spec))
+ rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, attr,
+ &flow_act, dest, i);
+ else
+ rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
if (IS_ERR(rule))
goto err_add_rule;
else
@@ -220,7 +256,6 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_table *fast_fdb;
struct mlx5_flow_table *fwd_fdb;
struct mlx5_flow_handle *rule;
- void *misc;
int i;
fast_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 0);
@@ -252,25 +287,11 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
dest[i].ft = fwd_fdb,
i++;
- misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
- MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);
-
- if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
- MLX5_SET(fte_match_set_misc, misc,
- source_eswitch_owner_vhca_id,
- MLX5_CAP_GEN(attr->in_mdev, vhca_id));
+ mlx5_eswitch_set_rule_source_port(esw, spec, attr);
- misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
- MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
- if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
- MLX5_SET_TO_ONES(fte_match_set_misc, misc,
- source_eswitch_owner_vhca_id);
-
- if (attr->match_level == MLX5_MATCH_NONE)
- spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
- else
- spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
- MLX5_MATCH_MISC_PARAMETERS;
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
+ if (attr->match_level != MLX5_MATCH_NONE)
+ spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);
@@ -295,8 +316,16 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
bool fwd_rule)
{
bool split = (attr->split_count > 0);
+ int i;
mlx5_del_flow_rules(rule);
+
+ /* unref the term table */
+ for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
+ if (attr->dests[i].termtbl)
+ mlx5_eswitch_termtbl_put(esw, attr->dests[i].termtbl);
+ }
+
esw->offloads.num_flows--;
if (fwd_rule) {
@@ -328,12 +357,11 @@ mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
struct mlx5_eswitch_rep *rep;
- int vf_vport, err = 0;
+ int i, err = 0;
esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
- for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
- rep = &esw->offloads.vport_reps[vf_vport];
- if (atomic_read(&rep->rep_if[REP_ETH].state) != REP_LOADED)
+ mlx5_esw_for_each_host_func_rep(esw, i, rep, esw->esw_funcs.num_vfs) {
+ if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
continue;
err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
@@ -559,23 +587,87 @@ void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
mlx5_del_flow_rules(rule);
}
-static void peer_miss_rules_setup(struct mlx5_core_dev *peer_dev,
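+/* Let the FDB pass metadata register C0 on to the vport by setting the
+ * MLX5_FDB_TO_VPORT_REG_C_0 bit in the manager vport's esw vport context,
+ * so the source vport metadata written at ingress survives FDB processing.
+ */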
+static int mlx5_eswitch_enable_passing_vport_metadata(struct mlx5_eswitch *esw)
+{
+ u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
+ u8 fdb_to_vport_reg_c_id;
+ int err;
+
+ err = mlx5_eswitch_query_esw_vport_context(esw, esw->manager_vport,
+ out, sizeof(out));
+ if (err)
+ return err;
+
+ fdb_to_vport_reg_c_id = MLX5_GET(query_esw_vport_context_out, out,
+ esw_vport_context.fdb_to_vport_reg_c_id);
+
+ fdb_to_vport_reg_c_id |= MLX5_FDB_TO_VPORT_REG_C_0;
+ MLX5_SET(modify_esw_vport_context_in, in,
+ esw_vport_context.fdb_to_vport_reg_c_id, fdb_to_vport_reg_c_id);
+
+ MLX5_SET(modify_esw_vport_context_in, in,
+ field_select.fdb_to_vport_reg_c_id, 1);
+
+ return mlx5_eswitch_modify_esw_vport_context(esw, esw->manager_vport,
+ in, sizeof(in));
+}
+
+static int mlx5_eswitch_disable_passing_vport_metadata(struct mlx5_eswitch *esw)
+{
+ u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
+ u8 fdb_to_vport_reg_c_id;
+ int err;
+
+ err = mlx5_eswitch_query_esw_vport_context(esw, esw->manager_vport,
+ out, sizeof(out));
+ if (err)
+ return err;
+
+ fdb_to_vport_reg_c_id = MLX5_GET(query_esw_vport_context_out, out,
+ esw_vport_context.fdb_to_vport_reg_c_id);
+
+ fdb_to_vport_reg_c_id &= ~MLX5_FDB_TO_VPORT_REG_C_0;
+
+ MLX5_SET(modify_esw_vport_context_in, in,
+ esw_vport_context.fdb_to_vport_reg_c_id, fdb_to_vport_reg_c_id);
+
+ MLX5_SET(modify_esw_vport_context_in, in,
+ field_select.fdb_to_vport_reg_c_id, 1);
+
+ return mlx5_eswitch_modify_esw_vport_context(esw, esw->manager_vport,
+ in, sizeof(in));
+}
+
+static void peer_miss_rules_setup(struct mlx5_eswitch *esw,
+ struct mlx5_core_dev *peer_dev,
struct mlx5_flow_spec *spec,
struct mlx5_flow_destination *dest)
{
- void *misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
- misc_parameters);
+ void *misc;
- MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
- MLX5_CAP_GEN(peer_dev, vhca_id));
+ if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ misc_parameters_2);
+ MLX5_SET_TO_ONES(fte_match_set_misc2, misc, metadata_reg_c_0);
- spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+ } else {
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ misc_parameters);
- misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
- misc_parameters);
- MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
- MLX5_SET_TO_ONES(fte_match_set_misc, misc,
- source_eswitch_owner_vhca_id);
+ MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
+ MLX5_CAP_GEN(peer_dev, vhca_id));
+
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
+
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ misc_parameters);
+ MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
+ MLX5_SET_TO_ONES(fte_match_set_misc, misc,
+ source_eswitch_owner_vhca_id);
+ }
dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
dest->vport.num = peer_dev->priv.eswitch->manager_vport;
@@ -583,6 +675,26 @@ static void peer_miss_rules_setup(struct mlx5_core_dev *peer_dev,
dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
}
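+/* Set the source port match of a peer miss rule: metadata reg_c_0 of the
+ * peer eswitch when vport metadata matching is enabled, otherwise the misc
+ * source_port field.
+ */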
+static void esw_set_peer_miss_rule_source_port(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch *peer_esw,
+ struct mlx5_flow_spec *spec,
+ u16 vport)
+{
+ void *misc;
+
+ if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ misc_parameters_2);
+ MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
+ mlx5_eswitch_get_vport_metadata_for_match(peer_esw,
+ vport));
+ } else {
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ misc_parameters);
+ MLX5_SET(fte_match_set_misc, misc, source_port, vport);
+ }
+}
+
static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
struct mlx5_core_dev *peer_dev)
{
@@ -600,7 +712,7 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
if (!spec)
return -ENOMEM;
- peer_miss_rules_setup(peer_dev, spec, &dest);
+ peer_miss_rules_setup(esw, peer_dev, spec, &dest);
flows = kvzalloc(nvports * sizeof(*flows), GFP_KERNEL);
if (!flows) {
@@ -613,7 +725,9 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
misc_parameters);
if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
- MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_PF);
+ esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
+ spec, MLX5_VPORT_PF);
+
flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
spec, &flow_act, &dest, 1);
if (IS_ERR(flow)) {
@@ -635,7 +749,10 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
}
mlx5_esw_for_each_vf_vport_num(esw, i, mlx5_core_max_vfs(esw->dev)) {
- MLX5_SET(fte_match_set_misc, misc, source_port, i);
+ esw_set_peer_miss_rule_source_port(esw,
+ peer_dev->priv.eswitch,
+ spec, i);
+
flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
spec, &flow_act, &dest, 1);
if (IS_ERR(flow)) {
@@ -919,6 +1036,30 @@ static void esw_destroy_offloads_fast_fdb_tables(struct mlx5_eswitch *esw)
#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32
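+/* Program a flow group's source port match criteria: metadata reg_c_0 in
+ * misc_parameters_2 when vport metadata matching is enabled, otherwise the
+ * misc_parameters source_port field.
+ */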
+static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
+ u32 *flow_group_in)
+{
+ void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
+ flow_group_in,
+ match_criteria);
+
+ if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
+ MLX5_SET(create_flow_group_in, flow_group_in,
+ match_criteria_enable,
+ MLX5_MATCH_MISC_PARAMETERS_2);
+
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ misc_parameters_2.metadata_reg_c_0);
+ } else {
+ MLX5_SET(create_flow_group_in, flow_group_in,
+ match_criteria_enable,
+ MLX5_MATCH_MISC_PARAMETERS);
+
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ misc_parameters.source_port);
+ }
+}
+
static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
@@ -1016,19 +1157,21 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
/* create peer esw miss group */
memset(flow_group_in, 0, inlen);
- MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
- MLX5_MATCH_MISC_PARAMETERS);
- match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
- match_criteria);
+ esw_set_flow_group_source_port(esw, flow_group_in);
+
+ if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in,
+ flow_group_in,
+ match_criteria);
- MLX5_SET_TO_ONES(fte_match_param, match_criteria,
- misc_parameters.source_port);
- MLX5_SET_TO_ONES(fte_match_param, match_criteria,
- misc_parameters.source_eswitch_owner_vhca_id);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ misc_parameters.source_eswitch_owner_vhca_id);
+
+ MLX5_SET(create_flow_group_in, flow_group_in,
+ source_eswitch_owner_vhca_id_valid, 1);
+ }
- MLX5_SET(create_flow_group_in, flow_group_in,
- source_eswitch_owner_vhca_id_valid, 1);
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
ix + esw->total_vports - 1);
@@ -1142,7 +1285,6 @@ static int esw_create_vport_rx_group(struct mlx5_eswitch *esw, int nvports)
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_group *g;
u32 *flow_group_in;
- void *match_criteria, *misc;
int err = 0;
nvports = nvports + MLX5_ESW_MISS_FLOWS;
@@ -1152,12 +1294,8 @@ static int esw_create_vport_rx_group(struct mlx5_eswitch *esw, int nvports)
/* create vport rx group */
memset(flow_group_in, 0, inlen);
- MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
- MLX5_MATCH_MISC_PARAMETERS);
- match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
- misc = MLX5_ADDR_OF(fte_match_param, match_criteria, misc_parameters);
- MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
+ esw_set_flow_group_source_port(esw, flow_group_in);
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);
@@ -1196,13 +1334,24 @@ mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
goto out;
}
- misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
- MLX5_SET(fte_match_set_misc, misc, source_port, vport);
+ if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
+ MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
+ mlx5_eswitch_get_vport_metadata_for_match(esw, vport));
- misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
- MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
+ MLX5_SET_TO_ONES(fte_match_set_misc2, misc, metadata_reg_c_0);
- spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+ } else {
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
+ MLX5_SET(fte_match_set_misc, misc, source_port, vport);
+
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
+ MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
+
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
+ }
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
@@ -1220,21 +1369,22 @@ out:
static int esw_offloads_start(struct mlx5_eswitch *esw,
struct netlink_ext_ack *extack)
{
- int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
+ int err, err1;
- if (esw->mode != SRIOV_LEGACY &&
+ if (esw->mode != MLX5_ESWITCH_LEGACY &&
!mlx5_core_is_ecpf_esw_manager(esw->dev)) {
NL_SET_ERR_MSG_MOD(extack,
"Can't set offloads mode, SRIOV legacy not enabled");
return -EINVAL;
}
- mlx5_eswitch_disable_sriov(esw);
- err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
+ mlx5_eswitch_disable(esw);
+ mlx5_eswitch_update_num_of_vfs(esw, esw->dev->priv.sriov.num_vfs);
+ err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS);
if (err) {
NL_SET_ERR_MSG_MOD(extack,
"Failed setting eswitch to offloads");
- err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
+ err1 = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY);
if (err1) {
NL_SET_ERR_MSG_MOD(extack,
"Failed setting eswitch back to legacy");
@@ -1242,7 +1392,6 @@ static int esw_offloads_start(struct mlx5_eswitch *esw,
}
if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
if (mlx5_eswitch_inline_mode_get(esw,
- num_vfs,
&esw->offloads.inline_mode)) {
esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
NL_SET_ERR_MSG_MOD(extack,
@@ -1259,11 +1408,11 @@ void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{
- int total_vports = MLX5_TOTAL_VPORTS(esw->dev);
+ int total_vports = esw->total_vports;
struct mlx5_core_dev *dev = esw->dev;
struct mlx5_eswitch_rep *rep;
u8 hw_id[ETH_ALEN], rep_type;
- int vport;
+ int vport_index;
esw->offloads.vport_reps = kcalloc(total_vports,
sizeof(struct mlx5_eswitch_rep),
@@ -1271,14 +1420,15 @@ int esw_offloads_init_reps(struct mlx5_eswitch *esw)
if (!esw->offloads.vport_reps)
return -ENOMEM;
- mlx5_query_nic_vport_mac_address(dev, 0, hw_id);
+ mlx5_query_mac_address(dev, hw_id);
- mlx5_esw_for_all_reps(esw, vport, rep) {
- rep->vport = mlx5_eswitch_index_to_vport_num(esw, vport);
+ mlx5_esw_for_all_reps(esw, vport_index, rep) {
+ rep->vport = mlx5_eswitch_index_to_vport_num(esw, vport_index);
+ rep->vport_index = vport_index;
ether_addr_copy(rep->hw_id, hw_id);
for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
- atomic_set(&rep->rep_if[rep_type].state,
+ atomic_set(&rep->rep_data[rep_type].state,
REP_UNREGISTERED);
}
@@ -1288,9 +1438,9 @@ int esw_offloads_init_reps(struct mlx5_eswitch *esw)
static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
struct mlx5_eswitch_rep *rep, u8 rep_type)
{
- if (atomic_cmpxchg(&rep->rep_if[rep_type].state,
+ if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
REP_LOADED, REP_REGISTERED) == REP_LOADED)
- rep->rep_if[rep_type].unload(rep);
+ esw->offloads.rep_ops[rep_type]->unload(rep);
}
static void __unload_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type)
@@ -1329,21 +1479,20 @@ static void esw_offloads_unload_vf_reps(struct mlx5_eswitch *esw, int nvports)
__unload_reps_vf_vport(esw, nvports, rep_type);
}
-static void __unload_reps_all_vport(struct mlx5_eswitch *esw, int nvports,
- u8 rep_type)
+static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
- __unload_reps_vf_vport(esw, nvports, rep_type);
+ __unload_reps_vf_vport(esw, esw->esw_funcs.num_vfs, rep_type);
/* Special vports must be the last to unload. */
__unload_reps_special_vport(esw, rep_type);
}
-static void esw_offloads_unload_all_reps(struct mlx5_eswitch *esw, int nvports)
+static void esw_offloads_unload_all_reps(struct mlx5_eswitch *esw)
{
u8 rep_type = NUM_REP_TYPES;
while (rep_type-- > 0)
- __unload_reps_all_vport(esw, nvports, rep_type);
+ __unload_reps_all_vport(esw, rep_type);
}
static int __esw_offloads_load_rep(struct mlx5_eswitch *esw,
@@ -1351,11 +1500,11 @@ static int __esw_offloads_load_rep(struct mlx5_eswitch *esw,
{
int err = 0;
- if (atomic_cmpxchg(&rep->rep_if[rep_type].state,
+ if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
- err = rep->rep_if[rep_type].load(esw->dev, rep);
+ err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
if (err)
- atomic_set(&rep->rep_if[rep_type].state,
+ atomic_set(&rep->rep_data[rep_type].state,
REP_REGISTERED);
}
@@ -1419,6 +1568,26 @@ err_vf:
return err;
}
+static int __load_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
+{
+ int err;
+
+ /* Special vports must be loaded first; the uplink rep creates mdev resources. */
+ err = __load_reps_special_vport(esw, rep_type);
+ if (err)
+ return err;
+
+ err = __load_reps_vf_vport(esw, esw->esw_funcs.num_vfs, rep_type);
+ if (err)
+ goto err_vfs;
+
+ return 0;
+
+err_vfs:
+ __unload_reps_special_vport(esw, rep_type);
+ return err;
+}
+
static int esw_offloads_load_vf_reps(struct mlx5_eswitch *esw, int nvports)
{
u8 rep_type = 0;
@@ -1438,34 +1607,13 @@ err_reps:
return err;
}
-static int __load_reps_all_vport(struct mlx5_eswitch *esw, int nvports,
- u8 rep_type)
-{
- int err;
-
- /* Special vports must be loaded first. */
- err = __load_reps_special_vport(esw, rep_type);
- if (err)
- return err;
-
- err = __load_reps_vf_vport(esw, nvports, rep_type);
- if (err)
- goto err_vfs;
-
- return 0;
-
-err_vfs:
- __unload_reps_special_vport(esw, rep_type);
- return err;
-}
-
-static int esw_offloads_load_all_reps(struct mlx5_eswitch *esw, int nvports)
+static int esw_offloads_load_all_reps(struct mlx5_eswitch *esw)
{
u8 rep_type = 0;
int err;
for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
- err = __load_reps_all_vport(esw, nvports, rep_type);
+ err = __load_reps_all_vport(esw, rep_type);
if (err)
goto err_reps;
}
@@ -1474,7 +1622,7 @@ static int esw_offloads_load_all_reps(struct mlx5_eswitch *esw, int nvports)
err_reps:
while (rep_type-- > 0)
- __unload_reps_all_vport(esw, nvports, rep_type);
+ __unload_reps_all_vport(esw, rep_type);
return err;
}
@@ -1510,6 +1658,10 @@ static int mlx5_esw_offloads_devcom_event(int event,
switch (event) {
case ESW_OFFLOADS_DEVCOM_PAIR:
+ if (mlx5_eswitch_vport_match_metadata_enabled(esw) !=
+ mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
+ break;
+
err = mlx5_esw_offloads_pair(esw, peer_esw);
if (err)
goto err_out;
@@ -1578,32 +1730,16 @@ static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
static int esw_vport_ingress_prio_tag_config(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
- struct mlx5_core_dev *dev = esw->dev;
struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_spec *spec;
int err = 0;
/* For prio tag mode, there is only one FTE:
- * 1) Untagged packets - push prio tag VLAN, allow
+ * 1) Untagged packets - push prio tag VLAN and modify metadata if
+ * required, allow
* Unmatched traffic is allowed by default
*/
- if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
- return -EOPNOTSUPP;
-
- esw_vport_cleanup_ingress_rules(esw, vport);
-
- err = esw_vport_enable_ingress_acl(esw, vport);
- if (err) {
- mlx5_core_warn(esw->dev,
- "failed to enable prio tag ingress acl (%d) on vport[%d]\n",
- err, vport->vport);
- return err;
- }
-
- esw_debug(esw->dev,
- "vport[%d] configure ingress rules\n", vport->vport);
-
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec) {
err = -ENOMEM;
@@ -1619,6 +1755,12 @@ static int esw_vport_ingress_prio_tag_config(struct mlx5_eswitch *esw,
flow_act.vlan[0].ethtype = ETH_P_8021Q;
flow_act.vlan[0].vid = 0;
flow_act.vlan[0].prio = 0;
+
+ if (vport->ingress.modify_metadata_rule) {
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+ flow_act.modify_id = vport->ingress.modify_metadata_id;
+ }
+
vport->ingress.allow_rule =
mlx5_add_flow_rules(vport->ingress.acl, spec,
&flow_act, NULL, 0);
@@ -1639,6 +1781,58 @@ out_no_mem:
return err;
}
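+/* Add an ingress ACL rule that rewrites reg_c_0 with the vport's metadata
+ * value on every packet, so the FDB can later match on the source vport.
+ */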
+static int esw_vport_add_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ u8 action[MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)] = {};
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_spec spec = {};
+ int err = 0;
+
+ MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
+ MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_C_0);
+ MLX5_SET(set_action_in, action, data,
+ mlx5_eswitch_get_vport_metadata_for_match(esw, vport->vport));
+
+ err = mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
+ 1, action, &vport->ingress.modify_metadata_id);
+ if (err) {
+ esw_warn(esw->dev,
+ "failed to alloc modify header for vport %d ingress acl (%d)\n",
+ vport->vport, err);
+ return err;
+ }
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+ flow_act.modify_id = vport->ingress.modify_metadata_id;
+ vport->ingress.modify_metadata_rule = mlx5_add_flow_rules(vport->ingress.acl,
+ &spec, &flow_act, NULL, 0);
+ if (IS_ERR(vport->ingress.modify_metadata_rule)) {
+ err = PTR_ERR(vport->ingress.modify_metadata_rule);
+ esw_warn(esw->dev,
+ "failed to add setting metadata rule for vport %d ingress acl, err(%d)\n",
+ vport->vport, err);
+ vport->ingress.modify_metadata_rule = NULL;
+ goto out;
+ }
+
+out:
+ if (err)
+ mlx5_modify_header_dealloc(esw->dev, vport->ingress.modify_metadata_id);
+ return err;
+}
+
+void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ if (vport->ingress.modify_metadata_rule) {
+ mlx5_del_flow_rules(vport->ingress.modify_metadata_rule);
+ mlx5_modify_header_dealloc(esw->dev, vport->ingress.modify_metadata_id);
+
+ vport->ingress.modify_metadata_rule = NULL;
+ }
+}
+
static int esw_vport_egress_prio_tag_config(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
@@ -1646,6 +1840,9 @@ static int esw_vport_egress_prio_tag_config(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec;
int err = 0;
+ if (!MLX5_CAP_GEN(esw->dev, prio_tag_required))
+ return 0;
+
/* For prio tag mode, there is only one FTE:
* 1) prio tag packets - pop the prio tag VLAN, allow
* Unmatched traffic is allowed by default
@@ -1699,27 +1896,98 @@ out_no_mem:
return err;
}
-static int esw_prio_tag_acls_config(struct mlx5_eswitch *esw, int nvports)
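+/* Configure the common ingress ACL of a vport: a metadata rewrite rule when
+ * vport metadata matching is enabled, and a prio tag rule for VF vports when
+ * prio_tag_required is set.
+ */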
+static int esw_vport_ingress_common_config(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
- struct mlx5_vport *vport = NULL;
- int i, j;
int err;
- mlx5_esw_for_each_vf_vport(esw, i, vport, nvports) {
+ if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
+ !MLX5_CAP_GEN(esw->dev, prio_tag_required))
+ return 0;
+
+ esw_vport_cleanup_ingress_rules(esw, vport);
+
+ err = esw_vport_enable_ingress_acl(esw, vport);
+ if (err) {
+ esw_warn(esw->dev,
+ "failed to enable ingress acl (%d) on vport[%d]\n",
+ err, vport->vport);
+ return err;
+ }
+
+ esw_debug(esw->dev,
+ "vport[%d] configure ingress rules\n", vport->vport);
+
+ if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
+ err = esw_vport_add_ingress_acl_modify_metadata(esw, vport);
+ if (err)
+ goto out;
+ }
+
+ if (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
+ mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
err = esw_vport_ingress_prio_tag_config(esw, vport);
if (err)
- goto err_ingress;
- err = esw_vport_egress_prio_tag_config(esw, vport);
+ goto out;
+ }
+
+out:
+ if (err)
+ esw_vport_disable_ingress_acl(esw, vport);
+ return err;
+}
+
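+/* Metadata-based source vport matching needs uplink ingress ACL support,
+ * FDB-to-vport copying of reg_c_0 and the flow_source field, and is not
+ * used when an ECPF manages the eswitch or an ECPF vport exists.
+ */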
+static bool
+esw_check_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
+{
+ if (!MLX5_CAP_ESW(esw->dev, esw_uplink_ingress_acl))
+ return false;
+
+ if (!(MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
+ MLX5_FDB_TO_VPORT_REG_C_0))
+ return false;
+
+ if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source))
+ return false;
+
+ if (mlx5_core_is_ecpf_esw_manager(esw->dev) ||
+ mlx5_ecpf_vport_exists(esw->dev))
+ return false;
+
+ return true;
+}
+
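+/* Enable metadata-based source vport matching when the device supports it
+ * and configure the ingress (and, for VFs, egress) ACLs of all vports.
+ */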
+static int esw_create_offloads_acl_tables(struct mlx5_eswitch *esw)
+{
+ struct mlx5_vport *vport;
+ int i, j;
+ int err;
+
+ if (esw_check_vport_match_metadata_supported(esw))
+ esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
+
+ mlx5_esw_for_all_vports(esw, i, vport) {
+ err = esw_vport_ingress_common_config(esw, vport);
if (err)
- goto err_egress;
+ goto err_ingress;
+
+ if (mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
+ err = esw_vport_egress_prio_tag_config(esw, vport);
+ if (err)
+ goto err_egress;
+ }
}
+ if (mlx5_eswitch_vport_match_metadata_enabled(esw))
+ esw_info(esw->dev, "Use metadata reg_c as source vport to match\n");
+
return 0;
err_egress:
esw_vport_disable_ingress_acl(esw, vport);
err_ingress:
- mlx5_esw_for_each_vf_vport_reverse(esw, j, vport, i - 1) {
+ for (j = MLX5_VPORT_PF; j < i; j++) {
+ vport = &esw->vports[j];
esw_vport_disable_egress_acl(esw, vport);
esw_vport_disable_ingress_acl(esw, vport);
}
@@ -1727,40 +1995,46 @@ err_ingress:
return err;
}
-static void esw_prio_tag_acls_cleanup(struct mlx5_eswitch *esw)
+static void esw_destroy_offloads_acl_tables(struct mlx5_eswitch *esw)
{
struct mlx5_vport *vport;
int i;
- mlx5_esw_for_each_vf_vport(esw, i, vport, esw->dev->priv.sriov.num_vfs) {
+ mlx5_esw_for_all_vports(esw, i, vport) {
esw_vport_disable_egress_acl(esw, vport);
esw_vport_disable_ingress_acl(esw, vport);
}
+
+ esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
}
-static int esw_offloads_steering_init(struct mlx5_eswitch *esw, int vf_nvports,
- int nvports)
+static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
{
+ int num_vfs = esw->esw_funcs.num_vfs;
+ int total_vports;
int err;
+ if (mlx5_core_is_ecpf_esw_manager(esw->dev))
+ total_vports = esw->total_vports;
+ else
+ total_vports = num_vfs + MLX5_SPECIAL_VPORTS(esw->dev);
+
memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);
- if (MLX5_CAP_GEN(esw->dev, prio_tag_required)) {
- err = esw_prio_tag_acls_config(esw, vf_nvports);
- if (err)
- return err;
- }
-
- err = esw_create_offloads_fdb_tables(esw, nvports);
+ err = esw_create_offloads_acl_tables(esw);
if (err)
return err;
- err = esw_create_offloads_table(esw, nvports);
+ err = esw_create_offloads_fdb_tables(esw, total_vports);
+ if (err)
+ goto create_fdb_err;
+
+ err = esw_create_offloads_table(esw, total_vports);
if (err)
goto create_ft_err;
- err = esw_create_vport_rx_group(esw, nvports);
+ err = esw_create_vport_rx_group(esw, total_vports);
if (err)
goto create_fg_err;
@@ -1772,6 +2046,9 @@ create_fg_err:
create_ft_err:
esw_destroy_offloads_fdb_tables(esw);
+create_fdb_err:
+ esw_destroy_offloads_acl_tables(esw);
+
return err;
}
@@ -1780,88 +2057,105 @@ static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
esw_destroy_vport_rx_group(esw);
esw_destroy_offloads_table(esw);
esw_destroy_offloads_fdb_tables(esw);
- if (MLX5_CAP_GEN(esw->dev, prio_tag_required))
- esw_prio_tag_acls_cleanup(esw);
+ esw_destroy_offloads_acl_tables(esw);
}
-static void esw_host_params_event_handler(struct work_struct *work)
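+/* Handle a change in the number of host VFs reported by query_esw_functions:
+ * load or unload the VF representors and record the new count.
+ */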
+static void
+esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
{
- struct mlx5_host_work *host_work;
- struct mlx5_eswitch *esw;
- int err, num_vf = 0;
+ bool host_pf_disabled;
+ u16 new_num_vfs;
- host_work = container_of(work, struct mlx5_host_work, work);
- esw = host_work->esw;
+ new_num_vfs = MLX5_GET(query_esw_functions_out, out,
+ host_params_context.host_num_of_vfs);
+ host_pf_disabled = MLX5_GET(query_esw_functions_out, out,
+ host_params_context.host_pf_disabled);
- err = mlx5_query_host_params_num_vfs(esw->dev, &num_vf);
- if (err || num_vf == esw->host_info.num_vfs)
- goto out;
+ if (new_num_vfs == esw->esw_funcs.num_vfs || host_pf_disabled)
+ return;
/* Number of VFs can only change from "0 to x" or "x to 0". */
- if (esw->host_info.num_vfs > 0) {
- esw_offloads_unload_vf_reps(esw, esw->host_info.num_vfs);
+ if (esw->esw_funcs.num_vfs > 0) {
+ esw_offloads_unload_vf_reps(esw, esw->esw_funcs.num_vfs);
} else {
- err = esw_offloads_load_vf_reps(esw, num_vf);
+ int err;
+ err = esw_offloads_load_vf_reps(esw, new_num_vfs);
if (err)
- goto out;
+ return;
}
+ esw->esw_funcs.num_vfs = new_num_vfs;
+}
+
+static void esw_functions_changed_event_handler(struct work_struct *work)
+{
+ struct mlx5_host_work *host_work;
+ struct mlx5_eswitch *esw;
+ const u32 *out;
- esw->host_info.num_vfs = num_vf;
+ host_work = container_of(work, struct mlx5_host_work, work);
+ esw = host_work->esw;
+ out = mlx5_esw_query_functions(esw->dev);
+ if (IS_ERR(out))
+ goto out;
+
+ esw_vfs_changed_event_handler(esw, out);
+ kvfree(out);
out:
kfree(host_work);
}
-static int esw_host_params_event(struct notifier_block *nb,
- unsigned long type, void *data)
+int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data)
{
+ struct mlx5_esw_functions *esw_funcs;
struct mlx5_host_work *host_work;
- struct mlx5_host_info *host_info;
struct mlx5_eswitch *esw;
host_work = kzalloc(sizeof(*host_work), GFP_ATOMIC);
if (!host_work)
return NOTIFY_DONE;
- host_info = mlx5_nb_cof(nb, struct mlx5_host_info, nb);
- esw = container_of(host_info, struct mlx5_eswitch, host_info);
+ esw_funcs = mlx5_nb_cof(nb, struct mlx5_esw_functions, nb);
+ esw = container_of(esw_funcs, struct mlx5_eswitch, esw_funcs);
host_work->esw = esw;
- INIT_WORK(&host_work->work, esw_host_params_event_handler);
+ INIT_WORK(&host_work->work, esw_functions_changed_event_handler);
queue_work(esw->work_queue, &host_work->work);
return NOTIFY_OK;
}
-int esw_offloads_init(struct mlx5_eswitch *esw, int vf_nvports,
- int total_nvports)
+int esw_offloads_init(struct mlx5_eswitch *esw)
{
int err;
- err = esw_offloads_steering_init(esw, vf_nvports, total_nvports);
+ err = esw_offloads_steering_init(esw);
if (err)
return err;
- err = esw_offloads_load_all_reps(esw, vf_nvports);
+ if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
+ err = mlx5_eswitch_enable_passing_vport_metadata(esw);
+ if (err)
+ goto err_vport_metadata;
+ }
+
+ err = esw_offloads_load_all_reps(esw);
if (err)
goto err_reps;
esw_offloads_devcom_init(esw);
-
- if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
- MLX5_NB_INIT(&esw->host_info.nb, esw_host_params_event,
- HOST_PARAMS_CHANGE);
- mlx5_eq_notifier_register(esw->dev, &esw->host_info.nb);
- esw->host_info.num_vfs = vf_nvports;
- }
+ mutex_init(&esw->offloads.termtbl_mutex);
mlx5_rdma_enable_roce(esw->dev);
return 0;
err_reps:
+ if (mlx5_eswitch_vport_match_metadata_enabled(esw))
+ mlx5_eswitch_disable_passing_vport_metadata(esw);
+err_vport_metadata:
esw_offloads_steering_cleanup(esw);
return err;
}
@@ -1869,13 +2163,13 @@ err_reps:
static int esw_offloads_stop(struct mlx5_eswitch *esw,
struct netlink_ext_ack *extack)
{
- int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
+ int err, err1;
- mlx5_eswitch_disable_sriov(esw);
- err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
+ mlx5_eswitch_disable(esw);
+ err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
- err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
+ err1 = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS);
if (err1) {
NL_SET_ERR_MSG_MOD(extack,
"Failed setting eswitch back to offloads");
@@ -1887,19 +2181,11 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
void esw_offloads_cleanup(struct mlx5_eswitch *esw)
{
- u16 num_vfs;
-
- if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
- mlx5_eq_notifier_unregister(esw->dev, &esw->host_info.nb);
- flush_workqueue(esw->work_queue);
- num_vfs = esw->host_info.num_vfs;
- } else {
- num_vfs = esw->dev->priv.sriov.num_vfs;
- }
-
mlx5_rdma_disable_roce(esw->dev);
esw_offloads_devcom_cleanup(esw);
- esw_offloads_unload_all_reps(esw, num_vfs);
+ esw_offloads_unload_all_reps(esw);
+ if (mlx5_eswitch_vport_match_metadata_enabled(esw))
+ mlx5_eswitch_disable_passing_vport_metadata(esw);
esw_offloads_steering_cleanup(esw);
}
@@ -1907,10 +2193,10 @@ static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
{
switch (mode) {
case DEVLINK_ESWITCH_MODE_LEGACY:
- *mlx5_mode = SRIOV_LEGACY;
+ *mlx5_mode = MLX5_ESWITCH_LEGACY;
break;
case DEVLINK_ESWITCH_MODE_SWITCHDEV:
- *mlx5_mode = SRIOV_OFFLOADS;
+ *mlx5_mode = MLX5_ESWITCH_OFFLOADS;
break;
default:
return -EINVAL;
@@ -1922,10 +2208,10 @@ static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
{
switch (mlx5_mode) {
- case SRIOV_LEGACY:
+ case MLX5_ESWITCH_LEGACY:
*mode = DEVLINK_ESWITCH_MODE_LEGACY;
break;
- case SRIOV_OFFLOADS:
+ case MLX5_ESWITCH_OFFLOADS:
*mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
break;
default:
@@ -1989,7 +2275,7 @@ static int mlx5_devlink_eswitch_check(struct devlink *devlink)
if (!MLX5_ESWITCH_MANAGER(dev))
return -EPERM;
- if (dev->priv.eswitch->mode == SRIOV_NONE &&
+ if (dev->priv.eswitch->mode == MLX5_ESWITCH_NONE &&
!mlx5_core_is_ecpf_esw_manager(dev))
return -EOPNOTSUPP;
@@ -2040,7 +2326,7 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
struct mlx5_eswitch *esw = dev->priv.eswitch;
- int err, vport;
+ int err, vport, num_vport;
u8 mlx5_mode;
err = mlx5_devlink_eswitch_check(devlink);
@@ -2069,7 +2355,7 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
if (err)
goto out;
- for (vport = 1; vport < esw->enabled_vports; vport++) {
+ mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
if (err) {
NL_SET_ERR_MSG_MOD(extack,
@@ -2082,7 +2368,8 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
return 0;
revert_inline_mode:
- while (--vport > 0)
+ num_vport = --vport;
+ mlx5_esw_for_each_host_func_vport_reverse(esw, vport, num_vport)
mlx5_modify_nic_vport_min_inline(dev,
vport,
esw->offloads.inline_mode);
@@ -2103,7 +2390,7 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
}
-int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
+int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode)
{
u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
struct mlx5_core_dev *dev = esw->dev;
@@ -2112,7 +2399,7 @@ int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
if (!MLX5_CAP_GEN(dev, vport_group_manager))
return -EOPNOTSUPP;
- if (esw->mode == SRIOV_NONE)
+ if (esw->mode == MLX5_ESWITCH_NONE)
return -EOPNOTSUPP;
switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
@@ -2127,9 +2414,10 @@ int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
}
query_vports:
- for (vport = 1; vport <= nvfs; vport++) {
+ mlx5_query_nic_vport_min_inline(dev, esw->first_host_vport, &prev_mlx5_mode);
+ mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
- if (vport > 1 && prev_mlx5_mode != mlx5_mode)
+ if (prev_mlx5_mode != mlx5_mode)
return -EINVAL;
prev_mlx5_mode = mlx5_mode;
}
@@ -2139,7 +2427,8 @@ out:
return 0;
}
-int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap,
+int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
+ enum devlink_eswitch_encap_mode encap,
struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
@@ -2158,7 +2447,7 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap,
if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC)
return -EOPNOTSUPP;
- if (esw->mode == SRIOV_LEGACY) {
+ if (esw->mode == MLX5_ESWITCH_LEGACY) {
esw->offloads.encap = encap;
return 0;
}
@@ -2188,7 +2477,8 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap,
return err;
}
-int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap)
+int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
+ enum devlink_eswitch_encap_mode *encap)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
struct mlx5_eswitch *esw = dev->priv.eswitch;
@@ -2203,36 +2493,31 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap)
}
void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
- struct mlx5_eswitch_rep_if *__rep_if,
+ const struct mlx5_eswitch_rep_ops *ops,
u8 rep_type)
{
- struct mlx5_eswitch_rep_if *rep_if;
+ struct mlx5_eswitch_rep_data *rep_data;
struct mlx5_eswitch_rep *rep;
int i;
+ esw->offloads.rep_ops[rep_type] = ops;
mlx5_esw_for_all_reps(esw, i, rep) {
- rep_if = &rep->rep_if[rep_type];
- rep_if->load = __rep_if->load;
- rep_if->unload = __rep_if->unload;
- rep_if->get_proto_dev = __rep_if->get_proto_dev;
- rep_if->priv = __rep_if->priv;
-
- atomic_set(&rep_if->state, REP_REGISTERED);
+ rep_data = &rep->rep_data[rep_type];
+ atomic_set(&rep_data->state, REP_REGISTERED);
}
}
EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);
void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
{
- u16 max_vf = mlx5_core_max_vfs(esw->dev);
struct mlx5_eswitch_rep *rep;
int i;
- if (esw->mode == SRIOV_OFFLOADS)
- __unload_reps_all_vport(esw, max_vf, rep_type);
+ if (esw->mode == MLX5_ESWITCH_OFFLOADS)
+ __unload_reps_all_vport(esw, rep_type);
mlx5_esw_for_all_reps(esw, i, rep)
- atomic_set(&rep->rep_if[rep_type].state, REP_UNREGISTERED);
+ atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
}
EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps);
@@ -2241,7 +2526,7 @@ void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
struct mlx5_eswitch_rep *rep;
rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
- return rep->rep_if[rep_type].priv;
+ return rep->rep_data[rep_type].priv;
}
void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
@@ -2252,9 +2537,9 @@ void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
rep = mlx5_eswitch_get_rep(esw, vport);
- if (atomic_read(&rep->rep_if[rep_type].state) == REP_LOADED &&
- rep->rep_if[rep_type].get_proto_dev)
- return rep->rep_if[rep_type].get_proto_dev(rep);
+ if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
+ esw->offloads.rep_ops[rep_type]->get_proto_dev)
+ return esw->offloads.rep_ops[rep_type]->get_proto_dev(rep);
return NULL;
}
EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);
@@ -2271,3 +2556,22 @@ struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
return mlx5_eswitch_get_rep(esw, vport);
}
EXPORT_SYMBOL(mlx5_eswitch_vport_rep);
+
+bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num)
+{
+ return vport_num >= MLX5_VPORT_FIRST_VF &&
+ vport_num <= esw->dev->priv.sriov.max_vfs;
+}
+
+bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw)
+{
+ return !!(esw->flags & MLX5_ESWITCH_VPORT_MATCH_METADATA);
+}
+EXPORT_SYMBOL(mlx5_eswitch_vport_match_metadata_enabled);
+
+u32 mlx5_eswitch_get_vport_metadata_for_match(const struct mlx5_eswitch *esw,
+ u16 vport_num)
+{
+ return ((MLX5_CAP_GEN(esw->dev, vhca_id) & 0xffff) << 16) | vport_num;
+}
+EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
new file mode 100644
index 000000000000..1d55a324a17e
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
@@ -0,0 +1,277 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2019 Mellanox Technologies.
+
+#include <linux/mlx5/fs.h>
+#include "eswitch.h"
+
+struct mlx5_termtbl_handle {
+ struct hlist_node termtbl_hlist;
+
+ struct mlx5_flow_table *termtbl;
+ struct mlx5_flow_act flow_act;
+ struct mlx5_flow_destination dest;
+
+ struct mlx5_flow_handle *rule;
+ int ref_count;
+};
+
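+/* Hash the rule's action, VLAN attributes and destination vport to look up
+ * a termination table that can be shared.
+ */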
+static u32
+mlx5_eswitch_termtbl_hash(struct mlx5_flow_act *flow_act,
+ struct mlx5_flow_destination *dest)
+{
+ u32 hash;
+
+ hash = jhash_1word(flow_act->action, 0);
+ hash = jhash((const void *)&flow_act->vlan,
+ sizeof(flow_act->vlan), hash);
+ hash = jhash((const void *)&dest->vport.num,
+ sizeof(dest->vport.num), hash);
+ hash = jhash((const void *)&dest->vport.vhca_id,
+ sizeof(dest->vport.vhca_id), hash);
+ return hash;
+}
+
+static int
+mlx5_eswitch_termtbl_cmp(struct mlx5_flow_act *flow_act1,
+ struct mlx5_flow_destination *dest1,
+ struct mlx5_flow_act *flow_act2,
+ struct mlx5_flow_destination *dest2)
+{
+ return flow_act1->action != flow_act2->action ||
+ dest1->vport.num != dest2->vport.num ||
+ dest1->vport.vhca_id != dest2->vport.vhca_id ||
+ memcmp(&flow_act1->vlan, &flow_act2->vlan,
+ sizeof(flow_act1->vlan));
+}
+
+static int
+mlx5_eswitch_termtbl_create(struct mlx5_core_dev *dev,
+ struct mlx5_termtbl_handle *tt,
+ struct mlx5_flow_act *flow_act)
+{
+ static const struct mlx5_flow_spec spec = {};
+ struct mlx5_flow_namespace *root_ns;
+ int prio, flags;
+ int err;
+
+ root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
+ if (!root_ns) {
+ esw_warn(dev, "Failed to get FDB flow namespace\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* Since this is the terminating action, the termination table is at the
+ * same prio as the slow path
+ */
+ prio = FDB_SLOW_PATH;
+ flags = MLX5_FLOW_TABLE_TERMINATION;
+ tt->termtbl = mlx5_create_auto_grouped_flow_table(root_ns, prio, 1, 1,
+ 0, flags);
+ if (IS_ERR(tt->termtbl)) {
+ esw_warn(dev, "Failed to create termination table\n");
+ return -EOPNOTSUPP;
+ }
+
+ tt->rule = mlx5_add_flow_rules(tt->termtbl, &spec, flow_act,
+ &tt->dest, 1);
+
+ if (IS_ERR(tt->rule)) {
+ esw_warn(dev, "Failed to create termination table rule\n");
+ goto add_flow_err;
+ }
+ return 0;
+
+add_flow_err:
+ err = mlx5_destroy_flow_table(tt->termtbl);
+ if (err)
+ esw_warn(dev, "Failed to destroy termination table\n");
+
+ return -EOPNOTSUPP;
+}
+
+static struct mlx5_termtbl_handle *
+mlx5_eswitch_termtbl_get_create(struct mlx5_eswitch *esw,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_flow_destination *dest)
+{
+ struct mlx5_termtbl_handle *tt;
+ bool found = false;
+ u32 hash_key;
+ int err;
+
+ mutex_lock(&esw->offloads.termtbl_mutex);
+
+ hash_key = mlx5_eswitch_termtbl_hash(flow_act, dest);
+ hash_for_each_possible(esw->offloads.termtbl_tbl, tt,
+ termtbl_hlist, hash_key) {
+ if (!mlx5_eswitch_termtbl_cmp(&tt->flow_act, &tt->dest,
+ flow_act, dest)) {
+ found = true;
+ break;
+ }
+ }
+ if (found)
+ goto tt_add_ref;
+
+ tt = kzalloc(sizeof(*tt), GFP_KERNEL);
+ if (!tt) {
+ err = -ENOMEM;
+ goto tt_create_err;
+ }
+
+ tt->dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ tt->dest.vport.num = dest->vport.num;
+ tt->dest.vport.vhca_id = dest->vport.vhca_id;
+ memcpy(&tt->flow_act, flow_act, sizeof(*flow_act));
+
+ err = mlx5_eswitch_termtbl_create(esw->dev, tt, flow_act);
+ if (err) {
+ esw_warn(esw->dev, "Failed to create termination table\n");
+ goto tt_create_err;
+ }
+ hash_add(esw->offloads.termtbl_tbl, &tt->termtbl_hlist, hash_key);
+tt_add_ref:
+ tt->ref_count++;
+ mutex_unlock(&esw->offloads.termtbl_mutex);
+ return tt;
+tt_create_err:
+ kfree(tt);
+ mutex_unlock(&esw->offloads.termtbl_mutex);
+ return ERR_PTR(err);
+}
+
+void
+mlx5_eswitch_termtbl_put(struct mlx5_eswitch *esw,
+ struct mlx5_termtbl_handle *tt)
+{
+ mutex_lock(&esw->offloads.termtbl_mutex);
+ if (--tt->ref_count == 0)
+ hash_del(&tt->termtbl_hlist);
+ mutex_unlock(&esw->offloads.termtbl_mutex);
+
+ if (!tt->ref_count) {
+ mlx5_del_flow_rules(tt->rule);
+ mlx5_destroy_flow_table(tt->termtbl);
+ kfree(tt);
+ }
+}
+
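+/* Move the VLAN push actions, together with their VLAN attributes, from the
+ * original rule to the termination table rule.
+ */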
+static void
+mlx5_eswitch_termtbl_actions_move(struct mlx5_flow_act *src,
+ struct mlx5_flow_act *dst)
+{
+ if (!(src->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH))
+ return;
+
+ src->action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
+ dst->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
+ memcpy(&dst->vlan[0], &src->vlan[0], sizeof(src->vlan[0]));
+ memset(&src->vlan[0], 0, sizeof(src->vlan[0]));
+
+ if (!(src->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2))
+ return;
+
+ src->action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
+ dst->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
+ memcpy(&dst->vlan[1], &src->vlan[1], sizeof(src->vlan[1]));
+ memset(&src->vlan[1], 0, sizeof(src->vlan[1]));
+}
+
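+/* A termination table is required when the device supports FDB termination
+ * tables and the rule pushes a VLAN on traffic received from the uplink.
+ */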
+bool
+mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_flow_spec *spec)
+{
+ u32 port_mask = MLX5_GET(fte_match_param, spec->match_criteria,
+ misc_parameters.source_port);
+ u32 port_value = MLX5_GET(fte_match_param, spec->match_value,
+ misc_parameters.source_port);
+
+ if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, termination_table))
+ return false;
+
+ /* push vlan on RX */
+ return (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) &&
+ ((port_mask & port_value) == MLX5_VPORT_UPLINK);
+}
+
+struct mlx5_flow_handle *
+mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *fdb,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_esw_flow_attr *attr,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_flow_destination *dest,
+ int num_dest)
+{
+ struct mlx5_flow_act term_tbl_act = {};
+ struct mlx5_flow_handle *rule = NULL;
+ bool term_table_created = false;
+ int num_vport_dests = 0;
+ int i, curr_dest;
+
+ mlx5_eswitch_termtbl_actions_move(flow_act, &term_tbl_act);
+ term_tbl_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+
+ for (i = 0; i < num_dest; i++) {
+ struct mlx5_termtbl_handle *tt;
+
+ /* only vport destinations can be terminated */
+ if (dest[i].type != MLX5_FLOW_DESTINATION_TYPE_VPORT)
+ continue;
+
+ /* get the terminating table for the action list */
+ tt = mlx5_eswitch_termtbl_get_create(esw, &term_tbl_act,
+ &dest[i]);
+ if (IS_ERR(tt)) {
+ esw_warn(esw->dev, "Failed to create termination table\n");
+ goto revert_changes;
+ }
+ attr->dests[num_vport_dests].termtbl = tt;
+ num_vport_dests++;
+
+ /* link the destination with the termination table */
+ dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[i].ft = tt->termtbl;
+ term_table_created = true;
+ }
+
+ /* at least one destination should reference a termination table */
+ if (!term_table_created)
+ goto revert_changes;
+
+ /* create the FTE */
+ rule = mlx5_add_flow_rules(fdb, spec, flow_act, dest, num_dest);
+ if (IS_ERR(rule))
+ goto revert_changes;
+
+ goto out;
+
+revert_changes:
+ /* revert the changes that were made to the original flow_act
+ * and fall back to the original rule actions
+ */
+ mlx5_eswitch_termtbl_actions_move(&term_tbl_act, flow_act);
+
+ for (curr_dest = 0; curr_dest < num_vport_dests; curr_dest++) {
+ struct mlx5_termtbl_handle *tt = attr->dests[curr_dest].termtbl;
+
+ /* search for the destination associated with the
+ * current term table
+ */
+ for (i = 0; i < num_dest; i++) {
+ if (dest[i].ft != tt->termtbl)
+ continue;
+
+ memset(&dest[i], 0, sizeof(dest[i]));
+ dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ dest[i].vport.num = tt->dest.vport.num;
+ dest[i].vport.vhca_id = tt->dest.vport.vhca_id;
+ mlx5_eswitch_termtbl_put(esw, tt);
+ break;
+ }
+ }
+ rule = mlx5_add_flow_rules(fdb, spec, flow_act, dest, num_dest);
+out:
+ return rule;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/events.c b/drivers/net/ethernet/mellanox/mlx5/core/events.c
index a81e8d2168d8..8bcf3426b9c6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/events.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/events.c
@@ -108,8 +108,8 @@ static const char *eqe_type_str(u8 type)
return "MLX5_EVENT_TYPE_STALL_EVENT";
case MLX5_EVENT_TYPE_CMD:
return "MLX5_EVENT_TYPE_CMD";
- case MLX5_EVENT_TYPE_HOST_PARAMS_CHANGE:
- return "MLX5_EVENT_TYPE_HOST_PARAMS_CHANGE";
+ case MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED:
+ return "MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED";
case MLX5_EVENT_TYPE_PAGE_REQUEST:
return "MLX5_EVENT_TYPE_PAGE_REQUEST";
case MLX5_EVENT_TYPE_PAGE_FAULT:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
index ca2296a2f9ee..4c50efe4e7f1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
@@ -414,7 +414,8 @@ static void mlx5_fpga_conn_cq_tasklet(unsigned long data)
mlx5_fpga_conn_cqes(conn, MLX5_FPGA_CQ_BUDGET);
}
-static void mlx5_fpga_conn_cq_complete(struct mlx5_core_cq *mcq)
+static void mlx5_fpga_conn_cq_complete(struct mlx5_core_cq *mcq,
+ struct mlx5_eqe *eqe)
{
struct mlx5_fpga_conn *conn;
@@ -429,6 +430,7 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
struct mlx5_fpga_device *fdev = conn->fdev;
struct mlx5_core_dev *mdev = fdev->mdev;
u32 temp_cqc[MLX5_ST_SZ_DW(cqc)] = {0};
+ u32 out[MLX5_ST_SZ_DW(create_cq_out)];
struct mlx5_wq_param wqp;
struct mlx5_cqe64 *cqe;
int inlen, err, eqn;
@@ -476,7 +478,7 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas);
mlx5_fill_page_frag_array(&conn->cq.wq_ctrl.buf, pas);
- err = mlx5_core_create_cq(mdev, &conn->cq.mcq, in, inlen);
+ err = mlx5_core_create_cq(mdev, &conn->cq.mcq, in, inlen, out, sizeof(out));
kvfree(in);
if (err)
@@ -867,7 +869,7 @@ struct mlx5_fpga_conn *mlx5_fpga_conn_create(struct mlx5_fpga_device *fdev,
conn->cb_arg = attr->cb_arg;
remote_mac = MLX5_ADDR_OF(fpga_qpc, conn->fpga_qpc, remote_mac_47_32);
- err = mlx5_query_nic_vport_mac_address(fdev->mdev, 0, remote_mac);
+ err = mlx5_query_mac_address(fdev->mdev, remote_mac);
if (err) {
mlx5_fpga_err(fdev, "Failed to query local MAC: %d\n", err);
ret = ERR_PTR(err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
index 52c47d3dd5a5..c76da309506b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
@@ -636,7 +636,8 @@ static bool mlx5_is_fpga_egress_ipsec_rule(struct mlx5_core_dev *dev,
u8 match_criteria_enable,
const u32 *match_c,
const u32 *match_v,
- struct mlx5_flow_act *flow_act)
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_flow_context *flow_context)
{
const void *outer_c = MLX5_ADDR_OF(fte_match_param, match_c,
outer_headers);
@@ -655,7 +656,7 @@ static bool mlx5_is_fpga_egress_ipsec_rule(struct mlx5_core_dev *dev,
(match_criteria_enable &
~(MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS)) ||
(flow_act->action & ~(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | MLX5_FLOW_CONTEXT_ACTION_ALLOW)) ||
- (flow_act->flags & FLOW_ACT_HAS_TAG))
+ (flow_context->flags & FLOW_CONTEXT_HAS_TAG))
return false;
return true;
@@ -767,7 +768,8 @@ mlx5_fpga_ipsec_fs_create_sa_ctx(struct mlx5_core_dev *mdev,
fg->mask.match_criteria_enable,
fg->mask.match_criteria,
fte->val,
- &fte->action))
+ &fte->action,
+ &fte->flow_context))
return ERR_PTR(-EINVAL);
else if (!mlx5_is_fpga_ipsec_rule(mdev,
fg->mask.match_criteria_enable,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h
index 2b5e63b0d4d6..382985e65b48 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h
@@ -37,8 +37,6 @@
#include "accel/ipsec.h"
#include "fs_cmd.h"
-#ifdef CONFIG_MLX5_FPGA
-
u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev);
unsigned int mlx5_fpga_ipsec_counters_count(struct mlx5_core_dev *mdev);
int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
@@ -66,77 +64,4 @@ int mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
const struct mlx5_flow_cmds *
mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type);
-#else
-
-static inline u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev)
-{
- return 0;
-}
-
-static inline unsigned int
-mlx5_fpga_ipsec_counters_count(struct mlx5_core_dev *mdev)
-{
- return 0;
-}
-
-static inline int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev,
- u64 *counters)
-{
- return 0;
-}
-
-static inline void *
-mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev,
- struct mlx5_accel_esp_xfrm *accel_xfrm,
- const __be32 saddr[4],
- const __be32 daddr[4],
- const __be32 spi, bool is_ipv6)
-{
- return NULL;
-}
-
-static inline void mlx5_fpga_ipsec_delete_sa_ctx(void *context)
-{
-}
-
-static inline int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev)
-{
- return 0;
-}
-
-static inline void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev)
-{
-}
-
-static inline void mlx5_fpga_ipsec_build_fs_cmds(void)
-{
-}
-
-static inline struct mlx5_accel_esp_xfrm *
-mlx5_fpga_esp_create_xfrm(struct mlx5_core_dev *mdev,
- const struct mlx5_accel_esp_xfrm_attrs *attrs,
- u32 flags)
-{
- return ERR_PTR(-EOPNOTSUPP);
-}
-
-static inline void mlx5_fpga_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm)
-{
-}
-
-static inline int
-mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
- const struct mlx5_accel_esp_xfrm_attrs *attrs)
-{
- return -EOPNOTSUPP;
-}
-
-static inline const struct mlx5_flow_cmds *
-mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type)
-{
- return mlx5_fs_cmd_get_default(type);
-}
-
-#endif /* CONFIG_MLX5_FPGA */
-
#endif /* __MLX5_FPGA_SADB_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 013b1ca4a791..7ac1249eadc3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -147,6 +147,7 @@ static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
{
int en_encap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT);
int en_decap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
+ int term = !!(ft->flags & MLX5_FLOW_TABLE_TERMINATION);
u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0};
u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0};
struct mlx5_core_dev *dev = ns->dev;
@@ -167,6 +168,8 @@ static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
en_decap);
MLX5_SET(create_flow_table_in, in, flow_table_context.reformat_en,
en_encap);
+ MLX5_SET(create_flow_table_in, in, flow_table_context.termination_table,
+ term);
switch (ft->op_mod) {
case FS_FT_OP_MOD_NORMAL:
@@ -393,7 +396,11 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
MLX5_SET(flow_context, in_flow_context, group_id, group_id);
- MLX5_SET(flow_context, in_flow_context, flow_tag, fte->action.flow_tag);
+ MLX5_SET(flow_context, in_flow_context, flow_tag,
+ fte->flow_context.flow_tag);
+ MLX5_SET(flow_context, in_flow_context, flow_source,
+ fte->flow_context.flow_source);
+
MLX5_SET(flow_context, in_flow_context, extended_destination,
extended_dest);
if (extended_dest) {
@@ -768,6 +775,10 @@ int mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
max_actions = MLX5_CAP_FLOWTABLE_NIC_TX(dev, max_modify_header_actions);
table_type = FS_FT_NIC_TX;
break;
+ case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
+ max_actions = MLX5_CAP_ESW_INGRESS_ACL(dev, max_modify_header_actions);
+ table_type = FS_FT_ESW_INGRESS_ACL;
+ break;
default:
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index fe76c6fd6d80..3e99799bdb40 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -584,7 +584,7 @@ err_ida_remove:
}
static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft,
- u32 *match_value,
+ const struct mlx5_flow_spec *spec,
struct mlx5_flow_act *flow_act)
{
struct mlx5_flow_steering *steering = get_steering(&ft->node);
@@ -594,9 +594,10 @@ static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft,
if (!fte)
return ERR_PTR(-ENOMEM);
- memcpy(fte->val, match_value, sizeof(fte->val));
+ memcpy(fte->val, &spec->match_value, sizeof(fte->val));
fte->node.type = FS_TYPE_FLOW_ENTRY;
fte->action = *flow_act;
+ fte->flow_context = spec->flow_context;
tree_init_node(&fte->node, NULL, del_sw_fte);
@@ -612,7 +613,7 @@ static void dealloc_flow_group(struct mlx5_flow_steering *steering,
static struct mlx5_flow_group *alloc_flow_group(struct mlx5_flow_steering *steering,
u8 match_criteria_enable,
- void *match_criteria,
+ const void *match_criteria,
int start_index,
int end_index)
{
@@ -642,7 +643,7 @@ static struct mlx5_flow_group *alloc_flow_group(struct mlx5_flow_steering *steer
static struct mlx5_flow_group *alloc_insert_flow_group(struct mlx5_flow_table *ft,
u8 match_criteria_enable,
- void *match_criteria,
+ const void *match_criteria,
int start_index,
int end_index,
struct list_head *prev)
@@ -1285,7 +1286,7 @@ free_handle:
}
static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table *ft,
- struct mlx5_flow_spec *spec)
+ const struct mlx5_flow_spec *spec)
{
struct list_head *prev = &ft->node.children;
struct mlx5_flow_group *fg;
@@ -1430,7 +1431,9 @@ static bool check_conflicting_actions(u32 action1, u32 action2)
return false;
}
-static int check_conflicting_ftes(struct fs_fte *fte, const struct mlx5_flow_act *flow_act)
+static int check_conflicting_ftes(struct fs_fte *fte,
+ const struct mlx5_flow_context *flow_context,
+ const struct mlx5_flow_act *flow_act)
{
if (check_conflicting_actions(flow_act->action, fte->action.action)) {
mlx5_core_warn(get_dev(&fte->node),
@@ -1438,12 +1441,12 @@ static int check_conflicting_ftes(struct fs_fte *fte, const struct mlx5_flow_act
return -EEXIST;
}
- if ((flow_act->flags & FLOW_ACT_HAS_TAG) &&
- fte->action.flow_tag != flow_act->flow_tag) {
+ if ((flow_context->flags & FLOW_CONTEXT_HAS_TAG) &&
+ fte->flow_context.flow_tag != flow_context->flow_tag) {
mlx5_core_warn(get_dev(&fte->node),
"FTE flow tag %u already exists with different flow tag %u\n",
- fte->action.flow_tag,
- flow_act->flow_tag);
+ fte->flow_context.flow_tag,
+ flow_context->flow_tag);
return -EEXIST;
}
@@ -1451,7 +1454,7 @@ static int check_conflicting_ftes(struct fs_fte *fte, const struct mlx5_flow_act
}
static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
- u32 *match_value,
+ const struct mlx5_flow_spec *spec,
struct mlx5_flow_act *flow_act,
struct mlx5_flow_destination *dest,
int dest_num,
@@ -1462,7 +1465,7 @@ static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
int i;
int ret;
- ret = check_conflicting_ftes(fte, flow_act);
+ ret = check_conflicting_ftes(fte, &spec->flow_context, flow_act);
if (ret)
return ERR_PTR(ret);
@@ -1536,7 +1539,7 @@ static void free_match_list(struct match_list_head *head)
static int build_match_list(struct match_list_head *match_head,
struct mlx5_flow_table *ft,
- struct mlx5_flow_spec *spec)
+ const struct mlx5_flow_spec *spec)
{
struct rhlist_head *tmp, *list;
struct mlx5_flow_group *g;
@@ -1589,7 +1592,7 @@ static u64 matched_fgs_get_version(struct list_head *match_head)
static struct fs_fte *
lookup_fte_locked(struct mlx5_flow_group *g,
- u32 *match_value,
+ const u32 *match_value,
bool take_write)
{
struct fs_fte *fte_tmp;
@@ -1622,7 +1625,7 @@ out:
static struct mlx5_flow_handle *
try_add_to_existing_fg(struct mlx5_flow_table *ft,
struct list_head *match_head,
- struct mlx5_flow_spec *spec,
+ const struct mlx5_flow_spec *spec,
struct mlx5_flow_act *flow_act,
struct mlx5_flow_destination *dest,
int dest_num,
@@ -1637,7 +1640,7 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft,
u64 version;
int err;
- fte = alloc_fte(ft, spec->match_value, flow_act);
+ fte = alloc_fte(ft, spec, flow_act);
if (IS_ERR(fte))
return ERR_PTR(-ENOMEM);
@@ -1653,8 +1656,7 @@ search_again_locked:
fte_tmp = lookup_fte_locked(g, spec->match_value, take_write);
if (!fte_tmp)
continue;
- rule = add_rule_fg(g, spec->match_value,
- flow_act, dest, dest_num, fte_tmp);
+ rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte_tmp);
up_write_ref_node(&fte_tmp->node, false);
tree_put_node(&fte_tmp->node, false);
kmem_cache_free(steering->ftes_cache, fte);
@@ -1701,8 +1703,7 @@ skip_search:
nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
up_write_ref_node(&g->node, false);
- rule = add_rule_fg(g, spec->match_value,
- flow_act, dest, dest_num, fte);
+ rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
up_write_ref_node(&fte->node, false);
tree_put_node(&fte->node, false);
return rule;
@@ -1715,7 +1716,7 @@ out:
static struct mlx5_flow_handle *
_mlx5_add_flow_rules(struct mlx5_flow_table *ft,
- struct mlx5_flow_spec *spec,
+ const struct mlx5_flow_spec *spec,
struct mlx5_flow_act *flow_act,
struct mlx5_flow_destination *dest,
int dest_num)
@@ -1788,7 +1789,7 @@ search_again_locked:
if (err)
goto err_release_fg;
- fte = alloc_fte(ft, spec->match_value, flow_act);
+ fte = alloc_fte(ft, spec, flow_act);
if (IS_ERR(fte)) {
err = PTR_ERR(fte);
goto err_release_fg;
@@ -1802,8 +1803,7 @@ search_again_locked:
nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
up_write_ref_node(&g->node, false);
- rule = add_rule_fg(g, spec->match_value, flow_act, dest,
- dest_num, fte);
+ rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
up_write_ref_node(&fte->node, false);
tree_put_node(&fte->node, false);
tree_put_node(&g->node, false);
@@ -1823,7 +1823,7 @@ static bool fwd_next_prio_supported(struct mlx5_flow_table *ft)
struct mlx5_flow_handle *
mlx5_add_flow_rules(struct mlx5_flow_table *ft,
- struct mlx5_flow_spec *spec,
+ const struct mlx5_flow_spec *spec,
struct mlx5_flow_act *flow_act,
struct mlx5_flow_destination *dest,
int num_dest)
@@ -2092,7 +2092,7 @@ struct mlx5_flow_namespace *mlx5_get_flow_vport_acl_namespace(struct mlx5_core_d
{
struct mlx5_flow_steering *steering = dev->priv.steering;
- if (!steering || vport >= MLX5_TOTAL_VPORTS(dev))
+ if (!steering || vport >= mlx5_eswitch_get_total_vports(dev))
return NULL;
switch (type) {
@@ -2423,7 +2423,7 @@ static void cleanup_egress_acls_root_ns(struct mlx5_core_dev *dev)
if (!steering->esw_egress_root_ns)
return;
- for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++)
+ for (i = 0; i < mlx5_eswitch_get_total_vports(dev); i++)
cleanup_root_ns(steering->esw_egress_root_ns[i]);
kfree(steering->esw_egress_root_ns);
@@ -2438,7 +2438,7 @@ static void cleanup_ingress_acls_root_ns(struct mlx5_core_dev *dev)
if (!steering->esw_ingress_root_ns)
return;
- for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++)
+ for (i = 0; i < mlx5_eswitch_get_total_vports(dev); i++)
cleanup_root_ns(steering->esw_ingress_root_ns[i]);
kfree(steering->esw_ingress_root_ns);
@@ -2606,16 +2606,18 @@ static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering, int vpo
static int init_egress_acls_root_ns(struct mlx5_core_dev *dev)
{
struct mlx5_flow_steering *steering = dev->priv.steering;
+ int total_vports = mlx5_eswitch_get_total_vports(dev);
int err;
int i;
- steering->esw_egress_root_ns = kcalloc(MLX5_TOTAL_VPORTS(dev),
- sizeof(*steering->esw_egress_root_ns),
- GFP_KERNEL);
+ steering->esw_egress_root_ns =
+ kcalloc(total_vports,
+ sizeof(*steering->esw_egress_root_ns),
+ GFP_KERNEL);
if (!steering->esw_egress_root_ns)
return -ENOMEM;
- for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++) {
+ for (i = 0; i < total_vports; i++) {
err = init_egress_acl_root_ns(steering, i);
if (err)
goto cleanup_root_ns;
@@ -2634,16 +2636,18 @@ cleanup_root_ns:
static int init_ingress_acls_root_ns(struct mlx5_core_dev *dev)
{
struct mlx5_flow_steering *steering = dev->priv.steering;
+ int total_vports = mlx5_eswitch_get_total_vports(dev);
int err;
int i;
- steering->esw_ingress_root_ns = kcalloc(MLX5_TOTAL_VPORTS(dev),
- sizeof(*steering->esw_ingress_root_ns),
- GFP_KERNEL);
+ steering->esw_ingress_root_ns =
+ kcalloc(total_vports,
+ sizeof(*steering->esw_ingress_root_ns),
+ GFP_KERNEL);
if (!steering->esw_ingress_root_ns)
return -ENOMEM;
- for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++) {
+ for (i = 0; i < total_vports; i++) {
err = init_ingress_acl_root_ns(steering, i);
if (err)
goto cleanup_root_ns;
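
For context, a minimal caller sketch (not part of the patch) of the reworked flow-steering API in fs_core.c: the per-rule flow tag now lives in the mlx5_flow_context embedded in mlx5_flow_spec rather than in mlx5_flow_act, and mlx5_add_flow_rules() takes the spec as const. The helper name add_tagged_rule() and the forwarding action are illustrative only.

	static struct mlx5_flow_handle *
	add_tagged_rule(struct mlx5_flow_table *ft,
			struct mlx5_flow_destination *dest, u32 tag)
	{
		struct mlx5_flow_act flow_act = {
			.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		};
		struct mlx5_flow_handle *rule;
		struct mlx5_flow_spec *spec;

		spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
		if (!spec)
			return ERR_PTR(-ENOMEM);

		/* The tag is carried in the flow context, not in mlx5_flow_act */
		spec->flow_context.flow_tag = tag;
		spec->flow_context.flags = FLOW_CONTEXT_HAS_TAG;

		rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
		kvfree(spec);
		return rule;
	}
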
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index a08c3d09a50f..c48c382f926f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -170,6 +170,7 @@ struct fs_fte {
u32 val[MLX5_ST_SZ_DW_MATCH_PARAM];
u32 dests_size;
u32 index;
+ struct mlx5_flow_context flow_context;
struct mlx5_flow_act action;
enum fs_fte_status status;
struct mlx5_fc *counter;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index c6c28f56aa29..b3762123a69c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -102,13 +102,15 @@ static struct list_head *mlx5_fc_counters_lookup_next(struct mlx5_core_dev *dev,
struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
unsigned long next_id = (unsigned long)id + 1;
struct mlx5_fc *counter;
+ unsigned long tmp;
rcu_read_lock();
/* skip counters that are in idr, but not yet in counters list */
- while ((counter = idr_get_next_ul(&fc_stats->counters_idr,
- &next_id)) != NULL &&
- list_empty(&counter->list))
- next_id++;
+ idr_for_each_entry_continue_ul(&fc_stats->counters_idr,
+ counter, tmp, next_id) {
+ if (!list_empty(&counter->list))
+ break;
+ }
rcu_read_unlock();
return counter ? &counter->list : &fc_stats->counters;
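
A rough sketch (not from the patch) of the same resumable-walk pattern with idr_for_each_entry_continue_ul(), assuming <linux/idr.h> and <linux/list.h>; the struct name and helper are hypothetical.

	struct obj {
		struct list_head list;
	};

	/* Resume the idr walk at "id" and return the first object already
	 * linked into its list; the loop leaves "o" as NULL when none match.
	 */
	static struct obj *first_listed_from(struct idr *objects, unsigned long id)
	{
		unsigned long tmp;
		struct obj *o;

		idr_for_each_entry_continue_ul(objects, o, tmp, id) {
			if (!list_empty(&o->list))
				break;
		}
		return o;
	}
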
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 1ab6f7e3bec6..a19790dee7b2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -37,6 +37,37 @@
#include "mlx5_core.h"
#include "../../mlxfw/mlxfw.h"
+enum {
+ MCQS_IDENTIFIER_BOOT_IMG = 0x1,
+ MCQS_IDENTIFIER_OEM_NVCONFIG = 0x4,
+ MCQS_IDENTIFIER_MLNX_NVCONFIG = 0x5,
+ MCQS_IDENTIFIER_CS_TOKEN = 0x6,
+ MCQS_IDENTIFIER_DBG_TOKEN = 0x7,
+ MCQS_IDENTIFIER_GEARBOX = 0xA,
+};
+
+enum {
+ MCQS_UPDATE_STATE_IDLE,
+ MCQS_UPDATE_STATE_IN_PROGRESS,
+ MCQS_UPDATE_STATE_APPLIED,
+ MCQS_UPDATE_STATE_ACTIVE,
+ MCQS_UPDATE_STATE_ACTIVE_PENDING_RESET,
+ MCQS_UPDATE_STATE_FAILED,
+ MCQS_UPDATE_STATE_CANCELED,
+ MCQS_UPDATE_STATE_BUSY,
+};
+
+enum {
+ MCQI_INFO_TYPE_CAPABILITIES = 0x0,
+ MCQI_INFO_TYPE_VERSION = 0x1,
+ MCQI_INFO_TYPE_ACTIVATION_METHOD = 0x5,
+};
+
+enum {
+ MCQI_FW_RUNNING_VERSION = 0,
+ MCQI_FW_STORED_VERSION = 1,
+};
+
static int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev, u32 *out,
int outlen)
{
@@ -202,6 +233,18 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
return err;
}
+ if (MLX5_CAP_GEN(dev, event_cap)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_DEV_EVENT);
+ if (err)
+ return err;
+ }
+
+ if (MLX5_CAP_GEN(dev, tls)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_TLS);
+ if (err)
+ return err;
+ }
+
return 0;
}
@@ -392,33 +435,49 @@ static int mlx5_reg_mcda_set(struct mlx5_core_dev *dev,
}
static int mlx5_reg_mcqi_query(struct mlx5_core_dev *dev,
- u16 component_index,
- u32 *max_component_size,
- u8 *log_mcda_word_size,
- u16 *mcda_max_write_size)
+ u16 component_index, bool read_pending,
+ u8 info_type, u16 data_size, void *mcqi_data)
{
- u32 out[MLX5_ST_SZ_DW(mcqi_reg) + MLX5_ST_SZ_DW(mcqi_cap)];
- int offset = MLX5_ST_SZ_DW(mcqi_reg);
- u32 in[MLX5_ST_SZ_DW(mcqi_reg)];
+ u32 out[MLX5_ST_SZ_DW(mcqi_reg) + MLX5_UN_SZ_DW(mcqi_reg_data)] = {};
+ u32 in[MLX5_ST_SZ_DW(mcqi_reg)] = {};
+ void *data;
int err;
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
-
MLX5_SET(mcqi_reg, in, component_index, component_index);
- MLX5_SET(mcqi_reg, in, data_size, MLX5_ST_SZ_BYTES(mcqi_cap));
+ MLX5_SET(mcqi_reg, in, read_pending_component, read_pending);
+ MLX5_SET(mcqi_reg, in, info_type, info_type);
+ MLX5_SET(mcqi_reg, in, data_size, data_size);
err = mlx5_core_access_reg(dev, in, sizeof(in), out,
- sizeof(out), MLX5_REG_MCQI, 0, 0);
+ MLX5_ST_SZ_BYTES(mcqi_reg) + data_size,
+ MLX5_REG_MCQI, 0, 0);
if (err)
- goto out;
+ return err;
- *max_component_size = MLX5_GET(mcqi_cap, out + offset, max_component_size);
- *log_mcda_word_size = MLX5_GET(mcqi_cap, out + offset, log_mcda_word_size);
- *mcda_max_write_size = MLX5_GET(mcqi_cap, out + offset, mcda_max_write_size);
+ data = MLX5_ADDR_OF(mcqi_reg, out, data);
+ memcpy(mcqi_data, data, data_size);
-out:
- return err;
+ return 0;
+}
+
+static int mlx5_reg_mcqi_caps_query(struct mlx5_core_dev *dev, u16 component_index,
+ u32 *max_component_size, u8 *log_mcda_word_size,
+ u16 *mcda_max_write_size)
+{
+ u32 mcqi_reg[MLX5_ST_SZ_DW(mcqi_cap)] = {};
+ int err;
+
+ err = mlx5_reg_mcqi_query(dev, component_index, 0,
+ MCQI_INFO_TYPE_CAPABILITIES,
+ MLX5_ST_SZ_BYTES(mcqi_cap), mcqi_reg);
+ if (err)
+ return err;
+
+ *max_component_size = MLX5_GET(mcqi_cap, mcqi_reg, max_component_size);
+ *log_mcda_word_size = MLX5_GET(mcqi_cap, mcqi_reg, log_mcda_word_size);
+ *mcda_max_write_size = MLX5_GET(mcqi_cap, mcqi_reg, mcda_max_write_size);
+
+ return 0;
}
struct mlx5_mlxfw_dev {
@@ -434,8 +493,13 @@ static int mlx5_component_query(struct mlxfw_dev *mlxfw_dev,
container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;
- return mlx5_reg_mcqi_query(dev, component_index, p_max_size,
- p_align_bits, p_max_write_size);
+ if (!MLX5_CAP_GEN(dev, mcam_reg) || !MLX5_CAP_MCAM_REG(dev, mcqi)) {
+ mlx5_core_warn(dev, "caps query isn't supported by running FW\n");
+ return -EOPNOTSUPP;
+ }
+
+ return mlx5_reg_mcqi_caps_query(dev, component_index, p_max_size,
+ p_align_bits, p_max_write_size);
}
static int mlx5_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
@@ -552,7 +616,8 @@ static const struct mlxfw_dev_ops mlx5_mlxfw_dev_ops = {
};
int mlx5_firmware_flash(struct mlx5_core_dev *dev,
- const struct firmware *firmware)
+ const struct firmware *firmware,
+ struct netlink_ext_ack *extack)
{
struct mlx5_mlxfw_dev mlx5_mlxfw_dev = {
.mlxfw_dev = {
@@ -571,5 +636,133 @@ int mlx5_firmware_flash(struct mlx5_core_dev *dev,
return -EOPNOTSUPP;
}
- return mlxfw_firmware_flash(&mlx5_mlxfw_dev.mlxfw_dev, firmware);
+ return mlxfw_firmware_flash(&mlx5_mlxfw_dev.mlxfw_dev,
+ firmware, extack);
+}
+
+static int mlx5_reg_mcqi_version_query(struct mlx5_core_dev *dev,
+ u16 component_index, bool read_pending,
+ u32 *mcqi_version_out)
+{
+ return mlx5_reg_mcqi_query(dev, component_index, read_pending,
+ MCQI_INFO_TYPE_VERSION,
+ MLX5_ST_SZ_BYTES(mcqi_version),
+ mcqi_version_out);
+}
+
+static int mlx5_reg_mcqs_query(struct mlx5_core_dev *dev, u32 *out,
+ u16 component_index)
+{
+ u8 out_sz = MLX5_ST_SZ_BYTES(mcqs_reg);
+ u32 in[MLX5_ST_SZ_DW(mcqs_reg)] = {};
+ int err;
+
+ memset(out, 0, out_sz);
+
+ MLX5_SET(mcqs_reg, in, component_index, component_index);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+ out_sz, MLX5_REG_MCQS, 0, 0);
+ return err;
+}
+
+/* Scan component indices sequentially to find the boot image index */
+static int mlx5_get_boot_img_component_index(struct mlx5_core_dev *dev)
+{
+ u32 out[MLX5_ST_SZ_DW(mcqs_reg)] = {};
+ u16 identifier, component_idx = 0;
+ bool quit;
+ int err;
+
+ do {
+ err = mlx5_reg_mcqs_query(dev, out, component_idx);
+ if (err)
+ return err;
+
+ identifier = MLX5_GET(mcqs_reg, out, identifier);
+ quit = !!MLX5_GET(mcqs_reg, out, last_index_flag);
+ quit |= identifier == MCQS_IDENTIFIER_BOOT_IMG;
+ } while (!quit && ++component_idx);
+
+ if (identifier != MCQS_IDENTIFIER_BOOT_IMG) {
+ mlx5_core_warn(dev, "mcqs: can't find boot_img component ix, last scanned idx %d\n",
+ component_idx);
+ return -EOPNOTSUPP;
+ }
+
+ return component_idx;
+}
+
+static int
+mlx5_fw_image_pending(struct mlx5_core_dev *dev,
+ int component_index,
+ bool *pending_version_exists)
+{
+ u32 out[MLX5_ST_SZ_DW(mcqs_reg)];
+ u8 component_update_state;
+ int err;
+
+ err = mlx5_reg_mcqs_query(dev, out, component_index);
+ if (err)
+ return err;
+
+ component_update_state = MLX5_GET(mcqs_reg, out, component_update_state);
+
+ if (component_update_state == MCQS_UPDATE_STATE_IDLE) {
+ *pending_version_exists = false;
+ } else if (component_update_state == MCQS_UPDATE_STATE_ACTIVE_PENDING_RESET) {
+ *pending_version_exists = true;
+ } else {
+ mlx5_core_warn(dev,
+ "mcqs: can't read pending fw version while fw state is %d\n",
+ component_update_state);
+ return -ENODATA;
+ }
+ return 0;
+}
+
+int mlx5_fw_version_query(struct mlx5_core_dev *dev,
+ u32 *running_ver, u32 *pending_ver)
+{
+ u32 reg_mcqi_version[MLX5_ST_SZ_DW(mcqi_version)] = {};
+ bool pending_version_exists;
+ int component_index;
+ int err;
+
+ if (!MLX5_CAP_GEN(dev, mcam_reg) || !MLX5_CAP_MCAM_REG(dev, mcqi) ||
+ !MLX5_CAP_MCAM_REG(dev, mcqs)) {
+ mlx5_core_warn(dev, "fw query isn't supported by the FW\n");
+ return -EOPNOTSUPP;
+ }
+
+ component_index = mlx5_get_boot_img_component_index(dev);
+ if (component_index < 0)
+ return component_index;
+
+ err = mlx5_reg_mcqi_version_query(dev, component_index,
+ MCQI_FW_RUNNING_VERSION,
+ reg_mcqi_version);
+ if (err)
+ return err;
+
+ *running_ver = MLX5_GET(mcqi_version, reg_mcqi_version, version);
+
+ err = mlx5_fw_image_pending(dev, component_index, &pending_version_exists);
+ if (err)
+ return err;
+
+ if (!pending_version_exists) {
+ *pending_ver = 0;
+ return 0;
+ }
+
+ err = mlx5_reg_mcqi_version_query(dev, component_index,
+ MCQI_FW_STORED_VERSION,
+ reg_mcqi_version);
+ if (err)
+ return err;
+
+ *pending_ver = MLX5_GET(mcqi_version, reg_mcqi_version, version);
+
+ return 0;
}
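
A hypothetical caller sketch (not part of the patch) for the new mlx5_fw_version_query(): it just logs the raw 32-bit MCQI version words; decoding them into a human-readable string is left to the devlink info path. example_report_fw_versions() is illustrative only.

	static void example_report_fw_versions(struct mlx5_core_dev *dev)
	{
		u32 running = 0, pending = 0;

		if (mlx5_fw_version_query(dev, &running, &pending))
			return;

		mlx5_core_info(dev, "running FW version: 0x%08x\n", running);
		if (pending)
			mlx5_core_info(dev, "pending FW version: 0x%08x (activates after reset)\n",
				       pending);
	}
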
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index a2656f4008d9..2fe6923f7ce0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -40,6 +40,8 @@
#include "mlx5_core.h"
#include "lib/eq.h"
#include "lib/mlx5.h"
+#include "lib/pci_vsc.h"
+#include "diag/fw_tracer.h"
enum {
MLX5_HEALTH_POLL_INTERVAL = 2 * HZ,
@@ -62,12 +64,20 @@ enum {
enum {
MLX5_DROP_NEW_HEALTH_WORK,
- MLX5_DROP_NEW_RECOVERY_WORK,
+};
+
+enum {
+ MLX5_SENSOR_NO_ERR = 0,
+ MLX5_SENSOR_PCI_COMM_ERR = 1,
+ MLX5_SENSOR_PCI_ERR = 2,
+ MLX5_SENSOR_NIC_DISABLED = 3,
+ MLX5_SENSOR_NIC_SW_RESET = 4,
+ MLX5_SENSOR_FW_SYND_RFR = 5,
};
u8 mlx5_get_nic_state(struct mlx5_core_dev *dev)
{
- return (ioread32be(&dev->iseg->cmdq_addr_l_sz) >> 8) & 3;
+ return (ioread32be(&dev->iseg->cmdq_addr_l_sz) >> 8) & 7;
}
void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state)
@@ -80,18 +90,105 @@ void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state)
&dev->iseg->cmdq_addr_l_sz);
}
-static int in_fatal(struct mlx5_core_dev *dev)
+static bool sensor_pci_not_working(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
struct health_buffer __iomem *h = health->health;
+ /* Offline PCI reads return 0xffffffff */
+ return (ioread32be(&h->fw_ver) == 0xffffffff);
+}
+
+static bool sensor_fw_synd_rfr(struct mlx5_core_dev *dev)
+{
+ struct mlx5_core_health *health = &dev->priv.health;
+ struct health_buffer __iomem *h = health->health;
+ u32 rfr = ioread32be(&h->rfr) >> MLX5_RFR_OFFSET;
+ u8 synd = ioread8(&h->synd);
+
+ if (rfr && synd)
+ mlx5_core_dbg(dev, "FW requests reset, synd: %d\n", synd);
+ return rfr && synd;
+}
+
+static u32 check_fatal_sensors(struct mlx5_core_dev *dev)
+{
+ if (sensor_pci_not_working(dev))
+ return MLX5_SENSOR_PCI_COMM_ERR;
+ if (pci_channel_offline(dev->pdev))
+ return MLX5_SENSOR_PCI_ERR;
if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
- return 1;
+ return MLX5_SENSOR_NIC_DISABLED;
+ if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_SW_RESET)
+ return MLX5_SENSOR_NIC_SW_RESET;
+ if (sensor_fw_synd_rfr(dev))
+ return MLX5_SENSOR_FW_SYND_RFR;
- if (ioread32be(&h->fw_ver) == 0xffffffff)
- return 1;
+ return MLX5_SENSOR_NO_ERR;
+}
- return 0;
+static int lock_sem_sw_reset(struct mlx5_core_dev *dev, bool lock)
+{
+ enum mlx5_vsc_state state;
+ int ret;
+
+ if (!mlx5_core_is_pf(dev))
+ return -EBUSY;
+
+ /* Try to lock GW access; this stage doesn't return
+ * EBUSY because a locked GW does not mean that another PF
+ * has already started the reset.
+ */
+ ret = mlx5_vsc_gw_lock(dev);
+ if (ret == -EBUSY)
+ return -EINVAL;
+ if (ret)
+ return ret;
+
+ state = lock ? MLX5_VSC_LOCK : MLX5_VSC_UNLOCK;
+ /* At this stage, if the return status == EBUSY, then we know
+ * for sure that another PF started the reset, so don't allow
+ * another reset.
+ */
+ ret = mlx5_vsc_sem_set_space(dev, MLX5_SEMAPHORE_SW_RESET, state);
+ if (ret)
+ mlx5_core_warn(dev, "Failed to lock SW reset semaphore\n");
+
+ /* Unlock GW access */
+ mlx5_vsc_gw_unlock(dev);
+
+ return ret;
+}
+
+static bool reset_fw_if_needed(struct mlx5_core_dev *dev)
+{
+ bool supported = (ioread32be(&dev->iseg->initializing) >>
+ MLX5_FW_RESET_SUPPORTED_OFFSET) & 1;
+ u32 fatal_error;
+
+ if (!supported)
+ return false;
+
+ /* The reset only needs to be issued by one PF. The health buffer is
+ * shared between all functions, and will be cleared during a reset.
+ * Check again to avoid a redundant 2nd reset. If the fatal error was
+ * PCI related, a reset won't help.
+ */
+ fatal_error = check_fatal_sensors(dev);
+ if (fatal_error == MLX5_SENSOR_PCI_COMM_ERR ||
+ fatal_error == MLX5_SENSOR_NIC_DISABLED ||
+ fatal_error == MLX5_SENSOR_NIC_SW_RESET) {
+ mlx5_core_warn(dev, "Not issuing FW reset. Either it's already done or won't help.");
+ return false;
+ }
+
+ mlx5_core_warn(dev, "Issuing FW Reset\n");
+ /* Write the NIC interface field to initiate the reset; the command
+ * interface address also resides here, so don't overwrite it.
+ */
+ mlx5_set_nic_state(dev, MLX5_NIC_IFC_SW_RESET);
+
+ return true;
}
void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force)
@@ -99,14 +196,65 @@ void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force)
mutex_lock(&dev->intf_state_mutex);
if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
goto unlock;
+ if (dev->state == MLX5_DEVICE_STATE_UNINITIALIZED) {
+ dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
+ goto unlock;
+ }
- mlx5_core_err(dev, "start\n");
- if (pci_channel_offline(dev->pdev) || in_fatal(dev) || force) {
+ if (check_fatal_sensors(dev) || force) {
dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
mlx5_cmd_flush(dev);
}
mlx5_notifier_call_chain(dev->priv.events, MLX5_DEV_EVENT_SYS_ERROR, (void *)1);
+unlock:
+ mutex_unlock(&dev->intf_state_mutex);
+}
+
+#define MLX5_CRDUMP_WAIT_MS 60000
+#define MLX5_FW_RESET_WAIT_MS 1000
+void mlx5_error_sw_reset(struct mlx5_core_dev *dev)
+{
+ unsigned long end, delay_ms = MLX5_FW_RESET_WAIT_MS;
+ int lock = -EBUSY;
+
+ mutex_lock(&dev->intf_state_mutex);
+ if (dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR)
+ goto unlock;
+
+ mlx5_core_err(dev, "start\n");
+
+ if (check_fatal_sensors(dev) == MLX5_SENSOR_FW_SYND_RFR) {
+ /* Get cr-dump and reset FW semaphore */
+ lock = lock_sem_sw_reset(dev, true);
+
+ if (lock == -EBUSY) {
+ delay_ms = MLX5_CRDUMP_WAIT_MS;
+ goto recover_from_sw_reset;
+ }
+ /* Execute SW reset */
+ reset_fw_if_needed(dev);
+ }
+
+recover_from_sw_reset:
+ /* Recover from SW reset */
+ end = jiffies + msecs_to_jiffies(delay_ms);
+ do {
+ if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
+ break;
+
+ cond_resched();
+ } while (!time_after(jiffies, end));
+
+ if (mlx5_get_nic_state(dev) != MLX5_NIC_IFC_DISABLED) {
+ dev_err(&dev->pdev->dev, "NIC IFC still %d after %lums.\n",
+ mlx5_get_nic_state(dev), delay_ms);
+ }
+
+ /* Release the FW semaphore if we are the lock owner */
+ if (!lock)
+ lock_sem_sw_reset(dev, false);
+
mlx5_core_err(dev, "end\n");
unlock:
@@ -129,6 +277,20 @@ static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
case MLX5_NIC_IFC_NO_DRAM_NIC:
mlx5_core_warn(dev, "Expected to see disabled NIC but it is no dram nic\n");
break;
+
+ case MLX5_NIC_IFC_SW_RESET:
+ /* The IFC mode field is 3 bits, so it will read 0x7 in 2 cases:
+ * 1. PCI has been disabled (i.e. PCI-AER, PF driver unloaded
+ * and this is a VF); this is not recoverable by SW reset.
+ * Logging of this is handled elsewhere.
+ * 2. FW reset has been issued by another function, driver can
+ * be reloaded to recover after the mode switches to
+ * MLX5_NIC_IFC_DISABLED.
+ */
+ if (dev->priv.health.fatal_error != MLX5_SENSOR_PCI_COMM_ERR)
+ mlx5_core_warn(dev, "NIC SW reset in progress\n");
+ break;
+
default:
mlx5_core_warn(dev, "Expected to see disabled NIC but it is has invalid value %d\n",
nic_interface);
@@ -137,52 +299,32 @@ static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
mlx5_disable_device(dev);
}
-static void health_recover(struct work_struct *work)
-{
- struct mlx5_core_health *health;
- struct delayed_work *dwork;
- struct mlx5_core_dev *dev;
- struct mlx5_priv *priv;
- u8 nic_state;
-
- dwork = container_of(work, struct delayed_work, work);
- health = container_of(dwork, struct mlx5_core_health, recover_work);
- priv = container_of(health, struct mlx5_priv, health);
- dev = container_of(priv, struct mlx5_core_dev, priv);
-
- nic_state = mlx5_get_nic_state(dev);
- if (nic_state == MLX5_NIC_IFC_INVALID) {
- mlx5_core_err(dev, "health recovery flow aborted since the nic state is invalid\n");
- return;
- }
-
- mlx5_core_err(dev, "starting health recovery flow\n");
- mlx5_recover_device(dev);
-}
-
/* How much time to wait until health resetting the driver (in msecs) */
-#define MLX5_RECOVERY_DELAY_MSECS 60000
-static void health_care(struct work_struct *work)
+#define MLX5_RECOVERY_WAIT_MSECS 60000
+static int mlx5_health_try_recover(struct mlx5_core_dev *dev)
{
- unsigned long recover_delay = msecs_to_jiffies(MLX5_RECOVERY_DELAY_MSECS);
- struct mlx5_core_health *health;
- struct mlx5_core_dev *dev;
- struct mlx5_priv *priv;
- unsigned long flags;
+ unsigned long end;
- health = container_of(work, struct mlx5_core_health, work);
- priv = container_of(health, struct mlx5_priv, health);
- dev = container_of(priv, struct mlx5_core_dev, priv);
mlx5_core_warn(dev, "handling bad device here\n");
mlx5_handle_bad_state(dev);
+ end = jiffies + msecs_to_jiffies(MLX5_RECOVERY_WAIT_MSECS);
+ while (sensor_pci_not_working(dev)) {
+ if (time_after(jiffies, end)) {
+ mlx5_core_err(dev,
+ "health recovery flow aborted, PCI reads still not working\n");
+ return -EIO;
+ }
+ msleep(100);
+ }
- spin_lock_irqsave(&health->wq_lock, flags);
- if (!test_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags))
- schedule_delayed_work(&health->recover_work, recover_delay);
- else
- mlx5_core_err(dev,
- "new health works are not permitted at this stage\n");
- spin_unlock_irqrestore(&health->wq_lock, flags);
+ mlx5_core_err(dev, "starting health recovery flow\n");
+ mlx5_recover_device(dev);
+ if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state) ||
+ check_fatal_sensors(dev)) {
+ mlx5_core_err(dev, "health recovery failed\n");
+ return -EIO;
+ }
+ return 0;
}
static const char *hsynd_str(u8 synd)
@@ -246,6 +388,282 @@ static void print_health_info(struct mlx5_core_dev *dev)
mlx5_core_err(dev, "raw fw_ver 0x%08x\n", fw);
}
+static int
+mlx5_fw_reporter_diagnose(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg)
+{
+ struct mlx5_core_dev *dev = devlink_health_reporter_priv(reporter);
+ struct mlx5_core_health *health = &dev->priv.health;
+ struct health_buffer __iomem *h = health->health;
+ u8 synd;
+ int err;
+
+ synd = ioread8(&h->synd);
+ err = devlink_fmsg_u8_pair_put(fmsg, "Syndrome", synd);
+ if (err || !synd)
+ return err;
+ return devlink_fmsg_string_pair_put(fmsg, "Description", hsynd_str(synd));
+}
+
+struct mlx5_fw_reporter_ctx {
+ u8 err_synd;
+ int miss_counter;
+};
+
+static int
+mlx5_fw_reporter_ctx_pairs_put(struct devlink_fmsg *fmsg,
+ struct mlx5_fw_reporter_ctx *fw_reporter_ctx)
+{
+ int err;
+
+ err = devlink_fmsg_u8_pair_put(fmsg, "syndrome",
+ fw_reporter_ctx->err_synd);
+ if (err)
+ return err;
+ err = devlink_fmsg_u32_pair_put(fmsg, "fw_miss_counter",
+ fw_reporter_ctx->miss_counter);
+ if (err)
+ return err;
+ return 0;
+}
+
+static int
+mlx5_fw_reporter_heath_buffer_data_put(struct mlx5_core_dev *dev,
+ struct devlink_fmsg *fmsg)
+{
+ struct mlx5_core_health *health = &dev->priv.health;
+ struct health_buffer __iomem *h = health->health;
+ int err;
+ int i;
+
+ if (!ioread8(&h->synd))
+ return 0;
+
+ err = devlink_fmsg_pair_nest_start(fmsg, "health buffer");
+ if (err)
+ return err;
+ err = devlink_fmsg_obj_nest_start(fmsg);
+ if (err)
+ return err;
+ err = devlink_fmsg_arr_pair_nest_start(fmsg, "assert_var");
+ if (err)
+ return err;
+
+ for (i = 0; i < ARRAY_SIZE(h->assert_var); i++) {
+ err = devlink_fmsg_u32_put(fmsg, ioread32be(h->assert_var + i));
+ if (err)
+ return err;
+ }
+ err = devlink_fmsg_arr_pair_nest_end(fmsg);
+ if (err)
+ return err;
+ err = devlink_fmsg_u32_pair_put(fmsg, "assert_exit_ptr",
+ ioread32be(&h->assert_exit_ptr));
+ if (err)
+ return err;
+ err = devlink_fmsg_u32_pair_put(fmsg, "assert_callra",
+ ioread32be(&h->assert_callra));
+ if (err)
+ return err;
+ err = devlink_fmsg_u32_pair_put(fmsg, "hw_id", ioread32be(&h->hw_id));
+ if (err)
+ return err;
+ err = devlink_fmsg_u8_pair_put(fmsg, "irisc_index",
+ ioread8(&h->irisc_index));
+ if (err)
+ return err;
+ err = devlink_fmsg_u8_pair_put(fmsg, "synd", ioread8(&h->synd));
+ if (err)
+ return err;
+ err = devlink_fmsg_u32_pair_put(fmsg, "ext_synd",
+ ioread16be(&h->ext_synd));
+ if (err)
+ return err;
+ err = devlink_fmsg_u32_pair_put(fmsg, "raw_fw_ver",
+ ioread32be(&h->fw_ver));
+ if (err)
+ return err;
+ err = devlink_fmsg_obj_nest_end(fmsg);
+ if (err)
+ return err;
+ return devlink_fmsg_pair_nest_end(fmsg);
+}
+
+static int
+mlx5_fw_reporter_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *priv_ctx)
+{
+ struct mlx5_core_dev *dev = devlink_health_reporter_priv(reporter);
+ int err;
+
+ err = mlx5_fw_tracer_trigger_core_dump_general(dev);
+ if (err)
+ return err;
+
+ if (priv_ctx) {
+ struct mlx5_fw_reporter_ctx *fw_reporter_ctx = priv_ctx;
+
+ err = mlx5_fw_reporter_ctx_pairs_put(fmsg, fw_reporter_ctx);
+ if (err)
+ return err;
+ }
+
+ err = mlx5_fw_reporter_heath_buffer_data_put(dev, fmsg);
+ if (err)
+ return err;
+ return mlx5_fw_tracer_get_saved_traces_objects(dev->tracer, fmsg);
+}
+
+static void mlx5_fw_reporter_err_work(struct work_struct *work)
+{
+ struct mlx5_fw_reporter_ctx fw_reporter_ctx;
+ struct mlx5_core_health *health;
+
+ health = container_of(work, struct mlx5_core_health, report_work);
+
+ if (IS_ERR_OR_NULL(health->fw_reporter))
+ return;
+
+ fw_reporter_ctx.err_synd = health->synd;
+ fw_reporter_ctx.miss_counter = health->miss_counter;
+ if (fw_reporter_ctx.err_synd) {
+ devlink_health_report(health->fw_reporter,
+ "FW syndrom reported", &fw_reporter_ctx);
+ return;
+ }
+ if (fw_reporter_ctx.miss_counter)
+ devlink_health_report(health->fw_reporter,
+ "FW miss counter reported",
+ &fw_reporter_ctx);
+}
+
+static const struct devlink_health_reporter_ops mlx5_fw_reporter_ops = {
+ .name = "fw",
+ .diagnose = mlx5_fw_reporter_diagnose,
+ .dump = mlx5_fw_reporter_dump,
+};
+
+static int
+mlx5_fw_fatal_reporter_recover(struct devlink_health_reporter *reporter,
+ void *priv_ctx)
+{
+ struct mlx5_core_dev *dev = devlink_health_reporter_priv(reporter);
+
+ return mlx5_health_try_recover(dev);
+}
+
+#define MLX5_CR_DUMP_CHUNK_SIZE 256
+static int
+mlx5_fw_fatal_reporter_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *priv_ctx)
+{
+ struct mlx5_core_dev *dev = devlink_health_reporter_priv(reporter);
+ u32 crdump_size = dev->priv.health.crdump_size;
+ u32 *cr_data;
+ u32 data_size;
+ u32 offset;
+ int err;
+
+ if (!mlx5_core_is_pf(dev))
+ return -EPERM;
+
+ cr_data = kvmalloc(crdump_size, GFP_KERNEL);
+ if (!cr_data)
+ return -ENOMEM;
+ err = mlx5_crdump_collect(dev, cr_data);
+ if (err)
+ goto free_data;
+
+ if (priv_ctx) {
+ struct mlx5_fw_reporter_ctx *fw_reporter_ctx = priv_ctx;
+
+ err = mlx5_fw_reporter_ctx_pairs_put(fmsg, fw_reporter_ctx);
+ if (err)
+ goto free_data;
+ }
+
+ err = devlink_fmsg_arr_pair_nest_start(fmsg, "crdump_data");
+ if (err)
+ goto free_data;
+ for (offset = 0; offset < crdump_size; offset += data_size) {
+ if (crdump_size - offset < MLX5_CR_DUMP_CHUNK_SIZE)
+ data_size = crdump_size - offset;
+ else
+ data_size = MLX5_CR_DUMP_CHUNK_SIZE;
+ err = devlink_fmsg_binary_put(fmsg, cr_data, data_size);
+ if (err)
+ goto free_data;
+ }
+ err = devlink_fmsg_arr_pair_nest_end(fmsg);
+
+free_data:
+ kvfree(cr_data);
+ return err;
+}
+
+static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work)
+{
+ struct mlx5_fw_reporter_ctx fw_reporter_ctx;
+ struct mlx5_core_health *health;
+ struct mlx5_core_dev *dev;
+ struct mlx5_priv *priv;
+
+ health = container_of(work, struct mlx5_core_health, fatal_report_work);
+ priv = container_of(health, struct mlx5_priv, health);
+ dev = container_of(priv, struct mlx5_core_dev, priv);
+
+ mlx5_enter_error_state(dev, false);
+ if (IS_ERR_OR_NULL(health->fw_fatal_reporter)) {
+ if (mlx5_health_try_recover(dev))
+ mlx5_core_err(dev, "health recovery failed\n");
+ return;
+ }
+ fw_reporter_ctx.err_synd = health->synd;
+ fw_reporter_ctx.miss_counter = health->miss_counter;
+ devlink_health_report(health->fw_fatal_reporter,
+ "FW fatal error reported", &fw_reporter_ctx);
+}
+
+static const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_ops = {
+ .name = "fw_fatal",
+ .recover = mlx5_fw_fatal_reporter_recover,
+ .dump = mlx5_fw_fatal_reporter_dump,
+};
+
+#define MLX5_REPORTER_FW_GRACEFUL_PERIOD 1200000
+static void mlx5_fw_reporters_create(struct mlx5_core_dev *dev)
+{
+ struct mlx5_core_health *health = &dev->priv.health;
+ struct devlink *devlink = priv_to_devlink(dev);
+
+ health->fw_reporter =
+ devlink_health_reporter_create(devlink, &mlx5_fw_reporter_ops,
+ 0, false, dev);
+ if (IS_ERR(health->fw_reporter))
+ mlx5_core_warn(dev, "Failed to create fw reporter, err = %ld\n",
+ PTR_ERR(health->fw_reporter));
+
+ health->fw_fatal_reporter =
+ devlink_health_reporter_create(devlink,
+ &mlx5_fw_fatal_reporter_ops,
+ MLX5_REPORTER_FW_GRACEFUL_PERIOD,
+ true, dev);
+ if (IS_ERR(health->fw_fatal_reporter))
+ mlx5_core_warn(dev, "Failed to create fw fatal reporter, err = %ld\n",
+ PTR_ERR(health->fw_fatal_reporter));
+}
+
+static void mlx5_fw_reporters_destroy(struct mlx5_core_dev *dev)
+{
+ struct mlx5_core_health *health = &dev->priv.health;
+
+ if (!IS_ERR_OR_NULL(health->fw_reporter))
+ devlink_health_reporter_destroy(health->fw_reporter);
+
+ if (!IS_ERR_OR_NULL(health->fw_fatal_reporter))
+ devlink_health_reporter_destroy(health->fw_fatal_reporter);
+}
+
static unsigned long get_next_poll_jiffies(void)
{
unsigned long next;
@@ -264,7 +682,7 @@ void mlx5_trigger_health_work(struct mlx5_core_dev *dev)
spin_lock_irqsave(&health->wq_lock, flags);
if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
- queue_work(health->wq, &health->work);
+ queue_work(health->wq, &health->fatal_report_work);
else
mlx5_core_err(dev, "new health works are not permitted at this stage\n");
spin_unlock_irqrestore(&health->wq_lock, flags);
@@ -274,6 +692,9 @@ static void poll_health(struct timer_list *t)
{
struct mlx5_core_dev *dev = from_timer(dev, t, priv.health.timer);
struct mlx5_core_health *health = &dev->priv.health;
+ struct health_buffer __iomem *h = health->health;
+ u32 fatal_error;
+ u8 prev_synd;
u32 count;
if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
@@ -289,10 +710,19 @@ static void poll_health(struct timer_list *t)
if (health->miss_counter == MAX_MISSES) {
mlx5_core_err(dev, "device's health compromised - reached miss count\n");
print_health_info(dev);
+ queue_work(health->wq, &health->report_work);
}
- if (in_fatal(dev) && !health->sick) {
- health->sick = true;
+ prev_synd = health->synd;
+ health->synd = ioread8(&h->synd);
+ if (health->synd && health->synd != prev_synd)
+ queue_work(health->wq, &health->report_work);
+
+ fatal_error = check_fatal_sensors(dev);
+
+ if (fatal_error && !health->fatal_error) {
+ mlx5_core_err(dev, "Fatal error %u detected\n", fatal_error);
+ dev->priv.health.fatal_error = fatal_error;
print_health_info(dev);
mlx5_trigger_health_work(dev);
}
@@ -306,9 +736,8 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev)
struct mlx5_core_health *health = &dev->priv.health;
timer_setup(&health->timer, poll_health, 0);
- health->sick = 0;
+ health->fatal_error = MLX5_SENSOR_NO_ERR;
clear_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
- clear_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
health->health = &dev->iseg->health;
health->health_counter = &dev->iseg->health_counter;
@@ -324,7 +753,6 @@ void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health)
if (disable_health) {
spin_lock_irqsave(&health->wq_lock, flags);
set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
- set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
spin_unlock_irqrestore(&health->wq_lock, flags);
}
@@ -338,21 +766,9 @@ void mlx5_drain_health_wq(struct mlx5_core_dev *dev)
spin_lock_irqsave(&health->wq_lock, flags);
set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
- set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
spin_unlock_irqrestore(&health->wq_lock, flags);
- cancel_delayed_work_sync(&health->recover_work);
- cancel_work_sync(&health->work);
-}
-
-void mlx5_drain_health_recovery(struct mlx5_core_dev *dev)
-{
- struct mlx5_core_health *health = &dev->priv.health;
- unsigned long flags;
-
- spin_lock_irqsave(&health->wq_lock, flags);
- set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
- spin_unlock_irqrestore(&health->wq_lock, flags);
- cancel_delayed_work_sync(&dev->priv.health.recover_work);
+ cancel_work_sync(&health->report_work);
+ cancel_work_sync(&health->fatal_report_work);
}
void mlx5_health_flush(struct mlx5_core_dev *dev)
@@ -367,6 +783,7 @@ void mlx5_health_cleanup(struct mlx5_core_dev *dev)
struct mlx5_core_health *health = &dev->priv.health;
destroy_workqueue(health->wq);
+ mlx5_fw_reporters_destroy(dev);
}
int mlx5_health_init(struct mlx5_core_dev *dev)
@@ -374,20 +791,26 @@ int mlx5_health_init(struct mlx5_core_dev *dev)
struct mlx5_core_health *health;
char *name;
+ mlx5_fw_reporters_create(dev);
+
health = &dev->priv.health;
name = kmalloc(64, GFP_KERNEL);
if (!name)
- return -ENOMEM;
+ goto out_err;
strcpy(name, "mlx5_health");
strcat(name, dev_name(dev->device));
health->wq = create_singlethread_workqueue(name);
kfree(name);
if (!health->wq)
- return -ENOMEM;
+ goto out_err;
spin_lock_init(&health->wq_lock);
- INIT_WORK(&health->work, health_care);
- INIT_DELAYED_WORK(&health->recover_work, health_recover);
+ INIT_WORK(&health->fatal_report_work, mlx5_fw_fatal_reporter_err_work);
+ INIT_WORK(&health->report_work, mlx5_fw_reporter_err_work);
return 0;
+
+out_err:
+ mlx5_fw_reporters_destroy(dev);
+ return -ENOMEM;
}
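
For reference, a sketch (not in the patch) of registering one more reporter through the same devlink health API used above; the "example" name and ops are hypothetical, and the diagnose callback is simply reused.

	static const struct devlink_health_reporter_ops example_reporter_ops = {
		.name = "example",
		.diagnose = mlx5_fw_reporter_diagnose,	/* reuse the fw diagnose cb */
	};

	static void mlx5_example_reporter_create(struct mlx5_core_dev *dev)
	{
		struct devlink *devlink = priv_to_devlink(dev);
		struct devlink_health_reporter *reporter;

		/* graceful period 0, no automatic recover */
		reporter = devlink_health_reporter_create(devlink,
							  &example_reporter_ops,
							  0, false, dev);
		if (IS_ERR(reporter))
			mlx5_core_warn(dev, "Failed to create example reporter, err = %ld\n",
				       PTR_ERR(reporter));
	}
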
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
index 90cb50fe17fd..ebd81f6b556e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
@@ -122,14 +122,6 @@ static int mlx5i_get_ts_info(struct net_device *netdev,
return mlx5e_ethtool_get_ts_info(priv, info);
}
-static int mlx5i_flash_device(struct net_device *netdev,
- struct ethtool_flash *flash)
-{
- struct mlx5e_priv *priv = mlx5i_epriv(netdev);
-
- return mlx5e_ethtool_flash_device(priv, flash);
-}
-
enum mlx5_ptys_width {
MLX5_PTYS_WIDTH_1X = 1 << 0,
MLX5_PTYS_WIDTH_2X = 1 << 1,
@@ -241,7 +233,6 @@ const struct ethtool_ops mlx5i_ethtool_ops = {
.get_ethtool_stats = mlx5i_get_ethtool_stats,
.get_ringparam = mlx5i_get_ringparam,
.set_ringparam = mlx5i_set_ringparam,
- .flash_device = mlx5i_flash_device,
.get_channels = mlx5i_get_channels,
.set_channels = mlx5i_set_channels,
.get_coalesce = mlx5i_get_coalesce,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 9ca492b430d8..faf197d53743 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -87,7 +87,7 @@ int mlx5i_init(struct mlx5_core_dev *mdev,
mlx5e_set_netdev_mtu_boundaries(priv);
netdev->mtu = netdev->max_mtu;
- mlx5e_build_nic_params(mdev, &priv->rss_params, &priv->channels.params,
+ mlx5e_build_nic_params(mdev, NULL, &priv->rss_params, &priv->channels.params,
mlx5e_get_netdev_max_channels(netdev),
netdev->mtu);
mlx5i_build_nic_params(mdev, &priv->channels.params);
@@ -258,6 +258,18 @@ void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *
mlx5_core_destroy_qp(mdev, qp);
}
+int mlx5i_create_tis(struct mlx5_core_dev *mdev, u32 underlay_qpn, u32 *tisn)
+{
+ u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
+ void *tisc;
+
+ tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
+
+ MLX5_SET(tisc, tisc, underlay_qpn, underlay_qpn);
+
+ return mlx5e_create_tis(mdev, in, tisn);
+}
+
static int mlx5i_init_tx(struct mlx5e_priv *priv)
{
struct mlx5i_priv *ipriv = priv->ppriv;
@@ -269,7 +281,7 @@ static int mlx5i_init_tx(struct mlx5e_priv *priv)
return err;
}
- err = mlx5e_create_tis(priv->mdev, 0 /* tc */, ipriv->qp.qpn, &priv->tisn[0]);
+ err = mlx5i_create_tis(priv->mdev, ipriv->qp.qpn, &priv->tisn[0]);
if (err) {
mlx5_core_warn(priv->mdev, "create tis failed, %d\n", err);
goto err_destroy_underlay_qp;
@@ -365,7 +377,7 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
if (err)
goto err_close_drop_rq;
- err = mlx5e_create_direct_rqts(priv);
+ err = mlx5e_create_direct_rqts(priv, priv->direct_tir);
if (err)
goto err_destroy_indirect_rqts;
@@ -373,7 +385,7 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
if (err)
goto err_destroy_direct_rqts;
- err = mlx5e_create_direct_tirs(priv);
+ err = mlx5e_create_direct_tirs(priv, priv->direct_tir);
if (err)
goto err_destroy_indirect_tirs;
@@ -384,11 +396,11 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
return 0;
err_destroy_direct_tirs:
- mlx5e_destroy_direct_tirs(priv);
+ mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
err_destroy_indirect_tirs:
mlx5e_destroy_indirect_tirs(priv, true);
err_destroy_direct_rqts:
- mlx5e_destroy_direct_rqts(priv);
+ mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
err_destroy_indirect_rqts:
mlx5e_destroy_rqt(priv, &priv->indir_rqt);
err_close_drop_rq:
@@ -401,9 +413,9 @@ err_destroy_q_counters:
static void mlx5i_cleanup_rx(struct mlx5e_priv *priv)
{
mlx5i_destroy_flow_steering(priv);
- mlx5e_destroy_direct_tirs(priv);
+ mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
mlx5e_destroy_indirect_tirs(priv, true);
- mlx5e_destroy_direct_rqts(priv);
+ mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
mlx5e_destroy_rqt(priv, &priv->indir_rqt);
mlx5e_close_drop_rq(&priv->drop_rq);
mlx5e_destroy_q_counters(priv);
@@ -418,6 +430,7 @@ static const struct mlx5e_profile mlx5i_nic_profile = {
.cleanup_rx = mlx5i_cleanup_rx,
.enable = NULL, /* mlx5i_enable */
.disable = NULL, /* mlx5i_disable */
+ .update_rx = mlx5e_update_nic_rx,
.update_stats = NULL, /* mlx5i_update_stats */
.update_carrier = NULL, /* no HW update in IB link */
.rx_handlers.handle_rx_cqe = mlx5i_handle_rx_cqe,
@@ -526,7 +539,7 @@ static int mlx5i_open(struct net_device *netdev)
if (err)
goto err_remove_fs_underlay_qp;
- mlx5e_refresh_tirs(epriv, false);
+ epriv->profile->update_rx(epriv);
mlx5e_activate_priv_channels(epriv);
mutex_unlock(&epriv->state_lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
index e19ba3fcd1b7..c87962cab921 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
@@ -59,6 +59,8 @@ struct mlx5i_priv {
char *mlx5e_priv[0];
};
+int mlx5i_create_tis(struct mlx5_core_dev *mdev, u32 underlay_qpn, u32 *tisn);
+
/* Underlay QP create/destroy functions */
int mlx5i_create_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp);
void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
index b491b8f5fd6b..6e56fa769d2e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
@@ -210,7 +210,7 @@ static int mlx5i_pkey_open(struct net_device *netdev)
goto err_unint_underlay_qp;
}
- err = mlx5e_create_tis(mdev, 0 /* tc */, ipriv->qp.qpn, &epriv->tisn[0]);
+ err = mlx5i_create_tis(mdev, ipriv->qp.qpn, &epriv->tisn[0]);
if (err) {
mlx5_core_warn(mdev, "create child tis failed, %d\n", err);
goto err_remove_rx_uderlay_qp;
@@ -221,7 +221,7 @@ static int mlx5i_pkey_open(struct net_device *netdev)
mlx5_core_warn(mdev, "opening child channels failed, %d\n", err);
goto err_clear_state_opened_flag;
}
- mlx5e_refresh_tirs(epriv, false);
+ epriv->profile->update_rx(epriv);
mlx5e_activate_priv_channels(epriv);
mutex_unlock(&epriv->state_lock);
@@ -350,6 +350,7 @@ static const struct mlx5e_profile mlx5i_pkey_nic_profile = {
.cleanup_rx = mlx5i_pkey_cleanup_rx,
.enable = NULL,
.disable = NULL,
+ .update_rx = mlx5e_update_nic_rx,
.update_stats = NULL,
.rx_handlers.handle_rx_cqe = mlx5i_handle_rx_cqe,
.rx_handlers.handle_rx_cqe_mpwqe = NULL, /* Not supported */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
index 959605559858..c5ef2ff26465 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -305,8 +305,8 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
!mlx5_sriov_is_enabled(dev1);
#ifdef CONFIG_MLX5_ESWITCH
- roce_lag &= dev0->priv.eswitch->mode == SRIOV_NONE &&
- dev1->priv.eswitch->mode == SRIOV_NONE;
+ roce_lag &= dev0->priv.eswitch->mode == MLX5_ESWITCH_NONE &&
+ dev1->priv.eswitch->mode == MLX5_ESWITCH_NONE;
#endif
if (roce_lag)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
index 8212bfd05733..e69766393990 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2019 Mellanox Technologies. */
#include <linux/netdevice.h>
+#include <net/nexthop.h>
#include "lag.h"
#include "lag_mp.h"
#include "mlx5_core.h"
@@ -110,6 +111,8 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
struct fib_info *fi)
{
struct lag_mp *mp = &ldev->lag_mp;
+ struct fib_nh *fib_nh0, *fib_nh1;
+ unsigned int nhs;
/* Handle delete event */
if (event == FIB_EVENT_ENTRY_DEL) {
@@ -120,9 +123,11 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
}
/* Handle add/replace event */
- if (fi->fib_nhs == 1) {
+ nhs = fib_info_num_path(fi);
+ if (nhs == 1) {
if (__mlx5_lag_is_active(ldev)) {
- struct net_device *nh_dev = fi->fib_nh[0].fib_nh_dev;
+ struct fib_nh *nh = fib_info_nh(fi, 0);
+ struct net_device *nh_dev = nh->fib_nh_dev;
int i = mlx5_lag_dev_get_netdev_idx(ldev, nh_dev);
mlx5_lag_set_port_affinity(ldev, ++i);
@@ -130,14 +135,16 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
return;
}
- if (fi->fib_nhs != 2)
+ if (nhs != 2)
return;
/* Verify next hops are ports of the same hca */
- if (!(fi->fib_nh[0].fib_nh_dev == ldev->pf[0].netdev &&
- fi->fib_nh[1].fib_nh_dev == ldev->pf[1].netdev) &&
- !(fi->fib_nh[0].fib_nh_dev == ldev->pf[1].netdev &&
- fi->fib_nh[1].fib_nh_dev == ldev->pf[0].netdev)) {
+ fib_nh0 = fib_info_nh(fi, 0);
+ fib_nh1 = fib_info_nh(fi, 1);
+ if (!(fib_nh0->fib_nh_dev == ldev->pf[0].netdev &&
+ fib_nh1->fib_nh_dev == ldev->pf[1].netdev) &&
+ !(fib_nh0->fib_nh_dev == ldev->pf[1].netdev &&
+ fib_nh1->fib_nh_dev == ldev->pf[0].netdev)) {
mlx5_core_warn(ldev->pf[0].dev, "Multipath offload require two ports of the same HCA\n");
return;
}
@@ -174,7 +181,7 @@ static void mlx5_lag_fib_nexthop_event(struct mlx5_lag *ldev,
mlx5_lag_set_port_affinity(ldev, i);
}
} else if (event == FIB_EVENT_NH_ADD &&
- fi->fib_nhs == 2) {
+ fib_info_num_path(fi) == 2) {
mlx5_lag_set_port_affinity(ldev, 0);
}
}
@@ -238,6 +245,7 @@ static int mlx5_lag_fib_event(struct notifier_block *nb,
struct mlx5_fib_event_work *fib_work;
struct fib_entry_notifier_info *fen_info;
struct fib_nh_notifier_info *fnh_info;
+ struct net_device *fib_dev;
struct fib_info *fi;
if (info->family != AF_INET)
@@ -254,8 +262,13 @@ static int mlx5_lag_fib_event(struct notifier_block *nb,
fen_info = container_of(info, struct fib_entry_notifier_info,
info);
fi = fen_info->fi;
- if (fi->fib_dev != ldev->pf[0].netdev &&
- fi->fib_dev != ldev->pf[1].netdev) {
+ if (fi->nh) {
+ NL_SET_ERR_MSG_MOD(info->extack, "IPv4 route with nexthop objects is not supported");
+ return notifier_from_errno(-EINVAL);
+ }
+ fib_dev = fib_info_nh(fen_info->fi, 0)->fib_nh_dev;
+ if (fib_dev != ldev->pf[0].netdev &&
+ fib_dev != ldev->pf[1].netdev) {
return NOTIFY_DONE;
}
fib_work = mlx5_lag_init_fib_work(ldev, event);
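
An illustrative helper (not in the patch) showing the accessor-based pattern the hunks above switch to, now that a fib_info may be backed by nexthop objects and fib_nh[] can no longer be dereferenced directly; fib_uses_netdev() is hypothetical and assumes <net/nexthop.h>.

	static bool fib_uses_netdev(struct fib_info *fi, const struct net_device *dev)
	{
		unsigned int i;

		/* walk the paths through the accessors, not fi->fib_nh[] */
		for (i = 0; i < fib_info_num_path(fi); i++)
			if (fib_info_nh(fi, i)->fib_nh_dev == dev)
				return true;
		return false;
	}
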
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c
new file mode 100644
index 000000000000..ea9ee88491e5
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2019 Mellanox Technologies.
+
+#include "mlx5_core.h"
+
+int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
+ void *key, u32 sz_bytes,
+ u32 *p_key_id)
+{
+ u32 in[MLX5_ST_SZ_DW(create_encryption_key_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+ u32 sz_bits = sz_bytes * BITS_PER_BYTE;
+ u8 general_obj_key_size;
+ u64 general_obj_types;
+ void *obj, *key_p;
+ int err;
+
+ obj = MLX5_ADDR_OF(create_encryption_key_in, in, encryption_key_object);
+ key_p = MLX5_ADDR_OF(encryption_key_obj, obj, key);
+
+ general_obj_types = MLX5_CAP_GEN_64(mdev, general_obj_types);
+ if (!(general_obj_types &
+ MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY))
+ return -EINVAL;
+
+ switch (sz_bits) {
+ case 128:
+ general_obj_key_size =
+ MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_128;
+ break;
+ case 256:
+ general_obj_key_size =
+ MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_256;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ memcpy(key_p, key, sz_bytes);
+
+ MLX5_SET(encryption_key_obj, obj, key_size, general_obj_key_size);
+ MLX5_SET(encryption_key_obj, obj, key_type,
+ MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_DEK);
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
+ MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
+ MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY);
+ MLX5_SET(encryption_key_obj, obj, pd, mdev->mlx5e_res.pdn);
+
+ err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (!err)
+ *p_key_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+
+ /* avoid leaking key on the stack */
+ memzero_explicit(in, sizeof(in));
+
+ return err;
+}
+
+void mlx5_destroy_encryption_key(struct mlx5_core_dev *mdev, u32 key_id)
+{
+ u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
+ MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
+ MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, key_id);
+
+ mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
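
A hypothetical caller sketch (not part of the patch): a TLS-style user would load a data encryption key and release it when the connection is torn down. example_load_key()/example_unload_key() and the 16-byte key size are illustrative; only 128- and 256-bit keys are accepted.

	static int example_load_key(struct mlx5_core_dev *mdev,
				    u8 raw_key[16], u32 *key_id)
	{
		/* key material is copied into the FW object and wiped from the stack */
		return mlx5_create_encryption_key(mdev, raw_key, 16, key_id);
	}

	static void example_unload_key(struct mlx5_core_dev *mdev, u32 key_id)
	{
		mlx5_destroy_encryption_key(mdev, key_id);
	}
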
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
index c0fb6d72b695..3dfab91ae5f2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
@@ -7,7 +7,6 @@
#include <linux/mlx5/eq.h>
#include <linux/mlx5/cq.h>
-#define MLX5_MAX_IRQ_NAME (32)
#define MLX5_EQE_SIZE (sizeof(struct mlx5_eqe))
struct mlx5_eq_tasklet {
@@ -36,8 +35,14 @@ struct mlx5_eq {
struct mlx5_rsc_debug *dbg;
};
+struct mlx5_eq_async {
+ struct mlx5_eq core;
+ struct notifier_block irq_nb;
+};
+
struct mlx5_eq_comp {
- struct mlx5_eq core; /* Must be first */
+ struct mlx5_eq core;
+ struct notifier_block irq_nb;
struct mlx5_eq_tasklet tasklet_ctx;
struct list_head list;
};
@@ -70,7 +75,7 @@ int mlx5_eq_table_create(struct mlx5_core_dev *dev);
void mlx5_eq_table_destroy(struct mlx5_core_dev *dev);
int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq);
-int mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq);
+void mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq);
struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn);
struct mlx5_eq *mlx5_get_async_eq(struct mlx5_core_dev *dev);
void mlx5_cq_tasklet_cb(unsigned long data);
@@ -92,7 +97,4 @@ void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev);
struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev);
#endif
-int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb);
-int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb);
-
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/geneve.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/geneve.c
new file mode 100644
index 000000000000..23361a9ae4fa
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/geneve.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#include <linux/kernel.h>
+#include "mlx5_core.h"
+#include "geneve.h"
+
+struct mlx5_geneve {
+ struct mlx5_core_dev *mdev;
+ __be16 opt_class;
+ u8 opt_type;
+ u32 obj_id;
+ struct mutex sync_lock; /* protect GENEVE obj operations */
+ u32 refcount;
+};
+
+static int mlx5_geneve_tlv_option_create(struct mlx5_core_dev *mdev,
+ __be16 class,
+ u8 type,
+ u8 len)
+{
+ u32 in[MLX5_ST_SZ_DW(create_geneve_tlv_option_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
+ u64 general_obj_types;
+ void *hdr, *opt;
+ u16 obj_id;
+ int err;
+
+ general_obj_types = MLX5_CAP_GEN_64(mdev, general_obj_types);
+ if (!(general_obj_types & MLX5_GENERAL_OBJ_TYPES_CAP_GENEVE_TLV_OPT))
+ return -EINVAL;
+
+ hdr = MLX5_ADDR_OF(create_geneve_tlv_option_in, in, hdr);
+ opt = MLX5_ADDR_OF(create_geneve_tlv_option_in, in, geneve_tlv_opt);
+
+ MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type, MLX5_OBJ_TYPE_GENEVE_TLV_OPT);
+
+ MLX5_SET(geneve_tlv_option, opt, option_class, be16_to_cpu(class));
+ MLX5_SET(geneve_tlv_option, opt, option_type, type);
+ MLX5_SET(geneve_tlv_option, opt, option_data_length, len);
+
+ err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (err)
+ return err;
+
+ obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+ return obj_id;
+}
+
+static void mlx5_geneve_tlv_option_destroy(struct mlx5_core_dev *mdev, u16 obj_id)
+{
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
+ u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
+
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_GENEVE_TLV_OPT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, obj_id);
+
+ mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_geneve_tlv_option_add(struct mlx5_geneve *geneve, struct geneve_opt *opt)
+{
+ int res = 0;
+
+ if (IS_ERR_OR_NULL(geneve))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&geneve->sync_lock);
+
+ if (geneve->refcount) {
+ if (geneve->opt_class == opt->opt_class &&
+ geneve->opt_type == opt->type) {
+ /* We already have TLV options obj allocated */
+ geneve->refcount++;
+ } else {
+ /* TLV options obj allocated, but its params
+ * do not match the new request.
+ * We support only one such object.
+ */
+ mlx5_core_warn(geneve->mdev,
+ "Won't create Geneve TLV opt object with class:type:len = 0x%x:0x%x:%d (another class:type already exists)\n",
+ be16_to_cpu(opt->opt_class),
+ opt->type,
+ opt->length);
+ res = -EOPNOTSUPP;
+ goto unlock;
+ }
+ } else {
+ /* We don't have any TLV options obj allocated */
+
+ res = mlx5_geneve_tlv_option_create(geneve->mdev,
+ opt->opt_class,
+ opt->type,
+ opt->length);
+ if (res < 0) {
+ mlx5_core_warn(geneve->mdev,
+ "Failed creating Geneve TLV opt object class:type:len = 0x%x:0x%x:%d (err=%d)\n",
+ be16_to_cpu(opt->opt_class),
+ opt->type, opt->length, res);
+ goto unlock;
+ }
+ geneve->opt_class = opt->opt_class;
+ geneve->opt_type = opt->type;
+ geneve->obj_id = res;
+ geneve->refcount++;
+ }
+
+unlock:
+ mutex_unlock(&geneve->sync_lock);
+ return res;
+}
+
+void mlx5_geneve_tlv_option_del(struct mlx5_geneve *geneve)
+{
+ if (IS_ERR_OR_NULL(geneve))
+ return;
+
+ mutex_lock(&geneve->sync_lock);
+ if (--geneve->refcount == 0) {
+ /* We've just removed the last user of Geneve option.
+ * Now delete the object in FW.
+ */
+ mlx5_geneve_tlv_option_destroy(geneve->mdev, geneve->obj_id);
+
+ geneve->opt_class = 0;
+ geneve->opt_type = 0;
+ geneve->obj_id = 0;
+ }
+ mutex_unlock(&geneve->sync_lock);
+}
+
+struct mlx5_geneve *mlx5_geneve_create(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_geneve *geneve =
+ kzalloc(sizeof(*geneve), GFP_KERNEL);
+
+ if (!geneve)
+ return ERR_PTR(-ENOMEM);
+ geneve->mdev = mdev;
+ mutex_init(&geneve->sync_lock);
+
+ return geneve;
+}
+
+void mlx5_geneve_destroy(struct mlx5_geneve *geneve)
+{
+ if (IS_ERR_OR_NULL(geneve))
+ return;
+
+ /* Lockless since we are unloading */
+ if (geneve->refcount)
+ mlx5_geneve_tlv_option_destroy(geneve->mdev, geneve->obj_id);
+
+ kfree(geneve);
+}
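
An illustrative usage sketch (not part of the patch): an offload path that needs to match on a Geneve TLV option brackets the rule lifetime with add/del. mdev->geneve (the object created by mlx5_geneve_create() at probe time) and example_offload_geneve_opt() are assumptions of the sketch.

	static int example_offload_geneve_opt(struct mlx5_core_dev *mdev,
					      struct geneve_opt *opt)
	{
		int res;

		/* only one class/type pair is supported at a time */
		res = mlx5_geneve_tlv_option_add(mdev->geneve, opt);
		if (res < 0)
			return res;

		/* ... install the steering rule that matches the option ... */

		/* on rule teardown, drop the reference; the FW object is
		 * destroyed when the last user goes away
		 */
		mlx5_geneve_tlv_option_del(mdev->geneve);
		return 0;
	}
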
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/geneve.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/geneve.h
new file mode 100644
index 000000000000..adee0cbba19c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/geneve.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#ifndef __MLX5_GENEVE_H__
+#define __MLX5_GENEVE_H__
+
+#include <net/geneve.h>
+#include <linux/mlx5/driver.h>
+
+struct mlx5_geneve;
+
+#ifdef CONFIG_MLX5_ESWITCH
+
+struct mlx5_geneve *mlx5_geneve_create(struct mlx5_core_dev *mdev);
+void mlx5_geneve_destroy(struct mlx5_geneve *geneve);
+
+int mlx5_geneve_tlv_option_add(struct mlx5_geneve *geneve, struct geneve_opt *opt);
+void mlx5_geneve_tlv_option_del(struct mlx5_geneve *geneve);
+
+#else /* CONFIG_MLX5_ESWITCH */
+
+static inline struct mlx5_geneve
+*mlx5_geneve_create(struct mlx5_core_dev *mdev) { return NULL; }
+static inline void
+mlx5_geneve_destroy(struct mlx5_geneve *geneve) {}
+static inline int
+mlx5_geneve_tlv_option_add(struct mlx5_geneve *geneve, struct geneve_opt *opt) { return 0; }
+static inline void
+mlx5_geneve_tlv_option_del(struct mlx5_geneve *geneve) {}
+
+#endif /* CONFIG_MLX5_ESWITCH */
+
+#endif /* __MLX5_GENEVE_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
index 397a2847867a..b99d469e4e64 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
@@ -41,6 +41,9 @@ int mlx5_core_reserve_gids(struct mlx5_core_dev *dev, unsigned int count);
void mlx5_core_unreserve_gids(struct mlx5_core_dev *dev, unsigned int count);
int mlx5_core_reserved_gid_alloc(struct mlx5_core_dev *dev, int *gid_index);
void mlx5_core_reserved_gid_free(struct mlx5_core_dev *dev, int gid_index);
+int mlx5_crdump_enable(struct mlx5_core_dev *dev);
+void mlx5_crdump_disable(struct mlx5_core_dev *dev);
+int mlx5_crdump_collect(struct mlx5_core_dev *dev, u32 *cr_data);
/* TODO move to lib/events.h */
@@ -76,4 +79,9 @@ struct mlx5_pme_stats {
void mlx5_get_pme_stats(struct mlx5_core_dev *dev, struct mlx5_pme_stats *stats);
int mlx5_notifier_call_chain(struct mlx5_events *events, unsigned int event, void *data);
+/* Crypto */
+int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
+ void *key, u32 sz_bytes, u32 *p_key_id);
+void mlx5_destroy_encryption_key(struct mlx5_core_dev *mdev, u32 key_id);
+
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
index a71d5b9c7ab2..3118e8d66407 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
@@ -67,6 +67,7 @@ static int del_l2table_entry_cmd(struct mlx5_core_dev *dev, u32 index)
struct l2table_node {
struct l2addr_node node;
u32 index; /* index in HW l2 table */
+ int ref_count;
};
struct mlx5_mpfs {
@@ -134,8 +135,8 @@ int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac)
{
struct mlx5_mpfs *mpfs = dev->priv.mpfs;
struct l2table_node *l2addr;
+ int err = 0;
u32 index;
- int err;
if (!MLX5_ESWITCH_MANAGER(dev))
return 0;
@@ -144,30 +145,35 @@ int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac)
l2addr = l2addr_hash_find(mpfs->hash, mac, struct l2table_node);
if (l2addr) {
- err = -EEXIST;
- goto abort;
+ l2addr->ref_count++;
+ goto out;
}
err = alloc_l2table_index(mpfs, &index);
if (err)
- goto abort;
+ goto out;
l2addr = l2addr_hash_add(mpfs->hash, mac, struct l2table_node, GFP_KERNEL);
if (!l2addr) {
- free_l2table_index(mpfs, index);
err = -ENOMEM;
- goto abort;
+ goto hash_add_err;
}
- l2addr->index = index;
err = set_l2table_entry_cmd(dev, index, mac);
- if (err) {
- l2addr_hash_del(l2addr);
- free_l2table_index(mpfs, index);
- }
+ if (err)
+ goto set_table_entry_err;
+
+ l2addr->index = index;
+ l2addr->ref_count = 1;
mlx5_core_dbg(dev, "MPFS mac added %pM, index (%d)\n", mac, index);
-abort:
+ goto out;
+
+set_table_entry_err:
+ l2addr_hash_del(l2addr);
+hash_add_err:
+ free_l2table_index(mpfs, index);
+out:
mutex_unlock(&mpfs->lock);
return err;
}
@@ -190,6 +196,9 @@ int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac)
goto unlock;
}
+ if (--l2addr->ref_count > 0)
+ goto unlock;
+
index = l2addr->index;
del_l2table_entry_cmd(dev, index);
l2addr_hash_del(l2addr);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c
new file mode 100644
index 000000000000..6b774e0c2766
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c
@@ -0,0 +1,316 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Mellanox Technologies */
+
+#include <linux/pci.h>
+#include "mlx5_core.h"
+#include "pci_vsc.h"
+
+#define MLX5_EXTRACT_C(source, offset, size) \
+ ((((u32)(source)) >> (offset)) & MLX5_ONES32(size))
+#define MLX5_EXTRACT(src, start, len) \
+ (((len) == 32) ? (src) : MLX5_EXTRACT_C(src, start, len))
+#define MLX5_ONES32(size) \
+ ((size) ? (0xffffffff >> (32 - (size))) : 0)
+#define MLX5_MASK32(offset, size) \
+ (MLX5_ONES32(size) << (offset))
+#define MLX5_MERGE_C(rsrc1, rsrc2, start, len) \
+ ((((rsrc2) << (start)) & (MLX5_MASK32((start), (len)))) | \
+ ((rsrc1) & (~MLX5_MASK32((start), (len)))))
+#define MLX5_MERGE(rsrc1, rsrc2, start, len) \
+ (((len) == 32) ? (rsrc2) : MLX5_MERGE_C(rsrc1, rsrc2, start, len))
+#define vsc_read(dev, offset, val) \
+ pci_read_config_dword((dev)->pdev, (dev)->vsc_addr + (offset), (val))
+#define vsc_write(dev, offset, val) \
+ pci_write_config_dword((dev)->pdev, (dev)->vsc_addr + (offset), (val))
+#define VSC_MAX_RETRIES 2048
+
+enum {
+ VSC_CTRL_OFFSET = 0x4,
+ VSC_COUNTER_OFFSET = 0x8,
+ VSC_SEMAPHORE_OFFSET = 0xc,
+ VSC_ADDR_OFFSET = 0x10,
+ VSC_DATA_OFFSET = 0x14,
+
+ VSC_FLAG_BIT_OFFS = 31,
+ VSC_FLAG_BIT_LEN = 1,
+
+ VSC_SYND_BIT_OFFS = 30,
+ VSC_SYND_BIT_LEN = 1,
+
+ VSC_ADDR_BIT_OFFS = 0,
+ VSC_ADDR_BIT_LEN = 30,
+
+ VSC_SPACE_BIT_OFFS = 0,
+ VSC_SPACE_BIT_LEN = 16,
+
+ VSC_SIZE_VLD_BIT_OFFS = 28,
+ VSC_SIZE_VLD_BIT_LEN = 1,
+
+ VSC_STATUS_BIT_OFFS = 29,
+ VSC_STATUS_BIT_LEN = 3,
+};
+
+void mlx5_pci_vsc_init(struct mlx5_core_dev *dev)
+{
+ if (!mlx5_core_is_pf(dev))
+ return;
+
+ dev->vsc_addr = pci_find_capability(dev->pdev,
+ PCI_CAP_ID_VNDR);
+ if (!dev->vsc_addr)
+ mlx5_core_warn(dev, "Failed to get valid vendor specific ID\n");
+}
+
+int mlx5_vsc_gw_lock(struct mlx5_core_dev *dev)
+{
+ u32 counter = 0;
+ int retries = 0;
+ u32 lock_val;
+ int ret;
+
+ pci_cfg_access_lock(dev->pdev);
+ do {
+ if (retries > VSC_MAX_RETRIES) {
+ ret = -EBUSY;
+ goto pci_unlock;
+ }
+
+ /* Check if semaphore is already locked */
+ ret = vsc_read(dev, VSC_SEMAPHORE_OFFSET, &lock_val);
+ if (ret)
+ goto pci_unlock;
+
+ if (lock_val) {
+ retries++;
+ usleep_range(1000, 2000);
+ continue;
+ }
+
+ /* Read the counter and write it to the semaphore; if the value
+ * read back from the semaphore equals the counter, the semaphore
+ * was acquired successfully.
+ */
+ ret = vsc_read(dev, VSC_COUNTER_OFFSET, &counter);
+ if (ret)
+ goto pci_unlock;
+
+ ret = vsc_write(dev, VSC_SEMAPHORE_OFFSET, counter);
+ if (ret)
+ goto pci_unlock;
+
+ ret = vsc_read(dev, VSC_SEMAPHORE_OFFSET, &lock_val);
+ if (ret)
+ goto pci_unlock;
+
+ retries++;
+ } while (counter != lock_val);
+
+ return 0;
+
+pci_unlock:
+ pci_cfg_access_unlock(dev->pdev);
+ return ret;
+}
+
+int mlx5_vsc_gw_unlock(struct mlx5_core_dev *dev)
+{
+ int ret;
+
+ ret = vsc_write(dev, VSC_SEMAPHORE_OFFSET, MLX5_VSC_UNLOCK);
+ pci_cfg_access_unlock(dev->pdev);
+ return ret;
+}
+
+int mlx5_vsc_gw_set_space(struct mlx5_core_dev *dev, u16 space,
+ u32 *ret_space_size)
+{
+ int ret;
+ u32 val = 0;
+
+ if (!mlx5_vsc_accessible(dev))
+ return -EINVAL;
+
+ if (ret_space_size)
+ *ret_space_size = 0;
+
+ /* Get a unique val */
+ ret = vsc_read(dev, VSC_CTRL_OFFSET, &val);
+ if (ret)
+ goto out;
+
+ /* Try to select the requested space */
+ val = MLX5_MERGE(val, space, VSC_SPACE_BIT_OFFS, VSC_SPACE_BIT_LEN);
+ ret = vsc_write(dev, VSC_CTRL_OFFSET, val);
+ if (ret)
+ goto out;
+
+ /* Verify the space selection took effect */
+ ret = vsc_read(dev, VSC_CTRL_OFFSET, &val);
+ if (ret)
+ goto out;
+
+ if (MLX5_EXTRACT(val, VSC_STATUS_BIT_OFFS, VSC_STATUS_BIT_LEN) == 0)
+ return -EINVAL;
+
+ /* Get space max address if indicated by size valid bit */
+ if (ret_space_size &&
+ MLX5_EXTRACT(val, VSC_SIZE_VLD_BIT_OFFS, VSC_SIZE_VLD_BIT_LEN)) {
+ ret = vsc_read(dev, VSC_ADDR_OFFSET, &val);
+ if (ret) {
+ mlx5_core_warn(dev, "Failed to get max space size\n");
+ goto out;
+ }
+ *ret_space_size = MLX5_EXTRACT(val, VSC_ADDR_BIT_OFFS,
+ VSC_ADDR_BIT_LEN);
+ }
+ return 0;
+
+out:
+ return ret;
+}
+
+static int mlx5_vsc_wait_on_flag(struct mlx5_core_dev *dev, u8 expected_val)
+{
+ int retries = 0;
+ u32 flag;
+ int ret;
+
+ do {
+ if (retries > VSC_MAX_RETRIES)
+ return -EBUSY;
+
+ ret = vsc_read(dev, VSC_ADDR_OFFSET, &flag);
+ if (ret)
+ return ret;
+ flag = MLX5_EXTRACT(flag, VSC_FLAG_BIT_OFFS, VSC_FLAG_BIT_LEN);
+ retries++;
+
+ if ((retries & 0xf) == 0)
+ usleep_range(1000, 2000);
+
+ } while (flag != expected_val);
+
+ return 0;
+}
+
+static int mlx5_vsc_gw_write(struct mlx5_core_dev *dev, unsigned int address,
+ u32 data)
+{
+ int ret;
+
+ if (MLX5_EXTRACT(address, VSC_SYND_BIT_OFFS,
+ VSC_FLAG_BIT_LEN + VSC_SYND_BIT_LEN))
+ return -EINVAL;
+
+ /* Set flag to 0x1 */
+ address = MLX5_MERGE(address, 1, VSC_FLAG_BIT_OFFS, 1);
+ ret = vsc_write(dev, VSC_DATA_OFFSET, data);
+ if (ret)
+ goto out;
+
+ ret = vsc_write(dev, VSC_ADDR_OFFSET, address);
+ if (ret)
+ goto out;
+
+ /* Wait for the flag to be cleared */
+ ret = mlx5_vsc_wait_on_flag(dev, 0);
+
+out:
+ return ret;
+}
+
+static int mlx5_vsc_gw_read(struct mlx5_core_dev *dev, unsigned int address,
+ u32 *data)
+{
+ int ret;
+
+ if (MLX5_EXTRACT(address, VSC_SYND_BIT_OFFS,
+ VSC_FLAG_BIT_LEN + VSC_SYND_BIT_LEN))
+ return -EINVAL;
+
+ ret = vsc_write(dev, VSC_ADDR_OFFSET, address);
+ if (ret)
+ goto out;
+
+ ret = mlx5_vsc_wait_on_flag(dev, 1);
+ if (ret)
+ goto out;
+
+ ret = vsc_read(dev, VSC_DATA_OFFSET, data);
+out:
+ return ret;
+}
+
+static int mlx5_vsc_gw_read_fast(struct mlx5_core_dev *dev,
+ unsigned int read_addr,
+ unsigned int *next_read_addr,
+ u32 *data)
+{
+ int ret;
+
+ ret = mlx5_vsc_gw_read(dev, read_addr, data);
+ if (ret)
+ goto out;
+
+ ret = vsc_read(dev, VSC_ADDR_OFFSET, next_read_addr);
+ if (ret)
+ goto out;
+
+ *next_read_addr = MLX5_EXTRACT(*next_read_addr, VSC_ADDR_BIT_OFFS,
+ VSC_ADDR_BIT_LEN);
+
+ if (*next_read_addr <= read_addr)
+ ret = -EINVAL;
+out:
+ return ret;
+}
+
+int mlx5_vsc_gw_read_block_fast(struct mlx5_core_dev *dev, u32 *data,
+ int length)
+{
+ unsigned int next_read_addr = 0;
+ unsigned int read_addr = 0;
+
+ while (read_addr < length) {
+ if (mlx5_vsc_gw_read_fast(dev, read_addr, &next_read_addr,
+ &data[(read_addr >> 2)]))
+ return read_addr;
+
+ read_addr = next_read_addr;
+ }
+ return length;
+}
+
+int mlx5_vsc_sem_set_space(struct mlx5_core_dev *dev, u16 space,
+ enum mlx5_vsc_state state)
+{
+ u32 data, id = 0;
+ int ret;
+
+ ret = mlx5_vsc_gw_set_space(dev, MLX5_SEMAPHORE_SPACE_DOMAIN, NULL);
+ if (ret) {
+ mlx5_core_warn(dev, "Failed to set gw space %d\n", ret);
+ return ret;
+ }
+
+ if (state == MLX5_VSC_LOCK) {
+ /* Get a unique ID based on the counter */
+ ret = vsc_read(dev, VSC_COUNTER_OFFSET, &id);
+ if (ret)
+ return ret;
+ }
+
+ /* Try to modify lock */
+ ret = mlx5_vsc_gw_write(dev, space, id);
+ if (ret)
+ return ret;
+
+ /* Verify lock was modified */
+ ret = mlx5_vsc_gw_read(dev, space, &data);
+ if (ret)
+ return -EINVAL;
+
+ if (data != id)
+ return -EBUSY;
+
+ return 0;
+}
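For reference, the helpers above are intended to be used as a sequence: take the semaphore, select a space, read, release. A minimal sketch follows (the wrapper name and buffer sizing are illustrative; only the mlx5_vsc_* helpers come from this file):

/* Illustrative sketch, not part of this patch: dump one VSC space, e.g.
 * MLX5_VSC_SPACE_SCAN_CRSPACE, into a caller-provided buffer.
 */
static int example_vsc_dump_space(struct mlx5_core_dev *dev, u16 space,
				  u32 *buf, u32 buf_size_bytes)
{
	u32 space_size = 0;
	int ret;

	if (!mlx5_vsc_accessible(dev))
		return -EOPNOTSUPP;

	ret = mlx5_vsc_gw_lock(dev);	/* take the VSC semaphore */
	if (ret)
		return ret;

	ret = mlx5_vsc_gw_set_space(dev, space, &space_size);
	if (ret)
		goto unlock;

	/* Returns the number of bytes actually read. */
	ret = mlx5_vsc_gw_read_block_fast(dev, buf,
					  min_t(u32, space_size,
						buf_size_bytes));
unlock:
	mlx5_vsc_gw_unlock(dev);
	return ret;
}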
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.h
new file mode 100644
index 000000000000..64272a6d7754
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2019 Mellanox Technologies */
+
+#ifndef __MLX5_PCI_VSC_H__
+#define __MLX5_PCI_VSC_H__
+
+enum mlx5_vsc_state {
+ MLX5_VSC_UNLOCK,
+ MLX5_VSC_LOCK,
+};
+
+enum {
+ MLX5_VSC_SPACE_SCAN_CRSPACE = 0x7,
+};
+
+void mlx5_pci_vsc_init(struct mlx5_core_dev *dev);
+int mlx5_vsc_gw_lock(struct mlx5_core_dev *dev);
+int mlx5_vsc_gw_unlock(struct mlx5_core_dev *dev);
+int mlx5_vsc_gw_set_space(struct mlx5_core_dev *dev, u16 space,
+ u32 *ret_space_size);
+int mlx5_vsc_gw_read_block_fast(struct mlx5_core_dev *dev, u32 *data,
+ int length);
+
+static inline bool mlx5_vsc_accessible(struct mlx5_core_dev *dev)
+{
+ return !!dev->vsc_addr;
+}
+
+int mlx5_vsc_sem_set_space(struct mlx5_core_dev *dev, u16 space,
+ enum mlx5_vsc_state state);
+
+#endif /* __MLX5_PCI_VSC_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 23d53163ce15..b15b27a497fc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -56,6 +56,7 @@
#include "fs_core.h"
#include "lib/mpfs.h"
#include "eswitch.h"
+#include "devlink.h"
#include "lib/mlx5.h"
#include "fpga/core.h"
#include "fpga/ipsec.h"
@@ -63,7 +64,9 @@
#include "accel/tls.h"
#include "lib/clock.h"
#include "lib/vxlan.h"
+#include "lib/geneve.h"
#include "lib/devcom.h"
+#include "lib/pci_vsc.h"
#include "diag/fw_tracer.h"
#include "ecpf.h"
@@ -169,18 +172,28 @@ static struct mlx5_profile profile[] = {
#define FW_INIT_TIMEOUT_MILI 2000
#define FW_INIT_WAIT_MS 2
-#define FW_PRE_INIT_TIMEOUT_MILI 10000
+#define FW_PRE_INIT_TIMEOUT_MILI 120000
+#define FW_INIT_WARN_MESSAGE_INTERVAL 20000
-static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili)
+static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili,
+ u32 warn_time_mili)
{
+ unsigned long warn = jiffies + msecs_to_jiffies(warn_time_mili);
unsigned long end = jiffies + msecs_to_jiffies(max_wait_mili);
int err = 0;
+ BUILD_BUG_ON(FW_PRE_INIT_TIMEOUT_MILI < FW_INIT_WARN_MESSAGE_INTERVAL);
+
while (fw_initializing(dev)) {
if (time_after(jiffies, end)) {
err = -EBUSY;
break;
}
+ if (warn_time_mili && time_after(jiffies, warn)) {
+ mlx5_core_warn(dev, "Waiting for FW initialization, timeout abort in %ds\n",
+ jiffies_to_msecs(end - warn) / 1000);
+ warn = jiffies + msecs_to_jiffies(warn_time_mili);
+ }
msleep(FW_INIT_WAIT_MS);
}
@@ -721,8 +734,7 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct pci_dev *pdev,
struct mlx5_priv *priv = &dev->priv;
int err = 0;
- priv->pci_dev_data = id->driver_data;
-
+ mutex_init(&dev->pci_status_mutex);
pci_set_drvdata(dev->pdev, dev);
dev->bar_addr = pci_resource_start(pdev, 0);
@@ -761,6 +773,8 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct pci_dev *pdev,
goto err_clr_master;
}
+ mlx5_pci_vsc_init(dev);
+
return 0;
err_clr_master:
@@ -794,10 +808,16 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
goto err_devcom;
}
+ err = mlx5_irq_table_init(dev);
+ if (err) {
+ mlx5_core_err(dev, "failed to initialize irq table\n");
+ goto err_devcom;
+ }
+
err = mlx5_eq_table_init(dev);
if (err) {
mlx5_core_err(dev, "failed to initialize eq\n");
- goto err_devcom;
+ goto err_irq_cleanup;
}
err = mlx5_events_init(dev);
@@ -821,6 +841,7 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
mlx5_init_clock(dev);
dev->vxlan = mlx5_vxlan_create(dev);
+ dev->geneve = mlx5_geneve_create(dev);
err = mlx5_init_rl_table(dev);
if (err) {
@@ -834,37 +855,38 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
goto err_rl_cleanup;
}
- err = mlx5_eswitch_init(dev);
+ err = mlx5_sriov_init(dev);
if (err) {
- mlx5_core_err(dev, "Failed to init eswitch %d\n", err);
+ mlx5_core_err(dev, "Failed to init sriov %d\n", err);
goto err_mpfs_cleanup;
}
- err = mlx5_sriov_init(dev);
+ err = mlx5_eswitch_init(dev);
if (err) {
- mlx5_core_err(dev, "Failed to init sriov %d\n", err);
- goto err_eswitch_cleanup;
+ mlx5_core_err(dev, "Failed to init eswitch %d\n", err);
+ goto err_sriov_cleanup;
}
err = mlx5_fpga_init(dev);
if (err) {
mlx5_core_err(dev, "Failed to init fpga device %d\n", err);
- goto err_sriov_cleanup;
+ goto err_eswitch_cleanup;
}
dev->tracer = mlx5_fw_tracer_create(dev);
return 0;
-err_sriov_cleanup:
- mlx5_sriov_cleanup(dev);
err_eswitch_cleanup:
mlx5_eswitch_cleanup(dev->priv.eswitch);
+err_sriov_cleanup:
+ mlx5_sriov_cleanup(dev);
err_mpfs_cleanup:
mlx5_mpfs_cleanup(dev);
err_rl_cleanup:
mlx5_cleanup_rl_table(dev);
err_tables_cleanup:
+ mlx5_geneve_destroy(dev->geneve);
mlx5_vxlan_destroy(dev->vxlan);
mlx5_cleanup_mkey_table(dev);
mlx5_cleanup_qp_table(dev);
@@ -873,6 +895,8 @@ err_events_cleanup:
mlx5_events_cleanup(dev);
err_eq_cleanup:
mlx5_eq_table_cleanup(dev);
+err_irq_cleanup:
+ mlx5_irq_table_cleanup(dev);
err_devcom:
mlx5_devcom_unregister_device(dev->priv.devcom);
@@ -883,10 +907,11 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
{
mlx5_fw_tracer_destroy(dev->tracer);
mlx5_fpga_cleanup(dev);
- mlx5_sriov_cleanup(dev);
mlx5_eswitch_cleanup(dev->priv.eswitch);
+ mlx5_sriov_cleanup(dev);
mlx5_mpfs_cleanup(dev);
mlx5_cleanup_rl_table(dev);
+ mlx5_geneve_destroy(dev->geneve);
mlx5_vxlan_destroy(dev->vxlan);
mlx5_cleanup_clock(dev);
mlx5_cleanup_reserved_gids(dev);
@@ -895,6 +920,7 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
mlx5_cq_debugfs_cleanup(dev);
mlx5_events_cleanup(dev);
mlx5_eq_table_cleanup(dev);
+ mlx5_irq_table_cleanup(dev);
mlx5_devcom_unregister_device(dev->priv.devcom);
}
@@ -911,7 +937,7 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot)
/* wait for firmware to accept initialization segments configurations
*/
- err = wait_fw_init(dev, FW_PRE_INIT_TIMEOUT_MILI);
+ err = wait_fw_init(dev, FW_PRE_INIT_TIMEOUT_MILI, FW_INIT_WARN_MESSAGE_INTERVAL);
if (err) {
mlx5_core_err(dev, "Firmware over %d MS in pre-initializing state, aborting\n",
FW_PRE_INIT_TIMEOUT_MILI);
@@ -924,7 +950,7 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot)
return err;
}
- err = wait_fw_init(dev, FW_INIT_TIMEOUT_MILI);
+ err = wait_fw_init(dev, FW_INIT_TIMEOUT_MILI, 0);
if (err) {
mlx5_core_err(dev, "Firmware over %d MS in initializing state, aborting\n",
FW_INIT_TIMEOUT_MILI);
@@ -1028,6 +1054,12 @@ static int mlx5_load(struct mlx5_core_dev *dev)
mlx5_events_start(dev);
mlx5_pagealloc_start(dev);
+ err = mlx5_irq_table_create(dev);
+ if (err) {
+ mlx5_core_err(dev, "Failed to alloc IRQs\n");
+ goto err_irq_table;
+ }
+
err = mlx5_eq_table_create(dev);
if (err) {
mlx5_core_err(dev, "Failed to create EQs\n");
@@ -1099,6 +1131,8 @@ err_fpga_start:
err_fw_tracer:
mlx5_eq_table_destroy(dev);
err_eq_table:
+ mlx5_irq_table_destroy(dev);
+err_irq_table:
mlx5_pagealloc_stop(dev);
mlx5_events_stop(dev);
mlx5_put_uars_page(dev, dev->priv.uar);
@@ -1115,6 +1149,7 @@ static void mlx5_unload(struct mlx5_core_dev *dev)
mlx5_fpga_device_stop(dev);
mlx5_fw_tracer_cleanup(dev->tracer);
mlx5_eq_table_destroy(dev);
+ mlx5_irq_table_destroy(dev);
mlx5_pagealloc_stop(dev);
mlx5_events_stop(dev);
mlx5_put_uars_page(dev, dev->priv.uar);
@@ -1183,7 +1218,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup)
int err = 0;
if (cleanup)
- mlx5_drain_health_recovery(dev);
+ mlx5_drain_health_wq(dev);
mutex_lock(&dev->intf_state_mutex);
if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
@@ -1210,17 +1245,6 @@ out:
return err;
}
-static const struct devlink_ops mlx5_devlink_ops = {
-#ifdef CONFIG_MLX5_ESWITCH
- .eswitch_mode_set = mlx5_devlink_eswitch_mode_set,
- .eswitch_mode_get = mlx5_devlink_eswitch_mode_get,
- .eswitch_inline_mode_set = mlx5_devlink_eswitch_inline_mode_set,
- .eswitch_inline_mode_get = mlx5_devlink_eswitch_inline_mode_get,
- .eswitch_encap_mode_set = mlx5_devlink_eswitch_encap_mode_set,
- .eswitch_encap_mode_get = mlx5_devlink_eswitch_encap_mode_get,
-#endif
-};
-
static int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
{
struct mlx5_priv *priv = &dev->priv;
@@ -1230,7 +1254,6 @@ static int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
INIT_LIST_HEAD(&priv->ctx_list);
spin_lock_init(&priv->ctx_lock);
- mutex_init(&dev->pci_status_mutex);
mutex_init(&dev->intf_state_mutex);
mutex_init(&priv->bfregs.reg_head.lock);
@@ -1282,9 +1305,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id)
struct devlink *devlink;
int err;
- devlink = devlink_alloc(&mlx5_devlink_ops, sizeof(*dev));
+ devlink = mlx5_devlink_alloc();
if (!devlink) {
- dev_err(&pdev->dev, "kzalloc failed\n");
+ dev_err(&pdev->dev, "devlink alloc failed\n");
return -ENOMEM;
}
@@ -1292,6 +1315,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id)
dev->device = &pdev->dev;
dev->pdev = pdev;
+ dev->coredev_type = id->driver_data & MLX5_PCI_DEV_IS_VF ?
+ MLX5_COREDEV_VF : MLX5_COREDEV_PF;
+
err = mlx5_mdev_init(dev, prof_sel);
if (err)
goto mdev_init_err;
@@ -1312,10 +1338,14 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id)
request_module_nowait(MLX5_IB_MOD);
- err = devlink_register(devlink, &pdev->dev);
+ err = mlx5_devlink_register(devlink, &pdev->dev);
if (err)
goto clean_load;
+ err = mlx5_crdump_enable(dev);
+ if (err)
+ dev_err(&pdev->dev, "mlx5_crdump_enable failed with error code %d\n", err);
+
pci_save_state(pdev);
return 0;
@@ -1327,7 +1357,7 @@ err_load_one:
pci_init_err:
mlx5_mdev_uninit(dev);
mdev_init_err:
- devlink_free(devlink);
+ mlx5_devlink_free(devlink);
return err;
}
@@ -1337,7 +1367,8 @@ static void remove_one(struct pci_dev *pdev)
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
struct devlink *devlink = priv_to_devlink(dev);
- devlink_unregister(devlink);
+ mlx5_crdump_disable(dev);
+ mlx5_devlink_unregister(devlink);
mlx5_unregister_device(dev);
if (mlx5_unload_one(dev, true)) {
@@ -1348,7 +1379,7 @@ static void remove_one(struct pci_dev *pdev)
mlx5_pci_close(dev);
mlx5_mdev_uninit(dev);
- devlink_free(devlink);
+ mlx5_devlink_free(devlink);
}
static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
@@ -1359,12 +1390,10 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
mlx5_core_info(dev, "%s was called\n", __func__);
mlx5_enter_error_state(dev, false);
+ mlx5_error_sw_reset(dev);
mlx5_unload_one(dev, false);
- /* In case of kernel call drain the health wq */
- if (state) {
- mlx5_drain_health_wq(dev);
- mlx5_pci_disable_device(dev);
- }
+ mlx5_drain_health_wq(dev);
+ mlx5_pci_disable_device(dev);
return state == pci_channel_io_perm_failure ?
PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
@@ -1532,7 +1561,8 @@ MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table);
void mlx5_disable_device(struct mlx5_core_dev *dev)
{
- mlx5_pci_err_detected(dev->pdev, 0);
+ mlx5_error_sw_reset(dev);
+ mlx5_unload_one(dev, false);
}
void mlx5_recover_device(struct mlx5_core_dev *dev)
@@ -1570,7 +1600,7 @@ static int __init init(void)
get_random_bytes(&sw_owner_id, sizeof(sw_owner_id));
mlx5_core_verify_params();
- mlx5_fpga_ipsec_build_fs_cmds();
+ mlx5_accel_ipsec_build_fs_cmds();
mlx5_register_debugfs();
err = pci_register_driver(&mlx5_core_driver);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 22e69d4813e4..471bbc48bc1f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -111,6 +111,11 @@ enum {
MLX5_DRIVER_SYND = 0xbadd00de,
};
+enum mlx5_semaphore_space_address {
+ MLX5_SEMAPHORE_SPACE_DOMAIN = 0xA,
+ MLX5_SEMAPHORE_SW_RESET = 0x20,
+};
+
int mlx5_query_hca_caps(struct mlx5_core_dev *dev);
int mlx5_query_board_id(struct mlx5_core_dev *dev);
int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id);
@@ -118,6 +123,7 @@ int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev);
int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev);
int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev);
void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force);
+void mlx5_error_sw_reset(struct mlx5_core_dev *dev);
void mlx5_disable_device(struct mlx5_core_dev *dev);
void mlx5_recover_device(struct mlx5_core_dev *dev);
int mlx5_sriov_init(struct mlx5_core_dev *dev);
@@ -153,6 +159,19 @@ int mlx5_query_qcam_reg(struct mlx5_core_dev *mdev, u32 *qcam,
void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev);
void mlx5_lag_remove(struct mlx5_core_dev *dev);
+int mlx5_irq_table_init(struct mlx5_core_dev *dev);
+void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev);
+int mlx5_irq_table_create(struct mlx5_core_dev *dev);
+void mlx5_irq_table_destroy(struct mlx5_core_dev *dev);
+int mlx5_irq_attach_nb(struct mlx5_irq_table *irq_table, int vecidx,
+ struct notifier_block *nb);
+int mlx5_irq_detach_nb(struct mlx5_irq_table *irq_table, int vecidx,
+ struct notifier_block *nb);
+struct cpumask *
+mlx5_irq_get_affinity_mask(struct mlx5_irq_table *irq_table, int vecidx);
+struct cpu_rmap *mlx5_irq_get_rmap(struct mlx5_irq_table *table);
+int mlx5_irq_get_num_comp(struct mlx5_irq_table *table);
+
int mlx5_events_init(struct mlx5_core_dev *dev);
void mlx5_events_cleanup(struct mlx5_core_dev *dev);
void mlx5_events_start(struct mlx5_core_dev *dev);
@@ -184,7 +203,10 @@ int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode);
MLX5_CAP_MCAM_FEATURE((mdev), mtpps_fs) && \
MLX5_CAP_MCAM_FEATURE((mdev), mtpps_enh_out_per_adj))
-int mlx5_firmware_flash(struct mlx5_core_dev *dev, const struct firmware *fw);
+int mlx5_firmware_flash(struct mlx5_core_dev *dev, const struct firmware *fw,
+ struct netlink_ext_ack *extack);
+int mlx5_fw_version_query(struct mlx5_core_dev *dev,
+ u32 *running_ver, u32 *stored_ver);
void mlx5e_init(void);
void mlx5e_cleanup(void);
@@ -213,7 +235,7 @@ enum {
MLX5_NIC_IFC_FULL = 0,
MLX5_NIC_IFC_DISABLED = 1,
MLX5_NIC_IFC_NO_DRAM_NIC = 2,
- MLX5_NIC_IFC_INVALID = 3
+ MLX5_NIC_IFC_SW_RESET = 7
};
u8 mlx5_get_nic_state(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
index ea744d8466ea..9231b39d18b2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
@@ -38,15 +38,12 @@
void mlx5_init_mkey_table(struct mlx5_core_dev *dev)
{
- struct mlx5_mkey_table *table = &dev->priv.mkey_table;
-
- memset(table, 0, sizeof(*table));
- rwlock_init(&table->lock);
- INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
+ xa_init_flags(&dev->priv.mkey_table, XA_FLAGS_LOCK_IRQ);
}
void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev)
{
+ WARN_ON(!xa_empty(&dev->priv.mkey_table));
}
int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
@@ -56,8 +53,8 @@ int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
mlx5_async_cbk_t callback,
struct mlx5_async_work *context)
{
- struct mlx5_mkey_table *table = &dev->priv.mkey_table;
u32 lout[MLX5_ST_SZ_DW(create_mkey_out)] = {0};
+ struct xarray *mkeys = &dev->priv.mkey_table;
u32 mkey_index;
void *mkc;
int err;
@@ -88,12 +85,10 @@ int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
mlx5_core_dbg(dev, "out 0x%x, key 0x%x, mkey 0x%x\n",
mkey_index, key, mkey->key);
- /* connect to mkey tree */
- write_lock_irq(&table->lock);
- err = radix_tree_insert(&table->tree, mlx5_base_mkey(mkey->key), mkey);
- write_unlock_irq(&table->lock);
+ err = xa_err(xa_store_irq(mkeys, mlx5_base_mkey(mkey->key), mkey,
+ GFP_KERNEL));
if (err) {
- mlx5_core_warn(dev, "failed radix tree insert of mkey 0x%x, %d\n",
+ mlx5_core_warn(dev, "failed xarray insert of mkey 0x%x, %d\n",
mlx5_base_mkey(mkey->key), err);
mlx5_core_destroy_mkey(dev, mkey);
}
@@ -114,17 +109,17 @@ EXPORT_SYMBOL(mlx5_core_create_mkey);
int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
struct mlx5_core_mkey *mkey)
{
- struct mlx5_mkey_table *table = &dev->priv.mkey_table;
u32 out[MLX5_ST_SZ_DW(destroy_mkey_out)] = {0};
u32 in[MLX5_ST_SZ_DW(destroy_mkey_in)] = {0};
+ struct xarray *mkeys = &dev->priv.mkey_table;
struct mlx5_core_mkey *deleted_mkey;
unsigned long flags;
- write_lock_irqsave(&table->lock, flags);
- deleted_mkey = radix_tree_delete(&table->tree, mlx5_base_mkey(mkey->key));
- write_unlock_irqrestore(&table->lock, flags);
+ xa_lock_irqsave(mkeys, flags);
+ deleted_mkey = __xa_erase(mkeys, mlx5_base_mkey(mkey->key));
+ xa_unlock_irqrestore(mkeys, flags);
if (!deleted_mkey) {
- mlx5_core_dbg(dev, "failed radix tree delete of mkey 0x%x\n",
+ mlx5_core_dbg(dev, "failed xarray delete of mkey 0x%x\n",
mlx5_base_mkey(mkey->key));
return -ENOENT;
}
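With the conversion above, lookups that previously walked the radix tree under table->lock become plain xarray loads. A minimal sketch (the lookup wrapper is illustrative; mlx5_base_mkey() and priv.mkey_table come from this patch):

/* Illustrative sketch, not part of this patch: an mkey lookup now reduces
 * to an xarray load keyed by the base mkey (variant bits masked off).
 */
static struct mlx5_core_mkey *example_mkey_lookup(struct mlx5_core_dev *dev,
						  u32 key)
{
	return xa_load(&dev->priv.mkey_table, mlx5_base_mkey(key));
}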
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
new file mode 100644
index 000000000000..373981a659c7
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -0,0 +1,334 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#include <linux/interrupt.h>
+#include <linux/notifier.h>
+#include <linux/module.h>
+#include <linux/mlx5/driver.h>
+#include "mlx5_core.h"
+#ifdef CONFIG_RFS_ACCEL
+#include <linux/cpu_rmap.h>
+#endif
+
+#define MLX5_MAX_IRQ_NAME (32)
+
+struct mlx5_irq {
+ struct atomic_notifier_head nh;
+ cpumask_var_t mask;
+ char name[MLX5_MAX_IRQ_NAME];
+};
+
+struct mlx5_irq_table {
+ struct mlx5_irq *irq;
+ int nvec;
+#ifdef CONFIG_RFS_ACCEL
+ struct cpu_rmap *rmap;
+#endif
+};
+
+int mlx5_irq_table_init(struct mlx5_core_dev *dev)
+{
+ struct mlx5_irq_table *irq_table;
+
+ irq_table = kvzalloc(sizeof(*irq_table), GFP_KERNEL);
+ if (!irq_table)
+ return -ENOMEM;
+
+ dev->priv.irq_table = irq_table;
+ return 0;
+}
+
+void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev)
+{
+ kvfree(dev->priv.irq_table);
+}
+
+int mlx5_irq_get_num_comp(struct mlx5_irq_table *table)
+{
+ return table->nvec - MLX5_IRQ_VEC_COMP_BASE;
+}
+
+static struct mlx5_irq *mlx5_irq_get(struct mlx5_core_dev *dev, int vecidx)
+{
+ struct mlx5_irq_table *irq_table = dev->priv.irq_table;
+
+ return &irq_table->irq[vecidx];
+}
+
+int mlx5_irq_attach_nb(struct mlx5_irq_table *irq_table, int vecidx,
+ struct notifier_block *nb)
+{
+ struct mlx5_irq *irq;
+
+ irq = &irq_table->irq[vecidx];
+ return atomic_notifier_chain_register(&irq->nh, nb);
+}
+
+int mlx5_irq_detach_nb(struct mlx5_irq_table *irq_table, int vecidx,
+ struct notifier_block *nb)
+{
+ struct mlx5_irq *irq;
+
+ irq = &irq_table->irq[vecidx];
+ return atomic_notifier_chain_unregister(&irq->nh, nb);
+}
+
+static irqreturn_t mlx5_irq_int_handler(int irq, void *nh)
+{
+ atomic_notifier_call_chain(nh, 0, NULL);
+ return IRQ_HANDLED;
+}
+
+static void irq_set_name(char *name, int vecidx)
+{
+ if (vecidx == 0) {
+ snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_async");
+ return;
+ }
+
+ snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d",
+ vecidx - MLX5_IRQ_VEC_COMP_BASE);
+ return;
+}
+
+static int request_irqs(struct mlx5_core_dev *dev, int nvec)
+{
+ char name[MLX5_MAX_IRQ_NAME];
+ int err;
+ int i;
+
+ for (i = 0; i < nvec; i++) {
+ struct mlx5_irq *irq = mlx5_irq_get(dev, i);
+ int irqn = pci_irq_vector(dev->pdev, i);
+
+ irq_set_name(name, i);
+ ATOMIC_INIT_NOTIFIER_HEAD(&irq->nh);
+ snprintf(irq->name, MLX5_MAX_IRQ_NAME,
+ "%s@pci:%s", name, pci_name(dev->pdev));
+ err = request_irq(irqn, mlx5_irq_int_handler, 0, irq->name,
+ &irq->nh);
+ if (err) {
+ mlx5_core_err(dev, "Failed to request irq\n");
+ goto err_request_irq;
+ }
+ }
+ return 0;
+
+err_request_irq:
+ for (; i >= 0; i--) {
+ struct mlx5_irq *irq = mlx5_irq_get(dev, i);
+ int irqn = pci_irq_vector(dev->pdev, i);
+
+ free_irq(irqn, &irq->nh);
+ }
+ return err;
+}
+
+static void irq_clear_rmap(struct mlx5_core_dev *dev)
+{
+#ifdef CONFIG_RFS_ACCEL
+ struct mlx5_irq_table *irq_table = dev->priv.irq_table;
+
+ free_irq_cpu_rmap(irq_table->rmap);
+#endif
+}
+
+static int irq_set_rmap(struct mlx5_core_dev *mdev)
+{
+ int err = 0;
+#ifdef CONFIG_RFS_ACCEL
+ struct mlx5_irq_table *irq_table = mdev->priv.irq_table;
+ int num_affinity_vec;
+ int vecidx;
+
+ num_affinity_vec = mlx5_irq_get_num_comp(irq_table);
+ irq_table->rmap = alloc_irq_cpu_rmap(num_affinity_vec);
+ if (!irq_table->rmap) {
+ err = -ENOMEM;
+ mlx5_core_err(mdev, "Failed to allocate cpu_rmap. err %d", err);
+ goto err_out;
+ }
+
+ vecidx = MLX5_IRQ_VEC_COMP_BASE;
+ for (; vecidx < irq_table->nvec; vecidx++) {
+ err = irq_cpu_rmap_add(irq_table->rmap,
+ pci_irq_vector(mdev->pdev, vecidx));
+ if (err) {
+ mlx5_core_err(mdev, "irq_cpu_rmap_add failed. err %d",
+ err);
+ goto err_irq_cpu_rmap_add;
+ }
+ }
+ return 0;
+
+err_irq_cpu_rmap_add:
+ irq_clear_rmap(mdev);
+err_out:
+#endif
+ return err;
+}
+
+/* Completion IRQ vectors */
+
+static int set_comp_irq_affinity_hint(struct mlx5_core_dev *mdev, int i)
+{
+ int vecidx = MLX5_IRQ_VEC_COMP_BASE + i;
+ struct mlx5_irq *irq;
+ int irqn;
+
+ irq = mlx5_irq_get(mdev, vecidx);
+ irqn = pci_irq_vector(mdev->pdev, vecidx);
+ if (!zalloc_cpumask_var(&irq->mask, GFP_KERNEL)) {
+ mlx5_core_warn(mdev, "zalloc_cpumask_var failed");
+ return -ENOMEM;
+ }
+
+ cpumask_set_cpu(cpumask_local_spread(i, mdev->priv.numa_node),
+ irq->mask);
+ if (IS_ENABLED(CONFIG_SMP) &&
+ irq_set_affinity_hint(irqn, irq->mask))
+ mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x",
+ irqn);
+
+ return 0;
+}
+
+static void clear_comp_irq_affinity_hint(struct mlx5_core_dev *mdev, int i)
+{
+ int vecidx = MLX5_IRQ_VEC_COMP_BASE + i;
+ struct mlx5_irq *irq;
+ int irqn;
+
+ irq = mlx5_irq_get(mdev, vecidx);
+ irqn = pci_irq_vector(mdev->pdev, vecidx);
+ irq_set_affinity_hint(irqn, NULL);
+ free_cpumask_var(irq->mask);
+}
+
+static int set_comp_irq_affinity_hints(struct mlx5_core_dev *mdev)
+{
+ int nvec = mlx5_irq_get_num_comp(mdev->priv.irq_table);
+ int err;
+ int i;
+
+ for (i = 0; i < nvec; i++) {
+ err = set_comp_irq_affinity_hint(mdev, i);
+ if (err)
+ goto err_out;
+ }
+
+ return 0;
+
+err_out:
+ for (i--; i >= 0; i--)
+ clear_comp_irq_affinity_hint(mdev, i);
+
+ return err;
+}
+
+static void clear_comp_irqs_affinity_hints(struct mlx5_core_dev *mdev)
+{
+ int nvec = mlx5_irq_get_num_comp(mdev->priv.irq_table);
+ int i;
+
+ for (i = 0; i < nvec; i++)
+ clear_comp_irq_affinity_hint(mdev, i);
+}
+
+struct cpumask *
+mlx5_irq_get_affinity_mask(struct mlx5_irq_table *irq_table, int vecidx)
+{
+ return irq_table->irq[vecidx].mask;
+}
+
+#ifdef CONFIG_RFS_ACCEL
+struct cpu_rmap *mlx5_irq_get_rmap(struct mlx5_irq_table *irq_table)
+{
+ return irq_table->rmap;
+}
+#endif
+
+static void unrequest_irqs(struct mlx5_core_dev *dev)
+{
+ struct mlx5_irq_table *table = dev->priv.irq_table;
+ int i;
+
+ for (i = 0; i < table->nvec; i++)
+ free_irq(pci_irq_vector(dev->pdev, i),
+ &mlx5_irq_get(dev, i)->nh);
+}
+
+int mlx5_irq_table_create(struct mlx5_core_dev *dev)
+{
+ struct mlx5_priv *priv = &dev->priv;
+ struct mlx5_irq_table *table = priv->irq_table;
+ int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ?
+ MLX5_CAP_GEN(dev, max_num_eqs) :
+ 1 << MLX5_CAP_GEN(dev, log_max_eq);
+ int nvec;
+ int err;
+
+ nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
+ MLX5_IRQ_VEC_COMP_BASE;
+ nvec = min_t(int, nvec, num_eqs);
+ if (nvec <= MLX5_IRQ_VEC_COMP_BASE)
+ return -ENOMEM;
+
+ table->irq = kcalloc(nvec, sizeof(*table->irq), GFP_KERNEL);
+ if (!table->irq)
+ return -ENOMEM;
+
+ nvec = pci_alloc_irq_vectors(dev->pdev, MLX5_IRQ_VEC_COMP_BASE + 1,
+ nvec, PCI_IRQ_MSIX);
+ if (nvec < 0) {
+ err = nvec;
+ goto err_free_irq;
+ }
+
+ table->nvec = nvec;
+
+ err = irq_set_rmap(dev);
+ if (err)
+ goto err_set_rmap;
+
+ err = request_irqs(dev, nvec);
+ if (err)
+ goto err_request_irqs;
+
+ err = set_comp_irq_affinity_hints(dev);
+ if (err) {
+ mlx5_core_err(dev, "Failed to alloc affinity hint cpumask\n");
+ goto err_set_affinity;
+ }
+
+ return 0;
+
+err_set_affinity:
+ unrequest_irqs(dev);
+err_request_irqs:
+ irq_clear_rmap(dev);
+err_set_rmap:
+ pci_free_irq_vectors(dev->pdev);
+err_free_irq:
+ kfree(table->irq);
+ return err;
+}
+
+void mlx5_irq_table_destroy(struct mlx5_core_dev *dev)
+{
+ struct mlx5_irq_table *table = dev->priv.irq_table;
+ int i;
+
+ /* free_irq requires that the affinity hint and rmap be cleared
+ * before it is called. This is why there is asymmetry with set_rmap,
+ * which should be called after alloc_irq but before request_irq.
+ */
+ irq_clear_rmap(dev);
+ clear_comp_irqs_affinity_hints(dev);
+ for (i = 0; i < table->nvec; i++)
+ free_irq(pci_irq_vector(dev->pdev, i),
+ &mlx5_irq_get(dev, i)->nh);
+ pci_free_irq_vectors(dev->pdev);
+ kfree(table->irq);
+}
+
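For reference, consumers no longer request IRQs themselves; they register on a vector's atomic notifier chain. A minimal sketch follows (the handler and enable helper are illustrative; mlx5_irq_attach_nb()/mlx5_irq_detach_nb() and priv.irq_table come from this patch):

/* Illustrative sketch, not part of this patch: hooking a consumer (such as
 * an EQ) onto one of the vectors managed above.
 */
static int example_vec_handler(struct notifier_block *nb, unsigned long action,
			       void *data)
{
	/* container_of(nb, ...) would recover the owning object here and
	 * poll it; action and data are unused by the chain above.
	 */
	return NOTIFY_OK;
}

static int example_vec_enable(struct mlx5_core_dev *dev,
			      struct notifier_block *nb, int vecidx)
{
	nb->notifier_call = example_vec_handler;
	return mlx5_irq_attach_nb(dev->priv.irq_table, vecidx, nb);
}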
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
index 86f77456f873..17ce9dd56b13 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
@@ -106,10 +106,10 @@ static int mlx5_rdma_enable_roce_steering(struct mlx5_core_dev *dev)
return 0;
-destroy_flow_table:
- mlx5_destroy_flow_table(ft);
destroy_flow_group:
mlx5_destroy_flow_group(fg);
+destroy_flow_table:
+ mlx5_destroy_flow_table(ft);
free:
kvfree(spec);
kvfree(flow_group_in);
@@ -126,7 +126,7 @@ static void mlx5_rdma_make_default_gid(struct mlx5_core_dev *dev, union ib_gid *
{
u8 hw_id[ETH_ALEN];
- mlx5_query_nic_vport_mac_address(dev, 0, hw_id);
+ mlx5_query_mac_address(dev, hw_id);
gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
addrconf_addr_eui48(&gid->raw[8], hw_id);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index a249b3c3843d..61fcfd8b39b4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -74,17 +74,11 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
int err;
int vf;
- if (sriov->enabled_vfs) {
- mlx5_core_warn(dev,
- "failed to enable SRIOV on device, already enabled with %d vfs\n",
- sriov->enabled_vfs);
- return -EBUSY;
- }
-
if (!MLX5_ESWITCH_MANAGER(dev))
goto enable_vfs_hca;
- err = mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs, SRIOV_LEGACY);
+ mlx5_eswitch_update_num_of_vfs(dev->priv.eswitch, num_vfs);
+ err = mlx5_eswitch_enable(dev->priv.eswitch, MLX5_ESWITCH_LEGACY);
if (err) {
mlx5_core_warn(dev,
"failed to enable eswitch SRIOV (%d)\n", err);
@@ -99,7 +93,6 @@ enable_vfs_hca:
continue;
}
sriov->vfs_ctx[vf].enabled = 1;
- sriov->enabled_vfs++;
if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) {
err = sriov_restore_guids(dev, vf);
if (err) {
@@ -118,13 +111,11 @@ enable_vfs_hca:
static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev)
{
struct mlx5_core_sriov *sriov = &dev->priv.sriov;
+ int num_vfs = pci_num_vf(dev->pdev);
int err;
int vf;
- if (!sriov->enabled_vfs)
- goto out;
-
- for (vf = 0; vf < sriov->num_vfs; vf++) {
+ for (vf = num_vfs - 1; vf >= 0; vf--) {
if (!sriov->vfs_ctx[vf].enabled)
continue;
err = mlx5_core_disable_hca(dev, vf + 1);
@@ -133,12 +124,10 @@ static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev)
continue;
}
sriov->vfs_ctx[vf].enabled = 0;
- sriov->enabled_vfs--;
}
-out:
if (MLX5_ESWITCH_MANAGER(dev))
- mlx5_eswitch_disable_sriov(dev->priv.eswitch);
+ mlx5_eswitch_disable(dev->priv.eswitch);
if (mlx5_wait_for_pages(dev, &dev->priv.vfs_pages))
mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
@@ -191,13 +180,11 @@ int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
int mlx5_sriov_attach(struct mlx5_core_dev *dev)
{
- struct mlx5_core_sriov *sriov = &dev->priv.sriov;
-
- if (!mlx5_core_is_pf(dev) || !sriov->num_vfs)
+ if (!mlx5_core_is_pf(dev) || !pci_num_vf(dev->pdev))
return 0;
/* If sriov VFs exist in PCI level, enable them in device level */
- return mlx5_device_enable_sriov(dev, sriov->num_vfs);
+ return mlx5_device_enable_sriov(dev, pci_num_vf(dev->pdev));
}
void mlx5_sriov_detach(struct mlx5_core_dev *dev)
@@ -208,6 +195,30 @@ void mlx5_sriov_detach(struct mlx5_core_dev *dev)
mlx5_device_disable_sriov(dev);
}
+static u16 mlx5_get_max_vfs(struct mlx5_core_dev *dev)
+{
+ u16 host_total_vfs;
+ const u32 *out;
+
+ if (mlx5_core_is_ecpf_esw_manager(dev)) {
+ out = mlx5_esw_query_functions(dev);
+
+ /* Old FW doesn't support getting total_vfs from esw func
+ * but supports getting it from pci_sriov.
+ */
+ if (IS_ERR(out))
+ goto done;
+ host_total_vfs = MLX5_GET(query_esw_functions_out, out,
+ host_params_context.host_total_vfs);
+ kvfree(out);
+ if (host_total_vfs)
+ return host_total_vfs;
+ }
+
+done:
+ return pci_sriov_get_totalvfs(dev->pdev);
+}
+
int mlx5_sriov_init(struct mlx5_core_dev *dev)
{
struct mlx5_core_sriov *sriov = &dev->priv.sriov;
@@ -218,6 +229,7 @@ int mlx5_sriov_init(struct mlx5_core_dev *dev)
return 0;
total_vfs = pci_sriov_get_totalvfs(pdev);
+ sriov->max_vfs = mlx5_get_max_vfs(dev);
sriov->num_vfs = pci_num_vf(pdev);
sriov->vfs_ctx = kcalloc(total_vfs, sizeof(*sriov->vfs_ctx), GFP_KERNEL);
if (!sriov->vfs_ctx)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 95cdc8cbcba4..c912d82ca64b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -34,6 +34,7 @@
#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
+#include <linux/mlx5/eswitch.h>
#include "mlx5_core.h"
/* Mutex to hold while enabling or disabling RoCE */
@@ -155,11 +156,12 @@ int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
}
int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
- u16 vport, u8 *addr)
+ u16 vport, bool other, u8 *addr)
{
- u32 *out;
int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
+ u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {};
u8 *out_addr;
+ u32 *out;
int err;
out = kvzalloc(outlen, GFP_KERNEL);
@@ -169,7 +171,12 @@ int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
out_addr = MLX5_ADDR_OF(query_nic_vport_context_out, out,
nic_vport_context.permanent_address);
- err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
+ MLX5_SET(query_nic_vport_context_in, in, opcode,
+ MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
+ MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
+ MLX5_SET(query_nic_vport_context_in, in, other_vport, other);
+
+ err = mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
if (!err)
ether_addr_copy(addr, &out_addr[2]);
@@ -178,6 +185,12 @@ int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_address);
+int mlx5_query_mac_address(struct mlx5_core_dev *mdev, u8 *addr)
+{
+ return mlx5_query_nic_vport_mac_address(mdev, 0, false, addr);
+}
+EXPORT_SYMBOL_GPL(mlx5_query_mac_address);
+
int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
u16 vport, u8 *addr)
{
@@ -194,9 +207,7 @@ int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
MLX5_SET(modify_nic_vport_context_in, in,
field_select.permanent_address, 1);
MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
-
- if (vport)
- MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
+ MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
in, nic_vport_context);
@@ -291,9 +302,7 @@ int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
MLX5_SET(query_nic_vport_context_in, in, allowed_list_type, list_type);
MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
-
- if (vport)
- MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
+ MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
if (err)
@@ -483,7 +492,7 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
MLX5_SET(modify_nic_vport_context_in, in,
field_select.node_guid, 1);
MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
- MLX5_SET(modify_nic_vport_context_in, in, other_vport, !!vport);
+ MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
in, nic_vport_context);
@@ -1157,3 +1166,17 @@ u64 mlx5_query_nic_system_image_guid(struct mlx5_core_dev *mdev)
return tmp;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_system_image_guid);
+
+/**
+ * mlx5_eswitch_get_total_vports - Get total vports of the eswitch
+ *
+ * @dev: Pointer to core device
+ *
+ * mlx5_eswitch_get_total_vports returns total number of vports for
+ * the eswitch.
+ */
+u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev)
+{
+ return MLX5_SPECIAL_VPORTS(dev) + mlx5_core_max_vfs(dev);
+}
+EXPORT_SYMBOL(mlx5_eswitch_get_total_vports);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
index 1f87cce421e0..f1ec58c9e9e3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
@@ -134,11 +134,6 @@ static inline void mlx5_wq_cyc_update_db_record(struct mlx5_wq_cyc *wq)
*wq->db = cpu_to_be32(wq->wqe_ctr);
}
-static inline u16 mlx5_wq_cyc_get_ctr_wrap_cnt(struct mlx5_wq_cyc *wq, u16 ctr)
-{
- return ctr >> wq->fbc.log_sz;
-}
-
static inline u16 mlx5_wq_cyc_ctr2ix(struct mlx5_wq_cyc *wq, u16 ctr)
{
return ctr & wq->fbc.sz_m1;
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h b/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h
index 14c0c62f8e73..c50e74ab02c4 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h
@@ -5,6 +5,7 @@
#define _MLXFW_H
#include <linux/firmware.h>
+#include <linux/netlink.h>
enum mlxfw_fsm_state {
MLXFW_FSM_STATE_IDLE,
@@ -57,6 +58,10 @@ struct mlxfw_dev_ops {
void (*fsm_cancel)(struct mlxfw_dev *mlxfw_dev, u32 fwhandle);
void (*fsm_release)(struct mlxfw_dev *mlxfw_dev, u32 fwhandle);
+
+ void (*status_notify)(struct mlxfw_dev *mlxfw_dev,
+ const char *msg, const char *comp_name,
+ u32 done_bytes, u32 total_bytes);
};
struct mlxfw_dev {
@@ -67,11 +72,13 @@ struct mlxfw_dev {
#if IS_REACHABLE(CONFIG_MLXFW)
int mlxfw_firmware_flash(struct mlxfw_dev *mlxfw_dev,
- const struct firmware *firmware);
+ const struct firmware *firmware,
+ struct netlink_ext_ack *extack);
#else
static inline
int mlxfw_firmware_flash(struct mlxfw_dev *mlxfw_dev,
- const struct firmware *firmware)
+ const struct firmware *firmware,
+ struct netlink_ext_ack *extack)
{
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
index 240c027e5f07..67990406cba2 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
@@ -39,8 +39,19 @@ static const char * const mlxfw_fsm_state_err_str[] = {
"unknown error"
};
+static void mlxfw_status_notify(struct mlxfw_dev *mlxfw_dev,
+ const char *msg, const char *comp_name,
+ u32 done_bytes, u32 total_bytes)
+{
+ if (!mlxfw_dev->ops->status_notify)
+ return;
+ mlxfw_dev->ops->status_notify(mlxfw_dev, msg, comp_name,
+ done_bytes, total_bytes);
+}
+
static int mlxfw_fsm_state_wait(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
- enum mlxfw_fsm_state fsm_state)
+ enum mlxfw_fsm_state fsm_state,
+ struct netlink_ext_ack *extack)
{
enum mlxfw_fsm_state_err fsm_state_err;
enum mlxfw_fsm_state curr_fsm_state;
@@ -57,11 +68,13 @@ retry:
if (fsm_state_err != MLXFW_FSM_STATE_ERR_OK) {
pr_err("Firmware flash failed: %s\n",
mlxfw_fsm_state_err_str[fsm_state_err]);
+ NL_SET_ERR_MSG_MOD(extack, "Firmware flash failed");
return -EINVAL;
}
if (curr_fsm_state != fsm_state) {
if (--times == 0) {
pr_err("Timeout reached on FSM state change");
+ NL_SET_ERR_MSG_MOD(extack, "Timeout reached on FSM state change");
return -ETIMEDOUT;
}
msleep(MLXFW_FSM_STATE_WAIT_CYCLE_MS);
@@ -76,16 +89,20 @@ retry:
static int mlxfw_flash_component(struct mlxfw_dev *mlxfw_dev,
u32 fwhandle,
- struct mlxfw_mfa2_component *comp)
+ struct mlxfw_mfa2_component *comp,
+ struct netlink_ext_ack *extack)
{
u16 comp_max_write_size;
u8 comp_align_bits;
u32 comp_max_size;
+ char comp_name[8];
u16 block_size;
u8 *block_ptr;
u32 offset;
int err;
+ sprintf(comp_name, "%u", comp->index);
+
err = mlxfw_dev->ops->component_query(mlxfw_dev, comp->index,
&comp_max_size, &comp_align_bits,
&comp_max_write_size);
@@ -96,6 +113,7 @@ static int mlxfw_flash_component(struct mlxfw_dev *mlxfw_dev,
if (comp->data_size > comp_max_size) {
pr_err("Component %d is of size %d which is bigger than limit %d\n",
comp->index, comp->data_size, comp_max_size);
+ NL_SET_ERR_MSG_MOD(extack, "Component is bigger than limit");
return -EINVAL;
}
@@ -103,6 +121,7 @@ static int mlxfw_flash_component(struct mlxfw_dev *mlxfw_dev,
comp_align_bits);
pr_debug("Component update\n");
+ mlxfw_status_notify(mlxfw_dev, "Updating component", comp_name, 0, 0);
err = mlxfw_dev->ops->fsm_component_update(mlxfw_dev, fwhandle,
comp->index,
comp->data_size);
@@ -110,11 +129,13 @@ static int mlxfw_flash_component(struct mlxfw_dev *mlxfw_dev,
return err;
err = mlxfw_fsm_state_wait(mlxfw_dev, fwhandle,
- MLXFW_FSM_STATE_DOWNLOAD);
+ MLXFW_FSM_STATE_DOWNLOAD, extack);
if (err)
goto err_out;
pr_debug("Component download\n");
+ mlxfw_status_notify(mlxfw_dev, "Downloading component",
+ comp_name, 0, comp->data_size);
for (offset = 0;
offset < MLXFW_ALIGN_UP(comp->data_size, comp_align_bits);
offset += comp_max_write_size) {
@@ -126,15 +147,20 @@ static int mlxfw_flash_component(struct mlxfw_dev *mlxfw_dev,
offset);
if (err)
goto err_out;
+ mlxfw_status_notify(mlxfw_dev, "Downloading component",
+ comp_name, offset + block_size,
+ comp->data_size);
}
pr_debug("Component verify\n");
+ mlxfw_status_notify(mlxfw_dev, "Verifying component", comp_name, 0, 0);
err = mlxfw_dev->ops->fsm_component_verify(mlxfw_dev, fwhandle,
comp->index);
if (err)
goto err_out;
- err = mlxfw_fsm_state_wait(mlxfw_dev, fwhandle, MLXFW_FSM_STATE_LOCKED);
+ err = mlxfw_fsm_state_wait(mlxfw_dev, fwhandle,
+ MLXFW_FSM_STATE_LOCKED, extack);
if (err)
goto err_out;
return 0;
@@ -145,7 +171,8 @@ err_out:
}
static int mlxfw_flash_components(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
- struct mlxfw_mfa2_file *mfa2_file)
+ struct mlxfw_mfa2_file *mfa2_file,
+ struct netlink_ext_ack *extack)
{
u32 component_count;
int err;
@@ -156,6 +183,7 @@ static int mlxfw_flash_components(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
&component_count);
if (err) {
pr_err("Could not find device PSID in MFA2 file\n");
+ NL_SET_ERR_MSG_MOD(extack, "Could not find device PSID in MFA2 file");
return err;
}
@@ -168,7 +196,7 @@ static int mlxfw_flash_components(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
return PTR_ERR(comp);
pr_info("Flashing component type %d\n", comp->index);
- err = mlxfw_flash_component(mlxfw_dev, fwhandle, comp);
+ err = mlxfw_flash_component(mlxfw_dev, fwhandle, comp, extack);
mlxfw_mfa2_file_component_put(comp);
if (err)
return err;
@@ -177,7 +205,8 @@ static int mlxfw_flash_components(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
}
int mlxfw_firmware_flash(struct mlxfw_dev *mlxfw_dev,
- const struct firmware *firmware)
+ const struct firmware *firmware,
+ struct netlink_ext_ack *extack)
{
struct mlxfw_mfa2_file *mfa2_file;
u32 fwhandle;
@@ -185,6 +214,7 @@ int mlxfw_firmware_flash(struct mlxfw_dev *mlxfw_dev,
if (!mlxfw_mfa2_check(firmware)) {
pr_err("Firmware file is not MFA2\n");
+ NL_SET_ERR_MSG_MOD(extack, "Firmware file is not MFA2");
return -EINVAL;
}
@@ -193,29 +223,35 @@ int mlxfw_firmware_flash(struct mlxfw_dev *mlxfw_dev,
return PTR_ERR(mfa2_file);
pr_info("Initialize firmware flash process\n");
+ mlxfw_status_notify(mlxfw_dev, "Initializing firmware flash process",
+ NULL, 0, 0);
err = mlxfw_dev->ops->fsm_lock(mlxfw_dev, &fwhandle);
if (err) {
pr_err("Could not lock the firmware FSM\n");
+ NL_SET_ERR_MSG_MOD(extack, "Could not lock the firmware FSM");
goto err_fsm_lock;
}
err = mlxfw_fsm_state_wait(mlxfw_dev, fwhandle,
- MLXFW_FSM_STATE_LOCKED);
+ MLXFW_FSM_STATE_LOCKED, extack);
if (err)
goto err_state_wait_idle_to_locked;
- err = mlxfw_flash_components(mlxfw_dev, fwhandle, mfa2_file);
+ err = mlxfw_flash_components(mlxfw_dev, fwhandle, mfa2_file, extack);
if (err)
goto err_flash_components;
pr_debug("Activate image\n");
+ mlxfw_status_notify(mlxfw_dev, "Activating image", NULL, 0, 0);
err = mlxfw_dev->ops->fsm_activate(mlxfw_dev, fwhandle);
if (err) {
pr_err("Could not activate the downloaded image\n");
+ NL_SET_ERR_MSG_MOD(extack, "Could not activate the downloaded image");
goto err_fsm_activate;
}
- err = mlxfw_fsm_state_wait(mlxfw_dev, fwhandle, MLXFW_FSM_STATE_LOCKED);
+ err = mlxfw_fsm_state_wait(mlxfw_dev, fwhandle,
+ MLXFW_FSM_STATE_LOCKED, extack);
if (err)
goto err_state_wait_activate_to_locked;
@@ -223,6 +259,7 @@ int mlxfw_firmware_flash(struct mlxfw_dev *mlxfw_dev,
mlxfw_dev->ops->fsm_release(mlxfw_dev, fwhandle);
pr_info("Firmware flash done.\n");
+ mlxfw_status_notify(mlxfw_dev, "Firmware flash done", NULL, 0, 0);
mlxfw_mfa2_file_fini(mfa2_file);
return 0;
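For reference, status_notify is optional: mlxfw_status_notify() above simply skips drivers that do not provide it. A minimal sketch of a driver-side hook (the implementation below only logs; forwarding the status to user space is left to the driver and is not shown in this patch):

/* Illustrative sketch, not part of this patch: a driver-provided
 * status_notify op; real drivers would report this to user space.
 */
static void example_fw_status_notify(struct mlxfw_dev *mlxfw_dev,
				     const char *msg, const char *comp_name,
				     u32 done_bytes, u32 total_bytes)
{
	pr_info("firmware flash: %s%s%s (%u/%u bytes)\n",
		msg, comp_name ? ", component " : "",
		comp_name ? comp_name : "", done_bytes, total_bytes);
}

static const struct mlxfw_dev_ops example_ops = {
	/* .component_query, .fsm_lock, ... as before */
	.status_notify = example_fw_status_notify,
};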
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
index 11ded0bc7d98..06c80343d9ed 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -83,6 +83,8 @@ config MLXSW_SPECTRUM
select PARMAN
select OBJAGG
select MLXFW
+ imply PTP_1588_CLOCK
+ select NET_PTP_CLASSIFY if PTP_1588_CLOCK
default m
---help---
This driver supports Mellanox Technologies Spectrum Ethernet
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
index c4dc72e1ce63..171b36bd8a4e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Makefile
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -31,5 +31,6 @@ mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
spectrum_nve.o spectrum_nve_vxlan.o \
spectrum_dpipe.o
mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o
+mlxsw_spectrum-$(CONFIG_PTP_1588_CLOCK) += spectrum_ptp.o
obj-$(CONFIG_MLXSW_MINIMAL) += mlxsw_minimal.o
mlxsw_minimal-objs := minimal.o
diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
index 0772e4339b33..5ffdfb532cb7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/cmd.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
@@ -317,6 +317,18 @@ MLXSW_ITEM64(cmd_mbox, query_fw, doorbell_page_offset, 0x40, 0, 64);
*/
MLXSW_ITEM32(cmd_mbox, query_fw, doorbell_page_bar, 0x48, 30, 2);
+/* cmd_mbox_query_fw_free_running_clock_offset
+ * The offset of the free running clock page
+ */
+MLXSW_ITEM64(cmd_mbox, query_fw, free_running_clock_offset, 0x50, 0, 64);
+
+/* cmd_mbox_query_fw_fr_rn_clk_bar
+ * PCI base address register (BAR) of the free running clock page
+ * 0: BAR 0
+ * 1: 64 bit BAR
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fr_rn_clk_bar, 0x58, 30, 2);
+
/* QUERY_BOARDINFO - Query Board Information
* -----------------------------------------
* OpMod == 0 (N/A), INMmod == 0 (N/A)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 6ee6de7f0160..17ceac7505e5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -1003,6 +1003,20 @@ static int mlxsw_devlink_core_bus_device_reload(struct devlink *devlink,
return err;
}
+static int mlxsw_devlink_flash_update(struct devlink *devlink,
+ const char *file_name,
+ const char *component,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+
+ if (!mlxsw_driver->flash_update)
+ return -EOPNOTSUPP;
+ return mlxsw_driver->flash_update(mlxsw_core, file_name,
+ component, extack);
+}
+
static const struct devlink_ops mlxsw_devlink_ops = {
.reload = mlxsw_devlink_core_bus_device_reload,
.port_type_set = mlxsw_devlink_port_type_set,
@@ -1019,6 +1033,7 @@ static const struct devlink_ops mlxsw_devlink_ops = {
.sb_occ_port_pool_get = mlxsw_devlink_sb_occ_port_pool_get,
.sb_occ_tc_port_bind_get = mlxsw_devlink_sb_occ_tc_port_bind_get,
.info_get = mlxsw_devlink_info_get,
+ .flash_update = mlxsw_devlink_flash_update,
};
static int
@@ -1098,6 +1113,12 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
goto err_register_params;
}
+ if (mlxsw_driver->init) {
+ err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info);
+ if (err)
+ goto err_driver_init;
+ }
+
err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon);
if (err)
goto err_hwmon_init;
@@ -1107,22 +1128,17 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
if (err)
goto err_thermal_init;
- if (mlxsw_driver->init) {
- err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info);
- if (err)
- goto err_driver_init;
- }
-
if (mlxsw_driver->params_register && !reload)
devlink_params_publish(devlink);
return 0;
-err_driver_init:
- mlxsw_thermal_fini(mlxsw_core->thermal);
err_thermal_init:
mlxsw_hwmon_fini(mlxsw_core->hwmon);
err_hwmon_init:
+ if (mlxsw_core->driver->fini)
+ mlxsw_core->driver->fini(mlxsw_core);
+err_driver_init:
if (mlxsw_driver->params_unregister && !reload)
mlxsw_driver->params_unregister(mlxsw_core);
err_register_params:
@@ -1187,10 +1203,10 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
if (mlxsw_core->driver->params_unregister && !reload)
devlink_params_unpublish(devlink);
- if (mlxsw_core->driver->fini)
- mlxsw_core->driver->fini(mlxsw_core);
mlxsw_thermal_fini(mlxsw_core->thermal);
mlxsw_hwmon_fini(mlxsw_core->hwmon);
+ if (mlxsw_core->driver->fini)
+ mlxsw_core->driver->fini(mlxsw_core);
if (mlxsw_core->driver->params_unregister && !reload)
mlxsw_core->driver->params_unregister(mlxsw_core);
if (!reload)
@@ -1229,6 +1245,15 @@ int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
}
EXPORT_SYMBOL(mlxsw_core_skb_transmit);
+void mlxsw_core_ptp_transmitted(struct mlxsw_core *mlxsw_core,
+ struct sk_buff *skb, u8 local_port)
+{
+ if (mlxsw_core->driver->ptp_transmitted)
+ mlxsw_core->driver->ptp_transmitted(mlxsw_core, skb,
+ local_port);
+}
+EXPORT_SYMBOL(mlxsw_core_ptp_transmitted);
+
static bool __is_rx_listener_equal(const struct mlxsw_rx_listener *rxl_a,
const struct mlxsw_rx_listener *rxl_b)
{
@@ -2010,6 +2035,18 @@ int mlxsw_core_resources_query(struct mlxsw_core *mlxsw_core, char *mbox,
}
EXPORT_SYMBOL(mlxsw_core_resources_query);
+u32 mlxsw_core_read_frc_h(struct mlxsw_core *mlxsw_core)
+{
+ return mlxsw_core->bus->read_frc_h(mlxsw_core->bus_priv);
+}
+EXPORT_SYMBOL(mlxsw_core_read_frc_h);
+
+u32 mlxsw_core_read_frc_l(struct mlxsw_core *mlxsw_core)
+{
+ return mlxsw_core->bus->read_frc_l(mlxsw_core->bus_priv);
+}
+EXPORT_SYMBOL(mlxsw_core_read_frc_l);
+
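mlxsw_core_read_frc_h()/_l() expose the free-running counter as two independent 32-bit reads, so a caller that wants one coherent 64-bit sample has to guard against the low word wrapping between the reads. A common pattern (sketch only, assuming the caller simply wants a raw counter value) is to re-read the high word until it is stable:

static u64 my_read_frc(struct mlxsw_core *mlxsw_core)
{
	u32 frc_h, frc_l, frc_h2;

	do {
		frc_h = mlxsw_core_read_frc_h(mlxsw_core);
		frc_l = mlxsw_core_read_frc_l(mlxsw_core);
		frc_h2 = mlxsw_core_read_frc_h(mlxsw_core);
	} while (frc_h != frc_h2);	/* low word wrapped; retry */

	return (u64)frc_h << 32 | frc_l;
}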
static int __init mlxsw_core_module_init(void)
{
int err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index e3832cb5bdda..8efcff4b59cb 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -48,6 +48,8 @@ bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core,
const struct mlxsw_tx_info *tx_info);
int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
const struct mlxsw_tx_info *tx_info);
+void mlxsw_core_ptp_transmitted(struct mlxsw_core *mlxsw_core,
+ struct sk_buff *skb, u8 local_port);
struct mlxsw_rx_listener {
void (*func)(struct sk_buff *skb, u8 local_port, void *priv);
@@ -284,6 +286,9 @@ struct mlxsw_driver {
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
u32 *p_cur, u32 *p_max);
+ int (*flash_update)(struct mlxsw_core *mlxsw_core,
+ const char *file_name, const char *component,
+ struct netlink_ext_ack *extack);
void (*txhdr_construct)(struct sk_buff *skb,
const struct mlxsw_tx_info *tx_info);
int (*resources_register)(struct mlxsw_core *mlxsw_core);
@@ -293,6 +298,13 @@ struct mlxsw_driver {
u64 *p_linear_size);
int (*params_register)(struct mlxsw_core *mlxsw_core);
void (*params_unregister)(struct mlxsw_core *mlxsw_core);
+
+ /* Notify a driver that a timestamped packet was transmitted. Driver
+ * is responsible for freeing the passed-in SKB.
+ */
+ void (*ptp_transmitted)(struct mlxsw_core *mlxsw_core,
+ struct sk_buff *skb, u8 local_port);
+
u8 txhdr_len;
const struct mlxsw_config_profile *profile;
bool res_query_enabled;
@@ -306,6 +318,9 @@ int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
void mlxsw_core_fw_flash_start(struct mlxsw_core *mlxsw_core);
void mlxsw_core_fw_flash_end(struct mlxsw_core *mlxsw_core);
+u32 mlxsw_core_read_frc_h(struct mlxsw_core *mlxsw_core);
+u32 mlxsw_core_read_frc_l(struct mlxsw_core *mlxsw_core);
+
bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core,
enum mlxsw_res_id res_id);
@@ -336,6 +351,8 @@ struct mlxsw_bus {
char *in_mbox, size_t in_mbox_size,
char *out_mbox, size_t out_mbox_size,
u8 *p_status);
+ u32 (*read_frc_h)(void *bus_priv);
+ u32 (*read_frc_l)(void *bus_priv);
u8 features;
};
@@ -353,7 +370,8 @@ struct mlxsw_bus_info {
struct mlxsw_fw_rev fw_rev;
u8 vsd[MLXSW_CMD_BOARDINFO_VSD_LEN];
u8 psid[MLXSW_CMD_BOARDINFO_PSID_LEN];
- u8 low_frequency;
+ u8 low_frequency:1,
+ read_frc_capable:1;
};
struct mlxsw_hwmon;
@@ -409,4 +427,14 @@ enum mlxsw_devlink_param_id {
MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
};
+struct mlxsw_skb_cb {
+ struct mlxsw_tx_info tx_info;
+};
+
+static inline struct mlxsw_skb_cb *mlxsw_skb_cb(struct sk_buff *skb)
+{
+ BUILD_BUG_ON(sizeof(struct mlxsw_skb_cb) > sizeof(skb->cb));
+ return (struct mlxsw_skb_cb *) skb->cb;
+}
+
#endif
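mlxsw_skb_cb() overlays a driver-private structure on skb->cb, so the transmit path can stash the mlxsw_tx_info and the completion path can recover it without any extra allocation; the BUILD_BUG_ON guards against the structure ever outgrowing the 48-byte control block. Condensed from the pci.c hunks later in this patch, the two ends look roughly like this:

	/* transmit side: remember who sent the packet */
	mlxsw_skb_cb(skb)->tx_info = *tx_info;

	/* completion side: recover the info before freeing or handing off */
	tx_info = mlxsw_skb_cb(skb)->tx_info;
	if (!tx_info.is_emad &&
	    skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
		mlxsw_core_ptp_transmitted(mlxsw_core, skb,
					   tx_info.local_port);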
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
index cb3e663b1d37..feb4672a5ac0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
@@ -30,8 +30,9 @@ static bool mlxsw_afk_blocks_check(struct mlxsw_afk *mlxsw_afk)
elinst = &block->instances[j];
if (elinst->type != elinst->info->type ||
- elinst->item.size.bits !=
- elinst->info->item.size.bits)
+ (!elinst->avoid_size_check &&
+ elinst->item.size.bits !=
+ elinst->info->item.size.bits))
return false;
}
}
@@ -385,12 +386,12 @@ EXPORT_SYMBOL(mlxsw_afk_values_add_buf);
static void mlxsw_sp_afk_encode_u32(const struct mlxsw_item *storage_item,
const struct mlxsw_item *output_item,
- char *storage, char *output)
+ char *storage, char *output, int diff)
{
u32 value;
value = __mlxsw_item_get32(storage, storage_item, 0);
- __mlxsw_item_set32(output, output_item, 0, value);
+ __mlxsw_item_set32(output, output_item, 0, value + diff);
}
static void mlxsw_sp_afk_encode_buf(const struct mlxsw_item *storage_item,
@@ -406,14 +407,14 @@ static void mlxsw_sp_afk_encode_buf(const struct mlxsw_item *storage_item,
static void
mlxsw_sp_afk_encode_one(const struct mlxsw_afk_element_inst *elinst,
- char *output, char *storage)
+ char *output, char *storage, int u32_diff)
{
const struct mlxsw_item *storage_item = &elinst->info->item;
const struct mlxsw_item *output_item = &elinst->item;
if (elinst->type == MLXSW_AFK_ELEMENT_TYPE_U32)
mlxsw_sp_afk_encode_u32(storage_item, output_item,
- storage, output);
+ storage, output, u32_diff);
else if (elinst->type == MLXSW_AFK_ELEMENT_TYPE_BUF)
mlxsw_sp_afk_encode_buf(storage_item, output_item,
storage, output);
@@ -446,9 +447,10 @@ void mlxsw_afk_encode(struct mlxsw_afk *mlxsw_afk,
continue;
mlxsw_sp_afk_encode_one(elinst, block_key,
- values->storage.key);
+ values->storage.key,
+ elinst->u32_key_diff);
mlxsw_sp_afk_encode_one(elinst, block_mask,
- values->storage.mask);
+ values->storage.mask, 0);
}
mlxsw_afk->ops->encode_block(key, i, block_key);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
index 4a625cdf3e7c..cb229b55ecc4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
@@ -74,7 +74,7 @@ struct mlxsw_afk_element_info {
* define an internal storage geometry.
*/
static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = {
- MLXSW_AFK_ELEMENT_INFO_U32(SRC_SYS_PORT, 0x00, 16, 8),
+ MLXSW_AFK_ELEMENT_INFO_U32(SRC_SYS_PORT, 0x00, 16, 16),
MLXSW_AFK_ELEMENT_INFO_BUF(DMAC_32_47, 0x04, 2),
MLXSW_AFK_ELEMENT_INFO_BUF(DMAC_0_31, 0x06, 4),
MLXSW_AFK_ELEMENT_INFO_BUF(SMAC_32_47, 0x0A, 2),
@@ -107,9 +107,14 @@ struct mlxsw_afk_element_inst { /* element instance in actual block */
const struct mlxsw_afk_element_info *info;
enum mlxsw_afk_element_type type;
struct mlxsw_item item; /* element geometry in block */
+ int u32_key_diff; /* if the value needs to be adjusted before it is
+ * written to the block, this diff holds the adjustment
+ */
+ bool avoid_size_check;
};
-#define MLXSW_AFK_ELEMENT_INST(_type, _element, _offset, _shift, _size) \
+#define MLXSW_AFK_ELEMENT_INST(_type, _element, _offset, \
+ _shift, _size, _u32_key_diff, _avoid_size_check) \
{ \
.info = &mlxsw_afk_element_infos[MLXSW_AFK_ELEMENT_##_element], \
.type = _type, \
@@ -119,15 +124,24 @@ struct mlxsw_afk_element_inst { /* element instance in actual block */
.size = {.bits = _size}, \
.name = #_element, \
}, \
+ .u32_key_diff = _u32_key_diff, \
+ .avoid_size_check = _avoid_size_check, \
}
#define MLXSW_AFK_ELEMENT_INST_U32(_element, _offset, _shift, _size) \
MLXSW_AFK_ELEMENT_INST(MLXSW_AFK_ELEMENT_TYPE_U32, \
- _element, _offset, _shift, _size)
+ _element, _offset, _shift, _size, 0, false)
+
+#define MLXSW_AFK_ELEMENT_INST_EXT_U32(_element, _offset, \
+ _shift, _size, _key_diff, \
+ _avoid_size_check) \
+ MLXSW_AFK_ELEMENT_INST(MLXSW_AFK_ELEMENT_TYPE_U32, \
+ _element, _offset, _shift, _size, \
+ _key_diff, _avoid_size_check)
#define MLXSW_AFK_ELEMENT_INST_BUF(_element, _offset, _size) \
MLXSW_AFK_ELEMENT_INST(MLXSW_AFK_ELEMENT_TYPE_BUF, \
- _element, _offset, 0, _size)
+ _element, _offset, 0, _size, 0, false)
struct mlxsw_afk_block {
u16 encoding; /* block ID */
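The extended macro exists so a block definition can both apply a constant offset when the key is written and skip the size check when the block field is narrower than the storage field. An illustrative instance (the element, offset and diff values below are examples, not taken from this patch) for an 8-bit port field whose hardware encoding is off by one:

static struct mlxsw_afk_element_inst my_afk_block[] = {
	/* store N, write N - 1, and skip the 8-vs-16 bit size check */
	MLXSW_AFK_ELEMENT_INST_EXT_U32(SRC_SYS_PORT, 0x04, 0, 8, -1, true),
	MLXSW_AFK_ELEMENT_INST_BUF(DMAC_0_31, 0x06, 4),
};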
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
index 72539a9a3847..d2c7ce67c300 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
@@ -92,33 +92,20 @@ int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module,
u16 temp;
} temp_thresh;
char mcia_pl[MLXSW_REG_MCIA_LEN] = {0};
- char mtbr_pl[MLXSW_REG_MTBR_LEN] = {0};
- u16 module_temp;
+ char mtmp_pl[MLXSW_REG_MTMP_LEN];
+ unsigned int module_temp;
bool qsfp;
int err;
- mlxsw_reg_mtbr_pack(mtbr_pl, MLXSW_REG_MTBR_BASE_MODULE_INDEX + module,
- 1);
- err = mlxsw_reg_query(core, MLXSW_REG(mtbr), mtbr_pl);
+ mlxsw_reg_mtmp_pack(mtmp_pl, MLXSW_REG_MTMP_MODULE_INDEX_MIN + module,
+ false, false);
+ err = mlxsw_reg_query(core, MLXSW_REG(mtmp), mtmp_pl);
if (err)
return err;
-
- /* Don't read temperature thresholds for module with no valid info. */
- mlxsw_reg_mtbr_temp_unpack(mtbr_pl, 0, &module_temp, NULL);
- switch (module_temp) {
- case MLXSW_REG_MTBR_BAD_SENS_INFO: /* fall-through */
- case MLXSW_REG_MTBR_NO_CONN: /* fall-through */
- case MLXSW_REG_MTBR_NO_TEMP_SENS: /* fall-through */
- case MLXSW_REG_MTBR_INDEX_NA:
+ mlxsw_reg_mtmp_unpack(mtmp_pl, &module_temp, NULL, NULL);
+ if (!module_temp) {
*temp = 0;
return 0;
- default:
- /* Do not consider thresholds for zero temperature. */
- if (MLXSW_REG_MTMP_TEMP_TO_MC(module_temp) == 0) {
- *temp = 0;
- return 0;
- }
- break;
}
/* Read Free Side Device Temperature Thresholds from page 03h
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
index 496dc904c5ed..5b00726c4346 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
@@ -23,6 +23,14 @@ struct mlxsw_hwmon_attr {
char name[32];
};
+static int mlxsw_hwmon_get_attr_index(int index, int count)
+{
+ if (index >= count)
+ return index % count + MLXSW_REG_MTMP_GBOX_INDEX_MIN;
+
+ return index;
+}
+
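mlxsw_hwmon_get_attr_index() keeps one flat attribute index space: indices below the module-sensor count address the ASIC and module sensors directly, while higher indices are folded onto the gearbox sensor range that starts at MLXSW_REG_MTMP_GBOX_INDEX_MIN (256). For example (counts illustrative), with count == 66 an attribute index of 66 maps to 66 % 66 + 256 = 256, the first gearbox sensor, and 67 maps to 257.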
struct mlxsw_hwmon {
struct mlxsw_core *core;
const struct mlxsw_bus_info *bus_info;
@@ -33,6 +41,7 @@ struct mlxsw_hwmon {
struct mlxsw_hwmon_attr hwmon_attrs[MLXSW_HWMON_ATTR_COUNT];
unsigned int attrs_count;
u8 sensor_count;
+ u8 module_sensor_count;
};
static ssize_t mlxsw_hwmon_temp_show(struct device *dev,
@@ -43,18 +52,19 @@ static ssize_t mlxsw_hwmon_temp_show(struct device *dev,
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
char mtmp_pl[MLXSW_REG_MTMP_LEN];
- unsigned int temp;
+ int temp, index;
int err;
- mlxsw_reg_mtmp_pack(mtmp_pl, mlwsw_hwmon_attr->type_index,
- false, false);
+ index = mlxsw_hwmon_get_attr_index(mlwsw_hwmon_attr->type_index,
+ mlxsw_hwmon->module_sensor_count);
+ mlxsw_reg_mtmp_pack(mtmp_pl, index, false, false);
err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl);
if (err) {
dev_err(mlxsw_hwmon->bus_info->dev, "Failed to query temp sensor\n");
return err;
}
mlxsw_reg_mtmp_unpack(mtmp_pl, &temp, NULL, NULL);
- return sprintf(buf, "%u\n", temp);
+ return sprintf(buf, "%d\n", temp);
}
static ssize_t mlxsw_hwmon_temp_max_show(struct device *dev,
@@ -65,18 +75,19 @@ static ssize_t mlxsw_hwmon_temp_max_show(struct device *dev,
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
char mtmp_pl[MLXSW_REG_MTMP_LEN];
- unsigned int temp_max;
+ int temp_max, index;
int err;
- mlxsw_reg_mtmp_pack(mtmp_pl, mlwsw_hwmon_attr->type_index,
- false, false);
+ index = mlxsw_hwmon_get_attr_index(mlwsw_hwmon_attr->type_index,
+ mlxsw_hwmon->module_sensor_count);
+ mlxsw_reg_mtmp_pack(mtmp_pl, index, false, false);
err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl);
if (err) {
dev_err(mlxsw_hwmon->bus_info->dev, "Failed to query temp sensor\n");
return err;
}
mlxsw_reg_mtmp_unpack(mtmp_pl, NULL, &temp_max, NULL);
- return sprintf(buf, "%u\n", temp_max);
+ return sprintf(buf, "%d\n", temp_max);
}
static ssize_t mlxsw_hwmon_temp_rst_store(struct device *dev,
@@ -88,6 +99,7 @@ static ssize_t mlxsw_hwmon_temp_rst_store(struct device *dev,
struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
char mtmp_pl[MLXSW_REG_MTMP_LEN];
unsigned long val;
+ int index;
int err;
err = kstrtoul(buf, 10, &val);
@@ -96,7 +108,9 @@ static ssize_t mlxsw_hwmon_temp_rst_store(struct device *dev,
if (val != 1)
return -EINVAL;
- mlxsw_reg_mtmp_pack(mtmp_pl, mlwsw_hwmon_attr->type_index, true, true);
+ index = mlxsw_hwmon_get_attr_index(mlwsw_hwmon_attr->type_index,
+ mlxsw_hwmon->module_sensor_count);
+ mlxsw_reg_mtmp_pack(mtmp_pl, index, true, true);
err = mlxsw_reg_write(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl);
if (err) {
dev_err(mlxsw_hwmon->bus_info->dev, "Failed to reset temp sensor history\n");
@@ -198,40 +212,20 @@ static ssize_t mlxsw_hwmon_module_temp_show(struct device *dev,
struct mlxsw_hwmon_attr *mlwsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
- char mtbr_pl[MLXSW_REG_MTBR_LEN] = {0};
- u16 temp;
+ char mtmp_pl[MLXSW_REG_MTMP_LEN];
u8 module;
+ int temp;
int err;
module = mlwsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count;
- mlxsw_reg_mtbr_pack(mtbr_pl, MLXSW_REG_MTBR_BASE_MODULE_INDEX + module,
- 1);
- err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtbr), mtbr_pl);
- if (err) {
- dev_err(dev, "Failed to query module temperature sensor\n");
+ mlxsw_reg_mtmp_pack(mtmp_pl, MLXSW_REG_MTMP_MODULE_INDEX_MIN + module,
+ false, false);
+ err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl);
+ if (err)
return err;
- }
-
- mlxsw_reg_mtbr_temp_unpack(mtbr_pl, 0, &temp, NULL);
- /* Update status and temperature cache. */
- switch (temp) {
- case MLXSW_REG_MTBR_NO_CONN: /* fall-through */
- case MLXSW_REG_MTBR_NO_TEMP_SENS: /* fall-through */
- case MLXSW_REG_MTBR_INDEX_NA:
- temp = 0;
- break;
- case MLXSW_REG_MTBR_BAD_SENS_INFO:
- /* Untrusted cable is connected. Reading temperature from its
- * sensor is faulty.
- */
- temp = 0;
- break;
- default:
- temp = MLXSW_REG_MTMP_TEMP_TO_MC(temp);
- break;
- }
+ mlxsw_reg_mtmp_unpack(mtmp_pl, &temp, NULL, NULL);
- return sprintf(buf, "%u\n", temp);
+ return sprintf(buf, "%d\n", temp);
}
static ssize_t mlxsw_hwmon_module_temp_fault_show(struct device *dev,
@@ -333,6 +327,20 @@ mlxsw_hwmon_module_temp_label_show(struct device *dev,
mlwsw_hwmon_attr->type_index);
}
+static ssize_t
+mlxsw_hwmon_gbox_temp_label_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct mlxsw_hwmon_attr *mlwsw_hwmon_attr =
+ container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
+ struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
+ int index = mlwsw_hwmon_attr->type_index -
+ mlxsw_hwmon->module_sensor_count + 1;
+
+ return sprintf(buf, "gearbox %03u\n", index);
+}
+
enum mlxsw_hwmon_attr_type {
MLXSW_HWMON_ATTR_TYPE_TEMP,
MLXSW_HWMON_ATTR_TYPE_TEMP_MAX,
@@ -345,6 +353,7 @@ enum mlxsw_hwmon_attr_type {
MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_CRIT,
MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_EMERG,
MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_LABEL,
+ MLXSW_HWMON_ATTR_TYPE_TEMP_GBOX_LABEL,
};
static void mlxsw_hwmon_attr_add(struct mlxsw_hwmon *mlxsw_hwmon,
@@ -428,6 +437,13 @@ static void mlxsw_hwmon_attr_add(struct mlxsw_hwmon *mlxsw_hwmon,
snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name),
"temp%u_label", num + 1);
break;
+ case MLXSW_HWMON_ATTR_TYPE_TEMP_GBOX_LABEL:
+ mlxsw_hwmon_attr->dev_attr.show =
+ mlxsw_hwmon_gbox_temp_label_show;
+ mlxsw_hwmon_attr->dev_attr.attr.mode = 0444;
+ snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name),
+ "temp%u_label", num + 1);
+ break;
default:
WARN_ON(1);
}
@@ -556,6 +572,54 @@ static int mlxsw_hwmon_module_init(struct mlxsw_hwmon *mlxsw_hwmon)
index, index);
index++;
}
+ mlxsw_hwmon->module_sensor_count = index;
+
+ return 0;
+}
+
+static int mlxsw_hwmon_gearbox_init(struct mlxsw_hwmon *mlxsw_hwmon)
+{
+ int index, max_index, sensor_index;
+ char mgpir_pl[MLXSW_REG_MGPIR_LEN];
+ char mtmp_pl[MLXSW_REG_MTMP_LEN];
+ u8 gbox_num;
+ int err;
+
+ mlxsw_reg_mgpir_pack(mgpir_pl);
+ err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mgpir), mgpir_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mgpir_unpack(mgpir_pl, &gbox_num, NULL, NULL);
+ if (!gbox_num)
+ return 0;
+
+ index = mlxsw_hwmon->module_sensor_count;
+ max_index = mlxsw_hwmon->module_sensor_count + gbox_num;
+ while (index < max_index) {
+ sensor_index = index % mlxsw_hwmon->module_sensor_count +
+ MLXSW_REG_MTMP_GBOX_INDEX_MIN;
+ mlxsw_reg_mtmp_pack(mtmp_pl, sensor_index, true, true);
+ err = mlxsw_reg_write(mlxsw_hwmon->core,
+ MLXSW_REG(mtmp), mtmp_pl);
+ if (err) {
+ dev_err(mlxsw_hwmon->bus_info->dev, "Failed to setup temp sensor number %d\n",
+ sensor_index);
+ return err;
+ }
+ mlxsw_hwmon_attr_add(mlxsw_hwmon, MLXSW_HWMON_ATTR_TYPE_TEMP,
+ index, index);
+ mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ MLXSW_HWMON_ATTR_TYPE_TEMP_MAX, index,
+ index);
+ mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ MLXSW_HWMON_ATTR_TYPE_TEMP_RST, index,
+ index);
+ mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ MLXSW_HWMON_ATTR_TYPE_TEMP_GBOX_LABEL,
+ index, index);
+ index++;
+ }
return 0;
}
@@ -586,6 +650,10 @@ int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core,
if (err)
goto err_temp_module_init;
+ err = mlxsw_hwmon_gearbox_init(mlxsw_hwmon);
+ if (err)
+ goto err_temp_gearbox_init;
+
mlxsw_hwmon->groups[0] = &mlxsw_hwmon->group;
mlxsw_hwmon->group.attrs = mlxsw_hwmon->attrs;
@@ -602,6 +670,7 @@ int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core,
return 0;
err_hwmon_register:
+err_temp_gearbox_init:
err_temp_module_init:
err_fans_init:
err_temp_init:
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
index d3e851e7ca72..35a1dc89c28a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -23,6 +23,7 @@
#define MLXSW_THERMAL_HYSTERESIS_TEMP 5000 /* 5C */
#define MLXSW_THERMAL_MODULE_TEMP_SHIFT (MLXSW_THERMAL_HYSTERESIS_TEMP * 2)
#define MLXSW_THERMAL_ZONE_MAX_NAME 16
+#define MLXSW_THERMAL_TEMP_SCORE_MAX GENMASK(31, 0)
#define MLXSW_THERMAL_MAX_STATE 10
#define MLXSW_THERMAL_MAX_DUTY 255
/* Minimum and maximum fan allowed speed in percent: from 20% to 100%. Values
@@ -98,7 +99,7 @@ struct mlxsw_thermal_module {
struct thermal_zone_device *tzdev;
struct mlxsw_thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS];
enum thermal_device_mode mode;
- int module;
+ int module; /* Module or gearbox number */
};
struct mlxsw_thermal {
@@ -111,6 +112,10 @@ struct mlxsw_thermal {
struct mlxsw_thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS];
enum thermal_device_mode mode;
struct mlxsw_thermal_module *tz_module_arr;
+ struct mlxsw_thermal_module *tz_gearbox_arr;
+ u8 tz_gearbox_num;
+ unsigned int tz_highest_score;
+ struct thermal_zone_device *tz_highest_dev;
};
static inline u8 mlxsw_state_to_duty(int state)
@@ -195,6 +200,34 @@ mlxsw_thermal_module_trips_update(struct device *dev, struct mlxsw_core *core,
return 0;
}
+static void mlxsw_thermal_tz_score_update(struct mlxsw_thermal *thermal,
+ struct thermal_zone_device *tzdev,
+ struct mlxsw_thermal_trip *trips,
+ int temp)
+{
+ struct mlxsw_thermal_trip *trip = trips;
+ unsigned int score, delta, i, shift = 1;
+
+ /* Calculate the thermal zone score. If the temperature is above the
+ * critical threshold, the score is set to MLXSW_THERMAL_TEMP_SCORE_MAX.
+ */
+ score = MLXSW_THERMAL_TEMP_SCORE_MAX;
+ for (i = MLXSW_THERMAL_TEMP_TRIP_NORM; i < MLXSW_THERMAL_NUM_TRIPS;
+ i++, trip++) {
+ if (temp < trip->temp) {
+ delta = DIV_ROUND_CLOSEST(temp, trip->temp - temp);
+ score = delta * shift;
+ break;
+ }
+ shift *= 256;
+ }
+
+ if (score > thermal->tz_highest_score) {
+ thermal->tz_highest_score = score;
+ thermal->tz_highest_dev = tzdev;
+ }
+}
+
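A worked example of the score (trip temperatures illustrative, e.g. normal/high/hot/critical at 75/85/105/110 °C): a zone at 60 °C is below the first trip, so delta = DIV_ROUND_CLOSEST(60000, 75000 - 60000) = 4 and score = 4 * 1 = 4. A zone at 80 °C fails the first trip, shift grows to 256, and against the 85 °C trip delta = DIV_ROUND_CLOSEST(80000, 5000) = 16, giving score = 16 * 256 = 4096. The factor of 256 per trip level therefore makes any zone sitting in a higher trip band outrank every zone in a lower one, and a zone above the critical trip keeps the MLXSW_THERMAL_TEMP_SCORE_MAX it started with.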
static int mlxsw_thermal_bind(struct thermal_zone_device *tzdev,
struct thermal_cooling_device *cdev)
{
@@ -279,7 +312,7 @@ static int mlxsw_thermal_get_temp(struct thermal_zone_device *tzdev,
struct mlxsw_thermal *thermal = tzdev->devdata;
struct device *dev = thermal->bus_info->dev;
char mtmp_pl[MLXSW_REG_MTMP_LEN];
- unsigned int temp;
+ int temp;
int err;
mlxsw_reg_mtmp_pack(mtmp_pl, 0, false, false);
@@ -290,8 +323,11 @@ static int mlxsw_thermal_get_temp(struct thermal_zone_device *tzdev,
return err;
}
mlxsw_reg_mtmp_unpack(mtmp_pl, &temp, NULL, NULL);
+ if (temp > 0)
+ mlxsw_thermal_tz_score_update(thermal, tzdev, thermal->trips,
+ temp);
- *p_temp = (int) temp;
+ *p_temp = temp;
return 0;
}
@@ -351,6 +387,22 @@ static int mlxsw_thermal_set_trip_hyst(struct thermal_zone_device *tzdev,
return 0;
}
+static int mlxsw_thermal_trend_get(struct thermal_zone_device *tzdev,
+ int trip, enum thermal_trend *trend)
+{
+ struct mlxsw_thermal_module *tz = tzdev->devdata;
+ struct mlxsw_thermal *thermal = tz->parent;
+
+ if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS)
+ return -EINVAL;
+
+ if (tzdev == thermal->tz_highest_dev)
+ return 1;
+
+ *trend = THERMAL_TREND_STABLE;
+ return 0;
+}
+
static struct thermal_zone_device_ops mlxsw_thermal_ops = {
.bind = mlxsw_thermal_bind,
.unbind = mlxsw_thermal_unbind,
@@ -362,6 +414,7 @@ static struct thermal_zone_device_ops mlxsw_thermal_ops = {
.set_trip_temp = mlxsw_thermal_set_trip_temp,
.get_trip_hyst = mlxsw_thermal_get_trip_hyst,
.set_trip_hyst = mlxsw_thermal_set_trip_hyst,
+ .get_trend = mlxsw_thermal_trend_get,
};
static int mlxsw_thermal_module_bind(struct thermal_zone_device *tzdev,
@@ -449,39 +502,33 @@ static int mlxsw_thermal_module_temp_get(struct thermal_zone_device *tzdev,
struct mlxsw_thermal_module *tz = tzdev->devdata;
struct mlxsw_thermal *thermal = tz->parent;
struct device *dev = thermal->bus_info->dev;
- char mtbr_pl[MLXSW_REG_MTBR_LEN];
- u16 temp;
+ char mtmp_pl[MLXSW_REG_MTMP_LEN];
+ int temp;
int err;
/* Read module temperature. */
- mlxsw_reg_mtbr_pack(mtbr_pl, MLXSW_REG_MTBR_BASE_MODULE_INDEX +
- tz->module, 1);
- err = mlxsw_reg_query(thermal->core, MLXSW_REG(mtbr), mtbr_pl);
- if (err)
- return err;
-
- mlxsw_reg_mtbr_temp_unpack(mtbr_pl, 0, &temp, NULL);
- /* Update temperature. */
- switch (temp) {
- case MLXSW_REG_MTBR_NO_CONN: /* fall-through */
- case MLXSW_REG_MTBR_NO_TEMP_SENS: /* fall-through */
- case MLXSW_REG_MTBR_INDEX_NA: /* fall-through */
- case MLXSW_REG_MTBR_BAD_SENS_INFO:
+ mlxsw_reg_mtmp_pack(mtmp_pl, MLXSW_REG_MTMP_MODULE_INDEX_MIN +
+ tz->module, false, false);
+ err = mlxsw_reg_query(thermal->core, MLXSW_REG(mtmp), mtmp_pl);
+ if (err) {
+ /* Do not return an error - a broken module sensor would otherwise
+ * cause error message flooding.
+ */
temp = 0;
- break;
- default:
- temp = MLXSW_REG_MTMP_TEMP_TO_MC(temp);
- /* Reset all trip point. */
- mlxsw_thermal_module_trips_reset(tz);
- /* Update trip points. */
- err = mlxsw_thermal_module_trips_update(dev, thermal->core,
- tz);
- if (err)
- return err;
- break;
+ *p_temp = (int) temp;
+ return 0;
}
+ mlxsw_reg_mtmp_unpack(mtmp_pl, &temp, NULL, NULL);
+ *p_temp = temp;
+
+ if (!temp)
+ return 0;
+
+ /* Update trip points. */
+ err = mlxsw_thermal_module_trips_update(dev, thermal->core, tz);
+ if (!err && temp > 0)
+ mlxsw_thermal_tz_score_update(thermal, tzdev, tz->trips, temp);
- *p_temp = (int) temp;
return 0;
}
@@ -545,10 +592,6 @@ mlxsw_thermal_module_trip_hyst_set(struct thermal_zone_device *tzdev, int trip,
return 0;
}
-static struct thermal_zone_params mlxsw_thermal_module_params = {
- .governor_name = "user_space",
-};
-
static struct thermal_zone_device_ops mlxsw_thermal_module_ops = {
.bind = mlxsw_thermal_module_bind,
.unbind = mlxsw_thermal_module_unbind,
@@ -560,6 +603,46 @@ static struct thermal_zone_device_ops mlxsw_thermal_module_ops = {
.set_trip_temp = mlxsw_thermal_module_trip_temp_set,
.get_trip_hyst = mlxsw_thermal_module_trip_hyst_get,
.set_trip_hyst = mlxsw_thermal_module_trip_hyst_set,
+ .get_trend = mlxsw_thermal_trend_get,
+};
+
+static int mlxsw_thermal_gearbox_temp_get(struct thermal_zone_device *tzdev,
+ int *p_temp)
+{
+ struct mlxsw_thermal_module *tz = tzdev->devdata;
+ struct mlxsw_thermal *thermal = tz->parent;
+ char mtmp_pl[MLXSW_REG_MTMP_LEN];
+ u16 index;
+ int temp;
+ int err;
+
+ index = MLXSW_REG_MTMP_GBOX_INDEX_MIN + tz->module;
+ mlxsw_reg_mtmp_pack(mtmp_pl, index, false, false);
+
+ err = mlxsw_reg_query(thermal->core, MLXSW_REG(mtmp), mtmp_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mtmp_unpack(mtmp_pl, &temp, NULL, NULL);
+ if (temp > 0)
+ mlxsw_thermal_tz_score_update(thermal, tzdev, tz->trips, temp);
+
+ *p_temp = temp;
+ return 0;
+}
+
+static struct thermal_zone_device_ops mlxsw_thermal_gearbox_ops = {
+ .bind = mlxsw_thermal_module_bind,
+ .unbind = mlxsw_thermal_module_unbind,
+ .get_mode = mlxsw_thermal_module_mode_get,
+ .set_mode = mlxsw_thermal_module_mode_set,
+ .get_temp = mlxsw_thermal_gearbox_temp_get,
+ .get_trip_type = mlxsw_thermal_module_trip_type_get,
+ .get_trip_temp = mlxsw_thermal_module_trip_temp_get,
+ .set_trip_temp = mlxsw_thermal_module_trip_temp_set,
+ .get_trip_hyst = mlxsw_thermal_module_trip_hyst_get,
+ .set_trip_hyst = mlxsw_thermal_module_trip_hyst_set,
+ .get_trend = mlxsw_thermal_trend_get,
};
static int mlxsw_thermal_get_max_state(struct thermal_cooling_device *cdev,
@@ -675,13 +758,13 @@ mlxsw_thermal_module_tz_init(struct mlxsw_thermal_module *module_tz)
MLXSW_THERMAL_TRIP_MASK,
module_tz,
&mlxsw_thermal_module_ops,
- &mlxsw_thermal_module_params,
- 0, 0);
+ NULL, 0, 0);
if (IS_ERR(module_tz->tzdev)) {
err = PTR_ERR(module_tz->tzdev);
return err;
}
+ module_tz->mode = THERMAL_DEVICE_ENABLED;
return 0;
}
@@ -787,6 +870,92 @@ mlxsw_thermal_modules_fini(struct mlxsw_thermal *thermal)
kfree(thermal->tz_module_arr);
}
+static int
+mlxsw_thermal_gearbox_tz_init(struct mlxsw_thermal_module *gearbox_tz)
+{
+ char tz_name[MLXSW_THERMAL_ZONE_MAX_NAME];
+
+ snprintf(tz_name, sizeof(tz_name), "mlxsw-gearbox%d",
+ gearbox_tz->module + 1);
+ gearbox_tz->tzdev = thermal_zone_device_register(tz_name,
+ MLXSW_THERMAL_NUM_TRIPS,
+ MLXSW_THERMAL_TRIP_MASK,
+ gearbox_tz,
+ &mlxsw_thermal_gearbox_ops,
+ NULL, 0, 0);
+ if (IS_ERR(gearbox_tz->tzdev))
+ return PTR_ERR(gearbox_tz->tzdev);
+
+ gearbox_tz->mode = THERMAL_DEVICE_ENABLED;
+ return 0;
+}
+
+static void
+mlxsw_thermal_gearbox_tz_fini(struct mlxsw_thermal_module *gearbox_tz)
+{
+ thermal_zone_device_unregister(gearbox_tz->tzdev);
+}
+
+static int
+mlxsw_thermal_gearboxes_init(struct device *dev, struct mlxsw_core *core,
+ struct mlxsw_thermal *thermal)
+{
+ struct mlxsw_thermal_module *gearbox_tz;
+ char mgpir_pl[MLXSW_REG_MGPIR_LEN];
+ int i;
+ int err;
+
+ if (!mlxsw_core_res_query_enabled(core))
+ return 0;
+
+ mlxsw_reg_mgpir_pack(mgpir_pl);
+ err = mlxsw_reg_query(core, MLXSW_REG(mgpir), mgpir_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mgpir_unpack(mgpir_pl, &thermal->tz_gearbox_num, NULL, NULL);
+ if (!thermal->tz_gearbox_num)
+ return 0;
+
+ thermal->tz_gearbox_arr = kcalloc(thermal->tz_gearbox_num,
+ sizeof(*thermal->tz_gearbox_arr),
+ GFP_KERNEL);
+ if (!thermal->tz_gearbox_arr)
+ return -ENOMEM;
+
+ for (i = 0; i < thermal->tz_gearbox_num; i++) {
+ gearbox_tz = &thermal->tz_gearbox_arr[i];
+ memcpy(gearbox_tz->trips, default_thermal_trips,
+ sizeof(thermal->trips));
+ gearbox_tz->module = i;
+ gearbox_tz->parent = thermal;
+ err = mlxsw_thermal_gearbox_tz_init(gearbox_tz);
+ if (err)
+ goto err_unreg_tz_gearbox;
+ }
+
+ return 0;
+
+err_unreg_tz_gearbox:
+ for (i--; i >= 0; i--)
+ mlxsw_thermal_gearbox_tz_fini(&thermal->tz_gearbox_arr[i]);
+ kfree(thermal->tz_gearbox_arr);
+ return err;
+}
+
+static void
+mlxsw_thermal_gearboxes_fini(struct mlxsw_thermal *thermal)
+{
+ int i;
+
+ if (!mlxsw_core_res_query_enabled(thermal->core))
+ return;
+
+ for (i = thermal->tz_gearbox_num - 1; i >= 0; i--)
+ mlxsw_thermal_gearbox_tz_fini(&thermal->tz_gearbox_arr[i]);
+ kfree(thermal->tz_gearbox_arr);
+}
+
int mlxsw_thermal_init(struct mlxsw_core *core,
const struct mlxsw_bus_info *bus_info,
struct mlxsw_thermal **p_thermal)
@@ -877,10 +1046,16 @@ int mlxsw_thermal_init(struct mlxsw_core *core,
if (err)
goto err_unreg_tzdev;
+ err = mlxsw_thermal_gearboxes_init(dev, core, thermal);
+ if (err)
+ goto err_unreg_modules_tzdev;
+
thermal->mode = THERMAL_DEVICE_ENABLED;
*p_thermal = thermal;
return 0;
+err_unreg_modules_tzdev:
+ mlxsw_thermal_modules_fini(thermal);
err_unreg_tzdev:
if (thermal->tzdev) {
thermal_zone_device_unregister(thermal->tzdev);
@@ -899,6 +1074,7 @@ void mlxsw_thermal_fini(struct mlxsw_thermal *thermal)
{
int i;
+ mlxsw_thermal_gearboxes_fini(thermal);
mlxsw_thermal_modules_fini(thermal);
if (thermal->tzdev) {
thermal_zone_device_unregister(thermal->tzdev);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
index 06aea1999518..95f408d0e103 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/i2c.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
@@ -43,11 +43,10 @@
#define MLXSW_I2C_PREP_SIZE (MLXSW_I2C_ADDR_WIDTH + 28)
#define MLXSW_I2C_MBOX_SIZE 20
#define MLXSW_I2C_MBOX_OUT_PARAM_OFF 12
-#define MLXSW_I2C_MAX_BUFF_SIZE 32
#define MLXSW_I2C_MBOX_OFFSET_BITS 20
#define MLXSW_I2C_MBOX_SIZE_BITS 12
#define MLXSW_I2C_ADDR_BUF_SIZE 4
-#define MLXSW_I2C_BLK_MAX 32
+#define MLXSW_I2C_BLK_DEF 32
#define MLXSW_I2C_RETRY 5
#define MLXSW_I2C_TIMEOUT_MSECS 5000
#define MLXSW_I2C_MAX_DATA_SIZE 256
@@ -62,6 +61,7 @@
* @dev: I2C device;
* @core: switch core pointer;
* @bus_info: bus info block;
+ * @block_size: maximum block size allowed to be passed to the underlying layer;
*/
struct mlxsw_i2c {
struct {
@@ -74,6 +74,7 @@ struct mlxsw_i2c {
struct device *dev;
struct mlxsw_core *core;
struct mlxsw_bus_info bus_info;
+ u16 block_size;
};
#define MLXSW_I2C_READ_MSG(_client, _addr_buf, _buf, _len) { \
@@ -315,20 +316,26 @@ mlxsw_i2c_write(struct device *dev, size_t in_mbox_size, u8 *in_mbox, int num,
struct i2c_client *client = to_i2c_client(dev);
struct mlxsw_i2c *mlxsw_i2c = i2c_get_clientdata(client);
unsigned long timeout = msecs_to_jiffies(MLXSW_I2C_TIMEOUT_MSECS);
- u8 tran_buf[MLXSW_I2C_MAX_BUFF_SIZE + MLXSW_I2C_ADDR_BUF_SIZE];
int off = mlxsw_i2c->cmd.mb_off_in, chunk_size, i, j;
unsigned long end;
+ u8 *tran_buf;
struct i2c_msg write_tran =
- MLXSW_I2C_WRITE_MSG(client, tran_buf, MLXSW_I2C_PUSH_CMD_SIZE);
+ MLXSW_I2C_WRITE_MSG(client, NULL, MLXSW_I2C_PUSH_CMD_SIZE);
int err;
+ tran_buf = kmalloc(mlxsw_i2c->block_size + MLXSW_I2C_ADDR_BUF_SIZE,
+ GFP_KERNEL);
+ if (!tran_buf)
+ return -ENOMEM;
+
+ write_tran.buf = tran_buf;
for (i = 0; i < num; i++) {
- chunk_size = (in_mbox_size > MLXSW_I2C_BLK_MAX) ?
- MLXSW_I2C_BLK_MAX : in_mbox_size;
+ chunk_size = (in_mbox_size > mlxsw_i2c->block_size) ?
+ mlxsw_i2c->block_size : in_mbox_size;
write_tran.len = MLXSW_I2C_ADDR_WIDTH + chunk_size;
mlxsw_i2c_set_slave_addr(tran_buf, off);
memcpy(&tran_buf[MLXSW_I2C_ADDR_BUF_SIZE], in_mbox +
- MLXSW_I2C_BLK_MAX * i, chunk_size);
+ mlxsw_i2c->block_size * i, chunk_size);
j = 0;
end = jiffies + timeout;
@@ -342,9 +349,10 @@ mlxsw_i2c_write(struct device *dev, size_t in_mbox_size, u8 *in_mbox, int num,
(j++ < MLXSW_I2C_RETRY));
if (err != 1) {
- if (!err)
+ if (!err) {
err = -EIO;
- return err;
+ goto mlxsw_i2c_write_exit;
+ }
}
off += chunk_size;
@@ -355,24 +363,27 @@ mlxsw_i2c_write(struct device *dev, size_t in_mbox_size, u8 *in_mbox, int num,
err = mlxsw_i2c_write_cmd(client, mlxsw_i2c, 0);
if (err) {
dev_err(&client->dev, "Could not start transaction");
- return -EIO;
+ err = -EIO;
+ goto mlxsw_i2c_write_exit;
}
/* Wait until go bit is cleared. */
err = mlxsw_i2c_wait_go_bit(client, mlxsw_i2c, p_status);
if (err) {
dev_err(&client->dev, "HW semaphore is not released");
- return err;
+ goto mlxsw_i2c_write_exit;
}
/* Validate transaction completion status. */
if (*p_status) {
dev_err(&client->dev, "Bad transaction completion status %x\n",
*p_status);
- return -EIO;
+ err = -EIO;
}
- return 0;
+mlxsw_i2c_write_exit:
+ kfree(tran_buf);
+ return err;
}
/* Routine executes I2C command. */
@@ -395,8 +406,8 @@ mlxsw_i2c_cmd(struct device *dev, u16 opcode, u32 in_mod, size_t in_mbox_size,
if (in_mbox) {
reg_size = mlxsw_i2c_get_reg_size(in_mbox);
- num = reg_size / MLXSW_I2C_BLK_MAX;
- if (reg_size % MLXSW_I2C_BLK_MAX)
+ num = reg_size / mlxsw_i2c->block_size;
+ if (reg_size % mlxsw_i2c->block_size)
num++;
if (mutex_lock_interruptible(&mlxsw_i2c->cmd.lock) < 0) {
@@ -416,7 +427,7 @@ mlxsw_i2c_cmd(struct device *dev, u16 opcode, u32 in_mod, size_t in_mbox_size,
} else {
/* No input mailbox is case of initialization query command. */
reg_size = MLXSW_I2C_MAX_DATA_SIZE;
- num = reg_size / MLXSW_I2C_BLK_MAX;
+ num = reg_size / mlxsw_i2c->block_size;
if (mutex_lock_interruptible(&mlxsw_i2c->cmd.lock) < 0) {
dev_err(&client->dev, "Could not acquire lock");
@@ -432,8 +443,8 @@ mlxsw_i2c_cmd(struct device *dev, u16 opcode, u32 in_mod, size_t in_mbox_size,
/* Send read transaction to get output mailbox content. */
read_tran[1].buf = out_mbox;
for (i = 0; i < num; i++) {
- chunk_size = (reg_size > MLXSW_I2C_BLK_MAX) ?
- MLXSW_I2C_BLK_MAX : reg_size;
+ chunk_size = (reg_size > mlxsw_i2c->block_size) ?
+ mlxsw_i2c->block_size : reg_size;
read_tran[1].len = chunk_size;
mlxsw_i2c_set_slave_addr(tran_buf, off);
@@ -509,8 +520,20 @@ mlxsw_i2c_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
if (!mbox)
return -ENOMEM;
+ err = mlxsw_cmd_query_fw(mlxsw_core, mbox);
+ if (err)
+ goto mbox_put;
+
+ mlxsw_i2c->bus_info.fw_rev.major =
+ mlxsw_cmd_mbox_query_fw_fw_rev_major_get(mbox);
+ mlxsw_i2c->bus_info.fw_rev.minor =
+ mlxsw_cmd_mbox_query_fw_fw_rev_minor_get(mbox);
+ mlxsw_i2c->bus_info.fw_rev.subminor =
+ mlxsw_cmd_mbox_query_fw_fw_rev_subminor_get(mbox);
+
err = mlxsw_core_resources_query(mlxsw_core, mbox, res);
+mbox_put:
mlxsw_cmd_mbox_free(mbox);
return err;
}
@@ -534,6 +557,7 @@ static const struct mlxsw_bus mlxsw_i2c_bus = {
static int mlxsw_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ const struct i2c_adapter_quirks *quirks = client->adapter->quirks;
struct mlxsw_i2c *mlxsw_i2c;
u8 status;
int err;
@@ -542,6 +566,22 @@ static int mlxsw_i2c_probe(struct i2c_client *client,
if (!mlxsw_i2c)
return -ENOMEM;
+ if (quirks) {
+ if ((quirks->max_read_len &&
+ quirks->max_read_len < MLXSW_I2C_BLK_DEF) ||
+ (quirks->max_write_len &&
+ quirks->max_write_len < MLXSW_I2C_BLK_DEF)) {
+ dev_err(&client->dev, "Insufficient transaction buffer length\n");
+ return -EOPNOTSUPP;
+ }
+
+ mlxsw_i2c->block_size = max_t(u16, MLXSW_I2C_BLK_DEF,
+ min_t(u16, quirks->max_read_len,
+ quirks->max_write_len));
+ } else {
+ mlxsw_i2c->block_size = MLXSW_I2C_BLK_DEF;
+ }
+
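The block size ends up as the smaller of the adapter's read and write limits, but never below the 32-byte default; a zero quirk field (meaning "no limit") also resolves to the default. For example (limits illustrative), an adapter quirked to 64-byte writes and 128-byte reads gives block_size = 64, while an adapter with no quirks, or with both limits zero, stays at 32. The register payload is then moved in ceil(reg_size / block_size) chunks, which is what the num computation in mlxsw_i2c_cmd() does; it could equally be written as:

	num = DIV_ROUND_UP(reg_size, mlxsw_i2c->block_size);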
i2c_set_clientdata(client, mlxsw_i2c);
mutex_init(&mlxsw_i2c->cmd.lock);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
index cf2114273b72..471b0ca6d69a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
@@ -67,6 +67,23 @@ static const struct net_device_ops mlxsw_m_port_netdev_ops = {
.ndo_get_devlink_port = mlxsw_m_port_get_devlink_port,
};
+static void mlxsw_m_module_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct mlxsw_m_port *mlxsw_m_port = netdev_priv(dev);
+ struct mlxsw_m *mlxsw_m = mlxsw_m_port->mlxsw_m;
+
+ strlcpy(drvinfo->driver, mlxsw_m->bus_info->device_kind,
+ sizeof(drvinfo->driver));
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d.%d",
+ mlxsw_m->bus_info->fw_rev.major,
+ mlxsw_m->bus_info->fw_rev.minor,
+ mlxsw_m->bus_info->fw_rev.subminor);
+ strlcpy(drvinfo->bus_info, mlxsw_m->bus_info->device_name,
+ sizeof(drvinfo->bus_info));
+}
+
static int mlxsw_m_get_module_info(struct net_device *netdev,
struct ethtool_modinfo *modinfo)
{
@@ -88,6 +105,7 @@ mlxsw_m_get_module_eeprom(struct net_device *netdev, struct ethtool_eeprom *ee,
}
static const struct ethtool_ops mlxsw_m_port_ethtool_ops = {
+ .get_drvinfo = mlxsw_m_module_get_drvinfo,
.get_module_info = mlxsw_m_get_module_info,
.get_module_eeprom = mlxsw_m_get_module_eeprom,
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index b40455f8293d..051b19388a81 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -102,6 +102,7 @@ struct mlxsw_pci_queue_type_group {
struct mlxsw_pci {
struct pci_dev *pdev;
u8 __iomem *hw_addr;
+ u64 free_running_clock_offset;
struct mlxsw_pci_queue_type_group queues[MLXSW_PCI_QUEUE_TYPE_COUNT];
u32 doorbell_offset;
struct mlxsw_core *core;
@@ -507,17 +508,28 @@ static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
{
struct pci_dev *pdev = mlxsw_pci->pdev;
struct mlxsw_pci_queue_elem_info *elem_info;
+ struct mlxsw_tx_info tx_info;
char *wqe;
struct sk_buff *skb;
int i;
spin_lock(&q->lock);
elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
+ tx_info = mlxsw_skb_cb(elem_info->u.sdq.skb)->tx_info;
skb = elem_info->u.sdq.skb;
wqe = elem_info->elem;
for (i = 0; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);
- dev_kfree_skb_any(skb);
+
+ if (unlikely(!tx_info.is_emad &&
+ skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+ mlxsw_core_ptp_transmitted(mlxsw_pci->core, skb,
+ tx_info.local_port);
+ skb = NULL;
+ }
+
+ if (skb)
+ dev_kfree_skb_any(skb);
elem_info->u.sdq.skb = NULL;
if (q->consumer_counter++ != consumer_counter_limit)
@@ -1414,6 +1426,15 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
mlxsw_pci->doorbell_offset =
mlxsw_cmd_mbox_query_fw_doorbell_page_offset_get(mbox);
+ if (mlxsw_cmd_mbox_query_fw_fr_rn_clk_bar_get(mbox) != 0) {
+ dev_err(&pdev->dev, "Unsupported free running clock BAR queried from hw\n");
+ err = -EINVAL;
+ goto err_fr_rn_clk_bar;
+ }
+
+ mlxsw_pci->free_running_clock_offset =
+ mlxsw_cmd_mbox_query_fw_free_running_clock_offset_get(mbox);
+
num_pages = mlxsw_cmd_mbox_query_fw_fw_pages_get(mbox);
err = mlxsw_pci_fw_area_init(mlxsw_pci, mbox, num_pages);
if (err)
@@ -1469,6 +1490,7 @@ err_query_resources:
err_boardinfo:
mlxsw_pci_fw_area_fini(mlxsw_pci);
err_fw_area_init:
+err_fr_rn_clk_bar:
err_doorbell_page_bar:
err_iface_rev:
err_query_fw:
@@ -1537,6 +1559,7 @@ static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
err = -EAGAIN;
goto unlock;
}
+ mlxsw_skb_cb(skb)->tx_info = *tx_info;
elem_info->u.sdq.skb = skb;
wqe = elem_info->elem;
@@ -1560,6 +1583,9 @@ static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
goto unmap_frags;
}
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
/* Set unused sq entries byte count to zero. */
for (i++; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
mlxsw_pci_wqe_byte_count_set(wqe, i, 0);
@@ -1672,6 +1698,24 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
return err;
}
+static u32 mlxsw_pci_read_frc_h(void *bus_priv)
+{
+ struct mlxsw_pci *mlxsw_pci = bus_priv;
+ u64 frc_offset;
+
+ frc_offset = mlxsw_pci->free_running_clock_offset;
+ return mlxsw_pci_read32(mlxsw_pci, FREE_RUNNING_CLOCK_H(frc_offset));
+}
+
+static u32 mlxsw_pci_read_frc_l(void *bus_priv)
+{
+ struct mlxsw_pci *mlxsw_pci = bus_priv;
+ u64 frc_offset;
+
+ frc_offset = mlxsw_pci->free_running_clock_offset;
+ return mlxsw_pci_read32(mlxsw_pci, FREE_RUNNING_CLOCK_L(frc_offset));
+}
+
static const struct mlxsw_bus mlxsw_pci_bus = {
.kind = "pci",
.init = mlxsw_pci_init,
@@ -1679,6 +1723,8 @@ static const struct mlxsw_bus mlxsw_pci_bus = {
.skb_transmit_busy = mlxsw_pci_skb_transmit_busy,
.skb_transmit = mlxsw_pci_skb_transmit,
.cmd_exec = mlxsw_pci_cmd_exec,
+ .read_frc_h = mlxsw_pci_read_frc_h,
+ .read_frc_l = mlxsw_pci_read_frc_l,
.features = MLXSW_BUS_F_TXRX | MLXSW_BUS_F_RESET,
};
@@ -1740,6 +1786,7 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mlxsw_pci->bus_info.device_kind = driver_name;
mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev);
mlxsw_pci->bus_info.dev = &pdev->dev;
+ mlxsw_pci->bus_info.read_frc_capable = true;
mlxsw_pci->id = id;
err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
index 8648ca171254..e57e42e2d2b2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -43,6 +43,9 @@
#define MLXSW_PCI_DOORBELL(offset, type_offset, num) \
((offset) + (type_offset) + (num) * 4)
+#define MLXSW_PCI_FREE_RUNNING_CLOCK_H(offset) (offset)
+#define MLXSW_PCI_FREE_RUNNING_CLOCK_L(offset) ((offset) + 4)
+
#define MLXSW_PCI_CQS_MAX 96
#define MLXSW_PCI_EQS_COUNT 2
#define MLXSW_PCI_EQ_ASYNC_NUM 0
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 7ed63ed657c7..ead36702549a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -3515,6 +3515,18 @@ MLXSW_ITEM32(reg, qeec, next_element_index, 0x08, 0, 8);
*/
MLXSW_ITEM32(reg, qeec, mise, 0x0C, 31, 1);
+/* reg_qeec_ptps
+ * PTP shaper
+ * 0: regular shaper mode
+ * 1: PTP oriented shaper
+ * Allowed only for hierarchy 0
+ * Not supported for CPU port
+ * Note that ptps mode may affect the shaper rates of all hierarchies
+ * Supported only on Spectrum-1
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qeec, ptps, 0x0C, 29, 1);
+
enum {
MLXSW_REG_QEEC_BYTES_MODE,
MLXSW_REG_QEEC_PACKETS_MODE,
@@ -3601,6 +3613,16 @@ static inline void mlxsw_reg_qeec_pack(char *payload, u8 local_port,
mlxsw_reg_qeec_next_element_index_set(payload, next_index);
}
+static inline void mlxsw_reg_qeec_ptps_pack(char *payload, u8 local_port,
+ bool ptps)
+{
+ MLXSW_REG_ZERO(qeec, payload);
+ mlxsw_reg_qeec_local_port_set(payload, local_port);
+ mlxsw_reg_qeec_element_hierarchy_set(payload,
+ MLXSW_REG_QEEC_HIERARCY_PORT);
+ mlxsw_reg_qeec_ptps_set(payload, ptps);
+}
+
/* QRWE - QoS ReWrite Enable
* -------------------------
* This register configures the rewrite enable per receive port.
@@ -3814,6 +3836,112 @@ mlxsw_reg_qtctm_pack(char *payload, u8 local_port, bool mc)
mlxsw_reg_qtctm_mc_set(payload, mc);
}
+/* QPSC - QoS PTP Shaper Configuration Register
+ * --------------------------------------------
+ * The QPSC allows advanced configuration of the shapers when QEEC.ptps=1.
+ * Supported only on Spectrum-1.
+ */
+#define MLXSW_REG_QPSC_ID 0x401B
+#define MLXSW_REG_QPSC_LEN 0x28
+
+MLXSW_REG_DEFINE(qpsc, MLXSW_REG_QPSC_ID, MLXSW_REG_QPSC_LEN);
+
+enum mlxsw_reg_qpsc_port_speed {
+ MLXSW_REG_QPSC_PORT_SPEED_100M,
+ MLXSW_REG_QPSC_PORT_SPEED_1G,
+ MLXSW_REG_QPSC_PORT_SPEED_10G,
+ MLXSW_REG_QPSC_PORT_SPEED_25G,
+};
+
+/* reg_qpsc_port_speed
+ * Port speed.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, qpsc, port_speed, 0x00, 0, 4);
+
+/* reg_qpsc_shaper_time_exp
+ * The base-time-interval for updating the shapers tokens (for all hierarchies).
+ * shaper_update_rate = 2 ^ shaper_time_exp * (1 + shaper_time_mantissa) * 32nSec
+ * shaper_rate = 64bit * shaper_inc / shaper_update_rate
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qpsc, shaper_time_exp, 0x04, 16, 4);
+
+/* reg_qpsc_shaper_time_mantissa
+ * The base-time-interval for updating the shapers tokens (for all hierarchies).
+ * shaper_update_rate = 2 ^ shaper_time_exp * (1 + shaper_time_mantissa) * 32nSec
+ * shaper_rate = 64bit * shaper_inc / shaper_update_rate
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qpsc, shaper_time_mantissa, 0x04, 0, 5);
+
+/* reg_qpsc_shaper_inc
+ * Number of tokens added to shaper on each update.
+ * Units of 8B.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qpsc, shaper_inc, 0x08, 0, 5);
+
+/* reg_qpsc_shaper_bs
+ * Max shaper Burst size.
+ * Burst size is 2 ^ max_shaper_bs * 512 [bits]
+ * Range is: 5..25 (from 2KB..2GB)
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qpsc, shaper_bs, 0x0C, 0, 6);
+
+/* reg_qpsc_ptsc_we
+ * Write enable to port_to_shaper_credits.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, qpsc, ptsc_we, 0x10, 31, 1);
+
+/* reg_qpsc_port_to_shaper_credits
+ * For split ports: range 1..57
+ * For non-split ports: range 1..112
+ * Written only when ptsc_we is set.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qpsc, port_to_shaper_credits, 0x10, 0, 8);
+
+/* reg_qpsc_ing_timestamp_inc
+ * Ingress timestamp increment.
+ * 2's complement.
+ * The timestamp of MTPPTR at ingress will be incremented by this value. Global
+ * value for all ports.
+ * Same units as used by MTPPTR.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qpsc, ing_timestamp_inc, 0x20, 0, 32);
+
+/* reg_qpsc_egr_timestamp_inc
+ * Egress timestamp increment.
+ * 2's complement.
+ * The timestamp of MTPPTR at egress will be incremented by this value. Global
+ * value for all ports.
+ * Same units as used by MTPPTR.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qpsc, egr_timestamp_inc, 0x24, 0, 32);
+
+static inline void
+mlxsw_reg_qpsc_pack(char *payload, enum mlxsw_reg_qpsc_port_speed port_speed,
+ u8 shaper_time_exp, u8 shaper_time_mantissa, u8 shaper_inc,
+ u8 shaper_bs, u8 port_to_shaper_credits,
+ int ing_timestamp_inc, int egr_timestamp_inc)
+{
+ MLXSW_REG_ZERO(qpsc, payload);
+ mlxsw_reg_qpsc_port_speed_set(payload, port_speed);
+ mlxsw_reg_qpsc_shaper_time_exp_set(payload, shaper_time_exp);
+ mlxsw_reg_qpsc_shaper_time_mantissa_set(payload, shaper_time_mantissa);
+ mlxsw_reg_qpsc_shaper_inc_set(payload, shaper_inc);
+ mlxsw_reg_qpsc_shaper_bs_set(payload, shaper_bs);
+ mlxsw_reg_qpsc_ptsc_we_set(payload, true);
+ mlxsw_reg_qpsc_port_to_shaper_credits_set(payload, port_to_shaper_credits);
+ mlxsw_reg_qpsc_ing_timestamp_inc_set(payload, ing_timestamp_inc);
+ mlxsw_reg_qpsc_egr_timestamp_inc_set(payload, egr_timestamp_inc);
+}
+
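To make the shaper formulas concrete (values illustrative, not firmware defaults): with shaper_time_exp = 3 and shaper_time_mantissa = 4 the update interval is 2^3 * (1 + 4) * 32 ns = 1280 ns; with shaper_inc = 5 the shaper then releases 64 * 5 = 320 bits per interval, i.e. 320 bits / 1280 ns = 250 Mbit/s. Likewise shaper_bs = 9 gives a burst of 2^9 * 512 bits = 32 KB, and the documented 5..25 range spans 2^5 * 512 bits = 2 KB up to 2^25 * 512 bits = 2 GB.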
/* PMLP - Ports Module to Local Port Register
* ------------------------------------------
* Configures the assignment of modules to local ports.
@@ -5292,6 +5420,8 @@ enum mlxsw_reg_htgt_trap_group {
MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD,
MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND,
MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1,
};
/* reg_htgt_trap_group
@@ -8039,16 +8169,21 @@ MLXSW_ITEM32(reg, mtcap, sensor_count, 0x00, 0, 7);
MLXSW_REG_DEFINE(mtmp, MLXSW_REG_MTMP_ID, MLXSW_REG_MTMP_LEN);
+#define MLXSW_REG_MTMP_MODULE_INDEX_MIN 64
+#define MLXSW_REG_MTMP_GBOX_INDEX_MIN 256
/* reg_mtmp_sensor_index
* Sensors index to access.
* 64-127 of sensor_index are mapped to the SFP+/QSFP modules sequentially
* (module 0 is mapped to sensor_index 64).
* Access: Index
*/
-MLXSW_ITEM32(reg, mtmp, sensor_index, 0x00, 0, 7);
+MLXSW_ITEM32(reg, mtmp, sensor_index, 0x00, 0, 12);
/* Convert to milli degrees Celsius */
-#define MLXSW_REG_MTMP_TEMP_TO_MC(val) (val * 125)
+#define MLXSW_REG_MTMP_TEMP_TO_MC(val) ({ typeof(val) v_ = (val); \
+ ((v_) >= 0) ? ((v_) * 125) : \
+ ((s16)((GENMASK(15, 0) + (v_) + 1) \
+ * 125)); })
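The reworked macro now handles negative readings: the raw field is 0.125 °C per LSB in 16-bit two's complement, so (values illustrative) a reading of 400 converts to 400 * 125 = 50000 m°C (50 °C), while a raw 0xFFB0, i.e. -80 as an s16, comes out as -80 * 125 = -10000 m°C (-10 °C) via the GENMASK-and-cast path of the negative branch.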
/* reg_mtmp_temperature
* Temperature reading from the sensor. Reading is in 0.125 Celsius
@@ -8107,7 +8242,7 @@ MLXSW_ITEM32(reg, mtmp, temperature_threshold_lo, 0x10, 0, 16);
*/
MLXSW_ITEM_BUF(reg, mtmp, sensor_name, 0x18, MLXSW_REG_MTMP_SENSOR_NAME_SIZE);
-static inline void mlxsw_reg_mtmp_pack(char *payload, u8 sensor_index,
+static inline void mlxsw_reg_mtmp_pack(char *payload, u16 sensor_index,
bool max_temp_enable,
bool max_temp_reset)
{
@@ -8119,11 +8254,10 @@ static inline void mlxsw_reg_mtmp_pack(char *payload, u8 sensor_index,
MLXSW_REG_MTMP_THRESH_HI);
}
-static inline void mlxsw_reg_mtmp_unpack(char *payload, unsigned int *p_temp,
- unsigned int *p_max_temp,
- char *sensor_name)
+static inline void mlxsw_reg_mtmp_unpack(char *payload, int *p_temp,
+ int *p_max_temp, char *sensor_name)
{
- u16 temp;
+ s16 temp;
if (p_temp) {
temp = mlxsw_reg_mtmp_temperature_get(payload);
@@ -8156,7 +8290,7 @@ MLXSW_REG_DEFINE(mtbr, MLXSW_REG_MTBR_ID, MLXSW_REG_MTBR_LEN);
* 64-127 are mapped to the SFP+/QSFP modules sequentially).
* Access: Index
*/
-MLXSW_ITEM32(reg, mtbr, base_sensor_index, 0x00, 0, 7);
+MLXSW_ITEM32(reg, mtbr, base_sensor_index, 0x00, 0, 12);
/* reg_mtbr_num_rec
* Request: Number of records to read
@@ -8183,7 +8317,7 @@ MLXSW_ITEM32_INDEXED(reg, mtbr, rec_max_temp, MLXSW_REG_MTBR_BASE_LEN, 16,
MLXSW_ITEM32_INDEXED(reg, mtbr, rec_temp, MLXSW_REG_MTBR_BASE_LEN, 0, 16,
MLXSW_REG_MTBR_REC_LEN, 0x00, false);
-static inline void mlxsw_reg_mtbr_pack(char *payload, u8 base_sensor_index,
+static inline void mlxsw_reg_mtbr_pack(char *payload, u16 base_sensor_index,
u8 num_rec)
{
MLXSW_REG_ZERO(mtbr, payload);
@@ -8689,6 +8823,107 @@ static inline void mlxsw_reg_mlcr_pack(char *payload, u8 local_port,
MLXSW_REG_MLCR_DURATION_MAX : 0);
}
+/* MTPPS - Management Pulse Per Second Register
+ * --------------------------------------------
+ * This register provides the device PPS capabilities, configures the PPS in and
+ * out modules and holds the PPS-in time stamp.
+ */
+#define MLXSW_REG_MTPPS_ID 0x9053
+#define MLXSW_REG_MTPPS_LEN 0x3C
+
+MLXSW_REG_DEFINE(mtpps, MLXSW_REG_MTPPS_ID, MLXSW_REG_MTPPS_LEN);
+
+/* reg_mtpps_enable
+ * Enables the PPS functionality for the specific pin.
+ * A boolean variable.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mtpps, enable, 0x20, 31, 1);
+
+enum mlxsw_reg_mtpps_pin_mode {
+ MLXSW_REG_MTPPS_PIN_MODE_VIRTUAL_PIN = 0x2,
+};
+
+/* reg_mtpps_pin_mode
+ * Pin mode to be used. The mode must comply with the supported modes of the
+ * requested pin.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mtpps, pin_mode, 0x20, 8, 4);
+
+#define MLXSW_REG_MTPPS_PIN_SP_VIRTUAL_PIN 7
+
+/* reg_mtpps_pin
+ * Pin to be configured or queried out of the supported pins.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mtpps, pin, 0x20, 0, 8);
+
+/* reg_mtpps_time_stamp
+ * When pin_mode = pps_in, the latched device time when it was triggered from
+ * the external GPIO pin.
+ * When pin_mode = pps_out or virtual_pin or pps_out_and_virtual_pin, the target
+ * time to generate next output signal.
+ * Time is in units of device clock.
+ * Access: RW
+ */
+MLXSW_ITEM64(reg, mtpps, time_stamp, 0x28, 0, 64);
+
+static inline void
+mlxsw_reg_mtpps_vpin_pack(char *payload, u64 time_stamp)
+{
+ MLXSW_REG_ZERO(mtpps, payload);
+ mlxsw_reg_mtpps_pin_set(payload, MLXSW_REG_MTPPS_PIN_SP_VIRTUAL_PIN);
+ mlxsw_reg_mtpps_pin_mode_set(payload,
+ MLXSW_REG_MTPPS_PIN_MODE_VIRTUAL_PIN);
+ mlxsw_reg_mtpps_enable_set(payload, true);
+ mlxsw_reg_mtpps_time_stamp_set(payload, time_stamp);
+}
+
+/* MTUTC - Management UTC Register
+ * -------------------------------
+ * Configures the HW UTC counter.
+ */
+#define MLXSW_REG_MTUTC_ID 0x9055
+#define MLXSW_REG_MTUTC_LEN 0x1C
+
+MLXSW_REG_DEFINE(mtutc, MLXSW_REG_MTUTC_ID, MLXSW_REG_MTUTC_LEN);
+
+enum mlxsw_reg_mtutc_operation {
+ MLXSW_REG_MTUTC_OPERATION_SET_TIME_AT_NEXT_SEC = 0,
+ MLXSW_REG_MTUTC_OPERATION_ADJUST_FREQ = 3,
+};
+
+/* reg_mtutc_operation
+ * Operation.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, mtutc, operation, 0x00, 0, 4);
+
+/* reg_mtutc_freq_adjustment
+ * Frequency adjustment: Every PPS the HW frequency will be
+ * adjusted by this value. Units of HW clock, where HW counts
+ * 10^9 HW clocks for 1 HW second.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mtutc, freq_adjustment, 0x04, 0, 32);
+
+/* reg_mtutc_utc_sec
+ * UTC seconds.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, mtutc, utc_sec, 0x10, 0, 32);
+
+static inline void
+mlxsw_reg_mtutc_pack(char *payload, enum mlxsw_reg_mtutc_operation oper,
+ u32 freq_adj, u32 utc_sec)
+{
+ MLXSW_REG_ZERO(mtutc, payload);
+ mlxsw_reg_mtutc_operation_set(payload, oper);
+ mlxsw_reg_mtutc_freq_adjustment_set(payload, freq_adj);
+ mlxsw_reg_mtutc_utc_sec_set(payload, utc_sec);
+}
+
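Because the hardware counts 10^9 clocks per second, freq_adjustment is effectively a parts-per-billion correction. A PTP adjfine-style caller could derive it from a scaled-ppm value (ppm with a 16-bit binary fraction); the sketch below mirrors the kernel's scaled_ppm_to_ppb() conversion rather than claiming this is how the driver does it:

static void my_mtutc_adj_freq(char *mtutc_pl, long scaled_ppm)
{
	s64 ppb = 1000LL * scaled_ppm;

	ppb >>= 16;	/* drop the 16-bit binary fraction: ppb = ppm * 1000 */
	mlxsw_reg_mtutc_pack(mtutc_pl,
			     MLXSW_REG_MTUTC_OPERATION_ADJUST_FREQ,
			     (u32)(s32)ppb, 0);
}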
/* MCQI - Management Component Query Information
* ---------------------------------------------
* This register allows querying information about firmware components.
@@ -9043,6 +9278,267 @@ static inline void mlxsw_reg_mprs_pack(char *payload, u16 parsing_depth,
mlxsw_reg_mprs_vxlan_udp_dport_set(payload, vxlan_udp_dport);
}
+/* MOGCR - Monitoring Global Configuration Register
+ * ------------------------------------------------
+ */
+#define MLXSW_REG_MOGCR_ID 0x9086
+#define MLXSW_REG_MOGCR_LEN 0x20
+
+MLXSW_REG_DEFINE(mogcr, MLXSW_REG_MOGCR_ID, MLXSW_REG_MOGCR_LEN);
+
+/* reg_mogcr_ptp_iftc
+ * PTP Ingress FIFO Trap Clear
+ * The PTP_ING_FIFO trap provides MTPPTR with clr according
+ * to this value. Default 0.
+ * Reserved when IB switches and when SwitchX/-2, Spectrum-2
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mogcr, ptp_iftc, 0x00, 1, 1);
+
+/* reg_mogcr_ptp_eftc
+ * PTP Egress FIFO Trap Clear
+ * The PTP_EGR_FIFO trap provides MTPPTR with clr according
+ * to this value. Default 0.
+ * Reserved for IB switches and for SwitchX/-2 and Spectrum-2.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mogcr, ptp_eftc, 0x00, 0, 1);
+
+/* MTPPPC - Time Precision Packet Port Configuration
+ * -------------------------------------------------
+ * This register configures which PTP messages should be timestamped. This is
+ * a global configuration, despite the register name.
+ *
+ * Reserved when Spectrum-2.
+ */
+#define MLXSW_REG_MTPPPC_ID 0x9090
+#define MLXSW_REG_MTPPPC_LEN 0x28
+
+MLXSW_REG_DEFINE(mtpppc, MLXSW_REG_MTPPPC_ID, MLXSW_REG_MTPPPC_LEN);
+
+/* reg_mtpppc_ing_timestamp_message_type
+ * Bitwise vector of PTP message types to timestamp at ingress.
+ * MessageType field as defined by IEEE 1588
+ * Each bit corresponds to a value (e.g. Bit0: Sync, Bit1: Delay_Req)
+ * Default all 0
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mtpppc, ing_timestamp_message_type, 0x08, 0, 16);
+
+/* reg_mtpppc_egr_timestamp_message_type
+ * Bitwise vector of PTP message types to timestamp at egress.
+ * MessageType field as defined by IEEE 1588
+ * Each bit corresponds to a value (e.g. Bit0: Sync, Bit1: Delay_Req)
+ * Default all 0
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mtpppc, egr_timestamp_message_type, 0x0C, 0, 16);
+
+static inline void mlxsw_reg_mtpppc_pack(char *payload, u16 ing, u16 egr)
+{
+ MLXSW_REG_ZERO(mtpppc, payload);
+ mlxsw_reg_mtpppc_ing_timestamp_message_type_set(payload, ing);
+ mlxsw_reg_mtpppc_egr_timestamp_message_type_set(payload, egr);
+}
+
+/* MTPPTR - Time Precision Packet Timestamping Reading
+ * ---------------------------------------------------
+ * The MTPPTR is used for reading the per port PTP timestamp FIFO.
+ * There is a trap for packets which are latched to the timestamp FIFO, thus the
+ * SW knows which FIFO to read. Note that packets enter the FIFO before been
+ * trapped. The sequence number is used to synchronize the timestamp FIFO
+ * entries and the trapped packets.
+ * Reserved when Spectrum-2.
+ */
+
+#define MLXSW_REG_MTPPTR_ID 0x9091
+#define MLXSW_REG_MTPPTR_BASE_LEN 0x10 /* base length, without records */
+#define MLXSW_REG_MTPPTR_REC_LEN 0x10 /* record length */
+#define MLXSW_REG_MTPPTR_REC_MAX_COUNT 4
+#define MLXSW_REG_MTPPTR_LEN (MLXSW_REG_MTPPTR_BASE_LEN + \
+ MLXSW_REG_MTPPTR_REC_LEN * MLXSW_REG_MTPPTR_REC_MAX_COUNT)
+
+MLXSW_REG_DEFINE(mtpptr, MLXSW_REG_MTPPTR_ID, MLXSW_REG_MTPPTR_LEN);
+
+/* reg_mtpptr_local_port
+ * Not supported for CPU port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mtpptr, local_port, 0x00, 16, 8);
+
+enum mlxsw_reg_mtpptr_dir {
+ MLXSW_REG_MTPPTR_DIR_INGRESS,
+ MLXSW_REG_MTPPTR_DIR_EGRESS,
+};
+
+/* reg_mtpptr_dir
+ * Direction.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mtpptr, dir, 0x00, 0, 1);
+
+/* reg_mtpptr_clr
+ * Clear the records.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, mtpptr, clr, 0x04, 31, 1);
+
+/* reg_mtpptr_num_rec
+ * Number of valid records in the response.
+ * Range: 0..cap_ptp_timestamp_fifo
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mtpptr, num_rec, 0x08, 0, 4);
+
+/* reg_mtpptr_rec_message_type
+ * MessageType field as defined by IEEE 1588. Each bit corresponds to a value
+ * (e.g. Bit0: Sync, Bit1: Delay_Req)
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, mtpptr, rec_message_type,
+ MLXSW_REG_MTPPTR_BASE_LEN, 8, 4,
+ MLXSW_REG_MTPPTR_REC_LEN, 0, false);
+
+/* reg_mtpptr_rec_domain_number
+ * DomainNumber field as defined by IEEE 1588
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, mtpptr, rec_domain_number,
+ MLXSW_REG_MTPPTR_BASE_LEN, 0, 8,
+ MLXSW_REG_MTPPTR_REC_LEN, 0, false);
+
+/* reg_mtpptr_rec_sequence_id
+ * SequenceId field as defined by IEEE 1588
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, mtpptr, rec_sequence_id,
+ MLXSW_REG_MTPPTR_BASE_LEN, 0, 16,
+ MLXSW_REG_MTPPTR_REC_LEN, 0x4, false);
+
+/* reg_mtpptr_rec_timestamp_high
+ * Timestamp of when the PTP packet has passed through the port. Units of PLL
+ * clock time.
+ * For Spectrum-1 the PLL clock is 156.25Mhz and PLL clock time is 6.4nSec.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, mtpptr, rec_timestamp_high,
+ MLXSW_REG_MTPPTR_BASE_LEN, 0, 32,
+ MLXSW_REG_MTPPTR_REC_LEN, 0x8, false);
+
+/* reg_mtpptr_rec_timestamp_low
+ * See rec_timestamp_high.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, mtpptr, rec_timestamp_low,
+ MLXSW_REG_MTPPTR_BASE_LEN, 0, 32,
+ MLXSW_REG_MTPPTR_REC_LEN, 0xC, false);
+
+static inline void mlxsw_reg_mtpptr_unpack(const char *payload,
+ unsigned int rec,
+ u8 *p_message_type,
+ u8 *p_domain_number,
+ u16 *p_sequence_id,
+ u64 *p_timestamp)
+{
+ u32 timestamp_high, timestamp_low;
+
+ *p_message_type = mlxsw_reg_mtpptr_rec_message_type_get(payload, rec);
+ *p_domain_number = mlxsw_reg_mtpptr_rec_domain_number_get(payload, rec);
+ *p_sequence_id = mlxsw_reg_mtpptr_rec_sequence_id_get(payload, rec);
+ timestamp_high = mlxsw_reg_mtpptr_rec_timestamp_high_get(payload, rec);
+ timestamp_low = mlxsw_reg_mtpptr_rec_timestamp_low_get(payload, rec);
+ *p_timestamp = (u64)timestamp_high << 32 | timestamp_low;
+}
+
+/* MTPTPT - Monitoring Precision Time Protocol Trap Register
+ * ---------------------------------------------------------
+ * This register is used for configuring under which trap to deliver PTP
+ * packets, depending on the type of the packet.
+ */
+#define MLXSW_REG_MTPTPT_ID 0x9092
+#define MLXSW_REG_MTPTPT_LEN 0x08
+
+MLXSW_REG_DEFINE(mtptpt, MLXSW_REG_MTPTPT_ID, MLXSW_REG_MTPTPT_LEN);
+
+enum mlxsw_reg_mtptpt_trap_id {
+ MLXSW_REG_MTPTPT_TRAP_ID_PTP0,
+ MLXSW_REG_MTPTPT_TRAP_ID_PTP1,
+};
+
+/* reg_mtptpt_trap_id
+ * Trap id.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mtptpt, trap_id, 0x00, 0, 4);
+
+/* reg_mtptpt_message_type
+ * Bitwise vector of PTP message types to trap. This is a necessary but not
+ * sufficient condition, since trapping must also be enabled per port. See
+ * MTPPPC.
+ * Message types are defined by IEEE 1588. Each bit corresponds to a value
+ * (e.g. Bit0: Sync, Bit1: Delay_Req).
+ */
+MLXSW_ITEM32(reg, mtptpt, message_type, 0x04, 0, 16);
+
+static inline void mlxsw_reg_mtptptp_pack(char *payload,
+ enum mlxsw_reg_mtptpt_trap_id trap_id,
+ u16 message_type)
+{
+ MLXSW_REG_ZERO(mtptpt, payload);
+ mlxsw_reg_mtptpt_trap_id_set(payload, trap_id);
+ mlxsw_reg_mtptpt_message_type_set(payload, message_type);
+}
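
Taken together, MTPTPT selects the trap under which PTP packets are delivered, while MTPPPC selects which message types get timestamped per direction. The sketch below is illustrative only: example_ptp_trap_config() is a hypothetical helper, and MLXSW_REG_MTPPPC_LEN is assumed from the MTPPPC definition earlier in this patch.

static int example_ptp_trap_config(struct mlxsw_core *mlxsw_core)
{
	char mtptpt_pl[MLXSW_REG_MTPTPT_LEN];
	char mtpppc_pl[MLXSW_REG_MTPPPC_LEN];
	u16 message_type = BIT(0) | BIT(1);	/* Sync and Delay_Req */
	int err;

	/* Deliver the selected message types under the PTP0 trap. */
	mlxsw_reg_mtptptp_pack(mtptpt_pl, MLXSW_REG_MTPTPT_TRAP_ID_PTP0,
			       message_type);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtptpt), mtptpt_pl);
	if (err)
		return err;

	/* Timestamp the same message types on both ingress and egress. */
	mlxsw_reg_mtpppc_pack(mtpppc_pl, message_type, message_type);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtpppc), mtpppc_pl);
}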
+
+/* MGPIR - Management General Peripheral Information Register
+ * ----------------------------------------------------------
+ * MGPIR register allows software to query the hardware and
+ * firmware general information of peripheral entities.
+ */
+#define MLXSW_REG_MGPIR_ID 0x9100
+#define MLXSW_REG_MGPIR_LEN 0xA0
+
+MLXSW_REG_DEFINE(mgpir, MLXSW_REG_MGPIR_ID, MLXSW_REG_MGPIR_LEN);
+
+enum mlxsw_reg_mgpir_device_type {
+ MLXSW_REG_MGPIR_DEVICE_TYPE_NONE,
+ MLXSW_REG_MGPIR_DEVICE_TYPE_GEARBOX_DIE,
+};
+
+/* reg_mgpir_device_type
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mgpir, device_type, 0x00, 24, 4);
+
+/* reg_mgpir_devices_per_flash
+ * Number of devices of device_type per flash (a flash can be shared by a few
+ * devices).
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mgpir, devices_per_flash, 0x00, 16, 8);
+
+/* reg_mgpir_num_of_devices
+ * Number of devices of device_type.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mgpir, num_of_devices, 0x00, 0, 8);
+
+static inline void mlxsw_reg_mgpir_pack(char *payload)
+{
+ MLXSW_REG_ZERO(mgpir, payload);
+}
+
+static inline void
+mlxsw_reg_mgpir_unpack(char *payload, u8 *num_of_devices,
+ enum mlxsw_reg_mgpir_device_type *device_type,
+ u8 *devices_per_flash)
+{
+ if (num_of_devices)
+ *num_of_devices = mlxsw_reg_mgpir_num_of_devices_get(payload);
+ if (device_type)
+ *device_type = mlxsw_reg_mgpir_device_type_get(payload);
+ if (devices_per_flash)
+ *devices_per_flash =
+ mlxsw_reg_mgpir_devices_per_flash_get(payload);
+}
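
As a usage sketch (illustrative only; example_query_gearboxes() is a hypothetical caller), a driver would zero-pack MGPIR, issue a query and unpack only the fields it needs:

static int example_query_gearboxes(struct mlxsw_core *mlxsw_core, u8 *p_num)
{
	enum mlxsw_reg_mgpir_device_type device_type;
	char mgpir_pl[MLXSW_REG_MGPIR_LEN];
	int err;

	mlxsw_reg_mgpir_pack(mgpir_pl);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mgpir), mgpir_pl);
	if (err)
		return err;

	/* Unused output pointers may simply be passed as NULL. */
	mlxsw_reg_mgpir_unpack(mgpir_pl, p_num, &device_type, NULL);
	if (device_type != MLXSW_REG_MGPIR_DEVICE_TYPE_GEARBOX_DIE)
		*p_num = 0;
	return 0;
}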
+
/* TNGCR - Tunneling NVE General Configuration Register
* ----------------------------------------------------
* The TNGCR register is used for setting up the NVE Tunneling configuration.
@@ -10006,6 +10502,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(qpdsm),
MLXSW_REG(qpdpm),
MLXSW_REG(qtctm),
+ MLXSW_REG(qpsc),
MLXSW_REG(pmlp),
MLXSW_REG(pmtu),
MLXSW_REG(ptys),
@@ -10052,12 +10549,19 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(mgir),
MLXSW_REG(mrsr),
MLXSW_REG(mlcr),
+ MLXSW_REG(mtpps),
+ MLXSW_REG(mtutc),
MLXSW_REG(mpsc),
MLXSW_REG(mcqi),
MLXSW_REG(mcc),
MLXSW_REG(mcda),
MLXSW_REG(mgpc),
MLXSW_REG(mprs),
+ MLXSW_REG(mogcr),
+ MLXSW_REG(mtpppc),
+ MLXSW_REG(mtpptr),
+ MLXSW_REG(mtptpt),
+ MLXSW_REG(mgpir),
MLXSW_REG(tngcr),
MLXSW_REG(tnumt),
MLXSW_REG(tnqcr),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 23204356ad88..4d34d42b3b0e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -41,6 +41,7 @@
#include "spectrum_dpipe.h"
#include "spectrum_acl_flex_actions.h"
#include "spectrum_span.h"
+#include "spectrum_ptp.h"
#include "../mlxfw/mlxfw.h"
#define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100)
@@ -146,6 +147,35 @@ struct mlxsw_sp_mlxfw_dev {
struct mlxsw_sp *mlxsw_sp;
};
+struct mlxsw_sp_ptp_ops {
+ struct mlxsw_sp_ptp_clock *
+ (*clock_init)(struct mlxsw_sp *mlxsw_sp, struct device *dev);
+ void (*clock_fini)(struct mlxsw_sp_ptp_clock *clock);
+
+ struct mlxsw_sp_ptp_state *(*init)(struct mlxsw_sp *mlxsw_sp);
+ void (*fini)(struct mlxsw_sp_ptp_state *ptp_state);
+
+ /* Notify the driver that a packet which might be PTP was received. The
+ * driver is responsible for freeing the passed-in SKB.
+ */
+ void (*receive)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
+ u8 local_port);
+
+ /* Notify the driver that a timestamped packet was transmitted. The
+ * driver is responsible for freeing the passed-in SKB.
+ */
+ void (*transmitted)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
+ u8 local_port);
+
+ int (*hwtstamp_get)(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct hwtstamp_config *config);
+ int (*hwtstamp_set)(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct hwtstamp_config *config);
+ void (*shaper_work)(struct work_struct *work);
+ int (*get_ts_info)(struct mlxsw_sp *mlxsw_sp,
+ struct ethtool_ts_info *info);
+};
+
static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev,
u16 component_index, u32 *p_max_size,
u8 *p_align_bits, u16 *p_max_write_size)
@@ -294,6 +324,19 @@ static void mlxsw_sp_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}
+static void mlxsw_sp_status_notify(struct mlxfw_dev *mlxfw_dev,
+ const char *msg, const char *comp_name,
+ u32 done_bytes, u32 total_bytes)
+{
+ struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
+ container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
+
+ devlink_flash_update_status_notify(priv_to_devlink(mlxsw_sp->core),
+ msg, comp_name,
+ done_bytes, total_bytes);
+}
+
static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = {
.component_query = mlxsw_sp_component_query,
.fsm_lock = mlxsw_sp_fsm_lock,
@@ -303,11 +346,13 @@ static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = {
.fsm_activate = mlxsw_sp_fsm_activate,
.fsm_query_state = mlxsw_sp_fsm_query_state,
.fsm_cancel = mlxsw_sp_fsm_cancel,
- .fsm_release = mlxsw_sp_fsm_release
+ .fsm_release = mlxsw_sp_fsm_release,
+ .status_notify = mlxsw_sp_status_notify,
};
static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
- const struct firmware *firmware)
+ const struct firmware *firmware,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_mlxfw_dev mlxsw_sp_mlxfw_dev = {
.mlxfw_dev = {
@@ -320,7 +365,10 @@ static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
int err;
mlxsw_core_fw_flash_start(mlxsw_sp->core);
- err = mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware);
+ devlink_flash_update_begin_notify(priv_to_devlink(mlxsw_sp->core));
+ err = mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev,
+ firmware, extack);
+ devlink_flash_update_end_notify(priv_to_devlink(mlxsw_sp->core));
mlxsw_core_fw_flash_end(mlxsw_sp->core);
return err;
@@ -374,7 +422,7 @@ static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
return err;
}
- err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware);
+ err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware, NULL);
release_firmware(firmware);
if (err)
dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n");
@@ -388,6 +436,27 @@ static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
return 0;
}
+static int mlxsw_sp_flash_update(struct mlxsw_core *mlxsw_core,
+ const char *file_name, const char *component,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ const struct firmware *firmware;
+ int err;
+
+ if (component)
+ return -EOPNOTSUPP;
+
+ err = request_firmware_direct(&firmware, file_name,
+ mlxsw_sp->bus_info->dev);
+ if (err)
+ return err;
+ err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware, extack);
+ release_firmware(firmware);
+
+ return err;
+}
+
int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
unsigned int counter_index, u64 *packets,
u64 *bytes)
@@ -738,6 +807,8 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
u64 len;
int err;
+ memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));
+
if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
return NETDEV_TX_BUSY;
@@ -1437,21 +1508,21 @@ static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
static int
mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_acl_block *acl_block,
- struct tc_cls_flower_offload *f)
+ struct flow_cls_offload *f)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_acl_block_mlxsw_sp(acl_block);
switch (f->command) {
- case TC_CLSFLOWER_REPLACE:
+ case FLOW_CLS_REPLACE:
return mlxsw_sp_flower_replace(mlxsw_sp, acl_block, f);
- case TC_CLSFLOWER_DESTROY:
+ case FLOW_CLS_DESTROY:
mlxsw_sp_flower_destroy(mlxsw_sp, acl_block, f);
return 0;
- case TC_CLSFLOWER_STATS:
+ case FLOW_CLS_STATS:
return mlxsw_sp_flower_stats(mlxsw_sp, acl_block, f);
- case TC_CLSFLOWER_TMPLT_CREATE:
+ case FLOW_CLS_TMPLT_CREATE:
return mlxsw_sp_flower_tmplt_create(mlxsw_sp, acl_block, f);
- case TC_CLSFLOWER_TMPLT_DESTROY:
+ case FLOW_CLS_TMPLT_DESTROY:
mlxsw_sp_flower_tmplt_destroy(mlxsw_sp, acl_block, f);
return 0;
default:
@@ -1514,33 +1585,45 @@ static int mlxsw_sp_setup_tc_block_cb_flower(enum tc_setup_type type,
}
}
+static void mlxsw_sp_tc_block_flower_release(void *cb_priv)
+{
+ struct mlxsw_sp_acl_block *acl_block = cb_priv;
+
+ mlxsw_sp_acl_block_destroy(acl_block);
+}
+
+static LIST_HEAD(mlxsw_sp_block_cb_list);
+
static int
mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
- struct tcf_block *block, bool ingress,
- struct netlink_ext_ack *extack)
+ struct flow_block_offload *f, bool ingress)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_acl_block *acl_block;
- struct tcf_block_cb *block_cb;
+ struct flow_block_cb *block_cb;
+ bool register_block = false;
int err;
- block_cb = tcf_block_cb_lookup(block, mlxsw_sp_setup_tc_block_cb_flower,
- mlxsw_sp);
+ block_cb = flow_block_cb_lookup(f, mlxsw_sp_setup_tc_block_cb_flower,
+ mlxsw_sp);
if (!block_cb) {
- acl_block = mlxsw_sp_acl_block_create(mlxsw_sp, block->net);
+ acl_block = mlxsw_sp_acl_block_create(mlxsw_sp, f->net);
if (!acl_block)
return -ENOMEM;
- block_cb = __tcf_block_cb_register(block,
- mlxsw_sp_setup_tc_block_cb_flower,
- mlxsw_sp, acl_block, extack);
+ block_cb = flow_block_cb_alloc(f->net,
+ mlxsw_sp_setup_tc_block_cb_flower,
+ mlxsw_sp, acl_block,
+ mlxsw_sp_tc_block_flower_release);
if (IS_ERR(block_cb)) {
+ mlxsw_sp_acl_block_destroy(acl_block);
err = PTR_ERR(block_cb);
goto err_cb_register;
}
+ register_block = true;
} else {
- acl_block = tcf_block_cb_priv(block_cb);
+ acl_block = flow_block_cb_priv(block_cb);
}
- tcf_block_cb_incref(block_cb);
+ flow_block_cb_incref(block_cb);
err = mlxsw_sp_acl_block_bind(mlxsw_sp, acl_block,
mlxsw_sp_port, ingress);
if (err)
@@ -1551,28 +1634,31 @@ mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
else
mlxsw_sp_port->eg_acl_block = acl_block;
+ if (register_block) {
+ flow_block_cb_add(block_cb, f);
+ list_add_tail(&block_cb->driver_list, &mlxsw_sp_block_cb_list);
+ }
+
return 0;
err_block_bind:
- if (!tcf_block_cb_decref(block_cb)) {
- __tcf_block_cb_unregister(block, block_cb);
+ if (!flow_block_cb_decref(block_cb))
+ flow_block_cb_free(block_cb);
err_cb_register:
- mlxsw_sp_acl_block_destroy(acl_block);
- }
return err;
}
static void
mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
- struct tcf_block *block, bool ingress)
+ struct flow_block_offload *f, bool ingress)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_acl_block *acl_block;
- struct tcf_block_cb *block_cb;
+ struct flow_block_cb *block_cb;
int err;
- block_cb = tcf_block_cb_lookup(block, mlxsw_sp_setup_tc_block_cb_flower,
- mlxsw_sp);
+ block_cb = flow_block_cb_lookup(f, mlxsw_sp_setup_tc_block_cb_flower,
+ mlxsw_sp);
if (!block_cb)
return;
@@ -1581,50 +1667,63 @@ mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
else
mlxsw_sp_port->eg_acl_block = NULL;
- acl_block = tcf_block_cb_priv(block_cb);
+ acl_block = flow_block_cb_priv(block_cb);
err = mlxsw_sp_acl_block_unbind(mlxsw_sp, acl_block,
mlxsw_sp_port, ingress);
- if (!err && !tcf_block_cb_decref(block_cb)) {
- __tcf_block_cb_unregister(block, block_cb);
- mlxsw_sp_acl_block_destroy(acl_block);
+ if (!err && !flow_block_cb_decref(block_cb)) {
+ flow_block_cb_remove(block_cb, f);
+ list_del(&block_cb->driver_list);
}
}
static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
- struct tc_block_offload *f)
+ struct flow_block_offload *f)
{
+ struct flow_block_cb *block_cb;
tc_setup_cb_t *cb;
bool ingress;
int err;
- if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
+ if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
cb = mlxsw_sp_setup_tc_block_cb_matchall_ig;
ingress = true;
- } else if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
+ } else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
cb = mlxsw_sp_setup_tc_block_cb_matchall_eg;
ingress = false;
} else {
return -EOPNOTSUPP;
}
+ f->driver_block_list = &mlxsw_sp_block_cb_list;
+
switch (f->command) {
- case TC_BLOCK_BIND:
- err = tcf_block_cb_register(f->block, cb, mlxsw_sp_port,
- mlxsw_sp_port, f->extack);
- if (err)
- return err;
- err = mlxsw_sp_setup_tc_block_flower_bind(mlxsw_sp_port,
- f->block, ingress,
- f->extack);
+ case FLOW_BLOCK_BIND:
+ if (flow_block_cb_is_busy(cb, mlxsw_sp_port,
+ &mlxsw_sp_block_cb_list))
+ return -EBUSY;
+
+ block_cb = flow_block_cb_alloc(f->net, cb, mlxsw_sp_port,
+ mlxsw_sp_port, NULL);
+ if (IS_ERR(block_cb))
+ return PTR_ERR(block_cb);
+ err = mlxsw_sp_setup_tc_block_flower_bind(mlxsw_sp_port, f,
+ ingress);
if (err) {
- tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port);
+ flow_block_cb_free(block_cb);
return err;
}
+ flow_block_cb_add(block_cb, f);
+ list_add_tail(&block_cb->driver_list, &mlxsw_sp_block_cb_list);
return 0;
- case TC_BLOCK_UNBIND:
+ case FLOW_BLOCK_UNBIND:
mlxsw_sp_setup_tc_block_flower_unbind(mlxsw_sp_port,
- f->block, ingress);
- tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port);
+ f, ingress);
+ block_cb = flow_block_cb_lookup(f, cb, mlxsw_sp_port);
+ if (!block_cb)
+ return -ENOENT;
+
+ flow_block_cb_remove(block_cb, f);
+ list_del(&block_cb->driver_list);
return 0;
default:
return -EOPNOTSUPP;
@@ -1745,6 +1844,65 @@ mlxsw_sp_port_get_devlink_port(struct net_device *dev)
mlxsw_sp_port->local_port);
}
+static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct ifreq *ifr)
+{
+ struct hwtstamp_config config;
+ int err;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port,
+ &config);
+ if (err)
+ return err;
+
+ if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct ifreq *ifr)
+{
+ struct hwtstamp_config config;
+ int err;
+
+ err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port,
+ &config);
+ if (err)
+ return err;
+
+ if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct hwtstamp_config config = {0};
+
+ mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config);
+}
+
+static int
+mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr);
+ case SIOCGHWTSTAMP:
+ return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
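
The two handlers above are reached from userspace through the new ndo_do_ioctl hook. A minimal userspace sketch follows (not part of the patch; the interface name "swp1" and the filter choice are examples, and which rx_filter values are accepted depends on the per-ASIC hwtstamp_set implementation):

#include <linux/net_tstamp.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <string.h>
#include <sys/ioctl.h>

static int example_enable_ptp_timestamping(int fd)
{
	struct hwtstamp_config cfg = {
		.tx_type = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "swp1", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	/* fd is any open socket, e.g. socket(AF_INET, SOCK_DGRAM, 0). */
	return ioctl(fd, SIOCSHWTSTAMP, &ifr);
}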
+
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
.ndo_open = mlxsw_sp_port_open,
.ndo_stop = mlxsw_sp_port_stop,
@@ -1760,6 +1918,7 @@ static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
.ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid,
.ndo_set_features = mlxsw_sp_set_features,
.ndo_get_devlink_port = mlxsw_sp_port_get_devlink_port,
+ .ndo_do_ioctl = mlxsw_sp_port_ioctl,
};
static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
@@ -2525,28 +2684,33 @@ mlxsw_sp1_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto,
}
}
+static u32
+mlxsw_sp1_from_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto)
+{
+ int i;
+
+ for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
+ if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask)
+ return mlxsw_sp1_port_link_mode[i].speed;
+ }
+
+ return SPEED_UNKNOWN;
+}
+
static void
mlxsw_sp1_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
u32 ptys_eth_proto,
struct ethtool_link_ksettings *cmd)
{
- u32 speed = SPEED_UNKNOWN;
- u8 duplex = DUPLEX_UNKNOWN;
- int i;
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
if (!carrier_ok)
- goto out;
+ return;
- for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
- if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask) {
- speed = mlxsw_sp1_port_link_mode[i].speed;
- duplex = DUPLEX_FULL;
- break;
- }
- }
-out:
- cmd->base.speed = speed;
- cmd->base.duplex = duplex;
+ cmd->base.speed = mlxsw_sp1_from_ptys_speed(mlxsw_sp, ptys_eth_proto);
+ if (cmd->base.speed != SPEED_UNKNOWN)
+ cmd->base.duplex = DUPLEX_FULL;
}
static u32
@@ -2617,6 +2781,7 @@ static const struct mlxsw_sp_port_type_speed_ops
mlxsw_sp1_port_type_speed_ops = {
.from_ptys_supported_port = mlxsw_sp1_from_ptys_supported_port,
.from_ptys_link = mlxsw_sp1_from_ptys_link,
+ .from_ptys_speed = mlxsw_sp1_from_ptys_speed,
.from_ptys_speed_duplex = mlxsw_sp1_from_ptys_speed_duplex,
.to_ptys_advert_link = mlxsw_sp1_to_ptys_advert_link,
.to_ptys_speed = mlxsw_sp1_to_ptys_speed,
@@ -2867,28 +3032,33 @@ mlxsw_sp2_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto,
}
}
+static u32
+mlxsw_sp2_from_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto)
+{
+ int i;
+
+ for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
+ if (ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask)
+ return mlxsw_sp2_port_link_mode[i].speed;
+ }
+
+ return SPEED_UNKNOWN;
+}
+
static void
mlxsw_sp2_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
u32 ptys_eth_proto,
struct ethtool_link_ksettings *cmd)
{
- u32 speed = SPEED_UNKNOWN;
- u8 duplex = DUPLEX_UNKNOWN;
- int i;
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
if (!carrier_ok)
- goto out;
+ return;
- for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
- if (ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask) {
- speed = mlxsw_sp2_port_link_mode[i].speed;
- duplex = DUPLEX_FULL;
- break;
- }
- }
-out:
- cmd->base.speed = speed;
- cmd->base.duplex = duplex;
+ cmd->base.speed = mlxsw_sp2_from_ptys_speed(mlxsw_sp, ptys_eth_proto);
+ if (cmd->base.speed != SPEED_UNKNOWN)
+ cmd->base.duplex = DUPLEX_FULL;
}
static bool
@@ -2999,6 +3169,7 @@ static const struct mlxsw_sp_port_type_speed_ops
mlxsw_sp2_port_type_speed_ops = {
.from_ptys_supported_port = mlxsw_sp2_from_ptys_supported_port,
.from_ptys_link = mlxsw_sp2_from_ptys_link,
+ .from_ptys_speed = mlxsw_sp2_from_ptys_speed,
.from_ptys_speed_duplex = mlxsw_sp2_from_ptys_speed_duplex,
.to_ptys_advert_link = mlxsw_sp2_to_ptys_advert_link,
.to_ptys_speed = mlxsw_sp2_to_ptys_speed,
@@ -3159,31 +3330,6 @@ mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
return 0;
}
-static int mlxsw_sp_flash_device(struct net_device *dev,
- struct ethtool_flash *flash)
-{
- struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- const struct firmware *firmware;
- int err;
-
- if (flash->region != ETHTOOL_FLASH_ALL_REGIONS)
- return -EOPNOTSUPP;
-
- dev_hold(dev);
- rtnl_unlock();
-
- err = request_firmware_direct(&firmware, flash->data, &dev->dev);
- if (err)
- goto out;
- err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware);
- release_firmware(firmware);
-out:
- rtnl_lock();
- dev_put(dev);
- return err;
-}
-
static int mlxsw_sp_get_module_info(struct net_device *netdev,
struct ethtool_modinfo *modinfo)
{
@@ -3213,6 +3359,15 @@ static int mlxsw_sp_get_module_eeprom(struct net_device *netdev,
return err;
}
+static int
+mlxsw_sp_get_ts_info(struct net_device *netdev, struct ethtool_ts_info *info)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+
+ return mlxsw_sp->ptp_ops->get_ts_info(mlxsw_sp, info);
+}
+
static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
.get_drvinfo = mlxsw_sp_port_get_drvinfo,
.get_link = ethtool_op_get_link,
@@ -3224,9 +3379,9 @@ static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
.get_sset_count = mlxsw_sp_port_get_sset_count,
.get_link_ksettings = mlxsw_sp_port_get_link_ksettings,
.set_link_ksettings = mlxsw_sp_port_set_link_ksettings,
- .flash_device = mlxsw_sp_flash_device,
.get_module_info = mlxsw_sp_get_module_info,
.get_module_eeprom = mlxsw_sp_get_module_eeprom,
+ .get_ts_info = mlxsw_sp_get_ts_info,
};
static int
@@ -3343,8 +3498,9 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
return err;
}
- /* Make sure the max shaper is disabled in all hierarchies that
- * support it.
+ /* Make sure the max shaper is disabled in all hierarchies that support
+ * it. Note that this disables the PTP shaper (ptps), but that is intended
+ * for the initial configuration.
*/
err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
@@ -3589,6 +3745,9 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
}
mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan;
+ INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw,
+ mlxsw_sp->ptp_ops->shaper_work);
+
mlxsw_sp->ports[local_port] = mlxsw_sp_port;
err = register_netdev(dev);
if (err) {
@@ -3643,6 +3802,8 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
+ cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
+ mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
mlxsw_sp->ports[local_port] = NULL;
@@ -3927,14 +4088,55 @@ static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
if (status == MLXSW_PORT_OPER_STATUS_UP) {
netdev_info(mlxsw_sp_port->dev, "link up\n");
netif_carrier_on(mlxsw_sp_port->dev);
+ mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0);
} else {
netdev_info(mlxsw_sp_port->dev, "link down\n");
netif_carrier_off(mlxsw_sp_port->dev);
}
}
-static void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
- u8 local_port, void *priv)
+static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp,
+ char *mtpptr_pl, bool ingress)
+{
+ u8 local_port;
+ u8 num_rec;
+ int i;
+
+ local_port = mlxsw_reg_mtpptr_local_port_get(mtpptr_pl);
+ num_rec = mlxsw_reg_mtpptr_num_rec_get(mtpptr_pl);
+ for (i = 0; i < num_rec; i++) {
+ u8 domain_number;
+ u8 message_type;
+ u16 sequence_id;
+ u64 timestamp;
+
+ mlxsw_reg_mtpptr_unpack(mtpptr_pl, i, &message_type,
+ &domain_number, &sequence_id,
+ &timestamp);
+ mlxsw_sp1_ptp_got_timestamp(mlxsw_sp, ingress, local_port,
+ message_type, domain_number,
+ sequence_id, timestamp);
+ }
+}
+
+static void mlxsw_sp1_ptp_ing_fifo_event_func(const struct mlxsw_reg_info *reg,
+ char *mtpptr_pl, void *priv)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+
+ mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, true);
+}
+
+static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg,
+ char *mtpptr_pl, void *priv)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+
+ mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, false);
+}
+
+void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
+ u8 local_port, void *priv)
{
struct mlxsw_sp *mlxsw_sp = priv;
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
@@ -4008,6 +4210,14 @@ out:
consume_skb(skb);
}
+static void mlxsw_sp_rx_listener_ptp(struct sk_buff *skb, u8 local_port,
+ void *priv)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+
+ mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port);
+}
+
#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \
MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \
_is_ctrl, SP_##_trap_group, DISCARD)
@@ -4029,7 +4239,8 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
/* L2 traps */
MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true),
MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true),
- MLXSW_SP_RXL_NO_MARK(LLDP, TRAP_TO_CPU, LLDP, true),
+ MLXSW_RXL(mlxsw_sp_rx_listener_ptp, LLDP, TRAP_TO_CPU,
+ false, SP_LLDP, DISCARD),
MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false),
MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false),
MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false),
@@ -4098,6 +4309,16 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
/* NVE traps */
MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, ARP, false),
MLXSW_SP_RXL_NO_MARK(NVE_DECAP_ARP, TRAP_TO_CPU, ARP, false),
+ /* PTP traps */
+ MLXSW_RXL(mlxsw_sp_rx_listener_ptp, PTP0, TRAP_TO_CPU,
+ false, SP_PTP0, DISCARD),
+ MLXSW_SP_RXL_NO_MARK(PTP1, TRAP_TO_CPU, PTP1, false),
+};
+
+static const struct mlxsw_listener mlxsw_sp1_listener[] = {
+ /* Events */
+ MLXSW_EVENTL(mlxsw_sp1_ptp_egr_fifo_event_func, PTP_EGR_FIFO, SP_PTP0),
+ MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0),
};
static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
@@ -4149,6 +4370,14 @@ static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
rate = 1024;
burst_size = 7;
break;
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0:
+ rate = 24 * 1024;
+ burst_size = 12;
+ break;
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1:
+ rate = 19 * 1024;
+ burst_size = 12;
+ break;
default:
continue;
}
@@ -4187,6 +4416,7 @@ static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0:
priority = 5;
tc = 5;
break;
@@ -4204,6 +4434,7 @@ static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF:
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1:
priority = 2;
tc = 2;
break;
@@ -4237,22 +4468,16 @@ static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
return 0;
}
-static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
+static int mlxsw_sp_traps_register(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_listener listeners[],
+ size_t listeners_count)
{
int i;
int err;
- err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
- if (err)
- return err;
-
- err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
- if (err)
- return err;
-
- for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
+ for (i = 0; i < listeners_count; i++) {
err = mlxsw_core_trap_register(mlxsw_sp->core,
- &mlxsw_sp_listener[i],
+ &listeners[i],
mlxsw_sp);
if (err)
goto err_listener_register;
@@ -4263,23 +4488,63 @@ static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
err_listener_register:
for (i--; i >= 0; i--) {
mlxsw_core_trap_unregister(mlxsw_sp->core,
- &mlxsw_sp_listener[i],
+ &listeners[i],
mlxsw_sp);
}
return err;
}
-static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
+static void mlxsw_sp_traps_unregister(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_listener listeners[],
+ size_t listeners_count)
{
int i;
- for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
+ for (i = 0; i < listeners_count; i++) {
mlxsw_core_trap_unregister(mlxsw_sp->core,
- &mlxsw_sp_listener[i],
+ &listeners[i],
mlxsw_sp);
}
}
+static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
+{
+ int err;
+
+ err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp_listener,
+ ARRAY_SIZE(mlxsw_sp_listener));
+ if (err)
+ return err;
+
+ err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp->listeners,
+ mlxsw_sp->listeners_count);
+ if (err)
+ goto err_extra_traps_init;
+
+ return 0;
+
+err_extra_traps_init:
+ mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener,
+ ARRAY_SIZE(mlxsw_sp_listener));
+ return err;
+}
+
+static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp->listeners,
+ mlxsw_sp->listeners_count);
+ mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener,
+ ARRAY_SIZE(mlxsw_sp_listener));
+}
+
#define MLXSW_SP_LAG_SEED_INIT 0xcafecafe
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
@@ -4332,6 +4597,32 @@ static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
}
+static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = {
+ .clock_init = mlxsw_sp1_ptp_clock_init,
+ .clock_fini = mlxsw_sp1_ptp_clock_fini,
+ .init = mlxsw_sp1_ptp_init,
+ .fini = mlxsw_sp1_ptp_fini,
+ .receive = mlxsw_sp1_ptp_receive,
+ .transmitted = mlxsw_sp1_ptp_transmitted,
+ .hwtstamp_get = mlxsw_sp1_ptp_hwtstamp_get,
+ .hwtstamp_set = mlxsw_sp1_ptp_hwtstamp_set,
+ .shaper_work = mlxsw_sp1_ptp_shaper_work,
+ .get_ts_info = mlxsw_sp1_ptp_get_ts_info,
+};
+
+static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
+ .clock_init = mlxsw_sp2_ptp_clock_init,
+ .clock_fini = mlxsw_sp2_ptp_clock_fini,
+ .init = mlxsw_sp2_ptp_init,
+ .fini = mlxsw_sp2_ptp_fini,
+ .receive = mlxsw_sp2_ptp_receive,
+ .transmitted = mlxsw_sp2_ptp_transmitted,
+ .hwtstamp_get = mlxsw_sp2_ptp_hwtstamp_get,
+ .hwtstamp_set = mlxsw_sp2_ptp_hwtstamp_set,
+ .shaper_work = mlxsw_sp2_ptp_shaper_work,
+ .get_ts_info = mlxsw_sp2_ptp_get_ts_info,
+};
+
static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
unsigned long event, void *ptr);
@@ -4429,6 +4720,28 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
goto err_router_init;
}
+ if (mlxsw_sp->bus_info->read_frc_capable) {
+ /* NULL is a valid return value from clock_init */
+ mlxsw_sp->clock =
+ mlxsw_sp->ptp_ops->clock_init(mlxsw_sp,
+ mlxsw_sp->bus_info->dev);
+ if (IS_ERR(mlxsw_sp->clock)) {
+ err = PTR_ERR(mlxsw_sp->clock);
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to init ptp clock\n");
+ goto err_ptp_clock_init;
+ }
+ }
+
+ if (mlxsw_sp->clock) {
+ /* NULL is a valid return value from ptp_ops->init */
+ mlxsw_sp->ptp_state = mlxsw_sp->ptp_ops->init(mlxsw_sp);
+ if (IS_ERR(mlxsw_sp->ptp_state)) {
+ err = PTR_ERR(mlxsw_sp->ptp_state);
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PTP\n");
+ goto err_ptp_init;
+ }
+ }
+
/* Initialize netdevice notifier after router and SPAN is initialized,
* so that the event handler can use router structures and call SPAN
* respin.
@@ -4459,6 +4772,12 @@ err_ports_create:
err_dpipe_init:
unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
err_netdev_notifier:
+ if (mlxsw_sp->clock)
+ mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
+err_ptp_init:
+ if (mlxsw_sp->clock)
+ mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
+err_ptp_clock_init:
mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
mlxsw_sp_acl_fini(mlxsw_sp);
@@ -4502,6 +4821,9 @@ static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->rif_ops_arr = mlxsw_sp1_rif_ops_arr;
mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals;
mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops;
+ mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops;
+ mlxsw_sp->listeners = mlxsw_sp1_listener;
+ mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info);
}
@@ -4521,6 +4843,7 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
+ mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info);
}
@@ -4532,6 +4855,10 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
mlxsw_sp_ports_remove(mlxsw_sp);
mlxsw_sp_dpipe_fini(mlxsw_sp);
unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
+ if (mlxsw_sp->clock) {
+ mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
+ mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
+ }
mlxsw_sp_router_fini(mlxsw_sp);
mlxsw_sp_acl_fini(mlxsw_sp);
mlxsw_sp_nve_fini(mlxsw_sp);
@@ -4874,6 +5201,15 @@ static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core)
mlxsw_sp_params_unregister(mlxsw_core);
}
+static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core,
+ struct sk_buff *skb, u8 local_port)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+
+ skb_pull(skb, MLXSW_TXHDR_LEN);
+ mlxsw_sp->ptp_ops->transmitted(mlxsw_sp, skb, local_port);
+}
+
static struct mlxsw_driver mlxsw_sp1_driver = {
.kind = mlxsw_sp1_driver_name,
.priv_size = sizeof(struct mlxsw_sp),
@@ -4892,11 +5228,13 @@ static struct mlxsw_driver mlxsw_sp1_driver = {
.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
+ .flash_update = mlxsw_sp_flash_update,
.txhdr_construct = mlxsw_sp_txhdr_construct,
.resources_register = mlxsw_sp1_resources_register,
.kvd_sizes_get = mlxsw_sp_kvd_sizes_get,
.params_register = mlxsw_sp_params_register,
.params_unregister = mlxsw_sp_params_unregister,
+ .ptp_transmitted = mlxsw_sp_ptp_transmitted,
.txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp1_config_profile,
.res_query_enabled = true,
@@ -4920,10 +5258,12 @@ static struct mlxsw_driver mlxsw_sp2_driver = {
.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
+ .flash_update = mlxsw_sp_flash_update,
.txhdr_construct = mlxsw_sp_txhdr_construct,
.resources_register = mlxsw_sp2_resources_register,
.params_register = mlxsw_sp2_params_register,
.params_unregister = mlxsw_sp2_params_unregister,
+ .ptp_transmitted = mlxsw_sp_ptp_transmitted,
.txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp2_config_profile,
.res_query_enabled = true,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 8601b3041acd..a252b080dda9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -136,6 +136,8 @@ struct mlxsw_sp_acl_tcam_ops;
struct mlxsw_sp_nve_ops;
struct mlxsw_sp_sb_vals;
struct mlxsw_sp_port_type_speed_ops;
+struct mlxsw_sp_ptp_state;
+struct mlxsw_sp_ptp_ops;
struct mlxsw_sp {
struct mlxsw_sp_port **ports;
@@ -155,6 +157,8 @@ struct mlxsw_sp {
struct mlxsw_sp_kvdl *kvdl;
struct mlxsw_sp_nve *nve;
struct notifier_block netdevice_nb;
+ struct mlxsw_sp_ptp_clock *clock;
+ struct mlxsw_sp_ptp_state *ptp_state;
struct mlxsw_sp_counter_pool *counter_pool;
struct {
@@ -172,6 +176,9 @@ struct mlxsw_sp {
const struct mlxsw_sp_rif_ops **rif_ops_arr;
const struct mlxsw_sp_sb_vals *sb_vals;
const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops;
+ const struct mlxsw_sp_ptp_ops *ptp_ops;
+ const struct mlxsw_listener *listeners;
+ size_t listeners_count;
};
static inline struct mlxsw_sp_upper *
@@ -259,6 +266,12 @@ struct mlxsw_sp_port {
unsigned acl_rule_count;
struct mlxsw_sp_acl_block *ing_acl_block;
struct mlxsw_sp_acl_block *eg_acl_block;
+ struct {
+ struct delayed_work shaper_dw;
+ struct hwtstamp_config hwtstamp_config;
+ u16 ing_types;
+ u16 egr_types;
+ } ptp;
};
struct mlxsw_sp_port_type_speed_ops {
@@ -267,6 +280,7 @@ struct mlxsw_sp_port_type_speed_ops {
struct ethtool_link_ksettings *cmd);
void (*from_ptys_link)(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto,
unsigned long *mode);
+ u32 (*from_ptys_speed)(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto);
void (*from_ptys_speed_duplex)(struct mlxsw_sp *mlxsw_sp,
bool carrier_ok, u32 ptys_eth_proto,
struct ethtool_link_ksettings *cmd);
@@ -435,6 +449,8 @@ struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
extern struct notifier_block mlxsw_sp_switchdev_notifier;
/* spectrum.c */
+void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
+ u8 local_port, void *priv);
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
bool dwrr, u8 dwrr_weight);
@@ -620,6 +636,15 @@ enum mlxsw_sp_acl_profile {
MLXSW_SP_ACL_PROFILE_MR,
};
+struct mlxsw_sp_acl_block {
+ struct list_head binding_list;
+ struct mlxsw_sp_acl_ruleset *ruleset_zero;
+ struct mlxsw_sp *mlxsw_sp;
+ unsigned int rule_count;
+ unsigned int disable_count;
+ struct net *net;
+};
+
struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl);
struct mlxsw_sp *mlxsw_sp_acl_block_mlxsw_sp(struct mlxsw_sp_acl_block *block);
unsigned int mlxsw_sp_acl_block_rule_count(struct mlxsw_sp_acl_block *block);
@@ -782,19 +807,19 @@ extern const struct mlxsw_afk_ops mlxsw_sp2_afk_ops;
/* spectrum_flower.c */
int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block,
- struct tc_cls_flower_offload *f);
+ struct flow_cls_offload *f);
void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block,
- struct tc_cls_flower_offload *f);
+ struct flow_cls_offload *f);
int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block,
- struct tc_cls_flower_offload *f);
+ struct flow_cls_offload *f);
int mlxsw_sp_flower_tmplt_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block,
- struct tc_cls_flower_offload *f);
+ struct flow_cls_offload *f);
void mlxsw_sp_flower_tmplt_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block,
- struct tc_cls_flower_offload *f);
+ struct flow_cls_offload *f);
/* spectrum_qdisc.c */
int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index a146a44634e9..e8ac90564dbe 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -45,14 +45,6 @@ struct mlxsw_sp_acl_block_binding {
bool ingress;
};
-struct mlxsw_sp_acl_block {
- struct list_head binding_list;
- struct mlxsw_sp_acl_ruleset *ruleset_zero;
- struct mlxsw_sp *mlxsw_sp;
- unsigned int rule_count;
- unsigned int disable_count;
-};
-
struct mlxsw_sp_acl_ruleset_ht_key {
struct mlxsw_sp_acl_block *block;
u32 chain_index;
@@ -221,6 +213,7 @@ struct mlxsw_sp_acl_block *mlxsw_sp_acl_block_create(struct mlxsw_sp *mlxsw_sp,
return NULL;
INIT_LIST_HEAD(&block->binding_list);
block->mlxsw_sp = mlxsw_sp;
+ block->net = net;
return block;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
index 2a998dea4f39..279c241f76f0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
@@ -12,7 +12,7 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_dmac[] = {
MLXSW_AFK_ELEMENT_INST_BUF(DMAC_0_31, 0x02, 4),
MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 13, 3),
MLXSW_AFK_ELEMENT_INST_U32(VID, 0x08, 0, 12),
- MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 8),
+ MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
};
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac[] = {
@@ -20,7 +20,7 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac[] = {
MLXSW_AFK_ELEMENT_INST_BUF(SMAC_0_31, 0x02, 4),
MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 13, 3),
MLXSW_AFK_ELEMENT_INST_U32(VID, 0x08, 0, 12),
- MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 8),
+ MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
};
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac_ex[] = {
@@ -32,13 +32,13 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac_ex[] = {
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_sip[] = {
MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x00, 4),
MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8),
- MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 8),
+ MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
};
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_dip[] = {
MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_0_31, 0x00, 4),
MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8),
- MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 8),
+ MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
};
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4[] = {
@@ -149,7 +149,7 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_4[] = {
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_5[] = {
MLXSW_AFK_ELEMENT_INST_U32(VID, 0x04, 16, 12),
- MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x04, 0, 8), /* RX_ACL_SYSTEM_PORT */
+ MLXSW_AFK_ELEMENT_INST_EXT_U32(SRC_SYS_PORT, 0x04, 0, 8, -1, true), /* RX_ACL_SYSTEM_PORT */
};
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_0[] = {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index 96b23c856f4d..202e9a246019 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -120,8 +120,51 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
return 0;
}
+static int mlxsw_sp_flower_parse_meta(struct mlxsw_sp_acl_rule_info *rulei,
+ struct flow_cls_offload *f,
+ struct mlxsw_sp_acl_block *block)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ struct net_device *ingress_dev;
+ struct flow_match_meta match;
+
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
+ return 0;
+
+ flow_rule_match_meta(rule, &match);
+ if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
+ NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported ingress ifindex mask");
+ return -EINVAL;
+ }
+
+ ingress_dev = __dev_get_by_index(block->net,
+ match.key->ingress_ifindex);
+ if (!ingress_dev) {
+ NL_SET_ERR_MSG_MOD(f->common.extack, "Can't find specified ingress port to match on");
+ return -EINVAL;
+ }
+
+ if (!mlxsw_sp_port_dev_check(ingress_dev)) {
+ NL_SET_ERR_MSG_MOD(f->common.extack, "Can't match on non-mlxsw ingress port");
+ return -EINVAL;
+ }
+
+ mlxsw_sp_port = netdev_priv(ingress_dev);
+ if (mlxsw_sp_port->mlxsw_sp != block->mlxsw_sp) {
+ NL_SET_ERR_MSG_MOD(f->common.extack, "Can't match on a port from different device");
+ return -EINVAL;
+ }
+
+ mlxsw_sp_acl_rulei_keymask_u32(rulei,
+ MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
+ mlxsw_sp_port->local_port,
+ 0xFFFFFFFF);
+ return 0;
+}
+
static void mlxsw_sp_flower_parse_ipv4(struct mlxsw_sp_acl_rule_info *rulei,
- struct tc_cls_flower_offload *f)
+ struct flow_cls_offload *f)
{
struct flow_match_ipv4_addrs match;
@@ -136,7 +179,7 @@ static void mlxsw_sp_flower_parse_ipv4(struct mlxsw_sp_acl_rule_info *rulei,
}
static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei,
- struct tc_cls_flower_offload *f)
+ struct flow_cls_offload *f)
{
struct flow_match_ipv6_addrs match;
@@ -170,10 +213,10 @@ static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei,
static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
- struct tc_cls_flower_offload *f,
+ struct flow_cls_offload *f,
u8 ip_proto)
{
- const struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
+ const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
struct flow_match_ports match;
if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS))
@@ -197,10 +240,10 @@ static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
- struct tc_cls_flower_offload *f,
+ struct flow_cls_offload *f,
u8 ip_proto)
{
- const struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
+ const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
struct flow_match_tcp match;
if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP))
@@ -222,10 +265,10 @@ static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
- struct tc_cls_flower_offload *f,
+ struct flow_cls_offload *f,
u16 n_proto)
{
- const struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
+ const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
struct flow_match_ip match;
if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP))
@@ -256,9 +299,9 @@ static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block,
struct mlxsw_sp_acl_rule_info *rulei,
- struct tc_cls_flower_offload *f)
+ struct flow_cls_offload *f)
{
- struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
struct flow_dissector *dissector = rule->match.dissector;
u16 n_proto_mask = 0;
u16 n_proto_key = 0;
@@ -267,7 +310,8 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
int err;
if (dissector->used_keys &
- ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ ~(BIT(FLOW_DISSECTOR_KEY_META) |
+ BIT(FLOW_DISSECTOR_KEY_CONTROL) |
BIT(FLOW_DISSECTOR_KEY_BASIC) |
BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
@@ -283,6 +327,10 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_acl_rulei_priority(rulei, f->common.prio);
+ err = mlxsw_sp_flower_parse_meta(rulei, f, block);
+ if (err)
+ return err;
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
struct flow_match_control match;
@@ -378,7 +426,7 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block,
- struct tc_cls_flower_offload *f)
+ struct flow_cls_offload *f)
{
struct mlxsw_sp_acl_rule_info *rulei;
struct mlxsw_sp_acl_ruleset *ruleset;
@@ -425,7 +473,7 @@ err_rule_create:
void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block,
- struct tc_cls_flower_offload *f)
+ struct flow_cls_offload *f)
{
struct mlxsw_sp_acl_ruleset *ruleset;
struct mlxsw_sp_acl_rule *rule;
@@ -447,7 +495,7 @@ void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block,
- struct tc_cls_flower_offload *f)
+ struct flow_cls_offload *f)
{
struct mlxsw_sp_acl_ruleset *ruleset;
struct mlxsw_sp_acl_rule *rule;
@@ -483,7 +531,7 @@ err_rule_get_stats:
int mlxsw_sp_flower_tmplt_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block,
- struct tc_cls_flower_offload *f)
+ struct flow_cls_offload *f)
{
struct mlxsw_sp_acl_ruleset *ruleset;
struct mlxsw_sp_acl_rule_info rulei;
@@ -504,7 +552,7 @@ int mlxsw_sp_flower_tmplt_create(struct mlxsw_sp *mlxsw_sp,
void mlxsw_sp_flower_tmplt_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block,
- struct tc_cls_flower_offload *f)
+ struct flow_cls_offload *f)
{
struct mlxsw_sp_acl_ruleset *ruleset;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
new file mode 100644
index 000000000000..bd9c2bc2d5d6
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
@@ -0,0 +1,1111 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2019 Mellanox Technologies. All rights reserved */
+
+#include <linux/ptp_clock_kernel.h>
+#include <linux/clocksource.h>
+#include <linux/timecounter.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/rhashtable.h>
+#include <linux/ptp_classify.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/net_tstamp.h>
+
+#include "spectrum.h"
+#include "spectrum_ptp.h"
+#include "core.h"
+
+#define MLXSW_SP1_PTP_CLOCK_CYCLES_SHIFT 29
+#define MLXSW_SP1_PTP_CLOCK_FREQ_KHZ 156257 /* 6.4 ns */
+#define MLXSW_SP1_PTP_CLOCK_MASK 64
+
+#define MLXSW_SP1_PTP_HT_GC_INTERVAL 500 /* ms */
+
+/* How long, approximately, unmatched entries should stay in the hash table
+ * before they are collected. Should be evenly divisible by the GC interval.
+ */
+#define MLXSW_SP1_PTP_HT_GC_TIMEOUT 1000 /* ms */
+
+struct mlxsw_sp_ptp_state {
+ struct mlxsw_sp *mlxsw_sp;
+ struct rhashtable unmatched_ht;
+ spinlock_t unmatched_lock; /* protects the HT */
+ struct delayed_work ht_gc_dw;
+ u32 gc_cycle;
+};
+
+struct mlxsw_sp1_ptp_key {
+ u8 local_port;
+ u8 message_type;
+ u16 sequence_id;
+ u8 domain_number;
+ bool ingress;
+};
+
+struct mlxsw_sp1_ptp_unmatched {
+ struct mlxsw_sp1_ptp_key key;
+ struct rhash_head ht_node;
+ struct rcu_head rcu;
+ struct sk_buff *skb;
+ u64 timestamp;
+ u32 gc_cycle;
+};
+
+static const struct rhashtable_params mlxsw_sp1_ptp_unmatched_ht_params = {
+ .key_len = sizeof_field(struct mlxsw_sp1_ptp_unmatched, key),
+ .key_offset = offsetof(struct mlxsw_sp1_ptp_unmatched, key),
+ .head_offset = offsetof(struct mlxsw_sp1_ptp_unmatched, ht_node),
+};
+
+struct mlxsw_sp_ptp_clock {
+ struct mlxsw_core *core;
+ spinlock_t lock; /* protect this structure */
+ struct cyclecounter cycles;
+ struct timecounter tc;
+ u32 nominal_c_mult;
+ struct ptp_clock *ptp;
+ struct ptp_clock_info ptp_info;
+ unsigned long overflow_period;
+ struct delayed_work overflow_work;
+};
+
+static u64 __mlxsw_sp1_ptp_read_frc(struct mlxsw_sp_ptp_clock *clock,
+ struct ptp_system_timestamp *sts)
+{
+ struct mlxsw_core *mlxsw_core = clock->core;
+ u32 frc_h1, frc_h2, frc_l;
+
+ frc_h1 = mlxsw_core_read_frc_h(mlxsw_core);
+ ptp_read_system_prets(sts);
+ frc_l = mlxsw_core_read_frc_l(mlxsw_core);
+ ptp_read_system_postts(sts);
+ frc_h2 = mlxsw_core_read_frc_h(mlxsw_core);
+
+ if (frc_h1 != frc_h2) {
+ /* wrap around */
+ ptp_read_system_prets(sts);
+ frc_l = mlxsw_core_read_frc_l(mlxsw_core);
+ ptp_read_system_postts(sts);
+ }
+
+ return (u64) frc_l | (u64) frc_h2 << 32;
+}
+
+static u64 mlxsw_sp1_ptp_read_frc(const struct cyclecounter *cc)
+{
+ struct mlxsw_sp_ptp_clock *clock =
+ container_of(cc, struct mlxsw_sp_ptp_clock, cycles);
+
+ return __mlxsw_sp1_ptp_read_frc(clock, NULL) & cc->mask;
+}
+
+static int
+mlxsw_sp1_ptp_phc_adjfreq(struct mlxsw_sp_ptp_clock *clock, int freq_adj)
+{
+ struct mlxsw_core *mlxsw_core = clock->core;
+ char mtutc_pl[MLXSW_REG_MTUTC_LEN];
+
+ mlxsw_reg_mtutc_pack(mtutc_pl, MLXSW_REG_MTUTC_OPERATION_ADJUST_FREQ,
+ freq_adj, 0);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtutc), mtutc_pl);
+}
+
+static u64 mlxsw_sp1_ptp_ns2cycles(const struct timecounter *tc, u64 nsec)
+{
+ u64 cycles = (u64) nsec;
+
+ cycles <<= tc->cc->shift;
+ cycles = div_u64(cycles, tc->cc->mult);
+
+ return cycles;
+}
+
+static int
+mlxsw_sp1_ptp_phc_settime(struct mlxsw_sp_ptp_clock *clock, u64 nsec)
+{
+ struct mlxsw_core *mlxsw_core = clock->core;
+ u64 next_sec, next_sec_in_nsec, cycles;
+ char mtutc_pl[MLXSW_REG_MTUTC_LEN];
+ char mtpps_pl[MLXSW_REG_MTPPS_LEN];
+ int err;
+
+ next_sec = div_u64(nsec, NSEC_PER_SEC) + 1;
+ next_sec_in_nsec = next_sec * NSEC_PER_SEC;
+
+ spin_lock_bh(&clock->lock);
+ cycles = mlxsw_sp1_ptp_ns2cycles(&clock->tc, next_sec_in_nsec);
+ spin_unlock_bh(&clock->lock);
+
+ mlxsw_reg_mtpps_vpin_pack(mtpps_pl, cycles);
+ err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtpps), mtpps_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mtutc_pack(mtutc_pl,
+ MLXSW_REG_MTUTC_OPERATION_SET_TIME_AT_NEXT_SEC,
+ 0, next_sec);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtutc), mtutc_pl);
+}
+
+static int mlxsw_sp1_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct mlxsw_sp_ptp_clock *clock =
+ container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
+ int neg_adj = 0;
+ u32 diff;
+ u64 adj;
+ s32 ppb;
+
+ ppb = scaled_ppm_to_ppb(scaled_ppm);
+
+ if (ppb < 0) {
+ neg_adj = 1;
+ ppb = -ppb;
+ }
+
+ adj = clock->nominal_c_mult;
+ adj *= ppb;
+ diff = div_u64(adj, NSEC_PER_SEC);
+
+ spin_lock_bh(&clock->lock);
+ timecounter_read(&clock->tc);
+ clock->cycles.mult = neg_adj ? clock->nominal_c_mult - diff :
+ clock->nominal_c_mult + diff;
+ spin_unlock_bh(&clock->lock);
+
+ return mlxsw_sp1_ptp_phc_adjfreq(clock, neg_adj ? -ppb : ppb);
+}
+
+static int mlxsw_sp1_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct mlxsw_sp_ptp_clock *clock =
+ container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
+ u64 nsec;
+
+ spin_lock_bh(&clock->lock);
+ timecounter_adjtime(&clock->tc, delta);
+ nsec = timecounter_read(&clock->tc);
+ spin_unlock_bh(&clock->lock);
+
+ return mlxsw_sp1_ptp_phc_settime(clock, nsec);
+}
+
+static int mlxsw_sp1_ptp_gettimex(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct mlxsw_sp_ptp_clock *clock =
+ container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
+ u64 cycles, nsec;
+
+ spin_lock_bh(&clock->lock);
+ cycles = __mlxsw_sp1_ptp_read_frc(clock, sts);
+ nsec = timecounter_cyc2time(&clock->tc, cycles);
+ spin_unlock_bh(&clock->lock);
+
+ *ts = ns_to_timespec64(nsec);
+
+ return 0;
+}
+
+static int mlxsw_sp1_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct mlxsw_sp_ptp_clock *clock =
+ container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
+ u64 nsec = timespec64_to_ns(ts);
+
+ spin_lock_bh(&clock->lock);
+ timecounter_init(&clock->tc, &clock->cycles, nsec);
+ nsec = timecounter_read(&clock->tc);
+ spin_unlock_bh(&clock->lock);
+
+ return mlxsw_sp1_ptp_phc_settime(clock, nsec);
+}
+
+static const struct ptp_clock_info mlxsw_sp1_ptp_clock_info = {
+ .owner = THIS_MODULE,
+ .name = "mlxsw_sp_clock",
+ .max_adj = 100000000,
+ .adjfine = mlxsw_sp1_ptp_adjfine,
+ .adjtime = mlxsw_sp1_ptp_adjtime,
+ .gettimex64 = mlxsw_sp1_ptp_gettimex,
+ .settime64 = mlxsw_sp1_ptp_settime,
+};
+
+static void mlxsw_sp1_ptp_clock_overflow(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct mlxsw_sp_ptp_clock *clock;
+
+ clock = container_of(dwork, struct mlxsw_sp_ptp_clock, overflow_work);
+
+ spin_lock_bh(&clock->lock);
+ timecounter_read(&clock->tc);
+ spin_unlock_bh(&clock->lock);
+ mlxsw_core_schedule_dw(&clock->overflow_work, clock->overflow_period);
+}
+
+struct mlxsw_sp_ptp_clock *
+mlxsw_sp1_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev)
+{
+ u64 overflow_cycles, nsec, frac = 0;
+ struct mlxsw_sp_ptp_clock *clock;
+ int err;
+
+ clock = kzalloc(sizeof(*clock), GFP_KERNEL);
+ if (!clock)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock_init(&clock->lock);
+ clock->cycles.read = mlxsw_sp1_ptp_read_frc;
+ clock->cycles.shift = MLXSW_SP1_PTP_CLOCK_CYCLES_SHIFT;
+ clock->cycles.mult = clocksource_khz2mult(MLXSW_SP1_PTP_CLOCK_FREQ_KHZ,
+ clock->cycles.shift);
+ clock->nominal_c_mult = clock->cycles.mult;
+ clock->cycles.mask = CLOCKSOURCE_MASK(MLXSW_SP1_PTP_CLOCK_MASK);
+ clock->core = mlxsw_sp->core;
+
+ timecounter_init(&clock->tc, &clock->cycles,
+ ktime_to_ns(ktime_get_real()));
+
+ /* Calculate the period for scheduling the overflow watchdog, to make
+ * sure the counter is read at least twice every wrap-around.
+ * The period is the minimum between the max HW cycles count (the clock
+ * source mask) and the max number of cycles that can be multiplied by
+ * the clock multiplier without the result exceeding 64 bits.
+ */
+ overflow_cycles = div64_u64(~0ULL >> 1, clock->cycles.mult);
+ overflow_cycles = min(overflow_cycles, div_u64(clock->cycles.mask, 3));
+
+ nsec = cyclecounter_cyc2ns(&clock->cycles, overflow_cycles, 0, &frac);
+ clock->overflow_period = nsecs_to_jiffies(nsec);
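
For scale, a back-of-the-envelope reading of the calculation above (approximate figures, not from the patch): a 156.25 MHz counter means about 6.4 ns per cycle, so cycles.mult comes out to roughly 3.4e9 at shift 29; 2^63 / mult is then about 2.7e9 cycles, far below mask / 3, and 2.7e9 cycles at 6.4 ns each puts overflow_period at roughly 17 seconds between overflow_work runs.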
+
+ INIT_DELAYED_WORK(&clock->overflow_work, mlxsw_sp1_ptp_clock_overflow);
+ mlxsw_core_schedule_dw(&clock->overflow_work, 0);
+
+ clock->ptp_info = mlxsw_sp1_ptp_clock_info;
+ clock->ptp = ptp_clock_register(&clock->ptp_info, dev);
+ if (IS_ERR(clock->ptp)) {
+ err = PTR_ERR(clock->ptp);
+ dev_err(dev, "ptp_clock_register failed %d\n", err);
+ goto err_ptp_clock_register;
+ }
+
+ return clock;
+
+err_ptp_clock_register:
+ cancel_delayed_work_sync(&clock->overflow_work);
+ kfree(clock);
+ return ERR_PTR(err);
+}
+
+void mlxsw_sp1_ptp_clock_fini(struct mlxsw_sp_ptp_clock *clock)
+{
+ ptp_clock_unregister(clock->ptp);
+ cancel_delayed_work_sync(&clock->overflow_work);
+ kfree(clock);
+}
+
+static int mlxsw_sp_ptp_parse(struct sk_buff *skb,
+ u8 *p_domain_number,
+ u8 *p_message_type,
+ u16 *p_sequence_id)
+{
+ unsigned int offset = 0;
+ unsigned int ptp_class;
+ u8 *data;
+
+ data = skb_mac_header(skb);
+ ptp_class = ptp_classify_raw(skb);
+
+ switch (ptp_class & PTP_CLASS_VMASK) {
+ case PTP_CLASS_V1:
+ case PTP_CLASS_V2:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ if (ptp_class & PTP_CLASS_VLAN)
+ offset += VLAN_HLEN;
+
+ switch (ptp_class & PTP_CLASS_PMASK) {
+ case PTP_CLASS_IPV4:
+ offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
+ break;
+ case PTP_CLASS_IPV6:
+ offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
+ break;
+ case PTP_CLASS_L2:
+ offset += ETH_HLEN;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ /* PTP header is 34 bytes. */
+ if (skb->len < offset + 34)
+ return -EINVAL;
+
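+ /* Extract messageType (low nibble of byte 0), domainNumber (byte 4)
+ * and sequenceId (bytes 30-31) from the PTP header.
+ */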
+ *p_message_type = data[offset] & 0x0f;
+ *p_domain_number = data[offset + 4];
+ *p_sequence_id = (u16)(data[offset + 30]) << 8 | data[offset + 31];
+ return 0;
+}
+
+/* Returns NULL on successful insertion, a pointer on conflict, or an ERR_PTR on
+ * error.
+ */
+static struct mlxsw_sp1_ptp_unmatched *
+mlxsw_sp1_ptp_unmatched_save(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp1_ptp_key key,
+ struct sk_buff *skb,
+ u64 timestamp)
+{
+ int cycles = MLXSW_SP1_PTP_HT_GC_TIMEOUT / MLXSW_SP1_PTP_HT_GC_INTERVAL;
+ struct mlxsw_sp_ptp_state *ptp_state = mlxsw_sp->ptp_state;
+ struct mlxsw_sp1_ptp_unmatched *unmatched;
+ struct mlxsw_sp1_ptp_unmatched *conflict;
+
+ unmatched = kzalloc(sizeof(*unmatched), GFP_ATOMIC);
+ if (!unmatched)
+ return ERR_PTR(-ENOMEM);
+
+ unmatched->key = key;
+ unmatched->skb = skb;
+ unmatched->timestamp = timestamp;
+ unmatched->gc_cycle = mlxsw_sp->ptp_state->gc_cycle + cycles;
+
+ conflict = rhashtable_lookup_get_insert_fast(&ptp_state->unmatched_ht,
+ &unmatched->ht_node,
+ mlxsw_sp1_ptp_unmatched_ht_params);
+ if (conflict)
+ kfree(unmatched);
+
+ return conflict;
+}
+
+static struct mlxsw_sp1_ptp_unmatched *
+mlxsw_sp1_ptp_unmatched_lookup(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp1_ptp_key key)
+{
+ return rhashtable_lookup(&mlxsw_sp->ptp_state->unmatched_ht, &key,
+ mlxsw_sp1_ptp_unmatched_ht_params);
+}
+
+static int
+mlxsw_sp1_ptp_unmatched_remove(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp1_ptp_unmatched *unmatched)
+{
+ return rhashtable_remove_fast(&mlxsw_sp->ptp_state->unmatched_ht,
+ &unmatched->ht_node,
+ mlxsw_sp1_ptp_unmatched_ht_params);
+}
+
+/* This function is called in the following scenarios:
+ *
+ * 1) When a packet is matched with its timestamp.
+ * 2) In several situations where it is necessary to immediately pass on
+ * an SKB without a timestamp.
+ * 3) From GC indirectly through mlxsw_sp1_ptp_unmatched_finish().
+ * This case is similar to 2) above.
+ */
+static void mlxsw_sp1_ptp_packet_finish(struct mlxsw_sp *mlxsw_sp,
+ struct sk_buff *skb, u8 local_port,
+ bool ingress,
+ struct skb_shared_hwtstamps *hwtstamps)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port;
+
+ /* Between capturing the packet and finishing it, there is a window of
+ * opportunity for the originating port to go away (e.g. due to a
+ * split). Also make sure the SKB device reference is still valid.
+ */
+ mlxsw_sp_port = mlxsw_sp->ports[local_port];
+ if (!(mlxsw_sp_port && (!skb->dev || skb->dev == mlxsw_sp_port->dev))) {
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ if (ingress) {
+ if (hwtstamps)
+ *skb_hwtstamps(skb) = *hwtstamps;
+ mlxsw_sp_rx_listener_no_mark_func(skb, local_port, mlxsw_sp);
+ } else {
+ /* skb_tstamp_tx() allows hwtstamps to be NULL. */
+ skb_tstamp_tx(skb, hwtstamps);
+ dev_kfree_skb_any(skb);
+ }
+}
+
+static void mlxsw_sp1_packet_timestamp(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp1_ptp_key key,
+ struct sk_buff *skb,
+ u64 timestamp)
+{
+ struct skb_shared_hwtstamps hwtstamps;
+ u64 nsec;
+
+ spin_lock_bh(&mlxsw_sp->clock->lock);
+ nsec = timecounter_cyc2time(&mlxsw_sp->clock->tc, timestamp);
+ spin_unlock_bh(&mlxsw_sp->clock->lock);
+
+ hwtstamps.hwtstamp = ns_to_ktime(nsec);
+ mlxsw_sp1_ptp_packet_finish(mlxsw_sp, skb,
+ key.local_port, key.ingress, &hwtstamps);
+}
+
+static void
+mlxsw_sp1_ptp_unmatched_finish(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp1_ptp_unmatched *unmatched)
+{
+ if (unmatched->skb && unmatched->timestamp)
+ mlxsw_sp1_packet_timestamp(mlxsw_sp, unmatched->key,
+ unmatched->skb,
+ unmatched->timestamp);
+ else if (unmatched->skb)
+ mlxsw_sp1_ptp_packet_finish(mlxsw_sp, unmatched->skb,
+ unmatched->key.local_port,
+ unmatched->key.ingress, NULL);
+ kfree_rcu(unmatched, rcu);
+}
+
+static void mlxsw_sp1_ptp_unmatched_free_fn(void *ptr, void *arg)
+{
+ struct mlxsw_sp1_ptp_unmatched *unmatched = ptr;
+
+ /* This is invoked at a point where the ports are already gone. There is
+ * nothing to do with whatever is left in the HT except free it.
+ */
+ if (unmatched->skb)
+ dev_kfree_skb_any(unmatched->skb);
+ kfree_rcu(unmatched, rcu);
+}
+
+static void mlxsw_sp1_ptp_got_piece(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp1_ptp_key key,
+ struct sk_buff *skb, u64 timestamp)
+{
+ struct mlxsw_sp1_ptp_unmatched *unmatched, *conflict;
+ int err;
+
+ rcu_read_lock();
+
+ unmatched = mlxsw_sp1_ptp_unmatched_lookup(mlxsw_sp, key);
+
+ spin_lock(&mlxsw_sp->ptp_state->unmatched_lock);
+
+ if (unmatched) {
+ /* There was an unmatched entry when we looked, but it may have
+ * been removed before we took the lock.
+ */
+ err = mlxsw_sp1_ptp_unmatched_remove(mlxsw_sp, unmatched);
+ if (err)
+ unmatched = NULL;
+ }
+
+ if (!unmatched) {
+ /* We have no unmatched entry, but one may have been added after
+ * we looked, but before we took the lock.
+ */
+ unmatched = mlxsw_sp1_ptp_unmatched_save(mlxsw_sp, key,
+ skb, timestamp);
+ if (IS_ERR(unmatched)) {
+ if (skb)
+ mlxsw_sp1_ptp_packet_finish(mlxsw_sp, skb,
+ key.local_port,
+ key.ingress, NULL);
+ unmatched = NULL;
+ } else if (unmatched) {
+ /* Save just told us, under lock, that the entry is
+ * there, so this has to work.
+ */
+ err = mlxsw_sp1_ptp_unmatched_remove(mlxsw_sp,
+ unmatched);
+ WARN_ON_ONCE(err);
+ }
+ }
+
+ /* If unmatched is non-NULL here, it comes either from the lookup, or
+ * from the save attempt above. In either case the entry was removed
+ * from the hash table. If unmatched is NULL, a new unmatched entry was
+ * added to the hash table, and there was no conflict.
+ */
+
+ if (skb && unmatched && unmatched->timestamp) {
+ unmatched->skb = skb;
+ } else if (timestamp && unmatched && unmatched->skb) {
+ unmatched->timestamp = timestamp;
+ } else if (unmatched) {
+ /* unmatched holds an older entry of the same type: either an
+ * SKB if we are handling an SKB, or a timestamp if we are
+ * handling a timestamp. We can't match that up, so save what
+ * we have.
+ */
+ conflict = mlxsw_sp1_ptp_unmatched_save(mlxsw_sp, key,
+ skb, timestamp);
+ if (IS_ERR(conflict)) {
+ if (skb)
+ mlxsw_sp1_ptp_packet_finish(mlxsw_sp, skb,
+ key.local_port,
+ key.ingress, NULL);
+ } else {
+ /* Above, we removed an object with this key from the
+ * hash table, under lock, so conflict cannot be a
+ * valid pointer.
+ */
+ WARN_ON_ONCE(conflict);
+ }
+ }
+
+ spin_unlock(&mlxsw_sp->ptp_state->unmatched_lock);
+
+ if (unmatched)
+ mlxsw_sp1_ptp_unmatched_finish(mlxsw_sp, unmatched);
+
+ rcu_read_unlock();
+}
+
+static void mlxsw_sp1_ptp_got_packet(struct mlxsw_sp *mlxsw_sp,
+ struct sk_buff *skb, u8 local_port,
+ bool ingress)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ struct mlxsw_sp1_ptp_key key;
+ u8 types;
+ int err;
+
+ mlxsw_sp_port = mlxsw_sp->ports[local_port];
+ if (!mlxsw_sp_port)
+ goto immediate;
+
+ types = ingress ? mlxsw_sp_port->ptp.ing_types :
+ mlxsw_sp_port->ptp.egr_types;
+ if (!types)
+ goto immediate;
+
+ memset(&key, 0, sizeof(key));
+ key.local_port = local_port;
+ key.ingress = ingress;
+
+ err = mlxsw_sp_ptp_parse(skb, &key.domain_number, &key.message_type,
+ &key.sequence_id);
+ if (err)
+ goto immediate;
+
+ /* For packets whose timestamping was not enabled on this port, don't
+ * bother trying to match the timestamp.
+ */
+ if (!((1 << key.message_type) & types))
+ goto immediate;
+
+ mlxsw_sp1_ptp_got_piece(mlxsw_sp, key, skb, 0);
+ return;
+
+immediate:
+ mlxsw_sp1_ptp_packet_finish(mlxsw_sp, skb, local_port, ingress, NULL);
+}
+
+void mlxsw_sp1_ptp_got_timestamp(struct mlxsw_sp *mlxsw_sp, bool ingress,
+ u8 local_port, u8 message_type,
+ u8 domain_number, u16 sequence_id,
+ u64 timestamp)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ struct mlxsw_sp1_ptp_key key;
+ u8 types;
+
+ mlxsw_sp_port = mlxsw_sp->ports[local_port];
+ if (!mlxsw_sp_port)
+ return;
+
+ types = ingress ? mlxsw_sp_port->ptp.ing_types :
+ mlxsw_sp_port->ptp.egr_types;
+
+ /* For message types whose timestamping was not enabled on this port,
+ * don't bother with the timestamp.
+ */
+ if (!((1 << message_type) & types))
+ return;
+
+ memset(&key, 0, sizeof(key));
+ key.local_port = local_port;
+ key.domain_number = domain_number;
+ key.message_type = message_type;
+ key.sequence_id = sequence_id;
+ key.ingress = ingress;
+
+ mlxsw_sp1_ptp_got_piece(mlxsw_sp, key, NULL, timestamp);
+}
+
+void mlxsw_sp1_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
+ u8 local_port)
+{
+ skb_reset_mac_header(skb);
+ mlxsw_sp1_ptp_got_packet(mlxsw_sp, skb, local_port, true);
+}
+
+void mlxsw_sp1_ptp_transmitted(struct mlxsw_sp *mlxsw_sp,
+ struct sk_buff *skb, u8 local_port)
+{
+ mlxsw_sp1_ptp_got_packet(mlxsw_sp, skb, local_port, false);
+}
+
+static void
+mlxsw_sp1_ptp_ht_gc_collect(struct mlxsw_sp_ptp_state *ptp_state,
+ struct mlxsw_sp1_ptp_unmatched *unmatched)
+{
+ int err;
+
+ /* If an unmatched entry has an SKB, it has to be handed over to the
+ * networking stack. This is usually done from a trap handler, which is
+ * invoked in a softirq context. Here we are going to do it in process
+ * context. If that were to be interrupted by a softirq, it could cause
+ * a deadlock when an attempt is made to take an already-taken lock
+ * somewhere along the sending path. Disable softirqs to prevent this.
+ */
+ local_bh_disable();
+
+ spin_lock(&ptp_state->unmatched_lock);
+ err = rhashtable_remove_fast(&ptp_state->unmatched_ht,
+ &unmatched->ht_node,
+ mlxsw_sp1_ptp_unmatched_ht_params);
+ spin_unlock(&ptp_state->unmatched_lock);
+
+ if (err)
+ /* The packet was matched with a timestamp during the walk. */
+ goto out;
+
+ /* mlxsw_sp1_ptp_unmatched_finish() invokes netif_receive_skb(). While
+ * the comment at that function states that it can only be called in
+ * soft IRQ context, this pattern of local_bh_disable() +
+ * netif_receive_skb(), in process context, is seen elsewhere in the
+ * kernel, notably in pktgen.
+ */
+ mlxsw_sp1_ptp_unmatched_finish(ptp_state->mlxsw_sp, unmatched);
+
+out:
+ local_bh_enable();
+}
+
+static void mlxsw_sp1_ptp_ht_gc(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct mlxsw_sp1_ptp_unmatched *unmatched;
+ struct mlxsw_sp_ptp_state *ptp_state;
+ struct rhashtable_iter iter;
+ u32 gc_cycle;
+ void *obj;
+
+ ptp_state = container_of(dwork, struct mlxsw_sp_ptp_state, ht_gc_dw);
+ gc_cycle = ptp_state->gc_cycle++;
+
+ rhashtable_walk_enter(&ptp_state->unmatched_ht, &iter);
+ rhashtable_walk_start(&iter);
+ while ((obj = rhashtable_walk_next(&iter))) {
+ if (IS_ERR(obj))
+ continue;
+
+ unmatched = obj;
+ if (unmatched->gc_cycle <= gc_cycle)
+ mlxsw_sp1_ptp_ht_gc_collect(ptp_state, unmatched);
+ }
+ rhashtable_walk_stop(&iter);
+ rhashtable_walk_exit(&iter);
+
+ mlxsw_core_schedule_dw(&ptp_state->ht_gc_dw,
+ MLXSW_SP1_PTP_HT_GC_INTERVAL);
+}
+
+static int mlxsw_sp_ptp_mtptpt_set(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_reg_mtptpt_trap_id trap_id,
+ u16 message_type)
+{
+ char mtptpt_pl[MLXSW_REG_MTPTPT_LEN];
+
+ mlxsw_reg_mtptptp_pack(mtptpt_pl, trap_id, message_type);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mtptpt), mtptpt_pl);
+}
+
+static int mlxsw_sp1_ptp_set_fifo_clr_on_trap(struct mlxsw_sp *mlxsw_sp,
+ bool clr)
+{
+ char mogcr_pl[MLXSW_REG_MOGCR_LEN] = {0};
+ int err;
+
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mogcr), mogcr_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mogcr_ptp_iftc_set(mogcr_pl, clr);
+ mlxsw_reg_mogcr_ptp_eftc_set(mogcr_pl, clr);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mogcr), mogcr_pl);
+}
+
+static int mlxsw_sp1_ptp_mtpppc_set(struct mlxsw_sp *mlxsw_sp,
+ u16 ing_types, u16 egr_types)
+{
+ char mtpppc_pl[MLXSW_REG_MTPPPC_LEN];
+
+ mlxsw_reg_mtpppc_pack(mtpppc_pl, ing_types, egr_types);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mtpppc), mtpppc_pl);
+}
+
+struct mlxsw_sp1_ptp_shaper_params {
+ u32 ethtool_speed;
+ enum mlxsw_reg_qpsc_port_speed port_speed;
+ u8 shaper_time_exp;
+ u8 shaper_time_mantissa;
+ u8 shaper_inc;
+ u8 shaper_bs;
+ u8 port_to_shaper_credits;
+ int ing_timestamp_inc;
+ int egr_timestamp_inc;
+};
+
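+/* Per-speed PTP shaper parameters and ingress/egress timestamp increments. */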
+static const struct mlxsw_sp1_ptp_shaper_params
+mlxsw_sp1_ptp_shaper_params[] = {
+ {
+ .ethtool_speed = SPEED_100,
+ .port_speed = MLXSW_REG_QPSC_PORT_SPEED_100M,
+ .shaper_time_exp = 4,
+ .shaper_time_mantissa = 12,
+ .shaper_inc = 9,
+ .shaper_bs = 1,
+ .port_to_shaper_credits = 1,
+ .ing_timestamp_inc = -313,
+ .egr_timestamp_inc = 313,
+ },
+ {
+ .ethtool_speed = SPEED_1000,
+ .port_speed = MLXSW_REG_QPSC_PORT_SPEED_1G,
+ .shaper_time_exp = 0,
+ .shaper_time_mantissa = 12,
+ .shaper_inc = 6,
+ .shaper_bs = 0,
+ .port_to_shaper_credits = 1,
+ .ing_timestamp_inc = -35,
+ .egr_timestamp_inc = 35,
+ },
+ {
+ .ethtool_speed = SPEED_10000,
+ .port_speed = MLXSW_REG_QPSC_PORT_SPEED_10G,
+ .shaper_time_exp = 0,
+ .shaper_time_mantissa = 2,
+ .shaper_inc = 14,
+ .shaper_bs = 1,
+ .port_to_shaper_credits = 1,
+ .ing_timestamp_inc = -11,
+ .egr_timestamp_inc = 11,
+ },
+ {
+ .ethtool_speed = SPEED_25000,
+ .port_speed = MLXSW_REG_QPSC_PORT_SPEED_25G,
+ .shaper_time_exp = 0,
+ .shaper_time_mantissa = 0,
+ .shaper_inc = 11,
+ .shaper_bs = 1,
+ .port_to_shaper_credits = 1,
+ .ing_timestamp_inc = -14,
+ .egr_timestamp_inc = 14,
+ },
+};
+
+#define MLXSW_SP1_PTP_SHAPER_PARAMS_LEN ARRAY_SIZE(mlxsw_sp1_ptp_shaper_params)
+
+static int mlxsw_sp1_ptp_shaper_params_set(struct mlxsw_sp *mlxsw_sp)
+{
+ const struct mlxsw_sp1_ptp_shaper_params *params;
+ char qpsc_pl[MLXSW_REG_QPSC_LEN];
+ int i, err;
+
+ for (i = 0; i < MLXSW_SP1_PTP_SHAPER_PARAMS_LEN; i++) {
+ params = &mlxsw_sp1_ptp_shaper_params[i];
+ mlxsw_reg_qpsc_pack(qpsc_pl, params->port_speed,
+ params->shaper_time_exp,
+ params->shaper_time_mantissa,
+ params->shaper_inc, params->shaper_bs,
+ params->port_to_shaper_credits,
+ params->ing_timestamp_inc,
+ params->egr_timestamp_inc);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qpsc), qpsc_pl);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+struct mlxsw_sp_ptp_state *mlxsw_sp1_ptp_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_ptp_state *ptp_state;
+ u16 message_type;
+ int err;
+
+ err = mlxsw_sp1_ptp_shaper_params_set(mlxsw_sp);
+ if (err)
+ return ERR_PTR(err);
+
+ ptp_state = kzalloc(sizeof(*ptp_state), GFP_KERNEL);
+ if (!ptp_state)
+ return ERR_PTR(-ENOMEM);
+ ptp_state->mlxsw_sp = mlxsw_sp;
+
+ spin_lock_init(&ptp_state->unmatched_lock);
+
+ err = rhashtable_init(&ptp_state->unmatched_ht,
+ &mlxsw_sp1_ptp_unmatched_ht_params);
+ if (err)
+ goto err_hashtable_init;
+
+ /* Deliver these message types as PTP0. */
+ message_type = BIT(MLXSW_SP_PTP_MESSAGE_TYPE_SYNC) |
+ BIT(MLXSW_SP_PTP_MESSAGE_TYPE_DELAY_REQ) |
+ BIT(MLXSW_SP_PTP_MESSAGE_TYPE_PDELAY_REQ) |
+ BIT(MLXSW_SP_PTP_MESSAGE_TYPE_PDELAY_RESP);
+ err = mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0,
+ message_type);
+ if (err)
+ goto err_mtptpt_set;
+
+ /* Everything else is PTP1. */
+ message_type = ~message_type;
+ err = mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP1,
+ message_type);
+ if (err)
+ goto err_mtptpt1_set;
+
+ err = mlxsw_sp1_ptp_set_fifo_clr_on_trap(mlxsw_sp, true);
+ if (err)
+ goto err_fifo_clr;
+
+ INIT_DELAYED_WORK(&ptp_state->ht_gc_dw, mlxsw_sp1_ptp_ht_gc);
+ mlxsw_core_schedule_dw(&ptp_state->ht_gc_dw,
+ MLXSW_SP1_PTP_HT_GC_INTERVAL);
+ return ptp_state;
+
+err_fifo_clr:
+ mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP1, 0);
+err_mtptpt1_set:
+ mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0, 0);
+err_mtptpt_set:
+ rhashtable_destroy(&ptp_state->unmatched_ht);
+err_hashtable_init:
+ kfree(ptp_state);
+ return ERR_PTR(err);
+}
+
+void mlxsw_sp1_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state)
+{
+ struct mlxsw_sp *mlxsw_sp = ptp_state->mlxsw_sp;
+
+ cancel_delayed_work_sync(&ptp_state->ht_gc_dw);
+ mlxsw_sp1_ptp_mtpppc_set(mlxsw_sp, 0, 0);
+ mlxsw_sp1_ptp_set_fifo_clr_on_trap(mlxsw_sp, false);
+ mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP1, 0);
+ mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0, 0);
+ rhashtable_free_and_destroy(&ptp_state->unmatched_ht,
+ &mlxsw_sp1_ptp_unmatched_free_fn, NULL);
+ kfree(ptp_state);
+}
+
+int mlxsw_sp1_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct hwtstamp_config *config)
+{
+ *config = mlxsw_sp_port->ptp.hwtstamp_config;
+ return 0;
+}
+
+static int mlxsw_sp_ptp_get_message_types(const struct hwtstamp_config *config,
+ u16 *p_ing_types, u16 *p_egr_types,
+ enum hwtstamp_rx_filters *p_rx_filter)
+{
+ enum hwtstamp_rx_filters rx_filter = config->rx_filter;
+ enum hwtstamp_tx_types tx_type = config->tx_type;
+ u16 ing_types = 0x00;
+ u16 egr_types = 0x00;
+
+ switch (tx_type) {
+ case HWTSTAMP_TX_OFF:
+ egr_types = 0x00;
+ break;
+ case HWTSTAMP_TX_ON:
+ egr_types = 0xff;
+ break;
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ return -ERANGE;
+ }
+
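+ /* ing_types is a bitmask over PTP message types, with bit positions
+ * matching MLXSW_SP_PTP_MESSAGE_TYPE_*: bit 0 is Sync, bit 1 is
+ * Delay_Req, bits 2-3 are the peer delay messages.
+ */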
+ switch (rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ ing_types = 0x00;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ ing_types = 0x01;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ ing_types = 0x02;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ ing_types = 0x0f;
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ ing_types = 0xff;
+ break;
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ return -ERANGE;
+ }
+
+ *p_ing_types = ing_types;
+ *p_egr_types = egr_types;
+ *p_rx_filter = rx_filter;
+ return 0;
+}
+
+static int mlxsw_sp1_ptp_mtpppc_update(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 ing_types, u16 egr_types)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_port *tmp;
+ int i;
+
+ /* MTPPPC configures timestamping globally, not per port. Find the
+ * configuration that contains all configured timestamping requests.
+ */
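+ /* Start from local port 1; local port 0 is the CPU port. */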
+ for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) {
+ tmp = mlxsw_sp->ports[i];
+ if (tmp && tmp != mlxsw_sp_port) {
+ ing_types |= tmp->ptp.ing_types;
+ egr_types |= tmp->ptp.egr_types;
+ }
+ }
+
+ return mlxsw_sp1_ptp_mtpppc_set(mlxsw_sp_port->mlxsw_sp,
+ ing_types, egr_types);
+}
+
+static bool mlxsw_sp1_ptp_hwtstamp_enabled(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ return mlxsw_sp_port->ptp.ing_types || mlxsw_sp_port->ptp.egr_types;
+}
+
+static int
+mlxsw_sp1_ptp_port_shaper_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char qeec_pl[MLXSW_REG_QEEC_LEN];
+
+ mlxsw_reg_qeec_ptps_pack(qeec_pl, mlxsw_sp_port->local_port, enable);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
+}
+
+static int mlxsw_sp1_ptp_port_shaper_check(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops;
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char ptys_pl[MLXSW_REG_PTYS_LEN];
+ u32 eth_proto_oper, speed;
+ bool ptps = false;
+ int err, i;
+
+ if (!mlxsw_sp1_ptp_hwtstamp_enabled(mlxsw_sp_port))
+ return mlxsw_sp1_ptp_port_shaper_set(mlxsw_sp_port, false);
+
+ port_type_speed_ops = mlxsw_sp->port_type_speed_ops;
+ port_type_speed_ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl,
+ mlxsw_sp_port->local_port, 0,
+ false);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
+ if (err)
+ return err;
+ port_type_speed_ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, NULL, NULL,
+ &eth_proto_oper);
+
+ speed = port_type_speed_ops->from_ptys_speed(mlxsw_sp, eth_proto_oper);
+ for (i = 0; i < MLXSW_SP1_PTP_SHAPER_PARAMS_LEN; i++) {
+ if (mlxsw_sp1_ptp_shaper_params[i].ethtool_speed == speed) {
+ ptps = true;
+ break;
+ }
+ }
+
+ return mlxsw_sp1_ptp_port_shaper_set(mlxsw_sp_port, ptps);
+}
+
+void mlxsw_sp1_ptp_shaper_work(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ int err;
+
+ mlxsw_sp_port = container_of(dwork, struct mlxsw_sp_port,
+ ptp.shaper_dw);
+
+ if (!mlxsw_sp1_ptp_hwtstamp_enabled(mlxsw_sp_port))
+ return;
+
+ err = mlxsw_sp1_ptp_port_shaper_check(mlxsw_sp_port);
+ if (err)
+ netdev_err(mlxsw_sp_port->dev, "Failed to set up PTP shaper\n");
+}
+
+int mlxsw_sp1_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct hwtstamp_config *config)
+{
+ enum hwtstamp_rx_filters rx_filter;
+ u16 ing_types;
+ u16 egr_types;
+ int err;
+
+ err = mlxsw_sp_ptp_get_message_types(config, &ing_types, &egr_types,
+ &rx_filter);
+ if (err)
+ return err;
+
+ err = mlxsw_sp1_ptp_mtpppc_update(mlxsw_sp_port, ing_types, egr_types);
+ if (err)
+ return err;
+
+ mlxsw_sp_port->ptp.hwtstamp_config = *config;
+ mlxsw_sp_port->ptp.ing_types = ing_types;
+ mlxsw_sp_port->ptp.egr_types = egr_types;
+
+ err = mlxsw_sp1_ptp_port_shaper_check(mlxsw_sp_port);
+ if (err)
+ return err;
+
+ /* Notify the ioctl caller what we are actually timestamping. */
+ config->rx_filter = rx_filter;
+
+ return 0;
+}
+
+int mlxsw_sp1_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
+ struct ethtool_ts_info *info)
+{
+ info->phc_index = ptp_clock_index(mlxsw_sp->clock->ptp);
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) |
+ BIT(HWTSTAMP_TX_ON);
+
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
new file mode 100644
index 000000000000..72e55f6926b9
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2019 Mellanox Technologies. All rights reserved */
+
+#ifndef _MLXSW_SPECTRUM_PTP_H
+#define _MLXSW_SPECTRUM_PTP_H
+
+#include <linux/device.h>
+#include <linux/rhashtable.h>
+
+struct mlxsw_sp;
+struct mlxsw_sp_port;
+struct mlxsw_sp_ptp_clock;
+
+enum {
+ MLXSW_SP_PTP_MESSAGE_TYPE_SYNC,
+ MLXSW_SP_PTP_MESSAGE_TYPE_DELAY_REQ,
+ MLXSW_SP_PTP_MESSAGE_TYPE_PDELAY_REQ,
+ MLXSW_SP_PTP_MESSAGE_TYPE_PDELAY_RESP,
+};
+
+static inline int mlxsw_sp_ptp_get_ts_info_noptp(struct ethtool_ts_info *info)
+{
+ info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+ info->phc_index = -1;
+ return 0;
+}
+
+#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
+
+struct mlxsw_sp_ptp_clock *
+mlxsw_sp1_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev);
+
+void mlxsw_sp1_ptp_clock_fini(struct mlxsw_sp_ptp_clock *clock);
+
+struct mlxsw_sp_ptp_state *mlxsw_sp1_ptp_init(struct mlxsw_sp *mlxsw_sp);
+
+void mlxsw_sp1_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state);
+
+void mlxsw_sp1_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
+ u8 local_port);
+
+void mlxsw_sp1_ptp_transmitted(struct mlxsw_sp *mlxsw_sp,
+ struct sk_buff *skb, u8 local_port);
+
+void mlxsw_sp1_ptp_got_timestamp(struct mlxsw_sp *mlxsw_sp, bool ingress,
+ u8 local_port, u8 message_type,
+ u8 domain_number, u16 sequence_id,
+ u64 timestamp);
+
+int mlxsw_sp1_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct hwtstamp_config *config);
+
+int mlxsw_sp1_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct hwtstamp_config *config);
+
+void mlxsw_sp1_ptp_shaper_work(struct work_struct *work);
+
+int mlxsw_sp1_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
+ struct ethtool_ts_info *info);
+
+#else
+
+static inline struct mlxsw_sp_ptp_clock *
+mlxsw_sp1_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev)
+{
+ return NULL;
+}
+
+static inline void mlxsw_sp1_ptp_clock_fini(struct mlxsw_sp_ptp_clock *clock)
+{
+}
+
+static inline struct mlxsw_sp_ptp_state *
+mlxsw_sp1_ptp_init(struct mlxsw_sp *mlxsw_sp)
+{
+ return NULL;
+}
+
+static inline void mlxsw_sp1_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state)
+{
+}
+
+static inline void mlxsw_sp1_ptp_receive(struct mlxsw_sp *mlxsw_sp,
+ struct sk_buff *skb, u8 local_port)
+{
+ mlxsw_sp_rx_listener_no_mark_func(skb, local_port, mlxsw_sp);
+}
+
+static inline void mlxsw_sp1_ptp_transmitted(struct mlxsw_sp *mlxsw_sp,
+ struct sk_buff *skb, u8 local_port)
+{
+ dev_kfree_skb_any(skb);
+}
+
+static inline void
+mlxsw_sp1_ptp_got_timestamp(struct mlxsw_sp *mlxsw_sp, bool ingress,
+ u8 local_port, u8 message_type,
+ u8 domain_number,
+ u16 sequence_id, u64 timestamp)
+{
+}
+
+static inline int
+mlxsw_sp1_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct hwtstamp_config *config)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+mlxsw_sp1_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct hwtstamp_config *config)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void mlxsw_sp1_ptp_shaper_work(struct work_struct *work)
+{
+}
+
+static inline int mlxsw_sp1_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
+ struct ethtool_ts_info *info)
+{
+ return mlxsw_sp_ptp_get_ts_info_noptp(info);
+}
+
+#endif
+
+static inline struct mlxsw_sp_ptp_clock *
+mlxsw_sp2_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev)
+{
+ return NULL;
+}
+
+static inline void mlxsw_sp2_ptp_clock_fini(struct mlxsw_sp_ptp_clock *clock)
+{
+}
+
+static inline struct mlxsw_sp_ptp_state *
+mlxsw_sp2_ptp_init(struct mlxsw_sp *mlxsw_sp)
+{
+ return NULL;
+}
+
+static inline void mlxsw_sp2_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state)
+{
+}
+
+static inline void mlxsw_sp2_ptp_receive(struct mlxsw_sp *mlxsw_sp,
+ struct sk_buff *skb, u8 local_port)
+{
+ mlxsw_sp_rx_listener_no_mark_func(skb, local_port, mlxsw_sp);
+}
+
+static inline void mlxsw_sp2_ptp_transmitted(struct mlxsw_sp *mlxsw_sp,
+ struct sk_buff *skb, u8 local_port)
+{
+ dev_kfree_skb_any(skb);
+}
+
+static inline int
+mlxsw_sp2_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct hwtstamp_config *config)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct hwtstamp_config *config)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void mlxsw_sp2_ptp_shaper_work(struct work_struct *work)
+{
+}
+
+static inline int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
+ struct ethtool_ts_info *info)
+{
+ return mlxsw_sp_ptp_get_ts_info_noptp(info);
+}
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index ef554739dd54..e618be7ce6c6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -21,6 +21,7 @@
#include <net/arp.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
+#include <net/nexthop.h>
#include <net/fib_rules.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>
@@ -2887,7 +2888,7 @@ mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
return false;
list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
- struct fib6_nh *fib6_nh = &mlxsw_sp_rt6->rt->fib6_nh;
+ struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
struct in6_addr *gw;
int ifindex, weight;
@@ -2959,7 +2960,7 @@ mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed)
struct net_device *dev;
list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
- dev = mlxsw_sp_rt6->rt->fib6_nh.fib_nh_dev;
+ dev = mlxsw_sp_rt6->rt->fib6_nh->fib_nh_dev;
val ^= dev->ifindex;
}
@@ -3883,23 +3884,25 @@ static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
}
static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
- const struct fib_info *fi)
+ struct fib_info *fi)
{
- return fi->fib_nh->fib_nh_scope == RT_SCOPE_LINK ||
- mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, fi->fib_nh, NULL);
+ const struct fib_nh *nh = fib_info_nh(fi, 0);
+
+ return nh->fib_nh_scope == RT_SCOPE_LINK ||
+ mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, nh, NULL);
}
static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
{
+ unsigned int nhs = fib_info_num_path(fi);
struct mlxsw_sp_nexthop_group *nh_grp;
struct mlxsw_sp_nexthop *nh;
struct fib_nh *fib_nh;
int i;
int err;
- nh_grp = kzalloc(struct_size(nh_grp, nexthops, fi->fib_nhs),
- GFP_KERNEL);
+ nh_grp = kzalloc(struct_size(nh_grp, nexthops, nhs), GFP_KERNEL);
if (!nh_grp)
return ERR_PTR(-ENOMEM);
nh_grp->priv = fi;
@@ -3907,11 +3910,11 @@ mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
nh_grp->neigh_tbl = &arp_tbl;
nh_grp->gateway = mlxsw_sp_fi_is_gateway(mlxsw_sp, fi);
- nh_grp->count = fi->fib_nhs;
+ nh_grp->count = nhs;
fib_info_hold(fi);
for (i = 0; i < nh_grp->count; i++) {
nh = &nh_grp->nexthops[i];
- fib_nh = &fi->fib_nh[i];
+ fib_nh = fib_info_nh(fi, i);
err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh);
if (err)
goto err_nexthop4_init;
@@ -4027,9 +4030,9 @@ mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
struct fib6_info *rt = mlxsw_sp_rt6->rt;
- if (nh->rif && nh->rif->dev == rt->fib6_nh.fib_nh_dev &&
+ if (nh->rif && nh->rif->dev == rt->fib6_nh->fib_nh_dev &&
ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
- &rt->fib6_nh.fib_nh_gw6))
+ &rt->fib6_nh->fib_nh_gw6))
return nh;
continue;
}
@@ -4089,13 +4092,13 @@ mlxsw_sp_fib6_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL ||
fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE) {
list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
- list)->rt->fib6_nh.fib_nh_flags |= RTNH_F_OFFLOAD;
+ list)->rt->fib6_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
return;
}
list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
- struct fib6_nh *fib6_nh = &mlxsw_sp_rt6->rt->fib6_nh;
+ struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
struct mlxsw_sp_nexthop *nh;
nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
@@ -4117,7 +4120,7 @@ mlxsw_sp_fib6_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
struct fib6_info *rt = mlxsw_sp_rt6->rt;
- rt->fib6_nh.fib_nh_flags &= ~RTNH_F_OFFLOAD;
+ rt->fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
}
}
@@ -4349,9 +4352,9 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
const struct fib_entry_notifier_info *fen_info,
struct mlxsw_sp_fib_entry *fib_entry)
{
+ struct net_device *dev = fib_info_nh(fen_info->fi, 0)->fib_nh_dev;
union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) };
u32 tb_id = mlxsw_sp_fix_tb_id(fen_info->tb_id);
- struct net_device *dev = fen_info->fi->fib_dev;
struct mlxsw_sp_ipip_entry *ipip_entry;
struct fib_info *fi = fen_info->fi;
@@ -4995,7 +4998,8 @@ static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
static bool mlxsw_sp_fib6_rt_can_mp(const struct fib6_info *rt)
{
/* RTF_CACHE routes are ignored */
- return !(rt->fib6_flags & RTF_ADDRCONF) && rt->fib6_nh.fib_nh_gw_family;
+ return !(rt->fib6_flags & RTF_ADDRCONF) &&
+ rt->fib6_nh->fib_nh_gw_family;
}
static struct fib6_info *
@@ -5054,8 +5058,8 @@ static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp,
const struct fib6_info *rt,
enum mlxsw_sp_ipip_type *ret)
{
- return rt->fib6_nh.fib_nh_dev &&
- mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->fib6_nh.fib_nh_dev, ret);
+ return rt->fib6_nh->fib_nh_dev &&
+ mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->fib6_nh->fib_nh_dev, ret);
}
static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp,
@@ -5065,7 +5069,7 @@ static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp,
{
const struct mlxsw_sp_ipip_ops *ipip_ops;
struct mlxsw_sp_ipip_entry *ipip_entry;
- struct net_device *dev = rt->fib6_nh.fib_nh_dev;
+ struct net_device *dev = rt->fib6_nh->fib_nh_dev;
struct mlxsw_sp_rif *rif;
int err;
@@ -5108,11 +5112,11 @@ static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop *nh,
const struct fib6_info *rt)
{
- struct net_device *dev = rt->fib6_nh.fib_nh_dev;
+ struct net_device *dev = rt->fib6_nh->fib_nh_dev;
nh->nh_grp = nh_grp;
- nh->nh_weight = rt->fib6_nh.fib_nh_weight;
- memcpy(&nh->gw_addr, &rt->fib6_nh.fib_nh_gw6, sizeof(nh->gw_addr));
+ nh->nh_weight = rt->fib6_nh->fib_nh_weight;
+ memcpy(&nh->gw_addr, &rt->fib6_nh->fib_nh_gw6, sizeof(nh->gw_addr));
mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
@@ -5135,7 +5139,7 @@ static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
const struct fib6_info *rt)
{
- return rt->fib6_nh.fib_nh_gw_family ||
+ return rt->fib6_nh->fib_nh_gw_family ||
mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
}
@@ -5274,17 +5278,21 @@ err_nexthop6_group_get:
static int
mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib6_entry *fib6_entry,
- struct fib6_info *rt)
+ struct fib6_info **rt_arr, unsigned int nrt6)
{
struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
- int err;
+ int err, i;
- mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt);
- if (IS_ERR(mlxsw_sp_rt6))
- return PTR_ERR(mlxsw_sp_rt6);
+ for (i = 0; i < nrt6; i++) {
+ mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
+ if (IS_ERR(mlxsw_sp_rt6)) {
+ err = PTR_ERR(mlxsw_sp_rt6);
+ goto err_rt6_create;
+ }
- list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
- fib6_entry->nrt6++;
+ list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
+ fib6_entry->nrt6++;
+ }
err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
if (err)
@@ -5293,27 +5301,38 @@ mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
return 0;
err_nexthop6_group_update:
- fib6_entry->nrt6--;
- list_del(&mlxsw_sp_rt6->list);
- mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
+ i = nrt6;
+err_rt6_create:
+ for (i--; i >= 0; i--) {
+ fib6_entry->nrt6--;
+ mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
+ struct mlxsw_sp_rt6, list);
+ list_del(&mlxsw_sp_rt6->list);
+ mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
+ }
return err;
}
static void
mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib6_entry *fib6_entry,
- struct fib6_info *rt)
+ struct fib6_info **rt_arr, unsigned int nrt6)
{
struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
+ int i;
- mlxsw_sp_rt6 = mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt);
- if (WARN_ON(!mlxsw_sp_rt6))
- return;
+ for (i = 0; i < nrt6; i++) {
+ mlxsw_sp_rt6 = mlxsw_sp_fib6_entry_rt_find(fib6_entry,
+ rt_arr[i]);
+ if (WARN_ON_ONCE(!mlxsw_sp_rt6))
+ continue;
+
+ fib6_entry->nrt6--;
+ list_del(&mlxsw_sp_rt6->list);
+ mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
+ }
- fib6_entry->nrt6--;
- list_del(&mlxsw_sp_rt6->list);
mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
- mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
}
static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
@@ -5354,29 +5373,32 @@ mlxsw_sp_fib6_entry_rt_destroy_all(struct mlxsw_sp_fib6_entry *fib6_entry)
static struct mlxsw_sp_fib6_entry *
mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_node *fib_node,
- struct fib6_info *rt)
+ struct fib6_info **rt_arr, unsigned int nrt6)
{
struct mlxsw_sp_fib6_entry *fib6_entry;
struct mlxsw_sp_fib_entry *fib_entry;
struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
- int err;
+ int err, i;
fib6_entry = kzalloc(sizeof(*fib6_entry), GFP_KERNEL);
if (!fib6_entry)
return ERR_PTR(-ENOMEM);
fib_entry = &fib6_entry->common;
- mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt);
- if (IS_ERR(mlxsw_sp_rt6)) {
- err = PTR_ERR(mlxsw_sp_rt6);
- goto err_rt6_create;
+ INIT_LIST_HEAD(&fib6_entry->rt6_list);
+
+ for (i = 0; i < nrt6; i++) {
+ mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
+ if (IS_ERR(mlxsw_sp_rt6)) {
+ err = PTR_ERR(mlxsw_sp_rt6);
+ goto err_rt6_create;
+ }
+ list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
+ fib6_entry->nrt6++;
}
- mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, mlxsw_sp_rt6->rt);
+ mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, rt_arr[0]);
- INIT_LIST_HEAD(&fib6_entry->rt6_list);
- list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
- fib6_entry->nrt6 = 1;
err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
if (err)
goto err_nexthop6_group_get;
@@ -5386,9 +5408,15 @@ mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
return fib6_entry;
err_nexthop6_group_get:
- list_del(&mlxsw_sp_rt6->list);
- mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
+ i = nrt6;
err_rt6_create:
+ for (i--; i >= 0; i--) {
+ fib6_entry->nrt6--;
+ mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
+ struct mlxsw_sp_rt6, list);
+ list_del(&mlxsw_sp_rt6->list);
+ mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
+ }
kfree(fib6_entry);
return ERR_PTR(err);
}
@@ -5431,16 +5459,16 @@ mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
static int
mlxsw_sp_fib6_node_list_insert(struct mlxsw_sp_fib6_entry *new6_entry,
- bool replace)
+ bool *p_replace)
{
struct mlxsw_sp_fib_node *fib_node = new6_entry->common.fib_node;
struct fib6_info *nrt = mlxsw_sp_fib6_entry_rt(new6_entry);
struct mlxsw_sp_fib6_entry *fib6_entry;
- fib6_entry = mlxsw_sp_fib6_node_entry_find(fib_node, nrt, replace);
+ fib6_entry = mlxsw_sp_fib6_node_entry_find(fib_node, nrt, *p_replace);
- if (replace && WARN_ON(!fib6_entry))
- return -EINVAL;
+ if (*p_replace && !fib6_entry)
+ *p_replace = false;
if (fib6_entry) {
list_add_tail(&new6_entry->common.list,
@@ -5475,11 +5503,11 @@ mlxsw_sp_fib6_node_list_remove(struct mlxsw_sp_fib6_entry *fib6_entry)
static int mlxsw_sp_fib6_node_entry_link(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib6_entry *fib6_entry,
- bool replace)
+ bool *p_replace)
{
int err;
- err = mlxsw_sp_fib6_node_list_insert(fib6_entry, replace);
+ err = mlxsw_sp_fib6_node_list_insert(fib6_entry, p_replace);
if (err)
return err;
@@ -5552,10 +5580,12 @@ static void mlxsw_sp_fib6_entry_replace(struct mlxsw_sp *mlxsw_sp,
}
static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
- struct fib6_info *rt, bool replace)
+ struct fib6_info **rt_arr,
+ unsigned int nrt6, bool replace)
{
struct mlxsw_sp_fib6_entry *fib6_entry;
struct mlxsw_sp_fib_node *fib_node;
+ struct fib6_info *rt = rt_arr[0];
int err;
if (mlxsw_sp->router->aborted)
@@ -5580,19 +5610,21 @@ static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
*/
fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt, replace);
if (fib6_entry) {
- err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt);
+ err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry,
+ rt_arr, nrt6);
if (err)
goto err_fib6_entry_nexthop_add;
return 0;
}
- fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt);
+ fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt_arr,
+ nrt6);
if (IS_ERR(fib6_entry)) {
err = PTR_ERR(fib6_entry);
goto err_fib6_entry_create;
}
- err = mlxsw_sp_fib6_node_entry_link(mlxsw_sp, fib6_entry, replace);
+ err = mlxsw_sp_fib6_node_entry_link(mlxsw_sp, fib6_entry, &replace);
if (err)
goto err_fib6_node_entry_link;
@@ -5609,10 +5641,12 @@ err_fib6_entry_nexthop_add:
}
static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
- struct fib6_info *rt)
+ struct fib6_info **rt_arr,
+ unsigned int nrt6)
{
struct mlxsw_sp_fib6_entry *fib6_entry;
struct mlxsw_sp_fib_node *fib_node;
+ struct fib6_info *rt = rt_arr[0];
if (mlxsw_sp->router->aborted)
return;
@@ -5624,11 +5658,12 @@ static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
if (WARN_ON(!fib6_entry))
return;
- /* If route is part of a multipath entry, but not the last one
- * removed, then only reduce its nexthop group.
+ /* If not all the nexthops are deleted, then only reduce the nexthop
+ * group.
*/
- if (!list_is_singular(&fib6_entry->rt6_list)) {
- mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, fib6_entry, rt);
+ if (nrt6 != fib6_entry->nrt6) {
+ mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, fib6_entry, rt_arr,
+ nrt6);
return;
}
@@ -5889,10 +5924,15 @@ static void mlxsw_sp_router_fib_abort(struct mlxsw_sp *mlxsw_sp)
dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
}
+struct mlxsw_sp_fib6_event_work {
+ struct fib6_info **rt_arr;
+ unsigned int nrt6;
+};
+
struct mlxsw_sp_fib_event_work {
struct work_struct work;
union {
- struct fib6_entry_notifier_info fen6_info;
+ struct mlxsw_sp_fib6_event_work fib6_work;
struct fib_entry_notifier_info fen_info;
struct fib_rule_notifier_info fr_info;
struct fib_nh_notifier_info fnh_info;
@@ -5903,6 +5943,54 @@ struct mlxsw_sp_fib_event_work {
unsigned long event;
};
+static int
+mlxsw_sp_router_fib6_work_init(struct mlxsw_sp_fib6_event_work *fib6_work,
+ struct fib6_entry_notifier_info *fen6_info)
+{
+ struct fib6_info *rt = fen6_info->rt;
+ struct fib6_info **rt_arr;
+ struct fib6_info *iter;
+ unsigned int nrt6;
+ int i = 0;
+
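+ /* Pack the route and all of its siblings into one array; a reference
+ * is taken on each entry and released by the work item in
+ * mlxsw_sp_router_fib6_work_fini().
+ */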
+ nrt6 = fen6_info->nsiblings + 1;
+
+ rt_arr = kcalloc(nrt6, sizeof(struct fib6_info *), GFP_ATOMIC);
+ if (!rt_arr)
+ return -ENOMEM;
+
+ fib6_work->rt_arr = rt_arr;
+ fib6_work->nrt6 = nrt6;
+
+ rt_arr[0] = rt;
+ fib6_info_hold(rt);
+
+ if (!fen6_info->nsiblings)
+ return 0;
+
+ list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
+ if (i == fen6_info->nsiblings)
+ break;
+
+ rt_arr[i + 1] = iter;
+ fib6_info_hold(iter);
+ i++;
+ }
+ WARN_ON_ONCE(i != fen6_info->nsiblings);
+
+ return 0;
+}
+
+static void
+mlxsw_sp_router_fib6_work_fini(struct mlxsw_sp_fib6_event_work *fib6_work)
+{
+ int i;
+
+ for (i = 0; i < fib6_work->nrt6; i++)
+ mlxsw_sp_rt6_release(fib6_work->rt_arr[i]);
+ kfree(fib6_work->rt_arr);
+}
+
static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
{
struct mlxsw_sp_fib_event_work *fib_work =
@@ -5961,18 +6049,21 @@ static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
switch (fib_work->event) {
case FIB_EVENT_ENTRY_REPLACE: /* fall through */
- case FIB_EVENT_ENTRY_APPEND: /* fall through */
case FIB_EVENT_ENTRY_ADD:
replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
err = mlxsw_sp_router_fib6_add(mlxsw_sp,
- fib_work->fen6_info.rt, replace);
+ fib_work->fib6_work.rt_arr,
+ fib_work->fib6_work.nrt6,
+ replace);
if (err)
mlxsw_sp_router_fib_abort(mlxsw_sp);
- mlxsw_sp_rt6_release(fib_work->fen6_info.rt);
+ mlxsw_sp_router_fib6_work_fini(&fib_work->fib6_work);
break;
case FIB_EVENT_ENTRY_DEL:
- mlxsw_sp_router_fib6_del(mlxsw_sp, fib_work->fen6_info.rt);
- mlxsw_sp_rt6_release(fib_work->fen6_info.rt);
+ mlxsw_sp_router_fib6_del(mlxsw_sp,
+ fib_work->fib6_work.rt_arr,
+ fib_work->fib6_work.nrt6);
+ mlxsw_sp_router_fib6_work_fini(&fib_work->fib6_work);
break;
case FIB_EVENT_RULE_ADD:
/* if we get here, a rule was added that we do not support.
@@ -6061,22 +6152,26 @@ static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work,
}
}
-static void mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work,
- struct fib_notifier_info *info)
+static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work,
+ struct fib_notifier_info *info)
{
struct fib6_entry_notifier_info *fen6_info;
+ int err;
switch (fib_work->event) {
case FIB_EVENT_ENTRY_REPLACE: /* fall through */
- case FIB_EVENT_ENTRY_APPEND: /* fall through */
case FIB_EVENT_ENTRY_ADD: /* fall through */
case FIB_EVENT_ENTRY_DEL:
fen6_info = container_of(info, struct fib6_entry_notifier_info,
info);
- fib_work->fen6_info = *fen6_info;
- fib6_info_hold(fib_work->fen6_info.rt);
+ err = mlxsw_sp_router_fib6_work_init(&fib_work->fib6_work,
+ fen6_info);
+ if (err)
+ return err;
break;
}
+
+ return 0;
}
static void
@@ -6185,6 +6280,20 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
NL_SET_ERR_MSG_MOD(info->extack, "IPv6 gateway with IPv4 route is not supported");
return notifier_from_errno(-EINVAL);
}
+ if (fen_info->fi->nh) {
+ NL_SET_ERR_MSG_MOD(info->extack, "IPv4 route with nexthop objects is not supported");
+ return notifier_from_errno(-EINVAL);
+ }
+ } else if (info->family == AF_INET6) {
+ struct fib6_entry_notifier_info *fen6_info;
+
+ fen6_info = container_of(info,
+ struct fib6_entry_notifier_info,
+ info);
+ if (fen6_info->rt->nh) {
+ NL_SET_ERR_MSG_MOD(info->extack, "IPv6 route with nexthop objects is not supported");
+ return notifier_from_errno(-EINVAL);
+ }
}
break;
}
@@ -6203,7 +6312,9 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
break;
case AF_INET6:
INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work);
- mlxsw_sp_router_fib6_event(fib_work, info);
+ err = mlxsw_sp_router_fib6_event(fib_work, info);
+ if (err)
+ goto err_fib_event;
break;
case RTNL_FAMILY_IP6MR:
case RTNL_FAMILY_IPMR:
@@ -6215,6 +6326,10 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
mlxsw_core_schedule_work(&fib_work->work);
return NOTIFY_DONE;
+
+err_fib_event:
+ kfree(fib_work);
+ return NOTIFY_BAD;
}
struct mlxsw_sp_rif *
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index fc4f19167262..bdab96f5bc70 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -299,6 +299,8 @@ static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
u64 len;
int err;
+ memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));
+
if (mlxsw_core_skb_transmit_busy(mlxsw_sx->core, &tx_info))
return NETDEV_TX_BUSY;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index 451216dd7f6b..19202bdb5105 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -17,6 +17,8 @@ enum {
MLXSW_TRAP_ID_MVRP = 0x15,
MLXSW_TRAP_ID_RPVST = 0x16,
MLXSW_TRAP_ID_DHCP = 0x19,
+ MLXSW_TRAP_ID_PTP0 = 0x28,
+ MLXSW_TRAP_ID_PTP1 = 0x29,
MLXSW_TRAP_ID_IGMP_QUERY = 0x30,
MLXSW_TRAP_ID_IGMP_V1_REPORT = 0x31,
MLXSW_TRAP_ID_IGMP_V2_REPORT = 0x32,
@@ -76,6 +78,10 @@ enum {
enum mlxsw_event_trap_id {
/* Port Up/Down event generated by hardware */
MLXSW_TRAP_ID_PUDE = 0x8,
+ /* PTP Ingress FIFO has a new entry */
+ MLXSW_TRAP_ID_PTP_ING_FIFO = 0x2D,
+ /* PTP Egress FIFO has a new entry */
+ MLXSW_TRAP_ID_PTP_EGR_FIFO = 0x2E,
};
#endif /* _MLXSW_TRAP_H */
diff --git a/drivers/net/ethernet/mscc/Makefile b/drivers/net/ethernet/mscc/Makefile
index cb52a3b128ae..9a36c26095c8 100644
--- a/drivers/net/ethernet/mscc/Makefile
+++ b/drivers/net/ethernet/mscc/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: (GPL-2.0 OR MIT)
obj-$(CONFIG_MSCC_OCELOT_SWITCH) += mscc_ocelot_common.o
mscc_ocelot_common-y := ocelot.o ocelot_io.o
-mscc_ocelot_common-y += ocelot_regs.o
+mscc_ocelot_common-y += ocelot_regs.o ocelot_tc.o ocelot_police.o ocelot_ace.o ocelot_flower.o
obj-$(CONFIG_MSCC_OCELOT_SWITCH_OCELOT) += ocelot_board.o
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 02ad11e0b0d8..b71e4ecbe469 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -22,6 +22,7 @@
#include <net/switchdev.h>
#include "ocelot.h"
+#include "ocelot_ace.h"
#define TABLE_UPDATE_SLEEP_US 10
#define TABLE_UPDATE_TIMEOUT_US 100000
@@ -130,6 +131,13 @@ static void ocelot_mact_init(struct ocelot *ocelot)
ocelot_write(ocelot, MACACCESS_CMD_INIT, ANA_TABLES_MACACCESS);
}
+static void ocelot_vcap_enable(struct ocelot *ocelot, struct ocelot_port *port)
+{
+ ocelot_write_gix(ocelot, ANA_PORT_VCAP_S2_CFG_S2_ENA |
+ ANA_PORT_VCAP_S2_CFG_S2_IP6_CFG(0xa),
+ ANA_PORT_VCAP_S2_CFG, port->chip_port);
+}
+
static inline u32 ocelot_vlant_read_vlanaccess(struct ocelot *ocelot)
{
return ocelot_read(ocelot, ANA_TABLES_VLANACCESS);
@@ -884,6 +892,13 @@ static int ocelot_set_features(struct net_device *dev,
struct ocelot_port *port = netdev_priv(dev);
netdev_features_t changed = dev->features ^ features;
+ if ((dev->features & NETIF_F_HW_TC) > (features & NETIF_F_HW_TC) &&
+ port->tc.offload_cnt) {
+ netdev_err(dev,
+ "Cannot disable HW TC offload while offloads active\n");
+ return -EBUSY;
+ }
+
if (changed & NETIF_F_HW_VLAN_CTAG_FILTER)
ocelot_vlan_mode(port, features);
@@ -917,6 +932,7 @@ static const struct net_device_ops ocelot_port_netdev_ops = {
.ndo_vlan_rx_kill_vid = ocelot_vlan_rx_kill_vid,
.ndo_set_features = ocelot_set_features,
.ndo_get_port_parent_id = ocelot_get_port_parent_id,
+ .ndo_setup_tc = ocelot_setup_tc,
};
static void ocelot_get_strings(struct net_device *netdev, u32 sset, u8 *data)
@@ -1636,8 +1652,9 @@ int ocelot_probe_port(struct ocelot *ocelot, u8 port,
dev->netdev_ops = &ocelot_port_netdev_ops;
dev->ethtool_ops = &ocelot_ethtool_ops;
- dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXFCS;
- dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXFCS |
+ NETIF_F_HW_TC;
+ dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
memcpy(dev->dev_addr, ocelot->base_mac, ETH_ALEN);
dev->dev_addr[ETH_ALEN - 1] += port;
@@ -1653,6 +1670,9 @@ int ocelot_probe_port(struct ocelot *ocelot, u8 port,
/* Basic L2 initialization */
ocelot_vlan_port_apply(ocelot, ocelot_port);
+ /* Enable vcap lookups */
+ ocelot_vcap_enable(ocelot, ocelot_port);
+
return 0;
err_register_netdev:
@@ -1687,6 +1707,7 @@ int ocelot_init(struct ocelot *ocelot)
ocelot_mact_init(ocelot);
ocelot_vlan_init(ocelot);
+ ocelot_ace_init(ocelot);
for (port = 0; port < ocelot->num_phys_ports; port++) {
/* Clear all counters (5 groups) */
@@ -1799,6 +1820,7 @@ void ocelot_deinit(struct ocelot *ocelot)
{
destroy_workqueue(ocelot->stats_queue);
mutex_destroy(&ocelot->stats_lock);
+ ocelot_ace_deinit();
}
EXPORT_SYMBOL(ocelot_deinit);
diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h
index 541fe41e60b0..f7eeb4806897 100644
--- a/drivers/net/ethernet/mscc/ocelot.h
+++ b/drivers/net/ethernet/mscc/ocelot.h
@@ -22,6 +22,7 @@
#include "ocelot_rew.h"
#include "ocelot_sys.h"
#include "ocelot_qs.h"
+#include "ocelot_tc.h"
#define PGID_AGGR 64
#define PGID_SRC 80
@@ -68,6 +69,7 @@ enum ocelot_target {
QSYS,
REW,
SYS,
+ S2,
HSIO,
TARGET_MAX,
};
@@ -334,6 +336,13 @@ enum ocelot_reg {
SYS_CM_DATA_RD,
SYS_CM_OP,
SYS_CM_DATA,
+ S2_CORE_UPDATE_CTRL = S2 << TARGET_OFFSET,
+ S2_CORE_MV_CFG,
+ S2_CACHE_ENTRY_DAT,
+ S2_CACHE_MASK_DAT,
+ S2_CACHE_ACTION_DAT,
+ S2_CACHE_CNT_DAT,
+ S2_CACHE_TG_DAT,
};
enum ocelot_regfield {
@@ -454,6 +463,8 @@ struct ocelot_port {
phy_interface_t phy_mode;
struct phy *serdes;
+
+ struct ocelot_port_tc tc;
};
u32 __ocelot_read_ix(struct ocelot *ocelot, u32 reg, u32 offset);
diff --git a/drivers/net/ethernet/mscc/ocelot_ace.c b/drivers/net/ethernet/mscc/ocelot_ace.c
new file mode 100644
index 000000000000..39aca1ab4687
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_ace.c
@@ -0,0 +1,782 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Microsemi Ocelot Switch driver
+ * Copyright (c) 2019 Microsemi Corporation
+ */
+
+#include <linux/iopoll.h>
+#include <linux/proc_fs.h>
+
+#include "ocelot_ace.h"
+#include "ocelot_vcap.h"
+#include "ocelot_s2.h"
+
+#define OCELOT_POLICER_DISCARD 0x17f
+
+static struct ocelot_acl_block *acl_block;
+
+struct vcap_props {
+ const char *name; /* Symbolic name */
+ u16 tg_width; /* Type-group width (in bits) */
+ u16 sw_count; /* Sub word count */
+ u16 entry_count; /* Entry count */
+ u16 entry_words; /* Number of entry words */
+ u16 entry_width; /* Entry width (in bits) */
+ u16 action_count; /* Action count */
+ u16 action_words; /* Number of action words */
+ u16 action_width; /* Action width (in bits) */
+ u16 action_type_width; /* Action type width (in bits) */
+ struct {
+ u16 width; /* Action type width (in bits) */
+ u16 count; /* Action type sub word count */
+ } action_table[2];
+ u16 counter_words; /* Number of counter words */
+ u16 counter_width; /* Counter width (in bits) */
+};
+
+#define ENTRY_WIDTH 32
+#define BITS_TO_32BIT(x) (1 + (((x) - 1) / ENTRY_WIDTH))
+
+static const struct vcap_props vcap_is2 = {
+ .name = "IS2",
+ .tg_width = 2,
+ .sw_count = 4,
+ .entry_count = VCAP_IS2_CNT,
+ .entry_words = BITS_TO_32BIT(VCAP_IS2_ENTRY_WIDTH),
+ .entry_width = VCAP_IS2_ENTRY_WIDTH,
+ .action_count = (VCAP_IS2_CNT + VCAP_PORT_CNT + 2),
+ .action_words = BITS_TO_32BIT(VCAP_IS2_ACTION_WIDTH),
+ .action_width = (VCAP_IS2_ACTION_WIDTH),
+ .action_type_width = 1,
+ .action_table = {
+ {
+ .width = (IS2_AO_ACL_ID + IS2_AL_ACL_ID),
+ .count = 2
+ },
+ {
+ .width = 6,
+ .count = 4
+ },
+ },
+ .counter_words = BITS_TO_32BIT(4 * ENTRY_WIDTH),
+ .counter_width = ENTRY_WIDTH,
+};
+
+enum vcap_sel {
+ VCAP_SEL_ENTRY = 0x1,
+ VCAP_SEL_ACTION = 0x2,
+ VCAP_SEL_COUNTER = 0x4,
+ VCAP_SEL_ALL = 0x7,
+};
+
+enum vcap_cmd {
+ VCAP_CMD_WRITE = 0, /* Copy from Cache to TCAM */
+ VCAP_CMD_READ = 1, /* Copy from TCAM to Cache */
+ VCAP_CMD_MOVE_UP = 2, /* Move <count> up */
+ VCAP_CMD_MOVE_DOWN = 3, /* Move <count> down */
+ VCAP_CMD_INITIALIZE = 4, /* Write all (from cache) */
+};
+
+#define VCAP_ENTRY_WIDTH 12 /* Max entry width (32bit words) */
+#define VCAP_COUNTER_WIDTH 4 /* Max counter width (32bit words) */
+
+struct vcap_data {
+ u32 entry[VCAP_ENTRY_WIDTH]; /* ENTRY_DAT */
+ u32 mask[VCAP_ENTRY_WIDTH]; /* MASK_DAT */
+ u32 action[VCAP_ENTRY_WIDTH]; /* ACTION_DAT */
+ u32 counter[VCAP_COUNTER_WIDTH]; /* CNT_DAT */
+ u32 tg; /* TG_DAT */
+ u32 type; /* Action type */
+ u32 tg_sw; /* Current type-group */
+ u32 cnt; /* Current counter */
+ u32 key_offset; /* Current entry offset */
+ u32 action_offset; /* Current action offset */
+ u32 counter_offset; /* Current counter offset */
+ u32 tg_value; /* Current type-group value */
+ u32 tg_mask; /* Current type-group mask */
+};
+
+static u32 vcap_s2_read_update_ctrl(struct ocelot *oc)
+{
+ return ocelot_read(oc, S2_CORE_UPDATE_CTRL);
+}
+
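+/* Issue a VCAP cache/TCAM command for the given index and poll until the
+ * UPDATE_SHOT bit clears.
+ */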
+static void vcap_cmd(struct ocelot *oc, u16 ix, int cmd, int sel)
+{
+ u32 value = (S2_CORE_UPDATE_CTRL_UPDATE_CMD(cmd) |
+ S2_CORE_UPDATE_CTRL_UPDATE_ADDR(ix) |
+ S2_CORE_UPDATE_CTRL_UPDATE_SHOT);
+
+ if ((sel & VCAP_SEL_ENTRY) && ix >= vcap_is2.entry_count)
+ return;
+
+ if (!(sel & VCAP_SEL_ENTRY))
+ value |= S2_CORE_UPDATE_CTRL_UPDATE_ENTRY_DIS;
+
+ if (!(sel & VCAP_SEL_ACTION))
+ value |= S2_CORE_UPDATE_CTRL_UPDATE_ACTION_DIS;
+
+ if (!(sel & VCAP_SEL_COUNTER))
+ value |= S2_CORE_UPDATE_CTRL_UPDATE_CNT_DIS;
+
+ ocelot_write(oc, value, S2_CORE_UPDATE_CTRL);
+ readx_poll_timeout(vcap_s2_read_update_ctrl, oc, value,
+ (value & S2_CORE_UPDATE_CTRL_UPDATE_SHOT) == 0,
+ 10, 100000);
+}
+
+/* Convert from 0-based row to VCAP entry row and run command */
+static void vcap_row_cmd(struct ocelot *oc, u32 row, int cmd, int sel)
+{
+ vcap_cmd(oc, vcap_is2.entry_count - row - 1, cmd, sel);
+}
+
+static void vcap_entry2cache(struct ocelot *oc, struct vcap_data *data)
+{
+ u32 i;
+
+ for (i = 0; i < vcap_is2.entry_words; i++) {
+ ocelot_write_rix(oc, data->entry[i], S2_CACHE_ENTRY_DAT, i);
+ ocelot_write_rix(oc, ~data->mask[i], S2_CACHE_MASK_DAT, i);
+ }
+ ocelot_write(oc, data->tg, S2_CACHE_TG_DAT);
+}
+
+static void vcap_cache2entry(struct ocelot *oc, struct vcap_data *data)
+{
+ u32 i;
+
+ for (i = 0; i < vcap_is2.entry_words; i++) {
+ data->entry[i] = ocelot_read_rix(oc, S2_CACHE_ENTRY_DAT, i);
+ // Invert mask
+ data->mask[i] = ~ocelot_read_rix(oc, S2_CACHE_MASK_DAT, i);
+ }
+ data->tg = ocelot_read(oc, S2_CACHE_TG_DAT);
+}
+
+static void vcap_action2cache(struct ocelot *oc, struct vcap_data *data)
+{
+ u32 i, width, mask;
+
+ /* Encode action type */
+ width = vcap_is2.action_type_width;
+ if (width) {
+ mask = GENMASK(width, 0);
+ data->action[0] = ((data->action[0] & ~mask) | data->type);
+ }
+
+ for (i = 0; i < vcap_is2.action_words; i++)
+ ocelot_write_rix(oc, data->action[i], S2_CACHE_ACTION_DAT, i);
+
+ for (i = 0; i < vcap_is2.counter_words; i++)
+ ocelot_write_rix(oc, data->counter[i], S2_CACHE_CNT_DAT, i);
+}
+
+static void vcap_cache2action(struct ocelot *oc, struct vcap_data *data)
+{
+ u32 i, width;
+
+ for (i = 0; i < vcap_is2.action_words; i++)
+ data->action[i] = ocelot_read_rix(oc, S2_CACHE_ACTION_DAT, i);
+
+ for (i = 0; i < vcap_is2.counter_words; i++)
+ data->counter[i] = ocelot_read_rix(oc, S2_CACHE_CNT_DAT, i);
+
+ /* Extract action type */
+ width = vcap_is2.action_type_width;
+ data->type = (width ? (data->action[0] & GENMASK(width, 0)) : 0);
+}
+
+/* Calculate offsets for entry */
+static void is2_data_get(struct vcap_data *data, int ix)
+{
+ u32 i, col, offset, count, cnt, base, width = vcap_is2.tg_width;
+
+ count = (data->tg_sw == VCAP_TG_HALF ? 2 : 4);
+ col = (ix % 2);
+ cnt = (vcap_is2.sw_count / count);
+ base = (vcap_is2.sw_count - col * cnt - cnt);
+ data->tg_value = 0;
+ data->tg_mask = 0;
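+ /* Build the type-group value and mask covering every subword used
+ * by this entry.
+ */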
+ for (i = 0; i < cnt; i++) {
+ offset = ((base + i) * width);
+ data->tg_value |= (data->tg_sw << offset);
+ data->tg_mask |= GENMASK(offset + width - 1, offset);
+ }
+
+ /* Calculate key/action/counter offsets */
+ col = (count - col - 1);
+ data->key_offset = (base * vcap_is2.entry_width) / vcap_is2.sw_count;
+ data->counter_offset = (cnt * col * vcap_is2.counter_width);
+ i = data->type;
+ width = vcap_is2.action_table[i].width;
+ cnt = vcap_is2.action_table[i].count;
+ data->action_offset =
+ (((cnt * col * width) / count) + vcap_is2.action_type_width);
+}
+
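+/* Write the <len>-bit <value> into the 32-bit word array, one bit at a
+ * time, starting at bit position <offset>.
+ */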
+static void vcap_data_set(u32 *data, u32 offset, u32 len, u32 value)
+{
+ u32 i, v, m;
+
+ for (i = 0; i < len; i++, offset++) {
+ v = data[offset / ENTRY_WIDTH];
+ m = (1 << (offset % ENTRY_WIDTH));
+ if (value & (1 << i))
+ v |= m;
+ else
+ v &= ~m;
+ data[offset / ENTRY_WIDTH] = v;
+ }
+}
+
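+/* Read <len> bits from the 32-bit word array, starting at bit position
+ * <offset>.
+ */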
+static u32 vcap_data_get(u32 *data, u32 offset, u32 len)
+{
+ u32 i, v, m, value = 0;
+
+ for (i = 0; i < len; i++, offset++) {
+ v = data[offset / ENTRY_WIDTH];
+ m = (1 << (offset % ENTRY_WIDTH));
+ if (v & m)
+ value |= (1 << i);
+ }
+ return value;
+}
+
+static void vcap_key_set(struct vcap_data *data, u32 offset, u32 width,
+ u32 value, u32 mask)
+{
+ vcap_data_set(data->entry, offset + data->key_offset, width, value);
+ vcap_data_set(data->mask, offset + data->key_offset, width, mask);
+}
+
+static void vcap_key_bytes_set(struct vcap_data *data, u32 offset, u8 *val,
+ u8 *msk, u32 count)
+{
+ u32 i, j, n = 0, value = 0, mask = 0;
+
+ /* Data wider than 32 bits is split into chunks of at most 32 bits.
+ * The 32 LSBs of the data are written to the 32 MSBs of the TCAM.
+ */
+ offset += (count * 8);
+ for (i = 0; i < count; i++) {
+ j = (count - i - 1);
+ value += (val[j] << n);
+ mask += (msk[j] << n);
+ n += 8;
+ if (n == ENTRY_WIDTH || (i + 1) == count) {
+ offset -= n;
+ vcap_key_set(data, offset, n, value, mask);
+ n = 0;
+ value = 0;
+ mask = 0;
+ }
+ }
+}
+
+static void vcap_key_l4_port_set(struct vcap_data *data, u32 offset,
+ struct ocelot_vcap_udp_tcp *port)
+{
+ vcap_key_set(data, offset, 16, port->value, port->mask);
+}
+
+static void vcap_key_bit_set(struct vcap_data *data, u32 offset,
+ enum ocelot_vcap_bit val)
+{
+ vcap_key_set(data, offset, 1, val == OCELOT_VCAP_BIT_1 ? 1 : 0,
+ val == OCELOT_VCAP_BIT_ANY ? 0 : 1);
+}
+
+#define VCAP_KEY_SET(fld, val, msk) \
+ vcap_key_set(&data, IS2_HKO_##fld, IS2_HKL_##fld, val, msk)
+#define VCAP_KEY_ANY_SET(fld) \
+ vcap_key_set(&data, IS2_HKO_##fld, IS2_HKL_##fld, 0, 0)
+#define VCAP_KEY_BIT_SET(fld, val) vcap_key_bit_set(&data, IS2_HKO_##fld, val)
+#define VCAP_KEY_BYTES_SET(fld, val, msk) \
+ vcap_key_bytes_set(&data, IS2_HKO_##fld, val, msk, IS2_HKL_##fld / 8)
+
+static void vcap_action_set(struct vcap_data *data, u32 offset, u32 width,
+ u32 value)
+{
+ vcap_data_set(data->action, offset + data->action_offset, width, value);
+}
+
+#define VCAP_ACT_SET(fld, val) \
+ vcap_action_set(data, IS2_AO_##fld, IS2_AL_##fld, val)
+
+static void is2_action_set(struct vcap_data *data,
+ enum ocelot_ace_action action)
+{
+ switch (action) {
+ case OCELOT_ACL_ACTION_DROP:
+ VCAP_ACT_SET(PORT_MASK, 0x0);
+ VCAP_ACT_SET(MASK_MODE, 0x1);
+ VCAP_ACT_SET(POLICE_ENA, 0x1);
+ VCAP_ACT_SET(POLICE_IDX, OCELOT_POLICER_DISCARD);
+ VCAP_ACT_SET(CPU_QU_NUM, 0x0);
+ VCAP_ACT_SET(CPU_COPY_ENA, 0x0);
+ break;
+ case OCELOT_ACL_ACTION_TRAP:
+ VCAP_ACT_SET(PORT_MASK, 0x0);
+ VCAP_ACT_SET(MASK_MODE, 0x0);
+ VCAP_ACT_SET(POLICE_ENA, 0x0);
+ VCAP_ACT_SET(POLICE_IDX, 0x0);
+ VCAP_ACT_SET(CPU_QU_NUM, 0x0);
+ VCAP_ACT_SET(CPU_COPY_ENA, 0x1);
+ break;
+ }
+}
+
+static void is2_entry_set(struct ocelot *ocelot, int ix,
+ struct ocelot_ace_rule *ace)
+{
+ u32 val, msk, type, type_mask = 0xf, i, count;
+ struct ocelot_ace_vlan *tag = &ace->vlan;
+ struct ocelot_vcap_u64 payload;
+ struct vcap_data data;
+ int row = (ix / 2);
+
+ memset(&payload, 0, sizeof(payload));
+ memset(&data, 0, sizeof(data));
+
+ /* Read row */
+ vcap_row_cmd(ocelot, row, VCAP_CMD_READ, VCAP_SEL_ALL);
+ vcap_cache2entry(ocelot, &data);
+ vcap_cache2action(ocelot, &data);
+
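+ /* Use a half key; only rules with a non-zero priority get their
+ * type-group bits set, so a zeroed rule leaves the entry invalid.
+ */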
+ data.tg_sw = VCAP_TG_HALF;
+ is2_data_get(&data, ix);
+ data.tg = (data.tg & ~data.tg_mask);
+ if (ace->prio != 0)
+ data.tg |= data.tg_value;
+
+ data.type = IS2_ACTION_TYPE_NORMAL;
+
+ VCAP_KEY_ANY_SET(PAG);
+ VCAP_KEY_SET(IGR_PORT_MASK, 0, ~BIT(ace->chip_port));
+ VCAP_KEY_BIT_SET(FIRST, OCELOT_VCAP_BIT_1);
+ VCAP_KEY_BIT_SET(HOST_MATCH, OCELOT_VCAP_BIT_ANY);
+ VCAP_KEY_BIT_SET(L2_MC, ace->dmac_mc);
+ VCAP_KEY_BIT_SET(L2_BC, ace->dmac_bc);
+ VCAP_KEY_BIT_SET(VLAN_TAGGED, tag->tagged);
+ VCAP_KEY_SET(VID, tag->vid.value, tag->vid.mask);
+ VCAP_KEY_SET(PCP, tag->pcp.value[0], tag->pcp.mask[0]);
+ VCAP_KEY_BIT_SET(DEI, tag->dei);
+
+ switch (ace->type) {
+ case OCELOT_ACE_TYPE_ETYPE: {
+ struct ocelot_ace_frame_etype *etype = &ace->frame.etype;
+
+ type = IS2_TYPE_ETYPE;
+ VCAP_KEY_BYTES_SET(L2_DMAC, etype->dmac.value,
+ etype->dmac.mask);
+ VCAP_KEY_BYTES_SET(L2_SMAC, etype->smac.value,
+ etype->smac.mask);
+ VCAP_KEY_BYTES_SET(MAC_ETYPE_ETYPE, etype->etype.value,
+ etype->etype.mask);
+ VCAP_KEY_ANY_SET(MAC_ETYPE_L2_PAYLOAD); /* Clear unused bits */
+ vcap_key_bytes_set(&data, IS2_HKO_MAC_ETYPE_L2_PAYLOAD,
+ etype->data.value, etype->data.mask, 2);
+ break;
+ }
+ case OCELOT_ACE_TYPE_LLC: {
+ struct ocelot_ace_frame_llc *llc = &ace->frame.llc;
+
+ type = IS2_TYPE_LLC;
+ VCAP_KEY_BYTES_SET(L2_DMAC, llc->dmac.value, llc->dmac.mask);
+ VCAP_KEY_BYTES_SET(L2_SMAC, llc->smac.value, llc->smac.mask);
+ for (i = 0; i < 4; i++) {
+ payload.value[i] = llc->llc.value[i];
+ payload.mask[i] = llc->llc.mask[i];
+ }
+ VCAP_KEY_BYTES_SET(MAC_LLC_L2_LLC, payload.value, payload.mask);
+ break;
+ }
+ case OCELOT_ACE_TYPE_SNAP: {
+ struct ocelot_ace_frame_snap *snap = &ace->frame.snap;
+
+ type = IS2_TYPE_SNAP;
+ VCAP_KEY_BYTES_SET(L2_DMAC, snap->dmac.value, snap->dmac.mask);
+ VCAP_KEY_BYTES_SET(L2_SMAC, snap->smac.value, snap->smac.mask);
+ VCAP_KEY_BYTES_SET(MAC_SNAP_L2_SNAP,
+ ace->frame.snap.snap.value,
+ ace->frame.snap.snap.mask);
+ break;
+ }
+ case OCELOT_ACE_TYPE_ARP: {
+ struct ocelot_ace_frame_arp *arp = &ace->frame.arp;
+
+ type = IS2_TYPE_ARP;
+ VCAP_KEY_BYTES_SET(MAC_ARP_L2_SMAC, arp->smac.value,
+ arp->smac.mask);
+ VCAP_KEY_BIT_SET(MAC_ARP_ARP_ADDR_SPACE_OK, arp->ethernet);
+ VCAP_KEY_BIT_SET(MAC_ARP_ARP_PROTO_SPACE_OK, arp->ip);
+ VCAP_KEY_BIT_SET(MAC_ARP_ARP_LEN_OK, arp->length);
+ VCAP_KEY_BIT_SET(MAC_ARP_ARP_TGT_MATCH, arp->dmac_match);
+ VCAP_KEY_BIT_SET(MAC_ARP_ARP_SENDER_MATCH, arp->smac_match);
+ VCAP_KEY_BIT_SET(MAC_ARP_ARP_OPCODE_UNKNOWN, arp->unknown);
+
+ /* OPCODE is inverse, bit 0 is reply flag, bit 1 is RARP flag */
+ val = ((arp->req == OCELOT_VCAP_BIT_0 ? 1 : 0) |
+ (arp->arp == OCELOT_VCAP_BIT_0 ? 2 : 0));
+ msk = ((arp->req == OCELOT_VCAP_BIT_ANY ? 0 : 1) |
+ (arp->arp == OCELOT_VCAP_BIT_ANY ? 0 : 2));
+ VCAP_KEY_SET(MAC_ARP_ARP_OPCODE, val, msk);
+ vcap_key_bytes_set(&data, IS2_HKO_MAC_ARP_L3_IP4_DIP,
+ arp->dip.value.addr, arp->dip.mask.addr, 4);
+ vcap_key_bytes_set(&data, IS2_HKO_MAC_ARP_L3_IP4_SIP,
+ arp->sip.value.addr, arp->sip.mask.addr, 4);
+ VCAP_KEY_ANY_SET(MAC_ARP_DIP_EQ_SIP);
+ break;
+ }
+ case OCELOT_ACE_TYPE_IPV4:
+ case OCELOT_ACE_TYPE_IPV6: {
+ enum ocelot_vcap_bit sip_eq_dip, sport_eq_dport, seq_zero, tcp;
+ enum ocelot_vcap_bit ttl, fragment, options, tcp_ack, tcp_urg;
+ enum ocelot_vcap_bit tcp_fin, tcp_syn, tcp_rst, tcp_psh;
+ struct ocelot_ace_frame_ipv4 *ipv4 = NULL;
+ struct ocelot_ace_frame_ipv6 *ipv6 = NULL;
+ struct ocelot_vcap_udp_tcp *sport, *dport;
+ struct ocelot_vcap_ipv4 sip, dip;
+ struct ocelot_vcap_u8 proto, ds;
+ struct ocelot_vcap_u48 *ip_data;
+
+ if (ace->type == OCELOT_ACE_TYPE_IPV4) {
+ ipv4 = &ace->frame.ipv4;
+ ttl = ipv4->ttl;
+ fragment = ipv4->fragment;
+ options = ipv4->options;
+ proto = ipv4->proto;
+ ds = ipv4->ds;
+ ip_data = &ipv4->data;
+ sip = ipv4->sip;
+ dip = ipv4->dip;
+ sport = &ipv4->sport;
+ dport = &ipv4->dport;
+ tcp_fin = ipv4->tcp_fin;
+ tcp_syn = ipv4->tcp_syn;
+ tcp_rst = ipv4->tcp_rst;
+ tcp_psh = ipv4->tcp_psh;
+ tcp_ack = ipv4->tcp_ack;
+ tcp_urg = ipv4->tcp_urg;
+ sip_eq_dip = ipv4->sip_eq_dip;
+ sport_eq_dport = ipv4->sport_eq_dport;
+ seq_zero = ipv4->seq_zero;
+ } else {
+ ipv6 = &ace->frame.ipv6;
+ ttl = ipv6->ttl;
+ fragment = OCELOT_VCAP_BIT_ANY;
+ options = OCELOT_VCAP_BIT_ANY;
+ proto = ipv6->proto;
+ ds = ipv6->ds;
+ ip_data = &ipv6->data;
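+ /* Map the lower 8 bytes of the IPv6 source address onto the
+ * IPv4 DIP and SIP fields of the half key.
+ */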
+ for (i = 0; i < 8; i++) {
+ val = ipv6->sip.value[i + 8];
+ msk = ipv6->sip.mask[i + 8];
+ if (i < 4) {
+ dip.value.addr[i] = val;
+ dip.mask.addr[i] = msk;
+ } else {
+ sip.value.addr[i - 4] = val;
+ sip.mask.addr[i - 4] = msk;
+ }
+ }
+ sport = &ipv6->sport;
+ dport = &ipv6->dport;
+ tcp_fin = ipv6->tcp_fin;
+ tcp_syn = ipv6->tcp_syn;
+ tcp_rst = ipv6->tcp_rst;
+ tcp_psh = ipv6->tcp_psh;
+ tcp_ack = ipv6->tcp_ack;
+ tcp_urg = ipv6->tcp_urg;
+ sip_eq_dip = ipv6->sip_eq_dip;
+ sport_eq_dport = ipv6->sport_eq_dport;
+ seq_zero = ipv6->seq_zero;
+ }
+
+ VCAP_KEY_BIT_SET(IP4,
+ ipv4 ? OCELOT_VCAP_BIT_1 : OCELOT_VCAP_BIT_0);
+ VCAP_KEY_BIT_SET(L3_FRAGMENT, fragment);
+ VCAP_KEY_ANY_SET(L3_FRAG_OFS_GT0);
+ VCAP_KEY_BIT_SET(L3_OPTIONS, options);
+ VCAP_KEY_BIT_SET(L3_TTL_GT0, ttl);
+ VCAP_KEY_BYTES_SET(L3_TOS, ds.value, ds.mask);
+ vcap_key_bytes_set(&data, IS2_HKO_L3_IP4_DIP, dip.value.addr,
+ dip.mask.addr, 4);
+ vcap_key_bytes_set(&data, IS2_HKO_L3_IP4_SIP, sip.value.addr,
+ sip.mask.addr, 4);
+ VCAP_KEY_BIT_SET(DIP_EQ_SIP, sip_eq_dip);
+ val = proto.value[0];
+ msk = proto.mask[0];
+ type = IS2_TYPE_IP_UDP_TCP;
+ if (msk == 0xff && (val == 6 || val == 17)) {
+ /* UDP/TCP protocol match */
+ tcp = (val == 6 ?
+ OCELOT_VCAP_BIT_1 : OCELOT_VCAP_BIT_0);
+ VCAP_KEY_BIT_SET(IP4_TCP_UDP_TCP, tcp);
+ vcap_key_l4_port_set(&data,
+ IS2_HKO_IP4_TCP_UDP_L4_DPORT,
+ dport);
+ vcap_key_l4_port_set(&data,
+ IS2_HKO_IP4_TCP_UDP_L4_SPORT,
+ sport);
+ VCAP_KEY_ANY_SET(IP4_TCP_UDP_L4_RNG);
+ VCAP_KEY_BIT_SET(IP4_TCP_UDP_SPORT_EQ_DPORT,
+ sport_eq_dport);
+ VCAP_KEY_BIT_SET(IP4_TCP_UDP_SEQUENCE_EQ0, seq_zero);
+ VCAP_KEY_BIT_SET(IP4_TCP_UDP_L4_FIN, tcp_fin);
+ VCAP_KEY_BIT_SET(IP4_TCP_UDP_L4_SYN, tcp_syn);
+ VCAP_KEY_BIT_SET(IP4_TCP_UDP_L4_RST, tcp_rst);
+ VCAP_KEY_BIT_SET(IP4_TCP_UDP_L4_PSH, tcp_psh);
+ VCAP_KEY_BIT_SET(IP4_TCP_UDP_L4_ACK, tcp_ack);
+ VCAP_KEY_BIT_SET(IP4_TCP_UDP_L4_URG, tcp_urg);
+ VCAP_KEY_ANY_SET(IP4_TCP_UDP_L4_1588_DOM);
+ VCAP_KEY_ANY_SET(IP4_TCP_UDP_L4_1588_VER);
+ } else {
+ if (msk == 0) {
+ /* Any IP protocol match */
+ type_mask = IS2_TYPE_MASK_IP_ANY;
+ } else {
+ /* Non-UDP/TCP protocol match */
+ type = IS2_TYPE_IP_OTHER;
+ for (i = 0; i < 6; i++) {
+ payload.value[i] = ip_data->value[i];
+ payload.mask[i] = ip_data->mask[i];
+ }
+ }
+ VCAP_KEY_BYTES_SET(IP4_OTHER_L3_PROTO, proto.value,
+ proto.mask);
+ VCAP_KEY_BYTES_SET(IP4_OTHER_L3_PAYLOAD, payload.value,
+ payload.mask);
+ }
+ break;
+ }
+ case OCELOT_ACE_TYPE_ANY:
+ default:
+ type = 0;
+ type_mask = 0;
+ count = (vcap_is2.entry_width / 2);
+ for (i = (IS2_HKO_PCP + IS2_HKL_PCP); i < count;
+ i += ENTRY_WIDTH) {
+ /* Clear entry data */
+ vcap_key_set(&data, i, min(32u, count - i), 0, 0);
+ }
+ break;
+ }
+
+ VCAP_KEY_SET(TYPE, type, type_mask);
+ is2_action_set(&data, ace->action);
+ vcap_data_set(data.counter, data.counter_offset, vcap_is2.counter_width,
+ ace->stats.pkts);
+
+ /* Write row */
+ vcap_entry2cache(ocelot, &data);
+ vcap_action2cache(ocelot, &data);
+ vcap_row_cmd(ocelot, row, VCAP_CMD_WRITE, VCAP_SEL_ALL);
+}
+
+static void is2_entry_get(struct ocelot_ace_rule *rule, int ix)
+{
+ struct ocelot *op = rule->port->ocelot;
+ struct vcap_data data;
+ int row = (ix / 2);
+ u32 cnt;
+
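+ /* Read the counter cache for the row and extract this entry's
+ * packet count.
+ */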
+ vcap_row_cmd(op, row, VCAP_CMD_READ, VCAP_SEL_COUNTER);
+ vcap_cache2action(op, &data);
+ data.tg_sw = VCAP_TG_HALF;
+ is2_data_get(&data, ix);
+ cnt = vcap_data_get(data.counter, data.counter_offset,
+ vcap_is2.counter_width);
+
+ rule->stats.pkts = cnt;
+}
+
+static void ocelot_ace_rule_add(struct ocelot_acl_block *block,
+ struct ocelot_ace_rule *rule)
+{
+ struct ocelot_ace_rule *tmp;
+ struct list_head *pos, *n;
+
+ block->count++;
+
+ if (list_empty(&block->rules)) {
+ list_add(&rule->list, &block->rules);
+ return;
+ }
+
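+ /* Keep the list sorted by ascending priority: insert before the
+ * first rule with a higher priority.
+ */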
+ list_for_each_safe(pos, n, &block->rules) {
+ tmp = list_entry(pos, struct ocelot_ace_rule, list);
+ if (rule->prio < tmp->prio)
+ break;
+ }
+ list_add(&rule->list, pos->prev);
+}
+
+static int ocelot_ace_rule_get_index_id(struct ocelot_acl_block *block,
+ struct ocelot_ace_rule *rule)
+{
+ struct ocelot_ace_rule *tmp;
+ int index = -1;
+
+ list_for_each_entry(tmp, &block->rules, list) {
+ ++index;
+ if (rule->id == tmp->id)
+ break;
+ }
+ return index;
+}
+
+static struct ocelot_ace_rule*
+ocelot_ace_rule_get_rule_index(struct ocelot_acl_block *block, int index)
+{
+ struct ocelot_ace_rule *tmp;
+ int i = 0;
+
+ list_for_each_entry(tmp, &block->rules, list) {
+ if (i == index)
+ return tmp;
+ ++i;
+ }
+
+ return NULL;
+}
+
+int ocelot_ace_rule_offload_add(struct ocelot_ace_rule *rule)
+{
+ struct ocelot_ace_rule *ace;
+ int i, index;
+
+ /* Add rule to the linked list */
+ ocelot_ace_rule_add(acl_block, rule);
+
+ /* Get the index of the inserted rule */
+ index = ocelot_ace_rule_get_index_id(acl_block, rule);
+
+ /* Move down the rules to make room for the new rule */
+ for (i = acl_block->count - 1; i > index; i--) {
+ ace = ocelot_ace_rule_get_rule_index(acl_block, i);
+ is2_entry_set(rule->port->ocelot, i, ace);
+ }
+
+ /* Now insert the new rule */
+ is2_entry_set(rule->port->ocelot, index, rule);
+ return 0;
+}
+
+static void ocelot_ace_rule_del(struct ocelot_acl_block *block,
+ struct ocelot_ace_rule *rule)
+{
+ struct ocelot_ace_rule *tmp;
+ struct list_head *pos, *q;
+
+ list_for_each_safe(pos, q, &block->rules) {
+ tmp = list_entry(pos, struct ocelot_ace_rule, list);
+ if (tmp->id == rule->id) {
+ list_del(pos);
+ kfree(tmp);
+ }
+ }
+
+ block->count--;
+}
+
+int ocelot_ace_rule_offload_del(struct ocelot_ace_rule *rule)
+{
+ struct ocelot_ace_rule del_ace;
+ struct ocelot_ace_rule *ace;
+ int i, index;
+
+ memset(&del_ace, 0, sizeof(del_ace));
+
+ /* Get the index of the rule */
+ index = ocelot_ace_rule_get_index_id(acl_block, rule);
+
+ /* Delete rule */
+ ocelot_ace_rule_del(acl_block, rule);
+
+ /* Move up all the rules that follow the deleted rule */
+ for (i = index; i < acl_block->count; i++) {
+ ace = ocelot_ace_rule_get_rule_index(acl_block, i);
+ is2_entry_set(rule->port->ocelot, i, ace);
+ }
+
+ /* Now delete the last rule, because it is duplicated */
+ is2_entry_set(rule->port->ocelot, acl_block->count, &del_ace);
+
+ return 0;
+}
+
+int ocelot_ace_rule_stats_update(struct ocelot_ace_rule *rule)
+{
+ struct ocelot_ace_rule *tmp;
+ int index;
+
+ index = ocelot_ace_rule_get_index_id(acl_block, rule);
+ is2_entry_get(rule, index);
+
+ /* After we get the result we need to clear the counters */
+ tmp = ocelot_ace_rule_get_rule_index(acl_block, index);
+ tmp->stats.pkts = 0;
+ is2_entry_set(rule->port->ocelot, index, tmp);
+
+ return 0;
+}
+
+static struct ocelot_acl_block *ocelot_acl_block_create(struct ocelot *ocelot)
+{
+ struct ocelot_acl_block *block;
+
+ block = kzalloc(sizeof(*block), GFP_KERNEL);
+ if (!block)
+ return NULL;
+
+ INIT_LIST_HEAD(&block->rules);
+ block->count = 0;
+ block->ocelot = ocelot;
+
+ return block;
+}
+
+static void ocelot_acl_block_destroy(struct ocelot_acl_block *block)
+{
+ kfree(block);
+}
+
+int ocelot_ace_init(struct ocelot *ocelot)
+{
+ struct vcap_data data;
+
+ memset(&data, 0, sizeof(data));
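+ /* Zero-initialize all TCAM entries first, then all actions and
+ * counters, using the INITIALIZE command over the full address range.
+ */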
+ vcap_entry2cache(ocelot, &data);
+ ocelot_write(ocelot, vcap_is2.entry_count, S2_CORE_MV_CFG);
+ vcap_cmd(ocelot, 0, VCAP_CMD_INITIALIZE, VCAP_SEL_ENTRY);
+
+ vcap_action2cache(ocelot, &data);
+ ocelot_write(ocelot, vcap_is2.action_count, S2_CORE_MV_CFG);
+ vcap_cmd(ocelot, 0, VCAP_CMD_INITIALIZE,
+ VCAP_SEL_ACTION | VCAP_SEL_COUNTER);
+
+ /* Create a policer that discards all frames policed by it. This
+ * policer is used as the action in the ACL rules that drop frames.
+ */
+ ocelot_write_gix(ocelot, 0x299, ANA_POL_MODE_CFG,
+ OCELOT_POLICER_DISCARD);
+ ocelot_write_gix(ocelot, 0x1, ANA_POL_PIR_CFG,
+ OCELOT_POLICER_DISCARD);
+ ocelot_write_gix(ocelot, 0x3fffff, ANA_POL_PIR_STATE,
+ OCELOT_POLICER_DISCARD);
+ ocelot_write_gix(ocelot, 0x0, ANA_POL_CIR_CFG,
+ OCELOT_POLICER_DISCARD);
+ ocelot_write_gix(ocelot, 0x3fffff, ANA_POL_CIR_STATE,
+ OCELOT_POLICER_DISCARD);
+
+ acl_block = ocelot_acl_block_create(ocelot);
+
+ return 0;
+}
+
+void ocelot_ace_deinit(void)
+{
+ ocelot_acl_block_destroy(acl_block);
+}
diff --git a/drivers/net/ethernet/mscc/ocelot_ace.h b/drivers/net/ethernet/mscc/ocelot_ace.h
new file mode 100644
index 000000000000..e98944c87259
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_ace.h
@@ -0,0 +1,232 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/* Microsemi Ocelot Switch driver
+ * Copyright (c) 2019 Microsemi Corporation
+ */
+
+#ifndef _MSCC_OCELOT_ACE_H_
+#define _MSCC_OCELOT_ACE_H_
+
+#include "ocelot.h"
+#include <net/sch_generic.h>
+#include <net/pkt_cls.h>
+
+struct ocelot_ipv4 {
+ u8 addr[4];
+};
+
+enum ocelot_vcap_bit {
+ OCELOT_VCAP_BIT_ANY,
+ OCELOT_VCAP_BIT_0,
+ OCELOT_VCAP_BIT_1
+};
+
+struct ocelot_vcap_u8 {
+ u8 value[1];
+ u8 mask[1];
+};
+
+struct ocelot_vcap_u16 {
+ u8 value[2];
+ u8 mask[2];
+};
+
+struct ocelot_vcap_u24 {
+ u8 value[3];
+ u8 mask[3];
+};
+
+struct ocelot_vcap_u32 {
+ u8 value[4];
+ u8 mask[4];
+};
+
+struct ocelot_vcap_u40 {
+ u8 value[5];
+ u8 mask[5];
+};
+
+struct ocelot_vcap_u48 {
+ u8 value[6];
+ u8 mask[6];
+};
+
+struct ocelot_vcap_u64 {
+ u8 value[8];
+ u8 mask[8];
+};
+
+struct ocelot_vcap_u128 {
+ u8 value[16];
+ u8 mask[16];
+};
+
+struct ocelot_vcap_vid {
+ u16 value;
+ u16 mask;
+};
+
+struct ocelot_vcap_ipv4 {
+ struct ocelot_ipv4 value;
+ struct ocelot_ipv4 mask;
+};
+
+struct ocelot_vcap_udp_tcp {
+ u16 value;
+ u16 mask;
+};
+
+enum ocelot_ace_type {
+ OCELOT_ACE_TYPE_ANY,
+ OCELOT_ACE_TYPE_ETYPE,
+ OCELOT_ACE_TYPE_LLC,
+ OCELOT_ACE_TYPE_SNAP,
+ OCELOT_ACE_TYPE_ARP,
+ OCELOT_ACE_TYPE_IPV4,
+ OCELOT_ACE_TYPE_IPV6
+};
+
+struct ocelot_ace_vlan {
+ struct ocelot_vcap_vid vid; /* VLAN ID (12 bit) */
+ struct ocelot_vcap_u8 pcp; /* PCP (3 bit) */
+ enum ocelot_vcap_bit dei; /* DEI */
+ enum ocelot_vcap_bit tagged; /* Tagged/untagged frame */
+};
+
+struct ocelot_ace_frame_etype {
+ struct ocelot_vcap_u48 dmac;
+ struct ocelot_vcap_u48 smac;
+ struct ocelot_vcap_u16 etype;
+ struct ocelot_vcap_u16 data; /* MAC data */
+};
+
+struct ocelot_ace_frame_llc {
+ struct ocelot_vcap_u48 dmac;
+ struct ocelot_vcap_u48 smac;
+
+ /* LLC header: DSAP at byte 0, SSAP at byte 1, Control at byte 2 */
+ struct ocelot_vcap_u32 llc;
+};
+
+struct ocelot_ace_frame_snap {
+ struct ocelot_vcap_u48 dmac;
+ struct ocelot_vcap_u48 smac;
+
+ /* SNAP header: Organization Code at byte 0, Type at byte 3 */
+ struct ocelot_vcap_u40 snap;
+};
+
+struct ocelot_ace_frame_arp {
+ struct ocelot_vcap_u48 smac;
+ enum ocelot_vcap_bit arp; /* Opcode ARP/RARP */
+ enum ocelot_vcap_bit req; /* Opcode request/reply */
+ enum ocelot_vcap_bit unknown; /* Opcode unknown */
+ enum ocelot_vcap_bit smac_match; /* Sender MAC matches SMAC */
+ enum ocelot_vcap_bit dmac_match; /* Target MAC matches DMAC */
+
+ /* Protocol addr. length 4, hardware length 6 */
+ enum ocelot_vcap_bit length;
+
+ enum ocelot_vcap_bit ip; /* Protocol address type IP */
+ enum ocelot_vcap_bit ethernet; /* Hardware address type Ethernet */
+ struct ocelot_vcap_ipv4 sip; /* Sender IP address */
+ struct ocelot_vcap_ipv4 dip; /* Target IP address */
+};
+
+struct ocelot_ace_frame_ipv4 {
+ enum ocelot_vcap_bit ttl; /* TTL zero */
+ enum ocelot_vcap_bit fragment; /* Fragment */
+ enum ocelot_vcap_bit options; /* Header options */
+ struct ocelot_vcap_u8 ds;
+ struct ocelot_vcap_u8 proto; /* Protocol */
+ struct ocelot_vcap_ipv4 sip; /* Source IP address */
+ struct ocelot_vcap_ipv4 dip; /* Destination IP address */
+ struct ocelot_vcap_u48 data; /* Not UDP/TCP: IP data */
+ struct ocelot_vcap_udp_tcp sport; /* UDP/TCP: Source port */
+ struct ocelot_vcap_udp_tcp dport; /* UDP/TCP: Destination port */
+ enum ocelot_vcap_bit tcp_fin;
+ enum ocelot_vcap_bit tcp_syn;
+ enum ocelot_vcap_bit tcp_rst;
+ enum ocelot_vcap_bit tcp_psh;
+ enum ocelot_vcap_bit tcp_ack;
+ enum ocelot_vcap_bit tcp_urg;
+ enum ocelot_vcap_bit sip_eq_dip; /* SIP equals DIP */
+ enum ocelot_vcap_bit sport_eq_dport; /* SPORT equals DPORT */
+ enum ocelot_vcap_bit seq_zero; /* TCP sequence number is zero */
+};
+
+struct ocelot_ace_frame_ipv6 {
+ struct ocelot_vcap_u8 proto; /* IPv6 protocol */
+ struct ocelot_vcap_u128 sip; /* IPv6 source (byte 0-7 ignored) */
+ enum ocelot_vcap_bit ttl; /* TTL zero */
+ struct ocelot_vcap_u8 ds;
+ struct ocelot_vcap_u48 data; /* Not UDP/TCP: IP data */
+ struct ocelot_vcap_udp_tcp sport;
+ struct ocelot_vcap_udp_tcp dport;
+ enum ocelot_vcap_bit tcp_fin;
+ enum ocelot_vcap_bit tcp_syn;
+ enum ocelot_vcap_bit tcp_rst;
+ enum ocelot_vcap_bit tcp_psh;
+ enum ocelot_vcap_bit tcp_ack;
+ enum ocelot_vcap_bit tcp_urg;
+ enum ocelot_vcap_bit sip_eq_dip; /* SIP equals DIP */
+ enum ocelot_vcap_bit sport_eq_dport; /* SPORT equals DPORT */
+ enum ocelot_vcap_bit seq_zero; /* TCP sequence number is zero */
+};
+
+enum ocelot_ace_action {
+ OCELOT_ACL_ACTION_DROP,
+ OCELOT_ACL_ACTION_TRAP,
+};
+
+struct ocelot_ace_stats {
+ u64 bytes;
+ u64 pkts;
+ u64 used;
+};
+
+struct ocelot_ace_rule {
+ struct list_head list;
+ struct ocelot_port *port;
+
+ u16 prio;
+ u32 id;
+
+ enum ocelot_ace_action action;
+ struct ocelot_ace_stats stats;
+ int chip_port;
+
+ enum ocelot_vcap_bit dmac_mc;
+ enum ocelot_vcap_bit dmac_bc;
+ struct ocelot_ace_vlan vlan;
+
+ enum ocelot_ace_type type;
+ union {
+ /* OCELOT_ACE_TYPE_ANY: No specific fields */
+ struct ocelot_ace_frame_etype etype;
+ struct ocelot_ace_frame_llc llc;
+ struct ocelot_ace_frame_snap snap;
+ struct ocelot_ace_frame_arp arp;
+ struct ocelot_ace_frame_ipv4 ipv4;
+ struct ocelot_ace_frame_ipv6 ipv6;
+ } frame;
+};
+
+struct ocelot_acl_block {
+ struct list_head rules;
+ struct ocelot *ocelot;
+ int count;
+};
+
+int ocelot_ace_rule_offload_add(struct ocelot_ace_rule *rule);
+int ocelot_ace_rule_offload_del(struct ocelot_ace_rule *rule);
+int ocelot_ace_rule_stats_update(struct ocelot_ace_rule *rule);
+
+int ocelot_ace_init(struct ocelot *ocelot);
+void ocelot_ace_deinit(void);
+
+int ocelot_setup_tc_block_flower_bind(struct ocelot_port *port,
+ struct flow_block_offload *f);
+void ocelot_setup_tc_block_flower_unbind(struct ocelot_port *port,
+ struct flow_block_offload *f);
+
+#endif /* _MSCC_OCELOT_ACE_H_ */
diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c
index e7f90101d2e0..58bde1a9eacb 100644
--- a/drivers/net/ethernet/mscc/ocelot_board.c
+++ b/drivers/net/ethernet/mscc/ocelot_board.c
@@ -188,6 +188,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
{ QSYS, "qsys" },
{ ANA, "ana" },
{ QS, "qs" },
+ { S2, "s2" },
};
if (!np && !pdev->dev.platform_data)
diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c
new file mode 100644
index 000000000000..7aaddc09c185
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_flower.c
@@ -0,0 +1,363 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Microsemi Ocelot Switch driver
+ * Copyright (c) 2019 Microsemi Corporation
+ */
+
+#include <net/pkt_cls.h>
+#include <net/tc_act/tc_gact.h>
+
+#include "ocelot_ace.h"
+
+struct ocelot_port_block {
+ struct ocelot_acl_block *block;
+ struct ocelot_port *port;
+};
+
+static u16 get_prio(u32 prio)
+{
+ /* prio starts from 0x1000 while the IDs start from 0 */
+ return prio >> 16;
+}
+
+static int ocelot_flower_parse_action(struct flow_cls_offload *f,
+ struct ocelot_ace_rule *rule)
+{
+ const struct flow_action_entry *a;
+ int i;
+
+ if (f->rule->action.num_entries != 1)
+ return -EOPNOTSUPP;
+
+ flow_action_for_each(i, a, &f->rule->action) {
+ switch (a->id) {
+ case FLOW_ACTION_DROP:
+ rule->action = OCELOT_ACL_ACTION_DROP;
+ break;
+ case FLOW_ACTION_TRAP:
+ rule->action = OCELOT_ACL_ACTION_TRAP;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return 0;
+}
+
+static int ocelot_flower_parse(struct flow_cls_offload *f,
+ struct ocelot_ace_rule *ocelot_rule)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct flow_dissector *dissector = rule->match.dissector;
+
+ if (dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_PORTS) |
+ BIT(FLOW_DISSECTOR_KEY_VLAN) |
+ BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS))) {
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_match_control match;
+
+ flow_rule_match_control(rule, &match);
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
+ u16 proto = ntohs(f->common.protocol);
+
+ /* The hw supports MAC matches only for the MAC_ETYPE key,
+ * therefore bail out if other matches (port, TCP flags, etc.)
+ * are added.
+ */
+ if ((dissector->used_keys &
+ (BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_CONTROL))) !=
+ (BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_CONTROL)))
+ return -EOPNOTSUPP;
+
+ if (proto == ETH_P_IP ||
+ proto == ETH_P_IPV6 ||
+ proto == ETH_P_ARP)
+ return -EOPNOTSUPP;
+
+ flow_rule_match_eth_addrs(rule, &match);
+ ocelot_rule->type = OCELOT_ACE_TYPE_ETYPE;
+ ether_addr_copy(ocelot_rule->frame.etype.dmac.value,
+ match.key->dst);
+ ether_addr_copy(ocelot_rule->frame.etype.smac.value,
+ match.key->src);
+ ether_addr_copy(ocelot_rule->frame.etype.dmac.mask,
+ match.mask->dst);
+ ether_addr_copy(ocelot_rule->frame.etype.smac.mask,
+ match.mask->src);
+ goto finished_key_parsing;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
+
+ flow_rule_match_basic(rule, &match);
+ if (ntohs(match.key->n_proto) == ETH_P_IP) {
+ ocelot_rule->type = OCELOT_ACE_TYPE_IPV4;
+ ocelot_rule->frame.ipv4.proto.value[0] =
+ match.key->ip_proto;
+ ocelot_rule->frame.ipv4.proto.mask[0] =
+ match.mask->ip_proto;
+ }
+ if (ntohs(match.key->n_proto) == ETH_P_IPV6) {
+ ocelot_rule->type = OCELOT_ACE_TYPE_IPV6;
+ ocelot_rule->frame.ipv6.proto.value[0] =
+ match.key->ip_proto;
+ ocelot_rule->frame.ipv6.proto.mask[0] =
+ match.mask->ip_proto;
+ }
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS) &&
+ ntohs(f->common.protocol) == ETH_P_IP) {
+ struct flow_match_ipv4_addrs match;
+ u8 *tmp;
+
+ flow_rule_match_ipv4_addrs(rule, &match);
+ tmp = &ocelot_rule->frame.ipv4.sip.value.addr[0];
+ memcpy(tmp, &match.key->src, 4);
+
+ tmp = &ocelot_rule->frame.ipv4.sip.mask.addr[0];
+ memcpy(tmp, &match.mask->src, 4);
+
+ tmp = &ocelot_rule->frame.ipv4.dip.value.addr[0];
+ memcpy(tmp, &match.key->dst, 4);
+
+ tmp = &ocelot_rule->frame.ipv4.dip.mask.addr[0];
+ memcpy(tmp, &match.mask->dst, 4);
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS) &&
+ ntohs(f->common.protocol) == ETH_P_IPV6) {
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_match_ports match;
+
+ flow_rule_match_ports(rule, &match);
+ ocelot_rule->frame.ipv4.sport.value = ntohs(match.key->src);
+ ocelot_rule->frame.ipv4.sport.mask = ntohs(match.mask->src);
+ ocelot_rule->frame.ipv4.dport.value = ntohs(match.key->dst);
+ ocelot_rule->frame.ipv4.dport.mask = ntohs(match.mask->dst);
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
+
+ flow_rule_match_vlan(rule, &match);
+ ocelot_rule->type = OCELOT_ACE_TYPE_ANY;
+ ocelot_rule->vlan.vid.value = match.key->vlan_id;
+ ocelot_rule->vlan.vid.mask = match.mask->vlan_id;
+ ocelot_rule->vlan.pcp.value[0] = match.key->vlan_priority;
+ ocelot_rule->vlan.pcp.mask[0] = match.mask->vlan_priority;
+ }
+
+finished_key_parsing:
+ ocelot_rule->prio = get_prio(f->common.prio);
+ ocelot_rule->id = f->cookie;
+ return ocelot_flower_parse_action(f, ocelot_rule);
+}
+
+static
+struct ocelot_ace_rule *ocelot_ace_rule_create(struct flow_cls_offload *f,
+ struct ocelot_port_block *block)
+{
+ struct ocelot_ace_rule *rule;
+
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return NULL;
+
+ rule->port = block->port;
+ rule->chip_port = block->port->chip_port;
+ return rule;
+}
+
+static int ocelot_flower_replace(struct flow_cls_offload *f,
+ struct ocelot_port_block *port_block)
+{
+ struct ocelot_ace_rule *rule;
+ int ret;
+
+ rule = ocelot_ace_rule_create(f, port_block);
+ if (!rule)
+ return -ENOMEM;
+
+ ret = ocelot_flower_parse(f, rule);
+ if (ret) {
+ kfree(rule);
+ return ret;
+ }
+
+ ret = ocelot_ace_rule_offload_add(rule);
+ if (ret)
+ return ret;
+
+ port_block->port->tc.offload_cnt++;
+ return 0;
+}
+
+static int ocelot_flower_destroy(struct flow_cls_offload *f,
+ struct ocelot_port_block *port_block)
+{
+ struct ocelot_ace_rule rule;
+ int ret;
+
+ rule.prio = get_prio(f->common.prio);
+ rule.port = port_block->port;
+ rule.id = f->cookie;
+
+ ret = ocelot_ace_rule_offload_del(&rule);
+ if (ret)
+ return ret;
+
+ port_block->port->tc.offload_cnt--;
+ return 0;
+}
+
+static int ocelot_flower_stats_update(struct flow_cls_offload *f,
+ struct ocelot_port_block *port_block)
+{
+ struct ocelot_ace_rule rule;
+ int ret;
+
+ rule.prio = get_prio(f->common.prio);
+ rule.port = port_block->port;
+ rule.id = f->cookie;
+ ret = ocelot_ace_rule_stats_update(&rule);
+ if (ret)
+ return ret;
+
+ flow_stats_update(&f->stats, 0x0, rule.stats.pkts, 0x0);
+ return 0;
+}
+
+static int ocelot_setup_tc_cls_flower(struct flow_cls_offload *f,
+ struct ocelot_port_block *port_block)
+{
+ switch (f->command) {
+ case FLOW_CLS_REPLACE:
+ return ocelot_flower_replace(f, port_block);
+ case FLOW_CLS_DESTROY:
+ return ocelot_flower_destroy(f, port_block);
+ case FLOW_CLS_STATS:
+ return ocelot_flower_stats_update(f, port_block);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int ocelot_setup_tc_block_cb_flower(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ struct ocelot_port_block *port_block = cb_priv;
+
+ if (!tc_cls_can_offload_and_chain0(port_block->port->dev, type_data))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return ocelot_setup_tc_cls_flower(type_data, cb_priv);
+ case TC_SETUP_CLSMATCHALL:
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static struct ocelot_port_block*
+ocelot_port_block_create(struct ocelot_port *port)
+{
+ struct ocelot_port_block *port_block;
+
+ port_block = kzalloc(sizeof(*port_block), GFP_KERNEL);
+ if (!port_block)
+ return NULL;
+
+ port_block->port = port;
+
+ return port_block;
+}
+
+static void ocelot_port_block_destroy(struct ocelot_port_block *block)
+{
+ kfree(block);
+}
+
+static void ocelot_tc_block_unbind(void *cb_priv)
+{
+ struct ocelot_port_block *port_block = cb_priv;
+
+ ocelot_port_block_destroy(port_block);
+}
+
+int ocelot_setup_tc_block_flower_bind(struct ocelot_port *port,
+ struct flow_block_offload *f)
+{
+ struct ocelot_port_block *port_block;
+ struct flow_block_cb *block_cb;
+ int ret;
+
+ if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
+ return -EOPNOTSUPP;
+
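+ /* Reuse the block callback already registered for this port, if any;
+ * otherwise allocate a new port block and callback.
+ */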
+ block_cb = flow_block_cb_lookup(f, ocelot_setup_tc_block_cb_flower,
+ port);
+ if (!block_cb) {
+ port_block = ocelot_port_block_create(port);
+ if (!port_block)
+ return -ENOMEM;
+
+ block_cb = flow_block_cb_alloc(f->net,
+ ocelot_setup_tc_block_cb_flower,
+ port, port_block,
+ ocelot_tc_block_unbind);
+ if (IS_ERR(block_cb)) {
+ ret = PTR_ERR(block_cb);
+ goto err_cb_register;
+ }
+ flow_block_cb_add(block_cb, f);
+ list_add_tail(&block_cb->driver_list, f->driver_block_list);
+ } else {
+ port_block = flow_block_cb_priv(block_cb);
+ }
+
+ flow_block_cb_incref(block_cb);
+ return 0;
+
+err_cb_register:
+ ocelot_port_block_destroy(port_block);
+
+ return ret;
+}
+
+void ocelot_setup_tc_block_flower_unbind(struct ocelot_port *port,
+ struct flow_block_offload *f)
+{
+ struct flow_block_cb *block_cb;
+
+ block_cb = flow_block_cb_lookup(f, ocelot_setup_tc_block_cb_flower,
+ port);
+ if (!block_cb)
+ return;
+
+ if (!flow_block_cb_decref(block_cb)) {
+ flow_block_cb_remove(block_cb, f);
+ list_del(&block_cb->driver_list);
+ }
+}
diff --git a/drivers/net/ethernet/mscc/ocelot_police.c b/drivers/net/ethernet/mscc/ocelot_police.c
new file mode 100644
index 000000000000..701e82dd749a
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_police.c
@@ -0,0 +1,227 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2019 Microsemi Corporation
+ */
+
+#include "ocelot_police.h"
+
+enum mscc_qos_rate_mode {
+ MSCC_QOS_RATE_MODE_DISABLED, /* Policer/shaper disabled */
+ MSCC_QOS_RATE_MODE_LINE, /* Measures line rate in kbps incl. IPG */
+ MSCC_QOS_RATE_MODE_DATA, /* Measures data rate in kbps excl. IPG */
+ MSCC_QOS_RATE_MODE_FRAME, /* Measures frame rate in fps */
+ __MSCC_QOS_RATE_MODE_END,
+ NUM_MSCC_QOS_RATE_MODE = __MSCC_QOS_RATE_MODE_END,
+ MSCC_QOS_RATE_MODE_MAX = __MSCC_QOS_RATE_MODE_END - 1,
+};
+
+/* Types for ANA:POL[0-192]:POL_MODE_CFG.FRM_MODE */
+#define POL_MODE_LINERATE 0 /* Incl IPG. Unit: 33 1/3 kbps, 4096 bytes */
+#define POL_MODE_DATARATE 1 /* Excl IPG. Unit: 33 1/3 kbps, 4096 bytes */
+#define POL_MODE_FRMRATE_HI 2 /* Unit: 33 1/3 fps, 32.8 frames */
+#define POL_MODE_FRMRATE_LO 3 /* Unit: 1/3 fps, 0.3 frames */
+
+/* Policer indexes */
+#define POL_IX_PORT 0 /* 0-11 : Port policers */
+#define POL_IX_QUEUE 32 /* 32-127 : Queue policers */
+
+/* Default policer order */
+#define POL_ORDER 0x1d3 /* Ocelot policer order: Serial (QoS -> Port -> VCAP) */
+
+struct qos_policer_conf {
+ enum mscc_qos_rate_mode mode;
+ bool dlb; /* Enable DLB (dual leaky bucket) mode */
+ bool cf; /* Coupling flag (ignored in SLB mode) */
+ u32 cir; /* CIR in kbps/fps (ignored in SLB mode) */
+ u32 cbs; /* CBS in bytes/frames (ignored in SLB mode) */
+ u32 pir; /* PIR in kbps/fps */
+ u32 pbs; /* PBS in bytes/frames */
+ u8 ipg; /* Size of IPG when MSCC_QOS_RATE_MODE_LINE is chosen */
+};
+
+static int qos_policer_conf_set(struct ocelot_port *port, u32 pol_ix,
+ struct qos_policer_conf *conf)
+{
+ u32 cf = 0, cir_ena = 0, frm_mode = POL_MODE_LINERATE;
+ u32 cir = 0, cbs = 0, pir = 0, pbs = 0;
+ bool cir_discard = false, pir_discard = false;
+ struct ocelot *ocelot = port->ocelot;
+ u32 pbs_max = 0, cbs_max = 0;
+ u8 ipg = 20;
+ u32 value;
+
+ pir = conf->pir;
+ pbs = conf->pbs;
+
+ switch (conf->mode) {
+ case MSCC_QOS_RATE_MODE_LINE:
+ case MSCC_QOS_RATE_MODE_DATA:
+ if (conf->mode == MSCC_QOS_RATE_MODE_LINE) {
+ frm_mode = POL_MODE_LINERATE;
+ ipg = min_t(u8, GENMASK(4, 0), conf->ipg);
+ } else {
+ frm_mode = POL_MODE_DATARATE;
+ }
+ if (conf->dlb) {
+ cir_ena = 1;
+ cir = conf->cir;
+ cbs = conf->cbs;
+ if (cir == 0 && cbs == 0) {
+ /* Discard cir frames */
+ cir_discard = 1;
+ } else {
+ cir = DIV_ROUND_UP(cir, 100);
+ cir *= 3; /* 33 1/3 kbps */
+ cbs = DIV_ROUND_UP(cbs, 4096);
+ cbs = (cbs ? cbs : 1); /* No zero burst size */
+ cbs_max = 60; /* Limit burst size */
+ cf = conf->cf;
+ if (cf)
+ pir += conf->cir;
+ }
+ }
+ if (pir == 0 && pbs == 0) {
+ /* Discard PIR frames */
+ pir_discard = 1;
+ } else {
+ pir = DIV_ROUND_UP(pir, 100);
+ pir *= 3; /* 33 1/3 kbps */
+ pbs = DIV_ROUND_UP(pbs, 4096);
+ pbs = (pbs ? pbs : 1); /* No zero burst size */
+ pbs_max = 60; /* Limit burst size */
+ }
+ break;
+ case MSCC_QOS_RATE_MODE_FRAME:
+ if (pir >= 100) {
+ frm_mode = POL_MODE_FRMRATE_HI;
+ pir = DIV_ROUND_UP(pir, 100);
+ pir *= 3; /* 33 1/3 fps */
+ pbs = (pbs * 10) / 328; /* 32.8 frames */
+ pbs = (pbs ? pbs : 1); /* No zero burst size */
+ pbs_max = GENMASK(6, 0); /* Limit burst size */
+ } else {
+ frm_mode = POL_MODE_FRMRATE_LO;
+ if (pir == 0 && pbs == 0) {
+ /* Discard all frames */
+ pir_discard = 1;
+ cir_discard = 1;
+ } else {
+ pir *= 3; /* 1/3 fps */
+ pbs = (pbs * 10) / 3; /* 0.3 frames */
+ pbs = (pbs ? pbs : 1); /* No zero burst size */
+ pbs_max = 61; /* Limit burst size */
+ }
+ }
+ break;
+ default: /* MSCC_QOS_RATE_MODE_DISABLED */
+ /* Disable policer using maximum rate and zero burst */
+ pir = GENMASK(15, 0);
+ pbs = 0;
+ break;
+ }
+
+ /* Check limits */
+ if (pir > GENMASK(15, 0)) {
+ netdev_err(port->dev, "Invalid pir\n");
+ return -EINVAL;
+ }
+
+ if (cir > GENMASK(15, 0)) {
+ netdev_err(port->dev, "Invalid cir\n");
+ return -EINVAL;
+ }
+
+ if (pbs > pbs_max) {
+ netdev_err(port->dev, "Invalid pbs\n");
+ return -EINVAL;
+ }
+
+ if (cbs > cbs_max) {
+ netdev_err(port->dev, "Invalid cbs\n");
+ return -EINVAL;
+ }
+
+ value = (ANA_POL_MODE_CFG_IPG_SIZE(ipg) |
+ ANA_POL_MODE_CFG_FRM_MODE(frm_mode) |
+ (cf ? ANA_POL_MODE_CFG_DLB_COUPLED : 0) |
+ (cir_ena ? ANA_POL_MODE_CFG_CIR_ENA : 0) |
+ ANA_POL_MODE_CFG_OVERSHOOT_ENA);
+
+ ocelot_write_gix(ocelot, value, ANA_POL_MODE_CFG, pol_ix);
+
+ ocelot_write_gix(ocelot,
+ ANA_POL_PIR_CFG_PIR_RATE(pir) |
+ ANA_POL_PIR_CFG_PIR_BURST(pbs),
+ ANA_POL_PIR_CFG, pol_ix);
+
+ ocelot_write_gix(ocelot,
+ (pir_discard ? GENMASK(22, 0) : 0),
+ ANA_POL_PIR_STATE, pol_ix);
+
+ ocelot_write_gix(ocelot,
+ ANA_POL_CIR_CFG_CIR_RATE(cir) |
+ ANA_POL_CIR_CFG_CIR_BURST(cbs),
+ ANA_POL_CIR_CFG, pol_ix);
+
+ ocelot_write_gix(ocelot,
+ (cir_discard ? GENMASK(22, 0) : 0),
+ ANA_POL_CIR_STATE, pol_ix);
+
+ return 0;
+}
+
+int ocelot_port_policer_add(struct ocelot_port *port,
+ struct ocelot_policer *pol)
+{
+ struct ocelot *ocelot = port->ocelot;
+ struct qos_policer_conf pp = { 0 };
+ int err;
+
+ if (!pol)
+ return -EINVAL;
+
+ pp.mode = MSCC_QOS_RATE_MODE_DATA;
+ pp.pir = pol->rate;
+ pp.pbs = pol->burst;
+
+ netdev_dbg(port->dev,
+ "%s: port %u pir %u kbps, pbs %u bytes\n",
+ __func__, port->chip_port, pp.pir, pp.pbs);
+
+ err = qos_policer_conf_set(port, POL_IX_PORT + port->chip_port, &pp);
+ if (err)
+ return err;
+
+ ocelot_rmw_gix(ocelot,
+ ANA_PORT_POL_CFG_PORT_POL_ENA |
+ ANA_PORT_POL_CFG_POL_ORDER(POL_ORDER),
+ ANA_PORT_POL_CFG_PORT_POL_ENA |
+ ANA_PORT_POL_CFG_POL_ORDER_M,
+ ANA_PORT_POL_CFG, port->chip_port);
+
+ return 0;
+}
+
+int ocelot_port_policer_del(struct ocelot_port *port)
+{
+ struct ocelot *ocelot = port->ocelot;
+ struct qos_policer_conf pp = { 0 };
+ int err;
+
+ netdev_dbg(port->dev, "%s: port %u\n", __func__, port->chip_port);
+
+ pp.mode = MSCC_QOS_RATE_MODE_DISABLED;
+
+ err = qos_policer_conf_set(port, POL_IX_PORT + port->chip_port, &pp);
+ if (err)
+ return err;
+
+ ocelot_rmw_gix(ocelot,
+ ANA_PORT_POL_CFG_POL_ORDER(POL_ORDER),
+ ANA_PORT_POL_CFG_PORT_POL_ENA |
+ ANA_PORT_POL_CFG_POL_ORDER_M,
+ ANA_PORT_POL_CFG, port->chip_port);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/mscc/ocelot_police.h b/drivers/net/ethernet/mscc/ocelot_police.h
new file mode 100644
index 000000000000..d1137f79efda
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_police.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/* Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2019 Microsemi Corporation
+ */
+
+#ifndef _MSCC_OCELOT_POLICE_H_
+#define _MSCC_OCELOT_POLICE_H_
+
+#include "ocelot.h"
+
+struct ocelot_policer {
+ u32 rate; /* kilobits per second */
+ u32 burst; /* bytes */
+};
+
+int ocelot_port_policer_add(struct ocelot_port *port,
+ struct ocelot_policer *pol);
+
+int ocelot_port_policer_del(struct ocelot_port *port);
+
+#endif /* _MSCC_OCELOT_POLICE_H_ */
diff --git a/drivers/net/ethernet/mscc/ocelot_regs.c b/drivers/net/ethernet/mscc/ocelot_regs.c
index 9271af18b93b..6c387f994ec5 100644
--- a/drivers/net/ethernet/mscc/ocelot_regs.c
+++ b/drivers/net/ethernet/mscc/ocelot_regs.c
@@ -224,12 +224,23 @@ static const u32 ocelot_sys_regmap[] = {
REG(SYS_PTP_CFG, 0x0006c4),
};
+static const u32 ocelot_s2_regmap[] = {
+ REG(S2_CORE_UPDATE_CTRL, 0x000000),
+ REG(S2_CORE_MV_CFG, 0x000004),
+ REG(S2_CACHE_ENTRY_DAT, 0x000008),
+ REG(S2_CACHE_MASK_DAT, 0x000108),
+ REG(S2_CACHE_ACTION_DAT, 0x000208),
+ REG(S2_CACHE_CNT_DAT, 0x000308),
+ REG(S2_CACHE_TG_DAT, 0x000388),
+};
+
static const u32 *ocelot_regmap[] = {
[ANA] = ocelot_ana_regmap,
[QS] = ocelot_qs_regmap,
[QSYS] = ocelot_qsys_regmap,
[REW] = ocelot_rew_regmap,
[SYS] = ocelot_sys_regmap,
+ [S2] = ocelot_s2_regmap,
};
static const struct reg_field ocelot_regfields[] = {
diff --git a/drivers/net/ethernet/mscc/ocelot_s2.h b/drivers/net/ethernet/mscc/ocelot_s2.h
new file mode 100644
index 000000000000..80107bec2e45
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_s2.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/* Microsemi Ocelot Switch driver
+ * Copyright (c) 2018 Microsemi Corporation
+ */
+
+#ifndef _OCELOT_S2_CORE_H_
+#define _OCELOT_S2_CORE_H_
+
+#define S2_CORE_UPDATE_CTRL_UPDATE_CMD(x) (((x) << 22) & GENMASK(24, 22))
+#define S2_CORE_UPDATE_CTRL_UPDATE_CMD_M GENMASK(24, 22)
+#define S2_CORE_UPDATE_CTRL_UPDATE_CMD_X(x) (((x) & GENMASK(24, 22)) >> 22)
+#define S2_CORE_UPDATE_CTRL_UPDATE_ENTRY_DIS BIT(21)
+#define S2_CORE_UPDATE_CTRL_UPDATE_ACTION_DIS BIT(20)
+#define S2_CORE_UPDATE_CTRL_UPDATE_CNT_DIS BIT(19)
+#define S2_CORE_UPDATE_CTRL_UPDATE_ADDR(x) (((x) << 3) & GENMASK(18, 3))
+#define S2_CORE_UPDATE_CTRL_UPDATE_ADDR_M GENMASK(18, 3)
+#define S2_CORE_UPDATE_CTRL_UPDATE_ADDR_X(x) (((x) & GENMASK(18, 3)) >> 3)
+#define S2_CORE_UPDATE_CTRL_UPDATE_SHOT BIT(2)
+#define S2_CORE_UPDATE_CTRL_CLEAR_CACHE BIT(1)
+#define S2_CORE_UPDATE_CTRL_MV_TRAFFIC_IGN BIT(0)
+
+#define S2_CORE_MV_CFG_MV_NUM_POS(x) (((x) << 16) & GENMASK(31, 16))
+#define S2_CORE_MV_CFG_MV_NUM_POS_M GENMASK(31, 16)
+#define S2_CORE_MV_CFG_MV_NUM_POS_X(x) (((x) & GENMASK(31, 16)) >> 16)
+#define S2_CORE_MV_CFG_MV_SIZE(x) ((x) & GENMASK(15, 0))
+#define S2_CORE_MV_CFG_MV_SIZE_M GENMASK(15, 0)
+
+#define S2_CACHE_ENTRY_DAT_RSZ 0x4
+
+#define S2_CACHE_MASK_DAT_RSZ 0x4
+
+#define S2_CACHE_ACTION_DAT_RSZ 0x4
+
+#define S2_CACHE_CNT_DAT_RSZ 0x4
+
+#define S2_STICKY_VCAP_ROW_DELETED_STICKY BIT(0)
+
+#define S2_BIST_CTRL_TCAM_BIST BIT(1)
+#define S2_BIST_CTRL_TCAM_INIT BIT(0)
+
+#define S2_BIST_CFG_TCAM_BIST_SOE_ENA BIT(8)
+#define S2_BIST_CFG_TCAM_HCG_DIS BIT(7)
+#define S2_BIST_CFG_TCAM_CG_DIS BIT(6)
+#define S2_BIST_CFG_TCAM_BIAS(x) ((x) & GENMASK(5, 0))
+#define S2_BIST_CFG_TCAM_BIAS_M GENMASK(5, 0)
+
+#define S2_BIST_STAT_BIST_RT_ERR BIT(15)
+#define S2_BIST_STAT_BIST_PENC_ERR BIT(14)
+#define S2_BIST_STAT_BIST_COMP_ERR BIT(13)
+#define S2_BIST_STAT_BIST_ADDR_ERR BIT(12)
+#define S2_BIST_STAT_BIST_BL1E_ERR BIT(11)
+#define S2_BIST_STAT_BIST_BL1_ERR BIT(10)
+#define S2_BIST_STAT_BIST_BL0E_ERR BIT(9)
+#define S2_BIST_STAT_BIST_BL0_ERR BIT(8)
+#define S2_BIST_STAT_BIST_PH1_ERR BIT(7)
+#define S2_BIST_STAT_BIST_PH0_ERR BIT(6)
+#define S2_BIST_STAT_BIST_PV1_ERR BIT(5)
+#define S2_BIST_STAT_BIST_PV0_ERR BIT(4)
+#define S2_BIST_STAT_BIST_RUN BIT(3)
+#define S2_BIST_STAT_BIST_ERR BIT(2)
+#define S2_BIST_STAT_BIST_BUSY BIT(1)
+#define S2_BIST_STAT_TCAM_RDY BIT(0)
+
+#endif /* _OCELOT_S2_CORE_H_ */
diff --git a/drivers/net/ethernet/mscc/ocelot_tc.c b/drivers/net/ethernet/mscc/ocelot_tc.c
new file mode 100644
index 000000000000..9e6464ffae5d
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_tc.c
@@ -0,0 +1,197 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Microsemi Ocelot Switch TC driver
+ *
+ * Copyright (c) 2019 Microsemi Corporation
+ */
+
+#include "ocelot_tc.h"
+#include "ocelot_police.h"
+#include "ocelot_ace.h"
+#include <net/pkt_cls.h>
+
+static int ocelot_setup_tc_cls_matchall(struct ocelot_port *port,
+ struct tc_cls_matchall_offload *f,
+ bool ingress)
+{
+ struct netlink_ext_ack *extack = f->common.extack;
+ struct ocelot_policer pol = { 0 };
+ struct flow_action_entry *action;
+ int err;
+
+ netdev_dbg(port->dev, "%s: port %u command %d cookie %lu\n",
+ __func__, port->chip_port, f->command, f->cookie);
+
+ if (!ingress) {
+ NL_SET_ERR_MSG_MOD(extack, "Only ingress is supported");
+ return -EOPNOTSUPP;
+ }
+
+ switch (f->command) {
+ case TC_CLSMATCHALL_REPLACE:
+ if (!flow_offload_has_one_action(&f->rule->action)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only one action is supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (port->tc.block_shared) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Rate limit is not supported on shared blocks");
+ return -EOPNOTSUPP;
+ }
+
+ action = &f->rule->action.entries[0];
+
+ if (action->id != FLOW_ACTION_POLICE) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
+ return -EOPNOTSUPP;
+ }
+
+ if (port->tc.police_id && port->tc.police_id != f->cookie) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only one policer per port is supported\n");
+ return -EEXIST;
+ }
+
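+ /* Convert the rate from bytes per second to kbps and derive the
+ * burst size in bytes from the rate and the burst time.
+ */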
+ pol.rate = (u32)div_u64(action->police.rate_bytes_ps, 1000) * 8;
+ pol.burst = (u32)div_u64(action->police.rate_bytes_ps *
+ PSCHED_NS2TICKS(action->police.burst),
+ PSCHED_TICKS_PER_SEC);
+
+ err = ocelot_port_policer_add(port, &pol);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Could not add policer\n");
+ return err;
+ }
+
+ port->tc.police_id = f->cookie;
+ port->tc.offload_cnt++;
+ return 0;
+ case TC_CLSMATCHALL_DESTROY:
+ if (port->tc.police_id != f->cookie)
+ return -ENOENT;
+
+ err = ocelot_port_policer_del(port);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Could not delete policer\n");
+ return err;
+ }
+ port->tc.police_id = 0;
+ port->tc.offload_cnt--;
+ return 0;
+ case TC_CLSMATCHALL_STATS: /* fall through */
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int ocelot_setup_tc_block_cb(enum tc_setup_type type,
+ void *type_data,
+ void *cb_priv, bool ingress)
+{
+ struct ocelot_port *port = cb_priv;
+
+ if (!tc_cls_can_offload_and_chain0(port->dev, type_data))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSMATCHALL:
+ netdev_dbg(port->dev, "tc_block_cb: TC_SETUP_CLSMATCHALL %s\n",
+ ingress ? "ingress" : "egress");
+
+ return ocelot_setup_tc_cls_matchall(port, type_data, ingress);
+ case TC_SETUP_CLSFLOWER:
+ return 0;
+ default:
+ netdev_dbg(port->dev, "tc_block_cb: type %d %s\n",
+ type,
+ ingress ? "ingress" : "egress");
+
+ return -EOPNOTSUPP;
+ }
+}
+
+static int ocelot_setup_tc_block_cb_ig(enum tc_setup_type type,
+ void *type_data,
+ void *cb_priv)
+{
+ return ocelot_setup_tc_block_cb(type, type_data,
+ cb_priv, true);
+}
+
+static int ocelot_setup_tc_block_cb_eg(enum tc_setup_type type,
+ void *type_data,
+ void *cb_priv)
+{
+ return ocelot_setup_tc_block_cb(type, type_data,
+ cb_priv, false);
+}
+
+static LIST_HEAD(ocelot_block_cb_list);
+
+static int ocelot_setup_tc_block(struct ocelot_port *port,
+ struct flow_block_offload *f)
+{
+ struct flow_block_cb *block_cb;
+ tc_setup_cb_t *cb;
+ int err;
+
+ netdev_dbg(port->dev, "tc_block command %d, binder_type %d\n",
+ f->command, f->binder_type);
+
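+ /* Pick the ingress or egress block callback depending on the
+ * binder type.
+ */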
+ if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
+ cb = ocelot_setup_tc_block_cb_ig;
+ port->tc.block_shared = f->block_shared;
+ } else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
+ cb = ocelot_setup_tc_block_cb_eg;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ f->driver_block_list = &ocelot_block_cb_list;
+
+ switch (f->command) {
+ case FLOW_BLOCK_BIND:
+ if (flow_block_cb_is_busy(cb, port, &ocelot_block_cb_list))
+ return -EBUSY;
+
+ block_cb = flow_block_cb_alloc(f->net, cb, port, port, NULL);
+ if (IS_ERR(block_cb))
+ return PTR_ERR(block_cb);
+
+ err = ocelot_setup_tc_block_flower_bind(port, f);
+ if (err < 0) {
+ flow_block_cb_free(block_cb);
+ return err;
+ }
+ flow_block_cb_add(block_cb, f);
+ list_add_tail(&block_cb->driver_list, f->driver_block_list);
+ return 0;
+ case FLOW_BLOCK_UNBIND:
+ block_cb = flow_block_cb_lookup(f, cb, port);
+ if (!block_cb)
+ return -ENOENT;
+
+ ocelot_setup_tc_block_flower_unbind(port, f);
+ flow_block_cb_remove(block_cb, f);
+ list_del(&block_cb->driver_list);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+int ocelot_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct ocelot_port *port = netdev_priv(dev);
+
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ return ocelot_setup_tc_block(port, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
diff --git a/drivers/net/ethernet/mscc/ocelot_tc.h b/drivers/net/ethernet/mscc/ocelot_tc.h
new file mode 100644
index 000000000000..61757c2250a6
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_tc.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/* Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2019 Microsemi Corporation
+ */
+
+#ifndef _MSCC_OCELOT_TC_H_
+#define _MSCC_OCELOT_TC_H_
+
+#include <linux/netdevice.h>
+
+struct ocelot_port_tc {
+ bool block_shared;
+ unsigned long offload_cnt;
+
+ unsigned long police_id;
+};
+
+int ocelot_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data);
+
+#endif /* _MSCC_OCELOT_TC_H_ */
diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.h b/drivers/net/ethernet/mscc/ocelot_vcap.h
new file mode 100644
index 000000000000..e22eac1da783
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_vcap.h
@@ -0,0 +1,403 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/* Microsemi Ocelot Switch driver
+ * Copyright (c) 2019 Microsemi Corporation
+ */
+
+#ifndef _OCELOT_VCAP_H_
+#define _OCELOT_VCAP_H_
+
+/* =================================================================
+ * VCAP Common
+ * =================================================================
+ */
+
+/* VCAP Type-Group values */
+#define VCAP_TG_NONE 0 /* Entry is invalid */
+#define VCAP_TG_FULL 1 /* Full entry */
+#define VCAP_TG_HALF 2 /* Half entry */
+#define VCAP_TG_QUARTER 3 /* Quarter entry */
+
+/* =================================================================
+ * VCAP IS2
+ * =================================================================
+ */
+
+#define VCAP_IS2_CNT 64
+#define VCAP_IS2_ENTRY_WIDTH 376
+#define VCAP_IS2_ACTION_WIDTH 99
+#define VCAP_PORT_CNT 11
+
+/* IS2 half key types */
+#define IS2_TYPE_ETYPE 0
+#define IS2_TYPE_LLC 1
+#define IS2_TYPE_SNAP 2
+#define IS2_TYPE_ARP 3
+#define IS2_TYPE_IP_UDP_TCP 4
+#define IS2_TYPE_IP_OTHER 5
+#define IS2_TYPE_IPV6 6
+#define IS2_TYPE_OAM 7
+#define IS2_TYPE_SMAC_SIP6 8
+#define IS2_TYPE_ANY 100 /* Pseudo type */
+
+/* IS2 half key type mask for matching any IP */
+#define IS2_TYPE_MASK_IP_ANY 0xe
+
+/* IS2 action types */
+#define IS2_ACTION_TYPE_NORMAL 0
+#define IS2_ACTION_TYPE_SMAC_SIP 1
+
+/* IS2 MASK_MODE values */
+#define IS2_ACT_MASK_MODE_NONE 0
+#define IS2_ACT_MASK_MODE_FILTER 1
+#define IS2_ACT_MASK_MODE_POLICY 2
+#define IS2_ACT_MASK_MODE_REDIR 3
+
+/* IS2 REW_OP values */
+#define IS2_ACT_REW_OP_NONE 0
+#define IS2_ACT_REW_OP_PTP_ONE 2
+#define IS2_ACT_REW_OP_PTP_TWO 3
+#define IS2_ACT_REW_OP_SPECIAL 8
+#define IS2_ACT_REW_OP_PTP_ORG 9
+#define IS2_ACT_REW_OP_PTP_ONE_SUB_DELAY_1 (IS2_ACT_REW_OP_PTP_ONE | (1 << 3))
+#define IS2_ACT_REW_OP_PTP_ONE_SUB_DELAY_2 (IS2_ACT_REW_OP_PTP_ONE | (2 << 3))
+#define IS2_ACT_REW_OP_PTP_ONE_ADD_DELAY (IS2_ACT_REW_OP_PTP_ONE | (1 << 5))
+#define IS2_ACT_REW_OP_PTP_ONE_ADD_SUB BIT(7)
+
+#define VCAP_PORT_WIDTH 4
+
+/* IS2 quarter key - SMAC_SIP4 */
+#define IS2_QKO_IGR_PORT 0
+#define IS2_QKL_IGR_PORT VCAP_PORT_WIDTH
+#define IS2_QKO_L2_SMAC (IS2_QKO_IGR_PORT + IS2_QKL_IGR_PORT)
+#define IS2_QKL_L2_SMAC 48
+#define IS2_QKO_L3_IP4_SIP (IS2_QKO_L2_SMAC + IS2_QKL_L2_SMAC)
+#define IS2_QKL_L3_IP4_SIP 32
+
+/* IS2 half key - common */
+#define IS2_HKO_TYPE 0
+#define IS2_HKL_TYPE 4
+#define IS2_HKO_FIRST (IS2_HKO_TYPE + IS2_HKL_TYPE)
+#define IS2_HKL_FIRST 1
+#define IS2_HKO_PAG (IS2_HKO_FIRST + IS2_HKL_FIRST)
+#define IS2_HKL_PAG 8
+#define IS2_HKO_IGR_PORT_MASK (IS2_HKO_PAG + IS2_HKL_PAG)
+#define IS2_HKL_IGR_PORT_MASK (VCAP_PORT_CNT + 1)
+#define IS2_HKO_SERVICE_FRM (IS2_HKO_IGR_PORT_MASK + IS2_HKL_IGR_PORT_MASK)
+#define IS2_HKL_SERVICE_FRM 1
+#define IS2_HKO_HOST_MATCH (IS2_HKO_SERVICE_FRM + IS2_HKL_SERVICE_FRM)
+#define IS2_HKL_HOST_MATCH 1
+#define IS2_HKO_L2_MC (IS2_HKO_HOST_MATCH + IS2_HKL_HOST_MATCH)
+#define IS2_HKL_L2_MC 1
+#define IS2_HKO_L2_BC (IS2_HKO_L2_MC + IS2_HKL_L2_MC)
+#define IS2_HKL_L2_BC 1
+#define IS2_HKO_VLAN_TAGGED (IS2_HKO_L2_BC + IS2_HKL_L2_BC)
+#define IS2_HKL_VLAN_TAGGED 1
+#define IS2_HKO_VID (IS2_HKO_VLAN_TAGGED + IS2_HKL_VLAN_TAGGED)
+#define IS2_HKL_VID 12
+#define IS2_HKO_DEI (IS2_HKO_VID + IS2_HKL_VID)
+#define IS2_HKL_DEI 1
+#define IS2_HKO_PCP (IS2_HKO_DEI + IS2_HKL_DEI)
+#define IS2_HKL_PCP 3
+
+/* IS2 half key - MAC_ETYPE/MAC_LLC/MAC_SNAP/OAM common */
+#define IS2_HKO_L2_DMAC (IS2_HKO_PCP + IS2_HKL_PCP)
+#define IS2_HKL_L2_DMAC 48
+#define IS2_HKO_L2_SMAC (IS2_HKO_L2_DMAC + IS2_HKL_L2_DMAC)
+#define IS2_HKL_L2_SMAC 48
+
+/* IS2 half key - MAC_ETYPE */
+#define IS2_HKO_MAC_ETYPE_ETYPE (IS2_HKO_L2_SMAC + IS2_HKL_L2_SMAC)
+#define IS2_HKL_MAC_ETYPE_ETYPE 16
+#define IS2_HKO_MAC_ETYPE_L2_PAYLOAD \
+ (IS2_HKO_MAC_ETYPE_ETYPE + IS2_HKL_MAC_ETYPE_ETYPE)
+#define IS2_HKL_MAC_ETYPE_L2_PAYLOAD 27
+
+/* IS2 half key - MAC_LLC */
+#define IS2_HKO_MAC_LLC_L2_LLC IS2_HKO_MAC_ETYPE_ETYPE
+#define IS2_HKL_MAC_LLC_L2_LLC 40
+
+/* IS2 half key - MAC_SNAP */
+#define IS2_HKO_MAC_SNAP_L2_SNAP IS2_HKO_MAC_ETYPE_ETYPE
+#define IS2_HKL_MAC_SNAP_L2_SNAP 40
+
+/* IS2 half key - ARP */
+#define IS2_HKO_MAC_ARP_L2_SMAC IS2_HKO_L2_DMAC
+#define IS2_HKL_MAC_ARP_L2_SMAC 48
+#define IS2_HKO_MAC_ARP_ARP_ADDR_SPACE_OK \
+ (IS2_HKO_MAC_ARP_L2_SMAC + IS2_HKL_MAC_ARP_L2_SMAC)
+#define IS2_HKL_MAC_ARP_ARP_ADDR_SPACE_OK 1
+#define IS2_HKO_MAC_ARP_ARP_PROTO_SPACE_OK \
+ (IS2_HKO_MAC_ARP_ARP_ADDR_SPACE_OK + IS2_HKL_MAC_ARP_ARP_ADDR_SPACE_OK)
+#define IS2_HKL_MAC_ARP_ARP_PROTO_SPACE_OK 1
+#define IS2_HKO_MAC_ARP_ARP_LEN_OK \
+ (IS2_HKO_MAC_ARP_ARP_PROTO_SPACE_OK + \
+ IS2_HKL_MAC_ARP_ARP_PROTO_SPACE_OK)
+#define IS2_HKL_MAC_ARP_ARP_LEN_OK 1
+#define IS2_HKO_MAC_ARP_ARP_TGT_MATCH \
+ (IS2_HKO_MAC_ARP_ARP_LEN_OK + IS2_HKL_MAC_ARP_ARP_LEN_OK)
+#define IS2_HKL_MAC_ARP_ARP_TGT_MATCH 1
+#define IS2_HKO_MAC_ARP_ARP_SENDER_MATCH \
+ (IS2_HKO_MAC_ARP_ARP_TGT_MATCH + IS2_HKL_MAC_ARP_ARP_TGT_MATCH)
+#define IS2_HKL_MAC_ARP_ARP_SENDER_MATCH 1
+#define IS2_HKO_MAC_ARP_ARP_OPCODE_UNKNOWN \
+ (IS2_HKO_MAC_ARP_ARP_SENDER_MATCH + IS2_HKL_MAC_ARP_ARP_SENDER_MATCH)
+#define IS2_HKL_MAC_ARP_ARP_OPCODE_UNKNOWN 1
+#define IS2_HKO_MAC_ARP_ARP_OPCODE \
+ (IS2_HKO_MAC_ARP_ARP_OPCODE_UNKNOWN + \
+ IS2_HKL_MAC_ARP_ARP_OPCODE_UNKNOWN)
+#define IS2_HKL_MAC_ARP_ARP_OPCODE 2
+#define IS2_HKO_MAC_ARP_L3_IP4_DIP \
+ (IS2_HKO_MAC_ARP_ARP_OPCODE + IS2_HKL_MAC_ARP_ARP_OPCODE)
+#define IS2_HKL_MAC_ARP_L3_IP4_DIP 32
+#define IS2_HKO_MAC_ARP_L3_IP4_SIP \
+ (IS2_HKO_MAC_ARP_L3_IP4_DIP + IS2_HKL_MAC_ARP_L3_IP4_DIP)
+#define IS2_HKL_MAC_ARP_L3_IP4_SIP 32
+#define IS2_HKO_MAC_ARP_DIP_EQ_SIP \
+ (IS2_HKO_MAC_ARP_L3_IP4_SIP + IS2_HKL_MAC_ARP_L3_IP4_SIP)
+#define IS2_HKL_MAC_ARP_DIP_EQ_SIP 1
+
+/* IS2 half key - IP4_TCP_UDP/IP4_OTHER common */
+#define IS2_HKO_IP4 IS2_HKO_L2_DMAC
+#define IS2_HKL_IP4 1
+#define IS2_HKO_L3_FRAGMENT (IS2_HKO_IP4 + IS2_HKL_IP4)
+#define IS2_HKL_L3_FRAGMENT 1
+#define IS2_HKO_L3_FRAG_OFS_GT0 (IS2_HKO_L3_FRAGMENT + IS2_HKL_L3_FRAGMENT)
+#define IS2_HKL_L3_FRAG_OFS_GT0 1
+#define IS2_HKO_L3_OPTIONS (IS2_HKO_L3_FRAG_OFS_GT0 + IS2_HKL_L3_FRAG_OFS_GT0)
+#define IS2_HKL_L3_OPTIONS 1
+#define IS2_HKO_L3_TTL_GT0 (IS2_HKO_L3_OPTIONS + IS2_HKL_L3_OPTIONS)
+#define IS2_HKL_L3_TTL_GT0 1
+#define IS2_HKO_L3_TOS (IS2_HKO_L3_TTL_GT0 + IS2_HKL_L3_TTL_GT0)
+#define IS2_HKL_L3_TOS 8
+#define IS2_HKO_L3_IP4_DIP (IS2_HKO_L3_TOS + IS2_HKL_L3_TOS)
+#define IS2_HKL_L3_IP4_DIP 32
+#define IS2_HKO_L3_IP4_SIP (IS2_HKO_L3_IP4_DIP + IS2_HKL_L3_IP4_DIP)
+#define IS2_HKL_L3_IP4_SIP 32
+#define IS2_HKO_DIP_EQ_SIP (IS2_HKO_L3_IP4_SIP + IS2_HKL_L3_IP4_SIP)
+#define IS2_HKL_DIP_EQ_SIP 1
+
+/* IS2 half key - IP4_TCP_UDP */
+#define IS2_HKO_IP4_TCP_UDP_TCP (IS2_HKO_DIP_EQ_SIP + IS2_HKL_DIP_EQ_SIP)
+#define IS2_HKL_IP4_TCP_UDP_TCP 1
+#define IS2_HKO_IP4_TCP_UDP_L4_DPORT \
+ (IS2_HKO_IP4_TCP_UDP_TCP + IS2_HKL_IP4_TCP_UDP_TCP)
+#define IS2_HKL_IP4_TCP_UDP_L4_DPORT 16
+#define IS2_HKO_IP4_TCP_UDP_L4_SPORT \
+ (IS2_HKO_IP4_TCP_UDP_L4_DPORT + IS2_HKL_IP4_TCP_UDP_L4_DPORT)
+#define IS2_HKL_IP4_TCP_UDP_L4_SPORT 16
+#define IS2_HKO_IP4_TCP_UDP_L4_RNG \
+ (IS2_HKO_IP4_TCP_UDP_L4_SPORT + IS2_HKL_IP4_TCP_UDP_L4_SPORT)
+#define IS2_HKL_IP4_TCP_UDP_L4_RNG 8
+#define IS2_HKO_IP4_TCP_UDP_SPORT_EQ_DPORT \
+ (IS2_HKO_IP4_TCP_UDP_L4_RNG + IS2_HKL_IP4_TCP_UDP_L4_RNG)
+#define IS2_HKL_IP4_TCP_UDP_SPORT_EQ_DPORT 1
+#define IS2_HKO_IP4_TCP_UDP_SEQUENCE_EQ0 \
+ (IS2_HKO_IP4_TCP_UDP_SPORT_EQ_DPORT + \
+ IS2_HKL_IP4_TCP_UDP_SPORT_EQ_DPORT)
+#define IS2_HKL_IP4_TCP_UDP_SEQUENCE_EQ0 1
+#define IS2_HKO_IP4_TCP_UDP_L4_FIN \
+ (IS2_HKO_IP4_TCP_UDP_SEQUENCE_EQ0 + IS2_HKL_IP4_TCP_UDP_SEQUENCE_EQ0)
+#define IS2_HKL_IP4_TCP_UDP_L4_FIN 1
+#define IS2_HKO_IP4_TCP_UDP_L4_SYN \
+ (IS2_HKO_IP4_TCP_UDP_L4_FIN + IS2_HKL_IP4_TCP_UDP_L4_FIN)
+#define IS2_HKL_IP4_TCP_UDP_L4_SYN 1
+#define IS2_HKO_IP4_TCP_UDP_L4_RST \
+ (IS2_HKO_IP4_TCP_UDP_L4_SYN + IS2_HKL_IP4_TCP_UDP_L4_SYN)
+#define IS2_HKL_IP4_TCP_UDP_L4_RST 1
+#define IS2_HKO_IP4_TCP_UDP_L4_PSH \
+ (IS2_HKO_IP4_TCP_UDP_L4_RST + IS2_HKL_IP4_TCP_UDP_L4_RST)
+#define IS2_HKL_IP4_TCP_UDP_L4_PSH 1
+#define IS2_HKO_IP4_TCP_UDP_L4_ACK \
+ (IS2_HKO_IP4_TCP_UDP_L4_PSH + IS2_HKL_IP4_TCP_UDP_L4_PSH)
+#define IS2_HKL_IP4_TCP_UDP_L4_ACK 1
+#define IS2_HKO_IP4_TCP_UDP_L4_URG \
+ (IS2_HKO_IP4_TCP_UDP_L4_ACK + IS2_HKL_IP4_TCP_UDP_L4_ACK)
+#define IS2_HKL_IP4_TCP_UDP_L4_URG 1
+#define IS2_HKO_IP4_TCP_UDP_L4_1588_DOM \
+ (IS2_HKO_IP4_TCP_UDP_L4_URG + IS2_HKL_IP4_TCP_UDP_L4_URG)
+#define IS2_HKL_IP4_TCP_UDP_L4_1588_DOM 8
+#define IS2_HKO_IP4_TCP_UDP_L4_1588_VER \
+ (IS2_HKO_IP4_TCP_UDP_L4_1588_DOM + IS2_HKL_IP4_TCP_UDP_L4_1588_DOM)
+#define IS2_HKL_IP4_TCP_UDP_L4_1588_VER 4
+
+/* IS2 half key - IP4_OTHER */
+#define IS2_HKO_IP4_OTHER_L3_PROTO IS2_HKO_IP4_TCP_UDP_TCP
+#define IS2_HKL_IP4_OTHER_L3_PROTO 8
+#define IS2_HKO_IP4_OTHER_L3_PAYLOAD \
+ (IS2_HKO_IP4_OTHER_L3_PROTO + IS2_HKL_IP4_OTHER_L3_PROTO)
+#define IS2_HKL_IP4_OTHER_L3_PAYLOAD 56
+
+/* IS2 half key - IP6_STD */
+#define IS2_HKO_IP6_STD_L3_TTL_GT0 IS2_HKO_L2_DMAC
+#define IS2_HKL_IP6_STD_L3_TTL_GT0 1
+#define IS2_HKO_IP6_STD_L3_IP6_SIP \
+ (IS2_HKO_IP6_STD_L3_TTL_GT0 + IS2_HKL_IP6_STD_L3_TTL_GT0)
+#define IS2_HKL_IP6_STD_L3_IP6_SIP 128
+#define IS2_HKO_IP6_STD_L3_PROTO \
+ (IS2_HKO_IP6_STD_L3_IP6_SIP + IS2_HKL_IP6_STD_L3_IP6_SIP)
+#define IS2_HKL_IP6_STD_L3_PROTO 8
+
+/* IS2 half key - OAM */
+#define IS2_HKO_OAM_OAM_MEL_FLAGS IS2_HKO_MAC_ETYPE_ETYPE
+#define IS2_HKL_OAM_OAM_MEL_FLAGS 7
+#define IS2_HKO_OAM_OAM_VER \
+ (IS2_HKO_OAM_OAM_MEL_FLAGS + IS2_HKL_OAM_OAM_MEL_FLAGS)
+#define IS2_HKL_OAM_OAM_VER 5
+#define IS2_HKO_OAM_OAM_OPCODE (IS2_HKO_OAM_OAM_VER + IS2_HKL_OAM_OAM_VER)
+#define IS2_HKL_OAM_OAM_OPCODE 8
+#define IS2_HKO_OAM_OAM_FLAGS (IS2_HKO_OAM_OAM_OPCODE + IS2_HKL_OAM_OAM_OPCODE)
+#define IS2_HKL_OAM_OAM_FLAGS 8
+#define IS2_HKO_OAM_OAM_MEPID (IS2_HKO_OAM_OAM_FLAGS + IS2_HKL_OAM_OAM_FLAGS)
+#define IS2_HKL_OAM_OAM_MEPID 16
+#define IS2_HKO_OAM_OAM_CCM_CNTS_EQ0 \
+ (IS2_HKO_OAM_OAM_MEPID + IS2_HKL_OAM_OAM_MEPID)
+#define IS2_HKL_OAM_OAM_CCM_CNTS_EQ0 1
+
+/* IS2 half key - SMAC_SIP6 */
+#define IS2_HKO_SMAC_SIP6_IGR_PORT IS2_HKL_TYPE
+#define IS2_HKL_SMAC_SIP6_IGR_PORT VCAP_PORT_WIDTH
+#define IS2_HKO_SMAC_SIP6_L2_SMAC \
+ (IS2_HKO_SMAC_SIP6_IGR_PORT + IS2_HKL_SMAC_SIP6_IGR_PORT)
+#define IS2_HKL_SMAC_SIP6_L2_SMAC 48
+#define IS2_HKO_SMAC_SIP6_L3_IP6_SIP \
+ (IS2_HKO_SMAC_SIP6_L2_SMAC + IS2_HKL_SMAC_SIP6_L2_SMAC)
+#define IS2_HKL_SMAC_SIP6_L3_IP6_SIP 128
+
+/* IS2 full key - common */
+#define IS2_FKO_TYPE 0
+#define IS2_FKL_TYPE 2
+#define IS2_FKO_FIRST (IS2_FKO_TYPE + IS2_FKL_TYPE)
+#define IS2_FKL_FIRST 1
+#define IS2_FKO_PAG (IS2_FKO_FIRST + IS2_FKL_FIRST)
+#define IS2_FKL_PAG 8
+#define IS2_FKO_IGR_PORT_MASK (IS2_FKO_PAG + IS2_FKL_PAG)
+#define IS2_FKL_IGR_PORT_MASK (VCAP_PORT_CNT + 1)
+#define IS2_FKO_SERVICE_FRM (IS2_FKO_IGR_PORT_MASK + IS2_FKL_IGR_PORT_MASK)
+#define IS2_FKL_SERVICE_FRM 1
+#define IS2_FKO_HOST_MATCH (IS2_FKO_SERVICE_FRM + IS2_FKL_SERVICE_FRM)
+#define IS2_FKL_HOST_MATCH 1
+#define IS2_FKO_L2_MC (IS2_FKO_HOST_MATCH + IS2_FKL_HOST_MATCH)
+#define IS2_FKL_L2_MC 1
+#define IS2_FKO_L2_BC (IS2_FKO_L2_MC + IS2_FKL_L2_MC)
+#define IS2_FKL_L2_BC 1
+#define IS2_FKO_VLAN_TAGGED (IS2_FKO_L2_BC + IS2_FKL_L2_BC)
+#define IS2_FKL_VLAN_TAGGED 1
+#define IS2_FKO_VID (IS2_FKO_VLAN_TAGGED + IS2_FKL_VLAN_TAGGED)
+#define IS2_FKL_VID 12
+#define IS2_FKO_DEI (IS2_FKO_VID + IS2_FKL_VID)
+#define IS2_FKL_DEI 1
+#define IS2_FKO_PCP (IS2_FKO_DEI + IS2_FKL_DEI)
+#define IS2_FKL_PCP 3
+
+/* IS2 full key - IP6_TCP_UDP/IP6_OTHER common */
+#define IS2_FKO_L3_TTL_GT0 (IS2_FKO_PCP + IS2_FKL_PCP)
+#define IS2_FKL_L3_TTL_GT0 1
+#define IS2_FKO_L3_TOS (IS2_FKO_L3_TTL_GT0 + IS2_FKL_L3_TTL_GT0)
+#define IS2_FKL_L3_TOS 8
+#define IS2_FKO_L3_IP6_DIP (IS2_FKO_L3_TOS + IS2_FKL_L3_TOS)
+#define IS2_FKL_L3_IP6_DIP 128
+#define IS2_FKO_L3_IP6_SIP (IS2_FKO_L3_IP6_DIP + IS2_FKL_L3_IP6_DIP)
+#define IS2_FKL_L3_IP6_SIP 128
+#define IS2_FKO_DIP_EQ_SIP (IS2_FKO_L3_IP6_SIP + IS2_FKL_L3_IP6_SIP)
+#define IS2_FKL_DIP_EQ_SIP 1
+
+/* IS2 full key - IP6_TCP_UDP */
+#define IS2_FKO_IP6_TCP_UDP_TCP (IS2_FKO_DIP_EQ_SIP + IS2_FKL_DIP_EQ_SIP)
+#define IS2_FKL_IP6_TCP_UDP_TCP 1
+#define IS2_FKO_IP6_TCP_UDP_L4_DPORT \
+ (IS2_FKO_IP6_TCP_UDP_TCP + IS2_FKL_IP6_TCP_UDP_TCP)
+#define IS2_FKL_IP6_TCP_UDP_L4_DPORT 16
+#define IS2_FKO_IP6_TCP_UDP_L4_SPORT \
+ (IS2_FKO_IP6_TCP_UDP_L4_DPORT + IS2_FKL_IP6_TCP_UDP_L4_DPORT)
+#define IS2_FKL_IP6_TCP_UDP_L4_SPORT 16
+#define IS2_FKO_IP6_TCP_UDP_L4_RNG \
+ (IS2_FKO_IP6_TCP_UDP_L4_SPORT + IS2_FKL_IP6_TCP_UDP_L4_SPORT)
+#define IS2_FKL_IP6_TCP_UDP_L4_RNG 8
+#define IS2_FKO_IP6_TCP_UDP_SPORT_EQ_DPORT \
+ (IS2_FKO_IP6_TCP_UDP_L4_RNG + IS2_FKL_IP6_TCP_UDP_L4_RNG)
+#define IS2_FKL_IP6_TCP_UDP_SPORT_EQ_DPORT 1
+#define IS2_FKO_IP6_TCP_UDP_SEQUENCE_EQ0 \
+ (IS2_FKO_IP6_TCP_UDP_SPORT_EQ_DPORT + \
+ IS2_FKL_IP6_TCP_UDP_SPORT_EQ_DPORT)
+#define IS2_FKL_IP6_TCP_UDP_SEQUENCE_EQ0 1
+#define IS2_FKO_IP6_TCP_UDP_L4_FIN \
+ (IS2_FKO_IP6_TCP_UDP_SEQUENCE_EQ0 + IS2_FKL_IP6_TCP_UDP_SEQUENCE_EQ0)
+#define IS2_FKL_IP6_TCP_UDP_L4_FIN 1
+#define IS2_FKO_IP6_TCP_UDP_L4_SYN \
+ (IS2_FKO_IP6_TCP_UDP_L4_FIN + IS2_FKL_IP6_TCP_UDP_L4_FIN)
+#define IS2_FKL_IP6_TCP_UDP_L4_SYN 1
+#define IS2_FKO_IP6_TCP_UDP_L4_RST \
+ (IS2_FKO_IP6_TCP_UDP_L4_SYN + IS2_FKL_IP6_TCP_UDP_L4_SYN)
+#define IS2_FKL_IP6_TCP_UDP_L4_RST 1
+#define IS2_FKO_IP6_TCP_UDP_L4_PSH \
+ (IS2_FKO_IP6_TCP_UDP_L4_RST + IS2_FKL_IP6_TCP_UDP_L4_RST)
+#define IS2_FKL_IP6_TCP_UDP_L4_PSH 1
+#define IS2_FKO_IP6_TCP_UDP_L4_ACK \
+ (IS2_FKO_IP6_TCP_UDP_L4_PSH + IS2_FKL_IP6_TCP_UDP_L4_PSH)
+#define IS2_FKL_IP6_TCP_UDP_L4_ACK 1
+#define IS2_FKO_IP6_TCP_UDP_L4_URG \
+ (IS2_FKO_IP6_TCP_UDP_L4_ACK + IS2_FKL_IP6_TCP_UDP_L4_ACK)
+#define IS2_FKL_IP6_TCP_UDP_L4_URG 1
+#define IS2_FKO_IP6_TCP_UDP_L4_1588_DOM \
+ (IS2_FKO_IP6_TCP_UDP_L4_URG + IS2_FKL_IP6_TCP_UDP_L4_URG)
+#define IS2_FKL_IP6_TCP_UDP_L4_1588_DOM 8
+#define IS2_FKO_IP6_TCP_UDP_L4_1588_VER \
+ (IS2_FKO_IP6_TCP_UDP_L4_1588_DOM + IS2_FKL_IP6_TCP_UDP_L4_1588_DOM)
+#define IS2_FKL_IP6_TCP_UDP_L4_1588_VER 4
+
+/* IS2 full key - IP6_OTHER */
+#define IS2_FKO_IP6_OTHER_L3_PROTO IS2_FKO_IP6_TCP_UDP_TCP
+#define IS2_FKL_IP6_OTHER_L3_PROTO 8
+#define IS2_FKO_IP6_OTHER_L3_PAYLOAD \
+ (IS2_FKO_IP6_OTHER_L3_PROTO + IS2_FKL_IP6_OTHER_L3_PROTO)
+#define IS2_FKL_IP6_OTHER_L3_PAYLOAD 56
+
+/* IS2 full key - CUSTOM */
+#define IS2_FKO_CUSTOM_CUSTOM_TYPE IS2_FKO_L3_TTL_GT0
+#define IS2_FKL_CUSTOM_CUSTOM_TYPE 1
+#define IS2_FKO_CUSTOM_CUSTOM \
+ (IS2_FKO_CUSTOM_CUSTOM_TYPE + IS2_FKL_CUSTOM_CUSTOM_TYPE)
+#define IS2_FKL_CUSTOM_CUSTOM 320
+
+/* IS2 action - BASE_TYPE */
+#define IS2_AO_HIT_ME_ONCE 0
+#define IS2_AL_HIT_ME_ONCE 1
+#define IS2_AO_CPU_COPY_ENA (IS2_AO_HIT_ME_ONCE + IS2_AL_HIT_ME_ONCE)
+#define IS2_AL_CPU_COPY_ENA 1
+#define IS2_AO_CPU_QU_NUM (IS2_AO_CPU_COPY_ENA + IS2_AL_CPU_COPY_ENA)
+#define IS2_AL_CPU_QU_NUM 3
+#define IS2_AO_MASK_MODE (IS2_AO_CPU_QU_NUM + IS2_AL_CPU_QU_NUM)
+#define IS2_AL_MASK_MODE 2
+#define IS2_AO_MIRROR_ENA (IS2_AO_MASK_MODE + IS2_AL_MASK_MODE)
+#define IS2_AL_MIRROR_ENA 1
+#define IS2_AO_LRN_DIS (IS2_AO_MIRROR_ENA + IS2_AL_MIRROR_ENA)
+#define IS2_AL_LRN_DIS 1
+#define IS2_AO_POLICE_ENA (IS2_AO_LRN_DIS + IS2_AL_LRN_DIS)
+#define IS2_AL_POLICE_ENA 1
+#define IS2_AO_POLICE_IDX (IS2_AO_POLICE_ENA + IS2_AL_POLICE_ENA)
+#define IS2_AL_POLICE_IDX 9
+#define IS2_AO_POLICE_VCAP_ONLY (IS2_AO_POLICE_IDX + IS2_AL_POLICE_IDX)
+#define IS2_AL_POLICE_VCAP_ONLY 1
+#define IS2_AO_PORT_MASK (IS2_AO_POLICE_VCAP_ONLY + IS2_AL_POLICE_VCAP_ONLY)
+#define IS2_AL_PORT_MASK VCAP_PORT_CNT
+#define IS2_AO_REW_OP (IS2_AO_PORT_MASK + IS2_AL_PORT_MASK)
+#define IS2_AL_REW_OP 9
+#define IS2_AO_LM_CNT_DIS (IS2_AO_REW_OP + IS2_AL_REW_OP)
+#define IS2_AL_LM_CNT_DIS 1
+#define IS2_AO_ISDX_ENA \
+ (IS2_AO_LM_CNT_DIS + IS2_AL_LM_CNT_DIS + 1) /* Reserved bit */
+#define IS2_AL_ISDX_ENA 1
+#define IS2_AO_ACL_ID (IS2_AO_ISDX_ENA + IS2_AL_ISDX_ENA)
+#define IS2_AL_ACL_ID 6
+
+/* IS2 action - SMAC_SIP */
+#define IS2_AO_SMAC_SIP_CPU_COPY_ENA 0
+#define IS2_AL_SMAC_SIP_CPU_COPY_ENA 1
+#define IS2_AO_SMAC_SIP_CPU_QU_NUM 1
+#define IS2_AL_SMAC_SIP_CPU_QU_NUM 3
+#define IS2_AO_SMAC_SIP_FWD_KILL_ENA 4
+#define IS2_AL_SMAC_SIP_FWD_KILL_ENA 1
+#define IS2_AO_SMAC_SIP_HOST_MATCH 5
+#define IS2_AL_SMAC_SIP_HOST_MATCH 1
+
+#endif /* _OCELOT_VCAP_H_ */
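Editorial note: each IS2_HKO_* macro above is a bit offset into the packed VCAP key and each IS2_HKL_* macro the field's width in bits; every offset is simply the previous field's offset plus its length. As a hedged illustration only (not part of the patch, using just the macros above plus the kernel's BIT() helper, and ignoring the device's real key bit ordering), a field could be copied into a key image like this, here for the IP4_TCP_UDP destination port:

/* Hypothetical sketch: place a 16-bit L4 destination port match into a
 * key image, driven purely by the offset/length macros defined above.
 */
static void example_set_l4_dport(u32 *key, u16 dport)
{
	int off = IS2_HKO_IP4_TCP_UDP_L4_DPORT;
	int len = IS2_HKL_IP4_TCP_UDP_L4_DPORT;
	int i;

	/* Copy the value bit by bit into the key bitstream, LSB first */
	for (i = 0; i < len; i++, off++) {
		if (dport & BIT(i))
			key[off / 32] |= BIT(off % 32);
		else
			key[off / 32] &= ~BIT(off % 32);
	}
}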
diff --git a/drivers/net/ethernet/netronome/Kconfig b/drivers/net/ethernet/netronome/Kconfig
index 4ad5109059e0..bac5be4d4f43 100644
--- a/drivers/net/ethernet/netronome/Kconfig
+++ b/drivers/net/ethernet/netronome/Kconfig
@@ -20,6 +20,7 @@ config NFP
tristate "Netronome(R) NFP4000/NFP6000 NIC driver"
depends on PCI && PCI_MSI
depends on VXLAN || VXLAN=n
+ depends on TLS && TLS_DEVICE || TLS_DEVICE=n
select NET_DEVLINK
---help---
This driver supports the Netronome(R) NFP4000/NFP6000 based
diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile
index 87bf784f8e8f..2805641965f3 100644
--- a/drivers/net/ethernet/netronome/nfp/Makefile
+++ b/drivers/net/ethernet/netronome/nfp/Makefile
@@ -16,6 +16,7 @@ nfp-objs := \
nfpcore/nfp_rtsym.o \
nfpcore/nfp_target.o \
ccm.o \
+ ccm_mbox.o \
nfp_asm.o \
nfp_app.o \
nfp_app_nic.o \
@@ -34,6 +35,11 @@ nfp-objs := \
nfp_shared_buf.o \
nic/main.o
+ifeq ($(CONFIG_TLS_DEVICE),y)
+nfp-objs += \
+ crypto/tls.o
+endif
+
ifeq ($(CONFIG_NFP_APP_FLOWER),y)
nfp-objs += \
flower/action.o \
diff --git a/drivers/net/ethernet/netronome/nfp/abm/cls.c b/drivers/net/ethernet/netronome/nfp/abm/cls.c
index ff3913085665..23ebddfb9532 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/cls.c
+++ b/drivers/net/ethernet/netronome/nfp/abm/cls.c
@@ -262,22 +262,12 @@ static int nfp_abm_setup_tc_block_cb(enum tc_setup_type type,
}
}
+static LIST_HEAD(nfp_abm_block_cb_list);
+
int nfp_abm_setup_cls_block(struct net_device *netdev, struct nfp_repr *repr,
- struct tc_block_offload *f)
+ struct flow_block_offload *f)
{
- if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
- return -EOPNOTSUPP;
-
- switch (f->command) {
- case TC_BLOCK_BIND:
- return tcf_block_cb_register(f->block,
- nfp_abm_setup_tc_block_cb,
- repr, repr, f->extack);
- case TC_BLOCK_UNBIND:
- tcf_block_cb_unregister(f->block, nfp_abm_setup_tc_block_cb,
- repr);
- return 0;
- default:
- return -EOPNOTSUPP;
- }
+ return flow_block_cb_setup_simple(f, &nfp_abm_block_cb_list,
+ nfp_abm_setup_tc_block_cb,
+ repr, repr, true);
}
diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.h b/drivers/net/ethernet/netronome/nfp/abm/main.h
index 49749c60885e..48746c9c6224 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/main.h
+++ b/drivers/net/ethernet/netronome/nfp/abm/main.h
@@ -247,7 +247,7 @@ int nfp_abm_setup_tc_mq(struct net_device *netdev, struct nfp_abm_link *alink,
int nfp_abm_setup_tc_gred(struct net_device *netdev, struct nfp_abm_link *alink,
struct tc_gred_qopt_offload *opt);
int nfp_abm_setup_cls_block(struct net_device *netdev, struct nfp_repr *repr,
- struct tc_block_offload *opt);
+ struct flow_block_offload *opt);
int nfp_abm_ctrl_read_params(struct nfp_abm_link *alink);
int nfp_abm_ctrl_find_addrs(struct nfp_abm *abm);
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index d4bf0e694541..4054b70d7719 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -623,6 +623,13 @@ static void wrp_immed(struct nfp_prog *nfp_prog, swreg dst, u32 imm)
}
static void
+wrp_zext(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, u8 dst)
+{
+ if (meta->flags & FLAG_INSN_DO_ZEXT)
+ wrp_immed(nfp_prog, reg_both(dst + 1), 0);
+}
+
+static void
wrp_immed_relo(struct nfp_prog *nfp_prog, swreg dst, u32 imm,
enum nfp_relo_type relo)
{
@@ -858,7 +865,8 @@ static int nfp_cpp_memcpy(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
}
static int
-data_ld(struct nfp_prog *nfp_prog, swreg offset, u8 dst_gpr, int size)
+data_ld(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, swreg offset,
+ u8 dst_gpr, int size)
{
unsigned int i;
u16 shift, sz;
@@ -881,14 +889,15 @@ data_ld(struct nfp_prog *nfp_prog, swreg offset, u8 dst_gpr, int size)
wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i));
if (i < 2)
- wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0);
+ wrp_zext(nfp_prog, meta, dst_gpr);
return 0;
}
static int
-data_ld_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr,
- swreg lreg, swreg rreg, int size, enum cmd_mode mode)
+data_ld_host_order(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ u8 dst_gpr, swreg lreg, swreg rreg, int size,
+ enum cmd_mode mode)
{
unsigned int i;
u8 mask, sz;
@@ -911,33 +920,34 @@ data_ld_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr,
wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i));
if (i < 2)
- wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0);
+ wrp_zext(nfp_prog, meta, dst_gpr);
return 0;
}
static int
-data_ld_host_order_addr32(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
- u8 dst_gpr, u8 size)
+data_ld_host_order_addr32(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ u8 src_gpr, swreg offset, u8 dst_gpr, u8 size)
{
- return data_ld_host_order(nfp_prog, dst_gpr, reg_a(src_gpr), offset,
- size, CMD_MODE_32b);
+ return data_ld_host_order(nfp_prog, meta, dst_gpr, reg_a(src_gpr),
+ offset, size, CMD_MODE_32b);
}
static int
-data_ld_host_order_addr40(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
- u8 dst_gpr, u8 size)
+data_ld_host_order_addr40(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ u8 src_gpr, swreg offset, u8 dst_gpr, u8 size)
{
swreg rega, regb;
addr40_offset(nfp_prog, src_gpr, offset, &rega, &regb);
- return data_ld_host_order(nfp_prog, dst_gpr, rega, regb,
+ return data_ld_host_order(nfp_prog, meta, dst_gpr, rega, regb,
size, CMD_MODE_40b_BA);
}
static int
-construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset, u16 src, u8 size)
+construct_data_ind_ld(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ u16 offset, u16 src, u8 size)
{
swreg tmp_reg;
@@ -953,10 +963,12 @@ construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset, u16 src, u8 size)
emit_br_relo(nfp_prog, BR_BLO, BR_OFF_RELO, 0, RELO_BR_GO_ABORT);
/* Load data */
- return data_ld(nfp_prog, imm_b(nfp_prog), 0, size);
+ return data_ld(nfp_prog, meta, imm_b(nfp_prog), 0, size);
}
-static int construct_data_ld(struct nfp_prog *nfp_prog, u16 offset, u8 size)
+static int
+construct_data_ld(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ u16 offset, u8 size)
{
swreg tmp_reg;
@@ -967,7 +979,7 @@ static int construct_data_ld(struct nfp_prog *nfp_prog, u16 offset, u8 size)
/* Load data */
tmp_reg = re_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
- return data_ld(nfp_prog, tmp_reg, 0, size);
+ return data_ld(nfp_prog, meta, tmp_reg, 0, size);
}
static int
@@ -1204,7 +1216,7 @@ mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
}
if (clr_gpr && size < 8)
- wrp_immed(nfp_prog, reg_both(gpr + 1), 0);
+ wrp_zext(nfp_prog, meta, gpr);
while (size) {
u32 slice_end;
@@ -1305,9 +1317,10 @@ wrp_alu32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
enum alu_op alu_op)
{
const struct bpf_insn *insn = &meta->insn;
+ u8 dst = insn->dst_reg * 2;
- wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, insn->imm);
- wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);
+ wrp_alu_imm(nfp_prog, dst, alu_op, insn->imm);
+ wrp_zext(nfp_prog, meta, dst);
return 0;
}
@@ -1319,7 +1332,7 @@ wrp_alu32_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
u8 dst = meta->insn.dst_reg * 2, src = meta->insn.src_reg * 2;
emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, reg_b(src));
- wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);
+ wrp_zext(nfp_prog, meta, dst);
return 0;
}
@@ -2396,12 +2409,14 @@ static int neg_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
u8 dst = meta->insn.dst_reg * 2;
emit_alu(nfp_prog, reg_both(dst), reg_imm(0), ALU_OP_SUB, reg_b(dst));
- wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);
+ wrp_zext(nfp_prog, meta, dst);
return 0;
}
-static int __ashr_imm(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt)
+static int
+__ashr_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, u8 dst,
+ u8 shift_amt)
{
if (shift_amt) {
/* Set signedness bit (MSB of result). */
@@ -2410,7 +2425,7 @@ static int __ashr_imm(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt)
emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR,
reg_b(dst), SHF_SC_R_SHF, shift_amt);
}
- wrp_immed(nfp_prog, reg_both(dst + 1), 0);
+ wrp_zext(nfp_prog, meta, dst);
return 0;
}
@@ -2425,7 +2440,7 @@ static int ashr_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
umin = meta->umin_src;
umax = meta->umax_src;
if (umin == umax)
- return __ashr_imm(nfp_prog, dst, umin);
+ return __ashr_imm(nfp_prog, meta, dst, umin);
src = insn->src_reg * 2;
/* NOTE: the first insn will set both indirect shift amount (source A)
@@ -2434,7 +2449,7 @@ static int ashr_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_b(dst));
emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR,
reg_b(dst), SHF_SC_R_SHF);
- wrp_immed(nfp_prog, reg_both(dst + 1), 0);
+ wrp_zext(nfp_prog, meta, dst);
return 0;
}
@@ -2444,15 +2459,17 @@ static int ashr_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
const struct bpf_insn *insn = &meta->insn;
u8 dst = insn->dst_reg * 2;
- return __ashr_imm(nfp_prog, dst, insn->imm);
+ return __ashr_imm(nfp_prog, meta, dst, insn->imm);
}
-static int __shr_imm(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt)
+static int
+__shr_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, u8 dst,
+ u8 shift_amt)
{
if (shift_amt)
emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE,
reg_b(dst), SHF_SC_R_SHF, shift_amt);
- wrp_immed(nfp_prog, reg_both(dst + 1), 0);
+ wrp_zext(nfp_prog, meta, dst);
return 0;
}
@@ -2461,7 +2478,7 @@ static int shr_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
const struct bpf_insn *insn = &meta->insn;
u8 dst = insn->dst_reg * 2;
- return __shr_imm(nfp_prog, dst, insn->imm);
+ return __shr_imm(nfp_prog, meta, dst, insn->imm);
}
static int shr_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
@@ -2474,22 +2491,24 @@ static int shr_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
umin = meta->umin_src;
umax = meta->umax_src;
if (umin == umax)
- return __shr_imm(nfp_prog, dst, umin);
+ return __shr_imm(nfp_prog, meta, dst, umin);
src = insn->src_reg * 2;
emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0));
emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE,
reg_b(dst), SHF_SC_R_SHF);
- wrp_immed(nfp_prog, reg_both(dst + 1), 0);
+ wrp_zext(nfp_prog, meta, dst);
return 0;
}
-static int __shl_imm(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt)
+static int
+__shl_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, u8 dst,
+ u8 shift_amt)
{
if (shift_amt)
emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE,
reg_b(dst), SHF_SC_L_SHF, shift_amt);
- wrp_immed(nfp_prog, reg_both(dst + 1), 0);
+ wrp_zext(nfp_prog, meta, dst);
return 0;
}
@@ -2498,7 +2517,7 @@ static int shl_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
const struct bpf_insn *insn = &meta->insn;
u8 dst = insn->dst_reg * 2;
- return __shl_imm(nfp_prog, dst, insn->imm);
+ return __shl_imm(nfp_prog, meta, dst, insn->imm);
}
static int shl_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
@@ -2511,11 +2530,11 @@ static int shl_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
umin = meta->umin_src;
umax = meta->umax_src;
if (umin == umax)
- return __shl_imm(nfp_prog, dst, umin);
+ return __shl_imm(nfp_prog, meta, dst, umin);
src = insn->src_reg * 2;
shl_reg64_lt32_low(nfp_prog, dst, src);
- wrp_immed(nfp_prog, reg_both(dst + 1), 0);
+ wrp_zext(nfp_prog, meta, dst);
return 0;
}
@@ -2577,34 +2596,34 @@ static int imm_ld8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
static int data_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return construct_data_ld(nfp_prog, meta->insn.imm, 1);
+ return construct_data_ld(nfp_prog, meta, meta->insn.imm, 1);
}
static int data_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return construct_data_ld(nfp_prog, meta->insn.imm, 2);
+ return construct_data_ld(nfp_prog, meta, meta->insn.imm, 2);
}
static int data_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return construct_data_ld(nfp_prog, meta->insn.imm, 4);
+ return construct_data_ld(nfp_prog, meta, meta->insn.imm, 4);
}
static int data_ind_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return construct_data_ind_ld(nfp_prog, meta->insn.imm,
+ return construct_data_ind_ld(nfp_prog, meta, meta->insn.imm,
meta->insn.src_reg * 2, 1);
}
static int data_ind_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return construct_data_ind_ld(nfp_prog, meta->insn.imm,
+ return construct_data_ind_ld(nfp_prog, meta, meta->insn.imm,
meta->insn.src_reg * 2, 2);
}
static int data_ind_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return construct_data_ind_ld(nfp_prog, meta->insn.imm,
+ return construct_data_ind_ld(nfp_prog, meta, meta->insn.imm,
meta->insn.src_reg * 2, 4);
}
@@ -2682,7 +2701,7 @@ mem_ldx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
- return data_ld_host_order_addr32(nfp_prog, meta->insn.src_reg * 2,
+ return data_ld_host_order_addr32(nfp_prog, meta, meta->insn.src_reg * 2,
tmp_reg, meta->insn.dst_reg * 2, size);
}
@@ -2694,7 +2713,7 @@ mem_ldx_emem(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
- return data_ld_host_order_addr40(nfp_prog, meta->insn.src_reg * 2,
+ return data_ld_host_order_addr40(nfp_prog, meta, meta->insn.src_reg * 2,
tmp_reg, meta->insn.dst_reg * 2, size);
}
@@ -2755,7 +2774,7 @@ mem_ldx_data_from_pktcache_unaligned(struct nfp_prog *nfp_prog,
wrp_reg_subpart(nfp_prog, dst_lo, src_lo, len_lo, off);
if (!len_mid) {
- wrp_immed(nfp_prog, dst_hi, 0);
+ wrp_zext(nfp_prog, meta, dst_gpr);
return 0;
}
@@ -2763,7 +2782,7 @@ mem_ldx_data_from_pktcache_unaligned(struct nfp_prog *nfp_prog,
if (size <= REG_WIDTH) {
wrp_reg_or_subpart(nfp_prog, dst_lo, src_mid, len_mid, len_lo);
- wrp_immed(nfp_prog, dst_hi, 0);
+ wrp_zext(nfp_prog, meta, dst_gpr);
} else {
swreg src_hi = reg_xfer(idx + 2);
@@ -2794,10 +2813,10 @@ mem_ldx_data_from_pktcache_aligned(struct nfp_prog *nfp_prog,
if (size < REG_WIDTH) {
wrp_reg_subpart(nfp_prog, dst_lo, src_lo, size, 0);
- wrp_immed(nfp_prog, dst_hi, 0);
+ wrp_zext(nfp_prog, meta, dst_gpr);
} else if (size == REG_WIDTH) {
wrp_mov(nfp_prog, dst_lo, src_lo);
- wrp_immed(nfp_prog, dst_hi, 0);
+ wrp_zext(nfp_prog, meta, dst_gpr);
} else {
swreg src_hi = reg_xfer(idx + 1);
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c
index 9c136da25221..1c9fb11470df 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c
@@ -160,35 +160,19 @@ static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
return 0;
}
-static int nfp_bpf_setup_tc_block(struct net_device *netdev,
- struct tc_block_offload *f)
-{
- struct nfp_net *nn = netdev_priv(netdev);
-
- if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
- return -EOPNOTSUPP;
-
- switch (f->command) {
- case TC_BLOCK_BIND:
- return tcf_block_cb_register(f->block,
- nfp_bpf_setup_tc_block_cb,
- nn, nn, f->extack);
- case TC_BLOCK_UNBIND:
- tcf_block_cb_unregister(f->block,
- nfp_bpf_setup_tc_block_cb,
- nn);
- return 0;
- default:
- return -EOPNOTSUPP;
- }
-}
+static LIST_HEAD(nfp_bpf_block_cb_list);
static int nfp_bpf_setup_tc(struct nfp_app *app, struct net_device *netdev,
enum tc_setup_type type, void *type_data)
{
+ struct nfp_net *nn = netdev_priv(netdev);
+
switch (type) {
case TC_SETUP_BLOCK:
- return nfp_bpf_setup_tc_block(netdev, type_data);
+ return flow_block_cb_setup_simple(type_data,
+ &nfp_bpf_block_cb_list,
+ nfp_bpf_setup_tc_block_cb,
+ nn, nn, true);
default:
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h
index e54d1ac84df2..57d6ff51e980 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h
@@ -238,6 +238,8 @@ struct nfp_bpf_reg_state {
#define FLAG_INSN_SKIP_PREC_DEPENDENT BIT(4)
/* Instruction is optimized by the verifier */
#define FLAG_INSN_SKIP_VERIFIER_OPT BIT(5)
+/* Instruction needs to zero extend to high 32-bit */
+#define FLAG_INSN_DO_ZEXT BIT(6)
#define FLAG_INSN_SKIP_MASK (FLAG_INSN_SKIP_NOOP | \
FLAG_INSN_SKIP_PREC_DEPENDENT | \
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
index 36f56eb4cbe2..e92ee510fd52 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
@@ -744,6 +744,17 @@ continue_subprog:
goto continue_subprog;
}
+static void nfp_bpf_insn_flag_zext(struct nfp_prog *nfp_prog,
+ struct bpf_insn_aux_data *aux)
+{
+ struct nfp_insn_meta *meta;
+
+ list_for_each_entry(meta, &nfp_prog->insns, l) {
+ if (aux[meta->n].zext_dst)
+ meta->flags |= FLAG_INSN_DO_ZEXT;
+ }
+}
+
int nfp_bpf_finalize(struct bpf_verifier_env *env)
{
struct bpf_subprog_info *info;
@@ -784,6 +795,7 @@ int nfp_bpf_finalize(struct bpf_verifier_env *env)
return -EOPNOTSUPP;
}
+ nfp_bpf_insn_flag_zext(nfp_prog, env->insn_aux_data);
return 0;
}
diff --git a/drivers/net/ethernet/netronome/nfp/ccm.c b/drivers/net/ethernet/netronome/nfp/ccm.c
index 94476e41e261..71afd111bae3 100644
--- a/drivers/net/ethernet/netronome/nfp/ccm.c
+++ b/drivers/net/ethernet/netronome/nfp/ccm.c
@@ -7,9 +7,6 @@
#include "nfp_app.h"
#include "nfp_net.h"
-#define NFP_CCM_TYPE_REPLY_BIT 7
-#define __NFP_CCM_REPLY(req) (BIT(NFP_CCM_TYPE_REPLY_BIT) | (req))
-
#define ccm_warn(app, msg...) nn_dp_warn(&(app)->ctrl->dp, msg)
#define NFP_CCM_TAG_ALLOC_SPAN (U16_MAX / 4)
diff --git a/drivers/net/ethernet/netronome/nfp/ccm.h b/drivers/net/ethernet/netronome/nfp/ccm.h
index ac963b128203..a460c75522be 100644
--- a/drivers/net/ethernet/netronome/nfp/ccm.h
+++ b/drivers/net/ethernet/netronome/nfp/ccm.h
@@ -9,6 +9,7 @@
#include <linux/wait.h>
struct nfp_app;
+struct nfp_net;
/* Firmware ABI */
@@ -21,15 +22,27 @@ enum nfp_ccm_type {
NFP_CCM_TYPE_BPF_MAP_GETNEXT = 6,
NFP_CCM_TYPE_BPF_MAP_GETFIRST = 7,
NFP_CCM_TYPE_BPF_BPF_EVENT = 8,
+ NFP_CCM_TYPE_CRYPTO_RESET = 9,
+ NFP_CCM_TYPE_CRYPTO_ADD = 10,
+ NFP_CCM_TYPE_CRYPTO_DEL = 11,
+ NFP_CCM_TYPE_CRYPTO_UPDATE = 12,
__NFP_CCM_TYPE_MAX,
};
#define NFP_CCM_ABI_VERSION 1
+#define NFP_CCM_TYPE_REPLY_BIT 7
+#define __NFP_CCM_REPLY(req) (BIT(NFP_CCM_TYPE_REPLY_BIT) | (req))
+
struct nfp_ccm_hdr {
- u8 type;
- u8 ver;
- __be16 tag;
+ union {
+ struct {
+ u8 type;
+ u8 ver;
+ __be16 tag;
+ };
+ __be32 raw;
+ };
};
static inline u8 nfp_ccm_get_type(struct sk_buff *skb)
@@ -41,15 +54,31 @@ static inline u8 nfp_ccm_get_type(struct sk_buff *skb)
return hdr->type;
}
-static inline unsigned int nfp_ccm_get_tag(struct sk_buff *skb)
+static inline __be16 __nfp_ccm_get_tag(struct sk_buff *skb)
{
struct nfp_ccm_hdr *hdr;
hdr = (struct nfp_ccm_hdr *)skb->data;
- return be16_to_cpu(hdr->tag);
+ return hdr->tag;
+}
+
+static inline unsigned int nfp_ccm_get_tag(struct sk_buff *skb)
+{
+ return be16_to_cpu(__nfp_ccm_get_tag(skb));
}
+#define NFP_NET_MBOX_TLV_TYPE GENMASK(31, 16)
+#define NFP_NET_MBOX_TLV_LEN GENMASK(15, 0)
+
+enum nfp_ccm_mbox_tlv_type {
+ NFP_NET_MBOX_TLV_TYPE_UNKNOWN = 0,
+ NFP_NET_MBOX_TLV_TYPE_END = 1,
+ NFP_NET_MBOX_TLV_TYPE_MSG = 2,
+ NFP_NET_MBOX_TLV_TYPE_MSG_NOSUP = 3,
+ NFP_NET_MBOX_TLV_TYPE_RESV = 4,
+};
+
/* Implementation */
/**
@@ -71,7 +100,7 @@ struct nfp_ccm {
u16 tag_alloc_last;
struct sk_buff_head replies;
- struct wait_queue_head wq;
+ wait_queue_head_t wq;
};
int nfp_ccm_init(struct nfp_ccm *ccm, struct nfp_app *app);
@@ -80,4 +109,23 @@ void nfp_ccm_rx(struct nfp_ccm *ccm, struct sk_buff *skb);
struct sk_buff *
nfp_ccm_communicate(struct nfp_ccm *ccm, struct sk_buff *skb,
enum nfp_ccm_type type, unsigned int reply_size);
+
+int nfp_ccm_mbox_alloc(struct nfp_net *nn);
+void nfp_ccm_mbox_free(struct nfp_net *nn);
+int nfp_ccm_mbox_init(struct nfp_net *nn);
+void nfp_ccm_mbox_clean(struct nfp_net *nn);
+bool nfp_ccm_mbox_fits(struct nfp_net *nn, unsigned int size);
+struct sk_buff *
+nfp_ccm_mbox_msg_alloc(struct nfp_net *nn, unsigned int req_size,
+ unsigned int reply_size, gfp_t flags);
+int __nfp_ccm_mbox_communicate(struct nfp_net *nn, struct sk_buff *skb,
+ enum nfp_ccm_type type,
+ unsigned int reply_size,
+ unsigned int max_reply_size, bool critical);
+int nfp_ccm_mbox_communicate(struct nfp_net *nn, struct sk_buff *skb,
+ enum nfp_ccm_type type,
+ unsigned int reply_size,
+ unsigned int max_reply_size);
+int nfp_ccm_mbox_post(struct nfp_net *nn, struct sk_buff *skb,
+ enum nfp_ccm_type type, unsigned int max_reply_size);
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/ccm_mbox.c b/drivers/net/ethernet/netronome/nfp/ccm_mbox.c
new file mode 100644
index 000000000000..f0783aa9e66e
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/ccm_mbox.c
@@ -0,0 +1,743 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2019 Netronome Systems, Inc. */
+
+#include <linux/bitfield.h>
+#include <linux/io.h>
+#include <linux/skbuff.h>
+
+#include "ccm.h"
+#include "nfp_net.h"
+
+/* CCM messages via the mailbox. CMSGs get wrapped into simple TLVs
+ * and copied into the mailbox. Multiple messages can be copied to
+ * form a batch. Threads come in with CMSG formed in an skb, then
+ * enqueue that skb onto the request queue. If the thread's skb is first
+ * in the queue, this thread will handle the mailbox operation. It copies
+ * up to 64 messages into the mailbox (making sure that both requests
+ * and replies will fit). After the FW is done processing the batch it
+ * copies the data out and wakes the waiting threads.
+ * If a thread is waiting it either gets its message completed
+ * (response is copied into the same skb as the request, overwriting
+ * it), or becomes the first in queue.
+ * Completions and next-to-run are signaled via the control buffer
+ * to limit potential cache line bounces.
+ */
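Editorial note: the per-batch mailbox layout produced by nfp_ccm_mbox_copy_in() below can be sketched as follows; this diagram is inferred from the code and is not part of the patch:

/*
 *  mbox_off + NFP_NET_CFG_MBOX_SIMPLE_VAL:
 *    [ MSG TLV hdr ][ request 0 data, padded to 4B ][ RESV TLV hdr + pad ]
 *    [ MSG TLV hdr ][ request 1 data, padded to 4B ]
 *    ...
 *    [ END TLV hdr ]
 *
 * A RESV TLV follows a request only when the expected reply is larger
 * than the request, so the firmware has room to write the reply in place.
 */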
+
+#define NFP_CCM_MBOX_BATCH_LIMIT 64
+#define NFP_CCM_TIMEOUT (NFP_NET_POLL_TIMEOUT * 1000)
+#define NFP_CCM_MAX_QLEN 1024
+
+enum nfp_net_mbox_cmsg_state {
+ NFP_NET_MBOX_CMSG_STATE_QUEUED,
+ NFP_NET_MBOX_CMSG_STATE_NEXT,
+ NFP_NET_MBOX_CMSG_STATE_BUSY,
+ NFP_NET_MBOX_CMSG_STATE_REPLY_FOUND,
+ NFP_NET_MBOX_CMSG_STATE_DONE,
+};
+
+/**
+ * struct nfp_ccm_mbox_cmsg_cb - CCM mailbox specific info
+ * @state: processing state (/stage) of the message
+ * @err: error encountered during processing if any
+ * @max_len: max(request_len, reply_len)
+ * @exp_reply: expected reply length (0 means don't validate)
+ * @posted: the message was posted and nobody waits for the reply
+ */
+struct nfp_ccm_mbox_cmsg_cb {
+ enum nfp_net_mbox_cmsg_state state;
+ int err;
+ unsigned int max_len;
+ unsigned int exp_reply;
+ bool posted;
+};
+
+static u32 nfp_ccm_mbox_max_msg(struct nfp_net *nn)
+{
+ return round_down(nn->tlv_caps.mbox_len, 4) -
+ NFP_NET_CFG_MBOX_SIMPLE_VAL - /* common mbox command header */
+ 4 * 2; /* Msg TLV plus End TLV headers */
+}
+
+static void
+nfp_ccm_mbox_msg_init(struct sk_buff *skb, unsigned int exp_reply, int max_len)
+{
+ struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
+
+ cb->state = NFP_NET_MBOX_CMSG_STATE_QUEUED;
+ cb->err = 0;
+ cb->max_len = max_len;
+ cb->exp_reply = exp_reply;
+ cb->posted = false;
+}
+
+static int nfp_ccm_mbox_maxlen(const struct sk_buff *skb)
+{
+ struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
+
+ return cb->max_len;
+}
+
+static bool nfp_ccm_mbox_done(struct sk_buff *skb)
+{
+ struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
+
+ return cb->state == NFP_NET_MBOX_CMSG_STATE_DONE;
+}
+
+static bool nfp_ccm_mbox_in_progress(struct sk_buff *skb)
+{
+ struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
+
+ return cb->state != NFP_NET_MBOX_CMSG_STATE_QUEUED &&
+ cb->state != NFP_NET_MBOX_CMSG_STATE_NEXT;
+}
+
+static void nfp_ccm_mbox_set_busy(struct sk_buff *skb)
+{
+ struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
+
+ cb->state = NFP_NET_MBOX_CMSG_STATE_BUSY;
+}
+
+static bool nfp_ccm_mbox_is_posted(struct sk_buff *skb)
+{
+ struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
+
+ return cb->posted;
+}
+
+static void nfp_ccm_mbox_mark_posted(struct sk_buff *skb)
+{
+ struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
+
+ cb->posted = true;
+}
+
+static bool nfp_ccm_mbox_is_first(struct nfp_net *nn, struct sk_buff *skb)
+{
+ return skb_queue_is_first(&nn->mbox_cmsg.queue, skb);
+}
+
+static bool nfp_ccm_mbox_should_run(struct nfp_net *nn, struct sk_buff *skb)
+{
+ struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
+
+ return cb->state == NFP_NET_MBOX_CMSG_STATE_NEXT;
+}
+
+static void nfp_ccm_mbox_mark_next_runner(struct nfp_net *nn)
+{
+ struct nfp_ccm_mbox_cmsg_cb *cb;
+ struct sk_buff *skb;
+
+ skb = skb_peek(&nn->mbox_cmsg.queue);
+ if (!skb)
+ return;
+
+ cb = (void *)skb->cb;
+ cb->state = NFP_NET_MBOX_CMSG_STATE_NEXT;
+ if (cb->posted)
+ queue_work(nn->mbox_cmsg.workq, &nn->mbox_cmsg.runq_work);
+}
+
+static void
+nfp_ccm_mbox_write_tlv(struct nfp_net *nn, u32 off, u32 type, u32 len)
+{
+ nn_writel(nn, off,
+ FIELD_PREP(NFP_NET_MBOX_TLV_TYPE, type) |
+ FIELD_PREP(NFP_NET_MBOX_TLV_LEN, len));
+}
+
+static void nfp_ccm_mbox_copy_in(struct nfp_net *nn, struct sk_buff *last)
+{
+ struct sk_buff *skb;
+ int reserve, i, cnt;
+ __be32 *data;
+ u32 off, len;
+
+ off = nn->tlv_caps.mbox_off + NFP_NET_CFG_MBOX_SIMPLE_VAL;
+ skb = __skb_peek(&nn->mbox_cmsg.queue);
+ while (true) {
+ nfp_ccm_mbox_write_tlv(nn, off, NFP_NET_MBOX_TLV_TYPE_MSG,
+ skb->len);
+ off += 4;
+
+ /* Write data word by word, skb->data should be aligned */
+ data = (__be32 *)skb->data;
+ cnt = skb->len / 4;
+ for (i = 0 ; i < cnt; i++) {
+ nn_writel(nn, off, be32_to_cpu(data[i]));
+ off += 4;
+ }
+ if (skb->len & 3) {
+ __be32 tmp = 0;
+
+ memcpy(&tmp, &data[i], skb->len & 3);
+ nn_writel(nn, off, be32_to_cpu(tmp));
+ off += 4;
+ }
+
+ /* Reserve space if reply is bigger */
+ len = round_up(skb->len, 4);
+ reserve = nfp_ccm_mbox_maxlen(skb) - len;
+ if (reserve > 0) {
+ nfp_ccm_mbox_write_tlv(nn, off,
+ NFP_NET_MBOX_TLV_TYPE_RESV,
+ reserve);
+ off += 4 + reserve;
+ }
+
+ if (skb == last)
+ break;
+ skb = skb_queue_next(&nn->mbox_cmsg.queue, skb);
+ }
+
+ nfp_ccm_mbox_write_tlv(nn, off, NFP_NET_MBOX_TLV_TYPE_END, 0);
+}
+
+static struct sk_buff *
+nfp_ccm_mbox_find_req(struct nfp_net *nn, __be16 tag, struct sk_buff *last)
+{
+ struct sk_buff *skb;
+
+ skb = __skb_peek(&nn->mbox_cmsg.queue);
+ while (true) {
+ if (__nfp_ccm_get_tag(skb) == tag)
+ return skb;
+
+ if (skb == last)
+ return NULL;
+ skb = skb_queue_next(&nn->mbox_cmsg.queue, skb);
+ }
+}
+
+static void nfp_ccm_mbox_copy_out(struct nfp_net *nn, struct sk_buff *last)
+{
+ struct nfp_ccm_mbox_cmsg_cb *cb;
+ u8 __iomem *data, *end;
+ struct sk_buff *skb;
+
+ data = nn->dp.ctrl_bar + nn->tlv_caps.mbox_off +
+ NFP_NET_CFG_MBOX_SIMPLE_VAL;
+ end = data + nn->tlv_caps.mbox_len;
+
+ while (true) {
+ unsigned int length, offset, type;
+ struct nfp_ccm_hdr hdr;
+ u32 tlv_hdr;
+
+ tlv_hdr = readl(data);
+ type = FIELD_GET(NFP_NET_MBOX_TLV_TYPE, tlv_hdr);
+ length = FIELD_GET(NFP_NET_MBOX_TLV_LEN, tlv_hdr);
+ offset = data - nn->dp.ctrl_bar;
+
+ /* Advance past the header */
+ data += 4;
+
+ if (data + length > end) {
+ nn_dp_warn(&nn->dp, "mailbox oversized TLV type:%d offset:%u len:%u\n",
+ type, offset, length);
+ break;
+ }
+
+ if (type == NFP_NET_MBOX_TLV_TYPE_END)
+ break;
+ if (type == NFP_NET_MBOX_TLV_TYPE_RESV)
+ goto next_tlv;
+ if (type != NFP_NET_MBOX_TLV_TYPE_MSG &&
+ type != NFP_NET_MBOX_TLV_TYPE_MSG_NOSUP) {
+ nn_dp_warn(&nn->dp, "mailbox unknown TLV type:%d offset:%u len:%u\n",
+ type, offset, length);
+ break;
+ }
+
+ if (length < 4) {
+ nn_dp_warn(&nn->dp, "mailbox msg too short to contain header TLV type:%d offset:%u len:%u\n",
+ type, offset, length);
+ break;
+ }
+
+ hdr.raw = cpu_to_be32(readl(data));
+
+ skb = nfp_ccm_mbox_find_req(nn, hdr.tag, last);
+ if (!skb) {
+ nn_dp_warn(&nn->dp, "mailbox request not found:%u\n",
+ be16_to_cpu(hdr.tag));
+ break;
+ }
+ cb = (void *)skb->cb;
+
+ if (type == NFP_NET_MBOX_TLV_TYPE_MSG_NOSUP) {
+ nn_dp_warn(&nn->dp,
+ "mailbox msg not supported type:%d\n",
+ nfp_ccm_get_type(skb));
+ cb->err = -EIO;
+ goto next_tlv;
+ }
+
+ if (hdr.type != __NFP_CCM_REPLY(nfp_ccm_get_type(skb))) {
+ nn_dp_warn(&nn->dp, "mailbox msg reply wrong type:%u expected:%lu\n",
+ hdr.type,
+ __NFP_CCM_REPLY(nfp_ccm_get_type(skb)));
+ cb->err = -EIO;
+ goto next_tlv;
+ }
+ if (cb->exp_reply && length != cb->exp_reply) {
+ nn_dp_warn(&nn->dp, "mailbox msg reply wrong size type:%u expected:%u have:%u\n",
+ hdr.type, length, cb->exp_reply);
+ cb->err = -EIO;
+ goto next_tlv;
+ }
+ if (length > cb->max_len) {
+ nn_dp_warn(&nn->dp, "mailbox msg oversized reply type:%u max:%u have:%u\n",
+ hdr.type, cb->max_len, length);
+ cb->err = -EIO;
+ goto next_tlv;
+ }
+
+ if (!cb->posted) {
+ __be32 *skb_data;
+ int i, cnt;
+
+ if (length <= skb->len)
+ __skb_trim(skb, length);
+ else
+ skb_put(skb, length - skb->len);
+
+ /* We overcopy here slightly, but that's okay,
+ * the skb is large enough, and the garbage will
+ * be ignored (beyond skb->len).
+ */
+ skb_data = (__be32 *)skb->data;
+ memcpy(skb_data, &hdr, 4);
+
+ cnt = DIV_ROUND_UP(length, 4);
+ for (i = 1 ; i < cnt; i++)
+ skb_data[i] = cpu_to_be32(readl(data + i * 4));
+ }
+
+ cb->state = NFP_NET_MBOX_CMSG_STATE_REPLY_FOUND;
+next_tlv:
+ data += round_up(length, 4);
+ if (data + 4 > end) {
+ nn_dp_warn(&nn->dp,
+ "reached end of MBOX without END TLV\n");
+ break;
+ }
+ }
+
+ smp_wmb(); /* order the skb->data vs. cb->state */
+ spin_lock_bh(&nn->mbox_cmsg.queue.lock);
+ do {
+ skb = __skb_dequeue(&nn->mbox_cmsg.queue);
+ cb = (void *)skb->cb;
+
+ if (cb->state != NFP_NET_MBOX_CMSG_STATE_REPLY_FOUND) {
+ cb->err = -ENOENT;
+ smp_wmb(); /* order the cb->err vs. cb->state */
+ }
+ cb->state = NFP_NET_MBOX_CMSG_STATE_DONE;
+
+ if (cb->posted) {
+ if (cb->err)
+ nn_dp_warn(&nn->dp,
+ "mailbox posted msg failed type:%u err:%d\n",
+ nfp_ccm_get_type(skb), cb->err);
+ dev_consume_skb_any(skb);
+ }
+ } while (skb != last);
+
+ nfp_ccm_mbox_mark_next_runner(nn);
+ spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
+}
+
+static void
+nfp_ccm_mbox_mark_all_err(struct nfp_net *nn, struct sk_buff *last, int err)
+{
+ struct nfp_ccm_mbox_cmsg_cb *cb;
+ struct sk_buff *skb;
+
+ spin_lock_bh(&nn->mbox_cmsg.queue.lock);
+ do {
+ skb = __skb_dequeue(&nn->mbox_cmsg.queue);
+ cb = (void *)skb->cb;
+
+ cb->err = err;
+ smp_wmb(); /* order the cb->err vs. cb->state */
+ cb->state = NFP_NET_MBOX_CMSG_STATE_DONE;
+ } while (skb != last);
+
+ nfp_ccm_mbox_mark_next_runner(nn);
+ spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
+}
+
+static void nfp_ccm_mbox_run_queue_unlock(struct nfp_net *nn)
+ __releases(&nn->mbox_cmsg.queue.lock)
+{
+ int space = nn->tlv_caps.mbox_len - NFP_NET_CFG_MBOX_SIMPLE_VAL;
+ struct sk_buff *skb, *last;
+ int cnt, err;
+
+ space -= 4; /* for End TLV */
+
+ /* First skb must fit, because it's ours and we checked it fits */
+ cnt = 1;
+ last = skb = __skb_peek(&nn->mbox_cmsg.queue);
+ space -= 4 + nfp_ccm_mbox_maxlen(skb);
+
+ while (!skb_queue_is_last(&nn->mbox_cmsg.queue, last)) {
+ skb = skb_queue_next(&nn->mbox_cmsg.queue, last);
+ space -= 4 + nfp_ccm_mbox_maxlen(skb);
+ if (space < 0)
+ break;
+ last = skb;
+ nfp_ccm_mbox_set_busy(skb);
+ cnt++;
+ if (cnt == NFP_CCM_MBOX_BATCH_LIMIT)
+ break;
+ }
+ spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
+
+ /* Now we own all skbs marked in progress; new requests may arrive
+ * at the end of the queue.
+ */
+
+ nn_ctrl_bar_lock(nn);
+
+ nfp_ccm_mbox_copy_in(nn, last);
+
+ err = nfp_net_mbox_reconfig(nn, NFP_NET_CFG_MBOX_CMD_TLV_CMSG);
+ if (!err)
+ nfp_ccm_mbox_copy_out(nn, last);
+ else
+ nfp_ccm_mbox_mark_all_err(nn, last, -EIO);
+
+ nn_ctrl_bar_unlock(nn);
+
+ wake_up_all(&nn->mbox_cmsg.wq);
+}
+
+static int nfp_ccm_mbox_skb_return(struct sk_buff *skb)
+{
+ struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;
+
+ if (cb->err)
+ dev_kfree_skb_any(skb);
+ return cb->err;
+}
+
+/* If the wait timed out but the command is already in progress we have
+ * to wait until it finishes. Runners have ownership of the skbs marked
+ * as busy.
+ */
+static int
+nfp_ccm_mbox_unlink_unlock(struct nfp_net *nn, struct sk_buff *skb,
+ enum nfp_ccm_type type)
+ __releases(&nn->mbox_cmsg.queue.lock)
+{
+ bool was_first;
+
+ if (nfp_ccm_mbox_in_progress(skb)) {
+ spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
+
+ wait_event(nn->mbox_cmsg.wq, nfp_ccm_mbox_done(skb));
+ smp_rmb(); /* pairs with smp_wmb() after data is written */
+ return nfp_ccm_mbox_skb_return(skb);
+ }
+
+ was_first = nfp_ccm_mbox_should_run(nn, skb);
+ __skb_unlink(skb, &nn->mbox_cmsg.queue);
+ if (was_first)
+ nfp_ccm_mbox_mark_next_runner(nn);
+
+ spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
+
+ if (was_first)
+ wake_up_all(&nn->mbox_cmsg.wq);
+
+ nn_dp_warn(&nn->dp, "time out waiting for mbox response to 0x%02x\n",
+ type);
+ return -ETIMEDOUT;
+}
+
+static int
+nfp_ccm_mbox_msg_prepare(struct nfp_net *nn, struct sk_buff *skb,
+ enum nfp_ccm_type type,
+ unsigned int reply_size, unsigned int max_reply_size,
+ gfp_t flags)
+{
+ const unsigned int mbox_max = nfp_ccm_mbox_max_msg(nn);
+ unsigned int max_len;
+ ssize_t undersize;
+ int err;
+
+ if (unlikely(!(nn->tlv_caps.mbox_cmsg_types & BIT(type)))) {
+ nn_dp_warn(&nn->dp,
+ "message type %d not supported by mailbox\n", type);
+ return -EINVAL;
+ }
+
+ /* If the reply size is unknown, assume it will take the entire
+ * mailbox; callers should do their best to ensure this never
+ * happens.
+ */
+ if (!max_reply_size)
+ max_reply_size = mbox_max;
+ max_reply_size = round_up(max_reply_size, 4);
+
+ /* Make sure we can fit the entire reply into the skb,
+ * and that we don't have to slow down the mbox handler
+ * with allocations.
+ */
+ undersize = max_reply_size - (skb_end_pointer(skb) - skb->data);
+ if (undersize > 0) {
+ err = pskb_expand_head(skb, 0, undersize, flags);
+ if (err) {
+ nn_dp_warn(&nn->dp,
+ "can't allocate reply buffer for mailbox\n");
+ return err;
+ }
+ }
+
+ /* Make sure that request and response both fit into the mailbox */
+ max_len = max(max_reply_size, round_up(skb->len, 4));
+ if (max_len > mbox_max) {
+ nn_dp_warn(&nn->dp,
+ "message too big for the mailbox: %u/%u vs %u\n",
+ skb->len, max_reply_size, mbox_max);
+ return -EMSGSIZE;
+ }
+
+ nfp_ccm_mbox_msg_init(skb, reply_size, max_len);
+
+ return 0;
+}
+
+static int
+nfp_ccm_mbox_msg_enqueue(struct nfp_net *nn, struct sk_buff *skb,
+ enum nfp_ccm_type type, bool critical)
+{
+ struct nfp_ccm_hdr *hdr;
+
+ assert_spin_locked(&nn->mbox_cmsg.queue.lock);
+
+ if (!critical && nn->mbox_cmsg.queue.qlen >= NFP_CCM_MAX_QLEN) {
+ nn_dp_warn(&nn->dp, "mailbox request queue too long\n");
+ return -EBUSY;
+ }
+
+ hdr = (void *)skb->data;
+ hdr->ver = NFP_CCM_ABI_VERSION;
+ hdr->type = type;
+ hdr->tag = cpu_to_be16(nn->mbox_cmsg.tag++);
+
+ __skb_queue_tail(&nn->mbox_cmsg.queue, skb);
+
+ return 0;
+}
+
+int __nfp_ccm_mbox_communicate(struct nfp_net *nn, struct sk_buff *skb,
+ enum nfp_ccm_type type,
+ unsigned int reply_size,
+ unsigned int max_reply_size, bool critical)
+{
+ int err;
+
+ err = nfp_ccm_mbox_msg_prepare(nn, skb, type, reply_size,
+ max_reply_size, GFP_KERNEL);
+ if (err)
+ goto err_free_skb;
+
+ spin_lock_bh(&nn->mbox_cmsg.queue.lock);
+
+ err = nfp_ccm_mbox_msg_enqueue(nn, skb, type, critical);
+ if (err)
+ goto err_unlock;
+
+ /* First in queue takes the mailbox lock and processes the batch */
+ if (!nfp_ccm_mbox_is_first(nn, skb)) {
+ bool to;
+
+ spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
+
+ to = !wait_event_timeout(nn->mbox_cmsg.wq,
+ nfp_ccm_mbox_done(skb) ||
+ nfp_ccm_mbox_should_run(nn, skb),
+ msecs_to_jiffies(NFP_CCM_TIMEOUT));
+
+ /* fast path for those completed by another thread */
+ if (nfp_ccm_mbox_done(skb)) {
+ smp_rmb(); /* pairs with wmb after data is written */
+ return nfp_ccm_mbox_skb_return(skb);
+ }
+
+ spin_lock_bh(&nn->mbox_cmsg.queue.lock);
+
+ if (!nfp_ccm_mbox_is_first(nn, skb)) {
+ WARN_ON(!to);
+
+ err = nfp_ccm_mbox_unlink_unlock(nn, skb, type);
+ if (err)
+ goto err_free_skb;
+ return 0;
+ }
+ }
+
+ /* run queue expects the lock held */
+ nfp_ccm_mbox_run_queue_unlock(nn);
+ return nfp_ccm_mbox_skb_return(skb);
+
+err_unlock:
+ spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
+err_free_skb:
+ dev_kfree_skb_any(skb);
+ return err;
+}
+
+int nfp_ccm_mbox_communicate(struct nfp_net *nn, struct sk_buff *skb,
+ enum nfp_ccm_type type,
+ unsigned int reply_size,
+ unsigned int max_reply_size)
+{
+ return __nfp_ccm_mbox_communicate(nn, skb, type, reply_size,
+ max_reply_size, false);
+}
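Editorial note: a caller is expected to allocate the request with nfp_ccm_mbox_msg_alloc(), fill in the message body (the CCM header is written by the enqueue path), and hand the skb to nfp_ccm_mbox_communicate(); on failure the skb is consumed, on success the reply overwrites the request data. The TLS code added later in this patch (nfp_net_tls_communicate_simple() and friends) follows this pattern; below is a condensed, hedged sketch using the crypto reset message from crypto/fw.h, with example_mbox_request() as a hypothetical name:

static int example_mbox_request(struct nfp_net *nn)
{
	struct nfp_crypto_reply_simple *reply;
	struct nfp_crypto_req_reset *req;
	struct sk_buff *skb;
	int err;

	skb = nfp_ccm_mbox_msg_alloc(nn, sizeof(*req), sizeof(*reply),
				     GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	req = (void *)skb->data;
	req->ep_id = 0;

	err = nfp_ccm_mbox_communicate(nn, skb, NFP_CCM_TYPE_CRYPTO_RESET,
				       sizeof(*reply), sizeof(*reply));
	if (err)
		return err;	/* skb already freed by the mbox code */

	reply = (void *)skb->data;
	err = -be32_to_cpu(reply->error);
	dev_consume_skb_any(skb);
	return err;
}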
+
+static void nfp_ccm_mbox_post_runq_work(struct work_struct *work)
+{
+ struct sk_buff *skb;
+ struct nfp_net *nn;
+
+ nn = container_of(work, struct nfp_net, mbox_cmsg.runq_work);
+
+ spin_lock_bh(&nn->mbox_cmsg.queue.lock);
+
+ skb = __skb_peek(&nn->mbox_cmsg.queue);
+ if (WARN_ON(!skb || !nfp_ccm_mbox_is_posted(skb) ||
+ !nfp_ccm_mbox_should_run(nn, skb))) {
+ spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
+ return;
+ }
+
+ nfp_ccm_mbox_run_queue_unlock(nn);
+}
+
+static void nfp_ccm_mbox_post_wait_work(struct work_struct *work)
+{
+ struct sk_buff *skb;
+ struct nfp_net *nn;
+ int err;
+
+ nn = container_of(work, struct nfp_net, mbox_cmsg.wait_work);
+
+ skb = skb_peek(&nn->mbox_cmsg.queue);
+ if (WARN_ON(!skb || !nfp_ccm_mbox_is_posted(skb)))
+ /* Should never happen, so it's unclear what to do here. */
+ goto exit_unlock_wake;
+
+ err = nfp_net_mbox_reconfig_wait_posted(nn);
+ if (!err)
+ nfp_ccm_mbox_copy_out(nn, skb);
+ else
+ nfp_ccm_mbox_mark_all_err(nn, skb, -EIO);
+exit_unlock_wake:
+ nn_ctrl_bar_unlock(nn);
+ wake_up_all(&nn->mbox_cmsg.wq);
+}
+
+int nfp_ccm_mbox_post(struct nfp_net *nn, struct sk_buff *skb,
+ enum nfp_ccm_type type, unsigned int max_reply_size)
+{
+ int err;
+
+ err = nfp_ccm_mbox_msg_prepare(nn, skb, type, 0, max_reply_size,
+ GFP_ATOMIC);
+ if (err)
+ goto err_free_skb;
+
+ nfp_ccm_mbox_mark_posted(skb);
+
+ spin_lock_bh(&nn->mbox_cmsg.queue.lock);
+
+ err = nfp_ccm_mbox_msg_enqueue(nn, skb, type, false);
+ if (err)
+ goto err_unlock;
+
+ if (nfp_ccm_mbox_is_first(nn, skb)) {
+ if (nn_ctrl_bar_trylock(nn)) {
+ nfp_ccm_mbox_copy_in(nn, skb);
+ nfp_net_mbox_reconfig_post(nn,
+ NFP_NET_CFG_MBOX_CMD_TLV_CMSG);
+ queue_work(nn->mbox_cmsg.workq,
+ &nn->mbox_cmsg.wait_work);
+ } else {
+ nfp_ccm_mbox_mark_next_runner(nn);
+ }
+ }
+
+ spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
+
+ return 0;
+
+err_unlock:
+ spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
+err_free_skb:
+ dev_kfree_skb_any(skb);
+ return err;
+}
+
+struct sk_buff *
+nfp_ccm_mbox_msg_alloc(struct nfp_net *nn, unsigned int req_size,
+ unsigned int reply_size, gfp_t flags)
+{
+ unsigned int max_size;
+ struct sk_buff *skb;
+
+ if (!reply_size)
+ max_size = nfp_ccm_mbox_max_msg(nn);
+ else
+ max_size = max(req_size, reply_size);
+ max_size = round_up(max_size, 4);
+
+ skb = alloc_skb(max_size, flags);
+ if (!skb)
+ return NULL;
+
+ skb_put(skb, req_size);
+
+ return skb;
+}
+
+bool nfp_ccm_mbox_fits(struct nfp_net *nn, unsigned int size)
+{
+ return nfp_ccm_mbox_max_msg(nn) >= size;
+}
+
+int nfp_ccm_mbox_init(struct nfp_net *nn)
+{
+ return 0;
+}
+
+void nfp_ccm_mbox_clean(struct nfp_net *nn)
+{
+ drain_workqueue(nn->mbox_cmsg.workq);
+}
+
+int nfp_ccm_mbox_alloc(struct nfp_net *nn)
+{
+ skb_queue_head_init(&nn->mbox_cmsg.queue);
+ init_waitqueue_head(&nn->mbox_cmsg.wq);
+ INIT_WORK(&nn->mbox_cmsg.wait_work, nfp_ccm_mbox_post_wait_work);
+ INIT_WORK(&nn->mbox_cmsg.runq_work, nfp_ccm_mbox_post_runq_work);
+
+ nn->mbox_cmsg.workq = alloc_workqueue("nfp-ccm-mbox", WQ_UNBOUND, 0);
+ if (!nn->mbox_cmsg.workq)
+ return -ENOMEM;
+ return 0;
+}
+
+void nfp_ccm_mbox_free(struct nfp_net *nn)
+{
+ destroy_workqueue(nn->mbox_cmsg.workq);
+ WARN_ON(!skb_queue_empty(&nn->mbox_cmsg.queue));
+}
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/crypto.h b/drivers/net/ethernet/netronome/nfp/crypto/crypto.h
new file mode 100644
index 000000000000..60372ddf69f0
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/crypto/crypto.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2019 Netronome Systems, Inc. */
+
+#ifndef NFP_CRYPTO_H
+#define NFP_CRYPTO_H 1
+
+struct nfp_net_tls_offload_ctx {
+ __be32 fw_handle[2];
+
+ u8 rx_end[0];
+ /* Tx only fields follow - Rx side does not have enough driver state
+ * to fit these
+ */
+
+ u32 next_seq;
+};
+
+#ifdef CONFIG_TLS_DEVICE
+int nfp_net_tls_init(struct nfp_net *nn);
+#else
+static inline int nfp_net_tls_init(struct nfp_net *nn)
+{
+ return 0;
+}
+#endif
+
+#endif
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/fw.h b/drivers/net/ethernet/netronome/nfp/crypto/fw.h
new file mode 100644
index 000000000000..67413d946c4a
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/crypto/fw.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2019 Netronome Systems, Inc. */
+
+#ifndef NFP_CRYPTO_FW_H
+#define NFP_CRYPTO_FW_H 1
+
+#include "../ccm.h"
+
+#define NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC 0
+#define NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_DEC 1
+
+struct nfp_crypto_reply_simple {
+ struct nfp_ccm_hdr hdr;
+ __be32 error;
+};
+
+struct nfp_crypto_req_reset {
+ struct nfp_ccm_hdr hdr;
+ __be32 ep_id;
+};
+
+#define NFP_NET_TLS_IPVER GENMASK(15, 12)
+#define NFP_NET_TLS_VLAN GENMASK(11, 0)
+#define NFP_NET_TLS_VLAN_UNUSED 4095
+
+struct nfp_crypto_req_add_front {
+ struct nfp_ccm_hdr hdr;
+ __be32 ep_id;
+ u8 resv[3];
+ u8 opcode;
+ u8 key_len;
+ __be16 ipver_vlan __packed;
+ u8 l4_proto;
+#define NFP_NET_TLS_NON_ADDR_KEY_LEN 8
+ u8 l3_addrs[0];
+};
+
+struct nfp_crypto_req_add_back {
+ __be16 src_port;
+ __be16 dst_port;
+ __be32 key[8];
+ __be32 salt;
+ __be32 iv[2];
+ __be32 counter;
+ __be32 rec_no[2];
+ __be32 tcp_seq;
+};
+
+struct nfp_crypto_req_add_v4 {
+ struct nfp_crypto_req_add_front front;
+ __be32 src_ip;
+ __be32 dst_ip;
+ struct nfp_crypto_req_add_back back;
+};
+
+struct nfp_crypto_req_add_v6 {
+ struct nfp_crypto_req_add_front front;
+ __be32 src_ip[4];
+ __be32 dst_ip[4];
+ struct nfp_crypto_req_add_back back;
+};
+
+struct nfp_crypto_reply_add {
+ struct nfp_ccm_hdr hdr;
+ __be32 error;
+ __be32 handle[2];
+};
+
+struct nfp_crypto_req_del {
+ struct nfp_ccm_hdr hdr;
+ __be32 ep_id;
+ __be32 handle[2];
+};
+
+struct nfp_crypto_req_update {
+ struct nfp_ccm_hdr hdr;
+ __be32 ep_id;
+ u8 resv[3];
+ u8 opcode;
+ __be32 handle[2];
+ __be32 rec_no[2];
+ __be32 tcp_seq;
+};
+#endif
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/tls.c b/drivers/net/ethernet/netronome/nfp/crypto/tls.c
new file mode 100644
index 000000000000..96a96b35c0ca
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/crypto/tls.c
@@ -0,0 +1,522 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2019 Netronome Systems, Inc. */
+
+#include <linux/bitfield.h>
+#include <linux/ipv6.h>
+#include <linux/skbuff.h>
+#include <linux/string.h>
+#include <net/tls.h>
+
+#include "../ccm.h"
+#include "../nfp_net.h"
+#include "crypto.h"
+#include "fw.h"
+
+#define NFP_NET_TLS_CCM_MBOX_OPS_MASK \
+ (BIT(NFP_CCM_TYPE_CRYPTO_RESET) | \
+ BIT(NFP_CCM_TYPE_CRYPTO_ADD) | \
+ BIT(NFP_CCM_TYPE_CRYPTO_DEL) | \
+ BIT(NFP_CCM_TYPE_CRYPTO_UPDATE))
+
+#define NFP_NET_TLS_OPCODE_MASK_RX \
+ BIT(NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_DEC)
+
+#define NFP_NET_TLS_OPCODE_MASK_TX \
+ BIT(NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC)
+
+#define NFP_NET_TLS_OPCODE_MASK \
+ (NFP_NET_TLS_OPCODE_MASK_RX | NFP_NET_TLS_OPCODE_MASK_TX)
+
+static void nfp_net_crypto_set_op(struct nfp_net *nn, u8 opcode, bool on)
+{
+ u32 off, val;
+
+ off = nn->tlv_caps.crypto_enable_off + round_down(opcode / 8, 4);
+
+ val = nn_readl(nn, off);
+ if (on)
+ val |= BIT(opcode & 31);
+ else
+ val &= ~BIT(opcode & 31);
+ nn_writel(nn, off, val);
+}
+
+static bool
+__nfp_net_tls_conn_cnt_changed(struct nfp_net *nn, int add,
+ enum tls_offload_ctx_dir direction)
+{
+ u8 opcode;
+ int cnt;
+
+ if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
+ opcode = NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC;
+ nn->ktls_tx_conn_cnt += add;
+ cnt = nn->ktls_tx_conn_cnt;
+ nn->dp.ktls_tx = !!nn->ktls_tx_conn_cnt;
+ } else {
+ opcode = NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_DEC;
+ nn->ktls_rx_conn_cnt += add;
+ cnt = nn->ktls_rx_conn_cnt;
+ }
+
+ /* Care only about 0 -> 1 and 1 -> 0 transitions */
+ if (cnt > 1)
+ return false;
+
+ nfp_net_crypto_set_op(nn, opcode, cnt);
+ return true;
+}
+
+static int
+nfp_net_tls_conn_cnt_changed(struct nfp_net *nn, int add,
+ enum tls_offload_ctx_dir direction)
+{
+ int ret = 0;
+
+ /* Use the BAR lock to protect the connection counts */
+ nn_ctrl_bar_lock(nn);
+ if (__nfp_net_tls_conn_cnt_changed(nn, add, direction)) {
+ ret = __nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_CRYPTO);
+ /* Undo the cnt adjustment if failed */
+ if (ret)
+ __nfp_net_tls_conn_cnt_changed(nn, -add, direction);
+ }
+ nn_ctrl_bar_unlock(nn);
+
+ return ret;
+}
+
+static int
+nfp_net_tls_conn_add(struct nfp_net *nn, enum tls_offload_ctx_dir direction)
+{
+ return nfp_net_tls_conn_cnt_changed(nn, 1, direction);
+}
+
+static int
+nfp_net_tls_conn_remove(struct nfp_net *nn, enum tls_offload_ctx_dir direction)
+{
+ return nfp_net_tls_conn_cnt_changed(nn, -1, direction);
+}
+
+static struct sk_buff *
+nfp_net_tls_alloc_simple(struct nfp_net *nn, size_t req_sz, gfp_t flags)
+{
+ return nfp_ccm_mbox_msg_alloc(nn, req_sz,
+ sizeof(struct nfp_crypto_reply_simple),
+ flags);
+}
+
+static int
+nfp_net_tls_communicate_simple(struct nfp_net *nn, struct sk_buff *skb,
+ const char *name, enum nfp_ccm_type type)
+{
+ struct nfp_crypto_reply_simple *reply;
+ int err;
+
+ err = __nfp_ccm_mbox_communicate(nn, skb, type,
+ sizeof(*reply), sizeof(*reply),
+ type == NFP_CCM_TYPE_CRYPTO_DEL);
+ if (err) {
+ nn_dp_warn(&nn->dp, "failed to %s TLS: %d\n", name, err);
+ return err;
+ }
+
+ reply = (void *)skb->data;
+ err = -be32_to_cpu(reply->error);
+ if (err)
+ nn_dp_warn(&nn->dp, "failed to %s TLS, fw replied: %d\n",
+ name, err);
+ dev_consume_skb_any(skb);
+
+ return err;
+}
+
+static void nfp_net_tls_del_fw(struct nfp_net *nn, __be32 *fw_handle)
+{
+ struct nfp_crypto_req_del *req;
+ struct sk_buff *skb;
+
+ skb = nfp_net_tls_alloc_simple(nn, sizeof(*req), GFP_KERNEL);
+ if (!skb)
+ return;
+
+ req = (void *)skb->data;
+ req->ep_id = 0;
+ memcpy(req->handle, fw_handle, sizeof(req->handle));
+
+ nfp_net_tls_communicate_simple(nn, skb, "delete",
+ NFP_CCM_TYPE_CRYPTO_DEL);
+}
+
+static void
+nfp_net_tls_set_ipver_vlan(struct nfp_crypto_req_add_front *front, u8 ipver)
+{
+ front->ipver_vlan = cpu_to_be16(FIELD_PREP(NFP_NET_TLS_IPVER, ipver) |
+ FIELD_PREP(NFP_NET_TLS_VLAN,
+ NFP_NET_TLS_VLAN_UNUSED));
+}
+
+static void
+nfp_net_tls_assign_conn_id(struct nfp_net *nn,
+ struct nfp_crypto_req_add_front *front)
+{
+ u32 len;
+ u64 id;
+
+ id = atomic64_inc_return(&nn->ktls_conn_id_gen);
+ len = front->key_len - NFP_NET_TLS_NON_ADDR_KEY_LEN;
+
+ memcpy(front->l3_addrs, &id, sizeof(id));
+ memset(front->l3_addrs + sizeof(id), 0, len - sizeof(id));
+}
+
+static struct nfp_crypto_req_add_back *
+nfp_net_tls_set_ipv4(struct nfp_net *nn, struct nfp_crypto_req_add_v4 *req,
+ struct sock *sk, int direction)
+{
+ struct inet_sock *inet = inet_sk(sk);
+
+ req->front.key_len += sizeof(__be32) * 2;
+
+ if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
+ nfp_net_tls_assign_conn_id(nn, &req->front);
+ } else {
+ req->src_ip = inet->inet_daddr;
+ req->dst_ip = inet->inet_saddr;
+ }
+
+ return &req->back;
+}
+
+static struct nfp_crypto_req_add_back *
+nfp_net_tls_set_ipv6(struct nfp_net *nn, struct nfp_crypto_req_add_v6 *req,
+ struct sock *sk, int direction)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ struct ipv6_pinfo *np = inet6_sk(sk);
+
+ req->front.key_len += sizeof(struct in6_addr) * 2;
+
+ if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
+ nfp_net_tls_assign_conn_id(nn, &req->front);
+ } else {
+ memcpy(req->src_ip, &sk->sk_v6_daddr, sizeof(req->src_ip));
+ memcpy(req->dst_ip, &np->saddr, sizeof(req->dst_ip));
+ }
+
+#endif
+ return &req->back;
+}
+
+static void
+nfp_net_tls_set_l4(struct nfp_crypto_req_add_front *front,
+ struct nfp_crypto_req_add_back *back, struct sock *sk,
+ int direction)
+{
+ struct inet_sock *inet = inet_sk(sk);
+
+ front->l4_proto = IPPROTO_TCP;
+
+ if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
+ back->src_port = 0;
+ back->dst_port = 0;
+ } else {
+ back->src_port = inet->inet_dport;
+ back->dst_port = inet->inet_sport;
+ }
+}
+
+static u8 nfp_tls_1_2_dir_to_opcode(enum tls_offload_ctx_dir direction)
+{
+ switch (direction) {
+ case TLS_OFFLOAD_CTX_DIR_TX:
+ return NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC;
+ case TLS_OFFLOAD_CTX_DIR_RX:
+ return NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_DEC;
+ default:
+ WARN_ON_ONCE(1);
+ return 0;
+ }
+}
+
+static bool
+nfp_net_cipher_supported(struct nfp_net *nn, u16 cipher_type,
+ enum tls_offload_ctx_dir direction)
+{
+ u8 bit;
+
+ switch (cipher_type) {
+ case TLS_CIPHER_AES_GCM_128:
+ if (direction == TLS_OFFLOAD_CTX_DIR_TX)
+ bit = NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC;
+ else
+ bit = NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_DEC;
+ break;
+ default:
+ return false;
+ }
+
+ return nn->tlv_caps.crypto_ops & BIT(bit);
+}
+
+static int
+nfp_net_tls_add(struct net_device *netdev, struct sock *sk,
+ enum tls_offload_ctx_dir direction,
+ struct tls_crypto_info *crypto_info,
+ u32 start_offload_tcp_sn)
+{
+ struct tls12_crypto_info_aes_gcm_128 *tls_ci;
+ struct nfp_net *nn = netdev_priv(netdev);
+ struct nfp_crypto_req_add_front *front;
+ struct nfp_net_tls_offload_ctx *ntls;
+ struct nfp_crypto_req_add_back *back;
+ struct nfp_crypto_reply_add *reply;
+ struct sk_buff *skb;
+ size_t req_sz;
+ void *req;
+ bool ipv6;
+ int err;
+
+ BUILD_BUG_ON(sizeof(struct nfp_net_tls_offload_ctx) >
+ TLS_DRIVER_STATE_SIZE_TX);
+ BUILD_BUG_ON(offsetof(struct nfp_net_tls_offload_ctx, rx_end) >
+ TLS_DRIVER_STATE_SIZE_RX);
+
+ if (!nfp_net_cipher_supported(nn, crypto_info->cipher_type, direction))
+ return -EOPNOTSUPP;
+
+ switch (sk->sk_family) {
+#if IS_ENABLED(CONFIG_IPV6)
+ case AF_INET6:
+ if (sk->sk_ipv6only ||
+ ipv6_addr_type(&sk->sk_v6_daddr) != IPV6_ADDR_MAPPED) {
+ req_sz = sizeof(struct nfp_crypto_req_add_v6);
+ ipv6 = true;
+ break;
+ }
+#endif
+ /* fall through */
+ case AF_INET:
+ req_sz = sizeof(struct nfp_crypto_req_add_v4);
+ ipv6 = false;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ err = nfp_net_tls_conn_add(nn, direction);
+ if (err)
+ return err;
+
+ skb = nfp_ccm_mbox_msg_alloc(nn, req_sz, sizeof(*reply), GFP_KERNEL);
+ if (!skb) {
+ err = -ENOMEM;
+ goto err_conn_remove;
+ }
+
+ front = (void *)skb->data;
+ front->ep_id = 0;
+ front->key_len = NFP_NET_TLS_NON_ADDR_KEY_LEN;
+ front->opcode = nfp_tls_1_2_dir_to_opcode(direction);
+ memset(front->resv, 0, sizeof(front->resv));
+
+ nfp_net_tls_set_ipver_vlan(front, ipv6 ? 6 : 4);
+
+ req = (void *)skb->data;
+ if (ipv6)
+ back = nfp_net_tls_set_ipv6(nn, req, sk, direction);
+ else
+ back = nfp_net_tls_set_ipv4(nn, req, sk, direction);
+
+ nfp_net_tls_set_l4(front, back, sk, direction);
+
+ back->counter = 0;
+ back->tcp_seq = cpu_to_be32(start_offload_tcp_sn);
+
+ tls_ci = (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
+ memcpy(back->key, tls_ci->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
+ memset(&back->key[TLS_CIPHER_AES_GCM_128_KEY_SIZE / 4], 0,
+ sizeof(back->key) - TLS_CIPHER_AES_GCM_128_KEY_SIZE);
+ memcpy(back->iv, tls_ci->iv, TLS_CIPHER_AES_GCM_128_IV_SIZE);
+ memcpy(&back->salt, tls_ci->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
+ memcpy(back->rec_no, tls_ci->rec_seq, sizeof(tls_ci->rec_seq));
+
+ /* Get an extra ref on the skb so we can wipe the key afterwards */
+ skb_get(skb);
+
+ err = nfp_ccm_mbox_communicate(nn, skb, NFP_CCM_TYPE_CRYPTO_ADD,
+ sizeof(*reply), sizeof(*reply));
+ reply = (void *)skb->data;
+
+ /* We depend on the CCM mailbox code not reallocating the skb we sent,
+ * so that we can clear the key material out of memory.
+ */
+ if (!WARN_ON_ONCE((u8 *)back < skb->head ||
+ (u8 *)back > skb_end_pointer(skb)) &&
+ !WARN_ON_ONCE((u8 *)&reply[1] > (u8 *)back))
+ memzero_explicit(back, sizeof(*back));
+ dev_consume_skb_any(skb); /* the extra ref from skb_get() above */
+
+ if (err) {
+ nn_dp_warn(&nn->dp, "failed to add TLS: %d (%d)\n",
+ err, direction == TLS_OFFLOAD_CTX_DIR_TX);
+ /* communicate frees skb on error */
+ goto err_conn_remove;
+ }
+
+ err = -be32_to_cpu(reply->error);
+ if (err) {
+ if (err == -ENOSPC) {
+ if (!atomic_fetch_inc(&nn->ktls_no_space))
+ nn_info(nn, "HW TLS table full\n");
+ } else {
+ nn_dp_warn(&nn->dp,
+ "failed to add TLS, FW replied: %d\n", err);
+ }
+ goto err_free_skb;
+ }
+
+ if (!reply->handle[0] && !reply->handle[1]) {
+ nn_dp_warn(&nn->dp, "FW returned NULL handle\n");
+ err = -EINVAL;
+ goto err_fw_remove;
+ }
+
+ ntls = tls_driver_ctx(sk, direction);
+ memcpy(ntls->fw_handle, reply->handle, sizeof(ntls->fw_handle));
+ if (direction == TLS_OFFLOAD_CTX_DIR_TX)
+ ntls->next_seq = start_offload_tcp_sn;
+ dev_consume_skb_any(skb);
+
+ if (direction == TLS_OFFLOAD_CTX_DIR_TX)
+ return 0;
+
+ tls_offload_rx_resync_set_type(sk,
+ TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT);
+ return 0;
+
+err_fw_remove:
+ nfp_net_tls_del_fw(nn, reply->handle);
+err_free_skb:
+ dev_consume_skb_any(skb);
+err_conn_remove:
+ nfp_net_tls_conn_remove(nn, direction);
+ return err;
+}
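
nfp_net_tls_add() above takes an extra reference on the request skb before handing it to the mailbox, then scrubs the key material with memzero_explicit() once the reply has been checked, so the plaintext key does not linger in memory that is about to be recycled. A portable sketch of just the wiping step (the volatile-pointer loop is only a stand-in for the kernel's memzero_explicit(), shown here for illustration):

#include <stddef.h>

/* Zero a buffer holding secrets in a way the compiler is unlikely to
 * optimise away; in-kernel code would simply call memzero_explicit().
 */
static void secure_wipe(void *buf, size_t len)
{
        volatile unsigned char *p = buf;

        while (len--)
                *p++ = 0;
}
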
+
+static void
+nfp_net_tls_del(struct net_device *netdev, struct tls_context *tls_ctx,
+ enum tls_offload_ctx_dir direction)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+ struct nfp_net_tls_offload_ctx *ntls;
+
+ nfp_net_tls_conn_remove(nn, direction);
+
+ ntls = __tls_driver_ctx(tls_ctx, direction);
+ nfp_net_tls_del_fw(nn, ntls->fw_handle);
+}
+
+static int
+nfp_net_tls_resync(struct net_device *netdev, struct sock *sk, u32 seq,
+ u8 *rcd_sn, enum tls_offload_ctx_dir direction)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+ struct nfp_net_tls_offload_ctx *ntls;
+ struct nfp_crypto_req_update *req;
+ struct sk_buff *skb;
+ gfp_t flags;
+ int err;
+
+ flags = direction == TLS_OFFLOAD_CTX_DIR_TX ? GFP_KERNEL : GFP_ATOMIC;
+ skb = nfp_net_tls_alloc_simple(nn, sizeof(*req), flags);
+ if (!skb)
+ return -ENOMEM;
+
+ ntls = tls_driver_ctx(sk, direction);
+ req = (void *)skb->data;
+ req->ep_id = 0;
+ req->opcode = nfp_tls_1_2_dir_to_opcode(direction);
+ memset(req->resv, 0, sizeof(req->resv));
+ memcpy(req->handle, ntls->fw_handle, sizeof(ntls->fw_handle));
+ req->tcp_seq = cpu_to_be32(seq);
+ memcpy(req->rec_no, rcd_sn, sizeof(req->rec_no));
+
+ if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
+ err = nfp_net_tls_communicate_simple(nn, skb, "sync",
+ NFP_CCM_TYPE_CRYPTO_UPDATE);
+ if (err)
+ return err;
+ ntls->next_seq = seq;
+ } else {
+ nfp_ccm_mbox_post(nn, skb, NFP_CCM_TYPE_CRYPTO_UPDATE,
+ sizeof(struct nfp_crypto_reply_simple));
+ }
+
+ return 0;
+}
+
+static const struct tlsdev_ops nfp_net_tls_ops = {
+ .tls_dev_add = nfp_net_tls_add,
+ .tls_dev_del = nfp_net_tls_del,
+ .tls_dev_resync = nfp_net_tls_resync,
+};
+
+static int nfp_net_tls_reset(struct nfp_net *nn)
+{
+ struct nfp_crypto_req_reset *req;
+ struct sk_buff *skb;
+
+ skb = nfp_net_tls_alloc_simple(nn, sizeof(*req), GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ req = (void *)skb->data;
+ req->ep_id = 0;
+
+ return nfp_net_tls_communicate_simple(nn, skb, "reset",
+ NFP_CCM_TYPE_CRYPTO_RESET);
+}
+
+int nfp_net_tls_init(struct nfp_net *nn)
+{
+ struct net_device *netdev = nn->dp.netdev;
+ int err;
+
+ if (!(nn->tlv_caps.crypto_ops & NFP_NET_TLS_OPCODE_MASK))
+ return 0;
+
+ if ((nn->tlv_caps.mbox_cmsg_types & NFP_NET_TLS_CCM_MBOX_OPS_MASK) !=
+ NFP_NET_TLS_CCM_MBOX_OPS_MASK)
+ return 0;
+
+ if (!nfp_ccm_mbox_fits(nn, sizeof(struct nfp_crypto_req_add_v6))) {
+ nn_warn(nn, "disabling TLS offload - mbox too small: %d\n",
+ nn->tlv_caps.mbox_len);
+ return 0;
+ }
+
+ err = nfp_net_tls_reset(nn);
+ if (err)
+ return err;
+
+ nn_ctrl_bar_lock(nn);
+ nn_writel(nn, nn->tlv_caps.crypto_enable_off, 0);
+ err = __nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_CRYPTO);
+ nn_ctrl_bar_unlock(nn);
+ if (err)
+ return err;
+
+ if (nn->tlv_caps.crypto_ops & NFP_NET_TLS_OPCODE_MASK_RX) {
+ netdev->hw_features |= NETIF_F_HW_TLS_RX;
+ netdev->features |= NETIF_F_HW_TLS_RX;
+ }
+ if (nn->tlv_caps.crypto_ops & NFP_NET_TLS_OPCODE_MASK_TX) {
+ netdev->hw_features |= NETIF_F_HW_TLS_TX;
+ netdev->features |= NETIF_F_HW_TLS_TX;
+ }
+
+ netdev->tlsdev_ops = &nfp_net_tls_ops;
+
+ return 0;
+}
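
nfp_net_tls_init() only advertises NETIF_F_HW_TLS_RX/TX after confirming that the firmware exposes at least one TLS opcode and every mailbox command type the driver relies on; the latter is the usual "masked value must equal the full mask" test. A small self-contained illustration of that check (the command bits below are invented for the example, not firmware-defined values):

#include <stdbool.h>
#include <stdint.h>

#define REQ_CMD_ADD     (1u << 0)
#define REQ_CMD_DEL     (1u << 1)
#define REQ_CMD_UPDATE  (1u << 2)
#define REQ_CMD_RESET   (1u << 3)
#define REQ_CMD_MASK    (REQ_CMD_ADD | REQ_CMD_DEL | REQ_CMD_UPDATE | REQ_CMD_RESET)

/* True only when every required command bit is present in the caps word */
static bool all_required_cmds_supported(uint32_t fw_caps)
{
        return (fw_caps & REQ_CMD_MASK) == REQ_CMD_MASK;
}
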
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index c56e31d9f8a4..5a54fe848de4 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -54,7 +54,8 @@ nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan,
static int
nfp_fl_pre_lag(struct nfp_app *app, const struct flow_action_entry *act,
- struct nfp_fl_payload *nfp_flow, int act_len)
+ struct nfp_fl_payload *nfp_flow, int act_len,
+ struct netlink_ext_ack *extack)
{
size_t act_size = sizeof(struct nfp_fl_pre_lag);
struct nfp_fl_pre_lag *pre_lag;
@@ -65,8 +66,10 @@ nfp_fl_pre_lag(struct nfp_app *app, const struct flow_action_entry *act,
if (!out_dev || !netif_is_lag_master(out_dev))
return 0;
- if (act_len + act_size > NFP_FL_MAX_A_SIZ)
+ if (act_len + act_size > NFP_FL_MAX_A_SIZ) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at LAG action");
return -EOPNOTSUPP;
+ }
/* Pre_lag action must be first on action list.
* If other actions already exist they need to be pushed forward.
@@ -76,7 +79,7 @@ nfp_fl_pre_lag(struct nfp_app *app, const struct flow_action_entry *act,
nfp_flow->action_data, act_len);
pre_lag = (struct nfp_fl_pre_lag *)nfp_flow->action_data;
- err = nfp_flower_lag_populate_pre_action(app, out_dev, pre_lag);
+ err = nfp_flower_lag_populate_pre_action(app, out_dev, pre_lag, extack);
if (err)
return err;
@@ -93,7 +96,8 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
const struct flow_action_entry *act,
struct nfp_fl_payload *nfp_flow,
bool last, struct net_device *in_dev,
- enum nfp_flower_tun_type tun_type, int *tun_out_cnt)
+ enum nfp_flower_tun_type tun_type, int *tun_out_cnt,
+ struct netlink_ext_ack *extack)
{
size_t act_size = sizeof(struct nfp_fl_output);
struct nfp_flower_priv *priv = app->priv;
@@ -104,18 +108,24 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
output->head.len_lw = act_size >> NFP_FL_LW_SIZ;
out_dev = act->dev;
- if (!out_dev)
+ if (!out_dev) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid egress interface for mirred action");
return -EOPNOTSUPP;
+ }
tmp_flags = last ? NFP_FL_OUT_FLAGS_LAST : 0;
if (tun_type) {
/* Verify the egress netdev matches the tunnel type. */
- if (!nfp_fl_netdev_is_tunnel_type(out_dev, tun_type))
+ if (!nfp_fl_netdev_is_tunnel_type(out_dev, tun_type)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: egress interface does not match the required tunnel type");
return -EOPNOTSUPP;
+ }
- if (*tun_out_cnt)
+ if (*tun_out_cnt) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot offload more than one tunnel mirred output per filter");
return -EOPNOTSUPP;
+ }
(*tun_out_cnt)++;
output->flags = cpu_to_be16(tmp_flags |
@@ -127,8 +137,10 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
output->flags = cpu_to_be16(tmp_flags);
gid = nfp_flower_lag_get_output_id(app, out_dev);
- if (gid < 0)
+ if (gid < 0) {
+ NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot find group id for LAG action");
return gid;
+ }
output->port = cpu_to_be32(NFP_FL_LAG_OUT | gid);
} else {
/* Set action output parameters. */
@@ -136,29 +148,58 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
if (nfp_netdev_is_nfp_repr(in_dev)) {
/* Confirm ingress and egress are on same device. */
- if (!netdev_port_same_parent_id(in_dev, out_dev))
+ if (!netdev_port_same_parent_id(in_dev, out_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ingress and egress interfaces are on different devices");
return -EOPNOTSUPP;
+ }
}
- if (!nfp_netdev_is_nfp_repr(out_dev))
+ if (!nfp_netdev_is_nfp_repr(out_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: egress interface is not an nfp port");
return -EOPNOTSUPP;
+ }
output->port = cpu_to_be32(nfp_repr_get_port_id(out_dev));
- if (!output->port)
+ if (!output->port) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid port id for egress interface");
return -EOPNOTSUPP;
+ }
}
nfp_flow->meta.shortcut = output->port;
return 0;
}
+static bool
+nfp_flower_tun_is_gre(struct flow_cls_offload *flow, int start_idx)
+{
+ struct flow_action_entry *act = flow->rule->action.entries;
+ int num_act = flow->rule->action.num_entries;
+ int act_idx;
+
+ /* Preparse action list for next mirred or redirect action */
+ for (act_idx = start_idx + 1; act_idx < num_act; act_idx++)
+ if (act[act_idx].id == FLOW_ACTION_REDIRECT ||
+ act[act_idx].id == FLOW_ACTION_MIRRED)
+ return netif_is_gretap(act[act_idx].dev);
+
+ return false;
+}
+
static enum nfp_flower_tun_type
-nfp_fl_get_tun_from_act_l4_port(struct nfp_app *app,
- const struct flow_action_entry *act)
+nfp_fl_get_tun_from_act(struct nfp_app *app,
+ struct flow_cls_offload *flow,
+ const struct flow_action_entry *act, int act_idx)
{
const struct ip_tunnel_info *tun = act->tunnel;
struct nfp_flower_priv *priv = app->priv;
+ /* Determine the tunnel type based on the egress netdev
+ * in the mirred action for tunnels that have no L4 port.
+ */
+ if (nfp_flower_tun_is_gre(flow, act_idx))
+ return NFP_FL_TUNNEL_GRE;
+
switch (tun->key.tp_dst) {
case htons(IANA_VXLAN_UDP_PORT):
return NFP_FL_TUNNEL_VXLAN;
@@ -194,7 +235,8 @@ static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len)
static int
nfp_fl_push_geneve_options(struct nfp_fl_payload *nfp_fl, int *list_len,
- const struct flow_action_entry *act)
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
{
struct ip_tunnel_info *ip_tun = (struct ip_tunnel_info *)act->tunnel;
int opt_len, opt_cnt, act_start, tot_push_len;
@@ -212,20 +254,26 @@ nfp_fl_push_geneve_options(struct nfp_fl_payload *nfp_fl, int *list_len,
struct geneve_opt *opt = (struct geneve_opt *)src;
opt_cnt++;
- if (opt_cnt > NFP_FL_MAX_GENEVE_OPT_CNT)
+ if (opt_cnt > NFP_FL_MAX_GENEVE_OPT_CNT) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed number of geneve options exceeded");
return -EOPNOTSUPP;
+ }
tot_push_len += sizeof(struct nfp_fl_push_geneve) +
opt->length * 4;
- if (tot_push_len > NFP_FL_MAX_GENEVE_OPT_ACT)
+ if (tot_push_len > NFP_FL_MAX_GENEVE_OPT_ACT) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at push geneve options");
return -EOPNOTSUPP;
+ }
opt_len -= sizeof(struct geneve_opt) + opt->length * 4;
src += sizeof(struct geneve_opt) + opt->length * 4;
}
- if (*list_len + tot_push_len > NFP_FL_MAX_A_SIZ)
+ if (*list_len + tot_push_len > NFP_FL_MAX_A_SIZ) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at push geneve options");
return -EOPNOTSUPP;
+ }
act_start = *list_len;
*list_len += tot_push_len;
@@ -256,14 +304,13 @@ nfp_fl_push_geneve_options(struct nfp_fl_payload *nfp_fl, int *list_len,
}
static int
-nfp_fl_set_ipv4_udp_tun(struct nfp_app *app,
- struct nfp_fl_set_ipv4_udp_tun *set_tun,
- const struct flow_action_entry *act,
- struct nfp_fl_pre_tunnel *pre_tun,
- enum nfp_flower_tun_type tun_type,
- struct net_device *netdev)
+nfp_fl_set_ipv4_tun(struct nfp_app *app, struct nfp_fl_set_ipv4_tun *set_tun,
+ const struct flow_action_entry *act,
+ struct nfp_fl_pre_tunnel *pre_tun,
+ enum nfp_flower_tun_type tun_type,
+ struct net_device *netdev, struct netlink_ext_ack *extack)
{
- size_t act_size = sizeof(struct nfp_fl_set_ipv4_udp_tun);
+ size_t act_size = sizeof(struct nfp_fl_set_ipv4_tun);
const struct ip_tunnel_info *ip_tun = act->tunnel;
struct nfp_flower_priv *priv = app->priv;
u32 tmp_set_ip_tun_type_index = 0;
@@ -275,8 +322,10 @@ nfp_fl_set_ipv4_udp_tun(struct nfp_app *app,
NFP_FL_TUNNEL_GENEVE_OPT != TUNNEL_GENEVE_OPT);
if (ip_tun->options_len &&
(tun_type != NFP_FL_TUNNEL_GENEVE ||
- !(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT)))
+ !(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT))) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve options offload");
return -EOPNOTSUPP;
+ }
set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL;
set_tun->head.len_lw = act_size >> NFP_FL_LW_SIZ;
@@ -316,8 +365,10 @@ nfp_fl_set_ipv4_udp_tun(struct nfp_app *app,
set_tun->tos = ip_tun->key.tos;
if (!(ip_tun->key.tun_flags & NFP_FL_TUNNEL_KEY) ||
- ip_tun->key.tun_flags & ~NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS)
+ ip_tun->key.tun_flags & ~NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support tunnel flag offload");
return -EOPNOTSUPP;
+ }
set_tun->tun_flags = ip_tun->key.tun_flags;
if (tun_type == NFP_FL_TUNNEL_GENEVE) {
@@ -345,18 +396,22 @@ static void nfp_fl_set_helper32(u32 value, u32 mask, u8 *p_exact, u8 *p_mask)
static int
nfp_fl_set_eth(const struct flow_action_entry *act, u32 off,
- struct nfp_fl_set_eth *set_eth)
+ struct nfp_fl_set_eth *set_eth, struct netlink_ext_ack *extack)
{
u32 exact, mask;
- if (off + 4 > ETH_ALEN * 2)
+ if (off + 4 > ETH_ALEN * 2) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit ethernet action");
return -EOPNOTSUPP;
+ }
mask = ~act->mangle.mask;
exact = act->mangle.val;
- if (exact & ~mask)
+ if (exact & ~mask) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit ethernet action");
return -EOPNOTSUPP;
+ }
nfp_fl_set_helper32(exact, mask, &set_eth->eth_addr_val[off],
&set_eth->eth_addr_mask[off]);
@@ -377,7 +432,8 @@ struct ipv4_ttl_word {
static int
nfp_fl_set_ip4(const struct flow_action_entry *act, u32 off,
struct nfp_fl_set_ip4_addrs *set_ip_addr,
- struct nfp_fl_set_ip4_ttl_tos *set_ip_ttl_tos)
+ struct nfp_fl_set_ip4_ttl_tos *set_ip_ttl_tos,
+ struct netlink_ext_ack *extack)
{
struct ipv4_ttl_word *ttl_word_mask;
struct ipv4_ttl_word *ttl_word;
@@ -389,8 +445,10 @@ nfp_fl_set_ip4(const struct flow_action_entry *act, u32 off,
mask = (__force __be32)~act->mangle.mask;
exact = (__force __be32)act->mangle.val;
- if (exact & ~mask)
+ if (exact & ~mask) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv4 action");
return -EOPNOTSUPP;
+ }
switch (off) {
case offsetof(struct iphdr, daddr):
@@ -413,8 +471,10 @@ nfp_fl_set_ip4(const struct flow_action_entry *act, u32 off,
ttl_word_mask = (struct ipv4_ttl_word *)&mask;
ttl_word = (struct ipv4_ttl_word *)&exact;
- if (ttl_word_mask->protocol || ttl_word_mask->check)
+ if (ttl_word_mask->protocol || ttl_word_mask->check) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv4 ttl action");
return -EOPNOTSUPP;
+ }
set_ip_ttl_tos->ipv4_ttl_mask |= ttl_word_mask->ttl;
set_ip_ttl_tos->ipv4_ttl &= ~ttl_word_mask->ttl;
@@ -429,8 +489,10 @@ nfp_fl_set_ip4(const struct flow_action_entry *act, u32 off,
tos_word = (struct iphdr *)&exact;
if (tos_word_mask->version || tos_word_mask->ihl ||
- tos_word_mask->tot_len)
+ tos_word_mask->tot_len) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv4 tos action");
return -EOPNOTSUPP;
+ }
set_ip_ttl_tos->ipv4_tos_mask |= tos_word_mask->tos;
set_ip_ttl_tos->ipv4_tos &= ~tos_word_mask->tos;
@@ -441,6 +503,7 @@ nfp_fl_set_ip4(const struct flow_action_entry *act, u32 off,
NFP_FL_LW_SIZ;
break;
default:
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pedit on unsupported section of IPv4 header");
return -EOPNOTSUPP;
}
@@ -468,7 +531,8 @@ struct ipv6_hop_limit_word {
static int
nfp_fl_set_ip6_hop_limit_flow_label(u32 off, __be32 exact, __be32 mask,
- struct nfp_fl_set_ipv6_tc_hl_fl *ip_hl_fl)
+ struct nfp_fl_set_ipv6_tc_hl_fl *ip_hl_fl,
+ struct netlink_ext_ack *extack)
{
struct ipv6_hop_limit_word *fl_hl_mask;
struct ipv6_hop_limit_word *fl_hl;
@@ -478,8 +542,10 @@ nfp_fl_set_ip6_hop_limit_flow_label(u32 off, __be32 exact, __be32 mask,
fl_hl_mask = (struct ipv6_hop_limit_word *)&mask;
fl_hl = (struct ipv6_hop_limit_word *)&exact;
- if (fl_hl_mask->nexthdr || fl_hl_mask->payload_len)
+ if (fl_hl_mask->nexthdr || fl_hl_mask->payload_len) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv6 hop limit action");
return -EOPNOTSUPP;
+ }
ip_hl_fl->ipv6_hop_limit_mask |= fl_hl_mask->hop_limit;
ip_hl_fl->ipv6_hop_limit &= ~fl_hl_mask->hop_limit;
@@ -488,8 +554,10 @@ nfp_fl_set_ip6_hop_limit_flow_label(u32 off, __be32 exact, __be32 mask,
break;
case round_down(offsetof(struct ipv6hdr, flow_lbl), 4):
if (mask & ~IPV6_FLOW_LABEL_MASK ||
- exact & ~IPV6_FLOW_LABEL_MASK)
+ exact & ~IPV6_FLOW_LABEL_MASK) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv6 flow label action");
return -EOPNOTSUPP;
+ }
ip_hl_fl->ipv6_label_mask |= mask;
ip_hl_fl->ipv6_label &= ~mask;
@@ -507,7 +575,8 @@ static int
nfp_fl_set_ip6(const struct flow_action_entry *act, u32 off,
struct nfp_fl_set_ipv6_addr *ip_dst,
struct nfp_fl_set_ipv6_addr *ip_src,
- struct nfp_fl_set_ipv6_tc_hl_fl *ip_hl_fl)
+ struct nfp_fl_set_ipv6_tc_hl_fl *ip_hl_fl,
+ struct netlink_ext_ack *extack)
{
__be32 exact, mask;
int err = 0;
@@ -517,12 +586,14 @@ nfp_fl_set_ip6(const struct flow_action_entry *act, u32 off,
mask = (__force __be32)~act->mangle.mask;
exact = (__force __be32)act->mangle.val;
- if (exact & ~mask)
+ if (exact & ~mask) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv6 action");
return -EOPNOTSUPP;
+ }
if (off < offsetof(struct ipv6hdr, saddr)) {
err = nfp_fl_set_ip6_hop_limit_flow_label(off, exact, mask,
- ip_hl_fl);
+ ip_hl_fl, extack);
} else if (off < offsetof(struct ipv6hdr, daddr)) {
word = (off - offsetof(struct ipv6hdr, saddr)) / sizeof(exact);
nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_SRC, word,
@@ -533,6 +604,7 @@ nfp_fl_set_ip6(const struct flow_action_entry *act, u32 off,
nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_DST, word,
exact, mask, ip_dst);
} else {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pedit on unsupported section of IPv6 header");
return -EOPNOTSUPP;
}
@@ -541,18 +613,23 @@ nfp_fl_set_ip6(const struct flow_action_entry *act, u32 off,
static int
nfp_fl_set_tport(const struct flow_action_entry *act, u32 off,
- struct nfp_fl_set_tport *set_tport, int opcode)
+ struct nfp_fl_set_tport *set_tport, int opcode,
+ struct netlink_ext_ack *extack)
{
u32 exact, mask;
- if (off)
+ if (off) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pedit on unsupported section of L4 header");
return -EOPNOTSUPP;
+ }
mask = ~act->mangle.mask;
exact = act->mangle.val;
- if (exact & ~mask)
+ if (exact & ~mask) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit L4 action");
return -EOPNOTSUPP;
+ }
nfp_fl_set_helper32(exact, mask, set_tport->tp_port_val,
set_tport->tp_port_mask);
@@ -592,11 +669,11 @@ struct nfp_flower_pedit_acts {
};
static int
-nfp_fl_commit_mangle(struct tc_cls_flower_offload *flow, char *nfp_action,
+nfp_fl_commit_mangle(struct flow_cls_offload *flow, char *nfp_action,
int *a_len, struct nfp_flower_pedit_acts *set_act,
u32 *csum_updated)
{
- struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
+ struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
size_t act_size = 0;
u8 ip_proto = 0;
@@ -694,8 +771,9 @@ nfp_fl_commit_mangle(struct tc_cls_flower_offload *flow, char *nfp_action,
static int
nfp_fl_pedit(const struct flow_action_entry *act,
- struct tc_cls_flower_offload *flow, char *nfp_action, int *a_len,
- u32 *csum_updated, struct nfp_flower_pedit_acts *set_act)
+ struct flow_cls_offload *flow, char *nfp_action, int *a_len,
+ u32 *csum_updated, struct nfp_flower_pedit_acts *set_act,
+ struct netlink_ext_ack *extack)
{
enum flow_action_mangle_base htype;
u32 offset;
@@ -705,21 +783,22 @@ nfp_fl_pedit(const struct flow_action_entry *act,
switch (htype) {
case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
- return nfp_fl_set_eth(act, offset, &set_act->set_eth);
+ return nfp_fl_set_eth(act, offset, &set_act->set_eth, extack);
case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
return nfp_fl_set_ip4(act, offset, &set_act->set_ip_addr,
- &set_act->set_ip_ttl_tos);
+ &set_act->set_ip_ttl_tos, extack);
case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
return nfp_fl_set_ip6(act, offset, &set_act->set_ip6_dst,
&set_act->set_ip6_src,
- &set_act->set_ip6_tc_hl_fl);
+ &set_act->set_ip6_tc_hl_fl, extack);
case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
return nfp_fl_set_tport(act, offset, &set_act->set_tport,
- NFP_FL_ACTION_OPCODE_SET_TCP);
+ NFP_FL_ACTION_OPCODE_SET_TCP, extack);
case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
return nfp_fl_set_tport(act, offset, &set_act->set_tport,
- NFP_FL_ACTION_OPCODE_SET_UDP);
+ NFP_FL_ACTION_OPCODE_SET_UDP, extack);
default:
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pedit on unsupported header");
return -EOPNOTSUPP;
}
}
@@ -730,7 +809,8 @@ nfp_flower_output_action(struct nfp_app *app,
struct nfp_fl_payload *nfp_fl, int *a_len,
struct net_device *netdev, bool last,
enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
- int *out_cnt, u32 *csum_updated)
+ int *out_cnt, u32 *csum_updated,
+ struct netlink_ext_ack *extack)
{
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_output *output;
@@ -739,15 +819,19 @@ nfp_flower_output_action(struct nfp_app *app,
/* If csum_updated has not been reset by now, it means HW will
* incorrectly update csums when they are not requested.
*/
- if (*csum_updated)
+ if (*csum_updated) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: set actions without updating checksums are not supported");
return -EOPNOTSUPP;
+ }
- if (*a_len + sizeof(struct nfp_fl_output) > NFP_FL_MAX_A_SIZ)
+ if (*a_len + sizeof(struct nfp_fl_output) > NFP_FL_MAX_A_SIZ) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: mirred output increases action list size beyond the allowed maximum");
return -EOPNOTSUPP;
+ }
output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len];
err = nfp_fl_output(app, output, act, nfp_fl, last, netdev, *tun_type,
- tun_out_cnt);
+ tun_out_cnt, extack);
if (err)
return err;
@@ -757,11 +841,13 @@ nfp_flower_output_action(struct nfp_app *app,
/* nfp_fl_pre_lag returns -err or size of prelag action added.
* This will be 0 if it is not egressing to a lag dev.
*/
- prelag_size = nfp_fl_pre_lag(app, act, nfp_fl, *a_len);
- if (prelag_size < 0)
+ prelag_size = nfp_fl_pre_lag(app, act, nfp_fl, *a_len, extack);
+ if (prelag_size < 0) {
return prelag_size;
- else if (prelag_size > 0 && (!last || *out_cnt))
+ } else if (prelag_size > 0 && (!last || *out_cnt)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: LAG action has to be last action in action list");
return -EOPNOTSUPP;
+ }
*a_len += prelag_size;
}
@@ -772,14 +858,15 @@ nfp_flower_output_action(struct nfp_app *app,
static int
nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
- struct tc_cls_flower_offload *flow,
+ struct flow_cls_offload *flow,
struct nfp_fl_payload *nfp_fl, int *a_len,
struct net_device *netdev,
enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
int *out_cnt, u32 *csum_updated,
- struct nfp_flower_pedit_acts *set_act)
+ struct nfp_flower_pedit_acts *set_act,
+ struct netlink_ext_ack *extack, int act_idx)
{
- struct nfp_fl_set_ipv4_udp_tun *set_tun;
+ struct nfp_fl_set_ipv4_tun *set_tun;
struct nfp_fl_pre_tunnel *pre_tun;
struct nfp_fl_push_vlan *psh_v;
struct nfp_fl_pop_vlan *pop_v;
@@ -792,20 +879,23 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
case FLOW_ACTION_REDIRECT:
err = nfp_flower_output_action(app, act, nfp_fl, a_len, netdev,
true, tun_type, tun_out_cnt,
- out_cnt, csum_updated);
+ out_cnt, csum_updated, extack);
if (err)
return err;
break;
case FLOW_ACTION_MIRRED:
err = nfp_flower_output_action(app, act, nfp_fl, a_len, netdev,
false, tun_type, tun_out_cnt,
- out_cnt, csum_updated);
+ out_cnt, csum_updated, extack);
if (err)
return err;
break;
case FLOW_ACTION_VLAN_POP:
- if (*a_len + sizeof(struct nfp_fl_pop_vlan) > NFP_FL_MAX_A_SIZ)
+ if (*a_len +
+ sizeof(struct nfp_fl_pop_vlan) > NFP_FL_MAX_A_SIZ) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at pop vlan");
return -EOPNOTSUPP;
+ }
pop_v = (struct nfp_fl_pop_vlan *)&nfp_fl->action_data[*a_len];
nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_POPV);
@@ -814,8 +904,11 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
*a_len += sizeof(struct nfp_fl_pop_vlan);
break;
case FLOW_ACTION_VLAN_PUSH:
- if (*a_len + sizeof(struct nfp_fl_push_vlan) > NFP_FL_MAX_A_SIZ)
+ if (*a_len +
+ sizeof(struct nfp_fl_push_vlan) > NFP_FL_MAX_A_SIZ) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at push vlan");
return -EOPNOTSUPP;
+ }
psh_v = (struct nfp_fl_push_vlan *)&nfp_fl->action_data[*a_len];
nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
@@ -826,35 +919,41 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
case FLOW_ACTION_TUNNEL_ENCAP: {
const struct ip_tunnel_info *ip_tun = act->tunnel;
- *tun_type = nfp_fl_get_tun_from_act_l4_port(app, act);
- if (*tun_type == NFP_FL_TUNNEL_NONE)
+ *tun_type = nfp_fl_get_tun_from_act(app, flow, act, act_idx);
+ if (*tun_type == NFP_FL_TUNNEL_NONE) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported tunnel type in action list");
return -EOPNOTSUPP;
+ }
- if (ip_tun->mode & ~NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS)
+ if (ip_tun->mode & ~NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported tunnel flags in action list");
return -EOPNOTSUPP;
+ }
/* Pre-tunnel action is required for tunnel encap.
* This checks for next hop entries on NFP.
* If none, the packet falls back before applying other actions.
*/
if (*a_len + sizeof(struct nfp_fl_pre_tunnel) +
- sizeof(struct nfp_fl_set_ipv4_udp_tun) > NFP_FL_MAX_A_SIZ)
+ sizeof(struct nfp_fl_set_ipv4_tun) > NFP_FL_MAX_A_SIZ) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at tunnel encap");
return -EOPNOTSUPP;
+ }
pre_tun = nfp_fl_pre_tunnel(nfp_fl->action_data, *a_len);
nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
*a_len += sizeof(struct nfp_fl_pre_tunnel);
- err = nfp_fl_push_geneve_options(nfp_fl, a_len, act);
+ err = nfp_fl_push_geneve_options(nfp_fl, a_len, act, extack);
if (err)
return err;
set_tun = (void *)&nfp_fl->action_data[*a_len];
- err = nfp_fl_set_ipv4_udp_tun(app, set_tun, act, pre_tun,
- *tun_type, netdev);
+ err = nfp_fl_set_ipv4_tun(app, set_tun, act, pre_tun,
+ *tun_type, netdev, extack);
if (err)
return err;
- *a_len += sizeof(struct nfp_fl_set_ipv4_udp_tun);
+ *a_len += sizeof(struct nfp_fl_set_ipv4_tun);
}
break;
case FLOW_ACTION_TUNNEL_DECAP:
@@ -862,13 +961,15 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
return 0;
case FLOW_ACTION_MANGLE:
if (nfp_fl_pedit(act, flow, &nfp_fl->action_data[*a_len],
- a_len, csum_updated, set_act))
+ a_len, csum_updated, set_act, extack))
return -EOPNOTSUPP;
break;
case FLOW_ACTION_CSUM:
/* csum action requests recalc of something we have not fixed */
- if (act->csum_flags & ~*csum_updated)
+ if (act->csum_flags & ~*csum_updated) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported csum update action in action list");
return -EOPNOTSUPP;
+ }
/* If we will correctly fix the csum we can remove it from the
* csum update list, which will later be used to check support.
*/
@@ -876,6 +977,7 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
break;
default:
/* Currently we do not handle any other actions. */
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported action in action list");
return -EOPNOTSUPP;
}
@@ -919,9 +1021,10 @@ static bool nfp_fl_check_mangle_end(struct flow_action *flow_act,
}
int nfp_flower_compile_action(struct nfp_app *app,
- struct tc_cls_flower_offload *flow,
+ struct flow_cls_offload *flow,
struct net_device *netdev,
- struct nfp_fl_payload *nfp_flow)
+ struct nfp_fl_payload *nfp_flow,
+ struct netlink_ext_ack *extack)
{
int act_len, act_cnt, err, tun_out_cnt, out_cnt, i;
struct nfp_flower_pedit_acts set_act;
@@ -942,7 +1045,8 @@ int nfp_flower_compile_action(struct nfp_app *app,
memset(&set_act, 0, sizeof(set_act));
err = nfp_flower_loop_action(app, act, flow, nfp_flow, &act_len,
netdev, &tun_type, &tun_out_cnt,
- &out_cnt, &csum_updated, &set_act);
+ &out_cnt, &csum_updated,
+ &set_act, extack, i);
if (err)
return err;
act_cnt++;
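
The action.c changes above thread a struct netlink_ext_ack through every validation helper so that each -EOPNOTSUPP reaches the TC user with a human-readable reason via NL_SET_ERR_MSG_MOD(). A minimal kernel-style sketch of that pattern (the helper name and size-limit parameter here are made up for illustration, not driver API):

#include <linux/errno.h>
#include <linux/netlink.h>

static int example_check_act_size(int act_len, int act_size, int max,
                                  struct netlink_ext_ack *extack)
{
        if (act_len + act_size > max) {
                /* attach the reason before bailing out */
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: action list size exceeded");
                return -EOPNOTSUPP;
        }

        return 0;
}
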
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index 537f7fc19584..0f1706ae5bfc 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -8,6 +8,7 @@
#include <linux/skbuff.h>
#include <linux/types.h>
#include <net/geneve.h>
+#include <net/gre.h>
#include <net/vxlan.h>
#include "../nfp_app.h"
@@ -22,6 +23,7 @@
#define NFP_FLOWER_LAYER_CT BIT(6)
#define NFP_FLOWER_LAYER_VXLAN BIT(7)
+#define NFP_FLOWER_LAYER2_GRE BIT(0)
#define NFP_FLOWER_LAYER2_GENEVE BIT(5)
#define NFP_FLOWER_LAYER2_GENEVE_OP BIT(6)
@@ -37,6 +39,9 @@
#define NFP_FL_IP_FRAG_FIRST BIT(7)
#define NFP_FL_IP_FRAGMENTED BIT(6)
+/* GRE Tunnel flags */
+#define NFP_FL_GRE_FLAG_KEY BIT(2)
+
/* Compressed HW representation of TCP Flags */
#define NFP_FL_TCP_FLAG_URG BIT(4)
#define NFP_FL_TCP_FLAG_PSH BIT(3)
@@ -107,6 +112,7 @@
enum nfp_flower_tun_type {
NFP_FL_TUNNEL_NONE = 0,
+ NFP_FL_TUNNEL_GRE = 1,
NFP_FL_TUNNEL_VXLAN = 2,
NFP_FL_TUNNEL_GENEVE = 4,
};
@@ -203,7 +209,7 @@ struct nfp_fl_pre_tunnel {
__be32 extra[3];
};
-struct nfp_fl_set_ipv4_udp_tun {
+struct nfp_fl_set_ipv4_tun {
struct nfp_fl_act_head head;
__be16 reserved;
__be64 tun_id __packed;
@@ -354,6 +360,16 @@ struct nfp_flower_ipv6 {
struct in6_addr ipv6_dst;
};
+struct nfp_flower_tun_ipv4 {
+ __be32 src;
+ __be32 dst;
+};
+
+struct nfp_flower_tun_ip_ext {
+ u8 tos;
+ u8 ttl;
+};
+
/* Flow Frame IPv4 UDP TUNNEL --> Tunnel details (4W/16B)
* -----------------------------------------------------------------
* 3 2 1
@@ -371,15 +387,42 @@ struct nfp_flower_ipv6 {
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*/
struct nfp_flower_ipv4_udp_tun {
- __be32 ip_src;
- __be32 ip_dst;
+ struct nfp_flower_tun_ipv4 ipv4;
__be16 reserved1;
- u8 tos;
- u8 ttl;
+ struct nfp_flower_tun_ip_ext ip_ext;
__be32 reserved2;
__be32 tun_id;
};
+/* Flow Frame GRE TUNNEL --> Tunnel details (6W/24B)
+ * -----------------------------------------------------------------
+ * 3 2 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv4_addr_src |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv4_addr_dst |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | tun_flags | tos | ttl |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Reserved | Ethertype |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Key |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+
+struct nfp_flower_ipv4_gre_tun {
+ struct nfp_flower_tun_ipv4 ipv4;
+ __be16 tun_flags;
+ struct nfp_flower_tun_ip_ext ip_ext;
+ __be16 reserved1;
+ __be16 ethertype;
+ __be32 tun_key;
+ __be32 reserved2;
+};
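
The comment block above documents the GRE tunnel match as 6 words / 24 bytes. A quick compile-time cross-check of a mirrored layout (plain C11 integer types stand in for the kernel's __be16/__be32, and the mirror struct exists only for this illustration):

#include <stdint.h>

struct gre_tun_mirror {
        uint32_t ipv4_src;
        uint32_t ipv4_dst;
        uint16_t tun_flags;
        uint8_t  tos;
        uint8_t  ttl;
        uint16_t reserved1;
        uint16_t ethertype;
        uint32_t tun_key;
        uint32_t reserved2;
};

/* Must match the 6-word/24-byte frame documented above */
_Static_assert(sizeof(struct gre_tun_mirror) == 24, "GRE tunnel match must be 24 bytes");
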
+
struct nfp_flower_geneve_options {
u8 data[NFP_FL_MAX_GENEVE_OPT_KEY];
};
@@ -530,6 +573,8 @@ nfp_fl_netdev_is_tunnel_type(struct net_device *netdev,
{
if (netif_is_vxlan(netdev))
return tun_type == NFP_FL_TUNNEL_VXLAN;
+ if (netif_is_gretap(netdev))
+ return tun_type == NFP_FL_TUNNEL_GRE;
if (netif_is_geneve(netdev))
return tun_type == NFP_FL_TUNNEL_GENEVE;
@@ -546,6 +591,8 @@ static inline bool nfp_fl_is_netdev_to_offload(struct net_device *netdev)
return true;
if (netif_is_geneve(netdev))
return true;
+ if (netif_is_gretap(netdev))
+ return true;
return false;
}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
index 5db838f45694..63907aeb3884 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
@@ -156,7 +156,8 @@ nfp_fl_lag_find_group_for_master_with_lag(struct nfp_fl_lag *lag,
int nfp_flower_lag_populate_pre_action(struct nfp_app *app,
struct net_device *master,
- struct nfp_fl_pre_lag *pre_act)
+ struct nfp_fl_pre_lag *pre_act,
+ struct netlink_ext_ack *extack)
{
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_lag_group *group = NULL;
@@ -167,6 +168,7 @@ int nfp_flower_lag_populate_pre_action(struct nfp_app *app,
master);
if (!group) {
mutex_unlock(&priv->nfp_lag.lock);
+ NL_SET_ERR_MSG_MOD(extack, "invalid entry: group does not exist for LAG action");
return -ENOENT;
}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index 40957a8dbfe6..af9441d5787f 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -343,19 +343,22 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
struct nfp_fl_payload *sub_flow1,
struct nfp_fl_payload *sub_flow2);
int nfp_flower_compile_flow_match(struct nfp_app *app,
- struct tc_cls_flower_offload *flow,
+ struct flow_cls_offload *flow,
struct nfp_fl_key_ls *key_ls,
struct net_device *netdev,
struct nfp_fl_payload *nfp_flow,
- enum nfp_flower_tun_type tun_type);
+ enum nfp_flower_tun_type tun_type,
+ struct netlink_ext_ack *extack);
int nfp_flower_compile_action(struct nfp_app *app,
- struct tc_cls_flower_offload *flow,
+ struct flow_cls_offload *flow,
struct net_device *netdev,
- struct nfp_fl_payload *nfp_flow);
+ struct nfp_fl_payload *nfp_flow,
+ struct netlink_ext_ack *extack);
int nfp_compile_flow_metadata(struct nfp_app *app,
- struct tc_cls_flower_offload *flow,
+ struct flow_cls_offload *flow,
struct nfp_fl_payload *nfp_flow,
- struct net_device *netdev);
+ struct net_device *netdev,
+ struct netlink_ext_ack *extack);
void __nfp_modify_flow_metadata(struct nfp_flower_priv *priv,
struct nfp_fl_payload *nfp_flow);
int nfp_modify_flow_metadata(struct nfp_app *app,
@@ -389,7 +392,8 @@ int nfp_flower_lag_netdev_event(struct nfp_flower_priv *priv,
bool nfp_flower_lag_unprocessed_msg(struct nfp_app *app, struct sk_buff *skb);
int nfp_flower_lag_populate_pre_action(struct nfp_app *app,
struct net_device *master,
- struct nfp_fl_pre_lag *pre_act);
+ struct nfp_fl_pre_lag *pre_act,
+ struct netlink_ext_ack *extack);
int nfp_flower_lag_get_output_id(struct nfp_app *app,
struct net_device *master);
void nfp_flower_qos_init(struct nfp_app *app);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index bfa4bf34911d..9cc3ba17ff69 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -10,9 +10,9 @@
static void
nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext,
struct nfp_flower_meta_tci *msk,
- struct tc_cls_flower_offload *flow, u8 key_type)
+ struct flow_cls_offload *flow, u8 key_type)
{
- struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
+ struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
u16 tmp_tci;
memset(ext, 0, sizeof(struct nfp_flower_meta_tci));
@@ -54,7 +54,8 @@ nfp_flower_compile_ext_meta(struct nfp_flower_ext_meta *frame, u32 key_ext)
static int
nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
- bool mask_version, enum nfp_flower_tun_type tun_type)
+ bool mask_version, enum nfp_flower_tun_type tun_type,
+ struct netlink_ext_ack *extack)
{
if (mask_version) {
frame->in_port = cpu_to_be32(~0);
@@ -64,8 +65,10 @@ nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
if (tun_type) {
frame->in_port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
} else {
- if (!cmsg_port)
+ if (!cmsg_port) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid ingress interface for match offload");
return -EOPNOTSUPP;
+ }
frame->in_port = cpu_to_be32(cmsg_port);
}
@@ -75,9 +78,9 @@ nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
static void
nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
struct nfp_flower_mac_mpls *msk,
- struct tc_cls_flower_offload *flow)
+ struct flow_cls_offload *flow)
{
- struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
+ struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
memset(ext, 0, sizeof(struct nfp_flower_mac_mpls));
memset(msk, 0, sizeof(struct nfp_flower_mac_mpls));
@@ -127,9 +130,9 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
static void
nfp_flower_compile_tport(struct nfp_flower_tp_ports *ext,
struct nfp_flower_tp_ports *msk,
- struct tc_cls_flower_offload *flow)
+ struct flow_cls_offload *flow)
{
- struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
+ struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
memset(ext, 0, sizeof(struct nfp_flower_tp_ports));
memset(msk, 0, sizeof(struct nfp_flower_tp_ports));
@@ -148,9 +151,9 @@ nfp_flower_compile_tport(struct nfp_flower_tp_ports *ext,
static void
nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *ext,
struct nfp_flower_ip_ext *msk,
- struct tc_cls_flower_offload *flow)
+ struct flow_cls_offload *flow)
{
- struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
+ struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_match_basic match;
@@ -222,9 +225,9 @@ nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *ext,
static void
nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *ext,
struct nfp_flower_ipv4 *msk,
- struct tc_cls_flower_offload *flow)
+ struct flow_cls_offload *flow)
{
- struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
+ struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
struct flow_match_ipv4_addrs match;
memset(ext, 0, sizeof(struct nfp_flower_ipv4));
@@ -244,9 +247,9 @@ nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *ext,
static void
nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *ext,
struct nfp_flower_ipv6 *msk,
- struct tc_cls_flower_offload *flow)
+ struct flow_cls_offload *flow)
{
- struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
+ struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
memset(ext, 0, sizeof(struct nfp_flower_ipv6));
memset(msk, 0, sizeof(struct nfp_flower_ipv6));
@@ -266,7 +269,7 @@ nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *ext,
static int
nfp_flower_compile_geneve_opt(void *ext, void *msk,
- struct tc_cls_flower_offload *flow)
+ struct flow_cls_offload *flow)
{
struct flow_match_enc_opts match;
@@ -278,11 +281,76 @@ nfp_flower_compile_geneve_opt(void *ext, void *msk,
}
static void
+nfp_flower_compile_tun_ipv4_addrs(struct nfp_flower_tun_ipv4 *ext,
+ struct nfp_flower_tun_ipv4 *msk,
+ struct flow_cls_offload *flow)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
+ struct flow_match_ipv4_addrs match;
+
+ flow_rule_match_enc_ipv4_addrs(rule, &match);
+ ext->src = match.key->src;
+ ext->dst = match.key->dst;
+ msk->src = match.mask->src;
+ msk->dst = match.mask->dst;
+ }
+}
+
+static void
+nfp_flower_compile_tun_ip_ext(struct nfp_flower_tun_ip_ext *ext,
+ struct nfp_flower_tun_ip_ext *msk,
+ struct flow_cls_offload *flow)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
+ struct flow_match_ip match;
+
+ flow_rule_match_enc_ip(rule, &match);
+ ext->tos = match.key->tos;
+ ext->ttl = match.key->ttl;
+ msk->tos = match.mask->tos;
+ msk->ttl = match.mask->ttl;
+ }
+}
+
+static void
+nfp_flower_compile_ipv4_gre_tun(struct nfp_flower_ipv4_gre_tun *ext,
+ struct nfp_flower_ipv4_gre_tun *msk,
+ struct flow_cls_offload *flow)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
+
+ memset(ext, 0, sizeof(struct nfp_flower_ipv4_gre_tun));
+ memset(msk, 0, sizeof(struct nfp_flower_ipv4_gre_tun));
+
+ /* NVGRE is the only supported GRE tunnel type */
+ ext->ethertype = cpu_to_be16(ETH_P_TEB);
+ msk->ethertype = cpu_to_be16(~0);
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
+ struct flow_match_enc_keyid match;
+
+ flow_rule_match_enc_keyid(rule, &match);
+ ext->tun_key = match.key->keyid;
+ msk->tun_key = match.mask->keyid;
+
+ ext->tun_flags = cpu_to_be16(NFP_FL_GRE_FLAG_KEY);
+ msk->tun_flags = cpu_to_be16(NFP_FL_GRE_FLAG_KEY);
+ }
+
+ nfp_flower_compile_tun_ipv4_addrs(&ext->ipv4, &msk->ipv4, flow);
+ nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, flow);
+}
+
+static void
nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *ext,
struct nfp_flower_ipv4_udp_tun *msk,
- struct tc_cls_flower_offload *flow)
+ struct flow_cls_offload *flow)
{
- struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
+ struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
memset(ext, 0, sizeof(struct nfp_flower_ipv4_udp_tun));
memset(msk, 0, sizeof(struct nfp_flower_ipv4_udp_tun));
@@ -298,33 +366,17 @@ nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *ext,
msk->tun_id = cpu_to_be32(temp_vni);
}
- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
- struct flow_match_ipv4_addrs match;
-
- flow_rule_match_enc_ipv4_addrs(rule, &match);
- ext->ip_src = match.key->src;
- ext->ip_dst = match.key->dst;
- msk->ip_src = match.mask->src;
- msk->ip_dst = match.mask->dst;
- }
-
- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
- struct flow_match_ip match;
-
- flow_rule_match_enc_ip(rule, &match);
- ext->tos = match.key->tos;
- ext->ttl = match.key->ttl;
- msk->tos = match.mask->tos;
- msk->ttl = match.mask->ttl;
- }
+ nfp_flower_compile_tun_ipv4_addrs(&ext->ipv4, &msk->ipv4, flow);
+ nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, flow);
}
int nfp_flower_compile_flow_match(struct nfp_app *app,
- struct tc_cls_flower_offload *flow,
+ struct flow_cls_offload *flow,
struct nfp_fl_key_ls *key_ls,
struct net_device *netdev,
struct nfp_fl_payload *nfp_flow,
- enum nfp_flower_tun_type tun_type)
+ enum nfp_flower_tun_type tun_type,
+ struct netlink_ext_ack *extack)
{
u32 port_id;
int err;
@@ -357,13 +409,13 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
/* Populate Exact Port data. */
err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext,
- port_id, false, tun_type);
+ port_id, false, tun_type, extack);
if (err)
return err;
/* Populate Mask Port Data. */
err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
- port_id, true, tun_type);
+ port_id, true, tun_type, extack);
if (err)
return err;
@@ -402,12 +454,27 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
msk += sizeof(struct nfp_flower_ipv6);
}
+ if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GRE) {
+ __be32 tun_dst;
+
+ nfp_flower_compile_ipv4_gre_tun((void *)ext, (void *)msk, flow);
+ tun_dst = ((struct nfp_flower_ipv4_gre_tun *)ext)->ipv4.dst;
+ ext += sizeof(struct nfp_flower_ipv4_gre_tun);
+ msk += sizeof(struct nfp_flower_ipv4_gre_tun);
+
+ /* Store the tunnel destination in the rule data.
+ * This must be present and be an exact match.
+ */
+ nfp_flow->nfp_tun_ipv4_addr = tun_dst;
+ nfp_tunnel_add_ipv4_off(app, tun_dst);
+ }
+
if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN ||
key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
__be32 tun_dst;
nfp_flower_compile_ipv4_udp_tun((void *)ext, (void *)msk, flow);
- tun_dst = ((struct nfp_flower_ipv4_udp_tun *)ext)->ip_dst;
+ tun_dst = ((struct nfp_flower_ipv4_udp_tun *)ext)->ipv4.dst;
ext += sizeof(struct nfp_flower_ipv4_udp_tun);
msk += sizeof(struct nfp_flower_ipv4_udp_tun);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
index 3d326efdc814..7c4a15e967df 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
@@ -290,9 +290,10 @@ nfp_check_mask_remove(struct nfp_app *app, char *mask_data, u32 mask_len,
}
int nfp_compile_flow_metadata(struct nfp_app *app,
- struct tc_cls_flower_offload *flow,
+ struct flow_cls_offload *flow,
struct nfp_fl_payload *nfp_flow,
- struct net_device *netdev)
+ struct net_device *netdev,
+ struct netlink_ext_ack *extack)
{
struct nfp_fl_stats_ctx_to_flow *ctx_entry;
struct nfp_flower_priv *priv = app->priv;
@@ -302,8 +303,10 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
int err;
err = nfp_get_stats_entry(app, &stats_cxt);
- if (err)
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot allocate new stats context");
return err;
+ }
nfp_flow->meta.host_ctx_id = cpu_to_be32(stats_cxt);
nfp_flow->meta.host_cookie = cpu_to_be64(flow->cookie);
@@ -328,6 +331,12 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
if (!nfp_check_mask_add(app, nfp_flow->mask_data,
nfp_flow->meta.mask_len,
&nfp_flow->meta.flags, &new_mask_id)) {
+ NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot allocate a new mask id");
+ if (nfp_release_stats_entry(app, stats_cxt)) {
+ NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot release stats context");
+ err = -EINVAL;
+ goto err_remove_rhash;
+ }
err = -ENOENT;
goto err_remove_rhash;
}
@@ -343,6 +352,21 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
check_entry = nfp_flower_search_fl_table(app, flow->cookie, netdev);
if (check_entry) {
+ NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot offload duplicate flow entry");
+ if (nfp_release_stats_entry(app, stats_cxt)) {
+ NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot release stats context");
+ err = -EINVAL;
+ goto err_remove_mask;
+ }
+
+ if (!nfp_check_mask_remove(app, nfp_flow->mask_data,
+ nfp_flow->meta.mask_len,
+ NULL, &new_mask_id)) {
+ NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot release mask id");
+ err = -EINVAL;
+ goto err_remove_mask;
+ }
+
err = -EEXIST;
goto err_remove_mask;
}
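
The metadata.c hunk above grows the error handling in nfp_compile_flow_metadata(): when a duplicate flow or a mask allocation failure is detected, the previously acquired stats context and mask id are released before jumping to the common unwind labels. The general shape of that acquire/undo-in-reverse-order idiom, sketched with hypothetical helpers (not driver functions):

#include <errno.h>

/* Hypothetical resources used only to illustrate the unwind order */
static int acquire_a(void) { return 0; }
static int acquire_b(void) { return -ENOSPC; }
static void release_a(void) { }

static int setup_both(void)
{
        int err;

        err = acquire_a();
        if (err)
                return err;

        err = acquire_b();
        if (err)
                goto err_release_a;     /* undo in reverse order */

        return 0;

err_release_a:
        release_a();
        return err;
}
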
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 1fbfeb43c538..7e725fa60347 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -52,8 +52,7 @@
#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R \
(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
- BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
- BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))
+ BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS))
#define NFP_FLOWER_MERGE_FIELDS \
(NFP_FLOWER_LAYER_PORT | \
@@ -122,9 +121,9 @@ nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
return 0;
}
-static bool nfp_flower_check_higher_than_mac(struct tc_cls_flower_offload *f)
+static bool nfp_flower_check_higher_than_mac(struct flow_cls_offload *f)
{
- struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS) ||
flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS) ||
@@ -132,14 +131,25 @@ static bool nfp_flower_check_higher_than_mac(struct tc_cls_flower_offload *f)
flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
}
+static bool nfp_flower_check_higher_than_l3(struct flow_cls_offload *f)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+
+ return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) ||
+ flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
+}
+
static int
-nfp_flower_calc_opt_layer(struct flow_match_enc_opts *enc_opts,
- u32 *key_layer_two, int *key_size)
+nfp_flower_calc_opt_layer(struct flow_dissector_key_enc_opts *enc_opts,
+ u32 *key_layer_two, int *key_size,
+ struct netlink_ext_ack *extack)
{
- if (enc_opts->key->len > NFP_FL_MAX_GENEVE_OPT_KEY)
+ if (enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: geneve options exceed maximum length");
return -EOPNOTSUPP;
+ }
- if (enc_opts->key->len > 0) {
+ if (enc_opts->len > 0) {
*key_layer_two |= NFP_FLOWER_LAYER2_GENEVE_OP;
*key_size += sizeof(struct nfp_flower_geneve_options);
}
@@ -148,13 +158,65 @@ nfp_flower_calc_opt_layer(struct flow_match_enc_opts *enc_opts,
}
static int
+nfp_flower_calc_udp_tun_layer(struct flow_dissector_key_ports *enc_ports,
+ struct flow_dissector_key_enc_opts *enc_op,
+ u32 *key_layer_two, u8 *key_layer, int *key_size,
+ struct nfp_flower_priv *priv,
+ enum nfp_flower_tun_type *tun_type,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ switch (enc_ports->dst) {
+ case htons(IANA_VXLAN_UDP_PORT):
+ *tun_type = NFP_FL_TUNNEL_VXLAN;
+ *key_layer |= NFP_FLOWER_LAYER_VXLAN;
+ *key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
+
+ if (enc_op) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on vxlan tunnels");
+ return -EOPNOTSUPP;
+ }
+ break;
+ case htons(GENEVE_UDP_PORT):
+ if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve offload");
+ return -EOPNOTSUPP;
+ }
+ *tun_type = NFP_FL_TUNNEL_GENEVE;
+ *key_layer |= NFP_FLOWER_LAYER_EXT_META;
+ *key_size += sizeof(struct nfp_flower_ext_meta);
+ *key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;
+ *key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
+
+ if (!enc_op)
+ break;
+ if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve option offload");
+ return -EOPNOTSUPP;
+ }
+ err = nfp_flower_calc_opt_layer(enc_op, key_layer_two,
+ key_size, extack);
+ if (err)
+ return err;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel type unknown");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int
nfp_flower_calculate_key_layers(struct nfp_app *app,
struct net_device *netdev,
struct nfp_fl_key_ls *ret_key_ls,
- struct tc_cls_flower_offload *flow,
- enum nfp_flower_tun_type *tun_type)
+ struct flow_cls_offload *flow,
+ enum nfp_flower_tun_type *tun_type,
+ struct netlink_ext_ack *extack)
{
- struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
+ struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
struct flow_dissector *dissector = rule->match.dissector;
struct flow_match_basic basic = { NULL, NULL};
struct nfp_flower_priv *priv = app->priv;
@@ -163,14 +225,18 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
int key_size;
int err;
- if (dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR)
+ if (dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match not supported");
return -EOPNOTSUPP;
+ }
/* If any tun dissector is used then the required set must be used. */
if (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR &&
(dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
- != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
+ != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel match not supported");
return -EOPNOTSUPP;
+ }
key_layer_two = 0;
key_layer = NFP_FLOWER_LAYER_PORT;
@@ -188,8 +254,10 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
flow_rule_match_vlan(rule, &vlan);
if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_PCP) &&
- vlan.key->vlan_priority)
+ vlan.key->vlan_priority) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support VLAN PCP offload");
return -EOPNOTSUPP;
+ }
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
@@ -200,56 +268,68 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
flow_rule_match_enc_control(rule, &enc_ctl);
- if (enc_ctl.mask->addr_type != 0xffff ||
- enc_ctl.key->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS)
+ if (enc_ctl.mask->addr_type != 0xffff) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: wildcarded protocols on tunnels are not supported");
+ return -EOPNOTSUPP;
+ }
+ if (enc_ctl.key->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only IPv4 tunnels are supported");
return -EOPNOTSUPP;
+ }
/* These fields are already verified as used. */
flow_rule_match_enc_ipv4_addrs(rule, &ipv4_addrs);
- if (ipv4_addrs.mask->dst != cpu_to_be32(~0))
- return -EOPNOTSUPP;
-
- flow_rule_match_enc_ports(rule, &enc_ports);
- if (enc_ports.mask->dst != cpu_to_be16(~0))
+ if (ipv4_addrs.mask->dst != cpu_to_be32(~0)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match IPv4 destination address is supported");
return -EOPNOTSUPP;
+ }
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS))
flow_rule_match_enc_opts(rule, &enc_op);
- switch (enc_ports.key->dst) {
- case htons(IANA_VXLAN_UDP_PORT):
- *tun_type = NFP_FL_TUNNEL_VXLAN;
- key_layer |= NFP_FLOWER_LAYER_VXLAN;
- key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
- if (enc_op.key)
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
+ /* check if GRE, which has no enc_ports */
+ if (netif_is_gretap(netdev)) {
+ *tun_type = NFP_FL_TUNNEL_GRE;
+ key_layer |= NFP_FLOWER_LAYER_EXT_META;
+ key_size += sizeof(struct nfp_flower_ext_meta);
+ key_layer_two |= NFP_FLOWER_LAYER2_GRE;
+ key_size +=
+ sizeof(struct nfp_flower_ipv4_gre_tun);
+
+ if (enc_op.key) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on GRE tunnels");
+ return -EOPNOTSUPP;
+ }
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: an exact match on L4 destination port is required for non-GRE tunnels");
return -EOPNOTSUPP;
- break;
- case htons(GENEVE_UDP_PORT):
- if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE))
+ }
+ } else {
+ flow_rule_match_enc_ports(rule, &enc_ports);
+ if (enc_ports.mask->dst != cpu_to_be16(~0)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match L4 destination port is supported");
return -EOPNOTSUPP;
- *tun_type = NFP_FL_TUNNEL_GENEVE;
- key_layer |= NFP_FLOWER_LAYER_EXT_META;
- key_size += sizeof(struct nfp_flower_ext_meta);
- key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;
- key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
+ }
- if (!enc_op.key)
- break;
- if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT))
- return -EOPNOTSUPP;
- err = nfp_flower_calc_opt_layer(&enc_op, &key_layer_two,
- &key_size);
+ err = nfp_flower_calc_udp_tun_layer(enc_ports.key,
+ enc_op.key,
+ &key_layer_two,
+ &key_layer,
+ &key_size, priv,
+ tun_type, extack);
if (err)
return err;
- break;
- default:
- return -EOPNOTSUPP;
- }
- /* Ensure the ingress netdev matches the expected tun type. */
- if (!nfp_fl_netdev_is_tunnel_type(netdev, *tun_type))
- return -EOPNOTSUPP;
+ /* Ensure the ingress netdev matches the expected
+ * tun type.
+ */
+ if (!nfp_fl_netdev_is_tunnel_type(netdev, *tun_type)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ingress netdev does not match the expected tunnel type");
+ return -EOPNOTSUPP;
+ }
+ }
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC))
@@ -272,6 +352,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
* because we rely on it to get to the host.
*/
case cpu_to_be16(ETH_P_ARP):
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ARP not supported");
return -EOPNOTSUPP;
case cpu_to_be16(ETH_P_MPLS_UC):
@@ -290,14 +371,15 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
/* Other ethtype - we need to check the masks for the
* remainder of the key to ensure we can offload.
*/
- if (nfp_flower_check_higher_than_mac(flow))
+ if (nfp_flower_check_higher_than_mac(flow)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: non IPv4/IPv6 offload with L3/L4 matches not supported");
return -EOPNOTSUPP;
+ }
break;
}
}
if (basic.mask && basic.mask->ip_proto) {
- /* Ethernet type is present in the key. */
switch (basic.key->ip_proto) {
case IPPROTO_TCP:
case IPPROTO_UDP:
@@ -311,7 +393,11 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
/* Other ip proto - we need to check the masks for the
* remainder of the key to ensure we can offload.
*/
- return -EOPNOTSUPP;
+ if (nfp_flower_check_higher_than_l3(flow)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unknown IP protocol with L4 matches not supported");
+ return -EOPNOTSUPP;
+ }
+ break;
}
}
@@ -322,22 +408,28 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
flow_rule_match_tcp(rule, &tcp);
tcp_flags = be16_to_cpu(tcp.key->flags);
- if (tcp_flags & ~NFP_FLOWER_SUPPORTED_TCPFLAGS)
+ if (tcp_flags & ~NFP_FLOWER_SUPPORTED_TCPFLAGS) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: no match support for selected TCP flags");
return -EOPNOTSUPP;
+ }
/* We only support PSH and URG flags when either
* FIN, SYN or RST is present as well.
*/
if ((tcp_flags & (TCPHDR_PSH | TCPHDR_URG)) &&
- !(tcp_flags & (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST)))
+ !(tcp_flags & (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST))) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: PSH and URG is only supported when used with FIN, SYN or RST");
return -EOPNOTSUPP;
+ }
/* We need to store TCP flags in either the IPv4 or IPv6 key
* space, thus we need to ensure we include an IPv4/IPv6 key
* layer if we have not done so already.
*/
- if (!basic.key)
+ if (!basic.key) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on TCP flags requires a match on L3 protocol");
return -EOPNOTSUPP;
+ }
if (!(key_layer & NFP_FLOWER_LAYER_IPV4) &&
!(key_layer & NFP_FLOWER_LAYER_IPV6)) {
@@ -353,6 +445,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
break;
default:
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on TCP flags requires a match on IPv4/IPv6");
return -EOPNOTSUPP;
}
}
@@ -362,8 +455,10 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
struct flow_match_control ctl;
flow_rule_match_control(rule, &ctl);
- if (ctl.key->flags & ~NFP_FLOWER_SUPPORTED_CTLFLAGS)
+ if (ctl.key->flags & ~NFP_FLOWER_SUPPORTED_CTLFLAGS) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on unknown control flag");
return -EOPNOTSUPP;
+ }
}
ret_key_ls->key_layer = key_layer;
@@ -771,14 +866,16 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
struct nfp_fl_payload *sub_flow1,
struct nfp_fl_payload *sub_flow2)
{
- struct tc_cls_flower_offload merge_tc_off;
+ struct flow_cls_offload merge_tc_off;
struct nfp_flower_priv *priv = app->priv;
+ struct netlink_ext_ack *extack = NULL;
struct nfp_fl_payload *merge_flow;
struct nfp_fl_key_ls merge_key_ls;
int err;
ASSERT_RTNL();
+ extack = merge_tc_off.common.extack;
if (sub_flow1 == sub_flow2 ||
nfp_flower_is_merge_flow(sub_flow1) ||
nfp_flower_is_merge_flow(sub_flow2))
@@ -816,7 +913,7 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
merge_tc_off.cookie = merge_flow->tc_flower_cookie;
err = nfp_compile_flow_metadata(app, &merge_tc_off, merge_flow,
- merge_flow->ingress_dev);
+ merge_flow->ingress_dev, extack);
if (err)
goto err_unlink_sub_flow2;
@@ -865,15 +962,17 @@ err_destroy_merge_flow:
*/
static int
nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
- struct tc_cls_flower_offload *flow)
+ struct flow_cls_offload *flow)
{
enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
struct nfp_flower_priv *priv = app->priv;
+ struct netlink_ext_ack *extack = NULL;
struct nfp_fl_payload *flow_pay;
struct nfp_fl_key_ls *key_layer;
struct nfp_port *port = NULL;
int err;
+ extack = flow->common.extack;
if (nfp_netdev_is_nfp_repr(netdev))
port = nfp_port_from_netdev(netdev);
@@ -882,7 +981,7 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
return -ENOMEM;
err = nfp_flower_calculate_key_layers(app, netdev, key_layer, flow,
- &tun_type);
+ &tun_type, extack);
if (err)
goto err_free_key_ls;
@@ -893,23 +992,25 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
}
err = nfp_flower_compile_flow_match(app, flow, key_layer, netdev,
- flow_pay, tun_type);
+ flow_pay, tun_type, extack);
if (err)
goto err_destroy_flow;
- err = nfp_flower_compile_action(app, flow, netdev, flow_pay);
+ err = nfp_flower_compile_action(app, flow, netdev, flow_pay, extack);
if (err)
goto err_destroy_flow;
- err = nfp_compile_flow_metadata(app, flow, flow_pay, netdev);
+ err = nfp_compile_flow_metadata(app, flow, flow_pay, netdev, extack);
if (err)
goto err_destroy_flow;
flow_pay->tc_flower_cookie = flow->cookie;
err = rhashtable_insert_fast(&priv->flow_table, &flow_pay->fl_node,
nfp_flower_table_params);
- if (err)
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot insert flow into tables for offloads");
goto err_release_metadata;
+ }
err = nfp_flower_xmit_flow(app, flow_pay,
NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
@@ -1024,19 +1125,23 @@ nfp_flower_del_linked_merge_flows(struct nfp_app *app,
*/
static int
nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
- struct tc_cls_flower_offload *flow)
+ struct flow_cls_offload *flow)
{
struct nfp_flower_priv *priv = app->priv;
+ struct netlink_ext_ack *extack = NULL;
struct nfp_fl_payload *nfp_flow;
struct nfp_port *port = NULL;
int err;
+ extack = flow->common.extack;
if (nfp_netdev_is_nfp_repr(netdev))
port = nfp_port_from_netdev(netdev);
nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
- if (!nfp_flow)
+ if (!nfp_flow) {
+ NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot remove flow that does not exist");
return -ENOENT;
+ }
err = nfp_modify_flow_metadata(app, nfp_flow);
if (err)
@@ -1127,15 +1232,19 @@ nfp_flower_update_merge_stats(struct nfp_app *app,
*/
static int
nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
- struct tc_cls_flower_offload *flow)
+ struct flow_cls_offload *flow)
{
struct nfp_flower_priv *priv = app->priv;
+ struct netlink_ext_ack *extack = NULL;
struct nfp_fl_payload *nfp_flow;
u32 ctx_id;
+ extack = flow->common.extack;
nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
- if (!nfp_flow)
+ if (!nfp_flow) {
+ NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot dump stats for flow that does not exist");
return -EINVAL;
+ }
ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);
@@ -1156,17 +1265,17 @@ nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
static int
nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
- struct tc_cls_flower_offload *flower)
+ struct flow_cls_offload *flower)
{
if (!eth_proto_is_802_3(flower->common.protocol))
return -EOPNOTSUPP;
switch (flower->command) {
- case TC_CLSFLOWER_REPLACE:
+ case FLOW_CLS_REPLACE:
return nfp_flower_add_offload(app, netdev, flower);
- case TC_CLSFLOWER_DESTROY:
+ case FLOW_CLS_DESTROY:
return nfp_flower_del_offload(app, netdev, flower);
- case TC_CLSFLOWER_STATS:
+ case FLOW_CLS_STATS:
return nfp_flower_get_stats(app, netdev, flower);
default:
return -EOPNOTSUPP;
@@ -1193,27 +1302,45 @@ static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
}
}
+static LIST_HEAD(nfp_block_cb_list);
+
static int nfp_flower_setup_tc_block(struct net_device *netdev,
- struct tc_block_offload *f)
+ struct flow_block_offload *f)
{
struct nfp_repr *repr = netdev_priv(netdev);
struct nfp_flower_repr_priv *repr_priv;
+ struct flow_block_cb *block_cb;
- if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
return -EOPNOTSUPP;
repr_priv = repr->app_priv;
- repr_priv->block_shared = tcf_block_shared(f->block);
+ repr_priv->block_shared = f->block_shared;
+ f->driver_block_list = &nfp_block_cb_list;
switch (f->command) {
- case TC_BLOCK_BIND:
- return tcf_block_cb_register(f->block,
- nfp_flower_setup_tc_block_cb,
- repr, repr, f->extack);
- case TC_BLOCK_UNBIND:
- tcf_block_cb_unregister(f->block,
- nfp_flower_setup_tc_block_cb,
- repr);
+ case FLOW_BLOCK_BIND:
+ if (flow_block_cb_is_busy(nfp_flower_setup_tc_block_cb, repr,
+ &nfp_block_cb_list))
+ return -EBUSY;
+
+ block_cb = flow_block_cb_alloc(f->net,
+ nfp_flower_setup_tc_block_cb,
+ repr, repr, NULL);
+ if (IS_ERR(block_cb))
+ return PTR_ERR(block_cb);
+
+ flow_block_cb_add(block_cb, f);
+ list_add_tail(&block_cb->driver_list, &nfp_block_cb_list);
+ return 0;
+ case FLOW_BLOCK_UNBIND:
+ block_cb = flow_block_cb_lookup(f, nfp_flower_setup_tc_block_cb,
+ repr);
+ if (!block_cb)
+ return -ENOENT;
+
+ flow_block_cb_remove(block_cb, f);
+ list_del(&block_cb->driver_list);
return 0;
default:
return -EOPNOTSUPP;
@@ -1258,7 +1385,7 @@ static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
void *type_data, void *cb_priv)
{
struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
- struct tc_cls_flower_offload *flower = type_data;
+ struct flow_cls_offload *flower = type_data;
if (flower->common.chain_index)
return -EOPNOTSUPP;
@@ -1272,21 +1399,29 @@ static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
}
}
+static void nfp_flower_setup_indr_tc_release(void *cb_priv)
+{
+ struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
+
+ list_del(&priv->list);
+ kfree(priv);
+}
+
static int
nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
- struct tc_block_offload *f)
+ struct flow_block_offload *f)
{
struct nfp_flower_indr_block_cb_priv *cb_priv;
struct nfp_flower_priv *priv = app->priv;
- int err;
+ struct flow_block_cb *block_cb;
- if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
- !(f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS &&
+ if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
+ !(f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS &&
nfp_flower_internal_port_can_offload(app, netdev)))
return -EOPNOTSUPP;
switch (f->command) {
- case TC_BLOCK_BIND:
+ case FLOW_BLOCK_BIND:
cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL);
if (!cb_priv)
return -ENOMEM;
@@ -1295,26 +1430,32 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
cb_priv->app = app;
list_add(&cb_priv->list, &priv->indr_block_cb_priv);
- err = tcf_block_cb_register(f->block,
- nfp_flower_setup_indr_block_cb,
- cb_priv, cb_priv, f->extack);
- if (err) {
+ block_cb = flow_block_cb_alloc(f->net,
+ nfp_flower_setup_indr_block_cb,
+ cb_priv, cb_priv,
+ nfp_flower_setup_indr_tc_release);
+ if (IS_ERR(block_cb)) {
list_del(&cb_priv->list);
kfree(cb_priv);
+ return PTR_ERR(block_cb);
}
- return err;
- case TC_BLOCK_UNBIND:
+ flow_block_cb_add(block_cb, f);
+ list_add_tail(&block_cb->driver_list, &nfp_block_cb_list);
+ return 0;
+ case FLOW_BLOCK_UNBIND:
cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
if (!cb_priv)
return -ENOENT;
- tcf_block_cb_unregister(f->block,
- nfp_flower_setup_indr_block_cb,
- cb_priv);
- list_del(&cb_priv->list);
- kfree(cb_priv);
+ block_cb = flow_block_cb_lookup(f,
+ nfp_flower_setup_indr_block_cb,
+ cb_priv);
+ if (!block_cb)
+ return -ENOENT;
+ flow_block_cb_remove(block_cb, f);
+ list_del(&block_cb->driver_list);
return 0;
default:
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index 8c67505865a4..a7a80f4b722a 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -162,8 +162,7 @@ void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
}
pay_len = nfp_flower_cmsg_get_data_len(skb);
- if (pay_len != sizeof(struct nfp_tun_active_tuns) +
- sizeof(struct route_ip_info) * count) {
+ if (pay_len != struct_size(payload, tun_info, count)) {
nfp_flower_cmsg_warn(app, "Corruption in tunnel keep-alive message.\n");
return;
}
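struct_size() replaces the open-coded length computation for a flexible-array payload. A small standalone sketch of the equivalent arithmetic, with an illustrative struct layout rather than the real message format:

/* What struct_size(payload, tun_info, count) expands to for a struct with a
 * flexible array member, modeled in plain C; the field layout here is
 * illustrative only.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct route_ip_info {
	uint32_t ipv4;
	uint32_t route_tbl_index;
};

struct tun_active_tuns {
	uint32_t seq;
	uint32_t count;
	struct route_ip_info tun_info[];	/* flexible array member */
};

int main(void)
{
	unsigned int count = 4;
	size_t expected = sizeof(struct tun_active_tuns) +
			  count * sizeof(struct route_ip_info);

	/* struct_size() additionally saturates to SIZE_MAX on overflow,
	 * which this open-coded form does not.
	 */
	printf("expected payload length: %zu bytes\n", expected);
	return 0;
}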
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index 948d1a4b4643..60e57f08de80 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -596,6 +596,10 @@ static int nfp_pci_probe(struct pci_dev *pdev,
struct nfp_pf *pf;
int err;
+ if (pdev->vendor == PCI_VENDOR_ID_NETRONOME &&
+ pdev->device == PCI_DEVICE_ID_NETRONOME_NFP6000_VF)
+ dev_warn(&pdev->dev, "Binding NFP VF device to the NFP PF driver, the VF driver is called 'nfp_netvf'\n");
+
err = pci_enable_device(pdev);
if (err < 0)
return err;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index df9aff2684ed..5d6c3738b494 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -12,11 +12,14 @@
#ifndef _NFP_NET_H_
#define _NFP_NET_H_
+#include <linux/atomic.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/io-64-nonatomic-hi-lo.h>
+#include <linux/semaphore.h>
+#include <linux/workqueue.h>
#include <net/xdp.h>
#include "nfp_net_ctrl.h"
@@ -238,7 +241,7 @@ struct nfp_net_tx_ring {
#define PCIE_DESC_RX_I_TCP_CSUM_OK cpu_to_le16(BIT(11))
#define PCIE_DESC_RX_I_UDP_CSUM cpu_to_le16(BIT(10))
#define PCIE_DESC_RX_I_UDP_CSUM_OK cpu_to_le16(BIT(9))
-#define PCIE_DESC_RX_BPF cpu_to_le16(BIT(8))
+#define PCIE_DESC_RX_DECRYPTED cpu_to_le16(BIT(8))
#define PCIE_DESC_RX_EOP cpu_to_le16(BIT(7))
#define PCIE_DESC_RX_IP4_CSUM cpu_to_le16(BIT(6))
#define PCIE_DESC_RX_IP4_CSUM_OK cpu_to_le16(BIT(5))
@@ -365,6 +368,7 @@ struct nfp_net_rx_ring {
* @hw_csum_rx_inner_ok: Counter of packets where the inner HW checksum was OK
* @hw_csum_rx_complete: Counter of packets with CHECKSUM_COMPLETE reported
* @hw_csum_rx_error: Counter of packets with bad checksums
+ * @hw_tls_rx: Number of packets with TLS decrypted by hardware
* @tx_sync: Seqlock for atomic updates of TX stats
* @tx_pkts: Number of Transmitted packets
* @tx_bytes: Number of Transmitted bytes
@@ -372,6 +376,11 @@ struct nfp_net_rx_ring {
* @hw_csum_tx_inner: Counter of inner TX checksum offload requests
* @tx_gather: Counter of packets with Gather DMA
* @tx_lso: Counter of LSO packets sent
+ * @hw_tls_tx: Counter of TLS packets sent with crypto offloaded to HW
+ * @tls_tx_fallback: Counter of TLS packets sent which had to be encrypted
+ * by the fallback path because packets came out of order
+ * @tls_tx_no_fallback: Counter of TLS packets not sent because the fallback
+ * path could not encrypt them
* @tx_errors: How many TX errors were encountered
* @tx_busy: How often was TX busy (no space)?
* @rx_replace_buf_alloc_fail: Counter of RX buffer allocation failures
@@ -392,7 +401,7 @@ struct nfp_net_r_vector {
struct {
struct tasklet_struct tasklet;
struct sk_buff_head queue;
- struct spinlock lock;
+ spinlock_t lock;
};
};
@@ -408,22 +417,30 @@ struct nfp_net_r_vector {
u64 hw_csum_rx_ok;
u64 hw_csum_rx_inner_ok;
u64 hw_csum_rx_complete;
+ u64 hw_tls_rx;
+
+ u64 hw_csum_rx_error;
+ u64 rx_replace_buf_alloc_fail;
struct nfp_net_tx_ring *xdp_ring;
struct u64_stats_sync tx_sync;
u64 tx_pkts;
u64 tx_bytes;
- u64 hw_csum_tx;
+
+ u64 ____cacheline_aligned_in_smp hw_csum_tx;
u64 hw_csum_tx_inner;
u64 tx_gather;
u64 tx_lso;
+ u64 hw_tls_tx;
- u64 hw_csum_rx_error;
- u64 rx_replace_buf_alloc_fail;
+ u64 tls_tx_fallback;
+ u64 tls_tx_no_fallback;
u64 tx_errors;
u64 tx_busy;
+ /* Cold data follows */
+
u32 irq_vector;
irq_handler_t handler;
char name[IFNAMSIZ + 8];
@@ -458,6 +475,7 @@ struct nfp_stat_pair {
* @netdev: Backpointer to net_device structure
* @is_vf: Is the driver attached to a VF?
* @chained_metadata_format: Firmware will use new metadata format
+ * @ktls_tx: Is kTLS TX enabled?
* @rx_dma_dir: Mapping direction for RX buffers
* @rx_dma_off: Offset at which DMA packets (for XDP headroom)
* @rx_offset: Offset in the RX buffers where packet data starts
@@ -482,6 +500,7 @@ struct nfp_net_dp {
u8 is_vf:1;
u8 chained_metadata_format:1;
+ u8 ktls_tx:1;
u8 rx_dma_dir;
u8 rx_offset;
@@ -549,7 +568,7 @@ struct nfp_net_dp {
* @reconfig_timer: Timer for async reading of reconfig results
* @reconfig_in_progress_update: Update FW is processing now (debug only)
* @bar_lock: vNIC config BAR access lock, protects: update,
- * mailbox area
+ * mailbox area, crypto TLV
* @link_up: Is the link up?
* @link_status_lock: Protects @link_* and ensures atomicity with BAR reading
* @rx_coalesce_usecs: RX interrupt moderation usecs delay parameter
@@ -562,6 +581,18 @@ struct nfp_net_dp {
* @tx_bar: Pointer to mapped TX queues
* @rx_bar: Pointer to mapped FL/RX queues
* @tlv_caps: Parsed TLV capabilities
+ * @ktls_tx_conn_cnt: Number of offloaded kTLS TX connections
+ * @ktls_rx_conn_cnt: Number of offloaded kTLS RX connections
+ * @ktls_conn_id_gen: Trivial generator for kTLS connection ids (for TX)
+ * @ktls_no_space: Counter of firmware rejecting kTLS connection due to
+ * lack of space
+ * @mbox_cmsg: Common Control Message via vNIC mailbox state
+ * @mbox_cmsg.queue: CCM mbox queue of pending messages
+ * @mbox_cmsg.wq: CCM mbox wait queue of waiting processes
+ * @mbox_cmsg.workq: CCM mbox work queue for @wait_work and @runq_work
+ * @mbox_cmsg.wait_work: CCM mbox posted msg reconfig wait work
+ * @mbox_cmsg.runq_work: CCM mbox posted msg queue runner work
+ * @mbox_cmsg.tag: CCM mbox message tag allocator
* @debugfs_dir: Device directory in debugfs
* @vnic_list: Entry on device vNIC list
* @pdev: Backpointer to PCI device
@@ -620,7 +651,7 @@ struct nfp_net {
struct timer_list reconfig_timer;
u32 reconfig_in_progress_update;
- struct mutex bar_lock;
+ struct semaphore bar_lock;
u32 rx_coalesce_usecs;
u32 rx_coalesce_max_frames;
@@ -637,6 +668,22 @@ struct nfp_net {
struct nfp_net_tlv_caps tlv_caps;
+ unsigned int ktls_tx_conn_cnt;
+ unsigned int ktls_rx_conn_cnt;
+
+ atomic64_t ktls_conn_id_gen;
+
+ atomic_t ktls_no_space;
+
+ struct {
+ struct sk_buff_head queue;
+ wait_queue_head_t wq;
+ struct workqueue_struct *workq;
+ struct work_struct wait_work;
+ struct work_struct runq_work;
+ u16 tag;
+ } mbox_cmsg;
+
struct dentry *debugfs_dir;
struct list_head vnic_list;
@@ -848,12 +895,17 @@ static inline void nfp_ctrl_unlock(struct nfp_net *nn)
static inline void nn_ctrl_bar_lock(struct nfp_net *nn)
{
- mutex_lock(&nn->bar_lock);
+ down(&nn->bar_lock);
+}
+
+static inline bool nn_ctrl_bar_trylock(struct nfp_net *nn)
+{
+ return !down_trylock(&nn->bar_lock);
}
static inline void nn_ctrl_bar_unlock(struct nfp_net *nn)
{
- mutex_unlock(&nn->bar_lock);
+ up(&nn->bar_lock);
}
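bar_lock becomes a semaphore so the config BAR can also be taken with a trylock; kernel down_trylock() returns 0 on success, hence the negation in nn_ctrl_bar_trylock(). A small userspace sketch of the same convention, using a POSIX semaphore as a stand-in:

/* POSIX sem_trywait() shares the "0 means acquired" convention with the
 * kernel's down_trylock(), so the boolean wrapper negates it the same way.
 */
#include <semaphore.h>
#include <stdbool.h>
#include <stdio.h>

static sem_t bar_lock;

static bool bar_trylock(void)
{
	/* 0 on success -> true; non-zero (would block) -> false */
	return sem_trywait(&bar_lock) == 0;
}

int main(void)
{
	sem_init(&bar_lock, 0, 1);		/* binary semaphore, unlocked */

	printf("first trylock: %s\n", bar_trylock() ? "acquired" : "busy");
	printf("second trylock: %s\n", bar_trylock() ? "acquired" : "busy");

	sem_post(&bar_lock);			/* models up(&nn->bar_lock) */
	sem_destroy(&bar_lock);
	return 0;
}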
/* Globals */
@@ -883,6 +935,7 @@ void nfp_ctrl_close(struct nfp_net *nn);
void nfp_net_set_ethtool_ops(struct net_device *netdev);
void nfp_net_info(struct nfp_net *nn);
+int __nfp_net_reconfig(struct nfp_net *nn, u32 update);
int nfp_net_reconfig(struct nfp_net *nn, u32 update);
unsigned int nfp_net_rss_key_sz(struct nfp_net *nn);
void nfp_net_rss_write_itbl(struct nfp_net *nn);
@@ -891,6 +944,8 @@ void nfp_net_coalesce_write_cfg(struct nfp_net *nn);
int nfp_net_mbox_lock(struct nfp_net *nn, unsigned int data_size);
int nfp_net_mbox_reconfig(struct nfp_net *nn, u32 mbox_cmd);
int nfp_net_mbox_reconfig_and_unlock(struct nfp_net *nn, u32 mbox_cmd);
+void nfp_net_mbox_reconfig_post(struct nfp_net *nn, u32 update);
+int nfp_net_mbox_reconfig_wait_posted(struct nfp_net *nn);
unsigned int
nfp_net_irqs_alloc(struct pci_dev *pdev, struct msix_entry *irq_entries,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 36a3bd30cfd9..9903805717da 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -23,7 +23,6 @@
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
-#include <linux/lockdep.h>
#include <linux/mm.h>
#include <linux/overflow.h>
#include <linux/page_ref.h>
@@ -37,14 +36,17 @@
#include <linux/vmalloc.h>
#include <linux/ktime.h>
+#include <net/tls.h>
#include <net/vxlan.h>
#include "nfpcore/nfp_nsp.h"
+#include "ccm.h"
#include "nfp_app.h"
#include "nfp_net_ctrl.h"
#include "nfp_net.h"
#include "nfp_net_sriov.h"
#include "nfp_port.h"
+#include "crypto/crypto.h"
/**
* nfp_net_get_fw_version() - Read and parse the FW version
@@ -228,6 +230,7 @@ static void nfp_net_reconfig_sync_enter(struct nfp_net *nn)
spin_lock_bh(&nn->reconfig_lock);
+ WARN_ON(nn->reconfig_sync_present);
nn->reconfig_sync_present = true;
if (nn->reconfig_timer_active) {
@@ -271,12 +274,10 @@ static void nfp_net_reconfig_wait_posted(struct nfp_net *nn)
*
* Return: Negative errno on error, 0 on success
*/
-static int __nfp_net_reconfig(struct nfp_net *nn, u32 update)
+int __nfp_net_reconfig(struct nfp_net *nn, u32 update)
{
int ret;
- lockdep_assert_held(&nn->bar_lock);
-
nfp_net_reconfig_sync_enter(nn);
nfp_net_reconfig_start(nn, update);
@@ -331,7 +332,6 @@ int nfp_net_mbox_reconfig(struct nfp_net *nn, u32 mbox_cmd)
u32 mbox = nn->tlv_caps.mbox_off;
int ret;
- lockdep_assert_held(&nn->bar_lock);
nn_writeq(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_CMD, mbox_cmd);
ret = __nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_MBOX);
@@ -343,6 +343,24 @@ int nfp_net_mbox_reconfig(struct nfp_net *nn, u32 mbox_cmd)
return -nn_readl(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_RET);
}
+void nfp_net_mbox_reconfig_post(struct nfp_net *nn, u32 mbox_cmd)
+{
+ u32 mbox = nn->tlv_caps.mbox_off;
+
+ nn_writeq(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_CMD, mbox_cmd);
+
+ nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_MBOX);
+}
+
+int nfp_net_mbox_reconfig_wait_posted(struct nfp_net *nn)
+{
+ u32 mbox = nn->tlv_caps.mbox_off;
+
+ nfp_net_reconfig_wait_posted(nn);
+
+ return -nn_readl(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_RET);
+}
+
int nfp_net_mbox_reconfig_and_unlock(struct nfp_net *nn, u32 mbox_cmd)
{
int ret;
@@ -804,6 +822,99 @@ static void nfp_net_tx_csum(struct nfp_net_dp *dp,
u64_stats_update_end(&r_vec->tx_sync);
}
+static struct sk_buff *
+nfp_net_tls_tx(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
+ struct sk_buff *skb, u64 *tls_handle, int *nr_frags)
+{
+#ifdef CONFIG_TLS_DEVICE
+ struct nfp_net_tls_offload_ctx *ntls;
+ struct sk_buff *nskb;
+ bool resync_pending;
+ u32 datalen, seq;
+
+ if (likely(!dp->ktls_tx))
+ return skb;
+ if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
+ return skb;
+
+ datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ seq = ntohl(tcp_hdr(skb)->seq);
+ ntls = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
+ resync_pending = tls_offload_tx_resync_pending(skb->sk);
+ if (unlikely(resync_pending || ntls->next_seq != seq)) {
+ /* Pure ACK out of order already */
+ if (!datalen)
+ return skb;
+
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->tls_tx_fallback++;
+ u64_stats_update_end(&r_vec->tx_sync);
+
+ nskb = tls_encrypt_skb(skb);
+ if (!nskb) {
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->tls_tx_no_fallback++;
+ u64_stats_update_end(&r_vec->tx_sync);
+ return NULL;
+ }
+ /* encryption wasn't necessary */
+ if (nskb == skb)
+ return skb;
+ /* we don't re-check ring space */
+ if (unlikely(skb_is_nonlinear(nskb))) {
+ nn_dp_warn(dp, "tls_encrypt_skb() produced fragmented frame\n");
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->tx_errors++;
+ u64_stats_update_end(&r_vec->tx_sync);
+ dev_kfree_skb_any(nskb);
+ return NULL;
+ }
+
+ /* jump forward, a TX may have gotten lost, need to sync TX */
+ if (!resync_pending && seq - ntls->next_seq < U32_MAX / 4)
+ tls_offload_tx_resync_request(nskb->sk);
+
+ *nr_frags = 0;
+ return nskb;
+ }
+
+ if (datalen) {
+ u64_stats_update_begin(&r_vec->tx_sync);
+ if (!skb_is_gso(skb))
+ r_vec->hw_tls_tx++;
+ else
+ r_vec->hw_tls_tx += skb_shinfo(skb)->gso_segs;
+ u64_stats_update_end(&r_vec->tx_sync);
+ }
+
+ memcpy(tls_handle, ntls->fw_handle, sizeof(ntls->fw_handle));
+ ntls->next_seq += datalen;
+#endif
+ return skb;
+}
+
+static void nfp_net_tls_tx_undo(struct sk_buff *skb, u64 tls_handle)
+{
+#ifdef CONFIG_TLS_DEVICE
+ struct nfp_net_tls_offload_ctx *ntls;
+ u32 datalen, seq;
+
+ if (!tls_handle)
+ return;
+ if (WARN_ON_ONCE(!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk)))
+ return;
+
+ datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ seq = ntohl(tcp_hdr(skb)->seq);
+
+ ntls = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
+ if (ntls->next_seq == seq + datalen)
+ ntls->next_seq = seq;
+ else
+ WARN_ON_ONCE(1);
+#endif
+}
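nfp_net_tls_tx() only hands a segment to the NIC when its TCP sequence number matches the next sequence the firmware expects; anything else goes through the software fallback. A simplified standalone sketch of that bookkeeping (the resync request heuristics and statistics are omitted, names are illustrative):

/* Per-packet offload decision reduced to sequence bookkeeping. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct tls_tx_ctx {
	uint32_t next_seq;	/* next TCP sequence the HW expects */
};

static bool tls_tx_offload(struct tls_tx_ctx *ctx, uint32_t seq,
			   uint32_t datalen)
{
	if (seq != ctx->next_seq) {
		/* Out of order: the real driver encrypts in software and,
		 * for a forward jump, asks the TLS core for a resync.
		 */
		return false;
	}
	ctx->next_seq += datalen;	/* stays in step with the HW state */
	return true;
}

int main(void)
{
	struct tls_tx_ctx ctx = { .next_seq = 1000 };

	printf("seq 1000: %s\n", tls_tx_offload(&ctx, 1000, 1448) ? "hw" : "sw fallback");
	printf("seq 4000: %s\n", tls_tx_offload(&ctx, 4000, 1448) ? "hw" : "sw fallback");
	printf("seq 2448: %s\n", tls_tx_offload(&ctx, 2448, 1448) ? "hw" : "sw fallback");
	return 0;
}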
+
static void nfp_net_tx_xmit_more_flush(struct nfp_net_tx_ring *tx_ring)
{
wmb();
@@ -811,24 +922,47 @@ static void nfp_net_tx_xmit_more_flush(struct nfp_net_tx_ring *tx_ring)
tx_ring->wr_ptr_add = 0;
}
-static int nfp_net_prep_port_id(struct sk_buff *skb)
+static int nfp_net_prep_tx_meta(struct sk_buff *skb, u64 tls_handle)
{
struct metadata_dst *md_dst = skb_metadata_dst(skb);
unsigned char *data;
+ u32 meta_id = 0;
+ int md_bytes;
- if (likely(!md_dst))
- return 0;
- if (unlikely(md_dst->type != METADATA_HW_PORT_MUX))
+ if (likely(!md_dst && !tls_handle))
return 0;
+ if (unlikely(md_dst && md_dst->type != METADATA_HW_PORT_MUX)) {
+ if (!tls_handle)
+ return 0;
+ md_dst = NULL;
+ }
+
+ md_bytes = 4 + !!md_dst * 4 + !!tls_handle * 8;
- if (unlikely(skb_cow_head(skb, 8)))
+ if (unlikely(skb_cow_head(skb, md_bytes)))
return -ENOMEM;
- data = skb_push(skb, 8);
- put_unaligned_be32(NFP_NET_META_PORTID, data);
- put_unaligned_be32(md_dst->u.port_info.port_id, data + 4);
+ meta_id = 0;
+ data = skb_push(skb, md_bytes) + md_bytes;
+ if (md_dst) {
+ data -= 4;
+ put_unaligned_be32(md_dst->u.port_info.port_id, data);
+ meta_id = NFP_NET_META_PORTID;
+ }
+ if (tls_handle) {
+ /* conn handle is opaque, we just use u64 to be able to quickly
+ * compare it to zero
+ */
+ data -= 8;
+ memcpy(data, &tls_handle, sizeof(tls_handle));
+ meta_id <<= NFP_NET_META_FIELD_SIZE;
+ meta_id |= NFP_NET_META_CONN_HANDLE;
+ }
+
+ data -= 4;
+ put_unaligned_be32(meta_id, data);
- return 8;
+ return md_bytes;
}
/**
@@ -851,6 +985,7 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
struct nfp_net_dp *dp;
dma_addr_t dma_addr;
unsigned int fsize;
+ u64 tls_handle = 0;
u16 qidx;
dp = &nn->dp;
@@ -872,18 +1007,21 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_BUSY;
}
- md_bytes = nfp_net_prep_port_id(skb);
- if (unlikely(md_bytes < 0)) {
+ skb = nfp_net_tls_tx(dp, r_vec, skb, &tls_handle, &nr_frags);
+ if (unlikely(!skb)) {
nfp_net_tx_xmit_more_flush(tx_ring);
- dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
+ md_bytes = nfp_net_prep_tx_meta(skb, tls_handle);
+ if (unlikely(md_bytes < 0))
+ goto err_flush;
+
/* Start with the head skbuf */
dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
DMA_TO_DEVICE);
if (dma_mapping_error(dp->dev, dma_addr))
- goto err_free;
+ goto err_dma_err;
wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
@@ -979,12 +1117,14 @@ err_unmap:
tx_ring->txbufs[wr_idx].skb = NULL;
tx_ring->txbufs[wr_idx].dma_addr = 0;
tx_ring->txbufs[wr_idx].fidx = -2;
-err_free:
+err_dma_err:
nn_dp_warn(dp, "Failed to map DMA TX buffer\n");
+err_flush:
nfp_net_tx_xmit_more_flush(tx_ring);
u64_stats_update_begin(&r_vec->tx_sync);
r_vec->tx_errors++;
u64_stats_update_end(&r_vec->tx_sync);
+ nfp_net_tls_tx_undo(skb, tls_handle);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -1857,6 +1997,15 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
nfp_net_rx_csum(dp, r_vec, rxd, &meta, skb);
+#ifdef CONFIG_TLS_DEVICE
+ if (rxd->rxd.flags & PCIE_DESC_RX_DECRYPTED) {
+ skb->decrypted = true;
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->hw_tls_rx++;
+ u64_stats_update_end(&r_vec->rx_sync);
+ }
+#endif
+
if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
le16_to_cpu(rxd->rxd.vlan));
@@ -3705,7 +3854,7 @@ nfp_net_alloc(struct pci_dev *pdev, void __iomem *ctrl_bar, bool needs_netdev,
nn->dp.txd_cnt = NFP_NET_TX_DESCS_DEFAULT;
nn->dp.rxd_cnt = NFP_NET_RX_DESCS_DEFAULT;
- mutex_init(&nn->bar_lock);
+ sema_init(&nn->bar_lock, 1);
spin_lock_init(&nn->reconfig_lock);
spin_lock_init(&nn->link_status_lock);
@@ -3717,6 +3866,10 @@ nfp_net_alloc(struct pci_dev *pdev, void __iomem *ctrl_bar, bool needs_netdev,
if (err)
goto err_free_nn;
+ err = nfp_ccm_mbox_alloc(nn);
+ if (err)
+ goto err_free_nn;
+
return nn;
err_free_nn:
@@ -3734,8 +3887,7 @@ err_free_nn:
void nfp_net_free(struct nfp_net *nn)
{
WARN_ON(timer_pending(&nn->reconfig_timer) || nn->reconfig_posted);
-
- mutex_destroy(&nn->bar_lock);
+ nfp_ccm_mbox_free(nn);
if (nn->dp.netdev)
free_netdev(nn->dp.netdev);
@@ -4010,14 +4162,27 @@ int nfp_net_init(struct nfp_net *nn)
if (err)
return err;
- if (nn->dp.netdev)
+ if (nn->dp.netdev) {
nfp_net_netdev_init(nn);
+ err = nfp_ccm_mbox_init(nn);
+ if (err)
+ return err;
+
+ err = nfp_net_tls_init(nn);
+ if (err)
+ goto err_clean_mbox;
+ }
+
nfp_net_vecs_init(nn);
if (!nn->dp.netdev)
return 0;
return register_netdev(nn->dp.netdev);
+
+err_clean_mbox:
+ nfp_ccm_mbox_clean(nn);
+ return err;
}
/**
@@ -4030,5 +4195,6 @@ void nfp_net_clean(struct nfp_net *nn)
return;
unregister_netdev(nn->dp.netdev);
+ nfp_ccm_mbox_clean(nn);
nfp_net_reconfig_wait_posted(nn);
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c
index 6d5213b5bcb0..d835c14b7257 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c
@@ -99,6 +99,21 @@ int nfp_net_tlv_caps_parse(struct device *dev, u8 __iomem *ctrl_mem,
caps->repr_cap = readl(data);
break;
+ case NFP_NET_CFG_TLV_TYPE_MBOX_CMSG_TYPES:
+ if (length >= 4)
+ caps->mbox_cmsg_types = readl(data);
+ break;
+ case NFP_NET_CFG_TLV_TYPE_CRYPTO_OPS:
+ if (length < 32) {
+ dev_err(dev,
+ "CRYPTO OPS TLV should be at least 32B, is %dB offset:%u\n",
+ length, offset);
+ return -EINVAL;
+ }
+
+ caps->crypto_ops = readl(data);
+ caps->crypto_enable_off = data - ctrl_mem + 16;
+ break;
default:
if (!FIELD_GET(NFP_NET_CFG_TLV_HEADER_REQUIRED, hdr))
break;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index 25919e338071..ee6b24e4eacd 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -44,6 +44,7 @@
#define NFP_NET_META_MARK 2
#define NFP_NET_META_PORTID 5
#define NFP_NET_META_CSUM 6 /* checksum complete type */
+#define NFP_NET_META_CONN_HANDLE 7
#define NFP_META_PORT_ID_CTRL ~0U
@@ -135,6 +136,7 @@
#define NFP_NET_CFG_UPDATE_MACADDR (0x1 << 11) /* MAC address change */
#define NFP_NET_CFG_UPDATE_MBOX (0x1 << 12) /* Mailbox update */
#define NFP_NET_CFG_UPDATE_VF (0x1 << 13) /* VF settings change */
+#define NFP_NET_CFG_UPDATE_CRYPTO (0x1 << 14) /* Crypto on/off */
#define NFP_NET_CFG_UPDATE_ERR (0x1 << 31) /* An error occurred */
#define NFP_NET_CFG_TXRS_ENABLE 0x0008
#define NFP_NET_CFG_RXRS_ENABLE 0x0010
@@ -394,6 +396,7 @@
#define NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL 2
#define NFP_NET_CFG_MBOX_CMD_PCI_DSCP_PRIOMAP_SET 5
+#define NFP_NET_CFG_MBOX_CMD_TLV_CMSG 6
/**
* VLAN filtering using general use mailbox
@@ -466,6 +469,16 @@
* %NFP_NET_CFG_TLV_TYPE_REPR_CAP:
* Single word, equivalent of %NFP_NET_CFG_CAP for representors, features which
* can be used on representors.
+ *
+ * %NFP_NET_CFG_TLV_TYPE_MBOX_CMSG_TYPES:
+ * Variable, bitmap of control message types supported by the mailbox handler.
+ * Bit 0 corresponds to message type 0, bit 1 to type 1, etc. Control messages
+ * are encapsulated into simple TLVs, terminated by an end TLV, and written to
+ * the Mailbox.
+ *
+ * %NFP_NET_CFG_TLV_TYPE_CRYPTO_OPS:
+ * 8 words, bitmaps of supported and enabled crypto operations.
+ * First 16B (4 words) contains a bitmap of supported crypto operations,
+ * and next 16B contain the enabled operations.
*/
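A standalone sketch of the CRYPTO_OPS TLV layout described above, reading the first supported word and locating the enable region; the struct and the example bit are illustrative, device byte-order handling is omitted, and the real bit meanings live in crypto/fw.h:

/* 32-byte CRYPTO_OPS area: 16B supported bitmap followed by 16B enabled. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct crypto_ops_tlv {
	uint8_t supported[16];	/* bitmap of supported operations */
	uint8_t enabled[16];	/* bitmap of currently enabled operations */
};

int main(void)
{
	uint8_t raw[32] = { 0 };
	struct crypto_ops_tlv tlv;
	uint32_t crypto_ops;
	unsigned int crypto_enable_off;

	raw[0] = 0x03;			/* pretend ops 0 and 1 are supported */
	memcpy(&tlv, raw, sizeof(tlv));

	memcpy(&crypto_ops, tlv.supported, sizeof(crypto_ops));
	crypto_enable_off = offsetof(struct crypto_ops_tlv, enabled);

	printf("first supported word: %#x, enable region at +%u bytes\n",
	       crypto_ops, crypto_enable_off);
	return 0;
}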
#define NFP_NET_CFG_TLV_TYPE_UNKNOWN 0
#define NFP_NET_CFG_TLV_TYPE_RESERVED 1
@@ -475,6 +488,8 @@
#define NFP_NET_CFG_TLV_TYPE_EXPERIMENTAL0 5
#define NFP_NET_CFG_TLV_TYPE_EXPERIMENTAL1 6
#define NFP_NET_CFG_TLV_TYPE_REPR_CAP 7
+#define NFP_NET_CFG_TLV_TYPE_MBOX_CMSG_TYPES 10
+#define NFP_NET_CFG_TLV_TYPE_CRYPTO_OPS 11 /* see crypto/fw.h */
struct device;
@@ -484,12 +499,18 @@ struct device;
* @mbox_off: vNIC mailbox area offset
* @mbox_len: vNIC mailbox area length
* @repr_cap: capabilities for representors
+ * @mbox_cmsg_types: cmsgs which can be passed through the mailbox
+ * @crypto_ops: supported crypto operations
+ * @crypto_enable_off: offset of crypto ops enable region
*/
struct nfp_net_tlv_caps {
u32 me_freq_mhz;
unsigned int mbox_off;
unsigned int mbox_len;
u32 repr_cap;
+ u32 mbox_cmsg_types;
+ u32 crypto_ops;
+ unsigned int crypto_enable_off;
};
int nfp_net_tlv_caps_parse(struct device *dev, u8 __iomem *ctrl_mem,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 851e31e0ba8e..d9cbe84ac6ad 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -150,8 +150,9 @@ static const struct nfp_et_stat nfp_mac_et_stats[] = {
#define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats)
#define NN_ET_SWITCH_STATS_LEN 9
-#define NN_RVEC_GATHER_STATS 9
+#define NN_RVEC_GATHER_STATS 13
#define NN_RVEC_PER_Q_STATS 3
+#define NN_CTRL_PATH_STATS 1
#define SFP_SFF_REV_COMPLIANCE 1
@@ -423,7 +424,8 @@ static unsigned int nfp_vnic_get_sw_stats_count(struct net_device *netdev)
{
struct nfp_net *nn = netdev_priv(netdev);
- return NN_RVEC_GATHER_STATS + nn->max_r_vecs * NN_RVEC_PER_Q_STATS;
+ return NN_RVEC_GATHER_STATS + nn->max_r_vecs * NN_RVEC_PER_Q_STATS +
+ NN_CTRL_PATH_STATS;
}
static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data)
@@ -442,10 +444,16 @@ static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data)
data = nfp_pr_et(data, "hw_rx_csum_complete");
data = nfp_pr_et(data, "hw_rx_csum_err");
data = nfp_pr_et(data, "rx_replace_buf_alloc_fail");
+ data = nfp_pr_et(data, "rx_tls_decrypted");
data = nfp_pr_et(data, "hw_tx_csum");
data = nfp_pr_et(data, "hw_tx_inner_csum");
data = nfp_pr_et(data, "tx_gather");
data = nfp_pr_et(data, "tx_lso");
+ data = nfp_pr_et(data, "tx_tls_encrypted");
+ data = nfp_pr_et(data, "tx_tls_ooo");
+ data = nfp_pr_et(data, "tx_tls_drop_no_sync_data");
+
+ data = nfp_pr_et(data, "hw_tls_no_space");
return data;
}
@@ -468,16 +476,20 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
tmp[2] = nn->r_vecs[i].hw_csum_rx_complete;
tmp[3] = nn->r_vecs[i].hw_csum_rx_error;
tmp[4] = nn->r_vecs[i].rx_replace_buf_alloc_fail;
+ tmp[5] = nn->r_vecs[i].hw_tls_rx;
} while (u64_stats_fetch_retry(&nn->r_vecs[i].rx_sync, start));
do {
start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync);
data[1] = nn->r_vecs[i].tx_pkts;
data[2] = nn->r_vecs[i].tx_busy;
- tmp[5] = nn->r_vecs[i].hw_csum_tx;
- tmp[6] = nn->r_vecs[i].hw_csum_tx_inner;
- tmp[7] = nn->r_vecs[i].tx_gather;
- tmp[8] = nn->r_vecs[i].tx_lso;
+ tmp[6] = nn->r_vecs[i].hw_csum_tx;
+ tmp[7] = nn->r_vecs[i].hw_csum_tx_inner;
+ tmp[8] = nn->r_vecs[i].tx_gather;
+ tmp[9] = nn->r_vecs[i].tx_lso;
+ tmp[10] = nn->r_vecs[i].hw_tls_tx;
+ tmp[11] = nn->r_vecs[i].tls_tx_fallback;
+ tmp[12] = nn->r_vecs[i].tls_tx_no_fallback;
} while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start));
data += NN_RVEC_PER_Q_STATS;
@@ -489,6 +501,8 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
for (j = 0; j < NN_RVEC_GATHER_STATS; j++)
*data++ = gathered_stats[j];
+ *data++ = atomic_read(&nn->ktls_no_space);
+
return data;
}
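The new TLS counters slot into the existing gather scheme: each ring vector's counters are snapshotted into tmp[], summed across vectors, emitted in the same order as the strings above, and the per-vNIC control-path counter is appended last. A simplified sketch of that pattern (the seqlock retry loop is omitted and the counter names are illustrative):

/* Per-vector counters summed into a gathered array, control-path stat last. */
#include <stdint.h>
#include <stdio.h>

#define NUM_VECS		2
#define NUM_GATHER_STATS	3	/* stand-in for NN_RVEC_GATHER_STATS */

struct r_vec_model {
	uint64_t rx_pkts, hw_tls_rx, hw_tls_tx;
};

int main(void)
{
	struct r_vec_model vecs[NUM_VECS] = {
		{ 100, 10, 5 }, { 200, 20, 7 },
	};
	uint64_t gathered[NUM_GATHER_STATS] = { 0 };
	uint64_t ktls_no_space = 3;	/* per-vNIC, not per-vector */
	int i, j;

	for (i = 0; i < NUM_VECS; i++) {
		uint64_t tmp[NUM_GATHER_STATS] = {
			vecs[i].rx_pkts, vecs[i].hw_tls_rx, vecs[i].hw_tls_tx,
		};
		for (j = 0; j < NUM_GATHER_STATS; j++)
			gathered[j] += tmp[j];
	}

	for (j = 0; j < NUM_GATHER_STATS; j++)
		printf("gathered[%d] = %llu\n", j,
		       (unsigned long long)gathered[j]);
	printf("hw_tls_no_space = %llu\n", (unsigned long long)ktls_no_space);
	return 0;
}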
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
index 42cf4fd875ea..9a08623c325d 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
@@ -241,11 +241,16 @@ static int nfp_nsp_check(struct nfp_nsp *state)
state->ver.major = FIELD_GET(NSP_STATUS_MAJOR, reg);
state->ver.minor = FIELD_GET(NSP_STATUS_MINOR, reg);
- if (state->ver.major != NSP_MAJOR || state->ver.minor < NSP_MINOR) {
+ if (state->ver.major != NSP_MAJOR) {
nfp_err(cpp, "Unsupported ABI %hu.%hu\n",
state->ver.major, state->ver.minor);
return -EINVAL;
}
+ if (state->ver.minor < NSP_MINOR) {
+ nfp_err(cpp, "ABI too old to support NIC operation (%u.%hu < %u.%u), please update the management FW on the flash\n",
+ NSP_MAJOR, state->ver.minor, NSP_MAJOR, NSP_MINOR);
+ return -EINVAL;
+ }
if (reg & NSP_STATUS_BUSY) {
nfp_err(cpp, "Service processor busy!\n");
diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
index 96f7a9818294..0b384f97d2fd 100644
--- a/drivers/net/ethernet/ni/nixge.c
+++ b/drivers/net/ethernet/ni/nixge.c
@@ -990,7 +990,7 @@ static void nixge_ethtools_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *ed)
{
strlcpy(ed->driver, "nixge", sizeof(ed->driver));
- strlcpy(ed->bus_info, "platform", sizeof(ed->driver));
+ strlcpy(ed->bus_info, "platform", sizeof(ed->bus_info));
}
static int nixge_ethtools_get_coalesce(struct net_device *ndev,
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index bf5a7bca0298..be6660128b55 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -1042,7 +1042,6 @@ static int pasemi_mac_phy_init(struct net_device *dev)
dn = pci_device_to_OF_node(mac->pdev);
phy_dn = of_parse_phandle(dn, "phy-handle", 0);
- of_node_put(phy_dn);
mac->link = 0;
mac->speed = 0;
@@ -1051,6 +1050,7 @@ static int pasemi_mac_phy_init(struct net_device *dev)
phydev = of_phy_connect(dev, phy_dn, &pasemi_adjust_link, 0,
PHY_INTERFACE_MODE_SGMII);
+ of_node_put(phy_dn);
if (!phydev) {
printk(KERN_ERR "%s: Could not attach to phy\n", dev->name);
return -ENODEV;
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index fdbb3ce00e20..a391cf6ee4b2 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -87,6 +87,7 @@ config QED
depends on PCI
select ZLIB_INFLATE
select CRC8
+ select NET_DEVLINK
---help---
This enables the support for ...
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 84cb62434556..58e2eaf77014 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -3248,6 +3248,7 @@ netxen_config_indev_addr(struct netxen_adapter *adapter,
struct net_device *dev, unsigned long event)
{
struct in_device *indev;
+ struct in_ifaddr *ifa;
if (!netxen_destip_supported(adapter))
return;
@@ -3256,7 +3257,8 @@ netxen_config_indev_addr(struct netxen_adapter *adapter,
if (!indev)
return;
- for_ifa(indev) {
+ rcu_read_lock();
+ in_dev_for_each_ifa_rcu(ifa, indev) {
switch (event) {
case NETDEV_UP:
netxen_list_config_ip(adapter, ifa, NX_IP_UP);
@@ -3267,8 +3269,8 @@ netxen_config_indev_addr(struct netxen_adapter *adapter,
default:
break;
}
- } endfor_ifa(indev);
-
+ }
+ rcu_read_unlock();
in_dev_put(indev);
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index c5e96ce20f59..89fe091c958d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -140,6 +140,7 @@ struct qed_cxt_mngr;
struct qed_sb_sp_info;
struct qed_ll2_info;
struct qed_mcp_info;
+struct qed_llh_info;
struct qed_rt_data {
u32 *init_val;
@@ -741,6 +742,7 @@ struct qed_dev {
#define QED_DEV_ID_MASK 0xff00
#define QED_DEV_ID_MASK_BB 0x1600
#define QED_DEV_ID_MASK_AH 0x8000
+#define QED_IS_E4(dev) (QED_IS_BB(dev) || QED_IS_AH(dev))
u16 chip_num;
#define CHIP_NUM_MASK 0xffff
@@ -801,6 +803,11 @@ struct qed_dev {
u8 num_hwfns;
struct qed_hwfn hwfns[MAX_HWFNS_PER_DEVICE];
+ /* Engine affinity */
+ u8 l2_affin_hint;
+ u8 fir_affin;
+ u8 iwarp_affin;
+
/* SRIOV */
struct qed_hw_sriov_info *p_iov_info;
#define IS_QED_SRIOV(cdev) (!!(cdev)->p_iov_info)
@@ -815,6 +822,10 @@ struct qed_dev {
/* Recovery */
bool recov_in_prog;
+ /* LLH info */
+ u8 ppfid_bitmap;
+ struct qed_llh_info *p_llh_info;
+
/* Linux specific here */
struct qede_dev *edev;
struct pci_dev *pdev;
@@ -852,6 +863,9 @@ struct qed_dev {
u32 rdma_max_inline;
u32 rdma_max_srq_sge;
u16 tunn_feature_mask;
+
+ struct devlink *dl;
+ bool iwarp_cmt;
};
#define NUM_OF_VFS(dev) (QED_IS_BB(dev) ? MAX_NUM_VFS_BB \
@@ -904,6 +918,14 @@ void qed_set_fw_mac_addr(__le16 *fw_msb,
__le16 *fw_mid, __le16 *fw_lsb, u8 *mac);
#define QED_LEADING_HWFN(dev) (&dev->hwfns[0])
+#define QED_IS_CMT(dev) ((dev)->num_hwfns > 1)
+/* Macros for getting the engine-affinitized hwfn (FIR: fcoe,iscsi,roce) */
+#define QED_FIR_AFFIN_HWFN(dev) (&(dev)->hwfns[dev->fir_affin])
+#define QED_IWARP_AFFIN_HWFN(dev) (&(dev)->hwfns[dev->iwarp_affin])
+#define QED_AFFIN_HWFN(dev) \
+ (QED_IS_IWARP_PERSONALITY(QED_LEADING_HWFN(dev)) ? \
+ QED_IWARP_AFFIN_HWFN(dev) : QED_FIR_AFFIN_HWFN(dev))
+#define QED_AFFIN_HWFN_IDX(dev) (IS_LEAD_HWFN(QED_AFFIN_HWFN(dev)) ? 0 : 1)
/* Flags for indication of required queues */
#define PQ_FLAGS_RLS (BIT(0))
@@ -923,8 +945,6 @@ u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf);
u16 qed_get_cm_pq_idx_ofld_mtc(struct qed_hwfn *p_hwfn, u8 tc);
u16 qed_get_cm_pq_idx_llt_mtc(struct qed_hwfn *p_hwfn, u8 tc);
-#define QED_LEADING_HWFN(dev) (&dev->hwfns[0])
-
/* doorbell recovery mechanism */
void qed_db_recovery_dp(struct qed_hwfn *p_hwfn);
void qed_db_recovery_execute(struct qed_hwfn *p_hwfn);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index e61d1d905415..8e1bdf58b9e7 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -2351,7 +2351,8 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
/* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a wide-bus register */
qed_dmae_host2grc(p_hwfn, p_ptt, (u64) (uintptr_t)&ilt_hw_entry,
- reg_offset, sizeof(ilt_hw_entry) / sizeof(u32), 0);
+ reg_offset, sizeof(ilt_hw_entry) / sizeof(u32),
+ NULL);
if (elem_type == QED_ELEM_CXT) {
u32 last_cid_allocated = (1 + (iid / elems_per_p)) *
@@ -2457,7 +2458,7 @@ qed_cxt_free_ilt_range(struct qed_hwfn *p_hwfn,
(u64) (uintptr_t) &ilt_hw_entry,
reg_offset,
sizeof(ilt_hw_entry) / sizeof(u32),
- 0);
+ NULL);
}
qed_ptt_release(p_hwfn, p_ptt);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index ab8cacbdee3e..5ea6c4fc6050 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -2534,7 +2534,7 @@ static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
(len >= s_platform_defs[dev_data->platform_id].dmae_thresh ||
wide_bus)) {
if (!qed_dmae_grc2host(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr),
- (u64)(uintptr_t)(dump_buf), len, 0))
+ (u64)(uintptr_t)(dump_buf), len, NULL))
return len;
dev_data->use_dmae = 0;
DP_VERBOSE(p_hwfn,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index fccdb06fc5c5..a1ebc2b1ca0b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -361,6 +361,927 @@ void qed_db_recovery_execute(struct qed_hwfn *p_hwfn)
/******************** Doorbell Recovery end ****************/
+/********************************** NIG LLH ***********************************/
+
+enum qed_llh_filter_type {
+ QED_LLH_FILTER_TYPE_MAC,
+ QED_LLH_FILTER_TYPE_PROTOCOL,
+};
+
+struct qed_llh_mac_filter {
+ u8 addr[ETH_ALEN];
+};
+
+struct qed_llh_protocol_filter {
+ enum qed_llh_prot_filter_type_t type;
+ u16 source_port_or_eth_type;
+ u16 dest_port;
+};
+
+union qed_llh_filter {
+ struct qed_llh_mac_filter mac;
+ struct qed_llh_protocol_filter protocol;
+};
+
+struct qed_llh_filter_info {
+ bool b_enabled;
+ u32 ref_cnt;
+ enum qed_llh_filter_type type;
+ union qed_llh_filter filter;
+};
+
+struct qed_llh_info {
+ /* Number of LLH filters banks */
+ u8 num_ppfid;
+
+#define MAX_NUM_PPFID 8
+ u8 ppfid_array[MAX_NUM_PPFID];
+
+ /* Array of filters arrays:
+ * "num_ppfid" elements of filters banks, where each is an array of
+ * "NIG_REG_LLH_FUNC_FILTER_EN_SIZE" filters.
+ */
+ struct qed_llh_filter_info **pp_filters;
+};
+
+static void qed_llh_free(struct qed_dev *cdev)
+{
+ struct qed_llh_info *p_llh_info = cdev->p_llh_info;
+ u32 i;
+
+ if (p_llh_info) {
+ if (p_llh_info->pp_filters)
+ for (i = 0; i < p_llh_info->num_ppfid; i++)
+ kfree(p_llh_info->pp_filters[i]);
+
+ kfree(p_llh_info->pp_filters);
+ }
+
+ kfree(p_llh_info);
+ cdev->p_llh_info = NULL;
+}
+
+static int qed_llh_alloc(struct qed_dev *cdev)
+{
+ struct qed_llh_info *p_llh_info;
+ u32 size, i;
+
+ p_llh_info = kzalloc(sizeof(*p_llh_info), GFP_KERNEL);
+ if (!p_llh_info)
+ return -ENOMEM;
+ cdev->p_llh_info = p_llh_info;
+
+ for (i = 0; i < MAX_NUM_PPFID; i++) {
+ if (!(cdev->ppfid_bitmap & (0x1 << i)))
+ continue;
+
+ p_llh_info->ppfid_array[p_llh_info->num_ppfid] = i;
+ DP_VERBOSE(cdev, QED_MSG_SP, "ppfid_array[%d] = %hhd\n",
+ p_llh_info->num_ppfid, i);
+ p_llh_info->num_ppfid++;
+ }
+
+ size = p_llh_info->num_ppfid * sizeof(*p_llh_info->pp_filters);
+ p_llh_info->pp_filters = kzalloc(size, GFP_KERNEL);
+ if (!p_llh_info->pp_filters)
+ return -ENOMEM;
+
+ size = NIG_REG_LLH_FUNC_FILTER_EN_SIZE *
+ sizeof(**p_llh_info->pp_filters);
+ for (i = 0; i < p_llh_info->num_ppfid; i++) {
+ p_llh_info->pp_filters[i] = kzalloc(size, GFP_KERNEL);
+ if (!p_llh_info->pp_filters[i])
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int qed_llh_shadow_sanity(struct qed_dev *cdev,
+ u8 ppfid, u8 filter_idx, const char *action)
+{
+ struct qed_llh_info *p_llh_info = cdev->p_llh_info;
+
+ if (ppfid >= p_llh_info->num_ppfid) {
+ DP_NOTICE(cdev,
+ "LLH shadow [%s]: using ppfid %d while only %d ppfids are available\n",
+ action, ppfid, p_llh_info->num_ppfid);
+ return -EINVAL;
+ }
+
+ if (filter_idx >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) {
+ DP_NOTICE(cdev,
+ "LLH shadow [%s]: using filter_idx %d while only %d filters are available\n",
+ action, filter_idx, NIG_REG_LLH_FUNC_FILTER_EN_SIZE);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#define QED_LLH_INVALID_FILTER_IDX 0xff
+
+static int
+qed_llh_shadow_search_filter(struct qed_dev *cdev,
+ u8 ppfid,
+ union qed_llh_filter *p_filter, u8 *p_filter_idx)
+{
+ struct qed_llh_info *p_llh_info = cdev->p_llh_info;
+ struct qed_llh_filter_info *p_filters;
+ int rc;
+ u8 i;
+
+ rc = qed_llh_shadow_sanity(cdev, ppfid, 0, "search");
+ if (rc)
+ return rc;
+
+ *p_filter_idx = QED_LLH_INVALID_FILTER_IDX;
+
+ p_filters = p_llh_info->pp_filters[ppfid];
+ for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
+ if (!memcmp(p_filter, &p_filters[i].filter,
+ sizeof(*p_filter))) {
+ *p_filter_idx = i;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int
+qed_llh_shadow_get_free_idx(struct qed_dev *cdev, u8 ppfid, u8 *p_filter_idx)
+{
+ struct qed_llh_info *p_llh_info = cdev->p_llh_info;
+ struct qed_llh_filter_info *p_filters;
+ int rc;
+ u8 i;
+
+ rc = qed_llh_shadow_sanity(cdev, ppfid, 0, "get_free_idx");
+ if (rc)
+ return rc;
+
+ *p_filter_idx = QED_LLH_INVALID_FILTER_IDX;
+
+ p_filters = p_llh_info->pp_filters[ppfid];
+ for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
+ if (!p_filters[i].b_enabled) {
+ *p_filter_idx = i;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int
+__qed_llh_shadow_add_filter(struct qed_dev *cdev,
+ u8 ppfid,
+ u8 filter_idx,
+ enum qed_llh_filter_type type,
+ union qed_llh_filter *p_filter, u32 *p_ref_cnt)
+{
+ struct qed_llh_info *p_llh_info = cdev->p_llh_info;
+ struct qed_llh_filter_info *p_filters;
+ int rc;
+
+ rc = qed_llh_shadow_sanity(cdev, ppfid, filter_idx, "add");
+ if (rc)
+ return rc;
+
+ p_filters = p_llh_info->pp_filters[ppfid];
+ if (!p_filters[filter_idx].ref_cnt) {
+ p_filters[filter_idx].b_enabled = true;
+ p_filters[filter_idx].type = type;
+ memcpy(&p_filters[filter_idx].filter, p_filter,
+ sizeof(p_filters[filter_idx].filter));
+ }
+
+ *p_ref_cnt = ++p_filters[filter_idx].ref_cnt;
+
+ return 0;
+}
+
+static int
+qed_llh_shadow_add_filter(struct qed_dev *cdev,
+ u8 ppfid,
+ enum qed_llh_filter_type type,
+ union qed_llh_filter *p_filter,
+ u8 *p_filter_idx, u32 *p_ref_cnt)
+{
+ int rc;
+
+ /* Check if the same filter already exist */
+ rc = qed_llh_shadow_search_filter(cdev, ppfid, p_filter, p_filter_idx);
+ if (rc)
+ return rc;
+
+ /* Find a new entry in case of a new filter */
+ if (*p_filter_idx == QED_LLH_INVALID_FILTER_IDX) {
+ rc = qed_llh_shadow_get_free_idx(cdev, ppfid, p_filter_idx);
+ if (rc)
+ return rc;
+ }
+
+ /* No free entry was found */
+ if (*p_filter_idx == QED_LLH_INVALID_FILTER_IDX) {
+ DP_NOTICE(cdev,
+ "Failed to find an empty LLH filter to utilize [ppfid %d]\n",
+ ppfid);
+ return -EINVAL;
+ }
+
+ return __qed_llh_shadow_add_filter(cdev, ppfid, *p_filter_idx, type,
+ p_filter, p_ref_cnt);
+}
+
+static int
+__qed_llh_shadow_remove_filter(struct qed_dev *cdev,
+ u8 ppfid, u8 filter_idx, u32 *p_ref_cnt)
+{
+ struct qed_llh_info *p_llh_info = cdev->p_llh_info;
+ struct qed_llh_filter_info *p_filters;
+ int rc;
+
+ rc = qed_llh_shadow_sanity(cdev, ppfid, filter_idx, "remove");
+ if (rc)
+ return rc;
+
+ p_filters = p_llh_info->pp_filters[ppfid];
+ if (!p_filters[filter_idx].ref_cnt) {
+ DP_NOTICE(cdev,
+ "LLH shadow: trying to remove a filter with ref_cnt=0\n");
+ return -EINVAL;
+ }
+
+ *p_ref_cnt = --p_filters[filter_idx].ref_cnt;
+ if (!p_filters[filter_idx].ref_cnt)
+ memset(&p_filters[filter_idx],
+ 0, sizeof(p_filters[filter_idx]));
+
+ return 0;
+}
+
+static int
+qed_llh_shadow_remove_filter(struct qed_dev *cdev,
+ u8 ppfid,
+ union qed_llh_filter *p_filter,
+ u8 *p_filter_idx, u32 *p_ref_cnt)
+{
+ int rc;
+
+ rc = qed_llh_shadow_search_filter(cdev, ppfid, p_filter, p_filter_idx);
+ if (rc)
+ return rc;
+
+ /* No matching filter was found */
+ if (*p_filter_idx == QED_LLH_INVALID_FILTER_IDX) {
+ DP_NOTICE(cdev, "Failed to find a filter in the LLH shadow\n");
+ return -EINVAL;
+ }
+
+ return __qed_llh_shadow_remove_filter(cdev, ppfid, *p_filter_idx,
+ p_ref_cnt);
+}
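The shadow add/remove helpers above keep a ref-counted table per filter bank: an identical filter reuses its slot and bumps ref_cnt, a new one takes a free slot, and removal clears the slot once the count drops to zero. A standalone sketch of that table, keyed by a plain integer instead of the MAC/protocol filter union:

/* Ref-counted slot table modeling the LLH filter shadow. */
#include <stdio.h>

#define NUM_SLOTS	4
#define INVALID_IDX	0xffu

struct slot {
	int in_use;
	unsigned int ref_cnt;
	int key;
};

static struct slot table[NUM_SLOTS];

static unsigned int shadow_add(int key)
{
	unsigned int i, idx = INVALID_IDX;

	for (i = 0; i < NUM_SLOTS; i++)		/* reuse an identical entry */
		if (table[i].in_use && table[i].key == key) {
			idx = i;
			break;
		}
	if (idx == INVALID_IDX)			/* otherwise take a free one */
		for (i = 0; i < NUM_SLOTS; i++)
			if (!table[i].in_use) {
				idx = i;
				break;
			}
	if (idx == INVALID_IDX)
		return INVALID_IDX;		/* table full */

	table[idx].in_use = 1;
	table[idx].key = key;
	table[idx].ref_cnt++;
	return idx;
}

static void shadow_remove(unsigned int idx)
{
	if (idx >= NUM_SLOTS || !table[idx].ref_cnt)
		return;
	if (!--table[idx].ref_cnt)
		table[idx].in_use = 0;		/* last user, free the slot */
}

int main(void)
{
	unsigned int a = shadow_add(42);
	unsigned int b = shadow_add(42);	/* same filter, same slot */

	printf("slot %u ref_cnt %u\n", a, table[a].ref_cnt);
	shadow_remove(b);
	shadow_remove(a);
	printf("slot %u in_use %d\n", a, table[a].in_use);
	return 0;
}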
+
+static int qed_llh_abs_ppfid(struct qed_dev *cdev, u8 ppfid, u8 *p_abs_ppfid)
+{
+ struct qed_llh_info *p_llh_info = cdev->p_llh_info;
+
+ if (ppfid >= p_llh_info->num_ppfid) {
+ DP_NOTICE(cdev,
+ "ppfid %d is not valid, available indices are 0..%hhd\n",
+ ppfid, p_llh_info->num_ppfid - 1);
+ *p_abs_ppfid = 0;
+ return -EINVAL;
+ }
+
+ *p_abs_ppfid = p_llh_info->ppfid_array[ppfid];
+
+ return 0;
+}
+
+static int
+qed_llh_set_engine_affin(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_dev *cdev = p_hwfn->cdev;
+ enum qed_eng eng;
+ u8 ppfid;
+ int rc;
+
+ rc = qed_mcp_get_engine_config(p_hwfn, p_ptt);
+ if (rc != 0 && rc != -EOPNOTSUPP) {
+ DP_NOTICE(p_hwfn,
+ "Failed to get the engine affinity configuration\n");
+ return rc;
+ }
+
+ /* RoCE PF is bound to a single engine */
+ if (QED_IS_ROCE_PERSONALITY(p_hwfn)) {
+ eng = cdev->fir_affin ? QED_ENG1 : QED_ENG0;
+ rc = qed_llh_set_roce_affinity(cdev, eng);
+ if (rc) {
+ DP_NOTICE(cdev,
+ "Failed to set the RoCE engine affinity\n");
+ return rc;
+ }
+
+ DP_VERBOSE(cdev,
+ QED_MSG_SP,
+ "LLH: Set the engine affinity of RoCE packets as %d\n",
+ eng);
+ }
+
+ /* Storage PF is bound to a single engine while L2 PF uses both */
+ if (QED_IS_FCOE_PERSONALITY(p_hwfn) || QED_IS_ISCSI_PERSONALITY(p_hwfn))
+ eng = cdev->fir_affin ? QED_ENG1 : QED_ENG0;
+ else /* L2_PERSONALITY */
+ eng = QED_BOTH_ENG;
+
+ for (ppfid = 0; ppfid < cdev->p_llh_info->num_ppfid; ppfid++) {
+ rc = qed_llh_set_ppfid_affinity(cdev, ppfid, eng);
+ if (rc) {
+ DP_NOTICE(cdev,
+ "Failed to set the engine affinity of ppfid %d\n",
+ ppfid);
+ return rc;
+ }
+ }
+
+ DP_VERBOSE(cdev, QED_MSG_SP,
+ "LLH: Set the engine affinity of non-RoCE packets as %d\n",
+ eng);
+
+ return 0;
+}
+
+static int qed_llh_hw_init_pf(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct qed_dev *cdev = p_hwfn->cdev;
+ u8 ppfid, abs_ppfid;
+ int rc;
+
+ for (ppfid = 0; ppfid < cdev->p_llh_info->num_ppfid; ppfid++) {
+ u32 addr;
+
+ rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid);
+ if (rc)
+ return rc;
+
+ addr = NIG_REG_LLH_PPFID2PFID_TBL_0 + abs_ppfid * 0x4;
+ qed_wr(p_hwfn, p_ptt, addr, p_hwfn->rel_pf_id);
+ }
+
+ if (test_bit(QED_MF_LLH_MAC_CLSS, &cdev->mf_bits) &&
+ !QED_IS_FCOE_PERSONALITY(p_hwfn)) {
+ rc = qed_llh_add_mac_filter(cdev, 0,
+ p_hwfn->hw_info.hw_mac_addr);
+ if (rc)
+ DP_NOTICE(cdev,
+ "Failed to add an LLH filter with the primary MAC\n");
+ }
+
+ if (QED_IS_CMT(cdev)) {
+ rc = qed_llh_set_engine_affin(p_hwfn, p_ptt);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+u8 qed_llh_get_num_ppfid(struct qed_dev *cdev)
+{
+ return cdev->p_llh_info->num_ppfid;
+}
+
+#define NIG_REG_PPF_TO_ENGINE_SEL_ROCE_MASK 0x3
+#define NIG_REG_PPF_TO_ENGINE_SEL_ROCE_SHIFT 0
+#define NIG_REG_PPF_TO_ENGINE_SEL_NON_ROCE_MASK 0x3
+#define NIG_REG_PPF_TO_ENGINE_SEL_NON_ROCE_SHIFT 2
+
+int qed_llh_set_ppfid_affinity(struct qed_dev *cdev, u8 ppfid, enum qed_eng eng)
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+ u32 addr, val, eng_sel;
+ u8 abs_ppfid;
+ int rc = 0;
+
+ if (!p_ptt)
+ return -EAGAIN;
+
+ if (!QED_IS_CMT(cdev))
+ goto out;
+
+ rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid);
+ if (rc)
+ goto out;
+
+ switch (eng) {
+ case QED_ENG0:
+ eng_sel = 0;
+ break;
+ case QED_ENG1:
+ eng_sel = 1;
+ break;
+ case QED_BOTH_ENG:
+ eng_sel = 2;
+ break;
+ default:
+ DP_NOTICE(cdev, "Invalid affinity value for ppfid [%d]\n", eng);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ addr = NIG_REG_PPF_TO_ENGINE_SEL + abs_ppfid * 0x4;
+ val = qed_rd(p_hwfn, p_ptt, addr);
+ SET_FIELD(val, NIG_REG_PPF_TO_ENGINE_SEL_NON_ROCE, eng_sel);
+ qed_wr(p_hwfn, p_ptt, addr, val);
+
+ /* The iWARP affinity is set as the affinity of ppfid 0 */
+ if (!ppfid && QED_IS_IWARP_PERSONALITY(p_hwfn))
+ cdev->iwarp_affin = (eng == QED_ENG1) ? 1 : 0;
+out:
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
+
+int qed_llh_set_roce_affinity(struct qed_dev *cdev, enum qed_eng eng)
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+ u32 addr, val, eng_sel;
+ u8 ppfid, abs_ppfid;
+ int rc = 0;
+
+ if (!p_ptt)
+ return -EAGAIN;
+
+ if (!QED_IS_CMT(cdev))
+ goto out;
+
+ switch (eng) {
+ case QED_ENG0:
+ eng_sel = 0;
+ break;
+ case QED_ENG1:
+ eng_sel = 1;
+ break;
+ case QED_BOTH_ENG:
+ eng_sel = 2;
+ qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_ENG_CLS_ROCE_QP_SEL,
+ 0xf); /* QP bit 15 */
+ break;
+ default:
+ DP_NOTICE(cdev, "Invalid affinity value for RoCE [%d]\n", eng);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ for (ppfid = 0; ppfid < cdev->p_llh_info->num_ppfid; ppfid++) {
+ rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid);
+ if (rc)
+ goto out;
+
+ addr = NIG_REG_PPF_TO_ENGINE_SEL + abs_ppfid * 0x4;
+ val = qed_rd(p_hwfn, p_ptt, addr);
+ SET_FIELD(val, NIG_REG_PPF_TO_ENGINE_SEL_ROCE, eng_sel);
+ qed_wr(p_hwfn, p_ptt, addr, val);
+ }
+out:
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
+
+struct qed_llh_filter_details {
+ u64 value;
+ u32 mode;
+ u32 protocol_type;
+ u32 hdr_sel;
+ u32 enable;
+};
+
+static int
+qed_llh_access_filter(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u8 abs_ppfid,
+ u8 filter_idx,
+ struct qed_llh_filter_details *p_details)
+{
+ struct qed_dmae_params params = {0};
+ u32 addr;
+ u8 pfid;
+ int rc;
+
+ /* The NIG/LLH registers that are accessed in this function have only 16
+ * rows which are exposed to a PF, i.e. only the 16 filters of its
+ * default ppfid. Accessing filters of other ppfids requires pretending
+ * to be other PFs.
+ * The calculation of PPFID->PFID in AH is based on the relative index
+ * of a PF on its port.
+ * For BB the pfid is actually the abs_ppfid.
+ */
+ if (QED_IS_BB(p_hwfn->cdev))
+ pfid = abs_ppfid;
+ else
+ pfid = abs_ppfid * p_hwfn->cdev->num_ports_in_engine +
+ MFW_PORT(p_hwfn);
+
+ /* Filter enable - should be done first when removing a filter */
+ if (!p_details->enable) {
+ qed_fid_pretend(p_hwfn, p_ptt,
+ pfid << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT);
+
+ addr = NIG_REG_LLH_FUNC_FILTER_EN + filter_idx * 0x4;
+ qed_wr(p_hwfn, p_ptt, addr, p_details->enable);
+
+ qed_fid_pretend(p_hwfn, p_ptt,
+ p_hwfn->rel_pf_id <<
+ PXP_PRETEND_CONCRETE_FID_PFID_SHIFT);
+ }
+
+ /* Filter value */
+ addr = NIG_REG_LLH_FUNC_FILTER_VALUE + 2 * filter_idx * 0x4;
+
+ params.flags = QED_DMAE_FLAG_PF_DST;
+ params.dst_pfid = pfid;
+ rc = qed_dmae_host2grc(p_hwfn,
+ p_ptt,
+ (u64)(uintptr_t)&p_details->value,
+ addr, 2 /* size_in_dwords */,
+ &params);
+ if (rc)
+ return rc;
+
+ qed_fid_pretend(p_hwfn, p_ptt,
+ pfid << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT);
+
+ /* Filter mode */
+ addr = NIG_REG_LLH_FUNC_FILTER_MODE + filter_idx * 0x4;
+ qed_wr(p_hwfn, p_ptt, addr, p_details->mode);
+
+ /* Filter protocol type */
+ addr = NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE + filter_idx * 0x4;
+ qed_wr(p_hwfn, p_ptt, addr, p_details->protocol_type);
+
+ /* Filter header select */
+ addr = NIG_REG_LLH_FUNC_FILTER_HDR_SEL + filter_idx * 0x4;
+ qed_wr(p_hwfn, p_ptt, addr, p_details->hdr_sel);
+
+ /* Filter enable - should be done last when adding a filter */
+ if (p_details->enable) {
+ addr = NIG_REG_LLH_FUNC_FILTER_EN + filter_idx * 0x4;
+ qed_wr(p_hwfn, p_ptt, addr, p_details->enable);
+ }
+
+ qed_fid_pretend(p_hwfn, p_ptt,
+ p_hwfn->rel_pf_id <<
+ PXP_PRETEND_CONCRETE_FID_PFID_SHIFT);
+
+ return 0;
+}
+
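A worked example of the PPFID-to-PFID pretend mapping described in the comment above, with hypothetical indices and port counts; the helper mirrors the BB and AH branches but is not driver code.

#include <stdio.h>

/* is_bb selects the BB branch; the AH branch uses the PF's relative
 * index on its port (mfw_port), as in the function above.
 */
static unsigned int ppfid_to_pfid(int is_bb, unsigned int abs_ppfid,
                                  unsigned int num_ports_in_engine,
                                  unsigned int mfw_port)
{
        if (is_bb)
                return abs_ppfid;
        return abs_ppfid * num_ports_in_engine + mfw_port;
}

int main(void)
{
        /* e.g. abs_ppfid 3 on an AH device with 2 ports per engine, port 1 */
        printf("AH pfid = %u\n", ppfid_to_pfid(0, 3, 2, 1));    /* 7 */
        printf("BB pfid = %u\n", ppfid_to_pfid(1, 3, 2, 1));    /* 3 */
        return 0;
}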
+static int
+qed_llh_add_filter(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u8 abs_ppfid,
+ u8 filter_idx, u8 filter_prot_type, u32 high, u32 low)
+{
+ struct qed_llh_filter_details filter_details;
+
+ filter_details.enable = 1;
+ filter_details.value = ((u64)high << 32) | low;
+ filter_details.hdr_sel = 0;
+ filter_details.protocol_type = filter_prot_type;
+ /* Mode: 0: MAC-address classification 1: protocol classification */
+ filter_details.mode = filter_prot_type ? 1 : 0;
+
+ return qed_llh_access_filter(p_hwfn, p_ptt, abs_ppfid, filter_idx,
+ &filter_details);
+}
+
+static int
+qed_llh_remove_filter(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 abs_ppfid, u8 filter_idx)
+{
+ struct qed_llh_filter_details filter_details = {0};
+
+ return qed_llh_access_filter(p_hwfn, p_ptt, abs_ppfid, filter_idx,
+ &filter_details);
+}
+
+int qed_llh_add_mac_filter(struct qed_dev *cdev,
+ u8 ppfid, u8 mac_addr[ETH_ALEN])
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+ union qed_llh_filter filter = {};
+ u8 filter_idx, abs_ppfid;
+ u32 high, low, ref_cnt;
+ int rc = 0;
+
+ if (!p_ptt)
+ return -EAGAIN;
+
+ if (!test_bit(QED_MF_LLH_MAC_CLSS, &cdev->mf_bits))
+ goto out;
+
+ memcpy(filter.mac.addr, mac_addr, ETH_ALEN);
+ rc = qed_llh_shadow_add_filter(cdev, ppfid,
+ QED_LLH_FILTER_TYPE_MAC,
+ &filter, &filter_idx, &ref_cnt);
+ if (rc)
+ goto err;
+
+ /* Configure the LLH only in case of a new filter */
+ if (ref_cnt == 1) {
+ rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid);
+ if (rc)
+ goto err;
+
+ high = mac_addr[1] | (mac_addr[0] << 8);
+ low = mac_addr[5] | (mac_addr[4] << 8) | (mac_addr[3] << 16) |
+ (mac_addr[2] << 24);
+ rc = qed_llh_add_filter(p_hwfn, p_ptt, abs_ppfid, filter_idx,
+ 0, high, low);
+ if (rc)
+ goto err;
+ }
+
+ DP_VERBOSE(cdev,
+ QED_MSG_SP,
+ "LLH: Added MAC filter [%pM] to ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n",
+ mac_addr, ppfid, abs_ppfid, filter_idx, ref_cnt);
+
+ goto out;
+
+err: DP_NOTICE(cdev,
+ "LLH: Failed to add MAC filter [%pM] to ppfid %hhd\n",
+ mac_addr, ppfid);
+out:
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
+
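The high/low words programmed above carry the MAC address byte-swapped across two 32-bit registers; the standalone sketch below checks the packing with a made-up address.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* hypothetical MAC aa:bb:cc:dd:ee:ff */
        uint8_t mac[6] = { 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff };
        uint32_t high, low;

        /* same packing as in qed_llh_add_mac_filter() above */
        high = mac[1] | (mac[0] << 8);
        low = mac[5] | (mac[4] << 8) | (mac[3] << 16) |
              ((uint32_t)mac[2] << 24);

        /* the 64-bit filter value then becomes ((u64)high << 32) | low */
        printf("high = 0x%08x, low = 0x%08x\n",
               (unsigned)high, (unsigned)low);  /* 0x0000aabb, 0xccddeeff */
        return 0;
}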
+static int
+qed_llh_protocol_filter_stringify(struct qed_dev *cdev,
+ enum qed_llh_prot_filter_type_t type,
+ u16 source_port_or_eth_type,
+ u16 dest_port, u8 *str, size_t str_len)
+{
+ switch (type) {
+ case QED_LLH_FILTER_ETHERTYPE:
+ snprintf(str, str_len, "Ethertype 0x%04x",
+ source_port_or_eth_type);
+ break;
+ case QED_LLH_FILTER_TCP_SRC_PORT:
+ snprintf(str, str_len, "TCP src port 0x%04x",
+ source_port_or_eth_type);
+ break;
+ case QED_LLH_FILTER_UDP_SRC_PORT:
+ snprintf(str, str_len, "UDP src port 0x%04x",
+ source_port_or_eth_type);
+ break;
+ case QED_LLH_FILTER_TCP_DEST_PORT:
+ snprintf(str, str_len, "TCP dst port 0x%04x", dest_port);
+ break;
+ case QED_LLH_FILTER_UDP_DEST_PORT:
+ snprintf(str, str_len, "UDP dst port 0x%04x", dest_port);
+ break;
+ case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
+ snprintf(str, str_len, "TCP src/dst ports 0x%04x/0x%04x",
+ source_port_or_eth_type, dest_port);
+ break;
+ case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
+ snprintf(str, str_len, "UDP src/dst ports 0x%04x/0x%04x",
+ source_port_or_eth_type, dest_port);
+ break;
+ default:
+ DP_NOTICE(cdev,
+ "Non valid LLH protocol filter type %d\n", type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+qed_llh_protocol_filter_to_hilo(struct qed_dev *cdev,
+ enum qed_llh_prot_filter_type_t type,
+ u16 source_port_or_eth_type,
+ u16 dest_port, u32 *p_high, u32 *p_low)
+{
+ *p_high = 0;
+ *p_low = 0;
+
+ switch (type) {
+ case QED_LLH_FILTER_ETHERTYPE:
+ *p_high = source_port_or_eth_type;
+ break;
+ case QED_LLH_FILTER_TCP_SRC_PORT:
+ case QED_LLH_FILTER_UDP_SRC_PORT:
+ *p_low = source_port_or_eth_type << 16;
+ break;
+ case QED_LLH_FILTER_TCP_DEST_PORT:
+ case QED_LLH_FILTER_UDP_DEST_PORT:
+ *p_low = dest_port;
+ break;
+ case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
+ case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
+ *p_low = (source_port_or_eth_type << 16) | dest_port;
+ break;
+ default:
+ DP_NOTICE(cdev,
+ "Non valid LLH protocol filter type %d\n", type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
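A worked example of the (high, low) encoding above, using the FCoE ethertype (0x8906) and made-up TCP ports; standalone C, not driver code.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t eth_type = 0x8906;             /* ETH_P_FCOE */
        uint16_t src = 0x1234, dst = 0x5678;    /* made-up ports */
        uint32_t high, low;

        /* ETHERTYPE: the ethertype lands in the high word */
        high = eth_type;
        low = 0;
        printf("ethertype: high=0x%08x low=0x%08x\n", (unsigned)high, (unsigned)low);

        /* TCP/UDP src+dst ports: both ports share the low word */
        high = 0;
        low = ((uint32_t)src << 16) | dst;
        printf("src+dst:   high=0x%08x low=0x%08x\n", (unsigned)high, (unsigned)low);
        return 0;
}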
+int
+qed_llh_add_protocol_filter(struct qed_dev *cdev,
+ u8 ppfid,
+ enum qed_llh_prot_filter_type_t type,
+ u16 source_port_or_eth_type, u16 dest_port)
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+ u8 filter_idx, abs_ppfid, str[32], type_bitmap;
+ union qed_llh_filter filter = {};
+ u32 high, low, ref_cnt;
+ int rc = 0;
+
+ if (!p_ptt)
+ return -EAGAIN;
+
+ if (!test_bit(QED_MF_LLH_PROTO_CLSS, &cdev->mf_bits))
+ goto out;
+
+ rc = qed_llh_protocol_filter_stringify(cdev, type,
+ source_port_or_eth_type,
+ dest_port, str, sizeof(str));
+ if (rc)
+ goto err;
+
+ filter.protocol.type = type;
+ filter.protocol.source_port_or_eth_type = source_port_or_eth_type;
+ filter.protocol.dest_port = dest_port;
+ rc = qed_llh_shadow_add_filter(cdev,
+ ppfid,
+ QED_LLH_FILTER_TYPE_PROTOCOL,
+ &filter, &filter_idx, &ref_cnt);
+ if (rc)
+ goto err;
+
+ rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid);
+ if (rc)
+ goto err;
+
+ /* Configure the LLH only in case of a new filter */
+ if (ref_cnt == 1) {
+ rc = qed_llh_protocol_filter_to_hilo(cdev, type,
+ source_port_or_eth_type,
+ dest_port, &high, &low);
+ if (rc)
+ goto err;
+
+ type_bitmap = 0x1 << type;
+ rc = qed_llh_add_filter(p_hwfn, p_ptt, abs_ppfid,
+ filter_idx, type_bitmap, high, low);
+ if (rc)
+ goto err;
+ }
+
+ DP_VERBOSE(cdev,
+ QED_MSG_SP,
+ "LLH: Added protocol filter [%s] to ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n",
+ str, ppfid, abs_ppfid, filter_idx, ref_cnt);
+
+ goto out;
+
+err: DP_NOTICE(p_hwfn,
+ "LLH: Failed to add protocol filter [%s] to ppfid %hhd\n",
+ str, ppfid);
+out:
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
+
+void qed_llh_remove_mac_filter(struct qed_dev *cdev,
+ u8 ppfid, u8 mac_addr[ETH_ALEN])
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+ union qed_llh_filter filter = {};
+ u8 filter_idx, abs_ppfid;
+ int rc = 0;
+ u32 ref_cnt;
+
+ if (!p_ptt)
+ return;
+
+ if (!test_bit(QED_MF_LLH_MAC_CLSS, &cdev->mf_bits))
+ goto out;
+
+ ether_addr_copy(filter.mac.addr, mac_addr);
+ rc = qed_llh_shadow_remove_filter(cdev, ppfid, &filter, &filter_idx,
+ &ref_cnt);
+ if (rc)
+ goto err;
+
+ rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid);
+ if (rc)
+ goto err;
+
+ /* Remove from the LLH in case the filter is not in use */
+ if (!ref_cnt) {
+ rc = qed_llh_remove_filter(p_hwfn, p_ptt, abs_ppfid,
+ filter_idx);
+ if (rc)
+ goto err;
+ }
+
+ DP_VERBOSE(cdev,
+ QED_MSG_SP,
+ "LLH: Removed MAC filter [%pM] from ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n",
+ mac_addr, ppfid, abs_ppfid, filter_idx, ref_cnt);
+
+ goto out;
+
+err: DP_NOTICE(cdev,
+ "LLH: Failed to remove MAC filter [%pM] from ppfid %hhd\n",
+ mac_addr, ppfid);
+out:
+ qed_ptt_release(p_hwfn, p_ptt);
+}
+
+void qed_llh_remove_protocol_filter(struct qed_dev *cdev,
+ u8 ppfid,
+ enum qed_llh_prot_filter_type_t type,
+ u16 source_port_or_eth_type, u16 dest_port)
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+ u8 filter_idx, abs_ppfid, str[32];
+ union qed_llh_filter filter = {};
+ int rc = 0;
+ u32 ref_cnt;
+
+ if (!p_ptt)
+ return;
+
+ if (!test_bit(QED_MF_LLH_PROTO_CLSS, &cdev->mf_bits))
+ goto out;
+
+ rc = qed_llh_protocol_filter_stringify(cdev, type,
+ source_port_or_eth_type,
+ dest_port, str, sizeof(str));
+ if (rc)
+ goto err;
+
+ filter.protocol.type = type;
+ filter.protocol.source_port_or_eth_type = source_port_or_eth_type;
+ filter.protocol.dest_port = dest_port;
+ rc = qed_llh_shadow_remove_filter(cdev, ppfid, &filter, &filter_idx,
+ &ref_cnt);
+ if (rc)
+ goto err;
+
+ rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid);
+ if (rc)
+ goto err;
+
+ /* Remove from the LLH in case the filter is not in use */
+ if (!ref_cnt) {
+ rc = qed_llh_remove_filter(p_hwfn, p_ptt, abs_ppfid,
+ filter_idx);
+ if (rc)
+ goto err;
+ }
+
+ DP_VERBOSE(cdev,
+ QED_MSG_SP,
+ "LLH: Removed protocol filter [%s] from ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n",
+ str, ppfid, abs_ppfid, filter_idx, ref_cnt);
+
+ goto out;
+
+err: DP_NOTICE(cdev,
+ "LLH: Failed to remove protocol filter [%s] from ppfid %hhd\n",
+ str, ppfid);
+out:
+ qed_ptt_release(p_hwfn, p_ptt);
+}
+
+/******************************* NIG LLH - End ********************************/
+
#define QED_MIN_DPIS (4)
#define QED_MIN_PWM_REGION (QED_WID_SIZE * QED_MIN_DPIS)
@@ -461,6 +1382,8 @@ void qed_resc_free(struct qed_dev *cdev)
kfree(cdev->reset_stats);
cdev->reset_stats = NULL;
+ qed_llh_free(cdev);
+
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
@@ -1428,6 +2351,13 @@ int qed_resc_alloc(struct qed_dev *cdev)
goto alloc_err;
}
+ rc = qed_llh_alloc(cdev);
+ if (rc) {
+ DP_NOTICE(cdev,
+ "Failed to allocate memory for the llh_info structure\n");
+ goto alloc_err;
+ }
+
cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL);
if (!cdev->reset_stats)
goto alloc_no_mem;
@@ -1879,6 +2809,10 @@ static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
{
int rc = 0;
+ /* In CMT the gate should be cleared by the 2nd hwfn */
+ if (!QED_IS_CMT(p_hwfn->cdev) || !IS_LEAD_HWFN(p_hwfn))
+ STORE_RT_REG(p_hwfn, NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET, 0);
+
rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id, hw_mode);
if (rc)
return rc;
@@ -1964,6 +2898,13 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
if (rc)
return rc;
+ /* Use the leading hwfn since in CMT only NIG #0 is operational */
+ if (IS_LEAD_HWFN(p_hwfn)) {
+ rc = qed_llh_hw_init_pf(p_hwfn, p_ptt);
+ if (rc)
+ return rc;
+ }
+
if (b_hw_start) {
/* enable interrupts */
qed_int_igu_enable(p_hwfn, p_ptt, int_mode);
@@ -2393,6 +3334,12 @@ int qed_hw_stop(struct qed_dev *cdev)
qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DB_ENABLE, 0);
qed_wr(p_hwfn, p_ptt, QM_REG_PF_EN, 0);
+ if (IS_LEAD_HWFN(p_hwfn) &&
+ test_bit(QED_MF_LLH_MAC_CLSS, &cdev->mf_bits) &&
+ !QED_IS_FCOE_PERSONALITY(p_hwfn))
+ qed_llh_remove_mac_filter(cdev, 0,
+ p_hwfn->hw_info.hw_mac_addr);
+
if (!cdev->recov_in_prog) {
rc = qed_mcp_unload_done(p_hwfn, p_ptt);
if (rc) {
@@ -2868,6 +3815,36 @@ static int qed_hw_set_resc_info(struct qed_hwfn *p_hwfn)
return 0;
}
+static int qed_hw_get_ppfid_bitmap(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct qed_dev *cdev = p_hwfn->cdev;
+ u8 native_ppfid_idx;
+ int rc;
+
+ /* Calculation of BB/AH is different for native_ppfid_idx */
+ if (QED_IS_BB(cdev))
+ native_ppfid_idx = p_hwfn->rel_pf_id;
+ else
+ native_ppfid_idx = p_hwfn->rel_pf_id /
+ cdev->num_ports_in_engine;
+
+ rc = qed_mcp_get_ppfid_bitmap(p_hwfn, p_ptt);
+ if (rc != 0 && rc != -EOPNOTSUPP)
+ return rc;
+ else if (rc == -EOPNOTSUPP)
+ cdev->ppfid_bitmap = 0x1 << native_ppfid_idx;
+
+ if (!(cdev->ppfid_bitmap & (0x1 << native_ppfid_idx))) {
+ DP_INFO(p_hwfn,
+ "Fix the PPFID bitmap to include the native PPFID [native_ppfid_idx %hhd, orig_bitmap 0x%hhx]\n",
+ native_ppfid_idx, cdev->ppfid_bitmap);
+ cdev->ppfid_bitmap = 0x1 << native_ppfid_idx;
+ }
+
+ return 0;
+}
+
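A worked example of the native-PPFID arithmetic and bitmap fix-up above, with a hypothetical PF index, port count and MFW-reported bitmap; standalone C, not driver code.

#include <stdio.h>

int main(void)
{
        unsigned int rel_pf_id = 5, num_ports_in_engine = 2;
        unsigned int native_bb = rel_pf_id;                       /* BB: 5 */
        unsigned int native_ah = rel_pf_id / num_ports_in_engine; /* AH: 2 */
        unsigned int bitmap = 0x08;     /* hypothetical value from the MFW */

        /* the native PPFID must be part of the bitmap, otherwise fall back
         * to a bitmap that contains only the native PPFID
         */
        if (!(bitmap & (1u << native_ah)))
                bitmap = 1u << native_ah;

        printf("native: BB=%u AH=%u, fixed bitmap=0x%02x\n",
               native_bb, native_ah, bitmap);   /* fixed bitmap = 0x04 */
        return 0;
}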
static int qed_hw_get_resc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct qed_resc_unlock_params resc_unlock_params;
@@ -2925,6 +3902,13 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
"Failed to release the resource lock for the resource allocation commands\n");
}
+ /* PPFID bitmap */
+ if (IS_LEAD_HWFN(p_hwfn)) {
+ rc = qed_hw_get_ppfid_bitmap(p_hwfn, p_ptt);
+ if (rc)
+ return rc;
+ }
+
/* Sanity for ILT */
if ((b_ah && (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_K2)) ||
(!b_ah && (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_BB))) {
@@ -3443,6 +4427,7 @@ static void qed_nvm_info_free(struct qed_hwfn *p_hwfn)
static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
void __iomem *p_regview,
void __iomem *p_doorbells,
+ u64 db_phys_addr,
enum qed_pci_personality personality)
{
struct qed_dev *cdev = p_hwfn->cdev;
@@ -3451,6 +4436,7 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
/* Split PCI bars evenly between hwfns */
p_hwfn->regview = p_regview;
p_hwfn->doorbells = p_doorbells;
+ p_hwfn->db_phys_addr = db_phys_addr;
if (IS_VF(p_hwfn->cdev))
return qed_vf_hw_prepare(p_hwfn);
@@ -3546,7 +4532,9 @@ int qed_hw_prepare(struct qed_dev *cdev,
/* Initialize the first hwfn - will learn number of hwfns */
rc = qed_hw_prepare_single(p_hwfn,
cdev->regview,
- cdev->doorbells, personality);
+ cdev->doorbells,
+ cdev->db_phys_addr,
+ personality);
if (rc)
return rc;
@@ -3555,22 +4543,25 @@ int qed_hw_prepare(struct qed_dev *cdev,
/* Initialize the rest of the hwfns */
if (cdev->num_hwfns > 1) {
void __iomem *p_regview, *p_doorbell;
- u8 __iomem *addr;
+ u64 db_phys_addr;
+ u32 offset;
/* adjust bar offset for second engine */
- addr = cdev->regview +
- qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt,
- BAR_ID_0) / 2;
- p_regview = addr;
+ offset = qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt,
+ BAR_ID_0) / 2;
+ p_regview = cdev->regview + offset;
+
+ offset = qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt,
+ BAR_ID_1) / 2;
- addr = cdev->doorbells +
- qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt,
- BAR_ID_1) / 2;
- p_doorbell = addr;
+ p_doorbell = cdev->doorbells + offset;
+
+ db_phys_addr = cdev->db_phys_addr + offset;
/* prepare second hw function */
rc = qed_hw_prepare_single(&cdev->hwfns[1], p_regview,
- p_doorbell, personality);
+ p_doorbell, db_phys_addr,
+ personality);
/* in case of error, need to free the previously
* initialized hwfn 0.
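For the half-BAR split above, a standalone arithmetic sketch with made-up BAR numbers: the same half-size offset that already located the second hwfn's ioremapped doorbells now also yields the physical doorbell address stored in db_phys_addr.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t db_phys_base = 0xf0000000ULL;  /* hypothetical BAR_ID_1 base */
        uint32_t bar1_size = 512 * 1024;        /* hypothetical doorbell BAR size */
        uint32_t offset = bar1_size / 2;        /* second engine: upper half */

        printf("hwfn[1] doorbell phys = 0x%llx (offset 0x%x)\n",
               (unsigned long long)(db_phys_base + offset), (unsigned)offset);
        return 0;
}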
@@ -3951,269 +4942,6 @@ int qed_fw_rss_eng(struct qed_hwfn *p_hwfn, u8 src_id, u8 *dst_id)
return 0;
}
-static void qed_llh_mac_to_filter(u32 *p_high, u32 *p_low,
- u8 *p_filter)
-{
- *p_high = p_filter[1] | (p_filter[0] << 8);
- *p_low = p_filter[5] | (p_filter[4] << 8) |
- (p_filter[3] << 16) | (p_filter[2] << 24);
-}
-
-int qed_llh_add_mac_filter(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u8 *p_filter)
-{
- u32 high = 0, low = 0, en;
- int i;
-
- if (!test_bit(QED_MF_LLH_MAC_CLSS, &p_hwfn->cdev->mf_bits))
- return 0;
-
- qed_llh_mac_to_filter(&high, &low, p_filter);
-
- /* Find a free entry and utilize it */
- for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
- en = qed_rd(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32));
- if (en)
- continue;
- qed_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE +
- 2 * i * sizeof(u32), low);
- qed_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE +
- (2 * i + 1) * sizeof(u32), high);
- qed_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 0);
- qed_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
- i * sizeof(u32), 0);
- qed_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 1);
- break;
- }
- if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) {
- DP_NOTICE(p_hwfn,
- "Failed to find an empty LLH filter to utilize\n");
- return -EINVAL;
- }
-
- DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
- "mac: %pM is added at %d\n",
- p_filter, i);
-
- return 0;
-}
-
-void qed_llh_remove_mac_filter(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u8 *p_filter)
-{
- u32 high = 0, low = 0;
- int i;
-
- if (!test_bit(QED_MF_LLH_MAC_CLSS, &p_hwfn->cdev->mf_bits))
- return;
-
- qed_llh_mac_to_filter(&high, &low, p_filter);
-
- /* Find the entry and clean it */
- for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
- if (qed_rd(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE +
- 2 * i * sizeof(u32)) != low)
- continue;
- if (qed_rd(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE +
- (2 * i + 1) * sizeof(u32)) != high)
- continue;
-
- qed_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0);
- qed_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE + 2 * i * sizeof(u32), 0);
- qed_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE +
- (2 * i + 1) * sizeof(u32), 0);
-
- DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
- "mac: %pM is removed from %d\n",
- p_filter, i);
- break;
- }
- if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
- DP_NOTICE(p_hwfn, "Tried to remove a non-configured filter\n");
-}
-
-int
-qed_llh_add_protocol_filter(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u16 source_port_or_eth_type,
- u16 dest_port, enum qed_llh_port_filter_type_t type)
-{
- u32 high = 0, low = 0, en;
- int i;
-
- if (!test_bit(QED_MF_LLH_PROTO_CLSS, &p_hwfn->cdev->mf_bits))
- return 0;
-
- switch (type) {
- case QED_LLH_FILTER_ETHERTYPE:
- high = source_port_or_eth_type;
- break;
- case QED_LLH_FILTER_TCP_SRC_PORT:
- case QED_LLH_FILTER_UDP_SRC_PORT:
- low = source_port_or_eth_type << 16;
- break;
- case QED_LLH_FILTER_TCP_DEST_PORT:
- case QED_LLH_FILTER_UDP_DEST_PORT:
- low = dest_port;
- break;
- case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
- case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
- low = (source_port_or_eth_type << 16) | dest_port;
- break;
- default:
- DP_NOTICE(p_hwfn,
- "Non valid LLH protocol filter type %d\n", type);
- return -EINVAL;
- }
- /* Find a free entry and utilize it */
- for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
- en = qed_rd(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32));
- if (en)
- continue;
- qed_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE +
- 2 * i * sizeof(u32), low);
- qed_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE +
- (2 * i + 1) * sizeof(u32), high);
- qed_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 1);
- qed_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
- i * sizeof(u32), 1 << type);
- qed_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 1);
- break;
- }
- if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) {
- DP_NOTICE(p_hwfn,
- "Failed to find an empty LLH filter to utilize\n");
- return -EINVAL;
- }
- switch (type) {
- case QED_LLH_FILTER_ETHERTYPE:
- DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
- "ETH type %x is added at %d\n",
- source_port_or_eth_type, i);
- break;
- case QED_LLH_FILTER_TCP_SRC_PORT:
- DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
- "TCP src port %x is added at %d\n",
- source_port_or_eth_type, i);
- break;
- case QED_LLH_FILTER_UDP_SRC_PORT:
- DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
- "UDP src port %x is added at %d\n",
- source_port_or_eth_type, i);
- break;
- case QED_LLH_FILTER_TCP_DEST_PORT:
- DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
- "TCP dst port %x is added at %d\n", dest_port, i);
- break;
- case QED_LLH_FILTER_UDP_DEST_PORT:
- DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
- "UDP dst port %x is added at %d\n", dest_port, i);
- break;
- case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
- DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
- "TCP src/dst ports %x/%x are added at %d\n",
- source_port_or_eth_type, dest_port, i);
- break;
- case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
- DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
- "UDP src/dst ports %x/%x are added at %d\n",
- source_port_or_eth_type, dest_port, i);
- break;
- }
- return 0;
-}
-
-void
-qed_llh_remove_protocol_filter(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u16 source_port_or_eth_type,
- u16 dest_port,
- enum qed_llh_port_filter_type_t type)
-{
- u32 high = 0, low = 0;
- int i;
-
- if (!test_bit(QED_MF_LLH_PROTO_CLSS, &p_hwfn->cdev->mf_bits))
- return;
-
- switch (type) {
- case QED_LLH_FILTER_ETHERTYPE:
- high = source_port_or_eth_type;
- break;
- case QED_LLH_FILTER_TCP_SRC_PORT:
- case QED_LLH_FILTER_UDP_SRC_PORT:
- low = source_port_or_eth_type << 16;
- break;
- case QED_LLH_FILTER_TCP_DEST_PORT:
- case QED_LLH_FILTER_UDP_DEST_PORT:
- low = dest_port;
- break;
- case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
- case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
- low = (source_port_or_eth_type << 16) | dest_port;
- break;
- default:
- DP_NOTICE(p_hwfn,
- "Non valid LLH protocol filter type %d\n", type);
- return;
- }
-
- for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
- if (!qed_rd(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32)))
- continue;
- if (!qed_rd(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32)))
- continue;
- if (!(qed_rd(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
- i * sizeof(u32)) & BIT(type)))
- continue;
- if (qed_rd(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE +
- 2 * i * sizeof(u32)) != low)
- continue;
- if (qed_rd(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE +
- (2 * i + 1) * sizeof(u32)) != high)
- continue;
-
- qed_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0);
- qed_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 0);
- qed_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
- i * sizeof(u32), 0);
- qed_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE + 2 * i * sizeof(u32), 0);
- qed_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE +
- (2 * i + 1) * sizeof(u32), 0);
- break;
- }
-
- if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
- DP_NOTICE(p_hwfn, "Tried to remove a non-configured filter\n");
-}
-
static int qed_set_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
u32 hw_addr, void *p_eth_qzone,
size_t eth_qzone_size, u8 timeset)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
index e4b4e3b78e8a..47376d4d071f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
@@ -241,11 +241,17 @@ enum qed_dmae_address_type_t {
#define QED_DMAE_FLAG_VF_SRC 0x00000002
#define QED_DMAE_FLAG_VF_DST 0x00000004
#define QED_DMAE_FLAG_COMPLETION_DST 0x00000008
+#define QED_DMAE_FLAG_PORT 0x00000010
+#define QED_DMAE_FLAG_PF_SRC 0x00000020
+#define QED_DMAE_FLAG_PF_DST 0x00000040
struct qed_dmae_params {
u32 flags; /* consists of QED_DMAE_FLAG_* values */
u8 src_vfid;
u8 dst_vfid;
+ u8 port_id;
+ u8 src_pfid;
+ u8 dst_pfid;
};
/**
@@ -257,7 +263,7 @@ struct qed_dmae_params {
* @param source_addr
* @param grc_addr (dmae_data_offset)
* @param size_in_dwords
- * @param flags (one of the flags defined above)
+ * @param p_params (default parameters will be used in case of NULL)
*/
int
qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
@@ -265,7 +271,7 @@ qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
u64 source_addr,
u32 grc_addr,
u32 size_in_dwords,
- u32 flags);
+ struct qed_dmae_params *p_params);
/**
* @brief qed_dmae_grc2host - Read data from dmae data offset
@@ -275,11 +281,11 @@ qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
* @param grc_addr (dmae_data_offset)
* @param dest_addr
* @param size_in_dwords
- * @param flags - one of the flags defined above
+ * @param p_params (default parameters will be used in case of NULL)
*/
int qed_dmae_grc2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
u32 grc_addr, dma_addr_t dest_addr, u32 size_in_dwords,
- u32 flags);
+ struct qed_dmae_params *p_params);
/**
* @brief qed_dmae_host2host - copy data from to source address
@@ -290,7 +296,7 @@ int qed_dmae_grc2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
* @param source_addr
* @param dest_addr
* @param size_in_dwords
- * @param params
+ * @param p_params (default parameters will be used in case of NULL)
*/
int qed_dmae_host2host(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -368,26 +374,66 @@ int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
u8 *dst_id);
/**
- * @brief qed_llh_add_mac_filter - configures a MAC filter in llh
+ * @brief qed_llh_get_num_ppfid - Return the number of LLH filter banks
+ * that are allocated to the PF.
*
- * @param p_hwfn
- * @param p_ptt
- * @param p_filter - MAC to add
+ * @param cdev
+ *
+ * @return u8 - Number of LLH filter banks
*/
-int qed_llh_add_mac_filter(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u8 *p_filter);
+u8 qed_llh_get_num_ppfid(struct qed_dev *cdev);
+
+enum qed_eng {
+ QED_ENG0,
+ QED_ENG1,
+ QED_BOTH_ENG,
+};
/**
- * @brief qed_llh_remove_mac_filter - removes a MAC filter from llh
+ * @brief qed_llh_set_ppfid_affinity - Set the engine affinity for the given
+ * LLH filter bank.
+ *
+ * @param cdev
+ * @param ppfid - relative within the allocated ppfids ('0' is the default one).
+ * @param eng
+ *
+ * @return int
+ */
+int qed_llh_set_ppfid_affinity(struct qed_dev *cdev,
+ u8 ppfid, enum qed_eng eng);
+
+/**
+ * @brief qed_llh_set_roce_affinity - Set the RoCE engine affinity
+ *
+ * @param cdev
+ * @param eng
+ *
+ * @return int
+ */
+int qed_llh_set_roce_affinity(struct qed_dev *cdev, enum qed_eng eng);
+
+/**
+ * @brief qed_llh_add_mac_filter - Add a LLH MAC filter into the given filter
+ * bank.
+ *
+ * @param cdev
+ * @param ppfid - relative within the allocated ppfids ('0' is the default one).
+ * @param mac_addr - MAC to add
+ */
+int qed_llh_add_mac_filter(struct qed_dev *cdev,
+ u8 ppfid, u8 mac_addr[ETH_ALEN]);
+
+/**
+ * @brief qed_llh_remove_mac_filter - Remove a LLH MAC filter from the given
+ * filter bank.
*
- * @param p_hwfn
* @param p_ptt
* @param p_filter - MAC to remove
*/
-void qed_llh_remove_mac_filter(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u8 *p_filter);
+void qed_llh_remove_mac_filter(struct qed_dev *cdev,
+ u8 ppfid, u8 mac_addr[ETH_ALEN]);
-enum qed_llh_port_filter_type_t {
+enum qed_llh_prot_filter_type_t {
QED_LLH_FILTER_ETHERTYPE,
QED_LLH_FILTER_TCP_SRC_PORT,
QED_LLH_FILTER_TCP_DEST_PORT,
@@ -398,36 +444,37 @@ enum qed_llh_port_filter_type_t {
};
/**
- * @brief qed_llh_add_protocol_filter - configures a protocol filter in llh
+ * @brief qed_llh_add_protocol_filter - Add a LLH protocol filter into the
+ * given filter bank.
*
- * @param p_hwfn
- * @param p_ptt
+ * @param cdev
+ * @param ppfid - relative within the allocated ppfids ('0' is the default one).
+ * @param type - type of filters and comparing
* @param source_port_or_eth_type - source port or ethertype to add
* @param dest_port - destination port to add
* @param type - type of filters and comparing
*/
int
-qed_llh_add_protocol_filter(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u16 source_port_or_eth_type,
- u16 dest_port,
- enum qed_llh_port_filter_type_t type);
+qed_llh_add_protocol_filter(struct qed_dev *cdev,
+ u8 ppfid,
+ enum qed_llh_prot_filter_type_t type,
+ u16 source_port_or_eth_type, u16 dest_port);
/**
- * @brief qed_llh_remove_protocol_filter - remove a protocol filter in llh
+ * @brief qed_llh_remove_protocol_filter - Remove a LLH protocol filter from
+ * the given filter bank.
*
- * @param p_hwfn
- * @param p_ptt
+ * @param cdev
+ * @param ppfid - relative within the allocated ppfids ('0' is the default one).
+ * @param type - type of filters and comparing
* @param source_port_or_eth_type - source port or ethertype to add
* @param dest_port - destination port to add
- * @param type - type of filters and comparing
*/
void
-qed_llh_remove_protocol_filter(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u16 source_port_or_eth_type,
- u16 dest_port,
- enum qed_llh_port_filter_type_t type);
+qed_llh_remove_protocol_filter(struct qed_dev *cdev,
+ u8 ppfid,
+ enum qed_llh_prot_filter_type_t type,
+ u16 source_port_or_eth_type, u16 dest_port);
/**
* *@brief Cleanup of previous driver remains prior to load
diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
index 46dc93d3b9b5..de31a382f58e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
@@ -745,7 +745,7 @@ struct qed_hash_fcoe_con {
static int qed_fill_fcoe_dev_info(struct qed_dev *cdev,
struct qed_dev_fcoe_info *info)
{
- struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_hwfn *hwfn = QED_AFFIN_HWFN(cdev);
int rc;
memset(info, 0, sizeof(*info));
@@ -806,15 +806,15 @@ static int qed_fcoe_stop(struct qed_dev *cdev)
return -EINVAL;
}
- p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
+ p_ptt = qed_ptt_acquire(QED_AFFIN_HWFN(cdev));
if (!p_ptt)
return -EAGAIN;
/* Stop the fcoe */
- rc = qed_sp_fcoe_func_stop(QED_LEADING_HWFN(cdev), p_ptt,
+ rc = qed_sp_fcoe_func_stop(QED_AFFIN_HWFN(cdev), p_ptt,
QED_SPQ_MODE_EBLOCK, NULL);
cdev->flags &= ~QED_FLAG_STORAGE_STARTED;
- qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
+ qed_ptt_release(QED_AFFIN_HWFN(cdev), p_ptt);
return rc;
}
@@ -828,8 +828,8 @@ static int qed_fcoe_start(struct qed_dev *cdev, struct qed_fcoe_tid *tasks)
return 0;
}
- rc = qed_sp_fcoe_func_start(QED_LEADING_HWFN(cdev),
- QED_SPQ_MODE_EBLOCK, NULL);
+ rc = qed_sp_fcoe_func_start(QED_AFFIN_HWFN(cdev), QED_SPQ_MODE_EBLOCK,
+ NULL);
if (rc) {
DP_NOTICE(cdev, "Failed to start fcoe\n");
return rc;
@@ -849,7 +849,7 @@ static int qed_fcoe_start(struct qed_dev *cdev, struct qed_fcoe_tid *tasks)
return -ENOMEM;
}
- rc = qed_cxt_get_tid_mem_info(QED_LEADING_HWFN(cdev), tid_info);
+ rc = qed_cxt_get_tid_mem_info(QED_AFFIN_HWFN(cdev), tid_info);
if (rc) {
DP_NOTICE(cdev, "Failed to gather task information\n");
qed_fcoe_stop(cdev);
@@ -884,7 +884,7 @@ static int qed_fcoe_acquire_conn(struct qed_dev *cdev,
}
/* Acquire the connection */
- rc = qed_fcoe_acquire_connection(QED_LEADING_HWFN(cdev), NULL,
+ rc = qed_fcoe_acquire_connection(QED_AFFIN_HWFN(cdev), NULL,
&hash_con->con);
if (rc) {
DP_NOTICE(cdev, "Failed to acquire Connection\n");
@@ -898,7 +898,7 @@ static int qed_fcoe_acquire_conn(struct qed_dev *cdev,
hash_add(cdev->connections, &hash_con->node, *handle);
if (p_doorbell)
- *p_doorbell = qed_fcoe_get_db_addr(QED_LEADING_HWFN(cdev),
+ *p_doorbell = qed_fcoe_get_db_addr(QED_AFFIN_HWFN(cdev),
*handle);
return 0;
@@ -916,7 +916,7 @@ static int qed_fcoe_release_conn(struct qed_dev *cdev, u32 handle)
}
hlist_del(&hash_con->node);
- qed_fcoe_release_connection(QED_LEADING_HWFN(cdev), hash_con->con);
+ qed_fcoe_release_connection(QED_AFFIN_HWFN(cdev), hash_con->con);
kfree(hash_con);
return 0;
@@ -971,7 +971,7 @@ static int qed_fcoe_offload_conn(struct qed_dev *cdev,
con->d_id.addr_mid = conn_info->d_id.addr_mid;
con->d_id.addr_lo = conn_info->d_id.addr_lo;
- return qed_sp_fcoe_conn_offload(QED_LEADING_HWFN(cdev), con,
+ return qed_sp_fcoe_conn_offload(QED_AFFIN_HWFN(cdev), con,
QED_SPQ_MODE_EBLOCK, NULL);
}
@@ -992,13 +992,13 @@ static int qed_fcoe_destroy_conn(struct qed_dev *cdev,
con = hash_con->con;
con->terminate_params = terminate_params;
- return qed_sp_fcoe_conn_destroy(QED_LEADING_HWFN(cdev), con,
+ return qed_sp_fcoe_conn_destroy(QED_AFFIN_HWFN(cdev), con,
QED_SPQ_MODE_EBLOCK, NULL);
}
static int qed_fcoe_stats(struct qed_dev *cdev, struct qed_fcoe_stats *stats)
{
- return qed_fcoe_get_stats(QED_LEADING_HWFN(cdev), stats);
+ return qed_fcoe_get_stats(QED_AFFIN_HWFN(cdev), stats);
}
void qed_get_protocol_stats_fcoe(struct qed_dev *cdev,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 37edaa847512..e054f6c69e3a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -12612,8 +12612,10 @@ struct public_drv_mb {
#define DRV_MSG_CODE_BIST_TEST 0x001e0000
#define DRV_MSG_CODE_SET_LED_MODE 0x00200000
-#define DRV_MSG_CODE_RESOURCE_CMD 0x00230000
+#define DRV_MSG_CODE_RESOURCE_CMD 0x00230000
#define DRV_MSG_CODE_GET_TLV_DONE 0x002f0000
+#define DRV_MSG_CODE_GET_ENGINE_CONFIG 0x00370000
+#define DRV_MSG_CODE_GET_PPFID_BITMAP 0x43000000
#define RESOURCE_CMD_REQ_RESC_MASK 0x0000001F
#define RESOURCE_CMD_REQ_RESC_SHIFT 0
@@ -12802,6 +12804,18 @@ struct public_drv_mb {
#define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR (1 << 0)
+#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID_MASK 0x00000001
+#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID_SHIFT 0
+#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE_MASK 0x00000002
+#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE_SHIFT 1
+#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID_MASK 0x00000004
+#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID_SHIFT 2
+#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE_MASK 0x00000008
+#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE_SHIFT 3
+
+#define FW_MB_PARAM_PPFID_BITMAP_MASK 0xFF
+#define FW_MB_PARAM_PPFID_BITMAP_SHIFT 0
+
u32 drv_pulse_mb;
#define DRV_PULSE_SEQ_MASK 0x00007fff
#define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c
index 72ec1c6bdf70..a4de9e3ef72c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c
@@ -392,11 +392,15 @@ u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid)
}
/* DMAE */
+#define QED_DMAE_FLAGS_IS_SET(params, flag) \
+ ((params) != NULL && ((params)->flags & QED_DMAE_FLAG_##flag))
+
static void qed_dmae_opcode(struct qed_hwfn *p_hwfn,
const u8 is_src_type_grc,
const u8 is_dst_type_grc,
struct qed_dmae_params *p_params)
{
+ u8 src_pfid, dst_pfid, port_id;
u16 opcode_b = 0;
u32 opcode = 0;
@@ -407,14 +411,18 @@ static void qed_dmae_opcode(struct qed_hwfn *p_hwfn,
opcode |= (is_src_type_grc ? DMAE_CMD_SRC_MASK_GRC
: DMAE_CMD_SRC_MASK_PCIE) <<
DMAE_CMD_SRC_SHIFT;
- opcode |= ((p_hwfn->rel_pf_id & DMAE_CMD_SRC_PF_ID_MASK) <<
+ src_pfid = QED_DMAE_FLAGS_IS_SET(p_params, PF_SRC) ?
+ p_params->src_pfid : p_hwfn->rel_pf_id;
+ opcode |= ((src_pfid & DMAE_CMD_SRC_PF_ID_MASK) <<
DMAE_CMD_SRC_PF_ID_SHIFT);
/* The destination of the DMA can be: 0-None 1-PCIe 2-GRC 3-None */
opcode |= (is_dst_type_grc ? DMAE_CMD_DST_MASK_GRC
: DMAE_CMD_DST_MASK_PCIE) <<
DMAE_CMD_DST_SHIFT;
- opcode |= ((p_hwfn->rel_pf_id & DMAE_CMD_DST_PF_ID_MASK) <<
+ dst_pfid = QED_DMAE_FLAGS_IS_SET(p_params, PF_DST) ?
+ p_params->dst_pfid : p_hwfn->rel_pf_id;
+ opcode |= ((dst_pfid & DMAE_CMD_DST_PF_ID_MASK) <<
DMAE_CMD_DST_PF_ID_SHIFT);
/* Whether to write a completion word to the completion destination:
@@ -425,12 +433,14 @@ static void qed_dmae_opcode(struct qed_hwfn *p_hwfn,
opcode |= (DMAE_CMD_SRC_ADDR_RESET_MASK <<
DMAE_CMD_SRC_ADDR_RESET_SHIFT);
- if (p_params->flags & QED_DMAE_FLAG_COMPLETION_DST)
+ if (QED_DMAE_FLAGS_IS_SET(p_params, COMPLETION_DST))
opcode |= (1 << DMAE_CMD_COMP_FUNC_SHIFT);
opcode |= (DMAE_CMD_ENDIANITY << DMAE_CMD_ENDIANITY_MODE_SHIFT);
- opcode |= ((p_hwfn->port_id) << DMAE_CMD_PORT_ID_SHIFT);
+ port_id = (QED_DMAE_FLAGS_IS_SET(p_params, PORT)) ?
+ p_params->port_id : p_hwfn->port_id;
+ opcode |= (port_id << DMAE_CMD_PORT_ID_SHIFT);
/* reset source address in next go */
opcode |= (DMAE_CMD_SRC_ADDR_RESET_MASK <<
@@ -441,7 +451,7 @@ static void qed_dmae_opcode(struct qed_hwfn *p_hwfn,
DMAE_CMD_DST_ADDR_RESET_SHIFT);
/* SRC/DST VFID: all 1's - pf, otherwise VF id */
- if (p_params->flags & QED_DMAE_FLAG_VF_SRC) {
+ if (QED_DMAE_FLAGS_IS_SET(p_params, VF_SRC)) {
opcode |= 1 << DMAE_CMD_SRC_VF_ID_VALID_SHIFT;
opcode_b |= p_params->src_vfid << DMAE_CMD_SRC_VF_ID_SHIFT;
} else {
@@ -449,7 +459,7 @@ static void qed_dmae_opcode(struct qed_hwfn *p_hwfn,
DMAE_CMD_SRC_VF_ID_SHIFT;
}
- if (p_params->flags & QED_DMAE_FLAG_VF_DST) {
+ if (QED_DMAE_FLAGS_IS_SET(p_params, VF_DST)) {
opcode |= 1 << DMAE_CMD_DST_VF_ID_VALID_SHIFT;
opcode_b |= p_params->dst_vfid << DMAE_CMD_DST_VF_ID_SHIFT;
} else {
@@ -733,7 +743,7 @@ static int qed_dmae_execute_command(struct qed_hwfn *p_hwfn,
for (i = 0; i <= cnt_split; i++) {
offset = length_limit * i;
- if (!(p_params->flags & QED_DMAE_FLAG_RW_REPL_SRC)) {
+ if (!QED_DMAE_FLAGS_IS_SET(p_params, RW_REPL_SRC)) {
if (src_type == QED_DMAE_ADDRESS_GRC)
src_addr_split = src_addr + offset;
else
@@ -771,14 +781,12 @@ static int qed_dmae_execute_command(struct qed_hwfn *p_hwfn,
int qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- u64 source_addr, u32 grc_addr, u32 size_in_dwords, u32 flags)
+ u64 source_addr, u32 grc_addr, u32 size_in_dwords,
+ struct qed_dmae_params *p_params)
{
u32 grc_addr_in_dw = grc_addr / sizeof(u32);
- struct qed_dmae_params params;
int rc;
- memset(&params, 0, sizeof(struct qed_dmae_params));
- params.flags = flags;
mutex_lock(&p_hwfn->dmae_info.mutex);
@@ -786,7 +794,7 @@ int qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
grc_addr_in_dw,
QED_DMAE_ADDRESS_HOST_VIRT,
QED_DMAE_ADDRESS_GRC,
- size_in_dwords, &params);
+ size_in_dwords, p_params);
mutex_unlock(&p_hwfn->dmae_info.mutex);
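With the conversion above, DMAE callers pass either NULL (all defaults) or a struct qed_dmae_params with explicit flags. The standalone sketch below mimics the NULL-tolerant QED_DMAE_FLAGS_IS_SET() test with a trimmed stand-in struct; the 0x40 value matches QED_DMAE_FLAG_PF_DST from the header change earlier in this patch.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define FLAG_PF_DST 0x40        /* same value as QED_DMAE_FLAG_PF_DST */

struct dmae_params {            /* trimmed stand-in for qed_dmae_params */
        uint32_t flags;
        uint8_t dst_pfid;
};

/* NULL params means "use the defaults", as in the driver macro. */
#define FLAGS_IS_SET(params, flag) \
        ((params) != NULL && ((params)->flags & (flag)))

static unsigned int pick_dst_pfid(const struct dmae_params *p,
                                  unsigned int rel_pf_id)
{
        return FLAGS_IS_SET(p, FLAG_PF_DST) ? p->dst_pfid : rel_pf_id;
}

int main(void)
{
        struct dmae_params p = { .flags = FLAG_PF_DST, .dst_pfid = 7 };

        printf("with params: dst pfid %u\n", pick_dst_pfid(&p, 2));     /* 7 */
        printf("NULL params: dst pfid %u\n", pick_dst_pfid(NULL, 2));   /* 2 */
        return 0;
}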
@@ -796,21 +804,19 @@ int qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
int qed_dmae_grc2host(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 grc_addr,
- dma_addr_t dest_addr, u32 size_in_dwords, u32 flags)
+ dma_addr_t dest_addr, u32 size_in_dwords,
+ struct qed_dmae_params *p_params)
{
u32 grc_addr_in_dw = grc_addr / sizeof(u32);
- struct qed_dmae_params params;
int rc;
- memset(&params, 0, sizeof(struct qed_dmae_params));
- params.flags = flags;
mutex_lock(&p_hwfn->dmae_info.mutex);
rc = qed_dmae_execute_command(p_hwfn, p_ptt, grc_addr_in_dw,
dest_addr, QED_DMAE_ADDRESS_GRC,
QED_DMAE_ADDRESS_HOST_VIRT,
- size_in_dwords, &params);
+ size_in_dwords, p_params);
mutex_unlock(&p_hwfn->dmae_info.mutex);
@@ -842,7 +848,6 @@ int qed_dmae_sanity(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, const char *phase)
{
u32 size = PAGE_SIZE / 2, val;
- struct qed_dmae_params params;
int rc = 0;
dma_addr_t p_phys;
void *p_virt;
@@ -875,9 +880,8 @@ int qed_dmae_sanity(struct qed_hwfn *p_hwfn,
(u64)p_phys,
p_virt, (u64)(p_phys + size), (u8 *)p_virt + size, size);
- memset(&params, 0, sizeof(params));
rc = qed_dmae_host2host(p_hwfn, p_ptt, p_phys, p_phys + size,
- size / 4 /* size_in_dwords */, &params);
+ size / 4, NULL);
if (rc) {
DP_NOTICE(p_hwfn,
"DMAE sanity [%s]: qed_dmae_host2host() failed. rc = %d.\n",
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
index 34193c2f1699..a868d7f88601 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
@@ -131,7 +131,7 @@ static int qed_init_rt(struct qed_hwfn *p_hwfn,
rc = qed_dmae_host2grc(p_hwfn, p_ptt,
(uintptr_t)(p_init_val + i),
- addr + (i << 2), segment, 0);
+ addr + (i << 2), segment, NULL);
if (rc)
return rc;
@@ -194,7 +194,7 @@ static int qed_init_array_dmae(struct qed_hwfn *p_hwfn,
} else {
rc = qed_dmae_host2grc(p_hwfn, p_ptt,
(uintptr_t)(buf + dmae_data_offset),
- addr, size, 0);
+ addr, size, NULL);
}
return rc;
@@ -205,6 +205,7 @@ static int qed_init_fill_dmae(struct qed_hwfn *p_hwfn,
u32 addr, u32 fill, u32 fill_count)
{
static u32 zero_buffer[DMAE_MAX_RW_SIZE];
+ struct qed_dmae_params params = {};
memset(zero_buffer, 0, sizeof(u32) * DMAE_MAX_RW_SIZE);
@@ -214,10 +215,10 @@ static int qed_init_fill_dmae(struct qed_hwfn *p_hwfn,
* 3. p_hwfb->temp_data,
* 4. fill_count
*/
-
+ params.flags = QED_DMAE_FLAG_RW_REPL_SRC;
return qed_dmae_host2grc(p_hwfn, p_ptt,
(uintptr_t)(&zero_buffer[0]),
- addr, fill_count, QED_DMAE_FLAG_RW_REPL_SRC);
+ addr, fill_count, &params);
}
static void qed_init_fill(struct qed_hwfn *p_hwfn,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index fdfedbc8e431..4e8118a08654 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -1508,10 +1508,10 @@ void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&phys_addr,
CAU_REG_SB_ADDR_MEMORY +
- igu_sb_id * sizeof(u64), 2, 0);
+ igu_sb_id * sizeof(u64), 2, NULL);
qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&sb_entry,
CAU_REG_SB_VAR_MEMORY +
- igu_sb_id * sizeof(u64), 2, 0);
+ igu_sb_id * sizeof(u64), 2, NULL);
} else {
/* Initialize Status Block Address */
STORE_RT_REG_AGG(p_hwfn,
@@ -2362,7 +2362,7 @@ int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
sb_id * sizeof(u64),
- (u64)(uintptr_t)&sb_entry, 2, 0);
+ (u64)(uintptr_t)&sb_entry, 2, NULL);
if (rc) {
DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
return rc;
@@ -2376,7 +2376,7 @@ int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
rc = qed_dmae_host2grc(p_hwfn, p_ptt,
(u64)(uintptr_t)&sb_entry,
CAU_REG_SB_VAR_MEMORY +
- sb_id * sizeof(u64), 2, 0);
+ sb_id * sizeof(u64), 2, NULL);
if (rc) {
DP_ERR(p_hwfn, "dmae_host2grc failed %d\n", rc);
return rc;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
index 4f8a685d1a55..5585c18053ec 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
@@ -1082,7 +1082,7 @@ struct qed_hash_iscsi_con {
static int qed_fill_iscsi_dev_info(struct qed_dev *cdev,
struct qed_dev_iscsi_info *info)
{
- struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_hwfn *hwfn = QED_AFFIN_HWFN(cdev);
int rc;
@@ -1141,8 +1141,8 @@ static int qed_iscsi_stop(struct qed_dev *cdev)
}
/* Stop the iscsi */
- rc = qed_sp_iscsi_func_stop(QED_LEADING_HWFN(cdev),
- QED_SPQ_MODE_EBLOCK, NULL);
+ rc = qed_sp_iscsi_func_stop(QED_AFFIN_HWFN(cdev), QED_SPQ_MODE_EBLOCK,
+ NULL);
cdev->flags &= ~QED_FLAG_STORAGE_STARTED;
return rc;
@@ -1161,9 +1161,8 @@ static int qed_iscsi_start(struct qed_dev *cdev,
return 0;
}
- rc = qed_sp_iscsi_func_start(QED_LEADING_HWFN(cdev),
- QED_SPQ_MODE_EBLOCK, NULL, event_context,
- async_event_cb);
+ rc = qed_sp_iscsi_func_start(QED_AFFIN_HWFN(cdev), QED_SPQ_MODE_EBLOCK,
+ NULL, event_context, async_event_cb);
if (rc) {
DP_NOTICE(cdev, "Failed to start iscsi\n");
return rc;
@@ -1182,8 +1181,7 @@ static int qed_iscsi_start(struct qed_dev *cdev,
return -ENOMEM;
}
- rc = qed_cxt_get_tid_mem_info(QED_LEADING_HWFN(cdev),
- tid_info);
+ rc = qed_cxt_get_tid_mem_info(QED_AFFIN_HWFN(cdev), tid_info);
if (rc) {
DP_NOTICE(cdev, "Failed to gather task information\n");
qed_iscsi_stop(cdev);
@@ -1215,7 +1213,7 @@ static int qed_iscsi_acquire_conn(struct qed_dev *cdev,
return -ENOMEM;
/* Acquire the connection */
- rc = qed_iscsi_acquire_connection(QED_LEADING_HWFN(cdev), NULL,
+ rc = qed_iscsi_acquire_connection(QED_AFFIN_HWFN(cdev), NULL,
&hash_con->con);
if (rc) {
DP_NOTICE(cdev, "Failed to acquire Connection\n");
@@ -1229,7 +1227,7 @@ static int qed_iscsi_acquire_conn(struct qed_dev *cdev,
hash_add(cdev->connections, &hash_con->node, *handle);
if (p_doorbell)
- *p_doorbell = qed_iscsi_get_db_addr(QED_LEADING_HWFN(cdev),
+ *p_doorbell = qed_iscsi_get_db_addr(QED_AFFIN_HWFN(cdev),
*handle);
return 0;
@@ -1247,7 +1245,7 @@ static int qed_iscsi_release_conn(struct qed_dev *cdev, u32 handle)
}
hlist_del(&hash_con->node);
- qed_iscsi_release_connection(QED_LEADING_HWFN(cdev), hash_con->con);
+ qed_iscsi_release_connection(QED_AFFIN_HWFN(cdev), hash_con->con);
kfree(hash_con);
return 0;
@@ -1324,7 +1322,7 @@ static int qed_iscsi_offload_conn(struct qed_dev *cdev,
/* Set default values on other connection fields */
con->offl_flags = 0x1;
- return qed_sp_iscsi_conn_offload(QED_LEADING_HWFN(cdev), con,
+ return qed_sp_iscsi_conn_offload(QED_AFFIN_HWFN(cdev), con,
QED_SPQ_MODE_EBLOCK, NULL);
}
@@ -1351,7 +1349,7 @@ static int qed_iscsi_update_conn(struct qed_dev *cdev,
con->first_seq_length = conn_info->first_seq_length;
con->exp_stat_sn = conn_info->exp_stat_sn;
- return qed_sp_iscsi_conn_update(QED_LEADING_HWFN(cdev), con,
+ return qed_sp_iscsi_conn_update(QED_AFFIN_HWFN(cdev), con,
QED_SPQ_MODE_EBLOCK, NULL);
}
@@ -1366,8 +1364,7 @@ static int qed_iscsi_clear_conn_sq(struct qed_dev *cdev, u32 handle)
return -EINVAL;
}
- return qed_sp_iscsi_conn_clear_sq(QED_LEADING_HWFN(cdev),
- hash_con->con,
+ return qed_sp_iscsi_conn_clear_sq(QED_AFFIN_HWFN(cdev), hash_con->con,
QED_SPQ_MODE_EBLOCK, NULL);
}
@@ -1385,14 +1382,13 @@ static int qed_iscsi_destroy_conn(struct qed_dev *cdev,
hash_con->con->abortive_dsconnect = abrt_conn;
- return qed_sp_iscsi_conn_terminate(QED_LEADING_HWFN(cdev),
- hash_con->con,
+ return qed_sp_iscsi_conn_terminate(QED_AFFIN_HWFN(cdev), hash_con->con,
QED_SPQ_MODE_EBLOCK, NULL);
}
static int qed_iscsi_stats(struct qed_dev *cdev, struct qed_iscsi_stats *stats)
{
- return qed_iscsi_get_stats(QED_LEADING_HWFN(cdev), stats);
+ return qed_iscsi_get_stats(QED_AFFIN_HWFN(cdev), stats);
}
static int qed_iscsi_change_mac(struct qed_dev *cdev,
@@ -1407,8 +1403,7 @@ static int qed_iscsi_change_mac(struct qed_dev *cdev,
return -EINVAL;
}
- return qed_sp_iscsi_mac_update(QED_LEADING_HWFN(cdev),
- hash_con->con,
+ return qed_sp_iscsi_mac_update(QED_AFFIN_HWFN(cdev), hash_con->con,
QED_SPQ_MODE_EBLOCK, NULL);
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
index ded556b7bab5..f380fae8799d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
@@ -63,7 +63,12 @@ struct mpa_v2_hdr {
#define MPA_REV2(_mpa_rev) ((_mpa_rev) == MPA_NEGOTIATION_TYPE_ENHANCED)
#define QED_IWARP_INVALID_TCP_CID 0xffffffff
-#define QED_IWARP_RCV_WND_SIZE_DEF (256 * 1024)
+
+#define QED_IWARP_RCV_WND_SIZE_DEF_BB_2P (200 * 1024)
+#define QED_IWARP_RCV_WND_SIZE_DEF_BB_4P (100 * 1024)
+#define QED_IWARP_RCV_WND_SIZE_DEF_AH_2P (150 * 1024)
+#define QED_IWARP_RCV_WND_SIZE_DEF_AH_4P (90 * 1024)
+
#define QED_IWARP_RCV_WND_SIZE_MIN (0xffff)
#define TIMESTAMP_HEADER_SIZE (12)
#define QED_IWARP_MAX_FIN_RT_DEFAULT (2)
@@ -532,7 +537,8 @@ int qed_iwarp_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
/* Make sure ep is closed before returning and freeing memory. */
if (ep) {
- while (ep->state != QED_IWARP_EP_CLOSED && wait_count++ < 200)
+ while (READ_ONCE(ep->state) != QED_IWARP_EP_CLOSED &&
+ wait_count++ < 200)
msleep(100);
if (ep->state != QED_IWARP_EP_CLOSED)
@@ -1022,8 +1028,6 @@ qed_iwarp_mpa_complete(struct qed_hwfn *p_hwfn,
params.ep_context = ep;
- ep->state = QED_IWARP_EP_CLOSED;
-
switch (fw_return_code) {
case RDMA_RETURN_OK:
ep->qp->max_rd_atomic_req = ep->cm_info.ord;
@@ -1083,6 +1087,10 @@ qed_iwarp_mpa_complete(struct qed_hwfn *p_hwfn,
break;
}
+ if (fw_return_code != RDMA_RETURN_OK)
+ /* paired with READ_ONCE in destroy_qp */
+ smp_store_release(&ep->state, QED_IWARP_EP_CLOSED);
+
ep->event_cb(ep->cb_context, &params);
/* on passive side, if there is no associated QP (REJECT) we need to
@@ -2528,7 +2536,7 @@ qed_iwarp_ll2_slowpath(void *cxt,
memset(fpdu, 0, sizeof(*fpdu));
}
-static int qed_iwarp_ll2_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+static int qed_iwarp_ll2_stop(struct qed_hwfn *p_hwfn)
{
struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
int rc = 0;
@@ -2563,8 +2571,9 @@ static int qed_iwarp_ll2_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
iwarp_info->ll2_mpa_handle = QED_IWARP_HANDLE_INVAL;
}
- qed_llh_remove_mac_filter(p_hwfn,
- p_ptt, p_hwfn->p_rdma_info->iwarp.mac_addr);
+ qed_llh_remove_mac_filter(p_hwfn->cdev, 0,
+ p_hwfn->p_rdma_info->iwarp.mac_addr);
+
return rc;
}
@@ -2609,7 +2618,7 @@ qed_iwarp_ll2_alloc_buffers(struct qed_hwfn *p_hwfn,
static int
qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
struct qed_rdma_start_in_params *params,
- struct qed_ptt *p_ptt)
+ u32 rcv_wnd_size)
{
struct qed_iwarp_info *iwarp_info;
struct qed_ll2_acquire_data data;
@@ -2628,7 +2637,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
ether_addr_copy(p_hwfn->p_rdma_info->iwarp.mac_addr, params->mac_addr);
- rc = qed_llh_add_mac_filter(p_hwfn, p_ptt, params->mac_addr);
+ rc = qed_llh_add_mac_filter(p_hwfn->cdev, 0, params->mac_addr);
if (rc)
return rc;
@@ -2637,6 +2646,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
cbs.rx_release_cb = qed_iwarp_ll2_rel_rx_pkt;
cbs.tx_comp_cb = qed_iwarp_ll2_comp_tx_pkt;
cbs.tx_release_cb = qed_iwarp_ll2_rel_tx_pkt;
+ cbs.slowpath_cb = NULL;
cbs.cookie = p_hwfn;
memset(&data, 0, sizeof(data));
@@ -2653,7 +2663,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
rc = qed_ll2_acquire_connection(p_hwfn, &data);
if (rc) {
DP_NOTICE(p_hwfn, "Failed to acquire LL2 connection\n");
- qed_llh_remove_mac_filter(p_hwfn, p_ptt, params->mac_addr);
+ qed_llh_remove_mac_filter(p_hwfn->cdev, 0, params->mac_addr);
return rc;
}
@@ -2675,7 +2685,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
data.input.conn_type = QED_LL2_TYPE_OOO;
data.input.mtu = params->max_mtu;
- n_ooo_bufs = (QED_IWARP_MAX_OOO * QED_IWARP_RCV_WND_SIZE_DEF) /
+ n_ooo_bufs = (QED_IWARP_MAX_OOO * rcv_wnd_size) /
iwarp_info->max_mtu;
n_ooo_bufs = min_t(u32, n_ooo_bufs, QED_IWARP_LL2_OOO_MAX_RX_SIZE);
@@ -2708,6 +2718,8 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
data.input.rx_num_desc = n_ooo_bufs * 2;
data.input.tx_num_desc = data.input.rx_num_desc;
data.input.tx_max_bds_per_packet = QED_IWARP_MAX_BDS_PER_FPDU;
+ data.input.tx_tc = PKT_LB_TC;
+ data.input.tx_dest = QED_LL2_TX_DEST_LB;
data.p_connection_handle = &iwarp_info->ll2_mpa_handle;
data.input.secondary_queue = true;
data.cbs = &cbs;
@@ -2757,21 +2769,35 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
&iwarp_info->mpa_buf_list);
return rc;
err:
- qed_iwarp_ll2_stop(p_hwfn, p_ptt);
+ qed_iwarp_ll2_stop(p_hwfn);
return rc;
}
-int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+static struct {
+ u32 two_ports;
+ u32 four_ports;
+} qed_iwarp_rcv_wnd_size[MAX_CHIP_IDS] = {
+ {QED_IWARP_RCV_WND_SIZE_DEF_BB_2P, QED_IWARP_RCV_WND_SIZE_DEF_BB_4P},
+ {QED_IWARP_RCV_WND_SIZE_DEF_AH_2P, QED_IWARP_RCV_WND_SIZE_DEF_AH_4P}
+};
+
+int qed_iwarp_setup(struct qed_hwfn *p_hwfn,
struct qed_rdma_start_in_params *params)
{
+ struct qed_dev *cdev = p_hwfn->cdev;
struct qed_iwarp_info *iwarp_info;
+ enum chip_ids chip_id;
u32 rcv_wnd_size;
iwarp_info = &p_hwfn->p_rdma_info->iwarp;
iwarp_info->tcp_flags = QED_IWARP_TS_EN;
- rcv_wnd_size = QED_IWARP_RCV_WND_SIZE_DEF;
+
+ chip_id = QED_IS_BB(cdev) ? CHIP_BB : CHIP_K2;
+ rcv_wnd_size = (qed_device_num_ports(cdev) == 4) ?
+ qed_iwarp_rcv_wnd_size[chip_id].four_ports :
+ qed_iwarp_rcv_wnd_size[chip_id].two_ports;
/* value 0 is used for ilog2(QED_IWARP_RCV_WND_SIZE_MIN) */
iwarp_info->rcv_wnd_scale = ilog2(rcv_wnd_size) -
@@ -2794,10 +2820,10 @@ int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
qed_iwarp_async_event);
qed_ooo_setup(p_hwfn);
- return qed_iwarp_ll2_start(p_hwfn, params, p_ptt);
+ return qed_iwarp_ll2_start(p_hwfn, params, rcv_wnd_size);
}
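Assuming the receive-window scale is computed as ilog2(window size) minus ilog2(QED_IWARP_RCV_WND_SIZE_MIN), as the comment in the previous hunk suggests, the standalone sketch below works out the scale for two of the new per-chip, per-port-count window sizes.

#include <stdio.h>

/* plain floor(log2(v)), like the kernel's ilog2() for non-zero values */
static int ilog2_u32(unsigned int v)
{
        int r = -1;

        while (v) {
                v >>= 1;
                r++;
        }
        return r;
}

int main(void)
{
        unsigned int wnd_min = 0xffff;          /* QED_IWARP_RCV_WND_SIZE_MIN */
        unsigned int wnd_bb_2p = 200 * 1024;    /* BB, 2 ports */
        unsigned int wnd_ah_4p = 90 * 1024;     /* AH, 4 ports */

        /* scale 0 corresponds to the minimum window size */
        printf("BB/2p scale = %d\n", ilog2_u32(wnd_bb_2p) - ilog2_u32(wnd_min)); /* 2 */
        printf("AH/4p scale = %d\n", ilog2_u32(wnd_ah_4p) - ilog2_u32(wnd_min)); /* 1 */
        return 0;
}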
-int qed_iwarp_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+int qed_iwarp_stop(struct qed_hwfn *p_hwfn)
{
int rc;
@@ -2808,7 +2834,7 @@ int qed_iwarp_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_IWARP);
- return qed_iwarp_ll2_stop(p_hwfn, p_ptt);
+ return qed_iwarp_ll2_stop(p_hwfn);
}
static void qed_iwarp_qp_in_error(struct qed_hwfn *p_hwfn,
@@ -2825,7 +2851,9 @@ static void qed_iwarp_qp_in_error(struct qed_hwfn *p_hwfn,
params.status = (fw_return_code == IWARP_QP_IN_ERROR_GOOD_CLOSE) ?
0 : -ECONNRESET;
- ep->state = QED_IWARP_EP_CLOSED;
+ /* paired with READ_ONCE in destroy_qp */
+ smp_store_release(&ep->state, QED_IWARP_EP_CLOSED);
+
spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
list_del(&ep->list_entry);
spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
@@ -2914,7 +2942,8 @@ qed_iwarp_tcp_connect_unsuccessful(struct qed_hwfn *p_hwfn,
params.event = QED_IWARP_EVENT_ACTIVE_COMPLETE;
params.ep_context = ep;
params.cm_info = &ep->cm_info;
- ep->state = QED_IWARP_EP_CLOSED;
+ /* paired with READ_ONCE in destroy_qp */
+ smp_store_release(&ep->state, QED_IWARP_EP_CLOSED);
switch (fw_return_code) {
case IWARP_CONN_ERROR_TCP_CONNECT_INVALID_PACKET:
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h
index 7ac959038324..c1b2057d23b8 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h
@@ -183,13 +183,13 @@ struct qed_iwarp_listener {
int qed_iwarp_alloc(struct qed_hwfn *p_hwfn);
-int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+int qed_iwarp_setup(struct qed_hwfn *p_hwfn,
struct qed_rdma_start_in_params *params);
void qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn,
struct iwarp_init_func_ramrod_data *p_ramrod);
-int qed_iwarp_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+int qed_iwarp_stop(struct qed_hwfn *p_hwfn);
void qed_iwarp_resc_free(struct qed_hwfn *p_hwfn);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 57641728df69..9f36e7948222 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -2111,7 +2111,7 @@ int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn,
rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
p_cid->sb_igu_id * sizeof(u64),
- (u64)(uintptr_t)&sb_entry, 2, 0);
+ (u64)(uintptr_t)&sb_entry, 2, NULL);
if (rc) {
DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
return rc;
@@ -2144,7 +2144,7 @@ int qed_get_txq_coalesce(struct qed_hwfn *p_hwfn,
rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
p_cid->sb_igu_id * sizeof(u64),
- (u64)(uintptr_t)&sb_entry, 2, 0);
+ (u64)(uintptr_t)&sb_entry, 2, NULL);
if (rc) {
DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
return rc;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index b5f419b71287..19a1a58d60f8 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -239,9 +239,8 @@ out_post1:
buffer->phys_addr = new_phys_addr;
out_post:
- rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev), cdev->ll2->handle,
- buffer->phys_addr, 0, buffer, 1);
-
+ rc = qed_ll2_post_rx_buffer(p_hwfn, cdev->ll2->handle,
+ buffer->phys_addr, 0, buffer, 1);
if (rc)
qed_ll2_dealloc_buffer(cdev, buffer);
}
@@ -926,16 +925,15 @@ static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
return 0;
}
-static void qed_ll2_stop_ooo(struct qed_dev *cdev)
+static void qed_ll2_stop_ooo(struct qed_hwfn *p_hwfn)
{
- struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
- u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
+ u8 *handle = &p_hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
- DP_VERBOSE(cdev, QED_MSG_STORAGE, "Stopping LL2 OOO queue [%02x]\n",
- *handle);
+ DP_VERBOSE(p_hwfn, (QED_MSG_STORAGE | QED_MSG_LL2),
+ "Stopping LL2 OOO queue [%02x]\n", *handle);
- qed_ll2_terminate_connection(hwfn, *handle);
- qed_ll2_release_connection(hwfn, *handle);
+ qed_ll2_terminate_connection(p_hwfn, *handle);
+ qed_ll2_release_connection(p_hwfn, *handle);
*handle = QED_LL2_UNUSED_HANDLE;
}
@@ -1574,12 +1572,12 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) {
if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
- qed_llh_add_protocol_filter(p_hwfn, p_ptt,
- ETH_P_FCOE, 0,
- QED_LLH_FILTER_ETHERTYPE);
- qed_llh_add_protocol_filter(p_hwfn, p_ptt,
- ETH_P_FIP, 0,
- QED_LLH_FILTER_ETHERTYPE);
+ qed_llh_add_protocol_filter(p_hwfn->cdev, 0,
+ QED_LLH_FILTER_ETHERTYPE,
+ ETH_P_FCOE, 0);
+ qed_llh_add_protocol_filter(p_hwfn->cdev, 0,
+ QED_LLH_FILTER_ETHERTYPE,
+ ETH_P_FIP, 0);
}
out:
@@ -1980,12 +1978,12 @@ int qed_ll2_terminate_connection(void *cxt, u8 connection_handle)
if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) {
if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
- qed_llh_remove_protocol_filter(p_hwfn, p_ptt,
- ETH_P_FCOE, 0,
- QED_LLH_FILTER_ETHERTYPE);
- qed_llh_remove_protocol_filter(p_hwfn, p_ptt,
- ETH_P_FIP, 0,
- QED_LLH_FILTER_ETHERTYPE);
+ qed_llh_remove_protocol_filter(p_hwfn->cdev, 0,
+ QED_LLH_FILTER_ETHERTYPE,
+ ETH_P_FCOE, 0);
+ qed_llh_remove_protocol_filter(p_hwfn->cdev, 0,
+ QED_LLH_FILTER_ETHERTYPE,
+ ETH_P_FIP, 0);
}
out:
@@ -2086,12 +2084,12 @@ static void _qed_ll2_get_port_stats(struct qed_hwfn *p_hwfn,
TSTORM_LL2_PORT_STAT_OFFSET(MFW_PORT(p_hwfn)),
sizeof(port_stats));
- p_stats->gsi_invalid_hdr = HILO_64_REGPAIR(port_stats.gsi_invalid_hdr);
- p_stats->gsi_invalid_pkt_length =
+ p_stats->gsi_invalid_hdr += HILO_64_REGPAIR(port_stats.gsi_invalid_hdr);
+ p_stats->gsi_invalid_pkt_length +=
HILO_64_REGPAIR(port_stats.gsi_invalid_pkt_length);
- p_stats->gsi_unsupported_pkt_typ =
+ p_stats->gsi_unsupported_pkt_typ +=
HILO_64_REGPAIR(port_stats.gsi_unsupported_pkt_typ);
- p_stats->gsi_crcchksm_error =
+ p_stats->gsi_crcchksm_error +=
HILO_64_REGPAIR(port_stats.gsi_crcchksm_error);
}
@@ -2109,9 +2107,9 @@ static void _qed_ll2_get_tstats(struct qed_hwfn *p_hwfn,
CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(qid);
qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats));
- p_stats->packet_too_big_discard =
+ p_stats->packet_too_big_discard +=
HILO_64_REGPAIR(tstats.packet_too_big_discard);
- p_stats->no_buff_discard = HILO_64_REGPAIR(tstats.no_buff_discard);
+ p_stats->no_buff_discard += HILO_64_REGPAIR(tstats.no_buff_discard);
}
static void _qed_ll2_get_ustats(struct qed_hwfn *p_hwfn,
@@ -2128,12 +2126,12 @@ static void _qed_ll2_get_ustats(struct qed_hwfn *p_hwfn,
CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(qid);
qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, sizeof(ustats));
- p_stats->rcv_ucast_bytes = HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
- p_stats->rcv_mcast_bytes = HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
- p_stats->rcv_bcast_bytes = HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
- p_stats->rcv_ucast_pkts = HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
- p_stats->rcv_mcast_pkts = HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
- p_stats->rcv_bcast_pkts = HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
+ p_stats->rcv_ucast_bytes += HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
+ p_stats->rcv_mcast_bytes += HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
+ p_stats->rcv_bcast_bytes += HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
+ p_stats->rcv_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
+ p_stats->rcv_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
+ p_stats->rcv_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
}
static void _qed_ll2_get_pstats(struct qed_hwfn *p_hwfn,
@@ -2150,23 +2148,21 @@ static void _qed_ll2_get_pstats(struct qed_hwfn *p_hwfn,
CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(stats_id);
qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats));
- p_stats->sent_ucast_bytes = HILO_64_REGPAIR(pstats.sent_ucast_bytes);
- p_stats->sent_mcast_bytes = HILO_64_REGPAIR(pstats.sent_mcast_bytes);
- p_stats->sent_bcast_bytes = HILO_64_REGPAIR(pstats.sent_bcast_bytes);
- p_stats->sent_ucast_pkts = HILO_64_REGPAIR(pstats.sent_ucast_pkts);
- p_stats->sent_mcast_pkts = HILO_64_REGPAIR(pstats.sent_mcast_pkts);
- p_stats->sent_bcast_pkts = HILO_64_REGPAIR(pstats.sent_bcast_pkts);
+ p_stats->sent_ucast_bytes += HILO_64_REGPAIR(pstats.sent_ucast_bytes);
+ p_stats->sent_mcast_bytes += HILO_64_REGPAIR(pstats.sent_mcast_bytes);
+ p_stats->sent_bcast_bytes += HILO_64_REGPAIR(pstats.sent_bcast_bytes);
+ p_stats->sent_ucast_pkts += HILO_64_REGPAIR(pstats.sent_ucast_pkts);
+ p_stats->sent_mcast_pkts += HILO_64_REGPAIR(pstats.sent_mcast_pkts);
+ p_stats->sent_bcast_pkts += HILO_64_REGPAIR(pstats.sent_bcast_pkts);
}
-int qed_ll2_get_stats(void *cxt,
- u8 connection_handle, struct qed_ll2_stats *p_stats)
+static int __qed_ll2_get_stats(void *cxt, u8 connection_handle,
+ struct qed_ll2_stats *p_stats)
{
struct qed_hwfn *p_hwfn = cxt;
struct qed_ll2_info *p_ll2_conn = NULL;
struct qed_ptt *p_ptt;
- memset(p_stats, 0, sizeof(*p_stats));
-
if ((connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS) ||
!p_hwfn->p_ll2_info)
return -EINVAL;
@@ -2181,15 +2177,26 @@ int qed_ll2_get_stats(void *cxt,
if (p_ll2_conn->input.gsi_enable)
_qed_ll2_get_port_stats(p_hwfn, p_ptt, p_stats);
+
_qed_ll2_get_tstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
+
_qed_ll2_get_ustats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
+
if (p_ll2_conn->tx_stats_en)
_qed_ll2_get_pstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
qed_ptt_release(p_hwfn, p_ptt);
+
return 0;
}
+int qed_ll2_get_stats(void *cxt,
+ u8 connection_handle, struct qed_ll2_stats *p_stats)
+{
+ memset(p_stats, 0, sizeof(*p_stats));
+ return __qed_ll2_get_stats(cxt, connection_handle, p_stats);
+}
+
static void qed_ll2b_release_rx_packet(void *cxt,
u8 connection_handle,
void *cookie,
@@ -2216,7 +2223,7 @@ struct qed_ll2_cbs ll2_cbs = {
.tx_release_cb = &qed_ll2b_complete_tx_packet,
};
-static void qed_ll2_set_conn_data(struct qed_dev *cdev,
+static void qed_ll2_set_conn_data(struct qed_hwfn *p_hwfn,
struct qed_ll2_acquire_data *data,
struct qed_ll2_params *params,
enum qed_ll2_conn_type conn_type,
@@ -2232,7 +2239,7 @@ static void qed_ll2_set_conn_data(struct qed_dev *cdev,
data->input.tx_num_desc = QED_LL2_TX_SIZE;
data->p_connection_handle = handle;
data->cbs = &ll2_cbs;
- ll2_cbs.cookie = QED_LEADING_HWFN(cdev);
+ ll2_cbs.cookie = p_hwfn;
if (lb) {
data->input.tx_tc = PKT_LB_TC;
@@ -2243,74 +2250,102 @@ static void qed_ll2_set_conn_data(struct qed_dev *cdev,
}
}
-static int qed_ll2_start_ooo(struct qed_dev *cdev,
+static int qed_ll2_start_ooo(struct qed_hwfn *p_hwfn,
struct qed_ll2_params *params)
{
- struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
- u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
+ u8 *handle = &p_hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
struct qed_ll2_acquire_data data;
int rc;
- qed_ll2_set_conn_data(cdev, &data, params,
+ qed_ll2_set_conn_data(p_hwfn, &data, params,
QED_LL2_TYPE_OOO, handle, true);
- rc = qed_ll2_acquire_connection(hwfn, &data);
+ rc = qed_ll2_acquire_connection(p_hwfn, &data);
if (rc) {
- DP_INFO(cdev, "Failed to acquire LL2 OOO connection\n");
+ DP_INFO(p_hwfn, "Failed to acquire LL2 OOO connection\n");
goto out;
}
- rc = qed_ll2_establish_connection(hwfn, *handle);
+ rc = qed_ll2_establish_connection(p_hwfn, *handle);
if (rc) {
- DP_INFO(cdev, "Failed to establist LL2 OOO connection\n");
+ DP_INFO(p_hwfn, "Failed to establish LL2 OOO connection\n");
goto fail;
}
return 0;
fail:
- qed_ll2_release_connection(hwfn, *handle);
+ qed_ll2_release_connection(p_hwfn, *handle);
out:
*handle = QED_LL2_UNUSED_HANDLE;
return rc;
}
-static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
+static bool qed_ll2_is_storage_eng1(struct qed_dev *cdev)
{
- struct qed_ll2_buffer *buffer, *tmp_buffer;
- enum qed_ll2_conn_type conn_type;
- struct qed_ll2_acquire_data data;
- struct qed_ptt *p_ptt;
- int rc, i;
+ return (QED_IS_FCOE_PERSONALITY(QED_LEADING_HWFN(cdev)) ||
+ QED_IS_ISCSI_PERSONALITY(QED_LEADING_HWFN(cdev))) &&
+ (QED_AFFIN_HWFN(cdev) != QED_LEADING_HWFN(cdev));
+}
+static int __qed_ll2_stop(struct qed_hwfn *p_hwfn)
+{
+ struct qed_dev *cdev = p_hwfn->cdev;
+ int rc;
- /* Initialize LL2 locks & lists */
- INIT_LIST_HEAD(&cdev->ll2->list);
- spin_lock_init(&cdev->ll2->lock);
- cdev->ll2->rx_size = NET_SKB_PAD + ETH_HLEN +
- L1_CACHE_BYTES + params->mtu;
+ rc = qed_ll2_terminate_connection(p_hwfn, cdev->ll2->handle);
+ if (rc)
+ DP_INFO(cdev, "Failed to terminate LL2 connection\n");
- /*Allocate memory for LL2 */
- DP_INFO(cdev, "Allocating LL2 buffers of size %08x bytes\n",
- cdev->ll2->rx_size);
- for (i = 0; i < QED_LL2_RX_SIZE; i++) {
- buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
- if (!buffer) {
- DP_INFO(cdev, "Failed to allocate LL2 buffers\n");
- goto fail;
- }
+ qed_ll2_release_connection(p_hwfn, cdev->ll2->handle);
- rc = qed_ll2_alloc_buffer(cdev, (u8 **)&buffer->data,
- &buffer->phys_addr);
- if (rc) {
- kfree(buffer);
- goto fail;
- }
+ return rc;
+}
- list_add_tail(&buffer->list, &cdev->ll2->list);
+static int qed_ll2_stop(struct qed_dev *cdev)
+{
+ bool b_is_storage_eng1 = qed_ll2_is_storage_eng1(cdev);
+ struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev);
+ int rc = 0, rc2 = 0;
+
+ if (cdev->ll2->handle == QED_LL2_UNUSED_HANDLE)
+ return 0;
+
+ qed_llh_remove_mac_filter(cdev, 0, cdev->ll2_mac_address);
+ eth_zero_addr(cdev->ll2_mac_address);
+
+ if (QED_IS_ISCSI_PERSONALITY(p_hwfn))
+ qed_ll2_stop_ooo(p_hwfn);
+
+ /* In CMT mode, LL2 is always started on engine 0 for a storage PF */
+ if (b_is_storage_eng1) {
+ rc2 = __qed_ll2_stop(QED_LEADING_HWFN(cdev));
+ if (rc2)
+ DP_NOTICE(QED_LEADING_HWFN(cdev),
+ "Failed to stop LL2 on engine 0\n");
}
- switch (QED_LEADING_HWFN(cdev)->hw_info.personality) {
+ rc = __qed_ll2_stop(p_hwfn);
+ if (rc)
+ DP_NOTICE(p_hwfn, "Failed to stop LL2\n");
+
+ qed_ll2_kill_buffers(cdev);
+
+ cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
+
+ return rc | rc2;
+}
+
+static int __qed_ll2_start(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_params *params)
+{
+ struct qed_ll2_buffer *buffer, *tmp_buffer;
+ struct qed_dev *cdev = p_hwfn->cdev;
+ enum qed_ll2_conn_type conn_type;
+ struct qed_ll2_acquire_data data;
+ int rc, rx_cnt;
+
+ switch (p_hwfn->hw_info.personality) {
case QED_PCI_FCOE:
conn_type = QED_LL2_TYPE_FCOE;
break;
@@ -2321,33 +2356,34 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
conn_type = QED_LL2_TYPE_ROCE;
break;
default:
+
conn_type = QED_LL2_TYPE_TEST;
}
- qed_ll2_set_conn_data(cdev, &data, params, conn_type,
+ qed_ll2_set_conn_data(p_hwfn, &data, params, conn_type,
&cdev->ll2->handle, false);
- rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &data);
+ rc = qed_ll2_acquire_connection(p_hwfn, &data);
if (rc) {
- DP_INFO(cdev, "Failed to acquire LL2 connection\n");
- goto fail;
+ DP_INFO(p_hwfn, "Failed to acquire LL2 connection\n");
+ return rc;
}
- rc = qed_ll2_establish_connection(QED_LEADING_HWFN(cdev),
- cdev->ll2->handle);
+ rc = qed_ll2_establish_connection(p_hwfn, cdev->ll2->handle);
if (rc) {
- DP_INFO(cdev, "Failed to establish LL2 connection\n");
- goto release_fail;
+ DP_INFO(p_hwfn, "Failed to establish LL2 connection\n");
+ goto release_conn;
}
/* Post all Rx buffers to FW */
spin_lock_bh(&cdev->ll2->lock);
+ rx_cnt = cdev->ll2->rx_cnt;
list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list) {
- rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev),
+ rc = qed_ll2_post_rx_buffer(p_hwfn,
cdev->ll2->handle,
buffer->phys_addr, 0, buffer, 1);
if (rc) {
- DP_INFO(cdev,
+ DP_INFO(p_hwfn,
"Failed to post an Rx buffer; Deleting it\n");
dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
cdev->ll2->rx_size, DMA_FROM_DEVICE);
@@ -2355,100 +2391,127 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
list_del(&buffer->list);
kfree(buffer);
} else {
- cdev->ll2->rx_cnt++;
+ rx_cnt++;
}
}
spin_unlock_bh(&cdev->ll2->lock);
- if (!cdev->ll2->rx_cnt) {
- DP_INFO(cdev, "Failed passing even a single Rx buffer\n");
- goto release_terminate;
+ if (rx_cnt == cdev->ll2->rx_cnt) {
+ DP_NOTICE(p_hwfn, "Failed passing even a single Rx buffer\n");
+ goto terminate_conn;
}
+ cdev->ll2->rx_cnt = rx_cnt;
+
+ return 0;
+
+terminate_conn:
+ qed_ll2_terminate_connection(p_hwfn, cdev->ll2->handle);
+release_conn:
+ qed_ll2_release_connection(p_hwfn, cdev->ll2->handle);
+ return rc;
+}
+
+static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
+{
+ bool b_is_storage_eng1 = qed_ll2_is_storage_eng1(cdev);
+ struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev);
+ struct qed_ll2_buffer *buffer;
+ int rx_num_desc, i, rc;
if (!is_valid_ether_addr(params->ll2_mac_address)) {
- DP_INFO(cdev, "Invalid Ethernet address\n");
- goto release_terminate;
+ DP_NOTICE(cdev, "Invalid Ethernet address\n");
+ return -EINVAL;
}
- if (QED_LEADING_HWFN(cdev)->hw_info.personality == QED_PCI_ISCSI) {
- DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n");
- rc = qed_ll2_start_ooo(cdev, params);
+ WARN_ON(!cdev->ll2->cbs);
+
+ /* Initialize LL2 locks & lists */
+ INIT_LIST_HEAD(&cdev->ll2->list);
+ spin_lock_init(&cdev->ll2->lock);
+
+ cdev->ll2->rx_size = NET_SKB_PAD + ETH_HLEN +
+ L1_CACHE_BYTES + params->mtu;
+
+ /* Allocate memory for LL2.
+	 * In CMT mode, in case of a storage PF which is affinitized to engine 1,
+ * LL2 is started also on engine 0 and thus we need twofold buffers.
+ */
+ rx_num_desc = QED_LL2_RX_SIZE * (b_is_storage_eng1 ? 2 : 1);
+ DP_INFO(cdev, "Allocating %d LL2 buffers of size %08x bytes\n",
+ rx_num_desc, cdev->ll2->rx_size);
+ for (i = 0; i < rx_num_desc; i++) {
+ buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!buffer) {
+ DP_INFO(cdev, "Failed to allocate LL2 buffers\n");
+ rc = -ENOMEM;
+ goto err0;
+ }
+
+ rc = qed_ll2_alloc_buffer(cdev, (u8 **)&buffer->data,
+ &buffer->phys_addr);
if (rc) {
- DP_INFO(cdev,
- "Failed to initialize the OOO LL2 queue\n");
- goto release_terminate;
+ kfree(buffer);
+ goto err0;
}
- }
- p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
- if (!p_ptt) {
- DP_INFO(cdev, "Failed to acquire PTT\n");
- goto release_terminate;
+ list_add_tail(&buffer->list, &cdev->ll2->list);
}
- rc = qed_llh_add_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
- params->ll2_mac_address);
- qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
+ rc = __qed_ll2_start(p_hwfn, params);
if (rc) {
- DP_ERR(cdev, "Failed to allocate LLH filter\n");
- goto release_terminate_all;
+ DP_NOTICE(cdev, "Failed to start LL2\n");
+ goto err0;
}
- ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address);
- return 0;
-
-release_terminate_all:
-
-release_terminate:
- qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
-release_fail:
- qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
-fail:
- qed_ll2_kill_buffers(cdev);
- cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
- return -EINVAL;
-}
-
-static int qed_ll2_stop(struct qed_dev *cdev)
-{
- struct qed_ptt *p_ptt;
- int rc;
-
- if (cdev->ll2->handle == QED_LL2_UNUSED_HANDLE)
- return 0;
+ /* In CMT mode, always need to start LL2 on engine 0 for a storage PF,
+	 * since broadcast/multicast packets are routed to engine 0.
+ */
+ if (b_is_storage_eng1) {
+ rc = __qed_ll2_start(QED_LEADING_HWFN(cdev), params);
+ if (rc) {
+ DP_NOTICE(QED_LEADING_HWFN(cdev),
+ "Failed to start LL2 on engine 0\n");
+ goto err1;
+ }
+ }
- p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
- if (!p_ptt) {
- DP_INFO(cdev, "Failed to acquire PTT\n");
- goto fail;
+ if (QED_IS_ISCSI_PERSONALITY(p_hwfn)) {
+ DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n");
+ rc = qed_ll2_start_ooo(p_hwfn, params);
+ if (rc) {
+ DP_NOTICE(cdev, "Failed to start OOO LL2\n");
+ goto err2;
+ }
}
- qed_llh_remove_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
- cdev->ll2_mac_address);
- qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
- eth_zero_addr(cdev->ll2_mac_address);
+ rc = qed_llh_add_mac_filter(cdev, 0, params->ll2_mac_address);
+ if (rc) {
+ DP_NOTICE(cdev, "Failed to add an LLH filter\n");
+ goto err3;
+ }
- if (QED_LEADING_HWFN(cdev)->hw_info.personality == QED_PCI_ISCSI)
- qed_ll2_stop_ooo(cdev);
+ ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address);
- rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
- cdev->ll2->handle);
- if (rc)
- DP_INFO(cdev, "Failed to terminate LL2 connection\n");
+ return 0;
+err3:
+ if (QED_IS_ISCSI_PERSONALITY(p_hwfn))
+ qed_ll2_stop_ooo(p_hwfn);
+err2:
+ if (b_is_storage_eng1)
+ __qed_ll2_stop(QED_LEADING_HWFN(cdev));
+err1:
+ __qed_ll2_stop(p_hwfn);
+err0:
qed_ll2_kill_buffers(cdev);
-
- qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
-
return rc;
-fail:
- return -EINVAL;
}
static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
unsigned long xmit_flags)
{
+ struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev);
struct qed_ll2_tx_pkt_info pkt;
const skb_frag_t *frag;
u8 flags = 0, nr_frags;
@@ -2506,7 +2569,7 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
* routine may run and free the SKB, so no dereferencing the SKB
* beyond this point unless skb has any fragments.
*/
- rc = qed_ll2_prepare_tx_packet(&cdev->hwfns[0], cdev->ll2->handle,
+ rc = qed_ll2_prepare_tx_packet(p_hwfn, cdev->ll2->handle,
&pkt, 1);
if (rc)
goto err;
@@ -2524,13 +2587,13 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
goto err;
}
- rc = qed_ll2_set_fragment_of_tx_packet(QED_LEADING_HWFN(cdev),
+ rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn,
cdev->ll2->handle,
mapping,
skb_frag_size(frag));
/* if failed not much to do here, partial packet has been posted
- * we can't free memory, will need to wait for completion.
+ * we can't free memory, will need to wait for completion
*/
if (rc)
goto err2;
@@ -2540,18 +2603,37 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
err:
dma_unmap_single(&cdev->pdev->dev, mapping, skb->len, DMA_TO_DEVICE);
-
err2:
return rc;
}
static int qed_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats)
{
+ bool b_is_storage_eng1 = qed_ll2_is_storage_eng1(cdev);
+ struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev);
+ int rc;
+
if (!cdev->ll2)
return -EINVAL;
- return qed_ll2_get_stats(QED_LEADING_HWFN(cdev),
- cdev->ll2->handle, stats);
+ rc = qed_ll2_get_stats(p_hwfn, cdev->ll2->handle, stats);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to get LL2 stats\n");
+ return rc;
+ }
+
+ /* In CMT mode, LL2 is always started on engine 0 for a storage PF */
+ if (b_is_storage_eng1) {
+ rc = __qed_ll2_get_stats(QED_LEADING_HWFN(cdev),
+ cdev->ll2->handle, stats);
+ if (rc) {
+ DP_NOTICE(QED_LEADING_HWFN(cdev),
+ "Failed to get LL2 stats on engine 0\n");
+ return rc;
+ }
+ }
+
+ return 0;
}
const struct qed_ll2_ops qed_ll2_ops_pass = {
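/* [Editor's note] The statistics hunks above change the per-storm
 * assignments to "+=" and split qed_ll2_get_stats() into a zeroing
 * wrapper around __qed_ll2_get_stats(), so one buffer can accumulate the
 * counters of both engines in CMT storage mode. A condensed, hypothetical
 * sketch of that zero-once/accumulate-per-engine shape (struct, function
 * names and the sample counts are illustrative only):
 */
#include <stdio.h>
#include <string.h>

struct ll2_stats {
	unsigned long long rcv_ucast_pkts;
	unsigned long long sent_ucast_pkts;
};

/* Per-engine collection accumulates instead of overwriting, so calling it
 * once per engine sums both contributions into the same buffer.
 */
static void engine_collect(struct ll2_stats *out,
			   unsigned long long rx, unsigned long long tx)
{
	out->rcv_ucast_pkts  += rx;
	out->sent_ucast_pkts += tx;
}

/* Public entry point: clear the caller's buffer exactly once, then let
 * each engine add its share.
 */
static void get_stats(struct ll2_stats *out, int both_engines)
{
	memset(out, 0, sizeof(*out));
	engine_collect(out, 10, 5);		/* affined engine */
	if (both_engines)
		engine_collect(out, 3, 1);	/* engine 0 for storage in CMT */
}

int main(void)
{
	struct ll2_stats stats;

	get_stats(&stats, 1);
	printf("rx %llu tx %llu\n", stats.rcv_ucast_pkts, stats.sent_ucast_pkts);
	return 0;
}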
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 6de23b56b294..829dd60ab937 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -48,6 +48,7 @@
#include <linux/crc32.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_ll2_if.h>
+#include <net/devlink.h>
#include "qed.h"
#include "qed_sriov.h"
@@ -342,6 +343,107 @@ static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state)
return 0;
}
+struct qed_devlink {
+ struct qed_dev *cdev;
+};
+
+enum qed_devlink_param_id {
+ QED_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ QED_DEVLINK_PARAM_ID_IWARP_CMT,
+};
+
+static int qed_dl_param_get(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct qed_devlink *qed_dl;
+ struct qed_dev *cdev;
+
+ qed_dl = devlink_priv(dl);
+ cdev = qed_dl->cdev;
+ ctx->val.vbool = cdev->iwarp_cmt;
+
+ return 0;
+}
+
+static int qed_dl_param_set(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct qed_devlink *qed_dl;
+ struct qed_dev *cdev;
+
+ qed_dl = devlink_priv(dl);
+ cdev = qed_dl->cdev;
+ cdev->iwarp_cmt = ctx->val.vbool;
+
+ return 0;
+}
+
+static const struct devlink_param qed_devlink_params[] = {
+ DEVLINK_PARAM_DRIVER(QED_DEVLINK_PARAM_ID_IWARP_CMT,
+ "iwarp_cmt", DEVLINK_PARAM_TYPE_BOOL,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ qed_dl_param_get, qed_dl_param_set, NULL),
+};
+
+static const struct devlink_ops qed_dl_ops;
+
+static int qed_devlink_register(struct qed_dev *cdev)
+{
+ union devlink_param_value value;
+ struct qed_devlink *qed_dl;
+ struct devlink *dl;
+ int rc;
+
+ dl = devlink_alloc(&qed_dl_ops, sizeof(*qed_dl));
+ if (!dl)
+ return -ENOMEM;
+
+ qed_dl = devlink_priv(dl);
+
+ cdev->dl = dl;
+ qed_dl->cdev = cdev;
+
+ rc = devlink_register(dl, &cdev->pdev->dev);
+ if (rc)
+ goto err_free;
+
+ rc = devlink_params_register(dl, qed_devlink_params,
+ ARRAY_SIZE(qed_devlink_params));
+ if (rc)
+ goto err_unregister;
+
+ value.vbool = false;
+ devlink_param_driverinit_value_set(dl,
+ QED_DEVLINK_PARAM_ID_IWARP_CMT,
+ value);
+
+ devlink_params_publish(dl);
+ cdev->iwarp_cmt = false;
+
+ return 0;
+
+err_unregister:
+ devlink_unregister(dl);
+
+err_free:
+ cdev->dl = NULL;
+ devlink_free(dl);
+
+ return rc;
+}
+
+static void qed_devlink_unregister(struct qed_dev *cdev)
+{
+ if (!cdev->dl)
+ return;
+
+ devlink_params_unregister(cdev->dl, qed_devlink_params,
+ ARRAY_SIZE(qed_devlink_params));
+
+ devlink_unregister(cdev->dl);
+ devlink_free(cdev->dl);
+}
+
/* probing */
static struct qed_dev *qed_probe(struct pci_dev *pdev,
struct qed_probe_params *params)
@@ -370,6 +472,12 @@ static struct qed_dev *qed_probe(struct pci_dev *pdev,
}
DP_INFO(cdev, "PCI init completed successfully\n");
+ rc = qed_devlink_register(cdev);
+ if (rc) {
+ DP_INFO(cdev, "Failed to register devlink.\n");
+ goto err2;
+ }
+
rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT);
if (rc) {
DP_ERR(cdev, "hw prepare failed\n");
@@ -399,6 +507,8 @@ static void qed_remove(struct qed_dev *cdev)
qed_set_power_state(cdev, PCI_D3hot);
+ qed_devlink_unregister(cdev);
+
qed_free_cdev(cdev);
}
@@ -1301,26 +1411,21 @@ static u32 qed_sb_init(struct qed_dev *cdev,
{
struct qed_hwfn *p_hwfn;
struct qed_ptt *p_ptt;
- int hwfn_index;
u16 rel_sb_id;
- u8 n_hwfns;
u32 rc;
- /* RoCE uses single engine and CMT uses two engines. When using both
- * we force only a single engine. Storage uses only engine 0 too.
- */
- if (type == QED_SB_TYPE_L2_QUEUE)
- n_hwfns = cdev->num_hwfns;
- else
- n_hwfns = 1;
-
- hwfn_index = sb_id % n_hwfns;
- p_hwfn = &cdev->hwfns[hwfn_index];
- rel_sb_id = sb_id / n_hwfns;
+ /* RoCE/Storage use a single engine in CMT mode while L2 uses both */
+ if (type == QED_SB_TYPE_L2_QUEUE) {
+ p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns];
+ rel_sb_id = sb_id / cdev->num_hwfns;
+ } else {
+ p_hwfn = QED_AFFIN_HWFN(cdev);
+ rel_sb_id = sb_id;
+ }
DP_VERBOSE(cdev, NETIF_MSG_INTR,
"hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
- hwfn_index, rel_sb_id, sb_id);
+ IS_LEAD_HWFN(p_hwfn) ? 0 : 1, rel_sb_id, sb_id);
if (IS_PF(p_hwfn->cdev)) {
p_ptt = qed_ptt_acquire(p_hwfn);
@@ -1339,20 +1444,26 @@ static u32 qed_sb_init(struct qed_dev *cdev,
}
static u32 qed_sb_release(struct qed_dev *cdev,
- struct qed_sb_info *sb_info, u16 sb_id)
+ struct qed_sb_info *sb_info,
+ u16 sb_id,
+ enum qed_sb_type type)
{
struct qed_hwfn *p_hwfn;
- int hwfn_index;
u16 rel_sb_id;
u32 rc;
- hwfn_index = sb_id % cdev->num_hwfns;
- p_hwfn = &cdev->hwfns[hwfn_index];
- rel_sb_id = sb_id / cdev->num_hwfns;
+ /* RoCE/Storage use a single engine in CMT mode while L2 uses both */
+ if (type == QED_SB_TYPE_L2_QUEUE) {
+ p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns];
+ rel_sb_id = sb_id / cdev->num_hwfns;
+ } else {
+ p_hwfn = QED_AFFIN_HWFN(cdev);
+ rel_sb_id = sb_id;
+ }
DP_VERBOSE(cdev, NETIF_MSG_INTR,
"hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
- hwfn_index, rel_sb_id, sb_id);
+ IS_LEAD_HWFN(p_hwfn) ? 0 : 1, rel_sb_id, sb_id);
rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id);
@@ -2372,6 +2483,11 @@ static int qed_read_module_eeprom(struct qed_dev *cdev, char *buf,
return rc;
}
+static u8 qed_get_affin_hwfn_idx(struct qed_dev *cdev)
+{
+ return QED_AFFIN_HWFN_IDX(cdev);
+}
+
static struct qed_selftest_ops qed_selftest_ops_pass = {
.selftest_memory = &qed_selftest_memory,
.selftest_interrupt = &qed_selftest_interrupt,
@@ -2419,6 +2535,7 @@ const struct qed_common_ops qed_common_ops_pass = {
.db_recovery_add = &qed_db_recovery_add,
.db_recovery_del = &qed_db_recovery_del,
.read_module_eeprom = &qed_read_module_eeprom,
+ .get_affin_hwfn_idx = &qed_get_affin_hwfn_idx,
};
void qed_get_protocol_stats(struct qed_dev *cdev,
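/* [Editor's note] qed_sb_init()/qed_sb_release() above now spread a flat
 * status-block index across engines only for L2 queues, while other
 * protocols land on the affined hwfn with rel_sb_id == sb_id. A tiny
 * stand-alone illustration of the L2 round-robin arithmetic for a
 * two-engine (CMT) device; the loop bound is arbitrary:
 */
#include <stdio.h>

int main(void)
{
	const unsigned int num_hwfns = 2;	/* CMT: two hwfns per device */
	unsigned int sb_id;

	for (sb_id = 0; sb_id < 6; sb_id++)
		printf("sb %u -> hwfn %u, rel_sb_id %u\n",
		       sb_id, sb_id % num_hwfns, sb_id / num_hwfns);
	return 0;
}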
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index cc27fd60d689..758702c1ce9c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -3685,3 +3685,68 @@ int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
features, &mcp_resp, &mcp_param);
}
+
+int qed_mcp_get_engine_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_mcp_mb_params mb_params = {0};
+ struct qed_dev *cdev = p_hwfn->cdev;
+ u8 fir_valid, l2_valid;
+ int rc;
+
+ mb_params.cmd = DRV_MSG_CODE_GET_ENGINE_CONFIG;
+ rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc)
+ return rc;
+
+ if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
+ DP_INFO(p_hwfn,
+ "The get_engine_config command is unsupported by the MFW\n");
+ return -EOPNOTSUPP;
+ }
+
+ fir_valid = QED_MFW_GET_FIELD(mb_params.mcp_param,
+ FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID);
+ if (fir_valid)
+ cdev->fir_affin =
+ QED_MFW_GET_FIELD(mb_params.mcp_param,
+ FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE);
+
+ l2_valid = QED_MFW_GET_FIELD(mb_params.mcp_param,
+ FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID);
+ if (l2_valid)
+ cdev->l2_affin_hint =
+ QED_MFW_GET_FIELD(mb_params.mcp_param,
+ FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE);
+
+ DP_INFO(p_hwfn,
+ "Engine affinity config: FIR={valid %hhd, value %hhd}, L2_hint={valid %hhd, value %hhd}\n",
+ fir_valid, cdev->fir_affin, l2_valid, cdev->l2_affin_hint);
+
+ return 0;
+}
+
+int qed_mcp_get_ppfid_bitmap(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_mcp_mb_params mb_params = {0};
+ struct qed_dev *cdev = p_hwfn->cdev;
+ int rc;
+
+ mb_params.cmd = DRV_MSG_CODE_GET_PPFID_BITMAP;
+ rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc)
+ return rc;
+
+ if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
+ DP_INFO(p_hwfn,
+ "The get_ppfid_bitmap command is unsupported by the MFW\n");
+ return -EOPNOTSUPP;
+ }
+
+ cdev->ppfid_bitmap = QED_MFW_GET_FIELD(mb_params.mcp_param,
+ FW_MB_PARAM_PPFID_BITMAP);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP, "PPFID bitmap 0x%hhx\n",
+ cdev->ppfid_bitmap);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index 261c1a392e2c..e4f8fe4bd062 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -1186,4 +1186,20 @@ void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
*/
int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn);
+/**
+ * @brief Get the engine affinity configuration.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+int qed_mcp_get_engine_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+/**
+ * @brief Get the PPFID bitmap.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+int qed_mcp_get_ppfid_bitmap(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ptp.c b/drivers/net/ethernet/qlogic/qed/qed_ptp.c
index 1302b308bd87..0dacf2c18c09 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ptp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ptp.c
@@ -44,6 +44,8 @@
/* Add/subtract the Adjustment_Value when making a Drift adjustment */
#define QED_DRIFT_CNTR_DIRECTION_SHIFT 31
#define QED_TIMESTAMP_MASK BIT(16)
+/* Param mask for Hardware to detect/timestamp the unicast PTP packets */
+#define QED_PTP_UCAST_PARAM_MASK 0xF
static enum qed_resc_lock qed_ptcdev_to_resc(struct qed_hwfn *p_hwfn)
{
@@ -157,7 +159,8 @@ static int qed_ptp_hw_read_tx_ts(struct qed_dev *cdev, u64 *timestamp)
*timestamp = 0;
val = qed_rd(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_SEQID);
if (!(val & QED_TIMESTAMP_MASK)) {
- DP_INFO(p_hwfn, "Invalid Tx timestamp, buf_seqid = %d\n", val);
+ DP_VERBOSE(p_hwfn, QED_MSG_DEBUG,
+ "Invalid Tx timestamp, buf_seqid = %08x\n", val);
return -EINVAL;
}
@@ -242,7 +245,8 @@ static int qed_ptp_hw_cfg_filters(struct qed_dev *cdev,
return -EINVAL;
}
- qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, 0);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK,
+ QED_PTP_UCAST_PARAM_MASK);
qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, rule_mask);
qed_wr(p_hwfn, p_ptt, NIG_REG_RX_PTP_EN, enable_cfg);
@@ -252,7 +256,8 @@ static int qed_ptp_hw_cfg_filters(struct qed_dev *cdev,
qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3FFF);
} else {
qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, enable_cfg);
- qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK,
+ QED_PTP_UCAST_PARAM_MASK);
qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, rule_mask);
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index 7873d6dfd91f..f900fde448db 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -700,7 +700,7 @@ static int qed_rdma_setup(struct qed_hwfn *p_hwfn,
return rc;
if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
- rc = qed_iwarp_setup(p_hwfn, p_ptt, params);
+ rc = qed_iwarp_setup(p_hwfn, params);
if (rc)
return rc;
} else {
@@ -742,7 +742,7 @@ static int qed_rdma_stop(void *rdma_cxt)
(ll2_ethertype_en & 0xFFFE));
if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
- rc = qed_iwarp_stop(p_hwfn, p_ptt);
+ rc = qed_iwarp_stop(p_hwfn);
if (rc) {
qed_ptt_release(p_hwfn, p_ptt);
return rc;
@@ -803,7 +803,7 @@ static int qed_rdma_add_user(void *rdma_cxt,
dpi_start_offset +
((out_params->dpi) * p_hwfn->dpi_size));
- out_params->dpi_phys_addr = p_hwfn->cdev->db_phys_addr +
+ out_params->dpi_phys_addr = p_hwfn->db_phys_addr +
dpi_start_offset +
((out_params->dpi) * p_hwfn->dpi_size);
@@ -818,14 +818,17 @@ static struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
struct qed_rdma_port *p_port = p_hwfn->p_rdma_info->port;
+ struct qed_mcp_link_state *p_link_output;
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA Query port\n");
- /* Link may have changed */
- p_port->port_state = p_hwfn->mcp_info->link_output.link_up ?
- QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN;
+ /* The link state is saved only for the leading hwfn */
+ p_link_output = &QED_LEADING_HWFN(p_hwfn->cdev)->mcp_info->link_output;
- p_port->link_speed = p_hwfn->mcp_info->link_output.speed;
+ p_port->port_state = p_link_output->link_up ? QED_RDMA_PORT_UP
+ : QED_RDMA_PORT_DOWN;
+
+ p_port->link_speed = p_link_output->speed;
p_port->max_msg_size = RDMA_MAX_DATA_SIZE_IN_WQE;
@@ -870,7 +873,7 @@ static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
static int qed_fill_rdma_dev_info(struct qed_dev *cdev,
struct qed_dev_rdma_info *info)
{
- struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev);
memset(info, 0, sizeof(*info));
@@ -889,9 +892,9 @@ static int qed_rdma_get_sb_start(struct qed_dev *cdev)
int feat_num;
if (cdev->num_hwfns > 1)
- feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE);
+ feat_num = FEAT_NUM(QED_AFFIN_HWFN(cdev), QED_PF_L2_QUE);
else
- feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE) *
+ feat_num = FEAT_NUM(QED_AFFIN_HWFN(cdev), QED_PF_L2_QUE) *
cdev->num_hwfns;
return feat_num;
@@ -899,7 +902,7 @@ static int qed_rdma_get_sb_start(struct qed_dev *cdev)
static int qed_rdma_get_min_cnq_msix(struct qed_dev *cdev)
{
- int n_cnq = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_RDMA_CNQ);
+ int n_cnq = FEAT_NUM(QED_AFFIN_HWFN(cdev), QED_RDMA_CNQ);
int n_msix = cdev->int_params.rdma_msix_cnt;
return min_t(int, n_cnq, n_msix);
@@ -1653,7 +1656,7 @@ static int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid)
static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev)
{
- return QED_LEADING_HWFN(cdev);
+ return QED_AFFIN_HWFN(cdev);
}
static int qed_rdma_modify_srq(void *rdma_cxt,
@@ -1881,7 +1884,7 @@ err:
static int qed_rdma_init(struct qed_dev *cdev,
struct qed_rdma_start_in_params *params)
{
- return qed_rdma_start(QED_LEADING_HWFN(cdev), params);
+ return qed_rdma_start(QED_AFFIN_HWFN(cdev), params);
}
static void qed_rdma_remove_user(void *rdma_cxt, u16 dpi)
@@ -1899,23 +1902,12 @@ static int qed_roce_ll2_set_mac_filter(struct qed_dev *cdev,
u8 *old_mac_address,
u8 *new_mac_address)
{
- struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
- struct qed_ptt *p_ptt;
int rc = 0;
- p_ptt = qed_ptt_acquire(p_hwfn);
- if (!p_ptt) {
- DP_ERR(cdev,
- "qed roce ll2 mac filter set: failed to acquire PTT\n");
- return -EINVAL;
- }
-
if (old_mac_address)
- qed_llh_remove_mac_filter(p_hwfn, p_ptt, old_mac_address);
+ qed_llh_remove_mac_filter(cdev, 0, old_mac_address);
if (new_mac_address)
- rc = qed_llh_add_mac_filter(p_hwfn, p_ptt, new_mac_address);
-
- qed_ptt_release(p_hwfn, p_ptt);
+ rc = qed_llh_add_mac_filter(cdev, 0, new_mac_address);
if (rc)
DP_ERR(cdev,
@@ -1924,6 +1916,36 @@ static int qed_roce_ll2_set_mac_filter(struct qed_dev *cdev,
return rc;
}
+static int qed_iwarp_set_engine_affin(struct qed_dev *cdev, bool b_reset)
+{
+ enum qed_eng eng;
+ u8 ppfid = 0;
+ int rc;
+
+ /* Make sure iwarp cmt mode is enabled before setting affinity */
+ if (!cdev->iwarp_cmt)
+ return -EINVAL;
+
+ if (b_reset)
+ eng = QED_BOTH_ENG;
+ else
+ eng = cdev->l2_affin_hint ? QED_ENG1 : QED_ENG0;
+
+ rc = qed_llh_set_ppfid_affinity(cdev, ppfid, eng);
+ if (rc) {
+ DP_NOTICE(cdev,
+ "Failed to set the engine affinity of ppfid %d\n",
+ ppfid);
+ return rc;
+ }
+
+ DP_VERBOSE(cdev, (QED_MSG_RDMA | QED_MSG_SP),
+ "LLH: Set the engine affinity of non-RoCE packets as %d\n",
+ eng);
+
+ return 0;
+}
+
static const struct qed_rdma_ops qed_rdma_ops_pass = {
.common = &qed_common_ops_pass,
.fill_dev_info = &qed_fill_rdma_dev_info,
@@ -1963,6 +1985,7 @@ static const struct qed_rdma_ops qed_rdma_ops_pass = {
.ll2_set_fragment_of_tx_packet = &qed_ll2_set_fragment_of_tx_packet,
.ll2_set_mac_filter = &qed_roce_ll2_set_mac_filter,
.ll2_get_stats = &qed_ll2_get_stats,
+ .iwarp_set_engine_affin = &qed_iwarp_set_engine_affin,
.iwarp_connect = &qed_iwarp_connect,
.iwarp_create_listen = &qed_iwarp_create_listen,
.iwarp_destroy_listen = &qed_iwarp_destroy_listen,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index 5ce825ca5f24..60f850c3bdd6 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -254,6 +254,10 @@
0x500840UL
#define NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR \
0x50196cUL
+#define NIG_REG_LLH_PPFID2PFID_TBL_0 \
+ 0x501970UL
+#define NIG_REG_LLH_ENG_CLS_ROCE_QP_SEL \
+ 0x50
#define NIG_REG_LLH_CLS_TYPE_DUALMODE \
0x501964UL
#define NIG_REG_LLH_FUNC_TAG_EN 0x5019b0UL
@@ -1626,6 +1630,8 @@
#define PHY_PCIE_REG_PHY1_K2_E5 \
0x624000UL
#define NIG_REG_ROCE_DUPLICATE_TO_HOST 0x5088f0UL
+#define NIG_REG_PPF_TO_ENGINE_SEL 0x508900UL
+#define NIG_REG_PPF_TO_ENGINE_SEL_SIZE 8
#define PRS_REG_LIGHT_L2_ETHERTYPE_EN 0x1f0968UL
#define NIG_REG_LLH_ENG_CLS_ENG_ID_TBL 0x501b90UL
#define DORQ_REG_PF_DPM_ENABLE 0x100510UL
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index 5a495fda9e9d..7e0b795230b2 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -588,7 +588,7 @@ int qed_sp_pf_update_stag(struct qed_hwfn *p_hwfn)
{
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
- int rc = -EINVAL;
+ int rc;
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index 2f318aaf2b05..78f77b712b10 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -917,10 +917,11 @@ static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn,
/* Configure igu sb in CAU which were marked valid */
qed_init_cau_sb_entry(p_hwfn, &sb_entry,
p_hwfn->rel_pf_id, vf->abs_vf_id, 1);
+
qed_dmae_host2grc(p_hwfn, p_ptt,
(u64)(uintptr_t)&sb_entry,
CAU_REG_SB_VAR_MEMORY +
- p_block->igu_sb_id * sizeof(u64), 2, 0);
+ p_block->igu_sb_id * sizeof(u64), 2, NULL);
}
vf->num_sbs = (u8) num_rx_queues;
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index 92fe226980fd..0e931c04fecf 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -92,6 +92,7 @@ struct qede_stats_common {
u64 non_coalesced_pkts;
u64 coalesced_bytes;
u64 link_change_count;
+ u64 ptp_skip_txts;
/* port */
u64 rx_64_byte_packets;
@@ -189,6 +190,7 @@ struct qede_dev {
const struct qed_eth_ops *ops;
struct qede_ptp *ptp;
+ u64 ptp_skip_txts;
struct qed_dev_eth_info dev_info;
#define QEDE_MAX_RSS_CNT(edev) ((edev)->dev_info.num_queues)
@@ -549,7 +551,7 @@ int qede_txq_has_work(struct qede_tx_queue *txq);
void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count);
void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq);
int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
- struct tc_cls_flower_offload *f);
+ struct flow_cls_offload *f);
#define RX_RING_SIZE_POW 13
#define RX_RING_SIZE ((u16)BIT(RX_RING_SIZE_POW))
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 8911a97ab0ca..e85f9fef930c 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -174,6 +174,7 @@ static const struct {
QEDE_STAT(coalesced_bytes),
QEDE_STAT(link_change_count),
+ QEDE_STAT(ptp_skip_txts),
};
#define QEDE_NUM_STATS ARRAY_SIZE(qede_stats_arr)
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
index add922b93d2c..9a6a9a008714 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
@@ -1943,7 +1943,7 @@ qede_parse_flow_attr(struct qede_dev *edev, __be16 proto,
}
int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
- struct tc_cls_flower_offload *f)
+ struct flow_cls_offload *f)
{
struct qede_arfs_fltr_node *n;
int min_hlen, rc = -EINVAL;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 02a97c659e29..8d1c208f778f 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -390,6 +390,7 @@ void qede_fill_by_demand_stats(struct qede_dev *edev)
p_common->brb_discards = stats.common.brb_discards;
p_common->tx_mac_ctrl_frames = stats.common.tx_mac_ctrl_frames;
p_common->link_change_count = stats.common.link_change_count;
+ p_common->ptp_skip_txts = edev->ptp_skip_txts;
if (QEDE_IS_BB(edev)) {
struct qede_stats_bb *p_bb = &edev->stats.bb;
@@ -547,13 +548,13 @@ static int qede_setup_tc(struct net_device *ndev, u8 num_tc)
}
static int
-qede_set_flower(struct qede_dev *edev, struct tc_cls_flower_offload *f,
+qede_set_flower(struct qede_dev *edev, struct flow_cls_offload *f,
__be16 proto)
{
switch (f->command) {
- case TC_CLSFLOWER_REPLACE:
+ case FLOW_CLS_REPLACE:
return qede_add_tc_flower_fltr(edev, proto, f);
- case TC_CLSFLOWER_DESTROY:
+ case FLOW_CLS_DESTROY:
return qede_delete_flow_filter(edev, f->cookie);
default:
return -EOPNOTSUPP;
@@ -563,7 +564,7 @@ qede_set_flower(struct qede_dev *edev, struct tc_cls_flower_offload *f,
static int qede_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
void *cb_priv)
{
- struct tc_cls_flower_offload *f;
+ struct flow_cls_offload *f;
struct qede_dev *edev = cb_priv;
if (!tc_cls_can_offload_and_chain0(edev->ndev, type_data))
@@ -578,24 +579,7 @@ static int qede_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
}
}
-static int qede_setup_tc_block(struct qede_dev *edev,
- struct tc_block_offload *f)
-{
- if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
- return -EOPNOTSUPP;
-
- switch (f->command) {
- case TC_BLOCK_BIND:
- return tcf_block_cb_register(f->block,
- qede_setup_tc_block_cb,
- edev, edev, f->extack);
- case TC_BLOCK_UNBIND:
- tcf_block_cb_unregister(f->block, qede_setup_tc_block_cb, edev);
- return 0;
- default:
- return -EOPNOTSUPP;
- }
-}
+static LIST_HEAD(qede_block_cb_list);
static int
qede_setup_tc_offload(struct net_device *dev, enum tc_setup_type type,
@@ -606,7 +590,10 @@ qede_setup_tc_offload(struct net_device *dev, enum tc_setup_type type,
switch (type) {
case TC_SETUP_BLOCK:
- return qede_setup_tc_block(edev, type_data);
+ return flow_block_cb_setup_simple(type_data,
+ &qede_block_cb_list,
+ qede_setup_tc_block_cb,
+ edev, edev, true);
case TC_SETUP_QDISC_MQPRIO:
mqprio = type_data;
@@ -959,13 +946,13 @@ void __qede_unlock(struct qede_dev *edev)
/* This version of the lock should be used when acquiring the RTNL lock is also
* needed in addition to the internal qede lock.
*/
-void qede_lock(struct qede_dev *edev)
+static void qede_lock(struct qede_dev *edev)
{
rtnl_lock();
__qede_lock(edev);
}
-void qede_unlock(struct qede_dev *edev)
+static void qede_unlock(struct qede_dev *edev)
{
__qede_unlock(edev);
rtnl_unlock();
@@ -1306,7 +1293,8 @@ static void qede_free_mem_sb(struct qede_dev *edev, struct qed_sb_info *sb_info,
u16 sb_id)
{
if (sb_info->sb_virt) {
- edev->ops->common->sb_release(edev->cdev, sb_info, sb_id);
+ edev->ops->common->sb_release(edev->cdev, sb_info, sb_id,
+ QED_SB_TYPE_L2_QUEUE);
dma_free_coherent(&edev->pdev->dev, sizeof(*sb_info->sb_virt),
(void *)sb_info->sb_virt, sb_info->sb_phys);
memset(sb_info, 0, sizeof(*sb_info));
@@ -2231,6 +2219,8 @@ out:
if (mode != QEDE_UNLOAD_RECOVERY)
DP_NOTICE(edev, "Link is down\n");
+ edev->ptp_skip_txts = 0;
+
DP_INFO(edev, "Ending qede unload\n");
}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
index bddb2b5982dc..f815435cf106 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
@@ -30,6 +30,7 @@
* SOFTWARE.
*/
#include "qede_ptp.h"
+#define QEDE_PTP_TX_TIMEOUT (2 * HZ)
struct qede_ptp {
const struct qed_eth_ptp_ops *ops;
@@ -38,6 +39,7 @@ struct qede_ptp {
struct timecounter tc;
struct ptp_clock *clock;
struct work_struct work;
+ unsigned long ptp_tx_start;
struct qede_dev *edev;
struct sk_buff *tx_skb;
@@ -160,18 +162,30 @@ static void qede_ptp_task(struct work_struct *work)
struct qede_dev *edev;
struct qede_ptp *ptp;
u64 timestamp, ns;
+ bool timedout;
int rc;
ptp = container_of(work, struct qede_ptp, work);
edev = ptp->edev;
+ timedout = time_is_before_jiffies(ptp->ptp_tx_start +
+ QEDE_PTP_TX_TIMEOUT);
/* Read Tx timestamp registers */
spin_lock_bh(&ptp->lock);
rc = ptp->ops->read_tx_ts(edev->cdev, &timestamp);
spin_unlock_bh(&ptp->lock);
if (rc) {
- /* Reschedule to keep checking for a valid timestamp value */
- schedule_work(&ptp->work);
+ if (unlikely(timedout)) {
+ DP_INFO(edev, "Tx timestamp is not recorded\n");
+ dev_kfree_skb_any(ptp->tx_skb);
+ ptp->tx_skb = NULL;
+ clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS,
+ &edev->flags);
+ edev->ptp_skip_txts++;
+ } else {
+ /* Reschedule to keep checking for a valid TS value */
+ schedule_work(&ptp->work);
+ }
return;
}
@@ -514,19 +528,28 @@ void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb)
if (!ptp)
return;
- if (test_and_set_bit_lock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags))
+ if (test_and_set_bit_lock(QEDE_FLAGS_PTP_TX_IN_PRORGESS,
+ &edev->flags)) {
+ DP_ERR(edev, "Timestamping in progress\n");
+ edev->ptp_skip_txts++;
return;
+ }
if (unlikely(!test_bit(QEDE_FLAGS_TX_TIMESTAMPING_EN, &edev->flags))) {
- DP_NOTICE(edev,
- "Tx timestamping was not enabled, this packet will not be timestamped\n");
+ DP_ERR(edev,
+ "Tx timestamping was not enabled, this packet will not be timestamped\n");
+ clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags);
+ edev->ptp_skip_txts++;
} else if (unlikely(ptp->tx_skb)) {
- DP_NOTICE(edev,
- "The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n");
+ DP_ERR(edev,
+ "The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n");
+ clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags);
+ edev->ptp_skip_txts++;
} else {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
/* schedule check for Tx timestamp */
ptp->tx_skb = skb_get(skb);
+ ptp->ptp_tx_start = jiffies;
schedule_work(&ptp->work);
}
}
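/* [Editor's note] The qede_ptp changes above arm a deadline
 * (QEDE_PTP_TX_TIMEOUT, i.e. 2 * HZ) when a packet is queued for
 * timestamping and give up via time_is_before_jiffies() instead of
 * rescheduling the poll forever. A minimal user-space sketch of that
 * record-start/poll-until-deadline shape, with wall-clock seconds
 * standing in for jiffies; the stub never latches a timestamp so the
 * timeout path runs (all names here are illustrative):
 */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define TS_TIMEOUT_SEC 2	/* stands in for QEDE_PTP_TX_TIMEOUT */

/* Hardware-read stub: pretends the timestamp is never recorded. */
static bool read_tx_ts(unsigned long long *ts)
{
	(void)ts;
	return false;
}

int main(void)
{
	time_t tx_start = time(NULL);	/* recorded when the skb was queued */
	unsigned long long ts = 0;

	while (!read_tx_ts(&ts)) {
		if (time(NULL) > tx_start + TS_TIMEOUT_SEC) {
			puts("Tx timestamp is not recorded");	/* skip it */
			return 1;
		}
	}
	printf("timestamp %llu\n", ts);
	return 0;
}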
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 7a873002e626..c07438db30ba 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -4119,13 +4119,14 @@ static void
qlcnic_config_indev_addr(struct qlcnic_adapter *adapter,
struct net_device *dev, unsigned long event)
{
+ const struct in_ifaddr *ifa;
struct in_device *indev;
indev = in_dev_get(dev);
if (!indev)
return;
- for_ifa(indev) {
+ in_dev_for_each_ifa_rtnl(ifa, indev) {
switch (event) {
case NETDEV_UP:
qlcnic_config_ipaddr(adapter,
@@ -4138,7 +4139,7 @@ qlcnic_config_indev_addr(struct qlcnic_adapter *adapter,
default:
break;
}
- } endfor_ifa(indev);
+ }
in_dev_put(indev);
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index af3b037fa442..5632da05145a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -1066,7 +1066,7 @@ static int qlcnic_sriov_pf_cfg_ip_cmd(struct qlcnic_bc_trans *trans,
{
struct qlcnic_vf_info *vf = trans->vf;
struct qlcnic_adapter *adapter = vf->adapter;
- int err = -EIO;
+ int err;
cmd->req.arg[1] |= vf->vp->handle << 16;
cmd->req.arg[1] |= BIT_31;
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
index 4bf20d0651c4..576501db2a0b 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
@@ -4,6 +4,7 @@
#ifndef _RMNET_MAP_H_
#define _RMNET_MAP_H_
+#include <linux/if_rmnet.h>
struct rmnet_map_control_command {
u8 command_name;
@@ -31,30 +32,6 @@ enum rmnet_map_commands {
RMNET_MAP_COMMAND_ENUM_LENGTH
};
-struct rmnet_map_header {
- u8 pad_len:6;
- u8 reserved_bit:1;
- u8 cd_bit:1;
- u8 mux_id;
- __be16 pkt_len;
-} __aligned(1);
-
-struct rmnet_map_dl_csum_trailer {
- u8 reserved1;
- u8 valid:1;
- u8 reserved2:7;
- u16 csum_start_offset;
- u16 csum_length;
- __be16 csum_value;
-} __aligned(1);
-
-struct rmnet_map_ul_csum_header {
- __be16 csum_start_offset;
- u16 csum_insert_offset:14;
- u16 udp_ip4_ind:1;
- u16 csum_enabled:1;
-} __aligned(1);
-
#define RMNET_MAP_GET_MUX_ID(Y) (((struct rmnet_map_header *) \
(Y)->data)->mux_id)
#define RMNET_MAP_GET_CD_BIT(Y) (((struct rmnet_map_header *) \
diff --git a/drivers/net/ethernet/realtek/Makefile b/drivers/net/ethernet/realtek/Makefile
index 33be8c5ad0c9..d5304bad2372 100644
--- a/drivers/net/ethernet/realtek/Makefile
+++ b/drivers/net/ethernet/realtek/Makefile
@@ -6,4 +6,5 @@
obj-$(CONFIG_8139CP) += 8139cp.o
obj-$(CONFIG_8139TOO) += 8139too.o
obj-$(CONFIG_ATP) += atp.o
+r8169-objs += r8169_main.o r8169_firmware.o
obj-$(CONFIG_R8169) += r8169.o
diff --git a/drivers/net/ethernet/realtek/r8169_firmware.c b/drivers/net/ethernet/realtek/r8169_firmware.c
new file mode 100644
index 000000000000..8f54a2c832eb
--- /dev/null
+++ b/drivers/net/ethernet/realtek/r8169_firmware.c
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* r8169_firmware.c: RealTek 8169/8168/8101 ethernet driver.
+ *
+ * Copyright (c) 2002 ShuChen <shuchen@realtek.com.tw>
+ * Copyright (c) 2003 - 2007 Francois Romieu <romieu@fr.zoreil.com>
+ * Copyright (c) a lot of people too. Please respect their work.
+ *
+ * See MAINTAINERS file for support contact information.
+ */
+
+#include <linux/delay.h>
+#include <linux/firmware.h>
+
+#include "r8169_firmware.h"
+
+enum rtl_fw_opcode {
+ PHY_READ = 0x0,
+ PHY_DATA_OR = 0x1,
+ PHY_DATA_AND = 0x2,
+ PHY_BJMPN = 0x3,
+ PHY_MDIO_CHG = 0x4,
+ PHY_CLEAR_READCOUNT = 0x7,
+ PHY_WRITE = 0x8,
+ PHY_READCOUNT_EQ_SKIP = 0x9,
+ PHY_COMP_EQ_SKIPN = 0xa,
+ PHY_COMP_NEQ_SKIPN = 0xb,
+ PHY_WRITE_PREVIOUS = 0xc,
+ PHY_SKIPN = 0xd,
+ PHY_DELAY_MS = 0xe,
+};
+
+struct fw_info {
+ u32 magic;
+ char version[RTL_VER_SIZE];
+ __le32 fw_start;
+ __le32 fw_len;
+ u8 chksum;
+} __packed;
+
+#define FW_OPCODE_SIZE sizeof(typeof(*((struct rtl_fw_phy_action *)0)->code))
+
+static bool rtl_fw_format_ok(struct rtl_fw *rtl_fw)
+{
+ const struct firmware *fw = rtl_fw->fw;
+ struct fw_info *fw_info = (struct fw_info *)fw->data;
+ struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
+
+ if (fw->size < FW_OPCODE_SIZE)
+ return false;
+
+ if (!fw_info->magic) {
+ size_t i, size, start;
+ u8 checksum = 0;
+
+ if (fw->size < sizeof(*fw_info))
+ return false;
+
+ for (i = 0; i < fw->size; i++)
+ checksum += fw->data[i];
+ if (checksum != 0)
+ return false;
+
+ start = le32_to_cpu(fw_info->fw_start);
+ if (start > fw->size)
+ return false;
+
+ size = le32_to_cpu(fw_info->fw_len);
+ if (size > (fw->size - start) / FW_OPCODE_SIZE)
+ return false;
+
+ strscpy(rtl_fw->version, fw_info->version, RTL_VER_SIZE);
+
+ pa->code = (__le32 *)(fw->data + start);
+ pa->size = size;
+ } else {
+ if (fw->size % FW_OPCODE_SIZE)
+ return false;
+
+ strscpy(rtl_fw->version, rtl_fw->fw_name, RTL_VER_SIZE);
+
+ pa->code = (__le32 *)fw->data;
+ pa->size = fw->size / FW_OPCODE_SIZE;
+ }
+
+ return true;
+}
+
+static bool rtl_fw_data_ok(struct rtl_fw *rtl_fw)
+{
+ struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
+ size_t index;
+
+ for (index = 0; index < pa->size; index++) {
+ u32 action = le32_to_cpu(pa->code[index]);
+ u32 regno = (action & 0x0fff0000) >> 16;
+
+ switch (action >> 28) {
+ case PHY_READ:
+ case PHY_DATA_OR:
+ case PHY_DATA_AND:
+ case PHY_MDIO_CHG:
+ case PHY_CLEAR_READCOUNT:
+ case PHY_WRITE:
+ case PHY_WRITE_PREVIOUS:
+ case PHY_DELAY_MS:
+ break;
+
+ case PHY_BJMPN:
+ if (regno > index)
+ goto out;
+ break;
+ case PHY_READCOUNT_EQ_SKIP:
+ if (index + 2 >= pa->size)
+ goto out;
+ break;
+ case PHY_COMP_EQ_SKIPN:
+ case PHY_COMP_NEQ_SKIPN:
+ case PHY_SKIPN:
+ if (index + 1 + regno >= pa->size)
+ goto out;
+ break;
+
+ default:
+ dev_err(rtl_fw->dev, "Invalid action 0x%08x\n", action);
+ return false;
+ }
+ }
+
+ return true;
+out:
+ dev_err(rtl_fw->dev, "Out of range of firmware\n");
+ return false;
+}
+
+void rtl_fw_write_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
+{
+ struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
+ rtl_fw_write_t fw_write = rtl_fw->phy_write;
+ rtl_fw_read_t fw_read = rtl_fw->phy_read;
+ int predata = 0, count = 0;
+ size_t index;
+
+ for (index = 0; index < pa->size; index++) {
+ u32 action = le32_to_cpu(pa->code[index]);
+ u32 data = action & 0x0000ffff;
+ u32 regno = (action & 0x0fff0000) >> 16;
+ enum rtl_fw_opcode opcode = action >> 28;
+
+ if (!action)
+ break;
+
+ switch (opcode) {
+ case PHY_READ:
+ predata = fw_read(tp, regno);
+ count++;
+ break;
+ case PHY_DATA_OR:
+ predata |= data;
+ break;
+ case PHY_DATA_AND:
+ predata &= data;
+ break;
+ case PHY_BJMPN:
+ index -= (regno + 1);
+ break;
+ case PHY_MDIO_CHG:
+ if (data == 0) {
+ fw_write = rtl_fw->phy_write;
+ fw_read = rtl_fw->phy_read;
+ } else if (data == 1) {
+ fw_write = rtl_fw->mac_mcu_write;
+ fw_read = rtl_fw->mac_mcu_read;
+ }
+
+ break;
+ case PHY_CLEAR_READCOUNT:
+ count = 0;
+ break;
+ case PHY_WRITE:
+ fw_write(tp, regno, data);
+ break;
+ case PHY_READCOUNT_EQ_SKIP:
+ if (count == data)
+ index++;
+ break;
+ case PHY_COMP_EQ_SKIPN:
+ if (predata == data)
+ index += regno;
+ break;
+ case PHY_COMP_NEQ_SKIPN:
+ if (predata != data)
+ index += regno;
+ break;
+ case PHY_WRITE_PREVIOUS:
+ fw_write(tp, regno, predata);
+ break;
+ case PHY_SKIPN:
+ index += regno;
+ break;
+ case PHY_DELAY_MS:
+ mdelay(data);
+ break;
+ }
+ }
+}
+
+void rtl_fw_release_firmware(struct rtl_fw *rtl_fw)
+{
+ release_firmware(rtl_fw->fw);
+}
+
+int rtl_fw_request_firmware(struct rtl_fw *rtl_fw)
+{
+ int rc;
+
+ rc = request_firmware(&rtl_fw->fw, rtl_fw->fw_name, rtl_fw->dev);
+ if (rc < 0)
+ goto out;
+
+ if (!rtl_fw_format_ok(rtl_fw) || !rtl_fw_data_ok(rtl_fw)) {
+ release_firmware(rtl_fw->fw);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ return 0;
+out:
+ dev_err(rtl_fw->dev, "Unable to load firmware %s (%d)\n",
+ rtl_fw->fw_name, rc);
+ return rc;
+}
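/* [Editor's note] rtl_fw_write_firmware() above treats each 32-bit action
 * word as: opcode in bits 31:28, PHY register number in bits 27:16 and
 * immediate data in bits 15:0 (the masks and shifts are taken from the
 * code above). A stand-alone decoder sketch; the sample word and the
 * struct name are illustrative:
 */
#include <stdint.h>
#include <stdio.h>

struct fw_action {
	uint8_t  opcode;	/* action >> 28, e.g. PHY_WRITE == 0x8 */
	uint16_t regno;		/* (action & 0x0fff0000) >> 16 */
	uint16_t data;		/* action & 0x0000ffff */
};

static struct fw_action decode_action(uint32_t action)
{
	struct fw_action a = {
		.opcode = action >> 28,
		.regno  = (action & 0x0fff0000) >> 16,
		.data   = action & 0x0000ffff,
	};
	return a;
}

int main(void)
{
	/* 0x801f0001 would mean: PHY_WRITE (0x8) to register 0x1f, data 0x0001 */
	struct fw_action a = decode_action(0x801f0001);

	printf("opcode=%#x regno=%#x data=%#x\n", a.opcode, a.regno, a.data);
	return 0;
}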
diff --git a/drivers/net/ethernet/realtek/r8169_firmware.h b/drivers/net/ethernet/realtek/r8169_firmware.h
new file mode 100644
index 000000000000..7dc348ed8345
--- /dev/null
+++ b/drivers/net/ethernet/realtek/r8169_firmware.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* r8169_firmware.h: RealTek 8169/8168/8101 ethernet driver.
+ *
+ * Copyright (c) 2002 ShuChen <shuchen@realtek.com.tw>
+ * Copyright (c) 2003 - 2007 Francois Romieu <romieu@fr.zoreil.com>
+ * Copyright (c) a lot of people too. Please respect their work.
+ *
+ * See MAINTAINERS file for support contact information.
+ */
+
+#include <linux/device.h>
+#include <linux/firmware.h>
+
+struct rtl8169_private;
+typedef void (*rtl_fw_write_t)(struct rtl8169_private *tp, int reg, int val);
+typedef int (*rtl_fw_read_t)(struct rtl8169_private *tp, int reg);
+
+#define RTL_VER_SIZE 32
+
+struct rtl_fw {
+ rtl_fw_write_t phy_write;
+ rtl_fw_read_t phy_read;
+ rtl_fw_write_t mac_mcu_write;
+ rtl_fw_read_t mac_mcu_read;
+ const struct firmware *fw;
+ const char *fw_name;
+ struct device *dev;
+
+ char version[RTL_VER_SIZE];
+
+ struct rtl_fw_phy_action {
+ __le32 *code;
+ size_t size;
+ } phy_action;
+};
+
+int rtl_fw_request_firmware(struct rtl_fw *rtl_fw);
+void rtl_fw_release_firmware(struct rtl_fw *rtl_fw);
+void rtl_fw_write_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw);
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169_main.c
index d06a61f00e78..efef5453b94f 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -27,12 +27,13 @@
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
-#include <linux/firmware.h>
#include <linux/prefetch.h>
#include <linux/pci-aspm.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
+#include "r8169_firmware.h"
+
#define MODULENAME "r8169"
#define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw"
@@ -72,6 +73,8 @@ static const int multicast_filter_limit = 32;
#define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
#define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
+#define RTL_CFG_NO_GBIT 1
+
/* write/read MMIO register */
#define RTL_W8(tp, reg, val8) writeb((val8), tp->mmio_addr + (reg))
#define RTL_W16(tp, reg, val16) writew((val16), tp->mmio_addr + (reg))
@@ -81,7 +84,7 @@ static const int multicast_filter_limit = 32;
#define RTL_R32(tp, reg) readl(tp->mmio_addr + (reg))
enum mac_version {
- RTL_GIGA_MAC_VER_01 = 0,
+ /* support for ancient RTL_GIGA_MAC_VER_01 has been removed */
RTL_GIGA_MAC_VER_02,
RTL_GIGA_MAC_VER_03,
RTL_GIGA_MAC_VER_04,
@@ -132,7 +135,7 @@ enum mac_version {
RTL_GIGA_MAC_VER_49,
RTL_GIGA_MAC_VER_50,
RTL_GIGA_MAC_VER_51,
- RTL_GIGA_MAC_NONE = 0xff,
+ RTL_GIGA_MAC_NONE
};
#define JUMBO_1K ETH_DATA_LEN
@@ -146,7 +149,6 @@ static const struct {
const char *fw_name;
} rtl_chip_infos[] = {
/* PCI devices. */
- [RTL_GIGA_MAC_VER_01] = {"RTL8169" },
[RTL_GIGA_MAC_VER_02] = {"RTL8169s" },
[RTL_GIGA_MAC_VER_03] = {"RTL8110s" },
[RTL_GIGA_MAC_VER_04] = {"RTL8169sb/8110sb" },
@@ -155,7 +157,7 @@ static const struct {
/* PCI-E devices. */
[RTL_GIGA_MAC_VER_07] = {"RTL8102e" },
[RTL_GIGA_MAC_VER_08] = {"RTL8102e" },
- [RTL_GIGA_MAC_VER_09] = {"RTL8102e" },
+ [RTL_GIGA_MAC_VER_09] = {"RTL8102e/RTL8103e" },
[RTL_GIGA_MAC_VER_10] = {"RTL8101e" },
[RTL_GIGA_MAC_VER_11] = {"RTL8168b/8111b" },
[RTL_GIGA_MAC_VER_12] = {"RTL8168b/8111b" },
@@ -188,9 +190,9 @@ static const struct {
[RTL_GIGA_MAC_VER_39] = {"RTL8106e", FIRMWARE_8106E_1},
[RTL_GIGA_MAC_VER_40] = {"RTL8168g/8111g", FIRMWARE_8168G_2},
[RTL_GIGA_MAC_VER_41] = {"RTL8168g/8111g" },
- [RTL_GIGA_MAC_VER_42] = {"RTL8168g/8111g", FIRMWARE_8168G_3},
- [RTL_GIGA_MAC_VER_43] = {"RTL8106e", FIRMWARE_8106E_2},
- [RTL_GIGA_MAC_VER_44] = {"RTL8411", FIRMWARE_8411_2 },
+ [RTL_GIGA_MAC_VER_42] = {"RTL8168gu/8111gu", FIRMWARE_8168G_3},
+ [RTL_GIGA_MAC_VER_43] = {"RTL8106eus", FIRMWARE_8106E_2},
+ [RTL_GIGA_MAC_VER_44] = {"RTL8411b", FIRMWARE_8411_2 },
[RTL_GIGA_MAC_VER_45] = {"RTL8168h/8111h", FIRMWARE_8168H_1},
[RTL_GIGA_MAC_VER_46] = {"RTL8168h/8111h", FIRMWARE_8168H_2},
[RTL_GIGA_MAC_VER_47] = {"RTL8107e", FIRMWARE_8107E_1},
@@ -200,32 +202,24 @@ static const struct {
[RTL_GIGA_MAC_VER_51] = {"RTL8168ep/8111ep" },
};
-enum cfg_version {
- RTL_CFG_0 = 0x00,
- RTL_CFG_1,
- RTL_CFG_2
-};
-
static const struct pci_device_id rtl8169_pci_tbl[] = {
- { PCI_VDEVICE(REALTEK, 0x2502), RTL_CFG_1 },
- { PCI_VDEVICE(REALTEK, 0x2600), RTL_CFG_1 },
- { PCI_VDEVICE(REALTEK, 0x8129), RTL_CFG_0 },
- { PCI_VDEVICE(REALTEK, 0x8136), RTL_CFG_2 },
- { PCI_VDEVICE(REALTEK, 0x8161), RTL_CFG_1 },
- { PCI_VDEVICE(REALTEK, 0x8167), RTL_CFG_0 },
- { PCI_VDEVICE(REALTEK, 0x8168), RTL_CFG_1 },
- { PCI_VDEVICE(NCUBE, 0x8168), RTL_CFG_1 },
- { PCI_VDEVICE(REALTEK, 0x8169), RTL_CFG_0 },
+ { PCI_VDEVICE(REALTEK, 0x2502) },
+ { PCI_VDEVICE(REALTEK, 0x2600) },
+ { PCI_VDEVICE(REALTEK, 0x8129) },
+ { PCI_VDEVICE(REALTEK, 0x8136), RTL_CFG_NO_GBIT },
+ { PCI_VDEVICE(REALTEK, 0x8161) },
+ { PCI_VDEVICE(REALTEK, 0x8167) },
+ { PCI_VDEVICE(REALTEK, 0x8168) },
+ { PCI_VDEVICE(NCUBE, 0x8168) },
+ { PCI_VDEVICE(REALTEK, 0x8169) },
{ PCI_VENDOR_ID_DLINK, 0x4300,
- PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0, RTL_CFG_1 },
- { PCI_VDEVICE(DLINK, 0x4300), RTL_CFG_0 },
- { PCI_VDEVICE(DLINK, 0x4302), RTL_CFG_0 },
- { PCI_VDEVICE(AT, 0xc107), RTL_CFG_0 },
- { PCI_VDEVICE(USR, 0x0116), RTL_CFG_0 },
- { PCI_VENDOR_ID_LINKSYS, 0x1032,
- PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 },
- { 0x0001, 0x8168,
- PCI_ANY_ID, 0x2410, 0, 0, RTL_CFG_2 },
+ PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0 },
+ { PCI_VDEVICE(DLINK, 0x4300) },
+ { PCI_VDEVICE(DLINK, 0x4302) },
+ { PCI_VDEVICE(AT, 0xc107) },
+ { PCI_VDEVICE(USR, 0x0116) },
+ { PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0024 },
+ { 0x0001, 0x8168, PCI_ANY_ID, 0x2410 },
{}
};
@@ -406,8 +400,6 @@ enum rtl_register_content {
RxOK = 0x0001,
/* RxStatusDesc */
- RxBOVF = (1 << 24),
- RxFOVF = (1 << 23),
RxRWT = (1 << 22),
RxRES = (1 << 21),
RxRUNT = (1 << 20),
@@ -492,6 +484,7 @@ enum rtl_register_content {
PCIDAC = (1 << 4),
PCIMulRW = (1 << 3),
#define INTT_MASK GENMASK(1, 0)
+#define CPCMD_MASK (Normal_mode | RxVlan | RxChkSum | INTT_MASK)
/* rtl8169_PHYstatus */
TBI_Enable = 0x80,
@@ -503,9 +496,6 @@ enum rtl_register_content {
LinkStatus = 0x02,
FullDup = 0x01,
- /* _TBICSRBit */
- TBILinkOK = 0x02000000,
-
/* ResetCounterCommand */
CounterReset = 0x1,
@@ -578,7 +568,6 @@ enum rtl_rx_desc_bit {
};
#define RsvdMask 0x3fffc000
-#define CPCMD_QUIRK_MASK (Normal_mode | RxVlan | RxChkSum | INTT_MASK)
struct TxDesc {
__le32 opts1;
@@ -639,7 +628,7 @@ struct rtl8169_private {
struct phy_device *phydev;
struct napi_struct napi;
u32 msg_enable;
- u16 mac_version;
+ enum mac_version mac_version;
u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. */
u32 dirty_tx;
@@ -652,24 +641,9 @@ struct rtl8169_private {
void *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */
struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */
u16 cp_cmd;
-
u16 irq_mask;
- const struct rtl_coalesce_info *coalesce_info;
struct clk *clk;
- struct mdio_ops {
- void (*write)(struct rtl8169_private *, int, int);
- int (*read)(struct rtl8169_private *, int);
- } mdio_ops;
-
- struct jumbo_ops {
- void (*enable)(struct rtl8169_private *);
- void (*disable)(struct rtl8169_private *);
- } jumbo_ops;
-
- void (*hw_start)(struct rtl8169_private *tp);
- bool (*tso_csum)(struct rtl8169_private *, struct sk_buff *, u32 *);
-
struct {
DECLARE_BITMAP(flags, RTL_FLAG_MAX);
struct mutex mutex;
@@ -678,24 +652,14 @@ struct rtl8169_private {
unsigned irq_enabled:1;
unsigned supports_gmii:1;
+ unsigned aspm_manageable:1;
dma_addr_t counters_phys_addr;
struct rtl8169_counters *counters;
struct rtl8169_tc_offsets tc_offset;
u32 saved_wolopts;
const char *fw_name;
- struct rtl_fw {
- const struct firmware *fw;
-
-#define RTL_VER_SIZE 32
-
- char version[RTL_VER_SIZE];
-
- struct rtl_fw_phy_action {
- __le32 *code;
- size_t size;
- } phy_action;
- } *rtl_fw;
+ struct rtl_fw *rtl_fw;
u32 ocp_base;
};
@@ -759,6 +723,12 @@ static void rtl_tx_performance_tweak(struct rtl8169_private *tp, u16 force)
PCI_EXP_DEVCTL_READRQ, force);
}
+static bool rtl_is_8168evl_up(struct rtl8169_private *tp)
+{
+ return tp->mac_version >= RTL_GIGA_MAC_VER_34 &&
+ tp->mac_version != RTL_GIGA_MAC_VER_39;
+}
+
struct rtl_cond {
bool (*check)(struct rtl8169_private *);
const char *msg;
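
rtl_is_8168evl_up() folds the repeated "RTL8168e-vl (VER_34) and newer, except the RTL8106e (VER_39)" range checks into one helper; later hunks reuse it for the WoL config table size, TXCFG_AUTO_FIFO, the early-tx threshold and the ERI-based MAC address read. Condensed from rtl_set_tx_config_registers() further down, purely to illustrate a call site:

static void example_tx_config(struct rtl8169_private *tp)
{
        u32 val = TX_DMA_BURST << TxDMAShift |
                  InterFrameGap << TxInterFrameGapShift;

        if (rtl_is_8168evl_up(tp))      /* 8168e-vl and newer, minus the 8106e */
                val |= TXCFG_AUTO_FIFO;

        RTL_W32(tp, TxConfig, val);
}
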
@@ -847,7 +817,7 @@ static void r8168_phy_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
rtl_udelay_loop_wait_low(tp, &rtl_ocp_gphy_cond, 25, 10);
}
-static u16 r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg)
+static int r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg)
{
if (rtl_ocp_reg_failure(tp, reg))
return 0;
@@ -855,7 +825,7 @@ static u16 r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg)
RTL_W32(tp, GPHY_OCP, reg << 15);
return rtl_udelay_loop_wait_high(tp, &rtl_ocp_gphy_cond, 25, 10) ?
- (RTL_R32(tp, GPHY_OCP) & 0xffff) : ~0;
+ (RTL_R32(tp, GPHY_OCP) & 0xffff) : -ETIMEDOUT;
}
static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
@@ -938,7 +908,7 @@ static int r8169_mdio_read(struct rtl8169_private *tp, int reg)
RTL_W32(tp, PHYAR, 0x0 | (reg & 0x1f) << 16);
value = rtl_udelay_loop_wait_high(tp, &rtl_phyar_cond, 25, 20) ?
- RTL_R32(tp, PHYAR) & 0xffff : ~0;
+ RTL_R32(tp, PHYAR) & 0xffff : -ETIMEDOUT;
/*
* According to hardware specs a 20us delay is required after read
@@ -978,7 +948,7 @@ static int r8168dp_1_mdio_read(struct rtl8169_private *tp, int reg)
RTL_W32(tp, EPHY_RXER_NUM, 0);
return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 1000, 100) ?
- RTL_R32(tp, OCPDR) & OCPDR_DATA_MASK : ~0;
+ RTL_R32(tp, OCPDR) & OCPDR_DATA_MASK : -ETIMEDOUT;
}
#define R8168DP_1_MDIO_ACCESS_BIT 0x00020000
@@ -1015,14 +985,38 @@ static int r8168dp_2_mdio_read(struct rtl8169_private *tp, int reg)
return value;
}
-static void rtl_writephy(struct rtl8169_private *tp, int location, u32 val)
+static void rtl_writephy(struct rtl8169_private *tp, int location, int val)
{
- tp->mdio_ops.write(tp, location, val);
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_27:
+ r8168dp_1_mdio_write(tp, location, val);
+ break;
+ case RTL_GIGA_MAC_VER_28:
+ case RTL_GIGA_MAC_VER_31:
+ r8168dp_2_mdio_write(tp, location, val);
+ break;
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
+ r8168g_mdio_write(tp, location, val);
+ break;
+ default:
+ r8169_mdio_write(tp, location, val);
+ break;
+ }
}
static int rtl_readphy(struct rtl8169_private *tp, int location)
{
- return tp->mdio_ops.read(tp, location);
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_27:
+ return r8168dp_1_mdio_read(tp, location);
+ case RTL_GIGA_MAC_VER_28:
+ case RTL_GIGA_MAC_VER_31:
+ return r8168dp_2_mdio_read(tp, location);
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
+ return r8168g_mdio_read(tp, location);
+ default:
+ return r8169_mdio_read(tp, location);
+ }
}
static void rtl_patchphy(struct rtl8169_private *tp, int reg_addr, int value)
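
Two things change here: the per-chip mdio_ops function pointers give way to a direct switch on tp->mac_version, and the low-level readers above now return int with -ETIMEDOUT on timeout instead of ~0, so a failed MDIO access is distinguishable from a genuine 0xffff register value. An illustrative caller (the function and its post-processing are made up for the example):

static int example_read_phy_reg(struct rtl8169_private *tp)
{
        int val = rtl_readphy(tp, 0x0d);

        if (val < 0)            /* e.g. -ETIMEDOUT propagated from r8169_mdio_read() */
                return val;

        return val & 0x00ff;    /* arbitrary post-processing for the example */
}
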
@@ -1400,9 +1394,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
rtl_unlock_config_regs(tp);
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38:
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
+ if (rtl_is_8168evl_up(tp)) {
tmp = ARRAY_SIZE(cfg) - 1;
if (wolopts & WAKE_MAGIC)
rtl_eri_set_bits(tp, 0x0dc, ERIAR_MASK_0100,
@@ -1410,10 +1402,8 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
else
rtl_eri_clear_bits(tp, 0x0dc, ERIAR_MASK_0100,
MagicPacket_v2);
- break;
- default:
+ } else {
tmp = ARRAY_SIZE(cfg);
- break;
}
for (i = 0; i < tmp; i++) {
@@ -1424,7 +1414,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
}
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_17:
+ case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_17:
options = RTL_R8(tp, Config1) & ~PMEnable;
if (wolopts)
options |= PMEnable;
@@ -1794,18 +1784,16 @@ static const struct rtl_coalesce_info rtl_coalesce_info_8168_8136[] = {
static const struct rtl_coalesce_info *rtl_coalesce_info(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
- struct ethtool_link_ksettings ecmd;
const struct rtl_coalesce_info *ci;
- int rc;
- rc = phy_ethtool_get_link_ksettings(dev, &ecmd);
- if (rc < 0)
- return ERR_PTR(rc);
+ if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
+ ci = rtl_coalesce_info_8169;
+ else
+ ci = rtl_coalesce_info_8168_8136;
- for (ci = tp->coalesce_info; ci->speed != 0; ci++) {
- if (ecmd.base.speed == ci->speed) {
+ for (; ci->speed; ci++) {
+ if (tp->phydev->speed == ci->speed)
return ci;
- }
}
return ERR_PTR(-ELNRNG);
@@ -1954,9 +1942,7 @@ static int rtl_get_eee_supp(struct rtl8169_private *tp)
ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
break;
case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
- phy_write(phydev, 0x1f, 0x0a5c);
- ret = phy_read(phydev, 0x12);
- phy_write(phydev, 0x1f, 0x0000);
+ ret = phy_read_paged(phydev, 0x0a5c, 0x12);
break;
default:
ret = -EPROTONOSUPPORT;
@@ -1979,9 +1965,7 @@ static int rtl_get_eee_lpadv(struct rtl8169_private *tp)
ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE);
break;
case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
- phy_write(phydev, 0x1f, 0x0a5d);
- ret = phy_read(phydev, 0x11);
- phy_write(phydev, 0x1f, 0x0000);
+ ret = phy_read_paged(phydev, 0x0a5d, 0x11);
break;
default:
ret = -EPROTONOSUPPORT;
@@ -2004,9 +1988,7 @@ static int rtl_get_eee_adv(struct rtl8169_private *tp)
ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV);
break;
case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
- phy_write(phydev, 0x1f, 0x0a5d);
- ret = phy_read(phydev, 0x10);
- phy_write(phydev, 0x1f, 0x0000);
+ ret = phy_read_paged(phydev, 0x0a5d, 0x10);
break;
default:
ret = -EPROTONOSUPPORT;
@@ -2029,9 +2011,7 @@ static int rtl_set_eee_adv(struct rtl8169_private *tp, int val)
ret = phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
break;
case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
- phy_write(phydev, 0x1f, 0x0a5d);
- phy_write(phydev, 0x10, val);
- phy_write(phydev, 0x1f, 0x0000);
+ phy_write_paged(phydev, 0x0a5d, 0x10, val);
break;
default:
ret = -EPROTONOSUPPORT;
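
These EEE helpers, like many hw_phy_config routines later in the patch, drop the manual page dance (write the page to register 0x1f, access the register, write page 0 back) in favour of phylib's paged accessors, which select the page and restore the previous one themselves. The two forms below are equivalent and return the same value; page and register numbers are taken from the hunk above, the wrapper exists only for illustration:

static int example_eee_lpadv(struct phy_device *phydev)
{
        int ret;

        /* old style: select page 0x0a5d, read reg 0x11, return to page 0 */
        phy_write(phydev, 0x1f, 0x0a5d);
        ret = phy_read(phydev, 0x11);
        phy_write(phydev, 0x1f, 0x0000);

        /* new style: phylib handles page save/select/restore internally */
        ret = phy_read_paged(phydev, 0x0a5d, 0x11);

        return ret;
}
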
@@ -2252,7 +2232,6 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp)
{ 0xfc8, 0x100, RTL_GIGA_MAC_VER_04 },
{ 0xfc8, 0x040, RTL_GIGA_MAC_VER_03 },
{ 0xfc8, 0x008, RTL_GIGA_MAC_VER_02 },
- { 0xfc8, 0x000, RTL_GIGA_MAC_VER_01 },
/* Catch-all */
{ 0x000, 0x000, RTL_GIGA_MAC_NONE }
@@ -2292,246 +2271,10 @@ static void __rtl_writephy_batch(struct rtl8169_private *tp,
#define rtl_writephy_batch(tp, a) __rtl_writephy_batch(tp, a, ARRAY_SIZE(a))
-#define PHY_READ 0x00000000
-#define PHY_DATA_OR 0x10000000
-#define PHY_DATA_AND 0x20000000
-#define PHY_BJMPN 0x30000000
-#define PHY_MDIO_CHG 0x40000000
-#define PHY_CLEAR_READCOUNT 0x70000000
-#define PHY_WRITE 0x80000000
-#define PHY_READCOUNT_EQ_SKIP 0x90000000
-#define PHY_COMP_EQ_SKIPN 0xa0000000
-#define PHY_COMP_NEQ_SKIPN 0xb0000000
-#define PHY_WRITE_PREVIOUS 0xc0000000
-#define PHY_SKIPN 0xd0000000
-#define PHY_DELAY_MS 0xe0000000
-
-struct fw_info {
- u32 magic;
- char version[RTL_VER_SIZE];
- __le32 fw_start;
- __le32 fw_len;
- u8 chksum;
-} __packed;
-
-#define FW_OPCODE_SIZE sizeof(typeof(*((struct rtl_fw_phy_action *)0)->code))
-
-static bool rtl_fw_format_ok(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
-{
- const struct firmware *fw = rtl_fw->fw;
- struct fw_info *fw_info = (struct fw_info *)fw->data;
- struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
- char *version = rtl_fw->version;
- bool rc = false;
-
- if (fw->size < FW_OPCODE_SIZE)
- goto out;
-
- if (!fw_info->magic) {
- size_t i, size, start;
- u8 checksum = 0;
-
- if (fw->size < sizeof(*fw_info))
- goto out;
-
- for (i = 0; i < fw->size; i++)
- checksum += fw->data[i];
- if (checksum != 0)
- goto out;
-
- start = le32_to_cpu(fw_info->fw_start);
- if (start > fw->size)
- goto out;
-
- size = le32_to_cpu(fw_info->fw_len);
- if (size > (fw->size - start) / FW_OPCODE_SIZE)
- goto out;
-
- memcpy(version, fw_info->version, RTL_VER_SIZE);
-
- pa->code = (__le32 *)(fw->data + start);
- pa->size = size;
- } else {
- if (fw->size % FW_OPCODE_SIZE)
- goto out;
-
- strlcpy(version, tp->fw_name, RTL_VER_SIZE);
-
- pa->code = (__le32 *)fw->data;
- pa->size = fw->size / FW_OPCODE_SIZE;
- }
- version[RTL_VER_SIZE - 1] = 0;
-
- rc = true;
-out:
- return rc;
-}
-
-static bool rtl_fw_data_ok(struct rtl8169_private *tp, struct net_device *dev,
- struct rtl_fw_phy_action *pa)
-{
- bool rc = false;
- size_t index;
-
- for (index = 0; index < pa->size; index++) {
- u32 action = le32_to_cpu(pa->code[index]);
- u32 regno = (action & 0x0fff0000) >> 16;
-
- switch(action & 0xf0000000) {
- case PHY_READ:
- case PHY_DATA_OR:
- case PHY_DATA_AND:
- case PHY_MDIO_CHG:
- case PHY_CLEAR_READCOUNT:
- case PHY_WRITE:
- case PHY_WRITE_PREVIOUS:
- case PHY_DELAY_MS:
- break;
-
- case PHY_BJMPN:
- if (regno > index) {
- netif_err(tp, ifup, tp->dev,
- "Out of range of firmware\n");
- goto out;
- }
- break;
- case PHY_READCOUNT_EQ_SKIP:
- if (index + 2 >= pa->size) {
- netif_err(tp, ifup, tp->dev,
- "Out of range of firmware\n");
- goto out;
- }
- break;
- case PHY_COMP_EQ_SKIPN:
- case PHY_COMP_NEQ_SKIPN:
- case PHY_SKIPN:
- if (index + 1 + regno >= pa->size) {
- netif_err(tp, ifup, tp->dev,
- "Out of range of firmware\n");
- goto out;
- }
- break;
-
- default:
- netif_err(tp, ifup, tp->dev,
- "Invalid action 0x%08x\n", action);
- goto out;
- }
- }
- rc = true;
-out:
- return rc;
-}
-
-static int rtl_check_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
-{
- struct net_device *dev = tp->dev;
- int rc = -EINVAL;
-
- if (!rtl_fw_format_ok(tp, rtl_fw)) {
- netif_err(tp, ifup, dev, "invalid firmware\n");
- goto out;
- }
-
- if (rtl_fw_data_ok(tp, dev, &rtl_fw->phy_action))
- rc = 0;
-out:
- return rc;
-}
-
-static void rtl_phy_write_fw(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
-{
- struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
- struct mdio_ops org, *ops = &tp->mdio_ops;
- u32 predata, count;
- size_t index;
-
- predata = count = 0;
- org.write = ops->write;
- org.read = ops->read;
-
- for (index = 0; index < pa->size; ) {
- u32 action = le32_to_cpu(pa->code[index]);
- u32 data = action & 0x0000ffff;
- u32 regno = (action & 0x0fff0000) >> 16;
-
- if (!action)
- break;
-
- switch(action & 0xf0000000) {
- case PHY_READ:
- predata = rtl_readphy(tp, regno);
- count++;
- index++;
- break;
- case PHY_DATA_OR:
- predata |= data;
- index++;
- break;
- case PHY_DATA_AND:
- predata &= data;
- index++;
- break;
- case PHY_BJMPN:
- index -= regno;
- break;
- case PHY_MDIO_CHG:
- if (data == 0) {
- ops->write = org.write;
- ops->read = org.read;
- } else if (data == 1) {
- ops->write = mac_mcu_write;
- ops->read = mac_mcu_read;
- }
-
- index++;
- break;
- case PHY_CLEAR_READCOUNT:
- count = 0;
- index++;
- break;
- case PHY_WRITE:
- rtl_writephy(tp, regno, data);
- index++;
- break;
- case PHY_READCOUNT_EQ_SKIP:
- index += (count == data) ? 2 : 1;
- break;
- case PHY_COMP_EQ_SKIPN:
- if (predata == data)
- index += regno;
- index++;
- break;
- case PHY_COMP_NEQ_SKIPN:
- if (predata != data)
- index += regno;
- index++;
- break;
- case PHY_WRITE_PREVIOUS:
- rtl_writephy(tp, regno, predata);
- index++;
- break;
- case PHY_SKIPN:
- index += regno + 1;
- break;
- case PHY_DELAY_MS:
- mdelay(data);
- index++;
- break;
-
- default:
- BUG();
- }
- }
-
- ops->write = org.write;
- ops->read = org.read;
-}
-
static void rtl_release_firmware(struct rtl8169_private *tp)
{
if (tp->rtl_fw) {
- release_firmware(tp->rtl_fw->fw);
+ rtl_fw_release_firmware(tp->rtl_fw);
kfree(tp->rtl_fw);
tp->rtl_fw = NULL;
}
@@ -2539,9 +2282,9 @@ static void rtl_release_firmware(struct rtl8169_private *tp)
static void rtl_apply_firmware(struct rtl8169_private *tp)
{
- /* TODO: release firmware once rtl_phy_write_fw signals failures. */
+ /* TODO: release firmware if rtl_fw_write_firmware signals failure. */
if (tp->rtl_fw)
- rtl_phy_write_fw(tp, tp->rtl_fw);
+ rtl_fw_write_firmware(tp, tp->rtl_fw);
}
static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
@@ -2578,9 +2321,7 @@ static void rtl8168f_config_eee_phy(struct rtl8169_private *tp)
static void rtl8168g_config_eee_phy(struct rtl8169_private *tp)
{
- phy_write(tp->phydev, 0x1f, 0x0a43);
- phy_set_bits(tp->phydev, 0x11, BIT(4));
- phy_write(tp->phydev, 0x1f, 0x0000);
+ phy_modify_paged(tp->phydev, 0x0a43, 0x11, 0, BIT(4));
}
static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
@@ -2910,50 +2651,59 @@ static void rtl8168c_4_hw_phy_config(struct rtl8169_private *tp)
rtl8168c_3_hw_phy_config(tp);
}
-static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
-{
- static const struct phy_reg phy_reg_init_0[] = {
- /* Channel Estimation */
- { 0x1f, 0x0001 },
- { 0x06, 0x4064 },
- { 0x07, 0x2863 },
- { 0x08, 0x059c },
- { 0x09, 0x26b4 },
- { 0x0a, 0x6a19 },
- { 0x0b, 0xdcc8 },
- { 0x10, 0xf06d },
- { 0x14, 0x7f68 },
- { 0x18, 0x7fd9 },
- { 0x1c, 0xf0ff },
- { 0x1d, 0x3d9c },
- { 0x1f, 0x0003 },
- { 0x12, 0xf49f },
- { 0x13, 0x070b },
- { 0x1a, 0x05ad },
- { 0x14, 0x94c0 },
+static const struct phy_reg rtl8168d_1_phy_reg_init_0[] = {
+ /* Channel Estimation */
+ { 0x1f, 0x0001 },
+ { 0x06, 0x4064 },
+ { 0x07, 0x2863 },
+ { 0x08, 0x059c },
+ { 0x09, 0x26b4 },
+ { 0x0a, 0x6a19 },
+ { 0x0b, 0xdcc8 },
+ { 0x10, 0xf06d },
+ { 0x14, 0x7f68 },
+ { 0x18, 0x7fd9 },
+ { 0x1c, 0xf0ff },
+ { 0x1d, 0x3d9c },
+ { 0x1f, 0x0003 },
+ { 0x12, 0xf49f },
+ { 0x13, 0x070b },
+ { 0x1a, 0x05ad },
+ { 0x14, 0x94c0 },
- /*
- * Tx Error Issue
- * Enhance line driver power
- */
- { 0x1f, 0x0002 },
- { 0x06, 0x5561 },
- { 0x1f, 0x0005 },
- { 0x05, 0x8332 },
- { 0x06, 0x5561 },
+ /*
+ * Tx Error Issue
+ * Enhance line driver power
+ */
+ { 0x1f, 0x0002 },
+ { 0x06, 0x5561 },
+ { 0x1f, 0x0005 },
+ { 0x05, 0x8332 },
+ { 0x06, 0x5561 },
- /*
- * Can not link to 1Gbps with bad cable
- * Decrease SNR threshold form 21.07dB to 19.04dB
- */
- { 0x1f, 0x0001 },
- { 0x17, 0x0cc0 },
+ /*
+ * Can not link to 1Gbps with bad cable
+ * Decrease SNR threshold form 21.07dB to 19.04dB
+ */
+ { 0x1f, 0x0001 },
+ { 0x17, 0x0cc0 },
- { 0x1f, 0x0000 },
- { 0x0d, 0xf880 }
- };
+ { 0x1f, 0x0000 },
+ { 0x0d, 0xf880 }
+};
- rtl_writephy_batch(tp, phy_reg_init_0);
+static const struct phy_reg rtl8168d_1_phy_reg_init_1[] = {
+ { 0x1f, 0x0002 },
+ { 0x05, 0x669a },
+ { 0x1f, 0x0005 },
+ { 0x05, 0x8330 },
+ { 0x06, 0x669a },
+ { 0x1f, 0x0002 }
+};
+
+static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
+{
+ rtl_writephy_batch(tp, rtl8168d_1_phy_reg_init_0);
/*
* Rx Error Issue
@@ -2964,17 +2714,9 @@ static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
rtl_w0w1_phy(tp, 0x0c, 0xa200, 0x5d00);
if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
- static const struct phy_reg phy_reg_init[] = {
- { 0x1f, 0x0002 },
- { 0x05, 0x669a },
- { 0x1f, 0x0005 },
- { 0x05, 0x8330 },
- { 0x06, 0x669a },
- { 0x1f, 0x0002 }
- };
int val;
- rtl_writephy_batch(tp, phy_reg_init);
+ rtl_writephy_batch(tp, rtl8168d_1_phy_reg_init_1);
val = rtl_readphy(tp, 0x0d);
@@ -3023,62 +2765,12 @@ static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
{
- static const struct phy_reg phy_reg_init_0[] = {
- /* Channel Estimation */
- { 0x1f, 0x0001 },
- { 0x06, 0x4064 },
- { 0x07, 0x2863 },
- { 0x08, 0x059c },
- { 0x09, 0x26b4 },
- { 0x0a, 0x6a19 },
- { 0x0b, 0xdcc8 },
- { 0x10, 0xf06d },
- { 0x14, 0x7f68 },
- { 0x18, 0x7fd9 },
- { 0x1c, 0xf0ff },
- { 0x1d, 0x3d9c },
- { 0x1f, 0x0003 },
- { 0x12, 0xf49f },
- { 0x13, 0x070b },
- { 0x1a, 0x05ad },
- { 0x14, 0x94c0 },
-
- /*
- * Tx Error Issue
- * Enhance line driver power
- */
- { 0x1f, 0x0002 },
- { 0x06, 0x5561 },
- { 0x1f, 0x0005 },
- { 0x05, 0x8332 },
- { 0x06, 0x5561 },
-
- /*
- * Can not link to 1Gbps with bad cable
- * Decrease SNR threshold form 21.07dB to 19.04dB
- */
- { 0x1f, 0x0001 },
- { 0x17, 0x0cc0 },
-
- { 0x1f, 0x0000 },
- { 0x0d, 0xf880 }
- };
-
- rtl_writephy_batch(tp, phy_reg_init_0);
+ rtl_writephy_batch(tp, rtl8168d_1_phy_reg_init_0);
if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
- static const struct phy_reg phy_reg_init[] = {
- { 0x1f, 0x0002 },
- { 0x05, 0x669a },
- { 0x1f, 0x0005 },
- { 0x05, 0x8330 },
- { 0x06, 0x669a },
-
- { 0x1f, 0x0002 }
- };
int val;
- rtl_writephy_batch(tp, phy_reg_init);
+ rtl_writephy_batch(tp, rtl8168d_1_phy_reg_init_1);
val = rtl_readphy(tp, 0x0d);
if ((val & 0x00ff) != 0x006c) {
@@ -3528,20 +3220,15 @@ static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
static void rtl8168g_disable_aldps(struct rtl8169_private *tp)
{
- phy_write(tp->phydev, 0x1f, 0x0a43);
- phy_clear_bits(tp->phydev, 0x10, BIT(2));
+ phy_modify_paged(tp->phydev, 0x0a43, 0x10, BIT(2), 0);
}
static void rtl8168g_phy_adjust_10m_aldps(struct rtl8169_private *tp)
{
struct phy_device *phydev = tp->phydev;
- phy_write(phydev, 0x1f, 0x0bcc);
- phy_clear_bits(phydev, 0x14, BIT(8));
-
- phy_write(phydev, 0x1f, 0x0a44);
- phy_set_bits(phydev, 0x11, BIT(7) | BIT(6));
-
+ phy_modify_paged(phydev, 0x0bcc, 0x14, BIT(8), 0);
+ phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(7) | BIT(6));
phy_write(phydev, 0x1f, 0x0a43);
phy_write(phydev, 0x13, 0x8084);
phy_clear_bits(phydev, 0x14, BIT(14) | BIT(13));
@@ -3552,43 +3239,36 @@ static void rtl8168g_phy_adjust_10m_aldps(struct rtl8169_private *tp)
static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
{
+ int ret;
+
rtl_apply_firmware(tp);
- rtl_writephy(tp, 0x1f, 0x0a46);
- if (rtl_readphy(tp, 0x10) & 0x0100) {
- rtl_writephy(tp, 0x1f, 0x0bcc);
- rtl_w0w1_phy(tp, 0x12, 0x0000, 0x8000);
- } else {
- rtl_writephy(tp, 0x1f, 0x0bcc);
- rtl_w0w1_phy(tp, 0x12, 0x8000, 0x0000);
- }
+ ret = phy_read_paged(tp->phydev, 0x0a46, 0x10);
+ if (ret & BIT(8))
+ phy_modify_paged(tp->phydev, 0x0bcc, 0x12, BIT(15), 0);
+ else
+ phy_modify_paged(tp->phydev, 0x0bcc, 0x12, 0, BIT(15));
- rtl_writephy(tp, 0x1f, 0x0a46);
- if (rtl_readphy(tp, 0x13) & 0x0100) {
- rtl_writephy(tp, 0x1f, 0x0c41);
- rtl_w0w1_phy(tp, 0x15, 0x0002, 0x0000);
- } else {
- rtl_writephy(tp, 0x1f, 0x0c41);
- rtl_w0w1_phy(tp, 0x15, 0x0000, 0x0002);
- }
+ ret = phy_read_paged(tp->phydev, 0x0a46, 0x13);
+ if (ret & BIT(8))
+ phy_modify_paged(tp->phydev, 0x0c41, 0x12, 0, BIT(1));
+ else
+ phy_modify_paged(tp->phydev, 0x0c41, 0x12, BIT(1), 0);
/* Enable PHY auto speed down */
- rtl_writephy(tp, 0x1f, 0x0a44);
- rtl_w0w1_phy(tp, 0x11, 0x000c, 0x0000);
+ phy_modify_paged(tp->phydev, 0x0a44, 0x11, 0, BIT(3) | BIT(2));
rtl8168g_phy_adjust_10m_aldps(tp);
/* EEE auto-fallback function */
- rtl_writephy(tp, 0x1f, 0x0a4b);
- rtl_w0w1_phy(tp, 0x11, 0x0004, 0x0000);
+ phy_modify_paged(tp->phydev, 0x0a4b, 0x11, 0, BIT(2));
/* Enable UC LPF tune function */
rtl_writephy(tp, 0x1f, 0x0a43);
rtl_writephy(tp, 0x13, 0x8012);
rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0c42);
- rtl_w0w1_phy(tp, 0x11, 0x4000, 0x2000);
+ phy_modify_paged(tp->phydev, 0x0c42, 0x11, BIT(13), BIT(14));
/* Improve SWR Efficiency */
rtl_writephy(tp, 0x1f, 0x0bcd);
@@ -3600,6 +3280,7 @@ static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x14, 0x1065);
rtl_writephy(tp, 0x14, 0x9065);
rtl_writephy(tp, 0x14, 0x1065);
+ rtl_writephy(tp, 0x1f, 0x0000);
rtl8168g_disable_aldps(tp);
rtl8168g_config_eee_phy(tp);
@@ -3684,14 +3365,10 @@ static void rtl8168h_1_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x1f, 0x0000);
/* enable GPHY 10M */
- rtl_writephy(tp, 0x1f, 0x0a44);
- rtl_w0w1_phy(tp, 0x11, 0x0800, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
+ phy_modify_paged(tp->phydev, 0x0a44, 0x11, 0, BIT(11));
/* SAR ADC performance */
- rtl_writephy(tp, 0x1f, 0x0bca);
- rtl_w0w1_phy(tp, 0x17, 0x4000, 0x3000);
- rtl_writephy(tp, 0x1f, 0x0000);
+ phy_modify_paged(tp->phydev, 0x0bca, 0x17, BIT(12) | BIT(13), BIT(14));
rtl_writephy(tp, 0x1f, 0x0a43);
rtl_writephy(tp, 0x13, 0x803f);
@@ -3711,9 +3388,7 @@ static void rtl8168h_1_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x1f, 0x0000);
/* disable phy pfm mode */
- rtl_writephy(tp, 0x1f, 0x0a44);
- rtl_w0w1_phy(tp, 0x11, 0x0000, 0x0080);
- rtl_writephy(tp, 0x1f, 0x0000);
+ phy_modify_paged(tp->phydev, 0x0a44, 0x11, BIT(7), 0);
rtl8168g_disable_aldps(tp);
rtl8168g_config_eee_phy(tp);
@@ -3743,9 +3418,7 @@ static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x1f, 0x0000);
/* enable GPHY 10M */
- rtl_writephy(tp, 0x1f, 0x0a44);
- rtl_w0w1_phy(tp, 0x11, 0x0800, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
+ phy_modify_paged(tp->phydev, 0x0a44, 0x11, 0, BIT(11));
r8168_mac_ocp_write(tp, 0xdd02, 0x807d);
data = r8168_mac_ocp_read(tp, 0xdd02);
@@ -3781,9 +3454,7 @@ static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x1f, 0x0000);
/* disable phy pfm mode */
- rtl_writephy(tp, 0x1f, 0x0a44);
- rtl_w0w1_phy(tp, 0x11, 0x0000, 0x0080);
- rtl_writephy(tp, 0x1f, 0x0000);
+ phy_modify_paged(tp->phydev, 0x0a44, 0x11, BIT(7), 0);
rtl8168g_disable_aldps(tp);
rtl8168g_config_eee_phy(tp);
@@ -3793,16 +3464,12 @@ static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp)
static void rtl8168ep_1_hw_phy_config(struct rtl8169_private *tp)
{
/* Enable PHY auto speed down */
- rtl_writephy(tp, 0x1f, 0x0a44);
- rtl_w0w1_phy(tp, 0x11, 0x000c, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
+ phy_modify_paged(tp->phydev, 0x0a44, 0x11, 0, BIT(3) | BIT(2));
rtl8168g_phy_adjust_10m_aldps(tp);
/* Enable EEE auto-fallback function */
- rtl_writephy(tp, 0x1f, 0x0a4b);
- rtl_w0w1_phy(tp, 0x11, 0x0004, 0x0000);
- rtl_writephy(tp, 0x1f, 0x0000);
+ phy_modify_paged(tp->phydev, 0x0a4b, 0x11, 0, BIT(2));
/* Enable UC LPF tune function */
rtl_writephy(tp, 0x1f, 0x0a43);
@@ -3811,9 +3478,7 @@ static void rtl8168ep_1_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x1f, 0x0000);
/* set rg_sel_sdm_rate */
- rtl_writephy(tp, 0x1f, 0x0c42);
- rtl_w0w1_phy(tp, 0x11, 0x4000, 0x2000);
- rtl_writephy(tp, 0x1f, 0x0000);
+ phy_modify_paged(tp->phydev, 0x0c42, 0x11, BIT(13), BIT(14));
rtl8168g_disable_aldps(tp);
rtl8168g_config_eee_phy(tp);
@@ -3831,9 +3496,7 @@ static void rtl8168ep_2_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x1f, 0x0000);
/* Set rg_sel_sdm_rate */
- rtl_writephy(tp, 0x1f, 0x0c42);
- rtl_w0w1_phy(tp, 0x11, 0x4000, 0x2000);
- rtl_writephy(tp, 0x1f, 0x0000);
+ phy_modify_paged(tp->phydev, 0x0c42, 0x11, BIT(13), BIT(14));
/* Channel estimation parameters */
rtl_writephy(tp, 0x1f, 0x0a43);
@@ -3985,7 +3648,6 @@ static void rtl_hw_phy_config(struct net_device *dev)
{
static const rtl_generic_fct phy_configs[] = {
/* PCI devices. */
- [RTL_GIGA_MAC_VER_01] = NULL,
[RTL_GIGA_MAC_VER_02] = rtl8169s_hw_phy_config,
[RTL_GIGA_MAC_VER_03] = rtl8169s_hw_phy_config,
[RTL_GIGA_MAC_VER_04] = rtl8169sb_hw_phy_config,
@@ -4050,12 +3712,6 @@ static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag)
schedule_work(&tp->wk.work);
}
-static bool rtl_tbi_enabled(struct rtl8169_private *tp)
-{
- return (tp->mac_version == RTL_GIGA_MAC_VER_01) &&
- (RTL_R8(tp, PHYstatus) & TBI_Enable);
-}
-
static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
{
rtl_hw_phy_config(dev);
@@ -4124,31 +3780,6 @@ static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return phy_mii_ioctl(tp->phydev, ifr, cmd);
}
-static void rtl_init_mdio_ops(struct rtl8169_private *tp)
-{
- struct mdio_ops *ops = &tp->mdio_ops;
-
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_27:
- ops->write = r8168dp_1_mdio_write;
- ops->read = r8168dp_1_mdio_read;
- break;
- case RTL_GIGA_MAC_VER_28:
- case RTL_GIGA_MAC_VER_31:
- ops->write = r8168dp_2_mdio_write;
- ops->read = r8168dp_2_mdio_read;
- break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
- ops->write = r8168g_mdio_write;
- ops->read = r8168g_mdio_read;
- break;
- default:
- ops->write = r8169_mdio_write;
- ops->read = r8169_mdio_read;
- break;
- }
-}
-
static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
@@ -4168,7 +3799,7 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
}
}
-static void r8168_pll_power_down(struct rtl8169_private *tp)
+static void rtl_pll_power_down(struct rtl8169_private *tp)
{
if (r8168_check_dash(tp))
return;
@@ -4203,10 +3834,12 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
rtl_eri_clear_bits(tp, 0x1a8, ERIAR_MASK_1111, 0xfc000000);
RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80);
break;
+ default:
+ break;
}
}
-static void r8168_pll_power_up(struct rtl8169_private *tp)
+static void rtl_pll_power_up(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_33:
@@ -4230,6 +3863,8 @@ static void r8168_pll_power_up(struct rtl8169_private *tp)
RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0);
rtl_eri_set_bits(tp, 0x1a8, ERIAR_MASK_1111, 0xfc000000);
break;
+ default:
+ break;
}
phy_resume(tp->phydev);
@@ -4237,32 +3872,10 @@ static void r8168_pll_power_up(struct rtl8169_private *tp)
msleep(20);
}
-static void rtl_pll_power_down(struct rtl8169_private *tp)
-{
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06:
- case RTL_GIGA_MAC_VER_13 ... RTL_GIGA_MAC_VER_15:
- break;
- default:
- r8168_pll_power_down(tp);
- }
-}
-
-static void rtl_pll_power_up(struct rtl8169_private *tp)
-{
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06:
- case RTL_GIGA_MAC_VER_13 ... RTL_GIGA_MAC_VER_15:
- break;
- default:
- r8168_pll_power_up(tp);
- }
-}
-
static void rtl_init_rxcfg(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06:
+ case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
case RTL_GIGA_MAC_VER_10 ... RTL_GIGA_MAC_VER_17:
RTL_W32(tp, RxConfig, RX_FIFO_THRESH | RX_DMA_BURST);
break;
@@ -4285,24 +3898,6 @@ static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
tp->dirty_tx = tp->cur_tx = tp->cur_rx = 0;
}
-static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
-{
- if (tp->jumbo_ops.enable) {
- rtl_unlock_config_regs(tp);
- tp->jumbo_ops.enable(tp);
- rtl_lock_config_regs(tp);
- }
-}
-
-static void rtl_hw_jumbo_disable(struct rtl8169_private *tp)
-{
- if (tp->jumbo_ops.disable) {
- rtl_unlock_config_regs(tp);
- tp->jumbo_ops.disable(tp);
- rtl_lock_config_regs(tp);
- }
-}
-
static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp)
{
RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0);
@@ -4369,55 +3964,56 @@ static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp)
RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~(1 << 0));
}
-static void rtl_init_jumbo_ops(struct rtl8169_private *tp)
+static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
{
- struct jumbo_ops *ops = &tp->jumbo_ops;
-
+ rtl_unlock_config_regs(tp);
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_11:
- ops->disable = r8168b_0_hw_jumbo_disable;
- ops->enable = r8168b_0_hw_jumbo_enable;
+ r8168b_0_hw_jumbo_enable(tp);
break;
case RTL_GIGA_MAC_VER_12:
case RTL_GIGA_MAC_VER_17:
- ops->disable = r8168b_1_hw_jumbo_disable;
- ops->enable = r8168b_1_hw_jumbo_enable;
+ r8168b_1_hw_jumbo_enable(tp);
break;
- case RTL_GIGA_MAC_VER_18: /* Wild guess. Needs info from Realtek. */
- case RTL_GIGA_MAC_VER_19:
- case RTL_GIGA_MAC_VER_20:
- case RTL_GIGA_MAC_VER_21: /* Wild guess. Needs info from Realtek. */
- case RTL_GIGA_MAC_VER_22:
- case RTL_GIGA_MAC_VER_23:
- case RTL_GIGA_MAC_VER_24:
- case RTL_GIGA_MAC_VER_25:
- case RTL_GIGA_MAC_VER_26:
- ops->disable = r8168c_hw_jumbo_disable;
- ops->enable = r8168c_hw_jumbo_enable;
+ case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_26:
+ r8168c_hw_jumbo_enable(tp);
break;
- case RTL_GIGA_MAC_VER_27:
- case RTL_GIGA_MAC_VER_28:
- ops->disable = r8168dp_hw_jumbo_disable;
- ops->enable = r8168dp_hw_jumbo_enable;
+ case RTL_GIGA_MAC_VER_27 ... RTL_GIGA_MAC_VER_28:
+ r8168dp_hw_jumbo_enable(tp);
break;
- case RTL_GIGA_MAC_VER_31: /* Wild guess. Needs info from Realtek. */
- case RTL_GIGA_MAC_VER_32:
- case RTL_GIGA_MAC_VER_33:
- case RTL_GIGA_MAC_VER_34:
- ops->disable = r8168e_hw_jumbo_disable;
- ops->enable = r8168e_hw_jumbo_enable;
+ case RTL_GIGA_MAC_VER_31 ... RTL_GIGA_MAC_VER_34:
+ r8168e_hw_jumbo_enable(tp);
break;
+ default:
+ break;
+ }
+ rtl_lock_config_regs(tp);
+}
- /*
- * No action needed for jumbo frames with 8169.
- * No jumbo for 810x at all.
- */
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
+static void rtl_hw_jumbo_disable(struct rtl8169_private *tp)
+{
+ rtl_unlock_config_regs(tp);
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_11:
+ r8168b_0_hw_jumbo_disable(tp);
+ break;
+ case RTL_GIGA_MAC_VER_12:
+ case RTL_GIGA_MAC_VER_17:
+ r8168b_1_hw_jumbo_disable(tp);
+ break;
+ case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_26:
+ r8168c_hw_jumbo_disable(tp);
+ break;
+ case RTL_GIGA_MAC_VER_27 ... RTL_GIGA_MAC_VER_28:
+ r8168dp_hw_jumbo_disable(tp);
+ break;
+ case RTL_GIGA_MAC_VER_31 ... RTL_GIGA_MAC_VER_34:
+ r8168e_hw_jumbo_disable(tp);
+ break;
default:
- ops->disable = NULL;
- ops->enable = NULL;
break;
}
+ rtl_lock_config_regs(tp);
}
DECLARE_RTL_COND(rtl_chipcmd_cond)
@@ -4435,35 +4031,28 @@ static void rtl_hw_reset(struct rtl8169_private *tp)
static void rtl_request_firmware(struct rtl8169_private *tp)
{
struct rtl_fw *rtl_fw;
- int rc = -ENOMEM;
/* firmware loaded already or no firmware available */
if (tp->rtl_fw || !tp->fw_name)
return;
rtl_fw = kzalloc(sizeof(*rtl_fw), GFP_KERNEL);
- if (!rtl_fw)
- goto err_warn;
-
- rc = request_firmware(&rtl_fw->fw, tp->fw_name, tp_to_dev(tp));
- if (rc < 0)
- goto err_free;
-
- rc = rtl_check_firmware(tp, rtl_fw);
- if (rc < 0)
- goto err_release_firmware;
-
- tp->rtl_fw = rtl_fw;
+ if (!rtl_fw) {
+ netif_warn(tp, ifup, tp->dev, "Unable to load firmware, out of memory\n");
+ return;
+ }
- return;
+ rtl_fw->phy_write = rtl_writephy;
+ rtl_fw->phy_read = rtl_readphy;
+ rtl_fw->mac_mcu_write = mac_mcu_write;
+ rtl_fw->mac_mcu_read = mac_mcu_read;
+ rtl_fw->fw_name = tp->fw_name;
+ rtl_fw->dev = tp_to_dev(tp);
-err_release_firmware:
- release_firmware(rtl_fw->fw);
-err_free:
- kfree(rtl_fw);
-err_warn:
- netif_warn(tp, ifup, tp->dev, "unable to load firmware patch %s (%d)\n",
- tp->fw_name, rc);
+ if (rtl_fw_request_firmware(rtl_fw))
+ kfree(rtl_fw);
+ else
+ tp->rtl_fw = rtl_fw;
}
static void rtl_rx_close(struct rtl8169_private *tp)
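
rtl_request_firmware() now only allocates struct rtl_fw, fills in the accessors plus device and firmware name, and leaves loading, validation and release to the new firmware module. The PHY_MDIO_CHG handling that rtl_phy_write_fw() used to implement by swapping tp->mdio_ops presumably lives in rtl_fw_write_firmware() now, driven through these callbacks; the real r8169_firmware.c body is not part of this excerpt, so the sketch below is an assumption inferred from the removed code above:

/* Assumed shape only: flip between PHY and MAC-MCU accessors via the
 * struct rtl_fw callbacks instead of patching tp->mdio_ops.
 */
static void example_fw_reg_write(struct rtl_fw *rtl_fw, struct rtl8169_private *tp,
                                 bool use_mac_mcu, int reg, int val)
{
        rtl_fw_write_t wr = use_mac_mcu ? rtl_fw->mac_mcu_write : rtl_fw->phy_write;

        wr(tp, reg, val);
}
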
@@ -4513,8 +4102,7 @@ static void rtl_set_tx_config_registers(struct rtl8169_private *tp)
u32 val = TX_DMA_BURST << TxDMAShift |
InterFrameGap << TxInterFrameGapShift;
- if (tp->mac_version >= RTL_GIGA_MAC_VER_34 &&
- tp->mac_version != RTL_GIGA_MAC_VER_39)
+ if (rtl_is_8168evl_up(tp))
val |= TXCFG_AUTO_FIFO;
RTL_W32(tp, TxConfig, val);
@@ -4608,53 +4196,6 @@ static void rtl_set_rx_mode(struct net_device *dev)
RTL_W32(tp, RxConfig, tmp);
}
-static void rtl_hw_start(struct rtl8169_private *tp)
-{
- rtl_unlock_config_regs(tp);
-
- tp->hw_start(tp);
-
- rtl_set_rx_max_size(tp);
- rtl_set_rx_tx_desc_registers(tp);
- rtl_lock_config_regs(tp);
-
- /* disable interrupt coalescing */
- RTL_W16(tp, IntrMitigate, 0x0000);
- /* Initially a 10 us delay. Turned it into a PCI commit. - FR */
- RTL_R8(tp, IntrMask);
- RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);
- rtl_init_rxcfg(tp);
- rtl_set_tx_config_registers(tp);
-
- rtl_set_rx_mode(tp->dev);
- /* no early-rx interrupts */
- RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000);
- rtl_irq_enable(tp);
-}
-
-static void rtl_hw_start_8169(struct rtl8169_private *tp)
-{
- if (tp->mac_version == RTL_GIGA_MAC_VER_05)
- pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08);
-
- RTL_W8(tp, EarlyTxThres, NoEarlyTx);
-
- tp->cp_cmd |= PCIMulRW;
-
- if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
- tp->mac_version == RTL_GIGA_MAC_VER_03) {
- netif_dbg(tp, drv, tp->dev,
- "Set MAC Reg C+CR Offset 0xe0. Bit 3 and Bit 14 MUST be 1\n");
- tp->cp_cmd |= (1 << 14);
- }
-
- RTL_W16(tp, CPlusCmd, tp->cp_cmd);
-
- rtl8169_set_magic_reg(tp, tp->mac_version);
-
- RTL_W32(tp, RxMissed, 0);
-}
-
DECLARE_RTL_COND(rtl_csiar_cond)
{
return RTL_R32(tp, CSIAR) & CSIAR_FLAG;
@@ -4746,7 +4287,8 @@ static void rtl_pcie_state_l2l3_disable(struct rtl8169_private *tp)
static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
{
- if (enable) {
+ /* Don't enable ASPM in the chip if OS can't control ASPM */
+ if (enable && tp->aspm_manageable) {
RTL_W8(tp, Config5, RTL_R8(tp, Config5) | ASPM_en);
RTL_W8(tp, Config2, RTL_R8(tp, Config2) | ClkReqEn);
} else {
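
The chip-side ASPM/ClkReq bits are now only set when the new aspm_manageable flag says the kernel actually got ASPM control. The flag is derived in rtl_init_one() near the end of this patch from the return value of pci_disable_link_state(), which fails when the platform firmware keeps ASPM to itself; condensed here for illustration:

/* Condensed from rtl_init_one() below: record whether the kernel could
 * disable the link states itself; if not, leave the chip bits alone too.
 */
static void example_probe_aspm(struct rtl8169_private *tp, struct pci_dev *pdev)
{
        int rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
                                              PCIE_LINK_STATE_L1);

        tp->aspm_manageable = !rc;
}
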
@@ -4779,9 +4321,6 @@ static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
{
RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
- tp->cp_cmd &= CPCMD_QUIRK_MASK;
- RTL_W16(tp, CPlusCmd, tp->cp_cmd);
-
if (tp->dev->mtu <= ETH_DATA_LEN) {
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B |
PCI_EXP_DEVCTL_NOSNOOP_EN);
@@ -4792,8 +4331,6 @@ static void rtl_hw_start_8168bef(struct rtl8169_private *tp)
{
rtl_hw_start_8168bb(tp);
- RTL_W8(tp, MaxTxPacketSize, TxPacketMax);
-
RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~(1 << 0));
}
@@ -4807,9 +4344,6 @@ static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
rtl_disable_clock_request(tp);
-
- tp->cp_cmd &= CPCMD_QUIRK_MASK;
- RTL_W16(tp, CPlusCmd, tp->cp_cmd);
}
static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
@@ -4837,9 +4371,6 @@ static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
if (tp->dev->mtu <= ETH_DATA_LEN)
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
-
- tp->cp_cmd &= CPCMD_QUIRK_MASK;
- RTL_W16(tp, CPlusCmd, tp->cp_cmd);
}
static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
@@ -4851,13 +4382,8 @@ static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
/* Magic. */
RTL_W8(tp, DBG_REG, 0x20);
- RTL_W8(tp, MaxTxPacketSize, TxPacketMax);
-
if (tp->dev->mtu <= ETH_DATA_LEN)
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
-
- tp->cp_cmd &= CPCMD_QUIRK_MASK;
- RTL_W16(tp, CPlusCmd, tp->cp_cmd);
}
static void rtl_hw_start_8168c_1(struct rtl8169_private *tp)
@@ -4909,13 +4435,8 @@ static void rtl_hw_start_8168d(struct rtl8169_private *tp)
rtl_disable_clock_request(tp);
- RTL_W8(tp, MaxTxPacketSize, TxPacketMax);
-
if (tp->dev->mtu <= ETH_DATA_LEN)
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
-
- tp->cp_cmd &= CPCMD_QUIRK_MASK;
- RTL_W16(tp, CPlusCmd, tp->cp_cmd);
}
static void rtl_hw_start_8168dp(struct rtl8169_private *tp)
@@ -4925,8 +4446,6 @@ static void rtl_hw_start_8168dp(struct rtl8169_private *tp)
if (tp->dev->mtu <= ETH_DATA_LEN)
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
- RTL_W8(tp, MaxTxPacketSize, TxPacketMax);
-
rtl_disable_clock_request(tp);
}
@@ -4942,8 +4461,6 @@ static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
- RTL_W8(tp, MaxTxPacketSize, TxPacketMax);
-
rtl_ephy_init(tp, e_info_8168d_4);
rtl_enable_clock_request(tp);
@@ -4974,8 +4491,6 @@ static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
if (tp->dev->mtu <= ETH_DATA_LEN)
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
- RTL_W8(tp, MaxTxPacketSize, TxPacketMax);
-
rtl_disable_clock_request(tp);
/* Reset tx FIFO pointer */
@@ -5007,8 +4522,6 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
rtl_eri_set_bits(tp, 0x1b0, ERIAR_MASK_0001, BIT(4));
rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00);
- RTL_W8(tp, MaxTxPacketSize, EarlySize);
-
rtl_disable_clock_request(tp);
RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
@@ -5037,8 +4550,6 @@ static void rtl_hw_start_8168f(struct rtl8169_private *tp)
rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050);
rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060);
- RTL_W8(tp, MaxTxPacketSize, EarlySize);
-
rtl_disable_clock_request(tp);
RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
@@ -5095,7 +4606,6 @@ static void rtl_hw_start_8168g(struct rtl8169_private *tp)
rtl_eri_write(tp, 0x2f8, ERIAR_MASK_0011, 0x1d8f);
RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN);
- RTL_W8(tp, MaxTxPacketSize, EarlySize);
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
@@ -5193,7 +4703,6 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87);
RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN);
- RTL_W8(tp, MaxTxPacketSize, EarlySize);
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
@@ -5269,7 +4778,6 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp)
rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87);
RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN);
- RTL_W8(tp, MaxTxPacketSize, EarlySize);
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
@@ -5536,33 +5044,70 @@ static void rtl_hw_config(struct rtl8169_private *tp)
static void rtl_hw_start_8168(struct rtl8169_private *tp)
{
- RTL_W8(tp, MaxTxPacketSize, TxPacketMax);
+ if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_16)
+ pcie_capability_set_word(tp->pci_dev, PCI_EXP_DEVCTL,
+ PCI_EXP_DEVCTL_NOSNOOP_EN);
- /* Workaround for RxFIFO overflow. */
- if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
- tp->irq_mask |= RxFIFOOver;
- tp->irq_mask &= ~RxOverflow;
- }
+ if (rtl_is_8168evl_up(tp))
+ RTL_W8(tp, MaxTxPacketSize, EarlySize);
+ else
+ RTL_W8(tp, MaxTxPacketSize, TxPacketMax);
rtl_hw_config(tp);
}
-static void rtl_hw_start_8101(struct rtl8169_private *tp)
+static void rtl_hw_start_8169(struct rtl8169_private *tp)
{
- if (tp->mac_version >= RTL_GIGA_MAC_VER_30)
- tp->irq_mask &= ~RxFIFOOver;
+ if (tp->mac_version == RTL_GIGA_MAC_VER_05)
+ pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08);
- if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
- tp->mac_version == RTL_GIGA_MAC_VER_16)
- pcie_capability_set_word(tp->pci_dev, PCI_EXP_DEVCTL,
- PCI_EXP_DEVCTL_NOSNOOP_EN);
+ RTL_W8(tp, EarlyTxThres, NoEarlyTx);
+
+ tp->cp_cmd |= PCIMulRW;
- RTL_W8(tp, MaxTxPacketSize, TxPacketMax);
+ if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_03) {
+ netif_dbg(tp, drv, tp->dev,
+ "Set MAC Reg C+CR Offset 0xe0. Bit 3 and Bit 14 MUST be 1\n");
+ tp->cp_cmd |= (1 << 14);
+ }
- tp->cp_cmd &= CPCMD_QUIRK_MASK;
RTL_W16(tp, CPlusCmd, tp->cp_cmd);
- rtl_hw_config(tp);
+ rtl8169_set_magic_reg(tp, tp->mac_version);
+
+ RTL_W32(tp, RxMissed, 0);
+}
+
+static void rtl_hw_start(struct rtl8169_private *tp)
+{
+ rtl_unlock_config_regs(tp);
+
+ tp->cp_cmd &= CPCMD_MASK;
+ RTL_W16(tp, CPlusCmd, tp->cp_cmd);
+
+ if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
+ rtl_hw_start_8169(tp);
+ else
+ rtl_hw_start_8168(tp);
+
+ rtl_set_rx_max_size(tp);
+ rtl_set_rx_tx_desc_registers(tp);
+ rtl_lock_config_regs(tp);
+
+ /* disable interrupt coalescing */
+ RTL_W16(tp, IntrMitigate, 0x0000);
+ /* Initially a 10 us delay. Turned it into a PCI commit. - FR */
+ RTL_R8(tp, IntrMask);
+ RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);
+ rtl_init_rxcfg(tp);
+ rtl_set_tx_config_registers(tp);
+
+ rtl_set_rx_mode(tp->dev);
+ /* no early-rx interrupts */
+ RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000);
+ rtl_irq_enable(tp);
}
static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
@@ -5834,7 +5379,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
static void r8169_csum_workaround(struct rtl8169_private *tp,
struct sk_buff *skb)
{
- if (skb_shinfo(skb)->gso_size) {
+ if (skb_is_gso(skb)) {
netdev_features_t features = tp->dev->features;
struct sk_buff *segs, *nskb;
@@ -5857,11 +5402,8 @@ static void r8169_csum_workaround(struct rtl8169_private *tp,
rtl8169_start_xmit(skb, tp->dev);
} else {
- struct net_device_stats *stats;
-
drop:
- stats = &tp->dev->stats;
- stats->tx_dropped++;
+ tp->dev->stats.tx_dropped++;
dev_kfree_skb_any(skb);
}
}
@@ -5889,8 +5431,7 @@ static int msdn_giant_send_check(struct sk_buff *skb)
return ret;
}
-static bool rtl8169_tso_csum_v1(struct rtl8169_private *tp,
- struct sk_buff *skb, u32 *opts)
+static void rtl8169_tso_csum_v1(struct sk_buff *skb, u32 *opts)
{
u32 mss = skb_shinfo(skb)->gso_size;
@@ -5907,8 +5448,6 @@ static bool rtl8169_tso_csum_v1(struct rtl8169_private *tp,
else
WARN_ON_ONCE(1);
}
-
- return true;
}
static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
@@ -5998,6 +5537,18 @@ static bool rtl_tx_slots_avail(struct rtl8169_private *tp,
return slots_avail > nr_frags;
}
+/* Versions RTL8102e and from RTL8168c onwards support csum_v2 */
+static bool rtl_chip_supports_csum_v2(struct rtl8169_private *tp)
+{
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
+ case RTL_GIGA_MAC_VER_10 ... RTL_GIGA_MAC_VER_17:
+ return false;
+ default:
+ return true;
+ }
+}
+
static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
@@ -6017,12 +5568,16 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
goto err_stop_0;
- opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(skb));
+ opts[1] = rtl8169_tx_vlan_tag(skb);
opts[0] = DescOwn;
- if (!tp->tso_csum(tp, skb, opts)) {
- r8169_csum_workaround(tp, skb);
- return NETDEV_TX_OK;
+ if (rtl_chip_supports_csum_v2(tp)) {
+ if (!rtl8169_tso_csum_v2(tp, skb, opts)) {
+ r8169_csum_workaround(tp, skb);
+ return NETDEV_TX_OK;
+ }
+ } else {
+ rtl8169_tso_csum_v1(skb, opts);
}
len = skb_headlen(skb);
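
With the tso_csum callback gone from struct rtl8169_private, the transmit path dispatches directly: rtl_chip_supports_csum_v2() (simply moved up from its old spot later in the file) selects the v2 helper, whose failure still falls back to r8169_csum_workaround(), while the v1 helper can no longer fail and so loses its bool return. The same predicate also gates the advertised features at probe time; condensed from rtl_init_one() further down for illustration:

static void example_setup_csum_features(struct rtl8169_private *tp,
                                        struct net_device *dev)
{
        /* same predicate, reused when advertising IPv6 csum/TSO support */
        if (rtl_chip_supports_csum_v2(tp))
                dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
}
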
@@ -6229,7 +5784,6 @@ static struct sk_buff *rtl8169_try_rx_copy(void *data,
skb = napi_alloc_skb(&tp->napi, pkt_size);
if (skb)
skb_copy_to_linear_data(skb, data, pkt_size);
- dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE);
return skb;
}
@@ -6264,14 +5818,8 @@ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget
dev->stats.rx_length_errors++;
if (status & RxCRC)
dev->stats.rx_crc_errors++;
- /* RxFOVF is a reserved bit on later chip versions */
- if (tp->mac_version == RTL_GIGA_MAC_VER_01 &&
- status & RxFOVF) {
- rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
- dev->stats.rx_fifo_errors++;
- } else if (status & (RxRUNT | RxCRC) &&
- !(status & RxRWT) &&
- dev->features & NETIF_F_RXALL) {
+ if (status & (RxRUNT | RxCRC) && !(status & RxRWT) &&
+ dev->features & NETIF_F_RXALL) {
goto process_pkt;
}
} else {
@@ -6451,7 +5999,10 @@ static int r8169_phy_connect(struct rtl8169_private *tp)
if (ret)
return ret;
- if (!tp->supports_gmii)
+ if (tp->supports_gmii)
+ phy_remove_link_mode(phydev,
+ ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+ else
phy_set_max_speed(phydev, SPEED_100);
phy_support_asym_pause(phydev);
@@ -6884,30 +6435,18 @@ static const struct net_device_ops rtl_netdev_ops = {
};
-static const struct rtl_cfg_info {
- void (*hw_start)(struct rtl8169_private *tp);
- u16 irq_mask;
- unsigned int has_gmii:1;
- const struct rtl_coalesce_info *coalesce_info;
-} rtl_cfg_infos [] = {
- [RTL_CFG_0] = {
- .hw_start = rtl_hw_start_8169,
- .irq_mask = SYSErr | LinkChg | RxOverflow | RxFIFOOver,
- .has_gmii = 1,
- .coalesce_info = rtl_coalesce_info_8169,
- },
- [RTL_CFG_1] = {
- .hw_start = rtl_hw_start_8168,
- .irq_mask = LinkChg | RxOverflow,
- .has_gmii = 1,
- .coalesce_info = rtl_coalesce_info_8168_8136,
- },
- [RTL_CFG_2] = {
- .hw_start = rtl_hw_start_8101,
- .irq_mask = LinkChg | RxOverflow | RxFIFOOver,
- .coalesce_info = rtl_coalesce_info_8168_8136,
- }
-};
+static void rtl_set_irq_mask(struct rtl8169_private *tp)
+{
+ tp->irq_mask = RTL_EVENT_NAPI | LinkChg;
+
+ if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
+ tp->irq_mask |= SYSErr | RxOverflow | RxFIFOOver;
+ else if (tp->mac_version == RTL_GIGA_MAC_VER_11)
+ /* special workaround needed */
+ tp->irq_mask |= RxFIFOOver;
+ else
+ tp->irq_mask |= RxOverflow;
+}
static int rtl_alloc_irq(struct rtl8169_private *tp)
{
@@ -6928,13 +6467,10 @@ static int rtl_alloc_irq(struct rtl8169_private *tp)
static void rtl_read_mac_address(struct rtl8169_private *tp,
u8 mac_addr[ETH_ALEN])
{
- u32 value;
-
/* Get MAC address */
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_35 ... RTL_GIGA_MAC_VER_38:
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
- value = rtl_eri_read(tp, 0xe0);
+ if (rtl_is_8168evl_up(tp) && tp->mac_version != RTL_GIGA_MAC_VER_34) {
+ u32 value = rtl_eri_read(tp, 0xe0);
+
mac_addr[0] = (value >> 0) & 0xff;
mac_addr[1] = (value >> 8) & 0xff;
mac_addr[2] = (value >> 16) & 0xff;
@@ -6943,9 +6479,6 @@ static void rtl_read_mac_address(struct rtl8169_private *tp,
value = rtl_eri_read(tp, 0xe4);
mac_addr[4] = (value >> 0) & 0xff;
mac_addr[5] = (value >> 8) & 0xff;
- break;
- default:
- break;
}
}
@@ -7046,42 +6579,23 @@ static void rtl_hw_init_8168g(struct rtl8169_private *tp)
data |= (1 << 15);
r8168_mac_ocp_write(tp, 0xe8de, data);
- if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
- return;
-}
-
-static void rtl_hw_init_8168ep(struct rtl8169_private *tp)
-{
- rtl8168ep_stop_cmac(tp);
- rtl_hw_init_8168g(tp);
+ rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42);
}
static void rtl_hw_initialize(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_51:
+ rtl8168ep_stop_cmac(tp);
+ /* fall through */
case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_48:
rtl_hw_init_8168g(tp);
break;
- case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_51:
- rtl_hw_init_8168ep(tp);
- break;
default:
break;
}
}
-/* Versions RTL8102e and from RTL8168c onwards support csum_v2 */
-static bool rtl_chip_supports_csum_v2(struct rtl8169_private *tp)
-{
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06:
- case RTL_GIGA_MAC_VER_10 ... RTL_GIGA_MAC_VER_17:
- return false;
- default:
- return true;
- }
-}
-
static int rtl_jumbo_max(struct rtl8169_private *tp)
{
/* Non-GBit versions don't support jumbo frames */
@@ -7090,7 +6604,7 @@ static int rtl_jumbo_max(struct rtl8169_private *tp)
switch (tp->mac_version) {
/* RTL8169 */
- case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06:
+ case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
return JUMBO_7K;
/* RTL8168b */
case RTL_GIGA_MAC_VER_11:
@@ -7136,14 +6650,36 @@ static int rtl_get_ether_clk(struct rtl8169_private *tp)
return rc;
}
+static void rtl_init_mac_address(struct rtl8169_private *tp)
+{
+ struct net_device *dev = tp->dev;
+ u8 *mac_addr = dev->dev_addr;
+ int rc, i;
+
+ rc = eth_platform_get_mac_address(tp_to_dev(tp), mac_addr);
+ if (!rc)
+ goto done;
+
+ rtl_read_mac_address(tp, mac_addr);
+ if (is_valid_ether_addr(mac_addr))
+ goto done;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ mac_addr[i] = RTL_R8(tp, MAC0 + i);
+ if (is_valid_ether_addr(mac_addr))
+ goto done;
+
+ eth_hw_addr_random(dev);
+ dev_warn(tp_to_dev(tp), "can't read MAC address, setting random one\n");
+done:
+ rtl_rar_set(tp, mac_addr);
+}
+
static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
- /* align to u16 for is_valid_ether_addr() */
- u8 mac_addr[ETH_ALEN] __aligned(2) = {};
struct rtl8169_private *tp;
struct net_device *dev;
- int chipset, region, i;
+ int chipset, region;
int jumbo_max, rc;
dev = devm_alloc_etherdev(&pdev->dev, sizeof (*tp));
@@ -7156,7 +6692,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->dev = dev;
tp->pci_dev = pdev;
tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
- tp->supports_gmii = cfg->has_gmii;
+ tp->supports_gmii = ent->driver_data == RTL_CFG_NO_GBIT ? 0 : 1;
/* Get the *optional* external "ether_clk" used on some boards */
rc = rtl_get_ether_clk(tp);
@@ -7166,7 +6702,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Disable ASPM completely as that cause random device stop working
* problems as well as full system hangs for some PCIe devices users.
*/
- pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
+ rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
+ PCIE_LINK_STATE_L1);
+ tp->aspm_manageable = !rc;
/* enable device (incl. PCI PM wakeup and hotplug setup) */
rc = pcim_enable_device(pdev);
@@ -7204,23 +6742,11 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (tp->mac_version == RTL_GIGA_MAC_NONE)
return -ENODEV;
- if (rtl_tbi_enabled(tp)) {
- dev_err(&pdev->dev, "TBI fiber mode not supported\n");
- return -ENODEV;
- }
-
tp->cp_cmd = RTL_R16(tp, CPlusCmd);
if (sizeof(dma_addr_t) > 4 && tp->mac_version >= RTL_GIGA_MAC_VER_18 &&
- !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
+ !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
dev->features |= NETIF_F_HIGHDMA;
- } else {
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (rc < 0) {
- dev_err(&pdev->dev, "DMA configuration failed\n");
- return rc;
- }
- }
rtl_init_rxcfg(tp);
@@ -7232,9 +6758,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
- rtl_init_mdio_ops(tp);
- rtl_init_jumbo_ops(tp);
-
chipset = tp->mac_version;
rc = rtl_alloc_irq(tp);
@@ -7248,16 +6771,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
u64_stats_init(&tp->rx_stats.syncp);
u64_stats_init(&tp->tx_stats.syncp);
- /* get MAC address */
- rc = eth_platform_get_mac_address(&pdev->dev, mac_addr);
- if (rc)
- rtl_read_mac_address(tp, mac_addr);
-
- if (is_valid_ether_addr(mac_addr))
- rtl_rar_set(tp, mac_addr);
-
- for (i = 0; i < ETH_ALEN; i++)
- dev->dev_addr[i] = RTL_R8(tp, MAC0 + i);
+ rtl_init_mac_address(tp);
dev->ethtool_ops = &rtl8169_ethtool_ops;
@@ -7285,12 +6799,8 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Disallow toggling */
dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
- if (rtl_chip_supports_csum_v2(tp)) {
- tp->tso_csum = rtl8169_tso_csum_v2;
+ if (rtl_chip_supports_csum_v2(tp))
dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
- } else {
- tp->tso_csum = rtl8169_tso_csum_v1;
- }
dev->hw_features |= NETIF_F_RXALL;
dev->hw_features |= NETIF_F_RXFCS;
@@ -7300,9 +6810,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
jumbo_max = rtl_jumbo_max(tp);
dev->max_mtu = jumbo_max;
- tp->hw_start = cfg->hw_start;
- tp->irq_mask = RTL_EVENT_NAPI | cfg->irq_mask;
- tp->coalesce_info = cfg->coalesce_info;
+ rtl_set_irq_mask(tp);
tp->fw_name = rtl_chip_infos[chipset].fw_name;
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index 3e5bc1fc3c46..079f459c73a5 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -2210,6 +2210,10 @@ static int rocker_router_fib_event(struct notifier_block *nb,
NL_SET_ERR_MSG_MOD(info->extack, "IPv6 gateway with IPv4 route is not supported");
return notifier_from_errno(-EINVAL);
}
+ if (fen_info->fi->nh) {
+ NL_SET_ERR_MSG_MOD(info->extack, "IPv4 route with nexthop objects is not supported");
+ return notifier_from_errno(-EINVAL);
+ }
}
memcpy(&fib_work->fen_info, ptr, sizeof(fib_work->fen_info));
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index bdfa6a19d620..7072b249c8bd 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -18,6 +18,7 @@
#include <net/neighbour.h>
#include <net/switchdev.h>
#include <net/ip_fib.h>
+#include <net/nexthop.h>
#include <net/arp.h>
#include "rocker.h"
@@ -2282,8 +2283,8 @@ static int ofdpa_port_fib_ipv4(struct ofdpa_port *ofdpa_port, __be32 dst,
/* XXX support ECMP */
- nh = fi->fib_nh;
- nh_on_port = (fi->fib_dev == ofdpa_port->dev);
+ nh = fib_info_nh(fi, 0);
+ nh_on_port = (nh->fib_nh_dev == ofdpa_port->dev);
has_gw = !!nh->fib_nh_gw4;
if (has_gw && nh_on_port) {
@@ -2733,11 +2734,13 @@ static int ofdpa_fib4_add(struct rocker *rocker,
{
struct ofdpa *ofdpa = rocker->wpriv;
struct ofdpa_port *ofdpa_port;
+ struct fib_nh *nh;
int err;
if (ofdpa->fib_aborted)
return 0;
- ofdpa_port = ofdpa_port_dev_lower_find(fen_info->fi->fib_dev, rocker);
+ nh = fib_info_nh(fen_info->fi, 0);
+ ofdpa_port = ofdpa_port_dev_lower_find(nh->fib_nh_dev, rocker);
if (!ofdpa_port)
return 0;
err = ofdpa_port_fib_ipv4(ofdpa_port, htonl(fen_info->dst),
@@ -2745,7 +2748,7 @@ static int ofdpa_fib4_add(struct rocker *rocker,
fen_info->tb_id, 0);
if (err)
return err;
- fen_info->fi->fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
+ nh->fib_nh_flags |= RTNH_F_OFFLOAD;
return 0;
}
@@ -2754,13 +2757,15 @@ static int ofdpa_fib4_del(struct rocker *rocker,
{
struct ofdpa *ofdpa = rocker->wpriv;
struct ofdpa_port *ofdpa_port;
+ struct fib_nh *nh;
if (ofdpa->fib_aborted)
return 0;
- ofdpa_port = ofdpa_port_dev_lower_find(fen_info->fi->fib_dev, rocker);
+ nh = fib_info_nh(fen_info->fi, 0);
+ ofdpa_port = ofdpa_port_dev_lower_find(nh->fib_nh_dev, rocker);
if (!ofdpa_port)
return 0;
- fen_info->fi->fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
+ nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
return ofdpa_port_fib_ipv4(ofdpa_port, htonl(fen_info->dst),
fen_info->dst_len, fen_info->fi,
fen_info->tb_id, OFDPA_OP_FLAG_REMOVE);
@@ -2780,14 +2785,16 @@ static void ofdpa_fib4_abort(struct rocker *rocker)
spin_lock_irqsave(&ofdpa->flow_tbl_lock, flags);
hash_for_each_safe(ofdpa->flow_tbl, bkt, tmp, flow_entry, entry) {
+ struct fib_nh *nh;
+
if (flow_entry->key.tbl_id !=
ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING)
continue;
- ofdpa_port = ofdpa_port_dev_lower_find(flow_entry->fi->fib_dev,
- rocker);
+ nh = fib_info_nh(flow_entry->fi, 0);
+ ofdpa_port = ofdpa_port_dev_lower_find(nh->fib_nh_dev, rocker);
if (!ofdpa_port)
continue;
- flow_entry->fi->fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
+ nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
ofdpa_flow_tbl_del(ofdpa_port, OFDPA_OP_FLAG_REMOVE,
flow_entry);
}
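Taken together, the rocker changes above show the access pattern any IPv4-offloading driver needs once nexthop objects exist: reject routes where fi->nh is set, and otherwise go through fib_info_nh() instead of dereferencing fi->fib_nh directly. A condensed, hedged sketch of that pattern (the helper name is illustrative only, not part of rocker):

#include <net/ip_fib.h>
#include <net/nexthop.h>

/* Return the egress netdev for a single-path legacy route and mark it
 * offloaded, or NULL if the route was built from a nexthop object.
 */
static struct net_device *example_offload_nh(struct fib_info *fi)
{
	struct fib_nh *nh;

	if (fi->nh)			/* nexthop object: not offloadable here */
		return NULL;

	nh = fib_info_nh(fi, 0);	/* first (here: only) path */
	nh->fib_nh_flags |= RTNH_F_OFFLOAD;

	return nh->fib_nh_dev;
}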
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 53b726bfe945..ab58b837df47 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -3614,11 +3614,7 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
netif_warn(efx, probe, efx->net_dev,
"failed to create MTDs (%d)\n", rc);
- rc = pci_enable_pcie_error_reporting(pci_dev);
- if (rc && rc != -EINVAL)
- netif_notice(efx, probe, efx->net_dev,
- "PCIE error reporting unavailable (%d).\n",
- rc);
+ (void)pci_enable_pcie_error_reporting(pci_dev);
if (efx->type->udp_tnl_push_ports)
efx->type->udp_tnl_push_ports(efx);
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index 9b036c857b1d..aba6eea72f15 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -360,7 +360,7 @@ static int sis635_get_mac_addr(struct pci_dev *pci_dev,
* SiS962 or SiS963 model, use EEPROM to store MAC address. And EEPROM
* is shared by
* LAN and 1394. When access EEPROM, send EEREQ signal to hardware first
- * and wait for EEGNT. If EEGNT is ON, EEPROM is permitted to be access
+ * and wait for EEGNT. If EEGNT is ON, EEPROM is permitted to be accessed
* by LAN, otherwise is not. After MAC address is read from EEPROM, send
* EEDONE signal to refuse EEPROM access by LAN.
* The EEPROM map of SiS962 or SiS963 is different to SiS900.
@@ -882,7 +882,7 @@ static void mdio_reset(struct sis900_private *sp)
* mdio_read - read MII PHY register
* @net_dev: the net device to read
* @phy_id: the phy address to read
- * @location: the phy regiester id to read
+ * @location: the phy register id to read
*
* Read MII registers through MDIO and MDC
* using MDIO management frame structure and protocol(defined by ISO/IEC).
@@ -926,7 +926,7 @@ static int mdio_read(struct net_device *net_dev, int phy_id, int location)
* mdio_write - write MII PHY register
* @net_dev: the net device to write
* @phy_id: the phy address to write
- * @location: the phy regiester id to write
+ * @location: the phy register id to write
* @value: the register value to write with
*
* Write MII registers with @value through MDIO and MDC
@@ -1057,7 +1057,7 @@ sis900_open(struct net_device *net_dev)
sis900_set_mode(sis_priv, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED);
/* Enable all known interrupts by setting the interrupt mask. */
- sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE | TxDESC);
+ sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxDESC);
sw32(cr, RxENA | sr32(cr));
sw32(ier, IE);
@@ -1101,7 +1101,7 @@ sis900_init_rxfilter (struct net_device * net_dev)
sw32(rfdr, w);
if (netif_msg_hw(sis_priv)) {
- printk(KERN_DEBUG "%s: Receive Filter Addrss[%d]=%x\n",
+ printk(KERN_DEBUG "%s: Receive Filter Address[%d]=%x\n",
net_dev->name, i, sr32(rfdr));
}
}
@@ -1148,7 +1148,7 @@ sis900_init_tx_ring(struct net_device *net_dev)
* @net_dev: the net device to initialize for
*
* Initialize the Rx descriptor ring,
- * and pre-allocate recevie buffers (socket buffer)
+ * and pre-allocate receive buffers (socket buffer)
*/
static void
@@ -1578,7 +1578,7 @@ static void sis900_tx_timeout(struct net_device *net_dev)
sw32(txdp, sis_priv->tx_ring_dma);
/* Enable all known interrupts by setting the interrupt mask. */
- sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE | TxDESC);
+ sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxDESC);
}
/**
@@ -1674,8 +1674,8 @@ static irqreturn_t sis900_interrupt(int irq, void *dev_instance)
do {
status = sr32(isr);
- if ((status & (HIBERR|TxURN|TxERR|TxIDLE|TxDESC|RxORN|RxERR|RxOK)) == 0)
- /* nothing intresting happened */
+ if ((status & (HIBERR|TxURN|TxERR|TxDESC|RxORN|RxERR|RxOK)) == 0)
+ /* nothing interesting happened */
break;
handled = 1;
@@ -1684,7 +1684,7 @@ static irqreturn_t sis900_interrupt(int irq, void *dev_instance)
/* Rx interrupt */
sis900_rx(net_dev);
- if (status & (TxURN | TxERR | TxIDLE | TxDESC))
+ if (status & (TxURN | TxERR | TxDESC))
/* Tx interrupt */
sis900_finish_xmit(net_dev);
@@ -1897,7 +1897,7 @@ static void sis900_finish_xmit (struct net_device *net_dev)
if (tx_status & OWN) {
/* The packet is not transmitted yet (owned by hardware) !
* Note: this is an almost impossible condition
- * in case of TxDESC ('descriptor interrupt') */
+ * on TxDESC interrupt ('descriptor interrupt') */
break;
}
@@ -2473,7 +2473,7 @@ static int sis900_resume(struct pci_dev *pci_dev)
sis900_set_mode(sis_priv, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED);
/* Enable all known interrupts by setting the interrupt mask. */
- sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE | TxDESC);
+ sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxDESC);
sw32(cr, RxENA | sr32(cr));
sw32(ier, IE);
diff --git a/drivers/net/ethernet/smsc/Kconfig b/drivers/net/ethernet/smsc/Kconfig
index d1b6a78557ec..9e1c3752b200 100644
--- a/drivers/net/ethernet/smsc/Kconfig
+++ b/drivers/net/ethernet/smsc/Kconfig
@@ -49,7 +49,7 @@ config SMC91X
This driver is also available as a module ( = code which can be
inserted in and removed from the running kernel whenever you want).
The module will be called smc91x. If you want to compile it as a
- module, say M here and read <file:Documentation/kbuild/modules.txt>.
+ module, say M here and read <file:Documentation/kbuild/modules.rst>.
config PCMCIA_SMC91C92
tristate "SMC 91Cxx PCMCIA support"
@@ -86,7 +86,7 @@ config SMC911X
This driver is also available as a module. The module will be
called smc911x. If you want to compile it as a module, say M
- here and read <file:Documentation/kbuild/modules.txt>
+ here and read <file:Documentation/kbuild/modules.rst>
config SMSC911X
tristate "SMSC LAN911x/LAN921x families embedded ethernet support"
@@ -121,6 +121,6 @@ config SMSC9420
This driver is also available as a module. The module will be
called smsc9420. If you want to compile it as a module, say M
- here and read <file:Documentation/kbuild/modules.txt>
+ here and read <file:Documentation/kbuild/modules.rst>
endif # NET_VENDOR_SMSC
diff --git a/drivers/net/ethernet/socionext/Kconfig b/drivers/net/ethernet/socionext/Kconfig
index 25f18be27423..95e99baf3f45 100644
--- a/drivers/net/ethernet/socionext/Kconfig
+++ b/drivers/net/ethernet/socionext/Kconfig
@@ -26,6 +26,7 @@ config SNI_NETSEC
tristate "Socionext NETSEC ethernet support"
depends on (ARCH_SYNQUACER || COMPILE_TEST) && OF
select PHYLIB
+ select PAGE_POOL
select MII
---help---
Enable to add support for the SocioNext NetSec Gigabit Ethernet
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index cba5881b2746..1502fe8b0456 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -9,8 +9,12 @@
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/netlink.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
#include <net/tcp.h>
+#include <net/page_pool.h>
#include <net/ip6_checksum.h>
#define NETSEC_REG_SOFT_RST 0x104
@@ -235,22 +239,41 @@
#define DESC_NUM 256
#define NETSEC_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
-#define NETSEC_RX_BUF_SZ 1536
+#define NETSEC_RXBUF_HEADROOM (max(XDP_PACKET_HEADROOM, NET_SKB_PAD) + \
+ NET_IP_ALIGN)
+#define NETSEC_RX_BUF_NON_DATA (NETSEC_RXBUF_HEADROOM + \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define DESC_SZ sizeof(struct netsec_de)
#define NETSEC_F_NETSEC_VER_MAJOR_NUM(x) ((x) & 0xffff0000)
+#define NETSEC_XDP_PASS 0
+#define NETSEC_XDP_CONSUMED BIT(0)
+#define NETSEC_XDP_TX BIT(1)
+#define NETSEC_XDP_REDIR BIT(2)
+#define NETSEC_XDP_RX_OK (NETSEC_XDP_PASS | NETSEC_XDP_TX | NETSEC_XDP_REDIR)
+
enum ring_id {
NETSEC_RING_TX = 0,
NETSEC_RING_RX
};
+enum buf_type {
+ TYPE_NETSEC_SKB = 0,
+ TYPE_NETSEC_XDP_TX,
+ TYPE_NETSEC_XDP_NDO,
+};
+
struct netsec_desc {
- struct sk_buff *skb;
+ union {
+ struct sk_buff *skb;
+ struct xdp_frame *xdpf;
+ };
dma_addr_t dma_addr;
void *addr;
u16 len;
+ u8 buf_type;
};
struct netsec_desc_ring {
@@ -258,11 +281,17 @@ struct netsec_desc_ring {
struct netsec_desc *desc;
void *vaddr;
u16 head, tail;
+ u16 xdp_xmit; /* netsec_xdp_xmit packets */
+ bool is_xdp;
+ struct page_pool *page_pool;
+ struct xdp_rxq_info xdp_rxq;
+ spinlock_t lock; /* XDP tx queue locking */
};
struct netsec_priv {
struct netsec_desc_ring desc_ring[NETSEC_RING_MAX];
struct ethtool_coalesce et_coalesce;
+ struct bpf_prog *xdp_prog;
spinlock_t reglock; /* protect reg access */
struct napi_struct napi;
phy_interface_t phy_interface;
@@ -600,12 +629,14 @@ static void netsec_set_rx_de(struct netsec_priv *priv,
static bool netsec_clean_tx_dring(struct netsec_priv *priv)
{
struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
- unsigned int pkts, bytes;
struct netsec_de *entry;
int tail = dring->tail;
+ unsigned int bytes;
int cnt = 0;
- pkts = 0;
+ if (dring->is_xdp)
+ spin_lock(&dring->lock);
+
bytes = 0;
entry = dring->vaddr + DESC_SZ * tail;
@@ -618,13 +649,23 @@ static bool netsec_clean_tx_dring(struct netsec_priv *priv)
eop = (entry->attr >> NETSEC_TX_LAST) & 1;
dma_rmb();
- dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
- DMA_TO_DEVICE);
- if (eop) {
- pkts++;
+ /* if buf_type is either TYPE_NETSEC_SKB or
+ * TYPE_NETSEC_XDP_NDO, we mapped it and must unmap it here
+ */
+ if (desc->buf_type != TYPE_NETSEC_XDP_TX)
+ dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
+ DMA_TO_DEVICE);
+
+ if (!eop)
+ goto next;
+
+ if (desc->buf_type == TYPE_NETSEC_SKB) {
bytes += desc->skb->len;
dev_kfree_skb(desc->skb);
+ } else {
+ xdp_return_frame(desc->xdpf);
}
+next:
/* clean up so netsec_uninit_pkt_dring() won't free the skb
* again
*/
@@ -641,6 +682,8 @@ static bool netsec_clean_tx_dring(struct netsec_priv *priv)
entry = dring->vaddr + DESC_SZ * tail;
cnt++;
}
+ if (dring->is_xdp)
+ spin_unlock(&dring->lock);
if (!cnt)
return false;
@@ -673,33 +716,31 @@ static void netsec_process_tx(struct netsec_priv *priv)
}
static void *netsec_alloc_rx_data(struct netsec_priv *priv,
- dma_addr_t *dma_handle, u16 *desc_len,
- bool napi)
+ dma_addr_t *dma_handle, u16 *desc_len)
{
- size_t total_len = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- size_t payload_len = NETSEC_RX_BUF_SZ;
- dma_addr_t mapping;
- void *buf;
- total_len += SKB_DATA_ALIGN(payload_len + NETSEC_SKB_PAD);
+ struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
+ enum dma_data_direction dma_dir;
+ struct page *page;
- buf = napi ? napi_alloc_frag(total_len) : netdev_alloc_frag(total_len);
- if (!buf)
+ page = page_pool_dev_alloc_pages(dring->page_pool);
+ if (!page)
return NULL;
- mapping = dma_map_single(priv->dev, buf + NETSEC_SKB_PAD, payload_len,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(priv->dev, mapping)))
- goto err_out;
-
- *dma_handle = mapping;
- *desc_len = payload_len;
-
- return buf;
+ /* We allocate the same buffer length for XDP and non-XDP cases.
+ * page_pool API will map the whole page, skip what's needed for
+ * network payloads and/or XDP
+ */
+ *dma_handle = page_pool_get_dma_addr(page) + NETSEC_RXBUF_HEADROOM;
+ /* Make sure the incoming payload fits in the page for XDP and non-XDP
+ * cases and reserve enough space for headroom + skb_shared_info
+ */
+ *desc_len = PAGE_SIZE - NETSEC_RX_BUF_NON_DATA;
+ dma_dir = page_pool_get_dma_dir(dring->page_pool);
+ dma_sync_single_for_device(priv->dev, *dma_handle, *desc_len, dma_dir);
-err_out:
- skb_free_frag(buf);
- return NULL;
+ return page_address(page);
}
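With page_pool the allocator above no longer maps anything itself; the pool maps the whole page once, and each descriptor just gets a headroom-adjusted DMA address plus a payload length that leaves room for headroom and skb_shared_info. As a rough, non-authoritative illustration of the resulting layout (4 KiB pages, XDP_PACKET_HEADROOM of 256 and typical NET_SKB_PAD/NET_IP_ALIGN/skb_shared_info sizes; all of these vary by architecture and config):

/* One RX page, as carved up by netsec_alloc_rx_data() above:
 *
 *   NETSEC_RXBUF_HEADROOM = max(256, NET_SKB_PAD) + NET_IP_ALIGN  ~  258 B
 *   packet data, *desc_len = PAGE_SIZE - NETSEC_RX_BUF_NON_DATA   ~ 3518 B
 *   SKB_DATA_ALIGN(sizeof(struct skb_shared_info))                ~  320 B
 *
 * The same page can back either a build_skb() skb or an xdp_buff without
 * re-mapping, which is why the old "napi" argument and the per-buffer
 * dma_map_single() are gone.
 */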
static void netsec_rx_fill(struct netsec_priv *priv, u16 from, u16 num)
@@ -716,22 +757,201 @@ static void netsec_rx_fill(struct netsec_priv *priv, u16 from, u16 num)
}
}
+static void netsec_xdp_ring_tx_db(struct netsec_priv *priv, u16 pkts)
+{
+ if (likely(pkts))
+ netsec_write(priv, NETSEC_REG_NRM_TX_PKTCNT, pkts);
+}
+
+static void netsec_finalize_xdp_rx(struct netsec_priv *priv, u32 xdp_res,
+ u16 pkts)
+{
+ if (xdp_res & NETSEC_XDP_REDIR)
+ xdp_do_flush_map();
+
+ if (xdp_res & NETSEC_XDP_TX)
+ netsec_xdp_ring_tx_db(priv, pkts);
+}
+
+static void netsec_set_tx_de(struct netsec_priv *priv,
+ struct netsec_desc_ring *dring,
+ const struct netsec_tx_pkt_ctrl *tx_ctrl,
+ const struct netsec_desc *desc, void *buf)
+{
+ int idx = dring->head;
+ struct netsec_de *de;
+ u32 attr;
+
+ de = dring->vaddr + (DESC_SZ * idx);
+
+ attr = (1 << NETSEC_TX_SHIFT_OWN_FIELD) |
+ (1 << NETSEC_TX_SHIFT_PT_FIELD) |
+ (NETSEC_RING_GMAC << NETSEC_TX_SHIFT_TDRID_FIELD) |
+ (1 << NETSEC_TX_SHIFT_FS_FIELD) |
+ (1 << NETSEC_TX_LAST) |
+ (tx_ctrl->cksum_offload_flag << NETSEC_TX_SHIFT_CO) |
+ (tx_ctrl->tcp_seg_offload_flag << NETSEC_TX_SHIFT_SO) |
+ (1 << NETSEC_TX_SHIFT_TRS_FIELD);
+ if (idx == DESC_NUM - 1)
+ attr |= (1 << NETSEC_TX_SHIFT_LD_FIELD);
+
+ de->data_buf_addr_up = upper_32_bits(desc->dma_addr);
+ de->data_buf_addr_lw = lower_32_bits(desc->dma_addr);
+ de->buf_len_info = (tx_ctrl->tcp_seg_len << 16) | desc->len;
+ de->attr = attr;
+ /* under spin_lock if using XDP */
+ if (!dring->is_xdp)
+ dma_wmb();
+
+ dring->desc[idx] = *desc;
+ if (desc->buf_type == TYPE_NETSEC_SKB)
+ dring->desc[idx].skb = buf;
+ else if (desc->buf_type == TYPE_NETSEC_XDP_TX ||
+ desc->buf_type == TYPE_NETSEC_XDP_NDO)
+ dring->desc[idx].xdpf = buf;
+
+ /* move head ahead */
+ dring->head = (dring->head + 1) % DESC_NUM;
+}
+
+/* The current driver only supports 1 Txq; this should run under spin_lock() */
+static u32 netsec_xdp_queue_one(struct netsec_priv *priv,
+ struct xdp_frame *xdpf, bool is_ndo)
+{
+ struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX];
+ struct page *page = virt_to_page(xdpf->data);
+ struct netsec_tx_pkt_ctrl tx_ctrl = {};
+ struct netsec_desc tx_desc;
+ dma_addr_t dma_handle;
+ u16 filled;
+
+ if (tx_ring->head >= tx_ring->tail)
+ filled = tx_ring->head - tx_ring->tail;
+ else
+ filled = tx_ring->head + DESC_NUM - tx_ring->tail;
+
+ if (DESC_NUM - filled <= 1)
+ return NETSEC_XDP_CONSUMED;
+
+ if (is_ndo) {
+ /* this is for ndo_xdp_xmit, the buffer needs mapping before
+ * sending
+ */
+ dma_handle = dma_map_single(priv->dev, xdpf->data, xdpf->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->dev, dma_handle))
+ return NETSEC_XDP_CONSUMED;
+ tx_desc.buf_type = TYPE_NETSEC_XDP_NDO;
+ } else {
+ /* This is the device Rx buffer from page_pool. No need to remap;
+ * just sync and send it
+ */
+ struct netsec_desc_ring *rx_ring =
+ &priv->desc_ring[NETSEC_RING_RX];
+ enum dma_data_direction dma_dir =
+ page_pool_get_dma_dir(rx_ring->page_pool);
+
+ dma_handle = page_pool_get_dma_addr(page) +
+ NETSEC_RXBUF_HEADROOM;
+ dma_sync_single_for_device(priv->dev, dma_handle, xdpf->len,
+ dma_dir);
+ tx_desc.buf_type = TYPE_NETSEC_XDP_TX;
+ }
+
+ tx_desc.dma_addr = dma_handle;
+ tx_desc.addr = xdpf->data;
+ tx_desc.len = xdpf->len;
+
+ netsec_set_tx_de(priv, tx_ring, &tx_ctrl, &tx_desc, xdpf);
+
+ return NETSEC_XDP_TX;
+}
+
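The head/tail arithmetic above is the usual circular-ring occupancy check, and the DESC_NUM - filled <= 1 bail-out keeps one slot unused so a full ring can be told apart from an empty one. For reference, the two branches collapse into a single modular expression; this is an equivalent reformulation, not code from the driver:

/* number of in-flight descriptors, 0 .. DESC_NUM - 1 */
static u16 example_ring_filled(u16 head, u16 tail)
{
	return (head + DESC_NUM - tail) % DESC_NUM;
}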
+static u32 netsec_xdp_xmit_back(struct netsec_priv *priv, struct xdp_buff *xdp)
+{
+ struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX];
+ struct xdp_frame *xdpf = convert_to_xdp_frame(xdp);
+ u32 ret;
+
+ if (unlikely(!xdpf))
+ return NETSEC_XDP_CONSUMED;
+
+ spin_lock(&tx_ring->lock);
+ ret = netsec_xdp_queue_one(priv, xdpf, false);
+ spin_unlock(&tx_ring->lock);
+
+ return ret;
+}
+
+static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
+ struct xdp_buff *xdp)
+{
+ u32 ret = NETSEC_XDP_PASS;
+ int err;
+ u32 act;
+
+ act = bpf_prog_run_xdp(prog, xdp);
+
+ switch (act) {
+ case XDP_PASS:
+ ret = NETSEC_XDP_PASS;
+ break;
+ case XDP_TX:
+ ret = netsec_xdp_xmit_back(priv, xdp);
+ if (ret != NETSEC_XDP_TX)
+ xdp_return_buff(xdp);
+ break;
+ case XDP_REDIRECT:
+ err = xdp_do_redirect(priv->ndev, xdp, prog);
+ if (!err) {
+ ret = NETSEC_XDP_REDIR;
+ } else {
+ ret = NETSEC_XDP_CONSUMED;
+ xdp_return_buff(xdp);
+ }
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ /* fall through */
+ case XDP_ABORTED:
+ trace_xdp_exception(priv->ndev, prog, act);
+ /* fall through -- handle aborts by dropping packet */
+ case XDP_DROP:
+ ret = NETSEC_XDP_CONSUMED;
+ xdp_return_buff(xdp);
+ break;
+ }
+
+ return ret;
+}
+
static int netsec_process_rx(struct netsec_priv *priv, int budget)
{
struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
struct net_device *ndev = priv->ndev;
struct netsec_rx_pkt_info rx_info;
- struct sk_buff *skb;
+ enum dma_data_direction dma_dir;
+ struct bpf_prog *xdp_prog;
+ struct sk_buff *skb = NULL;
+ u16 xdp_xmit = 0;
+ u32 xdp_act = 0;
int done = 0;
+ rcu_read_lock();
+ xdp_prog = READ_ONCE(priv->xdp_prog);
+ dma_dir = page_pool_get_dma_dir(dring->page_pool);
+
while (done < budget) {
u16 idx = dring->tail;
struct netsec_de *de = dring->vaddr + (DESC_SZ * idx);
struct netsec_desc *desc = &dring->desc[idx];
+ struct page *page = virt_to_page(desc->addr);
+ u32 xdp_result = XDP_PASS;
u16 pkt_len, desc_len;
dma_addr_t dma_handle;
+ struct xdp_buff xdp;
void *buf_addr;
- u32 truesize;
if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD)) {
/* reading the register clears the irq */
@@ -766,53 +986,71 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
/* allocate a fresh buffer and map it to the hardware.
* This will eventually replace the old buffer in the hardware
*/
- buf_addr = netsec_alloc_rx_data(priv, &dma_handle, &desc_len,
- true);
+ buf_addr = netsec_alloc_rx_data(priv, &dma_handle, &desc_len);
+
if (unlikely(!buf_addr))
break;
dma_sync_single_for_cpu(priv->dev, desc->dma_addr, pkt_len,
- DMA_FROM_DEVICE);
+ dma_dir);
prefetch(desc->addr);
- truesize = SKB_DATA_ALIGN(desc->len + NETSEC_SKB_PAD) +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- skb = build_skb(desc->addr, truesize);
+ xdp.data_hard_start = desc->addr;
+ xdp.data = desc->addr + NETSEC_RXBUF_HEADROOM;
+ xdp_set_data_meta_invalid(&xdp);
+ xdp.data_end = xdp.data + pkt_len;
+ xdp.rxq = &dring->xdp_rxq;
+
+ if (xdp_prog) {
+ xdp_result = netsec_run_xdp(priv, xdp_prog, &xdp);
+ if (xdp_result != NETSEC_XDP_PASS) {
+ xdp_act |= xdp_result;
+ if (xdp_result == NETSEC_XDP_TX)
+ xdp_xmit++;
+ goto next;
+ }
+ }
+ skb = build_skb(desc->addr, desc->len + NETSEC_RX_BUF_NON_DATA);
+
if (unlikely(!skb)) {
- /* free the newly allocated buffer, we are not going to
- * use it
+ /* If building the skb fails, recycle_direct will either
+ * unmap and free the page or refill the cache, depending on
+ * the cache state. Since we already paid the allocation
+ * cost, try to put the page back into the cache
*/
- dma_unmap_single(priv->dev, dma_handle, desc_len,
- DMA_FROM_DEVICE);
- skb_free_frag(buf_addr);
+ page_pool_recycle_direct(dring->page_pool, page);
netif_err(priv, drv, priv->ndev,
"rx failed to build skb\n");
break;
}
- dma_unmap_single_attrs(priv->dev, desc->dma_addr, desc->len,
- DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
-
- /* Update the descriptor with the new buffer we allocated */
- desc->len = desc_len;
- desc->dma_addr = dma_handle;
- desc->addr = buf_addr;
+ page_pool_release_page(dring->page_pool, page);
- skb_reserve(skb, NETSEC_SKB_PAD);
- skb_put(skb, pkt_len);
+ skb_reserve(skb, xdp.data - xdp.data_hard_start);
+ skb_put(skb, xdp.data_end - xdp.data);
skb->protocol = eth_type_trans(skb, priv->ndev);
if (priv->rx_cksum_offload_flag &&
rx_info.rx_cksum_result == NETSEC_RX_CKSUM_OK)
skb->ip_summed = CHECKSUM_UNNECESSARY;
- if (napi_gro_receive(&priv->napi, skb) != GRO_DROP) {
+next:
+ if ((skb && napi_gro_receive(&priv->napi, skb) != GRO_DROP) ||
+ xdp_result & NETSEC_XDP_RX_OK) {
ndev->stats.rx_packets++;
- ndev->stats.rx_bytes += pkt_len;
+ ndev->stats.rx_bytes += xdp.data_end - xdp.data;
}
+ /* Update the descriptor with fresh buffers */
+ desc->len = desc_len;
+ desc->dma_addr = dma_handle;
+ desc->addr = buf_addr;
+
netsec_rx_fill(priv, idx, 1);
dring->tail = (dring->tail + 1) % DESC_NUM;
}
+ netsec_finalize_xdp_rx(priv, xdp_act, xdp_xmit);
+
+ rcu_read_unlock();
return done;
}
@@ -820,19 +1058,12 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
static int netsec_napi_poll(struct napi_struct *napi, int budget)
{
struct netsec_priv *priv;
- int rx, done, todo;
+ int done;
priv = container_of(napi, struct netsec_priv, napi);
netsec_process_tx(priv);
-
- todo = budget;
- do {
- rx = netsec_process_rx(priv, todo);
- todo -= rx;
- } while (rx);
-
- done = budget - todo;
+ done = netsec_process_rx(priv, budget);
if (done < budget && napi_complete_done(napi, done)) {
unsigned long flags;
@@ -846,41 +1077,6 @@ static int netsec_napi_poll(struct napi_struct *napi, int budget)
return done;
}
-static void netsec_set_tx_de(struct netsec_priv *priv,
- struct netsec_desc_ring *dring,
- const struct netsec_tx_pkt_ctrl *tx_ctrl,
- const struct netsec_desc *desc,
- struct sk_buff *skb)
-{
- int idx = dring->head;
- struct netsec_de *de;
- u32 attr;
-
- de = dring->vaddr + (DESC_SZ * idx);
-
- attr = (1 << NETSEC_TX_SHIFT_OWN_FIELD) |
- (1 << NETSEC_TX_SHIFT_PT_FIELD) |
- (NETSEC_RING_GMAC << NETSEC_TX_SHIFT_TDRID_FIELD) |
- (1 << NETSEC_TX_SHIFT_FS_FIELD) |
- (1 << NETSEC_TX_LAST) |
- (tx_ctrl->cksum_offload_flag << NETSEC_TX_SHIFT_CO) |
- (tx_ctrl->tcp_seg_offload_flag << NETSEC_TX_SHIFT_SO) |
- (1 << NETSEC_TX_SHIFT_TRS_FIELD);
- if (idx == DESC_NUM - 1)
- attr |= (1 << NETSEC_TX_SHIFT_LD_FIELD);
-
- de->data_buf_addr_up = upper_32_bits(desc->dma_addr);
- de->data_buf_addr_lw = lower_32_bits(desc->dma_addr);
- de->buf_len_info = (tx_ctrl->tcp_seg_len << 16) | desc->len;
- de->attr = attr;
- dma_wmb();
-
- dring->desc[idx] = *desc;
- dring->desc[idx].skb = skb;
-
- /* move head ahead */
- dring->head = (dring->head + 1) % DESC_NUM;
-}
static int netsec_desc_used(struct netsec_desc_ring *dring)
{
@@ -927,8 +1123,12 @@ static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb,
u16 tso_seg_len = 0;
int filled;
+ if (dring->is_xdp)
+ spin_lock_bh(&dring->lock);
filled = netsec_desc_used(dring);
if (netsec_check_stop_tx(priv, filled)) {
+ if (dring->is_xdp)
+ spin_unlock_bh(&dring->lock);
net_warn_ratelimited("%s %s Tx queue full\n",
dev_name(priv->dev), ndev->name);
return NETDEV_TX_BUSY;
@@ -961,6 +1161,8 @@ static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb,
tx_desc.dma_addr = dma_map_single(priv->dev, skb->data,
skb_headlen(skb), DMA_TO_DEVICE);
if (dma_mapping_error(priv->dev, tx_desc.dma_addr)) {
+ if (dring->is_xdp)
+ spin_unlock_bh(&dring->lock);
netif_err(priv, drv, priv->ndev,
"%s: DMA mapping failed\n", __func__);
ndev->stats.tx_dropped++;
@@ -969,11 +1171,14 @@ static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb,
}
tx_desc.addr = skb->data;
tx_desc.len = skb_headlen(skb);
+ tx_desc.buf_type = TYPE_NETSEC_SKB;
skb_tx_timestamp(skb);
netdev_sent_queue(priv->ndev, skb->len);
netsec_set_tx_de(priv, dring, &tx_ctrl, &tx_desc, skb);
+ if (dring->is_xdp)
+ spin_unlock_bh(&dring->lock);
netsec_write(priv, NETSEC_REG_NRM_TX_PKTCNT, 1); /* submit another tx */
return NETDEV_TX_OK;
@@ -987,19 +1192,27 @@ static void netsec_uninit_pkt_dring(struct netsec_priv *priv, int id)
if (!dring->vaddr || !dring->desc)
return;
-
for (idx = 0; idx < DESC_NUM; idx++) {
desc = &dring->desc[idx];
if (!desc->addr)
continue;
- dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
- id == NETSEC_RING_RX ? DMA_FROM_DEVICE :
- DMA_TO_DEVICE);
- if (id == NETSEC_RING_RX)
- skb_free_frag(desc->addr);
- else if (id == NETSEC_RING_TX)
+ if (id == NETSEC_RING_RX) {
+ struct page *page = virt_to_page(desc->addr);
+
+ page_pool_put_page(dring->page_pool, page, false);
+ } else if (id == NETSEC_RING_TX) {
+ dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
+ DMA_TO_DEVICE);
dev_kfree_skb(desc->skb);
+ }
+ }
+
+ /* Rx is currently using page_pool */
+ if (id == NETSEC_RING_RX) {
+ if (xdp_rxq_info_is_reg(&dring->xdp_rxq))
+ xdp_rxq_info_unreg(&dring->xdp_rxq);
+ page_pool_destroy(dring->page_pool);
}
memset(dring->desc, 0, sizeof(struct netsec_desc) * DESC_NUM);
@@ -1029,7 +1242,6 @@ static void netsec_free_dring(struct netsec_priv *priv, int id)
static int netsec_alloc_dring(struct netsec_priv *priv, enum ring_id id)
{
struct netsec_desc_ring *dring = &priv->desc_ring[id];
- int i;
dring->vaddr = dma_alloc_coherent(priv->dev, DESC_SZ * DESC_NUM,
&dring->desc_dma, GFP_KERNEL);
@@ -1040,19 +1252,6 @@ static int netsec_alloc_dring(struct netsec_priv *priv, enum ring_id id)
if (!dring->desc)
goto err;
- if (id == NETSEC_RING_TX) {
- for (i = 0; i < DESC_NUM; i++) {
- struct netsec_de *de;
-
- de = dring->vaddr + (DESC_SZ * i);
- /* de->attr is not going to be accessed by the NIC
- * until netsec_set_tx_de() is called.
- * No need for a dma_wmb() here
- */
- de->attr = 1U << NETSEC_TX_SHIFT_OWN_FIELD;
- }
- }
-
return 0;
err:
netsec_free_dring(priv, id);
@@ -1060,10 +1259,60 @@ err:
return -ENOMEM;
}
+static void netsec_setup_tx_dring(struct netsec_priv *priv)
+{
+ struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
+ struct bpf_prog *xdp_prog = READ_ONCE(priv->xdp_prog);
+ int i;
+
+ for (i = 0; i < DESC_NUM; i++) {
+ struct netsec_de *de;
+
+ de = dring->vaddr + (DESC_SZ * i);
+ /* de->attr is not going to be accessed by the NIC
+ * until netsec_set_tx_de() is called.
+ * No need for a dma_wmb() here
+ */
+ de->attr = 1U << NETSEC_TX_SHIFT_OWN_FIELD;
+ }
+
+ if (xdp_prog)
+ dring->is_xdp = true;
+ else
+ dring->is_xdp = false;
+}
+
static int netsec_setup_rx_dring(struct netsec_priv *priv)
{
struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
- int i;
+ struct bpf_prog *xdp_prog = READ_ONCE(priv->xdp_prog);
+ struct page_pool_params pp_params = { 0 };
+ int i, err;
+
+ pp_params.order = 0;
+ /* internal DMA mapping in page_pool */
+ pp_params.flags = PP_FLAG_DMA_MAP;
+ pp_params.pool_size = DESC_NUM;
+ pp_params.nid = cpu_to_node(0);
+ pp_params.dev = priv->dev;
+ pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
+
+ dring->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(dring->page_pool)) {
+ err = PTR_ERR(dring->page_pool);
+ dring->page_pool = NULL;
+ goto err_out;
+ }
+
+ err = xdp_rxq_info_reg(&dring->xdp_rxq, priv->ndev, 0);
+ if (err)
+ goto err_out;
+
+ err = xdp_rxq_info_reg_mem_model(&dring->xdp_rxq, MEM_TYPE_PAGE_POOL,
+ dring->page_pool);
+ if (err)
+ goto err_out;
for (i = 0; i < DESC_NUM; i++) {
struct netsec_desc *desc = &dring->desc[i];
@@ -1071,10 +1320,10 @@ static int netsec_setup_rx_dring(struct netsec_priv *priv)
void *buf;
u16 len;
- buf = netsec_alloc_rx_data(priv, &dma_handle, &len,
- false);
+ buf = netsec_alloc_rx_data(priv, &dma_handle, &len);
+
if (!buf) {
- netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
+ err = -ENOMEM;
goto err_out;
}
desc->dma_addr = dma_handle;
@@ -1087,7 +1336,8 @@ static int netsec_setup_rx_dring(struct netsec_priv *priv)
return 0;
err_out:
- return -ENOMEM;
+ netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
+ return err;
}
static int netsec_netdev_load_ucode_region(struct netsec_priv *priv, u32 reg,
@@ -1361,6 +1611,7 @@ static int netsec_netdev_open(struct net_device *ndev)
pm_runtime_get_sync(priv->dev);
+ netsec_setup_tx_dring(priv);
ret = netsec_setup_rx_dring(priv);
if (ret) {
netif_err(priv, probe, priv->ndev,
@@ -1466,6 +1717,9 @@ static int netsec_netdev_init(struct net_device *ndev)
if (ret)
goto err2;
+ spin_lock_init(&priv->desc_ring[NETSEC_RING_TX].lock);
+ spin_lock_init(&priv->desc_ring[NETSEC_RING_RX].lock);
+
return 0;
err2:
netsec_free_dring(priv, NETSEC_RING_RX);
@@ -1498,6 +1752,81 @@ static int netsec_netdev_ioctl(struct net_device *ndev, struct ifreq *ifr,
return phy_mii_ioctl(ndev->phydev, ifr, cmd);
}
+static int netsec_xdp_xmit(struct net_device *ndev, int n,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct netsec_priv *priv = netdev_priv(ndev);
+ struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX];
+ int drops = 0;
+ int i;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ spin_lock(&tx_ring->lock);
+ for (i = 0; i < n; i++) {
+ struct xdp_frame *xdpf = frames[i];
+ int err;
+
+ err = netsec_xdp_queue_one(priv, xdpf, true);
+ if (err != NETSEC_XDP_TX) {
+ xdp_return_frame_rx_napi(xdpf);
+ drops++;
+ } else {
+ tx_ring->xdp_xmit++;
+ }
+ }
+ spin_unlock(&tx_ring->lock);
+
+ if (unlikely(flags & XDP_XMIT_FLUSH)) {
+ netsec_xdp_ring_tx_db(priv, tx_ring->xdp_xmit);
+ tx_ring->xdp_xmit = 0;
+ }
+
+ return n - drops;
+}
+
+static int netsec_xdp_setup(struct netsec_priv *priv, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
+{
+ struct net_device *dev = priv->ndev;
+ struct bpf_prog *old_prog;
+
+ /* For now, only support the usual MTU-sized frames */
+ if (prog && dev->mtu > 1500) {
+ NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported on XDP");
+ return -EOPNOTSUPP;
+ }
+
+ if (netif_running(dev))
+ netsec_netdev_stop(dev);
+
+ /* Detach old prog, if any */
+ old_prog = xchg(&priv->xdp_prog, prog);
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ if (netif_running(dev))
+ netsec_netdev_open(dev);
+
+ return 0;
+}
+
+static int netsec_xdp(struct net_device *ndev, struct netdev_bpf *xdp)
+{
+ struct netsec_priv *priv = netdev_priv(ndev);
+
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return netsec_xdp_setup(priv, xdp->prog, xdp->extack);
+ case XDP_QUERY_PROG:
+ xdp->prog_id = priv->xdp_prog ? priv->xdp_prog->aux->id : 0;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
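With .ndo_bpf and .ndo_xdp_xmit wired up below, the driver runs ordinary XDP programs; nothing netsec-specific is required on the BPF side. As a hedged example, a minimal program that passes every frame to the stack (section name and license string follow common libbpf conventions rather than anything this driver mandates):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_pass_all(struct xdp_md *ctx)
{
	/* returning XDP_DROP, XDP_TX or XDP_REDIRECT here instead would
	 * exercise the NETSEC_XDP_CONSUMED/TX/REDIR paths added above
	 */
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";

Attaching it with something like "ip link set dev eth0 xdp obj xdp_pass.o sec xdp" lands in netsec_xdp() via XDP_SETUP_PROG, which restarts the interface so the rings come back up with is_xdp set and the page pool mapped DMA_BIDIRECTIONAL.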
static const struct net_device_ops netsec_netdev_ops = {
.ndo_init = netsec_netdev_init,
.ndo_uninit = netsec_netdev_uninit,
@@ -1508,6 +1837,8 @@ static const struct net_device_ops netsec_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = netsec_netdev_ioctl,
+ .ndo_xdp_xmit = netsec_xdp_xmit,
+ .ndo_bpf = netsec_xdp,
};
static int netsec_of_probe(struct platform_device *pdev,
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 06545d7399fc..2325b40dff6e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -1,9 +1,10 @@
# SPDX-License-Identifier: GPL-2.0-only
config STMMAC_ETH
- tristate "STMicroelectronics 10/100/1000/EQOS Ethernet driver"
+ tristate "STMicroelectronics Multi-Gigabit Ethernet driver"
depends on HAS_IOMEM && HAS_DMA
select MII
- select PHYLIB
+ select PAGE_POOL
+ select PHYLINK
select CRC32
imply PTP_1588_CLOCK
select RESET_CONTROLLER
@@ -13,6 +14,16 @@ config STMMAC_ETH
if STMMAC_ETH
+config STMMAC_SELFTESTS
+ bool "Support for STMMAC Selftests"
+ depends on INET
+ depends on STMMAC_ETH
+ default n
+ ---help---
+ This adds support for STMMAC Selftests using ethtool. Enable this
+ feature if you are facing problems with your HW and submit the test
+ results to the netdev Mailing List.
+
config STMMAC_PLATFORM
tristate "STMMAC Platform bus support"
depends on STMMAC_ETH
@@ -31,7 +42,6 @@ if STMMAC_PLATFORM
config DWMAC_DWC_QOS_ETH
tristate "Support for snps,dwc-qos-ethernet.txt DT binding."
- select PHYLIB
select CRC32
select MII
depends on OF && HAS_DMA
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index c529c21e9bdd..c59926d96bcc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -8,6 +8,8 @@ stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \
stmmac_tc.o dwxgmac2_core.o dwxgmac2_dma.o dwxgmac2_descs.o \
$(stmmac-y)
+stmmac-$(CONFIG_STMMAC_SELFTESTS) += stmmac_selftests.o
+
# Ordering matters. Generic driver must be last.
obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o
obj-$(CONFIG_DWMAC_ANARION) += dwmac-anarion.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index ceb0d23f5041..ed872eed1cab 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -246,12 +246,13 @@ struct stmmac_safety_stats {
/* Max/Min RI Watchdog Timer count value */
#define MAX_DMA_RIWT 0xff
-#define MIN_DMA_RIWT 0x20
+#define MIN_DMA_RIWT 0x10
/* Tx coalesce parameters */
#define STMMAC_COAL_TX_TIMER 1000
#define STMMAC_MAX_COAL_TX_TICK 100000
#define STMMAC_TX_MAX_FRAMES 256
-#define STMMAC_TX_FRAMES 25
+#define STMMAC_TX_FRAMES 1
+#define STMMAC_RX_FRAMES 25
/* Packets types */
enum packets_types {
@@ -325,6 +326,7 @@ struct dma_features {
/* 802.3az - Energy-Efficient Ethernet (EEE) */
unsigned int eee;
unsigned int av;
+ unsigned int hash_tb_sz;
unsigned int tsoen;
/* TX and RX csum */
unsigned int tx_coe;
@@ -351,6 +353,7 @@ struct dma_features {
unsigned int frpsel;
unsigned int frpbs;
unsigned int frpes;
+ unsigned int addr64;
};
/* GMAC TX FIFO is 8K, Rx FIFO is 16K */
@@ -392,8 +395,12 @@ struct mac_link {
u32 speed100;
u32 speed1000;
u32 speed2500;
- u32 speed10000;
u32 duplex;
+ struct {
+ u32 speed2500;
+ u32 speed5000;
+ u32 speed10000;
+ } xgmii;
};
struct mii_regs {
@@ -414,12 +421,13 @@ struct mac_device_info {
const struct stmmac_mode_ops *mode;
const struct stmmac_hwtimestamp *ptp;
const struct stmmac_tc_ops *tc;
+ const struct stmmac_mmc_ops *mmc;
struct mii_regs mii; /* MII register Addresses */
struct mac_link link;
void __iomem *pcsr; /* vpointer to device CSRs */
- int multicast_filter_bins;
- int unicast_filter_entries;
- int mcast_bits_log2;
+ unsigned int multicast_filter_bins;
+ unsigned int unicast_filter_entries;
+ unsigned int mcast_bits_log2;
unsigned int rx_csum;
unsigned int pcs;
unsigned int pmt;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
index 126b66bb73a6..79f2ee37afed 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
@@ -9,6 +9,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_net.h>
+#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/stmmac.h>
@@ -298,6 +299,9 @@ static int mediatek_dwmac_init(struct platform_device *pdev, void *priv)
return ret;
}
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+
return 0;
}
@@ -307,6 +311,9 @@ static void mediatek_dwmac_exit(struct platform_device *pdev, void *priv)
const struct mediatek_dwmac_variant *variant = plat->variant;
clk_bulk_disable_unprepare(variant->num_clks, plat->clks);
+
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
}
static int mediatek_dwmac_probe(struct platform_device *pdev)
@@ -349,6 +356,7 @@ static int mediatek_dwmac_probe(struct platform_device *pdev)
plat_dat->has_gmac4 = 1;
plat_dat->has_gmac = 0;
plat_dat->pmt = 0;
+ plat_dat->riwt_off = 1;
plat_dat->maxmtu = ETH_DATA_LEN;
plat_dat->bsp_priv = priv_plat;
plat_dat->init = mediatek_dwmac_init;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index 8bdbddeec117..c141fe783e87 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -27,9 +27,12 @@
#define SYSMGR_EMACGRP_CTRL_PHYSEL_WIDTH 2
#define SYSMGR_EMACGRP_CTRL_PHYSEL_MASK 0x00000003
#define SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK 0x00000010
+#define SYSMGR_GEN10_EMACGRP_CTRL_PTP_REF_CLK_MASK 0x00000100
#define SYSMGR_FPGAGRP_MODULE_REG 0x00000028
#define SYSMGR_FPGAGRP_MODULE_EMAC 0x00000004
+#define SYSMGR_FPGAINTF_EMAC_REG 0x00000070
+#define SYSMGR_FPGAINTF_EMAC_BIT 0x1
#define EMAC_SPLITTER_CTRL_REG 0x0
#define EMAC_SPLITTER_CTRL_SPEED_MASK 0x3
@@ -37,6 +40,11 @@
#define EMAC_SPLITTER_CTRL_SPEED_100 0x3
#define EMAC_SPLITTER_CTRL_SPEED_1000 0x0
+struct socfpga_dwmac;
+struct socfpga_dwmac_ops {
+ int (*set_phy_mode)(struct socfpga_dwmac *dwmac_priv);
+};
+
struct socfpga_dwmac {
int interface;
u32 reg_offset;
@@ -48,6 +56,7 @@ struct socfpga_dwmac {
void __iomem *splitter_base;
bool f2h_ptp_ref_clk;
struct tse_pcs pcs;
+ const struct socfpga_dwmac_ops *ops;
};
static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed)
@@ -222,25 +231,36 @@ err_node_put:
return ret;
}
-static int socfpga_dwmac_set_phy_mode(struct socfpga_dwmac *dwmac)
+static int socfpga_set_phy_mode_common(int phymode, u32 *val)
{
- struct regmap *sys_mgr_base_addr = dwmac->sys_mgr_base_addr;
- int phymode = dwmac->interface;
- u32 reg_offset = dwmac->reg_offset;
- u32 reg_shift = dwmac->reg_shift;
- u32 ctrl, val, module;
-
switch (phymode) {
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
- val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII;
+ *val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII;
break;
case PHY_INTERFACE_MODE_MII:
case PHY_INTERFACE_MODE_GMII:
case PHY_INTERFACE_MODE_SGMII:
- val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII;
+ *val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ *val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RMII;
break;
default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int socfpga_gen5_set_phy_mode(struct socfpga_dwmac *dwmac)
+{
+ struct regmap *sys_mgr_base_addr = dwmac->sys_mgr_base_addr;
+ int phymode = dwmac->interface;
+ u32 reg_offset = dwmac->reg_offset;
+ u32 reg_shift = dwmac->reg_shift;
+ u32 ctrl, val, module;
+
+ if (socfpga_set_phy_mode_common(phymode, &val)) {
dev_err(dwmac->dev, "bad phy mode %d\n", phymode);
return -EINVAL;
}
@@ -291,6 +311,62 @@ static int socfpga_dwmac_set_phy_mode(struct socfpga_dwmac *dwmac)
return 0;
}
+static int socfpga_gen10_set_phy_mode(struct socfpga_dwmac *dwmac)
+{
+ struct regmap *sys_mgr_base_addr = dwmac->sys_mgr_base_addr;
+ int phymode = dwmac->interface;
+ u32 reg_offset = dwmac->reg_offset;
+ u32 reg_shift = dwmac->reg_shift;
+ u32 ctrl, val, module;
+
+ if (socfpga_set_phy_mode_common(phymode, &val))
+ return -EINVAL;
+
+ /* Overwrite val to GMII if splitter core is enabled. The phymode here
+ * is the actual phy mode on phy hardware, but phy interface from
+ * EMAC core is GMII.
+ */
+ if (dwmac->splitter_base)
+ val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII;
+
+ /* Assert reset to the enet controller before changing the phy mode */
+ reset_control_assert(dwmac->stmmac_ocp_rst);
+ reset_control_assert(dwmac->stmmac_rst);
+
+ regmap_read(sys_mgr_base_addr, reg_offset, &ctrl);
+ ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK);
+ ctrl |= val;
+
+ if (dwmac->f2h_ptp_ref_clk ||
+ phymode == PHY_INTERFACE_MODE_MII ||
+ phymode == PHY_INTERFACE_MODE_GMII ||
+ phymode == PHY_INTERFACE_MODE_SGMII) {
+ ctrl |= SYSMGR_GEN10_EMACGRP_CTRL_PTP_REF_CLK_MASK;
+ regmap_read(sys_mgr_base_addr, SYSMGR_FPGAINTF_EMAC_REG,
+ &module);
+ module |= (SYSMGR_FPGAINTF_EMAC_BIT << reg_shift);
+ regmap_write(sys_mgr_base_addr, SYSMGR_FPGAINTF_EMAC_REG,
+ module);
+ } else {
+ ctrl &= ~SYSMGR_GEN10_EMACGRP_CTRL_PTP_REF_CLK_MASK;
+ }
+
+ regmap_write(sys_mgr_base_addr, reg_offset, ctrl);
+
+ /* Deassert reset for the phy configuration to be sampled by
+ * the enet controller, and operation to start in requested mode
+ */
+ reset_control_deassert(dwmac->stmmac_ocp_rst);
+ reset_control_deassert(dwmac->stmmac_rst);
+ if (phymode == PHY_INTERFACE_MODE_SGMII) {
+ if (tse_pcs_init(dwmac->pcs.tse_pcs_base, &dwmac->pcs) != 0) {
+ dev_err(dwmac->dev, "Unable to initialize TSE PCS");
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
static int socfpga_dwmac_probe(struct platform_device *pdev)
{
struct plat_stmmacenet_data *plat_dat;
@@ -300,6 +376,13 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
struct socfpga_dwmac *dwmac;
struct net_device *ndev;
struct stmmac_priv *stpriv;
+ const struct socfpga_dwmac_ops *ops;
+
+ ops = device_get_match_data(&pdev->dev);
+ if (!ops) {
+ dev_err(&pdev->dev, "no of match data provided\n");
+ return -EINVAL;
+ }
ret = stmmac_get_platform_resources(pdev, &stmmac_res);
if (ret)
@@ -330,6 +413,7 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
goto err_remove_config_dt;
}
+ dwmac->ops = ops;
plat_dat->bsp_priv = dwmac;
plat_dat->fix_mac_speed = socfpga_dwmac_fix_mac_speed;
@@ -346,7 +430,7 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
*/
dwmac->stmmac_rst = stpriv->plat->stmmac_rst;
- ret = socfpga_dwmac_set_phy_mode(dwmac);
+ ret = ops->set_phy_mode(dwmac);
if (ret)
goto err_dvr_remove;
@@ -365,8 +449,9 @@ static int socfpga_dwmac_resume(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct stmmac_priv *priv = netdev_priv(ndev);
+ struct socfpga_dwmac *dwmac_priv = get_stmmac_bsp_priv(dev);
- socfpga_dwmac_set_phy_mode(priv->plat->bsp_priv);
+ dwmac_priv->ops->set_phy_mode(priv->plat->bsp_priv);
/* Before the enet controller is suspended, the phy is suspended.
* This causes the phy clock to be gated. The enet controller is
@@ -393,8 +478,17 @@ static int socfpga_dwmac_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(socfpga_dwmac_pm_ops, stmmac_suspend,
socfpga_dwmac_resume);
+static const struct socfpga_dwmac_ops socfpga_gen5_ops = {
+ .set_phy_mode = socfpga_gen5_set_phy_mode,
+};
+
+static const struct socfpga_dwmac_ops socfpga_gen10_ops = {
+ .set_phy_mode = socfpga_gen10_set_phy_mode,
+};
+
static const struct of_device_id socfpga_dwmac_match[] = {
- { .compatible = "altr,socfpga-stmmac" },
+ { .compatible = "altr,socfpga-stmmac", .data = &socfpga_gen5_ops },
+ { .compatible = "altr,socfpga-stmmac-a10-s10", .data = &socfpga_gen10_ops },
{ }
};
MODULE_DEVICE_TABLE(of, socfpga_dwmac_match);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index a69c34f605b1..2856f3fe5266 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -138,6 +138,20 @@ static const struct emac_variant emac_variant_a64 = {
.tx_delay_max = 7,
};
+static const struct emac_variant emac_variant_h6 = {
+ .default_syscon_value = 0x50000,
+ .syscon_field = &sun8i_syscon_reg_field,
+ /* The "Internal PHY" of H6 is not on the die. It's on the
+ * co-packaged AC200 chip instead.
+ */
+ .soc_has_internal_phy = false,
+ .support_mii = true,
+ .support_rmii = true,
+ .support_rgmii = true,
+ .rx_delay_max = 31,
+ .tx_delay_max = 7,
+};
+
#define EMAC_BASIC_CTL0 0x00
#define EMAC_BASIC_CTL1 0x04
#define EMAC_INT_STA 0x08
@@ -275,18 +289,18 @@ static void sun8i_dwmac_dma_init(void __iomem *ioaddr,
static void sun8i_dwmac_dma_init_rx(void __iomem *ioaddr,
struct stmmac_dma_cfg *dma_cfg,
- u32 dma_rx_phy, u32 chan)
+ dma_addr_t dma_rx_phy, u32 chan)
{
/* Write RX descriptors address */
- writel(dma_rx_phy, ioaddr + EMAC_RX_DESC_LIST);
+ writel(lower_32_bits(dma_rx_phy), ioaddr + EMAC_RX_DESC_LIST);
}
static void sun8i_dwmac_dma_init_tx(void __iomem *ioaddr,
struct stmmac_dma_cfg *dma_cfg,
- u32 dma_tx_phy, u32 chan)
+ dma_addr_t dma_tx_phy, u32 chan)
{
/* Write TX descriptors address */
- writel(dma_tx_phy, ioaddr + EMAC_TX_DESC_LIST);
+ writel(lower_32_bits(dma_tx_phy), ioaddr + EMAC_TX_DESC_LIST);
}
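The prototype change from u32 to dma_addr_t here (and in the dwmac100/dwmac1000/dwmac4 callbacks further below) lets the core hand over a full-width DMA address even when dma_addr_t is 64 bits, with each variant writing only what its hardware can take. An illustrative, hedged pattern for such 32-bit-only descriptor bases, not lifted from the driver:

	/* This MAC only exposes a 32-bit descriptor base register, so only
	 * the low half of the (possibly 64-bit) address is programmed.  A
	 * driver in this situation would normally also constrain allocations,
	 * e.g. via dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)), so that
	 * upper_32_bits(dma_rx_phy) is guaranteed to be zero.
	 */
	writel(lower_32_bits(dma_rx_phy), ioaddr + EMAC_RX_DESC_LIST);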
/* sun8i_dwmac_dump_regs() - Dump EMAC address space
@@ -884,6 +898,11 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
* address. No need to mask it again.
*/
reg |= 1 << H3_EPHY_ADDR_SHIFT;
+ } else {
+ /* For SoCs without internal PHY the PHY selection bit should be
+ * set to 0 (external PHY).
+ */
+ reg &= ~H3_EPHY_SELECT;
}
if (!of_property_read_u32(node, "allwinner,tx-delay-ps", &val)) {
@@ -977,6 +996,18 @@ static void sun8i_dwmac_exit(struct platform_device *pdev, void *priv)
regulator_disable(gmac->regulator);
}
+static void sun8i_dwmac_set_mac_loopback(void __iomem *ioaddr, bool enable)
+{
+ u32 value = readl(ioaddr + EMAC_BASIC_CTL0);
+
+ if (enable)
+ value |= EMAC_LOOPBACK;
+ else
+ value &= ~EMAC_LOOPBACK;
+
+ writel(value, ioaddr + EMAC_BASIC_CTL0);
+}
+
static const struct stmmac_ops sun8i_dwmac_ops = {
.core_init = sun8i_dwmac_core_init,
.set_mac = sun8i_dwmac_set_mac,
@@ -986,6 +1017,7 @@ static const struct stmmac_ops sun8i_dwmac_ops = {
.flow_ctrl = sun8i_dwmac_flow_ctrl,
.set_umac_addr = sun8i_dwmac_set_umac_addr,
.get_umac_addr = sun8i_dwmac_get_umac_addr,
+ .set_mac_loopback = sun8i_dwmac_set_mac_loopback,
};
static struct mac_device_info *sun8i_dwmac_setup(void *ppriv)
@@ -1203,6 +1235,8 @@ static const struct of_device_id sun8i_dwmac_match[] = {
.data = &emac_variant_r40 },
{ .compatible = "allwinner,sun50i-a64-emac",
.data = &emac_variant_a64 },
+ { .compatible = "allwinner,sun50i-h6-emac",
+ .data = &emac_variant_h6 },
{ }
};
MODULE_DEVICE_TABLE(of, sun8i_dwmac_match);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index b83d3a98f5f1..b70d44ac0990 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -136,6 +136,7 @@ enum inter_frame_gap {
#define GMAC_FRAME_FILTER_DAIF 0x00000008 /* DA Inverse Filtering */
#define GMAC_FRAME_FILTER_PM 0x00000010 /* Pass all multicast */
#define GMAC_FRAME_FILTER_DBF 0x00000020 /* Disable Broadcast frames */
+#define GMAC_FRAME_FILTER_PCF 0x00000080 /* Pass Control frames */
#define GMAC_FRAME_FILTER_SAIF 0x00000100 /* Inverse Filtering */
#define GMAC_FRAME_FILTER_SAF 0x00000200 /* Source Address Filter */
#define GMAC_FRAME_FILTER_HPF 0x00000400 /* Hash or perfect Filter */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index 9fff81170163..3d69da112625 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -162,7 +162,7 @@ static void dwmac1000_set_filter(struct mac_device_info *hw,
memset(mc_filter, 0, sizeof(mc_filter));
if (dev->flags & IFF_PROMISC) {
- value = GMAC_FRAME_FILTER_PR;
+ value = GMAC_FRAME_FILTER_PR | GMAC_FRAME_FILTER_PCF;
} else if (dev->flags & IFF_ALLMULTI) {
value = GMAC_FRAME_FILTER_PM; /* pass all multi */
} else if (!netdev_mc_empty(dev)) {
@@ -188,6 +188,7 @@ static void dwmac1000_set_filter(struct mac_device_info *hw,
}
}
+ value |= GMAC_FRAME_FILTER_HPF;
dwmac1000_set_mchash(ioaddr, mc_filter, mcbitslog2);
/* Handle multiple unicast addresses (perfect filtering) */
@@ -206,6 +207,12 @@ static void dwmac1000_set_filter(struct mac_device_info *hw,
GMAC_ADDR_LOW(reg));
reg++;
}
+
+ while (reg <= perfect_addr_number) {
+ writel(0, ioaddr + GMAC_ADDR_HIGH(reg));
+ writel(0, ioaddr + GMAC_ADDR_LOW(reg));
+ reg++;
+ }
}
#ifdef FRAME_FILTER_DEBUG
@@ -489,6 +496,18 @@ static void dwmac1000_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x,
x->mac_gmii_rx_proto_engine++;
}
+static void dwmac1000_set_mac_loopback(void __iomem *ioaddr, bool enable)
+{
+ u32 value = readl(ioaddr + GMAC_CONTROL);
+
+ if (enable)
+ value |= GMAC_CONTROL_LM;
+ else
+ value &= ~GMAC_CONTROL_LM;
+
+ writel(value, ioaddr + GMAC_CONTROL);
+}
+
const struct stmmac_ops dwmac1000_ops = {
.core_init = dwmac1000_core_init,
.set_mac = stmmac_set_mac,
@@ -508,6 +527,7 @@ const struct stmmac_ops dwmac1000_ops = {
.pcs_ctrl_ane = dwmac1000_ctrl_ane,
.pcs_rane = dwmac1000_rane,
.pcs_get_adv_lp = dwmac1000_get_adv_lp,
+ .set_mac_loopback = dwmac1000_set_mac_loopback,
};
int dwmac1000_setup(struct stmmac_priv *priv)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index 1fdedf77678f..2bac49b49f73 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -112,18 +112,18 @@ static void dwmac1000_dma_init(void __iomem *ioaddr,
static void dwmac1000_dma_init_rx(void __iomem *ioaddr,
struct stmmac_dma_cfg *dma_cfg,
- u32 dma_rx_phy, u32 chan)
+ dma_addr_t dma_rx_phy, u32 chan)
{
/* RX descriptor base address list must be written into DMA CSR3 */
- writel(dma_rx_phy, ioaddr + DMA_RCV_BASE_ADDR);
+ writel(lower_32_bits(dma_rx_phy), ioaddr + DMA_RCV_BASE_ADDR);
}
static void dwmac1000_dma_init_tx(void __iomem *ioaddr,
struct stmmac_dma_cfg *dma_cfg,
- u32 dma_tx_phy, u32 chan)
+ dma_addr_t dma_tx_phy, u32 chan)
{
/* TX descriptor base address list must be written into DMA CSR4 */
- writel(dma_tx_phy, ioaddr + DMA_TX_BASE_ADDR);
+ writel(lower_32_bits(dma_tx_phy), ioaddr + DMA_TX_BASE_ADDR);
}
static u32 dwmac1000_configure_fc(u32 csr6, int rxfifosz)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
index 8842f6627cb8..ebcad8dd99db 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -150,6 +150,18 @@ static void dwmac100_pmt(struct mac_device_info *hw, unsigned long mode)
return;
}
+static void dwmac100_set_mac_loopback(void __iomem *ioaddr, bool enable)
+{
+ u32 value = readl(ioaddr + MAC_CONTROL);
+
+ if (enable)
+ value |= MAC_CONTROL_OM;
+ else
+ value &= ~MAC_CONTROL_OM;
+
+ writel(value, ioaddr + MAC_CONTROL);
+}
+
const struct stmmac_ops dwmac100_ops = {
.core_init = dwmac100_core_init,
.set_mac = stmmac_set_mac,
@@ -161,6 +173,7 @@ const struct stmmac_ops dwmac100_ops = {
.pmt = dwmac100_pmt,
.set_umac_addr = dwmac100_set_umac_addr,
.get_umac_addr = dwmac100_get_umac_addr,
+ .set_mac_loopback = dwmac100_set_mac_loopback,
};
int dwmac100_setup(struct stmmac_priv *priv)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
index c980cc7360a4..8f0d9bc7cab5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
@@ -31,18 +31,18 @@ static void dwmac100_dma_init(void __iomem *ioaddr,
static void dwmac100_dma_init_rx(void __iomem *ioaddr,
struct stmmac_dma_cfg *dma_cfg,
- u32 dma_rx_phy, u32 chan)
+ dma_addr_t dma_rx_phy, u32 chan)
{
/* RX descriptor base addr lists must be written into DMA CSR3 */
- writel(dma_rx_phy, ioaddr + DMA_RCV_BASE_ADDR);
+ writel(lower_32_bits(dma_rx_phy), ioaddr + DMA_RCV_BASE_ADDR);
}
static void dwmac100_dma_init_tx(void __iomem *ioaddr,
struct stmmac_dma_cfg *dma_cfg,
- u32 dma_tx_phy, u32 chan)
+ dma_addr_t dma_tx_phy, u32 chan)
{
/* TX descriptor base addr lists must be written into DMA CSR4 */
- writel(dma_tx_phy, ioaddr + DMA_TX_BASE_ADDR);
+ writel(lower_32_bits(dma_tx_phy), ioaddr + DMA_TX_BASE_ADDR);
}
/* Store and Forward capability is not used at all.
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
index 80234f12bf7f..2ed11a581d80 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -15,8 +15,7 @@
/* MAC registers */
#define GMAC_CONFIG 0x00000000
#define GMAC_PACKET_FILTER 0x00000008
-#define GMAC_HASH_TAB_0_31 0x00000010
-#define GMAC_HASH_TAB_32_63 0x00000014
+#define GMAC_HASH_TAB(x) (0x10 + (x) * 4)
#define GMAC_RX_FLOW_CTRL 0x00000090
#define GMAC_QX_TX_FLOW_CTRL(x) (0x70 + x * 4)
#define GMAC_TXQ_PRTY_MAP0 0x98
@@ -61,6 +60,8 @@
#define GMAC_PACKET_FILTER_PR BIT(0)
#define GMAC_PACKET_FILTER_HMC BIT(2)
#define GMAC_PACKET_FILTER_PM BIT(4)
+#define GMAC_PACKET_FILTER_PCF BIT(7)
+#define GMAC_PACKET_FILTER_HPF BIT(10)
#define GMAC_MAX_PERFECT_ADDRESSES 128
@@ -157,6 +158,7 @@ enum power_event {
#define GMAC_CONFIG_PS BIT(15)
#define GMAC_CONFIG_FES BIT(14)
#define GMAC_CONFIG_DM BIT(13)
+#define GMAC_CONFIG_LM BIT(12)
#define GMAC_CONFIG_DCRS BIT(9)
#define GMAC_CONFIG_TE BIT(1)
#define GMAC_CONFIG_RE BIT(0)
@@ -178,6 +180,7 @@ enum power_event {
#define GMAC_HW_FEAT_MIISEL BIT(0)
/* MAC HW features1 bitmap */
+#define GMAC_HW_HASH_TB_SZ GENMASK(25, 24)
#define GMAC_HW_FEAT_AVSEL BIT(20)
#define GMAC_HW_TSOEN BIT(18)
#define GMAC_HW_TXFIFOSIZE GENMASK(10, 6)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index 99d772517242..01c2e2d83e76 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -400,57 +400,74 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
struct net_device *dev)
{
void __iomem *ioaddr = (void __iomem *)dev->base_addr;
- unsigned int value = 0;
+ int numhashregs = (hw->multicast_filter_bins >> 5);
+ int mcbitslog2 = hw->mcast_bits_log2;
+ unsigned int value;
+ int i;
+ value = readl(ioaddr + GMAC_PACKET_FILTER);
+ value &= ~GMAC_PACKET_FILTER_HMC;
+ value &= ~GMAC_PACKET_FILTER_HPF;
+ value &= ~GMAC_PACKET_FILTER_PCF;
+ value &= ~GMAC_PACKET_FILTER_PM;
+ value &= ~GMAC_PACKET_FILTER_PR;
if (dev->flags & IFF_PROMISC) {
- value = GMAC_PACKET_FILTER_PR;
+ value = GMAC_PACKET_FILTER_PR | GMAC_PACKET_FILTER_PCF;
} else if ((dev->flags & IFF_ALLMULTI) ||
- (netdev_mc_count(dev) > HASH_TABLE_SIZE)) {
+ (netdev_mc_count(dev) > hw->multicast_filter_bins)) {
/* Pass all multi */
- value = GMAC_PACKET_FILTER_PM;
- /* Set the 64 bits of the HASH tab. To be updated if taller
- * hash table is used
- */
- writel(0xffffffff, ioaddr + GMAC_HASH_TAB_0_31);
- writel(0xffffffff, ioaddr + GMAC_HASH_TAB_32_63);
+ value |= GMAC_PACKET_FILTER_PM;
+ /* Set all the bits of the HASH tab */
+ for (i = 0; i < numhashregs; i++)
+ writel(0xffffffff, ioaddr + GMAC_HASH_TAB(i));
} else if (!netdev_mc_empty(dev)) {
- u32 mc_filter[2];
struct netdev_hw_addr *ha;
+ u32 mc_filter[8];
/* Hash filter for multicast */
- value = GMAC_PACKET_FILTER_HMC;
+ value |= GMAC_PACKET_FILTER_HMC;
memset(mc_filter, 0, sizeof(mc_filter));
netdev_for_each_mc_addr(ha, dev) {
- /* The upper 6 bits of the calculated CRC are used to
- * index the content of the Hash Table Reg 0 and 1.
+ /* The upper n bits of the calculated CRC are used to
+ * index the contents of the hash table. The number of
+ * bits used depends on the hardware configuration
+ * selected at core configuration time.
*/
- int bit_nr =
- (bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26);
- /* The most significant bit determines the register
- * to use while the other 5 bits determines the bit
- * within the selected register
+ int bit_nr = bitrev32(~crc32_le(~0, ha->addr,
+ ETH_ALEN)) >> (32 - mcbitslog2);
+ /* The most significant bit determines the register to
+ * use (H/L) while the other 5 bits determine the bit
+ * within the register.
*/
- mc_filter[bit_nr >> 5] |= (1 << (bit_nr & 0x1F));
+ mc_filter[bit_nr >> 5] |= (1 << (bit_nr & 0x1f));
}
- writel(mc_filter[0], ioaddr + GMAC_HASH_TAB_0_31);
- writel(mc_filter[1], ioaddr + GMAC_HASH_TAB_32_63);
+ for (i = 0; i < numhashregs; i++)
+ writel(mc_filter[i], ioaddr + GMAC_HASH_TAB(i));
}
+ value |= GMAC_PACKET_FILTER_HPF;
+
/* Handle multiple unicast addresses */
if (netdev_uc_count(dev) > GMAC_MAX_PERFECT_ADDRESSES) {
/* Switch to promiscuous mode if more than 128 addrs
* are required
*/
value |= GMAC_PACKET_FILTER_PR;
- } else if (!netdev_uc_empty(dev)) {
- int reg = 1;
+ } else {
struct netdev_hw_addr *ha;
+ int reg = 1;
netdev_for_each_uc_addr(ha, dev) {
dwmac4_set_umac_addr(hw, ha->addr, reg);
reg++;
}
+
+ while (reg < GMAC_MAX_PERFECT_ADDRESSES) {
+ writel(0, ioaddr + GMAC_ADDR_HIGH(reg));
+ writel(0, ioaddr + GMAC_ADDR_LOW(reg));
+ reg++;
+ }
}
writel(value, ioaddr + GMAC_PACKET_FILTER);
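The bit_nr computation above is the usual GMAC multicast hash: CRC-32 (crc32_le) over the six address bytes, bit-reversed, with the top mcbitslog2 bits selecting one bit out of multicast_filter_bins. Below is a small standalone sketch, with crc32_le and bitrev32 open-coded so it compiles outside the kernel, that prints which GMAC_HASH_TAB register and bit a given address would set; the sample address and table size are illustrative only.

#include <stdio.h>
#include <stdint.h>

static uint32_t crc32_le(uint32_t crc, const uint8_t *p, int len)
{
	int i;

	while (len--) {
		crc ^= *p++;
		for (i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
	}
	return crc;
}

static uint32_t bitrev32(uint32_t x)
{
	uint32_t r = 0;
	int i;

	for (i = 0; i < 32; i++)
		r |= ((x >> i) & 1u) << (31 - i);
	return r;
}

int main(void)
{
	uint8_t addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };	/* example */
	int mcbitslog2 = 6;						/* 64-bin table */
	uint32_t bit_nr = bitrev32(~crc32_le(~0u, addr, 6)) >> (32 - mcbitslog2);

	printf("GMAC_HASH_TAB(%u), bit %u\n",
	       (unsigned)(bit_nr >> 5), (unsigned)(bit_nr & 0x1f));
	return 0;
}

Because numhashregs is multicast_filter_bins >> 5, the same loop covers 64-, 128- and 256-bin configurations, which is what the GMAC_HASH_TAB(x) macro added to dwmac4.h above and the GMAC_HW_HASH_TB_SZ capability parsed into dma_cap->hash_tb_sz below are for.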
@@ -468,8 +485,9 @@ static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
if (fc & FLOW_RX) {
pr_debug("\tReceive Flow-Control ON\n");
flow |= GMAC_RX_FLOW_CTRL_RFE;
- writel(flow, ioaddr + GMAC_RX_FLOW_CTRL);
}
+ writel(flow, ioaddr + GMAC_RX_FLOW_CTRL);
+
if (fc & FLOW_TX) {
pr_debug("\tTransmit Flow-Control ON\n");
@@ -477,7 +495,7 @@ static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
pr_debug("\tduplex mode: PAUSE %d\n", pause_time);
for (queue = 0; queue < tx_cnt; queue++) {
- flow |= GMAC_TX_FLOW_CTRL_TFE;
+ flow = GMAC_TX_FLOW_CTRL_TFE;
if (duplex)
flow |=
@@ -485,6 +503,9 @@ static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
}
+ } else {
+ for (queue = 0; queue < tx_cnt; queue++)
+ writel(0, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
}
}
@@ -700,6 +721,18 @@ static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x,
x->mac_gmii_rx_proto_engine++;
}
+static void dwmac4_set_mac_loopback(void __iomem *ioaddr, bool enable)
+{
+ u32 value = readl(ioaddr + GMAC_CONFIG);
+
+ if (enable)
+ value |= GMAC_CONFIG_LM;
+ else
+ value &= ~GMAC_CONFIG_LM;
+
+ writel(value, ioaddr + GMAC_CONFIG);
+}
+
const struct stmmac_ops dwmac4_ops = {
.core_init = dwmac4_core_init,
.set_mac = stmmac_set_mac,
@@ -729,6 +762,7 @@ const struct stmmac_ops dwmac4_ops = {
.pcs_get_adv_lp = dwmac4_get_adv_lp,
.debug = dwmac4_debug,
.set_filter = dwmac4_set_filter,
+ .set_mac_loopback = dwmac4_set_mac_loopback,
};
const struct stmmac_ops dwmac410_ops = {
@@ -760,6 +794,7 @@ const struct stmmac_ops dwmac410_ops = {
.pcs_get_adv_lp = dwmac4_get_adv_lp,
.debug = dwmac4_debug,
.set_filter = dwmac4_set_filter,
+ .set_mac_loopback = dwmac4_set_mac_loopback,
};
const struct stmmac_ops dwmac510_ops = {
@@ -796,6 +831,7 @@ const struct stmmac_ops dwmac510_ops = {
.safety_feat_dump = dwmac5_safety_feat_dump,
.rxp_config = dwmac5_rxp_config,
.flex_pps_config = dwmac5_flex_pps_config,
+ .set_mac_loopback = dwmac4_set_mac_loopback,
};
int dwmac4_setup(struct stmmac_priv *priv)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index cf6436d3d6c7..dbde23e7e169 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -443,6 +443,15 @@ static void dwmac4_clear(struct dma_desc *p)
p->des3 = 0;
}
+static int set_16kib_bfsize(int mtu)
+{
+ int ret = 0;
+
+ if (unlikely(mtu >= BUF_SIZE_8KiB))
+ ret = BUF_SIZE_16KiB;
+ return ret;
+}
+
const struct stmmac_desc_ops dwmac4_desc_ops = {
.tx_status = dwmac4_wrback_get_tx_status,
.rx_status = dwmac4_wrback_get_rx_status,
@@ -469,4 +478,6 @@ const struct stmmac_desc_ops dwmac4_desc_ops = {
.clear = dwmac4_clear,
};
-const struct stmmac_mode_ops dwmac4_ring_mode_ops = { };
+const struct stmmac_mode_ops dwmac4_ring_mode_ops = {
+ .set_16kib_bfsize = set_16kib_bfsize,
+};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
index 0f208e13da9f..3ed5508586ef 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
@@ -70,7 +70,7 @@ static void dwmac4_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
static void dwmac4_dma_init_rx_chan(void __iomem *ioaddr,
struct stmmac_dma_cfg *dma_cfg,
- u32 dma_rx_phy, u32 chan)
+ dma_addr_t dma_rx_phy, u32 chan)
{
u32 value;
u32 rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
@@ -79,12 +79,12 @@ static void dwmac4_dma_init_rx_chan(void __iomem *ioaddr,
value = value | (rxpbl << DMA_BUS_MODE_RPBL_SHIFT);
writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
- writel(dma_rx_phy, ioaddr + DMA_CHAN_RX_BASE_ADDR(chan));
+ writel(lower_32_bits(dma_rx_phy), ioaddr + DMA_CHAN_RX_BASE_ADDR(chan));
}
static void dwmac4_dma_init_tx_chan(void __iomem *ioaddr,
struct stmmac_dma_cfg *dma_cfg,
- u32 dma_tx_phy, u32 chan)
+ dma_addr_t dma_tx_phy, u32 chan)
{
u32 value;
u32 txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
@@ -97,7 +97,7 @@ static void dwmac4_dma_init_tx_chan(void __iomem *ioaddr,
writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan));
- writel(dma_tx_phy, ioaddr + DMA_CHAN_TX_BASE_ADDR(chan));
+ writel(lower_32_bits(dma_tx_phy), ioaddr + DMA_CHAN_TX_BASE_ADDR(chan));
}
static void dwmac4_dma_init_channel(void __iomem *ioaddr,
@@ -351,6 +351,7 @@ static void dwmac4_get_hw_feature(void __iomem *ioaddr,
/* MAC HW feature1 */
hw_cap = readl(ioaddr + GMAC_HW_FEATURE1);
+ dma_cap->hash_tb_sz = (hw_cap & GMAC_HW_HASH_TB_SZ) >> 24;
dma_cap->av = (hw_cap & GMAC_HW_FEAT_AVSEL) >> 20;
dma_cap->tsoen = (hw_cap & GMAC_HW_TSOEN) >> 18;
/* RX and TX FIFO sizes are encoded as log2(n / 128). Undo that by
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
index 85826524683c..f2a29a90e085 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
@@ -85,10 +85,6 @@ void dwmac4_dma_stop_rx(void __iomem *ioaddr, u32 chan)
value &= ~DMA_CONTROL_SR;
writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
-
- value = readl(ioaddr + GMAC_CONFIG);
- value &= ~GMAC_CONFIG_RE;
- writel(value, ioaddr + GMAC_CONFIG);
}
void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len, u32 chan)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
index 085b700a4994..7f86dffb264d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
@@ -15,10 +15,14 @@
/* MAC Registers */
#define XGMAC_TX_CONFIG 0x00000000
#define XGMAC_CONFIG_SS_OFF 29
-#define XGMAC_CONFIG_SS_MASK GENMASK(30, 29)
+#define XGMAC_CONFIG_SS_MASK GENMASK(31, 29)
#define XGMAC_CONFIG_SS_10000 (0x0 << XGMAC_CONFIG_SS_OFF)
-#define XGMAC_CONFIG_SS_2500 (0x2 << XGMAC_CONFIG_SS_OFF)
-#define XGMAC_CONFIG_SS_1000 (0x3 << XGMAC_CONFIG_SS_OFF)
+#define XGMAC_CONFIG_SS_2500_GMII (0x2 << XGMAC_CONFIG_SS_OFF)
+#define XGMAC_CONFIG_SS_1000_GMII (0x3 << XGMAC_CONFIG_SS_OFF)
+#define XGMAC_CONFIG_SS_100_MII (0x4 << XGMAC_CONFIG_SS_OFF)
+#define XGMAC_CONFIG_SS_5000 (0x5 << XGMAC_CONFIG_SS_OFF)
+#define XGMAC_CONFIG_SS_2500 (0x6 << XGMAC_CONFIG_SS_OFF)
+#define XGMAC_CONFIG_SS_10_MII (0x7 << XGMAC_CONFIG_SS_OFF)
#define XGMAC_CONFIG_SARC GENMASK(22, 20)
#define XGMAC_CONFIG_SARC_SHIFT 20
#define XGMAC_CONFIG_JD BIT(16)
@@ -29,6 +33,7 @@
#define XGMAC_CONFIG_GPSL GENMASK(29, 16)
#define XGMAC_CONFIG_GPSL_SHIFT 16
#define XGMAC_CONFIG_S2KP BIT(11)
+#define XGMAC_CONFIG_LM BIT(10)
#define XGMAC_CONFIG_IPC BIT(9)
#define XGMAC_CONFIG_JE BIT(8)
#define XGMAC_CONFIG_WD BIT(7)
@@ -39,6 +44,7 @@
#define XGMAC_CORE_INIT_RX 0
#define XGMAC_PACKET_FILTER 0x00000008
#define XGMAC_FILTER_RA BIT(31)
+#define XGMAC_FILTER_PCF BIT(7)
#define XGMAC_FILTER_PM BIT(4)
#define XGMAC_FILTER_HMC BIT(2)
#define XGMAC_FILTER_PR BIT(0)
@@ -81,6 +87,7 @@
#define XGMAC_HWFEAT_GMIISEL BIT(1)
#define XGMAC_HW_FEATURE1 0x00000120
#define XGMAC_HWFEAT_TSOEN BIT(18)
+#define XGMAC_HWFEAT_ADDR64 GENMASK(15, 14)
#define XGMAC_HWFEAT_TXFIFOSIZE GENMASK(10, 6)
#define XGMAC_HWFEAT_RXFIFOSIZE GENMASK(4, 0)
#define XGMAC_HW_FEATURE2 0x00000124
@@ -166,6 +173,7 @@
#define XGMAC_EN_LPI BIT(15)
#define XGMAC_LPI_XIT_PKT BIT(14)
#define XGMAC_AAL BIT(12)
+#define XGMAC_EAME BIT(11)
#define XGMAC_BLEN GENMASK(7, 1)
#define XGMAC_BLEN256 BIT(7)
#define XGMAC_BLEN128 BIT(6)
@@ -175,6 +183,10 @@
#define XGMAC_BLEN8 BIT(2)
#define XGMAC_BLEN4 BIT(1)
#define XGMAC_UNDEF BIT(0)
+#define XGMAC_TX_EDMA_CTRL 0x00003040
+#define XGMAC_TDPS GENMASK(29, 0)
+#define XGMAC_RX_EDMA_CTRL 0x00003044
+#define XGMAC_RDPS GENMASK(29, 0)
#define XGMAC_DMA_CH_CONTROL(x) (0x00003100 + (0x80 * (x)))
#define XGMAC_PBLx8 BIT(16)
#define XGMAC_DMA_CH_TX_CONTROL(x) (0x00003104 + (0x80 * (x)))
@@ -187,7 +199,9 @@
#define XGMAC_RxPBL GENMASK(21, 16)
#define XGMAC_RxPBL_SHIFT 16
#define XGMAC_RXST BIT(0)
+#define XGMAC_DMA_CH_TxDESC_HADDR(x) (0x00003110 + (0x80 * (x)))
#define XGMAC_DMA_CH_TxDESC_LADDR(x) (0x00003114 + (0x80 * (x)))
+#define XGMAC_DMA_CH_RxDESC_HADDR(x) (0x00003118 + (0x80 * (x)))
#define XGMAC_DMA_CH_RxDESC_LADDR(x) (0x0000311c + (0x80 * (x)))
#define XGMAC_DMA_CH_TxDESC_TAIL_LPTR(x) (0x00003124 + (0x80 * (x)))
#define XGMAC_DMA_CH_RxDESC_TAIL_LPTR(x) (0x0000312c + (0x80 * (x)))
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index 64b8cb88ea45..0a32c96a7854 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -36,7 +36,7 @@ static void dwxgmac2_core_init(struct mac_device_info *hw,
switch (hw->ps) {
case SPEED_10000:
- tx |= hw->link.speed10000;
+ tx |= hw->link.xgmii.speed10000;
break;
case SPEED_2500:
tx |= hw->link.speed2500;
@@ -310,7 +310,7 @@ static void dwxgmac2_set_filter(struct mac_device_info *hw,
u32 value = XGMAC_FILTER_RA;
if (dev->flags & IFF_PROMISC) {
- value |= XGMAC_FILTER_PR;
+ value |= XGMAC_FILTER_PR | XGMAC_FILTER_PCF;
} else if ((dev->flags & IFF_ALLMULTI) ||
(netdev_mc_count(dev) > HASH_TABLE_SIZE)) {
value |= XGMAC_FILTER_PM;
@@ -321,6 +321,18 @@ static void dwxgmac2_set_filter(struct mac_device_info *hw,
writel(value, ioaddr + XGMAC_PACKET_FILTER);
}
+static void dwxgmac2_set_mac_loopback(void __iomem *ioaddr, bool enable)
+{
+ u32 value = readl(ioaddr + XGMAC_RX_CONFIG);
+
+ if (enable)
+ value |= XGMAC_CONFIG_LM;
+ else
+ value &= ~XGMAC_CONFIG_LM;
+
+ writel(value, ioaddr + XGMAC_RX_CONFIG);
+}
+
const struct stmmac_ops dwxgmac210_ops = {
.core_init = dwxgmac2_core_init,
.set_mac = dwxgmac2_set_mac,
@@ -350,6 +362,7 @@ const struct stmmac_ops dwxgmac210_ops = {
.pcs_get_adv_lp = NULL,
.debug = NULL,
.set_filter = dwxgmac2_set_filter,
+ .set_mac_loopback = dwxgmac2_set_mac_loopback,
};
int dwxgmac2_setup(struct stmmac_priv *priv)
@@ -368,11 +381,13 @@ int dwxgmac2_setup(struct stmmac_priv *priv)
mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
mac->link.duplex = 0;
- mac->link.speed10 = 0;
- mac->link.speed100 = 0;
- mac->link.speed1000 = XGMAC_CONFIG_SS_1000;
- mac->link.speed2500 = XGMAC_CONFIG_SS_2500;
- mac->link.speed10000 = XGMAC_CONFIG_SS_10000;
+ mac->link.speed10 = XGMAC_CONFIG_SS_10_MII;
+ mac->link.speed100 = XGMAC_CONFIG_SS_100_MII;
+ mac->link.speed1000 = XGMAC_CONFIG_SS_1000_GMII;
+ mac->link.speed2500 = XGMAC_CONFIG_SS_2500_GMII;
+ mac->link.xgmii.speed2500 = XGMAC_CONFIG_SS_2500;
+ mac->link.xgmii.speed5000 = XGMAC_CONFIG_SS_5000;
+ mac->link.xgmii.speed10000 = XGMAC_CONFIG_SS_10000;
mac->link.speed_mask = XGMAC_CONFIG_SS_MASK;
mac->mii.addr = XGMAC_MDIO_ADDR;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
index 98fa471da7c0..c4c45402b8f8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
@@ -242,8 +242,8 @@ static void dwxgmac2_get_addr(struct dma_desc *p, unsigned int *addr)
static void dwxgmac2_set_addr(struct dma_desc *p, dma_addr_t addr)
{
- p->des0 = cpu_to_le32(addr);
- p->des1 = 0;
+ p->des0 = cpu_to_le32(lower_32_bits(addr));
+ p->des1 = cpu_to_le32(upper_32_bits(addr));
}
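A minimal sketch, assuming a 40-bit DMA address picked for illustration, of the high/low split that the new des0/des1 and HADDR/LADDR writes rely on; the kernel's lower_32_bits()/upper_32_bits() reduce to the same mask and shift:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t dma_addr = 0x0000004512345678ULL;    /* hypothetical 40-bit bus address */
    uint32_t lo = (uint32_t)(dma_addr & 0xffffffffULL);    /* -> des0 / *_LADDR */
    uint32_t hi = (uint32_t)(dma_addr >> 32);              /* -> des1 / *_HADDR */

    printf("LADDR=0x%08" PRIx32 " HADDR=0x%08" PRIx32 "\n", lo, hi);
    return 0;
}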
static void dwxgmac2_clear(struct dma_desc *p)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
index e79037f511e1..a4f236e3593e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
@@ -27,7 +27,7 @@ static void dwxgmac2_dma_init(void __iomem *ioaddr,
if (dma_cfg->aal)
value |= XGMAC_AAL;
- writel(value, ioaddr + XGMAC_DMA_SYSBUS_MODE);
+ writel(value | XGMAC_EAME, ioaddr + XGMAC_DMA_SYSBUS_MODE);
}
static void dwxgmac2_dma_init_chan(void __iomem *ioaddr,
@@ -44,7 +44,7 @@ static void dwxgmac2_dma_init_chan(void __iomem *ioaddr,
static void dwxgmac2_dma_init_rx_chan(void __iomem *ioaddr,
struct stmmac_dma_cfg *dma_cfg,
- u32 dma_rx_phy, u32 chan)
+ dma_addr_t phy, u32 chan)
{
u32 rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
u32 value;
@@ -54,12 +54,13 @@ static void dwxgmac2_dma_init_rx_chan(void __iomem *ioaddr,
value |= (rxpbl << XGMAC_RxPBL_SHIFT) & XGMAC_RxPBL;
writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
- writel(dma_rx_phy, ioaddr + XGMAC_DMA_CH_RxDESC_LADDR(chan));
+ writel(upper_32_bits(phy), ioaddr + XGMAC_DMA_CH_RxDESC_HADDR(chan));
+ writel(lower_32_bits(phy), ioaddr + XGMAC_DMA_CH_RxDESC_LADDR(chan));
}
static void dwxgmac2_dma_init_tx_chan(void __iomem *ioaddr,
struct stmmac_dma_cfg *dma_cfg,
- u32 dma_tx_phy, u32 chan)
+ dma_addr_t phy, u32 chan)
{
u32 txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
u32 value;
@@ -70,7 +71,8 @@ static void dwxgmac2_dma_init_tx_chan(void __iomem *ioaddr,
value |= XGMAC_OSP;
writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
- writel(dma_tx_phy, ioaddr + XGMAC_DMA_CH_TxDESC_LADDR(chan));
+ writel(upper_32_bits(phy), ioaddr + XGMAC_DMA_CH_TxDESC_HADDR(chan));
+ writel(lower_32_bits(phy), ioaddr + XGMAC_DMA_CH_TxDESC_LADDR(chan));
}
static void dwxgmac2_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
@@ -91,11 +93,11 @@ static void dwxgmac2_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
value |= (axi->axi_rd_osr_lmt << XGMAC_RD_OSR_LMT_SHIFT) &
XGMAC_RD_OSR_LMT;
+ if (!axi->axi_fb)
+ value |= XGMAC_UNDEF;
+
value &= ~XGMAC_BLEN;
for (i = 0; i < AXI_BLEN; i++) {
- if (axi->axi_blen[i])
- value &= ~XGMAC_UNDEF;
-
switch (axi->axi_blen[i]) {
case 256:
value |= XGMAC_BLEN256;
@@ -122,6 +124,8 @@ static void dwxgmac2_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
}
writel(value, ioaddr + XGMAC_DMA_SYSBUS_MODE);
+ writel(XGMAC_TDPS, ioaddr + XGMAC_TX_EDMA_CTRL);
+ writel(XGMAC_RDPS, ioaddr + XGMAC_RX_EDMA_CTRL);
}
static void dwxgmac2_dma_rx_mode(void __iomem *ioaddr, int mode,
@@ -299,10 +303,6 @@ static void dwxgmac2_dma_stop_rx(void __iomem *ioaddr, u32 chan)
value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
value &= ~XGMAC_RXST;
writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
-
- value = readl(ioaddr + XGMAC_RX_CONFIG);
- value &= ~XGMAC_CONFIG_RE;
- writel(value, ioaddr + XGMAC_RX_CONFIG);
}
static int dwxgmac2_dma_interrupt(void __iomem *ioaddr,
@@ -363,6 +363,23 @@ static void dwxgmac2_get_hw_feature(void __iomem *ioaddr,
/* MAC HW feature 1 */
hw_cap = readl(ioaddr + XGMAC_HW_FEATURE1);
dma_cap->tsoen = (hw_cap & XGMAC_HWFEAT_TSOEN) >> 18;
+
+ dma_cap->addr64 = (hw_cap & XGMAC_HWFEAT_ADDR64) >> 14;
+ switch (dma_cap->addr64) {
+ case 0:
+ dma_cap->addr64 = 32;
+ break;
+ case 1:
+ dma_cap->addr64 = 40;
+ break;
+ case 2:
+ dma_cap->addr64 = 48;
+ break;
+ default:
+ dma_cap->addr64 = 32;
+ break;
+ }
+
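For illustration, a tiny standalone decode of the new ADDR64 field (the hw_cap value is invented; the 0/1/2 encoding and the fallback to 32 bits mirror the switch above):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t hw_cap = 0x00004000;    /* hypothetical HW_FEATURE1: bits [15:14] = 0b01 */
    unsigned int field = (hw_cap & 0x0000c000u) >> 14;
    static const unsigned int width[4] = { 32, 40, 48, 32 };    /* reserved -> 32 */

    printf("ADDR64 field %u -> %u-bit DMA addressing\n", field, width[field & 3]);
    return 0;
}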
dma_cap->tx_fifo_size =
128 << ((hw_cap & XGMAC_HWFEAT_TXFIFOSIZE) >> 6);
dma_cap->rx_fifo_size =
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c
index 81b966a8261b..6c61b753b55e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.c
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c
@@ -81,6 +81,7 @@ static const struct stmmac_hwif_entry {
const void *hwtimestamp;
const void *mode;
const void *tc;
+ const void *mmc;
int (*setup)(struct stmmac_priv *priv);
int (*quirks)(struct stmmac_priv *priv);
} stmmac_hw[] = {
@@ -100,6 +101,7 @@ static const struct stmmac_hwif_entry {
.hwtimestamp = &stmmac_ptp,
.mode = NULL,
.tc = NULL,
+ .mmc = &dwmac_mmc_ops,
.setup = dwmac100_setup,
.quirks = stmmac_dwmac1_quirks,
}, {
@@ -117,6 +119,7 @@ static const struct stmmac_hwif_entry {
.hwtimestamp = &stmmac_ptp,
.mode = NULL,
.tc = NULL,
+ .mmc = &dwmac_mmc_ops,
.setup = dwmac1000_setup,
.quirks = stmmac_dwmac1_quirks,
}, {
@@ -134,6 +137,7 @@ static const struct stmmac_hwif_entry {
.hwtimestamp = &stmmac_ptp,
.mode = NULL,
.tc = &dwmac510_tc_ops,
+ .mmc = &dwmac_mmc_ops,
.setup = dwmac4_setup,
.quirks = stmmac_dwmac4_quirks,
}, {
@@ -151,6 +155,7 @@ static const struct stmmac_hwif_entry {
.hwtimestamp = &stmmac_ptp,
.mode = &dwmac4_ring_mode_ops,
.tc = &dwmac510_tc_ops,
+ .mmc = &dwmac_mmc_ops,
.setup = dwmac4_setup,
.quirks = NULL,
}, {
@@ -168,6 +173,7 @@ static const struct stmmac_hwif_entry {
.hwtimestamp = &stmmac_ptp,
.mode = &dwmac4_ring_mode_ops,
.tc = &dwmac510_tc_ops,
+ .mmc = &dwmac_mmc_ops,
.setup = dwmac4_setup,
.quirks = NULL,
}, {
@@ -185,6 +191,7 @@ static const struct stmmac_hwif_entry {
.hwtimestamp = &stmmac_ptp,
.mode = &dwmac4_ring_mode_ops,
.tc = &dwmac510_tc_ops,
+ .mmc = &dwmac_mmc_ops,
.setup = dwmac4_setup,
.quirks = NULL,
}, {
@@ -202,6 +209,7 @@ static const struct stmmac_hwif_entry {
.hwtimestamp = &stmmac_ptp,
.mode = NULL,
.tc = &dwmac510_tc_ops,
+ .mmc = NULL,
.setup = dwxgmac2_setup,
.quirks = NULL,
},
@@ -267,6 +275,7 @@ int stmmac_hwif_init(struct stmmac_priv *priv)
mac->ptp = mac->ptp ? : entry->hwtimestamp;
mac->mode = mac->mode ? : entry->mode;
mac->tc = mac->tc ? : entry->tc;
+ mac->mmc = mac->mmc ? : entry->mmc;
priv->hw = mac;
priv->ptpaddr = priv->ioaddr + entry->regs.ptp_off;
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index 5bb00234d961..278c0dbec9d9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -6,6 +6,7 @@
#define __STMMAC_HWIF_H__
#include <linux/netdevice.h>
+#include <linux/stmmac.h>
#define stmmac_do_void_callback(__priv, __module, __cname, __arg0, __args...) \
({ \
@@ -149,10 +150,10 @@ struct stmmac_dma_ops {
struct stmmac_dma_cfg *dma_cfg, u32 chan);
void (*init_rx_chan)(void __iomem *ioaddr,
struct stmmac_dma_cfg *dma_cfg,
- u32 dma_rx_phy, u32 chan);
+ dma_addr_t phy, u32 chan);
void (*init_tx_chan)(void __iomem *ioaddr,
struct stmmac_dma_cfg *dma_cfg,
- u32 dma_tx_phy, u32 chan);
+ dma_addr_t phy, u32 chan);
/* Configure the AXI Bus Mode Register */
void (*axi)(void __iomem *ioaddr, struct stmmac_axi *axi);
/* Dump DMA registers */
@@ -324,6 +325,8 @@ struct stmmac_ops {
int (*flex_pps_config)(void __iomem *ioaddr, int index,
struct stmmac_pps_cfg *cfg, bool enable,
u32 sub_second_inc, u32 systime_flags);
+ /* Loopback for selftests */
+ void (*set_mac_loopback)(void __iomem *ioaddr, bool enable);
};
#define stmmac_core_init(__priv, __args...) \
@@ -392,6 +395,8 @@ struct stmmac_ops {
stmmac_do_callback(__priv, mac, rxp_config, __args)
#define stmmac_flex_pps_config(__priv, __args...) \
stmmac_do_callback(__priv, mac, flex_pps_config, __args)
+#define stmmac_set_mac_loopback(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, set_mac_loopback, __args)
/* PTP and HW Timer helpers */
struct stmmac_hwtimestamp {
@@ -464,6 +469,21 @@ struct stmmac_tc_ops {
#define stmmac_tc_setup_cbs(__priv, __args...) \
stmmac_do_callback(__priv, tc, setup_cbs, __args)
+struct stmmac_counters;
+
+struct stmmac_mmc_ops {
+ void (*ctrl)(void __iomem *ioaddr, unsigned int mode);
+ void (*intr_all_mask)(void __iomem *ioaddr);
+ void (*read)(void __iomem *ioaddr, struct stmmac_counters *mmc);
+};
+
+#define stmmac_mmc_ctrl(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mmc, ctrl, __args)
+#define stmmac_mmc_intr_all_mask(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mmc, intr_all_mask, __args)
+#define stmmac_mmc_read(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mmc, read, __args)
+
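The three wrappers above follow the driver's existing stmmac_do_void_callback() pattern. As a rough standalone model (names invented, not the kernel macro text), the point is that a core with no MMC block, such as the XGMAC entry that sets .mmc = NULL in hwif.c, simply skips the call instead of dereferencing a NULL pointer:

#include <stdio.h>

struct mmc_ops {
    void (*intr_all_mask)(void *ioaddr);
};

struct priv {
    const struct mmc_ops *mmc;
    void *mmcaddr;
};

/* NULL-safe dispatch, in the spirit of stmmac_do_void_callback() */
#define mmc_intr_all_mask(p) \
    do { \
        if ((p)->mmc && (p)->mmc->intr_all_mask) \
            (p)->mmc->intr_all_mask((p)->mmcaddr); \
    } while (0)

static void fake_mask(void *ioaddr)
{
    printf("masking MMC interrupts at %p\n", ioaddr);
}

static const struct mmc_ops fake_mmc_ops = { .intr_all_mask = fake_mask };
static int fake_reg;

int main(void)
{
    struct priv with_mmc = { .mmc = &fake_mmc_ops, .mmcaddr = &fake_reg };
    struct priv without_mmc = { .mmc = NULL };

    mmc_intr_all_mask(&with_mmc);       /* calls fake_mask() */
    mmc_intr_all_mask(&without_mmc);    /* no-op, no crash */
    return 0;
}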
struct stmmac_regs_off {
u32 ptp_off;
u32 mmc_off;
@@ -482,6 +502,7 @@ extern const struct stmmac_tc_ops dwmac510_tc_ops;
extern const struct stmmac_ops dwxgmac210_ops;
extern const struct stmmac_dma_ops dwxgmac210_dma_ops;
extern const struct stmmac_desc_ops dwxgmac210_desc_ops;
+extern const struct stmmac_mmc_ops dwmac_mmc_ops;
#define GMAC_VERSION 0x00000020 /* GMAC CORE Version */
#define GMAC4_VERSION 0x00000110 /* GMAC4+ CORE Version */
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h
index 6c8fdee3b25a..3587ceb9faf5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc.h
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h
@@ -118,8 +118,4 @@ struct stmmac_counters {
unsigned int mmc_rx_icmp_err_octets;
};
-void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode);
-void dwmac_mmc_intr_all_mask(void __iomem *ioaddr);
-void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc);
-
#endif /* __MMC_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
index 1d967b8f91a0..a471db6d7b11 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/io.h>
+#include "hwif.h"
#include "mmc.h"
/* MAC Management Counters register offset */
@@ -118,7 +119,7 @@
#define MMC_RX_ICMP_GD_OCTETS 0x180
#define MMC_RX_ICMP_ERR_OCTETS 0x184
-void dwmac_mmc_ctrl(void __iomem *mmcaddr, unsigned int mode)
+static void dwmac_mmc_ctrl(void __iomem *mmcaddr, unsigned int mode)
{
u32 value = readl(mmcaddr + MMC_CNTRL);
@@ -131,7 +132,7 @@ void dwmac_mmc_ctrl(void __iomem *mmcaddr, unsigned int mode)
}
/* To mask all interrupts. */
-void dwmac_mmc_intr_all_mask(void __iomem *mmcaddr)
+static void dwmac_mmc_intr_all_mask(void __iomem *mmcaddr)
{
writel(MMC_DEFAULT_MASK, mmcaddr + MMC_RX_INTR_MASK);
writel(MMC_DEFAULT_MASK, mmcaddr + MMC_TX_INTR_MASK);
@@ -143,7 +144,7 @@ void dwmac_mmc_intr_all_mask(void __iomem *mmcaddr)
* counter after a read. So all the fields of the mmc struct
* have to be incremented.
*/
-void dwmac_mmc_read(void __iomem *mmcaddr, struct stmmac_counters *mmc)
+static void dwmac_mmc_read(void __iomem *mmcaddr, struct stmmac_counters *mmc)
{
mmc->mmc_tx_octetcount_gb += readl(mmcaddr + MMC_TX_OCTETCOUNT_GB);
mmc->mmc_tx_framecount_gb += readl(mmcaddr + MMC_TX_FRAMECOUNT_GB);
@@ -256,3 +257,9 @@ void dwmac_mmc_read(void __iomem *mmcaddr, struct stmmac_counters *mmc)
mmc->mmc_rx_icmp_gd_octets += readl(mmcaddr + MMC_RX_ICMP_GD_OCTETS);
mmc->mmc_rx_icmp_err_octets += readl(mmcaddr + MMC_RX_ICMP_ERR_OCTETS);
}
+
+const struct stmmac_mmc_ops dwmac_mmc_ops = {
+ .ctrl = dwmac_mmc_ctrl,
+ .intr_all_mask = dwmac_mmc_intr_all_mask,
+ .read = dwmac_mmc_read,
+};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 62a64356ad22..5cd966c154f3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -14,12 +14,13 @@
#include <linux/clk.h>
#include <linux/stmmac.h>
-#include <linux/phy.h>
+#include <linux/phylink.h>
#include <linux/pci.h>
#include "common.h"
#include <linux/ptp_clock_kernel.h>
#include <linux/net_tstamp.h>
#include <linux/reset.h>
+#include <net/page_pool.h>
struct stmmac_resources {
void __iomem *addr;
@@ -54,13 +55,19 @@ struct stmmac_tx_queue {
u32 mss;
};
+struct stmmac_rx_buffer {
+ struct page *page;
+ dma_addr_t addr;
+};
+
struct stmmac_rx_queue {
+ u32 rx_count_frames;
u32 queue_index;
+ struct page_pool *page_pool;
+ struct stmmac_rx_buffer *buf_pool;
struct stmmac_priv *priv_data;
struct dma_extended_desc *dma_erx;
struct dma_desc *dma_rx ____cacheline_aligned_in_smp;
- struct sk_buff **rx_skbuff;
- dma_addr_t *rx_skbuff_dma;
unsigned int cur_rx;
unsigned int dirty_rx;
u32 rx_zeroc_thresh;
@@ -110,6 +117,7 @@ struct stmmac_priv {
/* Frequently used values are kept adjacent for cache effect */
u32 tx_coal_frames;
u32 tx_coal_timer;
+ u32 rx_coal_frames;
int tx_coalesce;
int hwts_tx_en;
@@ -137,14 +145,15 @@ struct stmmac_priv {
/* Generic channel for NAPI */
struct stmmac_channel channel[STMMAC_CH_MAX];
- bool oldlink;
int speed;
- int oldduplex;
unsigned int flow_ctrl;
unsigned int pause;
struct mii_bus *mii;
int mii_irq[PHY_MAX_ADDR];
+ struct phylink_config phylink_config;
+ struct phylink *phylink;
+
struct stmmac_extra_stats xstats ____cacheline_aligned_in_smp;
struct stmmac_safety_stats sstats;
struct plat_stmmacenet_data *plat;
@@ -219,4 +228,26 @@ int stmmac_dvr_probe(struct device *device,
void stmmac_disable_eee_mode(struct stmmac_priv *priv);
bool stmmac_eee_init(struct stmmac_priv *priv);
+#if IS_ENABLED(CONFIG_STMMAC_SELFTESTS)
+void stmmac_selftest_run(struct net_device *dev,
+ struct ethtool_test *etest, u64 *buf);
+void stmmac_selftest_get_strings(struct stmmac_priv *priv, u8 *data);
+int stmmac_selftest_get_count(struct stmmac_priv *priv);
+#else
+static inline void stmmac_selftest_run(struct net_device *dev,
+ struct ethtool_test *etest, u64 *buf)
+{
+ /* Not enabled */
+}
+static inline void stmmac_selftest_get_strings(struct stmmac_priv *priv,
+ u8 *data)
+{
+ /* Not enabled */
+}
+static inline int stmmac_selftest_get_count(struct stmmac_priv *priv)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* CONFIG_STMMAC_SELFTESTS */
+
#endif /* __STMMAC_H__ */
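The #if/#else block above is the usual compile-out stub pattern: callers keep compiling whether or not the selftests are built. A standalone illustration (config symbol and function names invented for the example):

#include <errno.h>
#include <stdio.h>

#define CONFIG_EXAMPLE_SELFTESTS 0    /* flip to 1 to model a selftest-enabled build */

#if CONFIG_EXAMPLE_SELFTESTS
static int selftest_get_count(void) { return 3; }
#else
static int selftest_get_count(void) { return -EOPNOTSUPP; }
#endif

int main(void)
{
    int n = selftest_get_count();

    if (n < 0)
        printf("selftests not built in (%d)\n", n);
    else
        printf("%d selftests available\n", n);
    return 0;
}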
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index e7af3dc3dd8f..6efb66820d4c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -12,7 +12,7 @@
#include <linux/ethtool.h>
#include <linux/interrupt.h>
#include <linux/mii.h>
-#include <linux/phy.h>
+#include <linux/phylink.h>
#include <linux/net_tstamp.h>
#include <asm/io.h>
@@ -264,7 +264,6 @@ static int stmmac_ethtool_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *cmd)
{
struct stmmac_priv *priv = netdev_priv(dev);
- struct phy_device *phy = dev->phydev;
if (priv->hw->pcs & STMMAC_PCS_RGMII ||
priv->hw->pcs & STMMAC_PCS_SGMII) {
@@ -343,18 +342,7 @@ static int stmmac_ethtool_get_link_ksettings(struct net_device *dev,
return 0;
}
- if (phy == NULL) {
- pr_err("%s: %s: PHY is not registered\n",
- __func__, dev->name);
- return -ENODEV;
- }
- if (!netif_running(dev)) {
- pr_err("%s: interface is disabled: we cannot track "
- "link speed / duplex setting\n", dev->name);
- return -EBUSY;
- }
- phy_ethtool_ksettings_get(phy, cmd);
- return 0;
+ return phylink_ethtool_ksettings_get(priv->phylink, cmd);
}
static int
@@ -362,8 +350,6 @@ stmmac_ethtool_set_link_ksettings(struct net_device *dev,
const struct ethtool_link_ksettings *cmd)
{
struct stmmac_priv *priv = netdev_priv(dev);
- struct phy_device *phy = dev->phydev;
- int rc;
if (priv->hw->pcs & STMMAC_PCS_RGMII ||
priv->hw->pcs & STMMAC_PCS_SGMII) {
@@ -387,9 +373,7 @@ stmmac_ethtool_set_link_ksettings(struct net_device *dev,
return 0;
}
- rc = phy_ethtool_ksettings_set(phy, cmd);
-
- return rc;
+ return phylink_ethtool_ksettings_set(priv->phylink, cmd);
}
static u32 stmmac_ethtool_getmsglevel(struct net_device *dev)
@@ -433,6 +417,13 @@ static void stmmac_ethtool_gregs(struct net_device *dev,
NUM_DWMAC1000_DMA_REGS * 4);
}
+static int stmmac_nway_reset(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ return phylink_ethtool_nway_reset(priv->phylink);
+}
+
static void
stmmac_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
@@ -440,28 +431,13 @@ stmmac_get_pauseparam(struct net_device *netdev,
struct stmmac_priv *priv = netdev_priv(netdev);
struct rgmii_adv adv_lp;
- pause->rx_pause = 0;
- pause->tx_pause = 0;
-
if (priv->hw->pcs && !stmmac_pcs_get_adv_lp(priv, priv->ioaddr, &adv_lp)) {
pause->autoneg = 1;
if (!adv_lp.pause)
return;
} else {
- if (!linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
- netdev->phydev->supported) ||
- !linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
- netdev->phydev->supported))
- return;
+ phylink_ethtool_get_pauseparam(priv->phylink, pause);
}
-
- pause->autoneg = netdev->phydev->autoneg;
-
- if (priv->flow_ctrl & FLOW_RX)
- pause->rx_pause = 1;
- if (priv->flow_ctrl & FLOW_TX)
- pause->tx_pause = 1;
-
}
static int
@@ -469,39 +445,16 @@ stmmac_set_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
struct stmmac_priv *priv = netdev_priv(netdev);
- u32 tx_cnt = priv->plat->tx_queues_to_use;
- struct phy_device *phy = netdev->phydev;
- int new_pause = FLOW_OFF;
struct rgmii_adv adv_lp;
if (priv->hw->pcs && !stmmac_pcs_get_adv_lp(priv, priv->ioaddr, &adv_lp)) {
pause->autoneg = 1;
if (!adv_lp.pause)
return -EOPNOTSUPP;
+ return 0;
} else {
- if (!linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
- phy->supported) ||
- !linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
- phy->supported))
- return -EOPNOTSUPP;
- }
-
- if (pause->rx_pause)
- new_pause |= FLOW_RX;
- if (pause->tx_pause)
- new_pause |= FLOW_TX;
-
- priv->flow_ctrl = new_pause;
- phy->autoneg = pause->autoneg;
-
- if (phy->autoneg) {
- if (netif_running(netdev))
- return phy_start_aneg(phy);
+ return phylink_ethtool_set_pauseparam(priv->phylink, pause);
}
-
- stmmac_flow_ctrl(priv, priv->hw, phy->duplex, priv->flow_ctrl,
- priv->pause, tx_cnt);
- return 0;
}
static void stmmac_get_ethtool_stats(struct net_device *dev,
@@ -527,7 +480,7 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
if (ret) {
/* If supported, for new GMAC chips expose the MMC counters */
if (priv->dma_cap.rmon) {
- dwmac_mmc_read(priv->mmcaddr, &priv->mmc);
+ stmmac_mmc_read(priv, priv->mmcaddr, &priv->mmc);
for (i = 0; i < STMMAC_MMC_STATS_LEN; i++) {
char *p;
@@ -539,7 +492,7 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
}
}
if (priv->eee_enabled) {
- int val = phy_get_eee_err(dev->phydev);
+ int val = phylink_get_eee_err(priv->phylink);
if (val)
priv->xstats.phy_eee_wakeup_error_n = val;
}
@@ -579,6 +532,8 @@ static int stmmac_get_sset_count(struct net_device *netdev, int sset)
}
return len;
+ case ETH_SS_TEST:
+ return stmmac_selftest_get_count(priv);
default:
return -EOPNOTSUPP;
}
@@ -615,6 +570,9 @@ static void stmmac_get_strings(struct net_device *dev, u32 stringset, u8 *data)
p += ETH_GSTRING_LEN;
}
break;
+ case ETH_SS_TEST:
+ stmmac_selftest_get_strings(priv, p);
+ break;
default:
WARN_ON(1);
break;
@@ -679,7 +637,7 @@ static int stmmac_ethtool_op_get_eee(struct net_device *dev,
edata->eee_active = priv->eee_active;
edata->tx_lpi_timer = priv->tx_lpi_timer;
- return phy_ethtool_get_eee(dev->phydev, edata);
+ return phylink_ethtool_get_eee(priv->phylink, edata);
}
static int stmmac_ethtool_op_set_eee(struct net_device *dev,
@@ -700,7 +658,7 @@ static int stmmac_ethtool_op_set_eee(struct net_device *dev,
return -EOPNOTSUPP;
}
- ret = phy_ethtool_set_eee(dev->phydev, edata);
+ ret = phylink_ethtool_set_eee(priv->phylink, edata);
if (ret)
return ret;
@@ -743,8 +701,10 @@ static int stmmac_get_coalesce(struct net_device *dev,
ec->tx_coalesce_usecs = priv->tx_coal_timer;
ec->tx_max_coalesced_frames = priv->tx_coal_frames;
- if (priv->use_riwt)
+ if (priv->use_riwt) {
+ ec->rx_max_coalesced_frames = priv->rx_coal_frames;
ec->rx_coalesce_usecs = stmmac_riwt2usec(priv->rx_riwt, priv);
+ }
return 0;
}
@@ -757,7 +717,7 @@ static int stmmac_set_coalesce(struct net_device *dev,
unsigned int rx_riwt;
/* Check not supported parameters */
- if ((ec->rx_max_coalesced_frames) || (ec->rx_coalesce_usecs_irq) ||
+ if ((ec->rx_coalesce_usecs_irq) ||
(ec->rx_max_coalesced_frames_irq) || (ec->tx_coalesce_usecs_irq) ||
(ec->use_adaptive_rx_coalesce) || (ec->use_adaptive_tx_coalesce) ||
(ec->pkt_rate_low) || (ec->rx_coalesce_usecs_low) ||
@@ -791,6 +751,7 @@ static int stmmac_set_coalesce(struct net_device *dev,
/* Only copy relevant parameters, ignore all others. */
priv->tx_coal_frames = ec->tx_max_coalesced_frames;
priv->tx_coal_timer = ec->tx_coalesce_usecs;
+ priv->rx_coal_frames = ec->rx_max_coalesced_frames;
priv->rx_riwt = rx_riwt;
stmmac_rx_watchdog(priv, priv->ioaddr, priv->rx_riwt, rx_cnt);
@@ -877,9 +838,10 @@ static const struct ethtool_ops stmmac_ethtool_ops = {
.get_regs = stmmac_ethtool_gregs,
.get_regs_len = stmmac_ethtool_get_regs_len,
.get_link = ethtool_op_get_link,
- .nway_reset = phy_ethtool_nway_reset,
+ .nway_reset = stmmac_nway_reset,
.get_pauseparam = stmmac_get_pauseparam,
.set_pauseparam = stmmac_set_pauseparam,
+ .self_test = stmmac_selftest_run,
.get_ethtool_stats = stmmac_get_ethtool_stats,
.get_strings = stmmac_get_strings,
.get_wol = stmmac_get_wol,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 06358fe5b245..c7c9e5f162e6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -35,6 +35,7 @@
#include <linux/seq_file.h>
#endif /* CONFIG_DEBUG_FS */
#include <linux/net_tstamp.h>
+#include <linux/phylink.h>
#include <net/pkt_cls.h>
#include "stmmac_ptp.h"
#include "stmmac.h"
@@ -318,21 +319,6 @@ static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
}
/**
- * stmmac_hw_fix_mac_speed - callback for speed selection
- * @priv: driver private structure
- * Description: on some platforms (e.g. ST), some HW system configuration
- * registers have to be set according to the link speed negotiated.
- */
-static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
-{
- struct net_device *ndev = priv->dev;
- struct phy_device *phydev = ndev->phydev;
-
- if (likely(priv->plat->fix_mac_speed))
- priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
-}
-
-/**
* stmmac_enable_eee_mode - check and enter in LPI mode
* @priv: driver private structure
* Description: this function is to verify and enter in LPI mode in case of
@@ -395,14 +381,7 @@ static void stmmac_eee_ctrl_timer(struct timer_list *t)
*/
bool stmmac_eee_init(struct stmmac_priv *priv)
{
- struct net_device *ndev = priv->dev;
- int interface = priv->plat->interface;
- bool ret = false;
-
- if ((interface != PHY_INTERFACE_MODE_MII) &&
- (interface != PHY_INTERFACE_MODE_GMII) &&
- !phy_interface_mode_is_rgmii(interface))
- goto out;
+ int tx_lpi_timer = priv->tx_lpi_timer;
/* Using PCS we cannot deal with the phy registers at this stage
* so we do not support extra features like EEE.
@@ -410,52 +389,35 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
(priv->hw->pcs == STMMAC_PCS_TBI) ||
(priv->hw->pcs == STMMAC_PCS_RTBI))
- goto out;
-
- /* MAC core supports the EEE feature. */
- if (priv->dma_cap.eee) {
- int tx_lpi_timer = priv->tx_lpi_timer;
-
- /* Check if the PHY supports EEE */
- if (phy_init_eee(ndev->phydev, 1)) {
- /* To manage at run-time if the EEE cannot be supported
- * anymore (for example because the lp caps have been
- * changed).
- * In that case the driver disable own timers.
- */
- mutex_lock(&priv->lock);
- if (priv->eee_active) {
- netdev_dbg(priv->dev, "disable EEE\n");
- del_timer_sync(&priv->eee_ctrl_timer);
- stmmac_set_eee_timer(priv, priv->hw, 0,
- tx_lpi_timer);
- }
- priv->eee_active = 0;
- mutex_unlock(&priv->lock);
- goto out;
- }
- /* Activate the EEE and start timers */
- mutex_lock(&priv->lock);
- if (!priv->eee_active) {
- priv->eee_active = 1;
- timer_setup(&priv->eee_ctrl_timer,
- stmmac_eee_ctrl_timer, 0);
- mod_timer(&priv->eee_ctrl_timer,
- STMMAC_LPI_T(eee_timer));
-
- stmmac_set_eee_timer(priv, priv->hw,
- STMMAC_DEFAULT_LIT_LS, tx_lpi_timer);
- }
- /* Set HW EEE according to the speed */
- stmmac_set_eee_pls(priv, priv->hw, ndev->phydev->link);
+ return false;
+
+ /* Check if MAC core supports the EEE feature. */
+ if (!priv->dma_cap.eee)
+ return false;
- ret = true;
+ mutex_lock(&priv->lock);
+
+ /* Check if it needs to be deactivated */
+ if (!priv->eee_active) {
+ if (priv->eee_enabled) {
+ netdev_dbg(priv->dev, "disable EEE\n");
+ del_timer_sync(&priv->eee_ctrl_timer);
+ stmmac_set_eee_timer(priv, priv->hw, 0, tx_lpi_timer);
+ }
mutex_unlock(&priv->lock);
+ return false;
+ }
- netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
+ if (priv->eee_active && !priv->eee_enabled) {
+ timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
+ mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
+ stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
+ tx_lpi_timer);
}
-out:
- return ret;
+
+ mutex_unlock(&priv->lock);
+ netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
+ return true;
}
/* stmmac_get_tx_hwtstamp - get HW TX timestamps
@@ -838,97 +800,171 @@ static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
priv->pause, tx_cnt);
}
-/**
- * stmmac_adjust_link - adjusts the link parameters
- * @dev: net device structure
- * Description: this is the helper called by the physical abstraction layer
- * drivers to communicate the phy link status. According the speed and duplex
- * this driver can invoke registered glue-logic as well.
- * It also invoke the eee initialization because it could happen when switch
- * on different networks (that are eee capable).
- */
-static void stmmac_adjust_link(struct net_device *dev)
+static void stmmac_validate(struct phylink_config *config,
+ unsigned long *supported,
+ struct phylink_link_state *state)
{
- struct stmmac_priv *priv = netdev_priv(dev);
- struct phy_device *phydev = dev->phydev;
- bool new_state = false;
-
- if (!phydev)
- return;
+ struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mac_supported) = { 0, };
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+ int tx_cnt = priv->plat->tx_queues_to_use;
+ int max_speed = priv->plat->max_speed;
- mutex_lock(&priv->lock);
+ phylink_set(mac_supported, 10baseT_Half);
+ phylink_set(mac_supported, 10baseT_Full);
+ phylink_set(mac_supported, 100baseT_Half);
+ phylink_set(mac_supported, 100baseT_Full);
+
+ phylink_set(mac_supported, Autoneg);
+ phylink_set(mac_supported, Pause);
+ phylink_set(mac_supported, Asym_Pause);
+ phylink_set_port_modes(mac_supported);
+
+ if (priv->plat->has_gmac ||
+ priv->plat->has_gmac4 ||
+ priv->plat->has_xgmac) {
+ phylink_set(mac_supported, 1000baseT_Half);
+ phylink_set(mac_supported, 1000baseT_Full);
+ phylink_set(mac_supported, 1000baseKX_Full);
+ }
+
+ /* Cut down 1G if asked to */
+ if ((max_speed > 0) && (max_speed < 1000)) {
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseX_Full);
+ } else if (priv->plat->has_xgmac) {
+ phylink_set(mac_supported, 2500baseT_Full);
+ phylink_set(mac_supported, 5000baseT_Full);
+ phylink_set(mac_supported, 10000baseSR_Full);
+ phylink_set(mac_supported, 10000baseLR_Full);
+ phylink_set(mac_supported, 10000baseER_Full);
+ phylink_set(mac_supported, 10000baseLRM_Full);
+ phylink_set(mac_supported, 10000baseT_Full);
+ phylink_set(mac_supported, 10000baseKX4_Full);
+ phylink_set(mac_supported, 10000baseKR_Full);
+ }
+
+ /* Half-duplex can only work with a single queue */
+ if (tx_cnt > 1) {
+ phylink_set(mask, 10baseT_Half);
+ phylink_set(mask, 100baseT_Half);
+ phylink_set(mask, 1000baseT_Half);
+ }
+
+ bitmap_and(supported, supported, mac_supported,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+ bitmap_andnot(supported, supported, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+ bitmap_and(state->advertising, state->advertising, mac_supported,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+ bitmap_andnot(state->advertising, state->advertising, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
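A toy model of the bitmap arithmetic at the end of stmmac_validate() (this sketch assumes the modes fit in one 32-bit word; the kernel uses multi-word link-mode bitmaps): keep only what the MAC supports, then knock out what the platform limits forbid.

#include <stdint.h>
#include <stdio.h>

#define MODE_10_FULL    (1u << 0)
#define MODE_100_FULL   (1u << 1)
#define MODE_1000_FULL  (1u << 2)
#define MODE_1000_HALF  (1u << 3)

int main(void)
{
    uint32_t supported = MODE_10_FULL | MODE_100_FULL |
                         MODE_1000_FULL | MODE_1000_HALF;    /* from phylink */
    uint32_t mac_supported = MODE_10_FULL | MODE_100_FULL |
                             MODE_1000_FULL | MODE_1000_HALF;    /* what the core can do */
    uint32_t mask = MODE_1000_HALF;    /* e.g. tx_cnt > 1: drop half duplex */

    supported &= mac_supported;    /* bitmap_and() */
    supported &= ~mask;            /* bitmap_andnot() */

    printf("remaining modes: 0x%x\n", (unsigned)supported);
    return 0;
}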
- if (phydev->link) {
- u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
+static int stmmac_mac_link_state(struct phylink_config *config,
+ struct phylink_link_state *state)
+{
+ return -EOPNOTSUPP;
+}
- /* Now we make sure that we can be in full duplex mode.
- * If not, we operate in half-duplex mode. */
- if (phydev->duplex != priv->oldduplex) {
- new_state = true;
- if (!phydev->duplex)
- ctrl &= ~priv->hw->link.duplex;
- else
- ctrl |= priv->hw->link.duplex;
- priv->oldduplex = phydev->duplex;
- }
- /* Flow Control operation */
- if (phydev->pause)
- stmmac_mac_flow_ctrl(priv, phydev->duplex);
-
- if (phydev->speed != priv->speed) {
- new_state = true;
- ctrl &= ~priv->hw->link.speed_mask;
- switch (phydev->speed) {
- case SPEED_1000:
- ctrl |= priv->hw->link.speed1000;
- break;
- case SPEED_100:
- ctrl |= priv->hw->link.speed100;
- break;
- case SPEED_10:
- ctrl |= priv->hw->link.speed10;
- break;
- default:
- netif_warn(priv, link, priv->dev,
- "broken speed: %d\n", phydev->speed);
- phydev->speed = SPEED_UNKNOWN;
- break;
- }
- if (phydev->speed != SPEED_UNKNOWN)
- stmmac_hw_fix_mac_speed(priv);
- priv->speed = phydev->speed;
- }
+static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+ u32 ctrl;
- writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
+ ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
+ ctrl &= ~priv->hw->link.speed_mask;
- if (!priv->oldlink) {
- new_state = true;
- priv->oldlink = true;
+ if (state->interface == PHY_INTERFACE_MODE_USXGMII) {
+ switch (state->speed) {
+ case SPEED_10000:
+ ctrl |= priv->hw->link.xgmii.speed10000;
+ break;
+ case SPEED_5000:
+ ctrl |= priv->hw->link.xgmii.speed5000;
+ break;
+ case SPEED_2500:
+ ctrl |= priv->hw->link.xgmii.speed2500;
+ break;
+ default:
+ return;
+ }
+ } else {
+ switch (state->speed) {
+ case SPEED_2500:
+ ctrl |= priv->hw->link.speed2500;
+ break;
+ case SPEED_1000:
+ ctrl |= priv->hw->link.speed1000;
+ break;
+ case SPEED_100:
+ ctrl |= priv->hw->link.speed100;
+ break;
+ case SPEED_10:
+ ctrl |= priv->hw->link.speed10;
+ break;
+ default:
+ return;
}
- } else if (priv->oldlink) {
- new_state = true;
- priv->oldlink = false;
- priv->speed = SPEED_UNKNOWN;
- priv->oldduplex = DUPLEX_UNKNOWN;
}
- if (new_state && netif_msg_link(priv))
- phy_print_status(phydev);
+ priv->speed = state->speed;
- mutex_unlock(&priv->lock);
+ if (priv->plat->fix_mac_speed)
+ priv->plat->fix_mac_speed(priv->plat->bsp_priv, state->speed);
- if (phydev->is_pseudo_fixed_link)
- /* Stop PHY layer to call the hook to adjust the link in case
- * of a switch is attached to the stmmac driver.
- */
- phydev->irq = PHY_IGNORE_INTERRUPT;
+ if (!state->duplex)
+ ctrl &= ~priv->hw->link.duplex;
else
- /* At this stage, init the EEE if supported.
- * Never called in case of fixed_link.
- */
+ ctrl |= priv->hw->link.duplex;
+
+ /* Flow Control operation */
+ if (state->pause)
+ stmmac_mac_flow_ctrl(priv, state->duplex);
+
+ writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
+}
+
+static void stmmac_mac_an_restart(struct phylink_config *config)
+{
+ /* Not Supported */
+}
+
+static void stmmac_mac_link_down(struct phylink_config *config,
+ unsigned int mode, phy_interface_t interface)
+{
+ struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+
+ stmmac_mac_set(priv, priv->ioaddr, false);
+ priv->eee_active = false;
+ stmmac_eee_init(priv);
+ stmmac_set_eee_pls(priv, priv->hw, false);
+}
+
+static void stmmac_mac_link_up(struct phylink_config *config,
+ unsigned int mode, phy_interface_t interface,
+ struct phy_device *phy)
+{
+ struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+
+ stmmac_mac_set(priv, priv->ioaddr, true);
+ if (phy && priv->dma_cap.eee) {
+ priv->eee_active = phy_init_eee(phy, 1) >= 0;
priv->eee_enabled = stmmac_eee_init(priv);
+ stmmac_set_eee_pls(priv, priv->hw, true);
+ }
}
+static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
+ .validate = stmmac_validate,
+ .mac_link_state = stmmac_mac_link_state,
+ .mac_config = stmmac_mac_config,
+ .mac_an_restart = stmmac_mac_an_restart,
+ .mac_link_down = stmmac_mac_link_down,
+ .mac_link_up = stmmac_mac_link_up,
+};
+
/**
* stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
* @priv: driver private structure
@@ -965,79 +1001,48 @@ static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
static int stmmac_init_phy(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
- u32 tx_cnt = priv->plat->tx_queues_to_use;
- struct phy_device *phydev;
- char phy_id_fmt[MII_BUS_ID_SIZE + 3];
- char bus_id[MII_BUS_ID_SIZE];
- int interface = priv->plat->interface;
- int max_speed = priv->plat->max_speed;
- priv->oldlink = false;
- priv->speed = SPEED_UNKNOWN;
- priv->oldduplex = DUPLEX_UNKNOWN;
+ struct device_node *node;
+ int ret;
- if (priv->plat->phy_node) {
- phydev = of_phy_connect(dev, priv->plat->phy_node,
- &stmmac_adjust_link, 0, interface);
- } else {
- snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
- priv->plat->bus_id);
+ node = priv->plat->phylink_node;
- snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
- priv->plat->phy_addr);
- netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
- phy_id_fmt);
+ if (node)
+ ret = phylink_of_phy_connect(priv->phylink, node, 0);
- phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
- interface);
- }
+ /* Some DT bindings do not set up the PHY handle. Let's try to
+ * parse it manually.
+ */
+ if (!node || ret) {
+ int addr = priv->plat->phy_addr;
+ struct phy_device *phydev;
- if (IS_ERR_OR_NULL(phydev)) {
- netdev_err(priv->dev, "Could not attach to PHY\n");
- if (!phydev)
+ phydev = mdiobus_get_phy(priv->mii, addr);
+ if (!phydev) {
+ netdev_err(priv->dev, "no phy at addr %d\n", addr);
return -ENODEV;
+ }
- return PTR_ERR(phydev);
+ ret = phylink_connect_phy(priv->phylink, phydev);
}
- /* Stop Advertising 1000BASE Capability if interface is not GMII */
- if ((interface == PHY_INTERFACE_MODE_MII) ||
- (interface == PHY_INTERFACE_MODE_RMII) ||
- (max_speed < 1000 && max_speed > 0))
- phy_set_max_speed(phydev, SPEED_100);
+ return ret;
+}
- /*
- * Half-duplex mode not supported with multiqueue
- * half-duplex can only works with single queue
- */
- if (tx_cnt > 1) {
- phy_remove_link_mode(phydev,
- ETHTOOL_LINK_MODE_10baseT_Half_BIT);
- phy_remove_link_mode(phydev,
- ETHTOOL_LINK_MODE_100baseT_Half_BIT);
- phy_remove_link_mode(phydev,
- ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
- }
+static int stmmac_phy_setup(struct stmmac_priv *priv)
+{
+ struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
+ int mode = priv->plat->interface;
+ struct phylink *phylink;
- /*
- * Broken HW is sometimes missing the pull-up resistor on the
- * MDIO line, which results in reads to non-existent devices returning
- * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
- * device as well.
- * Note: phydev->phy_id is the result of reading the UID PHY registers.
- */
- if (!priv->plat->phy_node && phydev->phy_id == 0) {
- phy_disconnect(phydev);
- return -ENODEV;
- }
+ priv->phylink_config.dev = &priv->dev->dev;
+ priv->phylink_config.type = PHYLINK_NETDEV;
- /* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
- * subsequent PHY polling, make sure we force a link transition if
- * we have a UP/DOWN/UP transition
- */
- if (phydev->is_pseudo_fixed_link)
- phydev->irq = PHY_POLL;
+ phylink = phylink_create(&priv->phylink_config, fwnode,
+ mode, &stmmac_phylink_mac_ops);
+ if (IS_ERR(phylink))
+ return PTR_ERR(phylink);
- phy_attached_info(phydev);
+ priv->phylink = phylink;
return 0;
}
@@ -1192,26 +1197,14 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
int i, gfp_t flags, u32 queue)
{
struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
- struct sk_buff *skb;
+ struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
- skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
- if (!skb) {
- netdev_err(priv->dev,
- "%s: Rx init fails; skb is NULL\n", __func__);
+ buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
+ if (!buf->page)
return -ENOMEM;
- }
- rx_q->rx_skbuff[i] = skb;
- rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
- priv->dma_buf_sz,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
- netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
- dev_kfree_skb_any(skb);
- return -EINVAL;
- }
-
- stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[i]);
+ buf->addr = page_pool_get_dma_addr(buf->page);
+ stmmac_set_desc_addr(priv, p, buf->addr);
if (priv->dma_buf_sz == BUF_SIZE_16KiB)
stmmac_init_desc3(priv, p);
@@ -1227,13 +1220,11 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
{
struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
- if (rx_q->rx_skbuff[i]) {
- dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
- priv->dma_buf_sz, DMA_FROM_DEVICE);
- dev_kfree_skb_any(rx_q->rx_skbuff[i]);
- }
- rx_q->rx_skbuff[i] = NULL;
+ if (buf->page)
+ page_pool_put_page(rx_q->page_pool, buf->page, false);
+ buf->page = NULL;
}
/**
@@ -1316,10 +1307,6 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
queue);
if (ret)
goto err_init_rx_buffers;
-
- netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
- rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
- (unsigned int)rx_q->rx_skbuff_dma[i]);
}
rx_q->cur_rx = 0;
@@ -1493,8 +1480,11 @@ static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
sizeof(struct dma_extended_desc),
rx_q->dma_erx, rx_q->dma_rx_phy);
- kfree(rx_q->rx_skbuff_dma);
- kfree(rx_q->rx_skbuff);
+ kfree(rx_q->buf_pool);
+ if (rx_q->page_pool) {
+ page_pool_request_shutdown(rx_q->page_pool);
+ page_pool_destroy(rx_q->page_pool);
+ }
}
}
@@ -1546,20 +1536,29 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
/* RX queues buffers and DMA */
for (queue = 0; queue < rx_count; queue++) {
struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct page_pool_params pp_params = { 0 };
rx_q->queue_index = queue;
rx_q->priv_data = priv;
- rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
- sizeof(dma_addr_t),
- GFP_KERNEL);
- if (!rx_q->rx_skbuff_dma)
+ pp_params.flags = PP_FLAG_DMA_MAP;
+ pp_params.pool_size = DMA_RX_SIZE;
+ pp_params.order = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
+ pp_params.nid = dev_to_node(priv->device);
+ pp_params.dev = priv->device;
+ pp_params.dma_dir = DMA_FROM_DEVICE;
+
+ rx_q->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(rx_q->page_pool)) {
+ ret = PTR_ERR(rx_q->page_pool);
+ rx_q->page_pool = NULL;
goto err_dma;
+ }
- rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
- sizeof(struct sk_buff *),
- GFP_KERNEL);
- if (!rx_q->rx_skbuff)
+ rx_q->buf_pool = kmalloc_array(DMA_RX_SIZE,
+ sizeof(*rx_q->buf_pool),
+ GFP_KERNEL);
+ if (!rx_q->buf_pool)
goto err_dma;
if (priv->extend_desc) {
@@ -2049,14 +2048,15 @@ static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
struct stmmac_channel *ch = &priv->channel[chan];
if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
- stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
- napi_schedule_irqoff(&ch->rx_napi);
+ if (napi_schedule_prep(&ch->rx_napi)) {
+ stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
+ __napi_schedule_irqoff(&ch->rx_napi);
+ status |= handle_tx;
+ }
}
- if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
- stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
+ if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use))
napi_schedule_irqoff(&ch->tx_napi);
- }
return status;
}
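A userspace analogy (not kernel code) of why the RX path now tests napi_schedule_prep() first: the per-channel DMA interrupt is only masked when this CPU actually claims the poll, so an already-pending poll cannot be left with its interrupt masked and nobody responsible for re-enabling it.

#include <stdbool.h>
#include <stdio.h>

static bool napi_pending;    /* stands in for NAPI_STATE_SCHED */

static bool napi_schedule_prep_model(void)
{
    if (napi_pending)
        return false;    /* a poll is already queued */
    napi_pending = true;
    return true;
}

static void rx_interrupt_model(void)
{
    if (napi_schedule_prep_model()) {
        /* only now is it safe to mask the channel's DMA interrupt */
        printf("claimed poll: mask IRQ, schedule NAPI\n");
    } else {
        printf("poll already pending: leave IRQ state alone\n");
    }
}

int main(void)
{
    rx_interrupt_model();    /* first interrupt: schedules and masks */
    rx_interrupt_model();    /* second one before the poll ran: no extra work */
    return 0;
}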
@@ -2118,10 +2118,10 @@ static void stmmac_mmc_setup(struct stmmac_priv *priv)
unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
- dwmac_mmc_intr_all_mask(priv->mmcaddr);
+ stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
if (priv->dma_cap.rmon) {
- dwmac_mmc_ctrl(priv->mmcaddr, mode);
+ stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
} else
netdev_info(priv->dev, "No MAC Management Counters available\n");
@@ -2154,8 +2154,8 @@ static void stmmac_check_ether_addr(struct stmmac_priv *priv)
stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
if (!is_valid_ether_addr(priv->dev->dev_addr))
eth_hw_addr_random(priv->dev);
- netdev_info(priv->dev, "device MAC address %pM\n",
- priv->dev->dev_addr);
+ dev_info(priv->device, "device MAC address %pM\n",
+ priv->dev->dev_addr);
}
}
@@ -2262,20 +2262,21 @@ static void stmmac_tx_timer(struct timer_list *t)
}
/**
- * stmmac_init_tx_coalesce - init tx mitigation options.
+ * stmmac_init_coalesce - init mitigation options.
* @priv: driver private structure
* Description:
- * This inits the transmit coalesce parameters: i.e. timer rate,
+ * This inits the coalesce parameters: i.e. timer rate,
* timer handler and default threshold used for enabling the
* interrupt on completion bit.
*/
-static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
+static void stmmac_init_coalesce(struct stmmac_priv *priv)
{
u32 tx_channel_count = priv->plat->tx_queues_to_use;
u32 chan;
priv->tx_coal_frames = STMMAC_TX_FRAMES;
priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
+ priv->rx_coal_frames = STMMAC_RX_FRAMES;
for (chan = 0; chan < tx_channel_count; chan++) {
struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
@@ -2561,9 +2562,9 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
if (priv->use_riwt) {
- ret = stmmac_rx_watchdog(priv, priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
+ ret = stmmac_rx_watchdog(priv, priv->ioaddr, MIN_DMA_RIWT, rx_cnt);
if (!ret)
- priv->rx_riwt = MAX_DMA_RIWT;
+ priv->rx_riwt = MIN_DMA_RIWT;
}
if (priv->hw->pcs)
@@ -2645,10 +2646,9 @@ static int stmmac_open(struct net_device *dev)
goto init_error;
}
- stmmac_init_tx_coalesce(priv);
+ stmmac_init_coalesce(priv);
- if (dev->phydev)
- phy_start(dev->phydev);
+ phylink_start(priv->phylink);
/* Request the IRQ lines */
ret = request_irq(dev->irq, stmmac_interrupt,
@@ -2695,8 +2695,7 @@ lpiirq_error:
wolirq_error:
free_irq(dev->irq, dev);
irq_error:
- if (dev->phydev)
- phy_stop(dev->phydev);
+ phylink_stop(priv->phylink);
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
del_timer_sync(&priv->tx_queue[chan].txtimer);
@@ -2705,9 +2704,7 @@ irq_error:
init_error:
free_dma_desc_resources(priv);
dma_desc_error:
- if (dev->phydev)
- phy_disconnect(dev->phydev);
-
+ phylink_disconnect_phy(priv->phylink);
return ret;
}
@@ -2726,10 +2723,8 @@ static int stmmac_release(struct net_device *dev)
del_timer_sync(&priv->eee_ctrl_timer);
/* Stop and disconnect the PHY */
- if (dev->phydev) {
- phy_stop(dev->phydev);
- phy_disconnect(dev->phydev);
- }
+ phylink_stop(priv->phylink);
+ phylink_disconnect_phy(priv->phylink);
stmmac_stop_all_queues(priv);
@@ -2772,7 +2767,7 @@ static int stmmac_release(struct net_device *dev)
* This function fills the descriptor and requests new descriptors according
* to the buffer length to fill
*/
-static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
+static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
int total_len, bool last_segment, u32 queue)
{
struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
@@ -2783,11 +2778,18 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
tmp_len = total_len;
while (tmp_len > 0) {
+ dma_addr_t curr_addr;
+
tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
desc = tx_q->dma_tx + tx_q->cur_tx;
- desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
+ curr_addr = des + (total_len - tmp_len);
+ if (priv->dma_cap.addr64 <= 32)
+ desc->des0 = cpu_to_le32(curr_addr);
+ else
+ stmmac_set_desc_addr(priv, desc, curr_addr);
+
buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
TSO_MAX_BUFF_SIZE : tmp_len;
@@ -2833,11 +2835,12 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
struct stmmac_priv *priv = netdev_priv(dev);
int nfrags = skb_shinfo(skb)->nr_frags;
u32 queue = skb_get_queue_mapping(skb);
- unsigned int first_entry, des;
+ unsigned int first_entry;
struct stmmac_tx_queue *tx_q;
int tmp_pay_len = 0;
u32 pay_len, mss;
u8 proto_hdr_len;
+ dma_addr_t des;
int i;
tx_q = &priv->tx_queue[queue];
@@ -2894,14 +2897,19 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
tx_q->tx_skbuff_dma[first_entry].buf = des;
tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
- first->des0 = cpu_to_le32(des);
+ if (priv->dma_cap.addr64 <= 32) {
+ first->des0 = cpu_to_le32(des);
- /* Fill start of payload in buff2 of first descriptor */
- if (pay_len)
- first->des1 = cpu_to_le32(des + proto_hdr_len);
+ /* Fill start of payload in buff2 of first descriptor */
+ if (pay_len)
+ first->des1 = cpu_to_le32(des + proto_hdr_len);
- /* If needed take extra descriptors to fill the remaining payload */
- tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
+ /* If needed take extra descriptors to fill the remaining payload */
+ tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
+ } else {
+ stmmac_set_desc_addr(priv, first, des);
+ tmp_pay_len = pay_len;
+ }
stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
@@ -3031,12 +3039,12 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
int i, csum_insertion = 0, is_jumbo = 0;
u32 queue = skb_get_queue_mapping(skb);
int nfrags = skb_shinfo(skb)->nr_frags;
- int entry;
- unsigned int first_entry;
struct dma_desc *desc, *first;
struct stmmac_tx_queue *tx_q;
+ unsigned int first_entry;
unsigned int enh_desc;
- unsigned int des;
+ dma_addr_t des;
+ int entry;
tx_q = &priv->tx_queue[queue];
@@ -3045,17 +3053,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
/* Manage oversized TCP frames for GMAC4 device */
if (skb_is_gso(skb) && priv->tso) {
- if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
- /*
- * There is no way to determine the number of TSO
- * capable Queues. Let's use always the Queue 0
- * because if TSO is supported then at least this
- * one will be capable.
- */
- skb_set_queue_mapping(skb, 0);
-
+ if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
return stmmac_tso_xmit(skb, dev);
- }
}
if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
@@ -3281,59 +3280,38 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
int dirty = stmmac_rx_dirty(priv, queue);
unsigned int entry = rx_q->dirty_rx;
- int bfsize = priv->dma_buf_sz;
-
while (dirty-- > 0) {
+ struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
struct dma_desc *p;
+ bool use_rx_wd;
if (priv->extend_desc)
p = (struct dma_desc *)(rx_q->dma_erx + entry);
else
p = rx_q->dma_rx + entry;
- if (likely(!rx_q->rx_skbuff[entry])) {
- struct sk_buff *skb;
-
- skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
- if (unlikely(!skb)) {
- /* so for a while no zero-copy! */
- rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
- if (unlikely(net_ratelimit()))
- dev_err(priv->device,
- "fail to alloc skb entry %d\n",
- entry);
+ if (!buf->page) {
+ buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
+ if (!buf->page)
break;
- }
-
- rx_q->rx_skbuff[entry] = skb;
- rx_q->rx_skbuff_dma[entry] =
- dma_map_single(priv->device, skb->data, bfsize,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(priv->device,
- rx_q->rx_skbuff_dma[entry])) {
- netdev_err(priv->dev, "Rx DMA map failed\n");
- dev_kfree_skb(skb);
- break;
- }
-
- stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[entry]);
- stmmac_refill_desc3(priv, rx_q, p);
-
- if (rx_q->rx_zeroc_thresh > 0)
- rx_q->rx_zeroc_thresh--;
-
- netif_dbg(priv, rx_status, priv->dev,
- "refill entry #%d\n", entry);
}
- dma_wmb();
- stmmac_set_rx_owner(priv, p, priv->use_riwt);
+ buf->addr = page_pool_get_dma_addr(buf->page);
+ stmmac_set_desc_addr(priv, p, buf->addr);
+ stmmac_refill_desc3(priv, rx_q, p);
+
+ rx_q->rx_count_frames++;
+ rx_q->rx_count_frames %= priv->rx_coal_frames;
+ use_rx_wd = priv->use_riwt && rx_q->rx_count_frames;
dma_wmb();
+ stmmac_set_rx_owner(priv, p, use_rx_wd);
entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
}
rx_q->dirty_rx = entry;
+ rx_q->rx_tail_addr = rx_q->dma_rx_phy +
+ (rx_q->dirty_rx * sizeof(struct dma_desc));
stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
}
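
For reference, the refill loop above only leaves the RX watchdog armed while the per-queue frame counter is non-zero, so an immediate completion interrupt is requested once every priv->rx_coal_frames refills. Below is a stand-alone sketch of that counter decision, not part of the patch; a coalesce value of 25 frames and RIWT enabled are assumed here.

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	const unsigned int rx_coal_frames = 25;	/* assumed coalesce setting */
	const bool use_riwt = true;		/* RX watchdog (RIWT) enabled */
	unsigned int rx_count_frames = 0;

	for (int refill = 0; refill < 100; refill++) {
		bool use_rx_wd;

		rx_count_frames++;
		rx_count_frames %= rx_coal_frames;
		/* watchdog stays armed except when the counter wraps to 0 */
		use_rx_wd = use_riwt && rx_count_frames;
		if (!use_rx_wd)
			printf("refill %d: immediate RX interrupt requested\n",
			       refill);
	}
	return 0;
}
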
@@ -3352,9 +3330,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
unsigned int next_entry = rx_q->cur_rx;
int coe = priv->hw->rx_csum;
unsigned int count = 0;
- bool xmac;
-
- xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
if (netif_msg_rx_status(priv)) {
void *rx_head;
@@ -3368,11 +3343,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
}
while (count < limit) {
+ struct stmmac_rx_buffer *buf;
+ struct dma_desc *np, *p;
int entry, status;
- struct dma_desc *p;
- struct dma_desc *np;
entry = next_entry;
+ buf = &rx_q->buf_pool[entry];
if (priv->extend_desc)
p = (struct dma_desc *)(rx_q->dma_erx + entry);
@@ -3402,20 +3378,9 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
stmmac_rx_extended_status(priv, &priv->dev->stats,
&priv->xstats, rx_q->dma_erx + entry);
if (unlikely(status == discard_frame)) {
+ page_pool_recycle_direct(rx_q->page_pool, buf->page);
priv->dev->stats.rx_errors++;
- if (priv->hwts_rx_en && !priv->extend_desc) {
- /* DESC2 & DESC3 will be overwritten by device
- * with timestamp value, hence reinitialize
- * them in stmmac_rx_refill() function so that
- * device can reuse it.
- */
- dev_kfree_skb_any(rx_q->rx_skbuff[entry]);
- rx_q->rx_skbuff[entry] = NULL;
- dma_unmap_single(priv->device,
- rx_q->rx_skbuff_dma[entry],
- priv->dma_buf_sz,
- DMA_FROM_DEVICE);
- }
+ buf->page = NULL;
} else {
struct sk_buff *skb;
int frame_len;
@@ -3455,58 +3420,20 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
frame_len, status);
}
- /* The zero-copy is always used for all the sizes
- * in case of GMAC4 because it needs
- * to refill the used descriptors, always.
- */
- if (unlikely(!xmac &&
- ((frame_len < priv->rx_copybreak) ||
- stmmac_rx_threshold_count(rx_q)))) {
- skb = netdev_alloc_skb_ip_align(priv->dev,
- frame_len);
- if (unlikely(!skb)) {
- if (net_ratelimit())
- dev_warn(priv->device,
- "packet dropped\n");
- priv->dev->stats.rx_dropped++;
- continue;
- }
-
- dma_sync_single_for_cpu(priv->device,
- rx_q->rx_skbuff_dma
- [entry], frame_len,
- DMA_FROM_DEVICE);
- skb_copy_to_linear_data(skb,
- rx_q->
- rx_skbuff[entry]->data,
- frame_len);
-
- skb_put(skb, frame_len);
- dma_sync_single_for_device(priv->device,
- rx_q->rx_skbuff_dma
- [entry], frame_len,
- DMA_FROM_DEVICE);
- } else {
- skb = rx_q->rx_skbuff[entry];
- if (unlikely(!skb)) {
- if (net_ratelimit())
- netdev_err(priv->dev,
- "%s: Inconsistent Rx chain\n",
- priv->dev->name);
- priv->dev->stats.rx_dropped++;
- continue;
- }
- prefetch(skb->data - NET_IP_ALIGN);
- rx_q->rx_skbuff[entry] = NULL;
- rx_q->rx_zeroc_thresh++;
-
- skb_put(skb, frame_len);
- dma_unmap_single(priv->device,
- rx_q->rx_skbuff_dma[entry],
- priv->dma_buf_sz,
- DMA_FROM_DEVICE);
+ skb = netdev_alloc_skb_ip_align(priv->dev, frame_len);
+ if (unlikely(!skb)) {
+ priv->dev->stats.rx_dropped++;
+ continue;
}
+ dma_sync_single_for_cpu(priv->device, buf->addr,
+ frame_len, DMA_FROM_DEVICE);
+ skb_copy_to_linear_data(skb, page_address(buf->page),
+ frame_len);
+ skb_put(skb, frame_len);
+ dma_sync_single_for_device(priv->device, buf->addr,
+ frame_len, DMA_FROM_DEVICE);
+
if (netif_msg_pktdata(priv)) {
netdev_dbg(priv->dev, "frame received (%dbytes)",
frame_len);
@@ -3526,6 +3453,10 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
napi_gro_receive(&ch->rx_napi, skb);
+ /* Data payload copied into SKB, page ready for recycle */
+ page_pool_recycle_direct(rx_q->page_pool, buf->page);
+ buf->page = NULL;
+
priv->dev->stats.rx_packets++;
priv->dev->stats.rx_bytes += frame_len;
}
@@ -3568,8 +3499,8 @@ static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
work_done = stmmac_tx_clean(priv, DMA_TX_SIZE, chan);
work_done = min(work_done, budget);
- if (work_done < budget && napi_complete_done(napi, work_done))
- stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
+ if (work_done < budget)
+ napi_complete_done(napi, work_done);
/* Force transmission restart */
tx_q = &priv->tx_queue[chan];
@@ -3792,6 +3723,7 @@ static void stmmac_poll_controller(struct net_device *dev)
*/
static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
+ struct stmmac_priv *priv = netdev_priv(dev);
int ret = -EOPNOTSUPP;
if (!netif_running(dev))
@@ -3801,9 +3733,7 @@ static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
case SIOCGMIIPHY:
case SIOCGMIIREG:
case SIOCSMIIREG:
- if (!dev->phydev)
- return -EINVAL;
- ret = phy_mii_ioctl(dev->phydev, rq, cmd);
+ ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
break;
case SIOCSHWTSTAMP:
ret = stmmac_hwtstamp_set(dev, rq);
@@ -3839,23 +3769,7 @@ static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
return ret;
}
-static int stmmac_setup_tc_block(struct stmmac_priv *priv,
- struct tc_block_offload *f)
-{
- if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
- return -EOPNOTSUPP;
-
- switch (f->command) {
- case TC_BLOCK_BIND:
- return tcf_block_cb_register(f->block, stmmac_setup_tc_block_cb,
- priv, priv, f->extack);
- case TC_BLOCK_UNBIND:
- tcf_block_cb_unregister(f->block, stmmac_setup_tc_block_cb, priv);
- return 0;
- default:
- return -EOPNOTSUPP;
- }
-}
+static LIST_HEAD(stmmac_block_cb_list);
static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
void *type_data)
@@ -3864,7 +3778,10 @@ static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
switch (type) {
case TC_SETUP_BLOCK:
- return stmmac_setup_tc_block(priv, type_data);
+ return flow_block_cb_setup_simple(type_data,
+ &stmmac_block_cb_list,
+ stmmac_setup_tc_block_cb,
+ priv, priv, true);
case TC_SETUP_QDISC_CBS:
return stmmac_tc_setup_cbs(priv, priv, type_data);
default:
@@ -3872,6 +3789,22 @@ static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
}
}
+static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
+ /*
+ * There is no way to determine the number of TSO
+ * capable Queues. Let's always use Queue 0,
+ * because if TSO is supported then at least this
+ * one will be capable.
+ */
+ return 0;
+ }
+
+ return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
+}
+
static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
{
struct stmmac_priv *priv = netdev_priv(ndev);
@@ -4088,6 +4021,7 @@ static const struct net_device_ops stmmac_netdev_ops = {
.ndo_tx_timeout = stmmac_tx_timeout,
.ndo_do_ioctl = stmmac_ioctl,
.ndo_setup_tc = stmmac_setup_tc,
+ .ndo_select_queue = stmmac_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = stmmac_poll_controller,
#endif
@@ -4160,6 +4094,12 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
priv->plat->enh_desc = priv->dma_cap.enh_desc;
priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
priv->hw->pmt = priv->plat->pmt;
+ if (priv->dma_cap.hash_tb_sz) {
+ priv->hw->multicast_filter_bins =
+ (BIT(priv->dma_cap.hash_tb_sz) << 5);
+ priv->hw->mcast_bits_log2 =
+ ilog2(priv->hw->multicast_filter_bins);
+ }
/* TXCOE doesn't work in thresh DMA mode */
if (priv->plat->force_thresh_dma_mode)
@@ -4237,9 +4177,8 @@ int stmmac_dvr_probe(struct device *device,
u32 queue, maxq;
int ret = 0;
- ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
- MTL_MAX_TX_QUEUES,
- MTL_MAX_RX_QUEUES);
+ ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
+ MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
if (!ndev)
return -ENOMEM;
@@ -4271,8 +4210,7 @@ int stmmac_dvr_probe(struct device *device,
priv->wq = create_singlethread_workqueue("stmmac_wq");
if (!priv->wq) {
dev_err(priv->device, "failed to create workqueue\n");
- ret = -ENOMEM;
- goto error_wq;
+ return -ENOMEM;
}
INIT_WORK(&priv->service_task, stmmac_service_task);
@@ -4319,6 +4257,24 @@ int stmmac_dvr_probe(struct device *device,
priv->tso = true;
dev_info(priv->device, "TSO feature enabled\n");
}
+
+ if (priv->dma_cap.addr64) {
+ ret = dma_set_mask_and_coherent(device,
+ DMA_BIT_MASK(priv->dma_cap.addr64));
+ if (!ret) {
+ dev_info(priv->device, "Using %d bits DMA width\n",
+ priv->dma_cap.addr64);
+ } else {
+ ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(priv->device, "Failed to set DMA Mask\n");
+ goto error_hw_init;
+ }
+
+ priv->dma_cap.addr64 = 32;
+ }
+ }
+
ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
#ifdef STMMAC_VLAN_TAG_USED
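
The probe hunk above takes the address width reported by the hardware (dma_cap.addr64), requests the widest possible DMA mask and falls back to a 32-bit mask if that fails. The following user-space sketch only illustrates the mask arithmetic, mirroring the kernel's DMA_BIT_MASK() macro; the set of widths shown is an assumption about what the IP can report.

#include <stdio.h>

/* same arithmetic as DMA_BIT_MASK(n) in linux/dma-mapping.h */
#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
	unsigned int widths[] = { 32, 40, 48, 64 };

	for (unsigned int i = 0; i < sizeof(widths) / sizeof(widths[0]); i++)
		printf("addr64 = %2u -> DMA mask = 0x%016llx\n", widths[i],
		       (unsigned long long)DMA_BIT_MASK(widths[i]));
	return 0;
}
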
@@ -4396,6 +4352,12 @@ int stmmac_dvr_probe(struct device *device,
}
}
+ ret = stmmac_phy_setup(priv);
+ if (ret) {
+ netdev_err(ndev, "failed to setup phy (%d)\n", ret);
+ goto error_phy_setup;
+ }
+
ret = register_netdev(ndev);
if (ret) {
dev_err(priv->device, "%s: ERROR %i registering the device\n",
@@ -4413,6 +4375,8 @@ int stmmac_dvr_probe(struct device *device,
return ret;
error_netdev_register:
+ phylink_destroy(priv->phylink);
+error_phy_setup:
if (priv->hw->pcs != STMMAC_PCS_RGMII &&
priv->hw->pcs != STMMAC_PCS_TBI &&
priv->hw->pcs != STMMAC_PCS_RTBI)
@@ -4428,8 +4392,6 @@ error_mdio_register:
}
error_hw_init:
destroy_workqueue(priv->wq);
-error_wq:
- free_netdev(ndev);
return ret;
}
@@ -4456,6 +4418,7 @@ int stmmac_dvr_remove(struct device *dev)
stmmac_mac_set(priv, priv->ioaddr, false);
netif_carrier_off(ndev);
unregister_netdev(ndev);
+ phylink_destroy(priv->phylink);
if (priv->plat->stmmac_rst)
reset_control_assert(priv->plat->stmmac_rst);
clk_disable_unprepare(priv->plat->pclk);
@@ -4466,7 +4429,6 @@ int stmmac_dvr_remove(struct device *dev)
stmmac_mdio_unregister(ndev);
destroy_workqueue(priv->wq);
mutex_destroy(&priv->lock);
- free_netdev(ndev);
return 0;
}
@@ -4487,8 +4449,7 @@ int stmmac_suspend(struct device *dev)
if (!ndev || !netif_running(ndev))
return 0;
- if (ndev->phydev)
- phy_stop(ndev->phydev);
+ phylink_stop(priv->phylink);
mutex_lock(&priv->lock);
@@ -4513,9 +4474,7 @@ int stmmac_suspend(struct device *dev)
}
mutex_unlock(&priv->lock);
- priv->oldlink = false;
priv->speed = SPEED_UNKNOWN;
- priv->oldduplex = DUPLEX_UNKNOWN;
return 0;
}
EXPORT_SYMBOL_GPL(stmmac_suspend);
@@ -4590,7 +4549,7 @@ int stmmac_resume(struct device *dev)
stmmac_clear_descriptors(priv);
stmmac_hw_setup(ndev, false);
- stmmac_init_tx_coalesce(priv);
+ stmmac_init_coalesce(priv);
stmmac_set_rx_mode(ndev);
stmmac_enable_all_queues(priv);
@@ -4599,8 +4558,7 @@ int stmmac_resume(struct device *dev)
mutex_unlock(&priv->lock);
- if (ndev->phydev)
- phy_start(ndev->phydev);
+ phylink_start(priv->phylink);
return 0;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 1341bb5f693c..4304c1abc5d1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -10,13 +10,13 @@
Maintainer: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
+#include <linux/gpio/consumer.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/mii.h>
-#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
+#include <linux/property.h>
#include <linux/slab.h>
#include "dwxgmac2.h"
@@ -24,11 +24,14 @@
#define MII_BUSY 0x00000001
#define MII_WRITE 0x00000002
+#define MII_DATA_MASK GENMASK(15, 0)
/* GMAC4 defines */
#define MII_GMAC4_GOC_SHIFT 2
+#define MII_GMAC4_REG_ADDR_SHIFT 16
#define MII_GMAC4_WRITE (1 << MII_GMAC4_GOC_SHIFT)
#define MII_GMAC4_READ (3 << MII_GMAC4_GOC_SHIFT)
+#define MII_GMAC4_C45E BIT(1)
/* XGMAC defines */
#define MII_XGMAC_SADDR BIT(18)
@@ -155,22 +158,34 @@ static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
struct stmmac_priv *priv = netdev_priv(ndev);
unsigned int mii_address = priv->hw->mii.addr;
unsigned int mii_data = priv->hw->mii.data;
- u32 v;
- int data;
u32 value = MII_BUSY;
+ int data = 0;
+ u32 v;
value |= (phyaddr << priv->hw->mii.addr_shift)
& priv->hw->mii.addr_mask;
value |= (phyreg << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask;
value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
& priv->hw->mii.clk_csr_mask;
- if (priv->plat->has_gmac4)
+ if (priv->plat->has_gmac4) {
value |= MII_GMAC4_READ;
+ if (phyreg & MII_ADDR_C45) {
+ value |= MII_GMAC4_C45E;
+ value &= ~priv->hw->mii.reg_mask;
+ value |= ((phyreg >> MII_DEVADDR_C45_SHIFT) <<
+ priv->hw->mii.reg_shift) &
+ priv->hw->mii.reg_mask;
+
+ data |= (phyreg & MII_REGADDR_C45_MASK) <<
+ MII_GMAC4_REG_ADDR_SHIFT;
+ }
+ }
if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY),
100, 10000))
return -EBUSY;
+ writel(data, priv->ioaddr + mii_data);
writel(value, priv->ioaddr + mii_address);
if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY),
@@ -178,7 +193,7 @@ static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
return -EBUSY;
/* Read the data from the MII data register */
- data = (int)readl(priv->ioaddr + mii_data);
+ data = (int)readl(priv->ioaddr + mii_data) & MII_DATA_MASK;
return data;
}
@@ -198,8 +213,9 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
struct stmmac_priv *priv = netdev_priv(ndev);
unsigned int mii_address = priv->hw->mii.addr;
unsigned int mii_data = priv->hw->mii.data;
- u32 v;
u32 value = MII_BUSY;
+ int data = phydata;
+ u32 v;
value |= (phyaddr << priv->hw->mii.addr_shift)
& priv->hw->mii.addr_mask;
@@ -207,10 +223,21 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
& priv->hw->mii.clk_csr_mask;
- if (priv->plat->has_gmac4)
+ if (priv->plat->has_gmac4) {
value |= MII_GMAC4_WRITE;
- else
+ if (phyreg & MII_ADDR_C45) {
+ value |= MII_GMAC4_C45E;
+ value &= ~priv->hw->mii.reg_mask;
+ value |= ((phyreg >> MII_DEVADDR_C45_SHIFT) <<
+ priv->hw->mii.reg_shift) &
+ priv->hw->mii.reg_mask;
+
+ data |= (phyreg & MII_REGADDR_C45_MASK) <<
+ MII_GMAC4_REG_ADDR_SHIFT;
+ }
+ } else {
value |= MII_WRITE;
+ }
/* Wait until any existing MII operation is complete */
if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY),
@@ -218,7 +245,7 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
return -EBUSY;
/* Set the MII address register to write */
- writel(phydata, priv->ioaddr + mii_data);
+ writel(data, priv->ioaddr + mii_data);
writel(value, priv->ioaddr + mii_address);
/* Wait until any existing MII operation is complete */
@@ -237,51 +264,35 @@ int stmmac_mdio_reset(struct mii_bus *bus)
struct net_device *ndev = bus->priv;
struct stmmac_priv *priv = netdev_priv(ndev);
unsigned int mii_address = priv->hw->mii.addr;
- struct stmmac_mdio_bus_data *data = priv->plat->mdio_bus_data;
#ifdef CONFIG_OF
if (priv->device->of_node) {
- if (data->reset_gpio < 0) {
- struct device_node *np = priv->device->of_node;
+ struct gpio_desc *reset_gpio;
+ u32 delays[3] = { 0, 0, 0 };
- if (!np)
- return 0;
+ reset_gpio = devm_gpiod_get_optional(priv->device,
+ "snps,reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(reset_gpio))
+ return PTR_ERR(reset_gpio);
- data->reset_gpio = of_get_named_gpio(np,
- "snps,reset-gpio", 0);
- if (data->reset_gpio < 0)
- return 0;
+ device_property_read_u32_array(priv->device,
+ "snps,reset-delays-us",
+ delays, ARRAY_SIZE(delays));
- data->active_low = of_property_read_bool(np,
- "snps,reset-active-low");
- of_property_read_u32_array(np,
- "snps,reset-delays-us", data->delays, 3);
+ if (delays[0])
+ msleep(DIV_ROUND_UP(delays[0], 1000));
- if (devm_gpio_request(priv->device, data->reset_gpio,
- "mdio-reset"))
- return 0;
- }
-
- gpio_direction_output(data->reset_gpio,
- data->active_low ? 1 : 0);
- if (data->delays[0])
- msleep(DIV_ROUND_UP(data->delays[0], 1000));
+ gpiod_set_value_cansleep(reset_gpio, 1);
+ if (delays[1])
+ msleep(DIV_ROUND_UP(delays[1], 1000));
- gpio_set_value(data->reset_gpio, data->active_low ? 0 : 1);
- if (data->delays[1])
- msleep(DIV_ROUND_UP(data->delays[1], 1000));
-
- gpio_set_value(data->reset_gpio, data->active_low ? 1 : 0);
- if (data->delays[2])
- msleep(DIV_ROUND_UP(data->delays[2], 1000));
+ gpiod_set_value_cansleep(reset_gpio, 0);
+ if (delays[2])
+ msleep(DIV_ROUND_UP(delays[2], 1000));
}
#endif
- if (data->phy_reset) {
- netdev_dbg(ndev, "stmmac_mdio_reset: calling phy_reset\n");
- data->phy_reset(priv->plat->bsp_priv);
- }
-
/* This is a workaround for problems with the STE101P PHY.
* It doesn't complete its reset until at least one clock cycle
* on MDC, so perform a dummy mdio read. To be updated for GMAC4
@@ -318,11 +329,6 @@ int stmmac_mdio_register(struct net_device *ndev)
if (mdio_bus_data->irqs)
memcpy(new_bus->irq, mdio_bus_data->irqs, sizeof(new_bus->irq));
-#ifdef CONFIG_OF
- if (priv->device->of_node)
- mdio_bus_data->reset_gpio = -1;
-#endif
-
new_bus->name = "stmmac";
if (priv->plat->has_xgmac) {
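
The MDIO read/write hunks above add Clause 45 support for GMAC4: the MMD device address is placed in the register field of the MII address register (with MII_GMAC4_C45E set) and the 16-bit register address goes into the upper half of the data register. Here is a user-space sketch of how a phyreg value carrying MII_ADDR_C45 is decomposed; the constant values are assumed to match include/linux/mdio.h at the time of this series.

#include <stdio.h>
#include <stdint.h>

#define MII_ADDR_C45			(1u << 30)
#define MII_DEVADDR_C45_SHIFT		16
#define MII_REGADDR_C45_MASK		0xffffu		/* GENMASK(15, 0) */
#define MII_GMAC4_REG_ADDR_SHIFT	16

int main(void)
{
	/* example: MMD 7 (autonegotiation), register 0x0020 */
	uint32_t phyreg = MII_ADDR_C45 | (7 << MII_DEVADDR_C45_SHIFT) | 0x20;

	if (phyreg & MII_ADDR_C45) {
		uint32_t devad = (phyreg >> MII_DEVADDR_C45_SHIFT) & 0x1f;
		uint32_t regad = phyreg & MII_REGADDR_C45_MASK;
		/* devad replaces the C22 register number in the address
		 * register; regad lands in bits 31:16 of the data register
		 */
		uint32_t data = regad << MII_GMAC4_REG_ADDR_SHIFT;

		printf("devad=%u regad=0x%04x data=0x%08x\n",
		       (unsigned int)devad, (unsigned int)regad,
		       (unsigned int)data);
	}
	return 0;
}
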
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 0bd72739a071..86f9c07a38cf 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -63,7 +63,6 @@ static void common_default_data(struct plat_stmmacenet_data *plat)
plat->has_gmac = 1;
plat->force_sf_dma_mode = 1;
- plat->mdio_bus_data->phy_reset = NULL;
plat->mdio_bus_data->phy_mask = 0;
/* Set default value for multicast hash bins */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 0f0f4b31eb7e..73fc2524372e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -323,21 +323,6 @@ static int stmmac_dt_phy(struct plat_stmmacenet_data *plat,
{},
};
- /* If phy-handle property is passed from DT, use it as the PHY */
- plat->phy_node = of_parse_phandle(np, "phy-handle", 0);
- if (plat->phy_node)
- dev_dbg(dev, "Found phy-handle subnode\n");
-
- /* If phy-handle is not specified, check if we have a fixed-phy */
- if (!plat->phy_node && of_phy_is_fixed_link(np)) {
- if ((of_phy_register_fixed_link(np) < 0))
- return -ENODEV;
-
- dev_dbg(dev, "Found fixed-link subnode\n");
- plat->phy_node = of_node_get(np);
- mdio = false;
- }
-
if (of_match_node(need_mdio_ids, np)) {
plat->mdio_node = of_get_child_by_name(np, "mdio");
} else {
@@ -387,6 +372,13 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
*mac = of_get_mac_address(np);
plat->interface = of_get_phy_mode(np);
+ /* Some wrapper drivers still rely on phy_node. Let's keep it here until
+ * they are converted to phylink. */
+ plat->phy_node = of_parse_phandle(np, "phy-handle", 0);
+
+ /* PHYLINK automatically parses the phy-handle property */
+ plat->phylink_node = np;
+
/* Get max speed of operation from device tree */
if (of_property_read_u32(np, "max-speed", &plat->max_speed))
plat->max_speed = -1;
@@ -581,10 +573,6 @@ error_pclk_get:
void stmmac_remove_config_dt(struct platform_device *pdev,
struct plat_stmmacenet_data *plat)
{
- struct device_node *np = pdev->dev.of_node;
-
- if (of_phy_is_fixed_link(np))
- of_phy_deregister_fixed_link(np);
of_node_put(plat->phy_node);
of_node_put(plat->mdio_node);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
new file mode 100644
index 000000000000..a97b1ea76438
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
@@ -0,0 +1,850 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 Synopsys, Inc. and/or its affiliates.
+ * stmmac Selftests Support
+ *
+ * Author: Jose Abreu <joabreu@synopsys.com>
+ */
+
+#include <linux/completion.h>
+#include <linux/ethtool.h>
+#include <linux/ip.h>
+#include <linux/phy.h>
+#include <linux/udp.h>
+#include <net/tcp.h>
+#include <net/udp.h>
+#include "stmmac.h"
+
+struct stmmachdr {
+ __be32 version;
+ __be64 magic;
+ u8 id;
+} __packed;
+
+#define STMMAC_TEST_PKT_SIZE (sizeof(struct ethhdr) + sizeof(struct iphdr) + \
+ sizeof(struct stmmachdr))
+#define STMMAC_TEST_PKT_MAGIC 0xdeadcafecafedeadULL
+#define STMMAC_LB_TIMEOUT msecs_to_jiffies(200)
+
+struct stmmac_packet_attrs {
+ int vlan;
+ int vlan_id_in;
+ int vlan_id_out;
+ unsigned char *src;
+ unsigned char *dst;
+ u32 ip_src;
+ u32 ip_dst;
+ int tcp;
+ int sport;
+ int dport;
+ u32 exp_hash;
+ int dont_wait;
+ int timeout;
+ int size;
+ int remove_sa;
+ u8 id;
+};
+
+static u8 stmmac_test_next_id;
+
+static struct sk_buff *stmmac_test_get_udp_skb(struct stmmac_priv *priv,
+ struct stmmac_packet_attrs *attr)
+{
+ struct sk_buff *skb = NULL;
+ struct udphdr *uhdr = NULL;
+ struct tcphdr *thdr = NULL;
+ struct stmmachdr *shdr;
+ struct ethhdr *ehdr;
+ struct iphdr *ihdr;
+ int iplen, size;
+
+ size = attr->size + STMMAC_TEST_PKT_SIZE;
+ if (attr->vlan) {
+ size += 4;
+ if (attr->vlan > 1)
+ size += 4;
+ }
+
+ if (attr->tcp)
+ size += sizeof(struct tcphdr);
+ else
+ size += sizeof(struct udphdr);
+
+ skb = netdev_alloc_skb(priv->dev, size);
+ if (!skb)
+ return NULL;
+
+ prefetchw(skb->data);
+ skb_reserve(skb, NET_IP_ALIGN);
+
+ if (attr->vlan > 1)
+ ehdr = skb_push(skb, ETH_HLEN + 8);
+ else if (attr->vlan)
+ ehdr = skb_push(skb, ETH_HLEN + 4);
+ else if (attr->remove_sa)
+ ehdr = skb_push(skb, ETH_HLEN - 6);
+ else
+ ehdr = skb_push(skb, ETH_HLEN);
+ skb_reset_mac_header(skb);
+
+ skb_set_network_header(skb, skb->len);
+ ihdr = skb_put(skb, sizeof(*ihdr));
+
+ skb_set_transport_header(skb, skb->len);
+ if (attr->tcp)
+ thdr = skb_put(skb, sizeof(*thdr));
+ else
+ uhdr = skb_put(skb, sizeof(*uhdr));
+
+ if (!attr->remove_sa)
+ eth_zero_addr(ehdr->h_source);
+ eth_zero_addr(ehdr->h_dest);
+ if (attr->src && !attr->remove_sa)
+ ether_addr_copy(ehdr->h_source, attr->src);
+ if (attr->dst)
+ ether_addr_copy(ehdr->h_dest, attr->dst);
+
+ if (!attr->remove_sa) {
+ ehdr->h_proto = htons(ETH_P_IP);
+ } else {
+ __be16 *ptr = (__be16 *)ehdr;
+
+ /* HACK */
+ ptr[3] = htons(ETH_P_IP);
+ }
+
+ if (attr->vlan) {
+ __be16 *tag, *proto;
+
+ if (!attr->remove_sa) {
+ tag = (void *)ehdr + ETH_HLEN;
+ proto = (void *)ehdr + (2 * ETH_ALEN);
+ } else {
+ tag = (void *)ehdr + ETH_HLEN - 6;
+ proto = (void *)ehdr + ETH_ALEN;
+ }
+
+ proto[0] = htons(ETH_P_8021Q);
+ tag[0] = htons(attr->vlan_id_out);
+ tag[1] = htons(ETH_P_IP);
+ if (attr->vlan > 1) {
+ proto[0] = htons(ETH_P_8021AD);
+ tag[1] = htons(ETH_P_8021Q);
+ tag[2] = htons(attr->vlan_id_in);
+ tag[3] = htons(ETH_P_IP);
+ }
+ }
+
+ if (attr->tcp) {
+ thdr->source = htons(attr->sport);
+ thdr->dest = htons(attr->dport);
+ thdr->doff = sizeof(struct tcphdr) / 4;
+ thdr->check = 0;
+ } else {
+ uhdr->source = htons(attr->sport);
+ uhdr->dest = htons(attr->dport);
+ uhdr->len = htons(sizeof(*shdr) + sizeof(*uhdr) + attr->size);
+ uhdr->check = 0;
+ }
+
+ ihdr->ihl = 5;
+ ihdr->ttl = 32;
+ ihdr->version = 4;
+ if (attr->tcp)
+ ihdr->protocol = IPPROTO_TCP;
+ else
+ ihdr->protocol = IPPROTO_UDP;
+ iplen = sizeof(*ihdr) + sizeof(*shdr) + attr->size;
+ if (attr->tcp)
+ iplen += sizeof(*thdr);
+ else
+ iplen += sizeof(*uhdr);
+ ihdr->tot_len = htons(iplen);
+ ihdr->frag_off = 0;
+ ihdr->saddr = 0;
+ ihdr->daddr = htonl(attr->ip_dst);
+ ihdr->tos = 0;
+ ihdr->id = 0;
+ ip_send_check(ihdr);
+
+ shdr = skb_put(skb, sizeof(*shdr));
+ shdr->version = 0;
+ shdr->magic = cpu_to_be64(STMMAC_TEST_PKT_MAGIC);
+ attr->id = stmmac_test_next_id;
+ shdr->id = stmmac_test_next_id++;
+
+ if (attr->size)
+ skb_put(skb, attr->size);
+
+ skb->csum = 0;
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ if (attr->tcp) {
+ thdr->check = ~tcp_v4_check(skb->len, ihdr->saddr, ihdr->daddr, 0);
+ skb->csum_start = skb_transport_header(skb) - skb->head;
+ skb->csum_offset = offsetof(struct tcphdr, check);
+ } else {
+ udp4_hwcsum(skb, ihdr->saddr, ihdr->daddr);
+ }
+
+ skb->protocol = htons(ETH_P_IP);
+ skb->pkt_type = PACKET_HOST;
+ skb->dev = priv->dev;
+
+ return skb;
+}
+
+struct stmmac_test_priv {
+ struct stmmac_packet_attrs *packet;
+ struct packet_type pt;
+ struct completion comp;
+ int double_vlan;
+ int vlan_id;
+ int ok;
+};
+
+static int stmmac_test_loopback_validate(struct sk_buff *skb,
+ struct net_device *ndev,
+ struct packet_type *pt,
+ struct net_device *orig_ndev)
+{
+ struct stmmac_test_priv *tpriv = pt->af_packet_priv;
+ struct stmmachdr *shdr;
+ struct ethhdr *ehdr;
+ struct udphdr *uhdr;
+ struct tcphdr *thdr;
+ struct iphdr *ihdr;
+
+ skb = skb_unshare(skb, GFP_ATOMIC);
+ if (!skb)
+ goto out;
+
+ if (skb_linearize(skb))
+ goto out;
+ if (skb_headlen(skb) < (STMMAC_TEST_PKT_SIZE - ETH_HLEN))
+ goto out;
+
+ ehdr = (struct ethhdr *)skb_mac_header(skb);
+ if (tpriv->packet->dst) {
+ if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->dst))
+ goto out;
+ }
+ if (tpriv->packet->src) {
+ if (!ether_addr_equal(ehdr->h_source, orig_ndev->dev_addr))
+ goto out;
+ }
+
+ ihdr = ip_hdr(skb);
+ if (tpriv->double_vlan)
+ ihdr = (struct iphdr *)(skb_network_header(skb) + 4);
+
+ if (tpriv->packet->tcp) {
+ if (ihdr->protocol != IPPROTO_TCP)
+ goto out;
+
+ thdr = (struct tcphdr *)((u8 *)ihdr + 4 * ihdr->ihl);
+ if (thdr->dest != htons(tpriv->packet->dport))
+ goto out;
+
+ shdr = (struct stmmachdr *)((u8 *)thdr + sizeof(*thdr));
+ } else {
+ if (ihdr->protocol != IPPROTO_UDP)
+ goto out;
+
+ uhdr = (struct udphdr *)((u8 *)ihdr + 4 * ihdr->ihl);
+ if (uhdr->dest != htons(tpriv->packet->dport))
+ goto out;
+
+ shdr = (struct stmmachdr *)((u8 *)uhdr + sizeof(*uhdr));
+ }
+
+ if (shdr->magic != cpu_to_be64(STMMAC_TEST_PKT_MAGIC))
+ goto out;
+ if (tpriv->packet->exp_hash && !skb->hash)
+ goto out;
+ if (tpriv->packet->id != shdr->id)
+ goto out;
+
+ tpriv->ok = true;
+ complete(&tpriv->comp);
+out:
+ kfree_skb(skb);
+ return 0;
+}
+
+static int __stmmac_test_loopback(struct stmmac_priv *priv,
+ struct stmmac_packet_attrs *attr)
+{
+ struct stmmac_test_priv *tpriv;
+ struct sk_buff *skb = NULL;
+ int ret = 0;
+
+ tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
+ if (!tpriv)
+ return -ENOMEM;
+
+ tpriv->ok = false;
+ init_completion(&tpriv->comp);
+
+ tpriv->pt.type = htons(ETH_P_IP);
+ tpriv->pt.func = stmmac_test_loopback_validate;
+ tpriv->pt.dev = priv->dev;
+ tpriv->pt.af_packet_priv = tpriv;
+ tpriv->packet = attr;
+ dev_add_pack(&tpriv->pt);
+
+ skb = stmmac_test_get_udp_skb(priv, attr);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ skb_set_queue_mapping(skb, 0);
+ ret = dev_queue_xmit(skb);
+ if (ret)
+ goto cleanup;
+
+ if (attr->dont_wait)
+ goto cleanup;
+
+ if (!attr->timeout)
+ attr->timeout = STMMAC_LB_TIMEOUT;
+
+ wait_for_completion_timeout(&tpriv->comp, attr->timeout);
+ ret = !tpriv->ok;
+
+cleanup:
+ dev_remove_pack(&tpriv->pt);
+ kfree(tpriv);
+ return ret;
+}
+
+static int stmmac_test_mac_loopback(struct stmmac_priv *priv)
+{
+ struct stmmac_packet_attrs attr = { };
+
+ attr.dst = priv->dev->dev_addr;
+ return __stmmac_test_loopback(priv, &attr);
+}
+
+static int stmmac_test_phy_loopback(struct stmmac_priv *priv)
+{
+ struct stmmac_packet_attrs attr = { };
+ int ret;
+
+ if (!priv->dev->phydev)
+ return -EBUSY;
+
+ ret = phy_loopback(priv->dev->phydev, true);
+ if (ret)
+ return ret;
+
+ attr.dst = priv->dev->dev_addr;
+ ret = __stmmac_test_loopback(priv, &attr);
+
+ phy_loopback(priv->dev->phydev, false);
+ return ret;
+}
+
+static int stmmac_test_mmc(struct stmmac_priv *priv)
+{
+ struct stmmac_counters initial, final;
+ int ret;
+
+ memset(&initial, 0, sizeof(initial));
+ memset(&final, 0, sizeof(final));
+
+ if (!priv->dma_cap.rmon)
+ return -EOPNOTSUPP;
+
+ /* Save previous results into internal struct */
+ stmmac_mmc_read(priv, priv->mmcaddr, &priv->mmc);
+
+ ret = stmmac_test_mac_loopback(priv);
+ if (ret)
+ return ret;
+
+ /* These will be loopback results so no need to save them */
+ stmmac_mmc_read(priv, priv->mmcaddr, &final);
+
+ /*
+ * The number of MMC counters available depends on HW configuration
+ * so we just use this one to validate the feature. I hope there is
+ * not a version without this counter.
+ */
+ if (final.mmc_tx_framecount_g <= initial.mmc_tx_framecount_g)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int stmmac_test_eee(struct stmmac_priv *priv)
+{
+ struct stmmac_extra_stats *initial, *final;
+ int retries = 10;
+ int ret;
+
+ if (!priv->dma_cap.eee || !priv->eee_active)
+ return -EOPNOTSUPP;
+
+ initial = kzalloc(sizeof(*initial), GFP_KERNEL);
+ if (!initial)
+ return -ENOMEM;
+
+ final = kzalloc(sizeof(*final), GFP_KERNEL);
+ if (!final) {
+ ret = -ENOMEM;
+ goto out_free_initial;
+ }
+
+ memcpy(initial, &priv->xstats, sizeof(*initial));
+
+ ret = stmmac_test_mac_loopback(priv);
+ if (ret)
+ goto out_free_final;
+
+ /* We have no traffic on the line, so sooner or later it will go into LPI */
+ while (--retries) {
+ memcpy(final, &priv->xstats, sizeof(*final));
+
+ if (final->irq_tx_path_in_lpi_mode_n >
+ initial->irq_tx_path_in_lpi_mode_n)
+ break;
+ msleep(100);
+ }
+
+ if (!retries) {
+ ret = -ETIMEDOUT;
+ goto out_free_final;
+ }
+
+ if (final->irq_tx_path_in_lpi_mode_n <=
+ initial->irq_tx_path_in_lpi_mode_n) {
+ ret = -EINVAL;
+ goto out_free_final;
+ }
+
+ if (final->irq_tx_path_exit_lpi_mode_n <=
+ initial->irq_tx_path_exit_lpi_mode_n) {
+ ret = -EINVAL;
+ goto out_free_final;
+ }
+
+out_free_final:
+ kfree(final);
+out_free_initial:
+ kfree(initial);
+ return ret;
+}
+
+static int stmmac_filter_check(struct stmmac_priv *priv)
+{
+ if (!(priv->dev->flags & IFF_PROMISC))
+ return 0;
+
+ netdev_warn(priv->dev, "Test can't be run in promiscuous mode!\n");
+ return -EOPNOTSUPP;
+}
+
+static int stmmac_test_hfilt(struct stmmac_priv *priv)
+{
+ unsigned char gd_addr[ETH_ALEN] = {0x01, 0x00, 0xcc, 0xcc, 0xdd, 0xdd};
+ unsigned char bd_addr[ETH_ALEN] = {0x09, 0x00, 0xaa, 0xaa, 0xbb, 0xbb};
+ struct stmmac_packet_attrs attr = { };
+ int ret;
+
+ ret = stmmac_filter_check(priv);
+ if (ret)
+ return ret;
+
+ ret = dev_mc_add(priv->dev, gd_addr);
+ if (ret)
+ return ret;
+
+ attr.dst = gd_addr;
+
+ /* Shall receive packet */
+ ret = __stmmac_test_loopback(priv, &attr);
+ if (ret)
+ goto cleanup;
+
+ attr.dst = bd_addr;
+
+ /* Shall NOT receive packet */
+ ret = __stmmac_test_loopback(priv, &attr);
+ ret = !ret;
+
+cleanup:
+ dev_mc_del(priv->dev, gd_addr);
+ return ret;
+}
+
+static int stmmac_test_pfilt(struct stmmac_priv *priv)
+{
+ unsigned char gd_addr[ETH_ALEN] = {0x00, 0x01, 0x44, 0x55, 0x66, 0x77};
+ unsigned char bd_addr[ETH_ALEN] = {0x08, 0x00, 0x22, 0x33, 0x44, 0x55};
+ struct stmmac_packet_attrs attr = { };
+ int ret;
+
+ if (stmmac_filter_check(priv))
+ return -EOPNOTSUPP;
+
+ ret = dev_uc_add(priv->dev, gd_addr);
+ if (ret)
+ return ret;
+
+ attr.dst = gd_addr;
+
+ /* Shall receive packet */
+ ret = __stmmac_test_loopback(priv, &attr);
+ if (ret)
+ goto cleanup;
+
+ attr.dst = bd_addr;
+
+ /* Shall NOT receive packet */
+ ret = __stmmac_test_loopback(priv, &attr);
+ ret = !ret;
+
+cleanup:
+ dev_uc_del(priv->dev, gd_addr);
+ return ret;
+}
+
+static int stmmac_dummy_sync(struct net_device *netdev, const u8 *addr)
+{
+ return 0;
+}
+
+static void stmmac_test_set_rx_mode(struct net_device *netdev)
+{
+ /* As we are in ethtool test mode we already own the rtnl lock,
+ * so no address will change from user space. We can just call the
+ * ndo_set_rx_mode() callback directly. */
+ if (netdev->netdev_ops->ndo_set_rx_mode)
+ netdev->netdev_ops->ndo_set_rx_mode(netdev);
+}
+
+static int stmmac_test_mcfilt(struct stmmac_priv *priv)
+{
+ unsigned char uc_addr[ETH_ALEN] = {0x00, 0x01, 0x44, 0x55, 0x66, 0x77};
+ unsigned char mc_addr[ETH_ALEN] = {0x01, 0x01, 0x44, 0x55, 0x66, 0x77};
+ struct stmmac_packet_attrs attr = { };
+ int ret;
+
+ if (stmmac_filter_check(priv))
+ return -EOPNOTSUPP;
+
+ /* Remove all MC addresses */
+ __dev_mc_unsync(priv->dev, NULL);
+ stmmac_test_set_rx_mode(priv->dev);
+
+ ret = dev_uc_add(priv->dev, uc_addr);
+ if (ret)
+ goto cleanup;
+
+ attr.dst = uc_addr;
+
+ /* Shall receive packet */
+ ret = __stmmac_test_loopback(priv, &attr);
+ if (ret)
+ goto cleanup;
+
+ attr.dst = mc_addr;
+
+ /* Shall NOT receive packet */
+ ret = __stmmac_test_loopback(priv, &attr);
+ ret = !ret;
+
+cleanup:
+ dev_uc_del(priv->dev, uc_addr);
+ __dev_mc_sync(priv->dev, stmmac_dummy_sync, NULL);
+ stmmac_test_set_rx_mode(priv->dev);
+ return ret;
+}
+
+static int stmmac_test_ucfilt(struct stmmac_priv *priv)
+{
+ unsigned char uc_addr[ETH_ALEN] = {0x00, 0x01, 0x44, 0x55, 0x66, 0x77};
+ unsigned char mc_addr[ETH_ALEN] = {0x01, 0x01, 0x44, 0x55, 0x66, 0x77};
+ struct stmmac_packet_attrs attr = { };
+ int ret;
+
+ if (stmmac_filter_check(priv))
+ return -EOPNOTSUPP;
+
+ /* Remove all UC addresses */
+ __dev_uc_unsync(priv->dev, NULL);
+ stmmac_test_set_rx_mode(priv->dev);
+
+ ret = dev_mc_add(priv->dev, mc_addr);
+ if (ret)
+ goto cleanup;
+
+ attr.dst = mc_addr;
+
+ /* Shall receive packet */
+ ret = __stmmac_test_loopback(priv, &attr);
+ if (ret)
+ goto cleanup;
+
+ attr.dst = uc_addr;
+
+ /* Shall NOT receive packet */
+ ret = __stmmac_test_loopback(priv, &attr);
+ ret = !ret;
+
+cleanup:
+ dev_mc_del(priv->dev, mc_addr);
+ __dev_uc_sync(priv->dev, stmmac_dummy_sync, NULL);
+ stmmac_test_set_rx_mode(priv->dev);
+ return ret;
+}
+
+static int stmmac_test_flowctrl_validate(struct sk_buff *skb,
+ struct net_device *ndev,
+ struct packet_type *pt,
+ struct net_device *orig_ndev)
+{
+ struct stmmac_test_priv *tpriv = pt->af_packet_priv;
+ struct ethhdr *ehdr;
+
+ ehdr = (struct ethhdr *)skb_mac_header(skb);
+ if (!ether_addr_equal(ehdr->h_source, orig_ndev->dev_addr))
+ goto out;
+ if (ehdr->h_proto != htons(ETH_P_PAUSE))
+ goto out;
+
+ tpriv->ok = true;
+ complete(&tpriv->comp);
+out:
+ kfree_skb(skb);
+ return 0;
+}
+
+static int stmmac_test_flowctrl(struct stmmac_priv *priv)
+{
+ unsigned char paddr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00, 0x00, 0x01};
+ struct phy_device *phydev = priv->dev->phydev;
+ u32 rx_cnt = priv->plat->rx_queues_to_use;
+ struct stmmac_test_priv *tpriv;
+ unsigned int pkt_count;
+ int i, ret = 0;
+
+ if (!phydev || !phydev->pause)
+ return -EOPNOTSUPP;
+
+ tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
+ if (!tpriv)
+ return -ENOMEM;
+
+ tpriv->ok = false;
+ init_completion(&tpriv->comp);
+ tpriv->pt.type = htons(ETH_P_PAUSE);
+ tpriv->pt.func = stmmac_test_flowctrl_validate;
+ tpriv->pt.dev = priv->dev;
+ tpriv->pt.af_packet_priv = tpriv;
+ dev_add_pack(&tpriv->pt);
+
+ /* Compute minimum number of packets to make FIFO full */
+ pkt_count = priv->plat->rx_fifo_size;
+ if (!pkt_count)
+ pkt_count = priv->dma_cap.rx_fifo_size;
+ pkt_count /= 1400;
+ pkt_count *= 2;
+
+ for (i = 0; i < rx_cnt; i++)
+ stmmac_stop_rx(priv, priv->ioaddr, i);
+
+ ret = dev_set_promiscuity(priv->dev, 1);
+ if (ret)
+ goto cleanup;
+
+ ret = dev_mc_add(priv->dev, paddr);
+ if (ret)
+ goto cleanup;
+
+ for (i = 0; i < pkt_count; i++) {
+ struct stmmac_packet_attrs attr = { };
+
+ attr.dst = priv->dev->dev_addr;
+ attr.dont_wait = true;
+ attr.size = 1400;
+
+ ret = __stmmac_test_loopback(priv, &attr);
+ if (ret)
+ goto cleanup;
+ if (tpriv->ok)
+ break;
+ }
+
+ /* Wait for some time in case RX Watchdog is enabled */
+ msleep(200);
+
+ for (i = 0; i < rx_cnt; i++) {
+ struct stmmac_channel *ch = &priv->channel[i];
+
+ stmmac_start_rx(priv, priv->ioaddr, i);
+ local_bh_disable();
+ napi_reschedule(&ch->rx_napi);
+ local_bh_enable();
+ }
+
+ wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
+ ret = !tpriv->ok;
+
+cleanup:
+ dev_mc_del(priv->dev, paddr);
+ dev_set_promiscuity(priv->dev, -1);
+ dev_remove_pack(&tpriv->pt);
+ kfree(tpriv);
+ return ret;
+}
+
+#define STMMAC_LOOPBACK_NONE 0
+#define STMMAC_LOOPBACK_MAC 1
+#define STMMAC_LOOPBACK_PHY 2
+
+static const struct stmmac_test {
+ char name[ETH_GSTRING_LEN];
+ int lb;
+ int (*fn)(struct stmmac_priv *priv);
+} stmmac_selftests[] = {
+ {
+ .name = "MAC Loopback ",
+ .lb = STMMAC_LOOPBACK_MAC,
+ .fn = stmmac_test_mac_loopback,
+ }, {
+ .name = "PHY Loopback ",
+ .lb = STMMAC_LOOPBACK_NONE, /* Test will handle it */
+ .fn = stmmac_test_phy_loopback,
+ }, {
+ .name = "MMC Counters ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_mmc,
+ }, {
+ .name = "EEE ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_eee,
+ }, {
+ .name = "Hash Filter MC ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_hfilt,
+ }, {
+ .name = "Perfect Filter UC ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_pfilt,
+ }, {
+ .name = "MC Filter ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_mcfilt,
+ }, {
+ .name = "UC Filter ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_ucfilt,
+ }, {
+ .name = "Flow Control ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_flowctrl,
+ },
+};
+
+void stmmac_selftest_run(struct net_device *dev,
+ struct ethtool_test *etest, u64 *buf)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int count = stmmac_selftest_get_count(priv);
+ int carrier = netif_carrier_ok(dev);
+ int i, ret;
+
+ memset(buf, 0, sizeof(*buf) * count);
+ stmmac_test_next_id = 0;
+
+ if (etest->flags != ETH_TEST_FL_OFFLINE) {
+ netdev_err(priv->dev, "Only offline tests are supported\n");
+ etest->flags |= ETH_TEST_FL_FAILED;
+ return;
+ } else if (!carrier) {
+ netdev_err(priv->dev, "You need valid Link to execute tests\n");
+ etest->flags |= ETH_TEST_FL_FAILED;
+ return;
+ }
+
+ /* We don't want extra traffic */
+ netif_carrier_off(dev);
+
+ /* Wait for the queues to drain */
+ msleep(200);
+
+ for (i = 0; i < count; i++) {
+ ret = 0;
+
+ switch (stmmac_selftests[i].lb) {
+ case STMMAC_LOOPBACK_PHY:
+ ret = -EOPNOTSUPP;
+ if (dev->phydev)
+ ret = phy_loopback(dev->phydev, true);
+ if (!ret)
+ break;
+ /* Fallthrough */
+ case STMMAC_LOOPBACK_MAC:
+ ret = stmmac_set_mac_loopback(priv, priv->ioaddr, true);
+ break;
+ case STMMAC_LOOPBACK_NONE:
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ /*
+ * The first tests will always be MAC / PHY loopback. If any of
+ * them is not supported we abort early.
+ */
+ if (ret) {
+ netdev_err(priv->dev, "Loopback is not supported\n");
+ etest->flags |= ETH_TEST_FL_FAILED;
+ break;
+ }
+
+ ret = stmmac_selftests[i].fn(priv);
+ if (ret && (ret != -EOPNOTSUPP))
+ etest->flags |= ETH_TEST_FL_FAILED;
+ buf[i] = ret;
+
+ switch (stmmac_selftests[i].lb) {
+ case STMMAC_LOOPBACK_PHY:
+ ret = -EOPNOTSUPP;
+ if (dev->phydev)
+ ret = phy_loopback(dev->phydev, false);
+ if (!ret)
+ break;
+ /* Fallthrough */
+ case STMMAC_LOOPBACK_MAC:
+ stmmac_set_mac_loopback(priv, priv->ioaddr, false);
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* Restart everything */
+ if (carrier)
+ netif_carrier_on(dev);
+}
+
+void stmmac_selftest_get_strings(struct stmmac_priv *priv, u8 *data)
+{
+ u8 *p = data;
+ int i;
+
+ for (i = 0; i < stmmac_selftest_get_count(priv); i++) {
+ snprintf(p, ETH_GSTRING_LEN, "%2d. %s", i + 1,
+ stmmac_selftests[i].name);
+ p += ETH_GSTRING_LEN;
+ }
+}
+
+int stmmac_selftest_get_count(struct stmmac_priv *priv)
+{
+ return ARRAY_SIZE(stmmac_selftests);
+}
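
The new selftest file recognizes its own looped-back frames by a fixed 64-bit magic plus a per-test id carried in the small packed header appended after the transport header. The following stand-alone sketch only mirrors that matching step; glibc's htobe64() stands in for the kernel's cpu_to_be64(), and the field layout follows struct stmmachdr above.

#include <endian.h>	/* htobe64(), glibc-specific */
#include <stdint.h>
#include <stdio.h>

#define TEST_PKT_MAGIC	0xdeadcafecafedeadULL

struct testhdr {
	uint32_t version;
	uint64_t magic;		/* stored big-endian, as on the wire */
	uint8_t id;
} __attribute__((packed));

/* mirrors the magic/id check in stmmac_test_loopback_validate() */
static int frame_matches(const struct testhdr *shdr, uint8_t expected_id)
{
	if (shdr->magic != htobe64(TEST_PKT_MAGIC))
		return 0;
	return shdr->id == expected_id;
}

int main(void)
{
	struct testhdr hdr = {
		.version = 0,
		.magic = htobe64(TEST_PKT_MAGIC),
		.id = 3,
	};

	printf("frame matched: %d\n", frame_matches(&hdr, 3));
	return 0;
}
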
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 6f99437a6962..0bc5863bffeb 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -1217,8 +1217,6 @@ static int link_status_1g_rgmii(struct niu *np, int *link_up_p)
spin_lock_irqsave(&np->lock, flags);
- err = -EINVAL;
-
err = mii_read(np, np->phy_addr, MII_BMSR);
if (err < 0)
goto out;
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index bd05a977ee7e..834afca3a019 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -50,6 +50,7 @@ config TI_CPSW
depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST
select TI_DAVINCI_MDIO
select MFD_SYSCON
+ select PAGE_POOL
select REGMAP
---help---
This driver supports TI's CPSW Ethernet Switch.
@@ -60,6 +61,7 @@ config TI_CPSW
config TI_CPTS
bool "TI Common Platform Time Sync (CPTS) Support"
depends on TI_CPSW || TI_KEYSTONE_NETCP || COMPILE_TEST
+ depends on COMMON_CLK
depends on POSIX_TIMERS
---help---
This driver supports the Common Platform Time Sync unit of
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 634fc484a0b3..f320f9a0de8b 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -31,6 +31,10 @@
#include <linux/if_vlan.h>
#include <linux/kmemleak.h>
#include <linux/sys_soc.h>
+#include <net/page_pool.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
+#include <linux/filter.h>
#include <linux/pinctrl/consumer.h>
#include <net/pkt_cls.h>
@@ -60,6 +64,10 @@ static int descs_pool_size = CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT;
module_param(descs_pool_size, int, 0444);
MODULE_PARM_DESC(descs_pool_size, "Number of CPDMA CPPI descriptors in pool");
+/* The buf includes headroom compatible with both skb and xdpf */
+#define CPSW_HEADROOM_NA (max(XDP_PACKET_HEADROOM, NET_SKB_PAD) + NET_IP_ALIGN)
+#define CPSW_HEADROOM ALIGN(CPSW_HEADROOM_NA, sizeof(long))
+
#define for_each_slave(priv, func, arg...) \
do { \
struct cpsw_slave *slave; \
@@ -74,6 +82,11 @@ MODULE_PARM_DESC(descs_pool_size, "Number of CPDMA CPPI descriptors in pool");
(func)(slave++, ##arg); \
} while (0)
+#define CPSW_XMETA_OFFSET ALIGN(sizeof(struct xdp_frame), sizeof(long))
+
+#define CPSW_XDP_CONSUMED 1
+#define CPSW_XDP_PASS 0
+
static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
__be16 proto, u16 vid);
@@ -337,24 +350,58 @@ void cpsw_intr_disable(struct cpsw_common *cpsw)
return;
}
+static int cpsw_is_xdpf_handle(void *handle)
+{
+ return (unsigned long)handle & BIT(0);
+}
+
+static void *cpsw_xdpf_to_handle(struct xdp_frame *xdpf)
+{
+ return (void *)((unsigned long)xdpf | BIT(0));
+}
+
+static struct xdp_frame *cpsw_handle_to_xdpf(void *handle)
+{
+ return (struct xdp_frame *)((unsigned long)handle & ~BIT(0));
+}
+
+struct __aligned(sizeof(long)) cpsw_meta_xdp {
+ struct net_device *ndev;
+ int ch;
+};
+
void cpsw_tx_handler(void *token, int len, int status)
{
+ struct cpsw_meta_xdp *xmeta;
+ struct xdp_frame *xdpf;
+ struct net_device *ndev;
struct netdev_queue *txq;
- struct sk_buff *skb = token;
- struct net_device *ndev = skb->dev;
- struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+ struct sk_buff *skb;
+ int ch;
+
+ if (cpsw_is_xdpf_handle(token)) {
+ xdpf = cpsw_handle_to_xdpf(token);
+ xmeta = (void *)xdpf + CPSW_XMETA_OFFSET;
+ ndev = xmeta->ndev;
+ ch = xmeta->ch;
+ xdp_return_frame(xdpf);
+ } else {
+ skb = token;
+ ndev = skb->dev;
+ ch = skb_get_queue_mapping(skb);
+ cpts_tx_timestamp(ndev_to_cpsw(ndev)->cpts, skb);
+ dev_kfree_skb_any(skb);
+ }
/* Check whether the queue is stopped due to stalled tx dma, if the
* queue is stopped then start the queue as we have free desc for tx
*/
- txq = netdev_get_tx_queue(ndev, skb_get_queue_mapping(skb));
+ txq = netdev_get_tx_queue(ndev, ch);
if (unlikely(netif_tx_queue_stopped(txq)))
netif_tx_wake_queue(txq);
- cpts_tx_timestamp(cpsw->cpts, skb);
ndev->stats.tx_packets++;
ndev->stats.tx_bytes += len;
- dev_kfree_skb_any(skb);
}
static void cpsw_rx_vlan_encap(struct sk_buff *skb)
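
The completion handler above distinguishes xdp_frame tokens from sk_buff tokens by setting bit 0 of the pointer, which is always clear for these at-least-word-aligned allocations. Below is a minimal user-space sketch of the same tagging trick, mirroring cpsw_xdpf_to_handle() and friends; the alignment guarantee is the assumption that makes it safe.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static int is_tagged(void *handle)
{
	return (uintptr_t)handle & 1;
}

static void *tag(void *ptr)
{
	return (void *)((uintptr_t)ptr | 1);
}

static void *untag(void *handle)
{
	return (void *)((uintptr_t)handle & ~(uintptr_t)1);
}

int main(void)
{
	int *obj = malloc(sizeof(*obj));	/* malloc is word aligned */
	void *handle = tag(obj);

	assert(((uintptr_t)obj & 1) == 0);	/* bit 0 is free for the tag */
	printf("tagged=%d untag ok=%d\n", is_tagged(handle),
	       untag(handle) == (void *)obj);
	free(obj);
	return 0;
}
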
@@ -400,24 +447,252 @@ static void cpsw_rx_vlan_encap(struct sk_buff *skb)
}
}
+static int cpsw_xdp_tx_frame(struct cpsw_priv *priv, struct xdp_frame *xdpf,
+ struct page *page)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_meta_xdp *xmeta;
+ struct cpdma_chan *txch;
+ dma_addr_t dma;
+ int ret, port;
+
+ xmeta = (void *)xdpf + CPSW_XMETA_OFFSET;
+ xmeta->ndev = priv->ndev;
+ xmeta->ch = 0;
+ txch = cpsw->txv[0].ch;
+
+ port = priv->emac_port + cpsw->data.dual_emac;
+ if (page) {
+ dma = page_pool_get_dma_addr(page);
+ dma += xdpf->headroom + sizeof(struct xdp_frame);
+ ret = cpdma_chan_submit_mapped(txch, cpsw_xdpf_to_handle(xdpf),
+ dma, xdpf->len, port);
+ } else {
+ if (sizeof(*xmeta) > xdpf->headroom) {
+ xdp_return_frame_rx_napi(xdpf);
+ return -EINVAL;
+ }
+
+ ret = cpdma_chan_submit(txch, cpsw_xdpf_to_handle(xdpf),
+ xdpf->data, xdpf->len, port);
+ }
+
+ if (ret) {
+ priv->ndev->stats.tx_dropped++;
+ xdp_return_frame_rx_napi(xdpf);
+ }
+
+ return ret;
+}
+
+static int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp,
+ struct page *page)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct net_device *ndev = priv->ndev;
+ int ret = CPSW_XDP_CONSUMED;
+ struct xdp_frame *xdpf;
+ struct bpf_prog *prog;
+ u32 act;
+
+ rcu_read_lock();
+
+ prog = READ_ONCE(priv->xdp_prog);
+ if (!prog) {
+ ret = CPSW_XDP_PASS;
+ goto out;
+ }
+
+ act = bpf_prog_run_xdp(prog, xdp);
+ switch (act) {
+ case XDP_PASS:
+ ret = CPSW_XDP_PASS;
+ break;
+ case XDP_TX:
+ xdpf = convert_to_xdp_frame(xdp);
+ if (unlikely(!xdpf))
+ goto drop;
+
+ cpsw_xdp_tx_frame(priv, xdpf, page);
+ break;
+ case XDP_REDIRECT:
+ if (xdp_do_redirect(ndev, xdp, prog))
+ goto drop;
+
+ /* Have to flush here, per packet, instead of doing it in bulk
+ * at the end of the napi handler. The RX devices on this
+ * particular hardware share a common queue, so the
+ * incoming device might change per packet.
+ */
+ xdp_do_flush_map();
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ /* fall through */
+ case XDP_ABORTED:
+ trace_xdp_exception(ndev, prog, act);
+ /* fall through -- handle aborts by dropping packet */
+ case XDP_DROP:
+ goto drop;
+ }
+out:
+ rcu_read_unlock();
+ return ret;
+drop:
+ rcu_read_unlock();
+ page_pool_recycle_direct(cpsw->page_pool[ch], page);
+ return ret;
+}
+
+static unsigned int cpsw_rxbuf_total_len(unsigned int len)
+{
+ len += CPSW_HEADROOM;
+ len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+ return SKB_DATA_ALIGN(len);
+}
+
+static struct page_pool *cpsw_create_page_pool(struct cpsw_common *cpsw,
+ int size)
+{
+ struct page_pool_params pp_params;
+ struct page_pool *pool;
+
+ pp_params.order = 0;
+ pp_params.flags = PP_FLAG_DMA_MAP;
+ pp_params.pool_size = size;
+ pp_params.nid = NUMA_NO_NODE;
+ pp_params.dma_dir = DMA_BIDIRECTIONAL;
+ pp_params.dev = cpsw->dev;
+
+ pool = page_pool_create(&pp_params);
+ if (IS_ERR(pool))
+ dev_err(cpsw->dev, "cannot create rx page pool\n");
+
+ return pool;
+}
+
+static int cpsw_ndev_create_xdp_rxq(struct cpsw_priv *priv, int ch)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct xdp_rxq_info *rxq;
+ struct page_pool *pool;
+ int ret;
+
+ pool = cpsw->page_pool[ch];
+ rxq = &priv->xdp_rxq[ch];
+
+ ret = xdp_rxq_info_reg(rxq, priv->ndev, ch);
+ if (ret)
+ return ret;
+
+ ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool);
+ if (ret)
+ xdp_rxq_info_unreg(rxq);
+
+ return ret;
+}
+
+static void cpsw_ndev_destroy_xdp_rxq(struct cpsw_priv *priv, int ch)
+{
+ struct xdp_rxq_info *rxq = &priv->xdp_rxq[ch];
+
+ if (!xdp_rxq_info_is_reg(rxq))
+ return;
+
+ xdp_rxq_info_unreg(rxq);
+}
+
+static int cpsw_create_rx_pool(struct cpsw_common *cpsw, int ch)
+{
+ struct page_pool *pool;
+ int ret = 0, pool_size;
+
+ pool_size = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch);
+ pool = cpsw_create_page_pool(cpsw, pool_size);
+ if (IS_ERR(pool))
+ ret = PTR_ERR(pool);
+ else
+ cpsw->page_pool[ch] = pool;
+
+ return ret;
+}
+
+void cpsw_destroy_xdp_rxqs(struct cpsw_common *cpsw)
+{
+ struct net_device *ndev;
+ int i, ch;
+
+ for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ ndev = cpsw->slaves[i].ndev;
+ if (!ndev)
+ continue;
+
+ cpsw_ndev_destroy_xdp_rxq(netdev_priv(ndev), ch);
+ }
+
+ page_pool_destroy(cpsw->page_pool[ch]);
+ cpsw->page_pool[ch] = NULL;
+ }
+}
+
+int cpsw_create_xdp_rxqs(struct cpsw_common *cpsw)
+{
+ struct net_device *ndev;
+ int i, ch, ret;
+
+ for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
+ ret = cpsw_create_rx_pool(cpsw, ch);
+ if (ret)
+ goto err_cleanup;
+
+ /* using the same page pool is allowed as the rx handlers never run
+ * simultaneously for both ndevs
+ */
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ ndev = cpsw->slaves[i].ndev;
+ if (!ndev)
+ continue;
+
+ ret = cpsw_ndev_create_xdp_rxq(netdev_priv(ndev), ch);
+ if (ret)
+ goto err_cleanup;
+ }
+ }
+
+ return 0;
+
+err_cleanup:
+ cpsw_destroy_xdp_rxqs(cpsw);
+
+ return ret;
+}
+
static void cpsw_rx_handler(void *token, int len, int status)
{
- struct cpdma_chan *ch;
- struct sk_buff *skb = token;
- struct sk_buff *new_skb;
- struct net_device *ndev = skb->dev;
- int ret = 0, port;
- struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+ struct page *new_page, *page = token;
+ void *pa = page_address(page);
+ struct cpsw_meta_xdp *xmeta = pa + CPSW_XMETA_OFFSET;
+ struct cpsw_common *cpsw = ndev_to_cpsw(xmeta->ndev);
+ int pkt_size = cpsw->rx_packet_max;
+ int ret = 0, port, ch = xmeta->ch;
+ int headroom = CPSW_HEADROOM;
+ struct net_device *ndev = xmeta->ndev;
struct cpsw_priv *priv;
+ struct page_pool *pool;
+ struct sk_buff *skb;
+ struct xdp_buff xdp;
+ dma_addr_t dma;
- if (cpsw->data.dual_emac) {
+ if (cpsw->data.dual_emac && status >= 0) {
port = CPDMA_RX_SOURCE_PORT(status);
- if (port) {
+ if (port)
ndev = cpsw->slaves[--port].ndev;
- skb->dev = ndev;
- }
}
+ priv = netdev_priv(ndev);
+ pool = cpsw->page_pool[ch];
if (unlikely(status < 0) || unlikely(!netif_running(ndev))) {
/* In dual emac mode check for all interfaces */
if (cpsw->data.dual_emac && cpsw->usage_count &&
@@ -426,47 +701,88 @@ static void cpsw_rx_handler(void *token, int len, int status)
* is already down and the other interface is up
* and running, instead of freeing which results
* in reducing of the number of rx descriptor in
- * DMA engine, requeue skb back to cpdma.
+ * DMA engine, requeue page back to cpdma.
*/
- new_skb = skb;
+ new_page = page;
goto requeue;
}
- /* the interface is going down, skbs are purged */
- dev_kfree_skb_any(skb);
+ /* the interface is going down, pages are purged */
+ page_pool_recycle_direct(pool, page);
return;
}
- new_skb = netdev_alloc_skb_ip_align(ndev, cpsw->rx_packet_max);
- if (new_skb) {
- skb_copy_queue_mapping(new_skb, skb);
- skb_put(skb, len);
- if (status & CPDMA_RX_VLAN_ENCAP)
- cpsw_rx_vlan_encap(skb);
- priv = netdev_priv(ndev);
- if (priv->rx_ts_enabled)
- cpts_rx_timestamp(cpsw->cpts, skb);
- skb->protocol = eth_type_trans(skb, ndev);
- netif_receive_skb(skb);
- ndev->stats.rx_bytes += len;
- ndev->stats.rx_packets++;
- kmemleak_not_leak(new_skb);
- } else {
+ new_page = page_pool_dev_alloc_pages(pool);
+ if (unlikely(!new_page)) {
+ new_page = page;
ndev->stats.rx_dropped++;
- new_skb = skb;
+ goto requeue;
}
-requeue:
- if (netif_dormant(ndev)) {
- dev_kfree_skb_any(new_skb);
- return;
+ if (priv->xdp_prog) {
+ if (status & CPDMA_RX_VLAN_ENCAP) {
+ xdp.data = pa + CPSW_HEADROOM +
+ CPSW_RX_VLAN_ENCAP_HDR_SIZE;
+ xdp.data_end = xdp.data + len -
+ CPSW_RX_VLAN_ENCAP_HDR_SIZE;
+ } else {
+ xdp.data = pa + CPSW_HEADROOM;
+ xdp.data_end = xdp.data + len;
+ }
+
+ xdp_set_data_meta_invalid(&xdp);
+
+ xdp.data_hard_start = pa;
+ xdp.rxq = &priv->xdp_rxq[ch];
+
+ ret = cpsw_run_xdp(priv, ch, &xdp, page);
+ if (ret != CPSW_XDP_PASS)
+ goto requeue;
+
+ /* XDP prog might have changed packet data and boundaries */
+ len = xdp.data_end - xdp.data;
+ headroom = xdp.data - xdp.data_hard_start;
+
+ /* XDP prog can modify vlan tag, so can't use encap header */
+ status &= ~CPDMA_RX_VLAN_ENCAP;
}
- ch = cpsw->rxv[skb_get_queue_mapping(new_skb)].ch;
- ret = cpdma_chan_submit(ch, new_skb, new_skb->data,
- skb_tailroom(new_skb), 0);
- if (WARN_ON(ret < 0))
- dev_kfree_skb_any(new_skb);
+ /* pass skb to netstack if no XDP prog or returned XDP_PASS */
+ skb = build_skb(pa, cpsw_rxbuf_total_len(pkt_size));
+ if (!skb) {
+ ndev->stats.rx_dropped++;
+ page_pool_recycle_direct(pool, page);
+ goto requeue;
+ }
+
+ skb_reserve(skb, headroom);
+ skb_put(skb, len);
+ skb->dev = ndev;
+ if (status & CPDMA_RX_VLAN_ENCAP)
+ cpsw_rx_vlan_encap(skb);
+ if (priv->rx_ts_enabled)
+ cpts_rx_timestamp(cpsw->cpts, skb);
+ skb->protocol = eth_type_trans(skb, ndev);
+
+ /* unmap the page as the netstack does not recycle page pool pages */
+ page_pool_release_page(pool, page);
+ netif_receive_skb(skb);
+
+ ndev->stats.rx_bytes += len;
+ ndev->stats.rx_packets++;
+
+requeue:
+ xmeta = page_address(new_page) + CPSW_XMETA_OFFSET;
+ xmeta->ndev = ndev;
+ xmeta->ch = ch;
+
+ dma = page_pool_get_dma_addr(new_page) + CPSW_HEADROOM;
+ ret = cpdma_chan_submit_mapped(cpsw->rxv[ch].ch, new_page, dma,
+ pkt_size, 0);
+ if (ret < 0) {
+ WARN_ON(ret == -ENOMEM);
+ page_pool_recycle_direct(pool, new_page);
+ }
}
void cpsw_split_res(struct cpsw_common *cpsw)
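
The page-based RX path above sizes each buffer so that build_skb() can wrap the page directly: CPSW_HEADROOM in front of the frame, the frame itself, and room for skb_shared_info at the tail. The sketch below reproduces that arithmetic in user space with placeholder constants; XDP_PACKET_HEADROOM = 256, NET_SKB_PAD = 64, NET_IP_ALIGN = 2, a 64-byte cache line, a 320-byte skb_shared_info and a 1522-byte rx_packet_max are all assumptions here.

#include <stdio.h>

#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))
#define MAX(a, b)		((a) > (b) ? (a) : (b))

/* placeholder values; the real ones depend on architecture and config */
#define XDP_PACKET_HEADROOM	256
#define NET_SKB_PAD		64
#define NET_IP_ALIGN		2
#define SMP_CACHE_BYTES		64
#define SHARED_INFO_SIZE	320	/* stand-in for sizeof(struct skb_shared_info) */

#define CPSW_HEADROOM_NA	(MAX(XDP_PACKET_HEADROOM, NET_SKB_PAD) + NET_IP_ALIGN)
#define CPSW_HEADROOM		ALIGN(CPSW_HEADROOM_NA, sizeof(long))

/* same shape as cpsw_rxbuf_total_len() in the hunk above */
static unsigned int rxbuf_total_len(unsigned int len)
{
	len += CPSW_HEADROOM;
	len += ALIGN(SHARED_INFO_SIZE, SMP_CACHE_BYTES);
	return ALIGN(len, SMP_CACHE_BYTES);
}

int main(void)
{
	unsigned int rx_packet_max = 1522;

	printf("headroom=%u total per-packet buffer=%u\n",
	       (unsigned int)CPSW_HEADROOM, rxbuf_total_len(rx_packet_max));
	return 0;
}
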
@@ -1035,33 +1351,39 @@ static void cpsw_init_host_port(struct cpsw_priv *priv)
int cpsw_fill_rx_channels(struct cpsw_priv *priv)
{
struct cpsw_common *cpsw = priv->cpsw;
- struct sk_buff *skb;
+ struct cpsw_meta_xdp *xmeta;
+ struct page_pool *pool;
+ struct page *page;
int ch_buf_num;
int ch, i, ret;
+ dma_addr_t dma;
for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
+ pool = cpsw->page_pool[ch];
ch_buf_num = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch);
for (i = 0; i < ch_buf_num; i++) {
- skb = __netdev_alloc_skb_ip_align(priv->ndev,
- cpsw->rx_packet_max,
- GFP_KERNEL);
- if (!skb) {
- cpsw_err(priv, ifup, "cannot allocate skb\n");
+ page = page_pool_dev_alloc_pages(pool);
+ if (!page) {
+ cpsw_err(priv, ifup, "allocate rx page err\n");
return -ENOMEM;
}
- skb_set_queue_mapping(skb, ch);
- ret = cpdma_chan_submit(cpsw->rxv[ch].ch, skb,
- skb->data, skb_tailroom(skb),
- 0);
+ xmeta = page_address(page) + CPSW_XMETA_OFFSET;
+ xmeta->ndev = priv->ndev;
+ xmeta->ch = ch;
+
+ dma = page_pool_get_dma_addr(page) + CPSW_HEADROOM;
+ ret = cpdma_chan_idle_submit_mapped(cpsw->rxv[ch].ch,
+ page, dma,
+ cpsw->rx_packet_max,
+ 0);
if (ret < 0) {
cpsw_err(priv, ifup,
- "cannot submit skb to channel %d rx, error %d\n",
+ "cannot submit page to channel %d rx, error %d\n",
ch, ret);
- kfree_skb(skb);
+ page_pool_recycle_direct(pool, page);
return ret;
}
- kmemleak_not_leak(skb);
}
cpsw_info(priv, ifup, "ch %d rx, submitted %d descriptors\n",
@@ -1397,6 +1719,13 @@ static int cpsw_ndo_open(struct net_device *ndev)
enable_irq(cpsw->irqs_table[0]);
}
+ /* create rxqs for both interfaces in dual mac mode as they use the same
+ * pool and must be destroyed together when there are no users.
+ */
+ ret = cpsw_create_xdp_rxqs(cpsw);
+ if (ret < 0)
+ goto err_cleanup;
+
ret = cpsw_fill_rx_channels(priv);
if (ret < 0)
goto err_cleanup;
@@ -1423,7 +1752,11 @@ static int cpsw_ndo_open(struct net_device *ndev)
return 0;
err_cleanup:
- cpdma_ctlr_stop(cpsw->dma);
+ if (!cpsw->usage_count) {
+ cpdma_ctlr_stop(cpsw->dma);
+ cpsw_destroy_xdp_rxqs(cpsw);
+ }
+
for_each_slave(priv, cpsw_slave_stop, cpsw);
pm_runtime_put_sync(cpsw->dev);
netif_carrier_off(priv->ndev);
@@ -1447,6 +1780,7 @@ static int cpsw_ndo_stop(struct net_device *ndev)
cpsw_intr_disable(cpsw);
cpdma_ctlr_stop(cpsw->dma);
cpsw_ale_stop(cpsw->ale);
+ cpsw_destroy_xdp_rxqs(cpsw);
}
for_each_slave(priv, cpsw_slave_stop, cpsw);
@@ -2004,6 +2338,64 @@ static int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
}
}
+static int cpsw_xdp_prog_setup(struct cpsw_priv *priv, struct netdev_bpf *bpf)
+{
+ struct bpf_prog *prog = bpf->prog;
+
+ if (!priv->xdpi.prog && !prog)
+ return 0;
+
+ if (!xdp_attachment_flags_ok(&priv->xdpi, bpf))
+ return -EBUSY;
+
+ WRITE_ONCE(priv->xdp_prog, prog);
+
+ xdp_attachment_setup(&priv->xdpi, bpf);
+
+ return 0;
+}
+
+static int cpsw_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+
+ switch (bpf->command) {
+ case XDP_SETUP_PROG:
+ return cpsw_xdp_prog_setup(priv, bpf);
+
+ case XDP_QUERY_PROG:
+ return xdp_attachment_query(&priv->xdpi, bpf);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int cpsw_ndo_xdp_xmit(struct net_device *ndev, int n,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct xdp_frame *xdpf;
+ int i, drops = 0;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ for (i = 0; i < n; i++) {
+ xdpf = frames[i];
+ if (xdpf->len < CPSW_MIN_PACKET_SIZE) {
+ xdp_return_frame_rx_napi(xdpf);
+ drops++;
+ continue;
+ }
+
+ if (cpsw_xdp_tx_frame(priv, xdpf, NULL))
+ drops++;
+ }
+
+ return n - drops;
+}
+
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cpsw_ndo_poll_controller(struct net_device *ndev)
{
@@ -2032,6 +2424,8 @@ static const struct net_device_ops cpsw_netdev_ops = {
.ndo_vlan_rx_add_vid = cpsw_ndo_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = cpsw_ndo_vlan_rx_kill_vid,
.ndo_setup_tc = cpsw_ndo_setup_tc,
+ .ndo_bpf = cpsw_ndo_bpf,
+ .ndo_xdp_xmit = cpsw_ndo_xdp_xmit,
};
static void cpsw_get_drvinfo(struct net_device *ndev,
@@ -2179,6 +2573,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
return ret;
}
+ slave_data->slave_node = slave_node;
slave_data->phy_node = of_parse_phandle(slave_node,
"phy-handle", 0);
parp = of_get_property(slave_node, "phy_id", &lenp);
@@ -2262,8 +2657,7 @@ no_phy_slave:
static void cpsw_remove_dt(struct platform_device *pdev)
{
- struct net_device *ndev = platform_get_drvdata(pdev);
- struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+ struct cpsw_common *cpsw = platform_get_drvdata(pdev);
struct cpsw_platform_data *data = &cpsw->data;
struct device_node *node = pdev->dev.of_node;
struct device_node *slave_node;
@@ -2330,6 +2724,7 @@ static int cpsw_probe_dual_emac(struct cpsw_priv *priv)
/* register the network device */
SET_NETDEV_DEV(ndev, cpsw->dev);
+ ndev->dev.of_node = cpsw->slaves[1].data->slave_node;
ret = register_netdev(ndev);
if (ret)
dev_err(cpsw->dev, "cpsw: error registering net device\n");
@@ -2474,7 +2869,7 @@ static int cpsw_probe(struct platform_device *pdev)
goto clean_cpts;
}
- platform_set_drvdata(pdev, ndev);
+ platform_set_drvdata(pdev, cpsw);
priv = netdev_priv(ndev);
priv->cpsw = cpsw;
priv->ndev = ndev;
@@ -2507,6 +2902,7 @@ static int cpsw_probe(struct platform_device *pdev)
/* register the network device */
SET_NETDEV_DEV(ndev, dev);
+ ndev->dev.of_node = cpsw->slaves[0].data->slave_node;
ret = register_netdev(ndev);
if (ret) {
dev_err(dev, "error registering net device\n");
@@ -2567,9 +2963,8 @@ clean_runtime_disable_ret:
static int cpsw_remove(struct platform_device *pdev)
{
- struct net_device *ndev = platform_get_drvdata(pdev);
- struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
- int ret;
+ struct cpsw_common *cpsw = platform_get_drvdata(pdev);
+ int i, ret;
ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0) {
@@ -2577,9 +2972,9 @@ static int cpsw_remove(struct platform_device *pdev)
return ret;
}
- if (cpsw->data.dual_emac)
- unregister_netdev(cpsw->slaves[1].ndev);
- unregister_netdev(ndev);
+ for (i = 0; i < cpsw->data.slaves; i++)
+ if (cpsw->slaves[i].ndev)
+ unregister_netdev(cpsw->slaves[i].ndev);
cpts_release(cpsw->cpts);
cpdma_ctlr_destroy(cpsw->dma);
@@ -2592,20 +2987,13 @@ static int cpsw_remove(struct platform_device *pdev)
#ifdef CONFIG_PM_SLEEP
static int cpsw_suspend(struct device *dev)
{
- struct net_device *ndev = dev_get_drvdata(dev);
- struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
-
- if (cpsw->data.dual_emac) {
- int i;
+ struct cpsw_common *cpsw = dev_get_drvdata(dev);
+ int i;
- for (i = 0; i < cpsw->data.slaves; i++) {
+ for (i = 0; i < cpsw->data.slaves; i++)
+ if (cpsw->slaves[i].ndev)
if (netif_running(cpsw->slaves[i].ndev))
cpsw_ndo_stop(cpsw->slaves[i].ndev);
- }
- } else {
- if (netif_running(ndev))
- cpsw_ndo_stop(ndev);
- }
/* Select sleep pin state */
pinctrl_pm_select_sleep_state(dev);
@@ -2615,25 +3003,20 @@ static int cpsw_suspend(struct device *dev)
static int cpsw_resume(struct device *dev)
{
- struct net_device *ndev = dev_get_drvdata(dev);
- struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+ struct cpsw_common *cpsw = dev_get_drvdata(dev);
+ int i;
/* Select default pin state */
pinctrl_pm_select_default_state(dev);
/* shut up ASSERT_RTNL() warning in netif_set_real_num_tx/rx_queues */
rtnl_lock();
- if (cpsw->data.dual_emac) {
- int i;
- for (i = 0; i < cpsw->data.slaves; i++) {
+ for (i = 0; i < cpsw->data.slaves; i++)
+ if (cpsw->slaves[i].ndev)
if (netif_running(cpsw->slaves[i].ndev))
cpsw_ndo_open(cpsw->slaves[i].ndev);
- }
- } else {
- if (netif_running(ndev))
- cpsw_ndo_open(ndev);
- }
+
rtnl_unlock();
return 0;
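
Both refill paths above, the requeue branch in cpsw_rx_handler() and the initial fill in cpsw_fill_rx_channels(), prime a page the same way: driver metadata goes at CPSW_XMETA_OFFSET inside the page, and the DMA address already held by the pool, advanced by CPSW_HEADROOM, is handed to cpdma. A minimal sketch of that shared step (cpsw_prime_rx_page is a hypothetical helper, not part of the driver; the requeue path uses cpdma_chan_submit_mapped() instead of the idle variant shown here):

static int cpsw_prime_rx_page(struct cpsw_common *cpsw, struct net_device *ndev,
			      int ch, struct page *page, int pkt_size)
{
	struct page_pool *pool = cpsw->page_pool[ch];
	struct cpsw_meta_xdp *xmeta;
	dma_addr_t dma;
	int ret;

	/* stash the owning net_device and channel inside the page itself */
	xmeta = page_address(page) + CPSW_XMETA_OFFSET;
	xmeta->ndev = ndev;
	xmeta->ch = ch;

	/* the pool already mapped the page; just skip past the headroom */
	dma = page_pool_get_dma_addr(page) + CPSW_HEADROOM;
	ret = cpdma_chan_idle_submit_mapped(cpsw->rxv[ch].ch, page, dma,
					    pkt_size, 0);
	if (ret < 0)
		page_pool_recycle_direct(pool, page);

	return ret;
}
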
diff --git a/drivers/net/ethernet/ti/cpsw_ethtool.c b/drivers/net/ethernet/ti/cpsw_ethtool.c
index 6d1c9ebae7cc..31248a6cc642 100644
--- a/drivers/net/ethernet/ti/cpsw_ethtool.c
+++ b/drivers/net/ethernet/ti/cpsw_ethtool.c
@@ -458,21 +458,22 @@ int cpsw_nway_reset(struct net_device *ndev)
static void cpsw_suspend_data_pass(struct net_device *ndev)
{
struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
- struct cpsw_slave *slave;
int i;
/* Disable NAPI scheduling */
cpsw_intr_disable(cpsw);
/* Stop all transmit queues for every network device.
- * Disable re-using rx descriptors with dormant_on.
*/
- for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) {
- if (!(slave->ndev && netif_running(slave->ndev)))
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ ndev = cpsw->slaves[i].ndev;
+ if (!(ndev && netif_running(ndev)))
continue;
- netif_tx_stop_all_queues(slave->ndev);
- netif_dormant_on(slave->ndev);
+ netif_tx_stop_all_queues(ndev);
+
+ /* Barrier, so that the stopped queue state is visible to other CPUs */
+ smp_mb__after_atomic();
}
/* Handle rest of tx packets and stop cpdma channels */
@@ -483,14 +484,8 @@ static int cpsw_resume_data_pass(struct net_device *ndev)
{
struct cpsw_priv *priv = netdev_priv(ndev);
struct cpsw_common *cpsw = priv->cpsw;
- struct cpsw_slave *slave;
int i, ret;
- /* Allow rx packets handling */
- for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++)
- if (slave->ndev && netif_running(slave->ndev))
- netif_dormant_off(slave->ndev);
-
/* After this receive is started */
if (cpsw->usage_count) {
ret = cpsw_fill_rx_channels(priv);
@@ -502,9 +497,11 @@ static int cpsw_resume_data_pass(struct net_device *ndev)
}
/* Resume transmit for every affected interface */
- for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++)
- if (slave->ndev && netif_running(slave->ndev))
- netif_tx_start_all_queues(slave->ndev);
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ ndev = cpsw->slaves[i].ndev;
+ if (ndev && netif_running(ndev))
+ netif_tx_start_all_queues(ndev);
+ }
return 0;
}
@@ -581,14 +578,26 @@ static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx,
return 0;
}
+static void cpsw_fail(struct cpsw_common *cpsw)
+{
+ struct net_device *ndev;
+ int i;
+
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ ndev = cpsw->slaves[i].ndev;
+ if (ndev)
+ dev_close(ndev);
+ }
+}
+
int cpsw_set_channels_common(struct net_device *ndev,
struct ethtool_channels *chs,
cpdma_handler_fn rx_handler)
{
struct cpsw_priv *priv = netdev_priv(ndev);
struct cpsw_common *cpsw = priv->cpsw;
- struct cpsw_slave *slave;
- int i, ret;
+ struct net_device *sl_ndev;
+ int i, new_pools, ret;
ret = cpsw_check_ch_settings(cpsw, chs);
if (ret < 0)
@@ -596,6 +605,8 @@ int cpsw_set_channels_common(struct net_device *ndev,
cpsw_suspend_data_pass(ndev);
+ new_pools = (chs->rx_count != cpsw->rx_ch_num) && cpsw->usage_count;
+
ret = cpsw_update_channels_res(priv, chs->rx_count, 1, rx_handler);
if (ret)
goto err;
@@ -604,35 +615,40 @@ int cpsw_set_channels_common(struct net_device *ndev,
if (ret)
goto err;
- for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) {
- if (!(slave->ndev && netif_running(slave->ndev)))
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ sl_ndev = cpsw->slaves[i].ndev;
+ if (!(sl_ndev && netif_running(sl_ndev)))
continue;
/* Inform stack about new count of queues */
- ret = netif_set_real_num_tx_queues(slave->ndev,
- cpsw->tx_ch_num);
+ ret = netif_set_real_num_tx_queues(sl_ndev, cpsw->tx_ch_num);
if (ret) {
dev_err(priv->dev, "cannot set real number of tx queues\n");
goto err;
}
- ret = netif_set_real_num_rx_queues(slave->ndev,
- cpsw->rx_ch_num);
+ ret = netif_set_real_num_rx_queues(sl_ndev, cpsw->rx_ch_num);
if (ret) {
dev_err(priv->dev, "cannot set real number of rx queues\n");
goto err;
}
}
- if (cpsw->usage_count)
- cpsw_split_res(cpsw);
+ cpsw_split_res(cpsw);
+
+ if (new_pools) {
+ cpsw_destroy_xdp_rxqs(cpsw);
+ ret = cpsw_create_xdp_rxqs(cpsw);
+ if (ret)
+ goto err;
+ }
ret = cpsw_resume_data_pass(ndev);
if (!ret)
return 0;
err:
dev_err(priv->dev, "cannot update channels number, closing device\n");
- dev_close(ndev);
+ cpsw_fail(cpsw);
return ret;
}
@@ -652,9 +668,8 @@ void cpsw_get_ringparam(struct net_device *ndev,
int cpsw_set_ringparam(struct net_device *ndev,
struct ethtool_ringparam *ering)
{
- struct cpsw_priv *priv = netdev_priv(ndev);
- struct cpsw_common *cpsw = priv->cpsw;
- int ret;
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+ int descs_num, ret;
/* ignore ering->tx_pending - only rx_pending adjustment is supported */
@@ -663,22 +678,34 @@ int cpsw_set_ringparam(struct net_device *ndev,
ering->rx_pending > (cpsw->descs_pool_size - CPSW_MAX_QUEUES))
return -EINVAL;
- if (ering->rx_pending == cpdma_get_num_rx_descs(cpsw->dma))
+ descs_num = cpdma_get_num_rx_descs(cpsw->dma);
+ if (ering->rx_pending == descs_num)
return 0;
cpsw_suspend_data_pass(ndev);
- cpdma_set_num_rx_descs(cpsw->dma, ering->rx_pending);
+ ret = cpdma_set_num_rx_descs(cpsw->dma, ering->rx_pending);
+ if (ret) {
+ if (cpsw_resume_data_pass(ndev))
+ goto err;
+
+ return ret;
+ }
- if (cpsw->usage_count)
- cpdma_chan_split_pool(cpsw->dma);
+ if (cpsw->usage_count) {
+ cpsw_destroy_xdp_rxqs(cpsw);
+ ret = cpsw_create_xdp_rxqs(cpsw);
+ if (ret)
+ goto err;
+ }
ret = cpsw_resume_data_pass(ndev);
if (!ret)
return 0;
-
+err:
+ cpdma_set_num_rx_descs(cpsw->dma, descs_num);
dev_err(cpsw->dev, "cannot set ring params, closing device\n");
- dev_close(ndev);
+ cpsw_fail(cpsw);
return ret;
}
diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h
index 04795b97ee71..362c5a986869 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.h
+++ b/drivers/net/ethernet/ti/cpsw_priv.h
@@ -272,6 +272,7 @@ struct cpsw_host_regs {
};
struct cpsw_slave_data {
+ struct device_node *slave_node;
struct device_node *phy_node;
char phy_id[MII_BUS_ID_SIZE];
int phy_if;
@@ -346,6 +347,7 @@ struct cpsw_common {
int rx_ch_num, tx_ch_num;
int speed;
int usage_count;
+ struct page_pool *page_pool[CPSW_MAX_QUEUES];
};
struct cpsw_priv {
@@ -360,6 +362,10 @@ struct cpsw_priv {
int shp_cfg_speed;
int tx_ts_enabled;
int rx_ts_enabled;
+ struct bpf_prog *xdp_prog;
+ struct xdp_rxq_info xdp_rxq[CPSW_MAX_QUEUES];
+ struct xdp_attachment_info xdpi;
+
u32 emac_port;
struct cpsw_common *cpsw;
};
@@ -391,6 +397,8 @@ int cpsw_fill_rx_channels(struct cpsw_priv *priv);
void cpsw_intr_enable(struct cpsw_common *cpsw);
void cpsw_intr_disable(struct cpsw_common *cpsw);
void cpsw_tx_handler(void *token, int len, int status);
+int cpsw_create_xdp_rxqs(struct cpsw_common *cpsw);
+void cpsw_destroy_xdp_rxqs(struct cpsw_common *cpsw);
/* ethtool */
u32 cpsw_get_msglevel(struct net_device *ndev);
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index e257018ada71..61136428e2c0 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -5,6 +5,7 @@
* Copyright (C) 2012 Richard Cochran <richardcochran@gmail.com>
*
*/
+#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/if.h>
#include <linux/hrtimer.h>
@@ -532,6 +533,82 @@ static void cpts_calc_mult_shift(struct cpts *cpts)
freq, cpts->cc.mult, cpts->cc.shift, (ns - NSEC_PER_SEC));
}
+static int cpts_of_mux_clk_setup(struct cpts *cpts, struct device_node *node)
+{
+ struct device_node *refclk_np;
+ const char **parent_names;
+ unsigned int num_parents;
+ struct clk_hw *clk_hw;
+ int ret = -EINVAL;
+ u32 *mux_table;
+
+ refclk_np = of_get_child_by_name(node, "cpts-refclk-mux");
+ if (!refclk_np)
+ /* refclk selection is not supported on all SoCs */
+ return 0;
+
+ num_parents = of_clk_get_parent_count(refclk_np);
+ if (num_parents < 1) {
+ dev_err(cpts->dev, "mux-clock %s must have parents\n",
+ refclk_np->name);
+ goto mux_fail;
+ }
+
+ parent_names = devm_kzalloc(cpts->dev, (sizeof(char *) * num_parents),
+ GFP_KERNEL);
+
+ mux_table = devm_kzalloc(cpts->dev, sizeof(*mux_table) * num_parents,
+ GFP_KERNEL);
+ if (!mux_table || !parent_names) {
+ ret = -ENOMEM;
+ goto mux_fail;
+ }
+
+ of_clk_parent_fill(refclk_np, parent_names, num_parents);
+
+ ret = of_property_read_variable_u32_array(refclk_np, "ti,mux-tbl",
+ mux_table,
+ num_parents, num_parents);
+ if (ret < 0)
+ goto mux_fail;
+
+ clk_hw = clk_hw_register_mux_table(cpts->dev, refclk_np->name,
+ parent_names, num_parents,
+ 0,
+ &cpts->reg->rftclk_sel, 0, 0x1F,
+ 0, mux_table, NULL);
+ if (IS_ERR(clk_hw)) {
+ ret = PTR_ERR(clk_hw);
+ goto mux_fail;
+ }
+
+ ret = devm_add_action_or_reset(cpts->dev,
+ (void(*)(void *))clk_hw_unregister_mux,
+ clk_hw);
+ if (ret) {
+ dev_err(cpts->dev, "add clkmux unreg action %d", ret);
+ goto mux_fail;
+ }
+
+ ret = of_clk_add_hw_provider(refclk_np, of_clk_hw_simple_get, clk_hw);
+ if (ret)
+ goto mux_fail;
+
+ ret = devm_add_action_or_reset(cpts->dev,
+ (void(*)(void *))of_clk_del_provider,
+ refclk_np);
+ if (ret) {
+ dev_err(cpts->dev, "add clkmux provider unreg action %d", ret);
+ goto mux_fail;
+ }
+
+ return ret;
+
+mux_fail:
+ of_node_put(refclk_np);
+ return ret;
+}
+
static int cpts_of_parse(struct cpts *cpts, struct device_node *node)
{
int ret = -EINVAL;
@@ -547,7 +624,7 @@ static int cpts_of_parse(struct cpts *cpts, struct device_node *node)
(!cpts->cc.mult && cpts->cc.shift))
goto of_error;
- return 0;
+ return cpts_of_mux_clk_setup(cpts, node);
of_error:
dev_err(cpts->dev, "CPTS: Missing property in the DT.\n");
@@ -572,9 +649,14 @@ struct cpts *cpts_create(struct device *dev, void __iomem *regs,
if (ret)
return ERR_PTR(ret);
- cpts->refclk = devm_clk_get(dev, "cpts");
+ cpts->refclk = devm_get_clk_from_child(dev, node, "cpts");
+ if (IS_ERR(cpts->refclk))
+ /* try to get the clk from the dev node for compatibility */
+ cpts->refclk = devm_clk_get(dev, "cpts");
+
if (IS_ERR(cpts->refclk)) {
- dev_err(dev, "Failed to get cpts refclk\n");
+ dev_err(dev, "Failed to get cpts refclk %ld\n",
+ PTR_ERR(cpts->refclk));
return ERR_CAST(cpts->refclk);
}
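
The two devm_add_action_or_reset() calls above cast clk_hw_unregister_mux() and of_clk_del_provider() to void (*)(void *). An equivalent, cast-free way to register the same cleanups is to route them through tiny wrappers; a sketch under that assumption (the wrapper names are illustrative, not part of the driver):

static void cpts_clk_mux_unregister(void *data)
{
	/* data is the struct clk_hw returned by clk_hw_register_mux_table() */
	clk_hw_unregister_mux(data);
}

static void cpts_clk_del_provider(void *data)
{
	/* data is the refclk mux device_node */
	of_clk_del_provider(data);
}

/* then, in cpts_of_mux_clk_setup(): */
ret = devm_add_action_or_reset(cpts->dev, cpts_clk_mux_unregister, clk_hw);
/* ... */
ret = devm_add_action_or_reset(cpts->dev, cpts_clk_del_provider, refclk_np);
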
diff --git a/drivers/net/ethernet/ti/cpts.h b/drivers/net/ethernet/ti/cpts.h
index 024aab6af12f..bb997c11ee15 100644
--- a/drivers/net/ethernet/ti/cpts.h
+++ b/drivers/net/ethernet/ti/cpts.h
@@ -24,7 +24,7 @@
struct cpsw_cpts {
u32 idver; /* Identification and version */
u32 control; /* Time sync control */
- u32 res1;
+ u32 rftclk_sel; /* Reference Clock Select Register */
u32 ts_push; /* Time stamp event push */
u32 ts_load_val; /* Time stamp load value */
u32 ts_load_en; /* Time stamp load enable */
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 35bf14d8e7af..0ca2a1a254de 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -134,6 +134,15 @@ struct cpdma_control_info {
#define ACCESS_RW (ACCESS_RO | ACCESS_WO)
};
+struct submit_info {
+ struct cpdma_chan *chan;
+ int directed;
+ void *token;
+ void *data;
+ int flags;
+ int len;
+};
+
static struct cpdma_control_info controls[] = {
[CPDMA_TX_RLIM] = {CPDMA_DMACONTROL, 8, 0xffff, ACCESS_RW},
[CPDMA_CMD_IDLE] = {CPDMA_DMACONTROL, 3, 1, ACCESS_WO},
@@ -176,6 +185,8 @@ static struct cpdma_control_info controls[] = {
(directed << CPDMA_TO_PORT_SHIFT)); \
} while (0)
+#define CPDMA_DMA_EXT_MAP BIT(16)
+
static void cpdma_desc_pool_destroy(struct cpdma_ctlr *ctlr)
{
struct cpdma_desc_pool *pool = ctlr->pool;
@@ -1002,34 +1013,26 @@ static void __cpdma_chan_submit(struct cpdma_chan *chan,
}
}
-int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
- int len, int directed)
+static int cpdma_chan_submit_si(struct submit_info *si)
{
+ struct cpdma_chan *chan = si->chan;
struct cpdma_ctlr *ctlr = chan->ctlr;
+ int len = si->len;
+ int swlen = len;
struct cpdma_desc __iomem *desc;
dma_addr_t buffer;
- unsigned long flags;
u32 mode;
- int ret = 0;
-
- spin_lock_irqsave(&chan->lock, flags);
-
- if (chan->state == CPDMA_STATE_TEARDOWN) {
- ret = -EINVAL;
- goto unlock_ret;
- }
+ int ret;
if (chan->count >= chan->desc_num) {
chan->stats.desc_alloc_fail++;
- ret = -ENOMEM;
- goto unlock_ret;
+ return -ENOMEM;
}
desc = cpdma_desc_alloc(ctlr->pool);
if (!desc) {
chan->stats.desc_alloc_fail++;
- ret = -ENOMEM;
- goto unlock_ret;
+ return -ENOMEM;
}
if (len < ctlr->params.min_packet_size) {
@@ -1037,16 +1040,21 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
chan->stats.runt_transmit_buff++;
}
- buffer = dma_map_single(ctlr->dev, data, len, chan->dir);
- ret = dma_mapping_error(ctlr->dev, buffer);
- if (ret) {
- cpdma_desc_free(ctlr->pool, desc, 1);
- ret = -EINVAL;
- goto unlock_ret;
- }
-
mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
- cpdma_desc_to_port(chan, mode, directed);
+ cpdma_desc_to_port(chan, mode, si->directed);
+
+ if (si->flags & CPDMA_DMA_EXT_MAP) {
+ buffer = (dma_addr_t)si->data;
+ dma_sync_single_for_device(ctlr->dev, buffer, len, chan->dir);
+ swlen |= CPDMA_DMA_EXT_MAP;
+ } else {
+ buffer = dma_map_single(ctlr->dev, si->data, len, chan->dir);
+ ret = dma_mapping_error(ctlr->dev, buffer);
+ if (ret) {
+ cpdma_desc_free(ctlr->pool, desc, 1);
+ return -EINVAL;
+ }
+ }
/* Relaxed IO accessors can be used here as there is read barrier
* at the end of write sequence.
@@ -1055,9 +1063,9 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
writel_relaxed(buffer, &desc->hw_buffer);
writel_relaxed(len, &desc->hw_len);
writel_relaxed(mode | len, &desc->hw_mode);
- writel_relaxed((uintptr_t)token, &desc->sw_token);
+ writel_relaxed((uintptr_t)si->token, &desc->sw_token);
writel_relaxed(buffer, &desc->sw_buffer);
- writel_relaxed(len, &desc->sw_len);
+ writel_relaxed(swlen, &desc->sw_len);
desc_read(desc, sw_len);
__cpdma_chan_submit(chan, desc);
@@ -1066,8 +1074,105 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
chan_write(chan, rxfree, 1);
chan->count++;
+ return 0;
+}
-unlock_ret:
+int cpdma_chan_idle_submit(struct cpdma_chan *chan, void *token, void *data,
+ int len, int directed)
+{
+ struct submit_info si;
+ unsigned long flags;
+ int ret;
+
+ si.chan = chan;
+ si.token = token;
+ si.data = data;
+ si.len = len;
+ si.directed = directed;
+ si.flags = 0;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ if (chan->state == CPDMA_STATE_TEARDOWN) {
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return -EINVAL;
+ }
+
+ ret = cpdma_chan_submit_si(&si);
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return ret;
+}
+
+int cpdma_chan_idle_submit_mapped(struct cpdma_chan *chan, void *token,
+ dma_addr_t data, int len, int directed)
+{
+ struct submit_info si;
+ unsigned long flags;
+ int ret;
+
+ si.chan = chan;
+ si.token = token;
+ si.data = (void *)data;
+ si.len = len;
+ si.directed = directed;
+ si.flags = CPDMA_DMA_EXT_MAP;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ if (chan->state == CPDMA_STATE_TEARDOWN) {
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return -EINVAL;
+ }
+
+ ret = cpdma_chan_submit_si(&si);
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return ret;
+}
+
+int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
+ int len, int directed)
+{
+ struct submit_info si;
+ unsigned long flags;
+ int ret;
+
+ si.chan = chan;
+ si.token = token;
+ si.data = data;
+ si.len = len;
+ si.directed = directed;
+ si.flags = 0;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ if (chan->state != CPDMA_STATE_ACTIVE) {
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return -EINVAL;
+ }
+
+ ret = cpdma_chan_submit_si(&si);
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return ret;
+}
+
+int cpdma_chan_submit_mapped(struct cpdma_chan *chan, void *token,
+ dma_addr_t data, int len, int directed)
+{
+ struct submit_info si;
+ unsigned long flags;
+ int ret;
+
+ si.chan = chan;
+ si.token = token;
+ si.data = (void *)data;
+ si.len = len;
+ si.directed = directed;
+ si.flags = CPDMA_DMA_EXT_MAP;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ if (chan->state != CPDMA_STATE_ACTIVE) {
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return -EINVAL;
+ }
+
+ ret = cpdma_chan_submit_si(&si);
spin_unlock_irqrestore(&chan->lock, flags);
return ret;
}
@@ -1097,10 +1202,17 @@ static void __cpdma_chan_free(struct cpdma_chan *chan,
uintptr_t token;
token = desc_read(desc, sw_token);
- buff_dma = desc_read(desc, sw_buffer);
origlen = desc_read(desc, sw_len);
- dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
+ buff_dma = desc_read(desc, sw_buffer);
+ if (origlen & CPDMA_DMA_EXT_MAP) {
+ origlen &= ~CPDMA_DMA_EXT_MAP;
+ dma_sync_single_for_cpu(ctlr->dev, buff_dma, origlen,
+ chan->dir);
+ } else {
+ dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
+ }
+
cpdma_desc_free(pool, desc, 1);
(*chan->handler)((void *)token, outlen, status);
}
@@ -1311,8 +1423,23 @@ int cpdma_get_num_tx_descs(struct cpdma_ctlr *ctlr)
return ctlr->num_tx_desc;
}
-void cpdma_set_num_rx_descs(struct cpdma_ctlr *ctlr, int num_rx_desc)
+int cpdma_set_num_rx_descs(struct cpdma_ctlr *ctlr, int num_rx_desc)
{
+ unsigned long flags;
+ int temp, ret;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+
+ temp = ctlr->num_rx_desc;
ctlr->num_rx_desc = num_rx_desc;
ctlr->num_tx_desc = ctlr->pool->num_desc - ctlr->num_rx_desc;
+ ret = cpdma_chan_split_pool(ctlr);
+ if (ret) {
+ ctlr->num_rx_desc = temp;
+ ctlr->num_tx_desc = ctlr->pool->num_desc - ctlr->num_rx_desc;
+ }
+
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+
+ return ret;
}
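
CPDMA_DMA_EXT_MAP above is carried in the descriptor's sw_len word: cpdma_chan_submit_si() ORs it in when the caller pre-mapped the buffer, and __cpdma_chan_free() masks it back out to choose between dma_unmap_single() and dma_sync_single_for_cpu(). A self-contained sketch of just that encode/decode, assuming (as cpdma does) that buffer lengths stay below 64 KiB so bit 16 is free; the names here are illustrative, not cpdma's:

#include <stdbool.h>
#include <stdint.h>

#define DMA_EXT_MAP	(1u << 16)	/* same idea as CPDMA_DMA_EXT_MAP */

/* tag a length with "buffer was mapped by the caller" */
static inline uint32_t encode_sw_len(uint32_t len, bool ext_mapped)
{
	return ext_mapped ? (len | DMA_EXT_MAP) : len;
}

/* recover the real length and whether the flag was set */
static inline uint32_t decode_sw_len(uint32_t sw_len, bool *ext_mapped)
{
	*ext_mapped = sw_len & DMA_EXT_MAP;
	return sw_len & ~DMA_EXT_MAP;
}
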
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.h b/drivers/net/ethernet/ti/davinci_cpdma.h
index 10376062dafa..d3cfe234d16a 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.h
+++ b/drivers/net/ethernet/ti/davinci_cpdma.h
@@ -77,8 +77,14 @@ int cpdma_chan_stop(struct cpdma_chan *chan);
int cpdma_chan_get_stats(struct cpdma_chan *chan,
struct cpdma_chan_stats *stats);
+int cpdma_chan_submit_mapped(struct cpdma_chan *chan, void *token,
+ dma_addr_t data, int len, int directed);
int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
int len, int directed);
+int cpdma_chan_idle_submit_mapped(struct cpdma_chan *chan, void *token,
+ dma_addr_t data, int len, int directed);
+int cpdma_chan_idle_submit(struct cpdma_chan *chan, void *token, void *data,
+ int len, int directed);
int cpdma_chan_process(struct cpdma_chan *chan, int quota);
int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable);
@@ -110,8 +116,7 @@ enum cpdma_control {
int cpdma_control_get(struct cpdma_ctlr *ctlr, int control);
int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value);
int cpdma_get_num_rx_descs(struct cpdma_ctlr *ctlr);
-void cpdma_set_num_rx_descs(struct cpdma_ctlr *ctlr, int num_rx_desc);
+int cpdma_set_num_rx_descs(struct cpdma_ctlr *ctlr, int num_rx_desc);
int cpdma_get_num_tx_descs(struct cpdma_ctlr *ctlr);
-int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr);
#endif
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 4bf65cab79e6..5f4ece0d5a73 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1428,8 +1428,8 @@ static int emac_dev_open(struct net_device *ndev)
if (!skb)
break;
- ret = cpdma_chan_submit(priv->rxchan, skb, skb->data,
- skb_tailroom(skb), 0);
+ ret = cpdma_chan_idle_submit(priv->rxchan, skb, skb->data,
+ skb_tailroom(skb), 0);
if (WARN_ON(ret < 0))
break;
}
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index ec179700c184..2c1fac33136c 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -3554,7 +3554,7 @@ static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev,
static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
struct device_node *node, void **inst_priv)
{
- struct device_node *interfaces, *interface;
+ struct device_node *interfaces, *interface, *cpts_node;
struct device_node *secondary_ports;
struct cpsw_ale_params ale_params;
struct gbe_priv *gbe_dev;
@@ -3713,7 +3713,12 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
dev_dbg(gbe_dev->dev, "Created a gbe ale engine\n");
}
- gbe_dev->cpts = cpts_create(gbe_dev->dev, gbe_dev->cpts_reg, node);
+ cpts_node = of_get_child_by_name(node, "cpts");
+ if (!cpts_node)
+ cpts_node = of_node_get(node);
+
+ gbe_dev->cpts = cpts_create(gbe_dev->dev, gbe_dev->cpts_reg, cpts_node);
+ of_node_put(cpts_node);
if (IS_ENABLED(CONFIG_TI_CPTS) && IS_ERR(gbe_dev->cpts)) {
ret = PTR_ERR(gbe_dev->cpts);
goto free_sec_ports;
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.h b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
index 3ecddb72f45a..051033580f0a 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.h
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
@@ -301,7 +301,7 @@ struct gelic_card {
*/
unsigned int irq;
struct gelic_descr *tx_top, *rx_top;
- struct gelic_descr descr[0]; /* must be the last */
+ struct gelic_descr descr[]; /* must be the last */
};
struct gelic_port {
diff --git a/drivers/net/ethernet/via/via-velocity.h b/drivers/net/ethernet/via/via-velocity.h
index c0ecc6c7b5e0..cdfe7809e3c1 100644
--- a/drivers/net/ethernet/via/via-velocity.h
+++ b/drivers/net/ethernet/via/via-velocity.h
@@ -1509,7 +1509,7 @@ static inline int velocity_get_ip(struct velocity_info *vptr)
rcu_read_lock();
in_dev = __in_dev_get_rcu(vptr->netdev);
if (in_dev != NULL) {
- ifa = (struct in_ifaddr *) in_dev->ifa_list;
+ ifa = rcu_dereference(in_dev->ifa_list);
if (ifa != NULL) {
memcpy(vptr->ip_addr, &ifa->ifa_address, 4);
res = 0;
diff --git a/drivers/net/ethernet/wiznet/w5100-spi.c b/drivers/net/ethernet/wiznet/w5100-spi.c
index 918b3e50850a..2b4126d2427d 100644
--- a/drivers/net/ethernet/wiznet/w5100-spi.c
+++ b/drivers/net/ethernet/wiznet/w5100-spi.c
@@ -15,6 +15,7 @@
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/of_net.h>
+#include <linux/of_device.h>
#include <linux/spi/spi.h>
#include "w5100.h"
@@ -409,14 +410,32 @@ static const struct w5100_ops w5500_ops = {
.init = w5500_spi_init,
};
+static const struct of_device_id w5100_of_match[] = {
+ { .compatible = "wiznet,w5100", .data = (const void*)W5100, },
+ { .compatible = "wiznet,w5200", .data = (const void*)W5200, },
+ { .compatible = "wiznet,w5500", .data = (const void*)W5500, },
+ { },
+};
+MODULE_DEVICE_TABLE(of, w5100_of_match);
+
static int w5100_spi_probe(struct spi_device *spi)
{
- const struct spi_device_id *id = spi_get_device_id(spi);
+ const struct of_device_id *of_id;
const struct w5100_ops *ops;
+ kernel_ulong_t driver_data;
int priv_size;
const void *mac = of_get_mac_address(spi->dev.of_node);
- switch (id->driver_data) {
+ if (spi->dev.of_node) {
+ of_id = of_match_device(w5100_of_match, &spi->dev);
+ if (!of_id)
+ return -ENODEV;
+ driver_data = (kernel_ulong_t)of_id->data;
+ } else {
+ driver_data = spi_get_device_id(spi)->driver_data;
+ }
+
+ switch (driver_data) {
case W5100:
ops = &w5100_spi_ops;
priv_size = 0;
@@ -453,6 +472,7 @@ static struct spi_driver w5100_spi_driver = {
.driver = {
.name = "w5100",
.pm = &w5100_pm_ops,
+ .of_match_table = w5100_of_match,
},
.probe = w5100_spi_probe,
.remove = w5100_spi_remove,
diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
index af96e05c5bcd..8d994cebb6b0 100644
--- a/drivers/net/ethernet/xilinx/Kconfig
+++ b/drivers/net/ethernet/xilinx/Kconfig
@@ -6,7 +6,7 @@
config NET_VENDOR_XILINX
bool "Xilinx devices"
default y
- depends on PPC || PPC32 || MICROBLAZE || ARCH_ZYNQ || MIPS || X86 || COMPILE_TEST
+ depends on PPC || PPC32 || MICROBLAZE || ARCH_ZYNQ || MIPS || X86 || ARM || COMPILE_TEST
---help---
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -26,8 +26,8 @@ config XILINX_EMACLITE
config XILINX_AXI_EMAC
tristate "Xilinx 10/100/1000 AXI Ethernet support"
- depends on MICROBLAZE
- select PHYLIB
+ depends on MICROBLAZE || X86 || ARM || COMPILE_TEST
+ select PHYLINK
---help---
This driver supports the 10/100/1000 Ethernet from Xilinx for the
AXI bus interface used in Xilinx Virtex FPGAs.
diff --git a/drivers/net/ethernet/xilinx/ll_temac.h b/drivers/net/ethernet/xilinx/ll_temac.h
index 1aeda084b8f1..276292bca334 100644
--- a/drivers/net/ethernet/xilinx/ll_temac.h
+++ b/drivers/net/ethernet/xilinx/ll_temac.h
@@ -361,7 +361,7 @@ struct temac_local {
/* For synchronization of indirect register access. Must be
* shared mutex between interfaces in same TEMAC block.
*/
- struct mutex *indirect_mutex;
+ spinlock_t *indirect_lock;
u32 options; /* Current options word */
int last_link;
unsigned int temac_features;
@@ -388,8 +388,9 @@ struct temac_local {
/* xilinx_temac.c */
int temac_indirect_busywait(struct temac_local *lp);
u32 temac_indirect_in32(struct temac_local *lp, int reg);
+u32 temac_indirect_in32_locked(struct temac_local *lp, int reg);
void temac_indirect_out32(struct temac_local *lp, int reg, u32 value);
-
+void temac_indirect_out32_locked(struct temac_local *lp, int reg, u32 value);
/* xilinx_temac_mdio.c */
int temac_mdio_setup(struct temac_local *lp, struct platform_device *pdev);
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 14870d659f7d..21c1b4322ea7 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -22,7 +22,6 @@
*
* TODO:
* - Factor out locallink DMA code into separate driver
- * - Fix multicast assignment.
* - Fix support for hardware checksumming.
* - Testing. Lots and lots of testing.
*
@@ -53,6 +52,7 @@
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
+#include <linux/processor.h>
#include <linux/platform_data/xilinx-ll-temac.h>
#include "ll_temac.h"
@@ -84,51 +84,118 @@ static void _temac_iow_le(struct temac_local *lp, int offset, u32 value)
return iowrite32(value, lp->regs + offset);
}
+static bool hard_acs_rdy(struct temac_local *lp)
+{
+ return temac_ior(lp, XTE_RDY0_OFFSET) & XTE_RDY0_HARD_ACS_RDY_MASK;
+}
+
+static bool hard_acs_rdy_or_timeout(struct temac_local *lp, ktime_t timeout)
+{
+ ktime_t cur = ktime_get();
+
+ return hard_acs_rdy(lp) || ktime_after(cur, timeout);
+}
+
+/* Poll for a maximum of 20 ms. This is similar to the 2 jiffies @ 100 Hz
+ * that was used before, and should cover MDIO bus speeds down to 3200 Hz
+ * (a 64-bit MDIO frame at 3200 Hz takes 64 / 3200 Hz = 20 ms).
+ */
+#define HARD_ACS_RDY_POLL_NS (20 * NSEC_PER_MSEC)
+
+/**
+ * temac_indirect_busywait - Wait for current indirect register access
+ * to complete.
+ */
int temac_indirect_busywait(struct temac_local *lp)
{
- unsigned long end = jiffies + 2;
+ ktime_t timeout = ktime_add_ns(ktime_get(), HARD_ACS_RDY_POLL_NS);
- while (!(temac_ior(lp, XTE_RDY0_OFFSET) & XTE_RDY0_HARD_ACS_RDY_MASK)) {
- if (time_before_eq(end, jiffies)) {
- WARN_ON(1);
- return -ETIMEDOUT;
- }
- usleep_range(500, 1000);
- }
- return 0;
+ spin_until_cond(hard_acs_rdy_or_timeout(lp, timeout));
+ if (WARN_ON(!hard_acs_rdy(lp)))
+ return -ETIMEDOUT;
+ else
+ return 0;
}
/**
- * temac_indirect_in32
- *
- * lp->indirect_mutex must be held when calling this function
+ * temac_indirect_in32 - Indirect register read access. This function
+ * must be called without lp->indirect_lock being held.
*/
u32 temac_indirect_in32(struct temac_local *lp, int reg)
{
- u32 val;
+ unsigned long flags;
+ int val;
+
+ spin_lock_irqsave(lp->indirect_lock, flags);
+ val = temac_indirect_in32_locked(lp, reg);
+ spin_unlock_irqrestore(lp->indirect_lock, flags);
+ return val;
+}
- if (temac_indirect_busywait(lp))
+/**
+ * temac_indirect_in32_locked - Indirect register read access. This
+ * function must be called with lp->indirect_lock being held. Use
+ * this together with spin_lock_irqsave/spin_unlock_irqrestore to avoid
+ * repeated lock/unlock and to ensure uninterrupted access to indirect
+ * registers.
+ */
+u32 temac_indirect_in32_locked(struct temac_local *lp, int reg)
+{
+ /* This initial wait should normally not spin, as we always
+ * try to wait for indirect access to complete before
+ * releasing the indirect_lock.
+ */
+ if (WARN_ON(temac_indirect_busywait(lp)))
return -ETIMEDOUT;
+ /* Initiate read from indirect register */
temac_iow(lp, XTE_CTL0_OFFSET, reg);
- if (temac_indirect_busywait(lp))
+ /* Wait for indirect register access to complete. We really
+ * should not see timeouts, and could even end up causing
+ * problems for the following indirect access, so let's make a bit
+ * of WARN noise.
+ */
+ if (WARN_ON(temac_indirect_busywait(lp)))
return -ETIMEDOUT;
- val = temac_ior(lp, XTE_LSW0_OFFSET);
-
- return val;
+ /* Value is ready now */
+ return temac_ior(lp, XTE_LSW0_OFFSET);
}
/**
- * temac_indirect_out32
- *
- * lp->indirect_mutex must be held when calling this function
+ * temac_indirect_out32 - Indirect register write access. This function
+ * must be called without lp->indirect_lock being held.
*/
void temac_indirect_out32(struct temac_local *lp, int reg, u32 value)
{
- if (temac_indirect_busywait(lp))
+ unsigned long flags;
+
+ spin_lock_irqsave(lp->indirect_lock, flags);
+ temac_indirect_out32_locked(lp, reg, value);
+ spin_unlock_irqrestore(lp->indirect_lock, flags);
+}
+
+/**
+ * temac_indirect_out32_locked - Indirect register write access. This
+ * function must be called with lp->indirect_lock being held. Use
+ * this together with spin_lock_irqsave/spin_unlock_irqrestore to avoid
+ * repeated lock/unlock and to ensure uninterrupted access to indirect
+ * registers.
+ */
+void temac_indirect_out32_locked(struct temac_local *lp, int reg, u32 value)
+{
+ /* As in temac_indirect_in32_locked(), we should normally not
+ * spin here. And if it happens, we actually end up silently
+ * ignoring the write request. Ouch.
+ */
+ if (WARN_ON(temac_indirect_busywait(lp)))
return;
+ /* Initiate write to indirect register */
temac_iow(lp, XTE_LSW0_OFFSET, value);
temac_iow(lp, XTE_CTL0_OFFSET, CNTLREG_WRITE_ENABLE_MASK | reg);
- temac_indirect_busywait(lp);
+ /* As in temac_indirect_in32_locked(), we should not see timeouts
+ * here. And if it happens, we continue before the write has
+ * completed. Not good.
+ */
+ WARN_ON(temac_indirect_busywait(lp));
}
/**
@@ -344,20 +411,21 @@ out:
static void temac_do_set_mac_address(struct net_device *ndev)
{
struct temac_local *lp = netdev_priv(ndev);
+ unsigned long flags;
/* set up unicast MAC address filter set its mac address */
- mutex_lock(lp->indirect_mutex);
- temac_indirect_out32(lp, XTE_UAW0_OFFSET,
- (ndev->dev_addr[0]) |
- (ndev->dev_addr[1] << 8) |
- (ndev->dev_addr[2] << 16) |
- (ndev->dev_addr[3] << 24));
+ spin_lock_irqsave(lp->indirect_lock, flags);
+ temac_indirect_out32_locked(lp, XTE_UAW0_OFFSET,
+ (ndev->dev_addr[0]) |
+ (ndev->dev_addr[1] << 8) |
+ (ndev->dev_addr[2] << 16) |
+ (ndev->dev_addr[3] << 24));
/* There are reserved bits in EUAW1
* so don't affect them. Set MAC bits [47:32] in EUAW1 */
- temac_indirect_out32(lp, XTE_UAW1_OFFSET,
- (ndev->dev_addr[4] & 0x000000ff) |
- (ndev->dev_addr[5] << 8));
- mutex_unlock(lp->indirect_mutex);
+ temac_indirect_out32_locked(lp, XTE_UAW1_OFFSET,
+ (ndev->dev_addr[4] & 0x000000ff) |
+ (ndev->dev_addr[5] << 8));
+ spin_unlock_irqrestore(lp->indirect_lock, flags);
}
static int temac_init_mac_address(struct net_device *ndev, const void *address)
@@ -383,49 +451,58 @@ static int temac_set_mac_address(struct net_device *ndev, void *p)
static void temac_set_multicast_list(struct net_device *ndev)
{
struct temac_local *lp = netdev_priv(ndev);
- u32 multi_addr_msw, multi_addr_lsw, val;
- int i;
+ u32 multi_addr_msw, multi_addr_lsw;
+ int i = 0;
+ unsigned long flags;
+ bool promisc_mode_disabled = false;
- mutex_lock(lp->indirect_mutex);
- if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
- netdev_mc_count(ndev) > MULTICAST_CAM_TABLE_NUM) {
- /*
- * We must make the kernel realise we had to move
- * into promisc mode or we start all out war on
- * the cable. If it was a promisc request the
- * flag is already set. If not we assert it.
- */
- ndev->flags |= IFF_PROMISC;
+ if (ndev->flags & (IFF_PROMISC | IFF_ALLMULTI) ||
+ (netdev_mc_count(ndev) > MULTICAST_CAM_TABLE_NUM)) {
temac_indirect_out32(lp, XTE_AFM_OFFSET, XTE_AFM_EPPRM_MASK);
dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
- } else if (!netdev_mc_empty(ndev)) {
+ return;
+ }
+
+ spin_lock_irqsave(lp->indirect_lock, flags);
+
+ if (!netdev_mc_empty(ndev)) {
struct netdev_hw_addr *ha;
- i = 0;
netdev_for_each_mc_addr(ha, ndev) {
- if (i >= MULTICAST_CAM_TABLE_NUM)
+ if (WARN_ON(i >= MULTICAST_CAM_TABLE_NUM))
break;
multi_addr_msw = ((ha->addr[3] << 24) |
(ha->addr[2] << 16) |
(ha->addr[1] << 8) |
(ha->addr[0]));
- temac_indirect_out32(lp, XTE_MAW0_OFFSET,
- multi_addr_msw);
+ temac_indirect_out32_locked(lp, XTE_MAW0_OFFSET,
+ multi_addr_msw);
multi_addr_lsw = ((ha->addr[5] << 8) |
(ha->addr[4]) | (i << 16));
- temac_indirect_out32(lp, XTE_MAW1_OFFSET,
- multi_addr_lsw);
+ temac_indirect_out32_locked(lp, XTE_MAW1_OFFSET,
+ multi_addr_lsw);
i++;
}
- } else {
- val = temac_indirect_in32(lp, XTE_AFM_OFFSET);
- temac_indirect_out32(lp, XTE_AFM_OFFSET,
- val & ~XTE_AFM_EPPRM_MASK);
- temac_indirect_out32(lp, XTE_MAW0_OFFSET, 0);
- temac_indirect_out32(lp, XTE_MAW1_OFFSET, 0);
- dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
}
- mutex_unlock(lp->indirect_mutex);
+
+ /* Clear any remaining/unused address table entries */
+ while (i < MULTICAST_CAM_TABLE_NUM) {
+ temac_indirect_out32_locked(lp, XTE_MAW0_OFFSET, 0);
+ temac_indirect_out32_locked(lp, XTE_MAW1_OFFSET, i << 16);
+ i++;
+ }
+
+ /* Enable address filter block if currently disabled */
+ if (temac_indirect_in32_locked(lp, XTE_AFM_OFFSET)
+ & XTE_AFM_EPPRM_MASK) {
+ temac_indirect_out32_locked(lp, XTE_AFM_OFFSET, 0);
+ promisc_mode_disabled = true;
+ }
+
+ spin_unlock_irqrestore(lp->indirect_lock, flags);
+
+ if (promisc_mode_disabled)
+ dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
}
static struct temac_option {
@@ -516,17 +593,19 @@ static u32 temac_setoptions(struct net_device *ndev, u32 options)
struct temac_local *lp = netdev_priv(ndev);
struct temac_option *tp = &temac_options[0];
int reg;
+ unsigned long flags;
- mutex_lock(lp->indirect_mutex);
+ spin_lock_irqsave(lp->indirect_lock, flags);
while (tp->opt) {
- reg = temac_indirect_in32(lp, tp->reg) & ~tp->m_or;
- if (options & tp->opt)
+ reg = temac_indirect_in32_locked(lp, tp->reg) & ~tp->m_or;
+ if (options & tp->opt) {
reg |= tp->m_or;
- temac_indirect_out32(lp, tp->reg, reg);
+ temac_indirect_out32_locked(lp, tp->reg, reg);
+ }
tp++;
}
+ spin_unlock_irqrestore(lp->indirect_lock, flags);
lp->options |= options;
- mutex_unlock(lp->indirect_mutex);
return 0;
}
@@ -537,6 +616,7 @@ static void temac_device_reset(struct net_device *ndev)
struct temac_local *lp = netdev_priv(ndev);
u32 timeout;
u32 val;
+ unsigned long flags;
/* Perform a software reset */
@@ -545,7 +625,6 @@ static void temac_device_reset(struct net_device *ndev)
dev_dbg(&ndev->dev, "%s()\n", __func__);
- mutex_lock(lp->indirect_mutex);
/* Reset the receiver and wait for it to finish reset */
temac_indirect_out32(lp, XTE_RXC1_OFFSET, XTE_RXC1_RXRST_MASK);
timeout = 1000;
@@ -571,8 +650,11 @@ static void temac_device_reset(struct net_device *ndev)
}
/* Disable the receiver */
- val = temac_indirect_in32(lp, XTE_RXC1_OFFSET);
- temac_indirect_out32(lp, XTE_RXC1_OFFSET, val & ~XTE_RXC1_RXEN_MASK);
+ spin_lock_irqsave(lp->indirect_lock, flags);
+ val = temac_indirect_in32_locked(lp, XTE_RXC1_OFFSET);
+ temac_indirect_out32_locked(lp, XTE_RXC1_OFFSET,
+ val & ~XTE_RXC1_RXEN_MASK);
+ spin_unlock_irqrestore(lp->indirect_lock, flags);
/* Reset Local Link (DMA) */
lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);
@@ -592,12 +674,12 @@ static void temac_device_reset(struct net_device *ndev)
"temac_device_reset descriptor allocation failed\n");
}
- temac_indirect_out32(lp, XTE_RXC0_OFFSET, 0);
- temac_indirect_out32(lp, XTE_RXC1_OFFSET, 0);
- temac_indirect_out32(lp, XTE_TXC_OFFSET, 0);
- temac_indirect_out32(lp, XTE_FCC_OFFSET, XTE_FCC_RXFLO_MASK);
-
- mutex_unlock(lp->indirect_mutex);
+ spin_lock_irqsave(lp->indirect_lock, flags);
+ temac_indirect_out32_locked(lp, XTE_RXC0_OFFSET, 0);
+ temac_indirect_out32_locked(lp, XTE_RXC1_OFFSET, 0);
+ temac_indirect_out32_locked(lp, XTE_TXC_OFFSET, 0);
+ temac_indirect_out32_locked(lp, XTE_FCC_OFFSET, XTE_FCC_RXFLO_MASK);
+ spin_unlock_irqrestore(lp->indirect_lock, flags);
/* Sync default options with HW
* but leave receiver and transmitter disabled. */
@@ -621,13 +703,14 @@ static void temac_adjust_link(struct net_device *ndev)
struct phy_device *phy = ndev->phydev;
u32 mii_speed;
int link_state;
+ unsigned long flags;
/* hash together the state values to decide if something has changed */
link_state = phy->speed | (phy->duplex << 1) | phy->link;
- mutex_lock(lp->indirect_mutex);
if (lp->last_link != link_state) {
- mii_speed = temac_indirect_in32(lp, XTE_EMCFG_OFFSET);
+ spin_lock_irqsave(lp->indirect_lock, flags);
+ mii_speed = temac_indirect_in32_locked(lp, XTE_EMCFG_OFFSET);
mii_speed &= ~XTE_EMCFG_LINKSPD_MASK;
switch (phy->speed) {
@@ -637,11 +720,12 @@ static void temac_adjust_link(struct net_device *ndev)
}
/* Write new speed setting out to TEMAC */
- temac_indirect_out32(lp, XTE_EMCFG_OFFSET, mii_speed);
+ temac_indirect_out32_locked(lp, XTE_EMCFG_OFFSET, mii_speed);
+ spin_unlock_irqrestore(lp->indirect_lock, flags);
+
lp->last_link = link_state;
phy_print_status(phy);
}
- mutex_unlock(lp->indirect_mutex);
}
#ifdef CONFIG_64BIT
@@ -1011,6 +1095,7 @@ static const struct net_device_ops temac_netdev_ops = {
.ndo_open = temac_open,
.ndo_stop = temac_stop,
.ndo_start_xmit = temac_start_xmit,
+ .ndo_set_rx_mode = temac_set_multicast_list,
.ndo_set_mac_address = temac_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = temac_ioctl,
@@ -1076,7 +1161,6 @@ static int temac_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ndev);
SET_NETDEV_DEV(ndev, &pdev->dev);
- ndev->flags &= ~IFF_MULTICAST; /* clear multicast */
ndev->features = NETIF_F_SG;
ndev->netdev_ops = &temac_netdev_ops;
ndev->ethtool_ops = &temac_ethtool_ops;
@@ -1103,17 +1187,17 @@ static int temac_probe(struct platform_device *pdev)
/* Setup mutex for synchronization of indirect register access */
if (pdata) {
- if (!pdata->indirect_mutex) {
+ if (!pdata->indirect_lock) {
dev_err(&pdev->dev,
- "indirect_mutex missing in platform_data\n");
+ "indirect_lock missing in platform_data\n");
return -EINVAL;
}
- lp->indirect_mutex = pdata->indirect_mutex;
+ lp->indirect_lock = pdata->indirect_lock;
} else {
- lp->indirect_mutex = devm_kmalloc(&pdev->dev,
- sizeof(*lp->indirect_mutex),
- GFP_KERNEL);
- mutex_init(lp->indirect_mutex);
+ lp->indirect_lock = devm_kmalloc(&pdev->dev,
+ sizeof(*lp->indirect_lock),
+ GFP_KERNEL);
+ spin_lock_init(lp->indirect_lock);
}
/* map device registers */
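
The *_locked accessors added above are intended to be batched under a single spin_lock_irqsave()/spin_unlock_irqrestore() pair, as temac_do_set_mac_address() and temac_device_reset() now do. A minimal sketch of that pattern (temac_rmw_example is a hypothetical helper, not part of the driver):

static void temac_rmw_example(struct temac_local *lp, int reg, u32 set)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(lp->indirect_lock, flags);
	/* read-modify-write without dropping the lock in between */
	val = temac_indirect_in32_locked(lp, reg);
	temac_indirect_out32_locked(lp, reg, val | set);
	spin_unlock_irqrestore(lp->indirect_lock, flags);
}
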
diff --git a/drivers/net/ethernet/xilinx/ll_temac_mdio.c b/drivers/net/ethernet/xilinx/ll_temac_mdio.c
index a4667326f745..6fd2dea4e60f 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_mdio.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_mdio.c
@@ -25,14 +25,15 @@ static int temac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
{
struct temac_local *lp = bus->priv;
u32 rc;
+ unsigned long flags;
/* Write the PHY address to the MIIM Access Initiator register.
* When the transfer completes, the PHY register value will appear
* in the LSW0 register */
- mutex_lock(lp->indirect_mutex);
+ spin_lock_irqsave(lp->indirect_lock, flags);
temac_iow(lp, XTE_LSW0_OFFSET, (phy_id << 5) | reg);
- rc = temac_indirect_in32(lp, XTE_MIIMAI_OFFSET);
- mutex_unlock(lp->indirect_mutex);
+ rc = temac_indirect_in32_locked(lp, XTE_MIIMAI_OFFSET);
+ spin_unlock_irqrestore(lp->indirect_lock, flags);
dev_dbg(lp->dev, "temac_mdio_read(phy_id=%i, reg=%x) == %x\n",
phy_id, reg, rc);
@@ -43,6 +44,7 @@ static int temac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
static int temac_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val)
{
struct temac_local *lp = bus->priv;
+ unsigned long flags;
dev_dbg(lp->dev, "temac_mdio_write(phy_id=%i, reg=%x, val=%x)\n",
phy_id, reg, val);
@@ -50,10 +52,10 @@ static int temac_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val)
/* First write the desired value into the write data register
* and then write the address into the access initiator register
*/
- mutex_lock(lp->indirect_mutex);
- temac_indirect_out32(lp, XTE_MGTDR_OFFSET, val);
- temac_indirect_out32(lp, XTE_MIIMAI_OFFSET, (phy_id << 5) | reg);
- mutex_unlock(lp->indirect_mutex);
+ spin_lock_irqsave(lp->indirect_lock, flags);
+ temac_indirect_out32_locked(lp, XTE_MGTDR_OFFSET, val);
+ temac_indirect_out32_locked(lp, XTE_MIIMAI_OFFSET, (phy_id << 5) | reg);
+ spin_unlock_irqrestore(lp->indirect_lock, flags);
return 0;
}
@@ -87,9 +89,7 @@ int temac_mdio_setup(struct temac_local *lp, struct platform_device *pdev)
/* Enable the MDIO bus by asserting the enable bit and writing
* in the clock config */
- mutex_lock(lp->indirect_mutex);
temac_indirect_out32(lp, XTE_MC_OFFSET, 1 << 6 | clk_div);
- mutex_unlock(lp->indirect_mutex);
bus = devm_mdiobus_alloc(&pdev->dev);
if (!bus)
@@ -116,10 +116,8 @@ int temac_mdio_setup(struct temac_local *lp, struct platform_device *pdev)
if (rc)
return rc;
- mutex_lock(lp->indirect_mutex);
dev_dbg(lp->dev, "MDIO bus registered; MC:%x\n",
temac_indirect_in32(lp, XTE_MC_OFFSET));
- mutex_unlock(lp->indirect_mutex);
return 0;
}
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index 011adae32b89..2dacfc85b3ba 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -13,6 +13,7 @@
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/if_vlan.h>
+#include <linux/phylink.h>
/* Packet size info */
#define XAE_HDR_SIZE 14 /* Size of Ethernet header */
@@ -83,6 +84,8 @@
#define XAXIDMA_CR_RUNSTOP_MASK 0x00000001 /* Start/stop DMA channel */
#define XAXIDMA_CR_RESET_MASK 0x00000004 /* Reset DMA engine */
+#define XAXIDMA_SR_HALT_MASK 0x00000001 /* Indicates DMA channel halted */
+
#define XAXIDMA_BD_NDESC_OFFSET 0x00 /* Next descriptor pointer */
#define XAXIDMA_BD_BUFA_OFFSET 0x08 /* Buffer address */
#define XAXIDMA_BD_CTRL_LEN_OFFSET 0x18 /* Control/buffer length */
@@ -356,9 +359,6 @@
* @app2: MM2S/S2MM User Application Field 2.
* @app3: MM2S/S2MM User Application Field 3.
* @app4: MM2S/S2MM User Application Field 4.
- * @sw_id_offset: MM2S/S2MM Sw ID
- * @reserved5: Reserved and not used
- * @reserved6: Reserved and not used
*/
struct axidma_bd {
u32 next; /* Physical address of next buffer descriptor */
@@ -373,11 +373,9 @@ struct axidma_bd {
u32 app1; /* TX start << 16 | insert */
u32 app2; /* TX csum seed */
u32 app3;
- u32 app4;
- u32 sw_id_offset;
- u32 reserved5;
- u32 reserved6;
-};
+ u32 app4; /* Last field used by HW */
+ struct sk_buff *skb;
+} __aligned(XAXIDMA_BD_MINIMUM_ALIGNMENT);
/**
* struct axienet_local - axienet private per device data
@@ -385,6 +383,7 @@ struct axidma_bd {
* @dev: Pointer to device structure
* @phy_node: Pointer to device node structure
* @mii_bus: Pointer to MII bus structure
+ * @regs_start: Resource start for axienet device addresses
* @regs: Base address for the axienet_local device address space
* @dma_regs: Base address for the axidma device address space
* @dma_err_tasklet: Tasklet structure to process Axi DMA errors
@@ -422,10 +421,17 @@ struct axienet_local {
/* Connection to PHY device */
struct device_node *phy_node;
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
+
+ /* Clock for AXI bus */
+ struct clk *clk;
+
/* MDIO bus data */
struct mii_bus *mii_bus; /* MII bus reference */
/* IO registers, dma functions and IRQs */
+ resource_size_t regs_start;
void __iomem *regs;
void __iomem *dma_regs;
@@ -433,17 +439,19 @@ struct axienet_local {
int tx_irq;
int rx_irq;
+ int eth_irq;
phy_interface_t phy_mode;
u32 options; /* Current options word */
- u32 last_link;
u32 features;
/* Buffer descriptors */
struct axidma_bd *tx_bd_v;
dma_addr_t tx_bd_p;
+ u32 tx_bd_num;
struct axidma_bd *rx_bd_v;
dma_addr_t rx_bd_p;
+ u32 rx_bd_num;
u32 tx_bd_ci;
u32 tx_bd_tail;
u32 rx_bd_ci;
@@ -481,7 +489,7 @@ struct axienet_option {
*/
static inline u32 axienet_ior(struct axienet_local *lp, off_t offset)
{
- return in_be32(lp->regs + offset);
+ return ioread32(lp->regs + offset);
}
static inline u32 axinet_ior_read_mcr(struct axienet_local *lp)
@@ -501,12 +509,13 @@ static inline u32 axinet_ior_read_mcr(struct axienet_local *lp)
static inline void axienet_iow(struct axienet_local *lp, off_t offset,
u32 value)
{
- out_be32((lp->regs + offset), value);
+ iowrite32(value, lp->regs + offset);
}
/* Function prototypes visible in xilinx_axienet_mdio.c for other files */
-int axienet_mdio_setup(struct axienet_local *lp, struct device_node *np);
-int axienet_mdio_wait_until_ready(struct axienet_local *lp);
+int axienet_mdio_enable(struct axienet_local *lp);
+void axienet_mdio_disable(struct axienet_local *lp);
+int axienet_mdio_setup(struct axienet_local *lp);
void axienet_mdio_teardown(struct axienet_local *lp);
#endif /* XILINX_AXI_ENET_H */
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 831967f6eff8..4fc627fb4d11 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -7,6 +7,7 @@
* Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
* Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
* Copyright (c) 2010 - 2011 PetaLogix
+ * Copyright (c) 2019 SED Systems, a division of Calian Ltd.
* Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
*
* This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
@@ -21,6 +22,7 @@
* - Add support for extended VLAN support.
*/
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
@@ -38,16 +40,18 @@
#include "xilinx_axienet.h"
-/* Descriptors defines for Tx and Rx DMA - 2^n for the best performance */
-#define TX_BD_NUM 64
-#define RX_BD_NUM 128
+/* Descriptor defines for Tx and Rx DMA */
+#define TX_BD_NUM_DEFAULT 64
+#define RX_BD_NUM_DEFAULT 1024
+#define TX_BD_NUM_MAX 4096
+#define RX_BD_NUM_MAX 4096
/* Must be shorter than length of ethtool_drvinfo.driver field to fit */
#define DRIVER_NAME "xaxienet"
#define DRIVER_DESCRIPTION "Xilinx Axi Ethernet driver"
#define DRIVER_VERSION "1.00a"
-#define AXIENET_REGS_N 32
+#define AXIENET_REGS_N 40
/* Match table for of_platform binding */
static const struct of_device_id axienet_of_match[] = {
@@ -125,7 +129,7 @@ static struct axienet_option axienet_options[] = {
*/
static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
{
- return in_be32(lp->dma_regs + reg);
+ return ioread32(lp->dma_regs + reg);
}
/**
@@ -140,7 +144,7 @@ static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
static inline void axienet_dma_out32(struct axienet_local *lp,
off_t reg, u32 value)
{
- out_be32((lp->dma_regs + reg), value);
+ iowrite32(value, lp->dma_regs + reg);
}
/**
@@ -156,22 +160,21 @@ static void axienet_dma_bd_release(struct net_device *ndev)
int i;
struct axienet_local *lp = netdev_priv(ndev);
- for (i = 0; i < RX_BD_NUM; i++) {
+ for (i = 0; i < lp->rx_bd_num; i++) {
dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
lp->max_frm_size, DMA_FROM_DEVICE);
- dev_kfree_skb((struct sk_buff *)
- (lp->rx_bd_v[i].sw_id_offset));
+ dev_kfree_skb(lp->rx_bd_v[i].skb);
}
if (lp->rx_bd_v) {
dma_free_coherent(ndev->dev.parent,
- sizeof(*lp->rx_bd_v) * RX_BD_NUM,
+ sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
lp->rx_bd_v,
lp->rx_bd_p);
}
if (lp->tx_bd_v) {
dma_free_coherent(ndev->dev.parent,
- sizeof(*lp->tx_bd_v) * TX_BD_NUM,
+ sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
lp->tx_bd_v,
lp->tx_bd_p);
}
@@ -201,33 +204,33 @@ static int axienet_dma_bd_init(struct net_device *ndev)
/* Allocate the Tx and Rx buffer descriptors. */
lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
- sizeof(*lp->tx_bd_v) * TX_BD_NUM,
+ sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
&lp->tx_bd_p, GFP_KERNEL);
if (!lp->tx_bd_v)
goto out;
lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
- sizeof(*lp->rx_bd_v) * RX_BD_NUM,
+ sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
&lp->rx_bd_p, GFP_KERNEL);
if (!lp->rx_bd_v)
goto out;
- for (i = 0; i < TX_BD_NUM; i++) {
+ for (i = 0; i < lp->tx_bd_num; i++) {
lp->tx_bd_v[i].next = lp->tx_bd_p +
sizeof(*lp->tx_bd_v) *
- ((i + 1) % TX_BD_NUM);
+ ((i + 1) % lp->tx_bd_num);
}
- for (i = 0; i < RX_BD_NUM; i++) {
+ for (i = 0; i < lp->rx_bd_num; i++) {
lp->rx_bd_v[i].next = lp->rx_bd_p +
sizeof(*lp->rx_bd_v) *
- ((i + 1) % RX_BD_NUM);
+ ((i + 1) % lp->rx_bd_num);
skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
if (!skb)
goto out;
- lp->rx_bd_v[i].sw_id_offset = (u32) skb;
+ lp->rx_bd_v[i].skb = skb;
lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
skb->data,
lp->max_frm_size,
@@ -269,7 +272,7 @@ static int axienet_dma_bd_init(struct net_device *ndev)
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
cr | XAXIDMA_CR_RUNSTOP_MASK);
axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
- (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
+ (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
/* Write to the RS (Run-stop) bit in the Tx channel control register.
* Tx channel is now ready to run. But only after we write to the
@@ -434,17 +437,20 @@ static void axienet_setoptions(struct net_device *ndev, u32 options)
lp->options |= options;
}
-static void __axienet_device_reset(struct axienet_local *lp, off_t offset)
+static void __axienet_device_reset(struct axienet_local *lp)
{
u32 timeout;
/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
* process of Axi DMA takes a while to complete as all pending
* commands/transfers will be flushed or completed during this
* reset process.
+ * Note that even though both TX and RX have their own reset register,
+ * they both reset the entire DMA core, so only one needs to be used.
*/
- axienet_dma_out32(lp, offset, XAXIDMA_CR_RESET_MASK);
+ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK);
timeout = DELAY_OF_ONE_MILLISEC;
- while (axienet_dma_in32(lp, offset) & XAXIDMA_CR_RESET_MASK) {
+ while (axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET) &
+ XAXIDMA_CR_RESET_MASK) {
udelay(1);
if (--timeout == 0) {
netdev_err(lp->ndev, "%s: DMA reset timeout!\n",
@@ -470,8 +476,7 @@ static void axienet_device_reset(struct net_device *ndev)
u32 axienet_status;
struct axienet_local *lp = netdev_priv(ndev);
- __axienet_device_reset(lp, XAXIDMA_TX_CR_OFFSET);
- __axienet_device_reset(lp, XAXIDMA_RX_CR_OFFSET);
+ __axienet_device_reset(lp);
lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
lp->options |= XAE_OPTION_VLAN;
@@ -498,6 +503,8 @@ static void axienet_device_reset(struct net_device *ndev)
axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
if (axienet_status & XAE_INT_RXRJECT_MASK)
axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
+ axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
+ XAE_INT_RECV_ERROR_MASK : 0);
axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
@@ -514,63 +521,6 @@ static void axienet_device_reset(struct net_device *ndev)
}
/**
- * axienet_adjust_link - Adjust the PHY link speed/duplex.
- * @ndev: Pointer to the net_device structure
- *
- * This function is called to change the speed and duplex setting after
- * auto negotiation is done by the PHY. This is the function that gets
- * registered with the PHY interface through the "of_phy_connect" call.
- */
-static void axienet_adjust_link(struct net_device *ndev)
-{
- u32 emmc_reg;
- u32 link_state;
- u32 setspeed = 1;
- struct axienet_local *lp = netdev_priv(ndev);
- struct phy_device *phy = ndev->phydev;
-
- link_state = phy->speed | (phy->duplex << 1) | phy->link;
- if (lp->last_link != link_state) {
- if ((phy->speed == SPEED_10) || (phy->speed == SPEED_100)) {
- if (lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX)
- setspeed = 0;
- } else {
- if ((phy->speed == SPEED_1000) &&
- (lp->phy_mode == PHY_INTERFACE_MODE_MII))
- setspeed = 0;
- }
-
- if (setspeed == 1) {
- emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
- emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;
-
- switch (phy->speed) {
- case SPEED_1000:
- emmc_reg |= XAE_EMMC_LINKSPD_1000;
- break;
- case SPEED_100:
- emmc_reg |= XAE_EMMC_LINKSPD_100;
- break;
- case SPEED_10:
- emmc_reg |= XAE_EMMC_LINKSPD_10;
- break;
- default:
- dev_err(&ndev->dev, "Speed other than 10, 100 "
- "or 1Gbps is not supported\n");
- break;
- }
-
- axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);
- lp->last_link = link_state;
- phy_print_status(phy);
- } else {
- netdev_err(ndev,
- "Error setting Axi Ethernet mac speed\n");
- }
- }
-}
-
-/**
* axienet_start_xmit_done - Invoked once a transmit is completed by the
* Axi DMA Tx channel.
* @ndev: Pointer to the net_device structure
@@ -595,26 +545,31 @@ static void axienet_start_xmit_done(struct net_device *ndev)
dma_unmap_single(ndev->dev.parent, cur_p->phys,
(cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
DMA_TO_DEVICE);
- if (cur_p->app4)
- dev_consume_skb_irq((struct sk_buff *)cur_p->app4);
+ if (cur_p->skb)
+ dev_consume_skb_irq(cur_p->skb);
/*cur_p->phys = 0;*/
cur_p->app0 = 0;
cur_p->app1 = 0;
cur_p->app2 = 0;
cur_p->app4 = 0;
cur_p->status = 0;
+ cur_p->skb = NULL;
size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
packets++;
- ++lp->tx_bd_ci;
- lp->tx_bd_ci %= TX_BD_NUM;
+ if (++lp->tx_bd_ci >= lp->tx_bd_num)
+ lp->tx_bd_ci = 0;
cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
status = cur_p->status;
}
ndev->stats.tx_packets += packets;
ndev->stats.tx_bytes += size;
+
+ /* Matches barrier in axienet_start_xmit */
+ smp_mb();
+
netif_wake_queue(ndev);
}
@@ -635,7 +590,7 @@ static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
int num_frag)
{
struct axidma_bd *cur_p;
- cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % TX_BD_NUM];
+ cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % lp->tx_bd_num];
if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK)
return NETDEV_TX_BUSY;
return 0;
@@ -670,9 +625,19 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
if (axienet_check_tx_bd_space(lp, num_frag)) {
- if (!netif_queue_stopped(ndev))
- netif_stop_queue(ndev);
- return NETDEV_TX_BUSY;
+ if (netif_queue_stopped(ndev))
+ return NETDEV_TX_BUSY;
+
+ netif_stop_queue(ndev);
+
+ /* Matches barrier in axienet_start_xmit_done */
+ smp_mb();
+
+ /* Space might have just been freed - check again */
+ if (axienet_check_tx_bd_space(lp, num_frag))
+ return NETDEV_TX_BUSY;
+
+ netif_wake_queue(ndev);
}
if (skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -695,8 +660,8 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb_headlen(skb), DMA_TO_DEVICE);
for (ii = 0; ii < num_frag; ii++) {
- ++lp->tx_bd_tail;
- lp->tx_bd_tail %= TX_BD_NUM;
+ if (++lp->tx_bd_tail >= lp->tx_bd_num)
+ lp->tx_bd_tail = 0;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
frag = &skb_shinfo(skb)->frags[ii];
cur_p->phys = dma_map_single(ndev->dev.parent,
@@ -707,13 +672,13 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
}
cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
- cur_p->app4 = (unsigned long)skb;
+ cur_p->skb = skb;
tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
/* Start the transfer */
axienet_dma_out32(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
- ++lp->tx_bd_tail;
- lp->tx_bd_tail %= TX_BD_NUM;
+ if (++lp->tx_bd_tail >= lp->tx_bd_num)
+ lp->tx_bd_tail = 0;
return NETDEV_TX_OK;
}
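Editorial aside (not part of this patch): the queue stop/wake handshake added above is easiest to read in isolation. The sketch below condenses the protocol that the paired smp_mb() calls implement; sketch_xmit() and sketch_tx_done() are made-up names, while axienet_check_tx_bd_space(), the netif_* helpers and smp_mb() are the real driver/kernel interfaces used above.

static netdev_tx_t sketch_xmit(struct axienet_local *lp,
                               struct net_device *ndev, int num_frag)
{
        if (axienet_check_tx_bd_space(lp, num_frag)) {
                if (netif_queue_stopped(ndev))
                        return NETDEV_TX_BUSY;

                netif_stop_queue(ndev);
                /* Publish the stopped state before re-reading the ring;
                 * pairs with the smp_mb() in sketch_tx_done().
                 */
                smp_mb();
                if (axienet_check_tx_bd_space(lp, num_frag))
                        return NETDEV_TX_BUSY;
                /* Completion freed descriptors in between - undo the stop. */
                netif_wake_queue(ndev);
        }
        /* ... map the skb and hand it to the DMA engine ... */
        return NETDEV_TX_OK;
}

static void sketch_tx_done(struct net_device *ndev)
{
        /* ... reclaim completed descriptors, freeing ring space ... */
        /* Make the freed space visible before waking the queue;
         * pairs with the smp_mb() in sketch_xmit().
         */
        smp_mb();
        netif_wake_queue(ndev);
}

Either the transmit path sees the space freed by the completion handler, or the completion handler sees the stopped queue and wakes it, so the queue cannot stay stopped while free descriptors exist.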
@@ -742,13 +707,15 @@ static void axienet_recv(struct net_device *ndev)
while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
- skb = (struct sk_buff *) (cur_p->sw_id_offset);
- length = cur_p->app4 & 0x0000FFFF;
dma_unmap_single(ndev->dev.parent, cur_p->phys,
lp->max_frm_size,
DMA_FROM_DEVICE);
+ skb = cur_p->skb;
+ cur_p->skb = NULL;
+ length = cur_p->app4 & 0x0000FFFF;
+
skb_put(skb, length);
skb->protocol = eth_type_trans(skb, ndev);
/*skb_checksum_none_assert(skb);*/
@@ -783,10 +750,10 @@ static void axienet_recv(struct net_device *ndev)
DMA_FROM_DEVICE);
cur_p->cntrl = lp->max_frm_size;
cur_p->status = 0;
- cur_p->sw_id_offset = (u32) new_skb;
+ cur_p->skb = new_skb;
- ++lp->rx_bd_ci;
- lp->rx_bd_ci %= RX_BD_NUM;
+ if (++lp->rx_bd_ci >= lp->rx_bd_num)
+ lp->rx_bd_ci = 0;
cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
}
@@ -802,7 +769,7 @@ static void axienet_recv(struct net_device *ndev)
* @irq: irq number
* @_ndev: net_device pointer
*
- * Return: IRQ_HANDLED for all cases.
+ * Return: IRQ_HANDLED if the device generated a TX interrupt, IRQ_NONE otherwise.
*
* This is the Axi DMA Tx done Isr. It invokes "axienet_start_xmit_done"
* to complete the BD processing.
@@ -821,7 +788,7 @@ static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
goto out;
}
if (!(status & XAXIDMA_IRQ_ALL_MASK))
- dev_err(&ndev->dev, "No interrupts asserted in Tx path\n");
+ return IRQ_NONE;
if (status & XAXIDMA_IRQ_ERROR_MASK) {
dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status);
dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
@@ -851,7 +818,7 @@ out:
* @irq: irq number
* @_ndev: net_device pointer
*
- * Return: IRQ_HANDLED for all cases.
+ * Return: IRQ_HANDLED if the device generated an RX interrupt, IRQ_NONE otherwise.
*
* This is the Axi DMA Rx Isr. It invokes "axienet_recv" to complete the BD
* processing.
@@ -870,7 +837,7 @@ static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
goto out;
}
if (!(status & XAXIDMA_IRQ_ALL_MASK))
- dev_err(&ndev->dev, "No interrupts asserted in Rx path\n");
+ return IRQ_NONE;
if (status & XAXIDMA_IRQ_ERROR_MASK) {
dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status);
dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
@@ -895,6 +862,35 @@ out:
return IRQ_HANDLED;
}
+/**
+ * axienet_eth_irq - Ethernet core Isr.
+ * @irq: irq number
+ * @_ndev: net_device pointer
+ *
+ * Return: IRQ_HANDLED if the device generated a core interrupt, IRQ_NONE otherwise.
+ *
+ * Handle miscellaneous conditions indicated by Ethernet core IRQ.
+ */
+static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
+{
+ struct net_device *ndev = _ndev;
+ struct axienet_local *lp = netdev_priv(ndev);
+ unsigned int pending;
+
+ pending = axienet_ior(lp, XAE_IP_OFFSET);
+ if (!pending)
+ return IRQ_NONE;
+
+ if (pending & XAE_INT_RXFIFOOVR_MASK)
+ ndev->stats.rx_missed_errors++;
+
+ if (pending & XAE_INT_RXRJECT_MASK)
+ ndev->stats.rx_frame_errors++;
+
+ axienet_iow(lp, XAE_IS_OFFSET, pending);
+ return IRQ_HANDLED;
+}
+
static void axienet_dma_err_handler(unsigned long data);
/**
@@ -904,67 +900,72 @@ static void axienet_dma_err_handler(unsigned long data);
* Return: 0, on success.
* non-zero error value on failure
*
- * This is the driver open routine. It calls phy_start to start the PHY device.
+ * This is the driver open routine. It calls phylink_start to start the
+ * PHY device.
* It also registers the interrupt service routines, enables the interrupt lines
* and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
* descriptors are initialized.
*/
static int axienet_open(struct net_device *ndev)
{
- int ret, mdio_mcreg;
+ int ret;
struct axienet_local *lp = netdev_priv(ndev);
- struct phy_device *phydev = NULL;
dev_dbg(&ndev->dev, "axienet_open()\n");
- mdio_mcreg = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
- ret = axienet_mdio_wait_until_ready(lp);
- if (ret < 0)
- return ret;
/* Disable the MDIO interface till Axi Ethernet Reset is completed.
* When we do an Axi Ethernet reset, it resets the complete core
- * including the MDIO. If MDIO is not disabled when the reset
- * process is started, MDIO will be broken afterwards.
+ * including the MDIO. MDIO must be disabled before resetting
+ * and re-enabled afterwards.
+ * Hold MDIO bus lock to avoid MDIO accesses during the reset.
*/
- axienet_iow(lp, XAE_MDIO_MC_OFFSET,
- (mdio_mcreg & (~XAE_MDIO_MC_MDIOEN_MASK)));
+ mutex_lock(&lp->mii_bus->mdio_lock);
+ axienet_mdio_disable(lp);
axienet_device_reset(ndev);
- /* Enable the MDIO */
- axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg);
- ret = axienet_mdio_wait_until_ready(lp);
+ ret = axienet_mdio_enable(lp);
+ mutex_unlock(&lp->mii_bus->mdio_lock);
if (ret < 0)
return ret;
- if (lp->phy_node) {
- phydev = of_phy_connect(lp->ndev, lp->phy_node,
- axienet_adjust_link, 0, lp->phy_mode);
-
- if (!phydev)
- dev_err(lp->dev, "of_phy_connect() failed\n");
- else
- phy_start(phydev);
+ ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
+ if (ret) {
+ dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret);
+ return ret;
}
+ phylink_start(lp->phylink);
+
/* Enable tasklets for Axi DMA error handling */
tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler,
(unsigned long) lp);
/* Enable interrupts for Axi DMA Tx */
- ret = request_irq(lp->tx_irq, axienet_tx_irq, 0, ndev->name, ndev);
+ ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED,
+ ndev->name, ndev);
if (ret)
goto err_tx_irq;
/* Enable interrupts for Axi DMA Rx */
- ret = request_irq(lp->rx_irq, axienet_rx_irq, 0, ndev->name, ndev);
+ ret = request_irq(lp->rx_irq, axienet_rx_irq, IRQF_SHARED,
+ ndev->name, ndev);
if (ret)
goto err_rx_irq;
+ /* Enable interrupts for Axi Ethernet core (if defined) */
+ if (lp->eth_irq > 0) {
+ ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
+ ndev->name, ndev);
+ if (ret)
+ goto err_eth_irq;
+ }
return 0;
+err_eth_irq:
+ free_irq(lp->rx_irq, ndev);
err_rx_irq:
free_irq(lp->tx_irq, ndev);
err_tx_irq:
- if (phydev)
- phy_disconnect(phydev);
+ phylink_stop(lp->phylink);
+ phylink_disconnect_phy(lp->phylink);
tasklet_kill(&lp->dma_err_tasklet);
dev_err(lp->dev, "request_irq() failed\n");
return ret;
@@ -976,34 +977,61 @@ err_tx_irq:
*
* Return: 0, on success.
*
- * This is the driver stop routine. It calls phy_disconnect to stop the PHY
+ * This is the driver stop routine. It calls phylink_stop/phylink_disconnect_phy to stop the PHY
* device. It also removes the interrupt handlers and disables the interrupts.
* The Axi DMA Tx/Rx BDs are released.
*/
static int axienet_stop(struct net_device *ndev)
{
- u32 cr;
+ u32 cr, sr;
+ int count;
struct axienet_local *lp = netdev_priv(ndev);
dev_dbg(&ndev->dev, "axienet_close()\n");
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
- cr & (~XAXIDMA_CR_RUNSTOP_MASK));
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
- cr & (~XAXIDMA_CR_RUNSTOP_MASK));
+ phylink_stop(lp->phylink);
+ phylink_disconnect_phy(lp->phylink);
+
axienet_setoptions(ndev, lp->options &
~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
+ cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
+ cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
+ axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+
+ cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
+ cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
+ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
+
+ axienet_iow(lp, XAE_IE_OFFSET, 0);
+
+ /* Give DMAs a chance to halt gracefully */
+ sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
+ for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
+ msleep(20);
+ sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
+ }
+
+ sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
+ for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
+ msleep(20);
+ sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
+ }
+
+ /* Do a reset to ensure DMA is really stopped */
+ mutex_lock(&lp->mii_bus->mdio_lock);
+ axienet_mdio_disable(lp);
+ __axienet_device_reset(lp);
+ axienet_mdio_enable(lp);
+ mutex_unlock(&lp->mii_bus->mdio_lock);
+
tasklet_kill(&lp->dma_err_tasklet);
+ if (lp->eth_irq > 0)
+ free_irq(lp->eth_irq, ndev);
free_irq(lp->tx_irq, ndev);
free_irq(lp->rx_irq, ndev);
- if (ndev->phydev)
- phy_disconnect(ndev->phydev);
-
axienet_dma_bd_release(ndev);
return 0;
}
@@ -1151,6 +1179,48 @@ static void axienet_ethtools_get_regs(struct net_device *ndev,
data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
+ data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
+ data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
+ data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET);
+ data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET);
+ data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
+ data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
+ data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET);
+ data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
+}
+
+static void axienet_ethtools_get_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ering)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ ering->rx_max_pending = RX_BD_NUM_MAX;
+ ering->rx_mini_max_pending = 0;
+ ering->rx_jumbo_max_pending = 0;
+ ering->tx_max_pending = TX_BD_NUM_MAX;
+ ering->rx_pending = lp->rx_bd_num;
+ ering->rx_mini_pending = 0;
+ ering->rx_jumbo_pending = 0;
+ ering->tx_pending = lp->tx_bd_num;
+}
+
+static int axienet_ethtools_set_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ering)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ if (ering->rx_pending > RX_BD_NUM_MAX ||
+ ering->rx_mini_pending ||
+ ering->rx_jumbo_pending ||
+ ering->tx_pending > TX_BD_NUM_MAX)
+ return -EINVAL;
+
+ if (netif_running(ndev))
+ return -EBUSY;
+
+ lp->rx_bd_num = ering->rx_pending;
+ lp->tx_bd_num = ering->tx_pending;
+ return 0;
}
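Editorial aside (not part of the patch): the get_ringparam/set_ringparam callbacks added above back the standard ETHTOOL_GRINGPARAM/ETHTOOL_SRINGPARAM requests (ethtool -g / -G). A minimal userspace sketch of the read side; the "eth0" interface name and the minimal error handling are assumptions for brevity:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
        struct ethtool_ringparam ring = { .cmd = ETHTOOL_GRINGPARAM };
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
        ifr.ifr_data = (void *)&ring;

        if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
                perror("ETHTOOL_GRINGPARAM");
                return 1;
        }

        printf("RX %u of max %u, TX %u of max %u\n",
               ring.rx_pending, ring.rx_max_pending,
               ring.tx_pending, ring.tx_max_pending);
        close(fd);
        return 0;
}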
/**
@@ -1166,12 +1236,9 @@ static void
axienet_ethtools_get_pauseparam(struct net_device *ndev,
struct ethtool_pauseparam *epauseparm)
{
- u32 regval;
struct axienet_local *lp = netdev_priv(ndev);
- epauseparm->autoneg = 0;
- regval = axienet_ior(lp, XAE_FCC_OFFSET);
- epauseparm->tx_pause = regval & XAE_FCC_FCTX_MASK;
- epauseparm->rx_pause = regval & XAE_FCC_FCRX_MASK;
+
+ phylink_ethtool_get_pauseparam(lp->phylink, epauseparm);
}
/**
@@ -1190,27 +1257,9 @@ static int
axienet_ethtools_set_pauseparam(struct net_device *ndev,
struct ethtool_pauseparam *epauseparm)
{
- u32 regval = 0;
struct axienet_local *lp = netdev_priv(ndev);
- if (netif_running(ndev)) {
- netdev_err(ndev,
- "Please stop netif before applying configuration\n");
- return -EFAULT;
- }
-
- regval = axienet_ior(lp, XAE_FCC_OFFSET);
- if (epauseparm->tx_pause)
- regval |= XAE_FCC_FCTX_MASK;
- else
- regval &= ~XAE_FCC_FCTX_MASK;
- if (epauseparm->rx_pause)
- regval |= XAE_FCC_FCRX_MASK;
- else
- regval &= ~XAE_FCC_FCRX_MASK;
- axienet_iow(lp, XAE_FCC_OFFSET, regval);
-
- return 0;
+ return phylink_ethtool_set_pauseparam(lp->phylink, epauseparm);
}
/**
@@ -1289,17 +1338,170 @@ static int axienet_ethtools_set_coalesce(struct net_device *ndev,
return 0;
}
+static int
+axienet_ethtools_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ return phylink_ethtool_ksettings_get(lp->phylink, cmd);
+}
+
+static int
+axienet_ethtools_set_link_ksettings(struct net_device *ndev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ return phylink_ethtool_ksettings_set(lp->phylink, cmd);
+}
+
static const struct ethtool_ops axienet_ethtool_ops = {
.get_drvinfo = axienet_ethtools_get_drvinfo,
.get_regs_len = axienet_ethtools_get_regs_len,
.get_regs = axienet_ethtools_get_regs,
.get_link = ethtool_op_get_link,
+ .get_ringparam = axienet_ethtools_get_ringparam,
+ .set_ringparam = axienet_ethtools_set_ringparam,
.get_pauseparam = axienet_ethtools_get_pauseparam,
.set_pauseparam = axienet_ethtools_set_pauseparam,
.get_coalesce = axienet_ethtools_get_coalesce,
.set_coalesce = axienet_ethtools_set_coalesce,
- .get_link_ksettings = phy_ethtool_get_link_ksettings,
- .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_link_ksettings = axienet_ethtools_get_link_ksettings,
+ .set_link_ksettings = axienet_ethtools_set_link_ksettings,
+};
+
+static void axienet_validate(struct phylink_config *config,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct axienet_local *lp = netdev_priv(ndev);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+
+ /* Only support the mode we are configured for */
+ if (state->interface != PHY_INTERFACE_MODE_NA &&
+ state->interface != lp->phy_mode) {
+ netdev_warn(ndev, "Cannot use PHY mode %s, supported: %s\n",
+ phy_modes(state->interface),
+ phy_modes(lp->phy_mode));
+ bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ return;
+ }
+
+ phylink_set(mask, Autoneg);
+ phylink_set_port_modes(mask);
+
+ phylink_set(mask, Asym_Pause);
+ phylink_set(mask, Pause);
+ phylink_set(mask, 1000baseX_Full);
+ phylink_set(mask, 10baseT_Full);
+ phylink_set(mask, 100baseT_Full);
+ phylink_set(mask, 1000baseT_Full);
+
+ bitmap_and(supported, supported, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+ bitmap_and(state->advertising, state->advertising, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static int axienet_mac_link_state(struct phylink_config *config,
+ struct phylink_link_state *state)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct axienet_local *lp = netdev_priv(ndev);
+ u32 emmc_reg, fcc_reg;
+
+ state->interface = lp->phy_mode;
+
+ emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
+ if (emmc_reg & XAE_EMMC_LINKSPD_1000)
+ state->speed = SPEED_1000;
+ else if (emmc_reg & XAE_EMMC_LINKSPD_100)
+ state->speed = SPEED_100;
+ else
+ state->speed = SPEED_10;
+
+ state->pause = 0;
+ fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET);
+ if (fcc_reg & XAE_FCC_FCTX_MASK)
+ state->pause |= MLO_PAUSE_TX;
+ if (fcc_reg & XAE_FCC_FCRX_MASK)
+ state->pause |= MLO_PAUSE_RX;
+
+ state->an_complete = 0;
+ state->duplex = 1;
+
+ return 1;
+}
+
+static void axienet_mac_an_restart(struct phylink_config *config)
+{
+ /* Unsupported, do nothing */
+}
+
+static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct axienet_local *lp = netdev_priv(ndev);
+ u32 emmc_reg, fcc_reg;
+
+ emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
+ emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;
+
+ switch (state->speed) {
+ case SPEED_1000:
+ emmc_reg |= XAE_EMMC_LINKSPD_1000;
+ break;
+ case SPEED_100:
+ emmc_reg |= XAE_EMMC_LINKSPD_100;
+ break;
+ case SPEED_10:
+ emmc_reg |= XAE_EMMC_LINKSPD_10;
+ break;
+ default:
+ dev_err(&ndev->dev,
+ "Speed other than 10, 100 or 1Gbps is not supported\n");
+ break;
+ }
+
+ axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);
+
+ fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET);
+ if (state->pause & MLO_PAUSE_TX)
+ fcc_reg |= XAE_FCC_FCTX_MASK;
+ else
+ fcc_reg &= ~XAE_FCC_FCTX_MASK;
+ if (state->pause & MLO_PAUSE_RX)
+ fcc_reg |= XAE_FCC_FCRX_MASK;
+ else
+ fcc_reg &= ~XAE_FCC_FCRX_MASK;
+ axienet_iow(lp, XAE_FCC_OFFSET, fcc_reg);
+}
+
+static void axienet_mac_link_down(struct phylink_config *config,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ /* nothing meaningful to do */
+}
+
+static void axienet_mac_link_up(struct phylink_config *config,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phy)
+{
+ /* nothing meaningful to do */
+}
+
+static const struct phylink_mac_ops axienet_phylink_ops = {
+ .validate = axienet_validate,
+ .mac_link_state = axienet_mac_link_state,
+ .mac_an_restart = axienet_mac_an_restart,
+ .mac_config = axienet_mac_config,
+ .mac_link_down = axienet_mac_link_down,
+ .mac_link_up = axienet_mac_link_up,
};
/**
@@ -1313,38 +1515,33 @@ static void axienet_dma_err_handler(unsigned long data)
{
u32 axienet_status;
u32 cr, i;
- int mdio_mcreg;
struct axienet_local *lp = (struct axienet_local *) data;
struct net_device *ndev = lp->ndev;
struct axidma_bd *cur_p;
axienet_setoptions(ndev, lp->options &
~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
- mdio_mcreg = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
- axienet_mdio_wait_until_ready(lp);
/* Disable the MDIO interface till Axi Ethernet Reset is completed.
* When we do an Axi Ethernet reset, it resets the complete core
- * including the MDIO. So if MDIO is not disabled when the reset
- * process is started, MDIO will be broken afterwards.
+ * including the MDIO. MDIO must be disabled before resetting
+ * and re-enabled afterwards.
+ * Hold MDIO bus lock to avoid MDIO accesses during the reset.
*/
- axienet_iow(lp, XAE_MDIO_MC_OFFSET, (mdio_mcreg &
- ~XAE_MDIO_MC_MDIOEN_MASK));
+ mutex_lock(&lp->mii_bus->mdio_lock);
+ axienet_mdio_disable(lp);
+ __axienet_device_reset(lp);
+ axienet_mdio_enable(lp);
+ mutex_unlock(&lp->mii_bus->mdio_lock);
- __axienet_device_reset(lp, XAXIDMA_TX_CR_OFFSET);
- __axienet_device_reset(lp, XAXIDMA_RX_CR_OFFSET);
-
- axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg);
- axienet_mdio_wait_until_ready(lp);
-
- for (i = 0; i < TX_BD_NUM; i++) {
+ for (i = 0; i < lp->tx_bd_num; i++) {
cur_p = &lp->tx_bd_v[i];
if (cur_p->phys)
dma_unmap_single(ndev->dev.parent, cur_p->phys,
(cur_p->cntrl &
XAXIDMA_BD_CTRL_LENGTH_MASK),
DMA_TO_DEVICE);
- if (cur_p->app4)
- dev_kfree_skb_irq((struct sk_buff *) cur_p->app4);
+ if (cur_p->skb)
+ dev_kfree_skb_irq(cur_p->skb);
cur_p->phys = 0;
cur_p->cntrl = 0;
cur_p->status = 0;
@@ -1353,10 +1550,10 @@ static void axienet_dma_err_handler(unsigned long data)
cur_p->app2 = 0;
cur_p->app3 = 0;
cur_p->app4 = 0;
- cur_p->sw_id_offset = 0;
+ cur_p->skb = NULL;
}
- for (i = 0; i < RX_BD_NUM; i++) {
+ for (i = 0; i < lp->rx_bd_num; i++) {
cur_p = &lp->rx_bd_v[i];
cur_p->status = 0;
cur_p->app0 = 0;
@@ -1404,7 +1601,7 @@ static void axienet_dma_err_handler(unsigned long data)
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
cr | XAXIDMA_CR_RUNSTOP_MASK);
axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
- (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
+ (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
/* Write to the RS (Run-stop) bit in the Tx channel control register.
* Tx channel is now ready to run. But only after we write to the
@@ -1422,6 +1619,8 @@ static void axienet_dma_err_handler(unsigned long data)
axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
if (axienet_status & XAE_INT_RXRJECT_MASK)
axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
+ axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
+ XAE_INT_RECV_ERROR_MASK : 0);
axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
/* Sync default options with HW but leave receiver and
@@ -1453,7 +1652,7 @@ static int axienet_probe(struct platform_device *pdev)
struct axienet_local *lp;
struct net_device *ndev;
const void *mac_addr;
- struct resource *ethres, dmares;
+ struct resource *ethres;
u32 value;
ndev = alloc_etherdev(sizeof(*lp));
@@ -1476,6 +1675,8 @@ static int axienet_probe(struct platform_device *pdev)
lp->ndev = ndev;
lp->dev = &pdev->dev;
lp->options = XAE_OPTION_DEFAULTS;
+ lp->rx_bd_num = RX_BD_NUM_DEFAULT;
+ lp->tx_bd_num = TX_BD_NUM_DEFAULT;
/* Map device registers */
ethres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
lp->regs = devm_ioremap_resource(&pdev->dev, ethres);
@@ -1484,6 +1685,7 @@ static int axienet_probe(struct platform_device *pdev)
ret = PTR_ERR(lp->regs);
goto free_netdev;
}
+ lp->regs_start = ethres->start;
/* Setup checksum offload, but default to off if not specified */
lp->features = 0;
@@ -1568,38 +1770,56 @@ static int axienet_probe(struct platform_device *pdev)
/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
- if (!np) {
- dev_err(&pdev->dev, "could not find DMA node\n");
- ret = -ENODEV;
- goto free_netdev;
- }
- ret = of_address_to_resource(np, 0, &dmares);
- if (ret) {
- dev_err(&pdev->dev, "unable to get DMA resource\n");
+ if (np) {
+ struct resource dmares;
+
+ ret = of_address_to_resource(np, 0, &dmares);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "unable to get DMA resource\n");
+ of_node_put(np);
+ goto free_netdev;
+ }
+ lp->dma_regs = devm_ioremap_resource(&pdev->dev,
+ &dmares);
+ lp->rx_irq = irq_of_parse_and_map(np, 1);
+ lp->tx_irq = irq_of_parse_and_map(np, 0);
of_node_put(np);
- goto free_netdev;
+ lp->eth_irq = platform_get_irq(pdev, 0);
+ } else {
+ /* Check for these resources directly on the Ethernet node. */
+ struct resource *res = platform_get_resource(pdev,
+ IORESOURCE_MEM, 1);
+ if (!res) {
+ dev_err(&pdev->dev, "unable to get DMA memory resource\n");
+ ret = -ENODEV;
+ goto free_netdev;
+ }
+ lp->dma_regs = devm_ioremap_resource(&pdev->dev, res);
+ lp->rx_irq = platform_get_irq(pdev, 1);
+ lp->tx_irq = platform_get_irq(pdev, 0);
+ lp->eth_irq = platform_get_irq(pdev, 2);
}
- lp->dma_regs = devm_ioremap_resource(&pdev->dev, &dmares);
if (IS_ERR(lp->dma_regs)) {
dev_err(&pdev->dev, "could not map DMA regs\n");
ret = PTR_ERR(lp->dma_regs);
- of_node_put(np);
goto free_netdev;
}
- lp->rx_irq = irq_of_parse_and_map(np, 1);
- lp->tx_irq = irq_of_parse_and_map(np, 0);
- of_node_put(np);
if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) {
dev_err(&pdev->dev, "could not determine irqs\n");
ret = -ENOMEM;
goto free_netdev;
}
+ /* Check for Ethernet core IRQ (optional) */
+ if (lp->eth_irq <= 0)
+ dev_info(&pdev->dev, "Ethernet core IRQ not defined\n");
+
/* Retrieve the MAC address */
mac_addr = of_get_mac_address(pdev->dev.of_node);
if (IS_ERR(mac_addr)) {
- dev_err(&pdev->dev, "could not find MAC address\n");
- goto free_netdev;
+ dev_warn(&pdev->dev, "could not find MAC address property: %ld\n",
+ PTR_ERR(mac_addr));
+ mac_addr = NULL;
}
axienet_set_mac_address(ndev, mac_addr);
@@ -1608,9 +1828,36 @@ static int axienet_probe(struct platform_device *pdev)
lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
if (lp->phy_node) {
- ret = axienet_mdio_setup(lp, pdev->dev.of_node);
+ lp->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(lp->clk)) {
+ dev_warn(&pdev->dev, "Failed to get clock: %ld\n",
+ PTR_ERR(lp->clk));
+ lp->clk = NULL;
+ } else {
+ ret = clk_prepare_enable(lp->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable clock: %d\n",
+ ret);
+ goto free_netdev;
+ }
+ }
+
+ ret = axienet_mdio_setup(lp);
if (ret)
- dev_warn(&pdev->dev, "error registering MDIO bus\n");
+ dev_warn(&pdev->dev,
+ "error registering MDIO bus: %d\n", ret);
+ }
+
+ lp->phylink_config.dev = &ndev->dev;
+ lp->phylink_config.type = PHYLINK_NETDEV;
+
+ lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode,
+ lp->phy_mode,
+ &axienet_phylink_ops);
+ if (IS_ERR(lp->phylink)) {
+ ret = PTR_ERR(lp->phylink);
+ dev_err(&pdev->dev, "phylink_create error (%i)\n", ret);
+ goto free_netdev;
}
ret = register_netdev(lp->ndev);
@@ -1632,9 +1879,16 @@ static int axienet_remove(struct platform_device *pdev)
struct net_device *ndev = platform_get_drvdata(pdev);
struct axienet_local *lp = netdev_priv(ndev);
- axienet_mdio_teardown(lp);
unregister_netdev(ndev);
+ if (lp->phylink)
+ phylink_destroy(lp->phylink);
+
+ axienet_mdio_teardown(lp);
+
+ if (lp->clk)
+ clk_disable_unprepare(lp->clk);
+
of_node_put(lp->phy_node);
lp->phy_node = NULL;
@@ -1643,9 +1897,23 @@ static int axienet_remove(struct platform_device *pdev)
return 0;
}
+static void axienet_shutdown(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+
+ rtnl_lock();
+ netif_device_detach(ndev);
+
+ if (netif_running(ndev))
+ dev_close(ndev);
+
+ rtnl_unlock();
+}
+
static struct platform_driver axienet_driver = {
.probe = axienet_probe,
.remove = axienet_remove,
+ .shutdown = axienet_shutdown,
.driver = {
.name = "xilinx_axienet",
.of_match_table = axienet_of_match,
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
index 704babdbc8a2..435ed308d990 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
@@ -5,9 +5,11 @@
* Copyright (c) 2009 Secret Lab Technologies, Ltd.
* Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
* Copyright (c) 2010 - 2011 PetaLogix
+ * Copyright (c) 2019 SED Systems, a division of Calian Ltd.
* Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
*/
+#include <linux/clk.h>
#include <linux/of_address.h>
#include <linux/of_mdio.h>
#include <linux/jiffies.h>
@@ -16,10 +18,10 @@
#include "xilinx_axienet.h"
#define MAX_MDIO_FREQ 2500000 /* 2.5 MHz */
-#define DEFAULT_CLOCK_DIVISOR XAE_MDIO_DIV_DFT
+#define DEFAULT_HOST_CLOCK 150000000 /* 150 MHz */
/* Wait till MDIO interface is ready to accept a new transaction.*/
-int axienet_mdio_wait_until_ready(struct axienet_local *lp)
+static int axienet_mdio_wait_until_ready(struct axienet_local *lp)
{
u32 val;
@@ -112,23 +114,42 @@ static int axienet_mdio_write(struct mii_bus *bus, int phy_id, int reg,
}
/**
- * axienet_mdio_setup - MDIO setup function
+ * axienet_mdio_enable - MDIO hardware setup function
* @lp: Pointer to axienet local data structure.
- * @np: Pointer to device node
*
- * Return: 0 on success, -ETIMEDOUT on a timeout, -ENOMEM when
- * mdiobus_alloc (to allocate memory for mii bus structure) fails.
+ * Return: 0 on success, -ETIMEDOUT on a timeout.
*
* Sets up the MDIO interface by initializing the MDIO clock and enabling the
- * MDIO interface in hardware. Register the MDIO interface.
+ * MDIO interface in hardware.
**/
-int axienet_mdio_setup(struct axienet_local *lp, struct device_node *np)
+int axienet_mdio_enable(struct axienet_local *lp)
{
- int ret;
u32 clk_div, host_clock;
- struct mii_bus *bus;
- struct resource res;
- struct device_node *np1;
+
+ if (lp->clk) {
+ host_clock = clk_get_rate(lp->clk);
+ } else {
+ struct device_node *np1;
+
+ /* Legacy fallback: detect CPU clock frequency and use as AXI
+ * bus clock frequency. This only works on certain platforms.
+ */
+ np1 = of_find_node_by_name(NULL, "cpu");
+ if (!np1) {
+ netdev_warn(lp->ndev, "Could not find CPU device node.\n");
+ host_clock = DEFAULT_HOST_CLOCK;
+ } else {
+ int ret = of_property_read_u32(np1, "clock-frequency",
+ &host_clock);
+ if (ret) {
+ netdev_warn(lp->ndev, "CPU clock-frequency property not found.\n");
+ host_clock = DEFAULT_HOST_CLOCK;
+ }
+ of_node_put(np1);
+ }
+ netdev_info(lp->ndev, "Setting assumed host clock to %u\n",
+ host_clock);
+ }
/* clk_div can be calculated by deriving it from the equation:
* fMDIO = fHOST / ((1 + clk_div) * 2)
@@ -155,25 +176,6 @@ int axienet_mdio_setup(struct axienet_local *lp, struct device_node *np)
* "clock-frequency" from the CPU
*/
- np1 = of_find_node_by_name(NULL, "cpu");
- if (!np1) {
- netdev_warn(lp->ndev, "Could not find CPU device node.\n");
- netdev_warn(lp->ndev,
- "Setting MDIO clock divisor to default %d\n",
- DEFAULT_CLOCK_DIVISOR);
- clk_div = DEFAULT_CLOCK_DIVISOR;
- goto issue;
- }
- if (of_property_read_u32(np1, "clock-frequency", &host_clock)) {
- netdev_warn(lp->ndev, "clock-frequency property not found.\n");
- netdev_warn(lp->ndev,
- "Setting MDIO clock divisor to default %d\n",
- DEFAULT_CLOCK_DIVISOR);
- clk_div = DEFAULT_CLOCK_DIVISOR;
- of_node_put(np1);
- goto issue;
- }
-
clk_div = (host_clock / (MAX_MDIO_FREQ * 2)) - 1;
/* If there is any remainder from the division of
* fHOST / (MAX_MDIO_FREQ * 2), then we need to add
@@ -186,12 +188,39 @@ int axienet_mdio_setup(struct axienet_local *lp, struct device_node *np)
"Setting MDIO clock divisor to %u/%u Hz host clock.\n",
clk_div, host_clock);
- of_node_put(np1);
-issue:
- axienet_iow(lp, XAE_MDIO_MC_OFFSET,
- (((u32) clk_div) | XAE_MDIO_MC_MDIOEN_MASK));
+ axienet_iow(lp, XAE_MDIO_MC_OFFSET, clk_div | XAE_MDIO_MC_MDIOEN_MASK);
- ret = axienet_mdio_wait_until_ready(lp);
+ return axienet_mdio_wait_until_ready(lp);
+}
+
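Editorial aside (not part of this patch): the divisor computed in axienet_mdio_enable() above follows directly from fMDIO = fHOST / ((1 + clk_div) * 2). A minimal sketch, assuming the MAX_MDIO_FREQ definition from this file; the function name is made up for illustration:

static u32 sketch_mdio_clk_div(u32 host_clock)
{
        u32 clk_div = host_clock / (MAX_MDIO_FREQ * 2) - 1;

        /* Round up whenever the division is inexact so that
         * fMDIO = fHOST / ((1 + clk_div) * 2) stays at or below 2.5 MHz.
         */
        if (host_clock % (MAX_MDIO_FREQ * 2))
                clk_div++;

        return clk_div;
}

For example, a 150 MHz host clock (the DEFAULT_HOST_CLOCK fallback) gives clk_div = 29, so fMDIO = 150 MHz / ((1 + 29) * 2) = exactly 2.5 MHz.
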
+/**
+ * axienet_mdio_disable - MDIO hardware disable function
+ * @lp: Pointer to axienet local data structure.
+ *
+ * Disable the MDIO interface in hardware.
+ **/
+void axienet_mdio_disable(struct axienet_local *lp)
+{
+ axienet_iow(lp, XAE_MDIO_MC_OFFSET, 0);
+}
+
+/**
+ * axienet_mdio_setup - MDIO setup function
+ * @lp: Pointer to axienet local data structure.
+ *
+ * Return: 0 on success, -ETIMEDOUT on a timeout, -ENOMEM when
+ * mdiobus_alloc (to allocate memory for mii bus structure) fails.
+ *
+ * Sets up the MDIO interface by initializing the MDIO clock and enabling the
+ * MDIO interface in hardware. Register the MDIO interface.
+ **/
+int axienet_mdio_setup(struct axienet_local *lp)
+{
+ struct device_node *mdio_node;
+ struct mii_bus *bus;
+ int ret;
+
+ ret = axienet_mdio_enable(lp);
if (ret < 0)
return ret;
@@ -199,10 +228,8 @@ issue:
if (!bus)
return -ENOMEM;
- np1 = of_get_parent(lp->phy_node);
- of_address_to_resource(np1, 0, &res);
- snprintf(bus->id, MII_BUS_ID_SIZE, "%.8llx",
- (unsigned long long) res.start);
+ snprintf(bus->id, MII_BUS_ID_SIZE, "axienet-%.8llx",
+ (unsigned long long)lp->regs_start);
bus->priv = lp;
bus->name = "Xilinx Axi Ethernet MDIO";
@@ -211,7 +238,9 @@ issue:
bus->parent = lp->dev;
lp->mii_bus = bus;
- ret = of_mdiobus_register(bus, np1);
+ mdio_node = of_get_child_by_name(lp->dev->of_node, "mdio");
+ ret = of_mdiobus_register(bus, mdio_node);
+ of_node_put(mdio_node);
if (ret) {
mdiobus_free(bus);
lp->mii_bus = NULL;
diff --git a/drivers/net/fddi/skfp/drvfbi.c b/drivers/net/fddi/skfp/drvfbi.c
index bdd5700e71fa..9c8aa3a95463 100644
--- a/drivers/net/fddi/skfp/drvfbi.c
+++ b/drivers/net/fddi/skfp/drvfbi.c
@@ -20,6 +20,7 @@
#include "h/supern_2.h"
#include "h/skfbiinc.h"
#include <linux/bitrev.h>
+#include <linux/pci_regs.h>
#ifndef lint
static const char ID_sccs[] = "@(#)drvfbi.c 1.63 99/02/11 (C) SK " ;
@@ -127,7 +128,7 @@ static void card_start(struct s_smc *smc)
* at very first before any other initialization functions is
* executed.
*/
- rev_id = inp(PCI_C(PCI_REV_ID)) ;
+ rev_id = inp(PCI_C(PCI_REVISION_ID)) ;
if ((rev_id & 0xf0) == SK_ML_ID_1 || (rev_id & 0xf0) == SK_ML_ID_2) {
smc->hw.hw_is_64bit = TRUE ;
} else {
diff --git a/drivers/net/fddi/skfp/h/skfbi.h b/drivers/net/fddi/skfp/h/skfbi.h
index 89557457b352..480795681719 100644
--- a/drivers/net/fddi/skfp/h/skfbi.h
+++ b/drivers/net/fddi/skfp/h/skfbi.h
@@ -24,49 +24,6 @@
* (ML) = only defined for Monalisa
*/
-/*
- * Configuration Space header
- */
-#define PCI_VENDOR_ID 0x00 /* 16 bit Vendor ID */
-#define PCI_DEVICE_ID 0x02 /* 16 bit Device ID */
-#define PCI_COMMAND 0x04 /* 16 bit Command */
-#define PCI_STATUS 0x06 /* 16 bit Status */
-#define PCI_REV_ID 0x08 /* 8 bit Revision ID */
-#define PCI_CLASS_CODE 0x09 /* 24 bit Class Code */
-#define PCI_CACHE_LSZ 0x0c /* 8 bit Cache Line Size */
-#define PCI_LAT_TIM 0x0d /* 8 bit Latency Timer */
-#define PCI_HEADER_T 0x0e /* 8 bit Header Type */
-#define PCI_BIST 0x0f /* 8 bit Built-in selftest */
-#define PCI_BASE_1ST 0x10 /* 32 bit 1st Base address */
-#define PCI_BASE_2ND 0x14 /* 32 bit 2nd Base address */
-/* Byte 18..2b: Reserved */
-#define PCI_SUB_VID 0x2c /* 16 bit Subsystem Vendor ID */
-#define PCI_SUB_ID 0x2e /* 16 bit Subsystem ID */
-#define PCI_BASE_ROM 0x30 /* 32 bit Expansion ROM Base Address */
-/* Byte 34..33: Reserved */
-#define PCI_CAP_PTR 0x34 /* 8 bit (ML) Capabilities Ptr */
-/* Byte 35..3b: Reserved */
-#define PCI_IRQ_LINE 0x3c /* 8 bit Interrupt Line */
-#define PCI_IRQ_PIN 0x3d /* 8 bit Interrupt Pin */
-#define PCI_MIN_GNT 0x3e /* 8 bit Min_Gnt */
-#define PCI_MAX_LAT 0x3f /* 8 bit Max_Lat */
-/* Device Dependent Region */
-#define PCI_OUR_REG 0x40 /* 32 bit (DV) Our Register */
-#define PCI_OUR_REG_1 0x40 /* 32 bit (ML) Our Register 1 */
-#define PCI_OUR_REG_2 0x44 /* 32 bit (ML) Our Register 2 */
-/* Power Management Region */
-#define PCI_PM_CAP_ID 0x48 /* 8 bit (ML) Power Management Cap. ID */
-#define PCI_PM_NITEM 0x49 /* 8 bit (ML) Next Item Ptr */
-#define PCI_PM_CAP_REG 0x4a /* 16 bit (ML) Power Management Capabilities */
-#define PCI_PM_CTL_STS 0x4c /* 16 bit (ML) Power Manag. Control/Status */
-/* Byte 0x4e: Reserved */
-#define PCI_PM_DAT_REG 0x4f /* 8 bit (ML) Power Manag. Data Register */
-/* VPD Region */
-#define PCI_VPD_CAP_ID 0x50 /* 8 bit (ML) VPD Cap. ID */
-#define PCI_VPD_NITEM 0x51 /* 8 bit (ML) Next Item Ptr */
-#define PCI_VPD_ADR_REG 0x52 /* 16 bit (ML) VPD Address Register */
-#define PCI_VPD_DAT_REG 0x54 /* 32 bit (ML) VPD Data Register */
-/* Byte 58..ff: Reserved */
/*
* I2C Address (PCI Config)
@@ -76,176 +33,10 @@
*/
#define I2C_ADDR_VPD 0xA0 /* I2C address for the VPD EEPROM */
-/*
- * Define Bits and Values of the registers
- */
-/* PCI_VENDOR_ID 16 bit Vendor ID */
-/* PCI_DEVICE_ID 16 bit Device ID */
-/* Values for Vendor ID and Device ID shall be patched into the code */
-/* PCI_COMMAND 16 bit Command */
-#define PCI_FBTEN 0x0200 /* Bit 9: Fast Back-To-Back enable */
-#define PCI_SERREN 0x0100 /* Bit 8: SERR enable */
-#define PCI_ADSTEP 0x0080 /* Bit 7: Address Stepping */
-#define PCI_PERREN 0x0040 /* Bit 6: Parity Report Response enable */
-#define PCI_VGA_SNOOP 0x0020 /* Bit 5: VGA palette snoop */
-#define PCI_MWIEN 0x0010 /* Bit 4: Memory write an inv cycl ena */
-#define PCI_SCYCEN 0x0008 /* Bit 3: Special Cycle enable */
-#define PCI_BMEN 0x0004 /* Bit 2: Bus Master enable */
-#define PCI_MEMEN 0x0002 /* Bit 1: Memory Space Access enable */
-#define PCI_IOEN 0x0001 /* Bit 0: IO Space Access enable */
-
-/* PCI_STATUS 16 bit Status */
-#define PCI_PERR 0x8000 /* Bit 15: Parity Error */
-#define PCI_SERR 0x4000 /* Bit 14: Signaled SERR */
-#define PCI_RMABORT 0x2000 /* Bit 13: Received Master Abort */
-#define PCI_RTABORT 0x1000 /* Bit 12: Received Target Abort */
-#define PCI_STABORT 0x0800 /* Bit 11: Sent Target Abort */
-#define PCI_DEVSEL 0x0600 /* Bit 10..9: DEVSEL Timing */
-#define PCI_DEV_FAST (0<<9) /* fast */
-#define PCI_DEV_MEDIUM (1<<9) /* medium */
-#define PCI_DEV_SLOW (2<<9) /* slow */
-#define PCI_DATAPERR 0x0100 /* Bit 8: DATA Parity error detected */
-#define PCI_FB2BCAP 0x0080 /* Bit 7: Fast Back-to-Back Capability */
-#define PCI_UDF 0x0040 /* Bit 6: User Defined Features */
-#define PCI_66MHZCAP 0x0020 /* Bit 5: 66 MHz PCI bus clock capable */
-#define PCI_NEWCAP 0x0010 /* Bit 4: New cap. list implemented */
-
-#define PCI_ERRBITS (PCI_PERR|PCI_SERR|PCI_RMABORT|PCI_STABORT|PCI_DATAPERR)
-
-/* PCI_REV_ID 8 bit Revision ID */
-/* PCI_CLASS_CODE 24 bit Class Code */
-/* Byte 2: Base Class (02) */
-/* Byte 1: SubClass (02) */
-/* Byte 0: Programming Interface (00) */
-
-/* PCI_CACHE_LSZ 8 bit Cache Line Size */
-/* Possible values: 0,2,4,8,16 */
-
-/* PCI_LAT_TIM 8 bit Latency Timer */
-
-/* PCI_HEADER_T 8 bit Header Type */
-#define PCI_HD_MF_DEV 0x80 /* Bit 7: 0= single, 1= multi-func dev */
-#define PCI_HD_TYPE 0x7f /* Bit 6..0: Header Layout 0= normal */
-
-/* PCI_BIST 8 bit Built-in selftest */
-#define PCI_BIST_CAP 0x80 /* Bit 7: BIST Capable */
-#define PCI_BIST_ST 0x40 /* Bit 6: Start BIST */
-#define PCI_BIST_RET 0x0f /* Bit 3..0: Completion Code */
-
-/* PCI_BASE_1ST 32 bit 1st Base address */
-#define PCI_MEMSIZE 0x800L /* use 2 kB Memory Base */
-#define PCI_MEMBASE_BITS 0xfffff800L /* Bit 31..11: Memory Base Address */
-#define PCI_MEMSIZE_BIIS 0x000007f0L /* Bit 10..4: Memory Size Req. */
-#define PCI_PREFEN 0x00000008L /* Bit 3: Prefetchable */
-#define PCI_MEM_TYP 0x00000006L /* Bit 2..1: Memory Type */
-#define PCI_MEM32BIT (0<<1) /* Base addr anywhere in 32 Bit range */
-#define PCI_MEM1M (1<<1) /* Base addr below 1 MegaByte */
-#define PCI_MEM64BIT (2<<1) /* Base addr anywhere in 64 Bit range */
-#define PCI_MEMSPACE 0x00000001L /* Bit 0: Memory Space Indic. */
-
-/* PCI_SUB_VID 16 bit Subsystem Vendor ID */
-/* PCI_SUB_ID 16 bit Subsystem ID */
-
-/* PCI_BASE_ROM 32 bit Expansion ROM Base Address */
-#define PCI_ROMBASE 0xfffe0000L /* Bit 31..17: ROM BASE address (1st) */
-#define PCI_ROMBASZ 0x0001c000L /* Bit 16..14: Treat as BASE or SIZE */
-#define PCI_ROMSIZE 0x00003800L /* Bit 13..11: ROM Size Requirements */
-#define PCI_ROMEN 0x00000001L /* Bit 0: Address Decode enable */
-
-/* PCI_CAP_PTR 8 bit New Capabilities Pointers */
-/* PCI_IRQ_LINE 8 bit Interrupt Line */
-/* PCI_IRQ_PIN 8 bit Interrupt Pin */
-/* PCI_MIN_GNT 8 bit Min_Gnt */
-/* PCI_MAX_LAT 8 bit Max_Lat */
-/* Device Dependent Region */
-/* PCI_OUR_REG (DV) 32 bit Our Register */
-/* PCI_OUR_REG_1 (ML) 32 bit Our Register 1 */
- /* Bit 31..29: reserved */
-#define PCI_PATCH_DIR (3L<<27) /*(DV) Bit 28..27: Ext Patchs direction */
-#define PCI_PATCH_DIR_0 (1L<<27) /*(DV) Type of the pins EXT_PATCHS<1..0> */
-#define PCI_PATCH_DIR_1 (1L<<28) /* 0 = input */
- /* 1 = output */
-#define PCI_EXT_PATCHS (3L<<25) /*(DV) Bit 26..25: Extended Patches */
-#define PCI_EXT_PATCH_0 (1L<<25) /*(DV) */
-#define PCI_EXT_PATCH_1 (1L<<26) /* CLK for MicroWire (ML) */
-#define PCI_VIO (1L<<25) /*(ML) */
-#define PCI_EN_BOOT (1L<<24) /* Bit 24: Enable BOOT via ROM */
- /* 1 = Don't boot with ROM */
- /* 0 = Boot with ROM */
-#define PCI_EN_IO (1L<<23) /* Bit 23: Mapping to IO space */
-#define PCI_EN_FPROM (1L<<22) /* Bit 22: FLASH mapped to mem? */
- /* 1 = Map Flash to Memory */
- /* 0 = Disable all addr. decoding */
-#define PCI_PAGESIZE (3L<<20) /* Bit 21..20: FLASH Page Size */
-#define PCI_PAGE_16 (0L<<20) /* 16 k pages */
-#define PCI_PAGE_32K (1L<<20) /* 32 k pages */
-#define PCI_PAGE_64K (2L<<20) /* 64 k pages */
-#define PCI_PAGE_128K (3L<<20) /* 128 k pages */
- /* Bit 19: reserved (ML) and (DV) */
-#define PCI_PAGEREG (7L<<16) /* Bit 18..16: Page Register */
- /* Bit 15: reserved */
-#define PCI_FORCE_BE (1L<<14) /* Bit 14: Assert all BEs on MR */
-#define PCI_DIS_MRL (1L<<13) /* Bit 13: Disable Mem R Line */
-#define PCI_DIS_MRM (1L<<12) /* Bit 12: Disable Mem R multip */
-#define PCI_DIS_MWI (1L<<11) /* Bit 11: Disable Mem W & inv */
-#define PCI_DISC_CLS (1L<<10) /* Bit 10: Disc: cacheLsz bound */
-#define PCI_BURST_DIS (1L<<9) /* Bit 9: Burst Disable */
-#define PCI_BYTE_SWAP (1L<<8) /*(DV) Bit 8: Byte Swap in DATA */
-#define PCI_SKEW_DAS (0xfL<<4) /* Bit 7..4: Skew Ctrl, DAS Ext */
-#define PCI_SKEW_BASE (0xfL<<0) /* Bit 3..0: Skew Ctrl, Base */
-
-/* PCI_OUR_REG_2 (ML) 32 bit Our Register 2 (Monalisa only) */
-#define PCI_VPD_WR_TH (0xffL<<24) /* Bit 24..31 VPD Write Threshold */
-#define PCI_DEV_SEL (0x7fL<<17) /* Bit 17..23 EEPROM Device Select */
-#define PCI_VPD_ROM_SZ (7L<<14) /* Bit 14..16 VPD ROM Size */
- /* Bit 12..13 reserved */
-#define PCI_PATCH_DIR2 (0xfL<<8) /* Bit 8..11 Ext Patchs dir 2..5 */
-#define PCI_PATCH_DIR_2 (1L<<8) /* Bit 8 CS for MicroWire */
-#define PCI_PATCH_DIR_3 (1L<<9)
-#define PCI_PATCH_DIR_4 (1L<<10)
-#define PCI_PATCH_DIR_5 (1L<<11)
-#define PCI_EXT_PATCHS2 (0xfL<<4) /* Bit 4..7 Extended Patches */
-#define PCI_EXT_PATCH_2 (1L<<4) /* Bit 4 CS for MicroWire */
-#define PCI_EXT_PATCH_3 (1L<<5)
-#define PCI_EXT_PATCH_4 (1L<<6)
-#define PCI_EXT_PATCH_5 (1L<<7)
-#define PCI_EN_DUMMY_RD (1L<<3) /* Bit 3 Enable Dummy Read */
-#define PCI_REV_DESC (1L<<2) /* Bit 2 Reverse Desc. Bytes */
-#define PCI_USEADDR64 (1L<<1) /* Bit 1 Use 64 Bit Addresse */
-#define PCI_USEDATA64 (1L<<0) /* Bit 0 Use 64 Bit Data bus ext*/
-
-/* Power Management Region */
-/* PCI_PM_CAP_ID 8 bit (ML) Power Management Cap. ID */
-/* PCI_PM_NITEM 8 bit (ML) Next Item Ptr */
-/* PCI_PM_CAP_REG 16 bit (ML) Power Management Capabilities*/
-#define PCI_PME_SUP (0x1f<<11) /* Bit 11..15 PM Manag. Event Support*/
-#define PCI_PM_D2_SUB (1<<10) /* Bit 10 D2 Support Bit */
-#define PCI_PM_D1_SUB (1<<9) /* Bit 9 D1 Support Bit */
- /* Bit 6..8 reserved */
-#define PCI_PM_DSI (1<<5) /* Bit 5 Device Specific Init.*/
-#define PCI_PM_APS (1<<4) /* Bit 4 Auxialiary Power Src */
-#define PCI_PME_CLOCK (1<<3) /* Bit 3 PM Event Clock */
-#define PCI_PM_VER (7<<0) /* Bit 0..2 PM PCI Spec. version */
-
-/* PCI_PM_CTL_STS 16 bit (ML) Power Manag. Control/Status */
-#define PCI_PME_STATUS (1<<15) /* Bit 15 PFA doesn't sup. PME#*/
-#define PCI_PM_DAT_SCL (3<<13) /* Bit 13..14 dat reg Scaling factor */
-#define PCI_PM_DAT_SEL (0xf<<9) /* Bit 9..12 PM data selector field */
- /* Bit 7.. 2 reserved */
-#define PCI_PM_STATE (3<<0) /* Bit 0.. 1 Power Management State */
-#define PCI_PM_STATE_D0 (0<<0) /* D0: Operational (default) */
-#define PCI_PM_STATE_D1 (1<<0) /* D1: not supported */
-#define PCI_PM_STATE_D2 (2<<0) /* D2: not supported */
-#define PCI_PM_STATE_D3 (3<<0) /* D3: HOT, Power Down and Reset */
-
-/* PCI_PM_DAT_REG 8 bit (ML) Power Manag. Data Register */
-/* VPD Region */
-/* PCI_VPD_CAP_ID 8 bit (ML) VPD Cap. ID */
-/* PCI_VPD_NITEM 8 bit (ML) Next Item Ptr */
-/* PCI_VPD_ADR_REG 16 bit (ML) VPD Address Register */
-#define PCI_VPD_FLAG (1<<15) /* Bit 15 starts VPD rd/wd cycle*/
-
-/* PCI_VPD_DAT_REG 32 bit (ML) VPD Data Register */
+
+#define PCI_ERRBITS (PCI_STATUS_DETECTED_PARITY | \
+                     PCI_STATUS_SIG_SYSTEM_ERROR | \
+                     PCI_STATUS_REC_MASTER_ABORT | \
+                     PCI_STATUS_SIG_TARGET_ABORT | \
+                     PCI_STATUS_PARITY)
+
+
/*
* Control Register File:
@@ -873,20 +664,6 @@
#define T3_MUX (3<<2) /* Bit 3..2: Mux position */
#define T3_VRAM (3<<0) /* Bit 1..0: Virtual RAM buffer Address */
-/* PCI card IDs */
-/*
- * Note: The following 4 byte definitions shall not be used! Use OEM Concept!
- */
-#define PCI_VEND_ID0 0x48 /* PCI vendor ID (SysKonnect) */
-#define PCI_VEND_ID1 0x11 /* PCI vendor ID (SysKonnect) */
- /* (High byte) */
-#define PCI_DEV_ID0 0x00 /* PCI device ID */
-#define PCI_DEV_ID1 0x40 /* PCI device ID (High byte) */
-
-/*#define PCI_CLASS 0x02*/ /* PCI class code: network device */
-#define PCI_NW_CLASS 0x02 /* PCI class code: network device */
-#define PCI_SUB_CLASS 0x02 /* PCI subclass ID: FDDI device */
-#define PCI_PROG_INTFC 0x00 /* PCI programming Interface (=0) */
/*
* address transmission from logical to physical offset address on board
diff --git a/drivers/net/fjes/fjes_debugfs.c b/drivers/net/fjes/fjes_debugfs.c
index 153fc998f9c1..2c2095e7cf1e 100644
--- a/drivers/net/fjes/fjes_debugfs.c
+++ b/drivers/net/fjes/fjes_debugfs.c
@@ -52,20 +52,11 @@ DEFINE_SHOW_ATTRIBUTE(fjes_dbg_status);
void fjes_dbg_adapter_init(struct fjes_adapter *adapter)
{
const char *name = dev_name(&adapter->plat_dev->dev);
- struct dentry *pfile;
adapter->dbg_adapter = debugfs_create_dir(name, fjes_debug_root);
- if (!adapter->dbg_adapter) {
- dev_err(&adapter->plat_dev->dev,
- "debugfs entry for %s failed\n", name);
- return;
- }
- pfile = debugfs_create_file("status", 0444, adapter->dbg_adapter,
- adapter, &fjes_dbg_status_fops);
- if (!pfile)
- dev_err(&adapter->plat_dev->dev,
- "debugfs status for %s failed\n", name);
+ debugfs_create_file("status", 0444, adapter->dbg_adapter, adapter,
+ &fjes_dbg_status_fops);
}
void fjes_dbg_adapter_exit(struct fjes_adapter *adapter)
@@ -77,8 +68,6 @@ void fjes_dbg_adapter_exit(struct fjes_adapter *adapter)
void fjes_dbg_init(void)
{
fjes_debug_root = debugfs_create_dir(fjes_driver_name, NULL);
- if (!fjes_debug_root)
- pr_info("init of debugfs failed\n");
}
void fjes_dbg_exit(void)
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index fc45b749db46..ecfe26215935 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -285,16 +285,29 @@ static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
return gtp_rx(pctx, skb, hdrlen, gtp->role);
}
-static void gtp_encap_destroy(struct sock *sk)
+static void __gtp_encap_destroy(struct sock *sk)
{
struct gtp_dev *gtp;
- gtp = rcu_dereference_sk_user_data(sk);
+ lock_sock(sk);
+ gtp = sk->sk_user_data;
if (gtp) {
+ if (gtp->sk0 == sk)
+ gtp->sk0 = NULL;
+ else
+ gtp->sk1u = NULL;
udp_sk(sk)->encap_type = 0;
rcu_assign_sk_user_data(sk, NULL);
sock_put(sk);
}
+ release_sock(sk);
+}
+
+static void gtp_encap_destroy(struct sock *sk)
+{
+ rtnl_lock();
+ __gtp_encap_destroy(sk);
+ rtnl_unlock();
}
static void gtp_encap_disable_sock(struct sock *sk)
@@ -302,7 +315,7 @@ static void gtp_encap_disable_sock(struct sock *sk)
if (!sk)
return;
- gtp_encap_destroy(sk);
+ __gtp_encap_destroy(sk);
}
static void gtp_encap_disable(struct gtp_dev *gtp)
@@ -681,7 +694,6 @@ static void gtp_dellink(struct net_device *dev, struct list_head *head)
{
struct gtp_dev *gtp = netdev_priv(dev);
- gtp_encap_disable(gtp);
gtp_hashtable_free(gtp);
list_del_rcu(&gtp->list);
unregister_netdevice_queue(dev, head);
@@ -796,7 +808,8 @@ static struct sock *gtp_encap_enable_socket(int fd, int type,
goto out_sock;
}
- if (rcu_dereference_sk_user_data(sock->sk)) {
+ lock_sock(sock->sk);
+ if (sock->sk->sk_user_data) {
sk = ERR_PTR(-EBUSY);
goto out_sock;
}
@@ -812,6 +825,7 @@ static struct sock *gtp_encap_enable_socket(int fd, int type,
setup_udp_tunnel_sock(sock_net(sock->sk), sock, &tuncfg);
out_sock:
+ release_sock(sock->sk);
sockfd_put(sock);
return sk;
}
@@ -843,8 +857,13 @@ static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[])
if (data[IFLA_GTP_ROLE]) {
role = nla_get_u32(data[IFLA_GTP_ROLE]);
- if (role > GTP_ROLE_SGSN)
+ if (role > GTP_ROLE_SGSN) {
+ if (sk0)
+ gtp_encap_disable_sock(sk0);
+ if (sk1u)
+ gtp_encap_disable_sock(sk1u);
return -EINVAL;
+ }
}
gtp->sk0 = sk0;
@@ -945,7 +964,7 @@ static int ipv4_pdp_add(struct gtp_dev *gtp, struct sock *sk,
}
- pctx = kmalloc(sizeof(struct pdp_ctx), GFP_KERNEL);
+ pctx = kmalloc(sizeof(*pctx), GFP_ATOMIC);
if (pctx == NULL)
return -ENOMEM;
@@ -1034,6 +1053,7 @@ static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
}
+ rtnl_lock();
rcu_read_lock();
gtp = gtp_find_dev(sock_net(skb->sk), info->attrs);
@@ -1058,6 +1078,7 @@ static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)
out_unlock:
rcu_read_unlock();
+ rtnl_unlock();
return err;
}
@@ -1360,9 +1381,9 @@ late_initcall(gtp_init);
static void __exit gtp_fini(void)
{
- unregister_pernet_subsys(&gtp_net_ops);
genl_unregister_family(&gtp_genl_family);
rtnl_link_unregister(&gtp_link_ops);
+ unregister_pernet_subsys(&gtp_net_ops);
pr_info("GTP module unloaded\n");
}
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 87d361666cdd..14545a8797a8 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -55,6 +55,13 @@
#include <net/net_namespace.h>
#include <linux/u64_stats_sync.h>
+/* blackhole_netdev - a device used for dsts that are marked expired!
+ * This is a global device (instead of per-net-ns) since it does not need
+ * to be per-ns and gets initialized at boot time.
+ */
+struct net_device *blackhole_netdev;
+EXPORT_SYMBOL(blackhole_netdev);
+
/* The higher levels take care of making this non-reentrant (it's
* called with bh's disabled).
*/
@@ -150,12 +157,14 @@ static const struct net_device_ops loopback_ops = {
.ndo_set_mac_address = eth_mac_addr,
};
-/* The loopback device is special. There is only one instance
- * per network namespace.
- */
-static void loopback_setup(struct net_device *dev)
+static void gen_lo_setup(struct net_device *dev,
+ unsigned int mtu,
+ const struct ethtool_ops *eth_ops,
+ const struct header_ops *hdr_ops,
+ const struct net_device_ops *dev_ops,
+ void (*dev_destructor)(struct net_device *dev))
{
- dev->mtu = 64 * 1024;
+ dev->mtu = mtu;
dev->hard_header_len = ETH_HLEN; /* 14 */
dev->min_header_len = ETH_HLEN; /* 14 */
dev->addr_len = ETH_ALEN; /* 6 */
@@ -174,11 +183,20 @@ static void loopback_setup(struct net_device *dev)
| NETIF_F_NETNS_LOCAL
| NETIF_F_VLAN_CHALLENGED
| NETIF_F_LOOPBACK;
- dev->ethtool_ops = &loopback_ethtool_ops;
- dev->header_ops = &eth_header_ops;
- dev->netdev_ops = &loopback_ops;
+ dev->ethtool_ops = eth_ops;
+ dev->header_ops = hdr_ops;
+ dev->netdev_ops = dev_ops;
dev->needs_free_netdev = true;
- dev->priv_destructor = loopback_dev_free;
+ dev->priv_destructor = dev_destructor;
+}
+
+/* The loopback device is special. There is only one instance
+ * per network namespace.
+ */
+static void loopback_setup(struct net_device *dev)
+{
+ gen_lo_setup(dev, (64 * 1024), &loopback_ethtool_ops, &eth_header_ops,
+ &loopback_ops, loopback_dev_free);
}
/* Setup and register the loopback device. */
@@ -213,3 +231,45 @@ out:
struct pernet_operations __net_initdata loopback_net_ops = {
.init = loopback_net_init,
};
+
+/* blackhole netdevice */
+static netdev_tx_t blackhole_netdev_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ kfree_skb(skb);
+ net_warn_ratelimited("%s(): Dropping skb.\n", __func__);
+ return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops blackhole_netdev_ops = {
+ .ndo_start_xmit = blackhole_netdev_xmit,
+};
+
+/* This is a dst-dummy device used specifically for invalidated
+ * DSTs and unlike loopback, this is not per-ns.
+ */
+static void blackhole_netdev_setup(struct net_device *dev)
+{
+ gen_lo_setup(dev, ETH_MIN_MTU, NULL, NULL, &blackhole_netdev_ops, NULL);
+}
+
+/* Setup and register the blackhole_netdev. */
+static int __init blackhole_netdev_init(void)
+{
+ blackhole_netdev = alloc_netdev(0, "blackhole_dev", NET_NAME_UNKNOWN,
+ blackhole_netdev_setup);
+ if (!blackhole_netdev)
+ return -ENOMEM;
+
+ rtnl_lock();
+ dev_init_scheduler(blackhole_netdev);
+ dev_activate(blackhole_netdev);
+ rtnl_unlock();
+
+ blackhole_netdev->flags |= IFF_UP | IFF_RUNNING;
+ dev_net_set(blackhole_netdev, &init_net);
+
+ return 0;
+}
+
+device_initcall(blackhole_netdev_init);
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 75aebf65cd09..8f46aa1ddec0 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -865,6 +865,7 @@ static void macsec_reset_skb(struct sk_buff *skb, struct net_device *dev)
static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len)
{
+ skb->ip_summed = CHECKSUM_NONE;
memmove(skb->data + hdr_len, skb->data, 2 * ETH_ALEN);
skb_pull(skb, hdr_len);
pskb_trim_unique(skb, skb->len - icv_len);
@@ -1099,10 +1100,9 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
}
skb = skb_unshare(skb, GFP_ATOMIC);
- if (!skb) {
- *pskb = NULL;
+ *pskb = skb;
+ if (!skb)
return RX_HANDLER_CONSUMED;
- }
pulled_sci = pskb_may_pull(skb, macsec_extra_len(true));
if (!pulled_sci) {
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 681a882c32cd..940192c057b6 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -827,7 +827,7 @@ static int macvlan_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
struct ifreq ifrr;
int err = -EOPNOTSUPP;
- strncpy(ifrr.ifr_name, real_dev->name, IFNAMSIZ);
+ strscpy(ifrr.ifr_name, real_dev->name, IFNAMSIZ);
ifrr.ifr_ifru = ifr->ifr_ifru;
switch (cmd) {
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index b509b941d5ca..c5c417a3c0ce 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -38,6 +38,8 @@ static int nsim_dev_debugfs_init(struct nsim_dev *nsim_dev)
nsim_dev->ports_ddir = debugfs_create_dir("ports", nsim_dev->ddir);
if (IS_ERR_OR_NULL(nsim_dev->ports_ddir))
return PTR_ERR_OR_ZERO(nsim_dev->ports_ddir) ?: -EINVAL;
+ debugfs_create_bool("fw_update_status", 0600, nsim_dev->ddir,
+ &nsim_dev->fw_update_status);
return 0;
}
@@ -220,8 +222,49 @@ static int nsim_dev_reload(struct devlink *devlink,
return 0;
}
+#define NSIM_DEV_FLASH_SIZE 500000
+#define NSIM_DEV_FLASH_CHUNK_SIZE 1000
+#define NSIM_DEV_FLASH_CHUNK_TIME_MS 10
+
+static int nsim_dev_flash_update(struct devlink *devlink, const char *file_name,
+ const char *component,
+ struct netlink_ext_ack *extack)
+{
+ struct nsim_dev *nsim_dev = devlink_priv(devlink);
+ int i;
+
+ if (nsim_dev->fw_update_status) {
+ devlink_flash_update_begin_notify(devlink);
+ devlink_flash_update_status_notify(devlink,
+ "Preparing to flash",
+ component, 0, 0);
+ }
+
+ for (i = 0; i < NSIM_DEV_FLASH_SIZE / NSIM_DEV_FLASH_CHUNK_SIZE; i++) {
+ if (nsim_dev->fw_update_status)
+ devlink_flash_update_status_notify(devlink, "Flashing",
+ component,
+ i * NSIM_DEV_FLASH_CHUNK_SIZE,
+ NSIM_DEV_FLASH_SIZE);
+ msleep(NSIM_DEV_FLASH_CHUNK_TIME_MS);
+ }
+
+ if (nsim_dev->fw_update_status) {
+ devlink_flash_update_status_notify(devlink, "Flashing",
+ component,
+ NSIM_DEV_FLASH_SIZE,
+ NSIM_DEV_FLASH_SIZE);
+ devlink_flash_update_status_notify(devlink, "Flashing done",
+ component, 0, 0);
+ devlink_flash_update_end_notify(devlink);
+ }
+
+ return 0;
+}
+
static const struct devlink_ops nsim_dev_devlink_ops = {
.reload = nsim_dev_reload,
+ .flash_update = nsim_dev_flash_update,
};
static struct nsim_dev *
@@ -240,6 +283,7 @@ nsim_dev_create(struct nsim_bus_dev *nsim_bus_dev, unsigned int port_count)
get_random_bytes(nsim_dev->switch_id.id, nsim_dev->switch_id.id_len);
INIT_LIST_HEAD(&nsim_dev->port_list);
mutex_init(&nsim_dev->port_list_lock);
+ nsim_dev->fw_update_status = true;
nsim_dev->fib_data = nsim_fib_create();
if (IS_ERR(nsim_dev->fib_data)) {
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index e5c8aa08e1cd..0740940f41b1 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -78,26 +78,6 @@ nsim_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
return nsim_bpf_setup_tc_block_cb(type, type_data, cb_priv);
}
-static int
-nsim_setup_tc_block(struct net_device *dev, struct tc_block_offload *f)
-{
- struct netdevsim *ns = netdev_priv(dev);
-
- if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
- return -EOPNOTSUPP;
-
- switch (f->command) {
- case TC_BLOCK_BIND:
- return tcf_block_cb_register(f->block, nsim_setup_tc_block_cb,
- ns, ns, f->extack);
- case TC_BLOCK_UNBIND:
- tcf_block_cb_unregister(f->block, nsim_setup_tc_block_cb, ns);
- return 0;
- default:
- return -EOPNOTSUPP;
- }
-}
-
static int nsim_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
{
struct netdevsim *ns = netdev_priv(dev);
@@ -223,12 +203,19 @@ static int nsim_set_vf_link_state(struct net_device *dev, int vf, int state)
return 0;
}
+static LIST_HEAD(nsim_block_cb_list);
+
static int
nsim_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data)
{
+ struct netdevsim *ns = netdev_priv(dev);
+
switch (type) {
case TC_SETUP_BLOCK:
- return nsim_setup_tc_block(dev, type_data);
+ return flow_block_cb_setup_simple(type_data,
+ &nsim_block_cb_list,
+ nsim_setup_tc_block_cb,
+ ns, ns, true);
default:
return -EOPNOTSUPP;
}
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index 3f398797c2bc..79c05af2a7c0 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -157,6 +157,7 @@ struct nsim_dev {
struct netdev_phys_item_id switch_id;
struct list_head port_list;
struct mutex port_list_lock; /* protects port list */
+ bool fw_update_status;
};
int nsim_dev_init(void);
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 1d406c6df790..20f14c5fbb7e 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -416,6 +416,12 @@ config NATIONAL_PHY
---help---
Currently supports the DP83865 PHY.
+config NXP_TJA11XX_PHY
+ tristate "NXP TJA11xx PHYs support"
+ depends on HWMON
+ ---help---
+ Currently supports the NXP TJA1100 and TJA1101 PHYs.
+
config QSEMI_PHY
tristate "Quality Semiconductor PHYs"
---help---
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 5b5c8669499e..839acb292c38 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -82,6 +82,7 @@ obj-$(CONFIG_MICROCHIP_PHY) += microchip.o
obj-$(CONFIG_MICROCHIP_T1_PHY) += microchip_t1.o
obj-$(CONFIG_MICROSEMI_PHY) += mscc.o
obj-$(CONFIG_NATIONAL_PHY) += national.o
+obj-$(CONFIG_NXP_TJA11XX_PHY) += nxp-tja11xx.o
obj-$(CONFIG_QSEMI_PHY) += qsemi.o
obj-$(CONFIG_REALTEK_PHY) += realtek.o
obj-$(CONFIG_RENESAS_PHY) += uPD60620.o
diff --git a/drivers/net/phy/aquantia_main.c b/drivers/net/phy/aquantia_main.c
index 0fedd28fdb6e..3b29d381116f 100644
--- a/drivers/net/phy/aquantia_main.c
+++ b/drivers/net/phy/aquantia_main.c
@@ -27,6 +27,7 @@
#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_MASK GENMASK(7, 3)
#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_KR 0
#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_XFI 2
+#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_USXGMII 3
#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_SGMII 6
#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_OCSGMII 10
@@ -360,6 +361,9 @@ static int aqr107_read_status(struct phy_device *phydev)
case MDIO_PHYXS_VEND_IF_STATUS_TYPE_XFI:
phydev->interface = PHY_INTERFACE_MODE_10GKR;
break;
+ case MDIO_PHYXS_VEND_IF_STATUS_TYPE_USXGMII:
+ phydev->interface = PHY_INTERFACE_MODE_USXGMII;
+ break;
case MDIO_PHYXS_VEND_IF_STATUS_TYPE_SGMII:
phydev->interface = PHY_INTERFACE_MODE_SGMII;
break;
@@ -488,9 +492,13 @@ static int aqr107_config_init(struct phy_device *phydev)
if (phydev->interface != PHY_INTERFACE_MODE_SGMII &&
phydev->interface != PHY_INTERFACE_MODE_2500BASEX &&
phydev->interface != PHY_INTERFACE_MODE_XGMII &&
+ phydev->interface != PHY_INTERFACE_MODE_USXGMII &&
phydev->interface != PHY_INTERFACE_MODE_10GKR)
return -ENODEV;
+ WARN(phydev->interface == PHY_INTERFACE_MODE_XGMII,
+ "Your devicetree is out of date, please update it. The AQR107 family doesn't support XGMII, maybe you mean USXGMII.\n");
+
ret = aqr107_wait_reset_complete(phydev);
if (!ret)
aqr107_chip_info(phydev);
diff --git a/drivers/net/phy/bcm87xx.c b/drivers/net/phy/bcm87xx.c
index f0c0eefe2202..f6dce6850850 100644
--- a/drivers/net/phy/bcm87xx.c
+++ b/drivers/net/phy/bcm87xx.c
@@ -81,22 +81,18 @@ static int bcm87xx_of_reg_init(struct phy_device *phydev)
}
#endif /* CONFIG_OF_MDIO */
-static int bcm87xx_config_init(struct phy_device *phydev)
+static int bcm87xx_get_features(struct phy_device *phydev)
{
- linkmode_zero(phydev->supported);
linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
phydev->supported);
- linkmode_zero(phydev->advertising);
- linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
- phydev->advertising);
- phydev->state = PHY_NOLINK;
- phydev->autoneg = AUTONEG_DISABLE;
-
- bcm87xx_of_reg_init(phydev);
-
return 0;
}
+static int bcm87xx_config_init(struct phy_device *phydev)
+{
+ return bcm87xx_of_reg_init(phydev);
+}
+
static int bcm87xx_config_aneg(struct phy_device *phydev)
{
return -EINVAL;
@@ -194,7 +190,7 @@ static struct phy_driver bcm87xx_driver[] = {
.phy_id = PHY_ID_BCM8706,
.phy_id_mask = 0xffffffff,
.name = "Broadcom BCM8706",
- .features = PHY_10GBIT_FEC_FEATURES,
+ .get_features = bcm87xx_get_features,
.config_init = bcm87xx_config_init,
.config_aneg = bcm87xx_config_aneg,
.read_status = bcm87xx_read_status,
@@ -206,7 +202,7 @@ static struct phy_driver bcm87xx_driver[] = {
.phy_id = PHY_ID_BCM8727,
.phy_id_mask = 0xffffffff,
.name = "Broadcom BCM8727",
- .features = PHY_10GBIT_FEC_FEATURES,
+ .get_features = bcm87xx_get_features,
.config_init = bcm87xx_config_init,
.config_aneg = bcm87xx_config_aneg,
.read_status = bcm87xx_read_status,
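The conversion above replaces a static .features mask with a .get_features callback, which phylib calls from phy_probe() to populate phydev->supported (the default advertising mask is then derived from it). A minimal sketch of the same pattern for a hypothetical driver; the foo_* names, placeholder PHY ID and the extra FIBRE bit are illustrative only.

#include <linux/linkmode.h>
#include <linux/phy.h>

static int foo_phy_get_features(struct phy_device *phydev)
{
	linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
			 phydev->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phydev->supported);
	return 0;
}

static struct phy_driver foo_phy_driver = {
	.phy_id		= 0x00112233,		/* placeholder */
	.phy_id_mask	= 0xfffffff0,
	.name		= "Example 10G PHY",
	.get_features	= foo_phy_get_features,
	/* remaining callbacks elided */
};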
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 67fa05d67523..937d0059e8ac 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -663,6 +663,8 @@ static struct phy_driver broadcom_drivers[] = {
.config_init = bcm54xx_config_init,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
}, {
.phy_id = PHY_ID_BCM5481,
.phy_id_mask = 0xfffffff0,
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index c71c7d0f53f0..1f1ecee0ee2f 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -34,6 +34,7 @@
#define DP83867_RGMIICTL 0x0032
#define DP83867_STRAP_STS1 0x006E
+#define DP83867_STRAP_STS2 0x006f
#define DP83867_RGMIIDCTL 0x0086
#define DP83867_IO_MUX_CFG 0x0170
#define DP83867_10M_SGMII_CFG 0x016F
@@ -63,19 +64,30 @@
/* STRAP_STS1 bits */
#define DP83867_STRAP_STS1_RESERVED BIT(11)
+/* STRAP_STS2 bits */
+#define DP83867_STRAP_STS2_CLK_SKEW_TX_MASK GENMASK(6, 4)
+#define DP83867_STRAP_STS2_CLK_SKEW_TX_SHIFT 4
+#define DP83867_STRAP_STS2_CLK_SKEW_RX_MASK GENMASK(2, 0)
+#define DP83867_STRAP_STS2_CLK_SKEW_RX_SHIFT 0
+#define DP83867_STRAP_STS2_CLK_SKEW_NONE BIT(2)
+
/* PHY CTRL bits */
#define DP83867_PHYCR_FIFO_DEPTH_SHIFT 14
-#define DP83867_PHYCR_FIFO_DEPTH_MASK (3 << 14)
+#define DP83867_PHYCR_FIFO_DEPTH_MAX 0x03
+#define DP83867_PHYCR_FIFO_DEPTH_MASK GENMASK(15, 14)
#define DP83867_PHYCR_RESERVED_MASK BIT(11)
/* RGMIIDCTL bits */
+#define DP83867_RGMII_TX_CLK_DELAY_MAX 0xf
#define DP83867_RGMII_TX_CLK_DELAY_SHIFT 4
+#define DP83867_RGMII_RX_CLK_DELAY_MAX 0xf
+#define DP83867_RGMII_RX_CLK_DELAY_SHIFT 0
/* IO_MUX_CFG bits */
-#define DP83867_IO_MUX_CFG_IO_IMPEDANCE_CTRL 0x1f
-
+#define DP83867_IO_MUX_CFG_IO_IMPEDANCE_MASK 0x1f
#define DP83867_IO_MUX_CFG_IO_IMPEDANCE_MAX 0x0
#define DP83867_IO_MUX_CFG_IO_IMPEDANCE_MIN 0x1f
+#define DP83867_IO_MUX_CFG_CLK_O_DISABLE BIT(6)
#define DP83867_IO_MUX_CFG_CLK_O_SEL_MASK (0x1f << 8)
#define DP83867_IO_MUX_CFG_CLK_O_SEL_SHIFT 8
@@ -89,13 +101,14 @@ enum {
};
struct dp83867_private {
- int rx_id_delay;
- int tx_id_delay;
- int fifo_depth;
+ u32 rx_id_delay;
+ u32 tx_id_delay;
+ u32 fifo_depth;
int io_impedance;
int port_mirroring;
bool rxctrl_strap_quirk;
- int clk_output_sel;
+ bool set_clk_output;
+ u32 clk_output_sel;
};
static int dp83867_ack_interrupt(struct phy_device *phydev)
@@ -157,38 +170,83 @@ static int dp83867_of_init(struct phy_device *phydev)
if (!of_node)
return -ENODEV;
- dp83867->io_impedance = -EINVAL;
-
/* Optional configuration */
ret = of_property_read_u32(of_node, "ti,clk-output-sel",
&dp83867->clk_output_sel);
- if (ret || dp83867->clk_output_sel > DP83867_CLK_O_SEL_REF_CLK)
- /* Keep the default value if ti,clk-output-sel is not set
- * or too high
+ /* If not set, keep default */
+ if (!ret) {
+ dp83867->set_clk_output = true;
+ /* Valid values are 0 to DP83867_CLK_O_SEL_REF_CLK or
+ * DP83867_CLK_O_SEL_OFF.
*/
- dp83867->clk_output_sel = DP83867_CLK_O_SEL_REF_CLK;
+ if (dp83867->clk_output_sel > DP83867_CLK_O_SEL_REF_CLK &&
+ dp83867->clk_output_sel != DP83867_CLK_O_SEL_OFF) {
+ phydev_err(phydev, "ti,clk-output-sel value %u out of range\n",
+ dp83867->clk_output_sel);
+ return -EINVAL;
+ }
+ }
if (of_property_read_bool(of_node, "ti,max-output-impedance"))
dp83867->io_impedance = DP83867_IO_MUX_CFG_IO_IMPEDANCE_MAX;
else if (of_property_read_bool(of_node, "ti,min-output-impedance"))
dp83867->io_impedance = DP83867_IO_MUX_CFG_IO_IMPEDANCE_MIN;
+ else
+ dp83867->io_impedance = -1; /* leave at default */
dp83867->rxctrl_strap_quirk = of_property_read_bool(of_node,
"ti,dp83867-rxctrl-strap-quirk");
- ret = of_property_read_u32(of_node, "ti,rx-internal-delay",
- &dp83867->rx_id_delay);
- if (ret &&
- (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
- phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID))
- return ret;
+ /* Existing behavior was to use default pin strapping delay in rgmii
+ * mode, but rgmii should have meant no delay. Warn existing users.
+ */
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII) {
+ const u16 val = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_STRAP_STS2);
+ const u16 txskew = (val & DP83867_STRAP_STS2_CLK_SKEW_TX_MASK) >>
+ DP83867_STRAP_STS2_CLK_SKEW_TX_SHIFT;
+ const u16 rxskew = (val & DP83867_STRAP_STS2_CLK_SKEW_RX_MASK) >>
+ DP83867_STRAP_STS2_CLK_SKEW_RX_SHIFT;
+
+ if (txskew != DP83867_STRAP_STS2_CLK_SKEW_NONE ||
+ rxskew != DP83867_STRAP_STS2_CLK_SKEW_NONE)
+ phydev_warn(phydev,
+ "PHY has delays via pin strapping, but phy-mode = 'rgmii'\n"
+ "Should be 'rgmii-id' to use internal delays\n");
+ }
- ret = of_property_read_u32(of_node, "ti,tx-internal-delay",
- &dp83867->tx_id_delay);
- if (ret &&
- (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
- phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID))
- return ret;
+ /* RX delay *must* be specified if internal delay of RX is used. */
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
+ ret = of_property_read_u32(of_node, "ti,rx-internal-delay",
+ &dp83867->rx_id_delay);
+ if (ret) {
+ phydev_err(phydev, "ti,rx-internal-delay must be specified\n");
+ return ret;
+ }
+ if (dp83867->rx_id_delay > DP83867_RGMII_RX_CLK_DELAY_MAX) {
+ phydev_err(phydev,
+ "ti,rx-internal-delay value of %u out of range\n",
+ dp83867->rx_id_delay);
+ return -EINVAL;
+ }
+ }
+
+ /* TX delay *must* be specified if internal delay of TX is used. */
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
+ ret = of_property_read_u32(of_node, "ti,tx-internal-delay",
+ &dp83867->tx_id_delay);
+ if (ret) {
+ phydev_err(phydev, "ti,tx-internal-delay must be specified\n");
+ return ret;
+ }
+ if (dp83867->tx_id_delay > DP83867_RGMII_TX_CLK_DELAY_MAX) {
+ phydev_err(phydev,
+ "ti,tx-internal-delay value of %u out of range\n",
+ dp83867->tx_id_delay);
+ return -EINVAL;
+ }
+ }
if (of_property_read_bool(of_node, "enet-phy-lane-swap"))
dp83867->port_mirroring = DP83867_PORT_MIRROING_EN;
@@ -196,8 +254,20 @@ static int dp83867_of_init(struct phy_device *phydev)
if (of_property_read_bool(of_node, "enet-phy-lane-no-swap"))
dp83867->port_mirroring = DP83867_PORT_MIRROING_DIS;
- return of_property_read_u32(of_node, "ti,fifo-depth",
+ ret = of_property_read_u32(of_node, "ti,fifo-depth",
&dp83867->fifo_depth);
+ if (ret) {
+ phydev_err(phydev,
+ "ti,fifo-depth property is required\n");
+ return ret;
+ }
+ if (dp83867->fifo_depth > DP83867_PHYCR_FIFO_DEPTH_MAX) {
+ phydev_err(phydev,
+ "ti,fifo-depth value %u out of range\n",
+ dp83867->fifo_depth);
+ return -EINVAL;
+ }
+ return 0;
}
#else
static int dp83867_of_init(struct phy_device *phydev)
@@ -206,25 +276,29 @@ static int dp83867_of_init(struct phy_device *phydev)
}
#endif /* CONFIG_OF_MDIO */
-static int dp83867_config_init(struct phy_device *phydev)
+static int dp83867_probe(struct phy_device *phydev)
{
struct dp83867_private *dp83867;
+
+ dp83867 = devm_kzalloc(&phydev->mdio.dev, sizeof(*dp83867),
+ GFP_KERNEL);
+ if (!dp83867)
+ return -ENOMEM;
+
+ phydev->priv = dp83867;
+
+ return 0;
+}
+
+static int dp83867_config_init(struct phy_device *phydev)
+{
+ struct dp83867_private *dp83867 = phydev->priv;
int ret, val, bs;
u16 delay;
- if (!phydev->priv) {
- dp83867 = devm_kzalloc(&phydev->mdio.dev, sizeof(*dp83867),
- GFP_KERNEL);
- if (!dp83867)
- return -ENOMEM;
-
- phydev->priv = dp83867;
- ret = dp83867_of_init(phydev);
- if (ret)
- return ret;
- } else {
- dp83867 = (struct dp83867_private *)phydev->priv;
- }
+ ret = dp83867_of_init(phydev);
+ if (ret)
+ return ret;
/* RX_DV/RX_CTRL strapped in mode 1 or mode 2 workaround */
if (dp83867->rxctrl_strap_quirk)
@@ -256,9 +330,16 @@ static int dp83867_config_init(struct phy_device *phydev)
if (ret)
return ret;
- /* Set up RGMII delays */
+ /* If rgmii mode with no internal delay is selected, we do NOT use
+ * aligned mode as one might expect. Instead we use the PHY's default
+ * based on pin strapping. And the "mode 0" default is to *use*
+ * internal delay with a value of 7 (2.00 ns).
+ *
+ * Set up RGMII delays
+ */
val = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_RGMIICTL);
+ val &= ~(DP83867_RGMII_TX_CLK_DELAY_EN | DP83867_RGMII_RX_CLK_DELAY_EN);
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
val |= (DP83867_RGMII_TX_CLK_DELAY_EN | DP83867_RGMII_RX_CLK_DELAY_EN);
@@ -275,14 +356,14 @@ static int dp83867_config_init(struct phy_device *phydev)
phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RGMIIDCTL,
delay);
-
- if (dp83867->io_impedance >= 0)
- phy_modify_mmd(phydev, DP83867_DEVADDR, DP83867_IO_MUX_CFG,
- DP83867_IO_MUX_CFG_IO_IMPEDANCE_CTRL,
- dp83867->io_impedance &
- DP83867_IO_MUX_CFG_IO_IMPEDANCE_CTRL);
}
+ /* If specified, set io impedance */
+ if (dp83867->io_impedance >= 0)
+ phy_modify_mmd(phydev, DP83867_DEVADDR, DP83867_IO_MUX_CFG,
+ DP83867_IO_MUX_CFG_IO_IMPEDANCE_MASK,
+ dp83867->io_impedance);
+
if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
/* To support SPEED_10 in SGMII mode
* DP83867_10M_SGMII_RATE_ADAPT bit
@@ -321,11 +402,20 @@ static int dp83867_config_init(struct phy_device *phydev)
dp83867_config_port_mirroring(phydev);
/* Clock output selection if muxing property is set */
- if (dp83867->clk_output_sel != DP83867_CLK_O_SEL_REF_CLK)
+ if (dp83867->set_clk_output) {
+ u16 mask = DP83867_IO_MUX_CFG_CLK_O_DISABLE;
+
+ if (dp83867->clk_output_sel == DP83867_CLK_O_SEL_OFF) {
+ val = DP83867_IO_MUX_CFG_CLK_O_DISABLE;
+ } else {
+ mask |= DP83867_IO_MUX_CFG_CLK_O_SEL_MASK;
+ val = dp83867->clk_output_sel <<
+ DP83867_IO_MUX_CFG_CLK_O_SEL_SHIFT;
+ }
+
phy_modify_mmd(phydev, DP83867_DEVADDR, DP83867_IO_MUX_CFG,
- DP83867_IO_MUX_CFG_CLK_O_SEL_MASK,
- dp83867->clk_output_sel <<
- DP83867_IO_MUX_CFG_CLK_O_SEL_SHIFT);
+ mask, val);
+ }
return 0;
}
@@ -350,6 +440,7 @@ static struct phy_driver dp83867_driver[] = {
.name = "TI DP83867",
/* PHY_GBIT_FEATURES */
+ .probe = dp83867_probe,
.config_init = dp83867_config_init,
.soft_reset = dp83867_phy_reset,
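The dp83867 changes above move the one-time allocation into .probe and keep devicetree parsing (now with range checks) on the config_init path, so a re-run of config_init after reset re-validates the properties. A bare-bones sketch of that split for a hypothetical driver; struct foo_priv, the property name and the 0xf limit are made up.

#include <linux/of.h>
#include <linux/phy.h>

struct foo_priv {
	u32 delay;
};

static int foo_probe(struct phy_device *phydev)
{
	struct foo_priv *priv;

	priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	phydev->priv = priv;
	return 0;
}

static int foo_config_init(struct phy_device *phydev)
{
	struct foo_priv *priv = phydev->priv;
	int ret;

	ret = of_property_read_u32(phydev->mdio.dev.of_node,
				   "example,internal-delay", &priv->delay);
	if (ret) {
		phydev_err(phydev, "example,internal-delay must be specified\n");
		return ret;
	}
	if (priv->delay > 0xf) {
		phydev_err(phydev, "example,internal-delay %u out of range\n",
			   priv->delay);
		return -EINVAL;
	}

	/* ...program the device from priv->delay... */
	return 0;
}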
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index 314486288119..356bd6472f49 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -262,6 +262,8 @@ static struct phy_driver lxt97x_driver[] = {
/* PHY_BASIC_FEATURES */
.ack_interrupt = lxt971_ack_interrupt,
.config_intr = lxt971_config_intr,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
}, {
.phy_id = 0x00137a10,
.name = "LXT973-A2",
@@ -271,6 +273,8 @@ static struct phy_driver lxt97x_driver[] = {
.probe = lxt973_probe,
.config_aneg = lxt973_config_aneg,
.read_status = lxt973a2_read_status,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
}, {
.phy_id = 0x00137a10,
.name = "LXT973",
@@ -279,6 +283,8 @@ static struct phy_driver lxt97x_driver[] = {
.flags = 0,
.probe = lxt973_probe,
.config_aneg = lxt973_config_aneg,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
} };
module_phy_driver(lxt97x_driver);
diff --git a/drivers/net/phy/nxp-tja11xx.c b/drivers/net/phy/nxp-tja11xx.c
new file mode 100644
index 000000000000..b705d0bd798b
--- /dev/null
+++ b/drivers/net/phy/nxp-tja11xx.c
@@ -0,0 +1,403 @@
+// SPDX-License-Identifier: GPL-2.0
+/* NXP TJA1100 BroadR-Reach PHY driver
+ *
+ * Copyright (C) 2018 Marek Vasut <marex@denx.de>
+ */
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/kernel.h>
+#include <linux/mii.h>
+#include <linux/module.h>
+#include <linux/phy.h>
+#include <linux/hwmon.h>
+#include <linux/bitfield.h>
+
+#define PHY_ID_MASK 0xfffffff0
+#define PHY_ID_TJA1100 0x0180dc40
+#define PHY_ID_TJA1101 0x0180dd00
+
+#define MII_ECTRL 17
+#define MII_ECTRL_LINK_CONTROL BIT(15)
+#define MII_ECTRL_POWER_MODE_MASK GENMASK(14, 11)
+#define MII_ECTRL_POWER_MODE_NO_CHANGE (0x0 << 11)
+#define MII_ECTRL_POWER_MODE_NORMAL (0x3 << 11)
+#define MII_ECTRL_POWER_MODE_STANDBY (0xc << 11)
+#define MII_ECTRL_CONFIG_EN BIT(2)
+#define MII_ECTRL_WAKE_REQUEST BIT(0)
+
+#define MII_CFG1 18
+#define MII_CFG1_AUTO_OP BIT(14)
+#define MII_CFG1_SLEEP_CONFIRM BIT(6)
+#define MII_CFG1_LED_MODE_MASK GENMASK(5, 4)
+#define MII_CFG1_LED_MODE_LINKUP 0
+#define MII_CFG1_LED_ENABLE BIT(3)
+
+#define MII_CFG2 19
+#define MII_CFG2_SLEEP_REQUEST_TO GENMASK(1, 0)
+#define MII_CFG2_SLEEP_REQUEST_TO_16MS 0x3
+
+#define MII_INTSRC 21
+#define MII_INTSRC_TEMP_ERR BIT(1)
+#define MII_INTSRC_UV_ERR BIT(3)
+
+#define MII_COMMSTAT 23
+#define MII_COMMSTAT_LINK_UP BIT(15)
+
+#define MII_GENSTAT 24
+#define MII_GENSTAT_PLL_LOCKED BIT(14)
+
+#define MII_COMMCFG 27
+#define MII_COMMCFG_AUTO_OP BIT(15)
+
+struct tja11xx_priv {
+ char *hwmon_name;
+ struct device *hwmon_dev;
+};
+
+struct tja11xx_phy_stats {
+ const char *string;
+ u8 reg;
+ u8 off;
+ u16 mask;
+};
+
+static struct tja11xx_phy_stats tja11xx_hw_stats[] = {
+ { "phy_symbol_error_count", 20, 0, GENMASK(15, 0) },
+ { "phy_polarity_detect", 25, 6, BIT(6) },
+ { "phy_open_detect", 25, 7, BIT(7) },
+ { "phy_short_detect", 25, 8, BIT(8) },
+ { "phy_rem_rcvr_count", 26, 0, GENMASK(7, 0) },
+ { "phy_loc_rcvr_count", 26, 8, GENMASK(15, 8) },
+};
+
+static int tja11xx_check(struct phy_device *phydev, u8 reg, u16 mask, u16 set)
+{
+ int i, ret;
+
+ for (i = 0; i < 200; i++) {
+ ret = phy_read(phydev, reg);
+ if (ret < 0)
+ return ret;
+
+ if ((ret & mask) == set)
+ return 0;
+
+ usleep_range(100, 150);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int phy_modify_check(struct phy_device *phydev, u8 reg,
+ u16 mask, u16 set)
+{
+ int ret;
+
+ ret = phy_modify(phydev, reg, mask, set);
+ if (ret)
+ return ret;
+
+ return tja11xx_check(phydev, reg, mask, set);
+}
+
+static int tja11xx_enable_reg_write(struct phy_device *phydev)
+{
+ return phy_set_bits(phydev, MII_ECTRL, MII_ECTRL_CONFIG_EN);
+}
+
+static int tja11xx_enable_link_control(struct phy_device *phydev)
+{
+ return phy_set_bits(phydev, MII_ECTRL, MII_ECTRL_LINK_CONTROL);
+}
+
+static int tja11xx_wakeup(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_read(phydev, MII_ECTRL);
+ if (ret < 0)
+ return ret;
+
+ switch (ret & MII_ECTRL_POWER_MODE_MASK) {
+ case MII_ECTRL_POWER_MODE_NO_CHANGE:
+ break;
+ case MII_ECTRL_POWER_MODE_NORMAL:
+ ret = phy_set_bits(phydev, MII_ECTRL, MII_ECTRL_WAKE_REQUEST);
+ if (ret)
+ return ret;
+
+ ret = phy_clear_bits(phydev, MII_ECTRL, MII_ECTRL_WAKE_REQUEST);
+ if (ret)
+ return ret;
+ break;
+ case MII_ECTRL_POWER_MODE_STANDBY:
+ ret = phy_modify_check(phydev, MII_ECTRL,
+ MII_ECTRL_POWER_MODE_MASK,
+ MII_ECTRL_POWER_MODE_STANDBY);
+ if (ret)
+ return ret;
+
+ ret = phy_modify(phydev, MII_ECTRL, MII_ECTRL_POWER_MODE_MASK,
+ MII_ECTRL_POWER_MODE_NORMAL);
+ if (ret)
+ return ret;
+
+ ret = phy_modify_check(phydev, MII_GENSTAT,
+ MII_GENSTAT_PLL_LOCKED,
+ MII_GENSTAT_PLL_LOCKED);
+ if (ret)
+ return ret;
+
+ return tja11xx_enable_link_control(phydev);
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int tja11xx_soft_reset(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = tja11xx_enable_reg_write(phydev);
+ if (ret)
+ return ret;
+
+ return genphy_soft_reset(phydev);
+}
+
+static int tja11xx_config_init(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = tja11xx_enable_reg_write(phydev);
+ if (ret)
+ return ret;
+
+ phydev->autoneg = AUTONEG_DISABLE;
+ phydev->speed = SPEED_100;
+ phydev->duplex = DUPLEX_FULL;
+
+ switch (phydev->phy_id & PHY_ID_MASK) {
+ case PHY_ID_TJA1100:
+ ret = phy_modify(phydev, MII_CFG1,
+ MII_CFG1_AUTO_OP | MII_CFG1_LED_MODE_MASK |
+ MII_CFG1_LED_ENABLE,
+ MII_CFG1_AUTO_OP | MII_CFG1_LED_MODE_LINKUP |
+ MII_CFG1_LED_ENABLE);
+ if (ret)
+ return ret;
+ break;
+ case PHY_ID_TJA1101:
+ ret = phy_set_bits(phydev, MII_COMMCFG, MII_COMMCFG_AUTO_OP);
+ if (ret)
+ return ret;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = phy_clear_bits(phydev, MII_CFG1, MII_CFG1_SLEEP_CONFIRM);
+ if (ret)
+ return ret;
+
+ ret = phy_modify(phydev, MII_CFG2, MII_CFG2_SLEEP_REQUEST_TO,
+ MII_CFG2_SLEEP_REQUEST_TO_16MS);
+ if (ret)
+ return ret;
+
+ ret = tja11xx_wakeup(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* ACK interrupts by reading the status register */
+ ret = phy_read(phydev, MII_INTSRC);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int tja11xx_read_status(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = genphy_update_link(phydev);
+ if (ret)
+ return ret;
+
+ if (phydev->link) {
+ ret = phy_read(phydev, MII_COMMSTAT);
+ if (ret < 0)
+ return ret;
+
+ if (!(ret & MII_COMMSTAT_LINK_UP))
+ phydev->link = 0;
+ }
+
+ return 0;
+}
+
+static int tja11xx_get_sset_count(struct phy_device *phydev)
+{
+ return ARRAY_SIZE(tja11xx_hw_stats);
+}
+
+static void tja11xx_get_strings(struct phy_device *phydev, u8 *data)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tja11xx_hw_stats); i++) {
+ strncpy(data + i * ETH_GSTRING_LEN,
+ tja11xx_hw_stats[i].string, ETH_GSTRING_LEN);
+ }
+}
+
+static void tja11xx_get_stats(struct phy_device *phydev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(tja11xx_hw_stats); i++) {
+ ret = phy_read(phydev, tja11xx_hw_stats[i].reg);
+ if (ret < 0) {
+ data[i] = U64_MAX;
+ } else {
+ data[i] = ret & tja11xx_hw_stats[i].mask;
+ data[i] >>= tja11xx_hw_stats[i].off;
+ }
+ }
+}
+
+static int tja11xx_hwmon_read(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, long *value)
+{
+ struct phy_device *phydev = dev_get_drvdata(dev);
+ int ret;
+
+ if (type == hwmon_in && attr == hwmon_in_lcrit_alarm) {
+ ret = phy_read(phydev, MII_INTSRC);
+ if (ret < 0)
+ return ret;
+
+ *value = !!(ret & MII_INTSRC_UV_ERR); /* undervoltage */
+ return 0;
+ }
+
+ if (type == hwmon_temp && attr == hwmon_temp_crit_alarm) {
+ ret = phy_read(phydev, MII_INTSRC);
+ if (ret < 0)
+ return ret;
+
+ *value = !!(ret & MII_INTSRC_TEMP_ERR); /* overtemperature */
+ return 0;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static umode_t tja11xx_hwmon_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ if (type == hwmon_in && attr == hwmon_in_lcrit_alarm)
+ return 0444;
+
+ if (type == hwmon_temp && attr == hwmon_temp_crit_alarm)
+ return 0444;
+
+ return 0;
+}
+
+static const struct hwmon_channel_info *tja11xx_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(in, HWMON_I_LCRIT_ALARM),
+ HWMON_CHANNEL_INFO(temp, HWMON_T_CRIT_ALARM),
+ NULL
+};
+
+static const struct hwmon_ops tja11xx_hwmon_hwmon_ops = {
+ .is_visible = tja11xx_hwmon_is_visible,
+ .read = tja11xx_hwmon_read,
+};
+
+static const struct hwmon_chip_info tja11xx_hwmon_chip_info = {
+ .ops = &tja11xx_hwmon_hwmon_ops,
+ .info = tja11xx_hwmon_info,
+};
+
+static int tja11xx_probe(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ struct tja11xx_priv *priv;
+ int i;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->hwmon_name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
+ if (!priv->hwmon_name)
+ return -ENOMEM;
+
+ for (i = 0; priv->hwmon_name[i]; i++)
+ if (hwmon_is_bad_char(priv->hwmon_name[i]))
+ priv->hwmon_name[i] = '_';
+
+ priv->hwmon_dev =
+ devm_hwmon_device_register_with_info(dev, priv->hwmon_name,
+ phydev,
+ &tja11xx_hwmon_chip_info,
+ NULL);
+
+ return PTR_ERR_OR_ZERO(priv->hwmon_dev);
+}
+
+static struct phy_driver tja11xx_driver[] = {
+ {
+ PHY_ID_MATCH_MODEL(PHY_ID_TJA1100),
+ .name = "NXP TJA1100",
+ .features = PHY_BASIC_T1_FEATURES,
+ .probe = tja11xx_probe,
+ .soft_reset = tja11xx_soft_reset,
+ .config_init = tja11xx_config_init,
+ .read_status = tja11xx_read_status,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .set_loopback = genphy_loopback,
+ /* Statistics */
+ .get_sset_count = tja11xx_get_sset_count,
+ .get_strings = tja11xx_get_strings,
+ .get_stats = tja11xx_get_stats,
+ }, {
+ PHY_ID_MATCH_MODEL(PHY_ID_TJA1101),
+ .name = "NXP TJA1101",
+ .features = PHY_BASIC_T1_FEATURES,
+ .probe = tja11xx_probe,
+ .soft_reset = tja11xx_soft_reset,
+ .config_init = tja11xx_config_init,
+ .read_status = tja11xx_read_status,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .set_loopback = genphy_loopback,
+ /* Statistics */
+ .get_sset_count = tja11xx_get_sset_count,
+ .get_strings = tja11xx_get_strings,
+ .get_stats = tja11xx_get_stats,
+ }
+};
+
+module_phy_driver(tja11xx_driver);
+
+static struct mdio_device_id __maybe_unused tja11xx_tbl[] = {
+ { PHY_ID_MATCH_MODEL(PHY_ID_TJA1100) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_TJA1101) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, tja11xx_tbl);
+
+MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
+MODULE_DESCRIPTION("NXP TJA11xx BroadR-Reach PHY driver");
+MODULE_LICENSE("GPL");
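The hwmon registration above exposes the PHY's undervoltage and overtemperature flags as ordinary sysfs alarm attributes. A hypothetical userspace sketch for reading them; the hwmon index (hwmon0 here) is system dependent and should really be located by matching the hwmon "name" attribute.

#include <stdio.h>

int main(void)
{
	static const char *attrs[] = {
		"/sys/class/hwmon/hwmon0/in0_lcrit_alarm",   /* undervoltage */
		"/sys/class/hwmon/hwmon0/temp1_crit_alarm",  /* overtemperature */
	};
	char buf[8];
	unsigned int i;

	for (i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++) {
		FILE *f = fopen(attrs[i], "r");

		if (!f)
			continue;
		if (fgets(buf, sizeof(buf), f))
			printf("%s: %s", attrs[i], buf);
		fclose(f);
	}
	return 0;
}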
diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c
index 3daf0214a242..16667fbac8bf 100644
--- a/drivers/net/phy/phy-core.c
+++ b/drivers/net/phy/phy-core.c
@@ -8,7 +8,7 @@
const char *phy_speed_to_str(int speed)
{
- BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 67,
+ BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 69,
"Enum ethtool_link_mode_bit_indices and phylib are out of sync. "
"If a speed or mode has been added please update phy_speed_to_str "
"and the PHY settings array.\n");
@@ -131,9 +131,11 @@ static const struct phy_setting settings[] = {
PHY_SETTING( 1000, FULL, 1000baseKX_Full ),
PHY_SETTING( 1000, FULL, 1000baseT_Full ),
PHY_SETTING( 1000, HALF, 1000baseT_Half ),
+ PHY_SETTING( 1000, FULL, 1000baseT1_Full ),
PHY_SETTING( 1000, FULL, 1000baseX_Full ),
/* 100M */
PHY_SETTING( 100, FULL, 100baseT_Full ),
+ PHY_SETTING( 100, FULL, 100baseT1_Full ),
PHY_SETTING( 100, HALF, 100baseT_Half ),
/* 10M */
PHY_SETTING( 10, FULL, 10baseT_Full ),
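The mask width check above grows from 67 to 69 bits because the ethtool UAPI gained the two T1 link modes that the new table entries describe. Assuming the PHY_SETTING() helper defined earlier in this file, the two additions expand roughly to the designated initializers below (illustration only):

	{ .speed = SPEED_1000, .duplex = DUPLEX_FULL,
	  .bit = ETHTOOL_LINK_MODE_1000baseT1_Full_BIT },
	{ .speed = SPEED_100,  .duplex = DUPLEX_FULL,
	  .bit = ETHTOOL_LINK_MODE_100baseT1_Full_BIT },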
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index e8885429293a..ef7aa738e0dc 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -29,6 +29,8 @@
#include <linux/uaccess.h>
#include <linux/atomic.h>
+#define PHY_STATE_TIME HZ
+
#define PHY_STATE_STR(_state) \
case PHY_##_state: \
return __stringify(_state); \
@@ -41,7 +43,6 @@ static const char *phy_state_to_str(enum phy_state st)
PHY_STATE_STR(UP)
PHY_STATE_STR(RUNNING)
PHY_STATE_STR(NOLINK)
- PHY_STATE_STR(FORCING)
PHY_STATE_STR(HALTED)
}
@@ -297,12 +298,8 @@ int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
linkmode_copy(phydev->advertising, advertising);
- if (AUTONEG_ENABLE == cmd->autoneg)
- linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
- phydev->advertising);
- else
- linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
- phydev->advertising);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ phydev->advertising, AUTONEG_ENABLE == cmd->autoneg);
phydev->duplex = cmd->duplex;
@@ -352,12 +349,8 @@ int phy_ethtool_ksettings_set(struct phy_device *phydev,
linkmode_copy(phydev->advertising, advertising);
- if (autoneg == AUTONEG_ENABLE)
- linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
- phydev->advertising);
- else
- linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
- phydev->advertising);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ phydev->advertising, autoneg == AUTONEG_ENABLE);
phydev->duplex = duplex;
@@ -407,6 +400,7 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
struct mii_ioctl_data *mii_data = if_mii(ifr);
u16 val = mii_data->val_in;
bool change_autoneg = false;
+ int prtad, devad;
switch (cmd) {
case SIOCGMIIPHY:
@@ -414,14 +408,29 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
/* fall through */
case SIOCGMIIREG:
- mii_data->val_out = mdiobus_read(phydev->mdio.bus,
- mii_data->phy_id,
- mii_data->reg_num);
+ if (mdio_phy_id_is_c45(mii_data->phy_id)) {
+ prtad = mdio_phy_id_prtad(mii_data->phy_id);
+ devad = mdio_phy_id_devad(mii_data->phy_id);
+ devad = MII_ADDR_C45 | devad << 16 | mii_data->reg_num;
+ } else {
+ prtad = mii_data->phy_id;
+ devad = mii_data->reg_num;
+ }
+ mii_data->val_out = mdiobus_read(phydev->mdio.bus, prtad,
+ devad);
return 0;
case SIOCSMIIREG:
- if (mii_data->phy_id == phydev->mdio.addr) {
- switch (mii_data->reg_num) {
+ if (mdio_phy_id_is_c45(mii_data->phy_id)) {
+ prtad = mdio_phy_id_prtad(mii_data->phy_id);
+ devad = mdio_phy_id_devad(mii_data->phy_id);
+ devad = MII_ADDR_C45 | devad << 16 | mii_data->reg_num;
+ } else {
+ prtad = mii_data->phy_id;
+ devad = mii_data->reg_num;
+ }
+ if (prtad == phydev->mdio.addr) {
+ switch (devad) {
case MII_BMCR:
if ((val & (BMCR_RESET | BMCR_ANENABLE)) == 0) {
if (phydev->autoneg == AUTONEG_ENABLE)
@@ -454,11 +463,10 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
}
}
- mdiobus_write(phydev->mdio.bus, mii_data->phy_id,
- mii_data->reg_num, val);
+ mdiobus_write(phydev->mdio.bus, prtad, devad, val);
- if (mii_data->phy_id == phydev->mdio.addr &&
- mii_data->reg_num == MII_BMCR &&
+ if (prtad == phydev->mdio.addr &&
+ devad == MII_BMCR &&
val & BMCR_RESET)
return phy_init_hw(phydev);
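With the hunks above, SIOCGMIIREG/SIOCSMIIREG accept the Clause 45 phy_id encoding from <linux/mdio.h> (mdio_phy_id_c45()), so userspace can reach C45 registers through the generic MII ioctl. A hypothetical userspace sketch follows; "eth0", port address 4 and MMD 1 (PMA/PMD) are placeholders, and the ioctl typically needs CAP_NET_ADMIN.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/mii.h>
#include <linux/mdio.h>
#include <linux/sockios.h>

int main(void)
{
	struct ifreq ifr;
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	mii->phy_id = mdio_phy_id_c45(4, MDIO_MMD_PMAPMD);
	mii->reg_num = MDIO_CTRL1;	/* register 1.0 */

	if (ioctl(fd, SIOCGMIIREG, &ifr) == 0)
		printf("PMA/PMD CTRL1 = 0x%04x\n", mii->val_out);

	close(fd);
	return 0;
}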
@@ -478,12 +486,12 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
}
EXPORT_SYMBOL(phy_mii_ioctl);
-static void phy_queue_state_machine(struct phy_device *phydev,
- unsigned int secs)
+void phy_queue_state_machine(struct phy_device *phydev, unsigned long jiffies)
{
mod_delayed_work(system_power_efficient_wq, &phydev->state_queue,
- secs * HZ);
+ jiffies);
}
+EXPORT_SYMBOL(phy_queue_state_machine);
static void phy_trigger_machine(struct phy_device *phydev)
{
@@ -560,15 +568,8 @@ int phy_start_aneg(struct phy_device *phydev)
if (err < 0)
goto out_unlock;
- if (phy_is_started(phydev)) {
- if (phydev->autoneg == AUTONEG_ENABLE) {
- err = phy_check_link_status(phydev);
- } else {
- phydev->state = PHY_FORCING;
- phydev->link_timeout = PHY_FORCE_TIMEOUT;
- }
- }
-
+ if (phy_is_started(phydev))
+ err = phy_check_link_status(phydev);
out_unlock:
mutex_unlock(&phydev->lock);
@@ -772,8 +773,13 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
if (phydev->drv->did_interrupt && !phydev->drv->did_interrupt(phydev))
return IRQ_NONE;
- /* reschedule state queue work to run as soon as possible */
- phy_trigger_machine(phydev);
+ if (phydev->drv->handle_interrupt) {
+ if (phydev->drv->handle_interrupt(phydev))
+ goto phy_err;
+ } else {
+ /* reschedule state queue work to run as soon as possible */
+ phy_trigger_machine(phydev);
+ }
if (phy_clear_interrupt(phydev))
goto phy_err;
@@ -799,10 +805,10 @@ static int phy_enable_interrupts(struct phy_device *phydev)
}
/**
- * phy_request_interrupt - request interrupt for a PHY device
+ * phy_request_interrupt - request and enable interrupt for a PHY device
* @phydev: target phy_device struct
*
- * Description: Request the interrupt for the given PHY.
+ * Description: Request and enable the interrupt for the given PHY.
* If this fails, then we set irq to PHY_POLL.
* This should only be called with a valid IRQ number.
*/
@@ -817,11 +823,31 @@ void phy_request_interrupt(struct phy_device *phydev)
phydev_warn(phydev, "Error %d requesting IRQ %d, falling back to polling\n",
err, phydev->irq);
phydev->irq = PHY_POLL;
+ } else {
+ if (phy_enable_interrupts(phydev)) {
+ phydev_warn(phydev, "Can't enable interrupt, falling back to polling\n");
+ phy_free_interrupt(phydev);
+ phydev->irq = PHY_POLL;
+ }
}
}
EXPORT_SYMBOL(phy_request_interrupt);
/**
+ * phy_free_interrupt - disable and free interrupt for a PHY device
+ * @phydev: target phy_device struct
+ *
+ * Description: Disable and free the interrupt for the given PHY.
+ * This should only be called with a valid IRQ number.
+ */
+void phy_free_interrupt(struct phy_device *phydev)
+{
+ phy_disable_interrupts(phydev);
+ free_irq(phydev->irq, phydev);
+}
+EXPORT_SYMBOL(phy_free_interrupt);
+
+/**
* phy_stop - Bring down the PHY link, and stop checking the status
* @phydev: target phy_device struct
*/
@@ -835,9 +861,6 @@ void phy_stop(struct phy_device *phydev)
mutex_lock(&phydev->lock);
- if (phy_interrupt_is_valid(phydev))
- phy_disable_interrupts(phydev);
-
phydev->state = PHY_HALTED;
mutex_unlock(&phydev->lock);
@@ -864,8 +887,6 @@ EXPORT_SYMBOL(phy_stop);
*/
void phy_start(struct phy_device *phydev)
{
- int err;
-
mutex_lock(&phydev->lock);
if (phydev->state != PHY_READY && phydev->state != PHY_HALTED) {
@@ -877,13 +898,6 @@ void phy_start(struct phy_device *phydev)
/* if phy was suspended, bring the physical link up again */
__phy_resume(phydev);
- /* make sure interrupts are enabled for the PHY */
- if (phy_interrupt_is_valid(phydev)) {
- err = phy_enable_interrupts(phydev);
- if (err < 0)
- goto out;
- }
-
phydev->state = PHY_UP;
phy_start_machine(phydev);
@@ -921,20 +935,6 @@ void phy_state_machine(struct work_struct *work)
case PHY_RUNNING:
err = phy_check_link_status(phydev);
break;
- case PHY_FORCING:
- err = genphy_update_link(phydev);
- if (err)
- break;
-
- if (phydev->link) {
- phydev->state = PHY_RUNNING;
- phy_link_up(phydev);
- } else {
- if (0 == phydev->link_timeout--)
- needs_aneg = true;
- phy_link_down(phydev, false);
- }
- break;
case PHY_HALTED:
if (phydev->link) {
phydev->link = 0;
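The phy_interrupt() change above lets a driver supply .handle_interrupt and take over interrupt handling (a non-zero return is treated as an error), instead of the blanket phy_trigger_machine() call. A minimal sketch of such a callback for a hypothetical driver; the FOO_* register and bit are invented, and phy_queue_state_machine() is the helper exported earlier in this diff.

#include <linux/bits.h>
#include <linux/phy.h>

#define FOO_INTR_STATUS		0x1a	/* invented vendor register */
#define FOO_INTR_LINK_CHANGE	BIT(1)	/* invented status bit */

static int foo_handle_interrupt(struct phy_device *phydev)
{
	int irq_status;

	irq_status = phy_read(phydev, FOO_INTR_STATUS);
	if (irq_status < 0)
		return irq_status;

	/* Only kick the state machine when the link actually changed. */
	if (irq_status & FOO_INTR_LINK_CHANGE)
		phy_queue_state_machine(phydev, 0);

	return 0;
}

/* wired up as .handle_interrupt = foo_handle_interrupt in the phy_driver */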
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index dcc93a873174..53878908adf4 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -89,7 +89,7 @@ EXPORT_SYMBOL_GPL(phy_10_100_features_array);
const int phy_basic_t1_features_array[2] = {
ETHTOOL_LINK_MODE_TP_BIT,
- ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_100baseT1_Full_BIT,
};
EXPORT_SYMBOL_GPL(phy_basic_t1_features_array);
@@ -948,6 +948,9 @@ int phy_connect_direct(struct net_device *dev, struct phy_device *phydev,
{
int rc;
+ if (!dev)
+ return -EINVAL;
+
rc = phy_attach_direct(dev, phydev, phydev->dev_flags, interface);
if (rc)
return rc;
@@ -1013,7 +1016,7 @@ void phy_disconnect(struct phy_device *phydev)
phy_stop(phydev);
if (phy_interrupt_is_valid(phydev))
- free_irq(phydev->irq, phydev);
+ phy_free_interrupt(phydev);
phydev->adjust_link = NULL;
@@ -1133,6 +1136,44 @@ void phy_attached_print(struct phy_device *phydev, const char *fmt, ...)
}
EXPORT_SYMBOL(phy_attached_print);
+static void phy_sysfs_create_links(struct phy_device *phydev)
+{
+ struct net_device *dev = phydev->attached_dev;
+ int err;
+
+ if (!dev)
+ return;
+
+ err = sysfs_create_link(&phydev->mdio.dev.kobj, &dev->dev.kobj,
+ "attached_dev");
+ if (err)
+ return;
+
+ err = sysfs_create_link_nowarn(&dev->dev.kobj,
+ &phydev->mdio.dev.kobj,
+ "phydev");
+ if (err) {
+ dev_err(&dev->dev, "could not add device link to %s err %d\n",
+ kobject_name(&phydev->mdio.dev.kobj),
+ err);
+ /* non-fatal - some net drivers can use one netdevice
+ * with more than one phy
+ */
+ }
+
+ phydev->sysfs_links = true;
+}
+
+static ssize_t
+phy_standalone_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct phy_device *phydev = to_phy_device(dev);
+
+ return sprintf(buf, "%d\n", !phydev->attached_dev);
+}
+static DEVICE_ATTR_RO(phy_standalone);
+
/**
* phy_attach_direct - attach a network device to a given PHY device pointer
* @dev: network device to attach
@@ -1151,9 +1192,9 @@ EXPORT_SYMBOL(phy_attached_print);
int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
u32 flags, phy_interface_t interface)
{
- struct module *ndev_owner = dev->dev.parent->driver->owner;
struct mii_bus *bus = phydev->mdio.bus;
struct device *d = &phydev->mdio.dev;
+ struct module *ndev_owner = NULL;
bool using_genphy = false;
int err;
@@ -1162,8 +1203,10 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
* our own module->refcnt here, otherwise we would not be able to
* unload later on.
*/
+ if (dev)
+ ndev_owner = dev->dev.parent->driver->owner;
if (ndev_owner != bus->owner && !try_module_get(bus->owner)) {
- dev_err(&dev->dev, "failed to get the bus module\n");
+ phydev_err(phydev, "failed to get the bus module\n");
return -EIO;
}
@@ -1182,7 +1225,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
}
if (!try_module_get(d->driver->owner)) {
- dev_err(&dev->dev, "failed to get the device driver module\n");
+ phydev_err(phydev, "failed to get the device driver module\n");
err = -EIO;
goto error_put_device;
}
@@ -1203,8 +1246,10 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
}
phydev->phy_link_change = phy_link_change;
- phydev->attached_dev = dev;
- dev->phydev = phydev;
+ if (dev) {
+ phydev->attached_dev = dev;
+ dev->phydev = phydev;
+ }
/* Some Ethernet drivers try to connect to a PHY device before
* calling register_netdevice() -> netdev_register_kobject() and
@@ -1216,22 +1261,13 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
*/
phydev->sysfs_links = false;
- err = sysfs_create_link(&phydev->mdio.dev.kobj, &dev->dev.kobj,
- "attached_dev");
- if (!err) {
- err = sysfs_create_link_nowarn(&dev->dev.kobj,
- &phydev->mdio.dev.kobj,
- "phydev");
- if (err) {
- dev_err(&dev->dev, "could not add device link to %s err %d\n",
- kobject_name(&phydev->mdio.dev.kobj),
- err);
- /* non-fatal - some net drivers can use one netdevice
- * with more then one phy
- */
- }
+ phy_sysfs_create_links(phydev);
- phydev->sysfs_links = true;
+ if (!phydev->attached_dev) {
+ err = sysfs_create_file(&phydev->mdio.dev.kobj,
+ &dev_attr_phy_standalone.attr);
+ if (err)
+ phydev_err(phydev, "error creating 'phy_standalone' sysfs entry\n");
}
phydev->dev_flags = flags;
@@ -1243,7 +1279,8 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
/* Initial carrier state is off as the phy is about to be
* (re)initialized.
*/
- netif_carrier_off(phydev->attached_dev);
+ if (dev)
+ netif_carrier_off(phydev->attached_dev);
/* Do initial configuration here, now that
* we have certain key parameters
@@ -1290,6 +1327,9 @@ struct phy_device *phy_attach(struct net_device *dev, const char *bus_id,
struct device *d;
int rc;
+ if (!dev)
+ return ERR_PTR(-EINVAL);
+
/* Search the list of PHY devices on the mdio bus for the
* PHY with the requested name
*/
@@ -1349,16 +1389,24 @@ EXPORT_SYMBOL_GPL(phy_driver_is_genphy_10g);
void phy_detach(struct phy_device *phydev)
{
struct net_device *dev = phydev->attached_dev;
- struct module *ndev_owner = dev->dev.parent->driver->owner;
+ struct module *ndev_owner = NULL;
struct mii_bus *bus;
if (phydev->sysfs_links) {
- sysfs_remove_link(&dev->dev.kobj, "phydev");
+ if (dev)
+ sysfs_remove_link(&dev->dev.kobj, "phydev");
sysfs_remove_link(&phydev->mdio.dev.kobj, "attached_dev");
}
+
+ if (!phydev->attached_dev)
+ sysfs_remove_file(&phydev->mdio.dev.kobj,
+ &dev_attr_phy_standalone.attr);
+
phy_suspend(phydev);
- phydev->attached_dev->phydev = NULL;
- phydev->attached_dev = NULL;
+ if (dev) {
+ phydev->attached_dev->phydev = NULL;
+ phydev->attached_dev = NULL;
+ }
phydev->phylink = NULL;
phy_led_triggers_unregister(phydev);
@@ -1381,6 +1429,8 @@ void phy_detach(struct phy_device *phydev)
bus = phydev->mdio.bus;
put_device(&phydev->mdio.dev);
+ if (dev)
+ ndev_owner = dev->dev.parent->driver->owner;
if (ndev_owner != bus->owner)
module_put(bus->owner);
@@ -1880,6 +1930,9 @@ int genphy_config_init(struct phy_device *phydev)
if (val & ESTATUS_1000_THALF)
linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
features);
+ if (val & ESTATUS_1000_XFULL)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+ features);
}
linkmode_and(phydev->supported, phydev->supported, features);
@@ -1931,6 +1984,8 @@ int genphy_read_abilities(struct phy_device *phydev)
phydev->supported, val & ESTATUS_1000_TFULL);
linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
phydev->supported, val & ESTATUS_1000_THALF);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+ phydev->supported, val & ESTATUS_1000_XFULL);
}
return 0;
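The NULL-netdev handling above is what allows a PHY to be attached without a network device (the "phy_standalone" sysfs attribute then marks it as such); in this series the in-tree user is phylink. A hedged sketch of the attach step only; foo_* is invented, and the caller is expected to install its own phydev->phy_link_change before starting the state machine, since the default one assumes an attached netdev.

static int foo_attach_standalone_phy(struct phy_device *phydev)
{
	int err;

	/* No struct net_device: phydev->attached_dev stays NULL. */
	err = phy_attach_direct(NULL, phydev, phydev->dev_flags,
				PHY_INTERFACE_MODE_SGMII);
	if (err)
		return err;

	/* Whoever drives this PHY (e.g. phylink) now supplies
	 * phydev->phy_link_change and starts/stops it.
	 */
	return 0;
}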
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 4c0616ba314d..5d0af041b8f9 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -41,6 +41,9 @@ struct phylink {
/* private: */
struct net_device *netdev;
const struct phylink_mac_ops *ops;
+ struct phylink_config *config;
+ struct device *dev;
+ unsigned int old_link_state:1;
unsigned long phylink_disable_state; /* bitmask of disables */
struct phy_device *phydev;
@@ -56,6 +59,7 @@ struct phylink {
phy_interface_t cur_interface;
struct gpio_desc *link_gpio;
+ unsigned int link_irq;
struct timer_list link_poll;
void (*get_fixed_state)(struct net_device *dev,
struct phylink_link_state *s);
@@ -69,6 +73,23 @@ struct phylink {
struct sfp_bus *sfp_bus;
};
+#define phylink_printk(level, pl, fmt, ...) \
+ do { \
+ if ((pl)->config->type == PHYLINK_NETDEV) \
+ netdev_printk(level, (pl)->netdev, fmt, ##__VA_ARGS__); \
+ else if ((pl)->config->type == PHYLINK_DEV) \
+ dev_printk(level, (pl)->dev, fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define phylink_err(pl, fmt, ...) \
+ phylink_printk(KERN_ERR, pl, fmt, ##__VA_ARGS__)
+#define phylink_warn(pl, fmt, ...) \
+ phylink_printk(KERN_WARNING, pl, fmt, ##__VA_ARGS__)
+#define phylink_info(pl, fmt, ...) \
+ phylink_printk(KERN_INFO, pl, fmt, ##__VA_ARGS__)
+#define phylink_dbg(pl, fmt, ...) \
+ phylink_printk(KERN_DEBUG, pl, fmt, ##__VA_ARGS__)
+
/**
* phylink_set_port_modes() - set the port type modes in the ethtool mask
* @mask: ethtool link mode mask
@@ -115,7 +136,7 @@ static const char *phylink_an_mode_str(unsigned int mode)
static int phylink_validate(struct phylink *pl, unsigned long *supported,
struct phylink_link_state *state)
{
- pl->ops->validate(pl->netdev, supported, state);
+ pl->ops->validate(pl->config, supported, state);
return phylink_is_empty_linkmode(supported) ? -EINVAL : 0;
}
@@ -165,7 +186,7 @@ static int phylink_parse_fixedlink(struct phylink *pl,
ret = fwnode_property_read_u32_array(fwnode, "fixed-link",
NULL, 0);
if (ret != ARRAY_SIZE(prop)) {
- netdev_err(pl->netdev, "broken fixed-link?\n");
+ phylink_err(pl, "broken fixed-link?\n");
return -EINVAL;
}
@@ -184,8 +205,8 @@ static int phylink_parse_fixedlink(struct phylink *pl,
if (pl->link_config.speed > SPEED_1000 &&
pl->link_config.duplex != DUPLEX_FULL)
- netdev_warn(pl->netdev, "fixed link specifies half duplex for %dMbps link?\n",
- pl->link_config.speed);
+ phylink_warn(pl, "fixed link specifies half duplex for %dMbps link?\n",
+ pl->link_config.speed);
bitmap_fill(pl->supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
linkmode_copy(pl->link_config.advertising, pl->supported);
@@ -198,9 +219,9 @@ static int phylink_parse_fixedlink(struct phylink *pl,
if (s) {
__set_bit(s->bit, pl->supported);
} else {
- netdev_warn(pl->netdev, "fixed link %s duplex %dMbps not recognised\n",
- pl->link_config.duplex == DUPLEX_FULL ? "full" : "half",
- pl->link_config.speed);
+ phylink_warn(pl, "fixed link %s duplex %dMbps not recognised\n",
+ pl->link_config.duplex == DUPLEX_FULL ? "full" : "half",
+ pl->link_config.speed);
}
linkmode_and(pl->link_config.advertising, pl->link_config.advertising,
@@ -225,8 +246,8 @@ static int phylink_parse_mode(struct phylink *pl, struct fwnode_handle *fwnode)
if (fwnode_property_read_string(fwnode, "managed", &managed) == 0 &&
strcmp(managed, "in-band-status") == 0) {
if (pl->link_an_mode == MLO_AN_FIXED) {
- netdev_err(pl->netdev,
- "can't use both fixed-link and in-band-status\n");
+ phylink_err(pl,
+ "can't use both fixed-link and in-band-status\n");
return -EINVAL;
}
@@ -273,17 +294,17 @@ static int phylink_parse_mode(struct phylink *pl, struct fwnode_handle *fwnode)
break;
default:
- netdev_err(pl->netdev,
- "incorrect link mode %s for in-band status\n",
- phy_modes(pl->link_config.interface));
+ phylink_err(pl,
+ "incorrect link mode %s for in-band status\n",
+ phy_modes(pl->link_config.interface));
return -EINVAL;
}
linkmode_copy(pl->link_config.advertising, pl->supported);
if (phylink_validate(pl, pl->supported, &pl->link_config)) {
- netdev_err(pl->netdev,
- "failed to validate link configuration for in-band status\n");
+ phylink_err(pl,
+ "failed to validate link configuration for in-band status\n");
return -EINVAL;
}
}
@@ -294,16 +315,16 @@ static int phylink_parse_mode(struct phylink *pl, struct fwnode_handle *fwnode)
static void phylink_mac_config(struct phylink *pl,
const struct phylink_link_state *state)
{
- netdev_dbg(pl->netdev,
- "%s: mode=%s/%s/%s/%s adv=%*pb pause=%02x link=%u an=%u\n",
- __func__, phylink_an_mode_str(pl->link_an_mode),
- phy_modes(state->interface),
- phy_speed_to_str(state->speed),
- phy_duplex_to_str(state->duplex),
- __ETHTOOL_LINK_MODE_MASK_NBITS, state->advertising,
- state->pause, state->link, state->an_enabled);
-
- pl->ops->mac_config(pl->netdev, pl->link_an_mode, state);
+ phylink_dbg(pl,
+ "%s: mode=%s/%s/%s/%s adv=%*pb pause=%02x link=%u an=%u\n",
+ __func__, phylink_an_mode_str(pl->link_an_mode),
+ phy_modes(state->interface),
+ phy_speed_to_str(state->speed),
+ phy_duplex_to_str(state->duplex),
+ __ETHTOOL_LINK_MODE_MASK_NBITS, state->advertising,
+ state->pause, state->link, state->an_enabled);
+
+ pl->ops->mac_config(pl->config, pl->link_an_mode, state);
}
static void phylink_mac_config_up(struct phylink *pl,
@@ -317,12 +338,11 @@ static void phylink_mac_an_restart(struct phylink *pl)
{
if (pl->link_config.an_enabled &&
phy_interface_mode_is_8023z(pl->link_config.interface))
- pl->ops->mac_an_restart(pl->netdev);
+ pl->ops->mac_an_restart(pl->config);
}
static int phylink_get_mac_state(struct phylink *pl, struct phylink_link_state *state)
{
- struct net_device *ndev = pl->netdev;
linkmode_copy(state->advertising, pl->link_config.advertising);
linkmode_zero(state->lp_advertising);
@@ -334,7 +354,7 @@ static int phylink_get_mac_state(struct phylink *pl, struct phylink_link_state *
state->an_complete = 0;
state->link = 1;
- return pl->ops->mac_link_state(ndev, state);
+ return pl->ops->mac_link_state(pl->config, state);
}
/* The fixed state is... fixed except for the link state,
@@ -399,11 +419,43 @@ static const char *phylink_pause_to_str(int pause)
}
}
+static void phylink_mac_link_up(struct phylink *pl,
+ struct phylink_link_state link_state)
+{
+ struct net_device *ndev = pl->netdev;
+
+ pl->cur_interface = link_state.interface;
+ pl->ops->mac_link_up(pl->config, pl->link_an_mode,
+ pl->phy_state.interface,
+ pl->phydev);
+
+ if (ndev)
+ netif_carrier_on(ndev);
+
+ phylink_info(pl,
+ "Link is Up - %s/%s - flow control %s\n",
+ phy_speed_to_str(link_state.speed),
+ phy_duplex_to_str(link_state.duplex),
+ phylink_pause_to_str(link_state.pause));
+}
+
+static void phylink_mac_link_down(struct phylink *pl)
+{
+ struct net_device *ndev = pl->netdev;
+
+ if (ndev)
+ netif_carrier_off(ndev);
+ pl->ops->mac_link_down(pl->config, pl->link_an_mode,
+ pl->cur_interface);
+ phylink_info(pl, "Link is Down\n");
+}
+
static void phylink_resolve(struct work_struct *w)
{
struct phylink *pl = container_of(w, struct phylink, resolve);
struct phylink_link_state link_state;
struct net_device *ndev = pl->netdev;
+ int link_changed;
mutex_lock(&pl->state_mutex);
if (pl->phylink_disable_state) {
@@ -446,25 +498,17 @@ static void phylink_resolve(struct work_struct *w)
}
}
- if (link_state.link != netif_carrier_ok(ndev)) {
- if (!link_state.link) {
- netif_carrier_off(ndev);
- pl->ops->mac_link_down(ndev, pl->link_an_mode,
- pl->cur_interface);
- netdev_info(ndev, "Link is Down\n");
- } else {
- pl->cur_interface = link_state.interface;
- pl->ops->mac_link_up(ndev, pl->link_an_mode,
- pl->cur_interface, pl->phydev);
-
- netif_carrier_on(ndev);
-
- netdev_info(ndev,
- "Link is Up - %s/%s - flow control %s\n",
- phy_speed_to_str(link_state.speed),
- phy_duplex_to_str(link_state.duplex),
- phylink_pause_to_str(link_state.pause));
- }
+ if (pl->netdev)
+ link_changed = (link_state.link != netif_carrier_ok(ndev));
+ else
+ link_changed = (link_state.link != pl->old_link_state);
+
+ if (link_changed) {
+ pl->old_link_state = link_state.link;
+ if (!link_state.link)
+ phylink_mac_link_down(pl);
+ else
+ phylink_mac_link_up(pl, link_state);
}
if (!link_state.link && pl->mac_link_dropped) {
pl->mac_link_dropped = false;
@@ -516,13 +560,12 @@ static int phylink_register_sfp(struct phylink *pl,
if (ret == -ENOENT)
return 0;
- netdev_err(pl->netdev, "unable to parse \"sfp\" node: %d\n",
- ret);
+ phylink_err(pl, "unable to parse \"sfp\" node: %d\n",
+ ret);
return ret;
}
- pl->sfp_bus = sfp_register_upstream(ref.fwnode, pl->netdev, pl,
- &sfp_phylink_ops);
+ pl->sfp_bus = sfp_register_upstream(ref.fwnode, pl, &sfp_phylink_ops);
if (!pl->sfp_bus)
return -ENOMEM;
@@ -543,7 +586,7 @@ static int phylink_register_sfp(struct phylink *pl,
* Returns a pointer to a &struct phylink, or an error-pointer value. Users
* must use IS_ERR() to check for errors from this function.
*/
-struct phylink *phylink_create(struct net_device *ndev,
+struct phylink *phylink_create(struct phylink_config *config,
struct fwnode_handle *fwnode,
phy_interface_t iface,
const struct phylink_mac_ops *ops)
@@ -557,7 +600,17 @@ struct phylink *phylink_create(struct net_device *ndev,
mutex_init(&pl->state_mutex);
INIT_WORK(&pl->resolve, phylink_resolve);
- pl->netdev = ndev;
+
+ pl->config = config;
+ if (config->type == PHYLINK_NETDEV) {
+ pl->netdev = to_net_dev(config->dev);
+ } else if (config->type == PHYLINK_DEV) {
+ pl->dev = config->dev;
+ } else {
+ kfree(pl);
+ return ERR_PTR(-EINVAL);
+ }
+
pl->phy_state.interface = iface;
pl->link_interface = iface;
if (iface == PHY_INTERFACE_MODE_MOCA)
@@ -612,7 +665,7 @@ void phylink_destroy(struct phylink *pl)
{
if (pl->sfp_bus)
sfp_unregister_upstream(pl->sfp_bus);
- if (!IS_ERR_OR_NULL(pl->link_gpio))
+ if (pl->link_gpio)
gpiod_put(pl->link_gpio);
cancel_work_sync(&pl->resolve);
@@ -639,10 +692,10 @@ static void phylink_phy_change(struct phy_device *phydev, bool up,
phylink_run_resolve(pl);
- netdev_dbg(pl->netdev, "phy link %s %s/%s/%s\n", up ? "up" : "down",
- phy_modes(phydev->interface),
- phy_speed_to_str(phydev->speed),
- phy_duplex_to_str(phydev->duplex));
+ phylink_dbg(pl, "phy link %s %s/%s/%s\n", up ? "up" : "down",
+ phy_modes(phydev->interface),
+ phy_speed_to_str(phydev->speed),
+ phy_duplex_to_str(phydev->duplex));
}
static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy)
@@ -675,9 +728,9 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy)
phy->phylink = pl;
phy->phy_link_change = phylink_phy_change;
- netdev_info(pl->netdev,
- "PHY [%s] driver [%s]\n", dev_name(&phy->mdio.dev),
- phy->drv->name);
+ phylink_info(pl,
+ "PHY [%s] driver [%s]\n", dev_name(&phy->mdio.dev),
+ phy->drv->name);
mutex_lock(&phy->lock);
mutex_lock(&pl->state_mutex);
@@ -690,10 +743,10 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy)
mutex_unlock(&pl->state_mutex);
mutex_unlock(&phy->lock);
- netdev_dbg(pl->netdev,
- "phy: setting supported %*pb advertising %*pb\n",
- __ETHTOOL_LINK_MODE_MASK_NBITS, pl->supported,
- __ETHTOOL_LINK_MODE_MASK_NBITS, phy->advertising);
+ phylink_dbg(pl,
+ "phy: setting supported %*pb advertising %*pb\n",
+ __ETHTOOL_LINK_MODE_MASK_NBITS, pl->supported,
+ __ETHTOOL_LINK_MODE_MASK_NBITS, phy->advertising);
if (phy_interrupt_is_valid(phy))
phy_request_interrupt(phy);
@@ -871,10 +924,19 @@ void phylink_mac_change(struct phylink *pl, bool up)
if (!up)
pl->mac_link_dropped = true;
phylink_run_resolve(pl);
- netdev_dbg(pl->netdev, "mac link %s\n", up ? "up" : "down");
+ phylink_dbg(pl, "mac link %s\n", up ? "up" : "down");
}
EXPORT_SYMBOL_GPL(phylink_mac_change);
+static irqreturn_t phylink_link_handler(int irq, void *data)
+{
+ struct phylink *pl = data;
+
+ phylink_run_resolve(pl);
+
+ return IRQ_HANDLED;
+}
+
/**
* phylink_start() - start a phylink instance
* @pl: a pointer to a &struct phylink returned from phylink_create()
@@ -887,12 +949,13 @@ void phylink_start(struct phylink *pl)
{
ASSERT_RTNL();
- netdev_info(pl->netdev, "configuring for %s/%s link mode\n",
- phylink_an_mode_str(pl->link_an_mode),
- phy_modes(pl->link_config.interface));
+ phylink_info(pl, "configuring for %s/%s link mode\n",
+ phylink_an_mode_str(pl->link_an_mode),
+ phy_modes(pl->link_config.interface));
/* Always set the carrier off */
- netif_carrier_off(pl->netdev);
+ if (pl->netdev)
+ netif_carrier_off(pl->netdev);
/* Apply the link configuration to the MAC when starting. This allows
* a fixed-link to start with the correct parameters, and also
@@ -910,7 +973,22 @@ void phylink_start(struct phylink *pl)
clear_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state);
phylink_run_resolve(pl);
- if (pl->link_an_mode == MLO_AN_FIXED && !IS_ERR(pl->link_gpio))
+ if (pl->link_an_mode == MLO_AN_FIXED && pl->link_gpio) {
+ int irq = gpiod_to_irq(pl->link_gpio);
+
+ if (irq > 0) {
+ if (!request_irq(irq, phylink_link_handler,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING,
+ "netdev link", pl))
+ pl->link_irq = irq;
+ else
+ irq = 0;
+ }
+ if (irq <= 0)
+ mod_timer(&pl->link_poll, jiffies + HZ);
+ }
+ if (pl->link_an_mode == MLO_AN_FIXED && pl->get_fixed_state)
mod_timer(&pl->link_poll, jiffies + HZ);
if (pl->sfp_bus)
sfp_upstream_start(pl->sfp_bus);
@@ -936,8 +1014,11 @@ void phylink_stop(struct phylink *pl)
phy_stop(pl->phydev);
if (pl->sfp_bus)
sfp_upstream_stop(pl->sfp_bus);
- if (pl->link_an_mode == MLO_AN_FIXED && !IS_ERR(pl->link_gpio))
- del_timer_sync(&pl->link_poll);
+ del_timer_sync(&pl->link_poll);
+ if (pl->link_irq) {
+ free_irq(pl->link_irq, pl);
+ pl->link_irq = 0;
+ }
phylink_run_resolve_and_disable(pl, PHYLINK_DISABLE_STOPPED);
}
@@ -1239,7 +1320,8 @@ int phylink_ethtool_set_pauseparam(struct phylink *pl,
switch (pl->link_an_mode) {
case MLO_AN_PHY:
/* Silently mark the carrier down, and then trigger a resolve */
- netif_carrier_off(pl->netdev);
+ if (pl->netdev)
+ netif_carrier_off(pl->netdev);
phylink_run_resolve(pl);
break;
@@ -1342,8 +1424,8 @@ EXPORT_SYMBOL_GPL(phylink_ethtool_set_eee);
*
* FIXME: should deal with negotiation state too.
*/
-static int phylink_mii_emul_read(struct net_device *ndev, unsigned int reg,
- struct phylink_link_state *state, bool aneg)
+static int phylink_mii_emul_read(unsigned int reg,
+ struct phylink_link_state *state)
{
struct fixed_phy_status fs;
int val;
@@ -1358,8 +1440,6 @@ static int phylink_mii_emul_read(struct net_device *ndev, unsigned int reg,
if (reg == MII_BMSR) {
if (!state->an_complete)
val &= ~BMSR_ANEGCOMPLETE;
- if (!aneg)
- val &= ~BMSR_ANEGCAPABLE;
}
return val;
}
@@ -1455,8 +1535,7 @@ static int phylink_mii_read(struct phylink *pl, unsigned int phy_id,
case MLO_AN_FIXED:
if (phy_id == 0) {
phylink_get_fixed_state(pl, &state);
- val = phylink_mii_emul_read(pl->netdev, reg, &state,
- true);
+ val = phylink_mii_emul_read(reg, &state);
}
break;
@@ -1469,8 +1548,7 @@ static int phylink_mii_read(struct phylink *pl, unsigned int phy_id,
if (val < 0)
return val;
- val = phylink_mii_emul_read(pl->netdev, reg, &state,
- true);
+ val = phylink_mii_emul_read(reg, &state);
}
break;
}
@@ -1573,6 +1651,20 @@ int phylink_mii_ioctl(struct phylink *pl, struct ifreq *ifr, int cmd)
}
EXPORT_SYMBOL_GPL(phylink_mii_ioctl);
+static void phylink_sfp_attach(void *upstream, struct sfp_bus *bus)
+{
+ struct phylink *pl = upstream;
+
+ pl->netdev->sfp_bus = bus;
+}
+
+static void phylink_sfp_detach(void *upstream, struct sfp_bus *bus)
+{
+ struct phylink *pl = upstream;
+
+ pl->netdev->sfp_bus = NULL;
+}
+
static int phylink_sfp_module_insert(void *upstream,
const struct sfp_eeprom_id *id)
{
@@ -1601,8 +1693,8 @@ static int phylink_sfp_module_insert(void *upstream,
/* Ignore errors if we're expecting a PHY to attach later */
ret = phylink_validate(pl, support, &config);
if (ret) {
- netdev_err(pl->netdev, "validation with support %*pb failed: %d\n",
- __ETHTOOL_LINK_MODE_MASK_NBITS, support, ret);
+ phylink_err(pl, "validation with support %*pb failed: %d\n",
+ __ETHTOOL_LINK_MODE_MASK_NBITS, support, ret);
return ret;
}
@@ -1610,26 +1702,26 @@ static int phylink_sfp_module_insert(void *upstream,
iface = sfp_select_interface(pl->sfp_bus, id, config.advertising);
if (iface == PHY_INTERFACE_MODE_NA) {
- netdev_err(pl->netdev,
- "selection of interface failed, advertisement %*pb\n",
- __ETHTOOL_LINK_MODE_MASK_NBITS, config.advertising);
+ phylink_err(pl,
+ "selection of interface failed, advertisement %*pb\n",
+ __ETHTOOL_LINK_MODE_MASK_NBITS, config.advertising);
return -EINVAL;
}
config.interface = iface;
ret = phylink_validate(pl, support1, &config);
if (ret) {
- netdev_err(pl->netdev, "validation of %s/%s with support %*pb failed: %d\n",
- phylink_an_mode_str(MLO_AN_INBAND),
- phy_modes(config.interface),
- __ETHTOOL_LINK_MODE_MASK_NBITS, support, ret);
+ phylink_err(pl, "validation of %s/%s with support %*pb failed: %d\n",
+ phylink_an_mode_str(MLO_AN_INBAND),
+ phy_modes(config.interface),
+ __ETHTOOL_LINK_MODE_MASK_NBITS, support, ret);
return ret;
}
- netdev_dbg(pl->netdev, "requesting link mode %s/%s with support %*pb\n",
- phylink_an_mode_str(MLO_AN_INBAND),
- phy_modes(config.interface),
- __ETHTOOL_LINK_MODE_MASK_NBITS, support);
+ phylink_dbg(pl, "requesting link mode %s/%s with support %*pb\n",
+ phylink_an_mode_str(MLO_AN_INBAND),
+ phy_modes(config.interface),
+ __ETHTOOL_LINK_MODE_MASK_NBITS, support);
if (phy_interface_mode_is_8023z(iface) && pl->phydev)
return -EINVAL;
@@ -1648,9 +1740,9 @@ static int phylink_sfp_module_insert(void *upstream,
changed = true;
- netdev_info(pl->netdev, "switched to %s/%s link mode\n",
- phylink_an_mode_str(MLO_AN_INBAND),
- phy_modes(config.interface));
+ phylink_info(pl, "switched to %s/%s link mode\n",
+ phylink_an_mode_str(MLO_AN_INBAND),
+ phy_modes(config.interface));
}
pl->link_port = port;
@@ -1694,6 +1786,8 @@ static void phylink_sfp_disconnect_phy(void *upstream)
}
static const struct sfp_upstream_ops sfp_phylink_ops = {
+ .attach = phylink_sfp_attach,
+ .detach = phylink_sfp_detach,
.module_insert = phylink_sfp_module_insert,
.link_up = phylink_sfp_link_up,
.link_down = phylink_sfp_link_down,
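The new .attach/.detach upstream callbacks move the sfp_bus bookkeeping out of sfp-bus.c and into the upstream driver, which is what allows a non-netdev upstream to use the SFP layer at all. Below is a minimal sketch of a netdev-backed upstream wiring them up; the "mymac" names are hypothetical, only the two callbacks shown are new, and the remaining ops stay as before.

/* Sketch: netdev-backed SFP upstream using the new attach/detach ops. */
struct mymac_priv {
	struct net_device *netdev;
	struct sfp_bus *sfp_bus;
};

static void mymac_sfp_attach(void *upstream, struct sfp_bus *bus)
{
	struct mymac_priv *priv = upstream;

	priv->netdev->sfp_bus = bus;	/* expose the module to ethtool */
}

static void mymac_sfp_detach(void *upstream, struct sfp_bus *bus)
{
	struct mymac_priv *priv = upstream;

	priv->netdev->sfp_bus = NULL;
}

static const struct sfp_upstream_ops mymac_sfp_ops = {
	.attach		= mymac_sfp_attach,
	.detach		= mymac_sfp_detach,
	/* .module_insert, .link_up, .link_down, ... as before */
};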
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index e9c187946cca..b23fc41896ef 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -24,7 +24,6 @@ struct sfp_bus {
const struct sfp_upstream_ops *upstream_ops;
void *upstream;
- struct net_device *netdev;
struct phy_device *phydev;
bool registered;
@@ -351,7 +350,7 @@ static int sfp_register_bus(struct sfp_bus *bus)
bus->socket_ops->attach(bus->sfp);
if (bus->started)
bus->socket_ops->start(bus->sfp);
- bus->netdev->sfp_bus = bus;
+ bus->upstream_ops->attach(bus->upstream, bus);
bus->registered = true;
return 0;
}
@@ -360,8 +359,8 @@ static void sfp_unregister_bus(struct sfp_bus *bus)
{
const struct sfp_upstream_ops *ops = bus->upstream_ops;
- bus->netdev->sfp_bus = NULL;
if (bus->registered) {
+ bus->upstream_ops->detach(bus->upstream, bus);
if (bus->started)
bus->socket_ops->stop(bus->sfp);
bus->socket_ops->detach(bus->sfp);
@@ -443,13 +442,11 @@ static void sfp_upstream_clear(struct sfp_bus *bus)
{
bus->upstream_ops = NULL;
bus->upstream = NULL;
- bus->netdev = NULL;
}
/**
* sfp_register_upstream() - Register the neighbouring device
* @fwnode: firmware node for the SFP bus
- * @ndev: network device associated with the interface
* @upstream: the upstream private data
* @ops: the upstream's &struct sfp_upstream_ops
*
@@ -460,7 +457,7 @@ static void sfp_upstream_clear(struct sfp_bus *bus)
* On error, returns %NULL.
*/
struct sfp_bus *sfp_register_upstream(struct fwnode_handle *fwnode,
- struct net_device *ndev, void *upstream,
+ void *upstream,
const struct sfp_upstream_ops *ops)
{
struct sfp_bus *bus = sfp_bus_get(fwnode);
@@ -470,7 +467,6 @@ struct sfp_bus *sfp_register_upstream(struct fwnode_handle *fwnode,
rtnl_lock();
bus->upstream_ops = ops;
bus->upstream = upstream;
- bus->netdev = ndev;
if (bus->sfp) {
ret = sfp_register_bus(bus);
@@ -592,7 +588,7 @@ struct sfp_bus *sfp_register_socket(struct device *dev, struct sfp *sfp,
bus->sfp = sfp;
bus->socket_ops = ops;
- if (bus->netdev) {
+ if (bus->upstream_ops) {
ret = sfp_register_bus(bus);
if (ret)
sfp_socket_clear(bus);
@@ -612,7 +608,7 @@ EXPORT_SYMBOL_GPL(sfp_register_socket);
void sfp_unregister_socket(struct sfp_bus *bus)
{
rtnl_lock();
- if (bus->netdev)
+ if (bus->upstream_ops)
sfp_unregister_bus(bus);
sfp_socket_clear(bus);
rtnl_unlock();
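With the net_device pointer removed from struct sfp_bus, registration and teardown are keyed purely on upstream_ops. A hedged usage sketch of the new three-argument sfp_register_upstream() follows; the error code and the "mymac" names are illustrative, while the NULL-on-error contract comes from the kerneldoc above.

/* Sketch: registering an SFP upstream without passing a net_device. */
static int mymac_attach_sfp(struct mymac_priv *priv,
			    struct fwnode_handle *fwnode)
{
	priv->sfp_bus = sfp_register_upstream(fwnode, priv, &mymac_sfp_ops);
	if (!priv->sfp_bus)
		return -ENOMEM;	/* sfp_register_upstream() returns NULL on error */

	return 0;
}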
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index 71812be0ac64..2d816aadea79 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/acpi.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
@@ -184,12 +185,14 @@ struct sfp {
int (*write)(struct sfp *, bool, u8, void *, size_t);
struct gpio_desc *gpio[GPIO_MAX];
+ int gpio_irq[GPIO_MAX];
bool attached;
+ struct mutex st_mutex; /* Protects state */
unsigned int state;
struct delayed_work poll;
struct delayed_work timeout;
- struct mutex sm_mutex;
+ struct mutex sm_mutex; /* Protects state machine */
unsigned char sm_mod_state;
unsigned char sm_dev_state;
unsigned short sm_state;
@@ -1719,6 +1722,7 @@ static void sfp_check_state(struct sfp *sfp)
{
unsigned int state, i, changed;
+ mutex_lock(&sfp->st_mutex);
state = sfp_get_state(sfp);
changed = state ^ sfp->state;
changed &= SFP_F_PRESENT | SFP_F_LOS | SFP_F_TX_FAULT;
@@ -1744,6 +1748,7 @@ static void sfp_check_state(struct sfp *sfp)
sfp_sm_event(sfp, state & SFP_F_LOS ?
SFP_E_LOS_HIGH : SFP_E_LOS_LOW);
rtnl_unlock();
+ mutex_unlock(&sfp->st_mutex);
}
static irqreturn_t sfp_irq(int irq, void *data)
@@ -1774,6 +1779,7 @@ static struct sfp *sfp_alloc(struct device *dev)
sfp->dev = dev;
mutex_init(&sfp->sm_mutex);
+ mutex_init(&sfp->st_mutex);
INIT_DELAYED_WORK(&sfp->poll, sfp_poll);
INIT_DELAYED_WORK(&sfp->timeout, sfp_timeout);
@@ -1798,9 +1804,10 @@ static void sfp_cleanup(void *data)
static int sfp_probe(struct platform_device *pdev)
{
const struct sff_data *sff;
+ struct i2c_adapter *i2c;
struct sfp *sfp;
bool poll = false;
- int irq, err, i;
+ int err, i;
sfp = sfp_alloc(&pdev->dev);
if (IS_ERR(sfp))
@@ -1817,7 +1824,6 @@ static int sfp_probe(struct platform_device *pdev)
if (pdev->dev.of_node) {
struct device_node *node = pdev->dev.of_node;
const struct of_device_id *id;
- struct i2c_adapter *i2c;
struct device_node *np;
id = of_match_node(sfp_of_match, node);
@@ -1834,14 +1840,32 @@ static int sfp_probe(struct platform_device *pdev)
i2c = of_find_i2c_adapter_by_node(np);
of_node_put(np);
- if (!i2c)
- return -EPROBE_DEFER;
-
- err = sfp_i2c_configure(sfp, i2c);
- if (err < 0) {
- i2c_put_adapter(i2c);
- return err;
+ } else if (has_acpi_companion(&pdev->dev)) {
+ struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
+ struct fwnode_handle *fw = acpi_fwnode_handle(adev);
+ struct fwnode_reference_args args;
+ struct acpi_handle *acpi_handle;
+ int ret;
+
+ ret = acpi_node_get_property_reference(fw, "i2c-bus", 0, &args);
+ if (ret || !is_acpi_device_node(args.fwnode)) {
+ dev_err(&pdev->dev, "missing 'i2c-bus' property\n");
+ return -ENODEV;
}
+
+ acpi_handle = ACPI_HANDLE_FWNODE(args.fwnode);
+ i2c = i2c_acpi_find_adapter_by_handle(acpi_handle);
+ } else {
+ return -EINVAL;
+ }
+
+ if (!i2c)
+ return -EPROBE_DEFER;
+
+ err = sfp_i2c_configure(sfp, i2c);
+ if (err < 0) {
+ i2c_put_adapter(i2c);
+ return err;
}
for (i = 0; i < GPIO_MAX; i++)
@@ -1882,19 +1906,22 @@ static int sfp_probe(struct platform_device *pdev)
if (gpio_flags[i] != GPIOD_IN || !sfp->gpio[i])
continue;
- irq = gpiod_to_irq(sfp->gpio[i]);
- if (!irq) {
+ sfp->gpio_irq[i] = gpiod_to_irq(sfp->gpio[i]);
+ if (!sfp->gpio_irq[i]) {
poll = true;
continue;
}
- err = devm_request_threaded_irq(sfp->dev, irq, NULL, sfp_irq,
+ err = devm_request_threaded_irq(sfp->dev, sfp->gpio_irq[i],
+ NULL, sfp_irq,
IRQF_ONESHOT |
IRQF_TRIGGER_RISING |
IRQF_TRIGGER_FALLING,
dev_name(sfp->dev), sfp);
- if (err)
+ if (err) {
+ sfp->gpio_irq[i] = 0;
poll = true;
+ }
}
if (poll)
@@ -1925,9 +1952,26 @@ static int sfp_remove(struct platform_device *pdev)
return 0;
}
+static void sfp_shutdown(struct platform_device *pdev)
+{
+ struct sfp *sfp = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < GPIO_MAX; i++) {
+ if (!sfp->gpio_irq[i])
+ continue;
+
+ devm_free_irq(sfp->dev, sfp->gpio_irq[i], sfp);
+ }
+
+ cancel_delayed_work_sync(&sfp->poll);
+ cancel_delayed_work_sync(&sfp->timeout);
+}
+
static struct platform_driver sfp_driver = {
.probe = sfp_probe,
.remove = sfp_remove,
+ .shutdown = sfp_shutdown,
.driver = {
.name = "sfp",
.of_match_table = sfp_of_match,
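The ACPI probe path resolves the "i2c-bus" device property to an I2C adapter instead of following an OF phandle. A condensed sketch of that lookup is below; it mirrors the probe hunk above, the helper name is hypothetical, and NULL is returned both for a missing property and for a not-yet-registered adapter (the caller can then defer probing).

/* Sketch: map an ACPI "i2c-bus" property reference to an i2c_adapter. */
static struct i2c_adapter *demo_acpi_get_i2c(struct device *dev)
{
	struct fwnode_handle *fw = acpi_fwnode_handle(ACPI_COMPANION(dev));
	struct fwnode_reference_args args;
	int ret;

	ret = acpi_node_get_property_reference(fw, "i2c-bus", 0, &args);
	if (ret || !is_acpi_device_node(args.fwnode))
		return NULL;

	return i2c_acpi_find_adapter_by_handle(ACPI_HANDLE_FWNODE(args.fwnode));
}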
diff --git a/drivers/net/plip/plip.c b/drivers/net/plip/plip.c
index 8ac33ca9ac3a..e89cdebae6f1 100644
--- a/drivers/net/plip/plip.c
+++ b/drivers/net/plip/plip.c
@@ -1008,7 +1008,7 @@ plip_rewrite_address(const struct net_device *dev, struct ethhdr *eth)
in_dev = __in_dev_get_rcu(dev);
if (in_dev) {
/* Any address will do - we take the first */
- const struct in_ifaddr *ifa = in_dev->ifa_list;
+ const struct in_ifaddr *ifa = rcu_dereference(in_dev->ifa_list);
if (ifa) {
memcpy(eth->h_source, dev->dev_addr, ETH_ALEN);
memset(eth->h_dest, 0xfc, 2);
@@ -1103,7 +1103,7 @@ plip_open(struct net_device *dev)
/* Any address will do - we take the first. We already
have the first two bytes filled with 0xfc, from
plip_init_dev(). */
- struct in_ifaddr *ifa=in_dev->ifa_list;
+ const struct in_ifaddr *ifa = rcu_dereference(in_dev->ifa_list);
if (ifa != NULL) {
memcpy(dev->dev_addr+2, &ifa->ifa_local, 4);
}
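Both plip hunks replace bare ifa_list loads with rcu_dereference(), which is only legal inside an RCU read-side critical section. A generic sketch of the pattern, assuming the caller is not already under rcu_read_lock():

/* Sketch: read the first IPv4 address of a device under RCU. */
static __be32 demo_first_ipv4_addr(struct net_device *dev)
{
	const struct in_ifaddr *ifa;
	struct in_device *in_dev;
	__be32 addr = 0;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(dev);
	if (in_dev) {
		ifa = rcu_dereference(in_dev->ifa_list);
		if (ifa)
			addr = ifa->ifa_local;
	}
	rcu_read_unlock();

	return addr;
}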
diff --git a/drivers/net/ppp/Kconfig b/drivers/net/ppp/Kconfig
index bf395df3bb37..1a2e2f7629f3 100644
--- a/drivers/net/ppp/Kconfig
+++ b/drivers/net/ppp/Kconfig
@@ -87,8 +87,7 @@ config PPP_MPPE
depends on PPP
select CRYPTO
select CRYPTO_SHA1
- select CRYPTO_ARC4
- select CRYPTO_ECB
+ select CRYPTO_LIB_ARC4
---help---
Support for the MPPE Encryption protocol, as employed by the
Microsoft Point-to-Point Tunneling Protocol.
diff --git a/drivers/net/ppp/ppp_mppe.c b/drivers/net/ppp/ppp_mppe.c
index 66c8e65f6872..bd3c80b0bc77 100644
--- a/drivers/net/ppp/ppp_mppe.c
+++ b/drivers/net/ppp/ppp_mppe.c
@@ -42,9 +42,10 @@
* deprecated in 2.6
*/
+#include <crypto/arc4.h>
#include <crypto/hash.h>
-#include <crypto/skcipher.h>
#include <linux/err.h>
+#include <linux/fips.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -66,13 +67,6 @@ MODULE_ALIAS("ppp-compress-" __stringify(CI_MPPE));
MODULE_SOFTDEP("pre: arc4");
MODULE_VERSION("1.0.2");
-static unsigned int
-setup_sg(struct scatterlist *sg, const void *address, unsigned int length)
-{
- sg_set_buf(sg, address, length);
- return length;
-}
-
#define SHA1_PAD_SIZE 40
/*
@@ -96,7 +90,7 @@ static inline void sha_pad_init(struct sha_pad *shapad)
* State for an MPPE (de)compressor.
*/
struct ppp_mppe_state {
- struct crypto_sync_skcipher *arc4;
+ struct arc4_ctx arc4;
struct shash_desc *sha1;
unsigned char *sha1_digest;
unsigned char master_key[MPPE_MAX_KEY_LEN];
@@ -155,24 +149,11 @@ static void get_new_key_from_sha(struct ppp_mppe_state * state)
*/
static void mppe_rekey(struct ppp_mppe_state * state, int initial_key)
{
- struct scatterlist sg_in[1], sg_out[1];
- SYNC_SKCIPHER_REQUEST_ON_STACK(req, state->arc4);
-
- skcipher_request_set_sync_tfm(req, state->arc4);
- skcipher_request_set_callback(req, 0, NULL, NULL);
-
get_new_key_from_sha(state);
if (!initial_key) {
- crypto_sync_skcipher_setkey(state->arc4, state->sha1_digest,
- state->keylen);
- sg_init_table(sg_in, 1);
- sg_init_table(sg_out, 1);
- setup_sg(sg_in, state->sha1_digest, state->keylen);
- setup_sg(sg_out, state->session_key, state->keylen);
- skcipher_request_set_crypt(req, sg_in, sg_out, state->keylen,
- NULL);
- if (crypto_skcipher_encrypt(req))
- printk(KERN_WARNING "mppe_rekey: cipher_encrypt failed\n");
+ arc4_setkey(&state->arc4, state->sha1_digest, state->keylen);
+ arc4_crypt(&state->arc4, state->session_key, state->sha1_digest,
+ state->keylen);
} else {
memcpy(state->session_key, state->sha1_digest, state->keylen);
}
@@ -182,9 +163,7 @@ static void mppe_rekey(struct ppp_mppe_state * state, int initial_key)
state->session_key[1] = 0x26;
state->session_key[2] = 0x9e;
}
- crypto_sync_skcipher_setkey(state->arc4, state->session_key,
- state->keylen);
- skcipher_request_zero(req);
+ arc4_setkey(&state->arc4, state->session_key, state->keylen);
}
/*
@@ -197,7 +176,8 @@ static void *mppe_alloc(unsigned char *options, int optlen)
unsigned int digestsize;
if (optlen != CILEN_MPPE + sizeof(state->master_key) ||
- options[0] != CI_MPPE || options[1] != CILEN_MPPE)
+ options[0] != CI_MPPE || options[1] != CILEN_MPPE ||
+ fips_enabled)
goto out;
state = kzalloc(sizeof(*state), GFP_KERNEL);
@@ -205,12 +185,6 @@ static void *mppe_alloc(unsigned char *options, int optlen)
goto out;
- state->arc4 = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
- if (IS_ERR(state->arc4)) {
- state->arc4 = NULL;
- goto out_free;
- }
-
shash = crypto_alloc_shash("sha1", 0, 0);
if (IS_ERR(shash))
goto out_free;
@@ -251,7 +225,6 @@ out_free:
crypto_free_shash(state->sha1->tfm);
kzfree(state->sha1);
}
- crypto_free_sync_skcipher(state->arc4);
kfree(state);
out:
return NULL;
@@ -267,8 +240,7 @@ static void mppe_free(void *arg)
kfree(state->sha1_digest);
crypto_free_shash(state->sha1->tfm);
kzfree(state->sha1);
- crypto_free_sync_skcipher(state->arc4);
- kfree(state);
+ kzfree(state);
}
}
@@ -367,10 +339,7 @@ mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf,
int isize, int osize)
{
struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
- SYNC_SKCIPHER_REQUEST_ON_STACK(req, state->arc4);
int proto;
- int err;
- struct scatterlist sg_in[1], sg_out[1];
/*
* Check that the protocol is in the range we handle.
@@ -421,21 +390,7 @@ mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf,
ibuf += 2; /* skip to proto field */
isize -= 2;
- /* Encrypt packet */
- sg_init_table(sg_in, 1);
- sg_init_table(sg_out, 1);
- setup_sg(sg_in, ibuf, isize);
- setup_sg(sg_out, obuf, osize);
-
- skcipher_request_set_sync_tfm(req, state->arc4);
- skcipher_request_set_callback(req, 0, NULL, NULL);
- skcipher_request_set_crypt(req, sg_in, sg_out, isize, NULL);
- err = crypto_skcipher_encrypt(req);
- skcipher_request_zero(req);
- if (err) {
- printk(KERN_DEBUG "crypto_cypher_encrypt failed\n");
- return -1;
- }
+ arc4_crypt(&state->arc4, obuf, ibuf, isize);
state->stats.unc_bytes += isize;
state->stats.unc_packets++;
@@ -481,10 +436,8 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
int osize)
{
struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
- SYNC_SKCIPHER_REQUEST_ON_STACK(req, state->arc4);
unsigned ccount;
int flushed = MPPE_BITS(ibuf) & MPPE_BIT_FLUSHED;
- struct scatterlist sg_in[1], sg_out[1];
if (isize <= PPP_HDRLEN + MPPE_OVHD) {
if (state->debug)
@@ -611,19 +564,7 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
* Decrypt the first byte in order to check if it is
* a compressed or uncompressed protocol field.
*/
- sg_init_table(sg_in, 1);
- sg_init_table(sg_out, 1);
- setup_sg(sg_in, ibuf, 1);
- setup_sg(sg_out, obuf, 1);
-
- skcipher_request_set_sync_tfm(req, state->arc4);
- skcipher_request_set_callback(req, 0, NULL, NULL);
- skcipher_request_set_crypt(req, sg_in, sg_out, 1, NULL);
- if (crypto_skcipher_decrypt(req)) {
- printk(KERN_DEBUG "crypto_cypher_decrypt failed\n");
- osize = DECOMP_ERROR;
- goto out_zap_req;
- }
+ arc4_crypt(&state->arc4, obuf, ibuf, 1);
/*
* Do PFC decompression.
@@ -638,14 +579,7 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
}
/* And finally, decrypt the rest of the packet. */
- setup_sg(sg_in, ibuf + 1, isize - 1);
- setup_sg(sg_out, obuf + 1, osize - 1);
- skcipher_request_set_crypt(req, sg_in, sg_out, isize - 1, NULL);
- if (crypto_skcipher_decrypt(req)) {
- printk(KERN_DEBUG "crypto_cypher_decrypt failed\n");
- osize = DECOMP_ERROR;
- goto out_zap_req;
- }
+ arc4_crypt(&state->arc4, obuf + 1, ibuf + 1, isize - 1);
state->stats.unc_bytes += osize;
state->stats.unc_packets++;
@@ -655,8 +589,6 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
/* good packet credit */
state->sanity_errors >>= 1;
-out_zap_req:
- skcipher_request_zero(req);
return osize;
sanity_error:
@@ -729,8 +661,7 @@ static struct compressor ppp_mppe = {
static int __init ppp_mppe_init(void)
{
int answer;
- if (!(crypto_has_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC) &&
- crypto_has_ahash("sha1", 0, CRYPTO_ALG_ASYNC)))
+ if (fips_enabled || !crypto_has_ahash("sha1", 0, CRYPTO_ALG_ASYNC))
return -ENODEV;
sha_pad = kmalloc(sizeof(struct sha_pad), GFP_KERNEL);
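The skcipher and scatterlist plumbing is replaced by the direct ARC4 library calls from <crypto/arc4.h>, which work on plain buffers with no request objects or scatterlists. A minimal sketch of that API; the buffer and key contents are placeholders, and since RC4 is a stream cipher the same call both encrypts and decrypts.

/* Sketch: one-shot RC4 keystream application with the arc4 library. */
#include <crypto/arc4.h>
#include <linux/string.h>

static void demo_arc4(u8 *out, const u8 *in, unsigned int len,
		      const u8 *key, unsigned int keylen)
{
	struct arc4_ctx ctx;

	arc4_setkey(&ctx, key, keylen);		/* schedule the key */
	arc4_crypt(&ctx, out, in, len);		/* XOR with the keystream */
	memzero_explicit(&ctx, sizeof(ctx));	/* scrub the key schedule */
}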
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index 8e01390c738e..dd614c2cd994 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -520,8 +520,7 @@ static int tap_open(struct inode *inode, struct file *file)
goto err;
}
- RCU_INIT_POINTER(q->sock.wq, &q->wq);
- init_waitqueue_head(&q->wq.wait);
+ init_waitqueue_head(&q->sock.wq.wait);
q->sock.type = SOCK_RAW;
q->sock.state = SS_CONNECTED;
q->sock.file = file;
@@ -579,7 +578,7 @@ static __poll_t tap_poll(struct file *file, poll_table *wait)
goto out;
mask = 0;
- poll_wait(file, &q->wq.wait, wait);
+ poll_wait(file, &q->sock.wq.wait, wait);
if (!ptr_ring_empty(&q->ring))
mask |= EPOLLIN | EPOLLRDNORM;
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 36916bf51ee6..abfa0da9bbd2 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -2054,9 +2054,34 @@ static void team_ethtool_get_drvinfo(struct net_device *dev,
strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
}
+static int team_ethtool_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct team *team = netdev_priv(dev);
+ unsigned long speed = 0;
+ struct team_port *port;
+
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ cmd->base.port = PORT_OTHER;
+
+ list_for_each_entry(port, &team->port_list, list) {
+ if (team_port_txable(port)) {
+ if (port->state.speed != SPEED_UNKNOWN)
+ speed += port->state.speed;
+ if (cmd->base.duplex == DUPLEX_UNKNOWN &&
+ port->state.duplex != DUPLEX_UNKNOWN)
+ cmd->base.duplex = port->state.duplex;
+ }
+ }
+ cmd->base.speed = speed ? : SPEED_UNKNOWN;
+
+ return 0;
+}
+
static const struct ethtool_ops team_ethtool_ops = {
.get_drvinfo = team_ethtool_get_drvinfo,
.get_link = ethtool_op_get_link,
+ .get_link_ksettings = team_ethtool_get_link_ksettings,
};
/***********************
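The aggregate speed is the sum over txable ports, and `speed ? : SPEED_UNKNOWN` is the GNU shorthand for `speed ? speed : SPEED_UNKNOWN`. A trivial sketch that spells the fallback out, with the port iteration elided:

/* Sketch: report SPEED_UNKNOWN when no txable port contributed a speed. */
static u32 demo_team_speed(unsigned long sum_of_txable_port_speeds)
{
	if (!sum_of_txable_port_speeds)
		return SPEED_UNKNOWN;

	return sum_of_txable_port_speeds;
}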
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index d7c55e0fa8f4..3d443597bd04 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -160,7 +160,6 @@ struct tun_pcpu_stats {
struct tun_file {
struct sock sk;
struct socket socket;
- struct socket_wq wq;
struct tun_struct __rcu *tun;
struct fasync_struct *fasync;
/* only used for fasnyc */
@@ -2165,7 +2164,7 @@ static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err)
goto out;
}
- add_wait_queue(&tfile->wq.wait, &wait);
+ add_wait_queue(&tfile->socket.wq.wait, &wait);
while (1) {
set_current_state(TASK_INTERRUPTIBLE);
@@ -2185,7 +2184,7 @@ static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err)
}
__set_current_state(TASK_RUNNING);
- remove_wait_queue(&tfile->wq.wait, &wait);
+ remove_wait_queue(&tfile->socket.wq.wait, &wait);
out:
*err = error;
@@ -3415,8 +3414,7 @@ static int tun_chr_open(struct inode *inode, struct file * file)
tfile->flags = 0;
tfile->ifindex = 0;
- init_waitqueue_head(&tfile->wq.wait);
- RCU_INIT_POINTER(tfile->socket.wq, &tfile->wq);
+ init_waitqueue_head(&tfile->socket.wq.wait);
tfile->socket.file = file;
tfile->socket.ops = &tun_socket_ops;
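tap and tun both drop their private struct socket_wq now that struct socket embeds the wait queue; the only per-queue setup left is initialising the embedded waitqueue head. A sketch of the open-path change, with the surrounding driver state elided:

/* Sketch: socket init after the socket_wq embedding change. */
static void demo_sock_init(struct socket *sock, struct file *file)
{
	init_waitqueue_head(&sock->wq.wait);	/* was RCU_INIT_POINTER(sock->wq, &priv->wq) */
	sock->type = SOCK_RAW;
	sock->state = SS_CONNECTED;
	sock->file = file;
}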
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index c9bc96310ed4..ef548beba684 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -226,7 +226,7 @@ static void asix_phy_reset(struct usbnet *dev, unsigned int reset_bits)
static int ax88172_bind(struct usbnet *dev, struct usb_interface *intf)
{
int ret = 0;
- u8 buf[ETH_ALEN];
+ u8 buf[ETH_ALEN] = {0};
int i;
unsigned long gpio_bits = dev->driver_info->data;
@@ -677,7 +677,7 @@ static int asix_resume(struct usb_interface *intf)
static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
{
int ret, i;
- u8 buf[ETH_ALEN], chipcode = 0;
+ u8 buf[ETH_ALEN] = {0}, chipcode = 0;
u32 phyid;
struct asix_common_private *priv;
@@ -1061,7 +1061,7 @@ static const struct net_device_ops ax88178_netdev_ops = {
static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf)
{
int ret;
- u8 buf[ETH_ALEN];
+ u8 buf[ETH_ALEN] = {0};
usbnet_get_endpoints(dev,intf);
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index e0dcb681cfe5..39e0768d734d 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -28,7 +28,7 @@
#define NETNEXT_VERSION "09"
/* Information for net */
-#define NET_VERSION "9"
+#define NET_VERSION "10"
#define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION
#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
@@ -53,6 +53,9 @@
#define PAL_BDC_CR 0xd1a0
#define PLA_TEREDO_TIMER 0xd2cc
#define PLA_REALWOW_TIMER 0xd2e8
+#define PLA_SUSPEND_FLAG 0xd38a
+#define PLA_INDICATE_FALG 0xd38c
+#define PLA_EXTRA_STATUS 0xd398
#define PLA_EFUSE_DATA 0xdd00
#define PLA_EFUSE_CMD 0xdd02
#define PLA_LEDSEL 0xdd90
@@ -336,6 +339,15 @@
/* PLA_BOOT_CTRL */
#define AUTOLOAD_DONE 0x0002
+/* PLA_SUSPEND_FLAG */
+#define LINK_CHG_EVENT BIT(0)
+
+/* PLA_INDICATE_FALG */
+#define UPCOMING_RUNTIME_D3 BIT(0)
+
+/* PLA_EXTRA_STATUS */
+#define LINK_CHANGE_FLAG BIT(8)
+
/* USB_USB2PHY */
#define USB2PHY_SUSPEND 0x0001
#define USB2PHY_L1 0x0002
@@ -813,6 +825,14 @@ int set_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
return ret;
}
+static void rtl_set_unplug(struct r8152 *tp)
+{
+ if (tp->udev->state == USB_STATE_NOTATTACHED) {
+ set_bit(RTL8152_UNPLUG, &tp->flags);
+ smp_mb__after_atomic();
+ }
+}
+
static int generic_ocp_read(struct r8152 *tp, u16 index, u16 size,
void *data, u16 type)
{
@@ -851,7 +871,7 @@ static int generic_ocp_read(struct r8152 *tp, u16 index, u16 size,
}
if (ret == -ENODEV)
- set_bit(RTL8152_UNPLUG, &tp->flags);
+ rtl_set_unplug(tp);
return ret;
}
@@ -921,7 +941,7 @@ static int generic_ocp_write(struct r8152 *tp, u16 index, u16 byteen,
error1:
if (ret == -ENODEV)
- set_bit(RTL8152_UNPLUG, &tp->flags);
+ rtl_set_unplug(tp);
return ret;
}
@@ -1309,7 +1329,7 @@ static void read_bulk_callback(struct urb *urb)
napi_schedule(&tp->napi);
return;
case -ESHUTDOWN:
- set_bit(RTL8152_UNPLUG, &tp->flags);
+ rtl_set_unplug(tp);
netif_device_detach(tp->netdev);
return;
case -ENOENT:
@@ -1429,7 +1449,7 @@ static void intr_callback(struct urb *urb)
resubmit:
res = usb_submit_urb(urb, GFP_ATOMIC);
if (res == -ENODEV) {
- set_bit(RTL8152_UNPLUG, &tp->flags);
+ rtl_set_unplug(tp);
netif_device_detach(tp->netdev);
} else if (res) {
netif_err(tp, intr, tp->netdev,
@@ -2024,7 +2044,7 @@ static void tx_bottom(struct r8152 *tp)
struct net_device *netdev = tp->netdev;
if (res == -ENODEV) {
- set_bit(RTL8152_UNPLUG, &tp->flags);
+ rtl_set_unplug(tp);
netif_device_detach(netdev);
} else {
struct net_device_stats *stats = &netdev->stats;
@@ -2098,7 +2118,7 @@ int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags)
ret = usb_submit_urb(agg->urb, mem_flags);
if (ret == -ENODEV) {
- set_bit(RTL8152_UNPLUG, &tp->flags);
+ rtl_set_unplug(tp);
netif_device_detach(tp->netdev);
} else if (ret) {
struct urb *urb = agg->urb;
@@ -2355,6 +2375,12 @@ static int rtl_stop_rx(struct r8152 *tp)
return 0;
}
+static inline void r8153b_rx_agg_chg_indicate(struct r8152 *tp)
+{
+ ocp_write_byte(tp, MCU_TYPE_USB, USB_UPT_RXDMA_OWN,
+ OWN_UPDATE | OWN_CLEAR);
+}
+
static int rtl_enable(struct r8152 *tp)
{
u32 ocp_data;
@@ -2365,6 +2391,15 @@ static int rtl_enable(struct r8152 *tp)
ocp_data |= CR_RE | CR_TE;
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, ocp_data);
+ switch (tp->version) {
+ case RTL_VER_08:
+ case RTL_VER_09:
+ r8153b_rx_agg_chg_indicate(tp);
+ break;
+ default:
+ break;
+ }
+
rxdy_gated_en(tp, false);
return 0;
@@ -2381,12 +2416,6 @@ static int rtl8152_enable(struct r8152 *tp)
return rtl_enable(tp);
}
-static inline void r8153b_rx_agg_chg_indicate(struct r8152 *tp)
-{
- ocp_write_byte(tp, MCU_TYPE_USB, USB_UPT_RXDMA_OWN,
- OWN_UPDATE | OWN_CLEAR);
-}
-
static void r8153_set_rx_early_timeout(struct r8152 *tp)
{
u32 ocp_data = tp->coalesce / 8;
@@ -2409,7 +2438,6 @@ static void r8153_set_rx_early_timeout(struct r8152 *tp)
128 / 8);
ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EXTRA_AGGR_TMR,
ocp_data);
- r8153b_rx_agg_chg_indicate(tp);
break;
default:
@@ -2433,7 +2461,6 @@ static void r8153_set_rx_early_size(struct r8152 *tp)
case RTL_VER_09:
ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_SIZE,
ocp_data / 8);
- r8153b_rx_agg_chg_indicate(tp);
break;
default:
WARN_ON_ONCE(1);
@@ -2806,20 +2833,24 @@ static void r8153b_power_cut_en(struct r8152 *tp, bool enable)
ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data);
}
-static void r8153b_queue_wake(struct r8152 *tp, bool enable)
+static void r8153_queue_wake(struct r8152 *tp, bool enable)
{
u32 ocp_data;
- ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, 0xd38a);
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_INDICATE_FALG);
if (enable)
- ocp_data |= BIT(0);
+ ocp_data |= UPCOMING_RUNTIME_D3;
else
- ocp_data &= ~BIT(0);
- ocp_write_byte(tp, MCU_TYPE_PLA, 0xd38a, ocp_data);
+ ocp_data &= ~UPCOMING_RUNTIME_D3;
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_INDICATE_FALG, ocp_data);
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_SUSPEND_FLAG);
+ ocp_data &= ~LINK_CHG_EVENT;
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_SUSPEND_FLAG, ocp_data);
- ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, 0xd38c);
- ocp_data &= ~BIT(0);
- ocp_write_byte(tp, MCU_TYPE_PLA, 0xd38c, ocp_data);
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS);
+ ocp_data &= ~LINK_CHANGE_FLAG;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, ocp_data);
}
static bool rtl_can_wakeup(struct r8152 *tp)
@@ -2887,14 +2918,14 @@ static void rtl8153_runtime_enable(struct r8152 *tp, bool enable)
static void rtl8153b_runtime_enable(struct r8152 *tp, bool enable)
{
if (enable) {
- r8153b_queue_wake(tp, true);
+ r8153_queue_wake(tp, true);
r8153b_u1u2en(tp, false);
r8153_u2p3en(tp, false);
rtl_runtime_suspend_enable(tp, true);
r8153b_ups_en(tp, true);
} else {
r8153b_ups_en(tp, false);
- r8153b_queue_wake(tp, false);
+ r8153_queue_wake(tp, false);
rtl_runtime_suspend_enable(tp, false);
r8153_u2p3en(tp, true);
r8153b_u1u2en(tp, true);
@@ -4221,7 +4252,7 @@ static void r8153b_init(struct r8152 *tp)
r8153b_power_cut_en(tp, false);
r8153b_ups_en(tp, false);
- r8153b_queue_wake(tp, false);
+ r8153_queue_wake(tp, false);
rtl_runtime_suspend_enable(tp, false);
r8153b_u1u2en(tp, true);
usb_enable_lpm(tp->udev);
@@ -4903,8 +4934,17 @@ static int rtl8152_set_coalesce(struct net_device *netdev,
if (tp->coalesce != coalesce->rx_coalesce_usecs) {
tp->coalesce = coalesce->rx_coalesce_usecs;
- if (netif_running(tp->netdev) && netif_carrier_ok(netdev))
- r8153_set_rx_early_timeout(tp);
+ if (netif_running(netdev) && netif_carrier_ok(netdev)) {
+ netif_stop_queue(netdev);
+ napi_disable(&tp->napi);
+ tp->rtl_ops.disable(tp);
+ tp->rtl_ops.enable(tp);
+ rtl_start_rx(tp);
+ clear_bit(RTL8152_SET_RX_MODE, &tp->flags);
+ _rtl8152_set_rx_mode(netdev);
+ napi_enable(&tp->napi);
+ netif_wake_queue(netdev);
+ }
}
mutex_unlock(&tp->control);
@@ -5323,10 +5363,7 @@ static void rtl8152_disconnect(struct usb_interface *intf)
usb_set_intfdata(intf, NULL);
if (tp) {
- struct usb_device *udev = tp->udev;
-
- if (udev->state == USB_STATE_NOTATTACHED)
- set_bit(RTL8152_UNPLUG, &tp->flags);
+ rtl_set_unplug(tp);
netif_napi_del(&tp->napi);
unregister_netdev(tp->netdev);
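rtl_set_unplug() centralises the check that the flag is only set once the USB core reports the device as NOTATTACHED, and pairs set_bit() with smp_mb__after_atomic() so later readers of the flag see it before retrying hardware access. A generic sketch of that writer/reader pairing, reusing the driver's flag bit:

/* Sketch: ordered "device gone" flag, as used by rtl_set_unplug(). */
static void demo_mark_gone(unsigned long *flags)
{
	set_bit(RTL8152_UNPLUG, flags);
	smp_mb__after_atomic();		/* publish before any later access */
}

static bool demo_device_gone(const unsigned long *flags)
{
	return test_bit(RTL8152_UNPLUG, flags);
}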
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 52110e54e621..9f3c839f9e5f 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -38,6 +38,8 @@
#define VETH_XDP_TX BIT(0)
#define VETH_XDP_REDIR BIT(1)
+#define VETH_XDP_TX_BULK_SIZE 16
+
struct veth_rq_stats {
u64 xdp_packets;
u64 xdp_bytes;
@@ -64,6 +66,11 @@ struct veth_priv {
unsigned int requested_headroom;
};
+struct veth_xdp_tx_bq {
+ struct xdp_frame *q[VETH_XDP_TX_BULK_SIZE];
+ unsigned int count;
+};
+
/*
* ethtool interface
*/
@@ -442,13 +449,30 @@ drop:
return ret;
}
-static void veth_xdp_flush(struct net_device *dev)
+static void veth_xdp_flush_bq(struct net_device *dev, struct veth_xdp_tx_bq *bq)
+{
+ int sent, i, err = 0;
+
+ sent = veth_xdp_xmit(dev, bq->count, bq->q, 0);
+ if (sent < 0) {
+ err = sent;
+ sent = 0;
+ for (i = 0; i < bq->count; i++)
+ xdp_return_frame(bq->q[i]);
+ }
+ trace_xdp_bulk_tx(dev, sent, bq->count - sent, err);
+
+ bq->count = 0;
+}
+
+static void veth_xdp_flush(struct net_device *dev, struct veth_xdp_tx_bq *bq)
{
struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
struct net_device *rcv;
struct veth_rq *rq;
rcu_read_lock();
+ veth_xdp_flush_bq(dev, bq);
rcv = rcu_dereference(priv->peer);
if (unlikely(!rcv))
goto out;
@@ -464,19 +488,26 @@ out:
rcu_read_unlock();
}
-static int veth_xdp_tx(struct net_device *dev, struct xdp_buff *xdp)
+static int veth_xdp_tx(struct net_device *dev, struct xdp_buff *xdp,
+ struct veth_xdp_tx_bq *bq)
{
struct xdp_frame *frame = convert_to_xdp_frame(xdp);
if (unlikely(!frame))
return -EOVERFLOW;
- return veth_xdp_xmit(dev, 1, &frame, 0);
+ if (unlikely(bq->count == VETH_XDP_TX_BULK_SIZE))
+ veth_xdp_flush_bq(dev, bq);
+
+ bq->q[bq->count++] = frame;
+
+ return 0;
}
static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq,
struct xdp_frame *frame,
- unsigned int *xdp_xmit)
+ unsigned int *xdp_xmit,
+ struct veth_xdp_tx_bq *bq)
{
void *hard_start = frame->data - frame->headroom;
void *head = hard_start - sizeof(struct xdp_frame);
@@ -509,7 +540,7 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq,
orig_frame = *frame;
xdp.data_hard_start = head;
xdp.rxq->mem = frame->mem;
- if (unlikely(veth_xdp_tx(rq->dev, &xdp) < 0)) {
+ if (unlikely(veth_xdp_tx(rq->dev, &xdp, bq) < 0)) {
trace_xdp_exception(rq->dev, xdp_prog, act);
frame = &orig_frame;
goto err_xdp;
@@ -547,6 +578,7 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq,
goto err;
}
+ xdp_release_frame(frame);
xdp_scrub_frame(frame);
skb->protocol = eth_type_trans(skb, rq->dev);
err:
@@ -559,7 +591,8 @@ xdp_xmit:
}
static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, struct sk_buff *skb,
- unsigned int *xdp_xmit)
+ unsigned int *xdp_xmit,
+ struct veth_xdp_tx_bq *bq)
{
u32 pktlen, headroom, act, metalen;
void *orig_data, *orig_data_end;
@@ -635,7 +668,7 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, struct sk_buff *skb,
get_page(virt_to_page(xdp.data));
consume_skb(skb);
xdp.rxq->mem = rq->xdp_mem;
- if (unlikely(veth_xdp_tx(rq->dev, &xdp) < 0)) {
+ if (unlikely(veth_xdp_tx(rq->dev, &xdp, bq) < 0)) {
trace_xdp_exception(rq->dev, xdp_prog, act);
goto err_xdp;
}
@@ -690,7 +723,8 @@ xdp_xmit:
return NULL;
}
-static int veth_xdp_rcv(struct veth_rq *rq, int budget, unsigned int *xdp_xmit)
+static int veth_xdp_rcv(struct veth_rq *rq, int budget, unsigned int *xdp_xmit,
+ struct veth_xdp_tx_bq *bq)
{
int i, done = 0, drops = 0, bytes = 0;
@@ -706,11 +740,11 @@ static int veth_xdp_rcv(struct veth_rq *rq, int budget, unsigned int *xdp_xmit)
struct xdp_frame *frame = veth_ptr_to_xdp(ptr);
bytes += frame->len;
- skb = veth_xdp_rcv_one(rq, frame, &xdp_xmit_one);
+ skb = veth_xdp_rcv_one(rq, frame, &xdp_xmit_one, bq);
} else {
skb = ptr;
bytes += skb->len;
- skb = veth_xdp_rcv_skb(rq, skb, &xdp_xmit_one);
+ skb = veth_xdp_rcv_skb(rq, skb, &xdp_xmit_one, bq);
}
*xdp_xmit |= xdp_xmit_one;
@@ -736,10 +770,13 @@ static int veth_poll(struct napi_struct *napi, int budget)
struct veth_rq *rq =
container_of(napi, struct veth_rq, xdp_napi);
unsigned int xdp_xmit = 0;
+ struct veth_xdp_tx_bq bq;
int done;
+ bq.count = 0;
+
xdp_set_return_frame_no_direct();
- done = veth_xdp_rcv(rq, budget, &xdp_xmit);
+ done = veth_xdp_rcv(rq, budget, &xdp_xmit, &bq);
if (done < budget && napi_complete_done(napi, done)) {
/* Write rx_notify_masked before reading ptr_ring */
@@ -751,7 +788,7 @@ static int veth_poll(struct napi_struct *napi, int budget)
}
if (xdp_xmit & VETH_XDP_TX)
- veth_xdp_flush(rq->dev);
+ veth_xdp_flush(rq->dev, &bq);
if (xdp_xmit & VETH_XDP_REDIR)
xdp_do_flush_map();
xdp_clear_return_frame_no_direct();
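XDP_TX frames are now batched per NAPI poll in an on-stack veth_xdp_tx_bq and flushed either when the array fills or at the end of the poll, instead of one ndo_xdp_xmit() call per frame. A stripped-down sketch of the bulking pattern; demo_flush() stands in for the real veth_xdp_flush_bq():

/* Sketch: on-stack bulk queue for XDP_TX. */
struct demo_bq {
	struct xdp_frame *q[VETH_XDP_TX_BULK_SIZE];
	unsigned int count;
};

static void demo_flush(struct net_device *dev, struct demo_bq *bq)
{
	/* transmit bq->q[0..count-1] in one batched call, then reset */
	bq->count = 0;
}

static void demo_queue(struct net_device *dev, struct demo_bq *bq,
		       struct xdp_frame *frame)
{
	if (unlikely(bq->count == VETH_XDP_TX_BULK_SIZE))
		demo_flush(dev, bq);

	bq->q[bq->count++] = frame;
}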
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 0d4115c9e20b..4f3de0ac8b0b 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -26,7 +26,7 @@
static int napi_weight = NAPI_POLL_WEIGHT;
module_param(napi_weight, int, 0444);
-static bool csum = true, gso = true, napi_tx;
+static bool csum = true, gso = true, napi_tx = true;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);
module_param(napi_tx, bool, 0644);
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 89984fcab01e..3f48f05dd2a6 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -3247,6 +3247,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
.ndo_start_xmit = vmxnet3_xmit_frame,
.ndo_set_mac_address = vmxnet3_set_mac_addr,
.ndo_change_mtu = vmxnet3_change_mtu,
+ .ndo_fix_features = vmxnet3_fix_features,
.ndo_set_features = vmxnet3_set_features,
.ndo_get_stats64 = vmxnet3_get_stats64,
.ndo_tx_timeout = vmxnet3_tx_timeout,
@@ -3651,13 +3652,19 @@ vmxnet3_suspend(struct device *device)
}
if (adapter->wol & WAKE_ARP) {
- in_dev = in_dev_get(netdev);
- if (!in_dev)
+ rcu_read_lock();
+
+ in_dev = __in_dev_get_rcu(netdev);
+ if (!in_dev) {
+ rcu_read_unlock();
goto skip_arp;
+ }
- ifa = (struct in_ifaddr *)in_dev->ifa_list;
- if (!ifa)
+ ifa = rcu_dereference(in_dev->ifa_list);
+ if (!ifa) {
+ rcu_read_unlock();
goto skip_arp;
+ }
pmConf->filters[i].patternSize = ETH_HLEN + /* Ethernet header*/
sizeof(struct arphdr) + /* ARP header */
@@ -3677,7 +3684,9 @@ vmxnet3_suspend(struct device *device)
/* The Unicast IPv4 address in 'tip' field. */
arpreq += 2 * ETH_ALEN + sizeof(u32);
- *(u32 *)arpreq = ifa->ifa_address;
+ *(__be32 *)arpreq = ifa->ifa_address;
+
+ rcu_read_unlock();
/* The mask for the relevant bits. */
pmConf->filters[i].mask[0] = 0x00;
@@ -3686,7 +3695,6 @@ vmxnet3_suspend(struct device *device)
pmConf->filters[i].mask[3] = 0x00;
pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */
pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
- in_dev_put(in_dev);
pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
i++;
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 559db051a500..0a38c76688ab 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -257,6 +257,16 @@ vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf)
}
}
+netdev_features_t vmxnet3_fix_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ /* If Rx checksum is disabled, then LRO should also be disabled */
+ if (!(features & NETIF_F_RXCSUM))
+ features &= ~NETIF_F_LRO;
+
+ return features;
+}
+
int vmxnet3_set_features(struct net_device *netdev, netdev_features_t features)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index a2c554f8a61b..1cc1cd4aaa59 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -69,12 +69,12 @@
/*
* Version numbers
*/
-#define VMXNET3_DRIVER_VERSION_STRING "1.4.16.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING "1.4.17.0-k"
/* Each byte of this 32-bit integer encodes a version number in
* VMXNET3_DRIVER_VERSION_STRING.
*/
-#define VMXNET3_DRIVER_VERSION_NUM 0x01041000
+#define VMXNET3_DRIVER_VERSION_NUM 0x01041100
#if defined(CONFIG_PCI_MSI)
/* RSS only makes sense if MSI-X is supported. */
@@ -454,6 +454,9 @@ vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter);
void
vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter);
+netdev_features_t
+vmxnet3_fix_features(struct net_device *netdev, netdev_features_t features);
+
int
vmxnet3_set_features(struct net_device *netdev, netdev_features_t features);
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 311b0cc6eb98..54edf8956a25 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -1072,12 +1072,14 @@ static struct sk_buff *vrf_l3_rcv(struct net_device *vrf_dev,
#if IS_ENABLED(CONFIG_IPV6)
/* send to link-local or multicast address via interface enslaved to
* VRF device. Force lookup to VRF table without changing flow struct
+ * Note: the caller must hold rcu_read_lock(); no refcnt is taken on the
+ * dst by this function.
*/
static struct dst_entry *vrf_link_scope_lookup(const struct net_device *dev,
struct flowi6 *fl6)
{
struct net *net = dev_net(dev);
- int flags = RT6_LOOKUP_F_IFACE;
+ int flags = RT6_LOOKUP_F_IFACE | RT6_LOOKUP_F_DST_NOREF;
struct dst_entry *dst = NULL;
struct rt6_info *rt;
@@ -1087,7 +1089,6 @@ static struct dst_entry *vrf_link_scope_lookup(const struct net_device *dev,
*/
if (fl6->flowi6_oif == dev->ifindex) {
dst = &net->ipv6.ip6_null_entry->dst;
- dst_hold(dst);
return dst;
}
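With RT6_LOOKUP_F_DST_NOREF the lookup takes no dst refcount, so, per the comment added above, the caller must stay inside rcu_read_lock() for as long as it uses the result and must not dst_release() it. A hedged sketch of that caller contract (the function body is illustrative only):

/* Sketch: caller contract for the no-refcount link-scope lookup. */
static void demo_use_link_scope_dst(const struct net_device *dev,
				    struct flowi6 *fl6)
{
	struct dst_entry *dst;

	rcu_read_lock();
	dst = vrf_link_scope_lookup(dev, fl6);
	if (dst) {
		/* use dst only within this RCU section; no dst_release() */
	}
	rcu_read_unlock();
}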
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 083f3f0bf37f..3d9bcc957f7d 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -468,14 +468,19 @@ static u32 eth_vni_hash(const unsigned char *addr, __be32 vni)
return jhash_2words(key, vni, vxlan_salt) & (FDB_HASH_SIZE - 1);
}
+static u32 fdb_head_index(struct vxlan_dev *vxlan, const u8 *mac, __be32 vni)
+{
+ if (vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA)
+ return eth_vni_hash(mac, vni);
+ else
+ return eth_hash(mac);
+}
+
/* Hash chain to use given mac address */
static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
const u8 *mac, __be32 vni)
{
- if (vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA)
- return &vxlan->fdb_head[eth_vni_hash(mac, vni)];
- else
- return &vxlan->fdb_head[eth_hash(mac)];
+ return &vxlan->fdb_head[fdb_head_index(vxlan, mac, vni)];
}
/* Look up Ethernet address in forwarding table */
@@ -590,8 +595,8 @@ int vxlan_fdb_replay(const struct net_device *dev, __be32 vni,
return -EINVAL;
vxlan = netdev_priv(dev);
- spin_lock_bh(&vxlan->hash_lock);
for (h = 0; h < FDB_HASH_SIZE; ++h) {
+ spin_lock_bh(&vxlan->hash_lock[h]);
hlist_for_each_entry(f, &vxlan->fdb_head[h], hlist) {
if (f->vni == vni) {
list_for_each_entry(rdst, &f->remotes, list) {
@@ -599,14 +604,16 @@ int vxlan_fdb_replay(const struct net_device *dev, __be32 vni,
f, rdst,
extack);
if (rc)
- goto out;
+ goto unlock;
}
}
}
+ spin_unlock_bh(&vxlan->hash_lock[h]);
}
+ return 0;
-out:
- spin_unlock_bh(&vxlan->hash_lock);
+unlock:
+ spin_unlock_bh(&vxlan->hash_lock[h]);
return rc;
}
EXPORT_SYMBOL_GPL(vxlan_fdb_replay);
@@ -622,14 +629,15 @@ void vxlan_fdb_clear_offload(const struct net_device *dev, __be32 vni)
return;
vxlan = netdev_priv(dev);
- spin_lock_bh(&vxlan->hash_lock);
for (h = 0; h < FDB_HASH_SIZE; ++h) {
+ spin_lock_bh(&vxlan->hash_lock[h]);
hlist_for_each_entry(f, &vxlan->fdb_head[h], hlist)
if (f->vni == vni)
list_for_each_entry(rdst, &f->remotes, list)
rdst->offloaded = false;
+ spin_unlock_bh(&vxlan->hash_lock[h]);
}
- spin_unlock_bh(&vxlan->hash_lock);
+
}
EXPORT_SYMBOL_GPL(vxlan_fdb_clear_offload);
@@ -804,6 +812,14 @@ static struct vxlan_fdb *vxlan_fdb_alloc(struct vxlan_dev *vxlan,
return f;
}
+static void vxlan_fdb_insert(struct vxlan_dev *vxlan, const u8 *mac,
+ __be32 src_vni, struct vxlan_fdb *f)
+{
+ ++vxlan->addrcnt;
+ hlist_add_head_rcu(&f->hlist,
+ vxlan_fdb_head(vxlan, mac, src_vni));
+}
+
static int vxlan_fdb_create(struct vxlan_dev *vxlan,
const u8 *mac, union vxlan_addr *ip,
__u16 state, __be16 port, __be32 src_vni,
@@ -829,18 +845,13 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
return rc;
}
- ++vxlan->addrcnt;
- hlist_add_head_rcu(&f->hlist,
- vxlan_fdb_head(vxlan, mac, src_vni));
-
*fdb = f;
return 0;
}
-static void vxlan_fdb_free(struct rcu_head *head)
+static void __vxlan_fdb_free(struct vxlan_fdb *f)
{
- struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu);
struct vxlan_rdst *rd, *nd;
list_for_each_entry_safe(rd, nd, &f->remotes, list) {
@@ -850,6 +861,13 @@ static void vxlan_fdb_free(struct rcu_head *head)
kfree(f);
}
+static void vxlan_fdb_free(struct rcu_head *head)
+{
+ struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu);
+
+ __vxlan_fdb_free(f);
+}
+
static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
bool do_notify, bool swdev_notify)
{
@@ -977,6 +995,7 @@ static int vxlan_fdb_update_create(struct vxlan_dev *vxlan,
if (rc < 0)
return rc;
+ vxlan_fdb_insert(vxlan, mac, src_vni, f);
rc = vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH,
swdev_notify, extack);
if (rc)
@@ -1105,6 +1124,7 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
__be16 port;
__be32 src_vni, vni;
u32 ifindex;
+ u32 hash_index;
int err;
if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) {
@@ -1123,12 +1143,13 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
if (vxlan->default_dst.remote_ip.sa.sa_family != ip.sa.sa_family)
return -EAFNOSUPPORT;
- spin_lock_bh(&vxlan->hash_lock);
+ hash_index = fdb_head_index(vxlan, addr, src_vni);
+ spin_lock_bh(&vxlan->hash_lock[hash_index]);
err = vxlan_fdb_update(vxlan, addr, &ip, ndm->ndm_state, flags,
port, src_vni, vni, ifindex,
ndm->ndm_flags | NTF_VXLAN_ADDED_BY_USER,
true, extack);
- spin_unlock_bh(&vxlan->hash_lock);
+ spin_unlock_bh(&vxlan->hash_lock[hash_index]);
return err;
}
@@ -1176,16 +1197,18 @@ static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
__be32 src_vni, vni;
__be16 port;
u32 ifindex;
+ u32 hash_index;
int err;
err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &src_vni, &vni, &ifindex);
if (err)
return err;
- spin_lock_bh(&vxlan->hash_lock);
+ hash_index = fdb_head_index(vxlan, addr, src_vni);
+ spin_lock_bh(&vxlan->hash_lock[hash_index]);
err = __vxlan_fdb_delete(vxlan, addr, ip, port, src_vni, vni, ifindex,
true);
- spin_unlock_bh(&vxlan->hash_lock);
+ spin_unlock_bh(&vxlan->hash_lock[hash_index]);
return err;
}
@@ -1297,8 +1320,10 @@ static bool vxlan_snoop(struct net_device *dev,
f->updated = jiffies;
vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH, true, NULL);
} else {
+ u32 hash_index = fdb_head_index(vxlan, src_mac, vni);
+
/* learned new entry */
- spin_lock(&vxlan->hash_lock);
+ spin_lock(&vxlan->hash_lock[hash_index]);
/* close off race between vxlan_flush and incoming packets */
if (netif_running(dev))
@@ -1309,7 +1334,7 @@ static bool vxlan_snoop(struct net_device *dev,
vni,
vxlan->default_dst.remote_vni,
ifindex, NTF_SELF, true, NULL);
- spin_unlock(&vxlan->hash_lock);
+ spin_unlock(&vxlan->hash_lock[hash_index]);
}
return false;
@@ -2219,7 +2244,7 @@ static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, struct net_device
fl4.fl4_sport = sport;
rt = ip_route_output_key(vxlan->net, &fl4);
- if (likely(!IS_ERR(rt))) {
+ if (!IS_ERR(rt)) {
if (rt->dst.dev == dev) {
netdev_dbg(dev, "circular route to %pI4\n", &daddr);
ip_rt_put(rt);
@@ -2699,7 +2724,7 @@ static void vxlan_cleanup(struct timer_list *t)
for (h = 0; h < FDB_HASH_SIZE; ++h) {
struct hlist_node *p, *n;
- spin_lock(&vxlan->hash_lock);
+ spin_lock(&vxlan->hash_lock[h]);
hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
struct vxlan_fdb *f
= container_of(p, struct vxlan_fdb, hlist);
@@ -2721,7 +2746,7 @@ static void vxlan_cleanup(struct timer_list *t)
} else if (time_before(timeout, next_timer))
next_timer = timeout;
}
- spin_unlock(&vxlan->hash_lock);
+ spin_unlock(&vxlan->hash_lock[h]);
}
mod_timer(&vxlan->age_timer, next_timer);
@@ -2764,12 +2789,13 @@ static int vxlan_init(struct net_device *dev)
static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan, __be32 vni)
{
struct vxlan_fdb *f;
+ u32 hash_index = fdb_head_index(vxlan, all_zeros_mac, vni);
- spin_lock_bh(&vxlan->hash_lock);
+ spin_lock_bh(&vxlan->hash_lock[hash_index]);
f = __vxlan_find_mac(vxlan, all_zeros_mac, vni);
if (f)
vxlan_fdb_destroy(vxlan, f, true, true);
- spin_unlock_bh(&vxlan->hash_lock);
+ spin_unlock_bh(&vxlan->hash_lock[hash_index]);
}
static void vxlan_uninit(struct net_device *dev)
@@ -2814,9 +2840,10 @@ static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all)
{
unsigned int h;
- spin_lock_bh(&vxlan->hash_lock);
for (h = 0; h < FDB_HASH_SIZE; ++h) {
struct hlist_node *p, *n;
+
+ spin_lock_bh(&vxlan->hash_lock[h]);
hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
struct vxlan_fdb *f
= container_of(p, struct vxlan_fdb, hlist);
@@ -2826,8 +2853,8 @@ static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all)
if (!is_zero_ether_addr(f->eth_addr))
vxlan_fdb_destroy(vxlan, f, true, true);
}
+ spin_unlock_bh(&vxlan->hash_lock[h]);
}
- spin_unlock_bh(&vxlan->hash_lock);
}
/* Cleanup timer and forwarding table on shutdown */
@@ -3011,7 +3038,6 @@ static void vxlan_setup(struct net_device *dev)
dev->max_mtu = ETH_MAX_MTU;
INIT_LIST_HEAD(&vxlan->next);
- spin_lock_init(&vxlan->hash_lock);
timer_setup(&vxlan->age_timer, vxlan_cleanup, TIMER_DEFERRABLE);
@@ -3019,8 +3045,10 @@ static void vxlan_setup(struct net_device *dev)
gro_cells_init(&vxlan->gro_cells, dev);
- for (h = 0; h < FDB_HASH_SIZE; ++h)
+ for (h = 0; h < FDB_HASH_SIZE; ++h) {
+ spin_lock_init(&vxlan->hash_lock[h]);
INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
+ }
}
static void vxlan_ether_setup(struct net_device *dev)
@@ -3571,12 +3599,17 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
if (err)
goto errout;
- /* notify default fdb entry */
if (f) {
+ vxlan_fdb_insert(vxlan, all_zeros_mac,
+ vxlan->default_dst.remote_vni, f);
+
+ /* notify default fdb entry */
err = vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f),
RTM_NEWNEIGH, true, extack);
- if (err)
- goto errout;
+ if (err) {
+ vxlan_fdb_destroy(vxlan, f, false, false);
+ goto unregister;
+ }
}
list_add(&vxlan->next, &vn->vxlan_list);
@@ -3588,7 +3621,8 @@ errout:
* destroy the entry by hand here.
*/
if (f)
- vxlan_fdb_destroy(vxlan, f, false, false);
+ __vxlan_fdb_free(f);
+unregister:
if (unregister)
unregister_netdevice(dev);
return err;
@@ -3914,7 +3948,9 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
/* handle default dst entry */
if (!vxlan_addr_equal(&conf.remote_ip, &dst->remote_ip)) {
- spin_lock_bh(&vxlan->hash_lock);
+ u32 hash_index = fdb_head_index(vxlan, all_zeros_mac, conf.vni);
+
+ spin_lock_bh(&vxlan->hash_lock[hash_index]);
if (!vxlan_addr_any(&conf.remote_ip)) {
err = vxlan_fdb_update(vxlan, all_zeros_mac,
&conf.remote_ip,
@@ -3925,7 +3961,7 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
conf.remote_ifindex,
NTF_SELF, true, extack);
if (err) {
- spin_unlock_bh(&vxlan->hash_lock);
+ spin_unlock_bh(&vxlan->hash_lock[hash_index]);
return err;
}
}
@@ -3937,7 +3973,7 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
dst->remote_vni,
dst->remote_ifindex,
true);
- spin_unlock_bh(&vxlan->hash_lock);
+ spin_unlock_bh(&vxlan->hash_lock[hash_index]);
}
if (conf.age_interval != vxlan->cfg.age_interval)
@@ -4192,8 +4228,11 @@ vxlan_fdb_offloaded_set(struct net_device *dev,
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_rdst *rdst;
struct vxlan_fdb *f;
+ u32 hash_index;
+
+ hash_index = fdb_head_index(vxlan, fdb_info->eth_addr, fdb_info->vni);
- spin_lock_bh(&vxlan->hash_lock);
+ spin_lock_bh(&vxlan->hash_lock[hash_index]);
f = vxlan_find_mac(vxlan, fdb_info->eth_addr, fdb_info->vni);
if (!f)
@@ -4209,7 +4248,7 @@ vxlan_fdb_offloaded_set(struct net_device *dev,
rdst->offloaded = fdb_info->offloaded;
out:
- spin_unlock_bh(&vxlan->hash_lock);
+ spin_unlock_bh(&vxlan->hash_lock[hash_index]);
}
static int
@@ -4218,11 +4257,13 @@ vxlan_fdb_external_learn_add(struct net_device *dev,
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct netlink_ext_ack *extack;
+ u32 hash_index;
int err;
+ hash_index = fdb_head_index(vxlan, fdb_info->eth_addr, fdb_info->vni);
extack = switchdev_notifier_info_to_extack(&fdb_info->info);
- spin_lock_bh(&vxlan->hash_lock);
+ spin_lock_bh(&vxlan->hash_lock[hash_index]);
err = vxlan_fdb_update(vxlan, fdb_info->eth_addr, &fdb_info->remote_ip,
NUD_REACHABLE,
NLM_F_CREATE | NLM_F_REPLACE,
@@ -4232,7 +4273,7 @@ vxlan_fdb_external_learn_add(struct net_device *dev,
fdb_info->remote_ifindex,
NTF_USE | NTF_SELF | NTF_EXT_LEARNED,
false, extack);
- spin_unlock_bh(&vxlan->hash_lock);
+ spin_unlock_bh(&vxlan->hash_lock[hash_index]);
return err;
}
@@ -4243,9 +4284,11 @@ vxlan_fdb_external_learn_del(struct net_device *dev,
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_fdb *f;
+ u32 hash_index;
int err = 0;
- spin_lock_bh(&vxlan->hash_lock);
+ hash_index = fdb_head_index(vxlan, fdb_info->eth_addr, fdb_info->vni);
+ spin_lock_bh(&vxlan->hash_lock[hash_index]);
f = vxlan_find_mac(vxlan, fdb_info->eth_addr, fdb_info->vni);
if (!f)
@@ -4259,7 +4302,7 @@ vxlan_fdb_external_learn_del(struct net_device *dev,
fdb_info->remote_ifindex,
false);
- spin_unlock_bh(&vxlan->hash_lock);
+ spin_unlock_bh(&vxlan->hash_lock[hash_index]);
return err;
}
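The single FDB spinlock becomes an array of per-bucket locks, and every path now computes the bucket with fdb_head_index() before taking only that bucket's lock, so updates to different hash chains no longer serialise on one lock. A compact sketch of the lookup-and-lock pattern used throughout these hunks:

/* Sketch: per-bucket FDB locking keyed by the same hash as the chain. */
static void demo_fdb_op(struct vxlan_dev *vxlan, const u8 *mac, __be32 vni)
{
	u32 hash_index = fdb_head_index(vxlan, mac, vni);

	spin_lock_bh(&vxlan->hash_lock[hash_index]);
	/* ... look up / insert / delete in vxlan->fdb_head[hash_index] ... */
	spin_unlock_bh(&vxlan->hash_lock[hash_index]);
}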
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c
index 61d8f6389c64..a030f5aa6b95 100644
--- a/drivers/net/wan/hdlc_cisco.c
+++ b/drivers/net/wan/hdlc_cisco.c
@@ -193,16 +193,15 @@ static int cisco_rx(struct sk_buff *skb)
mask = ~cpu_to_be32(0); /* is the mask correct? */
if (in_dev != NULL) {
- struct in_ifaddr **ifap = &in_dev->ifa_list;
+ const struct in_ifaddr *ifa;
- while (*ifap != NULL) {
+ in_dev_for_each_ifa_rcu(ifa, in_dev) {
if (strcmp(dev->name,
- (*ifap)->ifa_label) == 0) {
- addr = (*ifap)->ifa_local;
- mask = (*ifap)->ifa_mask;
+ ifa->ifa_label) == 0) {
+ addr = ifa->ifa_local;
+ mask = ifa->ifa_mask;
break;
}
- ifap = &(*ifap)->ifa_next;
}
cisco_keepalive_send(dev, CISCO_ADDR_REPLY,
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index d78bc838d631..914be5847386 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -602,8 +602,8 @@ static void x25_asy_close_tty(struct tty_struct *tty)
err = lapb_unregister(sl->dev);
if (err != LAPB_OK)
- pr_err("x25_asy_close: lapb_unregister error: %d\n",
- err);
+ pr_err("%s: lapb_unregister error: %d\n",
+ __func__, err);
tty->disc_data = NULL;
sl->tty = NULL;
diff --git a/drivers/net/wireless/ath/Kconfig b/drivers/net/wireless/ath/Kconfig
index af2049e99188..d98d6ac90f3d 100644
--- a/drivers/net/wireless/ath/Kconfig
+++ b/drivers/net/wireless/ath/Kconfig
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-License-Identifier: ISC
config ATH_COMMON
tristate
diff --git a/drivers/net/wireless/ath/Makefile b/drivers/net/wireless/ath/Makefile
index e4e460b5498e..ee2b2431e5a3 100644
--- a/drivers/net/wireless/ath/Makefile
+++ b/drivers/net/wireless/ath/Makefile
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: ISC
obj-$(CONFIG_ATH5K) += ath5k/
obj-$(CONFIG_ATH9K_HW) += ath9k/
obj-$(CONFIG_CARL9170) += carl9170/
diff --git a/drivers/net/wireless/ath/ar5523/Kconfig b/drivers/net/wireless/ath/ar5523/Kconfig
index 75fc66983da5..41d3c9a48b08 100644
--- a/drivers/net/wireless/ath/ar5523/Kconfig
+++ b/drivers/net/wireless/ath/ar5523/Kconfig
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-License-Identifier: ISC
config AR5523
tristate "Atheros AR5523 wireless driver support"
depends on MAC80211 && USB
diff --git a/drivers/net/wireless/ath/ar5523/Makefile b/drivers/net/wireless/ath/ar5523/Makefile
index 84fc88aa109e..34efa5772096 100644
--- a/drivers/net/wireless/ath/ar5523/Makefile
+++ b/drivers/net/wireless/ath/ar5523/Makefile
@@ -1,2 +1,2 @@
-# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-License-Identifier: ISC
obj-$(CONFIG_AR5523) := ar5523.o
diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig
index 3522f251fa7f..6b3ff02a373d 100644
--- a/drivers/net/wireless/ath/ath10k/Kconfig
+++ b/drivers/net/wireless/ath/ath10k/Kconfig
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-License-Identifier: ISC
config ATH10K
tristate "Atheros 802.11ac wireless cards support"
depends on MAC80211 && HAS_DMA
diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c
index 0bf726c55736..f80854180e21 100644
--- a/drivers/net/wireless/ath/ath10k/ahb.c
+++ b/drivers/net/wireless/ath/ath10k/ahb.c
@@ -740,7 +740,7 @@ static int ath10k_ahb_probe(struct platform_device *pdev)
enum ath10k_hw_rev hw_rev;
size_t size;
int ret;
- struct ath10k_bus_params bus_params;
+ struct ath10k_bus_params bus_params = {};
of_id = of_match_device(ath10k_ahb_of_match, &pdev->dev);
if (!of_id) {
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index aff585658fc0..dc45d16e8d21 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -2,7 +2,7 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*/
#include <linux/module.h>
@@ -26,10 +26,13 @@
#include "coredump.h"
unsigned int ath10k_debug_mask;
+EXPORT_SYMBOL(ath10k_debug_mask);
+
static unsigned int ath10k_cryptmode_param;
static bool uart_print;
static bool skip_otp;
static bool rawmode;
+static bool fw_diag_log;
unsigned long ath10k_coredump_mask = BIT(ATH10K_FW_CRASH_DUMP_REGISTERS) |
BIT(ATH10K_FW_CRASH_DUMP_CE_DATA);
@@ -40,6 +43,7 @@ module_param_named(cryptmode, ath10k_cryptmode_param, uint, 0644);
module_param(uart_print, bool, 0644);
module_param(skip_otp, bool, 0644);
module_param(rawmode, bool, 0644);
+module_param(fw_diag_log, bool, 0644);
module_param_named(coredump_mask, ath10k_coredump_mask, ulong, 0444);
MODULE_PARM_DESC(debug_mask, "Debugging mask");
@@ -48,6 +52,7 @@ MODULE_PARM_DESC(skip_otp, "Skip otp failure for calibration in testmode");
MODULE_PARM_DESC(cryptmode, "Crypto mode: 0-hardware, 1-software");
MODULE_PARM_DESC(rawmode, "Use raw 802.11 frame datapath");
MODULE_PARM_DESC(coredump_mask, "Bitfield of what to include in firmware crash file");
+MODULE_PARM_DESC(fw_diag_log, "Diag based fw log debugging");
static const struct ath10k_hw_params ath10k_hw_params_list[] = {
{
@@ -83,6 +88,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.rri_on_ddr = false,
.hw_filter_reset_required = true,
.fw_diag_ce_download = false,
+ .tx_stats_over_pktlog = true,
},
{
.id = QCA988X_HW_2_0_VERSION,
@@ -117,6 +123,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.rri_on_ddr = false,
.hw_filter_reset_required = true,
.fw_diag_ce_download = false,
+ .tx_stats_over_pktlog = true,
},
{
.id = QCA9887_HW_1_0_VERSION,
@@ -152,6 +159,35 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.rri_on_ddr = false,
.hw_filter_reset_required = true,
.fw_diag_ce_download = false,
+ .tx_stats_over_pktlog = false,
+ },
+ {
+ .id = QCA6174_HW_3_2_VERSION,
+ .dev_id = QCA6174_3_2_DEVICE_ID,
+ .bus = ATH10K_BUS_SDIO,
+ .name = "qca6174 hw3.2 sdio",
+ .patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR,
+ .uart_pin = 19,
+ .otp_exe_param = 0,
+ .channel_counters_freq_hz = 88000,
+ .max_probe_resp_desc_thres = 0,
+ .cal_data_len = 0,
+ .fw = {
+ .dir = QCA6174_HW_3_0_FW_DIR,
+ .board = QCA6174_HW_3_0_BOARD_DATA_FILE,
+ .board_size = QCA6174_BOARD_DATA_SZ,
+ .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
+ },
+ .hw_ops = &qca6174_sdio_ops,
+ .hw_clk = qca6174_clk,
+ .target_cpu_freq = 176000000,
+ .decap_align_bytes = 4,
+ .n_cipher_suites = 8,
+ .num_peers = 10,
+ .ast_skid_limit = 0x10,
+ .num_wds_entries = 0x20,
+ .uart_pin_workaround = true,
+ .tx_stats_over_pktlog = false,
},
{
.id = QCA6174_HW_2_1_VERSION,
@@ -186,6 +222,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.rri_on_ddr = false,
.hw_filter_reset_required = true,
.fw_diag_ce_download = false,
+ .tx_stats_over_pktlog = false,
},
{
.id = QCA6174_HW_2_1_VERSION,
@@ -220,6 +257,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.rri_on_ddr = false,
.hw_filter_reset_required = true,
.fw_diag_ce_download = false,
+ .tx_stats_over_pktlog = false,
},
{
.id = QCA6174_HW_3_0_VERSION,
@@ -254,6 +292,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.rri_on_ddr = false,
.hw_filter_reset_required = true,
.fw_diag_ce_download = false,
+ .tx_stats_over_pktlog = false,
},
{
.id = QCA6174_HW_3_2_VERSION,
@@ -291,6 +330,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.rri_on_ddr = false,
.hw_filter_reset_required = true,
.fw_diag_ce_download = true,
+ .tx_stats_over_pktlog = false,
},
{
.id = QCA99X0_HW_2_0_DEV_VERSION,
@@ -331,6 +371,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.rri_on_ddr = false,
.hw_filter_reset_required = true,
.fw_diag_ce_download = false,
+ .tx_stats_over_pktlog = false,
},
{
.id = QCA9984_HW_1_0_DEV_VERSION,
@@ -378,6 +419,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.rri_on_ddr = false,
.hw_filter_reset_required = true,
.fw_diag_ce_download = false,
+ .tx_stats_over_pktlog = false,
},
{
.id = QCA9888_HW_2_0_DEV_VERSION,
@@ -422,6 +464,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.rri_on_ddr = false,
.hw_filter_reset_required = true,
.fw_diag_ce_download = false,
+ .tx_stats_over_pktlog = false,
},
{
.id = QCA9377_HW_1_0_DEV_VERSION,
@@ -456,6 +499,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.rri_on_ddr = false,
.hw_filter_reset_required = true,
.fw_diag_ce_download = false,
+ .tx_stats_over_pktlog = false,
},
{
.id = QCA9377_HW_1_1_DEV_VERSION,
@@ -492,6 +536,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.rri_on_ddr = false,
.hw_filter_reset_required = true,
.fw_diag_ce_download = true,
+ .tx_stats_over_pktlog = false,
},
{
.id = QCA4019_HW_1_0_DEV_VERSION,
@@ -533,6 +578,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.rri_on_ddr = false,
.hw_filter_reset_required = true,
.fw_diag_ce_download = false,
+ .tx_stats_over_pktlog = false,
},
{
.id = WCN3990_HW_1_0_DEV_VERSION,
@@ -560,6 +606,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.rri_on_ddr = true,
.hw_filter_reset_required = false,
.fw_diag_ce_download = false,
+ .tx_stats_over_pktlog = false,
},
};
@@ -585,6 +632,7 @@ static const char *const ath10k_core_fw_feature_str[] = {
[ATH10K_FW_FEATURE_MGMT_TX_BY_REF] = "mgmt-tx-by-reference",
[ATH10K_FW_FEATURE_NON_BMI] = "non-bmi",
[ATH10K_FW_FEATURE_SINGLE_CHAN_INFO_PER_CHANNEL] = "single-chan-info-per-channel",
+ [ATH10K_FW_FEATURE_PEER_FIXED_RATE] = "peer-fixed-rate",
};
static unsigned int ath10k_core_get_fw_feature_str(char *buf,
@@ -629,7 +677,7 @@ static void ath10k_send_suspend_complete(struct ath10k *ar)
complete(&ar->target_suspend);
}
-static void ath10k_init_sdio(struct ath10k *ar)
+static void ath10k_init_sdio(struct ath10k *ar, enum ath10k_firmware_mode mode)
{
u32 param = 0;
@@ -646,7 +694,12 @@ static void ath10k_init_sdio(struct ath10k *ar)
* not big enough for mac80211 / native wifi frames. disable it
*/
param &= ~HI_ACS_FLAGS_ALT_DATA_CREDIT_SIZE;
- param |= HI_ACS_FLAGS_SDIO_SWAP_MAILBOX_SET;
+
+ if (mode == ATH10K_FIRMWARE_MODE_UTF)
+ param &= ~HI_ACS_FLAGS_SDIO_SWAP_MAILBOX_SET;
+ else
+ param |= HI_ACS_FLAGS_SDIO_SWAP_MAILBOX_SET;
+
ath10k_bmi_write32(ar, hi_acs_flags, param);
/* Explicitly set fwlog prints to zero as target may turn it on
@@ -2065,8 +2118,16 @@ static int ath10k_init_uart(struct ath10k *ar)
return ret;
}
- if (!uart_print)
+ if (!uart_print) {
+ if (ar->hw_params.uart_pin_workaround) {
+ ret = ath10k_bmi_write32(ar, hi_dbg_uart_txpin,
+ ar->hw_params.uart_pin);
+ if (ret) {
+ ath10k_warn(ar, "failed to set UART TX pin: %d\n", ret);
+ return ret;
+ }
+ }
+
 return 0;
+ }
ret = ath10k_bmi_write32(ar, hi_dbg_uart_txpin, ar->hw_params.uart_pin);
if (ret) {
@@ -2139,6 +2200,7 @@ static void ath10k_core_restart(struct work_struct *work)
complete(&ar->offchan_tx_completed);
complete(&ar->install_key_done);
complete(&ar->vdev_setup_done);
+ complete(&ar->vdev_delete_done);
complete(&ar->thermal.wmi_sync);
complete(&ar->bss_survey_done);
wake_up(&ar->htt.empty_tx_wq);
@@ -2501,7 +2563,7 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
goto err;
if (ar->hif.bus == ATH10K_BUS_SDIO)
- ath10k_init_sdio(ar);
+ ath10k_init_sdio(ar, mode);
}
ar->htc.htc_ops.target_send_suspend_complete =
@@ -2720,6 +2782,12 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
if (status)
goto err_hif_stop;
+ status = ath10k_hif_set_target_log_mode(ar, fw_diag_log);
+ if (status && status != -EOPNOTSUPP) {
+ ath10k_warn(ar, "set traget log mode faileds: %d\n", status);
+ goto err_hif_stop;
+ }
+
return 0;
err_hif_stop:
@@ -3105,8 +3173,10 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
init_completion(&ar->install_key_done);
init_completion(&ar->vdev_setup_done);
+ init_completion(&ar->vdev_delete_done);
init_completion(&ar->thermal.wmi_sync);
init_completion(&ar->bss_survey_done);
+ init_completion(&ar->peer_delete_done);
INIT_DELAYED_WORK(&ar->scan.timeout, ath10k_scan_timeout_work);
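The new vdev_delete_done and peer_delete_done completions follow the standard kernel completion pattern: re-initialize before issuing the WMI delete, let the WMI event handler signal completion, and block the caller with a timeout. A minimal sketch of that pattern, with hypothetical names (demo_ctx, demo_issue_delete, demo_on_delete_resp) standing in for the driver's WMI plumbing:

    #include <linux/completion.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>

    struct demo_ctx {
            struct completion delete_done;  /* init_completion() at create time */
    };

    /* event side: firmware acknowledged the delete */
    static void demo_on_delete_resp(struct demo_ctx *ctx)
    {
            complete(&ctx->delete_done);
    }

    /* command side: issue the delete and wait up to 5 s for the response */
    static int demo_issue_delete(struct demo_ctx *ctx)
    {
            unsigned long time_left;

            reinit_completion(&ctx->delete_done);
            /* ... send the WMI vdev/peer delete command here ... */
            time_left = wait_for_completion_timeout(&ctx->delete_done, 5 * HZ);
            if (!time_left)
                    return -ETIMEDOUT;
            return 0;
    }

ath10k_remove_interface() and ath10k_peer_delete() only take this path when the firmware advertises WMI_SERVICE_SYNC_DELETE_CMDS.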
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index e35aae5146f1..4d7db07db6ba 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -2,7 +2,7 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _CORE_H_
@@ -196,7 +196,7 @@ struct ath10k_fw_extd_stats_peer {
struct list_head list;
u8 peer_macaddr[ETH_ALEN];
- u32 rx_duration;
+ u64 rx_duration;
};
struct ath10k_fw_stats_vdev {
@@ -400,6 +400,14 @@ struct ath10k_peer {
/* protected by ar->data_lock */
struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1];
+ union htt_rx_pn_t tids_last_pn[ATH10K_TXRX_NUM_EXT_TIDS];
+ bool tids_last_pn_valid[ATH10K_TXRX_NUM_EXT_TIDS];
+ union htt_rx_pn_t frag_tids_last_pn[ATH10K_TXRX_NUM_EXT_TIDS];
+ u32 frag_tids_seq[ATH10K_TXRX_NUM_EXT_TIDS];
+ struct {
+ enum htt_security_types sec_type;
+ int pn_len;
+ } rx_pn[ATH10K_HTT_TXRX_PEER_SECURITY_MAX];
};
struct ath10k_txq {
@@ -506,7 +514,8 @@ struct ath10k_sta {
u32 peer_ps_state;
};
-#define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5 * HZ)
+#define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5 * HZ)
+#define ATH10K_VDEV_DELETE_TIMEOUT_HZ (5 * HZ)
enum ath10k_beacon_state {
ATH10K_BEACON_SCHEDULED = 0,
@@ -571,6 +580,10 @@ struct ath10k_vif {
struct work_struct ap_csa_work;
struct delayed_work connection_loss_work;
struct cfg80211_bitrate_mask bitrate_mask;
+
+ /* For setting VHT peer fixed rate, protected by conf_mutex */
+ int vht_num_rates;
+ u8 vht_pfr;
};
struct ath10k_vif_iter {
@@ -614,6 +627,7 @@ struct ath10k_debug {
bool fw_stats_done;
unsigned long htt_stats_mask;
+ unsigned long reset_htt_stats;
struct delayed_work htt_stats_dwork;
struct ath10k_dfs_stats dfs_stats;
struct ath_dfs_pool_stats dfs_pool_stats;
@@ -631,6 +645,7 @@ struct ath10k_debug {
u32 nf_cal_period;
void *cal_data;
u32 enable_extd_tx_stats;
+ u8 fw_dbglog_mode;
};
enum ath10k_state {
@@ -761,6 +776,9 @@ enum ath10k_fw_features {
/* Firmware sends only one chan_info event per channel */
ATH10K_FW_FEATURE_SINGLE_CHAN_INFO_PER_CHANNEL = 20,
+ /* Firmware allows setting peer fixed rate */
+ ATH10K_FW_FEATURE_PEER_FIXED_RATE = 21,
+
/* keep last */
ATH10K_FW_FEATURE_COUNT,
};
@@ -919,6 +937,7 @@ struct ath10k_bus_params {
u32 chip_id;
enum ath10k_dev_type dev_type;
bool link_can_suspend;
+ bool hl_msdu_ids;
};
struct ath10k {
@@ -1055,6 +1074,7 @@ struct ath10k {
int last_wmi_vdev_start_status;
struct completion vdev_setup_done;
+ struct completion vdev_delete_done;
struct workqueue_struct *workqueue;
/* Auxiliary workqueue */
@@ -1189,6 +1209,7 @@ struct ath10k {
struct ath10k_radar_found_info last_radar_info;
struct work_struct radar_confirmation_work;
struct ath10k_bus_params bus_param;
+ struct completion peer_delete_done;
/* must be last */
u8 drv_priv[0] __aligned(sizeof(void *));
diff --git a/drivers/net/wireless/ath/ath10k/coredump.c b/drivers/net/wireless/ath/ath10k/coredump.c
index 45a355fb62b9..b6d2932383cf 100644
--- a/drivers/net/wireless/ath/ath10k/coredump.c
+++ b/drivers/net/wireless/ath/ath10k/coredump.c
@@ -1192,8 +1192,8 @@ static struct ath10k_dump_file_data *ath10k_coredump_build(struct ath10k *ar)
if (test_bit(ATH10K_FW_CRASH_DUMP_CE_DATA, &ath10k_coredump_mask)) {
dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar);
dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_CE_DATA);
- dump_tlv->tlv_len = cpu_to_le32(sizeof(*ce_hdr) +
- CE_COUNT * sizeof(ce_hdr->entries[0]));
+ dump_tlv->tlv_len = cpu_to_le32(struct_size(ce_hdr, entries,
+ CE_COUNT));
ce_hdr = (struct ath10k_ce_crash_hdr *)(dump_tlv->tlv_data);
ce_hdr->ce_count = cpu_to_le32(CE_COUNT);
memset(ce_hdr->reserved, 0, sizeof(ce_hdr->reserved));
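The coredump change replaces open-coded sizeof arithmetic for the CE TLV length with struct_size() from <linux/overflow.h>, which computes the size of a structure ending in a flexible array and saturates to SIZE_MAX instead of silently wrapping on overflow. A small illustration of the equivalence; the structure here is illustrative, not the driver's:

    #include <linux/overflow.h>
    #include <linux/types.h>

    struct demo_hdr {
            __le32 count;
            u32 entries[];          /* flexible array member */
    };

    /* struct_size(hdr, entries, n) == sizeof(*hdr) + n * sizeof(hdr->entries[0]),
     * with the multiply/add checked against overflow
     */
    static size_t demo_hdr_bytes(struct demo_hdr *hdr, size_t n)
    {
            return struct_size(hdr, entries, n);
    }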
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 32d967a31c65..bd2b5628f850 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -305,6 +305,9 @@ void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb)
if (is_end)
ar->debug.fw_stats_done = true;
+ if (stats.extended)
+ ar->debug.fw_stats.extended = true;
+
is_started = !list_empty(&ar->debug.fw_stats.pdevs);
if (is_started && !is_end) {
@@ -873,7 +876,7 @@ static int ath10k_debug_htt_stats_req(struct ath10k *ar)
cookie = get_jiffies_64();
ret = ath10k_htt_h2t_stats_req(&ar->htt, ar->debug.htt_stats_mask,
- cookie);
+ ar->debug.reset_htt_stats, cookie);
if (ret) {
ath10k_warn(ar, "failed to send htt stats request: %d\n", ret);
return ret;
@@ -922,8 +925,8 @@ static ssize_t ath10k_write_htt_stats_mask(struct file *file,
if (ret)
return ret;
- /* max 8 bit masks (for now) */
- if (mask > 0xff)
+ /* max 17 bit masks (for now) */
+ if (mask > HTT_STATS_BIT_MASK)
return -E2BIG;
mutex_lock(&ar->conf_mutex);
@@ -2469,6 +2472,44 @@ static const struct file_operations fops_ps_state_enable = {
.llseek = default_llseek,
};
+static ssize_t ath10k_write_reset_htt_stats(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ unsigned long reset;
+ int ret;
+
+ ret = kstrtoul_from_user(user_buf, count, 0, &reset);
+ if (ret)
+ return ret;
+
+ if (reset == 0 || reset > 0x1ffff)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ar->debug.reset_htt_stats = reset;
+
+ ret = ath10k_debug_htt_stats_req(ar);
+ if (ret)
+ goto out;
+
+ ar->debug.reset_htt_stats = 0;
+ ret = count;
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct file_operations fops_reset_htt_stats = {
+ .write = ath10k_write_reset_htt_stats,
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .llseek = default_llseek,
+};
+
int ath10k_debug_create(struct ath10k *ar)
{
ar->debug.cal_data = vzalloc(ATH10K_DEBUG_CAL_DATA_LEN);
@@ -2609,6 +2650,9 @@ int ath10k_debug_register(struct ath10k *ar)
debugfs_create_file("ps_state_enable", 0600, ar->debug.debugfs_phy, ar,
&fops_ps_state_enable);
+ debugfs_create_file("reset_htt_stats", 0200, ar->debug.debugfs_phy, ar,
+ &fops_reset_htt_stats);
+
return 0;
}
@@ -2620,8 +2664,8 @@ void ath10k_debug_unregister(struct ath10k *ar)
#endif /* CONFIG_ATH10K_DEBUGFS */
#ifdef CONFIG_ATH10K_DEBUG
-void ath10k_dbg(struct ath10k *ar, enum ath10k_debug_mask mask,
- const char *fmt, ...)
+void __ath10k_dbg(struct ath10k *ar, enum ath10k_debug_mask mask,
+ const char *fmt, ...)
{
struct va_format vaf;
va_list args;
@@ -2638,7 +2682,7 @@ void ath10k_dbg(struct ath10k *ar, enum ath10k_debug_mask mask,
va_end(args);
}
-EXPORT_SYMBOL(ath10k_dbg);
+EXPORT_SYMBOL(__ath10k_dbg);
void ath10k_dbg_dump(struct ath10k *ar,
enum ath10k_debug_mask mask,
@@ -2651,7 +2695,7 @@ void ath10k_dbg_dump(struct ath10k *ar,
if (ath10k_debug_mask & mask) {
if (msg)
- ath10k_dbg(ar, mask, "%s\n", msg);
+ __ath10k_dbg(ar, mask, "%s\n", msg);
for (ptr = buf; (ptr - buf) < len; ptr += 16) {
linebuflen = 0;
diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h
index db78e855a80f..82f7eb8583d9 100644
--- a/drivers/net/wireless/ath/ath10k/debug.h
+++ b/drivers/net/wireless/ath/ath10k/debug.h
@@ -71,6 +71,9 @@ struct ath10k_pktlog_hdr {
/* FIXME: How to calculate the buffer size sanely? */
#define ATH10K_FW_STATS_BUF_SIZE (1024 * 1024)
+#define ATH10K_TX_POWER_MAX_VAL 70
+#define ATH10K_TX_POWER_MIN_VAL 0
+
extern unsigned int ath10k_debug_mask;
__printf(2, 3) void ath10k_info(struct ath10k *ar, const char *fmt, ...);
@@ -240,18 +243,18 @@ void ath10k_sta_update_rx_tid_stats_ampdu(struct ath10k *ar,
#endif /* CONFIG_MAC80211_DEBUGFS */
#ifdef CONFIG_ATH10K_DEBUG
-__printf(3, 4) void ath10k_dbg(struct ath10k *ar,
- enum ath10k_debug_mask mask,
- const char *fmt, ...);
+__printf(3, 4) void __ath10k_dbg(struct ath10k *ar,
+ enum ath10k_debug_mask mask,
+ const char *fmt, ...);
void ath10k_dbg_dump(struct ath10k *ar,
enum ath10k_debug_mask mask,
const char *msg, const char *prefix,
const void *buf, size_t len);
#else /* CONFIG_ATH10K_DEBUG */
-static inline int ath10k_dbg(struct ath10k *ar,
- enum ath10k_debug_mask dbg_mask,
- const char *fmt, ...)
+static inline int __ath10k_dbg(struct ath10k *ar,
+ enum ath10k_debug_mask dbg_mask,
+ const char *fmt, ...)
{
return 0;
}
@@ -263,4 +266,14 @@ static inline void ath10k_dbg_dump(struct ath10k *ar,
{
}
#endif /* CONFIG_ATH10K_DEBUG */
+
+/* Avoid calling __ath10k_dbg() if the debug_mask bit is not set and
+ * tracing is disabled.
+ */
+#define ath10k_dbg(ar, dbg_mask, fmt, ...) \
+do { \
+ if ((ath10k_debug_mask & dbg_mask) || \
+ trace_ath10k_log_dbg_enabled()) \
+ __ath10k_dbg(ar, dbg_mask, fmt, ##__VA_ARGS__); \
+} while (0)
#endif /* _DEBUG_H_ */
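Since ath10k_dbg() is now a macro, call sites stay exactly as they were, but __ath10k_dbg() and the evaluation of its arguments are skipped unless the corresponding bit is set in ath10k_debug_mask or the ath10k_log_dbg tracepoint is enabled. A typical call site, with an illustrative message:

    /* expands to a no-op when ATH10K_DBG_BOOT is masked off and tracing is idle */
    ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot uart configuration done\n");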
diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
index c704ae371c4d..42931a669b02 100644
--- a/drivers/net/wireless/ath/ath10k/debugfs_sta.c
+++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
@@ -663,6 +663,13 @@ static ssize_t ath10k_dbg_sta_dump_tx_stats(struct file *file,
mutex_lock(&ar->conf_mutex);
+ if (!arsta->tx_stats) {
+ ath10k_warn(ar, "failed to get tx stats");
+ mutex_unlock(&ar->conf_mutex);
+ kfree(buf);
+ return 0;
+ }
+
spin_lock_bh(&ar->data_lock);
for (k = 0; k < ATH10K_STATS_TYPE_MAX; k++) {
for (j = 0; j < ATH10K_COUNTER_TYPE_MAX; j++) {
diff --git a/drivers/net/wireless/ath/ath10k/hif.h b/drivers/net/wireless/ath/ath10k/hif.h
index fe5417962f40..496ee34a4d78 100644
--- a/drivers/net/wireless/ath/ath10k/hif.h
+++ b/drivers/net/wireless/ath/ath10k/hif.h
@@ -12,6 +12,12 @@
#include "bmi.h"
#include "debug.h"
+/* Types of fw logging mode */
+enum ath_dbg_mode {
+ ATH10K_ENABLE_FW_LOG_DIAG,
+ ATH10K_ENABLE_FW_LOG_CE,
+};
+
struct ath10k_hif_sg_item {
u16 transfer_id;
void *transfer_context; /* NULL = tx completion callback not called */
@@ -88,6 +94,7 @@ struct ath10k_hif_ops {
int (*get_target_info)(struct ath10k *ar,
struct bmi_target_info *target_info);
+ int (*set_target_log_mode)(struct ath10k *ar, u8 fw_log_mode);
};
static inline int ath10k_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
@@ -230,4 +237,12 @@ static inline int ath10k_hif_get_target_info(struct ath10k *ar,
return ar->hif.ops->get_target_info(ar, tgt_info);
}
+static inline int ath10k_hif_set_target_log_mode(struct ath10k *ar,
+ u8 fw_log_mode)
+{
+ if (!ar->hif.ops->set_target_log_mode)
+ return -EOPNOTSUPP;
+
+ return ar->hif.ops->set_target_log_mode(ar, fw_log_mode);
+}
#endif /* _HIF_H_ */
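set_target_log_mode follows the driver's optional-op convention: a bus that does not provide the hook makes ath10k_hif_set_target_log_mode() return -EOPNOTSUPP, which callers such as ath10k_core_start() treat as 'feature not available' rather than a failure. A hedged sketch of how a bus backend might wire up the op; the handler and ops names here are hypothetical:

    static int demo_bus_set_target_log_mode(struct ath10k *ar, u8 fw_log_mode)
    {
            /* forward the requested ath_dbg_mode value to the firmware transport */
            return 0;
    }

    static const struct ath10k_hif_ops demo_bus_hif_ops = {
            /* ... mandatory ops elided ... */
            .set_target_log_mode = demo_bus_set_target_log_mode,
    };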
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index 805a7f8a04f2..1d4d1a1992fe 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -73,6 +73,7 @@ static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
struct ath10k_htc_hdr *hdr;
hdr = (struct ath10k_htc_hdr *)skb->data;
+ memset(hdr, 0, sizeof(struct ath10k_htc_hdr));
hdr->eid = ep->eid;
hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr));
diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c
index d235ff3098e8..7b75200ceae5 100644
--- a/drivers/net/wireless/ath/ath10k/htt.c
+++ b/drivers/net/wireless/ath/ath10k/htt.c
@@ -257,7 +257,7 @@ int ath10k_htt_setup(struct ath10k_htt *htt)
return status;
}
- status = htt->tx_ops->htt_h2t_aggr_cfg_msg(htt,
+ status = ath10k_htt_h2t_aggr_cfg_msg(htt,
htt->max_num_ampdu,
htt->max_num_amsdu);
if (status) {
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 4cee5492abc8..30c080094af1 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -315,6 +315,7 @@ struct htt_stats_req {
} __packed;
#define HTT_STATS_REQ_CFG_STAT_TYPE_INVALID 0xff
+#define HTT_STATS_BIT_MASK GENMASK(16, 0)
/*
* htt_oob_sync_req - request out-of-band sync
@@ -733,6 +734,20 @@ struct htt_rx_indication_hl {
struct htt_rx_indication_mpdu_range mpdu_ranges[0];
} __packed;
+struct htt_hl_rx_desc {
+ __le32 info;
+ __le32 pn_31_0;
+ union {
+ struct {
+ __le16 pn_47_32;
+ __le16 pn_63_48;
+ } pn16;
+ __le32 pn_63_32;
+ } u0;
+ __le32 pn_95_64;
+ __le32 pn_127_96;
+} __packed;
+
static inline struct htt_rx_indication_mpdu_range *
htt_rx_ind_get_mpdu_ranges(struct htt_rx_indication *rx_ind)
{
@@ -790,6 +805,21 @@ struct htt_rx_peer_unmap {
__le16 peer_id;
} __packed;
+enum htt_txrx_sec_cast_type {
+ HTT_TXRX_SEC_MCAST = 0,
+ HTT_TXRX_SEC_UCAST
+};
+
+enum htt_rx_pn_check_type {
+ HTT_RX_NON_PN_CHECK = 0,
+ HTT_RX_PN_CHECK
+};
+
+enum htt_rx_tkip_demic_type {
+ HTT_RX_NON_TKIP_MIC = 0,
+ HTT_RX_TKIP_MIC
+};
+
enum htt_security_types {
HTT_SECURITY_NONE,
HTT_SECURITY_WEP128,
@@ -803,6 +833,9 @@ enum htt_security_types {
HTT_NUM_SECURITY_TYPES /* keep this last! */
};
+#define ATH10K_HTT_TXRX_PEER_SECURITY_MAX 2
+#define ATH10K_TXRX_NUM_EXT_TIDS 19
+
enum htt_security_flags {
#define HTT_SECURITY_TYPE_MASK 0x7F
#define HTT_SECURITY_TYPE_LSB 0
@@ -1010,6 +1043,11 @@ struct htt_rx_fragment_indication {
u8 fw_msdu_rx_desc[0];
} __packed;
+#define ATH10K_IEEE80211_EXTIV BIT(5)
+#define ATH10K_IEEE80211_TKIP_MICLEN 8 /* trailing MIC */
+
+#define HTT_RX_FRAG_IND_INFO0_HEADER_LEN 16
+
#define HTT_RX_FRAG_IND_INFO0_EXT_TID_MASK 0x1F
#define HTT_RX_FRAG_IND_INFO0_EXT_TID_LSB 0
#define HTT_RX_FRAG_IND_INFO0_FLUSH_VALID_MASK 0x20
@@ -2048,6 +2086,19 @@ static inline void ath10k_htt_free_txbuff(struct ath10k_htt *htt)
htt->tx_ops->htt_free_txbuff(htt);
}
+static inline int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
+ u8 max_subfrms_ampdu,
+ u8 max_subfrms_amsdu)
+{
+ if (!htt->tx_ops->htt_h2t_aggr_cfg_msg)
+ return -EOPNOTSUPP;
+
+ return htt->tx_ops->htt_h2t_aggr_cfg_msg(htt,
+ max_subfrms_ampdu,
+ max_subfrms_amsdu);
+}
+
struct ath10k_htt_rx_ops {
size_t (*htt_get_rx_ring_size)(struct ath10k_htt *htt);
void (*htt_config_paddrs_ring)(struct ath10k_htt *htt, void *vaddr);
@@ -2055,6 +2106,9 @@ struct ath10k_htt_rx_ops {
int idx);
void* (*htt_get_vaddr_ring)(struct ath10k_htt *htt);
void (*htt_reset_paddrs_ring)(struct ath10k_htt *htt, int idx);
+ bool (*htt_rx_proc_rx_frag_ind)(struct ath10k_htt *htt,
+ struct htt_rx_fragment_indication *rx,
+ struct sk_buff *skb);
};
static inline size_t ath10k_htt_get_rx_ring_size(struct ath10k_htt *htt)
@@ -2094,6 +2148,16 @@ static inline void ath10k_htt_reset_paddrs_ring(struct ath10k_htt *htt, int idx)
htt->rx_ops->htt_reset_paddrs_ring(htt, idx);
}
+static inline bool ath10k_htt_rx_proc_rx_frag_ind(struct ath10k_htt *htt,
+ struct htt_rx_fragment_indication *rx,
+ struct sk_buff *skb)
+{
+ if (!htt->rx_ops->htt_rx_proc_rx_frag_ind)
+ return true;
+
+ return htt->rx_ops->htt_rx_proc_rx_frag_ind(htt, rx, skb);
+}
+
#define RX_HTT_HDR_STATUS_LEN 64
/* This structure layout is programmed via rx ring setup
@@ -2128,10 +2192,8 @@ struct htt_rx_desc {
#define HTT_RX_DESC_HL_INFO_ENCRYPTED_LSB 12
#define HTT_RX_DESC_HL_INFO_CHAN_INFO_PRESENT_MASK 0x00002000
#define HTT_RX_DESC_HL_INFO_CHAN_INFO_PRESENT_LSB 13
-#define HTT_RX_DESC_HL_INFO_MCAST_BCAST_MASK 0x00008000
-#define HTT_RX_DESC_HL_INFO_MCAST_BCAST_LSB 15
-#define HTT_RX_DESC_HL_INFO_FRAGMENT_MASK 0x00010000
-#define HTT_RX_DESC_HL_INFO_FRAGMENT_LSB 16
+#define HTT_RX_DESC_HL_INFO_MCAST_BCAST_MASK 0x00010000
+#define HTT_RX_DESC_HL_INFO_MCAST_BCAST_LSB 16
#define HTT_RX_DESC_HL_INFO_KEY_ID_OCT_MASK 0x01fe0000
#define HTT_RX_DESC_HL_INFO_KEY_ID_OCT_LSB 17
@@ -2195,10 +2257,8 @@ void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb);
void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt);
-int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie);
-int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
- u8 max_subfrms_ampdu,
- u8 max_subfrms_amsdu);
+int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u32 mask, u32 reset_mask,
+ u64 cookie);
void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb);
int ath10k_htt_tx_fetch_resp(struct ath10k *ar,
__le32 token,
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 1acc622d2183..83a7fb68fd24 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -2061,9 +2061,91 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
return 0;
}
+static void ath10k_htt_rx_mpdu_desc_pn_hl(struct htt_hl_rx_desc *rx_desc,
+ union htt_rx_pn_t *pn,
+ int pn_len_bits)
+{
+ switch (pn_len_bits) {
+ case 48:
+ pn->pn48 = __le32_to_cpu(rx_desc->pn_31_0) +
+ ((u64)(__le32_to_cpu(rx_desc->u0.pn_63_32) & 0xFFFF) << 32);
+ break;
+ case 24:
+ pn->pn24 = __le32_to_cpu(rx_desc->pn_31_0);
+ break;
+ }
+}
+
+static bool ath10k_htt_rx_pn_cmp48(union htt_rx_pn_t *new_pn,
+ union htt_rx_pn_t *old_pn)
+{
+ return ((new_pn->pn48 & 0xffffffffffffULL) <=
+ (old_pn->pn48 & 0xffffffffffffULL));
+}
+
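The replay check keeps only the low 48 bits of the packet number and drops an MPDU whose PN is not strictly greater than the last accepted one. A standalone illustration of that comparison with made-up values:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* replay if the new 48-bit PN is not strictly greater than the old one */
    static bool pn48_is_replay(uint64_t new_pn, uint64_t old_pn)
    {
            return (new_pn & 0xffffffffffffULL) <= (old_pn & 0xffffffffffffULL);
    }

    int main(void)
    {
            printf("%d\n", pn48_is_replay(0x1001, 0x1000));  /* 0: accepted */
            printf("%d\n", pn48_is_replay(0x1000, 0x1000));  /* 1: replay, dropped */
            return 0;
    }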
+static bool ath10k_htt_rx_pn_check_replay_hl(struct ath10k *ar,
+ struct ath10k_peer *peer,
+ struct htt_rx_indication_hl *rx)
+{
+ bool last_pn_valid, pn_invalid = false;
+ enum htt_txrx_sec_cast_type sec_index;
+ enum htt_security_types sec_type;
+ union htt_rx_pn_t new_pn = {0};
+ struct htt_hl_rx_desc *rx_desc;
+ union htt_rx_pn_t *last_pn;
+ u32 rx_desc_info, tid;
+ int num_mpdu_ranges;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ if (!peer)
+ return false;
+
+ if (!(rx->fw_desc.flags & FW_RX_DESC_FLAGS_FIRST_MSDU))
+ return false;
+
+ num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
+ HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
+
+ rx_desc = (struct htt_hl_rx_desc *)&rx->mpdu_ranges[num_mpdu_ranges];
+ rx_desc_info = __le32_to_cpu(rx_desc->info);
+
+ if (!MS(rx_desc_info, HTT_RX_DESC_HL_INFO_ENCRYPTED))
+ return false;
+
+ tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);
+ last_pn_valid = peer->tids_last_pn_valid[tid];
+ last_pn = &peer->tids_last_pn[tid];
+
+ if (MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST))
+ sec_index = HTT_TXRX_SEC_MCAST;
+ else
+ sec_index = HTT_TXRX_SEC_UCAST;
+
+ sec_type = peer->rx_pn[sec_index].sec_type;
+ ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len);
+
+ if (sec_type != HTT_SECURITY_AES_CCMP &&
+ sec_type != HTT_SECURITY_TKIP &&
+ sec_type != HTT_SECURITY_TKIP_NOMIC)
+ return false;
+
+ if (last_pn_valid)
+ pn_invalid = ath10k_htt_rx_pn_cmp48(&new_pn, last_pn);
+ else
+ peer->tids_last_pn_valid[tid] = 1;
+
+ if (!pn_invalid)
+ last_pn->pn48 = new_pn.pn48;
+
+ return pn_invalid;
+}
+
static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt,
struct htt_rx_indication_hl *rx,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ enum htt_rx_pn_check_type check_pn_type,
+ enum htt_rx_tkip_demic_type tkip_mic_type)
{
struct ath10k *ar = htt->ar;
struct ath10k_peer *peer;
@@ -2076,13 +2158,14 @@ static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt,
int num_mpdu_ranges;
size_t tot_hdr_len;
struct ieee80211_channel *ch;
+ bool pn_invalid;
peer_id = __le16_to_cpu(rx->hdr.peer_id);
spin_lock_bh(&ar->data_lock);
peer = ath10k_peer_find_by_id(ar, peer_id);
spin_unlock_bh(&ar->data_lock);
- if (!peer)
+ if (!peer && peer_id != HTT_INVALID_PEERID)
ath10k_warn(ar, "Got RX ind from invalid peer: %u\n", peer_id);
num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
@@ -2101,12 +2184,22 @@ static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt,
num_mpdu_ranges);
if (mpdu_ranges->mpdu_range_status !=
- HTT_RX_IND_MPDU_STATUS_OK) {
+ HTT_RX_IND_MPDU_STATUS_OK &&
+ mpdu_ranges->mpdu_range_status !=
+ HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR) {
ath10k_warn(ar, "MPDU range status: %d\n",
mpdu_ranges->mpdu_range_status);
goto err;
}
+ if (check_pn_type == HTT_RX_PN_CHECK) {
+ spin_lock_bh(&ar->data_lock);
+ pn_invalid = ath10k_htt_rx_pn_check_replay_hl(ar, peer, rx);
+ spin_unlock_bh(&ar->data_lock);
+ if (pn_invalid)
+ goto err;
+ }
+
/* Strip off all headers before the MAC header before delivery to
* mac80211
*/
@@ -2114,6 +2207,7 @@ static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt,
sizeof(rx->ppdu) + sizeof(rx->prefix) +
sizeof(rx->fw_desc) +
sizeof(*mpdu_ranges) * num_mpdu_ranges + rx_desc_len;
+
skb_pull(skb, tot_hdr_len);
hdr = (struct ieee80211_hdr *)skb->data;
@@ -2162,6 +2256,13 @@ static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt,
RX_FLAG_MMIC_STRIPPED;
}
+ if (tkip_mic_type == HTT_RX_TKIP_MIC)
+ rx_status->flag &= ~(RX_FLAG_IV_STRIPPED |
+ RX_FLAG_MMIC_STRIPPED);
+
+ if (mpdu_ranges->mpdu_range_status == HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR)
+ rx_status->flag |= RX_FLAG_MMIC_ERROR;
+
ieee80211_rx_ni(ar->hw, skb);
/* We have delivered the skb to the upper layers (mac80211) so we
@@ -2175,6 +2276,231 @@ err:
return true;
}
+static int ath10k_htt_rx_frag_tkip_decap_nomic(struct sk_buff *skb,
+ u16 head_len,
+ u16 hdr_len)
+{
+ u8 *ivp, *orig_hdr;
+
+ orig_hdr = skb->data;
+ ivp = orig_hdr + hdr_len + head_len;
+
+ /* the ExtIV bit is always set to 1 for TKIP */
+ if (!(ivp[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV))
+ return -EINVAL;
+
+ memmove(orig_hdr + IEEE80211_TKIP_IV_LEN, orig_hdr, head_len + hdr_len);
+ skb_pull(skb, IEEE80211_TKIP_IV_LEN);
+ skb_trim(skb, skb->len - ATH10K_IEEE80211_TKIP_MICLEN);
+ return 0;
+}
+
+static int ath10k_htt_rx_frag_tkip_decap_withmic(struct sk_buff *skb,
+ u16 head_len,
+ u16 hdr_len)
+{
+ u8 *ivp, *orig_hdr;
+
+ orig_hdr = skb->data;
+ ivp = orig_hdr + hdr_len + head_len;
+
+ /* the ExtIV bit is always set to 1 for TKIP */
+ if (!(ivp[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV))
+ return -EINVAL;
+
+ memmove(orig_hdr + IEEE80211_TKIP_IV_LEN, orig_hdr, head_len + hdr_len);
+ skb_pull(skb, IEEE80211_TKIP_IV_LEN);
+ skb_trim(skb, skb->len - IEEE80211_TKIP_ICV_LEN);
+ return 0;
+}
+
+static int ath10k_htt_rx_frag_ccmp_decap(struct sk_buff *skb,
+ u16 head_len,
+ u16 hdr_len)
+{
+ u8 *ivp, *orig_hdr;
+
+ orig_hdr = skb->data;
+ ivp = orig_hdr + hdr_len + head_len;
+
+ /* the ExtIV bit is always set to 1 for CCMP */
+ if (!(ivp[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV))
+ return -EINVAL;
+
+ skb_trim(skb, skb->len - IEEE80211_CCMP_MIC_LEN);
+ memmove(orig_hdr + IEEE80211_CCMP_HDR_LEN, orig_hdr, head_len + hdr_len);
+ skb_pull(skb, IEEE80211_CCMP_HDR_LEN);
+ return 0;
+}
+
+static int ath10k_htt_rx_frag_wep_decap(struct sk_buff *skb,
+ u16 head_len,
+ u16 hdr_len)
+{
+ u8 *orig_hdr;
+
+ orig_hdr = skb->data;
+
+ memmove(orig_hdr + IEEE80211_WEP_IV_LEN,
+ orig_hdr, head_len + hdr_len);
+ skb_pull(skb, IEEE80211_WEP_IV_LEN);
+ skb_trim(skb, skb->len - IEEE80211_WEP_ICV_LEN);
+ return 0;
+}
+
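The four decap helpers above all do the same thing: slide the 802.11 header forward over the per-packet IV/PN with memmove(), pull that many bytes off the front of the skb, and trim the trailing MIC/ICV. A standalone sketch of the buffer transformation on a flat array; the helper name and the idea of returning the new length are illustrative:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* strips sec_hdr_len bytes sitting right after the MAC header plus
     * sec_trailer_len bytes at the tail; afterwards the frame starts at
     * buf + sec_hdr_len and the new length is returned
     */
    static size_t demo_strip_sec(uint8_t *buf, size_t len, size_t hdr_len,
                                 size_t sec_hdr_len, size_t sec_trailer_len)
    {
            memmove(buf + sec_hdr_len, buf, hdr_len);   /* header overwrites IV */
            return len - sec_hdr_len - sec_trailer_len;
    }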
+static bool ath10k_htt_rx_proc_rx_frag_ind_hl(struct ath10k_htt *htt,
+ struct htt_rx_fragment_indication *rx,
+ struct sk_buff *skb)
+{
+ struct ath10k *ar = htt->ar;
+ enum htt_rx_tkip_demic_type tkip_mic = HTT_RX_NON_TKIP_MIC;
+ enum htt_txrx_sec_cast_type sec_index;
+ struct htt_rx_indication_hl *rx_hl;
+ enum htt_security_types sec_type;
+ u32 tid, frag, seq, rx_desc_info;
+ union htt_rx_pn_t new_pn = {0};
+ struct htt_hl_rx_desc *rx_desc;
+ u16 peer_id, sc, hdr_space;
+ union htt_rx_pn_t *last_pn;
+ struct ieee80211_hdr *hdr;
+ int ret, num_mpdu_ranges;
+ struct ath10k_peer *peer;
+ struct htt_resp *resp;
+ size_t tot_hdr_len;
+
+ resp = (struct htt_resp *)(skb->data + HTT_RX_FRAG_IND_INFO0_HEADER_LEN);
+ skb_pull(skb, HTT_RX_FRAG_IND_INFO0_HEADER_LEN);
+ skb_trim(skb, skb->len - FCS_LEN);
+
+ peer_id = __le16_to_cpu(rx->peer_id);
+ rx_hl = (struct htt_rx_indication_hl *)(&resp->rx_ind_hl);
+
+ spin_lock_bh(&ar->data_lock);
+ peer = ath10k_peer_find_by_id(ar, peer_id);
+ if (!peer) {
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid peer: %u\n", peer_id);
+ goto err;
+ }
+
+ num_mpdu_ranges = MS(__le32_to_cpu(rx_hl->hdr.info1),
+ HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
+
+ tot_hdr_len = sizeof(struct htt_resp_hdr) +
+ sizeof(rx_hl->hdr) +
+ sizeof(rx_hl->ppdu) +
+ sizeof(rx_hl->prefix) +
+ sizeof(rx_hl->fw_desc) +
+ sizeof(struct htt_rx_indication_mpdu_range) * num_mpdu_ranges;
+
+ tid = MS(rx_hl->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);
+ rx_desc = (struct htt_hl_rx_desc *)(skb->data + tot_hdr_len);
+ rx_desc_info = __le32_to_cpu(rx_desc->info);
+
+ if (!MS(rx_desc_info, HTT_RX_DESC_HL_INFO_ENCRYPTED)) {
+ spin_unlock_bh(&ar->data_lock);
+ return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb,
+ HTT_RX_NON_PN_CHECK,
+ HTT_RX_NON_TKIP_MIC);
+ }
+
+ hdr = (struct ieee80211_hdr *)((u8 *)rx_desc + rx_hl->fw_desc.len);
+
+ if (ieee80211_has_retry(hdr->frame_control))
+ goto err;
+
+ hdr_space = ieee80211_hdrlen(hdr->frame_control);
+ sc = __le16_to_cpu(hdr->seq_ctrl);
+ seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
+ frag = sc & IEEE80211_SCTL_FRAG;
+
+ sec_index = MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST) ?
+ HTT_TXRX_SEC_MCAST : HTT_TXRX_SEC_UCAST;
+ sec_type = peer->rx_pn[sec_index].sec_type;
+ ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len);
+
+ switch (sec_type) {
+ case HTT_SECURITY_TKIP:
+ tkip_mic = HTT_RX_TKIP_MIC;
+ ret = ath10k_htt_rx_frag_tkip_decap_withmic(skb,
+ tot_hdr_len +
+ rx_hl->fw_desc.len,
+ hdr_space);
+ if (ret)
+ goto err;
+ break;
+ case HTT_SECURITY_TKIP_NOMIC:
+ ret = ath10k_htt_rx_frag_tkip_decap_nomic(skb,
+ tot_hdr_len +
+ rx_hl->fw_desc.len,
+ hdr_space);
+ if (ret)
+ goto err;
+ break;
+ case HTT_SECURITY_AES_CCMP:
+ ret = ath10k_htt_rx_frag_ccmp_decap(skb,
+ tot_hdr_len + rx_hl->fw_desc.len,
+ hdr_space);
+ if (ret)
+ goto err;
+ break;
+ case HTT_SECURITY_WEP128:
+ case HTT_SECURITY_WEP104:
+ case HTT_SECURITY_WEP40:
+ ret = ath10k_htt_rx_frag_wep_decap(skb,
+ tot_hdr_len + rx_hl->fw_desc.len,
+ hdr_space);
+ if (ret)
+ goto err;
+ break;
+ default:
+ break;
+ }
+
+ resp = (struct htt_resp *)(skb->data);
+
+ if (sec_type != HTT_SECURITY_AES_CCMP &&
+ sec_type != HTT_SECURITY_TKIP &&
+ sec_type != HTT_SECURITY_TKIP_NOMIC) {
+ spin_unlock_bh(&ar->data_lock);
+ return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb,
+ HTT_RX_NON_PN_CHECK,
+ HTT_RX_NON_TKIP_MIC);
+ }
+
+ last_pn = &peer->frag_tids_last_pn[tid];
+
+ if (frag == 0) {
+ if (ath10k_htt_rx_pn_check_replay_hl(ar, peer, &resp->rx_ind_hl))
+ goto err;
+
+ last_pn->pn48 = new_pn.pn48;
+ peer->frag_tids_seq[tid] = seq;
+ } else if (sec_type == HTT_SECURITY_AES_CCMP) {
+ if (seq != peer->frag_tids_seq[tid])
+ goto err;
+
+ if (new_pn.pn48 != last_pn->pn48 + 1)
+ goto err;
+
+ last_pn->pn48 = new_pn.pn48;
+ last_pn = &peer->tids_last_pn[tid];
+ last_pn->pn48 = new_pn.pn48;
+ }
+
+ spin_unlock_bh(&ar->data_lock);
+
+ return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb,
+ HTT_RX_NON_PN_CHECK, tkip_mic);
+
+err:
+ spin_unlock_bh(&ar->data_lock);
+
+ /* Tell the caller that it must free the skb since we have not
+ * consumed it
+ */
+ return true;
+}
+
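The defragmentation rule implemented above: fragment 0 records the sequence number and PN for the TID, and every later CCMP fragment must carry the same sequence number and a PN exactly one higher than its predecessor, otherwise the chain is dropped. A simplified standalone version of that check (state kept per TID; the fragment-0 replay check against the per-TID history is omitted here):

    #include <stdbool.h>
    #include <stdint.h>

    struct demo_frag_state {
            uint64_t last_pn;       /* 48-bit PN of the previous fragment */
            uint16_t seq;           /* sequence number recorded at fragment 0 */
    };

    static bool demo_frag_ok(struct demo_frag_state *st, unsigned int frag_no,
                             uint16_t seq, uint64_t pn)
    {
            if (frag_no == 0) {
                    st->seq = seq;
                    st->last_pn = pn;
                    return true;
            }
            if (seq != st->seq || pn != st->last_pn + 1)
                    return false;   /* hole, reorder or replay: drop the chain */
            st->last_pn = pn;
            return true;
    }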
static void ath10k_htt_rx_proc_rx_ind_ll(struct ath10k_htt *htt,
struct htt_rx_indication *rx)
{
@@ -2193,9 +2519,7 @@ static void ath10k_htt_rx_proc_rx_ind_ll(struct ath10k_htt *htt,
mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);
ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
- rx, sizeof(*rx) +
- (sizeof(struct htt_rx_indication_mpdu_range) *
- num_mpdu_ranges));
+ rx, struct_size(rx, mpdu_ranges, num_mpdu_ranges));
for (i = 0; i < num_mpdu_ranges; i++)
mpdu_count += mpdu_ranges[i].mpdu_count;
@@ -2277,7 +2601,9 @@ static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
* Note that with only one concurrent reader and one concurrent
* writer, you don't need extra locking to use these macro.
*/
- if (!kfifo_put(&htt->txdone_fifo, tx_done)) {
+ if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) {
+ ath10k_txrx_tx_unref(htt, &tx_done);
+ } else if (!kfifo_put(&htt->txdone_fifo, tx_done)) {
ath10k_warn(ar, "txdone fifo overrun, msdu_id %d status %d\n",
tx_done.msdu_id, tx_done.status);
ath10k_txrx_tx_unref(htt, &tx_done);
@@ -2938,14 +3264,14 @@ ath10k_accumulate_per_peer_tx_stats(struct ath10k *ar,
#define STATS_OP_FMT(name) tx_stats->stats[ATH10K_STATS_TYPE_##name]
- if (txrate->flags == RATE_INFO_FLAGS_VHT_MCS) {
+ if (txrate->flags & RATE_INFO_FLAGS_VHT_MCS) {
STATS_OP_FMT(SUCC).vht[0][mcs] += pstats->succ_bytes;
STATS_OP_FMT(SUCC).vht[1][mcs] += pstats->succ_pkts;
STATS_OP_FMT(FAIL).vht[0][mcs] += pstats->failed_bytes;
STATS_OP_FMT(FAIL).vht[1][mcs] += pstats->failed_pkts;
STATS_OP_FMT(RETRY).vht[0][mcs] += pstats->retry_bytes;
STATS_OP_FMT(RETRY).vht[1][mcs] += pstats->retry_pkts;
- } else if (txrate->flags == RATE_INFO_FLAGS_MCS) {
+ } else if (txrate->flags & RATE_INFO_FLAGS_MCS) {
STATS_OP_FMT(SUCC).ht[0][ht_idx] += pstats->succ_bytes;
STATS_OP_FMT(SUCC).ht[1][ht_idx] += pstats->succ_pkts;
STATS_OP_FMT(FAIL).ht[0][ht_idx] += pstats->failed_bytes;
@@ -2966,7 +3292,7 @@ ath10k_accumulate_per_peer_tx_stats(struct ath10k *ar,
if (ATH10K_HW_AMPDU(pstats->flags)) {
tx_stats->ba_fails += ATH10K_HW_BA_FAIL(pstats->flags);
- if (txrate->flags == RATE_INFO_FLAGS_MCS) {
+ if (txrate->flags & RATE_INFO_FLAGS_MCS) {
STATS_OP_FMT(AMPDU).ht[0][ht_idx] +=
pstats->succ_bytes + pstats->retry_bytes;
STATS_OP_FMT(AMPDU).ht[1][ht_idx] +=
@@ -3265,6 +3591,51 @@ out:
rcu_read_unlock();
}
+static int ath10k_htt_rx_pn_len(enum htt_security_types sec_type)
+{
+ switch (sec_type) {
+ case HTT_SECURITY_TKIP:
+ case HTT_SECURITY_TKIP_NOMIC:
+ case HTT_SECURITY_AES_CCMP:
+ return 48;
+ default:
+ return 0;
+ }
+}
+
+static void ath10k_htt_rx_sec_ind_handler(struct ath10k *ar,
+ struct htt_security_indication *ev)
+{
+ enum htt_txrx_sec_cast_type sec_index;
+ enum htt_security_types sec_type;
+ struct ath10k_peer *peer;
+
+ spin_lock_bh(&ar->data_lock);
+
+ peer = ath10k_peer_find_by_id(ar, __le16_to_cpu(ev->peer_id));
+ if (!peer) {
+ ath10k_warn(ar, "failed to find peer id %d for security indication",
+ __le16_to_cpu(ev->peer_id));
+ goto out;
+ }
+
+ sec_type = MS(ev->flags, HTT_SECURITY_TYPE);
+
+ if (ev->flags & HTT_SECURITY_IS_UNICAST)
+ sec_index = HTT_TXRX_SEC_UCAST;
+ else
+ sec_index = HTT_TXRX_SEC_MCAST;
+
+ peer->rx_pn[sec_index].sec_type = sec_type;
+ peer->rx_pn[sec_index].pn_len = ath10k_htt_rx_pn_len(sec_type);
+
+ memset(peer->tids_last_pn_valid, 0, sizeof(peer->tids_last_pn_valid));
+ memset(peer->tids_last_pn, 0, sizeof(peer->tids_last_pn));
+
+out:
+ spin_unlock_bh(&ar->data_lock);
+}
+
bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
struct ath10k_htt *htt = &ar->htt;
@@ -3296,7 +3667,9 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
return ath10k_htt_rx_proc_rx_ind_hl(htt,
&resp->rx_ind_hl,
- skb);
+ skb,
+ HTT_RX_PN_CHECK,
+ HTT_RX_NON_TKIP_MIC);
else
ath10k_htt_rx_proc_rx_ind_ll(htt, &resp->rx_ind);
break;
@@ -3358,6 +3731,7 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
struct ath10k *ar = htt->ar;
struct htt_security_indication *ev = &resp->security_indication;
+ ath10k_htt_rx_sec_ind_handler(ar, ev);
ath10k_dbg(ar, ATH10K_DBG_HTT,
"sec ind peer_id %d unicast %d type %d\n",
__le16_to_cpu(ev->peer_id),
@@ -3370,6 +3744,10 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
skb->data, skb->len);
atomic_inc(&htt->num_mpdus_ready);
+
+ return ath10k_htt_rx_proc_rx_frag_ind(htt,
+ &resp->rx_frag_ind,
+ skb);
break;
}
case HTT_T2H_MSG_TYPE_TEST:
@@ -3583,6 +3961,7 @@ static const struct ath10k_htt_rx_ops htt_rx_ops_64 = {
};
static const struct ath10k_htt_rx_ops htt_rx_ops_hl = {
+ .htt_rx_proc_rx_frag_ind = ath10k_htt_rx_proc_rx_frag_ind_hl,
};
void ath10k_htt_set_rx_ops(struct ath10k_htt *htt)
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index d8e9cc0bb772..2ef717f18795 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -580,7 +580,8 @@ int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
return 0;
}
-int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
+int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u32 mask, u32 reset_mask,
+ u64 cookie)
{
struct ath10k *ar = htt->ar;
struct htt_stats_req *req;
@@ -603,11 +604,11 @@ int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
memset(req, 0, sizeof(*req));
- /* currently we support only max 8 bit masks so no need to worry
+ /* currently we support only max 24 bit masks so no need to worry
* about endian support
*/
- req->upload_types[0] = mask;
- req->reset_types[0] = mask;
+ memcpy(req->upload_types, &mask, 3);
+ memcpy(req->reset_types, &reset_mask, 3);
req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID;
req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff);
req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32);
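upload_types/reset_types are 3-byte fields in the HTT stats request, so the 32-bit masks are copied with a 3-byte memcpy; on a little-endian host (the case the in-code comment relies on) those are exactly the low 24 bits. A standalone illustration with a hypothetical mask value:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            uint32_t mask = 0x0001ffff;     /* 17 valid stats bits */
            uint8_t types[3];

            memcpy(types, &mask, 3);        /* low 24 bits on little-endian */
            printf("%02x %02x %02x\n", types[0], types[1], types[2]);
            /* prints: ff ff 01 */
            return 0;
    }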
@@ -977,9 +978,9 @@ static int ath10k_htt_send_rx_ring_cfg_hl(struct ath10k_htt *htt)
return 0;
}
-int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
- u8 max_subfrms_ampdu,
- u8 max_subfrms_amsdu)
+static int ath10k_htt_h2t_aggr_cfg_msg_32(struct ath10k_htt *htt,
+ u8 max_subfrms_ampdu,
+ u8 max_subfrms_amsdu)
{
struct ath10k *ar = htt->ar;
struct htt_aggr_conf *aggr_conf;
@@ -1244,6 +1245,7 @@ static int ath10k_htt_tx_hl(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txm
u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
u8 flags0 = 0;
u16 flags1 = 0;
+ u16 msdu_id = 0;
data_len = msdu->len;
@@ -1291,6 +1293,23 @@ static int ath10k_htt_tx_hl(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txm
}
}
+ if (ar->bus_param.hl_msdu_ids) {
+ flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;
+ res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
+ if (res < 0) {
+ ath10k_err(ar, "msdu_id allocation failed %d\n", res);
+ goto out;
+ }
+ msdu_id = res;
+ }
+
+ /* As the msdu is freed both by mac80211 (in ieee80211_tx_status())
+ * and by ath10k (in ath10k_htt_htc_tx_complete()), take an extra
+ * reference on the skb to avoid a use-after-free and a double
+ * free.
+ */
+ skb_get(msdu);
+
skb_push(msdu, sizeof(*cmd_hdr));
skb_push(msdu, sizeof(*tx_desc));
cmd_hdr = (struct htt_cmd_hdr *)msdu->data;
@@ -1300,7 +1319,7 @@ static int ath10k_htt_tx_hl(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txm
tx_desc->flags0 = flags0;
tx_desc->flags1 = __cpu_to_le16(flags1);
tx_desc->len = __cpu_to_le16(data_len);
- tx_desc->id = 0;
+ tx_desc->id = __cpu_to_le16(msdu_id);
tx_desc->frags_paddr = 0; /* always zero */
/* Initialize peer_id to INVALID_PEER because this is NOT
* Reinjection path
@@ -1728,7 +1747,7 @@ static const struct ath10k_htt_tx_ops htt_tx_ops_32 = {
.htt_tx = ath10k_htt_tx_32,
.htt_alloc_txbuff = ath10k_htt_tx_alloc_cont_txbuf_32,
.htt_free_txbuff = ath10k_htt_tx_free_cont_txbuf_32,
- .htt_h2t_aggr_cfg_msg = ath10k_htt_h2t_aggr_cfg_msg,
+ .htt_h2t_aggr_cfg_msg = ath10k_htt_h2t_aggr_cfg_msg_32,
};
static const struct ath10k_htt_tx_ops htt_tx_ops_64 = {
@@ -1746,6 +1765,7 @@ static const struct ath10k_htt_tx_ops htt_tx_ops_hl = {
.htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_hl,
.htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_32,
.htt_tx = ath10k_htt_tx_hl,
+ .htt_h2t_aggr_cfg_msg = ath10k_htt_h2t_aggr_cfg_msg_32,
};
void ath10k_htt_set_tx_ops(struct ath10k_htt *htt)
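The extra skb_get() in ath10k_htt_tx_hl() exists because, on the HL path, the same msdu is now released twice: once by mac80211 via ieee80211_tx_status() and once by the driver from its HTC TX-completion handler. Taking one extra reference makes both releases legal. A minimal kernel-style sketch of the idea; the demo_* functions are stand-ins, not driver code:

    #include <linux/skbuff.h>

    static void demo_tx_prepare(struct sk_buff *msdu)
    {
            /* two independent owners will each drop one reference later */
            skb_get(msdu);          /* refcount 1 -> 2 */
            /* ... hand msdu to the transport ... */
    }

    static void demo_owner_done(struct sk_buff *msdu)
    {
            kfree_skb(msdu);        /* called once by each owner; frees on the
                                     * second call when the count hits zero */
    }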
diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c
index ad082b7d7643..c415e971735b 100644
--- a/drivers/net/wireless/ath/ath10k/hw.c
+++ b/drivers/net/wireless/ath/ath10k/hw.c
@@ -158,7 +158,7 @@ const struct ath10k_hw_values qca6174_values = {
};
const struct ath10k_hw_values qca99x0_values = {
- .rtc_state_val_on = 5,
+ .rtc_state_val_on = 7,
.ce_count = 12,
.msi_assign_ce_max = 12,
.num_target_ce_config_wlan = 10,
@@ -1153,6 +1153,10 @@ const struct ath10k_hw_ops qca6174_ops = {
.is_rssi_enable = ath10k_htt_tx_rssi_enable,
};
+const struct ath10k_hw_ops qca6174_sdio_ops = {
+ .enable_pll_clk = ath10k_hw_qca6174_enable_pll_clock,
+};
+
const struct ath10k_hw_ops wcn3990_ops = {
.tx_data_rssi_pad_bytes = ath10k_get_htt_tx_data_rssi_pad,
.is_rssi_enable = ath10k_htt_tx_rssi_enable_wcn3990,
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 71314999aa24..2ae57c1de7b5 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -24,6 +24,7 @@ enum ath10k_bus {
#define QCA988X_2_0_DEVICE_ID (0x003c)
#define QCA6164_2_1_DEVICE_ID (0x0041)
#define QCA6174_2_1_DEVICE_ID (0x003e)
+#define QCA6174_3_2_DEVICE_ID (0x0042)
#define QCA99X0_2_0_DEVICE_ID (0x0040)
#define QCA9888_2_0_DEVICE_ID (0x0056)
#define QCA9984_1_0_DEVICE_ID (0x0046)
@@ -151,6 +152,8 @@ enum qca9377_chip_id_rev {
#define ATH10K_FW_UTF_FILE "utf.bin"
#define ATH10K_FW_UTF_API2_FILE "utf-2.bin"
+#define ATH10K_FW_UTF_FILE_BASE "utf"
+
/* includes also the null byte */
#define ATH10K_FIRMWARE_MAGIC "QCA-ATH10K"
#define ATH10K_BOARD_MAGIC "QCA-ATH10K-BOARD"
@@ -606,6 +609,14 @@ struct ath10k_hw_params {
/* target supporting fw download via diag ce */
bool fw_diag_ce_download;
+
+ /* the uart pin still has to be set when uart printing is disabled,
+ * as a workaround for a firmware bug
+ */
+ bool uart_pin_workaround;
+
+ /* tx stats support over pktlog */
+ bool tx_stats_over_pktlog;
};
struct htt_rx_desc;
@@ -625,6 +636,7 @@ struct ath10k_hw_ops {
extern const struct ath10k_hw_ops qca988x_ops;
extern const struct ath10k_hw_ops qca99x0_ops;
extern const struct ath10k_hw_ops qca6174_ops;
+extern const struct ath10k_hw_ops qca6174_sdio_ops;
extern const struct ath10k_hw_ops wcn3990_ops;
extern const struct ath10k_hw_clk_params qca6174_clk[];
@@ -1095,6 +1107,7 @@ ath10k_is_rssi_enable(struct ath10k_hw_params *hw,
#define MBOX_CPU_INT_STATUS_ENABLE_ADDRESS 0x00000819
#define MBOX_CPU_INT_STATUS_ENABLE_BIT_LSB 0
#define MBOX_CPU_INT_STATUS_ENABLE_BIT_MASK 0x000000ff
+#define MBOX_CPU_STATUS_ENABLE_ASSERT_MASK 0x00000001
#define MBOX_ERROR_STATUS_ENABLE_ADDRESS 0x0000081a
#define MBOX_ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB 1
#define MBOX_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK 0x00000002
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 9c703d287333..e43a566eef77 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -693,6 +693,26 @@ ath10k_mac_get_any_chandef_iter(struct ieee80211_hw *hw,
*def = &conf->def;
}
+static void ath10k_wait_for_peer_delete_done(struct ath10k *ar, u32 vdev_id,
+ const u8 *addr)
+{
+ unsigned long time_left;
+ int ret;
+
+ if (test_bit(WMI_SERVICE_SYNC_DELETE_CMDS, ar->wmi.svc_map)) {
+ ret = ath10k_wait_for_peer_deleted(ar, vdev_id, addr);
+ if (ret) {
+ ath10k_warn(ar, "failed wait for peer deleted");
+ return;
+ }
+
+ time_left = wait_for_completion_timeout(&ar->peer_delete_done,
+ 5 * HZ);
+ if (!time_left)
+ ath10k_warn(ar, "Timeout in receiving peer delete response\n");
+ }
+}
+
static int ath10k_peer_create(struct ath10k *ar,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -737,7 +757,7 @@ static int ath10k_peer_create(struct ath10k *ar,
spin_unlock_bh(&ar->data_lock);
ath10k_warn(ar, "failed to find peer %pM on vdev %i after creation\n",
addr, vdev_id);
- ath10k_wmi_peer_delete(ar, vdev_id, addr);
+ ath10k_wait_for_peer_delete_done(ar, vdev_id, addr);
return -ENOENT;
}
@@ -819,6 +839,18 @@ static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
if (ret)
return ret;
+ if (test_bit(WMI_SERVICE_SYNC_DELETE_CMDS, ar->wmi.svc_map)) {
+ unsigned long time_left;
+
+ time_left = wait_for_completion_timeout
+ (&ar->peer_delete_done, 5 * HZ);
+
+ if (!time_left) {
+ ath10k_warn(ar, "Timeout in receiving peer delete response\n");
+ return -ETIMEDOUT;
+ }
+ }
+
ar->num_peers--;
return 0;
@@ -1011,6 +1043,7 @@ static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
arg.channel.max_antenna_gain = channel->max_antenna_gain * 2;
reinit_completion(&ar->vdev_setup_done);
+ reinit_completion(&ar->vdev_delete_done);
ret = ath10k_wmi_vdev_start(ar, &arg);
if (ret) {
@@ -1060,6 +1093,7 @@ static int ath10k_monitor_vdev_stop(struct ath10k *ar)
ar->monitor_vdev_id, ret);
reinit_completion(&ar->vdev_setup_done);
+ reinit_completion(&ar->vdev_delete_done);
ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
if (ret)
@@ -1401,6 +1435,7 @@ static int ath10k_vdev_stop(struct ath10k_vif *arvif)
lockdep_assert_held(&ar->conf_mutex);
reinit_completion(&ar->vdev_setup_done);
+ reinit_completion(&ar->vdev_delete_done);
ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
if (ret) {
@@ -1437,6 +1472,7 @@ static int ath10k_vdev_start_restart(struct ath10k_vif *arvif,
lockdep_assert_held(&ar->conf_mutex);
reinit_completion(&ar->vdev_setup_done);
+ reinit_completion(&ar->vdev_delete_done);
arg.vdev_id = arvif->vdev_id;
arg.dtim_period = arvif->dtim_period;
@@ -1630,6 +1666,10 @@ static int ath10k_mac_setup_prb_tmpl(struct ath10k_vif *arvif)
if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
return 0;
+ /* For mesh, probe response and beacon share the same template */
+ if (ieee80211_vif_is_mesh(vif))
+ return 0;
+
prb = ieee80211_proberesp_get(hw, vif);
if (!prb) {
ath10k_warn(ar, "failed to get probe resp template from mac80211\n");
@@ -5415,8 +5455,11 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
err_peer_delete:
if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
- arvif->vdev_type == WMI_VDEV_TYPE_IBSS)
+ arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
ath10k_wmi_peer_delete(ar, arvif->vdev_id, vif->addr);
+ ath10k_wait_for_peer_delete_done(ar, arvif->vdev_id,
+ vif->addr);
+ }
err_vdev_delete:
ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
@@ -5451,6 +5494,7 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
struct ath10k *ar = hw->priv;
struct ath10k_vif *arvif = (void *)vif->drv_priv;
struct ath10k_peer *peer;
+ unsigned long time_left;
int ret;
int i;
@@ -5481,6 +5525,8 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
ath10k_warn(ar, "failed to submit AP/IBSS self-peer removal on vdev %i: %d\n",
arvif->vdev_id, ret);
+ ath10k_wait_for_peer_delete_done(ar, arvif->vdev_id,
+ vif->addr);
kfree(arvif->u.ap.noa_data);
}
@@ -5492,6 +5538,15 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
ath10k_warn(ar, "failed to delete WMI vdev %i: %d\n",
arvif->vdev_id, ret);
+ if (test_bit(WMI_SERVICE_SYNC_DELETE_CMDS, ar->wmi.svc_map)) {
+ time_left = wait_for_completion_timeout(&ar->vdev_delete_done,
+ ATH10K_VDEV_DELETE_TIMEOUT_HZ);
+ if (time_left == 0) {
+ ath10k_warn(ar, "Timeout in receiving vdev delete response\n");
+ goto out;
+ }
+ }
+
/* Some firmware revisions don't notify host about self-peer removal
* until after associated vdev is deleted.
*/
@@ -5542,6 +5597,7 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
ath10k_mac_txq_unref(ar, vif->txq);
+out:
mutex_unlock(&ar->conf_mutex);
}
@@ -5588,8 +5644,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
struct cfg80211_chan_def def;
u32 vdev_param, pdev_param, slottime, preamble;
u16 bitrate, hw_value;
- u8 rate, basic_rate_idx;
- int rateidx, ret = 0, hw_rate_code;
+ u8 rate, basic_rate_idx, rateidx;
+ int ret = 0, hw_rate_code, mcast_rate;
enum nl80211_band band;
const struct ieee80211_supported_band *sband;
@@ -5776,7 +5832,11 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_MCAST_RATE &&
!ath10k_mac_vif_chan(arvif->vif, &def)) {
band = def.chan->band;
- rateidx = vif->bss_conf.mcast_rate[band] - 1;
+ mcast_rate = vif->bss_conf.mcast_rate[band];
+ if (mcast_rate > 0)
+ rateidx = mcast_rate - 1;
+ else
+ rateidx = ffs(vif->bss_conf.basic_rates) - 1;
if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY)
rateidx += ATH10K_MAC_FIRST_OFDM_RATE_IDX;
@@ -6350,6 +6410,41 @@ static void ath10k_mac_dec_num_stations(struct ath10k_vif *arvif,
ar->num_stations--;
}
+static int ath10k_sta_set_txpwr(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ int ret = 0;
+ s16 txpwr;
+
+ if (sta->txpwr.type == NL80211_TX_POWER_AUTOMATIC) {
+ txpwr = 0;
+ } else {
+ txpwr = sta->txpwr.power;
+ if (!txpwr)
+ return -EINVAL;
+ }
+
+ if (txpwr > ATH10K_TX_POWER_MAX_VAL || txpwr < ATH10K_TX_POWER_MIN_VAL)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ret = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
+ WMI_PEER_USE_FIXED_PWR, txpwr);
+ if (ret) {
+ ath10k_warn(ar, "failed to set tx power for station ret: %d\n",
+ ret);
+ goto out;
+ }
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
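In ath10k_sta_set_txpwr() the value 0 is reserved to mean "let the firmware choose", so NL80211_TX_POWER_AUTOMATIC maps to 0 and an explicit request of 0 is rejected as indistinguishable from automatic; explicit values must also fall inside the 0..70 window from debug.h (units as mac80211 reports them, dBm). A small standalone version of that mapping; names and the -1 error convention are illustrative:

    #include <stdbool.h>

    #define DEMO_TXPWR_MIN 0
    #define DEMO_TXPWR_MAX 70

    /* returns the value to program, or -1 for an invalid request */
    static int demo_map_txpwr(bool automatic, int requested)
    {
            if (automatic)
                    return 0;                       /* 0 = firmware decides */
            if (requested == 0 ||
                requested < DEMO_TXPWR_MIN || requested > DEMO_TXPWR_MAX)
                    return -1;
            return requested;
    }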
static int ath10k_sta_state(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -7099,18 +7194,23 @@ exit:
static bool
ath10k_mac_bitrate_mask_has_single_rate(struct ath10k *ar,
enum nl80211_band band,
- const struct cfg80211_bitrate_mask *mask)
+ const struct cfg80211_bitrate_mask *mask,
+ int *vht_num_rates)
{
int num_rates = 0;
- int i;
+ int i, tmp;
num_rates += hweight32(mask->control[band].legacy);
for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++)
num_rates += hweight8(mask->control[band].ht_mcs[i]);
- for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++)
- num_rates += hweight16(mask->control[band].vht_mcs[i]);
+ *vht_num_rates = 0;
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
+ tmp = hweight16(mask->control[band].vht_mcs[i]);
+ num_rates += tmp;
+ *vht_num_rates += tmp;
+ }
return num_rates == 1;
}
@@ -7168,7 +7268,7 @@ static int
ath10k_mac_bitrate_mask_get_single_rate(struct ath10k *ar,
enum nl80211_band band,
const struct cfg80211_bitrate_mask *mask,
- u8 *rate, u8 *nss)
+ u8 *rate, u8 *nss, bool vht_only)
{
int rate_idx;
int i;
@@ -7176,6 +7276,9 @@ ath10k_mac_bitrate_mask_get_single_rate(struct ath10k *ar,
u8 preamble;
u8 hw_rate;
+ if (vht_only)
+ goto next;
+
if (hweight32(mask->control[band].legacy) == 1) {
rate_idx = ffs(mask->control[band].legacy) - 1;
@@ -7209,6 +7312,7 @@ ath10k_mac_bitrate_mask_get_single_rate(struct ath10k *ar,
}
}
+next:
for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
if (hweight16(mask->control[band].vht_mcs[i]) == 1) {
*nss = i + 1;
@@ -7270,7 +7374,8 @@ static int ath10k_mac_set_fixed_rate_params(struct ath10k_vif *arvif,
static bool
ath10k_mac_can_set_bitrate_mask(struct ath10k *ar,
enum nl80211_band band,
- const struct cfg80211_bitrate_mask *mask)
+ const struct cfg80211_bitrate_mask *mask,
+ bool allow_pfr)
{
int i;
u16 vht_mcs;
@@ -7289,7 +7394,8 @@ ath10k_mac_can_set_bitrate_mask(struct ath10k *ar,
case BIT(10) - 1:
break;
default:
- ath10k_warn(ar, "refusing bitrate mask with missing 0-7 VHT MCS rates\n");
+ if (!allow_pfr)
+ ath10k_warn(ar, "refusing bitrate mask with missing 0-7 VHT MCS rates\n");
return false;
}
}
@@ -7297,6 +7403,26 @@ ath10k_mac_can_set_bitrate_mask(struct ath10k *ar,
return true;
}
+static bool ath10k_mac_set_vht_bitrate_mask_fixup(struct ath10k *ar,
+ struct ath10k_vif *arvif,
+ struct ieee80211_sta *sta)
+{
+ int err;
+ u8 rate = arvif->vht_pfr;
+
+ /* skip non vht and multiple rate peers */
+ if (!sta->vht_cap.vht_supported || arvif->vht_num_rates != 1)
+ return false;
+
+ err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
+ WMI_PEER_PARAM_FIXED_RATE, rate);
+ if (err)
+ ath10k_warn(ar, "failed to eanble STA %pM peer fixed rate: %d\n",
+ sta->addr, err);
+
+ return true;
+}
+
static void ath10k_mac_set_bitrate_mask_iter(void *data,
struct ieee80211_sta *sta)
{
@@ -7307,6 +7433,9 @@ static void ath10k_mac_set_bitrate_mask_iter(void *data,
if (arsta->arvif != arvif)
return;
+ if (ath10k_mac_set_vht_bitrate_mask_fixup(ar, arvif, sta))
+ return;
+
spin_lock_bh(&ar->data_lock);
arsta->changed |= IEEE80211_RC_SUPP_RATES_CHANGED;
spin_unlock_bh(&ar->data_lock);
@@ -7314,6 +7443,26 @@ static void ath10k_mac_set_bitrate_mask_iter(void *data,
ieee80211_queue_work(ar->hw, &arsta->update_wk);
}
+static void ath10k_mac_clr_bitrate_mask_iter(void *data,
+ struct ieee80211_sta *sta)
+{
+ struct ath10k_vif *arvif = data;
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ath10k *ar = arvif->ar;
+ int err;
+
+ /* clear vht peers only */
+ if (arsta->arvif != arvif || !sta->vht_cap.vht_supported)
+ return;
+
+ err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
+ WMI_PEER_PARAM_FIXED_RATE,
+ WMI_FIXED_RATE_NONE);
+ if (err)
+ ath10k_warn(ar, "failed to clear STA %pM peer fixed rate: %d\n",
+ sta->addr, err);
+}
+
static int ath10k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
const struct cfg80211_bitrate_mask *mask)
@@ -7330,6 +7479,9 @@ static int ath10k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
u8 ldpc;
int single_nss;
int ret;
+ int vht_num_rates, allow_pfr;
+ u8 vht_pfr = 0;
+ bool update_bitrate_mask = true;
if (ath10k_mac_vif_chan(vif, &def))
return -EPERM;
@@ -7343,9 +7495,21 @@ static int ath10k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
if (sgi == NL80211_TXRATE_FORCE_LGI)
return -EINVAL;
- if (ath10k_mac_bitrate_mask_has_single_rate(ar, band, mask)) {
+ allow_pfr = test_bit(ATH10K_FW_FEATURE_PEER_FIXED_RATE,
+ ar->normal_mode_fw.fw_file.fw_features);
+ if (allow_pfr) {
+ mutex_lock(&ar->conf_mutex);
+ ieee80211_iterate_stations_atomic(ar->hw,
+ ath10k_mac_clr_bitrate_mask_iter,
+ arvif);
+ mutex_unlock(&ar->conf_mutex);
+ }
+
+ if (ath10k_mac_bitrate_mask_has_single_rate(ar, band, mask,
+ &vht_num_rates)) {
ret = ath10k_mac_bitrate_mask_get_single_rate(ar, band, mask,
- &rate, &nss);
+ &rate, &nss,
+ false);
if (ret) {
ath10k_warn(ar, "failed to get single rate for vdev %i: %d\n",
arvif->vdev_id, ret);
@@ -7361,12 +7525,30 @@ static int ath10k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
max(ath10k_mac_max_ht_nss(ht_mcs_mask),
ath10k_mac_max_vht_nss(vht_mcs_mask)));
- if (!ath10k_mac_can_set_bitrate_mask(ar, band, mask))
- return -EINVAL;
+ if (!ath10k_mac_can_set_bitrate_mask(ar, band, mask,
+ allow_pfr)) {
+ u8 vht_nss;
+
+ if (!allow_pfr || vht_num_rates != 1)
+ return -EINVAL;
+
+ /* If we get here, the firmware supports peer fixed rate and the
+ * mask contains a single vht rate; don't update the vif
+ * bitrate_mask, since the rate applies only to a specific peer.
+ */
+ ath10k_mac_bitrate_mask_get_single_rate(ar, band, mask,
+ &vht_pfr,
+ &vht_nss,
+ true);
+ update_bitrate_mask = false;
+ }
mutex_lock(&ar->conf_mutex);
- arvif->bitrate_mask = *mask;
+ if (update_bitrate_mask)
+ arvif->bitrate_mask = *mask;
+ arvif->vht_num_rates = vht_num_rates;
+ arvif->vht_pfr = vht_pfr;
ieee80211_iterate_stations_atomic(ar->hw,
ath10k_mac_set_bitrate_mask_iter,
arvif);
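The bitrate-mask path now splits into two outcomes when the requested mask is not acceptable as a vif-wide setting: if the firmware advertises ATH10K_FW_FEATURE_PEER_FIXED_RATE and the mask reduces to exactly one VHT rate, that rate is programmed per peer through WMI_PEER_PARAM_FIXED_RATE and arvif->bitrate_mask is left untouched; anything else is still rejected. A condensed, hedged sketch of just that decision (return codes are illustrative):

    #include <errno.h>
    #include <stdbool.h>

    /* 0: program the vif-wide mask; 1: per-peer fixed rate only */
    static int demo_apply_mask(bool mask_ok_globally, bool fw_has_pfr,
                               int vht_num_rates)
    {
            if (mask_ok_globally)
                    return 0;
            if (fw_has_pfr && vht_num_rates == 1)
                    return 1;
            return -EINVAL;
    }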
@@ -7869,7 +8051,8 @@ ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
arvif->vdev_id, ret);
}
- if (ath10k_peer_stats_enabled(ar)) {
+ if (ath10k_peer_stats_enabled(ar) &&
+ ar->hw_params.tx_stats_over_pktlog) {
ar->pktlog_filter |= ATH10K_PKTLOG_PEER_STATS;
ret = ath10k_wmi_pdev_pktlog_enable(ar,
ar->pktlog_filter);
@@ -8007,6 +8190,7 @@ static const struct ieee80211_ops ath10k_ops = {
.set_key = ath10k_set_key,
.set_default_unicast_key = ath10k_set_default_unicast_key,
.sta_state = ath10k_sta_state,
+ .sta_set_txpwr = ath10k_sta_set_txpwr,
.conf_tx = ath10k_conf_tx,
.remain_on_channel = ath10k_remain_on_channel,
.cancel_remain_on_channel = ath10k_cancel_remain_on_channel,
@@ -8695,6 +8879,9 @@ int ath10k_mac_register(struct ath10k *ar)
wiphy_ext_feature_set(ar->hw->wiphy,
NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER);
+ if (test_bit(WMI_SERVICE_TX_PWR_PER_PEER, ar->wmi.svc_map))
+ wiphy_ext_feature_set(ar->hw->wiphy,
+ NL80211_EXT_FEATURE_STA_TX_PWR);
/*
* on LL hardware queues are managed entirely by the FW
* so we only advertise to mac we can do the queues thing
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 2c27f407a851..a0b4d265c6eb 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -909,7 +909,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
/* Host buffer address in CE space */
u32 ce_data;
dma_addr_t ce_data_base = 0;
- void *data_buf = NULL;
+ void *data_buf;
int i;
mutex_lock(&ar_pci->ce_diag_mutex);
@@ -923,10 +923,8 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
*/
alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT);
- data_buf = (unsigned char *)dma_alloc_coherent(ar->dev, alloc_nbytes,
- &ce_data_base,
- GFP_ATOMIC);
-
+ data_buf = dma_alloc_coherent(ar->dev, alloc_nbytes, &ce_data_base,
+ GFP_ATOMIC);
if (!data_buf) {
ret = -ENOMEM;
goto done;
@@ -1054,7 +1052,7 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
u32 *buf;
unsigned int completed_nbytes, alloc_nbytes, remaining_bytes;
struct ath10k_ce_pipe *ce_diag;
- void *data_buf = NULL;
+ void *data_buf;
dma_addr_t ce_data_base = 0;
int i;
@@ -1069,10 +1067,8 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
*/
alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT);
- data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
- alloc_nbytes,
- &ce_data_base,
- GFP_ATOMIC);
+ data_buf = dma_alloc_coherent(ar->dev, alloc_nbytes, &ce_data_base,
+ GFP_ATOMIC);
if (!data_buf) {
ret = -ENOMEM;
goto done;
@@ -2059,6 +2055,11 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");
+ ath10k_pci_irq_disable(ar);
+ ath10k_pci_irq_sync(ar);
+ napi_synchronize(&ar->napi);
+ napi_disable(&ar->napi);
+
/* Most likely the device has HTT Rx ring configured. The only way to
* prevent the device from accessing (and possible corrupting) host
* memory is to reset the chip now.
@@ -2072,10 +2073,6 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
*/
ath10k_pci_safe_chip_reset(ar);
- ath10k_pci_irq_disable(ar);
- ath10k_pci_irq_sync(ar);
- napi_synchronize(&ar->napi);
- napi_disable(&ar->napi);
ath10k_pci_flush(ar);
spin_lock_irqsave(&ar_pci->ps_lock, flags);
@@ -3492,7 +3489,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
struct ath10k *ar;
struct ath10k_pci *ar_pci;
enum ath10k_hw_rev hw_rev;
- struct ath10k_bus_params bus_params;
+ struct ath10k_bus_params bus_params = {};
bool pci_ps;
int (*pci_soft_reset)(struct ath10k *ar);
int (*pci_hard_reset)(struct ath10k *ar);
diff --git a/drivers/net/wireless/ath/ath10k/qmi.c b/drivers/net/wireless/ath/ath10k/qmi.c
index a7bc2c70d076..3b63b6257c43 100644
--- a/drivers/net/wireless/ath/ath10k/qmi.c
+++ b/drivers/net/wireless/ath/ath10k/qmi.c
@@ -506,6 +506,7 @@ static int ath10k_qmi_cap_send_sync_msg(struct ath10k_qmi *qmi)
struct wlfw_cap_resp_msg_v01 *resp;
struct wlfw_cap_req_msg_v01 req = {};
struct ath10k *ar = qmi->ar;
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
struct qmi_txn txn;
int ret;
@@ -560,13 +561,13 @@ static int ath10k_qmi_cap_send_sync_msg(struct ath10k_qmi *qmi)
strlcpy(qmi->fw_build_id, resp->fw_build_id,
MAX_BUILD_ID_LEN + 1);
- ath10k_dbg(ar, ATH10K_DBG_QMI,
- "qmi chip_id 0x%x chip_family 0x%x board_id 0x%x soc_id 0x%x",
- qmi->chip_info.chip_id, qmi->chip_info.chip_family,
- qmi->board_info.board_id, qmi->soc_info.soc_id);
- ath10k_dbg(ar, ATH10K_DBG_QMI,
- "qmi fw_version 0x%x fw_build_timestamp %s fw_build_id %s",
- qmi->fw_version, qmi->fw_build_timestamp, qmi->fw_build_id);
+ if (!test_bit(ATH10K_SNOC_FLAG_REGISTERED, &ar_snoc->flags)) {
+ ath10k_info(ar, "qmi chip_id 0x%x chip_family 0x%x board_id 0x%x soc_id 0x%x",
+ qmi->chip_info.chip_id, qmi->chip_info.chip_family,
+ qmi->board_info.board_id, qmi->soc_info.soc_id);
+ ath10k_info(ar, "qmi fw_version 0x%x fw_build_timestamp %s fw_build_id %s",
+ qmi->fw_version, qmi->fw_build_timestamp, qmi->fw_build_id);
+ }
kfree(resp);
return 0;
@@ -619,6 +620,51 @@ out:
return ret;
}
+int ath10k_qmi_set_fw_log_mode(struct ath10k *ar, u8 fw_log_mode)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct wlfw_ini_resp_msg_v01 resp = {};
+ struct ath10k_qmi *qmi = ar_snoc->qmi;
+ struct wlfw_ini_req_msg_v01 req = {};
+ struct qmi_txn txn;
+ int ret;
+
+ req.enablefwlog_valid = 1;
+ req.enablefwlog = fw_log_mode;
+
+ ret = qmi_txn_init(&qmi->qmi_hdl, &txn, wlfw_ini_resp_msg_v01_ei,
+ &resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
+ QMI_WLFW_INI_REQ_V01,
+ WLFW_INI_REQ_MSG_V01_MAX_MSG_LEN,
+ wlfw_ini_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ ath10k_err(ar, "fail to send fw log reqest: %d\n", ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
+ if (ret < 0)
+ goto out;
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath10k_err(ar, "fw log request rejectedr: %d\n",
+ resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+ ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi fw log request completed, mode: %d\n",
+ fw_log_mode);
+ return 0;
+
+out:
+ return ret;
+}
+
static int
ath10k_qmi_ind_register_send_sync_msg(struct ath10k_qmi *qmi)
{
@@ -1002,6 +1048,7 @@ int ath10k_qmi_deinit(struct ath10k *ar)
qmi_handle_release(&qmi->qmi_hdl);
cancel_work_sync(&qmi->event_work);
destroy_workqueue(qmi->event_wq);
+ kfree(qmi);
ar_snoc->qmi = NULL;
return 0;
diff --git a/drivers/net/wireless/ath/ath10k/qmi.h b/drivers/net/wireless/ath/ath10k/qmi.h
index e4aa20445666..40aafb875ed0 100644
--- a/drivers/net/wireless/ath/ath10k/qmi.h
+++ b/drivers/net/wireless/ath/ath10k/qmi.h
@@ -114,5 +114,6 @@ int ath10k_qmi_wlan_disable(struct ath10k *ar);
int ath10k_qmi_register_service_notifier(struct notifier_block *nb);
int ath10k_qmi_init(struct ath10k *ar, u32 msa_size);
int ath10k_qmi_deinit(struct ath10k *ar);
+int ath10k_qmi_set_fw_log_mode(struct ath10k *ar, u8 fw_log_mode);
#endif /* ATH10K_QMI_H */
diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c
index fae56c67766f..8ed4fbd8d6c3 100644
--- a/drivers/net/wireless/ath/ath10k/sdio.c
+++ b/drivers/net/wireless/ath/ath10k/sdio.c
@@ -584,6 +584,11 @@ static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar,
act_len,
&bndl_cnt);
+ if (ret) {
+ ath10k_warn(ar, "alloc_bundle error %d\n", ret);
+ goto err;
+ }
+
n_lookaheads += bndl_cnt;
i += bndl_cnt;
/* Next buffer will be the last in the bundle */
@@ -602,6 +607,10 @@ static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar,
full_len,
last_in_bundle,
last_in_bundle);
+ if (ret) {
+ ath10k_warn(ar, "alloc_rx_pkt error %d\n", ret);
+ goto err;
+ }
}
ar_sdio->n_rx_pkts = i;
@@ -850,6 +859,10 @@ static int ath10k_sdio_mbox_proc_cpu_intr(struct ath10k *ar)
out:
mutex_unlock(&irq_data->mtx);
+ if (cpu_int_status & MBOX_CPU_STATUS_ENABLE_ASSERT_MASK) {
+ ath10k_err(ar, "firmware crashed!\n");
+ queue_work(ar->workqueue, &ar->restart_work);
+ }
return ret;
}
@@ -1495,8 +1508,10 @@ static int ath10k_sdio_hif_enable_intrs(struct ath10k *ar)
regs->int_status_en |=
FIELD_PREP(MBOX_INT_STATUS_ENABLE_MBOX_DATA_MASK, 1);
- /* Set up the CPU Interrupt status Register */
- regs->cpu_int_status_en = 0;
+ /* Set up the CPU Interrupt Status Register; enable CPU sourced interrupt #0,
+ * which is used to report assertions from the target
+ */
+ regs->cpu_int_status_en = FIELD_PREP(MBOX_CPU_STATUS_ENABLE_ASSERT_MASK, 1);
/* Set up the Error Interrupt status Register */
regs->err_int_status_en =
@@ -1637,7 +1652,12 @@ static int ath10k_sdio_hif_swap_mailbox(struct ath10k *ar)
ath10k_dbg(ar, ATH10K_DBG_SDIO,
"sdio mailbox swap service enabled\n");
ar_sdio->swap_mbox = true;
+ } else {
+ ath10k_dbg(ar, ATH10K_DBG_SDIO,
+ "sdio mailbox swap service disabled\n");
+ ar_sdio->swap_mbox = false;
}
+
return 0;
}
@@ -1954,7 +1974,7 @@ static int ath10k_sdio_probe(struct sdio_func *func,
struct ath10k *ar;
enum ath10k_hw_rev hw_rev;
u32 dev_id_base;
- struct ath10k_bus_params bus_params;
+ struct ath10k_bus_params bus_params = {};
int ret, i;
/* Assumption: All SDIO based chipsets (so far) are QCA6174 based.
@@ -2045,6 +2065,8 @@ static int ath10k_sdio_probe(struct sdio_func *func,
bus_params.dev_type = ATH10K_DEV_TYPE_HL;
/* TODO: don't know yet how to get chip_id with SDIO */
bus_params.chip_id = 0;
+ bus_params.hl_msdu_ids = true;
+
ret = ath10k_core_register(ar, &bus_params);
if (ret) {
ath10k_err(ar, "failed to register driver core: %d\n", ret);
@@ -2052,7 +2074,7 @@ static int ath10k_sdio_probe(struct sdio_func *func,
}
/* TODO: remove this once SDIO support is fully implemented */
- ath10k_warn(ar, "WARNING: ath10k SDIO support is incomplete, don't expect anything to work!\n");
+ ath10k_warn(ar, "WARNING: ath10k SDIO support is work-in-progress, problems may arise!\n");
return 0;
@@ -2073,10 +2095,11 @@ static void ath10k_sdio_remove(struct sdio_func *func)
"sdio removed func %d vendor 0x%x device 0x%x\n",
func->num, func->vendor, func->device);
- (void)ath10k_sdio_hif_disable_intrs(ar);
- cancel_work_sync(&ar_sdio->wr_async_work);
ath10k_core_unregister(ar);
ath10k_core_destroy(ar);
+
+ flush_workqueue(ar_sdio->workqueue);
+ destroy_workqueue(ar_sdio->workqueue);
}
static const struct sdio_device_id ath10k_sdio_devices[] = {
diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
index 873cb4ce419b..b491361e6ed4 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.c
+++ b/drivers/net/wireless/ath/ath10k/snoc.c
@@ -165,7 +165,7 @@ static struct ce_attr host_ce_config_wlan[] = {
/* CE4: host->target HTT */
{
.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
- .src_nentries = 256,
+ .src_nentries = 2048,
.src_sz_max = 256,
.dest_nentries = 0,
.send_cb = ath10k_snoc_htt_tx_cb,
@@ -1050,6 +1050,19 @@ err_wlan_enable:
return ret;
}
+static int ath10k_snoc_hif_set_target_log_mode(struct ath10k *ar,
+ u8 fw_log_mode)
+{
+ u8 fw_dbg_mode;
+
+ if (fw_log_mode)
+ fw_dbg_mode = ATH10K_ENABLE_FW_LOG_CE;
+ else
+ fw_dbg_mode = ATH10K_ENABLE_FW_LOG_DIAG;
+
+ return ath10k_qmi_set_fw_log_mode(ar, fw_dbg_mode);
+}
+
#ifdef CONFIG_PM
static int ath10k_snoc_hif_suspend(struct ath10k *ar)
{
@@ -1103,6 +1116,8 @@ static const struct ath10k_hif_ops ath10k_snoc_hif_ops = {
.send_complete_check = ath10k_snoc_hif_send_complete_check,
.get_free_queue_number = ath10k_snoc_hif_get_free_queue_number,
.get_target_info = ath10k_snoc_hif_get_target_info,
+ .set_target_log_mode = ath10k_snoc_hif_set_target_log_mode,
+
#ifdef CONFIG_PM
.suspend = ath10k_snoc_hif_suspend,
.resume = ath10k_snoc_hif_resume,
@@ -1249,7 +1264,7 @@ out:
int ath10k_snoc_fw_indication(struct ath10k *ar, u64 type)
{
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
- struct ath10k_bus_params bus_params;
+ struct ath10k_bus_params bus_params = {};
int ret;
if (test_bit(ATH10K_SNOC_FLAG_UNREGISTERING, &ar_snoc->flags))
diff --git a/drivers/net/wireless/ath/ath10k/swap.c b/drivers/net/wireless/ath/ath10k/swap.c
index 4dddeee684b4..7198a386f2fb 100644
--- a/drivers/net/wireless/ath/ath10k/swap.c
+++ b/drivers/net/wireless/ath/ath10k/swap.c
@@ -106,10 +106,8 @@ ath10k_swap_code_seg_alloc(struct ath10k *ar, size_t swap_bin_len)
virt_addr = dma_alloc_coherent(ar->dev, swap_bin_len, &paddr,
GFP_KERNEL);
- if (!virt_addr) {
- ath10k_err(ar, "failed to allocate dma coherent memory\n");
+ if (!virt_addr)
return NULL;
- }
seg_info->seg_hw_info.bus_addr[0] = __cpu_to_le32(paddr);
seg_info->seg_hw_info.size = __cpu_to_le32(swap_bin_len);
diff --git a/drivers/net/wireless/ath/ath10k/testmode.c b/drivers/net/wireless/ath/ath10k/testmode.c
index a29cfb9c72c2..1bffe3fbea3f 100644
--- a/drivers/net/wireless/ath/ath10k/testmode.c
+++ b/drivers/net/wireless/ath/ath10k/testmode.c
@@ -174,8 +174,23 @@ static int ath10k_tm_fetch_firmware(struct ath10k *ar)
{
struct ath10k_fw_components *utf_mode_fw;
int ret;
+ char fw_name[100];
+ int fw_api2 = 2;
+
+ switch (ar->hif.bus) {
+ case ATH10K_BUS_SDIO:
+ case ATH10K_BUS_USB:
+ scnprintf(fw_name, sizeof(fw_name), "%s-%s-%d.bin",
+ ATH10K_FW_UTF_FILE_BASE, ath10k_bus_str(ar->hif.bus),
+ fw_api2);
+ break;
+ default:
+ scnprintf(fw_name, sizeof(fw_name), "%s-%d.bin",
+ ATH10K_FW_UTF_FILE_BASE, fw_api2);
+ break;
+ }
- ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_UTF_API2_FILE,
+ ret = ath10k_core_fetch_firmware_api_n(ar, fw_name,
&ar->testmode.utf_mode_fw.fw_file);
if (ret == 0) {
ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode using fw utf api 2");
diff --git a/drivers/net/wireless/ath/ath10k/trace.c b/drivers/net/wireless/ath/ath10k/trace.c
index 3ecdff17f64e..c7d4c97e6079 100644
--- a/drivers/net/wireless/ath/ath10k/trace.c
+++ b/drivers/net/wireless/ath/ath10k/trace.c
@@ -7,3 +7,4 @@
#define CREATE_TRACE_POINTS
#include "trace.h"
+EXPORT_SYMBOL(__tracepoint_ath10k_log_dbg);
diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h
index ba977bbe6291..ab916459d237 100644
--- a/drivers/net/wireless/ath/ath10k/trace.h
+++ b/drivers/net/wireless/ath/ath10k/trace.h
@@ -29,7 +29,11 @@ static inline u32 ath10k_frm_hdr_len(const void *buf, size_t len)
#if !defined(CONFIG_ATH10K_TRACING)
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, ...) \
-static inline void trace_ ## name(proto) {}
+static inline void trace_ ## name(proto) {} \
+static inline bool trace_##name##_enabled(void) \
+{ \
+ return false; \
+}
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(...)
#undef DEFINE_EVENT
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index c5818d28f55a..4102df016931 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -150,6 +150,9 @@ struct ath10k_peer *ath10k_peer_find_by_id(struct ath10k *ar, int peer_id)
{
struct ath10k_peer *peer;
+ if (peer_id >= BITS_PER_TYPE(peer->peer_ids))
+ return NULL;
+
lockdep_assert_held(&ar->data_lock);
list_for_each_entry(peer, &ar->peers, list)
diff --git a/drivers/net/wireless/ath/ath10k/usb.c b/drivers/net/wireless/ath/ath10k/usb.c
index 970cf69ac35f..e1420f67f776 100644
--- a/drivers/net/wireless/ath/ath10k/usb.c
+++ b/drivers/net/wireless/ath/ath10k/usb.c
@@ -973,7 +973,7 @@ static int ath10k_usb_probe(struct usb_interface *interface,
struct usb_device *dev = interface_to_usbdev(interface);
int ret, vendor_id, product_id;
enum ath10k_hw_rev hw_rev;
- struct ath10k_bus_params bus_params;
+ struct ath10k_bus_params bus_params = {};
/* Assumption: All USB based chipsets (so far) are QCA9377 based.
* If there will be newer chipsets that does not use the hw reg
@@ -1016,7 +1016,7 @@ static int ath10k_usb_probe(struct usb_interface *interface,
}
/* TODO: remove this once USB support is fully implemented */
- ath10k_warn(ar, "WARNING: ath10k USB support is incomplete, don't expect anything to work!\n");
+ ath10k_warn(ar, "Warning: ath10k USB support is incomplete, don't expect anything to work!\n");
return 0;
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index 582fb11f648a..2985bb17decd 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -2,7 +2,7 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*/
#include "core.h"
#include "debug.h"
@@ -212,6 +212,13 @@ static int ath10k_wmi_tlv_event_bcn_tx_status(struct ath10k *ar,
return 0;
}
+static void ath10k_wmi_tlv_event_vdev_delete_resp(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_DELETE_RESP_EVENTID\n");
+ complete(&ar->vdev_delete_done);
+}
+
static int ath10k_wmi_tlv_event_diag_data(struct ath10k *ar,
struct sk_buff *skb)
{
@@ -458,6 +465,24 @@ static void ath10k_wmi_event_tdls_peer(struct ath10k *ar, struct sk_buff *skb)
kfree(tb);
}
+static int ath10k_wmi_tlv_event_peer_delete_resp(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ struct wmi_peer_delete_resp_ev_arg *arg;
+ struct wmi_tlv *tlv_hdr;
+
+ tlv_hdr = (struct wmi_tlv *)skb->data;
+ arg = (struct wmi_peer_delete_resp_ev_arg *)tlv_hdr->value;
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "vdev id %d", arg->vdev_id);
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "peer mac addr %pM", &arg->peer_addr);
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer delete response\n");
+
+ complete(&ar->peer_delete_done);
+
+ return 0;
+}
+
/***********/
/* TLV ops */
/***********/
@@ -514,6 +539,9 @@ static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb)
case WMI_TLV_VDEV_STOPPED_EVENTID:
ath10k_wmi_event_vdev_stopped(ar, skb);
break;
+ case WMI_TLV_VDEV_DELETE_RESP_EVENTID:
+ ath10k_wmi_tlv_event_vdev_delete_resp(ar, skb);
+ break;
case WMI_TLV_PEER_STA_KICKOUT_EVENTID:
ath10k_wmi_event_peer_sta_kickout(ar, skb);
break;
@@ -607,6 +635,9 @@ static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb)
case WMI_TLV_TDLS_PEER_EVENTID:
ath10k_wmi_event_tdls_peer(ar, skb);
break;
+ case WMI_TLV_PEER_DELETE_RESP_EVENTID:
+ ath10k_wmi_tlv_event_peer_delete_resp(ar, skb);
+ break;
case WMI_TLV_MGMT_TX_COMPLETION_EVENTID:
ath10k_wmi_event_mgmt_tx_compl(ar, skb);
break;
@@ -1905,6 +1936,28 @@ ath10k_wmi_tlv_op_gen_stop_scan(struct ath10k *ar,
return skb;
}
+static int ath10k_wmi_tlv_op_get_vdev_subtype(struct ath10k *ar,
+ enum wmi_vdev_subtype subtype)
+{
+ switch (subtype) {
+ case WMI_VDEV_SUBTYPE_NONE:
+ return WMI_TLV_VDEV_SUBTYPE_NONE;
+ case WMI_VDEV_SUBTYPE_P2P_DEVICE:
+ return WMI_TLV_VDEV_SUBTYPE_P2P_DEV;
+ case WMI_VDEV_SUBTYPE_P2P_CLIENT:
+ return WMI_TLV_VDEV_SUBTYPE_P2P_CLI;
+ case WMI_VDEV_SUBTYPE_P2P_GO:
+ return WMI_TLV_VDEV_SUBTYPE_P2P_GO;
+ case WMI_VDEV_SUBTYPE_PROXY_STA:
+ return WMI_TLV_VDEV_SUBTYPE_PROXY_STA;
+ case WMI_VDEV_SUBTYPE_MESH_11S:
+ return WMI_TLV_VDEV_SUBTYPE_MESH_11S;
+ case WMI_VDEV_SUBTYPE_MESH_NON_11S:
+ return -ENOTSUPP;
+ }
+ return -ENOTSUPP;
+}
+
static struct sk_buff *
ath10k_wmi_tlv_op_gen_vdev_create(struct ath10k *ar,
u32 vdev_id,
@@ -2840,8 +2893,10 @@ ath10k_wmi_tlv_op_gen_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
if ((ieee80211_is_action(hdr->frame_control) ||
ieee80211_is_deauth(hdr->frame_control) ||
ieee80211_is_disassoc(hdr->frame_control)) &&
- ieee80211_has_protected(hdr->frame_control))
+ ieee80211_has_protected(hdr->frame_control)) {
+ skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
buf_len += IEEE80211_CCMP_MIC_LEN;
+ }
buf_len = min_t(u32, buf_len, WMI_TLV_MGMT_TX_FRAME_MAX_LEN);
buf_len = round_up(buf_len, 4);
@@ -4305,7 +4360,7 @@ static const struct wmi_ops wmi_tlv_ops = {
.gen_tdls_peer_update = ath10k_wmi_tlv_op_gen_tdls_peer_update,
.gen_adaptive_qcs = ath10k_wmi_tlv_op_gen_adaptive_qcs,
.fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill,
- .get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
+ .get_vdev_subtype = ath10k_wmi_tlv_op_get_vdev_subtype,
.gen_echo = ath10k_wmi_tlv_op_gen_echo,
.gen_vdev_spectral_conf = ath10k_wmi_tlv_op_gen_vdev_spectral_conf,
.gen_vdev_spectral_enable = ath10k_wmi_tlv_op_gen_vdev_spectral_enable,
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
index 65e6aa520b06..d691f06e58f2 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
@@ -2,7 +2,7 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _WMI_TLV_H
#define _WMI_TLV_H
@@ -301,11 +301,15 @@ enum wmi_tlv_event_id {
WMI_TLV_VDEV_STOPPED_EVENTID,
WMI_TLV_VDEV_INSTALL_KEY_COMPLETE_EVENTID,
WMI_TLV_VDEV_MCC_BCN_INTERVAL_CHANGE_REQ_EVENTID,
+ WMI_TLV_VDEV_TSF_REPORT_EVENTID,
+ WMI_TLV_VDEV_DELETE_RESP_EVENTID,
WMI_TLV_PEER_STA_KICKOUT_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_PEER),
WMI_TLV_PEER_INFO_EVENTID,
WMI_TLV_PEER_TX_FAIL_CNT_THR_EVENTID,
WMI_TLV_PEER_ESTIMATED_LINKSPEED_EVENTID,
WMI_TLV_PEER_STATE_EVENTID,
+ WMI_TLV_PEER_ASSOC_CONF_EVENTID,
+ WMI_TLV_PEER_DELETE_RESP_EVENTID,
WMI_TLV_MGMT_RX_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_MGMT),
WMI_TLV_HOST_SWBA_EVENTID,
WMI_TLV_TBTTOFFSET_UPDATE_EVENTID,
@@ -1567,6 +1571,10 @@ wmi_tlv_svc_map(const __le32 *in, unsigned long *out, size_t len)
WMI_SERVICE_SAP_AUTH_OFFLOAD, len);
SVCMAP(WMI_TLV_SERVICE_MGMT_TX_WMI,
WMI_SERVICE_MGMT_TX_WMI, len);
+ SVCMAP(WMI_TLV_SERVICE_MESH_11S,
+ WMI_SERVICE_MESH_11S, len);
+ SVCMAP(WMI_TLV_SERVICE_SYNC_DELETE_CMDS,
+ WMI_SERVICE_SYNC_DELETE_CMDS, len);
}
static inline void
@@ -1775,6 +1783,16 @@ struct wmi_tlv_start_scan_cmd {
struct wmi_mac_addr mac_mask;
} __packed;
+enum wmi_tlv_vdev_subtype {
+ WMI_TLV_VDEV_SUBTYPE_NONE = 0,
+ WMI_TLV_VDEV_SUBTYPE_P2P_DEV = 1,
+ WMI_TLV_VDEV_SUBTYPE_P2P_CLI = 2,
+ WMI_TLV_VDEV_SUBTYPE_P2P_GO = 3,
+ WMI_TLV_VDEV_SUBTYPE_PROXY_STA = 4,
+ WMI_TLV_VDEV_SUBTYPE_MESH = 5,
+ WMI_TLV_VDEV_SUBTYPE_MESH_11S = 6,
+};
+
struct wmi_tlv_vdev_start_cmd {
__le32 vdev_id;
__le32 requestor_id;
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 98a90e49d666..4f707c6394bb 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -8309,7 +8309,7 @@ ath10k_wmi_fw_vdev_stats_fill(const struct ath10k_fw_stats_vdev *vdev,
static void
ath10k_wmi_fw_peer_stats_fill(const struct ath10k_fw_stats_peer *peer,
- char *buf, u32 *length)
+ char *buf, u32 *length, bool extended_peer)
{
u32 len = *length;
u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
@@ -8322,13 +8322,27 @@ ath10k_wmi_fw_peer_stats_fill(const struct ath10k_fw_stats_peer *peer,
"Peer TX rate", peer->peer_tx_rate);
len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
"Peer RX rate", peer->peer_rx_rate);
- len += scnprintf(buf + len, buf_len - len, "%30s %llu\n",
- "Peer RX duration", peer->rx_duration);
+ if (!extended_peer)
+ len += scnprintf(buf + len, buf_len - len, "%30s %llu\n",
+ "Peer RX duration", peer->rx_duration);
len += scnprintf(buf + len, buf_len - len, "\n");
*length = len;
}
+static void
+ath10k_wmi_fw_extd_peer_stats_fill(const struct ath10k_fw_extd_stats_peer *peer,
+ char *buf, u32 *length)
+{
+ u32 len = *length;
+ u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
+ "Peer MAC address", peer->peer_macaddr);
+ len += scnprintf(buf + len, buf_len - len, "%30s %llu\n",
+ "Peer RX duration", peer->rx_duration);
+}
+
void ath10k_wmi_main_op_fw_stats_fill(struct ath10k *ar,
struct ath10k_fw_stats *fw_stats,
char *buf)
@@ -8374,7 +8388,8 @@ void ath10k_wmi_main_op_fw_stats_fill(struct ath10k *ar,
"=================");
list_for_each_entry(peer, &fw_stats->peers, list) {
- ath10k_wmi_fw_peer_stats_fill(peer, buf, &len);
+ ath10k_wmi_fw_peer_stats_fill(peer, buf, &len,
+ fw_stats->extended);
}
unlock:
@@ -8432,7 +8447,8 @@ void ath10k_wmi_10x_op_fw_stats_fill(struct ath10k *ar,
"=================");
list_for_each_entry(peer, &fw_stats->peers, list) {
- ath10k_wmi_fw_peer_stats_fill(peer, buf, &len);
+ ath10k_wmi_fw_peer_stats_fill(peer, buf, &len,
+ fw_stats->extended);
}
unlock:
@@ -8541,6 +8557,7 @@ void ath10k_wmi_10_4_op_fw_stats_fill(struct ath10k *ar,
const struct ath10k_fw_stats_pdev *pdev;
const struct ath10k_fw_stats_vdev_extd *vdev;
const struct ath10k_fw_stats_peer *peer;
+ const struct ath10k_fw_extd_stats_peer *extd_peer;
size_t num_peers;
size_t num_vdevs;
@@ -8603,7 +8620,15 @@ void ath10k_wmi_10_4_op_fw_stats_fill(struct ath10k *ar,
"=================");
list_for_each_entry(peer, &fw_stats->peers, list) {
- ath10k_wmi_fw_peer_stats_fill(peer, buf, &len);
+ ath10k_wmi_fw_peer_stats_fill(peer, buf, &len,
+ fw_stats->extended);
+ }
+
+ if (fw_stats->extended) {
+ list_for_each_entry(extd_peer, &fw_stats->peers_extd, list) {
+ ath10k_wmi_fw_extd_peer_stats_fill(extd_peer, buf,
+ &len);
+ }
}
unlock:
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index e1c40bb69932..838768c98adc 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -2,7 +2,7 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _WMI_H_
@@ -200,6 +200,8 @@ enum wmi_service {
WMI_SERVICE_RTT_RESPONDER_ROLE,
WMI_SERVICE_PER_PACKET_SW_ENCRYPT,
WMI_SERVICE_REPORT_AIRTIME,
+ WMI_SERVICE_SYNC_DELETE_CMDS,
+ WMI_SERVICE_TX_PWR_PER_PEER,
/* Remember to add the new value to wmi_service_name()! */
@@ -367,6 +369,7 @@ enum wmi_10_4_service {
WMI_10_4_SERVICE_RTT_RESPONDER_ROLE,
WMI_10_4_SERVICE_EXT_PEER_TID_CONFIGS_SUPPORT,
WMI_10_4_SERVICE_REPORT_AIRTIME,
+ WMI_10_4_SERVICE_TX_PWR_PER_PEER,
};
static inline char *wmi_service_name(enum wmi_service service_id)
@@ -491,6 +494,8 @@ static inline char *wmi_service_name(enum wmi_service service_id)
SVCSTR(WMI_SERVICE_RTT_RESPONDER_ROLE);
SVCSTR(WMI_SERVICE_PER_PACKET_SW_ENCRYPT);
SVCSTR(WMI_SERVICE_REPORT_AIRTIME);
+ SVCSTR(WMI_SERVICE_SYNC_DELETE_CMDS);
+ SVCSTR(WMI_SERVICE_TX_PWR_PER_PEER);
case WMI_SERVICE_MAX:
return NULL;
@@ -818,6 +823,8 @@ static inline void wmi_10_4_svc_map(const __le32 *in, unsigned long *out,
WMI_SERVICE_PER_PACKET_SW_ENCRYPT, len);
SVCMAP(WMI_10_4_SERVICE_REPORT_AIRTIME,
WMI_SERVICE_REPORT_AIRTIME, len);
+ SVCMAP(WMI_10_4_SERVICE_TX_PWR_PER_PEER,
+ WMI_SERVICE_TX_PWR_PER_PEER, len);
}
#undef SVCMAP
@@ -4535,9 +4542,10 @@ enum wmi_10_4_stats_id {
};
enum wmi_tlv_stats_id {
- WMI_TLV_STAT_PDEV = BIT(0),
- WMI_TLV_STAT_VDEV = BIT(1),
- WMI_TLV_STAT_PEER = BIT(2),
+ WMI_TLV_STAT_PEER = BIT(0),
+ WMI_TLV_STAT_AP = BIT(1),
+ WMI_TLV_STAT_PDEV = BIT(2),
+ WMI_TLV_STAT_VDEV = BIT(3),
WMI_TLV_STAT_PEER_EXTD = BIT(10),
};
@@ -6259,6 +6267,8 @@ enum wmi_peer_param {
WMI_PEER_CHAN_WIDTH = 0x4,
WMI_PEER_NSS = 0x5,
WMI_PEER_USE_4ADDR = 0x6,
+ WMI_PEER_USE_FIXED_PWR = 0x8,
+ WMI_PEER_PARAM_FIXED_RATE = 0x9,
WMI_PEER_DEBUG = 0xa,
WMI_PEER_PHYMODE = 0xd,
WMI_PEER_DUMMY_VAR = 0xff, /* dummy parameter for STA PS workaround */
@@ -6756,6 +6766,11 @@ struct wmi_tlv_mgmt_tx_bundle_compl_ev_arg {
const __le32 *ack_rssi;
};
+struct wmi_peer_delete_resp_ev_arg {
+ __le32 vdev_id;
+ struct wmi_mac_addr peer_addr;
+};
+
struct wmi_mgmt_rx_ev_arg {
__le32 channel;
__le32 snr;
diff --git a/drivers/net/wireless/ath/ath5k/Kconfig b/drivers/net/wireless/ath/ath5k/Kconfig
index c587146795f6..802f8f87773a 100644
--- a/drivers/net/wireless/ath/ath5k/Kconfig
+++ b/drivers/net/wireless/ath/ath5k/Kconfig
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-License-Identifier: ISC
config ATH5K
tristate "Atheros 5xxx wireless cards support"
depends on (PCI || ATH25) && MAC80211
diff --git a/drivers/net/wireless/ath/ath5k/Makefile b/drivers/net/wireless/ath/ath5k/Makefile
index a8724eee21f8..78f318d49af5 100644
--- a/drivers/net/wireless/ath/ath5k/Makefile
+++ b/drivers/net/wireless/ath/ath5k/Makefile
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: ISC
ath5k-y += caps.o
ath5k-y += initvals.o
ath5k-y += eeprom.o
diff --git a/drivers/net/wireless/ath/ath6kl/Kconfig b/drivers/net/wireless/ath/ath6kl/Kconfig
index 2b27a87e74f5..dcf8ca0dcc52 100644
--- a/drivers/net/wireless/ath/ath6kl/Kconfig
+++ b/drivers/net/wireless/ath/ath6kl/Kconfig
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-License-Identifier: ISC
config ATH6KL
tristate "Atheros mobile chipsets support"
depends on CFG80211
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index 5477a014e1fb..37cf602d8adf 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -2194,13 +2194,13 @@ static int ath6kl_wow_suspend_vif(struct ath6kl_vif *vif,
if (!in_dev)
return 0;
- ifa = in_dev->ifa_list;
+ ifa = rtnl_dereference(in_dev->ifa_list);
memset(&ips, 0, sizeof(ips));
/* Configure IP addr only if IP address count < MAX_IP_ADDRS */
while (index < MAX_IP_ADDRS && ifa) {
ips[index] = ifa->ifa_local;
- ifa = ifa->ifa_next;
+ ifa = rtnl_dereference(ifa->ifa_next);
index++;
}
diff --git a/drivers/net/wireless/ath/ath6kl/debug.c b/drivers/net/wireless/ath/ath6kl/debug.c
index 4e94b22eaada..54337d60f288 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.c
+++ b/drivers/net/wireless/ath/ath6kl/debug.c
@@ -1132,8 +1132,7 @@ int ath6kl_debug_roam_tbl_event(struct ath6kl *ar, const void *buf,
tbl = (const struct wmi_target_roam_tbl *) buf;
num_entries = le16_to_cpu(tbl->num_entries);
- if (sizeof(*tbl) + num_entries * sizeof(struct wmi_bss_roam_info) >
- len)
+ if (struct_size(tbl, info, num_entries) > len)
return -EINVAL;
if (ar->debug.roam_tbl == NULL ||
diff --git a/drivers/net/wireless/ath/ath6kl/htc_pipe.c b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
index 434b66829646..c68848819a52 100644
--- a/drivers/net/wireless/ath/ath6kl/htc_pipe.c
+++ b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
@@ -898,9 +898,6 @@ static int htc_process_trailer(struct htc_target *target, u8 *buffer,
break;
}
- if (status != 0)
- break;
-
/* advance buffer past this record for next time around */
buffer += record->len;
len -= record->len;
diff --git a/drivers/net/wireless/ath/ath6kl/trace.h b/drivers/net/wireless/ath/ath6kl/trace.h
index 91e735cfdef7..a3d3740419eb 100644
--- a/drivers/net/wireless/ath/ath6kl/trace.h
+++ b/drivers/net/wireless/ath/ath6kl/trace.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: ISC */
#if !defined(_ATH6KL_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#include <net/cfg80211.h>
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index 68854c45d0a4..2382c6c46851 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -1176,6 +1176,10 @@ static int ath6kl_wmi_pstream_timeout_event_rx(struct wmi *wmi, u8 *datap,
return -EINVAL;
ev = (struct wmi_pstream_timeout_event *) datap;
+ if (ev->traffic_class >= WMM_NUM_AC) {
+ ath6kl_err("invalid traffic class: %d\n", ev->traffic_class);
+ return -EINVAL;
+ }
/*
* When the pstream (fat pipe == AC) timesout, it means there were
@@ -1295,8 +1299,7 @@ static int ath6kl_wmi_neighbor_report_event_rx(struct wmi *wmi, u8 *datap,
if (len < sizeof(*ev))
return -EINVAL;
ev = (struct wmi_neighbor_report_event *) datap;
- if (sizeof(*ev) + ev->num_neighbors * sizeof(struct wmi_neighbor_info)
- > len) {
+ if (struct_size(ev, neighbor, ev->num_neighbors) > len) {
ath6kl_dbg(ATH6KL_DBG_WMI,
"truncated neighbor event (num=%d len=%d)\n",
ev->num_neighbors, len);
@@ -1517,6 +1520,10 @@ static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len,
return -EINVAL;
reply = (struct wmi_cac_event *) datap;
+ if (reply->ac >= WMM_NUM_AC) {
+ ath6kl_err("invalid AC: %d\n", reply->ac);
+ return -EINVAL;
+ }
if ((reply->cac_indication == CAC_INDICATION_ADMISSION_RESP) &&
(reply->status_code != IEEE80211_TSPEC_STATUS_ADMISS_ACCEPTED)) {
@@ -2633,7 +2640,7 @@ int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 if_idx, u8 traffic_class,
u16 active_tsids = 0;
int ret;
- if (traffic_class > 3) {
+ if (traffic_class >= WMM_NUM_AC) {
ath6kl_err("invalid traffic class: %d\n", traffic_class);
return -EINVAL;
}
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index a1ef8769983a..5601cfd6a293 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-License-Identifier: ISC
config ATH9K_HW
tristate
config ATH9K_COMMON
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index f71b2ad8275c..15af0a836925 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: ISC
ath9k-y += beacon.o \
gpio.o \
init.o \
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index 98c5f524a360..daf30f9946b4 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -157,7 +157,9 @@ static int ar9003_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
freq = centers.synth_center;
if (freq < 4800) { /* 2 GHz, fractional mode */
- if (AR_SREV_9330(ah)) {
+ if (AR_SREV_9330(ah) || AR_SREV_9485(ah) ||
+ AR_SREV_9531(ah) || AR_SREV_9550(ah) ||
+ AR_SREV_9561(ah) || AR_SREV_9565(ah)) {
if (ah->is_clk_25mhz)
div = 75;
else
@@ -166,16 +168,6 @@ static int ar9003_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
channelSel = (freq * 4) / div;
chan_frac = (((freq * 4) % div) * 0x20000) / div;
channelSel = (channelSel << 17) | chan_frac;
- } else if (AR_SREV_9485(ah) || AR_SREV_9565(ah)) {
- /*
- * freq_ref = 40 / (refdiva >> amoderefsel);
- * where refdiva=1 and amoderefsel=0
- * ndiv = ((chan_mhz * 4) / 3) / freq_ref;
- * chansel = int(ndiv), chanfrac = (ndiv - chansel) * 0x20000
- */
- channelSel = (freq * 4) / 120;
- chan_frac = (((freq * 4) % 120) * 0x20000) / 120;
- channelSel = (channelSel << 17) | chan_frac;
} else if (AR_SREV_9340(ah)) {
if (ah->is_clk_25mhz) {
channelSel = (freq * 2) / 75;
@@ -184,16 +176,6 @@ static int ar9003_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
} else {
channelSel = CHANSEL_2G(freq) >> 1;
}
- } else if (AR_SREV_9550(ah) || AR_SREV_9531(ah) ||
- AR_SREV_9561(ah)) {
- if (ah->is_clk_25mhz)
- div = 75;
- else
- div = 120;
-
- channelSel = (freq * 4) / div;
- chan_frac = (((freq * 4) % div) * 0x20000) / div;
- channelSel = (channelSel << 17) | chan_frac;
} else {
channelSel = CHANSEL_2G(freq);
}
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c
index 6fbd5559c0c0..c22d457dbc54 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom.c
@@ -428,7 +428,7 @@ u16 ath9k_hw_get_scaled_power(struct ath_hw *ah, u16 power_limit,
else
power_limit = 0;
- return power_limit;
+ return min_t(u16, power_limit, MAX_RATE_POWER);
}
void ath9k_hw_update_regulatory_maxpower(struct ath_hw *ah)
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
index b8c0a08066a0..e8c2cc03be0c 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
@@ -424,6 +424,7 @@ static void ath9k_hw_set_4k_power_per_rate_table(struct ath_hw *ah,
ath9k_hw_get_channel_centers(ah, chan, &centers);
scaledPower = powerLimit - antenna_reduction;
+ scaledPower = min_t(u16, scaledPower, MAX_RATE_POWER);
numCtlModes = ARRAY_SIZE(ctlModesFor11g) - SUB_NUM_CTL_MODES_AT_2G_40;
pCtlMode = ctlModesFor11g;
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 8581d917635a..052deffb4c9d 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -252,8 +252,9 @@ void ath9k_hw_get_channel_centers(struct ath_hw *ah,
/* Chip Revisions */
/******************/
-static void ath9k_hw_read_revisions(struct ath_hw *ah)
+static bool ath9k_hw_read_revisions(struct ath_hw *ah)
{
+ u32 srev;
u32 val;
if (ah->get_mac_revision)
@@ -269,25 +270,33 @@ static void ath9k_hw_read_revisions(struct ath_hw *ah)
val = REG_READ(ah, AR_SREV);
ah->hw_version.macRev = MS(val, AR_SREV_REVISION2);
}
- return;
+ return true;
case AR9300_DEVID_AR9340:
ah->hw_version.macVersion = AR_SREV_VERSION_9340;
- return;
+ return true;
case AR9300_DEVID_QCA955X:
ah->hw_version.macVersion = AR_SREV_VERSION_9550;
- return;
+ return true;
case AR9300_DEVID_AR953X:
ah->hw_version.macVersion = AR_SREV_VERSION_9531;
- return;
+ return true;
case AR9300_DEVID_QCA956X:
ah->hw_version.macVersion = AR_SREV_VERSION_9561;
- return;
+ return true;
}
- val = REG_READ(ah, AR_SREV) & AR_SREV_ID;
+ srev = REG_READ(ah, AR_SREV);
+
+ if (srev == -EIO) {
+ ath_err(ath9k_hw_common(ah),
+ "Failed to read SREV register");
+ return false;
+ }
+
+ val = srev & AR_SREV_ID;
if (val == 0xFF) {
- val = REG_READ(ah, AR_SREV);
+ val = srev;
ah->hw_version.macVersion =
(val & AR_SREV_VERSION2) >> AR_SREV_TYPE2_S;
ah->hw_version.macRev = MS(val, AR_SREV_REVISION2);
@@ -306,6 +315,8 @@ static void ath9k_hw_read_revisions(struct ath_hw *ah)
if (ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCIE)
ah->is_pciexpress = true;
}
+
+ return true;
}
/************************************/
@@ -446,7 +457,7 @@ static void ath9k_hw_init_defaults(struct ath_hw *ah)
struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
regulatory->country_code = CTRY_DEFAULT;
- regulatory->power_limit = MAX_RATE_POWER;
+ regulatory->power_limit = MAX_COMBINED_POWER;
ah->hw_version.magic = AR5416_MAGIC;
ah->hw_version.subvendorid = 0;
@@ -559,7 +570,10 @@ static int __ath9k_hw_init(struct ath_hw *ah)
struct ath_common *common = ath9k_hw_common(ah);
int r = 0;
- ath9k_hw_read_revisions(ah);
+ if (!ath9k_hw_read_revisions(ah)) {
+ ath_err(common, "Could not read hardware revisions");
+ return -EOPNOTSUPP;
+ }
switch (ah->hw_version.macVersion) {
case AR_SREV_VERSION_5416_PCI:
@@ -2952,7 +2966,7 @@ void ath9k_hw_apply_txpower(struct ath_hw *ah, struct ath9k_channel *chan,
ctl = ath9k_regd_get_ctl(reg, chan);
channel = chan->chan;
- chan_pwr = min_t(int, channel->max_power * 2, MAX_RATE_POWER);
+ chan_pwr = min_t(int, channel->max_power * 2, MAX_COMBINED_POWER);
new_pwr = min_t(int, chan_pwr, reg->power_limit);
ah->eep_ops->set_txpower(ah, chan, ctl,
@@ -2965,9 +2979,9 @@ void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit, bool test)
struct ath9k_channel *chan = ah->curchan;
struct ieee80211_channel *channel = chan->chan;
- reg->power_limit = min_t(u32, limit, MAX_RATE_POWER);
+ reg->power_limit = min_t(u32, limit, MAX_COMBINED_POWER);
if (test)
- channel->max_power = MAX_RATE_POWER / 2;
+ channel->max_power = MAX_COMBINED_POWER / 2;
ath9k_hw_apply_txpower(ah, chan, test);
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 68956cdc8c9a..2e4489700a85 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -173,6 +173,7 @@
#define ATH9K_NUM_QUEUES 10
#define MAX_RATE_POWER 63
+#define MAX_COMBINED_POWER 254 /* 128 dBm, chosen to fit in u8 */
#define AH_WAIT_TIMEOUT 100000 /* (us) */
#define AH_TSF_WRITE_TIMEOUT 100 /* (us) */
#define AH_TIME_QUANTUM 10
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index a04d8616fe09..17c318902cb8 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -805,7 +805,7 @@ static void ath9k_init_band_txpower(struct ath_softc *sc, int band)
ah->curchan = &ah->channels[chan->hw_value];
cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_HT20);
ath9k_cmn_get_channel(sc->hw, ah, &chandef);
- ath9k_hw_set_txpowerlimit(ah, MAX_RATE_POWER, true);
+ ath9k_hw_set_txpowerlimit(ah, MAX_COMBINED_POWER, true);
}
}
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 4e97f7f3b2a3..06e660858766 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -815,6 +815,7 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
struct ath_common *common = ath9k_hw_common(ah);
struct ieee80211_hdr *hdr;
bool discard_current = sc->rx.discard_next;
+ bool is_phyerr;
/*
* Discard corrupt descriptors which are marked in
@@ -827,8 +828,11 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
/*
* Discard zero-length packets and packets smaller than an ACK
+ * which are not PHY_ERROR (short radar pulses have a length of 3)
*/
- if (rx_stats->rs_datalen < 10) {
+ is_phyerr = rx_stats->rs_status & ATH9K_RXERR_PHY;
+ if (!rx_stats->rs_datalen ||
+ (rx_stats->rs_datalen < 10 && !is_phyerr)) {
RX_STAT_INC(sc, rx_len_err);
goto corrupt;
}
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index b17e1ca40995..31e7b108279c 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -410,7 +410,6 @@ static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
struct ath_tx_status *ts, int txok,
int *nframes, int *nbad)
{
- struct ath_frame_info *fi;
u16 seq_st = 0;
u32 ba[WME_BA_BMP_SIZE >> 5];
int ba_index;
@@ -426,7 +425,6 @@ static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
}
while (bf) {
- fi = get_frame_info(bf->bf_mpdu);
ba_index = ATH_BA_INDEX(seq_st, bf->bf_state.seqno);
(*nframes)++;
@@ -446,7 +444,6 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
{
struct ath_node *an = NULL;
struct sk_buff *skb;
- struct ieee80211_hdr *hdr;
struct ieee80211_tx_info *tx_info;
struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
struct list_head bf_head;
@@ -463,8 +460,6 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
int bar_index = -1;
skb = bf->bf_mpdu;
- hdr = (struct ieee80211_hdr *)skb->data;
-
tx_info = IEEE80211_SKB_CB(skb);
memcpy(rates, bf->rates, sizeof(rates));
@@ -668,7 +663,8 @@ static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
static void ath_tx_count_airtime(struct ath_softc *sc,
struct ieee80211_sta *sta,
struct ath_buf *bf,
- struct ath_tx_status *ts)
+ struct ath_tx_status *ts,
+ u8 tid)
{
u32 airtime = 0;
int i;
@@ -679,7 +675,7 @@ static void ath_tx_count_airtime(struct ath_softc *sc,
airtime += rate_dur * bf->rates[i].count;
}
- ieee80211_sta_register_airtime(sta, ts->tid, airtime, 0);
+ ieee80211_sta_register_airtime(sta, tid, airtime, 0);
}
static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
@@ -709,7 +705,7 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
if (sta) {
struct ath_node *an = (struct ath_node *)sta->drv_priv;
tid = ath_get_skb_tid(sc, an, bf->bf_mpdu);
- ath_tx_count_airtime(sc, sta, bf, ts);
+ ath_tx_count_airtime(sc, sta, bf, ts, tid->tidno);
if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY))
tid->clear_ps_filter = true;
}
@@ -2269,12 +2265,10 @@ static int ath_tx_prepare(struct ieee80211_hw *hw, struct sk_buff *skb,
int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
struct ath_tx_control *txctl)
{
- struct ieee80211_hdr *hdr;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_sta *sta = txctl->sta;
struct ieee80211_vif *vif = info->control.vif;
struct ath_frame_info *fi = get_frame_info(skb);
- struct ath_vif *avp = NULL;
struct ath_softc *sc = hw->priv;
struct ath_txq *txq = txctl->txq;
struct ath_atx_tid *tid = NULL;
@@ -2283,16 +2277,12 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
bool ps_resp;
int q, ret;
- if (vif)
- avp = (void *)vif->drv_priv;
-
ps_resp = !!(info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE);
ret = ath_tx_prepare(hw, skb, txctl);
if (ret)
return ret;
- hdr = (struct ieee80211_hdr *) skb->data;
/*
* At this point, the vif, hw_key and sta pointers in the tx control
* info are no longer valid (overwritten by the ath_frame_info data.
diff --git a/drivers/net/wireless/ath/carl9170/mac.c b/drivers/net/wireless/ath/carl9170/mac.c
index 7d4a72dc98db..b2eeb9fd68d2 100644
--- a/drivers/net/wireless/ath/carl9170/mac.c
+++ b/drivers/net/wireless/ath/carl9170/mac.c
@@ -519,7 +519,7 @@ int carl9170_set_mac_tpc(struct ar9170 *ar, struct ieee80211_channel *channel)
power = ar->power_5G_leg[0] & 0x3f;
break;
default:
- BUG_ON(1);
+ BUG();
}
power = min_t(unsigned int, power, ar->hw->conf.power_level * 2);
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 7f1bdea742b8..40a8054f8aa6 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -1387,13 +1387,8 @@ static int carl9170_op_conf_tx(struct ieee80211_hw *hw,
int ret;
mutex_lock(&ar->mutex);
- if (queue < ar->hw->queues) {
- memcpy(&ar->edcf[ar9170_qmap[queue]], param, sizeof(*param));
- ret = carl9170_set_qos(ar);
- } else {
- ret = -EINVAL;
- }
-
+ memcpy(&ar->edcf[ar9170_qmap[queue]], param, sizeof(*param));
+ ret = carl9170_set_qos(ar);
mutex_unlock(&ar->mutex);
return ret;
}
diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c
index 8e154f6364a3..23ab8a80c18c 100644
--- a/drivers/net/wireless/ath/carl9170/rx.c
+++ b/drivers/net/wireless/ath/carl9170/rx.c
@@ -795,7 +795,7 @@ static void carl9170_rx_untie_data(struct ar9170 *ar, u8 *buf, int len)
break;
default:
- BUG_ON(1);
+ BUG();
break;
}
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
index e7c3f3b8457d..99f1897a775d 100644
--- a/drivers/net/wireless/ath/carl9170/usb.c
+++ b/drivers/net/wireless/ath/carl9170/usb.c
@@ -128,6 +128,8 @@ static const struct usb_device_id carl9170_usb_ids[] = {
};
MODULE_DEVICE_TABLE(usb, carl9170_usb_ids);
+static struct usb_driver carl9170_driver;
+
static void carl9170_usb_submit_data_urb(struct ar9170 *ar)
{
struct urb *urb;
@@ -966,32 +968,28 @@ err_out:
static void carl9170_usb_firmware_failed(struct ar9170 *ar)
{
- struct device *parent = ar->udev->dev.parent;
- struct usb_device *udev;
-
- /*
- * Store a copy of the usb_device pointer locally.
- * This is because device_release_driver initiates
- * carl9170_usb_disconnect, which in turn frees our
- * driver context (ar).
+ /* Store copies of the usb_interface and usb_device pointers locally.
+ * This is because release_driver initiates carl9170_usb_disconnect,
+ * which in turn frees our driver context (ar).
*/
- udev = ar->udev;
+ struct usb_interface *intf = ar->intf;
+ struct usb_device *udev = ar->udev;
complete(&ar->fw_load_wait);
+ /* at this point 'ar' could be already freed. Don't use it anymore */
+ ar = NULL;
/* unbind anything failed */
- if (parent)
- device_lock(parent);
-
- device_release_driver(&udev->dev);
- if (parent)
- device_unlock(parent);
+ usb_lock_device(udev);
+ usb_driver_release_interface(&carl9170_driver, intf);
+ usb_unlock_device(udev);
- usb_put_dev(udev);
+ usb_put_intf(intf);
}
static void carl9170_usb_firmware_finish(struct ar9170 *ar)
{
+ struct usb_interface *intf = ar->intf;
int err;
err = carl9170_parse_firmware(ar);
@@ -1009,7 +1007,7 @@ static void carl9170_usb_firmware_finish(struct ar9170 *ar)
goto err_unrx;
complete(&ar->fw_load_wait);
- usb_put_dev(ar->udev);
+ usb_put_intf(intf);
return;
err_unrx:
@@ -1052,7 +1050,6 @@ static int carl9170_usb_probe(struct usb_interface *intf,
return PTR_ERR(ar);
udev = interface_to_usbdev(intf);
- usb_get_dev(udev);
ar->udev = udev;
ar->intf = intf;
ar->features = id->driver_info;
@@ -1094,15 +1091,14 @@ static int carl9170_usb_probe(struct usb_interface *intf,
atomic_set(&ar->rx_anch_urbs, 0);
atomic_set(&ar->rx_pool_urbs, 0);
- usb_get_dev(ar->udev);
+ usb_get_intf(intf);
carl9170_set_state(ar, CARL9170_STOPPED);
err = request_firmware_nowait(THIS_MODULE, 1, CARL9170FW_NAME,
&ar->udev->dev, GFP_KERNEL, ar, carl9170_usb_firmware_step2);
if (err) {
- usb_put_dev(udev);
- usb_put_dev(udev);
+ usb_put_intf(intf);
carl9170_free(ar);
}
return err;
@@ -1131,7 +1127,6 @@ static void carl9170_usb_disconnect(struct usb_interface *intf)
carl9170_release_firmware(ar);
carl9170_free(ar);
- usb_put_dev(udev);
}
#ifdef CONFIG_PM
diff --git a/drivers/net/wireless/ath/dfs_pattern_detector.c b/drivers/net/wireless/ath/dfs_pattern_detector.c
index d52b31b45df7..a274eb0d1968 100644
--- a/drivers/net/wireless/ath/dfs_pattern_detector.c
+++ b/drivers/net/wireless/ath/dfs_pattern_detector.c
@@ -111,7 +111,7 @@ static const struct radar_detector_specs jp_radar_ref_types[] = {
JP_PATTERN(0, 0, 1, 1428, 1428, 1, 18, 29, false),
JP_PATTERN(1, 2, 3, 3846, 3846, 1, 18, 29, false),
JP_PATTERN(2, 0, 1, 1388, 1388, 1, 18, 50, false),
- JP_PATTERN(3, 1, 2, 4000, 4000, 1, 18, 50, false),
+ JP_PATTERN(3, 0, 4, 4000, 4000, 1, 18, 50, false),
JP_PATTERN(4, 0, 5, 150, 230, 1, 23, 50, false),
JP_PATTERN(5, 6, 10, 200, 500, 1, 16, 50, false),
JP_PATTERN(6, 11, 20, 200, 500, 1, 12, 50, false),
diff --git a/drivers/net/wireless/ath/regd.h b/drivers/net/wireless/ath/regd.h
index 75ddaefdd049..8d5a16b558e6 100644
--- a/drivers/net/wireless/ath/regd.h
+++ b/drivers/net/wireless/ath/regd.h
@@ -28,7 +28,6 @@ enum ctl_group {
CTL_ETSI = 0x30,
};
-#define NO_CTL 0xff
#define SD_NO_CTL 0xE0
#define NO_CTL 0xff
#define CTL_11A 0
diff --git a/drivers/net/wireless/ath/wcn36xx/Kconfig b/drivers/net/wireless/ath/wcn36xx/Kconfig
index 4ab2d59ff2ca..a4b153470a2c 100644
--- a/drivers/net/wireless/ath/wcn36xx/Kconfig
+++ b/drivers/net/wireless/ath/wcn36xx/Kconfig
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-License-Identifier: ISC
config WCN36XX
tristate "Qualcomm Atheros WCN3660/3680 support"
depends on MAC80211 && HAS_DMA
diff --git a/drivers/net/wireless/ath/wcn36xx/Makefile b/drivers/net/wireless/ath/wcn36xx/Makefile
index 582049f65735..27413703ad69 100644
--- a/drivers/net/wireless/ath/wcn36xx/Makefile
+++ b/drivers/net/wireless/ath/wcn36xx/Makefile
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: ISC
obj-$(CONFIG_WCN36XX) := wcn36xx.o
wcn36xx-y += main.o \
dxe.o \
diff --git a/drivers/net/wireless/ath/wil6210/Kconfig b/drivers/net/wireless/ath/wil6210/Kconfig
index b1a339859feb..0d1a8dab30ed 100644
--- a/drivers/net/wireless/ath/wil6210/Kconfig
+++ b/drivers/net/wireless/ath/wil6210/Kconfig
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-License-Identifier: ISC
config WIL6210
tristate "Wilocity 60g WiFi card wil6210 support"
select WANT_DEV_COREDUMP
diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile
index d3d61ae459e2..53a0d995ddb0 100644
--- a/drivers/net/wireless/ath/wil6210/Makefile
+++ b/drivers/net/wireless/ath/wil6210/Makefile
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: ISC
obj-$(CONFIG_WIL6210) += wil6210.o
wil6210-y := main.o
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 804955d24b30..d436cc51dfd1 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -314,7 +314,8 @@ int wil_cid_fill_sinfo(struct wil6210_vif *vif, int cid,
memset(&reply, 0, sizeof(reply));
rc = wmi_call(wil, WMI_NOTIFY_REQ_CMDID, vif->mid, &cmd, sizeof(cmd),
- WMI_NOTIFY_REQ_DONE_EVENTID, &reply, sizeof(reply), 20);
+ WMI_NOTIFY_REQ_DONE_EVENTID, &reply, sizeof(reply),
+ WIL_WMI_CALL_GENERAL_TO_MS);
if (rc)
return rc;
@@ -380,8 +381,8 @@ static int wil_cfg80211_get_station(struct wiphy *wiphy,
wil_dbg_misc(wil, "get_station: %pM CID %d MID %d\n", mac, cid,
vif->mid);
- if (cid < 0)
- return cid;
+ if (!wil_cid_valid(wil, cid))
+ return -ENOENT;
rc = wil_cid_fill_sinfo(vif, cid, sinfo);
@@ -395,7 +396,7 @@ static int wil_find_cid_by_idx(struct wil6210_priv *wil, u8 mid, int idx)
{
int i;
- for (i = 0; i < max_assoc_sta; i++) {
+ for (i = 0; i < wil->max_assoc_sta; i++) {
if (wil->sta[i].status == wil_sta_unused)
continue;
if (wil->sta[i].mid != mid)
@@ -417,7 +418,7 @@ static int wil_cfg80211_dump_station(struct wiphy *wiphy,
int rc;
int cid = wil_find_cid_by_idx(wil, vif->mid, idx);
- if (cid < 0)
+ if (!wil_cid_valid(wil, cid))
return -ENOENT;
ether_addr_copy(mac, wil->sta[cid].addr);
@@ -643,6 +644,16 @@ out:
return rc;
}
+static bool wil_is_safe_switch(enum nl80211_iftype from,
+ enum nl80211_iftype to)
+{
+ if (from == NL80211_IFTYPE_STATION &&
+ to == NL80211_IFTYPE_P2P_CLIENT)
+ return true;
+
+ return false;
+}
+
static int wil_cfg80211_change_iface(struct wiphy *wiphy,
struct net_device *ndev,
enum nl80211_iftype type,
@@ -668,7 +679,8 @@ static int wil_cfg80211_change_iface(struct wiphy *wiphy,
* because it can cause significant disruption
*/
if (!wil_has_other_active_ifaces(wil, ndev, true, false) &&
- netif_running(ndev) && !wil_is_recovery_blocked(wil)) {
+ netif_running(ndev) && !wil_is_recovery_blocked(wil) &&
+ !wil_is_safe_switch(wdev->iftype, type)) {
wil_dbg_misc(wil, "interface is up. resetting...\n");
mutex_lock(&wil->mutex);
__wil_down(wil);
@@ -3022,7 +3034,7 @@ static int wil_rf_sector_set_selected(struct wiphy *wiphy,
wil, vif->mid, WMI_INVALID_RF_SECTOR_INDEX,
sector_type, WIL_CID_ALL);
if (rc == -EINVAL) {
- for (i = 0; i < max_assoc_sta; i++) {
+ for (i = 0; i < wil->max_assoc_sta; i++) {
if (wil->sta[i].mid != vif->mid)
continue;
rc = wil_rf_sector_wmi_set_selected(
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index df2adff6c33a..74834131cf7c 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -63,7 +63,9 @@ static void wil_print_desc_edma(struct seq_file *s, struct wil6210_priv *wil,
&ring->va[idx].rx.enhanced;
u16 buff_id = le16_to_cpu(rx_d->mac.buff_id);
- has_skb = wil->rx_buff_mgmt.buff_arr[buff_id].skb;
+ if (wil->rx_buff_mgmt.buff_arr &&
+ wil_val_in_range(buff_id, 0, wil->rx_buff_mgmt.size))
+ has_skb = wil->rx_buff_mgmt.buff_arr[buff_id].skb;
seq_printf(s, "%c", (has_skb) ? _h : _s);
} else {
struct wil_tx_enhanced_desc *d =
@@ -71,9 +73,9 @@ static void wil_print_desc_edma(struct seq_file *s, struct wil6210_priv *wil,
&ring->va[idx].tx.enhanced;
num_of_descs = (u8)d->mac.d[2];
- has_skb = ring->ctx[idx].skb;
+ has_skb = ring->ctx && ring->ctx[idx].skb;
if (num_of_descs >= 1)
- seq_printf(s, "%c", ring->ctx[idx].skb ? _h : _s);
+ seq_printf(s, "%c", has_skb ? _h : _s);
else
/* num_of_descs == 0, it's a frag in a list of descs */
seq_printf(s, "%c", has_skb ? 'h' : _s);
@@ -84,7 +86,7 @@ static void wil_print_ring(struct seq_file *s, struct wil6210_priv *wil,
const char *name, struct wil_ring *ring,
char _s, char _h)
{
- void __iomem *x = wmi_addr(wil, ring->hwtail);
+ void __iomem *x;
u32 v;
seq_printf(s, "RING %s = {\n", name);
@@ -96,7 +98,21 @@ static void wil_print_ring(struct seq_file *s, struct wil6210_priv *wil,
else
seq_printf(s, " swtail = %d\n", ring->swtail);
seq_printf(s, " swhead = %d\n", ring->swhead);
+ if (wil->use_enhanced_dma_hw) {
+ int ring_id = ring->is_rx ?
+ WIL_RX_DESC_RING_ID : ring - wil->ring_tx;
+ /* SUBQ_CONS is a table of 32 entries, one for each Q pair.
+ * lower 16bits are for even ring_id and upper 16bits are for
+ * odd ring_id
+ */
+ x = wmi_addr(wil, RGF_DMA_SCM_SUBQ_CONS + 4 * (ring_id / 2));
+ v = readl_relaxed(x);
+
+ v = (ring_id % 2 ? (v >> 16) : (v & 0xffff));
+ seq_printf(s, " hwhead = %u\n", v);
+ }
seq_printf(s, " hwtail = [0x%08x] -> ", ring->hwtail);
+ x = wmi_addr(wil, ring->hwtail);
if (x) {
v = readl(x);
seq_printf(s, "0x%08x = %d\n", v, v);
@@ -162,7 +178,7 @@ static int ring_show(struct seq_file *s, void *data)
snprintf(name, sizeof(name), "tx_%2d", i);
- if (cid < max_assoc_sta)
+ if (cid < wil->max_assoc_sta)
seq_printf(s,
"\n%pM CID %d TID %d 1x%s BACK([%u] %u TU A%s) [%3d|%3d] idle %s\n",
wil->sta[cid].addr, cid, tid,
@@ -188,7 +204,7 @@ DEFINE_SHOW_ATTRIBUTE(ring);
static void wil_print_sring(struct seq_file *s, struct wil6210_priv *wil,
struct wil_status_ring *sring)
{
- void __iomem *x = wmi_addr(wil, sring->hwtail);
+ void __iomem *x;
int sring_idx = sring - wil->srings;
u32 v;
@@ -199,7 +215,19 @@ static void wil_print_sring(struct seq_file *s, struct wil6210_priv *wil,
seq_printf(s, " size = %d\n", sring->size);
seq_printf(s, " elem_size = %zu\n", sring->elem_size);
seq_printf(s, " swhead = %d\n", sring->swhead);
+ if (wil->use_enhanced_dma_hw) {
+ /* COMPQ_PROD is a table of 32 entries, one for each Q pair.
+ * lower 16bits are for even ring_id and upper 16bits are for
+ * odd ring_id
+ */
+ x = wmi_addr(wil, RGF_DMA_SCM_COMPQ_PROD + 4 * (sring_idx / 2));
+ v = readl_relaxed(x);
+
+ v = (sring_idx % 2 ? (v >> 16) : (v & 0xffff));
+ seq_printf(s, " hwhead = %u\n", v);
+ }
seq_printf(s, " hwtail = [0x%08x] -> ", sring->hwtail);
+ x = wmi_addr(wil, sring->hwtail);
if (x) {
v = readl_relaxed(x);
seq_printf(s, "0x%08x = %d\n", v, v);
@@ -394,25 +422,18 @@ static int wil_debugfs_iomem_x32_get(void *data, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(fops_iomem_x32, wil_debugfs_iomem_x32_get,
wil_debugfs_iomem_x32_set, "0x%08llx\n");
-static struct dentry *wil_debugfs_create_iomem_x32(const char *name,
- umode_t mode,
- struct dentry *parent,
- void *value,
- struct wil6210_priv *wil)
+static void wil_debugfs_create_iomem_x32(const char *name, umode_t mode,
+ struct dentry *parent, void *value,
+ struct wil6210_priv *wil)
{
- struct dentry *file;
struct wil_debugfs_iomem_data *data = &wil->dbg_data.data_arr[
wil->dbg_data.iomem_data_count];
data->wil = wil;
data->offset = value;
- file = debugfs_create_file_unsafe(name, mode, parent, data,
- &fops_iomem_x32);
- if (!IS_ERR_OR_NULL(file))
- wil->dbg_data.iomem_data_count++;
-
- return file;
+ debugfs_create_file_unsafe(name, mode, parent, data, &fops_iomem_x32);
+ wil->dbg_data.iomem_data_count++;
}
static int wil_debugfs_ulong_set(void *data, u64 val)
@@ -430,14 +451,6 @@ static int wil_debugfs_ulong_get(void *data, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(wil_fops_ulong, wil_debugfs_ulong_get,
wil_debugfs_ulong_set, "0x%llx\n");
-static struct dentry *wil_debugfs_create_ulong(const char *name, umode_t mode,
- struct dentry *parent,
- ulong *value)
-{
- return debugfs_create_file_unsafe(name, mode, parent, value,
- &wil_fops_ulong);
-}
-
/**
* wil6210_debugfs_init_offset - create set of debugfs files
* @wil - driver's context, used for printing
@@ -454,37 +467,30 @@ static void wil6210_debugfs_init_offset(struct wil6210_priv *wil,
int i;
for (i = 0; tbl[i].name; i++) {
- struct dentry *f;
-
switch (tbl[i].type) {
case doff_u32:
- f = debugfs_create_u32(tbl[i].name, tbl[i].mode, dbg,
- base + tbl[i].off);
+ debugfs_create_u32(tbl[i].name, tbl[i].mode, dbg,
+ base + tbl[i].off);
break;
case doff_x32:
- f = debugfs_create_x32(tbl[i].name, tbl[i].mode, dbg,
- base + tbl[i].off);
+ debugfs_create_x32(tbl[i].name, tbl[i].mode, dbg,
+ base + tbl[i].off);
break;
case doff_ulong:
- f = wil_debugfs_create_ulong(tbl[i].name, tbl[i].mode,
- dbg, base + tbl[i].off);
+ debugfs_create_file_unsafe(tbl[i].name, tbl[i].mode,
+ dbg, base + tbl[i].off,
+ &wil_fops_ulong);
break;
case doff_io32:
- f = wil_debugfs_create_iomem_x32(tbl[i].name,
- tbl[i].mode, dbg,
- base + tbl[i].off,
- wil);
+ wil_debugfs_create_iomem_x32(tbl[i].name, tbl[i].mode,
+ dbg, base + tbl[i].off,
+ wil);
break;
case doff_u8:
- f = debugfs_create_u8(tbl[i].name, tbl[i].mode, dbg,
- base + tbl[i].off);
+ debugfs_create_u8(tbl[i].name, tbl[i].mode, dbg,
+ base + tbl[i].off);
break;
- default:
- f = ERR_PTR(-EINVAL);
}
- if (IS_ERR_OR_NULL(f))
- wil_err(wil, "Create file \"%s\": err %ld\n",
- tbl[i].name, PTR_ERR(f));
}
}
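/* Note on the changes above (interpretation, not stated in the diff): the
 * return values of the debugfs_create_*() helpers no longer need to be
 * checked, so the wrappers that only existed to propagate a dentry or error
 * are dropped and the creation helpers now return void.
 */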
@@ -499,19 +505,14 @@ static const struct dbg_off isr_off[] = {
{},
};
-static int wil6210_debugfs_create_ISR(struct wil6210_priv *wil,
- const char *name,
- struct dentry *parent, u32 off)
+static void wil6210_debugfs_create_ISR(struct wil6210_priv *wil,
+ const char *name, struct dentry *parent,
+ u32 off)
{
struct dentry *d = debugfs_create_dir(name, parent);
- if (IS_ERR_OR_NULL(d))
- return -ENODEV;
-
wil6210_debugfs_init_offset(wil, d, (void * __force)wil->csr + off,
isr_off);
-
- return 0;
}
static const struct dbg_off pseudo_isr_off[] = {
@@ -521,18 +522,13 @@ static const struct dbg_off pseudo_isr_off[] = {
{},
};
-static int wil6210_debugfs_create_pseudo_ISR(struct wil6210_priv *wil,
- struct dentry *parent)
+static void wil6210_debugfs_create_pseudo_ISR(struct wil6210_priv *wil,
+ struct dentry *parent)
{
struct dentry *d = debugfs_create_dir("PSEUDO_ISR", parent);
- if (IS_ERR_OR_NULL(d))
- return -ENODEV;
-
wil6210_debugfs_init_offset(wil, d, (void * __force)wil->csr,
pseudo_isr_off);
-
- return 0;
}
static const struct dbg_off lgc_itr_cnt_off[] = {
@@ -580,13 +576,9 @@ static int wil6210_debugfs_create_ITR_CNT(struct wil6210_priv *wil,
struct dentry *d, *dtx, *drx;
d = debugfs_create_dir("ITR_CNT", parent);
- if (IS_ERR_OR_NULL(d))
- return -ENODEV;
dtx = debugfs_create_dir("TX", d);
drx = debugfs_create_dir("RX", d);
- if (IS_ERR_OR_NULL(dtx) || IS_ERR_OR_NULL(drx))
- return -ENODEV;
wil6210_debugfs_init_offset(wil, d, (void * __force)wil->csr,
lgc_itr_cnt_off);
@@ -749,6 +741,44 @@ static const struct file_operations fops_rxon = {
.open = simple_open,
};
+static ssize_t wil_write_file_rbufcap(struct file *file,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct wil6210_priv *wil = file->private_data;
+ int val;
+ int rc;
+
+ rc = kstrtoint_from_user(buf, count, 0, &val);
+ if (rc) {
+ wil_err(wil, "Invalid argument\n");
+ return rc;
+ }
+ /* input value: negative to disable, 0 to use system default,
+ * 1..ring size to set descriptor threshold
+ */
+ wil_info(wil, "%s RBUFCAP, descriptors threshold - %d\n",
+ val < 0 ? "Disabling" : "Enabling", val);
+
+ if (!wil->ring_rx.va || val > wil->ring_rx.size) {
+ wil_err(wil, "Invalid descriptors threshold, %d\n", val);
+ return -EINVAL;
+ }
+
+ rc = wmi_rbufcap_cfg(wil, val < 0 ? 0 : 1, val < 0 ? 0 : val);
+ if (rc) {
+ wil_err(wil, "RBUFCAP config failed: %d\n", rc);
+ return rc;
+ }
+
+ return count;
+}
+
+static const struct file_operations fops_rbufcap = {
+ .write = wil_write_file_rbufcap,
+ .open = simple_open,
+};
+
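/* Illustrative sketch (not part of the patch, hypothetical helper name): how
 * a value written to the "rbufcap" debugfs file maps onto the
 * wmi_rbufcap_cfg() arguments, following the semantics documented in
 * wil_write_file_rbufcap() above.
 */
static void rbufcap_example(struct wil6210_priv *wil, int val)
{
	bool enable = val >= 0;			/* negative input disables RBUFCAP */
	u16 threshold = val < 0 ? 0 : val;	/* 0 = firmware default, else descriptor count */

	wmi_rbufcap_cfg(wil, enable, threshold);
}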
/* block ack control, write:
* - "add <ringid> <agg_size> <timeout>" to trigger ADDBA
* - "del_tx <ringid> <reason>" to trigger DELBA for Tx side
@@ -811,7 +841,7 @@ static ssize_t wil_write_back(struct file *file, const char __user *buf,
"BACK: del_rx require at least 2 params\n");
return -EINVAL;
}
- if (p1 < 0 || p1 >= max_assoc_sta) {
+ if (p1 < 0 || p1 >= wil->max_assoc_sta) {
wil_err(wil, "BACK: invalid CID %d\n", p1);
return -EINVAL;
}
@@ -910,9 +940,8 @@ static ssize_t wil_read_pmccfg(struct file *file, char __user *user_buf,
" - \"alloc <num descriptors> <descriptor_size>\" to allocate pmc\n"
" - \"free\" to free memory allocated for pmc\n";
- sprintf(text, "Last command status: %d\n\n%s",
- wil_pmc_last_cmd_status(wil),
- help);
+ snprintf(text, sizeof(text), "Last command status: %d\n\n%s",
+ wil_pmc_last_cmd_status(wil), help);
return simple_read_from_buffer(user_buf, count, ppos, text,
strlen(text) + 1);
@@ -1091,19 +1120,18 @@ static int txdesc_show(struct seq_file *s, void *data)
if (wil->use_enhanced_dma_hw) {
if (tx) {
- skb = ring->ctx[txdesc_idx].skb;
- } else {
+ skb = ring->ctx ? ring->ctx[txdesc_idx].skb : NULL;
+ } else if (wil->rx_buff_mgmt.buff_arr) {
struct wil_rx_enhanced_desc *rx_d =
(struct wil_rx_enhanced_desc *)
&ring->va[txdesc_idx].rx.enhanced;
u16 buff_id = le16_to_cpu(rx_d->mac.buff_id);
if (!wil_val_in_range(buff_id, 0,
- wil->rx_buff_mgmt.size)) {
+ wil->rx_buff_mgmt.size))
seq_printf(s, "invalid buff_id %d\n", buff_id);
- return 0;
- }
- skb = wil->rx_buff_mgmt.buff_arr[buff_id].skb;
+ else
+ skb = wil->rx_buff_mgmt.buff_arr[buff_id].skb;
}
} else {
skb = ring->ctx[txdesc_idx].skb;
@@ -1136,7 +1164,7 @@ static int status_msg_show(struct seq_file *s, void *data)
struct wil6210_priv *wil = s->private;
int sring_idx = dbg_sring_index;
struct wil_status_ring *sring;
- bool tx = sring_idx == wil->tx_sring_idx ? 1 : 0;
+ bool tx;
u32 status_msg_idx = dbg_status_msg_index;
u32 *u;
@@ -1146,6 +1174,7 @@ static int status_msg_show(struct seq_file *s, void *data)
}
sring = &wil->srings[sring_idx];
+ tx = !sring->is_rx;
if (!sring->va) {
seq_printf(s, "No %cX status ring\n", tx ? 'T' : 'R');
@@ -1262,14 +1291,14 @@ static int bf_show(struct seq_file *s, void *data)
memset(&reply, 0, sizeof(reply));
- for (i = 0; i < max_assoc_sta; i++) {
+ for (i = 0; i < wil->max_assoc_sta; i++) {
u32 status;
cmd.cid = i;
rc = wmi_call(wil, WMI_NOTIFY_REQ_CMDID, vif->mid,
&cmd, sizeof(cmd),
WMI_NOTIFY_REQ_DONE_EVENTID, &reply,
- sizeof(reply), 20);
+ sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
/* if reply is all-0, ignore this CID */
if (rc || is_all_zeros(&reply.evt, sizeof(reply.evt)))
continue;
@@ -1307,7 +1336,7 @@ static void print_temp(struct seq_file *s, const char *prefix, s32 t)
{
switch (t) {
case 0:
- case ~(u32)0:
+ case WMI_INVALID_TEMPERATURE:
seq_printf(s, "%s N/A\n", prefix);
break;
default:
@@ -1320,17 +1349,41 @@ static void print_temp(struct seq_file *s, const char *prefix, s32 t)
static int temp_show(struct seq_file *s, void *data)
{
struct wil6210_priv *wil = s->private;
- s32 t_m, t_r;
- int rc = wmi_get_temperature(wil, &t_m, &t_r);
+ int rc, i;
- if (rc) {
- seq_puts(s, "Failed\n");
- return 0;
- }
+ if (test_bit(WMI_FW_CAPABILITY_TEMPERATURE_ALL_RF,
+ wil->fw_capabilities)) {
+ struct wmi_temp_sense_all_done_event sense_all_evt;
- print_temp(s, "T_mac =", t_m);
- print_temp(s, "T_radio =", t_r);
+ wil_dbg_misc(wil,
+ "WMI_FW_CAPABILITY_TEMPERATURE_ALL_RF is supported");
+ rc = wmi_get_all_temperatures(wil, &sense_all_evt);
+ if (rc) {
+ seq_puts(s, "Failed\n");
+ return 0;
+ }
+ print_temp(s, "T_mac =",
+ le32_to_cpu(sense_all_evt.baseband_t1000));
+ seq_printf(s, "Connected RFs [0x%08x]\n",
+ sense_all_evt.rf_bitmap);
+ for (i = 0; i < WMI_MAX_XIF_PORTS_NUM; i++) {
+ seq_printf(s, "RF[%d] = ", i);
+ print_temp(s, "",
+ le32_to_cpu(sense_all_evt.rf_t1000[i]));
+ }
+ } else {
+ s32 t_m, t_r;
+ wil_dbg_misc(wil,
+ "WMI_FW_CAPABILITY_TEMPERATURE_ALL_RF is not supported");
+ rc = wmi_get_temperature(wil, &t_m, &t_r);
+ if (rc) {
+ seq_puts(s, "Failed\n");
+ return 0;
+ }
+ print_temp(s, "T_mac =", t_m);
+ print_temp(s, "T_radio =", t_r);
+ }
return 0;
}
DEFINE_SHOW_ATTRIBUTE(temp);
@@ -1359,7 +1412,7 @@ static int link_show(struct seq_file *s, void *data)
if (!sinfo)
return -ENOMEM;
- for (i = 0; i < max_assoc_sta; i++) {
+ for (i = 0; i < wil->max_assoc_sta; i++) {
struct wil_sta_info *p = &wil->sta[i];
char *status = "unknown";
struct wil6210_vif *vif;
@@ -1561,7 +1614,7 @@ __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock)
struct wil6210_priv *wil = s->private;
int i, tid, mcs;
- for (i = 0; i < max_assoc_sta; i++) {
+ for (i = 0; i < wil->max_assoc_sta; i++) {
struct wil_sta_info *p = &wil->sta[i];
char *status = "unknown";
u8 aid = 0;
@@ -1670,7 +1723,7 @@ __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock)
struct wil6210_priv *wil = s->private;
int i, bin;
- for (i = 0; i < max_assoc_sta; i++) {
+ for (i = 0; i < wil->max_assoc_sta; i++) {
struct wil_sta_info *p = &wil->sta[i];
char *status = "unknown";
u8 aid = 0;
@@ -1759,7 +1812,7 @@ static ssize_t wil_tx_latency_write(struct file *file, const char __user *buf,
size_t sz = sizeof(u64) * WIL_NUM_LATENCY_BINS;
wil->tx_latency_res = val;
- for (i = 0; i < max_assoc_sta; i++) {
+ for (i = 0; i < wil->max_assoc_sta; i++) {
struct wil_sta_info *sta = &wil->sta[i];
kfree(sta->tx_latency_bins);
@@ -1844,7 +1897,7 @@ static void wil_link_stats_debugfs_show_vif(struct wil6210_vif *vif,
}
seq_printf(s, "TSF %lld\n", vif->fw_stats_tsf);
- for (i = 0; i < max_assoc_sta; i++) {
+ for (i = 0; i < wil->max_assoc_sta; i++) {
if (wil->sta[i].status == wil_sta_unused)
continue;
if (wil->sta[i].mid != vif->mid)
@@ -2336,6 +2389,7 @@ static const struct {
{"tx_latency", 0644, &fops_tx_latency},
{"link_stats", 0644, &fops_link_stats},
{"link_stats_global", 0644, &fops_link_stats_global},
+ {"rbufcap", 0244, &fops_rbufcap},
};
static void wil6210_debugfs_init_files(struct wil6210_priv *wil,
@@ -2460,7 +2514,7 @@ void wil6210_debugfs_remove(struct wil6210_priv *wil)
wil->debug = NULL;
kfree(wil->dbg_data.data_arr);
- for (i = 0; i < max_assoc_sta; i++)
+ for (i = 0; i < wil->max_assoc_sta; i++)
kfree(wil->sta[i].tx_latency_bins);
/* free pmc memory without sending command to fw, as it will
diff --git a/drivers/net/wireless/ath/wil6210/fw.h b/drivers/net/wireless/ath/wil6210/fw.h
index 3e7a28045cab..fa3164765b20 100644
--- a/drivers/net/wireless/ath/wil6210/fw.h
+++ b/drivers/net/wireless/ath/wil6210/fw.h
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2014,2016 Qualcomm Atheros, Inc.
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -109,12 +109,17 @@ struct wil_fw_record_concurrency { /* type == wil_fw_type_comment */
/* brd file info encoded inside a comment record */
#define WIL_BRD_FILE_MAGIC (0xabcddcbb)
+
+struct brd_info {
+ __le32 base_addr;
+ __le32 max_size_bytes;
+} __packed;
+
struct wil_fw_record_brd_file { /* type == wil_fw_type_comment */
/* identifies brd file record */
struct wil_fw_record_comment_hdr hdr;
__le32 version;
- __le32 base_addr;
- __le32 max_size_bytes;
+ struct brd_info brd_info[0];
} __packed;
/* perform action
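/* Illustrative sketch (hypothetical helper, mirrors the parsing added to
 * fw_inc.c below): the number of brd_info entries in a
 * wil_fw_record_brd_file record is implied by the record length, since
 * brd_info[] is a flexible array at the end of the comment record.
 */
static u32 brd_info_count(size_t record_size)
{
	size_t hdr = offsetof(struct wil_fw_record_brd_file, brd_info);

	if (record_size <= hdr)
		return 0;	/* no entries present */
	return (record_size - hdr) / sizeof(struct brd_info);
}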
diff --git a/drivers/net/wireless/ath/wil6210/fw_inc.c b/drivers/net/wireless/ath/wil6210/fw_inc.c
index 3ec0f2fab9b7..94ebfa338e3f 100644
--- a/drivers/net/wireless/ath/wil6210/fw_inc.c
+++ b/drivers/net/wireless/ath/wil6210/fw_inc.c
@@ -156,17 +156,52 @@ fw_handle_brd_file(struct wil6210_priv *wil, const void *data,
size_t size)
{
const struct wil_fw_record_brd_file *rec = data;
+ u32 max_num_ent, i, ent_size;
- if (size < sizeof(*rec)) {
- wil_err_fw(wil, "brd_file record too short: %zu\n", size);
- return 0;
+ if (size <= offsetof(struct wil_fw_record_brd_file, brd_info)) {
+ wil_err(wil, "board record too short, size %zu\n", size);
+ return -EINVAL;
+ }
+
+ ent_size = size - offsetof(struct wil_fw_record_brd_file, brd_info);
+ max_num_ent = ent_size / sizeof(struct brd_info);
+
+ if (!max_num_ent) {
+ wil_err(wil, "brd info entries are missing\n");
+ return -EINVAL;
}
- wil->brd_file_addr = le32_to_cpu(rec->base_addr);
- wil->brd_file_max_size = le32_to_cpu(rec->max_size_bytes);
+ wil->brd_info = kcalloc(max_num_ent, sizeof(struct wil_brd_info),
+ GFP_KERNEL);
+ if (!wil->brd_info)
+ return -ENOMEM;
- wil_dbg_fw(wil, "brd_file_addr 0x%x, brd_file_max_size %d\n",
- wil->brd_file_addr, wil->brd_file_max_size);
+ for (i = 0; i < max_num_ent; i++) {
+ wil->brd_info[i].file_addr =
+ le32_to_cpu(rec->brd_info[i].base_addr);
+ wil->brd_info[i].file_max_size =
+ le32_to_cpu(rec->brd_info[i].max_size_bytes);
+
+ if (!wil->brd_info[i].file_addr)
+ break;
+
+ wil_dbg_fw(wil,
+ "brd info %d: file_addr 0x%x, file_max_size %d\n",
+ i, wil->brd_info[i].file_addr,
+ wil->brd_info[i].file_max_size);
+ }
+
+ wil->num_of_brd_entries = i;
+ if (wil->num_of_brd_entries == 0) {
+ kfree(wil->brd_info);
+ wil->brd_info = NULL;
+ wil_dbg_fw(wil,
+ "no valid brd info entries, using brd file addr\n");
+
+ } else {
+ wil_dbg_fw(wil, "num of brd info entries %d\n",
+ wil->num_of_brd_entries);
+ }
return 0;
}
@@ -634,6 +669,11 @@ int wil_request_firmware(struct wil6210_priv *wil, const char *name,
}
wil_dbg_fw(wil, "Loading <%s>, %zu bytes\n", name, fw->size);
+ /* re-initialize board info params */
+ wil->num_of_brd_entries = 0;
+ kfree(wil->brd_info);
+ wil->brd_info = NULL;
+
for (sz = fw->size, d = fw->data; sz; sz -= rc1, d += rc1) {
rc1 = wil_fw_verify(wil, d, sz);
if (rc1 < 0) {
@@ -662,11 +702,13 @@ static int wil_brd_process(struct wil6210_priv *wil, const void *data,
{
int rc = 0;
const struct wil_fw_record_head *hdr = data;
- size_t s, hdr_sz;
+ size_t s, hdr_sz = 0;
u16 type;
+ int i = 0;
- /* Assuming the board file includes only one header record and one data
- * record. Each record starts with wil_fw_record_head.
+ /* Assuming the board file includes only one file header
+ * and one or several data records.
+ * Each record starts with wil_fw_record_head.
*/
if (size < sizeof(*hdr))
return -EINVAL;
@@ -674,40 +716,67 @@ static int wil_brd_process(struct wil6210_priv *wil, const void *data,
if (s > size)
return -EINVAL;
- /* Skip the header record and handle the data record */
- hdr = (const void *)hdr + s;
+ /* Skip the header record and handle the data records */
size -= s;
- if (size < sizeof(*hdr))
- return -EINVAL;
- hdr_sz = le32_to_cpu(hdr->size);
- if (wil->brd_file_max_size && hdr_sz > wil->brd_file_max_size)
- return -EINVAL;
- if (sizeof(*hdr) + hdr_sz > size)
- return -EINVAL;
- if (hdr_sz % 4) {
- wil_err_fw(wil, "unaligned record size: %zu\n",
- hdr_sz);
- return -EINVAL;
- }
- type = le16_to_cpu(hdr->type);
- if (type != wil_fw_type_data) {
- wil_err_fw(wil, "invalid record type for board file: %d\n",
- type);
- return -EINVAL;
+ for (hdr = data + s;; hdr = (const void *)hdr + s, size -= s, i++) {
+ if (size < sizeof(*hdr))
+ break;
+
+ if (i >= wil->num_of_brd_entries) {
+ wil_err_fw(wil,
+ "Too many brd records: %d, num of expected entries %d\n",
+ i, wil->num_of_brd_entries);
+ break;
+ }
+
+ hdr_sz = le32_to_cpu(hdr->size);
+ s = sizeof(*hdr) + hdr_sz;
+ if (wil->brd_info[i].file_max_size &&
+ hdr_sz > wil->brd_info[i].file_max_size)
+ return -EINVAL;
+ if (sizeof(*hdr) + hdr_sz > size)
+ return -EINVAL;
+ if (hdr_sz % 4) {
+ wil_err_fw(wil, "unaligned record size: %zu\n",
+ hdr_sz);
+ return -EINVAL;
+ }
+ type = le16_to_cpu(hdr->type);
+ if (type != wil_fw_type_data) {
+ wil_err_fw(wil,
+ "invalid record type for board file: %d\n",
+ type);
+ return -EINVAL;
+ }
+ if (hdr_sz < sizeof(struct wil_fw_record_data)) {
+ wil_err_fw(wil, "data record too short: %zu\n", hdr_sz);
+ return -EINVAL;
+ }
+
+ wil_dbg_fw(wil,
+ "using info from fw file for record %d: addr[0x%08x], max size %d\n",
+ i, wil->brd_info[i].file_addr,
+ wil->brd_info[i].file_max_size);
+
+ rc = __fw_handle_data(wil, &hdr[1], hdr_sz,
+ cpu_to_le32(wil->brd_info[i].file_addr));
+ if (rc)
+ return rc;
}
- if (hdr_sz < sizeof(struct wil_fw_record_data)) {
- wil_err_fw(wil, "data record too short: %zu\n", hdr_sz);
+
+ if (size) {
+ wil_err_fw(wil, "unprocessed bytes: %zu\n", size);
+ if (size >= sizeof(*hdr)) {
+ wil_err_fw(wil,
+ "Stop at offset %ld record type %d [%zd bytes]\n",
+ (long)((const void *)hdr - data),
+ le16_to_cpu(hdr->type), hdr_sz);
+ }
return -EINVAL;
}
- wil_dbg_fw(wil, "using addr from fw file: [0x%08x]\n",
- wil->brd_file_addr);
-
- rc = __fw_handle_data(wil, &hdr[1], hdr_sz,
- cpu_to_le32(wil->brd_file_addr));
-
- return rc;
+ return 0;
}
/**
@@ -738,7 +807,8 @@ int wil_request_board(struct wil6210_priv *wil, const char *name)
rc = dlen;
goto out;
}
- /* Process the data record */
+
+ /* Process the data records */
rc = wil_brd_process(wil, brd->data, dlen);
out:
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index 3f5bd177d55f..b00a13d6d530 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -296,21 +296,24 @@ void wil_configure_interrupt_moderation(struct wil6210_priv *wil)
static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
{
struct wil6210_priv *wil = cookie;
- u32 isr = wil_ioread32_and_clear(wil->csr +
- HOSTADDR(RGF_DMA_EP_RX_ICR) +
- offsetof(struct RGF_ICR, ICR));
+ u32 isr;
bool need_unmask = true;
+ wil6210_mask_irq_rx(wil);
+
+ isr = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_RX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+
trace_wil6210_irq_rx(isr);
wil_dbg_irq(wil, "ISR RX 0x%08x\n", isr);
if (unlikely(!isr)) {
wil_err_ratelimited(wil, "spurious IRQ: RX\n");
+ wil6210_unmask_irq_rx(wil);
return IRQ_NONE;
}
- wil6210_mask_irq_rx(wil);
-
/* RX_DONE and RX_HTRSH interrupts are the same if interrupt
* moderation is not used. Interrupt moderation may cause RX
* buffer overflow while RX_DONE is delayed. The required
@@ -355,21 +358,24 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
static irqreturn_t wil6210_irq_rx_edma(int irq, void *cookie)
{
struct wil6210_priv *wil = cookie;
- u32 isr = wil_ioread32_and_clear(wil->csr +
- HOSTADDR(RGF_INT_GEN_RX_ICR) +
- offsetof(struct RGF_ICR, ICR));
+ u32 isr;
bool need_unmask = true;
+ wil6210_mask_irq_rx_edma(wil);
+
+ isr = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_INT_GEN_RX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+
trace_wil6210_irq_rx(isr);
wil_dbg_irq(wil, "ISR RX 0x%08x\n", isr);
if (unlikely(!isr)) {
wil_err(wil, "spurious IRQ: RX\n");
+ wil6210_unmask_irq_rx_edma(wil);
return IRQ_NONE;
}
- wil6210_mask_irq_rx_edma(wil);
-
if (likely(isr & BIT_RX_STATUS_IRQ)) {
wil_dbg_irq(wil, "RX status ring\n");
isr &= ~BIT_RX_STATUS_IRQ;
@@ -403,21 +409,24 @@ static irqreturn_t wil6210_irq_rx_edma(int irq, void *cookie)
static irqreturn_t wil6210_irq_tx_edma(int irq, void *cookie)
{
struct wil6210_priv *wil = cookie;
- u32 isr = wil_ioread32_and_clear(wil->csr +
- HOSTADDR(RGF_INT_GEN_TX_ICR) +
- offsetof(struct RGF_ICR, ICR));
+ u32 isr;
bool need_unmask = true;
+ wil6210_mask_irq_tx_edma(wil);
+
+ isr = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_INT_GEN_TX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+
trace_wil6210_irq_tx(isr);
wil_dbg_irq(wil, "ISR TX 0x%08x\n", isr);
if (unlikely(!isr)) {
wil_err(wil, "spurious IRQ: TX\n");
+ wil6210_unmask_irq_tx_edma(wil);
return IRQ_NONE;
}
- wil6210_mask_irq_tx_edma(wil);
-
if (likely(isr & BIT_TX_STATUS_IRQ)) {
wil_dbg_irq(wil, "TX status ring\n");
isr &= ~BIT_TX_STATUS_IRQ;
@@ -446,21 +455,24 @@ static irqreturn_t wil6210_irq_tx_edma(int irq, void *cookie)
static irqreturn_t wil6210_irq_tx(int irq, void *cookie)
{
struct wil6210_priv *wil = cookie;
- u32 isr = wil_ioread32_and_clear(wil->csr +
- HOSTADDR(RGF_DMA_EP_TX_ICR) +
- offsetof(struct RGF_ICR, ICR));
+ u32 isr;
bool need_unmask = true;
+ wil6210_mask_irq_tx(wil);
+
+ isr = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_TX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+
trace_wil6210_irq_tx(isr);
wil_dbg_irq(wil, "ISR TX 0x%08x\n", isr);
if (unlikely(!isr)) {
wil_err_ratelimited(wil, "spurious IRQ: TX\n");
+ wil6210_unmask_irq_tx(wil);
return IRQ_NONE;
}
- wil6210_mask_irq_tx(wil);
-
if (likely(isr & BIT_DMA_EP_TX_ICR_TX_DONE)) {
wil_dbg_irq(wil, "TX done\n");
isr &= ~BIT_DMA_EP_TX_ICR_TX_DONE;
@@ -532,20 +544,23 @@ static bool wil_validate_mbox_regs(struct wil6210_priv *wil)
static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
{
struct wil6210_priv *wil = cookie;
- u32 isr = wil_ioread32_and_clear(wil->csr +
- HOSTADDR(RGF_DMA_EP_MISC_ICR) +
- offsetof(struct RGF_ICR, ICR));
+ u32 isr;
+
+ wil6210_mask_irq_misc(wil, false);
+
+ isr = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_MISC_ICR) +
+ offsetof(struct RGF_ICR, ICR));
trace_wil6210_irq_misc(isr);
wil_dbg_irq(wil, "ISR MISC 0x%08x\n", isr);
if (!isr) {
wil_err(wil, "spurious IRQ: MISC\n");
+ wil6210_unmask_irq_misc(wil, false);
return IRQ_NONE;
}
- wil6210_mask_irq_misc(wil, false);
-
if (isr & ISR_MISC_FW_ERROR) {
u32 fw_assert_code = wil_r(wil, wil->rgf_fw_assert_code_addr);
u32 ucode_assert_code =
@@ -580,7 +595,7 @@ static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
/* no need to handle HALP ICRs until next vote */
wil->halp.handle_icr = false;
wil_dbg_irq(wil, "irq_misc: HALP IRQ invoked\n");
- wil6210_mask_halp(wil);
+ wil6210_mask_irq_misc(wil, true);
complete(&wil->halp.comp);
}
}
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 9b9c9ec01536..173561fe593d 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -241,7 +241,7 @@ static bool wil_vif_is_connected(struct wil6210_priv *wil, u8 mid)
{
int i;
- for (i = 0; i < max_assoc_sta; i++) {
+ for (i = 0; i < wil->max_assoc_sta; i++) {
if (wil->sta[i].mid == mid &&
wil->sta[i].status == wil_sta_connected)
return true;
@@ -340,11 +340,11 @@ static void _wil6210_disconnect_complete(struct wil6210_vif *vif,
wil_dbg_misc(wil,
"Disconnect complete %pM, CID=%d, reason=%d\n",
bssid, cid, reason_code);
- if (cid >= 0) /* disconnect 1 peer */
+ if (wil_cid_valid(wil, cid)) /* disconnect 1 peer */
wil_disconnect_cid_complete(vif, cid, reason_code);
} else { /* all */
wil_dbg_misc(wil, "Disconnect complete all\n");
- for (cid = 0; cid < max_assoc_sta; cid++)
+ for (cid = 0; cid < wil->max_assoc_sta; cid++)
wil_disconnect_cid_complete(vif, cid, reason_code);
}
@@ -452,11 +452,11 @@ static void _wil6210_disconnect(struct wil6210_vif *vif, const u8 *bssid,
cid = wil_find_cid(wil, vif->mid, bssid);
wil_dbg_misc(wil, "Disconnect %pM, CID=%d, reason=%d\n",
bssid, cid, reason_code);
- if (cid >= 0) /* disconnect 1 peer */
+ if (wil_cid_valid(wil, cid)) /* disconnect 1 peer */
wil_disconnect_cid(vif, cid, reason_code);
} else { /* all */
wil_dbg_misc(wil, "Disconnect all\n");
- for (cid = 0; cid < max_assoc_sta; cid++)
+ for (cid = 0; cid < wil->max_assoc_sta; cid++)
wil_disconnect_cid(vif, cid, reason_code);
}
@@ -753,6 +753,7 @@ int wil_priv_init(struct wil6210_priv *wil)
wil->reply_mid = U8_MAX;
wil->max_vifs = 1;
+ wil->max_assoc_sta = max_assoc_sta;
/* edma configuration can be updated via debugfs before allocation */
wil->num_rx_status_rings = WIL_DEFAULT_NUM_RX_STATUS_RINGS;
@@ -838,6 +839,7 @@ void wil_priv_deinit(struct wil6210_priv *wil)
wmi_event_flush(wil);
destroy_workqueue(wil->wq_service);
destroy_workqueue(wil->wmi_wq);
+ kfree(wil->brd_info);
}
static void wil_shutdown_bl(struct wil6210_priv *wil)
@@ -1520,6 +1522,7 @@ int wil_ps_update(struct wil6210_priv *wil, enum wmi_ps_profile_type ps_profile)
static void wil_pre_fw_config(struct wil6210_priv *wil)
{
+ wil_clear_fw_log_addr(wil);
/* Mark FW as loaded from host */
wil_s(wil, RGF_USER_USAGE_6, 1);
@@ -1577,6 +1580,20 @@ static int wil_restore_vifs(struct wil6210_priv *wil)
}
/*
+ * Clear FW and ucode log start addr to indicate FW log is not ready. The host
+ * driver clears the addresses before FW starts and FW initializes the address
+ * when it is ready to send logs.
+ */
+void wil_clear_fw_log_addr(struct wil6210_priv *wil)
+{
+ /* FW log addr */
+ wil_w(wil, RGF_USER_USAGE_1, 0);
+ /* ucode log addr */
+ wil_w(wil, RGF_USER_USAGE_2, 0);
+ wil_dbg_misc(wil, "Cleared FW and ucode log address");
+}
+
+/*
* We reset all the structures, and we reset the UMAC.
* After calling this routine, you're expected to reload
* the firmware.
@@ -1709,7 +1726,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
rc = wil_request_firmware(wil, wil->wil_fw_name, true);
if (rc)
goto out;
- if (wil->brd_file_addr)
+ if (wil->num_of_brd_entries)
rc = wil_request_board(wil, board_file);
else
rc = wil_request_firmware(wil, board_file, true);
@@ -1921,7 +1938,7 @@ int wil_find_cid(struct wil6210_priv *wil, u8 mid, const u8 *mac)
int i;
int rc = -ENOENT;
- for (i = 0; i < max_assoc_sta; i++) {
+ for (i = 0; i < wil->max_assoc_sta; i++) {
if (wil->sta[i].mid == mid &&
wil->sta[i].status != wil_sta_unused &&
ether_addr_equal(wil->sta[i].addr, mac)) {
@@ -1938,6 +1955,9 @@ void wil_halp_vote(struct wil6210_priv *wil)
unsigned long rc;
unsigned long to_jiffies = msecs_to_jiffies(WAIT_FOR_HALP_VOTE_MS);
+ if (wil->hw_version >= HW_VER_TALYN_MB)
+ return;
+
mutex_lock(&wil->halp.lock);
wil_dbg_irq(wil, "halp_vote: start, HALP ref_cnt (%d)\n",
@@ -1969,6 +1989,9 @@ void wil_halp_vote(struct wil6210_priv *wil)
void wil_halp_unvote(struct wil6210_priv *wil)
{
+ if (wil->hw_version >= HW_VER_TALYN_MB)
+ return;
+
WARN_ON(wil->halp.ref_cnt == 0);
mutex_lock(&wil->halp.lock);
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index 3b82d6cfc218..9f5a914abc18 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -142,6 +142,8 @@ int wil_set_capabilities(struct wil6210_priv *wil)
min(sizeof(wil->platform_capa), sizeof(platform_capa)));
}
+ wil_info(wil, "platform_capa 0x%lx\n", *wil->platform_capa);
+
/* extract FW capabilities from file without loading the FW */
wil_request_firmware(wil, wil->wil_fw_name, false);
wil_refresh_fw_capabilities(wil);
@@ -418,6 +420,7 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
/* rollback to bus_disable */
+ wil_clear_fw_log_addr(wil);
rc = wil_if_add(wil);
if (rc) {
wil_err(wil, "wil_if_add failed: %d\n", rc);
diff --git a/drivers/net/wireless/ath/wil6210/rx_reorder.c b/drivers/net/wireless/ath/wil6210/rx_reorder.c
index 32b14fc33a59..784239bcb3a6 100644
--- a/drivers/net/wireless/ath/wil6210/rx_reorder.c
+++ b/drivers/net/wireless/ath/wil6210/rx_reorder.c
@@ -316,7 +316,7 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
u16 agg_timeout = le16_to_cpu(ba_timeout);
u16 seq_ctrl = le16_to_cpu(ba_seq_ctrl);
struct wil_sta_info *sta;
- u16 agg_wsize = 0;
+ u16 agg_wsize;
/* bit 0: A-MSDU supported
* bit 1: policy (should be 0 for us)
* bits 2..5: TID
@@ -328,7 +328,6 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
test_bit(WMI_FW_CAPABILITY_AMSDU, wil->fw_capabilities) &&
wil->amsdu_en && (param_set & BIT(0));
int ba_policy = param_set & BIT(1);
- u16 status = WLAN_STATUS_SUCCESS;
u16 ssn = seq_ctrl >> 4;
struct wil_tid_ampdu_rx *r;
int rc = 0;
@@ -336,7 +335,7 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
might_sleep();
/* sanity checks */
- if (cid >= max_assoc_sta) {
+ if (cid >= wil->max_assoc_sta) {
wil_err(wil, "BACK: invalid CID %d\n", cid);
rc = -EINVAL;
goto out;
@@ -355,27 +354,19 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
agg_amsdu ? "+" : "-", !!ba_policy, dialog_token, ssn);
/* apply policies */
- if (ba_policy) {
- wil_err(wil, "BACK requested unsupported ba_policy == 1\n");
- status = WLAN_STATUS_INVALID_QOS_PARAM;
- }
- if (status == WLAN_STATUS_SUCCESS) {
- if (req_agg_wsize == 0) {
- wil_dbg_misc(wil, "Suggest BACK wsize %d\n",
- wil->max_agg_wsize);
- agg_wsize = wil->max_agg_wsize;
- } else {
- agg_wsize = min_t(u16,
- wil->max_agg_wsize, req_agg_wsize);
- }
+ if (req_agg_wsize == 0) {
+ wil_dbg_misc(wil, "Suggest BACK wsize %d\n",
+ wil->max_agg_wsize);
+ agg_wsize = wil->max_agg_wsize;
+ } else {
+ agg_wsize = min_t(u16, wil->max_agg_wsize, req_agg_wsize);
}
rc = wil->txrx_ops.wmi_addba_rx_resp(wil, mid, cid, tid, dialog_token,
- status, agg_amsdu, agg_wsize,
- agg_timeout);
- if (rc || (status != WLAN_STATUS_SUCCESS)) {
- wil_err(wil, "do not apply ba, rc(%d), status(%d)\n", rc,
- status);
+ WLAN_STATUS_SUCCESS, agg_amsdu,
+ agg_wsize, agg_timeout);
+ if (rc) {
+ wil_err(wil, "do not apply ba, rc(%d)\n", rc);
goto out;
}
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index 4ccfd1404458..eae00aafaa88 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -411,7 +411,7 @@ static int wil_rx_get_cid_by_skb(struct wil6210_priv *wil, struct sk_buff *skb)
ta = hdr->addr2;
}
- if (max_assoc_sta <= WIL6210_RX_DESC_MAX_CID)
+ if (wil->max_assoc_sta <= WIL6210_RX_DESC_MAX_CID)
return cid;
/* assuming no concurrency between AP interfaces and STA interfaces.
@@ -426,14 +426,14 @@ static int wil_rx_get_cid_by_skb(struct wil6210_priv *wil, struct sk_buff *skb)
* to find the real cid, compare transmitter address with the stored
* stations mac address in the driver sta array
*/
- for (i = cid; i < max_assoc_sta; i += WIL6210_RX_DESC_MAX_CID) {
+ for (i = cid; i < wil->max_assoc_sta; i += WIL6210_RX_DESC_MAX_CID) {
if (wil->sta[i].status != wil_sta_unused &&
ether_addr_equal(wil->sta[i].addr, ta)) {
cid = i;
break;
}
}
- if (i >= max_assoc_sta) {
+ if (i >= wil->max_assoc_sta) {
wil_err_ratelimited(wil, "Could not find cid for frame with transmit addr = %pM, iftype = %d, frametype = %d, len = %d\n",
ta, vif->wdev.iftype, ftype, skb->len);
cid = -ENOENT;
@@ -750,6 +750,7 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
[GRO_HELD] = "GRO_HELD",
[GRO_NORMAL] = "GRO_NORMAL",
[GRO_DROP] = "GRO_DROP",
+ [GRO_CONSUMED] = "GRO_CONSUMED",
};
wil->txrx_ops.get_netif_rx_params(skb, &cid, &security);
@@ -1036,7 +1037,8 @@ static int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size,
if (!vif->privacy)
txdata->dot1x_open = true;
rc = wmi_call(wil, WMI_VRING_CFG_CMDID, vif->mid, &cmd, sizeof(cmd),
- WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
+ WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply),
+ WIL_WMI_CALL_GENERAL_TO_MS);
if (rc)
goto out_free;
@@ -1063,7 +1065,7 @@ static int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size,
txdata->enabled = 0;
spin_unlock_bh(&txdata->lock);
wil_vring_free(wil, vring);
- wil->ring2cid_tid[id][0] = max_assoc_sta;
+ wil->ring2cid_tid[id][0] = wil->max_assoc_sta;
wil->ring2cid_tid[id][1] = 0;
out:
@@ -1124,7 +1126,8 @@ static int wil_tx_vring_modify(struct wil6210_vif *vif, int ring_id, int cid,
cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
rc = wmi_call(wil, WMI_VRING_CFG_CMDID, vif->mid, &cmd, sizeof(cmd),
- WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
+ WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply),
+ WIL_WMI_CALL_GENERAL_TO_MS);
if (rc)
goto fail;
@@ -1148,7 +1151,7 @@ fail:
txdata->dot1x_open = false;
txdata->enabled = 0;
spin_unlock_bh(&txdata->lock);
- wil->ring2cid_tid[ring_id][0] = max_assoc_sta;
+ wil->ring2cid_tid[ring_id][0] = wil->max_assoc_sta;
wil->ring2cid_tid[ring_id][1] = 0;
return rc;
}
@@ -1195,7 +1198,7 @@ int wil_vring_init_bcast(struct wil6210_vif *vif, int id, int size)
if (rc)
goto out;
- wil->ring2cid_tid[id][0] = max_assoc_sta; /* CID */
+ wil->ring2cid_tid[id][0] = wil->max_assoc_sta; /* CID */
wil->ring2cid_tid[id][1] = 0; /* TID */
cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
@@ -1204,7 +1207,8 @@ int wil_vring_init_bcast(struct wil6210_vif *vif, int id, int size)
txdata->dot1x_open = true;
rc = wmi_call(wil, WMI_BCAST_VRING_CFG_CMDID, vif->mid,
&cmd, sizeof(cmd),
- WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
+ WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply),
+ WIL_WMI_CALL_GENERAL_TO_MS);
if (rc)
goto out_free;
@@ -1243,7 +1247,7 @@ static struct wil_ring *wil_find_tx_ucast(struct wil6210_priv *wil,
cid = wil_find_cid(wil, vif->mid, da);
- if (cid < 0 || cid >= max_assoc_sta)
+ if (cid < 0 || cid >= wil->max_assoc_sta)
return NULL;
/* TODO: fix for multiple TID */
@@ -1295,7 +1299,7 @@ static struct wil_ring *wil_find_tx_ring_sta(struct wil6210_priv *wil,
continue;
cid = wil->ring2cid_tid[i][0];
- if (cid >= max_assoc_sta) /* skip BCAST */
+ if (cid >= wil->max_assoc_sta) /* skip BCAST */
continue;
if (!wil->ring_tx_data[i].dot1x_open &&
@@ -1373,7 +1377,7 @@ static struct wil_ring *wil_find_tx_bcast_2(struct wil6210_priv *wil,
continue;
cid = wil->ring2cid_tid[i][0];
- if (cid >= max_assoc_sta) /* skip BCAST */
+ if (cid >= wil->max_assoc_sta) /* skip BCAST */
continue;
if (!wil->ring_tx_data[i].dot1x_open &&
skb->protocol != cpu_to_be16(ETH_P_PAE))
@@ -1401,7 +1405,7 @@ found:
if (!v2->va || txdata2->mid != vif->mid)
continue;
cid = wil->ring2cid_tid[i][0];
- if (cid >= max_assoc_sta) /* skip BCAST */
+ if (cid >= wil->max_assoc_sta) /* skip BCAST */
continue;
if (!wil->ring_tx_data[i].dot1x_open &&
skb->protocol != cpu_to_be16(ETH_P_PAE))
@@ -1760,6 +1764,9 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
}
}
+ if (!_desc)
+ goto mem_error;
+
/* first descriptor may also be the last.
* in this case d pointer is invalid
*/
@@ -2254,7 +2261,7 @@ int wil_tx_complete(struct wil6210_vif *vif, int ringid)
used_before_complete = wil_ring_used_tx(vring);
- if (cid < max_assoc_sta)
+ if (cid < wil->max_assoc_sta)
stats = &wil->sta[cid].stats;
while (!wil_ring_is_empty(vring)) {
diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c
index f6fce6ff73d9..dc040cd4ab06 100644
--- a/drivers/net/wireless/ath/wil6210/txrx_edma.c
+++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c
@@ -26,6 +26,10 @@
#include "txrx.h"
#include "trace.h"
+/* Max number of entries (packets to complete) to update the hwtail of tx
+ * status ring. Should be a power of 2
+ */
+#define WIL_EDMA_TX_SRING_UPDATE_HW_TAIL 128
#define WIL_EDMA_MAX_DATA_OFFSET (2)
/* RX buffer size must be aligned to 4 bytes */
#define WIL_EDMA_RX_BUF_LEN_DEFAULT (2048)
@@ -269,6 +273,9 @@ static void wil_move_all_rx_buff_to_free_list(struct wil6210_priv *wil,
struct list_head *active = &wil->rx_buff_mgmt.active;
dma_addr_t pa;
+ if (!wil->rx_buff_mgmt.buff_arr)
+ return;
+
while (!list_empty(active)) {
struct wil_rx_buff *rx_buff =
list_first_entry(active, struct wil_rx_buff, list);
@@ -734,7 +741,7 @@ static int wil_ring_init_tx_edma(struct wil6210_vif *vif, int ring_id,
txdata->enabled = 0;
spin_unlock_bh(&txdata->lock);
wil_ring_free_edma(wil, ring);
- wil->ring2cid_tid[ring_id][0] = max_assoc_sta;
+ wil->ring2cid_tid[ring_id][0] = wil->max_assoc_sta;
wil->ring2cid_tid[ring_id][1] = 0;
out:
@@ -944,7 +951,7 @@ again:
eop = wil_rx_status_get_eop(msg);
cid = wil_rx_status_get_cid(msg);
- if (unlikely(!wil_val_in_range(cid, 0, max_assoc_sta))) {
+ if (unlikely(!wil_val_in_range(cid, 0, wil->max_assoc_sta))) {
wil_err(wil, "Corrupt cid=%d, sring->swhead=%d\n",
cid, sring->swhead);
rxdata->skipping = true;
@@ -1152,7 +1159,7 @@ int wil_tx_sring_handler(struct wil6210_priv *wil,
struct wil_net_stats *stats;
struct wil_tx_enhanced_desc *_d;
unsigned int ring_id;
- unsigned int num_descs;
+ unsigned int num_descs, num_statuses = 0;
int i;
u8 dr_bit; /* Descriptor Ready bit */
struct wil_ring_tx_status msg;
@@ -1199,7 +1206,8 @@ int wil_tx_sring_handler(struct wil6210_priv *wil,
ndev = vif_to_ndev(vif);
cid = wil->ring2cid_tid[ring_id][0];
- stats = (cid < max_assoc_sta ? &wil->sta[cid].stats : NULL);
+ stats = (cid < wil->max_assoc_sta) ? &wil->sta[cid].stats :
+ NULL;
wil_dbg_txrx(wil,
"tx_status: completed desc_ring (%d), num_descs (%d)\n",
@@ -1272,6 +1280,11 @@ int wil_tx_sring_handler(struct wil6210_priv *wil,
}
again:
+ num_statuses++;
+ if (num_statuses % WIL_EDMA_TX_SRING_UPDATE_HW_TAIL == 0)
+ /* update HW tail to allow HW to push new statuses */
+ wil_w(wil, sring->hwtail, sring->swhead);
+
wil_sring_advance_swhead(sring);
wil_get_next_tx_status_msg(sring, &msg);
@@ -1282,8 +1295,9 @@ again:
if (desc_cnt)
wil_update_net_queues(wil, vif, NULL, false);
- /* Update the HW tail ptr (RD ptr) */
- wil_w(wil, sring->hwtail, (sring->swhead - 1) % sring->size);
+ if (num_statuses % WIL_EDMA_TX_SRING_UPDATE_HW_TAIL != 0)
+ /* Update the HW tail ptr (RD ptr) */
+ wil_w(wil, sring->hwtail, (sring->swhead - 1) % sring->size);
return desc_cnt;
}
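/* Note on the change above (illustrative): the TX status ring hwtail is now
 * advanced once every WIL_EDMA_TX_SRING_UPDATE_HW_TAIL (128) processed
 * statuses inside the completion loop, so the device can keep publishing new
 * statuses during a long burst, and the final write after the loop is issued
 * only when the loop did not just perform it.
 */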
diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.h b/drivers/net/wireless/ath/wil6210/txrx_edma.h
index bb4ff28b73e5..e9e6ea9b16b9 100644
--- a/drivers/net/wireless/ath/wil6210/txrx_edma.h
+++ b/drivers/net/wireless/ath/wil6210/txrx_edma.h
@@ -24,7 +24,7 @@
#define WIL_SRING_SIZE_ORDER_MAX (WIL_RING_SIZE_ORDER_MAX)
/* RX sring order should be bigger than RX ring order */
#define WIL_RX_SRING_SIZE_ORDER_DEFAULT (12)
-#define WIL_TX_SRING_SIZE_ORDER_DEFAULT (12)
+#define WIL_TX_SRING_SIZE_ORDER_DEFAULT (14)
#define WIL_RX_BUFF_ARR_SIZE_DEFAULT (2600)
#define WIL_DEFAULT_RX_STATUS_RING_ID 0
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 8724d9975606..6f456b311a39 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -99,6 +99,7 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
#define WIL_MAX_AMPDU_SIZE_128 (128 * 1024) /* FW/HW limit */
#define WIL_MAX_AGG_WSIZE_64 (64) /* FW/HW limit */
#define WIL6210_MAX_STATUS_RINGS (8)
+#define WIL_WMI_CALL_GENERAL_TO_MS 100
/* Hardware offload block adds the following:
* 26 bytes - 3-address QoS data header
@@ -335,6 +336,11 @@ struct RGF_ICR {
#define BIT_BOOT_FROM_ROM BIT(31)
/* eDMA */
+#define RGF_SCM_PTRS_SUBQ_RD_PTR (0x8b4000)
+#define RGF_SCM_PTRS_COMPQ_RD_PTR (0x8b4100)
+#define RGF_DMA_SCM_SUBQ_CONS (0x8b60ec)
+#define RGF_DMA_SCM_COMPQ_PROD (0x8b616c)
+
#define RGF_INT_COUNT_ON_SPECIAL_EVT (0x8b62d8)
#define RGF_INT_CTRL_INT_GEN_CFG_0 (0x8bc000)
@@ -456,15 +462,6 @@ static inline void parse_cidxtid(u8 cidxtid, u8 *cid, u8 *tid)
*tid = (cidxtid >> 4) & 0xf;
}
-/**
- * wil_cid_valid - check cid is valid
- * @cid: CID value
- */
-static inline bool wil_cid_valid(u8 cid)
-{
- return (cid >= 0 && cid < max_assoc_sta);
-}
-
struct wil6210_mbox_ring {
u32 base;
u16 entry_size; /* max. size of mbox entry, incl. all headers */
@@ -913,6 +910,11 @@ struct wil_fw_stats_global {
struct wmi_link_stats_global stats;
};
+struct wil_brd_info {
+ u32 file_addr;
+ u32 file_max_size;
+};
+
struct wil6210_priv {
struct pci_dev *pdev;
u32 bar_size;
@@ -927,8 +929,8 @@ struct wil6210_priv {
const char *hw_name;
const char *wil_fw_name;
char *board_file;
- u32 brd_file_addr;
- u32 brd_file_max_size;
+ u32 num_of_brd_entries;
+ struct wil_brd_info *brd_info;
DECLARE_BITMAP(hw_capa, hw_capa_last);
DECLARE_BITMAP(fw_capabilities, WMI_FW_CAPABILITY_MAX);
DECLARE_BITMAP(platform_capa, WIL_PLATFORM_CAPA_MAX);
@@ -940,6 +942,8 @@ struct wil6210_priv {
struct wil6210_vif *vifs[WIL_MAX_VIFS];
struct mutex vif_mutex; /* protects access to VIF entries */
atomic_t connected_vifs;
+ u32 max_assoc_sta; /* max sta's supported by the driver and the FW */
+
/* profile */
struct cfg80211_chan_def monitor_chandef;
u32 monitor_flags;
@@ -1137,6 +1141,14 @@ static inline void wil_c(struct wil6210_priv *wil, u32 reg, u32 val)
wil_w(wil, reg, wil_r(wil, reg) & ~val);
}
+/**
+ * wil_cid_valid - check cid is valid
+ * @wil: driver's context
+ * @cid: CID value
+ */
+static inline bool wil_cid_valid(struct wil6210_priv *wil, u8 cid)
+{
+ return (cid >= 0 && cid < wil->max_assoc_sta);
+}
+
void wil_get_board_file(struct wil6210_priv *wil, char *buf, size_t len);
#if defined(CONFIG_DYNAMIC_DEBUG)
@@ -1241,6 +1253,9 @@ int wmi_rx_chain_add(struct wil6210_priv *wil, struct wil_ring *vring);
int wmi_update_ft_ies(struct wil6210_vif *vif, u16 ie_len, const void *ie);
int wmi_rxon(struct wil6210_priv *wil, bool on);
int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_m, u32 *t_r);
+int wmi_get_all_temperatures(struct wil6210_priv *wil,
+ struct wmi_temp_sense_all_done_event
+ *sense_all_evt);
int wmi_disconnect_sta(struct wil6210_vif *vif, const u8 *mac, u16 reason,
bool del_sta);
int wmi_addba(struct wil6210_priv *wil, u8 mid,
@@ -1395,6 +1410,7 @@ int wmi_stop_sched_scan(struct wil6210_priv *wil);
int wmi_mgmt_tx(struct wil6210_vif *vif, const u8 *buf, size_t len);
int wmi_mgmt_tx_ext(struct wil6210_vif *vif, const u8 *buf, size_t len,
u8 channel, u16 duration_ms);
+int wmi_rbufcap_cfg(struct wil6210_priv *wil, bool enable, u16 threshold);
int reverse_memcmp(const void *cs, const void *ct, size_t count);
@@ -1413,4 +1429,5 @@ int wmi_addba_rx_resp_edma(struct wil6210_priv *wil, u8 mid, u8 cid,
void update_supported_bands(struct wil6210_priv *wil);
+void wil_clear_fw_log_addr(struct wil6210_priv *wil);
#endif /* __WIL6210_H__ */
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index d89cd41e78ac..475b1a233cc9 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -40,7 +40,6 @@ MODULE_PARM_DESC(led_id,
" 60G device led enablement. Set the led ID (0-2) to enable");
#define WIL_WAIT_FOR_SUSPEND_RESUME_COMP 200
-#define WIL_WMI_CALL_GENERAL_TO_MS 100
#define WIL_WMI_PCP_STOP_TO_MS 5000
/**
@@ -484,6 +483,10 @@ static const char *cmdid2name(u16 cmdid)
return "WMI_FT_REASSOC_CMD";
case WMI_UPDATE_FT_IES_CMDID:
return "WMI_UPDATE_FT_IES_CMD";
+ case WMI_RBUFCAP_CFG_CMDID:
+ return "WMI_RBUFCAP_CFG_CMD";
+ case WMI_TEMP_SENSE_ALL_CMDID:
+ return "WMI_TEMP_SENSE_ALL_CMDID";
default:
return "Untracked CMD";
}
@@ -628,6 +631,10 @@ static const char *eventid2name(u16 eventid)
return "WMI_FT_AUTH_STATUS_EVENT";
case WMI_FT_REASSOC_STATUS_EVENTID:
return "WMI_FT_REASSOC_STATUS_EVENT";
+ case WMI_RBUFCAP_CFG_EVENTID:
+ return "WMI_RBUFCAP_CFG_EVENT";
+ case WMI_TEMP_SENSE_ALL_DONE_EVENTID:
+ return "WMI_TEMP_SENSE_ALL_DONE_EVENTID";
default:
return "Untracked EVENT";
}
@@ -806,8 +813,8 @@ static void wmi_evt_ready(struct wil6210_vif *vif, int id, void *d, int len)
}
}
- max_assoc_sta = min_t(uint, max_assoc_sta, fw_max_assoc_sta);
- wil_dbg_wmi(wil, "setting max assoc sta to %d\n", max_assoc_sta);
+ wil->max_assoc_sta = min_t(uint, max_assoc_sta, fw_max_assoc_sta);
+ wil_dbg_wmi(wil, "setting max assoc sta to %d\n", wil->max_assoc_sta);
wil_set_recovery_state(wil, fw_recovery_idle);
set_bit(wil_status_fwready, wil->status);
@@ -974,7 +981,7 @@ static void wmi_evt_connect(struct wil6210_vif *vif, int id, void *d, int len)
evt->assoc_req_len, evt->assoc_resp_len);
return;
}
- if (evt->cid >= max_assoc_sta) {
+ if (evt->cid >= wil->max_assoc_sta) {
wil_err(wil, "Connect CID invalid : %d\n", evt->cid);
return;
}
@@ -1236,7 +1243,7 @@ static void wmi_evt_ring_en(struct wil6210_vif *vif, int id, void *d, int len)
return;
cid = wil->ring2cid_tid[vri][0];
- if (!wil_cid_valid(cid)) {
+ if (!wil_cid_valid(wil, cid)) {
wil_err(wil, "invalid cid %d for vring %d\n", cid, vri);
return;
}
@@ -1439,7 +1446,7 @@ static void wil_link_stats_store_basic(struct wil6210_vif *vif,
u8 cid = basic->cid;
struct wil_sta_info *sta;
- if (cid < 0 || cid >= max_assoc_sta) {
+ if (cid < 0 || cid >= wil->max_assoc_sta) {
wil_err(wil, "invalid cid %d\n", cid);
return;
}
@@ -1589,7 +1596,7 @@ static int wil_find_cid_ringid_sta(struct wil6210_priv *wil,
continue;
lcid = wil->ring2cid_tid[i][0];
- if (lcid >= max_assoc_sta) /* skip BCAST */
+ if (lcid >= wil->max_assoc_sta) /* skip BCAST */
continue;
wil_dbg_wmi(wil, "find sta -> ringid %d cid %d\n", i, lcid);
@@ -2051,7 +2058,8 @@ int wmi_echo(struct wil6210_priv *wil)
};
return wmi_call(wil, WMI_ECHO_CMDID, vif->mid, &cmd, sizeof(cmd),
- WMI_ECHO_RSP_EVENTID, NULL, 0, 50);
+ WMI_ECHO_RSP_EVENTID, NULL, 0,
+ WIL_WMI_CALL_GENERAL_TO_MS);
}
int wmi_set_mac_address(struct wil6210_priv *wil, void *addr)
@@ -2110,7 +2118,7 @@ int wmi_led_cfg(struct wil6210_priv *wil, bool enable)
rc = wmi_call(wil, WMI_LED_CFG_CMDID, vif->mid, &cmd, sizeof(cmd),
WMI_LED_CFG_DONE_EVENTID, &reply, sizeof(reply),
- 100);
+ WIL_WMI_CALL_GENERAL_TO_MS);
if (rc)
goto out;
@@ -2124,6 +2132,37 @@ out:
return rc;
}
+int wmi_rbufcap_cfg(struct wil6210_priv *wil, bool enable, u16 threshold)
+{
+ struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
+ int rc;
+
+ struct wmi_rbufcap_cfg_cmd cmd = {
+ .enable = enable,
+ .rx_desc_threshold = cpu_to_le16(threshold),
+ };
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_rbufcap_cfg_event evt;
+ } __packed reply = {
+ .evt = {.status = WMI_FW_STATUS_FAILURE},
+ };
+
+ rc = wmi_call(wil, WMI_RBUFCAP_CFG_CMDID, vif->mid, &cmd, sizeof(cmd),
+ WMI_RBUFCAP_CFG_EVENTID, &reply, sizeof(reply),
+ WIL_WMI_CALL_GENERAL_TO_MS);
+ if (rc)
+ return rc;
+
+ if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
+ wil_err(wil, "RBUFCAP_CFG failed. status %d\n",
+ reply.evt.status);
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
int wmi_pcp_start(struct wil6210_vif *vif,
int bi, u8 wmi_nettype, u8 chan, u8 hidden_ssid, u8 is_go)
{
@@ -2135,7 +2174,7 @@ int wmi_pcp_start(struct wil6210_vif *vif,
.network_type = wmi_nettype,
.disable_sec_offload = 1,
.channel = chan - 1,
- .pcp_max_assoc_sta = max_assoc_sta,
+ .pcp_max_assoc_sta = wil->max_assoc_sta,
.hidden_ssid = hidden_ssid,
.is_go = is_go,
.ap_sme_offload_mode = disable_ap_sme ?
@@ -2228,7 +2267,8 @@ int wmi_get_ssid(struct wil6210_vif *vif, u8 *ssid_len, void *ssid)
memset(&reply, 0, sizeof(reply));
rc = wmi_call(wil, WMI_GET_SSID_CMDID, vif->mid, NULL, 0,
- WMI_GET_SSID_EVENTID, &reply, sizeof(reply), 20);
+ WMI_GET_SSID_EVENTID, &reply, sizeof(reply),
+ WIL_WMI_CALL_GENERAL_TO_MS);
if (rc)
return rc;
@@ -2265,7 +2305,8 @@ int wmi_get_channel(struct wil6210_priv *wil, int *channel)
memset(&reply, 0, sizeof(reply));
rc = wmi_call(wil, WMI_GET_PCP_CHANNEL_CMDID, vif->mid, NULL, 0,
- WMI_GET_PCP_CHANNEL_EVENTID, &reply, sizeof(reply), 20);
+ WMI_GET_PCP_CHANNEL_EVENTID, &reply, sizeof(reply),
+ WIL_WMI_CALL_GENERAL_TO_MS);
if (rc)
return rc;
@@ -2361,7 +2402,8 @@ int wmi_stop_discovery(struct wil6210_vif *vif)
wil_dbg_wmi(wil, "sending WMI_DISCOVERY_STOP_CMDID\n");
rc = wmi_call(wil, WMI_DISCOVERY_STOP_CMDID, vif->mid, NULL, 0,
- WMI_DISCOVERY_STOPPED_EVENTID, NULL, 0, 100);
+ WMI_DISCOVERY_STOPPED_EVENTID, NULL, 0,
+ WIL_WMI_CALL_GENERAL_TO_MS);
if (rc)
wil_err(wil, "Failed to stop discovery\n");
@@ -2507,12 +2549,14 @@ int wmi_rxon(struct wil6210_priv *wil, bool on)
if (on) {
rc = wmi_call(wil, WMI_START_LISTEN_CMDID, vif->mid, NULL, 0,
WMI_LISTEN_STARTED_EVENTID,
- &reply, sizeof(reply), 100);
+ &reply, sizeof(reply),
+ WIL_WMI_CALL_GENERAL_TO_MS);
if ((rc == 0) && (reply.evt.status != WMI_FW_STATUS_SUCCESS))
rc = -EINVAL;
} else {
rc = wmi_call(wil, WMI_DISCOVERY_STOP_CMDID, vif->mid, NULL, 0,
- WMI_DISCOVERY_STOPPED_EVENTID, NULL, 0, 20);
+ WMI_DISCOVERY_STOPPED_EVENTID, NULL, 0,
+ WIL_WMI_CALL_GENERAL_TO_MS);
}
return rc;
@@ -2601,7 +2645,8 @@ int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_bb, u32 *t_rf)
memset(&reply, 0, sizeof(reply));
rc = wmi_call(wil, WMI_TEMP_SENSE_CMDID, vif->mid, &cmd, sizeof(cmd),
- WMI_TEMP_SENSE_DONE_EVENTID, &reply, sizeof(reply), 100);
+ WMI_TEMP_SENSE_DONE_EVENTID, &reply, sizeof(reply),
+ WIL_WMI_CALL_GENERAL_TO_MS);
if (rc)
return rc;
@@ -2613,6 +2658,44 @@ int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_bb, u32 *t_rf)
return 0;
}
+int wmi_get_all_temperatures(struct wil6210_priv *wil,
+ struct wmi_temp_sense_all_done_event
+ *sense_all_evt)
+{
+ struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
+ int rc;
+ struct wmi_temp_sense_all_cmd cmd = {
+ .measure_baseband_en = true,
+ .measure_rf_en = true,
+ .measure_mode = TEMPERATURE_MEASURE_NOW,
+ };
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_temp_sense_all_done_event evt;
+ } __packed reply;
+
+ if (!sense_all_evt) {
+ wil_err(wil, "Invalid sense_all_evt value\n");
+ return -EINVAL;
+ }
+
+ memset(&reply, 0, sizeof(reply));
+ reply.evt.status = WMI_FW_STATUS_FAILURE;
+ rc = wmi_call(wil, WMI_TEMP_SENSE_ALL_CMDID, vif->mid, &cmd,
+ sizeof(cmd), WMI_TEMP_SENSE_ALL_DONE_EVENTID,
+ &reply, sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
+ if (rc)
+ return rc;
+
+ if (reply.evt.status == WMI_FW_STATUS_FAILURE) {
+ wil_err(wil, "Failed geting TEMP_SENSE_ALL\n");
+ return -EINVAL;
+ }
+
+ memcpy(sense_all_evt, &reply.evt, sizeof(reply.evt));
+ return 0;
+}
+
int wmi_disconnect_sta(struct wil6210_vif *vif, const u8 *mac, u16 reason,
bool del_sta)
{
@@ -2715,7 +2798,7 @@ int wmi_addba_rx_resp(struct wil6210_priv *wil,
.dialog_token = token,
.status_code = cpu_to_le16(status),
/* bit 0: A-MSDU supported
- * bit 1: policy (should be 0 for us)
+ * bit 1: policy (controlled by FW)
* bits 2..5: TID
* bits 6..15: buffer size
*/
@@ -2745,7 +2828,7 @@ int wmi_addba_rx_resp(struct wil6210_priv *wil,
rc = wmi_call(wil, WMI_RCP_ADDBA_RESP_CMDID, mid, &cmd, sizeof(cmd),
WMI_RCP_ADDBA_RESP_SENT_EVENTID, &reply, sizeof(reply),
- 100);
+ WIL_WMI_CALL_GENERAL_TO_MS);
if (rc)
return rc;
@@ -2769,7 +2852,7 @@ int wmi_addba_rx_resp_edma(struct wil6210_priv *wil, u8 mid, u8 cid, u8 tid,
.dialog_token = token,
.status_code = cpu_to_le16(status),
/* bit 0: A-MSDU supported
- * bit 1: policy (should be 0 for us)
+ * bit 1: policy (controlled by FW)
* bits 2..5: TID
* bits 6..15: buffer size
*/
@@ -2827,7 +2910,7 @@ int wmi_ps_dev_profile_cfg(struct wil6210_priv *wil,
rc = wmi_call(wil, WMI_PS_DEV_PROFILE_CFG_CMDID, vif->mid,
&cmd, sizeof(cmd),
WMI_PS_DEV_PROFILE_CFG_EVENTID, &reply, sizeof(reply),
- 100);
+ WIL_WMI_CALL_GENERAL_TO_MS);
if (rc)
return rc;
@@ -2864,7 +2947,7 @@ int wmi_set_mgmt_retry(struct wil6210_priv *wil, u8 retry_short)
rc = wmi_call(wil, WMI_SET_MGMT_RETRY_LIMIT_CMDID, vif->mid,
&cmd, sizeof(cmd),
WMI_SET_MGMT_RETRY_LIMIT_EVENTID, &reply, sizeof(reply),
- 100);
+ WIL_WMI_CALL_GENERAL_TO_MS);
if (rc)
return rc;
@@ -2894,7 +2977,7 @@ int wmi_get_mgmt_retry(struct wil6210_priv *wil, u8 *retry_short)
memset(&reply, 0, sizeof(reply));
rc = wmi_call(wil, WMI_GET_MGMT_RETRY_LIMIT_CMDID, vif->mid, NULL, 0,
WMI_GET_MGMT_RETRY_LIMIT_EVENTID, &reply, sizeof(reply),
- 100);
+ WIL_WMI_CALL_GENERAL_TO_MS);
if (rc)
return rc;
@@ -3220,7 +3303,18 @@ static void wmi_event_handle(struct wil6210_priv *wil,
/* check if someone waits for this event */
if (wil->reply_id && wil->reply_id == id &&
wil->reply_mid == mid) {
- WARN_ON(wil->reply_buf);
+ if (wil->reply_buf) {
+ /* event received while wmi_call is waiting
+ * with a buffer. Such event should be handled
+ * in wmi_recv_cmd function. Handling the event
+ * here means a previous wmi_call was timeout.
+ * Drop the event and do not handle it.
+ */
+ wil_err(wil,
+ "Old event (%d, %s) while wmi_call is waiting. Drop it and Continue waiting\n",
+ id, eventid2name(id));
+ return;
+ }
wmi_evt_call_handler(vif, id, evt_data,
len - sizeof(*wmi));
@@ -3800,6 +3894,7 @@ int wil_wmi_bcast_desc_ring_add(struct wil6210_vif *vif, int ring_id)
.ring_size = cpu_to_le16(ring->size),
.ring_id = ring_id,
},
+ .max_msdu_size = cpu_to_le16(wil_mtu2macbuf(mtu_max)),
.status_ring_id = wil->tx_sring_idx,
.encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
};
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index da46fc8d39cf..3e37229b36b5 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -35,6 +35,7 @@
#define WMI_PROX_RANGE_NUM (3)
#define WMI_MAX_LOSS_DMG_BEACONS (20)
#define MAX_NUM_OF_SECTORS (128)
+#define WMI_INVALID_TEMPERATURE (0xFFFFFFFF)
#define WMI_SCHED_MAX_ALLOCS_PER_CMD (4)
#define WMI_RF_DTYPE_LENGTH (3)
#define WMI_RF_ETYPE_LENGTH (3)
@@ -64,6 +65,7 @@
#define WMI_QOS_MAX_WEIGHT 50
#define WMI_QOS_SET_VIF_PRIORITY (0xFF)
#define WMI_QOS_DEFAULT_PRIORITY (WMI_QOS_NUM_OF_PRIORITY)
+#define WMI_MAX_XIF_PORTS_NUM (8)
/* Mailbox interface
* used for commands and events
@@ -105,6 +107,7 @@ enum wmi_fw_capability {
WMI_FW_CAPABILITY_TX_REQ_EXT = 25,
WMI_FW_CAPABILITY_CHANNEL_4 = 26,
WMI_FW_CAPABILITY_IPA = 27,
+ WMI_FW_CAPABILITY_TEMPERATURE_ALL_RF = 30,
WMI_FW_CAPABILITY_MAX,
};
@@ -296,6 +299,7 @@ enum wmi_command_id {
WMI_SET_VRING_PRIORITY_WEIGHT_CMDID = 0xA10,
WMI_SET_VRING_PRIORITY_CMDID = 0xA11,
WMI_RBUFCAP_CFG_CMDID = 0xA12,
+ WMI_TEMP_SENSE_ALL_CMDID = 0xA13,
WMI_SET_MAC_ADDRESS_CMDID = 0xF003,
WMI_ABORT_SCAN_CMDID = 0xF007,
WMI_SET_PROMISCUOUS_MODE_CMDID = 0xF041,
@@ -1411,12 +1415,7 @@ struct wmi_rf_xpm_write_cmd {
u8 data_bytes[0];
} __packed;
-/* WMI_TEMP_SENSE_CMDID
- *
- * Measure MAC and radio temperatures
- *
- * Possible modes for temperature measurement
- */
+/* Possible modes for temperature measurement */
enum wmi_temperature_measure_mode {
TEMPERATURE_USE_OLD_VALUE = 0x01,
TEMPERATURE_MEASURE_NOW = 0x02,
@@ -1942,6 +1941,14 @@ struct wmi_set_ap_slot_size_cmd {
__le32 slot_size;
} __packed;
+/* WMI_TEMP_SENSE_ALL_CMDID */
+struct wmi_temp_sense_all_cmd {
+ u8 measure_baseband_en;
+ u8 measure_rf_en;
+ u8 measure_mode;
+ u8 reserved;
+} __packed;
+
/* WMI Events
* List of Events (target to host)
*/
@@ -2101,6 +2108,7 @@ enum wmi_event_id {
WMI_SET_VRING_PRIORITY_WEIGHT_EVENTID = 0x1A10,
WMI_SET_VRING_PRIORITY_EVENTID = 0x1A11,
WMI_RBUFCAP_CFG_EVENTID = 0x1A12,
+ WMI_TEMP_SENSE_ALL_DONE_EVENTID = 0x1A13,
WMI_SET_CHANNEL_EVENTID = 0x9000,
WMI_ASSOC_REQ_EVENTID = 0x9001,
WMI_EAPOL_RX_EVENTID = 0x9002,
@@ -2784,11 +2792,13 @@ struct wmi_fixed_scheduling_ul_config_event {
*/
struct wmi_temp_sense_done_event {
/* Temperature times 1000 (actual temperature will be achieved by
- * dividing the value by 1000)
+ * dividing the value by 1000). When the temperature cannot be read from
+ * the device, WMI_INVALID_TEMPERATURE is returned
*/
__le32 baseband_t1000;
/* Temperature times 1000 (actual temperature will be achieved by
- * dividing the value by 1000)
+ * dividing the value by 1000). When the temperature cannot be read from
+ * the device, WMI_INVALID_TEMPERATURE is returned
*/
__le32 rf_t1000;
} __packed;
@@ -4140,4 +4150,25 @@ struct wmi_rbufcap_cfg_event {
u8 reserved[3];
} __packed;
+/* WMI_TEMP_SENSE_ALL_DONE_EVENTID
+ * Measure MAC and all radio temperatures
+ */
+struct wmi_temp_sense_all_done_event {
+ /* enum wmi_fw_status */
+ u8 status;
+ /* Bitmap of connected RFs */
+ u8 rf_bitmap;
+ u8 reserved[2];
+ /* Temperature times 1000 (actual temperature will be achieved by
+ * dividing the value by 1000). When the temperature cannot be read from
+ * the device, WMI_INVALID_TEMPERATURE is returned
+ */
+ __le32 rf_t1000[WMI_MAX_XIF_PORTS_NUM];
+ /* Temperature times 1000 (actual temperature will be achieved by
+ * dividing the value by 1000). When the temperature cannot be read from
+ * the device, WMI_INVALID_TEMPERATURE is returned
+ */
+ __le32 baseband_t1000;
+} __packed;
+
#endif /* __WILOCITY_WMI_H__ */
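/* Illustrative sketch (not part of the patch, hypothetical helper name):
 * consuming the new wmi_temp_sense_all_done_event - rf_bitmap marks which RF
 * ports reported a reading, and each value is the temperature times 1000,
 * with 0 or WMI_INVALID_TEMPERATURE meaning the sensor could not be read.
 */
static void temp_all_example(struct wil6210_priv *wil,
			     struct wmi_temp_sense_all_done_event *evt)
{
	int i;
	s32 bb = le32_to_cpu(evt->baseband_t1000);

	wil_info(wil, "baseband: %d (temperature x1000)\n", bb);
	for (i = 0; i < WMI_MAX_XIF_PORTS_NUM; i++) {
		if (!(evt->rf_bitmap & BIT(i)))
			continue;	/* RF port not connected */
		wil_info(wil, "RF[%d]: %d (temperature x1000)\n", i,
			 le32_to_cpu(evt->rf_t1000[i]));
	}
}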
diff --git a/drivers/net/wireless/broadcom/b43/dma.c b/drivers/net/wireless/broadcom/b43/dma.c
index 806406aab43d..31bf71a80c26 100644
--- a/drivers/net/wireless/broadcom/b43/dma.c
+++ b/drivers/net/wireless/broadcom/b43/dma.c
@@ -797,7 +797,7 @@ static void free_all_descbuffers(struct b43_dmaring *ring)
}
}
-static u64 supported_dma_mask(struct b43_wldev *dev)
+static enum b43_dmatype b43_engine_type(struct b43_wldev *dev)
{
u32 tmp;
u16 mmio_base;
@@ -807,14 +807,14 @@ static u64 supported_dma_mask(struct b43_wldev *dev)
case B43_BUS_BCMA:
tmp = bcma_aread32(dev->dev->bdev, BCMA_IOST);
if (tmp & BCMA_IOST_DMA64)
- return DMA_BIT_MASK(64);
+ return B43_DMA_64BIT;
break;
#endif
#ifdef CONFIG_B43_SSB
case B43_BUS_SSB:
tmp = ssb_read32(dev->dev->sdev, SSB_TMSHIGH);
if (tmp & SSB_TMSHIGH_DMA64)
- return DMA_BIT_MASK(64);
+ return B43_DMA_64BIT;
break;
#endif
}
@@ -823,20 +823,7 @@ static u64 supported_dma_mask(struct b43_wldev *dev)
b43_write32(dev, mmio_base + B43_DMA32_TXCTL, B43_DMA32_TXADDREXT_MASK);
tmp = b43_read32(dev, mmio_base + B43_DMA32_TXCTL);
if (tmp & B43_DMA32_TXADDREXT_MASK)
- return DMA_BIT_MASK(32);
-
- return DMA_BIT_MASK(30);
-}
-
-static enum b43_dmatype dma_mask_to_engine_type(u64 dmamask)
-{
- if (dmamask == DMA_BIT_MASK(30))
- return B43_DMA_30BIT;
- if (dmamask == DMA_BIT_MASK(32))
return B43_DMA_32BIT;
- if (dmamask == DMA_BIT_MASK(64))
- return B43_DMA_64BIT;
- B43_WARN_ON(1);
return B43_DMA_30BIT;
}
@@ -1043,42 +1030,6 @@ void b43_dma_free(struct b43_wldev *dev)
destroy_ring(dma, tx_ring_mcast);
}
-static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
-{
- u64 orig_mask = mask;
- bool fallback = false;
- int err;
-
- /* Try to set the DMA mask. If it fails, try falling back to a
- * lower mask, as we can always also support a lower one. */
- while (1) {
- err = dma_set_mask_and_coherent(dev->dev->dma_dev, mask);
- if (!err)
- break;
- if (mask == DMA_BIT_MASK(64)) {
- mask = DMA_BIT_MASK(32);
- fallback = true;
- continue;
- }
- if (mask == DMA_BIT_MASK(32)) {
- mask = DMA_BIT_MASK(30);
- fallback = true;
- continue;
- }
- b43err(dev->wl, "The machine/kernel does not support "
- "the required %u-bit DMA mask\n",
- (unsigned int)dma_mask_to_engine_type(orig_mask));
- return -EOPNOTSUPP;
- }
- if (fallback) {
- b43info(dev->wl, "DMA mask fallback from %u-bit to %u-bit\n",
- (unsigned int)dma_mask_to_engine_type(orig_mask),
- (unsigned int)dma_mask_to_engine_type(mask));
- }
-
- return 0;
-}
-
/* Some hardware with 64-bit DMA seems to be bugged and looks for translation
* bit in low address word instead of high one.
*/
@@ -1101,15 +1052,15 @@ static bool b43_dma_translation_in_low_word(struct b43_wldev *dev,
int b43_dma_init(struct b43_wldev *dev)
{
struct b43_dma *dma = &dev->dma;
+ enum b43_dmatype type = b43_engine_type(dev);
int err;
- u64 dmamask;
- enum b43_dmatype type;
- dmamask = supported_dma_mask(dev);
- type = dma_mask_to_engine_type(dmamask);
- err = b43_dma_set_mask(dev, dmamask);
- if (err)
+ err = dma_set_mask_and_coherent(dev->dev->dma_dev, DMA_BIT_MASK(type));
+ if (err) {
+ b43err(dev->wl, "The machine/kernel does not support "
+ "the required %u-bit DMA mask\n", type);
return err;
+ }
switch (dev->dev->bus_type) {
#ifdef CONFIG_B43_BCMA
@@ -1813,7 +1764,7 @@ void b43_dma_direct_fifo_rx(struct b43_wldev *dev,
enum b43_dmatype type;
u16 mmio_base;
- type = dma_mask_to_engine_type(supported_dma_mask(dev));
+ type = b43_engine_type(dev);
mmio_base = b43_dmacontroller_base(type, engine_index);
direct_fifo_rx(dev, type, mmio_base, enable);
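The removal of supported_dma_mask()/dma_mask_to_engine_type()/b43_dma_set_mask() above leans on the b43_dmatype enumerators carrying the address width itself (30, 32 or 64), which is also why the error message can print the type with %u. A minimal standalone sketch of that idea, using hypothetical names and assuming that enumerator encoding:

    #include <linux/dma-mapping.h>

    /* Hypothetical illustration, not part of the patch: when the enum
     * value is the bit width, DMA_BIT_MASK(type) is already the right
     * mask and no 64 -> 32 -> 30 bit fallback loop is needed.
     */
    enum example_dmatype {
    	EXAMPLE_DMA_30BIT = 30,
    	EXAMPLE_DMA_32BIT = 32,
    	EXAMPLE_DMA_64BIT = 64,
    };

    static int example_set_dma_mask(struct device *dev, enum example_dmatype type)
    {
    	return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(type));
    }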
diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c
index 20815a71680b..b85603e91c7a 100644
--- a/drivers/net/wireless/broadcom/b43/main.c
+++ b/drivers/net/wireless/broadcom/b43/main.c
@@ -2590,18 +2590,13 @@ start_ieee80211:
err = ieee80211_register_hw(wl->hw);
if (err)
- goto err_one_core_detach;
+ goto out;
wl->hw_registered = true;
b43_leds_register(wl->current_dev);
/* Register HW RNG driver */
b43_rng_init(wl);
- goto out;
-
-err_one_core_detach:
- b43_one_core_detach(dev->dev);
-
out:
kfree(ctx);
}
diff --git a/drivers/net/wireless/broadcom/b43legacy/dma.c b/drivers/net/wireless/broadcom/b43legacy/dma.c
index 1cc25f44dd9a..f7594e2a896e 100644
--- a/drivers/net/wireless/broadcom/b43legacy/dma.c
+++ b/drivers/net/wireless/broadcom/b43legacy/dma.c
@@ -603,7 +603,7 @@ static void free_all_descbuffers(struct b43legacy_dmaring *ring)
}
}
-static u64 supported_dma_mask(struct b43legacy_wldev *dev)
+static enum b43legacy_dmatype b43legacy_engine_type(struct b43legacy_wldev *dev)
{
u32 tmp;
u16 mmio_base;
@@ -615,18 +615,7 @@ static u64 supported_dma_mask(struct b43legacy_wldev *dev)
tmp = b43legacy_read32(dev, mmio_base +
B43legacy_DMA32_TXCTL);
if (tmp & B43legacy_DMA32_TXADDREXT_MASK)
- return DMA_BIT_MASK(32);
-
- return DMA_BIT_MASK(30);
-}
-
-static enum b43legacy_dmatype dma_mask_to_engine_type(u64 dmamask)
-{
- if (dmamask == DMA_BIT_MASK(30))
- return B43legacy_DMA_30BIT;
- if (dmamask == DMA_BIT_MASK(32))
return B43legacy_DMA_32BIT;
- B43legacy_WARN_ON(1);
return B43legacy_DMA_30BIT;
}
@@ -784,54 +773,14 @@ void b43legacy_dma_free(struct b43legacy_wldev *dev)
dma->tx_ring0 = NULL;
}
-static int b43legacy_dma_set_mask(struct b43legacy_wldev *dev, u64 mask)
-{
- u64 orig_mask = mask;
- bool fallback = false;
- int err;
-
- /* Try to set the DMA mask. If it fails, try falling back to a
- * lower mask, as we can always also support a lower one. */
- while (1) {
- err = dma_set_mask_and_coherent(dev->dev->dma_dev, mask);
- if (!err)
- break;
- if (mask == DMA_BIT_MASK(64)) {
- mask = DMA_BIT_MASK(32);
- fallback = true;
- continue;
- }
- if (mask == DMA_BIT_MASK(32)) {
- mask = DMA_BIT_MASK(30);
- fallback = true;
- continue;
- }
- b43legacyerr(dev->wl, "The machine/kernel does not support "
- "the required %u-bit DMA mask\n",
- (unsigned int)dma_mask_to_engine_type(orig_mask));
- return -EOPNOTSUPP;
- }
- if (fallback) {
- b43legacyinfo(dev->wl, "DMA mask fallback from %u-bit to %u-"
- "bit\n",
- (unsigned int)dma_mask_to_engine_type(orig_mask),
- (unsigned int)dma_mask_to_engine_type(mask));
- }
-
- return 0;
-}
-
int b43legacy_dma_init(struct b43legacy_wldev *dev)
{
struct b43legacy_dma *dma = &dev->dma;
struct b43legacy_dmaring *ring;
+ enum b43legacy_dmatype type = b43legacy_engine_type(dev);
int err;
- u64 dmamask;
- enum b43legacy_dmatype type;
- dmamask = supported_dma_mask(dev);
- type = dma_mask_to_engine_type(dmamask);
- err = b43legacy_dma_set_mask(dev, dmamask);
+ err = dma_set_mask_and_coherent(dev->dev->dma_dev, DMA_BIT_MASK(type));
if (err) {
#ifdef CONFIG_B43LEGACY_PIO
b43legacywarn(dev->wl, "DMA for this device not supported. "
diff --git a/drivers/net/wireless/broadcom/brcm80211/Kconfig b/drivers/net/wireless/broadcom/brcm80211/Kconfig
index 1df56d1f5e00..a5bf16c4f495 100644
--- a/drivers/net/wireless/broadcom/brcm80211/Kconfig
+++ b/drivers/net/wireless/broadcom/brcm80211/Kconfig
@@ -18,55 +18,7 @@ config BRCMSMAC
be available if you select BCMA_DRIVER_GPIO. If you choose to build a
module, the driver will be called brcmsmac.ko.
-config BRCMFMAC
- tristate "Broadcom FullMAC WLAN driver"
- depends on CFG80211
- select BRCMUTIL
- ---help---
- This module adds support for wireless adapters based on Broadcom
- FullMAC chipsets. It has to work with at least one of the bus
- interface support. If you choose to build a module, it'll be called
- brcmfmac.ko.
-
-config BRCMFMAC_PROTO_BCDC
- bool
-
-config BRCMFMAC_PROTO_MSGBUF
- bool
-
-config BRCMFMAC_SDIO
- bool "SDIO bus interface support for FullMAC driver"
- depends on (MMC = y || MMC = BRCMFMAC)
- depends on BRCMFMAC
- select BRCMFMAC_PROTO_BCDC
- select FW_LOADER
- default y
- ---help---
- This option enables the SDIO bus interface support for Broadcom
- IEEE802.11n embedded FullMAC WLAN driver. Say Y if you want to
- use the driver for a SDIO wireless card.
-
-config BRCMFMAC_USB
- bool "USB bus interface support for FullMAC driver"
- depends on (USB = y || USB = BRCMFMAC)
- depends on BRCMFMAC
- select BRCMFMAC_PROTO_BCDC
- select FW_LOADER
- ---help---
- This option enables the USB bus interface support for Broadcom
- IEEE802.11n embedded FullMAC WLAN driver. Say Y if you want to
- use the driver for an USB wireless card.
-
-config BRCMFMAC_PCIE
- bool "PCIE bus interface support for FullMAC driver"
- depends on BRCMFMAC
- depends on PCI
- select BRCMFMAC_PROTO_MSGBUF
- select FW_LOADER
- ---help---
- This option enables the PCIE bus interface support for Broadcom
- IEEE802.11ac embedded FullMAC WLAN driver. Say Y if you want to
- use the driver for an PCIE wireless card.
+source "drivers/net/wireless/broadcom/brcm80211/brcmfmac/Kconfig"
config BRCM_TRACING
bool "Broadcom device tracing"
@@ -82,6 +34,6 @@ config BRCM_TRACING
config BRCMDBG
bool "Broadcom driver debug functions"
depends on BRCMSMAC || BRCMFMAC
- select WANT_DEV_COREDUMP
+ select WANT_DEV_COREDUMP if BRCMFMAC
---help---
Selecting this enables additional code for debug purposes.
diff --git a/drivers/net/wireless/broadcom/brcm80211/Makefile b/drivers/net/wireless/broadcom/brcm80211/Makefile
index b987920e982e..88115d072624 100644
--- a/drivers/net/wireless/broadcom/brcm80211/Makefile
+++ b/drivers/net/wireless/broadcom/brcm80211/Makefile
@@ -1,19 +1,9 @@
+# SPDX-License-Identifier: ISC
#
-# Makefile fragment for Broadcom 802.11n Networking Device Driver
+# Makefile fragment for Broadcom 802.11 Networking Device Driver
#
# Copyright (c) 2010 Broadcom Corporation
#
-# Permission to use, copy, modify, and/or distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
-# SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
-# OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
-# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# common flags
subdir-ccflags-$(CONFIG_BRCMDBG) += -DDEBUG
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Kconfig b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Kconfig
new file mode 100644
index 000000000000..32794c1eca23
--- /dev/null
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Kconfig
@@ -0,0 +1,50 @@
+config BRCMFMAC
+ tristate "Broadcom FullMAC WLAN driver"
+ depends on CFG80211
+ select BRCMUTIL
+ help
+ This module adds support for wireless adapters based on Broadcom
+ FullMAC chipsets. It has to work with at least one of the supported
+ bus interfaces. If you choose to build a module, it'll be called
+ brcmfmac.ko.
+
+config BRCMFMAC_PROTO_BCDC
+ bool
+
+config BRCMFMAC_PROTO_MSGBUF
+ bool
+
+config BRCMFMAC_SDIO
+ bool "SDIO bus interface support for FullMAC driver"
+ depends on (MMC = y || MMC = BRCMFMAC)
+ depends on BRCMFMAC
+ select BRCMFMAC_PROTO_BCDC
+ select FW_LOADER
+ default y
+ help
+ This option enables the SDIO bus interface support for the Broadcom
+ IEEE802.11n embedded FullMAC WLAN driver. Say Y if you want to
+ use the driver for an SDIO wireless card.
+
+config BRCMFMAC_USB
+ bool "USB bus interface support for FullMAC driver"
+ depends on (USB = y || USB = BRCMFMAC)
+ depends on BRCMFMAC
+ select BRCMFMAC_PROTO_BCDC
+ select FW_LOADER
+ help
+ This option enables the USB bus interface support for the Broadcom
+ IEEE802.11n embedded FullMAC WLAN driver. Say Y if you want to
+ use the driver for a USB wireless card.
+
+config BRCMFMAC_PCIE
+ bool "PCIE bus interface support for FullMAC driver"
+ depends on BRCMFMAC
+ depends on PCI
+ select BRCMFMAC_PROTO_MSGBUF
+ select FW_LOADER
+ help
+ This option enables the PCIE bus interface support for the Broadcom
+ IEEE802.11ac embedded FullMAC WLAN driver. Say Y if you want to
+ use the driver for a PCIE wireless card.
+
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile
index f7cf3e5f4849..9b15bc3f6054 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile
@@ -1,19 +1,9 @@
+# SPDX-License-Identifier: ISC
#
-# Makefile fragment for Broadcom 802.11n Networking Device Driver
+# Makefile fragment for Broadcom 802.11 Networking Device Driver
#
# Copyright (c) 2010 Broadcom Corporation
#
-# Permission to use, copy, modify, and/or distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
-# SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
-# OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
-# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
ccflags-y += \
-I $(srctree)/$(src) \
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
index 98b168736df0..322e913ca7aa 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/*******************************************************************************
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.h
index 4bc52240ccea..102e6938905c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.h
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2013 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef BRCMFMAC_BCDC_H
#define BRCMFMAC_BCDC_H
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index 60aede5abb4d..fc12598b2dd3 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/* ****************** SDIO CARD Interface Functions **************************/
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c
index 372363a6e752..ec2bec0999d1 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2013 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/slab.h>
#include <linux/netdevice.h>
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.h
index 19647c68aa9e..418b9424a179 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.h
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2013 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef WL_BTCOEX_H_
#define WL_BTCOEX_H_
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
index 2fe167eae22c..0988a166a785 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef BRCMFMAC_BUS_H
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 8ee8af4e7ec4..b6d0df354b36 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/* Toplevel file. Relies on dhd_linux.c to send commands to the dongle. */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
index 9a6287f084a9..b7b50b07f776 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef BRCMFMAC_CFG80211_H
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
index 22534bf2a90c..1ec48c4f4d4a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2014 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/kernel.h>
#include <linux/delay.h>
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.h
index 0ae3b33bab62..206d7695d57a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.h
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2014 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef BRCMF_CHIP_H
#define BRCMF_CHIP_H
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
index 96b8d5b3aeed..aa89d620ee5d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/kernel.h>
@@ -269,7 +258,7 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
/* query for 'ver' to get version info from firmware */
memset(buf, 0, sizeof(buf));
- strcpy(buf, "ver");
+ strlcpy(buf, "ver", sizeof(buf));
err = brcmf_fil_iovar_data_get(ifp, "ver", buf, sizeof(buf));
if (err < 0) {
bphy_err(drvr, "Retrieving version information failed, %d\n",
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h
index 4ce56be90b74..144cf4570bc3 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h
@@ -1,16 +1,6 @@
-/* Copyright (c) 2014 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2014 Broadcom Corporation
*/
#ifndef BRCMFMAC_COMMON_H
#define BRCMFMAC_COMMON_H
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/commonring.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/commonring.c
index 7b0e52195a85..49db54d23e03 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/commonring.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/commonring.c
@@ -1,16 +1,6 @@
-/* Copyright (c) 2014 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2014 Broadcom Corporation
*/
#include <linux/types.h>
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/commonring.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/commonring.h
index b85033611c8d..7fb11f4823e4 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/commonring.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/commonring.h
@@ -1,16 +1,6 @@
-/* Copyright (c) 2014 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2014 Broadcom Corporation
*/
#ifndef BRCMFMAC_COMMONRING_H
#define BRCMFMAC_COMMONRING_H
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index 7d6a08779693..bf18491a33a5 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/kernel.h>
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
index 9f09aa31eeda..86517a3d74b1 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/****************
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c
index 489b5dfdf5b9..120515fe8250 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2012 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/debugfs.h>
#include <linux/netdevice.h>
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h
index 2998726b62c3..ea6e8e839cae 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef BRCMFMAC_DEBUG_H
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
index 9f1417e00073..4aa2561934d7 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright 2018 Hans de Goede <hdegoede@redhat.com>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/dmi.h>
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
index acca719b3907..73aff4e4039d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2014 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/netdevice.h>
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
index 5e88a7f16ad2..f127eb2030a6 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2014 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _BRCMF_FEATURE_H
#define _BRCMF_FEATURE_H
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
index 6a333dd80b2d..3aed4c4b887a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2013 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/efi.h>
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
index a0834be8864e..3347439543bb 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2013 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef BRCMFMAC_FIRMWARE_H
#define BRCMFMAC_FIRMWARE_H
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c
index d0d8b32af7d0..8e9d067bdfed 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c
@@ -1,16 +1,6 @@
-/* Copyright (c) 2014 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2014 Broadcom Corporation
*/
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.h
index 068e68d94999..818882b0fd01 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.h
@@ -1,16 +1,6 @@
-/* Copyright (c) 2014 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2014 Broadcom Corporation
*/
#ifndef BRCMFMAC_FLOWRING_H
#define BRCMFMAC_FLOWRING_H
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
index 63e98fd583ab..adedd4fac10b 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2012 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/netdevice.h>
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h
index 7027243db17e..a82f51bc1e69 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2012 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c
index 8ea27489734e..9ed85420f3ca 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2012 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/* FWIL is the Firmware Interface Layer. In this module the support functions
@@ -314,7 +303,7 @@ brcmf_create_bsscfg(s32 bsscfgidx, char *name, char *data, u32 datalen,
return brcmf_create_iovar(name, data, datalen, buf, buflen);
prefixlen = strlen(prefix);
- namelen = strlen(name) + 1; /* lengh of iovar name + null */
+ namelen = strlen(name) + 1; /* length of iovar name + null */
iolen = prefixlen + namelen + sizeof(bsscfgidx_le) + datalen;
if (buflen < iolen) {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
index b6b183b18413..0ff6f5212a94 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2012 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _fwil_h_
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
index 39ac1bbb6cc0..37c512036e0e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2012 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
index c22c49ae552e..b8452cb46297 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/types.h>
#include <linux/module.h>
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h
index 749c06dcdc17..10184eeaad94 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2012 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-
#ifndef FWSIGNAL_H_
#define FWSIGNAL_H_
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
index 9d1f9ff25bfa..241747bd5cb2 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
@@ -1,16 +1,6 @@
-/* Copyright (c) 2014 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2014 Broadcom Corporation
*/
/*******************************************************************************
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h
index 692235d25277..2e322edbb907 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h
@@ -1,16 +1,6 @@
-/* Copyright (c) 2014 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2014 Broadcom Corporation
*/
#ifndef BRCMFMAC_MSGBUF_H
#define BRCMFMAC_MSGBUF_H
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
index 84e3373289eb..b886b56a5e5a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2014 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/init.h>
#include <linux/of.h>
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.h
index 95b7032d54b1..10bf52253337 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.h
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2014 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifdef CONFIG_OF
void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
index 73a0e550f2b2..7ba9f6a68645 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2012 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/slab.h>
#include <linux/netdevice.h>
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h
index 39f0d0218088..64ab9b6a677d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2012 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef WL_CFGP2P_H_
#define WL_CFGP2P_H_
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index 83e4938527f4..4ea5401c4d6b 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -1,16 +1,6 @@
-/* Copyright (c) 2014 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2014 Broadcom Corporation
*/
#include <linux/kernel.h>
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.h
index 6edaaf8ef5ce..d026401d2001 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.h
@@ -1,16 +1,6 @@
-/* Copyright (c) 2014 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2014 Broadcom Corporation
*/
#ifndef BRCMFMAC_PCIE_H
#define BRCMFMAC_PCIE_H
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
index 0fb97f7dd5a2..14e530601ef3 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2016 Broadcom
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/netdevice.h>
#include <linux/gcd.h>
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.h
index cd9e35ae3b21..25d406019ac3 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.h
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2016 Broadcom
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _BRCMF_PNO_H
#define _BRCMF_PNO_H
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c
index c7964ccdda69..e3d1b075044b 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2013 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h
index 72355aea9028..8d55fad531d0 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2013 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef BRCMFMAC_PROTO_H
#define BRCMFMAC_PROTO_H
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index 9a51f1ba87c3..629140b6d7e2 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/types.h>
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
index 34b031154da9..0bd47c119dae 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef BRCMFMAC_SDIO_H
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.c
index a5c271bff446..814fcc7538d5 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2012 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/device.h>
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.h
index 4d7d51f95716..338c66d0c5f8 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.h
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2013 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#if !defined(BRCMF_TRACEPOINT_H_) || defined(TRACE_HEADER_MULTI_READ)
#define BRCMF_TRACEPOINT_H_
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
index 75fcd6752edc..d33628b79a3a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2011 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/kernel.h>
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.h
index f483a8c9945b..ee273e3bb101 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.h
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2011 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef BRCMFMAC_USB_H
#define BRCMFMAC_USB_H
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c
index d493021f6031..f6500899fc14 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2014 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/vmalloc.h>
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.h
index 061b7bfa2e1c..418f33ea6fd3 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.h
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2014 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _vendor_h_
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c
index 35e3b101e5cf..2441714169de 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/kernel.h>
#include <linux/delay.h>
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_hal.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_hal.h
index 4d3734f48d9c..2e6a3d454ee8 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_hal.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_hal.h
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/*
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_int.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_int.h
index e9e8337f386c..8668fa5558a2 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_int.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_int.h
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _BRCM_PHY_INT_H_
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
index c6e107f41948..7ef36234a25d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/kernel.h>
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.h
index f4a8ab09da43..ae0e8d5df339 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.h
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _BRCM_PHY_LCN_H_
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
index f4f5e9044152..07f61d6155ea 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_qmath.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_qmath.c
index b24bc57ca91b..45dcd277a89f 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_qmath.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_qmath.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "phy_qmath.h"
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_qmath.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_qmath.h
index 20e3783f921b..5d0083a87fd0 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_qmath.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_qmath.h
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _BRCM_QMATH_H_
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_radio.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_radio.h
index c3a675455ff5..706ab03c8346 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_radio.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_radio.h
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _BRCM_PHY_RADIO_H_
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phyreg_n.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phyreg_n.h
index a97c3a799479..f49a10c452e9 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phyreg_n.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phyreg_n.h
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#define NPHY_TBL_ID_GAIN1 0
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_lcn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_lcn.c
index d7fa312214f3..be703be34616 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_lcn.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_lcn.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <types.h>
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_lcn.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_lcn.h
index 489422a36085..b49580c654fb 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_lcn.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_lcn.h
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <types.h>
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_n.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_n.c
index 533bd4b0277e..7607e67d20c7 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_n.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_n.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/kernel.h>
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_n.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_n.h
index dc8a84e85117..28208aba4af2 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_n.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_n.h
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#define ANT_SWCTRL_TBL_REV3_IDX (0)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmutil/Makefile b/drivers/net/wireless/broadcom/brcm80211/brcmutil/Makefile
index bb02c6220a88..7a82d615ba2a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmutil/Makefile
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmutil/Makefile
@@ -1,20 +1,9 @@
+# SPDX-License-Identifier: ISC
#
# Makefile fragment for Broadcom 802.11n Networking Device Driver Utilities
#
# Copyright (c) 2011 Broadcom Corporation
#
-# Permission to use, copy, modify, and/or distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
-# SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
-# OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
-# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
ccflags-y := -I $(srctree)/$(src)/../include
obj-$(CONFIG_BRCMUTIL) += brcmutil.o
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c b/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c
index 8ac34821f1c1..1e2b1e487eb7 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2013 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/*********************channel spec common functions*********************/
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c b/drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c
index 0543607002fd..4c84c3001c3f 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
index 839980da9643..d1037b6ef2d6 100644
--- a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _BRCM_HW_IDS_H_
diff --git a/drivers/net/wireless/broadcom/brcm80211/include/brcmu_d11.h b/drivers/net/wireless/broadcom/brcm80211/include/brcmu_d11.h
index 8b8b2ecb3199..f6344023855c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/include/brcmu_d11.h
+++ b/drivers/net/wireless/broadcom/brcm80211/include/brcmu_d11.h
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _BRCMU_D11_H_
diff --git a/drivers/net/wireless/broadcom/brcm80211/include/brcmu_utils.h b/drivers/net/wireless/broadcom/brcm80211/include/brcmu_utils.h
index 41969527b459..946532328667 100644
--- a/drivers/net/wireless/broadcom/brcm80211/include/brcmu_utils.h
+++ b/drivers/net/wireless/broadcom/brcm80211/include/brcmu_utils.h
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _BRCMU_UTILS_H_
diff --git a/drivers/net/wireless/broadcom/brcm80211/include/brcmu_wifi.h b/drivers/net/wireless/broadcom/brcm80211/include/brcmu_wifi.h
index dddebaa60352..7b31c212694d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/include/brcmu_wifi.h
+++ b/drivers/net/wireless/broadcom/brcm80211/include/brcmu_wifi.h
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _BRCMU_WIFI_H_
diff --git a/drivers/net/wireless/broadcom/brcm80211/include/chipcommon.h b/drivers/net/wireless/broadcom/brcm80211/include/chipcommon.h
index de8225e6248b..0340bba96868 100644
--- a/drivers/net/wireless/broadcom/brcm80211/include/chipcommon.h
+++ b/drivers/net/wireless/broadcom/brcm80211/include/chipcommon.h
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _SBCHIPC_H
diff --git a/drivers/net/wireless/broadcom/brcm80211/include/defs.h b/drivers/net/wireless/broadcom/brcm80211/include/defs.h
index 8d1e85e0ed51..9e7e6116eb74 100644
--- a/drivers/net/wireless/broadcom/brcm80211/include/defs.h
+++ b/drivers/net/wireless/broadcom/brcm80211/include/defs.h
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _BRCM_DEFS_H_
diff --git a/drivers/net/wireless/broadcom/brcm80211/include/soc.h b/drivers/net/wireless/broadcom/brcm80211/include/soc.h
index 123cfa854a0d..92d942b44f2c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/include/soc.h
+++ b/drivers/net/wireless/broadcom/brcm80211/include/soc.h
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _BRCM_SOC_H
diff --git a/drivers/net/wireless/cisco/Kconfig b/drivers/net/wireless/cisco/Kconfig
index 7329830ed7cc..01e173ede894 100644
--- a/drivers/net/wireless/cisco/Kconfig
+++ b/drivers/net/wireless/cisco/Kconfig
@@ -17,6 +17,7 @@ config AIRO
depends on CFG80211 && ISA_DMA_API && (PCI || BROKEN)
select WIRELESS_EXT
select CRYPTO
+ select CRYPTO_BLKCIPHER
select WEXT_SPY
select WEXT_PRIV
---help---
@@ -40,6 +41,7 @@ config AIRO_CS
select WEXT_PRIV
select CRYPTO
select CRYPTO_AES
+ select CRYPTO_CTR
---help---
This is the standard Linux driver to support Cisco/Aironet PCMCIA
802.11 wireless cards. This driver is the same as the Aironet
diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c
index 3f5a14112c6b..9342ffbe1e81 100644
--- a/drivers/net/wireless/cisco/airo.c
+++ b/drivers/net/wireless/cisco/airo.c
@@ -49,6 +49,9 @@
#include <linux/kthread.h>
#include <linux/freezer.h>
+#include <crypto/aes.h>
+#include <crypto/skcipher.h>
+
#include <net/cfg80211.h>
#include <net/iw_handler.h>
@@ -951,7 +954,7 @@ typedef struct {
} mic_statistics;
typedef struct {
- u32 coeff[((EMMH32_MSGLEN_MAX)+3)>>2];
+ __be32 coeff[((EMMH32_MSGLEN_MAX)+3)>>2];
u64 accum; // accumulated mic, reduced to u32 in final()
int position; // current position (byte offset) in message
union {
@@ -1216,7 +1219,7 @@ struct airo_info {
struct iw_spy_data spy_data;
struct iw_public_data wireless_data;
/* MIC stuff */
- struct crypto_cipher *tfm;
+ struct crypto_sync_skcipher *tfm;
mic_module mod[2];
mic_statistics micstats;
HostRxDesc rxfids[MPI_MAX_FIDS]; // rx/tx/config MPI350 descriptors
@@ -1291,14 +1294,14 @@ static int flashrestart(struct airo_info *ai,struct net_device *dev);
static int RxSeqValid (struct airo_info *ai,miccntx *context,int mcast,u32 micSeq);
static void MoveWindow(miccntx *context, u32 micSeq);
static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen,
- struct crypto_cipher *tfm);
+ struct crypto_sync_skcipher *tfm);
static void emmh32_init(emmh32_context *context);
static void emmh32_update(emmh32_context *context, u8 *pOctets, int len);
static void emmh32_final(emmh32_context *context, u8 digest[4]);
static int flashpchar(struct airo_info *ai,int byte,int dwelltime);
static void age_mic_context(miccntx *cur, miccntx *old, u8 *key, int key_len,
- struct crypto_cipher *tfm)
+ struct crypto_sync_skcipher *tfm)
{
/* If the current MIC context is valid and its key is the same as
* the MIC register, there's nothing to do.
@@ -1359,7 +1362,7 @@ static int micsetup(struct airo_info *ai) {
int i;
if (ai->tfm == NULL)
- ai->tfm = crypto_alloc_cipher("aes", 0, 0);
+ ai->tfm = crypto_alloc_sync_skcipher("ctr(aes)", 0, 0);
if (IS_ERR(ai->tfm)) {
airo_print_err(ai->dev->name, "failed to load transform for AES");
@@ -1624,37 +1627,31 @@ static void MoveWindow(miccntx *context, u32 micSeq)
/* mic accumulate */
#define MIC_ACCUM(val) \
- context->accum += (u64)(val) * context->coeff[coeff_position++];
-
-static unsigned char aes_counter[16];
+ context->accum += (u64)(val) * be32_to_cpu(context->coeff[coeff_position++]);
/* expand the key to fill the MMH coefficient array */
static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen,
- struct crypto_cipher *tfm)
+ struct crypto_sync_skcipher *tfm)
{
/* take the keying material, expand if necessary, truncate at 16-bytes */
/* run through AES counter mode to generate context->coeff[] */
- int i,j;
- u32 counter;
- u8 *cipher, plain[16];
-
- crypto_cipher_setkey(tfm, pkey, 16);
- counter = 0;
- for (i = 0; i < ARRAY_SIZE(context->coeff); ) {
- aes_counter[15] = (u8)(counter >> 0);
- aes_counter[14] = (u8)(counter >> 8);
- aes_counter[13] = (u8)(counter >> 16);
- aes_counter[12] = (u8)(counter >> 24);
- counter++;
- memcpy (plain, aes_counter, 16);
- crypto_cipher_encrypt_one(tfm, plain, plain);
- cipher = plain;
- for (j = 0; (j < 16) && (i < ARRAY_SIZE(context->coeff)); ) {
- context->coeff[i++] = ntohl(*(__be32 *)&cipher[j]);
- j += 4;
- }
- }
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
+ struct scatterlist sg;
+ u8 iv[AES_BLOCK_SIZE] = {};
+ int ret;
+
+ crypto_sync_skcipher_setkey(tfm, pkey, 16);
+
+ memset(context->coeff, 0, sizeof(context->coeff));
+ sg_init_one(&sg, context->coeff, sizeof(context->coeff));
+
+ skcipher_request_set_sync_tfm(req, tfm);
+ skcipher_request_set_callback(req, 0, NULL, NULL);
+ skcipher_request_set_crypt(req, &sg, &sg, sizeof(context->coeff), iv);
+
+ ret = crypto_skcipher_encrypt(req);
+ WARN_ON_ONCE(ret);
}
/* prepare for calculation of a new mic */
@@ -2415,7 +2412,7 @@ void stop_airo_card( struct net_device *dev, int freeres )
ai->shared, ai->shared_dma);
}
}
- crypto_free_cipher(ai->tfm);
+ crypto_free_sync_skcipher(ai->tfm);
del_airo_dev(ai);
free_netdev( dev );
}
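
The airo.c changes above replace a hand-rolled AES counter loop (crypto_cipher_encrypt_one() on a manually built big-endian counter block, byte-swapped into coeff[] with ntohl()) with a single "ctr(aes)" synchronous skcipher request: encrypting an all-zero coeff[] under an all-zero IV produces the raw keystream directly, which reproduces the old zero-initialised aes_counter[] block with its 32-bit big-endian counter in the last four bytes. The array is now stored as __be32 and converted at use time via be32_to_cpu() in MIC_ACCUM, and the CRYPTO_BLKCIPHER/CRYPTO_CTR selects added to the cisco Kconfig entries above pull in the required algorithms. Below is a minimal, self-contained sketch of the same keystream pattern; the function name, buffer handling and error paths are illustrative and not taken from the patch.

    #include <linux/err.h>
    #include <linux/string.h>
    #include <linux/scatterlist.h>
    #include <crypto/aes.h>
    #include <crypto/skcipher.h>

    /*
     * Fill @buf (must not be stack memory, since it is mapped through a
     * scatterlist) with an AES-CTR keystream derived from @key, by
     * encrypting all-zero plaintext in place with a zero counter block.
     * Sketch of the emmh32_setseed() conversion above; illustrative only.
     */
    static int ctr_keystream(u8 *buf, unsigned int len, const u8 *key)
    {
            struct crypto_sync_skcipher *tfm;
            struct scatterlist sg;
            u8 iv[AES_BLOCK_SIZE] = {};
            int ret;

            tfm = crypto_alloc_sync_skcipher("ctr(aes)", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            ret = crypto_sync_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
            if (!ret) {
                    SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

                    memset(buf, 0, len);    /* E_k(ctr) XOR 0 == keystream */
                    sg_init_one(&sg, buf, len);
                    skcipher_request_set_sync_tfm(req, tfm);
                    skcipher_request_set_callback(req, 0, NULL, NULL);
                    skcipher_request_set_crypt(req, &sg, &sg, len, iv);
                    ret = crypto_skcipher_encrypt(req);
            }

            crypto_free_sync_skcipher(tfm);
            return ret;
    }

The buffer has to be addressable through a scatterlist (i.e. not on the stack); the driver satisfies this because context->coeff[] lives inside the heap-allocated airo_info private data.
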
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-rs.c b/drivers/net/wireless/intel/iwlegacy/3945-rs.c
index 5bd8a9ee8b1e..6209f85a71dd 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-rs.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-rs.c
@@ -631,9 +631,6 @@ il3945_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta,
il_sta = NULL;
}
- if (rate_control_send_low(sta, il_sta, txrc))
- return;
-
rate_mask = sta->supp_rates[sband->band];
/* get user max rate if set */
@@ -846,17 +843,8 @@ il3945_add_debugfs(void *il, void *il_sta, struct dentry *dir)
{
struct il3945_rs_sta *lq_sta = il_sta;
- lq_sta->rs_sta_dbgfs_stats_table_file =
- debugfs_create_file("rate_stats_table", 0600, dir, lq_sta,
- &rs_sta_dbgfs_stats_table_ops);
-
-}
-
-static void
-il3945_remove_debugfs(void *il, void *il_sta)
-{
- struct il3945_rs_sta *lq_sta = il_sta;
- debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
+ debugfs_create_file("rate_stats_table", 0600, dir, lq_sta,
+ &rs_sta_dbgfs_stats_table_ops);
}
#endif
@@ -883,7 +871,6 @@ static const struct rate_control_ops rs_ops = {
.free_sta = il3945_rs_free_sta,
#ifdef CONFIG_MAC80211_DEBUGFS
.add_sta_debugfs = il3945_add_debugfs,
- .remove_sta_debugfs = il3945_remove_debugfs,
#endif
};
diff --git a/drivers/net/wireless/intel/iwlegacy/3945.h b/drivers/net/wireless/intel/iwlegacy/3945.h
index 8e97e95fcbc4..82e4a4878bc2 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945.h
+++ b/drivers/net/wireless/intel/iwlegacy/3945.h
@@ -72,9 +72,6 @@ struct il3945_rs_sta {
u8 start_rate;
struct timer_list rate_scale_flush;
struct il3945_rate_scale_data win[RATE_COUNT_3945];
-#ifdef CONFIG_MAC80211_DEBUGFS
- struct dentry *rs_sta_dbgfs_stats_table_file;
-#endif
/* used to be in sta_info */
int last_txrate_idx;
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-rs.c b/drivers/net/wireless/intel/iwlegacy/4965-rs.c
index a824a10a43b6..7c6e2c863497 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-rs.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-rs.c
@@ -2209,10 +2209,6 @@ il4965_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta,
il_sta = NULL;
}
- /* Send management frames and NO_ACK data using lowest rate. */
- if (rate_control_send_low(sta, il_sta, txrc))
- return;
-
if (!lq_sta)
return;
@@ -2752,29 +2748,15 @@ static void
il4965_rs_add_debugfs(void *il, void *il_sta, struct dentry *dir)
{
struct il_lq_sta *lq_sta = il_sta;
- lq_sta->rs_sta_dbgfs_scale_table_file =
- debugfs_create_file("rate_scale_table", 0600, dir,
- lq_sta, &rs_sta_dbgfs_scale_table_ops);
- lq_sta->rs_sta_dbgfs_stats_table_file =
- debugfs_create_file("rate_stats_table", 0400, dir, lq_sta,
- &rs_sta_dbgfs_stats_table_ops);
- lq_sta->rs_sta_dbgfs_rate_scale_data_file =
- debugfs_create_file("rate_scale_data", 0400, dir, lq_sta,
- &rs_sta_dbgfs_rate_scale_data_ops);
- lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
- debugfs_create_u8("tx_agg_tid_enable", 0600, dir,
- &lq_sta->tx_agg_tid_en);
-
-}
-static void
-il4965_rs_remove_debugfs(void *il, void *il_sta)
-{
- struct il_lq_sta *lq_sta = il_sta;
- debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
- debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
- debugfs_remove(lq_sta->rs_sta_dbgfs_rate_scale_data_file);
- debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
+ debugfs_create_file("rate_scale_table", 0600, dir, lq_sta,
+ &rs_sta_dbgfs_scale_table_ops);
+ debugfs_create_file("rate_stats_table", 0400, dir, lq_sta,
+ &rs_sta_dbgfs_stats_table_ops);
+ debugfs_create_file("rate_scale_data", 0400, dir, lq_sta,
+ &rs_sta_dbgfs_rate_scale_data_ops);
+ debugfs_create_u8("tx_agg_tid_enable", 0600, dir,
+ &lq_sta->tx_agg_tid_en);
}
#endif
@@ -2801,7 +2783,6 @@ static const struct rate_control_ops rs_4965_ops = {
.free_sta = il4965_rs_free_sta,
#ifdef CONFIG_MAC80211_DEBUGFS
.add_sta_debugfs = il4965_rs_add_debugfs,
- .remove_sta_debugfs = il4965_rs_remove_debugfs,
#endif
};
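
The iwlegacy rate-scaling hunks above (together with the matching dentry-field removals in common.h further down) drop both the cached struct dentry pointers and the .remove_sta_debugfs callbacks. The return values of debugfs_create_file()/debugfs_create_u8() no longer need to be kept: creation failures are non-fatal by design, and the per-station directory handed to .add_sta_debugfs is removed recursively when the station goes away, taking every file under it along. A minimal sketch of the resulting pattern follows; all names here are illustrative, not from the patch.

    #include <linux/types.h>
    #include <linux/debugfs.h>

    /* Illustrative only: the structure and names are not from the patch. */
    struct demo_rs_sta {
            u8 tx_agg_tid_en;
            /* no struct dentry pointers kept around any more */
    };

    static void demo_add_sta_debugfs(struct demo_rs_sta *rs, struct dentry *dir)
    {
            /*
             * Return value deliberately ignored: creation failures are
             * non-fatal, and the file disappears together with @dir.
             */
            debugfs_create_u8("tx_agg_tid_enable", 0600, dir,
                              &rs->tx_agg_tid_en);
    }

    static void demo_remove_sta_dir(struct dentry *dir)
    {
            /* One recursive removal replaces per-file debugfs_remove() calls. */
            debugfs_remove_recursive(dir);
    }

In the driver itself even the explicit recursive removal is unnecessary, since the per-station directory is owned and torn down by the mac80211 core, which is what allows the .remove_sta_debugfs hooks to be deleted from the rate_control_ops.
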
diff --git a/drivers/net/wireless/intel/iwlegacy/Kconfig b/drivers/net/wireless/intel/iwlegacy/Kconfig
index aa01c83e0060..e329fd7b09c0 100644
--- a/drivers/net/wireless/intel/iwlegacy/Kconfig
+++ b/drivers/net/wireless/intel/iwlegacy/Kconfig
@@ -32,7 +32,7 @@ config IWL4965
If you want to compile the driver as a module ( = code which can be
inserted in and removed from the running kernel whenever you want),
- say M here and read <file:Documentation/kbuild/modules.txt>. The
+ say M here and read <file:Documentation/kbuild/modules.rst>. The
module will be called iwl4965.
config IWL3945
@@ -58,7 +58,7 @@ config IWL3945
If you want to compile the driver as a module ( = code which can be
inserted in and removed from the running kernel whenever you want),
- say M here and read <file:Documentation/kbuild/modules.txt>. The
+ say M here and read <file:Documentation/kbuild/modules.rst>. The
module will be called iwl3945.
menu "iwl3945 / iwl4965 Debugging Options"
diff --git a/drivers/net/wireless/intel/iwlegacy/common.h b/drivers/net/wireless/intel/iwlegacy/common.h
index 6685b9a7e7d1..e7fb8e6bb9e7 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.h
+++ b/drivers/net/wireless/intel/iwlegacy/common.h
@@ -2807,10 +2807,6 @@ struct il_lq_sta {
struct il_traffic_load load[TID_MAX_LOAD_COUNT];
u8 tx_agg_tid_en;
#ifdef CONFIG_MAC80211_DEBUGFS
- struct dentry *rs_sta_dbgfs_scale_table_file;
- struct dentry *rs_sta_dbgfs_stats_table_file;
- struct dentry *rs_sta_dbgfs_rate_scale_data_file;
- struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
u32 dbg_fixed_rate;
#endif
struct il_priv *drv;
diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig
index e5528189163f..235349a33a3c 100644
--- a/drivers/net/wireless/intel/iwlwifi/Kconfig
+++ b/drivers/net/wireless/intel/iwlwifi/Kconfig
@@ -40,7 +40,7 @@ config IWLWIFI
If you want to compile the driver as a module ( = code which can be
inserted in and removed from the running kernel whenever you want),
- say M here and read <file:Documentation/kbuild/modules.txt>. The
+ say M here and read <file:Documentation/kbuild/modules.rst>. The
module will be called iwlwifi.
if IWLWIFI
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
index a9c846c59289..93526dfaf791 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -82,6 +82,7 @@
#define IWL_22000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-"
#define IWL_QU_B_JF_B_FW_PRE "iwlwifi-Qu-b0-jf-b0-"
#define IWL_QUZ_A_HR_B_FW_PRE "iwlwifi-QuZ-a0-hr-b0-"
+#define IWL_QUZ_A_JF_B_FW_PRE "iwlwifi-QuZ-a0-jf-b0-"
#define IWL_QNJ_B_JF_B_FW_PRE "iwlwifi-QuQnj-b0-jf-b0-"
#define IWL_CC_A_FW_PRE "iwlwifi-cc-a0-"
#define IWL_22000_SO_A_JF_B_FW_PRE "iwlwifi-so-a0-jf-b0-"
@@ -106,6 +107,8 @@
IWL_22000_HR_A0_FW_PRE __stringify(api) ".ucode"
#define IWL_QUZ_A_HR_B_MODULE_FIRMWARE(api) \
IWL_QUZ_A_HR_B_FW_PRE __stringify(api) ".ucode"
+#define IWL_QUZ_A_JF_B_MODULE_FIRMWARE(api) \
+ IWL_QUZ_A_JF_B_FW_PRE __stringify(api) ".ucode"
#define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \
IWL_QU_B_JF_B_FW_PRE __stringify(api) ".ucode"
#define IWL_QNJ_B_JF_B_MODULE_FIRMWARE(api) \
@@ -241,6 +244,18 @@ const struct iwl_cfg iwl_ax101_cfg_qu_hr = {
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
+const struct iwl_cfg iwl_ax201_cfg_qu_hr = {
+ .name = "Intel(R) Wi-Fi 6 AX201 160MHz",
+ .fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+};
+
const struct iwl_cfg iwl_ax101_cfg_quz_hr = {
.name = "Intel(R) Wi-Fi 6 AX101",
.fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE,
@@ -253,6 +268,42 @@ const struct iwl_cfg iwl_ax101_cfg_quz_hr = {
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
+const struct iwl_cfg iwl_ax201_cfg_quz_hr = {
+ .name = "Intel(R) Wi-Fi 6 AX201 160MHz",
+ .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+};
+
+const struct iwl_cfg iwl_ax1650s_cfg_quz_hr = {
+ .name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201D2W)",
+ .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+};
+
+const struct iwl_cfg iwl_ax1650i_cfg_quz_hr = {
+ .name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)",
+ .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+};
+
const struct iwl_cfg iwl_ax200_cfg_cc = {
.name = "Intel(R) Wi-Fi 6 AX200 160MHz",
.fw_name_pre = IWL_CC_A_FW_PRE,
@@ -333,6 +384,90 @@ const struct iwl_cfg iwl9560_2ac_cfg_qnj_jf_b0 = {
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
+const struct iwl_cfg iwl9560_2ac_cfg_quz_a0_jf_b0_soc = {
+ .name = "Intel(R) Wireless-AC 9560 160MHz",
+ .fw_name_pre = IWL_QUZ_A_JF_B_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .integrated = true,
+ .soc_latency = 5000,
+};
+
+const struct iwl_cfg iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc = {
+ .name = "Intel(R) Wireless-AC 9560 160MHz",
+ .fw_name_pre = IWL_QUZ_A_JF_B_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .integrated = true,
+ .soc_latency = 5000,
+};
+
+const struct iwl_cfg iwl9461_2ac_cfg_quz_a0_jf_b0_soc = {
+ .name = "Intel(R) Dual Band Wireless AC 9461",
+ .fw_name_pre = IWL_QUZ_A_JF_B_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .integrated = true,
+ .soc_latency = 5000,
+};
+
+const struct iwl_cfg iwl9462_2ac_cfg_quz_a0_jf_b0_soc = {
+ .name = "Intel(R) Dual Band Wireless AC 9462",
+ .fw_name_pre = IWL_QUZ_A_JF_B_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .integrated = true,
+ .soc_latency = 5000,
+};
+
+const struct iwl_cfg iwl9560_killer_s_2ac_cfg_quz_a0_jf_b0_soc = {
+ .name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)",
+ .fw_name_pre = IWL_QUZ_A_JF_B_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .integrated = true,
+ .soc_latency = 5000,
+};
+
+const struct iwl_cfg iwl9560_killer_i_2ac_cfg_quz_a0_jf_b0_soc = {
+ .name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)",
+ .fw_name_pre = IWL_QUZ_A_JF_B_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .integrated = true,
+ .soc_latency = 5000,
+};
+
const struct iwl_cfg killer1550i_2ac_cfg_qu_b0_jf_b0 = {
.name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)",
.fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
@@ -424,12 +559,12 @@ const struct iwl_cfg iwlax210_2ax_cfg_so_jf_a0 = {
};
const struct iwl_cfg iwlax210_2ax_cfg_so_hr_a0 = {
- .name = "Intel(R) Wi-Fi 6 AX201 160MHz",
+ .name = "Intel(R) Wi-Fi 7 AX210 160MHz",
.fw_name_pre = IWL_22000_SO_A_HR_B_FW_PRE,
IWL_DEVICE_AX210,
};
-const struct iwl_cfg iwlax210_2ax_cfg_so_gf_a0 = {
+const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0 = {
.name = "Intel(R) Wi-Fi 7 AX211 160MHz",
.fw_name_pre = IWL_22000_SO_A_GF_A_FW_PRE,
.uhb_supported = true,
@@ -443,8 +578,8 @@ const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0 = {
IWL_DEVICE_AX210,
};
-const struct iwl_cfg iwlax210_2ax_cfg_so_gf4_a0 = {
- .name = "Intel(R) Wi-Fi 7 AX210 160MHz",
+const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0 = {
+ .name = "Intel(R) Wi-Fi 7 AX411 160MHz",
.fw_name_pre = IWL_22000_SO_A_GF4_A_FW_PRE,
IWL_DEVICE_AX210,
};
@@ -457,6 +592,7 @@ MODULE_FIRMWARE(IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QU_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QUZ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_QUZ_A_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QNJ_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_CC_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_SO_A_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/lib.c b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c
index 1fd6bf578474..eab94d2f46b1 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/lib.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c
@@ -1009,8 +1009,7 @@ int iwlagn_send_patterns(struct iwl_priv *priv,
if (!wowlan->n_patterns)
return 0;
- cmd.len[0] = sizeof(*pattern_cmd) +
- wowlan->n_patterns * sizeof(struct iwlagn_wowlan_pattern);
+ cmd.len[0] = struct_size(pattern_cmd, patterns, wowlan->n_patterns);
pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL);
if (!pattern_cmd)
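
The hunk above replaces an open-coded "header + n * element" length computation with struct_size(), the kernel helper from <linux/overflow.h> that also saturates on multiplication overflow. A minimal userspace sketch of the same sizing pattern follows; the struct names are stand-ins, not the driver's real WoWLAN types:

#include <stdio.h>
#include <stdlib.h>

struct pattern {			/* stand-in for struct iwlagn_wowlan_pattern */
	unsigned char mask[16];
};

struct pattern_cmd {			/* stand-in for the patterns host command */
	unsigned int n_patterns;
	struct pattern patterns[];	/* flexible array member */
};

int main(void)
{
	unsigned int n = 4;
	/* what struct_size(cmd, patterns, n) computes, minus the overflow check */
	size_t len = sizeof(struct pattern_cmd) + n * sizeof(struct pattern);
	struct pattern_cmd *cmd = calloc(1, len);

	if (!cmd)
		return 1;
	cmd->n_patterns = n;
	printf("allocated %zu bytes for %u patterns\n", len, n);
	free(cmd);
	return 0;
}
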
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
index b500c9279a32..b1e5d64ca60d 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
@@ -2720,10 +2720,6 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
priv_sta = NULL;
}
- /* Send management frames and NO_ACK data using lowest rate. */
- if (rate_control_send_low(sta, priv_sta, txrc))
- return;
-
rate_idx = lq_sta->last_txrate_idx;
if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) {
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
index 405038ce98d6..7573af2d88ce 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
@@ -97,7 +97,7 @@ IWL_EXPORT_SYMBOL(iwl_acpi_get_object);
union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
union acpi_object *data,
- int data_size)
+ int data_size, int *tbl_rev)
{
int i;
union acpi_object *wifi_pkg;
@@ -113,16 +113,19 @@ union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
/*
* We need at least two packages, one for the revision and one
* for the data itself. Also check that the revision is valid
- * (i.e. it is an integer set to 0).
+ * (i.e. it is an integer smaller than 2, as we currently support only
+ * 2 revisions).
*/
if (data->type != ACPI_TYPE_PACKAGE ||
data->package.count < 2 ||
data->package.elements[0].type != ACPI_TYPE_INTEGER ||
- data->package.elements[0].integer.value != 0) {
+ data->package.elements[0].integer.value > 1) {
IWL_DEBUG_DEV_RADIO(dev, "Unsupported packages structure\n");
return ERR_PTR(-EINVAL);
}
+ *tbl_rev = data->package.elements[0].integer.value;
+
/* loop through all the packages to find the one for WiFi */
for (i = 1; i < data->package.count; i++) {
union acpi_object *domain;
@@ -151,14 +154,15 @@ int iwl_acpi_get_mcc(struct device *dev, char *mcc)
{
union acpi_object *wifi_pkg, *data;
u32 mcc_val;
- int ret;
+ int ret, tbl_rev;
data = iwl_acpi_get_object(dev, ACPI_WRDD_METHOD);
if (IS_ERR(data))
return PTR_ERR(data);
- wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_WRDD_WIFI_DATA_SIZE);
- if (IS_ERR(wifi_pkg)) {
+ wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_WRDD_WIFI_DATA_SIZE,
+ &tbl_rev);
+ if (IS_ERR(wifi_pkg) || tbl_rev != 0) {
ret = PTR_ERR(wifi_pkg);
goto out_free;
}
@@ -185,6 +189,7 @@ u64 iwl_acpi_get_pwr_limit(struct device *dev)
{
union acpi_object *data, *wifi_pkg;
u64 dflt_pwr_limit;
+ int tbl_rev;
data = iwl_acpi_get_object(dev, ACPI_SPLC_METHOD);
if (IS_ERR(data)) {
@@ -193,8 +198,8 @@ u64 iwl_acpi_get_pwr_limit(struct device *dev)
}
wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data,
- ACPI_SPLC_WIFI_DATA_SIZE);
- if (IS_ERR(wifi_pkg) ||
+ ACPI_SPLC_WIFI_DATA_SIZE, &tbl_rev);
+ if (IS_ERR(wifi_pkg) || tbl_rev != 0 ||
wifi_pkg->package.elements[1].integer.value != ACPI_TYPE_INTEGER) {
dflt_pwr_limit = 0;
goto out_free;
@@ -211,14 +216,15 @@ IWL_EXPORT_SYMBOL(iwl_acpi_get_pwr_limit);
int iwl_acpi_get_eckv(struct device *dev, u32 *extl_clk)
{
union acpi_object *wifi_pkg, *data;
- int ret;
+ int ret, tbl_rev;
data = iwl_acpi_get_object(dev, ACPI_ECKV_METHOD);
if (IS_ERR(data))
return PTR_ERR(data);
- wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_ECKV_WIFI_DATA_SIZE);
- if (IS_ERR(wifi_pkg)) {
+ wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_ECKV_WIFI_DATA_SIZE,
+ &tbl_rev);
+ if (IS_ERR(wifi_pkg) || tbl_rev != 0) {
ret = PTR_ERR(wifi_pkg);
goto out_free;
}
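
The new tbl_rev out-parameter lets iwl_acpi_get_wifi_pkg() accept both revision 0 and revision 1 packages, while callers that only understand revision 0 (WRDD, SPLC, ECKV above) reject anything else. A compact userspace sketch of that caller contract; the struct below is purely illustrative and not the ACPI object layout:

#include <stdio.h>

struct pkg {			/* illustrative only, not union acpi_object */
	int count;		/* number of package elements */
	long long rev;		/* value of the integer in elements[0] */
};

static int get_wifi_pkg(const struct pkg *data, int *tbl_rev)
{
	/* need the revision element plus at least one data package, and
	 * only revisions 0 and 1 are currently understood */
	if (data->count < 2 || data->rev < 0 || data->rev > 1)
		return -1;
	*tbl_rev = (int)data->rev;
	return 0;
}

int main(void)
{
	struct pkg wrdd = { .count = 3, .rev = 0 };
	int tbl_rev;

	/* a revision-0-only caller, like iwl_acpi_get_mcc() above */
	if (!get_wifi_pkg(&wrdd, &tbl_rev) && tbl_rev == 0)
		printf("accepted, table revision %d\n", tbl_rev);
	return 0;
}
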
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
index f5704e16643f..991a23450999 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
@@ -97,7 +97,7 @@
void *iwl_acpi_get_object(struct device *dev, acpi_string method);
union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
union acpi_object *data,
- int data_size);
+ int data_size, int *tbl_rev);
/**
* iwl_acpi_get_mcc - read MCC from ACPI, if available
@@ -131,7 +131,8 @@ static inline void *iwl_acpi_get_object(struct device *dev, acpi_string method)
static inline union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
union acpi_object *data,
- int data_size)
+ int data_size,
+ int *tbl_rev)
{
return ERR_PTR(-ENOENT);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
index f4202bc231a6..aaf3974a9a20 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
@@ -291,6 +291,28 @@ struct iwl_fw_ini_trigger_tlv {
struct iwl_fw_ini_trigger trigger_config[];
} __packed; /* FW_TLV_DEBUG_TRIGGERS_API_S_VER_1 */
+#define IWL_FW_INI_MAX_IMG_NAME_LEN 32
+#define IWL_FW_INI_MAX_DBG_CFG_NAME_LEN 64
+
+/**
+ * struct iwl_fw_ini_debug_info_tlv - (IWL_UCODE_TLV_TYPE_DEBUG_INFO)
+ *
+ * holds image name and debug configuration name
+ *
+ * @header: header
+ * @img_name_len: length of the image name string
+ * @img_name: image name string
+ * @dbg_cfg_name_len: length of the debug configuration name string
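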
+ * @dbg_cfg_name: debug configuration name string
+ */
+struct iwl_fw_ini_debug_info_tlv {
+ struct iwl_fw_ini_header header;
+ __le32 img_name_len;
+ u8 img_name[IWL_FW_INI_MAX_IMG_NAME_LEN];
+ __le32 dbg_cfg_name_len;
+ u8 dbg_cfg_name[IWL_FW_INI_MAX_DBG_CFG_NAME_LEN];
+} __packed; /* FW_DEBUG_TLV_INFO_API_S_VER_1 */
+
/**
* enum iwl_fw_ini_trigger_id
*
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
index 8d78b0e671c0..ec864c7b497f 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
@@ -937,8 +937,13 @@ struct iwl_ftm_responder_stats {
__le16 reserved;
} __packed; /* TOF_RESPONDER_STATISTICS_NTFY_S_VER_2 */
-#define IWL_CSI_CHUNK_CTL_NUM_MASK 0x3
-#define IWL_CSI_CHUNK_CTL_IDX_MASK 0xc
+#define IWL_CSI_MAX_EXPECTED_CHUNKS 16
+
+#define IWL_CSI_CHUNK_CTL_NUM_MASK_VER_1 0x0003
+#define IWL_CSI_CHUNK_CTL_IDX_MASK_VER_1 0x000c
+
+#define IWL_CSI_CHUNK_CTL_NUM_MASK_VER_2 0x00ff
+#define IWL_CSI_CHUNK_CTL_IDX_MASK_VER_2 0xff00
struct iwl_csi_chunk_notification {
__le32 token;
@@ -946,6 +951,6 @@ struct iwl_csi_chunk_notification {
__le16 ctl;
__le32 size;
u8 data[];
-} __packed; /* CSI_CHUNKS_HDR_NTFY_API_S_VER_1 */
+} __packed; /* CSI_CHUNKS_HDR_NTFY_API_S_VER_1/VER_2 */
#endif /* __iwl_fw_api_location_h__ */
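
The VER_2 masks widen both the chunk count and chunk index fields from two bits to a full byte. A small sketch of extracting those fields; field_get() here mirrors the semantics of the kernel's FIELD_GET(), and the ctl value is made up for illustration:

#include <stdio.h>

#define IWL_CSI_CHUNK_CTL_NUM_MASK_VER_1	0x0003
#define IWL_CSI_CHUNK_CTL_IDX_MASK_VER_1	0x000c
#define IWL_CSI_CHUNK_CTL_NUM_MASK_VER_2	0x00ff
#define IWL_CSI_CHUNK_CTL_IDX_MASK_VER_2	0xff00

/* mask, then shift down by the mask's lowest set bit */
static unsigned int field_get(unsigned int mask, unsigned int val)
{
	return (val & mask) / (mask & -mask);
}

int main(void)
{
	unsigned int ctl = 0x0302;	/* made-up ctl word from a chunk notification */

	printf("v1: num=%u idx=%u\n",
	       field_get(IWL_CSI_CHUNK_CTL_NUM_MASK_VER_1, ctl),
	       field_get(IWL_CSI_CHUNK_CTL_IDX_MASK_VER_1, ctl));
	printf("v2: num=%u idx=%u\n",
	       field_get(IWL_CSI_CHUNK_CTL_NUM_MASK_VER_2, ctl),
	       field_get(IWL_CSI_CHUNK_CTL_IDX_MASK_VER_2, ctl));
	return 0;
}
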
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
index 01f003c6cff9..f195db398bed 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
@@ -420,13 +420,25 @@ struct iwl_per_chain_offset_group {
} __packed; /* PER_CHAIN_LIMIT_OFFSET_GROUP_S_VER_1 */
/**
+ * struct iwl_geo_tx_power_profile_cmd_v1 - struct for GEO_TX_POWER_LIMIT cmd.
+ * @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
+ * @table: offset profile per band.
+ */
+struct iwl_geo_tx_power_profiles_cmd_v1 {
+ __le32 ops;
+ struct iwl_per_chain_offset_group table[IWL_NUM_GEO_PROFILES];
+} __packed; /* GEO_TX_POWER_LIMIT_VER_1 */
+
+/**
* struct iwl_geo_tx_power_profile_cmd - struct for GEO_TX_POWER_LIMIT cmd.
* @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
* @table: offset profile per band.
+ * @table_revision: BIOS table revision.
*/
struct iwl_geo_tx_power_profiles_cmd {
__le32 ops;
struct iwl_per_chain_offset_group table[IWL_NUM_GEO_PROFILES];
+ __le32 table_revision;
} __packed; /* GEO_TX_POWER_LIMIT */
/**
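
Keeping the old layout around as iwl_geo_tx_power_profiles_cmd_v1 preserves the original wire size, so a driver can still send the shorter command to firmware that does not understand the trailing table_revision word. A userspace sketch of the size difference; IWL_NUM_GEO_PROFILES and the per-chain group layout are placeholders here, not the real definitions:

#include <stdio.h>
#include <stdint.h>

#define IWL_NUM_GEO_PROFILES 3		/* placeholder value */

struct per_chain_offset_group {		/* placeholder layout */
	uint8_t dummy[16];
} __attribute__((packed));

struct geo_cmd_v1 {
	uint32_t ops;
	struct per_chain_offset_group table[IWL_NUM_GEO_PROFILES];
} __attribute__((packed));

struct geo_cmd_v2 {			/* v1 plus the trailing revision word */
	uint32_t ops;
	struct per_chain_offset_group table[IWL_NUM_GEO_PROFILES];
	uint32_t table_revision;
} __attribute__((packed));

int main(void)
{
	printf("v1 = %zu bytes, v2 = %zu bytes\n",
	       sizeof(struct geo_cmd_v1), sizeof(struct geo_cmd_v2));
	return 0;
}
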
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
index 1a67a2a439ab..c4960f045415 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
@@ -750,6 +750,21 @@ struct iwl_scan_req_umac {
struct iwl_scan_umac_chan_param channel;
u8 data[];
} v8; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_8 */
+ struct {
+ u8 active_dwell[SCAN_TWO_LMACS];
+ u8 adwell_default_hb_n_aps;
+ u8 adwell_default_lb_n_aps;
+ u8 adwell_default_n_aps_social;
+ u8 general_flags2;
+ __le16 adwell_max_budget;
+ __le32 max_out_time[SCAN_TWO_LMACS];
+ __le32 suspend_time[SCAN_TWO_LMACS];
+ __le32 scan_priority;
+ u8 passive_dwell[SCAN_TWO_LMACS];
+ u8 num_of_fragments[SCAN_TWO_LMACS];
+ struct iwl_scan_umac_chan_param channel;
+ u8 data[];
+ } v9; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_9 */
};
} __packed;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index 33d7bc5500db..e411ac98290d 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -1059,7 +1059,7 @@ static int iwl_dump_ini_prph_iter(struct iwl_fw_runtime *fwrt,
u32 addr = le32_to_cpu(reg->start_addr[idx]) + le32_to_cpu(reg->offset);
int i;
- range->start_addr = cpu_to_le64(addr);
+ range->internal_base_addr = cpu_to_le32(addr);
range->range_data_size = reg->internal.range_data_size;
for (i = 0; i < le32_to_cpu(reg->internal.range_data_size); i += 4) {
prph_val = iwl_read_prph(fwrt->trans, addr + i);
@@ -1080,7 +1080,7 @@ static int iwl_dump_ini_csr_iter(struct iwl_fw_runtime *fwrt,
u32 addr = le32_to_cpu(reg->start_addr[idx]) + le32_to_cpu(reg->offset);
int i;
- range->start_addr = cpu_to_le64(addr);
+ range->internal_base_addr = cpu_to_le32(addr);
range->range_data_size = reg->internal.range_data_size;
for (i = 0; i < le32_to_cpu(reg->internal.range_data_size); i += 4)
*val++ = cpu_to_le32(iwl_trans_read32(fwrt->trans, addr + i));
@@ -1095,7 +1095,7 @@ static int iwl_dump_ini_dev_mem_iter(struct iwl_fw_runtime *fwrt,
struct iwl_fw_ini_error_dump_range *range = range_ptr;
u32 addr = le32_to_cpu(reg->start_addr[idx]) + le32_to_cpu(reg->offset);
- range->start_addr = cpu_to_le64(addr);
+ range->internal_base_addr = cpu_to_le32(addr);
range->range_data_size = reg->internal.range_data_size;
iwl_trans_read_mem_bytes(fwrt->trans, addr, range->data,
le32_to_cpu(reg->internal.range_data_size));
@@ -1111,7 +1111,7 @@ iwl_dump_ini_paging_gen2_iter(struct iwl_fw_runtime *fwrt,
struct iwl_fw_ini_error_dump_range *range = range_ptr;
u32 page_size = fwrt->trans->init_dram.paging[idx].size;
- range->start_addr = cpu_to_le64(idx);
+ range->page_num = cpu_to_le32(idx);
range->range_data_size = cpu_to_le32(page_size);
memcpy(range->data, fwrt->trans->init_dram.paging[idx].block,
page_size);
@@ -1131,7 +1131,7 @@ static int iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt,
dma_addr_t addr = fwrt->fw_paging_db[idx].fw_paging_phys;
u32 page_size = fwrt->fw_paging_db[idx].fw_paging_size;
- range->start_addr = cpu_to_le64(idx);
+ range->page_num = cpu_to_le32(idx);
range->range_data_size = cpu_to_le32(page_size);
dma_sync_single_for_cpu(fwrt->trans->dev, addr, page_size,
DMA_BIDIRECTIONAL);
@@ -1154,11 +1154,11 @@ iwl_dump_ini_mon_dram_iter(struct iwl_fw_runtime *fwrt,
if (start_addr == 0x5a5a5a5a)
return -EBUSY;
- range->start_addr = cpu_to_le64(start_addr);
- range->range_data_size = cpu_to_le32(fwrt->trans->fw_mon[idx].size);
+ range->dram_base_addr = cpu_to_le64(start_addr);
+ range->range_data_size = cpu_to_le32(fwrt->trans->dbg.fw_mon[idx].size);
- memcpy(range->data, fwrt->trans->fw_mon[idx].block,
- fwrt->trans->fw_mon[idx].size);
+ memcpy(range->data, fwrt->trans->dbg.fw_mon[idx].block,
+ fwrt->trans->dbg.fw_mon[idx].size);
return sizeof(*range) + le32_to_cpu(range->range_data_size);
}
@@ -1228,7 +1228,7 @@ static int iwl_dump_ini_txf_iter(struct iwl_fw_runtime *fwrt,
struct iwl_fw_ini_region_cfg *reg,
void *range_ptr, int idx)
{
- struct iwl_fw_ini_fifo_error_dump_range *range = range_ptr;
+ struct iwl_fw_ini_error_dump_range *range = range_ptr;
struct iwl_ini_txf_iter_data *iter;
struct iwl_fw_ini_error_dump_register *reg_dump = (void *)range->data;
u32 offs = le32_to_cpu(reg->offset), addr;
@@ -1246,8 +1246,8 @@ static int iwl_dump_ini_txf_iter(struct iwl_fw_runtime *fwrt,
iter = fwrt->dump.fifo_iter;
- range->fifo_num = cpu_to_le32(iter->fifo);
- range->num_of_registers = reg->fifos.num_of_registers;
+ range->fifo_hdr.fifo_num = cpu_to_le32(iter->fifo);
+ range->fifo_hdr.num_of_registers = reg->fifos.num_of_registers;
range->range_data_size = cpu_to_le32(iter->fifo_size + registers_size);
iwl_write_prph_no_grab(fwrt->trans, TXF_LARC_NUM + offs, iter->fifo);
@@ -1336,7 +1336,7 @@ static int iwl_dump_ini_rxf_iter(struct iwl_fw_runtime *fwrt,
struct iwl_fw_ini_region_cfg *reg,
void *range_ptr, int idx)
{
- struct iwl_fw_ini_fifo_error_dump_range *range = range_ptr;
+ struct iwl_fw_ini_error_dump_range *range = range_ptr;
struct iwl_ini_rxf_data rxf_data;
struct iwl_fw_ini_error_dump_register *reg_dump = (void *)range->data;
u32 offs = le32_to_cpu(reg->offset), addr;
@@ -1353,8 +1353,8 @@ static int iwl_dump_ini_rxf_iter(struct iwl_fw_runtime *fwrt,
if (!iwl_trans_grab_nic_access(fwrt->trans, &flags))
return -EBUSY;
- range->fifo_num = cpu_to_le32(rxf_data.fifo_num);
- range->num_of_registers = reg->fifos.num_of_registers;
+ range->fifo_hdr.fifo_num = cpu_to_le32(rxf_data.fifo_num);
+ range->fifo_hdr.num_of_registers = reg->fifos.num_of_registers;
range->range_data_size = cpu_to_le32(rxf_data.size + registers_size);
/*
@@ -1408,7 +1408,7 @@ static void *iwl_dump_ini_mem_fill_header(struct iwl_fw_runtime *fwrt,
{
struct iwl_fw_ini_error_dump *dump = data;
- dump->header.version = cpu_to_le32(IWL_INI_DUMP_MEM_VER);
+ dump->header.version = cpu_to_le32(IWL_INI_DUMP_VER);
return dump->ranges;
}
@@ -1433,7 +1433,7 @@ static void
iwl_trans_release_nic_access(fwrt->trans, &flags);
- data->header.version = cpu_to_le32(IWL_INI_DUMP_MONITOR_VER);
+ data->header.version = cpu_to_le32(IWL_INI_DUMP_VER);
data->write_ptr = cpu_to_le32(write_ptr & write_ptr_msk);
data->cycle_cnt = cpu_to_le32(cycle_cnt & cycle_cnt_msk);
@@ -1490,17 +1490,6 @@ static void
}
-static void *iwl_dump_ini_fifo_fill_header(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg,
- void *data)
-{
- struct iwl_fw_ini_fifo_error_dump *dump = data;
-
- dump->header.version = cpu_to_le32(IWL_INI_DUMP_FIFO_VER);
-
- return dump->ranges;
-}
-
static u32 iwl_dump_ini_mem_ranges(struct iwl_fw_runtime *fwrt,
struct iwl_fw_ini_region_cfg *reg)
{
@@ -1592,8 +1581,8 @@ static u32 iwl_dump_ini_mon_dram_get_size(struct iwl_fw_runtime *fwrt,
u32 size = sizeof(struct iwl_fw_ini_monitor_dump) +
sizeof(struct iwl_fw_ini_error_dump_range);
- if (fwrt->trans->num_blocks)
- size += fwrt->trans->fw_mon[0].size;
+ if (fwrt->trans->dbg.num_blocks)
+ size += fwrt->trans->dbg.fw_mon[0].size;
return size;
}
@@ -1613,8 +1602,9 @@ static u32 iwl_dump_ini_txf_get_size(struct iwl_fw_runtime *fwrt,
struct iwl_ini_txf_iter_data iter = { .init = true };
void *fifo_iter = fwrt->dump.fifo_iter;
u32 size = 0;
- u32 fifo_hdr = sizeof(struct iwl_fw_ini_fifo_error_dump_range) +
- le32_to_cpu(reg->fifos.num_of_registers) * sizeof(__le32) * 2;
+ u32 fifo_hdr = sizeof(struct iwl_fw_ini_error_dump_range) +
+ le32_to_cpu(reg->fifos.num_of_registers) *
+ sizeof(struct iwl_fw_ini_error_dump_register);
fwrt->dump.fifo_iter = &iter;
while (iwl_ini_txf_iter(fwrt, reg)) {
@@ -1624,7 +1614,7 @@ static u32 iwl_dump_ini_txf_get_size(struct iwl_fw_runtime *fwrt,
}
if (size)
- size += sizeof(struct iwl_fw_ini_fifo_error_dump);
+ size += sizeof(struct iwl_fw_ini_error_dump);
fwrt->dump.fifo_iter = fifo_iter;
@@ -1635,9 +1625,10 @@ static u32 iwl_dump_ini_rxf_get_size(struct iwl_fw_runtime *fwrt,
struct iwl_fw_ini_region_cfg *reg)
{
struct iwl_ini_rxf_data rx_data;
- u32 size = sizeof(struct iwl_fw_ini_fifo_error_dump) +
- sizeof(struct iwl_fw_ini_fifo_error_dump_range) +
- le32_to_cpu(reg->fifos.num_of_registers) * sizeof(__le32) * 2;
+ u32 size = sizeof(struct iwl_fw_ini_error_dump) +
+ sizeof(struct iwl_fw_ini_error_dump_range) +
+ le32_to_cpu(reg->fifos.num_of_registers) *
+ sizeof(struct iwl_fw_ini_error_dump_register);
if (reg->fifos.header_only)
return size;
@@ -1683,20 +1674,24 @@ iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_mem_ops *ops)
{
struct iwl_fw_ini_error_dump_header *header = (void *)(*data)->data;
- u32 num_of_ranges, i, type = le32_to_cpu(reg->region_type);
+ u32 num_of_ranges, i, type = le32_to_cpu(reg->region_type), size;
void *range;
if (WARN_ON(!ops || !ops->get_num_of_ranges || !ops->get_size ||
!ops->fill_mem_hdr || !ops->fill_range))
return;
+ size = ops->get_size(fwrt, reg);
+ if (!size)
+ return;
+
IWL_DEBUG_FW(fwrt, "WRT: collecting region: id=%d, type=%d\n",
le32_to_cpu(reg->region_id), type);
num_of_ranges = ops->get_num_of_ranges(fwrt, reg);
- (*data)->type = cpu_to_le32(type | INI_DUMP_BIT);
- (*data)->len = cpu_to_le32(ops->get_size(fwrt, reg));
+ (*data)->type = cpu_to_le32(type);
+ (*data)->len = cpu_to_le32(size);
header->region_id = reg->region_id;
header->num_of_ranges = cpu_to_le32(num_of_ranges);
@@ -1709,7 +1704,7 @@ iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt,
IWL_ERR(fwrt,
"WRT: failed to fill region header: id=%d, type=%d\n",
le32_to_cpu(reg->region_id), type);
- memset(*data, 0, le32_to_cpu((*data)->len));
+ memset(*data, 0, size);
return;
}
@@ -1720,7 +1715,7 @@ iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt,
IWL_ERR(fwrt,
"WRT: failed to dump region: id=%d, type=%d\n",
le32_to_cpu(reg->region_id), type);
- memset(*data, 0, le32_to_cpu((*data)->len));
+ memset(*data, 0, size);
return;
}
range = range + range_size;
@@ -1728,10 +1723,71 @@ iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt,
*data = iwl_fw_error_next_data(*data);
}
+static void iwl_dump_ini_info(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_ini_trigger *trigger,
+ struct iwl_fw_error_dump_data **data)
+{
+ struct iwl_fw_ini_dump_info *dump = (void *)(*data)->data;
+ u32 reg_ids_size = le32_to_cpu(trigger->num_regions) * sizeof(__le32);
+
+ (*data)->type = cpu_to_le32(IWL_INI_DUMP_INFO_TYPE);
+ (*data)->len = cpu_to_le32(sizeof(*dump) + reg_ids_size);
+
+ dump->version = cpu_to_le32(IWL_INI_DUMP_VER);
+ dump->trigger_id = trigger->trigger_id;
+ dump->is_external_cfg =
+ cpu_to_le32(fwrt->trans->dbg.external_ini_loaded);
+
+ dump->ver_type = cpu_to_le32(fwrt->dump.fw_ver.type);
+ dump->ver_subtype = cpu_to_le32(fwrt->dump.fw_ver.subtype);
+
+ dump->hw_step = cpu_to_le32(CSR_HW_REV_STEP(fwrt->trans->hw_rev));
+ dump->hw_type = cpu_to_le32(CSR_HW_REV_TYPE(fwrt->trans->hw_rev));
+
+ dump->rf_id_flavor =
+ cpu_to_le32(CSR_HW_RFID_FLAVOR(fwrt->trans->hw_rf_id));
+ dump->rf_id_dash = cpu_to_le32(CSR_HW_RFID_DASH(fwrt->trans->hw_rf_id));
+ dump->rf_id_step = cpu_to_le32(CSR_HW_RFID_STEP(fwrt->trans->hw_rf_id));
+ dump->rf_id_type = cpu_to_le32(CSR_HW_RFID_TYPE(fwrt->trans->hw_rf_id));
+
+ dump->lmac_major = cpu_to_le32(fwrt->dump.fw_ver.lmac_major);
+ dump->lmac_minor = cpu_to_le32(fwrt->dump.fw_ver.lmac_minor);
+ dump->umac_major = cpu_to_le32(fwrt->dump.fw_ver.umac_major);
+ dump->umac_minor = cpu_to_le32(fwrt->dump.fw_ver.umac_minor);
+
+ dump->build_tag_len = cpu_to_le32(sizeof(dump->build_tag));
+ memcpy(dump->build_tag, fwrt->fw->human_readable,
+ sizeof(dump->build_tag));
+
+ dump->img_name_len = cpu_to_le32(sizeof(dump->img_name));
+ memcpy(dump->img_name, fwrt->dump.img_name, sizeof(dump->img_name));
+
+ dump->internal_dbg_cfg_name_len =
+ cpu_to_le32(sizeof(dump->internal_dbg_cfg_name));
+ memcpy(dump->internal_dbg_cfg_name, fwrt->dump.internal_dbg_cfg_name,
+ sizeof(dump->internal_dbg_cfg_name));
+
+ dump->external_dbg_cfg_name_len =
+ cpu_to_le32(sizeof(dump->external_dbg_cfg_name));
+
+ /* dump info size is allocated in iwl_fw_ini_get_trigger_len.
+ * The driver allocates (sizeof(*dump) + reg_ids_size) so it is safe to
+ * use reg_ids_size
+ */
+ memcpy(dump->external_dbg_cfg_name, fwrt->dump.external_dbg_cfg_name,
+ sizeof(dump->external_dbg_cfg_name));
+
+ dump->regions_num = trigger->num_regions;
+ memcpy(dump->region_ids, trigger->data, reg_ids_size);
+
+ *data = iwl_fw_error_next_data(*data);
+}
+
static int iwl_fw_ini_get_trigger_len(struct iwl_fw_runtime *fwrt,
struct iwl_fw_ini_trigger *trigger)
{
- int i, size = 0, hdr_len = sizeof(struct iwl_fw_error_dump_data);
+ int i, ret_size = 0, hdr_len = sizeof(struct iwl_fw_error_dump_data);
+ u32 size;
if (!trigger || !trigger->num_regions)
return 0;
@@ -1763,32 +1819,40 @@ static int iwl_fw_ini_get_trigger_len(struct iwl_fw_runtime *fwrt,
case IWL_FW_INI_REGION_CSR:
case IWL_FW_INI_REGION_LMAC_ERROR_TABLE:
case IWL_FW_INI_REGION_UMAC_ERROR_TABLE:
- size += hdr_len + iwl_dump_ini_mem_get_size(fwrt, reg);
+ size = iwl_dump_ini_mem_get_size(fwrt, reg);
+ if (size)
+ ret_size += hdr_len + size;
break;
case IWL_FW_INI_REGION_TXF:
- size += hdr_len + iwl_dump_ini_txf_get_size(fwrt, reg);
+ size = iwl_dump_ini_txf_get_size(fwrt, reg);
+ if (size)
+ ret_size += hdr_len + size;
break;
case IWL_FW_INI_REGION_RXF:
- size += hdr_len + iwl_dump_ini_rxf_get_size(fwrt, reg);
+ size = iwl_dump_ini_rxf_get_size(fwrt, reg);
+ if (size)
+ ret_size += hdr_len + size;
break;
case IWL_FW_INI_REGION_PAGING:
- size += hdr_len;
- if (iwl_fw_dbg_is_paging_enabled(fwrt)) {
- size += iwl_dump_ini_paging_get_size(fwrt, reg);
- } else {
- size += iwl_dump_ini_paging_gen2_get_size(fwrt,
- reg);
- }
+ if (iwl_fw_dbg_is_paging_enabled(fwrt))
+ size = iwl_dump_ini_paging_get_size(fwrt, reg);
+ else
+ size = iwl_dump_ini_paging_gen2_get_size(fwrt,
+ reg);
+ if (size)
+ ret_size += hdr_len + size;
break;
case IWL_FW_INI_REGION_DRAM_BUFFER:
- if (!fwrt->trans->num_blocks)
+ if (!fwrt->trans->dbg.num_blocks)
break;
- size += hdr_len +
- iwl_dump_ini_mon_dram_get_size(fwrt, reg);
+ size = iwl_dump_ini_mon_dram_get_size(fwrt, reg);
+ if (size)
+ ret_size += hdr_len + size;
break;
case IWL_FW_INI_REGION_INTERNAL_BUFFER:
- size += hdr_len +
- iwl_dump_ini_mon_smem_get_size(fwrt, reg);
+ size = iwl_dump_ini_mon_smem_get_size(fwrt, reg);
+ if (size)
+ ret_size += hdr_len + size;
break;
case IWL_FW_INI_REGION_DRAM_IMR:
/* Undefined yet */
@@ -1796,7 +1860,13 @@ static int iwl_fw_ini_get_trigger_len(struct iwl_fw_runtime *fwrt,
break;
}
}
- return size;
+
+ /* add dump info size */
+ if (ret_size)
+ ret_size += hdr_len + sizeof(struct iwl_fw_ini_dump_info) +
+ (le32_to_cpu(trigger->num_regions) * sizeof(__le32));
+
+ return ret_size;
}
static void iwl_fw_ini_dump_trigger(struct iwl_fw_runtime *fwrt,
@@ -1805,6 +1875,8 @@ static void iwl_fw_ini_dump_trigger(struct iwl_fw_runtime *fwrt,
{
int i, num = le32_to_cpu(trigger->num_regions);
+ iwl_dump_ini_info(fwrt, trigger, data);
+
for (i = 0; i < num; i++) {
u32 reg_id = le32_to_cpu(trigger->data[i]);
struct iwl_fw_ini_region_cfg *reg;
@@ -1879,7 +1951,7 @@ static void iwl_fw_ini_dump_trigger(struct iwl_fw_runtime *fwrt,
fwrt->dump.fifo_iter = &iter;
ops.get_num_of_ranges = iwl_dump_ini_txf_ranges;
ops.get_size = iwl_dump_ini_txf_get_size;
- ops.fill_mem_hdr = iwl_dump_ini_fifo_fill_header;
+ ops.fill_mem_hdr = iwl_dump_ini_mem_fill_header;
ops.fill_range = iwl_dump_ini_txf_iter;
iwl_dump_ini_mem(fwrt, data, reg, &ops);
fwrt->dump.fifo_iter = fifo_iter;
@@ -1888,7 +1960,7 @@ static void iwl_fw_ini_dump_trigger(struct iwl_fw_runtime *fwrt,
case IWL_FW_INI_REGION_RXF:
ops.get_num_of_ranges = iwl_dump_ini_rxf_ranges;
ops.get_size = iwl_dump_ini_rxf_get_size;
- ops.fill_mem_hdr = iwl_dump_ini_fifo_fill_header;
+ ops.fill_mem_hdr = iwl_dump_ini_mem_fill_header;
ops.fill_range = iwl_dump_ini_rxf_iter;
iwl_dump_ini_mem(fwrt, data, reg, &ops);
break;
@@ -1908,18 +1980,18 @@ static void iwl_fw_ini_dump_trigger(struct iwl_fw_runtime *fwrt,
}
static struct iwl_fw_error_dump_file *
-iwl_fw_error_ini_dump_file(struct iwl_fw_runtime *fwrt)
+iwl_fw_error_ini_dump_file(struct iwl_fw_runtime *fwrt,
+ enum iwl_fw_ini_trigger_id trig_id)
{
int size;
struct iwl_fw_error_dump_data *dump_data;
struct iwl_fw_error_dump_file *dump_file;
struct iwl_fw_ini_trigger *trigger;
- enum iwl_fw_ini_trigger_id id = fwrt->dump.ini_trig_id;
- if (!iwl_fw_ini_trigger_on(fwrt, id))
+ if (!iwl_fw_ini_trigger_on(fwrt, trig_id))
return NULL;
- trigger = fwrt->dump.active_trigs[id].trig;
+ trigger = fwrt->dump.active_trigs[trig_id].trig;
size = iwl_fw_ini_get_trigger_len(fwrt, trigger);
if (!size)
@@ -1931,7 +2003,7 @@ iwl_fw_error_ini_dump_file(struct iwl_fw_runtime *fwrt)
if (!dump_file)
return NULL;
- dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
+ dump_file->barker = cpu_to_le32(IWL_FW_INI_ERROR_DUMP_BARKER);
dump_data = (void *)dump_file->data;
dump_file->file_len = cpu_to_le32(size);
@@ -1952,7 +2024,7 @@ static void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
if (!dump_file)
goto out;
- if (!fwrt->trans->ini_valid && fwrt->dump.monitor_only)
+ if (fwrt->dump.monitor_only)
dump_mask &= IWL_FW_ERROR_DUMP_FW_MONITOR;
fw_error_dump.trans_ptr = iwl_trans_dump_data(fwrt->trans, dump_mask);
@@ -1984,16 +2056,16 @@ static void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
out:
iwl_fw_free_dump_desc(fwrt);
- clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status);
}
-static void iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt)
+static void iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt, u8 wk_idx)
{
+ enum iwl_fw_ini_trigger_id trig_id = fwrt->dump.wks[wk_idx].ini_trig_id;
struct iwl_fw_error_dump_file *dump_file;
struct scatterlist *sg_dump_data;
u32 file_len;
- dump_file = iwl_fw_error_ini_dump_file(fwrt);
+ dump_file = iwl_fw_error_ini_dump_file(fwrt, trig_id);
if (!dump_file)
goto out;
@@ -2008,8 +2080,7 @@ static void iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt)
}
vfree(dump_file);
out:
- fwrt->dump.ini_trig_id = IWL_FW_TRIGGER_ID_INVALID;
- clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status);
+ fwrt->dump.wks[wk_idx].ini_trig_id = IWL_FW_TRIGGER_ID_INVALID;
}
const struct iwl_fw_dump_desc iwl_dump_desc_assert = {
@@ -2027,7 +2098,7 @@ int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
u32 trig_type = le32_to_cpu(desc->trig_desc.type);
int ret;
- if (fwrt->trans->ini_valid) {
+ if (fwrt->trans->dbg.ini_valid) {
ret = iwl_fw_dbg_ini_collect(fwrt, trig_type);
if (!ret)
iwl_fw_free_dump_desc(fwrt);
@@ -2035,7 +2106,10 @@ int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
return ret;
}
- if (test_and_set_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status))
+ /* use wks[0] since dump flow prior to ini does not need to support
+ * collecting consecutive triggers
+ */
+ if (test_and_set_bit(fwrt->dump.wks[0].idx, &fwrt->dump.active_wks))
return -EBUSY;
if (WARN_ON(fwrt->dump.desc))
@@ -2047,7 +2121,7 @@ int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
fwrt->dump.desc = desc;
fwrt->dump.monitor_only = monitor_only;
- schedule_delayed_work(&fwrt->dump.wk, usecs_to_jiffies(delay));
+ schedule_delayed_work(&fwrt->dump.wks[0].wk, usecs_to_jiffies(delay));
return 0;
}
@@ -2057,9 +2131,12 @@ int iwl_fw_dbg_error_collect(struct iwl_fw_runtime *fwrt,
enum iwl_fw_dbg_trigger trig_type)
{
int ret;
- struct iwl_fw_dump_desc *iwl_dump_error_desc =
- kmalloc(sizeof(*iwl_dump_error_desc), GFP_KERNEL);
+ struct iwl_fw_dump_desc *iwl_dump_error_desc;
+
+ if (!test_bit(STATUS_DEVICE_ENABLED, &fwrt->trans->status))
+ return -EIO;
+ iwl_dump_error_desc = kmalloc(sizeof(*iwl_dump_error_desc), GFP_KERNEL);
if (!iwl_dump_error_desc)
return -ENOMEM;
@@ -2123,13 +2200,11 @@ int _iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt,
{
struct iwl_fw_ini_active_triggers *active;
u32 occur, delay;
+ unsigned long idx;
if (WARN_ON(!iwl_fw_ini_trigger_on(fwrt, id)))
return -EINVAL;
- if (test_and_set_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status))
- return -EBUSY;
-
if (!iwl_fw_ini_trigger_on(fwrt, id)) {
IWL_WARN(fwrt, "WRT: Trigger %d is not active, aborting dump\n",
id);
@@ -2150,14 +2225,24 @@ int _iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt,
return 0;
}
- if (test_and_set_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status))
+ /* Check there is an available worker.
+ * ffz return value is undefined if no zero exists,
+ * so check against ~0UL first.
+ */
+ if (fwrt->dump.active_wks == ~0UL)
+ return -EBUSY;
+
+ idx = ffz(fwrt->dump.active_wks);
+
+ if (idx >= IWL_FW_RUNTIME_DUMP_WK_NUM ||
+ test_and_set_bit(fwrt->dump.wks[idx].idx, &fwrt->dump.active_wks))
return -EBUSY;
- fwrt->dump.ini_trig_id = id;
+ fwrt->dump.wks[idx].ini_trig_id = id;
IWL_WARN(fwrt, "WRT: collecting data: ini trigger %d fired.\n", id);
- schedule_delayed_work(&fwrt->dump.wk, usecs_to_jiffies(delay));
+ schedule_delayed_work(&fwrt->dump.wks[idx].wk, usecs_to_jiffies(delay));
return 0;
}
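
The worker-selection logic above scans dump.active_wks for a free slot with ffz() and must compare against ~0UL first, because ffz() is undefined on an all-ones word. A userspace sketch of that pattern; the slot count is a stand-in, __builtin_ctzl() stands in for ffz(), and the bit set is not atomic like test_and_set_bit():

#include <stdio.h>

#define DUMP_WK_NUM 5	/* stand-in for IWL_FW_RUNTIME_DUMP_WK_NUM */

/* find first zero bit; only meaningful when at least one zero bit exists */
static unsigned long ffz_ul(unsigned long x)
{
	return (unsigned long)__builtin_ctzl(~x);
}

static int claim_worker(unsigned long *active)
{
	unsigned long idx;

	if (*active == ~0UL)		/* guard: ffz is undefined here */
		return -1;
	idx = ffz_ul(*active);
	if (idx >= DUMP_WK_NUM)
		return -1;		/* all real slots are busy */
	*active |= 1UL << idx;		/* non-atomic test_and_set_bit() stand-in */
	return (int)idx;
}

int main(void)
{
	unsigned long active = 0;
	int i;

	for (i = 0; i < DUMP_WK_NUM + 1; i++)
		printf("claim_worker() -> %d\n", claim_worker(&active));
	return 0;
}
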
@@ -2191,9 +2276,6 @@ int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt,
int ret, len = 0;
char buf[64];
- if (fwrt->trans->ini_valid)
- return 0;
-
if (fmt) {
va_list ap;
@@ -2270,56 +2352,57 @@ IWL_EXPORT_SYMBOL(iwl_fw_start_dbg_conf);
/* this function assumes dump_start was called beforehand and dump_end will be
* called afterwards
*/
-void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt)
+static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx)
{
struct iwl_fw_dbg_params params = {0};
- if (!test_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status))
+ if (!test_bit(wk_idx, &fwrt->dump.active_wks))
return;
if (fwrt->ops && fwrt->ops->fw_running &&
!fwrt->ops->fw_running(fwrt->ops_ctx)) {
IWL_ERR(fwrt, "Firmware not running - cannot dump error\n");
iwl_fw_free_dump_desc(fwrt);
- clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status);
- return;
+ goto out;
}
/* there's no point in fw dump if the bus is dead */
if (test_bit(STATUS_TRANS_DEAD, &fwrt->trans->status)) {
IWL_ERR(fwrt, "Skip fw error dump since bus is dead\n");
- return;
+ goto out;
}
- iwl_fw_dbg_stop_recording(fwrt, &params);
+ iwl_fw_dbg_stop_recording(fwrt->trans, &params);
IWL_DEBUG_FW_INFO(fwrt, "WRT: data collection start\n");
- if (fwrt->trans->ini_valid)
- iwl_fw_error_ini_dump(fwrt);
+ if (fwrt->trans->dbg.ini_valid)
+ iwl_fw_error_ini_dump(fwrt, wk_idx);
else
iwl_fw_error_dump(fwrt);
IWL_DEBUG_FW_INFO(fwrt, "WRT: data collection done\n");
- /* start recording again if the firmware is not crashed */
- if (!test_bit(STATUS_FW_ERROR, &fwrt->trans->status) &&
- fwrt->fw->dbg.dest_tlv) {
- /* wait before we collect the data till the DBGC stop */
- udelay(500);
- iwl_fw_dbg_restart_recording(fwrt, &params);
- }
+ iwl_fw_dbg_restart_recording(fwrt, &params);
+
+out:
+ clear_bit(wk_idx, &fwrt->dump.active_wks);
}
-IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_sync);
void iwl_fw_error_dump_wk(struct work_struct *work)
{
- struct iwl_fw_runtime *fwrt =
- container_of(work, struct iwl_fw_runtime, dump.wk.work);
+ struct iwl_fw_runtime *fwrt;
+ typeof(fwrt->dump.wks[0]) *wks;
+
+ wks = container_of(work, typeof(fwrt->dump.wks[0]), wk.work);
+ fwrt = container_of(wks, struct iwl_fw_runtime, dump.wks[wks->idx]);
+ /* assumes the op mode mutex is locked in dump_start since
+ * iwl_fw_dbg_collect_sync can't run in parallel
+ */
if (fwrt->ops && fwrt->ops->dump_start &&
fwrt->ops->dump_start(fwrt->ops_ctx))
return;
- iwl_fw_dbg_collect_sync(fwrt);
+ iwl_fw_dbg_collect_sync(fwrt, wks->idx);
if (fwrt->ops && fwrt->ops->dump_end)
fwrt->ops->dump_end(fwrt->ops_ctx);
@@ -2349,6 +2432,38 @@ void iwl_fw_dbg_read_d3_debug_data(struct iwl_fw_runtime *fwrt)
}
IWL_EXPORT_SYMBOL(iwl_fw_dbg_read_d3_debug_data);
+static void iwl_fw_dbg_info_apply(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_ini_debug_info_tlv *dbg_info,
+ bool ext, enum iwl_fw_ini_apply_point pnt)
+{
+ u32 img_name_len = le32_to_cpu(dbg_info->img_name_len);
+ u32 dbg_cfg_name_len = le32_to_cpu(dbg_info->dbg_cfg_name_len);
+ const char err_str[] =
+ "WRT: ext=%d. Invalid %s name length %d, expected %d\n";
+
+ if (img_name_len != IWL_FW_INI_MAX_IMG_NAME_LEN) {
+ IWL_WARN(fwrt, err_str, ext, "image", img_name_len,
+ IWL_FW_INI_MAX_IMG_NAME_LEN);
+ return;
+ }
+
+ if (dbg_cfg_name_len != IWL_FW_INI_MAX_DBG_CFG_NAME_LEN) {
+ IWL_WARN(fwrt, err_str, ext, "debug cfg", dbg_cfg_name_len,
+ IWL_FW_INI_MAX_DBG_CFG_NAME_LEN);
+ return;
+ }
+
+ if (ext) {
+ memcpy(fwrt->dump.external_dbg_cfg_name, dbg_info->dbg_cfg_name,
+ sizeof(fwrt->dump.external_dbg_cfg_name));
+ } else {
+ memcpy(fwrt->dump.img_name, dbg_info->img_name,
+ sizeof(fwrt->dump.img_name));
+ memcpy(fwrt->dump.internal_dbg_cfg_name, dbg_info->dbg_cfg_name,
+ sizeof(fwrt->dump.internal_dbg_cfg_name));
+ }
+}
+
static void
iwl_fw_dbg_buffer_allocation(struct iwl_fw_runtime *fwrt, u32 size)
{
@@ -2356,7 +2471,8 @@ iwl_fw_dbg_buffer_allocation(struct iwl_fw_runtime *fwrt, u32 size)
void *virtual_addr = NULL;
dma_addr_t phys_addr;
- if (WARN_ON_ONCE(trans->num_blocks == ARRAY_SIZE(trans->fw_mon)))
+ if (WARN_ON_ONCE(trans->dbg.num_blocks ==
+ ARRAY_SIZE(trans->dbg.fw_mon)))
return;
virtual_addr =
@@ -2370,12 +2486,12 @@ iwl_fw_dbg_buffer_allocation(struct iwl_fw_runtime *fwrt, u32 size)
IWL_DEBUG_FW(trans,
"Allocated DRAM buffer[%d], size=0x%x\n",
- trans->num_blocks, size);
+ trans->dbg.num_blocks, size);
- trans->fw_mon[trans->num_blocks].block = virtual_addr;
- trans->fw_mon[trans->num_blocks].physical = phys_addr;
- trans->fw_mon[trans->num_blocks].size = size;
- trans->num_blocks++;
+ trans->dbg.fw_mon[trans->dbg.num_blocks].block = virtual_addr;
+ trans->dbg.fw_mon[trans->dbg.num_blocks].physical = phys_addr;
+ trans->dbg.fw_mon[trans->dbg.num_blocks].size = size;
+ trans->dbg.num_blocks++;
}
static void iwl_fw_dbg_buffer_apply(struct iwl_fw_runtime *fwrt,
@@ -2393,20 +2509,26 @@ static void iwl_fw_dbg_buffer_apply(struct iwl_fw_runtime *fwrt,
.data[0] = &ldbg_cmd,
.len[0] = sizeof(ldbg_cmd),
};
- int block_idx = trans->num_blocks;
+ int block_idx = trans->dbg.num_blocks;
u32 buf_location = le32_to_cpu(alloc->tlv.buffer_location);
+ if (fwrt->trans->dbg.ini_dest == IWL_FW_INI_LOCATION_INVALID)
+ fwrt->trans->dbg.ini_dest = buf_location;
+
+ if (buf_location != fwrt->trans->dbg.ini_dest) {
+ WARN(fwrt,
+ "WRT: attempt to override buffer location on apply point %d\n",
+ pnt);
+
+ return;
+ }
+
if (buf_location == IWL_FW_INI_LOCATION_SRAM_PATH) {
- if (!WARN(pnt != IWL_FW_INI_APPLY_EARLY,
- "WRT: Invalid apply point %d for SMEM buffer allocation, aborting\n",
- pnt)) {
- IWL_DEBUG_FW(trans,
- "WRT: applying SMEM buffer destination\n");
-
- /* set sram monitor by enabling bit 7 */
- iwl_set_bit(fwrt->trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_MONITOR_SRAM);
- }
+ IWL_DEBUG_FW(trans, "WRT: applying SMEM buffer destination\n");
+ /* set sram monitor by enabling bit 7 */
+ iwl_set_bit(fwrt->trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_MONITOR_SRAM);
+
return;
}
@@ -2416,13 +2538,13 @@ static void iwl_fw_dbg_buffer_apply(struct iwl_fw_runtime *fwrt,
if (!alloc->is_alloc) {
iwl_fw_dbg_buffer_allocation(fwrt,
le32_to_cpu(alloc->tlv.size));
- if (block_idx == trans->num_blocks)
+ if (block_idx == trans->dbg.num_blocks)
return;
alloc->is_alloc = 1;
}
/* First block is assigned via registers / context info */
- if (trans->num_blocks == 1)
+ if (trans->dbg.num_blocks == 1)
return;
IWL_DEBUG_FW(trans,
@@ -2430,7 +2552,7 @@ static void iwl_fw_dbg_buffer_apply(struct iwl_fw_runtime *fwrt,
cmd->num_frags = cpu_to_le32(1);
cmd->fragments[0].address =
- cpu_to_le64(trans->fw_mon[block_idx].physical);
+ cpu_to_le64(trans->dbg.fw_mon[block_idx].physical);
cmd->fragments[0].size = alloc->tlv.size;
cmd->allocation_id = alloc->tlv.allocation_id;
cmd->buffer_location = alloc->tlv.buffer_location;
@@ -2653,20 +2775,30 @@ static void _iwl_fw_dbg_apply_point(struct iwl_fw_runtime *fwrt,
struct iwl_ucode_tlv *tlv = iter;
void *ini_tlv = (void *)tlv->data;
u32 type = le32_to_cpu(tlv->type);
+ const char invalid_ap_str[] =
+ "WRT: ext=%d. Invalid apply point %d for %s\n";
switch (type) {
+ case IWL_UCODE_TLV_TYPE_DEBUG_INFO:
+ iwl_fw_dbg_info_apply(fwrt, ini_tlv, ext, pnt);
+ break;
case IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION: {
struct iwl_fw_ini_allocation_data *buf_alloc = ini_tlv;
+ if (pnt != IWL_FW_INI_APPLY_EARLY) {
+ IWL_ERR(fwrt, invalid_ap_str, ext, pnt,
+ "buffer allocation");
+ goto next;
+ }
+
iwl_fw_dbg_buffer_apply(fwrt, ini_tlv, pnt);
iter += sizeof(buf_alloc->is_alloc);
break;
}
case IWL_UCODE_TLV_TYPE_HCMD:
if (pnt < IWL_FW_INI_APPLY_AFTER_ALIVE) {
- IWL_ERR(fwrt,
- "WRT: ext=%d. Invalid apply point %d for host command\n",
- ext, pnt);
+ IWL_ERR(fwrt, invalid_ap_str, ext, pnt,
+ "host command");
goto next;
}
iwl_fw_dbg_send_hcmd(fwrt, tlv, ext);
@@ -2690,34 +2822,51 @@ next:
}
}
+static void iwl_fw_dbg_ini_reset_cfg(struct iwl_fw_runtime *fwrt)
+{
+ int i;
+
+ for (i = 0; i < IWL_FW_INI_MAX_REGION_ID; i++)
+ fwrt->dump.active_regs[i] = NULL;
+
+ /* disable the triggers, used in recovery flow */
+ for (i = 0; i < IWL_FW_TRIGGER_ID_NUM; i++)
+ fwrt->dump.active_trigs[i].active = false;
+
+ memset(fwrt->dump.img_name, 0,
+ sizeof(fwrt->dump.img_name));
+ memset(fwrt->dump.internal_dbg_cfg_name, 0,
+ sizeof(fwrt->dump.internal_dbg_cfg_name));
+ memset(fwrt->dump.external_dbg_cfg_name, 0,
+ sizeof(fwrt->dump.external_dbg_cfg_name));
+
+ fwrt->trans->dbg.ini_dest = IWL_FW_INI_LOCATION_INVALID;
+}
+
void iwl_fw_dbg_apply_point(struct iwl_fw_runtime *fwrt,
enum iwl_fw_ini_apply_point apply_point)
{
- void *data = &fwrt->trans->apply_points[apply_point];
- int i;
+ void *data = &fwrt->trans->dbg.apply_points[apply_point];
IWL_DEBUG_FW(fwrt, "WRT: enabling apply point %d\n", apply_point);
- if (apply_point == IWL_FW_INI_APPLY_EARLY) {
- for (i = 0; i < IWL_FW_INI_MAX_REGION_ID; i++)
- fwrt->dump.active_regs[i] = NULL;
-
- /* disable the triggers, used in recovery flow */
- for (i = 0; i < IWL_FW_TRIGGER_ID_NUM; i++)
- fwrt->dump.active_trigs[i].active = false;
- }
+ if (apply_point == IWL_FW_INI_APPLY_EARLY)
+ iwl_fw_dbg_ini_reset_cfg(fwrt);
_iwl_fw_dbg_apply_point(fwrt, data, apply_point, false);
- data = &fwrt->trans->apply_points_ext[apply_point];
+ data = &fwrt->trans->dbg.apply_points_ext[apply_point];
_iwl_fw_dbg_apply_point(fwrt, data, apply_point, true);
}
IWL_EXPORT_SYMBOL(iwl_fw_dbg_apply_point);
void iwl_fwrt_stop_device(struct iwl_fw_runtime *fwrt)
{
+ int i;
+
del_timer(&fwrt->dump.periodic_trig);
- iwl_fw_dbg_collect_sync(fwrt);
+ for (i = 0; i < IWL_FW_RUNTIME_DUMP_WK_NUM; i++)
+ iwl_fw_dbg_collect_sync(fwrt, i);
iwl_trans_stop_device(fwrt->trans);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
index fd0ad220e961..a8459ac71b2c 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
@@ -73,6 +73,7 @@
#include "error-dump.h"
#include "api/commands.h"
#include "api/dbg-tlv.h"
+#include "api/alive.h"
/**
* struct iwl_fw_dump_desc - describes the dump
@@ -201,7 +202,7 @@ _iwl_fw_dbg_trigger_on(struct iwl_fw_runtime *fwrt,
{
struct iwl_fw_dbg_trigger_tlv *trig;
- if (fwrt->trans->ini_valid)
+ if (fwrt->trans->dbg.ini_valid)
return NULL;
if (!iwl_fw_dbg_trigger_enabled(fwrt->fw, id))
@@ -228,7 +229,7 @@ iwl_fw_ini_trigger_on(struct iwl_fw_runtime *fwrt,
struct iwl_fw_ini_trigger *trig;
u32 usec;
- if (!fwrt->trans->ini_valid || id == IWL_FW_TRIGGER_ID_INVALID ||
+ if (!fwrt->trans->dbg.ini_valid || id == IWL_FW_TRIGGER_ID_INVALID ||
id >= IWL_FW_TRIGGER_ID_NUM || !fwrt->dump.active_trigs[id].active)
return false;
@@ -262,23 +263,6 @@ _iwl_fw_dbg_trigger_simple_stop(struct iwl_fw_runtime *fwrt,
iwl_fw_dbg_get_trigger((fwrt)->fw,\
(trig)))
-static inline int
-iwl_fw_dbg_start_stop_hcmd(struct iwl_fw_runtime *fwrt, bool start)
-{
- struct iwl_ldbg_config_cmd cmd = {
- .type = start ? cpu_to_le32(START_DEBUG_RECORDING) :
- cpu_to_le32(STOP_DEBUG_RECORDING),
- };
- struct iwl_host_cmd hcmd = {
- .id = LDBG_CONFIG_CMD,
- .flags = CMD_ASYNC,
- .data[0] = &cmd,
- .len[0] = sizeof(cmd),
- };
-
- return iwl_trans_send_cmd(fwrt->trans, &hcmd);
-}
-
static inline void
_iwl_fw_dbg_stop_recording(struct iwl_trans *trans,
struct iwl_fw_dbg_params *params)
@@ -294,21 +278,35 @@ _iwl_fw_dbg_stop_recording(struct iwl_trans *trans,
}
iwl_write_umac_prph(trans, DBGC_IN_SAMPLE, 0);
- udelay(100);
+ /* wait for the DBGC to finish writing the internal buffer to DRAM to
+ * avoid halting the HW while writing
+ */
+ usleep_range(700, 1000);
iwl_write_umac_prph(trans, DBGC_OUT_CTRL, 0);
#ifdef CONFIG_IWLWIFI_DEBUGFS
- trans->dbg_rec_on = false;
+ trans->dbg.rec_on = false;
#endif
}
static inline void
-iwl_fw_dbg_stop_recording(struct iwl_fw_runtime *fwrt,
+iwl_fw_dbg_stop_recording(struct iwl_trans *trans,
struct iwl_fw_dbg_params *params)
{
- if (fwrt->trans->cfg->device_family < IWL_DEVICE_FAMILY_22560)
- _iwl_fw_dbg_stop_recording(fwrt->trans, params);
- else
- iwl_fw_dbg_start_stop_hcmd(fwrt, false);
+ /* if the FW crashed or no debug monitor cfg was given, there is
+ * no point in stopping
+ */
+ if (test_bit(STATUS_FW_ERROR, &trans->status) ||
+ (!trans->dbg.dest_tlv &&
+ trans->dbg.ini_dest == IWL_FW_INI_LOCATION_INVALID))
+ return;
+
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+ IWL_ERR(trans,
+ "WRT: unsupported device family %d for debug stop recording\n",
+ trans->cfg->device_family);
+ return;
+ }
+ _iwl_fw_dbg_stop_recording(trans, params);
}
static inline void
@@ -324,7 +322,6 @@ _iwl_fw_dbg_restart_recording(struct iwl_trans *trans,
iwl_set_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x1);
} else {
iwl_write_umac_prph(trans, DBGC_IN_SAMPLE, params->in_sample);
- udelay(100);
iwl_write_umac_prph(trans, DBGC_OUT_CTRL, params->out_ctrl);
}
}
@@ -332,8 +329,10 @@ _iwl_fw_dbg_restart_recording(struct iwl_trans *trans,
#ifdef CONFIG_IWLWIFI_DEBUGFS
static inline void iwl_fw_set_dbg_rec_on(struct iwl_fw_runtime *fwrt)
{
- if (fwrt->fw->dbg.dest_tlv && fwrt->cur_fw_img == IWL_UCODE_REGULAR)
- fwrt->trans->dbg_rec_on = true;
+ if (fwrt->cur_fw_img == IWL_UCODE_REGULAR &&
+ (fwrt->fw->dbg.dest_tlv ||
+ fwrt->trans->dbg.ini_dest != IWL_FW_INI_LOCATION_INVALID))
+ fwrt->trans->dbg.rec_on = true;
}
#endif
@@ -341,10 +340,21 @@ static inline void
iwl_fw_dbg_restart_recording(struct iwl_fw_runtime *fwrt,
struct iwl_fw_dbg_params *params)
{
- if (fwrt->trans->cfg->device_family < IWL_DEVICE_FAMILY_22560)
- _iwl_fw_dbg_restart_recording(fwrt->trans, params);
- else
- iwl_fw_dbg_start_stop_hcmd(fwrt, true);
+ /* if the FW crashed or no debug monitor cfg was given, there is
+ * no point in restarting
+ */
+ if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status) ||
+ (!fwrt->trans->dbg.dest_tlv &&
+ fwrt->trans->dbg.ini_dest == IWL_FW_INI_LOCATION_INVALID))
+ return;
+
+ if (fwrt->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+ IWL_ERR(fwrt,
+ "WRT: unsupported device family %d for debug restart recording\n",
+ fwrt->trans->cfg->device_family);
+ return;
+ }
+ _iwl_fw_dbg_restart_recording(fwrt->trans, params);
#ifdef CONFIG_IWLWIFI_DEBUGFS
iwl_fw_set_dbg_rec_on(fwrt);
#endif
@@ -359,7 +369,7 @@ void iwl_fw_error_dump_wk(struct work_struct *work);
static inline bool iwl_fw_dbg_type_on(struct iwl_fw_runtime *fwrt, u32 type)
{
- return (fwrt->fw->dbg.dump_mask & BIT(type) || fwrt->trans->ini_valid);
+ return (fwrt->fw->dbg.dump_mask & BIT(type));
}
static inline bool iwl_fw_dbg_is_d3_debug_enabled(struct iwl_fw_runtime *fwrt)
@@ -383,16 +393,26 @@ static inline bool iwl_fw_dbg_is_paging_enabled(struct iwl_fw_runtime *fwrt)
void iwl_fw_dbg_read_d3_debug_data(struct iwl_fw_runtime *fwrt);
-static inline void iwl_fw_flush_dump(struct iwl_fw_runtime *fwrt)
+static inline void iwl_fw_flush_dumps(struct iwl_fw_runtime *fwrt)
{
+ int i;
+
del_timer(&fwrt->dump.periodic_trig);
- flush_delayed_work(&fwrt->dump.wk);
+ for (i = 0; i < IWL_FW_RUNTIME_DUMP_WK_NUM; i++) {
+ flush_delayed_work(&fwrt->dump.wks[i].wk);
+ fwrt->dump.wks[i].ini_trig_id = IWL_FW_TRIGGER_ID_INVALID;
+ }
}
-static inline void iwl_fw_cancel_dump(struct iwl_fw_runtime *fwrt)
+static inline void iwl_fw_cancel_dumps(struct iwl_fw_runtime *fwrt)
{
+ int i;
+
del_timer(&fwrt->dump.periodic_trig);
- cancel_delayed_work_sync(&fwrt->dump.wk);
+ for (i = 0; i < IWL_FW_RUNTIME_DUMP_WK_NUM; i++) {
+ cancel_delayed_work_sync(&fwrt->dump.wks[i].wk);
+ fwrt->dump.wks[i].ini_trig_id = IWL_FW_TRIGGER_ID_INVALID;
+ }
}
#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -431,7 +451,6 @@ static inline void iwl_fw_resume_timestamp(struct iwl_fw_runtime *fwrt) {}
#endif /* CONFIG_IWLWIFI_DEBUGFS */
-void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt);
void iwl_fw_dbg_apply_point(struct iwl_fw_runtime *fwrt,
enum iwl_fw_ini_apply_point apply_point);
@@ -440,31 +459,28 @@ void iwl_fwrt_stop_device(struct iwl_fw_runtime *fwrt);
static inline void iwl_fw_lmac1_set_alive_err_table(struct iwl_trans *trans,
u32 lmac_error_event_table)
{
- if (!(trans->error_event_table_tlv_status &
+ if (!(trans->dbg.error_event_table_tlv_status &
IWL_ERROR_EVENT_TABLE_LMAC1) ||
- WARN_ON(trans->lmac_error_event_table[0] !=
+ WARN_ON(trans->dbg.lmac_error_event_table[0] !=
lmac_error_event_table))
- trans->lmac_error_event_table[0] = lmac_error_event_table;
+ trans->dbg.lmac_error_event_table[0] = lmac_error_event_table;
}
static inline void iwl_fw_umac_set_alive_err_table(struct iwl_trans *trans,
u32 umac_error_event_table)
{
- if (!(trans->error_event_table_tlv_status &
+ if (!(trans->dbg.error_event_table_tlv_status &
IWL_ERROR_EVENT_TABLE_UMAC) ||
- WARN_ON(trans->umac_error_event_table !=
+ WARN_ON(trans->dbg.umac_error_event_table !=
umac_error_event_table))
- trans->umac_error_event_table = umac_error_event_table;
+ trans->dbg.umac_error_event_table = umac_error_event_table;
}
-/* This bit is used to differentiate the legacy dump from the ini dump */
-#define INI_DUMP_BIT BIT(31)
-
static inline void iwl_fw_error_collect(struct iwl_fw_runtime *fwrt)
{
- if (fwrt->trans->ini_valid && fwrt->trans->hw_error) {
+ if (fwrt->trans->dbg.ini_valid && fwrt->trans->dbg.hw_error) {
_iwl_fw_dbg_ini_collect(fwrt, IWL_FW_TRIGGER_ID_FW_HW_ERROR);
- fwrt->trans->hw_error = false;
+ fwrt->trans->dbg.hw_error = false;
} else {
iwl_fw_dbg_collect_desc(fwrt, &iwl_dump_desc_assert, false, 0);
}
@@ -473,4 +489,21 @@ static inline void iwl_fw_error_collect(struct iwl_fw_runtime *fwrt)
void iwl_fw_dbg_periodic_trig_handler(struct timer_list *t);
void iwl_fw_error_print_fseq_regs(struct iwl_fw_runtime *fwrt);
+
+static inline void iwl_fwrt_update_fw_versions(struct iwl_fw_runtime *fwrt,
+ struct iwl_lmac_alive *lmac,
+ struct iwl_umac_alive *umac)
+{
+ if (lmac) {
+ fwrt->dump.fw_ver.type = lmac->ver_type;
+ fwrt->dump.fw_ver.subtype = lmac->ver_subtype;
+ fwrt->dump.fw_ver.lmac_major = le32_to_cpu(lmac->ucode_major);
+ fwrt->dump.fw_ver.lmac_minor = le32_to_cpu(lmac->ucode_minor);
+ }
+
+ if (umac) {
+ fwrt->dump.fw_ver.umac_major = le32_to_cpu(umac->umac_major);
+ fwrt->dump.fw_ver.umac_minor = le32_to_cpu(umac->umac_minor);
+ }
+}
#endif /* __iwl_fw_dbg_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
index 0feff4c33e39..00a45ea85b69 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
@@ -67,6 +67,7 @@
#include <linux/types.h>
#define IWL_FW_ERROR_DUMP_BARKER 0x14789632
+#define IWL_FW_INI_ERROR_DUMP_BARKER 0x14789633
/**
* enum iwl_fw_error_dump_type - types of data in the dump file
@@ -278,19 +279,42 @@ struct iwl_fw_error_dump_mem {
u8 data[];
};
-#define IWL_INI_DUMP_MEM_VER 1
-#define IWL_INI_DUMP_MONITOR_VER 1
-#define IWL_INI_DUMP_FIFO_VER 1
+/* Dump version, used by the dump parser to differentiate between
+ * different dump formats
+ */
+#define IWL_INI_DUMP_VER 1
+
+/* Use bit 31 as dump info type to avoid colliding with region types */
+#define IWL_INI_DUMP_INFO_TYPE BIT(31)
+
+/**
+ * struct iwl_fw_ini_fifo_hdr - fifo range header
+ * @fifo_num: the fifo number. In case of umac rx fifo, set BIT(31) to
+ * distinguish between lmac and umac rx fifos
+ * @num_of_registers: num of registers to dump, dword size each
+ */
+struct iwl_fw_ini_fifo_hdr {
+ __le32 fifo_num;
+ __le32 num_of_registers;
+} __packed;
/**
* struct iwl_fw_ini_error_dump_range - range of memory
* @range_data_size: the size of this range, in bytes
- * @start_addr: the start address of this range
+ * @internal_base_addr - base address of internal memory range
+ * @dram_base_addr - base address of dram monitor range
+ * @page_num - page number of memory range
+ * @fifo_hdr - fifo header of memory range
* @data: the actual memory
*/
struct iwl_fw_ini_error_dump_range {
__le32 range_data_size;
- __le64 start_addr;
+ union {
+ __le32 internal_base_addr;
+ __le64 dram_base_addr;
+ __le32 page_num;
+ struct iwl_fw_ini_fifo_hdr fifo_hdr;
+ };
__le32 data[];
} __packed;
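
With the union in place, one range header serves every region type: internal memory ranges carry a 32-bit base address, DRAM monitor ranges a 64-bit one, paging ranges a page number, and FIFO ranges the new fifo_hdr, while range_data_size still tells a parser how far to skip to reach the next range. A host-endian userspace sketch of walking such ranges; the real structures are __packed and use __le32/__le64:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct fifo_hdr {
	uint32_t fifo_num;
	uint32_t num_of_registers;
};

struct dump_range {
	uint32_t range_data_size;
	union {				/* meaning depends on the region type */
		uint32_t internal_base_addr;
		uint64_t dram_base_addr;
		uint32_t page_num;
		struct fifo_hdr fifo_hdr;
	};
	uint8_t data[];			/* range_data_size bytes follow */
};

static void walk_ranges(const uint8_t *buf, size_t len)
{
	size_t off = 0;

	/* a real parser would also bound-check range_data_size against len */
	while (off + sizeof(struct dump_range) <= len) {
		const struct dump_range *r = (const void *)(buf + off);

		printf("range at %zu: %u data bytes\n", off,
		       (unsigned int)r->range_data_size);
		off += sizeof(*r) + r->range_data_size;
	}
}

int main(void)
{
	size_t len = sizeof(struct dump_range) + 8;
	struct dump_range *r = calloc(1, len);

	if (!r)
		return 1;
	r->range_data_size = 8;
	r->page_num = 2;		/* e.g. a paging region range */
	walk_ranges((const uint8_t *)r, len);
	free(r);
	return 0;
}
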
@@ -333,30 +357,63 @@ struct iwl_fw_ini_error_dump_register {
__le32 data;
} __packed;
-/**
- * struct iwl_fw_ini_fifo_error_dump_range - ini fifo range dump
- * @fifo_num: the fifo num. In case of rxf and umac rxf, set BIT(31) to
- * distinguish between lmac and umac
- * @num_of_registers: num of registers to dump, dword size each
- * @range_data_size: the size of the data
- * @data: consist of
- * num_of_registers * (register address + register value) + fifo data
+/**
+ * struct iwl_fw_ini_dump_info - ini dump information
+ * @version: dump version
+ * @trigger_id: trigger id that caused the dump collection
+ * @trigger_reason: not supported yet
+ * @is_external_cfg: 1 if an external debug configuration was loaded
+ * and 0 otherwise
+ * @ver_type: FW version type
+ * @ver_subtype: FW version subtype
+ * @hw_step: HW step
+ * @hw_type: HW type
+ * @rf_id_flavor: HW RF id flavor
+ * @rf_id_dash: HW RF id dash
+ * @rf_id_step: HW RF id step
+ * @rf_id_type: HW RF id type
+ * @lmac_major: lmac major version
+ * @lmac_minor: lmac minor version
+ * @umac_major: umac major version
+ * @umac_minor: umac minor version
+ * @build_tag_len: length of the build tag
+ * @build_tag: build tag string
+ * @img_name_len: length of the FW image name
+ * @img_name: FW image name
+ * @internal_dbg_cfg_name_len: length of the internal debug configuration name
+ * @internal_dbg_cfg_name: internal debug configuration name
+ * @external_dbg_cfg_name_len: length of the external debug configuration name
+ * @external_dbg_cfg_name: external debug configuration name
+ * @regions_num: number of region ids
+ * @region_ids: region ids the trigger is configured to collect
*/
-struct iwl_fw_ini_fifo_error_dump_range {
- __le32 fifo_num;
- __le32 num_of_registers;
- __le32 range_data_size;
- __le32 data[];
-} __packed;
+struct iwl_fw_ini_dump_info {
+ __le32 version;
+ __le32 trigger_id;
+ __le32 trigger_reason;
+ __le32 is_external_cfg;
+ __le32 ver_type;
+ __le32 ver_subtype;
+ __le32 hw_step;
+ __le32 hw_type;
+ __le32 rf_id_flavor;
+ __le32 rf_id_dash;
+ __le32 rf_id_step;
+ __le32 rf_id_type;
+ __le32 lmac_major;
+ __le32 lmac_minor;
+ __le32 umac_major;
+ __le32 umac_minor;
+ __le32 build_tag_len;
+ u8 build_tag[FW_VER_HUMAN_READABLE_SZ];
+ __le32 img_name_len;
+ u8 img_name[IWL_FW_INI_MAX_IMG_NAME_LEN];
+ __le32 internal_dbg_cfg_name_len;
+ u8 internal_dbg_cfg_name[IWL_FW_INI_MAX_DBG_CFG_NAME_LEN];
+ __le32 external_dbg_cfg_name_len;
+ u8 external_dbg_cfg_name[IWL_FW_INI_MAX_DBG_CFG_NAME_LEN];
+ __le32 regions_num;
+ __le32 region_ids[];
-/**
- * struct iwl_fw_ini_fifo_error_dump - ini fifo region dump
- * @header: the header of this region
- * @ranges: the memory ranges of this region
- */
-struct iwl_fw_ini_fifo_error_dump {
- struct iwl_fw_ini_error_dump_header header;
- struct iwl_fw_ini_fifo_error_dump_range ranges[];
} __packed;
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index de9243d30135..0c38e7392b61 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -151,12 +151,13 @@ enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_FW_RECOVERY_INFO = 57,
IWL_UCODE_TLV_FW_FSEQ_VERSION = 60,
- IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION = IWL_UCODE_INI_TLV_GROUP + 0x1,
- IWL_UCODE_TLV_DEBUG_BASE = IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION,
- IWL_UCODE_TLV_TYPE_HCMD = IWL_UCODE_INI_TLV_GROUP + 0x2,
- IWL_UCODE_TLV_TYPE_REGIONS = IWL_UCODE_INI_TLV_GROUP + 0x3,
- IWL_UCODE_TLV_TYPE_TRIGGERS = IWL_UCODE_INI_TLV_GROUP + 0x4,
- IWL_UCODE_TLV_TYPE_DEBUG_FLOW = IWL_UCODE_INI_TLV_GROUP + 0x5,
+ IWL_UCODE_TLV_DEBUG_BASE = IWL_UCODE_INI_TLV_GROUP,
+ IWL_UCODE_TLV_TYPE_DEBUG_INFO = IWL_UCODE_TLV_DEBUG_BASE + 0,
+ IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION = IWL_UCODE_TLV_DEBUG_BASE + 1,
+ IWL_UCODE_TLV_TYPE_HCMD = IWL_UCODE_TLV_DEBUG_BASE + 2,
+ IWL_UCODE_TLV_TYPE_REGIONS = IWL_UCODE_TLV_DEBUG_BASE + 3,
+ IWL_UCODE_TLV_TYPE_TRIGGERS = IWL_UCODE_TLV_DEBUG_BASE + 4,
+ IWL_UCODE_TLV_TYPE_DEBUG_FLOW = IWL_UCODE_TLV_DEBUG_BASE + 5,
IWL_UCODE_TLV_DEBUG_MAX = IWL_UCODE_TLV_TYPE_DEBUG_FLOW,
/* TLVs 0x1000-0x2000 are for internal driver usage */
@@ -286,6 +287,8 @@ typedef unsigned int __bitwise iwl_ucode_tlv_api_t;
* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S.
* @IWL_UCODE_TLV_API_MBSSID_HE: This ucode supports v2 of
* STA_CONTEXT_DOT11AX_API_S
+ * @IWL_UCODE_TLV_API_SAR_TABLE_VER: This ucode supports different SAR
+ * table versions.
*
* @NUM_IWL_UCODE_TLV_API: number of bits used
*/
@@ -318,6 +321,8 @@ enum iwl_ucode_tlv_api {
IWL_UCODE_TLV_API_MBSSID_HE = (__force iwl_ucode_tlv_api_t)52,
IWL_UCODE_TLV_API_WOWLAN_TCP_SYN_WAKE = (__force iwl_ucode_tlv_api_t)53,
IWL_UCODE_TLV_API_FTM_RTT_ACCURACY = (__force iwl_ucode_tlv_api_t)54,
+ IWL_UCODE_TLV_API_SAR_TABLE_VER = (__force iwl_ucode_tlv_api_t)55,
+ IWL_UCODE_TLV_API_ADWELL_HB_DEF_N_AP = (__force iwl_ucode_tlv_api_t)57,
NUM_IWL_UCODE_TLV_API
#ifdef __CHECKER__
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/init.c b/drivers/net/wireless/intel/iwlwifi/fw/init.c
index 4435c0ce3013..c16d6e126e3c 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/init.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/init.c
@@ -67,6 +67,8 @@ void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
const struct iwl_fw_runtime_ops *ops, void *ops_ctx,
struct dentry *dbgfs_dir)
{
+ int i;
+
memset(fwrt, 0, sizeof(*fwrt));
fwrt->trans = trans;
fwrt->fw = fw;
@@ -74,7 +76,10 @@ void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
fwrt->dump.conf = FW_DBG_INVALID;
fwrt->ops = ops;
fwrt->ops_ctx = ops_ctx;
- INIT_DELAYED_WORK(&fwrt->dump.wk, iwl_fw_error_dump_wk);
+ for (i = 0; i < IWL_FW_RUNTIME_DUMP_WK_NUM; i++) {
+ fwrt->dump.wks[i].idx = i;
+ INIT_DELAYED_WORK(&fwrt->dump.wks[i].wk, iwl_fw_error_dump_wk);
+ }
iwl_fwrt_dbgfs_register(fwrt, dbgfs_dir);
timer_setup(&fwrt->dump.periodic_trig,
iwl_fw_dbg_periodic_trig_handler, 0);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
index a6402a0b3854..406ef73992c1 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
@@ -89,9 +89,7 @@ struct iwl_fwrt_shared_mem_cfg {
u32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
};
-enum iwl_fw_runtime_status {
- IWL_FWRT_STATUS_DUMPING = 0,
-};
+#define IWL_FW_RUNTIME_DUMP_WK_NUM 5
/**
* struct iwl_fw_runtime - runtime data for firmware
@@ -100,7 +98,6 @@ enum iwl_fw_runtime_status {
* @dev: device pointer
* @ops: user ops
* @ops_ctx: user ops context
- * @status: status flags
* @fw_paging_db: paging database
* @num_of_paging_blk: number of paging blocks
* @num_of_pages_in_last_blk: number of pages in the last block
@@ -117,8 +114,6 @@ struct iwl_fw_runtime {
const struct iwl_fw_runtime_ops *ops;
void *ops_ctx;
- unsigned long status;
-
/* Paging */
struct iwl_fw_paging fw_paging_db[NUM_OF_FW_PAGING_BLOCKS];
u16 num_of_paging_blk;
@@ -133,7 +128,12 @@ struct iwl_fw_runtime {
struct {
const struct iwl_fw_dump_desc *desc;
bool monitor_only;
- struct delayed_work wk;
+ struct {
+ u8 idx;
+ enum iwl_fw_ini_trigger_id ini_trig_id;
+ struct delayed_work wk;
+ } wks[IWL_FW_RUNTIME_DUMP_WK_NUM];
+ unsigned long active_wks;
u8 conf;
@@ -145,8 +145,20 @@ struct iwl_fw_runtime {
u32 lmac_err_id[MAX_NUM_LMAC];
u32 umac_err_id;
void *fifo_iter;
- enum iwl_fw_ini_trigger_id ini_trig_id;
struct timer_list periodic_trig;
+
+ u8 img_name[IWL_FW_INI_MAX_IMG_NAME_LEN];
+ u8 internal_dbg_cfg_name[IWL_FW_INI_MAX_DBG_CFG_NAME_LEN];
+ u8 external_dbg_cfg_name[IWL_FW_INI_MAX_DBG_CFG_NAME_LEN];
+
+ struct {
+ u8 type;
+ u8 subtype;
+ u32 lmac_major;
+ u32 lmac_minor;
+ u32 umac_major;
+ u32 umac_minor;
+ } fw_ver;
} dump;
#ifdef CONFIG_IWLWIFI_DEBUGFS
struct {
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/smem.c b/drivers/net/wireless/intel/iwlwifi/fw/smem.c
index ff85d69c2a8c..557ee47bffd8 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/smem.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/smem.c
@@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -134,6 +134,7 @@ void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt)
.len = { 0, },
};
struct iwl_rx_packet *pkt;
+ int ret;
if (fw_has_capa(&fwrt->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG))
@@ -141,8 +142,13 @@ void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt)
else
cmd.id = SHARED_MEM_CFG;
- if (WARN_ON(iwl_trans_send_cmd(fwrt->trans, &cmd)))
+ ret = iwl_trans_send_cmd(fwrt->trans, &cmd);
+
+ if (ret) {
+ WARN(ret != -ERFKILL,
+ "Could not send the SMEM command: %d\n", ret);
return;
+ }
pkt = cmd.resp_pkt;
if (fwrt->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index f3e69edf8907..bc267bd2c3b0 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -540,14 +540,20 @@ extern const struct iwl_cfg iwl9260_killer_2ac_cfg;
extern const struct iwl_cfg iwl9270_2ac_cfg;
extern const struct iwl_cfg iwl9460_2ac_cfg;
extern const struct iwl_cfg iwl9560_2ac_cfg;
+extern const struct iwl_cfg iwl9560_2ac_cfg_quz_a0_jf_b0_soc;
extern const struct iwl_cfg iwl9560_2ac_160_cfg;
+extern const struct iwl_cfg iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc;
extern const struct iwl_cfg iwl9460_2ac_cfg_soc;
extern const struct iwl_cfg iwl9461_2ac_cfg_soc;
+extern const struct iwl_cfg iwl9461_2ac_cfg_quz_a0_jf_b0_soc;
extern const struct iwl_cfg iwl9462_2ac_cfg_soc;
+extern const struct iwl_cfg iwl9462_2ac_cfg_quz_a0_jf_b0_soc;
extern const struct iwl_cfg iwl9560_2ac_cfg_soc;
extern const struct iwl_cfg iwl9560_2ac_160_cfg_soc;
extern const struct iwl_cfg iwl9560_killer_2ac_cfg_soc;
extern const struct iwl_cfg iwl9560_killer_s_2ac_cfg_soc;
+extern const struct iwl_cfg iwl9560_killer_i_2ac_cfg_quz_a0_jf_b0_soc;
+extern const struct iwl_cfg iwl9560_killer_s_2ac_cfg_quz_a0_jf_b0_soc;
extern const struct iwl_cfg iwl9460_2ac_cfg_shared_clk;
extern const struct iwl_cfg iwl9461_2ac_cfg_shared_clk;
extern const struct iwl_cfg iwl9462_2ac_cfg_shared_clk;
@@ -562,6 +568,10 @@ extern const struct iwl_cfg iwl_ax101_cfg_qu_hr;
extern const struct iwl_cfg iwl_ax101_cfg_quz_hr;
extern const struct iwl_cfg iwl22000_2ax_cfg_hr;
extern const struct iwl_cfg iwl_ax200_cfg_cc;
+extern const struct iwl_cfg iwl_ax201_cfg_qu_hr;
+extern const struct iwl_cfg iwl_ax201_cfg_quz_hr;
+extern const struct iwl_cfg iwl_ax1650i_cfg_quz_hr;
+extern const struct iwl_cfg iwl_ax1650s_cfg_quz_hr;
extern const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0;
extern const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0;
extern const struct iwl_cfg killer1650x_2ax_cfg;
@@ -580,9 +590,9 @@ extern const struct iwl_cfg iwl9560_2ac_cfg_qnj_jf_b0;
extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0;
extern const struct iwl_cfg iwlax210_2ax_cfg_so_jf_a0;
extern const struct iwl_cfg iwlax210_2ax_cfg_so_hr_a0;
-extern const struct iwl_cfg iwlax210_2ax_cfg_so_gf_a0;
+extern const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0;
extern const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0;
-extern const struct iwl_cfg iwlax210_2ax_cfg_so_gf4_a0;
+extern const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0;
#endif /* CPTCFG_IWLMVM || CPTCFG_IWLFMAC */
#endif /* __IWL_CONFIG_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
index 553554846009..93da96a7247c 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -336,6 +336,7 @@ enum {
/* RF_ID value */
#define CSR_HW_RF_ID_TYPE_JF (0x00105100)
#define CSR_HW_RF_ID_TYPE_HR (0x0010A000)
+#define CSR_HW_RF_ID_TYPE_HR1 (0x0010c100)
#define CSR_HW_RF_ID_TYPE_HRCDB (0x00109F00)
#define CSR_HW_RF_ID_TYPE_GF (0x0010D000)
#define CSR_HW_RF_ID_TYPE_GF4 (0x0010E000)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
index ba66f7fba064..fcaec410b3be 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
@@ -81,9 +81,9 @@ void iwl_fw_dbg_copy_tlv(struct iwl_trans *trans, struct iwl_ucode_tlv *tlv,
return;
if (ext)
- data = &trans->apply_points_ext[apply_point];
+ data = &trans->dbg.apply_points_ext[apply_point];
else
- data = &trans->apply_points[apply_point];
+ data = &trans->dbg.apply_points[apply_point];
/* add room for is_alloc field in &iwl_fw_ini_allocation_data struct */
if (le32_to_cpu(tlv->type) == IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION) {
@@ -172,14 +172,14 @@ void iwl_alloc_dbg_tlv(struct iwl_trans *trans, size_t len, const u8 *data,
}
if (ext) {
- trans->apply_points_ext[i].data = mem;
- trans->apply_points_ext[i].size = size[i];
+ trans->dbg.apply_points_ext[i].data = mem;
+ trans->dbg.apply_points_ext[i].size = size[i];
} else {
- trans->apply_points[i].data = mem;
- trans->apply_points[i].size = size[i];
+ trans->dbg.apply_points[i].data = mem;
+ trans->dbg.apply_points[i].size = size[i];
}
- trans->ini_valid = true;
+ trans->dbg.ini_valid = true;
}
}
@@ -187,14 +187,14 @@ void iwl_fw_dbg_free(struct iwl_trans *trans)
{
int i;
- for (i = 0; i < ARRAY_SIZE(trans->apply_points); i++) {
- kfree(trans->apply_points[i].data);
- trans->apply_points[i].size = 0;
- trans->apply_points[i].offset = 0;
+ for (i = 0; i < ARRAY_SIZE(trans->dbg.apply_points); i++) {
+ kfree(trans->dbg.apply_points[i].data);
+ trans->dbg.apply_points[i].size = 0;
+ trans->dbg.apply_points[i].offset = 0;
- kfree(trans->apply_points_ext[i].data);
- trans->apply_points_ext[i].size = 0;
- trans->apply_points_ext[i].offset = 0;
+ kfree(trans->dbg.apply_points_ext[i].data);
+ trans->dbg.apply_points_ext[i].size = 0;
+ trans->dbg.apply_points_ext[i].offset = 0;
}
}
@@ -221,6 +221,7 @@ static int iwl_parse_fw_dbg_tlv(struct iwl_trans *trans, const u8 *data,
data += sizeof(*tlv) + ALIGN(tlv_len, 4);
switch (tlv_type) {
+ case IWL_UCODE_TLV_TYPE_DEBUG_INFO:
case IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION:
case IWL_UCODE_TLV_TYPE_HCMD:
case IWL_UCODE_TLV_TYPE_REGIONS:
@@ -242,7 +243,7 @@ void iwl_load_fw_dbg_tlv(struct device *dev, struct iwl_trans *trans)
const struct firmware *fw;
int res;
- if (trans->external_ini_loaded || !iwlwifi_mod_params.enable_ini)
+ if (trans->dbg.external_ini_loaded || !iwlwifi_mod_params.enable_ini)
return;
res = request_firmware(&fw, "iwl-dbg-tlv.ini", dev);
@@ -252,6 +253,6 @@ void iwl_load_fw_dbg_tlv(struct device *dev, struct iwl_trans *trans)
iwl_alloc_dbg_tlv(trans, fw->size, fw->data, true);
iwl_parse_fw_dbg_tlv(trans, fw->data, fw->size);
- trans->external_ini_loaded = true;
+ trans->dbg.external_ini_loaded = true;
release_firmware(fw);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index fba242284507..57d09049e615 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -1105,6 +1105,18 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
le32_to_cpu(recov_info->buf_size);
}
break;
+ case IWL_UCODE_TLV_FW_FSEQ_VERSION: {
+ struct {
+ u8 version[32];
+ u8 sha1[20];
+ } *fseq_ver = (void *)tlv_data;
+
+ if (tlv_len != sizeof(*fseq_ver))
+ goto invalid_tlv_len;
+ IWL_INFO(drv, "TLV_FW_FSEQ_VERSION: %s\n",
+ fseq_ver->version);
+ }
+ break;
case IWL_UCODE_TLV_UMAC_DEBUG_ADDRS: {
struct iwl_umac_debug_addrs *dbg_ptrs =
(void *)tlv_data;
@@ -1114,10 +1126,10 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
if (drv->trans->cfg->device_family <
IWL_DEVICE_FAMILY_22000)
break;
- drv->trans->umac_error_event_table =
+ drv->trans->dbg.umac_error_event_table =
le32_to_cpu(dbg_ptrs->error_info_addr) &
~FW_ADDR_CACHE_CONTROL;
- drv->trans->error_event_table_tlv_status |=
+ drv->trans->dbg.error_event_table_tlv_status |=
IWL_ERROR_EVENT_TABLE_UMAC;
break;
}
@@ -1130,13 +1142,14 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
if (drv->trans->cfg->device_family <
IWL_DEVICE_FAMILY_22000)
break;
- drv->trans->lmac_error_event_table[0] =
+ drv->trans->dbg.lmac_error_event_table[0] =
le32_to_cpu(dbg_ptrs->error_event_table_ptr) &
~FW_ADDR_CACHE_CONTROL;
- drv->trans->error_event_table_tlv_status |=
+ drv->trans->dbg.error_event_table_tlv_status |=
IWL_ERROR_EVENT_TABLE_LMAC1;
break;
}
+ case IWL_UCODE_TLV_TYPE_DEBUG_INFO:
case IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION:
case IWL_UCODE_TLV_TYPE_HCMD:
case IWL_UCODE_TLV_TYPE_REGIONS:
@@ -1744,7 +1757,7 @@ IWL_EXPORT_SYMBOL(iwl_opmode_deregister);
static int __init iwl_drv_init(void)
{
- int i;
+ int i, err;
mutex_init(&iwlwifi_opmode_table_mtx);
@@ -1759,7 +1772,17 @@ static int __init iwl_drv_init(void)
iwl_dbgfs_root = debugfs_create_dir(DRV_NAME, NULL);
#endif
- return iwl_pci_register_driver();
+ err = iwl_pci_register_driver();
+ if (err)
+ goto cleanup_debugfs;
+
+ return 0;
+
+cleanup_debugfs:
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ debugfs_remove_recursive(iwl_dbgfs_root);
+#endif
+ return err;
}
module_init(iwl_drv_init);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index 1e4c9ef548cc..0f8aeb111b0e 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -722,6 +722,50 @@ struct iwl_self_init_dram {
};
/**
+ * struct iwl_trans_debug - transport debug related data
+ *
+ * @n_dest_reg: num of reg_ops in %dest_tlv
+ * @rec_on: true iff there is a fw debug recording currently active
+ * @dest_tlv: points to the destination TLV for debug
+ * @conf_tlv: array of pointers to configuration TLVs for debug
+ * @trigger_tlv: array of pointers to triggers TLVs for debug
+ * @lmac_error_event_table: addrs of lmacs error tables
+ * @umac_error_event_table: addr of umac error table
+ * @error_event_table_tlv_status: bitmap that indicates what error table
+ * pointers were received via TLV. Uses enum &iwl_error_event_table_status
+ * @external_ini_loaded: indicates if an external ini cfg was given
+ * @ini_valid: indicates if debug ini mode is on
+ * @num_blocks: number of blocks in fw_mon
+ * @fw_mon: address of the buffers for firmware monitor
+ * @hw_error: equals true if hw error interrupt was received from the FW
+ * @ini_dest: debug monitor destination, uses &enum iwl_fw_ini_buffer_location
+ */
+struct iwl_trans_debug {
+ u8 n_dest_reg;
+ bool rec_on;
+
+ const struct iwl_fw_dbg_dest_tlv_v1 *dest_tlv;
+ const struct iwl_fw_dbg_conf_tlv *conf_tlv[FW_DBG_CONF_MAX];
+ struct iwl_fw_dbg_trigger_tlv * const *trigger_tlv;
+
+ u32 lmac_error_event_table[2];
+ u32 umac_error_event_table;
+ unsigned int error_event_table_tlv_status;
+
+ bool external_ini_loaded;
+ bool ini_valid;
+
+ struct iwl_apply_point_data apply_points[IWL_FW_INI_APPLY_NUM];
+ struct iwl_apply_point_data apply_points_ext[IWL_FW_INI_APPLY_NUM];
+
+ int num_blocks;
+ struct iwl_dram_data fw_mon[IWL_FW_INI_APPLY_NUM];
+
+ bool hw_error;
+ enum iwl_fw_ini_buffer_location ini_dest;
+};
+
+/**
* struct iwl_trans - transport common data
*
* @ops - pointer to iwl_trans_ops
@@ -750,24 +794,12 @@ struct iwl_self_init_dram {
* @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the
* start of the 802.11 header in the @rx_mpdu_cmd
* @dflt_pwr_limit: default power limit fetched from the platform (ACPI)
- * @dbg_dest_tlv: points to the destination TLV for debug
- * @dbg_conf_tlv: array of pointers to configuration TLVs for debug
- * @dbg_trigger_tlv: array of pointers to triggers TLVs for debug
- * @dbg_n_dest_reg: num of reg_ops in %dbg_dest_tlv
- * @num_blocks: number of blocks in fw_mon
- * @fw_mon: address of the buffers for firmware monitor
* @system_pm_mode: the system-wide power management mode in use.
* This mode is set dynamically, depending on the WoWLAN values
* configured from the userspace at runtime.
* @runtime_pm_mode: the runtime power management mode in use. This
* mode is set during the initialization phase and is not
* supposed to change during runtime.
- * @dbg_rec_on: true iff there is a fw debug recording currently active
- * @lmac_error_event_table: addrs of lmacs error tables
- * @umac_error_event_table: addr of umac error table
- * @error_event_table_tlv_status: bitmap that indicates what error table
- * pointers was recevied via TLV. use enum &iwl_error_event_table_status
- * @hw_error: equals true if hw error interrupt was received from the FW
*/
struct iwl_trans {
const struct iwl_trans_ops *ops;
@@ -808,29 +840,12 @@ struct iwl_trans {
struct lockdep_map sync_cmd_lockdep_map;
#endif
- struct iwl_apply_point_data apply_points[IWL_FW_INI_APPLY_NUM];
- struct iwl_apply_point_data apply_points_ext[IWL_FW_INI_APPLY_NUM];
-
- bool external_ini_loaded;
- bool ini_valid;
-
- const struct iwl_fw_dbg_dest_tlv_v1 *dbg_dest_tlv;
- const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
- struct iwl_fw_dbg_trigger_tlv * const *dbg_trigger_tlv;
- u8 dbg_n_dest_reg;
- int num_blocks;
- struct iwl_dram_data fw_mon[IWL_FW_INI_APPLY_NUM];
+ struct iwl_trans_debug dbg;
struct iwl_self_init_dram init_dram;
enum iwl_plat_pm_mode system_pm_mode;
enum iwl_plat_pm_mode runtime_pm_mode;
bool suspending;
- bool dbg_rec_on;
-
- u32 lmac_error_event_table[2];
- u32 umac_error_event_table;
- unsigned int error_event_table_tlv_status;
- bool hw_error;
/* pointer to trans specific struct */
/*Ensure that this pointer will always be aligned to sizeof pointer */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
index dff14f1ec55f..915b172da57a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
@@ -152,5 +152,6 @@
#define IWL_MVM_FTM_INITIATOR_ALGO IWL_TOF_ALGO_TYPE_MAX_LIKE
#define IWL_MVM_FTM_INITIATOR_DYNACK true
#define IWL_MVM_D3_DEBUG false
+#define IWL_MVM_USE_TWT false
#endif /* __MVM_CONSTANTS_H */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index e7e68fb2bd29..cec40855a641 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -398,8 +398,7 @@ static int iwl_mvm_send_patterns_v1(struct iwl_mvm *mvm,
if (!wowlan->n_patterns)
return 0;
- cmd.len[0] = sizeof(*pattern_cmd) +
- wowlan->n_patterns * sizeof(struct iwl_wowlan_pattern_v1);
+ cmd.len[0] = struct_size(pattern_cmd, patterns, wowlan->n_patterns);
pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL);
if (!pattern_cmd)
@@ -1079,11 +1078,12 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
#endif
/*
- * TODO: this is needed because the firmware is not stopping
- * the recording automatically before entering D3. This can
- * be removed once the FW starts doing that.
+ * Prior to the 9000 device family the driver needs to stop the dbg
+ * recording before entering D3. In later devices the FW stops the
+ * recording automatically.
*/
- _iwl_fw_dbg_stop_recording(mvm->fwrt.trans, NULL);
+ if (mvm->trans->cfg->device_family < IWL_DEVICE_FAMILY_9000)
+ iwl_fw_dbg_stop_recording(mvm->trans, NULL);
/* must be last -- this switches firmware state */
ret = iwl_mvm_send_cmd(mvm, &d3_cfg_cmd);
@@ -1986,7 +1986,7 @@ static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac,
static int iwl_mvm_check_rt_status(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
- u32 base = mvm->trans->lmac_error_event_table[0];
+ u32 base = mvm->trans->dbg.lmac_error_event_table[0];
struct error_table_start {
/* cf. struct iwl_error_event_table */
u32 valid;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 5b1bb76c5d28..0c188a82cfc1 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -467,6 +467,46 @@ static ssize_t iwl_dbgfs_rs_data_read(struct file *file, char __user *user_buf,
return ret;
}
+static ssize_t iwl_dbgfs_amsdu_len_write(struct ieee80211_sta *sta,
+ char *buf, size_t count,
+ loff_t *ppos)
+{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ int i;
+ u16 amsdu_len;
+
+ if (kstrtou16(buf, 0, &amsdu_len))
+ return -EINVAL;
+
+ if (amsdu_len) {
+ mvmsta->orig_amsdu_len = sta->max_amsdu_len;
+ sta->max_amsdu_len = amsdu_len;
+ for (i = 0; i < ARRAY_SIZE(sta->max_tid_amsdu_len); i++)
+ sta->max_tid_amsdu_len[i] = amsdu_len;
+ } else {
+ sta->max_amsdu_len = mvmsta->orig_amsdu_len;
+ mvmsta->orig_amsdu_len = 0;
+ }
+ return count;
+}
+
+static ssize_t iwl_dbgfs_amsdu_len_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+ char buf[32];
+ int pos;
+
+ pos = scnprintf(buf, sizeof(buf), "current %d ", sta->max_amsdu_len);
+ pos += scnprintf(buf + pos, sizeof(buf) - pos, "stored %d\n",
+ mvmsta->orig_amsdu_len);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
static ssize_t iwl_dbgfs_disable_power_off_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
@@ -1356,24 +1396,6 @@ static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm,
return count;
}
-static ssize_t iwl_dbgfs_max_amsdu_len_write(struct iwl_mvm *mvm,
- char *buf, size_t count,
- loff_t *ppos)
-{
- unsigned int max_amsdu_len;
- int ret;
-
- ret = kstrtouint(buf, 0, &max_amsdu_len);
- if (ret)
- return ret;
-
- if (max_amsdu_len > IEEE80211_MAX_MPDU_LEN_VHT_11454)
- return -EINVAL;
- mvm->max_amsdu_len = max_amsdu_len;
-
- return count;
-}
-
#define ADD_TEXT(...) pos += scnprintf(buf + pos, bufsz - pos, __VA_ARGS__)
#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
static ssize_t iwl_dbgfs_bcast_filters_read(struct file *file,
@@ -1873,7 +1895,6 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(d0i3_refs, 8);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(fw_dbg_conf, 8);
MVM_DEBUGFS_WRITE_FILE_OPS(fw_dbg_collect, 64);
-MVM_DEBUGFS_WRITE_FILE_OPS(max_amsdu_len, 8);
MVM_DEBUGFS_WRITE_FILE_OPS(indirection_tbl,
(IWL_RSS_INDIRECTION_TABLE_SIZE * 2));
MVM_DEBUGFS_WRITE_FILE_OPS(inject_packet, 512);
@@ -1891,6 +1912,8 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters_macs, 256);
MVM_DEBUGFS_READ_FILE_OPS(sar_geo_profile);
#endif
+MVM_DEBUGFS_READ_WRITE_STA_FILE_OPS(amsdu_len, 16);
+
MVM_DEBUGFS_READ_WRITE_FILE_OPS(he_sniffer_params, 32);
static ssize_t iwl_dbgfs_mem_read(struct file *file, char __user *user_buf,
@@ -2032,8 +2055,10 @@ void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- if (iwl_mvm_has_tlc_offload(mvm))
+ if (iwl_mvm_has_tlc_offload(mvm)) {
MVM_DEBUGFS_ADD_STA_FILE(rs_data, dir, 0400);
+ }
+ MVM_DEBUGFS_ADD_STA_FILE(amsdu_len, dir, 0600);
}
void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
@@ -2069,7 +2094,6 @@ void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
MVM_DEBUGFS_ADD_FILE(d0i3_refs, mvm->debugfs_dir, 0600);
MVM_DEBUGFS_ADD_FILE(fw_dbg_conf, mvm->debugfs_dir, 0600);
MVM_DEBUGFS_ADD_FILE(fw_dbg_collect, mvm->debugfs_dir, 0200);
- MVM_DEBUGFS_ADD_FILE(max_amsdu_len, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(send_echo_cmd, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(indirection_tbl, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(inject_packet, mvm->debugfs_dir, 0200);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
index fec38a47696e..9f4b117db9d7 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
@@ -93,7 +93,7 @@ void iwl_mvm_ftm_restart(struct iwl_mvm *mvm)
struct cfg80211_pmsr_result result = {
.status = NL80211_PMSR_STATUS_FAILURE,
.final = 1,
- .host_time = ktime_get_boot_ns(),
+ .host_time = ktime_get_boottime_ns(),
.type = NL80211_PMSR_TYPE_FTM,
};
int i;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 153717587aeb..1d608e9e9101 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -238,7 +238,7 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
iwl_fw_lmac1_set_alive_err_table(mvm->trans, lmac_error_event_table);
if (lmac2)
- mvm->trans->lmac_error_event_table[1] =
+ mvm->trans->dbg.lmac_error_event_table[1] =
le32_to_cpu(lmac2->dbg_ptrs.error_event_table_ptr);
umac_error_event_table = le32_to_cpu(umac->dbg_ptrs.error_info_addr);
@@ -276,6 +276,8 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
le32_to_cpu(umac->umac_major),
le32_to_cpu(umac->umac_minor));
+ iwl_fwrt_update_fw_versions(&mvm->fwrt, lmac1, umac);
+
return true;
}
@@ -419,6 +421,8 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
lockdep_assert_held(&mvm->mutex);
+ mvm->rfkill_safe_init_done = false;
+
iwl_init_notification_wait(&mvm->notif_wait,
&init_wait,
init_complete,
@@ -537,8 +541,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
lockdep_assert_held(&mvm->mutex);
- if (WARN_ON_ONCE(mvm->rfkill_safe_init_done))
- return 0;
+ mvm->rfkill_safe_init_done = false;
iwl_init_notification_wait(&mvm->notif_wait,
&calib_wait,
@@ -681,15 +684,15 @@ static int iwl_mvm_sar_get_wrds_table(struct iwl_mvm *mvm)
{
union acpi_object *wifi_pkg, *table, *data;
bool enabled;
- int ret;
+ int ret, tbl_rev;
data = iwl_acpi_get_object(mvm->dev, ACPI_WRDS_METHOD);
if (IS_ERR(data))
return PTR_ERR(data);
wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
- ACPI_WRDS_WIFI_DATA_SIZE);
- if (IS_ERR(wifi_pkg)) {
+ ACPI_WRDS_WIFI_DATA_SIZE, &tbl_rev);
+ if (IS_ERR(wifi_pkg) || tbl_rev != 0) {
ret = PTR_ERR(wifi_pkg);
goto out_free;
}
@@ -718,15 +721,15 @@ static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
{
union acpi_object *wifi_pkg, *data;
bool enabled;
- int i, n_profiles, ret;
+ int i, n_profiles, ret, tbl_rev;
data = iwl_acpi_get_object(mvm->dev, ACPI_EWRD_METHOD);
if (IS_ERR(data))
return PTR_ERR(data);
wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
- ACPI_EWRD_WIFI_DATA_SIZE);
- if (IS_ERR(wifi_pkg)) {
+ ACPI_EWRD_WIFI_DATA_SIZE, &tbl_rev);
+ if (IS_ERR(wifi_pkg) || tbl_rev != 0) {
ret = PTR_ERR(wifi_pkg);
goto out_free;
}
@@ -777,7 +780,7 @@ out_free:
static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm *mvm)
{
union acpi_object *wifi_pkg, *data;
- int i, j, ret;
+ int i, j, ret, tbl_rev;
int idx = 1;
data = iwl_acpi_get_object(mvm->dev, ACPI_WGDS_METHOD);
@@ -785,12 +788,13 @@ static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm *mvm)
return PTR_ERR(data);
wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
- ACPI_WGDS_WIFI_DATA_SIZE);
- if (IS_ERR(wifi_pkg)) {
+ ACPI_WGDS_WIFI_DATA_SIZE, &tbl_rev);
+ if (IS_ERR(wifi_pkg) || tbl_rev > 1) {
ret = PTR_ERR(wifi_pkg);
goto out_free;
}
+ mvm->geo_rev = tbl_rev;
for (i = 0; i < ACPI_NUM_GEO_PROFILES; i++) {
for (j = 0; j < ACPI_GEO_TABLE_SIZE; j++) {
union acpi_object *entry;
@@ -858,6 +862,9 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
return -ENOENT;
}
+ IWL_DEBUG_INFO(mvm,
+ "SAR EWRD: chain %d profile index %d\n",
+ i, profs[i]);
IWL_DEBUG_RADIO(mvm, " Chain[%d]:\n", i);
for (j = 0; j < ACPI_SAR_NUM_SUB_BANDS; j++) {
idx = (i * ACPI_SAR_NUM_SUB_BANDS) + j;
@@ -877,15 +884,29 @@ int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
{
struct iwl_geo_tx_power_profiles_resp *resp;
int ret;
+ u16 len;
+ void *data;
+ struct iwl_geo_tx_power_profiles_cmd geo_cmd;
+ struct iwl_geo_tx_power_profiles_cmd_v1 geo_cmd_v1;
+ struct iwl_host_cmd cmd;
+
+ if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_SAR_TABLE_VER)) {
+ geo_cmd.ops =
+ cpu_to_le32(IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE);
+ len = sizeof(geo_cmd);
+ data = &geo_cmd;
+ } else {
+ geo_cmd_v1.ops =
+ cpu_to_le32(IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE);
+ len = sizeof(geo_cmd_v1);
+ data = &geo_cmd_v1;
+ }
- struct iwl_geo_tx_power_profiles_cmd geo_cmd = {
- .ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE),
- };
- struct iwl_host_cmd cmd = {
+ cmd = (struct iwl_host_cmd){
.id = WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT),
- .len = { sizeof(geo_cmd), },
+ .len = { len, },
.flags = CMD_WANT_SKB,
- .data = { &geo_cmd },
+ .data = { data },
};
ret = iwl_mvm_send_cmd(mvm, &cmd);
@@ -955,6 +976,16 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
i, j, value[1], value[2], value[0]);
}
}
+
+ cmd.table_revision = cpu_to_le32(mvm->geo_rev);
+
+ if (!fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_SAR_TABLE_VER)) {
+ return iwl_mvm_send_cmd_pdu(mvm, cmd_wide_id, 0,
+ sizeof(struct iwl_geo_tx_power_profiles_cmd_v1),
+ &cmd);
+ }
+
return iwl_mvm_send_cmd_pdu(mvm, cmd_wide_id, 0, sizeof(cmd), &cmd);
}
@@ -1108,10 +1139,13 @@ static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm)
iwl_fw_dbg_apply_point(&mvm->fwrt, IWL_FW_INI_APPLY_EARLY);
+ mvm->rfkill_safe_init_done = false;
ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
if (ret)
return ret;
+ mvm->rfkill_safe_init_done = true;
+
iwl_fw_dbg_apply_point(&mvm->fwrt, IWL_FW_INI_APPLY_AFTER_ALIVE);
return iwl_init_paging(&mvm->fwrt, mvm->fwrt.cur_fw_img);
@@ -1144,7 +1178,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
if (ret)
IWL_ERR(mvm, "Failed to initialize Smart Fifo\n");
- if (!mvm->trans->ini_valid) {
+ if (!mvm->trans->dbg.ini_valid) {
mvm->fwrt.dump.conf = FW_DBG_INVALID;
/* if we have a destination, assume EARLY START */
if (mvm->fw->dbg.dest_tlv)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index 53c217af13c8..cb22d447fcb8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -558,15 +558,16 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
u8 txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, i);
+ u8 ucode_ac = iwl_mvm_mac80211_ac_to_ucode_ac(i);
- cmd->ac[txf].cw_min =
+ cmd->ac[ucode_ac].cw_min =
cpu_to_le16(mvmvif->queue_params[i].cw_min);
- cmd->ac[txf].cw_max =
+ cmd->ac[ucode_ac].cw_max =
cpu_to_le16(mvmvif->queue_params[i].cw_max);
- cmd->ac[txf].edca_txop =
+ cmd->ac[ucode_ac].edca_txop =
cpu_to_le16(mvmvif->queue_params[i].txop * 32);
- cmd->ac[txf].aifsn = mvmvif->queue_params[i].aifs;
- cmd->ac[txf].fifos_mask = BIT(txf);
+ cmd->ac[ucode_ac].aifsn = mvmvif->queue_params[i].aifs;
+ cmd->ac[ucode_ac].fifos_mask = BIT(txf);
}
if (vif->bss_conf.qos)
@@ -678,7 +679,7 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
if (vif->bss_conf.he_support && !iwlwifi_mod_params.disable_11ax) {
cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_11AX);
- if (vif->bss_conf.twt_requester)
+ if (vif->bss_conf.twt_requester && IWL_MVM_USE_TWT)
ctxt_sta->data_policy |= cpu_to_le32(TWT_SUPPORTED);
}
@@ -1081,9 +1082,6 @@ static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm,
IWL_DEBUG_HC(mvm, "No need to receive beacons\n");
}
- if (vif->bss_conf.he_support && !iwlwifi_mod_params.disable_11ax)
- cmd->filter_flags |= cpu_to_le32(MAC_FILTER_IN_11AX);
-
ctxt_ap->bi = cpu_to_le32(vif->bss_conf.beacon_int);
ctxt_ap->dtim_interval = cpu_to_le32(vif->bss_conf.beacon_int *
vif->bss_conf.dtim_period);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index fdbabca0280e..55cd49ccbf0b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -207,6 +207,12 @@ static const struct cfg80211_pmsr_capabilities iwl_mvm_pmsr_capa = {
},
};
+static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
+ enum set_key_cmd cmd,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key);
+
void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
{
if (!iwl_mvm_is_d0i3_supported(mvm))
@@ -1439,7 +1445,7 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
*/
clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
- iwl_fw_cancel_dump(&mvm->fwrt);
+ iwl_fw_cancel_dumps(&mvm->fwrt);
cancel_delayed_work_sync(&mvm->cs_tx_unblock_dwork);
cancel_delayed_work_sync(&mvm->scan_timeout_dwork);
iwl_fw_free_dump_desc(&mvm->fwrt);
@@ -2365,22 +2371,23 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
/* Mark MU EDCA as enabled, unless none detected on some AC */
flags |= STA_CTXT_HE_MU_EDCA_CW;
- for (i = 0; i < AC_NUM; i++) {
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
struct ieee80211_he_mu_edca_param_ac_rec *mu_edca =
&mvmvif->queue_params[i].mu_edca_param_rec;
+ u8 ac = iwl_mvm_mac80211_ac_to_ucode_ac(i);
if (!mvmvif->queue_params[i].mu_edca) {
flags &= ~STA_CTXT_HE_MU_EDCA_CW;
break;
}
- sta_ctxt_cmd.trig_based_txf[i].cwmin =
+ sta_ctxt_cmd.trig_based_txf[ac].cwmin =
cpu_to_le16(mu_edca->ecw_min_max & 0xf);
- sta_ctxt_cmd.trig_based_txf[i].cwmax =
+ sta_ctxt_cmd.trig_based_txf[ac].cwmax =
cpu_to_le16((mu_edca->ecw_min_max & 0xf0) >> 4);
- sta_ctxt_cmd.trig_based_txf[i].aifsn =
+ sta_ctxt_cmd.trig_based_txf[ac].aifsn =
cpu_to_le16(mu_edca->aifsn);
- sta_ctxt_cmd.trig_based_txf[i].mu_time =
+ sta_ctxt_cmd.trig_based_txf[ac].mu_time =
cpu_to_le16(mu_edca->mu_edca_timer);
}
@@ -2636,7 +2643,7 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- int ret;
+ int ret, i;
/*
* iwl_mvm_mac_ctxt_add() might read directly from the device
@@ -2710,6 +2717,20 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
/* must be set before quota calculations */
mvmvif->ap_ibss_active = true;
+ /* send all the early keys to the device now */
+ for (i = 0; i < ARRAY_SIZE(mvmvif->ap_early_keys); i++) {
+ struct ieee80211_key_conf *key = mvmvif->ap_early_keys[i];
+
+ if (!key)
+ continue;
+
+ mvmvif->ap_early_keys[i] = NULL;
+
+ ret = iwl_mvm_mac_set_key(hw, SET_KEY, vif, NULL, key);
+ if (ret)
+ goto out_quota_failed;
+ }
+
if (vif->type == NL80211_IFTYPE_AP && !vif->p2p) {
iwl_mvm_vif_set_low_latency(mvmvif, true,
LOW_LATENCY_VIF_TYPE);
@@ -3479,11 +3500,12 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *key)
{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_mvm_sta *mvmsta;
struct iwl_mvm_key_pn *ptk_pn;
int keyidx = key->keyidx;
- int ret;
+ int ret, i;
u8 key_offset;
if (iwlwifi_mod_params.swcrypto) {
@@ -3556,6 +3578,22 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
key->hw_key_idx = STA_KEY_IDX_INVALID;
break;
}
+
+ if (!mvmvif->ap_ibss_active) {
+ for (i = 0;
+ i < ARRAY_SIZE(mvmvif->ap_early_keys);
+ i++) {
+ if (!mvmvif->ap_early_keys[i]) {
+ mvmvif->ap_early_keys[i] = key;
+ break;
+ }
+ }
+
+ if (i >= ARRAY_SIZE(mvmvif->ap_early_keys))
+ ret = -ENOSPC;
+
+ break;
+ }
}
/* During FW restart, in order to restore the state as it was,
@@ -3624,6 +3662,18 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
break;
case DISABLE_KEY:
+ ret = -ENOENT;
+ for (i = 0; i < ARRAY_SIZE(mvmvif->ap_early_keys); i++) {
+ if (mvmvif->ap_early_keys[i] == key) {
+ mvmvif->ap_early_keys[i] = NULL;
+ ret = 0;
+ }
+ }
+
+ /* found in pending list - don't do anything else */
+ if (ret == 0)
+ break;
+
if (key->hw_key_idx == STA_KEY_IDX_INVALID) {
ret = 0;
break;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 02efcf2189c4..48c77af54e99 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -501,6 +501,9 @@ struct iwl_mvm_vif {
netdev_features_t features;
struct iwl_probe_resp_data __rcu *probe_resp_data;
+
+ /* we can only have 2 GTK + 2 IGTK active at a time */
+ struct ieee80211_key_conf *ap_early_keys[4];
};
static inline struct iwl_mvm_vif *
@@ -1107,7 +1110,6 @@ struct iwl_mvm {
u8 ps_disabled; /* u8 instead of bool to ease debugfs_create_* usage */
/* Indicate if 32Khz external clock is valid */
u32 ext_clock_valid;
- unsigned int max_amsdu_len; /* used for debugfs only */
struct ieee80211_vif __rcu *csa_vif;
struct ieee80211_vif __rcu *csa_tx_blocked_vif;
@@ -1181,6 +1183,7 @@ struct iwl_mvm {
#ifdef CONFIG_ACPI
struct iwl_mvm_sar_profile sar_profiles[ACPI_SAR_PROFILE_NUM];
struct iwl_mvm_geo_profile geo_profiles[ACPI_NUM_GEO_PROFILES];
+ u32 geo_rev;
#endif
};
@@ -1307,6 +1310,12 @@ static inline bool iwl_mvm_is_adaptive_dwell_v2_supported(struct iwl_mvm *mvm)
IWL_UCODE_TLV_API_ADAPTIVE_DWELL_V2);
}
+static inline bool iwl_mvm_is_adwell_hb_ap_num_supported(struct iwl_mvm *mvm)
+{
+ return fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_ADWELL_HB_DEF_N_AP);
+}
+
static inline bool iwl_mvm_is_oce_supported(struct iwl_mvm *mvm)
{
/* OCE should never be enabled for LMAC scan FWs */
@@ -1532,6 +1541,7 @@ void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
enum nl80211_band band,
struct ieee80211_tx_rate *r);
u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx);
+u8 iwl_mvm_mac80211_ac_to_ucode_ac(enum ieee80211_ac_numbers ac);
void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm);
u8 first_antenna(u8 mask);
u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
index 7bdbd010ae6b..719f793b3487 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
@@ -620,6 +620,7 @@ void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
enum iwl_mcc_source src;
char mcc[3];
struct ieee80211_regdomain *regd;
+ u32 wgds_tbl_idx;
lockdep_assert_held(&mvm->mutex);
@@ -643,6 +644,14 @@ void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
if (IS_ERR_OR_NULL(regd))
return;
+ wgds_tbl_idx = iwl_mvm_get_sar_geo_profile(mvm);
+ if (wgds_tbl_idx < 0)
+ IWL_DEBUG_INFO(mvm, "SAR WGDS is disabled (%d)\n",
+ wgds_tbl_idx);
+ else
+ IWL_DEBUG_INFO(mvm, "SAR WGDS: geo profile %d is configured\n",
+ wgds_tbl_idx);
+
regulatory_set_wiphy_regd(mvm->hw->wiphy, regd);
kfree(regd);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index fad3bf563712..d7d6f3398f86 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -564,24 +564,24 @@ unlock:
static int iwl_mvm_fwrt_dump_start(void *ctx)
{
struct iwl_mvm *mvm = ctx;
- int ret;
+ int ret = 0;
+
+ mutex_lock(&mvm->mutex);
ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_FW_DBG_COLLECT);
if (ret)
- return ret;
-
- mutex_lock(&mvm->mutex);
+ mutex_unlock(&mvm->mutex);
- return 0;
+ return ret;
}
static void iwl_mvm_fwrt_dump_end(void *ctx)
{
struct iwl_mvm *mvm = ctx;
- mutex_unlock(&mvm->mutex);
-
iwl_mvm_unref(mvm, IWL_MVM_REF_FW_DBG_COLLECT);
+
+ mutex_unlock(&mvm->mutex);
}
static bool iwl_mvm_fwrt_fw_running(void *ctx)
@@ -799,11 +799,11 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
iwl_trans_configure(mvm->trans, &trans_cfg);
trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
- trans->dbg_dest_tlv = mvm->fw->dbg.dest_tlv;
- trans->dbg_n_dest_reg = mvm->fw->dbg.n_dest_reg;
- memcpy(trans->dbg_conf_tlv, mvm->fw->dbg.conf_tlv,
- sizeof(trans->dbg_conf_tlv));
- trans->dbg_trigger_tlv = mvm->fw->dbg.trigger_tlv;
+ trans->dbg.dest_tlv = mvm->fw->dbg.dest_tlv;
+ trans->dbg.n_dest_reg = mvm->fw->dbg.n_dest_reg;
+ memcpy(trans->dbg.conf_tlv, mvm->fw->dbg.conf_tlv,
+ sizeof(trans->dbg.conf_tlv));
+ trans->dbg.trigger_tlv = mvm->fw->dbg.trigger_tlv;
trans->iml = mvm->fw->iml;
trans->iml_len = mvm->fw->iml_len;
@@ -880,7 +880,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
return op_mode;
out_free:
- iwl_fw_flush_dump(&mvm->fwrt);
+ iwl_fw_flush_dumps(&mvm->fwrt);
iwl_fw_runtime_free(&mvm->fwrt);
if (iwlmvm_mod_params.init_dbg)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
index be62f499c595..08b67812e94e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
@@ -101,7 +101,7 @@ static u8 rs_fw_sgi_cw_support(struct ieee80211_sta *sta)
struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
u8 supp = 0;
- if (he_cap && he_cap->has_he)
+ if (he_cap->has_he)
return 0;
if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
@@ -123,12 +123,12 @@ static u16 rs_fw_get_config_flags(struct iwl_mvm *mvm,
struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
- bool vht_ena = vht_cap && vht_cap->vht_supported;
+ bool vht_ena = vht_cap->vht_supported;
u16 flags = 0;
if (mvm->cfg->ht_params->stbc &&
(num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1)) {
- if (he_cap && he_cap->has_he) {
+ if (he_cap->has_he) {
if (he_cap->he_cap_elem.phy_cap_info[2] &
IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ)
flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
@@ -136,15 +136,14 @@ static u16 rs_fw_get_config_flags(struct iwl_mvm *mvm,
if (he_cap->he_cap_elem.phy_cap_info[7] &
IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ)
flags |= IWL_TLC_MNG_CFG_FLAGS_HE_STBC_160MHZ_MSK;
- } else if ((ht_cap &&
- (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC)) ||
+ } else if ((ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) ||
(vht_ena &&
(vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK)))
flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
}
if (mvm->cfg->ht_params->ldpc &&
- ((ht_cap && (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)) ||
+ ((ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING) ||
(vht_ena && (vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC))))
flags |= IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK;
@@ -154,7 +153,7 @@ static u16 rs_fw_get_config_flags(struct iwl_mvm *mvm,
IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD))
flags &= ~IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK;
- if (he_cap && he_cap->has_he &&
+ if (he_cap->has_he &&
(he_cap->he_cap_elem.phy_cap_info[3] &
IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK))
flags |= IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_1_MSK;
@@ -293,13 +292,13 @@ static void rs_fw_set_supp_rates(struct ieee80211_sta *sta,
cmd->mode = IWL_TLC_MNG_MODE_NON_HT;
/* HT/VHT rates */
- if (he_cap && he_cap->has_he) {
+ if (he_cap->has_he) {
cmd->mode = IWL_TLC_MNG_MODE_HE;
rs_fw_he_set_enabled_rates(sta, sband, cmd);
- } else if (vht_cap && vht_cap->vht_supported) {
+ } else if (vht_cap->vht_supported) {
cmd->mode = IWL_TLC_MNG_MODE_VHT;
rs_fw_vht_set_enabled_rates(sta, vht_cap, cmd);
- } else if (ht_cap && ht_cap->ht_supported) {
+ } else if (ht_cap->ht_supported) {
cmd->mode = IWL_TLC_MNG_MODE_HT;
cmd->ht_rates[0][0] = cpu_to_le16(ht_cap->mcs.rx_mask[0]);
cmd->ht_rates[1][0] = cpu_to_le16(ht_cap->mcs.rx_mask[1]);
@@ -344,7 +343,7 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm,
lq_sta->last_rate_n_flags);
}
- if (flags & IWL_TLC_NOTIF_FLAG_AMSDU) {
+ if (flags & IWL_TLC_NOTIF_FLAG_AMSDU && !mvmsta->orig_amsdu_len) {
u16 size = le32_to_cpu(notif->amsdu_size);
int i;
@@ -381,7 +380,7 @@ static u16 rs_fw_get_max_amsdu_len(struct ieee80211_sta *sta)
const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
- if (vht_cap && vht_cap->vht_supported) {
+ if (vht_cap->vht_supported) {
switch (vht_cap->cap & IEEE80211_VHT_CAP_MAX_MPDU_MASK) {
case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454:
return IEEE80211_MAX_MPDU_LEN_VHT_11454;
@@ -391,7 +390,7 @@ static u16 rs_fw_get_max_amsdu_len(struct ieee80211_sta *sta)
return IEEE80211_MAX_MPDU_LEN_VHT_3895;
}
- } else if (ht_cap && ht_cap->ht_supported) {
+ } else if (ht_cap->ht_supported) {
if (ht_cap->cap & IEEE80211_HT_CAP_MAX_AMSDU)
/*
* agg is offloaded so we need to assume that agg
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index 63fdb4e68e9d..8c9069f28a58 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -2949,10 +2949,6 @@ static void rs_drv_get_rate(void *mvm_r, struct ieee80211_sta *sta,
mvm_sta = NULL;
}
- /* Send management frames and NO_ACK data using lowest rate. */
- if (rate_control_send_low(sta, mvm_sta, txrc))
- return;
-
if (!mvm_sta)
return;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index fbd3014e8b82..160b0db27103 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -555,7 +555,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
if (unlikely(ieee80211_is_beacon(hdr->frame_control) ||
ieee80211_is_probe_resp(hdr->frame_control)))
- rx_status->boottime_ns = ktime_get_boot_ns();
+ rx_status->boottime_ns = ktime_get_boottime_ns();
/* Take a reference briefly to kick off a d0i3 entry delay so
* we can handle bursts of RX packets without toggling the
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 1824566d08fc..64f950501287 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -1684,7 +1684,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
if (unlikely(ieee80211_is_beacon(hdr->frame_control) ||
ieee80211_is_probe_resp(hdr->frame_control)))
- rx_status->boottime_ns = ktime_get_boot_ns();
+ rx_status->boottime_ns = ktime_get_boottime_ns();
}
if (iwl_mvm_create_skb(mvm, skb, hdr, len, crypt_len, rxb)) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index d9ddf9ff6428..c284e6975b1b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -83,8 +83,10 @@
#define IWL_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300
/* adaptive dwell max budget time [TU] for directed scan */
#define IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN 100
-/* adaptive dwell default APs number */
-#define IWL_SCAN_ADWELL_DEFAULT_N_APS 2
+/* adaptive dwell default high band APs number */
+#define IWL_SCAN_ADWELL_DEFAULT_HB_N_APS 8
+/* adaptive dwell default low band APs number */
+#define IWL_SCAN_ADWELL_DEFAULT_LB_N_APS 2
/* adaptive dwell default APs number in social channels (1, 6, 11) */
#define IWL_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL 10
@@ -1288,7 +1290,11 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
cmd->v7.adwell_default_n_aps_social =
IWL_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL;
cmd->v7.adwell_default_n_aps =
- IWL_SCAN_ADWELL_DEFAULT_N_APS;
+ IWL_SCAN_ADWELL_DEFAULT_LB_N_APS;
+
+ if (iwl_mvm_is_adwell_hb_ap_num_supported(mvm))
+ cmd->v9.adwell_default_hb_n_aps =
+ IWL_SCAN_ADWELL_DEFAULT_HB_N_APS;
/* if custom max budget was configured with debugfs */
if (IWL_MVM_ADWELL_MAX_BUDGET)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index b4d4071b865d..4487cc3e07c1 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -386,6 +386,9 @@ struct iwl_mvm_rxq_dup_data {
* @amsdu_enabled: bitmap of TX AMSDU allowed TIDs.
* In case TLC offload is not active it is either 0xFFFF or 0.
* @max_amsdu_len: max AMSDU length
+ * @orig_amsdu_len: used to save the original amsdu_len when it is changed via
+ * debugfs. If it's set to 0, it means that it was not set via
+ * debugfs.
* @agg_tids: bitmap of tids whose status is operational aggregated (IWL_AGG_ON)
* @sleep_tx_count: the number of frames that we told the firmware to let out
* even when that station is asleep. This is useful in case the queue
@@ -434,6 +437,7 @@ struct iwl_mvm_sta {
bool disable_tx;
u16 amsdu_enabled;
u16 max_amsdu_len;
+ u16 orig_amsdu_len;
bool sleeping;
u8 agg_tids;
u8 sleep_tx_count;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 0c2aabc842f9..a3e5d88f1c07 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -726,6 +726,9 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
memcpy(&info, skb->cb, sizeof(info));
+ if (WARN_ON_ONCE(skb->len > IEEE80211_MAX_DATA_LEN + hdrlen))
+ return -1;
+
if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_AMPDU))
return -1;
@@ -893,18 +896,15 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
unsigned int mss = skb_shinfo(skb)->gso_size;
unsigned int num_subframes, tcp_payload_len, subf_len, max_amsdu_len;
u16 snap_ip_tcp, pad;
- unsigned int dbg_max_amsdu_len;
netdev_features_t netdev_flags = NETIF_F_CSUM_MASK | NETIF_F_SG;
u8 tid;
snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) +
tcp_hdrlen(skb);
- dbg_max_amsdu_len = READ_ONCE(mvm->max_amsdu_len);
-
if (!mvmsta->max_amsdu_len ||
!ieee80211_is_data_qos(hdr->frame_control) ||
- (!mvmsta->amsdu_enabled && !dbg_max_amsdu_len))
+ !mvmsta->amsdu_enabled)
return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
/*
@@ -936,10 +936,6 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
max_amsdu_len = iwl_mvm_max_amsdu_size(mvm, sta, tid);
- if (unlikely(dbg_max_amsdu_len))
- max_amsdu_len = min_t(unsigned int, max_amsdu_len,
- dbg_max_amsdu_len);
-
/*
* Limit A-MSDU in A-MPDU to 4095 bytes when VHT is not
* supported. This is a spec requirement (IEEE 802.11-2015
@@ -1063,7 +1059,9 @@ static int iwl_mvm_tx_pkt_queued(struct iwl_mvm *mvm,
}
/*
- * Sets the fields in the Tx cmd that are crypto related
+ * Sets the fields in the Tx cmd that are crypto related.
+ *
+ * This function must be called with BHs disabled.
*/
static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
struct ieee80211_tx_info *info,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index cc56ab88fb43..9ecd5f09615a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -238,6 +238,18 @@ u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx)
return fw_rate_idx_to_plcp[rate_idx];
}
+u8 iwl_mvm_mac80211_ac_to_ucode_ac(enum ieee80211_ac_numbers ac)
+{
+ static const u8 mac80211_ac_to_ucode_ac[] = {
+ AC_VO,
+ AC_VI,
+ AC_BE,
+ AC_BK
+ };
+
+ return mac80211_ac_to_ucode_ac[ac];
+}
+
void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
@@ -457,10 +469,10 @@ static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm)
{
struct iwl_trans *trans = mvm->trans;
struct iwl_umac_error_event_table table;
- u32 base = mvm->trans->umac_error_event_table;
+ u32 base = mvm->trans->dbg.umac_error_event_table;
if (!mvm->support_umac_log &&
- !(mvm->trans->error_event_table_tlv_status &
+ !(mvm->trans->dbg.error_event_table_tlv_status &
IWL_ERROR_EVENT_TABLE_UMAC))
return;
@@ -496,7 +508,7 @@ static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u8 lmac_num)
{
struct iwl_trans *trans = mvm->trans;
struct iwl_error_event_table table;
- u32 val, base = mvm->trans->lmac_error_event_table[lmac_num];
+ u32 val, base = mvm->trans->dbg.lmac_error_event_table[lmac_num];
if (mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) {
if (!base)
@@ -592,7 +604,7 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
iwl_mvm_dump_lmac_error_log(mvm, 0);
- if (mvm->trans->lmac_error_event_table[1])
+ if (mvm->trans->dbg.lmac_error_event_table[1])
iwl_mvm_dump_lmac_error_log(mvm, 1);
iwl_mvm_dump_umac_error_log(mvm);
@@ -1445,7 +1457,7 @@ void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, u32 *gp2, u64 *boottime)
}
*gp2 = iwl_mvm_get_systime(mvm);
- *boottime = ktime_get_boot_ns();
+ *boottime = ktime_get_boottime_ns();
if (!ps_disabled) {
mvm->ps_disabled = ps_disabled;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
index f496d1bcb643..5e86783d616b 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
@@ -96,13 +96,13 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
cpu_to_le64(trans_pcie->rxq->bd_dma);
/* Configure debug, for integration */
- if (!trans->ini_valid)
+ if (!trans->dbg.ini_valid)
iwl_pcie_alloc_fw_monitor(trans, 0);
- if (trans->num_blocks) {
+ if (trans->dbg.num_blocks) {
prph_sc_ctrl->hwm_cfg.hwm_base_addr =
- cpu_to_le64(trans->fw_mon[0].physical);
+ cpu_to_le64(trans->dbg.fw_mon[0].physical);
prph_sc_ctrl->hwm_cfg.hwm_size =
- cpu_to_le32(trans->fw_mon[0].size);
+ cpu_to_le32(trans->dbg.fw_mon[0].size);
}
/* allocate ucode sections in dram and set addresses */
@@ -169,7 +169,7 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
memcpy(iml_img, trans->iml, trans->iml_len);
- iwl_enable_interrupts(trans);
+ iwl_enable_fw_load_int_ctx_info(trans);
/* kick FW self load */
iwl_write64(trans, CSR_CTXT_INFO_ADDR,
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
index 8969b47bacf2..d38cefbb779e 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
@@ -222,7 +222,7 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
trans_pcie->ctxt_info = ctxt_info;
- iwl_enable_interrupts(trans);
+ iwl_enable_fw_load_int_ctx_info(trans);
/* Configure debug, if exists */
if (iwl_pcie_dbg_on(trans))
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index cd035061cdd5..ccc83fd74649 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -513,62 +513,56 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8265_2ac_cfg)},
/* 9000 Series */
- {IWL_PCI_DEVICE(0x02F0, 0x0030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x0034, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x0038, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x003C, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x0040, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x02F0, 0x0044, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x02F0, 0x0060, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x0064, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x00A0, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x00A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x0230, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x0234, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x0238, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x023C, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x0244, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x02F0, 0x0260, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x0264, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x02A0, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x02A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x2030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x2034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x4030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x4034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x40A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x4234, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x42A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x0030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x0034, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x0038, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x003C, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x0040, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x06F0, 0x0044, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x06F0, 0x0060, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x0064, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x00A0, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x00A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x0230, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x0234, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x0238, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x023C, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x0244, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x06F0, 0x0260, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x0264, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x02A0, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x02A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x2030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x2034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x4030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x4034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x40A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x4234, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x42A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0034, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0038, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x003C, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0060, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0064, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x00A0, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x00A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0230, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0234, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0238, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x023C, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0260, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0264, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x02A0, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x02A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x1551, iwl9560_killer_s_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x1552, iwl9560_killer_i_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x2030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x2034, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x4030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x4034, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x40A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x4234, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x42A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0034, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0038, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x003C, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0060, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0064, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x00A0, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x00A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0230, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0234, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0238, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x023C, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0260, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0264, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x02A0, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x02A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x1551, iwl9560_killer_s_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x1552, iwl9560_killer_i_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x2030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x2034, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x4030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x4034, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x40A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x4234, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x42A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0014, iwl9260_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0018, iwl9260_2ac_160_cfg)},
@@ -621,7 +615,6 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2720, 0x0034, iwl9560_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2720, 0x0038, iwl9560_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2720, 0x003C, iwl9560_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2720, 0x0044, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0060, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x0064, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x00A0, iwl9462_2ac_cfg_soc)},
@@ -630,7 +623,6 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2720, 0x0234, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x2720, 0x0238, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x2720, 0x023C, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2720, 0x0244, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0260, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x0264, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x02A0, iwl9462_2ac_cfg_soc)},
@@ -708,7 +700,6 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x34F0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x0044, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x34F0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
@@ -717,7 +708,6 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x34F0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x0244, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x34F0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
@@ -764,7 +754,6 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x43F0, 0x0034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x0038, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x003C, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x0044, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x43F0, 0x0060, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x0064, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x00A0, iwl9462_2ac_cfg_soc)},
@@ -773,7 +762,6 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x43F0, 0x0234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x0238, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x023C, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x0244, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x43F0, 0x0260, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x0264, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x02A0, iwl9462_2ac_cfg_soc)},
@@ -833,7 +821,6 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0xA0F0, 0x0034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x0038, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x003C, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0044, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0xA0F0, 0x0060, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x0064, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x00A0, iwl9462_2ac_cfg_soc)},
@@ -842,7 +829,6 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0xA0F0, 0x0234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x0238, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x023C, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0244, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0xA0F0, 0x0260, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x0264, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x02A0, iwl9462_2ac_cfg_soc)},
@@ -890,63 +876,80 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2720, 0x0030, iwl9560_2ac_cfg_qnj_jf_b0)},
/* 22000 Series */
- {IWL_PCI_DEVICE(0x02F0, 0x0070, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x02F0, 0x0074, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x02F0, 0x0078, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x02F0, 0x007C, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x02F0, 0x0310, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x02F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)},
- {IWL_PCI_DEVICE(0x02F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
- {IWL_PCI_DEVICE(0x02F0, 0x4070, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x06F0, 0x0070, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x06F0, 0x0074, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x06F0, 0x0078, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x06F0, 0x007C, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x06F0, 0x0310, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x06F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)},
- {IWL_PCI_DEVICE(0x06F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
- {IWL_PCI_DEVICE(0x06F0, 0x4070, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0070, iwl_ax201_cfg_quz_hr)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0074, iwl_ax201_cfg_quz_hr)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0078, iwl_ax201_cfg_quz_hr)},
+ {IWL_PCI_DEVICE(0x02F0, 0x007C, iwl_ax201_cfg_quz_hr)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0244, iwl_ax101_cfg_quz_hr)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0310, iwl_ax201_cfg_quz_hr)},
+ {IWL_PCI_DEVICE(0x02F0, 0x1651, iwl_ax1650s_cfg_quz_hr)},
+ {IWL_PCI_DEVICE(0x02F0, 0x1652, iwl_ax1650i_cfg_quz_hr)},
+ {IWL_PCI_DEVICE(0x02F0, 0x2074, iwl_ax201_cfg_quz_hr)},
+ {IWL_PCI_DEVICE(0x02F0, 0x4070, iwl_ax201_cfg_quz_hr)},
+ {IWL_PCI_DEVICE(0x02F0, 0x4244, iwl_ax101_cfg_quz_hr)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0070, iwl_ax201_cfg_quz_hr)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0074, iwl_ax201_cfg_quz_hr)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0078, iwl_ax201_cfg_quz_hr)},
+ {IWL_PCI_DEVICE(0x06F0, 0x007C, iwl_ax201_cfg_quz_hr)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0244, iwl_ax101_cfg_quz_hr)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0310, iwl_ax201_cfg_quz_hr)},
+ {IWL_PCI_DEVICE(0x06F0, 0x1651, iwl_ax1650s_cfg_quz_hr)},
+ {IWL_PCI_DEVICE(0x06F0, 0x1652, iwl_ax1650i_cfg_quz_hr)},
+ {IWL_PCI_DEVICE(0x06F0, 0x2074, iwl_ax201_cfg_quz_hr)},
+ {IWL_PCI_DEVICE(0x06F0, 0x4070, iwl_ax201_cfg_quz_hr)},
+ {IWL_PCI_DEVICE(0x06F0, 0x4244, iwl_ax101_cfg_quz_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0000, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0040, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x2720, 0x0070, iwl22000_2ac_cfg_hr_cdb)},
- {IWL_PCI_DEVICE(0x2720, 0x0074, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x2720, 0x0078, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x2720, 0x007C, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x2720, 0x0044, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x2720, 0x0070, iwl_ax201_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x2720, 0x0074, iwl_ax201_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x2720, 0x0078, iwl_ax201_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x2720, 0x007C, iwl_ax201_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0090, iwl22000_2ac_cfg_hr_cdb)},
- {IWL_PCI_DEVICE(0x2720, 0x0310, iwl22000_2ac_cfg_hr_cdb)},
- {IWL_PCI_DEVICE(0x2720, 0x0A10, iwl22000_2ac_cfg_hr_cdb)},
+ {IWL_PCI_DEVICE(0x2720, 0x0244, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x2720, 0x0310, iwl_ax201_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x2720, 0x0A10, iwl_ax201_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x2720, 0x1080, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x2720, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)},
{IWL_PCI_DEVICE(0x2720, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
- {IWL_PCI_DEVICE(0x2720, 0x4070, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x34F0, 0x0040, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x34F0, 0x0070, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x34F0, 0x0074, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x34F0, 0x0078, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x34F0, 0x007C, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x34F0, 0x0310, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x2720, 0x2074, iwl_ax201_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x2720, 0x4070, iwl_ax201_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x2720, 0x4244, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0044, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0070, iwl_ax201_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0074, iwl_ax201_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0078, iwl_ax201_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x34F0, 0x007C, iwl_ax201_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0244, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0310, iwl_ax201_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x34F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x4070, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x43F0, 0x0040, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x43F0, 0x0070, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x43F0, 0x0074, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x43F0, 0x0078, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x43F0, 0x007C, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x34F0, 0x2074, iwl_ax201_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x34F0, 0x4070, iwl_ax201_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x34F0, 0x4244, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0044, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0070, iwl_ax201_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0074, iwl_ax201_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0078, iwl_ax201_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x43F0, 0x007C, iwl_ax201_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0244, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x43F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)},
{IWL_PCI_DEVICE(0x43F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
- {IWL_PCI_DEVICE(0x43F0, 0x4070, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0000, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0040, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0070, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0074, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0078, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0xA0F0, 0x007C, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0xA0F0, 0x00B0, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0A10, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x43F0, 0x2074, iwl_ax201_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x43F0, 0x4070, iwl_ax201_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x43F0, 0x4244, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0044, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0070, iwl_ax201_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0074, iwl_ax201_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0078, iwl_ax201_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x007C, iwl_ax201_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0244, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0A10, iwl_ax201_cfg_qu_hr)},
{IWL_PCI_DEVICE(0xA0F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)},
{IWL_PCI_DEVICE(0xA0F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
- {IWL_PCI_DEVICE(0xA0F0, 0x4070, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x2074, iwl_ax201_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x4070, iwl_ax201_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x4244, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x2723, 0x0080, iwl_ax200_cfg_cc)},
{IWL_PCI_DEVICE(0x2723, 0x0084, iwl_ax200_cfg_cc)},
@@ -958,13 +961,19 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2723, 0x4080, iwl_ax200_cfg_cc)},
{IWL_PCI_DEVICE(0x2723, 0x4088, iwl_ax200_cfg_cc)},
- {IWL_PCI_DEVICE(0x2725, 0x0090, iwlax210_2ax_cfg_so_hr_a0)},
- {IWL_PCI_DEVICE(0x7A70, 0x0090, iwlax210_2ax_cfg_so_hr_a0)},
- {IWL_PCI_DEVICE(0x7A70, 0x0310, iwlax210_2ax_cfg_so_hr_a0)},
- {IWL_PCI_DEVICE(0x2725, 0x0020, iwlax210_2ax_cfg_so_hr_a0)},
- {IWL_PCI_DEVICE(0x2725, 0x0310, iwlax210_2ax_cfg_so_hr_a0)},
- {IWL_PCI_DEVICE(0x2725, 0x0A10, iwlax210_2ax_cfg_so_hr_a0)},
- {IWL_PCI_DEVICE(0x2725, 0x00B0, iwlax210_2ax_cfg_so_hr_a0)},
+ {IWL_PCI_DEVICE(0x2725, 0x0090, iwlax211_2ax_cfg_so_gf_a0)},
+ {IWL_PCI_DEVICE(0x2725, 0x0020, iwlax210_2ax_cfg_ty_gf_a0)},
+ {IWL_PCI_DEVICE(0x2725, 0x0310, iwlax210_2ax_cfg_ty_gf_a0)},
+ {IWL_PCI_DEVICE(0x2725, 0x0510, iwlax210_2ax_cfg_ty_gf_a0)},
+ {IWL_PCI_DEVICE(0x2725, 0x0A10, iwlax210_2ax_cfg_ty_gf_a0)},
+ {IWL_PCI_DEVICE(0x2725, 0x00B0, iwlax411_2ax_cfg_so_gf4_a0)},
+ {IWL_PCI_DEVICE(0x7A70, 0x0090, iwlax211_2ax_cfg_so_gf_a0)},
+ {IWL_PCI_DEVICE(0x7A70, 0x0310, iwlax211_2ax_cfg_so_gf_a0)},
+ {IWL_PCI_DEVICE(0x7A70, 0x0510, iwlax211_2ax_cfg_so_gf_a0)},
+ {IWL_PCI_DEVICE(0x7A70, 0x0A10, iwlax211_2ax_cfg_so_gf_a0)},
+ {IWL_PCI_DEVICE(0x7AF0, 0x0310, iwlax211_2ax_cfg_so_gf_a0)},
+ {IWL_PCI_DEVICE(0x7AF0, 0x0510, iwlax211_2ax_cfg_so_gf_a0)},
+ {IWL_PCI_DEVICE(0x7AF0, 0x0A10, iwlax211_2ax_cfg_so_gf_a0)},
#endif /* CONFIG_IWLMVM */
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index 85973dd57234..9f5d0fc839fe 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -874,6 +874,33 @@ static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
}
}
+static inline void iwl_enable_fw_load_int_ctx_info(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ IWL_DEBUG_ISR(trans, "Enabling ALIVE interrupt only\n");
+
+ if (!trans_pcie->msix_enabled) {
+ /*
+	 * When we receive the ALIVE interrupt, the ISR will call
+ * iwl_enable_fw_load_int_ctx_info again to set the ALIVE
+ * interrupt (which is not really needed anymore) but also the
+ * RX interrupt which will allow us to receive the ALIVE
+ * notification (which is Rx) and continue the flow.
+ */
+ trans_pcie->inta_mask = CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX;
+ iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
+ } else {
+ iwl_enable_hw_int_msk_msix(trans,
+ MSIX_HW_INT_CAUSES_REG_ALIVE);
+ /*
+ * Leave all the FH causes enabled to get the ALIVE
+ * notification.
+ */
+ iwl_enable_fh_int_msk_msix(trans, trans_pcie->fh_init_mask);
+ }
+}
+
static inline u16 iwl_pcie_get_cmd_index(const struct iwl_txq *q, u32 index)
{
return index & (q->n_window - 1);
@@ -1018,7 +1045,7 @@ static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans)
{
- return (trans->dbg_dest_tlv || trans->ini_valid);
+ return (trans->dbg.dest_tlv || trans->dbg.ini_valid);
}
void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);
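Editor's note: the new iwl_enable_fw_load_int_ctx_info() helper narrows the interrupt mask during context-info firmware load. In legacy-INTA mode only the ALIVE and FH_RX causes are unmasked (the Rx cause is needed because the ALIVE notification itself arrives as an Rx packet); in MSI-X mode the ALIVE HW cause plus the initial FH causes are enabled. A minimal sketch of that "unmask only what this phase needs" pattern, with made-up bit values and a fake register write:

#include <stdint.h>
#include <stdio.h>

/* Illustrative cause bits -- not the real CSR_INT_BIT_* values. */
#define INT_BIT_ALIVE	(1u << 0)
#define INT_BIT_FH_RX	(1u << 1)
#define INT_BIT_RF_KILL	(1u << 2)
#define INT_BIT_SW_ERR	(1u << 3)

static uint32_t int_mask_reg;	/* stands in for a CSR_INT_MASK-style register */

static void write_int_mask(uint32_t mask)
{
	int_mask_reg = mask;
	printf("interrupt mask now 0x%08x\n", mask);
}

/* During firmware load, unmask only ALIVE and the Rx cause that will
 * carry the ALIVE notification; everything else stays masked until
 * the firmware reports alive. */
static void enable_fw_load_interrupts(void)
{
	write_int_mask(INT_BIT_ALIVE | INT_BIT_FH_RX);
}

static void enable_all_interrupts(void)
{
	write_int_mask(INT_BIT_ALIVE | INT_BIT_FH_RX |
		       INT_BIT_RF_KILL | INT_BIT_SW_ERR);
}

int main(void)
{
	enable_fw_load_interrupts();	/* while the firmware image loads */
	enable_all_interrupts();	/* once the ALIVE notification arrives */
	return 0;
}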
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index 31b3591f71d1..a2d709642b2a 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -1827,26 +1827,26 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
goto out;
}
- if (iwl_have_debug_level(IWL_DL_ISR)) {
- /* NIC fires this, but we don't use it, redundant with WAKEUP */
- if (inta & CSR_INT_BIT_SCD) {
- IWL_DEBUG_ISR(trans,
- "Scheduler finished to transmit the frame/frames.\n");
- isr_stats->sch++;
- }
+ /* NIC fires this, but we don't use it, redundant with WAKEUP */
+ if (inta & CSR_INT_BIT_SCD) {
+ IWL_DEBUG_ISR(trans,
+ "Scheduler finished to transmit the frame/frames.\n");
+ isr_stats->sch++;
+ }
- /* Alive notification via Rx interrupt will do the real work */
- if (inta & CSR_INT_BIT_ALIVE) {
- IWL_DEBUG_ISR(trans, "Alive interrupt\n");
- isr_stats->alive++;
- if (trans->cfg->gen2) {
- /*
- * We can restock, since firmware configured
- * the RFH
- */
- iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
- }
+ /* Alive notification via Rx interrupt will do the real work */
+ if (inta & CSR_INT_BIT_ALIVE) {
+ IWL_DEBUG_ISR(trans, "Alive interrupt\n");
+ isr_stats->alive++;
+ if (trans->cfg->gen2) {
+ /*
+ * We can restock, since firmware configured
+ * the RFH
+ */
+ iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
}
+
+ handled |= CSR_INT_BIT_ALIVE;
}
/* Safely ignore these bits for debug checks below */
@@ -1965,6 +1965,9 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
/* Re-enable RF_KILL if it occurred */
else if (handled & CSR_INT_BIT_RF_KILL)
iwl_enable_rfkill_int(trans);
+ /* Re-enable the ALIVE / Rx interrupt if it occurred */
+ else if (handled & (CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX))
+ iwl_enable_fw_load_int_ctx_info(trans);
spin_unlock(&trans_pcie->irq_lock);
out:
@@ -2108,10 +2111,18 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
return IRQ_NONE;
}
- if (iwl_have_debug_level(IWL_DL_ISR))
- IWL_DEBUG_ISR(trans, "ISR inta_fh 0x%08x, enabled 0x%08x\n",
- inta_fh,
+ if (iwl_have_debug_level(IWL_DL_ISR)) {
+ IWL_DEBUG_ISR(trans,
+ "ISR inta_fh 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n",
+ inta_fh, trans_pcie->fh_mask,
iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD));
+ if (inta_fh & ~trans_pcie->fh_mask)
+ IWL_DEBUG_ISR(trans,
+ "We got a masked interrupt (0x%08x)\n",
+ inta_fh & ~trans_pcie->fh_mask);
+ }
+
+ inta_fh &= trans_pcie->fh_mask;
if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) &&
inta_fh & MSIX_FH_INT_CAUSES_Q0) {
@@ -2151,11 +2162,18 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
}
/* After checking FH register check HW register */
- if (iwl_have_debug_level(IWL_DL_ISR))
+ if (iwl_have_debug_level(IWL_DL_ISR)) {
IWL_DEBUG_ISR(trans,
- "ISR inta_hw 0x%08x, enabled 0x%08x\n",
- inta_hw,
+ "ISR inta_hw 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n",
+ inta_hw, trans_pcie->hw_mask,
iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD));
+ if (inta_hw & ~trans_pcie->hw_mask)
+ IWL_DEBUG_ISR(trans,
+ "We got a masked interrupt 0x%08x\n",
+ inta_hw & ~trans_pcie->hw_mask);
+ }
+
+ inta_hw &= trans_pcie->hw_mask;
/* Alive notification via Rx interrupt will do the real work */
if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) {
@@ -2212,7 +2230,7 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
"Hardware error detected. Restarting.\n");
isr_stats->hw++;
- trans->hw_error = true;
+ trans->dbg.hw_error = true;
iwl_pcie_irq_handle_error(trans);
}
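Editor's note: besides handling the SCD and ALIVE bits unconditionally and re-arming via iwl_enable_fw_load_int_ctx_info() once ALIVE/FH_RX has been handled, the MSI-X handler above now masks the raw inta_fh/inta_hw cause registers with the driver's software masks and logs any cause that fired while masked. That filtering step, sketched with illustrative values:

#include <stdint.h>
#include <stdio.h>

/* Report causes that fired while masked, then drop them. */
static uint32_t filter_causes(uint32_t raw, uint32_t sw_mask)
{
	uint32_t masked = raw & ~sw_mask;

	if (masked)
		printf("got a masked interrupt (0x%08x)\n", masked);

	return raw & sw_mask;	/* only enabled causes are handled */
}

int main(void)
{
	uint32_t sw_mask = 0x0000000f;	/* illustrative enabled causes */
	uint32_t raw = 0x00000013;	/* two enabled + one masked cause */

	printf("handling causes 0x%08x\n", filter_causes(raw, sw_mask));
	return 0;
}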
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
index 8507a7bdcfdd..8d17e68577fd 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -148,7 +148,7 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
trans_pcie->is_down = true;
/* Stop dbgc before stopping device */
- _iwl_fw_dbg_stop_recording(trans, NULL);
+ iwl_fw_dbg_stop_recording(trans, NULL);
/* tell the device to stop sending interrupts */
iwl_disable_interrupts(trans);
@@ -273,6 +273,15 @@ void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr)
* paging memory cannot be freed included since FW will still use it
*/
iwl_pcie_ctxt_info_free(trans);
+
+ /*
+ * Re-enable all the interrupts, including the RF-Kill one, now that
+ * the firmware is alive.
+ */
+ iwl_enable_interrupts(trans);
+ mutex_lock(&trans_pcie->mutex);
+ iwl_pcie_check_hw_rf_kill(trans);
+ mutex_unlock(&trans_pcie->mutex);
}
int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index dfa1bed124aa..f5df5b370d78 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -90,8 +90,10 @@
void iwl_trans_pcie_dump_regs(struct iwl_trans *trans)
{
-#define PCI_DUMP_SIZE 64
-#define PREFIX_LEN 32
+#define PCI_DUMP_SIZE 352
+#define PCI_MEM_DUMP_SIZE 64
+#define PCI_PARENT_DUMP_SIZE 524
+#define PREFIX_LEN 32
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct pci_dev *pdev = trans_pcie->pci_dev;
u32 i, pos, alloc_size, *ptr, *buf;
@@ -102,11 +104,15 @@ void iwl_trans_pcie_dump_regs(struct iwl_trans *trans)
/* Should be a multiple of 4 */
BUILD_BUG_ON(PCI_DUMP_SIZE > 4096 || PCI_DUMP_SIZE & 0x3);
+ BUILD_BUG_ON(PCI_MEM_DUMP_SIZE > 4096 || PCI_MEM_DUMP_SIZE & 0x3);
+ BUILD_BUG_ON(PCI_PARENT_DUMP_SIZE > 4096 || PCI_PARENT_DUMP_SIZE & 0x3);
+
/* Alloc a max size buffer */
- if (PCI_ERR_ROOT_ERR_SRC + 4 > PCI_DUMP_SIZE)
- alloc_size = PCI_ERR_ROOT_ERR_SRC + 4 + PREFIX_LEN;
- else
- alloc_size = PCI_DUMP_SIZE + PREFIX_LEN;
+ alloc_size = PCI_ERR_ROOT_ERR_SRC + 4 + PREFIX_LEN;
+ alloc_size = max_t(u32, alloc_size, PCI_DUMP_SIZE + PREFIX_LEN);
+ alloc_size = max_t(u32, alloc_size, PCI_MEM_DUMP_SIZE + PREFIX_LEN);
+ alloc_size = max_t(u32, alloc_size, PCI_PARENT_DUMP_SIZE + PREFIX_LEN);
+
buf = kmalloc(alloc_size, GFP_ATOMIC);
if (!buf)
return;
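Editor's note: the dump buffer is now sized as the maximum over every region it may hold (PCI config space, memory-mapped registers, parent-port config space, the AER error-source words) plus the hex-dump prefix, so one allocation serves all dumps. The same sizing pattern in plain C, with sizes loosely mirroring the constants in the patch:

#include <stdio.h>
#include <stdlib.h>

static size_t max_size(size_t a, size_t b)
{
	return a > b ? a : b;
}

int main(void)
{
	size_t prefix_len = 32;
	size_t cfg_dump = 352, mem_dump = 64, parent_dump = 524, err_src = 52 + 4;

	size_t alloc_size = err_src + prefix_len;
	alloc_size = max_size(alloc_size, cfg_dump + prefix_len);
	alloc_size = max_size(alloc_size, mem_dump + prefix_len);
	alloc_size = max_size(alloc_size, parent_dump + prefix_len);

	char *buf = malloc(alloc_size);
	if (!buf)
		return 1;
	printf("allocated %zu bytes for the largest dump\n", alloc_size);
	free(buf);
	return 0;
}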
@@ -123,7 +129,7 @@ void iwl_trans_pcie_dump_regs(struct iwl_trans *trans)
print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
IWL_ERR(trans, "iwlwifi device memory mapped registers:\n");
- for (i = 0, ptr = buf; i < PCI_DUMP_SIZE; i += 4, ptr++)
+ for (i = 0, ptr = buf; i < PCI_MEM_DUMP_SIZE; i += 4, ptr++)
*ptr = iwl_read32(trans, i);
print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
@@ -146,7 +152,7 @@ void iwl_trans_pcie_dump_regs(struct iwl_trans *trans)
IWL_ERR(trans, "iwlwifi parent port (%s) config registers:\n",
pci_name(pdev));
- for (i = 0, ptr = buf; i < PCI_DUMP_SIZE; i += 4, ptr++)
+ for (i = 0, ptr = buf; i < PCI_PARENT_DUMP_SIZE; i += 4, ptr++)
if (pci_read_config_dword(pdev, i, ptr))
goto err_read;
print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
@@ -188,14 +194,14 @@ static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
{
int i;
- for (i = 0; i < trans->num_blocks; i++) {
- dma_free_coherent(trans->dev, trans->fw_mon[i].size,
- trans->fw_mon[i].block,
- trans->fw_mon[i].physical);
- trans->fw_mon[i].block = NULL;
- trans->fw_mon[i].physical = 0;
- trans->fw_mon[i].size = 0;
- trans->num_blocks--;
+ for (i = 0; i < trans->dbg.num_blocks; i++) {
+ dma_free_coherent(trans->dev, trans->dbg.fw_mon[i].size,
+ trans->dbg.fw_mon[i].block,
+ trans->dbg.fw_mon[i].physical);
+ trans->dbg.fw_mon[i].block = NULL;
+ trans->dbg.fw_mon[i].physical = 0;
+ trans->dbg.fw_mon[i].size = 0;
+ trans->dbg.num_blocks--;
}
}
@@ -230,10 +236,10 @@ static void iwl_pcie_alloc_fw_monitor_block(struct iwl_trans *trans,
(unsigned long)BIT(power - 10),
(unsigned long)BIT(max_power - 10));
- trans->fw_mon[trans->num_blocks].block = cpu_addr;
- trans->fw_mon[trans->num_blocks].physical = phys;
- trans->fw_mon[trans->num_blocks].size = size;
- trans->num_blocks++;
+ trans->dbg.fw_mon[trans->dbg.num_blocks].block = cpu_addr;
+ trans->dbg.fw_mon[trans->dbg.num_blocks].physical = phys;
+ trans->dbg.fw_mon[trans->dbg.num_blocks].size = size;
+ trans->dbg.num_blocks++;
}
void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
@@ -254,7 +260,7 @@ void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
* This function allocates the default fw monitor.
* The optional additional ones will be allocated in runtime
*/
- if (trans->num_blocks)
+ if (trans->dbg.num_blocks)
return;
iwl_pcie_alloc_fw_monitor_block(trans, max_power, 11);
@@ -889,21 +895,21 @@ static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
void iwl_pcie_apply_destination(struct iwl_trans *trans)
{
- const struct iwl_fw_dbg_dest_tlv_v1 *dest = trans->dbg_dest_tlv;
+ const struct iwl_fw_dbg_dest_tlv_v1 *dest = trans->dbg.dest_tlv;
int i;
- if (trans->ini_valid) {
- if (!trans->num_blocks)
+ if (trans->dbg.ini_valid) {
+ if (!trans->dbg.num_blocks)
return;
IWL_DEBUG_FW(trans,
"WRT: applying DRAM buffer[0] destination\n");
iwl_write_umac_prph(trans, MON_BUFF_BASE_ADDR_VER2,
- trans->fw_mon[0].physical >>
+ trans->dbg.fw_mon[0].physical >>
MON_BUFF_SHIFT_VER2);
iwl_write_umac_prph(trans, MON_BUFF_END_ADDR_VER2,
- (trans->fw_mon[0].physical +
- trans->fw_mon[0].size - 256) >>
+ (trans->dbg.fw_mon[0].physical +
+ trans->dbg.fw_mon[0].size - 256) >>
MON_BUFF_SHIFT_VER2);
return;
}
@@ -916,7 +922,7 @@ void iwl_pcie_apply_destination(struct iwl_trans *trans)
else
IWL_WARN(trans, "PCI should have external buffer debug\n");
- for (i = 0; i < trans->dbg_n_dest_reg; i++) {
+ for (i = 0; i < trans->dbg.n_dest_reg; i++) {
u32 addr = le32_to_cpu(dest->reg_ops[i].addr);
u32 val = le32_to_cpu(dest->reg_ops[i].val);
@@ -955,18 +961,19 @@ void iwl_pcie_apply_destination(struct iwl_trans *trans)
}
monitor:
- if (dest->monitor_mode == EXTERNAL_MODE && trans->fw_mon[0].size) {
+ if (dest->monitor_mode == EXTERNAL_MODE && trans->dbg.fw_mon[0].size) {
iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
- trans->fw_mon[0].physical >> dest->base_shift);
+ trans->dbg.fw_mon[0].physical >>
+ dest->base_shift);
if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000)
iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
- (trans->fw_mon[0].physical +
- trans->fw_mon[0].size - 256) >>
+ (trans->dbg.fw_mon[0].physical +
+ trans->dbg.fw_mon[0].size - 256) >>
dest->end_shift);
else
iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
- (trans->fw_mon[0].physical +
- trans->fw_mon[0].size) >>
+ (trans->dbg.fw_mon[0].physical +
+ trans->dbg.fw_mon[0].size) >>
dest->end_shift);
}
}
@@ -1003,12 +1010,12 @@ static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
iwl_pcie_alloc_fw_monitor(trans, 0);
- if (trans->fw_mon[0].size) {
+ if (trans->dbg.fw_mon[0].size) {
iwl_write_prph(trans, MON_BUFF_BASE_ADDR,
- trans->fw_mon[0].physical >> 4);
+ trans->dbg.fw_mon[0].physical >> 4);
iwl_write_prph(trans, MON_BUFF_END_ADDR,
- (trans->fw_mon[0].physical +
- trans->fw_mon[0].size) >> 4);
+ (trans->dbg.fw_mon[0].physical +
+ trans->dbg.fw_mon[0].size) >> 4);
}
} else if (iwl_pcie_dbg_on(trans)) {
iwl_pcie_apply_destination(trans);
@@ -1236,7 +1243,7 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
trans_pcie->is_down = true;
/* Stop dbgc before stopping device */
- _iwl_fw_dbg_stop_recording(trans, NULL);
+ iwl_fw_dbg_stop_recording(trans, NULL);
/* tell the device to stop sending interrupts */
iwl_disable_interrupts(trans);
@@ -2729,8 +2736,8 @@ static int iwl_dbgfs_monitor_data_open(struct inode *inode,
struct iwl_trans *trans = inode->i_private;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- if (!trans->dbg_dest_tlv ||
- trans->dbg_dest_tlv->monitor_mode != EXTERNAL_MODE) {
+ if (!trans->dbg.dest_tlv ||
+ trans->dbg.dest_tlv->monitor_mode != EXTERNAL_MODE) {
IWL_ERR(trans, "Debug destination is not set to DRAM\n");
return -ENOENT;
}
@@ -2777,22 +2784,22 @@ static ssize_t iwl_dbgfs_monitor_data_read(struct file *file,
{
struct iwl_trans *trans = file->private_data;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- void *cpu_addr = (void *)trans->fw_mon[0].block, *curr_buf;
+ void *cpu_addr = (void *)trans->dbg.fw_mon[0].block, *curr_buf;
struct cont_rec *data = &trans_pcie->fw_mon_data;
u32 write_ptr_addr, wrap_cnt_addr, write_ptr, wrap_cnt;
ssize_t size, bytes_copied = 0;
bool b_full;
- if (trans->dbg_dest_tlv) {
+ if (trans->dbg.dest_tlv) {
write_ptr_addr =
- le32_to_cpu(trans->dbg_dest_tlv->write_ptr_reg);
- wrap_cnt_addr = le32_to_cpu(trans->dbg_dest_tlv->wrap_count);
+ le32_to_cpu(trans->dbg.dest_tlv->write_ptr_reg);
+ wrap_cnt_addr = le32_to_cpu(trans->dbg.dest_tlv->wrap_count);
} else {
write_ptr_addr = MON_BUFF_WRPTR;
wrap_cnt_addr = MON_BUFF_CYCLE_CNT;
}
- if (unlikely(!trans->dbg_rec_on))
+ if (unlikely(!trans->dbg.rec_on))
return 0;
mutex_lock(&data->mutex);
@@ -2816,7 +2823,7 @@ static ssize_t iwl_dbgfs_monitor_data_read(struct file *file,
} else if (data->prev_wrap_cnt == wrap_cnt - 1 &&
write_ptr < data->prev_wr_ptr) {
- size = trans->fw_mon[0].size - data->prev_wr_ptr;
+ size = trans->dbg.fw_mon[0].size - data->prev_wr_ptr;
curr_buf = cpu_addr + data->prev_wr_ptr;
b_full = iwl_write_to_user_buf(user_buf, count,
curr_buf, &size,
@@ -3035,14 +3042,10 @@ iwl_trans_pcie_dump_pointers(struct iwl_trans *trans,
base_high = DBGC_CUR_DBGBUF_BASE_ADDR_MSB;
write_ptr = DBGC_CUR_DBGBUF_STATUS;
wrap_cnt = DBGC_DBGBUF_WRAP_AROUND;
- } else if (trans->ini_valid) {
- base = iwl_umac_prph(trans, MON_BUFF_BASE_ADDR_VER2);
- write_ptr = iwl_umac_prph(trans, MON_BUFF_WRPTR_VER2);
- wrap_cnt = iwl_umac_prph(trans, MON_BUFF_CYCLE_CNT_VER2);
- } else if (trans->dbg_dest_tlv) {
- write_ptr = le32_to_cpu(trans->dbg_dest_tlv->write_ptr_reg);
- wrap_cnt = le32_to_cpu(trans->dbg_dest_tlv->wrap_count);
- base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
+ } else if (trans->dbg.dest_tlv) {
+ write_ptr = le32_to_cpu(trans->dbg.dest_tlv->write_ptr_reg);
+ wrap_cnt = le32_to_cpu(trans->dbg.dest_tlv->wrap_count);
+ base = le32_to_cpu(trans->dbg.dest_tlv->base_reg);
} else {
base = MON_BUFF_BASE_ADDR;
write_ptr = MON_BUFF_WRPTR;
@@ -3069,11 +3072,10 @@ iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
{
u32 len = 0;
- if ((trans->num_blocks &&
+ if (trans->dbg.dest_tlv ||
+ (trans->dbg.num_blocks &&
(trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 ||
- trans->cfg->device_family >= IWL_DEVICE_FAMILY_AX210 ||
- trans->ini_valid)) ||
- (trans->dbg_dest_tlv && !trans->ini_valid)) {
+ trans->cfg->device_family >= IWL_DEVICE_FAMILY_AX210))) {
struct iwl_fw_error_dump_fw_mon *fw_mon_data;
(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
@@ -3082,32 +3084,32 @@ iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
iwl_trans_pcie_dump_pointers(trans, fw_mon_data);
len += sizeof(**data) + sizeof(*fw_mon_data);
- if (trans->num_blocks) {
+ if (trans->dbg.num_blocks) {
memcpy(fw_mon_data->data,
- trans->fw_mon[0].block,
- trans->fw_mon[0].size);
+ trans->dbg.fw_mon[0].block,
+ trans->dbg.fw_mon[0].size);
- monitor_len = trans->fw_mon[0].size;
- } else if (trans->dbg_dest_tlv->monitor_mode == SMEM_MODE) {
+ monitor_len = trans->dbg.fw_mon[0].size;
+ } else if (trans->dbg.dest_tlv->monitor_mode == SMEM_MODE) {
u32 base = le32_to_cpu(fw_mon_data->fw_mon_base_ptr);
/*
* Update pointers to reflect actual values after
* shifting
*/
- if (trans->dbg_dest_tlv->version) {
+ if (trans->dbg.dest_tlv->version) {
base = (iwl_read_prph(trans, base) &
IWL_LDBG_M2S_BUF_BA_MSK) <<
- trans->dbg_dest_tlv->base_shift;
+ trans->dbg.dest_tlv->base_shift;
base *= IWL_M2S_UNIT_SIZE;
base += trans->cfg->smem_offset;
} else {
base = iwl_read_prph(trans, base) <<
- trans->dbg_dest_tlv->base_shift;
+ trans->dbg.dest_tlv->base_shift;
}
iwl_trans_read_mem(trans, base, fw_mon_data->data,
monitor_len / sizeof(u32));
- } else if (trans->dbg_dest_tlv->monitor_mode == MARBH_MODE) {
+ } else if (trans->dbg.dest_tlv->monitor_mode == MARBH_MODE) {
monitor_len =
iwl_trans_pci_dump_marbh_monitor(trans,
fw_mon_data,
@@ -3126,40 +3128,40 @@ iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, u32 *len)
{
- if (trans->num_blocks) {
+ if (trans->dbg.num_blocks) {
*len += sizeof(struct iwl_fw_error_dump_data) +
sizeof(struct iwl_fw_error_dump_fw_mon) +
- trans->fw_mon[0].size;
- return trans->fw_mon[0].size;
- } else if (trans->dbg_dest_tlv) {
+ trans->dbg.fw_mon[0].size;
+ return trans->dbg.fw_mon[0].size;
+ } else if (trans->dbg.dest_tlv) {
u32 base, end, cfg_reg, monitor_len;
- if (trans->dbg_dest_tlv->version == 1) {
- cfg_reg = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
+ if (trans->dbg.dest_tlv->version == 1) {
+ cfg_reg = le32_to_cpu(trans->dbg.dest_tlv->base_reg);
cfg_reg = iwl_read_prph(trans, cfg_reg);
base = (cfg_reg & IWL_LDBG_M2S_BUF_BA_MSK) <<
- trans->dbg_dest_tlv->base_shift;
+ trans->dbg.dest_tlv->base_shift;
base *= IWL_M2S_UNIT_SIZE;
base += trans->cfg->smem_offset;
monitor_len =
(cfg_reg & IWL_LDBG_M2S_BUF_SIZE_MSK) >>
- trans->dbg_dest_tlv->end_shift;
+ trans->dbg.dest_tlv->end_shift;
monitor_len *= IWL_M2S_UNIT_SIZE;
} else {
- base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
- end = le32_to_cpu(trans->dbg_dest_tlv->end_reg);
+ base = le32_to_cpu(trans->dbg.dest_tlv->base_reg);
+ end = le32_to_cpu(trans->dbg.dest_tlv->end_reg);
base = iwl_read_prph(trans, base) <<
- trans->dbg_dest_tlv->base_shift;
+ trans->dbg.dest_tlv->base_shift;
end = iwl_read_prph(trans, end) <<
- trans->dbg_dest_tlv->end_shift;
+ trans->dbg.dest_tlv->end_shift;
/* Make "end" point to the actual end */
if (trans->cfg->device_family >=
IWL_DEVICE_FAMILY_8000 ||
- trans->dbg_dest_tlv->monitor_mode == MARBH_MODE)
- end += (1 << trans->dbg_dest_tlv->end_shift);
+ trans->dbg.dest_tlv->monitor_mode == MARBH_MODE)
+ end += (1 << trans->dbg.dest_tlv->end_shift);
monitor_len = end - base;
}
*len += sizeof(struct iwl_fw_error_dump_data) +
@@ -3192,7 +3194,7 @@ static struct iwl_trans_dump_data
len = sizeof(*dump_data);
/* host commands */
- if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD))
+ if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq)
len += sizeof(*data) +
cmdq->n_window * (sizeof(*txcmd) +
TFD_MAX_PAYLOAD_SIZE);
@@ -3244,7 +3246,7 @@ static struct iwl_trans_dump_data
len = 0;
data = (void *)dump_data->data;
- if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD)) {
+ if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq) {
u16 tfd_size = trans_pcie->tfd_size;
data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
@@ -3569,15 +3571,17 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
trans->cfg = &iwlax210_2ax_cfg_so_jf_a0;
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF)) {
- trans->cfg = &iwlax210_2ax_cfg_so_gf_a0;
+ trans->cfg = &iwlax211_2ax_cfg_so_gf_a0;
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF4)) {
- trans->cfg = &iwlax210_2ax_cfg_so_gf4_a0;
+ trans->cfg = &iwlax411_2ax_cfg_so_gf4_a0;
}
} else if (cfg == &iwl_ax101_cfg_qu_hr) {
- if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
- CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
- trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) {
+ if ((CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
+ CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
+ trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) ||
+ (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
+ CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR1))) {
trans->cfg = &iwl22000_2ax_cfg_qnj_hr_b0;
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) {
@@ -3599,8 +3603,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
((trans->cfg != &iwl_ax200_cfg_cc &&
- trans->cfg != &killer1650x_2ax_cfg &&
- trans->cfg != &killer1650w_2ax_cfg) ||
+ trans->cfg != &killer1650x_2ax_cfg &&
+ trans->cfg != &killer1650w_2ax_cfg &&
+ trans->cfg != &iwl_ax201_cfg_quz_hr) ||
trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0)) {
u32 hw_status;
@@ -3681,6 +3686,7 @@ void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
unsigned long timeout = jiffies + IWL_TRANS_NMI_TIMEOUT;
+ bool interrupts_enabled = test_bit(STATUS_INT_ENABLED, &trans->status);
u32 inta_addr, sw_err_bit;
if (trans_pcie->msix_enabled) {
@@ -3691,7 +3697,12 @@ void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
sw_err_bit = CSR_INT_BIT_SW_ERR;
}
- iwl_disable_interrupts(trans);
+ /* if the interrupts were already disabled, there is no point in
+ * calling iwl_disable_interrupts
+ */
+ if (interrupts_enabled)
+ iwl_disable_interrupts(trans);
+
iwl_force_nmi(trans);
while (time_after(timeout, jiffies)) {
u32 inta_hw = iwl_read32(trans, inta_addr);
@@ -3705,6 +3716,13 @@ void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
mdelay(1);
}
- iwl_enable_interrupts(trans);
+
+	/* enable interrupts only if they were already enabled before this
+	 * function, to avoid a case where the driver enables interrupts
+	 * before the proper configuration has been done
+ */
+ if (interrupts_enabled)
+ iwl_enable_interrupts(trans);
+
iwl_trans_fw_error(trans);
}
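Editor's note: iwl_trans_pcie_sync_nmi() now samples whether interrupts were enabled on entry and only disables/re-enables them if they were, so a caller that deliberately runs with interrupts off (for example before the interrupt configuration is complete) does not get them turned back on behind its back. The save-and-conditionally-restore pattern, as a toy sketch:

#include <stdbool.h>
#include <stdio.h>

static bool ints_enabled;	/* stands in for the STATUS_INT_ENABLED bit */

static void disable_ints(void) { ints_enabled = false; puts("interrupts off"); }
static void enable_ints(void)  { ints_enabled = true;  puts("interrupts on");  }

static void sync_nmi(void)
{
	bool was_enabled = ints_enabled;	/* sample state on entry */

	if (was_enabled)
		disable_ints();

	puts("force NMI and poll for the error interrupt...");

	/* Restore only the state we found; never enable interrupts that
	 * the caller had intentionally left disabled. */
	if (was_enabled)
		enable_ints();
}

int main(void)
{
	sync_nmi();		/* interrupts were off: they stay off */
	enable_ints();
	sync_nmi();		/* interrupts were on: restored on exit */
	return 0;
}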
diff --git a/drivers/net/wireless/intersil/p54/main.c b/drivers/net/wireless/intersil/p54/main.c
index ca2676f79bbb..a3ca6620dc0c 100644
--- a/drivers/net/wireless/intersil/p54/main.c
+++ b/drivers/net/wireless/intersil/p54/main.c
@@ -411,12 +411,9 @@ static int p54_conf_tx(struct ieee80211_hw *dev,
int ret;
mutex_lock(&priv->conf_mutex);
- if (queue < dev->queues) {
- P54_SET_QUEUE(priv->qos_params[queue], params->aifs,
- params->cw_min, params->cw_max, params->txop);
- ret = p54_set_edcf(priv);
- } else
- ret = -EINVAL;
+ P54_SET_QUEUE(priv->qos_params[queue], params->aifs,
+ params->cw_min, params->cw_max, params->txop);
+ ret = p54_set_edcf(priv);
mutex_unlock(&priv->conf_mutex);
return ret;
}
diff --git a/drivers/net/wireless/intersil/p54/p54usb.c b/drivers/net/wireless/intersil/p54/p54usb.c
index f937815f0f2c..b94764c88750 100644
--- a/drivers/net/wireless/intersil/p54/p54usb.c
+++ b/drivers/net/wireless/intersil/p54/p54usb.c
@@ -30,6 +30,8 @@ MODULE_ALIAS("prism54usb");
MODULE_FIRMWARE("isl3886usb");
MODULE_FIRMWARE("isl3887usb");
+static struct usb_driver p54u_driver;
+
/*
* Note:
*
@@ -918,9 +920,9 @@ static void p54u_load_firmware_cb(const struct firmware *firmware,
{
struct p54u_priv *priv = context;
struct usb_device *udev = priv->udev;
+ struct usb_interface *intf = priv->intf;
int err;
- complete(&priv->fw_wait_load);
if (firmware) {
priv->fw = firmware;
err = p54u_start_ops(priv);
@@ -929,26 +931,22 @@ static void p54u_load_firmware_cb(const struct firmware *firmware,
dev_err(&udev->dev, "Firmware not found.\n");
}
- if (err) {
- struct device *parent = priv->udev->dev.parent;
-
- dev_err(&udev->dev, "failed to initialize device (%d)\n", err);
-
- if (parent)
- device_lock(parent);
+ complete(&priv->fw_wait_load);
+ /*
+ * At this point p54u_disconnect may have already freed
+ * the "priv" context. Do not use it anymore!
+ */
+ priv = NULL;
- device_release_driver(&udev->dev);
- /*
- * At this point p54u_disconnect has already freed
- * the "priv" context. Do not use it anymore!
- */
- priv = NULL;
+ if (err) {
+ dev_err(&intf->dev, "failed to initialize device (%d)\n", err);
- if (parent)
- device_unlock(parent);
+ usb_lock_device(udev);
+ usb_driver_release_interface(&p54u_driver, intf);
+ usb_unlock_device(udev);
}
- usb_put_dev(udev);
+ usb_put_intf(intf);
}
static int p54u_load_firmware(struct ieee80211_hw *dev,
@@ -969,14 +967,14 @@ static int p54u_load_firmware(struct ieee80211_hw *dev,
dev_info(&priv->udev->dev, "Loading firmware file %s\n",
p54u_fwlist[i].fw);
- usb_get_dev(udev);
+ usb_get_intf(intf);
err = request_firmware_nowait(THIS_MODULE, 1, p54u_fwlist[i].fw,
device, GFP_KERNEL, priv,
p54u_load_firmware_cb);
if (err) {
dev_err(&priv->udev->dev, "(p54usb) cannot load firmware %s "
"(%d)!\n", p54u_fwlist[i].fw, err);
- usb_put_dev(udev);
+ usb_put_intf(intf);
}
return err;
@@ -1008,8 +1006,6 @@ static int p54u_probe(struct usb_interface *intf,
skb_queue_head_init(&priv->rx_queue);
init_usb_anchor(&priv->submitted);
- usb_get_dev(udev);
-
/* really lazy and simple way of figuring out if we're a 3887 */
/* TODO: should just stick the identification in the device table */
i = intf->altsetting->desc.bNumEndpoints;
@@ -1050,10 +1046,8 @@ static int p54u_probe(struct usb_interface *intf,
priv->upload_fw = p54u_upload_firmware_net2280;
}
err = p54u_load_firmware(dev, intf);
- if (err) {
- usb_put_dev(udev);
+ if (err)
p54_free_common(dev);
- }
return err;
}
@@ -1069,7 +1063,6 @@ static void p54u_disconnect(struct usb_interface *intf)
wait_for_completion(&priv->fw_wait_load);
p54_unregister_common(dev);
- usb_put_dev(interface_to_usbdev(intf));
release_firmware(priv->fw);
p54_free_common(dev);
}
diff --git a/drivers/net/wireless/intersil/p54/txrx.c b/drivers/net/wireless/intersil/p54/txrx.c
index ff9acd1563f4..873fea59894f 100644
--- a/drivers/net/wireless/intersil/p54/txrx.c
+++ b/drivers/net/wireless/intersil/p54/txrx.c
@@ -139,7 +139,10 @@ static int p54_assign_address(struct p54_common *priv, struct sk_buff *skb)
unlikely(GET_HW_QUEUE(skb) == P54_QUEUE_BEACON))
priv->beacon_req_id = data->req_id;
- __skb_queue_after(&priv->tx_queue, target_skb, skb);
+ if (target_skb)
+ __skb_queue_after(&priv->tx_queue, target_skb, skb);
+ else
+ __skb_queue_head(&priv->tx_queue, skb);
spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
return 0;
}
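Editor's note: p54_assign_address() previously assumed it always found a frame to insert after; the hunk above makes it fall back to queueing at the head of tx_queue when target_skb is NULL. The same insert-after-or-at-head logic on a toy singly linked queue:

#include <stdio.h>

struct node {
	int id;
	struct node *next;
};

/* Insert new_node after target, or at the head when no target was found. */
static void queue_insert(struct node **head, struct node *target,
			 struct node *new_node)
{
	if (target) {
		new_node->next = target->next;
		target->next = new_node;
	} else {
		new_node->next = *head;
		*head = new_node;
	}
}

int main(void)
{
	struct node a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 };
	struct node *head = NULL;

	queue_insert(&head, NULL, &a);	/* no target: becomes the head */
	queue_insert(&head, &a, &b);	/* insert after a */
	queue_insert(&head, NULL, &c);	/* no target again: new head */

	for (struct node *n = head; n; n = n->next)
		printf("node %d\n", n->id);
	return 0;
}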
@@ -328,6 +331,7 @@ static int p54_rx_data(struct p54_common *priv, struct sk_buff *skb)
u16 freq = le16_to_cpu(hdr->freq);
size_t header_len = sizeof(*hdr);
u32 tsf32;
+ __le16 fc;
u8 rate = hdr->rate & 0xf;
/*
@@ -376,6 +380,11 @@ static int p54_rx_data(struct p54_common *priv, struct sk_buff *skb)
skb_pull(skb, header_len);
skb_trim(skb, le16_to_cpu(hdr->len));
+
+ fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
+ if (ieee80211_is_probe_resp(fc) || ieee80211_is_beacon(fc))
+ rx_status->boottime_ns = ktime_get_boottime_ns();
+
if (unlikely(priv->hw->conf.flags & IEEE80211_CONF_PS))
p54_pspoll_workaround(priv, skb);
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 1c699a9fa866..519b4ee88c5c 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -454,6 +454,8 @@ static struct wiphy_vendor_command mac80211_hwsim_vendor_commands[] = {
.subcmd = QCA_NL80211_SUBCMD_TEST },
.flags = WIPHY_VENDOR_CMD_NEED_NETDEV,
.doit = mac80211_hwsim_vendor_cmd_test,
+ .policy = hwsim_vendor_test_policy,
+ .maxattr = QCA_WLAN_VENDOR_ATTR_MAX,
}
};
@@ -1271,7 +1273,7 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
*/
if (ieee80211_is_beacon(hdr->frame_control) ||
ieee80211_is_probe_resp(hdr->frame_control)) {
- rx_status.boottime_ns = ktime_get_boot_ns();
+ rx_status.boottime_ns = ktime_get_boottime_ns();
now = data->abs_bcn_ts;
} else {
now = mac80211_hwsim_get_tsf_raw();
diff --git a/drivers/net/wireless/marvell/libertas/if_usb.c b/drivers/net/wireless/marvell/libertas/if_usb.c
index f1622f0ff8c9..afac2481909b 100644
--- a/drivers/net/wireless/marvell/libertas/if_usb.c
+++ b/drivers/net/wireless/marvell/libertas/if_usb.c
@@ -368,7 +368,7 @@ static int if_usb_send_fw_pkt(struct if_usb_card *cardp)
cardp->fwseqnum, cardp->totalbytes);
} else if (fwdata->hdr.dnldcmd == cpu_to_le32(FW_HAS_LAST_BLOCK)) {
lbs_deb_usb2(&cardp->udev->dev, "Host has finished FW downloading\n");
- lbs_deb_usb2(&cardp->udev->dev, "Donwloading FW JUMP BLOCK\n");
+ lbs_deb_usb2(&cardp->udev->dev, "Downloading FW JUMP BLOCK\n");
cardp->fwfinalblk = 1;
}
diff --git a/drivers/net/wireless/marvell/libertas_tf/if_usb.c b/drivers/net/wireless/marvell/libertas_tf/if_usb.c
index 28a8bd3cf10c..25ac9db35dbf 100644
--- a/drivers/net/wireless/marvell/libertas_tf/if_usb.c
+++ b/drivers/net/wireless/marvell/libertas_tf/if_usb.c
@@ -315,7 +315,7 @@ static int if_usb_send_fw_pkt(struct if_usb_card *cardp)
} else if (fwdata->hdr.dnldcmd == cpu_to_le32(FW_HAS_LAST_BLOCK)) {
lbtf_deb_usb2(&cardp->udev->dev,
"Host has finished FW downloading\n");
- lbtf_deb_usb2(&cardp->udev->dev, "Donwloading FW JUMP BLOCK\n");
+ lbtf_deb_usb2(&cardp->udev->dev, "Downloading FW JUMP BLOCK\n");
/* Host has finished FW downloading
* Downloading FW JUMP BLOCK
diff --git a/drivers/net/wireless/marvell/mwifiex/11n.c b/drivers/net/wireless/marvell/mwifiex/11n.c
index 5d75c971004b..e435f801bc91 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n.c
@@ -84,17 +84,15 @@ mwifiex_get_ba_status(struct mwifiex_private *priv,
enum mwifiex_ba_status ba_status)
{
struct mwifiex_tx_ba_stream_tbl *tx_ba_tsr_tbl;
- unsigned long flags;
- spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
+ spin_lock_bh(&priv->tx_ba_stream_tbl_lock);
list_for_each_entry(tx_ba_tsr_tbl, &priv->tx_ba_stream_tbl_ptr, list) {
if (tx_ba_tsr_tbl->ba_status == ba_status) {
- spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock,
- flags);
+ spin_unlock_bh(&priv->tx_ba_stream_tbl_lock);
return tx_ba_tsr_tbl;
}
}
- spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
+ spin_unlock_bh(&priv->tx_ba_stream_tbl_lock);
return NULL;
}
@@ -516,13 +514,12 @@ void mwifiex_11n_delete_all_tx_ba_stream_tbl(struct mwifiex_private *priv)
{
int i;
struct mwifiex_tx_ba_stream_tbl *del_tbl_ptr, *tmp_node;
- unsigned long flags;
- spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
+ spin_lock_bh(&priv->tx_ba_stream_tbl_lock);
list_for_each_entry_safe(del_tbl_ptr, tmp_node,
&priv->tx_ba_stream_tbl_ptr, list)
mwifiex_11n_delete_tx_ba_stream_tbl_entry(priv, del_tbl_ptr);
- spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
+ spin_unlock_bh(&priv->tx_ba_stream_tbl_lock);
INIT_LIST_HEAD(&priv->tx_ba_stream_tbl_ptr);
@@ -539,18 +536,16 @@ struct mwifiex_tx_ba_stream_tbl *
mwifiex_get_ba_tbl(struct mwifiex_private *priv, int tid, u8 *ra)
{
struct mwifiex_tx_ba_stream_tbl *tx_ba_tsr_tbl;
- unsigned long flags;
- spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
+ spin_lock_bh(&priv->tx_ba_stream_tbl_lock);
list_for_each_entry(tx_ba_tsr_tbl, &priv->tx_ba_stream_tbl_ptr, list) {
if (ether_addr_equal_unaligned(tx_ba_tsr_tbl->ra, ra) &&
tx_ba_tsr_tbl->tid == tid) {
- spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock,
- flags);
+ spin_unlock_bh(&priv->tx_ba_stream_tbl_lock);
return tx_ba_tsr_tbl;
}
}
- spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
+ spin_unlock_bh(&priv->tx_ba_stream_tbl_lock);
return NULL;
}
@@ -563,7 +558,6 @@ void mwifiex_create_ba_tbl(struct mwifiex_private *priv, u8 *ra, int tid,
{
struct mwifiex_tx_ba_stream_tbl *new_node;
struct mwifiex_ra_list_tbl *ra_list;
- unsigned long flags;
int tid_down;
if (!mwifiex_get_ba_tbl(priv, tid, ra)) {
@@ -584,9 +578,9 @@ void mwifiex_create_ba_tbl(struct mwifiex_private *priv, u8 *ra, int tid,
new_node->ba_status = ba_status;
memcpy(new_node->ra, ra, ETH_ALEN);
- spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
+ spin_lock_bh(&priv->tx_ba_stream_tbl_lock);
list_add_tail(&new_node->list, &priv->tx_ba_stream_tbl_ptr);
- spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
+ spin_unlock_bh(&priv->tx_ba_stream_tbl_lock);
}
}
@@ -599,7 +593,6 @@ int mwifiex_send_addba(struct mwifiex_private *priv, int tid, u8 *peer_mac)
u32 tx_win_size = priv->add_ba_param.tx_win_size;
static u8 dialog_tok;
int ret;
- unsigned long flags;
u16 block_ack_param_set;
mwifiex_dbg(priv->adapter, CMD, "cmd: %s: tid %d\n", __func__, tid);
@@ -612,10 +605,10 @@ int mwifiex_send_addba(struct mwifiex_private *priv, int tid, u8 *peer_mac)
memcmp(priv->cfg_bssid, peer_mac, ETH_ALEN)) {
struct mwifiex_sta_node *sta_ptr;
- spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+ spin_lock_bh(&priv->sta_list_spinlock);
sta_ptr = mwifiex_get_sta_entry(priv, peer_mac);
if (!sta_ptr) {
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+ spin_unlock_bh(&priv->sta_list_spinlock);
mwifiex_dbg(priv->adapter, ERROR,
"BA setup with unknown TDLS peer %pM!\n",
peer_mac);
@@ -623,7 +616,7 @@ int mwifiex_send_addba(struct mwifiex_private *priv, int tid, u8 *peer_mac)
}
if (sta_ptr->is_11ac_enabled)
tx_win_size = MWIFIEX_11AC_STA_AMPDU_DEF_TXWINSIZE;
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+ spin_unlock_bh(&priv->sta_list_spinlock);
}
block_ack_param_set = (u16)((tid << BLOCKACKPARAM_TID_POS) |
@@ -687,9 +680,8 @@ int mwifiex_send_delba(struct mwifiex_private *priv, int tid, u8 *peer_mac,
void mwifiex_11n_delba(struct mwifiex_private *priv, int tid)
{
struct mwifiex_rx_reorder_tbl *rx_reor_tbl_ptr;
- unsigned long flags;
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+ spin_lock_bh(&priv->rx_reorder_tbl_lock);
list_for_each_entry(rx_reor_tbl_ptr, &priv->rx_reorder_tbl_ptr, list) {
if (rx_reor_tbl_ptr->tid == tid) {
dev_dbg(priv->adapter->dev,
@@ -700,7 +692,7 @@ void mwifiex_11n_delba(struct mwifiex_private *priv, int tid)
}
}
exit:
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+ spin_unlock_bh(&priv->rx_reorder_tbl_lock);
}
/*
@@ -729,9 +721,8 @@ int mwifiex_get_rx_reorder_tbl(struct mwifiex_private *priv,
struct mwifiex_ds_rx_reorder_tbl *rx_reo_tbl = buf;
struct mwifiex_rx_reorder_tbl *rx_reorder_tbl_ptr;
int count = 0;
- unsigned long flags;
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+ spin_lock_bh(&priv->rx_reorder_tbl_lock);
list_for_each_entry(rx_reorder_tbl_ptr, &priv->rx_reorder_tbl_ptr,
list) {
rx_reo_tbl->tid = (u16) rx_reorder_tbl_ptr->tid;
@@ -750,7 +741,7 @@ int mwifiex_get_rx_reorder_tbl(struct mwifiex_private *priv,
if (count >= MWIFIEX_MAX_RX_BASTREAM_SUPPORTED)
break;
}
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+ spin_unlock_bh(&priv->rx_reorder_tbl_lock);
return count;
}
@@ -764,9 +755,8 @@ int mwifiex_get_tx_ba_stream_tbl(struct mwifiex_private *priv,
struct mwifiex_tx_ba_stream_tbl *tx_ba_tsr_tbl;
struct mwifiex_ds_tx_ba_stream_tbl *rx_reo_tbl = buf;
int count = 0;
- unsigned long flags;
- spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
+ spin_lock_bh(&priv->tx_ba_stream_tbl_lock);
list_for_each_entry(tx_ba_tsr_tbl, &priv->tx_ba_stream_tbl_ptr, list) {
rx_reo_tbl->tid = (u16) tx_ba_tsr_tbl->tid;
mwifiex_dbg(priv->adapter, DATA, "data: %s tid=%d\n",
@@ -778,7 +768,7 @@ int mwifiex_get_tx_ba_stream_tbl(struct mwifiex_private *priv,
if (count >= MWIFIEX_MAX_TX_BASTREAM_SUPPORTED)
break;
}
- spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
+ spin_unlock_bh(&priv->tx_ba_stream_tbl_lock);
return count;
}
@@ -790,16 +780,15 @@ int mwifiex_get_tx_ba_stream_tbl(struct mwifiex_private *priv,
void mwifiex_del_tx_ba_stream_tbl_by_ra(struct mwifiex_private *priv, u8 *ra)
{
struct mwifiex_tx_ba_stream_tbl *tbl, *tmp;
- unsigned long flags;
if (!ra)
return;
- spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
+ spin_lock_bh(&priv->tx_ba_stream_tbl_lock);
list_for_each_entry_safe(tbl, tmp, &priv->tx_ba_stream_tbl_ptr, list)
if (!memcmp(tbl->ra, ra, ETH_ALEN))
mwifiex_11n_delete_tx_ba_stream_tbl_entry(priv, tbl);
- spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
+ spin_unlock_bh(&priv->tx_ba_stream_tbl_lock);
return;
}
diff --git a/drivers/net/wireless/marvell/mwifiex/11n.h b/drivers/net/wireless/marvell/mwifiex/11n.h
index ea0fa68b9913..33268ce2cd82 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n.h
+++ b/drivers/net/wireless/marvell/mwifiex/11n.h
@@ -147,11 +147,10 @@ mwifiex_find_stream_to_delete(struct mwifiex_private *priv, int ptr_tid,
int tid;
u8 ret = false;
struct mwifiex_tx_ba_stream_tbl *tx_tbl;
- unsigned long flags;
tid = priv->aggr_prio_tbl[ptr_tid].ampdu_user;
- spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
+ spin_lock_bh(&priv->tx_ba_stream_tbl_lock);
list_for_each_entry(tx_tbl, &priv->tx_ba_stream_tbl_ptr, list) {
if (tid > priv->aggr_prio_tbl[tx_tbl->tid].ampdu_user) {
tid = priv->aggr_prio_tbl[tx_tbl->tid].ampdu_user;
@@ -160,7 +159,7 @@ mwifiex_find_stream_to_delete(struct mwifiex_private *priv, int ptr_tid,
ret = true;
}
}
- spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
+ spin_unlock_bh(&priv->tx_ba_stream_tbl_lock);
return ret;
}
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_aggr.c b/drivers/net/wireless/marvell/mwifiex/11n_aggr.c
index 042a1d07f686..088612438530 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_aggr.c
@@ -155,7 +155,7 @@ mwifiex_11n_form_amsdu_txpd(struct mwifiex_private *priv,
int
mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
struct mwifiex_ra_list_tbl *pra_list,
- int ptrindex, unsigned long ra_list_flags)
+ int ptrindex)
__releases(&priv->wmm.ra_list_spinlock)
{
struct mwifiex_adapter *adapter = priv->adapter;
@@ -168,8 +168,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
skb_src = skb_peek(&pra_list->skb_head);
if (!skb_src) {
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
- ra_list_flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
return 0;
}
@@ -177,8 +176,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
skb_aggr = mwifiex_alloc_dma_align_buf(adapter->tx_buf_size,
GFP_ATOMIC);
if (!skb_aggr) {
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
- ra_list_flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
return -1;
}
@@ -208,17 +206,15 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
pra_list->total_pkt_count--;
atomic_dec(&priv->wmm.tx_pkts_queued);
aggr_num++;
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
- ra_list_flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
mwifiex_11n_form_amsdu_pkt(skb_aggr, skb_src, &pad);
mwifiex_write_data_complete(adapter, skb_src, 0, 0);
- spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);
+ spin_lock_bh(&priv->wmm.ra_list_spinlock);
if (!mwifiex_is_ralist_valid(priv, pra_list, ptrindex)) {
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
- ra_list_flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
return -1;
}
@@ -232,7 +228,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
} while (skb_src);
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
/* Last AMSDU packet does not need padding */
skb_trim(skb_aggr, skb_aggr->len - pad);
@@ -265,10 +261,9 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
}
switch (ret) {
case -EBUSY:
- spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);
+ spin_lock_bh(&priv->wmm.ra_list_spinlock);
if (!mwifiex_is_ralist_valid(priv, pra_list, ptrindex)) {
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
- ra_list_flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
mwifiex_write_data_complete(adapter, skb_aggr, 1, -1);
return -1;
}
@@ -286,8 +281,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
atomic_inc(&priv->wmm.tx_pkts_queued);
tx_info_aggr->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
- ra_list_flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
mwifiex_dbg(adapter, ERROR, "data: -EBUSY is returned\n");
break;
case -1:
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_aggr.h b/drivers/net/wireless/marvell/mwifiex/11n_aggr.h
index 0cd2a3eb6c17..8279b159da7c 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_aggr.h
+++ b/drivers/net/wireless/marvell/mwifiex/11n_aggr.h
@@ -27,7 +27,7 @@ int mwifiex_11n_deaggregate_pkt(struct mwifiex_private *priv,
struct sk_buff *skb);
int mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
struct mwifiex_ra_list_tbl *ptr,
- int ptr_index, unsigned long flags)
+ int ptr_index)
__releases(&priv->wmm.ra_list_spinlock);
#endif /* !_MWIFIEX_11N_AGGR_H_ */
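
With the switch to spin_lock_bh() the aggregation path no longer threads a saved flags value through mwifiex_11n_aggregate_pkt(), so the parameter disappears from both the definition and this prototype, while the __releases(&priv->wmm.ra_list_spinlock) annotation stays: it tells sparse the function is entered with the lock held and returns with it released. A small sketch of that annotation pattern, with made-up names:

	#include <linux/compiler.h>
	#include <linux/spinlock.h>

	/* Hypothetical helper: entered with @lock held, returns with it
	 * released, which is exactly what the annotation lets sparse check. */
	static int demo_finish_locked(spinlock_t *lock, int status)
		__releases(lock)
	{
		spin_unlock_bh(lock);
		return status;
	}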
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
index 5380fba652cc..05a3c61ac603 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
@@ -76,7 +76,8 @@ static int mwifiex_11n_dispatch_amsdu_pkt(struct mwifiex_private *priv,
/* This function will process the rx packet and forward it to kernel/upper
* layer.
*/
-static int mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv, void *payload)
+static int mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv,
+ struct sk_buff *payload)
{
int ret;
@@ -109,27 +110,25 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
struct mwifiex_rx_reorder_tbl *tbl,
int start_win)
{
+ struct sk_buff_head list;
+ struct sk_buff *skb;
int pkt_to_send, i;
- void *rx_tmp_ptr;
- unsigned long flags;
+
+ __skb_queue_head_init(&list);
+ spin_lock_bh(&priv->rx_reorder_tbl_lock);
pkt_to_send = (start_win > tbl->start_win) ?
min((start_win - tbl->start_win), tbl->win_size) :
tbl->win_size;
for (i = 0; i < pkt_to_send; ++i) {
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
- rx_tmp_ptr = NULL;
if (tbl->rx_reorder_ptr[i]) {
- rx_tmp_ptr = tbl->rx_reorder_ptr[i];
+ skb = tbl->rx_reorder_ptr[i];
+ __skb_queue_tail(&list, skb);
tbl->rx_reorder_ptr[i] = NULL;
}
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
- if (rx_tmp_ptr)
- mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
}
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
/*
* We don't have a circular buffer, hence use rotation to simulate
* circular buffer
@@ -140,7 +139,10 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
}
tbl->start_win = start_win;
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+ spin_unlock_bh(&priv->rx_reorder_tbl_lock);
+
+ while ((skb = __skb_dequeue(&list)))
+ mwifiex_11n_dispatch_pkt(priv, skb);
}
/*
@@ -155,24 +157,21 @@ static void
mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
struct mwifiex_rx_reorder_tbl *tbl)
{
+ struct sk_buff_head list;
+ struct sk_buff *skb;
int i, j, xchg;
- void *rx_tmp_ptr;
- unsigned long flags;
+
+ __skb_queue_head_init(&list);
+ spin_lock_bh(&priv->rx_reorder_tbl_lock);
for (i = 0; i < tbl->win_size; ++i) {
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
- if (!tbl->rx_reorder_ptr[i]) {
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
- flags);
+ if (!tbl->rx_reorder_ptr[i])
break;
- }
- rx_tmp_ptr = tbl->rx_reorder_ptr[i];
+ skb = tbl->rx_reorder_ptr[i];
+ __skb_queue_tail(&list, skb);
tbl->rx_reorder_ptr[i] = NULL;
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
- mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
}
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
/*
* We don't have a circular buffer, hence use rotation to simulate
* circular buffer
@@ -185,7 +184,11 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
}
}
tbl->start_win = (tbl->start_win + i) & (MAX_TID_VALUE - 1);
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+
+ spin_unlock_bh(&priv->rx_reorder_tbl_lock);
+
+ while ((skb = __skb_dequeue(&list)))
+ mwifiex_11n_dispatch_pkt(priv, skb);
}
/*
@@ -198,19 +201,18 @@ static void
mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
struct mwifiex_rx_reorder_tbl *tbl)
{
- unsigned long flags;
int start_win;
if (!tbl)
return;
- spin_lock_irqsave(&priv->adapter->rx_proc_lock, flags);
+ spin_lock_bh(&priv->adapter->rx_proc_lock);
priv->adapter->rx_locked = true;
if (priv->adapter->rx_processing) {
- spin_unlock_irqrestore(&priv->adapter->rx_proc_lock, flags);
+ spin_unlock_bh(&priv->adapter->rx_proc_lock);
flush_workqueue(priv->adapter->rx_workqueue);
} else {
- spin_unlock_irqrestore(&priv->adapter->rx_proc_lock, flags);
+ spin_unlock_bh(&priv->adapter->rx_proc_lock);
}
start_win = (tbl->start_win + tbl->win_size) & (MAX_TID_VALUE - 1);
@@ -219,16 +221,16 @@ mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
del_timer_sync(&tbl->timer_context.timer);
tbl->timer_context.timer_is_set = false;
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+ spin_lock_bh(&priv->rx_reorder_tbl_lock);
list_del(&tbl->list);
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+ spin_unlock_bh(&priv->rx_reorder_tbl_lock);
kfree(tbl->rx_reorder_ptr);
kfree(tbl);
- spin_lock_irqsave(&priv->adapter->rx_proc_lock, flags);
+ spin_lock_bh(&priv->adapter->rx_proc_lock);
priv->adapter->rx_locked = false;
- spin_unlock_irqrestore(&priv->adapter->rx_proc_lock, flags);
+ spin_unlock_bh(&priv->adapter->rx_proc_lock);
}
@@ -240,17 +242,15 @@ struct mwifiex_rx_reorder_tbl *
mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta)
{
struct mwifiex_rx_reorder_tbl *tbl;
- unsigned long flags;
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+ spin_lock_bh(&priv->rx_reorder_tbl_lock);
list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list) {
if (!memcmp(tbl->ta, ta, ETH_ALEN) && tbl->tid == tid) {
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
- flags);
+ spin_unlock_bh(&priv->rx_reorder_tbl_lock);
return tbl;
}
}
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+ spin_unlock_bh(&priv->rx_reorder_tbl_lock);
return NULL;
}
@@ -261,21 +261,19 @@ mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta)
void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta)
{
struct mwifiex_rx_reorder_tbl *tbl, *tmp;
- unsigned long flags;
if (!ta)
return;
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+ spin_lock_bh(&priv->rx_reorder_tbl_lock);
list_for_each_entry_safe(tbl, tmp, &priv->rx_reorder_tbl_ptr, list) {
if (!memcmp(tbl->ta, ta, ETH_ALEN)) {
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
- flags);
+ spin_unlock_bh(&priv->rx_reorder_tbl_lock);
mwifiex_del_rx_reorder_entry(priv, tbl);
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+ spin_lock_bh(&priv->rx_reorder_tbl_lock);
}
}
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+ spin_unlock_bh(&priv->rx_reorder_tbl_lock);
return;
}
@@ -289,18 +287,16 @@ mwifiex_11n_find_last_seq_num(struct reorder_tmr_cnxt *ctx)
{
struct mwifiex_rx_reorder_tbl *rx_reorder_tbl_ptr = ctx->ptr;
struct mwifiex_private *priv = ctx->priv;
- unsigned long flags;
int i;
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+ spin_lock_bh(&priv->rx_reorder_tbl_lock);
for (i = rx_reorder_tbl_ptr->win_size - 1; i >= 0; --i) {
if (rx_reorder_tbl_ptr->rx_reorder_ptr[i]) {
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
- flags);
+ spin_unlock_bh(&priv->rx_reorder_tbl_lock);
return i;
}
}
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+ spin_unlock_bh(&priv->rx_reorder_tbl_lock);
return -1;
}
@@ -348,7 +344,6 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
int i;
struct mwifiex_rx_reorder_tbl *tbl, *new_node;
u16 last_seq = 0;
- unsigned long flags;
struct mwifiex_sta_node *node;
/*
@@ -372,7 +367,7 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
new_node->init_win = seq_num;
new_node->flags = 0;
- spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+ spin_lock_bh(&priv->sta_list_spinlock);
if (mwifiex_queuing_ra_based(priv)) {
if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP) {
node = mwifiex_get_sta_entry(priv, ta);
@@ -386,7 +381,7 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
else
last_seq = priv->rx_seq[tid];
}
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+ spin_unlock_bh(&priv->sta_list_spinlock);
mwifiex_dbg(priv->adapter, INFO,
"info: last_seq=%d start_win=%d\n",
@@ -418,9 +413,9 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
for (i = 0; i < win_size; ++i)
new_node->rx_reorder_ptr[i] = NULL;
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+ spin_lock_bh(&priv->rx_reorder_tbl_lock);
list_add_tail(&new_node->list, &priv->rx_reorder_tbl_ptr);
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+ spin_unlock_bh(&priv->rx_reorder_tbl_lock);
}
static void
@@ -476,18 +471,17 @@ int mwifiex_cmd_11n_addba_rsp_gen(struct mwifiex_private *priv,
u32 rx_win_size = priv->add_ba_param.rx_win_size;
u8 tid;
int win_size;
- unsigned long flags;
uint16_t block_ack_param_set;
if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
priv->adapter->is_hw_11ac_capable &&
memcmp(priv->cfg_bssid, cmd_addba_req->peer_mac_addr, ETH_ALEN)) {
- spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+ spin_lock_bh(&priv->sta_list_spinlock);
sta_ptr = mwifiex_get_sta_entry(priv,
cmd_addba_req->peer_mac_addr);
if (!sta_ptr) {
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+ spin_unlock_bh(&priv->sta_list_spinlock);
mwifiex_dbg(priv->adapter, ERROR,
"BA setup with unknown TDLS peer %pM!\n",
cmd_addba_req->peer_mac_addr);
@@ -495,7 +489,7 @@ int mwifiex_cmd_11n_addba_rsp_gen(struct mwifiex_private *priv,
}
if (sta_ptr->is_11ac_enabled)
rx_win_size = MWIFIEX_11AC_STA_AMPDU_DEF_RXWINSIZE;
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+ spin_unlock_bh(&priv->sta_list_spinlock);
}
cmd->command = cpu_to_le16(HostCmd_CMD_11N_ADDBA_RSP);
@@ -682,7 +676,6 @@ mwifiex_del_ba_tbl(struct mwifiex_private *priv, int tid, u8 *peer_mac,
struct mwifiex_tx_ba_stream_tbl *ptx_tbl;
struct mwifiex_ra_list_tbl *ra_list;
u8 cleanup_rx_reorder_tbl;
- unsigned long flags;
int tid_down;
if (type == TYPE_DELBA_RECEIVE)
@@ -716,9 +709,9 @@ mwifiex_del_ba_tbl(struct mwifiex_private *priv, int tid, u8 *peer_mac,
ra_list->amsdu_in_ampdu = false;
ra_list->ba_status = BA_SETUP_NONE;
}
- spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
+ spin_lock_bh(&priv->tx_ba_stream_tbl_lock);
mwifiex_11n_delete_tx_ba_stream_tbl_entry(priv, ptx_tbl);
- spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
+ spin_unlock_bh(&priv->tx_ba_stream_tbl_lock);
}
}
@@ -804,17 +797,16 @@ void mwifiex_11n_ba_stream_timeout(struct mwifiex_private *priv,
void mwifiex_11n_cleanup_reorder_tbl(struct mwifiex_private *priv)
{
struct mwifiex_rx_reorder_tbl *del_tbl_ptr, *tmp_node;
- unsigned long flags;
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+ spin_lock_bh(&priv->rx_reorder_tbl_lock);
list_for_each_entry_safe(del_tbl_ptr, tmp_node,
&priv->rx_reorder_tbl_ptr, list) {
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+ spin_unlock_bh(&priv->rx_reorder_tbl_lock);
mwifiex_del_rx_reorder_entry(priv, del_tbl_ptr);
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+ spin_lock_bh(&priv->rx_reorder_tbl_lock);
}
INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+ spin_unlock_bh(&priv->rx_reorder_tbl_lock);
mwifiex_reset_11n_rx_seq_num(priv);
}
@@ -826,7 +818,6 @@ void mwifiex_update_rxreor_flags(struct mwifiex_adapter *adapter, u8 flags)
{
struct mwifiex_private *priv;
struct mwifiex_rx_reorder_tbl *tbl;
- unsigned long lock_flags;
int i;
for (i = 0; i < adapter->priv_num; i++) {
@@ -834,10 +825,10 @@ void mwifiex_update_rxreor_flags(struct mwifiex_adapter *adapter, u8 flags)
if (!priv)
continue;
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, lock_flags);
+ spin_lock_bh(&priv->rx_reorder_tbl_lock);
list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list)
tbl->flags = flags;
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, lock_flags);
+ spin_unlock_bh(&priv->rx_reorder_tbl_lock);
}
return;
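
The larger rework in 11n_rxreorder.c is in mwifiex_11n_dispatch_pkt_until_start_win() and mwifiex_11n_scan_and_dispatch(): instead of dropping and re-taking rx_reorder_tbl_lock around every single packet, the new code collects the ready skbs onto a local sk_buff_head under one lock hold and dispatches them after unlocking, which also motivates tightening mwifiex_11n_dispatch_pkt() from "void *payload" to "struct sk_buff *payload". A sketch of the "steal under the lock, deliver unlocked" idea, with placeholder names:

	#include <linux/skbuff.h>
	#include <linux/spinlock.h>

	static void demo_flush(spinlock_t *lock, struct sk_buff **slots, int n,
			       void (*demo_dispatch)(struct sk_buff *))
	{
		struct sk_buff_head list;
		struct sk_buff *skb;
		int i;

		__skb_queue_head_init(&list);

		spin_lock_bh(lock);
		for (i = 0; i < n; i++) {
			if (!slots[i])
				continue;
			__skb_queue_tail(&list, slots[i]);	/* steal under the lock */
			slots[i] = NULL;
		}
		spin_unlock_bh(lock);

		while ((skb = __skb_dequeue(&list)))		/* deliver unlocked */
			demo_dispatch(skb);
	}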
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index e11a4bb67172..d89684168500 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -876,13 +876,13 @@ static int mwifiex_deinit_priv_params(struct mwifiex_private *priv)
spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
}
- spin_lock_irqsave(&adapter->rx_proc_lock, flags);
+ spin_lock_bh(&adapter->rx_proc_lock);
adapter->rx_locked = true;
if (adapter->rx_processing) {
- spin_unlock_irqrestore(&adapter->rx_proc_lock, flags);
+ spin_unlock_bh(&adapter->rx_proc_lock);
flush_workqueue(adapter->rx_workqueue);
} else {
- spin_unlock_irqrestore(&adapter->rx_proc_lock, flags);
+ spin_unlock_bh(&adapter->rx_proc_lock);
}
mwifiex_free_priv(priv);
@@ -934,9 +934,9 @@ mwifiex_init_new_priv_params(struct mwifiex_private *priv,
adapter->main_locked = false;
spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
- spin_lock_irqsave(&adapter->rx_proc_lock, flags);
+ spin_lock_bh(&adapter->rx_proc_lock);
adapter->rx_locked = false;
- spin_unlock_irqrestore(&adapter->rx_proc_lock, flags);
+ spin_unlock_bh(&adapter->rx_proc_lock);
mwifiex_set_mac_address(priv, dev, false, NULL);
@@ -1827,7 +1827,6 @@ mwifiex_cfg80211_del_station(struct wiphy *wiphy, struct net_device *dev,
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
struct mwifiex_sta_node *sta_node;
u8 deauth_mac[ETH_ALEN];
- unsigned long flags;
if (!priv->bss_started && priv->wdev.cac_started) {
mwifiex_dbg(priv->adapter, INFO, "%s: abort CAC!\n", __func__);
@@ -1845,11 +1844,11 @@ mwifiex_cfg80211_del_station(struct wiphy *wiphy, struct net_device *dev,
eth_zero_addr(deauth_mac);
- spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+ spin_lock_bh(&priv->sta_list_spinlock);
sta_node = mwifiex_get_sta_entry(priv, params->mac);
if (sta_node)
ether_addr_copy(deauth_mac, params->mac);
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+ spin_unlock_bh(&priv->sta_list_spinlock);
if (is_valid_ether_addr(deauth_mac)) {
if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_STA_DEAUTH,
@@ -3268,7 +3267,7 @@ static void mwifiex_set_auto_arp_mef_entry(struct mwifiex_private *priv,
in_dev = __in_dev_get_rtnl(adapter->priv[i]->netdev);
if (!in_dev)
continue;
- ifa = in_dev->ifa_list;
+ ifa = rtnl_dereference(in_dev->ifa_list);
if (!ifa || !ifa->ifa_local)
continue;
ips[i] = ifa->ifa_local;
@@ -3852,15 +3851,14 @@ mwifiex_cfg80211_tdls_chan_switch(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_chan_def *chandef)
{
struct mwifiex_sta_node *sta_ptr;
- unsigned long flags;
u16 chan;
u8 second_chan_offset, band;
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
- spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+ spin_lock_bh(&priv->sta_list_spinlock);
sta_ptr = mwifiex_get_sta_entry(priv, addr);
if (!sta_ptr) {
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+ spin_unlock_bh(&priv->sta_list_spinlock);
wiphy_err(wiphy, "%s: Invalid TDLS peer %pM\n",
__func__, addr);
return -ENOENT;
@@ -3868,18 +3866,18 @@ mwifiex_cfg80211_tdls_chan_switch(struct wiphy *wiphy, struct net_device *dev,
if (!(sta_ptr->tdls_cap.extcap.ext_capab[3] &
WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH)) {
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+ spin_unlock_bh(&priv->sta_list_spinlock);
wiphy_err(wiphy, "%pM do not support tdls cs\n", addr);
return -ENOENT;
}
if (sta_ptr->tdls_status == TDLS_CHAN_SWITCHING ||
sta_ptr->tdls_status == TDLS_IN_OFF_CHAN) {
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+ spin_unlock_bh(&priv->sta_list_spinlock);
wiphy_err(wiphy, "channel switch is running, abort request\n");
return -EALREADY;
}
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+ spin_unlock_bh(&priv->sta_list_spinlock);
chan = chandef->chan->hw_value;
second_chan_offset = mwifiex_get_sec_chan_offset(chan);
@@ -3895,23 +3893,22 @@ mwifiex_cfg80211_tdls_cancel_chan_switch(struct wiphy *wiphy,
const u8 *addr)
{
struct mwifiex_sta_node *sta_ptr;
- unsigned long flags;
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
- spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+ spin_lock_bh(&priv->sta_list_spinlock);
sta_ptr = mwifiex_get_sta_entry(priv, addr);
if (!sta_ptr) {
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+ spin_unlock_bh(&priv->sta_list_spinlock);
wiphy_err(wiphy, "%s: Invalid TDLS peer %pM\n",
__func__, addr);
} else if (!(sta_ptr->tdls_status == TDLS_CHAN_SWITCHING ||
sta_ptr->tdls_status == TDLS_IN_BASE_CHAN ||
sta_ptr->tdls_status == TDLS_IN_OFF_CHAN)) {
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+ spin_unlock_bh(&priv->sta_list_spinlock);
wiphy_err(wiphy, "tdls chan switch not initialize by %pM\n",
addr);
} else {
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+ spin_unlock_bh(&priv->sta_list_spinlock);
mwifiex_stop_tdls_cs(priv, addr);
}
}
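
Besides the lock conversions, the cfg80211.c hunks pick up the in_device::ifa_list change: the address list is now RCU/RTNL protected, so the plain pointer read is replaced by rtnl_dereference(); the surrounding code already uses __in_dev_get_rtnl(), which assumes RTNL is held on this path. A minimal sketch of reading the first IPv4 address under that assumption:

	/* Illustrative only; error handling trimmed, RTNL assumed held. */
	#include <linux/inetdevice.h>
	#include <linux/rtnetlink.h>

	static __be32 demo_first_ipv4(struct net_device *dev)
	{
		struct in_device *in_dev = __in_dev_get_rtnl(dev);
		struct in_ifaddr *ifa;

		if (!in_dev)
			return 0;
		ifa = rtnl_dereference(in_dev->ifa_list);
		return ifa ? ifa->ifa_local : 0;
	}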
diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
index 8c35441fd9b7..e8788c35a453 100644
--- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
@@ -39,10 +39,11 @@ static void mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter);
static void
mwifiex_init_cmd_node(struct mwifiex_private *priv,
struct cmd_ctrl_node *cmd_node,
- u32 cmd_oid, void *data_buf, bool sync)
+ u32 cmd_no, void *data_buf, bool sync)
{
cmd_node->priv = priv;
- cmd_node->cmd_oid = cmd_oid;
+ cmd_node->cmd_no = cmd_no;
+
if (sync) {
cmd_node->wait_q_enabled = true;
cmd_node->cmd_wait_q_woken = false;
@@ -60,19 +61,18 @@ static struct cmd_ctrl_node *
mwifiex_get_cmd_node(struct mwifiex_adapter *adapter)
{
struct cmd_ctrl_node *cmd_node;
- unsigned long flags;
- spin_lock_irqsave(&adapter->cmd_free_q_lock, flags);
+ spin_lock_bh(&adapter->cmd_free_q_lock);
if (list_empty(&adapter->cmd_free_q)) {
mwifiex_dbg(adapter, ERROR,
"GET_CMD_NODE: cmd node not available\n");
- spin_unlock_irqrestore(&adapter->cmd_free_q_lock, flags);
+ spin_unlock_bh(&adapter->cmd_free_q_lock);
return NULL;
}
cmd_node = list_first_entry(&adapter->cmd_free_q,
struct cmd_ctrl_node, list);
list_del(&cmd_node->list);
- spin_unlock_irqrestore(&adapter->cmd_free_q_lock, flags);
+ spin_unlock_bh(&adapter->cmd_free_q_lock);
return cmd_node;
}
@@ -92,7 +92,7 @@ static void
mwifiex_clean_cmd_node(struct mwifiex_adapter *adapter,
struct cmd_ctrl_node *cmd_node)
{
- cmd_node->cmd_oid = 0;
+ cmd_node->cmd_no = 0;
cmd_node->cmd_flag = 0;
cmd_node->data_buf = NULL;
cmd_node->wait_q_enabled = false;
@@ -116,8 +116,6 @@ static void
mwifiex_insert_cmd_to_free_q(struct mwifiex_adapter *adapter,
struct cmd_ctrl_node *cmd_node)
{
- unsigned long flags;
-
if (!cmd_node)
return;
@@ -127,9 +125,9 @@ mwifiex_insert_cmd_to_free_q(struct mwifiex_adapter *adapter,
mwifiex_clean_cmd_node(adapter, cmd_node);
/* Insert node into cmd_free_q */
- spin_lock_irqsave(&adapter->cmd_free_q_lock, flags);
+ spin_lock_bh(&adapter->cmd_free_q_lock);
list_add_tail(&cmd_node->list, &adapter->cmd_free_q);
- spin_unlock_irqrestore(&adapter->cmd_free_q_lock, flags);
+ spin_unlock_bh(&adapter->cmd_free_q_lock);
}
/* This function reuses a command node. */
@@ -182,7 +180,6 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv,
struct host_cmd_ds_command *host_cmd;
uint16_t cmd_code;
uint16_t cmd_size;
- unsigned long flags;
if (!adapter || !cmd_node)
return -1;
@@ -201,6 +198,7 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv,
}
cmd_code = le16_to_cpu(host_cmd->command);
+ cmd_node->cmd_no = cmd_code;
cmd_size = le16_to_cpu(host_cmd->size);
if (adapter->hw_status == MWIFIEX_HW_STATUS_RESET &&
@@ -221,9 +219,9 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv,
cmd_node->priv->bss_num,
cmd_node->priv->bss_type));
- spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
+ spin_lock_bh(&adapter->mwifiex_cmd_lock);
adapter->curr_cmd = cmd_node;
- spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
+ spin_unlock_bh(&adapter->mwifiex_cmd_lock);
/* Adjust skb length */
if (cmd_node->cmd_skb->len > cmd_size)
@@ -274,9 +272,9 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv,
adapter->cmd_wait_q.status = -1;
mwifiex_recycle_cmd_node(adapter, adapter->curr_cmd);
- spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
+ spin_lock_bh(&adapter->mwifiex_cmd_lock);
adapter->curr_cmd = NULL;
- spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
+ spin_unlock_bh(&adapter->mwifiex_cmd_lock);
adapter->dbg.num_cmd_host_to_card_failure++;
return -1;
@@ -621,7 +619,7 @@ int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no,
}
/* Initialize the command node */
- mwifiex_init_cmd_node(priv, cmd_node, cmd_oid, data_buf, sync);
+ mwifiex_init_cmd_node(priv, cmd_node, cmd_no, data_buf, sync);
if (!cmd_node->cmd_skb) {
mwifiex_dbg(adapter, ERROR,
@@ -695,7 +693,6 @@ mwifiex_insert_cmd_to_pending_q(struct mwifiex_adapter *adapter,
{
struct host_cmd_ds_command *host_cmd = NULL;
u16 command;
- unsigned long flags;
bool add_tail = true;
host_cmd = (struct host_cmd_ds_command *) (cmd_node->cmd_skb->data);
@@ -717,12 +714,12 @@ mwifiex_insert_cmd_to_pending_q(struct mwifiex_adapter *adapter,
}
}
- spin_lock_irqsave(&adapter->cmd_pending_q_lock, flags);
+ spin_lock_bh(&adapter->cmd_pending_q_lock);
if (add_tail)
list_add_tail(&cmd_node->list, &adapter->cmd_pending_q);
else
list_add(&cmd_node->list, &adapter->cmd_pending_q);
- spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, flags);
+ spin_unlock_bh(&adapter->cmd_pending_q_lock);
atomic_inc(&adapter->cmd_pending);
mwifiex_dbg(adapter, CMD,
@@ -747,8 +744,6 @@ int mwifiex_exec_next_cmd(struct mwifiex_adapter *adapter)
struct cmd_ctrl_node *cmd_node;
int ret = 0;
struct host_cmd_ds_command *host_cmd;
- unsigned long cmd_flags;
- unsigned long cmd_pending_q_flags;
/* Check if already in processing */
if (adapter->curr_cmd) {
@@ -757,13 +752,12 @@ int mwifiex_exec_next_cmd(struct mwifiex_adapter *adapter)
return -1;
}
- spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags);
+ spin_lock_bh(&adapter->mwifiex_cmd_lock);
/* Check if any command is pending */
- spin_lock_irqsave(&adapter->cmd_pending_q_lock, cmd_pending_q_flags);
+ spin_lock_bh(&adapter->cmd_pending_q_lock);
if (list_empty(&adapter->cmd_pending_q)) {
- spin_unlock_irqrestore(&adapter->cmd_pending_q_lock,
- cmd_pending_q_flags);
- spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
+ spin_unlock_bh(&adapter->cmd_pending_q_lock);
+ spin_unlock_bh(&adapter->mwifiex_cmd_lock);
return 0;
}
cmd_node = list_first_entry(&adapter->cmd_pending_q,
@@ -776,17 +770,15 @@ int mwifiex_exec_next_cmd(struct mwifiex_adapter *adapter)
mwifiex_dbg(adapter, ERROR,
"%s: cannot send cmd in sleep state,\t"
"this should not happen\n", __func__);
- spin_unlock_irqrestore(&adapter->cmd_pending_q_lock,
- cmd_pending_q_flags);
- spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
+ spin_unlock_bh(&adapter->cmd_pending_q_lock);
+ spin_unlock_bh(&adapter->mwifiex_cmd_lock);
return ret;
}
list_del(&cmd_node->list);
- spin_unlock_irqrestore(&adapter->cmd_pending_q_lock,
- cmd_pending_q_flags);
+ spin_unlock_bh(&adapter->cmd_pending_q_lock);
- spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
+ spin_unlock_bh(&adapter->mwifiex_cmd_lock);
ret = mwifiex_dnld_cmd_to_fw(priv, cmd_node);
priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
/* Any command sent to the firmware when host is in sleep
@@ -820,10 +812,6 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter)
uint16_t orig_cmdresp_no;
uint16_t cmdresp_no;
uint16_t cmdresp_result;
- unsigned long flags;
-
- /* Now we got response from FW, cancel the command timer */
- del_timer_sync(&adapter->cmd_timer);
if (!adapter->curr_cmd || !adapter->curr_cmd->resp_skb) {
resp = (struct host_cmd_ds_command *) adapter->upld_buf;
@@ -833,9 +821,20 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter)
return -1;
}
+ resp = (struct host_cmd_ds_command *)adapter->curr_cmd->resp_skb->data;
+ orig_cmdresp_no = le16_to_cpu(resp->command);
+ cmdresp_no = (orig_cmdresp_no & HostCmd_CMD_ID_MASK);
+
+ if (adapter->curr_cmd->cmd_no != cmdresp_no) {
+ mwifiex_dbg(adapter, ERROR,
+ "cmdresp error: cmd=0x%x cmd_resp=0x%x\n",
+ adapter->curr_cmd->cmd_no, cmdresp_no);
+ return -1;
+ }
+ /* Now we got response from FW, cancel the command timer */
+ del_timer_sync(&adapter->cmd_timer);
clear_bit(MWIFIEX_IS_CMD_TIMEDOUT, &adapter->work_flags);
- resp = (struct host_cmd_ds_command *) adapter->curr_cmd->resp_skb->data;
if (adapter->curr_cmd->cmd_flag & CMD_F_HOSTCMD) {
/* Copy original response back to response buffer */
struct mwifiex_ds_misc_cmd *hostcmd;
@@ -849,7 +848,6 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter)
memcpy(hostcmd->cmd, resp, size);
}
}
- orig_cmdresp_no = le16_to_cpu(resp->command);
/* Get BSS number and corresponding priv */
priv = mwifiex_get_priv_by_id(adapter,
@@ -882,9 +880,9 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter)
adapter->cmd_wait_q.status = -1;
mwifiex_recycle_cmd_node(adapter, adapter->curr_cmd);
- spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
+ spin_lock_bh(&adapter->mwifiex_cmd_lock);
adapter->curr_cmd = NULL;
- spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
+ spin_unlock_bh(&adapter->mwifiex_cmd_lock);
return -1;
}
@@ -916,9 +914,9 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter)
mwifiex_recycle_cmd_node(adapter, adapter->curr_cmd);
- spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
+ spin_lock_bh(&adapter->mwifiex_cmd_lock);
adapter->curr_cmd = NULL;
- spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
+ spin_unlock_bh(&adapter->mwifiex_cmd_lock);
}
return ret;
@@ -1024,17 +1022,16 @@ void
mwifiex_cancel_pending_scan_cmd(struct mwifiex_adapter *adapter)
{
struct cmd_ctrl_node *cmd_node = NULL, *tmp_node;
- unsigned long flags;
/* Cancel all pending scan command */
- spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
+ spin_lock_bh(&adapter->scan_pending_q_lock);
list_for_each_entry_safe(cmd_node, tmp_node,
&adapter->scan_pending_q, list) {
list_del(&cmd_node->list);
cmd_node->wait_q_enabled = false;
mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
}
- spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
+ spin_unlock_bh(&adapter->scan_pending_q_lock);
}
/*
@@ -1048,9 +1045,8 @@ void
mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter)
{
struct cmd_ctrl_node *cmd_node = NULL, *tmp_node;
- unsigned long flags, cmd_flags;
- spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags);
+ spin_lock_bh(&adapter->mwifiex_cmd_lock);
/* Cancel current cmd */
if ((adapter->curr_cmd) && (adapter->curr_cmd->wait_q_enabled)) {
adapter->cmd_wait_q.status = -1;
@@ -1059,7 +1055,7 @@ mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter)
/* no recycle probably wait for response */
}
/* Cancel all pending command */
- spin_lock_irqsave(&adapter->cmd_pending_q_lock, flags);
+ spin_lock_bh(&adapter->cmd_pending_q_lock);
list_for_each_entry_safe(cmd_node, tmp_node,
&adapter->cmd_pending_q, list) {
list_del(&cmd_node->list);
@@ -1068,8 +1064,8 @@ mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter)
adapter->cmd_wait_q.status = -1;
mwifiex_recycle_cmd_node(adapter, cmd_node);
}
- spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, flags);
- spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
+ spin_unlock_bh(&adapter->cmd_pending_q_lock);
+ spin_unlock_bh(&adapter->mwifiex_cmd_lock);
mwifiex_cancel_scan(adapter);
}
@@ -1088,11 +1084,10 @@ static void
mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
{
struct cmd_ctrl_node *cmd_node = NULL;
- unsigned long cmd_flags;
if ((adapter->curr_cmd) &&
(adapter->curr_cmd->wait_q_enabled)) {
- spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags);
+ spin_lock_bh(&adapter->mwifiex_cmd_lock);
cmd_node = adapter->curr_cmd;
/* setting curr_cmd to NULL is quite dangerous, because
* mwifiex_process_cmdresp checks curr_cmd to be != NULL
@@ -1103,7 +1098,7 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
* at that point
*/
adapter->curr_cmd = NULL;
- spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
+ spin_unlock_bh(&adapter->mwifiex_cmd_lock);
mwifiex_recycle_cmd_node(adapter, cmd_node);
}
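
Two related changes run through cmdevt.c: the command node field is renamed from cmd_oid to cmd_no and is (re)filled with the real command id when the frame is downloaded in mwifiex_dnld_cmd_to_fw(), and mwifiex_process_cmdresp() now checks that the response id matches the outstanding command before it cancels the command timer, so a stray or mismatched response can no longer mask a genuine command timeout. A hedged sketch of the matching check; the struct and mask below are illustrative, not the mwifiex definitions:

	#include <linux/types.h>

	#define DEMO_CMD_ID_MASK 0x0fff		/* illustrative mask */

	struct demo_cmd_node {
		u16 cmd_no;			/* id of the command we downloaded */
	};

	/* Only treat @resp_no as the answer to @node if the ids match;
	 * otherwise leave the command timer running. */
	static bool demo_resp_matches(const struct demo_cmd_node *node, u16 resp_no)
	{
		return node->cmd_no == (resp_no & DEMO_CMD_ID_MASK);
	}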
diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
index b73f99dc5a72..1fb76d2f5d3f 100644
--- a/drivers/net/wireless/marvell/mwifiex/fw.h
+++ b/drivers/net/wireless/marvell/mwifiex/fw.h
@@ -1759,9 +1759,10 @@ struct mwifiex_ie_types_wmm_queue_status {
struct ieee_types_vendor_header {
u8 element_id;
u8 len;
- u8 oui[4]; /* 0~2: oui, 3: oui_type */
- u8 oui_subtype;
- u8 version;
+ struct {
+ u8 oui[3];
+ u8 oui_type;
+ } __packed oui;
} __packed;
struct ieee_types_wmm_parameter {
@@ -1775,6 +1776,9 @@ struct ieee_types_wmm_parameter {
* Version [1]
*/
struct ieee_types_vendor_header vend_hdr;
+ u8 oui_subtype;
+ u8 version;
+
u8 qos_info_bitmap;
u8 reserved;
struct ieee_types_wmm_ac_parameters ac_params[IEEE80211_NUM_ACS];
@@ -1792,6 +1796,8 @@ struct ieee_types_wmm_info {
* Version [1]
*/
struct ieee_types_vendor_header vend_hdr;
+ u8 oui_subtype;
+ u8 version;
u8 qos_info_bitmap;
} __packed;
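
The fw.h change splits the old oversized vendor header (oui[4] plus oui_subtype and version) into a nested 3-byte OUI plus a one-byte OUI type, moving oui_subtype and version out into the WMM-specific structures that actually carry them. The payoff is that a single memcmp against &vend_hdr.oui covers the full 4-byte OUI+type, and short-but-valid vendor IEs that stop right after the OUI are no longer over-read. A sketch mirroring the idea, with placeholder names:

	#include <linux/compiler.h>
	#include <linux/string.h>
	#include <linux/types.h>

	struct demo_vendor_hdr {
		u8 element_id;
		u8 len;
		struct {
			u8 oui[3];
			u8 oui_type;
		} __packed oui;
	} __packed;

	/* WMM information/parameter OUI + type (illustrative constant). */
	static const u8 demo_wmm_oui[] = { 0x00, 0x50, 0xf2, 0x02 };

	static bool demo_is_wmm(const struct demo_vendor_hdr *hdr, size_t ie_len)
	{
		/* refuse to read past a short IE before matching OUI + type */
		if (ie_len < sizeof(demo_wmm_oui))
			return false;
		return !memcmp(&hdr->oui, demo_wmm_oui, sizeof(demo_wmm_oui));
	}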
diff --git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c
index 673e89dff0b5..6c0e52eb8794 100644
--- a/drivers/net/wireless/marvell/mwifiex/init.c
+++ b/drivers/net/wireless/marvell/mwifiex/init.c
@@ -36,7 +36,6 @@ static int mwifiex_add_bss_prio_tbl(struct mwifiex_private *priv)
struct mwifiex_adapter *adapter = priv->adapter;
struct mwifiex_bss_prio_node *bss_prio;
struct mwifiex_bss_prio_tbl *tbl = adapter->bss_prio_tbl;
- unsigned long flags;
bss_prio = kzalloc(sizeof(struct mwifiex_bss_prio_node), GFP_KERNEL);
if (!bss_prio)
@@ -45,9 +44,9 @@ static int mwifiex_add_bss_prio_tbl(struct mwifiex_private *priv)
bss_prio->priv = priv;
INIT_LIST_HEAD(&bss_prio->list);
- spin_lock_irqsave(&tbl[priv->bss_priority].bss_prio_lock, flags);
+ spin_lock_bh(&tbl[priv->bss_priority].bss_prio_lock);
list_add_tail(&bss_prio->list, &tbl[priv->bss_priority].bss_prio_head);
- spin_unlock_irqrestore(&tbl[priv->bss_priority].bss_prio_lock, flags);
+ spin_unlock_bh(&tbl[priv->bss_priority].bss_prio_lock);
return 0;
}
@@ -344,11 +343,9 @@ void mwifiex_set_trans_start(struct net_device *dev)
void mwifiex_wake_up_net_dev_queue(struct net_device *netdev,
struct mwifiex_adapter *adapter)
{
- unsigned long dev_queue_flags;
-
- spin_lock_irqsave(&adapter->queue_lock, dev_queue_flags);
+ spin_lock_bh(&adapter->queue_lock);
netif_tx_wake_all_queues(netdev);
- spin_unlock_irqrestore(&adapter->queue_lock, dev_queue_flags);
+ spin_unlock_bh(&adapter->queue_lock);
}
/*
@@ -357,11 +354,9 @@ void mwifiex_wake_up_net_dev_queue(struct net_device *netdev,
void mwifiex_stop_net_dev_queue(struct net_device *netdev,
struct mwifiex_adapter *adapter)
{
- unsigned long dev_queue_flags;
-
- spin_lock_irqsave(&adapter->queue_lock, dev_queue_flags);
+ spin_lock_bh(&adapter->queue_lock);
netif_tx_stop_all_queues(netdev);
- spin_unlock_irqrestore(&adapter->queue_lock, dev_queue_flags);
+ spin_unlock_bh(&adapter->queue_lock);
}
/*
@@ -506,7 +501,6 @@ int mwifiex_init_fw(struct mwifiex_adapter *adapter)
struct mwifiex_private *priv;
u8 i, first_sta = true;
int is_cmd_pend_q_empty;
- unsigned long flags;
adapter->hw_status = MWIFIEX_HW_STATUS_INITIALIZING;
@@ -547,9 +541,9 @@ int mwifiex_init_fw(struct mwifiex_adapter *adapter)
}
}
- spin_lock_irqsave(&adapter->cmd_pending_q_lock, flags);
+ spin_lock_bh(&adapter->cmd_pending_q_lock);
is_cmd_pend_q_empty = list_empty(&adapter->cmd_pending_q);
- spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, flags);
+ spin_unlock_bh(&adapter->cmd_pending_q_lock);
if (!is_cmd_pend_q_empty) {
/* Send the first command in queue and return */
if (mwifiex_main_process(adapter) != -1)
@@ -574,7 +568,6 @@ static void mwifiex_delete_bss_prio_tbl(struct mwifiex_private *priv)
struct mwifiex_bss_prio_node *bssprio_node, *tmp_node;
struct list_head *head;
spinlock_t *lock; /* bss priority lock */
- unsigned long flags;
for (i = 0; i < adapter->priv_num; ++i) {
head = &adapter->bss_prio_tbl[i].bss_prio_head;
@@ -586,7 +579,7 @@ static void mwifiex_delete_bss_prio_tbl(struct mwifiex_private *priv)
priv->bss_type, priv->bss_num, i, head);
{
- spin_lock_irqsave(lock, flags);
+ spin_lock_bh(lock);
list_for_each_entry_safe(bssprio_node, tmp_node, head,
list) {
if (bssprio_node->priv == priv) {
@@ -598,7 +591,7 @@ static void mwifiex_delete_bss_prio_tbl(struct mwifiex_private *priv)
kfree(bssprio_node);
}
}
- spin_unlock_irqrestore(lock, flags);
+ spin_unlock_bh(lock);
}
}
}
@@ -630,7 +623,6 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter)
{
struct mwifiex_private *priv;
s32 i;
- unsigned long flags;
struct sk_buff *skb;
/* mwifiex already shutdown */
@@ -665,7 +657,7 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter)
while ((skb = skb_dequeue(&adapter->tx_data_q)))
mwifiex_write_data_complete(adapter, skb, 0, 0);
- spin_lock_irqsave(&adapter->rx_proc_lock, flags);
+ spin_lock_bh(&adapter->rx_proc_lock);
while ((skb = skb_dequeue(&adapter->rx_data_q))) {
struct mwifiex_rxinfo *rx_info = MWIFIEX_SKB_RXCB(skb);
@@ -678,7 +670,7 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter)
dev_kfree_skb_any(skb);
}
- spin_unlock_irqrestore(&adapter->rx_proc_lock, flags);
+ spin_unlock_bh(&adapter->rx_proc_lock);
mwifiex_adapter_cleanup(adapter);
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index f6da8edab7f1..a9657ae6d782 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -173,30 +173,27 @@ EXPORT_SYMBOL_GPL(mwifiex_queue_main_work);
static void mwifiex_queue_rx_work(struct mwifiex_adapter *adapter)
{
- unsigned long flags;
-
- spin_lock_irqsave(&adapter->rx_proc_lock, flags);
+ spin_lock_bh(&adapter->rx_proc_lock);
if (adapter->rx_processing) {
- spin_unlock_irqrestore(&adapter->rx_proc_lock, flags);
+ spin_unlock_bh(&adapter->rx_proc_lock);
} else {
- spin_unlock_irqrestore(&adapter->rx_proc_lock, flags);
+ spin_unlock_bh(&adapter->rx_proc_lock);
queue_work(adapter->rx_workqueue, &adapter->rx_work);
}
}
static int mwifiex_process_rx(struct mwifiex_adapter *adapter)
{
- unsigned long flags;
struct sk_buff *skb;
struct mwifiex_rxinfo *rx_info;
- spin_lock_irqsave(&adapter->rx_proc_lock, flags);
+ spin_lock_bh(&adapter->rx_proc_lock);
if (adapter->rx_processing || adapter->rx_locked) {
- spin_unlock_irqrestore(&adapter->rx_proc_lock, flags);
+ spin_unlock_bh(&adapter->rx_proc_lock);
goto exit_rx_proc;
} else {
adapter->rx_processing = true;
- spin_unlock_irqrestore(&adapter->rx_proc_lock, flags);
+ spin_unlock_bh(&adapter->rx_proc_lock);
}
/* Check for Rx data */
@@ -219,9 +216,9 @@ static int mwifiex_process_rx(struct mwifiex_adapter *adapter)
mwifiex_handle_rx_packet(adapter, skb);
}
}
- spin_lock_irqsave(&adapter->rx_proc_lock, flags);
+ spin_lock_bh(&adapter->rx_proc_lock);
adapter->rx_processing = false;
- spin_unlock_irqrestore(&adapter->rx_proc_lock, flags);
+ spin_unlock_bh(&adapter->rx_proc_lock);
exit_rx_proc:
return 0;
@@ -825,13 +822,12 @@ mwifiex_clone_skb_for_tx_status(struct mwifiex_private *priv,
skb = skb_clone(skb, GFP_ATOMIC);
if (skb) {
- unsigned long flags;
int id;
- spin_lock_irqsave(&priv->ack_status_lock, flags);
+ spin_lock_bh(&priv->ack_status_lock);
id = idr_alloc(&priv->ack_status_frames, orig_skb,
1, 0x10, GFP_ATOMIC);
- spin_unlock_irqrestore(&priv->ack_status_lock, flags);
+ spin_unlock_bh(&priv->ack_status_lock);
if (id >= 0) {
tx_info = MWIFIEX_SKB_TXCB(skb);
@@ -960,10 +956,10 @@ int mwifiex_set_mac_address(struct mwifiex_private *priv,
mac_addr = old_mac_addr;
- if (priv->bss_type == MWIFIEX_BSS_TYPE_P2P)
+ if (priv->bss_type == MWIFIEX_BSS_TYPE_P2P) {
mac_addr |= BIT_ULL(MWIFIEX_MAC_LOCAL_ADMIN_BIT);
-
- if (mwifiex_get_intf_num(priv->adapter, priv->bss_type) > 1) {
+ mac_addr += priv->bss_num;
+ } else if (priv->adapter->priv[0] != priv) {
/* Set mac address based on bss_type/bss_num */
mac_addr ^= BIT_ULL(priv->bss_type + 8);
mac_addr += priv->bss_num;
@@ -1354,12 +1350,11 @@ void mwifiex_init_priv_params(struct mwifiex_private *priv,
*/
int is_command_pending(struct mwifiex_adapter *adapter)
{
- unsigned long flags;
int is_cmd_pend_q_empty;
- spin_lock_irqsave(&adapter->cmd_pending_q_lock, flags);
+ spin_lock_bh(&adapter->cmd_pending_q_lock);
is_cmd_pend_q_empty = list_empty(&adapter->cmd_pending_q);
- spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, flags);
+ spin_unlock_bh(&adapter->cmd_pending_q_lock);
return !is_cmd_pend_q_empty;
}
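
The main.c MAC-address hunk changes how per-interface addresses are derived from the base address: P2P interfaces now set the locally-administered bit and add bss_num, while any interface other than the primary priv XORs in a bit keyed on bss_type and adds bss_num, replacing the old mwifiex_get_intf_num() test. A sketch of the arithmetic on a u64-held address; the bit position and branch names are illustrative assumptions, not driver constants:

	#include <linux/bits.h>
	#include <linux/types.h>

	#define DEMO_LOCAL_ADMIN_BIT 41	/* assumed position of the local-admin bit */

	static u64 demo_derive_mac(u64 base_mac, bool is_p2p, u8 bss_type,
				   u8 bss_num, bool is_primary)
	{
		u64 mac = base_mac;

		if (is_p2p) {
			mac |= BIT_ULL(DEMO_LOCAL_ADMIN_BIT);
			mac += bss_num;
		} else if (!is_primary) {
			mac ^= BIT_ULL(bss_type + 8);
			mac += bss_num;
		}
		return mac;
	}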
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index b025ba164412..3e442c7f7882 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -747,7 +747,7 @@ struct mwifiex_bss_prio_tbl {
struct cmd_ctrl_node {
struct list_head list;
struct mwifiex_private *priv;
- u32 cmd_oid;
+ u32 cmd_no;
u32 cmd_flag;
struct sk_buff *cmd_skb;
struct sk_buff *resp_skb;
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index 3fe81b2a929a..b54f73e3d508 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -2924,10 +2924,9 @@ static int mwifiex_init_pcie(struct mwifiex_adapter *adapter)
pci_set_master(pdev);
- pr_notice("try set_consistent_dma_mask(32)\n");
ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (ret) {
- pr_err("set_dma_mask(32) failed\n");
+ pr_err("set_dma_mask(32) failed: %d\n", ret);
goto err_set_dma_mask;
}
@@ -2960,7 +2959,7 @@ static int mwifiex_init_pcie(struct mwifiex_adapter *adapter)
goto err_iomap2;
}
- pr_notice("PCI memory map Virt0: %p PCI memory map Virt2: %p\n",
+ pr_notice("PCI memory map Virt0: %pK PCI memory map Virt2: %pK\n",
card->pci_mmap, card->pci_mmap1);
ret = mwifiex_pcie_alloc_buffers(adapter);
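
The pcie.c changes are logging hardening rather than behaviour: the redundant "try set_consistent_dma_mask" notice is dropped, the DMA-mask failure now reports the return code, and the BAR virtual addresses are printed with %pK so they are hashed or zeroed for unprivileged readers depending on kptr_restrict. A tiny sketch of the specifier difference, with a hypothetical driver name:

	#include <linux/compiler.h>
	#include <linux/printk.h>

	static void demo_log_mapping(void __iomem *base, int err)
	{
		if (err)
			pr_err("demo: set_dma_mask(32) failed: %d\n", err);
		else
			pr_notice("demo: mapped BAR at %pK\n", base);	/* not %p */
	}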
diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
index c269a0de9413..0d6d41727037 100644
--- a/drivers/net/wireless/marvell/mwifiex/scan.c
+++ b/drivers/net/wireless/marvell/mwifiex/scan.c
@@ -1361,21 +1361,25 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
break;
case WLAN_EID_VENDOR_SPECIFIC:
- if (element_len + 2 < sizeof(vendor_ie->vend_hdr))
- return -EINVAL;
-
vendor_ie = (struct ieee_types_vendor_specific *)
current_ptr;
- if (!memcmp
- (vendor_ie->vend_hdr.oui, wpa_oui,
- sizeof(wpa_oui))) {
+ /* 802.11 requires at least 3-byte OUI. */
+ if (element_len < sizeof(vendor_ie->vend_hdr.oui.oui))
+ return -EINVAL;
+
+ /* Not long enough for a match? Skip it. */
+ if (element_len < sizeof(wpa_oui))
+ break;
+
+ if (!memcmp(&vendor_ie->vend_hdr.oui, wpa_oui,
+ sizeof(wpa_oui))) {
bss_entry->bcn_wpa_ie =
(struct ieee_types_vendor_specific *)
current_ptr;
bss_entry->wpa_offset = (u16)
(current_ptr - bss_entry->beacon_buf);
- } else if (!memcmp(vendor_ie->vend_hdr.oui, wmm_oui,
+ } else if (!memcmp(&vendor_ie->vend_hdr.oui, wmm_oui,
sizeof(wmm_oui))) {
if (total_ie_len ==
sizeof(struct ieee_types_wmm_parameter) ||
@@ -1500,7 +1504,6 @@ int mwifiex_scan_networks(struct mwifiex_private *priv,
u8 filtered_scan;
u8 scan_current_chan_only;
u8 max_chan_per_scan;
- unsigned long flags;
if (adapter->scan_processing) {
mwifiex_dbg(adapter, WARN,
@@ -1521,9 +1524,9 @@ int mwifiex_scan_networks(struct mwifiex_private *priv,
return -EFAULT;
}
- spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
+ spin_lock_bh(&adapter->mwifiex_cmd_lock);
adapter->scan_processing = true;
- spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
+ spin_unlock_bh(&adapter->mwifiex_cmd_lock);
scan_cfg_out = kzalloc(sizeof(union mwifiex_scan_cmd_config_tlv),
GFP_KERNEL);
@@ -1551,13 +1554,12 @@ int mwifiex_scan_networks(struct mwifiex_private *priv,
/* Get scan command from scan_pending_q and put to cmd_pending_q */
if (!ret) {
- spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
+ spin_lock_bh(&adapter->scan_pending_q_lock);
if (!list_empty(&adapter->scan_pending_q)) {
cmd_node = list_first_entry(&adapter->scan_pending_q,
struct cmd_ctrl_node, list);
list_del(&cmd_node->list);
- spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
- flags);
+ spin_unlock_bh(&adapter->scan_pending_q_lock);
mwifiex_insert_cmd_to_pending_q(adapter, cmd_node);
queue_work(adapter->workqueue, &adapter->main_work);
@@ -1568,8 +1570,7 @@ int mwifiex_scan_networks(struct mwifiex_private *priv,
mwifiex_wait_queue_complete(adapter, cmd_node);
}
} else {
- spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
- flags);
+ spin_unlock_bh(&adapter->scan_pending_q_lock);
}
}
@@ -1577,9 +1578,9 @@ int mwifiex_scan_networks(struct mwifiex_private *priv,
kfree(scan_chan_list);
done:
if (ret) {
- spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
+ spin_lock_bh(&adapter->mwifiex_cmd_lock);
adapter->scan_processing = false;
- spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
+ spin_unlock_bh(&adapter->mwifiex_cmd_lock);
}
return ret;
}
@@ -1715,7 +1716,6 @@ static int mwifiex_update_curr_bss_params(struct mwifiex_private *priv,
{
struct mwifiex_bssdescriptor *bss_desc;
int ret;
- unsigned long flags;
/* Allocate and fill new bss descriptor */
bss_desc = kzalloc(sizeof(struct mwifiex_bssdescriptor), GFP_KERNEL);
@@ -1730,7 +1730,7 @@ static int mwifiex_update_curr_bss_params(struct mwifiex_private *priv,
if (ret)
goto done;
- spin_lock_irqsave(&priv->curr_bcn_buf_lock, flags);
+ spin_lock_bh(&priv->curr_bcn_buf_lock);
/* Make a copy of current BSSID descriptor */
memcpy(&priv->curr_bss_params.bss_descriptor, bss_desc,
sizeof(priv->curr_bss_params.bss_descriptor));
@@ -1739,7 +1739,7 @@ static int mwifiex_update_curr_bss_params(struct mwifiex_private *priv,
* in mwifiex_save_curr_bcn()
*/
mwifiex_save_curr_bcn(priv);
- spin_unlock_irqrestore(&priv->curr_bcn_buf_lock, flags);
+ spin_unlock_bh(&priv->curr_bcn_buf_lock);
done:
/* beacon_ie buffer was allocated in function
@@ -1993,15 +1993,14 @@ static void mwifiex_check_next_scan_command(struct mwifiex_private *priv)
{
struct mwifiex_adapter *adapter = priv->adapter;
struct cmd_ctrl_node *cmd_node;
- unsigned long flags;
- spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
+ spin_lock_bh(&adapter->scan_pending_q_lock);
if (list_empty(&adapter->scan_pending_q)) {
- spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
+ spin_unlock_bh(&adapter->scan_pending_q_lock);
- spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
+ spin_lock_bh(&adapter->mwifiex_cmd_lock);
adapter->scan_processing = false;
- spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
+ spin_unlock_bh(&adapter->mwifiex_cmd_lock);
mwifiex_active_scan_req_for_passive_chan(priv);
@@ -2025,13 +2024,13 @@ static void mwifiex_check_next_scan_command(struct mwifiex_private *priv)
}
} else if ((priv->scan_aborting && !priv->scan_request) ||
priv->scan_block) {
- spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
+ spin_unlock_bh(&adapter->scan_pending_q_lock);
mwifiex_cancel_pending_scan_cmd(adapter);
- spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
+ spin_lock_bh(&adapter->mwifiex_cmd_lock);
adapter->scan_processing = false;
- spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
+ spin_unlock_bh(&adapter->mwifiex_cmd_lock);
if (!adapter->active_scan_triggered) {
if (priv->scan_request) {
@@ -2057,7 +2056,7 @@ static void mwifiex_check_next_scan_command(struct mwifiex_private *priv)
cmd_node = list_first_entry(&adapter->scan_pending_q,
struct cmd_ctrl_node, list);
list_del(&cmd_node->list);
- spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
+ spin_unlock_bh(&adapter->scan_pending_q_lock);
mwifiex_insert_cmd_to_pending_q(adapter, cmd_node);
}
@@ -2067,15 +2066,14 @@ static void mwifiex_check_next_scan_command(struct mwifiex_private *priv)
void mwifiex_cancel_scan(struct mwifiex_adapter *adapter)
{
struct mwifiex_private *priv;
- unsigned long cmd_flags;
int i;
mwifiex_cancel_pending_scan_cmd(adapter);
if (adapter->scan_processing) {
- spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags);
+ spin_lock_bh(&adapter->mwifiex_cmd_lock);
adapter->scan_processing = false;
- spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
+ spin_unlock_bh(&adapter->mwifiex_cmd_lock);
for (i = 0; i < adapter->priv_num; i++) {
priv = adapter->priv[i];
if (!priv)
@@ -2557,7 +2555,6 @@ int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv,
struct host_cmd_ds_command *cmd_ptr;
struct cmd_ctrl_node *cmd_node;
- unsigned long cmd_flags, scan_flags;
bool complete_scan = false;
mwifiex_dbg(adapter, INFO, "info: EXT scan returns successfully\n");
@@ -2592,8 +2589,8 @@ int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv,
sizeof(struct mwifiex_ie_types_header));
}
- spin_lock_irqsave(&adapter->cmd_pending_q_lock, cmd_flags);
- spin_lock_irqsave(&adapter->scan_pending_q_lock, scan_flags);
+ spin_lock_bh(&adapter->cmd_pending_q_lock);
+ spin_lock_bh(&adapter->scan_pending_q_lock);
if (list_empty(&adapter->scan_pending_q)) {
complete_scan = true;
list_for_each_entry(cmd_node, &adapter->cmd_pending_q, list) {
@@ -2607,8 +2604,8 @@ int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv,
}
}
}
- spin_unlock_irqrestore(&adapter->scan_pending_q_lock, scan_flags);
- spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, cmd_flags);
+ spin_unlock_bh(&adapter->scan_pending_q_lock);
+ spin_unlock_bh(&adapter->cmd_pending_q_lock);
if (complete_scan)
mwifiex_complete_scan(priv);
@@ -2780,13 +2777,12 @@ mwifiex_queue_scan_cmd(struct mwifiex_private *priv,
struct cmd_ctrl_node *cmd_node)
{
struct mwifiex_adapter *adapter = priv->adapter;
- unsigned long flags;
cmd_node->wait_q_enabled = true;
cmd_node->condition = &adapter->scan_wait_q_woken;
- spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
+ spin_lock_bh(&adapter->scan_pending_q_lock);
list_add_tail(&cmd_node->list, &adapter->scan_pending_q);
- spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
+ spin_unlock_bh(&adapter->scan_pending_q_lock);
}
/*
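
The scan.c vendor-IE hunk relaxes the old length check, which rejected any vendor-specific IE shorter than the (previously oversized) vend_hdr and so broke spec-compliant short IEs. The new logic only demands the mandatory 3-byte OUI, silently skips IEs too short to match a 4-byte OUI+type, and compares against &vend_hdr.oui. A sketch of that tolerant parse, with illustrative names:

	#include <linux/errno.h>
	#include <linux/string.h>
	#include <linux/types.h>

	static const u8 demo_wpa_oui[] = { 0x00, 0x50, 0xf2, 0x01 };

	static int demo_check_vendor_ie(const u8 *oui_start, u8 element_len,
					bool *is_wpa)
	{
		*is_wpa = false;

		if (element_len < 3)		/* 802.11 requires at least the OUI */
			return -EINVAL;
		if (element_len < sizeof(demo_wpa_oui))
			return 0;		/* valid, just too short to match */

		*is_wpa = !memcmp(oui_start, demo_wpa_oui, sizeof(demo_wpa_oui));
		return 0;
	}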
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
index 24b33e20e7a9..20c206da0631 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
@@ -46,7 +46,6 @@ mwifiex_process_cmdresp_error(struct mwifiex_private *priv,
{
struct mwifiex_adapter *adapter = priv->adapter;
struct host_cmd_ds_802_11_ps_mode_enh *pm;
- unsigned long flags;
mwifiex_dbg(adapter, ERROR,
"CMD_RESP: cmd %#x error, result=%#x\n",
@@ -87,9 +86,9 @@ mwifiex_process_cmdresp_error(struct mwifiex_private *priv,
/* Handling errors here */
mwifiex_recycle_cmd_node(adapter, adapter->curr_cmd);
- spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
+ spin_lock_bh(&adapter->mwifiex_cmd_lock);
adapter->curr_cmd = NULL;
- spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
+ spin_unlock_bh(&adapter->mwifiex_cmd_lock);
}
/*
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c
index 8b3123cb84c8..5fdffb114913 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_event.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c
@@ -345,7 +345,6 @@ static void mwifiex_process_uap_tx_pause(struct mwifiex_private *priv,
{
struct mwifiex_tx_pause_tlv *tp;
struct mwifiex_sta_node *sta_ptr;
- unsigned long flags;
tp = (void *)tlv;
mwifiex_dbg(priv->adapter, EVENT,
@@ -361,14 +360,14 @@ static void mwifiex_process_uap_tx_pause(struct mwifiex_private *priv,
} else if (is_multicast_ether_addr(tp->peermac)) {
mwifiex_update_ralist_tx_pause(priv, tp->peermac, tp->tx_pause);
} else {
- spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+ spin_lock_bh(&priv->sta_list_spinlock);
sta_ptr = mwifiex_get_sta_entry(priv, tp->peermac);
if (sta_ptr && sta_ptr->tx_pause != tp->tx_pause) {
sta_ptr->tx_pause = tp->tx_pause;
mwifiex_update_ralist_tx_pause(priv, tp->peermac,
tp->tx_pause);
}
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+ spin_unlock_bh(&priv->sta_list_spinlock);
}
}
@@ -378,7 +377,6 @@ static void mwifiex_process_sta_tx_pause(struct mwifiex_private *priv,
struct mwifiex_tx_pause_tlv *tp;
struct mwifiex_sta_node *sta_ptr;
int status;
- unsigned long flags;
tp = (void *)tlv;
mwifiex_dbg(priv->adapter, EVENT,
@@ -397,7 +395,7 @@ static void mwifiex_process_sta_tx_pause(struct mwifiex_private *priv,
status = mwifiex_get_tdls_link_status(priv, tp->peermac);
if (mwifiex_is_tdls_link_setup(status)) {
- spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+ spin_lock_bh(&priv->sta_list_spinlock);
sta_ptr = mwifiex_get_sta_entry(priv, tp->peermac);
if (sta_ptr && sta_ptr->tx_pause != tp->tx_pause) {
sta_ptr->tx_pause = tp->tx_pause;
@@ -405,7 +403,7 @@ static void mwifiex_process_sta_tx_pause(struct mwifiex_private *priv,
tp->peermac,
tp->tx_pause);
}
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+ spin_unlock_bh(&priv->sta_list_spinlock);
}
}
}
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
index ebc0e41e5d3b..74e50566db1f 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
@@ -1351,7 +1351,7 @@ mwifiex_set_gen_ie_helper(struct mwifiex_private *priv, u8 *ie_data_ptr,
/* Test to see if it is a WPA IE, if not, then
* it is a gen IE
*/
- if (!memcmp(pvendor_ie->oui, wpa_oui,
+ if (!memcmp(&pvendor_ie->oui, wpa_oui,
sizeof(wpa_oui))) {
/* IE is a WPA/WPA2 IE so call set_wpa function
*/
@@ -1361,7 +1361,7 @@ mwifiex_set_gen_ie_helper(struct mwifiex_private *priv, u8 *ie_data_ptr,
goto next_ie;
}
- if (!memcmp(pvendor_ie->oui, wps_oui,
+ if (!memcmp(&pvendor_ie->oui, wps_oui,
sizeof(wps_oui))) {
/* Test to see if it is a WPS IE,
* if so, enable wps session flag
diff --git a/drivers/net/wireless/marvell/mwifiex/tdls.c b/drivers/net/wireless/marvell/mwifiex/tdls.c
index 27779d7317fd..18e654dc34c6 100644
--- a/drivers/net/wireless/marvell/mwifiex/tdls.c
+++ b/drivers/net/wireless/marvell/mwifiex/tdls.c
@@ -33,12 +33,11 @@ static void mwifiex_restore_tdls_packets(struct mwifiex_private *priv,
struct list_head *tid_list;
struct sk_buff *skb, *tmp;
struct mwifiex_txinfo *tx_info;
- unsigned long flags;
u32 tid;
u8 tid_down;
mwifiex_dbg(priv->adapter, DATA, "%s: %pM\n", __func__, mac);
- spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
+ spin_lock_bh(&priv->wmm.ra_list_spinlock);
skb_queue_walk_safe(&priv->tdls_txq, skb, tmp) {
if (!ether_addr_equal(mac, skb->data))
@@ -78,7 +77,7 @@ static void mwifiex_restore_tdls_packets(struct mwifiex_private *priv,
atomic_inc(&priv->wmm.tx_pkts_queued);
}
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
return;
}
@@ -88,11 +87,10 @@ static void mwifiex_hold_tdls_packets(struct mwifiex_private *priv,
struct mwifiex_ra_list_tbl *ra_list;
struct list_head *ra_list_head;
struct sk_buff *skb, *tmp;
- unsigned long flags;
int i;
mwifiex_dbg(priv->adapter, DATA, "%s: %pM\n", __func__, mac);
- spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
+ spin_lock_bh(&priv->wmm.ra_list_spinlock);
for (i = 0; i < MAX_NUM_TID; i++) {
if (!list_empty(&priv->wmm.tid_tbl_ptr[i].ra_list)) {
@@ -111,7 +109,7 @@ static void mwifiex_hold_tdls_packets(struct mwifiex_private *priv,
}
}
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
return;
}
@@ -1070,7 +1068,6 @@ mwifiex_tdls_process_disable_link(struct mwifiex_private *priv, const u8 *peer)
{
struct mwifiex_sta_node *sta_ptr;
struct mwifiex_ds_tdls_oper tdls_oper;
- unsigned long flags;
memset(&tdls_oper, 0, sizeof(struct mwifiex_ds_tdls_oper));
sta_ptr = mwifiex_get_sta_entry(priv, peer);
@@ -1078,11 +1075,9 @@ mwifiex_tdls_process_disable_link(struct mwifiex_private *priv, const u8 *peer)
if (sta_ptr) {
if (sta_ptr->is_11n_enabled) {
mwifiex_11n_cleanup_reorder_tbl(priv);
- spin_lock_irqsave(&priv->wmm.ra_list_spinlock,
- flags);
+ spin_lock_bh(&priv->wmm.ra_list_spinlock);
mwifiex_11n_delete_all_tx_ba_stream_tbl(priv);
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
- flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
}
mwifiex_del_sta_entry(priv, peer);
}
@@ -1100,7 +1095,6 @@ mwifiex_tdls_process_enable_link(struct mwifiex_private *priv, const u8 *peer)
{
struct mwifiex_sta_node *sta_ptr;
struct ieee80211_mcs_info mcs;
- unsigned long flags;
int i;
sta_ptr = mwifiex_get_sta_entry(priv, peer);
@@ -1145,11 +1139,9 @@ mwifiex_tdls_process_enable_link(struct mwifiex_private *priv, const u8 *peer)
"tdls: enable link %pM failed\n", peer);
if (sta_ptr) {
mwifiex_11n_cleanup_reorder_tbl(priv);
- spin_lock_irqsave(&priv->wmm.ra_list_spinlock,
- flags);
+ spin_lock_bh(&priv->wmm.ra_list_spinlock);
mwifiex_11n_delete_all_tx_ba_stream_tbl(priv);
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
- flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
mwifiex_del_sta_entry(priv, peer);
}
mwifiex_restore_tdls_packets(priv, peer, TDLS_LINK_TEARDOWN);
@@ -1194,7 +1186,6 @@ int mwifiex_get_tdls_list(struct mwifiex_private *priv,
struct mwifiex_sta_node *sta_ptr;
struct tdls_peer_info *peer = buf;
int count = 0;
- unsigned long flags;
if (!ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info))
return 0;
@@ -1203,7 +1194,7 @@ int mwifiex_get_tdls_list(struct mwifiex_private *priv,
if (!(priv->bss_type == MWIFIEX_BSS_TYPE_STA && priv->media_connected))
return 0;
- spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+ spin_lock_bh(&priv->sta_list_spinlock);
list_for_each_entry(sta_ptr, &priv->sta_list, list) {
if (mwifiex_is_tdls_link_setup(sta_ptr->tdls_status)) {
ether_addr_copy(peer->peer_addr, sta_ptr->mac_addr);
@@ -1213,7 +1204,7 @@ int mwifiex_get_tdls_list(struct mwifiex_private *priv,
break;
}
}
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+ spin_unlock_bh(&priv->sta_list_spinlock);
return count;
}
@@ -1222,7 +1213,6 @@ void mwifiex_disable_all_tdls_links(struct mwifiex_private *priv)
{
struct mwifiex_sta_node *sta_ptr;
struct mwifiex_ds_tdls_oper tdls_oper;
- unsigned long flags;
if (list_empty(&priv->sta_list))
return;
@@ -1232,11 +1222,9 @@ void mwifiex_disable_all_tdls_links(struct mwifiex_private *priv)
if (sta_ptr->is_11n_enabled) {
mwifiex_11n_cleanup_reorder_tbl(priv);
- spin_lock_irqsave(&priv->wmm.ra_list_spinlock,
- flags);
+ spin_lock_bh(&priv->wmm.ra_list_spinlock);
mwifiex_11n_delete_all_tx_ba_stream_tbl(priv);
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
- flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
}
mwifiex_restore_tdls_packets(priv, sta_ptr->mac_addr,
@@ -1256,12 +1244,11 @@ void mwifiex_disable_all_tdls_links(struct mwifiex_private *priv)
int mwifiex_tdls_check_tx(struct mwifiex_private *priv, struct sk_buff *skb)
{
struct mwifiex_auto_tdls_peer *peer;
- unsigned long flags;
u8 mac[ETH_ALEN];
ether_addr_copy(mac, skb->data);
- spin_lock_irqsave(&priv->auto_tdls_lock, flags);
+ spin_lock_bh(&priv->auto_tdls_lock);
list_for_each_entry(peer, &priv->auto_tdls_list, list) {
if (!memcmp(mac, peer->mac_addr, ETH_ALEN)) {
if (peer->rssi <= MWIFIEX_TDLS_RSSI_HIGH &&
@@ -1290,7 +1277,7 @@ int mwifiex_tdls_check_tx(struct mwifiex_private *priv, struct sk_buff *skb)
}
}
}
- spin_unlock_irqrestore(&priv->auto_tdls_lock, flags);
+ spin_unlock_bh(&priv->auto_tdls_lock);
return 0;
}
@@ -1298,33 +1285,31 @@ int mwifiex_tdls_check_tx(struct mwifiex_private *priv, struct sk_buff *skb)
void mwifiex_flush_auto_tdls_list(struct mwifiex_private *priv)
{
struct mwifiex_auto_tdls_peer *peer, *tmp_node;
- unsigned long flags;
- spin_lock_irqsave(&priv->auto_tdls_lock, flags);
+ spin_lock_bh(&priv->auto_tdls_lock);
list_for_each_entry_safe(peer, tmp_node, &priv->auto_tdls_list, list) {
list_del(&peer->list);
kfree(peer);
}
INIT_LIST_HEAD(&priv->auto_tdls_list);
- spin_unlock_irqrestore(&priv->auto_tdls_lock, flags);
+ spin_unlock_bh(&priv->auto_tdls_lock);
priv->check_tdls_tx = false;
}
void mwifiex_add_auto_tdls_peer(struct mwifiex_private *priv, const u8 *mac)
{
struct mwifiex_auto_tdls_peer *tdls_peer;
- unsigned long flags;
if (!priv->adapter->auto_tdls)
return;
- spin_lock_irqsave(&priv->auto_tdls_lock, flags);
+ spin_lock_bh(&priv->auto_tdls_lock);
list_for_each_entry(tdls_peer, &priv->auto_tdls_list, list) {
if (!memcmp(tdls_peer->mac_addr, mac, ETH_ALEN)) {
tdls_peer->tdls_status = TDLS_SETUP_INPROGRESS;
tdls_peer->rssi_jiffies = jiffies;
- spin_unlock_irqrestore(&priv->auto_tdls_lock, flags);
+ spin_unlock_bh(&priv->auto_tdls_lock);
return;
}
}
@@ -1341,19 +1326,18 @@ void mwifiex_add_auto_tdls_peer(struct mwifiex_private *priv, const u8 *mac)
"Add auto TDLS peer= %pM to list\n", mac);
}
- spin_unlock_irqrestore(&priv->auto_tdls_lock, flags);
+ spin_unlock_bh(&priv->auto_tdls_lock);
}
void mwifiex_auto_tdls_update_peer_status(struct mwifiex_private *priv,
const u8 *mac, u8 link_status)
{
struct mwifiex_auto_tdls_peer *peer;
- unsigned long flags;
if (!priv->adapter->auto_tdls)
return;
- spin_lock_irqsave(&priv->auto_tdls_lock, flags);
+ spin_lock_bh(&priv->auto_tdls_lock);
list_for_each_entry(peer, &priv->auto_tdls_list, list) {
if (!memcmp(peer->mac_addr, mac, ETH_ALEN)) {
if ((link_status == TDLS_NOT_SETUP) &&
@@ -1366,19 +1350,18 @@ void mwifiex_auto_tdls_update_peer_status(struct mwifiex_private *priv,
break;
}
}
- spin_unlock_irqrestore(&priv->auto_tdls_lock, flags);
+ spin_unlock_bh(&priv->auto_tdls_lock);
}
void mwifiex_auto_tdls_update_peer_signal(struct mwifiex_private *priv,
u8 *mac, s8 snr, s8 nflr)
{
struct mwifiex_auto_tdls_peer *peer;
- unsigned long flags;
if (!priv->adapter->auto_tdls)
return;
- spin_lock_irqsave(&priv->auto_tdls_lock, flags);
+ spin_lock_bh(&priv->auto_tdls_lock);
list_for_each_entry(peer, &priv->auto_tdls_list, list) {
if (!memcmp(peer->mac_addr, mac, ETH_ALEN)) {
peer->rssi = nflr - snr;
@@ -1386,14 +1369,13 @@ void mwifiex_auto_tdls_update_peer_signal(struct mwifiex_private *priv,
break;
}
}
- spin_unlock_irqrestore(&priv->auto_tdls_lock, flags);
+ spin_unlock_bh(&priv->auto_tdls_lock);
}
void mwifiex_check_auto_tdls(struct timer_list *t)
{
struct mwifiex_private *priv = from_timer(priv, t, auto_tdls_timer);
struct mwifiex_auto_tdls_peer *tdls_peer;
- unsigned long flags;
u16 reason = WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED;
if (WARN_ON_ONCE(!priv || !priv->adapter)) {
@@ -1413,7 +1395,7 @@ void mwifiex_check_auto_tdls(struct timer_list *t)
priv->check_tdls_tx = false;
- spin_lock_irqsave(&priv->auto_tdls_lock, flags);
+ spin_lock_bh(&priv->auto_tdls_lock);
list_for_each_entry(tdls_peer, &priv->auto_tdls_list, list) {
if ((jiffies - tdls_peer->rssi_jiffies) >
(MWIFIEX_AUTO_TDLS_IDLE_TIME * HZ)) {
@@ -1448,7 +1430,7 @@ void mwifiex_check_auto_tdls(struct timer_list *t)
tdls_peer->rssi);
}
}
- spin_unlock_irqrestore(&priv->auto_tdls_lock, flags);
+ spin_unlock_bh(&priv->auto_tdls_lock);
mod_timer(&priv->auto_tdls_timer,
jiffies + msecs_to_jiffies(MWIFIEX_TIMER_10S));
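The tdls.c hunks above, together with the txrx.c, uap_txrx.c, usb.c, util.c and wmm.c hunks that follow, all apply one mechanical conversion: spin_lock_irqsave()/spin_unlock_irqrestore() pairs become spin_lock_bh()/spin_unlock_bh() and the on-stack flags variable disappears, which is only valid if these locks are never acquired from hard-IRQ context. A minimal before/after sketch of the pattern, using a hypothetical demo_ctx rather than the driver's real structures:

#include <linux/spinlock.h>
#include <linux/list.h>

struct demo_ctx {			/* stand-in for mwifiex_private */
	spinlock_t lock;
	struct list_head items;
};

static void demo_flush(struct demo_ctx *ctx)
{
	/* was: unsigned long flags; spin_lock_irqsave(&ctx->lock, flags); */
	spin_lock_bh(&ctx->lock);	/* disable bottom halves on this CPU */
	INIT_LIST_HEAD(&ctx->items);
	/* was: spin_unlock_irqrestore(&ctx->lock, flags); */
	spin_unlock_bh(&ctx->lock);
}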
diff --git a/drivers/net/wireless/marvell/mwifiex/txrx.c b/drivers/net/wireless/marvell/mwifiex/txrx.c
index d848933466d9..e3c1446dd847 100644
--- a/drivers/net/wireless/marvell/mwifiex/txrx.c
+++ b/drivers/net/wireless/marvell/mwifiex/txrx.c
@@ -334,15 +334,14 @@ void mwifiex_parse_tx_status_event(struct mwifiex_private *priv,
{
struct tx_status_event *tx_status = (void *)priv->adapter->event_body;
struct sk_buff *ack_skb;
- unsigned long flags;
struct mwifiex_txinfo *tx_info;
if (!tx_status->tx_token_id)
return;
- spin_lock_irqsave(&priv->ack_status_lock, flags);
+ spin_lock_bh(&priv->ack_status_lock);
ack_skb = idr_remove(&priv->ack_status_frames, tx_status->tx_token_id);
- spin_unlock_irqrestore(&priv->ack_status_lock, flags);
+ spin_unlock_bh(&priv->ack_status_lock);
if (ack_skb) {
tx_info = MWIFIEX_SKB_TXCB(ack_skb);
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
index 5ce85d5727e4..354b09c5e8dc 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
@@ -71,11 +71,10 @@ mwifiex_uap_del_tx_pkts_in_ralist(struct mwifiex_private *priv,
*/
static void mwifiex_uap_cleanup_tx_queues(struct mwifiex_private *priv)
{
- unsigned long flags;
struct list_head *ra_list;
int i;
- spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
+ spin_lock_bh(&priv->wmm.ra_list_spinlock);
for (i = 0; i < MAX_NUM_TID; i++, priv->del_list_idx++) {
if (priv->del_list_idx == MAX_NUM_TID)
@@ -87,7 +86,7 @@ static void mwifiex_uap_cleanup_tx_queues(struct mwifiex_private *priv)
}
}
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
}
@@ -378,7 +377,6 @@ int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv,
struct rx_packet_hdr *rx_pkt_hdr;
u16 rx_pkt_type;
u8 ta[ETH_ALEN], pkt_type;
- unsigned long flags;
struct mwifiex_sta_node *node;
uap_rx_pd = (struct uap_rxpd *)(skb->data);
@@ -413,12 +411,12 @@ int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv,
if (rx_pkt_type != PKT_TYPE_BAR && uap_rx_pd->priority < MAX_NUM_TID) {
- spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+ spin_lock_bh(&priv->sta_list_spinlock);
node = mwifiex_get_sta_entry(priv, ta);
if (node)
node->rx_seq[uap_rx_pd->priority] =
le16_to_cpu(uap_rx_pd->seq_num);
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+ spin_unlock_bh(&priv->sta_list_spinlock);
}
if (!priv->ap_11n_enabled ||
diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c
index d445acc4786b..c2365eeb7016 100644
--- a/drivers/net/wireless/marvell/mwifiex/usb.c
+++ b/drivers/net/wireless/marvell/mwifiex/usb.c
@@ -1128,10 +1128,9 @@ static void mwifiex_usb_tx_aggr_tmo(struct timer_list *t)
from_timer(timer_context, t, hold_timer);
struct mwifiex_adapter *adapter = timer_context->adapter;
struct usb_tx_data_port *port = timer_context->port;
- unsigned long flags;
int err = 0;
- spin_lock_irqsave(&port->tx_aggr_lock, flags);
+ spin_lock_bh(&port->tx_aggr_lock);
err = mwifiex_usb_prepare_tx_aggr_skb(adapter, port, &skb_send);
if (err) {
mwifiex_dbg(adapter, ERROR,
@@ -1158,7 +1157,7 @@ done:
if (err == -1)
mwifiex_write_data_complete(adapter, skb_send, 0, -1);
unlock:
- spin_unlock_irqrestore(&port->tx_aggr_lock, flags);
+ spin_unlock_bh(&port->tx_aggr_lock);
}
/* This function writes a command/data packet to the card. */
@@ -1169,7 +1168,6 @@ static int mwifiex_usb_host_to_card(struct mwifiex_adapter *adapter, u8 ep,
struct usb_card_rec *card = adapter->card;
struct urb_context *context = NULL;
struct usb_tx_data_port *port = NULL;
- unsigned long flags;
int idx, ret;
if (test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) {
@@ -1211,10 +1209,10 @@ static int mwifiex_usb_host_to_card(struct mwifiex_adapter *adapter, u8 ep,
}
if (adapter->bus_aggr.enable) {
- spin_lock_irqsave(&port->tx_aggr_lock, flags);
+ spin_lock_bh(&port->tx_aggr_lock);
ret = mwifiex_usb_aggr_tx_data(adapter, ep, skb,
tx_param, port);
- spin_unlock_irqrestore(&port->tx_aggr_lock, flags);
+ spin_unlock_bh(&port->tx_aggr_lock);
return ret;
}
diff --git a/drivers/net/wireless/marvell/mwifiex/util.c b/drivers/net/wireless/marvell/mwifiex/util.c
index f9b71539d33e..3b0d31827681 100644
--- a/drivers/net/wireless/marvell/mwifiex/util.c
+++ b/drivers/net/wireless/marvell/mwifiex/util.c
@@ -607,12 +607,11 @@ struct mwifiex_sta_node *
mwifiex_add_sta_entry(struct mwifiex_private *priv, const u8 *mac)
{
struct mwifiex_sta_node *node;
- unsigned long flags;
if (!mac)
return NULL;
- spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+ spin_lock_bh(&priv->sta_list_spinlock);
node = mwifiex_get_sta_entry(priv, mac);
if (node)
goto done;
@@ -625,7 +624,7 @@ mwifiex_add_sta_entry(struct mwifiex_private *priv, const u8 *mac)
list_add_tail(&node->list, &priv->sta_list);
done:
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+ spin_unlock_bh(&priv->sta_list_spinlock);
return node;
}
@@ -662,9 +661,8 @@ mwifiex_set_sta_ht_cap(struct mwifiex_private *priv, const u8 *ies,
void mwifiex_del_sta_entry(struct mwifiex_private *priv, const u8 *mac)
{
struct mwifiex_sta_node *node;
- unsigned long flags;
- spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+ spin_lock_bh(&priv->sta_list_spinlock);
node = mwifiex_get_sta_entry(priv, mac);
if (node) {
@@ -672,7 +670,7 @@ void mwifiex_del_sta_entry(struct mwifiex_private *priv, const u8 *mac)
kfree(node);
}
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+ spin_unlock_bh(&priv->sta_list_spinlock);
return;
}
@@ -680,9 +678,8 @@ void mwifiex_del_sta_entry(struct mwifiex_private *priv, const u8 *mac)
void mwifiex_del_all_sta_list(struct mwifiex_private *priv)
{
struct mwifiex_sta_node *node, *tmp;
- unsigned long flags;
- spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+ spin_lock_bh(&priv->sta_list_spinlock);
list_for_each_entry_safe(node, tmp, &priv->sta_list, list) {
list_del(&node->list);
@@ -690,7 +687,7 @@ void mwifiex_del_all_sta_list(struct mwifiex_private *priv)
}
INIT_LIST_HEAD(&priv->sta_list);
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+ spin_unlock_bh(&priv->sta_list_spinlock);
return;
}
diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.c b/drivers/net/wireless/marvell/mwifiex/wmm.c
index 407b9932ca4d..41f0231376c0 100644
--- a/drivers/net/wireless/marvell/mwifiex/wmm.c
+++ b/drivers/net/wireless/marvell/mwifiex/wmm.c
@@ -138,7 +138,6 @@ void mwifiex_ralist_add(struct mwifiex_private *priv, const u8 *ra)
struct mwifiex_ra_list_tbl *ra_list;
struct mwifiex_adapter *adapter = priv->adapter;
struct mwifiex_sta_node *node;
- unsigned long flags;
for (i = 0; i < MAX_NUM_TID; ++i) {
@@ -163,7 +162,7 @@ void mwifiex_ralist_add(struct mwifiex_private *priv, const u8 *ra)
ra_list->is_11n_enabled = IS_11N_ENABLED(priv);
}
} else {
- spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+ spin_lock_bh(&priv->sta_list_spinlock);
node = mwifiex_get_sta_entry(priv, ra);
if (node)
ra_list->tx_paused = node->tx_pause;
@@ -171,7 +170,7 @@ void mwifiex_ralist_add(struct mwifiex_private *priv, const u8 *ra)
mwifiex_is_sta_11n_enabled(priv, node);
if (ra_list->is_11n_enabled)
ra_list->max_amsdu = node->max_amsdu;
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+ spin_unlock_bh(&priv->sta_list_spinlock);
}
mwifiex_dbg(adapter, DATA, "data: ralist %p: is_11n_enabled=%d\n",
@@ -240,7 +239,7 @@ mwifiex_wmm_setup_queue_priorities(struct mwifiex_private *priv,
mwifiex_dbg(priv->adapter, INFO,
"info: WMM Parameter IE: version=%d,\t"
"qos_info Parameter Set Count=%d, Reserved=%#x\n",
- wmm_ie->vend_hdr.version, wmm_ie->qos_info_bitmap &
+ wmm_ie->version, wmm_ie->qos_info_bitmap &
IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK,
wmm_ie->reserved);
@@ -583,11 +582,10 @@ static int mwifiex_free_ack_frame(int id, void *p, void *data)
void
mwifiex_clean_txrx(struct mwifiex_private *priv)
{
- unsigned long flags;
struct sk_buff *skb, *tmp;
mwifiex_11n_cleanup_reorder_tbl(priv);
- spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
+ spin_lock_bh(&priv->wmm.ra_list_spinlock);
mwifiex_wmm_cleanup_queues(priv);
mwifiex_11n_delete_all_tx_ba_stream_tbl(priv);
@@ -601,7 +599,7 @@ mwifiex_clean_txrx(struct mwifiex_private *priv)
if (priv->adapter->if_ops.clean_pcie_ring &&
!test_bit(MWIFIEX_SURPRISE_REMOVED, &priv->adapter->work_flags))
priv->adapter->if_ops.clean_pcie_ring(priv->adapter);
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
skb_queue_walk_safe(&priv->tdls_txq, skb, tmp) {
skb_unlink(skb, &priv->tdls_txq);
@@ -642,10 +640,9 @@ void mwifiex_update_ralist_tx_pause(struct mwifiex_private *priv, u8 *mac,
{
struct mwifiex_ra_list_tbl *ra_list;
u32 pkt_cnt = 0, tx_pkts_queued;
- unsigned long flags;
int i;
- spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
+ spin_lock_bh(&priv->wmm.ra_list_spinlock);
for (i = 0; i < MAX_NUM_TID; ++i) {
ra_list = mwifiex_wmm_get_ralist_node(priv, i, mac);
@@ -671,7 +668,7 @@ void mwifiex_update_ralist_tx_pause(struct mwifiex_private *priv, u8 *mac,
atomic_set(&priv->wmm.tx_pkts_queued, tx_pkts_queued);
atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
}
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
}
/* This function updates non-tdls peer ralist tx_pause while
@@ -682,10 +679,9 @@ void mwifiex_update_ralist_tx_pause_in_tdls_cs(struct mwifiex_private *priv,
{
struct mwifiex_ra_list_tbl *ra_list;
u32 pkt_cnt = 0, tx_pkts_queued;
- unsigned long flags;
int i;
- spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
+ spin_lock_bh(&priv->wmm.ra_list_spinlock);
for (i = 0; i < MAX_NUM_TID; ++i) {
list_for_each_entry(ra_list, &priv->wmm.tid_tbl_ptr[i].ra_list,
@@ -716,7 +712,7 @@ void mwifiex_update_ralist_tx_pause_in_tdls_cs(struct mwifiex_private *priv,
atomic_set(&priv->wmm.tx_pkts_queued, tx_pkts_queued);
atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
}
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
}
/*
@@ -748,10 +744,9 @@ void
mwifiex_wmm_del_peer_ra_list(struct mwifiex_private *priv, const u8 *ra_addr)
{
struct mwifiex_ra_list_tbl *ra_list;
- unsigned long flags;
int i;
- spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
+ spin_lock_bh(&priv->wmm.ra_list_spinlock);
for (i = 0; i < MAX_NUM_TID; ++i) {
ra_list = mwifiex_wmm_get_ralist_node(priv, i, ra_addr);
@@ -767,7 +762,7 @@ mwifiex_wmm_del_peer_ra_list(struct mwifiex_private *priv, const u8 *ra_addr)
list_del(&ra_list->list);
kfree(ra_list);
}
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
}
/*
@@ -818,7 +813,6 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
u32 tid;
struct mwifiex_ra_list_tbl *ra_list;
u8 ra[ETH_ALEN], tid_down;
- unsigned long flags;
struct list_head list_head;
int tdls_status = TDLS_NOT_SETUP;
struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
@@ -844,7 +838,7 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
tid = skb->priority;
- spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
+ spin_lock_bh(&priv->wmm.ra_list_spinlock);
tid_down = mwifiex_wmm_downgrade_tid(priv, tid);
@@ -864,8 +858,7 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
break;
case TDLS_SETUP_INPROGRESS:
skb_queue_tail(&priv->tdls_txq, skb);
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
- flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
return;
default:
list_head = priv->wmm.tid_tbl_ptr[tid_down].ra_list;
@@ -881,7 +874,7 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
}
if (!ra_list) {
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
mwifiex_write_data_complete(adapter, skb, 0, -1);
return;
}
@@ -901,7 +894,7 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
else
atomic_inc(&priv->wmm.tx_pkts_queued);
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
}
/*
@@ -1092,7 +1085,6 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
struct mwifiex_ra_list_tbl *ptr;
struct mwifiex_tid_tbl *tid_ptr;
atomic_t *hqp;
- unsigned long flags_ra;
int i, j;
/* check the BSS with highest priority first */
@@ -1118,8 +1110,7 @@ try_again:
hqp = &priv_tmp->wmm.highest_queued_prio;
for (i = atomic_read(hqp); i >= LOW_PRIO_TID; --i) {
- spin_lock_irqsave(&priv_tmp->wmm.
- ra_list_spinlock, flags_ra);
+ spin_lock_bh(&priv_tmp->wmm.ra_list_spinlock);
tid_ptr = &(priv_tmp)->wmm.
tid_tbl_ptr[tos_to_tid[i]];
@@ -1134,9 +1125,7 @@ try_again:
goto found;
}
- spin_unlock_irqrestore(&priv_tmp->wmm.
- ra_list_spinlock,
- flags_ra);
+ spin_unlock_bh(&priv_tmp->wmm.ra_list_spinlock);
}
if (atomic_read(&priv_tmp->wmm.tx_pkts_queued) != 0) {
@@ -1158,7 +1147,7 @@ found:
/* holds ra_list_spinlock */
if (atomic_read(hqp) > i)
atomic_set(hqp, i);
- spin_unlock_irqrestore(&priv_tmp->wmm.ra_list_spinlock, flags_ra);
+ spin_unlock_bh(&priv_tmp->wmm.ra_list_spinlock);
*priv = priv_tmp;
*tid = tos_to_tid[i];
@@ -1182,24 +1171,23 @@ void mwifiex_rotate_priolists(struct mwifiex_private *priv,
struct mwifiex_adapter *adapter = priv->adapter;
struct mwifiex_bss_prio_tbl *tbl = adapter->bss_prio_tbl;
struct mwifiex_tid_tbl *tid_ptr = &priv->wmm.tid_tbl_ptr[tid];
- unsigned long flags;
- spin_lock_irqsave(&tbl[priv->bss_priority].bss_prio_lock, flags);
+ spin_lock_bh(&tbl[priv->bss_priority].bss_prio_lock);
/*
* dirty trick: we remove 'head' temporarily and reinsert it after
* curr bss node. imagine list to stay fixed while head is moved
*/
list_move(&tbl[priv->bss_priority].bss_prio_head,
&tbl[priv->bss_priority].bss_prio_cur->list);
- spin_unlock_irqrestore(&tbl[priv->bss_priority].bss_prio_lock, flags);
+ spin_unlock_bh(&tbl[priv->bss_priority].bss_prio_lock);
- spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
+ spin_lock_bh(&priv->wmm.ra_list_spinlock);
if (mwifiex_is_ralist_valid(priv, ra, tid)) {
priv->wmm.packets_out[tid]++;
/* same as above */
list_move(&tid_ptr->ra_list, &ra->list);
}
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
}
/*
@@ -1236,8 +1224,7 @@ mwifiex_is_11n_aggragation_possible(struct mwifiex_private *priv,
*/
static void
mwifiex_send_single_packet(struct mwifiex_private *priv,
- struct mwifiex_ra_list_tbl *ptr, int ptr_index,
- unsigned long ra_list_flags)
+ struct mwifiex_ra_list_tbl *ptr, int ptr_index)
__releases(&priv->wmm.ra_list_spinlock)
{
struct sk_buff *skb, *skb_next;
@@ -1246,8 +1233,7 @@ mwifiex_send_single_packet(struct mwifiex_private *priv,
struct mwifiex_txinfo *tx_info;
if (skb_queue_empty(&ptr->skb_head)) {
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
- ra_list_flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
mwifiex_dbg(adapter, DATA, "data: nothing to send\n");
return;
}
@@ -1265,18 +1251,17 @@ mwifiex_send_single_packet(struct mwifiex_private *priv,
else
skb_next = NULL;
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
tx_param.next_pkt_len = ((skb_next) ? skb_next->len +
sizeof(struct txpd) : 0);
if (mwifiex_process_tx(priv, skb, &tx_param) == -EBUSY) {
/* Queue the packet back at the head */
- spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);
+ spin_lock_bh(&priv->wmm.ra_list_spinlock);
if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
- ra_list_flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
mwifiex_write_data_complete(adapter, skb, 0, -1);
return;
}
@@ -1286,8 +1271,7 @@ mwifiex_send_single_packet(struct mwifiex_private *priv,
ptr->total_pkt_count++;
ptr->ba_pkt_count++;
tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
- ra_list_flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
} else {
mwifiex_rotate_priolists(priv, ptr, ptr_index);
atomic_dec(&priv->wmm.tx_pkts_queued);
@@ -1323,8 +1307,7 @@ mwifiex_is_ptr_processed(struct mwifiex_private *priv,
*/
static void
mwifiex_send_processed_packet(struct mwifiex_private *priv,
- struct mwifiex_ra_list_tbl *ptr, int ptr_index,
- unsigned long ra_list_flags)
+ struct mwifiex_ra_list_tbl *ptr, int ptr_index)
__releases(&priv->wmm.ra_list_spinlock)
{
struct mwifiex_tx_param tx_param;
@@ -1334,8 +1317,7 @@ mwifiex_send_processed_packet(struct mwifiex_private *priv,
struct mwifiex_txinfo *tx_info;
if (skb_queue_empty(&ptr->skb_head)) {
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
- ra_list_flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
return;
}
@@ -1343,8 +1325,7 @@ mwifiex_send_processed_packet(struct mwifiex_private *priv,
if (adapter->data_sent || adapter->tx_lock_flag) {
ptr->total_pkt_count--;
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
- ra_list_flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
skb_queue_tail(&adapter->tx_data_q, skb);
atomic_dec(&priv->wmm.tx_pkts_queued);
atomic_inc(&adapter->tx_queued);
@@ -1358,7 +1339,7 @@ mwifiex_send_processed_packet(struct mwifiex_private *priv,
tx_info = MWIFIEX_SKB_TXCB(skb);
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
tx_param.next_pkt_len =
((skb_next) ? skb_next->len +
@@ -1374,11 +1355,10 @@ mwifiex_send_processed_packet(struct mwifiex_private *priv,
switch (ret) {
case -EBUSY:
mwifiex_dbg(adapter, ERROR, "data: -EBUSY is returned\n");
- spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);
+ spin_lock_bh(&priv->wmm.ra_list_spinlock);
if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
- ra_list_flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
mwifiex_write_data_complete(adapter, skb, 0, -1);
return;
}
@@ -1386,8 +1366,7 @@ mwifiex_send_processed_packet(struct mwifiex_private *priv,
skb_queue_tail(&ptr->skb_head, skb);
tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
- ra_list_flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
break;
case -1:
mwifiex_dbg(adapter, ERROR, "host_to_card failed: %#x\n", ret);
@@ -1404,10 +1383,9 @@ mwifiex_send_processed_packet(struct mwifiex_private *priv,
if (ret != -EBUSY) {
mwifiex_rotate_priolists(priv, ptr, ptr_index);
atomic_dec(&priv->wmm.tx_pkts_queued);
- spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);
+ spin_lock_bh(&priv->wmm.ra_list_spinlock);
ptr->total_pkt_count--;
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
- ra_list_flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
}
}
@@ -1423,7 +1401,6 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
int ptr_index = 0;
u8 ra[ETH_ALEN];
int tid_del = 0, tid = 0;
- unsigned long flags;
ptr = mwifiex_wmm_get_highest_priolist_ptr(adapter, &priv, &ptr_index);
if (!ptr)
@@ -1433,14 +1410,14 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
mwifiex_dbg(adapter, DATA, "data: tid=%d\n", tid);
- spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
+ spin_lock_bh(&priv->wmm.ra_list_spinlock);
if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
return -1;
}
if (mwifiex_is_ptr_processed(priv, ptr)) {
- mwifiex_send_processed_packet(priv, ptr, ptr_index, flags);
+ mwifiex_send_processed_packet(priv, ptr, ptr_index);
/* ra_list_spinlock has been freed in
mwifiex_send_processed_packet() */
return 0;
@@ -1455,12 +1432,12 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
mwifiex_is_amsdu_allowed(priv, tid) &&
mwifiex_is_11n_aggragation_possible(priv, ptr,
adapter->tx_buf_size))
- mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index, flags);
+ mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index);
/* ra_list_spinlock has been freed in
* mwifiex_11n_aggregate_pkt()
*/
else
- mwifiex_send_single_packet(priv, ptr, ptr_index, flags);
+ mwifiex_send_single_packet(priv, ptr, ptr_index);
/* ra_list_spinlock has been freed in
* mwifiex_send_single_packet()
*/
@@ -1481,11 +1458,11 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
if (mwifiex_is_amsdu_allowed(priv, tid) &&
mwifiex_is_11n_aggragation_possible(priv, ptr,
adapter->tx_buf_size))
- mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index, flags);
+ mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index);
/* ra_list_spinlock has been freed in
mwifiex_11n_aggregate_pkt() */
else
- mwifiex_send_single_packet(priv, ptr, ptr_index, flags);
+ mwifiex_send_single_packet(priv, ptr, ptr_index);
/* ra_list_spinlock has been freed in
mwifiex_send_single_packet() */
}
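Besides the lock conversion, the wmm.c hunk drops the ra_list_flags argument from mwifiex_send_single_packet() and mwifiex_send_processed_packet(): with spin_lock_bh() there is no saved IRQ state to pass down, and the retained __releases() annotation is enough to document that the callee drops ra_list_spinlock on every return path. A small sketch of that call-with-lock-held idiom; the names are placeholders, not the driver's:

#include <linux/spinlock.h>

/* Caller holds the lock (taken with spin_lock_bh()); this helper is
 * annotated as releasing it on every return path. */
static void demo_send_locked(spinlock_t *lock, bool abort)
	__releases(lock)
{
	if (abort) {
		spin_unlock_bh(lock);	/* early exit still unlocks */
		return;
	}
	/* ... hand the frame to the bus layer here ... */
	spin_unlock_bh(lock);
}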
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 4381155375e1..d8f61e540bfd 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -588,6 +588,7 @@ void mt76_dma_cleanup(struct mt76_dev *dev)
{
int i;
+ netif_napi_del(&dev->tx_napi);
for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++)
mt76_dma_tx_cleanup(dev, i, true);
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index 5b6a81ee457e..ec9efb79985f 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -766,10 +766,21 @@ int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
*dbm = DIV_ROUND_UP(dev->txpower_cur, 2);
/* convert from per-chain power to combined
- * output on 2x2 devices
+ * output power
*/
- if (n_chains > 1)
+ switch (n_chains) {
+ case 4:
+ *dbm += 6;
+ break;
+ case 3:
+ *dbm += 4;
+ break;
+ case 2:
*dbm += 3;
+ break;
+ default:
+ break;
+ }
return 0;
}
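The mt76_get_txpower() change above generalizes the per-chain to combined-power conversion from the old 2x2-only +3 dB to +3/+4/+6 dB for two, three and four chains, which tracks 10*log10(n_chains) rounded down. A standalone check of those offsets (plain C, not driver code; build with cc demo.c -lm):

#include <math.h>
#include <stdio.h>

/* Ideal combined-power gain over a single chain, in dB. */
static double combined_gain_db(int n_chains)
{
	return 10.0 * log10(n_chains);
}

int main(void)
{
	for (int n = 1; n <= 4; n++)
		printf("%d chains -> +%.2f dB\n", n, combined_gain_db(n));
	/* 0.00, 3.01, 4.77, 6.02 -- the patch uses 0, +3, +4, +6 */
	return 0;
}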
@@ -820,3 +831,50 @@ mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
return 0;
}
EXPORT_SYMBOL_GPL(mt76_set_tim);
+
+void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id)
+{
+ struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+ int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
+ u8 *hdr, *pn = status->iv;
+
+ __skb_push(skb, 8);
+ memmove(skb->data, skb->data + 8, hdr_len);
+ hdr = skb->data + hdr_len;
+
+ hdr[0] = pn[5];
+ hdr[1] = pn[4];
+ hdr[2] = 0;
+ hdr[3] = 0x20 | (key_id << 6);
+ hdr[4] = pn[3];
+ hdr[5] = pn[2];
+ hdr[6] = pn[1];
+ hdr[7] = pn[0];
+
+ status->flag &= ~RX_FLAG_IV_STRIPPED;
+}
+EXPORT_SYMBOL_GPL(mt76_insert_ccmp_hdr);
+
+int mt76_get_rate(struct mt76_dev *dev,
+ struct ieee80211_supported_band *sband,
+ int idx, bool cck)
+{
+ int i, offset = 0, len = sband->n_bitrates;
+
+ if (cck) {
+ if (sband == &dev->sband_5g.sband)
+ return 0;
+
+ idx &= ~BIT(2); /* short preamble */
+ } else if (sband == &dev->sband_2g.sband) {
+ offset = 4;
+ }
+
+ for (i = offset; i < len; i++) {
+ if ((sband->bitrates[i].hw_value & GENMASK(7, 0)) == idx)
+ return i;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76_get_rate);
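mt76_get_rate() above is the old mt7603_get_rate() promoted into the shared core: it maps a hardware rate index back into sband->bitrates, stripping the CCK short-preamble bit and skipping the four CCK entries when resolving an OFDM index on 2 GHz. A standalone sketch of the same lookup against a made-up table (the hw_value numbers are illustrative, not taken from the driver):

#include <stdio.h>

struct demo_rate { unsigned char hw_value; };

/* first four entries CCK, the rest OFDM -- illustrative values only */
static const struct demo_rate rates_2g[] = {
	{ 0x00 }, { 0x01 }, { 0x02 }, { 0x03 },	/* CCK  1/2/5.5/11 */
	{ 0x0b }, { 0x0f }, { 0x0a }, { 0x0e },	/* OFDM 6/9/12/18  */
};

static int demo_get_rate(const struct demo_rate *r, int len, int idx,
			 int cck, int is_2g)
{
	int offset = 0;

	if (cck)
		idx &= ~(1 << 2);	/* strip the short-preamble bit */
	else if (is_2g)
		offset = 4;		/* skip the CCK entries */

	for (int i = offset; i < len; i++)
		if ((r[i].hw_value & 0xff) == idx)
			return i;
	return 0;
}

int main(void)
{
	/* OFDM hw index 0x0b on 2 GHz resolves to bitrate entry 4 */
	printf("%d\n", demo_get_rate(rates_2g, 8, 0x0b, 0, 1));
	return 0;
}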
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index 8ecbf81a906f..989386ecb5e4 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -30,6 +30,7 @@
#define MT_TX_RING_SIZE 256
#define MT_MCU_RING_SIZE 32
#define MT_RX_BUF_SIZE 2048
+#define MT_SKB_HEAD_LEN 128
struct mt76_dev;
struct mt76_wcid;
@@ -258,10 +259,11 @@ struct mt76_rx_tid {
#define MT_TX_CB_TXS_DONE BIT(1)
#define MT_TX_CB_TXS_FAILED BIT(2)
-#define MT_PACKET_ID_MASK GENMASK(7, 0)
+#define MT_PACKET_ID_MASK GENMASK(6, 0)
#define MT_PACKET_ID_NO_ACK 0
#define MT_PACKET_ID_NO_SKB 1
#define MT_PACKET_ID_FIRST 2
+#define MT_PACKET_ID_HAS_RATE BIT(7)
#define MT_TX_STATUS_SKB_TIMEOUT HZ
@@ -381,7 +383,8 @@ enum mt76u_out_ep {
__MT_EP_OUT_MAX,
};
-#define MT_SG_MAX_SIZE 8
+#define MT_TX_SG_MAX_SIZE 8
+#define MT_RX_SG_MAX_SIZE 1
#define MT_NUM_TX_ENTRIES 256
#define MT_NUM_RX_ENTRIES 128
#define MCU_RESP_URB_SIZE 1024
@@ -393,9 +396,7 @@ struct mt76_usb {
struct delayed_work stat_work;
u8 out_ep[__MT_EP_OUT_MAX];
- u16 out_max_packet;
u8 in_ep[__MT_EP_IN_MAX];
- u16 in_max_packet;
bool sg_en;
struct mt76u_mcu {
@@ -452,6 +453,7 @@ struct mt76_dev {
int tx_dma_idx[4];
struct tasklet_struct tx_tasklet;
+ struct napi_struct tx_napi;
struct delayed_work mac_work;
wait_queue_head_t tx_wait;
@@ -483,6 +485,8 @@ struct mt76_dev {
int txpower_conf;
int txpower_cur;
+ enum nl80211_dfs_regions region;
+
u32 debugfs_reg;
struct led_classdev led_cdev;
@@ -688,6 +692,14 @@ static inline void mt76_insert_hdr_pad(struct sk_buff *skb)
skb->data[len + 1] = 0;
}
+static inline bool mt76_is_skb_pktid(u8 pktid)
+{
+ if (pktid & MT_PACKET_ID_HAS_RATE)
+ return false;
+
+ return pktid >= MT_PACKET_ID_FIRST;
+}
+
void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
void mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
struct mt76_wcid *wcid, struct sk_buff *skb);
@@ -749,6 +761,10 @@ void mt76_csa_check(struct mt76_dev *dev);
void mt76_csa_finish(struct mt76_dev *dev);
int mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set);
+void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id);
+int mt76_get_rate(struct mt76_dev *dev,
+ struct ieee80211_supported_band *sband,
+ int idx, bool cck);
/* internal */
void mt76_tx_free(struct mt76_dev *dev);
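The mt76.h hunk shrinks the packet-ID field to seven bits and turns BIT(7) into MT_PACKET_ID_HAS_RATE; the new mt76_is_skb_pktid() helper then treats only IDs without that flag and at or above MT_PACKET_ID_FIRST as tracking a real skb. A standalone check of that bit layout, with the constants copied from the hunk:

#include <stdbool.h>
#include <stdio.h>

#define MT_PACKET_ID_MASK	0x7f	/* GENMASK(6, 0) */
#define MT_PACKET_ID_NO_ACK	0
#define MT_PACKET_ID_NO_SKB	1
#define MT_PACKET_ID_FIRST	2
#define MT_PACKET_ID_HAS_RATE	0x80	/* BIT(7) */

static bool is_skb_pktid(unsigned char pktid)
{
	if (pktid & MT_PACKET_ID_HAS_RATE)
		return false;
	return pktid >= MT_PACKET_ID_FIRST;
}

int main(void)
{
	printf("%d %d %d\n",
	       is_skb_pktid(MT_PACKET_ID_NO_SKB),		/* 0 */
	       is_skb_pktid(MT_PACKET_ID_FIRST + 5),		/* 1 */
	       is_skb_pktid(MT_PACKET_ID_HAS_RATE | 0x03));	/* 0 */
	return 0;
}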
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/core.c b/drivers/net/wireless/mediatek/mt76/mt7603/core.c
index 37e5644b45ef..e7ee58e3379c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/core.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/core.c
@@ -35,7 +35,7 @@ irqreturn_t mt7603_irq_handler(int irq, void *dev_instance)
if (intr & MT_INT_TX_DONE_ALL) {
mt7603_irq_disable(dev, MT_INT_TX_DONE_ALL);
- tasklet_schedule(&dev->mt76.tx_tasklet);
+ napi_schedule(&dev->mt76.tx_napi);
}
if (intr & MT_INT_RX_DONE(0)) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c
index f8b3b6ab6297..a1bc3103cbe9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c
@@ -40,6 +40,35 @@ mt7603_radio_read(struct seq_file *s, void *data)
return 0;
}
+static int
+mt7603_edcca_set(void *data, u64 val)
+{
+ struct mt7603_dev *dev = data;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ dev->ed_monitor_enabled = !!val;
+ dev->ed_monitor = dev->ed_monitor_enabled &&
+ dev->mt76.region == NL80211_DFS_ETSI;
+ mt7603_init_edcca(dev);
+
+ mutex_unlock(&dev->mt76.mutex);
+
+ return 0;
+}
+
+static int
+mt7603_edcca_get(void *data, u64 *val)
+{
+ struct mt7603_dev *dev = data;
+
+ *val = dev->ed_monitor_enabled;
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_edcca, mt7603_edcca_get,
+ mt7603_edcca_set, "%lld\n");
+
void mt7603_init_debugfs(struct mt7603_dev *dev)
{
struct dentry *dir;
@@ -48,6 +77,7 @@ void mt7603_init_debugfs(struct mt7603_dev *dev)
if (!dir)
return;
+ debugfs_create_file("edcca", 0600, dir, dev, &fops_edcca);
debugfs_create_u32("reset_test", 0600, dir, &dev->reset_test);
debugfs_create_devm_seqfile(dev->mt76.dev, "reset", dir,
mt7603_reset_read);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
index 27e2d9f90553..58dc511f93c5 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
@@ -139,15 +139,30 @@ static void
mt7603_tx_tasklet(unsigned long data)
{
struct mt7603_dev *dev = (struct mt7603_dev *)data;
+
+ mt76_txq_schedule_all(&dev->mt76);
+}
+
+static int mt7603_poll_tx(struct napi_struct *napi, int budget)
+{
+ struct mt7603_dev *dev;
int i;
+ dev = container_of(napi, struct mt7603_dev, mt76.tx_napi);
dev->tx_dma_check = 0;
+
for (i = MT_TXQ_MCU; i >= 0; i--)
mt76_queue_tx_cleanup(dev, i, false);
- mt76_txq_schedule_all(&dev->mt76);
+ if (napi_complete_done(napi, 0))
+ mt7603_irq_enable(dev, MT_INT_TX_DONE_ALL);
- mt7603_irq_enable(dev, MT_INT_TX_DONE_ALL);
+ for (i = MT_TXQ_MCU; i >= 0; i--)
+ mt76_queue_tx_cleanup(dev, i, false);
+
+ tasklet_schedule(&dev->mt76.tx_tasklet);
+
+ return 0;
}
int mt7603_dma_init(struct mt7603_dev *dev)
@@ -216,7 +231,15 @@ int mt7603_dma_init(struct mt7603_dev *dev)
return ret;
mt76_wr(dev, MT_DELAY_INT_CFG, 0);
- return mt76_init_queues(dev);
+ ret = mt76_init_queues(dev);
+ if (ret)
+ return ret;
+
+ netif_tx_napi_add(&dev->mt76.napi_dev, &dev->mt76.tx_napi,
+ mt7603_poll_tx, NAPI_POLL_WEIGHT);
+ napi_enable(&dev->mt76.tx_napi);
+
+ return 0;
}
void mt7603_dma_cleanup(struct mt7603_dev *dev)
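The mt7603 dma.c hunk (and the analogous mt7615 change further down) moves TX completion out of the tasklet into a dedicated NAPI context: the IRQ handler schedules tx_napi, the poll routine reaps the TX rings, re-enables the TX-done interrupt via napi_complete_done(), reaps once more to close the race with the re-enable, and finally kicks the tasklet, which now only runs mt76_txq_schedule_all(). A condensed kernel-style sketch of that shape; the demo_* names are placeholders, not the driver's:

#include <linux/netdevice.h>
#include <linux/interrupt.h>

struct demo_dev {				/* placeholder device state */
	struct napi_struct tx_napi;
	struct tasklet_struct tx_tasklet;
};

static void demo_tx_cleanup(struct demo_dev *dev) { /* reap completed frames */ }
static void demo_irq_enable(struct demo_dev *dev) { /* unmask the TX-done IRQ */ }

static int demo_poll_tx(struct napi_struct *napi, int budget)
{
	struct demo_dev *dev = container_of(napi, struct demo_dev, tx_napi);

	demo_tx_cleanup(dev);			/* first pass over the TX rings */

	if (napi_complete_done(napi, 0))	/* no pending work to report */
		demo_irq_enable(dev);		/* re-enable TX-done interrupt */

	demo_tx_cleanup(dev);			/* catch completions that raced in */
	tasklet_schedule(&dev->tx_tasklet);	/* queue scheduling runs elsewhere */

	return 0;
}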
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.h
index f27b99b7e359..b893facfba48 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.h
@@ -69,6 +69,8 @@ enum mt7603_eeprom_field {
MT_EE_CP_FT_VERSION = 0x0f0,
+ MT_EE_TX_POWER_TSSI_OFF = 0x0f2,
+
MT_EE_XTAL_FREQ_OFFSET = 0x0f4,
MT_EE_XTAL_TRIM_2_COMP = 0x0f5,
MT_EE_XTAL_TRIM_3_COMP = 0x0f6,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/init.c b/drivers/net/wireless/mediatek/mt76/mt7603/init.c
index 78cdbb70e178..38834c7d0891 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/init.c
@@ -227,11 +227,19 @@ mt7603_mac_init(struct mt7603_dev *dev)
mt76_rmw_field(dev, MT_LPON_BTEIR, MT_LPON_BTEIR_MBSS_MODE, 2);
mt76_rmw_field(dev, MT_WF_RMACDR, MT_WF_RMACDR_MBSSID_MASK, 2);
- mt76_wr(dev, MT_AGG_ARUCR, FIELD_PREP(MT_AGG_ARxCR_LIMIT(0), 7));
+ mt76_wr(dev, MT_AGG_ARUCR,
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(0), 7) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(1), 2) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(2), 2) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(3), 2) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(4), 1) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(5), 1) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(6), 1) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(7), 1));
+
mt76_wr(dev, MT_AGG_ARDCR,
- FIELD_PREP(MT_AGG_ARxCR_LIMIT(0), 0) |
- FIELD_PREP(MT_AGG_ARxCR_LIMIT(1),
- max_t(int, 0, MT7603_RATE_RETRY - 2)) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(0), MT7603_RATE_RETRY - 1) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(1), MT7603_RATE_RETRY - 1) |
FIELD_PREP(MT_AGG_ARxCR_LIMIT(2), MT7603_RATE_RETRY - 1) |
FIELD_PREP(MT_AGG_ARxCR_LIMIT(3), MT7603_RATE_RETRY - 1) |
FIELD_PREP(MT_AGG_ARxCR_LIMIT(4), MT7603_RATE_RETRY - 1) |
@@ -437,7 +445,9 @@ mt7603_regd_notifier(struct wiphy *wiphy,
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct mt7603_dev *dev = hw->priv;
- dev->ed_monitor = request->dfs_region == NL80211_DFS_ETSI;
+ dev->mt76.region = request->dfs_region;
+ dev->ed_monitor = dev->ed_monitor_enabled &&
+ dev->mt76.region == NL80211_DFS_ETSI;
}
static int
@@ -463,9 +473,13 @@ mt7603_init_txpower(struct mt7603_dev *dev,
u8 *eeprom = (u8 *)dev->mt76.eeprom.data;
int target_power = eeprom[MT_EE_TX_POWER_0_START_2G + 2] & ~BIT(7);
u8 *rate_power = &eeprom[MT_EE_TX_POWER_CCK];
+ bool ext_pa = eeprom[MT_EE_NIC_CONF_0 + 1] & BIT(1);
int max_offset, cur_offset;
int i;
+ if (ext_pa && is_mt7603(dev))
+ target_power = eeprom[MT_EE_TX_POWER_TSSI_OFF] & ~BIT(7);
+
if (target_power & BIT(6))
target_power = -(target_power & GENMASK(5, 0));
@@ -488,7 +502,7 @@ mt7603_init_txpower(struct mt7603_dev *dev,
for (i = 0; i < sband->n_channels; i++) {
chan = &sband->channels[i];
- chan->max_power = target_power;
+ chan->max_power = min_t(int, chan->max_reg_power, target_power);
chan->orig_mpwr = target_power;
}
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
index 6d506e34c3ee..40db1cbc832d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
@@ -370,31 +370,6 @@ void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid,
mt76_rmw(dev, addr + (15 * 4), tid_mask, tid_val);
}
-static int
-mt7603_get_rate(struct mt7603_dev *dev, struct ieee80211_supported_band *sband,
- int idx, bool cck)
-{
- int offset = 0;
- int len = sband->n_bitrates;
- int i;
-
- if (cck) {
- if (sband == &dev->mt76.sband_5g.sband)
- return 0;
-
- idx &= ~BIT(2); /* short preamble */
- } else if (sband == &dev->mt76.sband_2g.sband) {
- offset = 4;
- }
-
- for (i = offset; i < len; i++) {
- if ((sband->bitrates[i].hw_value & GENMASK(7, 0)) == idx)
- return i;
- }
-
- return 0;
-}
-
static struct mt76_wcid *
mt7603_rx_get_wcid(struct mt7603_dev *dev, u8 idx, bool unicast)
{
@@ -418,30 +393,6 @@ mt7603_rx_get_wcid(struct mt7603_dev *dev, u8 idx, bool unicast)
return &sta->vif->sta.wcid;
}
-static void
-mt7603_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id)
-{
- struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
- int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
- u8 *pn = status->iv;
- u8 *hdr;
-
- __skb_push(skb, 8);
- memmove(skb->data, skb->data + 8, hdr_len);
- hdr = skb->data + hdr_len;
-
- hdr[0] = pn[5];
- hdr[1] = pn[4];
- hdr[2] = 0;
- hdr[3] = 0x20 | (key_id << 6);
- hdr[4] = pn[3];
- hdr[5] = pn[2];
- hdr[6] = pn[1];
- hdr[7] = pn[0];
-
- status->flag &= ~RX_FLAG_IV_STRIPPED;
-}
-
int
mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb)
{
@@ -532,7 +483,7 @@ mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb)
cck = true;
/* fall through */
case MT_PHY_TYPE_OFDM:
- i = mt7603_get_rate(dev, sband, i, cck);
+ i = mt76_get_rate(&dev->mt76, sband, i, cck);
break;
case MT_PHY_TYPE_HT_GF:
case MT_PHY_TYPE_HT:
@@ -580,7 +531,7 @@ mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb)
if (insert_ccmp_hdr) {
u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);
- mt7603_insert_ccmp_hdr(skb, key_id);
+ mt76_insert_ccmp_hdr(skb, key_id);
}
hdr = (struct ieee80211_hdr *)skb->data;
@@ -640,6 +591,7 @@ void mt7603_wtbl_set_rates(struct mt7603_dev *dev, struct mt7603_sta *sta,
struct ieee80211_tx_rate *probe_rate,
struct ieee80211_tx_rate *rates)
{
+ struct ieee80211_tx_rate *ref;
int wcid = sta->wcid.idx;
u32 addr = mt7603_wtbl2_addr(wcid);
bool stbc = false;
@@ -648,7 +600,8 @@ void mt7603_wtbl_set_rates(struct mt7603_dev *dev, struct mt7603_sta *sta,
u16 val[4];
u16 probe_val;
u32 w9 = mt76_rr(dev, addr + 9 * 4);
- int i;
+ bool rateset;
+ int i, k;
if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000))
return;
@@ -656,6 +609,41 @@ void mt7603_wtbl_set_rates(struct mt7603_dev *dev, struct mt7603_sta *sta,
for (i = n_rates; i < 4; i++)
rates[i] = rates[n_rates - 1];
+ rateset = !(sta->rate_set_tsf & BIT(0));
+ memcpy(sta->rateset[rateset].rates, rates,
+ sizeof(sta->rateset[rateset].rates));
+ if (probe_rate) {
+ sta->rateset[rateset].probe_rate = *probe_rate;
+ ref = &sta->rateset[rateset].probe_rate;
+ } else {
+ sta->rateset[rateset].probe_rate.idx = -1;
+ ref = &sta->rateset[rateset].rates[0];
+ }
+
+ rates = sta->rateset[rateset].rates;
+ for (i = 0; i < ARRAY_SIZE(sta->rateset[rateset].rates); i++) {
+ /*
+ * We don't support switching between short and long GI
+ * within the rate set. For accurate tx status reporting, we
+ * need to make sure that flags match.
+ * For improved performance, avoid duplicate entries by
+ * decrementing the MCS index if necessary
+ */
+ if ((ref->flags ^ rates[i].flags) & IEEE80211_TX_RC_SHORT_GI)
+ rates[i].flags ^= IEEE80211_TX_RC_SHORT_GI;
+
+ for (k = 0; k < i; k++) {
+ if (rates[i].idx != rates[k].idx)
+ continue;
+ if ((rates[i].flags ^ rates[k].flags) &
+ IEEE80211_TX_RC_40_MHZ_WIDTH)
+ continue;
+
+ rates[i].idx--;
+ }
+
+ }
+
w9 &= MT_WTBL2_W9_SHORT_GI_20 | MT_WTBL2_W9_SHORT_GI_40 |
MT_WTBL2_W9_SHORT_GI_80;
@@ -699,19 +687,22 @@ void mt7603_wtbl_set_rates(struct mt7603_dev *dev, struct mt7603_sta *sta,
mt76_wr(dev, MT_WTBL_RIUCR1,
FIELD_PREP(MT_WTBL_RIUCR1_RATE0, probe_val) |
FIELD_PREP(MT_WTBL_RIUCR1_RATE1, val[0]) |
- FIELD_PREP(MT_WTBL_RIUCR1_RATE2_LO, val[0]));
+ FIELD_PREP(MT_WTBL_RIUCR1_RATE2_LO, val[1]));
mt76_wr(dev, MT_WTBL_RIUCR2,
- FIELD_PREP(MT_WTBL_RIUCR2_RATE2_HI, val[0] >> 8) |
+ FIELD_PREP(MT_WTBL_RIUCR2_RATE2_HI, val[1] >> 8) |
FIELD_PREP(MT_WTBL_RIUCR2_RATE3, val[1]) |
- FIELD_PREP(MT_WTBL_RIUCR2_RATE4, val[1]) |
+ FIELD_PREP(MT_WTBL_RIUCR2_RATE4, val[2]) |
FIELD_PREP(MT_WTBL_RIUCR2_RATE5_LO, val[2]));
mt76_wr(dev, MT_WTBL_RIUCR3,
FIELD_PREP(MT_WTBL_RIUCR3_RATE5_HI, val[2] >> 4) |
- FIELD_PREP(MT_WTBL_RIUCR3_RATE6, val[2]) |
+ FIELD_PREP(MT_WTBL_RIUCR3_RATE6, val[3]) |
FIELD_PREP(MT_WTBL_RIUCR3_RATE7, val[3]));
+ mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */
+ sta->rate_set_tsf = (mt76_rr(dev, MT_LPON_UTTR0) & ~BIT(0)) | rateset;
+
mt76_wr(dev, MT_WTBL_UPDATE,
FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, wcid) |
MT_WTBL_UPDATE_RATE_UPDATE |
@@ -938,9 +929,9 @@ int mt7603_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
spin_lock_bh(&dev->mt76.lock);
- msta->rate_probe = true;
mt7603_wtbl_set_rates(dev, msta, &info->control.rates[0],
msta->rates);
+ msta->rate_probe = true;
spin_unlock_bh(&dev->mt76.lock);
}
@@ -955,10 +946,12 @@ mt7603_fill_txs(struct mt7603_dev *dev, struct mt7603_sta *sta,
struct ieee80211_tx_info *info, __le32 *txs_data)
{
struct ieee80211_supported_band *sband;
- int final_idx = 0;
+ struct mt7603_rate_set *rs;
+ int first_idx = 0, last_idx;
+ u32 rate_set_tsf;
u32 final_rate;
u32 final_rate_flags;
- bool final_mpdu;
+ bool rs_idx;
bool ack_timeout;
bool fixed_rate;
bool probe;
@@ -966,7 +959,6 @@ mt7603_fill_txs(struct mt7603_dev *dev, struct mt7603_sta *sta,
bool cck = false;
int count;
u32 txs;
- u8 pid;
int idx;
int i;
@@ -974,10 +966,9 @@ mt7603_fill_txs(struct mt7603_dev *dev, struct mt7603_sta *sta,
probe = !!(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
txs = le32_to_cpu(txs_data[4]);
- final_mpdu = txs & MT_TXS4_ACKED_MPDU;
ampdu = !fixed_rate && (txs & MT_TXS4_AMPDU);
- pid = FIELD_GET(MT_TXS4_PID, txs);
count = FIELD_GET(MT_TXS4_TX_COUNT, txs);
+ last_idx = FIELD_GET(MT_TXS4_LAST_TX_RATE, txs);
txs = le32_to_cpu(txs_data[0]);
final_rate = FIELD_GET(MT_TXS0_TX_RATE, txs);
@@ -999,38 +990,57 @@ mt7603_fill_txs(struct mt7603_dev *dev, struct mt7603_sta *sta,
if (ampdu || (info->flags & IEEE80211_TX_CTL_AMPDU))
info->flags |= IEEE80211_TX_STAT_AMPDU | IEEE80211_TX_CTL_AMPDU;
+ first_idx = max_t(int, 0, last_idx - (count + 1) / MT7603_RATE_RETRY);
+
if (fixed_rate && !probe) {
info->status.rates[0].count = count;
+ i = 0;
goto out;
}
- for (i = 0, idx = 0; i < ARRAY_SIZE(info->status.rates); i++) {
- int cur_count = min_t(int, count, 2 * MT7603_RATE_RETRY);
+ rate_set_tsf = READ_ONCE(sta->rate_set_tsf);
+ rs_idx = !((u32)(FIELD_GET(MT_TXS1_F0_TIMESTAMP, le32_to_cpu(txs_data[1])) -
+ rate_set_tsf) < 1000000);
+ rs_idx ^= rate_set_tsf & BIT(0);
+ rs = &sta->rateset[rs_idx];
- if (!i && probe) {
- cur_count = 1;
- } else {
- info->status.rates[i] = sta->rates[idx];
- idx++;
- }
+ if (!first_idx && rs->probe_rate.idx >= 0) {
+ info->status.rates[0] = rs->probe_rate;
- if (i && info->status.rates[i].idx < 0) {
- info->status.rates[i - 1].count += count;
- break;
+ spin_lock_bh(&dev->mt76.lock);
+ if (sta->rate_probe) {
+ mt7603_wtbl_set_rates(dev, sta, NULL,
+ sta->rates);
+ sta->rate_probe = false;
}
+ spin_unlock_bh(&dev->mt76.lock);
+ } else
+ info->status.rates[0] = rs->rates[first_idx / 2];
+ info->status.rates[0].count = 0;
- if (!count) {
- info->status.rates[i].idx = -1;
- break;
- }
+ for (i = 0, idx = first_idx; count && idx <= last_idx; idx++) {
+ struct ieee80211_tx_rate *cur_rate;
+ int cur_count;
- info->status.rates[i].count = cur_count;
- final_idx = i;
+ cur_rate = &rs->rates[idx / 2];
+ cur_count = min_t(int, MT7603_RATE_RETRY, count);
count -= cur_count;
+
+ if (idx && (cur_rate->idx != info->status.rates[i].idx ||
+ cur_rate->flags != info->status.rates[i].flags)) {
+ i++;
+ if (i == ARRAY_SIZE(info->status.rates))
+ break;
+
+ info->status.rates[i] = *cur_rate;
+ info->status.rates[i].count = 0;
+ }
+
+ info->status.rates[i].count += cur_count;
}
out:
- final_rate_flags = info->status.rates[final_idx].flags;
+ final_rate_flags = info->status.rates[i].flags;
switch (FIELD_GET(MT_TX_RATE_MODE, final_rate)) {
case MT_PHY_TYPE_CCK:
@@ -1042,7 +1052,8 @@ out:
else
sband = &dev->mt76.sband_2g.sband;
final_rate &= GENMASK(5, 0);
- final_rate = mt7603_get_rate(dev, sband, final_rate, cck);
+ final_rate = mt76_get_rate(&dev->mt76, sband, final_rate,
+ cck);
final_rate_flags = 0;
break;
case MT_PHY_TYPE_HT_GF:
@@ -1056,8 +1067,8 @@ out:
return false;
}
- info->status.rates[final_idx].idx = final_rate;
- info->status.rates[final_idx].flags = final_rate_flags;
+ info->status.rates[i].idx = final_rate;
+ info->status.rates[i].flags = final_rate_flags;
return true;
}
@@ -1078,16 +1089,6 @@ mt7603_mac_add_txs_skb(struct mt7603_dev *dev, struct mt7603_sta *sta, int pid,
if (skb) {
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
- spin_lock_bh(&dev->mt76.lock);
- if (sta->rate_probe) {
- mt7603_wtbl_set_rates(dev, sta, NULL,
- sta->rates);
- sta->rate_probe = false;
- }
- spin_unlock_bh(&dev->mt76.lock);
- }
-
if (!mt7603_fill_txs(dev, sta, info, txs_data)) {
ieee80211_tx_info_clear_status(info);
info->status.rates[0].idx = -1;
@@ -1282,6 +1283,7 @@ static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev)
tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
napi_disable(&dev->mt76.napi[0]);
napi_disable(&dev->mt76.napi[1]);
+ napi_disable(&dev->mt76.tx_napi);
mutex_lock(&dev->mt76.mutex);
@@ -1326,7 +1328,8 @@ skip_dma_reset:
mutex_unlock(&dev->mt76.mutex);
tasklet_enable(&dev->mt76.tx_tasklet);
- tasklet_schedule(&dev->mt76.tx_tasklet);
+ napi_enable(&dev->mt76.tx_napi);
+ napi_schedule(&dev->mt76.tx_napi);
tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
mt7603_beacon_set_timer(dev, -1, beacon_int);
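The mt7603_wtbl_set_rates() rework keeps two alternating rate sets per station, tags the active one with the low bit of a TSF snapshot so TX status reports can be matched to the right set later, and normalizes the stored rates: every entry inherits the reference rate's short-GI flag, and an entry whose index and bandwidth collide with an earlier one gets its index decremented so the hardware retry chain contains no duplicates. A standalone run of just that normalization step, with a stripped-down stand-in for ieee80211_tx_rate:

#include <stdio.h>

#define FLAG_SGI	0x1	/* stand-in for IEEE80211_TX_RC_SHORT_GI */
#define FLAG_40MHZ	0x2	/* stand-in for IEEE80211_TX_RC_40_MHZ_WIDTH */

struct demo_rate { int idx; unsigned int flags; };

static void normalize(struct demo_rate *r, int n, const struct demo_rate *ref)
{
	for (int i = 0; i < n; i++) {
		/* force the short-GI flag to match the reference rate */
		if ((ref->flags ^ r[i].flags) & FLAG_SGI)
			r[i].flags ^= FLAG_SGI;

		/* avoid duplicate idx/bandwidth pairs in the retry chain */
		for (int k = 0; k < i; k++) {
			if (r[i].idx != r[k].idx)
				continue;
			if ((r[i].flags ^ r[k].flags) & FLAG_40MHZ)
				continue;
			r[i].idx--;
		}
	}
}

int main(void)
{
	struct demo_rate rates[4] = {
		{ 7, FLAG_SGI }, { 7, 0 }, { 5, 0 }, { 5, 0 },
	};

	normalize(rates, 4, &rates[0]);
	for (int i = 0; i < 4; i++)
		printf("idx=%d flags=%#x\n", rates[i].idx, rates[i].flags);
	/* yields 7, 6, 5, 4 -- duplicates pushed down, short GI made uniform */
	return 0;
}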
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
index 0a0334dc40d5..e5d4cb6381a8 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
@@ -103,8 +103,7 @@ mt7603_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
mutex_unlock(&dev->mt76.mutex);
}
-static void
-mt7603_init_edcca(struct mt7603_dev *dev)
+void mt7603_init_edcca(struct mt7603_dev *dev)
{
/* Set lower signal level to -65dBm */
mt76_rmw_field(dev, MT_RXTD(8), MT_RXTD_8_LOWER_SIGNAL, 0x23);
@@ -207,8 +206,11 @@ mt7603_config(struct ieee80211_hw *hw, u32 changed)
int ret = 0;
if (changed & (IEEE80211_CONF_CHANGE_CHANNEL |
- IEEE80211_CONF_CHANGE_POWER))
+ IEEE80211_CONF_CHANGE_POWER)) {
+ ieee80211_stop_queues(hw);
ret = mt7603_set_channel(dev, &hw->conf.chandef);
+ ieee80211_wake_queues(hw);
+ }
if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
mutex_lock(&dev->mt76.mutex);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c
index 6357b5658a32..343ddc5543c2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c
@@ -346,7 +346,7 @@ int mt7603_mcu_set_eeprom(struct mt7603_dev *dev)
};
struct req_data {
- u16 addr;
+ __le16 addr;
u8 val;
u8 pad;
} __packed;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
index fa64bbaab0d2..2c6f7b4cf0e9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
@@ -51,6 +51,11 @@ enum mt7603_bw {
MT_BW_80,
};
+struct mt7603_rate_set {
+ struct ieee80211_tx_rate probe_rate;
+ struct ieee80211_tx_rate rates[4];
+};
+
struct mt7603_sta {
struct mt76_wcid wcid; /* must be first */
@@ -58,7 +63,11 @@ struct mt7603_sta {
struct sk_buff_head psq;
- struct ieee80211_tx_rate rates[8];
+ struct ieee80211_tx_rate rates[4];
+
+ struct mt7603_rate_set rateset[2];
+ u32 rate_set_tsf;
+
u8 rate_count;
u8 n_rates;
@@ -117,8 +126,9 @@ struct mt7603_dev {
u8 mac_work_count;
u8 mcu_running;
- u8 ed_monitor;
+ u8 ed_monitor_enabled;
+ u8 ed_monitor;
s8 ed_trigger;
u8 ed_strict_mode;
u8 ed_strong_signal;
@@ -241,4 +251,5 @@ void mt7603_update_channel(struct mt76_dev *mdev);
void mt7603_edcca_set_strict(struct mt7603_dev *dev, bool val);
void mt7603_cca_stats_reset(struct mt7603_dev *dev);
+void mt7603_init_edcca(struct mt7603_dev *dev);
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/regs.h b/drivers/net/wireless/mediatek/mt76/mt7603/regs.h
index 9d257d5c309d..eb9eefe8e125 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/regs.h
@@ -480,6 +480,12 @@ enum {
#define MT_LPON_BASE 0x24000
#define MT_LPON(n) (MT_LPON_BASE + (n))
+#define MT_LPON_T0CR MT_LPON(0x010)
+#define MT_LPON_T0CR_MODE GENMASK(1, 0)
+
+#define MT_LPON_UTTR0 MT_LPON(0x018)
+#define MT_LPON_UTTR1 MT_LPON(0x01c)
+
#define MT_LPON_BTEIR MT_LPON(0x020)
#define MT_LPON_BTEIR_MBSS_MODE GENMASK(31, 29)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
index 3ec6582afd8f..6a70273d4a69 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
@@ -93,18 +93,33 @@ void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
static void mt7615_tx_tasklet(unsigned long data)
{
struct mt7615_dev *dev = (struct mt7615_dev *)data;
+
+ mt76_txq_schedule_all(&dev->mt76);
+}
+
+static int mt7615_poll_tx(struct napi_struct *napi, int budget)
+{
static const u8 queue_map[] = {
MT_TXQ_MCU,
MT_TXQ_BE
};
+ struct mt7615_dev *dev;
int i;
+ dev = container_of(napi, struct mt7615_dev, mt76.tx_napi);
+
for (i = 0; i < ARRAY_SIZE(queue_map); i++)
mt76_queue_tx_cleanup(dev, queue_map[i], false);
- mt76_txq_schedule_all(&dev->mt76);
+ if (napi_complete_done(napi, 0))
+ mt7615_irq_enable(dev, MT_INT_TX_DONE_ALL);
- mt7615_irq_enable(dev, MT_INT_TX_DONE_ALL);
+ for (i = 0; i < ARRAY_SIZE(queue_map); i++)
+ mt76_queue_tx_cleanup(dev, queue_map[i], false);
+
+ tasklet_schedule(&dev->mt76.tx_tasklet);
+
+ return 0;
}
int mt7615_dma_init(struct mt7615_dev *dev)
@@ -178,6 +193,10 @@ int mt7615_dma_init(struct mt7615_dev *dev)
if (ret < 0)
return ret;
+ netif_tx_napi_add(&dev->mt76.napi_dev, &dev->mt76.tx_napi,
+ mt7615_poll_tx, NAPI_POLL_WEIGHT);
+ napi_enable(&dev->mt76.tx_napi);
+
mt76_poll(dev, MT_WPDMA_GLO_CFG,
MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 1000);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
index dd5ab46a4f66..dc94f52e6e8b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
@@ -42,13 +42,13 @@ static int mt7615_efuse_read(struct mt7615_dev *dev, u32 base,
static int mt7615_efuse_init(struct mt7615_dev *dev)
{
- u32 base = mt7615_reg_map(dev, MT_EFUSE_BASE);
- int len = MT7615_EEPROM_SIZE;
- int ret, i;
+ u32 val, base = mt7615_reg_map(dev, MT_EFUSE_BASE);
+ int i, len = MT7615_EEPROM_SIZE;
void *buf;
- if (mt76_rr(dev, base + MT_EFUSE_BASE_CTRL) & MT_EFUSE_BASE_CTRL_EMPTY)
- return -EINVAL;
+ val = mt76_rr(dev, base + MT_EFUSE_BASE_CTRL);
+ if (val & MT_EFUSE_BASE_CTRL_EMPTY)
+ return 0;
dev->mt76.otp.data = devm_kzalloc(dev->mt76.dev, len, GFP_KERNEL);
dev->mt76.otp.size = len;
@@ -57,6 +57,8 @@ static int mt7615_efuse_init(struct mt7615_dev *dev)
buf = dev->mt76.otp.data;
for (i = 0; i + 16 <= len; i += 16) {
+ int ret;
+
ret = mt7615_efuse_read(dev, base, i, buf + i);
if (ret)
return ret;
@@ -76,6 +78,82 @@ static int mt7615_eeprom_load(struct mt7615_dev *dev)
return mt7615_efuse_init(dev);
}
+static int mt7615_check_eeprom(struct mt76_dev *dev)
+{
+ u16 val = get_unaligned_le16(dev->eeprom.data);
+
+ switch (val) {
+ case 0x7615:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static void mt7615_eeprom_parse_hw_cap(struct mt7615_dev *dev)
+{
+ u8 val, *eeprom = dev->mt76.eeprom.data;
+
+ val = FIELD_GET(MT_EE_NIC_WIFI_CONF_BAND_SEL,
+ eeprom[MT_EE_WIFI_CONF]);
+ switch (val) {
+ case MT_EE_5GHZ:
+ dev->mt76.cap.has_5ghz = true;
+ break;
+ case MT_EE_2GHZ:
+ dev->mt76.cap.has_2ghz = true;
+ break;
+ default:
+ dev->mt76.cap.has_2ghz = true;
+ dev->mt76.cap.has_5ghz = true;
+ break;
+ }
+}
+
+int mt7615_eeprom_get_power_index(struct mt7615_dev *dev,
+ struct ieee80211_channel *chan,
+ u8 chain_idx)
+{
+ int index;
+
+ if (chain_idx > 3)
+ return -EINVAL;
+
+ /* TSSI disabled */
+ if (mt7615_ext_pa_enabled(dev, chan->band)) {
+ if (chan->band == NL80211_BAND_2GHZ)
+ return MT_EE_EXT_PA_2G_TARGET_POWER;
+ else
+ return MT_EE_EXT_PA_5G_TARGET_POWER;
+ }
+
+ /* TSSI enabled */
+ if (chan->band == NL80211_BAND_2GHZ) {
+ index = MT_EE_TX0_2G_TARGET_POWER + chain_idx * 6;
+ } else {
+ int group = mt7615_get_channel_group(chan->hw_value);
+
+ switch (chain_idx) {
+ case 1:
+ index = MT_EE_TX1_5G_G0_TARGET_POWER;
+ break;
+ case 2:
+ index = MT_EE_TX2_5G_G0_TARGET_POWER;
+ break;
+ case 3:
+ index = MT_EE_TX3_5G_G0_TARGET_POWER;
+ break;
+ case 0:
+ default:
+ index = MT_EE_TX0_5G_G0_TARGET_POWER;
+ break;
+ }
+ index += 5 * group;
+ }
+
+ return index;
+}
+
int mt7615_eeprom_init(struct mt7615_dev *dev)
{
int ret;
@@ -84,11 +162,12 @@ int mt7615_eeprom_init(struct mt7615_dev *dev)
if (ret < 0)
return ret;
- memcpy(dev->mt76.eeprom.data, dev->mt76.otp.data, MT7615_EEPROM_SIZE);
-
- dev->mt76.cap.has_2ghz = true;
- dev->mt76.cap.has_5ghz = true;
+ ret = mt7615_check_eeprom(&dev->mt76);
+ if (ret && dev->mt76.otp.data)
+ memcpy(dev->mt76.eeprom.data, dev->mt76.otp.data,
+ MT7615_EEPROM_SIZE);
+ mt7615_eeprom_parse_hw_cap(dev);
memcpy(dev->mt76.macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR,
ETH_ALEN);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h
index a4cf16688171..f4a4280768d2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h
@@ -11,8 +11,69 @@ enum mt7615_eeprom_field {
MT_EE_VERSION = 0x002,
MT_EE_MAC_ADDR = 0x004,
MT_EE_NIC_CONF_0 = 0x034,
+ MT_EE_NIC_CONF_1 = 0x036,
+ MT_EE_WIFI_CONF = 0x03e,
+ MT_EE_TX0_2G_TARGET_POWER = 0x058,
+ MT_EE_TX0_5G_G0_TARGET_POWER = 0x070,
+ MT_EE_TX1_5G_G0_TARGET_POWER = 0x098,
+ MT_EE_EXT_PA_2G_TARGET_POWER = 0x0f2,
+ MT_EE_EXT_PA_5G_TARGET_POWER = 0x0f3,
+ MT_EE_TX2_5G_G0_TARGET_POWER = 0x142,
+ MT_EE_TX3_5G_G0_TARGET_POWER = 0x16a,
__MT_EE_MAX = 0x3bf
};
+#define MT_EE_NIC_CONF_TSSI_2G BIT(5)
+#define MT_EE_NIC_CONF_TSSI_5G BIT(6)
+
+#define MT_EE_NIC_WIFI_CONF_BAND_SEL GENMASK(5, 4)
+enum mt7615_eeprom_band {
+ MT_EE_DUAL_BAND,
+ MT_EE_5GHZ,
+ MT_EE_2GHZ,
+ MT_EE_DBDC,
+};
+
+enum mt7615_channel_group {
+ MT_CH_5G_JAPAN,
+ MT_CH_5G_UNII_1,
+ MT_CH_5G_UNII_2A,
+ MT_CH_5G_UNII_2B,
+ MT_CH_5G_UNII_2E_1,
+ MT_CH_5G_UNII_2E_2,
+ MT_CH_5G_UNII_2E_3,
+ MT_CH_5G_UNII_3,
+ __MT_CH_MAX
+};
+
+static inline enum mt7615_channel_group
+mt7615_get_channel_group(int channel)
+{
+ if (channel >= 184 && channel <= 196)
+ return MT_CH_5G_JAPAN;
+ if (channel <= 48)
+ return MT_CH_5G_UNII_1;
+ if (channel <= 64)
+ return MT_CH_5G_UNII_2A;
+ if (channel <= 114)
+ return MT_CH_5G_UNII_2E_1;
+ if (channel <= 144)
+ return MT_CH_5G_UNII_2E_2;
+ if (channel <= 161)
+ return MT_CH_5G_UNII_2E_3;
+ return MT_CH_5G_UNII_3;
+}
+
+static inline bool
+mt7615_ext_pa_enabled(struct mt7615_dev *dev, enum nl80211_band band)
+{
+ u8 *eep = dev->mt76.eeprom.data;
+
+ if (band == NL80211_BAND_5GHZ)
+ return !(eep[MT_EE_NIC_CONF_1 + 1] & MT_EE_NIC_CONF_TSSI_5G);
+ else
+ return !(eep[MT_EE_NIC_CONF_1 + 1] & MT_EE_NIC_CONF_TSSI_2G);
+}
+
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/init.c b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
index 3ab3ff553ef2..859de2454ec6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
@@ -9,6 +9,7 @@
#include <linux/etherdevice.h>
#include "mt7615.h"
#include "mac.h"
+#include "eeprom.h"
static void mt7615_phy_init(struct mt7615_dev *dev)
{
@@ -62,16 +63,11 @@ static void mt7615_mac_init(struct mt7615_dev *dev)
MT_AGG_ARCR_RATE_DOWN_RATIO_EN |
FIELD_PREP(MT_AGG_ARCR_RATE_DOWN_RATIO, 1) |
FIELD_PREP(MT_AGG_ARCR_RATE_UP_EXTRA_TH, 4)));
-
- dev->mt76.global_wcid.idx = MT7615_WTBL_RESERVED;
- dev->mt76.global_wcid.hw_key_idx = -1;
- rcu_assign_pointer(dev->mt76.wcid[MT7615_WTBL_RESERVED],
- &dev->mt76.global_wcid);
}
static int mt7615_init_hardware(struct mt7615_dev *dev)
{
- int ret;
+ int ret, idx;
mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
@@ -98,6 +94,15 @@ static int mt7615_init_hardware(struct mt7615_dev *dev)
mt7615_mcu_ctrl_pm_state(dev, 0);
mt7615_mcu_del_wtbl_all(dev);
+ /* Beacon and mgmt frames should occupy wcid 0 */
+ idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7615_WTBL_STA - 1);
+ if (idx)
+ return -ENOSPC;
+
+ dev->mt76.global_wcid.idx = idx;
+ dev->mt76.global_wcid.hw_key_idx = -1;
+ rcu_assign_pointer(dev->mt76.wcid[idx], &dev->mt76.global_wcid);
+
return 0;
}
@@ -133,6 +138,9 @@ static const struct ieee80211_iface_limit if_limits[] = {
{
.max = MT7615_MAX_INTERFACES,
.types = BIT(NL80211_IFTYPE_AP) |
+#ifdef CONFIG_MAC80211_MESH
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
BIT(NL80211_IFTYPE_STATION)
}
};
@@ -158,6 +166,48 @@ static int mt7615_init_debugfs(struct mt7615_dev *dev)
return 0;
}
+static void
+mt7615_init_txpower(struct mt7615_dev *dev,
+ struct ieee80211_supported_band *sband)
+{
+ int i, n_chains = hweight8(dev->mt76.antenna_mask), target_chains;
+ u8 *eep = (u8 *)dev->mt76.eeprom.data;
+ enum nl80211_band band = sband->band;
+
+ target_chains = mt7615_ext_pa_enabled(dev, band) ? 1 : n_chains;
+ for (i = 0; i < sband->n_channels; i++) {
+ struct ieee80211_channel *chan = &sband->channels[i];
+ u8 target_power = 0;
+ int j;
+
+ for (j = 0; j < target_chains; j++) {
+ int index;
+
+ index = mt7615_eeprom_get_power_index(dev, chan, j);
+ target_power = max(target_power, eep[index]);
+ }
+
+ target_power = DIV_ROUND_UP(target_power, 2);
+ switch (n_chains) {
+ case 4:
+ target_power += 6;
+ break;
+ case 3:
+ target_power += 4;
+ break;
+ case 2:
+ target_power += 3;
+ break;
+ default:
+ break;
+ }
+
+ chan->max_power = min_t(int, chan->max_reg_power,
+ target_power);
+ chan->orig_mpwr = target_power;
+ }
+}
+
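mt7615_init_txpower() reads the per-chain targets in 0.5 dB steps, keeps the strongest one, halves it with rounding, and then adds a fixed offset per chain count (6/4/3 dB for 4/3/2 chains), presumably to account for multi-chain combining gain. A small standalone sketch of that combination with made-up EEPROM values:

#include <stdio.h>

/* Combine per-chain EEPROM targets (0.5 dB units) the way mt7615_init_txpower() does. */
static int combined_target_power(const unsigned char *eep_half_db, int chains)
{
	int i, target = 0;

	for (i = 0; i < chains; i++)
		if (eep_half_db[i] > target)
			target = eep_half_db[i];

	target = (target + 1) / 2;	/* DIV_ROUND_UP(target, 2): 0.5 dB -> dBm */

	switch (chains) {
	case 4: return target + 6;
	case 3: return target + 4;
	case 2: return target + 3;
	default: return target;
	}
}

int main(void)
{
	unsigned char eep[4] = { 40, 42, 38, 44 };	/* 20/21/19/22 dBm */

	printf("4-chain target: %d dBm\n", combined_target_power(eep, 4));
	return 0;
}

The result is then clamped against chan->max_reg_power before being stored in chan->max_power.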
int mt7615_register_device(struct mt7615_dev *dev)
{
struct ieee80211_hw *hw = mt76_hw(dev);
@@ -195,6 +245,9 @@ int mt7615_register_device(struct mt7615_dev *dev)
dev->mt76.antenna_mask = 0xf;
wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+#ifdef CONFIG_MAC80211_MESH
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
BIT(NL80211_IFTYPE_AP);
ret = mt76_register_device(&dev->mt76, true, mt7615_rates,
@@ -202,6 +255,9 @@ int mt7615_register_device(struct mt7615_dev *dev)
if (ret)
return ret;
+ mt7615_init_txpower(dev, &dev->mt76.sband_2g.sband);
+ mt7615_init_txpower(dev, &dev->mt76.sband_5g.sband);
+
hw->max_tx_fragments = MT_TXP_MAX_BUF_NUM;
return mt7615_init_debugfs(dev);
@@ -212,6 +268,10 @@ void mt7615_unregister_device(struct mt7615_dev *dev)
struct mt76_txwi_cache *txwi;
int id;
+ mt76_unregister_device(&dev->mt76);
+ mt7615_mcu_exit(dev);
+ mt7615_dma_cleanup(dev);
+
spin_lock_bh(&dev->token_lock);
idr_for_each_entry(&dev->token, txwi, id) {
mt7615_txp_skb_unmap(&dev->mt76, txwi);
@@ -221,9 +281,6 @@ void mt7615_unregister_device(struct mt7615_dev *dev)
}
spin_unlock_bh(&dev->token_lock);
idr_destroy(&dev->token);
- mt76_unregister_device(&dev->mt76);
- mt7615_mcu_exit(dev);
- mt7615_dma_cleanup(dev);
- ieee80211_free_hw(mt76_hw(dev));
+ mt76_free_device(&dev->mt76);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
index b8f48d10f27a..1eb0e9c9970c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
@@ -13,6 +13,11 @@
#include "../dma.h"
#include "mac.h"
+static inline s8 to_rssi(u32 field, u32 rxv)
+{
+ return (FIELD_GET(field, rxv) - 220) / 2;
+}
+
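to_rssi() maps an 8-bit RCPI field to dBm as (rcpi - 220) / 2, which is consistent with the hardware encoding 2 * (dBm + 110); mt7615_mac_fill_rx() then reports the strongest active chain. A compact standalone illustration (the RCPI values are invented):

#include <stdio.h>

/* RCPI -> dBm conversion matching to_rssi(): dBm = (rcpi - 220) / 2. */
static int rcpi_to_dbm(unsigned int rcpi)
{
	return ((int)rcpi - 220) / 2;
}

int main(void)
{
	unsigned int rcpi[4] = { 120, 110, 126, 0 };
	unsigned int chain_mask = 0x7;	/* three active chains, like antenna_mask */
	int i, signal = rcpi_to_dbm(rcpi[0]);

	for (i = 1; i < 4; i++) {
		if (!(chain_mask & (1u << i)))
			continue;
		if (rcpi_to_dbm(rcpi[i]) > signal)
			signal = rcpi_to_dbm(rcpi[i]);
	}
	printf("signal = %d dBm\n", signal);	/* strongest chain: -47 dBm */
	return 0;
}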
static struct mt76_wcid *mt7615_rx_get_wcid(struct mt7615_dev *dev,
u8 idx, bool unicast)
{
@@ -36,54 +41,6 @@ static struct mt76_wcid *mt7615_rx_get_wcid(struct mt7615_dev *dev,
return &sta->vif->sta.wcid;
}
-static int mt7615_get_rate(struct mt7615_dev *dev,
- struct ieee80211_supported_band *sband,
- int idx, bool cck)
-{
- int offset = 0;
- int len = sband->n_bitrates;
- int i;
-
- if (cck) {
- if (sband == &dev->mt76.sband_5g.sband)
- return 0;
-
- idx &= ~BIT(2); /* short preamble */
- } else if (sband == &dev->mt76.sband_2g.sband) {
- offset = 4;
- }
-
- for (i = offset; i < len; i++) {
- if ((sband->bitrates[i].hw_value & GENMASK(7, 0)) == idx)
- return i;
- }
-
- return 0;
-}
-
-static void mt7615_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id)
-{
- struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
- int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
- u8 *pn = status->iv;
- u8 *hdr;
-
- __skb_push(skb, 8);
- memmove(skb->data, skb->data + 8, hdr_len);
- hdr = skb->data + hdr_len;
-
- hdr[0] = pn[5];
- hdr[1] = pn[4];
- hdr[2] = 0;
- hdr[3] = 0x20 | (key_id << 6);
- hdr[4] = pn[3];
- hdr[5] = pn[2];
- hdr[6] = pn[1];
- hdr[7] = pn[0];
-
- status->flag &= ~RX_FLAG_IV_STRIPPED;
-}
-
int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
{
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
@@ -96,6 +53,9 @@ int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
bool unicast, remove_pad, insert_ccmp_hdr = false;
int i, idx;
+ if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
+ return -EINVAL;
+
memset(status, 0, sizeof(*status));
unicast = (rxd1 & MT_RXD1_NORMAL_ADDR_TYPE) == MT_RXD1_NORMAL_U2M;
@@ -165,6 +125,7 @@ int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
if (rxd0 & MT_RXD0_NORMAL_GROUP_3) {
u32 rxdg0 = le32_to_cpu(rxd[0]);
u32 rxdg1 = le32_to_cpu(rxd[1]);
+ u32 rxdg3 = le32_to_cpu(rxd[3]);
u8 stbc = FIELD_GET(MT_RXV1_HT_STBC, rxdg0);
bool cck = false;
@@ -174,7 +135,7 @@ int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
cck = true;
/* fall through */
case MT_PHY_TYPE_OFDM:
- i = mt7615_get_rate(dev, sband, i, cck);
+ i = mt76_get_rate(&dev->mt76, sband, i, cck);
break;
case MT_PHY_TYPE_HT_GF:
case MT_PHY_TYPE_HT:
@@ -214,7 +175,21 @@ int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
- /* TODO: RSSI */
+ status->chains = dev->mt76.antenna_mask;
+ status->chain_signal[0] = to_rssi(MT_RXV4_RCPI0, rxdg3);
+ status->chain_signal[1] = to_rssi(MT_RXV4_RCPI1, rxdg3);
+ status->chain_signal[2] = to_rssi(MT_RXV4_RCPI2, rxdg3);
+ status->chain_signal[3] = to_rssi(MT_RXV4_RCPI3, rxdg3);
+ status->signal = status->chain_signal[0];
+
+ for (i = 1; i < hweight8(dev->mt76.antenna_mask); i++) {
+ if (!(status->chains & BIT(i)))
+ continue;
+
+ status->signal = max(status->signal,
+ status->chain_signal[i]);
+ }
+
rxd += 6;
if ((u8 *)rxd - skb->data >= skb->len)
return -EINVAL;
@@ -225,7 +200,7 @@ int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
if (insert_ccmp_hdr) {
u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);
- mt7615_insert_ccmp_hdr(skb, key_id);
+ mt76_insert_ccmp_hdr(skb, key_id);
}
hdr = (struct ieee80211_hdr *)skb->data;
@@ -549,23 +524,20 @@ static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta,
{
struct ieee80211_supported_band *sband;
int i, idx, count, final_idx = 0;
- bool fixed_rate, final_mpdu, ack_timeout;
+ bool fixed_rate, ack_timeout;
bool probe, ampdu, cck = false;
u32 final_rate, final_rate_flags, final_nss, txs;
- u8 pid;
fixed_rate = info->status.rates[0].count;
probe = !!(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
txs = le32_to_cpu(txs_data[1]);
- final_mpdu = txs & MT_TXS1_ACKED_MPDU;
ampdu = !fixed_rate && (txs & MT_TXS1_AMPDU);
txs = le32_to_cpu(txs_data[3]);
count = FIELD_GET(MT_TXS3_TX_COUNT, txs);
txs = le32_to_cpu(txs_data[0]);
- pid = FIELD_GET(MT_TXS0_PID, txs);
final_rate = FIELD_GET(MT_TXS0_TX_RATE, txs);
ack_timeout = txs & MT_TXS0_ACK_TIMEOUT;
@@ -628,7 +600,8 @@ out:
else
sband = &dev->mt76.sband_2g.sband;
final_rate &= MT_TX_RATE_IDX;
- final_rate = mt7615_get_rate(dev, sband, final_rate, cck);
+ final_rate = mt76_get_rate(&dev->mt76, sband, final_rate,
+ cck);
final_rate_flags = 0;
break;
case MT_PHY_TYPE_HT_GF:
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.h b/drivers/net/wireless/mediatek/mt76/mt7615/mac.h
index 18ad4b8a3807..b00ce8db58e9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.h
@@ -98,6 +98,11 @@ enum rx_pkt_type {
#define MT_RXV2_GROUP_ID GENMASK(26, 21)
#define MT_RXV2_LENGTH GENMASK(20, 0)
+#define MT_RXV4_RCPI3 GENMASK(31, 24)
+#define MT_RXV4_RCPI2 GENMASK(23, 16)
+#define MT_RXV4_RCPI1 GENMASK(15, 8)
+#define MT_RXV4_RCPI0 GENMASK(7, 0)
+
enum tx_header_format {
MT_HDR_FORMAT_802_3,
MT_HDR_FORMAT_CMD,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
index 80e6b211f60b..b4d6af812c54 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
@@ -37,6 +37,7 @@ static int get_omac_idx(enum nl80211_iftype type, u32 mask)
switch (type) {
case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_MESH_POINT:
/* AP uses hw bssid 0 and ext bssid */
if (~mask & BIT(HW_BSSID_0))
return HW_BSSID_0;
@@ -77,11 +78,12 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
goto out;
}
- mvif->omac_idx = get_omac_idx(vif->type, dev->omac_mask);
- if (mvif->omac_idx < 0) {
+ idx = get_omac_idx(vif->type, dev->omac_mask);
+ if (idx < 0) {
ret = -ENOSPC;
goto out;
}
+ mvif->omac_idx = idx;
/* TODO: DBDC support. Use band 0 and wmm 0 for now */
mvif->band_idx = 0;
@@ -93,7 +95,7 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
dev->vif_mask |= BIT(mvif->idx);
dev->omac_mask |= BIT(mvif->omac_idx);
- idx = MT7615_WTBL_RESERVED - 1 - mvif->idx;
+ idx = MT7615_WTBL_RESERVED - mvif->idx;
mvif->sta.wcid.idx = idx;
mvif->sta.wcid.hw_key_idx = -1;
@@ -128,8 +130,7 @@ static void mt7615_remove_interface(struct ieee80211_hw *hw,
mutex_unlock(&dev->mt76.mutex);
}
-static int mt7615_set_channel(struct mt7615_dev *dev,
- struct cfg80211_chan_def *def)
+static int mt7615_set_channel(struct mt7615_dev *dev)
{
int ret;
@@ -190,28 +191,28 @@ static int mt7615_config(struct ieee80211_hw *hw, u32 changed)
struct mt7615_dev *dev = hw->priv;
int ret = 0;
- if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
- mutex_lock(&dev->mt76.mutex);
+ mutex_lock(&dev->mt76.mutex);
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
ieee80211_stop_queues(hw);
- ret = mt7615_set_channel(dev, &hw->conf.chandef);
+ ret = mt7615_set_channel(dev);
ieee80211_wake_queues(hw);
-
- mutex_unlock(&dev->mt76.mutex);
}
- if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
- mutex_lock(&dev->mt76.mutex);
+ if (changed & IEEE80211_CONF_CHANGE_POWER)
+ ret = mt7615_mcu_set_tx_power(dev);
+ if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
if (!(hw->conf.flags & IEEE80211_CONF_MONITOR))
dev->mt76.rxfilter |= MT_WF_RFCR_DROP_OTHER_UC;
else
dev->mt76.rxfilter &= ~MT_WF_RFCR_DROP_OTHER_UC;
mt76_wr(dev, MT_WF_RFCR, dev->mt76.rxfilter);
-
- mutex_unlock(&dev->mt76.mutex);
}
+
+ mutex_unlock(&dev->mt76.mutex);
+
return ret;
}
@@ -281,26 +282,18 @@ static void mt7615_bss_info_changed(struct ieee80211_hw *hw,
mutex_lock(&dev->mt76.mutex);
- /* TODO: sta mode connect/disconnect
- * BSS_CHANGED_ASSOC | BSS_CHANGED_BSSID
- */
+ if (changed & BSS_CHANGED_ASSOC)
+ mt7615_mcu_set_bss_info(dev, vif, info->assoc);
/* TODO: update beacon content
* BSS_CHANGED_BEACON
*/
if (changed & BSS_CHANGED_BEACON_ENABLED) {
- if (info->enable_beacon) {
- mt7615_mcu_set_bss_info(dev, vif, 1);
- mt7615_mcu_add_wtbl_bmc(dev, vif);
- mt7615_mcu_set_sta_rec_bmc(dev, vif, 1);
- mt7615_mcu_set_bcn(dev, vif, 1);
- } else {
- mt7615_mcu_set_sta_rec_bmc(dev, vif, 0);
- mt7615_mcu_del_wtbl_bmc(dev, vif);
- mt7615_mcu_set_bss_info(dev, vif, 0);
- mt7615_mcu_set_bcn(dev, vif, 0);
- }
+ mt7615_mcu_set_bss_info(dev, vif, info->enable_beacon);
+ mt7615_mcu_wtbl_bmc(dev, vif, info->enable_beacon);
+ mt7615_mcu_set_sta_rec_bmc(dev, vif, info->enable_beacon);
+ mt7615_mcu_set_bcn(dev, vif, info->enable_beacon);
}
mutex_unlock(&dev->mt76.mutex);
@@ -343,7 +336,7 @@ void mt7615_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
mt7615_mcu_set_sta_rec(dev, vif, sta, 0);
- mt7615_mcu_del_wtbl(dev, vif, sta);
+ mt7615_mcu_del_wtbl(dev, sta);
}
static void mt7615_sta_rate_tbl_update(struct ieee80211_hw *hw,
@@ -496,4 +489,5 @@ const struct ieee80211_ops mt7615_ops = {
.sw_scan_start = mt7615_sw_scan,
.sw_scan_complete = mt7615_sw_scan_complete,
.release_buffered_frames = mt76_release_buffered_frames,
+ .get_txpower = mt76_get_txpower,
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
index ea67c6022fe6..cdad2c8dc297 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
@@ -49,7 +49,7 @@ struct mt7615_fw_trailer {
#define FW_START_WORKING_PDA_CR4 BIT(2)
static int __mt7615_mcu_msg_send(struct mt7615_dev *dev, struct sk_buff *skb,
- int cmd, int query, int dest, int *wait_seq)
+ int cmd, int *wait_seq)
{
struct mt7615_mcu_txd *mcu_txd;
u8 seq, q_idx, pkt_fmt;
@@ -57,9 +57,6 @@ static int __mt7615_mcu_msg_send(struct mt7615_dev *dev, struct sk_buff *skb,
u32 val;
__le32 *txd;
- if (!skb)
- return -EINVAL;
-
seq = ++dev->mt76.mmio.mcu.msg_seq & 0xf;
if (!seq)
seq = ++dev->mt76.mmio.mcu.msg_seq & 0xf;
@@ -94,16 +91,15 @@ static int __mt7615_mcu_msg_send(struct mt7615_dev *dev, struct sk_buff *skb,
mcu_txd->seq = seq;
if (cmd < 0) {
+ mcu_txd->set_query = MCU_Q_NA;
mcu_txd->cid = -cmd;
} else {
mcu_txd->cid = MCU_CMD_EXT_CID;
+ mcu_txd->set_query = MCU_Q_SET;
mcu_txd->ext_cid = cmd;
- if (query != MCU_Q_NA)
- mcu_txd->ext_cid_ack = 1;
+ mcu_txd->ext_cid_ack = 1;
}
-
- mcu_txd->set_query = query;
- mcu_txd->s2d_index = dest;
+ mcu_txd->s2d_index = MCU_S2D_H2N;
if (wait_seq)
*wait_seq = seq;
@@ -116,24 +112,30 @@ static int __mt7615_mcu_msg_send(struct mt7615_dev *dev, struct sk_buff *skb,
return mt76_tx_queue_skb_raw(dev, qid, skb, 0);
}
-static int mt7615_mcu_msg_send(struct mt7615_dev *dev, struct sk_buff *skb,
- int cmd, int query, int dest,
- struct sk_buff **skb_ret)
+static int
+mt7615_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
+ int len, bool wait_resp)
{
+ struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
unsigned long expires = jiffies + 10 * HZ;
struct mt7615_mcu_rxd *rxd;
+ struct sk_buff *skb;
int ret, seq;
- mutex_lock(&dev->mt76.mmio.mcu.mutex);
+ skb = mt7615_mcu_msg_alloc(data, len);
+ if (!skb)
+ return -ENOMEM;
- ret = __mt7615_mcu_msg_send(dev, skb, cmd, query, dest, &seq);
+ mutex_lock(&mdev->mmio.mcu.mutex);
+
+ ret = __mt7615_mcu_msg_send(dev, skb, cmd, &seq);
if (ret)
goto out;
- while (1) {
- skb = mt76_mcu_get_response(&dev->mt76, expires);
+ while (wait_resp) {
+ skb = mt76_mcu_get_response(mdev, expires);
if (!skb) {
- dev_err(dev->mt76.dev, "Message %d (seq %d) timeout\n",
+ dev_err(mdev->dev, "Message %d (seq %d) timeout\n",
cmd, seq);
ret = -ETIMEDOUT;
break;
@@ -143,23 +145,16 @@ static int mt7615_mcu_msg_send(struct mt7615_dev *dev, struct sk_buff *skb,
if (seq != rxd->seq)
continue;
- if (skb_ret) {
- int hdr_len = sizeof(*rxd);
-
- if (!test_bit(MT76_STATE_MCU_RUNNING,
- &dev->mt76.state))
- hdr_len -= 4;
- skb_pull(skb, hdr_len);
- *skb_ret = skb;
- } else {
- dev_kfree_skb(skb);
+ if (cmd == -MCU_CMD_PATCH_SEM_CONTROL) {
+ skb_pull(skb, sizeof(*rxd) - 4);
+ ret = *skb->data;
}
-
+ dev_kfree_skb(skb);
break;
}
out:
- mutex_unlock(&dev->mt76.mmio.mcu.mutex);
+ mutex_unlock(&mdev->mmio.mcu.mutex);
return ret;
}
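The sequence number taken in __mt7615_mcu_msg_send() wraps within 4 bits and skips zero, so a queued command can always be matched against a non-zero seq in the response (the rxd->seq comparison above). A tiny model of that counter:

#include <stdio.h>

/* 4-bit MCU message sequence counter: wraps at 16 and never hands out 0. */
static unsigned int next_seq(unsigned int *state)
{
	unsigned int seq = ++(*state) & 0xf;

	if (!seq)
		seq = ++(*state) & 0xf;
	return seq;
}

int main(void)
{
	unsigned int state = 13, i;

	for (i = 0; i < 6; i++)
		printf("%u ", next_seq(&state));	/* 14 15 1 2 3 4 */
	putchar('\n');
	return 0;
}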
@@ -176,28 +171,22 @@ static int mt7615_mcu_init_download(struct mt7615_dev *dev, u32 addr,
.len = cpu_to_le32(len),
.mode = cpu_to_le32(mode),
};
- struct sk_buff *skb = mt7615_mcu_msg_alloc(&req, sizeof(req));
- return mt7615_mcu_msg_send(dev, skb, -MCU_CMD_TARGET_ADDRESS_LEN_REQ,
- MCU_Q_NA, MCU_S2D_H2N, NULL);
+ return __mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_TARGET_ADDRESS_LEN_REQ,
+ &req, sizeof(req), true);
}
static int mt7615_mcu_send_firmware(struct mt7615_dev *dev, const void *data,
int len)
{
- struct sk_buff *skb;
- int ret = 0;
+ int ret = 0, cur_len;
while (len > 0) {
- int cur_len = min_t(int, 4096 - sizeof(struct mt7615_mcu_txd),
- len);
-
- skb = mt7615_mcu_msg_alloc(data, cur_len);
- if (!skb)
- return -ENOMEM;
+ cur_len = min_t(int, 4096 - sizeof(struct mt7615_mcu_txd),
+ len);
- ret = __mt7615_mcu_msg_send(dev, skb, -MCU_CMD_FW_SCATTER,
- MCU_Q_NA, MCU_S2D_H2N, NULL);
+ ret = __mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_FW_SCATTER,
+ data, cur_len, false);
if (ret)
break;
@@ -218,47 +207,27 @@ static int mt7615_mcu_start_firmware(struct mt7615_dev *dev, u32 addr,
.option = cpu_to_le32(option),
.addr = cpu_to_le32(addr),
};
- struct sk_buff *skb = mt7615_mcu_msg_alloc(&req, sizeof(req));
- return mt7615_mcu_msg_send(dev, skb, -MCU_CMD_FW_START_REQ,
- MCU_Q_NA, MCU_S2D_H2N, NULL);
+ return __mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_FW_START_REQ,
+ &req, sizeof(req), true);
}
-static int mt7615_mcu_restart(struct mt7615_dev *dev)
+static int mt7615_mcu_restart(struct mt76_dev *dev)
{
- struct sk_buff *skb = mt7615_mcu_msg_alloc(NULL, 0);
-
- return mt7615_mcu_msg_send(dev, skb, -MCU_CMD_RESTART_DL_REQ,
- MCU_Q_NA, MCU_S2D_H2N, NULL);
+ return __mt76_mcu_send_msg(dev, -MCU_CMD_RESTART_DL_REQ, NULL,
+ 0, true);
}
static int mt7615_mcu_patch_sem_ctrl(struct mt7615_dev *dev, bool get)
{
struct {
- __le32 operation;
+ __le32 op;
} req = {
- .operation = cpu_to_le32(get ? PATCH_SEM_GET :
- PATCH_SEM_RELEASE),
+ .op = cpu_to_le32(get ? PATCH_SEM_GET : PATCH_SEM_RELEASE),
};
- struct event {
- u8 status;
- u8 reserved[3];
- } *resp;
- struct sk_buff *skb = mt7615_mcu_msg_alloc(&req, sizeof(req));
- struct sk_buff *skb_ret;
- int ret;
- ret = mt7615_mcu_msg_send(dev, skb, -MCU_CMD_PATCH_SEM_CONTROL,
- MCU_Q_NA, MCU_S2D_H2N, &skb_ret);
- if (ret)
- goto out;
-
- resp = (struct event *)(skb_ret->data);
- ret = resp->status;
- dev_kfree_skb(skb_ret);
-
-out:
- return ret;
+ return __mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_PATCH_SEM_CONTROL,
+ &req, sizeof(req), true);
}
static int mt7615_mcu_start_patch(struct mt7615_dev *dev)
@@ -269,10 +238,9 @@ static int mt7615_mcu_start_patch(struct mt7615_dev *dev)
} req = {
.check_crc = 0,
};
- struct sk_buff *skb = mt7615_mcu_msg_alloc(&req, sizeof(req));
- return mt7615_mcu_msg_send(dev, skb, -MCU_CMD_PATCH_FINISH_REQ,
- MCU_Q_NA, MCU_S2D_H2N, NULL);
+ return __mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_PATCH_FINISH_REQ,
+ &req, sizeof(req), true);
}
static int mt7615_driver_own(struct mt7615_dev *dev)
@@ -508,8 +476,14 @@ static int mt7615_load_firmware(struct mt7615_dev *dev)
int mt7615_mcu_init(struct mt7615_dev *dev)
{
+ static const struct mt76_mcu_ops mt7615_mcu_ops = {
+ .mcu_send_msg = mt7615_mcu_msg_send,
+ .mcu_restart = mt7615_mcu_restart,
+ };
int ret;
+ dev->mt76.mcu_ops = &mt7615_mcu_ops;
+
ret = mt7615_driver_own(dev);
if (ret)
return ret;
@@ -525,16 +499,13 @@ int mt7615_mcu_init(struct mt7615_dev *dev)
void mt7615_mcu_exit(struct mt7615_dev *dev)
{
- mt7615_mcu_restart(dev);
+ __mt76_mcu_restart(&dev->mt76);
mt76_wr(dev, MT_CFG_LPCR_HOST, MT_CFG_LPCR_HOST_FW_OWN);
skb_queue_purge(&dev->mt76.mmio.mcu.res_q);
}
int mt7615_mcu_set_eeprom(struct mt7615_dev *dev)
{
- struct req_data {
- u8 val;
- } __packed;
struct {
u8 buffer_mode;
u8 pad;
@@ -543,23 +514,22 @@ int mt7615_mcu_set_eeprom(struct mt7615_dev *dev)
.buffer_mode = 1,
.len = __MT_EE_MAX - MT_EE_NIC_CONF_0,
};
- struct sk_buff *skb;
- struct req_data *data;
- const int size = (__MT_EE_MAX - MT_EE_NIC_CONF_0) *
- sizeof(struct req_data);
- u8 *eep = (u8 *)dev->mt76.eeprom.data;
- u16 off;
-
- skb = mt7615_mcu_msg_alloc(NULL, size + sizeof(req_hdr));
- memcpy(skb_put(skb, sizeof(req_hdr)), &req_hdr, sizeof(req_hdr));
- data = (struct req_data *)skb_put(skb, size);
- memset(data, 0, size);
-
- for (off = MT_EE_NIC_CONF_0; off < __MT_EE_MAX; off++)
- data[off - MT_EE_NIC_CONF_0].val = eep[off];
-
- return mt7615_mcu_msg_send(dev, skb, MCU_EXT_CMD_EFUSE_BUFFER_MODE,
- MCU_Q_SET, MCU_S2D_H2N, NULL);
+ int ret, len = sizeof(req_hdr) + __MT_EE_MAX - MT_EE_NIC_CONF_0;
+ u8 *req, *eep = (u8 *)dev->mt76.eeprom.data;
+
+ req = kzalloc(len, GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ memcpy(req, &req_hdr, sizeof(req_hdr));
+ memcpy(req + sizeof(req_hdr), eep + MT_EE_NIC_CONF_0,
+ __MT_EE_MAX - MT_EE_NIC_CONF_0);
+
+ ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_EFUSE_BUFFER_MODE,
+ req, len, true);
+ kfree(req);
+
+ return ret;
}
int mt7615_mcu_init_mac(struct mt7615_dev *dev)
@@ -572,10 +542,9 @@ int mt7615_mcu_init_mac(struct mt7615_dev *dev)
.enable = 1,
.band = 0,
};
- struct sk_buff *skb = mt7615_mcu_msg_alloc(&req, sizeof(req));
- return mt7615_mcu_msg_send(dev, skb, MCU_EXT_CMD_MAC_INIT_CTRL,
- MCU_Q_SET, MCU_S2D_H2N, NULL);
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_MAC_INIT_CTRL,
+ &req, sizeof(req), true);
}
int mt7615_mcu_set_rts_thresh(struct mt7615_dev *dev, u32 val)
@@ -592,10 +561,9 @@ int mt7615_mcu_set_rts_thresh(struct mt7615_dev *dev, u32 val)
.len_thresh = cpu_to_le32(val),
.pkt_thresh = cpu_to_le32(0x2),
};
- struct sk_buff *skb = mt7615_mcu_msg_alloc(&req, sizeof(req));
- return mt7615_mcu_msg_send(dev, skb, MCU_EXT_CMD_PROTECT_CTRL,
- MCU_Q_SET, MCU_S2D_H2N, NULL);
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_PROTECT_CTRL,
+ &req, sizeof(req), true);
}
int mt7615_mcu_set_wmm(struct mt7615_dev *dev, u8 queue,
@@ -621,7 +589,6 @@ int mt7615_mcu_set_wmm(struct mt7615_dev *dev, u8 queue,
.aifs = params->aifs,
.txop = cpu_to_le16(params->txop),
};
- struct sk_buff *skb;
if (params->cw_min) {
req.valid |= WMM_CW_MIN_SET;
@@ -632,9 +599,8 @@ int mt7615_mcu_set_wmm(struct mt7615_dev *dev, u8 queue,
req.cw_max = cpu_to_le16(params->cw_max);
}
- skb = mt7615_mcu_msg_alloc(&req, sizeof(req));
- return mt7615_mcu_msg_send(dev, skb, MCU_EXT_CMD_EDCA_UPDATE,
- MCU_Q_SET, MCU_S2D_H2N, NULL);
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_EDCA_UPDATE,
+ &req, sizeof(req), true);
}
int mt7615_mcu_ctrl_pm_state(struct mt7615_dev *dev, int enter)
@@ -662,300 +628,200 @@ int mt7615_mcu_ctrl_pm_state(struct mt7615_dev *dev, int enter)
.pm_state = (enter) ? ENTER_PM_STATE : EXIT_PM_STATE,
.band_idx = 0,
};
- struct sk_buff *skb = mt7615_mcu_msg_alloc(&req, sizeof(req));
-
- return mt7615_mcu_msg_send(dev, skb, MCU_EXT_CMD_PM_STATE_CTRL,
- MCU_Q_SET, MCU_S2D_H2N, NULL);
-}
-
-static int __mt7615_mcu_set_dev_info(struct mt7615_dev *dev,
- struct dev_info *dev_info)
-{
- struct req_hdr {
- u8 omac_idx;
- u8 band_idx;
- __le16 tlv_num;
- u8 is_tlv_append;
- u8 rsv[3];
- } __packed req_hdr = {0};
- struct req_tlv {
- __le16 tag;
- __le16 len;
- u8 active;
- u8 band_idx;
- u8 omac_addr[ETH_ALEN];
- } __packed;
- struct sk_buff *skb;
- u16 tlv_num = 0;
-
- skb = mt7615_mcu_msg_alloc(NULL, sizeof(req_hdr) +
- sizeof(struct req_tlv));
- skb_reserve(skb, sizeof(req_hdr));
-
- if (dev_info->feature & BIT(DEV_INFO_ACTIVE)) {
- struct req_tlv req_tlv = {
- .tag = cpu_to_le16(DEV_INFO_ACTIVE),
- .len = cpu_to_le16(sizeof(req_tlv)),
- .active = dev_info->enable,
- .band_idx = dev_info->band_idx,
- };
- memcpy(req_tlv.omac_addr, dev_info->omac_addr, ETH_ALEN);
- memcpy(skb_put(skb, sizeof(req_tlv)), &req_tlv,
- sizeof(req_tlv));
- tlv_num++;
- }
-
- req_hdr.omac_idx = dev_info->omac_idx;
- req_hdr.band_idx = dev_info->band_idx;
- req_hdr.tlv_num = cpu_to_le16(tlv_num);
- req_hdr.is_tlv_append = tlv_num ? 1 : 0;
- memcpy(skb_push(skb, sizeof(req_hdr)), &req_hdr, sizeof(req_hdr));
-
- return mt7615_mcu_msg_send(dev, skb, MCU_EXT_CMD_DEV_INFO_UPDATE,
- MCU_Q_SET, MCU_S2D_H2N, NULL);
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_PM_STATE_CTRL,
+ &req, sizeof(req), true);
}
-int mt7615_mcu_set_dev_info(struct mt7615_dev *dev, struct ieee80211_vif *vif,
- int en)
+int mt7615_mcu_set_dev_info(struct mt7615_dev *dev,
+ struct ieee80211_vif *vif, bool enable)
{
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
- struct dev_info dev_info = {0};
-
- dev_info.omac_idx = mvif->omac_idx;
- memcpy(dev_info.omac_addr, vif->addr, ETH_ALEN);
- dev_info.band_idx = mvif->band_idx;
- dev_info.enable = en;
- dev_info.feature = BIT(DEV_INFO_ACTIVE);
+ struct {
+ struct req_hdr {
+ u8 omac_idx;
+ u8 band_idx;
+ __le16 tlv_num;
+ u8 is_tlv_append;
+ u8 rsv[3];
+ } __packed hdr;
+ struct req_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 active;
+ u8 band_idx;
+ u8 omac_addr[ETH_ALEN];
+ } __packed tlv;
+ } data = {
+ .hdr = {
+ .omac_idx = mvif->omac_idx,
+ .band_idx = mvif->band_idx,
+ .tlv_num = cpu_to_le16(1),
+ .is_tlv_append = 1,
+ },
+ .tlv = {
+ .tag = cpu_to_le16(DEV_INFO_ACTIVE),
+ .len = cpu_to_le16(sizeof(struct req_tlv)),
+ .active = enable,
+ .band_idx = mvif->band_idx,
+ },
+ };
- return __mt7615_mcu_set_dev_info(dev, &dev_info);
+ memcpy(data.tlv.omac_addr, vif->addr, ETH_ALEN);
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_DEV_INFO_UPDATE,
+ &data, sizeof(data), true);
}
-static void bss_info_omac_handler (struct mt7615_dev *dev,
- struct bss_info *bss_info,
- struct sk_buff *skb)
+static void
+mt7615_mcu_bss_info_omac_header(struct mt7615_vif *mvif, u8 *data,
+ u32 conn_type)
{
- struct bss_info_omac tlv = {0};
-
- tlv.tag = cpu_to_le16(BSS_INFO_OMAC);
- tlv.len = cpu_to_le16(sizeof(tlv));
- tlv.hw_bss_idx = (bss_info->omac_idx > EXT_BSSID_START) ?
- HW_BSSID_0 : bss_info->omac_idx;
- tlv.omac_idx = bss_info->omac_idx;
- tlv.band_idx = bss_info->band_idx;
- tlv.conn_type = cpu_to_le32(bss_info->conn_type);
-
- memcpy(skb_put(skb, sizeof(tlv)), &tlv, sizeof(tlv));
+ struct bss_info_omac *hdr = (struct bss_info_omac *)data;
+ u8 idx;
+
+ idx = mvif->omac_idx > EXT_BSSID_START ? HW_BSSID_0 : mvif->omac_idx;
+ hdr->tag = cpu_to_le16(BSS_INFO_OMAC);
+ hdr->len = cpu_to_le16(sizeof(struct bss_info_omac));
+ hdr->hw_bss_idx = idx;
+ hdr->omac_idx = mvif->omac_idx;
+ hdr->band_idx = mvif->band_idx;
+ hdr->conn_type = cpu_to_le32(conn_type);
}
-static void bss_info_basic_handler (struct mt7615_dev *dev,
- struct bss_info *bss_info,
- struct sk_buff *skb)
+static void
+mt7615_mcu_bss_info_basic_header(struct ieee80211_vif *vif, u8 *data,
+ u32 net_type, u8 tx_wlan_idx,
+ bool enable)
{
- struct bss_info_basic tlv = {0};
-
- tlv.tag = cpu_to_le16(BSS_INFO_BASIC);
- tlv.len = cpu_to_le16(sizeof(tlv));
- tlv.network_type = cpu_to_le32(bss_info->network_type);
- tlv.active = bss_info->enable;
- tlv.bcn_interval = cpu_to_le16(bss_info->bcn_interval);
- memcpy(tlv.bssid, bss_info->bssid, ETH_ALEN);
- tlv.wmm_idx = bss_info->wmm_idx;
- tlv.dtim_period = bss_info->dtim_period;
- tlv.bmc_tx_wlan_idx = bss_info->bmc_tx_wlan_idx;
-
- memcpy(skb_put(skb, sizeof(tlv)), &tlv, sizeof(tlv));
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct bss_info_basic *hdr = (struct bss_info_basic *)data;
+
+ hdr->tag = cpu_to_le16(BSS_INFO_BASIC);
+ hdr->len = cpu_to_le16(sizeof(struct bss_info_basic));
+ hdr->network_type = cpu_to_le32(net_type);
+ hdr->active = enable;
+ hdr->bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int);
+ memcpy(hdr->bssid, vif->bss_conf.bssid, ETH_ALEN);
+ hdr->wmm_idx = mvif->wmm_idx;
+ hdr->dtim_period = vif->bss_conf.dtim_period;
+ hdr->bmc_tx_wlan_idx = tx_wlan_idx;
}
-static void bss_info_ext_bss_handler (struct mt7615_dev *dev,
- struct bss_info *bss_info,
- struct sk_buff *skb)
+static void
+mt7615_mcu_bss_info_ext_header(struct mt7615_vif *mvif, u8 *data)
{
/* SIFS 20us + 512 byte beacon transmitted by 1Mbps (3906us) */
#define BCN_TX_ESTIMATE_TIME (4096 + 20)
- struct bss_info_ext_bss tlv = {0};
- int ext_bss_idx;
-
- ext_bss_idx = bss_info->omac_idx - EXT_BSSID_START;
+ struct bss_info_ext_bss *hdr = (struct bss_info_ext_bss *)data;
+ int ext_bss_idx, tsf_offset;
+ ext_bss_idx = mvif->omac_idx - EXT_BSSID_START;
if (ext_bss_idx < 0)
return;
- tlv.tag = cpu_to_le16(BSS_INFO_EXT_BSS);
- tlv.len = cpu_to_le16(sizeof(tlv));
- tlv.mbss_tsf_offset = ext_bss_idx * BCN_TX_ESTIMATE_TIME;
-
- memcpy(skb_put(skb, sizeof(tlv)), &tlv, sizeof(tlv));
+ hdr->tag = cpu_to_le16(BSS_INFO_EXT_BSS);
+ hdr->len = cpu_to_le16(sizeof(struct bss_info_ext_bss));
+ tsf_offset = ext_bss_idx * BCN_TX_ESTIMATE_TIME;
+ hdr->mbss_tsf_offset = cpu_to_le32(tsf_offset);
}
-static struct bss_info_tag_handler bss_info_tag_handler[] = {
- {BSS_INFO_OMAC, sizeof(struct bss_info_omac), bss_info_omac_handler},
- {BSS_INFO_BASIC, sizeof(struct bss_info_basic), bss_info_basic_handler},
- {BSS_INFO_RF_CH, sizeof(struct bss_info_rf_ch), NULL},
- {BSS_INFO_PM, 0, NULL},
- {BSS_INFO_UAPSD, 0, NULL},
- {BSS_INFO_ROAM_DETECTION, 0, NULL},
- {BSS_INFO_LQ_RM, 0, NULL},
- {BSS_INFO_EXT_BSS, sizeof(struct bss_info_ext_bss), bss_info_ext_bss_handler},
- {BSS_INFO_BMC_INFO, 0, NULL},
- {BSS_INFO_SYNC_MODE, 0, NULL},
- {BSS_INFO_RA, 0, NULL},
- {BSS_INFO_MAX_NUM, 0, NULL},
-};
-
-static int __mt7615_mcu_set_bss_info(struct mt7615_dev *dev,
- struct bss_info *bss_info)
+int mt7615_mcu_set_bss_info(struct mt7615_dev *dev,
+ struct ieee80211_vif *vif, int en)
{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
struct req_hdr {
u8 bss_idx;
u8 rsv0;
__le16 tlv_num;
u8 is_tlv_append;
u8 rsv1[3];
- } __packed req_hdr = {0};
- struct sk_buff *skb;
- u16 tlv_num = 0;
- u32 size = 0;
- int i;
+ } __packed;
+ int len = sizeof(struct req_hdr) + sizeof(struct bss_info_basic);
+ int ret, i, features = BIT(BSS_INFO_BASIC), ntlv = 1;
+ u32 conn_type = 0, net_type = NETWORK_INFRA;
+ u8 *buf, *data, tx_wlan_idx = 0;
+ struct req_hdr *hdr;
- for (i = 0; i < BSS_INFO_MAX_NUM; i++)
- if ((BIT(bss_info_tag_handler[i].tag) & bss_info->feature) &&
- bss_info_tag_handler[i].handler) {
- tlv_num++;
- size += bss_info_tag_handler[i].len;
+ if (en) {
+ len += sizeof(struct bss_info_omac);
+ features |= BIT(BSS_INFO_OMAC);
+ if (mvif->omac_idx > EXT_BSSID_START) {
+ len += sizeof(struct bss_info_ext_bss);
+ features |= BIT(BSS_INFO_EXT_BSS);
+ ntlv++;
}
+ ntlv++;
+ }
- skb = mt7615_mcu_msg_alloc(NULL, sizeof(req_hdr) + size);
-
- req_hdr.bss_idx = bss_info->bss_idx;
- req_hdr.tlv_num = cpu_to_le16(tlv_num);
- req_hdr.is_tlv_append = tlv_num ? 1 : 0;
-
- memcpy(skb_put(skb, sizeof(req_hdr)), &req_hdr, sizeof(req_hdr));
-
- for (i = 0; i < BSS_INFO_MAX_NUM; i++)
- if ((BIT(bss_info_tag_handler[i].tag) & bss_info->feature) &&
- bss_info_tag_handler[i].handler)
- bss_info_tag_handler[i].handler(dev, bss_info, skb);
-
- return mt7615_mcu_msg_send(dev, skb, MCU_EXT_CMD_BSS_INFO_UPDATE,
- MCU_Q_SET, MCU_S2D_H2N, NULL);
-}
-
-static void bss_info_convert_vif_type(enum nl80211_iftype type,
- u32 *network_type, u32 *conn_type)
-{
- switch (type) {
+ switch (vif->type) {
case NL80211_IFTYPE_AP:
- if (network_type)
- *network_type = NETWORK_INFRA;
- if (conn_type)
- *conn_type = CONNECTION_INFRA_AP;
+ case NL80211_IFTYPE_MESH_POINT:
+ tx_wlan_idx = mvif->sta.wcid.idx;
+ conn_type = CONNECTION_INFRA_AP;
break;
- case NL80211_IFTYPE_STATION:
- if (network_type)
- *network_type = NETWORK_INFRA;
- if (conn_type)
- *conn_type = CONNECTION_INFRA_STA;
+ case NL80211_IFTYPE_STATION: {
+ /* TODO: enable BSS_INFO_UAPSD & BSS_INFO_PM */
+ if (en) {
+ struct ieee80211_sta *sta;
+ struct mt7615_sta *msta;
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta(vif, vif->bss_conf.bssid);
+ if (!sta) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+
+ msta = (struct mt7615_sta *)sta->drv_priv;
+ tx_wlan_idx = msta->wcid.idx;
+ rcu_read_unlock();
+ }
+ conn_type = CONNECTION_INFRA_STA;
break;
+ }
default:
WARN_ON(1);
break;
- };
-}
-
-int mt7615_mcu_set_bss_info(struct mt7615_dev *dev, struct ieee80211_vif *vif,
- int en)
-{
- struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
- struct bss_info bss_info = {0};
- u8 bmc_tx_wlan_idx = 0;
- u32 network_type = 0, conn_type = 0;
-
- if (vif->type == NL80211_IFTYPE_AP) {
- bmc_tx_wlan_idx = mvif->sta.wcid.idx;
- } else if (vif->type == NL80211_IFTYPE_STATION) {
- /* find the unicast entry for sta mode bmc tx */
- struct ieee80211_sta *ap_sta;
- struct mt7615_sta *msta;
-
- rcu_read_lock();
-
- ap_sta = ieee80211_find_sta(vif, vif->bss_conf.bssid);
- if (!ap_sta) {
- rcu_read_unlock();
- return -EINVAL;
- }
-
- msta = (struct mt7615_sta *)ap_sta->drv_priv;
- bmc_tx_wlan_idx = msta->wcid.idx;
-
- rcu_read_unlock();
- } else {
- WARN_ON(1);
}
- bss_info_convert_vif_type(vif->type, &network_type, &conn_type);
-
- bss_info.bss_idx = mvif->idx;
- memcpy(bss_info.bssid, vif->bss_conf.bssid, ETH_ALEN);
- bss_info.omac_idx = mvif->omac_idx;
- bss_info.band_idx = mvif->band_idx;
- bss_info.bmc_tx_wlan_idx = bmc_tx_wlan_idx;
- bss_info.wmm_idx = mvif->wmm_idx;
- bss_info.network_type = network_type;
- bss_info.conn_type = conn_type;
- bss_info.bcn_interval = vif->bss_conf.beacon_int;
- bss_info.dtim_period = vif->bss_conf.dtim_period;
- bss_info.enable = en;
- bss_info.feature = BIT(BSS_INFO_BASIC);
- if (en) {
- bss_info.feature |= BIT(BSS_INFO_OMAC);
- if (mvif->omac_idx > EXT_BSSID_START)
- bss_info.feature |= BIT(BSS_INFO_EXT_BSS);
- }
-
- return __mt7615_mcu_set_bss_info(dev, &bss_info);
-}
+ buf = kzalloc(len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
-static int __mt7615_mcu_set_wtbl(struct mt7615_dev *dev, int wlan_idx,
- int operation, void *buf, int buf_len)
-{
- struct req_hdr {
- u8 wlan_idx;
- u8 operation;
- __le16 tlv_num;
- u8 rsv[4];
- } __packed req_hdr = {0};
- struct tlv {
- __le16 tag;
- __le16 len;
- u8 buf[0];
- } __packed;
- struct sk_buff *skb;
- u16 tlv_num = 0;
- int offset = 0;
+ hdr = (struct req_hdr *)buf;
+ hdr->bss_idx = mvif->idx;
+ hdr->tlv_num = cpu_to_le16(ntlv);
+ hdr->is_tlv_append = 1;
- while (offset < buf_len) {
- struct tlv *tlv = (struct tlv *)((u8 *)buf + offset);
+ data = buf + sizeof(*hdr);
+ for (i = 0; i < BSS_INFO_MAX_NUM; i++) {
+ int tag = ffs(features & BIT(i)) - 1;
- tlv_num++;
- offset += tlv->len;
+ switch (tag) {
+ case BSS_INFO_OMAC:
+ mt7615_mcu_bss_info_omac_header(mvif, data,
+ conn_type);
+ data += sizeof(struct bss_info_omac);
+ break;
+ case BSS_INFO_BASIC:
+ mt7615_mcu_bss_info_basic_header(vif, data, net_type,
+ tx_wlan_idx, en);
+ data += sizeof(struct bss_info_basic);
+ break;
+ case BSS_INFO_EXT_BSS:
+ mt7615_mcu_bss_info_ext_header(mvif, data);
+ data += sizeof(struct bss_info_ext_bss);
+ break;
+ default:
+ break;
+ }
}
- skb = mt7615_mcu_msg_alloc(NULL, sizeof(req_hdr) + buf_len);
-
- req_hdr.wlan_idx = wlan_idx;
- req_hdr.operation = operation;
- req_hdr.tlv_num = cpu_to_le16(tlv_num);
-
- memcpy(skb_put(skb, sizeof(req_hdr)), &req_hdr, sizeof(req_hdr));
-
- if (buf && buf_len)
- memcpy(skb_put(skb, buf_len), buf, buf_len);
+ ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_BSS_INFO_UPDATE,
+ buf, len, true);
+ kfree(buf);
- return mt7615_mcu_msg_send(dev, skb, MCU_EXT_CMD_WTBL_UPDATE,
- MCU_Q_SET, MCU_S2D_H2N, NULL);
+ return ret;
}
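The buffer built above follows the firmware's TLV layout: a fixed request header carrying the TLV count, followed by one tag/length-prefixed record per enabled feature, with each record's length field covering the whole record (as the sizeof() assignments suggest). A toy, host-endian sketch of that packing pattern; tags and payloads here are placeholders, and the real messages use little-endian fields:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tlv_hdr {
	uint16_t tag;
	uint16_t len;	/* total record length, header included */
};

/* Append one TLV record and return the next write position. */
static uint8_t *put_tlv(uint8_t *p, uint16_t tag, const void *payload, uint16_t plen)
{
	struct tlv_hdr hdr = { tag, (uint16_t)(sizeof(hdr) + plen) };

	memcpy(p, &hdr, sizeof(hdr));
	memcpy(p + sizeof(hdr), payload, plen);
	return p + hdr.len;
}

int main(void)
{
	uint8_t buf[64], *p = buf;
	uint32_t omac = 0, basic = 1;	/* placeholder payloads */

	p = put_tlv(p, 0x01, &omac, sizeof(omac));	/* placeholder tags */
	p = put_tlv(p, 0x02, &basic, sizeof(basic));
	printf("packed %zu bytes in 2 TLVs\n", (size_t)(p - buf));
	return 0;
}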
static enum mt7615_cipher_type
@@ -995,70 +861,90 @@ int mt7615_mcu_set_wtbl_key(struct mt7615_dev *dev, int wcid,
struct ieee80211_key_conf *key,
enum set_key_cmd cmd)
{
- struct wtbl_sec_key wtbl_sec_key = {0};
- int buf_len = sizeof(struct wtbl_sec_key);
- u8 cipher;
-
- wtbl_sec_key.tag = cpu_to_le16(WTBL_SEC_KEY);
- wtbl_sec_key.len = cpu_to_le16(buf_len);
- wtbl_sec_key.add = cmd;
+ struct {
+ struct wtbl_req_hdr hdr;
+ struct wtbl_sec_key key;
+ } req = {
+ .hdr = {
+ .wlan_idx = wcid,
+ .operation = WTBL_SET,
+ .tlv_num = cpu_to_le16(1),
+ },
+ .key = {
+ .tag = cpu_to_le16(WTBL_SEC_KEY),
+ .len = cpu_to_le16(sizeof(struct wtbl_sec_key)),
+ .add = cmd,
+ },
+ };
if (cmd == SET_KEY) {
- cipher = mt7615_get_key_info(key, wtbl_sec_key.key_material);
- if (cipher == MT_CIPHER_NONE && key)
+ u8 cipher;
+
+ cipher = mt7615_get_key_info(key, req.key.key_material);
+ if (cipher == MT_CIPHER_NONE)
return -EOPNOTSUPP;
- wtbl_sec_key.cipher_id = cipher;
- wtbl_sec_key.key_id = key->keyidx;
- wtbl_sec_key.key_len = key->keylen;
+ req.key.rkv = 1;
+ req.key.cipher_id = cipher;
+ req.key.key_id = key->keyidx;
+ req.key.key_len = key->keylen;
} else {
- wtbl_sec_key.key_len = sizeof(wtbl_sec_key.key_material);
+ req.key.key_len = sizeof(req.key.key_material);
}
- return __mt7615_mcu_set_wtbl(dev, wcid, WTBL_SET, &wtbl_sec_key,
- buf_len);
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_WTBL_UPDATE,
+ &req, sizeof(req), true);
}
-int mt7615_mcu_add_wtbl_bmc(struct mt7615_dev *dev, struct ieee80211_vif *vif)
+static int
+mt7615_mcu_add_wtbl_bmc(struct mt7615_dev *dev,
+ struct mt7615_vif *mvif)
{
- struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
- struct wtbl_generic *wtbl_generic;
- struct wtbl_rx *wtbl_rx;
- int buf_len, ret;
- u8 *buf;
-
- buf = kzalloc(MT7615_WTBL_UPDATE_MAX_SIZE, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- wtbl_generic = (struct wtbl_generic *)buf;
- buf_len = sizeof(*wtbl_generic);
- wtbl_generic->tag = cpu_to_le16(WTBL_GENERIC);
- wtbl_generic->len = cpu_to_le16(buf_len);
- eth_broadcast_addr(wtbl_generic->peer_addr);
- wtbl_generic->muar_idx = 0xe;
-
- wtbl_rx = (struct wtbl_rx *)(buf + buf_len);
- buf_len += sizeof(*wtbl_rx);
- wtbl_rx->tag = cpu_to_le16(WTBL_RX);
- wtbl_rx->len = cpu_to_le16(sizeof(*wtbl_rx));
- wtbl_rx->rca1 = 1;
- wtbl_rx->rca2 = 1;
- wtbl_rx->rv = 1;
-
- ret = __mt7615_mcu_set_wtbl(dev, mvif->sta.wcid.idx,
- WTBL_RESET_AND_SET, buf, buf_len);
+ struct {
+ struct wtbl_req_hdr hdr;
+ struct wtbl_generic g_wtbl;
+ struct wtbl_rx rx_wtbl;
+ } req = {
+ .hdr = {
+ .wlan_idx = mvif->sta.wcid.idx,
+ .operation = WTBL_RESET_AND_SET,
+ .tlv_num = cpu_to_le16(2),
+ },
+ .g_wtbl = {
+ .tag = cpu_to_le16(WTBL_GENERIC),
+ .len = cpu_to_le16(sizeof(struct wtbl_generic)),
+ .muar_idx = 0xe,
+ },
+ .rx_wtbl = {
+ .tag = cpu_to_le16(WTBL_RX),
+ .len = cpu_to_le16(sizeof(struct wtbl_rx)),
+ .rca1 = 1,
+ .rca2 = 1,
+ .rv = 1,
+ },
+ };
+ eth_broadcast_addr(req.g_wtbl.peer_addr);
- kfree(buf);
- return ret;
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_WTBL_UPDATE,
+ &req, sizeof(req), true);
}
-int mt7615_mcu_del_wtbl_bmc(struct mt7615_dev *dev, struct ieee80211_vif *vif)
+int mt7615_mcu_wtbl_bmc(struct mt7615_dev *dev,
+ struct ieee80211_vif *vif, bool enable)
{
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
- return __mt7615_mcu_set_wtbl(dev, mvif->sta.wcid.idx,
- WTBL_RESET_AND_SET, NULL, 0);
+ if (!enable) {
+ struct wtbl_req_hdr req = {
+ .wlan_idx = mvif->sta.wcid.idx,
+ .operation = WTBL_RESET_AND_SET,
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_WTBL_UPDATE,
+ &req, sizeof(req), true);
+ }
+
+ return mt7615_mcu_add_wtbl_bmc(dev, mvif);
}
int mt7615_mcu_add_wtbl(struct mt7615_dev *dev, struct ieee80211_vif *vif,
@@ -1066,175 +952,153 @@ int mt7615_mcu_add_wtbl(struct mt7615_dev *dev, struct ieee80211_vif *vif,
{
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
- struct wtbl_generic *wtbl_generic;
- struct wtbl_rx *wtbl_rx;
- int buf_len, ret;
- u8 *buf;
-
- buf = kzalloc(MT7615_WTBL_UPDATE_MAX_SIZE, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- wtbl_generic = (struct wtbl_generic *)buf;
- buf_len = sizeof(*wtbl_generic);
- wtbl_generic->tag = cpu_to_le16(WTBL_GENERIC);
- wtbl_generic->len = cpu_to_le16(buf_len);
- memcpy(wtbl_generic->peer_addr, sta->addr, ETH_ALEN);
- wtbl_generic->muar_idx = mvif->omac_idx;
- wtbl_generic->qos = sta->wme;
- wtbl_generic->partial_aid = cpu_to_le16(sta->aid);
-
- wtbl_rx = (struct wtbl_rx *)(buf + buf_len);
- buf_len += sizeof(*wtbl_rx);
- wtbl_rx->tag = cpu_to_le16(WTBL_RX);
- wtbl_rx->len = cpu_to_le16(sizeof(*wtbl_rx));
- wtbl_rx->rca1 = (vif->type == NL80211_IFTYPE_AP) ? 0 : 1;
- wtbl_rx->rca2 = 1;
- wtbl_rx->rv = 1;
-
- ret = __mt7615_mcu_set_wtbl(dev, msta->wcid.idx,
- WTBL_RESET_AND_SET, buf, buf_len);
+ struct {
+ struct wtbl_req_hdr hdr;
+ struct wtbl_generic g_wtbl;
+ struct wtbl_rx rx_wtbl;
+ } req = {
+ .hdr = {
+ .wlan_idx = msta->wcid.idx,
+ .operation = WTBL_RESET_AND_SET,
+ .tlv_num = cpu_to_le16(2),
+ },
+ .g_wtbl = {
+ .tag = cpu_to_le16(WTBL_GENERIC),
+ .len = cpu_to_le16(sizeof(struct wtbl_generic)),
+ .muar_idx = mvif->omac_idx,
+ .qos = sta->wme,
+ .partial_aid = cpu_to_le16(sta->aid),
+ },
+ .rx_wtbl = {
+ .tag = cpu_to_le16(WTBL_RX),
+ .len = cpu_to_le16(sizeof(struct wtbl_rx)),
+ .rca1 = vif->type != NL80211_IFTYPE_AP,
+ .rca2 = 1,
+ .rv = 1,
+ },
+ };
+ memcpy(req.g_wtbl.peer_addr, sta->addr, ETH_ALEN);
- kfree(buf);
- return ret;
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_WTBL_UPDATE,
+ &req, sizeof(req), true);
}
-int mt7615_mcu_del_wtbl(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+int mt7615_mcu_del_wtbl(struct mt7615_dev *dev,
struct ieee80211_sta *sta)
{
struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
+ struct wtbl_req_hdr req = {
+ .wlan_idx = msta->wcid.idx,
+ .operation = WTBL_RESET_AND_SET,
+ };
- return __mt7615_mcu_set_wtbl(dev, msta->wcid.idx,
- WTBL_RESET_AND_SET, NULL, 0);
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_WTBL_UPDATE,
+ &req, sizeof(req), true);
}
int mt7615_mcu_del_wtbl_all(struct mt7615_dev *dev)
{
- return __mt7615_mcu_set_wtbl(dev, 0, WTBL_RESET_ALL, NULL, 0);
-}
-
-static int __mt7615_mcu_set_sta_rec(struct mt7615_dev *dev, int bss_idx,
- int wlan_idx, int muar_idx, void *buf,
- int buf_len)
-{
- struct req_hdr {
- u8 bss_idx;
- u8 wlan_idx;
- __le16 tlv_num;
- u8 is_tlv_append;
- u8 muar_idx;
- u8 rsv[2];
- } __packed req_hdr = {0};
- struct tlv {
- __le16 tag;
- __le16 len;
- u8 buf[0];
- } __packed;
- struct sk_buff *skb;
- u16 tlv_num = 0;
- int offset = 0;
-
- while (offset < buf_len) {
- struct tlv *tlv = (struct tlv *)((u8 *)buf + offset);
-
- tlv_num++;
- offset += tlv->len;
- }
-
- skb = mt7615_mcu_msg_alloc(NULL, sizeof(req_hdr) + buf_len);
-
- req_hdr.bss_idx = bss_idx;
- req_hdr.wlan_idx = wlan_idx;
- req_hdr.tlv_num = cpu_to_le16(tlv_num);
- req_hdr.is_tlv_append = tlv_num ? 1 : 0;
- req_hdr.muar_idx = muar_idx;
-
- memcpy(skb_put(skb, sizeof(req_hdr)), &req_hdr, sizeof(req_hdr));
-
- if (buf && buf_len)
- memcpy(skb_put(skb, buf_len), buf, buf_len);
+ struct wtbl_req_hdr req = {
+ .operation = WTBL_RESET_ALL,
+ };
- return mt7615_mcu_msg_send(dev, skb, MCU_EXT_CMD_STA_REC_UPDATE,
- MCU_Q_SET, MCU_S2D_H2N, NULL);
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_WTBL_UPDATE,
+ &req, sizeof(req), true);
}
int mt7615_mcu_set_sta_rec_bmc(struct mt7615_dev *dev,
struct ieee80211_vif *vif, bool en)
{
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
- struct sta_rec_basic sta_rec_basic = {0};
- int buf_len = sizeof(struct sta_rec_basic);
+ struct {
+ struct sta_req_hdr hdr;
+ struct sta_rec_basic basic;
+ } req = {
+ .hdr = {
+ .bss_idx = mvif->idx,
+ .wlan_idx = mvif->sta.wcid.idx,
+ .tlv_num = cpu_to_le16(1),
+ .is_tlv_append = 1,
+ .muar_idx = mvif->omac_idx,
+ },
+ .basic = {
+ .tag = cpu_to_le16(STA_REC_BASIC),
+ .len = cpu_to_le16(sizeof(struct sta_rec_basic)),
+ .conn_type = cpu_to_le32(CONNECTION_INFRA_BC),
+ },
+ };
+ eth_broadcast_addr(req.basic.peer_addr);
- sta_rec_basic.tag = cpu_to_le16(STA_REC_BASIC);
- sta_rec_basic.len = cpu_to_le16(buf_len);
- sta_rec_basic.conn_type = cpu_to_le32(CONNECTION_INFRA_BC);
- eth_broadcast_addr(sta_rec_basic.peer_addr);
if (en) {
- sta_rec_basic.conn_state = CONN_STATE_PORT_SECURE;
- sta_rec_basic.extra_info =
- cpu_to_le16(EXTRA_INFO_VER | EXTRA_INFO_NEW);
+ req.basic.conn_state = CONN_STATE_PORT_SECURE;
+ req.basic.extra_info = cpu_to_le16(EXTRA_INFO_VER |
+ EXTRA_INFO_NEW);
} else {
- sta_rec_basic.conn_state = CONN_STATE_DISCONNECT;
- sta_rec_basic.extra_info = cpu_to_le16(EXTRA_INFO_VER);
+ req.basic.conn_state = CONN_STATE_DISCONNECT;
+ req.basic.extra_info = cpu_to_le16(EXTRA_INFO_VER);
}
- return __mt7615_mcu_set_sta_rec(dev, mvif->idx, mvif->sta.wcid.idx,
- mvif->omac_idx, &sta_rec_basic,
- buf_len);
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_STA_REC_UPDATE,
+ &req, sizeof(req), true);
}
-static void sta_rec_convert_vif_type(enum nl80211_iftype type, u32 *conn_type)
+int mt7615_mcu_set_sta_rec(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool en)
{
- switch (type) {
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
+
+ struct {
+ struct sta_req_hdr hdr;
+ struct sta_rec_basic basic;
+ } req = {
+ .hdr = {
+ .bss_idx = mvif->idx,
+ .wlan_idx = msta->wcid.idx,
+ .tlv_num = cpu_to_le16(1),
+ .is_tlv_append = 1,
+ .muar_idx = mvif->omac_idx,
+ },
+ .basic = {
+ .tag = cpu_to_le16(STA_REC_BASIC),
+ .len = cpu_to_le16(sizeof(struct sta_rec_basic)),
+ .qos = sta->wme,
+ .aid = cpu_to_le16(sta->aid),
+ },
+ };
+ memcpy(req.basic.peer_addr, sta->addr, ETH_ALEN);
+
+ switch (vif->type) {
case NL80211_IFTYPE_AP:
- if (conn_type)
- *conn_type = CONNECTION_INFRA_STA;
+ case NL80211_IFTYPE_MESH_POINT:
+ req.basic.conn_type = cpu_to_le32(CONNECTION_INFRA_STA);
break;
case NL80211_IFTYPE_STATION:
- if (conn_type)
- *conn_type = CONNECTION_INFRA_AP;
+ req.basic.conn_type = cpu_to_le32(CONNECTION_INFRA_AP);
break;
default:
WARN_ON(1);
break;
};
-}
-
-int mt7615_mcu_set_sta_rec(struct mt7615_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, bool en)
-{
- struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
- struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
- struct sta_rec_basic sta_rec_basic = {0};
- int buf_len = sizeof(struct sta_rec_basic);
- u32 conn_type = 0;
-
- sta_rec_convert_vif_type(vif->type, &conn_type);
-
- sta_rec_basic.tag = cpu_to_le16(STA_REC_BASIC);
- sta_rec_basic.len = cpu_to_le16(buf_len);
- sta_rec_basic.conn_type = cpu_to_le32(conn_type);
- sta_rec_basic.qos = sta->wme;
- sta_rec_basic.aid = cpu_to_le16(sta->aid);
- memcpy(sta_rec_basic.peer_addr, sta->addr, ETH_ALEN);
if (en) {
- sta_rec_basic.conn_state = CONN_STATE_PORT_SECURE;
- sta_rec_basic.extra_info =
- cpu_to_le16(EXTRA_INFO_VER | EXTRA_INFO_NEW);
+ req.basic.conn_state = CONN_STATE_PORT_SECURE;
+ req.basic.extra_info = cpu_to_le16(EXTRA_INFO_VER |
+ EXTRA_INFO_NEW);
} else {
- sta_rec_basic.conn_state = CONN_STATE_DISCONNECT;
- sta_rec_basic.extra_info = cpu_to_le16(EXTRA_INFO_VER);
+ req.basic.conn_state = CONN_STATE_DISCONNECT;
+ req.basic.extra_info = cpu_to_le16(EXTRA_INFO_VER);
}
- return __mt7615_mcu_set_sta_rec(dev, mvif->idx, msta->wcid.idx,
- mvif->omac_idx, &sta_rec_basic,
- buf_len);
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_STA_REC_UPDATE,
+ &req, sizeof(req), true);
}
int mt7615_mcu_set_bcn(struct mt7615_dev *dev, struct ieee80211_vif *vif,
int en)
{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct mt76_wcid *wcid = &dev->mt76.global_wcid;
struct req {
u8 omac_idx;
u8 enable;
@@ -1250,14 +1114,18 @@ int mt7615_mcu_set_bcn(struct mt7615_dev *dev, struct ieee80211_vif *vif,
/* bss color change */
u8 bcc_cnt;
__le16 bcc_ie_pos;
- } __packed req = {0};
- struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
- struct mt76_wcid *wcid = &dev->mt76.global_wcid;
+ } __packed req = {
+ .omac_idx = mvif->omac_idx,
+ .enable = en,
+ .wlan_idx = wcid->idx,
+ .band_idx = mvif->band_idx,
+ /* pkt_type: 0 for bcn, 1 for tim */
+ .pkt_type = 0,
+ };
struct sk_buff *skb;
- u16 tim_off, tim_len;
-
- skb = ieee80211_beacon_get_tim(mt76_hw(dev), vif, &tim_off, &tim_len);
+ u16 tim_off;
+ skb = ieee80211_beacon_get_tim(mt76_hw(dev), vif, &tim_off, NULL);
if (!skb)
return -EINVAL;
@@ -1270,21 +1138,79 @@ int mt7615_mcu_set_bcn(struct mt7615_dev *dev, struct ieee80211_vif *vif,
mt7615_mac_write_txwi(dev, (__le32 *)(req.pkt), skb, wcid, NULL,
0, NULL);
memcpy(req.pkt + MT_TXD_SIZE, skb->data, skb->len);
- dev_kfree_skb(skb);
-
- req.omac_idx = mvif->omac_idx;
- req.enable = en;
- req.wlan_idx = wcid->idx;
- req.band_idx = mvif->band_idx;
- /* pky_type: 0 for bcn, 1 for tim */
- req.pkt_type = 0;
req.pkt_len = cpu_to_le16(MT_TXD_SIZE + skb->len);
req.tim_ie_pos = cpu_to_le16(MT_TXD_SIZE + tim_off);
- skb = mt7615_mcu_msg_alloc(&req, sizeof(req));
+ dev_kfree_skb(skb);
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_BCN_OFFLOAD,
+ &req, sizeof(req), true);
+}
+
+int mt7615_mcu_set_tx_power(struct mt7615_dev *dev)
+{
+ int i, ret, n_chains = hweight8(dev->mt76.antenna_mask);
+ struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
+ int freq = chandef->center_freq1, len, target_chains;
+ u8 *req, *data, *eep = (u8 *)dev->mt76.eeprom.data;
+ enum nl80211_band band = chandef->chan->band;
+ struct ieee80211_hw *hw = mt76_hw(dev);
+ struct {
+ u8 center_chan;
+ u8 dbdc_idx;
+ u8 band;
+ u8 rsv;
+ } __packed req_hdr = {
+ .center_chan = ieee80211_frequency_to_channel(freq),
+ .band = band,
+ };
+ s8 tx_power;
+
+ len = sizeof(req_hdr) + __MT_EE_MAX - MT_EE_NIC_CONF_0;
+ req = kzalloc(len, GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
- return mt7615_mcu_msg_send(dev, skb, MCU_EXT_CMD_BCN_OFFLOAD,
- MCU_Q_SET, MCU_S2D_H2N, NULL);
+ memcpy(req, &req_hdr, sizeof(req_hdr));
+ data = req + sizeof(req_hdr);
+ memcpy(data, eep + MT_EE_NIC_CONF_0,
+ __MT_EE_MAX - MT_EE_NIC_CONF_0);
+
+ tx_power = hw->conf.power_level * 2;
+ switch (n_chains) {
+ case 4:
+ tx_power -= 12;
+ break;
+ case 3:
+ tx_power -= 8;
+ break;
+ case 2:
+ tx_power -= 6;
+ break;
+ default:
+ break;
+ }
+ tx_power = max_t(s8, tx_power, 0);
+ dev->mt76.txpower_cur = tx_power;
+
+ target_chains = mt7615_ext_pa_enabled(dev, band) ? 1 : n_chains;
+ for (i = 0; i < target_chains; i++) {
+ int index = -MT_EE_NIC_CONF_0;
+
+ ret = mt7615_eeprom_get_power_index(dev, chandef->chan, i);
+ if (ret < 0)
+ goto out;
+
+ index += ret;
+ data[index] = min_t(u8, data[index], tx_power);
+ }
+
+ ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_TX_POWER_CTRL,
+ req, len, true);
+out:
+ kfree(req);
+
+ return ret;
}
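mt7615_mcu_set_tx_power() works in 0.5 dB units: the requested power is doubled, reduced according to the chain count, clamped at zero, and then used to cap each per-chain EEPROM target before the table is sent to the firmware. A standalone sketch of that clamp (values are made up):

#include <stdio.h>

/* Convert the requested power (dBm) into the per-chain cap in 0.5 dB units. */
static int chain_tx_power(int power_level_dbm, int n_chains)
{
	int tx_power = power_level_dbm * 2;

	switch (n_chains) {
	case 4: tx_power -= 12; break;
	case 3: tx_power -= 8;  break;
	case 2: tx_power -= 6;  break;
	default: break;
	}
	return tx_power > 0 ? tx_power : 0;
}

int main(void)
{
	unsigned char eeprom_target = 46;	/* 23 dBm in 0.5 dB units */
	int cap = chain_tx_power(20, 4);	/* 20 dBm requested, 4 chains -> 28 */
	unsigned char capped = eeprom_target < cap ? eeprom_target : cap;

	printf("per-chain target capped to %u\n", (unsigned int)capped);
	return 0;
}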
int mt7615_mcu_set_channel(struct mt7615_dev *dev)
@@ -1309,7 +1235,6 @@ int mt7615_mcu_set_channel(struct mt7615_dev *dev)
u8 txpower_sku[53];
u8 rsv2[3];
} req = {0};
- struct sk_buff *skb;
int ret;
req.control_chan = chdef->chan->hw_value;
@@ -1345,18 +1270,15 @@ int mt7615_mcu_set_channel(struct mt7615_dev *dev)
default:
req.bw = CMD_CBW_20MHZ;
}
-
memset(req.txpower_sku, 0x3f, 49);
- skb = mt7615_mcu_msg_alloc(&req, sizeof(req));
- ret = mt7615_mcu_msg_send(dev, skb, MCU_EXT_CMD_CHANNEL_SWITCH,
- MCU_Q_SET, MCU_S2D_H2N, NULL);
+ ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_CHANNEL_SWITCH,
+ &req, sizeof(req), true);
if (ret)
return ret;
- skb = mt7615_mcu_msg_alloc(&req, sizeof(req));
- return mt7615_mcu_msg_send(dev, skb, MCU_EXT_CMD_SET_RX_PATH,
- MCU_Q_SET, MCU_S2D_H2N, NULL);
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RX_PATH,
+ &req, sizeof(req), true);
}
int mt7615_mcu_set_ht_cap(struct mt7615_dev *dev, struct ieee80211_vif *vif,
@@ -1364,10 +1286,12 @@ int mt7615_mcu_set_ht_cap(struct mt7615_dev *dev, struct ieee80211_vif *vif,
{
struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
- struct wtbl_ht *wtbl_ht;
+ struct wtbl_req_hdr *wtbl_hdr;
+ struct sta_req_hdr *sta_hdr;
struct wtbl_raw *wtbl_raw;
- struct sta_rec_ht *sta_rec_ht;
- int buf_len, ret;
+ struct sta_rec_ht *sta_ht;
+ struct wtbl_ht *wtbl_ht;
+ int buf_len, ret, ntlv = 2;
u32 msk, val = 0;
u8 *buf;
@@ -1375,15 +1299,20 @@ int mt7615_mcu_set_ht_cap(struct mt7615_dev *dev, struct ieee80211_vif *vif,
if (!buf)
return -ENOMEM;
+ wtbl_hdr = (struct wtbl_req_hdr *)buf;
+ wtbl_hdr->wlan_idx = msta->wcid.idx;
+ wtbl_hdr->operation = WTBL_SET;
+ buf_len = sizeof(*wtbl_hdr);
+
/* ht basic */
- buf_len = sizeof(*wtbl_ht);
- wtbl_ht = (struct wtbl_ht *)buf;
+ wtbl_ht = (struct wtbl_ht *)(buf + buf_len);
wtbl_ht->tag = cpu_to_le16(WTBL_HT);
wtbl_ht->len = cpu_to_le16(sizeof(*wtbl_ht));
wtbl_ht->ht = 1;
wtbl_ht->ldpc = sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING;
wtbl_ht->af = sta->ht_cap.ampdu_factor;
wtbl_ht->mm = sta->ht_cap.ampdu_density;
+ buf_len += sizeof(*wtbl_ht);
if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
val |= MT_WTBL_W5_SHORT_GI_20;
@@ -1400,6 +1329,7 @@ int mt7615_mcu_set_ht_cap(struct mt7615_dev *dev, struct ieee80211_vif *vif,
wtbl_vht->len = cpu_to_le16(sizeof(*wtbl_vht));
wtbl_vht->ldpc = sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC;
wtbl_vht->vht = 1;
+ ntlv++;
if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80)
val |= MT_WTBL_W5_SHORT_GI_80;
@@ -1416,6 +1346,7 @@ int mt7615_mcu_set_ht_cap(struct mt7615_dev *dev, struct ieee80211_vif *vif,
wtbl_smps->tag = cpu_to_le16(WTBL_SMPS);
wtbl_smps->len = cpu_to_le16(sizeof(*wtbl_smps));
wtbl_smps->smps = 1;
+ ntlv++;
}
/* sgi */
@@ -1431,38 +1362,46 @@ int mt7615_mcu_set_ht_cap(struct mt7615_dev *dev, struct ieee80211_vif *vif,
wtbl_raw->msk = cpu_to_le32(~msk);
wtbl_raw->val = cpu_to_le32(val);
- ret = __mt7615_mcu_set_wtbl(dev, msta->wcid.idx, WTBL_SET, buf,
- buf_len);
- if (ret) {
- kfree(buf);
- return ret;
- }
+ wtbl_hdr->tlv_num = cpu_to_le16(ntlv);
+ ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_WTBL_UPDATE,
+ buf, buf_len, true);
+ if (ret)
+ goto out;
memset(buf, 0, MT7615_WTBL_UPDATE_MAX_SIZE);
- buf_len = sizeof(*sta_rec_ht);
- sta_rec_ht = (struct sta_rec_ht *)buf;
- sta_rec_ht->tag = cpu_to_le16(STA_REC_HT);
- sta_rec_ht->len = cpu_to_le16(sizeof(*sta_rec_ht));
- sta_rec_ht->ht_cap = cpu_to_le16(sta->ht_cap.cap);
+ sta_hdr = (struct sta_req_hdr *)buf;
+ sta_hdr->bss_idx = mvif->idx;
+ sta_hdr->wlan_idx = msta->wcid.idx;
+ sta_hdr->is_tlv_append = 1;
+ ntlv = sta->vht_cap.vht_supported ? 2 : 1;
+ sta_hdr->tlv_num = cpu_to_le16(ntlv);
+ sta_hdr->muar_idx = mvif->omac_idx;
+ buf_len = sizeof(*sta_hdr);
+
+ sta_ht = (struct sta_rec_ht *)(buf + buf_len);
+ sta_ht->tag = cpu_to_le16(STA_REC_HT);
+ sta_ht->len = cpu_to_le16(sizeof(*sta_ht));
+ sta_ht->ht_cap = cpu_to_le16(sta->ht_cap.cap);
+ buf_len += sizeof(*sta_ht);
if (sta->vht_cap.vht_supported) {
- struct sta_rec_vht *sta_rec_vht;
-
- sta_rec_vht = (struct sta_rec_vht *)(buf + buf_len);
- buf_len += sizeof(*sta_rec_vht);
- sta_rec_vht->tag = cpu_to_le16(STA_REC_VHT);
- sta_rec_vht->len = cpu_to_le16(sizeof(*sta_rec_vht));
- sta_rec_vht->vht_cap = cpu_to_le32(sta->vht_cap.cap);
- sta_rec_vht->vht_rx_mcs_map =
- cpu_to_le16(sta->vht_cap.vht_mcs.rx_mcs_map);
- sta_rec_vht->vht_tx_mcs_map =
- cpu_to_le16(sta->vht_cap.vht_mcs.tx_mcs_map);
+ struct sta_rec_vht *sta_vht;
+
+ sta_vht = (struct sta_rec_vht *)(buf + buf_len);
+ buf_len += sizeof(*sta_vht);
+ sta_vht->tag = cpu_to_le16(STA_REC_VHT);
+ sta_vht->len = cpu_to_le16(sizeof(*sta_vht));
+ sta_vht->vht_cap = cpu_to_le32(sta->vht_cap.cap);
+ sta_vht->vht_rx_mcs_map = sta->vht_cap.vht_mcs.rx_mcs_map;
+ sta_vht->vht_tx_mcs_map = sta->vht_cap.vht_mcs.tx_mcs_map;
}
- ret = __mt7615_mcu_set_sta_rec(dev, mvif->idx, msta->wcid.idx,
- mvif->omac_idx, buf, buf_len);
+ ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_STA_REC_UPDATE,
+ buf, buf_len, true);
+out:
kfree(buf);
+
return ret;
}
@@ -1470,98 +1409,128 @@ int mt7615_mcu_set_tx_ba(struct mt7615_dev *dev,
struct ieee80211_ampdu_params *params,
bool add)
{
- struct ieee80211_sta *sta = params->sta;
- struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
+ struct mt7615_sta *msta = (struct mt7615_sta *)params->sta->drv_priv;
struct mt7615_vif *mvif = msta->vif;
- u8 ba_range[8] = {4, 8, 12, 24, 36, 48, 54, 64};
- u16 tid = params->tid;
- u16 ba_size = params->buf_size;
- u16 ssn = params->ssn;
- struct wtbl_ba wtbl_ba = {0};
- struct sta_rec_ba sta_rec_ba = {0};
- int ret, buf_len;
-
- buf_len = sizeof(struct wtbl_ba);
-
- wtbl_ba.tag = cpu_to_le16(WTBL_BA);
- wtbl_ba.len = cpu_to_le16(buf_len);
- wtbl_ba.tid = tid;
- wtbl_ba.ba_type = MT_BA_TYPE_ORIGINATOR;
+ struct {
+ struct wtbl_req_hdr hdr;
+ struct wtbl_ba ba;
+ } wtbl_req = {
+ .hdr = {
+ .wlan_idx = msta->wcid.idx,
+ .operation = WTBL_SET,
+ .tlv_num = cpu_to_le16(1),
+ },
+ .ba = {
+ .tag = cpu_to_le16(WTBL_BA),
+ .len = cpu_to_le16(sizeof(struct wtbl_ba)),
+ .tid = params->tid,
+ .ba_type = MT_BA_TYPE_ORIGINATOR,
+ .sn = add ? cpu_to_le16(params->ssn) : 0,
+ .ba_en = add,
+ },
+ };
+ struct {
+ struct sta_req_hdr hdr;
+ struct sta_rec_ba ba;
+ } sta_req = {
+ .hdr = {
+ .bss_idx = mvif->idx,
+ .wlan_idx = msta->wcid.idx,
+ .tlv_num = cpu_to_le16(1),
+ .is_tlv_append = 1,
+ .muar_idx = mvif->omac_idx,
+ },
+ .ba = {
+ .tag = cpu_to_le16(STA_REC_BA),
+ .len = cpu_to_le16(sizeof(struct sta_rec_ba)),
+ .tid = params->tid,
+ .ba_type = MT_BA_TYPE_ORIGINATOR,
+ .amsdu = params->amsdu,
+ .ba_en = add << params->tid,
+ .ssn = cpu_to_le16(params->ssn),
+ .winsize = cpu_to_le16(params->buf_size),
+ },
+ };
+ int ret;
if (add) {
- u8 idx;
+ u8 idx, ba_range[] = { 4, 8, 12, 24, 36, 48, 54, 64 };
for (idx = 7; idx > 0; idx--) {
- if (ba_size >= ba_range[idx])
+ if (params->buf_size >= ba_range[idx])
break;
}
- wtbl_ba.sn = cpu_to_le16(ssn);
- wtbl_ba.ba_en = 1;
- wtbl_ba.ba_winsize_idx = idx;
+ wtbl_req.ba.ba_winsize_idx = idx;
}
- ret = __mt7615_mcu_set_wtbl(dev, msta->wcid.idx, WTBL_SET, &wtbl_ba,
- buf_len);
+ ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_WTBL_UPDATE,
+ &wtbl_req, sizeof(wtbl_req), true);
if (ret)
return ret;
- buf_len = sizeof(struct sta_rec_ba);
-
- sta_rec_ba.tag = cpu_to_le16(STA_REC_BA);
- sta_rec_ba.len = cpu_to_le16(buf_len);
- sta_rec_ba.tid = tid;
- sta_rec_ba.ba_type = MT_BA_TYPE_ORIGINATOR;
- sta_rec_ba.amsdu = params->amsdu;
- sta_rec_ba.ba_en = add << tid;
- sta_rec_ba.ssn = cpu_to_le16(ssn);
- sta_rec_ba.winsize = cpu_to_le16(ba_size);
-
- return __mt7615_mcu_set_sta_rec(dev, mvif->idx, msta->wcid.idx,
- mvif->omac_idx, &sta_rec_ba, buf_len);
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_STA_REC_UPDATE,
+ &sta_req, sizeof(sta_req), true);
}
int mt7615_mcu_set_rx_ba(struct mt7615_dev *dev,
struct ieee80211_ampdu_params *params,
bool add)
{
- struct ieee80211_sta *sta = params->sta;
- struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
+ struct mt7615_sta *msta = (struct mt7615_sta *)params->sta->drv_priv;
struct mt7615_vif *mvif = msta->vif;
- u16 tid = params->tid;
- struct wtbl_ba wtbl_ba = {0};
- struct sta_rec_ba sta_rec_ba = {0};
- int ret, buf_len;
-
- buf_len = sizeof(struct sta_rec_ba);
-
- sta_rec_ba.tag = cpu_to_le16(STA_REC_BA);
- sta_rec_ba.len = cpu_to_le16(buf_len);
- sta_rec_ba.tid = tid;
- sta_rec_ba.ba_type = MT_BA_TYPE_RECIPIENT;
- sta_rec_ba.amsdu = params->amsdu;
- sta_rec_ba.ba_en = add << tid;
- sta_rec_ba.ssn = cpu_to_le16(params->ssn);
- sta_rec_ba.winsize = cpu_to_le16(params->buf_size);
-
- ret = __mt7615_mcu_set_sta_rec(dev, mvif->idx, msta->wcid.idx,
- mvif->omac_idx, &sta_rec_ba, buf_len);
- if (ret || !add)
- return ret;
+ struct {
+ struct wtbl_req_hdr hdr;
+ struct wtbl_ba ba;
+ } wtbl_req = {
+ .hdr = {
+ .wlan_idx = msta->wcid.idx,
+ .operation = WTBL_SET,
+ .tlv_num = cpu_to_le16(1),
+ },
+ .ba = {
+ .tag = cpu_to_le16(WTBL_BA),
+ .len = cpu_to_le16(sizeof(struct wtbl_ba)),
+ .tid = params->tid,
+ .ba_type = MT_BA_TYPE_RECIPIENT,
+ .rst_ba_tid = params->tid,
+ .rst_ba_sel = RST_BA_MAC_TID_MATCH,
+ .rst_ba_sb = 1,
+ },
+ };
+ struct {
+ struct sta_req_hdr hdr;
+ struct sta_rec_ba ba;
+ } sta_req = {
+ .hdr = {
+ .bss_idx = mvif->idx,
+ .wlan_idx = msta->wcid.idx,
+ .tlv_num = cpu_to_le16(1),
+ .is_tlv_append = 1,
+ .muar_idx = mvif->omac_idx,
+ },
+ .ba = {
+ .tag = cpu_to_le16(STA_REC_BA),
+ .len = cpu_to_le16(sizeof(struct sta_rec_ba)),
+ .tid = params->tid,
+ .ba_type = MT_BA_TYPE_RECIPIENT,
+ .amsdu = params->amsdu,
+ .ba_en = add << params->tid,
+ .ssn = cpu_to_le16(params->ssn),
+ .winsize = cpu_to_le16(params->buf_size),
+ },
+ };
+ int ret;
- buf_len = sizeof(struct wtbl_ba);
+ memcpy(wtbl_req.ba.peer_addr, params->sta->addr, ETH_ALEN);
- wtbl_ba.tag = cpu_to_le16(WTBL_BA);
- wtbl_ba.len = cpu_to_le16(buf_len);
- wtbl_ba.tid = tid;
- wtbl_ba.ba_type = MT_BA_TYPE_RECIPIENT;
- memcpy(wtbl_ba.peer_addr, sta->addr, ETH_ALEN);
- wtbl_ba.rst_ba_tid = tid;
- wtbl_ba.rst_ba_sel = RST_BA_MAC_TID_MATCH;
- wtbl_ba.rst_ba_sb = 1;
+ ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_STA_REC_UPDATE,
+ &sta_req, sizeof(sta_req), true);
+ if (ret || !add)
+ return ret;
- return __mt7615_mcu_set_wtbl(dev, msta->wcid.idx, WTBL_SET,
- &wtbl_ba, buf_len);
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_WTBL_UPDATE,
+ &wtbl_req, sizeof(wtbl_req), true);
}
void mt7615_mcu_set_rates(struct mt7615_dev *dev, struct mt7615_sta *sta,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h
index 9455f8fa475d..f8b51ad25220 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h
@@ -70,6 +70,7 @@ enum {
enum {
MCU_EXT_CMD_PM_STATE_CTRL = 0x07,
MCU_EXT_CMD_CHANNEL_SWITCH = 0x08,
+ MCU_EXT_CMD_SET_TX_POWER_CTRL = 0x11,
MCU_EXT_CMD_EFUSE_BUFFER_MODE = 0x21,
MCU_EXT_CMD_STA_REC_UPDATE = 0x25,
MCU_EXT_CMD_BSS_INFO_UPDATE = 0x26,
@@ -105,25 +106,19 @@ enum {
#define STA_TYPE_STA BIT(0)
#define STA_TYPE_AP BIT(1)
#define STA_TYPE_ADHOC BIT(2)
-#define STA_TYPE_TDLS BIT(3)
#define STA_TYPE_WDS BIT(4)
#define STA_TYPE_BC BIT(5)
#define NETWORK_INFRA BIT(16)
#define NETWORK_P2P BIT(17)
#define NETWORK_IBSS BIT(18)
-#define NETWORK_MESH BIT(19)
-#define NETWORK_BOW BIT(20)
#define NETWORK_WDS BIT(21)
#define CONNECTION_INFRA_STA (STA_TYPE_STA | NETWORK_INFRA)
#define CONNECTION_INFRA_AP (STA_TYPE_AP | NETWORK_INFRA)
#define CONNECTION_P2P_GC (STA_TYPE_STA | NETWORK_P2P)
#define CONNECTION_P2P_GO (STA_TYPE_AP | NETWORK_P2P)
-#define CONNECTION_MESH_STA (STA_TYPE_STA | NETWORK_MESH)
-#define CONNECTION_MESH_AP (STA_TYPE_AP | NETWORK_MESH)
#define CONNECTION_IBSS_ADHOC (STA_TYPE_ADHOC | NETWORK_IBSS)
-#define CONNECTION_TDLS (STA_TYPE_STA | NETWORK_INFRA | STA_TYPE_TDLS)
#define CONNECTION_WDS (STA_TYPE_WDS | NETWORK_WDS)
#define CONNECTION_INFRA_BC (STA_TYPE_BC | NETWORK_INFRA)
@@ -131,41 +126,11 @@ enum {
#define CONN_STATE_CONNECT 1
#define CONN_STATE_PORT_SECURE 2
-struct dev_info {
- u8 omac_idx;
- u8 omac_addr[ETH_ALEN];
- u8 band_idx;
- u8 enable;
- u32 feature;
-};
-
enum {
DEV_INFO_ACTIVE,
DEV_INFO_MAX_NUM
};
-struct bss_info {
- u8 bss_idx;
- u8 bssid[ETH_ALEN];
- u8 omac_idx;
- u8 band_idx;
- u8 bmc_tx_wlan_idx; /* for bmc tx (sta mode use uc entry) */
- u8 wmm_idx;
- u32 network_type;
- u32 conn_type;
- u16 bcn_interval;
- u8 dtim_period;
- u8 enable;
- u32 feature;
-};
-
-struct bss_info_tag_handler {
- u32 tag;
- u32 len;
- void (*handler)(struct mt7615_dev *dev,
- struct bss_info *bss_info, struct sk_buff *skb);
-};
-
struct bss_info_omac {
__le16 tag;
__le16 len;
@@ -231,6 +196,13 @@ enum {
WTBL_RESET_ALL
};
+struct wtbl_req_hdr {
+ u8 wlan_idx;
+ u8 operation;
+ __le16 tlv_num;
+ u8 rsv[4];
+} __packed;
+
struct wtbl_generic {
__le16 tag;
__le16 len;
@@ -396,7 +368,8 @@ struct wtbl_raw {
__le32 val;
} __packed;
-#define MT7615_WTBL_UPDATE_MAX_SIZE (sizeof(struct wtbl_generic) + \
+#define MT7615_WTBL_UPDATE_MAX_SIZE (sizeof(struct wtbl_req_hdr) + \
+ sizeof(struct wtbl_generic) + \
sizeof(struct wtbl_rx) + \
sizeof(struct wtbl_ht) + \
sizeof(struct wtbl_vht) + \
@@ -430,6 +403,15 @@ enum {
WTBL_MAX_NUM
};
+struct sta_req_hdr {
+ u8 bss_idx;
+ u8 wlan_idx;
+ __le16 tlv_num;
+ u8 is_tlv_append;
+ u8 muar_idx;
+ u8 rsv[2];
+} __packed;
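+
+A note on the two new headers: the firmware consumes them byte-for-byte, which is why they are declared __packed with explicit little-endian multi-byte fields. A compile-time size check along these lines (not in the driver, shown only to make the expected layout visible) would catch accidental padding:
+
+/* wtbl_req_hdr: 1 + 1 + 2 + 4 = 8 bytes; sta_req_hdr: 1 + 1 + 2 + 1 + 1 + 2 = 8 bytes */
+static inline void mt7615_mcu_req_hdr_size_check(void)
+{
+	BUILD_BUG_ON(sizeof(struct wtbl_req_hdr) != 8);
+	BUILD_BUG_ON(sizeof(struct sta_req_hdr) != 8);
+}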
+
struct sta_rec_basic {
__le16 tag;
__le16 len;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
index 895c2904d7eb..f02ffcffe637 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
@@ -105,11 +105,14 @@ u32 mt7615_reg_map(struct mt7615_dev *dev, u32 addr);
int mt7615_register_device(struct mt7615_dev *dev);
void mt7615_unregister_device(struct mt7615_dev *dev);
int mt7615_eeprom_init(struct mt7615_dev *dev);
+int mt7615_eeprom_get_power_index(struct mt7615_dev *dev,
+ struct ieee80211_channel *chan,
+ u8 chain_idx);
int mt7615_dma_init(struct mt7615_dev *dev);
void mt7615_dma_cleanup(struct mt7615_dev *dev);
int mt7615_mcu_init(struct mt7615_dev *dev);
-int mt7615_mcu_set_dev_info(struct mt7615_dev *dev, struct ieee80211_vif *vif,
- int en);
+int mt7615_mcu_set_dev_info(struct mt7615_dev *dev,
+ struct ieee80211_vif *vif, bool enable);
int mt7615_mcu_set_bss_info(struct mt7615_dev *dev, struct ieee80211_vif *vif,
int en);
int mt7615_mcu_set_wtbl_key(struct mt7615_dev *dev, int wcid,
@@ -118,12 +121,11 @@ int mt7615_mcu_set_wtbl_key(struct mt7615_dev *dev, int wcid,
void mt7615_mcu_set_rates(struct mt7615_dev *dev, struct mt7615_sta *sta,
struct ieee80211_tx_rate *probe_rate,
struct ieee80211_tx_rate *rates);
-int mt7615_mcu_add_wtbl_bmc(struct mt7615_dev *dev, struct ieee80211_vif *vif);
-int mt7615_mcu_del_wtbl_bmc(struct mt7615_dev *dev, struct ieee80211_vif *vif);
+int mt7615_mcu_wtbl_bmc(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+ bool enable);
int mt7615_mcu_add_wtbl(struct mt7615_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
-int mt7615_mcu_del_wtbl(struct mt7615_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
+int mt7615_mcu_del_wtbl(struct mt7615_dev *dev, struct ieee80211_sta *sta);
int mt7615_mcu_del_wtbl_all(struct mt7615_dev *dev);
int mt7615_mcu_set_sta_rec_bmc(struct mt7615_dev *dev,
struct ieee80211_vif *vif, bool en);
@@ -168,6 +170,7 @@ int mt7615_mcu_set_eeprom(struct mt7615_dev *dev);
int mt7615_mcu_init_mac(struct mt7615_dev *dev);
int mt7615_mcu_set_rts_thresh(struct mt7615_dev *dev, u32 val);
int mt7615_mcu_ctrl_pm_state(struct mt7615_dev *dev, int enter);
+int mt7615_mcu_set_tx_power(struct mt7615_dev *dev);
void mt7615_mcu_exit(struct mt7615_dev *dev);
int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
@@ -180,7 +183,6 @@ void mt7615_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb);
-void mt7615_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q);
void mt7615_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
int mt7615_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
index 11122bd2d727..9e82cb53fd60 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
@@ -27,14 +27,15 @@ u32 mt7615_reg_map(struct mt7615_dev *dev, u32 addr)
return MT_PCIE_REMAP_BASE_2 + offset;
}
-void mt7615_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
+static void
+mt7615_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
{
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
mt7615_irq_enable(dev, MT_INT_RX_DONE(q));
}
-irqreturn_t mt7615_irq_handler(int irq, void *dev_instance)
+static irqreturn_t mt7615_irq_handler(int irq, void *dev_instance)
{
struct mt7615_dev *dev = dev_instance;
u32 intr;
@@ -49,7 +50,7 @@ irqreturn_t mt7615_irq_handler(int irq, void *dev_instance)
if (intr & MT_INT_TX_DONE_ALL) {
mt7615_irq_disable(dev, MT_INT_TX_DONE_ALL);
- tasklet_schedule(&dev->mt76.tx_tasklet);
+ napi_schedule(&dev->mt76.tx_napi);
}
if (intr & MT_INT_RX_DONE(0)) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
index 71237d5cdf7f..cf7fc307322b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
@@ -271,8 +271,9 @@ mt76x0_init_txpower(struct mt76x02_dev *dev,
mt76x0_get_tx_power_per_rate(dev, chan, &t);
mt76x0_get_power_info(dev, chan, &tp);
- chan->max_power = (mt76x02_get_max_rate_power(&t) + tp) / 2;
- chan->orig_mpwr = chan->max_power;
+ chan->orig_mpwr = (mt76x02_get_max_rate_power(&t) + tp) / 2;
+ chan->max_power = min_t(int, chan->max_reg_power,
+ chan->orig_mpwr);
}
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
index a7f335d6e8f8..d7bf7bc15e52 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
@@ -25,7 +25,7 @@ mt76x0_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
mt76_rr(dev, MT_CH_IDLE);
mt76_rr(dev, MT_CH_BUSY);
- mt76x02_edcca_init(dev, true);
+ mt76x02_edcca_init(dev);
if (mt76_is_mmio(dev)) {
mt76x02_dfs_init_params(dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
index e11da6900222..1ecfc334ae79 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
@@ -422,15 +422,15 @@ mt76x0_phy_set_chan_bbp_params(struct mt76x02_dev *dev, u16 rf_bw_band)
static void mt76x0_phy_ant_select(struct mt76x02_dev *dev)
{
u16 ee_ant = mt76x02_eeprom_get(dev, MT_EE_ANTENNA);
+ u16 ee_cfg1 = mt76x02_eeprom_get(dev, MT_EE_CFG1_INIT);
u16 nic_conf2 = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_2);
- u32 wlan, coex3, cmb;
+ u32 wlan, coex3;
bool ant_div;
wlan = mt76_rr(dev, MT_WLAN_FUN_CTRL);
- cmb = mt76_rr(dev, MT_CMB_CTRL);
coex3 = mt76_rr(dev, MT_COEXCFG3);
- cmb &= ~(BIT(14) | BIT(12));
+ ee_ant &= ~(BIT(14) | BIT(12));
wlan &= ~(BIT(6) | BIT(5));
coex3 &= ~GENMASK(5, 2);
@@ -439,7 +439,7 @@ static void mt76x0_phy_ant_select(struct mt76x02_dev *dev)
ant_div = !(nic_conf2 & MT_EE_NIC_CONF_2_ANT_OPT) &&
(nic_conf2 & MT_EE_NIC_CONF_2_ANT_DIV);
if (ant_div)
- cmb |= BIT(12);
+ ee_ant |= BIT(12);
else
coex3 |= BIT(4);
coex3 |= BIT(3);
@@ -456,10 +456,11 @@ static void mt76x0_phy_ant_select(struct mt76x02_dev *dev)
}
if (is_mt7630(dev))
- cmb |= BIT(14) | BIT(11);
+ ee_ant |= BIT(14) | BIT(11);
mt76_wr(dev, MT_WLAN_FUN_CTRL, wlan);
- mt76_wr(dev, MT_CMB_CTRL, cmb);
+ mt76_rmw(dev, MT_CMB_CTRL, GENMASK(15, 0), ee_ant);
+ mt76_rmw(dev, MT_CSR_EE_CFG1, GENMASK(15, 0), ee_cfg1);
mt76_clear(dev, MT_COEXCFG0, BIT(2));
mt76_wr(dev, MT_COEXCFG3, coex3);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
index 2dc67e68c6a2..627ed1fc7b15 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
@@ -183,7 +183,7 @@ static int mt76x0u_register_device(struct mt76x02_dev *dev)
/* check hw sg support in order to enable AMSDU */
if (dev->mt76.usb.sg_en)
- hw->max_tx_fragments = MT_SG_MAX_SIZE;
+ hw->max_tx_fragments = MT_TX_SG_MAX_SIZE;
else
hw->max_tx_fragments = 1;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02.h b/drivers/net/wireless/mediatek/mt76/mt76x02.h
index 687bd14b2d77..f7fd53a1738a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02.h
@@ -90,7 +90,6 @@ struct mt76x02_dev {
struct sk_buff *rx_head;
- struct napi_struct tx_napi;
struct delayed_work cal_work;
struct delayed_work wdt_work;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c b/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c
index e196b9c0a686..d61c686e08de 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c
@@ -189,10 +189,8 @@ mt76x02_resync_beacon_timer(struct mt76x02_dev *dev)
mt76_rmw_field(dev, MT_BEACON_TIME_CFG,
MT_BEACON_TIME_CFG_INTVAL, timer_val);
- if (dev->tbtt_count >= 64) {
+ if (dev->tbtt_count >= 64)
dev->tbtt_count = 0;
- return;
- }
}
EXPORT_SYMBOL_GPL(mt76x02_resync_beacon_timer);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c b/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c
index b1d6fd4861e3..1b1e424ccbb2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c
@@ -120,12 +120,16 @@ static int
mt76_edcca_set(void *data, u64 val)
{
struct mt76x02_dev *dev = data;
- enum nl80211_dfs_regions region = dev->dfs_pd.region;
+ enum nl80211_dfs_regions region = dev->mt76.region;
+
+ mutex_lock(&dev->mt76.mutex);
dev->ed_monitor_enabled = !!val;
dev->ed_monitor = dev->ed_monitor_enabled &&
region == NL80211_DFS_ETSI;
- mt76x02_edcca_init(dev, true);
+ mt76x02_edcca_init(dev);
+
+ mutex_unlock(&dev->mt76.mutex);
return 0;
}
@@ -153,7 +157,7 @@ void mt76x02_init_debugfs(struct mt76x02_dev *dev)
debugfs_create_u8("temperature", 0400, dir, &dev->cal.temp);
debugfs_create_bool("tpc", 0600, dir, &dev->enable_tpc);
- debugfs_create_file("edcca", 0400, dir, dev, &fops_edcca);
+ debugfs_create_file("edcca", 0600, dir, dev, &fops_edcca);
debugfs_create_file("ampdu_stat", 0400, dir, dev, &fops_ampdu_stat);
debugfs_create_file("dfs_stats", 0400, dir, dev, &fops_dfs_stat);
debugfs_create_devm_seqfile(dev->mt76.dev, "txpower", dir,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
index 17d12d212d1b..50e9b310e496 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
@@ -283,7 +283,7 @@ static bool mt76x02_dfs_check_hw_pulse(struct mt76x02_dev *dev,
if (!pulse->period || !pulse->w1)
return false;
- switch (dev->dfs_pd.region) {
+ switch (dev->mt76.region) {
case NL80211_DFS_FCC:
if (pulse->engine > 3)
break;
@@ -457,7 +457,7 @@ static int mt76x02_dfs_create_sequence(struct mt76x02_dev *dev,
with_sum = event->width + cur_event->width;
sw_params = &dfs_pd->sw_dpd_params;
- switch (dev->dfs_pd.region) {
+ switch (dev->mt76.region) {
case NL80211_DFS_FCC:
case NL80211_DFS_JP:
if (with_sum < 600)
@@ -685,7 +685,7 @@ static void mt76x02_dfs_init_sw_detector(struct mt76x02_dev *dev)
{
struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
- switch (dev->dfs_pd.region) {
+ switch (dev->mt76.region) {
case NL80211_DFS_FCC:
dfs_pd->sw_dpd_params.max_pri = MT_DFS_FCC_MAX_PRI;
dfs_pd->sw_dpd_params.min_pri = MT_DFS_FCC_MIN_PRI;
@@ -725,7 +725,7 @@ static void mt76x02_dfs_set_bbp_params(struct mt76x02_dev *dev)
break;
}
- switch (dev->dfs_pd.region) {
+ switch (dev->mt76.region) {
case NL80211_DFS_FCC:
radar_specs = &fcc_radar_specs[shift];
break;
@@ -836,7 +836,7 @@ void mt76x02_dfs_init_params(struct mt76x02_dev *dev)
struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
- dev->dfs_pd.region != NL80211_DFS_UNSET) {
+ dev->mt76.region != NL80211_DFS_UNSET) {
mt76x02_dfs_init_sw_detector(dev);
mt76x02_dfs_set_bbp_params(dev);
/* enable debug mode */
@@ -869,7 +869,7 @@ void mt76x02_dfs_init_detector(struct mt76x02_dev *dev)
INIT_LIST_HEAD(&dfs_pd->sequences);
INIT_LIST_HEAD(&dfs_pd->seq_pool);
- dfs_pd->region = NL80211_DFS_UNSET;
+ dev->mt76.region = NL80211_DFS_UNSET;
dfs_pd->last_sw_check = jiffies;
tasklet_init(&dfs_pd->dfs_tasklet, mt76x02_dfs_tasklet,
(unsigned long)dev);
@@ -882,14 +882,14 @@ mt76x02_dfs_set_domain(struct mt76x02_dev *dev,
struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
mutex_lock(&dev->mt76.mutex);
- if (dfs_pd->region != region) {
+ if (dev->mt76.region != region) {
tasklet_disable(&dfs_pd->dfs_tasklet);
dev->ed_monitor = dev->ed_monitor_enabled &&
region == NL80211_DFS_ETSI;
- mt76x02_edcca_init(dev, true);
+ mt76x02_edcca_init(dev);
- dfs_pd->region = region;
+ dev->mt76.region = region;
mt76x02_dfs_init_params(dev);
tasklet_enable(&dfs_pd->dfs_tasklet);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.h b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.h
index 70b394e17340..0408613b45a4 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.h
@@ -118,8 +118,6 @@ struct mt76x02_dfs_seq_stats {
};
struct mt76x02_dfs_pattern_detector {
- enum nl80211_dfs_regions region;
-
u8 chirp_pulse_cnt;
u32 chirp_pulse_ts;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h
index e3442bc4e0a4..0ba536de3d6e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h
@@ -26,6 +26,7 @@ enum mt76x02_eeprom_field {
MT_EE_MAC_ADDR = 0x004,
MT_EE_PCI_ID = 0x00A,
MT_EE_ANTENNA = 0x022,
+ MT_EE_CFG1_INIT = 0x024,
MT_EE_NIC_CONF_0 = 0x034,
MT_EE_NIC_CONF_1 = 0x036,
MT_EE_COUNTRY_REGION_5GHZ = 0x038,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
index 56510a1a843a..82bafb5ac326 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
@@ -420,30 +420,92 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
EXPORT_SYMBOL_GPL(mt76x02_mac_write_txwi);
static void
-mt76x02_mac_fill_tx_status(struct mt76x02_dev *dev,
+mt76x02_tx_rate_fallback(struct ieee80211_tx_rate *rates, int idx, int phy)
+{
+ u8 mcs, nss;
+
+ if (!idx)
+ return;
+
+ rates += idx - 1;
+ rates[1] = rates[0];
+ switch (phy) {
+ case MT_PHY_TYPE_VHT:
+ mcs = ieee80211_rate_get_vht_mcs(rates);
+ nss = ieee80211_rate_get_vht_nss(rates);
+
+ if (mcs == 0)
+ nss = max_t(int, nss - 1, 1);
+ else
+ mcs--;
+
+ ieee80211_rate_set_vht(rates + 1, mcs, nss);
+ break;
+ case MT_PHY_TYPE_HT_GF:
+ case MT_PHY_TYPE_HT:
+ /* MCS 8 falls back to MCS 0 */
+ if (rates[0].idx == 8) {
+ rates[1].idx = 0;
+ break;
+ }
+ /* fall through */
+ default:
+ rates[1].idx = max_t(int, rates[0].idx - 1, 0);
+ break;
+ }
+}
+
+static void
+mt76x02_mac_fill_tx_status(struct mt76x02_dev *dev, struct mt76x02_sta *msta,
struct ieee80211_tx_info *info,
struct mt76x02_tx_status *st, int n_frames)
{
struct ieee80211_tx_rate *rate = info->status.rates;
- int cur_idx, last_rate;
+ struct ieee80211_tx_rate last_rate;
+ u16 first_rate;
+ int retry = st->retry;
+ int phy;
int i;
if (!n_frames)
return;
- last_rate = min_t(int, st->retry, IEEE80211_TX_MAX_RATES - 1);
- mt76x02_mac_process_tx_rate(&rate[last_rate], st->rate,
+ phy = FIELD_GET(MT_RXWI_RATE_PHY, st->rate);
+
+ if (st->pktid & MT_PACKET_ID_HAS_RATE) {
+ first_rate = st->rate & ~MT_RXWI_RATE_INDEX;
+ first_rate |= st->pktid & MT_RXWI_RATE_INDEX;
+
+ mt76x02_mac_process_tx_rate(&rate[0], first_rate,
+ dev->mt76.chandef.chan->band);
+ } else if (rate[0].idx < 0) {
+ if (!msta)
+ return;
+
+ mt76x02_mac_process_tx_rate(&rate[0], msta->wcid.tx_info,
+ dev->mt76.chandef.chan->band);
+ }
+
+ mt76x02_mac_process_tx_rate(&last_rate, st->rate,
dev->mt76.chandef.chan->band);
- if (last_rate < IEEE80211_TX_MAX_RATES - 1)
- rate[last_rate + 1].idx = -1;
-
- cur_idx = rate[last_rate].idx + last_rate;
- for (i = 0; i <= last_rate; i++) {
- rate[i].flags = rate[last_rate].flags;
- rate[i].idx = max_t(int, 0, cur_idx - i);
- rate[i].count = 1;
+
+ for (i = 0; i < ARRAY_SIZE(info->status.rates); i++) {
+ retry--;
+ if (i + 1 == ARRAY_SIZE(info->status.rates)) {
+ info->status.rates[i] = last_rate;
+ info->status.rates[i].count = max_t(int, retry, 1);
+ break;
+ }
+
+ mt76x02_tx_rate_fallback(info->status.rates, i, phy);
+ if (info->status.rates[i].idx == last_rate.idx)
+ break;
+ }
+
+ if (i + 1 < ARRAY_SIZE(info->status.rates)) {
+ info->status.rates[i + 1].idx = -1;
+ info->status.rates[i + 1].count = 0;
}
- rate[last_rate].count = st->retry + 1 - last_rate;
info->status.ampdu_len = n_frames;
info->status.ampdu_ack_len = st->success ? n_frames : 0;
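
mt76x02_mac_fill_tx_status() now reconstructs the whole retry chain instead of spreading st->retry evenly across the slots: rates[0] is either decoded from the rate index carried in the packet id (MT_PACKET_ID_HAS_RATE) or taken from the station's cached wcid.tx_info, and each following slot is derived from the previous one via mt76x02_tx_rate_fallback() until it reaches the rate the hardware actually reported. The fallback rule for the legacy/HT index case, as a self-contained sketch (the VHT branch needs the mac80211 MCS/NSS helpers and is left out):

#include <stdio.h>

/* One step of the fallback chain used above: drop one rate index,
 * clamped at 0; HT MCS 8 restarts at MCS 0 (single-stream fallback).
 */
static int fallback_idx(int idx, int is_ht)
{
	if (is_ht && idx == 8)
		return 0;
	return idx > 0 ? idx - 1 : 0;
}

int main(void)
{
	printf("legacy: 3 -> %d\n", fallback_idx(3, 0));	/* 2 */
	printf("legacy: 0 -> %d\n", fallback_idx(0, 0));	/* stays 0 */
	printf("HT MCS: 8 -> %d\n", fallback_idx(8, 1));	/* 0 */
	return 0;
}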
@@ -489,13 +551,19 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
mt76_tx_status_lock(mdev, &list);
if (wcid) {
- if (stat->pktid >= MT_PACKET_ID_FIRST)
+ if (mt76_is_skb_pktid(stat->pktid))
status.skb = mt76_tx_status_skb_get(mdev, wcid,
stat->pktid, &list);
if (status.skb)
status.info = IEEE80211_SKB_CB(status.skb);
}
+ if (!status.skb && !(stat->pktid & MT_PACKET_ID_HAS_RATE)) {
+ mt76_tx_status_unlock(mdev, &list);
+ rcu_read_unlock();
+ return;
+ }
+
if (msta && stat->aggr && !status.skb) {
u32 stat_val, stat_cache;
@@ -512,14 +580,14 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
return;
}
- mt76x02_mac_fill_tx_status(dev, status.info, &msta->status,
- msta->n_frames);
+ mt76x02_mac_fill_tx_status(dev, msta, status.info,
+ &msta->status, msta->n_frames);
msta->status = *stat;
msta->n_frames = 1;
*update = 0;
} else {
- mt76x02_mac_fill_tx_status(dev, status.info, stat, 1);
+ mt76x02_mac_fill_tx_status(dev, msta, status.info, stat, 1);
*update = 1;
}
@@ -945,12 +1013,12 @@ mt76x02_edcca_tx_enable(struct mt76x02_dev *dev, bool enable)
dev->ed_tx_blocked = !enable;
}
-void mt76x02_edcca_init(struct mt76x02_dev *dev, bool enable)
+void mt76x02_edcca_init(struct mt76x02_dev *dev)
{
dev->ed_trigger = 0;
dev->ed_silent = 0;
- if (dev->ed_monitor && enable) {
+ if (dev->ed_monitor) {
struct ieee80211_channel *chan = dev->mt76.chandef.chan;
u8 ed_th = chan->band == NL80211_BAND_5GHZ ? 0x0e : 0x20;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
index e4a9e0d0924b..cb39da79527a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
@@ -209,5 +209,5 @@ int mt76x02_mac_set_beacon(struct mt76x02_dev *dev, u8 vif_idx,
void mt76x02_mac_set_beacon_enable(struct mt76x02_dev *dev,
struct ieee80211_vif *vif, bool val);
-void mt76x02_edcca_init(struct mt76x02_dev *dev, bool enable);
+void mt76x02_edcca_init(struct mt76x02_dev *dev);
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
index 7b7163bc3b62..467b28379870 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
@@ -166,7 +166,8 @@ static void mt76x02_tx_tasklet(unsigned long data)
static int mt76x02_poll_tx(struct napi_struct *napi, int budget)
{
- struct mt76x02_dev *dev = container_of(napi, struct mt76x02_dev, tx_napi);
+ struct mt76x02_dev *dev = container_of(napi, struct mt76x02_dev,
+ mt76.tx_napi);
int i;
mt76x02_mac_poll_tx_status(dev, false);
@@ -245,9 +246,9 @@ int mt76x02_dma_init(struct mt76x02_dev *dev)
if (ret)
return ret;
- netif_tx_napi_add(&dev->mt76.napi_dev, &dev->tx_napi, mt76x02_poll_tx,
- NAPI_POLL_WEIGHT);
- napi_enable(&dev->tx_napi);
+ netif_tx_napi_add(&dev->mt76.napi_dev, &dev->mt76.tx_napi,
+ mt76x02_poll_tx, NAPI_POLL_WEIGHT);
+ napi_enable(&dev->mt76.tx_napi);
return 0;
}
@@ -303,7 +304,7 @@ irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance)
if (intr & (MT_INT_TX_STAT | MT_INT_TX_DONE_ALL)) {
mt76x02_irq_disable(dev, MT_INT_TX_DONE_ALL);
- napi_schedule(&dev->tx_napi);
+ napi_schedule(&dev->mt76.tx_napi);
}
if (intr & MT_INT_GPTIMER) {
@@ -334,7 +335,6 @@ static void mt76x02_dma_enable(struct mt76x02_dev *dev)
void mt76x02_dma_cleanup(struct mt76x02_dev *dev)
{
tasklet_kill(&dev->mt76.tx_tasklet);
- netif_napi_del(&dev->tx_napi);
mt76_dma_cleanup(&dev->mt76);
}
EXPORT_SYMBOL_GPL(mt76x02_dma_cleanup);
@@ -454,7 +454,7 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
tasklet_disable(&dev->mt76.tx_tasklet);
- napi_disable(&dev->tx_napi);
+ napi_disable(&dev->mt76.tx_napi);
for (i = 0; i < ARRAY_SIZE(dev->mt76.napi); i++)
napi_disable(&dev->mt76.napi[i]);
@@ -508,8 +508,8 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
clear_bit(MT76_RESET, &dev->mt76.state);
tasklet_enable(&dev->mt76.tx_tasklet);
- napi_enable(&dev->tx_napi);
- napi_schedule(&dev->tx_napi);
+ napi_enable(&dev->mt76.tx_napi);
+ napi_schedule(&dev->mt76.tx_napi);
tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h b/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h
index 2ce05b543dff..ea7833964ec0 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h
@@ -66,6 +66,9 @@
#define MT_WLAN_FUN_CTRL_GPIO_OUT GENMASK(23, 16) /* MT76x0 */
#define MT_WLAN_FUN_CTRL_GPIO_OUT_EN GENMASK(31, 24) /* MT76x0 */
+/* MT76x0 */
+#define MT_CSR_EE_CFG1 0x0104
+
#define MT_XO_CTRL0 0x0100
#define MT_XO_CTRL1 0x0104
#define MT_XO_CTRL2 0x0108
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c b/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c
index cf7abd9b7d2e..04118f08debc 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c
@@ -154,6 +154,7 @@ int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
struct mt76x02_txwi *txwi = txwi_ptr;
+ bool ampdu = IEEE80211_SKB_CB(tx_info->skb)->flags & IEEE80211_TX_CTL_AMPDU;
int hdrlen, len, pid, qsel = MT_QSEL_EDCA;
if (qid == MT_TXQ_PSD && wcid && wcid->idx < 128)
@@ -164,9 +165,15 @@ int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
mt76x02_mac_write_txwi(dev, txwi, tx_info->skb, wcid, sta, len);
pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
+
+ /* encode packet rate for no-skb packet id to fix up status reporting */
+ if (pid == MT_PACKET_ID_NO_SKB)
+ pid = MT_PACKET_ID_HAS_RATE |
+ (le16_to_cpu(txwi->rate) & MT_RXWI_RATE_INDEX);
+
txwi->pktid = pid;
- if (pid >= MT_PACKET_ID_FIRST)
+ if (mt76_is_skb_pktid(pid) && ampdu)
qsel = MT_QSEL_MGMT;
tx_info->info = FIELD_PREP(MT_TXD_INFO_QSEL, qsel) |
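
When mt76_tx_status_skb_add() returns MT_PACKET_ID_NO_SKB, the low bits of the TXWI rate are now folded into the packet id (tagged with MT_PACKET_ID_HAS_RATE), so the status path can rebuild the first TX rate even though no skb was tracked. The round trip as a stand-alone sketch with assumed bit layouts (the real masks are MT_PACKET_ID_HAS_RATE and MT_RXWI_RATE_INDEX in the mt76 headers):

#include <stdint.h>

#define EX_PKTID_HAS_RATE	0x80	/* assumed flag bit */
#define EX_RATE_IDX_MASK	0x3f	/* assumed rate-index mask */

/* TX side: stash the rate index of the TXWI in the packet id. */
static uint8_t pktid_encode(uint16_t txwi_rate)
{
	return EX_PKTID_HAS_RATE | (txwi_rate & EX_RATE_IDX_MASK);
}

/* Status side: keep the PHY/BW bits of the reported rate, take the
 * index from the packet id (mirrors mt76x02_mac_fill_tx_status()).
 */
static uint16_t first_rate_decode(uint16_t status_rate, uint8_t pktid)
{
	return (status_rate & ~EX_RATE_IDX_MASK) |
	       (pktid & EX_RATE_IDX_MASK);
}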
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
index 6b89f7eab26c..5e4f3a8c5784 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
@@ -14,7 +14,7 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include "mt76x02.h"
+#include "mt76x02_usb.h"
static void mt76x02u_remove_dma_hdr(struct sk_buff *skb)
{
@@ -79,6 +79,7 @@ int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
int pid, len = tx_info->skb->len, ep = q2ep(mdev->q_tx[qid].q->hw_idx);
struct mt76x02_txwi *txwi;
+ bool ampdu = IEEE80211_SKB_CB(tx_info->skb)->flags & IEEE80211_TX_CTL_AMPDU;
enum mt76_qsel qsel;
u32 flags;
@@ -89,9 +90,15 @@ int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
skb_push(tx_info->skb, sizeof(*txwi));
pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
+
+ /* encode packet rate for no-skb packet id to fix up status reporting */
+ if (pid == MT_PACKET_ID_NO_SKB)
+ pid = MT_PACKET_ID_HAS_RATE |
+ (le16_to_cpu(txwi->rate) & MT_RXWI_RATE_INDEX);
+
txwi->pktid = pid;
- if (pid >= MT_PACKET_ID_FIRST || ep == MT_EP_OUT_HCCA)
+ if ((mt76_is_skb_pktid(pid) && ampdu) || ep == MT_EP_OUT_HCCA)
qsel = MT_QSEL_MGMT;
else
qsel = MT_QSEL_EDCA;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/init.c
index c6078e90ca43..97c3543eed8a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/init.c
@@ -173,13 +173,14 @@ void mt76x2_init_txpower(struct mt76x02_dev *dev,
mt76x2_get_power_info(dev, &txp, chan);
mt76x2_get_rate_power(dev, &t, chan);
- chan->max_power = mt76x02_get_max_rate_power(&t) +
+ chan->orig_mpwr = mt76x02_get_max_rate_power(&t) +
txp.target_power;
- chan->max_power = DIV_ROUND_UP(chan->max_power, 2);
+ chan->orig_mpwr = DIV_ROUND_UP(chan->orig_mpwr, 2);
/* convert to combined output power on 2x2 devices */
- chan->max_power += 3;
- chan->orig_mpwr = chan->max_power;
+ chan->orig_mpwr += 3;
+ chan->max_power = min_t(int, chan->max_reg_power,
+ chan->orig_mpwr);
}
}
EXPORT_SYMBOL_GPL(mt76x2_init_txpower);
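
Both mt76x0_init_txpower() and mt76x2_init_txpower() now keep the calibrated EEPROM value in chan->orig_mpwr and clamp the advertised chan->max_power against the regulatory limit instead of exporting the raw calibrated number. The clamping for the mt76x2 2x2 case, reduced to one illustrative helper (not extra driver code; the numbers in the comment are made up):

/* e.g. rate delta 7, target power 10 dBm, 20 dBm regulatory cap:
 * DIV_ROUND_UP(7 + 10, 2) + 3 = 12 dBm calibrated, min(20, 12) = 12 dBm.
 */
static int mt76x2_example_max_power(int rate_power, int target_power,
				    int max_reg_power)
{
	int orig_mpwr = DIV_ROUND_UP(rate_power + target_power, 2) + 3;

	return min_t(int, max_reg_power, orig_mpwr);
}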
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
index e416eee6a306..3a1467326f4d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
@@ -54,14 +54,14 @@ mt76x2_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
int ret;
cancel_delayed_work_sync(&dev->cal_work);
+ tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
+ tasklet_disable(&dev->dfs_pd.dfs_tasklet);
+ mutex_lock(&dev->mt76.mutex);
set_bit(MT76_RESET, &dev->mt76.state);
mt76_set_channel(&dev->mt76);
- tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
- tasklet_disable(&dev->dfs_pd.dfs_tasklet);
-
mt76x2_mac_stop(dev, true);
ret = mt76x2_phy_set_channel(dev, chandef);
@@ -72,10 +72,12 @@ mt76x2_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
mt76x02_dfs_init_params(dev);
mt76x2_mac_resume(dev);
- tasklet_enable(&dev->dfs_pd.dfs_tasklet);
- tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
clear_bit(MT76_RESET, &dev->mt76.state);
+ mutex_unlock(&dev->mt76.mutex);
+
+ tasklet_enable(&dev->dfs_pd.dfs_tasklet);
+ tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
mt76_txq_schedule_all(&dev->mt76);
@@ -111,14 +113,14 @@ mt76x2_config(struct ieee80211_hw *hw, u32 changed)
}
}
+ mutex_unlock(&dev->mt76.mutex);
+
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
ieee80211_stop_queues(hw);
ret = mt76x2_set_channel(dev, &hw->conf.chandef);
ieee80211_wake_queues(hw);
}
- mutex_unlock(&dev->mt76.mutex);
-
return ret;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c
index cc1aebcb0696..2edf1bd0c18c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c
@@ -74,7 +74,7 @@ mt76x2_phy_channel_calibrate(struct mt76x02_dev *dev, bool mac_stopped)
mt76x2_mac_resume(dev);
mt76x2_apply_gain_adj(dev);
- mt76x02_edcca_init(dev, true);
+ mt76x02_edcca_init(dev);
dev->cal.channel_cal_done = true;
}
@@ -294,10 +294,16 @@ void mt76x2_phy_calibrate(struct work_struct *work)
struct mt76x02_dev *dev;
dev = container_of(work, struct mt76x02_dev, cal_work.work);
+
+ mutex_lock(&dev->mt76.mutex);
+
mt76x2_phy_channel_calibrate(dev, false);
mt76x2_phy_tssi_compensate(dev);
mt76x2_phy_temp_compensate(dev);
mt76x2_phy_update_channel_gain(dev);
+
+ mutex_unlock(&dev->mt76.mutex);
+
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work,
MT_CALIBRATE_INTERVAL);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c
index f2c57d5b87f9..94f52f98019b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c
@@ -225,7 +225,7 @@ int mt76x2u_register_device(struct mt76x02_dev *dev)
/* check hw sg support in order to enable AMSDU */
if (dev->mt76.usb.sg_en)
- hw->max_tx_fragments = MT_SG_MAX_SIZE;
+ hw->max_tx_fragments = MT_TX_SG_MAX_SIZE;
else
hw->max_tx_fragments = 1;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
index 97bcf6494ec1..e4dfc3bea3c5 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
@@ -48,22 +48,23 @@ mt76x2u_set_channel(struct mt76x02_dev *dev,
int err;
cancel_delayed_work_sync(&dev->cal_work);
+ dev->beacon_ops->pre_tbtt_enable(dev, false);
+
+ mutex_lock(&dev->mt76.mutex);
set_bit(MT76_RESET, &dev->mt76.state);
mt76_set_channel(&dev->mt76);
- dev->beacon_ops->pre_tbtt_enable(dev, false);
-
mt76x2_mac_stop(dev, false);
err = mt76x2u_phy_set_channel(dev, chandef);
mt76x2_mac_resume(dev);
- mt76x02_edcca_init(dev, true);
-
- dev->beacon_ops->pre_tbtt_enable(dev, true);
clear_bit(MT76_RESET, &dev->mt76.state);
+ mutex_unlock(&dev->mt76.mutex);
+
+ dev->beacon_ops->pre_tbtt_enable(dev, true);
mt76_txq_schedule_all(&dev->mt76);
return err;
@@ -85,12 +86,6 @@ mt76x2u_config(struct ieee80211_hw *hw, u32 changed)
mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter);
}
- if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
- ieee80211_stop_queues(hw);
- err = mt76x2u_set_channel(dev, &hw->conf.chandef);
- ieee80211_wake_queues(hw);
- }
-
if (changed & IEEE80211_CONF_CHANGE_POWER) {
dev->mt76.txpower_conf = hw->conf.power_level * 2;
@@ -103,6 +98,12 @@ mt76x2u_config(struct ieee80211_hw *hw, u32 changed)
mutex_unlock(&dev->mt76.mutex);
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ ieee80211_stop_queues(hw);
+ err = mt76x2u_set_channel(dev, &hw->conf.chandef);
+ ieee80211_wake_queues(hw);
+ }
+
return err;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c
index 07f67cb6854c..dfd54f9b0e97 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c
@@ -45,7 +45,7 @@ mt76x2u_phy_channel_calibrate(struct mt76x02_dev *dev, bool mac_stopped)
if (!mac_stopped)
mt76x2_mac_resume(dev);
mt76x2_apply_gain_adj(dev);
- mt76x02_edcca_init(dev, true);
+ mt76x02_edcca_init(dev);
dev->cal.channel_cal_done = true;
}
@@ -55,10 +55,15 @@ void mt76x2u_phy_calibrate(struct work_struct *work)
struct mt76x02_dev *dev;
dev = container_of(work, struct mt76x02_dev, cal_work.work);
+
+ mutex_lock(&dev->mt76.mutex);
+
mt76x2u_phy_channel_calibrate(dev, false);
mt76x2_phy_tssi_compensate(dev);
mt76x2_phy_update_channel_gain(dev);
+ mutex_unlock(&dev->mt76.mutex);
+
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work,
MT_CALIBRATE_INTERVAL);
}
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
index bbaa1365bbda..fb87ce7fbdf6 100644
--- a/drivers/net/wireless/mediatek/mt76/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -267,12 +267,10 @@ mt76u_set_endpoints(struct usb_interface *intf,
if (usb_endpoint_is_bulk_in(ep_desc) &&
in_ep < __MT_EP_IN_MAX) {
usb->in_ep[in_ep] = usb_endpoint_num(ep_desc);
- usb->in_max_packet = usb_endpoint_maxp(ep_desc);
in_ep++;
} else if (usb_endpoint_is_bulk_out(ep_desc) &&
out_ep < __MT_EP_OUT_MAX) {
usb->out_ep[out_ep] = usb_endpoint_num(ep_desc);
- usb->out_max_packet = usb_endpoint_maxp(ep_desc);
out_ep++;
}
}
@@ -333,12 +331,13 @@ mt76u_refill_rx(struct mt76_dev *dev, struct urb *urb, int nsgs, gfp_t gfp)
}
static int
-mt76u_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e)
+mt76u_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e,
+ int sg_max_size)
{
unsigned int size = sizeof(struct urb);
if (dev->usb.sg_en)
- size += MT_SG_MAX_SIZE * sizeof(struct scatterlist);
+ size += sg_max_size * sizeof(struct scatterlist);
e->urb = kzalloc(size, GFP_KERNEL);
if (!e->urb)
@@ -357,11 +356,12 @@ mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e)
{
int err;
- err = mt76u_urb_alloc(dev, e);
+ err = mt76u_urb_alloc(dev, e, MT_RX_SG_MAX_SIZE);
if (err)
return err;
- return mt76u_refill_rx(dev, e->urb, MT_SG_MAX_SIZE, GFP_KERNEL);
+ return mt76u_refill_rx(dev, e->urb, MT_RX_SG_MAX_SIZE,
+ GFP_KERNEL);
}
static void mt76u_urb_free(struct urb *urb)
@@ -429,6 +429,42 @@ static int mt76u_get_rx_entry_len(u8 *data, u32 data_len)
return dma_len;
}
+static struct sk_buff *
+mt76u_build_rx_skb(void *data, int len, int buf_size)
+{
+ struct sk_buff *skb;
+
+ if (SKB_WITH_OVERHEAD(buf_size) < MT_DMA_HDR_LEN + len) {
+ struct page *page;
+
+ /* slow path, not enough space for data and
+ * skb_shared_info
+ */
+ skb = alloc_skb(MT_SKB_HEAD_LEN, GFP_ATOMIC);
+ if (!skb)
+ return NULL;
+
+ skb_put_data(skb, data + MT_DMA_HDR_LEN, MT_SKB_HEAD_LEN);
+ data += (MT_DMA_HDR_LEN + MT_SKB_HEAD_LEN);
+ page = virt_to_head_page(data);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ page, data - page_address(page),
+ len - MT_SKB_HEAD_LEN, buf_size);
+
+ return skb;
+ }
+
+ /* fast path */
+ skb = build_skb(data, buf_size);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, MT_DMA_HDR_LEN);
+ __skb_put(skb, len);
+
+ return skb;
+}
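+
+Because the USB rx queue below now always uses PAGE_SIZE buffers, a large frame plus the skb_shared_info overhead may no longer fit in a single linear skb; instead of dropping it ("rx data too big"), mt76u_build_rx_skb() copies a small MT_SKB_HEAD_LEN head and attaches the rest of the buffer as a page fragment. The decision is a single comparison; a hedged, stand-alone sketch of it (overhead stands for the aligned skb_shared_info size hidden inside SKB_WITH_OVERHEAD()):
+
+#include <stdbool.h>
+#include <stddef.h>
+
+/* true  -> slow path: small linear head + page fragment
+ * false -> fast path: build_skb() directly on the rx buffer
+ */
+static bool rx_needs_frag(size_t buf_size, size_t overhead,
+			  size_t dma_hdr_len, size_t frame_len)
+{
+	return buf_size - overhead < dma_hdr_len + frame_len;
+}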
+
static int
mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb)
{
@@ -446,19 +482,11 @@ mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb)
return 0;
data_len = min_t(int, len, data_len - MT_DMA_HDR_LEN);
- if (MT_DMA_HDR_LEN + data_len > SKB_WITH_OVERHEAD(q->buf_size)) {
- dev_err_ratelimited(dev->dev, "rx data too big %d\n", data_len);
- return 0;
- }
-
- skb = build_skb(data, q->buf_size);
+ skb = mt76u_build_rx_skb(data, data_len, q->buf_size);
if (!skb)
return 0;
- skb_reserve(skb, MT_DMA_HDR_LEN);
- __skb_put(skb, data_len);
len -= data_len;
-
while (len > 0 && nsgs < urb->num_sgs) {
data_len = min_t(int, len, urb->sg[nsgs].length);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
@@ -577,8 +605,9 @@ static int mt76u_alloc_rx(struct mt76_dev *dev)
if (!q->entry)
return -ENOMEM;
- q->buf_size = dev->usb.sg_en ? MT_RX_BUF_SIZE : PAGE_SIZE;
q->ndesc = MT_NUM_RX_ENTRIES;
+ q->buf_size = PAGE_SIZE;
+
for (i = 0; i < q->ndesc; i++) {
err = mt76u_rx_urb_alloc(dev, &q->entry[i]);
if (err < 0)
@@ -735,7 +764,7 @@ mt76u_tx_setup_buffers(struct mt76_dev *dev, struct sk_buff *skb,
urb->transfer_buffer = skb->data;
return 0;
} else {
- sg_init_table(urb->sg, MT_SG_MAX_SIZE);
+ sg_init_table(urb->sg, MT_TX_SG_MAX_SIZE);
urb->num_sgs = skb_to_sgvec(skb, urb->sg, 0, skb->len);
if (urb->num_sgs == 0)
return -ENOMEM;
@@ -829,7 +858,8 @@ static int mt76u_alloc_tx(struct mt76_dev *dev)
q->ndesc = MT_NUM_TX_ENTRIES;
for (j = 0; j < q->ndesc; j++) {
- err = mt76u_urb_alloc(dev, &q->entry[j]);
+ err = mt76u_urb_alloc(dev, &q->entry[j],
+ MT_TX_SG_MAX_SIZE);
if (err < 0)
return err;
}
diff --git a/drivers/net/wireless/mediatek/mt7601u/dma.c b/drivers/net/wireless/mediatek/mt7601u/dma.c
index 66d60283e456..f6a0454abe04 100644
--- a/drivers/net/wireless/mediatek/mt7601u/dma.c
+++ b/drivers/net/wireless/mediatek/mt7601u/dma.c
@@ -185,10 +185,23 @@ static void mt7601u_complete_rx(struct urb *urb)
struct mt7601u_rx_queue *q = &dev->rx_q;
unsigned long flags;
- spin_lock_irqsave(&dev->rx_lock, flags);
+ /* do not schedule the rx tasklet if the urb has been unlinked
+ * or the device has been removed
+ */
+ switch (urb->status) {
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ case -ENOENT:
+ return;
+ default:
+ dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
+ urb->status);
+ /* fall through */
+ case 0:
+ break;
+ }
- if (mt7601u_urb_has_error(urb))
- dev_err(dev->dev, "Error: RX urb failed:%d\n", urb->status);
+ spin_lock_irqsave(&dev->rx_lock, flags);
if (WARN_ONCE(q->e[q->end].urb != urb, "RX urb mismatch"))
goto out;
@@ -220,14 +233,25 @@ static void mt7601u_complete_tx(struct urb *urb)
struct sk_buff *skb;
unsigned long flags;
- spin_lock_irqsave(&dev->tx_lock, flags);
+ switch (urb->status) {
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ case -ENOENT:
+ return;
+ default:
+ dev_err_ratelimited(dev->dev, "tx urb failed: %d\n",
+ urb->status);
+ /* fall through */
+ case 0:
+ break;
+ }
- if (mt7601u_urb_has_error(urb))
- dev_err(dev->dev, "Error: TX urb failed:%d\n", urb->status);
+ spin_lock_irqsave(&dev->tx_lock, flags);
if (WARN_ONCE(q->e[q->start].urb != urb, "TX urb mismatch"))
goto out;
skb = q->e[q->start].skb;
+ q->e[q->start].skb = NULL;
trace_mt_tx_dma_done(dev, skb);
__skb_queue_tail(&dev->tx_skb_done, skb);
@@ -355,19 +379,9 @@ int mt7601u_dma_enqueue_tx(struct mt7601u_dev *dev, struct sk_buff *skb,
static void mt7601u_kill_rx(struct mt7601u_dev *dev)
{
int i;
- unsigned long flags;
-
- spin_lock_irqsave(&dev->rx_lock, flags);
-
- for (i = 0; i < dev->rx_q.entries; i++) {
- int next = dev->rx_q.end;
- spin_unlock_irqrestore(&dev->rx_lock, flags);
- usb_poison_urb(dev->rx_q.e[next].urb);
- spin_lock_irqsave(&dev->rx_lock, flags);
- }
-
- spin_unlock_irqrestore(&dev->rx_lock, flags);
+ for (i = 0; i < dev->rx_q.entries; i++)
+ usb_poison_urb(dev->rx_q.e[i].urb);
}
static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev,
@@ -437,10 +451,10 @@ static void mt7601u_free_tx_queue(struct mt7601u_tx_queue *q)
{
int i;
- WARN_ON(q->used);
-
for (i = 0; i < q->entries; i++) {
usb_poison_urb(q->e[i].urb);
+ if (q->e[i].skb)
+ mt7601u_tx_status(q->dev, q->e[i].skb);
usb_free_urb(q->e[i].urb);
}
}
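
The mt7601u completion handlers now bail out before taking any locks when the urb status indicates an unlink (-ECONNRESET, -ESHUTDOWN, -ENOENT), so tearing the device down can no longer re-arm the rx/tx processing, and mt7601u_free_tx_queue() completes any skb still attached to a poisoned urb instead of leaking it. The status filter in isolation (illustrative kernel-context sketch mirroring the switches added above):

#include <linux/errno.h>
#include <linux/types.h>

/* true if a completed urb should be processed, false if it was
 * unlinked or the device is going away; transient errors are logged
 * (rate limited) by the callers and still processed.
 */
static bool urb_status_ok(int status)
{
	switch (status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
	case -ENOENT:
		return false;
	default:
		return true;
	}
}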
diff --git a/drivers/net/wireless/mediatek/mt7601u/tx.c b/drivers/net/wireless/mediatek/mt7601u/tx.c
index 906e19c5f628..f3dff8319a4c 100644
--- a/drivers/net/wireless/mediatek/mt7601u/tx.c
+++ b/drivers/net/wireless/mediatek/mt7601u/tx.c
@@ -109,9 +109,9 @@ void mt7601u_tx_status(struct mt7601u_dev *dev, struct sk_buff *skb)
info->status.rates[0].idx = -1;
info->flags |= IEEE80211_TX_STAT_ACK;
- spin_lock(&dev->mac_lock);
+ spin_lock_bh(&dev->mac_lock);
ieee80211_tx_status(dev->hw, skb);
- spin_unlock(&dev->mac_lock);
+ spin_unlock_bh(&dev->mac_lock);
}
static int mt7601u_skb_rooms(struct mt7601u_dev *dev, struct sk_buff *skb)
diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c
index 459f6b81d2eb..dc0c7244b60e 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/commands.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c
@@ -1011,9 +1011,8 @@ qtnf_parse_variable_mac_info(struct qtnf_wmac *mac,
if (WARN_ON(resp->n_reg_rules > NL80211_MAX_SUPP_REG_RULES))
return -E2BIG;
- mac->rd = kzalloc(sizeof(*mac->rd) +
- sizeof(struct ieee80211_reg_rule) *
- resp->n_reg_rules, GFP_KERNEL);
+ mac->rd = kzalloc(struct_size(mac->rd, reg_rules, resp->n_reg_rules),
+ GFP_KERNEL);
if (!mac->rd)
return -ENOMEM;
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index 621cd4ce69e2..c9b957ac5733 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -30,6 +30,10 @@
#include "rt2800lib.h"
#include "rt2800.h"
+static bool modparam_watchdog;
+module_param_named(watchdog, modparam_watchdog, bool, S_IRUGO);
+MODULE_PARM_DESC(watchdog, "Enable watchdog to detect tx/rx hangs and reset hardware if detected");
+
/*
* Register access.
* All access to the CSR registers will go through the methods
@@ -1212,6 +1216,63 @@ void rt2800_txdone_nostatus(struct rt2x00_dev *rt2x00dev)
}
EXPORT_SYMBOL_GPL(rt2800_txdone_nostatus);
+static int rt2800_check_hung(struct data_queue *queue)
+{
+ unsigned int cur_idx = rt2800_drv_get_dma_done(queue);
+
+ if (queue->wd_idx != cur_idx)
+ queue->wd_count = 0;
+ else
+ queue->wd_count++;
+
+ return queue->wd_count > 16;
+}
+
+void rt2800_watchdog(struct rt2x00_dev *rt2x00dev)
+{
+ struct data_queue *queue;
+ bool hung_tx = false;
+ bool hung_rx = false;
+
+ if (test_bit(DEVICE_STATE_SCANNING, &rt2x00dev->flags))
+ return;
+
+ queue_for_each(rt2x00dev, queue) {
+ switch (queue->qid) {
+ case QID_AC_VO:
+ case QID_AC_VI:
+ case QID_AC_BE:
+ case QID_AC_BK:
+ case QID_MGMT:
+ if (rt2x00queue_empty(queue))
+ continue;
+ hung_tx = rt2800_check_hung(queue);
+ break;
+ case QID_RX:
+ /* For station mode we should receive at least
+ * beacons. TODO: need to find a good way to detect
+ * RX hangs for AP mode.
+ */
+ if (rt2x00dev->intf_sta_count == 0)
+ continue;
+ hung_rx = rt2800_check_hung(queue);
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (hung_tx)
+ rt2x00_warn(rt2x00dev, "Watchdog TX hung detected\n");
+
+ if (hung_rx)
+ rt2x00_warn(rt2x00dev, "Watchdog RX hung detected\n");
+
+ if (hung_tx || hung_rx)
+ ieee80211_restart_hw(rt2x00dev->hw);
+}
+EXPORT_SYMBOL_GPL(rt2800_watchdog);
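+
+rt2800_check_hung() reports a queue as hung once its DMA-done index has stayed put for more than 16 consecutive watchdog passes; with the 100 ms watchdog_interval set in rt2800_probe_hw() further down, that is roughly 1.7 seconds without DMA progress before ieee80211_restart_hw() fires. The timing spelled out (threshold and interval copied from this patch):
+
+#include <stdio.h>
+
+int main(void)
+{
+	unsigned int interval_ms = 100;	/* link.watchdog_interval */
+	unsigned int threshold = 16;	/* wd_count > 16 in rt2800_check_hung() */
+
+	/* the (threshold + 1)-th unchanged sample trips the check */
+	printf("hang reported after roughly %u ms\n",
+	       (threshold + 1) * interval_ms);
+	return 0;
+}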
+
static unsigned int rt2800_hw_beacon_base(struct rt2x00_dev *rt2x00dev,
unsigned int index)
{
@@ -1593,14 +1654,15 @@ static void rt2800_config_wcid_attr_cipher(struct rt2x00_dev *rt2x00dev,
offset = MAC_IVEIV_ENTRY(key->hw_key_idx);
- memset(&iveiv_entry, 0, sizeof(iveiv_entry));
+ rt2800_register_multiread(rt2x00dev, offset,
+ &iveiv_entry, sizeof(iveiv_entry));
if ((crypto->cipher == CIPHER_TKIP) ||
(crypto->cipher == CIPHER_TKIP_NO_MIC) ||
(crypto->cipher == CIPHER_AES))
iveiv_entry.iv[3] |= 0x20;
iveiv_entry.iv[3] |= key->keyidx << 6;
rt2800_register_multiwrite(rt2x00dev, offset,
- &iveiv_entry, sizeof(iveiv_entry));
+ &iveiv_entry, sizeof(iveiv_entry));
}
int rt2800_config_shared_key(struct rt2x00_dev *rt2x00dev,
@@ -1789,6 +1851,25 @@ int rt2800_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
EXPORT_SYMBOL_GPL(rt2800_sta_remove);
+void rt2800_pre_reset_hw(struct rt2x00_dev *rt2x00dev)
+{
+ struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
+ struct data_queue *queue = rt2x00dev->bcn;
+ struct queue_entry *entry;
+ int i, wcid;
+
+ for (wcid = WCID_START; wcid < WCID_END; wcid++) {
+ drv_data->wcid_to_sta[wcid - WCID_START] = NULL;
+ __clear_bit(wcid - WCID_START, drv_data->sta_ids);
+ }
+
+ for (i = 0; i < queue->limit; i++) {
+ entry = &queue->entries[i];
+ clear_bit(ENTRY_BCN_ASSIGNED, &entry->flags);
+ }
+}
+EXPORT_SYMBOL_GPL(rt2800_pre_reset_hw);
+
void rt2800_config_filter(struct rt2x00_dev *rt2x00dev,
const unsigned int filter_flags)
{
@@ -6006,13 +6087,11 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
* ASIC will keep garbage value after boot, clear encryption keys.
*/
for (i = 0; i < 4; i++)
- rt2800_register_write(rt2x00dev,
- SHARED_KEY_MODE_ENTRY(i), 0);
+ rt2800_register_write(rt2x00dev, SHARED_KEY_MODE_ENTRY(i), 0);
for (i = 0; i < 256; i++) {
rt2800_config_wcid(rt2x00dev, NULL, i);
rt2800_delete_wcid_attr(rt2x00dev, i);
- rt2800_register_write(rt2x00dev, MAC_IVEIV_ENTRY(i), 0);
}
/*
@@ -10211,6 +10290,13 @@ int rt2800_probe_hw(struct rt2x00_dev *rt2x00dev)
__set_bit(REQUIRE_TASKLET_CONTEXT, &rt2x00dev->cap_flags);
}
+ if (modparam_watchdog) {
+ __set_bit(CAPABILITY_RESTART_HW, &rt2x00dev->cap_flags);
+ rt2x00dev->link.watchdog_interval = msecs_to_jiffies(100);
+ } else {
+ rt2x00dev->link.watchdog_disabled = true;
+ }
+
/*
* Set the rssi offset.
*/
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.h b/drivers/net/wireless/ralink/rt2x00/rt2800lib.h
index 48adc6cc3233..1139405c0ebb 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.h
@@ -65,6 +65,7 @@ struct rt2800_ops {
const u8 *data, const size_t len);
int (*drv_init_registers)(struct rt2x00_dev *rt2x00dev);
__le32 *(*drv_get_txwi)(struct queue_entry *entry);
+ unsigned int (*drv_get_dma_done)(struct data_queue *queue);
};
static inline u32 rt2800_register_read(struct rt2x00_dev *rt2x00dev,
@@ -166,6 +167,13 @@ static inline __le32 *rt2800_drv_get_txwi(struct queue_entry *entry)
return rt2800ops->drv_get_txwi(entry);
}
+static inline unsigned int rt2800_drv_get_dma_done(struct data_queue *queue)
+{
+ const struct rt2800_ops *rt2800ops = queue->rt2x00dev->ops->drv;
+
+ return rt2800ops->drv_get_dma_done(queue);
+}
+
void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev,
const u8 command, const u8 token,
const u8 arg0, const u8 arg1);
@@ -189,6 +197,8 @@ void rt2800_txdone_nostatus(struct rt2x00_dev *rt2x00dev);
bool rt2800_txstatus_timeout(struct rt2x00_dev *rt2x00dev);
bool rt2800_txstatus_pending(struct rt2x00_dev *rt2x00dev);
+void rt2800_watchdog(struct rt2x00_dev *rt2x00dev);
+
void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc);
void rt2800_clear_beacon(struct queue_entry *entry);
@@ -247,5 +257,6 @@ void rt2800_disable_wpdma(struct rt2x00_dev *rt2x00dev);
void rt2800_get_txwi_rxwi_size(struct rt2x00_dev *rt2x00dev,
unsigned short *txwi_size,
unsigned short *rxwi_size);
+void rt2800_pre_reset_hw(struct rt2x00_dev *rt2x00dev);
#endif /* RT2800LIB_H */
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c b/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c
index d1de8e2ff690..110bb391c372 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c
@@ -24,6 +24,37 @@
#include "rt2800lib.h"
#include "rt2800mmio.h"
+unsigned int rt2800mmio_get_dma_done(struct data_queue *queue)
+{
+ struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
+ struct queue_entry *entry;
+ int idx, qid;
+
+ switch (queue->qid) {
+ case QID_AC_VO:
+ case QID_AC_VI:
+ case QID_AC_BE:
+ case QID_AC_BK:
+ qid = queue->qid;
+ idx = rt2x00mmio_register_read(rt2x00dev, TX_DTX_IDX(qid));
+ break;
+ case QID_MGMT:
+ idx = rt2x00mmio_register_read(rt2x00dev, TX_DTX_IDX(5));
+ break;
+ case QID_RX:
+ entry = rt2x00queue_get_entry(queue, Q_INDEX_DMA_DONE);
+ idx = entry->entry_idx;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ idx = 0;
+ break;
+ }
+
+ return idx;
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_get_dma_done);
+
/*
* TX descriptor initialization
*/
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800mmio.h b/drivers/net/wireless/ralink/rt2x00/rt2800mmio.h
index 29b5cfd2856f..adcd9d54ac1c 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800mmio.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800mmio.h
@@ -114,6 +114,8 @@
#define RXD_W3_PLCP_SIGNAL FIELD32(0x00020000)
#define RXD_W3_PLCP_RSSI FIELD32(0x00040000)
+unsigned int rt2800mmio_get_dma_done(struct data_queue *queue);
+
/* TX descriptor initialization */
__le32 *rt2800mmio_get_txwi(struct queue_entry *entry);
void rt2800mmio_write_tx_desc(struct queue_entry *entry,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800pci.c b/drivers/net/wireless/ralink/rt2x00/rt2800pci.c
index ead8bd3e9236..a23c26574002 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800pci.c
@@ -326,6 +326,7 @@ static const struct rt2800_ops rt2800pci_rt2800_ops = {
.drv_write_firmware = rt2800pci_write_firmware,
.drv_init_registers = rt2800mmio_init_registers,
.drv_get_txwi = rt2800mmio_get_txwi,
+ .drv_get_dma_done = rt2800mmio_get_dma_done,
};
static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
@@ -350,6 +351,7 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
.link_tuner = rt2800_link_tuner,
.gain_calibration = rt2800_gain_calibration,
.vco_calibration = rt2800_vco_calibration,
+ .watchdog = rt2800_watchdog,
.start_queue = rt2800mmio_start_queue,
.kick_queue = rt2800mmio_kick_queue,
.stop_queue = rt2800mmio_stop_queue,
@@ -366,6 +368,7 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
.config_erp = rt2800_config_erp,
.config_ant = rt2800_config_ant,
.config = rt2800_config,
+ .pre_reset_hw = rt2800_pre_reset_hw,
};
static const struct rt2x00_ops rt2800pci_ops = {
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800soc.c b/drivers/net/wireless/ralink/rt2x00/rt2800soc.c
index 230557d36c52..7b931bb96a9e 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800soc.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800soc.c
@@ -171,6 +171,7 @@ static const struct rt2800_ops rt2800soc_rt2800_ops = {
.drv_write_firmware = rt2800soc_write_firmware,
.drv_init_registers = rt2800mmio_init_registers,
.drv_get_txwi = rt2800mmio_get_txwi,
+ .drv_get_dma_done = rt2800mmio_get_dma_done,
};
static const struct rt2x00lib_ops rt2800soc_rt2x00_ops = {
@@ -195,6 +196,7 @@ static const struct rt2x00lib_ops rt2800soc_rt2x00_ops = {
.link_tuner = rt2800_link_tuner,
.gain_calibration = rt2800_gain_calibration,
.vco_calibration = rt2800_vco_calibration,
+ .watchdog = rt2800_watchdog,
.start_queue = rt2800mmio_start_queue,
.kick_queue = rt2800mmio_kick_queue,
.stop_queue = rt2800mmio_stop_queue,
@@ -211,6 +213,7 @@ static const struct rt2x00lib_ops rt2800soc_rt2x00_ops = {
.config_erp = rt2800_config_erp,
.config_ant = rt2800_config_ant,
.config = rt2800_config,
+ .pre_reset_hw = rt2800_pre_reset_hw,
};
static const struct rt2x00_ops rt2800soc_ops = {
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
index 551427b83775..fdf0504b5f1d 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
@@ -379,6 +379,14 @@ static int rt2800usb_set_device_state(struct rt2x00_dev *rt2x00dev,
return retval;
}
+static unsigned int rt2800usb_get_dma_done(struct data_queue *queue)
+{
+ struct queue_entry *entry;
+
+ entry = rt2x00queue_get_entry(queue, Q_INDEX_DMA_DONE);
+ return entry->entry_idx;
+}
+
/*
* TX descriptor initialization
*/
@@ -661,6 +669,7 @@ static const struct rt2800_ops rt2800usb_rt2800_ops = {
.drv_write_firmware = rt2800usb_write_firmware,
.drv_init_registers = rt2800usb_init_registers,
.drv_get_txwi = rt2800usb_get_txwi,
+ .drv_get_dma_done = rt2800usb_get_dma_done,
};
static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = {
@@ -678,6 +687,7 @@ static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = {
.link_tuner = rt2800_link_tuner,
.gain_calibration = rt2800_gain_calibration,
.vco_calibration = rt2800_vco_calibration,
+ .watchdog = rt2800_watchdog,
.start_queue = rt2800usb_start_queue,
.kick_queue = rt2x00usb_kick_queue,
.stop_queue = rt2800usb_stop_queue,
@@ -696,6 +706,7 @@ static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = {
.config_erp = rt2800_config_erp,
.config_ant = rt2800_config_ant,
.config = rt2800_config,
+ .pre_reset_hw = rt2800_pre_reset_hw,
};
static void rt2800usb_queue_init(struct data_queue *queue)
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
index 64a792a8fb2c..7e43690a861c 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
@@ -325,6 +325,8 @@ struct link {
* to bring the device/driver back into the desired state.
*/
struct delayed_work watchdog_work;
+ unsigned int watchdog_interval;
+ bool watchdog_disabled;
/*
* Work structure for scheduling periodic AGC adjustments.
@@ -615,6 +617,7 @@ struct rt2x00lib_ops {
void (*config) (struct rt2x00_dev *rt2x00dev,
struct rt2x00lib_conf *libconf,
const unsigned int changed_flags);
+ void (*pre_reset_hw) (struct rt2x00_dev *rt2x00dev);
int (*sta_add) (struct rt2x00_dev *rt2x00dev,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
@@ -710,6 +713,7 @@ enum rt2x00_capability_flags {
CAPABILITY_VCO_RECALIBRATION,
CAPABILITY_EXTERNAL_PA_TX0,
CAPABILITY_EXTERNAL_PA_TX1,
+ CAPABILITY_RESTART_HW,
};
/*
@@ -1266,6 +1270,12 @@ rt2x00_has_cap_vco_recalibration(struct rt2x00_dev *rt2x00dev)
return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_VCO_RECALIBRATION);
}
+static inline bool
+rt2x00_has_cap_restart_hw(struct rt2x00_dev *rt2x00dev)
+{
+ return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_RESTART_HW);
+}
+
/**
* rt2x00queue_map_txskb - Map a skb into DMA for TX purposes.
* @entry: Pointer to &struct queue_entry
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c b/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c
index aac3aae7afaa..ef5f51512212 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c
@@ -52,6 +52,7 @@ struct rt2x00debug_intf {
* - chipset file
* - device state flags file
* - device capability flags file
+ * - hardware restart file
* - register folder
* - csr offset/value files
* - eeprom offset/value files
@@ -68,6 +69,7 @@ struct rt2x00debug_intf {
struct dentry *chipset_entry;
struct dentry *dev_flags;
struct dentry *cap_flags;
+ struct dentry *restart_hw;
struct dentry *register_folder;
struct dentry *csr_off_entry;
struct dentry *csr_val_entry;
@@ -566,6 +568,34 @@ static const struct file_operations rt2x00debug_fop_cap_flags = {
.llseek = default_llseek,
};
+static ssize_t rt2x00debug_write_restart_hw(struct file *file,
+ const char __user *buf,
+ size_t length,
+ loff_t *offset)
+{
+ struct rt2x00debug_intf *intf = file->private_data;
+ struct rt2x00_dev *rt2x00dev = intf->rt2x00dev;
+ static unsigned long last_reset;
+
+ if (!rt2x00_has_cap_restart_hw(rt2x00dev))
+ return -EOPNOTSUPP;
+
+ if (time_before(jiffies, last_reset + msecs_to_jiffies(2000)))
+ return -EBUSY;
+
+ last_reset = jiffies;
+
+ ieee80211_restart_hw(rt2x00dev->hw);
+ return length;
+}
+
+static const struct file_operations rt2x00debug_restart_hw = {
+ .owner = THIS_MODULE,
+ .write = rt2x00debug_write_restart_hw,
+ .open = simple_open,
+ .llseek = generic_file_llseek,
+};
+
static struct dentry *rt2x00debug_create_file_driver(const char *name,
struct rt2x00debug_intf
*intf,
@@ -661,6 +691,10 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
intf->driver_folder, intf,
&rt2x00debug_fop_cap_flags);
+ intf->restart_hw = debugfs_create_file("restart_hw", 0200,
+ intf->driver_folder, intf,
+ &rt2x00debug_restart_hw);
+
intf->register_folder =
debugfs_create_dir("register", intf->driver_folder);
@@ -742,6 +776,7 @@ void rt2x00debug_deregister(struct rt2x00_dev *rt2x00dev)
debugfs_remove(intf->csr_off_entry);
debugfs_remove(intf->register_folder);
debugfs_remove(intf->dev_flags);
+ debugfs_remove(intf->restart_hw);
debugfs_remove(intf->cap_flags);
debugfs_remove(intf->chipset_entry);
debugfs_remove(intf->driver_entry);
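The restart_hw entry added above follows a common shape for write-only debugfs trigger files: accept any write, rate-limit with jiffies, then kick the actual work. A minimal stand-alone sketch of that same pattern, with hypothetical names (my_trigger_write, my_do_reset, my_dir) rather than the driver's real ones:

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/printk.h>

/* Stand-in for the real work (ieee80211_restart_hw() in the hunk above). */
static void my_do_reset(void *priv)
{
	pr_info("restart requested\n");
}

static ssize_t my_trigger_write(struct file *file, const char __user *buf,
				size_t length, loff_t *offset)
{
	static unsigned long last_trigger;

	/* Ignore writes that arrive less than two seconds apart. */
	if (time_before(jiffies, last_trigger + msecs_to_jiffies(2000)))
		return -EBUSY;
	last_trigger = jiffies;

	my_do_reset(file->private_data);	/* set by simple_open() */
	return length;				/* consume the whole write */
}

static const struct file_operations my_trigger_fops = {
	.owner	= THIS_MODULE,
	.write	= my_trigger_write,
	.open	= simple_open,
	.llseek	= generic_file_llseek,
};

/* Registration mirrors the hunk above:
 *	debugfs_create_file("restart_hw", 0200, my_dir, priv, &my_trigger_fops);
 */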
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
index a6c374c483c2..35414f97a978 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
@@ -1258,8 +1258,14 @@ int rt2x00lib_start(struct rt2x00_dev *rt2x00dev)
{
int retval;
- if (test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags))
- return 0;
+ if (test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags)) {
+ /*
+ * This is a special case for ieee80211_restart_hw(); otherwise
+ * mac80211 never calls start() twice in a row without stop().
+ */
+ rt2x00dev->ops->lib->pre_reset_hw(rt2x00dev);
+ rt2x00lib_stop(rt2x00dev);
+ }
/*
* If this is the first interface which is added,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00link.c b/drivers/net/wireless/ralink/rt2x00/rt2x00link.c
index 939cfa5141c6..b052c96347d6 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00link.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00link.c
@@ -384,10 +384,10 @@ void rt2x00link_start_watchdog(struct rt2x00_dev *rt2x00dev)
struct link *link = &rt2x00dev->link;
if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) &&
- rt2x00dev->ops->lib->watchdog)
+ rt2x00dev->ops->lib->watchdog && !link->watchdog_disabled)
ieee80211_queue_delayed_work(rt2x00dev->hw,
&link->watchdog_work,
- WATCHDOG_INTERVAL);
+ link->watchdog_interval);
}
void rt2x00link_stop_watchdog(struct rt2x00_dev *rt2x00dev)
@@ -413,11 +413,16 @@ static void rt2x00link_watchdog(struct work_struct *work)
if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
ieee80211_queue_delayed_work(rt2x00dev->hw,
&link->watchdog_work,
- WATCHDOG_INTERVAL);
+ link->watchdog_interval);
}
void rt2x00link_register(struct rt2x00_dev *rt2x00dev)
{
- INIT_DELAYED_WORK(&rt2x00dev->link.watchdog_work, rt2x00link_watchdog);
- INIT_DELAYED_WORK(&rt2x00dev->link.work, rt2x00link_tuner);
+ struct link *link = &rt2x00dev->link;
+
+ INIT_DELAYED_WORK(&link->work, rt2x00link_tuner);
+ INIT_DELAYED_WORK(&link->watchdog_work, rt2x00link_watchdog);
+
+ if (link->watchdog_interval == 0)
+ link->watchdog_interval = WATCHDOG_INTERVAL;
}
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.h b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.h
index 099e747f70e7..23739dd0bc9b 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.h
@@ -435,6 +435,9 @@ enum data_queue_flags {
* @length: Number of frames in queue.
* @index: Index pointers to entry positions in the queue,
* use &enum queue_index to get a specific index field.
+ * @wd_count: watchdog counter: number of times the entry did not change
+ * in the queue
+ * @wd_idx: index of the queue entry saved by the watchdog
* @txop: maximum burst time.
* @aifs: The aifs value for outgoing frames (field ignored in RX queue).
* @cw_min: The cw min value for outgoing frames (field ignored in RX queue).
@@ -462,6 +465,9 @@ struct data_queue {
unsigned short length;
unsigned short index[Q_INDEX_MAX];
+ unsigned short wd_count;
+ unsigned int wd_idx;
+
unsigned short txop;
unsigned short aifs;
unsigned short cw_min;
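The wd_count/wd_idx pair documented above supports a simple hang check: remember the DMA-done index seen on the previous watchdog run and count how many runs pass without it moving. A self-contained sketch of that bookkeeping (my_queue, my_queue_stuck and the limit are illustrative names, not the driver's actual rt2800 watchdog code):

#include <stdbool.h>

struct my_queue {
	unsigned int wd_idx;	 /* DMA-done index seen on the previous run */
	unsigned short wd_count; /* consecutive runs without progress */
};

/*
 * Called once per watchdog interval with the current DMA-done index
 * (what a drv_get_dma_done()-style hook reports for the queue).
 * Returns true once the queue has not advanced for more than "limit"
 * runs in a row.
 */
static bool my_queue_stuck(struct my_queue *q, unsigned int dma_done_idx,
			   unsigned short limit)
{
	if (dma_done_idx != q->wd_idx) {
		q->wd_idx = dma_done_idx;	/* progress: restart counting */
		q->wd_count = 0;
		return false;
	}

	return ++q->wd_count > limit;		/* still stuck on the same entry */
}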
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
index 2ac0481b29ef..152242ac0aa5 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
@@ -1578,7 +1578,7 @@ void exhalbtc_scan_notify_wifi_only(struct wifi_only_cfg *wifionly_cfg,
void exhalbtc_connect_notify(struct btc_coexist *btcoexist, u8 action)
{
- u8 asso_type, asso_type_v2;
+ u8 asso_type;
bool wifi_under_5g;
if (!halbtc_is_bt_coexist_available(btcoexist))
@@ -1589,15 +1589,10 @@ void exhalbtc_connect_notify(struct btc_coexist *btcoexist, u8 action)
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
- if (action) {
+ if (action)
asso_type = BTC_ASSOCIATE_START;
- asso_type_v2 = wifi_under_5g ? BTC_ASSOCIATE_5G_START :
- BTC_ASSOCIATE_START;
- } else {
+ else
asso_type = BTC_ASSOCIATE_FINISH;
- asso_type_v2 = wifi_under_5g ? BTC_ASSOCIATE_5G_FINISH :
- BTC_ASSOCIATE_FINISH;
- }
halbtc_leave_low_power(btcoexist);
@@ -1746,30 +1741,6 @@ void exhalbtc_rf_status_notify(struct btc_coexist *btcoexist, u8 type)
}
}
-void exhalbtc_stack_operation_notify(struct btc_coexist *btcoexist, u8 type)
-{
- u8 stack_op_type;
-
- if (!halbtc_is_bt_coexist_available(btcoexist))
- return;
- btcoexist->statistics.cnt_stack_operation_notify++;
- if (btcoexist->manual_control)
- return;
-
- if ((type == HCI_BT_OP_INQUIRY_START) ||
- (type == HCI_BT_OP_PAGING_START) ||
- (type == HCI_BT_OP_PAIRING_START)) {
- stack_op_type = BTC_STACK_OP_INQ_PAGE_PAIR_START;
- } else if ((type == HCI_BT_OP_INQUIRY_FINISH) ||
- (type == HCI_BT_OP_PAGING_SUCCESS) ||
- (type == HCI_BT_OP_PAGING_UNSUCCESS) ||
- (type == HCI_BT_OP_PAIRING_FINISH)) {
- stack_op_type = BTC_STACK_OP_INQ_PAGE_PAIR_FINISH;
- } else {
- stack_op_type = BTC_STACK_OP_NONE;
- }
-}
-
void exhalbtc_halt_notify(struct btc_coexist *btcoexist)
{
if (!halbtc_is_bt_coexist_available(btcoexist))
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
index ee9aeddf1ebc..8c0a7fdbf200 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
@@ -764,7 +764,6 @@ void exhalbtc_special_packet_notify(struct btc_coexist *btcoexist, u8 pkt_type);
void exhalbtc_bt_info_notify(struct btc_coexist *btcoexist, u8 *tmp_buf,
u8 length);
void exhalbtc_rf_status_notify(struct btc_coexist *btcoexist, u8 type);
-void exhalbtc_stack_operation_notify(struct btc_coexist *btcoexist, u8 type);
void exhalbtc_halt_notify(struct btc_coexist *btcoexist);
void exhalbtc_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state);
void exhalbtc_coex_dm_switch(struct btc_coexist *btcoexist);
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c
index 0e509c33e9e6..b8c4536af6c0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c
@@ -316,7 +316,7 @@ void rtl_btc_btinfo_notify(struct rtl_priv *rtlpriv, u8 *tmp_buf, u8 length)
void rtl_btc_btmpinfo_notify(struct rtl_priv *rtlpriv, u8 *tmp_buf, u8 length)
{
struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
- u8 extid, seq, len;
+ u8 extid, seq;
u16 bt_real_fw_ver;
u8 bt_fw_ver;
u8 *data;
@@ -332,7 +332,6 @@ void rtl_btc_btmpinfo_notify(struct rtl_priv *rtlpriv, u8 *tmp_buf, u8 length)
if (extid != 1) /* C2H_TRIG_BY_BT_FW = 1 */
return;
- len = tmp_buf[1] >> 4;
seq = tmp_buf[2] >> 4;
data = &tmp_buf[3];
diff --git a/drivers/net/wireless/realtek/rtlwifi/efuse.c b/drivers/net/wireless/realtek/rtlwifi/efuse.c
index e68340dfd980..ea4fc53764de 100644
--- a/drivers/net/wireless/realtek/rtlwifi/efuse.c
+++ b/drivers/net/wireless/realtek/rtlwifi/efuse.c
@@ -117,10 +117,8 @@ u8 efuse_read_1byte(struct ieee80211_hw *hw, u16 address)
rtlpriv->cfg->
maps[EFUSE_CTRL] + 3);
k++;
- if (k == 1000) {
- k = 0;
+ if (k == 1000)
break;
- }
}
data = rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL]);
return data;
@@ -986,7 +984,6 @@ static int efuse_pg_packet_write(struct ieee80211_hw *hw,
} else if (write_state == PG_STATE_DATA) {
RTPRINT(rtlpriv, FEEPROM, EFUSE_PG,
"efuse PG_STATE_DATA\n");
- badworden = 0x0f;
badworden =
enable_efuse_data_write(hw, efuse_addr + 1,
target_pkt.word_en,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rc.c b/drivers/net/wireless/realtek/rtlwifi/rc.c
index cf8e42a01015..0c7d74902d33 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rc.c
@@ -173,9 +173,6 @@ static void rtl_get_rate(void *ppriv, struct ieee80211_sta *sta,
u8 try_per_rate, i, rix;
bool not_data = !ieee80211_is_data(fc);
- if (rate_control_send_low(sta, priv_sta, txrc))
- return;
-
rix = _rtl_rc_get_highest_rix(rtlpriv, sta, skb, not_data);
try_per_rate = 1;
_rtl_rc_rate_set_series(rtlpriv, sta, &rates[0], txrc,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
index 454bab38b165..f92e95f5494f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
@@ -1039,7 +1039,7 @@ int rtl88ee_hw_init(struct ieee80211_hw *hw)
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
- bool rtstatus = true;
+ bool rtstatus;
int err = 0;
u8 tmp_u1b, u1byte;
unsigned long flags;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c
index 7cc86bb387a1..71f3b6b5d7bd 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c
@@ -680,6 +680,7 @@ static void rtl92d_bandtype_2_4G(struct ieee80211_hw *hw, long *temp_cckg,
int i;
unsigned long flag = 0;
long temp_cck;
+ const u8 *cckswing;
/* Query CCK default setting From 0xa24 */
rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
@@ -687,28 +688,19 @@ static void rtl92d_bandtype_2_4G(struct ieee80211_hw *hw, long *temp_cckg,
MASKDWORD) & MASKCCK;
rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
for (i = 0; i < CCK_TABLE_LENGTH; i++) {
- if (rtlpriv->dm.cck_inch14) {
- if (!memcmp((void *)&temp_cck,
- (void *)&cckswing_table_ch14[i][2], 4)) {
- *cck_index_old = (u8) i;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Initial reg0x%x = 0x%lx, cck_index=0x%x, ch 14 %d\n",
- RCCK0_TXFILTER2, temp_cck,
- *cck_index_old,
- rtlpriv->dm.cck_inch14);
- break;
- }
- } else {
- if (!memcmp((void *) &temp_cck,
- &cckswing_table_ch1ch13[i][2], 4)) {
- *cck_index_old = (u8) i;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Initial reg0x%x = 0x%lx, cck_index = 0x%x, ch14 %d\n",
- RCCK0_TXFILTER2, temp_cck,
- *cck_index_old,
- rtlpriv->dm.cck_inch14);
- break;
- }
+ if (rtlpriv->dm.cck_inch14)
+ cckswing = &cckswing_table_ch14[i][2];
+ else
+ cckswing = &cckswing_table_ch1ch13[i][2];
+
+ if (temp_cck == le32_to_cpu(*((__le32 *)cckswing))) {
+ *cck_index_old = (u8)i;
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Initial reg0x%x = 0x%lx, cck_index = 0x%x, ch14 %d\n",
+ RCCK0_TXFILTER2, temp_cck,
+ *cck_index_old,
+ rtlpriv->dm.cck_inch14);
+ break;
}
}
*temp_cckg = temp_cck;
@@ -718,8 +710,8 @@ static void rtl92d_bandtype_5G(struct rtl_hal *rtlhal, u8 *ofdm_index,
bool *internal_pa, u8 thermalvalue, u8 delta,
u8 rf, struct rtl_efuse *rtlefuse,
struct rtl_priv *rtlpriv, struct rtl_phy *rtlphy,
- u8 index_mapping[5][INDEX_MAPPING_NUM],
- u8 index_mapping_pa[8][INDEX_MAPPING_NUM])
+ const u8 index_mapping[5][INDEX_MAPPING_NUM],
+ const u8 index_mapping_pa[8][INDEX_MAPPING_NUM])
{
int i;
u8 index;
@@ -787,9 +779,9 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
bool internal_pa = false;
long ele_a = 0, ele_d, temp_cck, val_x, value32;
long val_y, ele_c = 0;
- u8 ofdm_index[3];
+ u8 ofdm_index[2];
s8 cck_index = 0;
- u8 ofdm_index_old[3] = {0, 0, 0};
+ u8 ofdm_index_old[2] = {0, 0};
s8 cck_index_old = 0;
u8 index;
int i;
@@ -797,7 +789,7 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
u8 ofdm_min_index = 6, ofdm_min_index_internal_pa = 3, rf;
u8 indexforchannel =
rtl92d_get_rightchnlplace_for_iqk(rtlphy->current_channel);
- u8 index_mapping[5][INDEX_MAPPING_NUM] = {
+ static const u8 index_mapping[5][INDEX_MAPPING_NUM] = {
/* 5G, path A/MAC 0, decrease power */
{0, 1, 3, 6, 8, 9, 11, 13, 14, 16, 17, 18, 18},
/* 5G, path A/MAC 0, increase power */
@@ -809,7 +801,7 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
/* 2.4G, for decreas power */
{0, 1, 2, 3, 4, 5, 6, 7, 7, 8, 9, 10, 10},
};
- u8 index_mapping_internal_pa[8][INDEX_MAPPING_NUM] = {
+ static const u8 index_mapping_internal_pa[8][INDEX_MAPPING_NUM] = {
/* 5G, path A/MAC 0, ch36-64, decrease power */
{0, 1, 2, 4, 6, 7, 9, 11, 12, 14, 15, 16, 16},
/* 5G, path A/MAC 0, ch36-64, increase power */
@@ -837,365 +829,338 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
rtlpriv->dm.thermalvalue, rtlefuse->eeprom_thermalmeter);
rtl92d_phy_ap_calibrate(hw, (thermalvalue -
rtlefuse->eeprom_thermalmeter));
+
+ if (!thermalvalue)
+ goto exit;
+
if (is2t)
rf = 2;
else
rf = 1;
- if (thermalvalue) {
- ele_d = rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
+
+ if (rtlpriv->dm.thermalvalue && !rtlhal->reloadtxpowerindex)
+ goto old_index_done;
+
+ ele_d = rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE, MASKDWORD) & MASKOFDM_D;
+ for (i = 0; i < OFDM_TABLE_SIZE_92D; i++) {
+ if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) {
+ ofdm_index_old[0] = (u8)i;
+
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Initial pathA ele_d reg0x%x = 0x%lx, ofdm_index=0x%x\n",
+ ROFDM0_XATXIQIMBALANCE,
+ ele_d, ofdm_index_old[0]);
+ break;
+ }
+ }
+ if (is2t) {
+ ele_d = rtl_get_bbreg(hw, ROFDM0_XBTXIQIMBALANCE,
MASKDWORD) & MASKOFDM_D;
for (i = 0; i < OFDM_TABLE_SIZE_92D; i++) {
- if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) {
- ofdm_index_old[0] = (u8) i;
-
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Initial pathA ele_d reg0x%x = 0x%lx, ofdm_index=0x%x\n",
- ROFDM0_XATXIQIMBALANCE,
- ele_d, ofdm_index_old[0]);
+ if (ele_d ==
+ (ofdmswing_table[i] & MASKOFDM_D)) {
+ ofdm_index_old[1] = (u8)i;
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
+ DBG_LOUD,
+ "Initial pathB ele_d reg 0x%x = 0x%lx, ofdm_index = 0x%x\n",
+ ROFDM0_XBTXIQIMBALANCE, ele_d,
+ ofdm_index_old[1]);
break;
}
}
- if (is2t) {
- ele_d = rtl_get_bbreg(hw, ROFDM0_XBTXIQIMBALANCE,
- MASKDWORD) & MASKOFDM_D;
- for (i = 0; i < OFDM_TABLE_SIZE_92D; i++) {
- if (ele_d ==
- (ofdmswing_table[i] & MASKOFDM_D)) {
- ofdm_index_old[1] = (u8) i;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
- DBG_LOUD,
- "Initial pathB ele_d reg 0x%x = 0x%lx, ofdm_index = 0x%x\n",
- ROFDM0_XBTXIQIMBALANCE, ele_d,
- ofdm_index_old[1]);
- break;
- }
- }
- }
- if (rtlhal->current_bandtype == BAND_ON_2_4G) {
- rtl92d_bandtype_2_4G(hw, &temp_cck, &cck_index_old);
- } else {
- temp_cck = 0x090e1317;
- cck_index_old = 12;
- }
+ }
+ if (rtlhal->current_bandtype == BAND_ON_2_4G) {
+ rtl92d_bandtype_2_4G(hw, &temp_cck, &cck_index_old);
+ } else {
+ temp_cck = 0x090e1317;
+ cck_index_old = 12;
+ }
- if (!rtlpriv->dm.thermalvalue) {
- rtlpriv->dm.thermalvalue =
- rtlefuse->eeprom_thermalmeter;
- rtlpriv->dm.thermalvalue_lck = thermalvalue;
- rtlpriv->dm.thermalvalue_iqk = thermalvalue;
- rtlpriv->dm.thermalvalue_rxgain =
- rtlefuse->eeprom_thermalmeter;
- for (i = 0; i < rf; i++)
- rtlpriv->dm.ofdm_index[i] = ofdm_index_old[i];
- rtlpriv->dm.cck_index = cck_index_old;
+ if (!rtlpriv->dm.thermalvalue) {
+ rtlpriv->dm.thermalvalue = rtlefuse->eeprom_thermalmeter;
+ rtlpriv->dm.thermalvalue_lck = thermalvalue;
+ rtlpriv->dm.thermalvalue_iqk = thermalvalue;
+ rtlpriv->dm.thermalvalue_rxgain = rtlefuse->eeprom_thermalmeter;
+ for (i = 0; i < rf; i++)
+ rtlpriv->dm.ofdm_index[i] = ofdm_index_old[i];
+ rtlpriv->dm.cck_index = cck_index_old;
+ }
+ if (rtlhal->reloadtxpowerindex) {
+ for (i = 0; i < rf; i++)
+ rtlpriv->dm.ofdm_index[i] = ofdm_index_old[i];
+ rtlpriv->dm.cck_index = cck_index_old;
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "reload ofdm index for band switch\n");
+ }
+old_index_done:
+ for (i = 0; i < rf; i++)
+ ofdm_index[i] = rtlpriv->dm.ofdm_index[i];
+
+ rtlpriv->dm.thermalvalue_avg
+ [rtlpriv->dm.thermalvalue_avg_index] = thermalvalue;
+ rtlpriv->dm.thermalvalue_avg_index++;
+ if (rtlpriv->dm.thermalvalue_avg_index == AVG_THERMAL_NUM)
+ rtlpriv->dm.thermalvalue_avg_index = 0;
+ for (i = 0; i < AVG_THERMAL_NUM; i++) {
+ if (rtlpriv->dm.thermalvalue_avg[i]) {
+ thermalvalue_avg += rtlpriv->dm.thermalvalue_avg[i];
+ thermalvalue_avg_count++;
}
- if (rtlhal->reloadtxpowerindex) {
+ }
+ if (thermalvalue_avg_count)
+ thermalvalue = (u8)(thermalvalue_avg / thermalvalue_avg_count);
+ if (rtlhal->reloadtxpowerindex) {
+ delta = (thermalvalue > rtlefuse->eeprom_thermalmeter) ?
+ (thermalvalue - rtlefuse->eeprom_thermalmeter) :
+ (rtlefuse->eeprom_thermalmeter - thermalvalue);
+ rtlhal->reloadtxpowerindex = false;
+ rtlpriv->dm.done_txpower = false;
+ } else if (rtlpriv->dm.done_txpower) {
+ delta = (thermalvalue > rtlpriv->dm.thermalvalue) ?
+ (thermalvalue - rtlpriv->dm.thermalvalue) :
+ (rtlpriv->dm.thermalvalue - thermalvalue);
+ } else {
+ delta = (thermalvalue > rtlefuse->eeprom_thermalmeter) ?
+ (thermalvalue - rtlefuse->eeprom_thermalmeter) :
+ (rtlefuse->eeprom_thermalmeter - thermalvalue);
+ }
+ delta_lck = (thermalvalue > rtlpriv->dm.thermalvalue_lck) ?
+ (thermalvalue - rtlpriv->dm.thermalvalue_lck) :
+ (rtlpriv->dm.thermalvalue_lck - thermalvalue);
+ delta_iqk = (thermalvalue > rtlpriv->dm.thermalvalue_iqk) ?
+ (thermalvalue - rtlpriv->dm.thermalvalue_iqk) :
+ (rtlpriv->dm.thermalvalue_iqk - thermalvalue);
+ delta_rxgain =
+ (thermalvalue > rtlpriv->dm.thermalvalue_rxgain) ?
+ (thermalvalue - rtlpriv->dm.thermalvalue_rxgain) :
+ (rtlpriv->dm.thermalvalue_rxgain - thermalvalue);
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x delta 0x%x delta_lck 0x%x delta_iqk 0x%x\n",
+ thermalvalue, rtlpriv->dm.thermalvalue,
+ rtlefuse->eeprom_thermalmeter, delta, delta_lck,
+ delta_iqk);
+ if (delta_lck > rtlefuse->delta_lck && rtlefuse->delta_lck != 0) {
+ rtlpriv->dm.thermalvalue_lck = thermalvalue;
+ rtl92d_phy_lc_calibrate(hw);
+ }
+
+ if (delta == 0 || !rtlpriv->dm.txpower_track_control)
+ goto check_delta;
+
+ rtlpriv->dm.done_txpower = true;
+ delta = (thermalvalue > rtlefuse->eeprom_thermalmeter) ?
+ (thermalvalue - rtlefuse->eeprom_thermalmeter) :
+ (rtlefuse->eeprom_thermalmeter - thermalvalue);
+ if (rtlhal->current_bandtype == BAND_ON_2_4G) {
+ offset = 4;
+ if (delta > INDEX_MAPPING_NUM - 1)
+ index = index_mapping[offset][INDEX_MAPPING_NUM - 1];
+ else
+ index = index_mapping[offset][delta];
+ if (thermalvalue > rtlpriv->dm.thermalvalue) {
for (i = 0; i < rf; i++)
- rtlpriv->dm.ofdm_index[i] = ofdm_index_old[i];
- rtlpriv->dm.cck_index = cck_index_old;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "reload ofdm index for band switch\n");
- }
- rtlpriv->dm.thermalvalue_avg
- [rtlpriv->dm.thermalvalue_avg_index] = thermalvalue;
- rtlpriv->dm.thermalvalue_avg_index++;
- if (rtlpriv->dm.thermalvalue_avg_index == AVG_THERMAL_NUM)
- rtlpriv->dm.thermalvalue_avg_index = 0;
- for (i = 0; i < AVG_THERMAL_NUM; i++) {
- if (rtlpriv->dm.thermalvalue_avg[i]) {
- thermalvalue_avg +=
- rtlpriv->dm.thermalvalue_avg[i];
- thermalvalue_avg_count++;
- }
- }
- if (thermalvalue_avg_count)
- thermalvalue = (u8) (thermalvalue_avg /
- thermalvalue_avg_count);
- if (rtlhal->reloadtxpowerindex) {
- delta = (thermalvalue > rtlefuse->eeprom_thermalmeter) ?
- (thermalvalue - rtlefuse->eeprom_thermalmeter) :
- (rtlefuse->eeprom_thermalmeter - thermalvalue);
- rtlhal->reloadtxpowerindex = false;
- rtlpriv->dm.done_txpower = false;
- } else if (rtlpriv->dm.done_txpower) {
- delta = (thermalvalue > rtlpriv->dm.thermalvalue) ?
- (thermalvalue - rtlpriv->dm.thermalvalue) :
- (rtlpriv->dm.thermalvalue - thermalvalue);
+ ofdm_index[i] -= delta;
+ cck_index -= delta;
} else {
- delta = (thermalvalue > rtlefuse->eeprom_thermalmeter) ?
- (thermalvalue - rtlefuse->eeprom_thermalmeter) :
- (rtlefuse->eeprom_thermalmeter - thermalvalue);
+ for (i = 0; i < rf; i++)
+ ofdm_index[i] += index;
+ cck_index += index;
}
- delta_lck = (thermalvalue > rtlpriv->dm.thermalvalue_lck) ?
- (thermalvalue - rtlpriv->dm.thermalvalue_lck) :
- (rtlpriv->dm.thermalvalue_lck - thermalvalue);
- delta_iqk = (thermalvalue > rtlpriv->dm.thermalvalue_iqk) ?
- (thermalvalue - rtlpriv->dm.thermalvalue_iqk) :
- (rtlpriv->dm.thermalvalue_iqk - thermalvalue);
- delta_rxgain =
- (thermalvalue > rtlpriv->dm.thermalvalue_rxgain) ?
- (thermalvalue - rtlpriv->dm.thermalvalue_rxgain) :
- (rtlpriv->dm.thermalvalue_rxgain - thermalvalue);
+ } else if (rtlhal->current_bandtype == BAND_ON_5G) {
+ rtl92d_bandtype_5G(rtlhal, ofdm_index,
+ &internal_pa, thermalvalue,
+ delta, rf, rtlefuse, rtlpriv,
+ rtlphy, index_mapping,
+ index_mapping_internal_pa);
+ }
+ if (is2t) {
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x delta 0x%x delta_lck 0x%x delta_iqk 0x%x\n",
- thermalvalue, rtlpriv->dm.thermalvalue,
- rtlefuse->eeprom_thermalmeter, delta, delta_lck,
- delta_iqk);
- if ((delta_lck > rtlefuse->delta_lck) &&
- (rtlefuse->delta_lck != 0)) {
- rtlpriv->dm.thermalvalue_lck = thermalvalue;
- rtl92d_phy_lc_calibrate(hw);
+ "temp OFDM_A_index=0x%x, OFDM_B_index = 0x%x,cck_index=0x%x\n",
+ rtlpriv->dm.ofdm_index[0],
+ rtlpriv->dm.ofdm_index[1],
+ rtlpriv->dm.cck_index);
+ } else {
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "temp OFDM_A_index=0x%x,cck_index = 0x%x\n",
+ rtlpriv->dm.ofdm_index[0],
+ rtlpriv->dm.cck_index);
+ }
+ for (i = 0; i < rf; i++) {
+ if (ofdm_index[i] > OFDM_TABLE_SIZE_92D - 1)
+ ofdm_index[i] = OFDM_TABLE_SIZE_92D - 1;
+ else if (ofdm_index[i] < ofdm_min_index)
+ ofdm_index[i] = ofdm_min_index;
+ }
+ if (rtlhal->current_bandtype == BAND_ON_2_4G) {
+ if (cck_index > CCK_TABLE_SIZE - 1) {
+ cck_index = CCK_TABLE_SIZE - 1;
+ } else if (internal_pa ||
+ rtlhal->current_bandtype == BAND_ON_2_4G) {
+ if (ofdm_index[i] < ofdm_min_index_internal_pa)
+ ofdm_index[i] = ofdm_min_index_internal_pa;
+ } else if (cck_index < 0) {
+ cck_index = 0;
}
- if (delta > 0 && rtlpriv->dm.txpower_track_control) {
- rtlpriv->dm.done_txpower = true;
- delta = (thermalvalue > rtlefuse->eeprom_thermalmeter) ?
- (thermalvalue - rtlefuse->eeprom_thermalmeter) :
- (rtlefuse->eeprom_thermalmeter - thermalvalue);
- if (rtlhal->current_bandtype == BAND_ON_2_4G) {
- offset = 4;
- if (delta > INDEX_MAPPING_NUM - 1)
- index = index_mapping[offset]
- [INDEX_MAPPING_NUM - 1];
- else
- index = index_mapping[offset][delta];
- if (thermalvalue > rtlpriv->dm.thermalvalue) {
- for (i = 0; i < rf; i++)
- ofdm_index[i] -= delta;
- cck_index -= delta;
- } else {
- for (i = 0; i < rf; i++)
- ofdm_index[i] += index;
- cck_index += index;
- }
- } else if (rtlhal->current_bandtype == BAND_ON_5G) {
- rtl92d_bandtype_5G(rtlhal, ofdm_index,
- &internal_pa, thermalvalue,
- delta, rf, rtlefuse, rtlpriv,
- rtlphy, index_mapping,
- index_mapping_internal_pa);
- }
- if (is2t) {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "temp OFDM_A_index=0x%x, OFDM_B_index = 0x%x,cck_index=0x%x\n",
- rtlpriv->dm.ofdm_index[0],
- rtlpriv->dm.ofdm_index[1],
- rtlpriv->dm.cck_index);
- } else {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "temp OFDM_A_index=0x%x,cck_index = 0x%x\n",
- rtlpriv->dm.ofdm_index[0],
- rtlpriv->dm.cck_index);
- }
- for (i = 0; i < rf; i++) {
- if (ofdm_index[i] > OFDM_TABLE_SIZE_92D - 1)
- ofdm_index[i] = OFDM_TABLE_SIZE_92D - 1;
- else if (ofdm_index[i] < ofdm_min_index)
- ofdm_index[i] = ofdm_min_index;
- }
- if (rtlhal->current_bandtype == BAND_ON_2_4G) {
- if (cck_index > CCK_TABLE_SIZE - 1) {
- cck_index = CCK_TABLE_SIZE - 1;
- } else if (internal_pa ||
- rtlhal->current_bandtype ==
- BAND_ON_2_4G) {
- if (ofdm_index[i] <
- ofdm_min_index_internal_pa)
- ofdm_index[i] =
- ofdm_min_index_internal_pa;
- } else if (cck_index < 0) {
- cck_index = 0;
- }
- }
- if (is2t) {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "new OFDM_A_index=0x%x, OFDM_B_index = 0x%x, cck_index=0x%x\n",
- ofdm_index[0], ofdm_index[1],
- cck_index);
- } else {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "new OFDM_A_index=0x%x,cck_index = 0x%x\n",
- ofdm_index[0], cck_index);
- }
- ele_d = (ofdmswing_table[(u8) ofdm_index[0]] &
- 0xFFC00000) >> 22;
- val_x = rtlphy->iqk_matrix
- [indexforchannel].value[0][0];
- val_y = rtlphy->iqk_matrix
- [indexforchannel].value[0][1];
- if (val_x != 0) {
- if ((val_x & 0x00000200) != 0)
- val_x = val_x | 0xFFFFFC00;
- ele_a =
- ((val_x * ele_d) >> 8) & 0x000003FF;
-
- /* new element C = element D x Y */
- if ((val_y & 0x00000200) != 0)
- val_y = val_y | 0xFFFFFC00;
- ele_c = ((val_y * ele_d) >> 8) & 0x000003FF;
-
- /* wirte new elements A, C, D to regC80 and
- * regC94, element B is always 0 */
- value32 = (ele_d << 22) | ((ele_c & 0x3F) <<
- 16) | ele_a;
- rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
- MASKDWORD, value32);
-
- value32 = (ele_c & 0x000003C0) >> 6;
- rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS,
- value32);
-
- value32 = ((val_x * ele_d) >> 7) & 0x01;
- rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(24),
- value32);
+ }
+ if (is2t) {
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "new OFDM_A_index=0x%x, OFDM_B_index = 0x%x, cck_index=0x%x\n",
+ ofdm_index[0], ofdm_index[1],
+ cck_index);
+ } else {
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "new OFDM_A_index=0x%x,cck_index = 0x%x\n",
+ ofdm_index[0], cck_index);
+ }
+ ele_d = (ofdmswing_table[ofdm_index[0]] & 0xFFC00000) >> 22;
+ val_x = rtlphy->iqk_matrix[indexforchannel].value[0][0];
+ val_y = rtlphy->iqk_matrix[indexforchannel].value[0][1];
+ if (val_x != 0) {
+ if ((val_x & 0x00000200) != 0)
+ val_x = val_x | 0xFFFFFC00;
+ ele_a = ((val_x * ele_d) >> 8) & 0x000003FF;
+
+ /* new element C = element D x Y */
+ if ((val_y & 0x00000200) != 0)
+ val_y = val_y | 0xFFFFFC00;
+ ele_c = ((val_y * ele_d) >> 8) & 0x000003FF;
+
+ /* write new elements A, C, D to regC80 and
+ * regC94, element B is always 0
+ */
+ value32 = (ele_d << 22) | ((ele_c & 0x3F) << 16) | ele_a;
+ rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
+ MASKDWORD, value32);
+
+ value32 = (ele_c & 0x000003C0) >> 6;
+ rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS,
+ value32);
+
+ value32 = ((val_x * ele_d) >> 7) & 0x01;
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(24),
+ value32);
- } else {
- rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
- MASKDWORD,
- ofdmswing_table
- [(u8)ofdm_index[0]]);
- rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS,
- 0x00);
- rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
- BIT(24), 0x00);
- }
+ } else {
+ rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
+ MASKDWORD,
+ ofdmswing_table[(u8)ofdm_index[0]]);
+ rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS,
+ 0x00);
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
+ BIT(24), 0x00);
+ }
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "TxPwrTracking for interface %d path A: X = 0x%lx, Y = 0x%lx ele_A = 0x%lx ele_C = 0x%lx ele_D = 0x%lx 0xe94 = 0x%lx 0xe9c = 0x%lx\n",
- rtlhal->interfaceindex,
- val_x, val_y, ele_a, ele_c, ele_d,
- val_x, val_y);
-
- if (cck_index >= CCK_TABLE_SIZE)
- cck_index = CCK_TABLE_SIZE - 1;
- if (cck_index < 0)
- cck_index = 0;
- if (rtlhal->current_bandtype == BAND_ON_2_4G) {
- /* Adjust CCK according to IQK result */
- if (!rtlpriv->dm.cck_inch14) {
- rtl_write_byte(rtlpriv, 0xa22,
- cckswing_table_ch1ch13
- [(u8)cck_index][0]);
- rtl_write_byte(rtlpriv, 0xa23,
- cckswing_table_ch1ch13
- [(u8)cck_index][1]);
- rtl_write_byte(rtlpriv, 0xa24,
- cckswing_table_ch1ch13
- [(u8)cck_index][2]);
- rtl_write_byte(rtlpriv, 0xa25,
- cckswing_table_ch1ch13
- [(u8)cck_index][3]);
- rtl_write_byte(rtlpriv, 0xa26,
- cckswing_table_ch1ch13
- [(u8)cck_index][4]);
- rtl_write_byte(rtlpriv, 0xa27,
- cckswing_table_ch1ch13
- [(u8)cck_index][5]);
- rtl_write_byte(rtlpriv, 0xa28,
- cckswing_table_ch1ch13
- [(u8)cck_index][6]);
- rtl_write_byte(rtlpriv, 0xa29,
- cckswing_table_ch1ch13
- [(u8)cck_index][7]);
- } else {
- rtl_write_byte(rtlpriv, 0xa22,
- cckswing_table_ch14
- [(u8)cck_index][0]);
- rtl_write_byte(rtlpriv, 0xa23,
- cckswing_table_ch14
- [(u8)cck_index][1]);
- rtl_write_byte(rtlpriv, 0xa24,
- cckswing_table_ch14
- [(u8)cck_index][2]);
- rtl_write_byte(rtlpriv, 0xa25,
- cckswing_table_ch14
- [(u8)cck_index][3]);
- rtl_write_byte(rtlpriv, 0xa26,
- cckswing_table_ch14
- [(u8)cck_index][4]);
- rtl_write_byte(rtlpriv, 0xa27,
- cckswing_table_ch14
- [(u8)cck_index][5]);
- rtl_write_byte(rtlpriv, 0xa28,
- cckswing_table_ch14
- [(u8)cck_index][6]);
- rtl_write_byte(rtlpriv, 0xa29,
- cckswing_table_ch14
- [(u8)cck_index][7]);
- }
- }
- if (is2t) {
- ele_d = (ofdmswing_table[(u8) ofdm_index[1]] &
- 0xFFC00000) >> 22;
- val_x = rtlphy->iqk_matrix
- [indexforchannel].value[0][4];
- val_y = rtlphy->iqk_matrix
- [indexforchannel].value[0][5];
- if (val_x != 0) {
- if ((val_x & 0x00000200) != 0)
- /* consider minus */
- val_x = val_x | 0xFFFFFC00;
- ele_a = ((val_x * ele_d) >> 8) &
- 0x000003FF;
- /* new element C = element D x Y */
- if ((val_y & 0x00000200) != 0)
- val_y =
- val_y | 0xFFFFFC00;
- ele_c =
- ((val_y *
- ele_d) >> 8) & 0x00003FF;
- /* write new elements A, C, D to regC88
- * and regC9C, element B is always 0
- */
- value32 = (ele_d << 22) |
- ((ele_c & 0x3F) << 16) |
- ele_a;
- rtl_set_bbreg(hw,
- ROFDM0_XBTXIQIMBALANCE,
- MASKDWORD, value32);
- value32 = (ele_c & 0x000003C0) >> 6;
- rtl_set_bbreg(hw, ROFDM0_XDTXAFE,
- MASKH4BITS, value32);
- value32 = ((val_x * ele_d) >> 7) & 0x01;
- rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
- BIT(28), value32);
- } else {
- rtl_set_bbreg(hw,
- ROFDM0_XBTXIQIMBALANCE,
- MASKDWORD,
- ofdmswing_table
- [(u8) ofdm_index[1]]);
- rtl_set_bbreg(hw, ROFDM0_XDTXAFE,
- MASKH4BITS, 0x00);
- rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
- BIT(28), 0x00);
- }
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "TxPwrTracking path B: X = 0x%lx, Y = 0x%lx ele_A = 0x%lx ele_C = 0x%lx ele_D = 0x%lx 0xeb4 = 0x%lx 0xebc = 0x%lx\n",
- val_x, val_y, ele_a, ele_c,
- ele_d, val_x, val_y);
- }
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "TxPwrTracking 0xc80 = 0x%x, 0xc94 = 0x%x RF 0x24 = 0x%x\n",
- rtl_get_bbreg(hw, 0xc80, MASKDWORD),
- rtl_get_bbreg(hw, 0xc94, MASKDWORD),
- rtl_get_rfreg(hw, RF90_PATH_A, 0x24,
- RFREG_OFFSET_MASK));
- }
- if ((delta_iqk > rtlefuse->delta_iqk) &&
- (rtlefuse->delta_iqk != 0)) {
- rtl92d_phy_reset_iqk_result(hw);
- rtlpriv->dm.thermalvalue_iqk = thermalvalue;
- rtl92d_phy_iq_calibrate(hw);
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "TxPwrTracking for interface %d path A: X = 0x%lx, Y = 0x%lx ele_A = 0x%lx ele_C = 0x%lx ele_D = 0x%lx 0xe94 = 0x%lx 0xe9c = 0x%lx\n",
+ rtlhal->interfaceindex,
+ val_x, val_y, ele_a, ele_c, ele_d,
+ val_x, val_y);
+
+ if (cck_index >= CCK_TABLE_SIZE)
+ cck_index = CCK_TABLE_SIZE - 1;
+ if (cck_index < 0)
+ cck_index = 0;
+ if (rtlhal->current_bandtype == BAND_ON_2_4G) {
+ /* Adjust CCK according to IQK result */
+ if (!rtlpriv->dm.cck_inch14) {
+ rtl_write_byte(rtlpriv, 0xa22,
+ cckswing_table_ch1ch13[cck_index][0]);
+ rtl_write_byte(rtlpriv, 0xa23,
+ cckswing_table_ch1ch13[cck_index][1]);
+ rtl_write_byte(rtlpriv, 0xa24,
+ cckswing_table_ch1ch13[cck_index][2]);
+ rtl_write_byte(rtlpriv, 0xa25,
+ cckswing_table_ch1ch13[cck_index][3]);
+ rtl_write_byte(rtlpriv, 0xa26,
+ cckswing_table_ch1ch13[cck_index][4]);
+ rtl_write_byte(rtlpriv, 0xa27,
+ cckswing_table_ch1ch13[cck_index][5]);
+ rtl_write_byte(rtlpriv, 0xa28,
+ cckswing_table_ch1ch13[cck_index][6]);
+ rtl_write_byte(rtlpriv, 0xa29,
+ cckswing_table_ch1ch13[cck_index][7]);
+ } else {
+ rtl_write_byte(rtlpriv, 0xa22,
+ cckswing_table_ch14[cck_index][0]);
+ rtl_write_byte(rtlpriv, 0xa23,
+ cckswing_table_ch14[cck_index][1]);
+ rtl_write_byte(rtlpriv, 0xa24,
+ cckswing_table_ch14[cck_index][2]);
+ rtl_write_byte(rtlpriv, 0xa25,
+ cckswing_table_ch14[cck_index][3]);
+ rtl_write_byte(rtlpriv, 0xa26,
+ cckswing_table_ch14[cck_index][4]);
+ rtl_write_byte(rtlpriv, 0xa27,
+ cckswing_table_ch14[cck_index][5]);
+ rtl_write_byte(rtlpriv, 0xa28,
+ cckswing_table_ch14[cck_index][6]);
+ rtl_write_byte(rtlpriv, 0xa29,
+ cckswing_table_ch14[cck_index][7]);
}
- if (delta_rxgain > 0 && rtlhal->current_bandtype == BAND_ON_5G
- && thermalvalue <= rtlefuse->eeprom_thermalmeter) {
- rtlpriv->dm.thermalvalue_rxgain = thermalvalue;
- rtl92d_dm_rxgain_tracking_thermalmeter(hw);
+ }
+ if (is2t) {
+ ele_d = (ofdmswing_table[ofdm_index[1]] & 0xFFC00000) >> 22;
+ val_x = rtlphy->iqk_matrix[indexforchannel].value[0][4];
+ val_y = rtlphy->iqk_matrix[indexforchannel].value[0][5];
+ if (val_x != 0) {
+ if ((val_x & 0x00000200) != 0)
+ /* consider minus */
+ val_x = val_x | 0xFFFFFC00;
+ ele_a = ((val_x * ele_d) >> 8) & 0x000003FF;
+ /* new element C = element D x Y */
+ if ((val_y & 0x00000200) != 0)
+ val_y = val_y | 0xFFFFFC00;
+ ele_c = ((val_y * ele_d) >> 8) & 0x00003FF;
+ /* write new elements A, C, D to regC88
+ * and regC9C, element B is always 0
+ */
+ value32 = (ele_d << 22) | ((ele_c & 0x3F) << 16) | ele_a;
+ rtl_set_bbreg(hw,
+ ROFDM0_XBTXIQIMBALANCE,
+ MASKDWORD, value32);
+ value32 = (ele_c & 0x000003C0) >> 6;
+ rtl_set_bbreg(hw, ROFDM0_XDTXAFE,
+ MASKH4BITS, value32);
+ value32 = ((val_x * ele_d) >> 7) & 0x01;
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
+ BIT(28), value32);
+ } else {
+ rtl_set_bbreg(hw,
+ ROFDM0_XBTXIQIMBALANCE,
+ MASKDWORD,
+ ofdmswing_table[ofdm_index[1]]);
+ rtl_set_bbreg(hw, ROFDM0_XDTXAFE,
+ MASKH4BITS, 0x00);
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
+ BIT(28), 0x00);
}
- if (rtlpriv->dm.txpower_track_control)
- rtlpriv->dm.thermalvalue = thermalvalue;
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "TxPwrTracking path B: X = 0x%lx, Y = 0x%lx ele_A = 0x%lx ele_C = 0x%lx ele_D = 0x%lx 0xeb4 = 0x%lx 0xebc = 0x%lx\n",
+ val_x, val_y, ele_a, ele_c,
+ ele_d, val_x, val_y);
+ }
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "TxPwrTracking 0xc80 = 0x%x, 0xc94 = 0x%x RF 0x24 = 0x%x\n",
+ rtl_get_bbreg(hw, 0xc80, MASKDWORD),
+ rtl_get_bbreg(hw, 0xc94, MASKDWORD),
+ rtl_get_rfreg(hw, RF90_PATH_A, 0x24,
+ RFREG_OFFSET_MASK));
+
+check_delta:
+ if (delta_iqk > rtlefuse->delta_iqk && rtlefuse->delta_iqk != 0) {
+ rtl92d_phy_reset_iqk_result(hw);
+ rtlpriv->dm.thermalvalue_iqk = thermalvalue;
+ rtl92d_phy_iq_calibrate(hw);
}
+ if (delta_rxgain > 0 && rtlhal->current_bandtype == BAND_ON_5G &&
+ thermalvalue <= rtlefuse->eeprom_thermalmeter) {
+ rtlpriv->dm.thermalvalue_rxgain = thermalvalue;
+ rtl92d_dm_rxgain_tracking_thermalmeter(hw);
+ }
+ if (rtlpriv->dm.txpower_track_control)
+ rtlpriv->dm.thermalvalue = thermalvalue;
+exit:
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "<===\n");
}
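Most of the rtl8192de hunk above is mechanical: the large "if (thermalvalue) { ... }" body is un-indented by jumping to labels (old_index_done, check_delta, exit) instead of nesting. A toy example of the same early-goto flattening, with made-up names, shows the shape of the change:

#include <stdbool.h>

static void track_power(unsigned int thermal, bool reload)
{
	if (!thermal)
		goto exit;		/* was: if (thermal) { ...whole body... } */

	if (!reload)
		goto old_index_done;	/* skip re-reading the cached indexes */

	/* ...re-read the old OFDM/CCK indexes here... */

old_index_done:
	/* ...common tracking work, now one indentation level shallower... */

exit:
	return;
}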
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
index 49d05b631ba1..b54230433a6b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
@@ -655,10 +655,9 @@ static void rtl8821ae_dm_check_rssi_monitor(struct ieee80211_hw *hw)
u8 h2c_parameter[4] = { 0 };
long tmp_entry_max_pwdb = 0, tmp_entry_min_pwdb = 0xff;
u8 stbc_tx = 0;
- u64 cur_txokcnt = 0, cur_rxokcnt = 0;
+ u64 cur_rxokcnt = 0;
static u64 last_txokcnt = 0, last_rxokcnt;
- cur_txokcnt = rtlpriv->stats.txbytesunicast - last_txokcnt;
cur_rxokcnt = rtlpriv->stats.rxbytesunicast - last_rxokcnt;
last_txokcnt = rtlpriv->stats.txbytesunicast;
last_rxokcnt = rtlpriv->stats.rxbytesunicast;
@@ -2654,7 +2653,6 @@ static void rtl8821ae_dm_check_edca_turbo(struct ieee80211_hw *hw)
u32 edca_be = 0x5ea42b;
u8 iot_peer = 0;
bool *pb_is_cur_rdl_state = NULL;
- bool b_last_is_cur_rdl_state = false;
bool b_bias_on_rx = false;
bool b_edca_turbo_on = false;
@@ -2672,7 +2670,6 @@ static void rtl8821ae_dm_check_edca_turbo(struct ieee80211_hw *hw)
* list paramter for different platform
*===============================
*/
- b_last_is_cur_rdl_state = rtlpriv->dm.is_cur_rdlstate;
pb_is_cur_rdl_state = &rtlpriv->dm.is_cur_rdlstate;
cur_tx_ok_cnt = rtlpriv->stats.txbytesunicast - rtldm->last_tx_ok_cnt;
@@ -2958,10 +2955,11 @@ void rtl8821ae_dm_set_tx_ant_by_tx_info(struct ieee80211_hw *hw,
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
struct fast_ant_training *pfat_table = &rtldm->fat_table;
+ __le32 *pdesc32 = (__le32 *)pdesc;
if (rtlhal->hw_type != HARDWARE_TYPE_RTL8812AE)
return;
if (rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV)
- SET_TX_DESC_TX_ANT(pdesc, pfat_table->antsel_a[mac_id]);
+ set_tx_desc_tx_ant(pdesc32, pfat_table->antsel_a[mac_id]);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c
index 7b6faf38e09c..cd809c992245 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c
@@ -56,7 +56,7 @@ static u8 _rtl8821ae_evm_dbm_jaguar(s8 value)
}
static void query_rxphystatus(struct ieee80211_hw *hw,
- struct rtl_stats *pstatus, u8 *pdesc,
+ struct rtl_stats *pstatus, __le32 *pdesc,
struct rx_fwinfo_8821ae *p_drvinfo,
bool bpacket_match_bssid,
bool bpacket_toself, bool packet_beacon)
@@ -274,7 +274,7 @@ static void query_rxphystatus(struct ieee80211_hw *hw,
static void translate_rx_signal_stuff(struct ieee80211_hw *hw,
struct sk_buff *skb,
- struct rtl_stats *pstatus, u8 *pdesc,
+ struct rtl_stats *pstatus, __le32 *pdesc,
struct rx_fwinfo_8821ae *p_drvinfo)
{
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
@@ -332,14 +332,14 @@ static void translate_rx_signal_stuff(struct ieee80211_hw *hw,
rtl_process_phyinfo(hw, tmp_buf, pstatus);
}
-static void _rtl8821ae_insert_emcontent(struct rtl_tcb_desc *ptcb_desc,
- u8 *virtualaddress)
+static void rtl8821ae_insert_emcontent(struct rtl_tcb_desc *ptcb_desc,
+ __le32 *virtualaddress)
{
u32 dwtmp = 0;
memset(virtualaddress, 0, 8);
- SET_EARLYMODE_PKTNUM(virtualaddress, ptcb_desc->empkt_num);
+ set_earlymode_pktnum(virtualaddress, ptcb_desc->empkt_num);
if (ptcb_desc->empkt_num == 1) {
dwtmp = ptcb_desc->empkt_len[0];
} else {
@@ -347,7 +347,7 @@ static void _rtl8821ae_insert_emcontent(struct rtl_tcb_desc *ptcb_desc,
dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0)+4;
dwtmp += ptcb_desc->empkt_len[1];
}
- SET_EARLYMODE_LEN0(virtualaddress, dwtmp);
+ set_earlymode_len0(virtualaddress, dwtmp);
if (ptcb_desc->empkt_num <= 3) {
dwtmp = ptcb_desc->empkt_len[2];
@@ -356,7 +356,7 @@ static void _rtl8821ae_insert_emcontent(struct rtl_tcb_desc *ptcb_desc,
dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0)+4;
dwtmp += ptcb_desc->empkt_len[3];
}
- SET_EARLYMODE_LEN1(virtualaddress, dwtmp);
+ set_earlymode_len1(virtualaddress, dwtmp);
if (ptcb_desc->empkt_num <= 5) {
dwtmp = ptcb_desc->empkt_len[4];
} else {
@@ -364,8 +364,8 @@ static void _rtl8821ae_insert_emcontent(struct rtl_tcb_desc *ptcb_desc,
dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0)+4;
dwtmp += ptcb_desc->empkt_len[5];
}
- SET_EARLYMODE_LEN2_1(virtualaddress, dwtmp & 0xF);
- SET_EARLYMODE_LEN2_2(virtualaddress, dwtmp >> 4);
+ set_earlymode_len2_1(virtualaddress, dwtmp & 0xF);
+ set_earlymode_len2_2(virtualaddress, dwtmp >> 4);
if (ptcb_desc->empkt_num <= 7) {
dwtmp = ptcb_desc->empkt_len[6];
} else {
@@ -373,7 +373,7 @@ static void _rtl8821ae_insert_emcontent(struct rtl_tcb_desc *ptcb_desc,
dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0)+4;
dwtmp += ptcb_desc->empkt_len[7];
}
- SET_EARLYMODE_LEN3(virtualaddress, dwtmp);
+ set_earlymode_len3(virtualaddress, dwtmp);
if (ptcb_desc->empkt_num <= 9) {
dwtmp = ptcb_desc->empkt_len[8];
} else {
@@ -381,15 +381,15 @@ static void _rtl8821ae_insert_emcontent(struct rtl_tcb_desc *ptcb_desc,
dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0)+4;
dwtmp += ptcb_desc->empkt_len[9];
}
- SET_EARLYMODE_LEN4(virtualaddress, dwtmp);
+ set_earlymode_len4(virtualaddress, dwtmp);
}
-static bool rtl8821ae_get_rxdesc_is_ht(struct ieee80211_hw *hw, u8 *pdesc)
+static bool rtl8821ae_get_rxdesc_is_ht(struct ieee80211_hw *hw, __le32 *pdesc)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 rx_rate = 0;
- rx_rate = GET_RX_DESC_RXMCS(pdesc);
+ rx_rate = get_rx_desc_rxmcs(pdesc);
RT_TRACE(rtlpriv, COMP_RXDESC, DBG_LOUD, "rx_rate=0x%02x.\n", rx_rate);
@@ -398,12 +398,12 @@ static bool rtl8821ae_get_rxdesc_is_ht(struct ieee80211_hw *hw, u8 *pdesc)
return false;
}
-static bool rtl8821ae_get_rxdesc_is_vht(struct ieee80211_hw *hw, u8 *pdesc)
+static bool rtl8821ae_get_rxdesc_is_vht(struct ieee80211_hw *hw, __le32 *pdesc)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 rx_rate = 0;
- rx_rate = GET_RX_DESC_RXMCS(pdesc);
+ rx_rate = get_rx_desc_rxmcs(pdesc);
RT_TRACE(rtlpriv, COMP_RXDESC, DBG_LOUD, "rx_rate=0x%02x.\n", rx_rate);
@@ -412,12 +412,12 @@ static bool rtl8821ae_get_rxdesc_is_vht(struct ieee80211_hw *hw, u8 *pdesc)
return false;
}
-static u8 rtl8821ae_get_rx_vht_nss(struct ieee80211_hw *hw, u8 *pdesc)
+static u8 rtl8821ae_get_rx_vht_nss(struct ieee80211_hw *hw, __le32 *pdesc)
{
u8 rx_rate = 0;
u8 vht_nss = 0;
- rx_rate = GET_RX_DESC_RXMCS(pdesc);
+ rx_rate = get_rx_desc_rxmcs(pdesc);
if ((rx_rate >= DESC_RATEVHT1SS_MCS0) &&
(rx_rate <= DESC_RATEVHT1SS_MCS9))
vht_nss = 1;
@@ -431,30 +431,31 @@ static u8 rtl8821ae_get_rx_vht_nss(struct ieee80211_hw *hw, u8 *pdesc)
bool rtl8821ae_rx_query_desc(struct ieee80211_hw *hw,
struct rtl_stats *status,
struct ieee80211_rx_status *rx_status,
- u8 *pdesc, struct sk_buff *skb)
+ u8 *pdesc8, struct sk_buff *skb)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rx_fwinfo_8821ae *p_drvinfo;
struct ieee80211_hdr *hdr;
u8 wake_match;
- u32 phystatus = GET_RX_DESC_PHYST(pdesc);
+ __le32 *pdesc = (__le32 *)pdesc8;
+ u32 phystatus = get_rx_desc_physt(pdesc);
- status->length = (u16)GET_RX_DESC_PKT_LEN(pdesc);
- status->rx_drvinfo_size = (u8)GET_RX_DESC_DRV_INFO_SIZE(pdesc) *
+ status->length = (u16)get_rx_desc_pkt_len(pdesc);
+ status->rx_drvinfo_size = (u8)get_rx_desc_drv_info_size(pdesc) *
RX_DRV_INFO_SIZE_UNIT;
- status->rx_bufshift = (u8)(GET_RX_DESC_SHIFT(pdesc) & 0x03);
- status->icv = (u16)GET_RX_DESC_ICV(pdesc);
- status->crc = (u16)GET_RX_DESC_CRC32(pdesc);
+ status->rx_bufshift = (u8)(get_rx_desc_shift(pdesc) & 0x03);
+ status->icv = (u16)get_rx_desc_icv(pdesc);
+ status->crc = (u16)get_rx_desc_crc32(pdesc);
status->hwerror = (status->crc | status->icv);
- status->decrypted = !GET_RX_DESC_SWDEC(pdesc);
- status->rate = (u8)GET_RX_DESC_RXMCS(pdesc);
- status->shortpreamble = (u16)GET_RX_DESC_SPLCP(pdesc);
- status->isampdu = (bool)(GET_RX_DESC_PAGGR(pdesc) == 1);
- status->isfirst_ampdu = (bool)(GET_RX_DESC_PAGGR(pdesc) == 1);
- status->timestamp_low = GET_RX_DESC_TSFL(pdesc);
- status->rx_packet_bw = GET_RX_DESC_BW(pdesc);
- status->macid = GET_RX_DESC_MACID(pdesc);
- status->is_short_gi = !(bool)GET_RX_DESC_SPLCP(pdesc);
+ status->decrypted = !get_rx_desc_swdec(pdesc);
+ status->rate = (u8)get_rx_desc_rxmcs(pdesc);
+ status->shortpreamble = (u16)get_rx_desc_splcp(pdesc);
+ status->isampdu = (bool)(get_rx_desc_paggr(pdesc) == 1);
+ status->isfirst_ampdu = (bool)(get_rx_desc_paggr(pdesc) == 1);
+ status->timestamp_low = get_rx_desc_tsfl(pdesc);
+ status->rx_packet_bw = get_rx_desc_bw(pdesc);
+ status->macid = get_rx_desc_macid(pdesc);
+ status->is_short_gi = !(bool)get_rx_desc_splcp(pdesc);
status->is_ht = rtl8821ae_get_rxdesc_is_ht(hw, pdesc);
status->is_vht = rtl8821ae_get_rxdesc_is_vht(hw, pdesc);
status->vht_nss = rtl8821ae_get_rx_vht_nss(hw, pdesc);
@@ -467,16 +468,16 @@ bool rtl8821ae_rx_query_desc(struct ieee80211_hw *hw,
status->is_ht, status->is_vht, status->vht_nss,
status->is_short_gi);
- if (GET_RX_STATUS_DESC_RPT_SEL(pdesc))
+ if (get_rx_status_desc_rpt_sel(pdesc))
status->packet_report_type = C2H_PACKET;
else
status->packet_report_type = NORMAL_RX;
- if (GET_RX_STATUS_DESC_PATTERN_MATCH(pdesc))
+ if (get_rx_status_desc_pattern_match(pdesc))
wake_match = BIT(2);
- else if (GET_RX_STATUS_DESC_MAGIC_MATCH(pdesc))
+ else if (get_rx_status_desc_magic_match(pdesc))
wake_match = BIT(1);
- else if (GET_RX_STATUS_DESC_UNICAST_MATCH(pdesc))
+ else if (get_rx_status_desc_unicast_match(pdesc))
wake_match = BIT(0);
else
wake_match = 0;
@@ -543,9 +544,9 @@ bool rtl8821ae_rx_query_desc(struct ieee80211_hw *hw,
rx_status->signal = status->recvsignalpower + 10;
if (status->packet_report_type == TX_REPORT2) {
status->macid_valid_entry[0] =
- GET_RX_RPT2_DESC_MACID_VALID_1(pdesc);
+ get_rx_rpt2_desc_macid_valid_1(pdesc);
status->macid_valid_entry[1] =
- GET_RX_RPT2_DESC_MACID_VALID_2(pdesc);
+ get_rx_rpt2_desc_macid_valid_2(pdesc);
}
return true;
}
@@ -656,7 +657,7 @@ static u8 rtl8821ae_sc_mapping(struct ieee80211_hw *hw,
}
void rtl8821ae_tx_fill_desc(struct ieee80211_hw *hw,
- struct ieee80211_hdr *hdr, u8 *pdesc_tx, u8 *txbd,
+ struct ieee80211_hdr *hdr, u8 *pdesc8, u8 *txbd,
struct ieee80211_tx_info *info,
struct ieee80211_sta *sta,
struct sk_buff *skb,
@@ -667,7 +668,6 @@ void rtl8821ae_tx_fill_desc(struct ieee80211_hw *hw,
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
struct rtlwifi_tx_info *tx_info = rtl_tx_skb_cb_info(skb);
- u8 *pdesc = (u8 *)pdesc_tx;
u16 seq_number;
__le16 fc = hdr->frame_control;
unsigned int buf_len = 0;
@@ -679,6 +679,8 @@ void rtl8821ae_tx_fill_desc(struct ieee80211_hw *hw,
cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)) == 0);
dma_addr_t mapping;
u8 short_gi = 0;
+ bool tmp_bool;
+ __le32 *pdesc = (__le32 *)pdesc8;
seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
rtl_get_tcb_desc(hw, info, sta, skb, ptcb_desc);
@@ -695,69 +697,70 @@ void rtl8821ae_tx_fill_desc(struct ieee80211_hw *hw,
"DMA mapping error\n");
return;
}
- CLEAR_PCI_TX_DESC_CONTENT(pdesc, sizeof(struct tx_desc_8821ae));
+ clear_pci_tx_desc_content(pdesc, sizeof(struct tx_desc_8821ae));
if (ieee80211_is_nullfunc(fc) || ieee80211_is_ctl(fc)) {
firstseg = true;
lastseg = true;
}
if (firstseg) {
if (rtlhal->earlymode_enable) {
- SET_TX_DESC_PKT_OFFSET(pdesc, 1);
- SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN +
+ set_tx_desc_pkt_offset(pdesc, 1);
+ set_tx_desc_offset(pdesc, USB_HWDESC_HEADER_LEN +
EM_HDR_LEN);
if (ptcb_desc->empkt_num) {
RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
"Insert 8 byte.pTcb->EMPktNum:%d\n",
ptcb_desc->empkt_num);
- _rtl8821ae_insert_emcontent(ptcb_desc,
- (u8 *)(skb->data));
+ rtl8821ae_insert_emcontent(ptcb_desc,
+ (__le32 *)skb->data);
}
} else {
- SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
+ set_tx_desc_offset(pdesc, USB_HWDESC_HEADER_LEN);
}
/* ptcb_desc->use_driver_rate = true; */
- SET_TX_DESC_TX_RATE(pdesc, ptcb_desc->hw_rate);
+ set_tx_desc_tx_rate(pdesc, ptcb_desc->hw_rate);
if (ptcb_desc->hw_rate > DESC_RATEMCS0)
short_gi = (ptcb_desc->use_shortgi) ? 1 : 0;
else
short_gi = (ptcb_desc->use_shortpreamble) ? 1 : 0;
- SET_TX_DESC_DATA_SHORTGI(pdesc, short_gi);
+ set_tx_desc_data_shortgi(pdesc, short_gi);
if (info->flags & IEEE80211_TX_CTL_AMPDU) {
- SET_TX_DESC_AGG_ENABLE(pdesc, 1);
- SET_TX_DESC_MAX_AGG_NUM(pdesc, 0x1f);
+ set_tx_desc_agg_enable(pdesc, 1);
+ set_tx_desc_max_agg_num(pdesc, 0x1f);
}
- SET_TX_DESC_SEQ(pdesc, seq_number);
- SET_TX_DESC_RTS_ENABLE(pdesc, ((ptcb_desc->rts_enable &&
+ set_tx_desc_seq(pdesc, seq_number);
+ set_tx_desc_rts_enable(pdesc,
+ ((ptcb_desc->rts_enable &&
!ptcb_desc->cts_enable) ? 1 : 0));
- SET_TX_DESC_HW_RTS_ENABLE(pdesc, 0);
- SET_TX_DESC_CTS2SELF(pdesc, ((ptcb_desc->cts_enable) ? 1 : 0));
+ set_tx_desc_hw_rts_enable(pdesc, 0);
+ set_tx_desc_cts2self(pdesc, ((ptcb_desc->cts_enable) ? 1 : 0));
- SET_TX_DESC_RTS_RATE(pdesc, ptcb_desc->rts_rate);
- SET_TX_DESC_RTS_SC(pdesc, ptcb_desc->rts_sc);
- SET_TX_DESC_RTS_SHORT(pdesc,
- ((ptcb_desc->rts_rate <= DESC_RATE54M) ?
- (ptcb_desc->rts_use_shortpreamble ? 1 : 0) :
- (ptcb_desc->rts_use_shortgi ? 1 : 0)));
+ set_tx_desc_rts_rate(pdesc, ptcb_desc->rts_rate);
+ set_tx_desc_rts_sc(pdesc, ptcb_desc->rts_sc);
+ tmp_bool = ((ptcb_desc->rts_rate <= DESC_RATE54M) ?
+ (ptcb_desc->rts_use_shortpreamble ? 1 : 0) :
+ (ptcb_desc->rts_use_shortgi ? 1 : 0));
+ set_tx_desc_rts_short(pdesc, tmp_bool);
if (ptcb_desc->tx_enable_sw_calc_duration)
- SET_TX_DESC_NAV_USE_HDR(pdesc, 1);
+ set_tx_desc_nav_use_hdr(pdesc, 1);
- SET_TX_DESC_DATA_BW(pdesc,
- rtl8821ae_bw_mapping(hw, ptcb_desc));
+ set_tx_desc_data_bw(pdesc,
+ rtl8821ae_bw_mapping(hw, ptcb_desc));
- SET_TX_DESC_TX_SUB_CARRIER(pdesc,
- rtl8821ae_sc_mapping(hw, ptcb_desc));
+ set_tx_desc_tx_sub_carrier(pdesc,
+ rtl8821ae_sc_mapping(hw, ptcb_desc));
- SET_TX_DESC_LINIP(pdesc, 0);
- SET_TX_DESC_PKT_SIZE(pdesc, (u16)skb_len);
+ set_tx_desc_linip(pdesc, 0);
+ set_tx_desc_pkt_size(pdesc, (u16)skb_len);
if (sta) {
u8 ampdu_density = sta->ht_cap.ampdu_density;
- SET_TX_DESC_AMPDU_DENSITY(pdesc, ampdu_density);
+ set_tx_desc_ampdu_density(pdesc, ampdu_density);
}
if (info->control.hw_key) {
struct ieee80211_key_conf *keyconf =
@@ -766,69 +769,70 @@ void rtl8821ae_tx_fill_desc(struct ieee80211_hw *hw,
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
case WLAN_CIPHER_SUITE_TKIP:
- SET_TX_DESC_SEC_TYPE(pdesc, 0x1);
+ set_tx_desc_sec_type(pdesc, 0x1);
break;
case WLAN_CIPHER_SUITE_CCMP:
- SET_TX_DESC_SEC_TYPE(pdesc, 0x3);
+ set_tx_desc_sec_type(pdesc, 0x3);
break;
default:
- SET_TX_DESC_SEC_TYPE(pdesc, 0x0);
+ set_tx_desc_sec_type(pdesc, 0x0);
break;
}
}
- SET_TX_DESC_QUEUE_SEL(pdesc, fw_qsel);
- SET_TX_DESC_DATA_RATE_FB_LIMIT(pdesc, 0x1F);
- SET_TX_DESC_RTS_RATE_FB_LIMIT(pdesc, 0xF);
- SET_TX_DESC_DISABLE_FB(pdesc, ptcb_desc->disable_ratefallback ?
+ set_tx_desc_queue_sel(pdesc, fw_qsel);
+ set_tx_desc_data_rate_fb_limit(pdesc, 0x1F);
+ set_tx_desc_rts_rate_fb_limit(pdesc, 0xF);
+ set_tx_desc_disable_fb(pdesc, ptcb_desc->disable_ratefallback ?
1 : 0);
- SET_TX_DESC_USE_RATE(pdesc, ptcb_desc->use_driver_rate ? 1 : 0);
+ set_tx_desc_use_rate(pdesc, ptcb_desc->use_driver_rate ? 1 : 0);
if (ieee80211_is_data_qos(fc)) {
if (mac->rdg_en) {
RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
"Enable RDG function.\n");
- SET_TX_DESC_RDG_ENABLE(pdesc, 1);
- SET_TX_DESC_HTC(pdesc, 1);
+ set_tx_desc_rdg_enable(pdesc, 1);
+ set_tx_desc_htc(pdesc, 1);
}
}
/* tx report */
- rtl_set_tx_report(ptcb_desc, pdesc, hw, tx_info);
+ rtl_set_tx_report(ptcb_desc, pdesc8, hw, tx_info);
}
- SET_TX_DESC_FIRST_SEG(pdesc, (firstseg ? 1 : 0));
- SET_TX_DESC_LAST_SEG(pdesc, (lastseg ? 1 : 0));
- SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)buf_len);
- SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
+ set_tx_desc_first_seg(pdesc, (firstseg ? 1 : 0));
+ set_tx_desc_last_seg(pdesc, (lastseg ? 1 : 0));
+ set_tx_desc_tx_buffer_size(pdesc, buf_len);
+ set_tx_desc_tx_buffer_address(pdesc, mapping);
/* if (rtlpriv->dm.useramask) { */
if (1) {
- SET_TX_DESC_RATE_ID(pdesc, ptcb_desc->ratr_index);
- SET_TX_DESC_MACID(pdesc, ptcb_desc->mac_id);
+ set_tx_desc_rate_id(pdesc, ptcb_desc->ratr_index);
+ set_tx_desc_macid(pdesc, ptcb_desc->mac_id);
} else {
- SET_TX_DESC_RATE_ID(pdesc, 0xC + ptcb_desc->ratr_index);
- SET_TX_DESC_MACID(pdesc, ptcb_desc->mac_id);
+ set_tx_desc_rate_id(pdesc, 0xC + ptcb_desc->ratr_index);
+ set_tx_desc_macid(pdesc, ptcb_desc->mac_id);
}
if (!ieee80211_is_data_qos(fc)) {
- SET_TX_DESC_HWSEQ_EN(pdesc, 1);
- SET_TX_DESC_HWSEQ_SEL(pdesc, 0);
+ set_tx_desc_hwseq_en(pdesc, 1);
+ set_tx_desc_hwseq_sel(pdesc, 0);
}
- SET_TX_DESC_MORE_FRAG(pdesc, (lastseg ? 0 : 1));
+ set_tx_desc_more_frag(pdesc, (lastseg ? 0 : 1));
if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) ||
is_broadcast_ether_addr(ieee80211_get_DA(hdr))) {
- SET_TX_DESC_BMC(pdesc, 1);
+ set_tx_desc_bmc(pdesc, 1);
}
- rtl8821ae_dm_set_tx_ant_by_tx_info(hw, pdesc, ptcb_desc->mac_id);
+ rtl8821ae_dm_set_tx_ant_by_tx_info(hw, pdesc8, ptcb_desc->mac_id);
RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "\n");
}
void rtl8821ae_tx_fill_cmddesc(struct ieee80211_hw *hw,
- u8 *pdesc, bool firstseg,
+ u8 *pdesc8, bool firstseg,
bool lastseg, struct sk_buff *skb)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
u8 fw_queue = QSLT_BEACON;
+ __le32 *pdesc = (__le32 *)pdesc8;
dma_addr_t mapping = pci_map_single(rtlpci->pdev,
skb->data, skb->len,
@@ -839,48 +843,50 @@ void rtl8821ae_tx_fill_cmddesc(struct ieee80211_hw *hw,
"DMA mapping error\n");
return;
}
- CLEAR_PCI_TX_DESC_CONTENT(pdesc, TX_DESC_SIZE);
+ clear_pci_tx_desc_content(pdesc, TX_DESC_SIZE);
- SET_TX_DESC_FIRST_SEG(pdesc, 1);
- SET_TX_DESC_LAST_SEG(pdesc, 1);
+ set_tx_desc_first_seg(pdesc, 1);
+ set_tx_desc_last_seg(pdesc, 1);
- SET_TX_DESC_PKT_SIZE((u8 *)pdesc, (u16)(skb->len));
+ set_tx_desc_pkt_size(pdesc, (u16)(skb->len));
- SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
+ set_tx_desc_offset(pdesc, USB_HWDESC_HEADER_LEN);
- SET_TX_DESC_USE_RATE(pdesc, 1);
- SET_TX_DESC_TX_RATE(pdesc, DESC_RATE1M);
- SET_TX_DESC_DISABLE_FB(pdesc, 1);
+ set_tx_desc_use_rate(pdesc, 1);
+ set_tx_desc_tx_rate(pdesc, DESC_RATE1M);
+ set_tx_desc_disable_fb(pdesc, 1);
- SET_TX_DESC_DATA_BW(pdesc, 0);
+ set_tx_desc_data_bw(pdesc, 0);
- SET_TX_DESC_HWSEQ_EN(pdesc, 1);
+ set_tx_desc_hwseq_en(pdesc, 1);
- SET_TX_DESC_QUEUE_SEL(pdesc, fw_queue);
+ set_tx_desc_queue_sel(pdesc, fw_queue);
- SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)(skb->len));
+ set_tx_desc_tx_buffer_size(pdesc, skb->len);
- SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
+ set_tx_desc_tx_buffer_address(pdesc, mapping);
- SET_TX_DESC_MACID(pdesc, 0);
+ set_tx_desc_macid(pdesc, 0);
- SET_TX_DESC_OWN(pdesc, 1);
+ set_tx_desc_own(pdesc, 1);
RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD,
"H2C Tx Cmd Content\n",
- pdesc, TX_DESC_SIZE);
+ pdesc8, TX_DESC_SIZE);
}
-void rtl8821ae_set_desc(struct ieee80211_hw *hw, u8 *pdesc,
+void rtl8821ae_set_desc(struct ieee80211_hw *hw, u8 *pdesc8,
bool istx, u8 desc_name, u8 *val)
{
+ __le32 *pdesc = (__le32 *)pdesc8;
+
if (istx) {
switch (desc_name) {
case HW_DESC_OWN:
- SET_TX_DESC_OWN(pdesc, 1);
+ set_tx_desc_own(pdesc, 1);
break;
case HW_DESC_TX_NEXTDESC_ADDR:
- SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *)val);
+ set_tx_desc_next_desc_address(pdesc, *(u32 *)val);
break;
default:
WARN_ONCE(true,
@@ -891,16 +897,16 @@ void rtl8821ae_set_desc(struct ieee80211_hw *hw, u8 *pdesc,
} else {
switch (desc_name) {
case HW_DESC_RXOWN:
- SET_RX_DESC_OWN(pdesc, 1);
+ set_rx_desc_own(pdesc, 1);
break;
case HW_DESC_RXBUFF_ADDR:
- SET_RX_DESC_BUFF_ADDR(pdesc, *(u32 *)val);
+ set_rx_desc_buff_addr(pdesc, *(u32 *)val);
break;
case HW_DESC_RXPKT_LEN:
- SET_RX_DESC_PKT_LEN(pdesc, *(u32 *)val);
+ set_rx_desc_pkt_len(pdesc, *(u32 *)val);
break;
case HW_DESC_RXERO:
- SET_RX_DESC_EOR(pdesc, 1);
+ set_rx_desc_eor(pdesc, 1);
break;
default:
WARN_ONCE(true,
@@ -912,17 +918,18 @@ void rtl8821ae_set_desc(struct ieee80211_hw *hw, u8 *pdesc,
}
u64 rtl8821ae_get_desc(struct ieee80211_hw *hw,
- u8 *pdesc, bool istx, u8 desc_name)
+ u8 *pdesc8, bool istx, u8 desc_name)
{
u32 ret = 0;
+ __le32 *pdesc = (__le32 *)pdesc8;
if (istx) {
switch (desc_name) {
case HW_DESC_OWN:
- ret = GET_TX_DESC_OWN(pdesc);
+ ret = get_tx_desc_own(pdesc);
break;
case HW_DESC_TXBUFF_ADDR:
- ret = GET_TX_DESC_TX_BUFFER_ADDRESS(pdesc);
+ ret = get_tx_desc_tx_buffer_address(pdesc);
break;
default:
WARN_ONCE(true,
@@ -933,13 +940,13 @@ u64 rtl8821ae_get_desc(struct ieee80211_hw *hw,
} else {
switch (desc_name) {
case HW_DESC_OWN:
- ret = GET_RX_DESC_OWN(pdesc);
+ ret = get_rx_desc_own(pdesc);
break;
case HW_DESC_RXPKT_LEN:
- ret = GET_RX_DESC_PKT_LEN(pdesc);
+ ret = get_rx_desc_pkt_len(pdesc);
break;
case HW_DESC_RXBUFF_ADDR:
- ret = GET_RX_DESC_BUFF_ADDR(pdesc);
+ ret = get_rx_desc_buff_addr(pdesc);
break;
default:
WARN_ONCE(true,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h
index a3feecad645d..81951f0c80b6 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h
@@ -14,341 +14,385 @@
#define USB_HWDESC_HEADER_LEN 40
#define CRCLENGTH 4
-#define SET_TX_DESC_PKT_SIZE(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc, 0, 16, __val)
-#define SET_TX_DESC_OFFSET(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc, 16, 8, __val)
-#define SET_TX_DESC_BMC(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc, 24, 1, __val)
-#define SET_TX_DESC_HTC(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc, 25, 1, __val)
-#define SET_TX_DESC_LAST_SEG(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc, 26, 1, __val)
-#define SET_TX_DESC_FIRST_SEG(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc, 27, 1, __val)
-#define SET_TX_DESC_LINIP(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc, 28, 1, __val)
-#define SET_TX_DESC_NO_ACM(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc, 29, 1, __val)
-#define SET_TX_DESC_GF(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc, 30, 1, __val)
-#define SET_TX_DESC_OWN(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc, 31, 1, __val)
-
-#define GET_TX_DESC_PKT_SIZE(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 0, 16)
-#define GET_TX_DESC_OFFSET(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 16, 8)
-#define GET_TX_DESC_BMC(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 24, 1)
-#define GET_TX_DESC_HTC(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 25, 1)
-#define GET_TX_DESC_LAST_SEG(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 26, 1)
-#define GET_TX_DESC_FIRST_SEG(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 27, 1)
-#define GET_TX_DESC_LINIP(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 28, 1)
-#define GET_TX_DESC_NO_ACM(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 29, 1)
-#define GET_TX_DESC_GF(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 30, 1)
-#define GET_TX_DESC_OWN(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 31, 1)
-
-#define SET_TX_DESC_MACID(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+4, 0, 7, __val)
-#define SET_TX_DESC_QUEUE_SEL(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+4, 8, 5, __val)
-#define SET_TX_DESC_RDG_NAV_EXT(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+4, 13, 1, __val)
-#define SET_TX_DESC_LSIG_TXOP_EN(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+4, 14, 1, __val)
-#define SET_TX_DESC_PIFS(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+4, 15, 1, __val)
-#define SET_TX_DESC_RATE_ID(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+4, 16, 5, __val)
-#define SET_TX_DESC_EN_DESC_ID(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+4, 21, 1, __val)
-#define SET_TX_DESC_SEC_TYPE(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+4, 22, 2, __val)
-#define SET_TX_DESC_PKT_OFFSET(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+4, 24, 5, __val)
-
-#define SET_TX_DESC_PAID(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+8, 0, 9, __val)
-#define SET_TX_DESC_CCA_RTS(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+8, 10, 2, __val)
-#define SET_TX_DESC_AGG_ENABLE(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+8, 12, 1, __val)
-#define SET_TX_DESC_RDG_ENABLE(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+8, 13, 1, __val)
-#define SET_TX_DESC_BAR_RTY_TH(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+8, 14, 2, __val)
-#define SET_TX_DESC_AGG_BREAK(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+8, 16, 1, __val)
-#define SET_TX_DESC_MORE_FRAG(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+8, 17, 1, __val)
-#define SET_TX_DESC_RAW(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+8, 18, 1, __val)
-#define SET_TX_DESC_SPE_RPT(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE((__pdesc) + 8, 19, 1, __val)
-#define SET_TX_DESC_AMPDU_DENSITY(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+8, 20, 3, __val)
-#define SET_TX_DESC_BT_INT(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+8, 23, 1, __val)
-#define SET_TX_DESC_GID(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+8, 24, 6, __val)
-
-#define SET_TX_DESC_WHEADER_LEN(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+12, 0, 4, __val)
-#define SET_TX_DESC_CHK_EN(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+12, 4, 1, __val)
-#define SET_TX_DESC_EARLY_MODE(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+12, 5, 1, __val)
-#define SET_TX_DESC_HWSEQ_SEL(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+12, 6, 2, __val)
-#define SET_TX_DESC_USE_RATE(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+12, 8, 1, __val)
-#define SET_TX_DESC_DISABLE_RTS_FB(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+12, 9, 1, __val)
-#define SET_TX_DESC_DISABLE_FB(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+12, 10, 1, __val)
-#define SET_TX_DESC_CTS2SELF(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+12, 11, 1, __val)
-#define SET_TX_DESC_RTS_ENABLE(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+12, 12, 1, __val)
-#define SET_TX_DESC_HW_RTS_ENABLE(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+12, 13, 1, __val)
-#define SET_TX_DESC_NAV_USE_HDR(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+12, 15, 1, __val)
-#define SET_TX_DESC_USE_MAX_LEN(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+12, 16, 1, __val)
-#define SET_TX_DESC_MAX_AGG_NUM(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+12, 17, 5, __val)
-#define SET_TX_DESC_NDPA(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+12, 22, 2, __val)
-#define SET_TX_DESC_AMPDU_MAX_TIME(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+12, 24, 8, __val)
-#define SET_TX_DESC_TX_ANT(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+20, 24, 4, __val)
-
-#define SET_TX_DESC_TX_RATE(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 0, 7, __val)
-#define SET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 8, 5, __val)
-#define SET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 13, 4, __val)
-#define SET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 17, 1, __val)
-#define SET_TX_DESC_DATA_RETRY_LIMIT(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 18, 6, __val)
-#define SET_TX_DESC_RTS_RATE(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 24, 5, __val)
-
-#define SET_TX_DESC_TX_SUB_CARRIER(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+20, 0, 4, __val)
-#define SET_TX_DESC_DATA_SHORTGI(__pdesc, __val) \
- SET_BITS_TO_LE_1BYTE(__pdesc+20, 4, 1, __val)
-#define SET_TX_DESC_DATA_BW(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+20, 5, 2, __val)
-#define SET_TX_DESC_DATA_LDPC(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+20, 7, 1, __val)
-#define SET_TX_DESC_DATA_STBC(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+20, 8, 2, __val)
-#define SET_TX_DESC_CTROL_STBC(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+20, 10, 2, __val)
-#define SET_TX_DESC_RTS_SHORT(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+20, 12, 1, __val)
-#define SET_TX_DESC_RTS_SC(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+20, 13, 4, __val)
-
-#define SET_TX_DESC_SW_DEFINE(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE((__pdesc) + 24, 0, 12, __val)
-#define SET_TX_DESC_ANTSEL_A(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE((__pdesc) + 24, 16, 3, __val)
-#define SET_TX_DESC_ANTSEL_B(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE((__pdesc) + 24, 19, 3, __val)
-#define SET_TX_DESC_ANTSEL_C(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE((__pdesc) + 24, 22, 3, __val)
-#define SET_TX_DESC_ANTSEL_D(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE((__pdesc) + 24, 25, 3, __val)
-#define SET_TX_DESC_MBSSID(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(i(__pdesc) + 24, 12, 4, __val)
-
-#define SET_TX_DESC_TX_BUFFER_SIZE(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE((__pdesc) + 28, 0, 16, __val)
-
-#define GET_TX_DESC_TX_BUFFER_SIZE(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+28, 0, 16)
-
-#define SET_TX_DESC_HWSEQ_EN(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+32, 15, 1, __val)
-
-#define SET_TX_DESC_SEQ(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+36, 12, 12, __val)
-
-#define SET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+40, 0, 32, __val)
-
-#define GET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+40, 0, 32)
-
-#define SET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+48, 0, 32, __val)
-
-#define GET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+48, 0, 32)
-
-#define GET_RX_DESC_PKT_LEN(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 0, 14)
-#define GET_RX_DESC_CRC32(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 14, 1)
-#define GET_RX_DESC_ICV(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 15, 1)
-#define GET_RX_DESC_DRV_INFO_SIZE(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 16, 4)
-#define GET_RX_DESC_SECURITY(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 20, 3)
-#define GET_RX_DESC_QOS(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 23, 1)
-#define GET_RX_DESC_SHIFT(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 24, 2)
-#define GET_RX_DESC_PHYST(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 26, 1)
-#define GET_RX_DESC_SWDEC(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 27, 1)
-#define GET_RX_DESC_LS(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 28, 1)
-#define GET_RX_DESC_FS(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 29, 1)
-#define GET_RX_DESC_EOR(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 30, 1)
-#define GET_RX_DESC_OWN(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 31, 1)
-
-#define SET_RX_DESC_PKT_LEN(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc, 0, 14, __val)
-#define SET_RX_DESC_EOR(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc, 30, 1, __val)
-#define SET_RX_DESC_OWN(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc, 31, 1, __val)
-
-#define GET_RX_DESC_MACID(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 0, 7)
-#define GET_RX_DESC_TID(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 8, 4)
-#define GET_RX_DESC_AMSDU(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 13, 1)
-#define GET_RX_STATUS_DESC_RXID_MATCH(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 14, 1)
-#define GET_RX_DESC_PAGGR(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 15, 1)
-#define GET_RX_DESC_A1_FIT(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 16, 4)
-#define GET_RX_DESC_CHKERR(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 20, 1)
-#define GET_RX_DESC_IPVER(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 21, 1)
-#define GET_RX_STATUS_DESC_IS_TCPUDP(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 22, 1)
-#define GET_RX_STATUS_DESC_CHK_VLD(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 23, 1)
-#define GET_RX_DESC_PAM(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 24, 1)
-#define GET_RX_DESC_PWR(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 25, 1)
-#define GET_RX_DESC_MD(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 26, 1)
-#define GET_RX_DESC_MF(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 27, 1)
-#define GET_RX_DESC_TYPE(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 28, 2)
-#define GET_RX_DESC_MC(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 30, 1)
-#define GET_RX_DESC_BC(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 31, 1)
-
-#define GET_RX_DESC_SEQ(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+8, 0, 12)
-#define GET_RX_DESC_FRAG(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+8, 12, 4)
-#define GET_RX_STATUS_DESC_RX_IS_QOS(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+8, 16, 1)
-#define GET_RX_STATUS_DESC_WLANHD_IV_LEN(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+8, 18, 6)
-#define GET_RX_STATUS_DESC_RPT_SEL(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+8, 28, 1)
-
-#define GET_RX_DESC_RXMCS(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+12, 0, 7)
-#define GET_RX_DESC_HTC(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+12, 10, 1)
-#define GET_RX_STATUS_DESC_EOSP(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+12, 11, 1)
-#define GET_RX_STATUS_DESC_BSSID_FIT(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+12, 12, 2)
-
-#define GET_RX_STATUS_DESC_PATTERN_MATCH(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+12, 29, 1)
-#define GET_RX_STATUS_DESC_UNICAST_MATCH(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+12, 30, 1)
-#define GET_RX_STATUS_DESC_MAGIC_MATCH(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+12, 31, 1)
-
-#define GET_RX_DESC_SPLCP(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 0, 1)
-#define GET_RX_STATUS_DESC_LDPC(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 1, 1)
-#define GET_RX_STATUS_DESC_STBC(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 2, 1)
-#define GET_RX_DESC_BW(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 4, 2)
-
-#define GET_RX_DESC_TSFL(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+20, 0, 32)
-
-#define GET_RX_DESC_BUFF_ADDR(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+24, 0, 32)
-#define GET_RX_DESC_BUFF_ADDR64(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+28, 0, 32)
-
-#define SET_RX_DESC_BUFF_ADDR(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+24, 0, 32, __val)
-#define SET_RX_DESC_BUFF_ADDR64(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+28, 0, 32, __val)
+static inline void set_tx_desc_pkt_size(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, GENMASK(15, 0));
+}
+
+static inline void set_tx_desc_offset(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, GENMASK(23, 16));
+}
+
+static inline void set_tx_desc_bmc(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(24));
+}
+
+static inline void set_tx_desc_htc(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(25));
+}
+
+static inline void set_tx_desc_last_seg(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(26));
+}
+
+static inline void set_tx_desc_first_seg(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(27));
+}
+
+static inline void set_tx_desc_linip(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(28));
+}
+
+static inline void set_tx_desc_own(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(31));
+}
+
+static inline int get_tx_desc_own(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), BIT(31));
+}
+
+static inline void set_tx_desc_macid(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 1, __val, GENMASK(6, 0));
+}
+
+static inline void set_tx_desc_queue_sel(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 1, __val, GENMASK(12, 8));
+}
+
+static inline void set_tx_desc_rate_id(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 1, __val, GENMASK(20, 16));
+}
+
+static inline void set_tx_desc_sec_type(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 1, __val, GENMASK(23, 22));
+}
+
+static inline void set_tx_desc_pkt_offset(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 1, __val, GENMASK(28, 24));
+}
+
+static inline void set_tx_desc_agg_enable(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 2, __val, BIT(12));
+}
+
+static inline void set_tx_desc_rdg_enable(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 2, __val, BIT(13));
+}
+
+static inline void set_tx_desc_more_frag(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 2, __val, BIT(17));
+}
+
+static inline void set_tx_desc_ampdu_density(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 2, __val, GENMASK(22, 20));
+}
+
+static inline void set_tx_desc_hwseq_sel(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 3, __val, GENMASK(7, 6));
+}
+
+static inline void set_tx_desc_use_rate(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 3, __val, BIT(8));
+}
+
+static inline void set_tx_desc_disable_fb(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 3, __val, BIT(10));
+}
+
+static inline void set_tx_desc_cts2self(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 3, __val, BIT(11));
+}
+
+static inline void set_tx_desc_rts_enable(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 3, __val, BIT(12));
+}
+
+static inline void set_tx_desc_hw_rts_enable(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 3, __val, BIT(13));
+}
+
+static inline void set_tx_desc_nav_use_hdr(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 3, __val, BIT(15));
+}
+
+static inline void set_tx_desc_max_agg_num(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 3, __val, GENMASK(21, 17));
+}
+
+static inline void set_tx_desc_tx_ant(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 5, __val, GENMASK(27, 24));
+}
+
+static inline void set_tx_desc_tx_rate(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 4, __val, GENMASK(6, 0));
+}
+
+static inline void set_tx_desc_data_rate_fb_limit(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 4, __val, GENMASK(12, 8));
+}
+
+static inline void set_tx_desc_rts_rate_fb_limit(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 4, __val, GENMASK(16, 13));
+}
+
+static inline void set_tx_desc_rts_rate(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 4, __val, GENMASK(28, 24));
+}
+
+static inline void set_tx_desc_tx_sub_carrier(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 5, __val, GENMASK(3, 0));
+}
+
+static inline void set_tx_desc_data_shortgi(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 5, __val, BIT(4));
+}
+
+static inline void set_tx_desc_data_bw(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 5, __val, GENMASK(6, 5));
+}
+
+static inline void set_tx_desc_rts_short(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 5, __val, BIT(12));
+}
+
+static inline void set_tx_desc_rts_sc(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 5, __val, GENMASK(16, 13));
+}
+
+static inline void set_tx_desc_tx_buffer_size(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 7, __val, GENMASK(15, 0));
+}
+
+static inline void set_tx_desc_hwseq_en(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 8, __val, BIT(15));
+}
+
+static inline void set_tx_desc_seq(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 9, __val, GENMASK(23, 12));
+}
+
+static inline void set_tx_desc_tx_buffer_address(__le32 *__pdesc, u32 __val)
+{
+ *(__pdesc + 10) = cpu_to_le32(__val);
+}
+
+static inline int get_tx_desc_tx_buffer_address(__le32 *__pdesc)
+{
+ return le32_to_cpu(*(__pdesc + 10));
+}
+
+static inline void set_tx_desc_next_desc_address(__le32 *__pdesc, u32 __val)
+{
+ *(__pdesc + 12) = cpu_to_le32(__val);
+}
+
+static inline int get_rx_desc_pkt_len(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), GENMASK(13, 0));
+}
+
+static inline int get_rx_desc_crc32(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), BIT(14));
+}
+
+static inline int get_rx_desc_icv(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), BIT(15));
+}
+
+static inline int get_rx_desc_drv_info_size(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), GENMASK(19, 16));
+}
+
+static inline int get_rx_desc_shift(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), GENMASK(25, 24));
+}
+
+static inline int get_rx_desc_physt(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), BIT(26));
+}
+
+static inline int get_rx_desc_swdec(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), BIT(27));
+}
+
+static inline int get_rx_desc_own(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), BIT(31));
+}
+
+static inline void set_rx_desc_pkt_len(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, GENMASK(13, 0));
+}
+
+static inline void set_rx_desc_eor(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(30));
+}
+
+static inline void set_rx_desc_own(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(31));
+}
+
+static inline int get_rx_desc_macid(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 1), GENMASK(6, 0));
+}
+
+static inline int get_rx_desc_paggr(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 1), BIT(15));
+}
+
+static inline int get_rx_status_desc_rpt_sel(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 1), BIT(28));
+}
+
+static inline int get_rx_desc_rxmcs(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), GENMASK(6, 0));
+}
+
+static inline int get_rx_status_desc_pattern_match(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), BIT(29));
+}
+
+static inline int get_rx_status_desc_unicast_match(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), BIT(30));
+}
+
+static inline int get_rx_status_desc_magic_match(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), BIT(31));
+}
+
+static inline int get_rx_desc_splcp(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 4), BIT(0));
+}
+
+static inline int get_rx_desc_bw(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 4), GENMASK(5, 4));
+}
+
+static inline int get_rx_desc_tsfl(__le32 *__pdesc)
+{
+ return le32_to_cpu(*(__pdesc + 5));
+}
+
+static inline int get_rx_desc_buff_addr(__le32 *__pdesc)
+{
+ return le32_to_cpu(*(__pdesc + 6));
+}
+
+static inline void set_rx_desc_buff_addr(__le32 *__pdesc, u32 __val)
+{
+ *(__pdesc + 6) = cpu_to_le32(__val);
+}
/* TX report 2 format in Rx desc*/
-#define GET_RX_RPT2_DESC_PKT_LEN(__status) \
- LE_BITS_TO_4BYTE(__status, 0, 9)
-#define GET_RX_RPT2_DESC_MACID_VALID_1(__status) \
- LE_BITS_TO_4BYTE(__status+16, 0, 32)
-#define GET_RX_RPT2_DESC_MACID_VALID_2(__status) \
- LE_BITS_TO_4BYTE(__status+20, 0, 32)
-
-#define SET_EARLYMODE_PKTNUM(__paddr, __value) \
- SET_BITS_TO_LE_4BYTE(__paddr, 0, 4, __value)
-#define SET_EARLYMODE_LEN0(__paddr, __value) \
- SET_BITS_TO_LE_4BYTE(__paddr, 4, 12, __value)
-#define SET_EARLYMODE_LEN1(__paddr, __value) \
- SET_BITS_TO_LE_4BYTE(__paddr, 16, 12, __value)
-#define SET_EARLYMODE_LEN2_1(__paddr, __value) \
- SET_BITS_TO_LE_4BYTE(__paddr, 28, 4, __value)
-#define SET_EARLYMODE_LEN2_2(__paddr, __value) \
- SET_BITS_TO_LE_4BYTE(__paddr+4, 0, 8, __value)
-#define SET_EARLYMODE_LEN3(__paddr, __value) \
- SET_BITS_TO_LE_4BYTE(__paddr+4, 8, 12, __value)
-#define SET_EARLYMODE_LEN4(__paddr, __value) \
- SET_BITS_TO_LE_4BYTE(__paddr+4, 20, 12, __value)
-
-#define CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size) \
-do { \
- if (_size > TX_DESC_NEXT_DESC_OFFSET) \
- memset(__pdesc, 0, TX_DESC_NEXT_DESC_OFFSET); \
- else \
- memset(__pdesc, 0, _size); \
-} while (0)
+static inline int get_rx_rpt2_desc_macid_valid_1(__le32 *__status)
+{
+ return le32_to_cpu(*(__status + 4));
+}
+
+static inline int get_rx_rpt2_desc_macid_valid_2(__le32 *__status)
+{
+ return le32_to_cpu(*(__status + 5));
+}
+
+static inline void set_earlymode_pktnum(__le32 *__paddr, u32 __value)
+{
+ le32p_replace_bits(__paddr, __value, GENMASK(3, 0));
+}
+
+static inline void set_earlymode_len0(__le32 *__paddr, u32 __value)
+{
+ le32p_replace_bits(__paddr, __value, GENMASK(15, 4));
+}
+
+static inline void set_earlymode_len1(__le32 *__paddr, u32 __value)
+{
+ le32p_replace_bits(__paddr, __value, GENMASK(27, 16));
+}
+
+static inline void set_earlymode_len2_1(__le32 *__paddr, u32 __value)
+{
+ le32p_replace_bits(__paddr, __value, GENMASK(31, 28));
+}
+
+static inline void set_earlymode_len2_2(__le32 *__paddr, u32 __value)
+{
+ le32p_replace_bits(__paddr, __value, GENMASK(7, 0));
+}
+
+static inline void set_earlymode_len3(__le32 *__paddr, u32 __value)
+{
+ le32p_replace_bits((__paddr + 1), __value, GENMASK(19, 8));
+}
+
+static inline void set_earlymode_len4(__le32 *__paddr, u32 __value)
+{
+ le32p_replace_bits((__paddr + 1), __value, GENMASK(31, 20));
+}
+
+static inline void clear_pci_tx_desc_content(__le32 *__pdesc, int _size)
+{
+ if (_size > TX_DESC_NEXT_DESC_OFFSET)
+ memset(__pdesc, 0, TX_DESC_NEXT_DESC_OFFSET);
+ else
+ memset(__pdesc, 0, _size);
+}
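
The converted header above replaces the byte-offset SET_*/GET_* macros with typed inline accessors that treat the descriptor as an array of __le32 words and use the bitfield helpers from <linux/bitfield.h> (which this series adds to wifi.h). A minimal standalone sketch of the same pattern, not part of the patch; the field layout (bits 15..8 of the second dword) is hypothetical and chosen only for illustration:

    #include <linux/bits.h>
    #include <linux/bitfield.h>
    #include <linux/types.h>

    /* hypothetical field: bits 15..8 of the second descriptor dword */
    static inline void set_example_field(__le32 *desc, u32 val)
    {
            le32p_replace_bits(desc + 1, val, GENMASK(15, 8));
    }

    static inline u32 get_example_field(__le32 *desc)
    {
            return le32_get_bits(*(desc + 1), GENMASK(15, 8));
    }
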
#define RTL8821AE_RX_HAL_IS_CCK_RATE(rxmcs)\
(rxmcs == DESC_RATE1M ||\
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
index e24fda5e9087..34d68dbf4b4c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/usb.c
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
@@ -1064,13 +1064,13 @@ int rtl_usb_probe(struct usb_interface *intf,
rtlpriv->cfg->ops->read_eeprom_info(hw);
err = _rtl_usb_init(hw);
if (err)
- goto error_out;
+ goto error_out2;
rtl_usb_init_sw(hw);
/* Init mac80211 sw */
err = rtl_init_core(hw);
if (err) {
pr_err("Can't allocate sw for mac80211\n");
- goto error_out;
+ goto error_out2;
}
if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
pr_err("Can't init_sw_vars\n");
@@ -1091,6 +1091,7 @@ int rtl_usb_probe(struct usb_interface *intf,
error_out:
rtl_deinit_core(hw);
+error_out2:
_rtl_usb_io_handler_release(hw);
usb_put_dev(udev);
complete(&rtlpriv->firmware_loading_complete);
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
index 518aaa875361..81caa3782ec0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
+++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
@@ -13,6 +13,7 @@
#include <linux/usb.h>
#include <net/mac80211.h>
#include <linux/completion.h>
+#include <linux/bitfield.h>
#include "debug.h"
#define MASKBYTE0 0xff
diff --git a/drivers/net/wireless/realtek/rtw88/hci.h b/drivers/net/wireless/realtek/rtw88/hci.h
index 2676582a85a0..aba329c9d0cf 100644
--- a/drivers/net/wireless/realtek/rtw88/hci.h
+++ b/drivers/net/wireless/realtek/rtw88/hci.h
@@ -97,7 +97,7 @@ static inline void rtw_write8_set(struct rtw_dev *rtwdev, u32 addr, u8 bit)
rtw_write8(rtwdev, addr, val | bit);
}
-static inline void rtw_writ16_set(struct rtw_dev *rtwdev, u32 addr, u16 bit)
+static inline void rtw_write16_set(struct rtw_dev *rtwdev, u32 addr, u16 bit)
{
u16 val;
diff --git a/drivers/net/wireless/realtek/rtw88/mac.c b/drivers/net/wireless/realtek/rtw88/mac.c
index 25a923bc6366..fc14b37d927d 100644
--- a/drivers/net/wireless/realtek/rtw88/mac.c
+++ b/drivers/net/wireless/realtek/rtw88/mac.c
@@ -285,8 +285,14 @@ int rtw_mac_power_on(struct rtw_dev *rtwdev)
goto err;
ret = rtw_mac_power_switch(rtwdev, true);
- if (ret)
+ if (ret == -EALREADY) {
+ rtw_mac_power_switch(rtwdev, false);
+ ret = rtw_mac_power_switch(rtwdev, true);
+ if (ret)
+ goto err;
+ } else if (ret) {
goto err;
+ }
ret = rtw_mac_init_system_cfg(rtwdev);
if (ret)
diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c
index abded63f138d..abe6a148673b 100644
--- a/drivers/net/wireless/realtek/rtw88/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw88/mac80211.c
@@ -85,30 +85,35 @@ static const struct rtw_vif_port rtw_vif_port[] = {
.bssid = {.addr = 0x0618},
.net_type = {.addr = 0x0100, .mask = 0x30000},
.aid = {.addr = 0x06a8, .mask = 0x7ff},
+ .bcn_ctrl = {.addr = 0x0550, .mask = 0xff},
},
[1] = {
.mac_addr = {.addr = 0x0700},
.bssid = {.addr = 0x0708},
.net_type = {.addr = 0x0100, .mask = 0xc0000},
.aid = {.addr = 0x0710, .mask = 0x7ff},
+ .bcn_ctrl = {.addr = 0x0551, .mask = 0xff},
},
[2] = {
.mac_addr = {.addr = 0x1620},
.bssid = {.addr = 0x1628},
.net_type = {.addr = 0x1100, .mask = 0x3},
.aid = {.addr = 0x1600, .mask = 0x7ff},
+ .bcn_ctrl = {.addr = 0x0578, .mask = 0xff},
},
[3] = {
.mac_addr = {.addr = 0x1630},
.bssid = {.addr = 0x1638},
.net_type = {.addr = 0x1100, .mask = 0xc},
.aid = {.addr = 0x1604, .mask = 0x7ff},
+ .bcn_ctrl = {.addr = 0x0579, .mask = 0xff},
},
[4] = {
.mac_addr = {.addr = 0x1640},
.bssid = {.addr = 0x1648},
.net_type = {.addr = 0x1100, .mask = 0x30},
.aid = {.addr = 0x1608, .mask = 0x7ff},
+ .bcn_ctrl = {.addr = 0x057a, .mask = 0xff},
},
};
@@ -120,6 +125,7 @@ static int rtw_ops_add_interface(struct ieee80211_hw *hw,
enum rtw_net_type net_type;
u32 config = 0;
u8 port = 0;
+ u8 bcn_ctrl = 0;
rtwvif->port = port;
rtwvif->vif = vif;
@@ -136,13 +142,16 @@ static int rtw_ops_add_interface(struct ieee80211_hw *hw,
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_MESH_POINT:
net_type = RTW_NET_AP_MODE;
+ bcn_ctrl = BIT_EN_BCN_FUNCTION | BIT_DIS_TSF_UDT;
break;
case NL80211_IFTYPE_ADHOC:
net_type = RTW_NET_AD_HOC;
+ bcn_ctrl = BIT_EN_BCN_FUNCTION | BIT_DIS_TSF_UDT;
break;
case NL80211_IFTYPE_STATION:
default:
net_type = RTW_NET_NO_LINK;
+ bcn_ctrl = BIT_EN_BCN_FUNCTION;
break;
}
@@ -150,6 +159,8 @@ static int rtw_ops_add_interface(struct ieee80211_hw *hw,
config |= PORT_SET_MAC_ADDR;
rtwvif->net_type = net_type;
config |= PORT_SET_NET_TYPE;
+ rtwvif->bcn_ctrl = bcn_ctrl;
+ config |= PORT_SET_BCN_CTRL;
rtw_vif_port_config(rtwdev, rtwvif, config);
mutex_unlock(&rtwdev->mutex);
@@ -173,6 +184,8 @@ static void rtw_ops_remove_interface(struct ieee80211_hw *hw,
config |= PORT_SET_MAC_ADDR;
rtwvif->net_type = RTW_NET_NO_LINK;
config |= PORT_SET_NET_TYPE;
+ rtwvif->bcn_ctrl = 0;
+ config |= PORT_SET_BCN_CTRL;
rtw_vif_port_config(rtwdev, rtwvif, config);
mutex_unlock(&rtwdev->mutex);
@@ -446,20 +459,39 @@ static void rtw_ops_sw_scan_start(struct ieee80211_hw *hw,
{
struct rtw_dev *rtwdev = hw->priv;
struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
+ u32 config = 0;
rtw_leave_lps(rtwdev, rtwvif);
+ mutex_lock(&rtwdev->mutex);
+
+ ether_addr_copy(rtwvif->mac_addr, mac_addr);
+ config |= PORT_SET_MAC_ADDR;
+ rtw_vif_port_config(rtwdev, rtwvif, config);
+
rtw_flag_set(rtwdev, RTW_FLAG_DIG_DISABLE);
rtw_flag_set(rtwdev, RTW_FLAG_SCANNING);
+
+ mutex_unlock(&rtwdev->mutex);
}
static void rtw_ops_sw_scan_complete(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct rtw_dev *rtwdev = hw->priv;
+ struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
+ u32 config = 0;
+
+ mutex_lock(&rtwdev->mutex);
rtw_flag_clear(rtwdev, RTW_FLAG_SCANNING);
rtw_flag_clear(rtwdev, RTW_FLAG_DIG_DISABLE);
+
+ ether_addr_copy(rtwvif->mac_addr, vif->addr);
+ config |= PORT_SET_MAC_ADDR;
+ rtw_vif_port_config(rtwdev, rtwvif, config);
+
+ mutex_unlock(&rtwdev->mutex);
}
const struct ieee80211_ops rtw_ops = {
diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
index b2dac4609138..5a2c06267d07 100644
--- a/drivers/net/wireless/realtek/rtw88/main.c
+++ b/drivers/net/wireless/realtek/rtw88/main.c
@@ -20,7 +20,7 @@ EXPORT_SYMBOL(rtw_debug_mask);
module_param_named(support_lps, rtw_fw_support_lps, bool, 0644);
module_param_named(debug_mask, rtw_debug_mask, uint, 0644);
-MODULE_PARM_DESC(support_lps, "Set Y to enable LPS support");
+MODULE_PARM_DESC(support_lps, "Set Y to enable Leisure Power Save support, turning the radio off between beacons");
MODULE_PARM_DESC(debug_mask, "Debugging mask");
static struct ieee80211_channel rtw_channeltable_2g[] = {
@@ -198,15 +198,20 @@ void rtw_get_channel_params(struct cfg80211_chan_def *chandef,
{
struct ieee80211_channel *channel = chandef->chan;
enum nl80211_chan_width width = chandef->width;
+ u8 *cch_by_bw = chan_params->cch_by_bw;
u32 primary_freq, center_freq;
u8 center_chan;
u8 bandwidth = RTW_CHANNEL_WIDTH_20;
u8 primary_chan_idx = 0;
+ u8 i;
center_chan = channel->hw_value;
primary_freq = channel->center_freq;
center_freq = chandef->center_freq1;
+ /* assign the center channel used while 20M bw is selected */
+ cch_by_bw[RTW_CHANNEL_WIDTH_20] = channel->hw_value;
+
switch (width) {
case NL80211_CHAN_WIDTH_20_NOHT:
case NL80211_CHAN_WIDTH_20:
@@ -233,6 +238,10 @@ void rtw_get_channel_params(struct cfg80211_chan_def *chandef,
primary_chan_idx = 3;
center_chan -= 6;
}
+ /* assign the center channel used
+ * while 40M bw is selected
+ */
+ cch_by_bw[RTW_CHANNEL_WIDTH_40] = center_chan + 4;
} else {
if (center_freq - primary_freq == 10) {
primary_chan_idx = 2;
@@ -241,6 +250,10 @@ void rtw_get_channel_params(struct cfg80211_chan_def *chandef,
primary_chan_idx = 4;
center_chan += 6;
}
+ /* assign the center channel used
+ * while 40M bw is selected
+ */
+ cch_by_bw[RTW_CHANNEL_WIDTH_40] = center_chan - 4;
}
break;
default:
@@ -251,6 +264,12 @@ void rtw_get_channel_params(struct cfg80211_chan_def *chandef,
chan_params->center_chan = center_chan;
chan_params->bandwidth = bandwidth;
chan_params->primary_chan_idx = primary_chan_idx;
+
+ /* assign the center channel used while current bw is selected */
+ cch_by_bw[bandwidth] = center_chan;
+
+ for (i = bandwidth + 1; i <= RTW_MAX_CHANNEL_WIDTH; i++)
+ cch_by_bw[i] = 0;
}
void rtw_set_channel(struct rtw_dev *rtwdev)
@@ -260,6 +279,7 @@ void rtw_set_channel(struct rtw_dev *rtwdev)
struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_channel_params ch_param;
u8 center_chan, bandwidth, primary_chan_idx;
+ u8 i;
rtw_get_channel_params(&hw->conf.chandef, &ch_param);
if (WARN(ch_param.center_chan == 0, "Invalid channel\n"))
@@ -272,6 +292,10 @@ void rtw_set_channel(struct rtw_dev *rtwdev)
hal->current_band_width = bandwidth;
hal->current_channel = center_chan;
hal->current_band_type = center_chan > 14 ? RTW_BAND_5G : RTW_BAND_2G;
+
+ for (i = RTW_CHANNEL_WIDTH_20; i <= RTW_MAX_CHANNEL_WIDTH; i++)
+ hal->cch_by_bw[i] = ch_param.cch_by_bw[i];
+
chip->ops->set_channel(rtwdev, center_chan, bandwidth, primary_chan_idx);
rtw_phy_set_tx_power_level(rtwdev, center_chan);
@@ -309,6 +333,11 @@ void rtw_vif_port_config(struct rtw_dev *rtwdev,
mask = rtwvif->conf->aid.mask;
rtw_write32_mask(rtwdev, addr, mask, rtwvif->aid);
}
+ if (config & PORT_SET_BCN_CTRL) {
+ addr = rtwvif->conf->bcn_ctrl.addr;
+ mask = rtwvif->conf->bcn_ctrl.mask;
+ rtw_write8_mask(rtwdev, addr, mask, rtwvif->bcn_ctrl);
+ }
}
static u8 hw_bw_cap_to_bitamp(u8 bw_cap)
@@ -1042,7 +1071,7 @@ static int rtw_chip_board_info_setup(struct rtw_dev *rtwdev)
rtw_phy_setup_phy_cond(rtwdev, 0);
- rtw_hw_init_tx_power(hal);
+ rtw_phy_init_tx_power(rtwdev);
rtw_load_table(rtwdev, rfe_def->phy_pg_tbl);
rtw_load_table(rtwdev, rfe_def->txpwr_lmt_tbl);
rtw_phy_tx_power_by_rate_config(hal);
@@ -1169,6 +1198,7 @@ int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
ieee80211_hw_set(hw, SUPPORTS_PS);
ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
+ ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
@@ -1178,6 +1208,8 @@ int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
WIPHY_FLAG_TDLS_EXTERNAL_SETUP;
+ hw->wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
+
rtw_set_supported_band(hw, rtwdev->chip);
SET_IEEE80211_PERM_ADDR(hw, rtwdev->efuse.addr);
diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
index 00fc77fb9b54..8fa05751836b 100644
--- a/drivers/net/wireless/realtek/rtw88/main.h
+++ b/drivers/net/wireless/realtek/rtw88/main.h
@@ -62,6 +62,9 @@ enum rtw_supported_band {
RTW_BAND_MAX,
};
+/* currently support up to 80M bw */
+#define RTW_MAX_CHANNEL_WIDTH RTW_CHANNEL_WIDTH_80
+
enum rtw_bandwidth {
RTW_CHANNEL_WIDTH_20 = 0,
RTW_CHANNEL_WIDTH_40 = 1,
@@ -286,10 +289,16 @@ enum rtw_trx_desc_rate {
};
enum rtw_regulatory_domains {
- RTW_REGD_FCC = 0,
- RTW_REGD_MKK = 1,
- RTW_REGD_ETSI = 2,
- RTW_REGD_WW = 3,
+ RTW_REGD_FCC = 0,
+ RTW_REGD_MKK = 1,
+ RTW_REGD_ETSI = 2,
+ RTW_REGD_IC = 3,
+ RTW_REGD_KCC = 4,
+ RTW_REGD_ACMA = 5,
+ RTW_REGD_CHILE = 6,
+ RTW_REGD_UKRAINE = 7,
+ RTW_REGD_MEXICO = 8,
+ RTW_REGD_WW,
RTW_REGD_MAX
};
@@ -413,6 +422,10 @@ struct rtw_channel_params {
u8 center_chan;
u8 bandwidth;
u8 primary_chan_idx;
+ /* center channel for each available bandwidth;
+ * entries for bw > current bandwidth are invalid
+ */
+ u8 cch_by_bw[RTW_MAX_CHANNEL_WIDTH + 1];
};
struct rtw_hw_reg {
@@ -431,6 +444,7 @@ enum rtw_vif_port_set {
PORT_SET_BSSID = BIT(1),
PORT_SET_NET_TYPE = BIT(2),
PORT_SET_AID = BIT(3),
+ PORT_SET_BCN_CTRL = BIT(4),
};
struct rtw_vif_port {
@@ -438,6 +452,7 @@ struct rtw_vif_port {
struct rtw_hw_reg bssid;
struct rtw_hw_reg net_type;
struct rtw_hw_reg aid;
+ struct rtw_hw_reg bcn_ctrl;
};
struct rtw_tx_pkt_info {
@@ -591,6 +606,7 @@ struct rtw_vif {
u8 mac_addr[ETH_ALEN];
u8 bssid[ETH_ALEN];
u8 port;
+ u8 bcn_ctrl;
const struct rtw_vif_port *conf;
struct rtw_traffic_stats stats;
@@ -838,6 +854,9 @@ struct rtw_chip_info {
u32 rfe_defs_size;
};
+#define DACK_MSBK_BACKUP_NUM 0xf
+#define DACK_DCK_BACKUP_NUM 0x2
+
struct rtw_dm_info {
u32 cck_fa_cnt;
u32 ofdm_fa_cnt;
@@ -853,6 +872,11 @@ struct rtw_dm_info {
u8 cck_gi_u_bnd;
u8 cck_gi_l_bnd;
+
+ /* backup dack results for each path and I/Q */
+ u32 dack_adck[RTW_RF_PATH_MAX];
+ u16 dack_msbk[RTW_RF_PATH_MAX][2][DACK_MSBK_BACKUP_NUM];
+ u8 dack_dck[RTW_RF_PATH_MAX][2][DACK_DCK_BACKUP_NUM];
};
struct rtw_efuse {
@@ -973,6 +997,12 @@ struct rtw_hal {
u8 current_channel;
u8 current_band_width;
u8 current_band_type;
+
+ /* center channel for each available bandwidth;
+ * entries for bw > current_band_width are invalid
+ */
+ u8 cch_by_bw[RTW_MAX_CHANNEL_WIDTH + 1];
+
u8 sec_ch_offset;
u8 rf_type;
u8 rf_path_num;
diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
index cfe05ba7280d..353871c27779 100644
--- a/drivers/net/wireless/realtek/rtw88/pci.c
+++ b/drivers/net/wireless/realtek/rtw88/pci.c
@@ -487,10 +487,10 @@ static void rtw_pci_stop(struct rtw_dev *rtwdev)
}
static u8 ac_to_hwq[] = {
- [0] = RTW_TX_QUEUE_VO,
- [1] = RTW_TX_QUEUE_VI,
- [2] = RTW_TX_QUEUE_BE,
- [3] = RTW_TX_QUEUE_BK,
+ [IEEE80211_AC_VO] = RTW_TX_QUEUE_VO,
+ [IEEE80211_AC_VI] = RTW_TX_QUEUE_VI,
+ [IEEE80211_AC_BE] = RTW_TX_QUEUE_BE,
+ [IEEE80211_AC_BK] = RTW_TX_QUEUE_BK,
};
static u8 rtw_hw_queue_mapping(struct sk_buff *skb)
@@ -504,6 +504,8 @@ static u8 rtw_hw_queue_mapping(struct sk_buff *skb)
queue = RTW_TX_QUEUE_BCN;
else if (unlikely(ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc)))
queue = RTW_TX_QUEUE_MGMT;
+ else if (WARN_ON_ONCE(q_mapping >= ARRAY_SIZE(ac_to_hwq)))
+ queue = ac_to_hwq[IEEE80211_AC_BE];
else
queue = ac_to_hwq[q_mapping];
diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c
index 404d89432c96..4ec8dcf17361 100644
--- a/drivers/net/wireless/realtek/rtw88/phy.c
+++ b/drivers/net/wireless/realtek/rtw88/phy.c
@@ -65,6 +65,56 @@ static const u32 db_invert_table[12][8] = {
1995262315, 2511886432U, 3162277660U, 3981071706U}
};
+u8 rtw_cck_rates[] = { DESC_RATE1M, DESC_RATE2M, DESC_RATE5_5M, DESC_RATE11M };
+u8 rtw_ofdm_rates[] = {
+ DESC_RATE6M, DESC_RATE9M, DESC_RATE12M,
+ DESC_RATE18M, DESC_RATE24M, DESC_RATE36M,
+ DESC_RATE48M, DESC_RATE54M
+};
+u8 rtw_ht_1s_rates[] = {
+ DESC_RATEMCS0, DESC_RATEMCS1, DESC_RATEMCS2,
+ DESC_RATEMCS3, DESC_RATEMCS4, DESC_RATEMCS5,
+ DESC_RATEMCS6, DESC_RATEMCS7
+};
+u8 rtw_ht_2s_rates[] = {
+ DESC_RATEMCS8, DESC_RATEMCS9, DESC_RATEMCS10,
+ DESC_RATEMCS11, DESC_RATEMCS12, DESC_RATEMCS13,
+ DESC_RATEMCS14, DESC_RATEMCS15
+};
+u8 rtw_vht_1s_rates[] = {
+ DESC_RATEVHT1SS_MCS0, DESC_RATEVHT1SS_MCS1,
+ DESC_RATEVHT1SS_MCS2, DESC_RATEVHT1SS_MCS3,
+ DESC_RATEVHT1SS_MCS4, DESC_RATEVHT1SS_MCS5,
+ DESC_RATEVHT1SS_MCS6, DESC_RATEVHT1SS_MCS7,
+ DESC_RATEVHT1SS_MCS8, DESC_RATEVHT1SS_MCS9
+};
+u8 rtw_vht_2s_rates[] = {
+ DESC_RATEVHT2SS_MCS0, DESC_RATEVHT2SS_MCS1,
+ DESC_RATEVHT2SS_MCS2, DESC_RATEVHT2SS_MCS3,
+ DESC_RATEVHT2SS_MCS4, DESC_RATEVHT2SS_MCS5,
+ DESC_RATEVHT2SS_MCS6, DESC_RATEVHT2SS_MCS7,
+ DESC_RATEVHT2SS_MCS8, DESC_RATEVHT2SS_MCS9
+};
+u8 *rtw_rate_section[RTW_RATE_SECTION_MAX] = {
+ rtw_cck_rates, rtw_ofdm_rates,
+ rtw_ht_1s_rates, rtw_ht_2s_rates,
+ rtw_vht_1s_rates, rtw_vht_2s_rates
+};
+u8 rtw_rate_size[RTW_RATE_SECTION_MAX] = {
+ ARRAY_SIZE(rtw_cck_rates),
+ ARRAY_SIZE(rtw_ofdm_rates),
+ ARRAY_SIZE(rtw_ht_1s_rates),
+ ARRAY_SIZE(rtw_ht_2s_rates),
+ ARRAY_SIZE(rtw_vht_1s_rates),
+ ARRAY_SIZE(rtw_vht_2s_rates)
+};
+static const u8 rtw_cck_size = ARRAY_SIZE(rtw_cck_rates);
+static const u8 rtw_ofdm_size = ARRAY_SIZE(rtw_ofdm_rates);
+static const u8 rtw_ht_1s_size = ARRAY_SIZE(rtw_ht_1s_rates);
+static const u8 rtw_ht_2s_size = ARRAY_SIZE(rtw_ht_2s_rates);
+static const u8 rtw_vht_1s_size = ARRAY_SIZE(rtw_vht_1s_rates);
+static const u8 rtw_vht_2s_size = ARRAY_SIZE(rtw_vht_2s_rates);
+
enum rtw_phy_band_type {
PHY_BAND_2G = 0,
PHY_BAND_5G = 1,
@@ -601,14 +651,19 @@ bool rtw_phy_write_rf_reg(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
direct_addr = base_addr[rf_path] + (addr << 2);
mask &= RFREG_MASK;
- rtw_write32_mask(rtwdev, REG_RSV_CTRL, BITS_RFC_DIRECT, DISABLE_PI);
- rtw_write32_mask(rtwdev, REG_WLRF1, BITS_RFC_DIRECT, DISABLE_PI);
+ if (addr == RF_CFGCH) {
+ rtw_write32_mask(rtwdev, REG_RSV_CTRL, BITS_RFC_DIRECT, DISABLE_PI);
+ rtw_write32_mask(rtwdev, REG_WLRF1, BITS_RFC_DIRECT, DISABLE_PI);
+ }
+
rtw_write32_mask(rtwdev, direct_addr, mask, data);
udelay(1);
- rtw_write32_mask(rtwdev, REG_RSV_CTRL, BITS_RFC_DIRECT, ENABLE_PI);
- rtw_write32_mask(rtwdev, REG_WLRF1, BITS_RFC_DIRECT, ENABLE_PI);
+ if (addr == RF_CFGCH) {
+ rtw_write32_mask(rtwdev, REG_RSV_CTRL, BITS_RFC_DIRECT, ENABLE_PI);
+ rtw_write32_mask(rtwdev, REG_WLRF1, BITS_RFC_DIRECT, ENABLE_PI);
+ }
return true;
}
@@ -714,6 +769,353 @@ void rtw_parse_tbl_phy_cond(struct rtw_dev *rtwdev, const struct rtw_table *tbl)
}
}
+#define bcd_to_dec_pwr_by_rate(val, i) bcd2bin(val >> (i * 8))
+
+static u8 tbl_to_dec_pwr_by_rate(struct rtw_dev *rtwdev, u32 hex, u8 i)
+{
+ if (rtwdev->chip->is_pwr_by_rate_dec)
+ return bcd_to_dec_pwr_by_rate(hex, i);
+
+ return (hex >> (i * 8)) & 0xFF;
+}
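
A worked example of the decode above (the input value is made up): with hex = 0x32221202 and i = 2, the shift selects byte 0x22; a chip with is_pwr_by_rate_dec set interprets it as BCD, so bcd2bin(0x22) yields 22, while other chips take the raw byte (0x22 == 34). A minimal standalone sketch of the BCD branch, not part of the patch:

    #include <linux/bcd.h>
    #include <linux/types.h>

    /* Illustrative only: select byte i of a packed power word and
     * decode it as BCD, mirroring bcd_to_dec_pwr_by_rate() above.
     */
    static u8 example_bcd_byte(u32 hex, u8 i)
    {
            return bcd2bin((hex >> (i * 8)) & 0xff);
    }
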
+
+static void
+rtw_phy_get_rate_values_of_txpwr_by_rate(struct rtw_dev *rtwdev,
+ u32 addr, u32 mask, u32 val, u8 *rate,
+ u8 *pwr_by_rate, u8 *rate_num)
+{
+ int i;
+
+ switch (addr) {
+ case 0xE00:
+ case 0x830:
+ rate[0] = DESC_RATE6M;
+ rate[1] = DESC_RATE9M;
+ rate[2] = DESC_RATE12M;
+ rate[3] = DESC_RATE18M;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0xE04:
+ case 0x834:
+ rate[0] = DESC_RATE24M;
+ rate[1] = DESC_RATE36M;
+ rate[2] = DESC_RATE48M;
+ rate[3] = DESC_RATE54M;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0xE08:
+ rate[0] = DESC_RATE1M;
+ pwr_by_rate[0] = bcd_to_dec_pwr_by_rate(val, 1);
+ *rate_num = 1;
+ break;
+ case 0x86C:
+ if (mask == 0xffffff00) {
+ rate[0] = DESC_RATE2M;
+ rate[1] = DESC_RATE5_5M;
+ rate[2] = DESC_RATE11M;
+ for (i = 1; i < 4; ++i)
+ pwr_by_rate[i - 1] =
+ tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 3;
+ } else if (mask == 0x000000ff) {
+ rate[0] = DESC_RATE11M;
+ pwr_by_rate[0] = bcd_to_dec_pwr_by_rate(val, 0);
+ *rate_num = 1;
+ }
+ break;
+ case 0xE10:
+ case 0x83C:
+ rate[0] = DESC_RATEMCS0;
+ rate[1] = DESC_RATEMCS1;
+ rate[2] = DESC_RATEMCS2;
+ rate[3] = DESC_RATEMCS3;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0xE14:
+ case 0x848:
+ rate[0] = DESC_RATEMCS4;
+ rate[1] = DESC_RATEMCS5;
+ rate[2] = DESC_RATEMCS6;
+ rate[3] = DESC_RATEMCS7;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0xE18:
+ case 0x84C:
+ rate[0] = DESC_RATEMCS8;
+ rate[1] = DESC_RATEMCS9;
+ rate[2] = DESC_RATEMCS10;
+ rate[3] = DESC_RATEMCS11;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0xE1C:
+ case 0x868:
+ rate[0] = DESC_RATEMCS12;
+ rate[1] = DESC_RATEMCS13;
+ rate[2] = DESC_RATEMCS14;
+ rate[3] = DESC_RATEMCS15;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0x838:
+ rate[0] = DESC_RATE1M;
+ rate[1] = DESC_RATE2M;
+ rate[2] = DESC_RATE5_5M;
+ for (i = 1; i < 4; ++i)
+ pwr_by_rate[i - 1] = tbl_to_dec_pwr_by_rate(rtwdev,
+ val, i);
+ *rate_num = 3;
+ break;
+ case 0xC20:
+ case 0xE20:
+ case 0x1820:
+ case 0x1A20:
+ rate[0] = DESC_RATE1M;
+ rate[1] = DESC_RATE2M;
+ rate[2] = DESC_RATE5_5M;
+ rate[3] = DESC_RATE11M;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0xC24:
+ case 0xE24:
+ case 0x1824:
+ case 0x1A24:
+ rate[0] = DESC_RATE6M;
+ rate[1] = DESC_RATE9M;
+ rate[2] = DESC_RATE12M;
+ rate[3] = DESC_RATE18M;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0xC28:
+ case 0xE28:
+ case 0x1828:
+ case 0x1A28:
+ rate[0] = DESC_RATE24M;
+ rate[1] = DESC_RATE36M;
+ rate[2] = DESC_RATE48M;
+ rate[3] = DESC_RATE54M;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0xC2C:
+ case 0xE2C:
+ case 0x182C:
+ case 0x1A2C:
+ rate[0] = DESC_RATEMCS0;
+ rate[1] = DESC_RATEMCS1;
+ rate[2] = DESC_RATEMCS2;
+ rate[3] = DESC_RATEMCS3;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0xC30:
+ case 0xE30:
+ case 0x1830:
+ case 0x1A30:
+ rate[0] = DESC_RATEMCS4;
+ rate[1] = DESC_RATEMCS5;
+ rate[2] = DESC_RATEMCS6;
+ rate[3] = DESC_RATEMCS7;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0xC34:
+ case 0xE34:
+ case 0x1834:
+ case 0x1A34:
+ rate[0] = DESC_RATEMCS8;
+ rate[1] = DESC_RATEMCS9;
+ rate[2] = DESC_RATEMCS10;
+ rate[3] = DESC_RATEMCS11;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0xC38:
+ case 0xE38:
+ case 0x1838:
+ case 0x1A38:
+ rate[0] = DESC_RATEMCS12;
+ rate[1] = DESC_RATEMCS13;
+ rate[2] = DESC_RATEMCS14;
+ rate[3] = DESC_RATEMCS15;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0xC3C:
+ case 0xE3C:
+ case 0x183C:
+ case 0x1A3C:
+ rate[0] = DESC_RATEVHT1SS_MCS0;
+ rate[1] = DESC_RATEVHT1SS_MCS1;
+ rate[2] = DESC_RATEVHT1SS_MCS2;
+ rate[3] = DESC_RATEVHT1SS_MCS3;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0xC40:
+ case 0xE40:
+ case 0x1840:
+ case 0x1A40:
+ rate[0] = DESC_RATEVHT1SS_MCS4;
+ rate[1] = DESC_RATEVHT1SS_MCS5;
+ rate[2] = DESC_RATEVHT1SS_MCS6;
+ rate[3] = DESC_RATEVHT1SS_MCS7;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0xC44:
+ case 0xE44:
+ case 0x1844:
+ case 0x1A44:
+ rate[0] = DESC_RATEVHT1SS_MCS8;
+ rate[1] = DESC_RATEVHT1SS_MCS9;
+ rate[2] = DESC_RATEVHT2SS_MCS0;
+ rate[3] = DESC_RATEVHT2SS_MCS1;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0xC48:
+ case 0xE48:
+ case 0x1848:
+ case 0x1A48:
+ rate[0] = DESC_RATEVHT2SS_MCS2;
+ rate[1] = DESC_RATEVHT2SS_MCS3;
+ rate[2] = DESC_RATEVHT2SS_MCS4;
+ rate[3] = DESC_RATEVHT2SS_MCS5;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0xC4C:
+ case 0xE4C:
+ case 0x184C:
+ case 0x1A4C:
+ rate[0] = DESC_RATEVHT2SS_MCS6;
+ rate[1] = DESC_RATEVHT2SS_MCS7;
+ rate[2] = DESC_RATEVHT2SS_MCS8;
+ rate[3] = DESC_RATEVHT2SS_MCS9;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0xCD8:
+ case 0xED8:
+ case 0x18D8:
+ case 0x1AD8:
+ rate[0] = DESC_RATEMCS16;
+ rate[1] = DESC_RATEMCS17;
+ rate[2] = DESC_RATEMCS18;
+ rate[3] = DESC_RATEMCS19;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0xCDC:
+ case 0xEDC:
+ case 0x18DC:
+ case 0x1ADC:
+ rate[0] = DESC_RATEMCS20;
+ rate[1] = DESC_RATEMCS21;
+ rate[2] = DESC_RATEMCS22;
+ rate[3] = DESC_RATEMCS23;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0xCE0:
+ case 0xEE0:
+ case 0x18E0:
+ case 0x1AE0:
+ rate[0] = DESC_RATEVHT3SS_MCS0;
+ rate[1] = DESC_RATEVHT3SS_MCS1;
+ rate[2] = DESC_RATEVHT3SS_MCS2;
+ rate[3] = DESC_RATEVHT3SS_MCS3;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0xCE4:
+ case 0xEE4:
+ case 0x18E4:
+ case 0x1AE4:
+ rate[0] = DESC_RATEVHT3SS_MCS4;
+ rate[1] = DESC_RATEVHT3SS_MCS5;
+ rate[2] = DESC_RATEVHT3SS_MCS6;
+ rate[3] = DESC_RATEVHT3SS_MCS7;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0xCE8:
+ case 0xEE8:
+ case 0x18E8:
+ case 0x1AE8:
+ rate[0] = DESC_RATEVHT3SS_MCS8;
+ rate[1] = DESC_RATEVHT3SS_MCS9;
+ for (i = 0; i < 2; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 2;
+ break;
+ default:
+ rtw_warn(rtwdev, "invalid tx power index addr 0x%08x\n", addr);
+ break;
+ }
+}
+
+static void rtw_phy_store_tx_power_by_rate(struct rtw_dev *rtwdev,
+ u32 band, u32 rfpath, u32 txnum,
+ u32 regaddr, u32 bitmask, u32 data)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ u8 rate_num = 0;
+ u8 rate;
+ u8 rates[RTW_RF_PATH_MAX] = {0};
+ s8 offset;
+ s8 pwr_by_rate[RTW_RF_PATH_MAX] = {0};
+ int i;
+
+ rtw_phy_get_rate_values_of_txpwr_by_rate(rtwdev, regaddr, bitmask, data,
+ rates, pwr_by_rate, &rate_num);
+
+ if (WARN_ON(rfpath >= RTW_RF_PATH_MAX ||
+ (band != PHY_BAND_2G && band != PHY_BAND_5G) ||
+ rate_num > RTW_RF_PATH_MAX))
+ return;
+
+ for (i = 0; i < rate_num; i++) {
+ offset = pwr_by_rate[i];
+ rate = rates[i];
+ if (band == PHY_BAND_2G)
+ hal->tx_pwr_by_rate_offset_2g[rfpath][rate] = offset;
+ else if (band == PHY_BAND_5G)
+ hal->tx_pwr_by_rate_offset_5g[rfpath][rate] = offset;
+ else
+ continue;
+ }
+}
+
void rtw_parse_tbl_bb_pg(struct rtw_dev *rtwdev, const struct rtw_table *tbl)
{
const struct phy_pg_cfg_pair *p = tbl->data;
@@ -726,12 +1128,142 @@ void rtw_parse_tbl_bb_pg(struct rtw_dev *rtwdev, const struct rtw_table *tbl)
msleep(50);
continue;
}
- phy_store_tx_power_by_rate(rtwdev, p->band, p->rf_path,
- p->tx_num, p->addr, p->bitmask,
- p->data);
+ rtw_phy_store_tx_power_by_rate(rtwdev, p->band, p->rf_path,
+ p->tx_num, p->addr, p->bitmask,
+ p->data);
}
}
+static const u8 rtw_channel_idx_5g[RTW_MAX_CHANNEL_NUM_5G] = {
+ 36, 38, 40, 42, 44, 46, 48, /* Band 1 */
+ 52, 54, 56, 58, 60, 62, 64, /* Band 2 */
+ 100, 102, 104, 106, 108, 110, 112, /* Band 3 */
+ 116, 118, 120, 122, 124, 126, 128, /* Band 3 */
+ 132, 134, 136, 138, 140, 142, 144, /* Band 3 */
+ 149, 151, 153, 155, 157, 159, 161, /* Band 4 */
+ 165, 167, 169, 171, 173, 175, 177}; /* Band 4 */
+
+static int rtw_channel_to_idx(u8 band, u8 channel)
+{
+ int ch_idx;
+ u8 n_channel;
+
+ if (band == PHY_BAND_2G) {
+ ch_idx = channel - 1;
+ n_channel = RTW_MAX_CHANNEL_NUM_2G;
+ } else if (band == PHY_BAND_5G) {
+ n_channel = RTW_MAX_CHANNEL_NUM_5G;
+ for (ch_idx = 0; ch_idx < n_channel; ch_idx++)
+ if (rtw_channel_idx_5g[ch_idx] == channel)
+ break;
+ } else {
+ return -1;
+ }
+
+ if (ch_idx >= n_channel)
+ return -1;
+
+ return ch_idx;
+}
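
For the lookup above, 2.4 GHz channels map directly to indexes (channel 1 -> 0, channel 11 -> 10), while 5 GHz channels are searched in rtw_channel_idx_5g (channel 36 -> 0, channel 52 -> 7); any channel not in the table yields -1. A minimal usage sketch, illustrative only and assuming the static helper above is in scope, mirroring the ch_idx < 0 check done later in rtw_phy_set_tx_power_limit():

    /* Illustrative only: reject unknown channels before indexing a
     * per-channel power-limit table.
     */
    static bool example_channel_is_known(u8 band, u8 channel)
    {
            return rtw_channel_to_idx(band, channel) >= 0;
    }
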
+
+static void rtw_phy_set_tx_power_limit(struct rtw_dev *rtwdev, u8 regd, u8 band,
+ u8 bw, u8 rs, u8 ch, s8 pwr_limit)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ u8 max_power_index = rtwdev->chip->max_power_index;
+ s8 ww;
+ int ch_idx;
+
+ pwr_limit = clamp_t(s8, pwr_limit,
+ -max_power_index, max_power_index);
+ ch_idx = rtw_channel_to_idx(band, ch);
+
+ if (regd >= RTW_REGD_MAX || bw >= RTW_CHANNEL_WIDTH_MAX ||
+ rs >= RTW_RATE_SECTION_MAX || ch_idx < 0) {
+ WARN(1,
+ "wrong txpwr_lmt regd=%u, band=%u bw=%u, rs=%u, ch_idx=%u, pwr_limit=%d\n",
+ regd, band, bw, rs, ch_idx, pwr_limit);
+ return;
+ }
+
+ if (band == PHY_BAND_2G) {
+ hal->tx_pwr_limit_2g[regd][bw][rs][ch_idx] = pwr_limit;
+ ww = hal->tx_pwr_limit_2g[RTW_REGD_WW][bw][rs][ch_idx];
+ ww = min_t(s8, ww, pwr_limit);
+ hal->tx_pwr_limit_2g[RTW_REGD_WW][bw][rs][ch_idx] = ww;
+ } else if (band == PHY_BAND_5G) {
+ hal->tx_pwr_limit_5g[regd][bw][rs][ch_idx] = pwr_limit;
+ ww = hal->tx_pwr_limit_5g[RTW_REGD_WW][bw][rs][ch_idx];
+ ww = min_t(s8, ww, pwr_limit);
+ hal->tx_pwr_limit_5g[RTW_REGD_WW][bw][rs][ch_idx] = ww;
+ }
+}
+
+/* cross-reference 5G power limits if values are not assigned */
+static void
+rtw_xref_5g_txpwr_lmt(struct rtw_dev *rtwdev, u8 regd,
+ u8 bw, u8 ch_idx, u8 rs_ht, u8 rs_vht)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ u8 max_power_index = rtwdev->chip->max_power_index;
+ s8 lmt_ht = hal->tx_pwr_limit_5g[regd][bw][rs_ht][ch_idx];
+ s8 lmt_vht = hal->tx_pwr_limit_5g[regd][bw][rs_vht][ch_idx];
+
+ if (lmt_ht == lmt_vht)
+ return;
+
+ if (lmt_ht == max_power_index)
+ hal->tx_pwr_limit_5g[regd][bw][rs_ht][ch_idx] = lmt_vht;
+
+ else if (lmt_vht == max_power_index)
+ hal->tx_pwr_limit_5g[regd][bw][rs_vht][ch_idx] = lmt_ht;
+}
+
+/* cross-reference power limits for HT and VHT */
+static void
+rtw_xref_txpwr_lmt_by_rs(struct rtw_dev *rtwdev, u8 regd, u8 bw, u8 ch_idx)
+{
+ u8 rs_idx, rs_ht, rs_vht;
+ u8 rs_cmp[2][2] = {{RTW_RATE_SECTION_HT_1S, RTW_RATE_SECTION_VHT_1S},
+ {RTW_RATE_SECTION_HT_2S, RTW_RATE_SECTION_VHT_2S} };
+
+ for (rs_idx = 0; rs_idx < 2; rs_idx++) {
+ rs_ht = rs_cmp[rs_idx][0];
+ rs_vht = rs_cmp[rs_idx][1];
+
+ rtw_xref_5g_txpwr_lmt(rtwdev, regd, bw, ch_idx, rs_ht, rs_vht);
+ }
+}
+
+/* cross-reference power limits for 5G channels */
+static void
+rtw_xref_5g_txpwr_lmt_by_ch(struct rtw_dev *rtwdev, u8 regd, u8 bw)
+{
+ u8 ch_idx;
+
+ for (ch_idx = 0; ch_idx < RTW_MAX_CHANNEL_NUM_5G; ch_idx++)
+ rtw_xref_txpwr_lmt_by_rs(rtwdev, regd, bw, ch_idx);
+}
+
+/* cross-reference power limits for 20/40M bandwidth */
+static void
+rtw_xref_txpwr_lmt_by_bw(struct rtw_dev *rtwdev, u8 regd)
+{
+ u8 bw;
+
+ for (bw = RTW_CHANNEL_WIDTH_20; bw <= RTW_CHANNEL_WIDTH_40; bw++)
+ rtw_xref_5g_txpwr_lmt_by_ch(rtwdev, regd, bw);
+}
+
+/* cross-reference power limits */
+static void rtw_xref_txpwr_lmt(struct rtw_dev *rtwdev)
+{
+ u8 regd;
+
+ for (regd = 0; regd < RTW_REGD_MAX; regd++)
+ rtw_xref_txpwr_lmt_by_bw(rtwdev, regd);
+}
+
void rtw_parse_tbl_txpwr_lmt(struct rtw_dev *rtwdev,
const struct rtw_table *tbl)
{
@@ -741,10 +1273,11 @@ void rtw_parse_tbl_txpwr_lmt(struct rtw_dev *rtwdev,
BUILD_BUG_ON(sizeof(struct txpwr_lmt_cfg_pair) != sizeof(u8) * 6);
for (; p < end; p++) {
- phy_set_tx_power_limit(rtwdev, p->regd, p->band,
- p->bw, p->rs,
- p->ch, p->txpwr_lmt);
+ rtw_phy_set_tx_power_limit(rtwdev, p->regd, p->band,
+ p->bw, p->rs, p->ch, p->txpwr_lmt);
}
+
+ rtw_xref_txpwr_lmt(rtwdev);
}
void rtw_phy_cfg_mac(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
@@ -819,93 +1352,6 @@ void rtw_phy_load_tables(struct rtw_dev *rtwdev)
}
}
-#define bcd_to_dec_pwr_by_rate(val, i) bcd2bin(val >> (i * 8))
-
-#define RTW_MAX_POWER_INDEX 0x3F
-
-u8 rtw_cck_rates[] = { DESC_RATE1M, DESC_RATE2M, DESC_RATE5_5M, DESC_RATE11M };
-u8 rtw_ofdm_rates[] = {
- DESC_RATE6M, DESC_RATE9M, DESC_RATE12M,
- DESC_RATE18M, DESC_RATE24M, DESC_RATE36M,
- DESC_RATE48M, DESC_RATE54M
-};
-u8 rtw_ht_1s_rates[] = {
- DESC_RATEMCS0, DESC_RATEMCS1, DESC_RATEMCS2,
- DESC_RATEMCS3, DESC_RATEMCS4, DESC_RATEMCS5,
- DESC_RATEMCS6, DESC_RATEMCS7
-};
-u8 rtw_ht_2s_rates[] = {
- DESC_RATEMCS8, DESC_RATEMCS9, DESC_RATEMCS10,
- DESC_RATEMCS11, DESC_RATEMCS12, DESC_RATEMCS13,
- DESC_RATEMCS14, DESC_RATEMCS15
-};
-u8 rtw_vht_1s_rates[] = {
- DESC_RATEVHT1SS_MCS0, DESC_RATEVHT1SS_MCS1,
- DESC_RATEVHT1SS_MCS2, DESC_RATEVHT1SS_MCS3,
- DESC_RATEVHT1SS_MCS4, DESC_RATEVHT1SS_MCS5,
- DESC_RATEVHT1SS_MCS6, DESC_RATEVHT1SS_MCS7,
- DESC_RATEVHT1SS_MCS8, DESC_RATEVHT1SS_MCS9
-};
-u8 rtw_vht_2s_rates[] = {
- DESC_RATEVHT2SS_MCS0, DESC_RATEVHT2SS_MCS1,
- DESC_RATEVHT2SS_MCS2, DESC_RATEVHT2SS_MCS3,
- DESC_RATEVHT2SS_MCS4, DESC_RATEVHT2SS_MCS5,
- DESC_RATEVHT2SS_MCS6, DESC_RATEVHT2SS_MCS7,
- DESC_RATEVHT2SS_MCS8, DESC_RATEVHT2SS_MCS9
-};
-
-static u8 rtw_cck_size = ARRAY_SIZE(rtw_cck_rates);
-static u8 rtw_ofdm_size = ARRAY_SIZE(rtw_ofdm_rates);
-static u8 rtw_ht_1s_size = ARRAY_SIZE(rtw_ht_1s_rates);
-static u8 rtw_ht_2s_size = ARRAY_SIZE(rtw_ht_2s_rates);
-static u8 rtw_vht_1s_size = ARRAY_SIZE(rtw_vht_1s_rates);
-static u8 rtw_vht_2s_size = ARRAY_SIZE(rtw_vht_2s_rates);
-u8 *rtw_rate_section[RTW_RATE_SECTION_MAX] = {
- rtw_cck_rates, rtw_ofdm_rates,
- rtw_ht_1s_rates, rtw_ht_2s_rates,
- rtw_vht_1s_rates, rtw_vht_2s_rates
-};
-u8 rtw_rate_size[RTW_RATE_SECTION_MAX] = {
- ARRAY_SIZE(rtw_cck_rates),
- ARRAY_SIZE(rtw_ofdm_rates),
- ARRAY_SIZE(rtw_ht_1s_rates),
- ARRAY_SIZE(rtw_ht_2s_rates),
- ARRAY_SIZE(rtw_vht_1s_rates),
- ARRAY_SIZE(rtw_vht_2s_rates)
-};
-
-static const u8 rtw_channel_idx_5g[RTW_MAX_CHANNEL_NUM_5G] = {
- 36, 38, 40, 42, 44, 46, 48, /* Band 1 */
- 52, 54, 56, 58, 60, 62, 64, /* Band 2 */
- 100, 102, 104, 106, 108, 110, 112, /* Band 3 */
- 116, 118, 120, 122, 124, 126, 128, /* Band 3 */
- 132, 134, 136, 138, 140, 142, 144, /* Band 3 */
- 149, 151, 153, 155, 157, 159, 161, /* Band 4 */
- 165, 167, 169, 171, 173, 175, 177}; /* Band 4 */
-
-static int rtw_channel_to_idx(u8 band, u8 channel)
-{
- int ch_idx;
- u8 n_channel;
-
- if (band == PHY_BAND_2G) {
- ch_idx = channel - 1;
- n_channel = RTW_MAX_CHANNEL_NUM_2G;
- } else if (band == PHY_BAND_5G) {
- n_channel = RTW_MAX_CHANNEL_NUM_5G;
- for (ch_idx = 0; ch_idx < n_channel; ch_idx++)
- if (rtw_channel_idx_5g[ch_idx] == channel)
- break;
- } else {
- return -1;
- }
-
- if (ch_idx >= n_channel)
- return -1;
-
- return ch_idx;
-}
-
static u8 rtw_get_channel_group(u8 channel)
{
switch (channel) {
@@ -995,10 +1441,10 @@ static u8 rtw_get_channel_group(u8 channel)
}
}
-static u8 phy_get_2g_tx_power_index(struct rtw_dev *rtwdev,
- struct rtw_2g_txpwr_idx *pwr_idx_2g,
- enum rtw_bandwidth bandwidth,
- u8 rate, u8 group)
+static u8 rtw_phy_get_2g_tx_power_index(struct rtw_dev *rtwdev,
+ struct rtw_2g_txpwr_idx *pwr_idx_2g,
+ enum rtw_bandwidth bandwidth,
+ u8 rate, u8 group)
{
struct rtw_chip_info *chip = rtwdev->chip;
u8 tx_power;
@@ -1042,10 +1488,10 @@ static u8 phy_get_2g_tx_power_index(struct rtw_dev *rtwdev,
return tx_power;
}
-static u8 phy_get_5g_tx_power_index(struct rtw_dev *rtwdev,
- struct rtw_5g_txpwr_idx *pwr_idx_5g,
- enum rtw_bandwidth bandwidth,
- u8 rate, u8 group)
+static u8 rtw_phy_get_5g_tx_power_index(struct rtw_dev *rtwdev,
+ struct rtw_5g_txpwr_idx *pwr_idx_5g,
+ enum rtw_bandwidth bandwidth,
+ u8 rate, u8 group)
{
struct rtw_chip_info *chip = rtwdev->chip;
u8 tx_power;
@@ -1096,81 +1542,112 @@ static u8 phy_get_5g_tx_power_index(struct rtw_dev *rtwdev,
return tx_power;
}
-/* set tx power level by path for each rates, note that the order of the rates
- * are *very* important, bacause 8822B/8821C combines every four bytes of tx
- * power index into a four-byte power index register, and calls set_tx_agc to
- * write these values into hardware
- */
-static
-void phy_set_tx_power_level_by_path(struct rtw_dev *rtwdev, u8 ch, u8 path)
+static s8 rtw_phy_get_tx_power_limit(struct rtw_dev *rtwdev, u8 band,
+ enum rtw_bandwidth bw, u8 rf_path,
+ u8 rate, u8 channel, u8 regd)
{
struct rtw_hal *hal = &rtwdev->hal;
+ u8 *cch_by_bw = hal->cch_by_bw;
+ s8 power_limit = (s8)rtwdev->chip->max_power_index;
u8 rs;
+ int ch_idx;
+ u8 cur_bw, cur_ch;
+ s8 cur_lmt;
- /* do not need cck rates if we are not in 2.4G */
- if (hal->current_band_type == RTW_BAND_2G)
+ if (regd > RTW_REGD_WW)
+ return power_limit;
+
+ if (rate >= DESC_RATE1M && rate <= DESC_RATE11M)
rs = RTW_RATE_SECTION_CCK;
- else
+ else if (rate >= DESC_RATE6M && rate <= DESC_RATE54M)
rs = RTW_RATE_SECTION_OFDM;
+ else if (rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS7)
+ rs = RTW_RATE_SECTION_HT_1S;
+ else if (rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15)
+ rs = RTW_RATE_SECTION_HT_2S;
+ else if (rate >= DESC_RATEVHT1SS_MCS0 && rate <= DESC_RATEVHT1SS_MCS9)
+ rs = RTW_RATE_SECTION_VHT_1S;
+ else if (rate >= DESC_RATEVHT2SS_MCS0 && rate <= DESC_RATEVHT2SS_MCS9)
+ rs = RTW_RATE_SECTION_VHT_2S;
+ else
+ goto err;
- for (; rs < RTW_RATE_SECTION_MAX; rs++)
- phy_set_tx_power_index_by_rs(rtwdev, ch, path, rs);
-}
+ /* only 20M BW with cck and ofdm */
+ if (rs == RTW_RATE_SECTION_CCK || rs == RTW_RATE_SECTION_OFDM)
+ bw = RTW_CHANNEL_WIDTH_20;
-void rtw_phy_set_tx_power_level(struct rtw_dev *rtwdev, u8 channel)
-{
- struct rtw_chip_info *chip = rtwdev->chip;
- struct rtw_hal *hal = &rtwdev->hal;
- u8 path;
+ /* only 20/40M BW with ht */
+ if (rs == RTW_RATE_SECTION_HT_1S || rs == RTW_RATE_SECTION_HT_2S)
+ bw = min_t(u8, bw, RTW_CHANNEL_WIDTH_40);
- mutex_lock(&hal->tx_power_mutex);
+ /* select min power limit among [20M BW ~ current BW] */
+ for (cur_bw = RTW_CHANNEL_WIDTH_20; cur_bw <= bw; cur_bw++) {
+ cur_ch = cch_by_bw[cur_bw];
- for (path = 0; path < hal->rf_path_num; path++)
- phy_set_tx_power_level_by_path(rtwdev, channel, path);
+ ch_idx = rtw_channel_to_idx(band, cur_ch);
+ if (ch_idx < 0)
+ goto err;
- chip->ops->set_tx_power_index(rtwdev);
- mutex_unlock(&hal->tx_power_mutex);
-}
+ cur_lmt = cur_ch <= RTW_MAX_CHANNEL_NUM_2G ?
+ hal->tx_pwr_limit_2g[regd][cur_bw][rs][ch_idx] :
+ hal->tx_pwr_limit_5g[regd][cur_bw][rs][ch_idx];
+
+ power_limit = min_t(s8, cur_lmt, power_limit);
+ }
+
+ return power_limit;
-s8 phy_get_tx_power_limit(struct rtw_dev *rtwdev, u8 band,
- enum rtw_bandwidth bandwidth, u8 rf_path,
- u8 rate, u8 channel, u8 regd);
+err:
+ WARN(1, "invalid arguments, band=%d, bw=%d, path=%d, rate=%d, ch=%d\n",
+ band, bw, rf_path, rate, channel);
+ return (s8)rtwdev->chip->max_power_index;
+}
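Illustrative only (not part of the patch): the lookup above takes the minimum limit over every bandwidth from 20MHz up to the requested one, each evaluated at its own center channel from cch_by_bw. A standalone sketch of that selection with made-up numbers:

#include <stdio.h>

enum { BW_20, BW_40, BW_80, BW_NUM };

int main(void)
{
	/* made-up per-bandwidth limits for one regd/rate-section/channel */
	signed char lmt[BW_NUM] = { 30, 28, 32 };
	signed char power_limit = 0x3f; /* start from the chip maximum */
	int bw, requested_bw = BW_80;

	/* keep the smallest limit among [20M BW ~ requested BW] */
	for (bw = BW_20; bw <= requested_bw; bw++)
		if (lmt[bw] < power_limit)
			power_limit = lmt[bw];

	printf("power_limit=%d\n", power_limit); /* prints 28 */
	return 0;
}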
-static
-u8 phy_get_tx_power_index(void *adapter, u8 rf_path, u8 rate,
- enum rtw_bandwidth bandwidth, u8 channel, u8 regd)
+void rtw_get_tx_power_params(struct rtw_dev *rtwdev, u8 path, u8 rate, u8 bw,
+ u8 ch, u8 regd, struct rtw_power_params *pwr_param)
{
- struct rtw_dev *rtwdev = adapter;
struct rtw_hal *hal = &rtwdev->hal;
struct rtw_txpwr_idx *pwr_idx;
- u8 tx_power;
- u8 group;
- u8 band;
- s8 offset, limit;
+ u8 group, band;
+ u8 *base = &pwr_param->pwr_base;
+ s8 *offset = &pwr_param->pwr_offset;
+ s8 *limit = &pwr_param->pwr_limit;
- pwr_idx = &rtwdev->efuse.txpwr_idx_table[rf_path];
- group = rtw_get_channel_group(channel);
+ pwr_idx = &rtwdev->efuse.txpwr_idx_table[path];
+ group = rtw_get_channel_group(ch);
/* base power index for 2.4G/5G */
- if (channel <= 14) {
+ if (ch <= 14) {
band = PHY_BAND_2G;
- tx_power = phy_get_2g_tx_power_index(rtwdev,
- &pwr_idx->pwr_idx_2g,
- bandwidth, rate, group);
- offset = hal->tx_pwr_by_rate_offset_2g[rf_path][rate];
+ *base = rtw_phy_get_2g_tx_power_index(rtwdev,
+ &pwr_idx->pwr_idx_2g,
+ bw, rate, group);
+ *offset = hal->tx_pwr_by_rate_offset_2g[path][rate];
} else {
band = PHY_BAND_5G;
- tx_power = phy_get_5g_tx_power_index(rtwdev,
- &pwr_idx->pwr_idx_5g,
- bandwidth, rate, group);
- offset = hal->tx_pwr_by_rate_offset_5g[rf_path][rate];
+ *base = rtw_phy_get_5g_tx_power_index(rtwdev,
+ &pwr_idx->pwr_idx_5g,
+ bw, rate, group);
+ *offset = hal->tx_pwr_by_rate_offset_5g[path][rate];
}
- limit = phy_get_tx_power_limit(rtwdev, band, bandwidth, rf_path,
- rate, channel, regd);
+ *limit = rtw_phy_get_tx_power_limit(rtwdev, band, bw, path,
+ rate, ch, regd);
+}
+
+u8
+rtw_phy_get_tx_power_index(struct rtw_dev *rtwdev, u8 rf_path, u8 rate,
+ enum rtw_bandwidth bandwidth, u8 channel, u8 regd)
+{
+ struct rtw_power_params pwr_param = {0};
+ u8 tx_power;
+ s8 offset;
+
+ rtw_get_tx_power_params(rtwdev, rf_path, rate, bandwidth,
+ channel, regd, &pwr_param);
- if (offset > limit)
- offset = limit;
+ tx_power = pwr_param.pwr_base;
+ offset = min_t(s8, pwr_param.pwr_offset, pwr_param.pwr_limit);
tx_power += offset;
@@ -1180,9 +1657,9 @@ u8 phy_get_tx_power_index(void *adapter, u8 rf_path, u8 rate,
return tx_power;
}
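Illustrative only (not part of the patch): the index returned above is pwr_base plus the smaller of pwr_offset and pwr_limit. A tiny standalone worked example with made-up values:

#include <stdio.h>

static signed char min_s8(signed char a, signed char b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned char pwr_base = 0x28; /* made-up base index from efuse */
	signed char pwr_offset = 4;    /* made-up per-rate offset */
	signed char pwr_limit = 2;     /* made-up regulatory limit */
	unsigned char tx_power;

	/* the offset is capped by the limit before being added to the base */
	tx_power = pwr_base + min_s8(pwr_offset, pwr_limit);
	printf("tx_power=0x%x\n", tx_power); /* prints 0x2a */
	return 0;
}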
-void phy_set_tx_power_index_by_rs(void *adapter, u8 ch, u8 path, u8 rs)
+static void rtw_phy_set_tx_power_index_by_rs(struct rtw_dev *rtwdev,
+ u8 ch, u8 path, u8 rs)
{
- struct rtw_dev *rtwdev = adapter;
struct rtw_hal *hal = &rtwdev->hal;
u8 regd = rtwdev->regd.txpwr_regd;
u8 *rates;
@@ -1200,361 +1677,51 @@ void phy_set_tx_power_index_by_rs(void *adapter, u8 ch, u8 path, u8 rs)
bw = hal->current_band_width;
for (i = 0; i < size; i++) {
rate = rates[i];
- pwr_idx = phy_get_tx_power_index(adapter, path, rate, bw, ch,
- regd);
+ pwr_idx = rtw_phy_get_tx_power_index(rtwdev, path, rate,
+ bw, ch, regd);
hal->tx_pwr_tbl[path][rate] = pwr_idx;
}
}
-static u8 tbl_to_dec_pwr_by_rate(struct rtw_dev *rtwdev, u32 hex, u8 i)
-{
- if (rtwdev->chip->is_pwr_by_rate_dec)
- return bcd_to_dec_pwr_by_rate(hex, i);
- else
- return (hex >> (i * 8)) & 0xFF;
-}
-
-static void phy_get_rate_values_of_txpwr_by_rate(struct rtw_dev *rtwdev,
- u32 addr, u32 mask,
- u32 val, u8 *rate,
- u8 *pwr_by_rate, u8 *rate_num)
+/* set tx power level by path for each rate; note that the order of the rates
+ * is *very* important, because 8822B/8821C combines every four bytes of tx
+ * power index into a four-byte power index register, and calls set_tx_agc to
+ * write these values into hardware
+ */
+static void rtw_phy_set_tx_power_level_by_path(struct rtw_dev *rtwdev,
+ u8 ch, u8 path)
{
- int i;
+ struct rtw_hal *hal = &rtwdev->hal;
+ u8 rs;
- switch (addr) {
- case 0xE00:
- case 0x830:
- rate[0] = DESC_RATE6M;
- rate[1] = DESC_RATE9M;
- rate[2] = DESC_RATE12M;
- rate[3] = DESC_RATE18M;
- for (i = 0; i < 4; ++i)
- pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
- *rate_num = 4;
- break;
- case 0xE04:
- case 0x834:
- rate[0] = DESC_RATE24M;
- rate[1] = DESC_RATE36M;
- rate[2] = DESC_RATE48M;
- rate[3] = DESC_RATE54M;
- for (i = 0; i < 4; ++i)
- pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
- *rate_num = 4;
- break;
- case 0xE08:
- rate[0] = DESC_RATE1M;
- pwr_by_rate[0] = bcd_to_dec_pwr_by_rate(val, 1);
- *rate_num = 1;
- break;
- case 0x86C:
- if (mask == 0xffffff00) {
- rate[0] = DESC_RATE2M;
- rate[1] = DESC_RATE5_5M;
- rate[2] = DESC_RATE11M;
- for (i = 1; i < 4; ++i)
- pwr_by_rate[i - 1] =
- tbl_to_dec_pwr_by_rate(rtwdev, val, i);
- *rate_num = 3;
- } else if (mask == 0x000000ff) {
- rate[0] = DESC_RATE11M;
- pwr_by_rate[0] = bcd_to_dec_pwr_by_rate(val, 0);
- *rate_num = 1;
- }
- break;
- case 0xE10:
- case 0x83C:
- rate[0] = DESC_RATEMCS0;
- rate[1] = DESC_RATEMCS1;
- rate[2] = DESC_RATEMCS2;
- rate[3] = DESC_RATEMCS3;
- for (i = 0; i < 4; ++i)
- pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
- *rate_num = 4;
- break;
- case 0xE14:
- case 0x848:
- rate[0] = DESC_RATEMCS4;
- rate[1] = DESC_RATEMCS5;
- rate[2] = DESC_RATEMCS6;
- rate[3] = DESC_RATEMCS7;
- for (i = 0; i < 4; ++i)
- pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
- *rate_num = 4;
- break;
- case 0xE18:
- case 0x84C:
- rate[0] = DESC_RATEMCS8;
- rate[1] = DESC_RATEMCS9;
- rate[2] = DESC_RATEMCS10;
- rate[3] = DESC_RATEMCS11;
- for (i = 0; i < 4; ++i)
- pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
- *rate_num = 4;
- break;
- case 0xE1C:
- case 0x868:
- rate[0] = DESC_RATEMCS12;
- rate[1] = DESC_RATEMCS13;
- rate[2] = DESC_RATEMCS14;
- rate[3] = DESC_RATEMCS15;
- for (i = 0; i < 4; ++i)
- pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
- *rate_num = 4;
+ /* do not need cck rates if we are not in 2.4G */
+ if (hal->current_band_type == RTW_BAND_2G)
+ rs = RTW_RATE_SECTION_CCK;
+ else
+ rs = RTW_RATE_SECTION_OFDM;
- break;
- case 0x838:
- rate[0] = DESC_RATE1M;
- rate[1] = DESC_RATE2M;
- rate[2] = DESC_RATE5_5M;
- for (i = 1; i < 4; ++i)
- pwr_by_rate[i - 1] = tbl_to_dec_pwr_by_rate(rtwdev,
- val, i);
- *rate_num = 3;
- break;
- case 0xC20:
- case 0xE20:
- case 0x1820:
- case 0x1A20:
- rate[0] = DESC_RATE1M;
- rate[1] = DESC_RATE2M;
- rate[2] = DESC_RATE5_5M;
- rate[3] = DESC_RATE11M;
- for (i = 0; i < 4; ++i)
- pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
- *rate_num = 4;
- break;
- case 0xC24:
- case 0xE24:
- case 0x1824:
- case 0x1A24:
- rate[0] = DESC_RATE6M;
- rate[1] = DESC_RATE9M;
- rate[2] = DESC_RATE12M;
- rate[3] = DESC_RATE18M;
- for (i = 0; i < 4; ++i)
- pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
- *rate_num = 4;
- break;
- case 0xC28:
- case 0xE28:
- case 0x1828:
- case 0x1A28:
- rate[0] = DESC_RATE24M;
- rate[1] = DESC_RATE36M;
- rate[2] = DESC_RATE48M;
- rate[3] = DESC_RATE54M;
- for (i = 0; i < 4; ++i)
- pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
- *rate_num = 4;
- break;
- case 0xC2C:
- case 0xE2C:
- case 0x182C:
- case 0x1A2C:
- rate[0] = DESC_RATEMCS0;
- rate[1] = DESC_RATEMCS1;
- rate[2] = DESC_RATEMCS2;
- rate[3] = DESC_RATEMCS3;
- for (i = 0; i < 4; ++i)
- pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
- *rate_num = 4;
- break;
- case 0xC30:
- case 0xE30:
- case 0x1830:
- case 0x1A30:
- rate[0] = DESC_RATEMCS4;
- rate[1] = DESC_RATEMCS5;
- rate[2] = DESC_RATEMCS6;
- rate[3] = DESC_RATEMCS7;
- for (i = 0; i < 4; ++i)
- pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
- *rate_num = 4;
- break;
- case 0xC34:
- case 0xE34:
- case 0x1834:
- case 0x1A34:
- rate[0] = DESC_RATEMCS8;
- rate[1] = DESC_RATEMCS9;
- rate[2] = DESC_RATEMCS10;
- rate[3] = DESC_RATEMCS11;
- for (i = 0; i < 4; ++i)
- pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
- *rate_num = 4;
- break;
- case 0xC38:
- case 0xE38:
- case 0x1838:
- case 0x1A38:
- rate[0] = DESC_RATEMCS12;
- rate[1] = DESC_RATEMCS13;
- rate[2] = DESC_RATEMCS14;
- rate[3] = DESC_RATEMCS15;
- for (i = 0; i < 4; ++i)
- pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
- *rate_num = 4;
- break;
- case 0xC3C:
- case 0xE3C:
- case 0x183C:
- case 0x1A3C:
- rate[0] = DESC_RATEVHT1SS_MCS0;
- rate[1] = DESC_RATEVHT1SS_MCS1;
- rate[2] = DESC_RATEVHT1SS_MCS2;
- rate[3] = DESC_RATEVHT1SS_MCS3;
- for (i = 0; i < 4; ++i)
- pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
- *rate_num = 4;
- break;
- case 0xC40:
- case 0xE40:
- case 0x1840:
- case 0x1A40:
- rate[0] = DESC_RATEVHT1SS_MCS4;
- rate[1] = DESC_RATEVHT1SS_MCS5;
- rate[2] = DESC_RATEVHT1SS_MCS6;
- rate[3] = DESC_RATEVHT1SS_MCS7;
- for (i = 0; i < 4; ++i)
- pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
- *rate_num = 4;
- break;
- case 0xC44:
- case 0xE44:
- case 0x1844:
- case 0x1A44:
- rate[0] = DESC_RATEVHT1SS_MCS8;
- rate[1] = DESC_RATEVHT1SS_MCS9;
- rate[2] = DESC_RATEVHT2SS_MCS0;
- rate[3] = DESC_RATEVHT2SS_MCS1;
- for (i = 0; i < 4; ++i)
- pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
- *rate_num = 4;
- break;
- case 0xC48:
- case 0xE48:
- case 0x1848:
- case 0x1A48:
- rate[0] = DESC_RATEVHT2SS_MCS2;
- rate[1] = DESC_RATEVHT2SS_MCS3;
- rate[2] = DESC_RATEVHT2SS_MCS4;
- rate[3] = DESC_RATEVHT2SS_MCS5;
- for (i = 0; i < 4; ++i)
- pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
- *rate_num = 4;
- break;
- case 0xC4C:
- case 0xE4C:
- case 0x184C:
- case 0x1A4C:
- rate[0] = DESC_RATEVHT2SS_MCS6;
- rate[1] = DESC_RATEVHT2SS_MCS7;
- rate[2] = DESC_RATEVHT2SS_MCS8;
- rate[3] = DESC_RATEVHT2SS_MCS9;
- for (i = 0; i < 4; ++i)
- pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
- *rate_num = 4;
- break;
- case 0xCD8:
- case 0xED8:
- case 0x18D8:
- case 0x1AD8:
- rate[0] = DESC_RATEMCS16;
- rate[1] = DESC_RATEMCS17;
- rate[2] = DESC_RATEMCS18;
- rate[3] = DESC_RATEMCS19;
- for (i = 0; i < 4; ++i)
- pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
- *rate_num = 4;
- break;
- case 0xCDC:
- case 0xEDC:
- case 0x18DC:
- case 0x1ADC:
- rate[0] = DESC_RATEMCS20;
- rate[1] = DESC_RATEMCS21;
- rate[2] = DESC_RATEMCS22;
- rate[3] = DESC_RATEMCS23;
- for (i = 0; i < 4; ++i)
- pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
- *rate_num = 4;
- break;
- case 0xCE0:
- case 0xEE0:
- case 0x18E0:
- case 0x1AE0:
- rate[0] = DESC_RATEVHT3SS_MCS0;
- rate[1] = DESC_RATEVHT3SS_MCS1;
- rate[2] = DESC_RATEVHT3SS_MCS2;
- rate[3] = DESC_RATEVHT3SS_MCS3;
- for (i = 0; i < 4; ++i)
- pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
- *rate_num = 4;
- break;
- case 0xCE4:
- case 0xEE4:
- case 0x18E4:
- case 0x1AE4:
- rate[0] = DESC_RATEVHT3SS_MCS4;
- rate[1] = DESC_RATEVHT3SS_MCS5;
- rate[2] = DESC_RATEVHT3SS_MCS6;
- rate[3] = DESC_RATEVHT3SS_MCS7;
- for (i = 0; i < 4; ++i)
- pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
- *rate_num = 4;
- break;
- case 0xCE8:
- case 0xEE8:
- case 0x18E8:
- case 0x1AE8:
- rate[0] = DESC_RATEVHT3SS_MCS8;
- rate[1] = DESC_RATEVHT3SS_MCS9;
- for (i = 0; i < 2; ++i)
- pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
- *rate_num = 2;
- break;
- default:
- rtw_warn(rtwdev, "invalid tx power index addr 0x%08x\n", addr);
- break;
- }
+ for (; rs < RTW_RATE_SECTION_MAX; rs++)
+ rtw_phy_set_tx_power_index_by_rs(rtwdev, ch, path, rs);
}
-void phy_store_tx_power_by_rate(void *adapter, u32 band, u32 rfpath, u32 txnum,
- u32 regaddr, u32 bitmask, u32 data)
+void rtw_phy_set_tx_power_level(struct rtw_dev *rtwdev, u8 channel)
{
- struct rtw_dev *rtwdev = adapter;
+ struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_hal *hal = &rtwdev->hal;
- u8 rate_num = 0;
- u8 rate;
- u8 rates[RTW_RF_PATH_MAX] = {0};
- s8 offset;
- s8 pwr_by_rate[RTW_RF_PATH_MAX] = {0};
- int i;
+ u8 path;
- phy_get_rate_values_of_txpwr_by_rate(rtwdev, regaddr, bitmask, data,
- rates, pwr_by_rate, &rate_num);
+ mutex_lock(&hal->tx_power_mutex);
- if (WARN_ON(rfpath >= RTW_RF_PATH_MAX ||
- (band != PHY_BAND_2G && band != PHY_BAND_5G) ||
- rate_num > RTW_RF_PATH_MAX))
- return;
+ for (path = 0; path < hal->rf_path_num; path++)
+ rtw_phy_set_tx_power_level_by_path(rtwdev, channel, path);
- for (i = 0; i < rate_num; i++) {
- offset = pwr_by_rate[i];
- rate = rates[i];
- if (band == PHY_BAND_2G)
- hal->tx_pwr_by_rate_offset_2g[rfpath][rate] = offset;
- else if (band == PHY_BAND_5G)
- hal->tx_pwr_by_rate_offset_5g[rfpath][rate] = offset;
- else
- continue;
- }
+ chip->ops->set_tx_power_index(rtwdev);
+ mutex_unlock(&hal->tx_power_mutex);
}
-static
-void phy_tx_power_by_rate_config_by_path(struct rtw_hal *hal, u8 path,
- u8 rs, u8 size, u8 *rates)
+static void
+rtw_phy_tx_power_by_rate_config_by_path(struct rtw_hal *hal, u8 path,
+ u8 rs, u8 size, u8 *rates)
{
u8 rate;
u8 base_idx, rate_idx;
@@ -1580,36 +1747,35 @@ void rtw_phy_tx_power_by_rate_config(struct rtw_hal *hal)
u8 path;
for (path = 0; path < RTW_RF_PATH_MAX; path++) {
- phy_tx_power_by_rate_config_by_path(hal, path,
+ rtw_phy_tx_power_by_rate_config_by_path(hal, path,
RTW_RATE_SECTION_CCK,
rtw_cck_size, rtw_cck_rates);
- phy_tx_power_by_rate_config_by_path(hal, path,
+ rtw_phy_tx_power_by_rate_config_by_path(hal, path,
RTW_RATE_SECTION_OFDM,
rtw_ofdm_size, rtw_ofdm_rates);
- phy_tx_power_by_rate_config_by_path(hal, path,
+ rtw_phy_tx_power_by_rate_config_by_path(hal, path,
RTW_RATE_SECTION_HT_1S,
rtw_ht_1s_size, rtw_ht_1s_rates);
- phy_tx_power_by_rate_config_by_path(hal, path,
+ rtw_phy_tx_power_by_rate_config_by_path(hal, path,
RTW_RATE_SECTION_HT_2S,
rtw_ht_2s_size, rtw_ht_2s_rates);
- phy_tx_power_by_rate_config_by_path(hal, path,
+ rtw_phy_tx_power_by_rate_config_by_path(hal, path,
RTW_RATE_SECTION_VHT_1S,
rtw_vht_1s_size, rtw_vht_1s_rates);
- phy_tx_power_by_rate_config_by_path(hal, path,
+ rtw_phy_tx_power_by_rate_config_by_path(hal, path,
RTW_RATE_SECTION_VHT_2S,
rtw_vht_2s_size, rtw_vht_2s_rates);
}
}
static void
-phy_tx_power_limit_config(struct rtw_hal *hal, u8 regd, u8 bw, u8 rs)
+__rtw_phy_tx_power_limit_config(struct rtw_hal *hal, u8 regd, u8 bw, u8 rs)
{
- s8 base, orig;
+ s8 base;
u8 ch;
for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_2G; ch++) {
base = hal->tx_pwr_by_rate_base_2g[0][rs];
- orig = hal->tx_pwr_limit_2g[regd][bw][rs][ch];
hal->tx_pwr_limit_2g[regd][bw][rs][ch] -= base;
}
@@ -1623,98 +1789,34 @@ void rtw_phy_tx_power_limit_config(struct rtw_hal *hal)
{
u8 regd, bw, rs;
+ /* default at channel 1 */
+ hal->cch_by_bw[RTW_CHANNEL_WIDTH_20] = 1;
+
for (regd = 0; regd < RTW_REGD_MAX; regd++)
for (bw = 0; bw < RTW_CHANNEL_WIDTH_MAX; bw++)
for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++)
- phy_tx_power_limit_config(hal, regd, bw, rs);
-}
-
-static s8 get_tx_power_limit(struct rtw_hal *hal, u8 bw, u8 rs, u8 ch, u8 regd)
-{
- if (regd > RTW_REGD_WW)
- return RTW_MAX_POWER_INDEX;
-
- return hal->tx_pwr_limit_2g[regd][bw][rs][ch];
-}
-
-s8 phy_get_tx_power_limit(struct rtw_dev *rtwdev, u8 band,
- enum rtw_bandwidth bw, u8 rf_path,
- u8 rate, u8 channel, u8 regd)
-{
- struct rtw_hal *hal = &rtwdev->hal;
- s8 power_limit;
- u8 rs;
- int ch_idx;
-
- if (rate >= DESC_RATE1M && rate <= DESC_RATE11M)
- rs = RTW_RATE_SECTION_CCK;
- else if (rate >= DESC_RATE6M && rate <= DESC_RATE54M)
- rs = RTW_RATE_SECTION_OFDM;
- else if (rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS7)
- rs = RTW_RATE_SECTION_HT_1S;
- else if (rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15)
- rs = RTW_RATE_SECTION_HT_2S;
- else if (rate >= DESC_RATEVHT1SS_MCS0 && rate <= DESC_RATEVHT1SS_MCS9)
- rs = RTW_RATE_SECTION_VHT_1S;
- else if (rate >= DESC_RATEVHT2SS_MCS0 && rate <= DESC_RATEVHT2SS_MCS9)
- rs = RTW_RATE_SECTION_VHT_2S;
- else
- goto err;
-
- ch_idx = rtw_channel_to_idx(band, channel);
- if (ch_idx < 0)
- goto err;
-
- power_limit = get_tx_power_limit(hal, bw, rs, ch_idx, regd);
-
- return power_limit;
-
-err:
- WARN(1, "invalid arguments, band=%d, bw=%d, path=%d, rate=%d, ch=%d\n",
- band, bw, rf_path, rate, channel);
- return RTW_MAX_POWER_INDEX;
+ __rtw_phy_tx_power_limit_config(hal, regd, bw, rs);
}
-void phy_set_tx_power_limit(struct rtw_dev *rtwdev, u8 regd, u8 band,
- u8 bw, u8 rs, u8 ch, s8 pwr_limit)
+static void rtw_phy_init_tx_power_limit(struct rtw_dev *rtwdev,
+ u8 regd, u8 bw, u8 rs)
{
struct rtw_hal *hal = &rtwdev->hal;
- int ch_idx;
-
- pwr_limit = clamp_t(s8, pwr_limit,
- -RTW_MAX_POWER_INDEX, RTW_MAX_POWER_INDEX);
- ch_idx = rtw_channel_to_idx(band, ch);
-
- if (regd >= RTW_REGD_MAX || bw >= RTW_CHANNEL_WIDTH_MAX ||
- rs >= RTW_RATE_SECTION_MAX || ch_idx < 0) {
- WARN(1,
- "wrong txpwr_lmt regd=%u, band=%u bw=%u, rs=%u, ch_idx=%u, pwr_limit=%d\n",
- regd, band, bw, rs, ch_idx, pwr_limit);
- return;
- }
-
- if (band == PHY_BAND_2G)
- hal->tx_pwr_limit_2g[regd][bw][rs][ch_idx] = pwr_limit;
- else if (band == PHY_BAND_5G)
- hal->tx_pwr_limit_5g[regd][bw][rs][ch_idx] = pwr_limit;
-}
-
-static
-void rtw_hw_tx_power_limit_init(struct rtw_hal *hal, u8 regd, u8 bw, u8 rs)
-{
+ s8 max_power_index = (s8)rtwdev->chip->max_power_index;
u8 ch;
/* 2.4G channels */
for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_2G; ch++)
- hal->tx_pwr_limit_2g[regd][bw][rs][ch] = RTW_MAX_POWER_INDEX;
+ hal->tx_pwr_limit_2g[regd][bw][rs][ch] = max_power_index;
/* 5G channels */
for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_5G; ch++)
- hal->tx_pwr_limit_5g[regd][bw][rs][ch] = RTW_MAX_POWER_INDEX;
+ hal->tx_pwr_limit_5g[regd][bw][rs][ch] = max_power_index;
}
-void rtw_hw_init_tx_power(struct rtw_hal *hal)
+void rtw_phy_init_tx_power(struct rtw_dev *rtwdev)
{
+ struct rtw_hal *hal = &rtwdev->hal;
u8 regd, path, rate, rs, bw;
/* init tx power by rate offset */
@@ -1729,5 +1831,6 @@ void rtw_hw_init_tx_power(struct rtw_hal *hal)
for (regd = 0; regd < RTW_REGD_MAX; regd++)
for (bw = 0; bw < RTW_CHANNEL_WIDTH_MAX; bw++)
for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++)
- rtw_hw_tx_power_limit_init(hal, regd, bw, rs);
+ rtw_phy_init_tx_power_limit(rtwdev, regd, bw,
+ rs);
}
diff --git a/drivers/net/wireless/realtek/rtw88/phy.h b/drivers/net/wireless/realtek/rtw88/phy.h
index ec03a2051e52..7c8eb732b13c 100644
--- a/drivers/net/wireless/realtek/rtw88/phy.h
+++ b/drivers/net/wireless/realtek/rtw88/phy.h
@@ -27,11 +27,6 @@ bool rtw_phy_write_rf_reg(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
u32 addr, u32 mask, u32 data);
bool rtw_phy_write_rf_reg_mix(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
u32 addr, u32 mask, u32 data);
-void phy_store_tx_power_by_rate(void *adapter, u32 band, u32 rfpath, u32 txnum,
- u32 regaddr, u32 bitmask, u32 data);
-void phy_set_tx_power_limit(struct rtw_dev *rtwdev, u8 regd, u8 band,
- u8 bw, u8 rs, u8 ch, s8 pwr_limit);
-void phy_set_tx_power_index_by_rs(void *adapter, u8 ch, u8 path, u8 rs);
void rtw_phy_setup_phy_cond(struct rtw_dev *rtwdev, u32 pkg);
void rtw_parse_tbl_phy_cond(struct rtw_dev *rtwdev, const struct rtw_table *tbl);
void rtw_parse_tbl_bb_pg(struct rtw_dev *rtwdev, const struct rtw_table *tbl);
@@ -44,7 +39,7 @@ void rtw_phy_cfg_bb(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
u32 addr, u32 data);
void rtw_phy_cfg_rf(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
u32 addr, u32 data);
-void rtw_hw_init_tx_power(struct rtw_hal *hal);
+void rtw_phy_init_tx_power(struct rtw_dev *rtwdev);
void rtw_phy_load_tables(struct rtw_dev *rtwdev);
void rtw_phy_set_tx_power_level(struct rtw_dev *rtwdev, u8 channel);
void rtw_phy_tx_power_by_rate_config(struct rtw_hal *hal);
@@ -110,6 +105,17 @@ static inline int rtw_check_supported_rfe(struct rtw_dev *rtwdev)
void rtw_phy_dig_write(struct rtw_dev *rtwdev, u8 igi);
+struct rtw_power_params {
+ u8 pwr_base;
+ s8 pwr_offset;
+ s8 pwr_limit;
+};
+
+void
+rtw_get_tx_power_params(struct rtw_dev *rtwdev, u8 path,
+ u8 rate, u8 bw, u8 ch, u8 regd,
+ struct rtw_power_params *pwr_param);
+
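+As an illustration only (not part of the patch), a hypothetical caller of the
+newly exported helper might look like the sketch below; the function name is
+invented and RTW_DBG_PHY is assumed to be an existing debug mask in this driver.
+
+/* Hypothetical caller: dump the three components that make up a final
+ * power index for one path/rate/channel.
+ */
+static void example_dump_tx_power_params(struct rtw_dev *rtwdev, u8 path,
+					 u8 rate, u8 bw, u8 ch, u8 regd)
+{
+	struct rtw_power_params pwr_param = {0};
+
+	rtw_get_tx_power_params(rtwdev, path, rate, bw, ch, regd, &pwr_param);
+	rtw_dbg(rtwdev, RTW_DBG_PHY, "base=%u offset=%d limit=%d\n",
+		pwr_param.pwr_base, pwr_param.pwr_offset, pwr_param.pwr_limit);
+}
+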
#define MASKBYTE0 0xff
#define MASKBYTE1 0xff00
#define MASKBYTE2 0xff0000
diff --git a/drivers/net/wireless/realtek/rtw88/regd.c b/drivers/net/wireless/realtek/rtw88/regd.c
index e7750a833a8e..69744dd65968 100644
--- a/drivers/net/wireless/realtek/rtw88/regd.c
+++ b/drivers/net/wireless/realtek/rtw88/regd.c
@@ -21,19 +21,19 @@ static const struct rtw_regulatory rtw_defined_chplan =
static const struct rtw_regulatory all_chplan_map[] = {
COUNTRY_CHPLAN_ENT("AD", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("AE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("AE", RTW_CHPLAN_WORLD_ETSI2, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("AF", RTW_CHPLAN_ETSI1_ETSI4, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("AG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("AG", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("AI", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("AL", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("AM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("AN", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("AN", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("AO", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("AQ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("AR", RTW_CHPLAN_FCC2_FCC7, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("AS", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("AT", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("AU", RTW_CHPLAN_WORLD_ACMA1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("AU", RTW_CHPLAN_WORLD_ACMA1, RTW_REGD_ACMA),
COUNTRY_CHPLAN_ENT("AW", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("AZ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("BA", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
@@ -42,31 +42,34 @@ static const struct rtw_regulatory all_chplan_map[] = {
COUNTRY_CHPLAN_ENT("BE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("BF", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("BG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("BH", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("BH", RTW_CHPLAN_WORLD_ETSI7, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("BI", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("BJ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("BM", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("BN", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("BO", RTW_CHPLAN_WORLD_FCC7, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("BR", RTW_CHPLAN_FCC2_FCC1, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("BS", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("BW", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("BT", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("BV", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("BW", RTW_CHPLAN_WORLD_ETSI2, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("BY", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("BZ", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("CA", RTW_CHPLAN_IC1_IC2, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("CA", RTW_CHPLAN_IC1_IC2, RTW_REGD_IC),
COUNTRY_CHPLAN_ENT("CC", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("CD", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("CF", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("CG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("CH", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("CI", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("CI", RTW_CHPLAN_ETSI1_ETSI4, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("CK", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("CL", RTW_CHPLAN_WORLD_CHILE1, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("CL", RTW_CHPLAN_WORLD_CHILE1, RTW_REGD_CHILE),
COUNTRY_CHPLAN_ENT("CM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("CN", RTW_CHPLAN_WORLD_ETSI7, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("CO", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("CR", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("CV", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("CX", RTW_CHPLAN_WORLD_ACMA1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("CX", RTW_CHPLAN_WORLD_ACMA1, RTW_REGD_ACMA),
COUNTRY_CHPLAN_ENT("CY", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("CZ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("DE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
@@ -90,7 +93,7 @@ static const struct rtw_regulatory all_chplan_map[] = {
COUNTRY_CHPLAN_ENT("FR", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("GA", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("GB", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("GD", RTW_CHPLAN_FCC1_FCC7, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("GD", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("GE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("GF", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("GG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
@@ -107,8 +110,8 @@ static const struct rtw_regulatory all_chplan_map[] = {
COUNTRY_CHPLAN_ENT("GU", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("GW", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("GY", RTW_CHPLAN_FCC1_NCC3, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("HK", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("HM", RTW_CHPLAN_WORLD_ACMA1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("HK", RTW_CHPLAN_WORLD_ETSI2, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("HM", RTW_CHPLAN_WORLD_ACMA1, RTW_REGD_ACMA),
COUNTRY_CHPLAN_ENT("HN", RTW_CHPLAN_WORLD_FCC5, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("HR", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("HT", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
@@ -118,20 +121,22 @@ static const struct rtw_regulatory all_chplan_map[] = {
COUNTRY_CHPLAN_ENT("IL", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("IM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("IN", RTW_CHPLAN_WORLD_ETSI7, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("IO", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("IQ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("IR", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("IS", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("IT", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("JE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("JM", RTW_CHPLAN_WORLD_ETSI10, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("JM", RTW_CHPLAN_WORLD_FCC5, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("JO", RTW_CHPLAN_WORLD_ETSI8, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("JP", RTW_CHPLAN_MKK1_MKK1, RTW_REGD_MKK),
COUNTRY_CHPLAN_ENT("KE", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("KG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("KH", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("KI", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("KM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("KN", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("KR", RTW_CHPLAN_KCC1_KCC2, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("KR", RTW_CHPLAN_KCC1_KCC3, RTW_REGD_KCC),
COUNTRY_CHPLAN_ENT("KW", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("KY", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("KZ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
@@ -157,7 +162,7 @@ static const struct rtw_regulatory all_chplan_map[] = {
COUNTRY_CHPLAN_ENT("ML", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("MM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("MN", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("MO", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("MO", RTW_CHPLAN_WORLD_ETSI2, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("MP", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("MQ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("MR", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
@@ -167,26 +172,26 @@ static const struct rtw_regulatory all_chplan_map[] = {
COUNTRY_CHPLAN_ENT("MV", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("MW", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("MX", RTW_CHPLAN_FCC2_FCC7, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("MY", RTW_CHPLAN_WORLD_ETSI20, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("MY", RTW_CHPLAN_WORLD_ETSI15, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("MZ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("NA", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("NC", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("NE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("NF", RTW_CHPLAN_WORLD_ACMA1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("NF", RTW_CHPLAN_WORLD_ACMA1, RTW_REGD_ACMA),
COUNTRY_CHPLAN_ENT("NG", RTW_CHPLAN_WORLD_ETSI20, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("NI", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("NL", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("NO", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("NP", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("NP", RTW_CHPLAN_WORLD_ETSI7, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("NR", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("NU", RTW_CHPLAN_WORLD_ACMA1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("NZ", RTW_CHPLAN_WORLD_ACMA1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("NU", RTW_CHPLAN_WORLD_ACMA1, RTW_REGD_ACMA),
+ COUNTRY_CHPLAN_ENT("NZ", RTW_CHPLAN_WORLD_ACMA1, RTW_REGD_ACMA),
COUNTRY_CHPLAN_ENT("OM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("PA", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("PE", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("PF", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("PG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("PH", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("PG", RTW_CHPLAN_WORLD_ETSI2, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("PH", RTW_CHPLAN_WORLD_ETSI2, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("PK", RTW_CHPLAN_WORLD_ETSI10, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("PL", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("PM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
@@ -194,17 +199,17 @@ static const struct rtw_regulatory all_chplan_map[] = {
COUNTRY_CHPLAN_ENT("PT", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("PW", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("PY", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("QA", RTW_CHPLAN_WORLD_ETSI10, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("QA", RTW_CHPLAN_WORLD_ETSI2, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("RE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("RO", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("RS", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("RU", RTW_CHPLAN_WORLD_ETSI14, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("RW", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("SA", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("SA", RTW_CHPLAN_WORLD_ETSI2, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("SB", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("SC", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("SE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("SG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("SG", RTW_CHPLAN_WORLD_ETSI2, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("SH", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("SI", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("SJ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
@@ -222,14 +227,15 @@ static const struct rtw_regulatory all_chplan_map[] = {
COUNTRY_CHPLAN_ENT("TD", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("TF", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("TG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("TH", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("TH", RTW_CHPLAN_WORLD_ETSI2, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("TJ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("TK", RTW_CHPLAN_WORLD_ACMA1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("TK", RTW_CHPLAN_WORLD_ACMA1, RTW_REGD_ACMA),
COUNTRY_CHPLAN_ENT("TM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("TN", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("TO", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("TR", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("TT", RTW_CHPLAN_ETSI1_ETSI4, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("TT", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("TV", RTW_CHPLAN_ETSI1_NULL, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("TW", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("TZ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("UA", RTW_CHPLAN_WORLD_ETSI3, RTW_REGD_ETSI),
@@ -240,14 +246,15 @@ static const struct rtw_regulatory all_chplan_map[] = {
COUNTRY_CHPLAN_ENT("VA", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("VC", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("VE", RTW_CHPLAN_WORLD_FCC3, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("VG", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("VI", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("VN", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("VN", RTW_CHPLAN_WORLD_ETSI2, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("VU", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("WF", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("WS", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("YE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("YT", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("ZA", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("ZA", RTW_CHPLAN_WORLD_ETSI2, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("ZM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("ZW", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
};
diff --git a/drivers/net/wireless/realtek/rtw88/regd.h b/drivers/net/wireless/realtek/rtw88/regd.h
index 7784bb6d3ba7..5d4578331788 100644
--- a/drivers/net/wireless/realtek/rtw88/regd.h
+++ b/drivers/net/wireless/realtek/rtw88/regd.h
@@ -8,6 +8,7 @@
#define IEEE80211_CHAN_NO_IBSS IEEE80211_CHAN_NO_IR
#define IEEE80211_CHAN_PASSIVE_SCAN IEEE80211_CHAN_NO_IR
enum rtw_chplan_id {
+ RTW_CHPLAN_ETSI1_NULL = 0x21,
RTW_CHPLAN_WORLD_ETSI1 = 0x26,
RTW_CHPLAN_MKK1_MKK1 = 0x27,
RTW_CHPLAN_IC1_IC2 = 0x2B,
@@ -15,6 +16,7 @@ enum rtw_chplan_id {
RTW_CHPLAN_WORLD_FCC3 = 0x30,
RTW_CHPLAN_WORLD_FCC5 = 0x32,
RTW_CHPLAN_FCC1_FCC7 = 0x34,
+ RTW_CHPLAN_WORLD_ETSI2 = 0x35,
RTW_CHPLAN_WORLD_ETSI3 = 0x36,
RTW_CHPLAN_ETSI1_ETSI12 = 0x3D,
RTW_CHPLAN_KCC1_KCC2 = 0x3E,
@@ -24,10 +26,12 @@ enum rtw_chplan_id {
RTW_CHPLAN_WORLD_ETSI6 = 0x47,
RTW_CHPLAN_WORLD_ETSI7 = 0x48,
RTW_CHPLAN_WORLD_ETSI8 = 0x49,
+ RTW_CHPLAN_KCC1_KCC3 = 0x4B,
RTW_CHPLAN_WORLD_ETSI10 = 0x51,
RTW_CHPLAN_WORLD_ETSI14 = 0x59,
RTW_CHPLAN_FCC2_FCC7 = 0x61,
RTW_CHPLAN_FCC2_FCC1 = 0x62,
+ RTW_CHPLAN_WORLD_ETSI15 = 0x63,
RTW_CHPLAN_WORLD_FCC7 = 0x73,
RTW_CHPLAN_FCC2_FCC17 = 0x74,
RTW_CHPLAN_WORLD_ETSI20 = 0x75,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
index b4f7242e5aa3..f6214ff20337 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
@@ -203,7 +203,7 @@ static void rtw8822c_dac_iq_offset(struct rtw_dev *rtwdev, u32 *vec, u32 *val)
*val = t;
}
-static u32 rtw8822c_get_path_base_addr(u8 path)
+static u32 rtw8822c_get_path_write_addr(u8 path)
{
u32 base_addr;
@@ -222,6 +222,25 @@ static u32 rtw8822c_get_path_base_addr(u8 path)
return base_addr;
}
+static u32 rtw8822c_get_path_read_addr(u8 path)
+{
+ u32 base_addr;
+
+ switch (path) {
+ case RF_PATH_A:
+ base_addr = 0x2800;
+ break;
+ case RF_PATH_B:
+ base_addr = 0x4500;
+ break;
+ default:
+ WARN_ON(1);
+ return -1;
+ }
+
+ return base_addr;
+}
+
static bool rtw8822c_dac_iq_check(struct rtw_dev *rtwdev, u32 value)
{
bool ret = true;
@@ -316,8 +335,6 @@ static void rtw8822c_dac_cal_rf_mode(struct rtw_dev *rtwdev,
u32 iv[DACK_SN_8822C], qv[DACK_SN_8822C];
u32 rf_a, rf_b;
- mdelay(10);
-
rf_a = rtw_read_rf(rtwdev, RF_PATH_A, 0x0, RFREG_MASK);
rf_b = rtw_read_rf(rtwdev, RF_PATH_B, 0x0, RFREG_MASK);
@@ -347,6 +364,7 @@ static void rtw8822c_dac_bb_setting(struct rtw_dev *rtwdev)
static void rtw8822c_dac_cal_adc(struct rtw_dev *rtwdev,
u8 path, u32 *adc_ic, u32 *adc_qc)
{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
u32 ic = 0, qc = 0, temp = 0;
u32 base_addr;
u32 path_sel;
@@ -354,7 +372,7 @@ static void rtw8822c_dac_cal_adc(struct rtw_dev *rtwdev,
rtw_dbg(rtwdev, RTW_DBG_RFK, "[DACK] ADCK path(%d)\n", path);
- base_addr = rtw8822c_get_path_base_addr(path);
+ base_addr = rtw8822c_get_path_write_addr(path);
switch (path) {
case RF_PATH_A:
path_sel = 0xa0000;
@@ -396,6 +414,7 @@ static void rtw8822c_dac_cal_adc(struct rtw_dev *rtwdev,
}
temp = (ic & 0x3ff) | ((qc & 0x3ff) << 10);
rtw_write32(rtwdev, base_addr + 0x68, temp);
+ dm_info->dack_adck[path] = temp;
rtw_dbg(rtwdev, RTW_DBG_RFK, "[DACK] ADCK 0x%08x=0x08%x\n",
base_addr + 0x68, temp);
/* check ADC DC offset */
@@ -422,10 +441,14 @@ static void rtw8822c_dac_cal_adc(struct rtw_dev *rtwdev,
static void rtw8822c_dac_cal_step1(struct rtw_dev *rtwdev, u8 path)
{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
u32 base_addr;
+ u32 read_addr;
- base_addr = rtw8822c_get_path_base_addr(path);
+ base_addr = rtw8822c_get_path_write_addr(path);
+ read_addr = rtw8822c_get_path_read_addr(path);
+ rtw_write32(rtwdev, base_addr + 0x68, dm_info->dack_adck[path]);
rtw_write32(rtwdev, base_addr + 0x0c, 0xdff00220);
if (path == RF_PATH_A) {
rtw_write32(rtwdev, base_addr + 0x60, 0xf0040ff0);
@@ -447,11 +470,13 @@ static void rtw8822c_dac_cal_step1(struct rtw_dev *rtwdev, u8 path)
rtw_write32(rtwdev, base_addr + 0xcc, 0x0a11fb89);
mdelay(1);
rtw_write32(rtwdev, base_addr + 0xb8, 0x62000000);
- mdelay(20);
rtw_write32(rtwdev, base_addr + 0xd4, 0x62000000);
mdelay(20);
+ if (!check_hw_ready(rtwdev, read_addr + 0x08, 0x7fff80, 0xffff) ||
+ !check_hw_ready(rtwdev, read_addr + 0x34, 0x7fff80, 0xffff))
+ rtw_err(rtwdev, "failed to wait for dack ready\n");
rtw_write32(rtwdev, base_addr + 0xb8, 0x02000000);
- mdelay(20);
+ mdelay(1);
rtw_write32(rtwdev, base_addr + 0xbc, 0x0008ff87);
rtw_write32(rtwdev, 0x9b4, 0xdb6db600);
rtw_write32(rtwdev, base_addr + 0x10, 0x02d508c5);
@@ -465,7 +490,7 @@ static void rtw8822c_dac_cal_step2(struct rtw_dev *rtwdev,
u32 base_addr;
u32 ic, qc, ic_in, qc_in;
- base_addr = rtw8822c_get_path_base_addr(path);
+ base_addr = rtw8822c_get_path_write_addr(path);
rtw_write32_mask(rtwdev, base_addr + 0xbc, 0xf0000000, 0x0);
rtw_write32_mask(rtwdev, base_addr + 0xc0, 0xf, 0x8);
rtw_write32_mask(rtwdev, base_addr + 0xd8, 0xf0000000, 0x0);
@@ -514,10 +539,12 @@ static void rtw8822c_dac_cal_step3(struct rtw_dev *rtwdev, u8 path,
u32 *i_out, u32 *q_out)
{
u32 base_addr;
+ u32 read_addr;
u32 ic, qc;
u32 temp;
- base_addr = rtw8822c_get_path_base_addr(path);
+ base_addr = rtw8822c_get_path_write_addr(path);
+ read_addr = rtw8822c_get_path_read_addr(path);
ic = *ic_in;
qc = *qc_in;
@@ -542,11 +569,13 @@ static void rtw8822c_dac_cal_step3(struct rtw_dev *rtwdev, u8 path,
rtw_write32(rtwdev, base_addr + 0xcc, 0x0a11fb89);
mdelay(1);
rtw_write32(rtwdev, base_addr + 0xb8, 0x62000000);
- mdelay(20);
rtw_write32(rtwdev, base_addr + 0xd4, 0x62000000);
mdelay(20);
+ if (!check_hw_ready(rtwdev, read_addr + 0x24, 0x07f80000, ic) ||
+ !check_hw_ready(rtwdev, read_addr + 0x50, 0x07f80000, qc))
+ rtw_err(rtwdev, "failed to write IQ vector to hardware\n");
rtw_write32(rtwdev, base_addr + 0xb8, 0x02000000);
- mdelay(20);
+ mdelay(1);
rtw_write32_mask(rtwdev, base_addr + 0xbc, 0xe, 0x3);
rtw_write32(rtwdev, 0x9b4, 0xdb6db600);
@@ -583,7 +612,7 @@ static void rtw8822c_dac_cal_step3(struct rtw_dev *rtwdev, u8 path,
static void rtw8822c_dac_cal_step4(struct rtw_dev *rtwdev, u8 path)
{
- u32 base_addr = rtw8822c_get_path_base_addr(path);
+ u32 base_addr = rtw8822c_get_path_write_addr(path);
rtw_write32(rtwdev, base_addr + 0x68, 0x0);
rtw_write32(rtwdev, base_addr + 0x10, 0x02d508c4);
@@ -591,6 +620,296 @@ static void rtw8822c_dac_cal_step4(struct rtw_dev *rtwdev, u8 path)
rtw_write32_mask(rtwdev, base_addr + 0x30, BIT(30), 0x1);
}
+static void rtw8822c_dac_cal_backup_vec(struct rtw_dev *rtwdev,
+ u8 path, u8 vec, u32 w_addr, u32 r_addr)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u16 val;
+ u32 i;
+
+ if (WARN_ON(vec >= 2))
+ return;
+
+ for (i = 0; i < DACK_MSBK_BACKUP_NUM; i++) {
+ rtw_write32_mask(rtwdev, w_addr, 0xf0000000, i);
+ val = (u16)rtw_read32_mask(rtwdev, r_addr, 0x7fc0000);
+ dm_info->dack_msbk[path][vec][i] = val;
+ }
+}
+
+static void rtw8822c_dac_cal_backup_path(struct rtw_dev *rtwdev, u8 path)
+{
+ u32 w_off = 0x1c;
+ u32 r_off = 0x2c;
+ u32 w_addr, r_addr;
+
+ if (WARN_ON(path >= 2))
+ return;
+
+ /* backup I vector */
+ w_addr = rtw8822c_get_path_write_addr(path) + 0xb0;
+ r_addr = rtw8822c_get_path_read_addr(path) + 0x10;
+ rtw8822c_dac_cal_backup_vec(rtwdev, path, 0, w_addr, r_addr);
+
+ /* backup Q vector */
+ w_addr = rtw8822c_get_path_write_addr(path) + 0xb0 + w_off;
+ r_addr = rtw8822c_get_path_read_addr(path) + 0x10 + r_off;
+ rtw8822c_dac_cal_backup_vec(rtwdev, path, 1, w_addr, r_addr);
+}
+
+static void rtw8822c_dac_cal_backup_dck(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u8 val;
+
+ val = (u8)rtw_read32_mask(rtwdev, REG_DCKA_I_0, 0xf0000000);
+ dm_info->dack_dck[RF_PATH_A][0][0] = val;
+ val = (u8)rtw_read32_mask(rtwdev, REG_DCKA_I_1, 0xf);
+ dm_info->dack_dck[RF_PATH_A][0][1] = val;
+ val = (u8)rtw_read32_mask(rtwdev, REG_DCKA_Q_0, 0xf0000000);
+ dm_info->dack_dck[RF_PATH_A][1][0] = val;
+ val = (u8)rtw_read32_mask(rtwdev, REG_DCKA_Q_1, 0xf);
+ dm_info->dack_dck[RF_PATH_A][1][1] = val;
+
+ val = (u8)rtw_read32_mask(rtwdev, REG_DCKB_I_0, 0xf0000000);
+ dm_info->dack_dck[RF_PATH_B][0][0] = val;
+ val = (u8)rtw_read32_mask(rtwdev, REG_DCKB_I_1, 0xf);
+ dm_info->dack_dck[RF_PATH_B][1][0] = val;
+ val = (u8)rtw_read32_mask(rtwdev, REG_DCKB_Q_0, 0xf0000000);
+ dm_info->dack_dck[RF_PATH_B][0][1] = val;
+ val = (u8)rtw_read32_mask(rtwdev, REG_DCKB_Q_1, 0xf);
+ dm_info->dack_dck[RF_PATH_B][1][1] = val;
+}
+
+static void rtw8822c_dac_cal_backup(struct rtw_dev *rtwdev)
+{
+ u32 temp[3];
+
+ temp[0] = rtw_read32(rtwdev, 0x1860);
+ temp[1] = rtw_read32(rtwdev, 0x4160);
+ temp[2] = rtw_read32(rtwdev, 0x9b4);
+
+ /* set clock */
+ rtw_write32(rtwdev, 0x9b4, 0xdb66db00);
+
+ /* backup path-A I/Q */
+ rtw_write32_clr(rtwdev, 0x1830, BIT(30));
+ rtw_write32_mask(rtwdev, 0x1860, 0xfc000000, 0x3c);
+ rtw8822c_dac_cal_backup_path(rtwdev, RF_PATH_A);
+
+ /* backup path-B I/Q */
+ rtw_write32_clr(rtwdev, 0x4130, BIT(30));
+ rtw_write32_mask(rtwdev, 0x4160, 0xfc000000, 0x3c);
+ rtw8822c_dac_cal_backup_path(rtwdev, RF_PATH_B);
+
+ rtw8822c_dac_cal_backup_dck(rtwdev);
+ rtw_write32_set(rtwdev, 0x1830, BIT(30));
+ rtw_write32_set(rtwdev, 0x4130, BIT(30));
+
+ rtw_write32(rtwdev, 0x1860, temp[0]);
+ rtw_write32(rtwdev, 0x4160, temp[1]);
+ rtw_write32(rtwdev, 0x9b4, temp[2]);
+}
+
+static void rtw8822c_dac_cal_restore_dck(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u8 val;
+
+ rtw_write32_set(rtwdev, REG_DCKA_I_0, BIT(19));
+ val = dm_info->dack_dck[RF_PATH_A][0][0];
+ rtw_write32_mask(rtwdev, REG_DCKA_I_0, 0xf0000000, val);
+ val = dm_info->dack_dck[RF_PATH_A][0][1];
+ rtw_write32_mask(rtwdev, REG_DCKA_I_1, 0xf, val);
+
+ rtw_write32_set(rtwdev, REG_DCKA_Q_0, BIT(19));
+ val = dm_info->dack_dck[RF_PATH_A][1][0];
+ rtw_write32_mask(rtwdev, REG_DCKA_Q_0, 0xf0000000, val);
+ val = dm_info->dack_dck[RF_PATH_A][1][1];
+ rtw_write32_mask(rtwdev, REG_DCKA_Q_1, 0xf, val);
+
+ rtw_write32_set(rtwdev, REG_DCKB_I_0, BIT(19));
+ val = dm_info->dack_dck[RF_PATH_B][0][0];
+ rtw_write32_mask(rtwdev, REG_DCKB_I_0, 0xf0000000, val);
+ val = dm_info->dack_dck[RF_PATH_B][0][1];
+ rtw_write32_mask(rtwdev, REG_DCKB_I_1, 0xf, val);
+
+ rtw_write32_set(rtwdev, REG_DCKB_Q_0, BIT(19));
+ val = dm_info->dack_dck[RF_PATH_B][1][0];
+ rtw_write32_mask(rtwdev, REG_DCKB_Q_0, 0xf0000000, val);
+ val = dm_info->dack_dck[RF_PATH_B][1][1];
+ rtw_write32_mask(rtwdev, REG_DCKB_Q_1, 0xf, val);
+}
+
+static void rtw8822c_dac_cal_restore_prepare(struct rtw_dev *rtwdev)
+{
+ rtw_write32(rtwdev, 0x9b4, 0xdb66db00);
+
+ rtw_write32_mask(rtwdev, 0x18b0, BIT(27), 0x0);
+ rtw_write32_mask(rtwdev, 0x18cc, BIT(27), 0x0);
+ rtw_write32_mask(rtwdev, 0x41b0, BIT(27), 0x0);
+ rtw_write32_mask(rtwdev, 0x41cc, BIT(27), 0x0);
+
+ rtw_write32_mask(rtwdev, 0x1830, BIT(30), 0x0);
+ rtw_write32_mask(rtwdev, 0x1860, 0xfc000000, 0x3c);
+ rtw_write32_mask(rtwdev, 0x18b4, BIT(0), 0x1);
+ rtw_write32_mask(rtwdev, 0x18d0, BIT(0), 0x1);
+
+ rtw_write32_mask(rtwdev, 0x4130, BIT(30), 0x0);
+ rtw_write32_mask(rtwdev, 0x4160, 0xfc000000, 0x3c);
+ rtw_write32_mask(rtwdev, 0x41b4, BIT(0), 0x1);
+ rtw_write32_mask(rtwdev, 0x41d0, BIT(0), 0x1);
+
+ rtw_write32_mask(rtwdev, 0x18b0, 0xf00, 0x0);
+ rtw_write32_mask(rtwdev, 0x18c0, BIT(14), 0x0);
+ rtw_write32_mask(rtwdev, 0x18cc, 0xf00, 0x0);
+ rtw_write32_mask(rtwdev, 0x18dc, BIT(14), 0x0);
+
+ rtw_write32_mask(rtwdev, 0x18b0, BIT(0), 0x0);
+ rtw_write32_mask(rtwdev, 0x18cc, BIT(0), 0x0);
+ rtw_write32_mask(rtwdev, 0x18b0, BIT(0), 0x1);
+ rtw_write32_mask(rtwdev, 0x18cc, BIT(0), 0x1);
+
+ rtw8822c_dac_cal_restore_dck(rtwdev);
+
+ rtw_write32_mask(rtwdev, 0x18c0, 0x38000, 0x7);
+ rtw_write32_mask(rtwdev, 0x18dc, 0x38000, 0x7);
+ rtw_write32_mask(rtwdev, 0x41c0, 0x38000, 0x7);
+ rtw_write32_mask(rtwdev, 0x41dc, 0x38000, 0x7);
+
+ rtw_write32_mask(rtwdev, 0x18b8, BIT(26) | BIT(25), 0x1);
+ rtw_write32_mask(rtwdev, 0x18d4, BIT(26) | BIT(25), 0x1);
+
+ rtw_write32_mask(rtwdev, 0x41b0, 0xf00, 0x0);
+ rtw_write32_mask(rtwdev, 0x41c0, BIT(14), 0x0);
+ rtw_write32_mask(rtwdev, 0x41cc, 0xf00, 0x0);
+ rtw_write32_mask(rtwdev, 0x41dc, BIT(14), 0x0);
+
+ rtw_write32_mask(rtwdev, 0x41b0, BIT(0), 0x0);
+ rtw_write32_mask(rtwdev, 0x41cc, BIT(0), 0x0);
+ rtw_write32_mask(rtwdev, 0x41b0, BIT(0), 0x1);
+ rtw_write32_mask(rtwdev, 0x41cc, BIT(0), 0x1);
+
+ rtw_write32_mask(rtwdev, 0x41b8, BIT(26) | BIT(25), 0x1);
+ rtw_write32_mask(rtwdev, 0x41d4, BIT(26) | BIT(25), 0x1);
+}
+
+static bool rtw8822c_dac_cal_restore_wait(struct rtw_dev *rtwdev,
+ u32 target_addr, u32 toggle_addr)
+{
+ u32 cnt = 0;
+
+ do {
+ rtw_write32_mask(rtwdev, toggle_addr, BIT(26) | BIT(25), 0x0);
+ rtw_write32_mask(rtwdev, toggle_addr, BIT(26) | BIT(25), 0x2);
+
+ if (rtw_read32_mask(rtwdev, target_addr, 0xf) == 0x6)
+ return true;
+
+ } while (cnt++ < 100);
+
+ return false;
+}
+
+static bool rtw8822c_dac_cal_restore_path(struct rtw_dev *rtwdev, u8 path)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u32 w_off = 0x1c;
+ u32 r_off = 0x2c;
+ u32 w_i, r_i, w_q, r_q;
+ u32 value;
+ u32 i;
+
+ w_i = rtw8822c_get_path_write_addr(path) + 0xb0;
+ r_i = rtw8822c_get_path_read_addr(path) + 0x08;
+ w_q = rtw8822c_get_path_write_addr(path) + 0xb0 + w_off;
+ r_q = rtw8822c_get_path_read_addr(path) + 0x08 + r_off;
+
+ if (!rtw8822c_dac_cal_restore_wait(rtwdev, r_i, w_i + 0x8))
+ return false;
+
+ for (i = 0; i < DACK_MSBK_BACKUP_NUM; i++) {
+ rtw_write32_mask(rtwdev, w_i + 0x4, BIT(2), 0x0);
+ value = dm_info->dack_msbk[path][0][i];
+ rtw_write32_mask(rtwdev, w_i + 0x4, 0xff8, value);
+ rtw_write32_mask(rtwdev, w_i, 0xf0000000, i);
+ rtw_write32_mask(rtwdev, w_i + 0x4, BIT(2), 0x1);
+ }
+
+ rtw_write32_mask(rtwdev, w_i + 0x4, BIT(2), 0x0);
+
+ if (!rtw8822c_dac_cal_restore_wait(rtwdev, r_q, w_q + 0x8))
+ return false;
+
+ for (i = 0; i < DACK_MSBK_BACKUP_NUM; i++) {
+ rtw_write32_mask(rtwdev, w_q + 0x4, BIT(2), 0x0);
+ value = dm_info->dack_msbk[path][1][i];
+ rtw_write32_mask(rtwdev, w_q + 0x4, 0xff8, value);
+ rtw_write32_mask(rtwdev, w_q, 0xf0000000, i);
+ rtw_write32_mask(rtwdev, w_q + 0x4, BIT(2), 0x1);
+ }
+ rtw_write32_mask(rtwdev, w_q + 0x4, BIT(2), 0x0);
+
+ rtw_write32_mask(rtwdev, w_i + 0x8, BIT(26) | BIT(25), 0x0);
+ rtw_write32_mask(rtwdev, w_q + 0x8, BIT(26) | BIT(25), 0x0);
+ rtw_write32_mask(rtwdev, w_i + 0x4, BIT(0), 0x0);
+ rtw_write32_mask(rtwdev, w_q + 0x4, BIT(0), 0x0);
+
+ return true;
+}
+
+static bool __rtw8822c_dac_cal_restore(struct rtw_dev *rtwdev)
+{
+ if (!rtw8822c_dac_cal_restore_path(rtwdev, RF_PATH_A))
+ return false;
+
+ if (!rtw8822c_dac_cal_restore_path(rtwdev, RF_PATH_B))
+ return false;
+
+ return true;
+}
+
+static bool rtw8822c_dac_cal_restore(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u32 temp[3];
+
+ /* sample the first element of both paths' I/Q vectors */
+ if (dm_info->dack_msbk[RF_PATH_A][0][0] == 0 &&
+ dm_info->dack_msbk[RF_PATH_A][1][0] == 0 &&
+ dm_info->dack_msbk[RF_PATH_B][0][0] == 0 &&
+ dm_info->dack_msbk[RF_PATH_B][1][0] == 0)
+ return false;
+
+ temp[0] = rtw_read32(rtwdev, 0x1860);
+ temp[1] = rtw_read32(rtwdev, 0x4160);
+ temp[2] = rtw_read32(rtwdev, 0x9b4);
+
+ rtw8822c_dac_cal_restore_prepare(rtwdev);
+ if (!check_hw_ready(rtwdev, 0x2808, 0x7fff80, 0xffff) ||
+ !check_hw_ready(rtwdev, 0x2834, 0x7fff80, 0xffff) ||
+ !check_hw_ready(rtwdev, 0x4508, 0x7fff80, 0xffff) ||
+ !check_hw_ready(rtwdev, 0x4534, 0x7fff80, 0xffff))
+ return false;
+
+ if (!__rtw8822c_dac_cal_restore(rtwdev)) {
+ rtw_err(rtwdev, "failed to restore dack vectors\n");
+ return false;
+ }
+
+ rtw_write32_mask(rtwdev, 0x1830, BIT(30), 0x1);
+ rtw_write32_mask(rtwdev, 0x4130, BIT(30), 0x1);
+ rtw_write32(rtwdev, 0x1860, temp[0]);
+ rtw_write32(rtwdev, 0x4160, temp[1]);
+ rtw_write32_mask(rtwdev, 0x18b0, BIT(27), 0x1);
+ rtw_write32_mask(rtwdev, 0x18cc, BIT(27), 0x1);
+ rtw_write32_mask(rtwdev, 0x41b0, BIT(27), 0x1);
+ rtw_write32_mask(rtwdev, 0x41cc, BIT(27), 0x1);
+ rtw_write32(rtwdev, 0x9b4, temp[2]);
+
+ return true;
+}
+
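Illustrative only (not part of the patch): the restore/backup helpers above let rtw8822c_rf_dac_cal() skip the expensive calibration on later runs. A simplified standalone sketch of that control flow, with the hardware access stubbed out:

#include <stdbool.h>
#include <stdio.h>

/* stand-ins for the real hardware-touching steps */
static bool restore_saved_results(void) { return false; } /* nothing saved yet */
static void run_full_calibration(void)  { puts("running full DAC calibration"); }
static void backup_results(void)        { puts("backing up results for next time"); }

int main(void)
{
	/* try the cheap restore path first, fall back to the full calibration */
	if (restore_saved_results())
		return 0;

	run_full_calibration();
	backup_results();
	return 0;
}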
static void rtw8822c_rf_dac_cal(struct rtw_dev *rtwdev)
{
struct rtw_backup_info backup_rf[DACK_RF_8822C * DACK_PATH_8822C];
@@ -600,6 +919,11 @@ static void rtw8822c_rf_dac_cal(struct rtw_dev *rtwdev)
u32 ic_a = 0x0, qc_a = 0x0, ic_b = 0x0, qc_b = 0x0;
u32 adc_ic_a = 0x0, adc_qc_a = 0x0, adc_ic_b = 0x0, adc_qc_b = 0x0;
+ if (rtw8822c_dac_cal_restore(rtwdev))
+ return;
+
+ /* not able to restore, do the full calibration */
+
rtw8822c_dac_backup_reg(rtwdev, backup, backup_rf);
rtw8822c_dac_bb_setting(rtwdev);
@@ -644,6 +968,9 @@ static void rtw8822c_rf_dac_cal(struct rtw_dev *rtwdev)
rtw8822c_dac_restore_reg(rtwdev, backup, backup_rf);
+ /* back up the results for later restore, saving a lot of time */
+ rtw8822c_dac_cal_backup(rtwdev);
+
rtw_dbg(rtwdev, RTW_DBG_RFK, "[DACK] path A: ic=0x%x, qc=0x%x\n", ic_a, qc_a);
rtw_dbg(rtwdev, RTW_DBG_RFK, "[DACK] path B: ic=0x%x, qc=0x%x\n", ic_b, qc_b);
rtw_dbg(rtwdev, RTW_DBG_RFK, "[DACK] path A: i=0x%x, q=0x%x\n", i_a, q_a);
@@ -1015,8 +1342,28 @@ static void rtw8822c_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw,
rtw_write32_clr(rtwdev, REG_CCKTXONLY, BIT_BB_CCK_CHECK_EN);
rtw_write32_mask(rtwdev, REG_CCAMSK, 0x3F000000, 0xF);
- rtw_write32_mask(rtwdev, REG_RXAGCCTL0, 0x1f0, 0x0);
- rtw_write32_mask(rtwdev, REG_RXAGCCTL, 0x1f0, 0x0);
+ switch (bw) {
+ case RTW_CHANNEL_WIDTH_20:
+ rtw_write32_mask(rtwdev, REG_RXAGCCTL0, BITS_RXAGC_CCK,
+ 0x5);
+ rtw_write32_mask(rtwdev, REG_RXAGCCTL, BITS_RXAGC_CCK,
+ 0x5);
+ rtw_write32_mask(rtwdev, REG_RXAGCCTL0, BITS_RXAGC_OFDM,
+ 0x6);
+ rtw_write32_mask(rtwdev, REG_RXAGCCTL, BITS_RXAGC_OFDM,
+ 0x6);
+ break;
+ case RTW_CHANNEL_WIDTH_40:
+ rtw_write32_mask(rtwdev, REG_RXAGCCTL0, BITS_RXAGC_CCK,
+ 0x4);
+ rtw_write32_mask(rtwdev, REG_RXAGCCTL, BITS_RXAGC_CCK,
+ 0x4);
+ rtw_write32_mask(rtwdev, REG_RXAGCCTL0, BITS_RXAGC_OFDM,
+ 0x0);
+ rtw_write32_mask(rtwdev, REG_RXAGCCTL, BITS_RXAGC_OFDM,
+ 0x0);
+ break;
+ }
if (channel == 13 || channel == 14)
rtw_write32_mask(rtwdev, REG_SCOTRK, 0xfff, 0x969);
else if (channel == 11 || channel == 12)
@@ -1061,14 +1408,20 @@ static void rtw8822c_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw,
rtw_write32_mask(rtwdev, REG_CCAMSK, 0x3F000000, 0x22);
rtw_write32_mask(rtwdev, REG_TXDFIR0, 0x70, 0x3);
if (channel >= 36 && channel <= 64) {
- rtw_write32_mask(rtwdev, REG_RXAGCCTL0, 0x1f0, 0x1);
- rtw_write32_mask(rtwdev, REG_RXAGCCTL, 0x1f0, 0x1);
+ rtw_write32_mask(rtwdev, REG_RXAGCCTL0, BITS_RXAGC_OFDM,
+ 0x1);
+ rtw_write32_mask(rtwdev, REG_RXAGCCTL, BITS_RXAGC_OFDM,
+ 0x1);
} else if (channel >= 100 && channel <= 144) {
- rtw_write32_mask(rtwdev, REG_RXAGCCTL0, 0x1f0, 0x2);
- rtw_write32_mask(rtwdev, REG_RXAGCCTL, 0x1f0, 0x2);
+ rtw_write32_mask(rtwdev, REG_RXAGCCTL0, BITS_RXAGC_OFDM,
+ 0x2);
+ rtw_write32_mask(rtwdev, REG_RXAGCCTL, BITS_RXAGC_OFDM,
+ 0x2);
} else if (channel >= 149) {
- rtw_write32_mask(rtwdev, REG_RXAGCCTL0, 0x1f0, 0x3);
- rtw_write32_mask(rtwdev, REG_RXAGCCTL, 0x1f0, 0x3);
+ rtw_write32_mask(rtwdev, REG_RXAGCCTL0, BITS_RXAGC_OFDM,
+ 0x3);
+ rtw_write32_mask(rtwdev, REG_RXAGCCTL, BITS_RXAGC_OFDM,
+ 0x3);
}
if (channel >= 36 && channel <= 51)
@@ -1092,6 +1445,9 @@ static void rtw8822c_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw,
rtw_write32_mask(rtwdev, REG_TXBWCTL, 0xffc0, 0x0);
rtw_write32_mask(rtwdev, REG_TXCLK, 0x700, 0x7);
rtw_write32_mask(rtwdev, REG_TXCLK, 0x700000, 0x6);
+ rtw_write32_mask(rtwdev, REG_CCK_SOURCE, BIT_NBI_EN, 0x0);
+ rtw_write32_mask(rtwdev, REG_SBD, BITS_SUBTUNE, 0x1);
+ rtw_write32_mask(rtwdev, REG_PT_CHSMO, BIT_PT_OPT, 0x0);
break;
case RTW_CHANNEL_WIDTH_40:
rtw_write32_mask(rtwdev, REG_CCKSB, BIT(4),
@@ -1100,12 +1456,17 @@ static void rtw8822c_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw,
rtw_write32_mask(rtwdev, REG_TXBWCTL, 0xc0, 0x0);
rtw_write32_mask(rtwdev, REG_TXBWCTL, 0xff00,
(primary_ch_idx | (primary_ch_idx << 4)));
+ rtw_write32_mask(rtwdev, REG_CCK_SOURCE, BIT_NBI_EN, 0x1);
+ rtw_write32_mask(rtwdev, REG_SBD, BITS_SUBTUNE, 0x1);
+ rtw_write32_mask(rtwdev, REG_PT_CHSMO, BIT_PT_OPT, 0x1);
break;
case RTW_CHANNEL_WIDTH_80:
rtw_write32_mask(rtwdev, REG_TXBWCTL, 0xf, 0xa);
rtw_write32_mask(rtwdev, REG_TXBWCTL, 0xc0, 0x0);
rtw_write32_mask(rtwdev, REG_TXBWCTL, 0xff00,
(primary_ch_idx | (primary_ch_idx << 4)));
+ rtw_write32_mask(rtwdev, REG_SBD, BITS_SUBTUNE, 0x6);
+ rtw_write32_mask(rtwdev, REG_PT_CHSMO, BIT_PT_OPT, 0x1);
break;
case RTW_CHANNEL_WIDTH_5:
rtw_write32_mask(rtwdev, REG_DFIRBW, 0x3FF0, 0x2AB);
@@ -1113,6 +1474,9 @@ static void rtw8822c_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw,
rtw_write32_mask(rtwdev, REG_TXBWCTL, 0xffc0, 0x1);
rtw_write32_mask(rtwdev, REG_TXCLK, 0x700, 0x4);
rtw_write32_mask(rtwdev, REG_TXCLK, 0x700000, 0x4);
+ rtw_write32_mask(rtwdev, REG_CCK_SOURCE, BIT_NBI_EN, 0x0);
+ rtw_write32_mask(rtwdev, REG_SBD, BITS_SUBTUNE, 0x1);
+ rtw_write32_mask(rtwdev, REG_PT_CHSMO, BIT_PT_OPT, 0x0);
break;
case RTW_CHANNEL_WIDTH_10:
rtw_write32_mask(rtwdev, REG_DFIRBW, 0x3FF0, 0x2AB);
@@ -1120,6 +1484,9 @@ static void rtw8822c_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw,
rtw_write32_mask(rtwdev, REG_TXBWCTL, 0xffc0, 0x2);
rtw_write32_mask(rtwdev, REG_TXCLK, 0x700, 0x6);
rtw_write32_mask(rtwdev, REG_TXCLK, 0x700000, 0x5);
+ rtw_write32_mask(rtwdev, REG_CCK_SOURCE, BIT_NBI_EN, 0x0);
+ rtw_write32_mask(rtwdev, REG_SBD, BITS_SUBTUNE, 0x1);
+ rtw_write32_mask(rtwdev, REG_PT_CHSMO, BIT_PT_OPT, 0x0);
break;
}
}
@@ -1451,13 +1818,30 @@ static void rtw8822c_false_alarm_statistics(struct rtw_dev *rtwdev)
u32 cck_enable;
u32 cck_fa_cnt;
u32 ofdm_fa_cnt;
- u32 ofdm_tx_counter;
+ u32 ofdm_fa_cnt1, ofdm_fa_cnt2, ofdm_fa_cnt3, ofdm_fa_cnt4, ofdm_fa_cnt5;
+ u16 parity_fail, rate_illegal, crc8_fail, mcs_fail, sb_search_fail,
+ fast_fsync, crc8_fail_vhta, mcs_fail_vht;
cck_enable = rtw_read32(rtwdev, REG_ENCCK) & BIT_CCK_BLK_EN;
cck_fa_cnt = rtw_read16(rtwdev, REG_CCK_FACNT);
- ofdm_fa_cnt = rtw_read16(rtwdev, REG_OFDM_FACNT);
- ofdm_tx_counter = rtw_read16(rtwdev, REG_OFDM_TXCNT);
- ofdm_fa_cnt -= ofdm_tx_counter;
+
+ ofdm_fa_cnt1 = rtw_read32(rtwdev, REG_OFDM_FACNT1);
+ ofdm_fa_cnt2 = rtw_read32(rtwdev, REG_OFDM_FACNT2);
+ ofdm_fa_cnt3 = rtw_read32(rtwdev, REG_OFDM_FACNT3);
+ ofdm_fa_cnt4 = rtw_read32(rtwdev, REG_OFDM_FACNT4);
+ ofdm_fa_cnt5 = rtw_read32(rtwdev, REG_OFDM_FACNT5);
+
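+ /* split the counter registers into the individual OFDM false-alarm causes */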
+ parity_fail = FIELD_GET(GENMASK(31, 16), ofdm_fa_cnt1);
+ rate_illegal = FIELD_GET(GENMASK(15, 0), ofdm_fa_cnt2);
+ crc8_fail = FIELD_GET(GENMASK(31, 16), ofdm_fa_cnt2);
+ crc8_fail_vhta = FIELD_GET(GENMASK(15, 0), ofdm_fa_cnt3);
+ mcs_fail = FIELD_GET(GENMASK(15, 0), ofdm_fa_cnt4);
+ mcs_fail_vht = FIELD_GET(GENMASK(31, 16), ofdm_fa_cnt4);
+ fast_fsync = FIELD_GET(GENMASK(15, 0), ofdm_fa_cnt5);
+ sb_search_fail = FIELD_GET(GENMASK(31, 16), ofdm_fa_cnt5);
+
+ ofdm_fa_cnt = parity_fail + rate_illegal + crc8_fail + crc8_fail_vhta +
+ mcs_fail + mcs_fail_vht + fast_fsync + sb_search_fail;
dm_info->cck_fa_cnt = cck_fa_cnt;
dm_info->ofdm_fa_cnt = ofdm_fa_cnt;
@@ -1468,8 +1852,12 @@ static void rtw8822c_false_alarm_statistics(struct rtw_dev *rtwdev)
rtw_write32_mask(rtwdev, REG_CCANRX, BIT_CCK_FA_RST, 2);
rtw_write32_mask(rtwdev, REG_CCANRX, BIT_OFDM_FA_RST, 0);
rtw_write32_mask(rtwdev, REG_CCANRX, BIT_OFDM_FA_RST, 2);
+
+ /* disable rx clk gating to reset counters */
+ rtw_write32_clr(rtwdev, REG_RX_BREAK, BIT_COM_RX_GCK_EN);
rtw_write32_set(rtwdev, REG_CNT_CTRL, BIT_ALL_CNT_RST);
rtw_write32_clr(rtwdev, REG_CNT_CTRL, BIT_ALL_CNT_RST);
+ rtw_write32_set(rtwdev, REG_RX_BREAK, BIT_COM_RX_GCK_EN);
}
static void rtw8822c_do_iqk(struct rtw_dev *rtwdev)
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.h b/drivers/net/wireless/realtek/rtw88/rtw8822c.h
index d3bd9850baa0..5ee1de41504d 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.h
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.h
@@ -133,6 +133,8 @@ struct rtw8822c_efuse {
#define REG_DYMPRITH 0x86c
#define REG_DYMENTH0 0x870
#define REG_DYMENTH 0x874
+#define REG_SBD 0x88c
+#define BITS_SUBTUNE GENMASK(15, 12)
#define REG_DYMTHMIN 0x8a4
#define REG_TXBWCTL 0x9b0
#define REG_TXCLK 0x9b4
@@ -140,12 +142,20 @@ struct rtw8822c_efuse {
#define REG_MRCM 0xc38
#define REG_AGCSWSH 0xc44
#define REG_ANTWTPD 0xc54
+#define REG_PT_CHSMO 0xcbc
+#define BIT_PT_OPT BIT(21)
#define REG_ORITXCODE 0x1800
#define REG_3WIRE 0x180c
#define BIT_3WIRE_TX_EN BIT(0)
#define BIT_3WIRE_RX_EN BIT(1)
#define BIT_3WIRE_PI_ON BIT(28)
#define REG_RXAGCCTL0 0x18ac
+#define BITS_RXAGC_CCK GENMASK(15, 12)
+#define BITS_RXAGC_OFDM GENMASK(8, 4)
+#define REG_DCKA_I_0 0x18bc
+#define REG_DCKA_I_1 0x18c0
+#define REG_DCKA_Q_0 0x18d8
+#define REG_DCKA_Q_1 0x18dc
#define REG_CCKSB 0x1a00
#define REG_RXCCKSEL 0x1a04
#define REG_BGCTRL 0x1a14
@@ -164,11 +174,15 @@ struct rtw8822c_efuse {
#define REG_TXF5 0x1aa0
#define REG_TXF6 0x1aac
#define REG_TXF7 0x1ab0
+#define REG_CCK_SOURCE 0x1abc
+#define BIT_NBI_EN BIT(30)
#define REG_TXANT 0x1c28
#define REG_ENCCK 0x1c3c
#define BIT_CCK_BLK_EN BIT(1)
#define BIT_CCK_OFDM_BLK_EN (BIT(0) | BIT(1))
#define REG_CCAMSK 0x1c80
+#define REG_RX_BREAK 0x1d2c
+#define BIT_COM_RX_GCK_EN BIT(31)
#define REG_RXFNCTL 0x1d30
#define REG_RXIGI 0x1d70
#define REG_ENFN 0x1e24
@@ -178,9 +192,18 @@ struct rtw8822c_efuse {
#define REG_CNT_CTRL 0x1eb4
#define BIT_ALL_CNT_RST BIT(25)
#define REG_OFDM_FACNT 0x2d00
+#define REG_OFDM_FACNT1 0x2d04
+#define REG_OFDM_FACNT2 0x2d08
+#define REG_OFDM_FACNT3 0x2d0c
+#define REG_OFDM_FACNT4 0x2d10
+#define REG_OFDM_FACNT5 0x2d20
#define REG_OFDM_TXCNT 0x2de0
#define REG_ORITXCODE2 0x4100
#define REG_3WIRE2 0x410c
#define REG_RXAGCCTL 0x41ac
+#define REG_DCKB_I_0 0x41bc
+#define REG_DCKB_I_1 0x41c0
+#define REG_DCKB_Q_0 0x41d8
+#define REG_DCKB_Q_1 0x41dc
#endif
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c b/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c
index 49044f510c6c..18e609a69829 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c
@@ -9489,55 +9489,55 @@ static const u8 rtw8822c_txpwr_lmt_type0[] = {
0, 0, 1, 3, 13, 127, 2, 0, 1, 3, 13, 127,
0, 0, 1, 3, 14, 127, 2, 0, 1, 3, 14, 127,
0, 1, 0, 1, 36, 74, 2, 1, 0, 1, 36, 62,
- 0, 1, 0, 1, 40, 80, 2, 1, 0, 1, 40, 62,
- 0, 1, 0, 1, 44, 80, 2, 1, 0, 1, 44, 62,
- 0, 1, 0, 1, 48, 80, 2, 1, 0, 1, 48, 62,
- 0, 1, 0, 1, 52, 80, 2, 1, 0, 1, 52, 62,
- 0, 1, 0, 1, 56, 80, 2, 1, 0, 1, 56, 62,
- 0, 1, 0, 1, 60, 80, 2, 1, 0, 1, 60, 62,
+ 0, 1, 0, 1, 40, 76, 2, 1, 0, 1, 40, 62,
+ 0, 1, 0, 1, 44, 76, 2, 1, 0, 1, 44, 62,
+ 0, 1, 0, 1, 48, 76, 2, 1, 0, 1, 48, 62,
+ 0, 1, 0, 1, 52, 76, 2, 1, 0, 1, 52, 62,
+ 0, 1, 0, 1, 56, 76, 2, 1, 0, 1, 56, 62,
+ 0, 1, 0, 1, 60, 76, 2, 1, 0, 1, 60, 62,
0, 1, 0, 1, 64, 74, 2, 1, 0, 1, 64, 62,
0, 1, 0, 1, 100, 72, 2, 1, 0, 1, 100, 62,
- 0, 1, 0, 1, 104, 80, 2, 1, 0, 1, 104, 62,
- 0, 1, 0, 1, 108, 80, 2, 1, 0, 1, 108, 62,
- 0, 1, 0, 1, 112, 80, 2, 1, 0, 1, 112, 62,
- 0, 1, 0, 1, 116, 80, 2, 1, 0, 1, 116, 62,
- 0, 1, 0, 1, 120, 80, 2, 1, 0, 1, 120, 62,
- 0, 1, 0, 1, 124, 80, 2, 1, 0, 1, 124, 62,
- 0, 1, 0, 1, 128, 80, 2, 1, 0, 1, 128, 62,
- 0, 1, 0, 1, 132, 80, 2, 1, 0, 1, 132, 62,
- 0, 1, 0, 1, 136, 80, 2, 1, 0, 1, 136, 62,
+ 0, 1, 0, 1, 104, 76, 2, 1, 0, 1, 104, 62,
+ 0, 1, 0, 1, 108, 76, 2, 1, 0, 1, 108, 62,
+ 0, 1, 0, 1, 112, 76, 2, 1, 0, 1, 112, 62,
+ 0, 1, 0, 1, 116, 76, 2, 1, 0, 1, 116, 62,
+ 0, 1, 0, 1, 120, 76, 2, 1, 0, 1, 120, 62,
+ 0, 1, 0, 1, 124, 76, 2, 1, 0, 1, 124, 62,
+ 0, 1, 0, 1, 128, 76, 2, 1, 0, 1, 128, 62,
+ 0, 1, 0, 1, 132, 76, 2, 1, 0, 1, 132, 62,
+ 0, 1, 0, 1, 136, 76, 2, 1, 0, 1, 136, 62,
0, 1, 0, 1, 140, 72, 2, 1, 0, 1, 140, 62,
- 0, 1, 0, 1, 144, 80, 2, 1, 0, 1, 144, 127,
- 0, 1, 0, 1, 149, 80, 2, 1, 0, 1, 149, 127,
- 0, 1, 0, 1, 153, 80, 2, 1, 0, 1, 153, 127,
- 0, 1, 0, 1, 157, 80, 2, 1, 0, 1, 157, 127,
- 0, 1, 0, 1, 161, 80, 2, 1, 0, 1, 161, 127,
- 0, 1, 0, 1, 165, 80, 2, 1, 0, 1, 165, 127,
+ 0, 1, 0, 1, 144, 76, 2, 1, 0, 1, 144, 127,
+ 0, 1, 0, 1, 149, 76, 2, 1, 0, 1, 149, -128,
+ 0, 1, 0, 1, 153, 76, 2, 1, 0, 1, 153, -128,
+ 0, 1, 0, 1, 157, 76, 2, 1, 0, 1, 157, -128,
+ 0, 1, 0, 1, 161, 76, 2, 1, 0, 1, 161, -128,
+ 0, 1, 0, 1, 165, 76, 2, 1, 0, 1, 165, -128,
0, 1, 0, 2, 36, 72, 2, 1, 0, 2, 36, 62,
- 0, 1, 0, 2, 40, 80, 2, 1, 0, 2, 40, 62,
- 0, 1, 0, 2, 44, 80, 2, 1, 0, 2, 44, 62,
- 0, 1, 0, 2, 48, 80, 2, 1, 0, 2, 48, 62,
- 0, 1, 0, 2, 52, 80, 2, 1, 0, 2, 52, 62,
- 0, 1, 0, 2, 56, 80, 2, 1, 0, 2, 56, 62,
- 0, 1, 0, 2, 60, 80, 2, 1, 0, 2, 60, 62,
+ 0, 1, 0, 2, 40, 76, 2, 1, 0, 2, 40, 62,
+ 0, 1, 0, 2, 44, 76, 2, 1, 0, 2, 44, 62,
+ 0, 1, 0, 2, 48, 76, 2, 1, 0, 2, 48, 62,
+ 0, 1, 0, 2, 52, 76, 2, 1, 0, 2, 52, 62,
+ 0, 1, 0, 2, 56, 76, 2, 1, 0, 2, 56, 62,
+ 0, 1, 0, 2, 60, 76, 2, 1, 0, 2, 60, 62,
0, 1, 0, 2, 64, 74, 2, 1, 0, 2, 64, 62,
0, 1, 0, 2, 100, 70, 2, 1, 0, 2, 100, 62,
- 0, 1, 0, 2, 104, 80, 2, 1, 0, 2, 104, 62,
- 0, 1, 0, 2, 108, 80, 2, 1, 0, 2, 108, 62,
- 0, 1, 0, 2, 112, 80, 2, 1, 0, 2, 112, 62,
- 0, 1, 0, 2, 116, 80, 2, 1, 0, 2, 116, 62,
- 0, 1, 0, 2, 120, 80, 2, 1, 0, 2, 120, 62,
- 0, 1, 0, 2, 124, 80, 2, 1, 0, 2, 124, 62,
- 0, 1, 0, 2, 128, 80, 2, 1, 0, 2, 128, 62,
- 0, 1, 0, 2, 132, 80, 2, 1, 0, 2, 132, 62,
- 0, 1, 0, 2, 136, 80, 2, 1, 0, 2, 136, 62,
+ 0, 1, 0, 2, 104, 76, 2, 1, 0, 2, 104, 62,
+ 0, 1, 0, 2, 108, 76, 2, 1, 0, 2, 108, 62,
+ 0, 1, 0, 2, 112, 76, 2, 1, 0, 2, 112, 62,
+ 0, 1, 0, 2, 116, 76, 2, 1, 0, 2, 116, 62,
+ 0, 1, 0, 2, 120, 76, 2, 1, 0, 2, 120, 62,
+ 0, 1, 0, 2, 124, 76, 2, 1, 0, 2, 124, 62,
+ 0, 1, 0, 2, 128, 76, 2, 1, 0, 2, 128, 62,
+ 0, 1, 0, 2, 132, 76, 2, 1, 0, 2, 132, 62,
+ 0, 1, 0, 2, 136, 76, 2, 1, 0, 2, 136, 62,
0, 1, 0, 2, 140, 70, 2, 1, 0, 2, 140, 62,
- 0, 1, 0, 2, 144, 80, 2, 1, 0, 2, 144, 127,
- 0, 1, 0, 2, 149, 80, 2, 1, 0, 2, 149, 127,
- 0, 1, 0, 2, 153, 80, 2, 1, 0, 2, 153, 127,
- 0, 1, 0, 2, 157, 80, 2, 1, 0, 2, 157, 127,
- 0, 1, 0, 2, 161, 80, 2, 1, 0, 2, 161, 127,
- 0, 1, 0, 2, 165, 80, 2, 1, 0, 2, 165, 127,
+ 0, 1, 0, 2, 144, 76, 2, 1, 0, 2, 144, 127,
+ 0, 1, 0, 2, 149, 76, 2, 1, 0, 2, 149, -128,
+ 0, 1, 0, 2, 153, 76, 2, 1, 0, 2, 153, -128,
+ 0, 1, 0, 2, 157, 76, 2, 1, 0, 2, 157, -128,
+ 0, 1, 0, 2, 161, 76, 2, 1, 0, 2, 161, -128,
+ 0, 1, 0, 2, 165, 76, 2, 1, 0, 2, 165, -128,
0, 1, 0, 3, 36, 68, 2, 1, 0, 3, 36, 38,
0, 1, 0, 3, 40, 68, 2, 1, 0, 3, 40, 38,
0, 1, 0, 3, 44, 68, 2, 1, 0, 3, 44, 38,
@@ -9558,23 +9558,23 @@ static const u8 rtw8822c_txpwr_lmt_type0[] = {
0, 1, 0, 3, 136, 68, 2, 1, 0, 3, 136, 38,
0, 1, 0, 3, 140, 60, 2, 1, 0, 3, 140, 38,
0, 1, 0, 3, 144, 68, 2, 1, 0, 3, 144, 127,
- 0, 1, 0, 3, 149, 80, 2, 1, 0, 3, 149, 127,
- 0, 1, 0, 3, 153, 80, 2, 1, 0, 3, 153, 127,
- 0, 1, 0, 3, 157, 80, 2, 1, 0, 3, 157, 127,
- 0, 1, 0, 3, 161, 80, 2, 1, 0, 3, 161, 127,
- 0, 1, 0, 3, 165, 80, 2, 1, 0, 3, 165, 127,
+ 0, 1, 0, 3, 149, 76, 2, 1, 0, 3, 149, -128,
+ 0, 1, 0, 3, 153, 76, 2, 1, 0, 3, 153, -128,
+ 0, 1, 0, 3, 157, 76, 2, 1, 0, 3, 157, -128,
+ 0, 1, 0, 3, 161, 76, 2, 1, 0, 3, 161, -128,
+ 0, 1, 0, 3, 165, 76, 2, 1, 0, 3, 165, -128,
0, 1, 1, 2, 38, 66, 2, 1, 1, 2, 38, 64,
0, 1, 1, 2, 46, 72, 2, 1, 1, 2, 46, 64,
0, 1, 1, 2, 54, 72, 2, 1, 1, 2, 54, 64,
0, 1, 1, 2, 62, 64, 2, 1, 1, 2, 62, 64,
0, 1, 1, 2, 102, 58, 2, 1, 1, 2, 102, 64,
- 0, 1, 1, 2, 110, 74, 2, 1, 1, 2, 110, 64,
- 0, 1, 1, 2, 118, 74, 2, 1, 1, 2, 118, 64,
- 0, 1, 1, 2, 126, 74, 2, 1, 1, 2, 126, 64,
- 0, 1, 1, 2, 134, 74, 2, 1, 1, 2, 134, 64,
- 0, 1, 1, 2, 142, 74, 2, 1, 1, 2, 142, 127,
- 0, 1, 1, 2, 151, 74, 2, 1, 1, 2, 151, 127,
- 0, 1, 1, 2, 159, 74, 2, 1, 1, 2, 159, 127,
+ 0, 1, 1, 2, 110, 72, 2, 1, 1, 2, 110, 64,
+ 0, 1, 1, 2, 118, 72, 2, 1, 1, 2, 118, 64,
+ 0, 1, 1, 2, 126, 72, 2, 1, 1, 2, 126, 64,
+ 0, 1, 1, 2, 134, 72, 2, 1, 1, 2, 134, 64,
+ 0, 1, 1, 2, 142, 72, 2, 1, 1, 2, 142, 127,
+ 0, 1, 1, 2, 151, 72, 2, 1, 1, 2, 151, -128,
+ 0, 1, 1, 2, 159, 72, 2, 1, 1, 2, 159, -128,
0, 1, 1, 3, 38, 60, 2, 1, 1, 3, 38, 40,
0, 1, 1, 3, 46, 68, 2, 1, 1, 3, 46, 40,
0, 1, 1, 3, 54, 68, 2, 1, 1, 3, 54, 40,
@@ -9585,20 +9585,703 @@ static const u8 rtw8822c_txpwr_lmt_type0[] = {
0, 1, 1, 3, 126, 68, 2, 1, 1, 3, 126, 40,
0, 1, 1, 3, 134, 68, 2, 1, 1, 3, 134, 40,
0, 1, 1, 3, 142, 68, 2, 1, 1, 3, 142, 127,
- 0, 1, 1, 3, 151, 74, 2, 1, 1, 3, 151, 127,
- 0, 1, 1, 3, 159, 74, 2, 1, 1, 3, 159, 127,
+ 0, 1, 1, 3, 151, 72, 2, 1, 1, 3, 151, -128,
+ 0, 1, 1, 3, 159, 72, 2, 1, 1, 3, 159, -128,
0, 1, 2, 4, 42, 64, 2, 1, 2, 4, 42, 64,
0, 1, 2, 4, 58, 62, 2, 1, 2, 4, 58, 64,
0, 1, 2, 4, 106, 58, 2, 1, 2, 4, 106, 64,
0, 1, 2, 4, 122, 72, 2, 1, 2, 4, 122, 64,
0, 1, 2, 4, 138, 72, 2, 1, 2, 4, 138, 127,
- 0, 1, 2, 4, 155, 72, 2, 1, 2, 4, 155, 127,
+ 0, 1, 2, 4, 155, 72, 2, 1, 2, 4, 155, -128,
0, 1, 2, 5, 42, 54, 2, 1, 2, 5, 42, 40,
0, 1, 2, 5, 58, 52, 2, 1, 2, 5, 58, 40,
0, 1, 2, 5, 106, 50, 2, 1, 2, 5, 106, 40,
0, 1, 2, 5, 122, 66, 2, 1, 2, 5, 122, 40,
0, 1, 2, 5, 138, 66, 2, 1, 2, 5, 138, 127,
- 0, 1, 2, 5, 155, 62, 2, 1, 2, 5, 155, 127
+ 0, 1, 2, 5, 155, 62, 2, 1, 2, 5, 155, -128,
+ 1, 0, 0, 0, 1, 68, 3, 0, 0, 0, 1, 72,
+ 4, 0, 0, 0, 1, 76, 5, 0, 0, 0, 1, 60,
+ 6, 0, 0, 0, 1, 72, 7, 0, 0, 0, 1, 60,
+ 8, 0, 0, 0, 1, 72, 1, 0, 0, 0, 2, 68,
+ 3, 0, 0, 0, 2, 72, 4, 0, 0, 0, 2, 76,
+ 5, 0, 0, 0, 2, 60, 6, 0, 0, 0, 2, 72,
+ 7, 0, 0, 0, 2, 60, 8, 0, 0, 0, 2, 72,
+ 1, 0, 0, 0, 3, 68, 3, 0, 0, 0, 3, 76,
+ 4, 0, 0, 0, 3, 76, 5, 0, 0, 0, 3, 60,
+ 6, 0, 0, 0, 3, 76, 7, 0, 0, 0, 3, 60,
+ 8, 0, 0, 0, 3, 76, 1, 0, 0, 0, 4, 68,
+ 3, 0, 0, 0, 4, 76, 4, 0, 0, 0, 4, 76,
+ 5, 0, 0, 0, 4, 60, 6, 0, 0, 0, 4, 76,
+ 7, 0, 0, 0, 4, 60, 8, 0, 0, 0, 4, 76,
+ 1, 0, 0, 0, 5, 68, 3, 0, 0, 0, 5, 76,
+ 4, 0, 0, 0, 5, 76, 5, 0, 0, 0, 5, 60,
+ 6, 0, 0, 0, 5, 76, 7, 0, 0, 0, 5, 60,
+ 8, 0, 0, 0, 5, 76, 1, 0, 0, 0, 6, 68,
+ 3, 0, 0, 0, 6, 76, 4, 0, 0, 0, 6, 76,
+ 5, 0, 0, 0, 6, 60, 6, 0, 0, 0, 6, 76,
+ 7, 0, 0, 0, 6, 60, 8, 0, 0, 0, 6, 76,
+ 1, 0, 0, 0, 7, 68, 3, 0, 0, 0, 7, 76,
+ 4, 0, 0, 0, 7, 76, 5, 0, 0, 0, 7, 60,
+ 6, 0, 0, 0, 7, 76, 7, 0, 0, 0, 7, 60,
+ 8, 0, 0, 0, 7, 76, 1, 0, 0, 0, 8, 68,
+ 3, 0, 0, 0, 8, 76, 4, 0, 0, 0, 8, 76,
+ 5, 0, 0, 0, 8, 60, 6, 0, 0, 0, 8, 76,
+ 7, 0, 0, 0, 8, 60, 8, 0, 0, 0, 8, 76,
+ 1, 0, 0, 0, 9, 68, 3, 0, 0, 0, 9, 76,
+ 4, 0, 0, 0, 9, 76, 5, 0, 0, 0, 9, 60,
+ 6, 0, 0, 0, 9, 76, 7, 0, 0, 0, 9, 60,
+ 8, 0, 0, 0, 9, 76, 1, 0, 0, 0, 10, 68,
+ 3, 0, 0, 0, 10, 72, 4, 0, 0, 0, 10, 76,
+ 5, 0, 0, 0, 10, 60, 6, 0, 0, 0, 10, 72,
+ 7, 0, 0, 0, 10, 60, 8, 0, 0, 0, 10, 72,
+ 1, 0, 0, 0, 11, 68, 3, 0, 0, 0, 11, 72,
+ 4, 0, 0, 0, 11, 76, 5, 0, 0, 0, 11, 60,
+ 6, 0, 0, 0, 11, 72, 7, 0, 0, 0, 11, 60,
+ 8, 0, 0, 0, 11, 72, 1, 0, 0, 0, 12, 68,
+ 3, 0, 0, 0, 12, 52, 4, 0, 0, 0, 12, 76,
+ 5, 0, 0, 0, 12, 60, 6, 0, 0, 0, 12, 52,
+ 7, 0, 0, 0, 12, 60, 8, 0, 0, 0, 12, 52,
+ 1, 0, 0, 0, 13, 68, 3, 0, 0, 0, 13, 48,
+ 4, 0, 0, 0, 13, 76, 5, 0, 0, 0, 13, 60,
+ 6, 0, 0, 0, 13, 48, 7, 0, 0, 0, 13, 60,
+ 8, 0, 0, 0, 13, 48, 1, 0, 0, 0, 14, 68,
+ 3, 0, 0, 0, 14, 127, 4, 0, 0, 0, 14, 127,
+ 5, 0, 0, 0, 14, 127, 6, 0, 0, 0, 14, 127,
+ 7, 0, 0, 0, 14, 127, 8, 0, 0, 0, 14, 127,
+ 1, 0, 0, 1, 1, 76, 3, 0, 0, 1, 1, 52,
+ 4, 0, 0, 1, 1, 76, 5, 0, 0, 1, 1, 60,
+ 6, 0, 0, 1, 1, 52, 7, 0, 0, 1, 1, 60,
+ 8, 0, 0, 1, 1, 52, 1, 0, 0, 1, 2, 76,
+ 3, 0, 0, 1, 2, 60, 4, 0, 0, 1, 2, 76,
+ 5, 0, 0, 1, 2, 60, 6, 0, 0, 1, 2, 60,
+ 7, 0, 0, 1, 2, 60, 8, 0, 0, 1, 2, 60,
+ 1, 0, 0, 1, 3, 76, 3, 0, 0, 1, 3, 64,
+ 4, 0, 0, 1, 3, 76, 5, 0, 0, 1, 3, 60,
+ 6, 0, 0, 1, 3, 64, 7, 0, 0, 1, 3, 60,
+ 8, 0, 0, 1, 3, 64, 1, 0, 0, 1, 4, 76,
+ 3, 0, 0, 1, 4, 68, 4, 0, 0, 1, 4, 76,
+ 5, 0, 0, 1, 4, 60, 6, 0, 0, 1, 4, 68,
+ 7, 0, 0, 1, 4, 60, 8, 0, 0, 1, 4, 68,
+ 1, 0, 0, 1, 5, 76, 3, 0, 0, 1, 5, 76,
+ 4, 0, 0, 1, 5, 76, 5, 0, 0, 1, 5, 60,
+ 6, 0, 0, 1, 5, 76, 7, 0, 0, 1, 5, 60,
+ 8, 0, 0, 1, 5, 76, 1, 0, 0, 1, 6, 76,
+ 3, 0, 0, 1, 6, 76, 4, 0, 0, 1, 6, 76,
+ 5, 0, 0, 1, 6, 60, 6, 0, 0, 1, 6, 76,
+ 7, 0, 0, 1, 6, 60, 8, 0, 0, 1, 6, 76,
+ 1, 0, 0, 1, 7, 76, 3, 0, 0, 1, 7, 76,
+ 4, 0, 0, 1, 7, 76, 5, 0, 0, 1, 7, 60,
+ 6, 0, 0, 1, 7, 76, 7, 0, 0, 1, 7, 60,
+ 8, 0, 0, 1, 7, 76, 1, 0, 0, 1, 8, 76,
+ 3, 0, 0, 1, 8, 68, 4, 0, 0, 1, 8, 76,
+ 5, 0, 0, 1, 8, 60, 6, 0, 0, 1, 8, 68,
+ 7, 0, 0, 1, 8, 60, 8, 0, 0, 1, 8, 68,
+ 1, 0, 0, 1, 9, 76, 3, 0, 0, 1, 9, 64,
+ 4, 0, 0, 1, 9, 76, 5, 0, 0, 1, 9, 60,
+ 6, 0, 0, 1, 9, 64, 7, 0, 0, 1, 9, 60,
+ 8, 0, 0, 1, 9, 64, 1, 0, 0, 1, 10, 76,
+ 3, 0, 0, 1, 10, 60, 4, 0, 0, 1, 10, 76,
+ 5, 0, 0, 1, 10, 60, 6, 0, 0, 1, 10, 60,
+ 7, 0, 0, 1, 10, 60, 8, 0, 0, 1, 10, 60,
+ 1, 0, 0, 1, 11, 76, 3, 0, 0, 1, 11, 52,
+ 4, 0, 0, 1, 11, 76, 5, 0, 0, 1, 11, 60,
+ 6, 0, 0, 1, 11, 52, 7, 0, 0, 1, 11, 60,
+ 8, 0, 0, 1, 11, 52, 1, 0, 0, 1, 12, 76,
+ 3, 0, 0, 1, 12, 40, 4, 0, 0, 1, 12, 76,
+ 5, 0, 0, 1, 12, 60, 6, 0, 0, 1, 12, 40,
+ 7, 0, 0, 1, 12, 60, 8, 0, 0, 1, 12, 40,
+ 1, 0, 0, 1, 13, 76, 3, 0, 0, 1, 13, 28,
+ 4, 0, 0, 1, 13, 70, 5, 0, 0, 1, 13, 60,
+ 6, 0, 0, 1, 13, 28, 7, 0, 0, 1, 13, 60,
+ 8, 0, 0, 1, 13, 28, 1, 0, 0, 1, 14, 127,
+ 3, 0, 0, 1, 14, 127, 4, 0, 0, 1, 14, 127,
+ 5, 0, 0, 1, 14, 127, 6, 0, 0, 1, 14, 127,
+ 7, 0, 0, 1, 14, 127, 8, 0, 0, 1, 14, 127,
+ 1, 0, 0, 2, 1, 76, 3, 0, 0, 2, 1, 52,
+ 4, 0, 0, 2, 1, 76, 5, 0, 0, 2, 1, 60,
+ 6, 0, 0, 2, 1, 52, 7, 0, 0, 2, 1, 60,
+ 8, 0, 0, 2, 1, 52, 1, 0, 0, 2, 2, 76,
+ 3, 0, 0, 2, 2, 60, 4, 0, 0, 2, 2, 76,
+ 5, 0, 0, 2, 2, 60, 6, 0, 0, 2, 2, 60,
+ 7, 0, 0, 2, 2, 60, 8, 0, 0, 2, 2, 60,
+ 1, 0, 0, 2, 3, 76, 3, 0, 0, 2, 3, 64,
+ 4, 0, 0, 2, 3, 76, 5, 0, 0, 2, 3, 60,
+ 6, 0, 0, 2, 3, 64, 7, 0, 0, 2, 3, 60,
+ 8, 0, 0, 2, 3, 64, 1, 0, 0, 2, 4, 76,
+ 3, 0, 0, 2, 4, 68, 4, 0, 0, 2, 4, 76,
+ 5, 0, 0, 2, 4, 60, 6, 0, 0, 2, 4, 68,
+ 7, 0, 0, 2, 4, 60, 8, 0, 0, 2, 4, 68,
+ 1, 0, 0, 2, 5, 76, 3, 0, 0, 2, 5, 76,
+ 4, 0, 0, 2, 5, 76, 5, 0, 0, 2, 5, 60,
+ 6, 0, 0, 2, 5, 76, 7, 0, 0, 2, 5, 60,
+ 8, 0, 0, 2, 5, 76, 1, 0, 0, 2, 6, 76,
+ 3, 0, 0, 2, 6, 76, 4, 0, 0, 2, 6, 76,
+ 5, 0, 0, 2, 6, 60, 6, 0, 0, 2, 6, 76,
+ 7, 0, 0, 2, 6, 60, 8, 0, 0, 2, 6, 76,
+ 1, 0, 0, 2, 7, 76, 3, 0, 0, 2, 7, 76,
+ 4, 0, 0, 2, 7, 76, 5, 0, 0, 2, 7, 60,
+ 6, 0, 0, 2, 7, 76, 7, 0, 0, 2, 7, 60,
+ 8, 0, 0, 2, 7, 76, 1, 0, 0, 2, 8, 76,
+ 3, 0, 0, 2, 8, 68, 4, 0, 0, 2, 8, 76,
+ 5, 0, 0, 2, 8, 60, 6, 0, 0, 2, 8, 68,
+ 7, 0, 0, 2, 8, 60, 8, 0, 0, 2, 8, 68,
+ 1, 0, 0, 2, 9, 76, 3, 0, 0, 2, 9, 64,
+ 4, 0, 0, 2, 9, 76, 5, 0, 0, 2, 9, 60,
+ 6, 0, 0, 2, 9, 64, 7, 0, 0, 2, 9, 60,
+ 8, 0, 0, 2, 9, 64, 1, 0, 0, 2, 10, 76,
+ 3, 0, 0, 2, 10, 60, 4, 0, 0, 2, 10, 76,
+ 5, 0, 0, 2, 10, 60, 6, 0, 0, 2, 10, 60,
+ 7, 0, 0, 2, 10, 60, 8, 0, 0, 2, 10, 60,
+ 1, 0, 0, 2, 11, 76, 3, 0, 0, 2, 11, 52,
+ 4, 0, 0, 2, 11, 76, 5, 0, 0, 2, 11, 60,
+ 6, 0, 0, 2, 11, 52, 7, 0, 0, 2, 11, 60,
+ 8, 0, 0, 2, 11, 52, 1, 0, 0, 2, 12, 76,
+ 3, 0, 0, 2, 12, 40, 4, 0, 0, 2, 12, 76,
+ 5, 0, 0, 2, 12, 60, 6, 0, 0, 2, 12, 40,
+ 7, 0, 0, 2, 12, 60, 8, 0, 0, 2, 12, 40,
+ 1, 0, 0, 2, 13, 76, 3, 0, 0, 2, 13, 28,
+ 4, 0, 0, 2, 13, 72, 5, 0, 0, 2, 13, 60,
+ 6, 0, 0, 2, 13, 28, 7, 0, 0, 2, 13, 60,
+ 8, 0, 0, 2, 13, 28, 1, 0, 0, 2, 14, 127,
+ 3, 0, 0, 2, 14, 127, 4, 0, 0, 2, 14, 127,
+ 5, 0, 0, 2, 14, 127, 6, 0, 0, 2, 14, 127,
+ 7, 0, 0, 2, 14, 127, 8, 0, 0, 2, 14, 127,
+ 1, 0, 0, 3, 1, 66, 3, 0, 0, 3, 1, 52,
+ 4, 0, 0, 3, 1, 68, 5, 0, 0, 3, 1, 36,
+ 6, 0, 0, 3, 1, 52, 7, 0, 0, 3, 1, 36,
+ 8, 0, 0, 3, 1, 52, 1, 0, 0, 3, 2, 66,
+ 3, 0, 0, 3, 2, 60, 4, 0, 0, 3, 2, 70,
+ 5, 0, 0, 3, 2, 36, 6, 0, 0, 3, 2, 60,
+ 7, 0, 0, 3, 2, 36, 8, 0, 0, 3, 2, 60,
+ 1, 0, 0, 3, 3, 66, 3, 0, 0, 3, 3, 64,
+ 4, 0, 0, 3, 3, 70, 5, 0, 0, 3, 3, 36,
+ 6, 0, 0, 3, 3, 64, 7, 0, 0, 3, 3, 36,
+ 8, 0, 0, 3, 3, 64, 1, 0, 0, 3, 4, 66,
+ 3, 0, 0, 3, 4, 68, 4, 0, 0, 3, 4, 70,
+ 5, 0, 0, 3, 4, 36, 6, 0, 0, 3, 4, 68,
+ 7, 0, 0, 3, 4, 36, 8, 0, 0, 3, 4, 68,
+ 1, 0, 0, 3, 5, 66, 3, 0, 0, 3, 5, 76,
+ 4, 0, 0, 3, 5, 70, 5, 0, 0, 3, 5, 36,
+ 6, 0, 0, 3, 5, 76, 7, 0, 0, 3, 5, 36,
+ 8, 0, 0, 3, 5, 76, 1, 0, 0, 3, 6, 66,
+ 3, 0, 0, 3, 6, 76, 4, 0, 0, 3, 6, 70,
+ 5, 0, 0, 3, 6, 36, 6, 0, 0, 3, 6, 76,
+ 7, 0, 0, 3, 6, 36, 8, 0, 0, 3, 6, 76,
+ 1, 0, 0, 3, 7, 66, 3, 0, 0, 3, 7, 76,
+ 4, 0, 0, 3, 7, 70, 5, 0, 0, 3, 7, 36,
+ 6, 0, 0, 3, 7, 76, 7, 0, 0, 3, 7, 36,
+ 8, 0, 0, 3, 7, 76, 1, 0, 0, 3, 8, 66,
+ 3, 0, 0, 3, 8, 68, 4, 0, 0, 3, 8, 70,
+ 5, 0, 0, 3, 8, 36, 6, 0, 0, 3, 8, 68,
+ 7, 0, 0, 3, 8, 36, 8, 0, 0, 3, 8, 68,
+ 1, 0, 0, 3, 9, 66, 3, 0, 0, 3, 9, 64,
+ 4, 0, 0, 3, 9, 70, 5, 0, 0, 3, 9, 36,
+ 6, 0, 0, 3, 9, 64, 7, 0, 0, 3, 9, 36,
+ 8, 0, 0, 3, 9, 64, 1, 0, 0, 3, 10, 66,
+ 3, 0, 0, 3, 10, 60, 4, 0, 0, 3, 10, 70,
+ 5, 0, 0, 3, 10, 36, 6, 0, 0, 3, 10, 60,
+ 7, 0, 0, 3, 10, 36, 8, 0, 0, 3, 10, 60,
+ 1, 0, 0, 3, 11, 66, 3, 0, 0, 3, 11, 52,
+ 4, 0, 0, 3, 11, 70, 5, 0, 0, 3, 11, 36,
+ 6, 0, 0, 3, 11, 52, 7, 0, 0, 3, 11, 36,
+ 8, 0, 0, 3, 11, 52, 1, 0, 0, 3, 12, 66,
+ 3, 0, 0, 3, 12, 40, 4, 0, 0, 3, 12, 70,
+ 5, 0, 0, 3, 12, 36, 6, 0, 0, 3, 12, 40,
+ 7, 0, 0, 3, 12, 36, 8, 0, 0, 3, 12, 40,
+ 1, 0, 0, 3, 13, 66, 3, 0, 0, 3, 13, 28,
+ 4, 0, 0, 3, 13, 62, 5, 0, 0, 3, 13, 36,
+ 6, 0, 0, 3, 13, 28, 7, 0, 0, 3, 13, 36,
+ 8, 0, 0, 3, 13, 28, 1, 0, 0, 3, 14, 127,
+ 3, 0, 0, 3, 14, 127, 4, 0, 0, 3, 14, 127,
+ 5, 0, 0, 3, 14, 127, 6, 0, 0, 3, 14, 127,
+ 7, 0, 0, 3, 14, 127, 8, 0, 0, 3, 14, 127,
+ 1, 0, 1, 2, 1, 127, 3, 0, 1, 2, 1, 127,
+ 4, 0, 1, 2, 1, 127, 5, 0, 1, 2, 1, 127,
+ 6, 0, 1, 2, 1, 127, 7, 0, 1, 2, 1, 127,
+ 8, 0, 1, 2, 1, 127, 1, 0, 1, 2, 2, 127,
+ 3, 0, 1, 2, 2, 127, 4, 0, 1, 2, 2, 127,
+ 5, 0, 1, 2, 2, 127, 6, 0, 1, 2, 2, 127,
+ 7, 0, 1, 2, 2, 127, 8, 0, 1, 2, 2, 127,
+ 1, 0, 1, 2, 3, 72, 3, 0, 1, 2, 3, 52,
+ 4, 0, 1, 2, 3, 72, 5, 0, 1, 2, 3, 60,
+ 6, 0, 1, 2, 3, 52, 7, 0, 1, 2, 3, 60,
+ 8, 0, 1, 2, 3, 52, 1, 0, 1, 2, 4, 72,
+ 3, 0, 1, 2, 4, 52, 4, 0, 1, 2, 4, 72,
+ 5, 0, 1, 2, 4, 60, 6, 0, 1, 2, 4, 52,
+ 7, 0, 1, 2, 4, 60, 8, 0, 1, 2, 4, 52,
+ 1, 0, 1, 2, 5, 72, 3, 0, 1, 2, 5, 60,
+ 4, 0, 1, 2, 5, 72, 5, 0, 1, 2, 5, 60,
+ 6, 0, 1, 2, 5, 60, 7, 0, 1, 2, 5, 60,
+ 8, 0, 1, 2, 5, 60, 1, 0, 1, 2, 6, 72,
+ 3, 0, 1, 2, 6, 64, 4, 0, 1, 2, 6, 72,
+ 5, 0, 1, 2, 6, 60, 6, 0, 1, 2, 6, 64,
+ 7, 0, 1, 2, 6, 60, 8, 0, 1, 2, 6, 64,
+ 1, 0, 1, 2, 7, 72, 3, 0, 1, 2, 7, 60,
+ 4, 0, 1, 2, 7, 72, 5, 0, 1, 2, 7, 60,
+ 6, 0, 1, 2, 7, 60, 7, 0, 1, 2, 7, 60,
+ 8, 0, 1, 2, 7, 60, 1, 0, 1, 2, 8, 72,
+ 3, 0, 1, 2, 8, 52, 4, 0, 1, 2, 8, 72,
+ 5, 0, 1, 2, 8, 60, 6, 0, 1, 2, 8, 52,
+ 7, 0, 1, 2, 8, 60, 8, 0, 1, 2, 8, 52,
+ 1, 0, 1, 2, 9, 72, 3, 0, 1, 2, 9, 52,
+ 4, 0, 1, 2, 9, 72, 5, 0, 1, 2, 9, 60,
+ 6, 0, 1, 2, 9, 52, 7, 0, 1, 2, 9, 60,
+ 8, 0, 1, 2, 9, 52, 1, 0, 1, 2, 10, 72,
+ 3, 0, 1, 2, 10, 40, 4, 0, 1, 2, 10, 72,
+ 5, 0, 1, 2, 10, 60, 6, 0, 1, 2, 10, 40,
+ 7, 0, 1, 2, 10, 60, 8, 0, 1, 2, 10, 40,
+ 1, 0, 1, 2, 11, 72, 3, 0, 1, 2, 11, 28,
+ 4, 0, 1, 2, 11, 70, 5, 0, 1, 2, 11, 60,
+ 6, 0, 1, 2, 11, 28, 7, 0, 1, 2, 11, 60,
+ 8, 0, 1, 2, 11, 28, 1, 0, 1, 2, 12, 127,
+ 3, 0, 1, 2, 12, 127, 4, 0, 1, 2, 12, 127,
+ 5, 0, 1, 2, 12, 127, 6, 0, 1, 2, 12, 127,
+ 7, 0, 1, 2, 12, 127, 8, 0, 1, 2, 12, 127,
+ 1, 0, 1, 2, 13, 127, 3, 0, 1, 2, 13, 127,
+ 4, 0, 1, 2, 13, 127, 5, 0, 1, 2, 13, 127,
+ 6, 0, 1, 2, 13, 127, 7, 0, 1, 2, 13, 127,
+ 8, 0, 1, 2, 13, 127, 1, 0, 1, 2, 14, 127,
+ 3, 0, 1, 2, 14, 127, 4, 0, 1, 2, 14, 127,
+ 5, 0, 1, 2, 14, 127, 6, 0, 1, 2, 14, 127,
+ 7, 0, 1, 2, 14, 127, 8, 0, 1, 2, 14, 127,
+ 1, 0, 1, 3, 1, 127, 3, 0, 1, 3, 1, 127,
+ 4, 0, 1, 3, 1, 127, 5, 0, 1, 3, 1, 127,
+ 6, 0, 1, 3, 1, 127, 7, 0, 1, 3, 1, 127,
+ 8, 0, 1, 3, 1, 127, 1, 0, 1, 3, 2, 127,
+ 3, 0, 1, 3, 2, 127, 4, 0, 1, 3, 2, 127,
+ 5, 0, 1, 3, 2, 127, 6, 0, 1, 3, 2, 127,
+ 7, 0, 1, 3, 2, 127, 8, 0, 1, 3, 2, 127,
+ 1, 0, 1, 3, 3, 66, 3, 0, 1, 3, 3, 48,
+ 4, 0, 1, 3, 3, 66, 5, 0, 1, 3, 3, 36,
+ 6, 0, 1, 3, 3, 48, 7, 0, 1, 3, 3, 36,
+ 8, 0, 1, 3, 3, 48, 1, 0, 1, 3, 4, 66,
+ 3, 0, 1, 3, 4, 48, 4, 0, 1, 3, 4, 70,
+ 5, 0, 1, 3, 4, 36, 6, 0, 1, 3, 4, 48,
+ 7, 0, 1, 3, 4, 36, 8, 0, 1, 3, 4, 48,
+ 1, 0, 1, 3, 5, 66, 3, 0, 1, 3, 5, 60,
+ 4, 0, 1, 3, 5, 70, 5, 0, 1, 3, 5, 36,
+ 6, 0, 1, 3, 5, 60, 7, 0, 1, 3, 5, 36,
+ 8, 0, 1, 3, 5, 60, 1, 0, 1, 3, 6, 66,
+ 3, 0, 1, 3, 6, 64, 4, 0, 1, 3, 6, 70,
+ 5, 0, 1, 3, 6, 36, 6, 0, 1, 3, 6, 64,
+ 7, 0, 1, 3, 6, 36, 8, 0, 1, 3, 6, 64,
+ 1, 0, 1, 3, 7, 66, 3, 0, 1, 3, 7, 60,
+ 4, 0, 1, 3, 7, 70, 5, 0, 1, 3, 7, 36,
+ 6, 0, 1, 3, 7, 60, 7, 0, 1, 3, 7, 36,
+ 8, 0, 1, 3, 7, 60, 1, 0, 1, 3, 8, 66,
+ 3, 0, 1, 3, 8, 52, 4, 0, 1, 3, 8, 70,
+ 5, 0, 1, 3, 8, 36, 6, 0, 1, 3, 8, 52,
+ 7, 0, 1, 3, 8, 36, 8, 0, 1, 3, 8, 52,
+ 1, 0, 1, 3, 9, 66, 3, 0, 1, 3, 9, 52,
+ 4, 0, 1, 3, 9, 70, 5, 0, 1, 3, 9, 36,
+ 6, 0, 1, 3, 9, 52, 7, 0, 1, 3, 9, 36,
+ 8, 0, 1, 3, 9, 52, 1, 0, 1, 3, 10, 66,
+ 3, 0, 1, 3, 10, 40, 4, 0, 1, 3, 10, 70,
+ 5, 0, 1, 3, 10, 36, 6, 0, 1, 3, 10, 40,
+ 7, 0, 1, 3, 10, 36, 8, 0, 1, 3, 10, 40,
+ 1, 0, 1, 3, 11, 66, 3, 0, 1, 3, 11, 26,
+ 4, 0, 1, 3, 11, 66, 5, 0, 1, 3, 11, 36,
+ 6, 0, 1, 3, 11, 26, 7, 0, 1, 3, 11, 36,
+ 8, 0, 1, 3, 11, 26, 1, 0, 1, 3, 12, 127,
+ 3, 0, 1, 3, 12, 127, 4, 0, 1, 3, 12, 127,
+ 5, 0, 1, 3, 12, 127, 6, 0, 1, 3, 12, 127,
+ 7, 0, 1, 3, 12, 127, 8, 0, 1, 3, 12, 127,
+ 1, 0, 1, 3, 13, 127, 3, 0, 1, 3, 13, 127,
+ 4, 0, 1, 3, 13, 127, 5, 0, 1, 3, 13, 127,
+ 6, 0, 1, 3, 13, 127, 7, 0, 1, 3, 13, 127,
+ 8, 0, 1, 3, 13, 127, 1, 0, 1, 3, 14, 127,
+ 3, 0, 1, 3, 14, 127, 4, 0, 1, 3, 14, 127,
+ 5, 0, 1, 3, 14, 127, 6, 0, 1, 3, 14, 127,
+ 7, 0, 1, 3, 14, 127, 8, 0, 1, 3, 14, 127,
+ 1, 1, 0, 1, 36, 60, 3, 1, 0, 1, 36, 62,
+ 4, 1, 0, 1, 36, 76, 5, 1, 0, 1, 36, 62,
+ 6, 1, 0, 1, 36, 64, 7, 1, 0, 1, 36, 54,
+ 8, 1, 0, 1, 36, 62, 1, 1, 0, 1, 40, 62,
+ 3, 1, 0, 1, 40, 62, 4, 1, 0, 1, 40, 76,
+ 5, 1, 0, 1, 40, 62, 6, 1, 0, 1, 40, 64,
+ 7, 1, 0, 1, 40, 54, 8, 1, 0, 1, 40, 62,
+ 1, 1, 0, 1, 44, 62, 3, 1, 0, 1, 44, 62,
+ 4, 1, 0, 1, 44, 76, 5, 1, 0, 1, 44, 62,
+ 6, 1, 0, 1, 44, 64, 7, 1, 0, 1, 44, 54,
+ 8, 1, 0, 1, 44, 62, 1, 1, 0, 1, 48, 62,
+ 3, 1, 0, 1, 48, 62, 4, 1, 0, 1, 48, 76,
+ 5, 1, 0, 1, 48, 62, 6, 1, 0, 1, 48, 64,
+ 7, 1, 0, 1, 48, 54, 8, 1, 0, 1, 48, 62,
+ 1, 1, 0, 1, 52, 62, 3, 1, 0, 1, 52, 64,
+ 4, 1, 0, 1, 52, 76, 5, 1, 0, 1, 52, 62,
+ 6, 1, 0, 1, 52, 76, 7, 1, 0, 1, 52, 54,
+ 8, 1, 0, 1, 52, 76, 1, 1, 0, 1, 56, 62,
+ 3, 1, 0, 1, 56, 64, 4, 1, 0, 1, 56, 76,
+ 5, 1, 0, 1, 56, 62, 6, 1, 0, 1, 56, 76,
+ 7, 1, 0, 1, 56, 54, 8, 1, 0, 1, 56, 76,
+ 1, 1, 0, 1, 60, 62, 3, 1, 0, 1, 60, 64,
+ 4, 1, 0, 1, 60, 76, 5, 1, 0, 1, 60, 62,
+ 6, 1, 0, 1, 60, 76, 7, 1, 0, 1, 60, 54,
+ 8, 1, 0, 1, 60, 76, 1, 1, 0, 1, 64, 60,
+ 3, 1, 0, 1, 64, 64, 4, 1, 0, 1, 64, 76,
+ 5, 1, 0, 1, 64, 62, 6, 1, 0, 1, 64, 74,
+ 7, 1, 0, 1, 64, 54, 8, 1, 0, 1, 64, 74,
+ 1, 1, 0, 1, 100, 76, 3, 1, 0, 1, 100, 72,
+ 4, 1, 0, 1, 100, 76, 5, 1, 0, 1, 100, 62,
+ 6, 1, 0, 1, 100, 72, 7, 1, 0, 1, 100, 54,
+ 8, 1, 0, 1, 100, 72, 1, 1, 0, 1, 104, 76,
+ 3, 1, 0, 1, 104, 76, 4, 1, 0, 1, 104, 76,
+ 5, 1, 0, 1, 104, 62, 6, 1, 0, 1, 104, 76,
+ 7, 1, 0, 1, 104, 54, 8, 1, 0, 1, 104, 76,
+ 1, 1, 0, 1, 108, 76, 3, 1, 0, 1, 108, 76,
+ 4, 1, 0, 1, 108, 76, 5, 1, 0, 1, 108, 62,
+ 6, 1, 0, 1, 108, 76, 7, 1, 0, 1, 108, 54,
+ 8, 1, 0, 1, 108, 76, 1, 1, 0, 1, 112, 76,
+ 3, 1, 0, 1, 112, 76, 4, 1, 0, 1, 112, 76,
+ 5, 1, 0, 1, 112, 62, 6, 1, 0, 1, 112, 76,
+ 7, 1, 0, 1, 112, 54, 8, 1, 0, 1, 112, 76,
+ 1, 1, 0, 1, 116, 76, 3, 1, 0, 1, 116, 76,
+ 4, 1, 0, 1, 116, 76, 5, 1, 0, 1, 116, 62,
+ 6, 1, 0, 1, 116, 76, 7, 1, 0, 1, 116, 54,
+ 8, 1, 0, 1, 116, 76, 1, 1, 0, 1, 120, 76,
+ 3, 1, 0, 1, 120, 127, 4, 1, 0, 1, 120, 76,
+ 5, 1, 0, 1, 120, 127, 6, 1, 0, 1, 120, 76,
+ 7, 1, 0, 1, 120, 54, 8, 1, 0, 1, 120, 76,
+ 1, 1, 0, 1, 124, 76, 3, 1, 0, 1, 124, 127,
+ 4, 1, 0, 1, 124, 76, 5, 1, 0, 1, 124, 127,
+ 6, 1, 0, 1, 124, 76, 7, 1, 0, 1, 124, 54,
+ 8, 1, 0, 1, 124, 76, 1, 1, 0, 1, 128, 76,
+ 3, 1, 0, 1, 128, 127, 4, 1, 0, 1, 128, 76,
+ 5, 1, 0, 1, 128, 127, 6, 1, 0, 1, 128, 76,
+ 7, 1, 0, 1, 128, 54, 8, 1, 0, 1, 128, 76,
+ 1, 1, 0, 1, 132, 76, 3, 1, 0, 1, 132, 76,
+ 4, 1, 0, 1, 132, 76, 5, 1, 0, 1, 132, 62,
+ 6, 1, 0, 1, 132, 76, 7, 1, 0, 1, 132, 54,
+ 8, 1, 0, 1, 132, 76, 1, 1, 0, 1, 136, 76,
+ 3, 1, 0, 1, 136, 76, 4, 1, 0, 1, 136, 76,
+ 5, 1, 0, 1, 136, 62, 6, 1, 0, 1, 136, 76,
+ 7, 1, 0, 1, 136, 127, 8, 1, 0, 1, 136, 76,
+ 1, 1, 0, 1, 140, 76, 3, 1, 0, 1, 140, 72,
+ 4, 1, 0, 1, 140, 76, 5, 1, 0, 1, 140, 62,
+ 6, 1, 0, 1, 140, 72, 7, 1, 0, 1, 140, 127,
+ 8, 1, 0, 1, 140, 72, 1, 1, 0, 1, 144, 127,
+ 3, 1, 0, 1, 144, 76, 4, 1, 0, 1, 144, 76,
+ 5, 1, 0, 1, 144, 127, 6, 1, 0, 1, 144, 76,
+ 7, 1, 0, 1, 144, 127, 8, 1, 0, 1, 144, 76,
+ 1, 1, 0, 1, 149, 127, 3, 1, 0, 1, 149, 76,
+ 4, 1, 0, 1, 149, 74, 5, 1, 0, 1, 149, 76,
+ 6, 1, 0, 1, 149, 76, 7, 1, 0, 1, 149, 54,
+ 8, 1, 0, 1, 149, 76, 1, 1, 0, 1, 153, 127,
+ 3, 1, 0, 1, 153, 76, 4, 1, 0, 1, 153, 74,
+ 5, 1, 0, 1, 153, 76, 6, 1, 0, 1, 153, 76,
+ 7, 1, 0, 1, 153, 54, 8, 1, 0, 1, 153, 76,
+ 1, 1, 0, 1, 157, 127, 3, 1, 0, 1, 157, 76,
+ 4, 1, 0, 1, 157, 74, 5, 1, 0, 1, 157, 76,
+ 6, 1, 0, 1, 157, 76, 7, 1, 0, 1, 157, 54,
+ 8, 1, 0, 1, 157, 76, 1, 1, 0, 1, 161, 127,
+ 3, 1, 0, 1, 161, 76, 4, 1, 0, 1, 161, 74,
+ 5, 1, 0, 1, 161, 76, 6, 1, 0, 1, 161, 76,
+ 7, 1, 0, 1, 161, 54, 8, 1, 0, 1, 161, 76,
+ 1, 1, 0, 1, 165, 127, 3, 1, 0, 1, 165, 76,
+ 4, 1, 0, 1, 165, 74, 5, 1, 0, 1, 165, 76,
+ 6, 1, 0, 1, 165, 76, 7, 1, 0, 1, 165, 54,
+ 8, 1, 0, 1, 165, 76, 1, 1, 0, 2, 36, 62,
+ 3, 1, 0, 2, 36, 62, 4, 1, 0, 2, 36, 76,
+ 5, 1, 0, 2, 36, 62, 6, 1, 0, 2, 36, 64,
+ 7, 1, 0, 2, 36, 54, 8, 1, 0, 2, 36, 62,
+ 1, 1, 0, 2, 40, 62, 3, 1, 0, 2, 40, 62,
+ 4, 1, 0, 2, 40, 76, 5, 1, 0, 2, 40, 62,
+ 6, 1, 0, 2, 40, 64, 7, 1, 0, 2, 40, 54,
+ 8, 1, 0, 2, 40, 62, 1, 1, 0, 2, 44, 62,
+ 3, 1, 0, 2, 44, 62, 4, 1, 0, 2, 44, 76,
+ 5, 1, 0, 2, 44, 62, 6, 1, 0, 2, 44, 64,
+ 7, 1, 0, 2, 44, 54, 8, 1, 0, 2, 44, 62,
+ 1, 1, 0, 2, 48, 62, 3, 1, 0, 2, 48, 62,
+ 4, 1, 0, 2, 48, 76, 5, 1, 0, 2, 48, 62,
+ 6, 1, 0, 2, 48, 64, 7, 1, 0, 2, 48, 54,
+ 8, 1, 0, 2, 48, 62, 1, 1, 0, 2, 52, 62,
+ 3, 1, 0, 2, 52, 64, 4, 1, 0, 2, 52, 76,
+ 5, 1, 0, 2, 52, 62, 6, 1, 0, 2, 52, 76,
+ 7, 1, 0, 2, 52, 54, 8, 1, 0, 2, 52, 76,
+ 1, 1, 0, 2, 56, 62, 3, 1, 0, 2, 56, 64,
+ 4, 1, 0, 2, 56, 76, 5, 1, 0, 2, 56, 62,
+ 6, 1, 0, 2, 56, 76, 7, 1, 0, 2, 56, 54,
+ 8, 1, 0, 2, 56, 76, 1, 1, 0, 2, 60, 62,
+ 3, 1, 0, 2, 60, 64, 4, 1, 0, 2, 60, 76,
+ 5, 1, 0, 2, 60, 62, 6, 1, 0, 2, 60, 76,
+ 7, 1, 0, 2, 60, 54, 8, 1, 0, 2, 60, 76,
+ 1, 1, 0, 2, 64, 60, 3, 1, 0, 2, 64, 64,
+ 4, 1, 0, 2, 64, 74, 5, 1, 0, 2, 64, 62,
+ 6, 1, 0, 2, 64, 74, 7, 1, 0, 2, 64, 54,
+ 8, 1, 0, 2, 64, 74, 1, 1, 0, 2, 100, 76,
+ 3, 1, 0, 2, 100, 70, 4, 1, 0, 2, 100, 76,
+ 5, 1, 0, 2, 100, 62, 6, 1, 0, 2, 100, 70,
+ 7, 1, 0, 2, 100, 54, 8, 1, 0, 2, 100, 70,
+ 1, 1, 0, 2, 104, 76, 3, 1, 0, 2, 104, 76,
+ 4, 1, 0, 2, 104, 76, 5, 1, 0, 2, 104, 62,
+ 6, 1, 0, 2, 104, 76, 7, 1, 0, 2, 104, 54,
+ 8, 1, 0, 2, 104, 76, 1, 1, 0, 2, 108, 76,
+ 3, 1, 0, 2, 108, 76, 4, 1, 0, 2, 108, 76,
+ 5, 1, 0, 2, 108, 62, 6, 1, 0, 2, 108, 76,
+ 7, 1, 0, 2, 108, 54, 8, 1, 0, 2, 108, 76,
+ 1, 1, 0, 2, 112, 76, 3, 1, 0, 2, 112, 76,
+ 4, 1, 0, 2, 112, 76, 5, 1, 0, 2, 112, 62,
+ 6, 1, 0, 2, 112, 76, 7, 1, 0, 2, 112, 54,
+ 8, 1, 0, 2, 112, 76, 1, 1, 0, 2, 116, 76,
+ 3, 1, 0, 2, 116, 76, 4, 1, 0, 2, 116, 76,
+ 5, 1, 0, 2, 116, 62, 6, 1, 0, 2, 116, 76,
+ 7, 1, 0, 2, 116, 54, 8, 1, 0, 2, 116, 76,
+ 1, 1, 0, 2, 120, 76, 3, 1, 0, 2, 120, 127,
+ 4, 1, 0, 2, 120, 76, 5, 1, 0, 2, 120, 127,
+ 6, 1, 0, 2, 120, 76, 7, 1, 0, 2, 120, 54,
+ 8, 1, 0, 2, 120, 76, 1, 1, 0, 2, 124, 76,
+ 3, 1, 0, 2, 124, 127, 4, 1, 0, 2, 124, 76,
+ 5, 1, 0, 2, 124, 127, 6, 1, 0, 2, 124, 76,
+ 7, 1, 0, 2, 124, 54, 8, 1, 0, 2, 124, 76,
+ 1, 1, 0, 2, 128, 76, 3, 1, 0, 2, 128, 127,
+ 4, 1, 0, 2, 128, 76, 5, 1, 0, 2, 128, 127,
+ 6, 1, 0, 2, 128, 76, 7, 1, 0, 2, 128, 54,
+ 8, 1, 0, 2, 128, 76, 1, 1, 0, 2, 132, 76,
+ 3, 1, 0, 2, 132, 76, 4, 1, 0, 2, 132, 76,
+ 5, 1, 0, 2, 132, 62, 6, 1, 0, 2, 132, 76,
+ 7, 1, 0, 2, 132, 54, 8, 1, 0, 2, 132, 76,
+ 1, 1, 0, 2, 136, 76, 3, 1, 0, 2, 136, 76,
+ 4, 1, 0, 2, 136, 76, 5, 1, 0, 2, 136, 62,
+ 6, 1, 0, 2, 136, 76, 7, 1, 0, 2, 136, 127,
+ 8, 1, 0, 2, 136, 76, 1, 1, 0, 2, 140, 76,
+ 3, 1, 0, 2, 140, 70, 4, 1, 0, 2, 140, 76,
+ 5, 1, 0, 2, 140, 62, 6, 1, 0, 2, 140, 70,
+ 7, 1, 0, 2, 140, 127, 8, 1, 0, 2, 140, 70,
+ 1, 1, 0, 2, 144, 127, 3, 1, 0, 2, 144, 76,
+ 4, 1, 0, 2, 144, 76, 5, 1, 0, 2, 144, 127,
+ 6, 1, 0, 2, 144, 76, 7, 1, 0, 2, 144, 127,
+ 8, 1, 0, 2, 144, 76, 1, 1, 0, 2, 149, 127,
+ 3, 1, 0, 2, 149, 76, 4, 1, 0, 2, 149, 74,
+ 5, 1, 0, 2, 149, 76, 6, 1, 0, 2, 149, 76,
+ 7, 1, 0, 2, 149, 54, 8, 1, 0, 2, 149, 76,
+ 1, 1, 0, 2, 153, 127, 3, 1, 0, 2, 153, 76,
+ 4, 1, 0, 2, 153, 74, 5, 1, 0, 2, 153, 76,
+ 6, 1, 0, 2, 153, 76, 7, 1, 0, 2, 153, 54,
+ 8, 1, 0, 2, 153, 76, 1, 1, 0, 2, 157, 127,
+ 3, 1, 0, 2, 157, 76, 4, 1, 0, 2, 157, 74,
+ 5, 1, 0, 2, 157, 76, 6, 1, 0, 2, 157, 76,
+ 7, 1, 0, 2, 157, 54, 8, 1, 0, 2, 157, 76,
+ 1, 1, 0, 2, 161, 127, 3, 1, 0, 2, 161, 76,
+ 4, 1, 0, 2, 161, 74, 5, 1, 0, 2, 161, 76,
+ 6, 1, 0, 2, 161, 76, 7, 1, 0, 2, 161, 54,
+ 8, 1, 0, 2, 161, 76, 1, 1, 0, 2, 165, 127,
+ 3, 1, 0, 2, 165, 76, 4, 1, 0, 2, 165, 74,
+ 5, 1, 0, 2, 165, 76, 6, 1, 0, 2, 165, 76,
+ 7, 1, 0, 2, 165, 54, 8, 1, 0, 2, 165, 76,
+ 1, 1, 0, 3, 36, 50, 3, 1, 0, 3, 36, 38,
+ 4, 1, 0, 3, 36, 66, 5, 1, 0, 3, 36, 38,
+ 6, 1, 0, 3, 36, 52, 7, 1, 0, 3, 36, 30,
+ 8, 1, 0, 3, 36, 50, 1, 1, 0, 3, 40, 50,
+ 3, 1, 0, 3, 40, 38, 4, 1, 0, 3, 40, 66,
+ 5, 1, 0, 3, 40, 38, 6, 1, 0, 3, 40, 52,
+ 7, 1, 0, 3, 40, 30, 8, 1, 0, 3, 40, 50,
+ 1, 1, 0, 3, 44, 50, 3, 1, 0, 3, 44, 38,
+ 4, 1, 0, 3, 44, 66, 5, 1, 0, 3, 44, 38,
+ 6, 1, 0, 3, 44, 52, 7, 1, 0, 3, 44, 30,
+ 8, 1, 0, 3, 44, 50, 1, 1, 0, 3, 48, 50,
+ 3, 1, 0, 3, 48, 38, 4, 1, 0, 3, 48, 66,
+ 5, 1, 0, 3, 48, 38, 6, 1, 0, 3, 48, 52,
+ 7, 1, 0, 3, 48, 30, 8, 1, 0, 3, 48, 50,
+ 1, 1, 0, 3, 52, 50, 3, 1, 0, 3, 52, 40,
+ 4, 1, 0, 3, 52, 66, 5, 1, 0, 3, 52, 38,
+ 6, 1, 0, 3, 52, 68, 7, 1, 0, 3, 52, 30,
+ 8, 1, 0, 3, 52, 68, 1, 1, 0, 3, 56, 50,
+ 3, 1, 0, 3, 56, 40, 4, 1, 0, 3, 56, 66,
+ 5, 1, 0, 3, 56, 38, 6, 1, 0, 3, 56, 68,
+ 7, 1, 0, 3, 56, 30, 8, 1, 0, 3, 56, 68,
+ 1, 1, 0, 3, 60, 50, 3, 1, 0, 3, 60, 40,
+ 4, 1, 0, 3, 60, 66, 5, 1, 0, 3, 60, 38,
+ 6, 1, 0, 3, 60, 66, 7, 1, 0, 3, 60, 30,
+ 8, 1, 0, 3, 60, 66, 1, 1, 0, 3, 64, 50,
+ 3, 1, 0, 3, 64, 40, 4, 1, 0, 3, 64, 66,
+ 5, 1, 0, 3, 64, 38, 6, 1, 0, 3, 64, 68,
+ 7, 1, 0, 3, 64, 30, 8, 1, 0, 3, 64, 68,
+ 1, 1, 0, 3, 100, 70, 3, 1, 0, 3, 100, 60,
+ 4, 1, 0, 3, 100, 64, 5, 1, 0, 3, 100, 38,
+ 6, 1, 0, 3, 100, 60, 7, 1, 0, 3, 100, 30,
+ 8, 1, 0, 3, 100, 60, 1, 1, 0, 3, 104, 70,
+ 3, 1, 0, 3, 104, 68, 4, 1, 0, 3, 104, 64,
+ 5, 1, 0, 3, 104, 38, 6, 1, 0, 3, 104, 68,
+ 7, 1, 0, 3, 104, 30, 8, 1, 0, 3, 104, 68,
+ 1, 1, 0, 3, 108, 70, 3, 1, 0, 3, 108, 68,
+ 4, 1, 0, 3, 108, 64, 5, 1, 0, 3, 108, 38,
+ 6, 1, 0, 3, 108, 68, 7, 1, 0, 3, 108, 30,
+ 8, 1, 0, 3, 108, 68, 1, 1, 0, 3, 112, 70,
+ 3, 1, 0, 3, 112, 68, 4, 1, 0, 3, 112, 64,
+ 5, 1, 0, 3, 112, 38, 6, 1, 0, 3, 112, 68,
+ 7, 1, 0, 3, 112, 30, 8, 1, 0, 3, 112, 68,
+ 1, 1, 0, 3, 116, 70, 3, 1, 0, 3, 116, 68,
+ 4, 1, 0, 3, 116, 64, 5, 1, 0, 3, 116, 38,
+ 6, 1, 0, 3, 116, 68, 7, 1, 0, 3, 116, 30,
+ 8, 1, 0, 3, 116, 68, 1, 1, 0, 3, 120, 70,
+ 3, 1, 0, 3, 120, 127, 4, 1, 0, 3, 120, 64,
+ 5, 1, 0, 3, 120, 127, 6, 1, 0, 3, 120, 68,
+ 7, 1, 0, 3, 120, 30, 8, 1, 0, 3, 120, 68,
+ 1, 1, 0, 3, 124, 70, 3, 1, 0, 3, 124, 127,
+ 4, 1, 0, 3, 124, 64, 5, 1, 0, 3, 124, 127,
+ 6, 1, 0, 3, 124, 68, 7, 1, 0, 3, 124, 30,
+ 8, 1, 0, 3, 124, 68, 1, 1, 0, 3, 128, 70,
+ 3, 1, 0, 3, 128, 127, 4, 1, 0, 3, 128, 64,
+ 5, 1, 0, 3, 128, 127, 6, 1, 0, 3, 128, 68,
+ 7, 1, 0, 3, 128, 30, 8, 1, 0, 3, 128, 68,
+ 1, 1, 0, 3, 132, 70, 3, 1, 0, 3, 132, 68,
+ 4, 1, 0, 3, 132, 64, 5, 1, 0, 3, 132, 38,
+ 6, 1, 0, 3, 132, 68, 7, 1, 0, 3, 132, 30,
+ 8, 1, 0, 3, 132, 68, 1, 1, 0, 3, 136, 70,
+ 3, 1, 0, 3, 136, 68, 4, 1, 0, 3, 136, 64,
+ 5, 1, 0, 3, 136, 38, 6, 1, 0, 3, 136, 68,
+ 7, 1, 0, 3, 136, 127, 8, 1, 0, 3, 136, 68,
+ 1, 1, 0, 3, 140, 70, 3, 1, 0, 3, 140, 60,
+ 4, 1, 0, 3, 140, 64, 5, 1, 0, 3, 140, 38,
+ 6, 1, 0, 3, 140, 60, 7, 1, 0, 3, 140, 127,
+ 8, 1, 0, 3, 140, 60, 1, 1, 0, 3, 144, 127,
+ 3, 1, 0, 3, 144, 68, 4, 1, 0, 3, 144, 64,
+ 5, 1, 0, 3, 144, 127, 6, 1, 0, 3, 144, 68,
+ 7, 1, 0, 3, 144, 127, 8, 1, 0, 3, 144, 68,
+ 1, 1, 0, 3, 149, 127, 3, 1, 0, 3, 149, 76,
+ 4, 1, 0, 3, 149, 60, 5, 1, 0, 3, 149, 76,
+ 6, 1, 0, 3, 149, 76, 7, 1, 0, 3, 149, 30,
+ 8, 1, 0, 3, 149, 72, 1, 1, 0, 3, 153, 127,
+ 3, 1, 0, 3, 153, 76, 4, 1, 0, 3, 153, 60,
+ 5, 1, 0, 3, 153, 76, 6, 1, 0, 3, 153, 76,
+ 7, 1, 0, 3, 153, 30, 8, 1, 0, 3, 153, 76,
+ 1, 1, 0, 3, 157, 127, 3, 1, 0, 3, 157, 76,
+ 4, 1, 0, 3, 157, 60, 5, 1, 0, 3, 157, 76,
+ 6, 1, 0, 3, 157, 76, 7, 1, 0, 3, 157, 30,
+ 8, 1, 0, 3, 157, 76, 1, 1, 0, 3, 161, 127,
+ 3, 1, 0, 3, 161, 76, 4, 1, 0, 3, 161, 60,
+ 5, 1, 0, 3, 161, 76, 6, 1, 0, 3, 161, 76,
+ 7, 1, 0, 3, 161, 30, 8, 1, 0, 3, 161, 76,
+ 1, 1, 0, 3, 165, 127, 3, 1, 0, 3, 165, 76,
+ 4, 1, 0, 3, 165, 60, 5, 1, 0, 3, 165, 76,
+ 6, 1, 0, 3, 165, 76, 7, 1, 0, 3, 165, 30,
+ 8, 1, 0, 3, 165, 76, 1, 1, 1, 2, 38, 62,
+ 3, 1, 1, 2, 38, 64, 4, 1, 1, 2, 38, 72,
+ 5, 1, 1, 2, 38, 64, 6, 1, 1, 2, 38, 64,
+ 7, 1, 1, 2, 38, 54, 8, 1, 1, 2, 38, 62,
+ 1, 1, 1, 2, 46, 62, 3, 1, 1, 2, 46, 64,
+ 4, 1, 1, 2, 46, 72, 5, 1, 1, 2, 46, 64,
+ 6, 1, 1, 2, 46, 64, 7, 1, 1, 2, 46, 54,
+ 8, 1, 1, 2, 46, 62, 1, 1, 1, 2, 54, 62,
+ 3, 1, 1, 2, 54, 64, 4, 1, 1, 2, 54, 72,
+ 5, 1, 1, 2, 54, 64, 6, 1, 1, 2, 54, 72,
+ 7, 1, 1, 2, 54, 54, 8, 1, 1, 2, 54, 72,
+ 1, 1, 1, 2, 62, 62, 3, 1, 1, 2, 62, 64,
+ 4, 1, 1, 2, 62, 70, 5, 1, 1, 2, 62, 64,
+ 6, 1, 1, 2, 62, 64, 7, 1, 1, 2, 62, 54,
+ 8, 1, 1, 2, 62, 64, 1, 1, 1, 2, 102, 72,
+ 3, 1, 1, 2, 102, 58, 4, 1, 1, 2, 102, 72,
+ 5, 1, 1, 2, 102, 64, 6, 1, 1, 2, 102, 58,
+ 7, 1, 1, 2, 102, 54, 8, 1, 1, 2, 102, 58,
+ 1, 1, 1, 2, 110, 72, 3, 1, 1, 2, 110, 72,
+ 4, 1, 1, 2, 110, 72, 5, 1, 1, 2, 110, 64,
+ 6, 1, 1, 2, 110, 72, 7, 1, 1, 2, 110, 54,
+ 8, 1, 1, 2, 110, 72, 1, 1, 1, 2, 118, 72,
+ 3, 1, 1, 2, 118, 127, 4, 1, 1, 2, 118, 72,
+ 5, 1, 1, 2, 118, 127, 6, 1, 1, 2, 118, 72,
+ 7, 1, 1, 2, 118, 54, 8, 1, 1, 2, 118, 72,
+ 1, 1, 1, 2, 126, 72, 3, 1, 1, 2, 126, 127,
+ 4, 1, 1, 2, 126, 72, 5, 1, 1, 2, 126, 127,
+ 6, 1, 1, 2, 126, 72, 7, 1, 1, 2, 126, 54,
+ 8, 1, 1, 2, 126, 72, 1, 1, 1, 2, 134, 72,
+ 3, 1, 1, 2, 134, 72, 4, 1, 1, 2, 134, 72,
+ 5, 1, 1, 2, 134, 64, 6, 1, 1, 2, 134, 72,
+ 7, 1, 1, 2, 134, 127, 8, 1, 1, 2, 134, 72,
+ 1, 1, 1, 2, 142, 127, 3, 1, 1, 2, 142, 72,
+ 4, 1, 1, 2, 142, 72, 5, 1, 1, 2, 142, 127,
+ 6, 1, 1, 2, 142, 72, 7, 1, 1, 2, 142, 127,
+ 8, 1, 1, 2, 142, 72, 1, 1, 1, 2, 151, 127,
+ 3, 1, 1, 2, 151, 72, 4, 1, 1, 2, 151, 72,
+ 5, 1, 1, 2, 151, 72, 6, 1, 1, 2, 151, 72,
+ 7, 1, 1, 2, 151, 54, 8, 1, 1, 2, 151, 72,
+ 1, 1, 1, 2, 159, 127, 3, 1, 1, 2, 159, 72,
+ 4, 1, 1, 2, 159, 72, 5, 1, 1, 2, 159, 72,
+ 6, 1, 1, 2, 159, 72, 7, 1, 1, 2, 159, 54,
+ 8, 1, 1, 2, 159, 72, 1, 1, 1, 3, 38, 50,
+ 3, 1, 1, 3, 38, 40, 4, 1, 1, 3, 38, 62,
+ 5, 1, 1, 3, 38, 40, 6, 1, 1, 3, 38, 52,
+ 7, 1, 1, 3, 38, 30, 8, 1, 1, 3, 38, 50,
+ 1, 1, 1, 3, 46, 50, 3, 1, 1, 3, 46, 40,
+ 4, 1, 1, 3, 46, 62, 5, 1, 1, 3, 46, 40,
+ 6, 1, 1, 3, 46, 52, 7, 1, 1, 3, 46, 30,
+ 8, 1, 1, 3, 46, 50, 1, 1, 1, 3, 54, 50,
+ 3, 1, 1, 3, 54, 40, 4, 1, 1, 3, 54, 62,
+ 5, 1, 1, 3, 54, 40, 6, 1, 1, 3, 54, 68,
+ 7, 1, 1, 3, 54, 30, 8, 1, 1, 3, 54, 68,
+ 1, 1, 1, 3, 62, 48, 3, 1, 1, 3, 62, 40,
+ 4, 1, 1, 3, 62, 58, 5, 1, 1, 3, 62, 40,
+ 6, 1, 1, 3, 62, 58, 7, 1, 1, 3, 62, 30,
+ 8, 1, 1, 3, 62, 58, 1, 1, 1, 3, 102, 70,
+ 3, 1, 1, 3, 102, 54, 4, 1, 1, 3, 102, 64,
+ 5, 1, 1, 3, 102, 40, 6, 1, 1, 3, 102, 54,
+ 7, 1, 1, 3, 102, 30, 8, 1, 1, 3, 102, 54,
+ 1, 1, 1, 3, 110, 70, 3, 1, 1, 3, 110, 68,
+ 4, 1, 1, 3, 110, 64, 5, 1, 1, 3, 110, 40,
+ 6, 1, 1, 3, 110, 68, 7, 1, 1, 3, 110, 30,
+ 8, 1, 1, 3, 110, 68, 1, 1, 1, 3, 118, 70,
+ 3, 1, 1, 3, 118, 127, 4, 1, 1, 3, 118, 64,
+ 5, 1, 1, 3, 118, 127, 6, 1, 1, 3, 118, 68,
+ 7, 1, 1, 3, 118, 30, 8, 1, 1, 3, 118, 68,
+ 1, 1, 1, 3, 126, 70, 3, 1, 1, 3, 126, 127,
+ 4, 1, 1, 3, 126, 64, 5, 1, 1, 3, 126, 127,
+ 6, 1, 1, 3, 126, 68, 7, 1, 1, 3, 126, 30,
+ 8, 1, 1, 3, 126, 68, 1, 1, 1, 3, 134, 70,
+ 3, 1, 1, 3, 134, 68, 4, 1, 1, 3, 134, 64,
+ 5, 1, 1, 3, 134, 40, 6, 1, 1, 3, 134, 68,
+ 7, 1, 1, 3, 134, 127, 8, 1, 1, 3, 134, 68,
+ 1, 1, 1, 3, 142, 127, 3, 1, 1, 3, 142, 68,
+ 4, 1, 1, 3, 142, 64, 5, 1, 1, 3, 142, 127,
+ 6, 1, 1, 3, 142, 68, 7, 1, 1, 3, 142, 127,
+ 8, 1, 1, 3, 142, 68, 1, 1, 1, 3, 151, 127,
+ 3, 1, 1, 3, 151, 72, 4, 1, 1, 3, 151, 66,
+ 5, 1, 1, 3, 151, 72, 6, 1, 1, 3, 151, 72,
+ 7, 1, 1, 3, 151, 30, 8, 1, 1, 3, 151, 68,
+ 1, 1, 1, 3, 159, 127, 3, 1, 1, 3, 159, 72,
+ 4, 1, 1, 3, 159, 66, 5, 1, 1, 3, 159, 72,
+ 6, 1, 1, 3, 159, 72, 7, 1, 1, 3, 159, 30,
+ 8, 1, 1, 3, 159, 72, 1, 1, 2, 4, 42, 64,
+ 3, 1, 2, 4, 42, 64, 4, 1, 2, 4, 42, 68,
+ 5, 1, 2, 4, 42, 64, 6, 1, 2, 4, 42, 64,
+ 7, 1, 2, 4, 42, 54, 8, 1, 2, 4, 42, 62,
+ 1, 1, 2, 4, 58, 64, 3, 1, 2, 4, 58, 62,
+ 4, 1, 2, 4, 58, 64, 5, 1, 2, 4, 58, 64,
+ 6, 1, 2, 4, 58, 62, 7, 1, 2, 4, 58, 54,
+ 8, 1, 2, 4, 58, 62, 1, 1, 2, 4, 106, 72,
+ 3, 1, 2, 4, 106, 58, 4, 1, 2, 4, 106, 66,
+ 5, 1, 2, 4, 106, 64, 6, 1, 2, 4, 106, 58,
+ 7, 1, 2, 4, 106, 54, 8, 1, 2, 4, 106, 58,
+ 1, 1, 2, 4, 122, 72, 3, 1, 2, 4, 122, 127,
+ 4, 1, 2, 4, 122, 68, 5, 1, 2, 4, 122, 127,
+ 6, 1, 2, 4, 122, 72, 7, 1, 2, 4, 122, 54,
+ 8, 1, 2, 4, 122, 72, 1, 1, 2, 4, 138, 127,
+ 3, 1, 2, 4, 138, 72, 4, 1, 2, 4, 138, 68,
+ 5, 1, 2, 4, 138, 127, 6, 1, 2, 4, 138, 72,
+ 7, 1, 2, 4, 138, 127, 8, 1, 2, 4, 138, 72,
+ 1, 1, 2, 4, 155, 127, 3, 1, 2, 4, 155, 72,
+ 4, 1, 2, 4, 155, 68, 5, 1, 2, 4, 155, 72,
+ 6, 1, 2, 4, 155, 72, 7, 1, 2, 4, 155, 54,
+ 8, 1, 2, 4, 155, 68, 1, 1, 2, 5, 42, 50,
+ 3, 1, 2, 5, 42, 40, 4, 1, 2, 5, 42, 58,
+ 5, 1, 2, 5, 42, 40, 6, 1, 2, 5, 42, 52,
+ 7, 1, 2, 5, 42, 30, 8, 1, 2, 5, 42, 50,
+ 1, 1, 2, 5, 58, 50, 3, 1, 2, 5, 58, 40,
+ 4, 1, 2, 5, 58, 56, 5, 1, 2, 5, 58, 40,
+ 6, 1, 2, 5, 58, 52, 7, 1, 2, 5, 58, 30,
+ 8, 1, 2, 5, 58, 52, 1, 1, 2, 5, 106, 72,
+ 3, 1, 2, 5, 106, 50, 4, 1, 2, 5, 106, 56,
+ 5, 1, 2, 5, 106, 40, 6, 1, 2, 5, 106, 50,
+ 7, 1, 2, 5, 106, 30, 8, 1, 2, 5, 106, 50,
+ 1, 1, 2, 5, 122, 72, 3, 1, 2, 5, 122, 127,
+ 4, 1, 2, 5, 122, 56, 5, 1, 2, 5, 122, 127,
+ 6, 1, 2, 5, 122, 66, 7, 1, 2, 5, 122, 30,
+ 8, 1, 2, 5, 122, 66, 1, 1, 2, 5, 138, 127,
+ 3, 1, 2, 5, 138, 66, 4, 1, 2, 5, 138, 58,
+ 5, 1, 2, 5, 138, 127, 6, 1, 2, 5, 138, 66,
+ 7, 1, 2, 5, 138, 127, 8, 1, 2, 5, 138, 66,
+ 1, 1, 2, 5, 155, 127, 3, 1, 2, 5, 155, 62,
+ 4, 1, 2, 5, 155, 58, 5, 1, 2, 5, 155, 72,
+ 6, 1, 2, 5, 155, 62, 7, 1, 2, 5, 155, 30,
+ 8, 1, 2, 5, 155, 62
};
RTW_DECL_TABLE_TXPWR_LMT(rtw8822c_txpwr_lmt_type0);
diff --git a/drivers/net/wireless/realtek/rtw88/tx.c b/drivers/net/wireless/realtek/rtw88/tx.c
index e32faf8bead9..8eaa9809ca44 100644
--- a/drivers/net/wireless/realtek/rtw88/tx.c
+++ b/drivers/net/wireless/realtek/rtw88/tx.c
@@ -362,6 +362,6 @@ void rtw_rsvd_page_pkt_info_update(struct rtw_dev *rtwdev,
pkt_info->bmc = bmc;
pkt_info->tx_pkt_size = skb->len;
pkt_info->offset = chip->tx_pkt_desc_sz;
- pkt_info->qsel = skb->priority;
+ pkt_info->qsel = TX_DESC_QSEL_MGMT;
pkt_info->ls = true;
}
diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
index a5e0604d3009..0b3cf8477c6c 100644
--- a/drivers/net/wireless/ti/wl18xx/main.c
+++ b/drivers/net/wireless/ti/wl18xx/main.c
@@ -1847,44 +1847,6 @@ static const struct ieee80211_iface_limit wl18xx_iface_ap_limits[] = {
},
};
-static const struct ieee80211_iface_limit wl18xx_iface_ap_cl_limits[] = {
- {
- .max = 1,
- .types = BIT(NL80211_IFTYPE_STATION),
- },
- {
- .max = 1,
- .types = BIT(NL80211_IFTYPE_AP),
- },
- {
- .max = 1,
- .types = BIT(NL80211_IFTYPE_P2P_CLIENT),
- },
- {
- .max = 1,
- .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
- },
-};
-
-static const struct ieee80211_iface_limit wl18xx_iface_ap_go_limits[] = {
- {
- .max = 1,
- .types = BIT(NL80211_IFTYPE_STATION),
- },
- {
- .max = 1,
- .types = BIT(NL80211_IFTYPE_AP),
- },
- {
- .max = 1,
- .types = BIT(NL80211_IFTYPE_P2P_GO),
- },
- {
- .max = 1,
- .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
- },
-};
-
static const struct ieee80211_iface_combination
wl18xx_iface_combinations[] = {
{
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index c9a485ecee7b..b74dc8bc9755 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -483,7 +483,7 @@ static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
}
/* update the host-chipset time offset */
- wl->time_offset = (ktime_get_boot_ns() >> 10) -
+ wl->time_offset = (ktime_get_boottime_ns() >> 10) -
(s64)(status->fw_localtime);
wl->fw_fast_lnk_map = status->link_fast_bitmap;
diff --git a/drivers/net/wireless/ti/wlcore/rx.c b/drivers/net/wireless/ti/wlcore/rx.c
index d96bb602fae6..307fab21050b 100644
--- a/drivers/net/wireless/ti/wlcore/rx.c
+++ b/drivers/net/wireless/ti/wlcore/rx.c
@@ -93,7 +93,7 @@ static void wl1271_rx_status(struct wl1271 *wl,
}
if (beacon || probe_rsp)
- status->boottime_ns = ktime_get_boot_ns();
+ status->boottime_ns = ktime_get_boottime_ns();
if (beacon)
wlcore_set_pending_regdomain_ch(wl, (u16)desc->channel,
diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c
index 057c6be330e7..90e56d4c3df3 100644
--- a/drivers/net/wireless/ti/wlcore/tx.c
+++ b/drivers/net/wireless/ti/wlcore/tx.c
@@ -273,7 +273,7 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
}
/* configure packet life time */
- hosttime = (ktime_get_boot_ns() >> 10);
+ hosttime = (ktime_get_boottime_ns() >> 10);
desc->start_time = cpu_to_le32(hosttime - wl->time_offset);
is_dummy = wl12xx_is_dummy_packet(wl, skb);
diff --git a/drivers/net/wireless/virt_wifi.c b/drivers/net/wireless/virt_wifi.c
index 606999f102eb..be92e1220284 100644
--- a/drivers/net/wireless/virt_wifi.c
+++ b/drivers/net/wireless/virt_wifi.c
@@ -172,7 +172,7 @@ static void virt_wifi_scan_result(struct work_struct *work)
informed_bss = cfg80211_inform_bss(wiphy, &channel_5ghz,
CFG80211_BSS_FTYPE_PRESP,
fake_router_bssid,
- ktime_get_boot_ns(),
+ ktime_get_boottime_ns(),
WLAN_CAPABILITY_ESS, 0,
(void *)&ssid, sizeof(ssid),
DBM_TO_MBM(-50), GFP_KERNEL);
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 783198844dd7..240f762b3749 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -633,7 +633,7 @@ int xenvif_connect_data(struct xenvif_queue *queue,
unsigned int rx_evtchn)
{
struct task_struct *task;
- int err = -ENOMEM;
+ int err;
BUG_ON(queue->tx_irq);
BUG_ON(queue->task);
diff --git a/drivers/nfc/st-nci/i2c.c b/drivers/nfc/st-nci/i2c.c
index 67a685adfd44..55d600cd3861 100644
--- a/drivers/nfc/st-nci/i2c.c
+++ b/drivers/nfc/st-nci/i2c.c
@@ -72,7 +72,7 @@ static void st_nci_i2c_disable(void *phy_id)
*/
static int st_nci_i2c_write(void *phy_id, struct sk_buff *skb)
{
- int r = -1;
+ int r;
struct st_nci_i2c_phy *phy = phy_id;
struct i2c_client *client = phy->i2c_dev;
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 120fb593d1da..b2dd4e391f5c 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1113,15 +1113,15 @@ static struct nvme_id_ns *nvme_identify_ns(struct nvme_ctrl *ctrl,
return id;
}
-static int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
- void *buffer, size_t buflen, u32 *result)
+static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
+ unsigned int dword11, void *buffer, size_t buflen, u32 *result)
{
struct nvme_command c;
union nvme_result res;
int ret;
memset(&c, 0, sizeof(c));
- c.features.opcode = nvme_admin_set_features;
+ c.features.opcode = op;
c.features.fid = cpu_to_le32(fid);
c.features.dword11 = cpu_to_le32(dword11);
@@ -1132,6 +1132,24 @@ static int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword
return ret;
}
+int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
+ unsigned int dword11, void *buffer, size_t buflen,
+ u32 *result)
+{
+ return nvme_features(dev, nvme_admin_set_features, fid, dword11, buffer,
+ buflen, result);
+}
+EXPORT_SYMBOL_GPL(nvme_set_features);
+
+int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
+ unsigned int dword11, void *buffer, size_t buflen,
+ u32 *result)
+{
+ return nvme_features(dev, nvme_admin_get_features, fid, dword11, buffer,
+ buflen, result);
+}
+EXPORT_SYMBOL_GPL(nvme_get_features);
+
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
{
u32 q_count = (*count - 1) | ((*count - 1) << 16);
@@ -3318,7 +3336,7 @@ static int nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups);
nvme_mpath_add_disk(ns, id);
- nvme_fault_inject_init(ns);
+ nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name);
kfree(id);
return 0;
@@ -3343,7 +3361,15 @@ static void nvme_ns_remove(struct nvme_ns *ns)
if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
return;
- nvme_fault_inject_fini(ns);
+ nvme_fault_inject_fini(&ns->fault_inject);
+
+ mutex_lock(&ns->ctrl->subsys->lock);
+ list_del_rcu(&ns->siblings);
+ mutex_unlock(&ns->ctrl->subsys->lock);
+ synchronize_rcu(); /* guarantee not available in head->list */
+ nvme_mpath_clear_current_path(ns);
+ synchronize_srcu(&ns->head->srcu); /* wait for concurrent submissions */
+
if (ns->disk && ns->disk->flags & GENHD_FL_UP) {
del_gendisk(ns->disk);
blk_cleanup_queue(ns->queue);
@@ -3351,16 +3377,10 @@ static void nvme_ns_remove(struct nvme_ns *ns)
blk_integrity_unregister(ns->disk);
}
- mutex_lock(&ns->ctrl->subsys->lock);
- list_del_rcu(&ns->siblings);
- nvme_mpath_clear_current_path(ns);
- mutex_unlock(&ns->ctrl->subsys->lock);
-
down_write(&ns->ctrl->namespaces_rwsem);
list_del_init(&ns->list);
up_write(&ns->ctrl->namespaces_rwsem);
- synchronize_srcu(&ns->head->srcu);
nvme_mpath_check_last_path(ns);
nvme_put_ns(ns);
}
@@ -3702,6 +3722,7 @@ EXPORT_SYMBOL_GPL(nvme_start_ctrl);
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
{
+ nvme_fault_inject_fini(&ctrl->fault_inject);
dev_pm_qos_hide_latency_tolerance(ctrl->device);
cdev_device_del(&ctrl->cdev, ctrl->device);
}
@@ -3797,6 +3818,8 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
dev_pm_qos_update_user_latency_tolerance(ctrl->device,
min(default_ps_max_latency_us, (unsigned long)S32_MAX));
+ nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device));
+
return 0;
out_free_name:
kfree_const(ctrl->device->kobj.name);
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 5838f7cd53ac..1994d5b42f94 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -578,7 +578,7 @@ bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
switch (ctrl->state) {
case NVME_CTRL_NEW:
case NVME_CTRL_CONNECTING:
- if (req->cmd->common.opcode == nvme_fabrics_command &&
+ if (nvme_is_fabrics(req->cmd) &&
req->cmd->fabrics.fctype == nvme_fabrics_type_connect)
return true;
break;
diff --git a/drivers/nvme/host/fault_inject.c b/drivers/nvme/host/fault_inject.c
index 4cfd2c9222d4..1352159733b0 100644
--- a/drivers/nvme/host/fault_inject.c
+++ b/drivers/nvme/host/fault_inject.c
@@ -15,11 +15,10 @@ static DECLARE_FAULT_ATTR(fail_default_attr);
static char *fail_request;
module_param(fail_request, charp, 0000);
-void nvme_fault_inject_init(struct nvme_ns *ns)
+void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj,
+ const char *dev_name)
{
struct dentry *dir, *parent;
- char *name = ns->disk->disk_name;
- struct nvme_fault_inject *fault_inj = &ns->fault_inject;
struct fault_attr *attr = &fault_inj->attr;
/* set default fault injection attribute */
@@ -27,20 +26,20 @@ void nvme_fault_inject_init(struct nvme_ns *ns)
setup_fault_attr(&fail_default_attr, fail_request);
/* create debugfs directory and attribute */
- parent = debugfs_create_dir(name, NULL);
+ parent = debugfs_create_dir(dev_name, NULL);
if (!parent) {
- pr_warn("%s: failed to create debugfs directory\n", name);
+ pr_warn("%s: failed to create debugfs directory\n", dev_name);
return;
}
*attr = fail_default_attr;
dir = fault_create_debugfs_attr("fault_inject", parent, attr);
if (IS_ERR(dir)) {
- pr_warn("%s: failed to create debugfs attr\n", name);
+ pr_warn("%s: failed to create debugfs attr\n", dev_name);
debugfs_remove_recursive(parent);
return;
}
- ns->fault_inject.parent = parent;
+ fault_inj->parent = parent;
/* create debugfs for status code and dont_retry */
fault_inj->status = NVME_SC_INVALID_OPCODE;
@@ -49,29 +48,33 @@ void nvme_fault_inject_init(struct nvme_ns *ns)
debugfs_create_bool("dont_retry", 0600, dir, &fault_inj->dont_retry);
}
-void nvme_fault_inject_fini(struct nvme_ns *ns)
+void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inject)
{
/* remove debugfs directories */
- debugfs_remove_recursive(ns->fault_inject.parent);
+ debugfs_remove_recursive(fault_inject->parent);
}
void nvme_should_fail(struct request *req)
{
struct gendisk *disk = req->rq_disk;
- struct nvme_ns *ns = NULL;
+ struct nvme_fault_inject *fault_inject = NULL;
u16 status;
- /*
- * make sure this request is coming from a valid namespace
- */
- if (!disk)
- return;
+ if (disk) {
+ struct nvme_ns *ns = disk->private_data;
+
+ if (ns)
+ fault_inject = &ns->fault_inject;
+ else
+ WARN_ONCE(1, "No namespace found for request\n");
+ } else {
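+ /* no disk means this is not namespace I/O; use the controller's fault injector */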
+ fault_inject = &nvme_req(req)->ctrl->fault_inject;
+ }
- ns = disk->private_data;
- if (ns && should_fail(&ns->fault_inject.attr, 1)) {
+ if (fault_inject && should_fail(&fault_inject->attr, 1)) {
/* inject status code and DNR bit */
- status = ns->fault_inject.status;
- if (ns->fault_inject.dont_retry)
+ status = fault_inject->status;
+ if (fault_inject->dont_retry)
status |= NVME_SC_DNR;
nvme_req(req)->status = status;
}
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index dd8169bbf0d2..dcb2b799966f 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2112,7 +2112,8 @@ nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
freq->sg_table.sgl = freq->first_sgl;
ret = sg_alloc_table_chained(&freq->sg_table,
- blk_rq_nr_phys_segments(rq), freq->sg_table.sgl);
+ blk_rq_nr_phys_segments(rq), freq->sg_table.sgl,
+ SG_CHUNK_SIZE);
if (ret)
return -ENOMEM;
@@ -2122,7 +2123,7 @@ nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
op->nents, dir);
if (unlikely(freq->sg_cnt <= 0)) {
- sg_free_table_chained(&freq->sg_table, true);
+ sg_free_table_chained(&freq->sg_table, SG_CHUNK_SIZE);
freq->sg_cnt = 0;
return -EFAULT;
}
@@ -2148,7 +2149,7 @@ nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
nvme_cleanup_cmd(rq);
- sg_free_table_chained(&freq->sg_table, true);
+ sg_free_table_chained(&freq->sg_table, SG_CHUNK_SIZE);
freq->sg_cnt = 0;
}
@@ -2607,6 +2608,12 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
if (nvme_fc_ctlr_active_on_rport(ctrl))
return -ENOTUNIQ;
+ dev_info(ctrl->ctrl.device,
+ "NVME-FC{%d}: create association : host wwpn 0x%016llx "
+ " rport wwpn 0x%016llx: NQN \"%s\"\n",
+ ctrl->cnum, ctrl->lport->localport.port_name,
+ ctrl->rport->remoteport.port_name, ctrl->ctrl.opts->subsysnqn);
+
/*
* Create the admin queue
*/
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index 4f20a10b39d3..ba009d4c9dfa 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -660,7 +660,7 @@ static struct request *nvme_nvm_alloc_request(struct request_queue *q,
rq->cmd_flags &= ~REQ_FAILFAST_DRIVER;
if (rqd->bio)
- blk_init_request_from_bio(rq, rqd->bio);
+ blk_rq_append_bio(rq, &rqd->bio);
else
rq->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 55553d293a98..ea45d7d393ad 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -146,6 +146,15 @@ enum nvme_ctrl_state {
NVME_CTRL_DEAD,
};
+struct nvme_fault_inject {
+#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
+ struct fault_attr attr;
+ struct dentry *parent;
+ bool dont_retry; /* DNR, do not retry */
+ u16 status; /* status code */
+#endif
+};
+
struct nvme_ctrl {
bool comp_seen;
enum nvme_ctrl_state state;
@@ -247,6 +256,8 @@ struct nvme_ctrl {
struct page *discard_page;
unsigned long discard_page_busy;
+
+ struct nvme_fault_inject fault_inject;
};
enum nvme_iopolicy {
@@ -313,15 +324,6 @@ struct nvme_ns_head {
#endif
};
-#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
-struct nvme_fault_inject {
- struct fault_attr attr;
- struct dentry *parent;
- bool dont_retry; /* DNR, do not retry */
- u16 status; /* status code */
-};
-#endif
-
struct nvme_ns {
struct list_head list;
@@ -349,9 +351,7 @@ struct nvme_ns {
#define NVME_NS_ANA_PENDING 2
u16 noiob;
-#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
struct nvme_fault_inject fault_inject;
-#endif
};
@@ -372,12 +372,18 @@ struct nvme_ctrl_ops {
};
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
-void nvme_fault_inject_init(struct nvme_ns *ns);
-void nvme_fault_inject_fini(struct nvme_ns *ns);
+void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj,
+ const char *dev_name);
+void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inject);
void nvme_should_fail(struct request *req);
#else
-static inline void nvme_fault_inject_init(struct nvme_ns *ns) {}
-static inline void nvme_fault_inject_fini(struct nvme_ns *ns) {}
+static inline void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj,
+ const char *dev_name)
+{
+}
+static inline void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inj)
+{
+}
static inline void nvme_should_fail(struct request *req) {}
#endif
@@ -459,6 +465,12 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
union nvme_result *result, void *buffer, unsigned bufflen,
unsigned timeout, int qid, int at_head,
blk_mq_req_flags_t flags, bool poll);
+int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
+ unsigned int dword11, void *buffer, size_t buflen,
+ u32 *result);
+int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
+ unsigned int dword11, void *buffer, size_t buflen,
+ u32 *result);
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
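
nvme_set_features() and nvme_get_features() become shared helpers visible to the transports; the PCIe suspend path later in this diff is their first user. A minimal sketch of the calling convention for the power-management feature (FID 0x02), mirroring what pci.c wraps as nvme_get_power_state()/nvme_set_power_state():

u32 ps;

/* Get Features: read the controller's current power state. */
if (nvme_get_features(ctrl, NVME_FEAT_POWER_MGMT, 0, NULL, 0, &ps))
	return -EIO;

/* Set Features: request the deepest operational state (NPSS). */
if (nvme_set_features(ctrl, NVME_FEAT_POWER_MGMT, ctrl->npss, NULL, 0, NULL))
	return -EIO;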
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 524d6bd6d095..189352081994 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -18,6 +18,7 @@
#include <linux/mutex.h>
#include <linux/once.h>
#include <linux/pci.h>
+#include <linux/suspend.h>
#include <linux/t10-pi.h>
#include <linux/types.h>
#include <linux/io-64-nonatomic-lo-hi.h>
@@ -67,20 +68,14 @@ static int io_queue_depth = 1024;
module_param_cb(io_queue_depth, &io_queue_depth_ops, &io_queue_depth, 0644);
MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should >= 2");
-static int queue_count_set(const char *val, const struct kernel_param *kp);
-static const struct kernel_param_ops queue_count_ops = {
- .set = queue_count_set,
- .get = param_get_int,
-};
-
static int write_queues;
-module_param_cb(write_queues, &queue_count_ops, &write_queues, 0644);
+module_param(write_queues, int, 0644);
MODULE_PARM_DESC(write_queues,
"Number of queues to use for writes. If not set, reads and writes "
"will share a queue set.");
-static int poll_queues = 0;
-module_param_cb(poll_queues, &queue_count_ops, &poll_queues, 0644);
+static int poll_queues;
+module_param(poll_queues, int, 0644);
MODULE_PARM_DESC(poll_queues, "Number of queues to use for polled IO.");
struct nvme_dev;
@@ -116,6 +111,7 @@ struct nvme_dev {
u32 cmbsz;
u32 cmbloc;
struct nvme_ctrl ctrl;
+ u32 last_ps;
mempool_t *iod_mempool;
@@ -144,19 +140,6 @@ static int io_queue_depth_set(const char *val, const struct kernel_param *kp)
return param_set_int(val, kp);
}
-static int queue_count_set(const char *val, const struct kernel_param *kp)
-{
- int n, ret;
-
- ret = kstrtoint(val, 10, &n);
- if (ret)
- return ret;
- if (n > num_possible_cpus())
- n = num_possible_cpus();
-
- return param_set_int(val, kp);
-}
-
static inline unsigned int sq_idx(unsigned int qid, u32 stride)
{
return qid * 2 * stride;
@@ -2068,6 +2051,7 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
.priv = dev,
};
unsigned int irq_queues, this_p_queues;
+ unsigned int nr_cpus = num_possible_cpus();
/*
* Poll queues don't need interrupts, but we need at least one IO
@@ -2078,7 +2062,10 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
this_p_queues = nr_io_queues - 1;
irq_queues = 1;
} else {
- irq_queues = nr_io_queues - this_p_queues + 1;
+ if (nr_cpus < nr_io_queues - this_p_queues)
+ irq_queues = nr_cpus + 1;
+ else
+ irq_queues = nr_io_queues - this_p_queues + 1;
}
dev->io_queues[HCTX_TYPE_POLL] = this_p_queues;
@@ -2464,10 +2451,8 @@ static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
kfree(dev);
}
-static void nvme_remove_dead_ctrl(struct nvme_dev *dev, int status)
+static void nvme_remove_dead_ctrl(struct nvme_dev *dev)
{
- dev_warn(dev->ctrl.device, "Removing after probe failure status: %d\n", status);
-
nvme_get_ctrl(&dev->ctrl);
nvme_dev_disable(dev, false);
nvme_kill_queues(&dev->ctrl);
@@ -2480,11 +2465,13 @@ static void nvme_reset_work(struct work_struct *work)
struct nvme_dev *dev =
container_of(work, struct nvme_dev, ctrl.reset_work);
bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
- int result = -ENODEV;
+ int result;
enum nvme_ctrl_state new_state = NVME_CTRL_LIVE;
- if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING))
+ if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING)) {
+ result = -ENODEV;
goto out;
+ }
/*
* If we're called to reset a live controller first shut it down before
@@ -2528,6 +2515,7 @@ static void nvme_reset_work(struct work_struct *work)
if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) {
dev_warn(dev->ctrl.device,
"failed to mark controller CONNECTING\n");
+ result = -EBUSY;
goto out;
}
@@ -2588,6 +2576,7 @@ static void nvme_reset_work(struct work_struct *work)
if (!nvme_change_ctrl_state(&dev->ctrl, new_state)) {
dev_warn(dev->ctrl.device,
"failed to mark controller state %d\n", new_state);
+ result = -ENODEV;
goto out;
}
@@ -2597,7 +2586,10 @@ static void nvme_reset_work(struct work_struct *work)
out_unlock:
mutex_unlock(&dev->shutdown_lock);
out:
- nvme_remove_dead_ctrl(dev, result);
+ if (result)
+ dev_warn(dev->ctrl.device,
+ "Removing after probe failure status: %d\n", result);
+ nvme_remove_dead_ctrl(dev);
}
static void nvme_remove_dead_ctrl_work(struct work_struct *work)
@@ -2835,16 +2827,94 @@ static void nvme_remove(struct pci_dev *pdev)
}
#ifdef CONFIG_PM_SLEEP
+static int nvme_get_power_state(struct nvme_ctrl *ctrl, u32 *ps)
+{
+ return nvme_get_features(ctrl, NVME_FEAT_POWER_MGMT, 0, NULL, 0, ps);
+}
+
+static int nvme_set_power_state(struct nvme_ctrl *ctrl, u32 ps)
+{
+ return nvme_set_features(ctrl, NVME_FEAT_POWER_MGMT, ps, NULL, 0, NULL);
+}
+
+static int nvme_resume(struct device *dev)
+{
+ struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev));
+ struct nvme_ctrl *ctrl = &ndev->ctrl;
+
+ if (pm_resume_via_firmware() || !ctrl->npss ||
+ nvme_set_power_state(ctrl, ndev->last_ps) != 0)
+ nvme_reset_ctrl(ctrl);
+ return 0;
+}
+
static int nvme_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct nvme_dev *ndev = pci_get_drvdata(pdev);
+ struct nvme_ctrl *ctrl = &ndev->ctrl;
+ int ret = -EBUSY;
+
+ /*
+ * The platform does not remove power for a kernel managed suspend so
+ * use host managed nvme power settings for lowest idle power if
+ * possible. This should have quicker resume latency than a full device
+ * shutdown. But if the firmware is involved after the suspend or the
+ * device does not support any non-default power states, shut down the
+ * device fully.
+ */
+ if (pm_suspend_via_firmware() || !ctrl->npss) {
+ nvme_dev_disable(ndev, true);
+ return 0;
+ }
+
+ nvme_start_freeze(ctrl);
+ nvme_wait_freeze(ctrl);
+ nvme_sync_queues(ctrl);
+
+ if (ctrl->state != NVME_CTRL_LIVE &&
+ ctrl->state != NVME_CTRL_ADMIN_ONLY)
+ goto unfreeze;
+
+ ndev->last_ps = 0;
+ ret = nvme_get_power_state(ctrl, &ndev->last_ps);
+ if (ret < 0)
+ goto unfreeze;
+
+ ret = nvme_set_power_state(ctrl, ctrl->npss);
+ if (ret < 0)
+ goto unfreeze;
+
+ if (ret) {
+ /*
+ * Clearing npss forces a controller reset on resume. The
+ * correct value will be rediscovered then.
+ */
+ nvme_dev_disable(ndev, true);
+ ctrl->npss = 0;
+ ret = 0;
+ goto unfreeze;
+ }
+ /*
+ * A saved state prevents pci pm from generically controlling the
+ * device's power. If we're using protocol specific settings, we don't
+ * want pci interfering.
+ */
+ pci_save_state(pdev);
+unfreeze:
+ nvme_unfreeze(ctrl);
+ return ret;
+}
+
+static int nvme_simple_suspend(struct device *dev)
+{
+ struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev));
nvme_dev_disable(ndev, true);
return 0;
}
-static int nvme_resume(struct device *dev)
+static int nvme_simple_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct nvme_dev *ndev = pci_get_drvdata(pdev);
@@ -2852,9 +2922,16 @@ static int nvme_resume(struct device *dev)
nvme_reset_ctrl(&ndev->ctrl);
return 0;
}
-#endif
-static SIMPLE_DEV_PM_OPS(nvme_dev_pm_ops, nvme_suspend, nvme_resume);
+const struct dev_pm_ops nvme_dev_pm_ops = {
+ .suspend = nvme_suspend,
+ .resume = nvme_resume,
+ .freeze = nvme_simple_suspend,
+ .thaw = nvme_simple_resume,
+ .poweroff = nvme_simple_suspend,
+ .restore = nvme_simple_resume,
+};
+#endif /* CONFIG_PM_SLEEP */
static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev,
pci_channel_state_t state)
@@ -2959,9 +3036,11 @@ static struct pci_driver nvme_driver = {
.probe = nvme_probe,
.remove = nvme_remove,
.shutdown = nvme_shutdown,
+#ifdef CONFIG_PM_SLEEP
.driver = {
.pm = &nvme_dev_pm_ops,
},
+#endif
.sriov_configure = pci_sriov_configure_simple,
.err_handler = &nvme_err_handler,
};
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 97f668a39ae1..676619c1454a 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1144,7 +1144,7 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
nvme_cleanup_cmd(rq);
- sg_free_table_chained(&req->sg_table, true);
+ sg_free_table_chained(&req->sg_table, SG_CHUNK_SIZE);
}
static int nvme_rdma_set_sg_null(struct nvme_command *c)
@@ -1259,7 +1259,8 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
req->sg_table.sgl = req->first_sgl;
ret = sg_alloc_table_chained(&req->sg_table,
- blk_rq_nr_phys_segments(rq), req->sg_table.sgl);
+ blk_rq_nr_phys_segments(rq), req->sg_table.sgl,
+ SG_CHUNK_SIZE);
if (ret)
return -ENOMEM;
@@ -1299,7 +1300,7 @@ out_unmap_sg:
req->nents, rq_data_dir(rq) ==
WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
out_free_table:
- sg_free_table_chained(&req->sg_table, true);
+ sg_free_table_chained(&req->sg_table, SG_CHUNK_SIZE);
return ret;
}
diff --git a/drivers/nvme/host/trace.c b/drivers/nvme/host/trace.c
index 5f24ea7a28eb..f01ad0fd60bb 100644
--- a/drivers/nvme/host/trace.c
+++ b/drivers/nvme/host/trace.c
@@ -135,6 +135,69 @@ const char *nvme_trace_parse_nvm_cmd(struct trace_seq *p,
}
}
+static const char *nvme_trace_fabrics_property_set(struct trace_seq *p, u8 *spc)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 attrib = spc[0];
+ u32 ofst = get_unaligned_le32(spc + 4);
+ u64 value = get_unaligned_le64(spc + 8);
+
+ trace_seq_printf(p, "attrib=%u, ofst=0x%x, value=0x%llx",
+ attrib, ofst, value);
+ trace_seq_putc(p, 0);
+ return ret;
+}
+
+static const char *nvme_trace_fabrics_connect(struct trace_seq *p, u8 *spc)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u16 recfmt = get_unaligned_le16(spc);
+ u16 qid = get_unaligned_le16(spc + 2);
+ u16 sqsize = get_unaligned_le16(spc + 4);
+ u8 cattr = spc[6];
+ u32 kato = get_unaligned_le32(spc + 8);
+
+ trace_seq_printf(p, "recfmt=%u, qid=%u, sqsize=%u, cattr=%u, kato=%u",
+ recfmt, qid, sqsize, cattr, kato);
+ trace_seq_putc(p, 0);
+ return ret;
+}
+
+static const char *nvme_trace_fabrics_property_get(struct trace_seq *p, u8 *spc)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 attrib = spc[0];
+ u32 ofst = get_unaligned_le32(spc + 4);
+
+ trace_seq_printf(p, "attrib=%u, ofst=0x%x", attrib, ofst);
+ trace_seq_putc(p, 0);
+ return ret;
+}
+
+static const char *nvme_trace_fabrics_common(struct trace_seq *p, u8 *spc)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ trace_seq_printf(p, "spcecific=%*ph", 24, spc);
+ trace_seq_putc(p, 0);
+ return ret;
+}
+
+const char *nvme_trace_parse_fabrics_cmd(struct trace_seq *p,
+ u8 fctype, u8 *spc)
+{
+ switch (fctype) {
+ case nvme_fabrics_type_property_set:
+ return nvme_trace_fabrics_property_set(p, spc);
+ case nvme_fabrics_type_connect:
+ return nvme_trace_fabrics_connect(p, spc);
+ case nvme_fabrics_type_property_get:
+ return nvme_trace_fabrics_property_get(p, spc);
+ default:
+ return nvme_trace_fabrics_common(p, spc);
+ }
+}
+
const char *nvme_trace_disk_name(struct trace_seq *p, char *name)
{
const char *ret = trace_seq_buffer_ptr(p);
@@ -145,6 +208,5 @@ const char *nvme_trace_disk_name(struct trace_seq *p, char *name)
return ret;
}
-EXPORT_SYMBOL_GPL(nvme_trace_disk_name);
EXPORT_TRACEPOINT_SYMBOL_GPL(nvme_sq);
diff --git a/drivers/nvme/host/trace.h b/drivers/nvme/host/trace.h
index e71502d141ed..daaf700eae79 100644
--- a/drivers/nvme/host/trace.h
+++ b/drivers/nvme/host/trace.h
@@ -16,59 +16,19 @@
#include "nvme.h"
-#define nvme_admin_opcode_name(opcode) { opcode, #opcode }
-#define show_admin_opcode_name(val) \
- __print_symbolic(val, \
- nvme_admin_opcode_name(nvme_admin_delete_sq), \
- nvme_admin_opcode_name(nvme_admin_create_sq), \
- nvme_admin_opcode_name(nvme_admin_get_log_page), \
- nvme_admin_opcode_name(nvme_admin_delete_cq), \
- nvme_admin_opcode_name(nvme_admin_create_cq), \
- nvme_admin_opcode_name(nvme_admin_identify), \
- nvme_admin_opcode_name(nvme_admin_abort_cmd), \
- nvme_admin_opcode_name(nvme_admin_set_features), \
- nvme_admin_opcode_name(nvme_admin_get_features), \
- nvme_admin_opcode_name(nvme_admin_async_event), \
- nvme_admin_opcode_name(nvme_admin_ns_mgmt), \
- nvme_admin_opcode_name(nvme_admin_activate_fw), \
- nvme_admin_opcode_name(nvme_admin_download_fw), \
- nvme_admin_opcode_name(nvme_admin_ns_attach), \
- nvme_admin_opcode_name(nvme_admin_keep_alive), \
- nvme_admin_opcode_name(nvme_admin_directive_send), \
- nvme_admin_opcode_name(nvme_admin_directive_recv), \
- nvme_admin_opcode_name(nvme_admin_dbbuf), \
- nvme_admin_opcode_name(nvme_admin_format_nvm), \
- nvme_admin_opcode_name(nvme_admin_security_send), \
- nvme_admin_opcode_name(nvme_admin_security_recv), \
- nvme_admin_opcode_name(nvme_admin_sanitize_nvm))
-
-#define nvme_opcode_name(opcode) { opcode, #opcode }
-#define show_nvm_opcode_name(val) \
- __print_symbolic(val, \
- nvme_opcode_name(nvme_cmd_flush), \
- nvme_opcode_name(nvme_cmd_write), \
- nvme_opcode_name(nvme_cmd_read), \
- nvme_opcode_name(nvme_cmd_write_uncor), \
- nvme_opcode_name(nvme_cmd_compare), \
- nvme_opcode_name(nvme_cmd_write_zeroes), \
- nvme_opcode_name(nvme_cmd_dsm), \
- nvme_opcode_name(nvme_cmd_resv_register), \
- nvme_opcode_name(nvme_cmd_resv_report), \
- nvme_opcode_name(nvme_cmd_resv_acquire), \
- nvme_opcode_name(nvme_cmd_resv_release))
-
-#define show_opcode_name(qid, opcode) \
- (qid ? show_nvm_opcode_name(opcode) : show_admin_opcode_name(opcode))
-
const char *nvme_trace_parse_admin_cmd(struct trace_seq *p, u8 opcode,
u8 *cdw10);
const char *nvme_trace_parse_nvm_cmd(struct trace_seq *p, u8 opcode,
u8 *cdw10);
+const char *nvme_trace_parse_fabrics_cmd(struct trace_seq *p, u8 fctype,
+ u8 *spc);
-#define parse_nvme_cmd(qid, opcode, cdw10) \
- (qid ? \
- nvme_trace_parse_nvm_cmd(p, opcode, cdw10) : \
- nvme_trace_parse_admin_cmd(p, opcode, cdw10))
+#define parse_nvme_cmd(qid, opcode, fctype, cdw10) \
+ ((opcode) == nvme_fabrics_command ? \
+ nvme_trace_parse_fabrics_cmd(p, fctype, cdw10) : \
+ ((qid) ? \
+ nvme_trace_parse_nvm_cmd(p, opcode, cdw10) : \
+ nvme_trace_parse_admin_cmd(p, opcode, cdw10)))
const char *nvme_trace_disk_name(struct trace_seq *p, char *name);
#define __print_disk_name(name) \
@@ -93,6 +53,7 @@ TRACE_EVENT(nvme_setup_cmd,
__field(int, qid)
__field(u8, opcode)
__field(u8, flags)
+ __field(u8, fctype)
__field(u16, cid)
__field(u32, nsid)
__field(u64, metadata)
@@ -106,6 +67,7 @@ TRACE_EVENT(nvme_setup_cmd,
__entry->cid = cmd->common.command_id;
__entry->nsid = le32_to_cpu(cmd->common.nsid);
__entry->metadata = le64_to_cpu(cmd->common.metadata);
+ __entry->fctype = cmd->fabrics.fctype;
__assign_disk_name(__entry->disk, req->rq_disk);
memcpy(__entry->cdw10, &cmd->common.cdw10,
sizeof(__entry->cdw10));
@@ -114,8 +76,10 @@ TRACE_EVENT(nvme_setup_cmd,
__entry->ctrl_id, __print_disk_name(__entry->disk),
__entry->qid, __entry->cid, __entry->nsid,
__entry->flags, __entry->metadata,
- show_opcode_name(__entry->qid, __entry->opcode),
- parse_nvme_cmd(__entry->qid, __entry->opcode, __entry->cdw10))
+ show_opcode_name(__entry->qid, __entry->opcode,
+ __entry->fctype),
+ parse_nvme_cmd(__entry->qid, __entry->opcode,
+ __entry->fctype, __entry->cdw10))
);
TRACE_EVENT(nvme_complete_rq,
@@ -141,7 +105,7 @@ TRACE_EVENT(nvme_complete_rq,
__entry->status = nvme_req(req)->status;
__assign_disk_name(__entry->disk, req->rq_disk);
),
- TP_printk("nvme%d: %sqid=%d, cmdid=%u, res=%llu, retries=%u, flags=0x%x, status=%u",
+ TP_printk("nvme%d: %sqid=%d, cmdid=%u, res=%#llx, retries=%u, flags=0x%x, status=%#x",
__entry->ctrl_id, __print_disk_name(__entry->disk),
__entry->qid, __entry->cid, __entry->result,
__entry->retries, __entry->flags, __entry->status)
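
The opcode-name tables are dropped here and show_opcode_name() gains an fctype argument; the shared definitions are expected to move to a common NVMe header that is not part of this excerpt. Purely as an illustration of what the TP_printk() call sites now expect, the three-argument dispatcher plausibly looks something like this (hypothetical sketch, not the verified header contents):

#define show_opcode_name(qid, opcode, fctype) \
	((opcode) == nvme_fabrics_command ? \
		show_fabrics_type_name(fctype) : \
	 (qid) ? show_nvm_opcode_name(opcode) : \
		 show_admin_opcode_name(opcode))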
diff --git a/drivers/nvme/target/Makefile b/drivers/nvme/target/Makefile
index 8c3ad0fb6860..2b33836f3d3e 100644
--- a/drivers/nvme/target/Makefile
+++ b/drivers/nvme/target/Makefile
@@ -1,5 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
+ccflags-y += -I$(src)
+
obj-$(CONFIG_NVME_TARGET) += nvmet.o
obj-$(CONFIG_NVME_TARGET_LOOP) += nvme-loop.o
obj-$(CONFIG_NVME_TARGET_RDMA) += nvmet-rdma.o
@@ -14,3 +16,4 @@ nvmet-rdma-y += rdma.o
nvmet-fc-y += fc.o
nvme-fcloop-y += fcloop.o
nvmet-tcp-y += tcp.o
+nvmet-$(CONFIG_TRACING) += trace.o
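
The Makefile additions follow the usual tracepoint layout: trace.o is built only when CONFIG_TRACING is enabled, and -I$(src) is needed so the trace machinery can re-include trace.h under the name set by TRACE_INCLUDE_FILE. Exactly one translation unit defines CREATE_TRACE_POINTS before including the header so the tracepoint bodies are emitted once; core.c below is that unit. A distilled sketch of the pattern:

/* core.c: the single definer of the nvmet tracepoints. */
#define CREATE_TRACE_POINTS
#include "trace.h"

/* Other users only include "trace.h" and call the generated hooks,
 * e.g. trace_nvmet_req_init(req, req->cmd) and trace_nvmet_req_complete(req). */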
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 7734a6acff85..dad0243c7c96 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -10,6 +10,9 @@
#include <linux/pci-p2pdma.h>
#include <linux/scatterlist.h>
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
#include "nvmet.h"
struct workqueue_struct *buffered_io_wq;
@@ -311,6 +314,7 @@ int nvmet_enable_port(struct nvmet_port *port)
port->inline_data_size = 0;
port->enabled = true;
+ port->tr_ops = ops;
return 0;
}
@@ -321,6 +325,7 @@ void nvmet_disable_port(struct nvmet_port *port)
lockdep_assert_held(&nvmet_config_sem);
port->enabled = false;
+ port->tr_ops = NULL;
ops = nvmet_transports[port->disc_addr.trtype];
ops->remove_port(port);
@@ -689,6 +694,9 @@ static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
if (unlikely(status))
nvmet_set_error(req, status);
+
+ trace_nvmet_req_complete(req);
+
if (req->ns)
nvmet_put_namespace(req->ns);
req->ops->queue_response(req);
@@ -848,6 +856,8 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
req->error_loc = NVMET_NO_ERROR_LOC;
req->error_slba = 0;
+ trace_nvmet_req_init(req, req->cmd);
+
/* no support for fused commands yet */
if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
req->error_loc = offsetof(struct nvme_common_command, flags);
@@ -871,7 +881,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
status = nvmet_parse_connect_cmd(req);
else if (likely(req->sq->qid != 0))
status = nvmet_parse_io_cmd(req);
- else if (req->cmd->common.opcode == nvme_fabrics_command)
+ else if (nvme_is_fabrics(req->cmd))
status = nvmet_parse_fabrics_cmd(req);
else if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
status = nvmet_parse_discovery_cmd(req);
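
Several call sites in the target (and in fc.c below) replace the open-coded opcode comparison with nvme_is_fabrics(). The helper itself is added outside this excerpt, presumably in the shared NVMe header; its assumed shape is no more than:

/* Assumed helper, defined outside this diff. */
static inline bool nvme_is_fabrics(struct nvme_command *cmd)
{
	return cmd->common.opcode == nvme_fabrics_command;
}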
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
index 5baf269f3f8a..8efca26b4776 100644
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -41,6 +41,10 @@ void nvmet_port_disc_changed(struct nvmet_port *port,
__nvmet_disc_changed(port, ctrl);
}
mutex_unlock(&nvmet_disc_subsys->lock);
+
+ /* If transport can signal change, notify transport */
+ if (port->tr_ops && port->tr_ops->discovery_chg)
+ port->tr_ops->discovery_chg(port);
}
static void __nvmet_subsys_disc_changed(struct nvmet_port *port,
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index 3b9f79aba98f..d16b55ffe79f 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -268,7 +268,7 @@ u16 nvmet_parse_connect_cmd(struct nvmet_req *req)
{
struct nvme_command *cmd = req->cmd;
- if (cmd->common.opcode != nvme_fabrics_command) {
+ if (!nvme_is_fabrics(cmd)) {
pr_err("invalid command 0x%x on unconnected queue.\n",
cmd->fabrics.opcode);
req->error_loc = offsetof(struct nvme_common_command, opcode);
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 508661af0f50..ce8d819f86cc 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -1806,7 +1806,7 @@ nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
*/
rspcnt = atomic_inc_return(&fod->queue->zrspcnt);
if (!(rspcnt % fod->queue->ersp_ratio) ||
- sqe->opcode == nvme_fabrics_command ||
+ nvme_is_fabrics((struct nvme_command *) sqe) ||
xfr_length != fod->req.transfer_len ||
(le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] ||
(sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) ||
@@ -2549,6 +2549,16 @@ nvmet_fc_remove_port(struct nvmet_port *port)
kfree(pe);
}
+static void
+nvmet_fc_discovery_chg(struct nvmet_port *port)
+{
+ struct nvmet_fc_port_entry *pe = port->priv;
+ struct nvmet_fc_tgtport *tgtport = pe->tgtport;
+
+ if (tgtport && tgtport->ops->discovery_event)
+ tgtport->ops->discovery_event(&tgtport->fc_target_port);
+}
+
static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
.owner = THIS_MODULE,
.type = NVMF_TRTYPE_FC,
@@ -2557,6 +2567,7 @@ static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
.remove_port = nvmet_fc_remove_port,
.queue_response = nvmet_fc_fcp_nvme_cmd_done,
.delete_ctrl = nvmet_fc_delete_ctrl,
+ .discovery_chg = nvmet_fc_discovery_chg,
};
static int __init nvmet_fc_init_module(void)
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index 381b5a90c48b..b8c1cc54a0db 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -231,6 +231,11 @@ struct fcloop_lsreq {
int status;
};
+struct fcloop_rscn {
+ struct fcloop_tport *tport;
+ struct work_struct work;
+};
+
enum {
INI_IO_START = 0,
INI_IO_ACTIVE = 1,
@@ -348,6 +353,37 @@ fcloop_xmt_ls_rsp(struct nvmet_fc_target_port *tport,
return 0;
}
+/*
+ * Simulate reception of an RSCN and convert it into an initiator transport
+ * call to rescan a remote port.
+ */
+static void
+fcloop_tgt_rscn_work(struct work_struct *work)
+{
+ struct fcloop_rscn *tgt_rscn =
+ container_of(work, struct fcloop_rscn, work);
+ struct fcloop_tport *tport = tgt_rscn->tport;
+
+ if (tport->remoteport)
+ nvme_fc_rescan_remoteport(tport->remoteport);
+ kfree(tgt_rscn);
+}
+
+static void
+fcloop_tgt_discovery_evt(struct nvmet_fc_target_port *tgtport)
+{
+ struct fcloop_rscn *tgt_rscn;
+
+ tgt_rscn = kzalloc(sizeof(*tgt_rscn), GFP_KERNEL);
+ if (!tgt_rscn)
+ return;
+
+ tgt_rscn->tport = tgtport->private;
+ INIT_WORK(&tgt_rscn->work, fcloop_tgt_rscn_work);
+
+ schedule_work(&tgt_rscn->work);
+}
+
static void
fcloop_tfcp_req_free(struct kref *ref)
{
@@ -839,6 +875,7 @@ static struct nvmet_fc_target_template tgttemplate = {
.fcp_op = fcloop_fcp_op,
.fcp_abort = fcloop_tgt_fcp_abort,
.fcp_req_release = fcloop_fcp_req_release,
+ .discovery_event = fcloop_tgt_discovery_evt,
.max_hw_queues = FCLOOP_HW_QUEUES,
.max_sgl_segments = FCLOOP_SGL_SEGS,
.max_dif_sgl_segments = FCLOOP_SGL_SEGS,
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 9e211ad6bdd3..b16dc3981c69 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -77,7 +77,7 @@ static void nvme_loop_complete_rq(struct request *req)
struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
nvme_cleanup_cmd(req);
- sg_free_table_chained(&iod->sg_table, true);
+ sg_free_table_chained(&iod->sg_table, SG_CHUNK_SIZE);
nvme_complete_rq(req);
}
@@ -157,7 +157,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
iod->sg_table.sgl = iod->first_sgl;
if (sg_alloc_table_chained(&iod->sg_table,
blk_rq_nr_phys_segments(req),
- iod->sg_table.sgl))
+ iod->sg_table.sgl, SG_CHUNK_SIZE))
return BLK_STS_RESOURCE;
iod->req.sg = iod->sg_table.sgl;
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index c25d88fc9dec..dc270944bb25 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -140,6 +140,7 @@ struct nvmet_port {
void *priv;
bool enabled;
int inline_data_size;
+ const struct nvmet_fabrics_ops *tr_ops;
};
static inline struct nvmet_port *to_nvmet_port(struct config_item *item)
@@ -277,6 +278,7 @@ struct nvmet_fabrics_ops {
void (*disc_traddr)(struct nvmet_req *req,
struct nvmet_port *port, char *traddr);
u16 (*install_queue)(struct nvmet_sq *nvme_sq);
+ void (*discovery_chg)(struct nvmet_port *port);
};
#define NVMET_MAX_INLINE_BIOVEC 8
diff --git a/drivers/nvme/target/trace.c b/drivers/nvme/target/trace.c
new file mode 100644
index 000000000000..cdcdd14c6408
--- /dev/null
+++ b/drivers/nvme/target/trace.c
@@ -0,0 +1,201 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVM Express target device driver tracepoints
+ * Copyright (c) 2018 Johannes Thumshirn, SUSE Linux GmbH
+ */
+
+#include <asm/unaligned.h>
+#include "trace.h"
+
+static const char *nvmet_trace_admin_identify(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 cns = cdw10[0];
+ u16 ctrlid = get_unaligned_le16(cdw10 + 2);
+
+ trace_seq_printf(p, "cns=%u, ctrlid=%u", cns, ctrlid);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvmet_trace_admin_get_features(struct trace_seq *p,
+ u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 fid = cdw10[0];
+ u8 sel = cdw10[1] & 0x7;
+ u32 cdw11 = get_unaligned_le32(cdw10 + 4);
+
+ trace_seq_printf(p, "fid=0x%x sel=0x%x cdw11=0x%x", fid, sel, cdw11);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvmet_trace_read_write(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u64 slba = get_unaligned_le64(cdw10);
+ u16 length = get_unaligned_le16(cdw10 + 8);
+ u16 control = get_unaligned_le16(cdw10 + 10);
+ u32 dsmgmt = get_unaligned_le32(cdw10 + 12);
+ u32 reftag = get_unaligned_le32(cdw10 + 16);
+
+ trace_seq_printf(p,
+ "slba=%llu, len=%u, ctrl=0x%x, dsmgmt=%u, reftag=%u",
+ slba, length, control, dsmgmt, reftag);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvmet_trace_dsm(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ trace_seq_printf(p, "nr=%u, attributes=%u",
+ get_unaligned_le32(cdw10),
+ get_unaligned_le32(cdw10 + 4));
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvmet_trace_common(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ trace_seq_printf(p, "cdw10=%*ph", 24, cdw10);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+const char *nvmet_trace_parse_admin_cmd(struct trace_seq *p,
+ u8 opcode, u8 *cdw10)
+{
+ switch (opcode) {
+ case nvme_admin_identify:
+ return nvmet_trace_admin_identify(p, cdw10);
+ case nvme_admin_get_features:
+ return nvmet_trace_admin_get_features(p, cdw10);
+ default:
+ return nvmet_trace_common(p, cdw10);
+ }
+}
+
+const char *nvmet_trace_parse_nvm_cmd(struct trace_seq *p,
+ u8 opcode, u8 *cdw10)
+{
+ switch (opcode) {
+ case nvme_cmd_read:
+ case nvme_cmd_write:
+ case nvme_cmd_write_zeroes:
+ return nvmet_trace_read_write(p, cdw10);
+ case nvme_cmd_dsm:
+ return nvmet_trace_dsm(p, cdw10);
+ default:
+ return nvmet_trace_common(p, cdw10);
+ }
+}
+
+static const char *nvmet_trace_fabrics_property_set(struct trace_seq *p,
+ u8 *spc)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 attrib = spc[0];
+ u32 ofst = get_unaligned_le32(spc + 4);
+ u64 value = get_unaligned_le64(spc + 8);
+
+ trace_seq_printf(p, "attrib=%u, ofst=0x%x, value=0x%llx",
+ attrib, ofst, value);
+ trace_seq_putc(p, 0);
+ return ret;
+}
+
+static const char *nvmet_trace_fabrics_connect(struct trace_seq *p,
+ u8 *spc)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u16 recfmt = get_unaligned_le16(spc);
+ u16 qid = get_unaligned_le16(spc + 2);
+ u16 sqsize = get_unaligned_le16(spc + 4);
+ u8 cattr = spc[6];
+ u32 kato = get_unaligned_le32(spc + 8);
+
+ trace_seq_printf(p, "recfmt=%u, qid=%u, sqsize=%u, cattr=%u, kato=%u",
+ recfmt, qid, sqsize, cattr, kato);
+ trace_seq_putc(p, 0);
+ return ret;
+}
+
+static const char *nvmet_trace_fabrics_property_get(struct trace_seq *p,
+ u8 *spc)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 attrib = spc[0];
+ u32 ofst = get_unaligned_le32(spc + 4);
+
+ trace_seq_printf(p, "attrib=%u, ofst=0x%x", attrib, ofst);
+ trace_seq_putc(p, 0);
+ return ret;
+}
+
+static const char *nvmet_trace_fabrics_common(struct trace_seq *p, u8 *spc)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ trace_seq_printf(p, "spcecific=%*ph", 24, spc);
+ trace_seq_putc(p, 0);
+ return ret;
+}
+
+const char *nvmet_trace_parse_fabrics_cmd(struct trace_seq *p,
+ u8 fctype, u8 *spc)
+{
+ switch (fctype) {
+ case nvme_fabrics_type_property_set:
+ return nvmet_trace_fabrics_property_set(p, spc);
+ case nvme_fabrics_type_connect:
+ return nvmet_trace_fabrics_connect(p, spc);
+ case nvme_fabrics_type_property_get:
+ return nvmet_trace_fabrics_property_get(p, spc);
+ default:
+ return nvmet_trace_fabrics_common(p, spc);
+ }
+}
+
+const char *nvmet_trace_disk_name(struct trace_seq *p, char *name)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ if (*name)
+ trace_seq_printf(p, "disk=%s, ", name);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+const char *nvmet_trace_ctrl_name(struct trace_seq *p, struct nvmet_ctrl *ctrl)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ /*
+ * XXX: The controller instance is not known while the connect command
+ * itself is being executed: for the admin queue the connect command
+ * does not carry a cntlid (it is allocated by this very command), and
+ * for I/O queues the controller instance is mapped via the connect
+ * command's extra data. If that extra data becomes available at this
+ * stage, this print can be refined later.
+ */
+ if (ctrl)
+ trace_seq_printf(p, "%d", ctrl->cntlid);
+ else
+ trace_seq_printf(p, "_");
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
diff --git a/drivers/nvme/target/trace.h b/drivers/nvme/target/trace.h
new file mode 100644
index 000000000000..e645caa882dd
--- /dev/null
+++ b/drivers/nvme/target/trace.h
@@ -0,0 +1,141 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * NVM Express target device driver tracepoints
+ * Copyright (c) 2018 Johannes Thumshirn, SUSE Linux GmbH
+ *
+ * This is entirely based on drivers/nvme/host/trace.h
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM nvmet
+
+#if !defined(_TRACE_NVMET_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_NVMET_H
+
+#include <linux/nvme.h>
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+#include "nvmet.h"
+
+const char *nvmet_trace_parse_admin_cmd(struct trace_seq *p, u8 opcode,
+ u8 *cdw10);
+const char *nvmet_trace_parse_nvm_cmd(struct trace_seq *p, u8 opcode,
+ u8 *cdw10);
+const char *nvmet_trace_parse_fabrics_cmd(struct trace_seq *p, u8 fctype,
+ u8 *spc);
+
+#define parse_nvme_cmd(qid, opcode, fctype, cdw10) \
+ ((opcode) == nvme_fabrics_command ? \
+ nvmet_trace_parse_fabrics_cmd(p, fctype, cdw10) : \
+ (qid ? \
+ nvmet_trace_parse_nvm_cmd(p, opcode, cdw10) : \
+ nvmet_trace_parse_admin_cmd(p, opcode, cdw10)))
+
+const char *nvmet_trace_ctrl_name(struct trace_seq *p, struct nvmet_ctrl *ctrl);
+#define __print_ctrl_name(ctrl) \
+ nvmet_trace_ctrl_name(p, ctrl)
+
+const char *nvmet_trace_disk_name(struct trace_seq *p, char *name);
+#define __print_disk_name(name) \
+ nvmet_trace_disk_name(p, name)
+
+#ifndef TRACE_HEADER_MULTI_READ
+static inline struct nvmet_ctrl *nvmet_req_to_ctrl(struct nvmet_req *req)
+{
+ return req->sq->ctrl;
+}
+
+static inline void __assign_disk_name(char *name, struct nvmet_req *req,
+ bool init)
+{
+ struct nvmet_ctrl *ctrl = nvmet_req_to_ctrl(req);
+ struct nvmet_ns *ns;
+
+ if ((init && req->sq->qid) || (!init && req->cq->qid)) {
+ ns = nvmet_find_namespace(ctrl, req->cmd->rw.nsid);
+ /* nvmet_find_namespace() may return NULL for an unknown nsid. */
+ if (ns) {
+ strncpy(name, ns->device_path, DISK_NAME_LEN);
+ return;
+ }
+ }
+
+ memset(name, 0, DISK_NAME_LEN);
+}
+#endif
+
+TRACE_EVENT(nvmet_req_init,
+ TP_PROTO(struct nvmet_req *req, struct nvme_command *cmd),
+ TP_ARGS(req, cmd),
+ TP_STRUCT__entry(
+ __field(struct nvme_command *, cmd)
+ __field(struct nvmet_ctrl *, ctrl)
+ __array(char, disk, DISK_NAME_LEN)
+ __field(int, qid)
+ __field(u16, cid)
+ __field(u8, opcode)
+ __field(u8, fctype)
+ __field(u8, flags)
+ __field(u32, nsid)
+ __field(u64, metadata)
+ __array(u8, cdw10, 24)
+ ),
+ TP_fast_assign(
+ __entry->cmd = cmd;
+ __entry->ctrl = nvmet_req_to_ctrl(req);
+ __assign_disk_name(__entry->disk, req, true);
+ __entry->qid = req->sq->qid;
+ __entry->cid = cmd->common.command_id;
+ __entry->opcode = cmd->common.opcode;
+ __entry->fctype = cmd->fabrics.fctype;
+ __entry->flags = cmd->common.flags;
+ __entry->nsid = le32_to_cpu(cmd->common.nsid);
+ __entry->metadata = le64_to_cpu(cmd->common.metadata);
+ memcpy(__entry->cdw10, &cmd->common.cdw10,
+ sizeof(__entry->cdw10));
+ ),
+ TP_printk("nvmet%s: %sqid=%d, cmdid=%u, nsid=%u, flags=%#x, "
+ "meta=%#llx, cmd=(%s, %s)",
+ __print_ctrl_name(__entry->ctrl),
+ __print_disk_name(__entry->disk),
+ __entry->qid, __entry->cid, __entry->nsid,
+ __entry->flags, __entry->metadata,
+ show_opcode_name(__entry->qid, __entry->opcode,
+ __entry->fctype),
+ parse_nvme_cmd(__entry->qid, __entry->opcode,
+ __entry->fctype, __entry->cdw10))
+);
+
+TRACE_EVENT(nvmet_req_complete,
+ TP_PROTO(struct nvmet_req *req),
+ TP_ARGS(req),
+ TP_STRUCT__entry(
+ __field(struct nvmet_ctrl *, ctrl)
+ __array(char, disk, DISK_NAME_LEN)
+ __field(int, qid)
+ __field(int, cid)
+ __field(u64, result)
+ __field(u16, status)
+ ),
+ TP_fast_assign(
+ __entry->ctrl = nvmet_req_to_ctrl(req);
+ __entry->qid = req->cq->qid;
+ __entry->cid = req->cqe->command_id;
+ __entry->result = le64_to_cpu(req->cqe->result.u64);
+ __entry->status = le16_to_cpu(req->cqe->status) >> 1;
+ __assign_disk_name(__entry->disk, req, false);
+ ),
+ TP_printk("nvmet%s: %sqid=%d, cmdid=%u, res=%#llx, status=%#x",
+ __print_ctrl_name(__entry->ctrl),
+ __print_disk_name(__entry->disk),
+ __entry->qid, __entry->cid, __entry->result, __entry->status)
+
+);
+
+#endif /* _TRACE_NVMET_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index 3a9789388bfb..c094d5d20fd7 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -682,7 +682,7 @@ static int _set_opp_custom(const struct opp_table *opp_table,
data->old_opp.rate = old_freq;
size = sizeof(*old_supply) * opp_table->regulator_count;
- if (IS_ERR(old_supply))
+ if (!old_supply)
memset(data->old_opp.supplies, 0, size);
else
memcpy(data->old_opp.supplies, old_supply, size);
@@ -708,7 +708,7 @@ static int _set_required_opps(struct device *dev,
/* Single genpd case */
if (!genpd_virt_devs) {
- pstate = opp->required_opps[0]->pstate;
+ pstate = likely(opp) ? opp->required_opps[0]->pstate : 0;
ret = dev_pm_genpd_set_performance_state(dev, pstate);
if (ret) {
dev_err(dev, "Failed to set performance state of %s: %d (%d)\n",
@@ -726,7 +726,7 @@ static int _set_required_opps(struct device *dev,
mutex_lock(&opp_table->genpd_virt_dev_lock);
for (i = 0; i < opp_table->required_opp_count; i++) {
- pstate = opp->required_opps[i]->pstate;
+ pstate = likely(opp) ? opp->required_opps[i]->pstate : 0;
if (!genpd_virt_devs[i])
continue;
@@ -748,29 +748,37 @@ static int _set_required_opps(struct device *dev,
* @dev: device for which we do this operation
* @target_freq: frequency to achieve
*
- * This configures the power-supplies and clock source to the levels specified
- * by the OPP corresponding to the target_freq.
+ * This configures the power-supplies to the levels specified by the OPP
+ * corresponding to the target_freq, and programs the clock to a value <=
+ * target_freq, as rounded by clk_round_rate(). A device wanting to run at the
+ * fmax provided by the OPP should already have rounded target_freq to the
+ * target OPP's frequency.
*/
int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
{
struct opp_table *opp_table;
- unsigned long freq, old_freq;
+ unsigned long freq, old_freq, temp_freq;
struct dev_pm_opp *old_opp, *opp;
struct clk *clk;
int ret;
- if (unlikely(!target_freq)) {
- dev_err(dev, "%s: Invalid target frequency %lu\n", __func__,
- target_freq);
- return -EINVAL;
- }
-
opp_table = _find_opp_table(dev);
if (IS_ERR(opp_table)) {
dev_err(dev, "%s: device opp doesn't exist\n", __func__);
return PTR_ERR(opp_table);
}
+ if (unlikely(!target_freq)) {
+ if (opp_table->required_opp_tables) {
+ ret = _set_required_opps(dev, opp_table, NULL);
+ } else {
+ dev_err(dev, "target frequency can't be 0\n");
+ ret = -EINVAL;
+ }
+
+ goto put_opp_table;
+ }
+
clk = opp_table->clk;
if (IS_ERR(clk)) {
dev_err(dev, "%s: No clock available for the device\n",
@@ -793,13 +801,15 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
goto put_opp_table;
}
- old_opp = _find_freq_ceil(opp_table, &old_freq);
+ temp_freq = old_freq;
+ old_opp = _find_freq_ceil(opp_table, &temp_freq);
if (IS_ERR(old_opp)) {
dev_err(dev, "%s: failed to find current OPP for freq %lu (%ld)\n",
__func__, old_freq, PTR_ERR(old_opp));
}
- opp = _find_freq_ceil(opp_table, &freq);
+ temp_freq = freq;
+ opp = _find_freq_ceil(opp_table, &temp_freq);
if (IS_ERR(opp)) {
ret = PTR_ERR(opp);
dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n",
@@ -1741,91 +1751,137 @@ void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table)
}
EXPORT_SYMBOL_GPL(dev_pm_opp_unregister_set_opp_helper);
+static void _opp_detach_genpd(struct opp_table *opp_table)
+{
+ int index;
+
+ for (index = 0; index < opp_table->required_opp_count; index++) {
+ if (!opp_table->genpd_virt_devs[index])
+ continue;
+
+ dev_pm_domain_detach(opp_table->genpd_virt_devs[index], false);
+ opp_table->genpd_virt_devs[index] = NULL;
+ }
+
+ kfree(opp_table->genpd_virt_devs);
+ opp_table->genpd_virt_devs = NULL;
+}
+
/**
- * dev_pm_opp_set_genpd_virt_dev - Set virtual genpd device for an index
- * @dev: Consumer device for which the genpd device is getting set.
- * @virt_dev: virtual genpd device.
- * @index: index.
+ * dev_pm_opp_attach_genpd - Attach genpd(s) for the device and save virtual device pointer
+ * @dev: Consumer device for which the genpd is getting attached.
+ * @names: NULL-terminated array of pointers to the names of the genpds to attach.
*
* Multiple generic power domains for a device are supported with the help of
* virtual genpd devices, which are created for each consumer device - genpd
* pair. These are the device structures which are attached to the power domain
* and are required by the OPP core to set the performance state of the genpd.
+ * The same API also works for the case where a single genpd is available, so
+ * we don't need to support that separately.
*
* This helper will normally be called by the consumer driver of the device
- * "dev", as only that has details of the genpd devices.
+ * "dev", as only that has details of the genpd names.
*
- * This helper needs to be called once for each of those virtual devices, but
- * only if multiple domains are available for a device. Otherwise the original
- * device structure will be used instead by the OPP core.
+ * This helper needs to be called once with the list of all genpds to attach.
+ * Otherwise the original device structure will be used instead by the OPP core.
*/
-struct opp_table *dev_pm_opp_set_genpd_virt_dev(struct device *dev,
- struct device *virt_dev,
- int index)
+struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char **names)
{
struct opp_table *opp_table;
+ struct device *virt_dev;
+ int index, ret = -EINVAL;
+ const char **name = names;
opp_table = dev_pm_opp_get_opp_table(dev);
if (!opp_table)
return ERR_PTR(-ENOMEM);
+ /*
+ * If the genpd's OPP table isn't already initialized, parsing of the
+ * required-opps fails for dev. We should retry this after the genpd's
+ * OPP table has been added.
+ */
+ if (!opp_table->required_opp_count) {
+ ret = -EPROBE_DEFER;
+ goto put_table;
+ }
+
mutex_lock(&opp_table->genpd_virt_dev_lock);
- if (unlikely(!opp_table->genpd_virt_devs ||
- index >= opp_table->required_opp_count ||
- opp_table->genpd_virt_devs[index])) {
+ opp_table->genpd_virt_devs = kcalloc(opp_table->required_opp_count,
+ sizeof(*opp_table->genpd_virt_devs),
+ GFP_KERNEL);
+ if (!opp_table->genpd_virt_devs)
+ goto unlock;
- dev_err(dev, "Invalid request to set required device\n");
- dev_pm_opp_put_opp_table(opp_table);
- mutex_unlock(&opp_table->genpd_virt_dev_lock);
+ while (*name) {
+ index = of_property_match_string(dev->of_node,
+ "power-domain-names", *name);
+ if (index < 0) {
+ dev_err(dev, "Failed to find power domain: %s (%d)\n",
+ *name, index);
+ goto err;
+ }
- return ERR_PTR(-EINVAL);
+ if (index >= opp_table->required_opp_count) {
+ dev_err(dev, "Index can't be greater than required-opp-count - 1, %s (%d : %d)\n",
+ *name, opp_table->required_opp_count, index);
+ goto err;
+ }
+
+ if (opp_table->genpd_virt_devs[index]) {
+ dev_err(dev, "Genpd virtual device already set %s\n",
+ *name);
+ goto err;
+ }
+
+ virt_dev = dev_pm_domain_attach_by_name(dev, *name);
+ if (IS_ERR(virt_dev)) {
+ ret = PTR_ERR(virt_dev);
+ dev_err(dev, "Couldn't attach to pm_domain: %d\n", ret);
+ goto err;
+ }
+
+ opp_table->genpd_virt_devs[index] = virt_dev;
+ name++;
}
- opp_table->genpd_virt_devs[index] = virt_dev;
mutex_unlock(&opp_table->genpd_virt_dev_lock);
return opp_table;
+
+err:
+ _opp_detach_genpd(opp_table);
+unlock:
+ mutex_unlock(&opp_table->genpd_virt_dev_lock);
+
+put_table:
+ dev_pm_opp_put_opp_table(opp_table);
+
+ return ERR_PTR(ret);
}
+EXPORT_SYMBOL_GPL(dev_pm_opp_attach_genpd);
/**
- * dev_pm_opp_put_genpd_virt_dev() - Releases resources blocked for genpd device.
- * @opp_table: OPP table returned by dev_pm_opp_set_genpd_virt_dev().
- * @virt_dev: virtual genpd device.
+ * dev_pm_opp_detach_genpd() - Detach genpd(s) from the device.
+ * @opp_table: OPP table returned by dev_pm_opp_attach_genpd().
*
- * This releases the resource previously acquired with a call to
- * dev_pm_opp_set_genpd_virt_dev(). The consumer driver shall call this helper
- * if it doesn't want OPP core to update performance state of a power domain
- * anymore.
+ * This detaches the genpd(s), resets the virtual device pointers, and puts the
+ * OPP table.
*/
-void dev_pm_opp_put_genpd_virt_dev(struct opp_table *opp_table,
- struct device *virt_dev)
+void dev_pm_opp_detach_genpd(struct opp_table *opp_table)
{
- int i;
-
/*
* Acquire genpd_virt_dev_lock to make sure virt_dev isn't getting
* used in parallel.
*/
mutex_lock(&opp_table->genpd_virt_dev_lock);
-
- for (i = 0; i < opp_table->required_opp_count; i++) {
- if (opp_table->genpd_virt_devs[i] != virt_dev)
- continue;
-
- opp_table->genpd_virt_devs[i] = NULL;
- dev_pm_opp_put_opp_table(opp_table);
-
- /* Drop the vote */
- dev_pm_genpd_set_performance_state(virt_dev, 0);
- break;
- }
-
+ _opp_detach_genpd(opp_table);
mutex_unlock(&opp_table->genpd_virt_dev_lock);
- if (unlikely(i == opp_table->required_opp_count))
- dev_err(virt_dev, "Failed to find required device entry\n");
+ dev_pm_opp_put_opp_table(opp_table);
}
+EXPORT_SYMBOL_GPL(dev_pm_opp_detach_genpd);
/**
* dev_pm_opp_xlate_performance_state() - Find required OPP's pstate for src_table.
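
dev_pm_opp_attach_genpd() folds the old per-index dev_pm_opp_set_genpd_virt_dev() calls into a single attach of all scalable power domains by name, and dev_pm_opp_detach_genpd() undoes it. A hedged consumer-side sketch; the "mx"/"cx" domain names are hypothetical and must match the device's power-domain-names property:

const char *genpd_names[] = { "mx", "cx", NULL };
struct opp_table *opp_table;

opp_table = dev_pm_opp_attach_genpd(dev, genpd_names);
if (IS_ERR(opp_table))
	return PTR_ERR(opp_table); /* -EPROBE_DEFER until the genpd OPP tables exist */

/* dev_pm_opp_set_rate(dev, freq) now also sets the required performance
 * state of each attached domain; a target_freq of 0 drops those votes. */

dev_pm_opp_detach_genpd(opp_table);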
diff --git a/drivers/opp/of.c b/drivers/opp/of.c
index b7d81c408242..b313aca9894f 100644
--- a/drivers/opp/of.c
+++ b/drivers/opp/of.c
@@ -138,7 +138,6 @@ err:
static void _opp_table_free_required_tables(struct opp_table *opp_table)
{
struct opp_table **required_opp_tables = opp_table->required_opp_tables;
- struct device **genpd_virt_devs = opp_table->genpd_virt_devs;
int i;
if (!required_opp_tables)
@@ -152,10 +151,8 @@ static void _opp_table_free_required_tables(struct opp_table *opp_table)
}
kfree(required_opp_tables);
- kfree(genpd_virt_devs);
opp_table->required_opp_count = 0;
- opp_table->genpd_virt_devs = NULL;
opp_table->required_opp_tables = NULL;
}
@@ -168,9 +165,8 @@ static void _opp_table_alloc_required_tables(struct opp_table *opp_table,
struct device_node *opp_np)
{
struct opp_table **required_opp_tables;
- struct device **genpd_virt_devs = NULL;
struct device_node *required_np, *np;
- int count, count_pd, i;
+ int count, i;
/* Traversing the first OPP node is all we need */
np = of_get_next_available_child(opp_np, NULL);
@@ -183,33 +179,11 @@ static void _opp_table_alloc_required_tables(struct opp_table *opp_table,
if (!count)
goto put_np;
- /*
- * Check the number of power-domains to know if we need to deal
- * with virtual devices. In some cases we have devices with multiple
- * power domains but with only one of them being scalable, hence
- * 'count' could be 1, but we still have to deal with multiple genpds
- * and virtual devices.
- */
- count_pd = of_count_phandle_with_args(dev->of_node, "power-domains",
- "#power-domain-cells");
- if (!count_pd)
- goto put_np;
-
- if (count_pd > 1) {
- genpd_virt_devs = kcalloc(count, sizeof(*genpd_virt_devs),
- GFP_KERNEL);
- if (!genpd_virt_devs)
- goto put_np;
- }
-
required_opp_tables = kcalloc(count, sizeof(*required_opp_tables),
GFP_KERNEL);
- if (!required_opp_tables) {
- kfree(genpd_virt_devs);
+ if (!required_opp_tables)
goto put_np;
- }
- opp_table->genpd_virt_devs = genpd_virt_devs;
opp_table->required_opp_tables = required_opp_tables;
opp_table->required_opp_count = count;
diff --git a/drivers/parport/Kconfig b/drivers/parport/Kconfig
index 24189c3399e0..1791830e7a71 100644
--- a/drivers/parport/Kconfig
+++ b/drivers/parport/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
#
# For a description of the syntax of this configuration file,
-# see Documentation/kbuild/kconfig-language.txt.
+# see Documentation/kbuild/kconfig-language.rst.
#
# Parport configuration.
#
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 1897847ceb0c..45049f558860 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -685,12 +685,21 @@ static pci_power_t acpi_pci_get_power_state(struct pci_dev *dev)
if (!adev || !acpi_device_power_manageable(adev))
return PCI_UNKNOWN;
- if (acpi_device_get_power(adev, &state) || state == ACPI_STATE_UNKNOWN)
+ state = adev->power.state;
+ if (state == ACPI_STATE_UNKNOWN)
return PCI_UNKNOWN;
return state_conv[state];
}
+static void acpi_pci_refresh_power_state(struct pci_dev *dev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
+
+ if (adev && acpi_device_power_manageable(adev))
+ acpi_device_update_power(adev, NULL);
+}
+
static int acpi_pci_propagate_wakeup(struct pci_bus *bus, bool enable)
{
while (bus->parent) {
@@ -748,6 +757,7 @@ static const struct pci_platform_pm_ops acpi_pci_platform_pm = {
.is_manageable = acpi_pci_power_manageable,
.set_state = acpi_pci_set_power_state,
.get_state = acpi_pci_get_power_state,
+ .refresh_state = acpi_pci_refresh_power_state,
.choose_state = acpi_pci_choose_state,
.set_wakeup = acpi_pci_wakeup,
.need_resume = acpi_pci_need_resume,
@@ -901,6 +911,7 @@ static void pci_acpi_setup(struct device *dev)
device_wakeup_enable(dev);
acpi_pci_wakeup(pci_dev, false);
+ acpi_device_power_add_dependent(adev, dev);
}
static void pci_acpi_cleanup(struct device *dev)
@@ -913,6 +924,7 @@ static void pci_acpi_cleanup(struct device *dev)
pci_acpi_remove_pm_notifier(adev);
if (adev->wakeup.flags.valid) {
+ acpi_device_power_remove_dependent(adev, dev);
if (pci_dev->bridge_d3)
device_wakeup_disable(dev);
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 98af9ecd4a90..36dbe960306b 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -678,6 +678,7 @@ static bool pci_has_legacy_pm_support(struct pci_dev *pci_dev)
static int pci_pm_prepare(struct device *dev)
{
struct device_driver *drv = dev->driver;
+ struct pci_dev *pci_dev = to_pci_dev(dev);
if (drv && drv->pm && drv->pm->prepare) {
int error = drv->pm->prepare(dev);
@@ -687,7 +688,15 @@ static int pci_pm_prepare(struct device *dev)
if (!error && dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_PREPARE))
return 0;
}
- return pci_dev_keep_suspended(to_pci_dev(dev));
+ if (pci_dev_need_resume(pci_dev))
+ return 0;
+
+ /*
+ * The PME setting needs to be adjusted here in case the direct-complete
+ * optimization is used with respect to this device.
+ */
+ pci_dev_adjust_pme(pci_dev);
+ return 1;
}
static void pci_pm_complete(struct device *dev)
@@ -701,7 +710,14 @@ static void pci_pm_complete(struct device *dev)
if (pm_runtime_suspended(dev) && pm_resume_via_firmware()) {
pci_power_t pre_sleep_state = pci_dev->current_state;
- pci_update_current_state(pci_dev, pci_dev->current_state);
+ pci_refresh_power_state(pci_dev);
+ /*
+ * On platforms with ACPI this check may also trigger for
+ * devices sharing power resources if one of those power
+ * resources has been activated as a result of a change of the
+ * power state of another device sharing it. However, in that
+ * case it is also better to resume the device, in general.
+ */
if (pci_dev->current_state < pre_sleep_state)
pm_request_resume(dev);
}
@@ -757,9 +773,11 @@ static int pci_pm_suspend(struct device *dev)
* better to resume the device from runtime suspend here.
*/
if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) ||
- !pci_dev_keep_suspended(pci_dev)) {
+ pci_dev_need_resume(pci_dev)) {
pm_runtime_resume(dev);
pci_dev->state_saved = false;
+ } else {
+ pci_dev_adjust_pme(pci_dev);
}
if (pm->suspend) {
@@ -859,7 +877,7 @@ static int pci_pm_suspend_noirq(struct device *dev)
pci_dev->bus->self->skip_bus_pm = true;
}
- if (pci_dev->skip_bus_pm && !pm_suspend_via_firmware()) {
+ if (pci_dev->skip_bus_pm && pm_suspend_no_platform()) {
dev_dbg(dev, "PCI PM: Skipped\n");
goto Fixup;
}
@@ -914,10 +932,10 @@ static int pci_pm_resume_noirq(struct device *dev)
/*
* In the suspend-to-idle case, devices left in D0 during suspend will
* stay in D0, so it is not necessary to restore or update their
- * configuration here and attempting to put them into D0 again may
- * confuse some firmware, so avoid doing that.
+ * configuration here and attempting to put them into D0 again is
+ * pointless, so avoid doing that.
*/
- if (!pci_dev->skip_bus_pm || pm_suspend_via_firmware())
+ if (!(pci_dev->skip_bus_pm && pm_suspend_no_platform()))
pci_pm_default_resume_early(pci_dev);
pci_fixup_device(pci_fixup_resume_early, pci_dev);
@@ -994,15 +1012,15 @@ static int pci_pm_freeze(struct device *dev)
}
/*
- * This used to be done in pci_pm_prepare() for all devices and some
- * drivers may depend on it, so do it here. Ideally, runtime-suspended
- * devices should not be touched during freeze/thaw transitions,
- * however.
+ * Resume all runtime-suspended devices before creating a snapshot
+ * image of system memory, because the restore kernel generally cannot
+ * be expected to always handle them consistently and they need to be
+ * put into the runtime-active metastate during system resume anyway,
+ * so it is better to ensure that the state saved in the image will be
+ * always consistent with that.
*/
- if (!dev_pm_smart_suspend_and_suspended(dev)) {
- pm_runtime_resume(dev);
- pci_dev->state_saved = false;
- }
+ pm_runtime_resume(dev);
+ pci_dev->state_saved = false;
if (pm->freeze) {
int error;
@@ -1016,22 +1034,11 @@ static int pci_pm_freeze(struct device *dev)
return 0;
}
-static int pci_pm_freeze_late(struct device *dev)
-{
- if (dev_pm_smart_suspend_and_suspended(dev))
- return 0;
-
- return pm_generic_freeze_late(dev);
-}
-
static int pci_pm_freeze_noirq(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
struct device_driver *drv = dev->driver;
- if (dev_pm_smart_suspend_and_suspended(dev))
- return 0;
-
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_suspend_late(dev, PMSG_FREEZE);
@@ -1061,16 +1068,6 @@ static int pci_pm_thaw_noirq(struct device *dev)
struct device_driver *drv = dev->driver;
int error = 0;
- /*
- * If the device is in runtime suspend, the code below may not work
- * correctly with it, so skip that code and make the PM core skip all of
- * the subsequent "thaw" callbacks for the device.
- */
- if (dev_pm_smart_suspend_and_suspended(dev)) {
- dev_pm_skip_next_resume_phases(dev);
- return 0;
- }
-
if (pcibios_pm_ops.thaw_noirq) {
error = pcibios_pm_ops.thaw_noirq(dev);
if (error)
@@ -1130,10 +1127,13 @@ static int pci_pm_poweroff(struct device *dev)
/* The reason to do that is the same as in pci_pm_suspend(). */
if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) ||
- !pci_dev_keep_suspended(pci_dev))
+ pci_dev_need_resume(pci_dev)) {
pm_runtime_resume(dev);
+ pci_dev->state_saved = false;
+ } else {
+ pci_dev_adjust_pme(pci_dev);
+ }
- pci_dev->state_saved = false;
if (pm->poweroff) {
int error;
@@ -1205,10 +1205,6 @@ static int pci_pm_restore_noirq(struct device *dev)
struct device_driver *drv = dev->driver;
int error = 0;
- /* This is analogous to the pci_pm_resume_noirq() case. */
- if (dev_pm_smart_suspend_and_suspended(dev))
- pm_runtime_set_active(dev);
-
if (pcibios_pm_ops.restore_noirq) {
error = pcibios_pm_ops.restore_noirq(dev);
if (error)
@@ -1258,7 +1254,6 @@ static int pci_pm_restore(struct device *dev)
#else /* !CONFIG_HIBERNATE_CALLBACKS */
#define pci_pm_freeze NULL
-#define pci_pm_freeze_late NULL
#define pci_pm_freeze_noirq NULL
#define pci_pm_thaw NULL
#define pci_pm_thaw_noirq NULL
@@ -1384,7 +1379,6 @@ static const struct dev_pm_ops pci_dev_pm_ops = {
.suspend_late = pci_pm_suspend_late,
.resume = pci_pm_resume,
.freeze = pci_pm_freeze,
- .freeze_late = pci_pm_freeze_late,
.thaw = pci_pm_thaw,
.poweroff = pci_pm_poweroff,
.poweroff_late = pci_pm_poweroff_late,
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 8abc843b1615..b1f563916036 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -777,6 +777,12 @@ static inline pci_power_t platform_pci_get_power_state(struct pci_dev *dev)
return pci_platform_pm ? pci_platform_pm->get_state(dev) : PCI_UNKNOWN;
}
+static inline void platform_pci_refresh_power_state(struct pci_dev *dev)
+{
+ if (pci_platform_pm && pci_platform_pm->refresh_state)
+ pci_platform_pm->refresh_state(dev);
+}
+
static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
return pci_platform_pm ?
@@ -938,6 +944,21 @@ void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
}
/**
+ * pci_refresh_power_state - Refresh the given device's power state data
+ * @dev: Target PCI device.
+ *
+ * Ask the platform to refresh the devices power state information and invoke
+ * pci_update_current_state() to update its current PCI power state.
+ */
+void pci_refresh_power_state(struct pci_dev *dev)
+{
+ if (platform_pci_power_manageable(dev))
+ platform_pci_refresh_power_state(dev);
+
+ pci_update_current_state(dev, dev->current_state);
+}
+
+/**
* pci_power_up - Put the given device into D0 forcibly
* @dev: PCI device to power up
*/
@@ -1004,15 +1025,10 @@ static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
if (state == PCI_D0) {
pci_platform_power_transition(dev, PCI_D0);
/*
- * Mandatory power management transition delays, see
- * PCI Express Base Specification Revision 2.0 Section
- * 6.6.1: Conventional Reset. Do not delay for
- * devices powered on/off by corresponding bridge,
- * because have already delayed for the bridge.
+ * Mandatory power management transition delays are
+ * handled in the PCIe portdrv resume hooks.
*/
if (dev->runtime_d3cold) {
- if (dev->d3cold_delay && !dev->imm_ready)
- msleep(dev->d3cold_delay);
/*
* When powering on a bridge from D3cold, the
* whole hierarchy may be powered on into
@@ -2065,6 +2081,13 @@ static void pci_pme_list_scan(struct work_struct *work)
*/
if (bridge && bridge->current_state != PCI_D0)
continue;
+ /*
+ * If the device is in D3cold it should not be
+ * polled either.
+ */
+ if (pme_dev->dev->current_state == PCI_D3cold)
+ continue;
+
pci_pme_wakeup(pme_dev->dev, NULL);
} else {
list_del(&pme_dev->list);
@@ -2459,45 +2482,56 @@ bool pci_dev_run_wake(struct pci_dev *dev)
EXPORT_SYMBOL_GPL(pci_dev_run_wake);
/**
- * pci_dev_keep_suspended - Check if the device can stay in the suspended state.
+ * pci_dev_need_resume - Check if it is necessary to resume the device.
* @pci_dev: Device to check.
*
- * Return 'true' if the device is runtime-suspended, it doesn't have to be
+ * Return 'true' if the device is not runtime-suspended or it has to be
* reconfigured due to wakeup settings difference between system and runtime
- * suspend and the current power state of it is suitable for the upcoming
- * (system) transition.
- *
- * If the device is not configured for system wakeup, disable PME for it before
- * returning 'true' to prevent it from waking up the system unnecessarily.
+ * suspend, or the current power state of it is not suitable for the upcoming
+ * (system-wide) transition.
*/
-bool pci_dev_keep_suspended(struct pci_dev *pci_dev)
+bool pci_dev_need_resume(struct pci_dev *pci_dev)
{
struct device *dev = &pci_dev->dev;
- bool wakeup = device_may_wakeup(dev);
+ pci_power_t target_state;
- if (!pm_runtime_suspended(dev)
- || pci_target_state(pci_dev, wakeup) != pci_dev->current_state
- || platform_pci_need_resume(pci_dev))
- return false;
+ if (!pm_runtime_suspended(dev) || platform_pci_need_resume(pci_dev))
+ return true;
+
+ target_state = pci_target_state(pci_dev, device_may_wakeup(dev));
/*
- * At this point the device is good to go unless it's been configured
- * to generate PME at the runtime suspend time, but it is not supposed
- * to wake up the system. In that case, simply disable PME for it
- * (it will have to be re-enabled on exit from system resume).
- *
- * If the device's power state is D3cold and the platform check above
- * hasn't triggered, the device's configuration is suitable and we don't
- * need to manipulate it at all.
+ * If the earlier platform check has not triggered, D3cold is just power
+ * removal on top of D3hot, so no need to resume the device in that
+ * case.
*/
+ return target_state != pci_dev->current_state &&
+ target_state != PCI_D3cold &&
+ pci_dev->current_state != PCI_D3hot;
+}
+
+/**
+ * pci_dev_adjust_pme - Adjust PME setting for a suspended device.
+ * @pci_dev: Device to check.
+ *
+ * If the device is suspended and it is not configured for system wakeup,
+ * disable PME for it to prevent it from waking up the system unnecessarily.
+ *
+ * Note that if the device's power state is D3cold and the platform check in
+ * pci_dev_need_resume() has not triggered, the device's configuration need not
+ * be changed.
+ */
+void pci_dev_adjust_pme(struct pci_dev *pci_dev)
+{
+ struct device *dev = &pci_dev->dev;
+
spin_lock_irq(&dev->power.lock);
- if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold &&
- !wakeup)
+ if (pm_runtime_suspended(dev) && !device_may_wakeup(dev) &&
+ pci_dev->current_state < PCI_D3cold)
__pci_pme_active(pci_dev, false);
spin_unlock_irq(&dev->power.lock);
- return true;
}
/**
@@ -4568,14 +4602,16 @@ static int pci_pm_reset(struct pci_dev *dev, int probe)
return pci_dev_wait(dev, "PM D3->D0", PCIE_RESET_READY_POLL_MS);
}
+
/**
- * pcie_wait_for_link - Wait until link is active or inactive
+ * pcie_wait_for_link_delay - Wait until link is active or inactive
* @pdev: Bridge device
* @active: waiting for active or inactive?
+ * @delay: Delay to wait after link has become active (in ms)
*
* Use this to wait till link becomes active or inactive.
*/
-bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
+bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active, int delay)
{
int timeout = 1000;
bool ret;
@@ -4612,13 +4648,25 @@ bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
timeout -= 10;
}
if (active && ret)
- msleep(100);
+ msleep(delay);
else if (ret != active)
pci_info(pdev, "Data Link Layer Link Active not %s in 1000 msec\n",
active ? "set" : "cleared");
return ret == active;
}
+/**
+ * pcie_wait_for_link - Wait until link is active or inactive
+ * @pdev: Bridge device
+ * @active: waiting for active or inactive?
+ *
+ * Use this to wait till link becomes active or inactive.
+ */
+bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
+{
+ return pcie_wait_for_link_delay(pdev, active, 100);
+}
+
void pci_reset_secondary_bus(struct pci_dev *dev)
{
u16 ctrl;
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 9cb99380c61e..5db6f985f16d 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -51,6 +51,8 @@ int pci_bus_error_reset(struct pci_dev *dev);
*
* @get_state: queries the platform firmware for a device's current power state
*
+ * @refresh_state: asks the platform to refresh the device's power state data
+ *
* @choose_state: returns PCI power state of given device preferred by the
* platform; to be used during system-wide transitions from a
* sleeping state to the working state and vice versa
@@ -69,6 +71,7 @@ struct pci_platform_pm_ops {
bool (*is_manageable)(struct pci_dev *dev);
int (*set_state)(struct pci_dev *dev, pci_power_t state);
pci_power_t (*get_state)(struct pci_dev *dev);
+ void (*refresh_state)(struct pci_dev *dev);
pci_power_t (*choose_state)(struct pci_dev *dev);
int (*set_wakeup)(struct pci_dev *dev, bool enable);
bool (*need_resume)(struct pci_dev *dev);
@@ -76,13 +79,15 @@ struct pci_platform_pm_ops {
int pci_set_platform_pm(const struct pci_platform_pm_ops *ops);
void pci_update_current_state(struct pci_dev *dev, pci_power_t state);
+void pci_refresh_power_state(struct pci_dev *dev);
void pci_power_up(struct pci_dev *dev);
void pci_disable_enabled_device(struct pci_dev *dev);
int pci_finish_runtime_suspend(struct pci_dev *dev);
void pcie_clear_root_pme_status(struct pci_dev *dev);
int __pci_pme_wakeup(struct pci_dev *dev, void *ign);
void pci_pme_restore(struct pci_dev *dev);
-bool pci_dev_keep_suspended(struct pci_dev *dev);
+bool pci_dev_need_resume(struct pci_dev *dev);
+void pci_dev_adjust_pme(struct pci_dev *dev);
void pci_dev_complete_resume(struct pci_dev *pci_dev);
void pci_config_pm_runtime_get(struct pci_dev *dev);
void pci_config_pm_runtime_put(struct pci_dev *dev);
@@ -493,6 +498,7 @@ static inline int pci_dev_specific_disable_acs_redir(struct pci_dev *dev)
void pcie_do_recovery(struct pci_dev *dev, enum pci_channel_state state,
u32 service);
+bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active, int delay);
bool pcie_wait_for_link(struct pci_dev *pdev, bool active);
#ifdef CONFIG_PCIEASPM
void pcie_aspm_init_link_state(struct pci_dev *pdev);
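
A sketch of how a platform-firmware backend would plug into the new ->refresh_state() callback declared above; every name here is an illustrative stand-in, since the actual backend wiring is not part of this hunk.

/* Illustrative only: the real callbacks come from the platform firmware
 * glue (e.g. the ACPI backend); these stubs just show the plumbing.
 */
static void example_refresh_state(struct pci_dev *dev)
{
	/* Re-query firmware for the device's power state and cache it. */
}

static const struct pci_platform_pm_ops example_platform_pm_ops = {
	/* .is_manageable, .set_state, .get_state, ... omitted here */
	.refresh_state	= example_refresh_state,
};

Registering it with pci_set_platform_pm(&example_platform_pm_ops) would then let pci_refresh_power_state() reach the hook through platform_pci_refresh_power_state().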
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index fd4cb75088f9..e44af7f4d37f 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -1062,18 +1062,18 @@ void pcie_aspm_powersave_config_link(struct pci_dev *pdev)
up_read(&pci_bus_sem);
}
-static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
+static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
{
struct pci_dev *parent = pdev->bus->self;
struct pcie_link_state *link;
if (!pci_is_pcie(pdev))
- return;
+ return 0;
if (pdev->has_secondary_link)
parent = pdev;
if (!parent || !parent->link_state)
- return;
+ return -EINVAL;
/*
* A driver requested that ASPM be disabled on this device, but
@@ -1085,7 +1085,7 @@ static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
*/
if (aspm_disabled) {
pci_warn(pdev, "can't disable ASPM; OS doesn't have ASPM control\n");
- return;
+ return -EPERM;
}
if (sem)
@@ -1105,11 +1105,13 @@ static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
mutex_unlock(&aspm_lock);
if (sem)
up_read(&pci_bus_sem);
+
+ return 0;
}
-void pci_disable_link_state_locked(struct pci_dev *pdev, int state)
+int pci_disable_link_state_locked(struct pci_dev *pdev, int state)
{
- __pci_disable_link_state(pdev, state, false);
+ return __pci_disable_link_state(pdev, state, false);
}
EXPORT_SYMBOL(pci_disable_link_state_locked);
@@ -1117,14 +1119,14 @@ EXPORT_SYMBOL(pci_disable_link_state_locked);
* pci_disable_link_state - Disable device's link state, so the link will
* never enter specific states. Note that if the BIOS didn't grant ASPM
* control to the OS, this does nothing because we can't touch the LNKCTL
- * register.
+ * register. Returns 0 or a negative errno.
*
* @pdev: PCI device
* @state: ASPM link state to disable
*/
-void pci_disable_link_state(struct pci_dev *pdev, int state)
+int pci_disable_link_state(struct pci_dev *pdev, int state)
{
- __pci_disable_link_state(pdev, state, true);
+ return __pci_disable_link_state(pdev, state, true);
}
EXPORT_SYMBOL(pci_disable_link_state);
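
Since pci_disable_link_state() now reports failure, a caller can tell whether ASPM was actually disabled. A usage sketch, with hypothetical driver code that is not from this patch:

#include <linux/pci.h>

static void foo_disable_aspm(struct pci_dev *pdev)
{
	int ret = pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
					       PCIE_LINK_STATE_L1);

	/* e.g. -EPERM when the BIOS has not granted ASPM control to the OS */
	if (ret)
		dev_info(&pdev->dev, "ASPM not disabled (%d)\n", ret);
}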
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 1b330129089f..308c3e0c4a34 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -9,6 +9,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
+#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
@@ -378,6 +379,67 @@ static int pm_iter(struct device *dev, void *data)
return 0;
}
+static int get_downstream_delay(struct pci_bus *bus)
+{
+ struct pci_dev *pdev;
+ int min_delay = 100;
+ int max_delay = 0;
+
+ list_for_each_entry(pdev, &bus->devices, bus_list) {
+ if (!pdev->imm_ready)
+ min_delay = 0;
+ else if (pdev->d3cold_delay < min_delay)
+ min_delay = pdev->d3cold_delay;
+ if (pdev->d3cold_delay > max_delay)
+ max_delay = pdev->d3cold_delay;
+ }
+
+ return max(min_delay, max_delay);
+}
+
+/*
+ * wait_for_downstream_link - Wait for the downstream link to be established
+ * @pdev: PCIe port whose downstream link is waited for
+ *
+ * Handle delays according to PCIe 4.0 section 6.6.1 before configuration
+ * access to the downstream component is permitted.
+ *
+ * This blocks PCI core resume of the hierarchy below this port until the
+ * link is trained. Should be called before resuming port services to
+ * prevent pciehp from starting to tear down the hierarchy too soon.
+ */
+static void wait_for_downstream_link(struct pci_dev *pdev)
+{
+ int delay;
+
+ if (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT &&
+ pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM)
+ return;
+
+ if (pci_dev_is_disconnected(pdev))
+ return;
+
+ if (!pdev->subordinate || list_empty(&pdev->subordinate->devices) ||
+ !pdev->bridge_d3)
+ return;
+
+ delay = get_downstream_delay(pdev->subordinate);
+ if (!delay)
+ return;
+
+	dev_dbg(&pdev->dev, "waiting %d ms for downstream link\n", delay);
+
+ /*
+	 * If the downstream port does not support speeds greater than
+	 * 5 GT/s, we need to wait 100 ms. For higher speeds (gen3) we
+	 * first need to wait for the data link layer to become active.
+ */
+ if (pcie_get_speed_cap(pdev) <= PCIE_SPEED_5_0GT)
+ msleep(delay);
+ else
+ pcie_wait_for_link_delay(pdev, true, delay);
+}
+
/**
* pcie_port_device_suspend - suspend port services associated with a PCIe port
* @dev: PCI Express port to handle
@@ -391,6 +453,8 @@ int pcie_port_device_suspend(struct device *dev)
int pcie_port_device_resume_noirq(struct device *dev)
{
size_t off = offsetof(struct pcie_port_service_driver, resume_noirq);
+
+ wait_for_downstream_link(to_pci_dev(dev));
return device_for_each_child(dev, &off, pm_iter);
}
@@ -421,6 +485,8 @@ int pcie_port_device_runtime_suspend(struct device *dev)
int pcie_port_device_runtime_resume(struct device *dev)
{
size_t off = offsetof(struct pcie_port_service_driver, runtime_resume);
+
+ wait_for_downstream_link(to_pci_dev(dev));
return device_for_each_child(dev, &off, pm_iter);
}
#endif /* PM */
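
As a worked example of the delay selection in get_downstream_delay() above, here is a standalone userspace sketch; the two example children (one Immediate-Readiness-capable with a 10 ms delay, one without the capability at the default 100 ms) are assumptions chosen purely for illustration.

#include <stdio.h>

struct child {
	int imm_ready;		/* device advertises Immediate Readiness */
	int d3cold_delay;	/* per-device delay in ms */
};

/* Mirrors get_downstream_delay(): min_delay starts at 100 ms and is
 * lowered by Immediate-Readiness-capable children (or zeroed by ones
 * without the capability), max_delay tracks the largest per-child
 * delay, and the larger of the two is used.
 */
static int downstream_delay(const struct child *c, int n)
{
	int min_delay = 100, max_delay = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (!c[i].imm_ready)
			min_delay = 0;
		else if (c[i].d3cold_delay < min_delay)
			min_delay = c[i].d3cold_delay;
		if (c[i].d3cold_delay > max_delay)
			max_delay = c[i].d3cold_delay;
	}

	return max_delay > min_delay ? max_delay : min_delay;
}

int main(void)
{
	struct child bus[] = { { 1, 10 }, { 0, 100 } };

	printf("delay = %d ms\n", downstream_delay(bus, 2)); /* 100 ms */
	return 0;
}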
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index 552bda167e7d..09d06b082f8b 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -64,7 +64,7 @@ static void pcmcia_check_driver(struct pcmcia_driver *p_drv)
"be 0x%x\n", p_drv->name, did->prod_id[i],
did->prod_id_hash[i], hash);
printk(KERN_DEBUG "pcmcia: see "
- "Documentation/pcmcia/devicetable.txt for "
+ "Documentation/pcmcia/devicetable.rst for "
"details\n");
}
did++;
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
index e4221a107dca..09ae8a970880 100644
--- a/drivers/perf/Kconfig
+++ b/drivers/perf/Kconfig
@@ -71,6 +71,14 @@ config ARM_DSU_PMU
system, control logic. The PMU allows counting various events related
to DSU.
+config FSL_IMX8_DDR_PMU
+ tristate "Freescale i.MX8 DDR perf monitor"
+ depends on ARCH_MXC
+ help
+ Provides support for the DDR performance monitor in i.MX8, which
+ can give information about memory throughput and other related
+ events.
+
config HISI_PMU
bool "HiSilicon SoC PMU"
depends on ARM64 && ACPI
diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile
index 30489941f3d6..2ebb4de17815 100644
--- a/drivers/perf/Makefile
+++ b/drivers/perf/Makefile
@@ -5,6 +5,7 @@ obj-$(CONFIG_ARM_DSU_PMU) += arm_dsu_pmu.o
obj-$(CONFIG_ARM_PMU) += arm_pmu.o arm_pmu_platform.o
obj-$(CONFIG_ARM_PMU_ACPI) += arm_pmu_acpi.o
obj-$(CONFIG_ARM_SMMU_V3_PMU) += arm_smmuv3_pmu.o
+obj-$(CONFIG_FSL_IMX8_DDR_PMU) += fsl_imx8_ddr_perf.o
obj-$(CONFIG_HISI_PMU) += hisilicon/
obj-$(CONFIG_QCOM_L2_PMU) += qcom_l2_pmu.o
obj-$(CONFIG_QCOM_L3_PMU) += qcom_l3_pmu.o
diff --git a/drivers/perf/arm_pmu_acpi.c b/drivers/perf/arm_pmu_acpi.c
index d2c2978409d2..acce8781c456 100644
--- a/drivers/perf/arm_pmu_acpi.c
+++ b/drivers/perf/arm_pmu_acpi.c
@@ -71,6 +71,76 @@ static void arm_pmu_acpi_unregister_irq(int cpu)
acpi_unregister_gsi(gsi);
}
+#if IS_ENABLED(CONFIG_ARM_SPE_PMU)
+static struct resource spe_resources[] = {
+ {
+ /* irq */
+ .flags = IORESOURCE_IRQ,
+ }
+};
+
+static struct platform_device spe_dev = {
+ .name = ARMV8_SPE_PDEV_NAME,
+ .id = -1,
+ .resource = spe_resources,
+ .num_resources = ARRAY_SIZE(spe_resources)
+};
+
+/*
+ * For lack of a better place, hook the normal PMU MADT walk
+ * and create a SPE device if we detect a recent MADT with
+ * a homogeneous PPI mapping.
+ */
+static void arm_spe_acpi_register_device(void)
+{
+ int cpu, hetid, irq, ret;
+ bool first = true;
+ u16 gsi = 0;
+
+ /*
+ * Sanity check all the GICC tables for the same interrupt number.
+ * For now, we only support homogeneous ACPI/SPE machines.
+ */
+ for_each_possible_cpu(cpu) {
+ struct acpi_madt_generic_interrupt *gicc;
+
+ gicc = acpi_cpu_get_madt_gicc(cpu);
+ if (gicc->header.length < ACPI_MADT_GICC_SPE)
+ return;
+
+ if (first) {
+ gsi = gicc->spe_interrupt;
+ if (!gsi)
+ return;
+ hetid = find_acpi_cpu_topology_hetero_id(cpu);
+ first = false;
+ } else if ((gsi != gicc->spe_interrupt) ||
+ (hetid != find_acpi_cpu_topology_hetero_id(cpu))) {
+ pr_warn("ACPI: SPE must be homogeneous\n");
+ return;
+ }
+ }
+
+ irq = acpi_register_gsi(NULL, gsi, ACPI_LEVEL_SENSITIVE,
+ ACPI_ACTIVE_HIGH);
+ if (irq < 0) {
+		pr_warn("ACPI: SPE: Unable to register interrupt: %d\n", gsi);
+ return;
+ }
+
+ spe_resources[0].start = irq;
+ ret = platform_device_register(&spe_dev);
+ if (ret < 0) {
+ pr_warn("ACPI: SPE: Unable to register device\n");
+ acpi_unregister_gsi(gsi);
+ }
+}
+#else
+static inline void arm_spe_acpi_register_device(void)
+{
+}
+#endif /* CONFIG_ARM_SPE_PMU */
+
static int arm_pmu_acpi_parse_irqs(void)
{
int irq, cpu, irq_cpu, err;
@@ -276,6 +346,8 @@ static int arm_pmu_acpi_init(void)
if (acpi_disabled)
return 0;
+ arm_spe_acpi_register_device();
+
ret = arm_pmu_acpi_parse_irqs();
if (ret)
return ret;
diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
index 49b490925255..4e4984a55cd1 100644
--- a/drivers/perf/arm_spe_pmu.c
+++ b/drivers/perf/arm_spe_pmu.c
@@ -27,6 +27,7 @@
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/perf_event.h>
+#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
#include <linux/printk.h>
#include <linux/slab.h>
@@ -1157,7 +1158,13 @@ static const struct of_device_id arm_spe_pmu_of_match[] = {
};
MODULE_DEVICE_TABLE(of, arm_spe_pmu_of_match);
-static int arm_spe_pmu_device_dt_probe(struct platform_device *pdev)
+static const struct platform_device_id arm_spe_match[] = {
+ { ARMV8_SPE_PDEV_NAME, 0},
+ { }
+};
+MODULE_DEVICE_TABLE(platform, arm_spe_match);
+
+static int arm_spe_pmu_device_probe(struct platform_device *pdev)
{
int ret;
struct arm_spe_pmu *spe_pmu;
@@ -1217,11 +1224,12 @@ static int arm_spe_pmu_device_remove(struct platform_device *pdev)
}
static struct platform_driver arm_spe_pmu_driver = {
+ .id_table = arm_spe_match,
.driver = {
.name = DRVNAME,
.of_match_table = of_match_ptr(arm_spe_pmu_of_match),
},
- .probe = arm_spe_pmu_device_dt_probe,
+ .probe = arm_spe_pmu_device_probe,
.remove = arm_spe_pmu_device_remove,
};
diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c
new file mode 100644
index 000000000000..63fe21600072
--- /dev/null
+++ b/drivers/perf/fsl_imx8_ddr_perf.c
@@ -0,0 +1,554 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2017 NXP
+ * Copyright 2016 Freescale Semiconductor, Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/perf_event.h>
+#include <linux/slab.h>
+
+#define COUNTER_CNTL 0x0
+#define COUNTER_READ 0x20
+
+#define COUNTER_DPCR1 0x30
+
+#define CNTL_OVER 0x1
+#define CNTL_CLEAR 0x2
+#define CNTL_EN 0x4
+#define CNTL_EN_MASK 0xFFFFFFFB
+#define CNTL_CLEAR_MASK 0xFFFFFFFD
+#define CNTL_OVER_MASK 0xFFFFFFFE
+
+#define CNTL_CSV_SHIFT 24
+#define CNTL_CSV_MASK (0xFF << CNTL_CSV_SHIFT)
+
+#define EVENT_CYCLES_ID 0
+#define EVENT_CYCLES_COUNTER 0
+#define NUM_COUNTERS 4
+
+#define to_ddr_pmu(p) container_of(p, struct ddr_pmu, pmu)
+
+#define DDR_PERF_DEV_NAME "imx8_ddr"
+#define DDR_CPUHP_CB_NAME DDR_PERF_DEV_NAME "_perf_pmu"
+
+static DEFINE_IDA(ddr_ida);
+
+static const struct of_device_id imx_ddr_pmu_dt_ids[] = {
+ { .compatible = "fsl,imx8-ddr-pmu",},
+ { .compatible = "fsl,imx8m-ddr-pmu",},
+ { /* sentinel */ }
+};
+
+struct ddr_pmu {
+ struct pmu pmu;
+ void __iomem *base;
+ unsigned int cpu;
+ struct hlist_node node;
+ struct device *dev;
+ struct perf_event *events[NUM_COUNTERS];
+ int active_events;
+ enum cpuhp_state cpuhp_state;
+ int irq;
+ int id;
+};
+
+static ssize_t ddr_perf_cpumask_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ddr_pmu *pmu = dev_get_drvdata(dev);
+
+ return cpumap_print_to_pagebuf(true, buf, cpumask_of(pmu->cpu));
+}
+
+static struct device_attribute ddr_perf_cpumask_attr =
+ __ATTR(cpumask, 0444, ddr_perf_cpumask_show, NULL);
+
+static struct attribute *ddr_perf_cpumask_attrs[] = {
+ &ddr_perf_cpumask_attr.attr,
+ NULL,
+};
+
+static struct attribute_group ddr_perf_cpumask_attr_group = {
+ .attrs = ddr_perf_cpumask_attrs,
+};
+
+static ssize_t
+ddr_pmu_event_show(struct device *dev, struct device_attribute *attr,
+ char *page)
+{
+ struct perf_pmu_events_attr *pmu_attr;
+
+ pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
+ return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
+}
+
+#define IMX8_DDR_PMU_EVENT_ATTR(_name, _id) \
+ (&((struct perf_pmu_events_attr[]) { \
+ { .attr = __ATTR(_name, 0444, ddr_pmu_event_show, NULL),\
+ .id = _id, } \
+ })[0].attr.attr)
+
+static struct attribute *ddr_perf_events_attrs[] = {
+ IMX8_DDR_PMU_EVENT_ATTR(cycles, EVENT_CYCLES_ID),
+ IMX8_DDR_PMU_EVENT_ATTR(selfresh, 0x01),
+ IMX8_DDR_PMU_EVENT_ATTR(read-accesses, 0x04),
+ IMX8_DDR_PMU_EVENT_ATTR(write-accesses, 0x05),
+ IMX8_DDR_PMU_EVENT_ATTR(read-queue-depth, 0x08),
+ IMX8_DDR_PMU_EVENT_ATTR(write-queue-depth, 0x09),
+ IMX8_DDR_PMU_EVENT_ATTR(lp-read-credit-cnt, 0x10),
+ IMX8_DDR_PMU_EVENT_ATTR(hp-read-credit-cnt, 0x11),
+ IMX8_DDR_PMU_EVENT_ATTR(write-credit-cnt, 0x12),
+ IMX8_DDR_PMU_EVENT_ATTR(read-command, 0x20),
+ IMX8_DDR_PMU_EVENT_ATTR(write-command, 0x21),
+ IMX8_DDR_PMU_EVENT_ATTR(read-modify-write-command, 0x22),
+ IMX8_DDR_PMU_EVENT_ATTR(hp-read, 0x23),
+ IMX8_DDR_PMU_EVENT_ATTR(hp-req-nocredit, 0x24),
+ IMX8_DDR_PMU_EVENT_ATTR(hp-xact-credit, 0x25),
+ IMX8_DDR_PMU_EVENT_ATTR(lp-req-nocredit, 0x26),
+ IMX8_DDR_PMU_EVENT_ATTR(lp-xact-credit, 0x27),
+ IMX8_DDR_PMU_EVENT_ATTR(wr-xact-credit, 0x29),
+ IMX8_DDR_PMU_EVENT_ATTR(read-cycles, 0x2a),
+ IMX8_DDR_PMU_EVENT_ATTR(write-cycles, 0x2b),
+ IMX8_DDR_PMU_EVENT_ATTR(read-write-transition, 0x30),
+ IMX8_DDR_PMU_EVENT_ATTR(precharge, 0x31),
+ IMX8_DDR_PMU_EVENT_ATTR(activate, 0x32),
+ IMX8_DDR_PMU_EVENT_ATTR(load-mode, 0x33),
+ IMX8_DDR_PMU_EVENT_ATTR(perf-mwr, 0x34),
+ IMX8_DDR_PMU_EVENT_ATTR(read, 0x35),
+ IMX8_DDR_PMU_EVENT_ATTR(read-activate, 0x36),
+ IMX8_DDR_PMU_EVENT_ATTR(refresh, 0x37),
+ IMX8_DDR_PMU_EVENT_ATTR(write, 0x38),
+ IMX8_DDR_PMU_EVENT_ATTR(raw-hazard, 0x39),
+ NULL,
+};
+
+static struct attribute_group ddr_perf_events_attr_group = {
+ .name = "events",
+ .attrs = ddr_perf_events_attrs,
+};
+
+PMU_FORMAT_ATTR(event, "config:0-7");
+
+static struct attribute *ddr_perf_format_attrs[] = {
+ &format_attr_event.attr,
+ NULL,
+};
+
+static struct attribute_group ddr_perf_format_attr_group = {
+ .name = "format",
+ .attrs = ddr_perf_format_attrs,
+};
+
+static const struct attribute_group *attr_groups[] = {
+ &ddr_perf_events_attr_group,
+ &ddr_perf_format_attr_group,
+ &ddr_perf_cpumask_attr_group,
+ NULL,
+};
+
+static u32 ddr_perf_alloc_counter(struct ddr_pmu *pmu, int event)
+{
+ int i;
+
+ /*
+	 * Always map the cycle event to counter 0.
+	 * The cycles counter is dedicated to the cycle event and
+	 * can't be used for the other events.
+ */
+ if (event == EVENT_CYCLES_ID) {
+ if (pmu->events[EVENT_CYCLES_COUNTER] == NULL)
+ return EVENT_CYCLES_COUNTER;
+ else
+ return -ENOENT;
+ }
+
+ for (i = 1; i < NUM_COUNTERS; i++) {
+ if (pmu->events[i] == NULL)
+ return i;
+ }
+
+ return -ENOENT;
+}
+
+static void ddr_perf_free_counter(struct ddr_pmu *pmu, int counter)
+{
+ pmu->events[counter] = NULL;
+}
+
+static u32 ddr_perf_read_counter(struct ddr_pmu *pmu, int counter)
+{
+ return readl_relaxed(pmu->base + COUNTER_READ + counter * 4);
+}
+
+static int ddr_perf_event_init(struct perf_event *event)
+{
+ struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ struct perf_event *sibling;
+
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
+ return -EOPNOTSUPP;
+
+ if (event->cpu < 0) {
+ dev_warn(pmu->dev, "Can't provide per-task data!\n");
+ return -EOPNOTSUPP;
+ }
+
+ /*
+ * We must NOT create groups containing mixed PMUs, although software
+	 * events are acceptable (for example, to create a group that is
+	 * read periodically when a hrtimer, aka cpu-clock, leader triggers).
+ */
+ if (event->group_leader->pmu != event->pmu &&
+ !is_software_event(event->group_leader))
+ return -EINVAL;
+
+ for_each_sibling_event(sibling, event->group_leader) {
+ if (sibling->pmu != event->pmu &&
+ !is_software_event(sibling))
+ return -EINVAL;
+ }
+
+ event->cpu = pmu->cpu;
+ hwc->idx = -1;
+
+ return 0;
+}
+
+
+static void ddr_perf_event_update(struct perf_event *event)
+{
+ struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ u64 delta, prev_raw_count, new_raw_count;
+ int counter = hwc->idx;
+
+ do {
+ prev_raw_count = local64_read(&hwc->prev_count);
+ new_raw_count = ddr_perf_read_counter(pmu, counter);
+ } while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
+ new_raw_count) != prev_raw_count);
+
+ delta = (new_raw_count - prev_raw_count) & 0xFFFFFFFF;
+
+ local64_add(delta, &event->count);
+}
+
+static void ddr_perf_counter_enable(struct ddr_pmu *pmu, int config,
+ int counter, bool enable)
+{
+ u8 reg = counter * 4 + COUNTER_CNTL;
+ int val;
+
+ if (enable) {
+ /*
+		 * We must disable the counter first and then enable it
+		 * again; otherwise, the cycle counter will not work if
+		 * it was previously enabled.
+ */
+ writel(0, pmu->base + reg);
+ val = CNTL_EN | CNTL_CLEAR;
+ val |= FIELD_PREP(CNTL_CSV_MASK, config);
+ writel(val, pmu->base + reg);
+ } else {
+ /* Disable counter */
+ writel(0, pmu->base + reg);
+ }
+}
+
+static void ddr_perf_event_start(struct perf_event *event, int flags)
+{
+ struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int counter = hwc->idx;
+
+ local64_set(&hwc->prev_count, 0);
+
+ ddr_perf_counter_enable(pmu, event->attr.config, counter, true);
+
+ hwc->state = 0;
+}
+
+static int ddr_perf_event_add(struct perf_event *event, int flags)
+{
+ struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int counter;
+ int cfg = event->attr.config;
+
+ counter = ddr_perf_alloc_counter(pmu, cfg);
+ if (counter < 0) {
+ dev_dbg(pmu->dev, "There are not enough counters\n");
+ return -EOPNOTSUPP;
+ }
+
+ pmu->events[counter] = event;
+ pmu->active_events++;
+ hwc->idx = counter;
+
+ hwc->state |= PERF_HES_STOPPED;
+
+ if (flags & PERF_EF_START)
+ ddr_perf_event_start(event, flags);
+
+ return 0;
+}
+
+static void ddr_perf_event_stop(struct perf_event *event, int flags)
+{
+ struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int counter = hwc->idx;
+
+ ddr_perf_counter_enable(pmu, event->attr.config, counter, false);
+ ddr_perf_event_update(event);
+
+ hwc->state |= PERF_HES_STOPPED;
+}
+
+static void ddr_perf_event_del(struct perf_event *event, int flags)
+{
+ struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int counter = hwc->idx;
+
+ ddr_perf_event_stop(event, PERF_EF_UPDATE);
+
+ ddr_perf_free_counter(pmu, counter);
+ pmu->active_events--;
+ hwc->idx = -1;
+}
+
+static void ddr_perf_pmu_enable(struct pmu *pmu)
+{
+ struct ddr_pmu *ddr_pmu = to_ddr_pmu(pmu);
+
+	/* Enable the cycle counter if the cycle event is not active */
+ if (ddr_pmu->events[EVENT_CYCLES_COUNTER] == NULL)
+ ddr_perf_counter_enable(ddr_pmu,
+ EVENT_CYCLES_ID,
+ EVENT_CYCLES_COUNTER,
+ true);
+}
+
+static void ddr_perf_pmu_disable(struct pmu *pmu)
+{
+ struct ddr_pmu *ddr_pmu = to_ddr_pmu(pmu);
+
+ if (ddr_pmu->events[EVENT_CYCLES_COUNTER] == NULL)
+ ddr_perf_counter_enable(ddr_pmu,
+ EVENT_CYCLES_ID,
+ EVENT_CYCLES_COUNTER,
+ false);
+}
+
+static int ddr_perf_init(struct ddr_pmu *pmu, void __iomem *base,
+ struct device *dev)
+{
+ *pmu = (struct ddr_pmu) {
+ .pmu = (struct pmu) {
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
+ .task_ctx_nr = perf_invalid_context,
+ .attr_groups = attr_groups,
+ .event_init = ddr_perf_event_init,
+ .add = ddr_perf_event_add,
+ .del = ddr_perf_event_del,
+ .start = ddr_perf_event_start,
+ .stop = ddr_perf_event_stop,
+ .read = ddr_perf_event_update,
+ .pmu_enable = ddr_perf_pmu_enable,
+ .pmu_disable = ddr_perf_pmu_disable,
+ },
+ .base = base,
+ .dev = dev,
+ };
+
+ pmu->id = ida_simple_get(&ddr_ida, 0, 0, GFP_KERNEL);
+ return pmu->id;
+}
+
+static irqreturn_t ddr_perf_irq_handler(int irq, void *p)
+{
+ int i;
+ struct ddr_pmu *pmu = (struct ddr_pmu *) p;
+ struct perf_event *event, *cycle_event = NULL;
+
+	/* All counters stop if the cycle counter is disabled */
+ ddr_perf_counter_enable(pmu,
+ EVENT_CYCLES_ID,
+ EVENT_CYCLES_COUNTER,
+ false);
+ /*
+ * When the cycle counter overflows, all counters are stopped,
+ * and an IRQ is raised. If any other counter overflows, it
+ * continues counting, and no IRQ is raised.
+ *
+ * Cycles occur at least 4 times as often as other events, so we
+ * can update all events on a cycle counter overflow and not
+ * lose events.
+ *
+ */
+ for (i = 0; i < NUM_COUNTERS; i++) {
+
+ if (!pmu->events[i])
+ continue;
+
+ event = pmu->events[i];
+
+ ddr_perf_event_update(event);
+
+ if (event->hw.idx == EVENT_CYCLES_COUNTER)
+ cycle_event = event;
+ }
+
+ ddr_perf_counter_enable(pmu,
+ EVENT_CYCLES_ID,
+ EVENT_CYCLES_COUNTER,
+ true);
+ if (cycle_event)
+ ddr_perf_event_update(cycle_event);
+
+ return IRQ_HANDLED;
+}
+
+static int ddr_perf_offline_cpu(unsigned int cpu, struct hlist_node *node)
+{
+ struct ddr_pmu *pmu = hlist_entry_safe(node, struct ddr_pmu, node);
+ int target;
+
+ if (cpu != pmu->cpu)
+ return 0;
+
+ target = cpumask_any_but(cpu_online_mask, cpu);
+ if (target >= nr_cpu_ids)
+ return 0;
+
+ perf_pmu_migrate_context(&pmu->pmu, cpu, target);
+ pmu->cpu = target;
+
+ WARN_ON(irq_set_affinity_hint(pmu->irq, cpumask_of(pmu->cpu)));
+
+ return 0;
+}
+
+static int ddr_perf_probe(struct platform_device *pdev)
+{
+ struct ddr_pmu *pmu;
+ struct device_node *np;
+ void __iomem *base;
+ char *name;
+ int num;
+ int ret;
+ int irq;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ np = pdev->dev.of_node;
+
+ pmu = devm_kzalloc(&pdev->dev, sizeof(*pmu), GFP_KERNEL);
+ if (!pmu)
+ return -ENOMEM;
+
+ num = ddr_perf_init(pmu, base, &pdev->dev);
+
+ platform_set_drvdata(pdev, pmu);
+
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, DDR_PERF_DEV_NAME "%d",
+ num);
+ if (!name)
+ return -ENOMEM;
+
+ pmu->cpu = raw_smp_processor_id();
+ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+ DDR_CPUHP_CB_NAME,
+ NULL,
+ ddr_perf_offline_cpu);
+
+ if (ret < 0) {
+ dev_err(&pdev->dev, "cpuhp_setup_state_multi failed\n");
+ goto ddr_perf_err;
+ }
+
+ pmu->cpuhp_state = ret;
+
+ /* Register the pmu instance for cpu hotplug */
+ cpuhp_state_add_instance_nocalls(pmu->cpuhp_state, &pmu->node);
+
+ /* Request irq */
+ irq = of_irq_get(np, 0);
+ if (irq < 0) {
+		dev_err(&pdev->dev, "Failed to get irq: %d\n", irq);
+ ret = irq;
+ goto ddr_perf_err;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq,
+ ddr_perf_irq_handler,
+ IRQF_NOBALANCING | IRQF_NO_THREAD,
+ DDR_CPUHP_CB_NAME,
+ pmu);
+ if (ret < 0) {
+		dev_err(&pdev->dev, "Request irq failed: %d\n", ret);
+ goto ddr_perf_err;
+ }
+
+ pmu->irq = irq;
+ ret = irq_set_affinity_hint(pmu->irq, cpumask_of(pmu->cpu));
+ if (ret) {
+ dev_err(pmu->dev, "Failed to set interrupt affinity!\n");
+ goto ddr_perf_err;
+ }
+
+ ret = perf_pmu_register(&pmu->pmu, name, -1);
+ if (ret)
+ goto ddr_perf_err;
+
+ return 0;
+
+ddr_perf_err:
+ if (pmu->cpuhp_state)
+ cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
+
+ ida_simple_remove(&ddr_ida, pmu->id);
+ dev_warn(&pdev->dev, "i.MX8 DDR Perf PMU failed (%d), disabled\n", ret);
+ return ret;
+}
+
+static int ddr_perf_remove(struct platform_device *pdev)
+{
+ struct ddr_pmu *pmu = platform_get_drvdata(pdev);
+
+ cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
+ irq_set_affinity_hint(pmu->irq, NULL);
+
+ perf_pmu_unregister(&pmu->pmu);
+
+ ida_simple_remove(&ddr_ida, pmu->id);
+ return 0;
+}
+
+static struct platform_driver imx_ddr_pmu_driver = {
+ .driver = {
+ .name = "imx-ddr-pmu",
+ .of_match_table = imx_ddr_pmu_dt_ids,
+ },
+ .probe = ddr_perf_probe,
+ .remove = ddr_perf_remove,
+};
+
+module_platform_driver(imx_ddr_pmu_driver);
+MODULE_LICENSE("GPL v2");
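
To make the counter-control encoding above concrete: the event select value occupies bits 31:24 (CNTL_CSV), so enabling a counter for, say, the read-cycles event (0x2a) writes 0x2a000006. A small standalone sketch, assuming only the register layout shown above:

#include <stdint.h>
#include <stdio.h>

#define CNTL_CLEAR	0x2
#define CNTL_EN		0x4
#define CNTL_CSV_SHIFT	24

/* Value written to a counter's COUNTER_CNTL register when enabling it. */
static uint32_t ddr_cntl_value(uint32_t event)
{
	return (event << CNTL_CSV_SHIFT) | CNTL_EN | CNTL_CLEAR;
}

int main(void)
{
	/* read-cycles is event 0x2a in the attribute table above */
	printf("0x%08x\n", ddr_cntl_value(0x2a)); /* prints 0x2a000006 */
	return 0;
}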
diff --git a/drivers/pinctrl/mediatek/mtk-eint.c b/drivers/pinctrl/mediatek/mtk-eint.c
index f464f8cd274b..7e526bcf5e0b 100644
--- a/drivers/pinctrl/mediatek/mtk-eint.c
+++ b/drivers/pinctrl/mediatek/mtk-eint.c
@@ -113,6 +113,8 @@ static void mtk_eint_mask(struct irq_data *d)
void __iomem *reg = mtk_eint_get_offset(eint, d->hwirq,
eint->regs->mask_set);
+ eint->cur_mask[d->hwirq >> 5] &= ~mask;
+
writel(mask, reg);
}
@@ -123,6 +125,8 @@ static void mtk_eint_unmask(struct irq_data *d)
void __iomem *reg = mtk_eint_get_offset(eint, d->hwirq,
eint->regs->mask_clr);
+ eint->cur_mask[d->hwirq >> 5] |= mask;
+
writel(mask, reg);
if (eint->dual_edge[d->hwirq])
@@ -217,19 +221,6 @@ static void mtk_eint_chip_write_mask(const struct mtk_eint *eint,
}
}
-static void mtk_eint_chip_read_mask(const struct mtk_eint *eint,
- void __iomem *base, u32 *buf)
-{
- int port;
- void __iomem *reg;
-
- for (port = 0; port < eint->hw->ports; port++) {
- reg = base + eint->regs->mask + (port << 2);
- buf[port] = ~readl_relaxed(reg);
- /* Mask is 0 when irq is enabled, and 1 when disabled. */
- }
-}
-
static int mtk_eint_irq_request_resources(struct irq_data *d)
{
struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
@@ -318,7 +309,7 @@ static void mtk_eint_irq_handler(struct irq_desc *desc)
struct irq_chip *chip = irq_desc_get_chip(desc);
struct mtk_eint *eint = irq_desc_get_handler_data(desc);
unsigned int status, eint_num;
- int offset, index, virq;
+ int offset, mask_offset, index, virq;
void __iomem *reg = mtk_eint_get_offset(eint, 0, eint->regs->stat);
int dual_edge, start_level, curr_level;
@@ -328,10 +319,24 @@ static void mtk_eint_irq_handler(struct irq_desc *desc)
status = readl(reg);
while (status) {
offset = __ffs(status);
+ mask_offset = eint_num >> 5;
index = eint_num + offset;
virq = irq_find_mapping(eint->domain, index);
status &= ~BIT(offset);
+ /*
+			 * If we get an interrupt on a pin that was only
+			 * required for wake (but no real interrupt was
+			 * requested), mask the interrupt (as mtk_eint_resume
+			 * would do anyway later in the resume sequence).
+ */
+ if (eint->wake_mask[mask_offset] & BIT(offset) &&
+ !(eint->cur_mask[mask_offset] & BIT(offset))) {
+ writel_relaxed(BIT(offset), reg -
+ eint->regs->stat +
+ eint->regs->mask_set);
+ }
+
dual_edge = eint->dual_edge[index];
if (dual_edge) {
/*
@@ -370,7 +375,6 @@ static void mtk_eint_irq_handler(struct irq_desc *desc)
int mtk_eint_do_suspend(struct mtk_eint *eint)
{
- mtk_eint_chip_read_mask(eint, eint->base, eint->cur_mask);
mtk_eint_chip_write_mask(eint, eint->base, eint->wake_mask);
return 0;
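
For reference, the wake_mask/cur_mask bookkeeping above packs one bit per hardware interrupt into 32-bit words; a minimal standalone illustration of the hwirq >> 5 word index and low-five-bit position used by the handlers (the hwirq value is an arbitrary example):

#include <stdio.h>

int main(void)
{
	unsigned int hwirq = 37;	/* arbitrary example interrupt */

	/* word 1, bit 5: the same decomposition mtk_eint_mask()/unmask()
	 * use when updating cur_mask[]
	 */
	printf("word %u, bit %u\n", hwirq >> 5, hwirq & 31);
	return 0;
}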
diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
index 568ca96cdb6d..3a235487e38d 100644
--- a/drivers/pinctrl/pinctrl-mcp23s08.c
+++ b/drivers/pinctrl/pinctrl-mcp23s08.c
@@ -771,6 +771,10 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
if (ret < 0)
goto fail;
+ ret = devm_gpiochip_add_data(dev, &mcp->chip, mcp);
+ if (ret < 0)
+ goto fail;
+
mcp->irq_controller =
device_property_read_bool(dev, "interrupt-controller");
if (mcp->irq && mcp->irq_controller) {
@@ -812,10 +816,6 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
goto fail;
}
- ret = devm_gpiochip_add_data(dev, &mcp->chip, mcp);
- if (ret < 0)
- goto fail;
-
if (one_regmap_config) {
mcp->pinctrl_desc.name = devm_kasprintf(dev, GFP_KERNEL,
"mcp23xxx-pinctrl.%d", raw_chip_address);
diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c
index 3b4ca52d2456..fb76fb2e9ea5 100644
--- a/drivers/pinctrl/pinctrl-ocelot.c
+++ b/drivers/pinctrl/pinctrl-ocelot.c
@@ -396,7 +396,7 @@ static int ocelot_pin_function_idx(struct ocelot_pinctrl *info,
return -1;
}
-#define REG(r, info, p) ((r) * (info)->stride + (4 * ((p) / 32)))
+#define REG_ALT(msb, info, p) (OCELOT_GPIO_ALT0 * (info)->stride + 4 * ((msb) + ((info)->stride * ((p) / 32))))
static int ocelot_pinmux_set_mux(struct pinctrl_dev *pctldev,
unsigned int selector, unsigned int group)
@@ -412,19 +412,21 @@ static int ocelot_pinmux_set_mux(struct pinctrl_dev *pctldev,
/*
* f is encoded on two bits.
- * bit 0 of f goes in BIT(pin) of ALT0, bit 1 of f goes in BIT(pin) of
- * ALT1
+ * bit 0 of f goes in BIT(pin) of ALT[0], bit 1 of f goes in BIT(pin) of
+ * ALT[1]
* This is racy because both registers can't be updated at the same time
* but it doesn't matter much for now.
*/
- regmap_update_bits(info->map, REG(OCELOT_GPIO_ALT0, info, pin->pin),
+ regmap_update_bits(info->map, REG_ALT(0, info, pin->pin),
BIT(p), f << p);
- regmap_update_bits(info->map, REG(OCELOT_GPIO_ALT1, info, pin->pin),
+ regmap_update_bits(info->map, REG_ALT(1, info, pin->pin),
BIT(p), f << (p - 1));
return 0;
}
+#define REG(r, info, p) ((r) * (info)->stride + (4 * ((p) / 32)))
+
static int ocelot_gpio_set_direction(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range,
unsigned int pin, bool input)
@@ -432,7 +434,7 @@ static int ocelot_gpio_set_direction(struct pinctrl_dev *pctldev,
struct ocelot_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
unsigned int p = pin % 32;
- regmap_update_bits(info->map, REG(OCELOT_GPIO_OE, info, p), BIT(p),
+ regmap_update_bits(info->map, REG(OCELOT_GPIO_OE, info, pin), BIT(p),
input ? 0 : BIT(p));
return 0;
@@ -445,9 +447,9 @@ static int ocelot_gpio_request_enable(struct pinctrl_dev *pctldev,
struct ocelot_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
unsigned int p = offset % 32;
- regmap_update_bits(info->map, REG(OCELOT_GPIO_ALT0, info, offset),
+ regmap_update_bits(info->map, REG_ALT(0, info, offset),
BIT(p), 0);
- regmap_update_bits(info->map, REG(OCELOT_GPIO_ALT1, info, offset),
+ regmap_update_bits(info->map, REG_ALT(1, info, offset),
BIT(p), 0);
return 0;
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 5d5cc6111081..b7e5cee2aa26 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -433,9 +433,6 @@ config COMPAL_LAPTOP
It adds support for rfkill, Bluetooth, WLAN, LCD brightness, hwmon
and battery charging level control.
- For a (possibly incomplete) list of supported laptops, please refer
- to: Documentation/platform/x86-laptop-drivers.txt
-
config SONY_LAPTOP
tristate "Sony Laptop Extras"
depends on ACPI
diff --git a/drivers/platform/x86/intel_cht_int33fe.c b/drivers/platform/x86/intel_cht_int33fe.c
index 6fa3cced6f8e..4fbdff48a4b5 100644
--- a/drivers/platform/x86/intel_cht_int33fe.c
+++ b/drivers/platform/x86/intel_cht_int33fe.c
@@ -21,18 +21,55 @@
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
+#include <linux/usb/pd.h>
#define EXPECTED_PTYPE 4
+enum {
+ INT33FE_NODE_FUSB302,
+ INT33FE_NODE_MAX17047,
+ INT33FE_NODE_PI3USB30532,
+ INT33FE_NODE_DISPLAYPORT,
+ INT33FE_NODE_ROLE_SWITCH,
+ INT33FE_NODE_USB_CONNECTOR,
+ INT33FE_NODE_MAX,
+};
+
struct cht_int33fe_data {
struct i2c_client *max17047;
struct i2c_client *fusb302;
struct i2c_client *pi3usb30532;
- /* Contain a list-head must be per device */
- struct device_connection connections[4];
+
+ struct fwnode_handle *dp;
+ struct fwnode_handle *mux;
+};
+
+static const struct software_node nodes[];
+
+static const struct software_node_ref_args pi3usb30532_ref = {
+ &nodes[INT33FE_NODE_PI3USB30532]
+};
+
+static const struct software_node_ref_args dp_ref = {
+ &nodes[INT33FE_NODE_DISPLAYPORT]
+};
+
+static struct software_node_ref_args mux_ref;
+
+static const struct software_node_reference usb_connector_refs[] = {
+ { "orientation-switch", 1, &pi3usb30532_ref},
+ { "mode-switch", 1, &pi3usb30532_ref},
+ { "displayport", 1, &dp_ref},
+ { }
+};
+
+static const struct software_node_reference fusb302_refs[] = {
+ { "usb-role-switch", 1, &mux_ref},
+ { }
};
/*
@@ -63,14 +100,6 @@ static int cht_int33fe_check_for_max17047(struct device *dev, void *data)
return 1;
}
-static struct i2c_client *cht_int33fe_find_max17047(void)
-{
- struct i2c_client *max17047 = NULL;
-
- i2c_for_each_dev(&max17047, cht_int33fe_check_for_max17047);
- return max17047;
-}
-
static const char * const max17047_suppliers[] = { "bq24190-charger" };
static const struct property_entry max17047_props[] = {
@@ -80,18 +109,196 @@ static const struct property_entry max17047_props[] = {
static const struct property_entry fusb302_props[] = {
PROPERTY_ENTRY_STRING("linux,extcon-name", "cht_wcove_pwrsrc"),
- PROPERTY_ENTRY_U32("fcs,max-sink-microvolt", 12000000),
- PROPERTY_ENTRY_U32("fcs,max-sink-microamp", 3000000),
- PROPERTY_ENTRY_U32("fcs,max-sink-microwatt", 36000000),
{ }
};
+#define PDO_FIXED_FLAGS \
+ (PDO_FIXED_DUAL_ROLE | PDO_FIXED_DATA_SWAP | PDO_FIXED_USB_COMM)
+
+static const u32 src_pdo[] = {
+ PDO_FIXED(5000, 1500, PDO_FIXED_FLAGS),
+};
+
+static const u32 snk_pdo[] = {
+ PDO_FIXED(5000, 400, PDO_FIXED_FLAGS),
+ PDO_VAR(5000, 12000, 3000),
+};
+
+static const struct property_entry usb_connector_props[] = {
+ PROPERTY_ENTRY_STRING("data-role", "dual"),
+ PROPERTY_ENTRY_STRING("power-role", "dual"),
+ PROPERTY_ENTRY_STRING("try-power-role", "sink"),
+ PROPERTY_ENTRY_U32_ARRAY("source-pdos", src_pdo),
+ PROPERTY_ENTRY_U32_ARRAY("sink-pdos", snk_pdo),
+ PROPERTY_ENTRY_U32("op-sink-microwatt", 2500000),
+ { }
+};
+
+static const struct software_node nodes[] = {
+ { "fusb302", NULL, fusb302_props, fusb302_refs },
+ { "max17047", NULL, max17047_props },
+ { "pi3usb30532" },
+ { "displayport" },
+ { "usb-role-switch" },
+ { "connector", &nodes[0], usb_connector_props, usb_connector_refs },
+ { }
+};
+
+static int cht_int33fe_setup_mux(struct cht_int33fe_data *data)
+{
+ struct fwnode_handle *fwnode;
+ struct device *dev;
+ struct device *p;
+
+ fwnode = software_node_fwnode(&nodes[INT33FE_NODE_ROLE_SWITCH]);
+ if (!fwnode)
+ return -ENODEV;
+
+	/* First, find the platform device */
+ p = bus_find_device_by_name(&platform_bus_type, NULL,
+ "intel_xhci_usb_sw");
+ if (!p)
+ return -EPROBE_DEFER;
+
+ /* Then the mux child device */
+ dev = device_find_child_by_name(p, "intel_xhci_usb_sw-role-switch");
+ put_device(p);
+ if (!dev)
+ return -EPROBE_DEFER;
+
+	/* If there already is a node for the mux, use that one. */
+ if (dev->fwnode)
+ fwnode_remove_software_node(fwnode);
+ else
+ dev->fwnode = fwnode;
+
+ data->mux = fwnode_handle_get(dev->fwnode);
+ put_device(dev);
+ mux_ref.node = to_software_node(data->mux);
+
+ return 0;
+}
+
+static int cht_int33fe_setup_dp(struct cht_int33fe_data *data)
+{
+ struct fwnode_handle *fwnode;
+ struct pci_dev *pdev;
+
+ fwnode = software_node_fwnode(&nodes[INT33FE_NODE_DISPLAYPORT]);
+ if (!fwnode)
+ return -ENODEV;
+
+ /* First let's find the GPU PCI device */
+ pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL);
+ if (!pdev || pdev->vendor != PCI_VENDOR_ID_INTEL) {
+ pci_dev_put(pdev);
+ return -ENODEV;
+ }
+
+ /* Then the DP child device node */
+ data->dp = device_get_named_child_node(&pdev->dev, "DD02");
+ pci_dev_put(pdev);
+ if (!data->dp)
+ return -ENODEV;
+
+ fwnode->secondary = ERR_PTR(-ENODEV);
+ data->dp->secondary = fwnode;
+
+ return 0;
+}
+
+static void cht_int33fe_remove_nodes(struct cht_int33fe_data *data)
+{
+ software_node_unregister_nodes(nodes);
+
+ if (data->mux) {
+ fwnode_handle_put(data->mux);
+ mux_ref.node = NULL;
+ data->mux = NULL;
+ }
+
+ if (data->dp) {
+ data->dp->secondary = NULL;
+ fwnode_handle_put(data->dp);
+ data->dp = NULL;
+ }
+}
+
+static int cht_int33fe_add_nodes(struct cht_int33fe_data *data)
+{
+ int ret;
+
+ ret = software_node_register_nodes(nodes);
+ if (ret)
+ return ret;
+
+ /* The devices that are not created in this driver need extra steps. */
+
+ /*
+ * There is no ACPI device node for the USB role mux, so we need to find
+ * the mux device and assign our node directly to it. That means we
+	 * depend on the mux driver. This function will return -EPROBE_DEFER
+ * until the mux device is registered.
+ */
+ ret = cht_int33fe_setup_mux(data);
+ if (ret)
+ goto err_remove_nodes;
+
+ /*
+	 * The DP connector does have an ACPI device node. In this case we can just
+ * find that ACPI node and assign our node as the secondary node to it.
+ */
+ ret = cht_int33fe_setup_dp(data);
+ if (ret)
+ goto err_remove_nodes;
+
+ return 0;
+
+err_remove_nodes:
+ cht_int33fe_remove_nodes(data);
+
+ return ret;
+}
+
+static int
+cht_int33fe_register_max17047(struct device *dev, struct cht_int33fe_data *data)
+{
+ struct i2c_client *max17047 = NULL;
+ struct i2c_board_info board_info;
+ struct fwnode_handle *fwnode;
+ int ret;
+
+ fwnode = software_node_fwnode(&nodes[INT33FE_NODE_MAX17047]);
+ if (!fwnode)
+ return -ENODEV;
+
+ i2c_for_each_dev(&max17047, cht_int33fe_check_for_max17047);
+ if (max17047) {
+ /* Pre-existing i2c-client for the max17047, add device-props */
+ fwnode->secondary = ERR_PTR(-ENODEV);
+ max17047->dev.fwnode->secondary = fwnode;
+ /* And re-probe to get the new device-props applied. */
+ ret = device_reprobe(&max17047->dev);
+ if (ret)
+ dev_warn(dev, "Reprobing max17047 error: %d\n", ret);
+ return 0;
+ }
+
+ memset(&board_info, 0, sizeof(board_info));
+ strlcpy(board_info.type, "max17047", I2C_NAME_SIZE);
+ board_info.dev_name = "max17047";
+ board_info.fwnode = fwnode;
+ data->max17047 = i2c_acpi_new_device(dev, 1, &board_info);
+
+ return PTR_ERR_OR_ZERO(data->max17047);
+}
+
static int cht_int33fe_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct i2c_board_info board_info;
struct cht_int33fe_data *data;
- struct i2c_client *max17047;
+ struct fwnode_handle *fwnode;
struct regulator *regulator;
unsigned long long ptyp;
acpi_status status;
@@ -151,43 +358,25 @@ static int cht_int33fe_probe(struct platform_device *pdev)
if (!data)
return -ENOMEM;
- /* Work around BIOS bug, see comment on cht_int33fe_find_max17047 */
- max17047 = cht_int33fe_find_max17047();
- if (max17047) {
- /* Pre-existing i2c-client for the max17047, add device-props */
- ret = device_add_properties(&max17047->dev, max17047_props);
- if (ret)
- return ret;
- /* And re-probe to get the new device-props applied. */
- ret = device_reprobe(&max17047->dev);
- if (ret)
- dev_warn(dev, "Reprobing max17047 error: %d\n", ret);
- } else {
- memset(&board_info, 0, sizeof(board_info));
- strlcpy(board_info.type, "max17047", I2C_NAME_SIZE);
- board_info.dev_name = "max17047";
- board_info.properties = max17047_props;
- data->max17047 = i2c_acpi_new_device(dev, 1, &board_info);
- if (IS_ERR(data->max17047))
- return PTR_ERR(data->max17047);
- }
+ ret = cht_int33fe_add_nodes(data);
+ if (ret)
+ return ret;
- data->connections[0].endpoint[0] = "port0";
- data->connections[0].endpoint[1] = "i2c-pi3usb30532";
- data->connections[0].id = "orientation-switch";
- data->connections[1].endpoint[0] = "port0";
- data->connections[1].endpoint[1] = "i2c-pi3usb30532";
- data->connections[1].id = "mode-switch";
- data->connections[2].endpoint[0] = "i2c-fusb302";
- data->connections[2].endpoint[1] = "intel_xhci_usb_sw-role-switch";
- data->connections[2].id = "usb-role-switch";
+ /* Work around BIOS bug, see comment on cht_int33fe_check_for_max17047 */
+ ret = cht_int33fe_register_max17047(dev, data);
+ if (ret)
+ goto out_remove_nodes;
- device_connections_add(data->connections);
+ fwnode = software_node_fwnode(&nodes[INT33FE_NODE_FUSB302]);
+ if (!fwnode) {
+ ret = -ENODEV;
+ goto out_unregister_max17047;
+ }
memset(&board_info, 0, sizeof(board_info));
strlcpy(board_info.type, "typec_fusb302", I2C_NAME_SIZE);
board_info.dev_name = "fusb302";
- board_info.properties = fusb302_props;
+ board_info.fwnode = fwnode;
board_info.irq = fusb302_irq;
data->fusb302 = i2c_acpi_new_device(dev, 2, &board_info);
@@ -196,8 +385,15 @@ static int cht_int33fe_probe(struct platform_device *pdev)
goto out_unregister_max17047;
}
+ fwnode = software_node_fwnode(&nodes[INT33FE_NODE_PI3USB30532]);
+ if (!fwnode) {
+ ret = -ENODEV;
+ goto out_unregister_fusb302;
+ }
+
memset(&board_info, 0, sizeof(board_info));
board_info.dev_name = "pi3usb30532";
+ board_info.fwnode = fwnode;
strlcpy(board_info.type, "pi3usb30532", I2C_NAME_SIZE);
data->pi3usb30532 = i2c_acpi_new_device(dev, 3, &board_info);
@@ -216,7 +412,8 @@ out_unregister_fusb302:
out_unregister_max17047:
i2c_unregister_device(data->max17047);
- device_connections_remove(data->connections);
+out_remove_nodes:
+ cht_int33fe_remove_nodes(data);
return ret;
}
@@ -229,7 +426,7 @@ static int cht_int33fe_remove(struct platform_device *pdev)
i2c_unregister_device(data->fusb302);
i2c_unregister_device(data->max17047);
- device_connections_remove(data->connections);
+ cht_int33fe_remove_nodes(data);
return 0;
}
diff --git a/drivers/power/avs/smartreflex.c b/drivers/power/avs/smartreflex.c
index c96c01e09740..4684e7df833a 100644
--- a/drivers/power/avs/smartreflex.c
+++ b/drivers/power/avs/smartreflex.c
@@ -899,38 +899,19 @@ static int omap_sr_probe(struct platform_device *pdev)
}
dev_info(&pdev->dev, "%s: SmartReflex driver initialized\n", __func__);
- if (!sr_dbg_dir) {
+ if (!sr_dbg_dir)
sr_dbg_dir = debugfs_create_dir("smartreflex", NULL);
- if (IS_ERR_OR_NULL(sr_dbg_dir)) {
- ret = PTR_ERR(sr_dbg_dir);
- pr_err("%s:sr debugfs dir creation failed(%d)\n",
- __func__, ret);
- goto err_list_del;
- }
- }
sr_info->dbg_dir = debugfs_create_dir(sr_info->name, sr_dbg_dir);
- if (IS_ERR_OR_NULL(sr_info->dbg_dir)) {
- dev_err(&pdev->dev, "%s: Unable to create debugfs directory\n",
- __func__);
- ret = PTR_ERR(sr_info->dbg_dir);
- goto err_debugfs;
- }
- (void) debugfs_create_file("autocomp", S_IRUGO | S_IWUSR,
- sr_info->dbg_dir, (void *)sr_info, &pm_sr_fops);
- (void) debugfs_create_x32("errweight", S_IRUGO, sr_info->dbg_dir,
- &sr_info->err_weight);
- (void) debugfs_create_x32("errmaxlimit", S_IRUGO, sr_info->dbg_dir,
- &sr_info->err_maxlimit);
+ debugfs_create_file("autocomp", S_IRUGO | S_IWUSR, sr_info->dbg_dir,
+ (void *)sr_info, &pm_sr_fops);
+ debugfs_create_x32("errweight", S_IRUGO, sr_info->dbg_dir,
+ &sr_info->err_weight);
+ debugfs_create_x32("errmaxlimit", S_IRUGO, sr_info->dbg_dir,
+ &sr_info->err_maxlimit);
nvalue_dir = debugfs_create_dir("nvalue", sr_info->dbg_dir);
- if (IS_ERR_OR_NULL(nvalue_dir)) {
- dev_err(&pdev->dev, "%s: Unable to create debugfs directory for n-values\n",
- __func__);
- ret = PTR_ERR(nvalue_dir);
- goto err_debugfs;
- }
if (sr_info->nvalue_count == 0 || !sr_info->nvalue_table) {
dev_warn(&pdev->dev, "%s: %s: No Voltage table for the corresponding vdd. Cannot create debugfs entries for n-values\n",
@@ -945,12 +926,12 @@ static int omap_sr_probe(struct platform_device *pdev)
snprintf(name, sizeof(name), "volt_%lu",
sr_info->nvalue_table[i].volt_nominal);
- (void) debugfs_create_x32(name, S_IRUGO | S_IWUSR, nvalue_dir,
- &(sr_info->nvalue_table[i].nvalue));
+ debugfs_create_x32(name, S_IRUGO | S_IWUSR, nvalue_dir,
+ &(sr_info->nvalue_table[i].nvalue));
snprintf(name, sizeof(name), "errminlimit_%lu",
sr_info->nvalue_table[i].volt_nominal);
- (void) debugfs_create_x32(name, S_IRUGO | S_IWUSR, nvalue_dir,
- &(sr_info->nvalue_table[i].errminlimit));
+ debugfs_create_x32(name, S_IRUGO | S_IWUSR, nvalue_dir,
+ &(sr_info->nvalue_table[i].errminlimit));
}
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index f888117b0efc..8692f6b79f93 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -166,12 +166,15 @@ struct rapl_domain {
#define power_zone_to_rapl_domain(_zone) \
container_of(_zone, struct rapl_domain, power_zone)
+/* maximum rapl package domain name: package-%d-die-%d */
+#define PACKAGE_DOMAIN_NAME_LENGTH 30
-/* Each physical package contains multiple domains, these are the common
+
+/* Each rapl package contains multiple domains, these are the common
* data across RAPL domains within a package.
*/
struct rapl_package {
- unsigned int id; /* physical package/socket id */
+	unsigned int id; /* logical die id; equals the physical id on 1-die systems */
unsigned int nr_domains;
unsigned long domain_map; /* bit map of active domains */
unsigned int power_unit;
@@ -186,6 +189,7 @@ struct rapl_package {
int lead_cpu; /* one active cpu per package for access */
/* Track active cpus */
struct cpumask cpumask;
+ char name[PACKAGE_DOMAIN_NAME_LENGTH];
};
struct rapl_defaults {
@@ -252,8 +256,9 @@ static struct powercap_control_type *control_type; /* PowerCap Controller */
static struct rapl_domain *platform_rapl_domain; /* Platform (PSys) domain */
/* caller to ensure CPU hotplug lock is held */
-static struct rapl_package *find_package_by_id(int id)
+static struct rapl_package *rapl_find_package_domain(int cpu)
{
+ int id = topology_logical_die_id(cpu);
struct rapl_package *rp;
list_for_each_entry(rp, &rapl_packages, plist) {
@@ -913,8 +918,8 @@ static int rapl_check_unit_core(struct rapl_package *rp, int cpu)
value = (msr_val & TIME_UNIT_MASK) >> TIME_UNIT_OFFSET;
rp->time_unit = 1000000 / (1 << value);
- pr_debug("Core CPU package %d energy=%dpJ, time=%dus, power=%duW\n",
- rp->id, rp->energy_unit, rp->time_unit, rp->power_unit);
+ pr_debug("Core CPU %s energy=%dpJ, time=%dus, power=%duW\n",
+ rp->name, rp->energy_unit, rp->time_unit, rp->power_unit);
return 0;
}
@@ -938,8 +943,8 @@ static int rapl_check_unit_atom(struct rapl_package *rp, int cpu)
value = (msr_val & TIME_UNIT_MASK) >> TIME_UNIT_OFFSET;
rp->time_unit = 1000000 / (1 << value);
- pr_debug("Atom package %d energy=%dpJ, time=%dus, power=%duW\n",
- rp->id, rp->energy_unit, rp->time_unit, rp->power_unit);
+ pr_debug("Atom %s energy=%dpJ, time=%dus, power=%duW\n",
+ rp->name, rp->energy_unit, rp->time_unit, rp->power_unit);
return 0;
}
@@ -1168,7 +1173,7 @@ static void rapl_update_domain_data(struct rapl_package *rp)
u64 val;
for (dmn = 0; dmn < rp->nr_domains; dmn++) {
- pr_debug("update package %d domain %s data\n", rp->id,
+ pr_debug("update %s domain %s data\n", rp->name,
rp->domains[dmn].name);
/* exclude non-raw primitives */
for (prim = 0; prim < NR_RAW_PRIMITIVES; prim++) {
@@ -1193,7 +1198,6 @@ static void rapl_unregister_powercap(void)
static int rapl_package_register_powercap(struct rapl_package *rp)
{
struct rapl_domain *rd;
- char dev_name[17]; /* max domain name = 7 + 1 + 8 for int + 1 for null*/
struct powercap_zone *power_zone = NULL;
int nr_pl, ret;
@@ -1204,20 +1208,16 @@ static int rapl_package_register_powercap(struct rapl_package *rp)
for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) {
if (rd->id == RAPL_DOMAIN_PACKAGE) {
nr_pl = find_nr_power_limit(rd);
- pr_debug("register socket %d package domain %s\n",
- rp->id, rd->name);
- memset(dev_name, 0, sizeof(dev_name));
- snprintf(dev_name, sizeof(dev_name), "%s-%d",
- rd->name, rp->id);
+ pr_debug("register package domain %s\n", rp->name);
power_zone = powercap_register_zone(&rd->power_zone,
control_type,
- dev_name, NULL,
+ rp->name, NULL,
&zone_ops[rd->id],
nr_pl,
&constraint_ops);
if (IS_ERR(power_zone)) {
- pr_debug("failed to register package, %d\n",
- rp->id);
+ pr_debug("failed to register power zone %s\n",
+ rp->name);
return PTR_ERR(power_zone);
}
/* track parent zone in per package/socket data */
@@ -1243,8 +1243,8 @@ static int rapl_package_register_powercap(struct rapl_package *rp)
&constraint_ops);
if (IS_ERR(power_zone)) {
- pr_debug("failed to register power_zone, %d:%s:%s\n",
- rp->id, rd->name, dev_name);
+ pr_debug("failed to register power_zone, %s:%s\n",
+ rp->name, rd->name);
ret = PTR_ERR(power_zone);
goto err_cleanup;
}
@@ -1257,7 +1257,7 @@ err_cleanup:
* failed after the first domain setup.
*/
while (--rd >= rp->domains) {
- pr_debug("unregister package %d domain %s\n", rp->id, rd->name);
+ pr_debug("unregister %s domain %s\n", rp->name, rd->name);
powercap_unregister_zone(control_type, &rd->power_zone);
}
@@ -1288,7 +1288,7 @@ static int __init rapl_register_psys(void)
rd->rpl[0].name = pl1_name;
rd->rpl[1].prim_id = PL2_ENABLE;
rd->rpl[1].name = pl2_name;
- rd->rp = find_package_by_id(0);
+ rd->rp = rapl_find_package_domain(0);
power_zone = powercap_register_zone(&rd->power_zone, control_type,
"psys", NULL,
@@ -1367,8 +1367,8 @@ static void rapl_detect_powerlimit(struct rapl_domain *rd)
/* check if the domain is locked by BIOS, ignore if MSR doesn't exist */
if (!rapl_read_data_raw(rd, FW_LOCK, false, &val64)) {
if (val64) {
- pr_info("RAPL package %d domain %s locked by BIOS\n",
- rd->rp->id, rd->name);
+ pr_info("RAPL %s domain %s locked by BIOS\n",
+ rd->rp->name, rd->name);
rd->state |= DOMAIN_STATE_BIOS_LOCKED;
}
}
@@ -1397,10 +1397,10 @@ static int rapl_detect_domains(struct rapl_package *rp, int cpu)
}
rp->nr_domains = bitmap_weight(&rp->domain_map, RAPL_DOMAIN_MAX);
if (!rp->nr_domains) {
- pr_debug("no valid rapl domains found in package %d\n", rp->id);
+ pr_debug("no valid rapl domains found in %s\n", rp->name);
return -ENODEV;
}
- pr_debug("found %d domains on package %d\n", rp->nr_domains, rp->id);
+ pr_debug("found %d domains on %s\n", rp->nr_domains, rp->name);
rp->domains = kcalloc(rp->nr_domains + 1, sizeof(struct rapl_domain),
GFP_KERNEL);
@@ -1433,8 +1433,8 @@ static void rapl_remove_package(struct rapl_package *rp)
rd_package = rd;
continue;
}
- pr_debug("remove package, undo power limit on %d: %s\n",
- rp->id, rd->name);
+ pr_debug("remove package, undo power limit on %s: %s\n",
+ rp->name, rd->name);
powercap_unregister_zone(control_type, &rd->power_zone);
}
/* do parent zone last */
@@ -1444,9 +1444,11 @@ static void rapl_remove_package(struct rapl_package *rp)
}
/* called from CPU hotplug notifier, hotplug lock held */
-static struct rapl_package *rapl_add_package(int cpu, int pkgid)
+static struct rapl_package *rapl_add_package(int cpu)
{
+ int id = topology_logical_die_id(cpu);
struct rapl_package *rp;
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
int ret;
rp = kzalloc(sizeof(struct rapl_package), GFP_KERNEL);
@@ -1454,9 +1456,16 @@ static struct rapl_package *rapl_add_package(int cpu, int pkgid)
return ERR_PTR(-ENOMEM);
/* add the new package to the list */
- rp->id = pkgid;
+ rp->id = id;
rp->lead_cpu = cpu;
+ if (topology_max_die_per_package() > 1)
+ snprintf(rp->name, PACKAGE_DOMAIN_NAME_LENGTH,
+ "package-%d-die-%d", c->phys_proc_id, c->cpu_die_id);
+ else
+ snprintf(rp->name, PACKAGE_DOMAIN_NAME_LENGTH, "package-%d",
+ c->phys_proc_id);
+
/* check if the package contains valid domains */
if (rapl_detect_domains(rp, cpu) ||
rapl_defaults->check_unit(rp, cpu)) {
@@ -1485,12 +1494,11 @@ err_free_package:
*/
static int rapl_cpu_online(unsigned int cpu)
{
- int pkgid = topology_physical_package_id(cpu);
struct rapl_package *rp;
- rp = find_package_by_id(pkgid);
+ rp = rapl_find_package_domain(cpu);
if (!rp) {
- rp = rapl_add_package(cpu, pkgid);
+ rp = rapl_add_package(cpu);
if (IS_ERR(rp))
return PTR_ERR(rp);
}
@@ -1500,11 +1508,10 @@ static int rapl_cpu_online(unsigned int cpu)
static int rapl_cpu_down_prep(unsigned int cpu)
{
- int pkgid = topology_physical_package_id(cpu);
struct rapl_package *rp;
int lead_cpu;
- rp = find_package_by_id(pkgid);
+ rp = rapl_find_package_domain(cpu);
if (!rp)
return 0;
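The renamed powercap zones above now carry topology-derived names instead of a bare package id. As a quick illustration of the naming scheme (not part of the patch), the following standalone C sketch reproduces the snprintf() logic; the buffer length and the phys_proc_id/cpu_die_id values are assumed for the example.

#include <stdio.h>

#define PACKAGE_DOMAIN_NAME_LENGTH 30	/* assumed size, for illustration only */

static void rapl_format_name(char *buf, int max_die_per_package,
			     int phys_proc_id, int cpu_die_id)
{
	/* Same naming scheme as rapl_add_package() in the hunk above. */
	if (max_die_per_package > 1)
		snprintf(buf, PACKAGE_DOMAIN_NAME_LENGTH,
			 "package-%d-die-%d", phys_proc_id, cpu_die_id);
	else
		snprintf(buf, PACKAGE_DOMAIN_NAME_LENGTH,
			 "package-%d", phys_proc_id);
}

int main(void)
{
	char name[PACKAGE_DOMAIN_NAME_LENGTH];

	rapl_format_name(name, 1, 0, 0);
	printf("%s\n", name);	/* "package-0" on a single-die part */
	rapl_format_name(name, 2, 0, 1);
	printf("%s\n", name);	/* "package-0-die-1" with multiple dies */
	return 0;
}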
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index 9b8fee5178e8..960961fb0d7c 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -44,7 +44,7 @@ config PTP_1588_CLOCK_DTE
config PTP_1588_CLOCK_QORIQ
tristate "Freescale QorIQ 1588 timer as PTP clock"
- depends on GIANFAR || FSL_DPAA_ETH || FSL_ENETC || FSL_ENETC_VF
+ depends on GIANFAR || FSL_DPAA_ETH || FSL_DPAA2_ETH || FSL_ENETC || FSL_ENETC_VF || COMPILE_TEST
depends on PTP_1588_CLOCK
default y
help
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index e189fa1be21e..e60eab7f8a61 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -63,7 +63,7 @@ static void enqueue_external_timestamp(struct timestamp_event_queue *queue,
spin_unlock_irqrestore(&queue->lock, flags);
}
-static s32 scaled_ppm_to_ppb(long ppm)
+s32 scaled_ppm_to_ppb(long ppm)
{
/*
* The 'freq' field in the 'struct timex' is in parts per
@@ -82,6 +82,7 @@ static s32 scaled_ppm_to_ppb(long ppm)
ppb >>= 13;
return (s32) ppb;
}
+EXPORT_SYMBOL(scaled_ppm_to_ppb);
/* posix clock implementation */
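The export above makes scaled_ppm_to_ppb() usable by drivers outside ptp_clock.c. The conversion follows from the 16-bit binary fraction in the scaled-ppm representation: 1 ppm corresponds to 2^16 scaled units, and ppb = ppm * 1000, which the helper evaluates as (ppm * 125) >> 13 to stay in integer arithmetic. A standalone sketch of the same math (the demo_ prefix is ours):

#include <stdio.h>
#include <stdint.h>

static int32_t demo_scaled_ppm_to_ppb(long ppm)
{
	/* Mirrors the kernel helper shown above. */
	int64_t ppb = 1 + ppm;

	ppb *= 125;
	ppb >>= 13;
	return (int32_t)ppb;
}

int main(void)
{
	/* 65536 scaled units == 1 ppm == 1000 ppb */
	printf("%d\n", demo_scaled_ppm_to_ppb(65536));	/* prints 1000 */
	return 0;
}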
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index dff5a93f7daa..a7e57516959e 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -401,6 +401,17 @@ config PWM_SAMSUNG
To compile this driver as a module, choose M here: the module
will be called pwm-samsung.
+config PWM_SIFIVE
+ tristate "SiFive PWM support"
+ depends on OF
+ depends on COMMON_CLK
+ depends on RISCV || COMPILE_TEST
+ help
+ Generic PWM framework driver for SiFive SoCs.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-sifive.
+
config PWM_SPEAR
tristate "STMicroelectronics SPEAr PWM support"
depends on PLAT_SPEAR
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index c368599d36c0..76b555b51887 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -39,6 +39,7 @@ obj-$(CONFIG_PWM_RCAR) += pwm-rcar.o
obj-$(CONFIG_PWM_RENESAS_TPU) += pwm-renesas-tpu.o
obj-$(CONFIG_PWM_ROCKCHIP) += pwm-rockchip.o
obj-$(CONFIG_PWM_SAMSUNG) += pwm-samsung.o
+obj-$(CONFIG_PWM_SIFIVE) += pwm-sifive.o
obj-$(CONFIG_PWM_SPEAR) += pwm-spear.o
obj-$(CONFIG_PWM_STI) += pwm-sti.o
obj-$(CONFIG_PWM_STM32) += pwm-stm32.o
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index 275b5f399a1a..c3ab07ab31a9 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -6,6 +6,7 @@
* Copyright (C) 2011-2012 Avionic Design GmbH
*/
+#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/pwm.h>
#include <linux/radix-tree.h>
@@ -626,8 +627,35 @@ static struct pwm_chip *of_node_to_pwmchip(struct device_node *np)
return ERR_PTR(-EPROBE_DEFER);
}
+static struct device_link *pwm_device_link_add(struct device *dev,
+ struct pwm_device *pwm)
+{
+ struct device_link *dl;
+
+ if (!dev) {
+ /*
+ * No device for the PWM consumer has been provided. It may
+ * impact the PM sequence ordering: the PWM supplier may get
+ * suspended before the consumer.
+ */
+ dev_warn(pwm->chip->dev,
+ "No consumer device specified to create a link to\n");
+ return NULL;
+ }
+
+ dl = device_link_add(dev, pwm->chip->dev, DL_FLAG_AUTOREMOVE_CONSUMER);
+ if (!dl) {
+ dev_err(dev, "failed to create device link to %s\n",
+ dev_name(pwm->chip->dev));
+ return ERR_PTR(-EINVAL);
+ }
+
+ return dl;
+}
+
/**
* of_pwm_get() - request a PWM via the PWM framework
+ * @dev: device for PWM consumer
* @np: device node to get the PWM from
* @con_id: consumer name
*
@@ -645,10 +673,12 @@ static struct pwm_chip *of_node_to_pwmchip(struct device_node *np)
* Returns: A pointer to the requested PWM device or an ERR_PTR()-encoded
* error code on failure.
*/
-struct pwm_device *of_pwm_get(struct device_node *np, const char *con_id)
+struct pwm_device *of_pwm_get(struct device *dev, struct device_node *np,
+ const char *con_id)
{
struct pwm_device *pwm = NULL;
struct of_phandle_args args;
+ struct device_link *dl;
struct pwm_chip *pc;
int index = 0;
int err;
@@ -679,6 +709,14 @@ struct pwm_device *of_pwm_get(struct device_node *np, const char *con_id)
if (IS_ERR(pwm))
goto put;
+ dl = pwm_device_link_add(dev, pwm);
+ if (IS_ERR(dl)) {
+ /* of_xlate ended up calling pwm_request_from_chip() */
+ pwm_free(pwm);
+ pwm = ERR_CAST(dl);
+ goto put;
+ }
+
/*
* If a consumer name was not given, try to look it up from the
* "pwm-names" property if it exists. Otherwise use the name of
@@ -700,6 +738,85 @@ put:
}
EXPORT_SYMBOL_GPL(of_pwm_get);
+#if IS_ENABLED(CONFIG_ACPI)
+static struct pwm_chip *device_to_pwmchip(struct device *dev)
+{
+ struct pwm_chip *chip;
+
+ mutex_lock(&pwm_lock);
+
+ list_for_each_entry(chip, &pwm_chips, list) {
+ struct acpi_device *adev = ACPI_COMPANION(chip->dev);
+
+ if ((chip->dev == dev) || (adev && &adev->dev == dev)) {
+ mutex_unlock(&pwm_lock);
+ return chip;
+ }
+ }
+
+ mutex_unlock(&pwm_lock);
+
+ return ERR_PTR(-EPROBE_DEFER);
+}
+#endif
+
+/**
+ * acpi_pwm_get() - request a PWM via parsing "pwms" property in ACPI
+ * @fwnode: firmware node to get the "pwm" property from
+ *
+ * Returns the PWM device parsed from the fwnode and index specified in the
+ * "pwms" property or a negative error-code on failure.
+ * Values parsed from the ACPI "pwms" package are stored in the returned PWM
+ * device object.
+ *
+ * This is analogous to of_pwm_get() except con_id is not yet supported.
+ * ACPI entries must look like
+ * Package () {"pwms", Package ()
+ * { <PWM device reference>, <PWM index>, <PWM period> [, <PWM flags>]}}
+ *
+ * Returns: A pointer to the requested PWM device or an ERR_PTR()-encoded
+ * error code on failure.
+ */
+static struct pwm_device *acpi_pwm_get(struct fwnode_handle *fwnode)
+{
+ struct pwm_device *pwm = ERR_PTR(-ENODEV);
+#if IS_ENABLED(CONFIG_ACPI)
+ struct fwnode_reference_args args;
+ struct acpi_device *acpi;
+ struct pwm_chip *chip;
+ int ret;
+
+ memset(&args, 0, sizeof(args));
+
+ ret = __acpi_node_get_property_reference(fwnode, "pwms", 0, 3, &args);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ acpi = to_acpi_device_node(args.fwnode);
+ if (!acpi)
+ return ERR_PTR(-EINVAL);
+
+ if (args.nargs < 2)
+ return ERR_PTR(-EPROTO);
+
+ chip = device_to_pwmchip(&acpi->dev);
+ if (IS_ERR(chip))
+ return ERR_CAST(chip);
+
+ pwm = pwm_request_from_chip(chip, args.args[0], NULL);
+ if (IS_ERR(pwm))
+ return pwm;
+
+ pwm->args.period = args.args[1];
+ pwm->args.polarity = PWM_POLARITY_NORMAL;
+
+ if (args.nargs > 2 && args.args[2] & PWM_POLARITY_INVERTED)
+ pwm->args.polarity = PWM_POLARITY_INVERSED;
+#endif
+
+ return pwm;
+}
+
/**
* pwm_add_table() - register PWM device consumers
* @table: array of consumers to register
@@ -754,6 +871,7 @@ struct pwm_device *pwm_get(struct device *dev, const char *con_id)
const char *dev_id = dev ? dev_name(dev) : NULL;
struct pwm_device *pwm;
struct pwm_chip *chip;
+ struct device_link *dl;
unsigned int best = 0;
struct pwm_lookup *p, *chosen = NULL;
unsigned int match;
@@ -761,7 +879,11 @@ struct pwm_device *pwm_get(struct device *dev, const char *con_id)
/* look up via DT first */
if (IS_ENABLED(CONFIG_OF) && dev && dev->of_node)
- return of_pwm_get(dev->of_node, con_id);
+ return of_pwm_get(dev, dev->of_node, con_id);
+
+ /* then lookup via ACPI */
+ if (dev && is_acpi_node(dev->fwnode))
+ return acpi_pwm_get(dev->fwnode);
/*
* We look up the provider in the static table typically provided by
@@ -838,6 +960,12 @@ struct pwm_device *pwm_get(struct device *dev, const char *con_id)
if (IS_ERR(pwm))
return pwm;
+ dl = pwm_device_link_add(dev, pwm);
+ if (IS_ERR(dl)) {
+ pwm_free(pwm);
+ return ERR_CAST(dl);
+ }
+
pwm->args.period = chosen->period;
pwm->args.polarity = chosen->polarity;
@@ -930,7 +1058,7 @@ struct pwm_device *devm_of_pwm_get(struct device *dev, struct device_node *np,
if (!ptr)
return ERR_PTR(-ENOMEM);
- pwm = of_pwm_get(np, con_id);
+ pwm = of_pwm_get(dev, np, con_id);
if (!IS_ERR(pwm)) {
*ptr = pwm;
devres_add(dev, ptr);
@@ -942,6 +1070,44 @@ struct pwm_device *devm_of_pwm_get(struct device *dev, struct device_node *np,
}
EXPORT_SYMBOL_GPL(devm_of_pwm_get);
+/**
+ * devm_fwnode_pwm_get() - request a resource managed PWM from firmware node
+ * @dev: device for PWM consumer
+ * @fwnode: firmware node to get the PWM from
+ * @con_id: consumer name
+ *
+ * Returns the PWM device parsed from the firmware node. See of_pwm_get() and
+ * acpi_pwm_get() for a detailed description.
+ *
+ * Returns: A pointer to the requested PWM device or an ERR_PTR()-encoded
+ * error code on failure.
+ */
+struct pwm_device *devm_fwnode_pwm_get(struct device *dev,
+ struct fwnode_handle *fwnode,
+ const char *con_id)
+{
+ struct pwm_device **ptr, *pwm = ERR_PTR(-ENODEV);
+
+ ptr = devres_alloc(devm_pwm_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ if (is_of_node(fwnode))
+ pwm = of_pwm_get(dev, to_of_node(fwnode), con_id);
+ else if (is_acpi_node(fwnode))
+ pwm = acpi_pwm_get(fwnode);
+
+ if (!IS_ERR(pwm)) {
+ *ptr = pwm;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return pwm;
+}
+EXPORT_SYMBOL_GPL(devm_fwnode_pwm_get);
+
static int devm_pwm_match(struct device *dev, void *res, void *data)
{
struct pwm_device **p = res;
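Taken together, the core.c changes add consumer/supplier device links, an ACPI lookup path ("pwms" package) next to the existing DT lookup, and a devm_fwnode_pwm_get() helper so a consumer can request its PWM from whichever firmware node describes it. A hypothetical consumer probe is sketched below; the my_backlight_probe() name is invented and the snippet is only illustrative, not buildable outside a kernel tree.

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/pwm.h>

static int my_backlight_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct pwm_device *pwm;
	struct pwm_state state;

	/* Works for both DT ("pwms" phandle) and ACPI ("pwms" package);
	 * the device link to the PWM provider is created internally. */
	pwm = devm_fwnode_pwm_get(dev, dev_fwnode(dev), NULL);
	if (IS_ERR(pwm))
		return PTR_ERR(pwm);

	/* Period and polarity parsed from firmware land in pwm->args. */
	pwm_init_state(pwm, &state);
	state.duty_cycle = state.period / 2;
	state.enabled = true;

	return pwm_apply_state(pwm, &state);
}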
diff --git a/drivers/pwm/pwm-atmel-hlcdc.c b/drivers/pwm/pwm-atmel-hlcdc.c
index 7186db85b15f..d13a83f430ac 100644
--- a/drivers/pwm/pwm-atmel-hlcdc.c
+++ b/drivers/pwm/pwm-atmel-hlcdc.c
@@ -235,6 +235,7 @@ static const struct of_device_id atmel_hlcdc_dt_ids[] = {
.compatible = "atmel,sama5d4-hlcdc",
.data = &atmel_hlcdc_pwm_sama5d3_errata,
},
+ { .compatible = "microchip,sam9x60-hlcdc", },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, atmel_hlcdc_dt_ids);
diff --git a/drivers/pwm/pwm-bcm2835.c b/drivers/pwm/pwm-bcm2835.c
index 5652f461d994..f6fe0b922e1e 100644
--- a/drivers/pwm/pwm-bcm2835.c
+++ b/drivers/pwm/pwm-bcm2835.c
@@ -70,7 +70,7 @@ static int bcm2835_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
return -EINVAL;
}
- scaler = NSEC_PER_SEC / rate;
+ scaler = DIV_ROUND_CLOSEST(NSEC_PER_SEC, rate);
if (period_ns <= MIN_PERIOD) {
dev_err(pc->dev, "period %d not supported, minimum %d\n",
@@ -78,8 +78,10 @@ static int bcm2835_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
return -EINVAL;
}
- writel(duty_ns / scaler, pc->base + DUTY(pwm->hwpwm));
- writel(period_ns / scaler, pc->base + PERIOD(pwm->hwpwm));
+ writel(DIV_ROUND_CLOSEST(duty_ns, scaler),
+ pc->base + DUTY(pwm->hwpwm));
+ writel(DIV_ROUND_CLOSEST(period_ns, scaler),
+ pc->base + PERIOD(pwm->hwpwm));
return 0;
}
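The bcm2835 change swaps truncating division for DIV_ROUND_CLOSEST when deriving the clock scaler and the DUTY/PERIOD register values, which can move the programmed counts by one step. The standalone sketch below shows the difference; the 9.2 MHz clock rate and 1 ms period are made-up example numbers, and the macro is a simplified unsigned-only version of the kernel's.

#include <stdio.h>

/* Simplified for unsigned operands, unlike the kernel macro. */
#define DIV_ROUND_CLOSEST(x, d) (((x) + ((d) / 2)) / (d))

int main(void)
{
	unsigned long rate = 9200000;		/* hypothetical PWM clock, Hz */
	unsigned long period_ns = 1000000;	/* 1 ms requested period */

	unsigned long scaler_trunc = 1000000000UL / rate;
	unsigned long scaler_round = DIV_ROUND_CLOSEST(1000000000UL, rate);

	printf("truncated: scaler=%lu period_reg=%lu\n",
	       scaler_trunc, period_ns / scaler_trunc);		/* 108, 9259 */
	printf("rounded:   scaler=%lu period_reg=%lu\n",
	       scaler_round, DIV_ROUND_CLOSEST(period_ns, scaler_round)); /* 109, 9174 */
	return 0;
}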
diff --git a/drivers/pwm/pwm-fsl-ftm.c b/drivers/pwm/pwm-fsl-ftm.c
index a39b48839df7..9d31a217111d 100644
--- a/drivers/pwm/pwm-fsl-ftm.c
+++ b/drivers/pwm/pwm-fsl-ftm.c
@@ -34,17 +34,19 @@ struct fsl_ftm_soc {
bool has_enable_bits;
};
+struct fsl_pwm_periodcfg {
+ enum fsl_pwm_clk clk_select;
+ unsigned int clk_ps;
+ unsigned int mod_period;
+};
+
struct fsl_pwm_chip {
struct pwm_chip chip;
-
struct mutex lock;
-
- unsigned int cnt_select;
- unsigned int clk_ps;
-
struct regmap *regmap;
- int period_ns;
+ /* This value is valid iff a pwm is running */
+ struct fsl_pwm_periodcfg period;
struct clk *ipg_clk;
struct clk *clk[FSL_PWM_CLK_MAX];
@@ -57,6 +59,33 @@ static inline struct fsl_pwm_chip *to_fsl_chip(struct pwm_chip *chip)
return container_of(chip, struct fsl_pwm_chip, chip);
}
+static void ftm_clear_write_protection(struct fsl_pwm_chip *fpc)
+{
+ u32 val;
+
+ regmap_read(fpc->regmap, FTM_FMS, &val);
+ if (val & FTM_FMS_WPEN)
+ regmap_update_bits(fpc->regmap, FTM_MODE, FTM_MODE_WPDIS,
+ FTM_MODE_WPDIS);
+}
+
+static void ftm_set_write_protection(struct fsl_pwm_chip *fpc)
+{
+ regmap_update_bits(fpc->regmap, FTM_FMS, FTM_FMS_WPEN, FTM_FMS_WPEN);
+}
+
+static bool fsl_pwm_periodcfg_are_equal(const struct fsl_pwm_periodcfg *a,
+ const struct fsl_pwm_periodcfg *b)
+{
+ if (a->clk_select != b->clk_select)
+ return false;
+ if (a->clk_ps != b->clk_ps)
+ return false;
+ if (a->mod_period != b->mod_period)
+ return false;
+ return true;
+}
+
static int fsl_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
{
int ret;
@@ -87,89 +116,58 @@ static void fsl_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
clk_disable_unprepare(fpc->ipg_clk);
}
-static int fsl_pwm_calculate_default_ps(struct fsl_pwm_chip *fpc,
- enum fsl_pwm_clk index)
+static unsigned int fsl_pwm_ticks_to_ns(struct fsl_pwm_chip *fpc,
+ unsigned int ticks)
{
- unsigned long sys_rate, cnt_rate;
- unsigned long long ratio;
-
- sys_rate = clk_get_rate(fpc->clk[FSL_PWM_CLK_SYS]);
- if (!sys_rate)
- return -EINVAL;
-
- cnt_rate = clk_get_rate(fpc->clk[fpc->cnt_select]);
- if (!cnt_rate)
- return -EINVAL;
-
- switch (index) {
- case FSL_PWM_CLK_SYS:
- fpc->clk_ps = 1;
- break;
- case FSL_PWM_CLK_FIX:
- ratio = 2 * cnt_rate - 1;
- do_div(ratio, sys_rate);
- fpc->clk_ps = ratio;
- break;
- case FSL_PWM_CLK_EXT:
- ratio = 4 * cnt_rate - 1;
- do_div(ratio, sys_rate);
- fpc->clk_ps = ratio;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
+ unsigned long rate;
+ unsigned long long exval;
+
+ rate = clk_get_rate(fpc->clk[fpc->period.clk_select]);
+ exval = ticks;
+ exval *= 1000000000UL;
+ do_div(exval, rate >> fpc->period.clk_ps);
+ return exval;
}
-static unsigned long fsl_pwm_calculate_cycles(struct fsl_pwm_chip *fpc,
- unsigned long period_ns)
+static bool fsl_pwm_calculate_period_clk(struct fsl_pwm_chip *fpc,
+ unsigned int period_ns,
+ enum fsl_pwm_clk index,
+ struct fsl_pwm_periodcfg *periodcfg
+ )
{
- unsigned long long c, c0;
+ unsigned long long c;
+ unsigned int ps;
- c = clk_get_rate(fpc->clk[fpc->cnt_select]);
+ c = clk_get_rate(fpc->clk[index]);
c = c * period_ns;
do_div(c, 1000000000UL);
- do {
- c0 = c;
- do_div(c0, (1 << fpc->clk_ps));
- if (c0 <= 0xFFFF)
- return (unsigned long)c0;
- } while (++fpc->clk_ps < 8);
-
- return 0;
-}
-
-static unsigned long fsl_pwm_calculate_period_cycles(struct fsl_pwm_chip *fpc,
- unsigned long period_ns,
- enum fsl_pwm_clk index)
-{
- int ret;
+ if (c == 0)
+ return false;
- ret = fsl_pwm_calculate_default_ps(fpc, index);
- if (ret) {
- dev_err(fpc->chip.dev,
- "failed to calculate default prescaler: %d\n",
- ret);
- return 0;
+ for (ps = 0; ps < 8 ; ++ps, c >>= 1) {
+ if (c <= 0x10000) {
+ periodcfg->clk_select = index;
+ periodcfg->clk_ps = ps;
+ periodcfg->mod_period = c - 1;
+ return true;
+ }
}
-
- return fsl_pwm_calculate_cycles(fpc, period_ns);
+ return false;
}
-static unsigned long fsl_pwm_calculate_period(struct fsl_pwm_chip *fpc,
- unsigned long period_ns)
+static bool fsl_pwm_calculate_period(struct fsl_pwm_chip *fpc,
+ unsigned int period_ns,
+ struct fsl_pwm_periodcfg *periodcfg)
{
enum fsl_pwm_clk m0, m1;
- unsigned long fix_rate, ext_rate, cycles;
+ unsigned long fix_rate, ext_rate;
+ bool ret;
- cycles = fsl_pwm_calculate_period_cycles(fpc, period_ns,
- FSL_PWM_CLK_SYS);
- if (cycles) {
- fpc->cnt_select = FSL_PWM_CLK_SYS;
- return cycles;
- }
+ ret = fsl_pwm_calculate_period_clk(fpc, period_ns, FSL_PWM_CLK_SYS,
+ periodcfg);
+ if (ret)
+ return true;
fix_rate = clk_get_rate(fpc->clk[FSL_PWM_CLK_FIX]);
ext_rate = clk_get_rate(fpc->clk[FSL_PWM_CLK_EXT]);
@@ -182,158 +180,185 @@ static unsigned long fsl_pwm_calculate_period(struct fsl_pwm_chip *fpc,
m1 = FSL_PWM_CLK_FIX;
}
- cycles = fsl_pwm_calculate_period_cycles(fpc, period_ns, m0);
- if (cycles) {
- fpc->cnt_select = m0;
- return cycles;
- }
-
- fpc->cnt_select = m1;
+ ret = fsl_pwm_calculate_period_clk(fpc, period_ns, m0, periodcfg);
+ if (ret)
+ return true;
- return fsl_pwm_calculate_period_cycles(fpc, period_ns, m1);
+ return fsl_pwm_calculate_period_clk(fpc, period_ns, m1, periodcfg);
}
-static unsigned long fsl_pwm_calculate_duty(struct fsl_pwm_chip *fpc,
- unsigned long period_ns,
- unsigned long duty_ns)
+static unsigned int fsl_pwm_calculate_duty(struct fsl_pwm_chip *fpc,
+ unsigned int duty_ns)
{
unsigned long long duty;
- u32 val;
- regmap_read(fpc->regmap, FTM_MOD, &val);
- duty = (unsigned long long)duty_ns * (val + 1);
+ unsigned int period = fpc->period.mod_period + 1;
+ unsigned int period_ns = fsl_pwm_ticks_to_ns(fpc, period);
+
+ duty = (unsigned long long)duty_ns * period;
do_div(duty, period_ns);
- return (unsigned long)duty;
+ return (unsigned int)duty;
}
-static int fsl_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
- int duty_ns, int period_ns)
+static bool fsl_pwm_is_any_pwm_enabled(struct fsl_pwm_chip *fpc,
+ struct pwm_device *pwm)
{
- struct fsl_pwm_chip *fpc = to_fsl_chip(chip);
- u32 period, duty;
+ u32 val;
- mutex_lock(&fpc->lock);
+ regmap_read(fpc->regmap, FTM_OUTMASK, &val);
+ if (~val & 0xFF)
+ return true;
+ else
+ return false;
+}
+
+static bool fsl_pwm_is_other_pwm_enabled(struct fsl_pwm_chip *fpc,
+ struct pwm_device *pwm)
+{
+ u32 val;
+ regmap_read(fpc->regmap, FTM_OUTMASK, &val);
+ if (~(val | BIT(pwm->hwpwm)) & 0xFF)
+ return true;
+ else
+ return false;
+}
+
+static int fsl_pwm_apply_config(struct fsl_pwm_chip *fpc,
+ struct pwm_device *pwm,
+ struct pwm_state *newstate)
+{
+ unsigned int duty;
+ u32 reg_polarity;
+
+ struct fsl_pwm_periodcfg periodcfg;
+ bool do_write_period = false;
+
+ if (!fsl_pwm_calculate_period(fpc, newstate->period, &periodcfg)) {
+ dev_err(fpc->chip.dev, "failed to calculate new period\n");
+ return -EINVAL;
+ }
+
+ if (!fsl_pwm_is_any_pwm_enabled(fpc, pwm))
+ do_write_period = true;
/*
* The Freescale FTM controller supports only a single period for
- * all PWM channels, therefore incompatible changes need to be
- * refused.
+ * all PWM channels, so check whether the newly computed period differs
+ * from the period currently in use. In that case the period may only
+ * be changed if no other PWM is running.
*/
- if (fpc->period_ns && fpc->period_ns != period_ns) {
- dev_err(fpc->chip.dev,
- "conflicting period requested for PWM %u\n",
- pwm->hwpwm);
- mutex_unlock(&fpc->lock);
- return -EBUSY;
+ else if (!fsl_pwm_periodcfg_are_equal(&fpc->period, &periodcfg)) {
+ if (fsl_pwm_is_other_pwm_enabled(fpc, pwm)) {
+ dev_err(fpc->chip.dev,
+ "Cannot change period for PWM %u, disable other PWMs first\n",
+ pwm->hwpwm);
+ return -EBUSY;
+ }
+ if (fpc->period.clk_select != periodcfg.clk_select) {
+ int ret;
+ enum fsl_pwm_clk oldclk = fpc->period.clk_select;
+ enum fsl_pwm_clk newclk = periodcfg.clk_select;
+
+ ret = clk_prepare_enable(fpc->clk[newclk]);
+ if (ret)
+ return ret;
+ clk_disable_unprepare(fpc->clk[oldclk]);
+ }
+ do_write_period = true;
}
- if (!fpc->period_ns && duty_ns) {
- period = fsl_pwm_calculate_period(fpc, period_ns);
- if (!period) {
- dev_err(fpc->chip.dev, "failed to calculate period\n");
- mutex_unlock(&fpc->lock);
- return -EINVAL;
- }
+ ftm_clear_write_protection(fpc);
+ if (do_write_period) {
+ regmap_update_bits(fpc->regmap, FTM_SC, FTM_SC_CLK_MASK,
+ FTM_SC_CLK(periodcfg.clk_select));
regmap_update_bits(fpc->regmap, FTM_SC, FTM_SC_PS_MASK,
- fpc->clk_ps);
- regmap_write(fpc->regmap, FTM_MOD, period - 1);
+ periodcfg.clk_ps);
+ regmap_write(fpc->regmap, FTM_MOD, periodcfg.mod_period);
- fpc->period_ns = period_ns;
+ fpc->period = periodcfg;
}
- mutex_unlock(&fpc->lock);
-
- duty = fsl_pwm_calculate_duty(fpc, period_ns, duty_ns);
+ duty = fsl_pwm_calculate_duty(fpc, newstate->duty_cycle);
regmap_write(fpc->regmap, FTM_CSC(pwm->hwpwm),
FTM_CSC_MSB | FTM_CSC_ELSB);
regmap_write(fpc->regmap, FTM_CV(pwm->hwpwm), duty);
- return 0;
-}
-
-static int fsl_pwm_set_polarity(struct pwm_chip *chip,
- struct pwm_device *pwm,
- enum pwm_polarity polarity)
-{
- struct fsl_pwm_chip *fpc = to_fsl_chip(chip);
- u32 val;
+ reg_polarity = 0;
+ if (newstate->polarity == PWM_POLARITY_INVERSED)
+ reg_polarity = BIT(pwm->hwpwm);
- regmap_read(fpc->regmap, FTM_POL, &val);
+ regmap_update_bits(fpc->regmap, FTM_POL, BIT(pwm->hwpwm), reg_polarity);
- if (polarity == PWM_POLARITY_INVERSED)
- val |= BIT(pwm->hwpwm);
- else
- val &= ~BIT(pwm->hwpwm);
+ newstate->period = fsl_pwm_ticks_to_ns(fpc,
+ fpc->period.mod_period + 1);
+ newstate->duty_cycle = fsl_pwm_ticks_to_ns(fpc, duty);
- regmap_write(fpc->regmap, FTM_POL, val);
+ ftm_set_write_protection(fpc);
return 0;
}
-static int fsl_counter_clock_enable(struct fsl_pwm_chip *fpc)
-{
- int ret;
-
- /* select counter clock source */
- regmap_update_bits(fpc->regmap, FTM_SC, FTM_SC_CLK_MASK,
- FTM_SC_CLK(fpc->cnt_select));
-
- ret = clk_prepare_enable(fpc->clk[fpc->cnt_select]);
- if (ret)
- return ret;
-
- ret = clk_prepare_enable(fpc->clk[FSL_PWM_CLK_CNTEN]);
- if (ret) {
- clk_disable_unprepare(fpc->clk[fpc->cnt_select]);
- return ret;
- }
-
- return 0;
-}
-
-static int fsl_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+static int fsl_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *newstate)
{
struct fsl_pwm_chip *fpc = to_fsl_chip(chip);
- int ret;
-
- mutex_lock(&fpc->lock);
- regmap_update_bits(fpc->regmap, FTM_OUTMASK, BIT(pwm->hwpwm), 0);
+ struct pwm_state *oldstate = &pwm->state;
+ int ret = 0;
- ret = fsl_counter_clock_enable(fpc);
- mutex_unlock(&fpc->lock);
+ /*
+ * oldstate to newstate : action
+ *
+ * disabled to disabled : ignore
+ * enabled to disabled : disable
+ * enabled to enabled : update settings
+ * disabled to enabled : update settings + enable
+ */
- return ret;
-}
+ mutex_lock(&fpc->lock);
-static void fsl_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
-{
- struct fsl_pwm_chip *fpc = to_fsl_chip(chip);
- u32 val;
+ if (!newstate->enabled) {
+ if (oldstate->enabled) {
+ regmap_update_bits(fpc->regmap, FTM_OUTMASK,
+ BIT(pwm->hwpwm), BIT(pwm->hwpwm));
+ clk_disable_unprepare(fpc->clk[FSL_PWM_CLK_CNTEN]);
+ clk_disable_unprepare(fpc->clk[fpc->period.clk_select]);
+ }
- mutex_lock(&fpc->lock);
- regmap_update_bits(fpc->regmap, FTM_OUTMASK, BIT(pwm->hwpwm),
- BIT(pwm->hwpwm));
+ goto end_mutex;
+ }
- clk_disable_unprepare(fpc->clk[FSL_PWM_CLK_CNTEN]);
- clk_disable_unprepare(fpc->clk[fpc->cnt_select]);
+ ret = fsl_pwm_apply_config(fpc, pwm, newstate);
+ if (ret)
+ goto end_mutex;
+
+ /* check if we need to enable */
+ if (!oldstate->enabled) {
+ ret = clk_prepare_enable(fpc->clk[fpc->period.clk_select]);
+ if (ret)
+ goto end_mutex;
+
+ ret = clk_prepare_enable(fpc->clk[FSL_PWM_CLK_CNTEN]);
+ if (ret) {
+ clk_disable_unprepare(fpc->clk[fpc->period.clk_select]);
+ goto end_mutex;
+ }
- regmap_read(fpc->regmap, FTM_OUTMASK, &val);
- if ((val & 0xFF) == 0xFF)
- fpc->period_ns = 0;
+ regmap_update_bits(fpc->regmap, FTM_OUTMASK, BIT(pwm->hwpwm),
+ 0);
+ }
+end_mutex:
mutex_unlock(&fpc->lock);
+ return ret;
}
static const struct pwm_ops fsl_pwm_ops = {
.request = fsl_pwm_request,
.free = fsl_pwm_free,
- .config = fsl_pwm_config,
- .set_polarity = fsl_pwm_set_polarity,
- .enable = fsl_pwm_enable,
- .disable = fsl_pwm_disable,
+ .apply = fsl_pwm_apply,
.owner = THIS_MODULE,
};
@@ -357,6 +382,8 @@ static int fsl_pwm_init(struct fsl_pwm_chip *fpc)
static bool fsl_pwm_volatile_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
+ case FTM_FMS:
+ case FTM_MODE:
case FTM_CNT:
return true;
}
@@ -474,7 +501,7 @@ static int fsl_pwm_suspend(struct device *dev)
continue;
clk_disable_unprepare(fpc->clk[FSL_PWM_CLK_CNTEN]);
- clk_disable_unprepare(fpc->clk[fpc->cnt_select]);
+ clk_disable_unprepare(fpc->clk[fpc->period.clk_select]);
}
return 0;
@@ -496,7 +523,7 @@ static int fsl_pwm_resume(struct device *dev)
if (!pwm_is_enabled(pwm))
continue;
- clk_prepare_enable(fpc->clk[fpc->cnt_select]);
+ clk_prepare_enable(fpc->clk[fpc->period.clk_select]);
clk_prepare_enable(fpc->clk[FSL_PWM_CLK_CNTEN]);
}
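The FTM rework above replaces the single cached period_ns with a full period configuration (clock select, power-of-two prescaler, MOD value) computed by fsl_pwm_calculate_period_clk(). The search is simply: convert the requested period to input-clock ticks, then find the smallest prescaler 0..7 for which the tick count fits the 16-bit MOD register. A standalone sketch of that search follows; the 66 MHz clock rate is an assumed example, not a value from the patch.

#include <stdio.h>
#include <stdbool.h>

static bool calc_periodcfg(unsigned long long rate_hz, unsigned int period_ns,
			   unsigned int *clk_ps, unsigned int *mod_period)
{
	unsigned long long c = rate_hz * period_ns / 1000000000ULL;
	unsigned int ps;

	if (c == 0)
		return false;

	/* Same loop shape as fsl_pwm_calculate_period_clk() above. */
	for (ps = 0; ps < 8; ++ps, c >>= 1) {
		if (c <= 0x10000) {
			*clk_ps = ps;
			*mod_period = c - 1;
			return true;
		}
	}
	return false;
}

int main(void)
{
	unsigned int ps, mod;

	/* 10 ms period on an assumed 66 MHz counter clock */
	if (calc_periodcfg(66000000ULL, 10000000, &ps, &mod))
		printf("clk_ps=%u mod_period=%u\n", ps, mod);	/* 4, 41249 */
	return 0;
}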
diff --git a/drivers/pwm/pwm-jz4740.c b/drivers/pwm/pwm-jz4740.c
index 88a51a40e695..f901e8a0d33d 100644
--- a/drivers/pwm/pwm-jz4740.c
+++ b/drivers/pwm/pwm-jz4740.c
@@ -63,7 +63,15 @@ static void jz4740_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
{
uint32_t ctrl = jz4740_timer_get_ctrl(pwm->hwpwm);
- /* Disable PWM output.
+ /*
+ * Set duty > period. This trick allows the TCU channels in TCU2 mode to
+ * properly return to their init level.
+ */
+ jz4740_timer_set_duty(pwm->hwpwm, 0xffff);
+ jz4740_timer_set_period(pwm->hwpwm, 0x0);
+
+ /*
+ * Disable PWM output.
* In TCU2 mode (channel 1/2 on JZ4750+), this must be done before the
* counter is stopped, while in TCU1 mode the order does not matter.
*/
@@ -74,17 +82,16 @@ static void jz4740_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
jz4740_timer_disable(pwm->hwpwm);
}
-static int jz4740_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
- int duty_ns, int period_ns)
+static int jz4740_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state)
{
struct jz4740_pwm_chip *jz4740 = to_jz4740(pwm->chip);
unsigned long long tmp;
unsigned long period, duty;
unsigned int prescaler = 0;
uint16_t ctrl;
- bool is_enabled;
- tmp = (unsigned long long)clk_get_rate(jz4740->clk) * period_ns;
+ tmp = (unsigned long long)clk_get_rate(jz4740->clk) * state->period;
do_div(tmp, 1000000000);
period = tmp;
@@ -96,16 +103,14 @@ static int jz4740_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
if (prescaler == 6)
return -EINVAL;
- tmp = (unsigned long long)period * duty_ns;
- do_div(tmp, period_ns);
+ tmp = (unsigned long long)period * state->duty_cycle;
+ do_div(tmp, state->period);
duty = period - tmp;
if (duty >= period)
duty = period - 1;
- is_enabled = jz4740_timer_is_enabled(pwm->hwpwm);
- if (is_enabled)
- jz4740_pwm_disable(chip, pwm);
+ jz4740_pwm_disable(chip, pwm);
jz4740_timer_set_count(pwm->hwpwm, 0);
jz4740_timer_set_duty(pwm->hwpwm, duty);
@@ -116,18 +121,7 @@ static int jz4740_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
jz4740_timer_set_ctrl(pwm->hwpwm, ctrl);
- if (is_enabled)
- jz4740_pwm_enable(chip, pwm);
-
- return 0;
-}
-
-static int jz4740_pwm_set_polarity(struct pwm_chip *chip,
- struct pwm_device *pwm, enum pwm_polarity polarity)
-{
- uint32_t ctrl = jz4740_timer_get_ctrl(pwm->pwm);
-
- switch (polarity) {
+ switch (state->polarity) {
case PWM_POLARITY_NORMAL:
ctrl &= ~JZ_TIMER_CTRL_PWM_ACTIVE_LOW;
break;
@@ -137,16 +131,17 @@ static int jz4740_pwm_set_polarity(struct pwm_chip *chip,
}
jz4740_timer_set_ctrl(pwm->hwpwm, ctrl);
+
+ if (state->enabled)
+ jz4740_pwm_enable(chip, pwm);
+
return 0;
}
static const struct pwm_ops jz4740_pwm_ops = {
.request = jz4740_pwm_request,
.free = jz4740_pwm_free,
- .config = jz4740_pwm_config,
- .set_polarity = jz4740_pwm_set_polarity,
- .enable = jz4740_pwm_enable,
- .disable = jz4740_pwm_disable,
+ .apply = jz4740_pwm_apply,
.owner = THIS_MODULE,
};
@@ -184,8 +179,6 @@ static int jz4740_pwm_remove(struct platform_device *pdev)
#ifdef CONFIG_OF
static const struct of_device_id jz4740_pwm_dt_ids[] = {
{ .compatible = "ingenic,jz4740-pwm", },
- { .compatible = "ingenic,jz4770-pwm", },
- { .compatible = "ingenic,jz4780-pwm", },
{},
};
MODULE_DEVICE_TABLE(of, jz4740_pwm_dt_ids);
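The jz4740 conversion folds config, polarity and enable into one .apply callback; the counter math itself is unchanged and still programs the duty register as "period - duty" counts. A standalone sketch of that arithmetic is below; the 12 MHz timer clock is an assumed example, and the driver's prescaler search is omitted because the resulting count already fits the 16-bit counter.

#include <stdio.h>

int main(void)
{
	unsigned long long rate = 12000000;	/* hypothetical timer clock, Hz */
	unsigned long long period_ns = 1000000;	/* 1 ms */
	unsigned long long duty_ns = 250000;	/* 25 % duty cycle */

	unsigned long period = rate * period_ns / 1000000000ULL;	/* 12000 */
	unsigned long duty = period - period * duty_ns / period_ns;	/* 9000 */

	if (duty >= period)
		duty = period - 1;

	printf("period=%lu duty=%lu\n", period, duty);
	return 0;
}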
diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c
index fb5a369b1a8d..3cbff5cbb789 100644
--- a/drivers/pwm/pwm-meson.c
+++ b/drivers/pwm/pwm-meson.c
@@ -1,65 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
+ * PWM controller driver for Amlogic Meson SoCs.
*
- * GPL LICENSE SUMMARY
+ * This PWM is only a set of Gates, Dividers and Counters:
+ * PWM output is achieved by selecting a clock and divider from which the
+ * two half-periods (low and high) can be counted. The counter then has to
+ * be set to switch after N cycles for the first half-period.
+ * The hardware has no "polarity" setting. This driver reverses the period
+ * cycles (the low length is inverted with the high length) for
+ * PWM_POLARITY_INVERSED. This means that .get_state cannot read the polarity
+ * from the hardware.
+ * Setting the duty cycle will disable and re-enable the PWM output.
+ * Disabling the PWM stops the output immediately (without waiting for the
+ * current period to complete first).
*
- * Copyright (c) 2016 BayLibre, SAS.
- * Author: Neil Armstrong <narmstrong@baylibre.com>
- * Copyright (C) 2014 Amlogic, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- * The full GNU General Public License is included in this distribution
- * in the file called COPYING.
- *
- * BSD LICENSE
+ * The public S912 (GXM) datasheet contains some documentation for this PWM
+ * controller starting on page 543:
+ * https://dl.khadas.com/Hardware/VIM2/Datasheet/S912_Datasheet_V0.220170314publicversion-Wesion.pdf
+ * An updated version of this IP block is found in S922X (G12B) SoCs. The
+ * datasheet contains the description for this IP block revision starting at
+ * page 1084:
+ * https://dn.odroid.com/S922X/ODROID-N2/Datasheet/S922X_Public_Datasheet_V0.2.pdf
*
* Copyright (c) 2016 BayLibre, SAS.
* Author: Neil Armstrong <narmstrong@baylibre.com>
* Copyright (C) 2014 Amlogic, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include <linux/bitfield.h>
+#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/math64.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -70,7 +45,8 @@
#define REG_PWM_A 0x0
#define REG_PWM_B 0x4
-#define PWM_HIGH_SHIFT 16
+#define PWM_LOW_MASK GENMASK(15, 0)
+#define PWM_HIGH_MASK GENMASK(31, 16)
#define REG_MISC_AB 0x8
#define MISC_B_CLK_EN BIT(23)
@@ -80,13 +56,33 @@
#define MISC_A_CLK_DIV_SHIFT 8
#define MISC_B_CLK_SEL_SHIFT 6
#define MISC_A_CLK_SEL_SHIFT 4
-#define MISC_CLK_SEL_WIDTH 2
+#define MISC_CLK_SEL_MASK 0x3
#define MISC_B_EN BIT(1)
#define MISC_A_EN BIT(0)
-static const unsigned int mux_reg_shifts[] = {
- MISC_A_CLK_SEL_SHIFT,
- MISC_B_CLK_SEL_SHIFT
+#define MESON_NUM_PWMS 2
+
+static struct meson_pwm_channel_data {
+ u8 reg_offset;
+ u8 clk_sel_shift;
+ u8 clk_div_shift;
+ u32 clk_en_mask;
+ u32 pwm_en_mask;
+} meson_pwm_per_channel_data[MESON_NUM_PWMS] = {
+ {
+ .reg_offset = REG_PWM_A,
+ .clk_sel_shift = MISC_A_CLK_SEL_SHIFT,
+ .clk_div_shift = MISC_A_CLK_DIV_SHIFT,
+ .clk_en_mask = MISC_A_CLK_EN,
+ .pwm_en_mask = MISC_A_EN,
+ },
+ {
+ .reg_offset = REG_PWM_B,
+ .clk_sel_shift = MISC_B_CLK_SEL_SHIFT,
+ .clk_div_shift = MISC_B_CLK_DIV_SHIFT,
+ .clk_en_mask = MISC_B_CLK_EN,
+ .pwm_en_mask = MISC_B_EN,
+ }
};
struct meson_pwm_channel {
@@ -94,8 +90,6 @@ struct meson_pwm_channel {
unsigned int lo;
u8 pre_div;
- struct pwm_state state;
-
struct clk *clk_parent;
struct clk_mux mux;
struct clk *clk;
@@ -109,8 +103,8 @@ struct meson_pwm_data {
struct meson_pwm {
struct pwm_chip chip;
const struct meson_pwm_data *data;
+ struct meson_pwm_channel channels[MESON_NUM_PWMS];
void __iomem *base;
- u8 inverter_mask;
/*
* Protects register (write) access to the REG_MISC_AB register
* that is shared between the two PWMs.
@@ -125,12 +119,16 @@ static inline struct meson_pwm *to_meson_pwm(struct pwm_chip *chip)
static int meson_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
{
- struct meson_pwm_channel *channel = pwm_get_chip_data(pwm);
+ struct meson_pwm *meson = to_meson_pwm(chip);
+ struct meson_pwm_channel *channel;
struct device *dev = chip->dev;
int err;
- if (!channel)
- return -ENODEV;
+ channel = pwm_get_chip_data(pwm);
+ if (channel)
+ return 0;
+
+ channel = &meson->channels[pwm->hwpwm];
if (channel->clk_parent) {
err = clk_set_parent(channel->clk, channel->clk_parent);
@@ -149,9 +147,7 @@ static int meson_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
return err;
}
- chip->ops->get_state(chip, pwm, &channel->state);
-
- return 0;
+ return pwm_set_chip_data(pwm, channel);
}
static void meson_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
@@ -162,20 +158,18 @@ static void meson_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
clk_disable_unprepare(channel->clk);
}
-static int meson_pwm_calc(struct meson_pwm *meson,
- struct meson_pwm_channel *channel, unsigned int id,
- unsigned int duty, unsigned int period)
+static int meson_pwm_calc(struct meson_pwm *meson, struct pwm_device *pwm,
+ struct pwm_state *state)
{
- unsigned int pre_div, cnt, duty_cnt;
+ struct meson_pwm_channel *channel = pwm_get_chip_data(pwm);
+ unsigned int duty, period, pre_div, cnt, duty_cnt;
unsigned long fin_freq = -1;
- u64 fin_ps;
- if (~(meson->inverter_mask >> id) & 0x1)
- duty = period - duty;
+ duty = state->duty_cycle;
+ period = state->period;
- if (period == channel->state.period &&
- duty == channel->state.duty_cycle)
- return 0;
+ if (state->polarity == PWM_POLARITY_INVERSED)
+ duty = period - duty;
fin_freq = clk_get_rate(channel->clk);
if (fin_freq == 0) {
@@ -184,24 +178,19 @@ static int meson_pwm_calc(struct meson_pwm *meson,
}
dev_dbg(meson->chip.dev, "fin_freq: %lu Hz\n", fin_freq);
- fin_ps = (u64)NSEC_PER_SEC * 1000;
- do_div(fin_ps, fin_freq);
-
- /* Calc pre_div with the period */
- for (pre_div = 0; pre_div <= MISC_CLK_DIV_MASK; pre_div++) {
- cnt = DIV_ROUND_CLOSEST_ULL((u64)period * 1000,
- fin_ps * (pre_div + 1));
- dev_dbg(meson->chip.dev, "fin_ps=%llu pre_div=%u cnt=%u\n",
- fin_ps, pre_div, cnt);
- if (cnt <= 0xffff)
- break;
- }
+ pre_div = div64_u64(fin_freq * (u64)period, NSEC_PER_SEC * 0xffffLL);
if (pre_div > MISC_CLK_DIV_MASK) {
dev_err(meson->chip.dev, "unable to get period pre_div\n");
return -EINVAL;
}
+ cnt = div64_u64(fin_freq * (u64)period, NSEC_PER_SEC * (pre_div + 1));
+ if (cnt > 0xffff) {
+ dev_err(meson->chip.dev, "unable to get period cnt\n");
+ return -EINVAL;
+ }
+
dev_dbg(meson->chip.dev, "period=%u pre_div=%u cnt=%u\n", period,
pre_div, cnt);
@@ -215,8 +204,8 @@ static int meson_pwm_calc(struct meson_pwm *meson,
channel->lo = cnt;
} else {
/* Then check is we can have the duty with the same pre_div */
- duty_cnt = DIV_ROUND_CLOSEST_ULL((u64)duty * 1000,
- fin_ps * (pre_div + 1));
+ duty_cnt = div64_u64(fin_freq * (u64)duty,
+ NSEC_PER_SEC * (pre_div + 1));
if (duty_cnt > 0xffff) {
dev_err(meson->chip.dev, "unable to get duty cycle\n");
return -EINVAL;
@@ -233,73 +222,43 @@ static int meson_pwm_calc(struct meson_pwm *meson,
return 0;
}
-static void meson_pwm_enable(struct meson_pwm *meson,
- struct meson_pwm_channel *channel,
- unsigned int id)
+static void meson_pwm_enable(struct meson_pwm *meson, struct pwm_device *pwm)
{
- u32 value, clk_shift, clk_enable, enable;
- unsigned int offset;
+ struct meson_pwm_channel *channel = pwm_get_chip_data(pwm);
+ struct meson_pwm_channel_data *channel_data;
unsigned long flags;
+ u32 value;
- switch (id) {
- case 0:
- clk_shift = MISC_A_CLK_DIV_SHIFT;
- clk_enable = MISC_A_CLK_EN;
- enable = MISC_A_EN;
- offset = REG_PWM_A;
- break;
-
- case 1:
- clk_shift = MISC_B_CLK_DIV_SHIFT;
- clk_enable = MISC_B_CLK_EN;
- enable = MISC_B_EN;
- offset = REG_PWM_B;
- break;
-
- default:
- return;
- }
+ channel_data = &meson_pwm_per_channel_data[pwm->hwpwm];
spin_lock_irqsave(&meson->lock, flags);
value = readl(meson->base + REG_MISC_AB);
- value &= ~(MISC_CLK_DIV_MASK << clk_shift);
- value |= channel->pre_div << clk_shift;
- value |= clk_enable;
+ value &= ~(MISC_CLK_DIV_MASK << channel_data->clk_div_shift);
+ value |= channel->pre_div << channel_data->clk_div_shift;
+ value |= channel_data->clk_en_mask;
writel(value, meson->base + REG_MISC_AB);
- value = (channel->hi << PWM_HIGH_SHIFT) | channel->lo;
- writel(value, meson->base + offset);
+ value = FIELD_PREP(PWM_HIGH_MASK, channel->hi) |
+ FIELD_PREP(PWM_LOW_MASK, channel->lo);
+ writel(value, meson->base + channel_data->reg_offset);
value = readl(meson->base + REG_MISC_AB);
- value |= enable;
+ value |= channel_data->pwm_en_mask;
writel(value, meson->base + REG_MISC_AB);
spin_unlock_irqrestore(&meson->lock, flags);
}
-static void meson_pwm_disable(struct meson_pwm *meson, unsigned int id)
+static void meson_pwm_disable(struct meson_pwm *meson, struct pwm_device *pwm)
{
- u32 value, enable;
unsigned long flags;
-
- switch (id) {
- case 0:
- enable = MISC_A_EN;
- break;
-
- case 1:
- enable = MISC_B_EN;
- break;
-
- default:
- return;
- }
+ u32 value;
spin_lock_irqsave(&meson->lock, flags);
value = readl(meson->base + REG_MISC_AB);
- value &= ~enable;
+ value &= ~meson_pwm_per_channel_data[pwm->hwpwm].pwm_en_mask;
writel(value, meson->base + REG_MISC_AB);
spin_unlock_irqrestore(&meson->lock, flags);
@@ -316,64 +275,97 @@ static int meson_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
return -EINVAL;
if (!state->enabled) {
- meson_pwm_disable(meson, pwm->hwpwm);
- channel->state.enabled = false;
-
- return 0;
- }
-
- if (state->period != channel->state.period ||
- state->duty_cycle != channel->state.duty_cycle ||
- state->polarity != channel->state.polarity) {
- if (state->polarity != channel->state.polarity) {
- if (state->polarity == PWM_POLARITY_NORMAL)
- meson->inverter_mask |= BIT(pwm->hwpwm);
- else
- meson->inverter_mask &= ~BIT(pwm->hwpwm);
+ if (state->polarity == PWM_POLARITY_INVERSED) {
+ /*
+ * This IP block revision doesn't have an "always high"
+ * setting which we can use for "inverted disabled".
+ * Instead we achieve "inverted disabled" by using a
+ * pre_div of 0 (to get the shortest possible duration
+ * for one "count") together with "period == duty_cycle".
+ * This results in a signal which is LOW for one "count"
+ * and HIGH for the rest of the period (so the signal is
+ * HIGH for slightly less than 100% of the period, which
+ * is the best we can achieve).
+ */
+ channel->pre_div = 0;
+ channel->hi = ~0;
+ channel->lo = 0;
+
+ meson_pwm_enable(meson, pwm);
+ } else {
+ meson_pwm_disable(meson, pwm);
}
-
- err = meson_pwm_calc(meson, channel, pwm->hwpwm,
- state->duty_cycle, state->period);
+ } else {
+ err = meson_pwm_calc(meson, pwm, state);
if (err < 0)
return err;
- channel->state.polarity = state->polarity;
- channel->state.period = state->period;
- channel->state.duty_cycle = state->duty_cycle;
- }
-
- if (state->enabled && !channel->state.enabled) {
- meson_pwm_enable(meson, channel, pwm->hwpwm);
- channel->state.enabled = true;
+ meson_pwm_enable(meson, pwm);
}
return 0;
}
+static unsigned int meson_pwm_cnt_to_ns(struct pwm_chip *chip,
+ struct pwm_device *pwm, u32 cnt)
+{
+ struct meson_pwm *meson = to_meson_pwm(chip);
+ struct meson_pwm_channel *channel;
+ unsigned long fin_freq;
+ u32 fin_ns;
+
+ /* to_meson_pwm() can only be used after .get_state() is called */
+ channel = &meson->channels[pwm->hwpwm];
+
+ fin_freq = clk_get_rate(channel->clk);
+ if (fin_freq == 0)
+ return 0;
+
+ fin_ns = div_u64(NSEC_PER_SEC, fin_freq);
+
+ return cnt * fin_ns * (channel->pre_div + 1);
+}
+
static void meson_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
struct pwm_state *state)
{
struct meson_pwm *meson = to_meson_pwm(chip);
- u32 value, mask;
+ struct meson_pwm_channel_data *channel_data;
+ struct meson_pwm_channel *channel;
+ u32 value, tmp;
if (!state)
return;
- switch (pwm->hwpwm) {
- case 0:
- mask = MISC_A_EN;
- break;
+ channel = &meson->channels[pwm->hwpwm];
+ channel_data = &meson_pwm_per_channel_data[pwm->hwpwm];
- case 1:
- mask = MISC_B_EN;
- break;
+ value = readl(meson->base + REG_MISC_AB);
- default:
- return;
- }
+ tmp = channel_data->pwm_en_mask | channel_data->clk_en_mask;
+ state->enabled = (value & tmp) == tmp;
- value = readl(meson->base + REG_MISC_AB);
- state->enabled = (value & mask) != 0;
+ tmp = value >> channel_data->clk_div_shift;
+ channel->pre_div = FIELD_GET(MISC_CLK_DIV_MASK, tmp);
+
+ value = readl(meson->base + channel_data->reg_offset);
+
+ channel->lo = FIELD_GET(PWM_LOW_MASK, value);
+ channel->hi = FIELD_GET(PWM_HIGH_MASK, value);
+
+ if (channel->lo == 0) {
+ state->period = meson_pwm_cnt_to_ns(chip, pwm, channel->hi);
+ state->duty_cycle = state->period;
+ } else if (channel->lo >= channel->hi) {
+ state->period = meson_pwm_cnt_to_ns(chip, pwm,
+ channel->lo + channel->hi);
+ state->duty_cycle = meson_pwm_cnt_to_ns(chip, pwm,
+ channel->hi);
+ } else {
+ state->period = 0;
+ state->duty_cycle = 0;
+ }
}
static const struct pwm_ops meson_pwm_ops = {
@@ -433,8 +425,17 @@ static const struct meson_pwm_data pwm_axg_ao_data = {
.num_parents = ARRAY_SIZE(pwm_axg_ao_parent_names),
};
+static const char * const pwm_g12a_ao_ab_parent_names[] = {
+ "xtal", "aoclk81", "fclk_div4", "fclk_div5"
+};
+
+static const struct meson_pwm_data pwm_g12a_ao_ab_data = {
+ .parent_names = pwm_g12a_ao_ab_parent_names,
+ .num_parents = ARRAY_SIZE(pwm_g12a_ao_ab_parent_names),
+};
+
static const char * const pwm_g12a_ao_cd_parent_names[] = {
- "aoclk81", "xtal",
+ "xtal", "aoclk81",
};
static const struct meson_pwm_data pwm_g12a_ao_cd_data = {
@@ -478,7 +479,7 @@ static const struct of_device_id meson_pwm_matches[] = {
},
{
.compatible = "amlogic,meson-g12a-ao-pwm-ab",
- .data = &pwm_axg_ao_data
+ .data = &pwm_g12a_ao_ab_data
},
{
.compatible = "amlogic,meson-g12a-ao-pwm-cd",
@@ -488,8 +489,7 @@ static const struct of_device_id meson_pwm_matches[] = {
};
MODULE_DEVICE_TABLE(of, meson_pwm_matches);
-static int meson_pwm_init_channels(struct meson_pwm *meson,
- struct meson_pwm_channel *channels)
+static int meson_pwm_init_channels(struct meson_pwm *meson)
{
struct device *dev = meson->chip.dev;
struct clk_init_data init;
@@ -498,7 +498,7 @@ static int meson_pwm_init_channels(struct meson_pwm *meson,
int err;
for (i = 0; i < meson->chip.npwm; i++) {
- struct meson_pwm_channel *channel = &channels[i];
+ struct meson_pwm_channel *channel = &meson->channels[i];
snprintf(name, sizeof(name), "%s#mux%u", dev_name(dev), i);
@@ -509,8 +509,9 @@ static int meson_pwm_init_channels(struct meson_pwm *meson,
init.num_parents = meson->data->num_parents;
channel->mux.reg = meson->base + REG_MISC_AB;
- channel->mux.shift = mux_reg_shifts[i];
- channel->mux.mask = BIT(MISC_CLK_SEL_WIDTH) - 1;
+ channel->mux.shift =
+ meson_pwm_per_channel_data[i].clk_sel_shift;
+ channel->mux.mask = MISC_CLK_SEL_MASK;
channel->mux.flags = 0;
channel->mux.lock = &meson->lock;
channel->mux.table = NULL;
@@ -525,31 +526,16 @@ static int meson_pwm_init_channels(struct meson_pwm *meson,
snprintf(name, sizeof(name), "clkin%u", i);
- channel->clk_parent = devm_clk_get(dev, name);
- if (IS_ERR(channel->clk_parent)) {
- err = PTR_ERR(channel->clk_parent);
- if (err == -EPROBE_DEFER)
- return err;
-
- channel->clk_parent = NULL;
- }
+ channel->clk_parent = devm_clk_get_optional(dev, name);
+ if (IS_ERR(channel->clk_parent))
+ return PTR_ERR(channel->clk_parent);
}
return 0;
}
-static void meson_pwm_add_channels(struct meson_pwm *meson,
- struct meson_pwm_channel *channels)
-{
- unsigned int i;
-
- for (i = 0; i < meson->chip.npwm; i++)
- pwm_set_chip_data(&meson->chip.pwms[i], &channels[i]);
-}
-
static int meson_pwm_probe(struct platform_device *pdev)
{
- struct meson_pwm_channel *channels;
struct meson_pwm *meson;
struct resource *regs;
int err;
@@ -567,19 +553,13 @@ static int meson_pwm_probe(struct platform_device *pdev)
meson->chip.dev = &pdev->dev;
meson->chip.ops = &meson_pwm_ops;
meson->chip.base = -1;
- meson->chip.npwm = 2;
+ meson->chip.npwm = MESON_NUM_PWMS;
meson->chip.of_xlate = of_pwm_xlate_with_flags;
meson->chip.of_pwm_n_cells = 3;
meson->data = of_device_get_match_data(&pdev->dev);
- meson->inverter_mask = BIT(meson->chip.npwm) - 1;
- channels = devm_kcalloc(&pdev->dev, meson->chip.npwm,
- sizeof(*channels), GFP_KERNEL);
- if (!channels)
- return -ENOMEM;
-
- err = meson_pwm_init_channels(meson, channels);
+ err = meson_pwm_init_channels(meson);
if (err < 0)
return err;
@@ -589,8 +569,6 @@ static int meson_pwm_probe(struct platform_device *pdev)
return err;
}
- meson_pwm_add_channels(meson, channels);
-
platform_set_drvdata(pdev, meson);
return 0;
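The reworked meson_pwm_calc() now derives pre_div directly instead of iterating: pre_div = fin_freq * period / (NSEC_PER_SEC * 0xffff), after which the count for the chosen divider must still fit 16 bits. A standalone sketch with the 24 MHz xtal as the assumed parent clock and a 5 ms period as the example request:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t fin_freq = 24000000;		/* assumed parent clock: xtal */
	uint64_t period = 5000000;		/* requested period in ns */
	uint64_t nsec_per_sec = 1000000000;

	/* Same formulas as the new meson_pwm_calc() above. */
	uint64_t pre_div = fin_freq * period / (nsec_per_sec * 0xffffULL);
	uint64_t cnt = fin_freq * period / (nsec_per_sec * (pre_div + 1));

	/* pre_div=1 and cnt=60000 both fit their register fields. */
	printf("pre_div=%llu cnt=%llu\n",
	       (unsigned long long)pre_div, (unsigned long long)cnt);
	return 0;
}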
diff --git a/drivers/pwm/pwm-rcar.c b/drivers/pwm/pwm-rcar.c
index cfe7dd1b448e..5b2b8ecc354c 100644
--- a/drivers/pwm/pwm-rcar.c
+++ b/drivers/pwm/pwm-rcar.c
@@ -254,50 +254,11 @@ static const struct of_device_id rcar_pwm_of_table[] = {
};
MODULE_DEVICE_TABLE(of, rcar_pwm_of_table);
-#ifdef CONFIG_PM_SLEEP
-static struct pwm_device *rcar_pwm_dev_to_pwm_dev(struct device *dev)
-{
- struct rcar_pwm_chip *rcar_pwm = dev_get_drvdata(dev);
- struct pwm_chip *chip = &rcar_pwm->chip;
-
- return &chip->pwms[0];
-}
-
-static int rcar_pwm_suspend(struct device *dev)
-{
- struct pwm_device *pwm = rcar_pwm_dev_to_pwm_dev(dev);
-
- if (!test_bit(PWMF_REQUESTED, &pwm->flags))
- return 0;
-
- pm_runtime_put(dev);
-
- return 0;
-}
-
-static int rcar_pwm_resume(struct device *dev)
-{
- struct pwm_device *pwm = rcar_pwm_dev_to_pwm_dev(dev);
- struct pwm_state state;
-
- if (!test_bit(PWMF_REQUESTED, &pwm->flags))
- return 0;
-
- pm_runtime_get_sync(dev);
-
- pwm_get_state(pwm, &state);
-
- return rcar_pwm_apply(pwm->chip, pwm, &state);
-}
-#endif /* CONFIG_PM_SLEEP */
-static SIMPLE_DEV_PM_OPS(rcar_pwm_pm_ops, rcar_pwm_suspend, rcar_pwm_resume);
-
static struct platform_driver rcar_pwm_driver = {
.probe = rcar_pwm_probe,
.remove = rcar_pwm_remove,
.driver = {
.name = "pwm-rcar",
- .pm = &rcar_pwm_pm_ops,
.of_match_table = of_match_ptr(rcar_pwm_of_table),
}
};
diff --git a/drivers/pwm/pwm-sifive.c b/drivers/pwm/pwm-sifive.c
new file mode 100644
index 000000000000..a7c107f19e66
--- /dev/null
+++ b/drivers/pwm/pwm-sifive.c
@@ -0,0 +1,339 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017-2018 SiFive
+ * For SiFive's PWM IP block documentation please refer to Chapter 14 of
+ * the Reference Manual: https://static.dev.sifive.com/FU540-C000-v1.0.pdf
+ *
+ * Limitations:
+ * - When changing both duty cycle and period, we cannot prevent in
+ * software that the output might produce a period with mixed
+ * settings (new period length and old duty cycle).
+ * - The hardware cannot generate a 100% duty cycle.
+ * - The hardware generates only inverted output.
+ */
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/slab.h>
+#include <linux/bitfield.h>
+
+/* Register offsets */
+#define PWM_SIFIVE_PWMCFG 0x0
+#define PWM_SIFIVE_PWMCOUNT 0x8
+#define PWM_SIFIVE_PWMS 0x10
+#define PWM_SIFIVE_PWMCMP0 0x20
+
+/* PWMCFG fields */
+#define PWM_SIFIVE_PWMCFG_SCALE GENMASK(3, 0)
+#define PWM_SIFIVE_PWMCFG_STICKY BIT(8)
+#define PWM_SIFIVE_PWMCFG_ZERO_CMP BIT(9)
+#define PWM_SIFIVE_PWMCFG_DEGLITCH BIT(10)
+#define PWM_SIFIVE_PWMCFG_EN_ALWAYS BIT(12)
+#define PWM_SIFIVE_PWMCFG_EN_ONCE BIT(13)
+#define PWM_SIFIVE_PWMCFG_CENTER BIT(16)
+#define PWM_SIFIVE_PWMCFG_GANG BIT(24)
+#define PWM_SIFIVE_PWMCFG_IP BIT(28)
+
+/* PWM_SIFIVE_SIZE_PWMCMP is used to calculate offset for pwmcmpX registers */
+#define PWM_SIFIVE_SIZE_PWMCMP 4
+#define PWM_SIFIVE_CMPWIDTH 16
+#define PWM_SIFIVE_DEFAULT_PERIOD 10000000
+
+struct pwm_sifive_ddata {
+ struct pwm_chip chip;
+ struct mutex lock; /* lock to protect user_count */
+ struct notifier_block notifier;
+ struct clk *clk;
+ void __iomem *regs;
+ unsigned int real_period;
+ unsigned int approx_period;
+ int user_count;
+};
+
+static inline
+struct pwm_sifive_ddata *pwm_sifive_chip_to_ddata(struct pwm_chip *c)
+{
+ return container_of(c, struct pwm_sifive_ddata, chip);
+}
+
+static int pwm_sifive_request(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct pwm_sifive_ddata *ddata = pwm_sifive_chip_to_ddata(chip);
+
+ mutex_lock(&ddata->lock);
+ ddata->user_count++;
+ mutex_unlock(&ddata->lock);
+
+ return 0;
+}
+
+static void pwm_sifive_free(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct pwm_sifive_ddata *ddata = pwm_sifive_chip_to_ddata(chip);
+
+ mutex_lock(&ddata->lock);
+ ddata->user_count--;
+ mutex_unlock(&ddata->lock);
+}
+
+static void pwm_sifive_update_clock(struct pwm_sifive_ddata *ddata,
+ unsigned long rate)
+{
+ unsigned long long num;
+ unsigned long scale_pow;
+ int scale;
+ u32 val;
+ /*
+ * The PWM unit is used with pwmzerocmp=0, so the only way to modify the
+ * period length is using pwmscale, which provides the number of bits the
+ * counter is shifted before being fed to the comparators. A period
+ * lasts (1 << (PWM_SIFIVE_CMPWIDTH + pwmscale)) clock ticks.
+ * (1 << (PWM_SIFIVE_CMPWIDTH + scale)) * 10^9/rate = period
+ */
+ scale_pow = div64_ul(ddata->approx_period * (u64)rate, NSEC_PER_SEC);
+ scale = clamp(ilog2(scale_pow) - PWM_SIFIVE_CMPWIDTH, 0, 0xf);
+
+ val = PWM_SIFIVE_PWMCFG_EN_ALWAYS |
+ FIELD_PREP(PWM_SIFIVE_PWMCFG_SCALE, scale);
+ writel(val, ddata->regs + PWM_SIFIVE_PWMCFG);
+
+ /* As scale <= 15 the shift operation cannot overflow. */
+ num = (unsigned long long)NSEC_PER_SEC << (PWM_SIFIVE_CMPWIDTH + scale);
+ ddata->real_period = div64_ul(num, rate);
+ dev_dbg(ddata->chip.dev,
+ "New real_period = %u ns\n", ddata->real_period);
+}
+
+static void pwm_sifive_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ struct pwm_sifive_ddata *ddata = pwm_sifive_chip_to_ddata(chip);
+ u32 duty, val;
+
+ duty = readl(ddata->regs + PWM_SIFIVE_PWMCMP0 +
+ pwm->hwpwm * PWM_SIFIVE_SIZE_PWMCMP);
+
+ state->enabled = duty > 0;
+
+ val = readl(ddata->regs + PWM_SIFIVE_PWMCFG);
+ if (!(val & PWM_SIFIVE_PWMCFG_EN_ALWAYS))
+ state->enabled = false;
+
+ state->period = ddata->real_period;
+ state->duty_cycle =
+ (u64)duty * ddata->real_period >> PWM_SIFIVE_CMPWIDTH;
+ state->polarity = PWM_POLARITY_INVERSED;
+}
+
+static int pwm_sifive_enable(struct pwm_chip *chip, bool enable)
+{
+ struct pwm_sifive_ddata *ddata = pwm_sifive_chip_to_ddata(chip);
+ int ret;
+
+ if (enable) {
+ ret = clk_enable(ddata->clk);
+ if (ret) {
+ dev_err(ddata->chip.dev, "Enable clk failed\n");
+ return ret;
+ }
+ }
+
+ if (!enable)
+ clk_disable(ddata->clk);
+
+ return 0;
+}
+
+static int pwm_sifive_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ struct pwm_sifive_ddata *ddata = pwm_sifive_chip_to_ddata(chip);
+ struct pwm_state cur_state;
+ unsigned int duty_cycle;
+ unsigned long long num;
+ bool enabled;
+ int ret = 0;
+ u32 frac;
+
+ if (state->polarity != PWM_POLARITY_INVERSED)
+ return -EINVAL;
+
+ ret = clk_enable(ddata->clk);
+ if (ret) {
+ dev_err(ddata->chip.dev, "Enable clk failed\n");
+ return ret;
+ }
+
+ mutex_lock(&ddata->lock);
+ cur_state = pwm->state;
+ enabled = cur_state.enabled;
+
+ duty_cycle = state->duty_cycle;
+ if (!state->enabled)
+ duty_cycle = 0;
+
+ /*
+ * The problem of the output producing mixed settings, as mentioned at the
+ * top, occurs here. To minimize the window for this problem, we calculate
+ * the register values first and then write them consecutively.
+ */
+ num = (u64)duty_cycle * (1U << PWM_SIFIVE_CMPWIDTH);
+ frac = DIV_ROUND_CLOSEST_ULL(num, state->period);
+ /* The hardware cannot generate a 100% duty cycle */
+ frac = min(frac, (1U << PWM_SIFIVE_CMPWIDTH) - 1);
+
+ if (state->period != ddata->approx_period) {
+ if (ddata->user_count != 1) {
+ ret = -EBUSY;
+ goto exit;
+ }
+ ddata->approx_period = state->period;
+ pwm_sifive_update_clock(ddata, clk_get_rate(ddata->clk));
+ }
+
+ writel(frac, ddata->regs + PWM_SIFIVE_PWMCMP0 +
+ pwm->hwpwm * PWM_SIFIVE_SIZE_PWMCMP);
+
+ if (state->enabled != enabled)
+ pwm_sifive_enable(chip, state->enabled);
+
+exit:
+ clk_disable(ddata->clk);
+ mutex_unlock(&ddata->lock);
+ return ret;
+}
+
+static const struct pwm_ops pwm_sifive_ops = {
+ .request = pwm_sifive_request,
+ .free = pwm_sifive_free,
+ .get_state = pwm_sifive_get_state,
+ .apply = pwm_sifive_apply,
+ .owner = THIS_MODULE,
+};
+
+static int pwm_sifive_clock_notifier(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct clk_notifier_data *ndata = data;
+ struct pwm_sifive_ddata *ddata =
+ container_of(nb, struct pwm_sifive_ddata, notifier);
+
+ if (event == POST_RATE_CHANGE)
+ pwm_sifive_update_clock(ddata, ndata->new_rate);
+
+ return NOTIFY_OK;
+}
+
+static int pwm_sifive_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct pwm_sifive_ddata *ddata;
+ struct pwm_chip *chip;
+ struct resource *res;
+ int ret;
+
+ ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
+ if (!ddata)
+ return -ENOMEM;
+
+ mutex_init(&ddata->lock);
+ chip = &ddata->chip;
+ chip->dev = dev;
+ chip->ops = &pwm_sifive_ops;
+ chip->of_xlate = of_pwm_xlate_with_flags;
+ chip->of_pwm_n_cells = 3;
+ chip->base = -1;
+ chip->npwm = 4;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ddata->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ddata->regs)) {
+ dev_err(dev, "Unable to map IO resources\n");
+ return PTR_ERR(ddata->regs);
+ }
+
+ ddata->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(ddata->clk)) {
+ if (PTR_ERR(ddata->clk) != -EPROBE_DEFER)
+ dev_err(dev, "Unable to find controller clock\n");
+ return PTR_ERR(ddata->clk);
+ }
+
+ ret = clk_prepare_enable(ddata->clk);
+ if (ret) {
+ dev_err(dev, "failed to enable clock for pwm: %d\n", ret);
+ return ret;
+ }
+
+ /* Watch for changes to underlying clock frequency */
+ ddata->notifier.notifier_call = pwm_sifive_clock_notifier;
+ ret = clk_notifier_register(ddata->clk, &ddata->notifier);
+ if (ret) {
+ dev_err(dev, "failed to register clock notifier: %d\n", ret);
+ goto disable_clk;
+ }
+
+ ret = pwmchip_add(chip);
+ if (ret < 0) {
+ dev_err(dev, "cannot register PWM: %d\n", ret);
+ goto unregister_clk;
+ }
+
+ platform_set_drvdata(pdev, ddata);
+ dev_dbg(dev, "SiFive PWM chip registered %d PWMs\n", chip->npwm);
+
+ return 0;
+
+unregister_clk:
+ clk_notifier_unregister(ddata->clk, &ddata->notifier);
+disable_clk:
+ clk_disable_unprepare(ddata->clk);
+
+ return ret;
+}
+
+static int pwm_sifive_remove(struct platform_device *dev)
+{
+ struct pwm_sifive_ddata *ddata = platform_get_drvdata(dev);
+ bool is_enabled = false;
+ struct pwm_device *pwm;
+ int ret, ch;
+
+ for (ch = 0; ch < ddata->chip.npwm; ch++) {
+ pwm = &ddata->chip.pwms[ch];
+ if (pwm->state.enabled) {
+ is_enabled = true;
+ break;
+ }
+ }
+ if (is_enabled)
+ clk_disable(ddata->clk);
+
+ clk_disable_unprepare(ddata->clk);
+ ret = pwmchip_remove(&ddata->chip);
+ clk_notifier_unregister(ddata->clk, &ddata->notifier);
+
+ return ret;
+}
+
+static const struct of_device_id pwm_sifive_of_match[] = {
+ { .compatible = "sifive,pwm0" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, pwm_sifive_of_match);
+
+static struct platform_driver pwm_sifive_driver = {
+ .probe = pwm_sifive_probe,
+ .remove = pwm_sifive_remove,
+ .driver = {
+ .name = "pwm-sifive",
+ .of_match_table = pwm_sifive_of_match,
+ },
+};
+module_platform_driver(pwm_sifive_driver);
+
+MODULE_DESCRIPTION("SiFive PWM driver");
+MODULE_LICENSE("GPL v2");
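
The apply path above clamps the compare value so a 100% duty cycle is never programmed. A minimal standalone sketch of that scaling and clamping, assuming a 16-bit compare register (PWM_SIFIVE_CMPWIDTH) and a plain duty/period fixed-point conversion rather than the driver's exact rounding; the helper name is illustrative:

#include <stdint.h>

#define PWM_SIFIVE_CMPWIDTH	16	/* assumed compare-register width */

static uint32_t example_duty_to_frac(uint64_t duty_ns, uint64_t period_ns)
{
	uint64_t frac;

	if (!period_ns)
		return 0;

	/* Scale duty/period into a CMPWIDTH-bit fixed-point fraction. */
	frac = (duty_ns << PWM_SIFIVE_CMPWIDTH) / period_ns;

	/* The hardware cannot generate a 100% duty cycle, so clamp. */
	if (frac > (1U << PWM_SIFIVE_CMPWIDTH) - 1)
		frac = (1U << PWM_SIFIVE_CMPWIDTH) - 1;

	return (uint32_t)frac;
}
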
diff --git a/drivers/pwm/pwm-stm32-lp.c b/drivers/pwm/pwm-stm32-lp.c
index 0059b24cfdc3..2211a642066d 100644
--- a/drivers/pwm/pwm-stm32-lp.c
+++ b/drivers/pwm/pwm-stm32-lp.c
@@ -13,6 +13,7 @@
#include <linux/mfd/stm32-lptimer.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
@@ -223,6 +224,29 @@ static int stm32_pwm_lp_remove(struct platform_device *pdev)
return pwmchip_remove(&priv->chip);
}
+static int __maybe_unused stm32_pwm_lp_suspend(struct device *dev)
+{
+ struct stm32_pwm_lp *priv = dev_get_drvdata(dev);
+ struct pwm_state state;
+
+ pwm_get_state(&priv->chip.pwms[0], &state);
+ if (state.enabled) {
+ dev_err(dev, "The consumer didn't stop us (%s)\n",
+ priv->chip.pwms[0].label);
+ return -EBUSY;
+ }
+
+ return pinctrl_pm_select_sleep_state(dev);
+}
+
+static int __maybe_unused stm32_pwm_lp_resume(struct device *dev)
+{
+ return pinctrl_pm_select_default_state(dev);
+}
+
+static SIMPLE_DEV_PM_OPS(stm32_pwm_lp_pm_ops, stm32_pwm_lp_suspend,
+ stm32_pwm_lp_resume);
+
static const struct of_device_id stm32_pwm_lp_of_match[] = {
{ .compatible = "st,stm32-pwm-lp", },
{},
@@ -235,6 +259,7 @@ static struct platform_driver stm32_pwm_lp_driver = {
.driver = {
.name = "stm32-pwm-lp",
.of_match_table = of_match_ptr(stm32_pwm_lp_of_match),
+ .pm = &stm32_pwm_lp_pm_ops,
},
};
module_platform_driver(stm32_pwm_lp_driver);
diff --git a/drivers/pwm/pwm-stm32.c b/drivers/pwm/pwm-stm32.c
index 4f842550fbd1..740e2dec8313 100644
--- a/drivers/pwm/pwm-stm32.c
+++ b/drivers/pwm/pwm-stm32.c
@@ -608,6 +608,8 @@ static int stm32_pwm_probe(struct platform_device *pdev)
priv->regmap = ddata->regmap;
priv->clk = ddata->clk;
priv->max_arr = ddata->max_arr;
+ priv->chip.of_xlate = of_pwm_xlate_with_flags;
+ priv->chip.of_pwm_n_cells = 3;
if (!priv->regmap || !priv->clk)
return -EINVAL;
diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c
index bf6823fe0812..2389b8669846 100644
--- a/drivers/pwm/sysfs.c
+++ b/drivers/pwm/sysfs.c
@@ -18,6 +18,7 @@ struct pwm_export {
struct device child;
struct pwm_device *pwm;
struct mutex lock;
+ struct pwm_state suspend;
};
static struct pwm_export *child_to_pwm_export(struct device *child)
@@ -372,10 +373,111 @@ static struct attribute *pwm_chip_attrs[] = {
};
ATTRIBUTE_GROUPS(pwm_chip);
+/* takes export->lock on success */
+static struct pwm_export *pwm_class_get_state(struct device *parent,
+ struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ struct device *child;
+ struct pwm_export *export;
+
+ if (!test_bit(PWMF_EXPORTED, &pwm->flags))
+ return NULL;
+
+ child = device_find_child(parent, pwm, pwm_unexport_match);
+ if (!child)
+ return NULL;
+
+ export = child_to_pwm_export(child);
+ put_device(child); /* for device_find_child() */
+
+ mutex_lock(&export->lock);
+ pwm_get_state(pwm, state);
+
+ return export;
+}
+
+static int pwm_class_apply_state(struct pwm_export *export,
+ struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ int ret = pwm_apply_state(pwm, state);
+
+ /* release lock taken in pwm_class_get_state */
+ mutex_unlock(&export->lock);
+
+ return ret;
+}
+
+static int pwm_class_resume_npwm(struct device *parent, unsigned int npwm)
+{
+ struct pwm_chip *chip = dev_get_drvdata(parent);
+ unsigned int i;
+ int ret = 0;
+
+ for (i = 0; i < npwm; i++) {
+ struct pwm_device *pwm = &chip->pwms[i];
+ struct pwm_state state;
+ struct pwm_export *export;
+
+ export = pwm_class_get_state(parent, pwm, &state);
+ if (!export)
+ continue;
+
+ state.enabled = export->suspend.enabled;
+ ret = pwm_class_apply_state(export, pwm, &state);
+ if (ret < 0)
+ break;
+ }
+
+ return ret;
+}
+
+static int __maybe_unused pwm_class_suspend(struct device *parent)
+{
+ struct pwm_chip *chip = dev_get_drvdata(parent);
+ unsigned int i;
+ int ret = 0;
+
+ for (i = 0; i < chip->npwm; i++) {
+ struct pwm_device *pwm = &chip->pwms[i];
+ struct pwm_state state;
+ struct pwm_export *export;
+
+ export = pwm_class_get_state(parent, pwm, &state);
+ if (!export)
+ continue;
+
+ export->suspend = state;
+ state.enabled = false;
+ ret = pwm_class_apply_state(export, pwm, &state);
+ if (ret < 0) {
+ /*
+ * roll back the PWM devices that were disabled by
+ * this suspend function.
+ */
+ pwm_class_resume_npwm(parent, i);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int __maybe_unused pwm_class_resume(struct device *parent)
+{
+ struct pwm_chip *chip = dev_get_drvdata(parent);
+
+ return pwm_class_resume_npwm(parent, chip->npwm);
+}
+
+static SIMPLE_DEV_PM_OPS(pwm_class_pm_ops, pwm_class_suspend, pwm_class_resume);
+
static struct class pwm_class = {
.name = "pwm",
.owner = THIS_MODULE,
.dev_groups = pwm_chip_groups,
+ .pm = &pwm_class_pm_ops,
};
static int pwmchip_sysfs_match(struct device *parent, const void *data)
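
The class suspend hook above saves each exported channel's full pwm_state and disables it; resume re-applies only the saved enabled flag. For reference, a minimal in-kernel consumer using the same atomic state API (pwm_init_state()/pwm_apply_state()); the function name, period and duty values are illustrative only:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/pwm.h>

static int example_start_backlight(struct device *dev)
{
	struct pwm_device *pwm;
	struct pwm_state state;
	int ret;

	/* Look the PWM up via the consumer's DT "pwms" property. */
	pwm = pwm_get(dev, NULL);
	if (IS_ERR(pwm))
		return PTR_ERR(pwm);

	/* Start from the reference state (period, polarity from DT/board). */
	pwm_init_state(pwm, &state);
	state.period = 1000000;		/* 1 ms, illustrative */
	state.duty_cycle = 500000;	/* 50% duty, illustrative */
	state.enabled = true;

	ret = pwm_apply_state(pwm, &state);
	if (ret)
		pwm_put(pwm);

	return ret;
}
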
diff --git a/drivers/ras/cec.c b/drivers/ras/cec.c
index 673f8a128397..5d545806d930 100644
--- a/drivers/ras/cec.c
+++ b/drivers/ras/cec.c
@@ -1,4 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2019 Borislav Petkov, SUSE Labs.
+ */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
@@ -37,9 +40,9 @@
 * thus emulate an LRU-like behavior when deleting elements to free up space
* in the page.
*
- * When an element reaches it's max count of count_threshold, we try to poison
- * it by assuming that errors triggered count_threshold times in a single page
- * are excessive and that page shouldn't be used anymore. count_threshold is
+ * When an element reaches its max count of action_threshold, we try to poison
+ * it by assuming that errors triggered action_threshold times in a single page
+ * are excessive and that page shouldn't be used anymore. action_threshold is
* initialized to COUNT_MASK which is the maximum.
*
* That error event entry causes cec_add_elem() to return !0 value and thus
@@ -122,7 +125,7 @@ static DEFINE_MUTEX(ce_mutex);
static u64 dfs_pfn;
/* Amount of errors after which we offline */
-static unsigned int count_threshold = COUNT_MASK;
+static u64 action_threshold = COUNT_MASK;
/* Each element "decays" each decay_interval which is 24hrs by default. */
#define CEC_DECAY_DEFAULT_INTERVAL 24 * 60 * 60 /* 24 hrs */
@@ -276,11 +279,39 @@ static u64 __maybe_unused del_lru_elem(void)
return pfn;
}
+static bool sanity_check(struct ce_array *ca)
+{
+ bool ret = false;
+ u64 prev = 0;
+ int i;
+
+ for (i = 0; i < ca->n; i++) {
+ u64 this = PFN(ca->array[i]);
+
+ if (WARN(prev > this, "prev: 0x%016llx <-> this: 0x%016llx\n", prev, this))
+ ret = true;
+
+ prev = this;
+ }
+
+ if (!ret)
+ return ret;
+
+ pr_info("Sanity check dump:\n{ n: %d\n", ca->n);
+ for (i = 0; i < ca->n; i++) {
+ u64 this = PFN(ca->array[i]);
+
+ pr_info(" %03d: [%016llx|%03llx]\n", i, this, FULL_COUNT(ca->array[i]));
+ }
+ pr_info("}\n");
+
+ return ret;
+}
int cec_add_elem(u64 pfn)
{
struct ce_array *ca = &ce_arr;
- unsigned int to;
+ unsigned int to = 0;
int count, ret = 0;
/*
@@ -294,6 +325,7 @@ int cec_add_elem(u64 pfn)
ca->ces_entered++;
+ /* Array full, free the LRU slot. */
if (ca->n == MAX_ELEMS)
WARN_ON(!del_lru_elem_unlocked(ca));
@@ -306,24 +338,17 @@ int cec_add_elem(u64 pfn)
(void *)&ca->array[to],
(ca->n - to) * sizeof(u64));
- ca->array[to] = (pfn << PAGE_SHIFT) |
- (DECAY_MASK << COUNT_BITS) | 1;
-
+ ca->array[to] = pfn << PAGE_SHIFT;
ca->n++;
-
- ret = 0;
-
- goto decay;
}
- count = COUNT(ca->array[to]);
-
- if (count < count_threshold) {
- ca->array[to] |= (DECAY_MASK << COUNT_BITS);
- ca->array[to]++;
+ /* Add/refresh element generation and increment count */
+ ca->array[to] |= DECAY_MASK << COUNT_BITS;
+ ca->array[to]++;
- ret = 0;
- } else {
+ /* Check action threshold and soft-offline, if reached. */
+ count = COUNT(ca->array[to]);
+ if (count >= action_threshold) {
u64 pfn = ca->array[to] >> PAGE_SHIFT;
if (!pfn_valid(pfn)) {
@@ -338,20 +363,21 @@ int cec_add_elem(u64 pfn)
del_elem(ca, to);
/*
- * Return a >0 value to denote that we've reached the offlining
- * threshold.
+ * Return a >0 value to callers, to denote that we've reached
+ * the offlining threshold.
*/
ret = 1;
goto unlock;
}
-decay:
ca->decay_count++;
if (ca->decay_count >= CLEAN_ELEMS)
do_spring_cleaning(ca);
+ WARN_ON_ONCE(sanity_check(ca));
+
unlock:
mutex_unlock(&ce_mutex);
@@ -369,45 +395,48 @@ static int pfn_set(void *data, u64 val)
{
*(u64 *)data = val;
- return cec_add_elem(val);
+ cec_add_elem(val);
+
+ return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(pfn_ops, u64_get, pfn_set, "0x%llx\n");
static int decay_interval_set(void *data, u64 val)
{
- *(u64 *)data = val;
-
if (val < CEC_DECAY_MIN_INTERVAL)
return -EINVAL;
if (val > CEC_DECAY_MAX_INTERVAL)
return -EINVAL;
+ *(u64 *)data = val;
decay_interval = val;
cec_mod_work(decay_interval);
+
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(decay_interval_ops, u64_get, decay_interval_set, "%lld\n");
-static int count_threshold_set(void *data, u64 val)
+static int action_threshold_set(void *data, u64 val)
{
*(u64 *)data = val;
if (val > COUNT_MASK)
val = COUNT_MASK;
- count_threshold = val;
+ action_threshold = val;
return 0;
}
-DEFINE_DEBUGFS_ATTRIBUTE(count_threshold_ops, u64_get, count_threshold_set, "%lld\n");
+DEFINE_DEBUGFS_ATTRIBUTE(action_threshold_ops, u64_get, action_threshold_set, "%lld\n");
+
+static const char * const bins[] = { "00", "01", "10", "11" };
static int array_dump(struct seq_file *m, void *v)
{
struct ce_array *ca = &ce_arr;
- u64 prev = 0;
int i;
mutex_lock(&ce_mutex);
@@ -416,11 +445,8 @@ static int array_dump(struct seq_file *m, void *v)
for (i = 0; i < ca->n; i++) {
u64 this = PFN(ca->array[i]);
- seq_printf(m, " %03d: [%016llx|%03llx]\n", i, this, FULL_COUNT(ca->array[i]));
-
- WARN_ON(prev > this);
-
- prev = this;
+ seq_printf(m, " %3d: [%016llx|%s|%03llx]\n",
+ i, this, bins[DECAY(ca->array[i])], COUNT(ca->array[i]));
}
seq_printf(m, "}\n");
@@ -433,7 +459,7 @@ static int array_dump(struct seq_file *m, void *v)
seq_printf(m, "Decay interval: %lld seconds\n", decay_interval);
seq_printf(m, "Decays: %lld\n", ca->decays_done);
- seq_printf(m, "Action threshold: %d\n", count_threshold);
+ seq_printf(m, "Action threshold: %lld\n", action_threshold);
mutex_unlock(&ce_mutex);
@@ -463,18 +489,6 @@ static int __init create_debugfs_nodes(void)
return -1;
}
- pfn = debugfs_create_file("pfn", S_IRUSR | S_IWUSR, d, &dfs_pfn, &pfn_ops);
- if (!pfn) {
- pr_warn("Error creating pfn debugfs node!\n");
- goto err;
- }
-
- array = debugfs_create_file("array", S_IRUSR, d, NULL, &array_ops);
- if (!array) {
- pr_warn("Error creating array debugfs node!\n");
- goto err;
- }
-
decay = debugfs_create_file("decay_interval", S_IRUSR | S_IWUSR, d,
&decay_interval, &decay_interval_ops);
if (!decay) {
@@ -482,13 +496,27 @@ static int __init create_debugfs_nodes(void)
goto err;
}
- count = debugfs_create_file("count_threshold", S_IRUSR | S_IWUSR, d,
- &count_threshold, &count_threshold_ops);
+ count = debugfs_create_file("action_threshold", S_IRUSR | S_IWUSR, d,
+ &action_threshold, &action_threshold_ops);
if (!count) {
- pr_warn("Error creating count_threshold debugfs node!\n");
+ pr_warn("Error creating action_threshold debugfs node!\n");
+ goto err;
+ }
+
+ if (!IS_ENABLED(CONFIG_RAS_CEC_DEBUG))
+ return 0;
+
+ pfn = debugfs_create_file("pfn", S_IRUSR | S_IWUSR, d, &dfs_pfn, &pfn_ops);
+ if (!pfn) {
+ pr_warn("Error creating pfn debugfs node!\n");
goto err;
}
+ array = debugfs_create_file("array", S_IRUSR, d, NULL, &array_ops);
+ if (!array) {
+ pr_warn("Error creating array debugfs node!\n");
+ goto err;
+ }
return 0;
@@ -509,8 +537,10 @@ void __init cec_init(void)
return;
}
- if (create_debugfs_nodes())
+ if (create_debugfs_nodes()) {
+ free_page((unsigned long)ce_arr.array);
return;
+ }
INIT_DELAYED_WORK(&cec_work, cec_work_fn);
schedule_delayed_work(&cec_work, CEC_DECAY_DEFAULT_INTERVAL);
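
The array handling above packs everything about one erroring page into a single u64: the PFN in the bits above PAGE_SHIFT, a small decay generation, and a saturating error count in the low bits, with PFN()/DECAY()/COUNT() extracting the fields. A standalone sketch of that layout and of the action-threshold test, with the field widths assumed (DECAY_BITS and COUNT_BITS together filling the PAGE_SHIFT low bits) rather than taken from the file:

#include <stdbool.h>
#include <stdint.h>

#define PAGE_SHIFT	12				/* assumed 4K pages */
#define DECAY_BITS	2				/* assumed */
#define COUNT_BITS	(PAGE_SHIFT - DECAY_BITS)	/* assumed split of the low bits */
#define COUNT_MASK	((1ULL << COUNT_BITS) - 1)
#define DECAY_MASK	((1ULL << DECAY_BITS) - 1)

static inline uint64_t elem_pfn(uint64_t e)	{ return e >> PAGE_SHIFT; }
static inline uint64_t elem_count(uint64_t e)	{ return e & COUNT_MASK; }
static inline uint64_t elem_decay(uint64_t e)	{ return (e >> COUNT_BITS) & DECAY_MASK; }

/* A repeated hit refreshes the decay generation and bumps the count
 * (saturated here so the sketch never overflows into the decay bits). */
static inline uint64_t elem_hit(uint64_t e)
{
	e |= DECAY_MASK << COUNT_BITS;
	if (elem_count(e) < COUNT_MASK)
		e++;
	return e;
}

static inline bool reached_action_threshold(uint64_t e, uint64_t threshold)
{
	return elem_count(e) >= threshold;
}
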
diff --git a/drivers/regulator/88pm800.c b/drivers/regulator/88pm800-regulator.c
index 69ae25886181..69ae25886181 100644
--- a/drivers/regulator/88pm800.c
+++ b/drivers/regulator/88pm800-regulator.c
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 8553bdf87c1d..7928960563e6 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -136,19 +136,20 @@ config REGULATOR_AB8500
signal AB8500 PMIC
config REGULATOR_ARIZONA_LDO1
- tristate "Wolfson Arizona class devices LDO1"
- depends on MFD_ARIZONA
+ tristate "Cirrus Madera and Wolfson Arizona class devices LDO1"
+ depends on MFD_ARIZONA || MFD_MADERA
depends on SND_SOC
help
- Support for the LDO1 regulators found on Wolfson Arizona class
- devices.
+ Support for the LDO1 regulators found on Cirrus Logic Madera codecs
+ and Wolfson Microelectronics Arizona codecs.
config REGULATOR_ARIZONA_MICSUPP
- tristate "Wolfson Arizona class devices MICSUPP"
- depends on MFD_ARIZONA
+ tristate "Cirrus Madera and Wolfson Arizona class devices MICSUPP"
+ depends on MFD_ARIZONA || MFD_MADERA
depends on SND_SOC
help
- Support for the MICSUPP regulators found on Wolfson Arizona class
+ Support for the MICSUPP regulators found on Cirrus Logic Madera codecs
+ and Wolfson Microelectronics Arizona codec
devices.
config REGULATOR_AS3711
@@ -258,7 +259,7 @@ config REGULATOR_DA9062
config REGULATOR_DA9063
tristate "Dialog Semiconductor DA9063 regulators"
- depends on MFD_DA9063
+ depends on MFD_DA9063 && OF
help
Say y here to support the BUCKs and LDOs regulators found on
DA9063 PMICs.
@@ -364,7 +365,7 @@ config REGULATOR_LM363X
tristate "TI LM363X voltage regulators"
depends on MFD_TI_LMU
help
- This driver supports LM3631 and LM3632 voltage regulators for
+ This driver supports LM3631, LM3632 and LM36274 voltage regulators for
the LCD bias.
One boost output voltage is configurable and always on.
Other LDOs are used for the display module.
@@ -829,6 +830,26 @@ config REGULATOR_SKY81452
This driver can also be built as a module. If so, the module
will be called sky81452-regulator.
+config REGULATOR_SLG51000
+ tristate "Dialog Semiconductor SLG51000 regulators"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Say y here to support the Dialog Semiconductor SLG51000.
+ The SLG51000 provides seven compact and customizable low dropout
+ regulators.
+
+config REGULATOR_STM32_BOOSTER
+ tristate "STMicroelectronics STM32 BOOSTER"
+ depends on ARCH_STM32 || COMPILE_TEST
+ help
+ This driver supports the internal booster (3V3) embedded in some
+ STMicroelectronics STM32 chips. It can be used to supply the ADC analog
+ input switches when the vdda supply is below 2.7V.
+
+ This driver can also be built as a module. If so, the module
+ will be called stm32-booster.
+
config REGULATOR_STM32_VREFBUF
tristate "STMicroelectronics STM32 VREFBUF"
depends on ARCH_STM32 || COMPILE_TEST
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 93f53840e8f1..eef73b5a35a4 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -11,7 +11,7 @@ obj-$(CONFIG_REGULATOR_VIRTUAL_CONSUMER) += virtual.o
obj-$(CONFIG_REGULATOR_USERSPACE_CONSUMER) += userspace-consumer.o
obj-$(CONFIG_REGULATOR_88PG86X) += 88pg86x.o
-obj-$(CONFIG_REGULATOR_88PM800) += 88pm800.o
+obj-$(CONFIG_REGULATOR_88PM800) += 88pm800-regulator.o
obj-$(CONFIG_REGULATOR_88PM8607) += 88pm8607.o
obj-$(CONFIG_REGULATOR_CPCAP) += cpcap-regulator.o
obj-$(CONFIG_REGULATOR_AAT2870) += aat2870-regulator.o
@@ -104,6 +104,8 @@ obj-$(CONFIG_REGULATOR_S2MPS11) += s2mps11.o
obj-$(CONFIG_REGULATOR_S5M8767) += s5m8767.o
obj-$(CONFIG_REGULATOR_SC2731) += sc2731-regulator.o
obj-$(CONFIG_REGULATOR_SKY81452) += sky81452-regulator.o
+obj-$(CONFIG_REGULATOR_SLG51000) += slg51000-regulator.o
+obj-$(CONFIG_REGULATOR_STM32_BOOSTER) += stm32-booster.o
obj-$(CONFIG_REGULATOR_STM32_VREFBUF) += stm32-vrefbuf.o
obj-$(CONFIG_REGULATOR_STM32_PWR) += stm32-pwr.o
obj-$(CONFIG_REGULATOR_STPMIC1) += stpmic1_regulator.o
diff --git a/drivers/regulator/arizona-ldo1.c b/drivers/regulator/arizona-ldo1.c
index e4bc7b1e5ccd..1a3d7b720f5e 100644
--- a/drivers/regulator/arizona-ldo1.c
+++ b/drivers/regulator/arizona-ldo1.c
@@ -25,6 +25,10 @@
#include <linux/mfd/arizona/pdata.h>
#include <linux/mfd/arizona/registers.h>
+#include <linux/mfd/madera/core.h>
+#include <linux/mfd/madera/pdata.h>
+#include <linux/mfd/madera/registers.h>
+
struct arizona_ldo1 {
struct regulator_dev *regulator;
struct regmap *regmap;
@@ -158,6 +162,31 @@ static const struct regulator_init_data arizona_ldo1_wm5110 = {
.num_consumer_supplies = 1,
};
+static const struct regulator_desc madera_ldo1 = {
+ .name = "LDO1",
+ .supply_name = "LDOVDD",
+ .type = REGULATOR_VOLTAGE,
+ .ops = &arizona_ldo1_ops,
+
+ .vsel_reg = MADERA_LDO1_CONTROL_1,
+ .vsel_mask = MADERA_LDO1_VSEL_MASK,
+ .min_uV = 900000,
+ .uV_step = 25000,
+ .n_voltages = 13,
+ .enable_time = 3000,
+
+ .owner = THIS_MODULE,
+};
+
+static const struct regulator_init_data madera_ldo1_default = {
+ .constraints = {
+ .min_uV = 1200000,
+ .max_uV = 1200000,
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = 1,
+};
+
static int arizona_ldo1_of_get_pdata(struct arizona_ldo1_pdata *pdata,
struct regulator_config *config,
const struct regulator_desc *desc,
@@ -320,6 +349,32 @@ static int arizona_ldo1_remove(struct platform_device *pdev)
return 0;
}
+static int madera_ldo1_probe(struct platform_device *pdev)
+{
+ struct madera *madera = dev_get_drvdata(pdev->dev.parent);
+ struct arizona_ldo1 *ldo1;
+ bool external_dcvdd;
+ int ret;
+
+ ldo1 = devm_kzalloc(&pdev->dev, sizeof(*ldo1), GFP_KERNEL);
+ if (!ldo1)
+ return -ENOMEM;
+
+ ldo1->regmap = madera->regmap;
+
+ ldo1->init_data = madera_ldo1_default;
+
+ ret = arizona_ldo1_common_init(pdev, ldo1, &madera_ldo1,
+ &madera->pdata.ldo1,
+ &external_dcvdd);
+ if (ret)
+ return ret;
+
+ madera->internal_dcvdd = !external_dcvdd;
+
+ return 0;
+}
+
static struct platform_driver arizona_ldo1_driver = {
.probe = arizona_ldo1_probe,
.remove = arizona_ldo1_remove,
@@ -328,10 +383,36 @@ static struct platform_driver arizona_ldo1_driver = {
},
};
-module_platform_driver(arizona_ldo1_driver);
+static struct platform_driver madera_ldo1_driver = {
+ .probe = madera_ldo1_probe,
+ .remove = arizona_ldo1_remove,
+ .driver = {
+ .name = "madera-ldo1",
+ },
+};
+
+static struct platform_driver * const madera_ldo1_drivers[] = {
+ &arizona_ldo1_driver,
+ &madera_ldo1_driver,
+};
+
+static int __init arizona_ldo1_init(void)
+{
+ return platform_register_drivers(madera_ldo1_drivers,
+ ARRAY_SIZE(madera_ldo1_drivers));
+}
+module_init(arizona_ldo1_init);
+
+static void __exit madera_ldo1_exit(void)
+{
+ platform_unregister_drivers(madera_ldo1_drivers,
+ ARRAY_SIZE(madera_ldo1_drivers));
+}
+module_exit(madera_ldo1_exit);
/* Module information */
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_DESCRIPTION("Arizona LDO1 driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:arizona-ldo1");
+MODULE_ALIAS("platform:madera-ldo1");
diff --git a/drivers/regulator/arizona-micsupp.c b/drivers/regulator/arizona-micsupp.c
index be0d46da51a1..ae1a5de3e57d 100644
--- a/drivers/regulator/arizona-micsupp.c
+++ b/drivers/regulator/arizona-micsupp.c
@@ -16,7 +16,6 @@
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
-#include <linux/gpio.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <sound/soc.h>
@@ -25,6 +24,10 @@
#include <linux/mfd/arizona/pdata.h>
#include <linux/mfd/arizona/registers.h>
+#include <linux/mfd/madera/core.h>
+#include <linux/mfd/madera/pdata.h>
+#include <linux/mfd/madera/registers.h>
+
#include <linux/regulator/arizona-micsupp.h>
struct arizona_micsupp {
@@ -200,6 +203,28 @@ static const struct regulator_init_data arizona_micsupp_ext_default = {
.num_consumer_supplies = 1,
};
+static const struct regulator_desc madera_micsupp = {
+ .name = "MICVDD",
+ .supply_name = "CPVDD1",
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = 40,
+ .ops = &arizona_micsupp_ops,
+
+ .vsel_reg = MADERA_LDO2_CONTROL_1,
+ .vsel_mask = MADERA_LDO2_VSEL_MASK,
+ .enable_reg = MADERA_MIC_CHARGE_PUMP_1,
+ .enable_mask = MADERA_CPMIC_ENA,
+ .bypass_reg = MADERA_MIC_CHARGE_PUMP_1,
+ .bypass_mask = MADERA_CPMIC_BYPASS,
+
+ .linear_ranges = arizona_micsupp_ext_ranges,
+ .n_linear_ranges = ARRAY_SIZE(arizona_micsupp_ext_ranges),
+
+ .enable_time = 3000,
+
+ .owner = THIS_MODULE,
+};
+
static int arizona_micsupp_of_get_pdata(struct arizona_micsupp_pdata *pdata,
struct regulator_config *config,
const struct regulator_desc *desc)
@@ -316,6 +341,24 @@ static int arizona_micsupp_probe(struct platform_device *pdev)
&arizona->pdata.micvdd);
}
+static int madera_micsupp_probe(struct platform_device *pdev)
+{
+ struct madera *madera = dev_get_drvdata(pdev->dev.parent);
+ struct arizona_micsupp *micsupp;
+
+ micsupp = devm_kzalloc(&pdev->dev, sizeof(*micsupp), GFP_KERNEL);
+ if (!micsupp)
+ return -ENOMEM;
+
+ micsupp->regmap = madera->regmap;
+ micsupp->dapm = &madera->dapm;
+ micsupp->dev = madera->dev;
+ micsupp->init_data = arizona_micsupp_ext_default;
+
+ return arizona_micsupp_common_init(pdev, micsupp, &madera_micsupp,
+ &madera->pdata.micvdd);
+}
+
static struct platform_driver arizona_micsupp_driver = {
.probe = arizona_micsupp_probe,
.driver = {
@@ -323,10 +366,35 @@ static struct platform_driver arizona_micsupp_driver = {
},
};
-module_platform_driver(arizona_micsupp_driver);
+static struct platform_driver madera_micsupp_driver = {
+ .probe = madera_micsupp_probe,
+ .driver = {
+ .name = "madera-micsupp",
+ },
+};
+
+static struct platform_driver * const arizona_micsupp_drivers[] = {
+ &arizona_micsupp_driver,
+ &madera_micsupp_driver,
+};
+
+static int __init arizona_micsupp_init(void)
+{
+ return platform_register_drivers(arizona_micsupp_drivers,
+ ARRAY_SIZE(arizona_micsupp_drivers));
+}
+module_init(arizona_micsupp_init);
+
+static void __exit arizona_micsupp_exit(void)
+{
+ platform_unregister_drivers(arizona_micsupp_drivers,
+ ARRAY_SIZE(arizona_micsupp_drivers));
+}
+module_exit(arizona_micsupp_exit);
/* Module information */
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_DESCRIPTION("Arizona microphone supply driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:arizona-micsupp");
+MODULE_ALIAS("platform:madera-micsupp");
diff --git a/drivers/regulator/bd70528-regulator.c b/drivers/regulator/bd70528-regulator.c
index 30e3ed430a8a..0248a61f1006 100644
--- a/drivers/regulator/bd70528-regulator.c
+++ b/drivers/regulator/bd70528-regulator.c
@@ -4,7 +4,6 @@
#include <linux/delay.h>
#include <linux/err.h>
-#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mfd/rohm-bd70528.h>
diff --git a/drivers/regulator/bd718x7-regulator.c b/drivers/regulator/bd718x7-regulator.c
index fde4264da6ff..8c22cfb76173 100644
--- a/drivers/regulator/bd718x7-regulator.c
+++ b/drivers/regulator/bd718x7-regulator.c
@@ -4,7 +4,6 @@
#include <linux/delay.h>
#include <linux/err.h>
-#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mfd/rohm-bd718x7.h>
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index c894cf0d8a28..e0c0cf462004 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1,12 +1,11 @@
// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * core.c -- Voltage/Current Regulator framework.
- *
- * Copyright 2007, 2008 Wolfson Microelectronics PLC.
- * Copyright 2008 SlimLogic Ltd.
- *
- * Author: Liam Girdwood <lrg@slimlogic.co.uk>
- */
+//
+// core.c -- Voltage/Current Regulator framework.
+//
+// Copyright 2007, 2008 Wolfson Microelectronics PLC.
+// Copyright 2008 SlimLogic Ltd.
+//
+// Author: Liam Girdwood <lrg@slimlogic.co.uk>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -23,6 +22,7 @@
#include <linux/regmap.h>
#include <linux/regulator/of_regulator.h>
#include <linux/regulator/consumer.h>
+#include <linux/regulator/coupler.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/module.h>
@@ -50,6 +50,7 @@ static DEFINE_MUTEX(regulator_list_mutex);
static LIST_HEAD(regulator_map_list);
static LIST_HEAD(regulator_ena_gpio_list);
static LIST_HEAD(regulator_supply_alias_list);
+static LIST_HEAD(regulator_coupler_list);
static bool has_full_constraints;
static struct dentry *debugfs_root;
@@ -93,7 +94,6 @@ struct regulator_supply_alias {
static int _regulator_is_enabled(struct regulator_dev *rdev);
static int _regulator_disable(struct regulator *regulator);
-static int _regulator_get_voltage(struct regulator_dev *rdev);
static int _regulator_get_current_limit(struct regulator_dev *rdev);
static unsigned int _regulator_get_mode(struct regulator_dev *rdev);
static int _notifier_call_chain(struct regulator_dev *rdev,
@@ -102,15 +102,12 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev,
int min_uV, int max_uV);
static int regulator_balance_voltage(struct regulator_dev *rdev,
suspend_state_t state);
-static int regulator_set_voltage_rdev(struct regulator_dev *rdev,
- int min_uV, int max_uV,
- suspend_state_t state);
static struct regulator *create_regulator(struct regulator_dev *rdev,
struct device *dev,
const char *supply_name);
static void _regulator_put(struct regulator *regulator);
-static const char *rdev_get_name(struct regulator_dev *rdev)
+const char *rdev_get_name(struct regulator_dev *rdev)
{
if (rdev->constraints && rdev->constraints->name)
return rdev->constraints->name;
@@ -424,8 +421,8 @@ static struct device_node *of_get_regulator(struct device *dev, const char *supp
}
/* Platform voltage constraint check */
-static int regulator_check_voltage(struct regulator_dev *rdev,
- int *min_uV, int *max_uV)
+int regulator_check_voltage(struct regulator_dev *rdev,
+ int *min_uV, int *max_uV)
{
BUG_ON(*min_uV > *max_uV);
@@ -457,9 +454,9 @@ static int regulator_check_states(suspend_state_t state)
/* Make sure we select a voltage that suits the needs of all
* regulator consumers
*/
-static int regulator_check_consumers(struct regulator_dev *rdev,
- int *min_uV, int *max_uV,
- suspend_state_t state)
+int regulator_check_consumers(struct regulator_dev *rdev,
+ int *min_uV, int *max_uV,
+ suspend_state_t state)
{
struct regulator *regulator;
struct regulator_voltage *voltage;
@@ -570,7 +567,7 @@ static ssize_t regulator_uV_show(struct device *dev,
ssize_t ret;
regulator_lock(rdev);
- ret = sprintf(buf, "%d\n", _regulator_get_voltage(rdev));
+ ret = sprintf(buf, "%d\n", regulator_get_voltage_rdev(rdev));
regulator_unlock(rdev);
return ret;
@@ -941,7 +938,7 @@ static int drms_uA_update(struct regulator_dev *rdev)
rdev_err(rdev, "failed to set load %d\n", current_uA);
} else {
/* get output voltage */
- output_uV = _regulator_get_voltage(rdev);
+ output_uV = regulator_get_voltage_rdev(rdev);
if (output_uV <= 0) {
rdev_err(rdev, "invalid output voltage found\n");
return -EINVAL;
@@ -1054,7 +1051,7 @@ static void print_constraints(struct regulator_dev *rdev)
if (!constraints->min_uV ||
constraints->min_uV != constraints->max_uV) {
- ret = _regulator_get_voltage(rdev);
+ ret = regulator_get_voltage_rdev(rdev);
if (ret > 0)
count += scnprintf(buf + count, len - count,
"at %d mV ", ret / 1000);
@@ -1113,7 +1110,7 @@ static int machine_constraints_voltage(struct regulator_dev *rdev,
if (rdev->constraints->apply_uV &&
rdev->constraints->min_uV && rdev->constraints->max_uV) {
int target_min, target_max;
- int current_uV = _regulator_get_voltage(rdev);
+ int current_uV = regulator_get_voltage_rdev(rdev);
if (current_uV == -ENOTRECOVERABLE) {
/* This regulator can't be read and must be initialized */
@@ -1123,7 +1120,7 @@ static int machine_constraints_voltage(struct regulator_dev *rdev,
_regulator_do_set_voltage(rdev,
rdev->constraints->min_uV,
rdev->constraints->max_uV);
- current_uV = _regulator_get_voltage(rdev);
+ current_uV = regulator_get_voltage_rdev(rdev);
}
if (current_uV < 0) {
@@ -1645,9 +1642,9 @@ static int _regulator_get_enable_time(struct regulator_dev *rdev)
{
if (rdev->constraints && rdev->constraints->enable_time)
return rdev->constraints->enable_time;
- if (!rdev->desc->ops->enable_time)
- return rdev->desc->enable_time;
- return rdev->desc->ops->enable_time(rdev);
+ if (rdev->desc->ops->enable_time)
+ return rdev->desc->ops->enable_time(rdev);
+ return rdev->desc->enable_time;
}
static struct regulator_supply_alias *regulator_find_supply_alias(
@@ -2304,7 +2301,7 @@ static int regulator_ena_gpio_ctrl(struct regulator_dev *rdev, bool enable)
*
* Delay for the requested amount of time as per the guidelines in:
*
- * Documentation/timers/timers-howto.txt
+ * Documentation/timers/timers-howto.rst
*
* The assumption here is that regulators will never be enabled in
* atomic context and therefore sleeping functions can be used.
@@ -3065,7 +3062,7 @@ static int _regulator_call_set_voltage(struct regulator_dev *rdev,
struct pre_voltage_change_data data;
int ret;
- data.old_uV = _regulator_get_voltage(rdev);
+ data.old_uV = regulator_get_voltage_rdev(rdev);
data.min_uV = min_uV;
data.max_uV = max_uV;
ret = _notifier_call_chain(rdev, REGULATOR_EVENT_PRE_VOLTAGE_CHANGE,
@@ -3089,7 +3086,7 @@ static int _regulator_call_set_voltage_sel(struct regulator_dev *rdev,
struct pre_voltage_change_data data;
int ret;
- data.old_uV = _regulator_get_voltage(rdev);
+ data.old_uV = regulator_get_voltage_rdev(rdev);
data.min_uV = uV;
data.max_uV = uV;
ret = _notifier_call_chain(rdev, REGULATOR_EVENT_PRE_VOLTAGE_CHANGE,
@@ -3107,6 +3104,66 @@ static int _regulator_call_set_voltage_sel(struct regulator_dev *rdev,
return ret;
}
+static int _regulator_set_voltage_sel_step(struct regulator_dev *rdev,
+ int uV, int new_selector)
+{
+ const struct regulator_ops *ops = rdev->desc->ops;
+ int diff, old_sel, curr_sel, ret;
+
+ /* Stepping is only needed if the regulator is enabled. */
+ if (!_regulator_is_enabled(rdev))
+ goto final_set;
+
+ if (!ops->get_voltage_sel)
+ return -EINVAL;
+
+ old_sel = ops->get_voltage_sel(rdev);
+ if (old_sel < 0)
+ return old_sel;
+
+ diff = new_selector - old_sel;
+ if (diff == 0)
+ return 0; /* No change needed. */
+
+ if (diff > 0) {
+ /* Stepping up. */
+ for (curr_sel = old_sel + rdev->desc->vsel_step;
+ curr_sel < new_selector;
+ curr_sel += rdev->desc->vsel_step) {
+ /*
+ * Call the callback directly instead of using
+ * _regulator_call_set_voltage_sel() as we don't
+ * want to notify anyone yet. Same in the branch
+ * below.
+ */
+ ret = ops->set_voltage_sel(rdev, curr_sel);
+ if (ret)
+ goto try_revert;
+ }
+ } else {
+ /* Stepping down. */
+ for (curr_sel = old_sel - rdev->desc->vsel_step;
+ curr_sel > new_selector;
+ curr_sel -= rdev->desc->vsel_step) {
+ ret = ops->set_voltage_sel(rdev, curr_sel);
+ if (ret)
+ goto try_revert;
+ }
+ }
+
+final_set:
+ /* The final selector will trigger the notifiers. */
+ return _regulator_call_set_voltage_sel(rdev, uV, new_selector);
+
+try_revert:
+ /*
+ * At least try to return to the previous voltage if setting a new
+ * one failed.
+ */
+ (void)ops->set_voltage_sel(rdev, old_sel);
+ return ret;
+}
+
static int _regulator_set_voltage_time(struct regulator_dev *rdev,
int old_uV, int new_uV)
{
@@ -3142,7 +3199,7 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev,
unsigned int selector;
int old_selector = -1;
const struct regulator_ops *ops = rdev->desc->ops;
- int old_uV = _regulator_get_voltage(rdev);
+ int old_uV = regulator_get_voltage_rdev(rdev);
trace_regulator_set_voltage(rdev_get_name(rdev), min_uV, max_uV);
@@ -3169,7 +3226,7 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev,
best_val = ops->list_voltage(rdev,
selector);
else
- best_val = _regulator_get_voltage(rdev);
+ best_val = regulator_get_voltage_rdev(rdev);
}
} else if (ops->set_voltage_sel) {
@@ -3180,6 +3237,9 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev,
selector = ret;
if (old_selector == selector)
ret = 0;
+ else if (rdev->desc->vsel_step)
+ ret = _regulator_set_voltage_sel_step(
+ rdev, best_val, selector);
else
ret = _regulator_call_set_voltage_sel(
rdev, best_val, selector);
@@ -3288,7 +3348,7 @@ static int regulator_set_voltage_unlocked(struct regulator *regulator,
* changing the voltage.
*/
if (!regulator_ops_is_valid(rdev, REGULATOR_CHANGE_VOLTAGE)) {
- current_uV = _regulator_get_voltage(rdev);
+ current_uV = regulator_get_voltage_rdev(rdev);
if (min_uV <= current_uV && current_uV <= max_uV) {
voltage->min_uV = min_uV;
voltage->max_uV = max_uV;
@@ -3325,8 +3385,8 @@ out:
return ret;
}
-static int regulator_set_voltage_rdev(struct regulator_dev *rdev, int min_uV,
- int max_uV, suspend_state_t state)
+int regulator_set_voltage_rdev(struct regulator_dev *rdev, int min_uV,
+ int max_uV, suspend_state_t state)
{
int best_supply_uV = 0;
int supply_change_uV = 0;
@@ -3354,7 +3414,7 @@ static int regulator_set_voltage_rdev(struct regulator_dev *rdev, int min_uV,
best_supply_uV += rdev->desc->min_dropout_uV;
- current_supply_uV = _regulator_get_voltage(rdev->supply->rdev);
+ current_supply_uV = regulator_get_voltage_rdev(rdev->supply->rdev);
if (current_supply_uV < 0) {
ret = current_supply_uV;
goto out;
@@ -3405,7 +3465,7 @@ static int regulator_limit_voltage_step(struct regulator_dev *rdev,
return 1;
if (*current_uV < 0) {
- *current_uV = _regulator_get_voltage(rdev);
+ *current_uV = regulator_get_voltage_rdev(rdev);
if (*current_uV < 0)
return *current_uV;
@@ -3434,11 +3494,10 @@ static int regulator_get_optimal_voltage(struct regulator_dev *rdev,
struct coupling_desc *c_desc = &rdev->coupling_desc;
struct regulator_dev **c_rdevs = c_desc->coupled_rdevs;
struct regulation_constraints *constraints = rdev->constraints;
- int max_spread = constraints->max_spread;
int desired_min_uV = 0, desired_max_uV = INT_MAX;
int max_current_uV = 0, min_current_uV = INT_MAX;
int highest_min_uV = 0, target_uV, possible_uV;
- int i, ret;
+ int i, ret, max_spread;
bool done;
*current_uV = -1;
@@ -3492,6 +3551,8 @@ static int regulator_get_optimal_voltage(struct regulator_dev *rdev,
}
}
+ max_spread = constraints->max_spread[0];
+
/*
* Let target_uV be equal to the desired one if possible.
* If not, set it to minimum voltage, allowed by other coupled
@@ -3509,7 +3570,7 @@ static int regulator_get_optimal_voltage(struct regulator_dev *rdev,
if (!_regulator_is_enabled(c_rdevs[i]))
continue;
- tmp_act = _regulator_get_voltage(c_rdevs[i]);
+ tmp_act = regulator_get_voltage_rdev(c_rdevs[i]);
if (tmp_act < 0)
return tmp_act;
@@ -3551,7 +3612,7 @@ finish:
if (n_coupled > 1 && *current_uV == -1) {
if (_regulator_is_enabled(rdev)) {
- ret = _regulator_get_voltage(rdev);
+ ret = regulator_get_voltage_rdev(rdev);
if (ret < 0)
return ret;
@@ -3573,9 +3634,11 @@ static int regulator_balance_voltage(struct regulator_dev *rdev,
struct regulator_dev **c_rdevs;
struct regulator_dev *best_rdev;
struct coupling_desc *c_desc = &rdev->coupling_desc;
+ struct regulator_coupler *coupler = c_desc->coupler;
int i, ret, n_coupled, best_min_uV, best_max_uV, best_c_rdev;
- bool best_c_rdev_done, c_rdev_done[MAX_COUPLED];
unsigned int delta, best_delta;
+ unsigned long c_rdev_done = 0;
+ bool best_c_rdev_done;
c_rdevs = c_desc->coupled_rdevs;
n_coupled = c_desc->n_coupled;
@@ -3592,8 +3655,9 @@ static int regulator_balance_voltage(struct regulator_dev *rdev,
return -EPERM;
}
- for (i = 0; i < n_coupled; i++)
- c_rdev_done[i] = false;
+ /* Invoke custom balancer for customized couplers */
+ if (coupler && coupler->balance_voltage)
+ return coupler->balance_voltage(coupler, rdev, state);
/*
* Find the best possible voltage change on each loop. Leave the loop
@@ -3620,7 +3684,7 @@ static int regulator_balance_voltage(struct regulator_dev *rdev,
*/
int optimal_uV = 0, optimal_max_uV = 0, current_uV = 0;
- if (c_rdev_done[i])
+ if (test_bit(i, &c_rdev_done))
continue;
ret = regulator_get_optimal_voltage(c_rdevs[i],
@@ -3655,7 +3719,8 @@ static int regulator_balance_voltage(struct regulator_dev *rdev,
if (ret < 0)
goto out;
- c_rdev_done[best_c_rdev] = best_c_rdev_done;
+ if (best_c_rdev_done)
+ set_bit(best_c_rdev, &c_rdev_done);
} while (n_coupled > 1);
@@ -3911,7 +3976,7 @@ out:
}
EXPORT_SYMBOL_GPL(regulator_sync_voltage);
-static int _regulator_get_voltage(struct regulator_dev *rdev)
+int regulator_get_voltage_rdev(struct regulator_dev *rdev)
{
int sel, ret;
bool bypassed;
@@ -3928,7 +3993,7 @@ static int _regulator_get_voltage(struct regulator_dev *rdev)
return -EPROBE_DEFER;
}
- return _regulator_get_voltage(rdev->supply->rdev);
+ return regulator_get_voltage_rdev(rdev->supply->rdev);
}
}
@@ -3944,7 +4009,7 @@ static int _regulator_get_voltage(struct regulator_dev *rdev)
} else if (rdev->desc->fixed_uV && (rdev->desc->n_voltages == 1)) {
ret = rdev->desc->fixed_uV;
} else if (rdev->supply) {
- ret = _regulator_get_voltage(rdev->supply->rdev);
+ ret = regulator_get_voltage_rdev(rdev->supply->rdev);
} else {
return -EINVAL;
}
@@ -3969,7 +4034,7 @@ int regulator_get_voltage(struct regulator *regulator)
int ret;
regulator_lock_dependent(regulator->rdev, &ww_ctx);
- ret = _regulator_get_voltage(regulator->rdev);
+ ret = regulator_get_voltage_rdev(regulator->rdev);
regulator_unlock_dependent(regulator->rdev, &ww_ctx);
return ret;
@@ -4707,8 +4772,60 @@ static int regulator_register_resolve_supply(struct device *dev, void *data)
return 0;
}
+int regulator_coupler_register(struct regulator_coupler *coupler)
+{
+ mutex_lock(&regulator_list_mutex);
+ list_add_tail(&coupler->list, &regulator_coupler_list);
+ mutex_unlock(&regulator_list_mutex);
+
+ return 0;
+}
+
+static struct regulator_coupler *
+regulator_find_coupler(struct regulator_dev *rdev)
+{
+ struct regulator_coupler *coupler;
+ int err;
+
+ /*
+ * Note that couplers are appended to the list and the generic
+ * coupler is registered first, hence it is tried last and only
+ * attaches if no other coupler claims the regulator.
+ */
+ list_for_each_entry_reverse(coupler, &regulator_coupler_list, list) {
+ err = coupler->attach_regulator(coupler, rdev);
+ if (!err) {
+ if (!coupler->balance_voltage &&
+ rdev->coupling_desc.n_coupled > 2)
+ goto err_unsupported;
+
+ return coupler;
+ }
+
+ if (err < 0)
+ return ERR_PTR(err);
+
+ if (err == 1)
+ continue;
+
+ break;
+ }
+
+ return ERR_PTR(-EINVAL);
+
+err_unsupported:
+ if (coupler->detach_regulator)
+ coupler->detach_regulator(coupler, rdev);
+
+ rdev_err(rdev,
+ "Voltage balancing for multiple regulator couples is unimplemented\n");
+
+ return ERR_PTR(-EPERM);
+}
+
static void regulator_resolve_coupling(struct regulator_dev *rdev)
{
+ struct regulator_coupler *coupler = rdev->coupling_desc.coupler;
struct coupling_desc *c_desc = &rdev->coupling_desc;
int n_coupled = c_desc->n_coupled;
struct regulator_dev *c_rdev;
@@ -4724,6 +4841,12 @@ static void regulator_resolve_coupling(struct regulator_dev *rdev)
if (!c_rdev)
continue;
+ if (c_rdev->coupling_desc.coupler != coupler) {
+ rdev_err(rdev, "coupler mismatch with %s\n",
+ rdev_get_name(c_rdev));
+ return;
+ }
+
regulator_lock(c_rdev);
c_desc->coupled_rdevs[i] = c_rdev;
@@ -4737,10 +4860,12 @@ static void regulator_resolve_coupling(struct regulator_dev *rdev)
static void regulator_remove_coupling(struct regulator_dev *rdev)
{
+ struct regulator_coupler *coupler = rdev->coupling_desc.coupler;
struct coupling_desc *__c_desc, *c_desc = &rdev->coupling_desc;
struct regulator_dev *__c_rdev, *c_rdev;
unsigned int __n_coupled, n_coupled;
int i, k;
+ int err;
n_coupled = c_desc->n_coupled;
@@ -4770,21 +4895,33 @@ static void regulator_remove_coupling(struct regulator_dev *rdev)
c_desc->coupled_rdevs[i] = NULL;
c_desc->n_resolved--;
}
+
+ if (coupler && coupler->detach_regulator) {
+ err = coupler->detach_regulator(coupler, rdev);
+ if (err)
+ rdev_err(rdev, "failed to detach from coupler: %d\n",
+ err);
+ }
+
+ kfree(rdev->coupling_desc.coupled_rdevs);
+ rdev->coupling_desc.coupled_rdevs = NULL;
}
static int regulator_init_coupling(struct regulator_dev *rdev)
{
- int n_phandles;
+ int err, n_phandles;
+ size_t alloc_size;
if (!IS_ENABLED(CONFIG_OF))
n_phandles = 0;
else
n_phandles = of_get_n_coupled(rdev);
- if (n_phandles + 1 > MAX_COUPLED) {
- rdev_err(rdev, "too many regulators coupled\n");
- return -EPERM;
- }
+ alloc_size = sizeof(*rdev) * (n_phandles + 1);
+
+ rdev->coupling_desc.coupled_rdevs = kzalloc(alloc_size, GFP_KERNEL);
+ if (!rdev->coupling_desc.coupled_rdevs)
+ return -ENOMEM;
/*
* Every regulator should always have coupling descriptor filled with
@@ -4798,23 +4935,35 @@ static int regulator_init_coupling(struct regulator_dev *rdev)
if (n_phandles == 0)
return 0;
- /* regulator, which can't change its voltage, can't be coupled */
- if (!regulator_ops_is_valid(rdev, REGULATOR_CHANGE_VOLTAGE)) {
- rdev_err(rdev, "voltage operation not allowed\n");
+ if (!of_check_coupling_data(rdev))
return -EPERM;
- }
- if (rdev->constraints->max_spread <= 0) {
- rdev_err(rdev, "wrong max_spread value\n");
- return -EPERM;
+ rdev->coupling_desc.coupler = regulator_find_coupler(rdev);
+ if (IS_ERR(rdev->coupling_desc.coupler)) {
+ err = PTR_ERR(rdev->coupling_desc.coupler);
+ rdev_err(rdev, "failed to get coupler: %d\n", err);
+ return err;
}
- if (!of_check_coupling_data(rdev))
+ return 0;
+}
+
+static int generic_coupler_attach(struct regulator_coupler *coupler,
+ struct regulator_dev *rdev)
+{
+ if (rdev->coupling_desc.n_coupled > 2) {
+ rdev_err(rdev,
+ "Voltage balancing for multiple regulator couples is unimplemented\n");
return -EPERM;
+ }
return 0;
}
+static struct regulator_coupler generic_regulator_coupler = {
+ .attach_regulator = generic_coupler_attach,
+};
+
/**
* regulator_register - register regulator
* @regulator_desc: regulator to register
@@ -4976,7 +5125,9 @@ regulator_register(const struct regulator_desc *regulator_desc,
if (ret < 0)
goto wash;
+ mutex_lock(&regulator_list_mutex);
ret = regulator_init_coupling(rdev);
+ mutex_unlock(&regulator_list_mutex);
if (ret < 0)
goto wash;
@@ -5025,6 +5176,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
unset_supplies:
mutex_lock(&regulator_list_mutex);
unset_regulator_supplies(rdev);
+ regulator_remove_coupling(rdev);
mutex_unlock(&regulator_list_mutex);
wash:
kfree(rdev->constraints);
@@ -5278,7 +5430,7 @@ static void regulator_summary_show_subtree(struct seq_file *s,
rdev->use_count, rdev->open_count, rdev->bypass_count,
regulator_opmode_to_str(opmode));
- seq_printf(s, "%5dmV ", _regulator_get_voltage(rdev) / 1000);
+ seq_printf(s, "%5dmV ", regulator_get_voltage_rdev(rdev) / 1000);
seq_printf(s, "%5dmA ",
_regulator_get_current_limit_unlocked(rdev) / 1000);
@@ -5480,6 +5632,8 @@ static int __init regulator_init(void)
#endif
regulator_dummy_init();
+ regulator_coupler_register(&generic_regulator_coupler);
+
return ret;
}
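
The new coupler list gives machine code a hook into voltage balancing: a coupler's attach_regulator() returns 1 to pass on a regulator, 0 to claim it, or a negative error, and a claimed couple is then balanced by its balance_voltage() callback instead of the generic logic. A minimal sketch of a machine-specific coupler built on those hooks; the regulator names and the target voltage are placeholders:

#include <linux/init.h>
#include <linux/regulator/coupler.h>
#include <linux/regulator/driver.h>
#include <linux/string.h>
#include <linux/suspend.h>

static int example_coupler_attach(struct regulator_coupler *coupler,
				  struct regulator_dev *rdev)
{
	/* Claim only the regulators this board-specific coupler understands. */
	if (strcmp(rdev_get_name(rdev), "vdd-cpu") &&
	    strcmp(rdev_get_name(rdev), "vdd-core"))
		return 1;	/* not ours, let the core try the next coupler */

	return 0;
}

static int example_coupler_balance(struct regulator_coupler *coupler,
				   struct regulator_dev *rdev,
				   suspend_state_t state)
{
	/* Placeholder policy: pin the claimed regulator to 1.1 V. */
	return regulator_set_voltage_rdev(rdev, 1100000, 1100000, state);
}

static struct regulator_coupler example_coupler = {
	.attach_regulator = example_coupler_attach,
	.balance_voltage = example_coupler_balance,
};

static int __init example_coupler_init(void)
{
	return regulator_coupler_register(&example_coupler);
}
arch_initcall(example_coupler_init);
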
diff --git a/drivers/regulator/cpcap-regulator.c b/drivers/regulator/cpcap-regulator.c
index d3284361e594..f80781d58a28 100644
--- a/drivers/regulator/cpcap-regulator.c
+++ b/drivers/regulator/cpcap-regulator.c
@@ -90,7 +90,7 @@
#define CPCAP_REG_OFF_MODE_SEC BIT(15)
/**
- * SoC specific configuraion for CPCAP regulator. There are at least three
+ * SoC specific configuration for CPCAP regulator. There are at least three
* different SoCs each with their own parameters: omap3, omap4 and tegra2.
*
* The assign_reg and assign_mask seem to allow toggling between primary
diff --git a/drivers/regulator/da9062-regulator.c b/drivers/regulator/da9062-regulator.c
index a02e0488410f..2ffc64622451 100644
--- a/drivers/regulator/da9062-regulator.c
+++ b/drivers/regulator/da9062-regulator.c
@@ -493,12 +493,13 @@ static const struct da9062_regulator_info local_da9061_regulator_info[] = {
.desc.ops = &da9062_ldo_ops,
.desc.min_uV = (900) * 1000,
.desc.uV_step = (50) * 1000,
- .desc.n_voltages = ((3600) - (900))/(50) + 1,
+ .desc.n_voltages = ((3600) - (900))/(50) + 1
+ + DA9062AA_VLDO_A_MIN_SEL,
.desc.enable_reg = DA9062AA_LDO1_CONT,
.desc.enable_mask = DA9062AA_LDO1_EN_MASK,
.desc.vsel_reg = DA9062AA_VLDO1_A,
.desc.vsel_mask = DA9062AA_VLDO1_A_MASK,
- .desc.linear_min_sel = 0,
+ .desc.linear_min_sel = DA9062AA_VLDO_A_MIN_SEL,
.sleep = REG_FIELD(DA9062AA_VLDO1_A,
__builtin_ffs((int)DA9062AA_LDO1_SL_A_MASK) - 1,
sizeof(unsigned int) * 8 -
@@ -525,12 +526,13 @@ static const struct da9062_regulator_info local_da9061_regulator_info[] = {
.desc.ops = &da9062_ldo_ops,
.desc.min_uV = (900) * 1000,
.desc.uV_step = (50) * 1000,
- .desc.n_voltages = ((3600) - (600))/(50) + 1,
+ .desc.n_voltages = ((3600) - (900))/(50) + 1
+ + DA9062AA_VLDO_A_MIN_SEL,
.desc.enable_reg = DA9062AA_LDO2_CONT,
.desc.enable_mask = DA9062AA_LDO2_EN_MASK,
.desc.vsel_reg = DA9062AA_VLDO2_A,
.desc.vsel_mask = DA9062AA_VLDO2_A_MASK,
- .desc.linear_min_sel = 0,
+ .desc.linear_min_sel = DA9062AA_VLDO_A_MIN_SEL,
.sleep = REG_FIELD(DA9062AA_VLDO2_A,
__builtin_ffs((int)DA9062AA_LDO2_SL_A_MASK) - 1,
sizeof(unsigned int) * 8 -
@@ -557,12 +559,13 @@ static const struct da9062_regulator_info local_da9061_regulator_info[] = {
.desc.ops = &da9062_ldo_ops,
.desc.min_uV = (900) * 1000,
.desc.uV_step = (50) * 1000,
- .desc.n_voltages = ((3600) - (900))/(50) + 1,
+ .desc.n_voltages = ((3600) - (900))/(50) + 1
+ + DA9062AA_VLDO_A_MIN_SEL,
.desc.enable_reg = DA9062AA_LDO3_CONT,
.desc.enable_mask = DA9062AA_LDO3_EN_MASK,
.desc.vsel_reg = DA9062AA_VLDO3_A,
.desc.vsel_mask = DA9062AA_VLDO3_A_MASK,
- .desc.linear_min_sel = 0,
+ .desc.linear_min_sel = DA9062AA_VLDO_A_MIN_SEL,
.sleep = REG_FIELD(DA9062AA_VLDO3_A,
__builtin_ffs((int)DA9062AA_LDO3_SL_A_MASK) - 1,
sizeof(unsigned int) * 8 -
@@ -589,12 +592,13 @@ static const struct da9062_regulator_info local_da9061_regulator_info[] = {
.desc.ops = &da9062_ldo_ops,
.desc.min_uV = (900) * 1000,
.desc.uV_step = (50) * 1000,
- .desc.n_voltages = ((3600) - (900))/(50) + 1,
+ .desc.n_voltages = ((3600) - (900))/(50) + 1
+ + DA9062AA_VLDO_A_MIN_SEL,
.desc.enable_reg = DA9062AA_LDO4_CONT,
.desc.enable_mask = DA9062AA_LDO4_EN_MASK,
.desc.vsel_reg = DA9062AA_VLDO4_A,
.desc.vsel_mask = DA9062AA_VLDO4_A_MASK,
- .desc.linear_min_sel = 0,
+ .desc.linear_min_sel = DA9062AA_VLDO_A_MIN_SEL,
.sleep = REG_FIELD(DA9062AA_VLDO4_A,
__builtin_ffs((int)DA9062AA_LDO4_SL_A_MASK) - 1,
sizeof(unsigned int) * 8 -
@@ -769,12 +773,13 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = {
.desc.ops = &da9062_ldo_ops,
.desc.min_uV = (900) * 1000,
.desc.uV_step = (50) * 1000,
- .desc.n_voltages = ((3600) - (900))/(50) + 1,
+ .desc.n_voltages = ((3600) - (900))/(50) + 1
+ + DA9062AA_VLDO_A_MIN_SEL,
.desc.enable_reg = DA9062AA_LDO1_CONT,
.desc.enable_mask = DA9062AA_LDO1_EN_MASK,
.desc.vsel_reg = DA9062AA_VLDO1_A,
.desc.vsel_mask = DA9062AA_VLDO1_A_MASK,
- .desc.linear_min_sel = 0,
+ .desc.linear_min_sel = DA9062AA_VLDO_A_MIN_SEL,
.sleep = REG_FIELD(DA9062AA_VLDO1_A,
__builtin_ffs((int)DA9062AA_LDO1_SL_A_MASK) - 1,
sizeof(unsigned int) * 8 -
@@ -801,12 +806,13 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = {
.desc.ops = &da9062_ldo_ops,
.desc.min_uV = (900) * 1000,
.desc.uV_step = (50) * 1000,
- .desc.n_voltages = ((3600) - (600))/(50) + 1,
+ .desc.n_voltages = ((3600) - (900))/(50) + 1
+ + DA9062AA_VLDO_A_MIN_SEL,
.desc.enable_reg = DA9062AA_LDO2_CONT,
.desc.enable_mask = DA9062AA_LDO2_EN_MASK,
.desc.vsel_reg = DA9062AA_VLDO2_A,
.desc.vsel_mask = DA9062AA_VLDO2_A_MASK,
- .desc.linear_min_sel = 0,
+ .desc.linear_min_sel = DA9062AA_VLDO_A_MIN_SEL,
.sleep = REG_FIELD(DA9062AA_VLDO2_A,
__builtin_ffs((int)DA9062AA_LDO2_SL_A_MASK) - 1,
sizeof(unsigned int) * 8 -
@@ -833,12 +839,13 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = {
.desc.ops = &da9062_ldo_ops,
.desc.min_uV = (900) * 1000,
.desc.uV_step = (50) * 1000,
- .desc.n_voltages = ((3600) - (900))/(50) + 1,
+ .desc.n_voltages = ((3600) - (900))/(50) + 1
+ + DA9062AA_VLDO_A_MIN_SEL,
.desc.enable_reg = DA9062AA_LDO3_CONT,
.desc.enable_mask = DA9062AA_LDO3_EN_MASK,
.desc.vsel_reg = DA9062AA_VLDO3_A,
.desc.vsel_mask = DA9062AA_VLDO3_A_MASK,
- .desc.linear_min_sel = 0,
+ .desc.linear_min_sel = DA9062AA_VLDO_A_MIN_SEL,
.sleep = REG_FIELD(DA9062AA_VLDO3_A,
__builtin_ffs((int)DA9062AA_LDO3_SL_A_MASK) - 1,
sizeof(unsigned int) * 8 -
@@ -865,12 +872,13 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = {
.desc.ops = &da9062_ldo_ops,
.desc.min_uV = (900) * 1000,
.desc.uV_step = (50) * 1000,
- .desc.n_voltages = ((3600) - (900))/(50) + 1,
+ .desc.n_voltages = ((3600) - (900))/(50) + 1
+ + DA9062AA_VLDO_A_MIN_SEL,
.desc.enable_reg = DA9062AA_LDO4_CONT,
.desc.enable_mask = DA9062AA_LDO4_EN_MASK,
.desc.vsel_reg = DA9062AA_VLDO4_A,
.desc.vsel_mask = DA9062AA_VLDO4_A_MASK,
- .desc.linear_min_sel = 0,
+ .desc.linear_min_sel = DA9062AA_VLDO_A_MIN_SEL,
.sleep = REG_FIELD(DA9062AA_VLDO4_A,
__builtin_ffs((int)DA9062AA_LDO4_SL_A_MASK) - 1,
sizeof(unsigned int) * 8 -
diff --git a/drivers/regulator/da9063-regulator.c b/drivers/regulator/da9063-regulator.c
index 6f9ce1a6e44d..02f816318fba 100644
--- a/drivers/regulator/da9063-regulator.c
+++ b/drivers/regulator/da9063-regulator.c
@@ -19,7 +19,6 @@
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
#include <linux/mfd/da9063/core.h>
-#include <linux/mfd/da9063/pdata.h>
#include <linux/mfd/da9063/registers.h>
@@ -28,6 +27,49 @@
REG_FIELD(_reg, __builtin_ffs((int)_mask) - 1, \
sizeof(unsigned int) * 8 - __builtin_clz((_mask)) - 1)
+/* DA9063 and DA9063L regulator IDs */
+enum {
+ /* BUCKs */
+ DA9063_ID_BCORE1,
+ DA9063_ID_BCORE2,
+ DA9063_ID_BPRO,
+ DA9063_ID_BMEM,
+ DA9063_ID_BIO,
+ DA9063_ID_BPERI,
+
+ /* BCORE1 and BCORE2 in merged mode */
+ DA9063_ID_BCORES_MERGED,
+ /* BMEM and BIO in merged mode */
+ DA9063_ID_BMEM_BIO_MERGED,
+ /* When two BUCKs are merged, they cannot be reused separately */
+
+ /* LDOs on both DA9063 and DA9063L */
+ DA9063_ID_LDO3,
+ DA9063_ID_LDO7,
+ DA9063_ID_LDO8,
+ DA9063_ID_LDO9,
+ DA9063_ID_LDO11,
+
+ /* DA9063-only LDOs */
+ DA9063_ID_LDO1,
+ DA9063_ID_LDO2,
+ DA9063_ID_LDO4,
+ DA9063_ID_LDO5,
+ DA9063_ID_LDO6,
+ DA9063_ID_LDO10,
+};
+
+/* Old regulator platform data */
+struct da9063_regulator_data {
+ int id;
+ struct regulator_init_data *initdata;
+};
+
+struct da9063_regulators_pdata {
+ unsigned n_regulators;
+ struct da9063_regulator_data *regulator_data;
+};
+
/* Regulator capabilities and registers description */
struct da9063_regulator_info {
struct regulator_desc desc;
@@ -592,7 +634,6 @@ static const struct regulator_init_data *da9063_get_regulator_initdata(
return NULL;
}
-#ifdef CONFIG_OF
static struct of_regulator_match da9063_matches[] = {
[DA9063_ID_BCORE1] = { .name = "bcore1" },
[DA9063_ID_BCORE2] = { .name = "bcore2" },
@@ -670,20 +711,10 @@ static struct da9063_regulators_pdata *da9063_parse_regulators_dt(
*da9063_reg_matches = da9063_matches;
return pdata;
}
-#else
-static struct da9063_regulators_pdata *da9063_parse_regulators_dt(
- struct platform_device *pdev,
- struct of_regulator_match **da9063_reg_matches)
-{
- *da9063_reg_matches = NULL;
- return ERR_PTR(-ENODEV);
-}
-#endif
static int da9063_regulator_probe(struct platform_device *pdev)
{
struct da9063 *da9063 = dev_get_drvdata(pdev->dev.parent);
- struct da9063_pdata *da9063_pdata = dev_get_platdata(da9063->dev);
struct of_regulator_match *da9063_reg_matches = NULL;
struct da9063_regulators_pdata *regl_pdata;
const struct da9063_dev_model *model;
@@ -693,11 +724,7 @@ static int da9063_regulator_probe(struct platform_device *pdev)
bool bcores_merged, bmem_bio_merged;
int id, irq, n, n_regulators, ret, val;
- regl_pdata = da9063_pdata ? da9063_pdata->regulators_pdata : NULL;
-
- if (!regl_pdata)
- regl_pdata = da9063_parse_regulators_dt(pdev,
- &da9063_reg_matches);
+ regl_pdata = da9063_parse_regulators_dt(pdev, &da9063_reg_matches);
if (IS_ERR(regl_pdata) || regl_pdata->n_regulators == 0) {
dev_err(&pdev->dev,
diff --git a/drivers/regulator/da9211-regulator.c b/drivers/regulator/da9211-regulator.c
index da37b4ccd834..0309823d2c72 100644
--- a/drivers/regulator/da9211-regulator.c
+++ b/drivers/regulator/da9211-regulator.c
@@ -289,6 +289,8 @@ static struct da9211_pdata *da9211_parse_regulators_dt(
0,
GPIOD_OUT_HIGH | GPIOD_FLAGS_BIT_NONEXCLUSIVE,
"da9211-enable");
+ if (IS_ERR(pdata->gpiod_ren[n]))
+ pdata->gpiod_ren[n] = NULL;
n++;
}
diff --git a/drivers/regulator/helpers.c b/drivers/regulator/helpers.c
index b9ae45d2d199..4986cc5064a1 100644
--- a/drivers/regulator/helpers.c
+++ b/drivers/regulator/helpers.c
@@ -1,10 +1,9 @@
// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * helpers.c -- Voltage/Current Regulator framework helper functions.
- *
- * Copyright 2007, 2008 Wolfson Microelectronics PLC.
- * Copyright 2008 SlimLogic Ltd.
- */
+//
+// helpers.c -- Voltage/Current Regulator framework helper functions.
+//
+// Copyright 2007, 2008 Wolfson Microelectronics PLC.
+// Copyright 2008 SlimLogic Ltd.
#include <linux/kernel.h>
#include <linux/err.h>
diff --git a/drivers/regulator/lm363x-regulator.c b/drivers/regulator/lm363x-regulator.c
index 60f15a722760..5647e2f97ff8 100644
--- a/drivers/regulator/lm363x-regulator.c
+++ b/drivers/regulator/lm363x-regulator.c
@@ -34,6 +34,11 @@
#define LM3632_VBOOST_MIN 4500000
#define LM3632_VLDO_MIN 4000000
+/* LM36274 */
+#define LM36274_BOOST_VSEL_MAX 0x3f
+#define LM36274_LDO_VSEL_MAX 0x34
+#define LM36274_VOLTAGE_MIN 4000000
+
/* Common */
#define LM363X_STEP_50mV 50000
#define LM363X_STEP_500mV 500000
@@ -214,6 +219,51 @@ static const struct regulator_desc lm363x_regulator_desc[] = {
.enable_reg = LM3632_REG_BIAS_CONFIG,
.enable_mask = LM3632_EN_VNEG_MASK,
},
+
+ /* LM36274 */
+ {
+ .name = "vboost",
+ .of_match = "vboost",
+ .id = LM36274_BOOST,
+ .ops = &lm363x_boost_voltage_table_ops,
+ .n_voltages = LM36274_BOOST_VSEL_MAX,
+ .min_uV = LM36274_VOLTAGE_MIN,
+ .uV_step = LM363X_STEP_50mV,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .vsel_reg = LM36274_REG_VOUT_BOOST,
+ .vsel_mask = LM36274_VOUT_MASK,
+ },
+ {
+ .name = "ldo_vpos",
+ .of_match = "vpos",
+ .id = LM36274_LDO_POS,
+ .ops = &lm363x_regulator_voltage_table_ops,
+ .n_voltages = LM36274_LDO_VSEL_MAX,
+ .min_uV = LM36274_VOLTAGE_MIN,
+ .uV_step = LM363X_STEP_50mV,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .vsel_reg = LM36274_REG_VOUT_POS,
+ .vsel_mask = LM36274_VOUT_MASK,
+ .enable_reg = LM36274_REG_BIAS_CONFIG_1,
+ .enable_mask = LM36274_EN_VPOS_MASK,
+ },
+ {
+ .name = "ldo_vneg",
+ .of_match = "vneg",
+ .id = LM36274_LDO_NEG,
+ .ops = &lm363x_regulator_voltage_table_ops,
+ .n_voltages = LM36274_LDO_VSEL_MAX,
+ .min_uV = LM36274_VOLTAGE_MIN,
+ .uV_step = LM363X_STEP_50mV,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .vsel_reg = LM36274_REG_VOUT_NEG,
+ .vsel_mask = LM36274_VOUT_MASK,
+ .enable_reg = LM36274_REG_BIAS_CONFIG_1,
+ .enable_mask = LM36274_EN_VNEG_MASK,
+ },
};
static struct gpio_desc *lm363x_regulator_of_get_enable_gpio(struct device *dev, int id)
@@ -226,9 +276,11 @@ static struct gpio_desc *lm363x_regulator_of_get_enable_gpio(struct device *dev,
*/
switch (id) {
case LM3632_LDO_POS:
+ case LM36274_LDO_POS:
return gpiod_get_index_optional(dev, "enable", 0,
GPIOD_OUT_LOW | GPIOD_FLAGS_BIT_NONEXCLUSIVE);
case LM3632_LDO_NEG:
+ case LM36274_LDO_NEG:
return gpiod_get_index_optional(dev, "enable", 1,
GPIOD_OUT_LOW | GPIOD_FLAGS_BIT_NONEXCLUSIVE);
default:
@@ -236,6 +288,27 @@ static struct gpio_desc *lm363x_regulator_of_get_enable_gpio(struct device *dev,
}
}
+static int lm363x_regulator_set_ext_en(struct regmap *regmap, int id)
+{
+ int ext_en_mask = 0;
+
+ switch (id) {
+ case LM3632_LDO_POS:
+ case LM3632_LDO_NEG:
+ ext_en_mask = LM3632_EXT_EN_MASK;
+ break;
+ case LM36274_LDO_POS:
+ case LM36274_LDO_NEG:
+ ext_en_mask = LM36274_EXT_EN_MASK;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ return regmap_update_bits(regmap, lm363x_regulator_desc[id].enable_reg,
+ ext_en_mask, ext_en_mask);
+}
+
static int lm363x_regulator_probe(struct platform_device *pdev)
{
struct ti_lmu *lmu = dev_get_drvdata(pdev->dev.parent);
@@ -260,10 +333,7 @@ static int lm363x_regulator_probe(struct platform_device *pdev)
if (gpiod) {
cfg.ena_gpiod = gpiod;
-
- ret = regmap_update_bits(regmap, LM3632_REG_BIAS_CONFIG,
- LM3632_EXT_EN_MASK,
- LM3632_EXT_EN_MASK);
+ ret = lm363x_regulator_set_ext_en(regmap, id);
if (ret) {
gpiod_put(gpiod);
dev_err(dev, "External pin err: %d\n", ret);
diff --git a/drivers/regulator/max77620-regulator.c b/drivers/regulator/max77620-regulator.c
index 0db367b54ae7..8d9731e4052b 100644
--- a/drivers/regulator/max77620-regulator.c
+++ b/drivers/regulator/max77620-regulator.c
@@ -467,7 +467,7 @@ static int max77620_regulator_is_enabled(struct regulator_dev *rdev)
{
struct max77620_regulator *pmic = rdev_get_drvdata(rdev);
int id = rdev_get_id(rdev);
- int ret = 1;
+ int ret;
if (pmic->active_fps_src[id] != MAX77620_FPS_SRC_NONE)
return 1;
@@ -758,6 +758,24 @@ static struct max77620_regulator_info max20024_regs_info[MAX77620_NUM_REGS] = {
RAIL_LDO(LDO8, ldo8, "in-ldo7-8", N, 800000, 3950000, 50000),
};
+static struct max77620_regulator_info max77663_regs_info[MAX77620_NUM_REGS] = {
+ RAIL_SD(SD0, sd0, "in-sd0", SD0, 600000, 3387500, 12500, 0xFF, NONE),
+ RAIL_SD(SD1, sd1, "in-sd1", SD1, 800000, 1587500, 12500, 0xFF, NONE),
+ RAIL_SD(SD2, sd2, "in-sd2", SDX, 600000, 3787500, 12500, 0xFF, NONE),
+ RAIL_SD(SD3, sd3, "in-sd3", SDX, 600000, 3787500, 12500, 0xFF, NONE),
+ RAIL_SD(SD4, sd4, "in-sd4", SDX, 600000, 3787500, 12500, 0xFF, NONE),
+
+ RAIL_LDO(LDO0, ldo0, "in-ldo0-1", N, 800000, 2375000, 25000),
+ RAIL_LDO(LDO1, ldo1, "in-ldo0-1", N, 800000, 2375000, 25000),
+ RAIL_LDO(LDO2, ldo2, "in-ldo2", P, 800000, 3950000, 50000),
+ RAIL_LDO(LDO3, ldo3, "in-ldo3-5", P, 800000, 3950000, 50000),
+ RAIL_LDO(LDO4, ldo4, "in-ldo4-6", P, 800000, 1587500, 12500),
+ RAIL_LDO(LDO5, ldo5, "in-ldo3-5", P, 800000, 3950000, 50000),
+ RAIL_LDO(LDO6, ldo6, "in-ldo4-6", P, 800000, 3950000, 50000),
+ RAIL_LDO(LDO7, ldo7, "in-ldo7-8", N, 800000, 3950000, 50000),
+ RAIL_LDO(LDO8, ldo8, "in-ldo7-8", N, 800000, 3950000, 50000),
+};
+
static int max77620_regulator_probe(struct platform_device *pdev)
{
struct max77620_chip *max77620_chip = dev_get_drvdata(pdev->dev.parent);
@@ -782,9 +800,14 @@ static int max77620_regulator_probe(struct platform_device *pdev)
case MAX77620:
rinfo = max77620_regs_info;
break;
- default:
+ case MAX20024:
rinfo = max20024_regs_info;
break;
+ case MAX77663:
+ rinfo = max77663_regs_info;
+ break;
+ default:
+ return -EINVAL;
}
config.regmap = pmic->rmap;
@@ -878,6 +901,7 @@ static const struct dev_pm_ops max77620_regulator_pm_ops = {
static const struct platform_device_id max77620_regulator_devtype[] = {
{ .name = "max77620-pmic", },
{ .name = "max20024-pmic", },
+ { .name = "max77663-pmic", },
{},
};
MODULE_DEVICE_TABLE(platform, max77620_regulator_devtype);
diff --git a/drivers/regulator/max77650-regulator.c b/drivers/regulator/max77650-regulator.c
index 5c4f86c98510..e57fc9197d62 100644
--- a/drivers/regulator/max77650-regulator.c
+++ b/drivers/regulator/max77650-regulator.c
@@ -20,6 +20,8 @@
#define MAX77650_REGULATOR_V_LDO_MASK GENMASK(6, 0)
#define MAX77650_REGULATOR_V_SBB_MASK GENMASK(5, 0)
+#define MAX77651_REGULATOR_V_SBB1_MASK GENMASK(5, 2)
+#define MAX77651_REGULATOR_V_SBB1_RANGE_MASK GENMASK(1, 0)
#define MAX77650_REGULATOR_AD_MASK BIT(3)
#define MAX77650_REGULATOR_AD_DISABLED 0x00
@@ -41,43 +43,22 @@ struct max77650_regulator_desc {
unsigned int regB;
};
-static const unsigned int max77651_sbb1_regulator_volt_table[] = {
- 2400000, 3200000, 4000000, 4800000,
- 2450000, 3250000, 4050000, 4850000,
- 2500000, 3300000, 4100000, 4900000,
- 2550000, 3350000, 4150000, 4950000,
- 2600000, 3400000, 4200000, 5000000,
- 2650000, 3450000, 4250000, 5050000,
- 2700000, 3500000, 4300000, 5100000,
- 2750000, 3550000, 4350000, 5150000,
- 2800000, 3600000, 4400000, 5200000,
- 2850000, 3650000, 4450000, 5250000,
- 2900000, 3700000, 4500000, 0,
- 2950000, 3750000, 4550000, 0,
- 3000000, 3800000, 4600000, 0,
- 3050000, 3850000, 4650000, 0,
- 3100000, 3900000, 4700000, 0,
- 3150000, 3950000, 4750000, 0,
+static struct max77650_regulator_desc max77651_SBB1_desc;
+
+static const unsigned int max77651_sbb1_volt_range_sel[] = {
+ 0x0, 0x1, 0x2, 0x3
};
-#define MAX77651_REGULATOR_SBB1_SEL_DEC(_val) \
- (((_val & 0x3c) >> 2) | ((_val & 0x03) << 4))
-#define MAX77651_REGULATOR_SBB1_SEL_ENC(_val) \
- (((_val & 0x30) >> 4) | ((_val & 0x0f) << 2))
-
-#define MAX77650_REGULATOR_SBB1_SEL_DECR(_val) \
- do { \
- _val = MAX77651_REGULATOR_SBB1_SEL_DEC(_val); \
- _val--; \
- _val = MAX77651_REGULATOR_SBB1_SEL_ENC(_val); \
- } while (0)
-
-#define MAX77650_REGULATOR_SBB1_SEL_INCR(_val) \
- do { \
- _val = MAX77651_REGULATOR_SBB1_SEL_DEC(_val); \
- _val++; \
- _val = MAX77651_REGULATOR_SBB1_SEL_ENC(_val); \
- } while (0)
+static const struct regulator_linear_range max77651_sbb1_volt_ranges[] = {
+ /* range index 0 */
+ REGULATOR_LINEAR_RANGE(2400000, 0x00, 0x0f, 50000),
+ /* range index 1 */
+ REGULATOR_LINEAR_RANGE(3200000, 0x00, 0x0f, 50000),
+ /* range index 2 */
+ REGULATOR_LINEAR_RANGE(4000000, 0x00, 0x0f, 50000),
+ /* range index 3 */
+ REGULATOR_LINEAR_RANGE(4800000, 0x00, 0x09, 50000),
+};
static const unsigned int max77650_current_limit_table[] = {
1000000, 866000, 707000, 500000,
@@ -127,96 +108,6 @@ static int max77650_regulator_disable(struct regulator_dev *rdev)
MAX77650_REGULATOR_DISABLED);
}
-static int max77650_regulator_set_voltage_sel(struct regulator_dev *rdev,
- unsigned int sel)
-{
- int rv = 0, curr, diff;
- bool ascending;
-
- /*
- * If the regulator is disabled, we can program the desired
- * voltage right away.
- */
- if (!max77650_regulator_is_enabled(rdev))
- return regulator_set_voltage_sel_regmap(rdev, sel);
-
- /*
- * Otherwise we need to manually ramp the output voltage up/down
- * one step at a time.
- */
-
- curr = regulator_get_voltage_sel_regmap(rdev);
- if (curr < 0)
- return curr;
-
- diff = curr - sel;
- if (diff == 0)
- return 0; /* Already there. */
- else if (diff > 0)
- ascending = false;
- else
- ascending = true;
-
- /*
- * Make sure we'll get to the right voltage and break the loop even if
- * the selector equals 0.
- */
- for (ascending ? curr++ : curr--;; ascending ? curr++ : curr--) {
- rv = regulator_set_voltage_sel_regmap(rdev, curr);
- if (rv)
- return rv;
-
- if (curr == sel)
- break;
- }
-
- return 0;
-}
-
-/*
- * Special case: non-linear voltage table for max77651 SBB1 - software
- * must ensure the voltage is ramped in 50mV increments.
- */
-static int max77651_regulator_sbb1_set_voltage_sel(struct regulator_dev *rdev,
- unsigned int sel)
-{
- int rv = 0, curr, vcurr, vdest, vdiff;
-
- /*
- * If the regulator is disabled, we can program the desired
- * voltage right away.
- */
- if (!max77650_regulator_is_enabled(rdev))
- return regulator_set_voltage_sel_regmap(rdev, sel);
-
- curr = regulator_get_voltage_sel_regmap(rdev);
- if (curr < 0)
- return curr;
-
- if (curr == sel)
- return 0; /* Already there. */
-
- vcurr = max77651_sbb1_regulator_volt_table[curr];
- vdest = max77651_sbb1_regulator_volt_table[sel];
- vdiff = vcurr - vdest;
-
- for (;;) {
- if (vdiff > 0)
- MAX77650_REGULATOR_SBB1_SEL_DECR(curr);
- else
- MAX77650_REGULATOR_SBB1_SEL_INCR(curr);
-
- rv = regulator_set_voltage_sel_regmap(rdev, curr);
- if (rv)
- return rv;
-
- if (curr == sel)
- break;
- };
-
- return 0;
-}
-
static const struct regulator_ops max77650_regulator_LDO_ops = {
.is_enabled = max77650_regulator_is_enabled,
.enable = max77650_regulator_enable,
@@ -224,7 +115,7 @@ static const struct regulator_ops max77650_regulator_LDO_ops = {
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
- .set_voltage_sel = max77650_regulator_set_voltage_sel,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
.set_active_discharge = regulator_set_active_discharge_regmap,
};
@@ -235,20 +126,20 @@ static const struct regulator_ops max77650_regulator_SBB_ops = {
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
- .set_voltage_sel = max77650_regulator_set_voltage_sel,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_current_limit = regulator_get_current_limit_regmap,
.set_current_limit = regulator_set_current_limit_regmap,
.set_active_discharge = regulator_set_active_discharge_regmap,
};
-/* Special case for max77651 SBB1 - non-linear voltage mapping. */
+/* Special case for max77651 SBB1 - pickable linear-range voltage mapping. */
static const struct regulator_ops max77651_SBB1_regulator_ops = {
.is_enabled = max77650_regulator_is_enabled,
.enable = max77650_regulator_enable,
.disable = max77650_regulator_disable,
- .list_voltage = regulator_list_voltage_table,
- .get_voltage_sel = regulator_get_voltage_sel_regmap,
- .set_voltage_sel = max77651_regulator_sbb1_set_voltage_sel,
+ .list_voltage = regulator_list_voltage_pickable_linear_range,
+ .get_voltage_sel = regulator_get_voltage_sel_pickable_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_pickable_regmap,
.get_current_limit = regulator_get_current_limit_regmap,
.set_current_limit = regulator_set_current_limit_regmap,
.set_active_discharge = regulator_set_active_discharge_regmap,
@@ -265,6 +156,7 @@ static struct max77650_regulator_desc max77650_LDO_desc = {
.min_uV = 1350000,
.uV_step = 12500,
.n_voltages = 128,
+ .vsel_step = 1,
.vsel_mask = MAX77650_REGULATOR_V_LDO_MASK,
.vsel_reg = MAX77650_REG_CNFG_LDO_A,
.active_discharge_off = MAX77650_REGULATOR_AD_DISABLED,
@@ -290,6 +182,7 @@ static struct max77650_regulator_desc max77650_SBB0_desc = {
.min_uV = 800000,
.uV_step = 25000,
.n_voltages = 64,
+ .vsel_step = 1,
.vsel_mask = MAX77650_REGULATOR_V_SBB_MASK,
.vsel_reg = MAX77650_REG_CNFG_SBB0_A,
.active_discharge_off = MAX77650_REGULATOR_AD_DISABLED,
@@ -319,6 +212,7 @@ static struct max77650_regulator_desc max77650_SBB1_desc = {
.min_uV = 800000,
.uV_step = 12500,
.n_voltages = 64,
+ .vsel_step = 1,
.vsel_mask = MAX77650_REGULATOR_V_SBB_MASK,
.vsel_reg = MAX77650_REG_CNFG_SBB1_A,
.active_discharge_off = MAX77650_REGULATOR_AD_DISABLED,
@@ -345,9 +239,14 @@ static struct max77650_regulator_desc max77651_SBB1_desc = {
.supply_name = "in-sbb1",
.id = MAX77650_REGULATOR_ID_SBB1,
.ops = &max77651_SBB1_regulator_ops,
- .volt_table = max77651_sbb1_regulator_volt_table,
- .n_voltages = ARRAY_SIZE(max77651_sbb1_regulator_volt_table),
- .vsel_mask = MAX77650_REGULATOR_V_SBB_MASK,
+ .linear_range_selectors = max77651_sbb1_volt_range_sel,
+ .linear_ranges = max77651_sbb1_volt_ranges,
+ .n_linear_ranges = ARRAY_SIZE(max77651_sbb1_volt_ranges),
+ .n_voltages = 58,
+ .vsel_step = 1,
+ .vsel_range_mask = MAX77651_REGULATOR_V_SBB1_RANGE_MASK,
+ .vsel_range_reg = MAX77650_REG_CNFG_SBB1_A,
+ .vsel_mask = MAX77651_REGULATOR_V_SBB1_MASK,
.vsel_reg = MAX77650_REG_CNFG_SBB1_A,
.active_discharge_off = MAX77650_REGULATOR_AD_DISABLED,
.active_discharge_on = MAX77650_REGULATOR_AD_ENABLED,
@@ -376,6 +275,7 @@ static struct max77650_regulator_desc max77650_SBB2_desc = {
.min_uV = 800000,
.uV_step = 50000,
.n_voltages = 64,
+ .vsel_step = 1,
.vsel_mask = MAX77650_REGULATOR_V_SBB_MASK,
.vsel_reg = MAX77650_REG_CNFG_SBB2_A,
.active_discharge_off = MAX77650_REGULATOR_AD_DISABLED,
@@ -405,6 +305,7 @@ static struct max77650_regulator_desc max77651_SBB2_desc = {
.min_uV = 2400000,
.uV_step = 50000,
.n_voltages = 64,
+ .vsel_step = 1,
.vsel_mask = MAX77650_REGULATOR_V_SBB_MASK,
.vsel_reg = MAX77650_REG_CNFG_SBB2_A,
.active_discharge_off = MAX77650_REGULATOR_AD_DISABLED,
@@ -496,3 +397,4 @@ module_platform_driver(max77650_regulator_driver);
MODULE_DESCRIPTION("MAXIM 77650/77651 regulator driver");
MODULE_AUTHOR("Bartosz Golaszewski <bgolaszewski@baylibre.com>");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:max77650-regulator");
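For reference, the pickable-range encoding introduced above maps the max77651 SBB1 output as voltage = range_base + vsel * 50 mV, where the 2-bit range field (CNFG_SBB1_A bits [1:0]) picks the base and the 4-bit vsel field (bits [5:2]) picks the step; range 3 only allows selectors 0..9, which is where n_voltages = 58 comes from (16 + 16 + 16 + 10). A minimal user-space sketch of that mapping, mirroring the ranges defined above (illustrative only, not driver code):

/* Illustrative sketch of the max77651 SBB1 pickable-range mapping. */
#include <stdio.h>

static const unsigned int sbb1_range_base_uv[] = {
	2400000, 3200000, 4000000, 4800000,	/* range index 0..3 */
};

static unsigned int sbb1_voltage_uv(unsigned int range, unsigned int vsel)
{
	return sbb1_range_base_uv[range & 0x3] + vsel * 50000;
}

int main(void)
{
	/* range 2, vsel 5 -> 4,000,000 + 5 * 50,000 = 4,250,000 uV */
	printf("%u uV\n", sbb1_voltage_uv(2, 5));
	return 0;
}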
diff --git a/drivers/regulator/max77802-regulator.c b/drivers/regulator/max77802-regulator.c
index ea7b50397300..7b8ec8c0bd15 100644
--- a/drivers/regulator/max77802-regulator.c
+++ b/drivers/regulator/max77802-regulator.c
@@ -14,9 +14,7 @@
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/err.h>
-#include <linux/gpio.h>
#include <linux/slab.h>
-#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
diff --git a/drivers/regulator/max8952.c b/drivers/regulator/max8952.c
index 2a123b87d9f2..ccd5da63cdf2 100644
--- a/drivers/regulator/max8952.c
+++ b/drivers/regulator/max8952.c
@@ -13,11 +13,9 @@
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/max8952.h>
-#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/io.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/regulator/of_regulator.h>
#include <linux/slab.h>
@@ -37,7 +35,8 @@ enum {
struct max8952_data {
struct i2c_client *client;
struct max8952_platform_data *pdata;
-
+ struct gpio_desc *vid0_gpiod;
+ struct gpio_desc *vid1_gpiod;
bool vid0;
bool vid1;
};
@@ -87,16 +86,15 @@ static int max8952_set_voltage_sel(struct regulator_dev *rdev,
{
struct max8952_data *max8952 = rdev_get_drvdata(rdev);
- if (!gpio_is_valid(max8952->pdata->gpio_vid0) ||
- !gpio_is_valid(max8952->pdata->gpio_vid1)) {
+ if (!max8952->vid0_gpiod || !max8952->vid1_gpiod) {
/* DVS not supported */
return -EPERM;
}
max8952->vid0 = selector & 0x1;
max8952->vid1 = (selector >> 1) & 0x1;
- gpio_set_value(max8952->pdata->gpio_vid0, max8952->vid0);
- gpio_set_value(max8952->pdata->gpio_vid1, max8952->vid1);
+ gpiod_set_value(max8952->vid0_gpiod, max8952->vid0);
+ gpiod_set_value(max8952->vid1_gpiod, max8952->vid1);
return 0;
}
@@ -134,9 +132,6 @@ static struct max8952_platform_data *max8952_parse_dt(struct device *dev)
if (!pd)
return NULL;
- pd->gpio_vid0 = of_get_named_gpio(np, "max8952,vid-gpios", 0);
- pd->gpio_vid1 = of_get_named_gpio(np, "max8952,vid-gpios", 1);
-
if (of_property_read_u32(np, "max8952,default-mode", &pd->default_mode))
dev_warn(dev, "Default mode not specified, assuming 0\n");
@@ -179,7 +174,7 @@ static struct max8952_platform_data *max8952_parse_dt(struct device *dev)
static int max8952_pmic_probe(struct i2c_client *client,
const struct i2c_device_id *i2c_id)
{
- struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+ struct i2c_adapter *adapter = client->adapter;
struct max8952_platform_data *pdata = dev_get_platdata(&client->dev);
struct regulator_config config = { };
struct max8952_data *max8952;
@@ -187,7 +182,7 @@ static int max8952_pmic_probe(struct i2c_client *client,
struct gpio_desc *gpiod;
enum gpiod_flags gflags;
- int ret = 0, err = 0;
+ int ret = 0;
if (client->dev.of_node)
pdata = max8952_parse_dt(&client->dev);
@@ -240,32 +235,31 @@ static int max8952_pmic_probe(struct i2c_client *client,
max8952->vid0 = pdata->default_mode & 0x1;
max8952->vid1 = (pdata->default_mode >> 1) & 0x1;
- if (gpio_is_valid(pdata->gpio_vid0) &&
- gpio_is_valid(pdata->gpio_vid1)) {
- unsigned long gpio_flags;
-
- gpio_flags = max8952->vid0 ?
- GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW;
- if (devm_gpio_request_one(&client->dev, pdata->gpio_vid0,
- gpio_flags, "MAX8952 VID0"))
- err = 1;
-
- gpio_flags = max8952->vid1 ?
- GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW;
- if (devm_gpio_request_one(&client->dev, pdata->gpio_vid1,
- gpio_flags, "MAX8952 VID1"))
- err = 2;
- } else
- err = 3;
-
- if (err) {
+ /* Fetch vid0 and vid1 GPIOs if available */
+ gflags = max8952->vid0 ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
+ max8952->vid0_gpiod = devm_gpiod_get_index_optional(&client->dev,
+ "max8952,vid",
+ 0, gflags);
+ if (IS_ERR(max8952->vid0_gpiod))
+ return PTR_ERR(max8952->vid0_gpiod);
+ gflags = max8952->vid1 ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
+ max8952->vid1_gpiod = devm_gpiod_get_index_optional(&client->dev,
+ "max8952,vid",
+ 1, gflags);
+ if (IS_ERR(max8952->vid1_gpiod))
+ return PTR_ERR(max8952->vid1_gpiod);
+
+	/* If either VID GPIO is missing, just disable DVS */
+ if (!max8952->vid0_gpiod || !max8952->vid1_gpiod) {
dev_warn(&client->dev, "VID0/1 gpio invalid: "
- "DVS not available.\n");
+ "DVS not available.\n");
max8952->vid0 = 0;
max8952->vid1 = 0;
- /* Mark invalid */
- pdata->gpio_vid0 = -1;
- pdata->gpio_vid1 = -1;
+		/* Make sure any descriptors we do have are driven low */
+ if (max8952->vid0_gpiod)
+ gpiod_set_value(max8952->vid0_gpiod, 0);
+ if (max8952->vid1_gpiod)
+ gpiod_set_value(max8952->vid1_gpiod, 0);
/* Disable Pulldown of EN only */
max8952_write_reg(max8952, MAX8952_REG_CONTROL, 0x60);
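The converted DVS path above drives the two VID lines directly from the voltage selector: bit 0 goes to VID0 and bit 1 to VID1, giving four DVS levels, and gpiod_set_value() takes logical values so an active-low annotation on the descriptor is handled by gpiolib rather than the driver. A minimal user-space model of that encoding (illustrative only, not part of the driver):

/* Illustrative only: models the VID encoding used by max8952_set_voltage_sel(). */
#include <stdio.h>

static void max8952_vid_for_selector(unsigned int selector, int *vid0, int *vid1)
{
	/* selector 0..3 -> (VID1, VID0) = (bit 1, bit 0) */
	*vid0 = selector & 0x1;
	*vid1 = (selector >> 1) & 0x1;
}

int main(void)
{
	int vid0, vid1;

	max8952_vid_for_selector(2, &vid0, &vid1);	/* -> VID1=1, VID0=0 */
	printf("VID1=%d VID0=%d\n", vid1, vid0);
	return 0;
}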
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index 0ead1164e4d6..397918ebba55 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -21,7 +21,8 @@ static const char *const regulator_states[PM_SUSPEND_MAX + 1] = {
[PM_SUSPEND_MAX] = "regulator-state-disk",
};
-static void of_get_regulation_constraints(struct device_node *np,
+static int of_get_regulation_constraints(struct device *dev,
+ struct device_node *np,
struct regulator_init_data **init_data,
const struct regulator_desc *desc)
{
@@ -30,8 +31,13 @@ static void of_get_regulation_constraints(struct device_node *np,
struct device_node *suspend_np;
unsigned int mode;
int ret, i, len;
+ int n_phandles;
u32 pval;
+ n_phandles = of_count_phandle_with_args(np, "regulator-coupled-with",
+ NULL);
+ n_phandles = max(n_phandles, 0);
+
constraints->name = of_get_property(np, "regulator-name", NULL);
if (!of_property_read_u32(np, "regulator-min-microvolt", &pval))
@@ -163,9 +169,17 @@ static void of_get_regulation_constraints(struct device_node *np,
if (!of_property_read_u32(np, "regulator-system-load", &pval))
constraints->system_load = pval;
- if (!of_property_read_u32(np, "regulator-coupled-max-spread",
- &pval))
- constraints->max_spread = pval;
+ if (n_phandles) {
+ constraints->max_spread = devm_kzalloc(dev,
+ sizeof(*constraints->max_spread) * n_phandles,
+ GFP_KERNEL);
+
+ if (!constraints->max_spread)
+ return -ENOMEM;
+
+ of_property_read_u32_array(np, "regulator-coupled-max-spread",
+ constraints->max_spread, n_phandles);
+ }
if (!of_property_read_u32(np, "regulator-max-step-microvolt",
&pval))
@@ -242,6 +256,8 @@ static void of_get_regulation_constraints(struct device_node *np,
suspend_state = NULL;
suspend_np = NULL;
}
+
+ return 0;
}
/**
@@ -267,7 +283,9 @@ struct regulator_init_data *of_get_regulator_init_data(struct device *dev,
if (!init_data)
return NULL; /* Out of memory? */
- of_get_regulation_constraints(node, &init_data, desc);
+ if (of_get_regulation_constraints(dev, node, &init_data, desc))
+ return NULL;
+
return init_data;
}
EXPORT_SYMBOL_GPL(of_get_regulator_init_data);
@@ -473,7 +491,8 @@ int of_get_n_coupled(struct regulator_dev *rdev)
/* Looks for "to_find" device_node in src's "regulator-coupled-with" property */
static bool of_coupling_find_node(struct device_node *src,
- struct device_node *to_find)
+ struct device_node *to_find,
+ int *index)
{
int n_phandles, i;
bool found = false;
@@ -495,8 +514,10 @@ static bool of_coupling_find_node(struct device_node *src,
of_node_put(tmp);
- if (found)
+ if (found) {
+ *index = i;
break;
+ }
}
return found;
@@ -517,22 +538,23 @@ static bool of_coupling_find_node(struct device_node *src,
*/
bool of_check_coupling_data(struct regulator_dev *rdev)
{
- int max_spread = rdev->constraints->max_spread;
struct device_node *node = rdev->dev.of_node;
int n_phandles = of_get_n_coupled(rdev);
struct device_node *c_node;
+ int index;
int i;
bool ret = true;
- if (max_spread <= 0) {
- dev_err(&rdev->dev, "max_spread value invalid\n");
- return false;
- }
-
/* iterate over rdev's phandles */
for (i = 0; i < n_phandles; i++) {
+ int max_spread = rdev->constraints->max_spread[i];
int c_max_spread, c_n_phandles;
+ if (max_spread <= 0) {
+ dev_err(&rdev->dev, "max_spread value invalid\n");
+ return false;
+ }
+
c_node = of_parse_phandle(node,
"regulator-coupled-with", i);
@@ -549,22 +571,23 @@ bool of_check_coupling_data(struct regulator_dev *rdev)
goto clean;
}
- if (of_property_read_u32(c_node, "regulator-coupled-max-spread",
- &c_max_spread)) {
+ if (!of_coupling_find_node(c_node, node, &index)) {
+ dev_err(&rdev->dev, "missing 2-way linking for coupled regulators\n");
ret = false;
goto clean;
}
- if (c_max_spread != max_spread) {
- dev_err(&rdev->dev,
- "coupled regulators max_spread mismatch\n");
+ if (of_property_read_u32_index(c_node, "regulator-coupled-max-spread",
+ index, &c_max_spread)) {
ret = false;
goto clean;
}
- if (!of_coupling_find_node(c_node, node)) {
- dev_err(&rdev->dev, "missing 2-way linking for coupled regulators\n");
+ if (c_max_spread != max_spread) {
+ dev_err(&rdev->dev,
+ "coupled regulators max_spread mismatch\n");
ret = false;
+ goto clean;
}
clean:
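The reworked check above enforces symmetric coupling with per-partner spreads: for each phandle i in a regulator's "regulator-coupled-with" list, the partner must list this regulator back at some index, and its "regulator-coupled-max-spread" entry at that index must be positive and equal to ours. A compact user-space model of that rule (illustrative only; the structures and the "vdd-cpu"/"vdd-gpu" names are made up for the example):

/* Illustrative model of the symmetry enforced by of_check_coupling_data(). */
#include <stdio.h>
#include <string.h>

struct coupled_reg {
	const char *name;
	const char *coupled_with[2];	/* stands in for the phandle list */
	int max_spread[2];		/* regulator-coupled-max-spread */
	int n_coupled;
};

static int index_of(const struct coupled_reg *r, const char *partner)
{
	for (int i = 0; i < r->n_coupled; i++)
		if (!strcmp(r->coupled_with[i], partner))
			return i;
	return -1;
}

static int coupling_ok(const struct coupled_reg *a, const struct coupled_reg *b)
{
	int ia = index_of(a, b->name), ib = index_of(b, a->name);

	if (ia < 0 || ib < 0)
		return 0;	/* missing 2-way linking */
	return a->max_spread[ia] > 0 && a->max_spread[ia] == b->max_spread[ib];
}

int main(void)
{
	struct coupled_reg vdd_cpu = { "vdd-cpu", { "vdd-gpu" }, { 300000 }, 1 };
	struct coupled_reg vdd_gpu = { "vdd-gpu", { "vdd-cpu" }, { 300000 }, 1 };

	printf("coupling ok: %d\n", coupling_ok(&vdd_cpu, &vdd_gpu));
	return 0;
}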
diff --git a/drivers/regulator/qcom_spmi-regulator.c b/drivers/regulator/qcom_spmi-regulator.c
index 6dfc9e176360..7f51c5fc8194 100644
--- a/drivers/regulator/qcom_spmi-regulator.c
+++ b/drivers/regulator/qcom_spmi-regulator.c
@@ -96,6 +96,8 @@ enum spmi_regulator_logical_type {
SPMI_REGULATOR_LOGICAL_TYPE_ULT_LO_SMPS,
SPMI_REGULATOR_LOGICAL_TYPE_ULT_HO_SMPS,
SPMI_REGULATOR_LOGICAL_TYPE_ULT_LDO,
+ SPMI_REGULATOR_LOGICAL_TYPE_FTSMPS426,
+ SPMI_REGULATOR_LOGICAL_TYPE_HFS430,
};
enum spmi_regulator_type {
@@ -142,11 +144,13 @@ enum spmi_regulator_subtype {
SPMI_REGULATOR_SUBTYPE_5V_BOOST = 0x01,
SPMI_REGULATOR_SUBTYPE_FTS_CTL = 0x08,
SPMI_REGULATOR_SUBTYPE_FTS2p5_CTL = 0x09,
+ SPMI_REGULATOR_SUBTYPE_FTS426_CTL = 0x0a,
SPMI_REGULATOR_SUBTYPE_BB_2A = 0x01,
SPMI_REGULATOR_SUBTYPE_ULT_HF_CTL1 = 0x0d,
SPMI_REGULATOR_SUBTYPE_ULT_HF_CTL2 = 0x0e,
SPMI_REGULATOR_SUBTYPE_ULT_HF_CTL3 = 0x0f,
SPMI_REGULATOR_SUBTYPE_ULT_HF_CTL4 = 0x10,
+ SPMI_REGULATOR_SUBTYPE_HFS430 = 0x0a,
};
enum spmi_common_regulator_registers {
@@ -162,6 +166,18 @@ enum spmi_common_regulator_registers {
SPMI_COMMON_REG_STEP_CTRL = 0x61,
};
+/*
+ * Second common register layout used by newer devices starting with ftsmps426
+ * Note that some of the registers from the first common layout remain
+ * unchanged and their definition is not duplicated.
+ */
+enum spmi_ftsmps426_regulator_registers {
+ SPMI_FTSMPS426_REG_VOLTAGE_LSB = 0x40,
+ SPMI_FTSMPS426_REG_VOLTAGE_MSB = 0x41,
+ SPMI_FTSMPS426_REG_VOLTAGE_ULS_LSB = 0x68,
+ SPMI_FTSMPS426_REG_VOLTAGE_ULS_MSB = 0x69,
+};
+
enum spmi_vs_registers {
SPMI_VS_REG_OCP = 0x4a,
SPMI_VS_REG_SOFT_START = 0x4c,
@@ -221,6 +237,14 @@ enum spmi_common_control_register_index {
#define SPMI_COMMON_MODE_FOLLOW_HW_EN0_MASK 0x01
#define SPMI_COMMON_MODE_FOLLOW_ALL_MASK 0x1f
+#define SPMI_FTSMPS426_MODE_BYPASS_MASK 3
+#define SPMI_FTSMPS426_MODE_RETENTION_MASK 4
+#define SPMI_FTSMPS426_MODE_LPM_MASK 5
+#define SPMI_FTSMPS426_MODE_AUTO_MASK 6
+#define SPMI_FTSMPS426_MODE_HPM_MASK 7
+
+#define SPMI_FTSMPS426_MODE_MASK 0x07
+
/* Common regulator pull down control register layout */
#define SPMI_COMMON_PULL_DOWN_ENABLE_MASK 0x80
@@ -266,6 +290,25 @@ enum spmi_common_control_register_index {
#define SPMI_FTSMPS_STEP_MARGIN_NUM 4
#define SPMI_FTSMPS_STEP_MARGIN_DEN 5
+#define SPMI_FTSMPS426_STEP_CTRL_DELAY_MASK 0x03
+#define SPMI_FTSMPS426_STEP_CTRL_DELAY_SHIFT 0
+
+/* Clock rate in kHz of the FTSMPS426 regulator reference clock. */
+#define SPMI_FTSMPS426_CLOCK_RATE 4800
+
+#define SPMI_HFS430_CLOCK_RATE 1600
+
+/* Minimum voltage stepper delay for each step. */
+#define SPMI_FTSMPS426_STEP_DELAY 2
+
+/*
+ * The ratio SPMI_FTSMPS426_STEP_MARGIN_NUM/SPMI_FTSMPS426_STEP_MARGIN_DEN is
+ * used to adjust the step rate in order to account for oscillator variance.
+ */
+#define SPMI_FTSMPS426_STEP_MARGIN_NUM 10
+#define SPMI_FTSMPS426_STEP_MARGIN_DEN 11
+
+
/* VSET value to decide the range of ULT SMPS */
#define ULT_SMPS_RANGE_SPLIT 0x60
@@ -439,6 +482,10 @@ static struct spmi_voltage_range ftsmps2p5_ranges[] = {
SPMI_VOLTAGE_RANGE(1, 160000, 1360000, 2200000, 2200000, 10000),
};
+static struct spmi_voltage_range ftsmps426_ranges[] = {
+ SPMI_VOLTAGE_RANGE(0, 0, 320000, 1352000, 1352000, 4000),
+};
+
static struct spmi_voltage_range boost_ranges[] = {
SPMI_VOLTAGE_RANGE(0, 4000000, 4000000, 5550000, 5550000, 50000),
};
@@ -464,6 +511,10 @@ static struct spmi_voltage_range ult_pldo_ranges[] = {
SPMI_VOLTAGE_RANGE(0, 1750000, 1750000, 3337500, 3337500, 12500),
};
+static struct spmi_voltage_range hfs430_ranges[] = {
+ SPMI_VOLTAGE_RANGE(0, 320000, 320000, 2040000, 2040000, 8000),
+};
+
static DEFINE_SPMI_SET_POINTS(pldo);
static DEFINE_SPMI_SET_POINTS(nldo1);
static DEFINE_SPMI_SET_POINTS(nldo2);
@@ -472,12 +523,14 @@ static DEFINE_SPMI_SET_POINTS(ln_ldo);
static DEFINE_SPMI_SET_POINTS(smps);
static DEFINE_SPMI_SET_POINTS(ftsmps);
static DEFINE_SPMI_SET_POINTS(ftsmps2p5);
+static DEFINE_SPMI_SET_POINTS(ftsmps426);
static DEFINE_SPMI_SET_POINTS(boost);
static DEFINE_SPMI_SET_POINTS(boost_byp);
static DEFINE_SPMI_SET_POINTS(ult_lo_smps);
static DEFINE_SPMI_SET_POINTS(ult_ho_smps);
static DEFINE_SPMI_SET_POINTS(ult_nldo);
static DEFINE_SPMI_SET_POINTS(ult_pldo);
+static DEFINE_SPMI_SET_POINTS(hfs430);
static inline int spmi_vreg_read(struct spmi_regulator *vreg, u16 addr, u8 *buf,
int len)
@@ -739,18 +792,31 @@ spmi_regulator_common_set_voltage(struct regulator_dev *rdev, unsigned selector)
return spmi_vreg_write(vreg, SPMI_COMMON_REG_VOLTAGE_RANGE, buf, 2);
}
+static int spmi_regulator_common_list_voltage(struct regulator_dev *rdev,
+ unsigned selector);
+
+static int spmi_regulator_ftsmps426_set_voltage(struct regulator_dev *rdev,
+ unsigned selector)
+{
+ struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
+ u8 buf[2];
+ int mV;
+
+ mV = spmi_regulator_common_list_voltage(rdev, selector) / 1000;
+
+ buf[0] = mV & 0xff;
+ buf[1] = mV >> 8;
+ return spmi_vreg_write(vreg, SPMI_FTSMPS426_REG_VOLTAGE_LSB, buf, 2);
+}
+
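Unlike the common layout above, which writes a range index plus a selector, the FTSMPS426 layout takes the target voltage in millivolts split across the VOLTAGE_LSB/VOLTAGE_MSB registers, as done by the set helper above and mirrored by the get helper added further down. A worked example of the encoding (illustrative user-space model, using the ftsmps426 range of 320000 uV minimum and 4000 uV steps defined later in this patch):

/* Illustrative model of the FTSMPS426 millivolt register encoding. */
#include <stdio.h>

int main(void)
{
	unsigned int selector = 10;
	unsigned int uV = 320000 + selector * 4000;	/* 360000 uV */
	unsigned int mV = uV / 1000;
	unsigned char lsb = mV & 0xff;			/* 0x68 */
	unsigned char msb = mV >> 8;			/* 0x01 */

	printf("sel %u -> %u mV -> LSB 0x%02x MSB 0x%02x\n",
	       selector, mV, lsb, msb);
	return 0;
}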
static int spmi_regulator_set_voltage_time_sel(struct regulator_dev *rdev,
unsigned int old_selector, unsigned int new_selector)
{
struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
- const struct spmi_voltage_range *range;
int diff_uV;
- range = spmi_regulator_find_range(vreg);
- if (!range)
- return -EINVAL;
-
- diff_uV = abs(new_selector - old_selector) * range->step_uV;
+ diff_uV = abs(spmi_regulator_common_list_voltage(rdev, new_selector) -
+ spmi_regulator_common_list_voltage(rdev, old_selector));
return DIV_ROUND_UP(diff_uV, vreg->slew_rate);
}
@@ -770,6 +836,21 @@ static int spmi_regulator_common_get_voltage(struct regulator_dev *rdev)
return spmi_hw_selector_to_sw(vreg, voltage_sel, range);
}
+static int spmi_regulator_ftsmps426_get_voltage(struct regulator_dev *rdev)
+{
+ struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
+ const struct spmi_voltage_range *range;
+ u8 buf[2];
+ int uV;
+
+ spmi_vreg_read(vreg, SPMI_FTSMPS426_REG_VOLTAGE_LSB, buf, 2);
+
+ uV = (((unsigned int)buf[1] << 8) | (unsigned int)buf[0]) * 1000;
+ range = vreg->set_points->range;
+
+ return (uV - range->set_point_min_uV) / range->step_uV;
+}
+
static int spmi_regulator_single_map_voltage(struct regulator_dev *rdev,
int min_uV, int max_uV)
{
@@ -903,13 +984,33 @@ static unsigned int spmi_regulator_common_get_mode(struct regulator_dev *rdev)
spmi_vreg_read(vreg, SPMI_COMMON_REG_MODE, &reg, 1);
- if (reg & SPMI_COMMON_MODE_HPM_MASK)
- return REGULATOR_MODE_NORMAL;
+ reg &= SPMI_COMMON_MODE_HPM_MASK | SPMI_COMMON_MODE_AUTO_MASK;
- if (reg & SPMI_COMMON_MODE_AUTO_MASK)
+ switch (reg) {
+ case SPMI_COMMON_MODE_HPM_MASK:
+ return REGULATOR_MODE_NORMAL;
+ case SPMI_COMMON_MODE_AUTO_MASK:
return REGULATOR_MODE_FAST;
+ default:
+ return REGULATOR_MODE_IDLE;
+ }
+}
- return REGULATOR_MODE_IDLE;
+static unsigned int spmi_regulator_ftsmps426_get_mode(struct regulator_dev *rdev)
+{
+ struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
+ u8 reg;
+
+ spmi_vreg_read(vreg, SPMI_COMMON_REG_MODE, &reg, 1);
+
+ switch (reg) {
+ case SPMI_FTSMPS426_MODE_HPM_MASK:
+ return REGULATOR_MODE_NORMAL;
+ case SPMI_FTSMPS426_MODE_AUTO_MASK:
+ return REGULATOR_MODE_FAST;
+ default:
+ return REGULATOR_MODE_IDLE;
+ }
}
static int
@@ -917,12 +1018,43 @@ spmi_regulator_common_set_mode(struct regulator_dev *rdev, unsigned int mode)
{
struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
u8 mask = SPMI_COMMON_MODE_HPM_MASK | SPMI_COMMON_MODE_AUTO_MASK;
- u8 val = 0;
+ u8 val;
- if (mode == REGULATOR_MODE_NORMAL)
+ switch (mode) {
+ case REGULATOR_MODE_NORMAL:
val = SPMI_COMMON_MODE_HPM_MASK;
- else if (mode == REGULATOR_MODE_FAST)
+ break;
+ case REGULATOR_MODE_FAST:
val = SPMI_COMMON_MODE_AUTO_MASK;
+ break;
+ default:
+ val = 0;
+ break;
+ }
+
+ return spmi_vreg_update_bits(vreg, SPMI_COMMON_REG_MODE, val, mask);
+}
+
+static int
+spmi_regulator_ftsmps426_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+ struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
+ u8 mask = SPMI_FTSMPS426_MODE_MASK;
+ u8 val;
+
+ switch (mode) {
+ case REGULATOR_MODE_NORMAL:
+ val = SPMI_FTSMPS426_MODE_HPM_MASK;
+ break;
+ case REGULATOR_MODE_FAST:
+ val = SPMI_FTSMPS426_MODE_AUTO_MASK;
+ break;
+ case REGULATOR_MODE_IDLE:
+ val = SPMI_FTSMPS426_MODE_LPM_MASK;
+ break;
+ default:
+ return -EINVAL;
+ }
return spmi_vreg_update_bits(vreg, SPMI_COMMON_REG_MODE, val, mask);
}
@@ -1256,12 +1388,41 @@ static struct regulator_ops spmi_ult_ldo_ops = {
.set_soft_start = spmi_regulator_common_set_soft_start,
};
+static struct regulator_ops spmi_ftsmps426_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .set_voltage_sel = spmi_regulator_ftsmps426_set_voltage,
+ .set_voltage_time_sel = spmi_regulator_set_voltage_time_sel,
+ .get_voltage_sel = spmi_regulator_ftsmps426_get_voltage,
+ .map_voltage = spmi_regulator_single_map_voltage,
+ .list_voltage = spmi_regulator_common_list_voltage,
+ .set_mode = spmi_regulator_ftsmps426_set_mode,
+ .get_mode = spmi_regulator_ftsmps426_get_mode,
+ .set_load = spmi_regulator_common_set_load,
+ .set_pull_down = spmi_regulator_common_set_pull_down,
+};
+
+static struct regulator_ops spmi_hfs430_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .set_voltage_sel = spmi_regulator_ftsmps426_set_voltage,
+ .set_voltage_time_sel = spmi_regulator_set_voltage_time_sel,
+ .get_voltage_sel = spmi_regulator_ftsmps426_get_voltage,
+ .map_voltage = spmi_regulator_single_map_voltage,
+ .list_voltage = spmi_regulator_common_list_voltage,
+ .set_mode = spmi_regulator_ftsmps426_set_mode,
+ .get_mode = spmi_regulator_ftsmps426_get_mode,
+};
+
/* Maximum possible digital major revision value */
#define INF 0xFF
static const struct spmi_regulator_mapping supported_regulators[] = {
/* type subtype dig_min dig_max ltype ops setpoints hpm_min */
SPMI_VREG(BUCK, GP_CTL, 0, INF, SMPS, smps, smps, 100000),
+ SPMI_VREG(BUCK, HFS430, 0, INF, HFS430, hfs430, hfs430, 10000),
SPMI_VREG(LDO, N300, 0, INF, LDO, ldo, nldo1, 10000),
SPMI_VREG(LDO, N600, 0, 0, LDO, ldo, nldo2, 10000),
SPMI_VREG(LDO, N1200, 0, 0, LDO, ldo, nldo2, 10000),
@@ -1291,6 +1452,7 @@ static const struct spmi_regulator_mapping supported_regulators[] = {
SPMI_VREG(BOOST, 5V_BOOST, 0, INF, BOOST, boost, boost, 0),
SPMI_VREG(FTS, FTS_CTL, 0, INF, FTSMPS, ftsmps, ftsmps, 100000),
SPMI_VREG(FTS, FTS2p5_CTL, 0, INF, FTSMPS, ftsmps, ftsmps2p5, 100000),
+ SPMI_VREG(FTS, FTS426_CTL, 0, INF, FTSMPS426, ftsmps426, ftsmps426, 100000),
SPMI_VREG(BOOST_BYP, BB_2A, 0, INF, BOOST_BYP, boost, boost_byp, 0),
SPMI_VREG(ULT_BUCK, ULT_HF_CTL1, 0, INF, ULT_LO_SMPS, ult_lo_smps,
ult_lo_smps, 100000),
@@ -1428,6 +1590,35 @@ static int spmi_regulator_init_slew_rate(struct spmi_regulator *vreg)
return ret;
}
+static int spmi_regulator_init_slew_rate_ftsmps426(struct spmi_regulator *vreg,
+ int clock_rate)
+{
+ int ret;
+ u8 reg = 0;
+ int delay, slew_rate;
+ const struct spmi_voltage_range *range = &vreg->set_points->range[0];
+
+ ret = spmi_vreg_read(vreg, SPMI_COMMON_REG_STEP_CTRL, &reg, 1);
+ if (ret) {
+ dev_err(vreg->dev, "spmi read failed, ret=%d\n", ret);
+ return ret;
+ }
+
+ delay = reg & SPMI_FTSMPS426_STEP_CTRL_DELAY_MASK;
+ delay >>= SPMI_FTSMPS426_STEP_CTRL_DELAY_SHIFT;
+
+ /* slew_rate has units of uV/us */
+ slew_rate = clock_rate * range->step_uV;
+ slew_rate /= 1000 * (SPMI_FTSMPS426_STEP_DELAY << delay);
+ slew_rate *= SPMI_FTSMPS426_STEP_MARGIN_NUM;
+ slew_rate /= SPMI_FTSMPS426_STEP_MARGIN_DEN;
+
+ /* Ensure that the slew rate is greater than 0 */
+ vreg->slew_rate = max(slew_rate, 1);
+
+ return ret;
+}
+
static int spmi_regulator_init_registers(struct spmi_regulator *vreg,
const struct spmi_regulator_init_data *data)
{
@@ -1567,6 +1758,19 @@ static int spmi_regulator_of_parse(struct device_node *node,
ret = spmi_regulator_init_slew_rate(vreg);
if (ret)
return ret;
+ break;
+ case SPMI_REGULATOR_LOGICAL_TYPE_FTSMPS426:
+ ret = spmi_regulator_init_slew_rate_ftsmps426(vreg,
+ SPMI_FTSMPS426_CLOCK_RATE);
+ if (ret)
+ return ret;
+ break;
+ case SPMI_REGULATOR_LOGICAL_TYPE_HFS430:
+ ret = spmi_regulator_init_slew_rate_ftsmps426(vreg,
+ SPMI_HFS430_CLOCK_RATE);
+ if (ret)
+ return ret;
+ break;
default:
break;
}
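For reference, spmi_regulator_init_slew_rate_ftsmps426() above works out to slew_rate = clock_rate * step_uV / (1000 * (SPMI_FTSMPS426_STEP_DELAY << delay)), scaled by the 10/11 step margin and clamped to at least 1 uV/us. A worked example with the constants added above (illustrative user-space model):

/* Illustrative model of the FTSMPS426/HFS430 slew-rate calculation. */
#include <stdio.h>

static int slew_rate_uv_per_us(int clock_rate_khz, int step_uv, int delay)
{
	int slew = clock_rate_khz * step_uv;

	slew /= 1000 * (2 << delay);	/* SPMI_FTSMPS426_STEP_DELAY == 2 */
	slew = slew * 10 / 11;		/* step margin 10/11 */
	return slew > 0 ? slew : 1;	/* never report 0 */
}

int main(void)
{
	/* FTSMPS426: 4800 kHz clock, 4000 uV step, delay field 0 -> 8727 uV/us */
	printf("%d uV/us\n", slew_rate_uv_per_us(4800, 4000, 0));
	/* HFS430: 1600 kHz clock, 8000 uV step, delay field 0 -> 5818 uV/us */
	printf("%d uV/us\n", slew_rate_uv_per_us(1600, 8000, 0));
	return 0;
}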
@@ -1723,12 +1927,27 @@ static const struct spmi_regulator_data pmi8994_regulators[] = {
{ }
};
+static const struct spmi_regulator_data pm8005_regulators[] = {
+ { "s1", 0x1400, "vdd_s1", },
+ { "s2", 0x1700, "vdd_s2", },
+ { "s3", 0x1a00, "vdd_s3", },
+ { "s4", 0x1d00, "vdd_s4", },
+ { }
+};
+
+static const struct spmi_regulator_data pms405_regulators[] = {
+ { "s3", 0x1a00, "vdd_s3"},
+ { }
+};
+
static const struct of_device_id qcom_spmi_regulator_match[] = {
+ { .compatible = "qcom,pm8005-regulators", .data = &pm8005_regulators },
{ .compatible = "qcom,pm8841-regulators", .data = &pm8841_regulators },
{ .compatible = "qcom,pm8916-regulators", .data = &pm8916_regulators },
{ .compatible = "qcom,pm8941-regulators", .data = &pm8941_regulators },
{ .compatible = "qcom,pm8994-regulators", .data = &pm8994_regulators },
{ .compatible = "qcom,pmi8994-regulators", .data = &pmi8994_regulators },
+ { .compatible = "qcom,pms405-regulators", .data = &pms405_regulators },
{ }
};
MODULE_DEVICE_TABLE(of, qcom_spmi_regulator_match);
@@ -1736,6 +1955,7 @@ MODULE_DEVICE_TABLE(of, qcom_spmi_regulator_match);
static int qcom_spmi_regulator_probe(struct platform_device *pdev)
{
const struct spmi_regulator_data *reg;
+ const struct spmi_voltage_range *range;
const struct of_device_id *match;
struct regulator_config config = { };
struct regulator_dev *rdev;
@@ -1825,6 +2045,12 @@ static int qcom_spmi_regulator_probe(struct platform_device *pdev)
}
}
+ if (vreg->set_points && vreg->set_points->count == 1) {
+ /* since there is only one range */
+ range = vreg->set_points->range;
+ vreg->desc.uV_step = range->step_uV;
+ }
+
config.dev = dev;
config.driver_data = vreg;
config.regmap = regmap;
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index 134c62db36c5..054baaadfdfd 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -34,7 +34,7 @@ struct s2mps11_info {
enum sec_device_type dev_type;
/*
- * One bit for each S2MPS13/S2MPS14/S2MPU02 regulator whether
+ * One bit for each S2MPS11/S2MPS13/S2MPS14/S2MPU02 regulator whether
* the suspend mode was enabled.
*/
DECLARE_BITMAP(suspend_state, S2MPS_REGULATOR_MAX);
@@ -70,10 +70,11 @@ static int s2mps11_regulator_set_voltage_time_sel(struct regulator_dev *rdev,
unsigned int new_selector)
{
struct s2mps11_info *s2mps11 = rdev_get_drvdata(rdev);
+ int rdev_id = rdev_get_id(rdev);
unsigned int ramp_delay = 0;
int old_volt, new_volt;
- switch (rdev_get_id(rdev)) {
+ switch (rdev_id) {
case S2MPS11_BUCK2:
ramp_delay = s2mps11->ramp_delay2;
break;
@@ -111,9 +112,10 @@ static int s2mps11_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
struct s2mps11_info *s2mps11 = rdev_get_drvdata(rdev);
unsigned int ramp_val, ramp_shift, ramp_reg = S2MPS11_REG_RAMP_BUCK;
unsigned int ramp_enable = 1, enable_shift = 0;
+ int rdev_id = rdev_get_id(rdev);
int ret;
- switch (rdev_get_id(rdev)) {
+ switch (rdev_id) {
case S2MPS11_BUCK1:
if (ramp_delay > s2mps11->ramp_delay16)
s2mps11->ramp_delay16 = ramp_delay;
@@ -203,9 +205,8 @@ static int s2mps11_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
goto ramp_disable;
/* Ramp delay can be enabled/disabled only for buck[2346] */
- if ((rdev_get_id(rdev) >= S2MPS11_BUCK2 &&
- rdev_get_id(rdev) <= S2MPS11_BUCK4) ||
- rdev_get_id(rdev) == S2MPS11_BUCK6) {
+ if ((rdev_id >= S2MPS11_BUCK2 && rdev_id <= S2MPS11_BUCK4) ||
+ rdev_id == S2MPS11_BUCK6) {
ret = regmap_update_bits(rdev->regmap, S2MPS11_REG_RAMP,
1 << enable_shift, 1 << enable_shift);
if (ret) {
@@ -224,27 +225,133 @@ ramp_disable:
1 << enable_shift, 0);
}
+static int s2mps11_regulator_enable(struct regulator_dev *rdev)
+{
+ struct s2mps11_info *s2mps11 = rdev_get_drvdata(rdev);
+ int rdev_id = rdev_get_id(rdev);
+ unsigned int val;
+
+ switch (s2mps11->dev_type) {
+ case S2MPS11X:
+ if (test_bit(rdev_id, s2mps11->suspend_state))
+ val = S2MPS14_ENABLE_SUSPEND;
+ else
+ val = rdev->desc->enable_mask;
+ break;
+ case S2MPS13X:
+ case S2MPS14X:
+ if (test_bit(rdev_id, s2mps11->suspend_state))
+ val = S2MPS14_ENABLE_SUSPEND;
+ else if (s2mps11->ext_control_gpiod[rdev_id])
+ val = S2MPS14_ENABLE_EXT_CONTROL;
+ else
+ val = rdev->desc->enable_mask;
+ break;
+ case S2MPU02:
+ if (test_bit(rdev_id, s2mps11->suspend_state))
+ val = S2MPU02_ENABLE_SUSPEND;
+ else
+ val = rdev->desc->enable_mask;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
+ rdev->desc->enable_mask, val);
+}
+
+static int s2mps11_regulator_set_suspend_disable(struct regulator_dev *rdev)
+{
+ int ret;
+ unsigned int val, state;
+ struct s2mps11_info *s2mps11 = rdev_get_drvdata(rdev);
+ int rdev_id = rdev_get_id(rdev);
+
+	/* The LDOs below must stay always on or do not support suspend mode. */
+ switch (s2mps11->dev_type) {
+ case S2MPS11X:
+ switch (rdev_id) {
+ case S2MPS11_LDO2:
+ case S2MPS11_LDO36:
+ case S2MPS11_LDO37:
+ case S2MPS11_LDO38:
+ return 0;
+ default:
+ state = S2MPS14_ENABLE_SUSPEND;
+ break;
+ }
+ break;
+ case S2MPS13X:
+ case S2MPS14X:
+ switch (rdev_id) {
+ case S2MPS14_LDO3:
+ return 0;
+ default:
+ state = S2MPS14_ENABLE_SUSPEND;
+ break;
+ }
+ break;
+ case S2MPU02:
+ switch (rdev_id) {
+ case S2MPU02_LDO13:
+ case S2MPU02_LDO14:
+ case S2MPU02_LDO15:
+ case S2MPU02_LDO17:
+ case S2MPU02_BUCK7:
+ state = S2MPU02_DISABLE_SUSPEND;
+ break;
+ default:
+ state = S2MPU02_ENABLE_SUSPEND;
+ break;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = regmap_read(rdev->regmap, rdev->desc->enable_reg, &val);
+ if (ret < 0)
+ return ret;
+
+ set_bit(rdev_id, s2mps11->suspend_state);
+	/*
+	 * Don't enable suspend mode if the regulator is already disabled,
+	 * because that would effectively turn the regulator on for a short
+	 * time after resuming.
+	 * However, we still want to toggle the suspend_state bit for the
+	 * regulator in case it gets enabled before the system is suspended.
+	 */
+ if (!(val & rdev->desc->enable_mask))
+ return 0;
+
+ return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
+ rdev->desc->enable_mask, state);
+}
+
static const struct regulator_ops s2mps11_ldo_ops = {
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
.is_enabled = regulator_is_enabled_regmap,
- .enable = regulator_enable_regmap,
+ .enable = s2mps11_regulator_enable,
.disable = regulator_disable_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .set_suspend_disable = s2mps11_regulator_set_suspend_disable,
};
static const struct regulator_ops s2mps11_buck_ops = {
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
.is_enabled = regulator_is_enabled_regmap,
- .enable = regulator_enable_regmap,
+ .enable = s2mps11_regulator_enable,
.disable = regulator_disable_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.set_voltage_time_sel = s2mps11_regulator_set_voltage_time_sel,
.set_ramp_delay = s2mps11_set_ramp_delay,
+ .set_suspend_disable = s2mps11_regulator_set_suspend_disable,
};
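The unified s2mps11_regulator_enable() above picks the value written to the enable field from three inputs: the chip variant, whether set_suspend_disable() has flagged the regulator in suspend_state, and (for S2MPS13/S2MPS14) whether an external-control GPIO is attached. A compact model of that decision, with symbolic return values standing in for the register constants (illustrative only):

/* Illustrative model of the enable-value selection in s2mps11_regulator_enable(). */
enum chip { S2MPS11X, S2MPS13X, S2MPS14X, S2MPU02 };
enum val { VAL_ENABLE_MASK, VAL_ENABLE_SUSPEND, VAL_ENABLE_EXT_CONTROL };

static enum val enable_value(enum chip type, int suspended, int has_ext_gpio)
{
	if (suspended)		/* set_suspend_disable() flagged this regulator */
		return VAL_ENABLE_SUSPEND;
	if ((type == S2MPS13X || type == S2MPS14X) && has_ext_gpio)
		return VAL_ENABLE_EXT_CONTROL;
	return VAL_ENABLE_MASK;	/* plain software enable via enable_mask */
}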
#define regulator_desc_s2mps11_ldo(num, step) { \
@@ -269,9 +376,10 @@ static const struct regulator_ops s2mps11_buck_ops = {
.ops = &s2mps11_buck_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
- .min_uV = MIN_600_MV, \
+ .min_uV = MIN_650_MV, \
.uV_step = STEP_6_25_MV, \
- .n_voltages = S2MPS11_BUCK_N_VOLTAGES, \
+ .linear_min_sel = 8, \
+ .n_voltages = S2MPS11_BUCK12346_N_VOLTAGES, \
.ramp_delay = S2MPS11_RAMP_DELAY, \
.vsel_reg = S2MPS11_REG_B1CTRL2 + (num - 1) * 2, \
.vsel_mask = S2MPS11_BUCK_VSEL_MASK, \
@@ -285,9 +393,10 @@ static const struct regulator_ops s2mps11_buck_ops = {
.ops = &s2mps11_buck_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
- .min_uV = MIN_600_MV, \
+ .min_uV = MIN_650_MV, \
.uV_step = STEP_6_25_MV, \
- .n_voltages = S2MPS11_BUCK_N_VOLTAGES, \
+ .linear_min_sel = 8, \
+ .n_voltages = S2MPS11_BUCK5_N_VOLTAGES, \
.ramp_delay = S2MPS11_RAMP_DELAY, \
.vsel_reg = S2MPS11_REG_B5CTRL2, \
.vsel_mask = S2MPS11_BUCK_VSEL_MASK, \
@@ -295,7 +404,7 @@ static const struct regulator_ops s2mps11_buck_ops = {
.enable_mask = S2MPS11_ENABLE_MASK \
}
-#define regulator_desc_s2mps11_buck67810(num, min, step) { \
+#define regulator_desc_s2mps11_buck67810(num, min, step, min_sel, voltages) { \
.name = "BUCK"#num, \
.id = S2MPS11_BUCK##num, \
.ops = &s2mps11_buck_ops, \
@@ -303,7 +412,8 @@ static const struct regulator_ops s2mps11_buck_ops = {
.owner = THIS_MODULE, \
.min_uV = min, \
.uV_step = step, \
- .n_voltages = S2MPS11_BUCK_N_VOLTAGES, \
+ .linear_min_sel = min_sel, \
+ .n_voltages = voltages, \
.ramp_delay = S2MPS11_RAMP_DELAY, \
.vsel_reg = S2MPS11_REG_B6CTRL2 + (num - 6) * 2, \
.vsel_mask = S2MPS11_BUCK_VSEL_MASK, \
@@ -371,11 +481,15 @@ static const struct regulator_desc s2mps11_regulators[] = {
regulator_desc_s2mps11_buck1_4(3),
regulator_desc_s2mps11_buck1_4(4),
regulator_desc_s2mps11_buck5,
- regulator_desc_s2mps11_buck67810(6, MIN_600_MV, STEP_6_25_MV),
- regulator_desc_s2mps11_buck67810(7, MIN_600_MV, STEP_12_5_MV),
- regulator_desc_s2mps11_buck67810(8, MIN_600_MV, STEP_12_5_MV),
+ regulator_desc_s2mps11_buck67810(6, MIN_650_MV, STEP_6_25_MV, 8,
+ S2MPS11_BUCK12346_N_VOLTAGES),
+ regulator_desc_s2mps11_buck67810(7, MIN_750_MV, STEP_12_5_MV, 0,
+ S2MPS11_BUCK7810_N_VOLTAGES),
+ regulator_desc_s2mps11_buck67810(8, MIN_750_MV, STEP_12_5_MV, 0,
+ S2MPS11_BUCK7810_N_VOLTAGES),
regulator_desc_s2mps11_buck9,
- regulator_desc_s2mps11_buck67810(10, MIN_750_MV, STEP_12_5_MV),
+ regulator_desc_s2mps11_buck67810(10, MIN_750_MV, STEP_12_5_MV, 0,
+ S2MPS11_BUCK7810_N_VOLTAGES),
};
static const struct regulator_ops s2mps14_reg_ops;
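With the descriptor changes above, the affected bucks advertise min_uV of 650 mV (or 750 mV) together with linear_min_sel, so the core's linear mapping becomes voltage = min_uV + (selector - linear_min_sel) * uV_step and selectors below linear_min_sel are no longer exposed. Note that selector 8 still lands on the same physical voltage as before (600000 + 8 * 6250 = 650000 uV); only the sub-650 mV codes disappear. A worked example for BUCK1-4/6, assuming the standard regulator_list_voltage_linear() semantics (illustrative user-space model):

/* Illustrative model of the linear mapping with linear_min_sel. */
#include <stdio.h>

static long list_voltage_linear(long min_uv, long uv_step,
				unsigned int linear_min_sel, unsigned int sel)
{
	if (sel < linear_min_sel)
		return -1;	/* selectors below linear_min_sel are invalid */
	return min_uv + (long)(sel - linear_min_sel) * uv_step;
}

int main(void)
{
	/* BUCK1-4/6: 650 mV minimum, 6.25 mV steps, first valid selector 8 */
	printf("sel 8  -> %ld uV\n", list_voltage_linear(650000, 6250, 8, 8));
	printf("sel 16 -> %ld uV\n", list_voltage_linear(650000, 6250, 8, 16));
	return 0;
}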
@@ -500,101 +614,16 @@ static const struct regulator_desc s2mps13_regulators[] = {
regulator_desc_s2mps13_buck8_10(10, MIN_500_MV, STEP_6_25_MV, 0x10),
};
-static int s2mps14_regulator_enable(struct regulator_dev *rdev)
-{
- struct s2mps11_info *s2mps11 = rdev_get_drvdata(rdev);
- unsigned int val;
-
- switch (s2mps11->dev_type) {
- case S2MPS13X:
- case S2MPS14X:
- if (test_bit(rdev_get_id(rdev), s2mps11->suspend_state))
- val = S2MPS14_ENABLE_SUSPEND;
- else if (s2mps11->ext_control_gpiod[rdev_get_id(rdev)])
- val = S2MPS14_ENABLE_EXT_CONTROL;
- else
- val = rdev->desc->enable_mask;
- break;
- case S2MPU02:
- if (test_bit(rdev_get_id(rdev), s2mps11->suspend_state))
- val = S2MPU02_ENABLE_SUSPEND;
- else
- val = rdev->desc->enable_mask;
- break;
- default:
- return -EINVAL;
- }
-
- return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
- rdev->desc->enable_mask, val);
-}
-
-static int s2mps14_regulator_set_suspend_disable(struct regulator_dev *rdev)
-{
- int ret;
- unsigned int val, state;
- struct s2mps11_info *s2mps11 = rdev_get_drvdata(rdev);
- int rdev_id = rdev_get_id(rdev);
-
- /* Below LDO should be always on or does not support suspend mode. */
- switch (s2mps11->dev_type) {
- case S2MPS13X:
- case S2MPS14X:
- switch (rdev_id) {
- case S2MPS14_LDO3:
- return 0;
- default:
- state = S2MPS14_ENABLE_SUSPEND;
- break;
- }
- break;
- case S2MPU02:
- switch (rdev_id) {
- case S2MPU02_LDO13:
- case S2MPU02_LDO14:
- case S2MPU02_LDO15:
- case S2MPU02_LDO17:
- case S2MPU02_BUCK7:
- state = S2MPU02_DISABLE_SUSPEND;
- break;
- default:
- state = S2MPU02_ENABLE_SUSPEND;
- break;
- }
- break;
- default:
- return -EINVAL;
- }
-
- ret = regmap_read(rdev->regmap, rdev->desc->enable_reg, &val);
- if (ret < 0)
- return ret;
-
- set_bit(rdev_get_id(rdev), s2mps11->suspend_state);
- /*
- * Don't enable suspend mode if regulator is already disabled because
- * this would effectively for a short time turn on the regulator after
- * resuming.
- * However we still want to toggle the suspend_state bit for regulator
- * in case if it got enabled before suspending the system.
- */
- if (!(val & rdev->desc->enable_mask))
- return 0;
-
- return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
- rdev->desc->enable_mask, state);
-}
-
static const struct regulator_ops s2mps14_reg_ops = {
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
.is_enabled = regulator_is_enabled_regmap,
- .enable = s2mps14_regulator_enable,
+ .enable = s2mps11_regulator_enable,
.disable = regulator_disable_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.set_voltage_time_sel = regulator_set_voltage_time_sel,
- .set_suspend_disable = s2mps14_regulator_set_suspend_disable,
+ .set_suspend_disable = s2mps11_regulator_set_suspend_disable,
};
#define regulator_desc_s2mps14_ldo(num, min, step) { \
@@ -821,9 +850,12 @@ static void s2mps14_pmic_dt_parse_ext_control_gpio(struct platform_device *pdev,
0,
GPIOD_OUT_HIGH | GPIOD_FLAGS_BIT_NONEXCLUSIVE,
"s2mps11-regulator");
- if (IS_ERR(gpio[reg])) {
+ if (PTR_ERR(gpio[reg]) == -ENOENT)
+ gpio[reg] = NULL;
+ else if (IS_ERR(gpio[reg])) {
dev_err(&pdev->dev, "Failed to get control GPIO for %d/%s\n",
reg, rdata[reg].name);
+ gpio[reg] = NULL;
continue;
}
if (gpio[reg])
@@ -856,8 +888,9 @@ static int s2mps11_pmic_dt_parse(struct platform_device *pdev,
static int s2mpu02_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
{
unsigned int ramp_val, ramp_shift, ramp_reg;
+ int rdev_id = rdev_get_id(rdev);
- switch (rdev_get_id(rdev)) {
+ switch (rdev_id) {
case S2MPU02_BUCK1:
ramp_shift = S2MPU02_BUCK1_RAMP_SHIFT;
break;
@@ -885,24 +918,24 @@ static const struct regulator_ops s2mpu02_ldo_ops = {
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
.is_enabled = regulator_is_enabled_regmap,
- .enable = s2mps14_regulator_enable,
+ .enable = s2mps11_regulator_enable,
.disable = regulator_disable_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.set_voltage_time_sel = regulator_set_voltage_time_sel,
- .set_suspend_disable = s2mps14_regulator_set_suspend_disable,
+ .set_suspend_disable = s2mps11_regulator_set_suspend_disable,
};
static const struct regulator_ops s2mpu02_buck_ops = {
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
.is_enabled = regulator_is_enabled_regmap,
- .enable = s2mps14_regulator_enable,
+ .enable = s2mps11_regulator_enable,
.disable = regulator_disable_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.set_voltage_time_sel = regulator_set_voltage_time_sel,
- .set_suspend_disable = s2mps14_regulator_set_suspend_disable,
+ .set_suspend_disable = s2mps11_regulator_set_suspend_disable,
.set_ramp_delay = s2mpu02_set_ramp_delay,
};
diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
index bb9d1a083299..6ca27e9d5ef7 100644
--- a/drivers/regulator/s5m8767.c
+++ b/drivers/regulator/s5m8767.c
@@ -574,7 +574,9 @@ static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev,
0,
GPIOD_OUT_HIGH | GPIOD_FLAGS_BIT_NONEXCLUSIVE,
"s5m8767");
- if (IS_ERR(rdata->ext_control_gpiod))
+ if (PTR_ERR(rdata->ext_control_gpiod) == -ENOENT)
+ rdata->ext_control_gpiod = NULL;
+ else if (IS_ERR(rdata->ext_control_gpiod))
return PTR_ERR(rdata->ext_control_gpiod);
rdata->id = i;
diff --git a/drivers/regulator/slg51000-regulator.c b/drivers/regulator/slg51000-regulator.c
new file mode 100644
index 000000000000..04b732991d69
--- /dev/null
+++ b/drivers/regulator/slg51000-regulator.c
@@ -0,0 +1,523 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// SLG51000 High PSRR, Multi-Output Regulators
+// Copyright (C) 2019 Dialog Semiconductor
+//
+// Author: Eric Jeong <eric.jeong.opensource@diasemi.com>
+
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include "slg51000-regulator.h"
+
+#define SLG51000_SCTL_EVT 7
+#define SLG51000_MAX_EVT_REGISTER 8
+#define SLG51000_LDOHP_LV_MIN 1200000
+#define SLG51000_LDOHP_HV_MIN 2400000
+
+enum slg51000_regulators {
+ SLG51000_REGULATOR_LDO1 = 0,
+ SLG51000_REGULATOR_LDO2,
+ SLG51000_REGULATOR_LDO3,
+ SLG51000_REGULATOR_LDO4,
+ SLG51000_REGULATOR_LDO5,
+ SLG51000_REGULATOR_LDO6,
+ SLG51000_REGULATOR_LDO7,
+ SLG51000_MAX_REGULATORS,
+};
+
+struct slg51000 {
+ struct device *dev;
+ struct regmap *regmap;
+ struct regulator_desc *rdesc[SLG51000_MAX_REGULATORS];
+ struct regulator_dev *rdev[SLG51000_MAX_REGULATORS];
+ struct gpio_desc *cs_gpiod;
+ int chip_irq;
+};
+
+struct slg51000_evt_sta {
+ unsigned int ereg;
+ unsigned int sreg;
+};
+
+static const struct slg51000_evt_sta es_reg[SLG51000_MAX_EVT_REGISTER] = {
+ {SLG51000_LDO1_EVENT, SLG51000_LDO1_STATUS},
+ {SLG51000_LDO2_EVENT, SLG51000_LDO2_STATUS},
+ {SLG51000_LDO3_EVENT, SLG51000_LDO3_STATUS},
+ {SLG51000_LDO4_EVENT, SLG51000_LDO4_STATUS},
+ {SLG51000_LDO5_EVENT, SLG51000_LDO5_STATUS},
+ {SLG51000_LDO6_EVENT, SLG51000_LDO6_STATUS},
+ {SLG51000_LDO7_EVENT, SLG51000_LDO7_STATUS},
+ {SLG51000_SYSCTL_EVENT, SLG51000_SYSCTL_STATUS},
+};
+
+static const struct regmap_range slg51000_writeable_ranges[] = {
+ regmap_reg_range(SLG51000_SYSCTL_MATRIX_CONF_A,
+ SLG51000_SYSCTL_MATRIX_CONF_A),
+ regmap_reg_range(SLG51000_LDO1_VSEL, SLG51000_LDO1_VSEL),
+ regmap_reg_range(SLG51000_LDO1_MINV, SLG51000_LDO1_MAXV),
+ regmap_reg_range(SLG51000_LDO1_IRQ_MASK, SLG51000_LDO1_IRQ_MASK),
+ regmap_reg_range(SLG51000_LDO2_VSEL, SLG51000_LDO2_VSEL),
+ regmap_reg_range(SLG51000_LDO2_MINV, SLG51000_LDO2_MAXV),
+ regmap_reg_range(SLG51000_LDO2_IRQ_MASK, SLG51000_LDO2_IRQ_MASK),
+ regmap_reg_range(SLG51000_LDO3_VSEL, SLG51000_LDO3_VSEL),
+ regmap_reg_range(SLG51000_LDO3_MINV, SLG51000_LDO3_MAXV),
+ regmap_reg_range(SLG51000_LDO3_IRQ_MASK, SLG51000_LDO3_IRQ_MASK),
+ regmap_reg_range(SLG51000_LDO4_VSEL, SLG51000_LDO4_VSEL),
+ regmap_reg_range(SLG51000_LDO4_MINV, SLG51000_LDO4_MAXV),
+ regmap_reg_range(SLG51000_LDO4_IRQ_MASK, SLG51000_LDO4_IRQ_MASK),
+ regmap_reg_range(SLG51000_LDO5_VSEL, SLG51000_LDO5_VSEL),
+ regmap_reg_range(SLG51000_LDO5_MINV, SLG51000_LDO5_MAXV),
+ regmap_reg_range(SLG51000_LDO5_IRQ_MASK, SLG51000_LDO5_IRQ_MASK),
+ regmap_reg_range(SLG51000_LDO6_VSEL, SLG51000_LDO6_VSEL),
+ regmap_reg_range(SLG51000_LDO6_MINV, SLG51000_LDO6_MAXV),
+ regmap_reg_range(SLG51000_LDO6_IRQ_MASK, SLG51000_LDO6_IRQ_MASK),
+ regmap_reg_range(SLG51000_LDO7_VSEL, SLG51000_LDO7_VSEL),
+ regmap_reg_range(SLG51000_LDO7_MINV, SLG51000_LDO7_MAXV),
+ regmap_reg_range(SLG51000_LDO7_IRQ_MASK, SLG51000_LDO7_IRQ_MASK),
+ regmap_reg_range(SLG51000_OTP_IRQ_MASK, SLG51000_OTP_IRQ_MASK),
+};
+
+static const struct regmap_range slg51000_readable_ranges[] = {
+ regmap_reg_range(SLG51000_SYSCTL_PATN_ID_B0,
+ SLG51000_SYSCTL_PATN_ID_B2),
+ regmap_reg_range(SLG51000_SYSCTL_SYS_CONF_A,
+ SLG51000_SYSCTL_SYS_CONF_A),
+ regmap_reg_range(SLG51000_SYSCTL_SYS_CONF_D,
+ SLG51000_SYSCTL_MATRIX_CONF_B),
+ regmap_reg_range(SLG51000_SYSCTL_REFGEN_CONF_C,
+ SLG51000_SYSCTL_UVLO_CONF_A),
+ regmap_reg_range(SLG51000_SYSCTL_FAULT_LOG1, SLG51000_SYSCTL_IRQ_MASK),
+ regmap_reg_range(SLG51000_IO_GPIO1_CONF, SLG51000_IO_GPIO_STATUS),
+ regmap_reg_range(SLG51000_LUTARRAY_LUT_VAL_0,
+ SLG51000_LUTARRAY_LUT_VAL_11),
+ regmap_reg_range(SLG51000_MUXARRAY_INPUT_SEL_0,
+ SLG51000_MUXARRAY_INPUT_SEL_63),
+ regmap_reg_range(SLG51000_PWRSEQ_RESOURCE_EN_0,
+ SLG51000_PWRSEQ_INPUT_SENSE_CONF_B),
+ regmap_reg_range(SLG51000_LDO1_VSEL, SLG51000_LDO1_VSEL),
+ regmap_reg_range(SLG51000_LDO1_MINV, SLG51000_LDO1_MAXV),
+ regmap_reg_range(SLG51000_LDO1_MISC1, SLG51000_LDO1_VSEL_ACTUAL),
+ regmap_reg_range(SLG51000_LDO1_EVENT, SLG51000_LDO1_IRQ_MASK),
+ regmap_reg_range(SLG51000_LDO2_VSEL, SLG51000_LDO2_VSEL),
+ regmap_reg_range(SLG51000_LDO2_MINV, SLG51000_LDO2_MAXV),
+ regmap_reg_range(SLG51000_LDO2_MISC1, SLG51000_LDO2_VSEL_ACTUAL),
+ regmap_reg_range(SLG51000_LDO2_EVENT, SLG51000_LDO2_IRQ_MASK),
+ regmap_reg_range(SLG51000_LDO3_VSEL, SLG51000_LDO3_VSEL),
+ regmap_reg_range(SLG51000_LDO3_MINV, SLG51000_LDO3_MAXV),
+ regmap_reg_range(SLG51000_LDO3_CONF1, SLG51000_LDO3_VSEL_ACTUAL),
+ regmap_reg_range(SLG51000_LDO3_EVENT, SLG51000_LDO3_IRQ_MASK),
+ regmap_reg_range(SLG51000_LDO4_VSEL, SLG51000_LDO4_VSEL),
+ regmap_reg_range(SLG51000_LDO4_MINV, SLG51000_LDO4_MAXV),
+ regmap_reg_range(SLG51000_LDO4_CONF1, SLG51000_LDO4_VSEL_ACTUAL),
+ regmap_reg_range(SLG51000_LDO4_EVENT, SLG51000_LDO4_IRQ_MASK),
+ regmap_reg_range(SLG51000_LDO5_VSEL, SLG51000_LDO5_VSEL),
+ regmap_reg_range(SLG51000_LDO5_MINV, SLG51000_LDO5_MAXV),
+ regmap_reg_range(SLG51000_LDO5_TRIM2, SLG51000_LDO5_TRIM2),
+ regmap_reg_range(SLG51000_LDO5_CONF1, SLG51000_LDO5_VSEL_ACTUAL),
+ regmap_reg_range(SLG51000_LDO5_EVENT, SLG51000_LDO5_IRQ_MASK),
+ regmap_reg_range(SLG51000_LDO6_VSEL, SLG51000_LDO6_VSEL),
+ regmap_reg_range(SLG51000_LDO6_MINV, SLG51000_LDO6_MAXV),
+ regmap_reg_range(SLG51000_LDO6_TRIM2, SLG51000_LDO6_TRIM2),
+ regmap_reg_range(SLG51000_LDO6_CONF1, SLG51000_LDO6_VSEL_ACTUAL),
+ regmap_reg_range(SLG51000_LDO6_EVENT, SLG51000_LDO6_IRQ_MASK),
+ regmap_reg_range(SLG51000_LDO7_VSEL, SLG51000_LDO7_VSEL),
+ regmap_reg_range(SLG51000_LDO7_MINV, SLG51000_LDO7_MAXV),
+ regmap_reg_range(SLG51000_LDO7_CONF1, SLG51000_LDO7_VSEL_ACTUAL),
+ regmap_reg_range(SLG51000_LDO7_EVENT, SLG51000_LDO7_IRQ_MASK),
+ regmap_reg_range(SLG51000_OTP_EVENT, SLG51000_OTP_EVENT),
+ regmap_reg_range(SLG51000_OTP_IRQ_MASK, SLG51000_OTP_IRQ_MASK),
+ regmap_reg_range(SLG51000_OTP_LOCK_OTP_PROG, SLG51000_OTP_LOCK_CTRL),
+ regmap_reg_range(SLG51000_LOCK_GLOBAL_LOCK_CTRL1,
+ SLG51000_LOCK_GLOBAL_LOCK_CTRL1),
+};
+
+static const struct regmap_range slg51000_volatile_ranges[] = {
+ regmap_reg_range(SLG51000_SYSCTL_FAULT_LOG1, SLG51000_SYSCTL_STATUS),
+ regmap_reg_range(SLG51000_IO_GPIO_STATUS, SLG51000_IO_GPIO_STATUS),
+ regmap_reg_range(SLG51000_LDO1_EVENT, SLG51000_LDO1_STATUS),
+ regmap_reg_range(SLG51000_LDO2_EVENT, SLG51000_LDO2_STATUS),
+ regmap_reg_range(SLG51000_LDO3_EVENT, SLG51000_LDO3_STATUS),
+ regmap_reg_range(SLG51000_LDO4_EVENT, SLG51000_LDO4_STATUS),
+ regmap_reg_range(SLG51000_LDO5_EVENT, SLG51000_LDO5_STATUS),
+ regmap_reg_range(SLG51000_LDO6_EVENT, SLG51000_LDO6_STATUS),
+ regmap_reg_range(SLG51000_LDO7_EVENT, SLG51000_LDO7_STATUS),
+ regmap_reg_range(SLG51000_OTP_EVENT, SLG51000_OTP_EVENT),
+};
+
+static const struct regmap_access_table slg51000_writeable_table = {
+ .yes_ranges = slg51000_writeable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(slg51000_writeable_ranges),
+};
+
+static const struct regmap_access_table slg51000_readable_table = {
+ .yes_ranges = slg51000_readable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(slg51000_readable_ranges),
+};
+
+static const struct regmap_access_table slg51000_volatile_table = {
+ .yes_ranges = slg51000_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(slg51000_volatile_ranges),
+};
+
+static const struct regmap_config slg51000_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 8,
+ .max_register = 0x8000,
+ .wr_table = &slg51000_writeable_table,
+ .rd_table = &slg51000_readable_table,
+ .volatile_table = &slg51000_volatile_table,
+};
+
+static const struct regulator_ops slg51000_regl_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+};
+
+static const struct regulator_ops slg51000_switch_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+};
+
+static int slg51000_of_parse_cb(struct device_node *np,
+ const struct regulator_desc *desc,
+ struct regulator_config *config)
+{
+ struct slg51000 *chip = config->driver_data;
+ struct gpio_desc *ena_gpiod;
+ enum gpiod_flags gflags = GPIOD_OUT_LOW | GPIOD_FLAGS_BIT_NONEXCLUSIVE;
+
+ ena_gpiod = devm_gpiod_get_from_of_node(chip->dev, np,
+ "enable-gpios", 0,
+ gflags, "gpio-en-ldo");
+ if (ena_gpiod) {
+ config->ena_gpiod = ena_gpiod;
+ devm_gpiod_unhinge(chip->dev, config->ena_gpiod);
+ }
+
+ return 0;
+}
+
+#define SLG51000_REGL_DESC(_id, _name, _s_name, _min, _step) \
+ [SLG51000_REGULATOR_##_id] = { \
+ .name = #_name, \
+ .supply_name = _s_name, \
+ .id = SLG51000_REGULATOR_##_id, \
+ .of_match = of_match_ptr(#_name), \
+ .of_parse_cb = slg51000_of_parse_cb, \
+ .ops = &slg51000_regl_ops, \
+ .regulators_node = of_match_ptr("regulators"), \
+ .n_voltages = 256, \
+ .min_uV = _min, \
+ .uV_step = _step, \
+ .linear_min_sel = 0, \
+ .vsel_mask = SLG51000_VSEL_MASK, \
+ .vsel_reg = SLG51000_##_id##_VSEL, \
+ .enable_reg = SLG51000_SYSCTL_MATRIX_CONF_A, \
+ .enable_mask = BIT(SLG51000_REGULATOR_##_id), \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ }
+
+static struct regulator_desc regls_desc[SLG51000_MAX_REGULATORS] = {
+ SLG51000_REGL_DESC(LDO1, ldo1, NULL, 2400000, 5000),
+ SLG51000_REGL_DESC(LDO2, ldo2, NULL, 2400000, 5000),
+ SLG51000_REGL_DESC(LDO3, ldo3, "vin3", 1200000, 10000),
+ SLG51000_REGL_DESC(LDO4, ldo4, "vin4", 1200000, 10000),
+ SLG51000_REGL_DESC(LDO5, ldo5, "vin5", 400000, 5000),
+ SLG51000_REGL_DESC(LDO6, ldo6, "vin6", 400000, 5000),
+ SLG51000_REGL_DESC(LDO7, ldo7, "vin7", 1200000, 10000),
+};
+
+static int slg51000_regulator_init(struct slg51000 *chip)
+{
+ struct regulator_config config = { };
+ struct regulator_desc *rdesc;
+ unsigned int reg, val;
+ u8 vsel_range[2];
+ int id, ret = 0;
+ const unsigned int min_regs[SLG51000_MAX_REGULATORS] = {
+ SLG51000_LDO1_MINV, SLG51000_LDO2_MINV, SLG51000_LDO3_MINV,
+ SLG51000_LDO4_MINV, SLG51000_LDO5_MINV, SLG51000_LDO6_MINV,
+ SLG51000_LDO7_MINV,
+ };
+
+ for (id = 0; id < SLG51000_MAX_REGULATORS; id++) {
+ chip->rdesc[id] = &regls_desc[id];
+ rdesc = chip->rdesc[id];
+ config.regmap = chip->regmap;
+ config.dev = chip->dev;
+ config.driver_data = chip;
+
+ ret = regmap_bulk_read(chip->regmap, min_regs[id],
+ vsel_range, 2);
+ if (ret < 0) {
+ dev_err(chip->dev,
+ "Failed to read the MIN register\n");
+ return ret;
+ }
+
+ switch (id) {
+ case SLG51000_REGULATOR_LDO1:
+ case SLG51000_REGULATOR_LDO2:
+ if (id == SLG51000_REGULATOR_LDO1)
+ reg = SLG51000_LDO1_MISC1;
+ else
+ reg = SLG51000_LDO2_MISC1;
+
+ ret = regmap_read(chip->regmap, reg, &val);
+ if (ret < 0) {
+ dev_err(chip->dev,
+ "Failed to read voltage range of ldo%d\n",
+ id + 1);
+ return ret;
+ }
+
+ rdesc->linear_min_sel = vsel_range[0];
+ rdesc->n_voltages = vsel_range[1] + 1;
+ if (val & SLG51000_SEL_VRANGE_MASK)
+ rdesc->min_uV = SLG51000_LDOHP_HV_MIN
+ + (vsel_range[0]
+ * rdesc->uV_step);
+ else
+ rdesc->min_uV = SLG51000_LDOHP_LV_MIN
+ + (vsel_range[0]
+ * rdesc->uV_step);
+ break;
+
+ case SLG51000_REGULATOR_LDO5:
+ case SLG51000_REGULATOR_LDO6:
+ if (id == SLG51000_REGULATOR_LDO5)
+ reg = SLG51000_LDO5_TRIM2;
+ else
+ reg = SLG51000_LDO6_TRIM2;
+
+ ret = regmap_read(chip->regmap, reg, &val);
+ if (ret < 0) {
+ dev_err(chip->dev,
+ "Failed to read LDO mode register\n");
+ return ret;
+ }
+
+ if (val & SLG51000_SEL_BYP_MODE_MASK) {
+ rdesc->ops = &slg51000_switch_ops;
+ rdesc->n_voltages = 0;
+ rdesc->min_uV = 0;
+ rdesc->uV_step = 0;
+ rdesc->linear_min_sel = 0;
+ break;
+ }
+ /* Fall through - to the check below. */
+
+ default:
+ rdesc->linear_min_sel = vsel_range[0];
+ rdesc->n_voltages = vsel_range[1] + 1;
+ rdesc->min_uV = rdesc->min_uV
+ + (vsel_range[0] * rdesc->uV_step);
+ break;
+ }
+
+ chip->rdev[id] = devm_regulator_register(chip->dev, rdesc,
+ &config);
+ if (IS_ERR(chip->rdev[id])) {
+ ret = PTR_ERR(chip->rdev[id]);
+ dev_err(chip->dev,
+ "Failed to register regulator(%s):%d\n",
+ chip->rdesc[id]->name, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static irqreturn_t slg51000_irq_handler(int irq, void *data)
+{
+ struct slg51000 *chip = data;
+ struct regmap *regmap = chip->regmap;
+ enum { R0 = 0, R1, R2, REG_MAX };
+ u8 evt[SLG51000_MAX_EVT_REGISTER][REG_MAX];
+ int ret, i, handled = IRQ_NONE;
+ unsigned int evt_otp, mask_otp;
+
+ /* Read event[R0], status[R1] and mask[R2] register */
+ for (i = 0; i < SLG51000_MAX_EVT_REGISTER; i++) {
+ ret = regmap_bulk_read(regmap, es_reg[i].ereg, evt[i], REG_MAX);
+ if (ret < 0) {
+ dev_err(chip->dev,
+ "Failed to read event registers(%d)\n", ret);
+ return IRQ_NONE;
+ }
+ }
+
+ ret = regmap_read(regmap, SLG51000_OTP_EVENT, &evt_otp);
+ if (ret < 0) {
+ dev_err(chip->dev,
+ "Failed to read otp event registers(%d)\n", ret);
+ return IRQ_NONE;
+ }
+
+ ret = regmap_read(regmap, SLG51000_OTP_IRQ_MASK, &mask_otp);
+ if (ret < 0) {
+ dev_err(chip->dev,
+ "Failed to read otp mask register(%d)\n", ret);
+ return IRQ_NONE;
+ }
+
+ if ((evt_otp & SLG51000_EVT_CRC_MASK) &&
+ !(mask_otp & SLG51000_IRQ_CRC_MASK)) {
+ dev_info(chip->dev,
+ "OTP has been read or OTP crc is not zero\n");
+ handled = IRQ_HANDLED;
+ }
+
+ for (i = 0; i < SLG51000_MAX_REGULATORS; i++) {
+ if (!(evt[i][R2] & SLG51000_IRQ_ILIM_FLAG_MASK) &&
+ (evt[i][R0] & SLG51000_EVT_ILIM_FLAG_MASK)) {
+ regulator_lock(chip->rdev[i]);
+ regulator_notifier_call_chain(chip->rdev[i],
+ REGULATOR_EVENT_OVER_CURRENT, NULL);
+ regulator_unlock(chip->rdev[i]);
+
+ if (evt[i][R1] & SLG51000_STA_ILIM_FLAG_MASK)
+ dev_warn(chip->dev,
+ "Over-current limit(ldo%d)\n", i + 1);
+ handled = IRQ_HANDLED;
+ }
+ }
+
+ if (!(evt[SLG51000_SCTL_EVT][R2] & SLG51000_IRQ_HIGH_TEMP_WARN_MASK) &&
+ (evt[SLG51000_SCTL_EVT][R0] & SLG51000_EVT_HIGH_TEMP_WARN_MASK)) {
+ for (i = 0; i < SLG51000_MAX_REGULATORS; i++) {
+ if (!(evt[i][R1] & SLG51000_STA_ILIM_FLAG_MASK) &&
+ (evt[i][R1] & SLG51000_STA_VOUT_OK_FLAG_MASK)) {
+ regulator_lock(chip->rdev[i]);
+ regulator_notifier_call_chain(chip->rdev[i],
+ REGULATOR_EVENT_OVER_TEMP, NULL);
+ regulator_unlock(chip->rdev[i]);
+ }
+ }
+ handled = IRQ_HANDLED;
+ if (evt[SLG51000_SCTL_EVT][R1] &
+ SLG51000_STA_HIGH_TEMP_WARN_MASK)
+ dev_warn(chip->dev, "High temperature warning!\n");
+ }
+
+ return handled;
+}
+
+static void slg51000_clear_fault_log(struct slg51000 *chip)
+{
+ unsigned int val = 0;
+ int ret = 0;
+
+ ret = regmap_read(chip->regmap, SLG51000_SYSCTL_FAULT_LOG1, &val);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to read Fault log register\n");
+ return;
+ }
+
+ if (val & SLG51000_FLT_OVER_TEMP_MASK)
+ dev_dbg(chip->dev, "Fault log: FLT_OVER_TEMP\n");
+ if (val & SLG51000_FLT_POWER_SEQ_CRASH_REQ_MASK)
+ dev_dbg(chip->dev, "Fault log: FLT_POWER_SEQ_CRASH_REQ\n");
+ if (val & SLG51000_FLT_RST_MASK)
+ dev_dbg(chip->dev, "Fault log: FLT_RST\n");
+ if (val & SLG51000_FLT_POR_MASK)
+ dev_dbg(chip->dev, "Fault log: FLT_POR\n");
+}
+
+static int slg51000_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct slg51000 *chip;
+ struct gpio_desc *cs_gpiod = NULL;
+ int error, ret;
+
+ chip = devm_kzalloc(dev, sizeof(struct slg51000), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ cs_gpiod = devm_gpiod_get_from_of_node(dev, dev->of_node,
+ "dlg,cs-gpios", 0,
+ GPIOD_OUT_HIGH
+ | GPIOD_FLAGS_BIT_NONEXCLUSIVE,
+ "slg51000-cs");
+ if (cs_gpiod) {
+ dev_info(dev, "Found chip selector property\n");
+ chip->cs_gpiod = cs_gpiod;
+ }
+
+ i2c_set_clientdata(client, chip);
+ chip->chip_irq = client->irq;
+ chip->dev = dev;
+ chip->regmap = devm_regmap_init_i2c(client, &slg51000_regmap_config);
+ if (IS_ERR(chip->regmap)) {
+ error = PTR_ERR(chip->regmap);
+ dev_err(dev, "Failed to allocate register map: %d\n",
+ error);
+ return error;
+ }
+
+ ret = slg51000_regulator_init(chip);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to init regulator(%d)\n", ret);
+ return ret;
+ }
+
+ slg51000_clear_fault_log(chip);
+
+ if (chip->chip_irq) {
+ ret = devm_request_threaded_irq(dev, chip->chip_irq, NULL,
+ slg51000_irq_handler,
+ (IRQF_TRIGGER_HIGH |
+ IRQF_ONESHOT),
+ "slg51000-irq", chip);
+ if (ret != 0) {
+ dev_err(dev, "Failed to request IRQ: %d\n",
+ chip->chip_irq);
+ return ret;
+ }
+ } else {
+ dev_info(dev, "No IRQ configured\n");
+ }
+
+ return ret;
+}
+
+static const struct i2c_device_id slg51000_i2c_id[] = {
+ {"slg51000", 0},
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, slg51000_i2c_id);
+
+static struct i2c_driver slg51000_regulator_driver = {
+ .driver = {
+ .name = "slg51000-regulator",
+ },
+ .probe = slg51000_i2c_probe,
+ .id_table = slg51000_i2c_id,
+};
+
+module_i2c_driver(slg51000_regulator_driver);
+
+MODULE_AUTHOR("Eric Jeong <eric.jeong.opensource@diasemi.com>");
+MODULE_DESCRIPTION("SLG51000 regulator driver");
+MODULE_LICENSE("GPL");
+
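A note on the init path above, separate from the patch itself: slg51000_regulator_init() turns the MINV/MAXV readback into a linear range for the regulator core — linear_min_sel becomes the MINV value, n_voltages becomes MAXV + 1, and min_uV is offset by MINV steps from the range base. The standalone sketch below only illustrates that arithmetic; the register values in it are made up, since the real ones come from the chip's OTP.

#include <stdio.h>

struct linear_range {
	unsigned int min_uV;		/* voltage at linear_min_sel */
	unsigned int uV_step;
	unsigned int linear_min_sel;
	unsigned int n_voltages;
};

/* Mirrors the default branch of the switch in slg51000_regulator_init() */
static void setup_range(struct linear_range *r, unsigned int base_uV,
			unsigned int uV_step, unsigned char minv,
			unsigned char maxv)
{
	r->uV_step = uV_step;
	r->linear_min_sel = minv;
	r->n_voltages = maxv + 1;
	r->min_uV = base_uV + minv * uV_step;
}

/* Roughly what regulator_list_voltage_linear() computes for a selector */
static int list_voltage(const struct linear_range *r, unsigned int sel)
{
	if (sel < r->linear_min_sel || sel >= r->n_voltages)
		return -1;
	return r->min_uV + (sel - r->linear_min_sel) * r->uV_step;
}

int main(void)
{
	struct linear_range ldo3;

	/* Hypothetical MINV/MAXV readback of 0x10 and 0x80 for ldo3 */
	setup_range(&ldo3, 1200000, 10000, 0x10, 0x80);
	printf("ldo3 spans %d..%d uV over %u selectors\n",
	       list_voltage(&ldo3, ldo3.linear_min_sel),
	       list_voltage(&ldo3, ldo3.n_voltages - 1),
	       ldo3.n_voltages - ldo3.linear_min_sel);
	return 0;
}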
diff --git a/drivers/regulator/slg51000-regulator.h b/drivers/regulator/slg51000-regulator.h
new file mode 100644
index 000000000000..20feb7f91942
--- /dev/null
+++ b/drivers/regulator/slg51000-regulator.h
@@ -0,0 +1,505 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * SLG51000 High PSRR, Multi-Output Regulators
+ * Copyright (C) 2019 Dialog Semiconductor
+ *
+ * Author: Eric Jeong <eric.jeong.opensource@diasemi.com>
+ */
+
+#ifndef __SLG51000_REGISTERS_H__
+#define __SLG51000_REGISTERS_H__
+
+/* Registers */
+
+#define SLG51000_SYSCTL_PATN_ID_B0 0x1105
+#define SLG51000_SYSCTL_PATN_ID_B1 0x1106
+#define SLG51000_SYSCTL_PATN_ID_B2 0x1107
+#define SLG51000_SYSCTL_SYS_CONF_A 0x1109
+#define SLG51000_SYSCTL_SYS_CONF_D 0x110c
+#define SLG51000_SYSCTL_MATRIX_CONF_A 0x110d
+#define SLG51000_SYSCTL_MATRIX_CONF_B 0x110e
+#define SLG51000_SYSCTL_REFGEN_CONF_C 0x1111
+#define SLG51000_SYSCTL_UVLO_CONF_A 0x1112
+#define SLG51000_SYSCTL_FAULT_LOG1 0x1115
+#define SLG51000_SYSCTL_EVENT 0x1116
+#define SLG51000_SYSCTL_STATUS 0x1117
+#define SLG51000_SYSCTL_IRQ_MASK 0x1118
+#define SLG51000_IO_GPIO1_CONF 0x1500
+#define SLG51000_IO_GPIO2_CONF 0x1501
+#define SLG51000_IO_GPIO3_CONF 0x1502
+#define SLG51000_IO_GPIO4_CONF 0x1503
+#define SLG51000_IO_GPIO5_CONF 0x1504
+#define SLG51000_IO_GPIO6_CONF 0x1505
+#define SLG51000_IO_GPIO_STATUS 0x1506
+#define SLG51000_LUTARRAY_LUT_VAL_0 0x1600
+#define SLG51000_LUTARRAY_LUT_VAL_1 0x1601
+#define SLG51000_LUTARRAY_LUT_VAL_2 0x1602
+#define SLG51000_LUTARRAY_LUT_VAL_3 0x1603
+#define SLG51000_LUTARRAY_LUT_VAL_4 0x1604
+#define SLG51000_LUTARRAY_LUT_VAL_5 0x1605
+#define SLG51000_LUTARRAY_LUT_VAL_6 0x1606
+#define SLG51000_LUTARRAY_LUT_VAL_7 0x1607
+#define SLG51000_LUTARRAY_LUT_VAL_8 0x1608
+#define SLG51000_LUTARRAY_LUT_VAL_9 0x1609
+#define SLG51000_LUTARRAY_LUT_VAL_10 0x160a
+#define SLG51000_LUTARRAY_LUT_VAL_11 0x160b
+#define SLG51000_MUXARRAY_INPUT_SEL_0 0x1700
+#define SLG51000_MUXARRAY_INPUT_SEL_1 0x1701
+#define SLG51000_MUXARRAY_INPUT_SEL_2 0x1702
+#define SLG51000_MUXARRAY_INPUT_SEL_3 0x1703
+#define SLG51000_MUXARRAY_INPUT_SEL_4 0x1704
+#define SLG51000_MUXARRAY_INPUT_SEL_5 0x1705
+#define SLG51000_MUXARRAY_INPUT_SEL_6 0x1706
+#define SLG51000_MUXARRAY_INPUT_SEL_7 0x1707
+#define SLG51000_MUXARRAY_INPUT_SEL_8 0x1708
+#define SLG51000_MUXARRAY_INPUT_SEL_9 0x1709
+#define SLG51000_MUXARRAY_INPUT_SEL_10 0x170a
+#define SLG51000_MUXARRAY_INPUT_SEL_11 0x170b
+#define SLG51000_MUXARRAY_INPUT_SEL_12 0x170c
+#define SLG51000_MUXARRAY_INPUT_SEL_13 0x170d
+#define SLG51000_MUXARRAY_INPUT_SEL_14 0x170e
+#define SLG51000_MUXARRAY_INPUT_SEL_15 0x170f
+#define SLG51000_MUXARRAY_INPUT_SEL_16 0x1710
+#define SLG51000_MUXARRAY_INPUT_SEL_17 0x1711
+#define SLG51000_MUXARRAY_INPUT_SEL_18 0x1712
+#define SLG51000_MUXARRAY_INPUT_SEL_19 0x1713
+#define SLG51000_MUXARRAY_INPUT_SEL_20 0x1714
+#define SLG51000_MUXARRAY_INPUT_SEL_21 0x1715
+#define SLG51000_MUXARRAY_INPUT_SEL_22 0x1716
+#define SLG51000_MUXARRAY_INPUT_SEL_23 0x1717
+#define SLG51000_MUXARRAY_INPUT_SEL_24 0x1718
+#define SLG51000_MUXARRAY_INPUT_SEL_25 0x1719
+#define SLG51000_MUXARRAY_INPUT_SEL_26 0x171a
+#define SLG51000_MUXARRAY_INPUT_SEL_27 0x171b
+#define SLG51000_MUXARRAY_INPUT_SEL_28 0x171c
+#define SLG51000_MUXARRAY_INPUT_SEL_29 0x171d
+#define SLG51000_MUXARRAY_INPUT_SEL_30 0x171e
+#define SLG51000_MUXARRAY_INPUT_SEL_31 0x171f
+#define SLG51000_MUXARRAY_INPUT_SEL_32 0x1720
+#define SLG51000_MUXARRAY_INPUT_SEL_33 0x1721
+#define SLG51000_MUXARRAY_INPUT_SEL_34 0x1722
+#define SLG51000_MUXARRAY_INPUT_SEL_35 0x1723
+#define SLG51000_MUXARRAY_INPUT_SEL_36 0x1724
+#define SLG51000_MUXARRAY_INPUT_SEL_37 0x1725
+#define SLG51000_MUXARRAY_INPUT_SEL_38 0x1726
+#define SLG51000_MUXARRAY_INPUT_SEL_39 0x1727
+#define SLG51000_MUXARRAY_INPUT_SEL_40 0x1728
+#define SLG51000_MUXARRAY_INPUT_SEL_41 0x1729
+#define SLG51000_MUXARRAY_INPUT_SEL_42 0x172a
+#define SLG51000_MUXARRAY_INPUT_SEL_43 0x172b
+#define SLG51000_MUXARRAY_INPUT_SEL_44 0x172c
+#define SLG51000_MUXARRAY_INPUT_SEL_45 0x172d
+#define SLG51000_MUXARRAY_INPUT_SEL_46 0x172e
+#define SLG51000_MUXARRAY_INPUT_SEL_47 0x172f
+#define SLG51000_MUXARRAY_INPUT_SEL_48 0x1730
+#define SLG51000_MUXARRAY_INPUT_SEL_49 0x1731
+#define SLG51000_MUXARRAY_INPUT_SEL_50 0x1732
+#define SLG51000_MUXARRAY_INPUT_SEL_51 0x1733
+#define SLG51000_MUXARRAY_INPUT_SEL_52 0x1734
+#define SLG51000_MUXARRAY_INPUT_SEL_53 0x1735
+#define SLG51000_MUXARRAY_INPUT_SEL_54 0x1736
+#define SLG51000_MUXARRAY_INPUT_SEL_55 0x1737
+#define SLG51000_MUXARRAY_INPUT_SEL_56 0x1738
+#define SLG51000_MUXARRAY_INPUT_SEL_57 0x1739
+#define SLG51000_MUXARRAY_INPUT_SEL_58 0x173a
+#define SLG51000_MUXARRAY_INPUT_SEL_59 0x173b
+#define SLG51000_MUXARRAY_INPUT_SEL_60 0x173c
+#define SLG51000_MUXARRAY_INPUT_SEL_61 0x173d
+#define SLG51000_MUXARRAY_INPUT_SEL_62 0x173e
+#define SLG51000_MUXARRAY_INPUT_SEL_63 0x173f
+#define SLG51000_PWRSEQ_RESOURCE_EN_0 0x1900
+#define SLG51000_PWRSEQ_RESOURCE_EN_1 0x1901
+#define SLG51000_PWRSEQ_RESOURCE_EN_2 0x1902
+#define SLG51000_PWRSEQ_RESOURCE_EN_3 0x1903
+#define SLG51000_PWRSEQ_RESOURCE_EN_4 0x1904
+#define SLG51000_PWRSEQ_RESOURCE_EN_5 0x1905
+#define SLG51000_PWRSEQ_SLOT_TIME_MIN_UP0 0x1906
+#define SLG51000_PWRSEQ_SLOT_TIME_MIN_DOWN0 0x1907
+#define SLG51000_PWRSEQ_SLOT_TIME_MIN_UP1 0x1908
+#define SLG51000_PWRSEQ_SLOT_TIME_MIN_DOWN1 0x1909
+#define SLG51000_PWRSEQ_SLOT_TIME_MIN_UP2 0x190a
+#define SLG51000_PWRSEQ_SLOT_TIME_MIN_DOWN2 0x190b
+#define SLG51000_PWRSEQ_SLOT_TIME_MIN_UP3 0x190c
+#define SLG51000_PWRSEQ_SLOT_TIME_MIN_DOWN3 0x190d
+#define SLG51000_PWRSEQ_SLOT_TIME_MIN_UP4 0x190e
+#define SLG51000_PWRSEQ_SLOT_TIME_MIN_DOWN4 0x190f
+#define SLG51000_PWRSEQ_SLOT_TIME_MIN_UP5 0x1910
+#define SLG51000_PWRSEQ_SLOT_TIME_MIN_DOWN5 0x1911
+#define SLG51000_PWRSEQ_SLOT_TIME_MAX_CONF_A 0x1912
+#define SLG51000_PWRSEQ_SLOT_TIME_MAX_CONF_B 0x1913
+#define SLG51000_PWRSEQ_SLOT_TIME_MAX_CONF_C 0x1914
+#define SLG51000_PWRSEQ_INPUT_SENSE_CONF_A 0x1915
+#define SLG51000_PWRSEQ_INPUT_SENSE_CONF_B 0x1916
+#define SLG51000_LDO1_VSEL 0x2000
+#define SLG51000_LDO1_MINV 0x2060
+#define SLG51000_LDO1_MAXV 0x2061
+#define SLG51000_LDO1_MISC1 0x2064
+#define SLG51000_LDO1_VSEL_ACTUAL 0x2065
+#define SLG51000_LDO1_EVENT 0x20c0
+#define SLG51000_LDO1_STATUS 0x20c1
+#define SLG51000_LDO1_IRQ_MASK 0x20c2
+#define SLG51000_LDO2_VSEL 0x2200
+#define SLG51000_LDO2_MINV 0x2260
+#define SLG51000_LDO2_MAXV 0x2261
+#define SLG51000_LDO2_MISC1 0x2264
+#define SLG51000_LDO2_VSEL_ACTUAL 0x2265
+#define SLG51000_LDO2_EVENT 0x22c0
+#define SLG51000_LDO2_STATUS 0x22c1
+#define SLG51000_LDO2_IRQ_MASK 0x22c2
+#define SLG51000_LDO3_VSEL 0x2300
+#define SLG51000_LDO3_MINV 0x2360
+#define SLG51000_LDO3_MAXV 0x2361
+#define SLG51000_LDO3_CONF1 0x2364
+#define SLG51000_LDO3_CONF2 0x2365
+#define SLG51000_LDO3_VSEL_ACTUAL 0x2366
+#define SLG51000_LDO3_EVENT 0x23c0
+#define SLG51000_LDO3_STATUS 0x23c1
+#define SLG51000_LDO3_IRQ_MASK 0x23c2
+#define SLG51000_LDO4_VSEL 0x2500
+#define SLG51000_LDO4_MINV 0x2560
+#define SLG51000_LDO4_MAXV 0x2561
+#define SLG51000_LDO4_CONF1 0x2564
+#define SLG51000_LDO4_CONF2 0x2565
+#define SLG51000_LDO4_VSEL_ACTUAL 0x2566
+#define SLG51000_LDO4_EVENT 0x25c0
+#define SLG51000_LDO4_STATUS 0x25c1
+#define SLG51000_LDO4_IRQ_MASK 0x25c2
+#define SLG51000_LDO5_VSEL 0x2700
+#define SLG51000_LDO5_MINV 0x2760
+#define SLG51000_LDO5_MAXV 0x2761
+#define SLG51000_LDO5_TRIM2 0x2763
+#define SLG51000_LDO5_CONF1 0x2765
+#define SLG51000_LDO5_CONF2 0x2766
+#define SLG51000_LDO5_VSEL_ACTUAL 0x2767
+#define SLG51000_LDO5_EVENT 0x27c0
+#define SLG51000_LDO5_STATUS 0x27c1
+#define SLG51000_LDO5_IRQ_MASK 0x27c2
+#define SLG51000_LDO6_VSEL 0x2900
+#define SLG51000_LDO6_MINV 0x2960
+#define SLG51000_LDO6_MAXV 0x2961
+#define SLG51000_LDO6_TRIM2 0x2963
+#define SLG51000_LDO6_CONF1 0x2965
+#define SLG51000_LDO6_CONF2 0x2966
+#define SLG51000_LDO6_VSEL_ACTUAL 0x2967
+#define SLG51000_LDO6_EVENT 0x29c0
+#define SLG51000_LDO6_STATUS 0x29c1
+#define SLG51000_LDO6_IRQ_MASK 0x29c2
+#define SLG51000_LDO7_VSEL 0x3100
+#define SLG51000_LDO7_MINV 0x3160
+#define SLG51000_LDO7_MAXV 0x3161
+#define SLG51000_LDO7_CONF1 0x3164
+#define SLG51000_LDO7_CONF2 0x3165
+#define SLG51000_LDO7_VSEL_ACTUAL 0x3166
+#define SLG51000_LDO7_EVENT 0x31c0
+#define SLG51000_LDO7_STATUS 0x31c1
+#define SLG51000_LDO7_IRQ_MASK 0x31c2
+#define SLG51000_OTP_EVENT 0x782b
+#define SLG51000_OTP_IRQ_MASK 0x782d
+#define SLG51000_OTP_LOCK_OTP_PROG 0x78fe
+#define SLG51000_OTP_LOCK_CTRL 0x78ff
+#define SLG51000_LOCK_GLOBAL_LOCK_CTRL1 0x8000
+
+/* Register Bit Fields */
+
+/* SLG51000_SYSCTL_PATTERN_ID_BYTE0 = 0x1105 */
+#define SLG51000_PATTERN_ID_BYTE0_SHIFT 0
+#define SLG51000_PATTERN_ID_BYTE0_MASK (0xff << 0)
+
+/* SLG51000_SYSCTL_PATTERN_ID_BYTE1 = 0x1106 */
+#define SLG51000_PATTERN_ID_BYTE1_SHIFT 0
+#define SLG51000_PATTERN_ID_BYTE1_MASK (0xff << 0)
+
+/* SLG51000_SYSCTL_PATTERN_ID_BYTE2 = 0x1107 */
+#define SLG51000_PATTERN_ID_BYTE2_SHIFT 0
+#define SLG51000_PATTERN_ID_BYTE2_MASK (0xff << 0)
+
+/* SLG51000_SYSCTL_SYS_CONF_A = 0x1109 */
+#define SLG51000_I2C_ADDRESS_SHIFT 0
+#define SLG51000_I2C_ADDRESS_MASK (0x7f << 0)
+#define SLG51000_I2C_DISABLE_SHIFT 7
+#define SLG51000_I2C_DISABLE_MASK (0x01 << 7)
+
+/* SLG51000_SYSCTL_SYS_CONF_D = 0x110c */
+#define SLG51000_CS_T_DEB_SHIFT 6
+#define SLG51000_CS_T_DEB_MASK (0x03 << 6)
+#define SLG51000_I2C_CLR_MODE_SHIFT 5
+#define SLG51000_I2C_CLR_MODE_MASK (0x01 << 5)
+
+/* SLG51000_SYSCTL_MATRIX_CTRL_CONF_A = 0x110d */
+#define SLG51000_RESOURCE_CTRL_SHIFT 0
+#define SLG51000_RESOURCE_CTRL_MASK (0xff << 0)
+
+/* SLG51000_SYSCTL_MATRIX_CTRL_CONF_B = 0x110e */
+#define SLG51000_MATRIX_EVENT_SENSE_SHIFT 0
+#define SLG51000_MATRIX_EVENT_SENSE_MASK (0x07 << 0)
+
+/* SLG51000_SYSCTL_REFGEN_CONF_C = 0x1111 */
+#define SLG51000_REFGEN_SEL_TEMP_WARN_DEBOUNCE_SHIFT 2
+#define SLG51000_REFGEN_SEL_TEMP_WARN_DEBOUNCE_MASK (0x03 << 2)
+#define SLG51000_REFGEN_SEL_TEMP_WARN_THR_SHIFT 0
+#define SLG51000_REFGEN_SEL_TEMP_WARN_THR_MASK (0x03 << 0)
+
+/* SLG51000_SYSCTL_UVLO_CONF_A = 0x1112 */
+#define SLG51000_VMON_UVLO_SEL_THR_SHIFT 0
+#define SLG51000_VMON_UVLO_SEL_THR_MASK (0x1f << 0)
+
+/* SLG51000_SYSCTL_FAULT_LOG1 = 0x1115 */
+#define SLG51000_FLT_POR_SHIFT 5
+#define SLG51000_FLT_POR_MASK (0x01 << 5)
+#define SLG51000_FLT_RST_SHIFT 4
+#define SLG51000_FLT_RST_MASK (0x01 << 4)
+#define SLG51000_FLT_POWER_SEQ_CRASH_REQ_SHIFT 2
+#define SLG51000_FLT_POWER_SEQ_CRASH_REQ_MASK (0x01 << 2)
+#define SLG51000_FLT_OVER_TEMP_SHIFT 1
+#define SLG51000_FLT_OVER_TEMP_MASK (0x01 << 1)
+
+/* SLG51000_SYSCTL_EVENT = 0x1116 */
+#define SLG51000_EVT_MATRIX_SHIFT 1
+#define SLG51000_EVT_MATRIX_MASK (0x01 << 1)
+#define SLG51000_EVT_HIGH_TEMP_WARN_SHIFT 0
+#define SLG51000_EVT_HIGH_TEMP_WARN_MASK (0x01 << 0)
+
+/* SLG51000_SYSCTL_STATUS = 0x1117 */
+#define SLG51000_STA_MATRIX_SHIFT 1
+#define SLG51000_STA_MATRIX_MASK (0x01 << 1)
+#define SLG51000_STA_HIGH_TEMP_WARN_SHIFT 0
+#define SLG51000_STA_HIGH_TEMP_WARN_MASK (0x01 << 0)
+
+/* SLG51000_SYSCTL_IRQ_MASK = 0x1118 */
+#define SLG51000_IRQ_MATRIX_SHIFT 1
+#define SLG51000_IRQ_MATRIX_MASK (0x01 << 1)
+#define SLG51000_IRQ_HIGH_TEMP_WARN_SHIFT 0
+#define SLG51000_IRQ_HIGH_TEMP_WARN_MASK (0x01 << 0)
+
+/* SLG51000_IO_GPIO1_CONF ~ SLG51000_IO_GPIO5_CONF =
+ * 0x1500, 0x1501, 0x1502, 0x1503, 0x1504
+ */
+#define SLG51000_GPIO_DIR_SHIFT 7
+#define SLG51000_GPIO_DIR_MASK (0x01 << 7)
+#define SLG51000_GPIO_SENS_SHIFT 5
+#define SLG51000_GPIO_SENS_MASK (0x03 << 5)
+#define SLG51000_GPIO_INVERT_SHIFT 4
+#define SLG51000_GPIO_INVERT_MASK (0x01 << 4)
+#define SLG51000_GPIO_BYP_SHIFT 3
+#define SLG51000_GPIO_BYP_MASK (0x01 << 3)
+#define SLG51000_GPIO_T_DEB_SHIFT 1
+#define SLG51000_GPIO_T_DEB_MASK (0x03 << 1)
+#define SLG51000_GPIO_LEVEL_SHIFT 0
+#define SLG51000_GPIO_LEVEL_MASK (0x01 << 0)
+
+/* SLG51000_IO_GPIO6_CONF = 0x1505 */
+#define SLG51000_GPIO6_SENS_SHIFT 5
+#define SLG51000_GPIO6_SENS_MASK (0x03 << 5)
+#define SLG51000_GPIO6_INVERT_SHIFT 4
+#define SLG51000_GPIO6_INVERT_MASK (0x01 << 4)
+#define SLG51000_GPIO6_T_DEB_SHIFT 1
+#define SLG51000_GPIO6_T_DEB_MASK (0x03 << 1)
+#define SLG51000_GPIO6_LEVEL_SHIFT 0
+#define SLG51000_GPIO6_LEVEL_MASK (0x01 << 0)
+
+/* SLG51000_IO_GPIO_STATUS = 0x1506 */
+#define SLG51000_GPIO6_STATUS_SHIFT 5
+#define SLG51000_GPIO6_STATUS_MASK (0x01 << 5)
+#define SLG51000_GPIO5_STATUS_SHIFT 4
+#define SLG51000_GPIO5_STATUS_MASK (0x01 << 4)
+#define SLG51000_GPIO4_STATUS_SHIFT 3
+#define SLG51000_GPIO4_STATUS_MASK (0x01 << 3)
+#define SLG51000_GPIO3_STATUS_SHIFT 2
+#define SLG51000_GPIO3_STATUS_MASK (0x01 << 2)
+#define SLG51000_GPIO2_STATUS_SHIFT 1
+#define SLG51000_GPIO2_STATUS_MASK (0x01 << 1)
+#define SLG51000_GPIO1_STATUS_SHIFT 0
+#define SLG51000_GPIO1_STATUS_MASK (0x01 << 0)
+
+/* SLG51000_LUTARRAY_LUT_VAL_0 ~ SLG51000_LUTARRAY_LUT_VAL_11
+ * 0x1600, 0x1601, 0x1602, 0x1603, 0x1604, 0x1605,
+ * 0x1606, 0x1607, 0x1608, 0x1609, 0x160a, 0x160b
+ */
+#define SLG51000_LUT_VAL_SHIFT 0
+#define SLG51000_LUT_VAL_MASK (0xff << 0)
+
+/* SLG51000_MUXARRAY_INPUT_SEL_0 ~ SLG51000_MUXARRAY_INPUT_SEL_63
+ * 0x1700, 0x1701, 0x1702, 0x1703, 0x1704, 0x1705,
+ * 0x1706, 0x1707, 0x1708, 0x1709, 0x170a, 0x170b,
+ * 0x170c, 0x170d, 0x170e, 0x170f, 0x1710, 0x1711,
+ * 0x1712, 0x1713, 0x1714, 0x1715, 0x1716, 0x1717,
+ * 0x1718, 0x1719, 0x171a, 0x171b, 0x171c, 0x171d,
+ * 0x171e, 0x171f, 0x1720, 0x1721, 0x1722, 0x1723,
+ * 0x1724, 0x1725, 0x1726, 0x1727, 0x1728, 0x1729,
+ * 0x172a, 0x172b, 0x172c, 0x172d, 0x172e, 0x172f,
+ * 0x1730, 0x1731, 0x1732, 0x1733, 0x1734, 0x1735,
+ * 0x1736, 0x1737, 0x1738, 0x1739, 0x173a, 0x173b,
+ * 0x173c, 0x173d, 0x173e, 0x173f
+ */
+#define SLG51000_INPUT_SEL_SHIFT 0
+#define SLG51000_INPUT_SEL_MASK (0x3f << 0)
+
+/* SLG51000_PWRSEQ_RESOURCE_EN_0 ~ SLG51000_PWRSEQ_RESOURCE_EN_5
+ * 0x1900, 0x1901, 0x1902, 0x1903, 0x1904, 0x1905
+ */
+#define SLG51000_RESOURCE_EN_DOWN0_SHIFT 4
+#define SLG51000_RESOURCE_EN_DOWN0_MASK (0x07 << 4)
+#define SLG51000_RESOURCE_EN_UP0_SHIFT 0
+#define SLG51000_RESOURCE_EN_UP0_MASK (0x07 << 0)
+
+/* SLG51000_PWRSEQ_SLOT_TIME_MIN_UP0 ~ SLG51000_PWRSEQ_SLOT_TIME_MIN_UP5
+ * 0x1906, 0x1908, 0x190a, 0x190c, 0x190e, 0x1910
+ */
+#define SLG51000_SLOT_TIME_MIN_UP_SHIFT 0
+#define SLG51000_SLOT_TIME_MIN_UP_MASK (0xff << 0)
+
+/* SLG51000_PWRSEQ_SLOT_TIME_MIN_DOWN0 ~ SLG51000_PWRSEQ_SLOT_TIME_MIN_DOWN5
+ * 0x1907, 0x1909, 0x190b, 0x190d, 0x190f, 0x1911
+ */
+#define SLG51000_SLOT_TIME_MIN_DOWN_SHIFT 0
+#define SLG51000_SLOT_TIME_MIN_DOWN_MASK (0xff << 0)
+
+/* SLG51000_PWRSEQ_SLOT_TIME_MAX_CONF_A ~ SLG51000_PWRSEQ_SLOT_TIME_MAX_CONF_C
+ * 0x1912, 0x1913, 0x1914
+ */
+#define SLG51000_SLOT_TIME_MAX_DOWN1_SHIFT 6
+#define SLG51000_SLOT_TIME_MAX_DOWN1_MASK (0x03 << 6)
+#define SLG51000_SLOT_TIME_MAX_UP1_SHIFT 4
+#define SLG51000_SLOT_TIME_MAX_UP1_MASK (0x03 << 4)
+#define SLG51000_SLOT_TIME_MAX_DOWN0_SHIFT 2
+#define SLG51000_SLOT_TIME_MAX_DOWN0_MASK (0x03 << 2)
+#define SLG51000_SLOT_TIME_MAX_UP0_SHIFT 0
+#define SLG51000_SLOT_TIME_MAX_UP0_MASK (0x03 << 0)
+
+/* SLG51000_PWRSEQ_INPUT_SENSE_CONF_A = 0x1915 */
+#define SLG51000_TRIG_UP_SENSE_SHIFT 6
+#define SLG51000_TRIG_UP_SENSE_MASK (0x01 << 6)
+#define SLG51000_UP_EN_SENSE5_SHIFT 5
+#define SLG51000_UP_EN_SENSE5_MASK (0x01 << 5)
+#define SLG51000_UP_EN_SENSE4_SHIFT 4
+#define SLG51000_UP_EN_SENSE4_MASK (0x01 << 4)
+#define SLG51000_UP_EN_SENSE3_SHIFT 3
+#define SLG51000_UP_EN_SENSE3_MASK (0x01 << 3)
+#define SLG51000_UP_EN_SENSE2_SHIFT 2
+#define SLG51000_UP_EN_SENSE2_MASK (0x01 << 2)
+#define SLG51000_UP_EN_SENSE1_SHIFT 1
+#define SLG51000_UP_EN_SENSE1_MASK (0x01 << 1)
+#define SLG51000_UP_EN_SENSE0_SHIFT 0
+#define SLG51000_UP_EN_SENSE0_MASK (0x01 << 0)
+
+/* SLG51000_PWRSEQ_INPUT_SENSE_CONF_B = 0x1916 */
+#define SLG51000_CRASH_DETECT_SENSE_SHIFT 7
+#define SLG51000_CRASH_DETECT_SENSE_MASK (0x01 << 7)
+#define SLG51000_TRIG_DOWN_SENSE_SHIFT 6
+#define SLG51000_TRIG_DOWN_SENSE_MASK (0x01 << 6)
+#define SLG51000_DOWN_EN_SENSE5_SHIFT 5
+#define SLG51000_DOWN_EN_SENSE5_MASK (0x01 << 5)
+#define SLG51000_DOWN_EN_SENSE4_SHIFT 4
+#define SLG51000_DOWN_EN_SENSE4_MASK (0x01 << 4)
+#define SLG51000_DOWN_EN_SENSE3_SHIFT 3
+#define SLG51000_DOWN_EN_SENSE3_MASK (0x01 << 3)
+#define SLG51000_DOWN_EN_SENSE2_SHIFT 2
+#define SLG51000_DOWN_EN_SENSE2_MASK (0x01 << 2)
+#define SLG51000_DOWN_EN_SENSE1_SHIFT 1
+#define SLG51000_DOWN_EN_SENSE1_MASK (0x01 << 1)
+#define SLG51000_DOWN_EN_SENSE0_SHIFT 0
+#define SLG51000_DOWN_EN_SENSE0_MASK (0x01 << 0)
+
+/* SLG51000_LDO1_VSEL ~ SLG51000_LDO7_VSEL =
+ * 0x2000, 0x2200, 0x2300, 0x2500, 0x2700, 0x2900, 0x3100
+ */
+#define SLG51000_VSEL_SHIFT 0
+#define SLG51000_VSEL_MASK (0xff << 0)
+
+/* SLG51000_LDO1_MINV ~ SLG51000_LDO7_MINV =
+ * 0x2060, 0x2260, 0x2360, 0x2560, 0x2760, 0x2960, 0x3160
+ */
+#define SLG51000_MINV_SHIFT 0
+#define SLG51000_MINV_MASK (0xff << 0)
+
+/* SLG51000_LDO1_MAXV ~ SLG51000_LDO7_MAXV =
+ * 0x2061, 0x2261, 0x2361, 0x2561, 0x2761, 0x2961, 0x3161
+ */
+#define SLG51000_MAXV_SHIFT 0
+#define SLG51000_MAXV_MASK (0xff << 0)
+
+/* SLG51000_LDO1_MISC1 = 0x2064, SLG51000_LDO2_MISC1 = 0x2264 */
+#define SLG51000_SEL_VRANGE_SHIFT 0
+#define SLG51000_SEL_VRANGE_MASK (0x01 << 0)
+
+/* SLG51000_LDO1_VSEL_ACTUAL ~ SLG51000_LDO7_VSEL_ACTUAL =
+ * 0x2065, 0x2265, 0x2366, 0x2566, 0x2767, 0x2967, 0x3166
+ */
+#define SLG51000_VSEL_ACTUAL_SHIFT 0
+#define SLG51000_VSEL_ACTUAL_MASK (0xff << 0)
+
+/* SLG51000_LDO1_EVENT ~ SLG51000_LDO7_EVENT =
+ * 0x20c0, 0x22c0, 0x23c0, 0x25c0, 0x27c0, 0x29c0, 0x31c0
+ */
+#define SLG51000_EVT_ILIM_FLAG_SHIFT 0
+#define SLG51000_EVT_ILIM_FLAG_MASK (0x01 << 0)
+#define SLG51000_EVT_VOUT_OK_FLAG_SHIFT 1
+#define SLG51000_EVT_VOUT_OK_FLAG_MASK (0x01 << 1)
+
+/* SLG51000_LDO1_STATUS ~ SLG51000_LDO7_STATUS =
+ * 0x20c1, 0x22c1, 0x23c1, 0x25c1, 0x27c1, 0x29c1, 0x31c1
+ */
+#define SLG51000_STA_ILIM_FLAG_SHIFT 0
+#define SLG51000_STA_ILIM_FLAG_MASK (0x01 << 0)
+#define SLG51000_STA_VOUT_OK_FLAG_SHIFT 1
+#define SLG51000_STA_VOUT_OK_FLAG_MASK (0x01 << 1)
+
+/* SLG51000_LDO1_IRQ_MASK ~ SLG51000_LDO7_IRQ_MASK =
+ * 0x20c2, 0x22c2, 0x23c2, 0x25c2, 0x27c2, 0x29c2, 0x31c2
+ */
+#define SLG51000_IRQ_ILIM_FLAG_SHIFT 0
+#define SLG51000_IRQ_ILIM_FLAG_MASK (0x01 << 0)
+
+/* SLG51000_LDO3_CONF1 ~ SLG51000_LDO7_CONF1 =
+ * 0x2364, 0x2564, 0x2765, 0x2965, 0x3164
+ */
+#define SLG51000_SEL_START_ILIM_SHIFT 0
+#define SLG51000_SEL_START_ILIM_MASK (0x7f << 0)
+
+/* SLG51000_LDO3_CONF2 ~ SLG51000_LDO7_CONF2 =
+ * 0x2365, 0x2565, 0x2766, 0x2966, 0x3165
+ */
+#define SLG51000_SEL_FUNC_ILIM_SHIFT 0
+#define SLG51000_SEL_FUNC_ILIM_MASK (0x7f << 0)
+
+/* SLG51000_LDO5_TRIM2 = 0x2763, SLG51000_LDO6_TRIM2 = 0x2963 */
+#define SLG51000_SEL_BYP_SLEW_RATE_SHIFT 2
+#define SLG51000_SEL_BYP_SLEW_RATE_MASK (0x03 << 2)
+#define SLG51000_SEL_BYP_VGATE_SHIFT 1
+#define SLG51000_SEL_BYP_VGATE_MASK (0x01 << 1)
+#define SLG51000_SEL_BYP_MODE_SHIFT 0
+#define SLG51000_SEL_BYP_MODE_MASK (0x01 << 0)
+
+/* SLG51000_OTP_EVENT = 0x782b */
+#define SLG51000_EVT_CRC_SHIFT 0
+#define SLG51000_EVT_CRC_MASK (0x01 << 0)
+
+/* SLG51000_OTP_IRQ_MASK = 0x782d */
+#define SLG51000_IRQ_CRC_SHIFT 0
+#define SLG51000_IRQ_CRC_MASK (0x01 << 0)
+
+/* SLG51000_OTP_LOCK_OTP_PROG = 0x78fe */
+#define SLG51000_LOCK_OTP_PROG_SHIFT 0
+#define SLG51000_LOCK_OTP_PROG_MASK (0x01 << 0)
+
+/* SLG51000_OTP_LOCK_CTRL = 0x78ff */
+#define SLG51000_LOCK_DFT_SHIFT 1
+#define SLG51000_LOCK_DFT_MASK (0x01 << 1)
+#define SLG51000_LOCK_RWT_SHIFT 0
+#define SLG51000_LOCK_RWT_MASK (0x01 << 0)
+
+/* SLG51000_LOCK_GLOBAL_LOCK_CTRL1 = 0x8000 */
+#define SLG51000_LDO7_LOCK_SHIFT 7
+#define SLG51000_LDO7_LOCK_MASK (0x01 << 7)
+#define SLG51000_LDO6_LOCK_SHIFT 6
+#define SLG51000_LDO6_LOCK_MASK (0x01 << 6)
+#define SLG51000_LDO5_LOCK_SHIFT 5
+#define SLG51000_LDO5_LOCK_MASK (0x01 << 5)
+#define SLG51000_LDO4_LOCK_SHIFT 4
+#define SLG51000_LDO4_LOCK_MASK (0x01 << 4)
+#define SLG51000_LDO3_LOCK_SHIFT 3
+#define SLG51000_LDO3_LOCK_MASK (0x01 << 3)
+#define SLG51000_LDO2_LOCK_SHIFT 2
+#define SLG51000_LDO2_LOCK_MASK (0x01 << 2)
+#define SLG51000_LDO1_LOCK_SHIFT 1
+#define SLG51000_LDO1_LOCK_MASK (0x01 << 1)
+
+#endif /* __SLG51000_REGISTERS_H__ */
+
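The register map above encodes every bit field as a _SHIFT/_MASK pair; a field is read as (reg & MASK) >> SHIFT and written by clearing MASK before or-ing in the shifted value. A small self-contained sketch of that pattern using two of the GPIO configuration fields — the values programmed here are purely illustrative, since the field semantics are not documented in this header:

#include <stdio.h>

/* Two of the fields above, repeated so the sketch is self-contained */
#define SLG51000_GPIO_DIR_SHIFT		7
#define SLG51000_GPIO_DIR_MASK		(0x01 << 7)
#define SLG51000_GPIO_T_DEB_SHIFT	1
#define SLG51000_GPIO_T_DEB_MASK	(0x03 << 1)

static unsigned int field_get(unsigned int reg, unsigned int mask,
			      unsigned int shift)
{
	return (reg & mask) >> shift;
}

static unsigned int field_set(unsigned int reg, unsigned int mask,
			      unsigned int shift, unsigned int val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	unsigned int conf = 0;

	conf = field_set(conf, SLG51000_GPIO_DIR_MASK,
			 SLG51000_GPIO_DIR_SHIFT, 1);
	conf = field_set(conf, SLG51000_GPIO_T_DEB_MASK,
			 SLG51000_GPIO_T_DEB_SHIFT, 2);
	printf("conf=0x%02x dir=%u t_deb=%u\n", conf,
	       field_get(conf, SLG51000_GPIO_DIR_MASK,
			 SLG51000_GPIO_DIR_SHIFT),
	       field_get(conf, SLG51000_GPIO_T_DEB_MASK,
			 SLG51000_GPIO_T_DEB_SHIFT));
	return 0;
}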
diff --git a/drivers/regulator/stm32-booster.c b/drivers/regulator/stm32-booster.c
new file mode 100644
index 000000000000..2a897666c650
--- /dev/null
+++ b/drivers/regulator/stm32-booster.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) STMicroelectronics 2019
+// Author(s): Fabrice Gasnier <fabrice.gasnier@st.com>.
+
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+
+/* STM32H7 SYSCFG register */
+#define STM32H7_SYSCFG_PMCR 0x04
+#define STM32H7_SYSCFG_BOOSTE_MASK BIT(8)
+
+/* STM32MP1 SYSCFG has set and clear registers */
+#define STM32MP1_SYSCFG_PMCSETR 0x04
+#define STM32MP1_SYSCFG_PMCCLRR 0x44
+#define STM32MP1_SYSCFG_EN_BOOSTER_MASK BIT(8)
+
+static const struct regulator_ops stm32h7_booster_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+};
+
+static const struct regulator_desc stm32h7_booster_desc = {
+ .name = "booster",
+ .supply_name = "vdda",
+ .n_voltages = 1,
+ .type = REGULATOR_VOLTAGE,
+ .min_uV = 3300000,
+ .fixed_uV = 3300000,
+ .ramp_delay = 66000, /* up to 50us to stabilize */
+ .ops = &stm32h7_booster_ops,
+ .enable_reg = STM32H7_SYSCFG_PMCR,
+ .enable_mask = STM32H7_SYSCFG_BOOSTE_MASK,
+ .owner = THIS_MODULE,
+};
+
+static int stm32mp1_booster_enable(struct regulator_dev *rdev)
+{
+ return regmap_write(rdev->regmap, STM32MP1_SYSCFG_PMCSETR,
+ STM32MP1_SYSCFG_EN_BOOSTER_MASK);
+}
+
+static int stm32mp1_booster_disable(struct regulator_dev *rdev)
+{
+ return regmap_write(rdev->regmap, STM32MP1_SYSCFG_PMCCLRR,
+ STM32MP1_SYSCFG_EN_BOOSTER_MASK);
+}
+
+static const struct regulator_ops stm32mp1_booster_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .enable = stm32mp1_booster_enable,
+ .disable = stm32mp1_booster_disable,
+ .is_enabled = regulator_is_enabled_regmap,
+};
+
+static const struct regulator_desc stm32mp1_booster_desc = {
+ .name = "booster",
+ .supply_name = "vdda",
+ .n_voltages = 1,
+ .type = REGULATOR_VOLTAGE,
+ .min_uV = 3300000,
+ .fixed_uV = 3300000,
+ .ramp_delay = 66000,
+ .ops = &stm32mp1_booster_ops,
+ .enable_reg = STM32MP1_SYSCFG_PMCSETR,
+ .enable_mask = STM32MP1_SYSCFG_EN_BOOSTER_MASK,
+ .owner = THIS_MODULE,
+};
+
+static int stm32_booster_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = pdev->dev.of_node;
+ struct regulator_config config = { };
+ const struct regulator_desc *desc;
+ struct regulator_dev *rdev;
+ struct regmap *regmap;
+ int ret;
+
+ regmap = syscon_regmap_lookup_by_phandle(np, "st,syscfg");
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ desc = (const struct regulator_desc *)
+ of_match_device(dev->driver->of_match_table, dev)->data;
+
+ config.regmap = regmap;
+ config.dev = dev;
+ config.of_node = np;
+ config.init_data = of_get_regulator_init_data(dev, np, desc);
+
+ rdev = devm_regulator_register(dev, desc, &config);
+ if (IS_ERR(rdev)) {
+ ret = PTR_ERR(rdev);
+ dev_err(dev, "register failed with error %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id stm32_booster_of_match[] = {
+ {
+ .compatible = "st,stm32h7-booster",
+ .data = (void *)&stm32h7_booster_desc
+ }, {
+ .compatible = "st,stm32mp1-booster",
+ .data = (void *)&stm32mp1_booster_desc
+ }, {
+ },
+};
+MODULE_DEVICE_TABLE(of, stm32_booster_of_match);
+
+static struct platform_driver stm32_booster_driver = {
+ .probe = stm32_booster_probe,
+ .driver = {
+ .name = "stm32-booster",
+ .of_match_table = of_match_ptr(stm32_booster_of_match),
+ },
+};
+module_platform_driver(stm32_booster_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Fabrice Gasnier <fabrice.gasnier@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics STM32 booster regulator driver");
+MODULE_ALIAS("platform:stm32-booster");
diff --git a/drivers/regulator/tps65090-regulator.c b/drivers/regulator/tps65090-regulator.c
index ca39b3d55123..10ea4b5a0f55 100644
--- a/drivers/regulator/tps65090-regulator.c
+++ b/drivers/regulator/tps65090-regulator.c
@@ -371,11 +371,12 @@ static struct tps65090_platform_data *tps65090_parse_dt_reg_data(
"dcdc-ext-control-gpios", 0,
gflags,
"tps65090");
- if (IS_ERR(rpdata->gpiod))
- return ERR_CAST(rpdata->gpiod);
- if (!rpdata->gpiod)
+ if (PTR_ERR(rpdata->gpiod) == -ENOENT) {
dev_err(&pdev->dev,
"could not find DCDC external control GPIO\n");
+ rpdata->gpiod = NULL;
+ } else if (IS_ERR(rpdata->gpiod))
+ return ERR_CAST(rpdata->gpiod);
}
if (of_property_read_u32(tps65090_matches[idx].of_node,
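The tps65090 hunk above changes the optional GPIO handling so that a missing property (-ENOENT) is only logged while any other error is still propagated. A standalone sketch of that pattern with a stand-in lookup function; the scenarios and return codes are invented for illustration:

#include <stdio.h>
#include <errno.h>

/* Stand-in for the GPIO lookup: negative values model error pointers */
static long fake_gpiod_get(int scenario)
{
	switch (scenario) {
	case 0: return 0x1000;		/* pretend descriptor handle */
	case 1: return -ENOENT;		/* property simply not present */
	default: return -EINVAL;	/* some other, real failure */
	}
}

static int parse_dcdc_gpio(int scenario, long *gpiod)
{
	long d = fake_gpiod_get(scenario);

	if (d == -ENOENT) {
		fprintf(stderr, "no DCDC external control GPIO, continuing\n");
		*gpiod = 0;
		return 0;
	}
	if (d < 0)
		return (int)d;		/* genuine error: propagate it */
	*gpiod = d;
	return 0;
}

int main(void)
{
	long gpiod;
	int i;

	for (i = 0; i < 3; i++)
		printf("scenario %d -> ret %d\n", i, parse_dcdc_gpio(i, &gpiod));
	return 0;
}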
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
index b422eef97b77..018dbbd96771 100644
--- a/drivers/regulator/wm831x-dcdc.c
+++ b/drivers/regulator/wm831x-dcdc.c
@@ -15,7 +15,7 @@
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/slab.h>
#include <linux/mfd/wm831x/core.h>
@@ -50,7 +50,7 @@ struct wm831x_dcdc {
int base;
struct wm831x *wm831x;
struct regulator_dev *regulator;
- int dvs_gpio;
+ struct gpio_desc *dvs_gpiod;
int dvs_gpio_state;
int on_vsel;
int dvs_vsel;
@@ -217,7 +217,7 @@ static int wm831x_buckv_set_dvs(struct regulator_dev *rdev, int state)
return 0;
dcdc->dvs_gpio_state = state;
- gpio_set_value(dcdc->dvs_gpio, state);
+ gpiod_set_value(dcdc->dvs_gpiod, state);
/* Should wait for DVS state change to be asserted if we have
* a GPIO for it, for now assume the device is configured
@@ -237,10 +237,10 @@ static int wm831x_buckv_set_voltage_sel(struct regulator_dev *rdev,
int ret;
/* If this value is already set then do a GPIO update if we can */
- if (dcdc->dvs_gpio && dcdc->on_vsel == vsel)
+ if (dcdc->dvs_gpiod && dcdc->on_vsel == vsel)
return wm831x_buckv_set_dvs(rdev, 0);
- if (dcdc->dvs_gpio && dcdc->dvs_vsel == vsel)
+ if (dcdc->dvs_gpiod && dcdc->dvs_vsel == vsel)
return wm831x_buckv_set_dvs(rdev, 1);
/* Always set the ON status to the minimum voltage */
@@ -249,7 +249,7 @@ static int wm831x_buckv_set_voltage_sel(struct regulator_dev *rdev,
return ret;
dcdc->on_vsel = vsel;
- if (!dcdc->dvs_gpio)
+ if (!dcdc->dvs_gpiod)
return ret;
/* Kick the voltage transition now */
@@ -296,7 +296,7 @@ static int wm831x_buckv_get_voltage_sel(struct regulator_dev *rdev)
{
struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
- if (dcdc->dvs_gpio && dcdc->dvs_gpio_state)
+ if (dcdc->dvs_gpiod && dcdc->dvs_gpio_state)
return dcdc->dvs_vsel;
else
return dcdc->on_vsel;
@@ -337,7 +337,7 @@ static void wm831x_buckv_dvs_init(struct platform_device *pdev,
int ret;
u16 ctrl;
- if (!pdata || !pdata->dvs_gpio)
+ if (!pdata)
return;
/* gpiolib won't let us read the GPIO status so pick the higher
@@ -345,17 +345,14 @@ static void wm831x_buckv_dvs_init(struct platform_device *pdev,
*/
dcdc->dvs_gpio_state = pdata->dvs_init_state;
- ret = devm_gpio_request_one(&pdev->dev, pdata->dvs_gpio,
- dcdc->dvs_gpio_state ? GPIOF_INIT_HIGH : 0,
- "DCDC DVS");
- if (ret < 0) {
- dev_err(wm831x->dev, "Failed to get %s DVS GPIO: %d\n",
- dcdc->name, ret);
+ dcdc->dvs_gpiod = devm_gpiod_get(&pdev->dev, "dvs",
+ dcdc->dvs_gpio_state ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW);
+ if (IS_ERR(dcdc->dvs_gpiod)) {
+ dev_err(wm831x->dev, "Failed to get %s DVS GPIO: %ld\n",
+ dcdc->name, PTR_ERR(dcdc->dvs_gpiod));
return;
}
- dcdc->dvs_gpio = pdata->dvs_gpio;
-
switch (pdata->dvs_control_src) {
case 1:
ctrl = 2 << WM831X_DC1_DVS_SRC_SHIFT;
diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig
index 9ac7574e3cfb..a8682f69effc 100644
--- a/drivers/s390/block/Kconfig
+++ b/drivers/s390/block/Kconfig
@@ -38,7 +38,7 @@ config DASD_PROFILE
depends on DASD
help
Enable this option if you want to see profiling information
- in /proc/dasd/statistics.
+ in /proc/dasd/statistics.
config DASD_ECKD
def_tristate y
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index fab35c6170cc..245f33c2f71e 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -203,7 +203,7 @@ static int __init dasd_feature_list(char *str)
else if (len == 8 && !strncmp(str, "failfast", 8))
features |= DASD_FEATURE_FAILFAST;
else {
- pr_warn("%*s is not a supported device option\n",
+ pr_warn("%.*s is not a supported device option\n",
len, str);
rc = -EINVAL;
}
diff --git a/drivers/s390/char/Kconfig b/drivers/s390/char/Kconfig
index ab0b243a947d..6cc4b19acf85 100644
--- a/drivers/s390/char/Kconfig
+++ b/drivers/s390/char/Kconfig
@@ -79,27 +79,6 @@ config SCLP_VT220_CONSOLE
Include support for using an IBM SCLP VT220-compatible terminal as a
Linux system console.
-config SCLP_ASYNC
- def_tristate m
- prompt "Support for Call Home via Asynchronous SCLP Records"
- depends on S390
- help
- This option enables the call home function, which is able to inform
- the service element and connected organisations about a kernel panic.
- You should only select this option if you know what you are doing,
- want for inform other people about your kernel panics,
- need this feature and intend to run your kernel in LPAR.
-
-config SCLP_ASYNC_ID
- string "Component ID for Call Home"
- depends on SCLP_ASYNC
- default "000000000"
- help
- The Component ID for Call Home is used to identify the correct
- problem reporting queue the call home records should be sent to.
-
- If your are unsure, please use the default value "000000000".
-
config HMC_DRV
def_tristate m
prompt "Support for file transfers from HMC drive CD/DVD-ROM"
@@ -205,4 +184,3 @@ config S390_VMUR
depends on S390
help
Character device driver for z/VM reader, puncher and printer.
-
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index 3072b89785dd..b8a8816d94e7 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -31,7 +31,6 @@ obj-$(CONFIG_TN3215) += con3215.o
obj-$(CONFIG_SCLP_TTY) += sclp_tty.o
obj-$(CONFIG_SCLP_CONSOLE) += sclp_con.o
obj-$(CONFIG_SCLP_VT220_TTY) += sclp_vt220.o
-obj-$(CONFIG_SCLP_ASYNC) += sclp_async.o
obj-$(CONFIG_PCI) += sclp_pci.o
diff --git a/drivers/s390/char/sclp_async.c b/drivers/s390/char/sclp_async.c
deleted file mode 100644
index e69b12a40636..000000000000
--- a/drivers/s390/char/sclp_async.c
+++ /dev/null
@@ -1,189 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Enable Asynchronous Notification via SCLP.
- *
- * Copyright IBM Corp. 2009
- * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com>
- *
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/stat.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/ctype.h>
-#include <linux/kmod.h>
-#include <linux/err.h>
-#include <linux/errno.h>
-#include <linux/proc_fs.h>
-#include <linux/sysctl.h>
-#include <linux/utsname.h>
-#include "sclp.h"
-
-static int callhome_enabled;
-static struct sclp_req *request;
-static struct sclp_async_sccb *sccb;
-static int sclp_async_send_wait(char *message);
-static struct ctl_table_header *callhome_sysctl_header;
-static DEFINE_SPINLOCK(sclp_async_lock);
-#define SCLP_NORMAL_WRITE 0x00
-
-struct async_evbuf {
- struct evbuf_header header;
- u64 reserved;
- u8 rflags;
- u8 empty;
- u8 rtype;
- u8 otype;
- char comp_id[12];
- char data[3000]; /* there is still some space left */
-} __attribute__((packed));
-
-struct sclp_async_sccb {
- struct sccb_header header;
- struct async_evbuf evbuf;
-} __attribute__((packed));
-
-static struct sclp_register sclp_async_register = {
- .send_mask = EVTYP_ASYNC_MASK,
-};
-
-static int call_home_on_panic(struct notifier_block *self,
- unsigned long event, void *data)
-{
- strncat(data, init_utsname()->nodename,
- sizeof(init_utsname()->nodename));
- sclp_async_send_wait(data);
- return NOTIFY_DONE;
-}
-
-static struct notifier_block call_home_panic_nb = {
- .notifier_call = call_home_on_panic,
- .priority = INT_MAX,
-};
-
-static int zero;
-static int one = 1;
-
-static struct ctl_table callhome_table[] = {
- {
- .procname = "callhome",
- .data = &callhome_enabled,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &zero,
- .extra2 = &one,
- },
- {}
-};
-
-static struct ctl_table kern_dir_table[] = {
- {
- .procname = "kernel",
- .maxlen = 0,
- .mode = 0555,
- .child = callhome_table,
- },
- {}
-};
-
-/*
- * Function used to transfer asynchronous notification
- * records which waits for send completion
- */
-static int sclp_async_send_wait(char *message)
-{
- struct async_evbuf *evb;
- int rc;
- unsigned long flags;
-
- if (!callhome_enabled)
- return 0;
- sccb->evbuf.header.type = EVTYP_ASYNC;
- sccb->evbuf.rtype = 0xA5;
- sccb->evbuf.otype = 0x00;
- evb = &sccb->evbuf;
- request->command = SCLP_CMDW_WRITE_EVENT_DATA;
- request->sccb = sccb;
- request->status = SCLP_REQ_FILLED;
- strncpy(sccb->evbuf.data, message, sizeof(sccb->evbuf.data));
- /*
- * Retain Queue
- * e.g. 5639CC140 500 Red Hat RHEL5 Linux for zSeries (RHEL AS)
- */
- strncpy(sccb->evbuf.comp_id, CONFIG_SCLP_ASYNC_ID,
- sizeof(sccb->evbuf.comp_id));
- sccb->evbuf.header.length = sizeof(sccb->evbuf);
- sccb->header.length = sizeof(sccb->evbuf) + sizeof(sccb->header);
- sccb->header.function_code = SCLP_NORMAL_WRITE;
- rc = sclp_add_request(request);
- if (rc)
- return rc;
- spin_lock_irqsave(&sclp_async_lock, flags);
- while (request->status != SCLP_REQ_DONE &&
- request->status != SCLP_REQ_FAILED) {
- sclp_sync_wait();
- }
- spin_unlock_irqrestore(&sclp_async_lock, flags);
- if (request->status != SCLP_REQ_DONE)
- return -EIO;
- rc = ((struct sclp_async_sccb *)
- request->sccb)->header.response_code;
- if (rc != 0x0020)
- return -EIO;
- if (evb->header.flags != 0x80)
- return -EIO;
- return rc;
-}
-
-static int __init sclp_async_init(void)
-{
- int rc;
-
- rc = sclp_register(&sclp_async_register);
- if (rc)
- return rc;
- rc = -EOPNOTSUPP;
- if (!(sclp_async_register.sclp_receive_mask & EVTYP_ASYNC_MASK))
- goto out_sclp;
- rc = -ENOMEM;
- callhome_sysctl_header = register_sysctl_table(kern_dir_table);
- if (!callhome_sysctl_header)
- goto out_sclp;
- request = kzalloc(sizeof(struct sclp_req), GFP_KERNEL);
- sccb = (struct sclp_async_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
- if (!request || !sccb)
- goto out_mem;
- rc = atomic_notifier_chain_register(&panic_notifier_list,
- &call_home_panic_nb);
- if (!rc)
- goto out;
-out_mem:
- kfree(request);
- free_page((unsigned long) sccb);
- unregister_sysctl_table(callhome_sysctl_header);
-out_sclp:
- sclp_unregister(&sclp_async_register);
-out:
- return rc;
-}
-module_init(sclp_async_init);
-
-static void __exit sclp_async_exit(void)
-{
- atomic_notifier_chain_unregister(&panic_notifier_list,
- &call_home_panic_nb);
- unregister_sysctl_table(callhome_sysctl_header);
- sclp_unregister(&sclp_async_register);
- free_page((unsigned long) sccb);
- kfree(request);
-}
-module_exit(sclp_async_exit);
-
-MODULE_AUTHOR("Copyright IBM Corp. 2009");
-MODULE_AUTHOR("Hans-Joachim Picht <hans@linux.vnet.ibm.com>");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("SCLP Asynchronous Notification Records");
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 405a60538630..08f812475f5e 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -4,7 +4,7 @@
* dumps on SCSI disks (zfcpdump). The "zcore/mem" debugfs file shows the same
* dump format as s390 standalone dumps.
*
- * For more information please refer to Documentation/s390/zfcpdump.txt
+ * For more information please refer to Documentation/s390/zfcpdump.rst
*
* Copyright IBM Corp. 2003, 2008
* Author(s): Michael Holzheu
diff --git a/drivers/s390/cio/airq.c b/drivers/s390/cio/airq.c
index 4534afc63591..427b2e24a8ce 100644
--- a/drivers/s390/cio/airq.c
+++ b/drivers/s390/cio/airq.c
@@ -16,9 +16,11 @@
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/slab.h>
+#include <linux/dmapool.h>
#include <asm/airq.h>
#include <asm/isc.h>
+#include <asm/cio.h>
#include "cio.h"
#include "cio_debug.h"
@@ -27,7 +29,7 @@
static DEFINE_SPINLOCK(airq_lists_lock);
static struct hlist_head airq_lists[MAX_ISC+1];
-static struct kmem_cache *airq_iv_cache;
+static struct dma_pool *airq_iv_cache;
/**
* register_adapter_interrupt() - register adapter interrupt handler
@@ -115,6 +117,11 @@ void __init init_airq_interrupts(void)
setup_irq(THIN_INTERRUPT, &airq_interrupt);
}
+static inline unsigned long iv_size(unsigned long bits)
+{
+ return BITS_TO_LONGS(bits) * sizeof(unsigned long);
+}
+
/**
* airq_iv_create - create an interrupt vector
* @bits: number of bits in the interrupt vector
@@ -132,17 +139,19 @@ struct airq_iv *airq_iv_create(unsigned long bits, unsigned long flags)
goto out;
iv->bits = bits;
iv->flags = flags;
- size = BITS_TO_LONGS(bits) * sizeof(unsigned long);
+ size = iv_size(bits);
if (flags & AIRQ_IV_CACHELINE) {
- if ((cache_line_size() * BITS_PER_BYTE) < bits)
+ if ((cache_line_size() * BITS_PER_BYTE) < bits
+ || !airq_iv_cache)
goto out_free;
- iv->vector = kmem_cache_zalloc(airq_iv_cache, GFP_KERNEL);
+ iv->vector = dma_pool_zalloc(airq_iv_cache, GFP_KERNEL,
+ &iv->vector_dma);
if (!iv->vector)
goto out_free;
} else {
- iv->vector = kzalloc(size, GFP_KERNEL);
+ iv->vector = cio_dma_zalloc(size);
if (!iv->vector)
goto out_free;
}
@@ -178,10 +187,10 @@ out_free:
kfree(iv->ptr);
kfree(iv->bitlock);
kfree(iv->avail);
- if (iv->flags & AIRQ_IV_CACHELINE)
- kmem_cache_free(airq_iv_cache, iv->vector);
+ if (iv->flags & AIRQ_IV_CACHELINE && iv->vector)
+ dma_pool_free(airq_iv_cache, iv->vector, iv->vector_dma);
else
- kfree(iv->vector);
+ cio_dma_free(iv->vector, size);
kfree(iv);
out:
return NULL;
@@ -198,9 +207,9 @@ void airq_iv_release(struct airq_iv *iv)
kfree(iv->ptr);
kfree(iv->bitlock);
if (iv->flags & AIRQ_IV_CACHELINE)
- kmem_cache_free(airq_iv_cache, iv->vector);
+ dma_pool_free(airq_iv_cache, iv->vector, iv->vector_dma);
else
- kfree(iv->vector);
+ cio_dma_free(iv->vector, iv_size(iv->bits));
kfree(iv->avail);
kfree(iv);
}
@@ -295,12 +304,12 @@ unsigned long airq_iv_scan(struct airq_iv *iv, unsigned long start,
}
EXPORT_SYMBOL(airq_iv_scan);
-static int __init airq_init(void)
+int __init airq_init(void)
{
- airq_iv_cache = kmem_cache_create("airq_iv_cache", cache_line_size(),
- cache_line_size(), 0, NULL);
+ airq_iv_cache = dma_pool_create("airq_iv_cache", cio_get_dma_css_dev(),
+ cache_line_size(),
+ cache_line_size(), PAGE_SIZE);
if (!airq_iv_cache)
return -ENOMEM;
return 0;
}
-subsys_initcall(airq_init);
diff --git a/drivers/s390/cio/ccwreq.c b/drivers/s390/cio/ccwreq.c
index 603268a33ea1..73582a0a2622 100644
--- a/drivers/s390/cio/ccwreq.c
+++ b/drivers/s390/cio/ccwreq.c
@@ -63,7 +63,7 @@ static void ccwreq_stop(struct ccw_device *cdev, int rc)
return;
req->done = 1;
ccw_device_set_timeout(cdev, 0);
- memset(&cdev->private->irb, 0, sizeof(struct irb));
+ memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
if (rc && rc != -ENODEV && req->drc)
rc = req->drc;
req->callback(cdev, req->data, rc);
@@ -86,7 +86,7 @@ static void ccwreq_do(struct ccw_device *cdev)
continue;
}
/* Perform start function. */
- memset(&cdev->private->irb, 0, sizeof(struct irb));
+ memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
rc = cio_start(sch, cp, (u8) req->mask);
if (rc == 0) {
/* I/O started successfully. */
@@ -169,7 +169,7 @@ int ccw_request_cancel(struct ccw_device *cdev)
*/
static enum io_status ccwreq_status(struct ccw_device *cdev, struct irb *lcirb)
{
- struct irb *irb = &cdev->private->irb;
+ struct irb *irb = &cdev->private->dma_area->irb;
struct cmd_scsw *scsw = &irb->scsw.cmd;
enum uc_todo todo;
@@ -187,7 +187,8 @@ static enum io_status ccwreq_status(struct ccw_device *cdev, struct irb *lcirb)
CIO_TRACE_EVENT(2, "sensedata");
CIO_HEX_EVENT(2, &cdev->private->dev_id,
sizeof(struct ccw_dev_id));
- CIO_HEX_EVENT(2, &cdev->private->irb.ecw, SENSE_MAX_COUNT);
+ CIO_HEX_EVENT(2, &cdev->private->dma_area->irb.ecw,
+ SENSE_MAX_COUNT);
/* Check for command reject. */
if (irb->ecw[0] & SNS0_CMD_REJECT)
return IO_REJECTED;
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index a835b31aad99..6392a1b95b02 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -323,36 +323,6 @@ struct chsc_sei {
} __packed __aligned(PAGE_SIZE);
/*
- * Node Descriptor as defined in SA22-7204, "Common I/O-Device Commands"
- */
-
-#define ND_VALIDITY_VALID 0
-#define ND_VALIDITY_OUTDATED 1
-#define ND_VALIDITY_INVALID 2
-
-struct node_descriptor {
- /* Flags. */
- union {
- struct {
- u32 validity:3;
- u32 reserved:5;
- } __packed;
- u8 byte0;
- } __packed;
-
- /* Node parameters. */
- u32 params:24;
-
- /* Node ID. */
- char type[6];
- char model[3];
- char manufacturer[3];
- char plant[2];
- char seq[12];
- u16 tag;
-} __packed;
-
-/*
* Link Incident Record as defined in SA22-7202, "ESCON I/O Interface"
*/
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 06a91743335a..ba7d2480613b 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -113,6 +113,7 @@ struct subchannel {
enum sch_todo todo;
struct work_struct todo_work;
struct schib_config config;
+ char *driver_override; /* Driver name to force a match */
} __attribute__ ((aligned(8)));
DECLARE_PER_CPU_ALIGNED(struct irb, cio_irb);
@@ -135,6 +136,8 @@ extern int cio_commit_config(struct subchannel *sch);
int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key);
int cio_tm_intrg(struct subchannel *sch);
+extern int __init airq_init(void);
+
/* Use with care. */
#ifdef CONFIG_CCW_CONSOLE
extern struct subchannel *cio_probe_console(void);
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index aea502922646..e1f2d0eed544 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -20,6 +20,8 @@
#include <linux/reboot.h>
#include <linux/suspend.h>
#include <linux/proc_fs.h>
+#include <linux/genalloc.h>
+#include <linux/dma-mapping.h>
#include <asm/isc.h>
#include <asm/crw.h>
@@ -165,6 +167,7 @@ static void css_subchannel_release(struct device *dev)
sch->config.intparm = 0;
cio_commit_config(sch);
+ kfree(sch->driver_override);
kfree(sch->lock);
kfree(sch);
}
@@ -224,6 +227,12 @@ struct subchannel *css_alloc_subchannel(struct subchannel_id schid,
INIT_WORK(&sch->todo_work, css_sch_todo);
sch->dev.release = &css_subchannel_release;
device_initialize(&sch->dev);
+ /*
+ * The physical addresses of some of the dma structures that can
+ * belong to a subchannel need to fit 31 bit width (e.g. ccw).
+ */
+ sch->dev.coherent_dma_mask = DMA_BIT_MASK(31);
+ sch->dev.dma_mask = &sch->dev.coherent_dma_mask;
return sch;
err:
@@ -315,9 +324,57 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RO(modalias);
+static ssize_t driver_override_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct subchannel *sch = to_subchannel(dev);
+ char *driver_override, *old, *cp;
+
+ /* We need to keep extra room for a newline */
+ if (count >= (PAGE_SIZE - 1))
+ return -EINVAL;
+
+ driver_override = kstrndup(buf, count, GFP_KERNEL);
+ if (!driver_override)
+ return -ENOMEM;
+
+ cp = strchr(driver_override, '\n');
+ if (cp)
+ *cp = '\0';
+
+ device_lock(dev);
+ old = sch->driver_override;
+ if (strlen(driver_override)) {
+ sch->driver_override = driver_override;
+ } else {
+ kfree(driver_override);
+ sch->driver_override = NULL;
+ }
+ device_unlock(dev);
+
+ kfree(old);
+
+ return count;
+}
+
+static ssize_t driver_override_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct subchannel *sch = to_subchannel(dev);
+ ssize_t len;
+
+ device_lock(dev);
+ len = snprintf(buf, PAGE_SIZE, "%s\n", sch->driver_override);
+ device_unlock(dev);
+ return len;
+}
+static DEVICE_ATTR_RW(driver_override);
+
static struct attribute *subch_attrs[] = {
&dev_attr_type.attr,
&dev_attr_modalias.attr,
+ &dev_attr_driver_override.attr,
NULL,
};
@@ -899,6 +956,13 @@ static int __init setup_css(int nr)
dev_set_name(&css->device, "css%x", nr);
css->device.groups = cssdev_attr_groups;
css->device.release = channel_subsystem_release;
+ /*
+ * We currently allocate notifier bits with this (using
+ * css->device as the device argument with the DMA API)
+ * and are fine with 64 bit addresses.
+ */
+ css->device.coherent_dma_mask = DMA_BIT_MASK(64);
+ css->device.dma_mask = &css->device.coherent_dma_mask;
mutex_init(&css->mutex);
css->cssid = chsc_get_cssid(nr);
@@ -1018,6 +1082,111 @@ static struct notifier_block css_power_notifier = {
.notifier_call = css_power_event,
};
+#define CIO_DMA_GFP (GFP_KERNEL | __GFP_ZERO)
+static struct gen_pool *cio_dma_pool;
+
+/* Currently cio supports only a single css */
+struct device *cio_get_dma_css_dev(void)
+{
+ return &channel_subsystems[0]->device;
+}
+
+struct gen_pool *cio_gp_dma_create(struct device *dma_dev, int nr_pages)
+{
+ struct gen_pool *gp_dma;
+ void *cpu_addr;
+ dma_addr_t dma_addr;
+ int i;
+
+ gp_dma = gen_pool_create(3, -1);
+ if (!gp_dma)
+ return NULL;
+ for (i = 0; i < nr_pages; ++i) {
+ cpu_addr = dma_alloc_coherent(dma_dev, PAGE_SIZE, &dma_addr,
+ CIO_DMA_GFP);
+ if (!cpu_addr)
+ return gp_dma;
+ gen_pool_add_virt(gp_dma, (unsigned long) cpu_addr,
+ dma_addr, PAGE_SIZE, -1);
+ }
+ return gp_dma;
+}
+
+static void __gp_dma_free_dma(struct gen_pool *pool,
+ struct gen_pool_chunk *chunk, void *data)
+{
+ size_t chunk_size = chunk->end_addr - chunk->start_addr + 1;
+
+ dma_free_coherent((struct device *) data, chunk_size,
+ (void *) chunk->start_addr,
+ (dma_addr_t) chunk->phys_addr);
+}
+
+void cio_gp_dma_destroy(struct gen_pool *gp_dma, struct device *dma_dev)
+{
+ if (!gp_dma)
+ return;
+ /* this is quite ugly, but there is no better way to free the chunks */
+ gen_pool_for_each_chunk(gp_dma, __gp_dma_free_dma, dma_dev);
+ gen_pool_destroy(gp_dma);
+}
+
+static int cio_dma_pool_init(void)
+{
+ /* No need to free up the resources: compiled in */
+ cio_dma_pool = cio_gp_dma_create(cio_get_dma_css_dev(), 1);
+ if (!cio_dma_pool)
+ return -ENOMEM;
+ return 0;
+}
+
+void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev,
+ size_t size)
+{
+ dma_addr_t dma_addr;
+ unsigned long addr;
+ size_t chunk_size;
+
+ if (!gp_dma)
+ return NULL;
+ addr = gen_pool_alloc(gp_dma, size);
+ while (!addr) {
+ chunk_size = round_up(size, PAGE_SIZE);
+ addr = (unsigned long) dma_alloc_coherent(dma_dev,
+ chunk_size, &dma_addr, CIO_DMA_GFP);
+ if (!addr)
+ return NULL;
+ gen_pool_add_virt(gp_dma, addr, dma_addr, chunk_size, -1);
+ addr = gen_pool_alloc(gp_dma, size);
+ }
+ return (void *) addr;
+}
+
+void cio_gp_dma_free(struct gen_pool *gp_dma, void *cpu_addr, size_t size)
+{
+ if (!cpu_addr)
+ return;
+ memset(cpu_addr, 0, size);
+ gen_pool_free(gp_dma, (unsigned long) cpu_addr, size);
+}
+
+/*
+ * Allocate dma memory from the css global pool. Intended for memory not
+ * specific to any single device within the css. The allocated memory
+ * is not guaranteed to be 31-bit addressable.
+ *
+ * Caution: Not suitable for early stuff like console.
+ */
+void *cio_dma_zalloc(size_t size)
+{
+ return cio_gp_dma_zalloc(cio_dma_pool, cio_get_dma_css_dev(), size);
+}
+
+void cio_dma_free(void *cpu_addr, size_t size)
+{
+ cio_gp_dma_free(cio_dma_pool, cpu_addr, size);
+}
+
/*
* Now that the driver core is running, we can setup our channel subsystem.
* The struct subchannel's are created during probing.
@@ -1059,16 +1228,22 @@ static int __init css_bus_init(void)
if (ret)
goto out_unregister;
ret = register_pm_notifier(&css_power_notifier);
- if (ret) {
- unregister_reboot_notifier(&css_reboot_notifier);
- goto out_unregister;
- }
+ if (ret)
+ goto out_unregister_rn;
+ ret = cio_dma_pool_init();
+ if (ret)
+ goto out_unregister_pmn;
+ airq_init();
css_init_done = 1;
/* Enable default isc for I/O subchannels. */
isc_register(IO_SCH_ISC);
return 0;
+out_unregister_pmn:
+ unregister_pm_notifier(&css_power_notifier);
+out_unregister_rn:
+ unregister_reboot_notifier(&css_reboot_notifier);
out_unregister:
while (i-- > 0) {
struct channel_subsystem *css = channel_subsystems[i];
@@ -1222,6 +1397,10 @@ static int css_bus_match(struct device *dev, struct device_driver *drv)
struct css_driver *driver = to_cssdriver(drv);
struct css_device_id *id;
+ /* When driver_override is set, only bind to the matching driver */
+ if (sch->driver_override && strcmp(sch->driver_override, drv->name))
+ return 0;
+
for (id = driver->subchannel_type; id->match_flags; id++) {
if (sch->st == id->type)
return 1;
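For context on the driver_override attribute introduced in css.c above: the store path strips the trailing newline that echo(1) appends and treats an empty string as clearing the override, after which css_bus_match() only binds the named driver. A userspace sketch of just that parsing step; the driver name written below is only an example:

#define _POSIX_C_SOURCE 200809L
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Trailing newline (echo adds one) is stripped; an empty string clears
 * the override so normal subchannel-type matching applies again.
 */
static char *parse_override(const char *buf, size_t count)
{
	char *s, *nl;

	s = strndup(buf, count);
	if (!s)
		return NULL;
	nl = strchr(s, '\n');
	if (nl)
		*nl = '\0';
	if (!strlen(s)) {
		free(s);
		return NULL;		/* empty: no override in effect */
	}
	return s;
}

int main(void)
{
	char *ovr;

	ovr = parse_override("vfio_ccw\n", 9);
	printf("override: %s\n", ovr ? ovr : "(none)");
	free(ovr);

	ovr = parse_override("\n", 1);
	printf("override: %s\n", ovr ? ovr : "(none)");
	free(ovr);
	return 0;
}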
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 1540229a37bb..9985b7484a6b 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -24,6 +24,7 @@
#include <linux/timer.h>
#include <linux/kernel_stat.h>
#include <linux/sched/signal.h>
+#include <linux/dma-mapping.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>
@@ -687,6 +688,9 @@ ccw_device_release(struct device *dev)
struct ccw_device *cdev;
cdev = to_ccwdev(dev);
+ cio_gp_dma_free(cdev->private->dma_pool, cdev->private->dma_area,
+ sizeof(*cdev->private->dma_area));
+ cio_gp_dma_destroy(cdev->private->dma_pool, &cdev->dev);
/* Release reference of parent subchannel. */
put_device(cdev->dev.parent);
kfree(cdev->private);
@@ -696,15 +700,33 @@ ccw_device_release(struct device *dev)
static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch)
{
struct ccw_device *cdev;
+ struct gen_pool *dma_pool;
cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
- if (cdev) {
- cdev->private = kzalloc(sizeof(struct ccw_device_private),
- GFP_KERNEL | GFP_DMA);
- if (cdev->private)
- return cdev;
- }
+ if (!cdev)
+ goto err_cdev;
+ cdev->private = kzalloc(sizeof(struct ccw_device_private),
+ GFP_KERNEL | GFP_DMA);
+ if (!cdev->private)
+ goto err_priv;
+ cdev->dev.coherent_dma_mask = sch->dev.coherent_dma_mask;
+ cdev->dev.dma_mask = &cdev->dev.coherent_dma_mask;
+ dma_pool = cio_gp_dma_create(&cdev->dev, 1);
+ if (!dma_pool)
+ goto err_dma_pool;
+ cdev->private->dma_pool = dma_pool;
+ cdev->private->dma_area = cio_gp_dma_zalloc(dma_pool, &cdev->dev,
+ sizeof(*cdev->private->dma_area));
+ if (!cdev->private->dma_area)
+ goto err_dma_area;
+ return cdev;
+err_dma_area:
+ cio_gp_dma_destroy(dma_pool, &cdev->dev);
+err_dma_pool:
+ kfree(cdev->private);
+err_priv:
kfree(cdev);
+err_cdev:
return ERR_PTR(-ENOMEM);
}
@@ -884,7 +906,7 @@ io_subchannel_recog_done(struct ccw_device *cdev)
wake_up(&ccw_device_init_wq);
break;
case DEV_STATE_OFFLINE:
- /*
+ /*
* We can't register the device in interrupt context so
* we schedule a work item.
*/
@@ -1062,6 +1084,14 @@ static int io_subchannel_probe(struct subchannel *sch)
if (!io_priv)
goto out_schedule;
+ io_priv->dma_area = dma_alloc_coherent(&sch->dev,
+ sizeof(*io_priv->dma_area),
+ &io_priv->dma_area_dma, GFP_KERNEL);
+ if (!io_priv->dma_area) {
+ kfree(io_priv);
+ goto out_schedule;
+ }
+
set_io_private(sch, io_priv);
css_schedule_eval(sch->schid);
return 0;
@@ -1088,6 +1118,8 @@ static int io_subchannel_remove(struct subchannel *sch)
set_io_private(sch, NULL);
spin_unlock_irq(sch->lock);
out_free:
+ dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
+ io_priv->dma_area, io_priv->dma_area_dma);
kfree(io_priv);
sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
return 0;
@@ -1593,13 +1625,19 @@ struct ccw_device * __init ccw_device_create_console(struct ccw_driver *drv)
return ERR_CAST(sch);
io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
- if (!io_priv) {
- put_device(&sch->dev);
- return ERR_PTR(-ENOMEM);
- }
+ if (!io_priv)
+ goto err_priv;
+ io_priv->dma_area = dma_alloc_coherent(&sch->dev,
+ sizeof(*io_priv->dma_area),
+ &io_priv->dma_area_dma, GFP_KERNEL);
+ if (!io_priv->dma_area)
+ goto err_dma_area;
set_io_private(sch, io_priv);
cdev = io_subchannel_create_ccwdev(sch);
if (IS_ERR(cdev)) {
+ dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
+ io_priv->dma_area, io_priv->dma_area_dma);
+ set_io_private(sch, NULL);
put_device(&sch->dev);
kfree(io_priv);
return cdev;
@@ -1607,6 +1645,12 @@ struct ccw_device * __init ccw_device_create_console(struct ccw_driver *drv)
cdev->drv = drv;
ccw_device_set_int_class(cdev);
return cdev;
+
+err_dma_area:
+ kfree(io_priv);
+err_priv:
+ put_device(&sch->dev);
+ return ERR_PTR(-ENOMEM);
}
void __init ccw_device_destroy_console(struct ccw_device *cdev)
@@ -1617,6 +1661,8 @@ void __init ccw_device_destroy_console(struct ccw_device *cdev)
set_io_private(sch, NULL);
put_device(&sch->dev);
put_device(&cdev->dev);
+ dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
+ io_priv->dma_area, io_priv->dma_area_dma);
kfree(io_priv);
}
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 9169af7dbb43..8fc267324ebb 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -67,8 +67,10 @@ static void ccw_timeout_log(struct ccw_device *cdev)
sizeof(struct tcw), 0);
} else {
printk(KERN_WARNING "cio: orb indicates command mode\n");
- if ((void *)(addr_t)orb->cmd.cpa == &private->sense_ccw ||
- (void *)(addr_t)orb->cmd.cpa == cdev->private->iccws)
+ if ((void *)(addr_t)orb->cmd.cpa ==
+ &private->dma_area->sense_ccw ||
+ (void *)(addr_t)orb->cmd.cpa ==
+ cdev->private->dma_area->iccws)
printk(KERN_WARNING "cio: last channel program "
"(intern):\n");
else
@@ -143,18 +145,22 @@ ccw_device_cancel_halt_clear(struct ccw_device *cdev)
void ccw_device_update_sense_data(struct ccw_device *cdev)
{
memset(&cdev->id, 0, sizeof(cdev->id));
- cdev->id.cu_type = cdev->private->senseid.cu_type;
- cdev->id.cu_model = cdev->private->senseid.cu_model;
- cdev->id.dev_type = cdev->private->senseid.dev_type;
- cdev->id.dev_model = cdev->private->senseid.dev_model;
+ cdev->id.cu_type = cdev->private->dma_area->senseid.cu_type;
+ cdev->id.cu_model = cdev->private->dma_area->senseid.cu_model;
+ cdev->id.dev_type = cdev->private->dma_area->senseid.dev_type;
+ cdev->id.dev_model = cdev->private->dma_area->senseid.dev_model;
}
int ccw_device_test_sense_data(struct ccw_device *cdev)
{
- return cdev->id.cu_type == cdev->private->senseid.cu_type &&
- cdev->id.cu_model == cdev->private->senseid.cu_model &&
- cdev->id.dev_type == cdev->private->senseid.dev_type &&
- cdev->id.dev_model == cdev->private->senseid.dev_model;
+ return cdev->id.cu_type ==
+ cdev->private->dma_area->senseid.cu_type &&
+ cdev->id.cu_model ==
+ cdev->private->dma_area->senseid.cu_model &&
+ cdev->id.dev_type ==
+ cdev->private->dma_area->senseid.dev_type &&
+ cdev->id.dev_model ==
+ cdev->private->dma_area->senseid.dev_model;
}
/*
@@ -342,7 +348,7 @@ ccw_device_done(struct ccw_device *cdev, int state)
cio_disable_subchannel(sch);
/* Reset device status. */
- memset(&cdev->private->irb, 0, sizeof(struct irb));
+ memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
cdev->private->state = state;
@@ -509,13 +515,14 @@ callback:
ccw_device_done(cdev, DEV_STATE_ONLINE);
/* Deliver fake irb to device driver, if needed. */
if (cdev->private->flags.fake_irb) {
- create_fake_irb(&cdev->private->irb,
+ create_fake_irb(&cdev->private->dma_area->irb,
cdev->private->flags.fake_irb);
cdev->private->flags.fake_irb = 0;
if (cdev->handler)
cdev->handler(cdev, cdev->private->intparm,
- &cdev->private->irb);
- memset(&cdev->private->irb, 0, sizeof(struct irb));
+ &cdev->private->dma_area->irb);
+ memset(&cdev->private->dma_area->irb, 0,
+ sizeof(struct irb));
}
ccw_device_report_path_events(cdev);
ccw_device_handle_broken_paths(cdev);
@@ -672,7 +679,8 @@ ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
if (scsw_actl(&sch->schib.scsw) != 0 ||
(scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_STATUS_PEND) ||
- (scsw_stctl(&cdev->private->irb.scsw) & SCSW_STCTL_STATUS_PEND)) {
+ (scsw_stctl(&cdev->private->dma_area->irb.scsw) &
+ SCSW_STCTL_STATUS_PEND)) {
/*
* No final status yet or final status not yet delivered
* to the device driver. Can't do path verification now,
@@ -719,7 +727,7 @@ static int ccw_device_call_handler(struct ccw_device *cdev)
* - fast notification was requested (primary status)
* - unsolicited interrupts
*/
- stctl = scsw_stctl(&cdev->private->irb.scsw);
+ stctl = scsw_stctl(&cdev->private->dma_area->irb.scsw);
ending_status = (stctl & SCSW_STCTL_SEC_STATUS) ||
(stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) ||
(stctl == SCSW_STCTL_STATUS_PEND);
@@ -735,9 +743,9 @@ static int ccw_device_call_handler(struct ccw_device *cdev)
if (cdev->handler)
cdev->handler(cdev, cdev->private->intparm,
- &cdev->private->irb);
+ &cdev->private->dma_area->irb);
- memset(&cdev->private->irb, 0, sizeof(struct irb));
+ memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
return 1;
}
@@ -759,7 +767,8 @@ ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
/* Unit check but no sense data. Need basic sense. */
if (ccw_device_do_sense(cdev, irb) != 0)
goto call_handler_unsol;
- memcpy(&cdev->private->irb, irb, sizeof(struct irb));
+ memcpy(&cdev->private->dma_area->irb, irb,
+ sizeof(struct irb));
cdev->private->state = DEV_STATE_W4SENSE;
cdev->private->intparm = 0;
return;
@@ -842,7 +851,7 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
if (scsw_fctl(&irb->scsw) &
(SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) {
cdev->private->flags.dosense = 0;
- memset(&cdev->private->irb, 0, sizeof(struct irb));
+ memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
ccw_device_accumulate_irb(cdev, irb);
goto call_handler;
}
diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c
index f6df83a9dfbb..740996d0dc8c 100644
--- a/drivers/s390/cio/device_id.c
+++ b/drivers/s390/cio/device_id.c
@@ -99,7 +99,7 @@ static int diag210_to_senseid(struct senseid *senseid, struct diag210 *diag)
static int diag210_get_dev_info(struct ccw_device *cdev)
{
struct ccw_dev_id *dev_id = &cdev->private->dev_id;
- struct senseid *senseid = &cdev->private->senseid;
+ struct senseid *senseid = &cdev->private->dma_area->senseid;
struct diag210 diag_data;
int rc;
@@ -134,8 +134,10 @@ err_failed:
static void snsid_init(struct ccw_device *cdev)
{
cdev->private->flags.esid = 0;
- memset(&cdev->private->senseid, 0, sizeof(cdev->private->senseid));
- cdev->private->senseid.cu_type = 0xffff;
+
+ memset(&cdev->private->dma_area->senseid, 0,
+ sizeof(cdev->private->dma_area->senseid));
+ cdev->private->dma_area->senseid.cu_type = 0xffff;
}
/*
@@ -143,16 +145,16 @@ static void snsid_init(struct ccw_device *cdev)
*/
static int snsid_check(struct ccw_device *cdev, void *data)
{
- struct cmd_scsw *scsw = &cdev->private->irb.scsw.cmd;
+ struct cmd_scsw *scsw = &cdev->private->dma_area->irb.scsw.cmd;
int len = sizeof(struct senseid) - scsw->count;
/* Check for incomplete SENSE ID data. */
if (len < SENSE_ID_MIN_LEN)
goto out_restart;
- if (cdev->private->senseid.cu_type == 0xffff)
+ if (cdev->private->dma_area->senseid.cu_type == 0xffff)
goto out_restart;
/* Check for incompatible SENSE ID data. */
- if (cdev->private->senseid.reserved != 0xff)
+ if (cdev->private->dma_area->senseid.reserved != 0xff)
return -EOPNOTSUPP;
/* Check for extended-identification information. */
if (len > SENSE_ID_BASIC_LEN)
@@ -170,7 +172,7 @@ out_restart:
static void snsid_callback(struct ccw_device *cdev, void *data, int rc)
{
struct ccw_dev_id *id = &cdev->private->dev_id;
- struct senseid *senseid = &cdev->private->senseid;
+ struct senseid *senseid = &cdev->private->dma_area->senseid;
int vm = 0;
if (rc && MACHINE_IS_VM) {
@@ -200,7 +202,7 @@ void ccw_device_sense_id_start(struct ccw_device *cdev)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
struct ccw_request *req = &cdev->private->req;
- struct ccw1 *cp = cdev->private->iccws;
+ struct ccw1 *cp = cdev->private->dma_area->iccws;
CIO_TRACE_EVENT(4, "snsid");
CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
@@ -208,7 +210,7 @@ void ccw_device_sense_id_start(struct ccw_device *cdev)
snsid_init(cdev);
/* Channel program setup. */
cp->cmd_code = CCW_CMD_SENSE_ID;
- cp->cda = (u32) (addr_t) &cdev->private->senseid;
+ cp->cda = (u32) (addr_t) &cdev->private->dma_area->senseid;
cp->count = sizeof(struct senseid);
cp->flags = CCW_FLAG_SLI;
/* Request setup. */
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 4435ae0b3027..d722458c5928 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -429,8 +429,8 @@ struct ciw *ccw_device_get_ciw(struct ccw_device *cdev, __u32 ct)
if (cdev->private->flags.esid == 0)
return NULL;
for (ciw_cnt = 0; ciw_cnt < MAX_CIWS; ciw_cnt++)
- if (cdev->private->senseid.ciw[ciw_cnt].ct == ct)
- return cdev->private->senseid.ciw + ciw_cnt;
+ if (cdev->private->dma_area->senseid.ciw[ciw_cnt].ct == ct)
+ return cdev->private->dma_area->senseid.ciw + ciw_cnt;
return NULL;
}
@@ -699,6 +699,23 @@ void ccw_device_get_schid(struct ccw_device *cdev, struct subchannel_id *schid)
}
EXPORT_SYMBOL_GPL(ccw_device_get_schid);
+/*
+ * Allocate zeroed, DMA-coherent, 31-bit addressable memory using
+ * the subchannel's DMA pool. The maximum supported allocation size
+ * is PAGE_SIZE.
+ */
+void *ccw_device_dma_zalloc(struct ccw_device *cdev, size_t size)
+{
+ return cio_gp_dma_zalloc(cdev->private->dma_pool, &cdev->dev, size);
+}
+EXPORT_SYMBOL(ccw_device_dma_zalloc);
+
+void ccw_device_dma_free(struct ccw_device *cdev, void *cpu_addr, size_t size)
+{
+ cio_gp_dma_free(cdev->private->dma_pool, cpu_addr, size);
+}
+EXPORT_SYMBOL(ccw_device_dma_free);
+
EXPORT_SYMBOL(ccw_device_set_options_mask);
EXPORT_SYMBOL(ccw_device_set_options);
EXPORT_SYMBOL(ccw_device_clear_options);
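As a usage illustration (not part of the patch), a CCW device driver could draw its channel-program memory from the per-device pool exported above; the foo_* names and layout are hypothetical, and each allocation must stay within PAGE_SIZE per the comment on ccw_device_dma_zalloc().

/*
 * Hypothetical driver snippet: a per-device DMA area allocated when the
 * device is set up and released again on teardown, using the exported
 * helpers.
 */
struct foo_dma {
	struct ccw1 ccw;
	u8 sense[32];
};

static int foo_setup_dma(struct ccw_device *cdev)
{
	struct foo_dma *dma;

	dma = ccw_device_dma_zalloc(cdev, sizeof(*dma));
	if (!dma)
		return -ENOMEM;
	dev_set_drvdata(&cdev->dev, dma);
	return 0;
}

static void foo_free_dma(struct ccw_device *cdev)
{
	struct foo_dma *dma = dev_get_drvdata(&cdev->dev);

	ccw_device_dma_free(cdev, dma, sizeof(*dma));
	dev_set_drvdata(&cdev->dev, NULL);
}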
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index d30a3babf176..767a85635a0f 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -57,7 +57,7 @@ out:
static void nop_build_cp(struct ccw_device *cdev)
{
struct ccw_request *req = &cdev->private->req;
- struct ccw1 *cp = cdev->private->iccws;
+ struct ccw1 *cp = cdev->private->dma_area->iccws;
cp->cmd_code = CCW_CMD_NOOP;
cp->cda = 0;
@@ -134,9 +134,9 @@ err:
static void spid_build_cp(struct ccw_device *cdev, u8 fn)
{
struct ccw_request *req = &cdev->private->req;
- struct ccw1 *cp = cdev->private->iccws;
+ struct ccw1 *cp = cdev->private->dma_area->iccws;
int i = pathmask_to_pos(req->lpm);
- struct pgid *pgid = &cdev->private->pgid[i];
+ struct pgid *pgid = &cdev->private->dma_area->pgid[i];
pgid->inf.fc = fn;
cp->cmd_code = CCW_CMD_SET_PGID;
@@ -300,7 +300,7 @@ static int pgid_cmp(struct pgid *p1, struct pgid *p2)
static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
int *mismatch, u8 *reserved, u8 *reset)
{
- struct pgid *pgid = &cdev->private->pgid[0];
+ struct pgid *pgid = &cdev->private->dma_area->pgid[0];
struct pgid *first = NULL;
int lpm;
int i;
@@ -342,7 +342,7 @@ static u8 pgid_to_donepm(struct ccw_device *cdev)
lpm = 0x80 >> i;
if ((cdev->private->pgid_valid_mask & lpm) == 0)
continue;
- pgid = &cdev->private->pgid[i];
+ pgid = &cdev->private->dma_area->pgid[i];
if (sch->opm & lpm) {
if (pgid->inf.ps.state1 != SNID_STATE1_GROUPED)
continue;
@@ -368,7 +368,8 @@ static void pgid_fill(struct ccw_device *cdev, struct pgid *pgid)
int i;
for (i = 0; i < 8; i++)
- memcpy(&cdev->private->pgid[i], pgid, sizeof(struct pgid));
+ memcpy(&cdev->private->dma_area->pgid[i], pgid,
+ sizeof(struct pgid));
}
/*
@@ -435,12 +436,12 @@ out:
static void snid_build_cp(struct ccw_device *cdev)
{
struct ccw_request *req = &cdev->private->req;
- struct ccw1 *cp = cdev->private->iccws;
+ struct ccw1 *cp = cdev->private->dma_area->iccws;
int i = pathmask_to_pos(req->lpm);
/* Channel program setup. */
cp->cmd_code = CCW_CMD_SENSE_PGID;
- cp->cda = (u32) (addr_t) &cdev->private->pgid[i];
+ cp->cda = (u32) (addr_t) &cdev->private->dma_area->pgid[i];
cp->count = sizeof(struct pgid);
cp->flags = CCW_FLAG_SLI;
req->cp = cp;
@@ -516,7 +517,8 @@ static void verify_start(struct ccw_device *cdev)
sch->lpm = sch->schib.pmcw.pam;
/* Initialize PGID data. */
- memset(cdev->private->pgid, 0, sizeof(cdev->private->pgid));
+ memset(cdev->private->dma_area->pgid, 0,
+ sizeof(cdev->private->dma_area->pgid));
cdev->private->pgid_valid_mask = 0;
cdev->private->pgid_todo_mask = sch->schib.pmcw.pam;
cdev->private->path_notoper_mask = 0;
@@ -626,7 +628,7 @@ struct stlck_data {
static void stlck_build_cp(struct ccw_device *cdev, void *buf1, void *buf2)
{
struct ccw_request *req = &cdev->private->req;
- struct ccw1 *cp = cdev->private->iccws;
+ struct ccw1 *cp = cdev->private->dma_area->iccws;
cp[0].cmd_code = CCW_CMD_STLCK;
cp[0].cda = (u32) (addr_t) buf1;
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c
index 7d5c7892b2c4..0bd8f2642732 100644
--- a/drivers/s390/cio/device_status.c
+++ b/drivers/s390/cio/device_status.c
@@ -79,15 +79,15 @@ ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb)
* are condition that have to be met for the extended control
* bit to have meaning. Sick.
*/
- cdev->private->irb.scsw.cmd.ectl = 0;
+ cdev->private->dma_area->irb.scsw.cmd.ectl = 0;
if ((irb->scsw.cmd.stctl & SCSW_STCTL_ALERT_STATUS) &&
!(irb->scsw.cmd.stctl & SCSW_STCTL_INTER_STATUS))
- cdev->private->irb.scsw.cmd.ectl = irb->scsw.cmd.ectl;
+ cdev->private->dma_area->irb.scsw.cmd.ectl = irb->scsw.cmd.ectl;
/* Check if extended control word is valid. */
- if (!cdev->private->irb.scsw.cmd.ectl)
+ if (!cdev->private->dma_area->irb.scsw.cmd.ectl)
return;
/* Copy concurrent sense / model dependent information. */
- memcpy (&cdev->private->irb.ecw, irb->ecw, sizeof (irb->ecw));
+ memcpy(&cdev->private->dma_area->irb.ecw, irb->ecw, sizeof(irb->ecw));
}
/*
@@ -118,7 +118,7 @@ ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb)
if (!ccw_device_accumulate_esw_valid(irb))
return;
- cdev_irb = &cdev->private->irb;
+ cdev_irb = &cdev->private->dma_area->irb;
/* Copy last path used mask. */
cdev_irb->esw.esw1.lpum = irb->esw.esw1.lpum;
@@ -210,7 +210,7 @@ ccw_device_accumulate_irb(struct ccw_device *cdev, struct irb *irb)
ccw_device_path_notoper(cdev);
/* No irb accumulation for transport mode irbs. */
if (scsw_is_tm(&irb->scsw)) {
- memcpy(&cdev->private->irb, irb, sizeof(struct irb));
+ memcpy(&cdev->private->dma_area->irb, irb, sizeof(struct irb));
return;
}
/*
@@ -219,7 +219,7 @@ ccw_device_accumulate_irb(struct ccw_device *cdev, struct irb *irb)
if (!scsw_is_solicited(&irb->scsw))
return;
- cdev_irb = &cdev->private->irb;
+ cdev_irb = &cdev->private->dma_area->irb;
/*
* If the clear function had been performed, all formerly pending
@@ -227,7 +227,7 @@ ccw_device_accumulate_irb(struct ccw_device *cdev, struct irb *irb)
* intermediate accumulated status to the device driver.
*/
if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC)
- memset(&cdev->private->irb, 0, sizeof(struct irb));
+ memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
/* Copy bits which are valid only for the start function. */
if (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) {
@@ -329,9 +329,9 @@ ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb)
/*
* We have ending status but no sense information. Do a basic sense.
*/
- sense_ccw = &to_io_private(sch)->sense_ccw;
+ sense_ccw = &to_io_private(sch)->dma_area->sense_ccw;
sense_ccw->cmd_code = CCW_CMD_BASIC_SENSE;
- sense_ccw->cda = (__u32) __pa(cdev->private->irb.ecw);
+ sense_ccw->cda = (__u32) __pa(cdev->private->dma_area->irb.ecw);
sense_ccw->count = SENSE_MAX_COUNT;
sense_ccw->flags = CCW_FLAG_SLI;
@@ -364,7 +364,7 @@ ccw_device_accumulate_basic_sense(struct ccw_device *cdev, struct irb *irb)
if (!(irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
(irb->scsw.cmd.dstat & DEV_STAT_CHN_END)) {
- cdev->private->irb.esw.esw0.erw.cons = 1;
+ cdev->private->dma_area->irb.esw.esw0.erw.cons = 1;
cdev->private->flags.dosense = 0;
}
/* Check if path verification is required. */
@@ -386,7 +386,7 @@ ccw_device_accumulate_and_sense(struct ccw_device *cdev, struct irb *irb)
/* Check for basic sense. */
if (cdev->private->flags.dosense &&
!(irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)) {
- cdev->private->irb.esw.esw0.erw.cons = 1;
+ cdev->private->dma_area->irb.esw.esw0.erw.cons = 1;
cdev->private->flags.dosense = 0;
return 0;
}
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index 90e4e3a7841b..c03b4a19974e 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -9,15 +9,20 @@
#include "css.h"
#include "orb.h"
+struct io_subchannel_dma_area {
+ struct ccw1 sense_ccw; /* static ccw for sense command */
+};
+
struct io_subchannel_private {
union orb orb; /* operation request block */
- struct ccw1 sense_ccw; /* static ccw for sense command */
struct ccw_device *cdev;/* pointer to the child ccw device */
struct {
unsigned int suspend:1; /* allow suspend */
unsigned int prefetch:1;/* deny prefetch */
unsigned int inter:1; /* suppress intermediate interrupts */
} __packed options;
+ struct io_subchannel_dma_area *dma_area;
+ dma_addr_t dma_area_dma;
} __aligned(8);
#define to_io_private(n) ((struct io_subchannel_private *) \
@@ -115,6 +120,13 @@ enum cdev_todo {
#define FAKE_CMD_IRB 1
#define FAKE_TM_IRB 2
+struct ccw_device_dma_area {
+ struct senseid senseid; /* SenseID info */
+ struct ccw1 iccws[2]; /* ccws for SNID/SID/SPGID commands */
+ struct irb irb; /* device status */
+ struct pgid pgid[8]; /* path group IDs per chpid*/
+};
+
struct ccw_device_private {
struct ccw_device *cdev;
struct subchannel *sch;
@@ -156,11 +168,7 @@ struct ccw_device_private {
} __attribute__((packed)) flags;
unsigned long intparm; /* user interruption parameter */
struct qdio_irq *qdio_data;
- struct irb irb; /* device status */
int async_kill_io_rc;
- struct senseid senseid; /* SenseID info */
- struct pgid pgid[8]; /* path group IDs per chpid*/
- struct ccw1 iccws[2]; /* ccws for SNID/SID/SPGID commands */
struct work_struct todo_work;
enum cdev_todo todo;
wait_queue_head_t wait_q;
@@ -169,6 +177,8 @@ struct ccw_device_private {
struct list_head cmb_list; /* list of measured devices */
u64 cmb_start_time; /* clock value of cmb reset */
void *cmb_wait; /* deferred cmb enable/disable */
+ struct gen_pool *dma_pool;
+ struct ccw_device_dma_area *dma_area;
enum interruption_class int_class;
};
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 7b7620de2acd..730c4e68094b 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -736,6 +736,7 @@ static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start)
switch (state) {
case SLSB_P_OUTPUT_EMPTY:
+ case SLSB_P_OUTPUT_PENDING:
/* the adapter got it */
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr,
"out empty:%1d %02x", q->nr, count);
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 99d7d2566a3a..d4101cecdc8d 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -150,6 +150,7 @@ static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
return -ENOMEM;
}
irq_ptr_qs[i] = q;
+ INIT_LIST_HEAD(&q->entry);
}
return 0;
}
@@ -178,6 +179,7 @@ static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr,
q->mask = 1 << (31 - i);
q->nr = i;
q->handler = handler;
+ INIT_LIST_HEAD(&q->entry);
}
static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index 28d59ac2204c..93ee067c10ca 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -79,7 +79,6 @@ void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
mutex_lock(&tiq_list_lock);
list_add_rcu(&irq_ptr->input_qs[0]->entry, &tiq_list);
mutex_unlock(&tiq_list_lock);
- xchg(irq_ptr->dsci, 1 << 7);
}
void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
@@ -87,14 +86,14 @@ void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
struct qdio_q *q;
q = irq_ptr->input_qs[0];
- /* if establish triggered an error */
- if (!q || !q->entry.prev || !q->entry.next)
+ if (!q)
return;
mutex_lock(&tiq_list_lock);
list_del_rcu(&q->entry);
mutex_unlock(&tiq_list_lock);
synchronize_rcu();
+ INIT_LIST_HEAD(&q->entry);
}
static inline int has_multiple_inq_on_dsci(struct qdio_irq *irq_ptr)
@@ -178,6 +177,7 @@ static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq)
/**
* tiqdio_thinint_handler - thin interrupt handler for qdio
* @airq: pointer to adapter interrupt descriptor
+ * @floating: flag to recognize floating vs. directed interrupts (unused)
*/
static void tiqdio_thinint_handler(struct airq_struct *airq, bool floating)
{
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
index 0e79799e9a71..1d4c893ead23 100644
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -16,12 +16,6 @@
#include "vfio_ccw_cp.h"
-/*
- * Max length for ccw chain.
- * XXX: Limit to 256, need to check more?
- */
-#define CCWCHAIN_LEN_MAX 256
-
struct pfn_array {
/* Starting guest physical I/O address. */
unsigned long pa_iova;
@@ -33,11 +27,6 @@ struct pfn_array {
int pa_nr;
};
-struct pfn_array_table {
- struct pfn_array *pat_pa;
- int pat_nr;
-};
-
struct ccwchain {
struct list_head next;
struct ccw1 *ch_ccw;
@@ -46,35 +35,29 @@ struct ccwchain {
/* Count of the valid ccws in chain. */
int ch_len;
/* Pinned PAGEs for the original data. */
- struct pfn_array_table *ch_pat;
+ struct pfn_array *ch_pa;
};
/*
- * pfn_array_alloc_pin() - alloc memory for PFNs, then pin user pages in memory
+ * pfn_array_alloc() - alloc memory for PFNs
* @pa: pfn_array on which to perform the operation
- * @mdev: the mediated device to perform pin/unpin operations
* @iova: target guest physical address
* @len: number of bytes that should be pinned from @iova
*
- * Attempt to allocate memory for PFNs, and pin user pages in memory.
+ * Attempt to allocate memory for PFNs.
*
* Usage of pfn_array:
* We expect (pa_nr == 0) and (pa_iova_pfn == NULL), any field in
* this structure will be filled in by this function.
*
* Returns:
- * Number of pages pinned on success.
- * If @pa->pa_nr is not 0, or @pa->pa_iova_pfn is not NULL initially,
- * returns -EINVAL.
- * If no pages were pinned, returns -errno.
+ * 0 if PFNs are allocated
+ * -EINVAL if pa->pa_nr is not initially zero, or pa->pa_iova_pfn is not NULL
+ * -ENOMEM if alloc failed
*/
-static int pfn_array_alloc_pin(struct pfn_array *pa, struct device *mdev,
- u64 iova, unsigned int len)
+static int pfn_array_alloc(struct pfn_array *pa, u64 iova, unsigned int len)
{
- int i, ret = 0;
-
- if (!len)
- return 0;
+ int i;
if (pa->pa_nr || pa->pa_iova_pfn)
return -EINVAL;
@@ -94,8 +77,27 @@ static int pfn_array_alloc_pin(struct pfn_array *pa, struct device *mdev,
pa->pa_pfn = pa->pa_iova_pfn + pa->pa_nr;
pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT;
- for (i = 1; i < pa->pa_nr; i++)
+ pa->pa_pfn[0] = -1ULL;
+ for (i = 1; i < pa->pa_nr; i++) {
pa->pa_iova_pfn[i] = pa->pa_iova_pfn[i - 1] + 1;
+ pa->pa_pfn[i] = -1ULL;
+ }
+
+ return 0;
+}
+
+/*
+ * pfn_array_pin() - Pin user pages in memory
+ * @pa: pfn_array on which to perform the operation
+ * @mdev: the mediated device to perform pin operations
+ *
+ * Returns number of pages pinned upon success.
+ * If the pin request partially succeeds, or fails completely,
+ * all pages are left unpinned and a negative error value is returned.
+ */
+static int pfn_array_pin(struct pfn_array *pa, struct device *mdev)
+{
+ int ret = 0;
ret = vfio_pin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr,
IOMMU_READ | IOMMU_WRITE, pa->pa_pfn);
@@ -112,8 +114,6 @@ static int pfn_array_alloc_pin(struct pfn_array *pa, struct device *mdev,
err_out:
pa->pa_nr = 0;
- kfree(pa->pa_iova_pfn);
- pa->pa_iova_pfn = NULL;
return ret;
}
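To make the split between allocation and pinning concrete, a hedged sketch of the intended call sequence follows; the function name and parameters are illustrative, but the order mirrors how copy_from_iova() uses the helpers later in this patch.

/*
 * Illustrative lifecycle: allocate the PFN bookkeeping, pin only when
 * data will actually move, and always finish with
 * pfn_array_unpin_free(), which is safe even if nothing was pinned.
 */
static int example_map_guest_buffer(struct device *mdev, u64 iova,
				    unsigned int len)
{
	struct pfn_array pa = { 0 };
	int ret;

	ret = pfn_array_alloc(&pa, iova, len);
	if (ret < 0)
		return ret;

	ret = pfn_array_pin(&pa, mdev);
	if (ret < 0) {
		pfn_array_unpin_free(&pa, mdev);
		return ret;
	}

	/* ... use pa.pa_pfn[] to build IDAWs or copy data ... */

	pfn_array_unpin_free(&pa, mdev);
	return 0;
}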
@@ -121,60 +121,30 @@ err_out:
/* Unpin the pages before releasing the memory. */
static void pfn_array_unpin_free(struct pfn_array *pa, struct device *mdev)
{
- vfio_unpin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr);
+ /* Only unpin if any pages were pinned to begin with */
+ if (pa->pa_nr)
+ vfio_unpin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr);
pa->pa_nr = 0;
kfree(pa->pa_iova_pfn);
}
-static int pfn_array_table_init(struct pfn_array_table *pat, int nr)
-{
- pat->pat_pa = kcalloc(nr, sizeof(*pat->pat_pa), GFP_KERNEL);
- if (unlikely(ZERO_OR_NULL_PTR(pat->pat_pa))) {
- pat->pat_nr = 0;
- return -ENOMEM;
- }
-
- pat->pat_nr = nr;
-
- return 0;
-}
-
-static void pfn_array_table_unpin_free(struct pfn_array_table *pat,
- struct device *mdev)
+static bool pfn_array_iova_pinned(struct pfn_array *pa, unsigned long iova)
{
- int i;
-
- for (i = 0; i < pat->pat_nr; i++)
- pfn_array_unpin_free(pat->pat_pa + i, mdev);
-
- if (pat->pat_nr) {
- kfree(pat->pat_pa);
- pat->pat_pa = NULL;
- pat->pat_nr = 0;
- }
-}
-
-static bool pfn_array_table_iova_pinned(struct pfn_array_table *pat,
- unsigned long iova)
-{
- struct pfn_array *pa = pat->pat_pa;
unsigned long iova_pfn = iova >> PAGE_SHIFT;
- int i, j;
+ int i;
- for (i = 0; i < pat->pat_nr; i++, pa++)
- for (j = 0; j < pa->pa_nr; j++)
- if (pa->pa_iova_pfn[j] == iova_pfn)
- return true;
+ for (i = 0; i < pa->pa_nr; i++)
+ if (pa->pa_iova_pfn[i] == iova_pfn)
+ return true;
return false;
}
-/* Create the list idal words for a pfn_array_table. */
-static inline void pfn_array_table_idal_create_words(
- struct pfn_array_table *pat,
+/* Create the list of IDAL words for a pfn_array. */
+static inline void pfn_array_idal_create_words(
+ struct pfn_array *pa,
unsigned long *idaws)
{
- struct pfn_array *pa;
- int i, j, k;
+ int i;
/*
* Idal words (except the first one) rely on the memory being 4k
@@ -183,19 +153,36 @@ static inline void pfn_array_table_idal_create_words(
* there will be no problem here to simply use the phys to create an
* idaw.
*/
- k = 0;
- for (i = 0; i < pat->pat_nr; i++) {
- pa = pat->pat_pa + i;
- for (j = 0; j < pa->pa_nr; j++) {
- idaws[k] = pa->pa_pfn[j] << PAGE_SHIFT;
- if (k == 0)
- idaws[k] += pa->pa_iova & (PAGE_SIZE - 1);
- k++;
+
+ for (i = 0; i < pa->pa_nr; i++)
+ idaws[i] = pa->pa_pfn[i] << PAGE_SHIFT;
+
+ /* Adjust the first IDAW, since it may not start on a page boundary */
+ idaws[0] += pa->pa_iova & (PAGE_SIZE - 1);
+}
+
+static void convert_ccw0_to_ccw1(struct ccw1 *source, unsigned long len)
+{
+ struct ccw0 ccw0;
+ struct ccw1 *pccw1 = source;
+ int i;
+
+ for (i = 0; i < len; i++) {
+ ccw0 = *(struct ccw0 *)pccw1;
+ if ((pccw1->cmd_code & 0x0f) == CCW_CMD_TIC) {
+ pccw1->cmd_code = CCW_CMD_TIC;
+ pccw1->flags = 0;
+ pccw1->count = 0;
+ } else {
+ pccw1->cmd_code = ccw0.cmd_code;
+ pccw1->flags = ccw0.flags;
+ pccw1->count = ccw0.count;
}
+ pccw1->cda = ccw0.cda;
+ pccw1++;
}
}
-
/*
* Within the domain (@mdev), copy @n bytes from a guest physical
* address (@iova) to a host physical address (@to).
@@ -209,9 +196,15 @@ static long copy_from_iova(struct device *mdev,
int i, ret;
unsigned long l, m;
- ret = pfn_array_alloc_pin(&pa, mdev, iova, n);
- if (ret <= 0)
+ ret = pfn_array_alloc(&pa, iova, n);
+ if (ret < 0)
+ return ret;
+
+ ret = pfn_array_pin(&pa, mdev);
+ if (ret < 0) {
+ pfn_array_unpin_free(&pa, mdev);
return ret;
+ }
l = n;
for (i = 0; i < pa.pa_nr; i++) {
@@ -235,55 +228,60 @@ static long copy_from_iova(struct device *mdev,
return l;
}
-static long copy_ccw_from_iova(struct channel_program *cp,
- struct ccw1 *to, u64 iova,
- unsigned long len)
-{
- struct ccw0 ccw0;
- struct ccw1 *pccw1;
- int ret;
- int i;
-
- ret = copy_from_iova(cp->mdev, to, iova, len * sizeof(struct ccw1));
- if (ret)
- return ret;
-
- if (!cp->orb.cmd.fmt) {
- pccw1 = to;
- for (i = 0; i < len; i++) {
- ccw0 = *(struct ccw0 *)pccw1;
- if ((pccw1->cmd_code & 0x0f) == CCW_CMD_TIC) {
- pccw1->cmd_code = CCW_CMD_TIC;
- pccw1->flags = 0;
- pccw1->count = 0;
- } else {
- pccw1->cmd_code = ccw0.cmd_code;
- pccw1->flags = ccw0.flags;
- pccw1->count = ccw0.count;
- }
- pccw1->cda = ccw0.cda;
- pccw1++;
- }
- }
-
- return ret;
-}
-
/*
* Helpers to operate ccwchain.
*/
-#define ccw_is_test(_ccw) (((_ccw)->cmd_code & 0x0F) == 0)
+#define ccw_is_read(_ccw) (((_ccw)->cmd_code & 0x03) == 0x02)
+#define ccw_is_read_backward(_ccw) (((_ccw)->cmd_code & 0x0F) == 0x0C)
+#define ccw_is_sense(_ccw) (((_ccw)->cmd_code & 0x0F) == CCW_CMD_BASIC_SENSE)
#define ccw_is_noop(_ccw) ((_ccw)->cmd_code == CCW_CMD_NOOP)
#define ccw_is_tic(_ccw) ((_ccw)->cmd_code == CCW_CMD_TIC)
#define ccw_is_idal(_ccw) ((_ccw)->flags & CCW_FLAG_IDA)
-
+#define ccw_is_skip(_ccw) ((_ccw)->flags & CCW_FLAG_SKIP)
#define ccw_is_chain(_ccw) ((_ccw)->flags & (CCW_FLAG_CC | CCW_FLAG_DC))
/*
+ * ccw_does_data_transfer()
+ *
+ * Determine whether a CCW will move any data, such that the guest pages
+ * would need to be pinned before performing the I/O.
+ *
+ * Returns 1 if yes, 0 if no.
+ */
+static inline int ccw_does_data_transfer(struct ccw1 *ccw)
+{
+ /* If the count field is zero, then no data will be transferred */
+ if (ccw->count == 0)
+ return 0;
+
+ /* If the command is a NOP, then no data will be transferred */
+ if (ccw_is_noop(ccw))
+ return 0;
+
+ /* If the skip flag is off, then data will be transferred */
+ if (!ccw_is_skip(ccw))
+ return 1;
+
+ /*
+ * If the skip flag is on, it is only meaningful if the command
+ * code is a read, read backward, sense, or sense ID. In those
+ * cases, no data will be transferred.
+ */
+ if (ccw_is_read(ccw) || ccw_is_read_backward(ccw))
+ return 0;
+
+ if (ccw_is_sense(ccw))
+ return 0;
+
+ /* The skip flag is on, but it is ignored for this command code. */
+ return 1;
+}
+
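A short, hypothetical illustration of the predicate above: a read CCW with the skip flag set transfers no data, while a write CCW with the same flag still does, because skip is only honoured for read, read backward, sense and sense ID.

/*
 * Illustrative only: exercise ccw_does_data_transfer() with two
 * hand-built CCWs (0x02 = basic read, 0x01 = basic write).
 */
static void example_skip_flag(void)
{
	struct ccw1 rd = { .cmd_code = 0x02, .flags = CCW_FLAG_SKIP,
			   .count = 4096 };
	struct ccw1 wr = { .cmd_code = 0x01, .flags = CCW_FLAG_SKIP,
			   .count = 4096 };

	WARN_ON(ccw_does_data_transfer(&rd) != 0);	/* skip honoured */
	WARN_ON(ccw_does_data_transfer(&wr) != 1);	/* skip ignored  */
}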
+/*
* is_cpa_within_range()
*
* @cpa: channel program address being questioned
@@ -319,7 +317,7 @@ static struct ccwchain *ccwchain_alloc(struct channel_program *cp, int len)
/* Make ccw address aligned to 8. */
size = ((sizeof(*chain) + 7L) & -8L) +
sizeof(*chain->ch_ccw) * len +
- sizeof(*chain->ch_pat) * len;
+ sizeof(*chain->ch_pa) * len;
chain = kzalloc(size, GFP_DMA | GFP_KERNEL);
if (!chain)
return NULL;
@@ -328,7 +326,7 @@ static struct ccwchain *ccwchain_alloc(struct channel_program *cp, int len)
chain->ch_ccw = (struct ccw1 *)data;
data = (u8 *)(chain->ch_ccw) + sizeof(*chain->ch_ccw) * len;
- chain->ch_pat = (struct pfn_array_table *)data;
+ chain->ch_pa = (struct pfn_array *)data;
chain->ch_len = len;
@@ -348,31 +346,12 @@ static void ccwchain_cda_free(struct ccwchain *chain, int idx)
{
struct ccw1 *ccw = chain->ch_ccw + idx;
- if (ccw_is_test(ccw) || ccw_is_noop(ccw) || ccw_is_tic(ccw))
- return;
- if (!ccw->count)
+ if (ccw_is_tic(ccw))
return;
kfree((void *)(u64)ccw->cda);
}
-/* Unpin the pages then free the memory resources. */
-static void cp_unpin_free(struct channel_program *cp)
-{
- struct ccwchain *chain, *temp;
- int i;
-
- cp->initialized = false;
- list_for_each_entry_safe(chain, temp, &cp->ccwchain_list, next) {
- for (i = 0; i < chain->ch_len; i++) {
- pfn_array_table_unpin_free(chain->ch_pat + i,
- cp->mdev);
- ccwchain_cda_free(chain, i);
- }
- ccwchain_free(chain);
- }
-}
-
/**
* ccwchain_calc_length - calculate the length of the ccw chain.
* @iova: guest physical address of the target ccw chain
@@ -388,25 +367,9 @@ static void cp_unpin_free(struct channel_program *cp)
*/
static int ccwchain_calc_length(u64 iova, struct channel_program *cp)
{
- struct ccw1 *ccw, *p;
- int cnt;
-
- /*
- * Copy current chain from guest to host kernel.
- * Currently the chain length is limited to CCWCHAIN_LEN_MAX (256).
- * So copying 2K is enough (safe).
- */
- p = ccw = kcalloc(CCWCHAIN_LEN_MAX, sizeof(*ccw), GFP_KERNEL);
- if (!ccw)
- return -ENOMEM;
-
- cnt = copy_ccw_from_iova(cp, ccw, iova, CCWCHAIN_LEN_MAX);
- if (cnt) {
- kfree(ccw);
- return cnt;
- }
+ struct ccw1 *ccw = cp->guest_cp;
+ int cnt = 0;
- cnt = 0;
do {
cnt++;
@@ -415,10 +378,8 @@ static int ccwchain_calc_length(u64 iova, struct channel_program *cp)
* orb specified one of the unsupported formats, we defer
* checking for IDAWs in unsupported formats to here.
*/
- if ((!cp->orb.cmd.c64 || cp->orb.cmd.i2k) && ccw_is_idal(ccw)) {
- kfree(p);
+ if ((!cp->orb.cmd.c64 || cp->orb.cmd.i2k) && ccw_is_idal(ccw))
return -EOPNOTSUPP;
- }
/*
* We want to keep counting if the current CCW has the
@@ -437,7 +398,6 @@ static int ccwchain_calc_length(u64 iova, struct channel_program *cp)
if (cnt == CCWCHAIN_LEN_MAX + 1)
cnt = -EINVAL;
- kfree(p);
return cnt;
}
@@ -458,17 +418,23 @@ static int tic_target_chain_exists(struct ccw1 *tic, struct channel_program *cp)
static int ccwchain_loop_tic(struct ccwchain *chain,
struct channel_program *cp);
-static int ccwchain_handle_tic(struct ccw1 *tic, struct channel_program *cp)
+static int ccwchain_handle_ccw(u32 cda, struct channel_program *cp)
{
struct ccwchain *chain;
- int len, ret;
+ int len;
- /* May transfer to an existing chain. */
- if (tic_target_chain_exists(tic, cp))
- return 0;
+ /* Copy 2K (the most we support today) of possible CCWs */
+ len = copy_from_iova(cp->mdev, cp->guest_cp, cda,
+ CCWCHAIN_LEN_MAX * sizeof(struct ccw1));
+ if (len)
+ return len;
- /* Get chain length. */
- len = ccwchain_calc_length(tic->cda, cp);
+ /* Convert any Format-0 CCWs to Format-1 */
+ if (!cp->orb.cmd.fmt)
+ convert_ccw0_to_ccw1(cp->guest_cp, CCWCHAIN_LEN_MAX);
+
+ /* Count the CCWs in the current chain */
+ len = ccwchain_calc_length(cda, cp);
if (len < 0)
return len;
@@ -476,14 +442,10 @@ static int ccwchain_handle_tic(struct ccw1 *tic, struct channel_program *cp)
chain = ccwchain_alloc(cp, len);
if (!chain)
return -ENOMEM;
- chain->ch_iova = tic->cda;
+ chain->ch_iova = cda;
- /* Copy the new chain from user. */
- ret = copy_ccw_from_iova(cp, chain->ch_ccw, tic->cda, len);
- if (ret) {
- ccwchain_free(chain);
- return ret;
- }
+ /* Copy the actual CCWs into the new chain */
+ memcpy(chain->ch_ccw, cp->guest_cp, len * sizeof(struct ccw1));
/* Loop for tics on this new chain. */
return ccwchain_loop_tic(chain, cp);
@@ -501,7 +463,12 @@ static int ccwchain_loop_tic(struct ccwchain *chain, struct channel_program *cp)
if (!ccw_is_tic(tic))
continue;
- ret = ccwchain_handle_tic(tic, cp);
+ /* May transfer to an existing chain. */
+ if (tic_target_chain_exists(tic, cp))
+ continue;
+
+ /* Build a ccwchain for the next segment */
+ ret = ccwchain_handle_ccw(tic->cda, cp);
if (ret)
return ret;
}
@@ -534,115 +501,90 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
struct channel_program *cp)
{
struct ccw1 *ccw;
- struct pfn_array_table *pat;
+ struct pfn_array *pa;
+ u64 iova;
unsigned long *idaws;
int ret;
+ int bytes = 1;
+ int idaw_nr, idal_len;
+ int i;
ccw = chain->ch_ccw + idx;
- if (!ccw->count) {
- /*
- * We just want the translation result of any direct ccw
- * to be an IDA ccw, so let's add the IDA flag for it.
- * Although the flag will be ignored by firmware.
- */
- ccw->flags |= CCW_FLAG_IDA;
- return 0;
- }
-
- /*
- * Pin data page(s) in memory.
- * The number of pages actually is the count of the idaws which will be
- * needed when translating a direct ccw to a idal ccw.
- */
- pat = chain->ch_pat + idx;
- ret = pfn_array_table_init(pat, 1);
- if (ret)
- goto out_init;
-
- ret = pfn_array_alloc_pin(pat->pat_pa, cp->mdev, ccw->cda, ccw->count);
- if (ret < 0)
- goto out_unpin;
+ if (ccw->count)
+ bytes = ccw->count;
- /* Translate this direct ccw to a idal ccw. */
- idaws = kcalloc(ret, sizeof(*idaws), GFP_DMA | GFP_KERNEL);
- if (!idaws) {
- ret = -ENOMEM;
- goto out_unpin;
+ /* Calculate size of IDAL */
+ if (ccw_is_idal(ccw)) {
+ /* Read first IDAW to see if it's 4K-aligned or not. */
+ /* All subsequent IDAWs will be 4K-aligned. */
+ ret = copy_from_iova(cp->mdev, &iova, ccw->cda, sizeof(iova));
+ if (ret)
+ return ret;
+ } else {
+ iova = ccw->cda;
}
- ccw->cda = (__u32) virt_to_phys(idaws);
- ccw->flags |= CCW_FLAG_IDA;
-
- pfn_array_table_idal_create_words(pat, idaws);
-
- return 0;
-
-out_unpin:
- pfn_array_table_unpin_free(pat, cp->mdev);
-out_init:
- ccw->cda = 0;
- return ret;
-}
-
-static int ccwchain_fetch_idal(struct ccwchain *chain,
- int idx,
- struct channel_program *cp)
-{
- struct ccw1 *ccw;
- struct pfn_array_table *pat;
- unsigned long *idaws;
- u64 idaw_iova;
- unsigned int idaw_nr, idaw_len;
- int i, ret;
-
- ccw = chain->ch_ccw + idx;
-
- if (!ccw->count)
- return 0;
-
- /* Calculate size of idaws. */
- ret = copy_from_iova(cp->mdev, &idaw_iova, ccw->cda, sizeof(idaw_iova));
- if (ret)
- return ret;
- idaw_nr = idal_nr_words((void *)(idaw_iova), ccw->count);
- idaw_len = idaw_nr * sizeof(*idaws);
-
- /* Pin data page(s) in memory. */
- pat = chain->ch_pat + idx;
- ret = pfn_array_table_init(pat, idaw_nr);
- if (ret)
- goto out_init;
+ idaw_nr = idal_nr_words((void *)iova, bytes);
+ idal_len = idaw_nr * sizeof(*idaws);
- /* Translate idal ccw to use new allocated idaws. */
- idaws = kzalloc(idaw_len, GFP_DMA | GFP_KERNEL);
+ /* Allocate an IDAL from host storage */
+ idaws = kcalloc(idaw_nr, sizeof(*idaws), GFP_DMA | GFP_KERNEL);
if (!idaws) {
ret = -ENOMEM;
- goto out_unpin;
+ goto out_init;
}
- ret = copy_from_iova(cp->mdev, idaws, ccw->cda, idaw_len);
- if (ret)
+ /*
+ * Allocate an array of PFNs for the pages to pin/translate.
+ * The number of pages is actually the count of the IDAWs
+ * required for the data transfer, since we only support
+ * 4K IDAWs today.
+ */
+ pa = chain->ch_pa + idx;
+ ret = pfn_array_alloc(pa, iova, bytes);
+ if (ret < 0)
goto out_free_idaws;
- ccw->cda = virt_to_phys(idaws);
+ if (ccw_is_idal(ccw)) {
+ /* Copy guest IDAL into host IDAL */
+ ret = copy_from_iova(cp->mdev, idaws, ccw->cda, idal_len);
+ if (ret)
+ goto out_unpin;
- for (i = 0; i < idaw_nr; i++) {
- idaw_iova = *(idaws + i);
+ /*
+ * Copy guest IDAWs into pfn_array, in case the memory they
+ * occupy is not contiguous.
+ */
+ for (i = 0; i < idaw_nr; i++)
+ pa->pa_iova_pfn[i] = idaws[i] >> PAGE_SHIFT;
+ } else {
+ /*
+ * No action is required here; the iova addresses in pfn_array
+ * were initialized sequentially in pfn_array_alloc() beginning
+ * with the contents of ccw->cda.
+ */
+ }
- ret = pfn_array_alloc_pin(pat->pat_pa + i, cp->mdev,
- idaw_iova, 1);
+ if (ccw_does_data_transfer(ccw)) {
+ ret = pfn_array_pin(pa, cp->mdev);
if (ret < 0)
- goto out_free_idaws;
+ goto out_unpin;
+ } else {
+ pa->pa_nr = 0;
}
- pfn_array_table_idal_create_words(pat, idaws);
+ ccw->cda = (__u32) virt_to_phys(idaws);
+ ccw->flags |= CCW_FLAG_IDA;
+
+ /* Populate the IDAL with pinned/translated addresses from pfn */
+ pfn_array_idal_create_words(pa, idaws);
return 0;
+out_unpin:
+ pfn_array_unpin_free(pa, cp->mdev);
out_free_idaws:
kfree(idaws);
-out_unpin:
- pfn_array_table_unpin_free(pat, cp->mdev);
out_init:
ccw->cda = 0;
return ret;
@@ -660,15 +602,9 @@ static int ccwchain_fetch_one(struct ccwchain *chain,
{
struct ccw1 *ccw = chain->ch_ccw + idx;
- if (ccw_is_test(ccw) || ccw_is_noop(ccw))
- return 0;
-
if (ccw_is_tic(ccw))
return ccwchain_fetch_tic(chain, idx, cp);
- if (ccw_is_idal(ccw))
- return ccwchain_fetch_idal(chain, idx, cp);
-
return ccwchain_fetch_direct(chain, idx, cp);
}
@@ -691,9 +627,7 @@ static int ccwchain_fetch_one(struct ccwchain *chain,
*/
int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
{
- u64 iova = orb->cmd.cpa;
- struct ccwchain *chain;
- int len, ret;
+ int ret;
/*
* XXX:
@@ -706,28 +640,11 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
memcpy(&cp->orb, orb, sizeof(*orb));
cp->mdev = mdev;
- /* Get chain length. */
- len = ccwchain_calc_length(iova, cp);
- if (len < 0)
- return len;
-
- /* Alloc mem for the head chain. */
- chain = ccwchain_alloc(cp, len);
- if (!chain)
- return -ENOMEM;
- chain->ch_iova = iova;
-
- /* Copy the head chain from guest. */
- ret = copy_ccw_from_iova(cp, chain->ch_ccw, iova, len);
- if (ret) {
- ccwchain_free(chain);
- return ret;
- }
-
- /* Now loop for its TICs. */
- ret = ccwchain_loop_tic(chain, cp);
+ /* Build a ccwchain for the first CCW segment */
+ ret = ccwchain_handle_ccw(orb->cmd.cpa, cp);
if (ret)
- cp_unpin_free(cp);
+ cp_free(cp);
+
/* It is safe to force: if not set but idals used
* ccwchain_calc_length returns an error.
*/
@@ -750,8 +667,20 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
*/
void cp_free(struct channel_program *cp)
{
- if (cp->initialized)
- cp_unpin_free(cp);
+ struct ccwchain *chain, *temp;
+ int i;
+
+ if (!cp->initialized)
+ return;
+
+ cp->initialized = false;
+ list_for_each_entry_safe(chain, temp, &cp->ccwchain_list, next) {
+ for (i = 0; i < chain->ch_len; i++) {
+ pfn_array_unpin_free(chain->ch_pa + i, cp->mdev);
+ ccwchain_cda_free(chain, i);
+ }
+ ccwchain_free(chain);
+ }
}
/**
@@ -886,7 +815,11 @@ void cp_update_scsw(struct channel_program *cp, union scsw *scsw)
*/
list_for_each_entry(chain, &cp->ccwchain_list, next) {
ccw_head = (u32)(u64)chain->ch_ccw;
- if (is_cpa_within_range(cpa, ccw_head, chain->ch_len)) {
+ /*
+ * On successful execution, cpa points just beyond the end
+ * of the chain.
+ */
+ if (is_cpa_within_range(cpa, ccw_head, chain->ch_len + 1)) {
/*
* (cpa - ccw_head) is the offset value of the host
* physical ccw to its chain head.
@@ -919,8 +852,7 @@ bool cp_iova_pinned(struct channel_program *cp, u64 iova)
list_for_each_entry(chain, &cp->ccwchain_list, next) {
for (i = 0; i < chain->ch_len; i++)
- if (pfn_array_table_iova_pinned(chain->ch_pat + i,
- iova))
+ if (pfn_array_iova_pinned(chain->ch_pa + i, iova))
return true;
}
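For the cp_update_scsw() change above (checking ch_len + 1 entries), a worked example with made-up numbers may help; the addresses below are purely illustrative.

/* Illustrative numbers only: a 3-CCW chain occupies 24 bytes. */
static void example_cpa_range(void)
{
	u32 ccw_head = 0x1000;	/* host address of chain->ch_ccw */
	int ch_len = 3;		/* three format-1 CCWs, 8 bytes each */
	u32 cpa_done = ccw_head + ch_len * sizeof(struct ccw1);	/* 0x1018 */

	/*
	 * Successful execution leaves cpa just past the last CCW, so the
	 * range check must cover ch_len + 1 entries:
	 *   is_cpa_within_range(cpa_done, ccw_head, ch_len)     -> false
	 *   is_cpa_within_range(cpa_done, ccw_head, ch_len + 1) -> true
	 */
	(void)cpa_done;
}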
diff --git a/drivers/s390/cio/vfio_ccw_cp.h b/drivers/s390/cio/vfio_ccw_cp.h
index 3c20cd208da5..7cdc38049033 100644
--- a/drivers/s390/cio/vfio_ccw_cp.h
+++ b/drivers/s390/cio/vfio_ccw_cp.h
@@ -16,6 +16,12 @@
#include "orb.h"
+/*
+ * Max length for ccw chain.
+ * XXX: Limit to 256, need to check more?
+ */
+#define CCWCHAIN_LEN_MAX 256
+
/**
* struct channel_program - manage information for channel program
* @ccwchain_list: list head of ccwchains
@@ -32,6 +38,7 @@ struct channel_program {
union orb orb;
struct device *mdev;
bool initialized;
+ struct ccw1 *guest_cp;
};
extern int cp_init(struct channel_program *cp, struct device *mdev,
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
index 9125f7f4e64c..2b90a5ecaeb9 100644
--- a/drivers/s390/cio/vfio_ccw_drv.c
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -95,11 +95,11 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
memcpy(private->io_region->irb_area, irb, sizeof(*irb));
mutex_unlock(&private->io_mutex);
- if (private->io_trigger)
- eventfd_signal(private->io_trigger, 1);
-
if (private->mdev && is_final)
private->state = VFIO_CCW_STATE_IDLE;
+
+ if (private->io_trigger)
+ eventfd_signal(private->io_trigger, 1);
}
/*
@@ -129,6 +129,11 @@ static int vfio_ccw_sch_probe(struct subchannel *sch)
if (!private)
return -ENOMEM;
+ private->cp.guest_cp = kcalloc(CCWCHAIN_LEN_MAX, sizeof(struct ccw1),
+ GFP_KERNEL);
+ if (!private->cp.guest_cp)
+ goto out_free;
+
private->io_region = kmem_cache_zalloc(vfio_ccw_io_region,
GFP_KERNEL | GFP_DMA);
if (!private->io_region)
@@ -169,6 +174,7 @@ out_free:
kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
if (private->io_region)
kmem_cache_free(vfio_ccw_io_region, private->io_region);
+ kfree(private->cp.guest_cp);
kfree(private);
return ret;
}
@@ -185,6 +191,7 @@ static int vfio_ccw_sch_remove(struct subchannel *sch)
kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
kmem_cache_free(vfio_ccw_io_region, private->io_region);
+ kfree(private->cp.guest_cp);
kfree(private);
return 0;
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
index 45eb0c14b880..7f418d2d8cdf 100644
--- a/drivers/s390/crypto/pkey_api.c
+++ b/drivers/s390/crypto/pkey_api.c
@@ -690,7 +690,7 @@ int pkey_clr2protkey(u32 keytype,
*/
if (!cpacf_test_func(&pckmo_functions, fc)) {
DEBUG_ERR("%s pckmo functions not available\n", __func__);
- return -EOPNOTSUPP;
+ return -ENODEV;
}
/* prepare param block */
@@ -1695,15 +1695,15 @@ static int __init pkey_init(void)
* are able to work with protected keys.
*/
if (!cpacf_query(CPACF_PCKMO, &pckmo_functions))
- return -EOPNOTSUPP;
+ return -ENODEV;
/* check for kmc instructions available */
if (!cpacf_query(CPACF_KMC, &kmc_functions))
- return -EOPNOTSUPP;
+ return -ENODEV;
if (!cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_128) ||
!cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_192) ||
!cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_256))
- return -EOPNOTSUPP;
+ return -ENODEV;
pkey_debug_init();
diff --git a/drivers/s390/crypto/vfio_ap_drv.c b/drivers/s390/crypto/vfio_ap_drv.c
index e9824c35c34f..003662aa8060 100644
--- a/drivers/s390/crypto/vfio_ap_drv.c
+++ b/drivers/s390/crypto/vfio_ap_drv.c
@@ -5,6 +5,7 @@
* Copyright IBM Corp. 2018
*
* Author(s): Tony Krowiak <akrowiak@linux.ibm.com>
+ * Pierre Morel <pmorel@linux.ibm.com>
*/
#include <linux/module.h>
@@ -40,14 +41,45 @@ static struct ap_device_id ap_queue_ids[] = {
MODULE_DEVICE_TABLE(vfio_ap, ap_queue_ids);
+/**
+ * vfio_ap_queue_dev_probe:
+ *
+ * Allocate a vfio_ap_queue structure and associate it
+ * with the device as driver_data.
+ */
static int vfio_ap_queue_dev_probe(struct ap_device *apdev)
{
+ struct vfio_ap_queue *q;
+
+ q = kzalloc(sizeof(*q), GFP_KERNEL);
+ if (!q)
+ return -ENOMEM;
+ dev_set_drvdata(&apdev->device, q);
+ q->apqn = to_ap_queue(&apdev->device)->qid;
+ q->saved_isc = VFIO_AP_ISC_INVALID;
return 0;
}
+/**
+ * vfio_ap_queue_dev_remove:
+ *
+ * Takes the matrix lock to avoid actions on this device while it is
+ * being removed, then frees the associated vfio_ap_queue structure.
+ */
static void vfio_ap_queue_dev_remove(struct ap_device *apdev)
{
- /* Nothing to do yet */
+ struct vfio_ap_queue *q;
+ int apid, apqi;
+
+ mutex_lock(&matrix_dev->lock);
+ q = dev_get_drvdata(&apdev->device);
+ dev_set_drvdata(&apdev->device, NULL);
+ apid = AP_QID_CARD(q->apqn);
+ apqi = AP_QID_QUEUE(q->apqn);
+ vfio_ap_mdev_reset_queue(apid, apqi, 1);
+ vfio_ap_irq_disable(q);
+ kfree(q);
+ mutex_unlock(&matrix_dev->lock);
}
static void vfio_ap_matrix_dev_release(struct device *dev)
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index 900b9cf20ca5..2c9fb1423a39 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -24,6 +24,296 @@
#define VFIO_AP_MDEV_TYPE_HWVIRT "passthrough"
#define VFIO_AP_MDEV_NAME_HWVIRT "VFIO AP Passthrough Device"
+static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev);
+
+static int match_apqn(struct device *dev, void *data)
+{
+ struct vfio_ap_queue *q = dev_get_drvdata(dev);
+
+ return (q->apqn == *(int *)(data)) ? 1 : 0;
+}
+
+/**
+ * vfio_ap_get_queue: Retrieve a queue with a specific APQN from a list
+ * @matrix_mdev: the associated mediated matrix
+ * @apqn: The queue APQN
+ *
+ * Retrieve a queue with a specific APQN from the list of the
+ * devices of the vfio_ap_drv.
+ * Verify that the APID and the APQI are set in the matrix.
+ *
+ * Returns the pointer to the associated vfio_ap_queue
+ */
+static struct vfio_ap_queue *vfio_ap_get_queue(
+ struct ap_matrix_mdev *matrix_mdev,
+ int apqn)
+{
+ struct vfio_ap_queue *q;
+ struct device *dev;
+
+ if (!test_bit_inv(AP_QID_CARD(apqn), matrix_mdev->matrix.apm))
+ return NULL;
+ if (!test_bit_inv(AP_QID_QUEUE(apqn), matrix_mdev->matrix.aqm))
+ return NULL;
+
+ dev = driver_find_device(&matrix_dev->vfio_ap_drv->driver, NULL,
+ &apqn, match_apqn);
+ if (!dev)
+ return NULL;
+ q = dev_get_drvdata(dev);
+ q->matrix_mdev = matrix_mdev;
+ put_device(dev);
+
+ return q;
+}
+
+/**
+ * vfio_ap_wait_for_irqclear
+ * @apqn: The AP Queue number
+ *
+ * Checks the IRQ bit for the status of this APQN using ap_tapq.
+ * Returns when ap_tapq succeeds and the bit is clear, or when ap_tapq
+ * fails because the AP is invalid, deconfigured or checkstopped.
+ * Otherwise retries up to 5 times after waiting 20ms.
+ *
+ */
+static void vfio_ap_wait_for_irqclear(int apqn)
+{
+ struct ap_queue_status status;
+ int retry = 5;
+
+ do {
+ status = ap_tapq(apqn, NULL);
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ case AP_RESPONSE_RESET_IN_PROGRESS:
+ if (!status.irq_enabled)
+ return;
+ /* Fall through */
+ case AP_RESPONSE_BUSY:
+ msleep(20);
+ break;
+ case AP_RESPONSE_Q_NOT_AVAIL:
+ case AP_RESPONSE_DECONFIGURED:
+ case AP_RESPONSE_CHECKSTOPPED:
+ default:
+ WARN_ONCE(1, "%s: tapq rc %02x: %04x\n", __func__,
+ status.response_code, apqn);
+ return;
+ }
+ } while (--retry);
+
+ WARN_ONCE(1, "%s: tapq rc %02x: %04x could not clear IR bit\n",
+ __func__, status.response_code, apqn);
+}
+
+/**
+ * vfio_ap_free_aqic_resources
+ * @q: The vfio_ap_queue
+ *
+ * Unregisters the ISC from the GIB when the saved ISC is valid.
+ * Unpins the guest's page holding the NIB when it exists.
+ * Resets saved_pfn and saved_isc to invalid values.
+ * Clears the pointer to the matrix mediated device.
+ *
+ */
+static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q)
+{
+ if (q->saved_isc != VFIO_AP_ISC_INVALID && q->matrix_mdev)
+ kvm_s390_gisc_unregister(q->matrix_mdev->kvm, q->saved_isc);
+ if (q->saved_pfn && q->matrix_mdev)
+ vfio_unpin_pages(mdev_dev(q->matrix_mdev->mdev),
+ &q->saved_pfn, 1);
+ q->saved_pfn = 0;
+ q->saved_isc = VFIO_AP_ISC_INVALID;
+ q->matrix_mdev = NULL;
+}
+
+/**
+ * vfio_ap_irq_disable
+ * @q: The vfio_ap_queue
+ *
+ * Uses ap_aqic to disable the interruption. On success, or when a reset
+ * is in progress, or when the IRQ-disable command already proceeded,
+ * calls vfio_ap_wait_for_irqclear() to wait for the IRQ bit to clear
+ * and vfio_ap_free_aqic_resources() to free the resources associated
+ * with AP interrupt handling.
+ *
+ * If the AP is busy, or a reset is in progress,
+ * retries after 20ms, up to 5 times.
+ *
+ * Returns if ap_aqic fails because the AP is invalid, deconfigured or
+ * checkstopped.
+ */
+struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q)
+{
+ struct ap_qirq_ctrl aqic_gisa = {};
+ struct ap_queue_status status;
+ int retries = 5;
+
+ do {
+ status = ap_aqic(q->apqn, aqic_gisa, NULL);
+ switch (status.response_code) {
+ case AP_RESPONSE_OTHERWISE_CHANGED:
+ case AP_RESPONSE_NORMAL:
+ vfio_ap_wait_for_irqclear(q->apqn);
+ goto end_free;
+ case AP_RESPONSE_RESET_IN_PROGRESS:
+ case AP_RESPONSE_BUSY:
+ msleep(20);
+ break;
+ case AP_RESPONSE_Q_NOT_AVAIL:
+ case AP_RESPONSE_DECONFIGURED:
+ case AP_RESPONSE_CHECKSTOPPED:
+ case AP_RESPONSE_INVALID_ADDRESS:
+ default:
+ /* All remaining cases mean the AP is not operational */
+ WARN_ONCE(1, "%s: ap_aqic status %d\n", __func__,
+ status.response_code);
+ goto end_free;
+ }
+ } while (retries--);
+
+ WARN_ONCE(1, "%s: ap_aqic status %d\n", __func__,
+ status.response_code);
+end_free:
+ vfio_ap_free_aqic_resources(q);
+ return status;
+}
+
+/**
+ * vfio_ap_irq_enable: Enable Interruption for an APQN
+ *
+ * @dev: the device associated with the ap_queue
+ * @q: the vfio_ap_queue holding AQIC parameters
+ *
+ * Pins the NIB saved in *q.
+ * Registers the guest ISC with the GIB interface and retrieves the
+ * host ISC to issue the host-side PQAP/AQIC.
+ *
+ * Response.status may be set to AP_RESPONSE_INVALID_ADDRESS in case
+ * vfio_pin_pages() failed.
+ *
+ * Otherwise return the ap_queue_status returned by the ap_aqic(),
+ * all retry handling will be done by the guest.
+ */
+static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
+ int isc,
+ unsigned long nib)
+{
+ struct ap_qirq_ctrl aqic_gisa = {};
+ struct ap_queue_status status = {};
+ struct kvm_s390_gisa *gisa;
+ struct kvm *kvm;
+ unsigned long h_nib, g_pfn, h_pfn;
+ int ret;
+
+ g_pfn = nib >> PAGE_SHIFT;
+ ret = vfio_pin_pages(mdev_dev(q->matrix_mdev->mdev), &g_pfn, 1,
+ IOMMU_READ | IOMMU_WRITE, &h_pfn);
+ switch (ret) {
+ case 1:
+ break;
+ default:
+ status.response_code = AP_RESPONSE_INVALID_ADDRESS;
+ return status;
+ }
+
+ kvm = q->matrix_mdev->kvm;
+ gisa = kvm->arch.gisa_int.origin;
+
+ h_nib = (h_pfn << PAGE_SHIFT) | (nib & ~PAGE_MASK);
+ aqic_gisa.gisc = isc;
+ aqic_gisa.isc = kvm_s390_gisc_register(kvm, isc);
+ aqic_gisa.ir = 1;
+ aqic_gisa.gisa = (uint64_t)gisa >> 4;
+
+ status = ap_aqic(q->apqn, aqic_gisa, (void *)h_nib);
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ /* Clear any older IRQ configuration that may still be around */
+ vfio_ap_free_aqic_resources(q);
+ q->saved_pfn = g_pfn;
+ q->saved_isc = isc;
+ break;
+ case AP_RESPONSE_OTHERWISE_CHANGED:
+ /* We could not modify IRQ settings: clear new configuration */
+ vfio_unpin_pages(mdev_dev(q->matrix_mdev->mdev), &g_pfn, 1);
+ kvm_s390_gisc_unregister(kvm, isc);
+ break;
+ default:
+ pr_warn("%s: apqn %04x: response: %02x\n", __func__, q->apqn,
+ status.response_code);
+ vfio_ap_irq_disable(q);
+ break;
+ }
+
+ return status;
+}
+
+/**
+ * handle_pqap: PQAP instruction callback
+ *
+ * @vcpu: The vcpu on which we received the PQAP instruction
+ *
+ * Get the general register contents to initialize internal variables.
+ * REG[0]: APQN
+ * REG[1]: IR and ISC
+ * REG[2]: NIB
+ *
+ * Response.status may be set to one of the following response codes:
+ * - AP_RESPONSE_Q_NOT_AVAIL: if the queue is not available
+ * - AP_RESPONSE_DECONFIGURED: if the queue is not configured
+ * - AP_RESPONSE_NORMAL (0) : in case of success
+ * Check vfio_ap_irq_enable() and vfio_ap_irq_disable() for other possible RC.
+ * We take the matrix_dev lock to ensure serialization on queues and
+ * mediated device access.
+ *
+ * Return 0 if we could handle the request inside KVM.
+ * otherwise, returns -EOPNOTSUPP to let QEMU handle the fault.
+ */
+static int handle_pqap(struct kvm_vcpu *vcpu)
+{
+ uint64_t status;
+ uint16_t apqn;
+ struct vfio_ap_queue *q;
+ struct ap_queue_status qstatus = {
+ .response_code = AP_RESPONSE_Q_NOT_AVAIL, };
+ struct ap_matrix_mdev *matrix_mdev;
+
+ /* If we do not use the AIV facility just go to userland */
+ if (!(vcpu->arch.sie_block->eca & ECA_AIV))
+ return -EOPNOTSUPP;
+
+ apqn = vcpu->run->s.regs.gprs[0] & 0xffff;
+ mutex_lock(&matrix_dev->lock);
+
+ if (!vcpu->kvm->arch.crypto.pqap_hook)
+ goto out_unlock;
+ matrix_mdev = container_of(vcpu->kvm->arch.crypto.pqap_hook,
+ struct ap_matrix_mdev, pqap_hook);
+
+ q = vfio_ap_get_queue(matrix_mdev, apqn);
+ if (!q)
+ goto out_unlock;
+
+ status = vcpu->run->s.regs.gprs[1];
+
+ /* If IR bit(16) is set we enable the interrupt */
+ if ((status >> (63 - 16)) & 0x01)
+ qstatus = vfio_ap_irq_enable(q, status & 0x07,
+ vcpu->run->s.regs.gprs[2]);
+ else
+ qstatus = vfio_ap_irq_disable(q);
+
+out_unlock:
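+ /* Return the AP queue status to the guest in the low 32 bits of gpr 1. */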
+ memcpy(&vcpu->run->s.regs.gprs[1], &qstatus, sizeof(qstatus));
+ vcpu->run->s.regs.gprs[1] >>= 32;
+ mutex_unlock(&matrix_dev->lock);
+ return 0;
+}
+
static void vfio_ap_matrix_init(struct ap_config_info *info,
struct ap_matrix *matrix)
{
@@ -45,8 +335,11 @@ static int vfio_ap_mdev_create(struct kobject *kobj, struct mdev_device *mdev)
return -ENOMEM;
}
+ matrix_mdev->mdev = mdev;
vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->matrix);
mdev_set_drvdata(mdev, matrix_mdev);
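+ /* Register the in-kernel handler for the guest's PQAP/AQIC instruction interceptions. */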
+ matrix_mdev->pqap_hook.hook = handle_pqap;
+ matrix_mdev->pqap_hook.owner = THIS_MODULE;
mutex_lock(&matrix_dev->lock);
list_add(&matrix_mdev->node, &matrix_dev->mdev_list);
mutex_unlock(&matrix_dev->lock);
@@ -62,6 +355,7 @@ static int vfio_ap_mdev_remove(struct mdev_device *mdev)
return -EBUSY;
mutex_lock(&matrix_dev->lock);
+ vfio_ap_mdev_reset_queues(mdev);
list_del(&matrix_mdev->node);
mutex_unlock(&matrix_dev->lock);
@@ -754,11 +1048,42 @@ static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev,
}
matrix_mdev->kvm = kvm;
+ kvm_get_kvm(kvm);
+ kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook;
mutex_unlock(&matrix_dev->lock);
return 0;
}
+/*
+ * vfio_ap_mdev_iommu_notifier: IOMMU notifier callback
+ *
+ * @nb: The notifier block
+ * @action: Action to be taken
+ * @data: data associated with the request
+ *
+ * For an UNMAP request, unpin the guest IOVA (the NIB guest address we
+ * pinned before). Other requests are ignored.
+ *
+ */
+static int vfio_ap_mdev_iommu_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct ap_matrix_mdev *matrix_mdev;
+
+ matrix_mdev = container_of(nb, struct ap_matrix_mdev, iommu_notifier);
+
+ if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
+ struct vfio_iommu_type1_dma_unmap *unmap = data;
+ unsigned long g_pfn = unmap->iova >> PAGE_SHIFT;
+
+ vfio_unpin_pages(mdev_dev(matrix_mdev->mdev), &g_pfn, 1);
+ return NOTIFY_OK;
+ }
+
+ return NOTIFY_DONE;
+}
+
static int vfio_ap_mdev_group_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
@@ -790,15 +1115,36 @@ static int vfio_ap_mdev_group_notifier(struct notifier_block *nb,
return NOTIFY_OK;
}
-static int vfio_ap_mdev_reset_queue(unsigned int apid, unsigned int apqi,
- unsigned int retry)
+static void vfio_ap_irq_disable_apqn(int apqn)
+{
+ struct device *dev;
+ struct vfio_ap_queue *q;
+
+ dev = driver_find_device(&matrix_dev->vfio_ap_drv->driver, NULL,
+ &apqn, match_apqn);
+ if (dev) {
+ q = dev_get_drvdata(dev);
+ vfio_ap_irq_disable(q);
+ put_device(dev);
+ }
+}
+
+int vfio_ap_mdev_reset_queue(unsigned int apid, unsigned int apqi,
+ unsigned int retry)
{
struct ap_queue_status status;
+ int retry2 = 2;
+ int apqn = AP_MKQID(apid, apqi);
do {
- status = ap_zapq(AP_MKQID(apid, apqi));
+ status = ap_zapq(apqn);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
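+ /* ZAPQ accepted: poll with TAPQ until the queue reports empty (limited retries). */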
+ while (!status.queue_empty && retry2--) {
+ msleep(20);
+ status = ap_tapq(apqn, NULL);
+ }
+ WARN_ON_ONCE(retry2 <= 0);
return 0;
case AP_RESPONSE_RESET_IN_PROGRESS:
case AP_RESPONSE_BUSY:
@@ -832,6 +1178,7 @@ static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev)
*/
if (ret)
rc = ret;
+ vfio_ap_irq_disable_apqn(AP_MKQID(apid, apqi));
}
}
@@ -858,20 +1205,37 @@ static int vfio_ap_mdev_open(struct mdev_device *mdev)
return ret;
}
- return 0;
+ matrix_mdev->iommu_notifier.notifier_call = vfio_ap_mdev_iommu_notifier;
+ events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
+ ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
+ &events, &matrix_mdev->iommu_notifier);
+ if (!ret)
+ return ret;
+
+ vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
+ &matrix_mdev->group_notifier);
+ module_put(THIS_MODULE);
+ return ret;
}
static void vfio_ap_mdev_release(struct mdev_device *mdev)
{
struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
- if (matrix_mdev->kvm)
+ mutex_lock(&matrix_dev->lock);
+ if (matrix_mdev->kvm) {
kvm_arch_crypto_clear_masks(matrix_mdev->kvm);
+ matrix_mdev->kvm->arch.crypto.pqap_hook = NULL;
+ vfio_ap_mdev_reset_queues(mdev);
+ kvm_put_kvm(matrix_mdev->kvm);
+ matrix_mdev->kvm = NULL;
+ }
+ mutex_unlock(&matrix_dev->lock);
- vfio_ap_mdev_reset_queues(mdev);
+ vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
+ &matrix_mdev->iommu_notifier);
vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
&matrix_mdev->group_notifier);
- matrix_mdev->kvm = NULL;
module_put(THIS_MODULE);
}
@@ -900,6 +1264,7 @@ static ssize_t vfio_ap_mdev_ioctl(struct mdev_device *mdev,
{
int ret;
+ mutex_lock(&matrix_dev->lock);
switch (cmd) {
case VFIO_DEVICE_GET_INFO:
ret = vfio_ap_mdev_get_device_info(arg);
@@ -911,6 +1276,7 @@ static ssize_t vfio_ap_mdev_ioctl(struct mdev_device *mdev,
ret = -EOPNOTSUPP;
break;
}
+ mutex_unlock(&matrix_dev->lock);
return ret;
}
diff --git a/drivers/s390/crypto/vfio_ap_private.h b/drivers/s390/crypto/vfio_ap_private.h
index 76b7f98e47e9..f46dde56b464 100644
--- a/drivers/s390/crypto/vfio_ap_private.h
+++ b/drivers/s390/crypto/vfio_ap_private.h
@@ -4,6 +4,7 @@
*
* Author(s): Tony Krowiak <akrowiak@linux.ibm.com>
* Halil Pasic <pasic@linux.ibm.com>
+ * Pierre Morel <pmorel@linux.ibm.com>
*
* Copyright IBM Corp. 2018
*/
@@ -16,6 +17,7 @@
#include <linux/mdev.h>
#include <linux/delay.h>
#include <linux/mutex.h>
+#include <linux/kvm_host.h>
#include "ap_bus.h"
@@ -80,10 +82,23 @@ struct ap_matrix_mdev {
struct list_head node;
struct ap_matrix matrix;
struct notifier_block group_notifier;
+ struct notifier_block iommu_notifier;
struct kvm *kvm;
+ struct kvm_s390_module_hook pqap_hook;
+ struct mdev_device *mdev;
};
extern int vfio_ap_mdev_register(void);
extern void vfio_ap_mdev_unregister(void);
+int vfio_ap_mdev_reset_queue(unsigned int apid, unsigned int apqi,
+ unsigned int retry);
+struct vfio_ap_queue {
+ struct ap_matrix_mdev *matrix_mdev;
+ unsigned long saved_pfn;
+ int apqn;
+#define VFIO_AP_ISC_INVALID 0xff
+ unsigned char saved_isc;
+};
+struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q);
#endif /* _VFIO_AP_PRIVATE_H_ */
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index 0cbcc238ef98..12fe9deb265e 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -567,6 +567,10 @@ static int xcrb_msg_to_type6_ep11cprb_msgx(struct ap_message *ap_msg,
payload_hdr = (struct pld_hdr *)((&(msg->pld_lenfmt))+lfmt);
*fcode = payload_hdr->func_val & 0xFFFF;
+ /* enable special processing based on the CPRB's flags special bit */
+ if (msg->cprbx.flags & 0x20)
+ ap_msg->special = 1;
+
return 0;
}
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index 7c5a25ddf832..ced896d1534a 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -7,10 +7,10 @@ config LCS
prompt "Lan Channel Station Interface"
depends on CCW && NETDEVICES && (ETHERNET || FDDI)
help
- Select this option if you want to use LCS networking on IBM System z.
- This device driver supports FDDI (IEEE 802.7) and Ethernet.
- To compile as a module, choose M. The module name is lcs.
- If you do not know what it is, it's safe to choose Y.
+ Select this option if you want to use LCS networking on IBM System z.
+ This device driver supports FDDI (IEEE 802.7) and Ethernet.
+ To compile as a module, choose M. The module name is lcs.
+ If you do not know what it is, it's safe to choose Y.
config CTCM
def_tristate m
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 784a2e76a1b0..c7ee07ce3615 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -25,6 +25,8 @@
#include <linux/wait.h>
#include <linux/workqueue.h>
+#include <net/dst.h>
+#include <net/ip6_fib.h>
#include <net/ipv6.h>
#include <net/if_inet6.h>
#include <net/addrconf.h>
@@ -60,7 +62,7 @@ struct qeth_dbf_info {
debug_info_t *id;
};
-#define QETH_DBF_CTRL_LEN 256
+#define QETH_DBF_CTRL_LEN 256U
#define QETH_DBF_TEXT(name, level, text) \
debug_text_event(qeth_dbf[QETH_DBF_##name].id, level, text)
@@ -525,11 +527,6 @@ struct qeth_qdio_info {
};
/**
- * buffer stuff for read channel
- */
-#define QETH_CMD_BUFFER_NO 8
-
-/**
* channel state machine
*/
enum qeth_channel_states {
@@ -537,8 +534,6 @@ enum qeth_channel_states {
CH_STATE_DOWN,
CH_STATE_HALTED,
CH_STATE_STOPPED,
- CH_STATE_RCD,
- CH_STATE_RCD_DONE,
};
/**
* card state machine
@@ -553,15 +548,11 @@ enum qeth_card_states {
* Protocol versions
*/
enum qeth_prot_versions {
+ QETH_PROT_NONE = 0x0000,
QETH_PROT_IPV4 = 0x0004,
QETH_PROT_IPV6 = 0x0006,
};
-enum qeth_cmd_buffer_state {
- BUF_STATE_FREE,
- BUF_STATE_LOCKED,
-};
-
enum qeth_cq {
QETH_CQ_DISABLED = 0,
QETH_CQ_ENABLED = 1,
@@ -575,39 +566,37 @@ struct qeth_ipato {
struct list_head entries;
};
-struct qeth_channel;
+struct qeth_channel {
+ struct ccw_device *ccwdev;
+ enum qeth_channel_states state;
+ atomic_t irq_pending;
+};
struct qeth_cmd_buffer {
- enum qeth_cmd_buffer_state state;
+ unsigned int length;
+ refcount_t ref_count;
struct qeth_channel *channel;
struct qeth_reply *reply;
long timeout;
unsigned char *data;
- void (*finalize)(struct qeth_card *card, struct qeth_cmd_buffer *iob,
- unsigned int length);
- void (*callback)(struct qeth_card *card, struct qeth_channel *channel,
- struct qeth_cmd_buffer *iob);
+ void (*finalize)(struct qeth_card *card, struct qeth_cmd_buffer *iob);
+ void (*callback)(struct qeth_card *card, struct qeth_cmd_buffer *iob);
};
+static inline void qeth_get_cmd(struct qeth_cmd_buffer *iob)
+{
+ refcount_inc(&iob->ref_count);
+}
+
static inline struct qeth_ipa_cmd *__ipa_cmd(struct qeth_cmd_buffer *iob)
{
return (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
}
-/**
- * definition of a qeth channel, used for read and write
- */
-struct qeth_channel {
- enum qeth_channel_states state;
- struct ccw1 *ccw;
- spinlock_t iob_lock;
- wait_queue_head_t wait_q;
- struct ccw_device *ccwdev;
-/*command buffer for control data*/
- struct qeth_cmd_buffer iob[QETH_CMD_BUFFER_NO];
- atomic_t irq_pending;
- int io_buf_no;
-};
+static inline struct ccw1 *__ccw_from_cmd(struct qeth_cmd_buffer *iob)
+{
+ return (struct ccw1 *)(iob->data + ALIGN(iob->length, 8));
+}
static inline bool qeth_trylock_channel(struct qeth_channel *channel)
{
@@ -665,6 +654,7 @@ struct qeth_card_info {
__u16 func_level;
char mcl_level[QETH_MCL_LENGTH + 1];
u8 open_when_online:1;
+ u8 use_v1_blkt:1;
u8 is_vm_nic:1;
int mac_bits;
enum qeth_card_types type;
@@ -725,9 +715,6 @@ struct qeth_discipline {
void (*remove) (struct ccwgroup_device *);
int (*set_online) (struct ccwgroup_device *);
int (*set_offline) (struct ccwgroup_device *);
- int (*freeze)(struct ccwgroup_device *);
- int (*thaw) (struct ccwgroup_device *);
- int (*restore)(struct ccwgroup_device *);
int (*do_ioctl)(struct net_device *dev, struct ifreq *rq, int cmd);
int (*control_event_handler)(struct qeth_card *card,
struct qeth_ipa_cmd *cmd);
@@ -764,6 +751,7 @@ struct qeth_card {
enum qeth_card_states state;
spinlock_t lock;
struct ccwgroup_device *gdev;
+ struct qeth_cmd_buffer *read_cmd;
struct qeth_channel read;
struct qeth_channel write;
struct qeth_channel data;
@@ -891,6 +879,17 @@ static inline int qeth_get_ether_cast_type(struct sk_buff *skb)
return RTN_UNICAST;
}
+static inline struct dst_entry *qeth_dst_check_rcu(struct sk_buff *skb, int ipv)
+{
+ struct dst_entry *dst = skb_dst(skb);
+ struct rt6_info *rt;
+
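+ /* For IPv6, re-validate the cached dst with its cookie; IPv4 dsts use cookie 0. */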
+ rt = (struct rt6_info *) dst;
+ if (dst)
+ dst = dst_check(dst, (ipv == 6) ? rt6_get_cookie(rt) : 0);
+ return dst;
+}
+
static inline void qeth_rx_csum(struct qeth_card *card, struct sk_buff *skb,
u8 flags)
{
@@ -925,12 +924,12 @@ static inline int qeth_is_diagass_supported(struct qeth_card *card,
int qeth_send_simple_setassparms_prot(struct qeth_card *card,
enum qeth_ipa_funcs ipa_func,
- u16 cmd_code, long data,
+ u16 cmd_code, u32 *data,
enum qeth_prot_versions prot);
/* IPv4 variant */
static inline int qeth_send_simple_setassparms(struct qeth_card *card,
enum qeth_ipa_funcs ipa_func,
- u16 cmd_code, long data)
+ u16 cmd_code, u32 *data)
{
return qeth_send_simple_setassparms_prot(card, ipa_func, cmd_code,
data, QETH_PROT_IPV4);
@@ -938,7 +937,7 @@ static inline int qeth_send_simple_setassparms(struct qeth_card *card,
static inline int qeth_send_simple_setassparms_v6(struct qeth_card *card,
enum qeth_ipa_funcs ipa_func,
- u16 cmd_code, long data)
+ u16 cmd_code, u32 *data)
{
return qeth_send_simple_setassparms_prot(card, ipa_func, cmd_code,
data, QETH_PROT_IPV6);
@@ -979,8 +978,23 @@ int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
int (*reply_cb)
(struct qeth_card *, struct qeth_reply *, unsigned long),
void *);
-struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *,
- enum qeth_ipa_cmds, enum qeth_prot_versions);
+struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card,
+ enum qeth_ipa_cmds cmd_code,
+ enum qeth_prot_versions prot,
+ unsigned int data_length);
+struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel,
+ unsigned int length, unsigned int ccws,
+ long timeout);
+struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
+ enum qeth_ipa_funcs ipa_func,
+ u16 cmd_code,
+ unsigned int data_length,
+ enum qeth_prot_versions prot);
+struct qeth_cmd_buffer *qeth_get_diag_cmd(struct qeth_card *card,
+ enum qeth_diags_cmds sub_cmd,
+ unsigned int data_length);
+void qeth_put_cmd(struct qeth_cmd_buffer *iob);
+
struct sk_buff *qeth_core_get_next_skb(struct qeth_card *,
struct qeth_qdio_buffer *, struct qdio_buffer_element **, int *,
struct qeth_hdr **);
@@ -989,15 +1003,13 @@ int qeth_poll(struct napi_struct *napi, int budget);
void qeth_clear_ipacmd_list(struct qeth_card *);
int qeth_qdio_clear_card(struct qeth_card *, int);
void qeth_clear_working_pool_list(struct qeth_card *);
-void qeth_clear_cmd_buffers(struct qeth_channel *);
void qeth_drain_output_queues(struct qeth_card *card);
void qeth_setadp_promisc_mode(struct qeth_card *);
int qeth_setadpparms_change_macaddr(struct qeth_card *);
void qeth_tx_timeout(struct net_device *);
-void qeth_release_buffer(struct qeth_channel *, struct qeth_cmd_buffer *);
+void qeth_notify_reply(struct qeth_reply *reply, int reason);
void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
u16 cmd_length);
-struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *);
int qeth_query_switch_attributes(struct qeth_card *card,
struct qeth_switch_info *sw_info);
int qeth_query_card_info(struct qeth_card *card,
@@ -1014,10 +1026,6 @@ int qeth_configure_cq(struct qeth_card *, enum qeth_cq);
int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action);
void qeth_trace_features(struct qeth_card *);
int qeth_setassparms_cb(struct qeth_card *, struct qeth_reply *, unsigned long);
-struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *,
- enum qeth_ipa_funcs,
- __u16, __u16,
- enum qeth_prot_versions);
int qeth_set_features(struct net_device *, netdev_features_t);
void qeth_enable_hw_features(struct net_device *dev);
netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t);
@@ -1032,11 +1040,10 @@ int qeth_stop(struct net_device *dev);
int qeth_vm_request_mac(struct qeth_card *card);
int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
- struct qeth_qdio_out_q *queue, int ipv, int cast_type,
+ struct qeth_qdio_out_q *queue, int ipv,
void (*fill_header)(struct qeth_qdio_out_q *queue,
struct qeth_hdr *hdr, struct sk_buff *skb,
- int ipv, int cast_type,
- unsigned int data_len));
+ int ipv, unsigned int data_len));
/* exports for OSN */
int qeth_osn_assist(struct net_device *, void *, int);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index b1823d75dd35..4d0caeebc802 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -20,6 +20,7 @@
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/mii.h>
+#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
@@ -62,9 +63,7 @@ static struct device *qeth_core_root_dev;
static struct lock_class_key qdio_out_skb_queue_key;
static void qeth_issue_next_read_cb(struct qeth_card *card,
- struct qeth_channel *channel,
struct qeth_cmd_buffer *iob);
-static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *);
static void qeth_free_buffer_pool(struct qeth_card *);
static int qeth_qdio_establish(struct qeth_card *);
static void qeth_free_qdio_queues(struct qeth_card *card);
@@ -292,7 +291,7 @@ static int qeth_cq_init(struct qeth_card *card)
int rc;
if (card->options.cq == QETH_CQ_ENABLED) {
- QETH_DBF_TEXT(SETUP, 2, "cqinit");
+ QETH_CARD_TEXT(card, 2, "cqinit");
qdio_reset_buffers(card->qdio.c_q->qdio_bufs,
QDIO_MAX_BUFFERS_PER_Q);
card->qdio.c_q->next_buf_to_init = 127;
@@ -300,7 +299,7 @@ static int qeth_cq_init(struct qeth_card *card)
card->qdio.no_in_queues - 1, 0,
127);
if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
+ QETH_CARD_TEXT_(card, 2, "1err%d", rc);
goto out;
}
}
@@ -317,7 +316,7 @@ static int qeth_alloc_cq(struct qeth_card *card)
int i;
struct qdio_outbuf_state *outbuf_states;
- QETH_DBF_TEXT(SETUP, 2, "cqon");
+ QETH_CARD_TEXT(card, 2, "cqon");
card->qdio.c_q = qeth_alloc_qdio_queue();
if (!card->qdio.c_q) {
rc = -1;
@@ -339,11 +338,11 @@ static int qeth_alloc_cq(struct qeth_card *card)
outbuf_states += QDIO_MAX_BUFFERS_PER_Q;
}
} else {
- QETH_DBF_TEXT(SETUP, 2, "nocq");
+ QETH_CARD_TEXT(card, 2, "nocq");
card->qdio.c_q = NULL;
card->qdio.no_in_queues = 1;
}
- QETH_DBF_TEXT_(SETUP, 2, "iqc%d", card->qdio.no_in_queues);
+ QETH_CARD_TEXT_(card, 2, "iqc%d", card->qdio.no_in_queues);
rc = 0;
out:
return rc;
@@ -486,42 +485,39 @@ static inline int qeth_is_cq(struct qeth_card *card, unsigned int queue)
queue == card->qdio.no_in_queues - 1;
}
-static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u32 len, void *data)
+static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len,
+ void *data)
{
ccw->cmd_code = cmd_code;
- ccw->flags = CCW_FLAG_SLI;
+ ccw->flags = flags | CCW_FLAG_SLI;
ccw->count = len;
ccw->cda = (__u32) __pa(data);
}
static int __qeth_issue_next_read(struct qeth_card *card)
{
- struct qeth_channel *channel = &card->read;
- struct qeth_cmd_buffer *iob;
+ struct qeth_cmd_buffer *iob = card->read_cmd;
+ struct qeth_channel *channel = iob->channel;
+ struct ccw1 *ccw = __ccw_from_cmd(iob);
int rc;
QETH_CARD_TEXT(card, 5, "issnxrd");
if (channel->state != CH_STATE_UP)
return -EIO;
- iob = qeth_get_buffer(channel);
- if (!iob) {
- dev_warn(&card->gdev->dev, "The qeth device driver "
- "failed to recover an error on the device\n");
- QETH_DBF_MESSAGE(2, "issue_next_read on device %x failed: no iob available\n",
- CARD_DEVID(card));
- return -ENOMEM;
- }
- qeth_setup_ccw(channel->ccw, CCW_CMD_READ, QETH_BUFSIZE, iob->data);
+ memset(iob->data, 0, iob->length);
+ qeth_setup_ccw(ccw, CCW_CMD_READ, 0, iob->length, iob->data);
iob->callback = qeth_issue_next_read_cb;
+ /* keep the cmd alive after completion: */
+ qeth_get_cmd(iob);
+
QETH_CARD_TEXT(card, 6, "noirqpnd");
- rc = ccw_device_start(channel->ccwdev, channel->ccw,
- (addr_t) iob, 0, 0);
+ rc = ccw_device_start(channel->ccwdev, ccw, (addr_t) iob, 0, 0);
if (rc) {
QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
rc, CARD_DEVID(card));
atomic_set(&channel->irq_pending, 0);
- qeth_release_buffer(channel, iob);
+ qeth_put_cmd(iob);
card->read_or_write_problem = 1;
qeth_schedule_recovery(card);
wake_up(&card->wait_q);
@@ -577,11 +573,12 @@ static void qeth_dequeue_reply(struct qeth_card *card, struct qeth_reply *reply)
spin_unlock_irq(&card->lock);
}
-static void qeth_notify_reply(struct qeth_reply *reply, int reason)
+void qeth_notify_reply(struct qeth_reply *reply, int reason)
{
reply->rc = reason;
complete(&reply->received);
}
+EXPORT_SYMBOL_GPL(qeth_notify_reply);
static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
struct qeth_card *card)
@@ -692,48 +689,21 @@ static int qeth_check_idx_response(struct qeth_card *card,
return 0;
}
-static struct qeth_cmd_buffer *__qeth_get_buffer(struct qeth_channel *channel)
-{
- __u8 index;
-
- index = channel->io_buf_no;
- do {
- if (channel->iob[index].state == BUF_STATE_FREE) {
- channel->iob[index].state = BUF_STATE_LOCKED;
- channel->iob[index].timeout = QETH_TIMEOUT;
- channel->io_buf_no = (channel->io_buf_no + 1) %
- QETH_CMD_BUFFER_NO;
- memset(channel->iob[index].data, 0, QETH_BUFSIZE);
- return channel->iob + index;
- }
- index = (index + 1) % QETH_CMD_BUFFER_NO;
- } while (index != channel->io_buf_no);
-
- return NULL;
-}
-
-void qeth_release_buffer(struct qeth_channel *channel,
- struct qeth_cmd_buffer *iob)
+void qeth_put_cmd(struct qeth_cmd_buffer *iob)
{
- unsigned long flags;
-
- spin_lock_irqsave(&channel->iob_lock, flags);
- iob->state = BUF_STATE_FREE;
- iob->callback = NULL;
- if (iob->reply) {
- qeth_put_reply(iob->reply);
- iob->reply = NULL;
+ if (refcount_dec_and_test(&iob->ref_count)) {
+ if (iob->reply)
+ qeth_put_reply(iob->reply);
+ kfree(iob->data);
+ kfree(iob);
}
- spin_unlock_irqrestore(&channel->iob_lock, flags);
- wake_up(&channel->wait_q);
}
-EXPORT_SYMBOL_GPL(qeth_release_buffer);
+EXPORT_SYMBOL_GPL(qeth_put_cmd);
static void qeth_release_buffer_cb(struct qeth_card *card,
- struct qeth_channel *channel,
struct qeth_cmd_buffer *iob)
{
- qeth_release_buffer(channel, iob);
+ qeth_put_cmd(iob);
}
static void qeth_cancel_cmd(struct qeth_cmd_buffer *iob, int rc)
@@ -742,41 +712,38 @@ static void qeth_cancel_cmd(struct qeth_cmd_buffer *iob, int rc)
if (reply)
qeth_notify_reply(reply, rc);
- qeth_release_buffer(iob->channel, iob);
+ qeth_put_cmd(iob);
}
-static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *channel)
+struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel,
+ unsigned int length, unsigned int ccws,
+ long timeout)
{
- struct qeth_cmd_buffer *buffer = NULL;
- unsigned long flags;
+ struct qeth_cmd_buffer *iob;
- spin_lock_irqsave(&channel->iob_lock, flags);
- buffer = __qeth_get_buffer(channel);
- spin_unlock_irqrestore(&channel->iob_lock, flags);
- return buffer;
-}
+ if (length > QETH_BUFSIZE)
+ return NULL;
-struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *channel)
-{
- struct qeth_cmd_buffer *buffer;
- wait_event(channel->wait_q,
- ((buffer = qeth_get_buffer(channel)) != NULL));
- return buffer;
-}
-EXPORT_SYMBOL_GPL(qeth_wait_for_buffer);
+ iob = kzalloc(sizeof(*iob), GFP_KERNEL);
+ if (!iob)
+ return NULL;
-void qeth_clear_cmd_buffers(struct qeth_channel *channel)
-{
- int cnt;
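+ /* The command data is followed by the CCW(s), aligned to 8 bytes (see __ccw_from_cmd()). */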
+ iob->data = kzalloc(ALIGN(length, 8) + ccws * sizeof(struct ccw1),
+ GFP_KERNEL | GFP_DMA);
+ if (!iob->data) {
+ kfree(iob);
+ return NULL;
+ }
- for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++)
- qeth_release_buffer(channel, &channel->iob[cnt]);
- channel->io_buf_no = 0;
+ refcount_set(&iob->ref_count, 1);
+ iob->channel = channel;
+ iob->timeout = timeout;
+ iob->length = length;
+ return iob;
}
-EXPORT_SYMBOL_GPL(qeth_clear_cmd_buffers);
+EXPORT_SYMBOL_GPL(qeth_alloc_cmd);
static void qeth_issue_next_read_cb(struct qeth_card *card,
- struct qeth_channel *channel,
struct qeth_cmd_buffer *iob)
{
struct qeth_ipa_cmd *cmd = NULL;
@@ -849,7 +816,8 @@ out:
memcpy(&card->seqno.pdu_hdr_ack,
QETH_PDU_HEADER_SEQ_NO(iob->data),
QETH_SEQ_NO_LENGTH);
- qeth_release_buffer(channel, iob);
+ qeth_put_cmd(iob);
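+ /* Re-issue the next READ right away so the control channel stays armed. */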
+ __qeth_issue_next_read(card);
}
static int qeth_set_thread_start_bit(struct qeth_card *card,
@@ -976,7 +944,7 @@ static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
}
static int qeth_check_irb_error(struct qeth_card *card, struct ccw_device *cdev,
- unsigned long intparm, struct irb *irb)
+ struct irb *irb)
{
if (!IS_ERR(irb))
return 0;
@@ -993,12 +961,6 @@ static int qeth_check_irb_error(struct qeth_card *card, struct ccw_device *cdev,
" on the device\n");
QETH_CARD_TEXT(card, 2, "ckirberr");
QETH_CARD_TEXT_(card, 2, " rc%d", -ETIMEDOUT);
- if (intparm == QETH_RCD_PARM) {
- if (card->data.ccwdev == cdev) {
- card->data.state = CH_STATE_DOWN;
- wake_up(&card->wait_q);
- }
- }
return -ETIMEDOUT;
default:
QETH_DBF_MESSAGE(2, "unknown error %ld on channel %x\n",
@@ -1041,7 +1003,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
if (qeth_intparm_is_iob(intparm))
iob = (struct qeth_cmd_buffer *) __va((addr_t)intparm);
- rc = qeth_check_irb_error(card, cdev, intparm, irb);
+ rc = qeth_check_irb_error(card, cdev, irb);
if (rc) {
/* IO was terminated, free its resources. */
if (iob)
@@ -1059,11 +1021,6 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC))
channel->state = CH_STATE_HALTED;
- /*let's wake up immediately on data channel*/
- if ((channel == &card->data) && (intparm != 0) &&
- (intparm != QETH_RCD_PARM))
- goto out;
-
if (intparm == QETH_CLEAR_CHANNEL_PARM) {
QETH_CARD_TEXT(card, 6, "clrchpar");
/* we don't have to handle this further */
@@ -1093,10 +1050,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
print_hex_dump(KERN_WARNING, "qeth: sense data ",
DUMP_PREFIX_OFFSET, 16, 1, irb->ecw, 32, 1);
}
- if (intparm == QETH_RCD_PARM) {
- channel->state = CH_STATE_DOWN;
- goto out;
- }
+
rc = qeth_get_problem(card, cdev, irb);
if (rc) {
card->read_or_write_problem = 1;
@@ -1108,18 +1062,8 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
}
}
- if (intparm == QETH_RCD_PARM) {
- channel->state = CH_STATE_RCD_DONE;
- goto out;
- }
- if (channel == &card->data)
- return;
- if (channel == &card->read &&
- channel->state == CH_STATE_UP)
- __qeth_issue_next_read(card);
-
if (iob && iob->callback)
- iob->callback(card, iob->channel, iob);
+ iob->callback(card, iob);
out:
wake_up(&card->wait_q);
@@ -1222,56 +1166,26 @@ static void qeth_free_buffer_pool(struct qeth_card *card)
static void qeth_clean_channel(struct qeth_channel *channel)
{
struct ccw_device *cdev = channel->ccwdev;
- int cnt;
QETH_DBF_TEXT(SETUP, 2, "freech");
spin_lock_irq(get_ccwdev_lock(cdev));
cdev->handler = NULL;
spin_unlock_irq(get_ccwdev_lock(cdev));
-
- for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++)
- kfree(channel->iob[cnt].data);
- kfree(channel->ccw);
}
-static int qeth_setup_channel(struct qeth_channel *channel, bool alloc_buffers)
+static void qeth_setup_channel(struct qeth_channel *channel)
{
struct ccw_device *cdev = channel->ccwdev;
- int cnt;
QETH_DBF_TEXT(SETUP, 2, "setupch");
- channel->ccw = kmalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
- if (!channel->ccw)
- return -ENOMEM;
channel->state = CH_STATE_DOWN;
atomic_set(&channel->irq_pending, 0);
- init_waitqueue_head(&channel->wait_q);
spin_lock_irq(get_ccwdev_lock(cdev));
cdev->handler = qeth_irq;
spin_unlock_irq(get_ccwdev_lock(cdev));
-
- if (!alloc_buffers)
- return 0;
-
- for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) {
- channel->iob[cnt].data = kmalloc(QETH_BUFSIZE,
- GFP_KERNEL | GFP_DMA);
- if (channel->iob[cnt].data == NULL)
- break;
- channel->iob[cnt].state = BUF_STATE_FREE;
- channel->iob[cnt].channel = channel;
- }
- if (cnt < QETH_CMD_BUFFER_NO) {
- qeth_clean_channel(channel);
- return -ENOMEM;
- }
- channel->io_buf_no = 0;
- spin_lock_init(&channel->iob_lock);
-
- return 0;
}
static int qeth_osa_set_output_queues(struct qeth_card *card, bool single)
@@ -1306,7 +1220,7 @@ static int qeth_update_from_chp_desc(struct qeth_card *card)
struct channel_path_desc_fmt0 *chp_dsc;
int rc = 0;
- QETH_DBF_TEXT(SETUP, 2, "chp_desc");
+ QETH_CARD_TEXT(card, 2, "chp_desc");
ccwdev = card->data.ccwdev;
chp_dsc = ccw_device_get_chp_desc(ccwdev, 0);
@@ -1320,14 +1234,14 @@ static int qeth_update_from_chp_desc(struct qeth_card *card)
rc = qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02);
kfree(chp_dsc);
- QETH_DBF_TEXT_(SETUP, 2, "nr:%x", card->qdio.no_out_queues);
- QETH_DBF_TEXT_(SETUP, 2, "lvl:%02x", card->info.func_level);
+ QETH_CARD_TEXT_(card, 2, "nr:%x", card->qdio.no_out_queues);
+ QETH_CARD_TEXT_(card, 2, "lvl:%02x", card->info.func_level);
return rc;
}
static void qeth_init_qdio_info(struct qeth_card *card)
{
- QETH_DBF_TEXT(SETUP, 4, "intqdinf");
+ QETH_CARD_TEXT(card, 4, "intqdinf");
atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
@@ -1393,8 +1307,7 @@ static void qeth_start_kernel_thread(struct work_struct *work)
static void qeth_buffer_reclaim_work(struct work_struct *);
static void qeth_setup_card(struct qeth_card *card)
{
- QETH_DBF_TEXT(SETUP, 2, "setupcrd");
- QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
+ QETH_CARD_TEXT(card, 2, "setupcrd");
card->info.type = CARD_RDEV(card)->id.driver_info;
card->state = CARD_STATE_DOWN;
@@ -1442,21 +1355,19 @@ static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
dev_name(&gdev->dev));
if (!card->event_wq)
goto out_wq;
- if (qeth_setup_channel(&card->read, true))
- goto out_ip;
- if (qeth_setup_channel(&card->write, true))
- goto out_channel;
- if (qeth_setup_channel(&card->data, false))
- goto out_data;
+
+ card->read_cmd = qeth_alloc_cmd(&card->read, QETH_BUFSIZE, 1, 0);
+ if (!card->read_cmd)
+ goto out_read_cmd;
+
+ qeth_setup_channel(&card->read);
+ qeth_setup_channel(&card->write);
+ qeth_setup_channel(&card->data);
card->qeth_service_level.seq_print = qeth_core_sl_print;
register_service_level(&card->qeth_service_level);
return card;
-out_data:
- qeth_clean_channel(&card->write);
-out_channel:
- qeth_clean_channel(&card->read);
-out_ip:
+out_read_cmd:
destroy_workqueue(card->event_wq);
out_wq:
dev_set_drvdata(&gdev->dev, NULL);
@@ -1582,60 +1493,6 @@ int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
}
EXPORT_SYMBOL_GPL(qeth_qdio_clear_card);
-static int qeth_read_conf_data(struct qeth_card *card, void **buffer,
- int *length)
-{
- struct ciw *ciw;
- char *rcd_buf;
- int ret;
- struct qeth_channel *channel = &card->data;
-
- /*
- * scan for RCD command in extended SenseID data
- */
- ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD);
- if (!ciw || ciw->cmd == 0)
- return -EOPNOTSUPP;
- rcd_buf = kzalloc(ciw->count, GFP_KERNEL | GFP_DMA);
- if (!rcd_buf)
- return -ENOMEM;
-
- qeth_setup_ccw(channel->ccw, ciw->cmd, ciw->count, rcd_buf);
- channel->state = CH_STATE_RCD;
- spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
- ret = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
- QETH_RCD_PARM, LPM_ANYPATH, 0,
- QETH_RCD_TIMEOUT);
- spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
- if (!ret)
- wait_event(card->wait_q,
- (channel->state == CH_STATE_RCD_DONE ||
- channel->state == CH_STATE_DOWN));
- if (channel->state == CH_STATE_DOWN)
- ret = -EIO;
- else
- channel->state = CH_STATE_DOWN;
- if (ret) {
- kfree(rcd_buf);
- *buffer = NULL;
- *length = 0;
- } else {
- *length = ciw->count;
- *buffer = rcd_buf;
- }
- return ret;
-}
-
-static void qeth_configure_unitaddr(struct qeth_card *card, char *prcd)
-{
- QETH_DBF_TEXT(SETUP, 2, "cfgunit");
- card->info.chpid = prcd[30];
- card->info.unit_addr2 = prcd[31];
- card->info.cula = prcd[63];
- card->info.is_vm_nic = ((prcd[0x10] == _ascebc['V']) &&
- (prcd[0x11] == _ascebc['M']));
-}
-
static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card)
{
enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
@@ -1645,7 +1502,7 @@ static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card)
char userid[80];
int rc = 0;
- QETH_DBF_TEXT(SETUP, 2, "vmlayer");
+ QETH_CARD_TEXT(card, 2, "vmlayer");
cpcmd("QUERY USERID", userid, sizeof(userid), &rc);
if (rc)
@@ -1688,7 +1545,7 @@ out:
kfree(response);
kfree(request);
if (rc)
- QETH_DBF_TEXT_(SETUP, 2, "err%x", rc);
+ QETH_CARD_TEXT_(card, 2, "err%x", rc);
return disc;
}
@@ -1705,24 +1562,23 @@ static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card)
switch (disc) {
case QETH_DISCIPLINE_LAYER2:
- QETH_DBF_TEXT(SETUP, 3, "force l2");
+ QETH_CARD_TEXT(card, 3, "force l2");
break;
case QETH_DISCIPLINE_LAYER3:
- QETH_DBF_TEXT(SETUP, 3, "force l3");
+ QETH_CARD_TEXT(card, 3, "force l3");
break;
default:
- QETH_DBF_TEXT(SETUP, 3, "force no");
+ QETH_CARD_TEXT(card, 3, "force no");
}
return disc;
}
-static void qeth_configure_blkt_default(struct qeth_card *card, char *prcd)
+static void qeth_set_blkt_defaults(struct qeth_card *card)
{
- QETH_DBF_TEXT(SETUP, 2, "cfgblkt");
+ QETH_CARD_TEXT(card, 2, "cfgblkt");
- if (prcd[74] == 0xF0 && prcd[75] == 0xF0 &&
- prcd[76] >= 0xF1 && prcd[76] <= 0xF4) {
+ if (card->info.use_v1_blkt) {
card->info.blkt.time_total = 0;
card->info.blkt.inter_packet = 0;
card->info.blkt.inter_packet_jumbo = 0;
@@ -1758,11 +1614,8 @@ static void qeth_init_func_level(struct qeth_card *card)
}
static void qeth_idx_finalize_cmd(struct qeth_card *card,
- struct qeth_cmd_buffer *iob,
- unsigned int length)
+ struct qeth_cmd_buffer *iob)
{
- qeth_setup_ccw(iob->channel->ccw, CCW_CMD_WRITE, length, iob->data);
-
memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), &card->seqno.trans_hdr,
QETH_SEQ_NO_LENGTH);
if (iob->channel == &card->write)
@@ -1779,10 +1632,9 @@ static int qeth_peer_func_level(int level)
}
static void qeth_mpc_finalize_cmd(struct qeth_card *card,
- struct qeth_cmd_buffer *iob,
- unsigned int length)
+ struct qeth_cmd_buffer *iob)
{
- qeth_idx_finalize_cmd(card, iob, length);
+ qeth_idx_finalize_cmd(card, iob);
memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
&card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
@@ -1794,10 +1646,26 @@ static void qeth_mpc_finalize_cmd(struct qeth_card *card,
iob->callback = qeth_release_buffer_cb;
}
+static struct qeth_cmd_buffer *qeth_mpc_alloc_cmd(struct qeth_card *card,
+ void *data,
+ unsigned int data_length)
+{
+ struct qeth_cmd_buffer *iob;
+
+ iob = qeth_alloc_cmd(&card->write, data_length, 1, QETH_TIMEOUT);
+ if (!iob)
+ return NULL;
+
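+ /* Copy the static MPC command template and prepare a single WRITE CCW for it. */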
+ memcpy(iob->data, data, data_length);
+ qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, data_length,
+ iob->data);
+ iob->finalize = qeth_mpc_finalize_cmd;
+ return iob;
+}
+
/**
* qeth_send_control_data() - send control command to the card
* @card: qeth_card structure pointer
- * @len: size of the command buffer
* @iob: qeth_cmd_buffer pointer
* @reply_cb: callback function pointer
* @cb_card: pointer to the qeth_card structure
@@ -1817,7 +1685,7 @@ static void qeth_mpc_finalize_cmd(struct qeth_card *card,
* field 'param' of the structure qeth_reply.
*/
-static int qeth_send_control_data(struct qeth_card *card, int len,
+static int qeth_send_control_data(struct qeth_card *card,
struct qeth_cmd_buffer *iob,
int (*reply_cb)(struct qeth_card *cb_card,
struct qeth_reply *cb_reply,
@@ -1833,13 +1701,13 @@ static int qeth_send_control_data(struct qeth_card *card, int len,
reply = qeth_alloc_reply(card);
if (!reply) {
- qeth_release_buffer(channel, iob);
+ qeth_put_cmd(iob);
return -ENOMEM;
}
reply->callback = reply_cb;
reply->param = reply_param;
- /* pairs with qeth_release_buffer(): */
+ /* pairs with qeth_put_cmd(): */
qeth_get_reply(reply);
iob->reply = reply;
@@ -1848,18 +1716,19 @@ static int qeth_send_control_data(struct qeth_card *card, int len,
timeout);
if (timeout <= 0) {
qeth_put_reply(reply);
- qeth_release_buffer(channel, iob);
+ qeth_put_cmd(iob);
return (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
}
- iob->finalize(card, iob, len);
- QETH_DBF_HEX(CTRL, 2, iob->data, min(len, QETH_DBF_CTRL_LEN));
+ if (iob->finalize)
+ iob->finalize(card, iob);
+ QETH_DBF_HEX(CTRL, 2, iob->data, min(iob->length, QETH_DBF_CTRL_LEN));
qeth_enqueue_reply(card, reply);
QETH_CARD_TEXT(card, 6, "noirqpnd");
spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
- rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
+ rc = ccw_device_start_timeout(channel->ccwdev, __ccw_from_cmd(iob),
(addr_t) iob, 0, 0, timeout);
spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
if (rc) {
@@ -1868,7 +1737,7 @@ static int qeth_send_control_data(struct qeth_card *card, int len,
QETH_CARD_TEXT_(card, 2, " err%d", rc);
qeth_dequeue_reply(card, reply);
qeth_put_reply(reply);
- qeth_release_buffer(channel, iob);
+ qeth_put_cmd(iob);
atomic_set(&channel->irq_pending, 0);
wake_up(&card->wait_q);
return rc;
@@ -1886,6 +1755,46 @@ static int qeth_send_control_data(struct qeth_card *card, int len,
return rc;
}
+static void qeth_read_conf_data_cb(struct qeth_card *card,
+ struct qeth_cmd_buffer *iob)
+{
+ unsigned char *prcd = iob->data;
+
+ QETH_CARD_TEXT(card, 2, "cfgunit");
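+ /* Parse CHPID, unit addresses, the z/VM NIC indication and the BLKT format from the RCD data. */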
+ card->info.chpid = prcd[30];
+ card->info.unit_addr2 = prcd[31];
+ card->info.cula = prcd[63];
+ card->info.is_vm_nic = ((prcd[0x10] == _ascebc['V']) &&
+ (prcd[0x11] == _ascebc['M']));
+ card->info.use_v1_blkt = prcd[74] == 0xF0 && prcd[75] == 0xF0 &&
+ prcd[76] >= 0xF1 && prcd[76] <= 0xF4;
+
+ qeth_notify_reply(iob->reply, 0);
+ qeth_put_cmd(iob);
+}
+
+static int qeth_read_conf_data(struct qeth_card *card)
+{
+ struct qeth_channel *channel = &card->data;
+ struct qeth_cmd_buffer *iob;
+ struct ciw *ciw;
+
+ /* scan for RCD command in extended SenseID data */
+ ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD);
+ if (!ciw || ciw->cmd == 0)
+ return -EOPNOTSUPP;
+
+ iob = qeth_alloc_cmd(channel, ciw->count, 1, QETH_RCD_TIMEOUT);
+ if (!iob)
+ return -ENOMEM;
+
+ iob->callback = qeth_read_conf_data_cb;
+ qeth_setup_ccw(__ccw_from_cmd(iob), ciw->cmd, 0, iob->length,
+ iob->data);
+
+ return qeth_send_control_data(card, iob, NULL, NULL);
+}
+
static int qeth_idx_check_activate_response(struct qeth_card *card,
struct qeth_channel *channel,
struct qeth_cmd_buffer *iob)
@@ -1900,8 +1809,8 @@ static int qeth_idx_check_activate_response(struct qeth_card *card,
return 0;
/* negative reply: */
- QETH_DBF_TEXT_(SETUP, 2, "idxneg%c",
- QETH_IDX_ACT_CAUSE_CODE(iob->data));
+ QETH_CARD_TEXT_(card, 2, "idxneg%c",
+ QETH_IDX_ACT_CAUSE_CODE(iob->data));
switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) {
case QETH_IDX_ACT_ERR_EXCL:
@@ -1920,14 +1829,14 @@ static int qeth_idx_check_activate_response(struct qeth_card *card,
}
}
-static void qeth_idx_query_read_cb(struct qeth_card *card,
- struct qeth_channel *channel,
- struct qeth_cmd_buffer *iob)
+static void qeth_idx_activate_read_channel_cb(struct qeth_card *card,
+ struct qeth_cmd_buffer *iob)
{
+ struct qeth_channel *channel = iob->channel;
u16 peer_level;
int rc;
- QETH_DBF_TEXT(SETUP, 2, "idxrdcb");
+ QETH_CARD_TEXT(card, 2, "idxrdcb");
rc = qeth_idx_check_activate_response(card, channel, iob);
if (rc)
@@ -1950,17 +1859,17 @@ static void qeth_idx_query_read_cb(struct qeth_card *card,
out:
qeth_notify_reply(iob->reply, rc);
- qeth_release_buffer(channel, iob);
+ qeth_put_cmd(iob);
}
-static void qeth_idx_query_write_cb(struct qeth_card *card,
- struct qeth_channel *channel,
- struct qeth_cmd_buffer *iob)
+static void qeth_idx_activate_write_channel_cb(struct qeth_card *card,
+ struct qeth_cmd_buffer *iob)
{
+ struct qeth_channel *channel = iob->channel;
u16 peer_level;
int rc;
- QETH_DBF_TEXT(SETUP, 2, "idxwrcb");
+ QETH_CARD_TEXT(card, 2, "idxwrcb");
rc = qeth_idx_check_activate_response(card, channel, iob);
if (rc)
@@ -1977,22 +1886,7 @@ static void qeth_idx_query_write_cb(struct qeth_card *card,
out:
qeth_notify_reply(iob->reply, rc);
- qeth_release_buffer(channel, iob);
-}
-
-static void qeth_idx_finalize_query_cmd(struct qeth_card *card,
- struct qeth_cmd_buffer *iob,
- unsigned int length)
-{
- qeth_setup_ccw(iob->channel->ccw, CCW_CMD_READ, length, iob->data);
-}
-
-static void qeth_idx_activate_cb(struct qeth_card *card,
- struct qeth_channel *channel,
- struct qeth_cmd_buffer *iob)
-{
- qeth_notify_reply(iob->reply, 0);
- qeth_release_buffer(channel, iob);
+ qeth_put_cmd(iob);
}
static void qeth_idx_setup_activate_cmd(struct qeth_card *card,
@@ -2000,11 +1894,14 @@ static void qeth_idx_setup_activate_cmd(struct qeth_card *card,
{
u16 addr = (card->info.cula << 8) + card->info.unit_addr2;
u8 port = ((u8)card->dev->dev_port) | 0x80;
+ struct ccw1 *ccw = __ccw_from_cmd(iob);
struct ccw_dev_id dev_id;
+ qeth_setup_ccw(&ccw[0], CCW_CMD_WRITE, CCW_FLAG_CC, IDX_ACTIVATE_SIZE,
+ iob->data);
+ qeth_setup_ccw(&ccw[1], CCW_CMD_READ, 0, iob->length, iob->data);
ccw_device_get_id(CARD_DDEV(card), &dev_id);
iob->finalize = qeth_idx_finalize_cmd;
- iob->callback = qeth_idx_activate_cb;
memcpy(QETH_IDX_ACT_PNO(iob->data), &port, 1);
memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
@@ -2021,26 +1918,17 @@ static int qeth_idx_activate_read_channel(struct qeth_card *card)
struct qeth_cmd_buffer *iob;
int rc;
- QETH_DBF_TEXT(SETUP, 2, "idxread");
+ QETH_CARD_TEXT(card, 2, "idxread");
- iob = qeth_get_buffer(channel);
+ iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
if (!iob)
return -ENOMEM;
memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
qeth_idx_setup_activate_cmd(card, iob);
+ iob->callback = qeth_idx_activate_read_channel_cb;
- rc = qeth_send_control_data(card, IDX_ACTIVATE_SIZE, iob, NULL, NULL);
- if (rc)
- return rc;
-
- iob = qeth_get_buffer(channel);
- if (!iob)
- return -ENOMEM;
-
- iob->finalize = qeth_idx_finalize_query_cmd;
- iob->callback = qeth_idx_query_read_cb;
- rc = qeth_send_control_data(card, QETH_BUFSIZE, iob, NULL, NULL);
+ rc = qeth_send_control_data(card, iob, NULL, NULL);
if (rc)
return rc;
@@ -2054,26 +1942,17 @@ static int qeth_idx_activate_write_channel(struct qeth_card *card)
struct qeth_cmd_buffer *iob;
int rc;
- QETH_DBF_TEXT(SETUP, 2, "idxwrite");
+ QETH_CARD_TEXT(card, 2, "idxwrite");
- iob = qeth_get_buffer(channel);
+ iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
if (!iob)
return -ENOMEM;
memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
qeth_idx_setup_activate_cmd(card, iob);
+ iob->callback = qeth_idx_activate_write_channel_cb;
- rc = qeth_send_control_data(card, IDX_ACTIVATE_SIZE, iob, NULL, NULL);
- if (rc)
- return rc;
-
- iob = qeth_get_buffer(channel);
- if (!iob)
- return -ENOMEM;
-
- iob->finalize = qeth_idx_finalize_query_cmd;
- iob->callback = qeth_idx_query_write_cb;
- rc = qeth_send_control_data(card, QETH_BUFSIZE, iob, NULL, NULL);
+ rc = qeth_send_control_data(card, iob, NULL, NULL);
if (rc)
return rc;
@@ -2086,7 +1965,7 @@ static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
{
struct qeth_cmd_buffer *iob;
- QETH_DBF_TEXT(SETUP, 2, "cmenblcb");
+ QETH_CARD_TEXT(card, 2, "cmenblcb");
iob = (struct qeth_cmd_buffer *) data;
memcpy(&card->token.cm_filter_r,
@@ -2097,23 +1976,20 @@ static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
static int qeth_cm_enable(struct qeth_card *card)
{
- int rc;
struct qeth_cmd_buffer *iob;
- QETH_DBF_TEXT(SETUP, 2, "cmenable");
+ QETH_CARD_TEXT(card, 2, "cmenable");
- iob = qeth_wait_for_buffer(&card->write);
- iob->finalize = qeth_mpc_finalize_cmd;
- memcpy(iob->data, CM_ENABLE, CM_ENABLE_SIZE);
+ iob = qeth_mpc_alloc_cmd(card, CM_ENABLE, CM_ENABLE_SIZE);
+ if (!iob)
+ return -ENOMEM;
memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data),
&card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data),
&card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH);
- rc = qeth_send_control_data(card, CM_ENABLE_SIZE, iob,
- qeth_cm_enable_cb, NULL);
- return rc;
+ return qeth_send_control_data(card, iob, qeth_cm_enable_cb, NULL);
}
static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
@@ -2121,7 +1997,7 @@ static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
{
struct qeth_cmd_buffer *iob;
- QETH_DBF_TEXT(SETUP, 2, "cmsetpcb");
+ QETH_CARD_TEXT(card, 2, "cmsetpcb");
iob = (struct qeth_cmd_buffer *) data;
memcpy(&card->token.cm_connection_r,
@@ -2132,14 +2008,13 @@ static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
static int qeth_cm_setup(struct qeth_card *card)
{
- int rc;
struct qeth_cmd_buffer *iob;
- QETH_DBF_TEXT(SETUP, 2, "cmsetup");
+ QETH_CARD_TEXT(card, 2, "cmsetup");
- iob = qeth_wait_for_buffer(&card->write);
- iob->finalize = qeth_mpc_finalize_cmd;
- memcpy(iob->data, CM_SETUP, CM_SETUP_SIZE);
+ iob = qeth_mpc_alloc_cmd(card, CM_SETUP, CM_SETUP_SIZE);
+ if (!iob)
+ return -ENOMEM;
memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data),
&card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
@@ -2147,9 +2022,7 @@ static int qeth_cm_setup(struct qeth_card *card)
&card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH);
memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data),
&card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH);
- rc = qeth_send_control_data(card, CM_SETUP_SIZE, iob,
- qeth_cm_setup_cb, NULL);
- return rc;
+ return qeth_send_control_data(card, iob, qeth_cm_setup_cb, NULL);
}
static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu)
@@ -2214,7 +2087,7 @@ static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
__u8 link_type;
struct qeth_cmd_buffer *iob;
- QETH_DBF_TEXT(SETUP, 2, "ulpenacb");
+ QETH_CARD_TEXT(card, 2, "ulpenacb");
iob = (struct qeth_cmd_buffer *) data;
memcpy(&card->token.ulp_filter_r,
@@ -2235,7 +2108,7 @@ static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
card->info.link_type = link_type;
} else
card->info.link_type = 0;
- QETH_DBF_TEXT_(SETUP, 2, "link%d", card->info.link_type);
+ QETH_CARD_TEXT_(card, 2, "link%d", card->info.link_type);
return 0;
}
@@ -2253,12 +2126,11 @@ static int qeth_ulp_enable(struct qeth_card *card)
u16 max_mtu;
int rc;
- /*FIXME: trace view callbacks*/
- QETH_DBF_TEXT(SETUP, 2, "ulpenabl");
+ QETH_CARD_TEXT(card, 2, "ulpenabl");
- iob = qeth_wait_for_buffer(&card->write);
- iob->finalize = qeth_mpc_finalize_cmd;
- memcpy(iob->data, ULP_ENABLE, ULP_ENABLE_SIZE);
+ iob = qeth_mpc_alloc_cmd(card, ULP_ENABLE, ULP_ENABLE_SIZE);
+ if (!iob)
+ return -ENOMEM;
*(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (u8) card->dev->dev_port;
memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1);
@@ -2266,8 +2138,7 @@ static int qeth_ulp_enable(struct qeth_card *card)
&card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data),
&card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH);
- rc = qeth_send_control_data(card, ULP_ENABLE_SIZE, iob,
- qeth_ulp_enable_cb, &max_mtu);
+ rc = qeth_send_control_data(card, iob, qeth_ulp_enable_cb, &max_mtu);
if (rc)
return rc;
return qeth_update_max_mtu(card, max_mtu);
@@ -2278,7 +2149,7 @@ static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
{
struct qeth_cmd_buffer *iob;
- QETH_DBF_TEXT(SETUP, 2, "ulpstpcb");
+ QETH_CARD_TEXT(card, 2, "ulpstpcb");
iob = (struct qeth_cmd_buffer *) data;
memcpy(&card->token.ulp_connection_r,
@@ -2286,7 +2157,7 @@ static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
QETH_MPC_TOKEN_LENGTH);
if (!strncmp("00S", QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
3)) {
- QETH_DBF_TEXT(SETUP, 2, "olmlimit");
+ QETH_CARD_TEXT(card, 2, "olmlimit");
dev_err(&card->gdev->dev, "A connection could not be "
"established because of an OLM limit\n");
return -EMLINK;
@@ -2296,16 +2167,15 @@ static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
static int qeth_ulp_setup(struct qeth_card *card)
{
- int rc;
__u16 temp;
struct qeth_cmd_buffer *iob;
struct ccw_dev_id dev_id;
- QETH_DBF_TEXT(SETUP, 2, "ulpsetup");
+ QETH_CARD_TEXT(card, 2, "ulpsetup");
- iob = qeth_wait_for_buffer(&card->write);
- iob->finalize = qeth_mpc_finalize_cmd;
- memcpy(iob->data, ULP_SETUP, ULP_SETUP_SIZE);
+ iob = qeth_mpc_alloc_cmd(card, ULP_SETUP, ULP_SETUP_SIZE);
+ if (!iob)
+ return -ENOMEM;
memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data),
&card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
@@ -2318,9 +2188,7 @@ static int qeth_ulp_setup(struct qeth_card *card)
memcpy(QETH_ULP_SETUP_CUA(iob->data), &dev_id.devno, 2);
temp = (card->info.cula << 8) + card->info.unit_addr2;
memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
- rc = qeth_send_control_data(card, ULP_SETUP_SIZE, iob,
- qeth_ulp_setup_cb, NULL);
- return rc;
+ return qeth_send_control_data(card, iob, qeth_ulp_setup_cb, NULL);
}
static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx)
@@ -2369,13 +2237,13 @@ static int qeth_alloc_qdio_queues(struct qeth_card *card)
{
int i, j;
- QETH_DBF_TEXT(SETUP, 2, "allcqdbf");
+ QETH_CARD_TEXT(card, 2, "allcqdbf");
if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED,
QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
return 0;
- QETH_DBF_TEXT(SETUP, 2, "inq");
+ QETH_CARD_TEXT(card, 2, "inq");
card->qdio.in_q = qeth_alloc_qdio_queue();
if (!card->qdio.in_q)
goto out_nomem;
@@ -2389,8 +2257,8 @@ static int qeth_alloc_qdio_queues(struct qeth_card *card)
card->qdio.out_qs[i] = qeth_alloc_output_queue();
if (!card->qdio.out_qs[i])
goto out_freeoutq;
- QETH_DBF_TEXT_(SETUP, 2, "outq %i", i);
- QETH_DBF_HEX(SETUP, 2, &card->qdio.out_qs[i], sizeof(void *));
+ QETH_CARD_TEXT_(card, 2, "outq %i", i);
+ QETH_CARD_HEX(card, 2, &card->qdio.out_qs[i], sizeof(void *));
card->qdio.out_qs[i]->card = card;
card->qdio.out_qs[i]->queue_no = i;
/* give outbound qeth_qdio_buffers their qdio_buffers */
@@ -2481,79 +2349,77 @@ static void qeth_create_qib_param_field_blkt(struct qeth_card *card,
static int qeth_qdio_activate(struct qeth_card *card)
{
- QETH_DBF_TEXT(SETUP, 3, "qdioact");
+ QETH_CARD_TEXT(card, 3, "qdioact");
return qdio_activate(CARD_DDEV(card));
}
static int qeth_dm_act(struct qeth_card *card)
{
- int rc;
struct qeth_cmd_buffer *iob;
- QETH_DBF_TEXT(SETUP, 2, "dmact");
+ QETH_CARD_TEXT(card, 2, "dmact");
- iob = qeth_wait_for_buffer(&card->write);
- iob->finalize = qeth_mpc_finalize_cmd;
- memcpy(iob->data, DM_ACT, DM_ACT_SIZE);
+ iob = qeth_mpc_alloc_cmd(card, DM_ACT, DM_ACT_SIZE);
+ if (!iob)
+ return -ENOMEM;
memcpy(QETH_DM_ACT_DEST_ADDR(iob->data),
&card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data),
&card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
- rc = qeth_send_control_data(card, DM_ACT_SIZE, iob, NULL, NULL);
- return rc;
+ return qeth_send_control_data(card, iob, NULL, NULL);
}
static int qeth_mpc_initialize(struct qeth_card *card)
{
int rc;
- QETH_DBF_TEXT(SETUP, 2, "mpcinit");
+ QETH_CARD_TEXT(card, 2, "mpcinit");
rc = qeth_issue_next_read(card);
if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
+ QETH_CARD_TEXT_(card, 2, "1err%d", rc);
return rc;
}
rc = qeth_cm_enable(card);
if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
+ QETH_CARD_TEXT_(card, 2, "2err%d", rc);
goto out_qdio;
}
rc = qeth_cm_setup(card);
if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
+ QETH_CARD_TEXT_(card, 2, "3err%d", rc);
goto out_qdio;
}
rc = qeth_ulp_enable(card);
if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc);
+ QETH_CARD_TEXT_(card, 2, "4err%d", rc);
goto out_qdio;
}
rc = qeth_ulp_setup(card);
if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
+ QETH_CARD_TEXT_(card, 2, "5err%d", rc);
goto out_qdio;
}
rc = qeth_alloc_qdio_queues(card);
if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
+ QETH_CARD_TEXT_(card, 2, "5err%d", rc);
goto out_qdio;
}
rc = qeth_qdio_establish(card);
if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
+ QETH_CARD_TEXT_(card, 2, "6err%d", rc);
qeth_free_qdio_queues(card);
goto out_qdio;
}
rc = qeth_qdio_activate(card);
if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc);
+ QETH_CARD_TEXT_(card, 2, "7err%d", rc);
goto out_qdio;
}
rc = qeth_dm_act(card);
if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "8err%d", rc);
+ QETH_CARD_TEXT_(card, 2, "8err%d", rc);
goto out_qdio;
}
@@ -2706,7 +2572,7 @@ int qeth_init_qdio_queues(struct qeth_card *card)
unsigned int i;
int rc;
- QETH_DBF_TEXT(SETUP, 2, "initqdqs");
+ QETH_CARD_TEXT(card, 2, "initqdqs");
/* inbound queue */
qdio_reset_buffers(card->qdio.in_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
@@ -2720,7 +2586,7 @@ int qeth_init_qdio_queues(struct qeth_card *card)
rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0,
card->qdio.in_buf_pool.buf_count - 1);
if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
+ QETH_CARD_TEXT_(card, 2, "1err%d", rc);
return rc;
}
@@ -2746,36 +2612,10 @@ int qeth_init_qdio_queues(struct qeth_card *card)
}
EXPORT_SYMBOL_GPL(qeth_init_qdio_queues);
-static __u8 qeth_get_ipa_adp_type(enum qeth_link_types link_type)
-{
- switch (link_type) {
- case QETH_LINK_TYPE_HSTR:
- return 2;
- default:
- return 1;
- }
-}
-
-static void qeth_fill_ipacmd_header(struct qeth_card *card,
- struct qeth_ipa_cmd *cmd,
- enum qeth_ipa_cmds command,
- enum qeth_prot_versions prot)
-{
- cmd->hdr.command = command;
- cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST;
- /* cmd->hdr.seqno is set by qeth_send_control_data() */
- cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type);
- cmd->hdr.rel_adapter_no = (u8) card->dev->dev_port;
- cmd->hdr.prim_version_no = IS_LAYER2(card) ? 2 : 1;
- cmd->hdr.param_count = 1;
- cmd->hdr.prot_version = prot;
-}
-
static void qeth_ipa_finalize_cmd(struct qeth_card *card,
- struct qeth_cmd_buffer *iob,
- unsigned int length)
+ struct qeth_cmd_buffer *iob)
{
- qeth_mpc_finalize_cmd(card, iob, length);
+ qeth_mpc_finalize_cmd(card, iob);
/* override with IPA-specific values: */
__ipa_cmd(iob)->hdr.seqno = card->seqno.ipa;
@@ -2785,11 +2625,12 @@ static void qeth_ipa_finalize_cmd(struct qeth_card *card,
void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
u16 cmd_length)
{
- u16 total_length = IPA_PDU_HEADER_SIZE + cmd_length;
u8 prot_type = qeth_mpc_select_prot_type(card);
+ u16 total_length = iob->length;
+ qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, total_length,
+ iob->data);
iob->finalize = qeth_ipa_finalize_cmd;
- iob->timeout = QETH_IPA_TIMEOUT;
memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &total_length, 2);
@@ -2802,25 +2643,35 @@ void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
}
EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd);
-struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card,
- enum qeth_ipa_cmds ipacmd, enum qeth_prot_versions prot)
+struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card,
+ enum qeth_ipa_cmds cmd_code,
+ enum qeth_prot_versions prot,
+ unsigned int data_length)
{
+ enum qeth_link_types link_type = card->info.link_type;
struct qeth_cmd_buffer *iob;
+ struct qeth_ipacmd_hdr *hdr;
- iob = qeth_get_buffer(&card->write);
- if (iob) {
- qeth_prepare_ipa_cmd(card, iob, sizeof(struct qeth_ipa_cmd));
- qeth_fill_ipacmd_header(card, __ipa_cmd(iob), ipacmd, prot);
- } else {
- dev_warn(&card->gdev->dev,
- "The qeth driver ran out of channel command buffers\n");
- QETH_DBF_MESSAGE(1, "device %x ran out of channel command buffers",
- CARD_DEVID(card));
- }
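+ /* Account for the IPA command header that precedes the sub-command payload. */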
+ data_length += offsetof(struct qeth_ipa_cmd, data);
+ iob = qeth_alloc_cmd(&card->write, IPA_PDU_HEADER_SIZE + data_length, 1,
+ QETH_IPA_TIMEOUT);
+ if (!iob)
+ return NULL;
+ qeth_prepare_ipa_cmd(card, iob, data_length);
+
+ hdr = &__ipa_cmd(iob)->hdr;
+ hdr->command = cmd_code;
+ hdr->initiator = IPA_CMD_INITIATOR_HOST;
+ /* hdr->seqno is set by qeth_send_control_data() */
+ hdr->adapter_type = (link_type == QETH_LINK_TYPE_HSTR) ? 2 : 1;
+ hdr->rel_adapter_no = (u8) card->dev->dev_port;
+ hdr->prim_version_no = IS_LAYER2(card) ? 2 : 1;
+ hdr->param_count = 1;
+ hdr->prot_version = prot;
return iob;
}
-EXPORT_SYMBOL_GPL(qeth_get_ipacmd_buffer);
+EXPORT_SYMBOL_GPL(qeth_ipa_alloc_cmd);
static int qeth_send_ipa_cmd_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
@@ -2841,20 +2692,18 @@ int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
unsigned long),
void *reply_param)
{
- u16 length;
int rc;
QETH_CARD_TEXT(card, 4, "sendipa");
if (card->read_or_write_problem) {
- qeth_release_buffer(iob->channel, iob);
+ qeth_put_cmd(iob);
return -EIO;
}
if (reply_cb == NULL)
reply_cb = qeth_send_ipa_cmd_cb;
- memcpy(&length, QETH_IPA_PDU_LEN_TOTAL(iob->data), 2);
- rc = qeth_send_control_data(card, length, iob, reply_cb, reply_param);
+ rc = qeth_send_control_data(card, iob, reply_cb, reply_param);
if (rc == -ETIME) {
qeth_clear_ipacmd_list(card);
qeth_schedule_recovery(card);
@@ -2878,9 +2727,9 @@ static int qeth_send_startlan(struct qeth_card *card)
{
struct qeth_cmd_buffer *iob;
- QETH_DBF_TEXT(SETUP, 2, "strtlan");
+ QETH_CARD_TEXT(card, 2, "strtlan");
- iob = qeth_get_ipacmd_buffer(card, IPA_CMD_STARTLAN, 0);
+ iob = qeth_ipa_alloc_cmd(card, IPA_CMD_STARTLAN, QETH_PROT_NONE, 0);
if (!iob)
return -ENOMEM;
return qeth_send_ipa_cmd(card, iob, qeth_send_startlan_cb, NULL);
@@ -2906,7 +2755,7 @@ static int qeth_query_setadapterparms_cb(struct qeth_card *card,
if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) {
card->info.link_type =
cmd->data.setadapterparms.data.query_cmds_supp.lan_type;
- QETH_DBF_TEXT_(SETUP, 2, "lnk %d", card->info.link_type);
+ QETH_CARD_TEXT_(card, 2, "lnk %d", card->info.link_type);
}
card->options.adp.supported_funcs =
cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds;
@@ -2914,21 +2763,24 @@ static int qeth_query_setadapterparms_cb(struct qeth_card *card,
}
static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
- __u32 command, __u32 cmdlen)
+ enum qeth_ipa_setadp_cmd adp_cmd,
+ unsigned int data_length)
{
+ struct qeth_ipacmd_setadpparms_hdr *hdr;
struct qeth_cmd_buffer *iob;
- struct qeth_ipa_cmd *cmd;
- iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETADAPTERPARMS,
- QETH_PROT_IPV4);
- if (iob) {
- cmd = __ipa_cmd(iob);
- cmd->data.setadapterparms.hdr.cmdlength = cmdlen;
- cmd->data.setadapterparms.hdr.command_code = command;
- cmd->data.setadapterparms.hdr.used_total = 1;
- cmd->data.setadapterparms.hdr.seq_no = 1;
- }
+ iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETADAPTERPARMS, QETH_PROT_IPV4,
+ data_length +
+ offsetof(struct qeth_ipacmd_setadpparms,
+ data));
+ if (!iob)
+ return NULL;
+ hdr = &__ipa_cmd(iob)->data.setadapterparms.hdr;
+ hdr->cmdlength = sizeof(*hdr) + data_length;
+ hdr->command_code = adp_cmd;
+ hdr->used_total = 1;
+ hdr->seq_no = 1;
return iob;
}
@@ -2939,7 +2791,7 @@ static int qeth_query_setadapterparms(struct qeth_card *card)
QETH_CARD_TEXT(card, 3, "queryadp");
iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
- sizeof(struct qeth_ipacmd_setadpparms));
+ SETADP_DATA_SIZEOF(query_cmds_supp));
if (!iob)
return -ENOMEM;
rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
@@ -2951,7 +2803,7 @@ static int qeth_query_ipassists_cb(struct qeth_card *card,
{
struct qeth_ipa_cmd *cmd;
- QETH_DBF_TEXT(SETUP, 2, "qipasscb");
+ QETH_CARD_TEXT(card, 2, "qipasscb");
cmd = (struct qeth_ipa_cmd *) data;
@@ -2960,7 +2812,7 @@ static int qeth_query_ipassists_cb(struct qeth_card *card,
break;
case IPA_RC_NOTSUPP:
case IPA_RC_L2_UNSUPPORTED_CMD:
- QETH_DBF_TEXT(SETUP, 2, "ipaunsup");
+ QETH_CARD_TEXT(card, 2, "ipaunsup");
card->options.ipa4.supported_funcs |= IPA_SETADAPTERPARMS;
card->options.ipa6.supported_funcs |= IPA_SETADAPTERPARMS;
return -EOPNOTSUPP;
@@ -2988,8 +2840,8 @@ static int qeth_query_ipassists(struct qeth_card *card,
int rc;
struct qeth_cmd_buffer *iob;
- QETH_DBF_TEXT_(SETUP, 2, "qipassi%i", prot);
- iob = qeth_get_ipacmd_buffer(card, IPA_CMD_QIPASSIST, prot);
+ QETH_CARD_TEXT_(card, 2, "qipassi%i", prot);
+ iob = qeth_ipa_alloc_cmd(card, IPA_CMD_QIPASSIST, prot, 0);
if (!iob)
return -ENOMEM;
rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
@@ -3026,14 +2878,32 @@ int qeth_query_switch_attributes(struct qeth_card *card,
return -EOPNOTSUPP;
if (!netif_carrier_ok(card->dev))
return -ENOMEDIUM;
- iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES,
- sizeof(struct qeth_ipacmd_setadpparms_hdr));
+ iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES, 0);
if (!iob)
return -ENOMEM;
return qeth_send_ipa_cmd(card, iob,
qeth_query_switch_attributes_cb, sw_info);
}
+struct qeth_cmd_buffer *qeth_get_diag_cmd(struct qeth_card *card,
+ enum qeth_diags_cmds sub_cmd,
+ unsigned int data_length)
+{
+ struct qeth_ipacmd_diagass *cmd;
+ struct qeth_cmd_buffer *iob;
+
+ iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SET_DIAG_ASS, QETH_PROT_NONE,
+ DIAG_HDR_LEN + data_length);
+ if (!iob)
+ return NULL;
+
+ cmd = &__ipa_cmd(iob)->data.diagass;
+ cmd->subcmd_len = DIAG_SUB_HDR_LEN + data_length;
+ cmd->subcmd = sub_cmd;
+ return iob;
+}
+EXPORT_SYMBOL_GPL(qeth_get_diag_cmd);
+
static int qeth_query_setdiagass_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
@@ -3052,15 +2922,11 @@ static int qeth_query_setdiagass_cb(struct qeth_card *card,
static int qeth_query_setdiagass(struct qeth_card *card)
{
struct qeth_cmd_buffer *iob;
- struct qeth_ipa_cmd *cmd;
- QETH_DBF_TEXT(SETUP, 2, "qdiagass");
- iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
+ QETH_CARD_TEXT(card, 2, "qdiagass");
+ iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_QUERY, 0);
if (!iob)
return -ENOMEM;
- cmd = __ipa_cmd(iob);
- cmd->data.diagass.subcmd_len = 16;
- cmd->data.diagass.subcmd = QETH_DIAGS_CMD_QUERY;
return qeth_send_ipa_cmd(card, iob, qeth_query_setdiagass_cb, NULL);
}
@@ -3107,13 +2973,11 @@ int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action)
struct qeth_cmd_buffer *iob;
struct qeth_ipa_cmd *cmd;
- QETH_DBF_TEXT(SETUP, 2, "diagtrap");
- iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
+ QETH_CARD_TEXT(card, 2, "diagtrap");
+ iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_TRAP, 64);
if (!iob)
return -ENOMEM;
cmd = __ipa_cmd(iob);
- cmd->data.diagass.subcmd_len = 80;
- cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRAP;
cmd->data.diagass.type = 1;
cmd->data.diagass.action = action;
switch (action) {
@@ -3236,13 +3100,6 @@ static void qeth_handle_send_error(struct qeth_card *card,
int sbalf15 = buffer->buffer->element[15].sflags;
QETH_CARD_TEXT(card, 6, "hdsnderr");
- if (IS_IQD(card)) {
- if (sbalf15 == 0) {
- qdio_err = 0;
- } else {
- qdio_err = 1;
- }
- }
qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr");
if (!qdio_err)
@@ -3730,8 +3587,8 @@ check_layout:
__elements = 1 + qeth_count_elements(skb, proto_len);
else
__elements = qeth_count_elements(skb, 0);
- } else if (!proto_len && qeth_get_elements_for_range(start, end) == 1) {
- /* Push HW header into a new page. */
+ } else if (!proto_len && PAGE_ALIGNED(skb->data)) {
+ /* Push HW header into preceding page, flush with skb->data. */
push_ok = true;
__elements = 1 + qeth_count_elements(skb, 0);
} else {
@@ -3785,18 +3642,16 @@ static void __qeth_fill_buffer(struct sk_buff *skb,
int element = buf->next_element_to_fill;
int length = skb_headlen(skb) - offset;
char *data = skb->data + offset;
- int length_here, cnt;
+ unsigned int elem_length, cnt;
/* map linear part into buffer element(s) */
while (length > 0) {
- /* length_here is the remaining amount of data in this page */
- length_here = PAGE_SIZE - ((unsigned long) data % PAGE_SIZE);
- if (length < length_here)
- length_here = length;
+ elem_length = min_t(unsigned int, length,
+ PAGE_SIZE - offset_in_page(data));
buffer->element[element].addr = data;
- buffer->element[element].length = length_here;
- length -= length_here;
+ buffer->element[element].length = elem_length;
+ length -= elem_length;
if (is_first_elem) {
is_first_elem = false;
if (length || skb_is_nonlinear(skb))
@@ -3809,7 +3664,8 @@ static void __qeth_fill_buffer(struct sk_buff *skb,
buffer->element[element].eflags =
SBAL_EFLAGS_MIDDLE_FRAG;
}
- data += length_here;
+
+ data += elem_length;
element++;
}
@@ -3820,17 +3676,16 @@ static void __qeth_fill_buffer(struct sk_buff *skb,
data = skb_frag_address(frag);
length = skb_frag_size(frag);
while (length > 0) {
- length_here = PAGE_SIZE -
- ((unsigned long) data % PAGE_SIZE);
- if (length < length_here)
- length_here = length;
+ elem_length = min_t(unsigned int, length,
+ PAGE_SIZE - offset_in_page(data));
buffer->element[element].addr = data;
- buffer->element[element].length = length_here;
+ buffer->element[element].length = elem_length;
buffer->element[element].eflags =
SBAL_EFLAGS_MIDDLE_FRAG;
- length -= length_here;
- data += length_here;
+
+ length -= elem_length;
+ data += elem_length;
element++;
}
}
@@ -4053,11 +3908,10 @@ static void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr,
}
int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
- struct qeth_qdio_out_q *queue, int ipv, int cast_type,
+ struct qeth_qdio_out_q *queue, int ipv,
void (*fill_header)(struct qeth_qdio_out_q *queue,
struct qeth_hdr *hdr, struct sk_buff *skb,
- int ipv, int cast_type,
- unsigned int data_len))
+ int ipv, unsigned int data_len))
{
unsigned int proto_len, hw_hdr_len;
unsigned int frame_len = skb->len;
@@ -4091,7 +3945,7 @@ int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
data_offset = push_len + proto_len;
}
memset(hdr, 0, hw_hdr_len);
- fill_header(queue, hdr, skb, ipv, cast_type, frame_len);
+ fill_header(queue, hdr, skb, ipv, frame_len);
if (is_tso)
qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr,
frame_len - proto_len, skb, proto_len);
@@ -4160,7 +4014,7 @@ void qeth_setadp_promisc_mode(struct qeth_card *card)
QETH_CARD_TEXT_(card, 4, "mode:%x", mode);
iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
- sizeof(struct qeth_ipacmd_setadpparms_hdr) + 8);
+ SETADP_DATA_SIZEOF(mode));
if (!iob)
return;
cmd = __ipa_cmd(iob);
@@ -4200,8 +4054,7 @@ int qeth_setadpparms_change_macaddr(struct qeth_card *card)
QETH_CARD_TEXT(card, 4, "chgmac");
iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS,
- sizeof(struct qeth_ipacmd_setadpparms_hdr) +
- sizeof(struct qeth_change_addr));
+ SETADP_DATA_SIZEOF(change_addr));
if (!iob)
return -ENOMEM;
cmd = __ipa_cmd(iob);
@@ -4228,10 +4081,8 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
qeth_setadpparms_inspect_rc(cmd);
access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
- QETH_DBF_TEXT_(SETUP, 2, "setaccb");
- QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name);
- QETH_DBF_TEXT_(SETUP, 2, "rc=%d",
- cmd->data.setadapterparms.hdr.return_code);
+ QETH_CARD_TEXT_(card, 2, "rc=%d",
+ cmd->data.setadapterparms.hdr.return_code);
if (cmd->data.setadapterparms.hdr.return_code !=
SET_ACCESS_CTRL_RC_SUCCESS)
QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n",
@@ -4311,12 +4162,8 @@ static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
QETH_CARD_TEXT(card, 4, "setacctl");
- QETH_DBF_TEXT_(SETUP, 2, "setacctl");
- QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name);
-
iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
- sizeof(struct qeth_ipacmd_setadpparms_hdr) +
- sizeof(struct qeth_set_access_ctrl));
+ SETADP_DATA_SIZEOF(set_access_ctrl));
if (!iob)
return -ENOMEM;
cmd = __ipa_cmd(iob);
@@ -4325,7 +4172,7 @@ static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb,
&fallback);
- QETH_DBF_TEXT_(SETUP, 2, "rc=%d", rc);
+ QETH_CARD_TEXT_(card, 2, "rc=%d", rc);
return rc;
}
@@ -4472,18 +4319,13 @@ static int qeth_snmp_command_cb(struct qeth_card *card,
return -ENOSPC;
}
QETH_CARD_TEXT_(card, 4, "snore%i",
- cmd->data.setadapterparms.hdr.used_total);
+ cmd->data.setadapterparms.hdr.used_total);
QETH_CARD_TEXT_(card, 4, "sseqn%i",
- cmd->data.setadapterparms.hdr.seq_no);
+ cmd->data.setadapterparms.hdr.seq_no);
/*copy entries to user buffer*/
memcpy(qinfo->udata + qinfo->udata_offset, snmp_data, data_len);
qinfo->udata_offset += data_len;
- /* check if all replies received ... */
- QETH_CARD_TEXT_(card, 4, "srtot%i",
- cmd->data.setadapterparms.hdr.used_total);
- QETH_CARD_TEXT_(card, 4, "srseq%i",
- cmd->data.setadapterparms.hdr.seq_no);
if (cmd->data.setadapterparms.hdr.seq_no <
cmd->data.setadapterparms.hdr.used_total)
return 1;
@@ -4492,9 +4334,8 @@ static int qeth_snmp_command_cb(struct qeth_card *card,
static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
{
+ struct qeth_snmp_ureq __user *ureq;
struct qeth_cmd_buffer *iob;
- struct qeth_ipa_cmd *cmd;
- struct qeth_snmp_ureq *ureq;
unsigned int req_len;
struct qeth_arp_query_info qinfo = {0, };
int rc = 0;
@@ -4508,38 +4349,28 @@ static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
IS_LAYER3(card))
return -EOPNOTSUPP;
- /* skip 4 bytes (data_len struct member) to get req_len */
- if (copy_from_user(&req_len, udata + sizeof(int), sizeof(int)))
+ ureq = (struct qeth_snmp_ureq __user *) udata;
+ if (get_user(qinfo.udata_len, &ureq->hdr.data_len) ||
+ get_user(req_len, &ureq->hdr.req_len))
+ return -EFAULT;
+
+ iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, req_len);
+ if (!iob)
+ return -ENOMEM;
+
+ if (copy_from_user(&__ipa_cmd(iob)->data.setadapterparms.data.snmp,
+ &ureq->cmd, req_len)) {
+ qeth_put_cmd(iob);
return -EFAULT;
- if (req_len > (QETH_BUFSIZE - IPA_PDU_HEADER_SIZE -
- sizeof(struct qeth_ipacmd_hdr) -
- sizeof(struct qeth_ipacmd_setadpparms_hdr)))
- return -EINVAL;
- ureq = memdup_user(udata, req_len + sizeof(struct qeth_snmp_ureq_hdr));
- if (IS_ERR(ureq)) {
- QETH_CARD_TEXT(card, 2, "snmpnome");
- return PTR_ERR(ureq);
}
- qinfo.udata_len = ureq->hdr.data_len;
+
qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
if (!qinfo.udata) {
- kfree(ureq);
+ qeth_put_cmd(iob);
return -ENOMEM;
}
qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr);
- iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL,
- QETH_SNMP_SETADP_CMDLENGTH + req_len);
- if (!iob) {
- rc = -ENOMEM;
- goto out;
- }
-
- /* for large requests, fix-up the length fields: */
- qeth_prepare_ipa_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len);
-
- cmd = __ipa_cmd(iob);
- memcpy(&cmd->data.setadapterparms.data.snmp, &ureq->cmd, req_len);
rc = qeth_send_ipa_cmd(card, iob, qeth_snmp_command_cb, &qinfo);
if (rc)
QETH_DBF_MESSAGE(2, "SNMP command failed on device %x: (%#x)\n",
@@ -4548,8 +4379,7 @@ static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
rc = -EFAULT;
}
-out:
- kfree(ureq);
+
kfree(qinfo.udata);
return rc;
}
@@ -4615,8 +4445,7 @@ static int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
}
iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
- sizeof(struct qeth_ipacmd_setadpparms_hdr) +
- sizeof(struct qeth_query_oat));
+ SETADP_DATA_SIZEOF(query_oat));
if (!iob) {
rc = -ENOMEM;
goto out_free;
@@ -4678,8 +4507,7 @@ int qeth_query_card_info(struct qeth_card *card,
QETH_CARD_TEXT(card, 2, "qcrdinfo");
if (!qeth_adp_supported(card, IPA_SETADP_QUERY_CARD_INFO))
return -EOPNOTSUPP;
- iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO,
- sizeof(struct qeth_ipacmd_setadpparms_hdr));
+ iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO, 0);
if (!iob)
return -ENOMEM;
return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb,
@@ -4701,7 +4529,7 @@ int qeth_vm_request_mac(struct qeth_card *card)
struct ccw_dev_id id;
int rc;
- QETH_DBF_TEXT(SETUP, 2, "vmreqmac");
+ QETH_CARD_TEXT(card, 2, "vmreqmac");
request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
@@ -4726,13 +4554,13 @@ int qeth_vm_request_mac(struct qeth_card *card)
if (request->resp_buf_len < sizeof(*response) ||
response->version != request->resp_version) {
rc = -EIO;
- QETH_DBF_TEXT(SETUP, 2, "badresp");
- QETH_DBF_HEX(SETUP, 2, &request->resp_buf_len,
- sizeof(request->resp_buf_len));
+ QETH_CARD_TEXT(card, 2, "badresp");
+ QETH_CARD_HEX(card, 2, &request->resp_buf_len,
+ sizeof(request->resp_buf_len));
} else if (!is_valid_ether_addr(response->mac)) {
rc = -EINVAL;
- QETH_DBF_TEXT(SETUP, 2, "badmac");
- QETH_DBF_HEX(SETUP, 2, response->mac, ETH_ALEN);
+ QETH_CARD_TEXT(card, 2, "badmac");
+ QETH_CARD_HEX(card, 2, response->mac, ETH_ALEN);
} else {
ether_addr_copy(card->dev->dev_addr, response->mac);
}
@@ -4747,43 +4575,37 @@ EXPORT_SYMBOL_GPL(qeth_vm_request_mac);
static void qeth_determine_capabilities(struct qeth_card *card)
{
int rc;
- int length;
- char *prcd;
struct ccw_device *ddev;
int ddev_offline = 0;
- QETH_DBF_TEXT(SETUP, 2, "detcapab");
+ QETH_CARD_TEXT(card, 2, "detcapab");
ddev = CARD_DDEV(card);
if (!ddev->online) {
ddev_offline = 1;
rc = ccw_device_set_online(ddev);
if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
+ QETH_CARD_TEXT_(card, 2, "3err%d", rc);
goto out;
}
}
- rc = qeth_read_conf_data(card, (void **) &prcd, &length);
+ rc = qeth_read_conf_data(card);
if (rc) {
QETH_DBF_MESSAGE(2, "qeth_read_conf_data on device %x returned %i\n",
CARD_DEVID(card), rc);
- QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
+ QETH_CARD_TEXT_(card, 2, "5err%d", rc);
goto out_offline;
}
- qeth_configure_unitaddr(card, prcd);
- if (ddev_offline)
- qeth_configure_blkt_default(card, prcd);
- kfree(prcd);
rc = qdio_get_ssqd_desc(ddev, &card->ssqd);
if (rc)
- QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
+ QETH_CARD_TEXT_(card, 2, "6err%d", rc);
- QETH_DBF_TEXT_(SETUP, 2, "qfmt%d", card->ssqd.qfmt);
- QETH_DBF_TEXT_(SETUP, 2, "ac1:%02x", card->ssqd.qdioac1);
- QETH_DBF_TEXT_(SETUP, 2, "ac2:%04x", card->ssqd.qdioac2);
- QETH_DBF_TEXT_(SETUP, 2, "ac3:%04x", card->ssqd.qdioac3);
- QETH_DBF_TEXT_(SETUP, 2, "icnt%d", card->ssqd.icnt);
+ QETH_CARD_TEXT_(card, 2, "qfmt%d", card->ssqd.qfmt);
+ QETH_CARD_TEXT_(card, 2, "ac1:%02x", card->ssqd.qdioac1);
+ QETH_CARD_TEXT_(card, 2, "ac2:%04x", card->ssqd.qdioac2);
+ QETH_CARD_TEXT_(card, 2, "ac3:%04x", card->ssqd.qdioac3);
+ QETH_CARD_TEXT_(card, 2, "icnt%d", card->ssqd.icnt);
if (!((card->ssqd.qfmt != QDIO_IQDIO_QFMT) ||
((card->ssqd.qdioac1 & CHSC_AC1_INITIATE_INPUTQ) == 0) ||
((card->ssqd.qdioac3 & CHSC_AC3_FORMAT2_CQ_AVAILABLE) == 0))) {
@@ -4831,7 +4653,7 @@ static int qeth_qdio_establish(struct qeth_card *card)
int i, j, k;
int rc = 0;
- QETH_DBF_TEXT(SETUP, 2, "qdioest");
+ QETH_CARD_TEXT(card, 2, "qdioest");
qib_param_field = kzalloc(QDIO_MAX_BUFFERS_PER_Q,
GFP_KERNEL);
@@ -4935,11 +4757,11 @@ out_free_nothing:
static void qeth_core_free_card(struct qeth_card *card)
{
- QETH_DBF_TEXT(SETUP, 2, "freecrd");
- QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
+ QETH_CARD_TEXT(card, 2, "freecrd");
qeth_clean_channel(&card->read);
qeth_clean_channel(&card->write);
qeth_clean_channel(&card->data);
+ qeth_put_cmd(card->read_cmd);
destroy_workqueue(card->event_wq);
qeth_free_qdio_queues(card);
unregister_service_level(&card->qeth_service_level);
@@ -4988,7 +4810,7 @@ int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok)
int retries = 3;
int rc;
- QETH_DBF_TEXT(SETUP, 2, "hrdsetup");
+ QETH_CARD_TEXT(card, 2, "hrdsetup");
atomic_set(&card->force_alloc_skb, 0);
rc = qeth_update_from_chp_desc(card);
if (rc)
@@ -5013,10 +4835,10 @@ retry:
goto retriable;
retriable:
if (rc == -ERESTARTSYS) {
- QETH_DBF_TEXT(SETUP, 2, "break1");
+ QETH_CARD_TEXT(card, 2, "break1");
return rc;
} else if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
+ QETH_CARD_TEXT_(card, 2, "1err%d", rc);
if (--retries < 0)
goto out;
else
@@ -5028,10 +4850,10 @@ retriable:
rc = qeth_idx_activate_read_channel(card);
if (rc == -EINTR) {
- QETH_DBF_TEXT(SETUP, 2, "break2");
+ QETH_CARD_TEXT(card, 2, "break2");
return rc;
} else if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
+ QETH_CARD_TEXT_(card, 2, "3err%d", rc);
if (--retries < 0)
goto out;
else
@@ -5040,10 +4862,10 @@ retriable:
rc = qeth_idx_activate_write_channel(card);
if (rc == -EINTR) {
- QETH_DBF_TEXT(SETUP, 2, "break3");
+ QETH_CARD_TEXT(card, 2, "break3");
return rc;
} else if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc);
+ QETH_CARD_TEXT_(card, 2, "4err%d", rc);
if (--retries < 0)
goto out;
else
@@ -5052,13 +4874,13 @@ retriable:
card->read_or_write_problem = 0;
rc = qeth_mpc_initialize(card);
if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
+ QETH_CARD_TEXT_(card, 2, "5err%d", rc);
goto out;
}
rc = qeth_send_startlan(card);
if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
+ QETH_CARD_TEXT_(card, 2, "6err%d", rc);
if (rc == -ENETDOWN) {
dev_warn(&card->gdev->dev, "The LAN is offline\n");
*carrier_ok = false;
@@ -5085,14 +4907,14 @@ retriable:
if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
rc = qeth_query_setadapterparms(card);
if (rc < 0) {
- QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc);
+ QETH_CARD_TEXT_(card, 2, "7err%d", rc);
goto out;
}
}
if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
rc = qeth_query_setdiagass(card);
if (rc < 0) {
- QETH_DBF_TEXT_(SETUP, 2, "8err%d", rc);
+ QETH_CARD_TEXT_(card, 2, "8err%d", rc);
goto out;
}
}
@@ -5352,42 +5174,47 @@ EXPORT_SYMBOL_GPL(qeth_setassparms_cb);
struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
enum qeth_ipa_funcs ipa_func,
- __u16 cmd_code, __u16 len,
+ u16 cmd_code,
+ unsigned int data_length,
enum qeth_prot_versions prot)
{
+ struct qeth_ipacmd_setassparms *setassparms;
+ struct qeth_ipacmd_setassparms_hdr *hdr;
struct qeth_cmd_buffer *iob;
- struct qeth_ipa_cmd *cmd;
QETH_CARD_TEXT(card, 4, "getasscm");
- iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETASSPARMS, prot);
+ iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETASSPARMS, prot,
+ data_length +
+ offsetof(struct qeth_ipacmd_setassparms,
+ data));
+ if (!iob)
+ return NULL;
- if (iob) {
- cmd = __ipa_cmd(iob);
- cmd->data.setassparms.hdr.assist_no = ipa_func;
- cmd->data.setassparms.hdr.length = 8 + len;
- cmd->data.setassparms.hdr.command_code = cmd_code;
- }
+ setassparms = &__ipa_cmd(iob)->data.setassparms;
+ setassparms->assist_no = ipa_func;
+ hdr = &setassparms->hdr;
+ hdr->length = sizeof(*hdr) + data_length;
+ hdr->command_code = cmd_code;
return iob;
}
EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd);
int qeth_send_simple_setassparms_prot(struct qeth_card *card,
enum qeth_ipa_funcs ipa_func,
- u16 cmd_code, long data,
+ u16 cmd_code, u32 *data,
enum qeth_prot_versions prot)
{
- int length = 0;
+ unsigned int length = data ? SETASS_DATA_SIZEOF(flags_32bit) : 0;
struct qeth_cmd_buffer *iob;
QETH_CARD_TEXT_(card, 4, "simassp%i", prot);
- if (data)
- length = sizeof(__u32);
iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, length, prot);
if (!iob)
return -ENOMEM;
- __ipa_cmd(iob)->data.setassparms.data.flags_32bit = (__u32) data;
+ if (data)
+ __ipa_cmd(iob)->data.setassparms.data.flags_32bit = *data;
return qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL);
}
EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms_prot);
@@ -5670,6 +5497,8 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
if (rc)
goto err_chp_desc;
qeth_determine_capabilities(card);
+ qeth_set_blkt_defaults(card);
+
enforced_disc = qeth_enforce_discipline(card);
switch (enforced_disc) {
case QETH_DISCIPLINE_UNDETERMINED:
@@ -5707,7 +5536,7 @@ static void qeth_core_remove_device(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
- QETH_DBF_TEXT(SETUP, 2, "removedv");
+ QETH_CARD_TEXT(card, 2, "removedv");
if (card->discipline) {
card->discipline->remove(gdev);
@@ -5759,28 +5588,30 @@ static void qeth_core_shutdown(struct ccwgroup_device *gdev)
qdio_free(CARD_DDEV(card));
}
-static int qeth_core_freeze(struct ccwgroup_device *gdev)
+static int qeth_suspend(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
- if (card->discipline && card->discipline->freeze)
- return card->discipline->freeze(gdev);
- return 0;
-}
-static int qeth_core_thaw(struct ccwgroup_device *gdev)
-{
- struct qeth_card *card = dev_get_drvdata(&gdev->dev);
- if (card->discipline && card->discipline->thaw)
- return card->discipline->thaw(gdev);
+ qeth_set_allowed_threads(card, 0, 1);
+ wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
+ if (gdev->state == CCWGROUP_OFFLINE)
+ return 0;
+
+ card->discipline->set_offline(gdev);
return 0;
}
-static int qeth_core_restore(struct ccwgroup_device *gdev)
+static int qeth_resume(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
- if (card->discipline && card->discipline->restore)
- return card->discipline->restore(gdev);
- return 0;
+ int rc;
+
+ rc = card->discipline->set_online(gdev);
+
+ qeth_set_allowed_threads(card, 0xffffffff, 0);
+ if (rc)
+ dev_warn(&card->gdev->dev, "The qeth device driver failed to recover an error on the device\n");
+ return rc;
}
static ssize_t group_store(struct device_driver *ddrv, const char *buf,
@@ -5821,9 +5652,9 @@ static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
.shutdown = qeth_core_shutdown,
.prepare = NULL,
.complete = NULL,
- .freeze = qeth_core_freeze,
- .thaw = qeth_core_thaw,
- .restore = qeth_core_restore,
+ .freeze = qeth_suspend,
+ .thaw = qeth_resume,
+ .restore = qeth_resume,
};
struct qeth_card *qeth_get_card_by_busid(char *bus_id)
@@ -5902,8 +5733,8 @@ static int qeth_start_csum_cb(struct qeth_card *card, struct qeth_reply *reply,
static int qeth_set_csum_off(struct qeth_card *card, enum qeth_ipa_funcs cstype,
enum qeth_prot_versions prot)
{
- return qeth_send_simple_setassparms_prot(card, cstype,
- IPA_CMD_ASS_STOP, 0, prot);
+ return qeth_send_simple_setassparms_prot(card, cstype, IPA_CMD_ASS_STOP,
+ NULL, prot);
}
static int qeth_set_csum_on(struct qeth_card *card, enum qeth_ipa_funcs cstype,
@@ -5934,7 +5765,8 @@ static int qeth_set_csum_on(struct qeth_card *card, enum qeth_ipa_funcs cstype,
return -EOPNOTSUPP;
}
- iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_ENABLE, 4,
+ iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_ENABLE,
+ SETASS_DATA_SIZEOF(flags_32bit),
prot);
if (!iob) {
qeth_set_csum_off(card, cstype, prot);
@@ -5991,7 +5823,7 @@ static int qeth_set_tso_off(struct qeth_card *card,
enum qeth_prot_versions prot)
{
return qeth_send_simple_setassparms_prot(card, IPA_OUTBOUND_TSO,
- IPA_CMD_ASS_STOP, 0, prot);
+ IPA_CMD_ASS_STOP, NULL, prot);
}
static int qeth_set_tso_on(struct qeth_card *card,
@@ -6017,7 +5849,8 @@ static int qeth_set_tso_on(struct qeth_card *card,
}
iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
- IPA_CMD_ASS_ENABLE, sizeof(caps), prot);
+ IPA_CMD_ASS_ENABLE,
+ SETASS_DATA_SIZEOF(caps), prot);
if (!iob) {
qeth_set_tso_off(card, prot);
return -ENOMEM;
@@ -6104,8 +5937,8 @@ int qeth_set_features(struct net_device *dev, netdev_features_t features)
netdev_features_t changed = dev->features ^ features;
int rc = 0;
- QETH_DBF_TEXT(SETUP, 2, "setfeat");
- QETH_DBF_HEX(SETUP, 2, &features, sizeof(features));
+ QETH_CARD_TEXT(card, 2, "setfeat");
+ QETH_CARD_HEX(card, 2, &features, sizeof(features));
if ((changed & NETIF_F_IP_CSUM)) {
rc = qeth_set_ipa_csum(card, features & NETIF_F_IP_CSUM,
@@ -6151,7 +5984,7 @@ netdev_features_t qeth_fix_features(struct net_device *dev,
{
struct qeth_card *card = dev->ml_priv;
- QETH_DBF_TEXT(SETUP, 2, "fixfeat");
+ QETH_CARD_TEXT(card, 2, "fixfeat");
if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
features &= ~NETIF_F_IP_CSUM;
if (!qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6))
@@ -6164,7 +5997,7 @@ netdev_features_t qeth_fix_features(struct net_device *dev,
if (!qeth_is_supported6(card, IPA_OUTBOUND_TSO))
features &= ~NETIF_F_TSO6;
- QETH_DBF_HEX(SETUP, 2, &features, sizeof(features));
+ QETH_CARD_HEX(card, 2, &features, sizeof(features));
return features;
}
EXPORT_SYMBOL_GPL(qeth_fix_features);
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index f5237b7c14c4..75b5834ed28d 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -31,14 +31,12 @@ extern unsigned char IPA_PDU_HEADER[];
#define QETH_CLEAR_CHANNEL_PARM -10
#define QETH_HALT_CHANNEL_PARM -11
-#define QETH_RCD_PARM -12
static inline bool qeth_intparm_is_iob(unsigned long intparm)
{
switch (intparm) {
case QETH_CLEAR_CHANNEL_PARM:
case QETH_HALT_CHANNEL_PARM:
- case QETH_RCD_PARM:
case 0:
return false;
}
@@ -381,9 +379,7 @@ struct qeth_ipacmd_layer2setdelvlan {
__u16 vlan_id;
} __attribute__ ((packed));
-
struct qeth_ipacmd_setassparms_hdr {
- __u32 assist_no;
__u16 length;
__u16 command_code;
__u16 return_code;
@@ -428,6 +424,7 @@ struct qeth_tso_start_data {
/* SETASSPARMS IPA Command: */
struct qeth_ipacmd_setassparms {
+ u32 assist_no;
struct qeth_ipacmd_setassparms_hdr hdr;
union {
__u32 flags_32bit;
@@ -439,6 +436,8 @@ struct qeth_ipacmd_setassparms {
} data;
} __attribute__ ((packed));
+#define SETASS_DATA_SIZEOF(field) FIELD_SIZEOF(struct qeth_ipacmd_setassparms,\
+ data.field)
/* SETRTG IPA Command: ****************************************************/
struct qeth_set_routing {
@@ -526,8 +525,6 @@ struct qeth_query_switch_attributes {
#define QETH_SETADP_FLAGS_VIRTUAL_MAC 0x80 /* for CHANGE_ADDR_READ_MAC */
struct qeth_ipacmd_setadpparms_hdr {
- u32 supp_hw_cmds;
- u32 reserved1;
u16 cmdlength;
u16 reserved2;
u32 command_code;
@@ -539,6 +536,7 @@ struct qeth_ipacmd_setadpparms_hdr {
};
struct qeth_ipacmd_setadpparms {
+ struct qeth_ipa_caps hw_cmds;
struct qeth_ipacmd_setadpparms_hdr hdr;
union {
struct qeth_query_cmds_supp query_cmds_supp;
@@ -552,6 +550,9 @@ struct qeth_ipacmd_setadpparms {
} data;
} __attribute__ ((packed));
+#define SETADP_DATA_SIZEOF(field) FIELD_SIZEOF(struct qeth_ipacmd_setadpparms,\
+ data.field)
+
/* CREATE_ADDR IPA Command: ***********************************************/
struct qeth_create_destroy_address {
__u8 unique_id[8];
@@ -598,6 +599,11 @@ struct qeth_ipacmd_diagass {
__u8 cdata[64];
} __attribute__ ((packed));
+#define DIAG_HDR_LEN offsetofend(struct qeth_ipacmd_diagass, ext)
+#define DIAG_SUB_HDR_LEN (offsetofend(struct qeth_ipacmd_diagass, ext) -\
+ offsetof(struct qeth_ipacmd_diagass, \
+ subcmd_len))
+
/* VNIC Characteristics IPA Command: *****************************************/
/* IPA commands/sub commands for VNICC */
#define IPA_VNICC_QUERY_CHARS 0x00000000L
@@ -624,12 +630,6 @@ struct qeth_ipacmd_diagass {
/* VNICC header */
struct qeth_ipacmd_vnicc_hdr {
- u32 sup;
- u32 cur;
-};
-
-/* VNICC sub command header */
-struct qeth_vnicc_sub_hdr {
u16 data_length;
u16 reserved;
u32 sub_command;
@@ -654,15 +654,18 @@ struct qeth_vnicc_getset_timeout {
/* complete VNICC IPA command message */
struct qeth_ipacmd_vnicc {
+ struct qeth_ipa_caps vnicc_cmds;
struct qeth_ipacmd_vnicc_hdr hdr;
- struct qeth_vnicc_sub_hdr sub_hdr;
union {
struct qeth_vnicc_query_cmds query_cmds;
struct qeth_vnicc_set_char set_char;
struct qeth_vnicc_getset_timeout getset_timeout;
- };
+ } data;
};
+#define VNICC_DATA_SIZEOF(field) FIELD_SIZEOF(struct qeth_ipacmd_vnicc,\
+ data.field)
+
/* SETBRIDGEPORT IPA Command: *********************************************/
enum qeth_ipa_sbp_cmd {
IPA_SBP_QUERY_COMMANDS_SUPPORTED = 0x00000000L,
@@ -688,8 +691,6 @@ struct mac_addr_lnid {
} __packed;
struct qeth_ipacmd_sbp_hdr {
- __u32 supported_sbp_cmds;
- __u32 enabled_sbp_cmds;
__u16 cmdlength;
__u16 reserved1;
__u32 command_code;
@@ -704,16 +705,10 @@ struct qeth_sbp_query_cmds_supp {
__u32 reserved;
} __packed;
-struct qeth_sbp_reset_role {
-} __packed;
-
struct qeth_sbp_set_primary {
struct net_if_token token;
} __packed;
-struct qeth_sbp_set_secondary {
-} __packed;
-
struct qeth_sbp_port_entry {
__u8 role;
__u8 state;
@@ -739,17 +734,19 @@ struct qeth_sbp_state_change {
} __packed;
struct qeth_ipacmd_setbridgeport {
+ struct qeth_ipa_caps sbp_cmds;
struct qeth_ipacmd_sbp_hdr hdr;
union {
struct qeth_sbp_query_cmds_supp query_cmds_supp;
- struct qeth_sbp_reset_role reset_role;
struct qeth_sbp_set_primary set_primary;
- struct qeth_sbp_set_secondary set_secondary;
struct qeth_sbp_query_ports query_ports;
struct qeth_sbp_state_change state_change;
} data;
} __packed;
+#define SBP_DATA_SIZEOF(field) FIELD_SIZEOF(struct qeth_ipacmd_setbridgeport,\
+ data.field)
+
/* ADDRESS_CHANGE_NOTIFICATION adapter-initiated "command" *******************/
/* Bitmask for entry->change_code. Both bits may be raised. */
enum qeth_ipa_addr_change_code {
@@ -808,6 +805,8 @@ struct qeth_ipa_cmd {
} data;
} __attribute__ ((packed));
+#define IPA_DATA_SIZEOF(field) FIELD_SIZEOF(struct qeth_ipa_cmd, data.field)
+
/*
* special command for ARP processing.
* this is not included in setassparms command before, because we get
@@ -825,10 +824,6 @@ enum qeth_ipa_arp_return_codes {
extern const char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc);
extern const char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd);
-#define QETH_SETADP_BASE_LEN (sizeof(struct qeth_ipacmd_hdr) + \
- sizeof(struct qeth_ipacmd_setadpparms_hdr))
-#define QETH_SNMP_SETADP_CMDLENGTH 16
-
/* Helper functions */
#define IS_IPA_REPLY(cmd) ((cmd->hdr.initiator == IPA_CMD_INITIATOR_HOST) || \
(cmd->hdr.initiator == IPA_CMD_INITIATOR_OSA_REPLY))
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index ff8a6cd790b1..fd64bc3f4062 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -85,7 +85,8 @@ static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac,
struct qeth_cmd_buffer *iob;
QETH_CARD_TEXT(card, 2, "L2sdmac");
- iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
+ iob = qeth_ipa_alloc_cmd(card, ipacmd, QETH_PROT_IPV4,
+ IPA_DATA_SIZEOF(setdelmac));
if (!iob)
return -ENOMEM;
cmd = __ipa_cmd(iob);
@@ -163,8 +164,9 @@ static void qeth_l2_drain_rx_mode_cache(struct qeth_card *card)
static void qeth_l2_fill_header(struct qeth_qdio_out_q *queue,
struct qeth_hdr *hdr, struct sk_buff *skb,
- int ipv, int cast_type, unsigned int data_len)
+ int ipv, unsigned int data_len)
{
+ int cast_type = qeth_get_ether_cast_type(skb);
struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
hdr->hdr.l2.pkt_length = data_len;
@@ -240,7 +242,8 @@ static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i,
struct qeth_cmd_buffer *iob;
QETH_CARD_TEXT_(card, 4, "L2sdv%x", ipacmd);
- iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
+ iob = qeth_ipa_alloc_cmd(card, ipacmd, QETH_PROT_IPV4,
+ IPA_DATA_SIZEOF(setdelvlan));
if (!iob)
return -ENOMEM;
cmd = __ipa_cmd(iob);
@@ -274,8 +277,7 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev,
static void qeth_l2_stop_card(struct qeth_card *card)
{
- QETH_DBF_TEXT(SETUP , 2, "stopcard");
- QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
+ QETH_CARD_TEXT(card, 2, "stopcard");
qeth_set_allowed_threads(card, 0, 1);
@@ -292,10 +294,6 @@ static void qeth_l2_stop_card(struct qeth_card *card)
qeth_clear_working_pool_list(card);
card->state = CARD_STATE_DOWN;
}
- if (card->state == CARD_STATE_DOWN) {
- qeth_clear_cmd_buffers(&card->read);
- qeth_clear_cmd_buffers(&card->write);
- }
flush_workqueue(card->event_wq);
card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
@@ -354,8 +352,7 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)
{
int rc = 0;
- QETH_DBF_TEXT(SETUP, 2, "l2reqmac");
- QETH_DBF_TEXT_(SETUP, 2, "doL2%s", CARD_BUS_ID(card));
+ QETH_CARD_TEXT(card, 2, "l2reqmac");
if (MACHINE_IS_VM) {
rc = qeth_vm_request_mac(card);
@@ -363,7 +360,7 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)
goto out;
QETH_DBF_MESSAGE(2, "z/VM MAC Service failed on device %x: %#x\n",
CARD_DEVID(card), rc);
- QETH_DBF_TEXT_(SETUP, 2, "err%04x", rc);
+ QETH_CARD_TEXT_(card, 2, "err%04x", rc);
/* fall back to alternative mechanism: */
}
@@ -373,7 +370,7 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)
goto out;
QETH_DBF_MESSAGE(2, "READ_MAC Assist failed on device %x: %#x\n",
CARD_DEVID(card), rc);
- QETH_DBF_TEXT_(SETUP, 2, "1err%04x", rc);
+ QETH_CARD_TEXT_(card, 2, "1err%04x", rc);
/* fall back once more: */
}
@@ -383,7 +380,7 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)
eth_hw_addr_random(card->dev);
out:
- QETH_DBF_HEX(SETUP, 2, card->dev->dev_addr, card->dev->addr_len);
+ QETH_CARD_HEX(card, 2, card->dev->dev_addr, card->dev->addr_len);
return 0;
}
@@ -467,7 +464,7 @@ static void qeth_promisc_to_bridge(struct qeth_card *card)
role = QETH_SBP_ROLE_NONE;
rc = qeth_bridgeport_setrole(card, role);
- QETH_DBF_TEXT_(SETUP, 2, "bpm%c%04x",
+ QETH_CARD_TEXT_(card, 2, "bpm%c%04x",
(promisc_mode == SET_PROMISC_MODE_ON) ? '+' : '-', rc);
if (!rc) {
card->options.sbp.role = role;
@@ -602,7 +599,6 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
rc = qeth_l2_xmit_osn(card, skb, queue);
else
rc = qeth_xmit(card, skb, queue, qeth_get_ip_version(skb),
- qeth_get_ether_cast_type(skb),
qeth_l2_fill_header);
if (!rc) {
@@ -796,12 +792,11 @@ static int qeth_l2_set_online(struct ccwgroup_device *gdev)
mutex_lock(&card->discipline_mutex);
mutex_lock(&card->conf_mutex);
- QETH_DBF_TEXT(SETUP, 2, "setonlin");
- QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
+ QETH_CARD_TEXT(card, 2, "setonlin");
rc = qeth_core_hardsetup_card(card, &carrier_ok);
if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
+ QETH_CARD_TEXT_(card, 2, "2err%04x", rc);
rc = -ENODEV;
goto out_remove;
}
@@ -832,7 +827,7 @@ static int qeth_l2_set_online(struct ccwgroup_device *gdev)
qeth_print_status_message(card);
/* softsetup */
- QETH_DBF_TEXT(SETUP, 2, "softsetp");
+ QETH_CARD_TEXT(card, 2, "softsetp");
if (IS_OSD(card) || IS_OSX(card)) {
rc = qeth_l2_start_ipassists(card);
@@ -842,7 +837,7 @@ static int qeth_l2_set_online(struct ccwgroup_device *gdev)
rc = qeth_init_qdio_queues(card);
if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
+ QETH_CARD_TEXT_(card, 2, "6err%d", rc);
rc = -ENODEV;
goto out_remove;
}
@@ -882,7 +877,6 @@ out_remove:
ccw_device_set_offline(CARD_WDEV(card));
ccw_device_set_offline(CARD_RDEV(card));
qdio_free(CARD_DDEV(card));
- card->state = CARD_STATE_DOWN;
mutex_unlock(&card->conf_mutex);
mutex_unlock(&card->discipline_mutex);
@@ -897,8 +891,7 @@ static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev,
mutex_lock(&card->discipline_mutex);
mutex_lock(&card->conf_mutex);
- QETH_DBF_TEXT(SETUP, 3, "setoffl");
- QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *));
+ QETH_CARD_TEXT(card, 3, "setoffl");
if ((!recovery_mode && card->info.hwtrap) || card->info.hwtrap == 2) {
qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
@@ -919,7 +912,7 @@ static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev,
if (!rc)
rc = (rc2) ? rc2 : rc3;
if (rc)
- QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
+ QETH_CARD_TEXT_(card, 2, "1err%d", rc);
qdio_free(CARD_DDEV(card));
/* let user_space know that device is offline */
@@ -972,33 +965,6 @@ static void __exit qeth_l2_exit(void)
pr_info("unregister layer 2 discipline\n");
}
-static int qeth_l2_pm_suspend(struct ccwgroup_device *gdev)
-{
- struct qeth_card *card = dev_get_drvdata(&gdev->dev);
-
- qeth_set_allowed_threads(card, 0, 1);
- wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
- if (gdev->state == CCWGROUP_OFFLINE)
- return 0;
-
- qeth_l2_set_offline(gdev);
- return 0;
-}
-
-static int qeth_l2_pm_resume(struct ccwgroup_device *gdev)
-{
- struct qeth_card *card = dev_get_drvdata(&gdev->dev);
- int rc;
-
- rc = qeth_l2_set_online(gdev);
-
- qeth_set_allowed_threads(card, 0xffffffff, 0);
- if (rc)
- dev_warn(&card->gdev->dev, "The qeth device driver "
- "failed to recover an error on the device\n");
- return rc;
-}
-
/* Returns zero if the command is successfully "consumed" */
static int qeth_l2_control_event(struct qeth_card *card,
struct qeth_ipa_cmd *cmd)
@@ -1028,50 +994,16 @@ struct qeth_discipline qeth_l2_discipline = {
.remove = qeth_l2_remove_device,
.set_online = qeth_l2_set_online,
.set_offline = qeth_l2_set_offline,
- .freeze = qeth_l2_pm_suspend,
- .thaw = qeth_l2_pm_resume,
- .restore = qeth_l2_pm_resume,
.do_ioctl = NULL,
.control_event_handler = qeth_l2_control_event,
};
EXPORT_SYMBOL_GPL(qeth_l2_discipline);
-static int qeth_osn_send_control_data(struct qeth_card *card, int len,
- struct qeth_cmd_buffer *iob)
+static void qeth_osn_assist_cb(struct qeth_card *card,
+ struct qeth_cmd_buffer *iob)
{
- struct qeth_channel *channel = iob->channel;
- int rc = 0;
-
- QETH_CARD_TEXT(card, 5, "osndctrd");
-
- wait_event(card->wait_q, qeth_trylock_channel(channel));
- iob->finalize(card, iob, len);
- QETH_DBF_HEX(CTRL, 2, iob->data, min(len, QETH_DBF_CTRL_LEN));
- QETH_CARD_TEXT(card, 6, "osnoirqp");
- spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
- rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
- (addr_t) iob, 0, 0, iob->timeout);
- spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
- if (rc) {
- QETH_DBF_MESSAGE(2, "qeth_osn_send_control_data: "
- "ccw_device_start rc = %i\n", rc);
- QETH_CARD_TEXT_(card, 2, " err%d", rc);
- qeth_release_buffer(channel, iob);
- atomic_set(&channel->irq_pending, 0);
- wake_up(&card->wait_q);
- }
- return rc;
-}
-
-static int qeth_osn_send_ipa_cmd(struct qeth_card *card,
- struct qeth_cmd_buffer *iob)
-{
- u16 length;
-
- QETH_CARD_TEXT(card, 4, "osndipa");
-
- memcpy(&length, QETH_IPA_PDU_LEN_TOTAL(iob->data), 2);
- return qeth_osn_send_control_data(card, length, iob);
+ qeth_notify_reply(iob->reply, 0);
+ qeth_put_cmd(iob);
}
int qeth_osn_assist(struct net_device *dev, void *data, int data_len)
@@ -1079,6 +1011,8 @@ int qeth_osn_assist(struct net_device *dev, void *data, int data_len)
struct qeth_cmd_buffer *iob;
struct qeth_card *card;
+ if (data_len < 0)
+ return -EINVAL;
if (!dev)
return -ENODEV;
card = dev->ml_priv;
@@ -1087,10 +1021,16 @@ int qeth_osn_assist(struct net_device *dev, void *data, int data_len)
QETH_CARD_TEXT(card, 2, "osnsdmc");
if (!qeth_card_hw_is_reachable(card))
return -ENODEV;
- iob = qeth_wait_for_buffer(&card->write);
+
+ iob = qeth_alloc_cmd(&card->write, IPA_PDU_HEADER_SIZE + data_len, 1,
+ QETH_IPA_TIMEOUT);
+ if (!iob)
+ return -ENOMEM;
+
qeth_prepare_ipa_cmd(card, iob, (u16) data_len);
memcpy(__ipa_cmd(iob), data, data_len);
- return qeth_osn_send_ipa_cmd(card, iob);
+ iob->callback = qeth_osn_assist_cb;
+ return qeth_send_ipa_cmd(card, iob, NULL, NULL);
}
EXPORT_SYMBOL(qeth_osn_assist);
@@ -1456,22 +1396,25 @@ static int qeth_bridgeport_makerc(struct qeth_card *card,
static struct qeth_cmd_buffer *qeth_sbp_build_cmd(struct qeth_card *card,
enum qeth_ipa_sbp_cmd sbp_cmd,
- unsigned int cmd_length)
+ unsigned int data_length)
{
enum qeth_ipa_cmds ipa_cmd = IS_IQD(card) ? IPA_CMD_SETBRIDGEPORT_IQD :
IPA_CMD_SETBRIDGEPORT_OSA;
+ struct qeth_ipacmd_sbp_hdr *hdr;
struct qeth_cmd_buffer *iob;
- struct qeth_ipa_cmd *cmd;
- iob = qeth_get_ipacmd_buffer(card, ipa_cmd, 0);
+ iob = qeth_ipa_alloc_cmd(card, ipa_cmd, QETH_PROT_NONE,
+ data_length +
+ offsetof(struct qeth_ipacmd_setbridgeport,
+ data));
if (!iob)
return iob;
- cmd = __ipa_cmd(iob);
- cmd->data.sbp.hdr.cmdlength = sizeof(struct qeth_ipacmd_sbp_hdr) +
- cmd_length;
- cmd->data.sbp.hdr.command_code = sbp_cmd;
- cmd->data.sbp.hdr.used_total = 1;
- cmd->data.sbp.hdr.seq_no = 1;
+
+ hdr = &__ipa_cmd(iob)->data.sbp.hdr;
+ hdr->cmdlength = sizeof(*hdr) + data_length;
+ hdr->command_code = sbp_cmd;
+ hdr->used_total = 1;
+ hdr->seq_no = 1;
return iob;
}
@@ -1506,7 +1449,7 @@ static void qeth_bridgeport_query_support(struct qeth_card *card)
QETH_CARD_TEXT(card, 2, "brqsuppo");
iob = qeth_sbp_build_cmd(card, IPA_SBP_QUERY_COMMANDS_SUPPORTED,
- sizeof(struct qeth_sbp_query_cmds_supp));
+ SBP_DATA_SIZEOF(query_cmds_supp));
if (!iob)
return;
@@ -1598,23 +1541,21 @@ static int qeth_bridgeport_set_cb(struct qeth_card *card,
*/
int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role)
{
- int cmdlength;
struct qeth_cmd_buffer *iob;
enum qeth_ipa_sbp_cmd setcmd;
+ unsigned int cmdlength = 0;
QETH_CARD_TEXT(card, 2, "brsetrol");
switch (role) {
case QETH_SBP_ROLE_NONE:
setcmd = IPA_SBP_RESET_BRIDGE_PORT_ROLE;
- cmdlength = sizeof(struct qeth_sbp_reset_role);
break;
case QETH_SBP_ROLE_PRIMARY:
setcmd = IPA_SBP_SET_PRIMARY_BRIDGE_PORT;
- cmdlength = sizeof(struct qeth_sbp_set_primary);
+ cmdlength = SBP_DATA_SIZEOF(set_primary);
break;
case QETH_SBP_ROLE_SECONDARY:
setcmd = IPA_SBP_SET_SECONDARY_BRIDGE_PORT;
- cmdlength = sizeof(struct qeth_sbp_set_secondary);
break;
default:
return -EINVAL;
@@ -1764,10 +1705,6 @@ static int qeth_l2_vnicc_makerc(struct qeth_card *card, u16 ipa_rc)
struct _qeth_l2_vnicc_request_cbctl {
u32 sub_cmd;
struct {
- u32 vnic_char;
- u32 timeout;
- } param;
- struct {
union{
u32 *sup_cmds;
u32 *timeout;
@@ -1789,80 +1726,52 @@ static int qeth_l2_vnicc_request_cb(struct qeth_card *card,
if (cmd->hdr.return_code)
return qeth_l2_vnicc_makerc(card, cmd->hdr.return_code);
/* return results to caller */
- card->options.vnicc.sup_chars = rep->hdr.sup;
- card->options.vnicc.cur_chars = rep->hdr.cur;
+ card->options.vnicc.sup_chars = rep->vnicc_cmds.supported;
+ card->options.vnicc.cur_chars = rep->vnicc_cmds.enabled;
if (cbctl->sub_cmd == IPA_VNICC_QUERY_CMDS)
- *cbctl->result.sup_cmds = rep->query_cmds.sup_cmds;
+ *cbctl->result.sup_cmds = rep->data.query_cmds.sup_cmds;
if (cbctl->sub_cmd == IPA_VNICC_GET_TIMEOUT)
- *cbctl->result.timeout = rep->getset_timeout.timeout;
+ *cbctl->result.timeout = rep->data.getset_timeout.timeout;
return 0;
}
-/* generic VNICC request */
-static int qeth_l2_vnicc_request(struct qeth_card *card,
- struct _qeth_l2_vnicc_request_cbctl *cbctl)
+static struct qeth_cmd_buffer *qeth_l2_vnicc_build_cmd(struct qeth_card *card,
+ u32 vnicc_cmd,
+ unsigned int data_length)
{
- struct qeth_ipacmd_vnicc *req;
+ struct qeth_ipacmd_vnicc_hdr *hdr;
struct qeth_cmd_buffer *iob;
- struct qeth_ipa_cmd *cmd;
-
- QETH_CARD_TEXT(card, 2, "vniccreq");
- /* get new buffer for request */
- iob = qeth_get_ipacmd_buffer(card, IPA_CMD_VNICC, 0);
+ iob = qeth_ipa_alloc_cmd(card, IPA_CMD_VNICC, QETH_PROT_NONE,
+ data_length +
+ offsetof(struct qeth_ipacmd_vnicc, data));
if (!iob)
- return -ENOMEM;
-
- /* create header for request */
- cmd = __ipa_cmd(iob);
- req = &cmd->data.vnicc;
-
- /* create sub command header for request */
- req->sub_hdr.data_length = sizeof(req->sub_hdr);
- req->sub_hdr.sub_command = cbctl->sub_cmd;
-
- /* create sub command specific request fields */
- switch (cbctl->sub_cmd) {
- case IPA_VNICC_QUERY_CHARS:
- break;
- case IPA_VNICC_QUERY_CMDS:
- req->sub_hdr.data_length += sizeof(req->query_cmds);
- req->query_cmds.vnic_char = cbctl->param.vnic_char;
- break;
- case IPA_VNICC_ENABLE:
- case IPA_VNICC_DISABLE:
- req->sub_hdr.data_length += sizeof(req->set_char);
- req->set_char.vnic_char = cbctl->param.vnic_char;
- break;
- case IPA_VNICC_SET_TIMEOUT:
- req->getset_timeout.timeout = cbctl->param.timeout;
- /* fallthrough */
- case IPA_VNICC_GET_TIMEOUT:
- req->sub_hdr.data_length += sizeof(req->getset_timeout);
- req->getset_timeout.vnic_char = cbctl->param.vnic_char;
- break;
- default:
- qeth_release_buffer(iob->channel, iob);
- return -EOPNOTSUPP;
- }
+ return NULL;
- /* send request */
- return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, cbctl);
+ hdr = &__ipa_cmd(iob)->data.vnicc.hdr;
+ hdr->data_length = sizeof(*hdr) + data_length;
+ hdr->sub_command = vnicc_cmd;
+ return iob;
}
/* VNICC query VNIC characteristics request */
static int qeth_l2_vnicc_query_chars(struct qeth_card *card)
{
struct _qeth_l2_vnicc_request_cbctl cbctl;
+ struct qeth_cmd_buffer *iob;
+
+ QETH_CARD_TEXT(card, 2, "vniccqch");
+ iob = qeth_l2_vnicc_build_cmd(card, IPA_VNICC_QUERY_CHARS, 0);
+ if (!iob)
+ return -ENOMEM;
/* prepare callback control */
cbctl.sub_cmd = IPA_VNICC_QUERY_CHARS;
- QETH_CARD_TEXT(card, 2, "vniccqch");
- return qeth_l2_vnicc_request(card, &cbctl);
+ return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, &cbctl);
}
/* VNICC query sub commands request */
@@ -1870,14 +1779,21 @@ static int qeth_l2_vnicc_query_cmds(struct qeth_card *card, u32 vnic_char,
u32 *sup_cmds)
{
struct _qeth_l2_vnicc_request_cbctl cbctl;
+ struct qeth_cmd_buffer *iob;
+
+ QETH_CARD_TEXT(card, 2, "vniccqcm");
+ iob = qeth_l2_vnicc_build_cmd(card, IPA_VNICC_QUERY_CMDS,
+ VNICC_DATA_SIZEOF(query_cmds));
+ if (!iob)
+ return -ENOMEM;
+
+ __ipa_cmd(iob)->data.vnicc.data.query_cmds.vnic_char = vnic_char;
/* prepare callback control */
cbctl.sub_cmd = IPA_VNICC_QUERY_CMDS;
- cbctl.param.vnic_char = vnic_char;
cbctl.result.sup_cmds = sup_cmds;
- QETH_CARD_TEXT(card, 2, "vniccqcm");
- return qeth_l2_vnicc_request(card, &cbctl);
+ return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, &cbctl);
}
/* VNICC enable/disable characteristic request */
@@ -1885,31 +1801,47 @@ static int qeth_l2_vnicc_set_char(struct qeth_card *card, u32 vnic_char,
u32 cmd)
{
struct _qeth_l2_vnicc_request_cbctl cbctl;
+ struct qeth_cmd_buffer *iob;
+
+ QETH_CARD_TEXT(card, 2, "vniccedc");
+ iob = qeth_l2_vnicc_build_cmd(card, cmd, VNICC_DATA_SIZEOF(set_char));
+ if (!iob)
+ return -ENOMEM;
+
+ __ipa_cmd(iob)->data.vnicc.data.set_char.vnic_char = vnic_char;
/* prepare callback control */
cbctl.sub_cmd = cmd;
- cbctl.param.vnic_char = vnic_char;
- QETH_CARD_TEXT(card, 2, "vniccedc");
- return qeth_l2_vnicc_request(card, &cbctl);
+ return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, &cbctl);
}
/* VNICC get/set timeout for characteristic request */
static int qeth_l2_vnicc_getset_timeout(struct qeth_card *card, u32 vnicc,
u32 cmd, u32 *timeout)
{
+ struct qeth_vnicc_getset_timeout *getset_timeout;
struct _qeth_l2_vnicc_request_cbctl cbctl;
+ struct qeth_cmd_buffer *iob;
+
+ QETH_CARD_TEXT(card, 2, "vniccgst");
+ iob = qeth_l2_vnicc_build_cmd(card, cmd,
+ VNICC_DATA_SIZEOF(getset_timeout));
+ if (!iob)
+ return -ENOMEM;
+
+ getset_timeout = &__ipa_cmd(iob)->data.vnicc.data.getset_timeout;
+ getset_timeout->vnic_char = vnicc;
+
+ if (cmd == IPA_VNICC_SET_TIMEOUT)
+ getset_timeout->timeout = *timeout;
/* prepare callback control */
cbctl.sub_cmd = cmd;
- cbctl.param.vnic_char = vnicc;
- if (cmd == IPA_VNICC_SET_TIMEOUT)
- cbctl.param.timeout = *timeout;
if (cmd == IPA_VNICC_GET_TIMEOUT)
cbctl.result.timeout = timeout;
- QETH_CARD_TEXT(card, 2, "vniccgst");
- return qeth_l2_vnicc_request(card, &cbctl);
+ return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, &cbctl);
}
/* set current VNICC flag state; called from sysfs store function */
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 13bf3e2e9cea..2dd99f103671 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -32,7 +32,6 @@
#include <net/route.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
-#include <net/ip6_fib.h>
#include <net/iucv/af_iucv.h>
#include <linux/hashtable.h>
@@ -377,7 +376,8 @@ static int qeth_l3_send_setdelmc(struct qeth_card *card,
QETH_CARD_TEXT(card, 4, "setdelmc");
- iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
+ iob = qeth_ipa_alloc_cmd(card, ipacmd, addr->proto,
+ IPA_DATA_SIZEOF(setdelipm));
if (!iob)
return -ENOMEM;
cmd = __ipa_cmd(iob);
@@ -429,7 +429,8 @@ static int qeth_l3_send_setdelip(struct qeth_card *card,
QETH_CARD_TEXT(card, 4, "setdelip");
- iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
+ iob = qeth_ipa_alloc_cmd(card, ipacmd, addr->proto,
+ IPA_DATA_SIZEOF(setdelip6));
if (!iob)
return -ENOMEM;
cmd = __ipa_cmd(iob);
@@ -461,7 +462,8 @@ static int qeth_l3_send_setrouting(struct qeth_card *card,
struct qeth_cmd_buffer *iob;
QETH_CARD_TEXT(card, 4, "setroutg");
- iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETRTG, prot);
+ iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETRTG, prot,
+ IPA_DATA_SIZEOF(setrtg));
if (!iob)
return -ENOMEM;
cmd = __ipa_cmd(iob);
@@ -742,7 +744,7 @@ static int qeth_l3_setadapter_parms(struct qeth_card *card)
{
int rc = 0;
- QETH_DBF_TEXT(SETUP, 2, "setadprm");
+ QETH_CARD_TEXT(card, 2, "setadprm");
if (qeth_adp_supported(card, IPA_SETADP_ALTER_MAC_ADDRESS)) {
rc = qeth_setadpparms_change_macaddr(card);
@@ -767,7 +769,7 @@ static int qeth_l3_start_ipa_arp_processing(struct qeth_card *card)
return 0;
}
rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
- IPA_CMD_ASS_START, 0);
+ IPA_CMD_ASS_START, NULL);
if (rc) {
dev_warn(&card->gdev->dev,
"Starting ARP processing support for %s failed\n",
@@ -790,7 +792,7 @@ static int qeth_l3_start_ipa_source_mac(struct qeth_card *card)
}
rc = qeth_send_simple_setassparms(card, IPA_SOURCE_MAC,
- IPA_CMD_ASS_START, 0);
+ IPA_CMD_ASS_START, NULL);
if (rc)
dev_warn(&card->gdev->dev,
"Starting source MAC-address support for %s failed\n",
@@ -811,7 +813,7 @@ static int qeth_l3_start_ipa_vlan(struct qeth_card *card)
}
rc = qeth_send_simple_setassparms(card, IPA_VLAN_PRIO,
- IPA_CMD_ASS_START, 0);
+ IPA_CMD_ASS_START, NULL);
if (rc) {
dev_warn(&card->gdev->dev,
"Starting VLAN support for %s failed\n",
@@ -836,7 +838,7 @@ static int qeth_l3_start_ipa_multicast(struct qeth_card *card)
}
rc = qeth_send_simple_setassparms(card, IPA_MULTICASTING,
- IPA_CMD_ASS_START, 0);
+ IPA_CMD_ASS_START, NULL);
if (rc) {
dev_warn(&card->gdev->dev,
"Starting multicast support for %s failed\n",
@@ -850,6 +852,7 @@ static int qeth_l3_start_ipa_multicast(struct qeth_card *card)
static int qeth_l3_softsetup_ipv6(struct qeth_card *card)
{
+ u32 ipv6_data = 3;
int rc;
QETH_CARD_TEXT(card, 3, "softipv6");
@@ -857,16 +860,16 @@ static int qeth_l3_softsetup_ipv6(struct qeth_card *card)
if (IS_IQD(card))
goto out;
- rc = qeth_send_simple_setassparms(card, IPA_IPV6,
- IPA_CMD_ASS_START, 3);
+ rc = qeth_send_simple_setassparms(card, IPA_IPV6, IPA_CMD_ASS_START,
+ &ipv6_data);
if (rc) {
dev_err(&card->gdev->dev,
"Activating IPv6 support for %s failed\n",
QETH_CARD_IFNAME(card));
return rc;
}
- rc = qeth_send_simple_setassparms_v6(card, IPA_IPV6,
- IPA_CMD_ASS_START, 0);
+ rc = qeth_send_simple_setassparms_v6(card, IPA_IPV6, IPA_CMD_ASS_START,
+ NULL);
if (rc) {
dev_err(&card->gdev->dev,
"Activating IPv6 support for %s failed\n",
@@ -874,7 +877,7 @@ static int qeth_l3_softsetup_ipv6(struct qeth_card *card)
return rc;
}
rc = qeth_send_simple_setassparms_v6(card, IPA_PASSTHRU,
- IPA_CMD_ASS_START, 0);
+ IPA_CMD_ASS_START, NULL);
if (rc) {
dev_warn(&card->gdev->dev,
"Enabling the passthrough mode for %s failed\n",
@@ -900,6 +903,7 @@ static int qeth_l3_start_ipa_ipv6(struct qeth_card *card)
static int qeth_l3_start_ipa_broadcast(struct qeth_card *card)
{
+ u32 filter_data = 1;
int rc;
QETH_CARD_TEXT(card, 3, "stbrdcst");
@@ -912,7 +916,7 @@ static int qeth_l3_start_ipa_broadcast(struct qeth_card *card)
goto out;
}
rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
- IPA_CMD_ASS_START, 0);
+ IPA_CMD_ASS_START, NULL);
if (rc) {
dev_warn(&card->gdev->dev, "Enabling broadcast filtering for "
"%s failed\n", QETH_CARD_IFNAME(card));
@@ -920,7 +924,7 @@ static int qeth_l3_start_ipa_broadcast(struct qeth_card *card)
}
rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
- IPA_CMD_ASS_CONFIGURE, 1);
+ IPA_CMD_ASS_CONFIGURE, &filter_data);
if (rc) {
dev_warn(&card->gdev->dev,
"Setting up broadcast filtering for %s failed\n",
@@ -930,7 +934,7 @@ static int qeth_l3_start_ipa_broadcast(struct qeth_card *card)
card->info.broadcast_capable = QETH_BROADCAST_WITH_ECHO;
dev_info(&card->gdev->dev, "Broadcast enabled\n");
rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
- IPA_CMD_ASS_ENABLE, 1);
+ IPA_CMD_ASS_ENABLE, &filter_data);
if (rc) {
dev_warn(&card->gdev->dev, "Setting up broadcast echo "
"filtering for %s failed\n", QETH_CARD_IFNAME(card));
@@ -979,10 +983,10 @@ static int qeth_l3_iqd_read_initial_mac(struct qeth_card *card)
struct qeth_cmd_buffer *iob;
struct qeth_ipa_cmd *cmd;
- QETH_DBF_TEXT(SETUP, 2, "hsrmac");
+ QETH_CARD_TEXT(card, 2, "hsrmac");
- iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR,
- QETH_PROT_IPV6);
+ iob = qeth_ipa_alloc_cmd(card, IPA_CMD_CREATE_ADDR, QETH_PROT_IPV6,
+ IPA_DATA_SIZEOF(create_destroy_addr));
if (!iob)
return -ENOMEM;
cmd = __ipa_cmd(iob);
@@ -1017,7 +1021,7 @@ static int qeth_l3_get_unique_id(struct qeth_card *card)
struct qeth_cmd_buffer *iob;
struct qeth_ipa_cmd *cmd;
- QETH_DBF_TEXT(SETUP, 2, "guniqeid");
+ QETH_CARD_TEXT(card, 2, "guniqeid");
if (!qeth_is_supported(card, IPA_IPV6)) {
card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
@@ -1025,8 +1029,8 @@ static int qeth_l3_get_unique_id(struct qeth_card *card)
return 0;
}
- iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR,
- QETH_PROT_IPV6);
+ iob = qeth_ipa_alloc_cmd(card, IPA_CMD_CREATE_ADDR, QETH_PROT_IPV6,
+ IPA_DATA_SIZEOF(create_destroy_addr));
if (!iob)
return -ENOMEM;
cmd = __ipa_cmd(iob);
@@ -1044,7 +1048,7 @@ qeth_diags_trace_cb(struct qeth_card *card, struct qeth_reply *reply,
struct qeth_ipa_cmd *cmd;
__u16 rc;
- QETH_DBF_TEXT(SETUP, 2, "diastrcb");
+ QETH_CARD_TEXT(card, 2, "diastrcb");
cmd = (struct qeth_ipa_cmd *)data;
rc = cmd->hdr.return_code;
@@ -1100,14 +1104,12 @@ qeth_diags_trace(struct qeth_card *card, enum qeth_diags_trace_cmds diags_cmd)
struct qeth_cmd_buffer *iob;
struct qeth_ipa_cmd *cmd;
- QETH_DBF_TEXT(SETUP, 2, "diagtrac");
+ QETH_CARD_TEXT(card, 2, "diagtrac");
- iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
+ iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_TRACE, 0);
if (!iob)
return -ENOMEM;
cmd = __ipa_cmd(iob);
- cmd->data.diagass.subcmd_len = 16;
- cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRACE;
cmd->data.diagass.type = QETH_DIAGS_TYPE_HIPERSOCKET;
cmd->data.diagass.action = diags_cmd;
return qeth_send_ipa_cmd(card, iob, qeth_diags_trace_cb, NULL);
@@ -1309,6 +1311,15 @@ static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev,
static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
struct qeth_hdr *hdr)
{
+ struct af_iucv_trans_hdr *iucv = (struct af_iucv_trans_hdr *) skb->data;
+ struct net_device *dev = skb->dev;
+
+ if (IS_IQD(card) && iucv->magic == ETH_P_AF_IUCV) {
+ dev_hard_header(skb, dev, ETH_P_AF_IUCV, dev->dev_addr,
+ "FAKELL", skb->len);
+ return;
+ }
+
if (!(hdr->hdr.l3.flags & QETH_HDR_PASSTHRU)) {
u16 prot = (hdr->hdr.l3.flags & QETH_HDR_IPV6) ? ETH_P_IPV6 :
ETH_P_IP;
@@ -1342,8 +1353,6 @@ static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
tg_addr, "FAKELL", skb->len);
}
- skb->protocol = eth_type_trans(skb, card->dev);
-
/* copy VLAN tag from hdr into skb */
if (!card->options.sniffer &&
(hdr->hdr.l3.ext_flags & (QETH_HDR_EXT_VLAN_FRAME |
@@ -1360,12 +1369,10 @@ static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
int budget, int *done)
{
- struct net_device *dev = card->dev;
int work_done = 0;
struct sk_buff *skb;
struct qeth_hdr *hdr;
unsigned int len;
- __u16 magic;
*done = 0;
WARN_ON_ONCE(!budget);
@@ -1379,23 +1386,12 @@ static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
}
switch (hdr->hdr.l3.id) {
case QETH_HEADER_TYPE_LAYER3:
- magic = *(__u16 *)skb->data;
- if (IS_IQD(card) && magic == ETH_P_AF_IUCV) {
- len = skb->len;
- dev_hard_header(skb, dev, ETH_P_AF_IUCV,
- dev->dev_addr, "FAKELL", len);
- skb->protocol = eth_type_trans(skb, dev);
- netif_receive_skb(skb);
- } else {
- qeth_l3_rebuild_skb(card, skb, hdr);
- len = skb->len;
- napi_gro_receive(&card->napi, skb);
- }
- break;
+ qeth_l3_rebuild_skb(card, skb, hdr);
+ /* fall through */
case QETH_HEADER_TYPE_LAYER2: /* for HiperSockets sniffer */
skb->protocol = eth_type_trans(skb, skb->dev);
len = skb->len;
- netif_receive_skb(skb);
+ napi_gro_receive(&card->napi, skb);
break;
default:
dev_kfree_skb_any(skb);
@@ -1413,8 +1409,7 @@ static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
static void qeth_l3_stop_card(struct qeth_card *card)
{
- QETH_DBF_TEXT(SETUP, 2, "stopcard");
- QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
+ QETH_CARD_TEXT(card, 2, "stopcard");
qeth_set_allowed_threads(card, 0, 1);
@@ -1436,10 +1431,6 @@ static void qeth_l3_stop_card(struct qeth_card *card)
qeth_clear_working_pool_list(card);
card->state = CARD_STATE_DOWN;
}
- if (card->state == CARD_STATE_DOWN) {
- qeth_clear_cmd_buffers(&card->read);
- qeth_clear_cmd_buffers(&card->write);
- }
flush_workqueue(card->event_wq);
}
@@ -1563,7 +1554,8 @@ static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries)
}
iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
- IPA_CMD_ASS_ARP_SET_NO_ENTRIES, 4,
+ IPA_CMD_ASS_ARP_SET_NO_ENTRIES,
+ SETASS_DATA_SIZEOF(flags_32bit),
QETH_PROT_IPV4);
if (!iob)
return -ENOMEM;
@@ -1709,9 +1701,7 @@ static int qeth_l3_query_arp_cache_info(struct qeth_card *card,
iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
IPA_CMD_ASS_ARP_QUERY_INFO,
- sizeof(struct qeth_arp_query_data)
- - sizeof(char),
- prot);
+ SETASS_DATA_SIZEOF(query_arp), prot);
if (!iob)
return -ENOMEM;
cmd = __ipa_cmd(iob);
@@ -1795,7 +1785,8 @@ static int qeth_l3_arp_modify_entry(struct qeth_card *card,
}
iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING, arp_cmd,
- sizeof(*cmd_entry), QETH_PROT_IPV4);
+ SETASS_DATA_SIZEOF(arp_entry),
+ QETH_PROT_IPV4);
if (!iob)
return -ENOMEM;
@@ -1886,26 +1877,17 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
return rc;
}
-static int qeth_l3_get_cast_type(struct sk_buff *skb)
+static int qeth_l3_get_cast_type_rcu(struct sk_buff *skb, struct dst_entry *dst,
+ int ipv)
{
- int ipv = qeth_get_ip_version(skb);
struct neighbour *n = NULL;
- struct dst_entry *dst;
-
- rcu_read_lock();
- dst = skb_dst(skb);
- if (dst) {
- struct rt6_info *rt = (struct rt6_info *) dst;
- dst = dst_check(dst, (ipv == 6) ? rt6_get_cookie(rt) : 0);
- if (dst)
- n = dst_neigh_lookup_skb(dst, skb);
- }
+ if (dst)
+ n = dst_neigh_lookup_skb(dst, skb);
if (n) {
int cast_type = n->type;
- rcu_read_unlock();
neigh_release(n);
if ((cast_type == RTN_BROADCAST) ||
(cast_type == RTN_MULTICAST) ||
@@ -1913,7 +1895,6 @@ static int qeth_l3_get_cast_type(struct sk_buff *skb)
return cast_type;
return RTN_UNICAST;
}
- rcu_read_unlock();
/* no neighbour (eg AF_PACKET), fall back to target's IP address ... */
switch (ipv) {
@@ -1931,6 +1912,20 @@ static int qeth_l3_get_cast_type(struct sk_buff *skb)
}
}
+static int qeth_l3_get_cast_type(struct sk_buff *skb)
+{
+ int ipv = qeth_get_ip_version(skb);
+ struct dst_entry *dst;
+ int cast_type;
+
+ rcu_read_lock();
+ dst = qeth_dst_check_rcu(skb, ipv);
+ cast_type = qeth_l3_get_cast_type_rcu(skb, dst, ipv);
+ rcu_read_unlock();
+
+ return cast_type;
+}
+
static u8 qeth_l3_cast_type_to_flag(int cast_type)
{
if (cast_type == RTN_MULTICAST)
@@ -1944,12 +1939,13 @@ static u8 qeth_l3_cast_type_to_flag(int cast_type)
static void qeth_l3_fill_header(struct qeth_qdio_out_q *queue,
struct qeth_hdr *hdr, struct sk_buff *skb,
- int ipv, int cast_type, unsigned int data_len)
+ int ipv, unsigned int data_len)
{
struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3;
struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
struct qeth_card *card = queue->card;
struct dst_entry *dst;
+ int cast_type;
hdr->hdr.l3.length = data_len;
@@ -1986,36 +1982,23 @@ static void qeth_l3_fill_header(struct qeth_qdio_out_q *queue,
hdr->hdr.l3.vlan_id = ntohs(veth->h_vlan_TCI);
}
- l3_hdr->flags = qeth_l3_cast_type_to_flag(cast_type);
-
- /* OSA only: */
- if (!ipv) {
- l3_hdr->flags |= QETH_HDR_PASSTHRU;
- return;
- }
-
rcu_read_lock();
- dst = skb_dst(skb);
+ dst = qeth_dst_check_rcu(skb, ipv);
- if (ipv == 4) {
- struct rtable *rt;
+ if (IS_IQD(card) && skb_get_queue_mapping(skb) != QETH_IQD_MCAST_TXQ)
+ cast_type = RTN_UNICAST;
+ else
+ cast_type = qeth_l3_get_cast_type_rcu(skb, dst, ipv);
+ l3_hdr->flags |= qeth_l3_cast_type_to_flag(cast_type);
- if (dst)
- dst = dst_check(dst, 0);
- rt = (struct rtable *) dst;
+ if (ipv == 4) {
+ struct rtable *rt = (struct rtable *) dst;
*((__be32 *) &hdr->hdr.l3.next_hop.ipv4.addr) = (rt) ?
rt_nexthop(rt, ip_hdr(skb)->daddr) :
ip_hdr(skb)->daddr;
- } else {
- /* IPv6 */
- struct rt6_info *rt;
-
- if (dst) {
- rt = (struct rt6_info *) dst;
- dst = dst_check(dst, rt6_get_cookie(rt));
- }
- rt = (struct rt6_info *) dst;
+ } else if (ipv == 6) {
+ struct rt6_info *rt = (struct rt6_info *) dst;
if (rt && !ipv6_addr_any(&rt->rt6i_gateway))
l3_hdr->next_hop.ipv6_addr = rt->rt6i_gateway;
@@ -2025,6 +2008,9 @@ static void qeth_l3_fill_header(struct qeth_qdio_out_q *queue,
hdr->hdr.l3.flags |= QETH_HDR_IPV6;
if (!IS_IQD(card))
hdr->hdr.l3.flags |= QETH_HDR_PASSTHRU;
+ } else {
+ /* OSA only: */
+ l3_hdr->flags |= QETH_HDR_PASSTHRU;
}
rcu_read_unlock();
}
@@ -2044,7 +2030,7 @@ static void qeth_l3_fixup_headers(struct sk_buff *skb)
}
static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb,
- struct qeth_qdio_out_q *queue, int ipv, int cast_type)
+ struct qeth_qdio_out_q *queue, int ipv)
{
unsigned int hw_hdr_len;
int rc;
@@ -2058,7 +2044,7 @@ static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb,
skb_pull(skb, ETH_HLEN);
qeth_l3_fixup_headers(skb);
- return qeth_xmit(card, skb, queue, ipv, cast_type, qeth_l3_fill_header);
+ return qeth_xmit(card, skb, queue, ipv, qeth_l3_fill_header);
}
static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
@@ -2069,7 +2055,7 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
int ipv = qeth_get_ip_version(skb);
struct qeth_qdio_out_q *queue;
int tx_bytes = skb->len;
- int cast_type, rc;
+ int rc;
if (IS_IQD(card)) {
queue = card->qdio.out_qs[qeth_iqd_translate_txq(dev, txq)];
@@ -2080,24 +2066,18 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
(card->options.cq == QETH_CQ_ENABLED &&
skb->protocol != htons(ETH_P_AF_IUCV)))
goto tx_drop;
-
- if (txq == QETH_IQD_MCAST_TXQ)
- cast_type = qeth_l3_get_cast_type(skb);
- else
- cast_type = RTN_UNICAST;
} else {
queue = card->qdio.out_qs[txq];
- cast_type = qeth_l3_get_cast_type(skb);
}
- if (cast_type == RTN_BROADCAST && !card->info.broadcast_capable)
+ if (!(dev->flags & IFF_BROADCAST) &&
+ qeth_l3_get_cast_type(skb) == RTN_BROADCAST)
goto tx_drop;
if (ipv == 4 || IS_IQD(card))
- rc = qeth_l3_xmit(card, skb, queue, ipv, cast_type);
+ rc = qeth_l3_xmit(card, skb, queue, ipv);
else
- rc = qeth_xmit(card, skb, queue, ipv, cast_type,
- qeth_l3_fill_header);
+ rc = qeth_xmit(card, skb, queue, ipv, qeth_l3_fill_header);
if (!rc) {
QETH_TXQ_STAT_INC(queue, tx_packets);
@@ -2337,12 +2317,11 @@ static int qeth_l3_set_online(struct ccwgroup_device *gdev)
mutex_lock(&card->discipline_mutex);
mutex_lock(&card->conf_mutex);
- QETH_DBF_TEXT(SETUP, 2, "setonlin");
- QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
+ QETH_CARD_TEXT(card, 2, "setonlin");
rc = qeth_core_hardsetup_card(card, &carrier_ok);
if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
+ QETH_CARD_TEXT_(card, 2, "2err%04x", rc);
rc = -ENODEV;
goto out_remove;
}
@@ -2358,28 +2337,28 @@ static int qeth_l3_set_online(struct ccwgroup_device *gdev)
qeth_print_status_message(card);
/* softsetup */
- QETH_DBF_TEXT(SETUP, 2, "softsetp");
+ QETH_CARD_TEXT(card, 2, "softsetp");
rc = qeth_l3_setadapter_parms(card);
if (rc)
- QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
+ QETH_CARD_TEXT_(card, 2, "2err%04x", rc);
if (!card->options.sniffer) {
rc = qeth_l3_start_ipassists(card);
if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
+ QETH_CARD_TEXT_(card, 2, "3err%d", rc);
goto out_remove;
}
rc = qeth_l3_setrouting_v4(card);
if (rc)
- QETH_DBF_TEXT_(SETUP, 2, "4err%04x", rc);
+ QETH_CARD_TEXT_(card, 2, "4err%04x", rc);
rc = qeth_l3_setrouting_v6(card);
if (rc)
- QETH_DBF_TEXT_(SETUP, 2, "5err%04x", rc);
+ QETH_CARD_TEXT_(card, 2, "5err%04x", rc);
}
rc = qeth_init_qdio_queues(card);
if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
+ QETH_CARD_TEXT_(card, 2, "6err%d", rc);
rc = -ENODEV;
goto out_remove;
}
@@ -2420,7 +2399,6 @@ out_remove:
ccw_device_set_offline(CARD_WDEV(card));
ccw_device_set_offline(CARD_RDEV(card));
qdio_free(CARD_DDEV(card));
- card->state = CARD_STATE_DOWN;
mutex_unlock(&card->conf_mutex);
mutex_unlock(&card->discipline_mutex);
@@ -2435,8 +2413,7 @@ static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev,
mutex_lock(&card->discipline_mutex);
mutex_lock(&card->conf_mutex);
- QETH_DBF_TEXT(SETUP, 3, "setoffl");
- QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *));
+ QETH_CARD_TEXT(card, 3, "setoffl");
if ((!recovery_mode && card->info.hwtrap) || card->info.hwtrap == 2) {
qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
@@ -2462,7 +2439,7 @@ static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev,
if (!rc)
rc = (rc2) ? rc2 : rc3;
if (rc)
- QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
+ QETH_CARD_TEXT_(card, 2, "1err%d", rc);
qdio_free(CARD_DDEV(card));
/* let user_space know that device is offline */
@@ -2505,33 +2482,6 @@ static int qeth_l3_recover(void *ptr)
return 0;
}
-static int qeth_l3_pm_suspend(struct ccwgroup_device *gdev)
-{
- struct qeth_card *card = dev_get_drvdata(&gdev->dev);
-
- qeth_set_allowed_threads(card, 0, 1);
- wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
- if (gdev->state == CCWGROUP_OFFLINE)
- return 0;
-
- qeth_l3_set_offline(gdev);
- return 0;
-}
-
-static int qeth_l3_pm_resume(struct ccwgroup_device *gdev)
-{
- struct qeth_card *card = dev_get_drvdata(&gdev->dev);
- int rc;
-
- rc = qeth_l3_set_online(gdev);
-
- qeth_set_allowed_threads(card, 0xffffffff, 0);
- if (rc)
- dev_warn(&card->gdev->dev, "The qeth device driver "
- "failed to recover an error on the device\n");
- return rc;
-}
-
/* Returns zero if the command is successfully "consumed" */
static int qeth_l3_control_event(struct qeth_card *card,
struct qeth_ipa_cmd *cmd)
@@ -2547,9 +2497,6 @@ struct qeth_discipline qeth_l3_discipline = {
.remove = qeth_l3_remove_device,
.set_online = qeth_l3_set_online,
.set_offline = qeth_l3_set_offline,
- .freeze = qeth_l3_pm_suspend,
- .thaw = qeth_l3_pm_resume,
- .restore = qeth_l3_pm_resume,
.do_ioctl = qeth_l3_do_ioctl,
.control_event_handler = qeth_l3_control_event,
};
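
The setassparms hunks above replace hand-counted payload sizes (a bare 4, sizeof(*cmd_entry), sizeof(struct qeth_arp_query_data) - sizeof(char)) with SETASS_DATA_SIZEOF(), and the command-buffer hunks do the same with IPA_DATA_SIZEOF(). A hedged sketch of the shape such a size macro typically has; the real definitions live in the qeth headers outside this diff and may differ:

/* Hypothetical illustration only: size of one member of the setassparms
 * payload union, so callers never hard-code byte counts. */
#define EXAMPLE_SETASS_DATA_SIZEOF(field) \
	sizeof_field(struct qeth_ipacmd_setassparms, data.field)
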
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 33eddb02ee30..b018b61bd168 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -620,7 +620,7 @@ static void zfcp_fc_sg_free_table(struct scatterlist *sg, int count)
{
int i;
- for (i = 0; i < count; i++, sg++)
+ for (i = 0; i < count; i++, sg = sg_next(sg))
if (sg)
free_page((unsigned long) sg_virt(sg));
else
@@ -641,7 +641,7 @@ static int zfcp_fc_sg_setup_table(struct scatterlist *sg, int count)
int i;
sg_init_table(sg, count);
- for (i = 0; i < count; i++, sg++) {
+ for (i = 0; i < count; i++, sg = sg_next(sg)) {
addr = (void *) get_zeroed_page(GFP_KERNEL);
if (!addr) {
zfcp_fc_sg_free_table(sg, i);
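
The two zfcp loops above switch from sg++ to sg = sg_next(sg), so the walk still works if the scatterlist is ever chained rather than laid out as one flat array. A minimal sketch of that iteration pattern, assuming the pages were allocated with get_zeroed_page() as in zfcp_fc_sg_setup_table(); the helper name is illustrative:

#include <linux/gfp.h>
#include <linux/scatterlist.h>

/* Follow chain links instead of relying on entries being contiguous. */
static void example_free_sg_pages(struct scatterlist *sgl, int count)
{
	struct scatterlist *sg = sgl;
	int i;

	for (i = 0; i < count && sg; i++, sg = sg_next(sg))
		free_page((unsigned long)sg_virt(sg));
}
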
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index 6a3076881321..1a55e5942d36 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -46,9 +46,15 @@ struct vq_config_block {
#define VIRTIO_CCW_CONFIG_SIZE 0x100
/* same as PCI config space size, should be enough for all drivers */
+struct vcdev_dma_area {
+ unsigned long indicators;
+ unsigned long indicators2;
+ struct vq_config_block config_block;
+ __u8 status;
+};
+
struct virtio_ccw_device {
struct virtio_device vdev;
- __u8 *status;
__u8 config[VIRTIO_CCW_CONFIG_SIZE];
struct ccw_device *cdev;
__u32 curr_io;
@@ -58,17 +64,24 @@ struct virtio_ccw_device {
spinlock_t lock;
struct mutex io_lock; /* Serializes I/O requests */
struct list_head virtqueues;
- unsigned long indicators;
- unsigned long indicators2;
- struct vq_config_block *config_block;
bool is_thinint;
bool going_away;
bool device_lost;
unsigned int config_ready;
void *airq_info;
- u64 dma_mask;
+ struct vcdev_dma_area *dma_area;
};
+static inline unsigned long *indicators(struct virtio_ccw_device *vcdev)
+{
+ return &vcdev->dma_area->indicators;
+}
+
+static inline unsigned long *indicators2(struct virtio_ccw_device *vcdev)
+{
+ return &vcdev->dma_area->indicators2;
+}
+
struct vq_info_block_legacy {
__u64 queue;
__u32 align;
@@ -127,11 +140,17 @@ static int virtio_ccw_use_airq = 1;
struct airq_info {
rwlock_t lock;
- u8 summary_indicator;
+ u8 summary_indicator_idx;
struct airq_struct airq;
struct airq_iv *aiv;
};
static struct airq_info *airq_areas[MAX_AIRQ_AREAS];
+static u8 *summary_indicators;
+
+static inline u8 *get_summary_indicator(struct airq_info *info)
+{
+ return summary_indicators + info->summary_indicator_idx;
+}
#define CCW_CMD_SET_VQ 0x13
#define CCW_CMD_VDEV_RESET 0x33
@@ -196,7 +215,7 @@ static void virtio_airq_handler(struct airq_struct *airq, bool floating)
break;
vring_interrupt(0, (void *)airq_iv_get_ptr(info->aiv, ai));
}
- info->summary_indicator = 0;
+ *(get_summary_indicator(info)) = 0;
smp_wmb();
/* Walk through indicators field, summary indicator not active. */
for (ai = 0;;) {
@@ -208,7 +227,7 @@ static void virtio_airq_handler(struct airq_struct *airq, bool floating)
read_unlock(&info->lock);
}
-static struct airq_info *new_airq_info(void)
+static struct airq_info *new_airq_info(int index)
{
struct airq_info *info;
int rc;
@@ -217,13 +236,15 @@ static struct airq_info *new_airq_info(void)
if (!info)
return NULL;
rwlock_init(&info->lock);
- info->aiv = airq_iv_create(VIRTIO_IV_BITS, AIRQ_IV_ALLOC | AIRQ_IV_PTR);
+ info->aiv = airq_iv_create(VIRTIO_IV_BITS, AIRQ_IV_ALLOC | AIRQ_IV_PTR
+ | AIRQ_IV_CACHELINE);
if (!info->aiv) {
kfree(info);
return NULL;
}
info->airq.handler = virtio_airq_handler;
- info->airq.lsi_ptr = &info->summary_indicator;
+ info->summary_indicator_idx = index;
+ info->airq.lsi_ptr = get_summary_indicator(info);
info->airq.lsi_mask = 0xff;
info->airq.isc = VIRTIO_AIRQ_ISC;
rc = register_adapter_interrupt(&info->airq);
@@ -245,7 +266,7 @@ static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs,
for (i = 0; i < MAX_AIRQ_AREAS && !indicator_addr; i++) {
if (!airq_areas[i])
- airq_areas[i] = new_airq_info();
+ airq_areas[i] = new_airq_info(i);
info = airq_areas[i];
if (!info)
return 0;
@@ -326,29 +347,29 @@ static void virtio_ccw_drop_indicator(struct virtio_ccw_device *vcdev,
struct airq_info *airq_info = vcdev->airq_info;
if (vcdev->is_thinint) {
- thinint_area = kzalloc(sizeof(*thinint_area),
- GFP_DMA | GFP_KERNEL);
+ thinint_area = ccw_device_dma_zalloc(vcdev->cdev,
+ sizeof(*thinint_area));
if (!thinint_area)
return;
thinint_area->summary_indicator =
- (unsigned long) &airq_info->summary_indicator;
+ (unsigned long) get_summary_indicator(airq_info);
thinint_area->isc = VIRTIO_AIRQ_ISC;
ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER;
ccw->count = sizeof(*thinint_area);
ccw->cda = (__u32)(unsigned long) thinint_area;
} else {
/* payload is the address of the indicators */
- indicatorp = kmalloc(sizeof(&vcdev->indicators),
- GFP_DMA | GFP_KERNEL);
+ indicatorp = ccw_device_dma_zalloc(vcdev->cdev,
+ sizeof(indicators(vcdev)));
if (!indicatorp)
return;
*indicatorp = 0;
ccw->cmd_code = CCW_CMD_SET_IND;
- ccw->count = sizeof(&vcdev->indicators);
+ ccw->count = sizeof(indicators(vcdev));
ccw->cda = (__u32)(unsigned long) indicatorp;
}
/* Deregister indicators from host. */
- vcdev->indicators = 0;
+ *indicators(vcdev) = 0;
ccw->flags = 0;
ret = ccw_io_helper(vcdev, ccw,
vcdev->is_thinint ?
@@ -359,8 +380,8 @@ static void virtio_ccw_drop_indicator(struct virtio_ccw_device *vcdev,
"Failed to deregister indicators (%d)\n", ret);
else if (vcdev->is_thinint)
virtio_ccw_drop_indicators(vcdev);
- kfree(indicatorp);
- kfree(thinint_area);
+ ccw_device_dma_free(vcdev->cdev, indicatorp, sizeof(indicators(vcdev)));
+ ccw_device_dma_free(vcdev->cdev, thinint_area, sizeof(*thinint_area));
}
static inline long __do_kvm_notify(struct subchannel_id schid,
@@ -407,15 +428,15 @@ static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev,
{
int ret;
- vcdev->config_block->index = index;
+ vcdev->dma_area->config_block.index = index;
ccw->cmd_code = CCW_CMD_READ_VQ_CONF;
ccw->flags = 0;
ccw->count = sizeof(struct vq_config_block);
- ccw->cda = (__u32)(unsigned long)(vcdev->config_block);
+ ccw->cda = (__u32)(unsigned long)(&vcdev->dma_area->config_block);
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_VQ_CONF);
if (ret)
return ret;
- return vcdev->config_block->num ?: -ENOENT;
+ return vcdev->dma_area->config_block.num ?: -ENOENT;
}
static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
@@ -460,7 +481,8 @@ static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
ret, index);
vring_del_virtqueue(vq);
- kfree(info->info_block);
+ ccw_device_dma_free(vcdev->cdev, info->info_block,
+ sizeof(*info->info_block));
kfree(info);
}
@@ -470,7 +492,7 @@ static void virtio_ccw_del_vqs(struct virtio_device *vdev)
struct ccw1 *ccw;
struct virtio_ccw_device *vcdev = to_vc_device(vdev);
- ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
if (!ccw)
return;
@@ -479,7 +501,7 @@ static void virtio_ccw_del_vqs(struct virtio_device *vdev)
list_for_each_entry_safe(vq, n, &vdev->vqs, list)
virtio_ccw_del_vq(vq, ccw);
- kfree(ccw);
+ ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
}
static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
@@ -502,8 +524,8 @@ static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
err = -ENOMEM;
goto out_err;
}
- info->info_block = kzalloc(sizeof(*info->info_block),
- GFP_DMA | GFP_KERNEL);
+ info->info_block = ccw_device_dma_zalloc(vcdev->cdev,
+ sizeof(*info->info_block));
if (!info->info_block) {
dev_warn(&vcdev->cdev->dev, "no info block\n");
err = -ENOMEM;
@@ -567,7 +589,8 @@ out_err:
if (vq)
vring_del_virtqueue(vq);
if (info) {
- kfree(info->info_block);
+ ccw_device_dma_free(vcdev->cdev, info->info_block,
+ sizeof(*info->info_block));
}
kfree(info);
return ERR_PTR(err);
@@ -581,7 +604,8 @@ static int virtio_ccw_register_adapter_ind(struct virtio_ccw_device *vcdev,
struct virtio_thinint_area *thinint_area = NULL;
struct airq_info *info;
- thinint_area = kzalloc(sizeof(*thinint_area), GFP_DMA | GFP_KERNEL);
+ thinint_area = ccw_device_dma_zalloc(vcdev->cdev,
+ sizeof(*thinint_area));
if (!thinint_area) {
ret = -ENOMEM;
goto out;
@@ -596,7 +620,7 @@ static int virtio_ccw_register_adapter_ind(struct virtio_ccw_device *vcdev,
}
info = vcdev->airq_info;
thinint_area->summary_indicator =
- (unsigned long) &info->summary_indicator;
+ (unsigned long) get_summary_indicator(info);
thinint_area->isc = VIRTIO_AIRQ_ISC;
ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER;
ccw->flags = CCW_FLAG_SLI;
@@ -617,7 +641,7 @@ static int virtio_ccw_register_adapter_ind(struct virtio_ccw_device *vcdev,
virtio_ccw_drop_indicators(vcdev);
}
out:
- kfree(thinint_area);
+ ccw_device_dma_free(vcdev->cdev, thinint_area, sizeof(*thinint_area));
return ret;
}
@@ -633,7 +657,7 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
int ret, i, queue_idx = 0;
struct ccw1 *ccw;
- ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
if (!ccw)
return -ENOMEM;
@@ -657,10 +681,11 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
* We need a data area under 2G to communicate. Our payload is
* the address of the indicators.
*/
- indicatorp = kmalloc(sizeof(&vcdev->indicators), GFP_DMA | GFP_KERNEL);
+ indicatorp = ccw_device_dma_zalloc(vcdev->cdev,
+ sizeof(indicators(vcdev)));
if (!indicatorp)
goto out;
- *indicatorp = (unsigned long) &vcdev->indicators;
+ *indicatorp = (unsigned long) indicators(vcdev);
if (vcdev->is_thinint) {
ret = virtio_ccw_register_adapter_ind(vcdev, vqs, nvqs, ccw);
if (ret)
@@ -669,32 +694,36 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
}
if (!vcdev->is_thinint) {
/* Register queue indicators with host. */
- vcdev->indicators = 0;
+ *indicators(vcdev) = 0;
ccw->cmd_code = CCW_CMD_SET_IND;
ccw->flags = 0;
- ccw->count = sizeof(&vcdev->indicators);
+ ccw->count = sizeof(indicators(vcdev));
ccw->cda = (__u32)(unsigned long) indicatorp;
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND);
if (ret)
goto out;
}
/* Register indicators2 with host for config changes */
- *indicatorp = (unsigned long) &vcdev->indicators2;
- vcdev->indicators2 = 0;
+ *indicatorp = (unsigned long) indicators2(vcdev);
+ *indicators2(vcdev) = 0;
ccw->cmd_code = CCW_CMD_SET_CONF_IND;
ccw->flags = 0;
- ccw->count = sizeof(&vcdev->indicators2);
+ ccw->count = sizeof(indicators2(vcdev));
ccw->cda = (__u32)(unsigned long) indicatorp;
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_CONF_IND);
if (ret)
goto out;
- kfree(indicatorp);
- kfree(ccw);
+ if (indicatorp)
+ ccw_device_dma_free(vcdev->cdev, indicatorp,
+ sizeof(indicators(vcdev)));
+ ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
return 0;
out:
- kfree(indicatorp);
- kfree(ccw);
+ if (indicatorp)
+ ccw_device_dma_free(vcdev->cdev, indicatorp,
+ sizeof(indicators(vcdev)));
+ ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
virtio_ccw_del_vqs(vdev);
return ret;
}
@@ -704,12 +733,12 @@ static void virtio_ccw_reset(struct virtio_device *vdev)
struct virtio_ccw_device *vcdev = to_vc_device(vdev);
struct ccw1 *ccw;
- ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
if (!ccw)
return;
/* Zero status bits. */
- *vcdev->status = 0;
+ vcdev->dma_area->status = 0;
/* Send a reset ccw on device. */
ccw->cmd_code = CCW_CMD_VDEV_RESET;
@@ -717,7 +746,7 @@ static void virtio_ccw_reset(struct virtio_device *vdev)
ccw->count = 0;
ccw->cda = 0;
ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_RESET);
- kfree(ccw);
+ ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
}
static u64 virtio_ccw_get_features(struct virtio_device *vdev)
@@ -728,11 +757,11 @@ static u64 virtio_ccw_get_features(struct virtio_device *vdev)
u64 rc;
struct ccw1 *ccw;
- ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
if (!ccw)
return 0;
- features = kzalloc(sizeof(*features), GFP_DMA | GFP_KERNEL);
+ features = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*features));
if (!features) {
rc = 0;
goto out_free;
@@ -765,8 +794,8 @@ static u64 virtio_ccw_get_features(struct virtio_device *vdev)
rc |= (u64)le32_to_cpu(features->features) << 32;
out_free:
- kfree(features);
- kfree(ccw);
+ ccw_device_dma_free(vcdev->cdev, features, sizeof(*features));
+ ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
return rc;
}
@@ -791,11 +820,11 @@ static int virtio_ccw_finalize_features(struct virtio_device *vdev)
return -EINVAL;
}
- ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
if (!ccw)
return -ENOMEM;
- features = kzalloc(sizeof(*features), GFP_DMA | GFP_KERNEL);
+ features = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*features));
if (!features) {
ret = -ENOMEM;
goto out_free;
@@ -830,8 +859,8 @@ static int virtio_ccw_finalize_features(struct virtio_device *vdev)
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT);
out_free:
- kfree(features);
- kfree(ccw);
+ ccw_device_dma_free(vcdev->cdev, features, sizeof(*features));
+ ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
return ret;
}
@@ -845,11 +874,12 @@ static void virtio_ccw_get_config(struct virtio_device *vdev,
void *config_area;
unsigned long flags;
- ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
if (!ccw)
return;
- config_area = kzalloc(VIRTIO_CCW_CONFIG_SIZE, GFP_DMA | GFP_KERNEL);
+ config_area = ccw_device_dma_zalloc(vcdev->cdev,
+ VIRTIO_CCW_CONFIG_SIZE);
if (!config_area)
goto out_free;
@@ -871,8 +901,8 @@ static void virtio_ccw_get_config(struct virtio_device *vdev,
memcpy(buf, config_area + offset, len);
out_free:
- kfree(config_area);
- kfree(ccw);
+ ccw_device_dma_free(vcdev->cdev, config_area, VIRTIO_CCW_CONFIG_SIZE);
+ ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
}
static void virtio_ccw_set_config(struct virtio_device *vdev,
@@ -884,11 +914,12 @@ static void virtio_ccw_set_config(struct virtio_device *vdev,
void *config_area;
unsigned long flags;
- ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
if (!ccw)
return;
- config_area = kzalloc(VIRTIO_CCW_CONFIG_SIZE, GFP_DMA | GFP_KERNEL);
+ config_area = ccw_device_dma_zalloc(vcdev->cdev,
+ VIRTIO_CCW_CONFIG_SIZE);
if (!config_area)
goto out_free;
@@ -907,61 +938,61 @@ static void virtio_ccw_set_config(struct virtio_device *vdev,
ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_CONFIG);
out_free:
- kfree(config_area);
- kfree(ccw);
+ ccw_device_dma_free(vcdev->cdev, config_area, VIRTIO_CCW_CONFIG_SIZE);
+ ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
}
static u8 virtio_ccw_get_status(struct virtio_device *vdev)
{
struct virtio_ccw_device *vcdev = to_vc_device(vdev);
- u8 old_status = *vcdev->status;
+ u8 old_status = vcdev->dma_area->status;
struct ccw1 *ccw;
if (vcdev->revision < 1)
- return *vcdev->status;
+ return vcdev->dma_area->status;
- ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
if (!ccw)
return old_status;
ccw->cmd_code = CCW_CMD_READ_STATUS;
ccw->flags = 0;
- ccw->count = sizeof(*vcdev->status);
- ccw->cda = (__u32)(unsigned long)vcdev->status;
+ ccw->count = sizeof(vcdev->dma_area->status);
+ ccw->cda = (__u32)(unsigned long)&vcdev->dma_area->status;
ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_STATUS);
/*
* If the channel program failed (should only happen if the device
* was hotunplugged, and then we clean up via the machine check
- * handler anyway), vcdev->status was not overwritten and we just
+ * handler anyway), vcdev->dma_area->status was not overwritten and we just
* return the old status, which is fine.
*/
- kfree(ccw);
+ ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
- return *vcdev->status;
+ return vcdev->dma_area->status;
}
static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status)
{
struct virtio_ccw_device *vcdev = to_vc_device(vdev);
- u8 old_status = *vcdev->status;
+ u8 old_status = vcdev->dma_area->status;
struct ccw1 *ccw;
int ret;
- ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
if (!ccw)
return;
/* Write the status to the host. */
- *vcdev->status = status;
+ vcdev->dma_area->status = status;
ccw->cmd_code = CCW_CMD_WRITE_STATUS;
ccw->flags = 0;
ccw->count = sizeof(status);
- ccw->cda = (__u32)(unsigned long)vcdev->status;
+ ccw->cda = (__u32)(unsigned long)&vcdev->dma_area->status;
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_STATUS);
/* Write failed? We assume status is unchanged. */
if (ret)
- *vcdev->status = old_status;
- kfree(ccw);
+ vcdev->dma_area->status = old_status;
+ ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
}
static const char *virtio_ccw_bus_name(struct virtio_device *vdev)
@@ -994,8 +1025,8 @@ static void virtio_ccw_release_dev(struct device *_d)
struct virtio_device *dev = dev_to_virtio(_d);
struct virtio_ccw_device *vcdev = to_vc_device(dev);
- kfree(vcdev->status);
- kfree(vcdev->config_block);
+ ccw_device_dma_free(vcdev->cdev, vcdev->dma_area,
+ sizeof(*vcdev->dma_area));
kfree(vcdev);
}
@@ -1093,17 +1124,17 @@ static void virtio_ccw_int_handler(struct ccw_device *cdev,
vcdev->err = -EIO;
}
virtio_ccw_check_activity(vcdev, activity);
- for_each_set_bit(i, &vcdev->indicators,
- sizeof(vcdev->indicators) * BITS_PER_BYTE) {
+ for_each_set_bit(i, indicators(vcdev),
+ sizeof(*indicators(vcdev)) * BITS_PER_BYTE) {
/* The bit clear must happen before the vring kick. */
- clear_bit(i, &vcdev->indicators);
+ clear_bit(i, indicators(vcdev));
barrier();
vq = virtio_ccw_vq_by_ind(vcdev, i);
vring_interrupt(0, vq);
}
- if (test_bit(0, &vcdev->indicators2)) {
+ if (test_bit(0, indicators2(vcdev))) {
virtio_config_changed(&vcdev->vdev);
- clear_bit(0, &vcdev->indicators2);
+ clear_bit(0, indicators2(vcdev));
}
}
@@ -1203,12 +1234,12 @@ static int virtio_ccw_set_transport_rev(struct virtio_ccw_device *vcdev)
struct ccw1 *ccw;
int ret;
- ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
if (!ccw)
return -ENOMEM;
- rev = kzalloc(sizeof(*rev), GFP_DMA | GFP_KERNEL);
+ rev = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*rev));
if (!rev) {
- kfree(ccw);
+ ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
return -ENOMEM;
}
@@ -1238,8 +1269,8 @@ static int virtio_ccw_set_transport_rev(struct virtio_ccw_device *vcdev)
}
} while (ret == -EOPNOTSUPP);
- kfree(ccw);
- kfree(rev);
+ ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
+ ccw_device_dma_free(vcdev->cdev, rev, sizeof(*rev));
return ret;
}
@@ -1255,24 +1286,11 @@ static int virtio_ccw_online(struct ccw_device *cdev)
ret = -ENOMEM;
goto out_free;
}
-
vcdev->vdev.dev.parent = &cdev->dev;
- cdev->dev.dma_mask = &vcdev->dma_mask;
- /* we are fine with common virtio infrastructure using 64 bit DMA */
- ret = dma_set_mask_and_coherent(&cdev->dev, DMA_BIT_MASK(64));
- if (ret) {
- dev_warn(&cdev->dev, "Failed to enable 64-bit DMA.\n");
- goto out_free;
- }
-
- vcdev->config_block = kzalloc(sizeof(*vcdev->config_block),
- GFP_DMA | GFP_KERNEL);
- if (!vcdev->config_block) {
- ret = -ENOMEM;
- goto out_free;
- }
- vcdev->status = kzalloc(sizeof(*vcdev->status), GFP_DMA | GFP_KERNEL);
- if (!vcdev->status) {
+ vcdev->cdev = cdev;
+ vcdev->dma_area = ccw_device_dma_zalloc(vcdev->cdev,
+ sizeof(*vcdev->dma_area));
+ if (!vcdev->dma_area) {
ret = -ENOMEM;
goto out_free;
}
@@ -1281,7 +1299,6 @@ static int virtio_ccw_online(struct ccw_device *cdev)
vcdev->vdev.dev.release = virtio_ccw_release_dev;
vcdev->vdev.config = &virtio_ccw_config_ops;
- vcdev->cdev = cdev;
init_waitqueue_head(&vcdev->wait_q);
INIT_LIST_HEAD(&vcdev->virtqueues);
spin_lock_init(&vcdev->lock);
@@ -1312,8 +1329,8 @@ out_put:
return ret;
out_free:
if (vcdev) {
- kfree(vcdev->status);
- kfree(vcdev->config_block);
+ ccw_device_dma_free(vcdev->cdev, vcdev->dma_area,
+ sizeof(*vcdev->dma_area));
}
kfree(vcdev);
return ret;
@@ -1483,8 +1500,17 @@ static void __init no_auto_parse(void)
static int __init virtio_ccw_init(void)
{
+ int rc;
+
/* parse no_auto string before we do anything further */
no_auto_parse();
- return ccw_driver_register(&virtio_ccw_driver);
+
+ summary_indicators = cio_dma_zalloc(MAX_AIRQ_AREAS);
+ if (!summary_indicators)
+ return -ENOMEM;
+ rc = ccw_driver_register(&virtio_ccw_driver);
+ if (rc)
+ cio_dma_free(summary_indicators, MAX_AIRQ_AREAS);
+ return rc;
}
device_initcall(virtio_ccw_init);
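
The virtio_ccw changes above fold the status byte, the two indicator words and the config block into a single per-device vcdev_dma_area, and convert every channel-program buffer from kzalloc(GFP_DMA) to ccw_device_dma_zalloc()/ccw_device_dma_free(), which hand out 31-bit addressable memory shared with the device. A hedged sketch of the resulting allocate/use/free pattern; it reuses the struct and accessors added by this patch, and the two helper functions themselves are illustrative only:

/* Assumes the virtio_ccw_device / vcdev_dma_area definitions from this file. */
static int example_setup_dma_area(struct virtio_ccw_device *vcdev)
{
	vcdev->dma_area = ccw_device_dma_zalloc(vcdev->cdev,
						sizeof(*vcdev->dma_area));
	if (!vcdev->dma_area)
		return -ENOMEM;

	/* Indicators now live in device-visible, 31-bit addressable memory. */
	*indicators(vcdev) = 0;
	*indicators2(vcdev) = 0;
	return 0;
}

static void example_teardown_dma_area(struct virtio_ccw_device *vcdev)
{
	ccw_device_dma_free(vcdev->cdev, vcdev->dma_area,
			    sizeof(*vcdev->dma_area));
}
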
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 61da513fc0ed..75f66f8ad3ea 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -99,28 +99,6 @@ config CHR_DEV_ST
To compile this driver as a module, choose M here and read
<file:Documentation/scsi/scsi.txt>. The module will be called st.
-config CHR_DEV_OSST
- tristate "SCSI OnStream SC-x0 tape support"
- depends on SCSI
- ---help---
- The OnStream SC-x0 SCSI tape drives cannot be driven by the
- standard st driver, but instead need this special osst driver and
- use the /dev/osstX char device nodes (major 206). Via usb-storage,
- you may be able to drive the USB-x0 and DI-x0 drives as well.
- Note that there is also a second generation of OnStream
- tape drives (ADR-x0) that supports the standard SCSI-2 commands for
- tapes (QIC-157) and can be driven by the standard driver st.
- For more information, you may have a look at the SCSI-HOWTO
- <http://www.tldp.org/docs.html#howto> and
- <file:Documentation/scsi/osst.txt> in the kernel source.
- More info on the OnStream driver may be found on
- <http://sourceforge.net/projects/osst/>
- Please also have a look at the standard st docu, as most of it
- applies to osst as well.
-
- To compile this driver as a module, choose M here and read
- <file:Documentation/scsi/scsi.txt>. The module will be called osst.
-
config BLK_DEV_SR
tristate "SCSI CDROM support"
depends on SCSI && BLK_DEV
@@ -183,7 +161,7 @@ config CHR_DEV_SCH
If you want to compile this as a module ( = code which can be
inserted in and removed from the running kernel whenever you want),
- say M here and read <file:Documentation/kbuild/modules.txt> and
+ say M here and read <file:Documentation/kbuild/modules.rst> and
<file:Documentation/scsi/scsi.txt>. The module will be called ch.o.
If unsure, say N.
@@ -664,6 +642,41 @@ config SCSI_DMX3191D
To compile this driver as a module, choose M here: the
module will be called dmx3191d.
+config SCSI_FDOMAIN
+ tristate
+ depends on SCSI
+
+config SCSI_FDOMAIN_PCI
+ tristate "Future Domain TMC-3260/AHA-2920A PCI SCSI support"
+ depends on PCI && SCSI
+ select SCSI_FDOMAIN
+ help
+ This is support for Future Domain's PCI SCSI host adapters (TMC-3260)
+ and other adapters with PCI bus based on the Future Domain chipsets
+ (Adaptec AHA-2920A).
+
+ NOTE: Newer Adaptec AHA-2920C boards use the Adaptec AIC-7850 chip
+ and should use the aic7xxx driver ("Adaptec AIC7xxx chipset SCSI
+ controller support"). This Future Domain driver works with the older
+ Adaptec AHA-2920A boards with a Future Domain chip on them.
+
+ To compile this driver as a module, choose M here: the
+ module will be called fdomain_pci.
+
+config SCSI_FDOMAIN_ISA
+ tristate "Future Domain 16xx ISA SCSI support"
+ depends on ISA && SCSI
+ select CHECK_SIGNATURE
+ select SCSI_FDOMAIN
+ help
+ This is support for Future Domain's 16-bit SCSI host adapters
+ (TMC-1660/1680, TMC-1650/1670, TMC-1610M/MER/MEX) and other adapters
+ with ISA bus based on the Future Domain chipsets (Quantum ISA-200S,
+ ISA-250MG; and at least one IBM board).
+
+ To compile this driver as a module, choose M here: the
+ module will be called fdomain_isa.
+
config SCSI_GDTH
tristate "Intel/ICP (former GDT SCSI Disk Array) RAID Controller support"
depends on PCI && SCSI
@@ -1474,7 +1487,7 @@ config ZFCP
This driver is also available as a module. This module will be
called zfcp. If you want to compile it as a module, say M here
- and read <file:Documentation/kbuild/modules.txt>.
+ and read <file:Documentation/kbuild/modules.rst>.
config SCSI_PMCRAID
tristate "PMC SIERRA Linux MaxRAID adapter support"
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 8826111fdf4a..aeda53901064 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -76,6 +76,9 @@ obj-$(CONFIG_SCSI_AIC94XX) += aic94xx/
obj-$(CONFIG_SCSI_PM8001) += pm8001/
obj-$(CONFIG_SCSI_ISCI) += isci/
obj-$(CONFIG_SCSI_IPS) += ips.o
+obj-$(CONFIG_SCSI_FDOMAIN) += fdomain.o
+obj-$(CONFIG_SCSI_FDOMAIN_PCI) += fdomain_pci.o
+obj-$(CONFIG_SCSI_FDOMAIN_ISA) += fdomain_isa.o
obj-$(CONFIG_SCSI_GENERIC_NCR5380) += g_NCR5380.o
obj-$(CONFIG_SCSI_QLOGIC_FAS) += qlogicfas408.o qlogicfas.o
obj-$(CONFIG_PCMCIA_QLOGIC) += qlogicfas408.o
@@ -143,7 +146,6 @@ obj-$(CONFIG_SCSI_WD719X) += wd719x.o
obj-$(CONFIG_ARM) += arm/
obj-$(CONFIG_CHR_DEV_ST) += st.o
-obj-$(CONFIG_CHR_DEV_OSST) += osst.o
obj-$(CONFIG_BLK_DEV_SD) += sd_mod.o
obj-$(CONFIG_BLK_DEV_SR) += sr_mod.o
obj-$(CONFIG_CHR_DEV_SG) += sg.o
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index fe0535affc14..536426f25e86 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -149,12 +149,10 @@ static inline void initialize_SCp(struct scsi_cmnd *cmd)
if (scsi_bufflen(cmd)) {
cmd->SCp.buffer = scsi_sglist(cmd);
- cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
cmd->SCp.this_residual = cmd->SCp.buffer->length;
} else {
cmd->SCp.buffer = NULL;
- cmd->SCp.buffers_residual = 0;
cmd->SCp.ptr = NULL;
cmd->SCp.this_residual = 0;
}
@@ -163,6 +161,17 @@ static inline void initialize_SCp(struct scsi_cmnd *cmd)
cmd->SCp.Message = 0;
}
+static inline void advance_sg_buffer(struct scsi_cmnd *cmd)
+{
+ struct scatterlist *s = cmd->SCp.buffer;
+
+ if (!cmd->SCp.this_residual && s && !sg_is_last(s)) {
+ cmd->SCp.buffer = sg_next(s);
+ cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
+ cmd->SCp.this_residual = cmd->SCp.buffer->length;
+ }
+}
+
/**
* NCR5380_poll_politely2 - wait for two chip register values
* @hostdata: host private data
@@ -709,6 +718,8 @@ static void NCR5380_main(struct work_struct *work)
NCR5380_information_transfer(instance);
done = 0;
}
+ if (!hostdata->connected)
+ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
spin_unlock_irq(&hostdata->lock);
if (!done)
cond_resched();
@@ -1110,8 +1121,6 @@ static bool NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
spin_lock_irq(&hostdata->lock);
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
NCR5380_reselect(instance);
- if (!hostdata->connected)
- NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
shost_printk(KERN_ERR, instance, "reselection after won arbitration?\n");
goto out;
}
@@ -1119,7 +1128,6 @@ static bool NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
if (err < 0) {
spin_lock_irq(&hostdata->lock);
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
- NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
/* Can't touch cmd if it has been reclaimed by the scsi ML */
if (!hostdata->selecting)
@@ -1157,7 +1165,6 @@ static bool NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
if (err < 0) {
shost_printk(KERN_ERR, instance, "select: REQ timeout\n");
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
- NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
goto out;
}
if (!hostdata->selecting) {
@@ -1672,12 +1679,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
sun3_dma_setup_done != cmd) {
int count;
- if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) {
- ++cmd->SCp.buffer;
- --cmd->SCp.buffers_residual;
- cmd->SCp.this_residual = cmd->SCp.buffer->length;
- cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
- }
+ advance_sg_buffer(cmd);
count = sun3scsi_dma_xfer_len(hostdata, cmd);
@@ -1727,15 +1729,11 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
* scatter-gather list, move onto the next one.
*/
- if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) {
- ++cmd->SCp.buffer;
- --cmd->SCp.buffers_residual;
- cmd->SCp.this_residual = cmd->SCp.buffer->length;
- cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
- dsprintk(NDEBUG_INFORMATION, instance, "%d bytes and %d buffers left\n",
- cmd->SCp.this_residual,
- cmd->SCp.buffers_residual);
- }
+ advance_sg_buffer(cmd);
+ dsprintk(NDEBUG_INFORMATION, instance,
+ "this residual %d, sg ents %d\n",
+ cmd->SCp.this_residual,
+ sg_nents(cmd->SCp.buffer));
/*
* The preferred transfer method is going to be
@@ -1763,10 +1761,8 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
scmd_printk(KERN_INFO, cmd,
"switching to slow handshake\n");
cmd->device->borken = 1;
- sink = 1;
- do_abort(instance);
- cmd->result = DID_ERROR << 16;
- /* XXX - need to source or sink data here, as appropriate */
+ do_reset(instance);
+ bus_reset_cleanup(instance);
}
} else {
/* Transfer a small chunk so that the
@@ -1826,9 +1822,6 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
*/
NCR5380_write(TARGET_COMMAND_REG, 0);
- /* Enable reselect interrupts */
- NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
-
maybe_release_dma_irq(instance);
return;
case MESSAGE_REJECT:
@@ -1860,8 +1853,6 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
*/
NCR5380_write(TARGET_COMMAND_REG, 0);
- /* Enable reselect interrupts */
- NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
#ifdef SUN3_SCSI_VME
dregs->csr |= CSR_DMA_ENABLE;
#endif
@@ -1964,7 +1955,6 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
cmd->result = DID_ERROR << 16;
complete_cmd(instance, cmd);
maybe_release_dma_irq(instance);
- NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
return;
}
msgout = NOP;
@@ -2136,12 +2126,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
if (sun3_dma_setup_done != tmp) {
int count;
- if (!tmp->SCp.this_residual && tmp->SCp.buffers_residual) {
- ++tmp->SCp.buffer;
- --tmp->SCp.buffers_residual;
- tmp->SCp.this_residual = tmp->SCp.buffer->length;
- tmp->SCp.ptr = sg_virt(tmp->SCp.buffer);
- }
+ advance_sg_buffer(tmp);
count = sun3scsi_dma_xfer_len(hostdata, tmp);
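
The three open-coded scatterlist advances above collapse into advance_sg_buffer(), which infers "more entries left" from sg_is_last() instead of the hand-maintained buffers_residual counter. A small sketch of how a transfer-setup path leans on it; the wrapper below is illustrative, not part of the patch:

/* Make sure SCp points at an entry with data left, then bound the chunk. */
static int example_next_transfer_len(struct scsi_cmnd *cmd, int max_chunk)
{
	advance_sg_buffer(cmd);	/* updates SCp.buffer, SCp.ptr, SCp.this_residual */
	return min(cmd->SCp.this_residual, max_chunk);
}
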
diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h
index efca509b92b0..5935fd6d1a05 100644
--- a/drivers/scsi/NCR5380.h
+++ b/drivers/scsi/NCR5380.h
@@ -235,7 +235,7 @@ struct NCR5380_cmd {
#define NCR5380_PIO_CHUNK_SIZE 256
/* Time limit (ms) to poll registers when IRQs are disabled, e.g. during PDMA */
-#define NCR5380_REG_POLL_TIME 15
+#define NCR5380_REG_POLL_TIME 10
static inline struct scsi_cmnd *NCR5380_to_scmd(struct NCR5380_cmd *ncmd_ptr)
{
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 926311c792d5..a242a62caaa1 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -7710,7 +7710,7 @@ adv_get_sglist(struct asc_board *boardp, adv_req_t *reqp,
sg_block->sg_ptr = 0L; /* Last ADV_SG_BLOCK in list. */
return ADV_SUCCESS;
}
- slp++;
+ slp = sg_next(slp);
}
sg_block->sg_cnt = NO_OF_SG_PER_BLOCK;
prev_sg_block = sg_block;
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index 88c649b3ef61..eb466c2e1839 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -937,7 +937,6 @@ static int aha152x_internal_queue(struct scsi_cmnd *SCpnt,
SCp.ptr : buffer pointer
SCp.this_residual : buffer length
SCp.buffer : next buffer
- SCp.buffers_residual : left buffers in list
SCp.phase : current state of the command */
if ((phase & resetting) || !scsi_sglist(SCpnt)) {
@@ -945,13 +944,11 @@ static int aha152x_internal_queue(struct scsi_cmnd *SCpnt,
SCpnt->SCp.this_residual = 0;
scsi_set_resid(SCpnt, 0);
SCpnt->SCp.buffer = NULL;
- SCpnt->SCp.buffers_residual = 0;
} else {
scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
SCpnt->SCp.buffer = scsi_sglist(SCpnt);
SCpnt->SCp.ptr = SG_ADDRESS(SCpnt->SCp.buffer);
SCpnt->SCp.this_residual = SCpnt->SCp.buffer->length;
- SCpnt->SCp.buffers_residual = scsi_sg_count(SCpnt) - 1;
}
DO_LOCK(flags);
@@ -2019,10 +2016,9 @@ static void datai_run(struct Scsi_Host *shpnt)
}
if (CURRENT_SC->SCp.this_residual == 0 &&
- CURRENT_SC->SCp.buffers_residual > 0) {
+ !sg_is_last(CURRENT_SC->SCp.buffer)) {
/* advance to next buffer */
- CURRENT_SC->SCp.buffers_residual--;
- CURRENT_SC->SCp.buffer++;
+ CURRENT_SC->SCp.buffer = sg_next(CURRENT_SC->SCp.buffer);
CURRENT_SC->SCp.ptr = SG_ADDRESS(CURRENT_SC->SCp.buffer);
CURRENT_SC->SCp.this_residual = CURRENT_SC->SCp.buffer->length;
}
@@ -2125,10 +2121,10 @@ static void datao_run(struct Scsi_Host *shpnt)
CMD_INC_RESID(CURRENT_SC, -2 * data_count);
}
- if(CURRENT_SC->SCp.this_residual==0 && CURRENT_SC->SCp.buffers_residual>0) {
+ if (CURRENT_SC->SCp.this_residual == 0 &&
+ !sg_is_last(CURRENT_SC->SCp.buffer)) {
/* advance to next buffer */
- CURRENT_SC->SCp.buffers_residual--;
- CURRENT_SC->SCp.buffer++;
+ CURRENT_SC->SCp.buffer = sg_next(CURRENT_SC->SCp.buffer);
CURRENT_SC->SCp.ptr = SG_ADDRESS(CURRENT_SC->SCp.buffer);
CURRENT_SC->SCp.this_residual = CURRENT_SC->SCp.buffer->length;
}
@@ -2147,22 +2143,26 @@ static void datao_run(struct Scsi_Host *shpnt)
static void datao_end(struct Scsi_Host *shpnt)
{
if(TESTLO(DMASTAT, DFIFOEMP)) {
- int data_count = (DATA_LEN - scsi_get_resid(CURRENT_SC)) -
- GETSTCNT();
+ u32 datao_cnt = GETSTCNT();
+ int datao_out = DATA_LEN - scsi_get_resid(CURRENT_SC);
+ int done;
+ struct scatterlist *sg = scsi_sglist(CURRENT_SC);
- CMD_INC_RESID(CURRENT_SC, data_count);
+ CMD_INC_RESID(CURRENT_SC, datao_out - datao_cnt);
- data_count -= CURRENT_SC->SCp.ptr -
- SG_ADDRESS(CURRENT_SC->SCp.buffer);
- while(data_count>0) {
- CURRENT_SC->SCp.buffer--;
- CURRENT_SC->SCp.buffers_residual++;
- data_count -= CURRENT_SC->SCp.buffer->length;
+ done = scsi_bufflen(CURRENT_SC) - scsi_get_resid(CURRENT_SC);
+ /* Locate the first SG entry not yet sent */
+ while (done > 0 && !sg_is_last(sg)) {
+ if (done < sg->length)
+ break;
+ done -= sg->length;
+ sg = sg_next(sg);
}
- CURRENT_SC->SCp.ptr = SG_ADDRESS(CURRENT_SC->SCp.buffer) -
- data_count;
- CURRENT_SC->SCp.this_residual = CURRENT_SC->SCp.buffer->length +
- data_count;
+
+ CURRENT_SC->SCp.buffer = sg;
+ CURRENT_SC->SCp.ptr = SG_ADDRESS(CURRENT_SC->SCp.buffer) + done;
+ CURRENT_SC->SCp.this_residual = CURRENT_SC->SCp.buffer->length -
+ done;
}
SETPORT(SXFRCTL0, CH1|CLRCH1|CLRSTCNT);
@@ -2490,7 +2490,7 @@ static void get_command(struct seq_file *m, struct scsi_cmnd * ptr)
seq_printf(m, "); resid=%d; residual=%d; buffers=%d; phase |",
scsi_get_resid(ptr), ptr->SCp.this_residual,
- ptr->SCp.buffers_residual);
+ sg_nents(ptr->SCp.buffer) - 1);
if (ptr->SCp.phase & not_issued)
seq_puts(m, "not issued|");
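
Instead of stepping backwards through the list and re-incrementing buffers_residual, the reworked datao_end() above recounts forward from the number of bytes actually transferred. A minimal standalone sketch of that forward search; the function name and in/out convention are made up for illustration:

#include <linux/scatterlist.h>

/* Given *done bytes already sent, return the entry to resume in; on return
 * *done holds the byte offset into that entry. */
static struct scatterlist *example_find_resume_sg(struct scatterlist *sg,
						  int *done)
{
	while (*done > 0 && !sg_is_last(sg)) {
		if (*done < sg->length)
			break;
		*done -= sg->length;
		sg = sg_next(sg);
	}
	return sg;
}
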
diff --git a/drivers/scsi/aic7xxx/aic7xxx.reg b/drivers/scsi/aic7xxx/aic7xxx.reg
index ba0b411d03e2..00fde2243e48 100644
--- a/drivers/scsi/aic7xxx/aic7xxx.reg
+++ b/drivers/scsi/aic7xxx/aic7xxx.reg
@@ -1666,7 +1666,7 @@ scratch_ram {
size 6
/*
* These are reserved registers in the card's scratch ram on the 2742.
- * The EISA configuraiton chip is mapped here. On Rev E. of the
+ * The EISA configuration chip is mapped here. On Rev E. of the
* aic7770, the sequencer can use this area for scratch, but the
* host cannot directly access these registers. On later chips, this
* area can be read and written by both the host and the sequencer.
diff --git a/drivers/scsi/aic94xx/aic94xx_dev.c b/drivers/scsi/aic94xx/aic94xx_dev.c
index 730b35e7c1ba..604a5331f639 100644
--- a/drivers/scsi/aic94xx/aic94xx_dev.c
+++ b/drivers/scsi/aic94xx/aic94xx_dev.c
@@ -170,9 +170,7 @@ static int asd_init_target_ddb(struct domain_device *dev)
}
} else {
flags |= CONCURRENT_CONN_SUPP;
- if (!dev->parent &&
- (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
- dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE))
+ if (!dev->parent && dev_is_expander(dev->dev_type))
asd_ddbsite_write_byte(asd_ha, ddb, MAX_CCONN,
4);
else
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index 901a31632493..3b84db8d13a9 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -66,7 +66,7 @@
#include "bnx2fc_constants.h"
#define BNX2FC_NAME "bnx2fc"
-#define BNX2FC_VERSION "2.11.8"
+#define BNX2FC_VERSION "2.12.10"
#define PFX "bnx2fc: "
@@ -75,8 +75,9 @@
#define BNX2X_DOORBELL_PCI_BAR 2
#define BNX2FC_MAX_BD_LEN 0xffff
-#define BNX2FC_BD_SPLIT_SZ 0x8000
-#define BNX2FC_MAX_BDS_PER_CMD 256
+#define BNX2FC_BD_SPLIT_SZ 0xffff
+#define BNX2FC_MAX_BDS_PER_CMD 255
+#define BNX2FC_FW_MAX_BDS_PER_CMD 255
#define BNX2FC_SQ_WQES_MAX 256
@@ -433,8 +434,10 @@ struct bnx2fc_cmd {
void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg);
struct bnx2fc_els_cb_arg *cb_arg;
struct delayed_work timeout_work; /* timer for ULP timeouts */
- struct completion tm_done;
- int wait_for_comp;
+ struct completion abts_done;
+ struct completion cleanup_done;
+ int wait_for_abts_comp;
+ int wait_for_cleanup_comp;
u16 xid;
struct fcoe_err_report_entry err_entry;
struct fcoe_task_ctx_entry *task;
@@ -455,6 +458,7 @@ struct bnx2fc_cmd {
#define BNX2FC_FLAG_ELS_TIMEOUT 0xb
#define BNX2FC_FLAG_CMD_LOST 0xc
#define BNX2FC_FLAG_SRR_SENT 0xd
+#define BNX2FC_FLAG_ISSUE_CLEANUP_REQ 0xe
u8 rec_retry;
u8 srr_retry;
u32 srr_offset;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_els.c b/drivers/scsi/bnx2fc/bnx2fc_els.c
index 76e65a32f38c..754f2e82d955 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_els.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_els.c
@@ -610,7 +610,6 @@ int bnx2fc_send_rec(struct bnx2fc_cmd *orig_io_req)
rc = bnx2fc_initiate_els(tgt, ELS_REC, &rec, sizeof(rec),
bnx2fc_rec_compl, cb_arg,
r_a_tov);
-rec_err:
if (rc) {
BNX2FC_IO_DBG(orig_io_req, "REC failed - release\n");
spin_lock_bh(&tgt->tgt_lock);
@@ -618,6 +617,7 @@ rec_err:
spin_unlock_bh(&tgt->tgt_lock);
kfree(cb_arg);
}
+rec_err:
return rc;
}
@@ -654,7 +654,6 @@ int bnx2fc_send_srr(struct bnx2fc_cmd *orig_io_req, u32 offset, u8 r_ctl)
rc = bnx2fc_initiate_els(tgt, ELS_SRR, &srr, sizeof(srr),
bnx2fc_srr_compl, cb_arg,
r_a_tov);
-srr_err:
if (rc) {
BNX2FC_IO_DBG(orig_io_req, "SRR failed - release\n");
spin_lock_bh(&tgt->tgt_lock);
@@ -664,6 +663,7 @@ srr_err:
} else
set_bit(BNX2FC_FLAG_SRR_SENT, &orig_io_req->req_flags);
+srr_err:
return rc;
}
@@ -854,33 +854,57 @@ void bnx2fc_process_els_compl(struct bnx2fc_cmd *els_req,
kref_put(&els_req->refcount, bnx2fc_cmd_release);
}
+#define BNX2FC_FCOE_MAC_METHOD_GRANGED_MAC 1
+#define BNX2FC_FCOE_MAC_METHOD_FCF_MAP 2
+#define BNX2FC_FCOE_MAC_METHOD_FCOE_SET_MAC 3
static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
void *arg)
{
struct fcoe_ctlr *fip = arg;
struct fc_exch *exch = fc_seq_exch(seq);
struct fc_lport *lport = exch->lp;
- u8 *mac;
- u8 op;
+
+ struct fc_frame_header *fh;
+ u8 *granted_mac;
+ u8 fcoe_mac[6];
+ u8 fc_map[3];
+ int method;
if (IS_ERR(fp))
goto done;
- mac = fr_cb(fp)->granted_mac;
- if (is_zero_ether_addr(mac)) {
- op = fc_frame_payload_op(fp);
- if (lport->vport) {
- if (op == ELS_LS_RJT) {
- printk(KERN_ERR PFX "bnx2fc_flogi_resp is LS_RJT\n");
- fc_vport_terminate(lport->vport);
- fc_frame_free(fp);
- return;
- }
- }
- fcoe_ctlr_recv_flogi(fip, lport, fp);
+ fh = fc_frame_header_get(fp);
+ granted_mac = fr_cb(fp)->granted_mac;
+
+ /*
+ * We set the source MAC for FCoE traffic based on the Granted MAC
+ * address from the switch.
+ *
+ * If granted_mac is non-zero, we use that.
+ * If the granted_mac is zeroed out, create the FCoE MAC based on
+ * the sel_fcf->fc_map and the d_id of the FLOGI frame.
+ * If sel_fcf->fc_map is 0, then we use the default FCF-MAC plus the
+ * d_id of the FLOGI frame.
+ */
+ if (!is_zero_ether_addr(granted_mac)) {
+ ether_addr_copy(fcoe_mac, granted_mac);
+ method = BNX2FC_FCOE_MAC_METHOD_GRANGED_MAC;
+ } else if (fip->sel_fcf && fip->sel_fcf->fc_map != 0) {
+ hton24(fc_map, fip->sel_fcf->fc_map);
+ fcoe_mac[0] = fc_map[0];
+ fcoe_mac[1] = fc_map[1];
+ fcoe_mac[2] = fc_map[2];
+ fcoe_mac[3] = fh->fh_d_id[0];
+ fcoe_mac[4] = fh->fh_d_id[1];
+ fcoe_mac[5] = fh->fh_d_id[2];
+ method = BNX2FC_FCOE_MAC_METHOD_FCF_MAP;
+ } else {
+ fc_fcoe_set_mac(fcoe_mac, fh->fh_d_id);
+ method = BNX2FC_FCOE_MAC_METHOD_FCOE_SET_MAC;
}
- if (!is_zero_ether_addr(mac))
- fip->update_mac(lport, mac);
+
+ BNX2FC_HBA_DBG(lport, "fcoe_mac=%pM method=%d\n", fcoe_mac, method);
+ fip->update_mac(lport, fcoe_mac);
done:
fc_lport_flogi_resp(seq, fp, lport);
}
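
The comment block in bnx2fc_flogi_resp() above lists three sources for the FCoE source MAC. The fabric-provided case, FC-MAP from the selected FCF plus the 24-bit N_Port ID, is the least obvious, so here is a hedged standalone sketch of that construction (helper name and parameters are illustrative; hton24() and the byte layout match the code above):

/* FPMA-style MAC: upper 3 bytes are the fabric's FC-MAP, lower 3 bytes are
 * the destination ID from the FLOGI frame header. */
static void example_fc_map_mac(u8 fcoe_mac[6], u32 fc_map_val,
			       const u8 d_id[3])
{
	u8 fc_map[3];

	hton24(fc_map, fc_map_val);
	fcoe_mac[0] = fc_map[0];
	fcoe_mac[1] = fc_map[1];
	fcoe_mac[2] = fc_map[2];
	fcoe_mac[3] = d_id[0];
	fcoe_mac[4] = d_id[1];
	fcoe_mac[5] = d_id[2];
}
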
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index a75e74ad1698..7796799bf04a 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -2971,7 +2971,8 @@ static struct scsi_host_template bnx2fc_shost_template = {
.this_id = -1,
.cmd_per_lun = 3,
.sg_tablesize = BNX2FC_MAX_BDS_PER_CMD,
- .max_sectors = 1024,
+ .dma_boundary = 0x7fff,
+ .max_sectors = 0x3fbf,
.track_queue_depth = 1,
.slave_configure = bnx2fc_slave_configure,
.shost_attrs = bnx2fc_host_attrs,
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 8def63c0755f..9e50e5b53763 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -70,7 +70,7 @@ static void bnx2fc_cmd_timeout(struct work_struct *work)
&io_req->req_flags)) {
/* Handle eh_abort timeout */
BNX2FC_IO_DBG(io_req, "eh_abort timed out\n");
- complete(&io_req->tm_done);
+ complete(&io_req->abts_done);
} else if (test_bit(BNX2FC_FLAG_ISSUE_ABTS,
&io_req->req_flags)) {
/* Handle internally generated ABTS timeout */
@@ -775,31 +775,32 @@ retry_tmf:
io_req->on_tmf_queue = 1;
list_add_tail(&io_req->link, &tgt->active_tm_queue);
- init_completion(&io_req->tm_done);
- io_req->wait_for_comp = 1;
+ init_completion(&io_req->abts_done);
+ io_req->wait_for_abts_comp = 1;
/* Ring doorbell */
bnx2fc_ring_doorbell(tgt);
spin_unlock_bh(&tgt->tgt_lock);
- rc = wait_for_completion_timeout(&io_req->tm_done,
+ rc = wait_for_completion_timeout(&io_req->abts_done,
interface->tm_timeout * HZ);
spin_lock_bh(&tgt->tgt_lock);
- io_req->wait_for_comp = 0;
+ io_req->wait_for_abts_comp = 0;
if (!(test_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags))) {
set_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags);
if (io_req->on_tmf_queue) {
list_del_init(&io_req->link);
io_req->on_tmf_queue = 0;
}
- io_req->wait_for_comp = 1;
+ io_req->wait_for_cleanup_comp = 1;
+ init_completion(&io_req->cleanup_done);
bnx2fc_initiate_cleanup(io_req);
spin_unlock_bh(&tgt->tgt_lock);
- rc = wait_for_completion_timeout(&io_req->tm_done,
+ rc = wait_for_completion_timeout(&io_req->cleanup_done,
BNX2FC_FW_TIMEOUT);
spin_lock_bh(&tgt->tgt_lock);
- io_req->wait_for_comp = 0;
+ io_req->wait_for_cleanup_comp = 0;
if (!rc)
kref_put(&io_req->refcount, bnx2fc_cmd_release);
}
@@ -1047,6 +1048,9 @@ int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req)
/* Obtain free SQ entry */
bnx2fc_add_2_sq(tgt, xid);
+ /* Set flag that cleanup request is pending with the firmware */
+ set_bit(BNX2FC_FLAG_ISSUE_CLEANUP_REQ, &io_req->req_flags);
+
/* Ring doorbell */
bnx2fc_ring_doorbell(tgt);
@@ -1085,7 +1089,8 @@ static int bnx2fc_abts_cleanup(struct bnx2fc_cmd *io_req)
struct bnx2fc_rport *tgt = io_req->tgt;
unsigned int time_left;
- io_req->wait_for_comp = 1;
+ init_completion(&io_req->cleanup_done);
+ io_req->wait_for_cleanup_comp = 1;
bnx2fc_initiate_cleanup(io_req);
spin_unlock_bh(&tgt->tgt_lock);
@@ -1094,21 +1099,21 @@ static int bnx2fc_abts_cleanup(struct bnx2fc_cmd *io_req)
* Can't wait forever on cleanup response lest we let the SCSI error
* handler wait forever
*/
- time_left = wait_for_completion_timeout(&io_req->tm_done,
+ time_left = wait_for_completion_timeout(&io_req->cleanup_done,
BNX2FC_FW_TIMEOUT);
- io_req->wait_for_comp = 0;
- if (!time_left)
+ if (!time_left) {
BNX2FC_IO_DBG(io_req, "%s(): Wait for cleanup timed out.\n",
__func__);
- /*
- * Release reference held by SCSI command the cleanup completion
- * hits the BNX2FC_CLEANUP case in bnx2fc_process_cq_compl() and
- * thus the SCSI command is not returnedi by bnx2fc_scsi_done().
- */
- kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ /*
+ * Put the extra reference to the SCSI command since it would
+ * not have been returned in this case.
+ */
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ }
spin_lock_bh(&tgt->tgt_lock);
+ io_req->wait_for_cleanup_comp = 0;
return SUCCESS;
}
@@ -1197,7 +1202,8 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
/* Move IO req to retire queue */
list_add_tail(&io_req->link, &tgt->io_retire_queue);
- init_completion(&io_req->tm_done);
+ init_completion(&io_req->abts_done);
+ init_completion(&io_req->cleanup_done);
if (test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
@@ -1225,26 +1231,28 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
kref_put(&io_req->refcount,
bnx2fc_cmd_release); /* drop timer hold */
set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags);
- io_req->wait_for_comp = 1;
+ io_req->wait_for_abts_comp = 1;
rc = bnx2fc_initiate_abts(io_req);
if (rc == FAILED) {
+ io_req->wait_for_cleanup_comp = 1;
bnx2fc_initiate_cleanup(io_req);
spin_unlock_bh(&tgt->tgt_lock);
- wait_for_completion(&io_req->tm_done);
+ wait_for_completion(&io_req->cleanup_done);
spin_lock_bh(&tgt->tgt_lock);
- io_req->wait_for_comp = 0;
+ io_req->wait_for_cleanup_comp = 0;
goto done;
}
spin_unlock_bh(&tgt->tgt_lock);
/* Wait 2 * RA_TOV + 1 to be sure timeout function hasn't fired */
- time_left = wait_for_completion_timeout(&io_req->tm_done,
- (2 * rp->r_a_tov + 1) * HZ);
+ time_left = wait_for_completion_timeout(&io_req->abts_done,
+ (2 * rp->r_a_tov + 1) * HZ);
if (time_left)
- BNX2FC_IO_DBG(io_req, "Timed out in eh_abort waiting for tm_done");
+ BNX2FC_IO_DBG(io_req,
+ "Timed out in eh_abort waiting for abts_done");
spin_lock_bh(&tgt->tgt_lock);
- io_req->wait_for_comp = 0;
+ io_req->wait_for_abts_comp = 0;
if (test_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags)) {
BNX2FC_IO_DBG(io_req, "IO completed in a different context\n");
rc = SUCCESS;
@@ -1319,10 +1327,29 @@ void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req,
BNX2FC_IO_DBG(io_req, "Entered process_cleanup_compl "
"refcnt = %d, cmd_type = %d\n",
kref_read(&io_req->refcount), io_req->cmd_type);
+ /*
+ * Test whether there is a cleanup request pending. If not just
+ * exit.
+ */
+ if (!test_and_clear_bit(BNX2FC_FLAG_ISSUE_CLEANUP_REQ,
+ &io_req->req_flags))
+ return;
+ /*
+ * If we receive a cleanup completion for this request then the
+ * firmware will not give us an abort completion for this request
+ * so clear any ABTS pending flags.
+ */
+ if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags) &&
+ !test_bit(BNX2FC_FLAG_ABTS_DONE, &io_req->req_flags)) {
+ set_bit(BNX2FC_FLAG_ABTS_DONE, &io_req->req_flags);
+ if (io_req->wait_for_abts_comp)
+ complete(&io_req->abts_done);
+ }
+
bnx2fc_scsi_done(io_req, DID_ERROR);
kref_put(&io_req->refcount, bnx2fc_cmd_release);
- if (io_req->wait_for_comp)
- complete(&io_req->tm_done);
+ if (io_req->wait_for_cleanup_comp)
+ complete(&io_req->cleanup_done);
}
void bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req,
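
On the completion side, the hunks above use test_and_clear_bit() so that whichever firmware completion arrives first (cleanup or ABTS) claims the event exactly once and marks the other path done, waking its waiter as well. A compact, self-contained sketch of that handshake, with placeholder structure and flag names:

#include <linux/bitops.h>
#include <linux/completion.h>

enum { FLAG_ISSUE_ABTS, FLAG_ABTS_DONE, FLAG_ISSUE_CLEANUP_REQ };

struct my_io {
        unsigned long flags;
        int wait_for_abts_comp;
        int wait_for_cleanup_comp;
        struct completion abts_done;
        struct completion cleanup_done;
};

static void my_cleanup_compl(struct my_io *io)
{
        /* Ignore the completion if no cleanup request is outstanding. */
        if (!test_and_clear_bit(FLAG_ISSUE_CLEANUP_REQ, &io->flags))
                return;

        /*
         * A cleanup completion means the firmware will not also send an
         * ABTS completion for this request, so mark ABTS done and wake
         * any ABTS waiter as well.
         */
        if (test_bit(FLAG_ISSUE_ABTS, &io->flags) &&
            !test_and_set_bit(FLAG_ABTS_DONE, &io->flags) &&
            io->wait_for_abts_comp)
                complete(&io->abts_done);

        if (io->wait_for_cleanup_comp)
                complete(&io->cleanup_done);
}
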
@@ -1346,6 +1373,16 @@ void bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req,
return;
}
+ /*
+ * If we receive an ABTS completion here then we will not receive
+ * a cleanup completion so clear any cleanup pending flags.
+ */
+ if (test_bit(BNX2FC_FLAG_ISSUE_CLEANUP_REQ, &io_req->req_flags)) {
+ clear_bit(BNX2FC_FLAG_ISSUE_CLEANUP_REQ, &io_req->req_flags);
+ if (io_req->wait_for_cleanup_comp)
+ complete(&io_req->cleanup_done);
+ }
+
/* Do not issue RRQ as this IO is already cleanedup */
if (test_and_set_bit(BNX2FC_FLAG_IO_CLEANUP,
&io_req->req_flags))
@@ -1390,10 +1427,10 @@ void bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req,
bnx2fc_cmd_timer_set(io_req, r_a_tov);
io_compl:
- if (io_req->wait_for_comp) {
+ if (io_req->wait_for_abts_comp) {
if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
&io_req->req_flags))
- complete(&io_req->tm_done);
+ complete(&io_req->abts_done);
} else {
/*
* We end up here when ABTS is issued as
@@ -1577,9 +1614,9 @@ void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
sc_cmd->scsi_done(sc_cmd);
kref_put(&io_req->refcount, bnx2fc_cmd_release);
- if (io_req->wait_for_comp) {
+ if (io_req->wait_for_abts_comp) {
BNX2FC_IO_DBG(io_req, "tm_compl - wake up the waiter\n");
- complete(&io_req->tm_done);
+ complete(&io_req->abts_done);
}
}
@@ -1623,6 +1660,7 @@ static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req)
u64 addr;
int i;
+ WARN_ON(scsi_sg_count(sc) > BNX2FC_MAX_BDS_PER_CMD);
/*
* Use dma_map_sg directly to ensure we're using the correct
* dev struct off of pcidev.
@@ -1670,6 +1708,16 @@ static int bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req)
}
io_req->bd_tbl->bd_valid = bd_count;
+ /*
+ * Return the command to ML if BD count exceeds the max number
+ * that can be handled by FW.
+ */
+ if (bd_count > BNX2FC_FW_MAX_BDS_PER_CMD) {
+ pr_err("bd_count = %d exceeded FW supported max BD(255), task_id = 0x%x\n",
+ bd_count, io_req->xid);
+ return -ENOMEM;
+ }
+
return 0;
}
@@ -1926,10 +1974,10 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
* between command abort and (late) completion.
*/
BNX2FC_IO_DBG(io_req, "xid not on active_cmd_queue\n");
- if (io_req->wait_for_comp)
+ if (io_req->wait_for_abts_comp)
if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
&io_req->req_flags))
- complete(&io_req->tm_done);
+ complete(&io_req->abts_done);
}
bnx2fc_unmap_sg_list(io_req);
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index d735e87e416a..50384b4a817c 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -187,7 +187,7 @@ void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
/* Handle eh_abort timeout */
BNX2FC_IO_DBG(io_req, "eh_abort for IO "
"cleaned up\n");
- complete(&io_req->tm_done);
+ complete(&io_req->abts_done);
}
kref_put(&io_req->refcount,
bnx2fc_cmd_release); /* drop timer hold */
@@ -210,8 +210,8 @@ void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
list_del_init(&io_req->link);
io_req->on_tmf_queue = 0;
BNX2FC_IO_DBG(io_req, "tm_queue cleanup\n");
- if (io_req->wait_for_comp)
- complete(&io_req->tm_done);
+ if (io_req->wait_for_abts_comp)
+ complete(&io_req->abts_done);
}
list_for_each_entry_safe(io_req, tmp, &tgt->els_queue, link) {
@@ -251,8 +251,8 @@ void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
/* Handle eh_abort timeout */
BNX2FC_IO_DBG(io_req, "eh_abort for IO "
"in retire_q\n");
- if (io_req->wait_for_comp)
- complete(&io_req->tm_done);
+ if (io_req->wait_for_abts_comp)
+ complete(&io_req->abts_done);
}
kref_put(&io_req->refcount, bnx2fc_cmd_release);
}
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index b8dd9e648dd0..524cdbcd29aa 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -1243,8 +1243,12 @@ static int cxgb3i_ddp_init(struct cxgbi_device *cdev)
tformat.pgsz_order[i] = uinfo.pgsz_factor[i];
cxgbi_tagmask_check(tagmask, &tformat);
- cxgbi_ddp_ppm_setup(&tdev->ulp_iscsi, cdev, &tformat, ppmax,
- uinfo.llimit, uinfo.llimit, 0);
+ err = cxgbi_ddp_ppm_setup(&tdev->ulp_iscsi, cdev, &tformat,
+ (uinfo.ulimit - uinfo.llimit + 1),
+ uinfo.llimit, uinfo.llimit, 0, 0, 0);
+ if (err)
+ return err;
+
if (!(cdev->flags & CXGBI_FLAG_DDP_OFF)) {
uinfo.tagmask = tagmask;
uinfo.ulimit = uinfo.llimit + (ppmax << PPOD_SIZE_SHIFT);
@@ -1318,7 +1322,7 @@ static void cxgb3i_dev_open(struct t3cdev *t3dev)
err = cxgb3i_ddp_init(cdev);
if (err) {
- pr_info("0x%p ddp init failed\n", cdev);
+ pr_info("0x%p ddp init failed %d\n", cdev, err);
goto err_out;
}
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 124f3345420f..da50e87921bc 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -1665,8 +1665,12 @@ static u8 get_iscsi_dcb_priority(struct net_device *ndev)
return 0;
if (caps & DCB_CAP_DCBX_VER_IEEE) {
- iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_ANY;
+ iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_STREAM;
rv = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app);
+ if (!rv) {
+ iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_ANY;
+ rv = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app);
+ }
} else if (caps & DCB_CAP_DCBX_VER_CEE) {
iscsi_dcb_app.selector = DCB_APP_IDTYPE_PORTNUM;
rv = dcb_getapp(ndev, &iscsi_dcb_app);
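
The hunk above queries the IEEE DCB application table with the stream selector first and only falls back to the "any" selector when no priority mask comes back. A hedged, stand-alone sketch of the same fallback lookup; the wrapper function and the 3260 protocol value (the iSCSI TCP port) are illustrative, while dcb_ieee_getapp_mask() and the selector constants are the kernel's real DCB API:

#include <linux/bitops.h>
#include <linux/dcbnl.h>
#include <linux/netdevice.h>
#include <net/dcbnl.h>

/* Returns the 802.1p priority to use for iSCSI traffic, or 0 if none. */
static u8 iscsi_ieee_app_priority(struct net_device *ndev)
{
        struct dcb_app app = {
                .selector = IEEE_8021QAZ_APP_SEL_STREAM,
                .protocol = 3260,               /* iSCSI TCP port */
        };
        u8 mask = dcb_ieee_getapp_mask(ndev, &app);

        if (!mask) {
                /* Some peers register iSCSI under the "any" selector. */
                app.selector = IEEE_8021QAZ_APP_SEL_ANY;
                mask = dcb_ieee_getapp_mask(ndev, &app);
        }
        return mask ? ffs(mask) - 1 : 0;
}
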
@@ -2070,7 +2074,7 @@ static int cxgb4i_ddp_init(struct cxgbi_device *cdev)
struct net_device *ndev = cdev->ports[0];
struct cxgbi_tag_format tformat;
unsigned int ppmax;
- int i;
+ int i, err;
if (!lldi->vr->iscsi.size) {
pr_warn("%s, iscsi NOT enabled, check config!\n", ndev->name);
@@ -2086,8 +2090,17 @@ static int cxgb4i_ddp_init(struct cxgbi_device *cdev)
& 0xF;
cxgbi_tagmask_check(lldi->iscsi_tagmask, &tformat);
- cxgbi_ddp_ppm_setup(lldi->iscsi_ppm, cdev, &tformat, ppmax,
- lldi->iscsi_llimit, lldi->vr->iscsi.start, 2);
+ pr_info("iscsi_edram.start 0x%x iscsi_edram.size 0x%x",
+ lldi->vr->ppod_edram.start, lldi->vr->ppod_edram.size);
+
+ err = cxgbi_ddp_ppm_setup(lldi->iscsi_ppm, cdev, &tformat,
+ lldi->vr->iscsi.size, lldi->iscsi_llimit,
+ lldi->vr->iscsi.start, 2,
+ lldi->vr->ppod_edram.start,
+ lldi->vr->ppod_edram.size);
+
+ if (err < 0)
+ return err;
cdev->csk_ddp_setup_digest = ddp_setup_conn_digest;
cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx;
@@ -2141,7 +2154,7 @@ static void *t4_uld_add(const struct cxgb4_lld_info *lldi)
rc = cxgb4i_ddp_init(cdev);
if (rc) {
- pr_info("t4 0x%p ddp init failed.\n", cdev);
+ pr_info("t4 0x%p ddp init failed %d.\n", cdev, rc);
goto err_out;
}
rc = cxgb4i_ofld_init(cdev);
@@ -2251,7 +2264,8 @@ cxgb4_dcb_change_notify(struct notifier_block *self, unsigned long val,
u8 priority;
if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_IEEE) {
- if (iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_ANY)
+ if ((iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_STREAM) &&
+ (iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_ANY))
return NOTIFY_DONE;
priority = iscsi_app->app.priority;
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index 7d43e014bd21..3e17af8aedeb 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -1285,14 +1285,15 @@ EXPORT_SYMBOL_GPL(cxgbi_ddp_set_one_ppod);
static unsigned char padding[4];
-void cxgbi_ddp_ppm_setup(void **ppm_pp, struct cxgbi_device *cdev,
- struct cxgbi_tag_format *tformat, unsigned int ppmax,
- unsigned int llimit, unsigned int start,
- unsigned int rsvd_factor)
+int cxgbi_ddp_ppm_setup(void **ppm_pp, struct cxgbi_device *cdev,
+ struct cxgbi_tag_format *tformat,
+ unsigned int iscsi_size, unsigned int llimit,
+ unsigned int start, unsigned int rsvd_factor,
+ unsigned int edram_start, unsigned int edram_size)
{
int err = cxgbi_ppm_init(ppm_pp, cdev->ports[0], cdev->pdev,
- cdev->lldev, tformat, ppmax, llimit, start,
- rsvd_factor);
+ cdev->lldev, tformat, iscsi_size, llimit, start,
+ rsvd_factor, edram_start, edram_size);
if (err >= 0) {
struct cxgbi_ppm *ppm = (struct cxgbi_ppm *)(*ppm_pp);
@@ -1304,6 +1305,8 @@ void cxgbi_ddp_ppm_setup(void **ppm_pp, struct cxgbi_device *cdev,
} else {
cdev->flags |= CXGBI_FLAG_DDP_OFF;
}
+
+ return err;
}
EXPORT_SYMBOL_GPL(cxgbi_ddp_ppm_setup);
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
index 1917ff57651d..84b96af52655 100644
--- a/drivers/scsi/cxgbi/libcxgbi.h
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -617,8 +617,9 @@ void cxgbi_ddp_page_size_factor(int *);
void cxgbi_ddp_set_one_ppod(struct cxgbi_pagepod *,
struct cxgbi_task_tag_info *,
struct scatterlist **sg_pp, unsigned int *sg_off);
-void cxgbi_ddp_ppm_setup(void **ppm_pp, struct cxgbi_device *,
- struct cxgbi_tag_format *, unsigned int ppmax,
- unsigned int llimit, unsigned int start,
- unsigned int rsvd_factor);
+int cxgbi_ddp_ppm_setup(void **ppm_pp, struct cxgbi_device *cdev,
+ struct cxgbi_tag_format *tformat,
+ unsigned int iscsi_size, unsigned int llimit,
+ unsigned int start, unsigned int rsvd_factor,
+ unsigned int edram_start, unsigned int edram_size);
#endif /*__LIBCXGBI_H__*/
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index 76e7ca864d6a..bb88995a12c7 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -371,6 +371,7 @@ static void esp_map_dma(struct esp *esp, struct scsi_cmnd *cmd)
struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
struct scatterlist *sg = scsi_sglist(cmd);
int total = 0, i;
+ struct scatterlist *s;
if (cmd->sc_data_direction == DMA_NONE)
return;
@@ -381,16 +382,18 @@ static void esp_map_dma(struct esp *esp, struct scsi_cmnd *cmd)
* a dma address, so perform an identity mapping.
*/
spriv->num_sg = scsi_sg_count(cmd);
- for (i = 0; i < spriv->num_sg; i++) {
- sg[i].dma_address = (uintptr_t)sg_virt(&sg[i]);
- total += sg_dma_len(&sg[i]);
+
+ scsi_for_each_sg(cmd, s, spriv->num_sg, i) {
+ s->dma_address = (uintptr_t)sg_virt(s);
+ total += sg_dma_len(s);
}
} else {
spriv->num_sg = scsi_dma_map(cmd);
- for (i = 0; i < spriv->num_sg; i++)
- total += sg_dma_len(&sg[i]);
+ scsi_for_each_sg(cmd, s, spriv->num_sg, i)
+ total += sg_dma_len(s);
}
spriv->cur_residue = sg_dma_len(sg);
+ spriv->prv_sg = NULL;
spriv->cur_sg = sg;
spriv->tot_residue = total;
}
@@ -444,7 +447,8 @@ static void esp_advance_dma(struct esp *esp, struct esp_cmd_entry *ent,
p->tot_residue = 0;
}
if (!p->cur_residue && p->tot_residue) {
- p->cur_sg++;
+ p->prv_sg = p->cur_sg;
+ p->cur_sg = sg_next(p->cur_sg);
p->cur_residue = sg_dma_len(p->cur_sg);
}
}
@@ -465,6 +469,7 @@ static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent)
return;
}
ent->saved_cur_residue = spriv->cur_residue;
+ ent->saved_prv_sg = spriv->prv_sg;
ent->saved_cur_sg = spriv->cur_sg;
ent->saved_tot_residue = spriv->tot_residue;
}
@@ -479,6 +484,7 @@ static void esp_restore_pointers(struct esp *esp, struct esp_cmd_entry *ent)
return;
}
spriv->cur_residue = ent->saved_cur_residue;
+ spriv->prv_sg = ent->saved_prv_sg;
spriv->cur_sg = ent->saved_cur_sg;
spriv->tot_residue = ent->saved_tot_residue;
}
@@ -1647,7 +1653,7 @@ static int esp_msgin_process(struct esp *esp)
spriv = ESP_CMD_PRIV(ent->cmd);
if (spriv->cur_residue == sg_dma_len(spriv->cur_sg)) {
- spriv->cur_sg--;
+ spriv->cur_sg = spriv->prv_sg;
spriv->cur_residue = 1;
} else
spriv->cur_residue++;
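
The esp changes above stop treating the scatterlist as a flat array (cur_sg++ and cur_sg--) and instead walk it with scsi_for_each_sg()/sg_next(), remembering the previous entry in prv_sg so the message-in path can still step back by one; chained scatterlists are not contiguous in memory, so plain pointer arithmetic is unsafe there. A small sketch of that cursor idea, where the struct and function names are illustrative while sg_next() and sg_dma_len() are the real helpers:

#include <linux/scatterlist.h>

struct sg_cursor {
        struct scatterlist *prv;        /* previous entry, for one step back */
        struct scatterlist *cur;        /* current entry */
        unsigned int cur_residue;       /* DMA bytes left in cur */
};

static void sg_cursor_advance(struct sg_cursor *c)
{
        c->prv = c->cur;
        c->cur = sg_next(c->cur);       /* follows chain links, unlike c->cur++ */
        c->cur_residue = c->cur ? sg_dma_len(c->cur) : 0;
}

static void sg_cursor_step_back(struct sg_cursor *c)
{
        /* Only one level of history is kept, which is all that is needed here. */
        c->cur = c->prv;
        c->prv = NULL;
        c->cur_residue = c->cur ? sg_dma_len(c->cur) : 0;
}
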
diff --git a/drivers/scsi/esp_scsi.h b/drivers/scsi/esp_scsi.h
index aa87a6b72dcc..91b32f2a1a1b 100644
--- a/drivers/scsi/esp_scsi.h
+++ b/drivers/scsi/esp_scsi.h
@@ -251,6 +251,7 @@
struct esp_cmd_priv {
int num_sg;
int cur_residue;
+ struct scatterlist *prv_sg;
struct scatterlist *cur_sg;
int tot_residue;
};
@@ -273,6 +274,7 @@ struct esp_cmd_entry {
struct scsi_cmnd *cmd;
unsigned int saved_cur_residue;
+ struct scatterlist *saved_prv_sg;
struct scatterlist *saved_cur_sg;
unsigned int saved_tot_residue;
diff --git a/drivers/scsi/fdomain.c b/drivers/scsi/fdomain.c
new file mode 100644
index 000000000000..b5e66971b6d9
--- /dev/null
+++ b/drivers/scsi/fdomain.c
@@ -0,0 +1,597 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for Future Domain TMC-16x0 and TMC-3260 SCSI host adapters
+ * Copyright 2019 Ondrej Zary
+ *
+ * Original driver by
+ * Rickard E. Faith, faith@cs.unc.edu
+ *
+ * Future Domain BIOS versions supported for autodetect:
+ * 2.0, 3.0, 3.2, 3.4 (1.0), 3.5 (2.0), 3.6, 3.61
+ * Chips supported:
+ * TMC-1800, TMC-18C50, TMC-18C30, TMC-36C70
+ * Boards supported:
+ * Future Domain TMC-1650, TMC-1660, TMC-1670, TMC-1680, TMC-1610M/MER/MEX
+ * Future Domain TMC-3260 (PCI)
+ * Quantum ISA-200S, ISA-250MG
+ * Adaptec AHA-2920A (PCI) [BUT *NOT* AHA-2920C -- use aic7xxx instead]
+ * IBM ?
+ *
+ * NOTE:
+ *
+ * The Adaptec AHA-2920C has an Adaptec AIC-7850 chip on it.
+ * Use the aic7xxx driver for this board.
+ *
+ * The Adaptec AHA-2920A has a Future Domain chip on it, so this is the right
+ * driver for that card. Unfortunately, the boxes will probably just say
+ * "2920", so you'll have to look on the card for a Future Domain logo, or a
+ * letter after the 2920.
+ *
+ * If you have a TMC-8xx or TMC-9xx board, then this is not the driver for
+ * your board.
+ *
+ * DESCRIPTION:
+ *
+ * This is the Linux low-level SCSI driver for Future Domain TMC-1660/1680
+ * TMC-1650/1670, and TMC-3260 SCSI host adapters. The 1650 and 1670 have a
+ * 25-pin external connector, whereas the 1660 and 1680 have a SCSI-2 50-pin
+ * high-density external connector. The 1670 and 1680 have floppy disk
+ * controllers built in. The TMC-3260 is a PCI bus card.
+ *
+ * Future Domain's older boards are based on the TMC-1800 chip, and this
+ * driver was originally written for a TMC-1680 board with the TMC-1800 chip.
+ * More recently, boards are being produced with the TMC-18C50 and TMC-18C30
+ * chips.
+ *
+ * Please note that the drive ordering that Future Domain implemented in BIOS
+ * versions 3.4 and 3.5 is the opposite of the order (currently) used by the
+ * rest of the SCSI industry.
+ *
+ *
+ * REFERENCES USED:
+ *
+ * "TMC-1800 SCSI Chip Specification (FDC-1800T)", Future Domain Corporation,
+ * 1990.
+ *
+ * "Technical Reference Manual: 18C50 SCSI Host Adapter Chip", Future Domain
+ * Corporation, January 1992.
+ *
+ * "LXT SCSI Products: Specifications and OEM Technical Manual (Revision
+ * B/September 1991)", Maxtor Corporation, 1991.
+ *
+ * "7213S product Manual (Revision P3)", Maxtor Corporation, 1992.
+ *
+ * "Draft Proposed American National Standard: Small Computer System
+ * Interface - 2 (SCSI-2)", Global Engineering Documents. (X3T9.2/86-109,
+ * revision 10h, October 17, 1991)
+ *
+ * Private communications, Drew Eckhardt (drew@cs.colorado.edu) and Eric
+ * Youngdale (ericy@cais.com), 1992.
+ *
+ * Private communication, Tuong Le (Future Domain Engineering department),
+ * 1994. (Disk geometry computations for Future Domain BIOS version 3.4, and
+ * TMC-18C30 detection.)
+ *
+ * Hogan, Thom. The Programmer's PC Sourcebook. Microsoft Press, 1988. Page
+ * 60 (2.39: Disk Partition Table Layout).
+ *
+ * "18C30 Technical Reference Manual", Future Domain Corporation, 1993, page
+ * 6-1.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/workqueue.h>
+#include <scsi/scsicam.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include "fdomain.h"
+
+/*
+ * FIFO_COUNT: The host adapter has an 8K cache (host adapters based on the
+ * 18C30 chip have a 2k cache). When this many 512 byte blocks are filled by
+ * the SCSI device, an interrupt will be raised. Therefore, this could be as
+ * low as 0, or as high as 16. Note, however, that values which are too high
+ * or too low seem to prevent any interrupts from occurring, and thereby lock
+ * up the machine.
+ */
+#define FIFO_COUNT 2 /* Number of 512 byte blocks before INTR */
+#define PARITY_MASK ACTL_PAREN /* Parity enabled, 0 = disabled */
+
+enum chip_type {
+ unknown = 0x00,
+ tmc1800 = 0x01,
+ tmc18c50 = 0x02,
+ tmc18c30 = 0x03,
+};
+
+struct fdomain {
+ int base;
+ struct scsi_cmnd *cur_cmd;
+ enum chip_type chip;
+ struct work_struct work;
+};
+
+static inline void fdomain_make_bus_idle(struct fdomain *fd)
+{
+ outb(0, fd->base + REG_BCTL);
+ outb(0, fd->base + REG_MCTL);
+ if (fd->chip == tmc18c50 || fd->chip == tmc18c30)
+ /* Clear forced intr. */
+ outb(ACTL_RESET | ACTL_CLRFIRQ | PARITY_MASK,
+ fd->base + REG_ACTL);
+ else
+ outb(ACTL_RESET | PARITY_MASK, fd->base + REG_ACTL);
+}
+
+static enum chip_type fdomain_identify(int port)
+{
+ u16 id = inb(port + REG_ID_LSB) | inb(port + REG_ID_MSB) << 8;
+
+ switch (id) {
+ case 0x6127:
+ return tmc1800;
+ case 0x60e9: /* 18c50 or 18c30 */
+ break;
+ default:
+ return unknown;
+ }
+
+ /* Try to toggle 32-bit mode. This only works on an 18c30 chip. */
+ outb(CFG2_32BIT, port + REG_CFG2);
+ if ((inb(port + REG_CFG2) & CFG2_32BIT)) {
+ outb(0, port + REG_CFG2);
+ if ((inb(port + REG_CFG2) & CFG2_32BIT) == 0)
+ return tmc18c30;
+ }
+ /* If that failed, we are an 18c50. */
+ return tmc18c50;
+}
+
+static int fdomain_test_loopback(int base)
+{
+ int i;
+
+ for (i = 0; i < 255; i++) {
+ outb(i, base + REG_LOOPBACK);
+ if (inb(base + REG_LOOPBACK) != i)
+ return 1;
+ }
+
+ return 0;
+}
+
+static void fdomain_reset(int base)
+{
+ outb(1, base + REG_BCTL);
+ mdelay(20);
+ outb(0, base + REG_BCTL);
+ mdelay(1150);
+ outb(0, base + REG_MCTL);
+ outb(PARITY_MASK, base + REG_ACTL);
+}
+
+static int fdomain_select(struct Scsi_Host *sh, int target)
+{
+ int status;
+ unsigned long timeout;
+ struct fdomain *fd = shost_priv(sh);
+
+ outb(BCTL_BUSEN | BCTL_SEL, fd->base + REG_BCTL);
+ outb(BIT(sh->this_id) | BIT(target), fd->base + REG_SCSI_DATA_NOACK);
+
+ /* Stop arbitration and enable parity */
+ outb(PARITY_MASK, fd->base + REG_ACTL);
+
+ timeout = 350; /* 350 msec */
+
+ do {
+ status = inb(fd->base + REG_BSTAT);
+ if (status & BSTAT_BSY) {
+ /* Enable SCSI Bus */
+ /* (on error, should make bus idle with 0) */
+ outb(BCTL_BUSEN, fd->base + REG_BCTL);
+ return 0;
+ }
+ mdelay(1);
+ } while (--timeout);
+ fdomain_make_bus_idle(fd);
+ return 1;
+}
+
+static void fdomain_finish_cmd(struct fdomain *fd, int result)
+{
+ outb(0, fd->base + REG_ICTL);
+ fdomain_make_bus_idle(fd);
+ fd->cur_cmd->result = result;
+ fd->cur_cmd->scsi_done(fd->cur_cmd);
+ fd->cur_cmd = NULL;
+}
+
+static void fdomain_read_data(struct scsi_cmnd *cmd)
+{
+ struct fdomain *fd = shost_priv(cmd->device->host);
+ unsigned char *virt, *ptr;
+ size_t offset, len;
+
+ while ((len = inw(fd->base + REG_FIFO_COUNT)) > 0) {
+ offset = scsi_bufflen(cmd) - scsi_get_resid(cmd);
+ virt = scsi_kmap_atomic_sg(scsi_sglist(cmd), scsi_sg_count(cmd),
+ &offset, &len);
+ ptr = virt + offset;
+ if (len & 1)
+ *ptr++ = inb(fd->base + REG_FIFO);
+ if (len > 1)
+ insw(fd->base + REG_FIFO, ptr, len >> 1);
+ scsi_set_resid(cmd, scsi_get_resid(cmd) - len);
+ scsi_kunmap_atomic_sg(virt);
+ }
+}
+
+static void fdomain_write_data(struct scsi_cmnd *cmd)
+{
+ struct fdomain *fd = shost_priv(cmd->device->host);
+ /* 8k FIFO for pre-tmc18c30 chips, 2k FIFO for tmc18c30 */
+ int FIFO_Size = fd->chip == tmc18c30 ? 0x800 : 0x2000;
+ unsigned char *virt, *ptr;
+ size_t offset, len;
+
+ while ((len = FIFO_Size - inw(fd->base + REG_FIFO_COUNT)) > 512) {
+ offset = scsi_bufflen(cmd) - scsi_get_resid(cmd);
+ if (len + offset > scsi_bufflen(cmd)) {
+ len = scsi_bufflen(cmd) - offset;
+ if (len == 0)
+ break;
+ }
+ virt = scsi_kmap_atomic_sg(scsi_sglist(cmd), scsi_sg_count(cmd),
+ &offset, &len);
+ ptr = virt + offset;
+ if (len & 1)
+ outb(*ptr++, fd->base + REG_FIFO);
+ if (len > 1)
+ outsw(fd->base + REG_FIFO, ptr, len >> 1);
+ scsi_set_resid(cmd, scsi_get_resid(cmd) - len);
+ scsi_kunmap_atomic_sg(virt);
+ }
+}
+
+static void fdomain_work(struct work_struct *work)
+{
+ struct fdomain *fd = container_of(work, struct fdomain, work);
+ struct Scsi_Host *sh = container_of((void *)fd, struct Scsi_Host,
+ hostdata);
+ struct scsi_cmnd *cmd = fd->cur_cmd;
+ unsigned long flags;
+ int status;
+ int done = 0;
+
+ spin_lock_irqsave(sh->host_lock, flags);
+
+ if (cmd->SCp.phase & in_arbitration) {
+ status = inb(fd->base + REG_ASTAT);
+ if (!(status & ASTAT_ARB)) {
+ fdomain_finish_cmd(fd, DID_BUS_BUSY << 16);
+ goto out;
+ }
+ cmd->SCp.phase = in_selection;
+
+ outb(ICTL_SEL | FIFO_COUNT, fd->base + REG_ICTL);
+ outb(BCTL_BUSEN | BCTL_SEL, fd->base + REG_BCTL);
+ outb(BIT(cmd->device->host->this_id) | BIT(scmd_id(cmd)),
+ fd->base + REG_SCSI_DATA_NOACK);
+ /* Stop arbitration and enable parity */
+ outb(ACTL_IRQEN | PARITY_MASK, fd->base + REG_ACTL);
+ goto out;
+ } else if (cmd->SCp.phase & in_selection) {
+ status = inb(fd->base + REG_BSTAT);
+ if (!(status & BSTAT_BSY)) {
+ /* Try again, for slow devices */
+ if (fdomain_select(cmd->device->host, scmd_id(cmd))) {
+ fdomain_finish_cmd(fd, DID_NO_CONNECT << 16);
+ goto out;
+ }
+ /* Stop arbitration and enable parity */
+ outb(ACTL_IRQEN | PARITY_MASK, fd->base + REG_ACTL);
+ }
+ cmd->SCp.phase = in_other;
+ outb(ICTL_FIFO | ICTL_REQ | FIFO_COUNT, fd->base + REG_ICTL);
+ outb(BCTL_BUSEN, fd->base + REG_BCTL);
+ goto out;
+ }
+
+ /* cur_cmd->SCp.phase == in_other: this is the body of the routine */
+ status = inb(fd->base + REG_BSTAT);
+
+ if (status & BSTAT_REQ) {
+ switch (status & 0x0e) {
+ case BSTAT_CMD: /* COMMAND OUT */
+ outb(cmd->cmnd[cmd->SCp.sent_command++],
+ fd->base + REG_SCSI_DATA);
+ break;
+ case 0: /* DATA OUT -- tmc18c50/tmc18c30 only */
+ if (fd->chip != tmc1800 && !cmd->SCp.have_data_in) {
+ cmd->SCp.have_data_in = -1;
+ outb(ACTL_IRQEN | ACTL_FIFOWR | ACTL_FIFOEN |
+ PARITY_MASK, fd->base + REG_ACTL);
+ }
+ break;
+ case BSTAT_IO: /* DATA IN -- tmc18c50/tmc18c30 only */
+ if (fd->chip != tmc1800 && !cmd->SCp.have_data_in) {
+ cmd->SCp.have_data_in = 1;
+ outb(ACTL_IRQEN | ACTL_FIFOEN | PARITY_MASK,
+ fd->base + REG_ACTL);
+ }
+ break;
+ case BSTAT_CMD | BSTAT_IO: /* STATUS IN */
+ cmd->SCp.Status = inb(fd->base + REG_SCSI_DATA);
+ break;
+ case BSTAT_MSG | BSTAT_CMD: /* MESSAGE OUT */
+ outb(MESSAGE_REJECT, fd->base + REG_SCSI_DATA);
+ break;
+ case BSTAT_MSG | BSTAT_IO | BSTAT_CMD: /* MESSAGE IN */
+ cmd->SCp.Message = inb(fd->base + REG_SCSI_DATA);
+ if (!cmd->SCp.Message)
+ ++done;
+ break;
+ }
+ }
+
+ if (fd->chip == tmc1800 && !cmd->SCp.have_data_in &&
+ cmd->SCp.sent_command >= cmd->cmd_len) {
+ if (cmd->sc_data_direction == DMA_TO_DEVICE) {
+ cmd->SCp.have_data_in = -1;
+ outb(ACTL_IRQEN | ACTL_FIFOWR | ACTL_FIFOEN |
+ PARITY_MASK, fd->base + REG_ACTL);
+ } else {
+ cmd->SCp.have_data_in = 1;
+ outb(ACTL_IRQEN | ACTL_FIFOEN | PARITY_MASK,
+ fd->base + REG_ACTL);
+ }
+ }
+
+ if (cmd->SCp.have_data_in == -1) /* DATA OUT */
+ fdomain_write_data(cmd);
+
+ if (cmd->SCp.have_data_in == 1) /* DATA IN */
+ fdomain_read_data(cmd);
+
+ if (done) {
+ fdomain_finish_cmd(fd, (cmd->SCp.Status & 0xff) |
+ ((cmd->SCp.Message & 0xff) << 8) |
+ (DID_OK << 16));
+ } else {
+ if (cmd->SCp.phase & disconnect) {
+ outb(ICTL_FIFO | ICTL_SEL | ICTL_REQ | FIFO_COUNT,
+ fd->base + REG_ICTL);
+ outb(0, fd->base + REG_BCTL);
+ } else
+ outb(ICTL_FIFO | ICTL_REQ | FIFO_COUNT,
+ fd->base + REG_ICTL);
+ }
+out:
+ spin_unlock_irqrestore(sh->host_lock, flags);
+}
+
+static irqreturn_t fdomain_irq(int irq, void *dev_id)
+{
+ struct fdomain *fd = dev_id;
+
+ /* Is it our IRQ? */
+ if ((inb(fd->base + REG_ASTAT) & ASTAT_IRQ) == 0)
+ return IRQ_NONE;
+
+ outb(0, fd->base + REG_ICTL);
+
+ /* We usually have one spurious interrupt after each command. */
+ if (!fd->cur_cmd) /* Spurious interrupt */
+ return IRQ_NONE;
+
+ schedule_work(&fd->work);
+
+ return IRQ_HANDLED;
+}
+
+static int fdomain_queue(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
+{
+ struct fdomain *fd = shost_priv(cmd->device->host);
+ unsigned long flags;
+
+ cmd->SCp.Status = 0;
+ cmd->SCp.Message = 0;
+ cmd->SCp.have_data_in = 0;
+ cmd->SCp.sent_command = 0;
+ cmd->SCp.phase = in_arbitration;
+ scsi_set_resid(cmd, scsi_bufflen(cmd));
+
+ spin_lock_irqsave(sh->host_lock, flags);
+
+ fd->cur_cmd = cmd;
+
+ fdomain_make_bus_idle(fd);
+
+ /* Start arbitration */
+ outb(0, fd->base + REG_ICTL);
+ outb(0, fd->base + REG_BCTL); /* Disable data drivers */
+ /* Set our id bit */
+ outb(BIT(cmd->device->host->this_id), fd->base + REG_SCSI_DATA_NOACK);
+ outb(ICTL_ARB, fd->base + REG_ICTL);
+ /* Start arbitration */
+ outb(ACTL_ARB | ACTL_IRQEN | PARITY_MASK, fd->base + REG_ACTL);
+
+ spin_unlock_irqrestore(sh->host_lock, flags);
+
+ return 0;
+}
+
+static int fdomain_abort(struct scsi_cmnd *cmd)
+{
+ struct Scsi_Host *sh = cmd->device->host;
+ struct fdomain *fd = shost_priv(sh);
+ unsigned long flags;
+
+ if (!fd->cur_cmd)
+ return FAILED;
+
+ spin_lock_irqsave(sh->host_lock, flags);
+
+ fdomain_make_bus_idle(fd);
+ fd->cur_cmd->SCp.phase |= aborted;
+ fd->cur_cmd->result = DID_ABORT << 16;
+
+ /* Aborts are not done well. . . */
+ fdomain_finish_cmd(fd, DID_ABORT << 16);
+ spin_unlock_irqrestore(sh->host_lock, flags);
+ return SUCCESS;
+}
+
+static int fdomain_host_reset(struct scsi_cmnd *cmd)
+{
+ struct Scsi_Host *sh = cmd->device->host;
+ struct fdomain *fd = shost_priv(sh);
+ unsigned long flags;
+
+ spin_lock_irqsave(sh->host_lock, flags);
+ fdomain_reset(fd->base);
+ spin_unlock_irqrestore(sh->host_lock, flags);
+ return SUCCESS;
+}
+
+static int fdomain_biosparam(struct scsi_device *sdev,
+ struct block_device *bdev, sector_t capacity,
+ int geom[])
+{
+ unsigned char *p = scsi_bios_ptable(bdev);
+
+ if (p && p[65] == 0xaa && p[64] == 0x55 /* Partition table valid */
+ && p[4]) { /* Partition type */
+ geom[0] = p[5] + 1; /* heads */
+ geom[1] = p[6] & 0x3f; /* sectors */
+ } else {
+ if (capacity >= 0x7e0000) {
+ geom[0] = 255; /* heads */
+ geom[1] = 63; /* sectors */
+ } else if (capacity >= 0x200000) {
+ geom[0] = 128; /* heads */
+ geom[1] = 63; /* sectors */
+ } else {
+ geom[0] = 64; /* heads */
+ geom[1] = 32; /* sectors */
+ }
+ }
+ geom[2] = sector_div(capacity, geom[0] * geom[1]);
+ kfree(p);
+
+ return 0;
+}
+
+static struct scsi_host_template fdomain_template = {
+ .module = THIS_MODULE,
+ .name = "Future Domain TMC-16x0",
+ .proc_name = "fdomain",
+ .queuecommand = fdomain_queue,
+ .eh_abort_handler = fdomain_abort,
+ .eh_host_reset_handler = fdomain_host_reset,
+ .bios_param = fdomain_biosparam,
+ .can_queue = 1,
+ .this_id = 7,
+ .sg_tablesize = 64,
+ .dma_boundary = PAGE_SIZE - 1,
+};
+
+struct Scsi_Host *fdomain_create(int base, int irq, int this_id,
+ struct device *dev)
+{
+ struct Scsi_Host *sh;
+ struct fdomain *fd;
+ enum chip_type chip;
+ static const char * const chip_names[] = {
+ "Unknown", "TMC-1800", "TMC-18C50", "TMC-18C30"
+ };
+ unsigned long irq_flags = 0;
+
+ chip = fdomain_identify(base);
+ if (!chip)
+ return NULL;
+
+ fdomain_reset(base);
+
+ if (fdomain_test_loopback(base))
+ return NULL;
+
+ if (!irq) {
+ dev_err(dev, "card has no IRQ assigned");
+ return NULL;
+ }
+
+ sh = scsi_host_alloc(&fdomain_template, sizeof(struct fdomain));
+ if (!sh)
+ return NULL;
+
+ if (this_id)
+ sh->this_id = this_id & 0x07;
+
+ sh->irq = irq;
+ sh->io_port = base;
+ sh->n_io_port = FDOMAIN_REGION_SIZE;
+
+ fd = shost_priv(sh);
+ fd->base = base;
+ fd->chip = chip;
+ INIT_WORK(&fd->work, fdomain_work);
+
+ if (dev_is_pci(dev) || !strcmp(dev->bus->name, "pcmcia"))
+ irq_flags = IRQF_SHARED;
+
+ if (request_irq(irq, fdomain_irq, irq_flags, "fdomain", fd))
+ goto fail_put;
+
+ shost_printk(KERN_INFO, sh, "%s chip at 0x%x irq %d SCSI ID %d\n",
+ dev_is_pci(dev) ? "TMC-36C70 (PCI bus)" : chip_names[chip],
+ base, irq, sh->this_id);
+
+ if (scsi_add_host(sh, dev))
+ goto fail_free_irq;
+
+ scsi_scan_host(sh);
+
+ return sh;
+
+fail_free_irq:
+ free_irq(irq, fd);
+fail_put:
+ scsi_host_put(sh);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(fdomain_create);
+
+int fdomain_destroy(struct Scsi_Host *sh)
+{
+ struct fdomain *fd = shost_priv(sh);
+
+ cancel_work_sync(&fd->work);
+ scsi_remove_host(sh);
+ if (sh->irq)
+ free_irq(sh->irq, fd);
+ scsi_host_put(sh);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fdomain_destroy);
+
+#ifdef CONFIG_PM_SLEEP
+static int fdomain_resume(struct device *dev)
+{
+ struct fdomain *fd = shost_priv(dev_get_drvdata(dev));
+
+ fdomain_reset(fd->base);
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(fdomain_pm_ops, NULL, fdomain_resume);
+#endif /* CONFIG_PM_SLEEP */
+
+MODULE_AUTHOR("Ondrej Zary, Rickard E. Faith");
+MODULE_DESCRIPTION("Future Domain TMC-16x0/TMC-3260 SCSI driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/fdomain.h b/drivers/scsi/fdomain.h
new file mode 100644
index 000000000000..6f63fc6b0d12
--- /dev/null
+++ b/drivers/scsi/fdomain.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#define FDOMAIN_REGION_SIZE 0x10
+#define FDOMAIN_BIOS_SIZE 0x2000
+
+enum {
+ in_arbitration = 0x02,
+ in_selection = 0x04,
+ in_other = 0x08,
+ disconnect = 0x10,
+ aborted = 0x20,
+ sent_ident = 0x40,
+};
+
+/* (@) = not present on TMC1800, (#) = not present on TMC1800 and TMC18C50 */
+#define REG_SCSI_DATA 0 /* R/W: SCSI Data (with ACK) */
+#define REG_BSTAT 1 /* R: SCSI Bus Status */
+#define BSTAT_BSY BIT(0) /* Busy */
+#define BSTAT_MSG BIT(1) /* Message */
+#define BSTAT_IO BIT(2) /* Input/Output */
+#define BSTAT_CMD BIT(3) /* Command/Data */
+#define BSTAT_REQ BIT(4) /* Request and Not Ack */
+#define BSTAT_SEL BIT(5) /* Select */
+#define BSTAT_ACK BIT(6) /* Acknowledge and Request */
+#define BSTAT_ATN BIT(7) /* Attention */
+#define REG_BCTL 1 /* W: SCSI Bus Control */
+#define BCTL_RST BIT(0) /* Bus Reset */
+#define BCTL_SEL BIT(1) /* Select */
+#define BCTL_BSY BIT(2) /* Busy */
+#define BCTL_ATN BIT(3) /* Attention */
+#define BCTL_IO BIT(4) /* Input/Output */
+#define BCTL_CMD BIT(5) /* Command/Data */
+#define BCTL_MSG BIT(6) /* Message */
+#define BCTL_BUSEN BIT(7) /* Enable bus drivers */
+#define REG_ASTAT 2 /* R: Adapter Status 1 */
+#define ASTAT_IRQ BIT(0) /* Interrupt active */
+#define ASTAT_ARB BIT(1) /* Arbitration complete */
+#define ASTAT_PARERR BIT(2) /* Parity error */
+#define ASTAT_RST BIT(3) /* SCSI reset occurred */
+#define ASTAT_FIFODIR BIT(4) /* FIFO direction */
+#define ASTAT_FIFOEN BIT(5) /* FIFO enabled */
+#define ASTAT_PAREN BIT(6) /* Parity enabled */
+#define ASTAT_BUSEN BIT(7) /* Bus drivers enabled */
+#define REG_ICTL 2 /* W: Interrupt Control */
+#define ICTL_FIFO_MASK 0x0f /* FIFO threshold, 1/16 FIFO size */
+#define ICTL_FIFO BIT(4) /* Int. on FIFO count */
+#define ICTL_ARB BIT(5) /* Int. on Arbitration complete */
+#define ICTL_SEL BIT(6) /* Int. on SCSI Select */
+#define ICTL_REQ BIT(7) /* Int. on SCSI Request */
+#define REG_FSTAT 3 /* R: Adapter Status 2 (FIFO) - (@) */
+#define FSTAT_ONOTEMPTY BIT(0) /* Output FIFO not empty */
+#define FSTAT_INOTEMPTY BIT(1) /* Input FIFO not empty */
+#define FSTAT_NOTEMPTY BIT(2) /* Main FIFO not empty */
+#define FSTAT_NOTFULL BIT(3) /* Main FIFO not full */
+#define REG_MCTL 3 /* W: SCSI Data Mode Control */
+#define MCTL_ACK_MASK 0x0f /* Acknowledge period */
+#define MCTL_ACTDEASS BIT(4) /* Active deassert of REQ and ACK */
+#define MCTL_TARGET BIT(5) /* Enable target mode */
+#define MCTL_FASTSYNC BIT(6) /* Enable Fast Synchronous */
+#define MCTL_SYNC BIT(7) /* Enable Synchronous */
+#define REG_INTCOND 4 /* R: Interrupt Condition - (@) */
+#define IRQ_FIFO BIT(1) /* FIFO interrupt */
+#define IRQ_REQ BIT(2) /* SCSI Request interrupt */
+#define IRQ_SEL BIT(3) /* SCSI Select interrupt */
+#define IRQ_ARB BIT(4) /* SCSI Arbitration interrupt */
+#define IRQ_RST BIT(5) /* SCSI Reset interrupt */
+#define IRQ_FORCED BIT(6) /* Forced interrupt */
+#define IRQ_TIMEOUT BIT(7) /* Bus timeout */
+#define REG_ACTL 4 /* W: Adapter Control 1 */
+#define ACTL_RESET BIT(0) /* Reset FIFO, parity, reset int. */
+#define ACTL_FIRQ BIT(1) /* Set Forced interrupt */
+#define ACTL_ARB BIT(2) /* Initiate Bus Arbitration */
+#define ACTL_PAREN BIT(3) /* Enable SCSI Parity */
+#define ACTL_IRQEN BIT(4) /* Enable interrupts */
+#define ACTL_CLRFIRQ BIT(5) /* Clear Forced interrupt */
+#define ACTL_FIFOWR BIT(6) /* FIFO Direction (1=write) */
+#define ACTL_FIFOEN BIT(7) /* Enable FIFO */
+#define REG_ID_LSB 5 /* R: ID Code (LSB) */
+#define REG_ACTL2 5 /* Adapter Control 2 - (@) */
+#define ACTL2_RAMOVRLY BIT(0) /* Enable RAM overlay */
+#define ACTL2_SLEEP BIT(7) /* Sleep mode */
+#define REG_ID_MSB 6 /* R: ID Code (MSB) */
+#define REG_LOOPBACK 7 /* R/W: Loopback */
+#define REG_SCSI_DATA_NOACK 8 /* R/W: SCSI Data (no ACK) */
+#define REG_ASTAT3 9 /* R: Adapter Status 3 */
+#define ASTAT3_ACTDEASS BIT(0) /* Active deassert enabled */
+#define ASTAT3_RAMOVRLY BIT(1) /* RAM overlay enabled */
+#define ASTAT3_TARGERR BIT(2) /* Target error */
+#define ASTAT3_IRQEN BIT(3) /* Interrupts enabled */
+#define ASTAT3_IRQMASK 0xf0 /* Enabled interrupts mask */
+#define REG_CFG1 10 /* R: Configuration Register 1 */
+#define CFG1_BUS BIT(0) /* 0 = ISA */
+#define CFG1_IRQ_MASK 0x0e /* IRQ jumpers */
+#define CFG1_IO_MASK 0x30 /* I/O base jumpers */
+#define CFG1_BIOS_MASK 0xc0 /* BIOS base jumpers */
+#define REG_CFG2 11 /* R/W: Configuration Register 2 (@) */
+#define CFG2_ROMDIS BIT(0) /* ROM disabled */
+#define CFG2_RAMDIS BIT(1) /* RAM disabled */
+#define CFG2_IRQEDGE BIT(2) /* Edge-triggered interrupts */
+#define CFG2_NOWS BIT(3) /* No wait states */
+#define CFG2_32BIT BIT(7) /* 32-bit mode */
+#define REG_FIFO 12 /* R/W: FIFO */
+#define REG_FIFO_COUNT 14 /* R: FIFO Data Count */
+
+#ifdef CONFIG_PM_SLEEP
+static const struct dev_pm_ops fdomain_pm_ops;
+#define FDOMAIN_PM_OPS (&fdomain_pm_ops)
+#else
+#define FDOMAIN_PM_OPS NULL
+#endif /* CONFIG_PM_SLEEP */
+
+struct Scsi_Host *fdomain_create(int base, int irq, int this_id,
+ struct device *dev);
+int fdomain_destroy(struct Scsi_Host *sh);
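
Taking the FIFO_COUNT comment in fdomain.c together with the ICTL_FIFO_MASK note above (the threshold field is in units of 1/16 of the FIFO), a rough worked example of the interrupt threshold, stated as an assumption rather than chip documentation: on the 8 KiB FIFO one sixteenth is 512 bytes, so FIFO_COUNT = 2 raises the interrupt after two 512-byte blocks (1024 bytes), while on the 18C30's 2 KiB FIFO the same setting corresponds to 256 bytes.

/* Illustrative only: interrupt threshold in bytes for a given FIFO_COUNT,
 * assuming the 1/16-of-FIFO encoding noted for ICTL_FIFO_MASK. */
static unsigned int fifo_irq_threshold(unsigned int fifo_bytes,
                                       unsigned int fifo_count)
{
        return fifo_bytes / 16 * fifo_count;    /* 8192 / 16 * 2 = 1024 */
}
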
diff --git a/drivers/scsi/fdomain_isa.c b/drivers/scsi/fdomain_isa.c
new file mode 100644
index 000000000000..28639adf8219
--- /dev/null
+++ b/drivers/scsi/fdomain_isa.c
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/isa.h>
+#include <scsi/scsi_host.h>
+#include "fdomain.h"
+
+#define MAXBOARDS_PARAM 4
+static int io[MAXBOARDS_PARAM] = { 0, 0, 0, 0 };
+module_param_hw_array(io, int, ioport, NULL, 0);
+MODULE_PARM_DESC(io, "base I/O address of controller (0x140, 0x150, 0x160, 0x170)");
+
+static int irq[MAXBOARDS_PARAM] = { 0, 0, 0, 0 };
+module_param_hw_array(irq, int, irq, NULL, 0);
+MODULE_PARM_DESC(irq, "IRQ of controller (0=auto [default])");
+
+static int scsi_id[MAXBOARDS_PARAM] = { 0, 0, 0, 0 };
+module_param_hw_array(scsi_id, int, other, NULL, 0);
+MODULE_PARM_DESC(scsi_id, "SCSI ID of controller (default = 7)");
+
+static unsigned long addresses[] = {
+ 0xc8000,
+ 0xca000,
+ 0xce000,
+ 0xde000,
+};
+#define ADDRESS_COUNT ARRAY_SIZE(addresses)
+
+static unsigned short ports[] = { 0x140, 0x150, 0x160, 0x170 };
+#define PORT_COUNT ARRAY_SIZE(ports)
+
+static unsigned short irqs[] = { 3, 5, 10, 11, 12, 14, 15, 0 };
+
+/* This driver works *ONLY* for Future Domain cards using the TMC-1800,
+ * TMC-18C50, or TMC-18C30 chip. This includes models TMC-1650, 1660, 1670,
+ * and 1680. These are all 16-bit cards.
+ * BIOS versions prior to 3.2 assigned SCSI ID 6 to SCSI adapter.
+ *
+ * The following BIOS signatures are for boards which do *NOT*
+ * work with this driver (these TMC-8xx and TMC-9xx boards may work with the
+ * Seagate driver):
+ *
+ * FUTURE DOMAIN CORP. (C) 1986-1988 V4.0I 03/16/88
+ * FUTURE DOMAIN CORP. (C) 1986-1989 V5.0C2/14/89
+ * FUTURE DOMAIN CORP. (C) 1986-1989 V6.0A7/28/89
+ * FUTURE DOMAIN CORP. (C) 1986-1990 V6.0105/31/90
+ * FUTURE DOMAIN CORP. (C) 1986-1990 V6.0209/18/90
+ * FUTURE DOMAIN CORP. (C) 1986-1990 V7.009/18/90
+ * FUTURE DOMAIN CORP. (C) 1992 V8.00.004/02/92
+ *
+ * (The cards which do *NOT* work are all 8-bit cards -- although some of
+ * them have a 16-bit form-factor, the upper 8-bits are used only for IRQs
+ * and are *NOT* used for data. You can tell the difference by following
+ * the tracings on the circuit board -- if only the IRQ lines are involved,
+ * you have an "8-bit" card, and should *NOT* use this driver.)
+ */
+
+static struct signature {
+ const char *signature;
+ int offset;
+ int length;
+ int this_id;
+ int base_offset;
+} signatures[] = {
+/* 1 2 3 4 5 6 */
+/* 123456789012345678901234567890123456789012345678901234567890 */
+{ "FUTURE DOMAIN CORP. (C) 1986-1990 1800-V2.07/28/89", 5, 50, 6, 0x1fcc },
+{ "FUTURE DOMAIN CORP. (C) 1986-1990 1800-V1.07/28/89", 5, 50, 6, 0x1fcc },
+{ "FUTURE DOMAIN CORP. (C) 1986-1990 1800-V2.07/28/89", 72, 50, 6, 0x1fa2 },
+{ "FUTURE DOMAIN CORP. (C) 1986-1990 1800-V2.0", 73, 43, 6, 0x1fa2 },
+{ "FUTURE DOMAIN CORP. (C) 1991 1800-V2.0.", 72, 39, 6, 0x1fa3 },
+{ "FUTURE DOMAIN CORP. (C) 1992 V3.00.004/02/92", 5, 44, 6, 0 },
+{ "FUTURE DOMAIN TMC-18XX (C) 1993 V3.203/12/93", 5, 44, 7, 0 },
+{ "IBM F1 P2 BIOS v1.0011/09/92", 5, 28, 7, 0x1ff3 },
+{ "IBM F1 P2 BIOS v1.0104/29/93", 5, 28, 7, 0 },
+{ "Future Domain Corp. V1.0008/18/93", 5, 33, 7, 0 },
+{ "Future Domain Corp. V2.0108/18/93", 5, 33, 7, 0 },
+{ "FUTURE DOMAIN CORP. V3.5008/18/93", 5, 34, 7, 0 },
+{ "FUTURE DOMAIN 18c30/18c50/1800 (C) 1994 V3.5", 5, 44, 7, 0 },
+{ "FUTURE DOMAIN CORP. V3.6008/18/93", 5, 34, 7, 0 },
+{ "FUTURE DOMAIN CORP. V3.6108/18/93", 5, 34, 7, 0 },
+};
+#define SIGNATURE_COUNT ARRAY_SIZE(signatures)
+
+static int fdomain_isa_match(struct device *dev, unsigned int ndev)
+{
+ struct Scsi_Host *sh;
+ int i, base = 0, irq = 0;
+ unsigned long bios_base = 0;
+ struct signature *sig = NULL;
+ void __iomem *p;
+ static struct signature *saved_sig;
+ int this_id = 7;
+
+ if (ndev < ADDRESS_COUNT) { /* scan supported ISA BIOS addresses */
+ p = ioremap(addresses[ndev], FDOMAIN_BIOS_SIZE);
+ if (!p)
+ return 0;
+ for (i = 0; i < SIGNATURE_COUNT; i++)
+ if (check_signature(p + signatures[i].offset,
+ signatures[i].signature,
+ signatures[i].length))
+ break;
+ if (i == SIGNATURE_COUNT) /* no signature found */
+ goto fail_unmap;
+ sig = &signatures[i];
+ bios_base = addresses[ndev];
+ /* read I/O base from BIOS area */
+ if (sig->base_offset)
+ base = readb(p + sig->base_offset) +
+ (readb(p + sig->base_offset + 1) << 8);
+ iounmap(p);
+ if (base)
+ dev_info(dev, "BIOS at 0x%lx specifies I/O base 0x%x\n",
+ bios_base, base);
+ else
+ dev_info(dev, "BIOS at 0x%lx\n", bios_base);
+ if (!base) { /* no I/O base in BIOS area */
+ /* save BIOS signature for later use in port probing */
+ saved_sig = sig;
+ return 0;
+ }
+ } else /* scan supported I/O ports */
+ base = ports[ndev - ADDRESS_COUNT];
+
+ /* use saved BIOS signature if present */
+ if (!sig && saved_sig)
+ sig = saved_sig;
+
+ if (!request_region(base, FDOMAIN_REGION_SIZE, "fdomain_isa"))
+ return 0;
+
+ irq = irqs[(inb(base + REG_CFG1) & 0x0e) >> 1];
+
+
+ if (sig)
+ this_id = sig->this_id;
+
+ sh = fdomain_create(base, irq, this_id, dev);
+ if (!sh) {
+ release_region(base, FDOMAIN_REGION_SIZE);
+ return 0;
+ }
+
+ dev_set_drvdata(dev, sh);
+ return 1;
+fail_unmap:
+ iounmap(p);
+ return 0;
+}
+
+static int fdomain_isa_param_match(struct device *dev, unsigned int ndev)
+{
+ struct Scsi_Host *sh;
+ int irq_ = irq[ndev];
+
+ if (!io[ndev])
+ return 0;
+
+ if (!request_region(io[ndev], FDOMAIN_REGION_SIZE, "fdomain_isa")) {
+ dev_err(dev, "base 0x%x already in use", io[ndev]);
+ return 0;
+ }
+
+ if (irq_ <= 0)
+ irq_ = irqs[(inb(io[ndev] + REG_CFG1) & 0x0e) >> 1];
+
+ sh = fdomain_create(io[ndev], irq_, scsi_id[ndev], dev);
+ if (!sh) {
+ dev_err(dev, "controller not found at base 0x%x", io[ndev]);
+ release_region(io[ndev], FDOMAIN_REGION_SIZE);
+ return 0;
+ }
+
+ dev_set_drvdata(dev, sh);
+ return 1;
+}
+
+static int fdomain_isa_remove(struct device *dev, unsigned int ndev)
+{
+ struct Scsi_Host *sh = dev_get_drvdata(dev);
+ int base = sh->io_port;
+
+ fdomain_destroy(sh);
+ release_region(base, FDOMAIN_REGION_SIZE);
+ dev_set_drvdata(dev, NULL);
+ return 0;
+}
+
+static struct isa_driver fdomain_isa_driver = {
+ .match = fdomain_isa_match,
+ .remove = fdomain_isa_remove,
+ .driver = {
+ .name = "fdomain_isa",
+ .pm = FDOMAIN_PM_OPS,
+ },
+};
+
+static int __init fdomain_isa_init(void)
+{
+ int isa_probe_count = ADDRESS_COUNT + PORT_COUNT;
+
+ if (io[0]) { /* use module parameters if present */
+ fdomain_isa_driver.match = fdomain_isa_param_match;
+ isa_probe_count = MAXBOARDS_PARAM;
+ }
+
+ return isa_register_driver(&fdomain_isa_driver, isa_probe_count);
+}
+
+static void __exit fdomain_isa_exit(void)
+{
+ isa_unregister_driver(&fdomain_isa_driver);
+}
+
+module_init(fdomain_isa_init);
+module_exit(fdomain_isa_exit);
+
+MODULE_AUTHOR("Ondrej Zary, Rickard E. Faith");
+MODULE_DESCRIPTION("Future Domain TMC-16x0 ISA SCSI driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/fdomain_pci.c b/drivers/scsi/fdomain_pci.c
new file mode 100644
index 000000000000..3e05ce7b89e5
--- /dev/null
+++ b/drivers/scsi/fdomain_pci.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include "fdomain.h"
+
+static int fdomain_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *d)
+{
+ int err;
+ struct Scsi_Host *sh;
+
+ err = pci_enable_device(pdev);
+ if (err)
+ goto fail;
+
+ err = pci_request_regions(pdev, "fdomain_pci");
+ if (err)
+ goto disable_device;
+
+ err = -ENODEV;
+ if (pci_resource_len(pdev, 0) == 0)
+ goto release_region;
+
+ sh = fdomain_create(pci_resource_start(pdev, 0), pdev->irq, 7,
+ &pdev->dev);
+ if (!sh)
+ goto release_region;
+
+ pci_set_drvdata(pdev, sh);
+ return 0;
+
+release_region:
+ pci_release_regions(pdev);
+disable_device:
+ pci_disable_device(pdev);
+fail:
+ return err;
+}
+
+static void fdomain_pci_remove(struct pci_dev *pdev)
+{
+ struct Scsi_Host *sh = pci_get_drvdata(pdev);
+
+ fdomain_destroy(sh);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+static struct pci_device_id fdomain_pci_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_FD, PCI_DEVICE_ID_FD_36C70) },
+ {}
+};
+MODULE_DEVICE_TABLE(pci, fdomain_pci_table);
+
+static struct pci_driver fdomain_pci_driver = {
+ .name = "fdomain_pci",
+ .id_table = fdomain_pci_table,
+ .probe = fdomain_pci_probe,
+ .remove = fdomain_pci_remove,
+ .driver.pm = FDOMAIN_PM_OPS,
+};
+
+module_pci_driver(fdomain_pci_driver);
+
+MODULE_AUTHOR("Ondrej Zary, Rickard E. Faith");
+MODULE_DESCRIPTION("Future Domain TMC-3260 PCI SCSI driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 8d9a8fb2dd32..42a02cc47a60 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -61,10 +61,6 @@
#define HISI_SAS_MAX_SMP_RESP_SZ 1028
#define HISI_SAS_MAX_STP_RESP_SZ 28
-#define DEV_IS_EXPANDER(type) \
- ((type == SAS_EDGE_EXPANDER_DEVICE) || \
- (type == SAS_FANOUT_EXPANDER_DEVICE))
-
#define HISI_SAS_SATA_PROTOCOL_NONDATA 0x1
#define HISI_SAS_SATA_PROTOCOL_PIO 0x2
#define HISI_SAS_SATA_PROTOCOL_DMA 0x4
@@ -479,12 +475,12 @@ struct hisi_sas_command_table_stp {
u8 atapi_cdb[ATAPI_CDB_LEN];
};
-#define HISI_SAS_SGE_PAGE_CNT SG_CHUNK_SIZE
+#define HISI_SAS_SGE_PAGE_CNT (124)
struct hisi_sas_sge_page {
struct hisi_sas_sge sge[HISI_SAS_SGE_PAGE_CNT];
} __aligned(16);
-#define HISI_SAS_SGE_DIF_PAGE_CNT SG_CHUNK_SIZE
+#define HISI_SAS_SGE_DIF_PAGE_CNT HISI_SAS_SGE_PAGE_CNT
struct hisi_sas_sge_dif_page {
struct hisi_sas_sge sge[HISI_SAS_SGE_DIF_PAGE_CNT];
} __aligned(16);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 5879771d82b2..cb746cfc2fa8 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -803,7 +803,7 @@ static int hisi_sas_dev_found(struct domain_device *device)
device->lldd_dev = sas_dev;
hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
- if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
+ if (parent_dev && dev_is_expander(parent_dev->dev_type)) {
int phy_no;
u8 phy_num = parent_dev->ex_dev.num_phys;
struct ex_phy *phy;
@@ -1446,7 +1446,7 @@ static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
_sas_port = sas_port;
- if (DEV_IS_EXPANDER(dev->dev_type))
+ if (dev_is_expander(dev->dev_type))
sas_ha->notify_port_event(sas_phy,
PORTE_BROADCAST_RCVD);
}
@@ -1533,7 +1533,7 @@ static void hisi_sas_terminate_stp_reject(struct hisi_hba *hisi_hba)
struct domain_device *port_dev = sas_port->port_dev;
struct domain_device *device;
- if (!port_dev || !DEV_IS_EXPANDER(port_dev->dev_type))
+ if (!port_dev || !dev_is_expander(port_dev->dev_type))
continue;
/* Try to find a SATA device */
@@ -1903,7 +1903,7 @@ static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
struct domain_device *device = sas_dev->sas_device;
if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device ||
- DEV_IS_EXPANDER(device->dev_type))
+ dev_is_expander(device->dev_type))
continue;
rc = hisi_sas_debug_I_T_nexus_reset(device);
@@ -2475,6 +2475,14 @@ EXPORT_SYMBOL_GPL(hisi_sas_alloc);
void hisi_sas_free(struct hisi_hba *hisi_hba)
{
+ int i;
+
+ for (i = 0; i < hisi_hba->n_phy; i++) {
+ struct hisi_sas_phy *phy = &hisi_hba->phy[i];
+
+ del_timer_sync(&phy->timer);
+ }
+
if (hisi_hba->wq)
destroy_workqueue(hisi_hba->wq);
}
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index d99086ef6244..e9b15d45f98f 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -422,70 +422,70 @@ static const struct hisi_sas_hw_error one_bit_ecc_errors[] = {
.irq_msk = BIT(SAS_ECC_INTR_DQE_ECC_1B_OFF),
.msk = HGC_DQE_ECC_1B_ADDR_MSK,
.shift = HGC_DQE_ECC_1B_ADDR_OFF,
- .msg = "hgc_dqe_acc1b_intr found: Ram address is 0x%08X\n",
+ .msg = "hgc_dqe_ecc1b_intr",
.reg = HGC_DQE_ECC_ADDR,
},
{
.irq_msk = BIT(SAS_ECC_INTR_IOST_ECC_1B_OFF),
.msk = HGC_IOST_ECC_1B_ADDR_MSK,
.shift = HGC_IOST_ECC_1B_ADDR_OFF,
- .msg = "hgc_iost_acc1b_intr found: Ram address is 0x%08X\n",
+ .msg = "hgc_iost_ecc1b_intr",
.reg = HGC_IOST_ECC_ADDR,
},
{
.irq_msk = BIT(SAS_ECC_INTR_ITCT_ECC_1B_OFF),
.msk = HGC_ITCT_ECC_1B_ADDR_MSK,
.shift = HGC_ITCT_ECC_1B_ADDR_OFF,
- .msg = "hgc_itct_acc1b_intr found: am address is 0x%08X\n",
+ .msg = "hgc_itct_ecc1b_intr",
.reg = HGC_ITCT_ECC_ADDR,
},
{
.irq_msk = BIT(SAS_ECC_INTR_IOSTLIST_ECC_1B_OFF),
.msk = HGC_LM_DFX_STATUS2_IOSTLIST_MSK,
.shift = HGC_LM_DFX_STATUS2_IOSTLIST_OFF,
- .msg = "hgc_iostl_acc1b_intr found: memory address is 0x%08X\n",
+ .msg = "hgc_iostl_ecc1b_intr",
.reg = HGC_LM_DFX_STATUS2,
},
{
.irq_msk = BIT(SAS_ECC_INTR_ITCTLIST_ECC_1B_OFF),
.msk = HGC_LM_DFX_STATUS2_ITCTLIST_MSK,
.shift = HGC_LM_DFX_STATUS2_ITCTLIST_OFF,
- .msg = "hgc_itctl_acc1b_intr found: memory address is 0x%08X\n",
+ .msg = "hgc_itctl_ecc1b_intr",
.reg = HGC_LM_DFX_STATUS2,
},
{
.irq_msk = BIT(SAS_ECC_INTR_CQE_ECC_1B_OFF),
.msk = HGC_CQE_ECC_1B_ADDR_MSK,
.shift = HGC_CQE_ECC_1B_ADDR_OFF,
- .msg = "hgc_cqe_acc1b_intr found: Ram address is 0x%08X\n",
+ .msg = "hgc_cqe_ecc1b_intr",
.reg = HGC_CQE_ECC_ADDR,
},
{
.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_1B_OFF),
.msk = HGC_RXM_DFX_STATUS14_MEM0_MSK,
.shift = HGC_RXM_DFX_STATUS14_MEM0_OFF,
- .msg = "rxm_mem0_acc1b_intr found: memory address is 0x%08X\n",
+ .msg = "rxm_mem0_ecc1b_intr",
.reg = HGC_RXM_DFX_STATUS14,
},
{
.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_1B_OFF),
.msk = HGC_RXM_DFX_STATUS14_MEM1_MSK,
.shift = HGC_RXM_DFX_STATUS14_MEM1_OFF,
- .msg = "rxm_mem1_acc1b_intr found: memory address is 0x%08X\n",
+ .msg = "rxm_mem1_ecc1b_intr",
.reg = HGC_RXM_DFX_STATUS14,
},
{
.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_1B_OFF),
.msk = HGC_RXM_DFX_STATUS14_MEM2_MSK,
.shift = HGC_RXM_DFX_STATUS14_MEM2_OFF,
- .msg = "rxm_mem2_acc1b_intr found: memory address is 0x%08X\n",
+ .msg = "rxm_mem2_ecc1b_intr",
.reg = HGC_RXM_DFX_STATUS14,
},
{
.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_1B_OFF),
.msk = HGC_RXM_DFX_STATUS15_MEM3_MSK,
.shift = HGC_RXM_DFX_STATUS15_MEM3_OFF,
- .msg = "rxm_mem3_acc1b_intr found: memory address is 0x%08X\n",
+ .msg = "rxm_mem3_ecc1b_intr",
.reg = HGC_RXM_DFX_STATUS15,
},
};
@@ -495,70 +495,70 @@ static const struct hisi_sas_hw_error multi_bit_ecc_errors[] = {
.irq_msk = BIT(SAS_ECC_INTR_DQE_ECC_MB_OFF),
.msk = HGC_DQE_ECC_MB_ADDR_MSK,
.shift = HGC_DQE_ECC_MB_ADDR_OFF,
- .msg = "hgc_dqe_accbad_intr (0x%x) found: Ram address is 0x%08X\n",
+ .msg = "hgc_dqe_eccbad_intr",
.reg = HGC_DQE_ECC_ADDR,
},
{
.irq_msk = BIT(SAS_ECC_INTR_IOST_ECC_MB_OFF),
.msk = HGC_IOST_ECC_MB_ADDR_MSK,
.shift = HGC_IOST_ECC_MB_ADDR_OFF,
- .msg = "hgc_iost_accbad_intr (0x%x) found: Ram address is 0x%08X\n",
+ .msg = "hgc_iost_eccbad_intr",
.reg = HGC_IOST_ECC_ADDR,
},
{
.irq_msk = BIT(SAS_ECC_INTR_ITCT_ECC_MB_OFF),
.msk = HGC_ITCT_ECC_MB_ADDR_MSK,
.shift = HGC_ITCT_ECC_MB_ADDR_OFF,
- .msg = "hgc_itct_accbad_intr (0x%x) found: Ram address is 0x%08X\n",
+ .msg = "hgc_itct_eccbad_intr",
.reg = HGC_ITCT_ECC_ADDR,
},
{
.irq_msk = BIT(SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF),
.msk = HGC_LM_DFX_STATUS2_IOSTLIST_MSK,
.shift = HGC_LM_DFX_STATUS2_IOSTLIST_OFF,
- .msg = "hgc_iostl_accbad_intr (0x%x) found: memory address is 0x%08X\n",
+ .msg = "hgc_iostl_eccbad_intr",
.reg = HGC_LM_DFX_STATUS2,
},
{
.irq_msk = BIT(SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF),
.msk = HGC_LM_DFX_STATUS2_ITCTLIST_MSK,
.shift = HGC_LM_DFX_STATUS2_ITCTLIST_OFF,
- .msg = "hgc_itctl_accbad_intr (0x%x) found: memory address is 0x%08X\n",
+ .msg = "hgc_itctl_eccbad_intr",
.reg = HGC_LM_DFX_STATUS2,
},
{
.irq_msk = BIT(SAS_ECC_INTR_CQE_ECC_MB_OFF),
.msk = HGC_CQE_ECC_MB_ADDR_MSK,
.shift = HGC_CQE_ECC_MB_ADDR_OFF,
- .msg = "hgc_cqe_accbad_intr (0x%x) found: Ram address is 0x%08X\n",
+ .msg = "hgc_cqe_eccbad_intr",
.reg = HGC_CQE_ECC_ADDR,
},
{
.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF),
.msk = HGC_RXM_DFX_STATUS14_MEM0_MSK,
.shift = HGC_RXM_DFX_STATUS14_MEM0_OFF,
- .msg = "rxm_mem0_accbad_intr (0x%x) found: memory address is 0x%08X\n",
+ .msg = "rxm_mem0_eccbad_intr",
.reg = HGC_RXM_DFX_STATUS14,
},
{
.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF),
.msk = HGC_RXM_DFX_STATUS14_MEM1_MSK,
.shift = HGC_RXM_DFX_STATUS14_MEM1_OFF,
- .msg = "rxm_mem1_accbad_intr (0x%x) found: memory address is 0x%08X\n",
+ .msg = "rxm_mem1_eccbad_intr",
.reg = HGC_RXM_DFX_STATUS14,
},
{
.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF),
.msk = HGC_RXM_DFX_STATUS14_MEM2_MSK,
.shift = HGC_RXM_DFX_STATUS14_MEM2_OFF,
- .msg = "rxm_mem2_accbad_intr (0x%x) found: memory address is 0x%08X\n",
+ .msg = "rxm_mem2_eccbad_intr",
.reg = HGC_RXM_DFX_STATUS14,
},
{
.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF),
.msk = HGC_RXM_DFX_STATUS15_MEM3_MSK,
.shift = HGC_RXM_DFX_STATUS15_MEM3_OFF,
- .msg = "rxm_mem3_accbad_intr (0x%x) found: memory address is 0x%08X\n",
+ .msg = "rxm_mem3_eccbad_intr",
.reg = HGC_RXM_DFX_STATUS15,
},
};
@@ -944,7 +944,7 @@ static void setup_itct_v2_hw(struct hisi_hba *hisi_hba,
break;
case SAS_SATA_DEV:
case SAS_SATA_PENDING:
- if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type))
+ if (parent_dev && dev_is_expander(parent_dev->dev_type))
qw0 = HISI_SAS_DEV_TYPE_STP << ITCT_HDR_DEV_TYPE_OFF;
else
qw0 = HISI_SAS_DEV_TYPE_SATA << ITCT_HDR_DEV_TYPE_OFF;
@@ -2526,7 +2526,7 @@ static void prep_ata_v2_hw(struct hisi_hba *hisi_hba,
/* create header */
/* dw0 */
dw0 = port->id << CMD_HDR_PORT_OFF;
- if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type))
+ if (parent_dev && dev_is_expander(parent_dev->dev_type))
dw0 |= 3 << CMD_HDR_CMD_OFF;
else
dw0 |= 4 << CMD_HDR_CMD_OFF;
@@ -2973,7 +2973,8 @@ one_bit_ecc_error_process_v2_hw(struct hisi_hba *hisi_hba, u32 irq_value)
val = hisi_sas_read32(hisi_hba, ecc_error->reg);
val &= ecc_error->msk;
val >>= ecc_error->shift;
- dev_warn(dev, ecc_error->msg, val);
+ dev_warn(dev, "%s found: mem addr is 0x%08X\n",
+ ecc_error->msg, val);
}
}
}
@@ -2992,7 +2993,8 @@ static void multi_bit_ecc_error_process_v2_hw(struct hisi_hba *hisi_hba,
val = hisi_sas_read32(hisi_hba, ecc_error->reg);
val &= ecc_error->msk;
val >>= ecc_error->shift;
- dev_err(dev, ecc_error->msg, irq_value, val);
+ dev_err(dev, "%s (0x%x) found: mem addr is 0x%08X\n",
+ ecc_error->msg, irq_value, val);
queue_work(hisi_hba->wq, &hisi_hba->rst_work);
}
}
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 0efd55baacd3..5f0f6df11adf 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -23,6 +23,7 @@
#define ITCT_CLR_EN_MSK (0x1 << ITCT_CLR_EN_OFF)
#define ITCT_DEV_OFF 0
#define ITCT_DEV_MSK (0x7ff << ITCT_DEV_OFF)
+#define SAS_AXI_USER3 0x50
#define IO_SATA_BROKEN_MSG_ADDR_LO 0x58
#define IO_SATA_BROKEN_MSG_ADDR_HI 0x5c
#define SATA_INITI_D2H_STORE_ADDR_LO 0x60
@@ -549,6 +550,7 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
/* Global registers init */
hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE,
(u32)((1ULL << hisi_hba->queue_count) - 1));
+ hisi_sas_write32(hisi_hba, SAS_AXI_USER3, 0);
hisi_sas_write32(hisi_hba, CFG_MAX_TAG, 0xfff0400);
hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x108);
hisi_sas_write32(hisi_hba, CFG_AGING_TIME, 0x1);
@@ -752,7 +754,7 @@ static void setup_itct_v3_hw(struct hisi_hba *hisi_hba,
break;
case SAS_SATA_DEV:
case SAS_SATA_PENDING:
- if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type))
+ if (parent_dev && dev_is_expander(parent_dev->dev_type))
qw0 = HISI_SAS_DEV_TYPE_STP << ITCT_HDR_DEV_TYPE_OFF;
else
qw0 = HISI_SAS_DEV_TYPE_SATA << ITCT_HDR_DEV_TYPE_OFF;
@@ -906,8 +908,14 @@ static void enable_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
static void disable_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
{
u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);
+ u32 irq_msk = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2_MSK);
+ static const u32 msk = BIT(CHL_INT2_RX_DISP_ERR_OFF) |
+ BIT(CHL_INT2_RX_CODE_ERR_OFF) |
+ BIT(CHL_INT2_RX_INVLD_DW_OFF);
u32 state;
+ hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2_MSK, msk | irq_msk);
+
cfg &= ~PHY_CFG_ENA_MSK;
hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
@@ -918,6 +926,15 @@ static void disable_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
cfg |= PHY_CFG_PHY_RST_MSK;
hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
}
+
+ udelay(1);
+
+ hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_INVLD_DW);
+ hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_DISP_ERR);
+ hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_CODE_ERR);
+
+ hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2, msk);
+ hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2_MSK, irq_msk);
}
static void start_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
@@ -1336,10 +1353,10 @@ static void prep_ata_v3_hw(struct hisi_hba *hisi_hba,
u32 dw1 = 0, dw2 = 0;
hdr->dw0 = cpu_to_le32(port->id << CMD_HDR_PORT_OFF);
- if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type))
+ if (parent_dev && dev_is_expander(parent_dev->dev_type))
hdr->dw0 |= cpu_to_le32(3 << CMD_HDR_CMD_OFF);
else
- hdr->dw0 |= cpu_to_le32(4 << CMD_HDR_CMD_OFF);
+ hdr->dw0 |= cpu_to_le32(4U << CMD_HDR_CMD_OFF);
switch (task->data_dir) {
case DMA_TO_DEVICE:
@@ -1407,7 +1424,7 @@ static void prep_abort_v3_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_port *port = slot->port;
/* dw0 */
- hdr->dw0 = cpu_to_le32((5 << CMD_HDR_CMD_OFF) | /*abort*/
+ hdr->dw0 = cpu_to_le32((5U << CMD_HDR_CMD_OFF) | /*abort*/
(port->id << CMD_HDR_PORT_OFF) |
(dev_is_sata(dev)
<< CMD_HDR_ABORT_DEVICE_TYPE_OFF) |
@@ -1826,77 +1843,77 @@ static const struct hisi_sas_hw_error multi_bit_ecc_errors[] = {
.irq_msk = BIT(SAS_ECC_INTR_DQE_ECC_MB_OFF),
.msk = HGC_DQE_ECC_MB_ADDR_MSK,
.shift = HGC_DQE_ECC_MB_ADDR_OFF,
- .msg = "hgc_dqe_eccbad_intr found: ram addr is 0x%08X\n",
+ .msg = "hgc_dqe_eccbad_intr",
.reg = HGC_DQE_ECC_ADDR,
},
{
.irq_msk = BIT(SAS_ECC_INTR_IOST_ECC_MB_OFF),
.msk = HGC_IOST_ECC_MB_ADDR_MSK,
.shift = HGC_IOST_ECC_MB_ADDR_OFF,
- .msg = "hgc_iost_eccbad_intr found: ram addr is 0x%08X\n",
+ .msg = "hgc_iost_eccbad_intr",
.reg = HGC_IOST_ECC_ADDR,
},
{
.irq_msk = BIT(SAS_ECC_INTR_ITCT_ECC_MB_OFF),
.msk = HGC_ITCT_ECC_MB_ADDR_MSK,
.shift = HGC_ITCT_ECC_MB_ADDR_OFF,
- .msg = "hgc_itct_eccbad_intr found: ram addr is 0x%08X\n",
+ .msg = "hgc_itct_eccbad_intr",
.reg = HGC_ITCT_ECC_ADDR,
},
{
.irq_msk = BIT(SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF),
.msk = HGC_LM_DFX_STATUS2_IOSTLIST_MSK,
.shift = HGC_LM_DFX_STATUS2_IOSTLIST_OFF,
- .msg = "hgc_iostl_eccbad_intr found: mem addr is 0x%08X\n",
+ .msg = "hgc_iostl_eccbad_intr",
.reg = HGC_LM_DFX_STATUS2,
},
{
.irq_msk = BIT(SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF),
.msk = HGC_LM_DFX_STATUS2_ITCTLIST_MSK,
.shift = HGC_LM_DFX_STATUS2_ITCTLIST_OFF,
- .msg = "hgc_itctl_eccbad_intr found: mem addr is 0x%08X\n",
+ .msg = "hgc_itctl_eccbad_intr",
.reg = HGC_LM_DFX_STATUS2,
},
{
.irq_msk = BIT(SAS_ECC_INTR_CQE_ECC_MB_OFF),
.msk = HGC_CQE_ECC_MB_ADDR_MSK,
.shift = HGC_CQE_ECC_MB_ADDR_OFF,
- .msg = "hgc_cqe_eccbad_intr found: ram address is 0x%08X\n",
+ .msg = "hgc_cqe_eccbad_intr",
.reg = HGC_CQE_ECC_ADDR,
},
{
.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF),
.msk = HGC_RXM_DFX_STATUS14_MEM0_MSK,
.shift = HGC_RXM_DFX_STATUS14_MEM0_OFF,
- .msg = "rxm_mem0_eccbad_intr found: mem addr is 0x%08X\n",
+ .msg = "rxm_mem0_eccbad_intr",
.reg = HGC_RXM_DFX_STATUS14,
},
{
.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF),
.msk = HGC_RXM_DFX_STATUS14_MEM1_MSK,
.shift = HGC_RXM_DFX_STATUS14_MEM1_OFF,
- .msg = "rxm_mem1_eccbad_intr found: mem addr is 0x%08X\n",
+ .msg = "rxm_mem1_eccbad_intr",
.reg = HGC_RXM_DFX_STATUS14,
},
{
.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF),
.msk = HGC_RXM_DFX_STATUS14_MEM2_MSK,
.shift = HGC_RXM_DFX_STATUS14_MEM2_OFF,
- .msg = "rxm_mem2_eccbad_intr found: mem addr is 0x%08X\n",
+ .msg = "rxm_mem2_eccbad_intr",
.reg = HGC_RXM_DFX_STATUS14,
},
{
.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF),
.msk = HGC_RXM_DFX_STATUS15_MEM3_MSK,
.shift = HGC_RXM_DFX_STATUS15_MEM3_OFF,
- .msg = "rxm_mem3_eccbad_intr found: mem addr is 0x%08X\n",
+ .msg = "rxm_mem3_eccbad_intr",
.reg = HGC_RXM_DFX_STATUS15,
},
{
.irq_msk = BIT(SAS_ECC_INTR_OOO_RAM_ECC_MB_OFF),
.msk = AM_ROB_ECC_ERR_ADDR_MSK,
.shift = AM_ROB_ECC_ERR_ADDR_OFF,
- .msg = "ooo_ram_eccbad_intr found: ROB_ECC_ERR_ADDR=0x%08X\n",
+ .msg = "ooo_ram_eccbad_intr",
.reg = AM_ROB_ECC_ERR_ADDR,
},
};
@@ -1915,7 +1932,8 @@ static void multi_bit_ecc_error_process_v3_hw(struct hisi_hba *hisi_hba,
val = hisi_sas_read32(hisi_hba, ecc_error->reg);
val &= ecc_error->msk;
val >>= ecc_error->shift;
- dev_err(dev, ecc_error->msg, irq_value, val);
+ dev_err(dev, "%s (0x%x) found: mem addr is 0x%08X\n",
+ ecc_error->msg, irq_value, val);
queue_work(hisi_hba->wq, &hisi_hba->rst_work);
}
}
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 8068520cf89e..43a6b5350775 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -60,7 +60,7 @@
* HPSA_DRIVER_VERSION must consist of three byte values (0-255) separated by '.'
* with an optional trailing '-' followed by a byte value (0-255).
*/
-#define HPSA_DRIVER_VERSION "3.4.20-160"
+#define HPSA_DRIVER_VERSION "3.4.20-170"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"
@@ -73,6 +73,8 @@
/* define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3
+/* How long to wait before giving up on a command */
+#define HPSA_EH_PTRAID_TIMEOUT (240 * HZ)
/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
@@ -344,11 +346,6 @@ static inline bool hpsa_is_cmd_idle(struct CommandList *c)
return c->scsi_cmd == SCSI_CMD_IDLE;
}
-static inline bool hpsa_is_pending_event(struct CommandList *c)
-{
- return c->reset_pending;
-}
-
/* extract sense key, asc, and ascq from sense data. -1 means invalid. */
static void decode_sense_data(const u8 *sense_data, int sense_data_len,
u8 *sense_key, u8 *asc, u8 *ascq)
@@ -1144,6 +1141,8 @@ static void __enqueue_cmd_and_start_io(struct ctlr_info *h,
{
dial_down_lockup_detection_during_fw_flash(h, c);
atomic_inc(&h->commands_outstanding);
+ if (c->device)
+ atomic_inc(&c->device->commands_outstanding);
reply_queue = h->reply_map[raw_smp_processor_id()];
switch (c->cmd_type) {
@@ -1167,9 +1166,6 @@ static void __enqueue_cmd_and_start_io(struct ctlr_info *h,
static void enqueue_cmd_and_start_io(struct ctlr_info *h, struct CommandList *c)
{
- if (unlikely(hpsa_is_pending_event(c)))
- return finish_cmd(c);
-
__enqueue_cmd_and_start_io(h, c, DEFAULT_REPLY_QUEUE);
}
@@ -1842,25 +1838,33 @@ static int hpsa_find_outstanding_commands_for_dev(struct ctlr_info *h,
return count;
}
+#define NUM_WAIT 20
static void hpsa_wait_for_outstanding_commands_for_dev(struct ctlr_info *h,
struct hpsa_scsi_dev_t *device)
{
int cmds = 0;
int waits = 0;
+ int num_wait = NUM_WAIT;
+
+ if (device->external)
+ num_wait = HPSA_EH_PTRAID_TIMEOUT;
while (1) {
cmds = hpsa_find_outstanding_commands_for_dev(h, device);
if (cmds == 0)
break;
- if (++waits > 20)
+ if (++waits > num_wait)
break;
msleep(1000);
}
- if (waits > 20)
+ if (waits > num_wait) {
dev_warn(&h->pdev->dev,
- "%s: removing device with %d outstanding commands!\n",
- __func__, cmds);
+ "%s: removing device [%d:%d:%d:%d] with %d outstanding commands!\n",
+ __func__,
+ h->scsi_host->host_no,
+ device->bus, device->target, device->lun, cmds);
+ }
}
static void hpsa_remove_device(struct ctlr_info *h,
@@ -2131,11 +2135,16 @@ static int hpsa_slave_configure(struct scsi_device *sdev)
sdev->no_uld_attach = !sd || !sd->expose_device;
if (sd) {
- if (sd->external)
+ sd->was_removed = 0;
+ if (sd->external) {
queue_depth = EXTERNAL_QD;
- else
+ sdev->eh_timeout = HPSA_EH_PTRAID_TIMEOUT;
+ blk_queue_rq_timeout(sdev->request_queue,
+ HPSA_EH_PTRAID_TIMEOUT);
+ } else {
queue_depth = sd->queue_depth != 0 ?
sd->queue_depth : sdev->host->can_queue;
+ }
} else
queue_depth = sdev->host->can_queue;
@@ -2146,7 +2155,12 @@ static int hpsa_slave_configure(struct scsi_device *sdev)
static void hpsa_slave_destroy(struct scsi_device *sdev)
{
- /* nothing to do. */
+ struct hpsa_scsi_dev_t *hdev = NULL;
+
+ hdev = sdev->hostdata;
+
+ if (hdev)
+ hdev->was_removed = 1;
}
static void hpsa_free_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
@@ -2414,13 +2428,16 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h,
break;
}
+ if (dev->in_reset)
+ retry = 0;
+
return retry; /* retry on raid path? */
}
static void hpsa_cmd_resolve_events(struct ctlr_info *h,
struct CommandList *c)
{
- bool do_wake = false;
+ struct hpsa_scsi_dev_t *dev = c->device;
/*
* Reset c->scsi_cmd here so that the reset handler will know
@@ -2429,25 +2446,12 @@ static void hpsa_cmd_resolve_events(struct ctlr_info *h,
*/
c->scsi_cmd = SCSI_CMD_IDLE;
mb(); /* Declare command idle before checking for pending events. */
- if (c->reset_pending) {
- unsigned long flags;
- struct hpsa_scsi_dev_t *dev;
-
- /*
- * There appears to be a reset pending; lock the lock and
- * reconfirm. If so, then decrement the count of outstanding
- * commands and wake the reset command if this is the last one.
- */
- spin_lock_irqsave(&h->lock, flags);
- dev = c->reset_pending; /* Re-fetch under the lock. */
- if (dev && atomic_dec_and_test(&dev->reset_cmds_out))
- do_wake = true;
- c->reset_pending = NULL;
- spin_unlock_irqrestore(&h->lock, flags);
+ if (dev) {
+ atomic_dec(&dev->commands_outstanding);
+ if (dev->in_reset &&
+ atomic_read(&dev->commands_outstanding) <= 0)
+ wake_up_all(&h->event_sync_wait_queue);
}
-
- if (do_wake)
- wake_up_all(&h->event_sync_wait_queue);
}
static void hpsa_cmd_resolve_and_free(struct ctlr_info *h,
@@ -2496,6 +2500,11 @@ static void process_ioaccel2_completion(struct ctlr_info *h,
dev->offload_to_be_enabled = 0;
}
+ if (dev->in_reset) {
+ cmd->result = DID_RESET << 16;
+ return hpsa_cmd_free_and_done(h, c, cmd);
+ }
+
return hpsa_retry_cmd(h, c);
}
@@ -2574,6 +2583,12 @@ static void complete_scsi_command(struct CommandList *cp)
cmd->result = (DID_OK << 16); /* host byte */
cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
+ /* SCSI command has already been cleaned up in SML */
+ if (dev->was_removed) {
+ hpsa_cmd_resolve_and_free(h, cp);
+ return;
+ }
+
if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1) {
if (dev->physical_device && dev->expose_device &&
dev->removed) {
@@ -2595,10 +2610,6 @@ static void complete_scsi_command(struct CommandList *cp)
return hpsa_cmd_free_and_done(h, cp, cmd);
}
- if ((unlikely(hpsa_is_pending_event(cp))))
- if (cp->reset_pending)
- return hpsa_cmd_free_and_done(h, cp, cmd);
-
if (cp->cmd_type == CMD_IOACCEL2)
return process_ioaccel2_completion(h, cp, cmd, dev);
@@ -3048,7 +3059,7 @@ out:
return rc;
}
-static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
+static int hpsa_send_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
u8 reset_type, int reply_queue)
{
int rc = IO_OK;
@@ -3056,11 +3067,10 @@ static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
struct ErrorInfo *ei;
c = cmd_alloc(h);
-
+ c->device = dev;
/* fill_cmd can't fail here, no data buffer to map. */
- (void) fill_cmd(c, reset_type, h, NULL, 0, 0,
- scsi3addr, TYPE_MSG);
+ (void) fill_cmd(c, reset_type, h, NULL, 0, 0, dev->scsi3addr, TYPE_MSG);
rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
if (rc) {
dev_warn(&h->pdev->dev, "Failed to send reset command\n");
@@ -3138,9 +3148,8 @@ static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
}
static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
- unsigned char *scsi3addr, u8 reset_type, int reply_queue)
+ u8 reset_type, int reply_queue)
{
- int i;
int rc = 0;
/* We can really only handle one reset at a time */
@@ -3149,38 +3158,14 @@ static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
return -EINTR;
}
- BUG_ON(atomic_read(&dev->reset_cmds_out) != 0);
-
- for (i = 0; i < h->nr_cmds; i++) {
- struct CommandList *c = h->cmd_pool + i;
- int refcount = atomic_inc_return(&c->refcount);
-
- if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev, scsi3addr)) {
- unsigned long flags;
-
- /*
- * Mark the target command as having a reset pending,
- * then lock a lock so that the command cannot complete
- * while we're considering it. If the command is not
- * idle then count it; otherwise revoke the event.
- */
- c->reset_pending = dev;
- spin_lock_irqsave(&h->lock, flags); /* Implied MB */
- if (!hpsa_is_cmd_idle(c))
- atomic_inc(&dev->reset_cmds_out);
- else
- c->reset_pending = NULL;
- spin_unlock_irqrestore(&h->lock, flags);
- }
-
- cmd_free(h, c);
- }
-
- rc = hpsa_send_reset(h, scsi3addr, reset_type, reply_queue);
- if (!rc)
+ rc = hpsa_send_reset(h, dev, reset_type, reply_queue);
+ if (!rc) {
+ /* commands_outstanding was incremented when the reset request was sent */
+ atomic_dec(&dev->commands_outstanding);
wait_event(h->event_sync_wait_queue,
- atomic_read(&dev->reset_cmds_out) == 0 ||
+ atomic_read(&dev->commands_outstanding) <= 0 ||
lockup_detected(h));
+ }
if (unlikely(lockup_detected(h))) {
dev_warn(&h->pdev->dev,
@@ -3188,10 +3173,8 @@ static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
rc = -ENODEV;
}
- if (unlikely(rc))
- atomic_set(&dev->reset_cmds_out, 0);
- else
- rc = wait_for_device_to_become_ready(h, scsi3addr, 0);
+ if (!rc)
+ rc = wait_for_device_to_become_ready(h, dev->scsi3addr, 0);
mutex_unlock(&h->reset_mutex);
return rc;
@@ -4820,6 +4803,9 @@ static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
c->phys_disk = dev;
+ if (dev->in_reset)
+ return -1;
+
return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
cmd->cmnd, cmd->cmd_len, dev->scsi3addr, dev);
}
@@ -5010,6 +4996,11 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
} else
cp->sg_count = (u8) use_sg;
+ if (phys_disk->in_reset) {
+ cmd->result = DID_RESET << 16;
+ return -1;
+ }
+
enqueue_cmd_and_start_io(h, c);
return 0;
}
@@ -5027,6 +5018,9 @@ static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
if (!c->scsi_cmd->device->hostdata)
return -1;
+ if (phys_disk->in_reset)
+ return -1;
+
/* Try to honor the device's queue depth */
if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) >
phys_disk->queue_depth) {
@@ -5110,6 +5104,9 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
if (!dev)
return -1;
+ if (dev->in_reset)
+ return -1;
+
/* check for valid opcode, get LBA and block count */
switch (cmd->cmnd[0]) {
case WRITE_6:
@@ -5414,13 +5411,13 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
*/
static int hpsa_ciss_submit(struct ctlr_info *h,
struct CommandList *c, struct scsi_cmnd *cmd,
- unsigned char scsi3addr[])
+ struct hpsa_scsi_dev_t *dev)
{
cmd->host_scribble = (unsigned char *) c;
c->cmd_type = CMD_SCSI;
c->scsi_cmd = cmd;
c->Header.ReplyQueue = 0; /* unused in simple mode */
- memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
+ memcpy(&c->Header.LUN.LunAddrBytes[0], &dev->scsi3addr[0], 8);
c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT));
/* Fill in the request block... */
@@ -5471,6 +5468,12 @@ static int hpsa_ciss_submit(struct ctlr_info *h,
hpsa_cmd_resolve_and_free(h, c);
return SCSI_MLQUEUE_HOST_BUSY;
}
+
+ if (dev->in_reset) {
+ hpsa_cmd_resolve_and_free(h, c);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
enqueue_cmd_and_start_io(h, c);
/* the cmd'll come back via intr handler in complete_scsi_command() */
return 0;
@@ -5522,8 +5525,7 @@ static inline void hpsa_cmd_partial_init(struct ctlr_info *h, int index,
}
static int hpsa_ioaccel_submit(struct ctlr_info *h,
- struct CommandList *c, struct scsi_cmnd *cmd,
- unsigned char *scsi3addr)
+ struct CommandList *c, struct scsi_cmnd *cmd)
{
struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
int rc = IO_ACCEL_INELIGIBLE;
@@ -5531,6 +5533,12 @@ static int hpsa_ioaccel_submit(struct ctlr_info *h,
if (!dev)
return SCSI_MLQUEUE_HOST_BUSY;
+ if (dev->in_reset)
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ if (hpsa_simple_mode)
+ return IO_ACCEL_INELIGIBLE;
+
cmd->host_scribble = (unsigned char *) c;
if (dev->offload_enabled) {
@@ -5563,8 +5571,12 @@ static void hpsa_command_resubmit_worker(struct work_struct *work)
cmd->result = DID_NO_CONNECT << 16;
return hpsa_cmd_free_and_done(c->h, c, cmd);
}
- if (c->reset_pending)
+
+ if (dev->in_reset) {
+ cmd->result = DID_RESET << 16;
return hpsa_cmd_free_and_done(c->h, c, cmd);
+ }
+
if (c->cmd_type == CMD_IOACCEL2) {
struct ctlr_info *h = c->h;
struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
@@ -5572,7 +5584,7 @@ static void hpsa_command_resubmit_worker(struct work_struct *work)
if (c2->error_data.serv_response ==
IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL) {
- rc = hpsa_ioaccel_submit(h, c, cmd, dev->scsi3addr);
+ rc = hpsa_ioaccel_submit(h, c, cmd);
if (rc == 0)
return;
if (rc == SCSI_MLQUEUE_HOST_BUSY) {
@@ -5588,7 +5600,7 @@ static void hpsa_command_resubmit_worker(struct work_struct *work)
}
}
hpsa_cmd_partial_init(c->h, c->cmdindex, c);
- if (hpsa_ciss_submit(c->h, c, cmd, dev->scsi3addr)) {
+ if (hpsa_ciss_submit(c->h, c, cmd, dev)) {
/*
* If we get here, it means dma mapping failed. Try
* again via scsi mid layer, which will then get
@@ -5607,7 +5619,6 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
{
struct ctlr_info *h;
struct hpsa_scsi_dev_t *dev;
- unsigned char scsi3addr[8];
struct CommandList *c;
int rc = 0;
@@ -5629,14 +5640,18 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
return 0;
}
- memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
-
if (unlikely(lockup_detected(h))) {
cmd->result = DID_NO_CONNECT << 16;
cmd->scsi_done(cmd);
return 0;
}
+
+ if (dev->in_reset)
+ return SCSI_MLQUEUE_DEVICE_BUSY;
+
c = cmd_tagged_alloc(h, cmd);
+ if (c == NULL)
+ return SCSI_MLQUEUE_DEVICE_BUSY;
/*
* Call alternate submit routine for I/O accelerated commands.
@@ -5645,7 +5660,7 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
if (likely(cmd->retries == 0 &&
!blk_rq_is_passthrough(cmd->request) &&
h->acciopath_status)) {
- rc = hpsa_ioaccel_submit(h, c, cmd, scsi3addr);
+ rc = hpsa_ioaccel_submit(h, c, cmd);
if (rc == 0)
return 0;
if (rc == SCSI_MLQUEUE_HOST_BUSY) {
@@ -5653,7 +5668,7 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
return SCSI_MLQUEUE_HOST_BUSY;
}
}
- return hpsa_ciss_submit(h, c, cmd, scsi3addr);
+ return hpsa_ciss_submit(h, c, cmd, dev);
}
static void hpsa_scan_complete(struct ctlr_info *h)
@@ -5935,8 +5950,9 @@ static int wait_for_device_to_become_ready(struct ctlr_info *h,
static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
{
int rc = SUCCESS;
+ int i;
struct ctlr_info *h;
- struct hpsa_scsi_dev_t *dev;
+ struct hpsa_scsi_dev_t *dev = NULL;
u8 reset_type;
char msg[48];
unsigned long flags;
@@ -6002,9 +6018,19 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ");
hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
+ /*
+ * Wait to see if any commands will complete before sending the reset.
+ */
+ dev->in_reset = true; /* block any new cmds from OS for this device */
+ for (i = 0; i < 10; i++) {
+ if (atomic_read(&dev->commands_outstanding) > 0)
+ msleep(1000);
+ else
+ break;
+ }
+
/* send a reset to the SCSI LUN which the command was sent to */
- rc = hpsa_do_reset(h, dev, dev->scsi3addr, reset_type,
- DEFAULT_REPLY_QUEUE);
+ rc = hpsa_do_reset(h, dev, reset_type, DEFAULT_REPLY_QUEUE);
if (rc == 0)
rc = SUCCESS;
else
@@ -6018,6 +6044,8 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
return_reset_status:
spin_lock_irqsave(&h->reset_lock, flags);
h->reset_in_progress = 0;
+ if (dev)
+ dev->in_reset = false;
spin_unlock_irqrestore(&h->reset_lock, flags);
return rc;
}
@@ -6043,7 +6071,6 @@ static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
BUG();
}
- atomic_inc(&c->refcount);
if (unlikely(!hpsa_is_cmd_idle(c))) {
/*
* We expect that the SCSI layer will hand us a unique tag
@@ -6051,14 +6078,20 @@ static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
* two requests...because if the selected command isn't idle
* then someone is going to be very disappointed.
*/
- dev_err(&h->pdev->dev,
- "tag collision (tag=%d) in cmd_tagged_alloc().\n",
- idx);
- if (c->scsi_cmd != NULL)
- scsi_print_command(c->scsi_cmd);
- scsi_print_command(scmd);
+ if (idx != h->last_collision_tag) { /* Print once per tag */
+ dev_warn(&h->pdev->dev,
+ "%s: tag collision (tag=%d)\n", __func__, idx);
+ if (c->scsi_cmd != NULL)
+ scsi_print_command(c->scsi_cmd);
+ if (scmd)
+ scsi_print_command(scmd);
+ h->last_collision_tag = idx;
+ }
+ return NULL;
}
+ atomic_inc(&c->refcount);
+
hpsa_cmd_partial_init(h, idx, c);
return c;
}
@@ -6126,6 +6159,7 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h)
break; /* it's ours now. */
}
hpsa_cmd_partial_init(h, i, c);
+ c->device = NULL;
return c;
}
@@ -6579,8 +6613,7 @@ static int hpsa_ioctl(struct scsi_device *dev, unsigned int cmd,
}
}
-static void hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
- u8 reset_type)
+static void hpsa_send_host_reset(struct ctlr_info *h, u8 reset_type)
{
struct CommandList *c;
@@ -7765,7 +7798,7 @@ static void hpsa_free_pci_init(struct ctlr_info *h)
hpsa_disable_interrupt_mode(h); /* pci_init 2 */
/*
* call pci_disable_device before pci_release_regions per
- * Documentation/PCI/pci.txt
+ * Documentation/PCI/pci.rst
*/
pci_disable_device(h->pdev); /* pci_init 1 */
pci_release_regions(h->pdev); /* pci_init 2 */
@@ -7848,7 +7881,7 @@ clean2: /* intmode+region, pci */
clean1:
/*
* call pci_disable_device before pci_release_regions per
- * Documentation/PCI/pci.txt
+ * Documentation/PCI/pci.rst
*/
pci_disable_device(h->pdev);
pci_release_regions(h->pdev);
@@ -7983,10 +8016,15 @@ clean_up:
static void hpsa_free_irqs(struct ctlr_info *h)
{
int i;
+ int irq_vector = 0;
+
+ if (hpsa_simple_mode)
+ irq_vector = h->intr_mode;
if (!h->msix_vectors || h->intr_mode != PERF_MODE_INT) {
/* Single reply queue, only one irq to free */
- free_irq(pci_irq_vector(h->pdev, 0), &h->q[h->intr_mode]);
+ free_irq(pci_irq_vector(h->pdev, irq_vector),
+ &h->q[h->intr_mode]);
h->q[h->intr_mode] = 0;
return;
}
@@ -8005,6 +8043,10 @@ static int hpsa_request_irqs(struct ctlr_info *h,
irqreturn_t (*intxhandler)(int, void *))
{
int rc, i;
+ int irq_vector = 0;
+
+ if (hpsa_simple_mode)
+ irq_vector = h->intr_mode;
/*
* initialize h->q[x] = x so that interrupt handlers know which
@@ -8040,14 +8082,14 @@ static int hpsa_request_irqs(struct ctlr_info *h,
if (h->msix_vectors > 0 || h->pdev->msi_enabled) {
sprintf(h->intrname[0], "%s-msi%s", h->devname,
h->msix_vectors ? "x" : "");
- rc = request_irq(pci_irq_vector(h->pdev, 0),
+ rc = request_irq(pci_irq_vector(h->pdev, irq_vector),
msixhandler, 0,
h->intrname[0],
&h->q[h->intr_mode]);
} else {
sprintf(h->intrname[h->intr_mode],
"%s-intx", h->devname);
- rc = request_irq(pci_irq_vector(h->pdev, 0),
+ rc = request_irq(pci_irq_vector(h->pdev, irq_vector),
intxhandler, IRQF_SHARED,
h->intrname[0],
&h->q[h->intr_mode]);
@@ -8055,7 +8097,7 @@ static int hpsa_request_irqs(struct ctlr_info *h,
}
if (rc) {
dev_err(&h->pdev->dev, "failed to get irq %d for %s\n",
- pci_irq_vector(h->pdev, 0), h->devname);
+ pci_irq_vector(h->pdev, irq_vector), h->devname);
hpsa_free_irqs(h);
return -ENODEV;
}
@@ -8065,7 +8107,7 @@ static int hpsa_request_irqs(struct ctlr_info *h,
static int hpsa_kdump_soft_reset(struct ctlr_info *h)
{
int rc;
- hpsa_send_host_reset(h, RAID_CTLR_LUNID, HPSA_RESET_TYPE_CONTROLLER);
+ hpsa_send_host_reset(h, HPSA_RESET_TYPE_CONTROLLER);
dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY);
@@ -8121,6 +8163,11 @@ static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
destroy_workqueue(h->rescan_ctlr_wq);
h->rescan_ctlr_wq = NULL;
}
+ if (h->monitor_ctlr_wq) {
+ destroy_workqueue(h->monitor_ctlr_wq);
+ h->monitor_ctlr_wq = NULL;
+ }
+
kfree(h); /* init_one 1 */
}
@@ -8456,8 +8503,8 @@ static void hpsa_event_monitor_worker(struct work_struct *work)
spin_lock_irqsave(&h->lock, flags);
if (!h->remove_in_progress)
- schedule_delayed_work(&h->event_monitor_work,
- HPSA_EVENT_MONITOR_INTERVAL);
+ queue_delayed_work(h->monitor_ctlr_wq, &h->event_monitor_work,
+ HPSA_EVENT_MONITOR_INTERVAL);
spin_unlock_irqrestore(&h->lock, flags);
}
@@ -8502,7 +8549,7 @@ static void hpsa_monitor_ctlr_worker(struct work_struct *work)
spin_lock_irqsave(&h->lock, flags);
if (!h->remove_in_progress)
- schedule_delayed_work(&h->monitor_ctlr_work,
+ queue_delayed_work(h->monitor_ctlr_wq, &h->monitor_ctlr_work,
h->heartbeat_sample_interval);
spin_unlock_irqrestore(&h->lock, flags);
}
@@ -8670,6 +8717,12 @@ reinit_after_soft_reset:
goto clean7; /* aer/h */
}
+ h->monitor_ctlr_wq = hpsa_create_controller_wq(h, "monitor");
+ if (!h->monitor_ctlr_wq) {
+ rc = -ENOMEM;
+ goto clean7;
+ }
+
/*
* At this point, the controller is ready to take commands.
* Now, if reset_devices and the hard reset didn't work, try
@@ -8799,6 +8852,10 @@ clean1: /* wq/aer/h */
destroy_workqueue(h->rescan_ctlr_wq);
h->rescan_ctlr_wq = NULL;
}
+ if (h->monitor_ctlr_wq) {
+ destroy_workqueue(h->monitor_ctlr_wq);
+ h->monitor_ctlr_wq = NULL;
+ }
kfree(h);
return rc;
}
@@ -8946,6 +9003,7 @@ static void hpsa_remove_one(struct pci_dev *pdev)
cancel_delayed_work_sync(&h->event_monitor_work);
destroy_workqueue(h->rescan_ctlr_wq);
destroy_workqueue(h->resubmit_wq);
+ destroy_workqueue(h->monitor_ctlr_wq);
hpsa_delete_sas_host(h);
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 59e023696fff..f8c88fc7b80a 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -65,6 +65,7 @@ struct hpsa_scsi_dev_t {
u8 physical_device : 1;
u8 expose_device;
u8 removed : 1; /* device is marked for death */
+ u8 was_removed : 1; /* device actually removed */
#define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0"
unsigned char device_id[16]; /* from inquiry pg. 0x83 */
u64 sas_address;
@@ -75,11 +76,12 @@ struct hpsa_scsi_dev_t {
unsigned char raid_level; /* from inquiry page 0xC1 */
unsigned char volume_offline; /* discovered via TUR or VPD */
u16 queue_depth; /* max queue_depth for this device */
- atomic_t reset_cmds_out; /* Count of commands to-be affected */
+ atomic_t commands_outstanding; /* track commands sent to device */
atomic_t ioaccel_cmds_out; /* Only used for physical devices
* counts commands sent to physical
* device via "ioaccel" path.
*/
+ bool in_reset;
u32 ioaccel_handle;
u8 active_path_index;
u8 path_map;
@@ -174,6 +176,7 @@ struct ctlr_info {
struct CfgTable __iomem *cfgtable;
int interrupts_enabled;
int max_commands;
+ int last_collision_tag; /* tags are global */
atomic_t commands_outstanding;
# define PERF_MODE_INT 0
# define DOORBELL_INT 1
@@ -300,6 +303,7 @@ struct ctlr_info {
int needs_abort_tags_swizzled;
struct workqueue_struct *resubmit_wq;
struct workqueue_struct *rescan_ctlr_wq;
+ struct workqueue_struct *monitor_ctlr_wq;
atomic_t abort_cmds_available;
wait_queue_head_t event_sync_wait_queue;
struct mutex reset_mutex;
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index f6afca4b2319..7825cbfea4dc 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -448,7 +448,7 @@ struct CommandList {
struct hpsa_scsi_dev_t *phys_disk;
int abort_pending;
- struct hpsa_scsi_dev_t *reset_pending;
+ struct hpsa_scsi_dev_t *device;
atomic_t refcount; /* Must be last to avoid memset in hpsa_cmd_init() */
} __aligned(COMMANDLIST_ALIGNMENT);
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 4aea97ee4b24..7f66a7783209 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -814,7 +814,7 @@ static void ibmvscsi_reset_host(struct ibmvscsi_host_data *hostdata)
atomic_set(&hostdata->request_limit, 0);
purge_requests(hostdata, DID_ERROR);
- hostdata->reset_crq = 1;
+ hostdata->action = IBMVSCSI_HOST_ACTION_RESET;
wake_up(&hostdata->work_wait_q);
}
@@ -1165,7 +1165,8 @@ static void login_rsp(struct srp_event_struct *evt_struct)
be32_to_cpu(evt_struct->xfer_iu->srp.login_rsp.req_lim_delta));
/* If we had any pending I/Os, kick them */
- scsi_unblock_requests(hostdata->host);
+ hostdata->action = IBMVSCSI_HOST_ACTION_UNBLOCK;
+ wake_up(&hostdata->work_wait_q);
}
/**
@@ -1783,7 +1784,7 @@ static void ibmvscsi_handle_crq(struct viosrp_crq *crq,
/* We need to re-setup the interpartition connection */
dev_info(hostdata->dev, "Re-enabling adapter!\n");
hostdata->client_migrated = 1;
- hostdata->reenable_crq = 1;
+ hostdata->action = IBMVSCSI_HOST_ACTION_REENABLE;
purge_requests(hostdata, DID_REQUEUE);
wake_up(&hostdata->work_wait_q);
} else {
@@ -2036,6 +2037,16 @@ static struct device_attribute ibmvscsi_host_config = {
.show = show_host_config,
};
+static int ibmvscsi_host_reset(struct Scsi_Host *shost, int reset_type)
+{
+ struct ibmvscsi_host_data *hostdata = shost_priv(shost);
+
+ dev_info(hostdata->dev, "Initiating adapter reset!\n");
+ ibmvscsi_reset_host(hostdata);
+
+ return 0;
+}
+
static struct device_attribute *ibmvscsi_attrs[] = {
&ibmvscsi_host_vhost_loc,
&ibmvscsi_host_vhost_name,
@@ -2062,6 +2073,7 @@ static struct scsi_host_template driver_template = {
.eh_host_reset_handler = ibmvscsi_eh_host_reset_handler,
.slave_configure = ibmvscsi_slave_configure,
.change_queue_depth = ibmvscsi_change_queue_depth,
+ .host_reset = ibmvscsi_host_reset,
.cmd_per_lun = IBMVSCSI_CMDS_PER_LUN_DEFAULT,
.can_queue = IBMVSCSI_MAX_REQUESTS_DEFAULT,
.this_id = -1,
@@ -2091,48 +2103,75 @@ static unsigned long ibmvscsi_get_desired_dma(struct vio_dev *vdev)
static void ibmvscsi_do_work(struct ibmvscsi_host_data *hostdata)
{
+ unsigned long flags;
int rc;
char *action = "reset";
- if (hostdata->reset_crq) {
- smp_rmb();
- hostdata->reset_crq = 0;
-
+ spin_lock_irqsave(hostdata->host->host_lock, flags);
+ switch (hostdata->action) {
+ case IBMVSCSI_HOST_ACTION_UNBLOCK:
+ rc = 0;
+ break;
+ case IBMVSCSI_HOST_ACTION_RESET:
+ spin_unlock_irqrestore(hostdata->host->host_lock, flags);
rc = ibmvscsi_reset_crq_queue(&hostdata->queue, hostdata);
+ spin_lock_irqsave(hostdata->host->host_lock, flags);
if (!rc)
rc = ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0);
vio_enable_interrupts(to_vio_dev(hostdata->dev));
- } else if (hostdata->reenable_crq) {
- smp_rmb();
+ break;
+ case IBMVSCSI_HOST_ACTION_REENABLE:
action = "enable";
+ spin_unlock_irqrestore(hostdata->host->host_lock, flags);
rc = ibmvscsi_reenable_crq_queue(&hostdata->queue, hostdata);
- hostdata->reenable_crq = 0;
+ spin_lock_irqsave(hostdata->host->host_lock, flags);
if (!rc)
rc = ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0);
- } else
+ break;
+ case IBMVSCSI_HOST_ACTION_NONE:
+ default:
+ spin_unlock_irqrestore(hostdata->host->host_lock, flags);
return;
+ }
+
+ hostdata->action = IBMVSCSI_HOST_ACTION_NONE;
if (rc) {
atomic_set(&hostdata->request_limit, -1);
dev_err(hostdata->dev, "error after %s\n", action);
}
+ spin_unlock_irqrestore(hostdata->host->host_lock, flags);
scsi_unblock_requests(hostdata->host);
}
-static int ibmvscsi_work_to_do(struct ibmvscsi_host_data *hostdata)
+static int __ibmvscsi_work_to_do(struct ibmvscsi_host_data *hostdata)
{
if (kthread_should_stop())
return 1;
- else if (hostdata->reset_crq) {
- smp_rmb();
- return 1;
- } else if (hostdata->reenable_crq) {
- smp_rmb();
- return 1;
+ switch (hostdata->action) {
+ case IBMVSCSI_HOST_ACTION_NONE:
+ return 0;
+ case IBMVSCSI_HOST_ACTION_RESET:
+ case IBMVSCSI_HOST_ACTION_REENABLE:
+ case IBMVSCSI_HOST_ACTION_UNBLOCK:
+ default:
+ break;
}
- return 0;
+ return 1;
+}
+
+static int ibmvscsi_work_to_do(struct ibmvscsi_host_data *hostdata)
+{
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(hostdata->host->host_lock, flags);
+ rc = __ibmvscsi_work_to_do(hostdata);
+ spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+
+ return rc;
}
static int ibmvscsi_work(void *data)
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.h b/drivers/scsi/ibmvscsi/ibmvscsi.h
index 6ebd1410488d..e60916ef7a49 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.h
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.h
@@ -74,13 +74,19 @@ struct event_pool {
dma_addr_t iu_token;
};
+enum ibmvscsi_host_action {
+ IBMVSCSI_HOST_ACTION_NONE = 0,
+ IBMVSCSI_HOST_ACTION_RESET,
+ IBMVSCSI_HOST_ACTION_REENABLE,
+ IBMVSCSI_HOST_ACTION_UNBLOCK,
+};
+
/* all driver data associated with a host adapter */
struct ibmvscsi_host_data {
struct list_head host_list;
atomic_t request_limit;
int client_migrated;
- int reset_crq;
- int reenable_crq;
+ enum ibmvscsi_host_action action;
struct device *dev;
struct event_pool pool;
struct crq_queue queue;
diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c
index 9751309f8b8c..2519fb7aee51 100644
--- a/drivers/scsi/imm.c
+++ b/drivers/scsi/imm.c
@@ -687,7 +687,7 @@ static int imm_completion(struct scsi_cmnd *cmd)
if (cmd->SCp.buffer && !cmd->SCp.this_residual) {
/* if scatter/gather, advance to the next segment */
if (cmd->SCp.buffers_residual--) {
- cmd->SCp.buffer++;
+ cmd->SCp.buffer = sg_next(cmd->SCp.buffer);
cmd->SCp.this_residual =
cmd->SCp.buffer->length;
cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index d06bc1a817a1..079c04bc448a 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -3901,22 +3901,23 @@ static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
u8 *buffer, u32 len)
{
int bsize_elem, i, result = 0;
- struct scatterlist *scatterlist;
+ struct scatterlist *sg;
void *kaddr;
/* Determine the actual number of bytes per element */
bsize_elem = PAGE_SIZE * (1 << sglist->order);
- scatterlist = sglist->scatterlist;
+ sg = sglist->scatterlist;
- for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
- struct page *page = sg_page(&scatterlist[i]);
+ for (i = 0; i < (len / bsize_elem); i++, sg = sg_next(sg),
+ buffer += bsize_elem) {
+ struct page *page = sg_page(sg);
kaddr = kmap(page);
memcpy(kaddr, buffer, bsize_elem);
kunmap(page);
- scatterlist[i].length = bsize_elem;
+ sg->length = bsize_elem;
if (result != 0) {
ipr_trace;
@@ -3925,13 +3926,13 @@ static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
}
if (len % bsize_elem) {
- struct page *page = sg_page(&scatterlist[i]);
+ struct page *page = sg_page(sg);
kaddr = kmap(page);
memcpy(kaddr, buffer, len % bsize_elem);
kunmap(page);
- scatterlist[i].length = len % bsize_elem;
+ sg->length = len % bsize_elem;
}
sglist->buffer_len = len;
@@ -3952,6 +3953,7 @@ static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
struct scatterlist *scatterlist = sglist->scatterlist;
+ struct scatterlist *sg;
int i;
ipr_cmd->dma_use_sg = sglist->num_dma_sg;
@@ -3960,10 +3962,10 @@ static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
ioarcb->ioadl_len =
cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
- for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
+ for_each_sg(scatterlist, sg, ipr_cmd->dma_use_sg, i) {
ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
- ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
- ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
+ ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
+ ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
}
ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
@@ -3983,6 +3985,7 @@ static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
struct scatterlist *scatterlist = sglist->scatterlist;
+ struct scatterlist *sg;
int i;
ipr_cmd->dma_use_sg = sglist->num_dma_sg;
@@ -3992,11 +3995,11 @@ static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
ioarcb->ioadl_len =
cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
- for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
+ for_each_sg(scatterlist, sg, ipr_cmd->dma_use_sg, i) {
ioadl[i].flags_and_data_len =
- cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
+ cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(sg));
ioadl[i].address =
- cpu_to_be32(sg_dma_address(&scatterlist[i]));
+ cpu_to_be32(sg_dma_address(sg));
}
ioadl[i-1].flags_and_data_len |=
diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c
index 9d29edb9f590..49aa4e657c44 100644
--- a/drivers/scsi/isci/remote_device.c
+++ b/drivers/scsi/isci/remote_device.c
@@ -1087,7 +1087,7 @@ static void sci_remote_device_ready_state_enter(struct sci_base_state_machine *s
if (dev->dev_type == SAS_SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) {
sci_change_state(&idev->sm, SCI_STP_DEV_IDLE);
- } else if (dev_is_expander(dev)) {
+ } else if (dev_is_expander(dev->dev_type)) {
sci_change_state(&idev->sm, SCI_SMP_DEV_IDLE);
} else
isci_remote_device_ready(ihost, idev);
@@ -1478,7 +1478,7 @@ static enum sci_status isci_remote_device_construct(struct isci_port *iport,
struct domain_device *dev = idev->domain_dev;
enum sci_status status;
- if (dev->parent && dev_is_expander(dev->parent))
+ if (dev->parent && dev_is_expander(dev->parent->dev_type))
status = sci_remote_device_ea_construct(iport, idev);
else
status = sci_remote_device_da_construct(iport, idev);
diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h
index 47a013fffae7..3ad681c4c20a 100644
--- a/drivers/scsi/isci/remote_device.h
+++ b/drivers/scsi/isci/remote_device.h
@@ -295,11 +295,6 @@ static inline struct isci_remote_device *rnc_to_dev(struct sci_remote_node_conte
return idev;
}
-static inline bool dev_is_expander(struct domain_device *dev)
-{
- return dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE;
-}
-
static inline void sci_remote_device_decrement_request_count(struct isci_remote_device *idev)
{
/* XXX delete this voodoo when converting to the top-level device
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index 1b18cf55167e..343d24c7e788 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -224,7 +224,7 @@ static void scu_ssp_request_construct_task_context(
idev = ireq->target_device;
iport = idev->owning_port;
- /* Fill in the TC with the its required data */
+ /* Fill in the TC with its required data */
task_context->abort = 0;
task_context->priority = 0;
task_context->initiator_request = 1;
@@ -506,7 +506,7 @@ static void scu_sata_request_construct_task_context(
idev = ireq->target_device;
iport = idev->owning_port;
- /* Fill in the TC with the its required data */
+ /* Fill in the TC with its required data */
task_context->abort = 0;
task_context->priority = SCU_TASK_PRIORITY_NORMAL;
task_context->initiator_request = 1;
@@ -3101,7 +3101,7 @@ sci_io_request_construct(struct isci_host *ihost,
/* pass */;
else if (dev_is_sata(dev))
memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd));
- else if (dev_is_expander(dev))
+ else if (dev_is_expander(dev->dev_type))
/* pass */;
else
return SCI_FAILURE_UNSUPPORTED_PROTOCOL;
@@ -3235,7 +3235,7 @@ sci_io_request_construct_smp(struct device *dev,
iport = idev->owning_port;
/*
- * Fill in the TC with the its required data
+ * Fill in the TC with its required data
* 00h
*/
task_context->priority = 0;
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index fb6eba331ac6..26fa1a4d1e6b 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -511,7 +511,7 @@ int isci_task_abort_task(struct sas_task *task)
"%s: dev = %p (%s%s), task = %p, old_request == %p\n",
__func__, idev,
(dev_is_sata(task->dev) ? "STP/SATA"
- : ((dev_is_expander(task->dev))
+ : ((dev_is_expander(task->dev->dev_type))
? "SMP"
: "SSP")),
((idev) ? ((test_bit(IDEV_GONE, &idev->flags))
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index 719e57685dd5..6ef93c7af954 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -8,8 +8,6 @@
* Copyright (C) 2006 Red Hat, Inc. All rights reserved.
* maintained by open-iscsi@googlegroups.com
*
- * See the file COPYING included with this distribution for more details.
- *
* Credits:
* Christoph Hellwig
* FUJITA Tomonori
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index 726ada9b8c79..abcad097ff2f 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -1,25 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Serial Attached SCSI (SAS) Discover process
*
* Copyright (C) 2005 Adaptec, Inc. All rights reserved.
* Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
- *
- * This file is licensed under GPLv2.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#include <linux/scatterlist.h>
@@ -309,7 +293,7 @@ void sas_free_device(struct kref *kref)
dev->phy = NULL;
/* remove the phys and ports, everything else should be gone */
- if (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
+ if (dev_is_expander(dev->dev_type))
kfree(dev->ex_dev.ex_phy);
if (dev_is_sata(dev) && dev->sata_dev.ap) {
@@ -519,8 +503,7 @@ static void sas_revalidate_domain(struct work_struct *work)
pr_debug("REVALIDATING DOMAIN on port %d, pid:%d\n", port->id,
task_pid_nr(current));
- if (ddev && (ddev->dev_type == SAS_FANOUT_EXPANDER_DEVICE ||
- ddev->dev_type == SAS_EDGE_EXPANDER_DEVICE))
+ if (ddev && dev_is_expander(ddev->dev_type))
res = sas_ex_revalidate_domain(ddev);
pr_debug("done REVALIDATING DOMAIN on port %d, pid:%d, res 0x%x\n",
diff --git a/drivers/scsi/libsas/sas_event.c b/drivers/scsi/libsas/sas_event.c
index b1e0f7d2b396..a1852f6c042b 100644
--- a/drivers/scsi/libsas/sas_event.c
+++ b/drivers/scsi/libsas/sas_event.c
@@ -1,25 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Serial Attached SCSI (SAS) Event processing
*
* Copyright (C) 2005 Adaptec, Inc. All rights reserved.
* Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
- *
- * This file is licensed under GPLv2.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#include <linux/export.h>
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 9f7e2457360e..9fdb9c9fbda4 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Serial Attached SCSI (SAS) Expander discovery and configuration
*
@@ -5,21 +6,6 @@
* Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
*
* This file is licensed under GPLv2.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#include <linux/scatterlist.h>
@@ -1106,7 +1092,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
SAS_ADDR(dev->sas_addr),
phy_id);
sas_ex_disable_phy(dev, phy_id);
- break;
+ return res;
} else
memcpy(dev->port->disc.fanout_sas_addr,
ex_phy->attached_sas_addr, SAS_ADDR_SIZE);
@@ -1118,27 +1104,9 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
break;
}
- if (child) {
- int i;
-
- for (i = 0; i < ex->num_phys; i++) {
- if (ex->ex_phy[i].phy_state == PHY_VACANT ||
- ex->ex_phy[i].phy_state == PHY_NOT_PRESENT)
- continue;
- /*
- * Due to races, the phy might not get added to the
- * wide port, so we add the phy to the wide port here.
- */
- if (SAS_ADDR(ex->ex_phy[i].attached_sas_addr) ==
- SAS_ADDR(child->sas_addr)) {
- ex->ex_phy[i].phy_state= PHY_DEVICE_DISCOVERED;
- if (sas_ex_join_wide_port(dev, i))
- pr_debug("Attaching ex phy%02d to wide port %016llx\n",
- i, SAS_ADDR(ex->ex_phy[i].attached_sas_addr));
- }
- }
- }
-
+ if (!child)
+ pr_notice("ex %016llx phy%02d failed to discover\n",
+ SAS_ADDR(dev->sas_addr), phy_id);
return res;
}
@@ -1154,8 +1122,7 @@ static int sas_find_sub_addr(struct domain_device *dev, u8 *sub_addr)
phy->phy_state == PHY_NOT_PRESENT)
continue;
- if ((phy->attached_dev_type == SAS_EDGE_EXPANDER_DEVICE ||
- phy->attached_dev_type == SAS_FANOUT_EXPANDER_DEVICE) &&
+ if (dev_is_expander(phy->attached_dev_type) &&
phy->routing_attr == SUBTRACTIVE_ROUTING) {
memcpy(sub_addr, phy->attached_sas_addr, SAS_ADDR_SIZE);
@@ -1173,8 +1140,7 @@ static int sas_check_level_subtractive_boundary(struct domain_device *dev)
u8 sub_addr[SAS_ADDR_SIZE] = {0, };
list_for_each_entry(child, &ex->children, siblings) {
- if (child->dev_type != SAS_EDGE_EXPANDER_DEVICE &&
- child->dev_type != SAS_FANOUT_EXPANDER_DEVICE)
+ if (!dev_is_expander(child->dev_type))
continue;
if (sub_addr[0] == 0) {
sas_find_sub_addr(child, sub_addr);
@@ -1259,8 +1225,7 @@ static int sas_check_ex_subtractive_boundary(struct domain_device *dev)
phy->phy_state == PHY_NOT_PRESENT)
continue;
- if ((phy->attached_dev_type == SAS_FANOUT_EXPANDER_DEVICE ||
- phy->attached_dev_type == SAS_EDGE_EXPANDER_DEVICE) &&
+ if (dev_is_expander(phy->attached_dev_type) &&
phy->routing_attr == SUBTRACTIVE_ROUTING) {
if (!sub_sas_addr)
@@ -1356,8 +1321,7 @@ static int sas_check_parent_topology(struct domain_device *child)
if (!child->parent)
return 0;
- if (child->parent->dev_type != SAS_EDGE_EXPANDER_DEVICE &&
- child->parent->dev_type != SAS_FANOUT_EXPANDER_DEVICE)
+ if (!dev_is_expander(child->parent->dev_type))
return 0;
parent_ex = &child->parent->ex_dev;
@@ -1653,8 +1617,7 @@ static int sas_ex_level_discovery(struct asd_sas_port *port, const int level)
struct domain_device *dev;
list_for_each_entry(dev, &port->dev_list, dev_list_node) {
- if (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
- dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
+ if (dev_is_expander(dev->dev_type)) {
struct sas_expander_device *ex =
rphy_to_expander_device(dev->rphy);
@@ -1886,7 +1849,7 @@ static int sas_find_bcast_dev(struct domain_device *dev,
SAS_ADDR(dev->sas_addr));
}
list_for_each_entry(ch, &ex->children, siblings) {
- if (ch->dev_type == SAS_EDGE_EXPANDER_DEVICE || ch->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
+ if (dev_is_expander(ch->dev_type)) {
res = sas_find_bcast_dev(ch, src_dev);
if (*src_dev)
return res;
@@ -1903,8 +1866,7 @@ static void sas_unregister_ex_tree(struct asd_sas_port *port, struct domain_devi
list_for_each_entry_safe(child, n, &ex->children, siblings) {
set_bit(SAS_DEV_GONE, &child->state);
- if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
- child->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
+ if (dev_is_expander(child->dev_type))
sas_unregister_ex_tree(port, child);
else
sas_unregister_dev(port, child);
@@ -1924,8 +1886,7 @@ static void sas_unregister_devs_sas_addr(struct domain_device *parent,
if (SAS_ADDR(child->sas_addr) ==
SAS_ADDR(phy->attached_sas_addr)) {
set_bit(SAS_DEV_GONE, &child->state);
- if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
- child->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
+ if (dev_is_expander(child->dev_type))
sas_unregister_ex_tree(parent->port, child);
else
sas_unregister_dev(parent->port, child);
@@ -1954,8 +1915,7 @@ static int sas_discover_bfs_by_root_level(struct domain_device *root,
int res = 0;
list_for_each_entry(child, &ex_root->children, siblings) {
- if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
- child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
+ if (dev_is_expander(child->dev_type)) {
struct sas_expander_device *ex =
rphy_to_expander_device(child->rphy);
@@ -2008,8 +1968,7 @@ static int sas_discover_new(struct domain_device *dev, int phy_id)
list_for_each_entry(child, &dev->ex_dev.children, siblings) {
if (SAS_ADDR(child->sas_addr) ==
SAS_ADDR(ex_phy->attached_sas_addr)) {
- if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
- child->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
+ if (dev_is_expander(child->dev_type))
res = sas_discover_bfs_by_root(child);
break;
}
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index d50810da53a9..21c43b18d5d5 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Serial Attached SCSI (SAS) Transport Layer initialization
*
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index 1f1e07e98477..01f1738ce6df 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Serial Attached SCSI (SAS) class internal header file
*
diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c
index b71f5ac6c7dc..4ca4b1f30bd0 100644
--- a/drivers/scsi/libsas/sas_phy.c
+++ b/drivers/scsi/libsas/sas_phy.c
@@ -1,25 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Serial Attached SCSI (SAS) Phy class
*
* Copyright (C) 2005 Adaptec, Inc. All rights reserved.
* Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
- *
- * This file is licensed under GPLv2.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#include "sas_internal.h"
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index 38a10478605c..7c86fd248129 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -1,25 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Serial Attached SCSI (SAS) Port class
*
* Copyright (C) 2005 Adaptec, Inc. All rights reserved.
* Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
- *
- * This file is licensed under GPLv2.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#include "sas_internal.h"
@@ -70,7 +54,7 @@ static void sas_resume_port(struct asd_sas_phy *phy)
continue;
}
- if (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
+ if (dev_is_expander(dev->dev_type)) {
dev->ex_dev.ex_change_count = -1;
for (i = 0; i < dev->ex_dev.num_phys; i++) {
struct ex_phy *phy = &dev->ex_dev.ex_phy[i];
@@ -195,7 +179,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
sas_discover_event(phy->port, DISCE_DISCOVER_DOMAIN);
/* Only insert a revalidate event after initial discovery */
- if (port_dev && sas_dev_type_is_expander(port_dev->dev_type)) {
+ if (port_dev && dev_is_expander(port_dev->dev_type)) {
struct expander_device *ex_dev = &port_dev->ex_dev;
ex_dev->ex_change_count = -1;
@@ -264,7 +248,7 @@ void sas_deform_port(struct asd_sas_phy *phy, int gone)
spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags);
/* Only insert revalidate event if the port still has members */
- if (port->port && dev && sas_dev_type_is_expander(dev->dev_type)) {
+ if (port->port && dev && dev_is_expander(dev->dev_type)) {
struct expander_device *ex_dev = &dev->ex_dev;
ex_dev->ex_change_count = -1;
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index ede0674d8399..4f339f939a51 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Serial Attached SCSI (SAS) class SCSI Host glue.
*
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index aafcffaa25f7..2c3bb8a966e5 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -274,6 +274,7 @@ struct lpfc_stats {
uint32_t elsXmitADISC;
uint32_t elsXmitLOGO;
uint32_t elsXmitSCR;
+ uint32_t elsXmitRSCN;
uint32_t elsXmitRNID;
uint32_t elsXmitFARP;
uint32_t elsXmitFARPR;
@@ -819,6 +820,7 @@ struct lpfc_hba {
uint32_t cfg_use_msi;
uint32_t cfg_auto_imax;
uint32_t cfg_fcp_imax;
+ uint32_t cfg_force_rscn;
uint32_t cfg_cq_poll_threshold;
uint32_t cfg_cq_max_proc_limit;
uint32_t cfg_fcp_cpu_map;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index d4c65e2109e2..ea62322ffe2b 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -4097,9 +4097,9 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
}
if ((phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC ||
phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC) &&
- val != FLAGS_TOPOLOGY_MODE_PT_PT) {
+ val == 4) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "3114 Only non-FC-AL mode is supported\n");
+ "3114 Loop mode not supported\n");
return -EINVAL;
}
phba->cfg_topology = val;
@@ -4959,6 +4959,64 @@ static DEVICE_ATTR(lpfc_req_fw_upgrade, S_IRUGO | S_IWUSR,
lpfc_request_firmware_upgrade_store);
/**
+ * lpfc_force_rscn_store
+ *
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: string written to the attribute; only its length is used.
+ * @count: unused variable.
+ *
+ * Description:
+ * Force the switch to send an RSCN to all other NPorts in our zone.
+ * If we are direct connect pt2pt, build the RSCN command ourselves
+ * and send it to the other NPort. Not supported for private loop.
+ *
+ * Returns:
+ * 0 - on success
+ * -EIO - if command is not sent
+ **/
+static ssize_t
+lpfc_force_rscn_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+ int i;
+
+ i = lpfc_issue_els_rscn(vport, 0);
+ if (i)
+ return -EIO;
+ return strlen(buf);
+}
+
+/*
+ * lpfc_force_rscn: Force an RSCN to be sent to all remote NPorts
+ * connected to the HBA.
+ *
+ * Value range is any ASCII value.
+ */
+static int lpfc_force_rscn;
+module_param(lpfc_force_rscn, int, 0644);
+MODULE_PARM_DESC(lpfc_force_rscn,
+ "Force an RSCN to be sent to all remote NPorts");
+lpfc_param_show(force_rscn)
+
+/**
+ * lpfc_force_rscn_init - Initialize the lpfc_force_rscn module parameter
+ * @phba: lpfc_hba pointer.
+ * @val: unused value.
+ *
+ * Returns:
+ * zero if val saved.
+ **/
+static int
+lpfc_force_rscn_init(struct lpfc_hba *phba, int val)
+{
+ return 0;
+}
+static DEVICE_ATTR_RW(lpfc_force_rscn);
+
+/**
* lpfc_fcp_imax_store
*
* @dev: class device that is converted into a Scsi_host.
@@ -5122,7 +5180,8 @@ lpfc_cq_max_proc_limit_store(struct device *dev, struct device_attribute *attr,
/* set the values on the cq's */
for (i = 0; i < phba->cfg_irq_chann; i++) {
- eq = phba->sli4_hba.hdwq[i].hba_eq;
+ /* Get the EQ corresponding to the IRQ vector */
+ eq = phba->sli4_hba.hba_eq_hdl[i].eq;
if (!eq)
continue;
@@ -5243,35 +5302,44 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
len += scnprintf(
buf + len, PAGE_SIZE - len,
"CPU %02d hdwq None "
- "physid %d coreid %d ht %d\n",
+ "physid %d coreid %d ht %d ua %d\n",
phba->sli4_hba.curr_disp_cpu,
- cpup->phys_id,
- cpup->core_id, cpup->hyper);
+ cpup->phys_id, cpup->core_id,
+ (cpup->flag & LPFC_CPU_MAP_HYPER),
+ (cpup->flag & LPFC_CPU_MAP_UNASSIGN));
else
len += scnprintf(
buf + len, PAGE_SIZE - len,
"CPU %02d EQ %04d hdwq %04d "
- "physid %d coreid %d ht %d\n",
+ "physid %d coreid %d ht %d ua %d\n",
phba->sli4_hba.curr_disp_cpu,
cpup->eq, cpup->hdwq, cpup->phys_id,
- cpup->core_id, cpup->hyper);
+ cpup->core_id,
+ (cpup->flag & LPFC_CPU_MAP_HYPER),
+ (cpup->flag & LPFC_CPU_MAP_UNASSIGN));
} else {
if (cpup->hdwq == LPFC_VECTOR_MAP_EMPTY)
len += scnprintf(
buf + len, PAGE_SIZE - len,
"CPU %02d hdwq None "
- "physid %d coreid %d ht %d IRQ %d\n",
+ "physid %d coreid %d ht %d ua %d IRQ %d\n",
phba->sli4_hba.curr_disp_cpu,
cpup->phys_id,
- cpup->core_id, cpup->hyper, cpup->irq);
+ cpup->core_id,
+ (cpup->flag & LPFC_CPU_MAP_HYPER),
+ (cpup->flag & LPFC_CPU_MAP_UNASSIGN),
+ cpup->irq);
else
len += scnprintf(
buf + len, PAGE_SIZE - len,
"CPU %02d EQ %04d hdwq %04d "
- "physid %d coreid %d ht %d IRQ %d\n",
+ "physid %d coreid %d ht %d ua %d IRQ %d\n",
phba->sli4_hba.curr_disp_cpu,
cpup->eq, cpup->hdwq, cpup->phys_id,
- cpup->core_id, cpup->hyper, cpup->irq);
+ cpup->core_id,
+ (cpup->flag & LPFC_CPU_MAP_HYPER),
+ (cpup->flag & LPFC_CPU_MAP_UNASSIGN),
+ cpup->irq);
}
phba->sli4_hba.curr_disp_cpu++;
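The new ht and ua columns in the output above come from masking single bits out of cpup->flag, so what gets printed is the masked bit value rather than a normalized boolean. A stand-alone C illustration of the difference; the flag names and values below are hypothetical, not the lpfc definitions.

/* Illustration: printing masked flag bits vs. normalized booleans. */
#include <stdio.h>

#define MAP_HYPER    0x1	/* hypothetical: hyper-threaded sibling */
#define MAP_UNASSIGN 0x2	/* hypothetical: no IRQ assigned by the kernel */

int main(void)
{
	int flag = MAP_UNASSIGN;

	/* Raw mask: prints the bit value itself (here "ht 0 ua 2"). */
	printf("ht %d ua %d\n", flag & MAP_HYPER, flag & MAP_UNASSIGN);

	/* !! collapses any set bit to exactly 1 ("ht 0 ua 1"). */
	printf("ht %d ua %d\n", !!(flag & MAP_HYPER), !!(flag & MAP_UNASSIGN));
	return 0;
}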
@@ -5958,6 +6026,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_nvme_oas,
&dev_attr_lpfc_nvme_embed_cmd,
&dev_attr_lpfc_fcp_imax,
+ &dev_attr_lpfc_force_rscn,
&dev_attr_lpfc_cq_poll_threshold,
&dev_attr_lpfc_cq_max_proc_limit,
&dev_attr_lpfc_fcp_cpu_map,
@@ -7005,6 +7074,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_nvme_oas_init(phba, lpfc_nvme_oas);
lpfc_nvme_embed_cmd_init(phba, lpfc_nvme_embed_cmd);
lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
+ lpfc_force_rscn_init(phba, lpfc_force_rscn);
lpfc_cq_poll_threshold_init(phba, lpfc_cq_poll_threshold);
lpfc_cq_max_proc_limit_init(phba, lpfc_cq_max_proc_limit);
lpfc_fcp_cpu_map_init(phba, lpfc_fcp_cpu_map);
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index b0202bc0aa62..b7216d694bff 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -5741,7 +5741,7 @@ lpfc_get_trunk_info(struct bsg_job *job)
event_reply->port_speed = phba->sli4_hba.link_state.speed / 1000;
event_reply->logical_speed =
- phba->sli4_hba.link_state.logical_speed / 100;
+ phba->sli4_hba.link_state.logical_speed / 1000;
job_error:
bsg_reply->result = rc;
bsg_job_done(job, bsg_reply->result,
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index e0b14d791b8c..68e9f96242d3 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -141,6 +141,7 @@ int lpfc_issue_els_adisc(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
int lpfc_issue_els_logo(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
int lpfc_issue_els_npiv_logo(struct lpfc_vport *, struct lpfc_nodelist *);
int lpfc_issue_els_scr(struct lpfc_vport *, uint32_t, uint8_t);
+int lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry);
int lpfc_issue_fabric_reglogin(struct lpfc_vport *);
int lpfc_els_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
int lpfc_ct_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
@@ -355,6 +356,7 @@ void lpfc_mbox_timeout_handler(struct lpfc_hba *);
struct lpfc_nodelist *lpfc_findnode_did(struct lpfc_vport *, uint32_t);
struct lpfc_nodelist *lpfc_findnode_wwpn(struct lpfc_vport *,
struct lpfc_name *);
+struct lpfc_nodelist *lpfc_findnode_mapped(struct lpfc_vport *vport);
int lpfc_sli_issue_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
@@ -555,6 +557,8 @@ void lpfc_ras_stop_fwlog(struct lpfc_hba *phba);
int lpfc_check_fwlog_support(struct lpfc_hba *phba);
/* NVME interfaces. */
+void lpfc_nvme_rescan_port(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp);
void lpfc_nvme_unregister_port(struct lpfc_vport *vport,
struct lpfc_nodelist *ndlp);
int lpfc_nvme_register_port(struct lpfc_vport *vport,
@@ -568,7 +572,8 @@ void lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba);
void lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba,
struct lpfc_sli_ring *pring, struct lpfc_iocbq *piocb);
void lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba, uint32_t idx,
- struct rqb_dmabuf *nvmebuf, uint64_t isr_ts);
+ struct rqb_dmabuf *nvmebuf, uint64_t isr_ts,
+ uint8_t cqflag);
void lpfc_nvme_mod_param_dep(struct lpfc_hba *phba);
void lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba,
struct lpfc_iocbq *cmdiocb,
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 4812bbbf43cc..ec72c39997d2 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -2358,6 +2358,7 @@ static int
lpfc_fdmi_port_attr_fc4type(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_def *ad)
{
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
@@ -2366,9 +2367,13 @@ lpfc_fdmi_port_attr_fc4type(struct lpfc_vport *vport,
ae->un.AttrTypes[3] = 0x02; /* Type 0x1 - ELS */
ae->un.AttrTypes[2] = 0x01; /* Type 0x8 - FCP */
- if (vport->nvmei_support || vport->phba->nvmet_support)
- ae->un.AttrTypes[6] = 0x01; /* Type 0x28 - NVME */
ae->un.AttrTypes[7] = 0x01; /* Type 0x20 - CT */
+
+ /* Check to see if Firmware supports NVME and this is the physical port */
+ if ((phba->sli_rev == LPFC_SLI_REV4) && (vport == phba->pport) &&
+ phba->sli4_hba.pc_sli4_params.nvme)
+ ae->un.AttrTypes[6] = 0x01; /* Type 0x28 - NVME */
+
size = FOURBYTES + 32;
ad->AttrLen = cpu_to_be16(size);
ad->AttrType = cpu_to_be16(RPRT_SUPPORTED_FC4_TYPES);
@@ -2680,9 +2685,12 @@ lpfc_fdmi_port_attr_active_fc4type(struct lpfc_vport *vport,
ae->un.AttrTypes[3] = 0x02; /* Type 0x1 - ELS */
ae->un.AttrTypes[2] = 0x01; /* Type 0x8 - FCP */
+ ae->un.AttrTypes[7] = 0x01; /* Type 0x20 - CT */
+
+ /* Check to see if NVME is configured or not */
if (vport->phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
ae->un.AttrTypes[6] = 0x1; /* Type 0x28 - NVME */
- ae->un.AttrTypes[7] = 0x01; /* Type 0x20 - CT */
+
size = FOURBYTES + 32;
ad->AttrLen = cpu_to_be16(size);
ad->AttrType = cpu_to_be16(RPRT_ACTIVE_FC4_TYPES);
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 5ac4f8d76b91..f12780f4cfbb 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -30,6 +30,8 @@
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
+#include <uapi/scsi/fc/fc_fs.h>
+#include <uapi/scsi/fc/fc_els.h>
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
@@ -3079,6 +3081,116 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
}
/**
+ * lpfc_issue_els_rscn - Issue an RSCN to the Fabric Controller (Fabric)
+ * or the other nport (pt2pt).
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @retry: number of retries to the command IOCB.
+ *
+ * This routine issues a RSCN to the Fabric Controller (DID 0xFFFFFD)
+ * when connected to a fabric, or to the remote port when connected
+ * in point-to-point mode. When sent to the Fabric Controller, it will
+ * replay the RSCN to registered recipients.
+ *
+ * Note that the lpfc_prep_els_iocb() routine increments the reference
+ * count of ndlp by 1 to hold the ndlp, and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function of the RSCN ELS command.
+ *
+ * Return code
+ * 0 - Successfully issued RSCN command
+ * 1 - Failed to issue RSCN command
+ **/
+int
+lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_iocbq *elsiocb;
+ struct lpfc_nodelist *ndlp;
+ struct {
+ struct fc_els_rscn rscn;
+ struct fc_els_rscn_page portid;
+ } *event;
+ uint32_t nportid;
+ uint16_t cmdsize = sizeof(*event);
+
+ /* Not supported for private loop */
+ if (phba->fc_topology == LPFC_TOPOLOGY_LOOP &&
+ !(vport->fc_flag & FC_PUBLIC_LOOP))
+ return 1;
+
+ if (vport->fc_flag & FC_PT2PT) {
+ /* find any mapped nport - that would be the other nport */
+ ndlp = lpfc_findnode_mapped(vport);
+ if (!ndlp)
+ return 1;
+ } else {
+ nportid = FC_FID_FCTRL;
+ /* find the fabric controller node */
+ ndlp = lpfc_findnode_did(vport, nportid);
+ if (!ndlp) {
+ /* if one didn't exist, make one */
+ ndlp = lpfc_nlp_init(vport, nportid);
+ if (!ndlp)
+ return 1;
+ lpfc_enqueue_node(vport, ndlp);
+ } else if (!NLP_CHK_NODE_ACT(ndlp)) {
+ ndlp = lpfc_enable_node(vport, ndlp,
+ NLP_STE_UNUSED_NODE);
+ if (!ndlp)
+ return 1;
+ }
+ }
+
+ elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
+ ndlp->nlp_DID, ELS_CMD_RSCN_XMT);
+
+ if (!elsiocb) {
+ /* This will trigger the release of the node just
+ * allocated
+ */
+ lpfc_nlp_put(ndlp);
+ return 1;
+ }
+
+ event = ((struct lpfc_dmabuf *)elsiocb->context2)->virt;
+
+ event->rscn.rscn_cmd = ELS_RSCN;
+ event->rscn.rscn_page_len = sizeof(struct fc_els_rscn_page);
+ event->rscn.rscn_plen = cpu_to_be16(cmdsize);
+
+ nportid = vport->fc_myDID;
+ /* appears that page flags must be 0 for fabric to broadcast RSCN */
+ event->portid.rscn_page_flags = 0;
+ event->portid.rscn_fid[0] = (nportid & 0x00FF0000) >> 16;
+ event->portid.rscn_fid[1] = (nportid & 0x0000FF00) >> 8;
+ event->portid.rscn_fid[2] = nportid & 0x000000FF;
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "Issue RSCN: did:x%x",
+ ndlp->nlp_DID, 0, 0);
+
+ phba->fc_stat.elsXmitRSCN++;
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
+ if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
+ IOCB_ERROR) {
+ /* The additional lpfc_nlp_put will cause the following
+ * lpfc_els_free_iocb routine to trigger the release of
+ * the node.
+ */
+ lpfc_nlp_put(ndlp);
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
+ }
+ /* This will cause the callback function lpfc_cmpl_els_cmd to
+ * trigger the release of the node.
+ */
+ if (!(vport->fc_flag & FC_PT2PT))
+ lpfc_nlp_put(ndlp);
+
+ return 0;
+}
+
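The payload assembled in lpfc_issue_els_rscn() above is the standard two-part RSCN ELS frame from uapi/scsi/fc/fc_els.h: a struct fc_els_rscn header followed by a single struct fc_els_rscn_page carrying the 24-bit affected port ID. Here is a stripped-down sketch of the same packing outside any lpfc context; the helper name and wrapper struct are hypothetical.

/* Sketch: filling a one-page RSCN payload (hypothetical helper). */
#include <linux/types.h>
#include <asm/byteorder.h>
#include <uapi/scsi/fc/fc_els.h>

struct rscn_one_page {
	struct fc_els_rscn rscn;
	struct fc_els_rscn_page page;
};

static void fill_rscn(struct rscn_one_page *ev, u32 port_id)
{
	ev->rscn.rscn_cmd = ELS_RSCN;
	ev->rscn.rscn_page_len = sizeof(struct fc_els_rscn_page);
	ev->rscn.rscn_plen = cpu_to_be16(sizeof(*ev));

	ev->page.rscn_page_flags = 0;	/* 0, as the driver uses above */
	ev->page.rscn_fid[0] = (port_id >> 16) & 0xff;	/* domain */
	ev->page.rscn_fid[1] = (port_id >> 8) & 0xff;	/* area */
	ev->page.rscn_fid[2] = port_id & 0xff;		/* port */
}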
+/**
* lpfc_issue_els_farpr - Issue a farp to an node on a vport
* @vport: pointer to a host virtual N_Port data structure.
* @nportid: N_Port identifier to the remote node.
@@ -4196,6 +4308,7 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if ((rspiocb->iocb.ulpStatus == 0)
&& (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
if (!lpfc_unreg_rpi(vport, ndlp) &&
+ (!(vport->fc_flag & FC_PT2PT)) &&
(ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE)) {
lpfc_printf_vlog(vport, KERN_INFO,
@@ -6214,6 +6327,8 @@ lpfc_rscn_recovery_check(struct lpfc_vport *vport)
continue;
}
+ if (ndlp->nlp_fc4_type & NLP_FC4_NVME)
+ lpfc_nvme_rescan_port(vport, ndlp);
lpfc_disc_state_machine(vport, ndlp, NULL,
NLP_EVT_DEVICE_RECOVERY);
@@ -6318,6 +6433,19 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
fc_host_post_event(shost, fc_get_event_number(),
FCH_EVT_RSCN, lp[i]);
+ /* Check if RSCN is coming from a direct-connected remote NPort */
+ if (vport->fc_flag & FC_PT2PT) {
+ /* If so, just ACC it, no other action needed for now */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "2024 pt2pt RSCN %08x Data: x%x x%x\n",
+ *lp, vport->fc_flag, payload_len);
+ lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
+
+ if (ndlp->nlp_fc4_type & NLP_FC4_NVME)
+ lpfc_nvme_rescan_port(vport, ndlp);
+ return 0;
+ }
+
/* If we are about to begin discovery, just ACC the RSCN.
* Discovery processing will satisfy it.
*/
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index c43852f97f25..28ecaa7fc715 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -5277,6 +5277,41 @@ lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
}
struct lpfc_nodelist *
+lpfc_findnode_mapped(struct lpfc_vport *vport)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_nodelist *ndlp;
+ uint32_t data1;
+ unsigned long iflags;
+
+ spin_lock_irqsave(shost->host_lock, iflags);
+
+ list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+ if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
+ ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
+ data1 = (((uint32_t)ndlp->nlp_state << 24) |
+ ((uint32_t)ndlp->nlp_xri << 16) |
+ ((uint32_t)ndlp->nlp_type << 8) |
+ ((uint32_t)ndlp->nlp_rpi & 0xff));
+ spin_unlock_irqrestore(shost->host_lock, iflags);
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+ "2025 FIND node DID "
+ "Data: x%p x%x x%x x%x %p\n",
+ ndlp, ndlp->nlp_DID,
+ ndlp->nlp_flag, data1,
+ ndlp->active_rrqs_xri_bitmap);
+ return ndlp;
+ }
+ }
+ spin_unlock_irqrestore(shost->host_lock, iflags);
+
+ /* FIND node did <did> NOT FOUND */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+ "2026 FIND mapped did NOT FOUND.\n");
+ return NULL;
+}
+
+struct lpfc_nodelist *
lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index edd8f3982023..5b439a6dcde1 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -601,6 +601,7 @@ struct fc_vft_header {
#define ELS_CMD_RPL 0x57000000
#define ELS_CMD_FAN 0x60000000
#define ELS_CMD_RSCN 0x61040000
+#define ELS_CMD_RSCN_XMT 0x61040008
#define ELS_CMD_SCR 0x62000000
#define ELS_CMD_RNID 0x78000000
#define ELS_CMD_LIRR 0x7A000000
@@ -642,6 +643,7 @@ struct fc_vft_header {
#define ELS_CMD_RPL 0x57
#define ELS_CMD_FAN 0x60
#define ELS_CMD_RSCN 0x0461
+#define ELS_CMD_RSCN_XMT 0x08000461
#define ELS_CMD_SCR 0x62
#define ELS_CMD_RNID 0x78
#define ELS_CMD_LIRR 0x7A
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index eaaef682de25..6d6b14295734 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -72,7 +72,7 @@ unsigned long _dump_buf_dif_order;
spinlock_t _dump_buf_lock;
/* Used when mapping IRQ vectors in a driver centric manner */
-uint32_t lpfc_present_cpu;
+static uint32_t lpfc_present_cpu;
static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
@@ -93,8 +93,8 @@ static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
static void lpfc_sli4_disable_intr(struct lpfc_hba *);
static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
-static uint16_t lpfc_find_eq_handle(struct lpfc_hba *, uint16_t);
static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int);
+static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *);
static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
@@ -1274,8 +1274,10 @@ lpfc_hb_eq_delay_work(struct work_struct *work)
if (!eqcnt)
goto requeue;
+ /* Loop thru all IRQ vectors */
for (i = 0; i < phba->cfg_irq_chann; i++) {
- eq = phba->sli4_hba.hdwq[i].hba_eq;
+ /* Get the EQ corresponding to the IRQ vector */
+ eq = phba->sli4_hba.hba_eq_hdl[i].eq;
if (eq && eqcnt[eq->last_cpu] < 2)
eqcnt[eq->last_cpu]++;
continue;
@@ -4114,14 +4116,13 @@ lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
* pci bus space for an I/O. The DMA buffer includes the
* number of SGE's necessary to support the sg_tablesize.
*/
- lpfc_ncmd->data = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool,
- GFP_KERNEL,
- &lpfc_ncmd->dma_handle);
+ lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
+ GFP_KERNEL,
+ &lpfc_ncmd->dma_handle);
if (!lpfc_ncmd->data) {
kfree(lpfc_ncmd);
break;
}
- memset(lpfc_ncmd->data, 0, phba->cfg_sg_dma_buf_size);
/*
* 4K Page alignment is CRITICAL to BlockGuard, double check
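On the hunk above: dma_pool_zalloc() is dma_pool_alloc() with zeroing folded in, which is why the trailing memset() could be dropped. A minimal sketch of the pattern; the pool and wrapper names are hypothetical.

/* Sketch: zeroed allocation from a DMA pool, no separate memset() needed. */
#include <linux/dmapool.h>
#include <linux/gfp.h>

static void *example_alloc_dma_buf(struct dma_pool *pool, dma_addr_t *dma)
{
	/* Memory returned here is already cleared to zero. */
	return dma_pool_zalloc(pool, GFP_KERNEL, dma);
}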
@@ -4347,6 +4348,9 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0);
+ if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
+ lpfc_setup_bg(phba, shost);
+
error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
if (error)
goto out_put_shost;
@@ -5055,7 +5059,7 @@ lpfc_update_trunk_link_status(struct lpfc_hba *phba,
bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
phba->sli4_hba.link_state.logical_speed =
- bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc);
+ bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
/* We got FC link speed, convert to fc_linkspeed (READ_TOPOLOGY) */
phba->fc_linkspeed =
lpfc_async_link_speed_to_read_top(
@@ -5158,8 +5162,14 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
phba->sli4_hba.link_state.fault =
bf_get(lpfc_acqe_link_fault, acqe_fc);
- phba->sli4_hba.link_state.logical_speed =
+
+ if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
+ LPFC_FC_LA_TYPE_LINK_DOWN)
+ phba->sli4_hba.link_state.logical_speed = 0;
+ else if (!phba->sli4_hba.conf_trunk)
+ phba->sli4_hba.link_state.logical_speed =
bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
+
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"2896 Async FC event - Speed:%dGBaud Topology:x%x "
"LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
@@ -6551,6 +6561,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
spin_lock_init(&phba->sli4_hba.abts_nvmet_buf_list_lock);
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
+ spin_lock_init(&phba->sli4_hba.t_active_list_lock);
+ INIT_LIST_HEAD(&phba->sli4_hba.t_active_ctx_list);
}
/* This abort list used by worker thread */
@@ -7660,8 +7672,6 @@ lpfc_post_init_setup(struct lpfc_hba *phba)
*/
shost = pci_get_drvdata(phba->pcidev);
shost->can_queue = phba->cfg_hba_queue_depth - 10;
- if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
- lpfc_setup_bg(phba, shost);
lpfc_host_attrib_init(shost);
@@ -8740,8 +8750,10 @@ int
lpfc_sli4_queue_create(struct lpfc_hba *phba)
{
struct lpfc_queue *qdesc;
- int idx, eqidx, cpu;
+ int idx, cpu, eqcpu;
struct lpfc_sli4_hdw_queue *qp;
+ struct lpfc_vector_map_info *cpup;
+ struct lpfc_vector_map_info *eqcpup;
struct lpfc_eq_intr_info *eqi;
/*
@@ -8826,40 +8838,60 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
/* Create HBA Event Queues (EQs) */
- for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
- /* determine EQ affinity */
- eqidx = lpfc_find_eq_handle(phba, idx);
- cpu = lpfc_find_cpu_handle(phba, eqidx, LPFC_FIND_BY_EQ);
- /*
- * If there are more Hardware Queues than available
- * EQs, multiple Hardware Queues may share a common EQ.
+ for_each_present_cpu(cpu) {
+ /* We only want to create 1 EQ per vector, even though
+ * multiple CPUs might be using that vector, so only
+ * select the CPUs that are marked LPFC_CPU_FIRST_IRQ.
*/
- if (idx >= phba->cfg_irq_chann) {
- /* Share an existing EQ */
- phba->sli4_hba.hdwq[idx].hba_eq =
- phba->sli4_hba.hdwq[eqidx].hba_eq;
+ cpup = &phba->sli4_hba.cpu_map[cpu];
+ if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
continue;
- }
- /* Create an EQ */
+
+ /* Get a ptr to the Hardware Queue associated with this CPU */
+ qp = &phba->sli4_hba.hdwq[cpup->hdwq];
+
+ /* Allocate an EQ */
qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
phba->sli4_hba.eq_esize,
phba->sli4_hba.eq_ecount, cpu);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0497 Failed allocate EQ (%d)\n", idx);
+ "0497 Failed allocate EQ (%d)\n",
+ cpup->hdwq);
goto out_error;
}
qdesc->qe_valid = 1;
- qdesc->hdwq = idx;
-
- /* Save the CPU this EQ is affinitised to */
- qdesc->chann = cpu;
- phba->sli4_hba.hdwq[idx].hba_eq = qdesc;
+ qdesc->hdwq = cpup->hdwq;
+ qdesc->chann = cpu; /* First CPU this EQ is affinitised to */
qdesc->last_cpu = qdesc->chann;
+
+ /* Save the allocated EQ in the Hardware Queue */
+ qp->hba_eq = qdesc;
+
eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu);
list_add(&qdesc->cpu_list, &eqi->list);
}
+ /* Now we need to populate the other Hardware Queues that share
+ * an IRQ vector with the associated EQ ptr.
+ */
+ for_each_present_cpu(cpu) {
+ cpup = &phba->sli4_hba.cpu_map[cpu];
+
+ /* Check for EQ already allocated in previous loop */
+ if (cpup->flag & LPFC_CPU_FIRST_IRQ)
+ continue;
+
+ /* Check for multiple CPUs per hdwq */
+ qp = &phba->sli4_hba.hdwq[cpup->hdwq];
+ if (qp->hba_eq)
+ continue;
+
+ /* We need to share an EQ for this hdwq */
+ eqcpu = lpfc_find_cpu_handle(phba, cpup->eq, LPFC_FIND_BY_EQ);
+ eqcpup = &phba->sli4_hba.cpu_map[eqcpu];
+ qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq;
+ }
/* Allocate SCSI SLI4 CQ/WQs */
for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
@@ -9122,23 +9154,31 @@ static inline void
lpfc_sli4_release_hdwq(struct lpfc_hba *phba)
{
struct lpfc_sli4_hdw_queue *hdwq;
+ struct lpfc_queue *eq;
uint32_t idx;
hdwq = phba->sli4_hba.hdwq;
- for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
- if (idx < phba->cfg_irq_chann)
- lpfc_sli4_queue_free(hdwq[idx].hba_eq);
- hdwq[idx].hba_eq = NULL;
+ /* Loop thru all Hardware Queues */
+ for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
+ /* Free the CQ/WQ corresponding to the Hardware Queue */
lpfc_sli4_queue_free(hdwq[idx].fcp_cq);
lpfc_sli4_queue_free(hdwq[idx].nvme_cq);
lpfc_sli4_queue_free(hdwq[idx].fcp_wq);
lpfc_sli4_queue_free(hdwq[idx].nvme_wq);
+ hdwq[idx].hba_eq = NULL;
hdwq[idx].fcp_cq = NULL;
hdwq[idx].nvme_cq = NULL;
hdwq[idx].fcp_wq = NULL;
hdwq[idx].nvme_wq = NULL;
}
+ /* Loop thru all IRQ vectors */
+ for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
+ /* Free the EQ corresponding to the IRQ vector */
+ eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
+ lpfc_sli4_queue_free(eq);
+ phba->sli4_hba.hba_eq_hdl[idx].eq = NULL;
+ }
}
/**
@@ -9316,16 +9356,17 @@ static void
lpfc_setup_cq_lookup(struct lpfc_hba *phba)
{
struct lpfc_queue *eq, *childq;
- struct lpfc_sli4_hdw_queue *qp;
int qidx;
- qp = phba->sli4_hba.hdwq;
memset(phba->sli4_hba.cq_lookup, 0,
(sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1)));
+ /* Loop thru all IRQ vectors */
for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
- eq = qp[qidx].hba_eq;
+ /* Get the EQ corresponding to the IRQ vector */
+ eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
if (!eq)
continue;
+ /* Loop through all CQs associated with that EQ */
list_for_each_entry(childq, &eq->child_list, list) {
if (childq->queue_id > phba->sli4_hba.cq_max)
continue;
@@ -9354,9 +9395,10 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
{
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
+ struct lpfc_vector_map_info *cpup;
struct lpfc_sli4_hdw_queue *qp;
LPFC_MBOXQ_t *mboxq;
- int qidx;
+ int qidx, cpu;
uint32_t length, usdelay;
int rc = -ENOMEM;
@@ -9417,32 +9459,55 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
rc = -ENOMEM;
goto out_error;
}
+
+ /* Loop thru all IRQ vectors */
for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
- if (!qp[qidx].hba_eq) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0522 Fast-path EQ (%d) not "
- "allocated\n", qidx);
- rc = -ENOMEM;
- goto out_destroy;
- }
- rc = lpfc_eq_create(phba, qp[qidx].hba_eq,
- phba->cfg_fcp_imax);
- if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0523 Failed setup of fast-path EQ "
- "(%d), rc = 0x%x\n", qidx,
- (uint32_t)rc);
- goto out_destroy;
+ /* Create HBA Event Queues (EQs) in order */
+ for_each_present_cpu(cpu) {
+ cpup = &phba->sli4_hba.cpu_map[cpu];
+
+ /* Look for the CPU that's using that vector with
+ * LPFC_CPU_FIRST_IRQ set.
+ */
+ if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
+ continue;
+ if (qidx != cpup->eq)
+ continue;
+
+ /* Create an EQ for that vector */
+ rc = lpfc_eq_create(phba, qp[cpup->hdwq].hba_eq,
+ phba->cfg_fcp_imax);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0523 Failed setup of fast-path"
+ " EQ (%d), rc = 0x%x\n",
+ cpup->eq, (uint32_t)rc);
+ goto out_destroy;
+ }
+
+ /* Save the EQ for that vector in the hba_eq_hdl */
+ phba->sli4_hba.hba_eq_hdl[cpup->eq].eq =
+ qp[cpup->hdwq].hba_eq;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2584 HBA EQ setup: queue[%d]-id=%d\n",
+ cpup->eq,
+ qp[cpup->hdwq].hba_eq->queue_id);
}
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "2584 HBA EQ setup: queue[%d]-id=%d\n", qidx,
- qp[qidx].hba_eq->queue_id);
}
+ /* Loop thru all Hardware Queues */
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
+ cpu = lpfc_find_cpu_handle(phba, qidx,
+ LPFC_FIND_BY_HDWQ);
+ cpup = &phba->sli4_hba.cpu_map[cpu];
+
+ /* Create the CQ/WQ corresponding to the
+ * Hardware Queue
+ */
rc = lpfc_create_wq_cq(phba,
- qp[qidx].hba_eq,
+ phba->sli4_hba.hdwq[cpup->hdwq].hba_eq,
qp[qidx].nvme_cq,
qp[qidx].nvme_wq,
&phba->sli4_hba.hdwq[qidx].nvme_cq_map,
@@ -9458,8 +9523,12 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
}
for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
+ cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ);
+ cpup = &phba->sli4_hba.cpu_map[cpu];
+
+ /* Create the CQ/WQ corresponding to the Hardware Queue */
rc = lpfc_create_wq_cq(phba,
- qp[qidx].hba_eq,
+ phba->sli4_hba.hdwq[cpup->hdwq].hba_eq,
qp[qidx].fcp_cq,
qp[qidx].fcp_wq,
&phba->sli4_hba.hdwq[qidx].fcp_cq_map,
@@ -9711,6 +9780,7 @@ void
lpfc_sli4_queue_unset(struct lpfc_hba *phba)
{
struct lpfc_sli4_hdw_queue *qp;
+ struct lpfc_queue *eq;
int qidx;
/* Unset mailbox command work queue */
@@ -9762,14 +9832,20 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
/* Unset fast-path SLI4 queues */
if (phba->sli4_hba.hdwq) {
+ /* Loop thru all Hardware Queues */
for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
+ /* Destroy the CQ/WQ corresponding to Hardware Queue */
qp = &phba->sli4_hba.hdwq[qidx];
lpfc_wq_destroy(phba, qp->fcp_wq);
lpfc_wq_destroy(phba, qp->nvme_wq);
lpfc_cq_destroy(phba, qp->fcp_cq);
lpfc_cq_destroy(phba, qp->nvme_cq);
- if (qidx < phba->cfg_irq_chann)
- lpfc_eq_destroy(phba, qp->hba_eq);
+ }
+ /* Loop thru all IRQ vectors */
+ for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
+ /* Destroy the EQ corresponding to the IRQ vector */
+ eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
+ lpfc_eq_destroy(phba, eq);
}
}
@@ -10559,11 +10635,12 @@ lpfc_sli_disable_intr(struct lpfc_hba *phba)
}
/**
- * lpfc_find_cpu_handle - Find the CPU that corresponds to the specified EQ
+ * lpfc_find_cpu_handle - Find the CPU that corresponds to the specified Queue
* @phba: pointer to lpfc hba data structure.
* @id: EQ vector index or Hardware Queue index
* @match: LPFC_FIND_BY_EQ = match by EQ
* LPFC_FIND_BY_HDWQ = match by Hardware Queue
+ * Return the CPU that matches the selection criteria
*/
static uint16_t
lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match)
@@ -10571,40 +10648,27 @@ lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match)
struct lpfc_vector_map_info *cpup;
int cpu;
- /* Find the desired phys_id for the specified EQ */
+ /* Loop through all CPUs */
for_each_present_cpu(cpu) {
cpup = &phba->sli4_hba.cpu_map[cpu];
+
+ /* If we are matching by EQ, there may be multiple CPUs
+ * using the same vector, so select the one with
+ * LPFC_CPU_FIRST_IRQ set.
+ */
if ((match == LPFC_FIND_BY_EQ) &&
+ (cpup->flag & LPFC_CPU_FIRST_IRQ) &&
(cpup->irq != LPFC_VECTOR_MAP_EMPTY) &&
(cpup->eq == id))
return cpu;
+
+ /* If matching by HDWQ, select the first CPU that matches */
if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id))
return cpu;
}
return 0;
}
-/**
- * lpfc_find_eq_handle - Find the EQ that corresponds to the specified
- * Hardware Queue
- * @phba: pointer to lpfc hba data structure.
- * @hdwq: Hardware Queue index
- */
-static uint16_t
-lpfc_find_eq_handle(struct lpfc_hba *phba, uint16_t hdwq)
-{
- struct lpfc_vector_map_info *cpup;
- int cpu;
-
- /* Find the desired phys_id for the specified EQ */
- for_each_present_cpu(cpu) {
- cpup = &phba->sli4_hba.cpu_map[cpu];
- if (cpup->hdwq == hdwq)
- return cpup->eq;
- }
- return 0;
-}
-
#ifdef CONFIG_X86
/**
* lpfc_find_hyper - Determine if the CPU map entry is hyper-threaded
@@ -10645,24 +10709,31 @@ lpfc_find_hyper(struct lpfc_hba *phba, int cpu,
static void
lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
{
- int i, cpu, idx;
+ int i, cpu, idx, new_cpu, start_cpu, first_cpu;
int max_phys_id, min_phys_id;
int max_core_id, min_core_id;
struct lpfc_vector_map_info *cpup;
+ struct lpfc_vector_map_info *new_cpup;
const struct cpumask *maskp;
#ifdef CONFIG_X86
struct cpuinfo_x86 *cpuinfo;
#endif
/* Init cpu_map array */
- memset(phba->sli4_hba.cpu_map, 0xff,
- (sizeof(struct lpfc_vector_map_info) *
- phba->sli4_hba.num_possible_cpu));
+ for_each_possible_cpu(cpu) {
+ cpup = &phba->sli4_hba.cpu_map[cpu];
+ cpup->phys_id = LPFC_VECTOR_MAP_EMPTY;
+ cpup->core_id = LPFC_VECTOR_MAP_EMPTY;
+ cpup->hdwq = LPFC_VECTOR_MAP_EMPTY;
+ cpup->eq = LPFC_VECTOR_MAP_EMPTY;
+ cpup->irq = LPFC_VECTOR_MAP_EMPTY;
+ cpup->flag = 0;
+ }
max_phys_id = 0;
- min_phys_id = 0xffff;
+ min_phys_id = LPFC_VECTOR_MAP_EMPTY;
max_core_id = 0;
- min_core_id = 0xffff;
+ min_core_id = LPFC_VECTOR_MAP_EMPTY;
/* Update CPU map with physical id and core id of each CPU */
for_each_present_cpu(cpu) {
@@ -10671,13 +10742,12 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
cpuinfo = &cpu_data(cpu);
cpup->phys_id = cpuinfo->phys_proc_id;
cpup->core_id = cpuinfo->cpu_core_id;
- cpup->hyper = lpfc_find_hyper(phba, cpu,
- cpup->phys_id, cpup->core_id);
+ if (lpfc_find_hyper(phba, cpu, cpup->phys_id, cpup->core_id))
+ cpup->flag |= LPFC_CPU_MAP_HYPER;
#else
/* No distinction between CPUs for other platforms */
cpup->phys_id = 0;
cpup->core_id = cpu;
- cpup->hyper = 0;
#endif
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
@@ -10703,23 +10773,216 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
eqi->icnt = 0;
}
+ /* This loop sets up all CPUs that are affinitized with an
+ * IRQ vector assigned to the driver. All affinitized CPUs
+ * will get a link to that vector's IRQ and EQ.
+ */
for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
+ /* Get a CPU mask for all CPUs affinitized to this vector */
maskp = pci_irq_get_affinity(phba->pcidev, idx);
if (!maskp)
continue;
+ i = 0;
+ /* Loop through all CPUs associated with vector idx */
for_each_cpu_and(cpu, maskp, cpu_present_mask) {
+ /* Set the EQ index and IRQ for that vector */
cpup = &phba->sli4_hba.cpu_map[cpu];
cpup->eq = idx;
- cpup->hdwq = idx;
cpup->irq = pci_irq_vector(phba->pcidev, idx);
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"3336 Set Affinity: CPU %d "
- "hdwq %d irq %d\n",
- cpu, cpup->hdwq, cpup->irq);
+ "irq %d eq %d\n",
+ cpu, cpup->irq, cpup->eq);
+
+ /* If this is the first CPU that's assigned to this
+ * vector, set LPFC_CPU_FIRST_IRQ.
+ */
+ if (!i)
+ cpup->flag |= LPFC_CPU_FIRST_IRQ;
+ i++;
}
}
+
+ /* After looking at each IRQ vector assigned to this pcidev, it's
+ * possible to see that not ALL CPUs have been accounted for.
+ * Next we will set any unassigned (unaffinitized) cpu map
+ * entries to an IRQ on the same phys_id.
+ */
+ first_cpu = cpumask_first(cpu_present_mask);
+ start_cpu = first_cpu;
+
+ for_each_present_cpu(cpu) {
+ cpup = &phba->sli4_hba.cpu_map[cpu];
+
+ /* Is this CPU entry unassigned */
+ if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
+ /* Mark CPU as IRQ not assigned by the kernel */
+ cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
+
+ /* If so, find a new_cpup that's on the SAME
+ * phys_id as cpup. start_cpu will start where we
+ * left off so all unassigned entries don't get assigned
+ * the IRQ of the first entry.
+ */
+ new_cpu = start_cpu;
+ for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+ new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
+ if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
+ (new_cpup->irq != LPFC_VECTOR_MAP_EMPTY) &&
+ (new_cpup->phys_id == cpup->phys_id))
+ goto found_same;
+ new_cpu = cpumask_next(
+ new_cpu, cpu_present_mask);
+ if (new_cpu == nr_cpumask_bits)
+ new_cpu = first_cpu;
+ }
+ /* At this point, we leave the CPU as unassigned */
+ continue;
+found_same:
+ /* We found a matching phys_id, so copy the IRQ info */
+ cpup->eq = new_cpup->eq;
+ cpup->irq = new_cpup->irq;
+
+ /* Bump start_cpu to the next slot to minimize the
+ * chance of having multiple unassigned CPU entries
+ * selecting the same IRQ.
+ */
+ start_cpu = cpumask_next(new_cpu, cpu_present_mask);
+ if (start_cpu == nr_cpumask_bits)
+ start_cpu = first_cpu;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "3337 Set Affinity: CPU %d "
+ "irq %d from id %d same "
+ "phys_id (%d)\n",
+ cpu, cpup->irq, new_cpu, cpup->phys_id);
+ }
+ }
+
+ /* Set any unassigned cpu map entries to an IRQ on any phys_id */
+ start_cpu = first_cpu;
+
+ for_each_present_cpu(cpu) {
+ cpup = &phba->sli4_hba.cpu_map[cpu];
+
+ /* Is this entry unassigned */
+ if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
+ /* Mark it as IRQ not assigned by the kernel */
+ cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
+
+ /* If so, find a new_cpup that's on ANY phys_id, not
+ * just cpup's. start_cpu will start where we
+ * left off so all unassigned entries don't get
+ * assigned the IRQ of the first entry.
+ */
+ new_cpu = start_cpu;
+ for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+ new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
+ if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
+ (new_cpup->irq != LPFC_VECTOR_MAP_EMPTY))
+ goto found_any;
+ new_cpu = cpumask_next(
+ new_cpu, cpu_present_mask);
+ if (new_cpu == nr_cpumask_bits)
+ new_cpu = first_cpu;
+ }
+ /* We should never leave an entry unassigned */
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3339 Set Affinity: CPU %d "
+ "irq %d UNASSIGNED\n",
+ cpu, cpup->irq);
+ continue;
+found_any:
+ /* We found an available entry, copy the IRQ info */
+ cpup->eq = new_cpup->eq;
+ cpup->irq = new_cpup->irq;
+
+ /* Bump start_cpu to the next slot to minimize the
+ * chance of having multiple unassigned CPU entries
+ * selecting the same IRQ.
+ */
+ start_cpu = cpumask_next(new_cpu, cpu_present_mask);
+ if (start_cpu == nr_cpumask_bits)
+ start_cpu = first_cpu;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "3338 Set Affinity: CPU %d "
+ "irq %d from id %d (%d/%d)\n",
+ cpu, cpup->irq, new_cpu,
+ new_cpup->phys_id, new_cpup->core_id);
+ }
+ }
+
+ /* Finally we need to associate an hdwq with each cpu_map entry.
+ * This will be 1 to 1 - hdwq to cpu, unless there are fewer
+ * hardware queues than CPUs. In that case we will just round-robin
+ * the available hardware queues as they get assigned to CPUs.
+ */
+ idx = 0;
+ start_cpu = 0;
+ for_each_present_cpu(cpu) {
+ cpup = &phba->sli4_hba.cpu_map[cpu];
+ if (idx >= phba->cfg_hdw_queue) {
+ /* We need to reuse a Hardware Queue for another CPU,
+ * so be smart about it and pick one that has its
+ * IRQ/EQ mapped to the same phys_id (CPU package)
+ * and core_id.
+ */
+ new_cpu = start_cpu;
+ for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+ new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
+ if ((new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY) &&
+ (new_cpup->phys_id == cpup->phys_id) &&
+ (new_cpup->core_id == cpup->core_id))
+ goto found_hdwq;
+ new_cpu = cpumask_next(
+ new_cpu, cpu_present_mask);
+ if (new_cpu == nr_cpumask_bits)
+ new_cpu = first_cpu;
+ }
+
+ /* If we can't match both phys_id and core_id,
+ * settle for just a phys_id match.
+ */
+ new_cpu = start_cpu;
+ for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+ new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
+ if ((new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY) &&
+ (new_cpup->phys_id == cpup->phys_id))
+ goto found_hdwq;
+ new_cpu = cpumask_next(
+ new_cpu, cpu_present_mask);
+ if (new_cpu == nr_cpumask_bits)
+ new_cpu = first_cpu;
+ }
+
+ /* Otherwise just round robin on cfg_hdw_queue */
+ cpup->hdwq = idx % phba->cfg_hdw_queue;
+ goto logit;
+found_hdwq:
+ /* We found an available entry, use its hdwq */
+ start_cpu = cpumask_next(new_cpu, cpu_present_mask);
+ if (start_cpu == nr_cpumask_bits)
+ start_cpu = first_cpu;
+ cpup->hdwq = new_cpup->hdwq;
+ } else {
+ /* 1 to 1, CPU to hdwq */
+ cpup->hdwq = idx;
+ }
+logit:
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3335 Set Affinity: CPU %d (phys %d core %d): "
+ "hdwq %d eq %d irq %d flg x%x\n",
+ cpu, cpup->phys_id, cpup->core_id,
+ cpup->hdwq, cpup->eq, cpup->irq, cpup->flag);
+ idx++;
+ }
+
+ /* The cpu_map array will be used later during initialization
+ * when EQ / CQ / WQs are allocated and configured.
+ */
return;
}
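All three assignment passes in lpfc_cpu_affinity_check() above rely on the same wrap-around walk over cpu_present_mask: start from a remembered position, advance with cpumask_next(), and wrap back to the first present CPU once cpumask_next() runs past the end. Below is a condensed, self-contained sketch of that search; the callback type and helper name are hypothetical stand-ins for the driver's phys_id/core_id tests.

/* Sketch: wrap-around search over present CPUs (hypothetical callback). */
#include <linux/cpumask.h>

typedef bool (*cpu_match_fn)(int cpu, void *arg);

static int find_cpu_wraparound(int start_cpu, cpu_match_fn match, void *arg)
{
	int first_cpu = cpumask_first(cpu_present_mask);
	int cpu = start_cpu;
	unsigned int i;

	for (i = 0; i < num_present_cpus(); i++) {
		if (match(cpu, arg))
			return cpu;

		/* Advance, wrapping back to the first present CPU. */
		cpu = cpumask_next(cpu, cpu_present_mask);
		if (cpu >= nr_cpumask_bits)
			cpu = first_cpu;
	}
	return -1;	/* nothing matched */
}

Bumping start_cpu after each successful match, as the driver does, spreads consecutive unassigned CPUs across different vectors instead of piling them all onto the first hit.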
@@ -11331,24 +11594,43 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
mbx_sli4_parameters);
phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
- phba->nvme_support = (bf_get(cfg_nvme, mbx_sli4_parameters) &&
- bf_get(cfg_xib, mbx_sli4_parameters));
-
- if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) ||
- !phba->nvme_support) {
- phba->nvme_support = 0;
- phba->nvmet_support = 0;
- phba->cfg_nvmet_mrq = 0;
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
- "6101 Disabling NVME support: "
- "Not supported by firmware: %d %d\n",
- bf_get(cfg_nvme, mbx_sli4_parameters),
- bf_get(cfg_xib, mbx_sli4_parameters));
-
- /* If firmware doesn't support NVME, just use SCSI support */
- if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
- return -ENODEV;
- phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
+
+ /* Check for firmware nvme support */
+ rc = (bf_get(cfg_nvme, mbx_sli4_parameters) &&
+ bf_get(cfg_xib, mbx_sli4_parameters));
+
+ if (rc) {
+ /* Save this to indicate the Firmware supports NVME */
+ sli4_params->nvme = 1;
+
+ /* Firmware NVME support, check driver FC4 NVME support */
+ if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
+ "6133 Disabling NVME support: "
+ "FC4 type not supported: x%x\n",
+ phba->cfg_enable_fc4_type);
+ goto fcponly;
+ }
+ } else {
+ /* No firmware NVME support, check driver FC4 NVME support */
+ sli4_params->nvme = 0;
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
+ "6101 Disabling NVME support: Not "
+ "supported by firmware (%d %d) x%x\n",
+ bf_get(cfg_nvme, mbx_sli4_parameters),
+ bf_get(cfg_xib, mbx_sli4_parameters),
+ phba->cfg_enable_fc4_type);
+fcponly:
+ phba->nvme_support = 0;
+ phba->nvmet_support = 0;
+ phba->cfg_nvmet_mrq = 0;
+
+ /* If no FC4 type support, move to just SCSI support */
+ if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
+ return -ENODEV;
+ phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
+ }
}
/* Only embed PBDE for if_type 6, PBDE support requires xib be set */
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 9d99cb915390..946642cee3df 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -2143,7 +2143,9 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
struct completion *lport_unreg_cmp)
{
u32 wait_tmo;
- int ret;
+ int ret, i, pending = 0;
+ struct lpfc_sli_ring *pring;
+ struct lpfc_hba *phba = vport->phba;
/* Host transport has to clean up and confirm requiring an indefinite
* wait. Print a message if a 10 second wait expires and renew the
@@ -2153,10 +2155,18 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
while (true) {
ret = wait_for_completion_timeout(lport_unreg_cmp, wait_tmo);
if (unlikely(!ret)) {
+ pending = 0;
+ for (i = 0; i < phba->cfg_hdw_queue; i++) {
+ pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
+ if (!pring)
+ continue;
+ if (pring->txcmplq_cnt)
+ pending += pring->txcmplq_cnt;
+ }
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
"6176 Lport %p Localport %p wait "
- "timed out. Renewing.\n",
- lport, vport->localport);
+ "timed out. Pending %d. Renewing.\n",
+ lport, vport->localport, pending);
continue;
}
break;
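The loop above works because wait_for_completion_timeout() returns 0 on timeout and the remaining jiffies otherwise, so a timed-out wait can be logged and simply re-armed. A bare-bones sketch of that renewable wait; the function name and message are hypothetical.

/* Sketch: an indefinitely renewable wait with periodic progress logging. */
#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/printk.h>

static void example_wait_with_progress(struct completion *done)
{
	unsigned long tmo = msecs_to_jiffies(10000);	/* 10s per round */

	while (!wait_for_completion_timeout(done, tmo)) {
		/* Timed out: report progress and keep waiting. */
		pr_warn("still waiting for teardown to complete\n");
	}
}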
@@ -2402,6 +2412,50 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
#endif
}
+/**
+ * lpfc_nvme_rescan_port - Check to see if we should rescan this remoteport
+ *
+ * If the ndlp represents an NVME Target, that we are logged into,
+ * ping the NVME FC Transport layer to initiate a device rescan
+ * on this remote NPort.
+ */
+void
+lpfc_nvme_rescan_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+#if (IS_ENABLED(CONFIG_NVME_FC))
+ struct lpfc_nvme_rport *rport;
+ struct nvme_fc_remote_port *remoteport;
+
+ rport = ndlp->nrport;
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
+ "6170 Rescan NPort DID x%06x type x%x "
+ "state x%x rport %p\n",
+ ndlp->nlp_DID, ndlp->nlp_type, ndlp->nlp_state, rport);
+ if (!rport)
+ goto input_err;
+ remoteport = rport->remoteport;
+ if (!remoteport)
+ goto input_err;
+
+ /* Only rescan if we are an NVME target in the MAPPED state */
+ if (remoteport->port_role & FC_PORT_ROLE_NVME_DISCOVERY &&
+ ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
+ nvme_fc_rescan_remoteport(remoteport);
+
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
+ "6172 NVME rescanned DID x%06x "
+ "port_state x%x\n",
+ ndlp->nlp_DID, remoteport->port_state);
+ }
+ return;
+input_err:
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
+ "6169 State error: lport %p, rport%p FCID x%06x\n",
+ vport->localport, ndlp->rport, ndlp->nlp_DID);
+#endif
+}
+
/* lpfc_nvme_unregister_port - unbind the DID and port_role from this rport.
*
* There is no notion of Devloss or rport recovery from the current
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index d74bfd264495..faa596f9e861 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -220,19 +220,68 @@ lpfc_nvmet_cmd_template(void)
/* Word 12, 13, 14, 15 - is zero */
}
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
+static struct lpfc_nvmet_rcv_ctx *
+lpfc_nvmet_get_ctx_for_xri(struct lpfc_hba *phba, u16 xri)
+{
+ struct lpfc_nvmet_rcv_ctx *ctxp;
+ unsigned long iflag;
+ bool found = false;
+
+ spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
+ list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) {
+ if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
+ continue;
+
+ found = true;
+ break;
+ }
+ spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
+ if (found)
+ return ctxp;
+
+ return NULL;
+}
+
+static struct lpfc_nvmet_rcv_ctx *
+lpfc_nvmet_get_ctx_for_oxid(struct lpfc_hba *phba, u16 oxid, u32 sid)
+{
+ struct lpfc_nvmet_rcv_ctx *ctxp;
+ unsigned long iflag;
+ bool found = false;
+
+ spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
+ list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) {
+ if (ctxp->oxid != oxid || ctxp->sid != sid)
+ continue;
+
+ found = true;
+ break;
+ }
+ spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
+ if (found)
+ return ctxp;
+
+ return NULL;
+}
+#endif
+
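Both lookup helpers above use the same defensive shape: walk the list under the lock, remember whether a match was actually hit, and only return the cursor if it was, because after list_for_each_entry() runs off the end the cursor no longer points at a valid element. A generic sketch of that pattern; the structure and field names are hypothetical.

/* Sketch: lookup in a locked list, returning NULL when nothing matches. */
#include <linux/list.h>
#include <linux/spinlock.h>

struct example_ctx {
	struct list_head list;
	u16 tag;
};

static struct example_ctx *
example_find_ctx(spinlock_t *lock, struct list_head *head, u16 tag)
{
	struct example_ctx *ctx;
	unsigned long flags;
	bool found = false;

	spin_lock_irqsave(lock, flags);
	list_for_each_entry(ctx, head, list) {
		if (ctx->tag != tag)
			continue;
		found = true;
		break;
	}
	spin_unlock_irqrestore(lock, flags);

	/* 'ctx' is only meaningful if the loop broke out on a match. */
	return found ? ctx : NULL;
}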
static void
lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp)
{
lockdep_assert_held(&ctxp->ctxlock);
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
- "6313 NVMET Defer ctx release xri x%x flg x%x\n",
+ "6313 NVMET Defer ctx release oxid x%x flg x%x\n",
ctxp->oxid, ctxp->flag);
if (ctxp->flag & LPFC_NVMET_CTX_RLS)
return;
ctxp->flag |= LPFC_NVMET_CTX_RLS;
+ spin_lock(&phba->sli4_hba.t_active_list_lock);
+ list_del(&ctxp->list);
+ spin_unlock(&phba->sli4_hba.t_active_list_lock);
spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
@@ -343,16 +392,23 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
}
if (ctxp->rqb_buffer) {
- nvmebuf = ctxp->rqb_buffer;
spin_lock_irqsave(&ctxp->ctxlock, iflag);
- ctxp->rqb_buffer = NULL;
- if (ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) {
- ctxp->flag &= ~LPFC_NVMET_CTX_REUSE_WQ;
- spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
- nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
+ nvmebuf = ctxp->rqb_buffer;
+ /* check if freed in another path whilst acquiring lock */
+ if (nvmebuf) {
+ ctxp->rqb_buffer = NULL;
+ if (ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) {
+ ctxp->flag &= ~LPFC_NVMET_CTX_REUSE_WQ;
+ spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
+ nvmebuf->hrq->rqbp->rqb_free_buffer(phba,
+ nvmebuf);
+ } else {
+ spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
+ /* repost */
+ lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
+ }
} else {
spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
- lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
}
}
ctxp->state = LPFC_NVMET_STE_FREE;
@@ -388,8 +444,9 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
spin_lock_init(&ctxp->ctxlock);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- if (ctxp->ts_cmd_nvme) {
- ctxp->ts_cmd_nvme = ktime_get_ns();
+ /* NOTE: isr time stamp is stale when context is re-assigned */
+ if (ctxp->ts_isr_cmd) {
+ ctxp->ts_cmd_nvme = 0;
ctxp->ts_nvme_data = 0;
ctxp->ts_data_wqput = 0;
ctxp->ts_isr_data = 0;
@@ -402,9 +459,7 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
#endif
atomic_inc(&tgtp->rcv_fcp_cmd_in);
- /* flag new work queued, replacement buffer has already
- * been reposted
- */
+ /* Indicate that a replacement buffer has been posted */
spin_lock_irqsave(&ctxp->ctxlock, iflag);
ctxp->flag |= LPFC_NVMET_CTX_REUSE_WQ;
spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
@@ -433,6 +488,9 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
* Use the CPU context list, from the MRQ the IO was received on
* (ctxp->idx), to save context structure.
*/
+ spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
+ list_del_init(&ctxp->list);
+ spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
cpu = raw_smp_processor_id();
infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx);
spin_lock_irqsave(&infop->nvmet_ctx_list_lock, iflag);
@@ -700,8 +758,10 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
}
lpfc_printf_log(phba, KERN_INFO, logerr,
- "6315 IO Error Cmpl xri x%x: %x/%x XBUSY:x%x\n",
- ctxp->oxid, status, result, ctxp->flag);
+ "6315 IO Error Cmpl oxid: x%x xri: x%x %x/%x "
+ "XBUSY:x%x\n",
+ ctxp->oxid, ctxp->ctxbuf->sglq->sli4_xritag,
+ status, result, ctxp->flag);
} else {
rsp->fcp_error = NVME_SC_SUCCESS;
@@ -849,7 +909,6 @@ lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
* before freeing ctxp and iocbq.
*/
lpfc_in_buf_free(phba, &nvmebuf->dbuf);
- ctxp->rqb_buffer = 0;
atomic_inc(&nvmep->xmt_ls_rsp);
return 0;
}
@@ -922,7 +981,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
(ctxp->state == LPFC_NVMET_STE_ABORT)) {
atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
- "6102 IO xri x%x aborted\n",
+ "6102 IO oxid x%x aborted\n",
ctxp->oxid);
rc = -ENXIO;
goto aerr;
@@ -1022,7 +1081,7 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
ctxp->hdwq = &phba->sli4_hba.hdwq[0];
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
- "6103 NVMET Abort op: oxri x%x flg x%x ste %d\n",
+ "6103 NVMET Abort op: oxid x%x flg x%x ste %d\n",
ctxp->oxid, ctxp->flag, ctxp->state);
lpfc_nvmeio_data(phba, "NVMET FCP ABRT: xri x%x flg x%x ste x%x\n",
@@ -1035,7 +1094,7 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
/* Since iaab/iaar are NOT set, we need to check
* if the firmware is in process of aborting IO
*/
- if (ctxp->flag & LPFC_NVMET_XBUSY) {
+ if (ctxp->flag & (LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP)) {
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
return;
}
@@ -1098,6 +1157,7 @@ lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
ctxp->state, aborting);
atomic_inc(&lpfc_nvmep->xmt_fcp_release);
+ ctxp->flag &= ~LPFC_NVMET_TNOTIFY;
if (aborting)
return;
@@ -1122,7 +1182,7 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
if (!nvmebuf) {
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
- "6425 Defer rcv: no buffer xri x%x: "
+ "6425 Defer rcv: no buffer oxid x%x: "
"flg %x ste %x\n",
ctxp->oxid, ctxp->flag, ctxp->state);
return;
@@ -1139,6 +1199,22 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
}
+static void
+lpfc_nvmet_discovery_event(struct nvmet_fc_target_port *tgtport)
+{
+ struct lpfc_nvmet_tgtport *tgtp;
+ struct lpfc_hba *phba;
+ uint32_t rc;
+
+ tgtp = tgtport->private;
+ phba = tgtp->phba;
+
+ rc = lpfc_issue_els_rscn(phba->pport, 0);
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+ "6420 NVMET subsystem change: Notification %s\n",
+ (rc) ? "Failed" : "Sent");
+}
+
static struct nvmet_fc_target_template lpfc_tgttemplate = {
.targetport_delete = lpfc_nvmet_targetport_delete,
.xmt_ls_rsp = lpfc_nvmet_xmt_ls_rsp,
@@ -1146,6 +1222,7 @@ static struct nvmet_fc_target_template lpfc_tgttemplate = {
.fcp_abort = lpfc_nvmet_xmt_fcp_abort,
.fcp_req_release = lpfc_nvmet_xmt_fcp_release,
.defer_rcv = lpfc_nvmet_defer_rcv,
+ .discovery_event = lpfc_nvmet_discovery_event,
.max_hw_queues = 1,
.max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
@@ -1497,10 +1574,12 @@ void
lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
struct sli4_wcqe_xri_aborted *axri)
{
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
struct lpfc_nvmet_tgtport *tgtp;
+ struct nvmefc_tgt_fcp_req *req = NULL;
struct lpfc_nodelist *ndlp;
unsigned long iflag = 0;
int rrq_empty = 0;
@@ -1531,7 +1610,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
*/
if (ctxp->flag & LPFC_NVMET_CTX_RLS &&
!(ctxp->flag & LPFC_NVMET_ABORT_OP)) {
- list_del(&ctxp->list);
+ list_del_init(&ctxp->list);
released = true;
}
ctxp->flag &= ~LPFC_NVMET_XBUSY;
@@ -1551,7 +1630,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
}
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
- "6318 XB aborted oxid %x flg x%x (%x)\n",
+ "6318 XB aborted oxid x%x flg x%x (%x)\n",
ctxp->oxid, ctxp->flag, released);
if (released)
lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
@@ -1562,6 +1641,33 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
}
spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
spin_unlock_irqrestore(&phba->hbalock, iflag);
+
+ ctxp = lpfc_nvmet_get_ctx_for_xri(phba, xri);
+ if (ctxp) {
+ /*
+ * Abort already done by FW, so BA_ACC sent.
+ * However, the transport may be unaware.
+ */
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
+ "6323 NVMET Rcv ABTS xri x%x ctxp state x%x "
+ "flag x%x oxid x%x rxid x%x\n",
+ xri, ctxp->state, ctxp->flag, ctxp->oxid,
+ rxid);
+
+ spin_lock_irqsave(&ctxp->ctxlock, iflag);
+ ctxp->flag |= LPFC_NVMET_ABTS_RCV;
+ ctxp->state = LPFC_NVMET_STE_ABORT;
+ spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
+
+ lpfc_nvmeio_data(phba,
+ "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
+ xri, raw_smp_processor_id(), 0);
+
+ req = &ctxp->ctx.fcp_req;
+ if (req)
+ nvmet_fc_rcv_fcp_abort(phba->targetport, req);
+ }
+#endif
}
int
@@ -1572,19 +1678,23 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
struct lpfc_hba *phba = vport->phba;
struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
struct nvmefc_tgt_fcp_req *rsp;
- uint16_t xri;
+ uint32_t sid;
+ uint16_t oxid, xri;
unsigned long iflag = 0;
- xri = be16_to_cpu(fc_hdr->fh_ox_id);
+ sid = sli4_sid_from_fc_hdr(fc_hdr);
+ oxid = be16_to_cpu(fc_hdr->fh_ox_id);
spin_lock_irqsave(&phba->hbalock, iflag);
spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
list_for_each_entry_safe(ctxp, next_ctxp,
&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
list) {
- if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
+ if (ctxp->oxid != oxid || ctxp->sid != sid)
continue;
+ xri = ctxp->ctxbuf->sglq->sli4_xritag;
+
spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
spin_unlock_irqrestore(&phba->hbalock, iflag);
@@ -1609,11 +1719,93 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
spin_unlock_irqrestore(&phba->hbalock, iflag);
- lpfc_nvmeio_data(phba, "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
- xri, raw_smp_processor_id(), 1);
+ /* check the wait list */
+ if (phba->sli4_hba.nvmet_io_wait_cnt) {
+ struct rqb_dmabuf *nvmebuf;
+ struct fc_frame_header *fc_hdr_tmp;
+ u32 sid_tmp;
+ u16 oxid_tmp;
+ bool found = false;
+
+ spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
+
+ /* match by oxid and s_id */
+ list_for_each_entry(nvmebuf,
+ &phba->sli4_hba.lpfc_nvmet_io_wait_list,
+ hbuf.list) {
+ fc_hdr_tmp = (struct fc_frame_header *)
+ (nvmebuf->hbuf.virt);
+ oxid_tmp = be16_to_cpu(fc_hdr_tmp->fh_ox_id);
+ sid_tmp = sli4_sid_from_fc_hdr(fc_hdr_tmp);
+ if (oxid_tmp != oxid || sid_tmp != sid)
+ continue;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
+ "6321 NVMET Rcv ABTS oxid x%x from x%x "
+ "is waiting for a ctxp\n",
+ oxid, sid);
+
+ list_del_init(&nvmebuf->hbuf.list);
+ phba->sli4_hba.nvmet_io_wait_cnt--;
+ found = true;
+ break;
+ }
+ spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
+ iflag);
+
+ /* free the buffer since a replacement DMA buffer was already posted to the RQ */
+ if (found) {
+ nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
+ /* Respond with BA_ACC accordingly */
+ lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
+ return 0;
+ }
+ }
+
+ /* check active list */
+ ctxp = lpfc_nvmet_get_ctx_for_oxid(phba, oxid, sid);
+ if (ctxp) {
+ xri = ctxp->ctxbuf->sglq->sli4_xritag;
+
+ spin_lock_irqsave(&ctxp->ctxlock, iflag);
+ ctxp->flag |= (LPFC_NVMET_ABTS_RCV | LPFC_NVMET_ABORT_OP);
+ spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
+
+ lpfc_nvmeio_data(phba,
+ "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
+ xri, raw_smp_processor_id(), 0);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
+ "6322 NVMET Rcv ABTS:acc oxid x%x xri x%x "
+ "flag x%x state x%x\n",
+ ctxp->oxid, xri, ctxp->flag, ctxp->state);
+
+ if (ctxp->flag & LPFC_NVMET_TNOTIFY) {
+ /* Notify the transport */
+ nvmet_fc_rcv_fcp_abort(phba->targetport,
+ &ctxp->ctx.fcp_req);
+ } else {
+ cancel_work_sync(&ctxp->ctxbuf->defer_work);
+ spin_lock_irqsave(&ctxp->ctxlock, iflag);
+ lpfc_nvmet_defer_release(phba, ctxp);
+ spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
+ }
+ if (ctxp->state == LPFC_NVMET_STE_RCV)
+ lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
+ ctxp->oxid);
+ else
+ lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
+ ctxp->oxid);
+
+ lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
+ return 0;
+ }
+
+ lpfc_nvmeio_data(phba, "NVMET ABTS RCV: oxid x%x CPU %02x rjt %d\n",
+ oxid, raw_smp_processor_id(), 1);
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
- "6320 NVMET Rcv ABTS:rjt xri x%x\n", xri);
+ "6320 NVMET Rcv ABTS:rjt oxid x%x\n", oxid);
/* Respond with BA_RJT accordingly */
lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0);
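The receive-abort path above now keys its searches on the exchange originator ID (OX_ID) and the 24-bit source ID taken from the frame header, rather than on a driver XRI. Pulling those two values out of a struct fc_frame_header is plain byte assembly; a small sketch follows, where the helper name is hypothetical and sli4_sid_from_fc_hdr() is the driver's own macro for the same job.

/* Sketch: pulling OX_ID and the 24-bit S_ID out of an FC frame header. */
#include <linux/types.h>
#include <asm/byteorder.h>
#include <uapi/scsi/fc/fc_fs.h>

static void example_parse_hdr(const struct fc_frame_header *fh,
			      u16 *oxid, u32 *sid)
{
	/* OX_ID is a big-endian 16-bit field. */
	*oxid = be16_to_cpu(fh->fh_ox_id);

	/* S_ID is three raw bytes: domain, area, port. */
	*sid = (fh->fh_s_id[0] << 16) | (fh->fh_s_id[1] << 8) | fh->fh_s_id[2];
}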
@@ -1697,6 +1889,18 @@ lpfc_nvmet_wqfull_process(struct lpfc_hba *phba,
spin_unlock_irqrestore(&pring->ring_lock, iflags);
return;
}
+ if (rc == WQE_SUCCESS) {
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ if (ctxp->ts_cmd_nvme) {
+ if (ctxp->ctx.fcp_req.op == NVMET_FCOP_RSP)
+ ctxp->ts_status_wqput = ktime_get_ns();
+ else
+ ctxp->ts_data_wqput = ktime_get_ns();
+ }
+#endif
+ } else {
+ WARN_ON(rc);
+ }
}
wq->q_flag &= ~HBA_NVMET_WQFULL;
spin_unlock_irqrestore(&pring->ring_lock, iflags);
@@ -1862,8 +2066,20 @@ lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf)
return;
}
+ if (ctxp->flag & LPFC_NVMET_ABTS_RCV) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ "6324 IO oxid x%x aborted\n",
+ ctxp->oxid);
+ return;
+ }
+
payload = (uint32_t *)(nvmebuf->dbuf.virt);
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+ ctxp->flag |= LPFC_NVMET_TNOTIFY;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ if (ctxp->ts_isr_cmd)
+ ctxp->ts_cmd_nvme = ktime_get_ns();
+#endif
/*
* The calling sequence should be:
* nvmet_fc_rcv_fcp_req->lpfc_nvmet_xmt_fcp_op/cmp- req->done
@@ -1913,6 +2129,7 @@ lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf)
phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
return;
}
+ ctxp->flag &= ~LPFC_NVMET_TNOTIFY;
atomic_inc(&tgtp->rcv_fcp_cmd_drop);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
@@ -2002,6 +2219,8 @@ lpfc_nvmet_replenish_context(struct lpfc_hba *phba,
* @phba: pointer to lpfc hba data structure.
* @idx: relative index of MRQ vector
* @nvmebuf: pointer to lpfc nvme command HBQ data structure.
+ * @isr_timestamp: in jiffies.
+ * @cqflag: cq processing information regarding workload.
*
* This routine is used for processing the WQE associated with an unsolicited
* event. It first determines whether there is an existing ndlp that matches
@@ -2014,7 +2233,8 @@ static void
lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
uint32_t idx,
struct rqb_dmabuf *nvmebuf,
- uint64_t isr_timestamp)
+ uint64_t isr_timestamp,
+ uint8_t cqflag)
{
struct lpfc_nvmet_rcv_ctx *ctxp;
struct lpfc_nvmet_tgtport *tgtp;
@@ -2101,6 +2321,9 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
sid = sli4_sid_from_fc_hdr(fc_hdr);
ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
+ spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
+ list_add_tail(&ctxp->list, &phba->sli4_hba.t_active_ctx_list);
+ spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
if (ctxp->state != LPFC_NVMET_STE_FREE) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6414 NVMET Context corrupt %d %d oxid x%x\n",
@@ -2123,24 +2346,41 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
spin_lock_init(&ctxp->ctxlock);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- if (isr_timestamp) {
+ if (isr_timestamp)
ctxp->ts_isr_cmd = isr_timestamp;
- ctxp->ts_cmd_nvme = ktime_get_ns();
- ctxp->ts_nvme_data = 0;
- ctxp->ts_data_wqput = 0;
- ctxp->ts_isr_data = 0;
- ctxp->ts_data_nvme = 0;
- ctxp->ts_nvme_status = 0;
- ctxp->ts_status_wqput = 0;
- ctxp->ts_isr_status = 0;
- ctxp->ts_status_nvme = 0;
- } else {
- ctxp->ts_cmd_nvme = 0;
- }
+ ctxp->ts_cmd_nvme = 0;
+ ctxp->ts_nvme_data = 0;
+ ctxp->ts_data_wqput = 0;
+ ctxp->ts_isr_data = 0;
+ ctxp->ts_data_nvme = 0;
+ ctxp->ts_nvme_status = 0;
+ ctxp->ts_status_wqput = 0;
+ ctxp->ts_isr_status = 0;
+ ctxp->ts_status_nvme = 0;
#endif
atomic_inc(&tgtp->rcv_fcp_cmd_in);
- lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
+ /* check for cq processing load */
+ if (!cqflag) {
+ lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
+ return;
+ }
+
+ if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
+ atomic_inc(&tgtp->rcv_fcp_cmd_drop);
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+ "6325 Unable to queue work for oxid x%x. "
+ "FCP Drop IO [x%x x%x x%x]\n",
+ ctxp->oxid,
+ atomic_read(&tgtp->rcv_fcp_cmd_in),
+ atomic_read(&tgtp->rcv_fcp_cmd_out),
+ atomic_read(&tgtp->xmt_fcp_release));
+
+ spin_lock_irqsave(&ctxp->ctxlock, iflag);
+ lpfc_nvmet_defer_release(phba, ctxp);
+ spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
+ lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
+ }
}
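
For orientation, the hunk above boils down to a small dispatch decision: handle the command in the current context when the CQ is lightly loaded, otherwise defer it to a bound workqueue and fall back to an abort if the work item cannot be queued. A minimal sketch of that pattern, with purely illustrative helper names (handle_inline(), abort_and_release()) rather than the driver's own:

#include <linux/workqueue.h>

struct rx_ctx {
	struct work_struct defer_work;
	/* ... per-command state ... */
};

static void handle_inline(struct rx_ctx *ctx) { /* fast-path processing */ }
static void abort_and_release(struct rx_ctx *ctx) { /* drop and abort the exchange */ }

static void dispatch_rx(struct workqueue_struct *wq, struct rx_ctx *ctx, bool cq_busy)
{
	if (!cq_busy) {
		/* Light CQ load: process in the caller's context. */
		handle_inline(ctx);
		return;
	}
	/* Heavy CQ load: hand off to the workqueue. queue_work() returns
	 * false only if defer_work is already pending, which the driver
	 * above treats as a drop-and-abort condition. */
	if (!queue_work(wq, &ctx->defer_work))
		abort_and_release(ctx);
}
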
/**
@@ -2177,6 +2417,8 @@ lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
* @phba: pointer to lpfc hba data structure.
* @idx: relative index of MRQ vector
* @nvmebuf: pointer to received nvme data structure.
+ * @isr_timestamp: interrupt timestamp in nanoseconds (from ktime_get_ns()).
+ * @cqflag: cq processing information regarding workload.
*
* This routine is used to process an unsolicited event received from a SLI
* (Service Level Interface) ring. The actual processing of the data buffer
@@ -2188,14 +2430,14 @@ void
lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
uint32_t idx,
struct rqb_dmabuf *nvmebuf,
- uint64_t isr_timestamp)
+ uint64_t isr_timestamp,
+ uint8_t cqflag)
{
if (phba->nvmet_support == 0) {
lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
return;
}
- lpfc_nvmet_unsol_fcp_buffer(phba, idx, nvmebuf,
- isr_timestamp);
+ lpfc_nvmet_unsol_fcp_buffer(phba, idx, nvmebuf, isr_timestamp, cqflag);
}
/**
@@ -2662,8 +2904,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
nvmewqe->context1 = ndlp;
- for (i = 0; i < rsp->sg_cnt; i++) {
- sgel = &rsp->sg[i];
+ for_each_sg(rsp->sg, sgel, rsp->sg_cnt, i) {
physaddr = sg_dma_address(sgel);
cnt = sg_dma_len(sgel);
sgl->addr_hi = putPaddrHigh(physaddr);
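
The change above replaces manual indexing of rsp->sg[] with the scatterlist iterator; sg_dma_address() and sg_dma_len() stay the same. A small self-contained illustration of the same idiom (assuming the list was already DMA-mapped; the function name is made up for this example):

#include <linux/scatterlist.h>
#include <linux/printk.h>

/* Walk a DMA-mapped scatter/gather list with for_each_sg() and report
 * the total mapped length. */
static u64 total_mapped_len(struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	dma_addr_t addr;
	u64 total = 0;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		addr = sg_dma_address(sg);
		total += sg_dma_len(sg);
		pr_debug("seg %d: addr %pad len %u\n", i, &addr, sg_dma_len(sg));
	}
	return total;
}
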
@@ -2733,7 +2974,7 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
!(ctxp->flag & LPFC_NVMET_XBUSY)) {
spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
- list_del(&ctxp->list);
+ list_del_init(&ctxp->list);
spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
released = true;
}
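
A note on the list_del() to list_del_init() conversions in this hunk and the two below: list_del() leaves the removed node's pointers poisoned, whereas list_del_init() re-initializes them, so the entry can later be tested with list_empty() or unlinked again without tripping over poison values, which matters now that ctxp->list is also threaded onto the new t_active_ctx_list. A tiny sketch of the difference:

#include <linux/list.h>

struct item {
	struct list_head list;
};

static void remove_item(struct item *it)
{
	/* list_del(&it->list) would leave the node poisoned; after
	 * list_del_init() the node is a valid empty list, so callers
	 * may safely check list_empty() or delete it a second time. */
	list_del_init(&it->list);
	WARN_ON(!list_empty(&it->list));
}
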
@@ -2742,7 +2983,7 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
atomic_inc(&tgtp->xmt_abort_rsp);
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
- "6165 ABORT cmpl: xri x%x flg x%x (%d) "
+ "6165 ABORT cmpl: oxid x%x flg x%x (%d) "
"WCQE: %08x %08x %08x %08x\n",
ctxp->oxid, ctxp->flag, released,
wcqe->word0, wcqe->total_data_placed,
@@ -2817,7 +3058,7 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
!(ctxp->flag & LPFC_NVMET_XBUSY)) {
spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
- list_del(&ctxp->list);
+ list_del_init(&ctxp->list);
spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
released = true;
}
@@ -2826,7 +3067,7 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
atomic_inc(&tgtp->xmt_abort_rsp);
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
- "6316 ABTS cmpl xri x%x flg x%x (%x) "
+ "6316 ABTS cmpl oxid x%x flg x%x (%x) "
"WCQE: %08x %08x %08x %08x\n",
ctxp->oxid, ctxp->flag, released,
wcqe->word0, wcqe->total_data_placed,
@@ -3197,7 +3438,7 @@ aerr:
spin_lock_irqsave(&ctxp->ctxlock, flags);
if (ctxp->flag & LPFC_NVMET_CTX_RLS) {
spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
- list_del(&ctxp->list);
+ list_del_init(&ctxp->list);
spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
released = true;
}
@@ -3206,8 +3447,9 @@ aerr:
atomic_inc(&tgtp->xmt_abort_rsp_error);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
- "6135 Failed to Issue ABTS for oxid x%x. Status x%x\n",
- ctxp->oxid, rc);
+ "6135 Failed to Issue ABTS for oxid x%x. Status x%x "
+ "(%x)\n",
+ ctxp->oxid, rc, released);
if (released)
lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
return 1;
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h
index 2f3f603d94c4..8ff67deac10a 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.h
+++ b/drivers/scsi/lpfc/lpfc_nvmet.h
@@ -140,6 +140,7 @@ struct lpfc_nvmet_rcv_ctx {
#define LPFC_NVMET_ABTS_RCV 0x10 /* ABTS received on exchange */
#define LPFC_NVMET_CTX_REUSE_WQ 0x20 /* ctx reused via WQ */
#define LPFC_NVMET_DEFER_WQFULL 0x40 /* Waiting on a free WQE */
+#define LPFC_NVMET_TNOTIFY 0x80 /* notify transport of abts */
struct rqb_dmabuf *rqb_buffer;
struct lpfc_nvmet_ctxbuf *ctxbuf;
struct lpfc_sli4_hdw_queue *hdwq;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index ba996fbde89b..f9df800e7067 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -3879,10 +3879,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
*/
spin_lock(&lpfc_cmd->buf_lock);
lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED;
- if (lpfc_cmd->waitq) {
+ if (lpfc_cmd->waitq)
wake_up(lpfc_cmd->waitq);
- lpfc_cmd->waitq = NULL;
- }
spin_unlock(&lpfc_cmd->buf_lock);
lpfc_release_scsi_buf(phba, lpfc_cmd);
@@ -4718,6 +4716,9 @@ wait_for_cmpl:
iocb->sli4_xritag, ret,
cmnd->device->id, cmnd->device->lun);
}
+
+ lpfc_cmd->waitq = NULL;
+
spin_unlock(&lpfc_cmd->buf_lock);
goto out;
@@ -4797,7 +4798,12 @@ lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd)
rsp_info,
rsp_len, rsp_info_code);
- if ((fcprsp->rspStatus2&RSP_LEN_VALID) && (rsp_len == 8)) {
+ /* If FCP_RSP_LEN_VALID bit is one, then the FCP_RSP_LEN
+ * field specifies the number of valid bytes of FCP_RSP_INFO.
+ * The FCP_RSP_LEN field shall be set to 0x04 or 0x08
+ */
+ if ((fcprsp->rspStatus2 & RSP_LEN_VALID) &&
+ ((rsp_len == 8) || (rsp_len == 4))) {
switch (rsp_info_code) {
case RSP_NO_FAILURE:
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
@@ -5741,7 +5747,7 @@ lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
/* Create an lun info structure and add to list of luns */
lun_info = lpfc_create_device_data(phba, vport_wwpn, target_wwpn, lun,
- pri, false);
+ pri, true);
if (lun_info) {
lun_info->oas_enabled = true;
lun_info->priority = pri;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index d1512e4f9791..f9e6a135d656 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -108,7 +108,7 @@ lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
* endianness. This function can be called with or without
* lock.
**/
-void
+static void
lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
uint64_t *src = srcp;
@@ -5571,6 +5571,7 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
int qidx;
struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
struct lpfc_sli4_hdw_queue *qp;
+ struct lpfc_queue *eq;
sli4_hba->sli4_write_cq_db(phba, sli4_hba->mbx_cq, 0, LPFC_QUEUE_REARM);
sli4_hba->sli4_write_cq_db(phba, sli4_hba->els_cq, 0, LPFC_QUEUE_REARM);
@@ -5578,18 +5579,24 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
sli4_hba->sli4_write_cq_db(phba, sli4_hba->nvmels_cq, 0,
LPFC_QUEUE_REARM);
- qp = sli4_hba->hdwq;
if (sli4_hba->hdwq) {
+ /* Loop thru all Hardware Queues */
for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
- sli4_hba->sli4_write_cq_db(phba, qp[qidx].fcp_cq, 0,
+ qp = &sli4_hba->hdwq[qidx];
+ /* ARM the corresponding CQ */
+ sli4_hba->sli4_write_cq_db(phba, qp->fcp_cq, 0,
LPFC_QUEUE_REARM);
- sli4_hba->sli4_write_cq_db(phba, qp[qidx].nvme_cq, 0,
+ sli4_hba->sli4_write_cq_db(phba, qp->nvme_cq, 0,
LPFC_QUEUE_REARM);
}
- for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++)
- sli4_hba->sli4_write_eq_db(phba, qp[qidx].hba_eq,
- 0, LPFC_QUEUE_REARM);
+ /* Loop thru all IRQ vectors */
+ for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
+ eq = sli4_hba->hba_eq_hdl[qidx].eq;
+ /* ARM the corresponding EQ */
+ sli4_hba->sli4_write_eq_db(phba, eq,
+ 0, LPFC_QUEUE_REARM);
+ }
}
if (phba->nvmet_support) {
@@ -7875,26 +7882,28 @@ lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
* and will process all the completions associated with the eq for the
* mailbox completion queue.
**/
-bool
+static bool
lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
{
struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
uint32_t eqidx;
struct lpfc_queue *fpeq = NULL;
+ struct lpfc_queue *eq;
bool mbox_pending;
if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
return false;
- /* Find the eq associated with the mcq */
-
- if (sli4_hba->hdwq)
- for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++)
- if (sli4_hba->hdwq[eqidx].hba_eq->queue_id ==
- sli4_hba->mbx_cq->assoc_qid) {
- fpeq = sli4_hba->hdwq[eqidx].hba_eq;
+ /* Find the EQ associated with the mbox CQ */
+ if (sli4_hba->hdwq) {
+ for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++) {
+ eq = phba->sli4_hba.hba_eq_hdl[eqidx].eq;
+ if (eq->queue_id == sli4_hba->mbx_cq->assoc_qid) {
+ fpeq = eq;
break;
}
+ }
+ }
if (!fpeq)
return false;
@@ -9398,6 +9407,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
*pcmd == ELS_CMD_SCR ||
+ *pcmd == ELS_CMD_RSCN_XMT ||
*pcmd == ELS_CMD_FDISC ||
*pcmd == ELS_CMD_LOGO ||
*pcmd == ELS_CMD_PLOGI)) {
@@ -13604,14 +13614,9 @@ __lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
goto rearm_and_exit;
/* Process all the entries to the CQ */
+ cq->q_flag = 0;
cqe = lpfc_sli4_cq_get(cq);
while (cqe) {
-#if defined(CONFIG_SCSI_LPFC_DEBUG_FS) && defined(BUILD_NVME)
- if (phba->ktime_on)
- cq->isr_timestamp = ktime_get_ns();
- else
- cq->isr_timestamp = 0;
-#endif
workposted |= handler(phba, cq, cqe);
__lpfc_sli4_consume_cqe(phba, cq, cqe);
@@ -13625,6 +13630,9 @@ __lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
consumed = 0;
}
+ if (count == LPFC_NVMET_CQ_NOTIFY)
+ cq->q_flag |= HBA_NVMET_CQ_NOTIFY;
+
cqe = lpfc_sli4_cq_get(cq);
}
if (count >= phba->cfg_cq_poll_threshold) {
@@ -13940,10 +13948,10 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
goto drop;
if (fc_hdr->fh_type == FC_TYPE_FCP) {
- dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
+ dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
lpfc_nvmet_unsol_fcp_event(
- phba, idx, dma_buf,
- cq->isr_timestamp);
+ phba, idx, dma_buf, cq->isr_timestamp,
+ cq->q_flag & HBA_NVMET_CQ_NOTIFY);
return false;
}
drop:
@@ -14109,6 +14117,12 @@ process_cq:
}
work_cq:
+#if defined(CONFIG_SCSI_LPFC_DEBUG_FS)
+ if (phba->ktime_on)
+ cq->isr_timestamp = ktime_get_ns();
+ else
+ cq->isr_timestamp = 0;
+#endif
if (!queue_work_on(cq->chann, phba->wq, &cq->irqwork))
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0363 Cannot schedule soft IRQ "
@@ -14235,7 +14249,7 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
return IRQ_NONE;
/* Get to the EQ struct associated with this vector */
- fpeq = phba->sli4_hba.hdwq[hba_eqidx].hba_eq;
+ fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq;
if (unlikely(!fpeq))
return IRQ_NONE;
@@ -14520,7 +14534,7 @@ lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
/* set values by EQ_DELAY register if supported */
if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
- eq = phba->sli4_hba.hdwq[qidx].hba_eq;
+ eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
if (!eq)
continue;
@@ -14529,7 +14543,6 @@ lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
if (++cnt >= numq)
break;
}
-
return;
}
@@ -14557,7 +14570,7 @@ lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
dmult = LPFC_DMULT_MAX;
for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
- eq = phba->sli4_hba.hdwq[qidx].hba_eq;
+ eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
if (!eq)
continue;
eq->q_mode = usdelay;
@@ -14659,8 +14672,10 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0360 Unsupported EQ count. (%d)\n",
eq->entry_count);
- if (eq->entry_count < 256)
- return -EINVAL;
+ if (eq->entry_count < 256) {
+ status = -EINVAL;
+ goto out;
+ }
/* fall through - otherwise default to smallest count */
case 256:
bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
@@ -14712,7 +14727,7 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
eq->host_index = 0;
eq->notify_interval = LPFC_EQ_NOTIFY_INTRVL;
eq->max_proc_limit = LPFC_EQ_MAX_PROC_LIMIT;
-
+out:
mempool_free(mbox, phba->mbox_mem_pool);
return status;
}
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 8e4fd1a98023..3aeca387b22a 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -197,6 +197,8 @@ struct lpfc_queue {
#define LPFC_DB_LIST_FORMAT 0x02
uint8_t q_flag;
#define HBA_NVMET_WQFULL 0x1 /* We hit WQ Full condition for NVMET */
+#define HBA_NVMET_CQ_NOTIFY 0x1 /* LPFC_NVMET_CQ_NOTIFY CQEs this EQE */
+#define LPFC_NVMET_CQ_NOTIFY 4
void __iomem *db_regaddr;
uint16_t dpp_enable;
uint16_t dpp_id;
@@ -450,6 +452,7 @@ struct lpfc_hba_eq_hdl {
uint32_t idx;
char handler_name[LPFC_SLI4_HANDLER_NAME_SZ];
struct lpfc_hba *phba;
+ struct lpfc_queue *eq;
};
/*BB Credit recovery value*/
@@ -512,6 +515,7 @@ struct lpfc_pc_sli4_params {
#define LPFC_WQ_SZ64_SUPPORT 1
#define LPFC_WQ_SZ128_SUPPORT 2
uint8_t wqpcnt;
+ uint8_t nvme;
};
#define LPFC_CQ_4K_PAGE_SZ 0x1
@@ -546,7 +550,10 @@ struct lpfc_vector_map_info {
uint16_t irq;
uint16_t eq;
uint16_t hdwq;
- uint16_t hyper;
+ uint16_t flag;
+#define LPFC_CPU_MAP_HYPER 0x1
+#define LPFC_CPU_MAP_UNASSIGN 0x2
+#define LPFC_CPU_FIRST_IRQ 0x4
};
#define LPFC_VECTOR_MAP_EMPTY 0xffff
@@ -843,6 +850,8 @@ struct lpfc_sli4_hba {
struct list_head lpfc_nvmet_sgl_list;
spinlock_t abts_nvmet_buf_list_lock; /* list of aborted NVMET IOs */
struct list_head lpfc_abts_nvmet_ctx_list;
+ spinlock_t t_active_list_lock; /* list of active NVMET IOs */
+ struct list_head t_active_ctx_list;
struct list_head lpfc_nvmet_io_wait_list;
struct lpfc_nvmet_ctx_info *nvmet_ctx_info;
struct lpfc_sglq **lpfc_sglq_active_list;
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 220a932fe943..f7e93aaf1e00 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "12.2.0.2"
+#define LPFC_DRIVER_VERSION "12.2.0.3"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c
index dba9517d9553..9c5566217ef6 100644
--- a/drivers/scsi/mac_scsi.c
+++ b/drivers/scsi/mac_scsi.c
@@ -4,6 +4,8 @@
*
* Copyright 1998, Michael Schmitz <mschmitz@lbl.gov>
*
+ * Copyright 2019 Finn Thain
+ *
* derived in part from:
*/
/*
@@ -12,6 +14,7 @@
* Copyright 1995, Russell King
*/
+#include <linux/delay.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/ioport.h>
@@ -22,6 +25,7 @@
#include <asm/hwtest.h>
#include <asm/io.h>
+#include <asm/macintosh.h>
#include <asm/macints.h>
#include <asm/setup.h>
@@ -53,7 +57,7 @@ static int setup_cmd_per_lun = -1;
module_param(setup_cmd_per_lun, int, 0);
static int setup_sg_tablesize = -1;
module_param(setup_sg_tablesize, int, 0);
-static int setup_use_pdma = -1;
+static int setup_use_pdma = 512;
module_param(setup_use_pdma, int, 0);
static int setup_hostid = -1;
module_param(setup_hostid, int, 0);
@@ -90,223 +94,318 @@ static int __init mac_scsi_setup(char *str)
__setup("mac5380=", mac_scsi_setup);
#endif /* !MODULE */
-/* Pseudo DMA asm originally by Ove Edlund */
-
-#define CP_IO_TO_MEM(s,d,n) \
-__asm__ __volatile__ \
- (" cmp.w #4,%2\n" \
- " bls 8f\n" \
- " move.w %1,%%d0\n" \
- " neg.b %%d0\n" \
- " and.w #3,%%d0\n" \
- " sub.w %%d0,%2\n" \
- " bra 2f\n" \
- " 1: move.b (%0),(%1)+\n" \
- " 2: dbf %%d0,1b\n" \
- " move.w %2,%%d0\n" \
- " lsr.w #5,%%d0\n" \
- " bra 4f\n" \
- " 3: move.l (%0),(%1)+\n" \
- "31: move.l (%0),(%1)+\n" \
- "32: move.l (%0),(%1)+\n" \
- "33: move.l (%0),(%1)+\n" \
- "34: move.l (%0),(%1)+\n" \
- "35: move.l (%0),(%1)+\n" \
- "36: move.l (%0),(%1)+\n" \
- "37: move.l (%0),(%1)+\n" \
- " 4: dbf %%d0,3b\n" \
- " move.w %2,%%d0\n" \
- " lsr.w #2,%%d0\n" \
- " and.w #7,%%d0\n" \
- " bra 6f\n" \
- " 5: move.l (%0),(%1)+\n" \
- " 6: dbf %%d0,5b\n" \
- " and.w #3,%2\n" \
- " bra 8f\n" \
- " 7: move.b (%0),(%1)+\n" \
- " 8: dbf %2,7b\n" \
- " moveq.l #0, %2\n" \
- " 9: \n" \
- ".section .fixup,\"ax\"\n" \
- " .even\n" \
- "91: moveq.l #1, %2\n" \
- " jra 9b\n" \
- "94: moveq.l #4, %2\n" \
- " jra 9b\n" \
- ".previous\n" \
- ".section __ex_table,\"a\"\n" \
- " .align 4\n" \
- " .long 1b,91b\n" \
- " .long 3b,94b\n" \
- " .long 31b,94b\n" \
- " .long 32b,94b\n" \
- " .long 33b,94b\n" \
- " .long 34b,94b\n" \
- " .long 35b,94b\n" \
- " .long 36b,94b\n" \
- " .long 37b,94b\n" \
- " .long 5b,94b\n" \
- " .long 7b,91b\n" \
- ".previous" \
- : "=a"(s), "=a"(d), "=d"(n) \
- : "0"(s), "1"(d), "2"(n) \
- : "d0")
+/*
+ * According to "Inside Macintosh: Devices", Mac OS requires disk drivers to
+ * specify the number of bytes between the delays expected from a SCSI target.
+ * This allows the operating system to "prevent bus errors when a target fails
+ * to deliver the next byte within the processor bus error timeout period."
+ * Linux SCSI drivers lack knowledge of the timing behaviour of SCSI targets
+ * so bus errors are unavoidable.
+ *
+ * If a MOVE.B instruction faults, we assume that zero bytes were transferred
+ * and simply retry. That assumption probably depends on target behaviour but
+ * seems to hold up okay. The NOP provides synchronization: without it the
+ * fault can sometimes occur after the program counter has moved past the
+ * offending instruction. Post-increment addressing can't be used.
+ */
+
+#define MOVE_BYTE(operands) \
+ asm volatile ( \
+ "1: moveb " operands " \n" \
+ "11: nop \n" \
+ " addq #1,%0 \n" \
+ " subq #1,%1 \n" \
+ "40: \n" \
+ " \n" \
+ ".section .fixup,\"ax\" \n" \
+ ".even \n" \
+ "90: movel #1, %2 \n" \
+ " jra 40b \n" \
+ ".previous \n" \
+ " \n" \
+ ".section __ex_table,\"a\" \n" \
+ ".align 4 \n" \
+ ".long 1b,90b \n" \
+ ".long 11b,90b \n" \
+ ".previous \n" \
+ : "+a" (addr), "+r" (n), "+r" (result) : "a" (io))
+
+/*
+ * If a MOVE.W (or MOVE.L) instruction faults, it cannot be retried because
+ * the residual byte count would be uncertain. In that situation the MOVE_WORD
+ * macro clears n in the fixup section to abort the transfer.
+ */
+
+#define MOVE_WORD(operands) \
+ asm volatile ( \
+ "1: movew " operands " \n" \
+ "11: nop \n" \
+ " subq #2,%1 \n" \
+ "40: \n" \
+ " \n" \
+ ".section .fixup,\"ax\" \n" \
+ ".even \n" \
+ "90: movel #0, %1 \n" \
+ " movel #2, %2 \n" \
+ " jra 40b \n" \
+ ".previous \n" \
+ " \n" \
+ ".section __ex_table,\"a\" \n" \
+ ".align 4 \n" \
+ ".long 1b,90b \n" \
+ ".long 11b,90b \n" \
+ ".previous \n" \
+ : "+a" (addr), "+r" (n), "+r" (result) : "a" (io))
+
+#define MOVE_16_WORDS(operands) \
+ asm volatile ( \
+ "1: movew " operands " \n" \
+ "2: movew " operands " \n" \
+ "3: movew " operands " \n" \
+ "4: movew " operands " \n" \
+ "5: movew " operands " \n" \
+ "6: movew " operands " \n" \
+ "7: movew " operands " \n" \
+ "8: movew " operands " \n" \
+ "9: movew " operands " \n" \
+ "10: movew " operands " \n" \
+ "11: movew " operands " \n" \
+ "12: movew " operands " \n" \
+ "13: movew " operands " \n" \
+ "14: movew " operands " \n" \
+ "15: movew " operands " \n" \
+ "16: movew " operands " \n" \
+ "17: nop \n" \
+ " subl #32,%1 \n" \
+ "40: \n" \
+ " \n" \
+ ".section .fixup,\"ax\" \n" \
+ ".even \n" \
+ "90: movel #0, %1 \n" \
+ " movel #2, %2 \n" \
+ " jra 40b \n" \
+ ".previous \n" \
+ " \n" \
+ ".section __ex_table,\"a\" \n" \
+ ".align 4 \n" \
+ ".long 1b,90b \n" \
+ ".long 2b,90b \n" \
+ ".long 3b,90b \n" \
+ ".long 4b,90b \n" \
+ ".long 5b,90b \n" \
+ ".long 6b,90b \n" \
+ ".long 7b,90b \n" \
+ ".long 8b,90b \n" \
+ ".long 9b,90b \n" \
+ ".long 10b,90b \n" \
+ ".long 11b,90b \n" \
+ ".long 12b,90b \n" \
+ ".long 13b,90b \n" \
+ ".long 14b,90b \n" \
+ ".long 15b,90b \n" \
+ ".long 16b,90b \n" \
+ ".long 17b,90b \n" \
+ ".previous \n" \
+ : "+a" (addr), "+r" (n), "+r" (result) : "a" (io))
+
+#define MAC_PDMA_DELAY 32
+
+static inline int mac_pdma_recv(void __iomem *io, unsigned char *start, int n)
+{
+ unsigned char *addr = start;
+ int result = 0;
+
+ if (n >= 1) {
+ MOVE_BYTE("%3@,%0@");
+ if (result)
+ goto out;
+ }
+ if (n >= 1 && ((unsigned long)addr & 1)) {
+ MOVE_BYTE("%3@,%0@");
+ if (result)
+ goto out;
+ }
+ while (n >= 32)
+ MOVE_16_WORDS("%3@,%0@+");
+ while (n >= 2)
+ MOVE_WORD("%3@,%0@+");
+ if (result)
+ return start - addr; /* Negated to indicate uncertain length */
+ if (n == 1)
+ MOVE_BYTE("%3@,%0@");
+out:
+ return addr - start;
+}
+
+static inline int mac_pdma_send(unsigned char *start, void __iomem *io, int n)
+{
+ unsigned char *addr = start;
+ int result = 0;
+
+ if (n >= 1) {
+ MOVE_BYTE("%0@,%3@");
+ if (result)
+ goto out;
+ }
+ if (n >= 1 && ((unsigned long)addr & 1)) {
+ MOVE_BYTE("%0@,%3@");
+ if (result)
+ goto out;
+ }
+ while (n >= 32)
+ MOVE_16_WORDS("%0@+,%3@");
+ while (n >= 2)
+ MOVE_WORD("%0@+,%3@");
+ if (result)
+ return start - addr; /* Negated to indicate uncertain length */
+ if (n == 1)
+ MOVE_BYTE("%0@,%3@");
+out:
+ return addr - start;
+}
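+
+For orientation, the return convention of mac_pdma_recv()/mac_pdma_send() as the comments above describe it is: a positive value is the number of bytes actually moved, zero means the target stalled on a byte transfer (worth a short delay and a retry), and a negative value signals a word-sized bus error where the residual is uncertain. Stripped of the DRQ/phase polling, the consuming loop in macscsi_pread() below amounts to roughly this (a condensed sketch, not driver code):
+
+/* Condensed sketch of how the signed return value is consumed. */
+static int pdma_recv_loop(void __iomem *io, unsigned char *buf, int len)
+{
+	int residual = len;
+
+	while (residual > 0) {
+		int bytes = mac_pdma_recv(io, buf + (len - residual),
+					  min(residual, 512));
+
+		if (bytes > 0)			/* real progress */
+			residual -= bytes;
+		else if (bytes == 0)		/* target stalled: back off */
+			udelay(MAC_PDMA_DELAY);
+		else				/* bus error, length uncertain */
+			return -1;
+	}
+	return 0;
+}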
+
+/* The "SCSI DMA" chip on the IIfx implements this register. */
+#define CTRL_REG 0x8
+#define CTRL_INTERRUPTS_ENABLE BIT(1)
+#define CTRL_HANDSHAKE_MODE BIT(3)
+
+static inline void write_ctrl_reg(struct NCR5380_hostdata *hostdata, u32 value)
+{
+ out_be32(hostdata->io + (CTRL_REG << 4), value);
+}
static inline int macscsi_pread(struct NCR5380_hostdata *hostdata,
unsigned char *dst, int len)
{
u8 __iomem *s = hostdata->pdma_io + (INPUT_DATA_REG << 4);
unsigned char *d = dst;
- int n = len;
- int transferred;
+ int result = 0;
+
+ hostdata->pdma_residual = len;
while (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
BASR_DRQ | BASR_PHASE_MATCH,
BASR_DRQ | BASR_PHASE_MATCH, HZ / 64)) {
- CP_IO_TO_MEM(s, d, n);
+ int bytes;
+
+ if (macintosh_config->ident == MAC_MODEL_IIFX)
+ write_ctrl_reg(hostdata, CTRL_HANDSHAKE_MODE |
+ CTRL_INTERRUPTS_ENABLE);
- transferred = d - dst - n;
- hostdata->pdma_residual = len - transferred;
+ bytes = mac_pdma_recv(s, d, min(hostdata->pdma_residual, 512));
- /* No bus error. */
- if (n == 0)
- return 0;
+ if (bytes > 0) {
+ d += bytes;
+ hostdata->pdma_residual -= bytes;
+ }
+
+ if (hostdata->pdma_residual == 0)
+ goto out;
- /* Target changed phase early? */
if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ,
- BUS_AND_STATUS_REG, BASR_ACK, BASR_ACK, HZ / 64) < 0)
- scmd_printk(KERN_ERR, hostdata->connected,
+ BUS_AND_STATUS_REG, BASR_ACK,
+ BASR_ACK, HZ / 64) < 0)
+ scmd_printk(KERN_DEBUG, hostdata->connected,
"%s: !REQ and !ACK\n", __func__);
if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH))
- return 0;
+ goto out;
+
+ if (bytes == 0)
+ udelay(MAC_PDMA_DELAY);
+
+ if (bytes >= 0)
+ continue;
dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host,
- "%s: bus error (%d/%d)\n", __func__, transferred, len);
+ "%s: bus error (%d/%d)\n", __func__, d - dst, len);
NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
- d = dst + transferred;
- n = len - transferred;
+ result = -1;
+ goto out;
}
scmd_printk(KERN_ERR, hostdata->connected,
"%s: phase mismatch or !DRQ\n", __func__);
NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
- return -1;
+ result = -1;
+out:
+ if (macintosh_config->ident == MAC_MODEL_IIFX)
+ write_ctrl_reg(hostdata, CTRL_INTERRUPTS_ENABLE);
+ return result;
}
-
-#define CP_MEM_TO_IO(s,d,n) \
-__asm__ __volatile__ \
- (" cmp.w #4,%2\n" \
- " bls 8f\n" \
- " move.w %0,%%d0\n" \
- " neg.b %%d0\n" \
- " and.w #3,%%d0\n" \
- " sub.w %%d0,%2\n" \
- " bra 2f\n" \
- " 1: move.b (%0)+,(%1)\n" \
- " 2: dbf %%d0,1b\n" \
- " move.w %2,%%d0\n" \
- " lsr.w #5,%%d0\n" \
- " bra 4f\n" \
- " 3: move.l (%0)+,(%1)\n" \
- "31: move.l (%0)+,(%1)\n" \
- "32: move.l (%0)+,(%1)\n" \
- "33: move.l (%0)+,(%1)\n" \
- "34: move.l (%0)+,(%1)\n" \
- "35: move.l (%0)+,(%1)\n" \
- "36: move.l (%0)+,(%1)\n" \
- "37: move.l (%0)+,(%1)\n" \
- " 4: dbf %%d0,3b\n" \
- " move.w %2,%%d0\n" \
- " lsr.w #2,%%d0\n" \
- " and.w #7,%%d0\n" \
- " bra 6f\n" \
- " 5: move.l (%0)+,(%1)\n" \
- " 6: dbf %%d0,5b\n" \
- " and.w #3,%2\n" \
- " bra 8f\n" \
- " 7: move.b (%0)+,(%1)\n" \
- " 8: dbf %2,7b\n" \
- " moveq.l #0, %2\n" \
- " 9: \n" \
- ".section .fixup,\"ax\"\n" \
- " .even\n" \
- "91: moveq.l #1, %2\n" \
- " jra 9b\n" \
- "94: moveq.l #4, %2\n" \
- " jra 9b\n" \
- ".previous\n" \
- ".section __ex_table,\"a\"\n" \
- " .align 4\n" \
- " .long 1b,91b\n" \
- " .long 3b,94b\n" \
- " .long 31b,94b\n" \
- " .long 32b,94b\n" \
- " .long 33b,94b\n" \
- " .long 34b,94b\n" \
- " .long 35b,94b\n" \
- " .long 36b,94b\n" \
- " .long 37b,94b\n" \
- " .long 5b,94b\n" \
- " .long 7b,91b\n" \
- ".previous" \
- : "=a"(s), "=a"(d), "=d"(n) \
- : "0"(s), "1"(d), "2"(n) \
- : "d0")
-
static inline int macscsi_pwrite(struct NCR5380_hostdata *hostdata,
unsigned char *src, int len)
{
unsigned char *s = src;
u8 __iomem *d = hostdata->pdma_io + (OUTPUT_DATA_REG << 4);
- int n = len;
- int transferred;
+ int result = 0;
+
+ hostdata->pdma_residual = len;
while (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
BASR_DRQ | BASR_PHASE_MATCH,
BASR_DRQ | BASR_PHASE_MATCH, HZ / 64)) {
- CP_MEM_TO_IO(s, d, n);
+ int bytes;
- transferred = s - src - n;
- hostdata->pdma_residual = len - transferred;
+ if (macintosh_config->ident == MAC_MODEL_IIFX)
+ write_ctrl_reg(hostdata, CTRL_HANDSHAKE_MODE |
+ CTRL_INTERRUPTS_ENABLE);
- /* Target changed phase early? */
- if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ,
- BUS_AND_STATUS_REG, BASR_ACK, BASR_ACK, HZ / 64) < 0)
- scmd_printk(KERN_ERR, hostdata->connected,
- "%s: !REQ and !ACK\n", __func__);
- if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH))
- return 0;
+ bytes = mac_pdma_send(s, d, min(hostdata->pdma_residual, 512));
+
+ if (bytes > 0) {
+ s += bytes;
+ hostdata->pdma_residual -= bytes;
+ }
- /* No bus error. */
- if (n == 0) {
+ if (hostdata->pdma_residual == 0) {
if (NCR5380_poll_politely(hostdata, TARGET_COMMAND_REG,
TCR_LAST_BYTE_SENT,
- TCR_LAST_BYTE_SENT, HZ / 64) < 0)
+ TCR_LAST_BYTE_SENT,
+ HZ / 64) < 0) {
scmd_printk(KERN_ERR, hostdata->connected,
"%s: Last Byte Sent timeout\n", __func__);
- return 0;
+ result = -1;
+ }
+ goto out;
}
+ if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ,
+ BUS_AND_STATUS_REG, BASR_ACK,
+ BASR_ACK, HZ / 64) < 0)
+ scmd_printk(KERN_DEBUG, hostdata->connected,
+ "%s: !REQ and !ACK\n", __func__);
+ if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH))
+ goto out;
+
+ if (bytes == 0)
+ udelay(MAC_PDMA_DELAY);
+
+ if (bytes >= 0)
+ continue;
+
dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host,
- "%s: bus error (%d/%d)\n", __func__, transferred, len);
+ "%s: bus error (%d/%d)\n", __func__, s - src, len);
NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
- s = src + transferred;
- n = len - transferred;
+ result = -1;
+ goto out;
}
scmd_printk(KERN_ERR, hostdata->connected,
"%s: phase mismatch or !DRQ\n", __func__);
NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
-
- return -1;
+ result = -1;
+out:
+ if (macintosh_config->ident == MAC_MODEL_IIFX)
+ write_ctrl_reg(hostdata, CTRL_INTERRUPTS_ENABLE);
+ return result;
}
static int macscsi_dma_xfer_len(struct NCR5380_hostdata *hostdata,
struct scsi_cmnd *cmd)
{
if (hostdata->flags & FLAG_NO_PSEUDO_DMA ||
- cmd->SCp.this_residual < 16)
+ cmd->SCp.this_residual < setup_use_pdma)
return 0;
return cmd->SCp.this_residual;
diff --git a/drivers/scsi/megaraid/Kconfig.megaraid b/drivers/scsi/megaraid/Kconfig.megaraid
index e630e41dc843..2adc2afd9f91 100644
--- a/drivers/scsi/megaraid/Kconfig.megaraid
+++ b/drivers/scsi/megaraid/Kconfig.megaraid
@@ -79,6 +79,7 @@ config MEGARAID_LEGACY
config MEGARAID_SAS
tristate "LSI Logic MegaRAID SAS RAID Module"
depends on PCI && SCSI
+ select IRQ_POLL
help
Module for LSI Logic's SAS based RAID controllers.
To compile this driver as a module, choose 'm' here.
diff --git a/drivers/scsi/megaraid/Makefile b/drivers/scsi/megaraid/Makefile
index 6e74d21227a5..12177e4cae65 100644
--- a/drivers/scsi/megaraid/Makefile
+++ b/drivers/scsi/megaraid/Makefile
@@ -3,4 +3,4 @@ obj-$(CONFIG_MEGARAID_MM) += megaraid_mm.o
obj-$(CONFIG_MEGARAID_MAILBOX) += megaraid_mbox.o
obj-$(CONFIG_MEGARAID_SAS) += megaraid_sas.o
megaraid_sas-objs := megaraid_sas_base.o megaraid_sas_fusion.o \
- megaraid_sas_fp.o
+ megaraid_sas_fp.o megaraid_sas_debugfs.o
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index fe9a785b7b6f..ca724fe91b8d 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -21,8 +21,8 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "07.707.51.00-rc1"
-#define MEGASAS_RELDATE "February 7, 2019"
+#define MEGASAS_VERSION "07.710.06.00-rc1"
+#define MEGASAS_RELDATE "June 18, 2019"
/*
* Device IDs
@@ -52,6 +52,10 @@
#define PCI_DEVICE_ID_LSI_AERO_10E2 0x10e2
#define PCI_DEVICE_ID_LSI_AERO_10E5 0x10e5
#define PCI_DEVICE_ID_LSI_AERO_10E6 0x10e6
+#define PCI_DEVICE_ID_LSI_AERO_10E0 0x10e0
+#define PCI_DEVICE_ID_LSI_AERO_10E3 0x10e3
+#define PCI_DEVICE_ID_LSI_AERO_10E4 0x10e4
+#define PCI_DEVICE_ID_LSI_AERO_10E7 0x10e7
/*
* Intel HBA SSDIDs
@@ -123,6 +127,8 @@
#define MFI_RESET_ADAPTER 0x00000002
#define MEGAMFI_FRAME_SIZE 64
+#define MFI_STATE_FAULT_CODE 0x0FFF0000
+#define MFI_STATE_FAULT_SUBCODE 0x0000FF00
/*
* During FW init, clear pending cmds & reset state using inbound_msg_0
*
@@ -190,6 +196,7 @@ enum MFI_CMD_OP {
MFI_CMD_SMP = 0x7,
MFI_CMD_STP = 0x8,
MFI_CMD_NVME = 0x9,
+ MFI_CMD_TOOLBOX = 0xa,
MFI_CMD_OP_COUNT,
MFI_CMD_INVALID = 0xff
};
@@ -1449,7 +1456,39 @@ struct megasas_ctrl_info {
u8 reserved6[64];
- u32 rsvdForAdptOp[64];
+ struct {
+ #if defined(__BIG_ENDIAN_BITFIELD)
+ u32 reserved:19;
+ u32 support_pci_lane_margining: 1;
+ u32 support_psoc_update:1;
+ u32 support_force_personality_change:1;
+ u32 support_fde_type_mix:1;
+ u32 support_snap_dump:1;
+ u32 support_nvme_tm:1;
+ u32 support_oce_only:1;
+ u32 support_ext_mfg_vpd:1;
+ u32 support_pcie:1;
+ u32 support_cvhealth_info:1;
+ u32 support_profile_change:2;
+ u32 mr_config_ext2_supported:1;
+ #else
+ u32 mr_config_ext2_supported:1;
+ u32 support_profile_change:2;
+ u32 support_cvhealth_info:1;
+ u32 support_pcie:1;
+ u32 support_ext_mfg_vpd:1;
+ u32 support_oce_only:1;
+ u32 support_nvme_tm:1;
+ u32 support_snap_dump:1;
+ u32 support_fde_type_mix:1;
+ u32 support_force_personality_change:1;
+ u32 support_psoc_update:1;
+ u32 support_pci_lane_margining: 1;
+ u32 reserved:19;
+ #endif
+ } adapter_operations5;
+
+ u32 rsvdForAdptOp[63];
u8 reserved7[3];
@@ -1483,7 +1522,9 @@ struct megasas_ctrl_info {
#define MEGASAS_FW_BUSY 1
/* Driver's internal Logging levels*/
-#define OCR_LOGS (1 << 0)
+#define OCR_DEBUG (1 << 0)
+#define TM_DEBUG (1 << 1)
+#define LD_PD_DEBUG (1 << 2)
#define SCAN_PD_CHANNEL 0x1
#define SCAN_VD_CHANNEL 0x2
@@ -1559,6 +1600,7 @@ enum FW_BOOT_CONTEXT {
#define MFI_IO_TIMEOUT_SECS 180
#define MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF (5 * HZ)
#define MEGASAS_OCR_SETTLE_TIME_VF (1000 * 30)
+#define MEGASAS_SRIOV_MAX_RESET_TRIES_VF 1
#define MEGASAS_ROUTINE_WAIT_TIME_VF 300
#define MFI_REPLY_1078_MESSAGE_INTERRUPT 0x80000000
#define MFI_REPLY_GEN2_MESSAGE_INTERRUPT 0x00000001
@@ -1583,7 +1625,10 @@ enum FW_BOOT_CONTEXT {
#define MR_CAN_HANDLE_SYNC_CACHE_OFFSET 0X01000000
+#define MR_ATOMIC_DESCRIPTOR_SUPPORT_OFFSET (1 << 24)
+
#define MR_CAN_HANDLE_64_BIT_DMA_OFFSET (1 << 25)
+#define MR_INTR_COALESCING_SUPPORT_OFFSET (1 << 26)
#define MEGASAS_WATCHDOG_THREAD_INTERVAL 1000
#define MEGASAS_WAIT_FOR_NEXT_DMA_MSECS 20
@@ -1762,7 +1807,7 @@ struct megasas_init_frame {
__le32 pad_0; /*0Ch */
__le16 flags; /*10h */
- __le16 reserved_3; /*12h */
+ __le16 replyqueue_mask; /*12h */
__le32 data_xfer_len; /*14h */
__le32 queue_info_new_phys_addr_lo; /*18h */
@@ -2160,6 +2205,10 @@ struct megasas_aen_event {
struct megasas_irq_context {
struct megasas_instance *instance;
u32 MSIxIndex;
+ u32 os_irq;
+ struct irq_poll irqpoll;
+ bool irq_poll_scheduled;
+ bool irq_line_enable;
};
struct MR_DRV_SYSTEM_INFO {
@@ -2190,6 +2239,23 @@ enum MR_PD_TYPE {
#define MR_DEFAULT_NVME_MDTS_KB 128
#define MR_NVME_PAGE_SIZE_MASK 0x000000FF
+/*Aero performance parameters*/
+#define MR_HIGH_IOPS_QUEUE_COUNT 8
+#define MR_DEVICE_HIGH_IOPS_DEPTH 8
+#define MR_HIGH_IOPS_BATCH_COUNT 16
+
+enum MR_PERF_MODE {
+ MR_BALANCED_PERF_MODE = 0,
+ MR_IOPS_PERF_MODE = 1,
+ MR_LATENCY_PERF_MODE = 2,
+};
+
+#define MEGASAS_PERF_MODE_2STR(mode) \
+ ((mode) == MR_BALANCED_PERF_MODE ? "Balanced" : \
+ (mode) == MR_IOPS_PERF_MODE ? "IOPS" : \
+ (mode) == MR_LATENCY_PERF_MODE ? "Latency" : \
+ "Unknown")
+
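+A typical consumer of the new macro (illustrative only; it assumes a struct megasas_instance *instance in scope) is a one-line log message translating the numeric mode into a name:
+
+/* Log the selected performance mode by name. */
+dev_info(&instance->pdev->dev, "Performance mode :%s\n",
+	 MEGASAS_PERF_MODE_2STR(instance->perf_mode));
+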
struct megasas_instance {
unsigned int *reply_map;
@@ -2246,6 +2312,7 @@ struct megasas_instance {
u32 secure_jbod_support;
u32 support_morethan256jbod; /* FW support for more than 256 PD/JBOD */
bool use_seqnum_jbod_fp; /* Added for PD sequence */
+ bool smp_affinity_enable;
spinlock_t crashdump_lock;
struct megasas_register_set __iomem *reg_set;
@@ -2263,6 +2330,7 @@ struct megasas_instance {
u16 ldio_threshold;
u16 cur_can_queue;
u32 max_sectors_per_req;
+ bool msix_load_balance;
struct megasas_aen_event *ev;
struct megasas_cmd **cmd_list;
@@ -2290,15 +2358,13 @@ struct megasas_instance {
struct pci_dev *pdev;
u32 unique_id;
u32 fw_support_ieee;
+ u32 threshold_reply_count;
atomic_t fw_outstanding;
atomic_t ldio_outstanding;
atomic_t fw_reset_no_pci_access;
- atomic_t ieee_sgl;
- atomic_t prp_sgl;
- atomic_t sge_holes_type1;
- atomic_t sge_holes_type2;
- atomic_t sge_holes_type3;
+ atomic64_t total_io_count;
+ atomic64_t high_iops_outstanding;
struct megasas_instance_template *instancet;
struct tasklet_struct isr_tasklet;
@@ -2366,8 +2432,18 @@ struct megasas_instance {
u8 task_abort_tmo;
u8 max_reset_tmo;
u8 snapdump_wait_time;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs_root;
+ struct dentry *raidmap_dump;
+#endif
u8 enable_fw_dev_list;
+ bool atomic_desc_support;
+ bool support_seqnum_jbod_fp;
+ bool support_pci_lane_margining;
+ u8 low_latency_index_start;
+ int perf_mode;
};
+
struct MR_LD_VF_MAP {
u32 size;
union MR_LD_REF ref;
@@ -2623,4 +2699,9 @@ void megasas_fusion_stop_watchdog(struct megasas_instance *instance);
void megasas_set_dma_settings(struct megasas_instance *instance,
struct megasas_dcmd_frame *dcmd,
dma_addr_t dma_addr, u32 dma_len);
+int megasas_adp_reset_wait_for_ready(struct megasas_instance *instance,
+ bool do_adp_reset,
+ int ocr_context);
+int megasas_irqpoll(struct irq_poll *irqpoll, int budget);
+void megasas_dump_fusion_io(struct scsi_cmnd *scmd);
#endif /*LSI_MEGARAID_SAS_H */
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 3dd1df472dc6..80ab9700f1de 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -36,12 +36,14 @@
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/vmalloc.h>
+#include <linux/irq_poll.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_dbg.h>
#include "megaraid_sas_fusion.h"
#include "megaraid_sas.h"
@@ -50,47 +52,59 @@
* Will be set in megasas_init_mfi if user does not provide
*/
static unsigned int max_sectors;
-module_param_named(max_sectors, max_sectors, int, 0);
+module_param_named(max_sectors, max_sectors, int, 0444);
MODULE_PARM_DESC(max_sectors,
"Maximum number of sectors per IO command");
static int msix_disable;
-module_param(msix_disable, int, S_IRUGO);
+module_param(msix_disable, int, 0444);
MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0");
static unsigned int msix_vectors;
-module_param(msix_vectors, int, S_IRUGO);
+module_param(msix_vectors, int, 0444);
MODULE_PARM_DESC(msix_vectors, "MSI-X max vector count. Default: Set by FW");
static int allow_vf_ioctls;
-module_param(allow_vf_ioctls, int, S_IRUGO);
+module_param(allow_vf_ioctls, int, 0444);
MODULE_PARM_DESC(allow_vf_ioctls, "Allow ioctls in SR-IOV VF mode. Default: 0");
static unsigned int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH;
-module_param(throttlequeuedepth, int, S_IRUGO);
+module_param(throttlequeuedepth, int, 0444);
MODULE_PARM_DESC(throttlequeuedepth,
"Adapter queue depth when throttled due to I/O timeout. Default: 16");
unsigned int resetwaittime = MEGASAS_RESET_WAIT_TIME;
-module_param(resetwaittime, int, S_IRUGO);
+module_param(resetwaittime, int, 0444);
MODULE_PARM_DESC(resetwaittime, "Wait time in (1-180s) after I/O timeout before resetting adapter. Default: 180s");
int smp_affinity_enable = 1;
-module_param(smp_affinity_enable, int, S_IRUGO);
+module_param(smp_affinity_enable, int, 0444);
MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");
int rdpq_enable = 1;
-module_param(rdpq_enable, int, S_IRUGO);
+module_param(rdpq_enable, int, 0444);
MODULE_PARM_DESC(rdpq_enable, "Allocate reply queue in chunks for large queue depth enable/disable Default: enable(1)");
unsigned int dual_qdepth_disable;
-module_param(dual_qdepth_disable, int, S_IRUGO);
+module_param(dual_qdepth_disable, int, 0444);
MODULE_PARM_DESC(dual_qdepth_disable, "Disable dual queue depth feature. Default: 0");
unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
-module_param(scmd_timeout, int, S_IRUGO);
+module_param(scmd_timeout, int, 0444);
MODULE_PARM_DESC(scmd_timeout, "scsi command timeout (10-90s), default 90s. See megasas_reset_timer.");
+int perf_mode = -1;
+module_param(perf_mode, int, 0444);
+MODULE_PARM_DESC(perf_mode, "Performance mode (only for Aero adapters), options:\n\t\t"
+ "0 - balanced: High iops and low latency queues are allocated &\n\t\t"
+ "interrupt coalescing is enabled only on high iops queues\n\t\t"
+ "1 - iops: High iops queues are not allocated &\n\t\t"
+ "interrupt coalescing is enabled on all queues\n\t\t"
+ "2 - latency: High iops queues are not allocated &\n\t\t"
+ "interrupt coalescing is disabled on all queues\n\t\t"
+ "default mode is 'balanced'"
+ );
+
MODULE_LICENSE("GPL");
MODULE_VERSION(MEGASAS_VERSION);
MODULE_AUTHOR("megaraidlinux.pdl@broadcom.com");
@@ -154,6 +168,10 @@ static struct pci_device_id megasas_pci_table[] = {
{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E2)},
{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E5)},
{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E6)},
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E0)},
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E3)},
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E4)},
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E7)},
{}
};
@@ -170,10 +188,17 @@ static u32 support_poll_for_event;
u32 megasas_dbg_lvl;
static u32 support_device_change;
static bool support_nvme_encapsulation;
+static bool support_pci_lane_margining;
/* define lock for aen poll */
spinlock_t poll_aen_lock;
+extern struct dentry *megasas_debugfs_root;
+extern void megasas_init_debugfs(void);
+extern void megasas_exit_debugfs(void);
+extern void megasas_setup_debugfs(struct megasas_instance *instance);
+extern void megasas_destroy_debugfs(struct megasas_instance *instance);
+
void
megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
u8 alt_status);
@@ -1098,8 +1123,9 @@ megasas_issue_blocked_cmd(struct megasas_instance *instance,
ret = wait_event_timeout(instance->int_cmd_wait_q,
cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
if (!ret) {
- dev_err(&instance->pdev->dev, "Failed from %s %d DCMD Timed out\n",
- __func__, __LINE__);
+ dev_err(&instance->pdev->dev,
+ "DCMD(opcode: 0x%x) is timed out, func:%s\n",
+ cmd->frame->dcmd.opcode, __func__);
return DCMD_TIMEOUT;
}
} else
@@ -1128,6 +1154,7 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
struct megasas_cmd *cmd;
struct megasas_abort_frame *abort_fr;
int ret = 0;
+ u32 opcode;
cmd = megasas_get_cmd(instance);
@@ -1163,8 +1190,10 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
ret = wait_event_timeout(instance->abort_cmd_wait_q,
cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
if (!ret) {
- dev_err(&instance->pdev->dev, "Failed from %s %d Abort Timed out\n",
- __func__, __LINE__);
+ opcode = cmd_to_abort->frame->dcmd.opcode;
+ dev_err(&instance->pdev->dev,
+ "Abort(to be aborted DCMD opcode: 0x%x) is timed out func:%s\n",
+ opcode, __func__);
return DCMD_TIMEOUT;
}
} else
@@ -1918,7 +1947,6 @@ megasas_set_nvme_device_properties(struct scsi_device *sdev, u32 max_io_size)
static void megasas_set_static_target_properties(struct scsi_device *sdev,
bool is_target_prop)
{
- u16 target_index = 0;
u8 interface_type;
u32 device_qd = MEGASAS_DEFAULT_CMD_PER_LUN;
u32 max_io_size_kb = MR_DEFAULT_NVME_MDTS_KB;
@@ -1935,8 +1963,6 @@ static void megasas_set_static_target_properties(struct scsi_device *sdev,
*/
blk_queue_rq_timeout(sdev->request_queue, scmd_timeout * HZ);
- target_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id;
-
switch (interface_type) {
case SAS_PD:
device_qd = MEGASAS_SAS_QD;
@@ -2822,21 +2848,108 @@ blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
}
/**
- * megasas_dump_frame - This function will dump MPT/MFI frame
+ * megasas_dump - This function will print hexdump of provided buffer.
+ * @buf: Buffer to be dumped
+ * @sz: Size in bytes
+ * @format: Different formats of dumping e.g. format=n will
+ * cause only 'n' 32 bit words to be dumped in a single
+ * line.
*/
-static inline void
-megasas_dump_frame(void *mpi_request, int sz)
+inline void
+megasas_dump(void *buf, int sz, int format)
{
int i;
- __le32 *mfp = (__le32 *)mpi_request;
+ __le32 *buf_loc = (__le32 *)buf;
+
+ for (i = 0; i < (sz / sizeof(__le32)); i++) {
+ if ((i % format) == 0) {
+ if (i != 0)
+ printk(KERN_CONT "\n");
+ printk(KERN_CONT "%08x: ", (i * 4));
+ }
+ printk(KERN_CONT "%08x ", le32_to_cpu(buf_loc[i]));
+ }
+ printk(KERN_CONT "\n");
+}
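+
+As a usage note, the patch itself calls the helper above with format = 8, i.e. eight 32-bit words per output line, for example when dumping an MPT IO frame:
+
+/* Dump an IO request frame, eight 32-bit words per line. */
+megasas_dump(cmd->io_request, MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE, 8);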
+
+/**
+ * megasas_dump_reg_set - This function will print hexdump of register set
+ * @reg_set:	Pointer to the register set to be dumped (256 bytes are read
+ *		with readl() and printed one 32-bit word per line).
+ */
+inline void
+megasas_dump_reg_set(void __iomem *reg_set)
+{
+ unsigned int i, sz = 256;
+ u32 __iomem *reg = (u32 __iomem *)reg_set;
+
+ for (i = 0; i < (sz / sizeof(u32)); i++)
+ printk("%08x: %08x\n", (i * 4), readl(&reg[i]));
+}
+
+/**
+ * megasas_dump_fusion_io - This function will print key details
+ * of SCSI IO
+ * @scmd: SCSI command pointer of SCSI IO
+ */
+void
+megasas_dump_fusion_io(struct scsi_cmnd *scmd)
+{
+ struct megasas_cmd_fusion *cmd;
+ union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
+ struct megasas_instance *instance;
+
+ cmd = (struct megasas_cmd_fusion *)scmd->SCp.ptr;
+ instance = (struct megasas_instance *)scmd->device->host->hostdata;
+
+ scmd_printk(KERN_INFO, scmd,
+ "scmd: (0x%p) retries: 0x%x allowed: 0x%x\n",
+ scmd, scmd->retries, scmd->allowed);
+ scsi_print_command(scmd);
+
+ if (cmd) {
+ req_desc = (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)cmd->request_desc;
+ scmd_printk(KERN_INFO, scmd, "Request descriptor details:\n");
+ scmd_printk(KERN_INFO, scmd,
+ "RequestFlags:0x%x MSIxIndex:0x%x SMID:0x%x LMID:0x%x DevHandle:0x%x\n",
+ req_desc->SCSIIO.RequestFlags,
+ req_desc->SCSIIO.MSIxIndex, req_desc->SCSIIO.SMID,
+ req_desc->SCSIIO.LMID, req_desc->SCSIIO.DevHandle);
+
+ printk(KERN_INFO "IO request frame:\n");
+ megasas_dump(cmd->io_request,
+ MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE, 8);
+ printk(KERN_INFO "Chain frame:\n");
+ megasas_dump(cmd->sg_frame,
+ instance->max_chain_frame_sz, 8);
+ }
+
+}
+
+/*
+ * megasas_dump_sys_regs - This function will dump system registers through
+ * sysfs.
+ * @reg_set: Pointer to System register set.
+ * @buf: Buffer to which output is to be written.
+ * @return: Number of bytes written to buffer.
+ */
+static inline ssize_t
+megasas_dump_sys_regs(void __iomem *reg_set, char *buf)
+{
+ unsigned int i, sz = 256;
+ int bytes_wrote = 0;
+ char *loc = (char *)buf;
+ u32 __iomem *reg = (u32 __iomem *)reg_set;
- printk(KERN_INFO "IO request frame:\n\t");
- for (i = 0; i < sz / sizeof(__le32); i++) {
- if (i && ((i % 8) == 0))
- printk("\n\t");
- printk("%08x ", le32_to_cpu(mfp[i]));
+ for (i = 0; i < sz / sizeof(u32); i++) {
+ bytes_wrote += snprintf(loc + bytes_wrote, PAGE_SIZE,
+ "%08x: %08x\n", (i * 4),
+ readl(&reg[i]));
}
- printk("\n");
+ return bytes_wrote;
}
/**
@@ -2850,24 +2963,20 @@ static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
instance = (struct megasas_instance *)scmd->device->host->hostdata;
scmd_printk(KERN_INFO, scmd,
- "Controller reset is requested due to IO timeout\n"
- "SCSI command pointer: (%p)\t SCSI host state: %d\t"
- " SCSI host busy: %d\t FW outstanding: %d\n",
- scmd, scmd->device->host->shost_state,
+ "OCR is requested due to IO timeout!!\n");
+
+ scmd_printk(KERN_INFO, scmd,
+ "SCSI host state: %d SCSI host busy: %d FW outstanding: %d\n",
+ scmd->device->host->shost_state,
scsi_host_busy(scmd->device->host),
atomic_read(&instance->fw_outstanding));
-
/*
* First wait for all commands to complete
*/
if (instance->adapter_type == MFI_SERIES) {
ret = megasas_generic_reset(scmd);
} else {
- struct megasas_cmd_fusion *cmd;
- cmd = (struct megasas_cmd_fusion *)scmd->SCp.ptr;
- if (cmd)
- megasas_dump_frame(cmd->io_request,
- MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE);
+ megasas_dump_fusion_io(scmd);
ret = megasas_reset_fusion(scmd->device->host,
SCSIIO_TIMEOUT_OCR);
}
@@ -3017,7 +3126,7 @@ megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
}
static ssize_t
-megasas_fw_crash_buffer_store(struct device *cdev,
+fw_crash_buffer_store(struct device *cdev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct Scsi_Host *shost = class_to_shost(cdev);
@@ -3036,14 +3145,13 @@ megasas_fw_crash_buffer_store(struct device *cdev,
}
static ssize_t
-megasas_fw_crash_buffer_show(struct device *cdev,
+fw_crash_buffer_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
struct megasas_instance *instance =
(struct megasas_instance *) shost->hostdata;
u32 size;
- unsigned long buff_addr;
unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
unsigned long src_addr;
unsigned long flags;
@@ -3060,8 +3168,6 @@ megasas_fw_crash_buffer_show(struct device *cdev,
return -EINVAL;
}
- buff_addr = (unsigned long) buf;
-
if (buff_offset > (instance->fw_crash_buffer_size * dmachunk)) {
dev_err(&instance->pdev->dev,
"Firmware crash dump offset is out of range\n");
@@ -3081,7 +3187,7 @@ megasas_fw_crash_buffer_show(struct device *cdev,
}
static ssize_t
-megasas_fw_crash_buffer_size_show(struct device *cdev,
+fw_crash_buffer_size_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
@@ -3093,7 +3199,7 @@ megasas_fw_crash_buffer_size_show(struct device *cdev,
}
static ssize_t
-megasas_fw_crash_state_store(struct device *cdev,
+fw_crash_state_store(struct device *cdev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct Scsi_Host *shost = class_to_shost(cdev);
@@ -3128,7 +3234,7 @@ megasas_fw_crash_state_store(struct device *cdev,
}
static ssize_t
-megasas_fw_crash_state_show(struct device *cdev,
+fw_crash_state_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
@@ -3139,14 +3245,14 @@ megasas_fw_crash_state_show(struct device *cdev,
}
static ssize_t
-megasas_page_size_show(struct device *cdev,
+page_size_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)PAGE_SIZE - 1);
}
static ssize_t
-megasas_ldio_outstanding_show(struct device *cdev, struct device_attribute *attr,
+ldio_outstanding_show(struct device *cdev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
@@ -3156,7 +3262,7 @@ megasas_ldio_outstanding_show(struct device *cdev, struct device_attribute *attr
}
static ssize_t
-megasas_fw_cmds_outstanding_show(struct device *cdev,
+fw_cmds_outstanding_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
@@ -3165,18 +3271,37 @@ megasas_fw_cmds_outstanding_show(struct device *cdev,
return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->fw_outstanding));
}
-static DEVICE_ATTR(fw_crash_buffer, S_IRUGO | S_IWUSR,
- megasas_fw_crash_buffer_show, megasas_fw_crash_buffer_store);
-static DEVICE_ATTR(fw_crash_buffer_size, S_IRUGO,
- megasas_fw_crash_buffer_size_show, NULL);
-static DEVICE_ATTR(fw_crash_state, S_IRUGO | S_IWUSR,
- megasas_fw_crash_state_show, megasas_fw_crash_state_store);
-static DEVICE_ATTR(page_size, S_IRUGO,
- megasas_page_size_show, NULL);
-static DEVICE_ATTR(ldio_outstanding, S_IRUGO,
- megasas_ldio_outstanding_show, NULL);
-static DEVICE_ATTR(fw_cmds_outstanding, S_IRUGO,
- megasas_fw_cmds_outstanding_show, NULL);
+static ssize_t
+dump_system_regs_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct megasas_instance *instance =
+ (struct megasas_instance *)shost->hostdata;
+
+ return megasas_dump_sys_regs(instance->reg_set, buf);
+}
+
+static ssize_t
+raid_map_id_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct megasas_instance *instance =
+ (struct megasas_instance *)shost->hostdata;
+
+ return snprintf(buf, PAGE_SIZE, "%ld\n",
+ (unsigned long)instance->map_id);
+}
+
+static DEVICE_ATTR_RW(fw_crash_buffer);
+static DEVICE_ATTR_RO(fw_crash_buffer_size);
+static DEVICE_ATTR_RW(fw_crash_state);
+static DEVICE_ATTR_RO(page_size);
+static DEVICE_ATTR_RO(ldio_outstanding);
+static DEVICE_ATTR_RO(fw_cmds_outstanding);
+static DEVICE_ATTR_RO(dump_system_regs);
+static DEVICE_ATTR_RO(raid_map_id);
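+
+The callback renames above (dropping the megasas_ prefix) are what allow the switch to DEVICE_ATTR_RO()/DEVICE_ATTR_RW(): those helpers declare dev_attr_<name> and expect the callbacks to be named <name>_show and, for RW attributes, <name>_store. A minimal sketch of the convention (the attribute name and value here are made up):
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+
+/* DEVICE_ATTR_RO(foo) declares dev_attr_foo and wires it to foo_show(). */
+static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", 42);
+}
+static DEVICE_ATTR_RO(foo);
+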
struct device_attribute *megaraid_host_attrs[] = {
&dev_attr_fw_crash_buffer_size,
@@ -3185,6 +3310,8 @@ struct device_attribute *megaraid_host_attrs[] = {
&dev_attr_page_size,
&dev_attr_ldio_outstanding,
&dev_attr_fw_cmds_outstanding,
+ &dev_attr_dump_system_regs,
+ &dev_attr_raid_map_id,
NULL,
};
@@ -3368,6 +3495,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
case MFI_CMD_SMP:
case MFI_CMD_STP:
case MFI_CMD_NVME:
+ case MFI_CMD_TOOLBOX:
megasas_complete_int_cmd(instance, cmd);
break;
@@ -3776,7 +3904,6 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
int i;
u8 max_wait;
u32 fw_state;
- u32 cur_state;
u32 abs_state, curr_abs_state;
abs_state = instance->instancet->read_fw_status_reg(instance);
@@ -3791,13 +3918,18 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
switch (fw_state) {
case MFI_STATE_FAULT:
- dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW in FAULT state!!\n");
+ dev_printk(KERN_ERR, &instance->pdev->dev,
+ "FW in FAULT state, Fault code:0x%x subcode:0x%x func:%s\n",
+ abs_state & MFI_STATE_FAULT_CODE,
+ abs_state & MFI_STATE_FAULT_SUBCODE, __func__);
if (ocr) {
max_wait = MEGASAS_RESET_WAIT_TIME;
- cur_state = MFI_STATE_FAULT;
break;
- } else
+ } else {
+ dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n");
+ megasas_dump_reg_set(instance->reg_set);
return -ENODEV;
+ }
case MFI_STATE_WAIT_HANDSHAKE:
/*
@@ -3817,7 +3949,6 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
&instance->reg_set->inbound_doorbell);
max_wait = MEGASAS_RESET_WAIT_TIME;
- cur_state = MFI_STATE_WAIT_HANDSHAKE;
break;
case MFI_STATE_BOOT_MESSAGE_PENDING:
@@ -3833,7 +3964,6 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
&instance->reg_set->inbound_doorbell);
max_wait = MEGASAS_RESET_WAIT_TIME;
- cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
break;
case MFI_STATE_OPERATIONAL:
@@ -3866,7 +3996,6 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
&instance->reg_set->inbound_doorbell);
max_wait = MEGASAS_RESET_WAIT_TIME;
- cur_state = MFI_STATE_OPERATIONAL;
break;
case MFI_STATE_UNDEFINED:
@@ -3874,37 +4003,33 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
* This state should not last for more than 2 seconds
*/
max_wait = MEGASAS_RESET_WAIT_TIME;
- cur_state = MFI_STATE_UNDEFINED;
break;
case MFI_STATE_BB_INIT:
max_wait = MEGASAS_RESET_WAIT_TIME;
- cur_state = MFI_STATE_BB_INIT;
break;
case MFI_STATE_FW_INIT:
max_wait = MEGASAS_RESET_WAIT_TIME;
- cur_state = MFI_STATE_FW_INIT;
break;
case MFI_STATE_FW_INIT_2:
max_wait = MEGASAS_RESET_WAIT_TIME;
- cur_state = MFI_STATE_FW_INIT_2;
break;
case MFI_STATE_DEVICE_SCAN:
max_wait = MEGASAS_RESET_WAIT_TIME;
- cur_state = MFI_STATE_DEVICE_SCAN;
break;
case MFI_STATE_FLUSH_CACHE:
max_wait = MEGASAS_RESET_WAIT_TIME;
- cur_state = MFI_STATE_FLUSH_CACHE;
break;
default:
dev_printk(KERN_DEBUG, &instance->pdev->dev, "Unknown state 0x%x\n",
fw_state);
+ dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n");
+ megasas_dump_reg_set(instance->reg_set);
return -ENODEV;
}
@@ -3927,6 +4052,8 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
if (curr_abs_state == abs_state) {
dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW state [%d] hasn't changed "
"in %d secs\n", fw_state, max_wait);
+ dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n");
+ megasas_dump_reg_set(instance->reg_set);
return -ENODEV;
}
@@ -3990,23 +4117,12 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
{
int i;
u16 max_cmd;
- u32 sge_sz;
u32 frame_count;
struct megasas_cmd *cmd;
max_cmd = instance->max_mfi_cmds;
/*
- * Size of our frame is 64 bytes for MFI frame, followed by max SG
- * elements and finally SCSI_SENSE_BUFFERSIZE bytes for sense buffer
- */
- sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
- sizeof(struct megasas_sge32);
-
- if (instance->flag_ieee)
- sge_sz = sizeof(struct megasas_sge_skinny);
-
- /*
* For MFI controllers.
* max_num_sge = 60
* max_sge_sz = 16 byte (sizeof megasas_sge_skinny)
@@ -4255,8 +4371,10 @@ megasas_get_pd_info(struct megasas_instance *instance, struct scsi_device *sdev)
switch (dcmd_timeout_ocr_possible(instance)) {
case INITIATE_OCR:
cmd->flags |= DRV_DCMD_SKIP_REFIRE;
+ mutex_unlock(&instance->reset_mutex);
megasas_reset_fusion(instance->host,
MFI_IO_TIMEOUT_OCR);
+ mutex_lock(&instance->reset_mutex);
break;
case KILL_ADAPTER:
megaraid_sas_kill_hba(instance);
@@ -4292,7 +4410,6 @@ megasas_get_pd_list(struct megasas_instance *instance)
struct megasas_dcmd_frame *dcmd;
struct MR_PD_LIST *ci;
struct MR_PD_ADDRESS *pd_addr;
- dma_addr_t ci_h = 0;
if (instance->pd_list_not_supported) {
dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
@@ -4301,7 +4418,6 @@ megasas_get_pd_list(struct megasas_instance *instance)
}
ci = instance->pd_list_buf;
- ci_h = instance->pd_list_buf_h;
cmd = megasas_get_cmd(instance);
@@ -4374,6 +4490,9 @@ megasas_get_pd_list(struct megasas_instance *instance)
case DCMD_SUCCESS:
pd_addr = ci->addr;
+ if (megasas_dbg_lvl & LD_PD_DEBUG)
+ dev_info(&instance->pdev->dev, "%s, sysPD count: 0x%x\n",
+ __func__, le32_to_cpu(ci->count));
if ((le32_to_cpu(ci->count) >
(MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL)))
@@ -4389,6 +4508,11 @@ megasas_get_pd_list(struct megasas_instance *instance)
pd_addr->scsiDevType;
instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveState =
MR_PD_STATE_SYSTEM;
+ if (megasas_dbg_lvl & LD_PD_DEBUG)
+ dev_info(&instance->pdev->dev,
+ "PD%d: targetID: 0x%03x deviceType:0x%x\n",
+ pd_index, le16_to_cpu(pd_addr->deviceId),
+ pd_addr->scsiDevType);
pd_addr++;
}
@@ -4492,6 +4616,10 @@ megasas_get_ld_list(struct megasas_instance *instance)
break;
case DCMD_SUCCESS:
+ if (megasas_dbg_lvl & LD_PD_DEBUG)
+ dev_info(&instance->pdev->dev, "%s, LD count: 0x%x\n",
+ __func__, ld_count);
+
if (ld_count > instance->fw_supported_vd_count)
break;
@@ -4501,6 +4629,10 @@ megasas_get_ld_list(struct megasas_instance *instance)
if (ci->ldList[ld_index].state != 0) {
ids = ci->ldList[ld_index].ref.targetId;
instance->ld_ids[ids] = ci->ldList[ld_index].ref.targetId;
+ if (megasas_dbg_lvl & LD_PD_DEBUG)
+ dev_info(&instance->pdev->dev,
+ "LD%d: targetID: 0x%03x\n",
+ ld_index, ids);
}
}
@@ -4604,6 +4736,10 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
case DCMD_SUCCESS:
tgtid_count = le32_to_cpu(ci->count);
+ if (megasas_dbg_lvl & LD_PD_DEBUG)
+ dev_info(&instance->pdev->dev, "%s, LD count: 0x%x\n",
+ __func__, tgtid_count);
+
if ((tgtid_count > (instance->fw_supported_vd_count)))
break;
@@ -4611,6 +4747,9 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
for (ld_index = 0; ld_index < tgtid_count; ld_index++) {
ids = ci->targetId[ld_index];
instance->ld_ids[ids] = ci->targetId[ld_index];
+ if (megasas_dbg_lvl & LD_PD_DEBUG)
+ dev_info(&instance->pdev->dev, "LD%d: targetID: 0x%03x\n",
+ ld_index, ci->targetId[ld_index]);
}
break;
@@ -4690,6 +4829,13 @@ megasas_host_device_list_query(struct megasas_instance *instance,
*/
count = le32_to_cpu(ci->count);
+ if (count > (MEGASAS_MAX_PD + MAX_LOGICAL_DRIVES_EXT))
+ break;
+
+ if (megasas_dbg_lvl & LD_PD_DEBUG)
+ dev_info(&instance->pdev->dev, "%s, Device count: 0x%x\n",
+ __func__, count);
+
memset(instance->local_pd_list, 0,
MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
@@ -4701,8 +4847,16 @@ megasas_host_device_list_query(struct megasas_instance *instance,
ci->host_device_list[i].scsi_type;
instance->local_pd_list[target_id].driveState =
MR_PD_STATE_SYSTEM;
+ if (megasas_dbg_lvl & LD_PD_DEBUG)
+ dev_info(&instance->pdev->dev,
+ "Device %d: PD targetID: 0x%03x deviceType:0x%x\n",
+ i, target_id, ci->host_device_list[i].scsi_type);
} else {
instance->ld_ids[target_id] = target_id;
+ if (megasas_dbg_lvl & LD_PD_DEBUG)
+ dev_info(&instance->pdev->dev,
+ "Device %d: LD targetID: 0x%03x\n",
+ i, target_id);
}
}
@@ -4714,8 +4868,10 @@ megasas_host_device_list_query(struct megasas_instance *instance,
switch (dcmd_timeout_ocr_possible(instance)) {
case INITIATE_OCR:
cmd->flags |= DRV_DCMD_SKIP_REFIRE;
+ mutex_unlock(&instance->reset_mutex);
megasas_reset_fusion(instance->host,
MFI_IO_TIMEOUT_OCR);
+ mutex_lock(&instance->reset_mutex);
break;
case KILL_ADAPTER:
megaraid_sas_kill_hba(instance);
@@ -4863,8 +5019,10 @@ void megasas_get_snapdump_properties(struct megasas_instance *instance)
switch (dcmd_timeout_ocr_possible(instance)) {
case INITIATE_OCR:
cmd->flags |= DRV_DCMD_SKIP_REFIRE;
+ mutex_unlock(&instance->reset_mutex);
megasas_reset_fusion(instance->host,
MFI_IO_TIMEOUT_OCR);
+ mutex_lock(&instance->reset_mutex);
break;
case KILL_ADAPTER:
megaraid_sas_kill_hba(instance);
@@ -4943,6 +5101,7 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
le32_to_cpus((u32 *)&ci->adapterOperations2);
le32_to_cpus((u32 *)&ci->adapterOperations3);
le16_to_cpus((u16 *)&ci->adapter_operations4);
+ le32_to_cpus((u32 *)&ci->adapter_operations5);
/* Update the latest Ext VD info.
* From Init path, store current firmware details.
@@ -4950,12 +5109,14 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
* in case of Firmware upgrade without system reboot.
*/
megasas_update_ext_vd_details(instance);
- instance->use_seqnum_jbod_fp =
+ instance->support_seqnum_jbod_fp =
ci->adapterOperations3.useSeqNumJbodFP;
instance->support_morethan256jbod =
ci->adapter_operations4.support_pd_map_target_id;
instance->support_nvme_passthru =
ci->adapter_operations4.support_nvme_passthru;
+ instance->support_pci_lane_margining =
+ ci->adapter_operations5.support_pci_lane_margining;
instance->task_abort_tmo = ci->TaskAbortTO;
instance->max_reset_tmo = ci->MaxResetTO;
@@ -4987,6 +5148,10 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
dev_info(&instance->pdev->dev,
"FW provided TM TaskAbort/Reset timeout\t: %d secs/%d secs\n",
instance->task_abort_tmo, instance->max_reset_tmo);
+ dev_info(&instance->pdev->dev, "JBOD sequence map support\t: %s\n",
+ instance->support_seqnum_jbod_fp ? "Yes" : "No");
+ dev_info(&instance->pdev->dev, "PCI Lane Margining support\t: %s\n",
+ instance->support_pci_lane_margining ? "Yes" : "No");
break;
@@ -4994,8 +5159,10 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
switch (dcmd_timeout_ocr_possible(instance)) {
case INITIATE_OCR:
cmd->flags |= DRV_DCMD_SKIP_REFIRE;
+ mutex_unlock(&instance->reset_mutex);
megasas_reset_fusion(instance->host,
MFI_IO_TIMEOUT_OCR);
+ mutex_lock(&instance->reset_mutex);
break;
case KILL_ADAPTER:
megaraid_sas_kill_hba(instance);
@@ -5262,6 +5429,25 @@ fail_alloc_cmds:
return 1;
}
+static
+void megasas_setup_irq_poll(struct megasas_instance *instance)
+{
+ struct megasas_irq_context *irq_ctx;
+ u32 count, i;
+
+ count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
+
+ /* Initialize IRQ poll */
+ for (i = 0; i < count; i++) {
+ irq_ctx = &instance->irq_context[i];
+ irq_ctx->os_irq = pci_irq_vector(instance->pdev, i);
+ irq_ctx->irq_poll_scheduled = false;
+ irq_poll_init(&irq_ctx->irqpoll,
+ instance->threshold_reply_count,
+ megasas_irqpoll);
+ }
+}
+
/*
* megasas_setup_irqs_ioapic - register legacy interrupts.
* @instance: Adapter soft state
@@ -5286,6 +5472,8 @@ megasas_setup_irqs_ioapic(struct megasas_instance *instance)
__func__, __LINE__);
return -1;
}
+ instance->perf_mode = MR_LATENCY_PERF_MODE;
+ instance->low_latency_index_start = 0;
return 0;
}
@@ -5320,6 +5508,7 @@ megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
&instance->irq_context[j]);
/* Retry irq register for IO_APIC*/
instance->msix_vectors = 0;
+ instance->msix_load_balance = false;
if (is_probe) {
pci_free_irq_vectors(instance->pdev);
return megasas_setup_irqs_ioapic(instance);
@@ -5328,6 +5517,7 @@ megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
}
}
}
+
return 0;
}
@@ -5340,6 +5530,16 @@ static void
megasas_destroy_irqs(struct megasas_instance *instance) {
int i;
+ int count;
+ struct megasas_irq_context *irq_ctx;
+
+ count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
+ if (instance->adapter_type != MFI_SERIES) {
+ for (i = 0; i < count; i++) {
+ irq_ctx = &instance->irq_context[i];
+ irq_poll_disable(&irq_ctx->irqpoll);
+ }
+ }
if (instance->msix_vectors)
for (i = 0; i < instance->msix_vectors; i++) {
@@ -5368,10 +5568,12 @@ megasas_setup_jbod_map(struct megasas_instance *instance)
pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
(sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));
+ instance->use_seqnum_jbod_fp =
+ instance->support_seqnum_jbod_fp;
if (reset_devices || !fusion ||
- !instance->ctrl_info_buf->adapterOperations3.useSeqNumJbodFP) {
+ !instance->support_seqnum_jbod_fp) {
dev_info(&instance->pdev->dev,
- "Jbod map is not supported %s %d\n",
+ "JBOD sequence map is disabled %s %d\n",
__func__, __LINE__);
instance->use_seqnum_jbod_fp = false;
return;
@@ -5410,9 +5612,11 @@ skip_alloc:
static void megasas_setup_reply_map(struct megasas_instance *instance)
{
const struct cpumask *mask;
- unsigned int queue, cpu;
+ unsigned int queue, cpu, low_latency_index_start;
- for (queue = 0; queue < instance->msix_vectors; queue++) {
+ low_latency_index_start = instance->low_latency_index_start;
+
+ for (queue = low_latency_index_start; queue < instance->msix_vectors; queue++) {
mask = pci_irq_get_affinity(instance->pdev, queue);
if (!mask)
goto fallback;
@@ -5423,8 +5627,14 @@ static void megasas_setup_reply_map(struct megasas_instance *instance)
return;
fallback:
- for_each_possible_cpu(cpu)
- instance->reply_map[cpu] = cpu % instance->msix_vectors;
+ queue = low_latency_index_start;
+ for_each_possible_cpu(cpu) {
+ instance->reply_map[cpu] = queue;
+ if (queue == (instance->msix_vectors - 1))
+ queue = low_latency_index_start;
+ else
+ queue++;
+ }
}
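
The fallback above round-robins every possible CPU over the reply queues, starting at low_latency_index_start so the reserved queues are skipped. A small stand-alone sketch (hypothetical values: 8 vectors, index start 1, 16 CPUs; not part of the patch) shows the resulting mapping:

    #include <stdio.h>

    /* Hypothetical values; the driver takes these from the adapter state. */
    #define MSIX_VECTORS            8
    #define LOW_LATENCY_INDEX_START 1
    #define NR_CPUS                 16

    int main(void)
    {
            unsigned int cpu, queue = LOW_LATENCY_INDEX_START;

            for (cpu = 0; cpu < NR_CPUS; cpu++) {
                    printf("cpu %2u -> reply queue %u\n", cpu, queue);
                    /* wrap back to the first low latency queue */
                    if (queue == MSIX_VECTORS - 1)
                            queue = LOW_LATENCY_INDEX_START;
                    else
                            queue++;
            }
            return 0;
    }
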
/**
@@ -5461,6 +5671,89 @@ int megasas_get_device_list(struct megasas_instance *instance)
return SUCCESS;
}
+
+/**
+ * megasas_set_high_iops_queue_affinity_hint - Set affinity hint for high IOPS queues
+ * @instance: Adapter soft state
+ * return: void
+ */
+static inline void
+megasas_set_high_iops_queue_affinity_hint(struct megasas_instance *instance)
+{
+ int i;
+ int local_numa_node;
+
+ if (instance->perf_mode == MR_BALANCED_PERF_MODE) {
+ local_numa_node = dev_to_node(&instance->pdev->dev);
+
+ for (i = 0; i < instance->low_latency_index_start; i++)
+ irq_set_affinity_hint(pci_irq_vector(instance->pdev, i),
+ cpumask_of_node(local_numa_node));
+ }
+}
+
+static int
+__megasas_alloc_irq_vectors(struct megasas_instance *instance)
+{
+ int i, irq_flags;
+ struct irq_affinity desc = { .pre_vectors = instance->low_latency_index_start };
+ struct irq_affinity *descp = &desc;
+
+ irq_flags = PCI_IRQ_MSIX;
+
+ if (instance->smp_affinity_enable)
+ irq_flags |= PCI_IRQ_AFFINITY;
+ else
+ descp = NULL;
+
+ i = pci_alloc_irq_vectors_affinity(instance->pdev,
+ instance->low_latency_index_start,
+ instance->msix_vectors, irq_flags, descp);
+
+ return i;
+}
+
+/**
+ * megasas_alloc_irq_vectors - Allocate IRQ vectors/enable MSI-x vectors
+ * @instance: Adapter soft state
+ * return: void
+ */
+static void
+megasas_alloc_irq_vectors(struct megasas_instance *instance)
+{
+ int i;
+ unsigned int num_msix_req;
+
+ i = __megasas_alloc_irq_vectors(instance);
+
+ if ((instance->perf_mode == MR_BALANCED_PERF_MODE) &&
+ (i != instance->msix_vectors)) {
+ if (instance->msix_vectors)
+ pci_free_irq_vectors(instance->pdev);
+ /* Disable Balanced IOPS mode and retry allocating the vectors */
+ instance->perf_mode = MR_LATENCY_PERF_MODE;
+ instance->low_latency_index_start = 1;
+ num_msix_req = num_online_cpus() + instance->low_latency_index_start;
+
+ instance->msix_vectors = min(num_msix_req,
+ instance->msix_vectors);
+
+ i = __megasas_alloc_irq_vectors(instance);
+
+ }
+
+ dev_info(&instance->pdev->dev,
+ "requested/available msix %d/%d\n", instance->msix_vectors, i);
+
+ if (i > 0)
+ instance->msix_vectors = i;
+ else
+ instance->msix_vectors = 0;
+
+ if (instance->smp_affinity_enable)
+ megasas_set_high_iops_queue_affinity_hint(instance);
+}
+
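
The helper __megasas_alloc_irq_vectors() above relies on the pre_vectors field of struct irq_affinity to keep the first low_latency_index_start vectors out of the automatic affinity spreading. A condensed sketch of that call pattern (example_alloc_vectors is a hypothetical helper, assuming a valid pci_dev and reserved >= 1; this is not the driver's code):

    #include <linux/pci.h>
    #include <linux/interrupt.h>

    /*
     * Reserve the first "reserved" MSI-x vectors for driver-managed queues
     * and let the PCI core spread the remaining ones across the CPUs.
     */
    static int example_alloc_vectors(struct pci_dev *pdev,
                                     unsigned int reserved,
                                     unsigned int max_vecs)
    {
            struct irq_affinity desc = { .pre_vectors = reserved };

            return pci_alloc_irq_vectors_affinity(pdev, reserved, max_vecs,
                                                  PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
                                                  &desc);
    }
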
/**
* megasas_init_fw - Initializes the FW
* @instance: Adapter soft state
@@ -5474,12 +5767,15 @@ static int megasas_init_fw(struct megasas_instance *instance)
u32 max_sectors_2, tmp_sectors, msix_enable;
u32 scratch_pad_1, scratch_pad_2, scratch_pad_3, status_reg;
resource_size_t base_addr;
+ void *base_addr_phys;
struct megasas_ctrl_info *ctrl_info = NULL;
unsigned long bar_list;
- int i, j, loop, fw_msix_count = 0;
+ int i, j, loop;
struct IOV_111 *iovPtr;
struct fusion_context *fusion;
- bool do_adp_reset = true;
+ bool intr_coalescing;
+ unsigned int num_msix_req;
+ u16 lnksta, speed;
fusion = instance->ctrl_context;
@@ -5500,6 +5796,11 @@ static int megasas_init_fw(struct megasas_instance *instance)
goto fail_ioremap;
}
+ base_addr_phys = &base_addr;
+ dev_printk(KERN_DEBUG, &instance->pdev->dev,
+ "BAR:0x%lx BAR's base_addr(phys):%pa mapped virt_addr:0x%p\n",
+ instance->bar, base_addr_phys, instance->reg_set);
+
if (instance->adapter_type != MFI_SERIES)
instance->instancet = &megasas_instance_template_fusion;
else {
@@ -5526,29 +5827,35 @@ static int megasas_init_fw(struct megasas_instance *instance)
}
if (megasas_transition_to_ready(instance, 0)) {
- if (instance->adapter_type >= INVADER_SERIES) {
+ dev_info(&instance->pdev->dev,
+ "Failed to transition controller to ready from %s!\n",
+ __func__);
+ if (instance->adapter_type != MFI_SERIES) {
status_reg = instance->instancet->read_fw_status_reg(
instance);
- do_adp_reset = status_reg & MFI_RESET_ADAPTER;
- }
-
- if (do_adp_reset) {
+ if (status_reg & MFI_RESET_ADAPTER) {
+ if (megasas_adp_reset_wait_for_ready
+ (instance, true, 0) == FAILED)
+ goto fail_ready_state;
+ } else {
+ goto fail_ready_state;
+ }
+ } else {
atomic_set(&instance->fw_reset_no_pci_access, 1);
instance->instancet->adp_reset
(instance, instance->reg_set);
atomic_set(&instance->fw_reset_no_pci_access, 0);
- dev_info(&instance->pdev->dev,
- "FW restarted successfully from %s!\n",
- __func__);
/*waiting for about 30 second before retry*/
ssleep(30);
if (megasas_transition_to_ready(instance, 0))
goto fail_ready_state;
- } else {
- goto fail_ready_state;
}
+
+ dev_info(&instance->pdev->dev,
+ "FW restarted successfully from %s!\n",
+ __func__);
}
megasas_init_ctrl_params(instance);
@@ -5573,11 +5880,21 @@ static int megasas_init_fw(struct megasas_instance *instance)
MR_MAX_RAID_MAP_SIZE_MASK);
}
+ switch (instance->adapter_type) {
+ case VENTURA_SERIES:
+ fusion->pcie_bw_limitation = true;
+ break;
+ case AERO_SERIES:
+ fusion->r56_div_offload = true;
+ break;
+ default:
+ break;
+ }
+
/* Check if MSI-X is supported while in ready state */
msix_enable = (instance->instancet->read_fw_status_reg(instance) &
0x4000000) >> 0x1a;
if (msix_enable && !msix_disable) {
- int irq_flags = PCI_IRQ_MSIX;
scratch_pad_1 = megasas_readl
(instance, &instance->reg_set->outbound_scratch_pad_1);
@@ -5587,7 +5904,6 @@ static int megasas_init_fw(struct megasas_instance *instance)
/* Thunderbolt Series*/
instance->msix_vectors = (scratch_pad_1
& MR_MAX_REPLY_QUEUES_OFFSET) + 1;
- fw_msix_count = instance->msix_vectors;
} else {
instance->msix_vectors = ((scratch_pad_1
& MR_MAX_REPLY_QUEUES_EXT_OFFSET)
@@ -5616,7 +5932,12 @@ static int megasas_init_fw(struct megasas_instance *instance)
if (rdpq_enable)
instance->is_rdpq = (scratch_pad_1 & MR_RDPQ_MODE_OFFSET) ?
1 : 0;
- fw_msix_count = instance->msix_vectors;
+
+ if (!instance->msix_combined) {
+ instance->msix_load_balance = true;
+ instance->smp_affinity_enable = false;
+ }
+
/* Save 1-15 reply post index address to local memory
* Index 0 is already saved from reg offset
* MPI2_REPLY_POST_HOST_INDEX_OFFSET
@@ -5629,22 +5950,91 @@ static int megasas_init_fw(struct megasas_instance *instance)
+ (loop * 0x10));
}
}
+
+ dev_info(&instance->pdev->dev,
+ "firmware supports msix\t: (%d)",
+ instance->msix_vectors);
if (msix_vectors)
instance->msix_vectors = min(msix_vectors,
instance->msix_vectors);
} else /* MFI adapters */
instance->msix_vectors = 1;
- /* Don't bother allocating more MSI-X vectors than cpus */
- instance->msix_vectors = min(instance->msix_vectors,
- (unsigned int)num_online_cpus());
- if (smp_affinity_enable)
- irq_flags |= PCI_IRQ_AFFINITY;
- i = pci_alloc_irq_vectors(instance->pdev, 1,
- instance->msix_vectors, irq_flags);
- if (i > 0)
- instance->msix_vectors = i;
+
+
+ /*
+ * For Aero (if certain conditions are met), the driver configures a
+ * few additional reply queues with interrupt coalescing enabled.
+ * These queues with interrupt coalescing enabled are called
+ * High IOPS queues, and the rest of the reply queues (based on the
+ * number of logical CPUs) are termed Low Latency queues.
+ *
+ * Total number of reply queues = High IOPS queues + Low Latency queues
+ *
+ * For the rest of the fusion adapters, 1 additional reply queue is
+ * reserved for management commands and the remaining reply queues
+ * (based on the number of logical CPUs) are used for I/O and
+ * referenced as IO queues.
+ * Total number of reply queues = 1 + IO queues
+ *
+ * MFI adapters support a single MSI-x vector, so a single reply queue
+ * is used for both I/O and management commands.
+ */
+
+ intr_coalescing = (scratch_pad_1 & MR_INTR_COALESCING_SUPPORT_OFFSET) ?
+ true : false;
+ if (intr_coalescing &&
+ (num_online_cpus() >= MR_HIGH_IOPS_QUEUE_COUNT) &&
+ (instance->msix_vectors == MEGASAS_MAX_MSIX_QUEUES))
+ instance->perf_mode = MR_BALANCED_PERF_MODE;
else
- instance->msix_vectors = 0;
+ instance->perf_mode = MR_LATENCY_PERF_MODE;
+
+
+ if (instance->adapter_type == AERO_SERIES) {
+ pcie_capability_read_word(instance->pdev, PCI_EXP_LNKSTA, &lnksta);
+ speed = lnksta & PCI_EXP_LNKSTA_CLS;
+
+ /*
+ * For Aero, if the PCIe link speed is < 16 GT/s, the driver should operate
+ * in latency perf mode and enable the R1 PCI bandwidth algorithm.
+ */
+ if (speed < 0x4) {
+ instance->perf_mode = MR_LATENCY_PERF_MODE;
+ fusion->pcie_bw_limitation = true;
+ }
+
+ /*
+ * Performance mode settings provided through the module parameter perf_mode
+ * take effect only for:
+ * 1. The Aero family of adapters.
+ * 2. When the user sets the perf_mode module parameter in the range 0-2.
+ */
+ if ((perf_mode >= MR_BALANCED_PERF_MODE) &&
+ (perf_mode <= MR_LATENCY_PERF_MODE))
+ instance->perf_mode = perf_mode;
+ /*
+ * If intr coalescing is not supported by controller FW, then IOPS
+ * and Balanced modes are not feasible.
+ */
+ if (!intr_coalescing)
+ instance->perf_mode = MR_LATENCY_PERF_MODE;
+
+ }
+
+ if (instance->perf_mode == MR_BALANCED_PERF_MODE)
+ instance->low_latency_index_start =
+ MR_HIGH_IOPS_QUEUE_COUNT;
+ else
+ instance->low_latency_index_start = 1;
+
+ num_msix_req = num_online_cpus() + instance->low_latency_index_start;
+
+ instance->msix_vectors = min(num_msix_req,
+ instance->msix_vectors);
+
+ megasas_alloc_irq_vectors(instance);
+ if (!instance->msix_vectors)
+ instance->msix_load_balance = false;
}
/*
* MSI-X host index 0 is common for all adapter.
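
As a concrete illustration of the queue accounting in the comment above, assume a 64-logical-CPU host and an Aero controller that advertises interrupt coalescing, with MR_HIGH_IOPS_QUEUE_COUNT taken as 8 (the numbers below are illustrative only, not from the patch):

    /*
     * Balanced mode:
     *   low_latency_index_start = MR_HIGH_IOPS_QUEUE_COUNT     =  8
     *   num_msix_req            = num_online_cpus() + 8        = 72
     *   msix_vectors            = min(72, msix_vectors)
     *   i.e. 8 high IOPS queues + up to 64 low latency queues.
     *
     * Latency mode (the default otherwise):
     *   low_latency_index_start = 1
     *   num_msix_req            = num_online_cpus() + 1        = 65
     *   i.e. 1 reserved queue + up to 64 I/O queues.
     */
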
@@ -5669,8 +6059,6 @@ static int megasas_init_fw(struct megasas_instance *instance)
megasas_setup_reply_map(instance);
dev_info(&instance->pdev->dev,
- "firmware supports msix\t: (%d)", fw_msix_count);
- dev_info(&instance->pdev->dev,
"current msix/online cpus\t: (%d/%d)\n",
instance->msix_vectors, (unsigned int)num_online_cpus());
dev_info(&instance->pdev->dev,
@@ -5707,6 +6095,9 @@ static int megasas_init_fw(struct megasas_instance *instance)
megasas_setup_irqs_ioapic(instance))
goto fail_init_adapter;
+ if (instance->adapter_type != MFI_SERIES)
+ megasas_setup_irq_poll(instance);
+
instance->instancet->enable_intr(instance);
dev_info(&instance->pdev->dev, "INIT adapter done\n");
@@ -5833,8 +6224,8 @@ static int megasas_init_fw(struct megasas_instance *instance)
instance->UnevenSpanSupport ? "yes" : "no");
dev_info(&instance->pdev->dev, "firmware crash dump : %s\n",
instance->crash_dump_drv_support ? "yes" : "no");
- dev_info(&instance->pdev->dev, "jbod sync map : %s\n",
- instance->use_seqnum_jbod_fp ? "yes" : "no");
+ dev_info(&instance->pdev->dev, "JBOD sequence map : %s\n",
+ instance->use_seqnum_jbod_fp ? "enabled" : "disabled");
instance->max_sectors_per_req = instance->max_num_sge *
SGE_BUFFER_SIZE / 512;
@@ -6197,8 +6588,10 @@ megasas_get_target_prop(struct megasas_instance *instance,
switch (dcmd_timeout_ocr_possible(instance)) {
case INITIATE_OCR:
cmd->flags |= DRV_DCMD_SKIP_REFIRE;
+ mutex_unlock(&instance->reset_mutex);
megasas_reset_fusion(instance->host,
MFI_IO_TIMEOUT_OCR);
+ mutex_lock(&instance->reset_mutex);
break;
case KILL_ADAPTER:
megaraid_sas_kill_hba(instance);
@@ -6748,6 +7141,7 @@ static inline void megasas_init_ctrl_params(struct megasas_instance *instance)
INIT_LIST_HEAD(&instance->internal_reset_pending_q);
atomic_set(&instance->fw_outstanding, 0);
+ atomic64_set(&instance->total_io_count, 0);
init_waitqueue_head(&instance->int_cmd_wait_q);
init_waitqueue_head(&instance->abort_cmd_wait_q);
@@ -6770,6 +7164,8 @@ static inline void megasas_init_ctrl_params(struct megasas_instance *instance)
instance->last_time = 0;
instance->disableOnlineCtrlReset = 1;
instance->UnevenSpanSupport = 0;
+ instance->smp_affinity_enable = smp_affinity_enable ? true : false;
+ instance->msix_load_balance = false;
if (instance->adapter_type != MFI_SERIES)
INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq);
@@ -6791,6 +7187,12 @@ static int megasas_probe_one(struct pci_dev *pdev,
u16 control = 0;
switch (pdev->device) {
+ case PCI_DEVICE_ID_LSI_AERO_10E0:
+ case PCI_DEVICE_ID_LSI_AERO_10E3:
+ case PCI_DEVICE_ID_LSI_AERO_10E4:
+ case PCI_DEVICE_ID_LSI_AERO_10E7:
+ dev_err(&pdev->dev, "Adapter is in non secure mode\n");
+ return 1;
case PCI_DEVICE_ID_LSI_AERO_10E1:
case PCI_DEVICE_ID_LSI_AERO_10E5:
dev_info(&pdev->dev, "Adapter is in configurable secure mode\n");
@@ -6910,6 +7312,8 @@ static int megasas_probe_one(struct pci_dev *pdev,
goto fail_start_aen;
}
+ megasas_setup_debugfs(instance);
+
/* Get current SR-IOV LD/VF affiliation */
if (instance->requestorId)
megasas_get_ld_vf_affiliation(instance, 1);
@@ -7041,13 +7445,17 @@ static void megasas_shutdown_controller(struct megasas_instance *instance,
static int
megasas_suspend(struct pci_dev *pdev, pm_message_t state)
{
- struct Scsi_Host *host;
struct megasas_instance *instance;
instance = pci_get_drvdata(pdev);
- host = instance->host;
+
+ if (!instance)
+ return 0;
+
instance->unload = 1;
+ dev_info(&pdev->dev, "%s is called\n", __func__);
+
/* Shutdown SR-IOV heartbeat timer */
if (instance->requestorId && !instance->skip_heartbeat_timer_del)
del_timer_sync(&instance->sriov_heartbeat_timer);
@@ -7097,11 +7505,16 @@ megasas_resume(struct pci_dev *pdev)
int irq_flags = PCI_IRQ_LEGACY;
instance = pci_get_drvdata(pdev);
+
+ if (!instance)
+ return 0;
+
host = instance->host;
pci_set_power_state(pdev, PCI_D0);
pci_enable_wake(pdev, PCI_D0, 0);
pci_restore_state(pdev);
+ dev_info(&pdev->dev, "%s is called\n", __func__);
/*
* PCI prepping: enable device set bus mastering and dma mask
*/
@@ -7133,7 +7546,7 @@ megasas_resume(struct pci_dev *pdev)
/* Now re-enable MSI-X */
if (instance->msix_vectors) {
irq_flags = PCI_IRQ_MSIX;
- if (smp_affinity_enable)
+ if (instance->smp_affinity_enable)
irq_flags |= PCI_IRQ_AFFINITY;
}
rval = pci_alloc_irq_vectors(instance->pdev, 1,
@@ -7171,6 +7584,9 @@ megasas_resume(struct pci_dev *pdev)
megasas_setup_irqs_ioapic(instance))
goto fail_init_mfi;
+ if (instance->adapter_type != MFI_SERIES)
+ megasas_setup_irq_poll(instance);
+
/* Re-launch SR-IOV heartbeat timer */
if (instance->requestorId) {
if (!megasas_sriov_start_heartbeat(instance, 0))
@@ -7261,6 +7677,10 @@ static void megasas_detach_one(struct pci_dev *pdev)
u32 pd_seq_map_sz;
instance = pci_get_drvdata(pdev);
+
+ if (!instance)
+ return;
+
host = instance->host;
fusion = instance->ctrl_context;
@@ -7374,6 +7794,8 @@ skip_firing_dcmds:
megasas_free_ctrl_mem(instance);
+ megasas_destroy_debugfs(instance);
+
scsi_host_put(host);
pci_disable_device(pdev);
@@ -7387,6 +7809,9 @@ static void megasas_shutdown(struct pci_dev *pdev)
{
struct megasas_instance *instance = pci_get_drvdata(pdev);
+ if (!instance)
+ return;
+
instance->unload = 1;
if (megasas_wait_for_adapter_operational(instance))
@@ -7532,7 +7957,9 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
if ((ioc->frame.hdr.cmd >= MFI_CMD_OP_COUNT) ||
((ioc->frame.hdr.cmd == MFI_CMD_NVME) &&
- !instance->support_nvme_passthru)) {
+ !instance->support_nvme_passthru) ||
+ ((ioc->frame.hdr.cmd == MFI_CMD_TOOLBOX) &&
+ !instance->support_pci_lane_margining)) {
dev_err(&instance->pdev->dev,
"Received invalid ioctl command 0x%x\n",
ioc->frame.hdr.cmd);
@@ -7568,10 +7995,13 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
if (opcode == MR_DCMD_CTRL_SHUTDOWN) {
+ mutex_lock(&instance->reset_mutex);
if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) {
megasas_return_cmd(instance, cmd);
+ mutex_unlock(&instance->reset_mutex);
return -1;
}
+ mutex_unlock(&instance->reset_mutex);
}
if (opcode == MR_DRIVER_SET_APP_CRASHDUMP_MODE) {
@@ -8013,6 +8443,14 @@ support_nvme_encapsulation_show(struct device_driver *dd, char *buf)
static DRIVER_ATTR_RO(support_nvme_encapsulation);
+static ssize_t
+support_pci_lane_margining_show(struct device_driver *dd, char *buf)
+{
+ return sprintf(buf, "%u\n", support_pci_lane_margining);
+}
+
+static DRIVER_ATTR_RO(support_pci_lane_margining);
+
static inline void megasas_remove_scsi_device(struct scsi_device *sdev)
{
sdev_printk(KERN_INFO, sdev, "SCSI device is removed\n");
@@ -8161,7 +8599,7 @@ megasas_aen_polling(struct work_struct *work)
struct megasas_instance *instance = ev->instance;
union megasas_evt_class_locale class_locale;
int event_type = 0;
- u32 seq_num, wait_time = MEGASAS_RESET_WAIT_TIME;
+ u32 seq_num;
int error;
u8 dcmd_ret = DCMD_SUCCESS;
@@ -8171,10 +8609,6 @@ megasas_aen_polling(struct work_struct *work)
return;
}
- /* Adjust event workqueue thread wait time for VF mode */
- if (instance->requestorId)
- wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF;
-
/* Don't run the event workqueue thread if OCR is running */
mutex_lock(&instance->reset_mutex);
@@ -8286,6 +8720,7 @@ static int __init megasas_init(void)
support_poll_for_event = 2;
support_device_change = 1;
support_nvme_encapsulation = true;
+ support_pci_lane_margining = true;
memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info));
@@ -8301,6 +8736,8 @@ static int __init megasas_init(void)
megasas_mgmt_majorno = rval;
+ megasas_init_debugfs();
+
/*
* Register ourselves as PCI hotplug module
*/
@@ -8340,8 +8777,17 @@ static int __init megasas_init(void)
if (rval)
goto err_dcf_support_nvme_encapsulation;
+ rval = driver_create_file(&megasas_pci_driver.driver,
+ &driver_attr_support_pci_lane_margining);
+ if (rval)
+ goto err_dcf_support_pci_lane_margining;
+
return rval;
+err_dcf_support_pci_lane_margining:
+ driver_remove_file(&megasas_pci_driver.driver,
+ &driver_attr_support_nvme_encapsulation);
+
err_dcf_support_nvme_encapsulation:
driver_remove_file(&megasas_pci_driver.driver,
&driver_attr_support_device_change);
@@ -8360,6 +8806,7 @@ err_dcf_rel_date:
err_dcf_attr_ver:
pci_unregister_driver(&megasas_pci_driver);
err_pcidrv:
+ megasas_exit_debugfs();
unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
return rval;
}
@@ -8380,8 +8827,11 @@ static void __exit megasas_exit(void)
driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
driver_remove_file(&megasas_pci_driver.driver,
&driver_attr_support_nvme_encapsulation);
+ driver_remove_file(&megasas_pci_driver.driver,
+ &driver_attr_support_pci_lane_margining);
pci_unregister_driver(&megasas_pci_driver);
+ megasas_exit_debugfs();
unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
}
diff --git a/drivers/scsi/megaraid/megaraid_sas_debugfs.c b/drivers/scsi/megaraid/megaraid_sas_debugfs.c
new file mode 100644
index 000000000000..c69760775efa
--- /dev/null
+++ b/drivers/scsi/megaraid/megaraid_sas_debugfs.c
@@ -0,0 +1,179 @@
+/*
+ * Linux MegaRAID driver for SAS based RAID controllers
+ *
+ * Copyright (c) 2003-2018 LSI Corporation.
+ * Copyright (c) 2003-2018 Avago Technologies.
+ * Copyright (c) 2003-2018 Broadcom Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Authors: Broadcom Inc.
+ * Kashyap Desai <kashyap.desai@broadcom.com>
+ * Sumit Saxena <sumit.saxena@broadcom.com>
+ * Shivasharan S <shivasharan.srikanteshwara@broadcom.com>
+ *
+ * Send feedback to: megaraidlinux.pdl@broadcom.com
+ */
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/compat.h>
+#include <linux/irq_poll.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+
+#include "megaraid_sas_fusion.h"
+#include "megaraid_sas.h"
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+
+struct dentry *megasas_debugfs_root;
+
+static ssize_t
+megasas_debugfs_read(struct file *filp, char __user *ubuf, size_t cnt,
+ loff_t *ppos)
+{
+ struct megasas_debugfs_buffer *debug = filp->private_data;
+
+ if (!debug || !debug->buf)
+ return 0;
+
+ return simple_read_from_buffer(ubuf, cnt, ppos, debug->buf, debug->len);
+}
+
+static int
+megasas_debugfs_raidmap_open(struct inode *inode, struct file *file)
+{
+ struct megasas_instance *instance = inode->i_private;
+ struct megasas_debugfs_buffer *debug;
+ struct fusion_context *fusion;
+
+ fusion = instance->ctrl_context;
+
+ debug = kzalloc(sizeof(struct megasas_debugfs_buffer), GFP_KERNEL);
+ if (!debug)
+ return -ENOMEM;
+
+ debug->buf = (void *)fusion->ld_drv_map[(instance->map_id & 1)];
+ debug->len = fusion->drv_map_sz;
+ file->private_data = debug;
+
+ return 0;
+}
+
+static int
+megasas_debugfs_release(struct inode *inode, struct file *file)
+{
+ struct megasas_debugfs_buffer *debug = file->private_data;
+
+ if (!debug)
+ return 0;
+
+ file->private_data = NULL;
+ kfree(debug);
+ return 0;
+}
+
+static const struct file_operations megasas_debugfs_raidmap_fops = {
+ .owner = THIS_MODULE,
+ .open = megasas_debugfs_raidmap_open,
+ .read = megasas_debugfs_read,
+ .release = megasas_debugfs_release,
+};
+
+/*
+ * megasas_init_debugfs : Create debugfs root for megaraid_sas driver
+ */
+void megasas_init_debugfs(void)
+{
+ megasas_debugfs_root = debugfs_create_dir("megaraid_sas", NULL);
+ if (!megasas_debugfs_root)
+ pr_info("Cannot create debugfs root\n");
+}
+
+/*
+ * megasas_exit_debugfs : Remove debugfs root for megaraid_sas driver
+ */
+void megasas_exit_debugfs(void)
+{
+ debugfs_remove_recursive(megasas_debugfs_root);
+}
+
+/*
+ * megasas_setup_debugfs : Setup debugfs per Fusion adapter
+ * instance: Soft instance of adapter
+ */
+void
+megasas_setup_debugfs(struct megasas_instance *instance)
+{
+ char name[64];
+ struct fusion_context *fusion;
+
+ fusion = instance->ctrl_context;
+
+ if (fusion) {
+ snprintf(name, sizeof(name),
+ "scsi_host%d", instance->host->host_no);
+ if (!instance->debugfs_root) {
+ instance->debugfs_root =
+ debugfs_create_dir(name, megasas_debugfs_root);
+ if (!instance->debugfs_root) {
+ dev_err(&instance->pdev->dev,
+ "Cannot create per adapter debugfs directory\n");
+ return;
+ }
+ }
+
+ snprintf(name, sizeof(name), "raidmap_dump");
+ instance->raidmap_dump =
+ debugfs_create_file(name, S_IRUGO,
+ instance->debugfs_root, instance,
+ &megasas_debugfs_raidmap_fops);
+ if (!instance->raidmap_dump) {
+ dev_err(&instance->pdev->dev,
+ "Cannot create raidmap debugfs file\n");
+ debugfs_remove(instance->debugfs_root);
+ return;
+ }
+ }
+
+}
+
+/*
+ * megasas_destroy_debugfs : Destroy debugfs per Fusion adapter
+ * instance: Soft instance of adapter
+ */
+void megasas_destroy_debugfs(struct megasas_instance *instance)
+{
+ debugfs_remove_recursive(instance->debugfs_root);
+}
+
+#else
+void megasas_init_debugfs(void)
+{
+}
+void megasas_exit_debugfs(void)
+{
+}
+void megasas_setup_debugfs(struct megasas_instance *instance)
+{
+}
+void megasas_destroy_debugfs(struct megasas_instance *instance)
+{
+}
+#endif /*CONFIG_DEBUG_FS*/
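
A minimal user-space reader for the node created above might look as follows; the path assumes debugfs is mounted at /sys/kernel/debug and that the adapter is scsi_host0, both of which need to be adjusted for the actual system (this sketch is not part of the patch):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[4096];
            ssize_t n, total = 0;
            int fd = open("/sys/kernel/debug/megaraid_sas/scsi_host0/raidmap_dump",
                          O_RDONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }

            while ((n = read(fd, buf, sizeof(buf))) > 0)
                    total += n;

            printf("RAID map dump size: %zd bytes\n", total);
            close(fd);
            return 0;
    }
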
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index 12637606c46d..50b8c1b12767 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -33,6 +33,7 @@
#include <linux/compat.h>
#include <linux/blkdev.h>
#include <linux/poll.h>
+#include <linux/irq_poll.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -45,7 +46,7 @@
#define LB_PENDING_CMDS_DEFAULT 4
static unsigned int lb_pending_cmds = LB_PENDING_CMDS_DEFAULT;
-module_param(lb_pending_cmds, int, S_IRUGO);
+module_param(lb_pending_cmds, int, 0444);
MODULE_PARM_DESC(lb_pending_cmds, "Change raid-1 load balancing outstanding "
"threshold. Valid Values are 1-128. Default: 4");
@@ -889,6 +890,77 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
}
/*
+ * mr_get_phy_params_r56_rmw - Calculate parameters for R56 CTIO write operation
+ * @instance: Adapter soft state
+ * @ld: LD index
+ * @stripNo: Strip Number
+ * @io_info: IO info structure pointer
+ * @pRAID_Context: RAID context pointer
+ * @map: RAID map pointer
+ *
+ * This routine calculates the logical arm, data arm, row number and parity arm
+ * for an R56 CTIO write operation.
+ */
+static void mr_get_phy_params_r56_rmw(struct megasas_instance *instance,
+ u32 ld, u64 stripNo,
+ struct IO_REQUEST_INFO *io_info,
+ struct RAID_CONTEXT_G35 *pRAID_Context,
+ struct MR_DRV_RAID_MAP_ALL *map)
+{
+ struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
+ u8 span, dataArms, arms, dataArm, logArm;
+ s8 rightmostParityArm, PParityArm;
+ u64 rowNum;
+ u64 *pdBlock = &io_info->pdBlock;
+
+ dataArms = raid->rowDataSize;
+ arms = raid->rowSize;
+
+ rowNum = mega_div64_32(stripNo, dataArms);
+ /* parity disk arm, first arm is 0 */
+ rightmostParityArm = (arms - 1) - mega_mod64(rowNum, arms);
+
+ /* logical arm within row */
+ logArm = mega_mod64(stripNo, dataArms);
+ /* physical arm for data */
+ dataArm = mega_mod64((rightmostParityArm + 1 + logArm), arms);
+
+ if (raid->spanDepth == 1) {
+ span = 0;
+ } else {
+ span = (u8)MR_GetSpanBlock(ld, rowNum, pdBlock, map);
+ if (span == SPAN_INVALID)
+ return;
+ }
+
+ if (raid->level == 6) {
+ /* P parity arm; note this can go negative, adjust it if it does */
+ PParityArm = (arms - 2) - mega_mod64(rowNum, arms);
+
+ if (PParityArm < 0)
+ PParityArm += arms;
+
+ /* rightmostParityArm is P-Parity for RAID 5 and Q-Parity for RAID 6 */
+ pRAID_Context->flow_specific.r56_arm_map = rightmostParityArm;
+ pRAID_Context->flow_specific.r56_arm_map |=
+ (u16)(PParityArm << RAID_CTX_R56_P_ARM_SHIFT);
+ } else {
+ pRAID_Context->flow_specific.r56_arm_map |=
+ (u16)(rightmostParityArm << RAID_CTX_R56_P_ARM_SHIFT);
+ }
+
+ pRAID_Context->reg_lock_row_lba = cpu_to_le64(rowNum);
+ pRAID_Context->flow_specific.r56_arm_map |=
+ (u16)(logArm << RAID_CTX_R56_LOG_ARM_SHIFT);
+ cpu_to_le16s(&pRAID_Context->flow_specific.r56_arm_map);
+ pRAID_Context->span_arm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | dataArm;
+ pRAID_Context->raid_flags = (MR_RAID_FLAGS_IO_SUB_TYPE_R56_DIV_OFFLOAD <<
+ MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
+
+ return;
+}
+
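
The arm arithmetic in mr_get_phy_params_r56_rmw() is easier to follow with concrete numbers; the values below are purely illustrative and chosen only to exercise both parity arms:

    /*
     * RAID 6 volume, rowSize (arms) = 6, rowDataSize (dataArms) = 4,
     * stripNo = 9:
     *
     *   rowNum             = 9 / 4              = 2
     *   logArm             = 9 % 4              = 1
     *   rightmostParityArm = (6 - 1) - (2 % 6)  = 3   (Q parity)
     *   PParityArm         = (6 - 2) - (2 % 6)  = 2   (P parity)
     *   dataArm            = (3 + 1 + 1) % 6    = 5
     *
     * so this strip's data lands on physical arm 5 of row 2, with P parity
     * on arm 2 and Q parity on arm 3.
     */
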
+/*
******************************************************************************
*
* MR_BuildRaidContext function
@@ -954,6 +1026,7 @@ MR_BuildRaidContext(struct megasas_instance *instance,
stripSize = 1 << raid->stripeShift;
stripe_mask = stripSize-1;
+ io_info->data_arms = raid->rowDataSize;
/*
* calculate starting row and stripe, and number of strips and rows
@@ -1095,6 +1168,13 @@ MR_BuildRaidContext(struct megasas_instance *instance,
/* save pointer to raid->LUN array */
*raidLUN = raid->LUN;
+ /* Aero R5/6 Division Offload for WRITE */
+ if (fusion->r56_div_offload && (raid->level >= 5) && !isRead) {
+ mr_get_phy_params_r56_rmw(instance, ld, start_strip, io_info,
+ (struct RAID_CONTEXT_G35 *)pRAID_Context,
+ map);
+ return true;
+ }
/*Get Phy Params only if FP capable, or else leave it to MR firmware
to do the calculation.*/
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 4dfa0685a86c..a32b3f0fcd15 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -35,6 +35,7 @@
#include <linux/poll.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
+#include <linux/irq_poll.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -87,6 +88,62 @@ extern u32 megasas_readl(struct megasas_instance *instance,
const volatile void __iomem *addr);
/**
+ * megasas_adp_reset_wait_for_ready - initiate chip reset and wait for
+ * controller to come to ready state
+ * @instance - adapter's soft state
+ * @do_adp_reset - If true, do a chip reset
+ * @ocr_context - If called from OCR context this will
+ * be set to 1, else 0
+ *
+ * This function initiates a chip reset followed by a wait for the controller
+ * to transition to the ready state.
+ * During this time, the driver blocks all access to the PCI config space from
+ * userspace.
+ */
+int
+megasas_adp_reset_wait_for_ready(struct megasas_instance *instance,
+ bool do_adp_reset,
+ int ocr_context)
+{
+ int ret = FAILED;
+
+ /*
+ * Block access to PCI config space from userspace
+ * when diag reset is initiated from driver
+ */
+ if (megasas_dbg_lvl & OCR_DEBUG)
+ dev_info(&instance->pdev->dev,
+ "Block access to PCI config space %s %d\n",
+ __func__, __LINE__);
+
+ pci_cfg_access_lock(instance->pdev);
+
+ if (do_adp_reset) {
+ if (instance->instancet->adp_reset
+ (instance, instance->reg_set))
+ goto out;
+ }
+
+ /* Wait for FW to become ready */
+ if (megasas_transition_to_ready(instance, ocr_context)) {
+ dev_warn(&instance->pdev->dev,
+ "Failed to transition controller to ready for scsi%d.\n",
+ instance->host->host_no);
+ goto out;
+ }
+
+ ret = SUCCESS;
+out:
+ if (megasas_dbg_lvl & OCR_DEBUG)
+ dev_info(&instance->pdev->dev,
+ "Unlock access to PCI config space %s %d\n",
+ __func__, __LINE__);
+
+ pci_cfg_access_unlock(instance->pdev);
+
+ return ret;
+}
+
+/**
* megasas_check_same_4gb_region - check if allocation
* crosses same 4GB boundary or not
* @instance - adapter's soft instance
@@ -133,7 +190,8 @@ megasas_enable_intr_fusion(struct megasas_instance *instance)
writel(~MFI_FUSION_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
/* Dummy readl to force pci flush */
- readl(&regs->outbound_intr_mask);
+ dev_info(&instance->pdev->dev, "%s is called outbound_intr_mask:0x%08x\n",
+ __func__, readl(&regs->outbound_intr_mask));
}
/**
@@ -144,14 +202,14 @@ void
megasas_disable_intr_fusion(struct megasas_instance *instance)
{
u32 mask = 0xFFFFFFFF;
- u32 status;
struct megasas_register_set __iomem *regs;
regs = instance->reg_set;
instance->mask_interrupts = 1;
writel(mask, &regs->outbound_intr_mask);
/* Dummy readl to force pci flush */
- status = readl(&regs->outbound_intr_mask);
+ dev_info(&instance->pdev->dev, "%s is called outbound_intr_mask:0x%08x\n",
+ __func__, readl(&regs->outbound_intr_mask));
}
int
@@ -207,21 +265,17 @@ inline void megasas_return_cmd_fusion(struct megasas_instance *instance,
}
/**
- * megasas_fire_cmd_fusion - Sends command to the FW
- * @instance: Adapter soft state
- * @req_desc: 64bit Request descriptor
- *
- * Perform PCI Write.
+ * megasas_write_64bit_req_desc - PCI writes 64bit request descriptor
+ * @instance: Adapter soft state
+ * @req_desc: 64bit Request descriptor
*/
-
static void
-megasas_fire_cmd_fusion(struct megasas_instance *instance,
+megasas_write_64bit_req_desc(struct megasas_instance *instance,
union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc)
{
#if defined(writeq) && defined(CONFIG_64BIT)
u64 req_data = (((u64)le32_to_cpu(req_desc->u.high) << 32) |
le32_to_cpu(req_desc->u.low));
-
writeq(req_data, &instance->reg_set->inbound_low_queue_port);
#else
unsigned long flags;
@@ -235,6 +289,25 @@ megasas_fire_cmd_fusion(struct megasas_instance *instance,
}
/**
+ * megasas_fire_cmd_fusion - Sends command to the FW
+ * @instance: Adapter soft state
+ * @req_desc: 32bit or 64bit Request descriptor
+ *
+ * Perform PCI Write. The AERO series supports 32-bit descriptors;
+ * adapters prior to the AERO series use 64-bit descriptors.
+ */
+static void
+megasas_fire_cmd_fusion(struct megasas_instance *instance,
+ union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc)
+{
+ if (instance->atomic_desc_support)
+ writel(le32_to_cpu(req_desc->u.low),
+ &instance->reg_set->inbound_single_queue_port);
+ else
+ megasas_write_64bit_req_desc(instance, req_desc);
+}
+
+/**
* megasas_fusion_update_can_queue - Do all Adapter Queue depth related calculations here
* @instance: Adapter soft state
* fw_boot_context: Whether this function called during probe or after OCR
@@ -924,6 +997,7 @@ wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
{
int i;
struct megasas_header *frame_hdr = &cmd->frame->hdr;
+ u32 status_reg;
u32 msecs = seconds * 1000;
@@ -933,6 +1007,12 @@ wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
for (i = 0; (i < msecs) && (frame_hdr->cmd_status == 0xff); i += 20) {
rmb();
msleep(20);
+ if (!(i % 5000)) {
+ status_reg = instance->instancet->read_fw_status_reg(instance)
+ & MFI_STATE_MASK;
+ if (status_reg == MFI_STATE_FAULT)
+ break;
+ }
}
if (frame_hdr->cmd_status == MFI_STAT_INVALID_STATUS)
@@ -966,6 +1046,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
u32 scratch_pad_1;
ktime_t time;
bool cur_fw_64bit_dma_capable;
+ bool cur_intr_coalescing;
fusion = instance->ctrl_context;
@@ -999,6 +1080,16 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
goto fail_fw_init;
}
+ cur_intr_coalescing = (scratch_pad_1 & MR_INTR_COALESCING_SUPPORT_OFFSET) ?
+ true : false;
+
+ if ((instance->low_latency_index_start ==
+ MR_HIGH_IOPS_QUEUE_COUNT) && cur_intr_coalescing)
+ instance->perf_mode = MR_BALANCED_PERF_MODE;
+
+ dev_info(&instance->pdev->dev, "Performance mode :%s\n",
+ MEGASAS_PERF_MODE_2STR(instance->perf_mode));
+
instance->fw_sync_cache_support = (scratch_pad_1 &
MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0;
dev_info(&instance->pdev->dev, "FW supports sync cache\t: %s\n",
@@ -1083,6 +1174,22 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
cpu_to_le32(lower_32_bits(ioc_init_handle));
init_frame->data_xfer_len = cpu_to_le32(sizeof(struct MPI2_IOC_INIT_REQUEST));
+ /*
+ * Each bit in replyqueue_mask represents one group of MSI-x vectors
+ * (each group has 8 vectors)
+ */
+ switch (instance->perf_mode) {
+ case MR_BALANCED_PERF_MODE:
+ init_frame->replyqueue_mask =
+ cpu_to_le16(~(~0 << instance->low_latency_index_start/8));
+ break;
+ case MR_IOPS_PERF_MODE:
+ init_frame->replyqueue_mask =
+ cpu_to_le16(~(~0 << instance->msix_vectors/8));
+ break;
+ }
+
+
req_desc.u.low = cpu_to_le32(lower_32_bits(cmd->frame_phys_addr));
req_desc.u.high = cpu_to_le32(upper_32_bits(cmd->frame_phys_addr));
req_desc.MFAIo.RequestFlags =
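
The replyqueue_mask values produced by the switch above are easier to see with numbers plugged in (each set bit represents one group of 8 MSI-x vectors; the figures below are illustrative only):

    /*
     * Balanced mode, low_latency_index_start = 8:
     *   8 / 8 = 1    ->  ~(~0 << 1)  = 0x0001   (first group of 8 vectors)
     *
     * IOPS mode, msix_vectors = 128:
     *   128 / 8 = 16 ->  ~(~0 << 16) = 0xffff   (all 16 groups)
     */
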
@@ -1101,7 +1208,8 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
break;
}
- megasas_fire_cmd_fusion(instance, &req_desc);
+ /* For AERO as well, IOC_INIT requires a 64-bit descriptor write */
+ megasas_write_64bit_req_desc(instance, &req_desc);
wait_and_poll(instance, cmd, MFI_IO_TIMEOUT_SECS);
@@ -1111,6 +1219,17 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
goto fail_fw_init;
}
+ if (instance->adapter_type >= AERO_SERIES) {
+ scratch_pad_1 = megasas_readl
+ (instance, &instance->reg_set->outbound_scratch_pad_1);
+
+ instance->atomic_desc_support =
+ (scratch_pad_1 & MR_ATOMIC_DESCRIPTOR_SUPPORT_OFFSET) ? 1 : 0;
+
+ dev_info(&instance->pdev->dev, "FW supports atomic descriptor\t: %s\n",
+ instance->atomic_desc_support ? "Yes" : "No");
+ }
+
return 0;
fail_fw_init:
@@ -1133,7 +1252,7 @@ fail_fw_init:
int
megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend) {
int ret = 0;
- u32 pd_seq_map_sz;
+ size_t pd_seq_map_sz;
struct megasas_cmd *cmd;
struct megasas_dcmd_frame *dcmd;
struct fusion_context *fusion = instance->ctrl_context;
@@ -1142,9 +1261,7 @@ megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend) {
pd_sync = (void *)fusion->pd_seq_sync[(instance->pd_seq_map_id & 1)];
pd_seq_h = fusion->pd_seq_phys[(instance->pd_seq_map_id & 1)];
- pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
- (sizeof(struct MR_PD_CFG_SEQ) *
- (MAX_PHYSICAL_DEVICES - 1));
+ pd_seq_map_sz = struct_size(pd_sync, seq, MAX_PHYSICAL_DEVICES - 1);
cmd = megasas_get_cmd(instance);
if (!cmd) {
@@ -1625,6 +1742,7 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
struct fusion_context *fusion;
u32 scratch_pad_1;
int i = 0, count;
+ u32 status_reg;
fusion = instance->ctrl_context;
@@ -1707,8 +1825,21 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
if (megasas_alloc_cmds_fusion(instance))
goto fail_alloc_cmds;
- if (megasas_ioc_init_fusion(instance))
- goto fail_ioc_init;
+ if (megasas_ioc_init_fusion(instance)) {
+ status_reg = instance->instancet->read_fw_status_reg(instance);
+ if (((status_reg & MFI_STATE_MASK) == MFI_STATE_FAULT) &&
+ (status_reg & MFI_RESET_ADAPTER)) {
+ /* Do a chip reset and then retry IOC INIT once */
+ if (megasas_adp_reset_wait_for_ready
+ (instance, true, 0) == FAILED)
+ goto fail_ioc_init;
+
+ if (megasas_ioc_init_fusion(instance))
+ goto fail_ioc_init;
+ } else {
+ goto fail_ioc_init;
+ }
+ }
megasas_display_intel_branding(instance);
if (megasas_get_ctrl_info(instance)) {
@@ -1720,6 +1851,7 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
instance->flag_ieee = 1;
instance->r1_ldio_hint_default = MR_R1_LDIO_PIGGYBACK_DEFAULT;
+ instance->threshold_reply_count = instance->max_fw_cmds / 4;
fusion->fast_path_io = 0;
if (megasas_allocate_raid_maps(instance))
@@ -1970,7 +2102,6 @@ megasas_is_prp_possible(struct megasas_instance *instance,
mega_mod64(sg_dma_address(sg_scmd),
mr_nvme_pg_size)) {
build_prp = false;
- atomic_inc(&instance->sge_holes_type1);
break;
}
}
@@ -1980,7 +2111,6 @@ megasas_is_prp_possible(struct megasas_instance *instance,
sg_dma_len(sg_scmd)),
mr_nvme_pg_size))) {
build_prp = false;
- atomic_inc(&instance->sge_holes_type2);
break;
}
}
@@ -1989,7 +2119,6 @@ megasas_is_prp_possible(struct megasas_instance *instance,
if (mega_mod64(sg_dma_address(sg_scmd),
mr_nvme_pg_size)) {
build_prp = false;
- atomic_inc(&instance->sge_holes_type3);
break;
}
}
@@ -2122,7 +2251,6 @@ megasas_make_prp_nvme(struct megasas_instance *instance, struct scsi_cmnd *scmd,
main_chain_element->Length =
cpu_to_le32(num_prp_in_chain * sizeof(u64));
- atomic_inc(&instance->prp_sgl);
return build_prp;
}
@@ -2197,7 +2325,6 @@ megasas_make_sgl_fusion(struct megasas_instance *instance,
memset(sgl_ptr, 0, instance->max_chain_frame_sz);
}
}
- atomic_inc(&instance->ieee_sgl);
}
/**
@@ -2509,9 +2636,10 @@ static void megasas_stream_detect(struct megasas_instance *instance,
*
*/
static void
-megasas_set_raidflag_cpu_affinity(union RAID_CONTEXT_UNION *praid_context,
- struct MR_LD_RAID *raid, bool fp_possible,
- u8 is_read, u32 scsi_buff_len)
+megasas_set_raidflag_cpu_affinity(struct fusion_context *fusion,
+ union RAID_CONTEXT_UNION *praid_context,
+ struct MR_LD_RAID *raid, bool fp_possible,
+ u8 is_read, u32 scsi_buff_len)
{
u8 cpu_sel = MR_RAID_CTX_CPUSEL_0;
struct RAID_CONTEXT_G35 *rctx_g35;
@@ -2569,11 +2697,11 @@ megasas_set_raidflag_cpu_affinity(union RAID_CONTEXT_UNION *praid_context,
* vs MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS.
* IO Subtype is not bitmap.
*/
- if ((raid->level == 1) && (!is_read)) {
- if (scsi_buff_len > MR_LARGE_IO_MIN_SIZE)
- praid_context->raid_context_g35.raid_flags =
- (MR_RAID_FLAGS_IO_SUB_TYPE_LDIO_BW_LIMIT
- << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
+ if ((fusion->pcie_bw_limitation) && (raid->level == 1) && (!is_read) &&
+ (scsi_buff_len > MR_LARGE_IO_MIN_SIZE)) {
+ praid_context->raid_context_g35.raid_flags =
+ (MR_RAID_FLAGS_IO_SUB_TYPE_LDIO_BW_LIMIT
+ << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
}
}
@@ -2679,6 +2807,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
io_info.r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
scsi_buff_len = scsi_bufflen(scp);
io_request->DataLength = cpu_to_le32(scsi_buff_len);
+ io_info.data_arms = 1;
if (scp->sc_data_direction == DMA_FROM_DEVICE)
io_info.isRead = 1;
@@ -2698,8 +2827,19 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
fp_possible = (io_info.fpOkForIo > 0) ? true : false;
}
- cmd->request_desc->SCSIIO.MSIxIndex =
- instance->reply_map[raw_smp_processor_id()];
+ if ((instance->perf_mode == MR_BALANCED_PERF_MODE) &&
+ atomic_read(&scp->device->device_busy) >
+ (io_info.data_arms * MR_DEVICE_HIGH_IOPS_DEPTH))
+ cmd->request_desc->SCSIIO.MSIxIndex =
+ mega_mod64((atomic64_add_return(1, &instance->high_iops_outstanding) /
+ MR_HIGH_IOPS_BATCH_COUNT), instance->low_latency_index_start);
+ else if (instance->msix_load_balance)
+ cmd->request_desc->SCSIIO.MSIxIndex =
+ (mega_mod64(atomic64_add_return(1, &instance->total_io_count),
+ instance->msix_vectors));
+ else
+ cmd->request_desc->SCSIIO.MSIxIndex =
+ instance->reply_map[raw_smp_processor_id()];
if (instance->adapter_type >= VENTURA_SERIES) {
/* FP for Optimal raid level 1.
@@ -2717,8 +2857,9 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
(instance->host->can_queue)) {
fp_possible = false;
atomic_dec(&instance->fw_outstanding);
- } else if ((scsi_buff_len > MR_LARGE_IO_MIN_SIZE) ||
- (atomic_dec_if_positive(&mrdev_priv->r1_ldio_hint) > 0)) {
+ } else if (fusion->pcie_bw_limitation &&
+ ((scsi_buff_len > MR_LARGE_IO_MIN_SIZE) ||
+ (atomic_dec_if_positive(&mrdev_priv->r1_ldio_hint) > 0))) {
fp_possible = false;
atomic_dec(&instance->fw_outstanding);
if (scsi_buff_len > MR_LARGE_IO_MIN_SIZE)
@@ -2743,7 +2884,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
/* If raid is NULL, set CPU affinity to default CPU0 */
if (raid)
- megasas_set_raidflag_cpu_affinity(&io_request->RaidContext,
+ megasas_set_raidflag_cpu_affinity(fusion, &io_request->RaidContext,
raid, fp_possible, io_info.isRead,
scsi_buff_len);
else
@@ -2759,10 +2900,6 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
(MPI2_REQ_DESCRIPT_FLAGS_FP_IO
<< MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
if (instance->adapter_type == INVADER_SERIES) {
- if (rctx->reg_lock_flags == REGION_TYPE_UNUSED)
- cmd->request_desc->SCSIIO.RequestFlags =
- (MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
- MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
rctx->type = MPI2_TYPE_CUDA;
rctx->nseg = 0x1;
io_request->IoFlags |= cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
@@ -2970,50 +3107,71 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,
<< MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
/* If FW supports PD sequence number */
- if (instance->use_seqnum_jbod_fp &&
- instance->pd_list[pd_index].driveType == TYPE_DISK) {
- /* TgtId must be incremented by 255 as jbod seq number is index
- * below raid map
- */
- /* More than 256 PD/JBOD support for Ventura */
- if (instance->support_morethan256jbod)
- pRAID_Context->virtual_disk_tgt_id =
- pd_sync->seq[pd_index].pd_target_id;
- else
- pRAID_Context->virtual_disk_tgt_id =
- cpu_to_le16(device_id + (MAX_PHYSICAL_DEVICES - 1));
- pRAID_Context->config_seq_num = pd_sync->seq[pd_index].seqNum;
- io_request->DevHandle = pd_sync->seq[pd_index].devHandle;
- if (instance->adapter_type >= VENTURA_SERIES) {
- io_request->RaidContext.raid_context_g35.routing_flags |=
- (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT);
- io_request->RaidContext.raid_context_g35.nseg_type |=
- (1 << RAID_CONTEXT_NSEG_SHIFT);
- io_request->RaidContext.raid_context_g35.nseg_type |=
- (MPI2_TYPE_CUDA << RAID_CONTEXT_TYPE_SHIFT);
+ if (instance->support_seqnum_jbod_fp) {
+ if (instance->use_seqnum_jbod_fp &&
+ instance->pd_list[pd_index].driveType == TYPE_DISK) {
+
+ /* More than 256 PD/JBOD support for Ventura */
+ if (instance->support_morethan256jbod)
+ pRAID_Context->virtual_disk_tgt_id =
+ pd_sync->seq[pd_index].pd_target_id;
+ else
+ pRAID_Context->virtual_disk_tgt_id =
+ cpu_to_le16(device_id +
+ (MAX_PHYSICAL_DEVICES - 1));
+ pRAID_Context->config_seq_num =
+ pd_sync->seq[pd_index].seqNum;
+ io_request->DevHandle =
+ pd_sync->seq[pd_index].devHandle;
+ if (instance->adapter_type >= VENTURA_SERIES) {
+ io_request->RaidContext.raid_context_g35.routing_flags |=
+ (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT);
+ io_request->RaidContext.raid_context_g35.nseg_type |=
+ (1 << RAID_CONTEXT_NSEG_SHIFT);
+ io_request->RaidContext.raid_context_g35.nseg_type |=
+ (MPI2_TYPE_CUDA << RAID_CONTEXT_TYPE_SHIFT);
+ } else {
+ pRAID_Context->type = MPI2_TYPE_CUDA;
+ pRAID_Context->nseg = 0x1;
+ pRAID_Context->reg_lock_flags |=
+ (MR_RL_FLAGS_SEQ_NUM_ENABLE |
+ MR_RL_FLAGS_GRANT_DESTINATION_CUDA);
+ }
} else {
- pRAID_Context->type = MPI2_TYPE_CUDA;
- pRAID_Context->nseg = 0x1;
- pRAID_Context->reg_lock_flags |=
- (MR_RL_FLAGS_SEQ_NUM_ENABLE|MR_RL_FLAGS_GRANT_DESTINATION_CUDA);
+ pRAID_Context->virtual_disk_tgt_id =
+ cpu_to_le16(device_id +
+ (MAX_PHYSICAL_DEVICES - 1));
+ pRAID_Context->config_seq_num = 0;
+ io_request->DevHandle = cpu_to_le16(0xFFFF);
}
- } else if (fusion->fast_path_io) {
- pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id);
- pRAID_Context->config_seq_num = 0;
- local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
- io_request->DevHandle =
- local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
} else {
- /* Want to send all IO via FW path */
pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id);
pRAID_Context->config_seq_num = 0;
- io_request->DevHandle = cpu_to_le16(0xFFFF);
+
+ if (fusion->fast_path_io) {
+ local_map_ptr =
+ fusion->ld_drv_map[(instance->map_id & 1)];
+ io_request->DevHandle =
+ local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
+ } else {
+ io_request->DevHandle = cpu_to_le16(0xFFFF);
+ }
}
cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle;
- cmd->request_desc->SCSIIO.MSIxIndex =
- instance->reply_map[raw_smp_processor_id()];
+ if ((instance->perf_mode == MR_BALANCED_PERF_MODE) &&
+ atomic_read(&scmd->device->device_busy) > MR_DEVICE_HIGH_IOPS_DEPTH)
+ cmd->request_desc->SCSIIO.MSIxIndex =
+ mega_mod64((atomic64_add_return(1, &instance->high_iops_outstanding) /
+ MR_HIGH_IOPS_BATCH_COUNT), instance->low_latency_index_start);
+ else if (instance->msix_load_balance)
+ cmd->request_desc->SCSIIO.MSIxIndex =
+ (mega_mod64(atomic64_add_return(1, &instance->total_io_count),
+ instance->msix_vectors));
+ else
+ cmd->request_desc->SCSIIO.MSIxIndex =
+ instance->reply_map[raw_smp_processor_id()];
if (!fp_possible) {
/* system pd firmware path */
@@ -3193,9 +3351,9 @@ void megasas_prepare_secondRaid1_IO(struct megasas_instance *instance,
r1_cmd->request_desc->SCSIIO.DevHandle = cmd->r1_alt_dev_handle;
r1_cmd->io_request->DevHandle = cmd->r1_alt_dev_handle;
r1_cmd->r1_alt_dev_handle = cmd->io_request->DevHandle;
- cmd->io_request->RaidContext.raid_context_g35.smid.peer_smid =
+ cmd->io_request->RaidContext.raid_context_g35.flow_specific.peer_smid =
cpu_to_le16(r1_cmd->index);
- r1_cmd->io_request->RaidContext.raid_context_g35.smid.peer_smid =
+ r1_cmd->io_request->RaidContext.raid_context_g35.flow_specific.peer_smid =
cpu_to_le16(cmd->index);
/*MSIxIndex of both commands request descriptors should be same*/
r1_cmd->request_desc->SCSIIO.MSIxIndex =
@@ -3313,7 +3471,7 @@ megasas_complete_r1_command(struct megasas_instance *instance,
rctx_g35 = &cmd->io_request->RaidContext.raid_context_g35;
fusion = instance->ctrl_context;
- peer_smid = le16_to_cpu(rctx_g35->smid.peer_smid);
+ peer_smid = le16_to_cpu(rctx_g35->flow_specific.peer_smid);
r1_cmd = fusion->cmd_list[peer_smid - 1];
scmd_local = cmd->scmd;
@@ -3353,7 +3511,8 @@ megasas_complete_r1_command(struct megasas_instance *instance,
* Completes all commands that is in reply descriptor queue
*/
int
-complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
+complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex,
+ struct megasas_irq_context *irq_context)
{
union MPI2_REPLY_DESCRIPTORS_UNION *desc;
struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
@@ -3486,7 +3645,7 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
* number of reply counts and still there are more replies in reply queue
* pending to be completed
*/
- if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
+ if (threshold_reply_count >= instance->threshold_reply_count) {
if (instance->msix_combined)
writel(((MSIxIndex & 0x7) << 24) |
fusion->last_reply_idx[MSIxIndex],
@@ -3496,23 +3655,46 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
fusion->last_reply_idx[MSIxIndex],
instance->reply_post_host_index_addr[0]);
threshold_reply_count = 0;
+ if (irq_context) {
+ if (!irq_context->irq_poll_scheduled) {
+ irq_context->irq_poll_scheduled = true;
+ irq_context->irq_line_enable = true;
+ irq_poll_sched(&irq_context->irqpoll);
+ }
+ return num_completed;
+ }
}
}
- if (!num_completed)
- return IRQ_NONE;
+ if (num_completed) {
+ wmb();
+ if (instance->msix_combined)
+ writel(((MSIxIndex & 0x7) << 24) |
+ fusion->last_reply_idx[MSIxIndex],
+ instance->reply_post_host_index_addr[MSIxIndex/8]);
+ else
+ writel((MSIxIndex << 24) |
+ fusion->last_reply_idx[MSIxIndex],
+ instance->reply_post_host_index_addr[0]);
+ megasas_check_and_restore_queue_depth(instance);
+ }
+ return num_completed;
+}
- wmb();
- if (instance->msix_combined)
- writel(((MSIxIndex & 0x7) << 24) |
- fusion->last_reply_idx[MSIxIndex],
- instance->reply_post_host_index_addr[MSIxIndex/8]);
- else
- writel((MSIxIndex << 24) |
- fusion->last_reply_idx[MSIxIndex],
- instance->reply_post_host_index_addr[0]);
- megasas_check_and_restore_queue_depth(instance);
- return IRQ_HANDLED;
+/**
+ * megasas_enable_irq_poll() - enable irqpoll
+ */
+static void megasas_enable_irq_poll(struct megasas_instance *instance)
+{
+ u32 count, i;
+ struct megasas_irq_context *irq_ctx;
+
+ count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
+
+ for (i = 0; i < count; i++) {
+ irq_ctx = &instance->irq_context[i];
+ irq_poll_enable(&irq_ctx->irqpoll);
+ }
}
/**
@@ -3524,11 +3706,51 @@ void megasas_sync_irqs(unsigned long instance_addr)
u32 count, i;
struct megasas_instance *instance =
(struct megasas_instance *)instance_addr;
+ struct megasas_irq_context *irq_ctx;
count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
- for (i = 0; i < count; i++)
+ for (i = 0; i < count; i++) {
synchronize_irq(pci_irq_vector(instance->pdev, i));
+ irq_ctx = &instance->irq_context[i];
+ irq_poll_disable(&irq_ctx->irqpoll);
+ if (irq_ctx->irq_poll_scheduled) {
+ irq_ctx->irq_poll_scheduled = false;
+ enable_irq(irq_ctx->os_irq);
+ }
+ }
+}
+
+/**
+ * megasas_irqpoll() - process a queue for completed reply descriptors
+ * @irqpoll: IRQ poll structure associated with queue to poll.
+ * @budget: Threshold of reply descriptors to process per poll.
+ *
+ * Return: The number of entries processed.
+ */
+
+int megasas_irqpoll(struct irq_poll *irqpoll, int budget)
+{
+ struct megasas_irq_context *irq_ctx;
+ struct megasas_instance *instance;
+ int num_entries;
+
+ irq_ctx = container_of(irqpoll, struct megasas_irq_context, irqpoll);
+ instance = irq_ctx->instance;
+
+ if (irq_ctx->irq_line_enable) {
+ disable_irq(irq_ctx->os_irq);
+ irq_ctx->irq_line_enable = false;
+ }
+
+ num_entries = complete_cmd_fusion(instance, irq_ctx->MSIxIndex, irq_ctx);
+ if (num_entries < budget) {
+ irq_poll_complete(irqpoll);
+ irq_ctx->irq_poll_scheduled = false;
+ enable_irq(irq_ctx->os_irq);
+ }
+
+ return num_entries;
}
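
The poll handler above only runs after complete_cmd_fusion() arms it with irq_poll_sched() once the per-instance reply threshold is crossed; it keeps the hard interrupt line masked until a pass completes fewer entries than its budget, at which point irq_poll_complete() and enable_irq() hand control back to the ISR. The registration that ties a reply queue to this handler lives in the driver's IRQ setup code, outside this hunk; the sketch below shows the typical irq_poll wiring under that assumption, with the budget value and helper name chosen purely for illustration.

/* Hedged sketch: registering megasas_irqpoll() for one reply queue.
 * Only irq_poll_init() and the handler signature come from
 * <linux/irq_poll.h>; the budget and field setup are illustrative.
 */
#include <linux/irq_poll.h>

#define SKETCH_IRQPOLL_BUDGET	32	/* assumed @budget per poll pass */

static void sketch_setup_irqpoll(struct megasas_instance *instance,
				 struct megasas_irq_context *irq_ctx,
				 u32 msix_index, int os_irq)
{
	irq_ctx->instance = instance;
	irq_ctx->MSIxIndex = msix_index;
	irq_ctx->os_irq = os_irq;
	irq_ctx->irq_poll_scheduled = false;
	irq_ctx->irq_line_enable = false;

	/* The budget passed here is what megasas_irqpoll() sees. */
	irq_poll_init(&irq_ctx->irqpoll, SKETCH_IRQPOLL_BUDGET,
		      megasas_irqpoll);
}
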
/**
@@ -3551,7 +3773,7 @@ megasas_complete_cmd_dpc_fusion(unsigned long instance_addr)
return;
for (MSIxIndex = 0 ; MSIxIndex < count; MSIxIndex++)
- complete_cmd_fusion(instance, MSIxIndex);
+ complete_cmd_fusion(instance, MSIxIndex, NULL);
}
/**
@@ -3566,6 +3788,11 @@ irqreturn_t megasas_isr_fusion(int irq, void *devp)
if (instance->mask_interrupts)
return IRQ_NONE;
+#if defined(ENABLE_IRQ_POLL)
+ if (irq_context->irq_poll_scheduled)
+ return IRQ_HANDLED;
+#endif
+
if (!instance->msix_vectors) {
mfiStatus = instance->instancet->clear_intr(instance);
if (!mfiStatus)
@@ -3578,7 +3805,8 @@ irqreturn_t megasas_isr_fusion(int irq, void *devp)
return IRQ_HANDLED;
}
- return complete_cmd_fusion(instance, irq_context->MSIxIndex);
+ return complete_cmd_fusion(instance, irq_context->MSIxIndex, irq_context)
+ ? IRQ_HANDLED : IRQ_NONE;
}
/**
@@ -3843,7 +4071,7 @@ megasas_check_reset_fusion(struct megasas_instance *instance,
static inline void megasas_trigger_snap_dump(struct megasas_instance *instance)
{
int j;
- u32 fw_state;
+ u32 fw_state, abs_state;
if (!instance->disableOnlineCtrlReset) {
dev_info(&instance->pdev->dev, "Trigger snap dump\n");
@@ -3853,11 +4081,13 @@ static inline void megasas_trigger_snap_dump(struct megasas_instance *instance)
}
for (j = 0; j < instance->snapdump_wait_time; j++) {
- fw_state = instance->instancet->read_fw_status_reg(instance) &
- MFI_STATE_MASK;
+ abs_state = instance->instancet->read_fw_status_reg(instance);
+ fw_state = abs_state & MFI_STATE_MASK;
if (fw_state == MFI_STATE_FAULT) {
- dev_err(&instance->pdev->dev,
- "Found FW in FAULT state, after snap dump trigger\n");
+ dev_printk(KERN_ERR, &instance->pdev->dev,
+ "FW in FAULT state Fault code:0x%x subcode:0x%x func:%s\n",
+ abs_state & MFI_STATE_FAULT_CODE,
+ abs_state & MFI_STATE_FAULT_SUBCODE, __func__);
return;
}
msleep(1000);
@@ -3869,7 +4099,7 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
int reason, int *convert)
{
int i, outstanding, retval = 0, hb_seconds_missed = 0;
- u32 fw_state;
+ u32 fw_state, abs_state;
u32 waittime_for_io_completion;
waittime_for_io_completion =
@@ -3888,12 +4118,13 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
for (i = 0; i < waittime_for_io_completion; i++) {
/* Check if firmware is in fault state */
- fw_state = instance->instancet->read_fw_status_reg(instance) &
- MFI_STATE_MASK;
+ abs_state = instance->instancet->read_fw_status_reg(instance);
+ fw_state = abs_state & MFI_STATE_MASK;
if (fw_state == MFI_STATE_FAULT) {
- dev_warn(&instance->pdev->dev, "Found FW in FAULT state,"
- " will reset adapter scsi%d.\n",
- instance->host->host_no);
+ dev_printk(KERN_ERR, &instance->pdev->dev,
+ "FW in FAULT state Fault code:0x%x subcode:0x%x func:%s\n",
+ abs_state & MFI_STATE_FAULT_CODE,
+ abs_state & MFI_STATE_FAULT_SUBCODE, __func__);
megasas_complete_cmd_dpc_fusion((unsigned long)instance);
if (instance->requestorId && reason) {
dev_warn(&instance->pdev->dev, "SR-IOV Found FW in FAULT"
@@ -4042,6 +4273,13 @@ void megasas_refire_mgmt_cmd(struct megasas_instance *instance)
}
break;
+ case MFI_CMD_TOOLBOX:
+ if (!instance->support_pci_lane_margining) {
+ cmd_mfi->frame->hdr.cmd_status = MFI_STAT_INVALID_CMD;
+ result = COMPLETE_CMD;
+ }
+
+ break;
default:
break;
}
@@ -4265,6 +4503,7 @@ megasas_issue_tm(struct megasas_instance *instance, u16 device_handle,
instance->instancet->disable_intr(instance);
megasas_sync_irqs((unsigned long)instance);
instance->instancet->enable_intr(instance);
+ megasas_enable_irq_poll(instance);
if (scsi_lookup->scmd == NULL)
break;
}
@@ -4278,6 +4517,7 @@ megasas_issue_tm(struct megasas_instance *instance, u16 device_handle,
megasas_sync_irqs((unsigned long)instance);
rc = megasas_track_scsiio(instance, id, channel);
instance->instancet->enable_intr(instance);
+ megasas_enable_irq_poll(instance);
break;
case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
@@ -4376,9 +4616,6 @@ int megasas_task_abort_fusion(struct scsi_cmnd *scmd)
instance = (struct megasas_instance *)scmd->device->host->hostdata;
- scmd_printk(KERN_INFO, scmd, "task abort called for scmd(%p)\n", scmd);
- scsi_print_command(scmd);
-
if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
dev_err(&instance->pdev->dev, "Controller is not OPERATIONAL,"
"SCSI host:%d\n", instance->host->host_no);
@@ -4421,7 +4658,7 @@ int megasas_task_abort_fusion(struct scsi_cmnd *scmd)
goto out;
}
sdev_printk(KERN_INFO, scmd->device,
- "attempting task abort! scmd(%p) tm_dev_handle 0x%x\n",
+ "attempting task abort! scmd(0x%p) tm_dev_handle 0x%x\n",
scmd, devhandle);
mr_device_priv_data->tm_busy = 1;
@@ -4432,9 +4669,12 @@ int megasas_task_abort_fusion(struct scsi_cmnd *scmd)
mr_device_priv_data->tm_busy = 0;
mutex_unlock(&instance->reset_mutex);
-out:
- sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n",
+ scmd_printk(KERN_INFO, scmd, "task abort %s!! scmd(0x%p)\n",
((ret == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
+out:
+ scsi_print_command(scmd);
+ if (megasas_dbg_lvl & TM_DEBUG)
+ megasas_dump_fusion_io(scmd);
return ret;
}
@@ -4457,9 +4697,6 @@ int megasas_reset_target_fusion(struct scsi_cmnd *scmd)
instance = (struct megasas_instance *)scmd->device->host->hostdata;
- sdev_printk(KERN_INFO, scmd->device,
- "target reset called for scmd(%p)\n", scmd);
-
if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
dev_err(&instance->pdev->dev, "Controller is not OPERATIONAL,"
"SCSI host:%d\n", instance->host->host_no);
@@ -4468,8 +4705,8 @@ int megasas_reset_target_fusion(struct scsi_cmnd *scmd)
}
if (!mr_device_priv_data) {
- sdev_printk(KERN_INFO, scmd->device, "device been deleted! "
- "scmd(%p)\n", scmd);
+ sdev_printk(KERN_INFO, scmd->device,
+ "device been deleted! scmd: (0x%p)\n", scmd);
scmd->result = DID_NO_CONNECT << 16;
ret = SUCCESS;
goto out;
@@ -4492,7 +4729,7 @@ int megasas_reset_target_fusion(struct scsi_cmnd *scmd)
}
sdev_printk(KERN_INFO, scmd->device,
- "attempting target reset! scmd(%p) tm_dev_handle 0x%x\n",
+ "attempting target reset! scmd(0x%p) tm_dev_handle: 0x%x\n",
scmd, devhandle);
mr_device_priv_data->tm_busy = 1;
ret = megasas_issue_tm(instance, devhandle,
@@ -4501,10 +4738,10 @@ int megasas_reset_target_fusion(struct scsi_cmnd *scmd)
mr_device_priv_data);
mr_device_priv_data->tm_busy = 0;
mutex_unlock(&instance->reset_mutex);
-out:
- scmd_printk(KERN_NOTICE, scmd, "megasas: target reset %s!!\n",
+ scmd_printk(KERN_NOTICE, scmd, "target reset %s!!\n",
(ret == SUCCESS) ? "SUCCESS" : "FAILED");
+out:
return ret;
}
@@ -4549,12 +4786,14 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
struct megasas_instance *instance;
struct megasas_cmd_fusion *cmd_fusion, *r1_cmd;
struct fusion_context *fusion;
- u32 abs_state, status_reg, reset_adapter;
+ u32 abs_state, status_reg, reset_adapter, fpio_count = 0;
u32 io_timeout_in_crash_mode = 0;
struct scsi_cmnd *scmd_local = NULL;
struct scsi_device *sdev;
int ret_target_prop = DCMD_FAILED;
bool is_target_prop = false;
+ bool do_adp_reset = true;
+ int max_reset_tries = MEGASAS_FUSION_MAX_RESET_TRIES;
instance = (struct megasas_instance *)shost->hostdata;
fusion = instance->ctrl_context;
@@ -4621,7 +4860,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
if (convert)
reason = 0;
- if (megasas_dbg_lvl & OCR_LOGS)
+ if (megasas_dbg_lvl & OCR_DEBUG)
dev_info(&instance->pdev->dev, "\nPending SCSI commands:\n");
/* Now return commands back to the OS */
@@ -4634,13 +4873,17 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
}
scmd_local = cmd_fusion->scmd;
if (cmd_fusion->scmd) {
- if (megasas_dbg_lvl & OCR_LOGS) {
+ if (megasas_dbg_lvl & OCR_DEBUG) {
sdev_printk(KERN_INFO,
cmd_fusion->scmd->device, "SMID: 0x%x\n",
cmd_fusion->index);
- scsi_print_command(cmd_fusion->scmd);
+ megasas_dump_fusion_io(cmd_fusion->scmd);
}
+ if (cmd_fusion->io_request->Function ==
+ MPI2_FUNCTION_SCSI_IO_REQUEST)
+ fpio_count++;
+
scmd_local->result =
megasas_check_mpio_paths(instance,
scmd_local);
@@ -4653,6 +4896,9 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
}
}
+ dev_info(&instance->pdev->dev, "Outstanding fastpath IOs: %d\n",
+ fpio_count);
+
atomic_set(&instance->fw_outstanding, 0);
status_reg = instance->instancet->read_fw_status_reg(instance);
@@ -4664,52 +4910,45 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
dev_warn(&instance->pdev->dev, "Reset not supported"
", killing adapter scsi%d.\n",
instance->host->host_no);
- megaraid_sas_kill_hba(instance);
- instance->skip_heartbeat_timer_del = 1;
- retval = FAILED;
- goto out;
+ goto kill_hba;
}
/* Let SR-IOV VF & PF sync up if there was a HB failure */
if (instance->requestorId && !reason) {
msleep(MEGASAS_OCR_SETTLE_TIME_VF);
- goto transition_to_ready;
+ do_adp_reset = false;
+ max_reset_tries = MEGASAS_SRIOV_MAX_RESET_TRIES_VF;
}
/* Now try to reset the chip */
- for (i = 0; i < MEGASAS_FUSION_MAX_RESET_TRIES; i++) {
-
- if (instance->instancet->adp_reset
- (instance, instance->reg_set))
+ for (i = 0; i < max_reset_tries; i++) {
+ /*
+ * Do adp reset and wait for
+ * controller to transition to ready
+ */
+ if (megasas_adp_reset_wait_for_ready(instance,
+ do_adp_reset, 1) == FAILED)
continue;
-transition_to_ready:
+
/* Wait for FW to become ready */
if (megasas_transition_to_ready(instance, 1)) {
dev_warn(&instance->pdev->dev,
"Failed to transition controller to ready for "
"scsi%d.\n", instance->host->host_no);
- if (instance->requestorId && !reason)
- goto fail_kill_adapter;
- else
- continue;
+ continue;
}
megasas_reset_reply_desc(instance);
megasas_fusion_update_can_queue(instance, OCR_CONTEXT);
if (megasas_ioc_init_fusion(instance)) {
- if (instance->requestorId && !reason)
- goto fail_kill_adapter;
- else
- continue;
+ continue;
}
if (megasas_get_ctrl_info(instance)) {
dev_info(&instance->pdev->dev,
"Failed from %s %d\n",
__func__, __LINE__);
- megaraid_sas_kill_hba(instance);
- retval = FAILED;
- goto out;
+ goto kill_hba;
}
megasas_refire_mgmt_cmd(instance);
@@ -4738,7 +4977,7 @@ transition_to_ready:
clear_bit(MEGASAS_FUSION_IN_RESET,
&instance->reset_flags);
instance->instancet->enable_intr(instance);
-
+ megasas_enable_irq_poll(instance);
shost_for_each_device(sdev, shost) {
if ((instance->tgt_prop) &&
(instance->nvme_page_size))
@@ -4750,9 +4989,9 @@ transition_to_ready:
atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
- dev_info(&instance->pdev->dev, "Interrupts are enabled and"
- " controller is OPERATIONAL for scsi:%d\n",
- instance->host->host_no);
+ dev_info(&instance->pdev->dev,
+ "Adapter is OPERATIONAL for scsi:%d\n",
+ instance->host->host_no);
/* Restart SR-IOV heartbeat */
if (instance->requestorId) {
@@ -4786,13 +5025,10 @@ transition_to_ready:
goto out;
}
-fail_kill_adapter:
/* Reset failed, kill the adapter */
dev_warn(&instance->pdev->dev, "Reset failed, killing "
"adapter scsi%d.\n", instance->host->host_no);
- megaraid_sas_kill_hba(instance);
- instance->skip_heartbeat_timer_del = 1;
- retval = FAILED;
+ goto kill_hba;
} else {
/* For VF: Restart HB timer if we didn't OCR */
if (instance->requestorId) {
@@ -4800,8 +5036,15 @@ fail_kill_adapter:
}
clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
instance->instancet->enable_intr(instance);
+ megasas_enable_irq_poll(instance);
atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
+ goto out;
}
+kill_hba:
+ megaraid_sas_kill_hba(instance);
+ megasas_enable_irq_poll(instance);
+ instance->skip_heartbeat_timer_del = 1;
+ retval = FAILED;
out:
clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
mutex_unlock(&instance->reset_mutex);
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index 7fa73eaca1a8..c013c80fe4e6 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -75,7 +75,8 @@ enum MR_RAID_FLAGS_IO_SUB_TYPE {
MR_RAID_FLAGS_IO_SUB_TYPE_RMW_P = 3,
MR_RAID_FLAGS_IO_SUB_TYPE_RMW_Q = 4,
MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS = 6,
- MR_RAID_FLAGS_IO_SUB_TYPE_LDIO_BW_LIMIT = 7
+ MR_RAID_FLAGS_IO_SUB_TYPE_LDIO_BW_LIMIT = 7,
+ MR_RAID_FLAGS_IO_SUB_TYPE_R56_DIV_OFFLOAD = 8
};
/*
@@ -88,7 +89,6 @@ enum MR_RAID_FLAGS_IO_SUB_TYPE {
#define MEGASAS_FP_CMD_LEN 16
#define MEGASAS_FUSION_IN_RESET 0
-#define THRESHOLD_REPLY_COUNT 50
#define RAID_1_PEER_CMDS 2
#define JBOD_MAPS_COUNT 2
#define MEGASAS_REDUCE_QD_COUNT 64
@@ -140,12 +140,15 @@ struct RAID_CONTEXT_G35 {
u16 timeout_value; /* 0x02 -0x03 */
u16 routing_flags; // 0x04 -0x05 routing flags
u16 virtual_disk_tgt_id; /* 0x06 -0x07 */
- u64 reg_lock_row_lba; /* 0x08 - 0x0F */
+ __le64 reg_lock_row_lba; /* 0x08 - 0x0F */
u32 reg_lock_length; /* 0x10 - 0x13 */
- union {
- u16 next_lmid; /* 0x14 - 0x15 */
- u16 peer_smid; /* used for the raid 1/10 fp writes */
- } smid;
+ union { // flow specific
+ u16 rmw_op_index; /* 0x14 - 0x15, R5/6 RMW: rmw operation index*/
+ u16 peer_smid; /* 0x14 - 0x15, R1 Write: peer smid*/
+ u16 r56_arm_map; /* 0x14 - 0x15, Unused [15], LogArm[14:10], P-Arm[9:5], Q-Arm[4:0] */
+
+ } flow_specific;
+
u8 ex_status; /* 0x16 : OUT */
u8 status; /* 0x17 status */
u8 raid_flags; /* 0x18 resvd[7:6], ioSubType[5:4],
@@ -236,6 +239,13 @@ union RAID_CONTEXT_UNION {
#define RAID_CTX_SPANARM_SPAN_SHIFT (5)
#define RAID_CTX_SPANARM_SPAN_MASK (0xE0)
+/* LogArm[14:10], P-Arm[9:5], Q-Arm[4:0] */
+#define RAID_CTX_R56_Q_ARM_MASK (0x1F)
+#define RAID_CTX_R56_P_ARM_SHIFT (5)
+#define RAID_CTX_R56_P_ARM_MASK (0x3E0)
+#define RAID_CTX_R56_LOG_ARM_SHIFT (10)
+#define RAID_CTX_R56_LOG_ARM_MASK (0x7C00)
+
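
The three masks above carve the new flow_specific.r56_arm_map field into its Q-Arm, P-Arm and LogArm components. A small hedged sketch of the decode, using only the macros added here (the helper name and out-parameters are illustrative):

/* Hedged sketch: unpacking r56_arm_map with the masks defined above.
 * Q-Arm occupies bits [4:0], P-Arm bits [9:5], LogArm bits [14:10].
 */
static inline void sketch_decode_r56_arm_map(u16 arm_map, u8 *log_arm,
					     u8 *p_arm, u8 *q_arm)
{
	*q_arm   = arm_map & RAID_CTX_R56_Q_ARM_MASK;
	*p_arm   = (arm_map & RAID_CTX_R56_P_ARM_MASK) >>
			RAID_CTX_R56_P_ARM_SHIFT;
	*log_arm = (arm_map & RAID_CTX_R56_LOG_ARM_MASK) >>
			RAID_CTX_R56_LOG_ARM_SHIFT;
}
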
/* number of bits per index in U32 TrackStream */
#define BITS_PER_INDEX_STREAM 4
#define INVALID_STREAM_NUM 16
@@ -940,6 +950,7 @@ struct IO_REQUEST_INFO {
u8 pd_after_lb;
u16 r1_alt_dev_handle; /* raid 1/10 only */
bool ra_capable;
+ u8 data_arms;
};
struct MR_LD_TARGET_SYNC {
@@ -1324,7 +1335,8 @@ struct fusion_context {
dma_addr_t ioc_init_request_phys;
struct MPI2_IOC_INIT_REQUEST *ioc_init_request;
struct megasas_cmd *ioc_init_cmd;
-
+ bool pcie_bw_limitation;
+ bool r56_div_offload;
};
union desc_value {
@@ -1349,6 +1361,11 @@ struct MR_SNAPDUMP_PROPERTIES {
u8 reserved[12];
};
+struct megasas_debugfs_buffer {
+ void *buf;
+ u32 len;
+};
+
void megasas_free_cmds_fusion(struct megasas_instance *instance);
int megasas_ioc_init_fusion(struct megasas_instance *instance);
u8 megasas_get_map_info(struct megasas_instance *instance);
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
index a2f4a55c51be..167d79d145ca 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
@@ -1398,7 +1398,7 @@ typedef struct _MPI2_CONFIG_PAGE_IOC_1 {
U8 PCIBusNum; /*0x0E */
U8 PCIDomainSegment; /*0x0F */
U32 Reserved1; /*0x10 */
- U32 Reserved2; /*0x14 */
+ U32 ProductSpecific; /* 0x14 */
} MPI2_CONFIG_PAGE_IOC_1,
*PTR_MPI2_CONFIG_PAGE_IOC_1,
Mpi2IOCPage1_t, *pMpi2IOCPage1_t;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 8aacbd1e7db2..684662888792 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -74,28 +74,28 @@ static MPT_CALLBACK mpt_callbacks[MPT_MAX_CALLBACKS];
#define MAX_HBA_QUEUE_DEPTH 30000
#define MAX_CHAIN_DEPTH 100000
static int max_queue_depth = -1;
-module_param(max_queue_depth, int, 0);
+module_param(max_queue_depth, int, 0444);
MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");
static int max_sgl_entries = -1;
-module_param(max_sgl_entries, int, 0);
+module_param(max_sgl_entries, int, 0444);
MODULE_PARM_DESC(max_sgl_entries, " max sg entries ");
static int msix_disable = -1;
-module_param(msix_disable, int, 0);
+module_param(msix_disable, int, 0444);
MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");
static int smp_affinity_enable = 1;
-module_param(smp_affinity_enable, int, S_IRUGO);
+module_param(smp_affinity_enable, int, 0444);
MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");
static int max_msix_vectors = -1;
-module_param(max_msix_vectors, int, 0);
+module_param(max_msix_vectors, int, 0444);
MODULE_PARM_DESC(max_msix_vectors,
" max msix vectors");
static int irqpoll_weight = -1;
-module_param(irqpoll_weight, int, 0);
+module_param(irqpoll_weight, int, 0444);
MODULE_PARM_DESC(irqpoll_weight,
"irq poll weight (default= one fourth of HBA queue depth)");
@@ -103,6 +103,26 @@ static int mpt3sas_fwfault_debug;
MODULE_PARM_DESC(mpt3sas_fwfault_debug,
" enable detection of firmware fault and halt firmware - (default=0)");
+static int perf_mode = -1;
+module_param(perf_mode, int, 0444);
+MODULE_PARM_DESC(perf_mode,
+ "Performance mode (only for Aero/Sea Generation), options:\n\t\t"
+ "0 - balanced: high iops mode is enabled &\n\t\t"
+ "interrupt coalescing is enabled only on high iops queues,\n\t\t"
+ "1 - iops: high iops mode is disabled &\n\t\t"
+ "interrupt coalescing is enabled on all queues,\n\t\t"
+ "2 - latency: high iops mode is disabled &\n\t\t"
+ "interrupt coalescing is enabled on all queues with timeout value 0xA,\n"
+ "\t\tdefault - default perf_mode is 'balanced'"
+ );
+
+enum mpt3sas_perf_mode {
+ MPT_PERF_MODE_DEFAULT = -1,
+ MPT_PERF_MODE_BALANCED = 0,
+ MPT_PERF_MODE_IOPS = 1,
+ MPT_PERF_MODE_LATENCY = 2,
+};
+
static int
_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc);
@@ -1282,7 +1302,7 @@ _base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
ack_request->EventContext = mpi_reply->EventContext;
ack_request->VF_ID = 0; /* TODO */
ack_request->VP_ID = 0;
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
out:
@@ -2793,6 +2813,9 @@ _base_free_irq(struct MPT3SAS_ADAPTER *ioc)
list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
list_del(&reply_q->list);
+ if (ioc->smp_affinity_enable)
+ irq_set_affinity_hint(pci_irq_vector(ioc->pdev,
+ reply_q->msix_index), NULL);
free_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index),
reply_q);
kfree(reply_q);
@@ -2857,14 +2880,13 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
{
unsigned int cpu, nr_cpus, nr_msix, index = 0;
struct adapter_reply_queue *reply_q;
+ int local_numa_node;
if (!_base_is_controller_msix_enabled(ioc))
return;
- ioc->msix_load_balance = false;
- if (ioc->reply_queue_count < num_online_cpus()) {
- ioc->msix_load_balance = true;
+
+ if (ioc->msix_load_balance)
return;
- }
memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz);
@@ -2874,14 +2896,33 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
if (!nr_msix)
return;
- if (smp_affinity_enable) {
+ if (ioc->smp_affinity_enable) {
+
+ /*
+ * set irq affinity to local numa node for those irqs
+ * corresponding to high iops queues.
+ */
+ if (ioc->high_iops_queues) {
+ local_numa_node = dev_to_node(&ioc->pdev->dev);
+ for (index = 0; index < ioc->high_iops_queues;
+ index++) {
+ irq_set_affinity_hint(pci_irq_vector(ioc->pdev,
+ index), cpumask_of_node(local_numa_node));
+ }
+ }
+
list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
- const cpumask_t *mask = pci_irq_get_affinity(ioc->pdev,
- reply_q->msix_index);
+ const cpumask_t *mask;
+
+ if (reply_q->msix_index < ioc->high_iops_queues)
+ continue;
+
+ mask = pci_irq_get_affinity(ioc->pdev,
+ reply_q->msix_index);
if (!mask) {
ioc_warn(ioc, "no affinity for msi %x\n",
reply_q->msix_index);
- continue;
+ goto fall_back;
}
for_each_cpu_and(cpu, mask, cpu_online_mask) {
@@ -2892,12 +2933,18 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
}
return;
}
+
+fall_back:
cpu = cpumask_first(cpu_online_mask);
+ nr_msix -= ioc->high_iops_queues;
+ index = 0;
list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
-
unsigned int i, group = nr_cpus / nr_msix;
+ if (reply_q->msix_index < ioc->high_iops_queues)
+ continue;
+
if (cpu >= nr_cpus)
break;
@@ -2913,6 +2960,52 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
}
/**
+ * _base_check_and_enable_high_iops_queues - enable high iops mode
+ * @ioc: per adapter object
+ * @hba_msix_vector_count: msix vectors supported by HBA
+ *
+ * Enable high iops queues only if
+ * - HBA is a SEA/AERO controller and
+ *  - number of MSI-X vectors supported by the HBA is 128 and
+ * - total CPU count in the system >=16 and
+ * - loaded driver with default max_msix_vectors module parameter and
+ * - system booted in non kdump mode
+ *
+ * returns nothing.
+ */
+static void
+_base_check_and_enable_high_iops_queues(struct MPT3SAS_ADAPTER *ioc,
+ int hba_msix_vector_count)
+{
+ u16 lnksta, speed;
+
+ if (perf_mode == MPT_PERF_MODE_IOPS ||
+ perf_mode == MPT_PERF_MODE_LATENCY) {
+ ioc->high_iops_queues = 0;
+ return;
+ }
+
+ if (perf_mode == MPT_PERF_MODE_DEFAULT) {
+
+ pcie_capability_read_word(ioc->pdev, PCI_EXP_LNKSTA, &lnksta);
+ speed = lnksta & PCI_EXP_LNKSTA_CLS;
+
+ if (speed < 0x4) {
+ ioc->high_iops_queues = 0;
+ return;
+ }
+ }
+
+ if (!reset_devices && ioc->is_aero_ioc &&
+ hba_msix_vector_count == MPT3SAS_GEN35_MAX_MSIX_QUEUES &&
+ num_online_cpus() >= MPT3SAS_HIGH_IOPS_REPLY_QUEUES &&
+ max_msix_vectors == -1)
+ ioc->high_iops_queues = MPT3SAS_HIGH_IOPS_REPLY_QUEUES;
+ else
+ ioc->high_iops_queues = 0;
+}
+
+/**
* _base_disable_msix - disables msix
* @ioc: per adapter object
*
@@ -2922,11 +3015,38 @@ _base_disable_msix(struct MPT3SAS_ADAPTER *ioc)
{
if (!ioc->msix_enable)
return;
- pci_disable_msix(ioc->pdev);
+ pci_free_irq_vectors(ioc->pdev);
ioc->msix_enable = 0;
}
/**
+ * _base_alloc_irq_vectors - allocate msix vectors
+ * @ioc: per adapter object
+ *
+ */
+static int
+_base_alloc_irq_vectors(struct MPT3SAS_ADAPTER *ioc)
+{
+ int i, irq_flags = PCI_IRQ_MSIX;
+ struct irq_affinity desc = { .pre_vectors = ioc->high_iops_queues };
+ struct irq_affinity *descp = &desc;
+
+ if (ioc->smp_affinity_enable)
+ irq_flags |= PCI_IRQ_AFFINITY;
+ else
+ descp = NULL;
+
+ ioc_info(ioc, " %d %d\n", ioc->high_iops_queues,
+ ioc->msix_vector_count);
+
+ i = pci_alloc_irq_vectors_affinity(ioc->pdev,
+ ioc->high_iops_queues,
+ ioc->msix_vector_count, irq_flags, descp);
+
+ return i;
+}
+
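
The .pre_vectors field excludes the first high_iops_queues vectors from the PCI core's automatic affinity spreading, which is why pci_irq_get_affinity() returns no mask for them and _base_assign_reply_queues() pins them to the controller's local NUMA node by hand. A minimal hedged sketch of that allocation pattern in isolation (the counts and helper name are illustrative assumptions):

/* Hedged sketch: reserving "pre" vectors from affinity spreading so the
 * driver can pin them manually; remaining vectors are spread by the core.
 */
#include <linux/pci.h>
#include <linux/interrupt.h>

static int sketch_alloc_reply_queue_vectors(struct pci_dev *pdev)
{
	struct irq_affinity desc = {
		.pre_vectors = 8,	/* e.g. the high iops reply queues */
	};

	return pci_alloc_irq_vectors_affinity(pdev, 8, 128,
					      PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
					      &desc);
}
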
+/**
* _base_enable_msix - enables msix, failback to io_apic
* @ioc: per adapter object
*
@@ -2937,7 +3057,8 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
int r;
int i, local_max_msix_vectors;
u8 try_msix = 0;
- unsigned int irq_flags = PCI_IRQ_MSIX;
+
+ ioc->msix_load_balance = false;
if (msix_disable == -1 || msix_disable == 0)
try_msix = 1;
@@ -2948,12 +3069,16 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
if (_base_check_enable_msix(ioc) != 0)
goto try_ioapic;
- ioc->reply_queue_count = min_t(int, ioc->cpu_count,
+ ioc_info(ioc, "MSI-X vectors supported: %d\n", ioc->msix_vector_count);
+ pr_info("\t no of cores: %d, max_msix_vectors: %d\n",
+ ioc->cpu_count, max_msix_vectors);
+ if (ioc->is_aero_ioc)
+ _base_check_and_enable_high_iops_queues(ioc,
+ ioc->msix_vector_count);
+ ioc->reply_queue_count =
+ min_t(int, ioc->cpu_count + ioc->high_iops_queues,
ioc->msix_vector_count);
- ioc_info(ioc, "MSI-X vectors supported: %d, no of cores: %d, max_msix_vectors: %d\n",
- ioc->msix_vector_count, ioc->cpu_count, max_msix_vectors);
-
if (!ioc->rdpq_array_enable && max_msix_vectors == -1)
local_max_msix_vectors = (reset_devices) ? 1 : 8;
else
@@ -2965,14 +3090,23 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
else if (local_max_msix_vectors == 0)
goto try_ioapic;
- if (ioc->msix_vector_count < ioc->cpu_count)
- smp_affinity_enable = 0;
+ /*
+ * Enable msix_load_balance only if combined reply queue mode is
+ * disabled on SAS3 & above generation HBA devices.
+ */
+ if (!ioc->combined_reply_queue &&
+ ioc->hba_mpi_version_belonged != MPI2_VERSION) {
+ ioc->msix_load_balance = true;
+ }
- if (smp_affinity_enable)
- irq_flags |= PCI_IRQ_AFFINITY;
+ /*
+	 * smp affinity setting is not needed when msix load balance
+ * is enabled.
+ */
+ if (ioc->msix_load_balance)
+ ioc->smp_affinity_enable = 0;
- r = pci_alloc_irq_vectors(ioc->pdev, 1, ioc->reply_queue_count,
- irq_flags);
+ r = _base_alloc_irq_vectors(ioc);
if (r < 0) {
dfailprintk(ioc,
ioc_info(ioc, "pci_alloc_irq_vectors failed (r=%d) !!!\n",
@@ -2991,11 +3125,15 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
}
}
+ ioc_info(ioc, "High IOPs queues : %s\n",
+ ioc->high_iops_queues ? "enabled" : "disabled");
+
return 0;
/* failback to io_apic interrupt routing */
try_ioapic:
-
+ ioc->high_iops_queues = 0;
+ ioc_info(ioc, "High IOPs queues : disabled\n");
ioc->reply_queue_count = 1;
r = pci_alloc_irq_vectors(ioc->pdev, 1, 1, PCI_IRQ_LEGACY);
if (r < 0) {
@@ -3265,8 +3403,18 @@ mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 phys_addr)
return ioc->reply + (phys_addr - (u32)ioc->reply_dma);
}
+/**
+ * _base_get_msix_index - get the msix index
+ * @ioc: per adapter object
+ * @scmd: scsi_cmnd object
+ *
+ * returns msix index of general reply queues,
+ * i.e. reply queue on which IO request's reply
+ * should be posted by the HBA firmware.
+ */
static inline u8
-_base_get_msix_index(struct MPT3SAS_ADAPTER *ioc)
+_base_get_msix_index(struct MPT3SAS_ADAPTER *ioc,
+ struct scsi_cmnd *scmd)
{
/* Enables reply_queue load balancing */
if (ioc->msix_load_balance)
@@ -3278,6 +3426,35 @@ _base_get_msix_index(struct MPT3SAS_ADAPTER *ioc)
}
/**
+ * _base_get_high_iops_msix_index - get the msix index of
+ * high iops queues
+ * @ioc: per adapter object
+ * @scmd: scsi_cmnd object
+ *
+ * Returns: msix index of high iops reply queues.
+ * i.e. high iops reply queue on which IO request's
+ * reply should be posted by the HBA firmware.
+ */
+static inline u8
+_base_get_high_iops_msix_index(struct MPT3SAS_ADAPTER *ioc,
+ struct scsi_cmnd *scmd)
+{
+ /**
+ * Round robin the IO interrupts among the high iops
+ * reply queues in terms of batch count 16 when outstanding
+	 * IOs on the target device are >= 8.
+ */
+ if (atomic_read(&scmd->device->device_busy) >
+ MPT3SAS_DEVICE_HIGH_IOPS_DEPTH)
+ return base_mod64((
+ atomic64_add_return(1, &ioc->high_iops_outstanding) /
+ MPT3SAS_HIGH_IOPS_BATCH_COUNT),
+ MPT3SAS_HIGH_IOPS_REPLY_QUEUES);
+
+ return _base_get_msix_index(ioc, scmd);
+}
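
With MPT3SAS_HIGH_IOPS_BATCH_COUNT of 16 and MPT3SAS_HIGH_IOPS_REPLY_QUEUES of 8, the expression above lets roughly 16 consecutive completions share one high iops reply queue before moving to the next, wrapping after queue 7; the counter only advances while the target device has at least MPT3SAS_DEVICE_HIGH_IOPS_DEPTH commands outstanding. The hedged sketch below restates the arithmetic with the atomic counter reduced to a plain integer purely for illustration:

/* Hedged sketch of the batched round-robin above; the atomic counter is
 * reduced to a plain integer and base_mod64() to the % operator.
 */
#define SKETCH_BATCH_COUNT	16	/* MPT3SAS_HIGH_IOPS_BATCH_COUNT */
#define SKETCH_REPLY_QUEUES	 8	/* MPT3SAS_HIGH_IOPS_REPLY_QUEUES */

static unsigned int sketch_pick_high_iops_queue(unsigned long long *counter)
{
	unsigned long long n = ++(*counter);

	/* ~16 consecutive completions share a queue, then move on. */
	return (n / SKETCH_BATCH_COUNT) % SKETCH_REPLY_QUEUES;
}
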
+
+/**
* mpt3sas_base_get_smid - obtain a free smid from internal queue
* @ioc: per adapter object
* @cb_idx: callback index
@@ -3325,8 +3502,8 @@ mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
smid = tag + 1;
request->cb_idx = cb_idx;
- request->msix_io = _base_get_msix_index(ioc);
request->smid = smid;
+ request->scmd = scmd;
INIT_LIST_HEAD(&request->chain_list);
return smid;
}
@@ -3380,6 +3557,7 @@ void mpt3sas_base_clear_st(struct MPT3SAS_ADAPTER *ioc,
return;
st->cb_idx = 0xFF;
st->direct_io = 0;
+ st->scmd = NULL;
atomic_set(&ioc->chain_lookup[st->smid - 1].chain_offset, 0);
st->smid = 0;
}
@@ -3479,13 +3657,37 @@ _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
#endif
/**
+ * _base_set_and_get_msix_index - get the msix index and assign to msix_io
+ * variable of scsi tracker
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * returns msix index.
+ */
+static u8
+_base_set_and_get_msix_index(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ struct scsiio_tracker *st = NULL;
+
+ if (smid < ioc->hi_priority_smid)
+ st = _get_st_from_smid(ioc, smid);
+
+ if (st == NULL)
+ return _base_get_msix_index(ioc, NULL);
+
+ st->msix_io = ioc->get_msix_index_for_smlio(ioc, st->scmd);
+ return st->msix_io;
+}
+
+/**
* _base_put_smid_mpi_ep_scsi_io - send SCSI_IO request to firmware
* @ioc: per adapter object
* @smid: system request message index
* @handle: device handle
*/
static void
-_base_put_smid_mpi_ep_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
+_base_put_smid_mpi_ep_scsi_io(struct MPT3SAS_ADAPTER *ioc,
+ u16 smid, u16 handle)
{
Mpi2RequestDescriptorUnion_t descriptor;
u64 *request = (u64 *)&descriptor;
@@ -3498,7 +3700,7 @@ _base_put_smid_mpi_ep_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
_base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
ioc->request_sz);
descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
- descriptor.SCSIIO.MSIxIndex = _base_get_msix_index(ioc);
+ descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
descriptor.SCSIIO.SMID = cpu_to_le16(smid);
descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
descriptor.SCSIIO.LMID = 0;
@@ -3520,7 +3722,7 @@ _base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
- descriptor.SCSIIO.MSIxIndex = _base_get_msix_index(ioc);
+ descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
descriptor.SCSIIO.SMID = cpu_to_le16(smid);
descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
descriptor.SCSIIO.LMID = 0;
@@ -3529,13 +3731,13 @@ _base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
}
/**
- * mpt3sas_base_put_smid_fast_path - send fast path request to firmware
+ * _base_put_smid_fast_path - send fast path request to firmware
* @ioc: per adapter object
* @smid: system request message index
* @handle: device handle
*/
-void
-mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+static void
+_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
u16 handle)
{
Mpi2RequestDescriptorUnion_t descriptor;
@@ -3543,7 +3745,7 @@ mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
descriptor.SCSIIO.RequestFlags =
MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
- descriptor.SCSIIO.MSIxIndex = _base_get_msix_index(ioc);
+ descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
descriptor.SCSIIO.SMID = cpu_to_le16(smid);
descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
descriptor.SCSIIO.LMID = 0;
@@ -3552,13 +3754,13 @@ mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
}
/**
- * mpt3sas_base_put_smid_hi_priority - send Task Management request to firmware
+ * _base_put_smid_hi_priority - send Task Management request to firmware
* @ioc: per adapter object
* @smid: system request message index
 * @msix_task: msix_task will be same as msix of IO in case of task abort else 0.
*/
-void
-mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+static void
+_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
u16 msix_task)
{
Mpi2RequestDescriptorUnion_t descriptor;
@@ -3607,7 +3809,7 @@ mpt3sas_base_put_smid_nvme_encap(struct MPT3SAS_ADAPTER *ioc, u16 smid)
descriptor.Default.RequestFlags =
MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
- descriptor.Default.MSIxIndex = _base_get_msix_index(ioc);
+ descriptor.Default.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
descriptor.Default.SMID = cpu_to_le16(smid);
descriptor.Default.LMID = 0;
descriptor.Default.DescriptorTypeDependent = 0;
@@ -3616,12 +3818,12 @@ mpt3sas_base_put_smid_nvme_encap(struct MPT3SAS_ADAPTER *ioc, u16 smid)
}
/**
- * mpt3sas_base_put_smid_default - Default, primarily used for config pages
+ * _base_put_smid_default - Default, primarily used for config pages
* @ioc: per adapter object
* @smid: system request message index
*/
-void
-mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+static void
+_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
Mpi2RequestDescriptorUnion_t descriptor;
void *mpi_req_iomem;
@@ -3639,7 +3841,7 @@ mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
}
request = (u64 *)&descriptor;
descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
- descriptor.Default.MSIxIndex = _base_get_msix_index(ioc);
+ descriptor.Default.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
descriptor.Default.SMID = cpu_to_le16(smid);
descriptor.Default.LMID = 0;
descriptor.Default.DescriptorTypeDependent = 0;
@@ -3653,6 +3855,95 @@ mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
}
/**
+ * _base_put_smid_scsi_io_atomic - send SCSI_IO request to firmware using
+ * Atomic Request Descriptor
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @handle: device handle, unused in this function, for function type match
+ *
+ * Return nothing.
+ */
+static void
+_base_put_smid_scsi_io_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ u16 handle)
+{
+ Mpi26AtomicRequestDescriptor_t descriptor;
+ u32 *request = (u32 *)&descriptor;
+
+ descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
+ descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
+ descriptor.SMID = cpu_to_le16(smid);
+
+ writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
+}
+
+/**
+ * _base_put_smid_fast_path_atomic - send fast path request to firmware
+ * using Atomic Request Descriptor
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @handle: device handle, unused in this function, for function type match
+ * Return nothing
+ */
+static void
+_base_put_smid_fast_path_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ u16 handle)
+{
+ Mpi26AtomicRequestDescriptor_t descriptor;
+ u32 *request = (u32 *)&descriptor;
+
+ descriptor.RequestFlags = MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
+ descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
+ descriptor.SMID = cpu_to_le16(smid);
+
+ writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
+}
+
+/**
+ * _base_put_smid_hi_priority_atomic - send Task Management request to
+ * firmware using Atomic Request Descriptor
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_task: msix_task will be same as msix of IO in case of task abort else 0
+ *
+ * Return nothing.
+ */
+static void
+_base_put_smid_hi_priority_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ u16 msix_task)
+{
+ Mpi26AtomicRequestDescriptor_t descriptor;
+ u32 *request = (u32 *)&descriptor;
+
+ descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
+ descriptor.MSIxIndex = msix_task;
+ descriptor.SMID = cpu_to_le16(smid);
+
+ writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
+}
+
+/**
+ * _base_put_smid_default_atomic - Default, primarily used for config pages,
+ * using Atomic Request Descriptor
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Return nothing.
+ */
+static void
+_base_put_smid_default_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ Mpi26AtomicRequestDescriptor_t descriptor;
+ u32 *request = (u32 *)&descriptor;
+
+ descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+ descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
+ descriptor.SMID = cpu_to_le16(smid);
+
+ writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
+}
+
+/**
* _base_display_OEMs_branding - Display branding string
* @ioc: per adapter object
*/
@@ -3952,7 +4243,7 @@ _base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
ioc->build_sg(ioc, &mpi_request->SGL, 0, 0, fwpkg_data_dma,
data_length);
init_completion(&ioc->base_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
/* Wait for 15 seconds */
wait_for_completion_timeout(&ioc->base_cmds.done,
FW_IMG_HDR_READ_TIMEOUT*HZ);
@@ -4192,6 +4483,71 @@ out:
}
/**
+ * _base_update_ioc_page1_inlinewith_perf_mode - Update IOC Page1 fields
+ * according to performance mode.
+ * @ioc : per adapter object
+ *
+ * Return nothing.
+ */
+static void
+_base_update_ioc_page1_inlinewith_perf_mode(struct MPT3SAS_ADAPTER *ioc)
+{
+ Mpi2IOCPage1_t ioc_pg1;
+ Mpi2ConfigReply_t mpi_reply;
+
+ mpt3sas_config_get_ioc_pg1(ioc, &mpi_reply, &ioc->ioc_pg1_copy);
+ memcpy(&ioc_pg1, &ioc->ioc_pg1_copy, sizeof(Mpi2IOCPage1_t));
+
+ switch (perf_mode) {
+ case MPT_PERF_MODE_DEFAULT:
+ case MPT_PERF_MODE_BALANCED:
+ if (ioc->high_iops_queues) {
+ ioc_info(ioc,
+ "Enable interrupt coalescing only for first\t"
+ "%d reply queues\n",
+ MPT3SAS_HIGH_IOPS_REPLY_QUEUES);
+ /*
+ * If 31st bit is zero then interrupt coalescing is
+ * enabled for all reply descriptor post queues.
+ * If 31st bit is set to one then user can
+			 * enable/disable interrupt coalescing on a per reply
+			 * descriptor post queue group (of 8) basis. So to enable
+			 * interrupt coalescing only on the first reply descriptor
+			 * post queue group, the 31st bit and the zeroth bit are set.
+ */
+ ioc_pg1.ProductSpecific = cpu_to_le32(0x80000000 |
+ ((1 << MPT3SAS_HIGH_IOPS_REPLY_QUEUES/8) - 1));
+ mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
+ ioc_info(ioc, "performance mode: balanced\n");
+ return;
+ }
+ /* Fall through */
+ case MPT_PERF_MODE_LATENCY:
+ /*
+ * Enable interrupt coalescing on all reply queues
+ * with timeout value 0xA
+ */
+ ioc_pg1.CoalescingTimeout = cpu_to_le32(0xa);
+ ioc_pg1.Flags |= cpu_to_le32(MPI2_IOCPAGE1_REPLY_COALESCING);
+ ioc_pg1.ProductSpecific = 0;
+ mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
+ ioc_info(ioc, "performance mode: latency\n");
+ break;
+ case MPT_PERF_MODE_IOPS:
+ /*
+ * Enable interrupt coalescing on all reply queues.
+ */
+ ioc_info(ioc,
+ "performance mode: iops with coalescing timeout: 0x%x\n",
+ le32_to_cpu(ioc_pg1.CoalescingTimeout));
+ ioc_pg1.Flags |= cpu_to_le32(MPI2_IOCPAGE1_REPLY_COALESCING);
+ ioc_pg1.ProductSpecific = 0;
+ mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
+ break;
+ }
+}
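
In the balanced case above, MPT3SAS_HIGH_IOPS_REPLY_QUEUES is 8, so ProductSpecific works out to 0x80000000 | ((1 << 1) - 1) = 0x80000001: bit 31 switches IOC Page 1 to per-group control and bit 0 enables coalescing for the first group of eight reply descriptor post queues only. A hedged sketch of that mask computation (the helper name is illustrative):

/* Hedged sketch: per-group interrupt coalescing mask as built above.
 * Bit 31 selects per-group control; bits [n-1:0] enable coalescing for
 * the first n groups of 8 reply descriptor post queues.
 */
static u32 sketch_coalescing_group_mask(unsigned int high_iops_queues)
{
	unsigned int groups = high_iops_queues / 8;

	return 0x80000000u | ((1u << groups) - 1);	/* 8 -> 0x80000001 */
}
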
+
+/**
* _base_static_config_pages - static start of day config pages
* @ioc: per adapter object
*/
@@ -4258,6 +4614,8 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
if (ioc->iounit_pg8.NumSensors)
ioc->temp_sensors_count = ioc->iounit_pg8.NumSensors;
+ if (ioc->is_aero_ioc)
+ _base_update_ioc_page1_inlinewith_perf_mode(ioc);
}
/**
@@ -5431,7 +5789,7 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET)
ioc->ioc_link_reset_in_progress = 1;
init_completion(&ioc->base_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->base_cmds.done,
msecs_to_jiffies(10000));
if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
@@ -5510,7 +5868,7 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
ioc->base_cmds.smid = smid;
memcpy(request, mpi_request, sizeof(Mpi2SepReply_t));
init_completion(&ioc->base_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->base_cmds.done,
msecs_to_jiffies(10000));
if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
@@ -5693,6 +6051,9 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
if ((facts->IOCCapabilities &
MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE) && (!reset_devices))
ioc->rdpq_array_capable = 1;
+ if ((facts->IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ)
+ && ioc->is_aero_ioc)
+ ioc->atomic_desc_capable = 1;
facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
facts->IOCRequestFrameSize =
le16_to_cpu(mpi_reply.IOCRequestFrameSize);
@@ -5914,7 +6275,7 @@ _base_send_port_enable(struct MPT3SAS_ADAPTER *ioc)
mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
init_completion(&ioc->port_enable_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->port_enable_cmds.done, 300*HZ);
if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) {
ioc_err(ioc, "%s: timeout\n", __func__);
@@ -5973,7 +6334,7 @@ mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc)
memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
return 0;
}
@@ -6089,7 +6450,7 @@ _base_event_notification(struct MPT3SAS_ADAPTER *ioc)
mpi_request->EventMasks[i] =
cpu_to_le32(ioc->event_masks[i]);
init_completion(&ioc->base_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
ioc_err(ioc, "%s: timeout\n", __func__);
@@ -6549,6 +6910,8 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
}
}
+ ioc->smp_affinity_enable = smp_affinity_enable;
+
ioc->rdpq_array_enable_assigned = 0;
ioc->dma_mask = 0;
if (ioc->is_aero_ioc)
@@ -6569,6 +6932,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
ioc->build_sg_scmd = &_base_build_sg_scmd;
ioc->build_sg = &_base_build_sg;
ioc->build_zero_len_sge = &_base_build_zero_len_sge;
+ ioc->get_msix_index_for_smlio = &_base_get_msix_index;
break;
case MPI25_VERSION:
case MPI26_VERSION:
@@ -6583,15 +6947,30 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
ioc->build_nvme_prp = &_base_build_nvme_prp;
ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee;
ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t);
-
+ if (ioc->high_iops_queues)
+ ioc->get_msix_index_for_smlio =
+ &_base_get_high_iops_msix_index;
+ else
+ ioc->get_msix_index_for_smlio = &_base_get_msix_index;
break;
}
-
- if (ioc->is_mcpu_endpoint)
- ioc->put_smid_scsi_io = &_base_put_smid_mpi_ep_scsi_io;
- else
- ioc->put_smid_scsi_io = &_base_put_smid_scsi_io;
-
+ if (ioc->atomic_desc_capable) {
+ ioc->put_smid_default = &_base_put_smid_default_atomic;
+ ioc->put_smid_scsi_io = &_base_put_smid_scsi_io_atomic;
+ ioc->put_smid_fast_path =
+ &_base_put_smid_fast_path_atomic;
+ ioc->put_smid_hi_priority =
+ &_base_put_smid_hi_priority_atomic;
+ } else {
+ ioc->put_smid_default = &_base_put_smid_default;
+ ioc->put_smid_fast_path = &_base_put_smid_fast_path;
+ ioc->put_smid_hi_priority = &_base_put_smid_hi_priority;
+ if (ioc->is_mcpu_endpoint)
+ ioc->put_smid_scsi_io =
+ &_base_put_smid_mpi_ep_scsi_io;
+ else
+ ioc->put_smid_scsi_io = &_base_put_smid_scsi_io;
+ }
/*
 	 * These function pointers are for other requests that don't
 	 * require the IEEE scatter gather elements.
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 480219f0efc5..6afbdb044310 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -76,8 +76,8 @@
#define MPT3SAS_DRIVER_NAME "mpt3sas"
#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION "28.100.00.00"
-#define MPT3SAS_MAJOR_VERSION 28
+#define MPT3SAS_DRIVER_VERSION "29.100.00.00"
+#define MPT3SAS_MAJOR_VERSION 29
#define MPT3SAS_MINOR_VERSION 100
#define MPT3SAS_BUILD_VERSION 0
#define MPT3SAS_RELEASE_VERSION 00
@@ -355,6 +355,12 @@ struct mpt3sas_nvme_cmd {
#define VIRTUAL_IO_FAILED_RETRY (0x32010081)
+/* High IOPs definitions */
+#define MPT3SAS_DEVICE_HIGH_IOPS_DEPTH 8
+#define MPT3SAS_HIGH_IOPS_REPLY_QUEUES 8
+#define MPT3SAS_HIGH_IOPS_BATCH_COUNT 16
+#define MPT3SAS_GEN35_MAX_MSIX_QUEUES 128
+
/* OEM Specific Flags will come from OEM specific header files */
struct Mpi2ManufacturingPage10_t {
MPI2_CONFIG_PAGE_HEADER Header; /* 00h */
@@ -824,6 +830,7 @@ struct chain_lookup {
*/
struct scsiio_tracker {
u16 smid;
+ struct scsi_cmnd *scmd;
u8 cb_idx;
u8 direct_io;
struct pcie_sg_list pcie_sg_list;
@@ -924,6 +931,12 @@ typedef void (*PUT_SMID_IO_FP_HIP) (struct MPT3SAS_ADAPTER *ioc, u16 smid,
u16 funcdep);
typedef void (*PUT_SMID_DEFAULT) (struct MPT3SAS_ADAPTER *ioc, u16 smid);
typedef u32 (*BASE_READ_REG) (const volatile void __iomem *addr);
+/*
+ * Get the msix index of a high iops reply queue when high iops mode is
+ * enabled, else the msix index of a general reply queue.
+ */
+typedef u8 (*GET_MSIX_INDEX) (struct MPT3SAS_ADAPTER *ioc,
+ struct scsi_cmnd *scmd);
/* IOC Facts and Port Facts converted from little endian to cpu */
union mpi3_version_union {
@@ -1025,6 +1038,8 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
* @cpu_msix_table: table for mapping cpus to msix index
* @cpu_msix_table_sz: table size
* @total_io_cnt: Gives total IO count, used to load balance the interrupts
+ * @high_iops_outstanding: used to load balance the interrupts
+ * within high iops reply queues
* @msix_load_balance: Enables load balancing of interrupts across
* the multiple MSIXs
* @schedule_dead_ioc_flush_running_cmds: callback to flush pending commands
@@ -1147,6 +1162,8 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
* path functions resulting in Null pointer reference followed by kernel
 * crash. To avoid the above race condition we use mutex synchronization
 * which ensures the synchronization between cli/sysfs_show path.
+ * @atomic_desc_capable: Atomic Request Descriptor support.
+ * @get_msix_index_for_smlio: Get the msix index of high iops queues.
*/
struct MPT3SAS_ADAPTER {
struct list_head list;
@@ -1206,8 +1223,10 @@ struct MPT3SAS_ADAPTER {
MPT3SAS_FLUSH_RUNNING_CMDS schedule_dead_ioc_flush_running_cmds;
u32 non_operational_loop;
atomic64_t total_io_cnt;
+ atomic64_t high_iops_outstanding;
bool msix_load_balance;
u16 thresh_hold;
+ u8 high_iops_queues;
/* internal commands, callback index */
u8 scsi_io_cb_idx;
@@ -1267,6 +1286,7 @@ struct MPT3SAS_ADAPTER {
Mpi2IOUnitPage0_t iounit_pg0;
Mpi2IOUnitPage1_t iounit_pg1;
Mpi2IOUnitPage8_t iounit_pg8;
+ Mpi2IOCPage1_t ioc_pg1_copy;
struct _boot_device req_boot_device;
struct _boot_device req_alt_boot_device;
@@ -1385,6 +1405,7 @@ struct MPT3SAS_ADAPTER {
u8 combined_reply_queue;
u8 combined_reply_index_count;
+ u8 smp_affinity_enable;
/* reply post register index */
resource_size_t **replyPostRegisterIndex;
@@ -1412,6 +1433,7 @@ struct MPT3SAS_ADAPTER {
u8 hide_drives;
spinlock_t diag_trigger_lock;
u8 diag_trigger_active;
+ u8 atomic_desc_capable;
BASE_READ_REG base_readl;
struct SL_WH_MASTER_TRIGGER_T diag_trigger_master;
struct SL_WH_EVENT_TRIGGERS_T diag_trigger_event;
@@ -1422,7 +1444,10 @@ struct MPT3SAS_ADAPTER {
u8 is_gen35_ioc;
u8 is_aero_ioc;
PUT_SMID_IO_FP_HIP put_smid_scsi_io;
-
+ PUT_SMID_IO_FP_HIP put_smid_fast_path;
+ PUT_SMID_IO_FP_HIP put_smid_hi_priority;
+ PUT_SMID_DEFAULT put_smid_default;
+ GET_MSIX_INDEX get_msix_index_for_smlio;
};
typedef u8 (*MPT_CALLBACK)(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
@@ -1611,6 +1636,10 @@ int mpt3sas_config_get_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
int mpt3sas_config_set_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage1_t *config_page,
u16 sz);
+int mpt3sas_config_get_ioc_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2IOCPage1_t *config_page);
+int mpt3sas_config_set_ioc_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2IOCPage1_t *config_page);
int mpt3sas_config_get_ioc_pg8(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
*mpi_reply, Mpi2IOCPage8_t *config_page);
int mpt3sas_config_get_expander_pg0(struct MPT3SAS_ADAPTER *ioc,
diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c
index fb0a17252f86..14a1a2793dd5 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_config.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_config.c
@@ -380,7 +380,7 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
memcpy(config_request, mpi_request, sizeof(Mpi2ConfigRequest_t));
_config_display_some_debug(ioc, smid, "config_request", NULL);
init_completion(&ioc->config_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->config_cmds.done, timeout*HZ);
if (!(ioc->config_cmds.status & MPT3_CMD_COMPLETE)) {
mpt3sas_base_check_cmd_timeout(ioc,
@@ -949,6 +949,77 @@ mpt3sas_config_get_ioc_pg8(struct MPT3SAS_ADAPTER *ioc,
out:
return r;
}
+/**
+ * mpt3sas_config_get_ioc_pg1 - obtain ioc page 1
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_ioc_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2IOCPage1_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IOC;
+ mpi_request.Header.PageNumber = 1;
+ mpi_request.Header.PageVersion = MPI2_IOCPAGE8_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_set_ioc_pg1 - modify ioc page 1
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_set_ioc_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2IOCPage1_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IOC;
+ mpi_request.Header.PageNumber = 1;
+ mpi_request.Header.PageVersion = MPI2_IOCPAGE8_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
/**
* mpt3sas_config_get_sas_device_pg0 - obtain sas device page 0
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index b2bb47c14d35..d4ecfbbe738c 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -822,7 +822,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)
ioc->put_smid_scsi_io(ioc, smid, device_handle);
else
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
break;
}
case MPI2_FUNCTION_SCSI_TASK_MGMT:
@@ -859,7 +859,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
tm_request->DevHandle));
ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
data_in_dma, data_in_sz);
- mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
+ ioc->put_smid_hi_priority(ioc, smid, 0);
break;
}
case MPI2_FUNCTION_SMP_PASSTHROUGH:
@@ -890,7 +890,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
}
ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
data_in_sz);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
break;
}
case MPI2_FUNCTION_SATA_PASSTHROUGH:
@@ -905,7 +905,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
}
ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
data_in_sz);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
break;
}
case MPI2_FUNCTION_FW_DOWNLOAD:
@@ -913,7 +913,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
{
ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
data_in_sz);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
break;
}
case MPI2_FUNCTION_TOOLBOX:
@@ -928,7 +928,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
data_in_dma, data_in_sz);
}
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
break;
}
case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
@@ -948,7 +948,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
default:
ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
data_in_dma, data_in_sz);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
break;
}
@@ -1576,7 +1576,7 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
cpu_to_le32(ioc->product_specific[buffer_type][i]);
init_completion(&ioc->ctl_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->ctl_cmds.done,
MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
@@ -1903,7 +1903,7 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
mpi_request->VP_ID = 0;
init_completion(&ioc->ctl_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->ctl_cmds.done,
MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
@@ -2151,7 +2151,7 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
mpi_request->VP_ID = 0;
init_completion(&ioc->ctl_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->ctl_cmds.done,
MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
@@ -2319,6 +2319,10 @@ _ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg,
break;
}
+ if (karg.hdr.ioc_number != ioctl_header.ioc_number) {
+ ret = -EINVAL;
+ break;
+ }
if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_command)) {
uarg = arg;
ret = _ctl_do_mpt_command(ioc, karg, &uarg->mf);
@@ -2453,7 +2457,7 @@ _ctl_mpt2_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg)
/* scsi host attributes */
/**
- * _ctl_version_fw_show - firmware version
+ * version_fw_show - firmware version
* @cdev: pointer to embedded class device
* @attr: ?
* @buf: the buffer returned
@@ -2461,7 +2465,7 @@ _ctl_mpt2_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg)
* A sysfs 'read-only' shost attribute.
*/
static ssize_t
-_ctl_version_fw_show(struct device *cdev, struct device_attribute *attr,
+version_fw_show(struct device *cdev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
@@ -2473,10 +2477,10 @@ _ctl_version_fw_show(struct device *cdev, struct device_attribute *attr,
(ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
ioc->facts.FWVersion.Word & 0x000000FF);
}
-static DEVICE_ATTR(version_fw, S_IRUGO, _ctl_version_fw_show, NULL);
+static DEVICE_ATTR_RO(version_fw);
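
The renames in this and the following hunks are dictated by DEVICE_ATTR_RO(): the macro derives both the sysfs attribute name and its .show callback from its argument, so the function has to be called <name>_show and the attribute is created read-only (mode 0444). A minimal hedged sketch of the pattern, with an attribute name and value that are purely illustrative and not from this driver:

/* Hedged sketch: DEVICE_ATTR_RO(foo) creates dev_attr_foo (mode 0444)
 * and wires its .show callback to a function that must be named foo_show.
 */
#include <linux/device.h>

static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", 42);	/* illustrative */
}
static DEVICE_ATTR_RO(foo);
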
/**
- * _ctl_version_bios_show - bios version
+ * version_bios_show - bios version
* @cdev: pointer to embedded class device
* @attr: ?
* @buf: the buffer returned
@@ -2484,7 +2488,7 @@ static DEVICE_ATTR(version_fw, S_IRUGO, _ctl_version_fw_show, NULL);
* A sysfs 'read-only' shost attribute.
*/
static ssize_t
-_ctl_version_bios_show(struct device *cdev, struct device_attribute *attr,
+version_bios_show(struct device *cdev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
@@ -2498,10 +2502,10 @@ _ctl_version_bios_show(struct device *cdev, struct device_attribute *attr,
(version & 0x0000FF00) >> 8,
version & 0x000000FF);
}
-static DEVICE_ATTR(version_bios, S_IRUGO, _ctl_version_bios_show, NULL);
+static DEVICE_ATTR_RO(version_bios);
/**
- * _ctl_version_mpi_show - MPI (message passing interface) version
+ * version_mpi_show - MPI (message passing interface) version
* @cdev: pointer to embedded class device
* @attr: ?
* @buf: the buffer returned
@@ -2509,7 +2513,7 @@ static DEVICE_ATTR(version_bios, S_IRUGO, _ctl_version_bios_show, NULL);
* A sysfs 'read-only' shost attribute.
*/
static ssize_t
-_ctl_version_mpi_show(struct device *cdev, struct device_attribute *attr,
+version_mpi_show(struct device *cdev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
@@ -2518,10 +2522,10 @@ _ctl_version_mpi_show(struct device *cdev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "%03x.%02x\n",
ioc->facts.MsgVersion, ioc->facts.HeaderVersion >> 8);
}
-static DEVICE_ATTR(version_mpi, S_IRUGO, _ctl_version_mpi_show, NULL);
+static DEVICE_ATTR_RO(version_mpi);
/**
- * _ctl_version_product_show - product name
+ * version_product_show - product name
* @cdev: pointer to embedded class device
* @attr: ?
* @buf: the buffer returned
@@ -2529,7 +2533,7 @@ static DEVICE_ATTR(version_mpi, S_IRUGO, _ctl_version_mpi_show, NULL);
* A sysfs 'read-only' shost attribute.
*/
static ssize_t
-_ctl_version_product_show(struct device *cdev, struct device_attribute *attr,
+version_product_show(struct device *cdev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
@@ -2537,10 +2541,10 @@ _ctl_version_product_show(struct device *cdev, struct device_attribute *attr,
return snprintf(buf, 16, "%s\n", ioc->manu_pg0.ChipName);
}
-static DEVICE_ATTR(version_product, S_IRUGO, _ctl_version_product_show, NULL);
+static DEVICE_ATTR_RO(version_product);
/**
- * _ctl_version_nvdata_persistent_show - ndvata persistent version
+ * version_nvdata_persistent_show - nvdata persistent version
* @cdev: pointer to embedded class device
* @attr: ?
* @buf: the buffer returned
@@ -2548,7 +2552,7 @@ static DEVICE_ATTR(version_product, S_IRUGO, _ctl_version_product_show, NULL);
* A sysfs 'read-only' shost attribute.
*/
static ssize_t
-_ctl_version_nvdata_persistent_show(struct device *cdev,
+version_nvdata_persistent_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
@@ -2557,11 +2561,10 @@ _ctl_version_nvdata_persistent_show(struct device *cdev,
return snprintf(buf, PAGE_SIZE, "%08xh\n",
le32_to_cpu(ioc->iounit_pg0.NvdataVersionPersistent.Word));
}
-static DEVICE_ATTR(version_nvdata_persistent, S_IRUGO,
- _ctl_version_nvdata_persistent_show, NULL);
+static DEVICE_ATTR_RO(version_nvdata_persistent);
/**
- * _ctl_version_nvdata_default_show - nvdata default version
+ * version_nvdata_default_show - nvdata default version
* @cdev: pointer to embedded class device
* @attr: ?
* @buf: the buffer returned
@@ -2569,7 +2572,7 @@ static DEVICE_ATTR(version_nvdata_persistent, S_IRUGO,
* A sysfs 'read-only' shost attribute.
*/
static ssize_t
-_ctl_version_nvdata_default_show(struct device *cdev, struct device_attribute
+version_nvdata_default_show(struct device *cdev, struct device_attribute
*attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
@@ -2578,11 +2581,10 @@ _ctl_version_nvdata_default_show(struct device *cdev, struct device_attribute
return snprintf(buf, PAGE_SIZE, "%08xh\n",
le32_to_cpu(ioc->iounit_pg0.NvdataVersionDefault.Word));
}
-static DEVICE_ATTR(version_nvdata_default, S_IRUGO,
- _ctl_version_nvdata_default_show, NULL);
+static DEVICE_ATTR_RO(version_nvdata_default);
/**
- * _ctl_board_name_show - board name
+ * board_name_show - board name
* @cdev: pointer to embedded class device
* @attr: ?
* @buf: the buffer returned
@@ -2590,7 +2592,7 @@ static DEVICE_ATTR(version_nvdata_default, S_IRUGO,
* A sysfs 'read-only' shost attribute.
*/
static ssize_t
-_ctl_board_name_show(struct device *cdev, struct device_attribute *attr,
+board_name_show(struct device *cdev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
@@ -2598,10 +2600,10 @@ _ctl_board_name_show(struct device *cdev, struct device_attribute *attr,
return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardName);
}
-static DEVICE_ATTR(board_name, S_IRUGO, _ctl_board_name_show, NULL);
+static DEVICE_ATTR_RO(board_name);
/**
- * _ctl_board_assembly_show - board assembly name
+ * board_assembly_show - board assembly name
* @cdev: pointer to embedded class device
* @attr: ?
* @buf: the buffer returned
@@ -2609,7 +2611,7 @@ static DEVICE_ATTR(board_name, S_IRUGO, _ctl_board_name_show, NULL);
* A sysfs 'read-only' shost attribute.
*/
static ssize_t
-_ctl_board_assembly_show(struct device *cdev, struct device_attribute *attr,
+board_assembly_show(struct device *cdev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
@@ -2617,10 +2619,10 @@ _ctl_board_assembly_show(struct device *cdev, struct device_attribute *attr,
return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardAssembly);
}
-static DEVICE_ATTR(board_assembly, S_IRUGO, _ctl_board_assembly_show, NULL);
+static DEVICE_ATTR_RO(board_assembly);
/**
- * _ctl_board_tracer_show - board tracer number
+ * board_tracer_show - board tracer number
* @cdev: pointer to embedded class device
* @attr: ?
* @buf: the buffer returned
@@ -2628,7 +2630,7 @@ static DEVICE_ATTR(board_assembly, S_IRUGO, _ctl_board_assembly_show, NULL);
* A sysfs 'read-only' shost attribute.
*/
static ssize_t
-_ctl_board_tracer_show(struct device *cdev, struct device_attribute *attr,
+board_tracer_show(struct device *cdev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
@@ -2636,10 +2638,10 @@ _ctl_board_tracer_show(struct device *cdev, struct device_attribute *attr,
return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardTracerNumber);
}
-static DEVICE_ATTR(board_tracer, S_IRUGO, _ctl_board_tracer_show, NULL);
+static DEVICE_ATTR_RO(board_tracer);
/**
- * _ctl_io_delay_show - io missing delay
+ * io_delay_show - io missing delay
* @cdev: pointer to embedded class device
* @attr: ?
* @buf: the buffer returned
@@ -2650,7 +2652,7 @@ static DEVICE_ATTR(board_tracer, S_IRUGO, _ctl_board_tracer_show, NULL);
* A sysfs 'read-only' shost attribute.
*/
static ssize_t
-_ctl_io_delay_show(struct device *cdev, struct device_attribute *attr,
+io_delay_show(struct device *cdev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
@@ -2658,10 +2660,10 @@ _ctl_io_delay_show(struct device *cdev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->io_missing_delay);
}
-static DEVICE_ATTR(io_delay, S_IRUGO, _ctl_io_delay_show, NULL);
+static DEVICE_ATTR_RO(io_delay);
/**
- * _ctl_device_delay_show - device missing delay
+ * device_delay_show - device missing delay
* @cdev: pointer to embedded class device
* @attr: ?
* @buf: the buffer returned
@@ -2672,7 +2674,7 @@ static DEVICE_ATTR(io_delay, S_IRUGO, _ctl_io_delay_show, NULL);
* A sysfs 'read-only' shost attribute.
*/
static ssize_t
-_ctl_device_delay_show(struct device *cdev, struct device_attribute *attr,
+device_delay_show(struct device *cdev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
@@ -2680,10 +2682,10 @@ _ctl_device_delay_show(struct device *cdev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->device_missing_delay);
}
-static DEVICE_ATTR(device_delay, S_IRUGO, _ctl_device_delay_show, NULL);
+static DEVICE_ATTR_RO(device_delay);
/**
- * _ctl_fw_queue_depth_show - global credits
+ * fw_queue_depth_show - global credits
* @cdev: pointer to embedded class device
* @attr: ?
* @buf: the buffer returned
@@ -2693,7 +2695,7 @@ static DEVICE_ATTR(device_delay, S_IRUGO, _ctl_device_delay_show, NULL);
* A sysfs 'read-only' shost attribute.
*/
static ssize_t
-_ctl_fw_queue_depth_show(struct device *cdev, struct device_attribute *attr,
+fw_queue_depth_show(struct device *cdev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
@@ -2701,10 +2703,10 @@ _ctl_fw_queue_depth_show(struct device *cdev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->facts.RequestCredit);
}
-static DEVICE_ATTR(fw_queue_depth, S_IRUGO, _ctl_fw_queue_depth_show, NULL);
+static DEVICE_ATTR_RO(fw_queue_depth);
/**
- * _ctl_sas_address_show - sas address
+ * sas_address_show - sas address
* @cdev: pointer to embedded class device
* @attr: ?
* @buf: the buffer returned
@@ -2714,7 +2716,7 @@ static DEVICE_ATTR(fw_queue_depth, S_IRUGO, _ctl_fw_queue_depth_show, NULL);
* A sysfs 'read-only' shost attribute.
*/
static ssize_t
-_ctl_host_sas_address_show(struct device *cdev, struct device_attribute *attr,
+host_sas_address_show(struct device *cdev, struct device_attribute *attr,
char *buf)
{
@@ -2724,11 +2726,10 @@ _ctl_host_sas_address_show(struct device *cdev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "0x%016llx\n",
(unsigned long long)ioc->sas_hba.sas_address);
}
-static DEVICE_ATTR(host_sas_address, S_IRUGO,
- _ctl_host_sas_address_show, NULL);
+static DEVICE_ATTR_RO(host_sas_address);
/**
- * _ctl_logging_level_show - logging level
+ * logging_level_show - logging level
* @cdev: pointer to embedded class device
* @attr: ?
* @buf: the buffer returned
@@ -2736,7 +2737,7 @@ static DEVICE_ATTR(host_sas_address, S_IRUGO,
* A sysfs 'read/write' shost attribute.
*/
static ssize_t
-_ctl_logging_level_show(struct device *cdev, struct device_attribute *attr,
+logging_level_show(struct device *cdev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
@@ -2745,7 +2746,7 @@ _ctl_logging_level_show(struct device *cdev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "%08xh\n", ioc->logging_level);
}
static ssize_t
-_ctl_logging_level_store(struct device *cdev, struct device_attribute *attr,
+logging_level_store(struct device *cdev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct Scsi_Host *shost = class_to_shost(cdev);
@@ -2760,11 +2761,10 @@ _ctl_logging_level_store(struct device *cdev, struct device_attribute *attr,
ioc->logging_level);
return strlen(buf);
}
-static DEVICE_ATTR(logging_level, S_IRUGO | S_IWUSR, _ctl_logging_level_show,
- _ctl_logging_level_store);
+static DEVICE_ATTR_RW(logging_level);
/**
- * _ctl_fwfault_debug_show - show/store fwfault_debug
+ * fwfault_debug_show - show/store fwfault_debug
* @cdev: pointer to embedded class device
* @attr: ?
* @buf: the buffer returned
@@ -2773,7 +2773,7 @@ static DEVICE_ATTR(logging_level, S_IRUGO | S_IWUSR, _ctl_logging_level_show,
* A sysfs 'read/write' shost attribute.
*/
static ssize_t
-_ctl_fwfault_debug_show(struct device *cdev, struct device_attribute *attr,
+fwfault_debug_show(struct device *cdev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
@@ -2782,7 +2782,7 @@ _ctl_fwfault_debug_show(struct device *cdev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "%d\n", ioc->fwfault_debug);
}
static ssize_t
-_ctl_fwfault_debug_store(struct device *cdev, struct device_attribute *attr,
+fwfault_debug_store(struct device *cdev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct Scsi_Host *shost = class_to_shost(cdev);
@@ -2797,11 +2797,10 @@ _ctl_fwfault_debug_store(struct device *cdev, struct device_attribute *attr,
ioc->fwfault_debug);
return strlen(buf);
}
-static DEVICE_ATTR(fwfault_debug, S_IRUGO | S_IWUSR,
- _ctl_fwfault_debug_show, _ctl_fwfault_debug_store);
+static DEVICE_ATTR_RW(fwfault_debug);
/**
- * _ctl_ioc_reset_count_show - ioc reset count
+ * ioc_reset_count_show - ioc reset count
* @cdev: pointer to embedded class device
* @attr: ?
* @buf: the buffer returned
@@ -2811,7 +2810,7 @@ static DEVICE_ATTR(fwfault_debug, S_IRUGO | S_IWUSR,
* A sysfs 'read-only' shost attribute.
*/
static ssize_t
-_ctl_ioc_reset_count_show(struct device *cdev, struct device_attribute *attr,
+ioc_reset_count_show(struct device *cdev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
@@ -2819,10 +2818,10 @@ _ctl_ioc_reset_count_show(struct device *cdev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "%d\n", ioc->ioc_reset_count);
}
-static DEVICE_ATTR(ioc_reset_count, S_IRUGO, _ctl_ioc_reset_count_show, NULL);
+static DEVICE_ATTR_RO(ioc_reset_count);
/**
- * _ctl_ioc_reply_queue_count_show - number of reply queues
+ * reply_queue_count_show - number of reply queues
* @cdev: pointer to embedded class device
* @attr: ?
* @buf: the buffer returned
@@ -2832,7 +2831,7 @@ static DEVICE_ATTR(ioc_reset_count, S_IRUGO, _ctl_ioc_reset_count_show, NULL);
* A sysfs 'read-only' shost attribute.
*/
static ssize_t
-_ctl_ioc_reply_queue_count_show(struct device *cdev,
+reply_queue_count_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
u8 reply_queue_count;
@@ -2847,11 +2846,10 @@ _ctl_ioc_reply_queue_count_show(struct device *cdev,
return snprintf(buf, PAGE_SIZE, "%d\n", reply_queue_count);
}
-static DEVICE_ATTR(reply_queue_count, S_IRUGO, _ctl_ioc_reply_queue_count_show,
- NULL);
+static DEVICE_ATTR_RO(reply_queue_count);
/**
- * _ctl_BRM_status_show - Backup Rail Monitor Status
+ * BRM_status_show - Backup Rail Monitor Status
* @cdev: pointer to embedded class device
* @attr: ?
* @buf: the buffer returned
@@ -2861,7 +2859,7 @@ static DEVICE_ATTR(reply_queue_count, S_IRUGO, _ctl_ioc_reply_queue_count_show,
* A sysfs 'read-only' shost attribute.
*/
static ssize_t
-_ctl_BRM_status_show(struct device *cdev, struct device_attribute *attr,
+BRM_status_show(struct device *cdev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
@@ -2923,7 +2921,7 @@ _ctl_BRM_status_show(struct device *cdev, struct device_attribute *attr,
mutex_unlock(&ioc->pci_access_mutex);
return rc;
}
-static DEVICE_ATTR(BRM_status, S_IRUGO, _ctl_BRM_status_show, NULL);
+static DEVICE_ATTR_RO(BRM_status);
struct DIAG_BUFFER_START {
__le32 Size;
@@ -2936,7 +2934,7 @@ struct DIAG_BUFFER_START {
};
/**
- * _ctl_host_trace_buffer_size_show - host buffer size (trace only)
+ * host_trace_buffer_size_show - host buffer size (trace only)
* @cdev: pointer to embedded class device
* @attr: ?
* @buf: the buffer returned
@@ -2944,7 +2942,7 @@ struct DIAG_BUFFER_START {
* A sysfs 'read-only' shost attribute.
*/
static ssize_t
-_ctl_host_trace_buffer_size_show(struct device *cdev,
+host_trace_buffer_size_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
@@ -2976,11 +2974,10 @@ _ctl_host_trace_buffer_size_show(struct device *cdev,
ioc->ring_buffer_sz = size;
return snprintf(buf, PAGE_SIZE, "%d\n", size);
}
-static DEVICE_ATTR(host_trace_buffer_size, S_IRUGO,
- _ctl_host_trace_buffer_size_show, NULL);
+static DEVICE_ATTR_RO(host_trace_buffer_size);
/**
- * _ctl_host_trace_buffer_show - firmware ring buffer (trace only)
+ * host_trace_buffer_show - firmware ring buffer (trace only)
* @cdev: pointer to embedded class device
* @attr: ?
* @buf: the buffer returned
@@ -2992,7 +2989,7 @@ static DEVICE_ATTR(host_trace_buffer_size, S_IRUGO,
* offset to the same attribute, it will move the pointer.
*/
static ssize_t
-_ctl_host_trace_buffer_show(struct device *cdev, struct device_attribute *attr,
+host_trace_buffer_show(struct device *cdev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
@@ -3024,7 +3021,7 @@ _ctl_host_trace_buffer_show(struct device *cdev, struct device_attribute *attr,
}
static ssize_t
-_ctl_host_trace_buffer_store(struct device *cdev, struct device_attribute *attr,
+host_trace_buffer_store(struct device *cdev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct Scsi_Host *shost = class_to_shost(cdev);
@@ -3037,14 +3034,13 @@ _ctl_host_trace_buffer_store(struct device *cdev, struct device_attribute *attr,
ioc->ring_buffer_offset = val;
return strlen(buf);
}
-static DEVICE_ATTR(host_trace_buffer, S_IRUGO | S_IWUSR,
- _ctl_host_trace_buffer_show, _ctl_host_trace_buffer_store);
+static DEVICE_ATTR_RW(host_trace_buffer);
/*****************************************/
/**
- * _ctl_host_trace_buffer_enable_show - firmware ring buffer (trace only)
+ * host_trace_buffer_enable_show - firmware ring buffer (trace only)
* @cdev: pointer to embedded class device
* @attr: ?
* @buf: the buffer returned
@@ -3054,7 +3050,7 @@ static DEVICE_ATTR(host_trace_buffer, S_IRUGO | S_IWUSR,
* This is a mechanism to post/release host_trace_buffers
*/
static ssize_t
-_ctl_host_trace_buffer_enable_show(struct device *cdev,
+host_trace_buffer_enable_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
@@ -3072,7 +3068,7 @@ _ctl_host_trace_buffer_enable_show(struct device *cdev,
}
static ssize_t
-_ctl_host_trace_buffer_enable_store(struct device *cdev,
+host_trace_buffer_enable_store(struct device *cdev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct Scsi_Host *shost = class_to_shost(cdev);
@@ -3122,14 +3118,12 @@ _ctl_host_trace_buffer_enable_store(struct device *cdev,
out:
return strlen(buf);
}
-static DEVICE_ATTR(host_trace_buffer_enable, S_IRUGO | S_IWUSR,
- _ctl_host_trace_buffer_enable_show,
- _ctl_host_trace_buffer_enable_store);
+static DEVICE_ATTR_RW(host_trace_buffer_enable);
/*********** diagnostic trigger support **********************************/
/**
- * _ctl_diag_trigger_master_show - show the diag_trigger_master attribute
+ * diag_trigger_master_show - show the diag_trigger_master attribute
* @cdev: pointer to embedded class device
* @attr: ?
* @buf: the buffer returned
@@ -3137,7 +3131,7 @@ static DEVICE_ATTR(host_trace_buffer_enable, S_IRUGO | S_IWUSR,
* A sysfs 'read/write' shost attribute.
*/
static ssize_t
-_ctl_diag_trigger_master_show(struct device *cdev,
+diag_trigger_master_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
@@ -3154,7 +3148,7 @@ _ctl_diag_trigger_master_show(struct device *cdev,
}
/**
- * _ctl_diag_trigger_master_store - store the diag_trigger_master attribute
+ * diag_trigger_master_store - store the diag_trigger_master attribute
* @cdev: pointer to embedded class device
* @attr: ?
* @buf: the buffer returned
@@ -3163,7 +3157,7 @@ _ctl_diag_trigger_master_show(struct device *cdev,
* A sysfs 'read/write' shost attribute.
*/
static ssize_t
-_ctl_diag_trigger_master_store(struct device *cdev,
+diag_trigger_master_store(struct device *cdev,
struct device_attribute *attr, const char *buf, size_t count)
{
@@ -3182,12 +3176,11 @@ _ctl_diag_trigger_master_store(struct device *cdev,
spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
return rc;
}
-static DEVICE_ATTR(diag_trigger_master, S_IRUGO | S_IWUSR,
- _ctl_diag_trigger_master_show, _ctl_diag_trigger_master_store);
+static DEVICE_ATTR_RW(diag_trigger_master);
/**
- * _ctl_diag_trigger_event_show - show the diag_trigger_event attribute
+ * diag_trigger_event_show - show the diag_trigger_event attribute
* @cdev: pointer to embedded class device
* @attr: ?
* @buf: the buffer returned
@@ -3195,7 +3188,7 @@ static DEVICE_ATTR(diag_trigger_master, S_IRUGO | S_IWUSR,
* A sysfs 'read/write' shost attribute.
*/
static ssize_t
-_ctl_diag_trigger_event_show(struct device *cdev,
+diag_trigger_event_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
@@ -3211,7 +3204,7 @@ _ctl_diag_trigger_event_show(struct device *cdev,
}
/**
- * _ctl_diag_trigger_event_store - store the diag_trigger_event attribute
+ * diag_trigger_event_store - store the diag_trigger_event attribute
* @cdev: pointer to embedded class device
* @attr: ?
* @buf: the buffer returned
@@ -3220,7 +3213,7 @@ _ctl_diag_trigger_event_show(struct device *cdev,
* A sysfs 'read/write' shost attribute.
*/
static ssize_t
-_ctl_diag_trigger_event_store(struct device *cdev,
+diag_trigger_event_store(struct device *cdev,
struct device_attribute *attr, const char *buf, size_t count)
{
@@ -3239,12 +3232,11 @@ _ctl_diag_trigger_event_store(struct device *cdev,
spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
return sz;
}
-static DEVICE_ATTR(diag_trigger_event, S_IRUGO | S_IWUSR,
- _ctl_diag_trigger_event_show, _ctl_diag_trigger_event_store);
+static DEVICE_ATTR_RW(diag_trigger_event);
/**
- * _ctl_diag_trigger_scsi_show - show the diag_trigger_scsi attribute
+ * diag_trigger_scsi_show - show the diag_trigger_scsi attribute
* @cdev: pointer to embedded class device
* @attr: ?
* @buf: the buffer returned
@@ -3252,7 +3244,7 @@ static DEVICE_ATTR(diag_trigger_event, S_IRUGO | S_IWUSR,
* A sysfs 'read/write' shost attribute.
*/
static ssize_t
-_ctl_diag_trigger_scsi_show(struct device *cdev,
+diag_trigger_scsi_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
@@ -3268,7 +3260,7 @@ _ctl_diag_trigger_scsi_show(struct device *cdev,
}
/**
- * _ctl_diag_trigger_scsi_store - store the diag_trigger_scsi attribute
+ * diag_trigger_scsi_store - store the diag_trigger_scsi attribute
* @cdev: pointer to embedded class device
* @attr: ?
* @buf: the buffer returned
@@ -3277,7 +3269,7 @@ _ctl_diag_trigger_scsi_show(struct device *cdev,
* A sysfs 'read/write' shost attribute.
*/
static ssize_t
-_ctl_diag_trigger_scsi_store(struct device *cdev,
+diag_trigger_scsi_store(struct device *cdev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct Scsi_Host *shost = class_to_shost(cdev);
@@ -3295,12 +3287,11 @@ _ctl_diag_trigger_scsi_store(struct device *cdev,
spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
return sz;
}
-static DEVICE_ATTR(diag_trigger_scsi, S_IRUGO | S_IWUSR,
- _ctl_diag_trigger_scsi_show, _ctl_diag_trigger_scsi_store);
+static DEVICE_ATTR_RW(diag_trigger_scsi);
/**
- * _ctl_diag_trigger_scsi_show - show the diag_trigger_mpi attribute
+ * diag_trigger_mpi_show - show the diag_trigger_mpi attribute
* @cdev: pointer to embedded class device
* @attr: ?
* @buf: the buffer returned
@@ -3308,7 +3299,7 @@ static DEVICE_ATTR(diag_trigger_scsi, S_IRUGO | S_IWUSR,
* A sysfs 'read/write' shost attribute.
*/
static ssize_t
-_ctl_diag_trigger_mpi_show(struct device *cdev,
+diag_trigger_mpi_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
@@ -3324,7 +3315,7 @@ _ctl_diag_trigger_mpi_show(struct device *cdev,
}
/**
- * _ctl_diag_trigger_mpi_store - store the diag_trigger_mpi attribute
+ * diag_trigger_mpi_store - store the diag_trigger_mpi attribute
* @cdev: pointer to embedded class device
* @attr: ?
* @buf: the buffer returned
@@ -3333,7 +3324,7 @@ _ctl_diag_trigger_mpi_show(struct device *cdev,
* A sysfs 'read/write' shost attribute.
*/
static ssize_t
-_ctl_diag_trigger_mpi_store(struct device *cdev,
+diag_trigger_mpi_store(struct device *cdev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct Scsi_Host *shost = class_to_shost(cdev);
@@ -3352,8 +3343,7 @@ _ctl_diag_trigger_mpi_store(struct device *cdev,
return sz;
}
-static DEVICE_ATTR(diag_trigger_mpi, S_IRUGO | S_IWUSR,
- _ctl_diag_trigger_mpi_show, _ctl_diag_trigger_mpi_store);
+static DEVICE_ATTR_RW(diag_trigger_mpi);
/*********** diagnostic trigger support *** END ****************************/
@@ -3391,7 +3381,7 @@ struct device_attribute *mpt3sas_host_attrs[] = {
/* device attributes */
/**
- * _ctl_device_sas_address_show - sas address
+ * sas_address_show - sas address
* @dev: pointer to embedded class device
* @attr: ?
* @buf: the buffer returned
@@ -3401,7 +3391,7 @@ struct device_attribute *mpt3sas_host_attrs[] = {
* A sysfs 'read-only' shost attribute.
*/
static ssize_t
-_ctl_device_sas_address_show(struct device *dev, struct device_attribute *attr,
+sas_address_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct scsi_device *sdev = to_scsi_device(dev);
@@ -3410,10 +3400,10 @@ _ctl_device_sas_address_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "0x%016llx\n",
(unsigned long long)sas_device_priv_data->sas_target->sas_address);
}
-static DEVICE_ATTR(sas_address, S_IRUGO, _ctl_device_sas_address_show, NULL);
+static DEVICE_ATTR_RO(sas_address);
/**
- * _ctl_device_handle_show - device handle
+ * sas_device_handle_show - device handle
* @dev: pointer to embedded class device
* @attr: ?
* @buf: the buffer returned
@@ -3423,7 +3413,7 @@ static DEVICE_ATTR(sas_address, S_IRUGO, _ctl_device_sas_address_show, NULL);
* A sysfs 'read-only' shost attribute.
*/
static ssize_t
-_ctl_device_handle_show(struct device *dev, struct device_attribute *attr,
+sas_device_handle_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct scsi_device *sdev = to_scsi_device(dev);
@@ -3432,10 +3422,10 @@ _ctl_device_handle_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "0x%04x\n",
sas_device_priv_data->sas_target->handle);
}
-static DEVICE_ATTR(sas_device_handle, S_IRUGO, _ctl_device_handle_show, NULL);
+static DEVICE_ATTR_RO(sas_device_handle);
/**
- * _ctl_device_ncq_io_prio_show - send prioritized io commands to device
+ * sas_ncq_prio_enable_show - send prioritized io commands to device
* @dev: pointer to embedded device
* @attr: ?
* @buf: the buffer returned
@@ -3443,7 +3433,7 @@ static DEVICE_ATTR(sas_device_handle, S_IRUGO, _ctl_device_handle_show, NULL);
* A sysfs 'read/write' sdev attribute, only works with SATA
*/
static ssize_t
-_ctl_device_ncq_prio_enable_show(struct device *dev,
+sas_ncq_prio_enable_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct scsi_device *sdev = to_scsi_device(dev);
@@ -3454,7 +3444,7 @@ _ctl_device_ncq_prio_enable_show(struct device *dev,
}
static ssize_t
-_ctl_device_ncq_prio_enable_store(struct device *dev,
+sas_ncq_prio_enable_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -3471,9 +3461,7 @@ _ctl_device_ncq_prio_enable_store(struct device *dev,
sas_device_priv_data->ncq_prio_enable = ncq_prio_enable;
return strlen(buf);
}
-static DEVICE_ATTR(sas_ncq_prio_enable, S_IRUGO | S_IWUSR,
- _ctl_device_ncq_prio_enable_show,
- _ctl_device_ncq_prio_enable_store);
+static DEVICE_ATTR_RW(sas_ncq_prio_enable);
struct device_attribute *mpt3sas_dev_attrs[] = {
&dev_attr_sas_address,
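The long run of renames above is what makes the DEVICE_ATTR_RO()/DEVICE_ATTR_RW() conversions possible: those macros derive the callback names from the attribute name, so a read-only attribute foo must be backed by foo_show(), and a read/write one by foo_show()/foo_store(). A simplified sketch of the macros involved (condensed from the kernel's device/sysfs headers; treat it as illustrative rather than the verbatim definitions):

	#define DEVICE_ATTR_RO(_name) \
		struct device_attribute dev_attr_##_name = __ATTR_RO(_name)

	/* __ATTR_RO() fills in a 0444 mode and wires .show to <name>_show */
	#define __ATTR_RO(_name) {                                            \
		.attr = { .name = __stringify(_name), .mode = 0444 },         \
		.show = _name##_show,                                         \
	}

DEVICE_ATTR_RW() works the same way with a 0644 mode and both _name##_show and _name##_store, which is why every read/write attribute above also gains a matching <name>_store() rename.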
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 1ccfbc7eebe0..27c731a3fb49 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -113,22 +113,22 @@ MODULE_PARM_DESC(logging_level,
static ushort max_sectors = 0xFFFF;
-module_param(max_sectors, ushort, 0);
+module_param(max_sectors, ushort, 0444);
MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 32767 default=32767");
static int missing_delay[2] = {-1, -1};
-module_param_array(missing_delay, int, NULL, 0);
+module_param_array(missing_delay, int, NULL, 0444);
MODULE_PARM_DESC(missing_delay, " device missing delay , io missing delay");
/* scsi mid-layer global parameter is max_report_luns, which is 511 */
#define MPT3SAS_MAX_LUN (16895)
static u64 max_lun = MPT3SAS_MAX_LUN;
-module_param(max_lun, ullong, 0);
+module_param(max_lun, ullong, 0444);
MODULE_PARM_DESC(max_lun, " max lun, default=16895 ");
static ushort hbas_to_enumerate;
-module_param(hbas_to_enumerate, ushort, 0);
+module_param(hbas_to_enumerate, ushort, 0444);
MODULE_PARM_DESC(hbas_to_enumerate,
" 0 - enumerates both SAS 2.0 & SAS 3.0 generation HBAs\n \
1 - enumerates only SAS 2.0 generation HBAs\n \
@@ -142,17 +142,17 @@ MODULE_PARM_DESC(hbas_to_enumerate,
* Either bit can be set, or both
*/
static int diag_buffer_enable = -1;
-module_param(diag_buffer_enable, int, 0);
+module_param(diag_buffer_enable, int, 0444);
MODULE_PARM_DESC(diag_buffer_enable,
" post diag buffers (TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
static int disable_discovery = -1;
-module_param(disable_discovery, int, 0);
+module_param(disable_discovery, int, 0444);
MODULE_PARM_DESC(disable_discovery, " disable discovery ");
/* permit overriding the host protection capabilities mask (EEDP/T10 PI) */
static int prot_mask = -1;
-module_param(prot_mask, int, 0);
+module_param(prot_mask, int, 0444);
MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=7 ");
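For context on the 0 -> 0444 change applied to every module parameter in this file: the third argument of module_param() is the permission mask of the parameter's sysfs entry under /sys/module/<module>/parameters/. A mask of 0 means no sysfs entry is created at all, while 0444 exposes the current value read-only at runtime without allowing writes. A minimal, hypothetical illustration (the parameter name and default are made up for the example):

	#include <linux/moduleparam.h>

	static int example_depth = 8;
	/* visible read-only as /sys/module/<module>/parameters/example_depth */
	module_param(example_depth, int, 0444);
	MODULE_PARM_DESC(example_depth, "example read-only tunable (default=8)");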
@@ -2685,7 +2685,7 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
mpt3sas_scsih_set_tm_flag(ioc, handle);
init_completion(&ioc->tm_cmds.done);
- mpt3sas_base_put_smid_hi_priority(ioc, smid, msix_task);
+ ioc->put_smid_hi_priority(ioc, smid, msix_task);
wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
if (mpt3sas_base_check_cmd_timeout(ioc,
@@ -3659,7 +3659,7 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
mpi_request->MsgFlags = tr_method;
set_bit(handle, ioc->device_remove_in_progress);
- mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
+ ioc->put_smid_hi_priority(ioc, smid, 0);
mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL);
out:
@@ -3755,7 +3755,7 @@ _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
mpi_request->DevHandle = mpi_request_tm->DevHandle;
- mpt3sas_base_put_smid_default(ioc, smid_sas_ctrl);
+ ioc->put_smid_default(ioc, smid_sas_ctrl);
return _scsih_check_for_pending_tm(ioc, smid);
}
@@ -3881,7 +3881,7 @@ _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
mpi_request->DevHandle = cpu_to_le16(handle);
mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
- mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
+ ioc->put_smid_hi_priority(ioc, smid, 0);
}
/**
@@ -3970,7 +3970,7 @@ _scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, U16 event,
ack_request->EventContext = event_context;
ack_request->VF_ID = 0; /* TODO */
ack_request->VP_ID = 0;
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
}
/**
@@ -4026,7 +4026,7 @@ _scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
mpi_request->DevHandle = cpu_to_le16(handle);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
}
/**
@@ -4734,12 +4734,12 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) {
mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len |
MPI25_SCSIIO_IOFLAGS_FAST_PATH);
- mpt3sas_base_put_smid_fast_path(ioc, smid, handle);
+ ioc->put_smid_fast_path(ioc, smid, handle);
} else
ioc->put_smid_scsi_io(ioc, smid,
le16_to_cpu(mpi_request->DevHandle));
} else
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
return 0;
out:
@@ -5210,6 +5210,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
((ioc_status & MPI2_IOCSTATUS_MASK)
!= MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) {
st->direct_io = 0;
+ st->scmd = scmd;
memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
mpi_request->DevHandle =
cpu_to_le16(sas_device_priv_data->sas_target->handle);
@@ -7601,7 +7602,7 @@ _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
handle, phys_disk_num));
init_completion(&ioc->scsih_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
@@ -9633,7 +9634,7 @@ _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
if (!ioc->hide_ir_msg)
ioc_info(ioc, "IR shutdown (sending)\n");
init_completion(&ioc->scsih_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
@@ -9670,6 +9671,7 @@ static void scsih_remove(struct pci_dev *pdev)
struct _pcie_device *pcie_device, *pcienext;
struct workqueue_struct *wq;
unsigned long flags;
+ Mpi2ConfigReply_t mpi_reply;
ioc->remove_host = 1;
@@ -9684,7 +9686,13 @@ static void scsih_remove(struct pci_dev *pdev)
spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
if (wq)
destroy_workqueue(wq);
-
+ /*
+ * Copy back the unmodified ioc page1 so that, on the next driver load,
+ * the current modified changes to ioc page1 won't take effect.
+ */
+ if (ioc->is_aero_ioc)
+ mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
+ &ioc->ioc_pg1_copy);
/* release all the volumes */
_scsih_ir_shutdown(ioc);
sas_remove_host(shost);
@@ -9747,6 +9755,7 @@ scsih_shutdown(struct pci_dev *pdev)
struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
struct workqueue_struct *wq;
unsigned long flags;
+ Mpi2ConfigReply_t mpi_reply;
ioc->remove_host = 1;
@@ -9761,6 +9770,13 @@ scsih_shutdown(struct pci_dev *pdev)
spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
if (wq)
destroy_workqueue(wq);
+ /*
+ * Copy back the unmodified ioc page1 so that, on the next driver load,
+ * the current modified changes to ioc page1 won't take effect.
+ */
+ if (ioc->is_aero_ioc)
+ mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
+ &ioc->ioc_pg1_copy);
_scsih_ir_shutdown(ioc);
mpt3sas_base_detach(ioc);
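The mpt3sas_base_put_smid_*() -> ioc->put_smid_*() conversions scattered through this file replace direct calls with function pointers held in the adapter structure, presumably so the base driver can pick the request-descriptor posting routine (for example an atomic-descriptor path on newer controllers versus the legacy 64-bit descriptor path) once at init time rather than branching on every request. A self-contained sketch of that pattern; the type and helper names below are illustrative, not the driver's real ones:

	struct ioc_sketch;
	typedef void (*put_smid_fn)(struct ioc_sketch *ioc, unsigned short smid);

	struct ioc_sketch {
		put_smid_fn put_smid_default;	/* chosen once during init */
	};

	static void put_smid_legacy_example(struct ioc_sketch *ioc, unsigned short smid)
	{
		/* would post a full 64-bit request descriptor here */
	}

	static void put_smid_atomic_example(struct ioc_sketch *ioc, unsigned short smid)
	{
		/* would post a 32-bit atomic request descriptor here */
	}

	static void select_put_smid_example(struct ioc_sketch *ioc, int atomic_capable)
	{
		ioc->put_smid_default = atomic_capable ? put_smid_atomic_example
						       : put_smid_legacy_example;
	}

Callers then simply invoke ioc->put_smid_default(ioc, smid), as the hunks above now do.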
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index 60ae2d0feb2b..5324662751bf 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -367,7 +367,7 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
ioc_info(ioc, "report_manufacture - send to sas_addr(0x%016llx)\n",
(u64)sas_address));
init_completion(&ioc->transport_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
@@ -1139,7 +1139,7 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
(u64)phy->identify.sas_address,
phy->number));
init_completion(&ioc->transport_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
@@ -1434,7 +1434,7 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
(u64)phy->identify.sas_address,
phy->number, phy_operation));
init_completion(&ioc->transport_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
@@ -1911,7 +1911,7 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
ioc_info(ioc, "%s: sending smp request\n", __func__));
init_completion(&ioc->transport_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index 6dcae0e50018..3e0b8ebe257f 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -1193,7 +1193,7 @@ static int mvs_dev_found_notify(struct domain_device *dev, int lock)
mvi_device->dev_type = dev->dev_type;
mvi_device->mvi_info = mvi;
mvi_device->sas_device = dev;
- if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
+ if (parent_dev && dev_is_expander(parent_dev->dev_type)) {
int phy_id;
u8 phy_num = parent_dev->ex_dev.num_phys;
struct ex_phy *phy;
diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h
index b7d7ec435487..519edc796691 100644
--- a/drivers/scsi/mvsas/mv_sas.h
+++ b/drivers/scsi/mvsas/mv_sas.h
@@ -50,9 +50,6 @@ extern struct mvs_info *tgt_mvi;
extern const struct mvs_dispatch mvs_64xx_dispatch;
extern const struct mvs_dispatch mvs_94xx_dispatch;
-#define DEV_IS_EXPANDER(type) \
- ((type == SAS_EDGE_EXPANDER_DEVICE) || (type == SAS_FANOUT_EXPANDER_DEVICE))
-
#define bit(n) ((u64)1 << n)
#define for_each_phy(__lseq_mask, __mc, __lseq) \
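The change above drops mvsas' private DEV_IS_EXPANDER() macro in favour of the common libsas helper dev_is_expander(), which performs the same check on the device type. The helper is expected to look roughly like the sketch below (the exact libsas definition may differ in spelling, but the two expander types tested are the same ones the removed macro used):

	#include <scsi/libsas.h>	/* enum sas_device_type and the expander constants */

	static inline bool dev_is_expander_sketch(enum sas_device_type type)
	{
		return type == SAS_EDGE_EXPANDER_DEVICE ||
		       type == SAS_FANOUT_EXPANDER_DEVICE;
	}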
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
index 1fb6f6ca627e..8906aceda4c4 100644
--- a/drivers/scsi/mvumi.c
+++ b/drivers/scsi/mvumi.c
@@ -195,23 +195,22 @@ static int mvumi_make_sgl(struct mvumi_hba *mhba, struct scsi_cmnd *scmd,
unsigned int sgnum = scsi_sg_count(scmd);
dma_addr_t busaddr;
- sg = scsi_sglist(scmd);
- *sg_count = dma_map_sg(&mhba->pdev->dev, sg, sgnum,
+ *sg_count = dma_map_sg(&mhba->pdev->dev, scsi_sglist(scmd), sgnum,
scmd->sc_data_direction);
if (*sg_count > mhba->max_sge) {
dev_err(&mhba->pdev->dev,
"sg count[0x%x] is bigger than max sg[0x%x].\n",
*sg_count, mhba->max_sge);
- dma_unmap_sg(&mhba->pdev->dev, sg, sgnum,
+ dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd), sgnum,
scmd->sc_data_direction);
return -1;
}
- for (i = 0; i < *sg_count; i++) {
- busaddr = sg_dma_address(&sg[i]);
+ scsi_for_each_sg(scmd, sg, *sg_count, i) {
+ busaddr = sg_dma_address(sg);
m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr));
m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr));
m_sg->flags = 0;
- sgd_setsz(mhba, m_sg, cpu_to_le32(sg_dma_len(&sg[i])));
+ sgd_setsz(mhba, m_sg, cpu_to_le32(sg_dma_len(sg)));
if ((i + 1) == *sg_count)
m_sg->flags |= 1U << mhba->eot_flag;
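For reference, scsi_for_each_sg() used in the rewritten loop is the standard helper for walking a SCSI command's scatterlist (essentially for_each_sg() over scsi_sglist(scmd)), which is why the open-coded &sg[i] indexing could be dropped. A minimal usage sketch; the function name is made up for the example:

	#include <scsi/scsi_cmnd.h>

	static unsigned int example_mapped_bytes(struct scsi_cmnd *scmd, int sg_count)
	{
		struct scatterlist *sg;
		unsigned int total = 0;
		int i;

		/* walk the DMA-mapped entries, as mvumi_make_sgl() now does */
		scsi_for_each_sg(scmd, sg, sg_count, i)
			total += sg_dma_len(sg);

		return total;
	}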
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
deleted file mode 100644
index 815bb4097c1b..000000000000
--- a/drivers/scsi/osst.c
+++ /dev/null
@@ -1,6108 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- SCSI Tape Driver for Linux version 1.1 and newer. See the accompanying
- file Documentation/scsi/st.txt for more information.
-
- History:
-
- OnStream SCSI Tape support (osst) cloned from st.c by
- Willem Riede (osst@riede.org) Feb 2000
- Fixes ... Kurt Garloff <garloff@suse.de> Mar 2000
-
- Rewritten from Dwayne Forsyth's SCSI tape driver by Kai Makisara.
- Contribution and ideas from several people including (in alphabetical
- order) Klaus Ehrenfried, Wolfgang Denk, Steve Hirsch, Andreas Koppenh"ofer,
- Michael Leodolter, Eyal Lebedinsky, J"org Weule, and Eric Youngdale.
-
- Copyright 1992 - 2002 Kai Makisara / 2000 - 2006 Willem Riede
- email osst@riede.org
-
- $Header: /cvsroot/osst/Driver/osst.c,v 1.73 2005/01/01 21:13:34 wriede Exp $
-
- Microscopic alterations - Rik Ling, 2000/12/21
- Last st.c sync: Tue Oct 15 22:01:04 2002 by makisara
- Some small formal changes - aeb, 950809
-*/
-
-static const char * cvsid = "$Id: osst.c,v 1.73 2005/01/01 21:13:34 wriede Exp $";
-static const char * osst_version = "0.99.4";
-
-/* The "failure to reconnect" firmware bug */
-#define OSST_FW_NEED_POLL_MIN 10601 /*(107A)*/
-#define OSST_FW_NEED_POLL_MAX 10704 /*(108D)*/
-#define OSST_FW_NEED_POLL(x,d) ((x) >= OSST_FW_NEED_POLL_MIN && (x) <= OSST_FW_NEED_POLL_MAX && d->host->this_id != 7)
-
-#include <linux/module.h>
-
-#include <linux/fs.h>
-#include <linux/kernel.h>
-#include <linux/sched/signal.h>
-#include <linux/proc_fs.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/mtio.h>
-#include <linux/ioctl.h>
-#include <linux/fcntl.h>
-#include <linux/spinlock.h>
-#include <linux/vmalloc.h>
-#include <linux/blkdev.h>
-#include <linux/moduleparam.h>
-#include <linux/delay.h>
-#include <linux/jiffies.h>
-#include <linux/mutex.h>
-#include <linux/uaccess.h>
-#include <asm/dma.h>
-
-/* The driver prints some debugging information on the console if DEBUG
- is defined and non-zero. */
-#define DEBUG 0
-
-/* The message level for the debug messages is currently set to KERN_NOTICE
- so that people can easily see the messages. Later when the debugging messages
- in the drivers are more widely classified, this may be changed to KERN_DEBUG. */
-#define OSST_DEB_MSG KERN_NOTICE
-
-#include <scsi/scsi.h>
-#include <scsi/scsi_dbg.h>
-#include <scsi/scsi_device.h>
-#include <scsi/scsi_driver.h>
-#include <scsi/scsi_eh.h>
-#include <scsi/scsi_host.h>
-#include <scsi/scsi_ioctl.h>
-
-#define ST_KILOBYTE 1024
-
-#include "st.h"
-#include "osst.h"
-#include "osst_options.h"
-#include "osst_detect.h"
-
-static DEFINE_MUTEX(osst_int_mutex);
-static int max_dev = 0;
-static int write_threshold_kbs = 0;
-static int max_sg_segs = 0;
-
-#ifdef MODULE
-MODULE_AUTHOR("Willem Riede");
-MODULE_DESCRIPTION("OnStream {DI-|FW-|SC-|USB}{30|50} Tape Driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS_CHARDEV_MAJOR(OSST_MAJOR);
-MODULE_ALIAS_SCSI_DEVICE(TYPE_TAPE);
-
-module_param(max_dev, int, 0444);
-MODULE_PARM_DESC(max_dev, "Maximum number of OnStream Tape Drives to attach (4)");
-
-module_param(write_threshold_kbs, int, 0644);
-MODULE_PARM_DESC(write_threshold_kbs, "Asynchronous write threshold (KB; 32)");
-
-module_param(max_sg_segs, int, 0644);
-MODULE_PARM_DESC(max_sg_segs, "Maximum number of scatter/gather segments to use (9)");
-#else
-static struct osst_dev_parm {
- char *name;
- int *val;
-} parms[] __initdata = {
- { "max_dev", &max_dev },
- { "write_threshold_kbs", &write_threshold_kbs },
- { "max_sg_segs", &max_sg_segs }
-};
-#endif
-
-/* Some default definitions have been moved to osst_options.h */
-#define OSST_BUFFER_SIZE (OSST_BUFFER_BLOCKS * ST_KILOBYTE)
-#define OSST_WRITE_THRESHOLD (OSST_WRITE_THRESHOLD_BLOCKS * ST_KILOBYTE)
-
-/* The buffer size should fit into the 24 bits for length in the
- 6-byte SCSI read and write commands. */
-#if OSST_BUFFER_SIZE >= (2 << 24 - 1)
-#error "Buffer size should not exceed (2 << 24 - 1) bytes!"
-#endif
-
-#if DEBUG
-static int debugging = 1;
-/* uncomment define below to test error recovery */
-// #define OSST_INJECT_ERRORS 1
-#endif
-
-/* Do not retry! The drive firmware already retries when appropriate,
- and when it tries to tell us something, we had better listen... */
-#define MAX_RETRIES 0
-
-#define NO_TAPE NOT_READY
-
-#define OSST_WAIT_POSITION_COMPLETE (HZ > 200 ? HZ / 200 : 1)
-#define OSST_WAIT_WRITE_COMPLETE (HZ / 12)
-#define OSST_WAIT_LONG_WRITE_COMPLETE (HZ / 2)
-
-#define OSST_TIMEOUT (200 * HZ)
-#define OSST_LONG_TIMEOUT (1800 * HZ)
-
-#define TAPE_NR(x) (iminor(x) & ((1 << ST_MODE_SHIFT)-1))
-#define TAPE_MODE(x) ((iminor(x) & ST_MODE_MASK) >> ST_MODE_SHIFT)
-#define TAPE_REWIND(x) ((iminor(x) & 0x80) == 0)
-#define TAPE_IS_RAW(x) (TAPE_MODE(x) & (ST_NBR_MODES >> 1))
-
-/* Internal ioctl to set both density (uppermost 8 bits) and blocksize (lower
- 24 bits) */
-#define SET_DENS_AND_BLK 0x10001
-
-static int osst_buffer_size = OSST_BUFFER_SIZE;
-static int osst_write_threshold = OSST_WRITE_THRESHOLD;
-static int osst_max_sg_segs = OSST_MAX_SG;
-static int osst_max_dev = OSST_MAX_TAPES;
-static int osst_nr_dev;
-
-static struct osst_tape **os_scsi_tapes = NULL;
-static DEFINE_RWLOCK(os_scsi_tapes_lock);
-
-static int modes_defined = 0;
-
-static struct osst_buffer *new_tape_buffer(int, int, int);
-static int enlarge_buffer(struct osst_buffer *, int);
-static void normalize_buffer(struct osst_buffer *);
-static int append_to_buffer(const char __user *, struct osst_buffer *, int);
-static int from_buffer(struct osst_buffer *, char __user *, int);
-static int osst_zero_buffer_tail(struct osst_buffer *);
-static int osst_copy_to_buffer(struct osst_buffer *, unsigned char *);
-static int osst_copy_from_buffer(struct osst_buffer *, unsigned char *);
-
-static int osst_probe(struct device *);
-static int osst_remove(struct device *);
-
-static struct scsi_driver osst_template = {
- .gendrv = {
- .name = "osst",
- .owner = THIS_MODULE,
- .probe = osst_probe,
- .remove = osst_remove,
- }
-};
-
-static int osst_int_ioctl(struct osst_tape *STp, struct osst_request ** aSRpnt,
- unsigned int cmd_in, unsigned long arg);
-
-static int osst_set_frame_position(struct osst_tape *STp, struct osst_request ** aSRpnt, int frame, int skip);
-
-static int osst_get_frame_position(struct osst_tape *STp, struct osst_request ** aSRpnt);
-
-static int osst_flush_write_buffer(struct osst_tape *STp, struct osst_request ** aSRpnt);
-
-static int osst_write_error_recovery(struct osst_tape * STp, struct osst_request ** aSRpnt, int pending);
-
-static inline char *tape_name(struct osst_tape *tape)
-{
- return tape->drive->disk_name;
-}
-
-/* Routines that handle the interaction with mid-layer SCSI routines */
-
-
-/* Normalize Sense */
-static void osst_analyze_sense(struct osst_request *SRpnt, struct st_cmdstatus *s)
-{
- const u8 *ucp;
- const u8 *sense = SRpnt->sense;
-
- s->have_sense = scsi_normalize_sense(SRpnt->sense,
- SCSI_SENSE_BUFFERSIZE, &s->sense_hdr);
- s->flags = 0;
-
- if (s->have_sense) {
- s->deferred = 0;
- s->remainder_valid =
- scsi_get_sense_info_fld(sense, SCSI_SENSE_BUFFERSIZE, &s->uremainder64);
- switch (sense[0] & 0x7f) {
- case 0x71:
- s->deferred = 1;
- /* fall through */
- case 0x70:
- s->fixed_format = 1;
- s->flags = sense[2] & 0xe0;
- break;
- case 0x73:
- s->deferred = 1;
- /* fall through */
- case 0x72:
- s->fixed_format = 0;
- ucp = scsi_sense_desc_find(sense, SCSI_SENSE_BUFFERSIZE, 4);
- s->flags = ucp ? (ucp[3] & 0xe0) : 0;
- break;
- }
- }
-}
-
-/* Convert the result to success code */
-static int osst_chk_result(struct osst_tape * STp, struct osst_request * SRpnt)
-{
- char *name = tape_name(STp);
- int result = SRpnt->result;
- u8 * sense = SRpnt->sense, scode;
-#if DEBUG
- const char *stp;
-#endif
- struct st_cmdstatus *cmdstatp;
-
- if (!result)
- return 0;
-
- cmdstatp = &STp->buffer->cmdstat;
- osst_analyze_sense(SRpnt, cmdstatp);
-
- if (cmdstatp->have_sense)
- scode = STp->buffer->cmdstat.sense_hdr.sense_key;
- else
- scode = 0;
-#if DEBUG
- if (debugging) {
- printk(OSST_DEB_MSG "%s:D: Error: %x, cmd: %x %x %x %x %x %x\n",
- name, result,
- SRpnt->cmd[0], SRpnt->cmd[1], SRpnt->cmd[2],
- SRpnt->cmd[3], SRpnt->cmd[4], SRpnt->cmd[5]);
- if (scode) printk(OSST_DEB_MSG "%s:D: Sense: %02x, ASC: %02x, ASCQ: %02x\n",
- name, scode, sense[12], sense[13]);
- if (cmdstatp->have_sense)
- __scsi_print_sense(STp->device, name,
- SRpnt->sense, SCSI_SENSE_BUFFERSIZE);
- }
- else
-#endif
- if (cmdstatp->have_sense && (
- scode != NO_SENSE &&
- scode != RECOVERED_ERROR &&
-/* scode != UNIT_ATTENTION && */
- scode != BLANK_CHECK &&
- scode != VOLUME_OVERFLOW &&
- SRpnt->cmd[0] != MODE_SENSE &&
- SRpnt->cmd[0] != TEST_UNIT_READY)) { /* Abnormal conditions for tape */
- if (cmdstatp->have_sense) {
- printk(KERN_WARNING "%s:W: Command with sense data:\n", name);
- __scsi_print_sense(STp->device, name,
- SRpnt->sense, SCSI_SENSE_BUFFERSIZE);
- }
- else {
- static int notyetprinted = 1;
-
- printk(KERN_WARNING
- "%s:W: Warning %x (driver bt 0x%x, host bt 0x%x).\n",
- name, result, driver_byte(result),
- host_byte(result));
- if (notyetprinted) {
- notyetprinted = 0;
- printk(KERN_INFO
- "%s:I: This warning may be caused by your scsi controller,\n", name);
- printk(KERN_INFO
- "%s:I: it has been reported with some Buslogic cards.\n", name);
- }
- }
- }
- STp->pos_unknown |= STp->device->was_reset;
-
- if (cmdstatp->have_sense && scode == RECOVERED_ERROR) {
- STp->recover_count++;
- STp->recover_erreg++;
-#if DEBUG
- if (debugging) {
- if (SRpnt->cmd[0] == READ_6)
- stp = "read";
- else if (SRpnt->cmd[0] == WRITE_6)
- stp = "write";
- else
- stp = "ioctl";
- printk(OSST_DEB_MSG "%s:D: Recovered %s error (%d).\n", name, stp,
- STp->recover_count);
- }
-#endif
- if ((sense[2] & 0xe0) == 0)
- return 0;
- }
- return (-EIO);
-}
-
-
-/* Wakeup from interrupt */
-static void osst_end_async(struct request *req, blk_status_t status)
-{
- struct scsi_request *rq = scsi_req(req);
- struct osst_request *SRpnt = req->end_io_data;
- struct osst_tape *STp = SRpnt->stp;
- struct rq_map_data *mdata = &SRpnt->stp->buffer->map_data;
-
- STp->buffer->cmdstat.midlevel_result = SRpnt->result = rq->result;
-#if DEBUG
- STp->write_pending = 0;
-#endif
- if (rq->sense_len)
- memcpy(SRpnt->sense, rq->sense, SCSI_SENSE_BUFFERSIZE);
- if (SRpnt->waiting)
- complete(SRpnt->waiting);
-
- if (SRpnt->bio) {
- kfree(mdata->pages);
- blk_rq_unmap_user(SRpnt->bio);
- }
-
- blk_put_request(req);
-}
-
-/* osst_request memory management */
-static struct osst_request *osst_allocate_request(void)
-{
- return kzalloc(sizeof(struct osst_request), GFP_KERNEL);
-}
-
-static void osst_release_request(struct osst_request *streq)
-{
- kfree(streq);
-}
-
-static int osst_execute(struct osst_request *SRpnt, const unsigned char *cmd,
- int cmd_len, int data_direction, void *buffer, unsigned bufflen,
- int use_sg, int timeout, int retries)
-{
- struct request *req;
- struct scsi_request *rq;
- struct page **pages = NULL;
- struct rq_map_data *mdata = &SRpnt->stp->buffer->map_data;
-
- int err = 0;
- int write = (data_direction == DMA_TO_DEVICE);
-
- req = blk_get_request(SRpnt->stp->device->request_queue,
- write ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 0);
- if (IS_ERR(req))
- return DRIVER_ERROR << 24;
-
- rq = scsi_req(req);
- req->rq_flags |= RQF_QUIET;
-
- SRpnt->bio = NULL;
-
- if (use_sg) {
- struct scatterlist *sg, *sgl = (struct scatterlist *)buffer;
- int i;
-
- pages = kcalloc(use_sg, sizeof(struct page *), GFP_KERNEL);
- if (!pages)
- goto free_req;
-
- for_each_sg(sgl, sg, use_sg, i)
- pages[i] = sg_page(sg);
-
- mdata->null_mapped = 1;
-
- mdata->page_order = get_order(sgl[0].length);
- mdata->nr_entries =
- DIV_ROUND_UP(bufflen, PAGE_SIZE << mdata->page_order);
- mdata->offset = 0;
-
- err = blk_rq_map_user(req->q, req, mdata, NULL, bufflen, GFP_KERNEL);
- if (err) {
- kfree(pages);
- goto free_req;
- }
- SRpnt->bio = req->bio;
- mdata->pages = pages;
-
- } else if (bufflen) {
- err = blk_rq_map_kern(req->q, req, buffer, bufflen, GFP_KERNEL);
- if (err)
- goto free_req;
- }
-
- rq->cmd_len = cmd_len;
- memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
- memcpy(rq->cmd, cmd, rq->cmd_len);
- req->timeout = timeout;
- rq->retries = retries;
- req->end_io_data = SRpnt;
-
- blk_execute_rq_nowait(req->q, NULL, req, 1, osst_end_async);
- return 0;
-free_req:
- blk_put_request(req);
- return DRIVER_ERROR << 24;
-}
-
-/* Do the scsi command. Waits until command performed if do_wait is true.
- Otherwise osst_write_behind_check() is used to check that the command
- has finished. */
-static struct osst_request * osst_do_scsi(struct osst_request *SRpnt, struct osst_tape *STp,
- unsigned char *cmd, int bytes, int direction, int timeout, int retries, int do_wait)
-{
- unsigned char *bp;
- unsigned short use_sg;
-#ifdef OSST_INJECT_ERRORS
- static int inject = 0;
- static int repeat = 0;
-#endif
- struct completion *waiting;
-
- /* if async, make sure there's no command outstanding */
- if (!do_wait && ((STp->buffer)->last_SRpnt)) {
- printk(KERN_ERR "%s: Async command already active.\n",
- tape_name(STp));
- if (signal_pending(current))
- (STp->buffer)->syscall_result = (-EINTR);
- else
- (STp->buffer)->syscall_result = (-EBUSY);
- return NULL;
- }
-
- if (SRpnt == NULL) {
- SRpnt = osst_allocate_request();
- if (SRpnt == NULL) {
- printk(KERN_ERR "%s: Can't allocate SCSI request.\n",
- tape_name(STp));
- if (signal_pending(current))
- (STp->buffer)->syscall_result = (-EINTR);
- else
- (STp->buffer)->syscall_result = (-EBUSY);
- return NULL;
- }
- SRpnt->stp = STp;
- }
-
- /* If async IO, set last_SRpnt. This ptr tells write_behind_check
- which IO is outstanding. It's nulled out when the IO completes. */
- if (!do_wait)
- (STp->buffer)->last_SRpnt = SRpnt;
-
- waiting = &STp->wait;
- init_completion(waiting);
- SRpnt->waiting = waiting;
-
- use_sg = (bytes > STp->buffer->sg[0].length) ? STp->buffer->use_sg : 0;
- if (use_sg) {
- bp = (char *)&(STp->buffer->sg[0]);
- if (STp->buffer->sg_segs < use_sg)
- use_sg = STp->buffer->sg_segs;
- }
- else
- bp = (STp->buffer)->b_data;
-
- memcpy(SRpnt->cmd, cmd, sizeof(SRpnt->cmd));
- STp->buffer->cmdstat.have_sense = 0;
- STp->buffer->syscall_result = 0;
-
- if (osst_execute(SRpnt, cmd, COMMAND_SIZE(cmd[0]), direction, bp, bytes,
- use_sg, timeout, retries))
- /* could not allocate the buffer or request was too large */
- (STp->buffer)->syscall_result = (-EBUSY);
- else if (do_wait) {
- wait_for_completion(waiting);
- SRpnt->waiting = NULL;
- STp->buffer->syscall_result = osst_chk_result(STp, SRpnt);
-#ifdef OSST_INJECT_ERRORS
- if (STp->buffer->syscall_result == 0 &&
- cmd[0] == READ_6 &&
- cmd[4] &&
- ( (++ inject % 83) == 29 ||
- (STp->first_frame_position == 240
- /* or STp->read_error_frame to fail again on the block calculated above */ &&
- ++repeat < 3))) {
- printk(OSST_DEB_MSG "%s:D: Injecting read error\n", tape_name(STp));
- STp->buffer->last_result_fatal = 1;
- }
-#endif
- }
- return SRpnt;
-}
-
-
-/* Handle the write-behind checking (downs the semaphore) */
-static void osst_write_behind_check(struct osst_tape *STp)
-{
- struct osst_buffer * STbuffer;
-
- STbuffer = STp->buffer;
-
-#if DEBUG
- if (STp->write_pending)
- STp->nbr_waits++;
- else
- STp->nbr_finished++;
-#endif
- wait_for_completion(&(STp->wait));
- STp->buffer->last_SRpnt->waiting = NULL;
-
- STp->buffer->syscall_result = osst_chk_result(STp, STp->buffer->last_SRpnt);
-
- if (STp->buffer->syscall_result)
- STp->buffer->syscall_result =
- osst_write_error_recovery(STp, &(STp->buffer->last_SRpnt), 1);
- else
- STp->first_frame_position++;
-
- osst_release_request(STp->buffer->last_SRpnt);
-
- if (STbuffer->writing < STbuffer->buffer_bytes)
- printk(KERN_WARNING "osst :A: write_behind_check: something left in buffer!\n");
-
- STbuffer->last_SRpnt = NULL;
- STbuffer->buffer_bytes -= STbuffer->writing;
- STbuffer->writing = 0;
-
- return;
-}
-
-
-
-/* Onstream specific Routines */
-/*
- * Initialize the OnStream AUX
- */
-static void osst_init_aux(struct osst_tape * STp, int frame_type, int frame_seq_number,
- int logical_blk_num, int blk_sz, int blk_cnt)
-{
- os_aux_t *aux = STp->buffer->aux;
- os_partition_t *par = &aux->partition;
- os_dat_t *dat = &aux->dat;
-
- if (STp->raw) return;
-
- memset(aux, 0, sizeof(*aux));
- aux->format_id = htonl(0);
- memcpy(aux->application_sig, "LIN4", 4);
- aux->hdwr = htonl(0);
- aux->frame_type = frame_type;
-
- switch (frame_type) {
- case OS_FRAME_TYPE_HEADER:
- aux->update_frame_cntr = htonl(STp->update_frame_cntr);
- par->partition_num = OS_CONFIG_PARTITION;
- par->par_desc_ver = OS_PARTITION_VERSION;
- par->wrt_pass_cntr = htons(0xffff);
- /* 0-4 = reserved, 5-9 = header, 2990-2994 = header, 2995-2999 = reserved */
- par->first_frame_ppos = htonl(0);
- par->last_frame_ppos = htonl(0xbb7);
- aux->frame_seq_num = htonl(0);
- aux->logical_blk_num_high = htonl(0);
- aux->logical_blk_num = htonl(0);
- aux->next_mark_ppos = htonl(STp->first_mark_ppos);
- break;
- case OS_FRAME_TYPE_DATA:
- case OS_FRAME_TYPE_MARKER:
- dat->dat_sz = 8;
- dat->reserved1 = 0;
- dat->entry_cnt = 1;
- dat->reserved3 = 0;
- dat->dat_list[0].blk_sz = htonl(blk_sz);
- dat->dat_list[0].blk_cnt = htons(blk_cnt);
- dat->dat_list[0].flags = frame_type==OS_FRAME_TYPE_MARKER?
- OS_DAT_FLAGS_MARK:OS_DAT_FLAGS_DATA;
- dat->dat_list[0].reserved = 0;
- /* fall through */
- case OS_FRAME_TYPE_EOD:
- aux->update_frame_cntr = htonl(0);
- par->partition_num = OS_DATA_PARTITION;
- par->par_desc_ver = OS_PARTITION_VERSION;
- par->wrt_pass_cntr = htons(STp->wrt_pass_cntr);
- par->first_frame_ppos = htonl(STp->first_data_ppos);
- par->last_frame_ppos = htonl(STp->capacity);
- aux->frame_seq_num = htonl(frame_seq_number);
- aux->logical_blk_num_high = htonl(0);
- aux->logical_blk_num = htonl(logical_blk_num);
- break;
- default: ; /* probably FILL */
- }
- aux->filemark_cnt = htonl(STp->filemark_cnt);
- aux->phys_fm = htonl(0xffffffff);
- aux->last_mark_ppos = htonl(STp->last_mark_ppos);
- aux->last_mark_lbn = htonl(STp->last_mark_lbn);
-}
-
-/*
- * Verify that we have the correct tape frame
- */
-static int osst_verify_frame(struct osst_tape * STp, int frame_seq_number, int quiet)
-{
- char * name = tape_name(STp);
- os_aux_t * aux = STp->buffer->aux;
- os_partition_t * par = &(aux->partition);
- struct st_partstat * STps = &(STp->ps[STp->partition]);
- unsigned int blk_cnt, blk_sz, i;
-
- if (STp->raw) {
- if (STp->buffer->syscall_result) {
- for (i=0; i < STp->buffer->sg_segs; i++)
- memset(page_address(sg_page(&STp->buffer->sg[i])),
- 0, STp->buffer->sg[i].length);
- strcpy(STp->buffer->b_data, "READ ERROR ON FRAME");
- } else
- STp->buffer->buffer_bytes = OS_FRAME_SIZE;
- return 1;
- }
- if (STp->buffer->syscall_result) {
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Skipping frame, read error\n", name);
-#endif
- return 0;
- }
- if (ntohl(aux->format_id) != 0) {
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Skipping frame, format_id %u\n", name, ntohl(aux->format_id));
-#endif
- goto err_out;
- }
- if (memcmp(aux->application_sig, STp->application_sig, 4) != 0 &&
- (memcmp(aux->application_sig, "LIN3", 4) != 0 || STp->linux_media_version != 4)) {
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Skipping frame, incorrect application signature\n", name);
-#endif
- goto err_out;
- }
- if (par->partition_num != OS_DATA_PARTITION) {
- if (!STp->linux_media || STp->linux_media_version != 2) {
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Skipping frame, partition num %d\n",
- name, par->partition_num);
-#endif
- goto err_out;
- }
- }
- if (par->par_desc_ver != OS_PARTITION_VERSION) {
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Skipping frame, partition version %d\n", name, par->par_desc_ver);
-#endif
- goto err_out;
- }
- if (ntohs(par->wrt_pass_cntr) != STp->wrt_pass_cntr) {
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Skipping frame, wrt_pass_cntr %d (expected %d)\n",
- name, ntohs(par->wrt_pass_cntr), STp->wrt_pass_cntr);
-#endif
- goto err_out;
- }
- if (aux->frame_type != OS_FRAME_TYPE_DATA &&
- aux->frame_type != OS_FRAME_TYPE_EOD &&
- aux->frame_type != OS_FRAME_TYPE_MARKER) {
- if (!quiet) {
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Skipping frame, frame type %x\n", name, aux->frame_type);
-#endif
- }
- goto err_out;
- }
- if (aux->frame_type == OS_FRAME_TYPE_EOD &&
- STp->first_frame_position < STp->eod_frame_ppos) {
- printk(KERN_INFO "%s:I: Skipping premature EOD frame %d\n", name,
- STp->first_frame_position);
- goto err_out;
- }
- if (frame_seq_number != -1 && ntohl(aux->frame_seq_num) != frame_seq_number) {
- if (!quiet) {
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Skipping frame, sequence number %u (expected %d)\n",
- name, ntohl(aux->frame_seq_num), frame_seq_number);
-#endif
- }
- goto err_out;
- }
- if (aux->frame_type == OS_FRAME_TYPE_MARKER) {
- STps->eof = ST_FM_HIT;
-
- i = ntohl(aux->filemark_cnt);
- if (STp->header_cache != NULL && i < OS_FM_TAB_MAX && (i > STp->filemark_cnt ||
- STp->first_frame_position - 1 != ntohl(STp->header_cache->dat_fm_tab.fm_tab_ent[i]))) {
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: %s filemark %d at frame pos %d\n", name,
- STp->header_cache->dat_fm_tab.fm_tab_ent[i] == 0?"Learned":"Corrected",
- i, STp->first_frame_position - 1);
-#endif
- STp->header_cache->dat_fm_tab.fm_tab_ent[i] = htonl(STp->first_frame_position - 1);
- if (i >= STp->filemark_cnt)
- STp->filemark_cnt = i+1;
- }
- }
- if (aux->frame_type == OS_FRAME_TYPE_EOD) {
- STps->eof = ST_EOD_1;
- STp->frame_in_buffer = 1;
- }
- if (aux->frame_type == OS_FRAME_TYPE_DATA) {
- blk_cnt = ntohs(aux->dat.dat_list[0].blk_cnt);
- blk_sz = ntohl(aux->dat.dat_list[0].blk_sz);
- STp->buffer->buffer_bytes = blk_cnt * blk_sz;
- STp->buffer->read_pointer = 0;
- STp->frame_in_buffer = 1;
-
- /* See what block size was used to write file */
- if (STp->block_size != blk_sz && blk_sz > 0) {
- printk(KERN_INFO
- "%s:I: File was written with block size %d%c, currently %d%c, adjusted to match.\n",
- name, blk_sz<1024?blk_sz:blk_sz/1024,blk_sz<1024?'b':'k',
- STp->block_size<1024?STp->block_size:STp->block_size/1024,
- STp->block_size<1024?'b':'k');
- STp->block_size = blk_sz;
- STp->buffer->buffer_blocks = OS_DATA_SIZE / blk_sz;
- }
- STps->eof = ST_NOEOF;
- }
- STp->frame_seq_number = ntohl(aux->frame_seq_num);
- STp->logical_blk_num = ntohl(aux->logical_blk_num);
- return 1;
-
-err_out:
- if (STp->read_error_frame == 0)
- STp->read_error_frame = STp->first_frame_position - 1;
- return 0;
-}
-
-/*
- * Wait for the unit to become Ready
- */
-static int osst_wait_ready(struct osst_tape * STp, struct osst_request ** aSRpnt,
- unsigned timeout, int initial_delay)
-{
- unsigned char cmd[MAX_COMMAND_SIZE];
- struct osst_request * SRpnt;
- unsigned long startwait = jiffies;
-#if DEBUG
- int dbg = debugging;
- char * name = tape_name(STp);
-
- printk(OSST_DEB_MSG "%s:D: Reached onstream wait ready\n", name);
-#endif
-
- if (initial_delay > 0)
- msleep(jiffies_to_msecs(initial_delay));
-
- memset(cmd, 0, MAX_COMMAND_SIZE);
- cmd[0] = TEST_UNIT_READY;
-
- SRpnt = osst_do_scsi(*aSRpnt, STp, cmd, 0, DMA_NONE, STp->timeout, MAX_RETRIES, 1);
- *aSRpnt = SRpnt;
- if (!SRpnt) return (-EBUSY);
-
- while ( STp->buffer->syscall_result && time_before(jiffies, startwait + timeout*HZ) &&
- (( SRpnt->sense[2] == 2 && SRpnt->sense[12] == 4 &&
- (SRpnt->sense[13] == 1 || SRpnt->sense[13] == 8) ) ||
- ( SRpnt->sense[2] == 6 && SRpnt->sense[12] == 0x28 &&
- SRpnt->sense[13] == 0 ) )) {
-#if DEBUG
- if (debugging) {
- printk(OSST_DEB_MSG "%s:D: Sleeping in onstream wait ready\n", name);
- printk(OSST_DEB_MSG "%s:D: Turning off debugging for a while\n", name);
- debugging = 0;
- }
-#endif
- msleep(100);
-
- memset(cmd, 0, MAX_COMMAND_SIZE);
- cmd[0] = TEST_UNIT_READY;
-
- SRpnt = osst_do_scsi(SRpnt, STp, cmd, 0, DMA_NONE, STp->timeout, MAX_RETRIES, 1);
- }
- *aSRpnt = SRpnt;
-#if DEBUG
- debugging = dbg;
-#endif
- if ( STp->buffer->syscall_result &&
- osst_write_error_recovery(STp, aSRpnt, 0) ) {
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Abnormal exit from onstream wait ready\n", name);
- printk(OSST_DEB_MSG "%s:D: Result = %d, Sense: 0=%02x, 2=%02x, 12=%02x, 13=%02x\n", name,
- STp->buffer->syscall_result, SRpnt->sense[0], SRpnt->sense[2],
- SRpnt->sense[12], SRpnt->sense[13]);
-#endif
- return (-EIO);
- }
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Normal exit from onstream wait ready\n", name);
-#endif
- return 0;
-}
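
For readability: the TEST UNIT READY polling in osst_wait_ready() and osst_wait_for_medium() compares raw bytes of fixed-format sense data. The sketch below is illustrative only (the helper name is invented and it is not part of osst.c); it spells out which standard SCSI conditions those byte patterns correspond to.

#include <stdbool.h>

/* Sketch: the "keep waiting" conditions osst_wait_ready() polls on. */
static bool example_still_becoming_ready(const unsigned char *sense)
{
	unsigned char key  = sense[2] & 0x0f;	/* sense key                       */
	unsigned char asc  = sense[12];		/* additional sense code           */
	unsigned char ascq = sense[13];		/* additional sense code qualifier */

	/* NOT READY: "in process of becoming ready" (04/01)
	 * or "long write in progress" (04/08)                */
	if (key == 0x02 && asc == 0x04 && (ascq == 0x01 || ascq == 0x08))
		return true;
	/* UNIT ATTENTION: "not ready to ready change" (28/00) */
	if (key == 0x06 && asc == 0x28 && ascq == 0x00)
		return true;
	return false;
}

osst_wait_for_medium() below polls in the same way, but while the sense data reports NOT READY with ASC 0x3a, i.e. no medium present.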
-
-/*
- * Wait for a tape to be inserted in the unit
- */
-static int osst_wait_for_medium(struct osst_tape * STp, struct osst_request ** aSRpnt, unsigned timeout)
-{
- unsigned char cmd[MAX_COMMAND_SIZE];
- struct osst_request * SRpnt;
- unsigned long startwait = jiffies;
-#if DEBUG
- int dbg = debugging;
- char * name = tape_name(STp);
-
- printk(OSST_DEB_MSG "%s:D: Reached onstream wait for medium\n", name);
-#endif
-
- memset(cmd, 0, MAX_COMMAND_SIZE);
- cmd[0] = TEST_UNIT_READY;
-
- SRpnt = osst_do_scsi(*aSRpnt, STp, cmd, 0, DMA_NONE, STp->timeout, MAX_RETRIES, 1);
- *aSRpnt = SRpnt;
- if (!SRpnt) return (-EBUSY);
-
- while ( STp->buffer->syscall_result && time_before(jiffies, startwait + timeout*HZ) &&
- SRpnt->sense[2] == 2 && SRpnt->sense[12] == 0x3a && SRpnt->sense[13] == 0 ) {
-#if DEBUG
- if (debugging) {
- printk(OSST_DEB_MSG "%s:D: Sleeping in onstream wait medium\n", name);
- printk(OSST_DEB_MSG "%s:D: Turning off debugging for a while\n", name);
- debugging = 0;
- }
-#endif
- msleep(100);
-
- memset(cmd, 0, MAX_COMMAND_SIZE);
- cmd[0] = TEST_UNIT_READY;
-
- SRpnt = osst_do_scsi(SRpnt, STp, cmd, 0, DMA_NONE, STp->timeout, MAX_RETRIES, 1);
- }
- *aSRpnt = SRpnt;
-#if DEBUG
- debugging = dbg;
-#endif
- if ( STp->buffer->syscall_result && SRpnt->sense[2] != 2 &&
- SRpnt->sense[12] != 4 && SRpnt->sense[13] == 1) {
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Abnormal exit from onstream wait medium\n", name);
- printk(OSST_DEB_MSG "%s:D: Result = %d, Sense: 0=%02x, 2=%02x, 12=%02x, 13=%02x\n", name,
- STp->buffer->syscall_result, SRpnt->sense[0], SRpnt->sense[2],
- SRpnt->sense[12], SRpnt->sense[13]);
-#endif
- return 0;
- }
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Normal exit from onstream wait medium\n", name);
-#endif
- return 1;
-}
-
-static int osst_position_tape_and_confirm(struct osst_tape * STp, struct osst_request ** aSRpnt, int frame)
-{
- int retval;
-
- osst_wait_ready(STp, aSRpnt, 15 * 60, 0); /* TODO - can this catch a write error? */
- retval = osst_set_frame_position(STp, aSRpnt, frame, 0);
- if (retval) return (retval);
- osst_wait_ready(STp, aSRpnt, 15 * 60, OSST_WAIT_POSITION_COMPLETE);
- return (osst_get_frame_position(STp, aSRpnt));
-}
-
-/*
- * Wait for write(s) to complete
- */
-static int osst_flush_drive_buffer(struct osst_tape * STp, struct osst_request ** aSRpnt)
-{
- unsigned char cmd[MAX_COMMAND_SIZE];
- struct osst_request * SRpnt;
- int result = 0;
- int delay = OSST_WAIT_WRITE_COMPLETE;
-#if DEBUG
- char * name = tape_name(STp);
-
- printk(OSST_DEB_MSG "%s:D: Reached onstream flush drive buffer (write filemark)\n", name);
-#endif
-
- memset(cmd, 0, MAX_COMMAND_SIZE);
- cmd[0] = WRITE_FILEMARKS;
- cmd[1] = 1;
-
- SRpnt = osst_do_scsi(*aSRpnt, STp, cmd, 0, DMA_NONE, STp->timeout, MAX_RETRIES, 1);
- *aSRpnt = SRpnt;
- if (!SRpnt) return (-EBUSY);
- if (STp->buffer->syscall_result) {
- if ((SRpnt->sense[2] & 0x0f) == 2 && SRpnt->sense[12] == 4) {
- if (SRpnt->sense[13] == 8) {
- delay = OSST_WAIT_LONG_WRITE_COMPLETE;
- }
- } else
- result = osst_write_error_recovery(STp, aSRpnt, 0);
- }
- result |= osst_wait_ready(STp, aSRpnt, 5 * 60, delay);
- STp->ps[STp->partition].rw = OS_WRITING_COMPLETE;
-
- return (result);
-}
-
-#define OSST_POLL_PER_SEC 10
-static int osst_wait_frame(struct osst_tape * STp, struct osst_request ** aSRpnt, int curr, int minlast, int to)
-{
- unsigned long startwait = jiffies;
- char * name = tape_name(STp);
-#if DEBUG
- char notyetprinted = 1;
-#endif
- if (minlast >= 0 && STp->ps[STp->partition].rw != ST_READING)
- printk(KERN_ERR "%s:A: Waiting for frame without having initialized read!\n", name);
-
- while (time_before (jiffies, startwait + to*HZ))
- {
- int result;
- result = osst_get_frame_position(STp, aSRpnt);
- if (result == -EIO)
- if ((result = osst_write_error_recovery(STp, aSRpnt, 0)) == 0)
- return 0; /* successful recovery leaves drive ready for frame */
- if (result < 0) break;
- if (STp->first_frame_position == curr &&
- ((minlast < 0 &&
- (signed)STp->last_frame_position > (signed)curr + minlast) ||
- (minlast >= 0 && STp->cur_frames > minlast)
- ) && result >= 0)
- {
-#if DEBUG
- if (debugging || time_after_eq(jiffies, startwait + 2*HZ/OSST_POLL_PER_SEC))
- printk (OSST_DEB_MSG
- "%s:D: Succ wait f fr %i (>%i): %i-%i %i (%i): %3li.%li s\n",
- name, curr, curr+minlast, STp->first_frame_position,
- STp->last_frame_position, STp->cur_frames,
- result, (jiffies-startwait)/HZ,
- (((jiffies-startwait)%HZ)*10)/HZ);
-#endif
- return 0;
- }
-#if DEBUG
- if (time_after_eq(jiffies, startwait + 2*HZ/OSST_POLL_PER_SEC) && notyetprinted)
- {
- printk (OSST_DEB_MSG "%s:D: Wait for frame %i (>%i): %i-%i %i (%i)\n",
- name, curr, curr+minlast, STp->first_frame_position,
- STp->last_frame_position, STp->cur_frames, result);
- notyetprinted--;
- }
-#endif
- msleep(1000 / OSST_POLL_PER_SEC);
- }
-#if DEBUG
- printk (OSST_DEB_MSG "%s:D: Fail wait f fr %i (>%i): %i-%i %i: %3li.%li s\n",
- name, curr, curr+minlast, STp->first_frame_position,
- STp->last_frame_position, STp->cur_frames,
- (jiffies-startwait)/HZ, (((jiffies-startwait)%HZ)*10)/HZ);
-#endif
- return -EBUSY;
-}
-
-static int osst_recover_wait_frame(struct osst_tape * STp, struct osst_request ** aSRpnt, int writing)
-{
- struct osst_request * SRpnt;
- unsigned char cmd[MAX_COMMAND_SIZE];
- unsigned long startwait = jiffies;
- int retval = 1;
- char * name = tape_name(STp);
-
- if (writing) {
- char mybuf[24];
- char * olddata = STp->buffer->b_data;
- int oldsize = STp->buffer->buffer_size;
-
-		/* write a zero filemark, then read the position; if that shows a write error, try to recover; if there is no progress, wait */
-
- memset(cmd, 0, MAX_COMMAND_SIZE);
- cmd[0] = WRITE_FILEMARKS;
- cmd[1] = 1;
- SRpnt = osst_do_scsi(*aSRpnt, STp, cmd, 0, DMA_NONE, STp->timeout,
- MAX_RETRIES, 1);
-
- while (retval && time_before (jiffies, startwait + 5*60*HZ)) {
-
- if (STp->buffer->syscall_result && (SRpnt->sense[2] & 0x0f) != 2) {
-
- /* some failure - not just not-ready */
- retval = osst_write_error_recovery(STp, aSRpnt, 0);
- break;
- }
- schedule_timeout_interruptible(HZ / OSST_POLL_PER_SEC);
-
- STp->buffer->b_data = mybuf; STp->buffer->buffer_size = 24;
- memset(cmd, 0, MAX_COMMAND_SIZE);
- cmd[0] = READ_POSITION;
-
- SRpnt = osst_do_scsi(SRpnt, STp, cmd, 20, DMA_FROM_DEVICE, STp->timeout,
- MAX_RETRIES, 1);
-
- retval = ( STp->buffer->syscall_result || (STp->buffer)->b_data[15] > 25 );
- STp->buffer->b_data = olddata; STp->buffer->buffer_size = oldsize;
- }
- if (retval)
-			printk(KERN_ERR "%s:E: Device failed to write out buffered data\n", name);
- } else
- /* TODO - figure out which error conditions can be handled */
- if (STp->buffer->syscall_result)
- printk(KERN_WARNING
- "%s:W: Recover_wait_frame(read) cannot handle %02x:%02x:%02x\n", name,
- (*aSRpnt)->sense[ 2] & 0x0f,
- (*aSRpnt)->sense[12],
- (*aSRpnt)->sense[13]);
-
- return retval;
-}
-
-/*
- * Read the next OnStream tape frame at the current location
- */
-static int osst_read_frame(struct osst_tape * STp, struct osst_request ** aSRpnt, int timeout)
-{
- unsigned char cmd[MAX_COMMAND_SIZE];
- struct osst_request * SRpnt;
- int retval = 0;
-#if DEBUG
- os_aux_t * aux = STp->buffer->aux;
- char * name = tape_name(STp);
-#endif
-
- if (STp->poll)
- if (osst_wait_frame (STp, aSRpnt, STp->first_frame_position, 0, timeout))
- retval = osst_recover_wait_frame(STp, aSRpnt, 0);
-
- memset(cmd, 0, MAX_COMMAND_SIZE);
- cmd[0] = READ_6;
- cmd[1] = 1;
- cmd[4] = 1;
-
-#if DEBUG
- if (debugging)
- printk(OSST_DEB_MSG "%s:D: Reading frame from OnStream tape\n", name);
-#endif
- SRpnt = osst_do_scsi(*aSRpnt, STp, cmd, OS_FRAME_SIZE, DMA_FROM_DEVICE,
- STp->timeout, MAX_RETRIES, 1);
- *aSRpnt = SRpnt;
- if (!SRpnt)
- return (-EBUSY);
-
- if ((STp->buffer)->syscall_result) {
- retval = 1;
- if (STp->read_error_frame == 0) {
- STp->read_error_frame = STp->first_frame_position;
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Recording read error at %d\n", name, STp->read_error_frame);
-#endif
- }
-#if DEBUG
- if (debugging)
- printk(OSST_DEB_MSG "%s:D: Sense: %2x %2x %2x %2x %2x %2x %2x %2x\n",
- name,
- SRpnt->sense[0], SRpnt->sense[1],
- SRpnt->sense[2], SRpnt->sense[3],
- SRpnt->sense[4], SRpnt->sense[5],
- SRpnt->sense[6], SRpnt->sense[7]);
-#endif
- }
- else
- STp->first_frame_position++;
-#if DEBUG
- if (debugging) {
- char sig[8]; int i;
- for (i=0;i<4;i++)
- sig[i] = aux->application_sig[i]<32?'^':aux->application_sig[i];
- sig[4] = '\0';
- printk(OSST_DEB_MSG
- "%s:D: AUX: %s UpdFrCt#%d Wpass#%d %s FrSeq#%d LogBlk#%d Qty=%d Sz=%d\n", name, sig,
- ntohl(aux->update_frame_cntr), ntohs(aux->partition.wrt_pass_cntr),
- aux->frame_type==1?"EOD":aux->frame_type==2?"MARK":
- aux->frame_type==8?"HEADR":aux->frame_type==0x80?"DATA":"FILL",
- ntohl(aux->frame_seq_num), ntohl(aux->logical_blk_num),
- ntohs(aux->dat.dat_list[0].blk_cnt), ntohl(aux->dat.dat_list[0].blk_sz) );
- if (aux->frame_type==2)
- printk(OSST_DEB_MSG "%s:D: mark_cnt=%d, last_mark_ppos=%d, last_mark_lbn=%d\n", name,
- ntohl(aux->filemark_cnt), ntohl(aux->last_mark_ppos), ntohl(aux->last_mark_lbn));
- printk(OSST_DEB_MSG "%s:D: Exit read frame from OnStream tape with code %d\n", name, retval);
- }
-#endif
- return (retval);
-}
-
-static int osst_initiate_read(struct osst_tape * STp, struct osst_request ** aSRpnt)
-{
- struct st_partstat * STps = &(STp->ps[STp->partition]);
- struct osst_request * SRpnt ;
- unsigned char cmd[MAX_COMMAND_SIZE];
- int retval = 0;
- char * name = tape_name(STp);
-
- if (STps->rw != ST_READING) { /* Initialize read operation */
- if (STps->rw == ST_WRITING || STp->dirty) {
- STp->write_type = OS_WRITE_DATA;
- osst_flush_write_buffer(STp, aSRpnt);
- osst_flush_drive_buffer(STp, aSRpnt);
- }
- STps->rw = ST_READING;
- STp->frame_in_buffer = 0;
-
- /*
- * Issue a read 0 command to get the OnStream drive
- * read frames into its buffer.
- */
- memset(cmd, 0, MAX_COMMAND_SIZE);
- cmd[0] = READ_6;
- cmd[1] = 1;
-
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Start Read Ahead on OnStream tape\n", name);
-#endif
- SRpnt = osst_do_scsi(*aSRpnt, STp, cmd, 0, DMA_NONE, STp->timeout, MAX_RETRIES, 1);
- *aSRpnt = SRpnt;
- if ((retval = STp->buffer->syscall_result))
- printk(KERN_WARNING "%s:W: Error starting read ahead\n", name);
- }
-
- return retval;
-}
-
-static int osst_get_logical_frame(struct osst_tape * STp, struct osst_request ** aSRpnt,
- int frame_seq_number, int quiet)
-{
- struct st_partstat * STps = &(STp->ps[STp->partition]);
- char * name = tape_name(STp);
- int cnt = 0,
- bad = 0,
- past = 0,
- x,
- position;
-
- /*
- * If we want just any frame (-1) and there is a frame in the buffer, return it
- */
- if (frame_seq_number == -1 && STp->frame_in_buffer) {
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Frame %d still in buffer\n", name, STp->frame_seq_number);
-#endif
- return (STps->eof);
- }
- /*
- * Search and wait for the next logical tape frame
- */
- while (1) {
- if (cnt++ > 400) {
- printk(KERN_ERR "%s:E: Couldn't find logical frame %d, aborting\n",
- name, frame_seq_number);
- if (STp->read_error_frame) {
- osst_set_frame_position(STp, aSRpnt, STp->read_error_frame, 0);
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Repositioning tape to bad frame %d\n",
- name, STp->read_error_frame);
-#endif
- STp->read_error_frame = 0;
- STp->abort_count++;
- }
- return (-EIO);
- }
-#if DEBUG
- if (debugging)
- printk(OSST_DEB_MSG "%s:D: Looking for frame %d, attempt %d\n",
- name, frame_seq_number, cnt);
-#endif
- if ( osst_initiate_read(STp, aSRpnt)
- || ( (!STp->frame_in_buffer) && osst_read_frame(STp, aSRpnt, 30) ) ) {
- if (STp->raw)
- return (-EIO);
- position = osst_get_frame_position(STp, aSRpnt);
- if (position >= 0xbae && position < 0xbb8)
- position = 0xbb8;
- else if (position > STp->eod_frame_ppos || ++bad == 10) {
- position = STp->read_error_frame - 1;
- bad = 0;
- }
- else {
- position += 29;
- cnt += 19;
- }
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Bad frame detected, positioning tape to block %d\n",
- name, position);
-#endif
- osst_set_frame_position(STp, aSRpnt, position, 0);
- continue;
- }
- if (osst_verify_frame(STp, frame_seq_number, quiet))
- break;
- if (osst_verify_frame(STp, -1, quiet)) {
- x = ntohl(STp->buffer->aux->frame_seq_num);
- if (STp->fast_open) {
- printk(KERN_WARNING
- "%s:W: Found logical frame %d instead of %d after fast open\n",
- name, x, frame_seq_number);
- STp->header_ok = 0;
- STp->read_error_frame = 0;
- return (-EIO);
- }
- if (x > frame_seq_number) {
- if (++past > 3) {
- /* positioning backwards did not bring us to the desired frame */
- position = STp->read_error_frame - 1;
- }
- else {
- position = osst_get_frame_position(STp, aSRpnt)
- + frame_seq_number - x - 1;
-
- if (STp->first_frame_position >= 3000 && position < 3000)
- position -= 10;
- }
-#if DEBUG
- printk(OSST_DEB_MSG
- "%s:D: Found logical frame %d while looking for %d: back up %d\n",
- name, x, frame_seq_number,
- STp->first_frame_position - position);
-#endif
- osst_set_frame_position(STp, aSRpnt, position, 0);
- cnt += 10;
- }
- else
- past = 0;
- }
- if (osst_get_frame_position(STp, aSRpnt) == 0xbaf) {
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Skipping config partition\n", name);
-#endif
- osst_set_frame_position(STp, aSRpnt, 0xbb8, 0);
- cnt--;
- }
- STp->frame_in_buffer = 0;
- }
- if (cnt > 1) {
- STp->recover_count++;
- STp->recover_erreg++;
- printk(KERN_WARNING "%s:I: Don't worry, Read error at position %d recovered\n",
- name, STp->read_error_frame);
- }
- STp->read_count++;
-
-#if DEBUG
- if (debugging || STps->eof)
- printk(OSST_DEB_MSG
- "%s:D: Exit get logical frame (%d=>%d) from OnStream tape with code %d\n",
- name, frame_seq_number, STp->frame_seq_number, STps->eof);
-#endif
- STp->fast_open = 0;
- STp->read_error_frame = 0;
- return (STps->eof);
-}
-
-static int osst_seek_logical_blk(struct osst_tape * STp, struct osst_request ** aSRpnt, int logical_blk_num)
-{
- struct st_partstat * STps = &(STp->ps[STp->partition]);
- char * name = tape_name(STp);
- int retries = 0;
- int frame_seq_estimate, ppos_estimate, move;
-
- if (logical_blk_num < 0) logical_blk_num = 0;
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Seeking logical block %d (now at %d, size %d%c)\n",
- name, logical_blk_num, STp->logical_blk_num,
- STp->block_size<1024?STp->block_size:STp->block_size/1024,
- STp->block_size<1024?'b':'k');
-#endif
- /* Do we know where we are? */
- if (STps->drv_block >= 0) {
- move = logical_blk_num - STp->logical_blk_num;
- if (move < 0) move -= (OS_DATA_SIZE / STp->block_size) - 1;
- move /= (OS_DATA_SIZE / STp->block_size);
- frame_seq_estimate = STp->frame_seq_number + move;
- } else
- frame_seq_estimate = logical_blk_num * STp->block_size / OS_DATA_SIZE;
-
- if (frame_seq_estimate < 2980) ppos_estimate = frame_seq_estimate + 10;
- else ppos_estimate = frame_seq_estimate + 20;
- while (++retries < 10) {
- if (ppos_estimate > STp->eod_frame_ppos-2) {
- frame_seq_estimate += STp->eod_frame_ppos - 2 - ppos_estimate;
- ppos_estimate = STp->eod_frame_ppos - 2;
- }
- if (frame_seq_estimate < 0) {
- frame_seq_estimate = 0;
- ppos_estimate = 10;
- }
- osst_set_frame_position(STp, aSRpnt, ppos_estimate, 0);
- if (osst_get_logical_frame(STp, aSRpnt, frame_seq_estimate, 1) >= 0) {
- /* we've located the estimated frame, now does it have our block? */
- if (logical_blk_num < STp->logical_blk_num ||
- logical_blk_num >= STp->logical_blk_num + ntohs(STp->buffer->aux->dat.dat_list[0].blk_cnt)) {
- if (STps->eof == ST_FM_HIT)
- move = logical_blk_num < STp->logical_blk_num? -2 : 1;
- else {
- move = logical_blk_num - STp->logical_blk_num;
- if (move < 0) move -= (OS_DATA_SIZE / STp->block_size) - 1;
- move /= (OS_DATA_SIZE / STp->block_size);
- }
- if (!move) move = logical_blk_num > STp->logical_blk_num ? 1 : -1;
-#if DEBUG
- printk(OSST_DEB_MSG
- "%s:D: Seek retry %d at ppos %d fsq %d (est %d) lbn %d (need %d) move %d\n",
- name, retries, ppos_estimate, STp->frame_seq_number, frame_seq_estimate,
- STp->logical_blk_num, logical_blk_num, move);
-#endif
- frame_seq_estimate += move;
- ppos_estimate += move;
- continue;
- } else {
- STp->buffer->read_pointer = (logical_blk_num - STp->logical_blk_num) * STp->block_size;
- STp->buffer->buffer_bytes -= STp->buffer->read_pointer;
- STp->logical_blk_num = logical_blk_num;
-#if DEBUG
- printk(OSST_DEB_MSG
- "%s:D: Seek success at ppos %d fsq %d in_buf %d, bytes %d, ptr %d*%d\n",
- name, ppos_estimate, STp->frame_seq_number, STp->frame_in_buffer,
- STp->buffer->buffer_bytes, STp->buffer->read_pointer / STp->block_size,
- STp->block_size);
-#endif
- STps->drv_file = ntohl(STp->buffer->aux->filemark_cnt);
- if (STps->eof == ST_FM_HIT) {
- STps->drv_file++;
- STps->drv_block = 0;
- } else {
- STps->drv_block = ntohl(STp->buffer->aux->last_mark_lbn)?
- STp->logical_blk_num -
- (STps->drv_file ? ntohl(STp->buffer->aux->last_mark_lbn) + 1 : 0):
- -1;
- }
- STps->eof = (STp->first_frame_position >= STp->eod_frame_ppos)?ST_EOD:ST_NOEOF;
- return 0;
- }
- }
- if (osst_get_logical_frame(STp, aSRpnt, -1, 1) < 0)
- goto error;
- /* we are not yet at the estimated frame, adjust our estimate of its physical position */
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Seek retry %d at ppos %d fsq %d (est %d) lbn %d (need %d)\n",
- name, retries, ppos_estimate, STp->frame_seq_number, frame_seq_estimate,
- STp->logical_blk_num, logical_blk_num);
-#endif
- if (frame_seq_estimate != STp->frame_seq_number)
- ppos_estimate += frame_seq_estimate - STp->frame_seq_number;
- else
- break;
- }
-error:
- printk(KERN_ERR "%s:E: Couldn't seek to logical block %d (at %d), %d retries\n",
- name, logical_blk_num, STp->logical_blk_num, retries);
- return (-EIO);
-}
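
As a rough illustration of the initial estimate used above (not part of the driver; the constant and function names below are invented for the sketch): each frame carries 32K of payload, so a logical block number divides down to a frame sequence number, and the physical position adds an allowance for the header frames at the front of the tape and, once past roughly frame 2980, for the second header/filler group around physical frame 2990.

/* Illustrative only: first-guess mapping from logical block number to
 * physical frame position when the current position is unknown.
 */
#define EX_OS_DATA_SIZE	(32 * 1024)	/* payload bytes per frame */

static int example_ppos_estimate(int logical_blk_num, int block_size)
{
	int frame_seq = logical_blk_num * block_size / EX_OS_DATA_SIZE;

	/* +10 clears the header frames at the start of the tape; past
	 * frame ~2980 the estimate also has to clear the second
	 * header/filler group around physical frame 2990, hence +20. */
	return frame_seq + (frame_seq < 2980 ? 10 : 20);
}

For example, with 512-byte blocks a frame holds 64 blocks, so logical block 6400 maps to frame sequence 100 and an initial physical estimate of 110.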
-
-/* The values below are based on the OnStream frame payload size of 32K == 2**15,
- * that is, OSST_FRAME_SHIFT + OSST_SECTOR_SHIFT must be 15. With a minimum block
- * size of 512 bytes, we need to be able to resolve 32K/512 == 64 == 2**6 positions
- * inside each frame. Finally, OSST_SECTOR_MASK == 2**OSST_FRAME_SHIFT - 1.
- */
-#define OSST_FRAME_SHIFT 6
-#define OSST_SECTOR_SHIFT 9
-#define OSST_SECTOR_MASK 0x03F
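
In other words, osst_get_sector() and osst_seek_sector() below fold a physical frame number and a 512-byte-granular offset within the 32K payload into a single integer: the frame number in the upper bits and a 6-bit sector index in the lower bits. A standalone sketch (names invented for the example, not part of the driver):

#include <stdio.h>

#define EX_FRAME_SHIFT	6
#define EX_SECTOR_SHIFT	9
#define EX_SECTOR_MASK	0x3F

int main(void)
{
	int frame  = 100;	/* physical frame position on tape    */
	int offset = 4096;	/* byte offset within the 32K payload */
	int sector = (frame << EX_FRAME_SHIFT) |
		     ((offset >> EX_SECTOR_SHIFT) & EX_SECTOR_MASK);

	/* decode: frame = sector >> 6, offset = (sector & 0x3F) << 9 */
	printf("sector %d -> frame %d, offset %d\n", sector,
	       sector >> EX_FRAME_SHIFT,
	       (sector & EX_SECTOR_MASK) << EX_SECTOR_SHIFT);
	return 0;	/* prints: sector 6408 -> frame 100, offset 4096 */
}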
-
-static int osst_get_sector(struct osst_tape * STp, struct osst_request ** aSRpnt)
-{
- int sector;
-#if DEBUG
- char * name = tape_name(STp);
-
- printk(OSST_DEB_MSG
- "%s:D: Positioned at ppos %d, frame %d, lbn %d, file %d, blk %d, %cptr %d, eof %d\n",
- name, STp->first_frame_position, STp->frame_seq_number, STp->logical_blk_num,
- STp->ps[STp->partition].drv_file, STp->ps[STp->partition].drv_block,
- STp->ps[STp->partition].rw == ST_WRITING?'w':'r',
- STp->ps[STp->partition].rw == ST_WRITING?STp->buffer->buffer_bytes:
- STp->buffer->read_pointer, STp->ps[STp->partition].eof);
-#endif
- /* do we know where we are inside a file? */
- if (STp->ps[STp->partition].drv_block >= 0) {
- sector = (STp->frame_in_buffer ? STp->first_frame_position-1 :
- STp->first_frame_position) << OSST_FRAME_SHIFT;
- if (STp->ps[STp->partition].rw == ST_WRITING)
- sector |= (STp->buffer->buffer_bytes >> OSST_SECTOR_SHIFT) & OSST_SECTOR_MASK;
- else
- sector |= (STp->buffer->read_pointer >> OSST_SECTOR_SHIFT) & OSST_SECTOR_MASK;
- } else {
- sector = osst_get_frame_position(STp, aSRpnt);
- if (sector > 0)
- sector <<= OSST_FRAME_SHIFT;
- }
- return sector;
-}
-
-static int osst_seek_sector(struct osst_tape * STp, struct osst_request ** aSRpnt, int sector)
-{
- struct st_partstat * STps = &(STp->ps[STp->partition]);
- int frame = sector >> OSST_FRAME_SHIFT,
- offset = (sector & OSST_SECTOR_MASK) << OSST_SECTOR_SHIFT,
- r;
-#if DEBUG
- char * name = tape_name(STp);
-
- printk(OSST_DEB_MSG "%s:D: Seeking sector %d in frame %d at offset %d\n",
- name, sector, frame, offset);
-#endif
- if (frame < 0 || frame >= STp->capacity) return (-ENXIO);
-
- if (frame <= STp->first_data_ppos) {
- STp->frame_seq_number = STp->logical_blk_num = STps->drv_file = STps->drv_block = 0;
- return (osst_set_frame_position(STp, aSRpnt, frame, 0));
- }
- r = osst_set_frame_position(STp, aSRpnt, offset?frame:frame-1, 0);
- if (r < 0) return r;
-
- r = osst_get_logical_frame(STp, aSRpnt, -1, 1);
- if (r < 0) return r;
-
- if (osst_get_frame_position(STp, aSRpnt) != (offset?frame+1:frame)) return (-EIO);
-
- if (offset) {
- STp->logical_blk_num += offset / STp->block_size;
- STp->buffer->read_pointer = offset;
- STp->buffer->buffer_bytes -= offset;
- } else {
- STp->frame_seq_number++;
- STp->frame_in_buffer = 0;
- STp->logical_blk_num += ntohs(STp->buffer->aux->dat.dat_list[0].blk_cnt);
- STp->buffer->buffer_bytes = STp->buffer->read_pointer = 0;
- }
- STps->drv_file = ntohl(STp->buffer->aux->filemark_cnt);
- if (STps->eof == ST_FM_HIT) {
- STps->drv_file++;
- STps->drv_block = 0;
- } else {
- STps->drv_block = ntohl(STp->buffer->aux->last_mark_lbn)?
- STp->logical_blk_num -
- (STps->drv_file ? ntohl(STp->buffer->aux->last_mark_lbn) + 1 : 0):
- -1;
- }
- STps->eof = (STp->first_frame_position >= STp->eod_frame_ppos)?ST_EOD:ST_NOEOF;
-#if DEBUG
- printk(OSST_DEB_MSG
- "%s:D: Now positioned at ppos %d, frame %d, lbn %d, file %d, blk %d, rptr %d, eof %d\n",
- name, STp->first_frame_position, STp->frame_seq_number, STp->logical_blk_num,
- STps->drv_file, STps->drv_block, STp->buffer->read_pointer, STps->eof);
-#endif
- return 0;
-}
-
-/*
- * Read back the drive's internal buffer contents, as a part
- * of the write error recovery mechanism for old OnStream
- * firmware revisions.
- * Precondition for this function to work: all frames in the
- * drive's buffer must be of one type (DATA, MARK or EOD)!
- */
-static int osst_read_back_buffer_and_rewrite(struct osst_tape * STp, struct osst_request ** aSRpnt,
- unsigned int frame, unsigned int skip, int pending)
-{
- struct osst_request * SRpnt = * aSRpnt;
- unsigned char * buffer, * p;
- unsigned char cmd[MAX_COMMAND_SIZE];
- int flag, new_frame, i;
- int nframes = STp->cur_frames;
- int blks_per_frame = ntohs(STp->buffer->aux->dat.dat_list[0].blk_cnt);
- int frame_seq_number = ntohl(STp->buffer->aux->frame_seq_num)
- - (nframes + pending - 1);
- int logical_blk_num = ntohl(STp->buffer->aux->logical_blk_num)
- - (nframes + pending - 1) * blks_per_frame;
- char * name = tape_name(STp);
- unsigned long startwait = jiffies;
-#if DEBUG
- int dbg = debugging;
-#endif
-
- if ((buffer = vmalloc(array_size((nframes + 1), OS_DATA_SIZE))) == NULL)
- return (-EIO);
-
- printk(KERN_INFO "%s:I: Reading back %d frames from drive buffer%s\n",
- name, nframes, pending?" and one that was pending":"");
-
- osst_copy_from_buffer(STp->buffer, (p = &buffer[nframes * OS_DATA_SIZE]));
-#if DEBUG
- if (pending && debugging)
- printk(OSST_DEB_MSG "%s:D: Pending frame %d (lblk %d), data %02x %02x %02x %02x\n",
- name, frame_seq_number + nframes,
- logical_blk_num + nframes * blks_per_frame,
- p[0], p[1], p[2], p[3]);
-#endif
- for (i = 0, p = buffer; i < nframes; i++, p += OS_DATA_SIZE) {
-
- memset(cmd, 0, MAX_COMMAND_SIZE);
- cmd[0] = 0x3C; /* Buffer Read */
- cmd[1] = 6; /* Retrieve Faulty Block */
- cmd[7] = 32768 >> 8;
- cmd[8] = 32768 & 0xff;
-
- SRpnt = osst_do_scsi(SRpnt, STp, cmd, OS_FRAME_SIZE, DMA_FROM_DEVICE,
- STp->timeout, MAX_RETRIES, 1);
-
- if ((STp->buffer)->syscall_result || !SRpnt) {
- printk(KERN_ERR "%s:E: Failed to read frame back from OnStream buffer\n", name);
- vfree(buffer);
- *aSRpnt = SRpnt;
- return (-EIO);
- }
- osst_copy_from_buffer(STp->buffer, p);
-#if DEBUG
- if (debugging)
- printk(OSST_DEB_MSG "%s:D: Read back logical frame %d, data %02x %02x %02x %02x\n",
- name, frame_seq_number + i, p[0], p[1], p[2], p[3]);
-#endif
- }
- *aSRpnt = SRpnt;
- osst_get_frame_position(STp, aSRpnt);
-
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Frames left in buffer: %d\n", name, STp->cur_frames);
-#endif
- /* Write synchronously so we can be sure we're OK again and don't have to recover recursively */
- /* In the header we don't actually re-write the frames that fail, just the ones after them */
-
- for (flag=1, new_frame=frame, p=buffer, i=0; i < nframes + pending; ) {
-
- if (flag) {
- if (STp->write_type == OS_WRITE_HEADER) {
- i += skip;
- p += skip * OS_DATA_SIZE;
- }
- else if (new_frame < 2990 && new_frame+skip+nframes+pending >= 2990)
- new_frame = 3000-i;
- else
- new_frame += skip;
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Position to frame %d, write fseq %d\n",
- name, new_frame+i, frame_seq_number+i);
-#endif
- osst_set_frame_position(STp, aSRpnt, new_frame + i, 0);
- osst_wait_ready(STp, aSRpnt, 60, OSST_WAIT_POSITION_COMPLETE);
- osst_get_frame_position(STp, aSRpnt);
- SRpnt = * aSRpnt;
-
- if (new_frame > frame + 1000) {
- printk(KERN_ERR "%s:E: Failed to find writable tape media\n", name);
- vfree(buffer);
- return (-EIO);
- }
- if ( i >= nframes + pending ) break;
- flag = 0;
- }
- osst_copy_to_buffer(STp->buffer, p);
- /*
- * IMPORTANT: for error recovery to work, _never_ queue frames with mixed frame type!
- */
- osst_init_aux(STp, STp->buffer->aux->frame_type, frame_seq_number+i,
- logical_blk_num + i*blks_per_frame,
- ntohl(STp->buffer->aux->dat.dat_list[0].blk_sz), blks_per_frame);
- memset(cmd, 0, MAX_COMMAND_SIZE);
- cmd[0] = WRITE_6;
- cmd[1] = 1;
- cmd[4] = 1;
-
-#if DEBUG
- if (debugging)
- printk(OSST_DEB_MSG
- "%s:D: About to write frame %d, seq %d, lbn %d, data %02x %02x %02x %02x\n",
- name, new_frame+i, frame_seq_number+i, logical_blk_num + i*blks_per_frame,
- p[0], p[1], p[2], p[3]);
-#endif
- SRpnt = osst_do_scsi(SRpnt, STp, cmd, OS_FRAME_SIZE, DMA_TO_DEVICE,
- STp->timeout, MAX_RETRIES, 1);
-
- if (STp->buffer->syscall_result)
- flag = 1;
- else {
- p += OS_DATA_SIZE; i++;
-
- /* if we just sent the last frame, wait till all successfully written */
- if ( i == nframes + pending ) {
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Check re-write successful\n", name);
-#endif
- memset(cmd, 0, MAX_COMMAND_SIZE);
- cmd[0] = WRITE_FILEMARKS;
- cmd[1] = 1;
- SRpnt = osst_do_scsi(SRpnt, STp, cmd, 0, DMA_NONE,
- STp->timeout, MAX_RETRIES, 1);
-#if DEBUG
- if (debugging) {
- printk(OSST_DEB_MSG "%s:D: Sleeping in re-write wait ready\n", name);
- printk(OSST_DEB_MSG "%s:D: Turning off debugging for a while\n", name);
- debugging = 0;
- }
-#endif
- flag = STp->buffer->syscall_result;
- while ( !flag && time_before(jiffies, startwait + 60*HZ) ) {
-
- memset(cmd, 0, MAX_COMMAND_SIZE);
- cmd[0] = TEST_UNIT_READY;
-
- SRpnt = osst_do_scsi(SRpnt, STp, cmd, 0, DMA_NONE, STp->timeout,
- MAX_RETRIES, 1);
-
- if (SRpnt->sense[2] == 2 && SRpnt->sense[12] == 4 &&
- (SRpnt->sense[13] == 1 || SRpnt->sense[13] == 8)) {
- /* in the process of becoming ready */
- msleep(100);
- continue;
- }
- if (STp->buffer->syscall_result)
- flag = 1;
- break;
- }
-#if DEBUG
- debugging = dbg;
- printk(OSST_DEB_MSG "%s:D: Wait re-write finished\n", name);
-#endif
- }
- }
- *aSRpnt = SRpnt;
- if (flag) {
- if ((SRpnt->sense[ 2] & 0x0f) == 13 &&
- SRpnt->sense[12] == 0 &&
- SRpnt->sense[13] == 2) {
- printk(KERN_ERR "%s:E: Volume overflow in write error recovery\n", name);
- vfree(buffer);
- return (-EIO); /* hit end of tape = fail */
- }
- i = ((SRpnt->sense[3] << 24) |
- (SRpnt->sense[4] << 16) |
- (SRpnt->sense[5] << 8) |
- SRpnt->sense[6] ) - new_frame;
- p = &buffer[i * OS_DATA_SIZE];
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Additional write error at %d\n", name, new_frame+i);
-#endif
- osst_get_frame_position(STp, aSRpnt);
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: reported frame positions: host = %d, tape = %d, buffer = %d\n",
- name, STp->first_frame_position, STp->last_frame_position, STp->cur_frames);
-#endif
- }
- }
- if (flag) {
- /* error recovery did not successfully complete */
- printk(KERN_ERR "%s:D: Write error recovery failed in %s\n", name,
- STp->write_type == OS_WRITE_HEADER?"header":"body");
- }
- if (!pending)
- osst_copy_to_buffer(STp->buffer, p); /* so buffer content == at entry in all cases */
- vfree(buffer);
- return 0;
-}
-
-static int osst_reposition_and_retry(struct osst_tape * STp, struct osst_request ** aSRpnt,
- unsigned int frame, unsigned int skip, int pending)
-{
- unsigned char cmd[MAX_COMMAND_SIZE];
- struct osst_request * SRpnt;
- char * name = tape_name(STp);
- int expected = 0;
- int attempts = 1000 / skip;
- int flag = 1;
- unsigned long startwait = jiffies;
-#if DEBUG
- int dbg = debugging;
-#endif
-
- while (attempts && time_before(jiffies, startwait + 60*HZ)) {
- if (flag) {
-#if DEBUG
- debugging = dbg;
-#endif
- if (frame < 2990 && frame+skip+STp->cur_frames+pending >= 2990)
- frame = 3000-skip;
- expected = frame+skip+STp->cur_frames+pending;
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Position to fppos %d, re-write from fseq %d\n",
- name, frame+skip, STp->frame_seq_number-STp->cur_frames-pending);
-#endif
- osst_set_frame_position(STp, aSRpnt, frame + skip, 1);
- flag = 0;
- attempts--;
- schedule_timeout_interruptible(msecs_to_jiffies(100));
- }
- if (osst_get_frame_position(STp, aSRpnt) < 0) { /* additional write error */
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Addl error, host %d, tape %d, buffer %d\n",
- name, STp->first_frame_position,
- STp->last_frame_position, STp->cur_frames);
-#endif
- frame = STp->last_frame_position;
- flag = 1;
- continue;
- }
- if (pending && STp->cur_frames < 50) {
-
- memset(cmd, 0, MAX_COMMAND_SIZE);
- cmd[0] = WRITE_6;
- cmd[1] = 1;
- cmd[4] = 1;
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: About to write pending fseq %d at fppos %d\n",
- name, STp->frame_seq_number-1, STp->first_frame_position);
-#endif
- SRpnt = osst_do_scsi(*aSRpnt, STp, cmd, OS_FRAME_SIZE, DMA_TO_DEVICE,
- STp->timeout, MAX_RETRIES, 1);
- *aSRpnt = SRpnt;
-
- if (STp->buffer->syscall_result) { /* additional write error */
- if ((SRpnt->sense[ 2] & 0x0f) == 13 &&
- SRpnt->sense[12] == 0 &&
- SRpnt->sense[13] == 2) {
- printk(KERN_ERR
- "%s:E: Volume overflow in write error recovery\n",
- name);
- break; /* hit end of tape = fail */
- }
- flag = 1;
- }
- else
- pending = 0;
-
- continue;
- }
- if (STp->cur_frames == 0) {
-#if DEBUG
- debugging = dbg;
- printk(OSST_DEB_MSG "%s:D: Wait re-write finished\n", name);
-#endif
- if (STp->first_frame_position != expected) {
- printk(KERN_ERR "%s:A: Actual position %d - expected %d\n",
- name, STp->first_frame_position, expected);
- return (-EIO);
- }
- return 0;
- }
-#if DEBUG
- if (debugging) {
- printk(OSST_DEB_MSG "%s:D: Sleeping in re-write wait ready\n", name);
- printk(OSST_DEB_MSG "%s:D: Turning off debugging for a while\n", name);
- debugging = 0;
- }
-#endif
- schedule_timeout_interruptible(msecs_to_jiffies(100));
- }
- printk(KERN_ERR "%s:E: Failed to find valid tape media\n", name);
-#if DEBUG
- debugging = dbg;
-#endif
- return (-EIO);
-}
-
-/*
- * Error recovery algorithm for the OnStream tape.
- */
-
-static int osst_write_error_recovery(struct osst_tape * STp, struct osst_request ** aSRpnt, int pending)
-{
- struct osst_request * SRpnt = * aSRpnt;
- struct st_partstat * STps = & STp->ps[STp->partition];
- char * name = tape_name(STp);
- int retval = 0;
- int rw_state;
- unsigned int frame, skip;
-
- rw_state = STps->rw;
-
- if ((SRpnt->sense[ 2] & 0x0f) != 3
- || SRpnt->sense[12] != 12
- || SRpnt->sense[13] != 0) {
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Write error recovery cannot handle %02x:%02x:%02x\n", name,
- SRpnt->sense[2], SRpnt->sense[12], SRpnt->sense[13]);
-#endif
- return (-EIO);
- }
- frame = (SRpnt->sense[3] << 24) |
- (SRpnt->sense[4] << 16) |
- (SRpnt->sense[5] << 8) |
- SRpnt->sense[6];
- skip = SRpnt->sense[9];
-
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Detected physical bad frame at %u, advised to skip %d\n", name, frame, skip);
-#endif
- osst_get_frame_position(STp, aSRpnt);
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: reported frame positions: host = %d, tape = %d\n",
- name, STp->first_frame_position, STp->last_frame_position);
-#endif
- switch (STp->write_type) {
- case OS_WRITE_DATA:
- case OS_WRITE_EOD:
- case OS_WRITE_NEW_MARK:
- printk(KERN_WARNING
- "%s:I: Relocating %d buffered logical frames from position %u to %u\n",
- name, STp->cur_frames, frame, (frame + skip > 3000 && frame < 3000)?3000:frame + skip);
- if (STp->os_fw_rev >= 10600)
- retval = osst_reposition_and_retry(STp, aSRpnt, frame, skip, pending);
- else
- retval = osst_read_back_buffer_and_rewrite(STp, aSRpnt, frame, skip, pending);
- printk(KERN_WARNING "%s:%s: %sWrite error%srecovered\n", name,
- retval?"E" :"I",
- retval?"" :"Don't worry, ",
- retval?" not ":" ");
- break;
- case OS_WRITE_LAST_MARK:
- printk(KERN_ERR "%s:E: Bad frame in update last marker, fatal\n", name);
- osst_set_frame_position(STp, aSRpnt, frame + STp->cur_frames + pending, 0);
- retval = -EIO;
- break;
- case OS_WRITE_HEADER:
- printk(KERN_WARNING "%s:I: Bad frame in header partition, skipped\n", name);
- retval = osst_read_back_buffer_and_rewrite(STp, aSRpnt, frame, 1, pending);
- break;
- default:
- printk(KERN_INFO "%s:I: Bad frame in filler, ignored\n", name);
- osst_set_frame_position(STp, aSRpnt, frame + STp->cur_frames + pending, 0);
- }
- osst_get_frame_position(STp, aSRpnt);
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Positioning complete, cur_frames %d, pos %d, tape pos %d\n",
- name, STp->cur_frames, STp->first_frame_position, STp->last_frame_position);
- printk(OSST_DEB_MSG "%s:D: next logical frame to write: %d\n", name, STp->logical_blk_num);
-#endif
- if (retval == 0) {
- STp->recover_count++;
- STp->recover_erreg++;
- } else
- STp->abort_count++;
-
- STps->rw = rw_state;
- return retval;
-}
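
The recovery entry point above pulls the failing location out of fixed-format sense data: bytes 3..6 are the standard information field, which this drive family fills with the first bad physical frame, and byte 9, which falls inside the command-specific information field, is read as the suggested number of frames to skip. A sketch of just that decoding step (the function name is invented; this is not part of osst.c):

static void example_decode_write_error(const unsigned char *sense,
				       unsigned int *frame, unsigned int *skip)
{
	/* fixed-format sense: bytes 3..6 = information field (big-endian) */
	*frame = ((unsigned int)sense[3] << 24) |
		 ((unsigned int)sense[4] << 16) |
		 ((unsigned int)sense[5] <<  8) |
		  (unsigned int)sense[6];
	/* byte 9: drive-specific use of the command-specific information field */
	*skip  = sense[9];
}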
-
-static int osst_space_over_filemarks_backward(struct osst_tape * STp, struct osst_request ** aSRpnt,
- int mt_op, int mt_count)
-{
- char * name = tape_name(STp);
- int cnt;
- int last_mark_ppos = -1;
-
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Reached space_over_filemarks_backwards %d %d\n", name, mt_op, mt_count);
-#endif
- if (osst_get_logical_frame(STp, aSRpnt, -1, 0) < 0) {
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Couldn't get logical blk num in space_filemarks_bwd\n", name);
-#endif
- return -EIO;
- }
- if (STp->linux_media_version >= 4) {
- /*
- * direct lookup in header filemark list
- */
- cnt = ntohl(STp->buffer->aux->filemark_cnt);
- if (STp->header_ok &&
- STp->header_cache != NULL &&
- (cnt - mt_count) >= 0 &&
- (cnt - mt_count) < OS_FM_TAB_MAX &&
- (cnt - mt_count) < STp->filemark_cnt &&
- STp->header_cache->dat_fm_tab.fm_tab_ent[cnt-1] == STp->buffer->aux->last_mark_ppos)
-
- last_mark_ppos = ntohl(STp->header_cache->dat_fm_tab.fm_tab_ent[cnt - mt_count]);
-#if DEBUG
- if (STp->header_cache == NULL || (cnt - mt_count) < 0 || (cnt - mt_count) >= OS_FM_TAB_MAX)
- printk(OSST_DEB_MSG "%s:D: Filemark lookup fail due to %s\n", name,
- STp->header_cache == NULL?"lack of header cache":"count out of range");
- else
- printk(OSST_DEB_MSG "%s:D: Filemark lookup: prev mark %d (%s), skip %d to %d\n",
- name, cnt,
- ((cnt == -1 && ntohl(STp->buffer->aux->last_mark_ppos) == -1) ||
- (STp->header_cache->dat_fm_tab.fm_tab_ent[cnt-1] ==
- STp->buffer->aux->last_mark_ppos))?"match":"error",
- mt_count, last_mark_ppos);
-#endif
- if (last_mark_ppos > 10 && last_mark_ppos < STp->eod_frame_ppos) {
- osst_position_tape_and_confirm(STp, aSRpnt, last_mark_ppos);
- if (osst_get_logical_frame(STp, aSRpnt, -1, 0) < 0) {
-#if DEBUG
- printk(OSST_DEB_MSG
- "%s:D: Couldn't get logical blk num in space_filemarks\n", name);
-#endif
- return (-EIO);
- }
- if (STp->buffer->aux->frame_type != OS_FRAME_TYPE_MARKER) {
- printk(KERN_WARNING "%s:W: Expected to find marker at ppos %d, not found\n",
- name, last_mark_ppos);
- return (-EIO);
- }
- goto found;
- }
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Reverting to scan filemark backwards\n", name);
-#endif
- }
- cnt = 0;
- while (cnt != mt_count) {
- last_mark_ppos = ntohl(STp->buffer->aux->last_mark_ppos);
- if (last_mark_ppos == -1)
- return (-EIO);
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Positioning to last mark at %d\n", name, last_mark_ppos);
-#endif
- osst_position_tape_and_confirm(STp, aSRpnt, last_mark_ppos);
- cnt++;
- if (osst_get_logical_frame(STp, aSRpnt, -1, 0) < 0) {
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Couldn't get logical blk num in space_filemarks\n", name);
-#endif
- return (-EIO);
- }
- if (STp->buffer->aux->frame_type != OS_FRAME_TYPE_MARKER) {
- printk(KERN_WARNING "%s:W: Expected to find marker at ppos %d, not found\n",
- name, last_mark_ppos);
- return (-EIO);
- }
- }
-found:
- if (mt_op == MTBSFM) {
- STp->frame_seq_number++;
- STp->frame_in_buffer = 0;
- STp->buffer->buffer_bytes = 0;
- STp->buffer->read_pointer = 0;
- STp->logical_blk_num += ntohs(STp->buffer->aux->dat.dat_list[0].blk_cnt);
- }
- return 0;
-}
-
-/*
- * ADRL 1.1 compatible "slow" space filemarks fwd version
- *
- * Just scans for the filemark sequentially.
- */
-static int osst_space_over_filemarks_forward_slow(struct osst_tape * STp, struct osst_request ** aSRpnt,
- int mt_op, int mt_count)
-{
- int cnt = 0;
-#if DEBUG
- char * name = tape_name(STp);
-
- printk(OSST_DEB_MSG "%s:D: Reached space_over_filemarks_forward_slow %d %d\n", name, mt_op, mt_count);
-#endif
- if (osst_get_logical_frame(STp, aSRpnt, -1, 0) < 0) {
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Couldn't get logical blk num in space_filemarks_fwd\n", name);
-#endif
- return (-EIO);
- }
- while (1) {
- if (osst_get_logical_frame(STp, aSRpnt, -1, 0) < 0) {
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Couldn't get logical blk num in space_filemarks\n", name);
-#endif
- return (-EIO);
- }
- if (STp->buffer->aux->frame_type == OS_FRAME_TYPE_MARKER)
- cnt++;
- if (STp->buffer->aux->frame_type == OS_FRAME_TYPE_EOD) {
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: space_fwd: EOD reached\n", name);
-#endif
- if (STp->first_frame_position > STp->eod_frame_ppos+1) {
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: EOD position corrected (%d=>%d)\n",
- name, STp->eod_frame_ppos, STp->first_frame_position-1);
-#endif
- STp->eod_frame_ppos = STp->first_frame_position-1;
- }
- return (-EIO);
- }
- if (cnt == mt_count)
- break;
- STp->frame_in_buffer = 0;
- }
- if (mt_op == MTFSF) {
- STp->frame_seq_number++;
- STp->frame_in_buffer = 0;
- STp->buffer->buffer_bytes = 0;
- STp->buffer->read_pointer = 0;
- STp->logical_blk_num += ntohs(STp->buffer->aux->dat.dat_list[0].blk_cnt);
- }
- return 0;
-}
-
-/*
- * Fast linux specific version of OnStream FSF
- */
-static int osst_space_over_filemarks_forward_fast(struct osst_tape * STp, struct osst_request ** aSRpnt,
- int mt_op, int mt_count)
-{
- char * name = tape_name(STp);
- int cnt = 0,
- next_mark_ppos = -1;
-
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Reached space_over_filemarks_forward_fast %d %d\n", name, mt_op, mt_count);
-#endif
- if (osst_get_logical_frame(STp, aSRpnt, -1, 0) < 0) {
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Couldn't get logical blk num in space_filemarks_fwd\n", name);
-#endif
- return (-EIO);
- }
-
- if (STp->linux_media_version >= 4) {
- /*
- * direct lookup in header filemark list
- */
- cnt = ntohl(STp->buffer->aux->filemark_cnt) - 1;
- if (STp->header_ok &&
- STp->header_cache != NULL &&
- (cnt + mt_count) < OS_FM_TAB_MAX &&
- (cnt + mt_count) < STp->filemark_cnt &&
- ((cnt == -1 && ntohl(STp->buffer->aux->last_mark_ppos) == -1) ||
- (STp->header_cache->dat_fm_tab.fm_tab_ent[cnt] == STp->buffer->aux->last_mark_ppos)))
-
- next_mark_ppos = ntohl(STp->header_cache->dat_fm_tab.fm_tab_ent[cnt + mt_count]);
-#if DEBUG
- if (STp->header_cache == NULL || (cnt + mt_count) >= OS_FM_TAB_MAX)
- printk(OSST_DEB_MSG "%s:D: Filemark lookup fail due to %s\n", name,
- STp->header_cache == NULL?"lack of header cache":"count out of range");
- else
- printk(OSST_DEB_MSG "%s:D: Filemark lookup: prev mark %d (%s), skip %d to %d\n",
- name, cnt,
- ((cnt == -1 && ntohl(STp->buffer->aux->last_mark_ppos) == -1) ||
- (STp->header_cache->dat_fm_tab.fm_tab_ent[cnt] ==
- STp->buffer->aux->last_mark_ppos))?"match":"error",
- mt_count, next_mark_ppos);
-#endif
- if (next_mark_ppos <= 10 || next_mark_ppos > STp->eod_frame_ppos) {
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Reverting to slow filemark space\n", name);
-#endif
- return osst_space_over_filemarks_forward_slow(STp, aSRpnt, mt_op, mt_count);
- } else {
- osst_position_tape_and_confirm(STp, aSRpnt, next_mark_ppos);
- if (osst_get_logical_frame(STp, aSRpnt, -1, 0) < 0) {
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Couldn't get logical blk num in space_filemarks\n",
- name);
-#endif
- return (-EIO);
- }
- if (STp->buffer->aux->frame_type != OS_FRAME_TYPE_MARKER) {
- printk(KERN_WARNING "%s:W: Expected to find marker at ppos %d, not found\n",
- name, next_mark_ppos);
- return (-EIO);
- }
- if (ntohl(STp->buffer->aux->filemark_cnt) != cnt + mt_count) {
- printk(KERN_WARNING "%s:W: Expected to find marker %d at ppos %d, not %d\n",
- name, cnt+mt_count, next_mark_ppos,
- ntohl(STp->buffer->aux->filemark_cnt));
- return (-EIO);
- }
- }
- } else {
- /*
- * Find nearest (usually previous) marker, then jump from marker to marker
- */
- while (1) {
- if (STp->buffer->aux->frame_type == OS_FRAME_TYPE_MARKER)
- break;
- if (STp->buffer->aux->frame_type == OS_FRAME_TYPE_EOD) {
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: space_fwd: EOD reached\n", name);
-#endif
- return (-EIO);
- }
- if (ntohl(STp->buffer->aux->filemark_cnt) == 0) {
- if (STp->first_mark_ppos == -1) {
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Reverting to slow filemark space\n", name);
-#endif
- return osst_space_over_filemarks_forward_slow(STp, aSRpnt, mt_op, mt_count);
- }
- osst_position_tape_and_confirm(STp, aSRpnt, STp->first_mark_ppos);
- if (osst_get_logical_frame(STp, aSRpnt, -1, 0) < 0) {
-#if DEBUG
- printk(OSST_DEB_MSG
- "%s:D: Couldn't get logical blk num in space_filemarks_fwd_fast\n",
- name);
-#endif
- return (-EIO);
- }
- if (STp->buffer->aux->frame_type != OS_FRAME_TYPE_MARKER) {
- printk(KERN_WARNING "%s:W: Expected to find filemark at %d\n",
- name, STp->first_mark_ppos);
- return (-EIO);
- }
- } else {
- if (osst_space_over_filemarks_backward(STp, aSRpnt, MTBSF, 1) < 0)
- return (-EIO);
- mt_count++;
- }
- }
- cnt++;
- while (cnt != mt_count) {
- next_mark_ppos = ntohl(STp->buffer->aux->next_mark_ppos);
- if (!next_mark_ppos || next_mark_ppos > STp->eod_frame_ppos) {
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Reverting to slow filemark space\n", name);
-#endif
- return osst_space_over_filemarks_forward_slow(STp, aSRpnt, mt_op, mt_count - cnt);
- }
-#if DEBUG
- else printk(OSST_DEB_MSG "%s:D: Positioning to next mark at %d\n", name, next_mark_ppos);
-#endif
- osst_position_tape_and_confirm(STp, aSRpnt, next_mark_ppos);
- cnt++;
- if (osst_get_logical_frame(STp, aSRpnt, -1, 0) < 0) {
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Couldn't get logical blk num in space_filemarks\n",
- name);
-#endif
- return (-EIO);
- }
- if (STp->buffer->aux->frame_type != OS_FRAME_TYPE_MARKER) {
- printk(KERN_WARNING "%s:W: Expected to find marker at ppos %d, not found\n",
- name, next_mark_ppos);
- return (-EIO);
- }
- }
- }
- if (mt_op == MTFSF) {
- STp->frame_seq_number++;
- STp->frame_in_buffer = 0;
- STp->buffer->buffer_bytes = 0;
- STp->buffer->read_pointer = 0;
- STp->logical_blk_num += ntohs(STp->buffer->aux->dat.dat_list[0].blk_cnt);
- }
- return 0;
-}
-
-/*
- * In debug mode, we want to see as many errors as possible
- * to test the error recovery mechanism.
- */
-#if DEBUG
-static void osst_set_retries(struct osst_tape * STp, struct osst_request ** aSRpnt, int retries)
-{
- unsigned char cmd[MAX_COMMAND_SIZE];
- struct osst_request * SRpnt = * aSRpnt;
- char * name = tape_name(STp);
-
- memset(cmd, 0, MAX_COMMAND_SIZE);
- cmd[0] = MODE_SELECT;
- cmd[1] = 0x10;
- cmd[4] = NUMBER_RETRIES_PAGE_LENGTH + MODE_HEADER_LENGTH;
-
- (STp->buffer)->b_data[0] = cmd[4] - 1;
- (STp->buffer)->b_data[1] = 0; /* Medium Type - ignoring */
- (STp->buffer)->b_data[2] = 0; /* Reserved */
- (STp->buffer)->b_data[3] = 0; /* Block Descriptor Length */
- (STp->buffer)->b_data[MODE_HEADER_LENGTH + 0] = NUMBER_RETRIES_PAGE | (1 << 7);
- (STp->buffer)->b_data[MODE_HEADER_LENGTH + 1] = 2;
- (STp->buffer)->b_data[MODE_HEADER_LENGTH + 2] = 4;
- (STp->buffer)->b_data[MODE_HEADER_LENGTH + 3] = retries;
-
- if (debugging)
- printk(OSST_DEB_MSG "%s:D: Setting number of retries on OnStream tape to %d\n", name, retries);
-
- SRpnt = osst_do_scsi(SRpnt, STp, cmd, cmd[4], DMA_TO_DEVICE, STp->timeout, 0, 1);
- *aSRpnt = SRpnt;
-
- if ((STp->buffer)->syscall_result)
- printk (KERN_ERR "%s:D: Couldn't set retries to %d\n", name, retries);
-}
-#endif
-
-
-static int osst_write_filemark(struct osst_tape * STp, struct osst_request ** aSRpnt)
-{
- int result;
- int this_mark_ppos = STp->first_frame_position;
- int this_mark_lbn = STp->logical_blk_num;
-#if DEBUG
- char * name = tape_name(STp);
-#endif
-
- if (STp->raw) return 0;
-
- STp->write_type = OS_WRITE_NEW_MARK;
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Writing Filemark %i at fppos %d (fseq %d, lblk %d)\n",
- name, STp->filemark_cnt, this_mark_ppos, STp->frame_seq_number, this_mark_lbn);
-#endif
- STp->dirty = 1;
- result = osst_flush_write_buffer(STp, aSRpnt);
- result |= osst_flush_drive_buffer(STp, aSRpnt);
- STp->last_mark_ppos = this_mark_ppos;
- STp->last_mark_lbn = this_mark_lbn;
- if (STp->header_cache != NULL && STp->filemark_cnt < OS_FM_TAB_MAX)
- STp->header_cache->dat_fm_tab.fm_tab_ent[STp->filemark_cnt] = htonl(this_mark_ppos);
- if (STp->filemark_cnt++ == 0)
- STp->first_mark_ppos = this_mark_ppos;
- return result;
-}
-
-static int osst_write_eod(struct osst_tape * STp, struct osst_request ** aSRpnt)
-{
- int result;
-#if DEBUG
- char * name = tape_name(STp);
-#endif
-
- if (STp->raw) return 0;
-
- STp->write_type = OS_WRITE_EOD;
- STp->eod_frame_ppos = STp->first_frame_position;
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Writing EOD at fppos %d (fseq %d, lblk %d)\n", name,
- STp->eod_frame_ppos, STp->frame_seq_number, STp->logical_blk_num);
-#endif
- STp->dirty = 1;
-
- result = osst_flush_write_buffer(STp, aSRpnt);
- result |= osst_flush_drive_buffer(STp, aSRpnt);
- STp->eod_frame_lfa = --(STp->frame_seq_number);
- return result;
-}
-
-static int osst_write_filler(struct osst_tape * STp, struct osst_request ** aSRpnt, int where, int count)
-{
- char * name = tape_name(STp);
-
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Reached onstream write filler group %d\n", name, where);
-#endif
- osst_wait_ready(STp, aSRpnt, 60 * 5, 0);
- osst_set_frame_position(STp, aSRpnt, where, 0);
- STp->write_type = OS_WRITE_FILLER;
- while (count--) {
- memcpy(STp->buffer->b_data, "Filler", 6);
- STp->buffer->buffer_bytes = 6;
- STp->dirty = 1;
- if (osst_flush_write_buffer(STp, aSRpnt)) {
- printk(KERN_INFO "%s:I: Couldn't write filler frame\n", name);
- return (-EIO);
- }
- }
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Exiting onstream write filler group\n", name);
-#endif
- return osst_flush_drive_buffer(STp, aSRpnt);
-}
-
-static int __osst_write_header(struct osst_tape * STp, struct osst_request ** aSRpnt, int where, int count)
-{
- char * name = tape_name(STp);
- int result;
-
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Reached onstream write header group %d\n", name, where);
-#endif
- osst_wait_ready(STp, aSRpnt, 60 * 5, 0);
- osst_set_frame_position(STp, aSRpnt, where, 0);
- STp->write_type = OS_WRITE_HEADER;
- while (count--) {
- osst_copy_to_buffer(STp->buffer, (unsigned char *)STp->header_cache);
- STp->buffer->buffer_bytes = sizeof(os_header_t);
- STp->dirty = 1;
- if (osst_flush_write_buffer(STp, aSRpnt)) {
- printk(KERN_INFO "%s:I: Couldn't write header frame\n", name);
- return (-EIO);
- }
- }
- result = osst_flush_drive_buffer(STp, aSRpnt);
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Write onstream header group %s\n", name, result?"failed":"done");
-#endif
- return result;
-}
-
-static int osst_write_header(struct osst_tape * STp, struct osst_request ** aSRpnt, int locate_eod)
-{
- os_header_t * header;
- int result;
- char * name = tape_name(STp);
-
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Writing tape header\n", name);
-#endif
- if (STp->raw) return 0;
-
- if (STp->header_cache == NULL) {
- if ((STp->header_cache = vmalloc(sizeof(os_header_t))) == NULL) {
- printk(KERN_ERR "%s:E: Failed to allocate header cache\n", name);
- return (-ENOMEM);
- }
- memset(STp->header_cache, 0, sizeof(os_header_t));
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Allocated and cleared memory for header cache\n", name);
-#endif
- }
- if (STp->header_ok) STp->update_frame_cntr++;
- else STp->update_frame_cntr = 0;
-
- header = STp->header_cache;
- strcpy(header->ident_str, "ADR_SEQ");
- header->major_rev = 1;
- header->minor_rev = 4;
- header->ext_trk_tb_off = htons(17192);
- header->pt_par_num = 1;
- header->partition[0].partition_num = OS_DATA_PARTITION;
- header->partition[0].par_desc_ver = OS_PARTITION_VERSION;
- header->partition[0].wrt_pass_cntr = htons(STp->wrt_pass_cntr);
- header->partition[0].first_frame_ppos = htonl(STp->first_data_ppos);
- header->partition[0].last_frame_ppos = htonl(STp->capacity);
- header->partition[0].eod_frame_ppos = htonl(STp->eod_frame_ppos);
- header->cfg_col_width = htonl(20);
- header->dat_col_width = htonl(1500);
- header->qfa_col_width = htonl(0);
- header->ext_track_tb.nr_stream_part = 1;
- header->ext_track_tb.et_ent_sz = 32;
- header->ext_track_tb.dat_ext_trk_ey.et_part_num = 0;
- header->ext_track_tb.dat_ext_trk_ey.fmt = 1;
- header->ext_track_tb.dat_ext_trk_ey.fm_tab_off = htons(17736);
- header->ext_track_tb.dat_ext_trk_ey.last_hlb_hi = 0;
- header->ext_track_tb.dat_ext_trk_ey.last_hlb = htonl(STp->eod_frame_lfa);
- header->ext_track_tb.dat_ext_trk_ey.last_pp = htonl(STp->eod_frame_ppos);
- header->dat_fm_tab.fm_part_num = 0;
- header->dat_fm_tab.fm_tab_ent_sz = 4;
- header->dat_fm_tab.fm_tab_ent_cnt = htons(STp->filemark_cnt<OS_FM_TAB_MAX?
- STp->filemark_cnt:OS_FM_TAB_MAX);
-
- result = __osst_write_header(STp, aSRpnt, 0xbae, 5);
- if (STp->update_frame_cntr == 0)
- osst_write_filler(STp, aSRpnt, 0xbb3, 5);
- result &= __osst_write_header(STp, aSRpnt, 5, 5);
-
- if (locate_eod) {
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Locating back to eod frame addr %d\n", name, STp->eod_frame_ppos);
-#endif
- osst_set_frame_position(STp, aSRpnt, STp->eod_frame_ppos, 0);
- }
- if (result)
- printk(KERN_ERR "%s:E: Write header failed\n", name);
- else {
- memcpy(STp->application_sig, "LIN4", 4);
- STp->linux_media = 1;
- STp->linux_media_version = 4;
- STp->header_ok = 1;
- }
- return result;
-}
-
-static int osst_reset_header(struct osst_tape * STp, struct osst_request ** aSRpnt)
-{
- if (STp->header_cache != NULL)
- memset(STp->header_cache, 0, sizeof(os_header_t));
-
- STp->logical_blk_num = STp->frame_seq_number = 0;
- STp->frame_in_buffer = 0;
- STp->eod_frame_ppos = STp->first_data_ppos = 0x0000000A;
- STp->filemark_cnt = 0;
- STp->first_mark_ppos = STp->last_mark_ppos = STp->last_mark_lbn = -1;
- return osst_write_header(STp, aSRpnt, 1);
-}
-
-static int __osst_analyze_headers(struct osst_tape * STp, struct osst_request ** aSRpnt, int ppos)
-{
- char * name = tape_name(STp);
- os_header_t * header;
- os_aux_t * aux;
- char id_string[8];
- int linux_media_version,
- update_frame_cntr;
-
- if (STp->raw)
- return 1;
-
- if (ppos == 5 || ppos == 0xbae || STp->buffer->syscall_result) {
- if (osst_set_frame_position(STp, aSRpnt, ppos, 0))
- printk(KERN_WARNING "%s:W: Couldn't position tape\n", name);
- osst_wait_ready(STp, aSRpnt, 60 * 15, 0);
- if (osst_initiate_read (STp, aSRpnt)) {
- printk(KERN_WARNING "%s:W: Couldn't initiate read\n", name);
- return 0;
- }
- }
- if (osst_read_frame(STp, aSRpnt, 180)) {
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Couldn't read header frame\n", name);
-#endif
- return 0;
- }
- header = (os_header_t *) STp->buffer->b_data; /* warning: only first segment addressable */
- aux = STp->buffer->aux;
- if (aux->frame_type != OS_FRAME_TYPE_HEADER) {
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Skipping non-header frame (%d)\n", name, ppos);
-#endif
- return 0;
- }
- if (ntohl(aux->frame_seq_num) != 0 ||
- ntohl(aux->logical_blk_num) != 0 ||
- aux->partition.partition_num != OS_CONFIG_PARTITION ||
- ntohl(aux->partition.first_frame_ppos) != 0 ||
- ntohl(aux->partition.last_frame_ppos) != 0xbb7 ) {
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Invalid header frame (%d,%d,%d,%d,%d)\n", name,
- ntohl(aux->frame_seq_num), ntohl(aux->logical_blk_num),
- aux->partition.partition_num, ntohl(aux->partition.first_frame_ppos),
- ntohl(aux->partition.last_frame_ppos));
-#endif
- return 0;
- }
- if (strncmp(header->ident_str, "ADR_SEQ", 7) != 0 &&
- strncmp(header->ident_str, "ADR-SEQ", 7) != 0) {
- strlcpy(id_string, header->ident_str, 8);
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Invalid header identification string %s\n", name, id_string);
-#endif
- return 0;
- }
- update_frame_cntr = ntohl(aux->update_frame_cntr);
- if (update_frame_cntr < STp->update_frame_cntr) {
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Skipping frame %d with update_frame_counter %d<%d\n",
- name, ppos, update_frame_cntr, STp->update_frame_cntr);
-#endif
- return 0;
- }
- if (header->major_rev != 1 || header->minor_rev != 4 ) {
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: %s revision %d.%d detected (1.4 supported)\n",
- name, (header->major_rev != 1 || header->minor_rev < 2 ||
- header->minor_rev > 4 )? "Invalid" : "Warning:",
- header->major_rev, header->minor_rev);
-#endif
- if (header->major_rev != 1 || header->minor_rev < 2 || header->minor_rev > 4)
- return 0;
- }
-#if DEBUG
- if (header->pt_par_num != 1)
- printk(KERN_INFO "%s:W: %d partitions defined, only one supported\n",
- name, header->pt_par_num);
-#endif
- memcpy(id_string, aux->application_sig, 4);
- id_string[4] = 0;
- if (memcmp(id_string, "LIN", 3) == 0) {
- STp->linux_media = 1;
- linux_media_version = id_string[3] - '0';
- if (linux_media_version != 4)
- printk(KERN_INFO "%s:I: Linux media version %d detected (current 4)\n",
- name, linux_media_version);
- } else {
- printk(KERN_WARNING "%s:W: Non Linux media detected (%s)\n", name, id_string);
- return 0;
- }
- if (linux_media_version < STp->linux_media_version) {
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Skipping frame %d with linux_media_version %d\n",
- name, ppos, linux_media_version);
-#endif
- return 0;
- }
- if (linux_media_version > STp->linux_media_version) {
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Frame %d sets linux_media_version to %d\n",
- name, ppos, linux_media_version);
-#endif
- memcpy(STp->application_sig, id_string, 5);
- STp->linux_media_version = linux_media_version;
- STp->update_frame_cntr = -1;
- }
- if (update_frame_cntr > STp->update_frame_cntr) {
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Frame %d sets update_frame_counter to %d\n",
- name, ppos, update_frame_cntr);
-#endif
- if (STp->header_cache == NULL) {
- if ((STp->header_cache = vmalloc(sizeof(os_header_t))) == NULL) {
- printk(KERN_ERR "%s:E: Failed to allocate header cache\n", name);
- return 0;
- }
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Allocated memory for header cache\n", name);
-#endif
- }
- osst_copy_from_buffer(STp->buffer, (unsigned char *)STp->header_cache);
- header = STp->header_cache; /* further accesses from cached (full) copy */
-
- STp->wrt_pass_cntr = ntohs(header->partition[0].wrt_pass_cntr);
- STp->first_data_ppos = ntohl(header->partition[0].first_frame_ppos);
- STp->eod_frame_ppos = ntohl(header->partition[0].eod_frame_ppos);
- STp->eod_frame_lfa = ntohl(header->ext_track_tb.dat_ext_trk_ey.last_hlb);
- STp->filemark_cnt = ntohl(aux->filemark_cnt);
- STp->first_mark_ppos = ntohl(aux->next_mark_ppos);
- STp->last_mark_ppos = ntohl(aux->last_mark_ppos);
- STp->last_mark_lbn = ntohl(aux->last_mark_lbn);
- STp->update_frame_cntr = update_frame_cntr;
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Detected write pass %d, update frame counter %d, filemark counter %d\n",
- name, STp->wrt_pass_cntr, STp->update_frame_cntr, STp->filemark_cnt);
- printk(OSST_DEB_MSG "%s:D: first data frame on tape = %d, last = %d, eod frame = %d\n", name,
- STp->first_data_ppos,
- ntohl(header->partition[0].last_frame_ppos),
- ntohl(header->partition[0].eod_frame_ppos));
- printk(OSST_DEB_MSG "%s:D: first mark on tape = %d, last = %d, eod frame = %d\n",
- name, STp->first_mark_ppos, STp->last_mark_ppos, STp->eod_frame_ppos);
-#endif
- if (header->minor_rev < 4 && STp->linux_media_version == 4) {
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Moving filemark list to ADR 1.4 location\n", name);
-#endif
- memcpy((void *)header->dat_fm_tab.fm_tab_ent,
- (void *)header->old_filemark_list, sizeof(header->dat_fm_tab.fm_tab_ent));
- memset((void *)header->old_filemark_list, 0, sizeof(header->old_filemark_list));
- }
- if (header->minor_rev == 4 &&
- (header->ext_trk_tb_off != htons(17192) ||
- header->partition[0].partition_num != OS_DATA_PARTITION ||
- header->partition[0].par_desc_ver != OS_PARTITION_VERSION ||
- header->partition[0].last_frame_ppos != htonl(STp->capacity) ||
- header->cfg_col_width != htonl(20) ||
- header->dat_col_width != htonl(1500) ||
- header->qfa_col_width != htonl(0) ||
- header->ext_track_tb.nr_stream_part != 1 ||
- header->ext_track_tb.et_ent_sz != 32 ||
- header->ext_track_tb.dat_ext_trk_ey.et_part_num != OS_DATA_PARTITION ||
- header->ext_track_tb.dat_ext_trk_ey.fmt != 1 ||
- header->ext_track_tb.dat_ext_trk_ey.fm_tab_off != htons(17736) ||
- header->ext_track_tb.dat_ext_trk_ey.last_hlb_hi != 0 ||
- header->ext_track_tb.dat_ext_trk_ey.last_pp != htonl(STp->eod_frame_ppos) ||
- header->dat_fm_tab.fm_part_num != OS_DATA_PARTITION ||
- header->dat_fm_tab.fm_tab_ent_sz != 4 ||
- header->dat_fm_tab.fm_tab_ent_cnt !=
- htons(STp->filemark_cnt<OS_FM_TAB_MAX?STp->filemark_cnt:OS_FM_TAB_MAX)))
- printk(KERN_WARNING "%s:W: Failed consistency check ADR 1.4 format\n", name);
-
- }
-
- return 1;
-}
-
-static int osst_analyze_headers(struct osst_tape * STp, struct osst_request ** aSRpnt)
-{
- int position, ppos;
- int first, last;
- int valid = 0;
- char * name = tape_name(STp);
-
- position = osst_get_frame_position(STp, aSRpnt);
-
- if (STp->raw) {
- STp->header_ok = STp->linux_media = 1;
- STp->linux_media_version = 0;
- return 1;
- }
- STp->header_ok = STp->linux_media = STp->linux_media_version = 0;
- STp->wrt_pass_cntr = STp->update_frame_cntr = -1;
- STp->eod_frame_ppos = STp->first_data_ppos = -1;
- STp->first_mark_ppos = STp->last_mark_ppos = STp->last_mark_lbn = -1;
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Reading header\n", name);
-#endif
-
- /* optimization for speed - if we are positioned at ppos 10, read second group first */
- /* TODO try the ADR 1.1 locations for the second group if we have no valid one yet... */
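-	/* The header lives in two groups of five frames each, at ppos 5..9 and
-	 * ppos 0xbae..0xbb2 (cf. osst_write_header above). */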
-
- first = position==10?0xbae: 5;
- last = position==10?0xbb3:10;
-
- for (ppos = first; ppos < last; ppos++)
- if (__osst_analyze_headers(STp, aSRpnt, ppos))
- valid = 1;
-
- first = position==10? 5:0xbae;
- last = position==10?10:0xbb3;
-
- for (ppos = first; ppos < last; ppos++)
- if (__osst_analyze_headers(STp, aSRpnt, ppos))
- valid = 1;
-
- if (!valid) {
- printk(KERN_ERR "%s:E: Failed to find valid ADRL header, new media?\n", name);
- STp->eod_frame_ppos = STp->first_data_ppos = 0;
- osst_set_frame_position(STp, aSRpnt, 10, 0);
- return 0;
- }
- if (position <= STp->first_data_ppos) {
- position = STp->first_data_ppos;
- STp->ps[0].drv_file = STp->ps[0].drv_block = STp->frame_seq_number = STp->logical_blk_num = 0;
- }
- osst_set_frame_position(STp, aSRpnt, position, 0);
- STp->header_ok = 1;
-
- return 1;
-}
-
-static int osst_verify_position(struct osst_tape * STp, struct osst_request ** aSRpnt)
-{
- int frame_position = STp->first_frame_position;
- int frame_seq_numbr = STp->frame_seq_number;
- int logical_blk_num = STp->logical_blk_num;
- int halfway_frame = STp->frame_in_buffer;
- int read_pointer = STp->buffer->read_pointer;
- int prev_mark_ppos = -1;
- int actual_mark_ppos, i, n;
-#if DEBUG
- char * name = tape_name(STp);
-
- printk(OSST_DEB_MSG "%s:D: Verify that the tape is really the one we think before writing\n", name);
-#endif
- osst_set_frame_position(STp, aSRpnt, frame_position - 1, 0);
- if (osst_get_logical_frame(STp, aSRpnt, -1, 0) < 0) {
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Couldn't get logical blk num in verify_position\n", name);
-#endif
- return (-EIO);
- }
- if (STp->linux_media_version >= 4) {
- for (i=0; i<STp->filemark_cnt; i++)
- if ((n=ntohl(STp->header_cache->dat_fm_tab.fm_tab_ent[i])) < frame_position)
- prev_mark_ppos = n;
- } else
- prev_mark_ppos = frame_position - 1; /* usually - we don't really know */
- actual_mark_ppos = STp->buffer->aux->frame_type == OS_FRAME_TYPE_MARKER ?
- frame_position - 1 : ntohl(STp->buffer->aux->last_mark_ppos);
- if (frame_position != STp->first_frame_position ||
- frame_seq_numbr != STp->frame_seq_number + (halfway_frame?0:1) ||
- prev_mark_ppos != actual_mark_ppos ) {
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Block mismatch: fppos %d-%d, fseq %d-%d, mark %d-%d\n", name,
- STp->first_frame_position, frame_position,
- STp->frame_seq_number + (halfway_frame?0:1),
- frame_seq_numbr, actual_mark_ppos, prev_mark_ppos);
-#endif
- return (-EIO);
- }
- if (halfway_frame) {
- /* prepare buffer for append and rewrite on top of original */
- osst_set_frame_position(STp, aSRpnt, frame_position - 1, 0);
- STp->buffer->buffer_bytes = read_pointer;
- STp->ps[STp->partition].rw = ST_WRITING;
- STp->dirty = 1;
- }
- STp->frame_in_buffer = halfway_frame;
- STp->frame_seq_number = frame_seq_numbr;
- STp->logical_blk_num = logical_blk_num;
- return 0;
-}
-
-/* According to OnStream, the firmware version numbering is as follows:
- * X.XX for released versions (X=digit),
- * XXXY for unreleased versions (Y=letter)
- * Ordering: 1.05 < 106A < 106B < ... < 106a < ... < 1.06
- * This function maps the scheme onto monotonically increasing numbers.
- */
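-/* Worked examples of the mapping (illustrative, derived from the arithmetic
- * below rather than from OnStream documentation):
- *   "1.05" -> 10500,  "106A" -> 10501,  "106B" -> 10502,
- *   "106a" -> 10533,  "1.06" -> 10600
- */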
-static unsigned int osst_parse_firmware_rev (const char * str)
-{
- if (str[1] == '.') {
- return (str[0]-'0')*10000
- +(str[2]-'0')*1000
- +(str[3]-'0')*100;
- } else {
- return (str[0]-'0')*10000
- +(str[1]-'0')*1000
- +(str[2]-'0')*100 - 100
- +(str[3]-'@');
- }
-}
-
-/*
- * Configure the OnStream SCSI tape drive for default operation
- */
-static int osst_configure_onstream(struct osst_tape *STp, struct osst_request ** aSRpnt)
-{
- unsigned char cmd[MAX_COMMAND_SIZE];
- char * name = tape_name(STp);
- struct osst_request * SRpnt = * aSRpnt;
- osst_mode_parameter_header_t * header;
- osst_block_size_page_t * bs;
- osst_capabilities_page_t * cp;
- osst_tape_paramtr_page_t * prm;
- int drive_buffer_size;
-
- if (STp->ready != ST_READY) {
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Not Ready\n", name);
-#endif
- return (-EIO);
- }
-
- if (STp->os_fw_rev < 10600) {
- printk(KERN_INFO "%s:I: Old OnStream firmware revision detected (%s),\n", name, STp->device->rev);
- printk(KERN_INFO "%s:I: an upgrade to version 1.06 or above is recommended\n", name);
- }
-
- /*
- * Configure 32.5KB (data+aux) frame size.
- * Get the current frame size from the block size mode page
- */
- memset(cmd, 0, MAX_COMMAND_SIZE);
- cmd[0] = MODE_SENSE;
- cmd[1] = 8;
- cmd[2] = BLOCK_SIZE_PAGE;
- cmd[4] = BLOCK_SIZE_PAGE_LENGTH + MODE_HEADER_LENGTH;
-
- SRpnt = osst_do_scsi(SRpnt, STp, cmd, cmd[4], DMA_FROM_DEVICE, STp->timeout, 0, 1);
- if (SRpnt == NULL) {
-#if DEBUG
- printk(OSST_DEB_MSG "osst :D: Busy\n");
-#endif
- return (-EBUSY);
- }
- *aSRpnt = SRpnt;
- if ((STp->buffer)->syscall_result != 0) {
- printk (KERN_ERR "%s:E: Can't get tape block size mode page\n", name);
- return (-EIO);
- }
-
- header = (osst_mode_parameter_header_t *) (STp->buffer)->b_data;
- bs = (osst_block_size_page_t *) ((STp->buffer)->b_data + sizeof(osst_mode_parameter_header_t) + header->bdl);
-
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: 32KB play back: %s\n", name, bs->play32 ? "Yes" : "No");
- printk(OSST_DEB_MSG "%s:D: 32.5KB play back: %s\n", name, bs->play32_5 ? "Yes" : "No");
- printk(OSST_DEB_MSG "%s:D: 32KB record: %s\n", name, bs->record32 ? "Yes" : "No");
- printk(OSST_DEB_MSG "%s:D: 32.5KB record: %s\n", name, bs->record32_5 ? "Yes" : "No");
-#endif
-
- /*
- * Configure default auto columns mode, 32.5KB transfer mode
- */
- bs->one = 1;
- bs->play32 = 0;
- bs->play32_5 = 1;
- bs->record32 = 0;
- bs->record32_5 = 1;
-
- memset(cmd, 0, MAX_COMMAND_SIZE);
- cmd[0] = MODE_SELECT;
- cmd[1] = 0x10;
- cmd[4] = BLOCK_SIZE_PAGE_LENGTH + MODE_HEADER_LENGTH;
-
- SRpnt = osst_do_scsi(SRpnt, STp, cmd, cmd[4], DMA_TO_DEVICE, STp->timeout, 0, 1);
- *aSRpnt = SRpnt;
- if ((STp->buffer)->syscall_result != 0) {
- printk (KERN_ERR "%s:E: Couldn't set tape block size mode page\n", name);
- return (-EIO);
- }
-
-#if DEBUG
- printk(KERN_INFO "%s:D: Drive Block Size changed to 32.5K\n", name);
- /*
- * In debug mode, we want to see as many errors as possible
- * to test the error recovery mechanism.
- */
- osst_set_retries(STp, aSRpnt, 0);
- SRpnt = * aSRpnt;
-#endif
-
- /*
- * Set vendor name to 'LIN4' for "Linux support version 4".
- */
-
- memset(cmd, 0, MAX_COMMAND_SIZE);
- cmd[0] = MODE_SELECT;
- cmd[1] = 0x10;
- cmd[4] = VENDOR_IDENT_PAGE_LENGTH + MODE_HEADER_LENGTH;
-
- header->mode_data_length = VENDOR_IDENT_PAGE_LENGTH + MODE_HEADER_LENGTH - 1;
- header->medium_type = 0; /* Medium Type - ignoring */
- header->dsp = 0; /* Reserved */
- header->bdl = 0; /* Block Descriptor Length */
-
- (STp->buffer)->b_data[MODE_HEADER_LENGTH + 0] = VENDOR_IDENT_PAGE | (1 << 7);
- (STp->buffer)->b_data[MODE_HEADER_LENGTH + 1] = 6;
- (STp->buffer)->b_data[MODE_HEADER_LENGTH + 2] = 'L';
- (STp->buffer)->b_data[MODE_HEADER_LENGTH + 3] = 'I';
- (STp->buffer)->b_data[MODE_HEADER_LENGTH + 4] = 'N';
- (STp->buffer)->b_data[MODE_HEADER_LENGTH + 5] = '4';
- (STp->buffer)->b_data[MODE_HEADER_LENGTH + 6] = 0;
- (STp->buffer)->b_data[MODE_HEADER_LENGTH + 7] = 0;
-
- SRpnt = osst_do_scsi(SRpnt, STp, cmd, cmd[4], DMA_TO_DEVICE, STp->timeout, 0, 1);
- *aSRpnt = SRpnt;
-
- if ((STp->buffer)->syscall_result != 0) {
- printk (KERN_ERR "%s:E: Couldn't set vendor name to %s\n", name,
- (char *) ((STp->buffer)->b_data + MODE_HEADER_LENGTH + 2));
- return (-EIO);
- }
-
- memset(cmd, 0, MAX_COMMAND_SIZE);
- cmd[0] = MODE_SENSE;
- cmd[1] = 8;
- cmd[2] = CAPABILITIES_PAGE;
- cmd[4] = CAPABILITIES_PAGE_LENGTH + MODE_HEADER_LENGTH;
-
- SRpnt = osst_do_scsi(SRpnt, STp, cmd, cmd[4], DMA_FROM_DEVICE, STp->timeout, 0, 1);
- *aSRpnt = SRpnt;
-
- if ((STp->buffer)->syscall_result != 0) {
- printk (KERN_ERR "%s:E: Can't get capabilities page\n", name);
- return (-EIO);
- }
-
- header = (osst_mode_parameter_header_t *) (STp->buffer)->b_data;
- cp = (osst_capabilities_page_t *) ((STp->buffer)->b_data +
- sizeof(osst_mode_parameter_header_t) + header->bdl);
-
- drive_buffer_size = ntohs(cp->buffer_size) / 2;
-
- memset(cmd, 0, MAX_COMMAND_SIZE);
- cmd[0] = MODE_SENSE;
- cmd[1] = 8;
- cmd[2] = TAPE_PARAMTR_PAGE;
- cmd[4] = TAPE_PARAMTR_PAGE_LENGTH + MODE_HEADER_LENGTH;
-
- SRpnt = osst_do_scsi(SRpnt, STp, cmd, cmd[4], DMA_FROM_DEVICE, STp->timeout, 0, 1);
- *aSRpnt = SRpnt;
-
- if ((STp->buffer)->syscall_result != 0) {
- printk (KERN_ERR "%s:E: Can't get tape parameter page\n", name);
- return (-EIO);
- }
-
- header = (osst_mode_parameter_header_t *) (STp->buffer)->b_data;
- prm = (osst_tape_paramtr_page_t *) ((STp->buffer)->b_data +
- sizeof(osst_mode_parameter_header_t) + header->bdl);
-
- STp->density = prm->density;
- STp->capacity = ntohs(prm->segtrk) * ntohs(prm->trks);
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Density %d, tape length: %dMB, drive buffer size: %dKB\n",
- name, STp->density, STp->capacity / 32, drive_buffer_size);
-#endif
-
- return 0;
-
-}
-
-
-/* Step over EOF if it has been inadvertently crossed (ioctl not used because
- it messes up the block number). */
-static int cross_eof(struct osst_tape *STp, struct osst_request ** aSRpnt, int forward)
-{
- int result;
- char * name = tape_name(STp);
-
-#if DEBUG
- if (debugging)
- printk(OSST_DEB_MSG "%s:D: Stepping over filemark %s.\n",
- name, forward ? "forward" : "backward");
-#endif
-
- if (forward) {
- /* assumes that the filemark is already read by the drive, so this is low cost */
- result = osst_space_over_filemarks_forward_slow(STp, aSRpnt, MTFSF, 1);
- }
- else
- /* assumes this is only called if we just read the filemark! */
- result = osst_seek_logical_blk(STp, aSRpnt, STp->logical_blk_num - 1);
-
- if (result < 0)
- printk(KERN_WARNING "%s:W: Stepping over filemark %s failed.\n",
- name, forward ? "forward" : "backward");
-
- return result;
-}
-
-
-/* Get the tape position. */
-
-static int osst_get_frame_position(struct osst_tape *STp, struct osst_request ** aSRpnt)
-{
- unsigned char scmd[MAX_COMMAND_SIZE];
- struct osst_request * SRpnt;
- int result = 0;
- char * name = tape_name(STp);
-
-	/* KG: We want to be able to use it for checking Write Buffer availability
-	 * and thus don't want to risk overwriting anything. Exchange buffers ... */
- char mybuf[24];
- char * olddata = STp->buffer->b_data;
- int oldsize = STp->buffer->buffer_size;
-
- if (STp->ready != ST_READY) return (-EIO);
-
- memset (scmd, 0, MAX_COMMAND_SIZE);
- scmd[0] = READ_POSITION;
-
- STp->buffer->b_data = mybuf; STp->buffer->buffer_size = 24;
- SRpnt = osst_do_scsi(*aSRpnt, STp, scmd, 20, DMA_FROM_DEVICE,
- STp->timeout, MAX_RETRIES, 1);
- if (!SRpnt) {
- STp->buffer->b_data = olddata; STp->buffer->buffer_size = oldsize;
- return (-EBUSY);
- }
- *aSRpnt = SRpnt;
-
- if (STp->buffer->syscall_result)
- result = ((SRpnt->sense[2] & 0x0f) == 3) ? -EIO : -EINVAL; /* 3: Write Error */
-
- if (result == -EINVAL)
- printk(KERN_ERR "%s:E: Can't read tape position.\n", name);
- else {
- if (result == -EIO) { /* re-read position - this needs to preserve media errors */
- unsigned char mysense[16];
- memcpy (mysense, SRpnt->sense, 16);
- memset (scmd, 0, MAX_COMMAND_SIZE);
- scmd[0] = READ_POSITION;
- STp->buffer->b_data = mybuf; STp->buffer->buffer_size = 24;
- SRpnt = osst_do_scsi(SRpnt, STp, scmd, 20, DMA_FROM_DEVICE,
- STp->timeout, MAX_RETRIES, 1);
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Reread position, reason=[%02x:%02x:%02x], result=[%s%02x:%02x:%02x]\n",
- name, mysense[2], mysense[12], mysense[13], STp->buffer->syscall_result?"":"ok:",
- SRpnt->sense[2],SRpnt->sense[12],SRpnt->sense[13]);
-#endif
- if (!STp->buffer->syscall_result)
- memcpy (SRpnt->sense, mysense, 16);
- else
- printk(KERN_WARNING "%s:W: Double error in get position\n", name);
- }
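-		/* Decode the READ POSITION data: bytes 4..7 hold the host (first) frame
-		 * position, bytes 8..11 the tape (last) frame position and byte 15 the
-		 * number of frames still in the drive buffer; BOP/EOP flags are in byte 0. */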
- STp->first_frame_position = ((STp->buffer)->b_data[4] << 24)
- + ((STp->buffer)->b_data[5] << 16)
- + ((STp->buffer)->b_data[6] << 8)
- + (STp->buffer)->b_data[7];
- STp->last_frame_position = ((STp->buffer)->b_data[ 8] << 24)
- + ((STp->buffer)->b_data[ 9] << 16)
- + ((STp->buffer)->b_data[10] << 8)
- + (STp->buffer)->b_data[11];
- STp->cur_frames = (STp->buffer)->b_data[15];
-#if DEBUG
- if (debugging) {
- printk(OSST_DEB_MSG "%s:D: Drive Positions: host %d, tape %d%s, buffer %d\n", name,
- STp->first_frame_position, STp->last_frame_position,
- ((STp->buffer)->b_data[0]&0x80)?" (BOP)":
- ((STp->buffer)->b_data[0]&0x40)?" (EOP)":"",
- STp->cur_frames);
- }
-#endif
- if (STp->cur_frames == 0 && STp->first_frame_position != STp->last_frame_position) {
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Correcting read position %d, %d, %d\n", name,
- STp->first_frame_position, STp->last_frame_position, STp->cur_frames);
-#endif
- STp->first_frame_position = STp->last_frame_position;
- }
- }
- STp->buffer->b_data = olddata; STp->buffer->buffer_size = oldsize;
-
- return (result == 0 ? STp->first_frame_position : result);
-}
-
-
-/* Set the tape block */
-static int osst_set_frame_position(struct osst_tape *STp, struct osst_request ** aSRpnt, int ppos, int skip)
-{
- unsigned char scmd[MAX_COMMAND_SIZE];
- struct osst_request * SRpnt;
- struct st_partstat * STps;
- int result = 0;
- int pp = (ppos == 3000 && !skip)? 0 : ppos;
- char * name = tape_name(STp);
-
- if (STp->ready != ST_READY) return (-EIO);
-
- STps = &(STp->ps[STp->partition]);
-
- if (ppos < 0 || ppos > STp->capacity) {
- printk(KERN_WARNING "%s:W: Reposition request %d out of range\n", name, ppos);
- pp = ppos = ppos < 0 ? 0 : (STp->capacity - 1);
- result = (-EINVAL);
- }
-
- do {
-#if DEBUG
- if (debugging)
- printk(OSST_DEB_MSG "%s:D: Setting ppos to %d.\n", name, pp);
-#endif
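-		/* Frame address goes into CDB bytes 3..6 (big-endian); a 'skip' request
-		 * additionally sets a vendor-specific bit in control byte 9. */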
- memset (scmd, 0, MAX_COMMAND_SIZE);
- scmd[0] = SEEK_10;
- scmd[1] = 1;
- scmd[3] = (pp >> 24);
- scmd[4] = (pp >> 16);
- scmd[5] = (pp >> 8);
- scmd[6] = pp;
- if (skip)
- scmd[9] = 0x80;
-
- SRpnt = osst_do_scsi(*aSRpnt, STp, scmd, 0, DMA_NONE, STp->long_timeout,
- MAX_RETRIES, 1);
- if (!SRpnt)
- return (-EBUSY);
- *aSRpnt = SRpnt;
-
- if ((STp->buffer)->syscall_result != 0) {
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: SEEK command from %d to %d failed.\n",
- name, STp->first_frame_position, pp);
-#endif
- result = (-EIO);
- }
- if (pp != ppos)
- osst_wait_ready(STp, aSRpnt, 5 * 60, OSST_WAIT_POSITION_COMPLETE);
- } while ((pp != ppos) && (pp = ppos));
- STp->first_frame_position = STp->last_frame_position = ppos;
- STps->eof = ST_NOEOF;
- STps->at_sm = 0;
- STps->rw = ST_IDLE;
- STp->frame_in_buffer = 0;
- return result;
-}
-
-static int osst_write_trailer(struct osst_tape *STp, struct osst_request ** aSRpnt, int leave_at_EOT)
-{
- struct st_partstat * STps = &(STp->ps[STp->partition]);
- int result = 0;
-
- if (STp->write_type != OS_WRITE_NEW_MARK) {
- /* true unless the user wrote the filemark for us */
- result = osst_flush_drive_buffer(STp, aSRpnt);
- if (result < 0) goto out;
- result = osst_write_filemark(STp, aSRpnt);
- if (result < 0) goto out;
-
- if (STps->drv_file >= 0)
- STps->drv_file++ ;
- STps->drv_block = 0;
- }
- result = osst_write_eod(STp, aSRpnt);
- osst_write_header(STp, aSRpnt, leave_at_EOT);
-
- STps->eof = ST_FM;
-out:
- return result;
-}
-
-/* osst versions of st functions - augmented and stripped to suit OnStream only */
-
-/* Flush the write buffer (never need to write if variable blocksize). */
-static int osst_flush_write_buffer(struct osst_tape *STp, struct osst_request ** aSRpnt)
-{
- int offset, transfer, blks = 0;
- int result = 0;
- unsigned char cmd[MAX_COMMAND_SIZE];
- struct osst_request * SRpnt = *aSRpnt;
- struct st_partstat * STps;
- char * name = tape_name(STp);
-
- if ((STp->buffer)->writing) {
- if (SRpnt == (STp->buffer)->last_SRpnt)
-#if DEBUG
- { printk(OSST_DEB_MSG
- "%s:D: aSRpnt points to osst_request that write_behind_check will release -- cleared\n", name);
-#endif
- *aSRpnt = SRpnt = NULL;
-#if DEBUG
- } else if (SRpnt)
- printk(OSST_DEB_MSG
- "%s:D: aSRpnt does not point to osst_request that write_behind_check will release -- strange\n", name);
-#endif
- osst_write_behind_check(STp);
- if ((STp->buffer)->syscall_result) {
-#if DEBUG
- if (debugging)
- printk(OSST_DEB_MSG "%s:D: Async write error (flush) %x.\n",
- name, (STp->buffer)->midlevel_result);
-#endif
- if ((STp->buffer)->midlevel_result == INT_MAX)
- return (-ENOSPC);
- return (-EIO);
- }
- }
-
- result = 0;
- if (STp->dirty == 1) {
-
- STp->write_count++;
- STps = &(STp->ps[STp->partition]);
- STps->rw = ST_WRITING;
- offset = STp->buffer->buffer_bytes;
- blks = (offset + STp->block_size - 1) / STp->block_size;
- transfer = OS_FRAME_SIZE;
-
- if (offset < OS_DATA_SIZE)
- osst_zero_buffer_tail(STp->buffer);
-
- if (STp->poll)
- if (osst_wait_frame (STp, aSRpnt, STp->first_frame_position, -50, 120))
- result = osst_recover_wait_frame(STp, aSRpnt, 1);
-
- memset(cmd, 0, MAX_COMMAND_SIZE);
- cmd[0] = WRITE_6;
- cmd[1] = 1;
- cmd[4] = 1;
-
- switch (STp->write_type) {
- case OS_WRITE_DATA:
-#if DEBUG
- if (debugging)
- printk(OSST_DEB_MSG "%s:D: Writing %d blocks to frame %d, lblks %d-%d\n",
- name, blks, STp->frame_seq_number,
- STp->logical_blk_num - blks, STp->logical_blk_num - 1);
-#endif
- osst_init_aux(STp, OS_FRAME_TYPE_DATA, STp->frame_seq_number++,
- STp->logical_blk_num - blks, STp->block_size, blks);
- break;
- case OS_WRITE_EOD:
- osst_init_aux(STp, OS_FRAME_TYPE_EOD, STp->frame_seq_number++,
- STp->logical_blk_num, 0, 0);
- break;
- case OS_WRITE_NEW_MARK:
- osst_init_aux(STp, OS_FRAME_TYPE_MARKER, STp->frame_seq_number++,
- STp->logical_blk_num++, 0, blks=1);
- break;
- case OS_WRITE_HEADER:
- osst_init_aux(STp, OS_FRAME_TYPE_HEADER, 0, 0, 0, blks=0);
- break;
- default: /* probably FILLER */
- osst_init_aux(STp, OS_FRAME_TYPE_FILL, 0, 0, 0, 0);
- }
-#if DEBUG
- if (debugging)
- printk(OSST_DEB_MSG "%s:D: Flushing %d bytes, Transferring %d bytes in %d lblocks.\n",
- name, offset, transfer, blks);
-#endif
-
- SRpnt = osst_do_scsi(*aSRpnt, STp, cmd, transfer, DMA_TO_DEVICE,
- STp->timeout, MAX_RETRIES, 1);
- *aSRpnt = SRpnt;
- if (!SRpnt)
- return (-EBUSY);
-
- if ((STp->buffer)->syscall_result != 0) {
-#if DEBUG
- printk(OSST_DEB_MSG
- "%s:D: write sense [0]=0x%02x [2]=%02x [12]=%02x [13]=%02x\n",
- name, SRpnt->sense[0], SRpnt->sense[2],
- SRpnt->sense[12], SRpnt->sense[13]);
-#endif
- if ((SRpnt->sense[0] & 0x70) == 0x70 &&
- (SRpnt->sense[2] & 0x40) && /* FIXME - SC-30 drive doesn't assert EOM bit */
- (SRpnt->sense[2] & 0x0f) == NO_SENSE) {
- STp->dirty = 0;
- (STp->buffer)->buffer_bytes = 0;
- result = (-ENOSPC);
- }
- else {
- if (osst_write_error_recovery(STp, aSRpnt, 1)) {
- printk(KERN_ERR "%s:E: Error on flush write.\n", name);
- result = (-EIO);
- }
- }
- STps->drv_block = (-1); /* FIXME - even if write recovery succeeds? */
- }
- else {
- STp->first_frame_position++;
- STp->dirty = 0;
- (STp->buffer)->buffer_bytes = 0;
- }
- }
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Exit flush write buffer with code %d\n", name, result);
-#endif
- return result;
-}
-
-
-/* Flush the tape buffer. The tape will be positioned correctly unless
- seek_next is true. */
-static int osst_flush_buffer(struct osst_tape * STp, struct osst_request ** aSRpnt, int seek_next)
-{
- struct st_partstat * STps;
- int backspace = 0, result = 0;
-#if DEBUG
- char * name = tape_name(STp);
-#endif
-
- /*
- * If there was a bus reset, block further access
- * to this device.
- */
- if( STp->pos_unknown)
- return (-EIO);
-
- if (STp->ready != ST_READY)
- return 0;
-
- STps = &(STp->ps[STp->partition]);
- if (STps->rw == ST_WRITING || STp->dirty) { /* Writing */
- STp->write_type = OS_WRITE_DATA;
- return osst_flush_write_buffer(STp, aSRpnt);
- }
- if (STp->block_size == 0)
- return 0;
-
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Reached flush (read) buffer\n", name);
-#endif
-
- if (!STp->can_bsr) {
- backspace = ((STp->buffer)->buffer_bytes + (STp->buffer)->read_pointer) / STp->block_size -
- ((STp->buffer)->read_pointer + STp->block_size - 1 ) / STp->block_size ;
- (STp->buffer)->buffer_bytes = 0;
- (STp->buffer)->read_pointer = 0;
- STp->frame_in_buffer = 0; /* FIXME is this relevant w. OSST? */
- }
-
- if (!seek_next) {
- if (STps->eof == ST_FM_HIT) {
- result = cross_eof(STp, aSRpnt, 0); /* Back over the EOF hit */
- if (!result)
- STps->eof = ST_NOEOF;
- else {
- if (STps->drv_file >= 0)
- STps->drv_file++;
- STps->drv_block = 0;
- }
- }
- if (!result && backspace > 0) /* TODO -- design and run a test case for this */
- result = osst_seek_logical_blk(STp, aSRpnt, STp->logical_blk_num - backspace);
- }
- else if (STps->eof == ST_FM_HIT) {
- if (STps->drv_file >= 0)
- STps->drv_file++;
- STps->drv_block = 0;
- STps->eof = ST_NOEOF;
- }
-
- return result;
-}
-
-static int osst_write_frame(struct osst_tape * STp, struct osst_request ** aSRpnt, int synchronous)
-{
- unsigned char cmd[MAX_COMMAND_SIZE];
- struct osst_request * SRpnt;
- int blks;
-#if DEBUG
- char * name = tape_name(STp);
-#endif
-
- if ((!STp-> raw) && (STp->first_frame_position == 0xbae)) { /* _must_ preserve buffer! */
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Reaching config partition.\n", name);
-#endif
- if (osst_flush_drive_buffer(STp, aSRpnt) < 0) {
- return (-EIO);
- }
- /* error recovery may have bumped us past the header partition */
- if (osst_get_frame_position(STp, aSRpnt) < 0xbb8) {
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Skipping over config partition.\n", name);
-#endif
- osst_position_tape_and_confirm(STp, aSRpnt, 0xbb8);
- }
- }
-
- if (STp->poll)
- if (osst_wait_frame (STp, aSRpnt, STp->first_frame_position, -48, 120))
- if (osst_recover_wait_frame(STp, aSRpnt, 1))
- return (-EIO);
-
-// osst_build_stats(STp, &SRpnt);
-
- STp->ps[STp->partition].rw = ST_WRITING;
- STp->write_type = OS_WRITE_DATA;
-
- memset(cmd, 0, MAX_COMMAND_SIZE);
- cmd[0] = WRITE_6;
- cmd[1] = 1;
- cmd[4] = 1; /* one frame at a time... */
- blks = STp->buffer->buffer_bytes / STp->block_size;
-#if DEBUG
- if (debugging)
- printk(OSST_DEB_MSG "%s:D: Writing %d blocks to frame %d, lblks %d-%d\n", name, blks,
- STp->frame_seq_number, STp->logical_blk_num - blks, STp->logical_blk_num - 1);
-#endif
- osst_init_aux(STp, OS_FRAME_TYPE_DATA, STp->frame_seq_number++,
- STp->logical_blk_num - blks, STp->block_size, blks);
-
-#if DEBUG
- if (!synchronous)
- STp->write_pending = 1;
-#endif
- SRpnt = osst_do_scsi(*aSRpnt, STp, cmd, OS_FRAME_SIZE, DMA_TO_DEVICE, STp->timeout,
- MAX_RETRIES, synchronous);
- if (!SRpnt)
- return (-EBUSY);
- *aSRpnt = SRpnt;
-
- if (synchronous) {
- if (STp->buffer->syscall_result != 0) {
-#if DEBUG
- if (debugging)
- printk(OSST_DEB_MSG "%s:D: Error on write:\n", name);
-#endif
- if ((SRpnt->sense[0] & 0x70) == 0x70 &&
- (SRpnt->sense[2] & 0x40)) {
- if ((SRpnt->sense[2] & 0x0f) == VOLUME_OVERFLOW)
- return (-ENOSPC);
- }
- else {
- if (osst_write_error_recovery(STp, aSRpnt, 1))
- return (-EIO);
- }
- }
- else
- STp->first_frame_position++;
- }
-
- STp->write_count++;
-
- return 0;
-}
-
-/* Lock or unlock the drive door. Do not use while a struct osst_request is allocated. */
-static int do_door_lock(struct osst_tape * STp, int do_lock)
-{
- int retval;
-
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: %socking drive door.\n", tape_name(STp), do_lock ? "L" : "Unl");
-#endif
-
- retval = scsi_set_medium_removal(STp->device,
- do_lock ? SCSI_REMOVAL_PREVENT : SCSI_REMOVAL_ALLOW);
- if (!retval)
- STp->door_locked = do_lock ? ST_LOCKED_EXPLICIT : ST_UNLOCKED;
- else
- STp->door_locked = ST_LOCK_FAILS;
- return retval;
-}
-
-/* Set the internal state after reset */
-static void reset_state(struct osst_tape *STp)
-{
- int i;
- struct st_partstat *STps;
-
- STp->pos_unknown = 0;
- for (i = 0; i < ST_NBR_PARTITIONS; i++) {
- STps = &(STp->ps[i]);
- STps->rw = ST_IDLE;
- STps->eof = ST_NOEOF;
- STps->at_sm = 0;
- STps->last_block_valid = 0;
- STps->drv_block = -1;
- STps->drv_file = -1;
- }
-}
-
-
-/* Entry points to osst */
-
-/* Write command */
-static ssize_t osst_write(struct file * filp, const char __user * buf, size_t count, loff_t *ppos)
-{
- ssize_t total, retval = 0;
- ssize_t i, do_count, blks, transfer;
- int write_threshold;
- int doing_write = 0;
- const char __user * b_point;
- struct osst_request * SRpnt = NULL;
- struct st_modedef * STm;
- struct st_partstat * STps;
- struct osst_tape * STp = filp->private_data;
- char * name = tape_name(STp);
-
-
- if (mutex_lock_interruptible(&STp->lock))
- return (-ERESTARTSYS);
-
- /*
- * If we are in the middle of error recovery, don't let anyone
- * else try and use this device. Also, if error recovery fails, it
- * may try and take the device offline, in which case all further
- * access to the device is prohibited.
- */
- if( !scsi_block_when_processing_errors(STp->device) ) {
- retval = (-ENXIO);
- goto out;
- }
-
- if (STp->ready != ST_READY) {
- if (STp->ready == ST_NO_TAPE)
- retval = (-ENOMEDIUM);
- else
- retval = (-EIO);
- goto out;
- }
- STm = &(STp->modes[STp->current_mode]);
- if (!STm->defined) {
- retval = (-ENXIO);
- goto out;
- }
- if (count == 0)
- goto out;
-
- /*
- * If there was a bus reset, block further access
- * to this device.
- */
- if (STp->pos_unknown) {
- retval = (-EIO);
- goto out;
- }
-
-#if DEBUG
- if (!STp->in_use) {
- printk(OSST_DEB_MSG "%s:D: Incorrect device.\n", name);
- retval = (-EIO);
- goto out;
- }
-#endif
-
- if (STp->write_prot) {
- retval = (-EACCES);
- goto out;
- }
-
- /* Write must be integral number of blocks */
- if (STp->block_size != 0 && (count % STp->block_size) != 0) {
- printk(KERN_ERR "%s:E: Write (%zd bytes) not multiple of tape block size (%d%c).\n",
- name, count, STp->block_size<1024?
- STp->block_size:STp->block_size/1024, STp->block_size<1024?'b':'k');
- retval = (-EINVAL);
- goto out;
- }
-
- if (STp->first_frame_position >= STp->capacity - OSST_EOM_RESERVE) {
- printk(KERN_ERR "%s:E: Write truncated at EOM early warning (frame %d).\n",
- name, STp->first_frame_position);
- retval = (-ENOSPC);
- goto out;
- }
-
- if (STp->do_auto_lock && STp->door_locked == ST_UNLOCKED && !do_door_lock(STp, 1))
- STp->door_locked = ST_LOCKED_AUTO;
-
- STps = &(STp->ps[STp->partition]);
-
- if (STps->rw == ST_READING) {
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Switching from read to write at file %d, block %d\n", name,
- STps->drv_file, STps->drv_block);
-#endif
- retval = osst_flush_buffer(STp, &SRpnt, 0);
- if (retval)
- goto out;
- STps->rw = ST_IDLE;
- }
- if (STps->rw != ST_WRITING) {
- /* Are we totally rewriting this tape? */
- if (!STp->header_ok ||
- (STp->first_frame_position == STp->first_data_ppos && STps->drv_block < 0) ||
- (STps->drv_file == 0 && STps->drv_block == 0)) {
- STp->wrt_pass_cntr++;
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Allocating next write pass counter: %d\n",
- name, STp->wrt_pass_cntr);
-#endif
- osst_reset_header(STp, &SRpnt);
- STps->drv_file = STps->drv_block = 0;
- }
- /* Do we know where we'll be writing on the tape? */
- else {
- if ((STp->fast_open && osst_verify_position(STp, &SRpnt)) ||
- STps->drv_file < 0 || STps->drv_block < 0) {
- if (STp->first_frame_position == STp->eod_frame_ppos) { /* at EOD */
- STps->drv_file = STp->filemark_cnt;
- STps->drv_block = 0;
- }
- else {
- /* We have no idea where the tape is positioned - give up */
-#if DEBUG
- printk(OSST_DEB_MSG
- "%s:D: Cannot write at indeterminate position.\n", name);
-#endif
- retval = (-EIO);
- goto out;
- }
- }
- if ((STps->drv_file + STps->drv_block) > 0 && STps->drv_file < STp->filemark_cnt) {
- STp->filemark_cnt = STps->drv_file;
- STp->last_mark_ppos =
- ntohl(STp->header_cache->dat_fm_tab.fm_tab_ent[STp->filemark_cnt-1]);
- printk(KERN_WARNING
- "%s:W: Overwriting file %d with old write pass counter %d\n",
- name, STps->drv_file, STp->wrt_pass_cntr);
- printk(KERN_WARNING
- "%s:W: may lead to stale data being accepted on reading back!\n",
- name);
-#if DEBUG
- printk(OSST_DEB_MSG
- "%s:D: resetting filemark count to %d and last mark ppos,lbn to %d,%d\n",
- name, STp->filemark_cnt, STp->last_mark_ppos, STp->last_mark_lbn);
-#endif
- }
- }
- STp->fast_open = 0;
- }
- if (!STp->header_ok) {
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Write cannot proceed without valid headers\n", name);
-#endif
- retval = (-EIO);
- goto out;
- }
-
- if ((STp->buffer)->writing) {
-if (SRpnt) printk(KERN_ERR "%s:A: Not supposed to have SRpnt at line %d\n", name, __LINE__);
- osst_write_behind_check(STp);
- if ((STp->buffer)->syscall_result) {
-#if DEBUG
- if (debugging)
- printk(OSST_DEB_MSG "%s:D: Async write error (write) %x.\n", name,
- (STp->buffer)->midlevel_result);
-#endif
- if ((STp->buffer)->midlevel_result == INT_MAX)
- STps->eof = ST_EOM_OK;
- else
- STps->eof = ST_EOM_ERROR;
- }
- }
- if (STps->eof == ST_EOM_OK) {
- retval = (-ENOSPC);
- goto out;
- }
- else if (STps->eof == ST_EOM_ERROR) {
- retval = (-EIO);
- goto out;
- }
-
-	/* Check up front that the user buffer is readable, in case copy_from_user
-	   would otherwise only catch the problem after some tape movement. */
- if ((copy_from_user(&i, buf, 1) != 0 ||
- copy_from_user(&i, buf + count - 1, 1) != 0)) {
- retval = (-EFAULT);
- goto out;
- }
-
- if (!STm->do_buffer_writes) {
- write_threshold = 1;
- }
- else
- write_threshold = (STp->buffer)->buffer_blocks * STp->block_size;
- if (!STm->do_async_writes)
- write_threshold--;
-
- total = count;
-#if DEBUG
- if (debugging)
- printk(OSST_DEB_MSG "%s:D: Writing %d bytes to file %d block %d lblk %d fseq %d fppos %d\n",
- name, (int) count, STps->drv_file, STps->drv_block,
- STp->logical_blk_num, STp->frame_seq_number, STp->first_frame_position);
-#endif
- b_point = buf;
- while ((STp->buffer)->buffer_bytes + count > write_threshold)
- {
- doing_write = 1;
- do_count = (STp->buffer)->buffer_blocks * STp->block_size -
- (STp->buffer)->buffer_bytes;
- if (do_count > count)
- do_count = count;
-
- i = append_to_buffer(b_point, STp->buffer, do_count);
- if (i) {
- retval = i;
- goto out;
- }
-
- blks = do_count / STp->block_size;
- STp->logical_blk_num += blks; /* logical_blk_num is incremented as data is moved from user */
-
- i = osst_write_frame(STp, &SRpnt, 1);
-
- if (i == (-ENOSPC)) {
- transfer = STp->buffer->writing; /* FIXME -- check this logic */
- if (transfer <= do_count) {
- *ppos += do_count - transfer;
- count -= do_count - transfer;
- if (STps->drv_block >= 0) {
- STps->drv_block += (do_count - transfer) / STp->block_size;
- }
- STps->eof = ST_EOM_OK;
- retval = (-ENOSPC); /* EOM within current request */
-#if DEBUG
- if (debugging)
- printk(OSST_DEB_MSG "%s:D: EOM with %d bytes unwritten.\n",
- name, (int) transfer);
-#endif
- }
- else {
- STps->eof = ST_EOM_ERROR;
- STps->drv_block = (-1); /* Too cautious? */
- retval = (-EIO); /* EOM for old data */
-#if DEBUG
- if (debugging)
- printk(OSST_DEB_MSG "%s:D: EOM with lost data.\n", name);
-#endif
- }
- }
- else
- retval = i;
-
- if (retval < 0) {
- if (SRpnt != NULL) {
- osst_release_request(SRpnt);
- SRpnt = NULL;
- }
- STp->buffer->buffer_bytes = 0;
- STp->dirty = 0;
- if (count < total)
- retval = total - count;
- goto out;
- }
-
- *ppos += do_count;
- b_point += do_count;
- count -= do_count;
- if (STps->drv_block >= 0) {
- STps->drv_block += blks;
- }
- STp->buffer->buffer_bytes = 0;
- STp->dirty = 0;
- } /* end while write threshold exceeded */
-
- if (count != 0) {
- STp->dirty = 1;
- i = append_to_buffer(b_point, STp->buffer, count);
- if (i) {
- retval = i;
- goto out;
- }
- blks = count / STp->block_size;
- STp->logical_blk_num += blks;
- if (STps->drv_block >= 0) {
- STps->drv_block += blks;
- }
- *ppos += count;
- count = 0;
- }
-
- if (doing_write && (STp->buffer)->syscall_result != 0) {
- retval = (STp->buffer)->syscall_result;
- goto out;
- }
-
- if (STm->do_async_writes && ((STp->buffer)->buffer_bytes >= STp->write_threshold)) {
- /* Schedule an asynchronous write */
- (STp->buffer)->writing = ((STp->buffer)->buffer_bytes /
- STp->block_size) * STp->block_size;
- STp->dirty = !((STp->buffer)->writing ==
- (STp->buffer)->buffer_bytes);
-
- i = osst_write_frame(STp, &SRpnt, 0);
- if (i < 0) {
- retval = (-EIO);
- goto out;
- }
- SRpnt = NULL; /* Prevent releasing this request! */
- }
- STps->at_sm &= (total == 0);
- if (total > 0)
- STps->eof = ST_NOEOF;
-
- retval = total;
-
-out:
- if (SRpnt != NULL) osst_release_request(SRpnt);
-
- mutex_unlock(&STp->lock);
-
- return retval;
-}
-
-
-/* Read command */
-static ssize_t osst_read(struct file * filp, char __user * buf, size_t count, loff_t *ppos)
-{
- ssize_t total, retval = 0;
- ssize_t i, transfer;
- int special;
- struct st_modedef * STm;
- struct st_partstat * STps;
- struct osst_request * SRpnt = NULL;
- struct osst_tape * STp = filp->private_data;
- char * name = tape_name(STp);
-
-
- if (mutex_lock_interruptible(&STp->lock))
- return (-ERESTARTSYS);
-
- /*
- * If we are in the middle of error recovery, don't let anyone
- * else try and use this device. Also, if error recovery fails, it
- * may try and take the device offline, in which case all further
- * access to the device is prohibited.
- */
- if( !scsi_block_when_processing_errors(STp->device) ) {
- retval = (-ENXIO);
- goto out;
- }
-
- if (STp->ready != ST_READY) {
- if (STp->ready == ST_NO_TAPE)
- retval = (-ENOMEDIUM);
- else
- retval = (-EIO);
- goto out;
- }
- STm = &(STp->modes[STp->current_mode]);
- if (!STm->defined) {
- retval = (-ENXIO);
- goto out;
- }
-#if DEBUG
- if (!STp->in_use) {
- printk(OSST_DEB_MSG "%s:D: Incorrect device.\n", name);
- retval = (-EIO);
- goto out;
- }
-#endif
- /* Must have initialized medium */
- if (!STp->header_ok) {
- retval = (-EIO);
- goto out;
- }
-
- if (STp->do_auto_lock && STp->door_locked == ST_UNLOCKED && !do_door_lock(STp, 1))
- STp->door_locked = ST_LOCKED_AUTO;
-
- STps = &(STp->ps[STp->partition]);
- if (STps->rw == ST_WRITING) {
- retval = osst_flush_buffer(STp, &SRpnt, 0);
- if (retval)
- goto out;
- STps->rw = ST_IDLE;
- /* FIXME -- this may leave the tape without EOD and up2date headers */
- }
-
- if ((count % STp->block_size) != 0) {
- printk(KERN_WARNING
- "%s:W: Read (%zd bytes) not multiple of tape block size (%d%c).\n", name, count,
- STp->block_size<1024?STp->block_size:STp->block_size/1024, STp->block_size<1024?'b':'k');
- }
-
-#if DEBUG
- if (debugging && STps->eof != ST_NOEOF)
- printk(OSST_DEB_MSG "%s:D: EOF/EOM flag up (%d). Bytes %d\n", name,
- STps->eof, (STp->buffer)->buffer_bytes);
-#endif
- if ((STp->buffer)->buffer_bytes == 0 &&
- STps->eof >= ST_EOD_1) {
- if (STps->eof < ST_EOD) {
- STps->eof += 1;
- retval = 0;
- goto out;
- }
- retval = (-EIO); /* EOM or Blank Check */
- goto out;
- }
-
- /* Check the buffer writability before any tape movement. Don't alter
- buffer data. */
- if (copy_from_user(&i, buf, 1) != 0 ||
- copy_to_user (buf, &i, 1) != 0 ||
- copy_from_user(&i, buf + count - 1, 1) != 0 ||
- copy_to_user (buf + count - 1, &i, 1) != 0) {
- retval = (-EFAULT);
- goto out;
- }
-
- /* Loop until enough data in buffer or a special condition found */
- for (total = 0, special = 0; total < count - STp->block_size + 1 && !special; ) {
-
- /* Get new data if the buffer is empty */
- if ((STp->buffer)->buffer_bytes == 0) {
- if (STps->eof == ST_FM_HIT)
- break;
- special = osst_get_logical_frame(STp, &SRpnt, STp->frame_seq_number, 0);
- if (special < 0) { /* No need to continue read */
- STp->frame_in_buffer = 0;
- retval = special;
- goto out;
- }
- }
-
- /* Move the data from driver buffer to user buffer */
- if ((STp->buffer)->buffer_bytes > 0) {
-#if DEBUG
- if (debugging && STps->eof != ST_NOEOF)
- printk(OSST_DEB_MSG "%s:D: EOF up (%d). Left %d, needed %d.\n", name,
- STps->eof, (STp->buffer)->buffer_bytes, (int) (count - total));
-#endif
- /* force multiple of block size, note block_size may have been adjusted */
- transfer = (((STp->buffer)->buffer_bytes < count - total ?
- (STp->buffer)->buffer_bytes : count - total)/
- STp->block_size) * STp->block_size;
-
- if (transfer == 0) {
- printk(KERN_WARNING
- "%s:W: Nothing can be transferred, requested %zd, tape block size (%d%c).\n",
- name, count, STp->block_size < 1024?
- STp->block_size:STp->block_size/1024,
- STp->block_size<1024?'b':'k');
- break;
- }
- i = from_buffer(STp->buffer, buf, transfer);
- if (i) {
- retval = i;
- goto out;
- }
- STp->logical_blk_num += transfer / STp->block_size;
- STps->drv_block += transfer / STp->block_size;
- *ppos += transfer;
- buf += transfer;
- total += transfer;
- }
-
- if ((STp->buffer)->buffer_bytes == 0) {
-#if DEBUG
- if (debugging)
- printk(OSST_DEB_MSG "%s:D: Finished with frame %d\n",
- name, STp->frame_seq_number);
-#endif
- STp->frame_in_buffer = 0;
- STp->frame_seq_number++; /* frame to look for next time */
- }
- } /* for (total = 0, special = 0; total < count && !special; ) */
-
- /* Change the eof state if no data from tape or buffer */
- if (total == 0) {
- if (STps->eof == ST_FM_HIT) {
- STps->eof = (STp->first_frame_position >= STp->eod_frame_ppos)?ST_EOD_2:ST_FM;
- STps->drv_block = 0;
- if (STps->drv_file >= 0)
- STps->drv_file++;
- }
- else if (STps->eof == ST_EOD_1) {
- STps->eof = ST_EOD_2;
- if (STps->drv_block > 0 && STps->drv_file >= 0)
- STps->drv_file++;
- STps->drv_block = 0;
- }
- else if (STps->eof == ST_EOD_2)
- STps->eof = ST_EOD;
- }
- else if (STps->eof == ST_FM)
- STps->eof = ST_NOEOF;
-
- retval = total;
-
-out:
- if (SRpnt != NULL) osst_release_request(SRpnt);
-
- mutex_unlock(&STp->lock);
-
- return retval;
-}
-
-
-/* Set the driver options */
-static void osst_log_options(struct osst_tape *STp, struct st_modedef *STm, char *name)
-{
- printk(KERN_INFO
-"%s:I: Mode %d options: buffer writes: %d, async writes: %d, read ahead: %d\n",
- name, STp->current_mode, STm->do_buffer_writes, STm->do_async_writes,
- STm->do_read_ahead);
- printk(KERN_INFO
-"%s:I: can bsr: %d, two FMs: %d, fast mteom: %d, auto lock: %d,\n",
- name, STp->can_bsr, STp->two_fm, STp->fast_mteom, STp->do_auto_lock);
- printk(KERN_INFO
-"%s:I: defs for wr: %d, no block limits: %d, partitions: %d, s2 log: %d\n",
- name, STm->defaults_for_writes, STp->omit_blklims, STp->can_partitions,
- STp->scsi2_logical);
- printk(KERN_INFO
-"%s:I: sysv: %d\n", name, STm->sysv);
-#if DEBUG
- printk(KERN_INFO
- "%s:D: debugging: %d\n",
- name, debugging);
-#endif
-}
-
-
-static int osst_set_options(struct osst_tape *STp, long options)
-{
- int value;
- long code;
- struct st_modedef * STm;
- char * name = tape_name(STp);
-
- STm = &(STp->modes[STp->current_mode]);
- if (!STm->defined) {
- memcpy(STm, &(STp->modes[0]), sizeof(*STm));
- modes_defined = 1;
-#if DEBUG
- if (debugging)
- printk(OSST_DEB_MSG "%s:D: Initialized mode %d definition from mode 0\n",
- name, STp->current_mode);
-#endif
- }
-
- code = options & MT_ST_OPTIONS;
- if (code == MT_ST_BOOLEANS) {
- STm->do_buffer_writes = (options & MT_ST_BUFFER_WRITES) != 0;
- STm->do_async_writes = (options & MT_ST_ASYNC_WRITES) != 0;
- STm->defaults_for_writes = (options & MT_ST_DEF_WRITES) != 0;
- STm->do_read_ahead = (options & MT_ST_READ_AHEAD) != 0;
- STp->two_fm = (options & MT_ST_TWO_FM) != 0;
- STp->fast_mteom = (options & MT_ST_FAST_MTEOM) != 0;
- STp->do_auto_lock = (options & MT_ST_AUTO_LOCK) != 0;
- STp->can_bsr = (options & MT_ST_CAN_BSR) != 0;
- STp->omit_blklims = (options & MT_ST_NO_BLKLIMS) != 0;
- if ((STp->device)->scsi_level >= SCSI_2)
- STp->can_partitions = (options & MT_ST_CAN_PARTITIONS) != 0;
- STp->scsi2_logical = (options & MT_ST_SCSI2LOGICAL) != 0;
- STm->sysv = (options & MT_ST_SYSV) != 0;
-#if DEBUG
- debugging = (options & MT_ST_DEBUGGING) != 0;
-#endif
- osst_log_options(STp, STm, name);
- }
- else if (code == MT_ST_SETBOOLEANS || code == MT_ST_CLEARBOOLEANS) {
- value = (code == MT_ST_SETBOOLEANS);
- if ((options & MT_ST_BUFFER_WRITES) != 0)
- STm->do_buffer_writes = value;
- if ((options & MT_ST_ASYNC_WRITES) != 0)
- STm->do_async_writes = value;
- if ((options & MT_ST_DEF_WRITES) != 0)
- STm->defaults_for_writes = value;
- if ((options & MT_ST_READ_AHEAD) != 0)
- STm->do_read_ahead = value;
- if ((options & MT_ST_TWO_FM) != 0)
- STp->two_fm = value;
- if ((options & MT_ST_FAST_MTEOM) != 0)
- STp->fast_mteom = value;
- if ((options & MT_ST_AUTO_LOCK) != 0)
- STp->do_auto_lock = value;
- if ((options & MT_ST_CAN_BSR) != 0)
- STp->can_bsr = value;
- if ((options & MT_ST_NO_BLKLIMS) != 0)
- STp->omit_blklims = value;
- if ((STp->device)->scsi_level >= SCSI_2 &&
- (options & MT_ST_CAN_PARTITIONS) != 0)
- STp->can_partitions = value;
- if ((options & MT_ST_SCSI2LOGICAL) != 0)
- STp->scsi2_logical = value;
- if ((options & MT_ST_SYSV) != 0)
- STm->sysv = value;
-#if DEBUG
- if ((options & MT_ST_DEBUGGING) != 0)
- debugging = value;
-#endif
- osst_log_options(STp, STm, name);
- }
- else if (code == MT_ST_WRITE_THRESHOLD) {
- value = (options & ~MT_ST_OPTIONS) * ST_KILOBYTE;
- if (value < 1 || value > osst_buffer_size) {
- printk(KERN_WARNING "%s:W: Write threshold %d too small or too large.\n",
- name, value);
- return (-EIO);
- }
- STp->write_threshold = value;
- printk(KERN_INFO "%s:I: Write threshold set to %d bytes.\n",
- name, value);
- }
- else if (code == MT_ST_DEF_BLKSIZE) {
- value = (options & ~MT_ST_OPTIONS);
- if (value == ~MT_ST_OPTIONS) {
- STm->default_blksize = (-1);
- printk(KERN_INFO "%s:I: Default block size disabled.\n", name);
- }
- else {
- if (value < 512 || value > OS_DATA_SIZE || OS_DATA_SIZE % value) {
- printk(KERN_WARNING "%s:W: Default block size cannot be set to %d.\n",
- name, value);
- return (-EINVAL);
- }
- STm->default_blksize = value;
- printk(KERN_INFO "%s:I: Default block size set to %d bytes.\n",
- name, STm->default_blksize);
- }
- }
- else if (code == MT_ST_TIMEOUTS) {
- value = (options & ~MT_ST_OPTIONS);
- if ((value & MT_ST_SET_LONG_TIMEOUT) != 0) {
- STp->long_timeout = (value & ~MT_ST_SET_LONG_TIMEOUT) * HZ;
- printk(KERN_INFO "%s:I: Long timeout set to %d seconds.\n", name,
- (value & ~MT_ST_SET_LONG_TIMEOUT));
- }
- else {
- STp->timeout = value * HZ;
- printk(KERN_INFO "%s:I: Normal timeout set to %d seconds.\n", name, value);
- }
- }
- else if (code == MT_ST_DEF_OPTIONS) {
- code = (options & ~MT_ST_CLEAR_DEFAULT);
- value = (options & MT_ST_CLEAR_DEFAULT);
- if (code == MT_ST_DEF_DENSITY) {
- if (value == MT_ST_CLEAR_DEFAULT) {
- STm->default_density = (-1);
- printk(KERN_INFO "%s:I: Density default disabled.\n", name);
- }
- else {
- STm->default_density = value & 0xff;
- printk(KERN_INFO "%s:I: Density default set to %x\n",
- name, STm->default_density);
- }
- }
- else if (code == MT_ST_DEF_DRVBUFFER) {
- if (value == MT_ST_CLEAR_DEFAULT) {
- STp->default_drvbuffer = 0xff;
- printk(KERN_INFO "%s:I: Drive buffer default disabled.\n", name);
- }
- else {
- STp->default_drvbuffer = value & 7;
- printk(KERN_INFO "%s:I: Drive buffer default set to %x\n",
- name, STp->default_drvbuffer);
- }
- }
- else if (code == MT_ST_DEF_COMPRESSION) {
- if (value == MT_ST_CLEAR_DEFAULT) {
- STm->default_compression = ST_DONT_TOUCH;
- printk(KERN_INFO "%s:I: Compression default disabled.\n", name);
- }
- else {
- STm->default_compression = (value & 1 ? ST_YES : ST_NO);
- printk(KERN_INFO "%s:I: Compression default set to %x\n",
- name, (value & 1));
- }
- }
- }
- else
- return (-EIO);
-
- return 0;
-}
-
-
-/* Internal ioctl function */
-static int osst_int_ioctl(struct osst_tape * STp, struct osst_request ** aSRpnt,
- unsigned int cmd_in, unsigned long arg)
-{
- int timeout;
- long ltmp;
- int i, ioctl_result;
- int chg_eof = 1;
- unsigned char cmd[MAX_COMMAND_SIZE];
- struct osst_request * SRpnt = * aSRpnt;
- struct st_partstat * STps;
- int fileno, blkno, at_sm, frame_seq_numbr, logical_blk_num;
- int datalen = 0, direction = DMA_NONE;
- char * name = tape_name(STp);
-
- if (STp->ready != ST_READY && cmd_in != MTLOAD) {
- if (STp->ready == ST_NO_TAPE)
- return (-ENOMEDIUM);
- else
- return (-EIO);
- }
- timeout = STp->long_timeout;
- STps = &(STp->ps[STp->partition]);
- fileno = STps->drv_file;
- blkno = STps->drv_block;
- at_sm = STps->at_sm;
- frame_seq_numbr = STp->frame_seq_number;
- logical_blk_num = STp->logical_blk_num;
-
- memset(cmd, 0, MAX_COMMAND_SIZE);
- switch (cmd_in) {
- case MTFSFM:
- chg_eof = 0; /* Changed from the FSF after this */
- /* fall through */
- case MTFSF:
- if (STp->raw)
- return (-EIO);
- if (STp->linux_media)
- ioctl_result = osst_space_over_filemarks_forward_fast(STp, &SRpnt, cmd_in, arg);
- else
- ioctl_result = osst_space_over_filemarks_forward_slow(STp, &SRpnt, cmd_in, arg);
- if (fileno >= 0)
- fileno += arg;
- blkno = 0;
- at_sm &= (arg == 0);
- goto os_bypass;
-
- case MTBSF:
- chg_eof = 0; /* Changed from the FSF after this */
- /* fall through */
- case MTBSFM:
- if (STp->raw)
- return (-EIO);
- ioctl_result = osst_space_over_filemarks_backward(STp, &SRpnt, cmd_in, arg);
- if (fileno >= 0)
- fileno -= arg;
- blkno = (-1); /* We can't know the block number */
- at_sm &= (arg == 0);
- goto os_bypass;
-
- case MTFSR:
- case MTBSR:
-#if DEBUG
- if (debugging)
- printk(OSST_DEB_MSG "%s:D: Skipping %lu blocks %s from logical block %d\n",
- name, arg, cmd_in==MTFSR?"forward":"backward", logical_blk_num);
-#endif
- if (cmd_in == MTFSR) {
- logical_blk_num += arg;
- if (blkno >= 0) blkno += arg;
- }
- else {
- logical_blk_num -= arg;
- if (blkno >= 0) blkno -= arg;
- }
- ioctl_result = osst_seek_logical_blk(STp, &SRpnt, logical_blk_num);
- fileno = STps->drv_file;
- blkno = STps->drv_block;
- at_sm &= (arg == 0);
- goto os_bypass;
-
- case MTFSS:
- cmd[0] = SPACE;
- cmd[1] = 0x04; /* Space Setmarks */ /* FIXME -- OS can't do this? */
- cmd[2] = (arg >> 16);
- cmd[3] = (arg >> 8);
- cmd[4] = arg;
-#if DEBUG
- if (debugging)
- printk(OSST_DEB_MSG "%s:D: Spacing tape forward %d setmarks.\n", name,
- cmd[2] * 65536 + cmd[3] * 256 + cmd[4]);
-#endif
- if (arg != 0) {
- blkno = fileno = (-1);
- at_sm = 1;
- }
- break;
- case MTBSS:
- cmd[0] = SPACE;
- cmd[1] = 0x04; /* Space Setmarks */ /* FIXME -- OS can't do this? */
- ltmp = (-arg);
- cmd[2] = (ltmp >> 16);
- cmd[3] = (ltmp >> 8);
- cmd[4] = ltmp;
-#if DEBUG
- if (debugging) {
- if (cmd[2] & 0x80)
- ltmp = 0xff000000;
- ltmp = ltmp | (cmd[2] << 16) | (cmd[3] << 8) | cmd[4];
- printk(OSST_DEB_MSG "%s:D: Spacing tape backward %ld setmarks.\n",
- name, (-ltmp));
- }
-#endif
- if (arg != 0) {
- blkno = fileno = (-1);
- at_sm = 1;
- }
- break;
- case MTWEOF:
- if ((STps->rw == ST_WRITING || STp->dirty) && !STp->pos_unknown) {
- STp->write_type = OS_WRITE_DATA;
- ioctl_result = osst_flush_write_buffer(STp, &SRpnt);
- } else
- ioctl_result = 0;
-#if DEBUG
- if (debugging)
- printk(OSST_DEB_MSG "%s:D: Writing %ld filemark(s).\n", name, arg);
-#endif
- for (i=0; i<arg; i++)
- ioctl_result |= osst_write_filemark(STp, &SRpnt);
- if (fileno >= 0) fileno += arg;
- if (blkno >= 0) blkno = 0;
- goto os_bypass;
-
- case MTWSM:
- if (STp->write_prot)
- return (-EACCES);
- if (!STp->raw)
- return 0;
- cmd[0] = WRITE_FILEMARKS; /* FIXME -- need OS version */
- if (cmd_in == MTWSM)
- cmd[1] = 2;
- cmd[2] = (arg >> 16);
- cmd[3] = (arg >> 8);
- cmd[4] = arg;
- timeout = STp->timeout;
-#if DEBUG
- if (debugging)
- printk(OSST_DEB_MSG "%s:D: Writing %d setmark(s).\n", name,
- cmd[2] * 65536 + cmd[3] * 256 + cmd[4]);
-#endif
- if (fileno >= 0)
- fileno += arg;
- blkno = 0;
- at_sm = (cmd_in == MTWSM);
- break;
- case MTOFFL:
- case MTLOAD:
- case MTUNLOAD:
- case MTRETEN:
- cmd[0] = START_STOP;
- cmd[1] = 1; /* Don't wait for completion */
- if (cmd_in == MTLOAD) {
- if (STp->ready == ST_NO_TAPE)
- cmd[4] = 4; /* open tray */
- else
- cmd[4] = 1; /* load */
- }
- if (cmd_in == MTRETEN)
- cmd[4] = 3; /* retension then mount */
- if (cmd_in == MTOFFL)
- cmd[4] = 4; /* rewind then eject */
- timeout = STp->timeout;
-#if DEBUG
- if (debugging) {
- switch (cmd_in) {
- case MTUNLOAD:
- printk(OSST_DEB_MSG "%s:D: Unloading tape.\n", name);
- break;
- case MTLOAD:
- printk(OSST_DEB_MSG "%s:D: Loading tape.\n", name);
- break;
- case MTRETEN:
- printk(OSST_DEB_MSG "%s:D: Retensioning tape.\n", name);
- break;
- case MTOFFL:
- printk(OSST_DEB_MSG "%s:D: Ejecting tape.\n", name);
- break;
- }
- }
-#endif
- fileno = blkno = at_sm = frame_seq_numbr = logical_blk_num = 0 ;
- break;
- case MTNOP:
-#if DEBUG
- if (debugging)
- printk(OSST_DEB_MSG "%s:D: No-op on tape.\n", name);
-#endif
- return 0; /* Should do something ? */
- break;
- case MTEOM:
-#if DEBUG
- if (debugging)
- printk(OSST_DEB_MSG "%s:D: Spacing to end of recorded medium.\n", name);
-#endif
- if ((osst_position_tape_and_confirm(STp, &SRpnt, STp->eod_frame_ppos) < 0) ||
- (osst_get_logical_frame(STp, &SRpnt, -1, 0) < 0)) {
- ioctl_result = -EIO;
- goto os_bypass;
- }
- if (STp->buffer->aux->frame_type != OS_FRAME_TYPE_EOD) {
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: No EOD frame found where expected.\n", name);
-#endif
- ioctl_result = -EIO;
- goto os_bypass;
- }
- ioctl_result = osst_set_frame_position(STp, &SRpnt, STp->eod_frame_ppos, 0);
- fileno = STp->filemark_cnt;
- blkno = at_sm = 0;
- goto os_bypass;
-
- case MTERASE:
- if (STp->write_prot)
- return (-EACCES);
- ioctl_result = osst_reset_header(STp, &SRpnt);
- i = osst_write_eod(STp, &SRpnt);
- if (i < ioctl_result) ioctl_result = i;
- i = osst_position_tape_and_confirm(STp, &SRpnt, STp->eod_frame_ppos);
- if (i < ioctl_result) ioctl_result = i;
- fileno = blkno = at_sm = 0 ;
- goto os_bypass;
-
- case MTREW:
- cmd[0] = REZERO_UNIT; /* rewind */
- cmd[1] = 1;
-#if DEBUG
- if (debugging)
- printk(OSST_DEB_MSG "%s:D: Rewinding tape, Immed=%d.\n", name, cmd[1]);
-#endif
- fileno = blkno = at_sm = frame_seq_numbr = logical_blk_num = 0 ;
- break;
-
- case MTSETBLK: /* Set block length */
- if ((STps->drv_block == 0 ) &&
- !STp->dirty &&
- ((STp->buffer)->buffer_bytes == 0) &&
- ((arg & MT_ST_BLKSIZE_MASK) >= 512 ) &&
- ((arg & MT_ST_BLKSIZE_MASK) <= OS_DATA_SIZE) &&
- !(OS_DATA_SIZE % (arg & MT_ST_BLKSIZE_MASK)) ) {
- /*
- * Only allowed to change the block size if you opened the
- * device at the beginning of a file before writing anything.
-			 * Note that when reading, changing block_size is futile,
- * as the size used when writing overrides it.
- */
- STp->block_size = (arg & MT_ST_BLKSIZE_MASK);
- printk(KERN_INFO "%s:I: Block size set to %d bytes.\n",
- name, STp->block_size);
- return 0;
- }
- /* fall through */
- case MTSETDENSITY: /* Set tape density */
- case MTSETDRVBUFFER: /* Set drive buffering */
- case SET_DENS_AND_BLK: /* Set density and block size */
- chg_eof = 0;
- if (STp->dirty || (STp->buffer)->buffer_bytes != 0)
- return (-EIO); /* Not allowed if data in buffer */
- if ((cmd_in == MTSETBLK || cmd_in == SET_DENS_AND_BLK) &&
- (arg & MT_ST_BLKSIZE_MASK) != 0 &&
- (arg & MT_ST_BLKSIZE_MASK) != STp->block_size ) {
- printk(KERN_WARNING "%s:W: Illegal to set block size to %d%s.\n",
- name, (int)(arg & MT_ST_BLKSIZE_MASK),
- (OS_DATA_SIZE % (arg & MT_ST_BLKSIZE_MASK))?"":" now");
- return (-EINVAL);
- }
- return 0; /* FIXME silently ignore if block size didn't change */
-
- default:
- return (-ENOSYS);
- }
-
- SRpnt = osst_do_scsi(SRpnt, STp, cmd, datalen, direction, timeout, MAX_RETRIES, 1);
-
- ioctl_result = (STp->buffer)->syscall_result;
-
- if (!SRpnt) {
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Couldn't exec scsi cmd for IOCTL\n", name);
-#endif
- return ioctl_result;
- }
-
- if (!ioctl_result) { /* SCSI command successful */
- STp->frame_seq_number = frame_seq_numbr;
- STp->logical_blk_num = logical_blk_num;
- }
-
-os_bypass:
-#if DEBUG
- if (debugging)
- printk(OSST_DEB_MSG "%s:D: IOCTL (%d) Result=%d\n", name, cmd_in, ioctl_result);
-#endif
-
- if (!ioctl_result) { /* success */
-
- if (cmd_in == MTFSFM) {
- fileno--;
- blkno--;
- }
- if (cmd_in == MTBSFM) {
- fileno++;
- blkno++;
- }
- STps->drv_block = blkno;
- STps->drv_file = fileno;
- STps->at_sm = at_sm;
-
- if (cmd_in == MTEOM)
- STps->eof = ST_EOD;
- else if ((cmd_in == MTFSFM || cmd_in == MTBSF) && STps->eof == ST_FM_HIT) {
- ioctl_result = osst_seek_logical_blk(STp, &SRpnt, STp->logical_blk_num-1);
- STps->drv_block++;
- STp->logical_blk_num++;
- STp->frame_seq_number++;
- STp->frame_in_buffer = 0;
- STp->buffer->read_pointer = 0;
- }
- else if (cmd_in == MTFSF)
- STps->eof = (STp->first_frame_position >= STp->eod_frame_ppos)?ST_EOD:ST_FM;
- else if (chg_eof)
- STps->eof = ST_NOEOF;
-
- if (cmd_in == MTOFFL || cmd_in == MTUNLOAD)
- STp->rew_at_close = 0;
- else if (cmd_in == MTLOAD) {
- for (i=0; i < ST_NBR_PARTITIONS; i++) {
- STp->ps[i].rw = ST_IDLE;
- STp->ps[i].last_block_valid = 0;/* FIXME - where else is this field maintained? */
- }
- STp->partition = 0;
- }
-
- if (cmd_in == MTREW) {
- ioctl_result = osst_position_tape_and_confirm(STp, &SRpnt, STp->first_data_ppos);
- if (ioctl_result > 0)
- ioctl_result = 0;
- }
-
- } else if (cmd_in == MTBSF || cmd_in == MTBSFM ) {
- if (osst_position_tape_and_confirm(STp, &SRpnt, STp->first_data_ppos) < 0)
- STps->drv_file = STps->drv_block = -1;
- else
- STps->drv_file = STps->drv_block = 0;
- STps->eof = ST_NOEOF;
- } else if (cmd_in == MTFSF || cmd_in == MTFSFM) {
- if (osst_position_tape_and_confirm(STp, &SRpnt, STp->eod_frame_ppos) < 0)
- STps->drv_file = STps->drv_block = -1;
- else {
- STps->drv_file = STp->filemark_cnt;
- STps->drv_block = 0;
- }
- STps->eof = ST_EOD;
- } else if (cmd_in == MTBSR || cmd_in == MTFSR || cmd_in == MTWEOF || cmd_in == MTEOM) {
- STps->drv_file = STps->drv_block = (-1);
- STps->eof = ST_NOEOF;
- STp->header_ok = 0;
- } else if (cmd_in == MTERASE) {
- STp->header_ok = 0;
- } else if (SRpnt) { /* SCSI command was not completely successful. */
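-		/* EOM bit (bit 6 of sense byte 2) set: physical end of medium was reached */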
- if (SRpnt->sense[2] & 0x40) {
- STps->eof = ST_EOM_OK;
- STps->drv_block = 0;
- }
- if (chg_eof)
- STps->eof = ST_NOEOF;
-
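-		/* Sense key BLANK CHECK: the read ran into blank (unwritten) tape, treat it as end of data */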
- if ((SRpnt->sense[2] & 0x0f) == BLANK_CHECK)
- STps->eof = ST_EOD;
-
- if (cmd_in == MTLOAD && osst_wait_for_medium(STp, &SRpnt, 60))
- ioctl_result = osst_wait_ready(STp, &SRpnt, 5 * 60, OSST_WAIT_POSITION_COMPLETE);
- }
- *aSRpnt = SRpnt;
-
- return ioctl_result;
-}
-
-
-/* Open the device */
-static int __os_scsi_tape_open(struct inode * inode, struct file * filp)
-{
- unsigned short flags;
- int i, b_size, new_session = 0, retval = 0;
- unsigned char cmd[MAX_COMMAND_SIZE];
- struct osst_request * SRpnt = NULL;
- struct osst_tape * STp;
- struct st_modedef * STm;
- struct st_partstat * STps;
- char * name;
- int dev = TAPE_NR(inode);
- int mode = TAPE_MODE(inode);
-
- /*
-	 * We really want to do nonseekable_open(inode, filp) here, but some
- * versions of tar incorrectly call lseek on tapes and bail out if that
- * fails. So we disallow pread() and pwrite(), but permit lseeks.
- */
- filp->f_mode &= ~(FMODE_PREAD | FMODE_PWRITE);
-
- write_lock(&os_scsi_tapes_lock);
- if (dev >= osst_max_dev || os_scsi_tapes == NULL ||
- (STp = os_scsi_tapes[dev]) == NULL || !STp->device) {
- write_unlock(&os_scsi_tapes_lock);
- return (-ENXIO);
- }
-
- name = tape_name(STp);
-
- if (STp->in_use) {
- write_unlock(&os_scsi_tapes_lock);
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Device already in use.\n", name);
-#endif
- return (-EBUSY);
- }
- if (scsi_device_get(STp->device)) {
- write_unlock(&os_scsi_tapes_lock);
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Failed scsi_device_get.\n", name);
-#endif
- return (-ENXIO);
- }
- filp->private_data = STp;
- STp->in_use = 1;
- write_unlock(&os_scsi_tapes_lock);
- STp->rew_at_close = TAPE_REWIND(inode);
-
- if( !scsi_block_when_processing_errors(STp->device) ) {
- return -ENXIO;
- }
-
- if (mode != STp->current_mode) {
-#if DEBUG
- if (debugging)
- printk(OSST_DEB_MSG "%s:D: Mode change from %d to %d.\n",
- name, STp->current_mode, mode);
-#endif
- new_session = 1;
- STp->current_mode = mode;
- }
- STm = &(STp->modes[STp->current_mode]);
-
- flags = filp->f_flags;
- STp->write_prot = ((flags & O_ACCMODE) == O_RDONLY);
-
- STp->raw = TAPE_IS_RAW(inode);
- if (STp->raw)
- STp->header_ok = 0;
-
- /* Allocate data segments for this device's tape buffer */
- if (!enlarge_buffer(STp->buffer, STp->restr_dma)) {
- printk(KERN_ERR "%s:E: Unable to allocate memory segments for tape buffer.\n", name);
- retval = (-EOVERFLOW);
- goto err_out;
- }
- if (STp->buffer->buffer_size >= OS_FRAME_SIZE) {
- for (i = 0, b_size = 0;
- (i < STp->buffer->sg_segs) && ((b_size + STp->buffer->sg[i].length) <= OS_DATA_SIZE);
- b_size += STp->buffer->sg[i++].length);
- STp->buffer->aux = (os_aux_t *) (page_address(sg_page(&STp->buffer->sg[i])) + OS_DATA_SIZE - b_size);
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: b_data points to %p in segment 0 at %p\n", name,
-			STp->buffer->b_data, page_address(sg_page(&STp->buffer->sg[0])));
- printk(OSST_DEB_MSG "%s:D: AUX points to %p in segment %d at %p\n", name,
-			STp->buffer->aux, i, page_address(sg_page(&STp->buffer->sg[i])));
-#endif
- } else {
- STp->buffer->aux = NULL; /* this had better never happen! */
- printk(KERN_NOTICE "%s:A: Framesize %d too large for buffer.\n", name, OS_FRAME_SIZE);
- retval = (-EIO);
- goto err_out;
- }
- STp->buffer->writing = 0;
- STp->buffer->syscall_result = 0;
- STp->dirty = 0;
- for (i=0; i < ST_NBR_PARTITIONS; i++) {
- STps = &(STp->ps[i]);
- STps->rw = ST_IDLE;
- }
- STp->ready = ST_READY;
-#if DEBUG
- STp->nbr_waits = STp->nbr_finished = 0;
-#endif
-
- memset (cmd, 0, MAX_COMMAND_SIZE);
- cmd[0] = TEST_UNIT_READY;
-
- SRpnt = osst_do_scsi(NULL, STp, cmd, 0, DMA_NONE, STp->timeout, MAX_RETRIES, 1);
- if (!SRpnt) {
- retval = (STp->buffer)->syscall_result; /* FIXME - valid? */
- goto err_out;
- }
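-	/* Valid sense data with sense key NOT READY and ASC 0x04: drive not ready, the ASCQ tells why */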
- if ((SRpnt->sense[0] & 0x70) == 0x70 &&
- (SRpnt->sense[2] & 0x0f) == NOT_READY &&
- SRpnt->sense[12] == 4 ) {
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Unit not ready, cause %x\n", name, SRpnt->sense[13]);
-#endif
- if (filp->f_flags & O_NONBLOCK) {
- retval = -EAGAIN;
- goto err_out;
- }
- if (SRpnt->sense[13] == 2) { /* initialize command required (LOAD) */
- memset (cmd, 0, MAX_COMMAND_SIZE);
- cmd[0] = START_STOP;
- cmd[1] = 1;
- cmd[4] = 1;
- SRpnt = osst_do_scsi(SRpnt, STp, cmd, 0, DMA_NONE,
- STp->timeout, MAX_RETRIES, 1);
- }
- osst_wait_ready(STp, &SRpnt, (SRpnt->sense[13]==1?15:3) * 60, 0);
- }
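-	/* Sense key UNIT ATTENTION: the medium may have changed or the drive was reset, so rescan the state */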
- if ((SRpnt->sense[0] & 0x70) == 0x70 &&
- (SRpnt->sense[2] & 0x0f) == UNIT_ATTENTION) { /* New media? */
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Unit wants attention\n", name);
-#endif
- STp->header_ok = 0;
-
- for (i=0; i < 10; i++) {
-
- memset (cmd, 0, MAX_COMMAND_SIZE);
- cmd[0] = TEST_UNIT_READY;
-
- SRpnt = osst_do_scsi(SRpnt, STp, cmd, 0, DMA_NONE,
- STp->timeout, MAX_RETRIES, 1);
- if ((SRpnt->sense[0] & 0x70) != 0x70 ||
- (SRpnt->sense[2] & 0x0f) != UNIT_ATTENTION)
- break;
- }
-
- STp->pos_unknown = 0;
- STp->partition = STp->new_partition = 0;
- if (STp->can_partitions)
- STp->nbr_partitions = 1; /* This guess will be updated later if necessary */
- for (i=0; i < ST_NBR_PARTITIONS; i++) {
- STps = &(STp->ps[i]);
- STps->rw = ST_IDLE; /* FIXME - seems to be redundant... */
- STps->eof = ST_NOEOF;
- STps->at_sm = 0;
- STps->last_block_valid = 0;
- STps->drv_block = 0;
- STps->drv_file = 0 ;
- }
- new_session = 1;
- STp->recover_count = 0;
- STp->abort_count = 0;
- }
- /*
- * if we have valid headers from before, and the drive/tape seem untouched,
- * open without reconfiguring and re-reading the headers
- */
- if (!STp->buffer->syscall_result && STp->header_ok &&
- !SRpnt->result && SRpnt->sense[0] == 0) {
-
- memset(cmd, 0, MAX_COMMAND_SIZE);
- cmd[0] = MODE_SENSE;
- cmd[1] = 8;
- cmd[2] = VENDOR_IDENT_PAGE;
- cmd[4] = VENDOR_IDENT_PAGE_LENGTH + MODE_HEADER_LENGTH;
-
- SRpnt = osst_do_scsi(SRpnt, STp, cmd, cmd[4], DMA_FROM_DEVICE, STp->timeout, 0, 1);
-
- if (STp->buffer->syscall_result ||
- STp->buffer->b_data[MODE_HEADER_LENGTH + 2] != 'L' ||
- STp->buffer->b_data[MODE_HEADER_LENGTH + 3] != 'I' ||
- STp->buffer->b_data[MODE_HEADER_LENGTH + 4] != 'N' ||
- STp->buffer->b_data[MODE_HEADER_LENGTH + 5] != '4' ) {
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Signature was changed to %c%c%c%c\n", name,
- STp->buffer->b_data[MODE_HEADER_LENGTH + 2],
- STp->buffer->b_data[MODE_HEADER_LENGTH + 3],
- STp->buffer->b_data[MODE_HEADER_LENGTH + 4],
- STp->buffer->b_data[MODE_HEADER_LENGTH + 5]);
-#endif
- STp->header_ok = 0;
- }
- i = STp->first_frame_position;
- if (STp->header_ok && i == osst_get_frame_position(STp, &SRpnt)) {
- if (STp->door_locked == ST_UNLOCKED) {
- if (do_door_lock(STp, 1))
- printk(KERN_INFO "%s:I: Can't lock drive door\n", name);
- else
- STp->door_locked = ST_LOCKED_AUTO;
- }
- if (!STp->frame_in_buffer) {
- STp->block_size = (STm->default_blksize > 0) ?
- STm->default_blksize : OS_DATA_SIZE;
- STp->buffer->buffer_bytes = STp->buffer->read_pointer = 0;
- }
- STp->buffer->buffer_blocks = OS_DATA_SIZE / STp->block_size;
- STp->fast_open = 1;
- osst_release_request(SRpnt);
- return 0;
- }
-#if DEBUG
- if (i != STp->first_frame_position)
- printk(OSST_DEB_MSG "%s:D: Tape position changed from %d to %d\n",
- name, i, STp->first_frame_position);
-#endif
- STp->header_ok = 0;
- }
- STp->fast_open = 0;
-
- if ((STp->buffer)->syscall_result != 0 && /* in all error conditions except no medium */
- (SRpnt->sense[2] != 2 || SRpnt->sense[12] != 0x3A) ) {
-
- memset(cmd, 0, MAX_COMMAND_SIZE);
- cmd[0] = MODE_SELECT;
- cmd[1] = 0x10;
- cmd[4] = 4 + MODE_HEADER_LENGTH;
-
- (STp->buffer)->b_data[0] = cmd[4] - 1;
- (STp->buffer)->b_data[1] = 0; /* Medium Type - ignoring */
- (STp->buffer)->b_data[2] = 0; /* Reserved */
- (STp->buffer)->b_data[3] = 0; /* Block Descriptor Length */
- (STp->buffer)->b_data[MODE_HEADER_LENGTH + 0] = 0x3f;
- (STp->buffer)->b_data[MODE_HEADER_LENGTH + 1] = 1;
- (STp->buffer)->b_data[MODE_HEADER_LENGTH + 2] = 2;
- (STp->buffer)->b_data[MODE_HEADER_LENGTH + 3] = 3;
-
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Applying soft reset\n", name);
-#endif
- SRpnt = osst_do_scsi(SRpnt, STp, cmd, cmd[4], DMA_TO_DEVICE, STp->timeout, 0, 1);
-
- STp->header_ok = 0;
-
- for (i=0; i < 10; i++) {
-
- memset (cmd, 0, MAX_COMMAND_SIZE);
- cmd[0] = TEST_UNIT_READY;
-
- SRpnt = osst_do_scsi(SRpnt, STp, cmd, 0, DMA_NONE,
- STp->timeout, MAX_RETRIES, 1);
- if ((SRpnt->sense[0] & 0x70) != 0x70 ||
- (SRpnt->sense[2] & 0x0f) == NOT_READY)
- break;
-
- if ((SRpnt->sense[2] & 0x0f) == UNIT_ATTENTION) {
- int j;
-
- STp->pos_unknown = 0;
- STp->partition = STp->new_partition = 0;
- if (STp->can_partitions)
- STp->nbr_partitions = 1; /* This guess will be updated later if necessary */
- for (j = 0; j < ST_NBR_PARTITIONS; j++) {
- STps = &(STp->ps[j]);
- STps->rw = ST_IDLE;
- STps->eof = ST_NOEOF;
- STps->at_sm = 0;
- STps->last_block_valid = 0;
- STps->drv_block = 0;
- STps->drv_file = 0 ;
- }
- new_session = 1;
- }
- }
- }
-
- if (osst_wait_ready(STp, &SRpnt, 15 * 60, 0)) /* FIXME - not allowed with NOBLOCK */
- printk(KERN_INFO "%s:I: Device did not become Ready in open\n", name);
-
- if ((STp->buffer)->syscall_result != 0) {
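-		/* ASC 0x3A (medium not present): report "no tape" instead of a generic not-ready failure */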
- if ((STp->device)->scsi_level >= SCSI_2 &&
- (SRpnt->sense[0] & 0x70) == 0x70 &&
- (SRpnt->sense[2] & 0x0f) == NOT_READY &&
- SRpnt->sense[12] == 0x3a) { /* Check ASC */
- STp->ready = ST_NO_TAPE;
- } else
- STp->ready = ST_NOT_READY;
- osst_release_request(SRpnt);
- SRpnt = NULL;
- STp->density = 0; /* Clear the erroneous "residue" */
- STp->write_prot = 0;
- STp->block_size = 0;
- STp->ps[0].drv_file = STp->ps[0].drv_block = (-1);
- STp->partition = STp->new_partition = 0;
- STp->door_locked = ST_UNLOCKED;
- return 0;
- }
-
- osst_configure_onstream(STp, &SRpnt);
-
- STp->block_size = STp->raw ? OS_FRAME_SIZE : (
- (STm->default_blksize > 0) ? STm->default_blksize : OS_DATA_SIZE);
- STp->buffer->buffer_blocks = STp->raw ? 1 : OS_DATA_SIZE / STp->block_size;
- STp->buffer->buffer_bytes =
- STp->buffer->read_pointer =
- STp->frame_in_buffer = 0;
-
-#if DEBUG
- if (debugging)
- printk(OSST_DEB_MSG "%s:D: Block size: %d, frame size: %d, buffer size: %d (%d blocks).\n",
- name, STp->block_size, OS_FRAME_SIZE, (STp->buffer)->buffer_size,
- (STp->buffer)->buffer_blocks);
-#endif
-
- if (STp->drv_write_prot) {
- STp->write_prot = 1;
-#if DEBUG
- if (debugging)
- printk(OSST_DEB_MSG "%s:D: Write protected\n", name);
-#endif
- if ((flags & O_ACCMODE) == O_WRONLY || (flags & O_ACCMODE) == O_RDWR) {
- retval = (-EROFS);
- goto err_out;
- }
- }
-
- if (new_session) { /* Change the drive parameters for the new mode */
-#if DEBUG
- if (debugging)
- printk(OSST_DEB_MSG "%s:D: New Session\n", name);
-#endif
- STp->density_changed = STp->blksize_changed = 0;
- STp->compression_changed = 0;
- }
-
- /*
- * properly position the tape and check the ADR headers
- */
- if (STp->door_locked == ST_UNLOCKED) {
- if (do_door_lock(STp, 1))
- printk(KERN_INFO "%s:I: Can't lock drive door\n", name);
- else
- STp->door_locked = ST_LOCKED_AUTO;
- }
-
- osst_analyze_headers(STp, &SRpnt);
-
- osst_release_request(SRpnt);
- SRpnt = NULL;
-
- return 0;
-
-err_out:
- if (SRpnt != NULL)
- osst_release_request(SRpnt);
- normalize_buffer(STp->buffer);
- STp->header_ok = 0;
- STp->in_use = 0;
- scsi_device_put(STp->device);
-
- return retval;
-}
-
-/* BKL pushdown: spaghetti avoidance wrapper */
-static int os_scsi_tape_open(struct inode * inode, struct file * filp)
-{
- int ret;
-
- mutex_lock(&osst_int_mutex);
- ret = __os_scsi_tape_open(inode, filp);
- mutex_unlock(&osst_int_mutex);
- return ret;
-}
-
-
-
-/* Flush the tape buffer before close */
-static int os_scsi_tape_flush(struct file * filp, fl_owner_t id)
-{
- int result = 0, result2;
- struct osst_tape * STp = filp->private_data;
- struct st_modedef * STm = &(STp->modes[STp->current_mode]);
- struct st_partstat * STps = &(STp->ps[STp->partition]);
- struct osst_request * SRpnt = NULL;
- char * name = tape_name(STp);
-
- if (file_count(filp) > 1)
- return 0;
-
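-	/* Write data still buffered and the tape position is known: flush it to the tape first */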
- if ((STps->rw == ST_WRITING || STp->dirty) && !STp->pos_unknown) {
- STp->write_type = OS_WRITE_DATA;
- result = osst_flush_write_buffer(STp, &SRpnt);
- if (result != 0 && result != (-ENOSPC))
- goto out;
- }
- if ( STps->rw >= ST_WRITING && !STp->pos_unknown) {
-
-#if DEBUG
- if (debugging) {
- printk(OSST_DEB_MSG "%s:D: File length %ld bytes.\n",
- name, (long)(filp->f_pos));
- printk(OSST_DEB_MSG "%s:D: Async write waits %d, finished %d.\n",
- name, STp->nbr_waits, STp->nbr_finished);
- }
-#endif
- result = osst_write_trailer(STp, &SRpnt, !(STp->rew_at_close));
-#if DEBUG
- if (debugging)
- printk(OSST_DEB_MSG "%s:D: Buffer flushed, %d EOF(s) written\n",
- name, 1+STp->two_fm);
-#endif
- }
- else if (!STp->rew_at_close) {
- STps = &(STp->ps[STp->partition]);
- if (!STm->sysv || STps->rw != ST_READING) {
- if (STp->can_bsr)
- result = osst_flush_buffer(STp, &SRpnt, 0); /* this is the default path */
- else if (STps->eof == ST_FM_HIT) {
- result = cross_eof(STp, &SRpnt, 0);
- if (result) {
- if (STps->drv_file >= 0)
- STps->drv_file++;
- STps->drv_block = 0;
- STps->eof = ST_FM;
- }
- else
- STps->eof = ST_NOEOF;
- }
- }
- else if ((STps->eof == ST_NOEOF &&
- !(result = cross_eof(STp, &SRpnt, 1))) ||
- STps->eof == ST_FM_HIT) {
- if (STps->drv_file >= 0)
- STps->drv_file++;
- STps->drv_block = 0;
- STps->eof = ST_FM;
- }
- }
-
-out:
- if (STp->rew_at_close) {
- result2 = osst_position_tape_and_confirm(STp, &SRpnt, STp->first_data_ppos);
- STps->drv_file = STps->drv_block = STp->frame_seq_number = STp->logical_blk_num = 0;
- if (result == 0 && result2 < 0)
- result = result2;
- }
- if (SRpnt) osst_release_request(SRpnt);
-
- if (STp->abort_count || STp->recover_count) {
- printk(KERN_INFO "%s:I:", name);
- if (STp->abort_count)
- printk(" %d unrecovered errors", STp->abort_count);
- if (STp->recover_count)
- printk(" %d recovered errors", STp->recover_count);
- if (STp->write_count)
- printk(" in %d frames written", STp->write_count);
- if (STp->read_count)
- printk(" in %d frames read", STp->read_count);
- printk("\n");
- STp->recover_count = 0;
- STp->abort_count = 0;
- }
- STp->write_count = 0;
- STp->read_count = 0;
-
- return result;
-}
-
-
-/* Close the device and release it */
-static int os_scsi_tape_close(struct inode * inode, struct file * filp)
-{
- int result = 0;
- struct osst_tape * STp = filp->private_data;
-
- if (STp->door_locked == ST_LOCKED_AUTO)
- do_door_lock(STp, 0);
-
- if (STp->raw)
- STp->header_ok = 0;
-
- normalize_buffer(STp->buffer);
- write_lock(&os_scsi_tapes_lock);
- STp->in_use = 0;
- write_unlock(&os_scsi_tapes_lock);
-
- scsi_device_put(STp->device);
-
- return result;
-}
-
-
-/* The ioctl command */
-static long osst_ioctl(struct file * file,
- unsigned int cmd_in, unsigned long arg)
-{
- int i, cmd_nr, cmd_type, blk, retval = 0;
- struct st_modedef * STm;
- struct st_partstat * STps;
- struct osst_request * SRpnt = NULL;
- struct osst_tape * STp = file->private_data;
- char * name = tape_name(STp);
- void __user * p = (void __user *)arg;
-
- mutex_lock(&osst_int_mutex);
- if (mutex_lock_interruptible(&STp->lock)) {
- mutex_unlock(&osst_int_mutex);
- return -ERESTARTSYS;
- }
-
-#if DEBUG
- if (debugging && !STp->in_use) {
- printk(OSST_DEB_MSG "%s:D: Incorrect device.\n", name);
- retval = (-EIO);
- goto out;
- }
-#endif
- STm = &(STp->modes[STp->current_mode]);
- STps = &(STp->ps[STp->partition]);
-
- /*
- * If we are in the middle of error recovery, don't let anyone
-	 * else try to use this device. Also, if error recovery fails, it
-	 * may try to take the device offline, in which case all further
- * access to the device is prohibited.
- */
- retval = scsi_ioctl_block_when_processing_errors(STp->device, cmd_in,
- file->f_flags & O_NDELAY);
- if (retval)
- goto out;
-
- cmd_type = _IOC_TYPE(cmd_in);
- cmd_nr = _IOC_NR(cmd_in);
-#if DEBUG
- printk(OSST_DEB_MSG "%s:D: Ioctl %d,%d in %s mode\n", name,
- cmd_type, cmd_nr, STp->raw?"raw":"normal");
-#endif
- if (cmd_type == _IOC_TYPE(MTIOCTOP) && cmd_nr == _IOC_NR(MTIOCTOP)) {
- struct mtop mtc;
- int auto_weof = 0;
-
- if (_IOC_SIZE(cmd_in) != sizeof(mtc)) {
- retval = (-EINVAL);
- goto out;
- }
-
- i = copy_from_user((char *) &mtc, p, sizeof(struct mtop));
- if (i) {
- retval = (-EFAULT);
- goto out;
- }
-
- if (mtc.mt_op == MTSETDRVBUFFER && !capable(CAP_SYS_ADMIN)) {
- printk(KERN_WARNING "%s:W: MTSETDRVBUFFER only allowed for root.\n", name);
- retval = (-EPERM);
- goto out;
- }
-
- if (!STm->defined && (mtc.mt_op != MTSETDRVBUFFER && (mtc.mt_count & MT_ST_OPTIONS) == 0)) {
- retval = (-ENXIO);
- goto out;
- }
-
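-		/* Position is known: adjust file/block counts around a just-hit filemark and flush the buffer before moving */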
- if (!STp->pos_unknown) {
-
- if (STps->eof == ST_FM_HIT) {
- if (mtc.mt_op == MTFSF || mtc.mt_op == MTFSFM|| mtc.mt_op == MTEOM) {
- mtc.mt_count -= 1;
- if (STps->drv_file >= 0)
- STps->drv_file += 1;
- }
- else if (mtc.mt_op == MTBSF || mtc.mt_op == MTBSFM) {
- mtc.mt_count += 1;
- if (STps->drv_file >= 0)
- STps->drv_file += 1;
- }
- }
-
- if (mtc.mt_op == MTSEEK) {
- /* Old position must be restored if partition will be changed */
- i = !STp->can_partitions || (STp->new_partition != STp->partition);
- }
- else {
- i = mtc.mt_op == MTREW || mtc.mt_op == MTOFFL ||
- mtc.mt_op == MTRETEN || mtc.mt_op == MTEOM ||
- mtc.mt_op == MTLOCK || mtc.mt_op == MTLOAD ||
- mtc.mt_op == MTFSF || mtc.mt_op == MTFSFM ||
- mtc.mt_op == MTBSF || mtc.mt_op == MTBSFM ||
- mtc.mt_op == MTCOMPRESSION;
- }
- i = osst_flush_buffer(STp, &SRpnt, i);
- if (i < 0) {
- retval = i;
- goto out;
- }
- }
- else {
- /*
- * If there was a bus reset, block further access
- * to this device. If the user wants to rewind the tape,
- * then reset the flag and allow access again.
- */
- if(mtc.mt_op != MTREW &&
- mtc.mt_op != MTOFFL &&
- mtc.mt_op != MTRETEN &&
- mtc.mt_op != MTERASE &&
- mtc.mt_op != MTSEEK &&
- mtc.mt_op != MTEOM) {
- retval = (-EIO);
- goto out;
- }
- reset_state(STp);
- /* remove this when the midlevel properly clears was_reset */
- STp->device->was_reset = 0;
- }
-
- if (mtc.mt_op != MTCOMPRESSION && mtc.mt_op != MTLOCK &&
- mtc.mt_op != MTNOP && mtc.mt_op != MTSETBLK &&
- mtc.mt_op != MTSETDENSITY && mtc.mt_op != MTSETDRVBUFFER &&
- mtc.mt_op != MTMKPART && mtc.mt_op != MTSETPART &&
- mtc.mt_op != MTWEOF && mtc.mt_op != MTWSM ) {
-
- /*
- * The user tells us to move to another position on the tape.
- * If we were appending to the tape content, that would leave
-			 * the tape without a proper end; in that case, write EOD and
- * update the header to reflect its position.
- */
-#if DEBUG
- printk(KERN_WARNING "%s:D: auto_weod %s at ffp=%d,efp=%d,fsn=%d,lbn=%d,fn=%d,bn=%d\n", name,
- STps->rw >= ST_WRITING ? "write" : STps->rw == ST_READING ? "read" : "idle",
- STp->first_frame_position, STp->eod_frame_ppos, STp->frame_seq_number,
- STp->logical_blk_num, STps->drv_file, STps->drv_block );
-#endif
- if (STps->rw >= ST_WRITING && STp->first_frame_position >= STp->eod_frame_ppos) {
- auto_weof = ((STp->write_type != OS_WRITE_NEW_MARK) &&
- !(mtc.mt_op == MTREW || mtc.mt_op == MTOFFL));
- i = osst_write_trailer(STp, &SRpnt,
- !(mtc.mt_op == MTREW || mtc.mt_op == MTOFFL));
-#if DEBUG
- printk(KERN_WARNING "%s:D: post trailer xeof=%d,ffp=%d,efp=%d,fsn=%d,lbn=%d,fn=%d,bn=%d\n",
- name, auto_weof, STp->first_frame_position, STp->eod_frame_ppos,
- STp->frame_seq_number, STp->logical_blk_num, STps->drv_file, STps->drv_block );
-#endif
- if (i < 0) {
- retval = i;
- goto out;
- }
- }
- STps->rw = ST_IDLE;
- }
-
- if (mtc.mt_op == MTOFFL && STp->door_locked != ST_UNLOCKED)
- do_door_lock(STp, 0); /* Ignore result! */
-
- if (mtc.mt_op == MTSETDRVBUFFER &&
- (mtc.mt_count & MT_ST_OPTIONS) != 0) {
- retval = osst_set_options(STp, mtc.mt_count);
- goto out;
- }
-
- if (mtc.mt_op == MTSETPART) {
- if (mtc.mt_count >= STp->nbr_partitions)
- retval = -EINVAL;
- else {
- STp->new_partition = mtc.mt_count;
- retval = 0;
- }
- goto out;
- }
-
- if (mtc.mt_op == MTMKPART) {
- if (!STp->can_partitions) {
- retval = (-EINVAL);
- goto out;
- }
- if ((i = osst_int_ioctl(STp, &SRpnt, MTREW, 0)) < 0 /*||
- (i = partition_tape(inode, mtc.mt_count)) < 0*/) {
- retval = i;
- goto out;
- }
- for (i=0; i < ST_NBR_PARTITIONS; i++) {
- STp->ps[i].rw = ST_IDLE;
- STp->ps[i].at_sm = 0;
- STp->ps[i].last_block_valid = 0;
- }
- STp->partition = STp->new_partition = 0;
- STp->nbr_partitions = 1; /* Bad guess ?-) */
- STps->drv_block = STps->drv_file = 0;
- retval = 0;
- goto out;
- }
-
- if (mtc.mt_op == MTSEEK) {
- if (STp->raw)
- i = osst_set_frame_position(STp, &SRpnt, mtc.mt_count, 0);
- else
- i = osst_seek_sector(STp, &SRpnt, mtc.mt_count);
- if (!STp->can_partitions)
- STp->ps[0].rw = ST_IDLE;
- retval = i;
- goto out;
- }
-
- if (mtc.mt_op == MTLOCK || mtc.mt_op == MTUNLOCK) {
- retval = do_door_lock(STp, (mtc.mt_op == MTLOCK));
- goto out;
- }
-
- if (auto_weof)
- cross_eof(STp, &SRpnt, 0);
-
- if (mtc.mt_op == MTCOMPRESSION)
- retval = -EINVAL; /* OnStream drives don't have compression hardware */
- else
- /* MTBSF MTBSFM MTBSR MTBSS MTEOM MTERASE MTFSF MTFSFB MTFSR MTFSS
- * MTLOAD MTOFFL MTRESET MTRETEN MTREW MTUNLOAD MTWEOF MTWSM */
- retval = osst_int_ioctl(STp, &SRpnt, mtc.mt_op, mtc.mt_count);
- goto out;
- }
-
- if (!STm->defined) {
- retval = (-ENXIO);
- goto out;
- }
-
- if ((i = osst_flush_buffer(STp, &SRpnt, 0)) < 0) {
- retval = i;
- goto out;
- }
-
- if (cmd_type == _IOC_TYPE(MTIOCGET) && cmd_nr == _IOC_NR(MTIOCGET)) {
- struct mtget mt_status;
-
- if (_IOC_SIZE(cmd_in) != sizeof(struct mtget)) {
- retval = (-EINVAL);
- goto out;
- }
-
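-		/* Fill in the generic mtget status structure from the current driver state */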
- mt_status.mt_type = MT_ISONSTREAM_SC;
- mt_status.mt_erreg = STp->recover_erreg << MT_ST_SOFTERR_SHIFT;
- mt_status.mt_dsreg =
- ((STp->block_size << MT_ST_BLKSIZE_SHIFT) & MT_ST_BLKSIZE_MASK) |
- ((STp->density << MT_ST_DENSITY_SHIFT) & MT_ST_DENSITY_MASK);
- mt_status.mt_blkno = STps->drv_block;
- mt_status.mt_fileno = STps->drv_file;
- if (STp->block_size != 0) {
- if (STps->rw == ST_WRITING)
- mt_status.mt_blkno += (STp->buffer)->buffer_bytes / STp->block_size;
- else if (STps->rw == ST_READING)
- mt_status.mt_blkno -= ((STp->buffer)->buffer_bytes +
- STp->block_size - 1) / STp->block_size;
- }
-
- mt_status.mt_gstat = 0;
- if (STp->drv_write_prot)
- mt_status.mt_gstat |= GMT_WR_PROT(0xffffffff);
- if (mt_status.mt_blkno == 0) {
- if (mt_status.mt_fileno == 0)
- mt_status.mt_gstat |= GMT_BOT(0xffffffff);
- else
- mt_status.mt_gstat |= GMT_EOF(0xffffffff);
- }
- mt_status.mt_resid = STp->partition;
- if (STps->eof == ST_EOM_OK || STps->eof == ST_EOM_ERROR)
- mt_status.mt_gstat |= GMT_EOT(0xffffffff);
- else if (STps->eof >= ST_EOM_OK)
- mt_status.mt_gstat |= GMT_EOD(0xffffffff);
- if (STp->density == 1)
- mt_status.mt_gstat |= GMT_D_800(0xffffffff);
- else if (STp->density == 2)
- mt_status.mt_gstat |= GMT_D_1600(0xffffffff);
- else if (STp->density == 3)
- mt_status.mt_gstat |= GMT_D_6250(0xffffffff);
- if (STp->ready == ST_READY)
- mt_status.mt_gstat |= GMT_ONLINE(0xffffffff);
- if (STp->ready == ST_NO_TAPE)
- mt_status.mt_gstat |= GMT_DR_OPEN(0xffffffff);
- if (STps->at_sm)
- mt_status.mt_gstat |= GMT_SM(0xffffffff);
- if (STm->do_async_writes || (STm->do_buffer_writes && STp->block_size != 0) ||
- STp->drv_buffer != 0)
- mt_status.mt_gstat |= GMT_IM_REP_EN(0xffffffff);
-
- i = copy_to_user(p, &mt_status, sizeof(struct mtget));
- if (i) {
- retval = (-EFAULT);
- goto out;
- }
-
- STp->recover_erreg = 0; /* Clear after read */
- retval = 0;
- goto out;
- } /* End of MTIOCGET */
-
- if (cmd_type == _IOC_TYPE(MTIOCPOS) && cmd_nr == _IOC_NR(MTIOCPOS)) {
- struct mtpos mt_pos;
-
- if (_IOC_SIZE(cmd_in) != sizeof(struct mtpos)) {
- retval = (-EINVAL);
- goto out;
- }
- if (STp->raw)
- blk = osst_get_frame_position(STp, &SRpnt);
- else
- blk = osst_get_sector(STp, &SRpnt);
- if (blk < 0) {
- retval = blk;
- goto out;
- }
- mt_pos.mt_blkno = blk;
- i = copy_to_user(p, &mt_pos, sizeof(struct mtpos));
- if (i)
- retval = -EFAULT;
- goto out;
- }
- if (SRpnt) osst_release_request(SRpnt);
-
- mutex_unlock(&STp->lock);
-
- retval = scsi_ioctl(STp->device, cmd_in, p);
- mutex_unlock(&osst_int_mutex);
- return retval;
-
-out:
- if (SRpnt) osst_release_request(SRpnt);
-
- mutex_unlock(&STp->lock);
- mutex_unlock(&osst_int_mutex);
-
- return retval;
-}
-
-#ifdef CONFIG_COMPAT
-static long osst_compat_ioctl(struct file * file, unsigned int cmd_in, unsigned long arg)
-{
- struct osst_tape *STp = file->private_data;
- struct scsi_device *sdev = STp->device;
- int ret = -ENOIOCTLCMD;
-	if (sdev->host->hostt->compat_ioctl) {
-		ret = sdev->host->hostt->compat_ioctl(sdev, cmd_in, (void __user *)arg);
-	}
- return ret;
-}
-#endif
-
-
-
-/* Memory handling routines */
-
-/* Try to allocate a new tape buffer skeleton. Caller must not hold os_scsi_tapes_lock */
-static struct osst_buffer * new_tape_buffer( int from_initialization, int need_dma, int max_sg )
-{
- int i;
- gfp_t priority;
- struct osst_buffer *tb;
-
- if (from_initialization)
- priority = GFP_ATOMIC;
- else
- priority = GFP_KERNEL;
-
- i = sizeof(struct osst_buffer) + (osst_max_sg_segs - 1) * sizeof(struct scatterlist);
- tb = kzalloc(i, priority);
- if (!tb) {
- printk(KERN_NOTICE "osst :I: Can't allocate new tape buffer.\n");
- return NULL;
- }
-
- tb->sg_segs = tb->orig_sg_segs = 0;
- tb->use_sg = max_sg;
- tb->in_use = 1;
- tb->dma = need_dma;
- tb->buffer_size = 0;
-#if DEBUG
- if (debugging)
- printk(OSST_DEB_MSG
- "osst :D: Allocated tape buffer skeleton (%d bytes, %d segments, dma: %d).\n",
- i, max_sg, need_dma);
-#endif
- return tb;
-}
-
-/* Try to allocate a temporary (while a user has the device open) enlarged tape buffer */
-static int enlarge_buffer(struct osst_buffer *STbuffer, int need_dma)
-{
- int segs, nbr, max_segs, b_size, order, got;
- gfp_t priority;
-
- if (STbuffer->buffer_size >= OS_FRAME_SIZE)
- return 1;
-
- if (STbuffer->sg_segs) {
- printk(KERN_WARNING "osst :A: Buffer not previously normalized.\n");
- normalize_buffer(STbuffer);
- }
- /* See how many segments we can use -- need at least two */
- nbr = max_segs = STbuffer->use_sg;
- if (nbr <= 2)
- return 0;
-
- priority = GFP_KERNEL /* | __GFP_NOWARN */;
- if (need_dma)
- priority |= GFP_DMA;
-
- /* Try to allocate the first segment up to OS_DATA_SIZE and the others
- big enough to reach the goal (code assumes no segments in place) */
- for (b_size = OS_DATA_SIZE, order = OSST_FIRST_ORDER; b_size >= PAGE_SIZE; order--, b_size /= 2) {
- struct page *page = alloc_pages(priority, order);
-
- STbuffer->sg[0].offset = 0;
- if (page != NULL) {
- sg_set_page(&STbuffer->sg[0], page, b_size, 0);
- STbuffer->b_data = page_address(page);
- break;
- }
- }
- if (sg_page(&STbuffer->sg[0]) == NULL) {
- printk(KERN_NOTICE "osst :I: Can't allocate tape buffer main segment.\n");
- return 0;
- }
-	/* Got initial segment of 'b_size,order', continue with same size if possible, except for AUX */
- for (segs=STbuffer->sg_segs=1, got=b_size;
- segs < max_segs && got < OS_FRAME_SIZE; ) {
- struct page *page = alloc_pages(priority, (OS_FRAME_SIZE - got <= PAGE_SIZE) ? 0 : order);
- STbuffer->sg[segs].offset = 0;
- if (page == NULL) {
- printk(KERN_WARNING "osst :W: Failed to enlarge buffer to %d bytes.\n",
- OS_FRAME_SIZE);
-#if DEBUG
- STbuffer->buffer_size = got;
-#endif
- normalize_buffer(STbuffer);
- return 0;
- }
- sg_set_page(&STbuffer->sg[segs], page, (OS_FRAME_SIZE - got <= PAGE_SIZE / 2) ? (OS_FRAME_SIZE - got) : b_size, 0);
- got += STbuffer->sg[segs].length;
- STbuffer->buffer_size = got;
- STbuffer->sg_segs = ++segs;
- }
-#if DEBUG
- if (debugging) {
- printk(OSST_DEB_MSG
- "osst :D: Expanded tape buffer (%d bytes, %d->%d segments, dma: %d, at: %p).\n",
- got, STbuffer->orig_sg_segs, STbuffer->sg_segs, need_dma, STbuffer->b_data);
- printk(OSST_DEB_MSG
- "osst :D: segment sizes: first %d at %p, last %d bytes at %p.\n",
-			STbuffer->sg[0].length, page_address(sg_page(&STbuffer->sg[0])),
-			STbuffer->sg[segs-1].length, page_address(sg_page(&STbuffer->sg[segs-1])));
- }
-#endif
-
- return 1;
-}
-
-
-/* Release the segments */
-static void normalize_buffer(struct osst_buffer *STbuffer)
-{
- int i, order, b_size;
-
- for (i=0; i < STbuffer->sg_segs; i++) {
-
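-		/* Recover the allocation order used for this segment: smallest 2^order pages covering its length */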
- for (b_size = PAGE_SIZE, order = 0;
- b_size < STbuffer->sg[i].length;
- b_size *= 2, order++);
-
- __free_pages(sg_page(&STbuffer->sg[i]), order);
- STbuffer->buffer_size -= STbuffer->sg[i].length;
- }
-#if DEBUG
- if (debugging && STbuffer->orig_sg_segs < STbuffer->sg_segs)
- printk(OSST_DEB_MSG "osst :D: Buffer at %p normalized to %d bytes (segs %d).\n",
- STbuffer->b_data, STbuffer->buffer_size, STbuffer->sg_segs);
-#endif
- STbuffer->sg_segs = STbuffer->orig_sg_segs = 0;
-}
-
-
-/* Move data from the user buffer to the tape buffer. Returns zero (success) or
- negative error code. */
-static int append_to_buffer(const char __user *ubp, struct osst_buffer *st_bp, int do_count)
-{
- int i, cnt, res, offset;
-
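-	/* Skip to the scatterlist segment that contains the current fill point */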
- for (i=0, offset=st_bp->buffer_bytes;
- i < st_bp->sg_segs && offset >= st_bp->sg[i].length; i++)
- offset -= st_bp->sg[i].length;
- if (i == st_bp->sg_segs) { /* Should never happen */
- printk(KERN_WARNING "osst :A: Append_to_buffer offset overflow.\n");
- return (-EIO);
- }
- for ( ; i < st_bp->sg_segs && do_count > 0; i++) {
- cnt = st_bp->sg[i].length - offset < do_count ?
- st_bp->sg[i].length - offset : do_count;
- res = copy_from_user(page_address(sg_page(&st_bp->sg[i])) + offset, ubp, cnt);
- if (res)
- return (-EFAULT);
- do_count -= cnt;
- st_bp->buffer_bytes += cnt;
- ubp += cnt;
- offset = 0;
- }
- if (do_count) { /* Should never happen */
- printk(KERN_WARNING "osst :A: Append_to_buffer overflow (left %d).\n",
- do_count);
- return (-EIO);
- }
- return 0;
-}
-
-
-/* Move data from the tape buffer to the user buffer. Returns zero (success) or
- negative error code. */
-static int from_buffer(struct osst_buffer *st_bp, char __user *ubp, int do_count)
-{
- int i, cnt, res, offset;
-
- for (i=0, offset=st_bp->read_pointer;
- i < st_bp->sg_segs && offset >= st_bp->sg[i].length; i++)
- offset -= st_bp->sg[i].length;
- if (i == st_bp->sg_segs) { /* Should never happen */
- printk(KERN_WARNING "osst :A: From_buffer offset overflow.\n");
- return (-EIO);
- }
- for ( ; i < st_bp->sg_segs && do_count > 0; i++) {
- cnt = st_bp->sg[i].length - offset < do_count ?
- st_bp->sg[i].length - offset : do_count;
- res = copy_to_user(ubp, page_address(sg_page(&st_bp->sg[i])) + offset, cnt);
- if (res)
- return (-EFAULT);
- do_count -= cnt;
- st_bp->buffer_bytes -= cnt;
- st_bp->read_pointer += cnt;
- ubp += cnt;
- offset = 0;
- }
- if (do_count) { /* Should never happen */
- printk(KERN_WARNING "osst :A: From_buffer overflow (left %d).\n", do_count);
- return (-EIO);
- }
- return 0;
-}
-
-/* Sets the tail of the buffer after the fill point to zero.
- Returns zero (success) or negative error code. */
-static int osst_zero_buffer_tail(struct osst_buffer *st_bp)
-{
- int i, offset, do_count, cnt;
-
- for (i = 0, offset = st_bp->buffer_bytes;
- i < st_bp->sg_segs && offset >= st_bp->sg[i].length; i++)
- offset -= st_bp->sg[i].length;
- if (i == st_bp->sg_segs) { /* Should never happen */
- printk(KERN_WARNING "osst :A: Zero_buffer offset overflow.\n");
- return (-EIO);
- }
- for (do_count = OS_DATA_SIZE - st_bp->buffer_bytes;
- i < st_bp->sg_segs && do_count > 0; i++) {
- cnt = st_bp->sg[i].length - offset < do_count ?
- st_bp->sg[i].length - offset : do_count ;
- memset(page_address(sg_page(&st_bp->sg[i])) + offset, 0, cnt);
- do_count -= cnt;
- offset = 0;
- }
- if (do_count) { /* Should never happen */
- printk(KERN_WARNING "osst :A: Zero_buffer overflow (left %d).\n", do_count);
- return (-EIO);
- }
- return 0;
-}
-
-/* Copy an osst 32K chunk of memory into the buffer.
- Returns zero (success) or negative error code. */
-static int osst_copy_to_buffer(struct osst_buffer *st_bp, unsigned char *ptr)
-{
- int i, cnt, do_count = OS_DATA_SIZE;
-
- for (i = 0; i < st_bp->sg_segs && do_count > 0; i++) {
- cnt = st_bp->sg[i].length < do_count ?
- st_bp->sg[i].length : do_count ;
- memcpy(page_address(sg_page(&st_bp->sg[i])), ptr, cnt);
- do_count -= cnt;
- ptr += cnt;
- }
- if (do_count || i != st_bp->sg_segs-1) { /* Should never happen */
- printk(KERN_WARNING "osst :A: Copy_to_buffer overflow (left %d at sg %d).\n",
- do_count, i);
- return (-EIO);
- }
- return 0;
-}
-
-/* Copy an osst 32K chunk of memory from the buffer.
- Returns zero (success) or negative error code. */
-static int osst_copy_from_buffer(struct osst_buffer *st_bp, unsigned char *ptr)
-{
- int i, cnt, do_count = OS_DATA_SIZE;
-
- for (i = 0; i < st_bp->sg_segs && do_count > 0; i++) {
- cnt = st_bp->sg[i].length < do_count ?
- st_bp->sg[i].length : do_count ;
- memcpy(ptr, page_address(sg_page(&st_bp->sg[i])), cnt);
- do_count -= cnt;
- ptr += cnt;
- }
- if (do_count || i != st_bp->sg_segs-1) { /* Should never happen */
- printk(KERN_WARNING "osst :A: Copy_from_buffer overflow (left %d at sg %d).\n",
- do_count, i);
- return (-EIO);
- }
- return 0;
-}
-
-
-/* Module housekeeping */
-
-static void validate_options (void)
-{
- if (max_dev > 0)
- osst_max_dev = max_dev;
- if (write_threshold_kbs > 0)
- osst_write_threshold = write_threshold_kbs * ST_KILOBYTE;
- if (osst_write_threshold > osst_buffer_size)
- osst_write_threshold = osst_buffer_size;
- if (max_sg_segs >= OSST_FIRST_SG)
- osst_max_sg_segs = max_sg_segs;
-#if DEBUG
- printk(OSST_DEB_MSG "osst :D: max tapes %d, write threshold %d, max s/g segs %d.\n",
- osst_max_dev, osst_write_threshold, osst_max_sg_segs);
-#endif
-}
-
-#ifndef MODULE
-/* Set the boot options. Syntax: osst=xxx,yyy,...
-   where xxx is the write threshold in 1024-byte blocks,
-   and yyy is the number of s/g segments to use. */
-static int __init osst_setup (char *str)
-{
- int i, ints[5];
- char *stp;
-
- stp = get_options(str, ARRAY_SIZE(ints), ints);
-
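-	/* Numeric form osst=x,y,... assigns the values to the parameters in order; otherwise parse name=value pairs */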
- if (ints[0] > 0) {
- for (i = 0; i < ints[0] && i < ARRAY_SIZE(parms); i++)
- *parms[i].val = ints[i + 1];
- } else {
- while (stp != NULL) {
- for (i = 0; i < ARRAY_SIZE(parms); i++) {
- int len = strlen(parms[i].name);
- if (!strncmp(stp, parms[i].name, len) &&
- (*(stp + len) == ':' || *(stp + len) == '=')) {
- *parms[i].val =
- simple_strtoul(stp + len + 1, NULL, 0);
- break;
- }
- }
- if (i >= ARRAY_SIZE(parms))
- printk(KERN_INFO "osst :I: Illegal parameter in '%s'\n",
- stp);
- stp = strchr(stp, ',');
- if (stp)
- stp++;
- }
- }
-
- return 1;
-}
-
-__setup("osst=", osst_setup);
-
-#endif
-
-static const struct file_operations osst_fops = {
- .owner = THIS_MODULE,
- .read = osst_read,
- .write = osst_write,
- .unlocked_ioctl = osst_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = osst_compat_ioctl,
-#endif
- .open = os_scsi_tape_open,
- .flush = os_scsi_tape_flush,
- .release = os_scsi_tape_close,
- .llseek = noop_llseek,
-};
-
-static int osst_supports(struct scsi_device * SDp)
-{
- struct osst_support_data {
- char *vendor;
- char *model;
- char *rev;
- char *driver_hint; /* Name of the correct driver, NULL if unknown */
- };
-
-static struct osst_support_data support_list[] = {
- /* {"XXX", "Yy-", "", NULL}, example */
- SIGS_FROM_OSST,
- {NULL, }};
-
- struct osst_support_data *rp;
-
- /* We are willing to drive OnStream SC-x0 as well as the
-	 * IDE, ParPort, FireWire, USB variants, if accessible by
-	 * an emulation layer (ide-scsi, usb-storage, ...) */
-
- for (rp=&(support_list[0]); rp->vendor != NULL; rp++)
- if (!strncmp(rp->vendor, SDp->vendor, strlen(rp->vendor)) &&
- !strncmp(rp->model, SDp->model, strlen(rp->model)) &&
- !strncmp(rp->rev, SDp->rev, strlen(rp->rev)))
- return 1;
- return 0;
-}
-
-/*
- * sysfs support for osst driver parameter information
- */
-
-static ssize_t version_show(struct device_driver *ddd, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%s\n", osst_version);
-}
-
-static DRIVER_ATTR_RO(version);
-
-static int osst_create_sysfs_files(struct device_driver *sysfs)
-{
- return driver_create_file(sysfs, &driver_attr_version);
-}
-
-static void osst_remove_sysfs_files(struct device_driver *sysfs)
-{
- driver_remove_file(sysfs, &driver_attr_version);
-}
-
-/*
- * sysfs support for accessing ADR header information
- */
-
-static ssize_t osst_adr_rev_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct osst_tape * STp = (struct osst_tape *) dev_get_drvdata (dev);
- ssize_t l = 0;
-
- if (STp && STp->header_ok && STp->linux_media)
- l = snprintf(buf, PAGE_SIZE, "%d.%d\n", STp->header_cache->major_rev, STp->header_cache->minor_rev);
- return l;
-}
-
-DEVICE_ATTR(ADR_rev, S_IRUGO, osst_adr_rev_show, NULL);
-
-static ssize_t osst_linux_media_version_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct osst_tape * STp = (struct osst_tape *) dev_get_drvdata (dev);
- ssize_t l = 0;
-
- if (STp && STp->header_ok && STp->linux_media)
- l = snprintf(buf, PAGE_SIZE, "LIN%d\n", STp->linux_media_version);
- return l;
-}
-
-DEVICE_ATTR(media_version, S_IRUGO, osst_linux_media_version_show, NULL);
-
-static ssize_t osst_capacity_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct osst_tape * STp = (struct osst_tape *) dev_get_drvdata (dev);
- ssize_t l = 0;
-
- if (STp && STp->header_ok && STp->linux_media)
- l = snprintf(buf, PAGE_SIZE, "%d\n", STp->capacity);
- return l;
-}
-
-DEVICE_ATTR(capacity, S_IRUGO, osst_capacity_show, NULL);
-
-static ssize_t osst_first_data_ppos_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct osst_tape * STp = (struct osst_tape *) dev_get_drvdata (dev);
- ssize_t l = 0;
-
- if (STp && STp->header_ok && STp->linux_media)
- l = snprintf(buf, PAGE_SIZE, "%d\n", STp->first_data_ppos);
- return l;
-}
-
-DEVICE_ATTR(BOT_frame, S_IRUGO, osst_first_data_ppos_show, NULL);
-
-static ssize_t osst_eod_frame_ppos_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct osst_tape * STp = (struct osst_tape *) dev_get_drvdata (dev);
- ssize_t l = 0;
-
- if (STp && STp->header_ok && STp->linux_media)
- l = snprintf(buf, PAGE_SIZE, "%d\n", STp->eod_frame_ppos);
- return l;
-}
-
-DEVICE_ATTR(EOD_frame, S_IRUGO, osst_eod_frame_ppos_show, NULL);
-
-static ssize_t osst_filemark_cnt_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct osst_tape * STp = (struct osst_tape *) dev_get_drvdata (dev);
- ssize_t l = 0;
-
- if (STp && STp->header_ok && STp->linux_media)
- l = snprintf(buf, PAGE_SIZE, "%d\n", STp->filemark_cnt);
- return l;
-}
-
-DEVICE_ATTR(file_count, S_IRUGO, osst_filemark_cnt_show, NULL);
-
-static struct class *osst_sysfs_class;
-
-static int osst_sysfs_init(void)
-{
- osst_sysfs_class = class_create(THIS_MODULE, "onstream_tape");
- if (IS_ERR(osst_sysfs_class)) {
- printk(KERN_ERR "osst :W: Unable to register sysfs class\n");
- return PTR_ERR(osst_sysfs_class);
- }
-
- return 0;
-}
-
-static void osst_sysfs_destroy(dev_t dev)
-{
- device_destroy(osst_sysfs_class, dev);
-}
-
-static int osst_sysfs_add(dev_t dev, struct device *device, struct osst_tape * STp, char * name)
-{
- struct device *osst_member;
- int err;
-
- osst_member = device_create(osst_sysfs_class, device, dev, STp,
- "%s", name);
- if (IS_ERR(osst_member)) {
- printk(KERN_WARNING "osst :W: Unable to add sysfs class member %s\n", name);
- return PTR_ERR(osst_member);
- }
-
- err = device_create_file(osst_member, &dev_attr_ADR_rev);
- if (err)
- goto err_out;
- err = device_create_file(osst_member, &dev_attr_media_version);
- if (err)
- goto err_out;
- err = device_create_file(osst_member, &dev_attr_capacity);
- if (err)
- goto err_out;
- err = device_create_file(osst_member, &dev_attr_BOT_frame);
- if (err)
- goto err_out;
- err = device_create_file(osst_member, &dev_attr_EOD_frame);
- if (err)
- goto err_out;
- err = device_create_file(osst_member, &dev_attr_file_count);
- if (err)
- goto err_out;
-
- return 0;
-
-err_out:
- osst_sysfs_destroy(dev);
- return err;
-}
-
-static void osst_sysfs_cleanup(void)
-{
- class_destroy(osst_sysfs_class);
-}
-
-/*
- * osst startup / cleanup code
- */
-
-static int osst_probe(struct device *dev)
-{
- struct scsi_device * SDp = to_scsi_device(dev);
- struct osst_tape * tpnt;
- struct st_modedef * STm;
- struct st_partstat * STps;
- struct osst_buffer * buffer;
- struct gendisk * drive;
- int i, dev_num, err = -ENODEV;
-
- if (SDp->type != TYPE_TAPE || !osst_supports(SDp))
- return -ENODEV;
-
- drive = alloc_disk(1);
- if (!drive) {
- printk(KERN_ERR "osst :E: Out of memory. Device not attached.\n");
- return -ENODEV;
- }
-
- /* if this is the first attach, build the infrastructure */
- write_lock(&os_scsi_tapes_lock);
- if (os_scsi_tapes == NULL) {
- os_scsi_tapes = kmalloc_array(osst_max_dev,
- sizeof(struct osst_tape *),
- GFP_ATOMIC);
- if (os_scsi_tapes == NULL) {
- write_unlock(&os_scsi_tapes_lock);
- printk(KERN_ERR "osst :E: Unable to allocate array for OnStream SCSI tapes.\n");
- goto out_put_disk;
- }
- for (i=0; i < osst_max_dev; ++i) os_scsi_tapes[i] = NULL;
- }
-
- if (osst_nr_dev >= osst_max_dev) {
- write_unlock(&os_scsi_tapes_lock);
- printk(KERN_ERR "osst :E: Too many tape devices (max. %d).\n", osst_max_dev);
- goto out_put_disk;
- }
-
- /* find a free minor number */
- for (i = 0; i < osst_max_dev && os_scsi_tapes[i]; i++)
- ;
- if(i >= osst_max_dev) panic ("Scsi_devices corrupt (osst)");
- dev_num = i;
-
- /* allocate a struct osst_tape for this device */
- tpnt = kzalloc(sizeof(struct osst_tape), GFP_ATOMIC);
- if (!tpnt) {
- write_unlock(&os_scsi_tapes_lock);
- printk(KERN_ERR "osst :E: Can't allocate device descriptor, device not attached.\n");
- goto out_put_disk;
- }
-
- /* allocate a buffer for this device */
- i = SDp->host->sg_tablesize;
- if (osst_max_sg_segs < i)
- i = osst_max_sg_segs;
- buffer = new_tape_buffer(1, SDp->host->unchecked_isa_dma, i);
- if (buffer == NULL) {
- write_unlock(&os_scsi_tapes_lock);
- printk(KERN_ERR "osst :E: Unable to allocate a tape buffer, device not attached.\n");
- kfree(tpnt);
- goto out_put_disk;
- }
- os_scsi_tapes[dev_num] = tpnt;
- tpnt->buffer = buffer;
- tpnt->device = SDp;
- drive->private_data = &tpnt->driver;
- sprintf(drive->disk_name, "osst%d", dev_num);
- tpnt->driver = &osst_template;
- tpnt->drive = drive;
- tpnt->in_use = 0;
- tpnt->capacity = 0xfffff;
- tpnt->dirty = 0;
- tpnt->drv_buffer = 1; /* Try buffering if no mode sense */
- tpnt->restr_dma = (SDp->host)->unchecked_isa_dma;
- tpnt->density = 0;
- tpnt->do_auto_lock = OSST_AUTO_LOCK;
- tpnt->can_bsr = OSST_IN_FILE_POS;
- tpnt->can_partitions = 0;
- tpnt->two_fm = OSST_TWO_FM;
- tpnt->fast_mteom = OSST_FAST_MTEOM;
- tpnt->scsi2_logical = OSST_SCSI2LOGICAL; /* FIXME */
- tpnt->write_threshold = osst_write_threshold;
- tpnt->default_drvbuffer = 0xff; /* No forced buffering */
- tpnt->partition = 0;
- tpnt->new_partition = 0;
- tpnt->nbr_partitions = 0;
- tpnt->min_block = 512;
- tpnt->max_block = OS_DATA_SIZE;
- tpnt->timeout = OSST_TIMEOUT;
- tpnt->long_timeout = OSST_LONG_TIMEOUT;
-
- /* Recognize OnStream tapes */
- /* We don't need to test for OnStream, as this has been done in detect () */
- tpnt->os_fw_rev = osst_parse_firmware_rev (SDp->rev);
- tpnt->omit_blklims = 1;
-
- tpnt->poll = (strncmp(SDp->model, "DI-", 3) == 0) ||
- (strncmp(SDp->model, "FW-", 3) == 0) || OSST_FW_NEED_POLL(tpnt->os_fw_rev,SDp);
- tpnt->frame_in_buffer = 0;
- tpnt->header_ok = 0;
- tpnt->linux_media = 0;
- tpnt->header_cache = NULL;
-
- for (i=0; i < ST_NBR_MODES; i++) {
- STm = &(tpnt->modes[i]);
- STm->defined = 0;
- STm->sysv = OSST_SYSV;
- STm->defaults_for_writes = 0;
- STm->do_async_writes = OSST_ASYNC_WRITES;
- STm->do_buffer_writes = OSST_BUFFER_WRITES;
- STm->do_read_ahead = OSST_READ_AHEAD;
- STm->default_compression = ST_DONT_TOUCH;
- STm->default_blksize = 512;
- STm->default_density = (-1); /* No forced density */
- }
-
- for (i=0; i < ST_NBR_PARTITIONS; i++) {
- STps = &(tpnt->ps[i]);
- STps->rw = ST_IDLE;
- STps->eof = ST_NOEOF;
- STps->at_sm = 0;
- STps->last_block_valid = 0;
- STps->drv_block = (-1);
- STps->drv_file = (-1);
- }
-
- tpnt->current_mode = 0;
- tpnt->modes[0].defined = 1;
- tpnt->modes[2].defined = 1;
- tpnt->density_changed = tpnt->compression_changed = tpnt->blksize_changed = 0;
-
- mutex_init(&tpnt->lock);
- osst_nr_dev++;
- write_unlock(&os_scsi_tapes_lock);
-
- {
- char name[8];
-
- /* Rewind entry */
- err = osst_sysfs_add(MKDEV(OSST_MAJOR, dev_num), dev, tpnt, tape_name(tpnt));
- if (err)
- goto out_free_buffer;
-
- /* No-rewind entry */
- snprintf(name, 8, "%s%s", "n", tape_name(tpnt));
- err = osst_sysfs_add(MKDEV(OSST_MAJOR, dev_num + 128), dev, tpnt, name);
- if (err)
- goto out_free_sysfs1;
- }
-
- sdev_printk(KERN_INFO, SDp,
- "osst :I: Attached OnStream %.5s tape as %s\n",
- SDp->model, tape_name(tpnt));
-
- return 0;
-
-out_free_sysfs1:
- osst_sysfs_destroy(MKDEV(OSST_MAJOR, dev_num));
-out_free_buffer:
- kfree(buffer);
-out_put_disk:
- put_disk(drive);
- return err;
-}
-
-static int osst_remove(struct device *dev)
-{
- struct scsi_device * SDp = to_scsi_device(dev);
- struct osst_tape * tpnt;
- int i;
-
- if ((SDp->type != TYPE_TAPE) || (osst_nr_dev <= 0))
- return 0;
-
- write_lock(&os_scsi_tapes_lock);
- for(i=0; i < osst_max_dev; i++) {
- if((tpnt = os_scsi_tapes[i]) && (tpnt->device == SDp)) {
- osst_sysfs_destroy(MKDEV(OSST_MAJOR, i));
- osst_sysfs_destroy(MKDEV(OSST_MAJOR, i+128));
- tpnt->device = NULL;
- put_disk(tpnt->drive);
- os_scsi_tapes[i] = NULL;
- osst_nr_dev--;
- write_unlock(&os_scsi_tapes_lock);
- vfree(tpnt->header_cache);
- if (tpnt->buffer) {
- normalize_buffer(tpnt->buffer);
- kfree(tpnt->buffer);
- }
- kfree(tpnt);
- return 0;
- }
- }
- write_unlock(&os_scsi_tapes_lock);
- return 0;
-}
-
-static int __init init_osst(void)
-{
- int err;
-
- printk(KERN_INFO "osst :I: Tape driver with OnStream support version %s\nosst :I: %s\n", osst_version, cvsid);
-
- validate_options();
-
- err = osst_sysfs_init();
- if (err)
- return err;
-
- err = register_chrdev(OSST_MAJOR, "osst", &osst_fops);
- if (err < 0) {
- printk(KERN_ERR "osst :E: Unable to register major %d for OnStream tapes\n", OSST_MAJOR);
- goto err_out;
- }
-
- err = scsi_register_driver(&osst_template.gendrv);
- if (err)
- goto err_out_chrdev;
-
- err = osst_create_sysfs_files(&osst_template.gendrv);
- if (err)
- goto err_out_scsidrv;
-
- return 0;
-
-err_out_scsidrv:
- scsi_unregister_driver(&osst_template.gendrv);
-err_out_chrdev:
- unregister_chrdev(OSST_MAJOR, "osst");
-err_out:
- osst_sysfs_cleanup();
- return err;
-}
-
-static void __exit exit_osst (void)
-{
- int i;
- struct osst_tape * STp;
-
- osst_remove_sysfs_files(&osst_template.gendrv);
- scsi_unregister_driver(&osst_template.gendrv);
- unregister_chrdev(OSST_MAJOR, "osst");
- osst_sysfs_cleanup();
-
- if (os_scsi_tapes) {
- for (i=0; i < osst_max_dev; ++i) {
- if (!(STp = os_scsi_tapes[i])) continue;
-			/* Defensive cleanup; this is normally done already during detach */
- vfree(STp->header_cache);
- if (STp->buffer) {
- normalize_buffer(STp->buffer);
- kfree(STp->buffer);
- }
- put_disk(STp->drive);
- kfree(STp);
- }
- kfree(os_scsi_tapes);
- }
- printk(KERN_INFO "osst :I: Unloaded.\n");
-}
-
-module_init(init_osst);
-module_exit(exit_osst);
diff --git a/drivers/scsi/osst.h b/drivers/scsi/osst.h
deleted file mode 100644
index b90ae280853d..000000000000
--- a/drivers/scsi/osst.h
+++ /dev/null
@@ -1,651 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * $Header: /cvsroot/osst/Driver/osst.h,v 1.16 2005/01/01 21:13:35 wriede Exp $
- */
-
-#include <asm/byteorder.h>
-#include <linux/completion.h>
-#include <linux/mutex.h>
-
-/* FIXME - rename and use the following two types or delete them!
- * and the types really should go to st.h anyway...
- * INQUIRY packet command - Data Format (From Table 6-8 of QIC-157C)
- */
-typedef struct {
- unsigned device_type :5; /* Peripheral Device Type */
- unsigned reserved0_765 :3; /* Peripheral Qualifier - Reserved */
- unsigned reserved1_6t0 :7; /* Reserved */
- unsigned rmb :1; /* Removable Medium Bit */
- unsigned ansi_version :3; /* ANSI Version */
- unsigned ecma_version :3; /* ECMA Version */
- unsigned iso_version :2; /* ISO Version */
- unsigned response_format :4; /* Response Data Format */
- unsigned reserved3_45 :2; /* Reserved */
- unsigned reserved3_6 :1; /* TrmIOP - Reserved */
- unsigned reserved3_7 :1; /* AENC - Reserved */
- u8 additional_length; /* Additional Length (total_length-4) */
- u8 rsv5, rsv6, rsv7; /* Reserved */
- u8 vendor_id[8]; /* Vendor Identification */
- u8 product_id[16]; /* Product Identification */
- u8 revision_level[4]; /* Revision Level */
- u8 vendor_specific[20]; /* Vendor Specific - Optional */
- u8 reserved56t95[40]; /* Reserved - Optional */
- /* Additional information may be returned */
-} idetape_inquiry_result_t;
-
-/*
- * READ POSITION packet command - Data Format (From Table 6-57)
- */
-typedef struct {
- unsigned reserved0_10 :2; /* Reserved */
- unsigned bpu :1; /* Block Position Unknown */
- unsigned reserved0_543 :3; /* Reserved */
- unsigned eop :1; /* End Of Partition */
- unsigned bop :1; /* Beginning Of Partition */
- u8 partition; /* Partition Number */
- u8 reserved2, reserved3; /* Reserved */
- u32 first_block; /* First Block Location */
- u32 last_block; /* Last Block Location (Optional) */
- u8 reserved12; /* Reserved */
- u8 blocks_in_buffer[3]; /* Blocks In Buffer - (Optional) */
- u32 bytes_in_buffer; /* Bytes In Buffer (Optional) */
-} idetape_read_position_result_t;
-
-/*
- * Follows structures which are related to the SELECT SENSE / MODE SENSE
- * packet commands.
- */
-#define COMPRESSION_PAGE 0x0f
-#define COMPRESSION_PAGE_LENGTH 16
-
-#define CAPABILITIES_PAGE 0x2a
-#define CAPABILITIES_PAGE_LENGTH 20
-
-#define TAPE_PARAMTR_PAGE 0x2b
-#define TAPE_PARAMTR_PAGE_LENGTH 16
-
-#define NUMBER_RETRIES_PAGE 0x2f
-#define NUMBER_RETRIES_PAGE_LENGTH 4
-
-#define BLOCK_SIZE_PAGE 0x30
-#define BLOCK_SIZE_PAGE_LENGTH 4
-
-#define BUFFER_FILLING_PAGE 0x33
-#define BUFFER_FILLING_PAGE_LENGTH 4
-
-#define VENDOR_IDENT_PAGE 0x36
-#define VENDOR_IDENT_PAGE_LENGTH 8
-
-#define LOCATE_STATUS_PAGE 0x37
-#define LOCATE_STATUS_PAGE_LENGTH 0
-
-#define MODE_HEADER_LENGTH 4
-
-
-/*
- * REQUEST SENSE packet command result - Data Format.
- */
-typedef struct {
-	unsigned error_code :7; /* Current or deferred errors */
- unsigned valid :1; /* The information field conforms to QIC-157C */
- u8 reserved1 :8; /* Segment Number - Reserved */
- unsigned sense_key :4; /* Sense Key */
- unsigned reserved2_4 :1; /* Reserved */
- unsigned ili :1; /* Incorrect Length Indicator */
- unsigned eom :1; /* End Of Medium */
- unsigned filemark :1; /* Filemark */
- u32 information __attribute__ ((packed));
- u8 asl; /* Additional sense length (n-7) */
- u32 command_specific; /* Additional command specific information */
- u8 asc; /* Additional Sense Code */
- u8 ascq; /* Additional Sense Code Qualifier */
- u8 replaceable_unit_code; /* Field Replaceable Unit Code */
- unsigned sk_specific1 :7; /* Sense Key Specific */
- unsigned sksv :1; /* Sense Key Specific information is valid */
- u8 sk_specific2; /* Sense Key Specific */
- u8 sk_specific3; /* Sense Key Specific */
- u8 pad[2]; /* Padding to 20 bytes */
-} idetape_request_sense_result_t;
-
-/*
- * Mode Parameter Header for the MODE SENSE packet command
- */
-typedef struct {
- u8 mode_data_length; /* Length of the following data transfer */
- u8 medium_type; /* Medium Type */
- u8 dsp; /* Device Specific Parameter */
- u8 bdl; /* Block Descriptor Length */
-} osst_mode_parameter_header_t;
-
-/*
- * Mode Parameter Block Descriptor for the MODE SENSE packet command
- *
- * Support for block descriptors is optional.
- */
-typedef struct {
- u8 density_code; /* Medium density code */
- u8 blocks[3]; /* Number of blocks */
- u8 reserved4; /* Reserved */
- u8 length[3]; /* Block Length */
-} osst_parameter_block_descriptor_t;
-
-/*
- * The Data Compression Page, as returned by the MODE SENSE packet command.
- */
-typedef struct {
-#if defined(__BIG_ENDIAN_BITFIELD)
- unsigned ps :1;
- unsigned reserved0 :1; /* Reserved */
- unsigned page_code :6; /* Page Code - Should be 0xf */
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- unsigned page_code :6; /* Page Code - Should be 0xf */
- unsigned reserved0 :1; /* Reserved */
- unsigned ps :1;
-#else
-#error "Please fix <asm/byteorder.h>"
-#endif
- u8 page_length; /* Page Length - Should be 14 */
-#if defined(__BIG_ENDIAN_BITFIELD)
- unsigned dce :1; /* Data Compression Enable */
- unsigned dcc :1; /* Data Compression Capable */
- unsigned reserved2 :6; /* Reserved */
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- unsigned reserved2 :6; /* Reserved */
- unsigned dcc :1; /* Data Compression Capable */
- unsigned dce :1; /* Data Compression Enable */
-#else
-#error "Please fix <asm/byteorder.h>"
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD)
- unsigned dde :1; /* Data Decompression Enable */
- unsigned red :2; /* Report Exception on Decompression */
- unsigned reserved3 :5; /* Reserved */
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- unsigned reserved3 :5; /* Reserved */
- unsigned red :2; /* Report Exception on Decompression */
- unsigned dde :1; /* Data Decompression Enable */
-#else
-#error "Please fix <asm/byteorder.h>"
-#endif
- u32 ca; /* Compression Algorithm */
- u32 da; /* Decompression Algorithm */
- u8 reserved[4]; /* Reserved */
-} osst_data_compression_page_t;
-
-/*
- * The Medium Partition Page, as returned by the MODE SENSE packet command.
- */
-typedef struct {
-#if defined(__BIG_ENDIAN_BITFIELD)
- unsigned ps :1;
- unsigned reserved1_6 :1; /* Reserved */
- unsigned page_code :6; /* Page Code - Should be 0x11 */
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- unsigned page_code :6; /* Page Code - Should be 0x11 */
- unsigned reserved1_6 :1; /* Reserved */
- unsigned ps :1;
-#else
-#error "Please fix <asm/byteorder.h>"
-#endif
- u8 page_length; /* Page Length - Should be 6 */
- u8 map; /* Maximum Additional Partitions - Should be 0 */
- u8 apd; /* Additional Partitions Defined - Should be 0 */
-#if defined(__BIG_ENDIAN_BITFIELD)
- unsigned fdp :1; /* Fixed Data Partitions */
- unsigned sdp :1; /* Should be 0 */
- unsigned idp :1; /* Should be 0 */
- unsigned psum :2; /* Should be 0 */
- unsigned reserved4_012 :3; /* Reserved */
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- unsigned reserved4_012 :3; /* Reserved */
- unsigned psum :2; /* Should be 0 */
- unsigned idp :1; /* Should be 0 */
- unsigned sdp :1; /* Should be 0 */
- unsigned fdp :1; /* Fixed Data Partitions */
-#else
-#error "Please fix <asm/byteorder.h>"
-#endif
- u8 mfr; /* Medium Format Recognition */
- u8 reserved[2]; /* Reserved */
-} osst_medium_partition_page_t;
-
-/*
- * Capabilities and Mechanical Status Page
- */
-typedef struct {
-#if defined(__BIG_ENDIAN_BITFIELD)
- unsigned reserved1_67 :2;
- unsigned page_code :6; /* Page code - Should be 0x2a */
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- unsigned page_code :6; /* Page code - Should be 0x2a */
- unsigned reserved1_67 :2;
-#else
-#error "Please fix <asm/byteorder.h>"
-#endif
- u8 page_length; /* Page Length - Should be 0x12 */
- u8 reserved2, reserved3;
-#if defined(__BIG_ENDIAN_BITFIELD)
- unsigned reserved4_67 :2;
- unsigned sprev :1; /* Supports SPACE in the reverse direction */
- unsigned reserved4_1234 :4;
- unsigned ro :1; /* Read Only Mode */
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- unsigned ro :1; /* Read Only Mode */
- unsigned reserved4_1234 :4;
- unsigned sprev :1; /* Supports SPACE in the reverse direction */
- unsigned reserved4_67 :2;
-#else
-#error "Please fix <asm/byteorder.h>"
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD)
- unsigned reserved5_67 :2;
- unsigned qfa :1; /* Supports the QFA two partition formats */
- unsigned reserved5_4 :1;
- unsigned efmt :1; /* Supports ERASE command initiated formatting */
- unsigned reserved5_012 :3;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- unsigned reserved5_012 :3;
- unsigned efmt :1; /* Supports ERASE command initiated formatting */
- unsigned reserved5_4 :1;
- unsigned qfa :1; /* Supports the QFA two partition formats */
- unsigned reserved5_67 :2;
-#else
-#error "Please fix <asm/byteorder.h>"
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD)
- unsigned cmprs :1; /* Supports data compression */
- unsigned ecc :1; /* Supports error correction */
- unsigned reserved6_45 :2; /* Reserved */
- unsigned eject :1; /* The device can eject the volume */
- unsigned prevent :1; /* The device defaults in the prevent state after power up */
- unsigned locked :1; /* The volume is locked */
- unsigned lock :1; /* Supports locking the volume */
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- unsigned lock :1; /* Supports locking the volume */
- unsigned locked :1; /* The volume is locked */
- unsigned prevent :1; /* The device defaults in the prevent state after power up */
- unsigned eject :1; /* The device can eject the volume */
- unsigned reserved6_45 :2; /* Reserved */
- unsigned ecc :1; /* Supports error correction */
- unsigned cmprs :1; /* Supports data compression */
-#else
-#error "Please fix <asm/byteorder.h>"
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD)
- unsigned blk32768 :1; /* slowb - the device restricts the byte count for PIO */
- /* transfers for slow buffer memory ??? */
- /* Also 32768 block size in some cases */
- unsigned reserved7_3_6 :4;
- unsigned blk1024 :1; /* Supports 1024 bytes block size */
- unsigned blk512 :1; /* Supports 512 bytes block size */
- unsigned reserved7_0 :1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- unsigned reserved7_0 :1;
- unsigned blk512 :1; /* Supports 512 bytes block size */
- unsigned blk1024 :1; /* Supports 1024 bytes block size */
- unsigned reserved7_3_6 :4;
- unsigned blk32768 :1; /* slowb - the device restricts the byte count for PIO */
- /* transfers for slow buffer memory ??? */
- /* Also 32768 block size in some cases */
-#else
-#error "Please fix <asm/byteorder.h>"
-#endif
- __be16 max_speed; /* Maximum speed supported in KBps */
- u8 reserved10, reserved11;
- __be16 ctl; /* Continuous Transfer Limit in blocks */
- __be16 speed; /* Current Speed, in KBps */
- __be16 buffer_size; /* Buffer Size, in 512 bytes */
- u8 reserved18, reserved19;
-} osst_capabilities_page_t;
-
-/*
- * Block Size Page
- */
-typedef struct {
-#if defined(__BIG_ENDIAN_BITFIELD)
- unsigned ps :1;
- unsigned reserved1_6 :1;
- unsigned page_code :6; /* Page code - Should be 0x30 */
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- unsigned page_code :6; /* Page code - Should be 0x30 */
- unsigned reserved1_6 :1;
- unsigned ps :1;
-#else
-#error "Please fix <asm/byteorder.h>"
-#endif
- u8 page_length; /* Page Length - Should be 2 */
- u8 reserved2;
-#if defined(__BIG_ENDIAN_BITFIELD)
- unsigned one :1;
- unsigned reserved2_6 :1;
- unsigned record32_5 :1;
- unsigned record32 :1;
- unsigned reserved2_23 :2;
- unsigned play32_5 :1;
- unsigned play32 :1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- unsigned play32 :1;
- unsigned play32_5 :1;
- unsigned reserved2_23 :2;
- unsigned record32 :1;
- unsigned record32_5 :1;
- unsigned reserved2_6 :1;
- unsigned one :1;
-#else
-#error "Please fix <asm/byteorder.h>"
-#endif
-} osst_block_size_page_t;
-
-/*
- * Tape Parameters Page
- */
-typedef struct {
-#if defined(__BIG_ENDIAN_BITFIELD)
- unsigned ps :1;
- unsigned reserved1_6 :1;
- unsigned page_code :6; /* Page code - Should be 0x2b */
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- unsigned page_code :6; /* Page code - Should be 0x2b */
- unsigned reserved1_6 :1;
- unsigned ps :1;
-#else
-#error "Please fix <asm/byteorder.h>"
-#endif
- u8 reserved2;
- u8 density;
- u8 reserved3,reserved4;
- __be16 segtrk;
- __be16 trks;
- u8 reserved5,reserved6,reserved7,reserved8,reserved9,reserved10;
-} osst_tape_paramtr_page_t;
-
-/* OnStream definitions */
-
-#define OS_CONFIG_PARTITION (0xff)
-#define OS_DATA_PARTITION (0)
-#define OS_PARTITION_VERSION (1)
-
-/*
- * partition
- */
-typedef struct os_partition_s {
- __u8 partition_num;
- __u8 par_desc_ver;
- __be16 wrt_pass_cntr;
- __be32 first_frame_ppos;
- __be32 last_frame_ppos;
- __be32 eod_frame_ppos;
-} os_partition_t;
-
-/*
- * DAT entry
- */
-typedef struct os_dat_entry_s {
- __be32 blk_sz;
- __be16 blk_cnt;
- __u8 flags;
- __u8 reserved;
-} os_dat_entry_t;
-
-/*
- * DAT
- */
-#define OS_DAT_FLAGS_DATA (0xc)
-#define OS_DAT_FLAGS_MARK (0x1)
-
-typedef struct os_dat_s {
- __u8 dat_sz;
- __u8 reserved1;
- __u8 entry_cnt;
- __u8 reserved3;
- os_dat_entry_t dat_list[16];
-} os_dat_t;
-
-/*
- * Frame types
- */
-#define OS_FRAME_TYPE_FILL (0)
-#define OS_FRAME_TYPE_EOD (1 << 0)
-#define OS_FRAME_TYPE_MARKER (1 << 1)
-#define OS_FRAME_TYPE_HEADER (1 << 3)
-#define OS_FRAME_TYPE_DATA (1 << 7)
-
-/*
- * AUX
- */
-typedef struct os_aux_s {
- __be32 format_id; /* hardware compatibility AUX is based on */
- char application_sig[4]; /* driver used to write this media */
- __be32 hdwr; /* reserved */
- __be32 update_frame_cntr; /* for configuration frame */
- __u8 frame_type;
- __u8 frame_type_reserved;
- __u8 reserved_18_19[2];
- os_partition_t partition;
- __u8 reserved_36_43[8];
- __be32 frame_seq_num;
- __be32 logical_blk_num_high;
- __be32 logical_blk_num;
- os_dat_t dat;
- __u8 reserved188_191[4];
- __be32 filemark_cnt;
- __be32 phys_fm;
- __be32 last_mark_ppos;
- __u8 reserved204_223[20];
-
- /*
- * __u8 app_specific[32];
- *
- * Linux specific fields:
- */
- __be32 next_mark_ppos; /* when known, points to next marker */
- __be32 last_mark_lbn; /* storing the log_blk_num of the last mark extends the ADR spec */
- __u8 linux_specific[24];
-
- __u8 reserved_256_511[256];
-} os_aux_t;
-
-#define OS_FM_TAB_MAX 1024
-
-typedef struct os_fm_tab_s {
- __u8 fm_part_num;
- __u8 reserved_1;
- __u8 fm_tab_ent_sz;
- __u8 reserved_3;
- __be16 fm_tab_ent_cnt;
- __u8 reserved6_15[10];
- __be32 fm_tab_ent[OS_FM_TAB_MAX];
-} os_fm_tab_t;
-
-typedef struct os_ext_trk_ey_s {
- __u8 et_part_num;
- __u8 fmt;
- __be16 fm_tab_off;
- __u8 reserved4_7[4];
- __be32 last_hlb_hi;
- __be32 last_hlb;
- __be32 last_pp;
- __u8 reserved20_31[12];
-} os_ext_trk_ey_t;
-
-typedef struct os_ext_trk_tb_s {
- __u8 nr_stream_part;
- __u8 reserved_1;
- __u8 et_ent_sz;
- __u8 reserved3_15[13];
- os_ext_trk_ey_t dat_ext_trk_ey;
- os_ext_trk_ey_t qfa_ext_trk_ey;
-} os_ext_trk_tb_t;
-
-typedef struct os_header_s {
- char ident_str[8];
- __u8 major_rev;
- __u8 minor_rev;
- __be16 ext_trk_tb_off;
- __u8 reserved12_15[4];
- __u8 pt_par_num;
- __u8 pt_reserved1_3[3];
- os_partition_t partition[16];
- __be32 cfg_col_width;
- __be32 dat_col_width;
- __be32 qfa_col_width;
- __u8 cartridge[16];
- __u8 reserved304_511[208];
- __be32 old_filemark_list[16680/4]; /* in ADR 1.4 __u8 track_table[16680] */
- os_ext_trk_tb_t ext_track_tb;
- __u8 reserved17272_17735[464];
- os_fm_tab_t dat_fm_tab;
- os_fm_tab_t qfa_fm_tab;
- __u8 reserved25960_32767[6808];
-} os_header_t;
-
-
-/*
- * OnStream ADRL frame
- */
-#define OS_FRAME_SIZE (32 * 1024 + 512)
-#define OS_DATA_SIZE (32 * 1024)
-#define OS_AUX_SIZE (512)
-//#define OSST_MAX_SG 2
-
-/* The OnStream tape buffer descriptor. */
-struct osst_buffer {
- unsigned char in_use;
- unsigned char dma; /* DMA-able buffer */
- int buffer_size;
- int buffer_blocks;
- int buffer_bytes;
- int read_pointer;
- int writing;
- int midlevel_result;
- int syscall_result;
- struct osst_request *last_SRpnt;
- struct st_cmdstatus cmdstat;
- struct rq_map_data map_data;
- unsigned char *b_data;
- os_aux_t *aux; /* onstream AUX structure at end of each block */
- unsigned short use_sg; /* zero or number of s/g segments for this adapter */
- unsigned short sg_segs; /* number of segments in s/g list */
- unsigned short orig_sg_segs; /* number of segments allocated at first try */
- struct scatterlist sg[1]; /* MUST BE last item */
-} ;
-
-/* The OnStream tape drive descriptor */
-struct osst_tape {
- struct scsi_driver *driver;
- unsigned capacity;
- struct scsi_device *device;
- struct mutex lock; /* for serialization */
- struct completion wait; /* for SCSI commands */
- struct osst_buffer * buffer;
-
- /* Drive characteristics */
- unsigned char omit_blklims;
- unsigned char do_auto_lock;
- unsigned char can_bsr;
- unsigned char can_partitions;
- unsigned char two_fm;
- unsigned char fast_mteom;
- unsigned char restr_dma;
- unsigned char scsi2_logical;
- unsigned char default_drvbuffer; /* 0xff = don't touch, value 3 bits */
- unsigned char pos_unknown; /* after reset position unknown */
- int write_threshold;
- int timeout; /* timeout for normal commands */
- int long_timeout; /* timeout for commands known to take long time*/
-
- /* Mode characteristics */
- struct st_modedef modes[ST_NBR_MODES];
- int current_mode;
-
- /* Status variables */
- int partition;
- int new_partition;
- int nbr_partitions; /* zero until partition support enabled */
- struct st_partstat ps[ST_NBR_PARTITIONS];
- unsigned char dirty;
- unsigned char ready;
- unsigned char write_prot;
- unsigned char drv_write_prot;
- unsigned char in_use;
- unsigned char blksize_changed;
- unsigned char density_changed;
- unsigned char compression_changed;
- unsigned char drv_buffer;
- unsigned char density;
- unsigned char door_locked;
- unsigned char rew_at_close;
- unsigned char inited;
- int block_size;
- int min_block;
- int max_block;
- int recover_count; /* from tape opening */
- int abort_count;
- int write_count;
- int read_count;
- int recover_erreg; /* from last status call */
- /*
- * OnStream specific data
- */
- int os_fw_rev; /* the firmware revision * 10000 */
- unsigned char raw; /* flag OnStream raw access (32.5KB block size) */
- unsigned char poll; /* flag that this drive needs polling (IDE|firmware) */
- unsigned char frame_in_buffer; /* flag that the frame as per frame_seq_number
- * has been read into STp->buffer and is valid */
- int frame_seq_number; /* logical frame number */
- int logical_blk_num; /* logical block number */
- unsigned first_frame_position; /* physical frame to be transferred to/from host */
- unsigned last_frame_position; /* physical frame to be transferred to/from tape */
- int cur_frames; /* current number of frames in internal buffer */
- int max_frames; /* max number of frames in internal buffer */
- char application_sig[5]; /* application signature */
- unsigned char fast_open; /* flag that reminds us we didn't check headers at open */
- unsigned short wrt_pass_cntr; /* write pass counter */
- int update_frame_cntr; /* update frame counter */
- int onstream_write_error; /* write error recovery active */
- int header_ok; /* header frame verified ok */
- int linux_media; /* reading linux-specific media */
- int linux_media_version;
- os_header_t * header_cache; /* cache is kept for filemark positions */
- int filemark_cnt;
- int first_mark_ppos;
- int last_mark_ppos;
- int last_mark_lbn; /* storing the log_blk_num of the last mark extends the ADR spec */
- int first_data_ppos;
- int eod_frame_ppos;
- int eod_frame_lfa;
- int write_type; /* used in write error recovery */
- int read_error_frame; /* used in read error recovery */
- unsigned long cmd_start_time;
- unsigned long max_cmd_time;
-
-#if DEBUG
- unsigned char write_pending;
- int nbr_finished;
- int nbr_waits;
- unsigned char last_cmnd[6];
- unsigned char last_sense[16];
-#endif
- struct gendisk *drive;
-} ;
-
-/* scsi tape command */
-struct osst_request {
- unsigned char cmd[MAX_COMMAND_SIZE];
- unsigned char sense[SCSI_SENSE_BUFFERSIZE];
- int result;
- struct osst_tape *stp;
- struct completion *waiting;
- struct bio *bio;
-};
-
-/* Values of write_type */
-#define OS_WRITE_DATA 0
-#define OS_WRITE_EOD 1
-#define OS_WRITE_NEW_MARK 2
-#define OS_WRITE_LAST_MARK 3
-#define OS_WRITE_HEADER 4
-#define OS_WRITE_FILLER 5
-
-/* Additional rw state */
-#define OS_WRITING_COMPLETE 3
diff --git a/drivers/scsi/osst_detect.h b/drivers/scsi/osst_detect.h
deleted file mode 100644
index 83c1d4fb11db..000000000000
--- a/drivers/scsi/osst_detect.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#define SIGS_FROM_OSST \
- {"OnStream", "SC-", "", "osst"}, \
- {"OnStream", "DI-", "", "osst"}, \
- {"OnStream", "DP-", "", "osst"}, \
- {"OnStream", "FW-", "", "osst"}, \
- {"OnStream", "USB", "", "osst"}
diff --git a/drivers/scsi/osst_options.h b/drivers/scsi/osst_options.h
deleted file mode 100644
index a6a389b88876..000000000000
--- a/drivers/scsi/osst_options.h
+++ /dev/null
@@ -1,107 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- The compile-time configurable defaults for the Linux SCSI tape driver.
-
- Copyright 1995 Kai Makisara.
-
- Last modified: Wed Sep 2 21:24:07 1998 by root@home
-
- Changed (and renamed) for OnStream SCSI drives garloff@suse.de
- 2000-06-21
-
- $Header: /cvsroot/osst/Driver/osst_options.h,v 1.6 2003/12/23 14:22:12 wriede Exp $
-*/
-
-#ifndef _OSST_OPTIONS_H
-#define _OSST_OPTIONS_H
-
-/* The minimum limit for the number of SCSI tape devices is determined by
- OSST_MAX_TAPES. If the number of tape devices and the "slack" defined by
- OSST_EXTRA_DEVS exceeds OSST_MAX_TAPES, the larger number is used. */
-#define OSST_MAX_TAPES 4
-
-/* If OSST_IN_FILE_POS is nonzero, the driver positions the tape after the
- record has been read by the user program even if the tape has moved further
- because of buffered reads. Should be set to zero to also support drives
- that can't space backwards over records. NOTE: The tape will be
- spaced backwards over an "accidentally" crossed filemark in any case. */
-#define OSST_IN_FILE_POS 1
-
-/* The tape driver buffer size in kilobytes. */
-/* Don't change, as this is the HW blocksize */
-#define OSST_BUFFER_BLOCKS 32
-
-/* The number of kilobytes of data in the buffer that triggers an
- asynchronous write in fixed block mode. See also OSST_ASYNC_WRITES
- below. */
-#define OSST_WRITE_THRESHOLD_BLOCKS 32
-
-/* OSST_EOM_RESERVE defines the number of frames that are kept in reserve for
- * write error recovery when writing near the end of the medium. ENOSPC is returned
- * when write() is called and the tape write position is within this number
- * of blocks from the tape capacity. */
-#define OSST_EOM_RESERVE 300
-
-/* The maximum number of tape buffers the driver allocates. The number
- is also constrained by the number of drives detected. Determines the
- maximum number of concurrently active tape drives. */
-#define OSST_MAX_BUFFERS OSST_MAX_TAPES
-
-/* Maximum number of scatter/gather segments */
-/* Fit one buffer in pages and add one for the AUX header */
-#define OSST_MAX_SG (((OSST_BUFFER_BLOCKS*1024) / PAGE_SIZE) + 1)
-
-/* The number of scatter/gather segments to allocate at first try (must be
- smaller than or equal to the maximum). */
-#define OSST_FIRST_SG ((OSST_BUFFER_BLOCKS*1024) / PAGE_SIZE)
-
-/* The size of the first scatter/gather segments (determines the maximum block
- size for SCSI adapters not supporting scatter/gather). The default is set
- to try to allocate the buffer as one chunk. */
-#define OSST_FIRST_ORDER (15-PAGE_SHIFT)
-
-
-/* The following lines define defaults for properties that can be set
- separately for each drive using the MTSTOPTIONS ioctl. */
-
-/* If OSST_TWO_FM is non-zero, the driver writes two filemarks after a
- file being written. Some drives can't handle two filemarks at the
- end of data. */
-#define OSST_TWO_FM 0
-
-/* If OSST_BUFFER_WRITES is non-zero, writes in fixed block mode are
- buffered until the driver buffer is full or asynchronous write is
- triggered. */
-#define OSST_BUFFER_WRITES 1
-
-/* If OSST_ASYNC_WRITES is non-zero, the SCSI write command may be started
- without waiting for it to finish. May cause problems in multiple
- tape backups. */
-#define OSST_ASYNC_WRITES 1
-
-/* If OSST_READ_AHEAD is non-zero, blocks are read ahead in fixed block
- mode. */
-#define OSST_READ_AHEAD 1
-
-/* If OSST_AUTO_LOCK is non-zero, the drive door is locked at the first
- read or write command after the device is opened. The door is opened
- when the device is closed. */
-#define OSST_AUTO_LOCK 0
-
-/* If OSST_FAST_MTEOM is non-zero, the MTEOM ioctl is done using the
- direct SCSI command. The file number status is lost but this method
- is fast with some drives. Otherwise MTEOM is done by spacing over
- files and the file number status is retained. */
-#define OSST_FAST_MTEOM 0
-
-/* If OSST_SCSI2LOGICAL is nonzero, the logical block addresses are used for
- MTIOCPOS and MTSEEK by default. Vendor addresses are used if OSST_SCSI2LOGICAL
- is zero. */
-#define OSST_SCSI2LOGICAL 0
-
-/* If OSST_SYSV is non-zero, the tape behaves according to the SYS V semantics.
- The default is BSD semantics. */
-#define OSST_SYSV 0
-
-
-#endif
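
The OSST_MAX_SG, OSST_FIRST_SG and OSST_FIRST_ORDER macros in the file removed above all follow from the fixed 32 KiB hardware frame. A quick worked expansion, assuming a 4 KiB page size (PAGE_SIZE 4096, PAGE_SHIFT 12), makes the "one buffer in pages plus one for the AUX header" comment concrete:

/* Illustrative values only, for PAGE_SIZE == 4096 / PAGE_SHIFT == 12. */
#define OSST_BUFFER_BLOCKS	32				/* 32 KiB hardware frame      */
#define OSST_MAX_SG		(((32 * 1024) / 4096) + 1)	/* 8 data pages + 1 AUX = 9   */
#define OSST_FIRST_SG		((32 * 1024) / 4096)		/* 8                          */
#define OSST_FIRST_ORDER	(15 - 12)			/* order-3 = one 32 KiB chunk */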
diff --git a/drivers/scsi/pcmcia/Kconfig b/drivers/scsi/pcmcia/Kconfig
index c544f48a1d18..2368f34efba3 100644
--- a/drivers/scsi/pcmcia/Kconfig
+++ b/drivers/scsi/pcmcia/Kconfig
@@ -20,6 +20,16 @@ config PCMCIA_AHA152X
To compile this driver as a module, choose M here: the
module will be called aha152x_cs.
+config PCMCIA_FDOMAIN
+ tristate "Future Domain PCMCIA support"
+ select SCSI_FDOMAIN
+ help
+ Say Y here if you intend to attach this type of PCMCIA SCSI host
+ adapter to your computer.
+
+ To compile this driver as a module, choose M here: the
+ module will be called fdomain_cs.
+
config PCMCIA_NINJA_SCSI
tristate "NinjaSCSI-3 / NinjaSCSI-32Bi (16bit) PCMCIA support"
depends on !64BIT
diff --git a/drivers/scsi/pcmcia/Makefile b/drivers/scsi/pcmcia/Makefile
index a5a24dd44e7e..02f5b44a2685 100644
--- a/drivers/scsi/pcmcia/Makefile
+++ b/drivers/scsi/pcmcia/Makefile
@@ -4,6 +4,7 @@ ccflags-y := -I $(srctree)/drivers/scsi
# 16-bit client drivers
obj-$(CONFIG_PCMCIA_QLOGIC) += qlogic_cs.o
+obj-$(CONFIG_PCMCIA_FDOMAIN) += fdomain_cs.o
obj-$(CONFIG_PCMCIA_AHA152X) += aha152x_cs.o
obj-$(CONFIG_PCMCIA_NINJA_SCSI) += nsp_cs.o
obj-$(CONFIG_PCMCIA_SYM53C500) += sym53c500_cs.o
diff --git a/drivers/scsi/pcmcia/fdomain_cs.c b/drivers/scsi/pcmcia/fdomain_cs.c
new file mode 100644
index 000000000000..e42acf314d06
--- /dev/null
+++ b/drivers/scsi/pcmcia/fdomain_cs.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1)
+/*
+ * Driver for Future Domain-compatible PCMCIA SCSI cards
+ * Copyright 2019 Ondrej Zary
+ *
+ * The initial developer of the original code is David A. Hinds
+ * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <scsi/scsi_host.h>
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ds.h>
+#include "fdomain.h"
+
+MODULE_AUTHOR("Ondrej Zary, David Hinds");
+MODULE_DESCRIPTION("Future Domain PCMCIA SCSI driver");
+MODULE_LICENSE("Dual MPL/GPL");
+
+static int fdomain_config_check(struct pcmcia_device *p_dev, void *priv_data)
+{
+ p_dev->io_lines = 10;
+ p_dev->resource[0]->end = FDOMAIN_REGION_SIZE;
+ p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
+ p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
+ return pcmcia_request_io(p_dev);
+}
+
+static int fdomain_probe(struct pcmcia_device *link)
+{
+ int ret;
+ struct Scsi_Host *sh;
+
+ link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;
+ link->config_regs = PRESENT_OPTION;
+
+ ret = pcmcia_loop_config(link, fdomain_config_check, NULL);
+ if (ret)
+ return ret;
+
+ ret = pcmcia_enable_device(link);
+ if (ret)
+ goto fail_disable;
+
+ if (!request_region(link->resource[0]->start, FDOMAIN_REGION_SIZE,
+ "fdomain_cs"))
+ goto fail_disable;
+
+ sh = fdomain_create(link->resource[0]->start, link->irq, 7, &link->dev);
+ if (!sh) {
+ dev_err(&link->dev, "Controller initialization failed");
+ ret = -ENODEV;
+ goto fail_release;
+ }
+
+ link->priv = sh;
+
+ return 0;
+
+fail_release:
+ release_region(link->resource[0]->start, FDOMAIN_REGION_SIZE);
+fail_disable:
+ pcmcia_disable_device(link);
+ return ret;
+}
+
+static void fdomain_remove(struct pcmcia_device *link)
+{
+ fdomain_destroy(link->priv);
+ release_region(link->resource[0]->start, FDOMAIN_REGION_SIZE);
+ pcmcia_disable_device(link);
+}
+
+static const struct pcmcia_device_id fdomain_ids[] = {
+ PCMCIA_DEVICE_PROD_ID12("IBM Corp.", "SCSI PCMCIA Card", 0xe3736c88,
+ 0x859cad20),
+ PCMCIA_DEVICE_PROD_ID1("SCSI PCMCIA Adapter Card", 0x8dacb57e),
+ PCMCIA_DEVICE_PROD_ID12(" SIMPLE TECHNOLOGY Corporation",
+ "SCSI PCMCIA Credit Card Controller",
+ 0x182bdafe, 0xc80d106f),
+ PCMCIA_DEVICE_NULL,
+};
+MODULE_DEVICE_TABLE(pcmcia, fdomain_ids);
+
+static struct pcmcia_driver fdomain_cs_driver = {
+ .owner = THIS_MODULE,
+ .name = "fdomain_cs",
+ .probe = fdomain_probe,
+ .remove = fdomain_remove,
+ .id_table = fdomain_ids,
+};
+
+module_pcmcia_driver(fdomain_cs_driver);
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index a81748e6e8fb..97416e1dcc5b 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -789,7 +789,7 @@ static void nsp_pio_read(struct scsi_cmnd *SCpnt)
SCpnt->SCp.buffers_residual != 0 ) {
//nsp_dbg(NSP_DEBUG_DATA_IO, "scatterlist next timeout=%d", time_out);
SCpnt->SCp.buffers_residual--;
- SCpnt->SCp.buffer++;
+ SCpnt->SCp.buffer = sg_next(SCpnt->SCp.buffer);
SCpnt->SCp.ptr = BUFFER_ADDR;
SCpnt->SCp.this_residual = SCpnt->SCp.buffer->length;
time_out = 1000;
@@ -887,7 +887,7 @@ static void nsp_pio_write(struct scsi_cmnd *SCpnt)
SCpnt->SCp.buffers_residual != 0 ) {
//nsp_dbg(NSP_DEBUG_DATA_IO, "scatterlist next");
SCpnt->SCp.buffers_residual--;
- SCpnt->SCp.buffer++;
+ SCpnt->SCp.buffer = sg_next(SCpnt->SCp.buffer);
SCpnt->SCp.ptr = BUFFER_ADDR;
SCpnt->SCp.this_residual = SCpnt->SCp.buffer->length;
time_out = 1000;
diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c
index d193961ea82f..6b85016b4db3 100644
--- a/drivers/scsi/pm8001/pm8001_ctl.c
+++ b/drivers/scsi/pm8001/pm8001_ctl.c
@@ -462,6 +462,24 @@ static ssize_t pm8001_ctl_bios_version_show(struct device *cdev,
}
static DEVICE_ATTR(bios_version, S_IRUGO, pm8001_ctl_bios_version_show, NULL);
/**
+ * event_log_size_show - event log size
+ * @cdev: pointer to embedded class device
+ * @buf: the buffer returned
+ *
+ * A sysfs read shost attribute.
+ */
+static ssize_t event_log_size_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_size);
+}
+static DEVICE_ATTR_RO(event_log_size);
+/**
* pm8001_ctl_aap_log_show - IOP event log
* @cdev: pointer to embedded class device
* @buf: the buffer returned
@@ -474,25 +492,26 @@ static ssize_t pm8001_ctl_iop_log_show(struct device *cdev,
struct Scsi_Host *shost = class_to_shost(cdev);
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
-#define IOP_MEMMAP(r, c) \
- (*(u32 *)((u8*)pm8001_ha->memoryMap.region[IOP].virt_ptr + (r) * 32 \
- + (c)))
- int i;
char *str = buf;
- int max = 2;
- for (i = 0; i < max; i++) {
- str += sprintf(str, "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x"
- "0x%08x 0x%08x\n",
- IOP_MEMMAP(i, 0),
- IOP_MEMMAP(i, 4),
- IOP_MEMMAP(i, 8),
- IOP_MEMMAP(i, 12),
- IOP_MEMMAP(i, 16),
- IOP_MEMMAP(i, 20),
- IOP_MEMMAP(i, 24),
- IOP_MEMMAP(i, 28));
+ u32 read_size =
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_size / 1024;
+ static u32 start, end, count;
+ u32 max_read_times = 32;
+ u32 max_count = (read_size * 1024) / (max_read_times * 4);
+ u32 *temp = (u32 *)pm8001_ha->memoryMap.region[IOP].virt_ptr;
+
+ if ((count % max_count) == 0) {
+ start = 0;
+ end = max_read_times;
+ count = 0;
+ } else {
+ start = end;
+ end = end + max_read_times;
}
+ for (; start < end; start++)
+ str += sprintf(str, "%08x ", *(temp+start));
+ count++;
return str - buf;
}
static DEVICE_ATTR(iop_log, S_IRUGO, pm8001_ctl_iop_log_show, NULL);
@@ -796,6 +815,7 @@ struct device_attribute *pm8001_host_attrs[] = {
&dev_attr_max_sg_list,
&dev_attr_sas_spec_support,
&dev_attr_logging_level,
+ &dev_attr_event_log_size,
&dev_attr_host_sas_address,
&dev_attr_bios_version,
&dev_attr_ib_log,
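
The rewritten iop_log_show() above keeps a static read window (start/end/count) over the IOP event log instead of always dumping the first two rows. Assuming, purely for illustration, an event_log_size of 65536 bytes, the window arithmetic works out as follows:

/* Sketch of the windowing in iop_log_show(), with a hypothetical
 * event_log_size of 65536 bytes:
 *
 *   read_size      = 65536 / 1024           = 64  KiB
 *   max_read_times = 32                            u32 words per sysfs read
 *   max_count      = (64 * 1024) / (32 * 4) = 512  reads before the window wraps
 *
 * Each read therefore dumps 32 dwords (128 bytes), and 512 consecutive reads
 * of the attribute walk the whole log before 'start' resets to 0.
 */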
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 109effd3557d..68a8217032d0 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -2356,7 +2356,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
(status != IO_UNDERFLOW)) {
if (!((t->dev->parent) &&
- (DEV_IS_EXPANDER(t->dev->parent->dev_type)))) {
+ (dev_is_expander(t->dev->parent->dev_type)))) {
for (i = 0 , j = 4; j <= 7 && i <= 3; i++ , j++)
sata_addr_low[i] = pm8001_ha->sas_addr[j];
for (i = 0 , j = 0; j <= 3 && i <= 3; i++ , j++)
@@ -4560,7 +4560,7 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
pm8001_dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
stp_sspsmp_sata = 0x01; /*ssp or smp*/
}
- if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type))
+ if (parent_dev && dev_is_expander(parent_dev->dev_type))
phy_id = parent_dev->ex_dev.ex_phy->phy_id;
else
phy_id = pm8001_dev->attached_phy;
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index 88eef3b18e41..dd38c356a1a4 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -634,7 +634,7 @@ static int pm8001_dev_found_notify(struct domain_device *dev)
dev->lldd_dev = pm8001_device;
pm8001_device->dev_type = dev->dev_type;
pm8001_device->dcompletion = &completion;
- if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
+ if (parent_dev && dev_is_expander(parent_dev->dev_type)) {
int phy_id;
struct ex_phy *phy;
for (phy_id = 0; phy_id < parent_dev->ex_dev.num_phys;
@@ -1181,7 +1181,7 @@ int pm8001_query_task(struct sas_task *task)
return rc;
}
-/* mandatory SAM-3, still need free task/ccb info, abord the specified task */
+/* mandatory SAM-3, still need free task/ccb info, abort the specified task */
int pm8001_abort_task(struct sas_task *task)
{
unsigned long flags;
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index ac6d8e3f22de..ff17c6aff63d 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -103,7 +103,6 @@ do { \
#define PM8001_READ_VPD
-#define DEV_IS_EXPANDER(type) ((type == SAS_EDGE_EXPANDER_DEVICE) || (type == SAS_FANOUT_EXPANDER_DEVICE))
#define IS_SPCV_12G(dev) ((dev->device == 0X8074) \
|| (dev->device == 0X8076) \
|| (dev->device == 0X8077) \
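
The DEV_IS_EXPANDER() macro deleted above is replaced throughout pm8001 by the libsas dev_is_expander() helper. As a sketch of what the converted call sites now rely on (the real helper is expected to live in include/scsi/libsas.h; this is the assumed equivalent, not a copy of it):

static inline bool dev_is_expander(enum sas_device_type type)
{
	return type == SAS_EDGE_EXPANDER_DEVICE ||
	       type == SAS_FANOUT_EXPANDER_DEVICE;
}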
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index 301de40eb708..1128d86d241a 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -2066,7 +2066,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
(status != IO_UNDERFLOW)) {
if (!((t->dev->parent) &&
- (DEV_IS_EXPANDER(t->dev->parent->dev_type)))) {
+ (dev_is_expander(t->dev->parent->dev_type)))) {
for (i = 0 , j = 4; i <= 3 && j <= 7; i++ , j++)
sata_addr_low[i] = pm8001_ha->sas_addr[j];
for (i = 0 , j = 0; i <= 3 && j <= 3; i++ , j++)
@@ -4561,7 +4561,7 @@ static int pm80xx_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
pm8001_dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
stp_sspsmp_sata = 0x01; /*ssp or smp*/
}
- if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type))
+ if (parent_dev && dev_is_expander(parent_dev->dev_type))
phy_id = parent_dev->ex_dev.ex_phy->phy_id;
else
phy_id = pm8001_dev->attached_phy;
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index ca22526aff7f..71ff3936da4f 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -3255,7 +3255,7 @@ static int pmcraid_copy_sglist(
int direction
)
{
- struct scatterlist *scatterlist;
+ struct scatterlist *sg;
void *kaddr;
int bsize_elem;
int i;
@@ -3264,10 +3264,10 @@ static int pmcraid_copy_sglist(
/* Determine the actual number of bytes per element */
bsize_elem = PAGE_SIZE * (1 << sglist->order);
- scatterlist = sglist->scatterlist;
+ sg = sglist->scatterlist;
- for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
- struct page *page = sg_page(&scatterlist[i]);
+ for (i = 0; i < (len / bsize_elem); i++, sg = sg_next(sg), buffer += bsize_elem) {
+ struct page *page = sg_page(sg);
kaddr = kmap(page);
if (direction == DMA_TO_DEVICE)
@@ -3282,11 +3282,11 @@ static int pmcraid_copy_sglist(
return -EFAULT;
}
- scatterlist[i].length = bsize_elem;
+ sg->length = bsize_elem;
}
if (len % bsize_elem) {
- struct page *page = sg_page(&scatterlist[i]);
+ struct page *page = sg_page(sg);
kaddr = kmap(page);
@@ -3297,7 +3297,7 @@ static int pmcraid_copy_sglist(
kunmap(page);
- scatterlist[i].length = len % bsize_elem;
+ sg->length = len % bsize_elem;
}
if (rc) {
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index 35213082e933..a406cc825426 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -590,7 +590,7 @@ static int ppa_completion(struct scsi_cmnd *cmd)
if (cmd->SCp.buffer && !cmd->SCp.this_residual) {
/* if scatter/gather, advance to the next segment */
if (cmd->SCp.buffers_residual--) {
- cmd->SCp.buffer++;
+ cmd->SCp.buffer = sg_next(cmd->SCp.buffer);
cmd->SCp.this_residual =
cmd->SCp.buffer->length;
cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
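
The SCp.buffer++ changes in nsp_cs.c and ppa.c, and the indexed scatterlist access replaced in pmcraid.c above, are all instances of the same rule: a scatterlist may be chained, so the next element has to be fetched with sg_next() (or walked with for_each_sg()) rather than with pointer arithmetic. A minimal sketch of the idiom, with illustrative names that are not taken from any of these drivers:

#include <linux/scatterlist.h>

/* Sum the lengths of every segment of a possibly-chained scatterlist.
 * A plain 'sg++' would step into the chain-link entry instead of
 * following it; sg_next()/for_each_sg() follow chain pointers. */
static unsigned int total_sg_bytes(struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	unsigned int bytes = 0;
	int i;

	for_each_sg(sgl, sg, nents, i)
		bytes += sg->length;

	return bytes;
}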
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 6ef0f741bf89..a42babde036d 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -2215,16 +2215,21 @@ static void qedf_simd_int_handler(void *cookie)
static void qedf_sync_free_irqs(struct qedf_ctx *qedf)
{
int i;
+ u16 vector_idx = 0;
+ u32 vector;
if (qedf->int_info.msix_cnt) {
for (i = 0; i < qedf->int_info.used_cnt; i++) {
- synchronize_irq(qedf->int_info.msix[i].vector);
- irq_set_affinity_hint(qedf->int_info.msix[i].vector,
- NULL);
- irq_set_affinity_notifier(qedf->int_info.msix[i].vector,
- NULL);
- free_irq(qedf->int_info.msix[i].vector,
- &qedf->fp_array[i]);
+ vector_idx = i * qedf->dev_info.common.num_hwfns +
+ qed_ops->common->get_affin_hwfn_idx(qedf->cdev);
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+ "Freeing IRQ #%d vector_idx=%d.\n",
+ i, vector_idx);
+ vector = qedf->int_info.msix[vector_idx].vector;
+ synchronize_irq(vector);
+ irq_set_affinity_hint(vector, NULL);
+ irq_set_affinity_notifier(vector, NULL);
+ free_irq(vector, &qedf->fp_array[i]);
}
} else
qed_ops->common->simd_handler_clean(qedf->cdev,
@@ -2237,11 +2242,19 @@ static void qedf_sync_free_irqs(struct qedf_ctx *qedf)
static int qedf_request_msix_irq(struct qedf_ctx *qedf)
{
int i, rc, cpu;
+ u16 vector_idx = 0;
+ u32 vector;
cpu = cpumask_first(cpu_online_mask);
for (i = 0; i < qedf->num_queues; i++) {
- rc = request_irq(qedf->int_info.msix[i].vector,
- qedf_msix_handler, 0, "qedf", &qedf->fp_array[i]);
+ vector_idx = i * qedf->dev_info.common.num_hwfns +
+ qed_ops->common->get_affin_hwfn_idx(qedf->cdev);
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+ "Requesting IRQ #%d vector_idx=%d.\n",
+ i, vector_idx);
+ vector = qedf->int_info.msix[vector_idx].vector;
+ rc = request_irq(vector, qedf_msix_handler, 0, "qedf",
+ &qedf->fp_array[i]);
if (rc) {
QEDF_WARN(&(qedf->dbg_ctx), "request_irq failed.\n");
@@ -2250,8 +2263,7 @@ static int qedf_request_msix_irq(struct qedf_ctx *qedf)
}
qedf->int_info.used_cnt++;
- rc = irq_set_affinity_hint(qedf->int_info.msix[i].vector,
- get_cpu_mask(cpu));
+ rc = irq_set_affinity_hint(vector, get_cpu_mask(cpu));
cpu = cpumask_next(cpu, cpu_online_mask);
}
@@ -3208,6 +3220,11 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
goto err1;
}
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+ "dev_info: num_hwfns=%d affin_hwfn_idx=%d.\n",
+ qedf->dev_info.common.num_hwfns,
+ qed_ops->common->get_affin_hwfn_idx(qedf->cdev));
+
/* queue allocation code should come here
* order should be
* slowpath_start
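
Both this qedf change and the qedi change that follows stop assuming that fastpath queue i owns MSI-X entry i; the entry is now i * num_hwfns + affin_hwfn_idx, as reported by the qed core. A sketch of the mapping with example numbers (two hwfns, affinitized hwfn at index 1; the values are hypothetical):

#include <linux/types.h>

/* Queue-to-MSI-X mapping used above; the parameters come from the qed core. */
static u16 queue_to_vector_idx(int queue, u8 num_hwfns, u8 affin_hwfn_idx)
{
	return queue * num_hwfns + affin_hwfn_idx;
}

/* num_hwfns = 2, affin_hwfn_idx = 1:  queue 0 -> vector_idx 1,
 * queue 1 -> vector_idx 3, queue 2 -> vector_idx 5 */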
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index f210a3e0c9b1..acb930b8c6a6 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -1313,13 +1313,20 @@ static void qedi_simd_int_handler(void *cookie)
static void qedi_sync_free_irqs(struct qedi_ctx *qedi)
{
int i;
+ u16 idx;
if (qedi->int_info.msix_cnt) {
for (i = 0; i < qedi->int_info.used_cnt; i++) {
- synchronize_irq(qedi->int_info.msix[i].vector);
- irq_set_affinity_hint(qedi->int_info.msix[i].vector,
+ idx = i * qedi->dev_info.common.num_hwfns +
+ qedi_ops->common->get_affin_hwfn_idx(qedi->cdev);
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "Freeing IRQ #%d vector_idx=%d.\n", i, idx);
+
+ synchronize_irq(qedi->int_info.msix[idx].vector);
+ irq_set_affinity_hint(qedi->int_info.msix[idx].vector,
NULL);
- free_irq(qedi->int_info.msix[i].vector,
+ free_irq(qedi->int_info.msix[idx].vector,
&qedi->fp_array[i]);
}
} else {
@@ -1334,20 +1341,28 @@ static void qedi_sync_free_irqs(struct qedi_ctx *qedi)
static int qedi_request_msix_irq(struct qedi_ctx *qedi)
{
int i, rc, cpu;
+ u16 idx;
cpu = cpumask_first(cpu_online_mask);
- for (i = 0; i < qedi->int_info.msix_cnt; i++) {
- rc = request_irq(qedi->int_info.msix[i].vector,
+ for (i = 0; i < MIN_NUM_CPUS_MSIX(qedi); i++) {
+ idx = i * qedi->dev_info.common.num_hwfns +
+ qedi_ops->common->get_affin_hwfn_idx(qedi->cdev);
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "dev_info: num_hwfns=%d affin_hwfn_idx=%d.\n",
+ qedi->dev_info.common.num_hwfns,
+ qedi_ops->common->get_affin_hwfn_idx(qedi->cdev));
+
+ rc = request_irq(qedi->int_info.msix[idx].vector,
qedi_msix_handler, 0, "qedi",
&qedi->fp_array[i]);
-
if (rc) {
QEDI_WARN(&qedi->dbg_ctx, "request_irq failed.\n");
qedi_sync_free_irqs(qedi);
return rc;
}
qedi->int_info.used_cnt++;
- rc = irq_set_affinity_hint(qedi->int_info.msix[i].vector,
+ rc = irq_set_affinity_hint(qedi->int_info.msix[idx].vector,
get_cpu_mask(cpu));
cpu = cpumask_next(cpu, cpu_online_mask);
}
@@ -2415,6 +2430,11 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
if (rc)
goto free_host;
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "dev_info: num_hwfns=%d affin_hwfn_idx=%d.\n",
+ qedi->dev_info.common.num_hwfns,
+ qedi_ops->common->get_affin_hwfn_idx(qedi->cdev));
+
if (mode != QEDI_MODE_RECOVERY) {
rc = qedi_set_iscsi_pf_param(qedi);
if (rc) {
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 1a4095c56eee..bad2b12604f1 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -532,6 +532,8 @@ typedef struct srb {
uint8_t cmd_type;
uint8_t pad[3];
atomic_t ref_count;
+ struct kref cmd_kref; /* need to migrate ref_count over to this */
+ void *priv;
wait_queue_head_t nvme_ls_waitq;
struct fc_port *fcport;
struct scsi_qla_host *vha;
@@ -554,6 +556,7 @@ typedef struct srb {
} u;
void (*done)(void *, int);
void (*free)(void *);
+ void (*put_fn)(struct kref *kref);
} srb_t;
#define GET_CMD_SP(sp) (sp->u.scmd.cmd)
@@ -2336,7 +2339,6 @@ typedef struct fc_port {
unsigned int id_changed:1;
unsigned int scan_needed:1;
- struct work_struct nvme_del_work;
struct completion nvme_del_done;
uint32_t nvme_prli_service_param;
#define NVME_PRLI_SP_CONF BIT_7
@@ -4376,7 +4378,6 @@ typedef struct scsi_qla_host {
struct nvme_fc_local_port *nvme_local_port;
struct completion nvme_del_done;
- struct list_head nvme_rport_list;
uint16_t fcoe_vlan_id;
uint16_t fcoe_fcf_idx;
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index bbe69ab5cf3f..f9669fdf7798 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -908,4 +908,6 @@ void qlt_clr_qp_table(struct scsi_qla_host *vha);
void qlt_set_mode(struct scsi_qla_host *);
int qla2x00_set_data_rate(scsi_qla_host_t *vha, uint16_t mode);
+/* nvme.c */
+void qla_nvme_unregister_remote_port(struct fc_port *fcport);
#endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 54772d4c377f..4059655639d9 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -5403,7 +5403,6 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
fcport->deleted = 0;
fcport->logout_on_delete = 1;
- fcport->login_retry = vha->hw->login_retry_count;
fcport->n2n_chip_reset = fcport->n2n_link_reset_cnt = 0;
switch (vha->hw->current_topology) {
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 22e3fba28e51..963094b3c300 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -12,8 +12,6 @@
static struct nvme_fc_port_template qla_nvme_fc_transport;
-static void qla_nvme_unregister_remote_port(struct work_struct *);
-
int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
{
struct qla_nvme_rport *rport;
@@ -38,7 +36,6 @@ int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
(fcport->nvme_flag & NVME_FLAG_REGISTERED))
return 0;
- INIT_WORK(&fcport->nvme_del_work, qla_nvme_unregister_remote_port);
fcport->nvme_flag &= ~NVME_FLAG_RESETTING;
memset(&req, 0, sizeof(struct nvme_fc_port_info));
@@ -74,7 +71,6 @@ int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
rport = fcport->nvme_remote_port->private;
rport->fcport = fcport;
- list_add_tail(&rport->list, &vha->nvme_rport_list);
fcport->nvme_flag |= NVME_FLAG_REGISTERED;
return 0;
@@ -124,53 +120,91 @@ static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
return 0;
}
+static void qla_nvme_release_fcp_cmd_kref(struct kref *kref)
+{
+ struct srb *sp = container_of(kref, struct srb, cmd_kref);
+ struct nvme_private *priv = (struct nvme_private *)sp->priv;
+ struct nvmefc_fcp_req *fd;
+ struct srb_iocb *nvme;
+ unsigned long flags;
+
+ if (!priv)
+ goto out;
+
+ nvme = &sp->u.iocb_cmd;
+ fd = nvme->u.nvme.desc;
+
+ spin_lock_irqsave(&priv->cmd_lock, flags);
+ priv->sp = NULL;
+ sp->priv = NULL;
+ if (priv->comp_status == QLA_SUCCESS) {
+ fd->rcv_rsplen = nvme->u.nvme.rsp_pyld_len;
+ } else {
+ fd->rcv_rsplen = 0;
+ fd->transferred_length = 0;
+ }
+ fd->status = 0;
+ spin_unlock_irqrestore(&priv->cmd_lock, flags);
+
+ fd->done(fd);
+out:
+ qla2xxx_rel_qpair_sp(sp->qpair, sp);
+}
+
+static void qla_nvme_release_ls_cmd_kref(struct kref *kref)
+{
+ struct srb *sp = container_of(kref, struct srb, cmd_kref);
+ struct nvme_private *priv = (struct nvme_private *)sp->priv;
+ struct nvmefc_ls_req *fd;
+ unsigned long flags;
+
+ if (!priv)
+ goto out;
+
+ spin_lock_irqsave(&priv->cmd_lock, flags);
+ priv->sp = NULL;
+ sp->priv = NULL;
+ spin_unlock_irqrestore(&priv->cmd_lock, flags);
+
+ fd = priv->fd;
+ fd->done(fd, priv->comp_status);
+out:
+ qla2x00_rel_sp(sp);
+}
+
+static void qla_nvme_ls_complete(struct work_struct *work)
+{
+ struct nvme_private *priv =
+ container_of(work, struct nvme_private, ls_work);
+
+ kref_put(&priv->sp->cmd_kref, qla_nvme_release_ls_cmd_kref);
+}
+
static void qla_nvme_sp_ls_done(void *ptr, int res)
{
srb_t *sp = ptr;
- struct srb_iocb *nvme;
- struct nvmefc_ls_req *fd;
struct nvme_private *priv;
- if (WARN_ON_ONCE(atomic_read(&sp->ref_count) == 0))
+ if (WARN_ON_ONCE(kref_read(&sp->cmd_kref) == 0))
return;
- atomic_dec(&sp->ref_count);
-
if (res)
res = -EINVAL;
- nvme = &sp->u.iocb_cmd;
- fd = nvme->u.nvme.desc;
- priv = fd->private;
+ priv = (struct nvme_private *)sp->priv;
priv->comp_status = res;
+ INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
schedule_work(&priv->ls_work);
- /* work schedule doesn't need the sp */
- qla2x00_rel_sp(sp);
}
+/* It is assumed that the QPair lock is held. */
static void qla_nvme_sp_done(void *ptr, int res)
{
srb_t *sp = ptr;
- struct srb_iocb *nvme;
- struct nvmefc_fcp_req *fd;
-
- nvme = &sp->u.iocb_cmd;
- fd = nvme->u.nvme.desc;
-
- if (WARN_ON_ONCE(atomic_read(&sp->ref_count) == 0))
- return;
+ struct nvme_private *priv = (struct nvme_private *)sp->priv;
- atomic_dec(&sp->ref_count);
-
- if (res == QLA_SUCCESS) {
- fd->rcv_rsplen = nvme->u.nvme.rsp_pyld_len;
- } else {
- fd->rcv_rsplen = 0;
- fd->transferred_length = 0;
- }
- fd->status = 0;
- fd->done(fd);
- qla2xxx_rel_qpair_sp(sp->qpair, sp);
+ priv->comp_status = res;
+ kref_put(&sp->cmd_kref, qla_nvme_release_fcp_cmd_kref);
return;
}
@@ -189,44 +223,50 @@ static void qla_nvme_abort_work(struct work_struct *work)
__func__, sp, sp->handle, fcport, fcport->deleted);
if (!ha->flags.fw_started && (fcport && fcport->deleted))
- return;
+ goto out;
if (ha->flags.host_shutting_down) {
ql_log(ql_log_info, sp->fcport->vha, 0xffff,
"%s Calling done on sp: %p, type: 0x%x, sp->ref_count: 0x%x\n",
__func__, sp, sp->type, atomic_read(&sp->ref_count));
sp->done(sp, 0);
- return;
+ goto out;
}
- if (WARN_ON_ONCE(atomic_read(&sp->ref_count) == 0))
- return;
-
rval = ha->isp_ops->abort_command(sp);
ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
"%s: %s command for sp=%p, handle=%x on fcport=%p rval=%x\n",
__func__, (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
sp, sp->handle, fcport, rval);
+
+out:
+ /* kref_get was done before the work was scheduled. */
+ kref_put(&sp->cmd_kref, sp->put_fn);
}
static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
struct nvme_private *priv = fd->private;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->cmd_lock, flags);
+ if (!priv->sp) {
+ spin_unlock_irqrestore(&priv->cmd_lock, flags);
+ return;
+ }
+
+ if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
+ spin_unlock_irqrestore(&priv->cmd_lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&priv->cmd_lock, flags);
INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
schedule_work(&priv->abort_work);
}
-static void qla_nvme_ls_complete(struct work_struct *work)
-{
- struct nvme_private *priv =
- container_of(work, struct nvme_private, ls_work);
- struct nvmefc_ls_req *fd = priv->fd;
-
- fd->done(fd, priv->comp_status);
-}
static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
@@ -240,8 +280,16 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
struct qla_hw_data *ha;
srb_t *sp;
+
+ if (!fcport || (fcport && fcport->deleted))
+ return rval;
+
vha = fcport->vha;
ha = vha->hw;
+
+ if (!ha->flags.fw_started)
+ return rval;
+
/* Alloc SRB structure */
sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
if (!sp)
@@ -250,11 +298,13 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
sp->type = SRB_NVME_LS;
sp->name = "nvme_ls";
sp->done = qla_nvme_sp_ls_done;
- atomic_set(&sp->ref_count, 1);
- nvme = &sp->u.iocb_cmd;
+ sp->put_fn = qla_nvme_release_ls_cmd_kref;
+ sp->priv = (void *)priv;
priv->sp = sp;
+ kref_init(&sp->cmd_kref);
+ spin_lock_init(&priv->cmd_lock);
+ nvme = &sp->u.iocb_cmd;
priv->fd = fd;
- INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
nvme->u.nvme.desc = fd;
nvme->u.nvme.dir = 0;
nvme->u.nvme.dl = 0;
@@ -271,8 +321,10 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x700e,
"qla2x00_start_sp failed = %d\n", rval);
- atomic_dec(&sp->ref_count);
wake_up(&sp->nvme_ls_waitq);
+ sp->priv = NULL;
+ priv->sp = NULL;
+ qla2x00_rel_sp(sp);
return rval;
}
@@ -284,6 +336,18 @@ static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport,
struct nvmefc_fcp_req *fd)
{
struct nvme_private *priv = fd->private;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->cmd_lock, flags);
+ if (!priv->sp) {
+ spin_unlock_irqrestore(&priv->cmd_lock, flags);
+ return;
+ }
+ if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
+ spin_unlock_irqrestore(&priv->cmd_lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&priv->cmd_lock, flags);
INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
schedule_work(&priv->abort_work);
@@ -487,11 +551,11 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
fcport = qla_rport->fcport;
- vha = fcport->vha;
-
- if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
+ if (!qpair || !fcport || (qpair && !qpair->fw_started) ||
+ (fcport && fcport->deleted))
return rval;
+ vha = fcport->vha;
/*
* If we know the dev is going away while the transport is still sending
* IO's return busy back to stall the IO Q. This happens when the
@@ -507,12 +571,15 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
if (!sp)
return -EBUSY;
- atomic_set(&sp->ref_count, 1);
init_waitqueue_head(&sp->nvme_ls_waitq);
+ kref_init(&sp->cmd_kref);
+ spin_lock_init(&priv->cmd_lock);
+ sp->priv = (void *)priv;
priv->sp = sp;
sp->type = SRB_NVME_CMD;
sp->name = "nvme_cmd";
sp->done = qla_nvme_sp_done;
+ sp->put_fn = qla_nvme_release_fcp_cmd_kref;
sp->qpair = qpair;
sp->vha = vha;
nvme = &sp->u.iocb_cmd;
@@ -522,8 +589,10 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x212d,
"qla2x00_start_nvme_mq failed = %d\n", rval);
- atomic_dec(&sp->ref_count);
wake_up(&sp->nvme_ls_waitq);
+ sp->priv = NULL;
+ priv->sp = NULL;
+ qla2xxx_rel_qpair_sp(sp->qpair, sp);
}
return rval;
@@ -542,29 +611,16 @@ static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
{
fc_port_t *fcport;
- struct qla_nvme_rport *qla_rport = rport->private, *trport;
+ struct qla_nvme_rport *qla_rport = rport->private;
fcport = qla_rport->fcport;
fcport->nvme_remote_port = NULL;
fcport->nvme_flag &= ~NVME_FLAG_REGISTERED;
-
- list_for_each_entry_safe(qla_rport, trport,
- &fcport->vha->nvme_rport_list, list) {
- if (qla_rport->fcport == fcport) {
- list_del(&qla_rport->list);
- break;
- }
- }
- complete(&fcport->nvme_del_done);
-
- if (!test_bit(UNLOADING, &fcport->vha->dpc_flags)) {
- INIT_WORK(&fcport->free_work, qlt_free_session_done);
- schedule_work(&fcport->free_work);
- }
-
fcport->nvme_flag &= ~NVME_FLAG_DELETING;
ql_log(ql_log_info, fcport->vha, 0x2110,
- "remoteport_delete of %p completed.\n", fcport);
+ "remoteport_delete of %p %8phN completed.\n",
+ fcport, fcport->port_name);
+ complete(&fcport->nvme_del_done);
}
static struct nvme_fc_port_template qla_nvme_fc_transport = {
@@ -586,35 +642,25 @@ static struct nvme_fc_port_template qla_nvme_fc_transport = {
.fcprqst_priv_sz = sizeof(struct nvme_private),
};
-static void qla_nvme_unregister_remote_port(struct work_struct *work)
+void qla_nvme_unregister_remote_port(struct fc_port *fcport)
{
- struct fc_port *fcport = container_of(work, struct fc_port,
- nvme_del_work);
- struct qla_nvme_rport *qla_rport, *trport;
+ int ret;
if (!IS_ENABLED(CONFIG_NVME_FC))
return;
ql_log(ql_log_warn, NULL, 0x2112,
- "%s: unregister remoteport on %p\n",__func__, fcport);
-
- list_for_each_entry_safe(qla_rport, trport,
- &fcport->vha->nvme_rport_list, list) {
- if (qla_rport->fcport == fcport) {
- ql_log(ql_log_info, fcport->vha, 0x2113,
- "%s: fcport=%p\n", __func__, fcport);
- nvme_fc_set_remoteport_devloss
- (fcport->nvme_remote_port, 0);
- init_completion(&fcport->nvme_del_done);
- if (nvme_fc_unregister_remoteport
- (fcport->nvme_remote_port))
- ql_log(ql_log_info, fcport->vha, 0x2114,
- "%s: Failed to unregister nvme_remote_port\n",
- __func__);
- wait_for_completion(&fcport->nvme_del_done);
- break;
- }
- }
+ "%s: unregister remoteport on %p %8phN\n",
+ __func__, fcport, fcport->port_name);
+
+ nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0);
+ init_completion(&fcport->nvme_del_done);
+ ret = nvme_fc_unregister_remoteport(fcport->nvme_remote_port);
+ if (ret)
+ ql_log(ql_log_info, fcport->vha, 0x2114,
+ "%s: Failed to unregister nvme_remote_port (%d)\n",
+ __func__, ret);
+ wait_for_completion(&fcport->nvme_del_done);
}
void qla_nvme_delete(struct scsi_qla_host *vha)
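
The qla_nvme.c rework above replaces the hand-rolled atomic ref_count with a struct kref on the SRB plus a per-command release callback (put_fn), so the abort path can only take a reference with kref_get_unless_zero() while completion may be racing to drop the last one. Stripped of driver detail, the lifecycle it implements is the standard kref idiom, sketched here with made-up names rather than the driver's own:

#include <linux/kref.h>
#include <linux/slab.h>

struct demo_cmd {
	struct kref kref;
	/* ... command state ... */
};

static void demo_cmd_release(struct kref *kref)
{
	struct demo_cmd *cmd = container_of(kref, struct demo_cmd, kref);

	kfree(cmd);			/* runs only when the last reference drops */
}

static void demo_cmd_lifecycle(struct demo_cmd *cmd)
{
	kref_init(&cmd->kref);				/* submitter's initial reference     */

	if (kref_get_unless_zero(&cmd->kref))		/* abort path: only if still alive   */
		kref_put(&cmd->kref, demo_cmd_release);	/* abort done, drop its reference    */

	kref_put(&cmd->kref, demo_cmd_release);		/* completion drops the initial ref  */
}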
diff --git a/drivers/scsi/qla2xxx/qla_nvme.h b/drivers/scsi/qla2xxx/qla_nvme.h
index d3b8a6440113..67bb4a2a3742 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.h
+++ b/drivers/scsi/qla2xxx/qla_nvme.h
@@ -34,10 +34,10 @@ struct nvme_private {
struct work_struct ls_work;
struct work_struct abort_work;
int comp_status;
+ spinlock_t cmd_lock;
};
struct qla_nvme_rport {
- struct list_head list;
struct fc_port *fcport;
};
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index d056f5e7cf93..2e58cff9d200 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -4789,7 +4789,6 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
INIT_LIST_HEAD(&vha->plogi_ack_list);
INIT_LIST_HEAD(&vha->qp_list);
INIT_LIST_HEAD(&vha->gnl.fcports);
- INIT_LIST_HEAD(&vha->nvme_rport_list);
INIT_LIST_HEAD(&vha->gpnid_list);
INIT_WORK(&vha->iocb_work, qla2x00_iocb_work_fn);
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 2fd5c09b42d4..1c1f63be6eed 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -1004,6 +1004,12 @@ void qlt_free_session_done(struct work_struct *work)
else
logout_started = true;
}
+ } /* if sess->logout_on_delete */
+
+ if (sess->nvme_flag & NVME_FLAG_REGISTERED &&
+ !(sess->nvme_flag & NVME_FLAG_DELETING)) {
+ sess->nvme_flag |= NVME_FLAG_DELETING;
+ qla_nvme_unregister_remote_port(sess);
}
}
@@ -1155,14 +1161,8 @@ void qlt_unreg_sess(struct fc_port *sess)
sess->last_rscn_gen = sess->rscn_gen;
sess->last_login_gen = sess->login_gen;
- if (sess->nvme_flag & NVME_FLAG_REGISTERED &&
- !(sess->nvme_flag & NVME_FLAG_DELETING)) {
- sess->nvme_flag |= NVME_FLAG_DELETING;
- schedule_work(&sess->nvme_del_work);
- } else {
- INIT_WORK(&sess->free_work, qlt_free_session_done);
- schedule_work(&sess->free_work);
- }
+ INIT_WORK(&sess->free_work, qlt_free_session_done);
+ schedule_work(&sess->free_work);
}
EXPORT_SYMBOL(qlt_unreg_sess);
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 653d5ea6c5d9..1f5b5c8a7f72 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -86,15 +86,10 @@ unsigned int scsi_logging_level;
EXPORT_SYMBOL(scsi_logging_level);
#endif
-/* sd, scsi core and power management need to coordinate flushing async actions */
-ASYNC_DOMAIN(scsi_sd_probe_domain);
-EXPORT_SYMBOL(scsi_sd_probe_domain);
-
/*
- * Separate domain (from scsi_sd_probe_domain) to maximize the benefit of
- * asynchronous system resume operations. It is marked 'exclusive' to avoid
- * being included in the async_synchronize_full() that is invoked by
- * dpm_resume()
+ * Domain for asynchronous system resume operations. It is marked 'exclusive'
+ * to avoid being included in the async_synchronize_full() that is invoked by
+ * dpm_resume().
*/
ASYNC_DOMAIN_EXCLUSIVE(scsi_sd_pm_domain);
EXPORT_SYMBOL(scsi_sd_pm_domain);
@@ -821,7 +816,6 @@ static void __exit exit_scsi(void)
scsi_exit_devinfo();
scsi_exit_procfs();
scsi_exit_queue();
- async_unregister_domain(&scsi_sd_probe_domain);
}
subsys_initcall(init_scsi);
diff --git a/drivers/scsi/scsi_debugfs.h b/drivers/scsi/scsi_debugfs.h
index 951b043e82d0..d125d1bd4184 100644
--- a/drivers/scsi/scsi_debugfs.h
+++ b/drivers/scsi/scsi_debugfs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
struct request;
struct seq_file;
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index bfa569facd5b..1c470e31ae81 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -1055,7 +1055,7 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
struct scsi_device *sdev = scmd->device;
struct Scsi_Host *shost = sdev->host;
DECLARE_COMPLETION_ONSTACK(done);
- unsigned long timeleft = timeout;
+ unsigned long timeleft = timeout, delay;
struct scsi_eh_save ses;
const unsigned long stall_for = msecs_to_jiffies(100);
int rtn;
@@ -1066,7 +1066,29 @@ retry:
scsi_log_send(scmd);
scmd->scsi_done = scsi_eh_done;
- rtn = shost->hostt->queuecommand(shost, scmd);
+
+ /*
+ * Lock sdev->state_mutex to prevent scsi_device_quiesce() from changing
+ * the SCSI device state after we have examined it and before
+ * .queuecommand() is called.
+ */
+ mutex_lock(&sdev->state_mutex);
+ while (sdev->sdev_state == SDEV_BLOCK && timeleft > 0) {
+ mutex_unlock(&sdev->state_mutex);
+ SCSI_LOG_ERROR_RECOVERY(5, sdev_printk(KERN_DEBUG, sdev,
+ "%s: state %d <> %d\n", __func__, sdev->sdev_state,
+ SDEV_BLOCK));
+ delay = min(timeleft, stall_for);
+ timeleft -= delay;
+ msleep(jiffies_to_msecs(delay));
+ mutex_lock(&sdev->state_mutex);
+ }
+ if (sdev->sdev_state != SDEV_BLOCK)
+ rtn = shost->hostt->queuecommand(shost, scmd);
+ else
+ rtn = SCSI_MLQUEUE_DEVICE_BUSY;
+ mutex_unlock(&sdev->state_mutex);
+
if (rtn) {
if (timeleft > stall_for) {
scsi_eh_restore_cmnd(scmd, &ses);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 65d0a10c76ad..e1da8c70a266 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -40,6 +40,18 @@
#include "scsi_priv.h"
#include "scsi_logging.h"
+/*
+ * The size of integrity metadata is usually small, so one inline sg entry
+ * should cover the normal cases.
+ */
+#ifdef CONFIG_ARCH_NO_SG_CHAIN
+#define SCSI_INLINE_PROT_SG_CNT 0
+#define SCSI_INLINE_SG_CNT 0
+#else
+#define SCSI_INLINE_PROT_SG_CNT 1
+#define SCSI_INLINE_SG_CNT 2
+#endif
+
static struct kmem_cache *scsi_sdb_cache;
static struct kmem_cache *scsi_sense_cache;
static struct kmem_cache *scsi_sense_isadma_cache;
@@ -542,9 +554,11 @@ static void scsi_uninit_cmd(struct scsi_cmnd *cmd)
static void scsi_mq_free_sgtables(struct scsi_cmnd *cmd)
{
if (cmd->sdb.table.nents)
- sg_free_table_chained(&cmd->sdb.table, true);
+ sg_free_table_chained(&cmd->sdb.table,
+ SCSI_INLINE_SG_CNT);
if (scsi_prot_sg_count(cmd))
- sg_free_table_chained(&cmd->prot_sdb->table, true);
+ sg_free_table_chained(&cmd->prot_sdb->table,
+ SCSI_INLINE_PROT_SG_CNT);
}
static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
@@ -977,7 +991,8 @@ static blk_status_t scsi_init_sgtable(struct request *req,
* If sg table allocation fails, requeue request later.
*/
if (unlikely(sg_alloc_table_chained(&sdb->table,
- blk_rq_nr_phys_segments(req), sdb->table.sgl)))
+ blk_rq_nr_phys_segments(req), sdb->table.sgl,
+ SCSI_INLINE_SG_CNT)))
return BLK_STS_RESOURCE;
/*
@@ -1031,7 +1046,8 @@ blk_status_t scsi_init_io(struct scsi_cmnd *cmd)
ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);
if (sg_alloc_table_chained(&prot_sdb->table, ivecs,
- prot_sdb->table.sgl)) {
+ prot_sdb->table.sgl,
+ SCSI_INLINE_PROT_SG_CNT)) {
ret = BLK_STS_RESOURCE;
goto out_free_sgtables;
}
@@ -1542,9 +1558,9 @@ static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
}
/* Size in bytes of the sg-list stored in the scsi-mq command-private data. */
-static unsigned int scsi_mq_sgl_size(struct Scsi_Host *shost)
+static unsigned int scsi_mq_inline_sgl_size(struct Scsi_Host *shost)
{
- return min_t(unsigned int, shost->sg_tablesize, SG_CHUNK_SIZE) *
+ return min_t(unsigned int, shost->sg_tablesize, SCSI_INLINE_SG_CNT) *
sizeof(struct scatterlist);
}
@@ -1726,7 +1742,7 @@ static int scsi_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
if (scsi_host_get_prot(shost)) {
sg = (void *)cmd + sizeof(struct scsi_cmnd) +
shost->hostt->cmd_size;
- cmd->prot_sdb = (void *)sg + scsi_mq_sgl_size(shost);
+ cmd->prot_sdb = (void *)sg + scsi_mq_inline_sgl_size(shost);
}
return 0;
@@ -1820,10 +1836,11 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost)
{
unsigned int cmd_size, sgl_size;
- sgl_size = scsi_mq_sgl_size(shost);
+ sgl_size = scsi_mq_inline_sgl_size(shost);
cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size;
if (scsi_host_get_prot(shost))
- cmd_size += sizeof(struct scsi_data_buffer) + sgl_size;
+ cmd_size += sizeof(struct scsi_data_buffer) +
+ sizeof(struct scatterlist) * SCSI_INLINE_PROT_SG_CNT;
memset(&shost->tag_set, 0, sizeof(shost->tag_set));
shost->tag_set.ops = &scsi_mq_ops;
@@ -2616,10 +2633,6 @@ EXPORT_SYMBOL_GPL(scsi_internal_device_block_nowait);
* a legal transition). When the device is in this state, command processing
* is paused until the device leaves the SDEV_BLOCK state. See also
* scsi_internal_device_unblock().
- *
- * To do: avoid that scsi_send_eh_cmnd() calls queuecommand() after
- * scsi_internal_device_block() has blocked a SCSI device and also
- * remove the rport mutex lock and unlock calls from srp_queuecommand().
*/
static int scsi_internal_device_block(struct scsi_device *sdev)
{
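
The SCSI_INLINE_SG_CNT / SCSI_INLINE_PROT_SG_CNT definitions added above shrink the scatterlist space reserved inside every scsi_cmnd to two inline data entries and one inline protection entry (zero when the architecture cannot chain scatterlists); anything larger spills into the chained sg pool through the extra nents_first_chunk argument now passed to sg_alloc_table_chained() and sg_free_table_chained(). A rough per-command sizing sketch, assuming a 64-bit build where sizeof(struct scatterlist) is 32 bytes and the host template's cmd_size is 0:

/* Illustrative arithmetic only - the real numbers depend on the architecture,
 * CONFIG_DEBUG_SG and the host template's cmd_size. */
#define SIZEOF_SCATTERLIST	32	/* assumed: typical x86-64 */
#define SCSI_INLINE_SG_CNT	2
#define SCSI_INLINE_PROT_SG_CNT	1

/* new: inline data sg  = 2 * 32 = 64 bytes per command,
 *      inline prot sg  = sizeof(struct scsi_data_buffer) + 1 * 32 bytes;
 * old: min(sg_tablesize, SG_CHUNK_SIZE = 128) entries were reserved,
 *      i.e. up to 128 * 32 = 4096 bytes per command. */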
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index 48ee68059fe6..74ded5f3c236 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -176,11 +176,7 @@ static int scsi_bus_resume_common(struct device *dev,
static int scsi_bus_prepare(struct device *dev)
{
- if (scsi_is_sdev_device(dev)) {
- /* sd probing uses async_schedule. Wait until it finishes. */
- async_synchronize_full_domain(&scsi_sd_probe_domain);
-
- } else if (scsi_is_host_device(dev)) {
+ if (scsi_is_host_device(dev)) {
/* Wait until async scanning is finished */
scsi_complete_async_scans();
}
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 5f21547b2ad2..cc2859d76d81 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -175,7 +175,6 @@ static inline void scsi_autopm_put_host(struct Scsi_Host *h) {}
#endif /* CONFIG_PM */
extern struct async_domain scsi_sd_pm_domain;
-extern struct async_domain scsi_sd_probe_domain;
/* scsi_dh.c */
#ifdef CONFIG_SCSI_DH
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index dbb206c90ecf..64c96c7828ee 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -767,8 +767,13 @@ store_state_field(struct device *dev, struct device_attribute *attr,
break;
}
}
- if (!state)
+ switch (state) {
+ case SDEV_RUNNING:
+ case SDEV_OFFLINE:
+ break;
+ default:
return -EINVAL;
+ }
mutex_lock(&sdev->state_mutex);
ret = scsi_device_set_state(sdev, state);
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 118a687709ed..2732fa65119c 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -3,9 +3,6 @@
* FiberChannel transport specific attributes exported to sysfs.
*
* Copyright (c) 2003 Silicon Graphics, Inc. All rights reserved.
- *
- * ========
- *
* Copyright (C) 2004-2007 James Smart, Emulex Corporation
* Rewrite for host, target, device, and remote port attributes,
* statistics, and service functions...
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index a3406bd62391..149d406aacc9 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -568,6 +568,7 @@ static struct scsi_driver sd_template = {
.name = "sd",
.owner = THIS_MODULE,
.probe = sd_probe,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.remove = sd_remove,
.shutdown = sd_shutdown,
.pm = &sd_pm_ops,
@@ -3252,69 +3253,6 @@ static int sd_format_disk_name(char *prefix, int index, char *buf, int buflen)
return 0;
}
-/*
- * The asynchronous part of sd_probe
- */
-static void sd_probe_async(void *data, async_cookie_t cookie)
-{
- struct scsi_disk *sdkp = data;
- struct scsi_device *sdp;
- struct gendisk *gd;
- u32 index;
- struct device *dev;
-
- sdp = sdkp->device;
- gd = sdkp->disk;
- index = sdkp->index;
- dev = &sdp->sdev_gendev;
-
- gd->major = sd_major((index & 0xf0) >> 4);
- gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00);
-
- gd->fops = &sd_fops;
- gd->private_data = &sdkp->driver;
- gd->queue = sdkp->device->request_queue;
-
- /* defaults, until the device tells us otherwise */
- sdp->sector_size = 512;
- sdkp->capacity = 0;
- sdkp->media_present = 1;
- sdkp->write_prot = 0;
- sdkp->cache_override = 0;
- sdkp->WCE = 0;
- sdkp->RCD = 0;
- sdkp->ATO = 0;
- sdkp->first_scan = 1;
- sdkp->max_medium_access_timeouts = SD_MAX_MEDIUM_TIMEOUTS;
-
- sd_revalidate_disk(gd);
-
- gd->flags = GENHD_FL_EXT_DEVT;
- if (sdp->removable) {
- gd->flags |= GENHD_FL_REMOVABLE;
- gd->events |= DISK_EVENT_MEDIA_CHANGE;
- gd->event_flags = DISK_EVENT_FLAG_POLL | DISK_EVENT_FLAG_UEVENT;
- }
-
- blk_pm_runtime_init(sdp->request_queue, dev);
- device_add_disk(dev, gd, NULL);
- if (sdkp->capacity)
- sd_dif_config_host(sdkp);
-
- sd_revalidate_disk(gd);
-
- if (sdkp->security) {
- sdkp->opal_dev = init_opal_dev(sdp, &sd_sec_submit);
- if (sdkp->opal_dev)
- sd_printk(KERN_NOTICE, sdkp, "supports TCG Opal\n");
- }
-
- sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
- sdp->removable ? "removable " : "");
- scsi_autopm_put_device(sdp);
- put_device(&sdkp->dev);
-}
-
/**
* sd_probe - called during driver initialization and whenever a
* new scsi device is attached to the system. It is called once
@@ -3404,8 +3342,50 @@ static int sd_probe(struct device *dev)
get_device(dev);
dev_set_drvdata(dev, sdkp);
- get_device(&sdkp->dev); /* prevent release before async_schedule */
- async_schedule_domain(sd_probe_async, sdkp, &scsi_sd_probe_domain);
+ gd->major = sd_major((index & 0xf0) >> 4);
+ gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00);
+
+ gd->fops = &sd_fops;
+ gd->private_data = &sdkp->driver;
+ gd->queue = sdkp->device->request_queue;
+
+ /* defaults, until the device tells us otherwise */
+ sdp->sector_size = 512;
+ sdkp->capacity = 0;
+ sdkp->media_present = 1;
+ sdkp->write_prot = 0;
+ sdkp->cache_override = 0;
+ sdkp->WCE = 0;
+ sdkp->RCD = 0;
+ sdkp->ATO = 0;
+ sdkp->first_scan = 1;
+ sdkp->max_medium_access_timeouts = SD_MAX_MEDIUM_TIMEOUTS;
+
+ sd_revalidate_disk(gd);
+
+ gd->flags = GENHD_FL_EXT_DEVT;
+ if (sdp->removable) {
+ gd->flags |= GENHD_FL_REMOVABLE;
+ gd->events |= DISK_EVENT_MEDIA_CHANGE;
+ gd->event_flags = DISK_EVENT_FLAG_POLL | DISK_EVENT_FLAG_UEVENT;
+ }
+
+ blk_pm_runtime_init(sdp->request_queue, dev);
+ device_add_disk(dev, gd, NULL);
+ if (sdkp->capacity)
+ sd_dif_config_host(sdkp);
+
+ sd_revalidate_disk(gd);
+
+ if (sdkp->security) {
+ sdkp->opal_dev = init_opal_dev(sdp, &sd_sec_submit);
+ if (sdkp->opal_dev)
+ sd_printk(KERN_NOTICE, sdkp, "supports TCG Opal\n");
+ }
+
+ sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
+ sdp->removable ? "removable " : "");
+ scsi_autopm_put_device(sdp);
return 0;
@@ -3441,7 +3421,6 @@ static int sd_remove(struct device *dev)
scsi_autopm_get_device(sdkp->device);
async_synchronize_full_domain(&scsi_sd_pm_domain);
- async_synchronize_full_domain(&scsi_sd_probe_domain);
device_del(&sdkp->dev);
del_gendisk(sdkp->disk);
sd_shutdown(dev);
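The sd.c change above removes the driver's private async_schedule_domain() path and instead sets .probe_type so the driver core itself runs the probe asynchronously. A minimal sketch of how an upper-level SCSI driver opts in; the foo_* names are hypothetical:

#include <linux/device.h>
#include <linux/module.h>
#include <scsi/scsi_driver.h>

static int foo_probe(struct device *dev)  { return 0; }	/* hypothetical */
static int foo_remove(struct device *dev) { return 0; }	/* hypothetical */

static struct scsi_driver foo_template = {
	.gendrv = {
		.name		= "foo",
		.owner		= THIS_MODULE,
		.probe		= foo_probe,
		/* let the driver core run foo_probe() from its async domain */
		.probe_type	= PROBE_PREFER_ASYNCHRONOUS,
		.remove		= foo_remove,
	},
};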
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index 60f01a7b728c..c2afba2a5414 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -3,12 +3,7 @@
* SCSI Enclosure Services
*
* Copyright (C) 2008 James Bottomley <James.Bottomley@HansenPartnership.com>
- *
-**-----------------------------------------------------------------------------
-**
-**
-**-----------------------------------------------------------------------------
-*/
+ */
#include <linux/slab.h>
#include <linux/module.h>
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index baada5b50bb1..e3266a64a477 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -228,7 +228,6 @@ static DEFINE_IDR(st_index_idr);
-#include "osst_detect.h"
#ifndef SIGS_FROM_OSST
#define SIGS_FROM_OSST \
{"OnStream", "SC-", "", "osst"}, \
@@ -4267,9 +4266,10 @@ static int st_probe(struct device *dev)
if (SDp->type != TYPE_TAPE)
return -ENODEV;
if ((stp = st_incompatible(SDp))) {
- sdev_printk(KERN_INFO, SDp, "Found incompatible tape\n");
sdev_printk(KERN_INFO, SDp,
- "st: The suggested driver is %s.\n", stp);
+ "OnStream tapes are no longer supported;\n");
+ sdev_printk(KERN_INFO, SDp,
+ "please mail to linux-scsi@vger.kernel.org.\n");
return -ENODEV;
}
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index b89269120a2d..c2b6a0ca6933 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -375,6 +375,7 @@ enum storvsc_request_type {
static int storvsc_ringbuffer_size = (128 * 1024);
static u32 max_outstanding_req_per_channel;
+static int storvsc_change_queue_depth(struct scsi_device *sdev, int queue_depth);
static int storvsc_vcpus_per_sub_channel = 4;
@@ -1699,6 +1700,7 @@ static struct scsi_host_template scsi_driver = {
.dma_boundary = PAGE_SIZE-1,
.no_write_same = 1,
.track_queue_depth = 1,
+ .change_queue_depth = storvsc_change_queue_depth,
};
enum {
@@ -1905,6 +1907,15 @@ err_out0:
return ret;
}
+/* Change a scsi target's queue depth */
+static int storvsc_change_queue_depth(struct scsi_device *sdev, int queue_depth)
+{
+ if (queue_depth > scsi_driver.can_queue)
+ queue_depth = scsi_driver.can_queue;
+
+ return scsi_change_queue_depth(sdev, queue_depth);
+}
+
static int storvsc_remove(struct hv_device *dev)
{
struct storvsc_device *stor_device = hv_get_drvdata(dev);
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index b4d1b5c22987..ee4b1da1e223 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -3,6 +3,7 @@
* Copyright (c) 2013-2016, Linux Foundation. All rights reserved.
*/
+#include <linux/acpi.h>
#include <linux/time.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -161,6 +162,9 @@ static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
int err = 0;
struct device *dev = host->hba->dev;
+ if (has_acpi_companion(dev))
+ return 0;
+
err = ufs_qcom_host_clk_get(dev, "rx_lane0_sync_clk",
&host->rx_l0_sync_clk, false);
if (err)
@@ -1127,9 +1131,13 @@ static int ufs_qcom_init(struct ufs_hba *hba)
__func__, err);
goto out_variant_clear;
} else if (IS_ERR(host->generic_phy)) {
- err = PTR_ERR(host->generic_phy);
- dev_err(dev, "%s: PHY get failed %d\n", __func__, err);
- goto out_variant_clear;
+ if (has_acpi_companion(dev)) {
+ host->generic_phy = NULL;
+ } else {
+ err = PTR_ERR(host->generic_phy);
+ dev_err(dev, "%s: PHY get failed %d\n", __func__, err);
+ goto out_variant_clear;
+ }
}
err = ufs_qcom_bus_register(host);
@@ -1599,6 +1607,14 @@ static const struct of_device_id ufs_qcom_of_match[] = {
};
MODULE_DEVICE_TABLE(of, ufs_qcom_of_match);
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id ufs_qcom_acpi_match[] = {
+ { "QCOM24A5" },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, ufs_qcom_acpi_match);
+#endif
+
static const struct dev_pm_ops ufs_qcom_pm_ops = {
.suspend = ufshcd_pltfrm_suspend,
.resume = ufshcd_pltfrm_resume,
@@ -1615,6 +1631,7 @@ static struct platform_driver ufs_qcom_pltform = {
.name = "ufshcd-qcom",
.pm = &ufs_qcom_pm_ops,
.of_match_table = of_match_ptr(ufs_qcom_of_match),
+ .acpi_match_table = ACPI_PTR(ufs_qcom_acpi_match),
},
};
module_platform_driver(ufs_qcom_pltform);
diff --git a/drivers/scsi/ufs/ufs-sysfs.c b/drivers/scsi/ufs/ufs-sysfs.c
index 8d9332bb7d0c..f478685122ff 100644
--- a/drivers/scsi/ufs/ufs-sysfs.c
+++ b/drivers/scsi/ufs/ufs-sysfs.c
@@ -122,7 +122,7 @@ static void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
{
unsigned long flags;
- if (!(hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT))
+ if (!ufshcd_is_auto_hibern8_supported(hba))
return;
spin_lock_irqsave(hba->host->host_lock, flags);
@@ -164,7 +164,7 @@ static ssize_t auto_hibern8_show(struct device *dev,
{
struct ufs_hba *hba = dev_get_drvdata(dev);
- if (!(hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT))
+ if (!ufshcd_is_auto_hibern8_supported(hba))
return -EOPNOTSUPP;
return snprintf(buf, PAGE_SIZE, "%d\n", ufshcd_ahit_to_us(hba->ahit));
@@ -177,7 +177,7 @@ static ssize_t auto_hibern8_store(struct device *dev,
struct ufs_hba *hba = dev_get_drvdata(dev);
unsigned int timer;
- if (!(hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT))
+ if (!ufshcd_is_auto_hibern8_supported(hba))
return -EOPNOTSUPP;
if (kstrtouint(buf, 0, &timer))
diff --git a/drivers/scsi/ufs/ufs_bsg.c b/drivers/scsi/ufs/ufs_bsg.c
index 869e71f861d6..a9344eb4e047 100644
--- a/drivers/scsi/ufs/ufs_bsg.c
+++ b/drivers/scsi/ufs/ufs_bsg.c
@@ -122,7 +122,7 @@ static int ufs_bsg_request(struct bsg_job *job)
memcpy(&uc, &bsg_request->upiu_req.uc, UIC_CMD_SIZE);
ret = ufshcd_send_uic_cmd(hba, &uc);
if (ret)
- dev_dbg(hba->dev,
+ dev_err(hba->dev,
"send uic cmd: error code %d\n", ret);
memcpy(&bsg_reply->upiu_rsp.uc, &uc, UIC_CMD_SIZE);
@@ -149,7 +149,9 @@ static int ufs_bsg_request(struct bsg_job *job)
out:
bsg_reply->result = ret;
job->reply_len = sizeof(struct ufs_bsg_reply);
- bsg_job_done(job, ret, bsg_reply->reply_payload_rcv_len);
+ /* complete the job here only if no error */
+ if (ret == 0)
+ bsg_job_done(job, ret, bsg_reply->reply_payload_rcv_len);
return ret;
}
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
index ffe6f82182ba..3b19de3ae9a3 100644
--- a/drivers/scsi/ufs/ufshcd-pci.c
+++ b/drivers/scsi/ufs/ufshcd-pci.c
@@ -200,6 +200,8 @@ static const struct dev_pm_ops ufshcd_pci_pm_ops = {
static const struct pci_device_id ufshcd_pci_tbl[] = {
{ PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VDEVICE(INTEL, 0x9DFA), (kernel_ulong_t)&ufs_intel_cnl_hba_vops },
+ { PCI_VDEVICE(INTEL, 0x4B41), (kernel_ulong_t)&ufs_intel_cnl_hba_vops },
+ { PCI_VDEVICE(INTEL, 0x4B43), (kernel_ulong_t)&ufs_intel_cnl_hba_vops },
{ } /* terminate list */
};
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 3fe3029617a8..04d3686511c8 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -3908,7 +3908,7 @@ static void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
{
unsigned long flags;
- if (!(hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT) || !hba->ahit)
+ if (!ufshcd_is_auto_hibern8_supported(hba) || !hba->ahit)
return;
spin_lock_irqsave(hba->host->host_lock, flags);
@@ -5255,6 +5255,7 @@ static void ufshcd_err_handler(struct work_struct *work)
goto skip_err_handling;
}
if ((hba->saved_err & INT_FATAL_ERRORS) ||
+ (hba->saved_err & UFSHCD_UIC_HIBERN8_MASK) ||
((hba->saved_err & UIC_ERROR) &&
(hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR |
UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
@@ -5414,6 +5415,23 @@ static void ufshcd_update_uic_error(struct ufs_hba *hba)
__func__, hba->uic_error);
}
+static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
+ u32 intr_mask)
+{
+ if (!ufshcd_is_auto_hibern8_supported(hba))
+ return false;
+
+ if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK))
+ return false;
+
+ if (hba->active_uic_cmd &&
+ (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER ||
+ hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT))
+ return false;
+
+ return true;
+}
+
/**
* ufshcd_check_errors - Check for errors that need s/w attention
* @hba: per-adapter instance
@@ -5432,6 +5450,15 @@ static void ufshcd_check_errors(struct ufs_hba *hba)
queue_eh_work = true;
}
+ if (hba->errors & UFSHCD_UIC_HIBERN8_MASK) {
+ dev_err(hba->dev,
+ "%s: Auto Hibern8 %s failed - status: 0x%08x, upmcrs: 0x%08x\n",
+ __func__, (hba->errors & UIC_HIBERNATE_ENTER) ?
+ "Enter" : "Exit",
+ hba->errors, ufshcd_get_upmcrs(hba));
+ queue_eh_work = true;
+ }
+
if (queue_eh_work) {
/*
* update the transfer error masks to sticky bits, let's do this
@@ -5494,6 +5521,10 @@ static void ufshcd_tmc_handler(struct ufs_hba *hba)
static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
{
hba->errors = UFSHCD_ERROR_MASK & intr_status;
+
+ if (ufshcd_is_auto_hibern8_error(hba, intr_status))
+ hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);
+
if (hba->errors)
ufshcd_check_errors(hba);
@@ -8313,7 +8344,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
UIC_LINK_HIBERN8_STATE);
/* Set the default auto-hiberate idle timer value to 150 ms */
- if (hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT) {
+ if (ufshcd_is_auto_hibern8_supported(hba) && !hba->ahit) {
hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
}
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index ecfa898b9ccc..994d73d03207 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -740,6 +740,11 @@ return true;
#endif
}
+static inline bool ufshcd_is_auto_hibern8_supported(struct ufs_hba *hba)
+{
+ return (hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT);
+}
+
#define ufshcd_writel(hba, val, reg) \
writel((val), (hba)->mmio_base + (reg))
#define ufshcd_readl(hba, reg) \
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index 6fa889de5ee5..dbb75cd28dc8 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -144,8 +144,10 @@ enum {
#define CONTROLLER_FATAL_ERROR 0x10000
#define SYSTEM_BUS_FATAL_ERROR 0x20000
-#define UFSHCD_UIC_PWR_MASK (UIC_HIBERNATE_ENTER |\
- UIC_HIBERNATE_EXIT |\
+#define UFSHCD_UIC_HIBERN8_MASK (UIC_HIBERNATE_ENTER |\
+ UIC_HIBERNATE_EXIT)
+
+#define UFSHCD_UIC_PWR_MASK (UFSHCD_UIC_HIBERN8_MASK |\
UIC_POWER_MODE)
#define UFSHCD_UIC_MASK (UIC_COMMAND_COMPL | UFSHCD_UIC_PWR_MASK)
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 13f1b3b9923a..1705398b026a 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -74,9 +74,6 @@ struct virtio_scsi {
u32 num_queues;
- /* If the affinity hint is set for virtqueues */
- bool affinity_hint_set;
-
struct hlist_node node;
/* Protected by event_vq lock */
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index ecee4b3ff073..70008816c91f 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -335,7 +335,7 @@ static void pvscsi_create_sg(struct pvscsi_ctx *ctx,
BUG_ON(count > PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT);
sge = &ctx->sgl->sge[0];
- for (i = 0; i < count; i++, sg++) {
+ for (i = 0; i < count; i++, sg = sg_next(sg)) {
sge[i].addr = sg_dma_address(sg);
sge[i].length = sg_dma_len(sg);
sge[i].flags = 0;
@@ -763,6 +763,7 @@ static int pvscsi_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd
struct pvscsi_adapter *adapter = shost_priv(host);
struct pvscsi_ctx *ctx;
unsigned long flags;
+ unsigned char op;
spin_lock_irqsave(&adapter->hw_lock, flags);
@@ -775,13 +776,14 @@ static int pvscsi_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd
}
cmd->scsi_done = done;
+ op = cmd->cmnd[0];
dev_dbg(&cmd->device->sdev_gendev,
- "queued cmd %p, ctx %p, op=%x\n", cmd, ctx, cmd->cmnd[0]);
+ "queued cmd %p, ctx %p, op=%x\n", cmd, ctx, op);
spin_unlock_irqrestore(&adapter->hw_lock, flags);
- pvscsi_kick_io(adapter, cmd->cmnd[0]);
+ pvscsi_kick_io(adapter, op);
return 0;
}
diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c
index f965a3ee9ce5..fb7b289fa09f 100644
--- a/drivers/scsi/wd33c93.c
+++ b/drivers/scsi/wd33c93.c
@@ -735,7 +735,7 @@ transfer_bytes(const wd33c93_regs regs, struct scsi_cmnd *cmd,
* source or destination for THIS transfer.
*/
if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) {
- ++cmd->SCp.buffer;
+ cmd->SCp.buffer = sg_next(cmd->SCp.buffer);
--cmd->SCp.buffers_residual;
cmd->SCp.this_residual = cmd->SCp.buffer->length;
cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
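The vmw_pvscsi and wd33c93 hunks above replace plain pointer arithmetic with sg_next(), since a scatterlist may be chained across separately allocated chunks and sg++ can walk off the end of a chunk. A minimal sketch of the safe traversal idioms, assuming an already-mapped table and a hypothetical foo_walk_sgl() helper:

#include <linux/printk.h>
#include <linux/scatterlist.h>

static void foo_walk_sgl(struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int i;

	/* Preferred form: for_each_sg() hides the chaining entirely. */
	for_each_sg(sgl, sg, nents, i)
		pr_debug("seg %d: dma %pad len %u\n",
			 i, &sg_dma_address(sg), sg_dma_len(sg));

	/* Open-coded equivalent: advance with sg_next(), never with sg++. */
	for (i = 0, sg = sgl; i < nents; i++, sg = sg_next(sg))
		;
}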
diff --git a/drivers/scsi/wd719x.c b/drivers/scsi/wd719x.c
index c2f40068f235..edc8a139a60d 100644
--- a/drivers/scsi/wd719x.c
+++ b/drivers/scsi/wd719x.c
@@ -108,8 +108,15 @@ static inline int wd719x_wait_done(struct wd719x *wd, int timeout)
}
if (status != WD719X_INT_NOERRORS) {
+ u8 sue = wd719x_readb(wd, WD719X_AMR_SCB_ERROR);
+ /* we get this after wd719x_dev_reset, it's not an error */
+ if (sue == WD719X_SUE_TERM)
+ return 0;
+ /* we get this after wd719x_bus_reset, it's not an error */
+ if (sue == WD719X_SUE_RESET)
+ return 0;
dev_err(&wd->pdev->dev, "direct command failed, status 0x%02x, SUE 0x%02x\n",
- status, wd719x_readb(wd, WD719X_AMR_SCB_ERROR));
+ status, sue);
return -EIO;
}
@@ -128,8 +135,10 @@ static int wd719x_direct_cmd(struct wd719x *wd, u8 opcode, u8 dev, u8 lun,
if (wd719x_wait_ready(wd))
return -ETIMEDOUT;
- /* make sure we get NO interrupts */
- dev |= WD719X_DISABLE_INT;
+ /* disable interrupts except for RESET/ABORT (it breaks them) */
+ if (opcode != WD719X_CMD_BUSRESET && opcode != WD719X_CMD_ABORT &&
+ opcode != WD719X_CMD_ABORT_TAG && opcode != WD719X_CMD_RESET)
+ dev |= WD719X_DISABLE_INT;
wd719x_writeb(wd, WD719X_AMR_CMD_PARAM, dev);
wd719x_writeb(wd, WD719X_AMR_CMD_PARAM_2, lun);
wd719x_writeb(wd, WD719X_AMR_CMD_PARAM_3, tag);
@@ -465,6 +474,7 @@ static int wd719x_abort(struct scsi_cmnd *cmd)
spin_lock_irqsave(wd->sh->host_lock, flags);
result = wd719x_direct_cmd(wd, action, cmd->device->id,
cmd->device->lun, cmd->tag, scb->phys, 0);
+ wd719x_finish_cmd(scb, DID_ABORT);
spin_unlock_irqrestore(wd->sh->host_lock, flags);
if (result)
return FAILED;
@@ -477,6 +487,7 @@ static int wd719x_reset(struct scsi_cmnd *cmd, u8 opcode, u8 device)
int result;
unsigned long flags;
struct wd719x *wd = shost_priv(cmd->device->host);
+ struct wd719x_scb *scb, *tmp;
dev_info(&wd->pdev->dev, "%s reset requested\n",
(opcode == WD719X_CMD_BUSRESET) ? "bus" : "device");
@@ -484,6 +495,12 @@ static int wd719x_reset(struct scsi_cmnd *cmd, u8 opcode, u8 device)
spin_lock_irqsave(wd->sh->host_lock, flags);
result = wd719x_direct_cmd(wd, opcode, device, 0, 0, 0,
WD719X_WAIT_FOR_SCSI_RESET);
+ /* flush all SCBs (or all for a device if dev_reset) */
+ list_for_each_entry_safe(scb, tmp, &wd->active_scbs, list) {
+ if (opcode == WD719X_CMD_BUSRESET ||
+ scb->cmd->device->id == device)
+ wd719x_finish_cmd(scb, DID_RESET);
+ }
spin_unlock_irqrestore(wd->sh->host_lock, flags);
if (result)
return FAILED;
@@ -506,22 +523,23 @@ static int wd719x_host_reset(struct scsi_cmnd *cmd)
struct wd719x *wd = shost_priv(cmd->device->host);
struct wd719x_scb *scb, *tmp;
unsigned long flags;
- int result;
dev_info(&wd->pdev->dev, "host reset requested\n");
spin_lock_irqsave(wd->sh->host_lock, flags);
- /* Try to reinit the RISC */
- if (wd719x_chip_init(wd) == 0)
- result = SUCCESS;
- else
- result = FAILED;
+ /* stop the RISC */
+ if (wd719x_direct_cmd(wd, WD719X_CMD_SLEEP, 0, 0, 0, 0,
+ WD719X_WAIT_FOR_RISC))
+ dev_warn(&wd->pdev->dev, "RISC sleep command failed\n");
+ /* disable RISC */
+ wd719x_writeb(wd, WD719X_PCI_MODE_SELECT, 0);
/* flush all SCBs */
list_for_each_entry_safe(scb, tmp, &wd->active_scbs, list)
- wd719x_finish_cmd(scb, result);
+ wd719x_finish_cmd(scb, DID_RESET);
spin_unlock_irqrestore(wd->sh->host_lock, flags);
- return result;
+ /* Try to reinit the RISC */
+ return wd719x_chip_init(wd) == 0 ? SUCCESS : FAILED;
}
static int wd719x_biosparam(struct scsi_device *sdev, struct block_device *bdev,
@@ -673,7 +691,7 @@ static irqreturn_t wd719x_interrupt(int irq, void *dev_id)
else
dev_err(&wd->pdev->dev, "card returned invalid SCB pointer\n");
} else
- dev_warn(&wd->pdev->dev, "direct command 0x%x completed\n",
+ dev_dbg(&wd->pdev->dev, "direct command 0x%x completed\n",
regs.bytes.OPC);
break;
case WD719X_INT_PIOREADY:
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
index 524ecdc2a9bb..2ec355003524 100644
--- a/drivers/soc/Makefile
+++ b/drivers/soc/Makefile
@@ -22,7 +22,7 @@ obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/
obj-$(CONFIG_SOC_SAMSUNG) += samsung/
obj-y += sunxi/
obj-$(CONFIG_ARCH_TEGRA) += tegra/
-obj-$(CONFIG_SOC_TI) += ti/
+obj-y += ti/
obj-$(CONFIG_ARCH_U8500) += ux500/
obj-$(CONFIG_PLAT_VERSATILE) += versatile/
obj-y += xilinx/
diff --git a/drivers/soc/imx/soc-imx8.c b/drivers/soc/imx/soc-imx8.c
index fc6429f9170a..b1bd8e2543ac 100644
--- a/drivers/soc/imx/soc-imx8.c
+++ b/drivers/soc/imx/soc-imx8.c
@@ -103,6 +103,9 @@ static int __init imx8_soc_init(void)
if (IS_ERR(soc_dev))
goto free_rev;
+ if (IS_ENABLED(CONFIG_ARM_IMX_CPUFREQ_DT))
+ platform_device_register_simple("imx-cpufreq-dt", -1, NULL, 0);
+
return 0;
free_rev:
diff --git a/drivers/soc/ti/Kconfig b/drivers/soc/ti/Kconfig
index ea0859f7b185..d7d50d48d05d 100644
--- a/drivers/soc/ti/Kconfig
+++ b/drivers/soc/ti/Kconfig
@@ -75,10 +75,10 @@ config TI_SCI_PM_DOMAINS
called ti_sci_pm_domains. Note this is needed early in boot before
rootfs may be available.
+endif # SOC_TI
+
config TI_SCI_INTA_MSI_DOMAIN
bool
select GENERIC_MSI_IRQ_DOMAIN
help
Driver to enable Interrupt Aggregator specific MSI Domain.
-
-endif # SOC_TI
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 30a40280c157..3a1d8f1170de 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -120,7 +120,7 @@ config SPI_AXI_SPI_ENGINE
config SPI_BCM2835
tristate "BCM2835 SPI controller"
depends on GPIOLIB
- depends on ARCH_BCM2835 || COMPILE_TEST
+ depends on ARCH_BCM2835 || ARCH_BRCMSTB || COMPILE_TEST
help
This selects a driver for the Broadcom BCM2835 SPI master.
@@ -131,7 +131,7 @@ config SPI_BCM2835
config SPI_BCM2835AUX
tristate "BCM2835 SPI auxiliary controller"
- depends on (ARCH_BCM2835 && GPIOLIB) || COMPILE_TEST
+ depends on ((ARCH_BCM2835 || ARCH_BRCMSTB) && GPIOLIB) || COMPILE_TEST
help
This selects a driver for the Broadcom BCM2835 SPI aux master.
@@ -733,6 +733,16 @@ config SPI_SUN6I
help
This enables using the SPI controller on the Allwinner A31 SoCs.
+config SPI_SYNQUACER
+ tristate "Socionext's SynQuacer HighSpeed SPI controller"
+ depends on ARCH_SYNQUACER || COMPILE_TEST
+ help
+ SPI driver for Socionext's High speed SPI controller which provides
+ various operating modes for interfacing to serial peripheral devices
+ that use the de-facto standard SPI protocol.
+
+ It also supports the new dual-bit and quad-bit SPI protocol.
+
config SPI_MXIC
tristate "Macronix MX25F0A SPI controller"
depends on SPI_MASTER
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index f2f78d03dc28..63dcab552bcb 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -106,6 +106,7 @@ obj-$(CONFIG_SPI_STM32_QSPI) += spi-stm32-qspi.o
obj-$(CONFIG_SPI_ST_SSC4) += spi-st-ssc4.o
obj-$(CONFIG_SPI_SUN4I) += spi-sun4i.o
obj-$(CONFIG_SPI_SUN6I) += spi-sun6i.o
+obj-$(CONFIG_SPI_SYNQUACER) += spi-synquacer.o
obj-$(CONFIG_SPI_TEGRA114) += spi-tegra114.o
obj-$(CONFIG_SPI_TEGRA20_SFLASH) += spi-tegra20-sflash.o
obj-$(CONFIG_SPI_TEGRA20_SLINK) += spi-tegra20-slink.o
diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
index 9f24d5f0b431..6a7d7b553d95 100644
--- a/drivers/spi/atmel-quadspi.c
+++ b/drivers/spi/atmel-quadspi.c
@@ -151,6 +151,7 @@ struct atmel_qspi {
const struct atmel_qspi_caps *caps;
u32 pending;
u32 mr;
+ u32 scr;
struct completion cmd_completion;
};
@@ -382,7 +383,7 @@ static int atmel_qspi_setup(struct spi_device *spi)
struct spi_controller *ctrl = spi->master;
struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
unsigned long src_rate;
- u32 scr, scbr;
+ u32 scbr;
if (ctrl->busy)
return -EBUSY;
@@ -399,13 +400,13 @@ static int atmel_qspi_setup(struct spi_device *spi)
if (scbr > 0)
scbr--;
- scr = QSPI_SCR_SCBR(scbr);
- writel_relaxed(scr, aq->regs + QSPI_SCR);
+ aq->scr = QSPI_SCR_SCBR(scbr);
+ writel_relaxed(aq->scr, aq->regs + QSPI_SCR);
return 0;
}
-static int atmel_qspi_init(struct atmel_qspi *aq)
+static void atmel_qspi_init(struct atmel_qspi *aq)
{
/* Reset the QSPI controller */
writel_relaxed(QSPI_CR_SWRST, aq->regs + QSPI_CR);
@@ -416,8 +417,6 @@ static int atmel_qspi_init(struct atmel_qspi *aq)
/* Enable the QSPI controller */
writel_relaxed(QSPI_CR_QSPIEN, aq->regs + QSPI_CR);
-
- return 0;
}
static irqreturn_t atmel_qspi_interrupt(int irq, void *dev_id)
@@ -536,9 +535,7 @@ static int atmel_qspi_probe(struct platform_device *pdev)
if (err)
goto disable_qspick;
- err = atmel_qspi_init(aq);
- if (err)
- goto disable_qspick;
+ atmel_qspi_init(aq);
err = spi_register_controller(ctrl);
if (err)
@@ -587,7 +584,11 @@ static int __maybe_unused atmel_qspi_resume(struct device *dev)
clk_prepare_enable(aq->pclk);
clk_prepare_enable(aq->qspick);
- return atmel_qspi_init(aq);
+ atmel_qspi_init(aq);
+
+ writel_relaxed(aq->scr, aq->regs + QSPI_SCR);
+
+ return 0;
}
static SIMPLE_DEV_PM_OPS(atmel_qspi_pm_ops, atmel_qspi_suspend,
diff --git a/drivers/spi/spi-at91-usart.c b/drivers/spi/spi-at91-usart.c
index f763e14bdf12..a40bb2ef89dc 100644
--- a/drivers/spi/spi-at91-usart.c
+++ b/drivers/spi/spi-at91-usart.c
@@ -8,9 +8,12 @@
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-direction.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of_platform.h>
#include <linux/of_gpio.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
@@ -59,6 +62,8 @@
#define US_INIT \
(US_MR_SPI_MASTER | US_MR_CHRL | US_MR_CLKO | US_MR_WRDBT)
+#define US_DMA_MIN_BYTES 16
+#define US_DMA_TIMEOUT (msecs_to_jiffies(1000))
/* Register access macros */
#define at91_usart_spi_readl(port, reg) \
@@ -72,14 +77,19 @@
writeb_relaxed((value), (port)->regs + US_##reg)
struct at91_usart_spi {
+ struct platform_device *mpdev;
struct spi_transfer *current_transfer;
void __iomem *regs;
struct device *dev;
struct clk *clk;
+ struct completion xfer_completion;
+
/*used in interrupt to protect data reading*/
spinlock_t lock;
+ phys_addr_t phybase;
+
int irq;
unsigned int current_tx_remaining_bytes;
unsigned int current_rx_remaining_bytes;
@@ -88,8 +98,182 @@ struct at91_usart_spi {
u32 status;
bool xfer_failed;
+ bool use_dma;
};
+static void dma_callback(void *data)
+{
+ struct spi_controller *ctlr = data;
+ struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);
+
+ at91_usart_spi_writel(aus, IER, US_IR_RXRDY);
+ aus->current_rx_remaining_bytes = 0;
+ complete(&aus->xfer_completion);
+}
+
+static bool at91_usart_spi_can_dma(struct spi_controller *ctrl,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct at91_usart_spi *aus = spi_master_get_devdata(ctrl);
+
+ return aus->use_dma && xfer->len >= US_DMA_MIN_BYTES;
+}
+
+static int at91_usart_spi_configure_dma(struct spi_controller *ctlr,
+ struct at91_usart_spi *aus)
+{
+ struct dma_slave_config slave_config;
+ struct device *dev = &aus->mpdev->dev;
+ phys_addr_t phybase = aus->phybase;
+ dma_cap_mask_t mask;
+ int err = 0;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ ctlr->dma_tx = dma_request_slave_channel_reason(dev, "tx");
+ if (IS_ERR_OR_NULL(ctlr->dma_tx)) {
+ if (IS_ERR(ctlr->dma_tx)) {
+ err = PTR_ERR(ctlr->dma_tx);
+ goto at91_usart_spi_error_clear;
+ }
+
+ dev_dbg(dev,
+ "DMA TX channel not available, SPI unable to use DMA\n");
+ err = -EBUSY;
+ goto at91_usart_spi_error_clear;
+ }
+
+ ctlr->dma_rx = dma_request_slave_channel_reason(dev, "rx");
+ if (IS_ERR_OR_NULL(ctlr->dma_rx)) {
+ if (IS_ERR(ctlr->dma_rx)) {
+ err = PTR_ERR(ctlr->dma_rx);
+ goto at91_usart_spi_error;
+ }
+
+ dev_dbg(dev,
+ "DMA RX channel not available, SPI unable to use DMA\n");
+ err = -EBUSY;
+ goto at91_usart_spi_error;
+ }
+
+ slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ slave_config.dst_addr = (dma_addr_t)phybase + US_THR;
+ slave_config.src_addr = (dma_addr_t)phybase + US_RHR;
+ slave_config.src_maxburst = 1;
+ slave_config.dst_maxburst = 1;
+ slave_config.device_fc = false;
+
+ slave_config.direction = DMA_DEV_TO_MEM;
+ if (dmaengine_slave_config(ctlr->dma_rx, &slave_config)) {
+ dev_err(&ctlr->dev,
+ "failed to configure rx dma channel\n");
+ err = -EINVAL;
+ goto at91_usart_spi_error;
+ }
+
+ slave_config.direction = DMA_MEM_TO_DEV;
+ if (dmaengine_slave_config(ctlr->dma_tx, &slave_config)) {
+ dev_err(&ctlr->dev,
+ "failed to configure tx dma channel\n");
+ err = -EINVAL;
+ goto at91_usart_spi_error;
+ }
+
+ aus->use_dma = true;
+ return 0;
+
+at91_usart_spi_error:
+ if (!IS_ERR_OR_NULL(ctlr->dma_tx))
+ dma_release_channel(ctlr->dma_tx);
+ if (!IS_ERR_OR_NULL(ctlr->dma_rx))
+ dma_release_channel(ctlr->dma_rx);
+ ctlr->dma_tx = NULL;
+ ctlr->dma_rx = NULL;
+
+at91_usart_spi_error_clear:
+ return err;
+}
+
+static void at91_usart_spi_release_dma(struct spi_controller *ctlr)
+{
+ if (ctlr->dma_rx)
+ dma_release_channel(ctlr->dma_rx);
+ if (ctlr->dma_tx)
+ dma_release_channel(ctlr->dma_tx);
+}
+
+static void at91_usart_spi_stop_dma(struct spi_controller *ctlr)
+{
+ if (ctlr->dma_rx)
+ dmaengine_terminate_all(ctlr->dma_rx);
+ if (ctlr->dma_tx)
+ dmaengine_terminate_all(ctlr->dma_tx);
+}
+
+static int at91_usart_spi_dma_transfer(struct spi_controller *ctlr,
+ struct spi_transfer *xfer)
+{
+ struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);
+ struct dma_chan *rxchan = ctlr->dma_rx;
+ struct dma_chan *txchan = ctlr->dma_tx;
+ struct dma_async_tx_descriptor *rxdesc;
+ struct dma_async_tx_descriptor *txdesc;
+ dma_cookie_t cookie;
+
+ /* Disable RX interrupt */
+ at91_usart_spi_writel(aus, IDR, US_IR_RXRDY);
+
+ rxdesc = dmaengine_prep_slave_sg(rxchan,
+ xfer->rx_sg.sgl,
+ xfer->rx_sg.nents,
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT |
+ DMA_CTRL_ACK);
+ if (!rxdesc)
+ goto at91_usart_spi_err_dma;
+
+ txdesc = dmaengine_prep_slave_sg(txchan,
+ xfer->tx_sg.sgl,
+ xfer->tx_sg.nents,
+ DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT |
+ DMA_CTRL_ACK);
+ if (!txdesc)
+ goto at91_usart_spi_err_dma;
+
+ rxdesc->callback = dma_callback;
+ rxdesc->callback_param = ctlr;
+
+ cookie = rxdesc->tx_submit(rxdesc);
+ if (dma_submit_error(cookie))
+ goto at91_usart_spi_err_dma;
+
+ cookie = txdesc->tx_submit(txdesc);
+ if (dma_submit_error(cookie))
+ goto at91_usart_spi_err_dma;
+
+ rxchan->device->device_issue_pending(rxchan);
+ txchan->device->device_issue_pending(txchan);
+
+ return 0;
+
+at91_usart_spi_err_dma:
+ /* Enable RX interrupt if something fails and fallback to PIO */
+ at91_usart_spi_writel(aus, IER, US_IR_RXRDY);
+ at91_usart_spi_stop_dma(ctlr);
+
+ return -ENOMEM;
+}
+
+static unsigned long at91_usart_spi_dma_timeout(struct at91_usart_spi *aus)
+{
+ return wait_for_completion_timeout(&aus->xfer_completion,
+ US_DMA_TIMEOUT);
+}
+
static inline u32 at91_usart_spi_tx_ready(struct at91_usart_spi *aus)
{
return aus->status & US_IR_TXRDY;
@@ -216,6 +400,8 @@ static int at91_usart_spi_transfer_one(struct spi_controller *ctlr,
struct spi_transfer *xfer)
{
struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);
+ unsigned long dma_timeout = 0;
+ int ret = 0;
at91_usart_spi_set_xfer_speed(aus, xfer);
aus->xfer_failed = false;
@@ -225,8 +411,25 @@ static int at91_usart_spi_transfer_one(struct spi_controller *ctlr,
while ((aus->current_tx_remaining_bytes ||
aus->current_rx_remaining_bytes) && !aus->xfer_failed) {
- at91_usart_spi_read_status(aus);
- at91_usart_spi_tx(aus);
+ reinit_completion(&aus->xfer_completion);
+ if (at91_usart_spi_can_dma(ctlr, spi, xfer) &&
+ !ret) {
+ ret = at91_usart_spi_dma_transfer(ctlr, xfer);
+ if (ret)
+ continue;
+
+ dma_timeout = at91_usart_spi_dma_timeout(aus);
+
+ if (WARN_ON(dma_timeout == 0)) {
+ dev_err(&spi->dev, "DMA transfer timeout\n");
+ return -EIO;
+ }
+ aus->current_tx_remaining_bytes = 0;
+ } else {
+ at91_usart_spi_read_status(aus);
+ at91_usart_spi_tx(aus);
+ }
+
cpu_relax();
}
@@ -345,6 +548,7 @@ static int at91_usart_spi_probe(struct platform_device *pdev)
controller->transfer_one = at91_usart_spi_transfer_one;
controller->prepare_message = at91_usart_spi_prepare_message;
controller->unprepare_message = at91_usart_spi_unprepare_message;
+ controller->can_dma = at91_usart_spi_can_dma;
controller->cleanup = at91_usart_spi_cleanup;
controller->max_speed_hz = DIV_ROUND_UP(clk_get_rate(clk),
US_MIN_CLK_DIV);
@@ -376,7 +580,17 @@ static int at91_usart_spi_probe(struct platform_device *pdev)
aus->spi_clk = clk_get_rate(clk);
at91_usart_spi_init(aus);
+ aus->phybase = regs->start;
+
+ aus->mpdev = to_platform_device(pdev->dev.parent);
+
+ ret = at91_usart_spi_configure_dma(controller, aus);
+ if (ret)
+ goto at91_usart_fail_dma;
+
spin_lock_init(&aus->lock);
+ init_completion(&aus->xfer_completion);
+
ret = devm_spi_register_master(&pdev->dev, controller);
if (ret)
goto at91_usart_fail_register_master;
@@ -389,6 +603,8 @@ static int at91_usart_spi_probe(struct platform_device *pdev)
return 0;
at91_usart_fail_register_master:
+ at91_usart_spi_release_dma(controller);
+at91_usart_fail_dma:
clk_disable_unprepare(clk);
at91_usart_spi_probe_fail:
spi_master_put(controller);
@@ -453,6 +669,7 @@ static int at91_usart_spi_remove(struct platform_device *pdev)
struct spi_controller *ctlr = platform_get_drvdata(pdev);
struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);
+ at91_usart_spi_release_dma(ctlr);
clk_disable_unprepare(aus->clk);
return 0;
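The at91-usart DMA additions above follow the standard dmaengine slave flow: prepare a descriptor from the sglist, attach a completion callback, submit, then issue the pending work. A stripped-down sketch of that sequence, assuming the channel and mapped sglist already exist; foo_submit_rx() is a hypothetical helper and uses dmaengine_submit() where the patch calls tx_submit() directly:

#include <linux/dmaengine.h>
#include <linux/errno.h>

static int foo_submit_rx(struct dma_chan *chan, struct scatterlist *sgl,
			 unsigned int nents, dma_async_tx_callback done,
			 void *done_arg)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_DEV_TO_MEM,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -ENOMEM;

	desc->callback = done;		/* runs when the transfer completes */
	desc->callback_param = done_arg;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);	/* actually start the queued work */
	return 0;
}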
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index 402c1efcd762..6f243a90c844 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -13,6 +13,7 @@
#include <linux/clk.h>
#include <linux/completion.h>
+#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
@@ -64,14 +65,18 @@
#define BCM2835_SPI_FIFO_SIZE 64
#define BCM2835_SPI_FIFO_SIZE_3_4 48
-#define BCM2835_SPI_POLLING_LIMIT_US 30
-#define BCM2835_SPI_POLLING_JIFFIES 2
#define BCM2835_SPI_DMA_MIN_LENGTH 96
#define BCM2835_SPI_MODE_BITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
| SPI_NO_CS | SPI_3WIRE)
#define DRV_NAME "spi-bcm2835"
+/* define polling limits */
+unsigned int polling_limit_us = 30;
+module_param(polling_limit_us, uint, 0664);
+MODULE_PARM_DESC(polling_limit_us,
+ "time in us to run a transfer in polling mode\n");
+
/**
* struct bcm2835_spi - BCM2835 SPI controller
* @regs: base address of register map
@@ -88,6 +93,15 @@
* length is not a multiple of 4 (to overcome hardware limitation)
* @tx_spillover: whether @tx_prologue spills over to second TX sglist entry
* @dma_pending: whether a DMA transfer is in progress
+ * @debugfs_dir: the debugfs directory - needed to remove debugfs when
+ * unloading the module
+ * @count_transfer_polling: count of how often polling mode is used
+ * @count_transfer_irq: count of how often interrupt mode is used
+ * @count_transfer_irq_after_polling: count of how often we fall back to
+ * interrupt mode after starting in polling mode.
+ * These are counted as well in @count_transfer_polling and
+ * @count_transfer_irq
+ * @count_transfer_dma: count how often dma mode is used
*/
struct bcm2835_spi {
void __iomem *regs;
@@ -102,8 +116,55 @@ struct bcm2835_spi {
int rx_prologue;
unsigned int tx_spillover;
unsigned int dma_pending;
+
+ struct dentry *debugfs_dir;
+ u64 count_transfer_polling;
+ u64 count_transfer_irq;
+ u64 count_transfer_irq_after_polling;
+ u64 count_transfer_dma;
};
+#if defined(CONFIG_DEBUG_FS)
+static void bcm2835_debugfs_create(struct bcm2835_spi *bs,
+ const char *dname)
+{
+ char name[64];
+ struct dentry *dir;
+
+ /* get full name */
+ snprintf(name, sizeof(name), "spi-bcm2835-%s", dname);
+
+ /* the base directory */
+ dir = debugfs_create_dir(name, NULL);
+ bs->debugfs_dir = dir;
+
+ /* the counters */
+ debugfs_create_u64("count_transfer_polling", 0444, dir,
+ &bs->count_transfer_polling);
+ debugfs_create_u64("count_transfer_irq", 0444, dir,
+ &bs->count_transfer_irq);
+ debugfs_create_u64("count_transfer_irq_after_polling", 0444, dir,
+ &bs->count_transfer_irq_after_polling);
+ debugfs_create_u64("count_transfer_dma", 0444, dir,
+ &bs->count_transfer_dma);
+}
+
+static void bcm2835_debugfs_remove(struct bcm2835_spi *bs)
+{
+ debugfs_remove_recursive(bs->debugfs_dir);
+ bs->debugfs_dir = NULL;
+}
+#else
+static void bcm2835_debugfs_create(struct bcm2835_spi *bs,
+ const char *dname)
+{
+}
+
+static void bcm2835_debugfs_remove(struct bcm2835_spi *bs)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
static inline u32 bcm2835_rd(struct bcm2835_spi *bs, unsigned reg)
{
return readl(bs->regs + reg);
@@ -248,9 +309,9 @@ static inline void bcm2835_wr_fifo_blind(struct bcm2835_spi *bs, int count)
}
}
-static void bcm2835_spi_reset_hw(struct spi_master *master)
+static void bcm2835_spi_reset_hw(struct spi_controller *ctlr)
{
- struct bcm2835_spi *bs = spi_master_get_devdata(master);
+ struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
/* Disable SPI interrupts and transfer */
@@ -269,8 +330,8 @@ static void bcm2835_spi_reset_hw(struct spi_master *master)
static irqreturn_t bcm2835_spi_interrupt(int irq, void *dev_id)
{
- struct spi_master *master = dev_id;
- struct bcm2835_spi *bs = spi_master_get_devdata(master);
+ struct spi_controller *ctlr = dev_id;
+ struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
/*
@@ -292,20 +353,23 @@ static irqreturn_t bcm2835_spi_interrupt(int irq, void *dev_id)
if (!bs->rx_len) {
/* Transfer complete - reset SPI HW */
- bcm2835_spi_reset_hw(master);
+ bcm2835_spi_reset_hw(ctlr);
/* wake up the framework */
- complete(&master->xfer_completion);
+ complete(&ctlr->xfer_completion);
}
return IRQ_HANDLED;
}
-static int bcm2835_spi_transfer_one_irq(struct spi_master *master,
+static int bcm2835_spi_transfer_one_irq(struct spi_controller *ctlr,
struct spi_device *spi,
struct spi_transfer *tfr,
u32 cs, bool fifo_empty)
{
- struct bcm2835_spi *bs = spi_master_get_devdata(master);
+ struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
+
+ /* update usage statistics */
+ bs->count_transfer_irq++;
/*
* Enable HW block, but with interrupts still disabled.
@@ -328,7 +392,7 @@ static int bcm2835_spi_transfer_one_irq(struct spi_master *master,
/**
* bcm2835_spi_transfer_prologue() - transfer first few bytes without DMA
- * @master: SPI master
+ * @ctlr: SPI master controller
* @tfr: SPI transfer
* @bs: BCM2835 SPI controller
* @cs: CS register
@@ -372,7 +436,7 @@ static int bcm2835_spi_transfer_one_irq(struct spi_master *master,
* be transmitted in 32-bit width to ensure that the following DMA transfer can
* pick up the residue in the RX FIFO in ungarbled form.
*/
-static void bcm2835_spi_transfer_prologue(struct spi_master *master,
+static void bcm2835_spi_transfer_prologue(struct spi_controller *ctlr,
struct spi_transfer *tfr,
struct bcm2835_spi *bs,
u32 cs)
@@ -413,9 +477,9 @@ static void bcm2835_spi_transfer_prologue(struct spi_master *master,
bcm2835_wr_fifo_count(bs, bs->rx_prologue);
bcm2835_wait_tx_fifo_empty(bs);
bcm2835_rd_fifo_count(bs, bs->rx_prologue);
- bcm2835_spi_reset_hw(master);
+ bcm2835_spi_reset_hw(ctlr);
- dma_sync_single_for_device(master->dma_rx->device->dev,
+ dma_sync_single_for_device(ctlr->dma_rx->device->dev,
sg_dma_address(&tfr->rx_sg.sgl[0]),
bs->rx_prologue, DMA_FROM_DEVICE);
@@ -479,11 +543,11 @@ static void bcm2835_spi_undo_prologue(struct bcm2835_spi *bs)
static void bcm2835_spi_dma_done(void *data)
{
- struct spi_master *master = data;
- struct bcm2835_spi *bs = spi_master_get_devdata(master);
+ struct spi_controller *ctlr = data;
+ struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
/* reset fifo and HW */
- bcm2835_spi_reset_hw(master);
+ bcm2835_spi_reset_hw(ctlr);
/* and terminate tx-dma as we do not have an irq for it
* because when the rx dma will terminate and this callback
@@ -491,15 +555,15 @@ static void bcm2835_spi_dma_done(void *data)
* situation otherwise...
*/
if (cmpxchg(&bs->dma_pending, true, false)) {
- dmaengine_terminate_async(master->dma_tx);
+ dmaengine_terminate_async(ctlr->dma_tx);
bcm2835_spi_undo_prologue(bs);
}
/* and mark as completed */;
- complete(&master->xfer_completion);
+ complete(&ctlr->xfer_completion);
}
-static int bcm2835_spi_prepare_sg(struct spi_master *master,
+static int bcm2835_spi_prepare_sg(struct spi_controller *ctlr,
struct spi_transfer *tfr,
bool is_tx)
{
@@ -514,14 +578,14 @@ static int bcm2835_spi_prepare_sg(struct spi_master *master,
if (is_tx) {
dir = DMA_MEM_TO_DEV;
- chan = master->dma_tx;
+ chan = ctlr->dma_tx;
nents = tfr->tx_sg.nents;
sgl = tfr->tx_sg.sgl;
flags = 0 /* no tx interrupt */;
} else {
dir = DMA_DEV_TO_MEM;
- chan = master->dma_rx;
+ chan = ctlr->dma_rx;
nents = tfr->rx_sg.nents;
sgl = tfr->rx_sg.sgl;
flags = DMA_PREP_INTERRUPT;
@@ -534,7 +598,7 @@ static int bcm2835_spi_prepare_sg(struct spi_master *master,
/* set callback for rx */
if (!is_tx) {
desc->callback = bcm2835_spi_dma_done;
- desc->callback_param = master;
+ desc->callback_param = ctlr;
}
/* submit it to DMA-engine */
@@ -543,27 +607,30 @@ static int bcm2835_spi_prepare_sg(struct spi_master *master,
return dma_submit_error(cookie);
}
-static int bcm2835_spi_transfer_one_dma(struct spi_master *master,
+static int bcm2835_spi_transfer_one_dma(struct spi_controller *ctlr,
struct spi_device *spi,
struct spi_transfer *tfr,
u32 cs)
{
- struct bcm2835_spi *bs = spi_master_get_devdata(master);
+ struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
int ret;
+ /* update usage statistics */
+ bs->count_transfer_dma++;
+
/*
* Transfer first few bytes without DMA if length of first TX or RX
* sglist entry is not a multiple of 4 bytes (hardware limitation).
*/
- bcm2835_spi_transfer_prologue(master, tfr, bs, cs);
+ bcm2835_spi_transfer_prologue(ctlr, tfr, bs, cs);
/* setup tx-DMA */
- ret = bcm2835_spi_prepare_sg(master, tfr, true);
+ ret = bcm2835_spi_prepare_sg(ctlr, tfr, true);
if (ret)
goto err_reset_hw;
/* start TX early */
- dma_async_issue_pending(master->dma_tx);
+ dma_async_issue_pending(ctlr->dma_tx);
/* mark as dma pending */
bs->dma_pending = 1;
@@ -579,27 +646,27 @@ static int bcm2835_spi_transfer_one_dma(struct spi_master *master,
* mapping of the rx buffers still takes place
* this saves 10us or more.
*/
- ret = bcm2835_spi_prepare_sg(master, tfr, false);
+ ret = bcm2835_spi_prepare_sg(ctlr, tfr, false);
if (ret) {
/* need to reset on errors */
- dmaengine_terminate_sync(master->dma_tx);
+ dmaengine_terminate_sync(ctlr->dma_tx);
bs->dma_pending = false;
goto err_reset_hw;
}
/* start rx dma late */
- dma_async_issue_pending(master->dma_rx);
+ dma_async_issue_pending(ctlr->dma_rx);
/* wait for wakeup in framework */
return 1;
err_reset_hw:
- bcm2835_spi_reset_hw(master);
+ bcm2835_spi_reset_hw(ctlr);
bcm2835_spi_undo_prologue(bs);
return ret;
}
-static bool bcm2835_spi_can_dma(struct spi_master *master,
+static bool bcm2835_spi_can_dma(struct spi_controller *ctlr,
struct spi_device *spi,
struct spi_transfer *tfr)
{
@@ -611,21 +678,21 @@ static bool bcm2835_spi_can_dma(struct spi_master *master,
return true;
}
-static void bcm2835_dma_release(struct spi_master *master)
+static void bcm2835_dma_release(struct spi_controller *ctlr)
{
- if (master->dma_tx) {
- dmaengine_terminate_sync(master->dma_tx);
- dma_release_channel(master->dma_tx);
- master->dma_tx = NULL;
+ if (ctlr->dma_tx) {
+ dmaengine_terminate_sync(ctlr->dma_tx);
+ dma_release_channel(ctlr->dma_tx);
+ ctlr->dma_tx = NULL;
}
- if (master->dma_rx) {
- dmaengine_terminate_sync(master->dma_rx);
- dma_release_channel(master->dma_rx);
- master->dma_rx = NULL;
+ if (ctlr->dma_rx) {
+ dmaengine_terminate_sync(ctlr->dma_rx);
+ dma_release_channel(ctlr->dma_rx);
+ ctlr->dma_rx = NULL;
}
}
-static void bcm2835_dma_init(struct spi_master *master, struct device *dev)
+static void bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev)
{
struct dma_slave_config slave_config;
const __be32 *addr;
@@ -633,7 +700,7 @@ static void bcm2835_dma_init(struct spi_master *master, struct device *dev)
int ret;
/* base address in dma-space */
- addr = of_get_address(master->dev.of_node, 0, NULL, NULL);
+ addr = of_get_address(ctlr->dev.of_node, 0, NULL, NULL);
if (!addr) {
dev_err(dev, "could not get DMA-register address - not using dma mode\n");
goto err;
@@ -641,38 +708,36 @@ static void bcm2835_dma_init(struct spi_master *master, struct device *dev)
dma_reg_base = be32_to_cpup(addr);
/* get tx/rx dma */
- master->dma_tx = dma_request_slave_channel(dev, "tx");
- if (!master->dma_tx) {
+ ctlr->dma_tx = dma_request_slave_channel(dev, "tx");
+ if (!ctlr->dma_tx) {
dev_err(dev, "no tx-dma configuration found - not using dma mode\n");
goto err;
}
- master->dma_rx = dma_request_slave_channel(dev, "rx");
- if (!master->dma_rx) {
+ ctlr->dma_rx = dma_request_slave_channel(dev, "rx");
+ if (!ctlr->dma_rx) {
dev_err(dev, "no rx-dma configuration found - not using dma mode\n");
goto err_release;
}
/* configure DMAs */
- slave_config.direction = DMA_MEM_TO_DEV;
slave_config.dst_addr = (u32)(dma_reg_base + BCM2835_SPI_FIFO);
slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- ret = dmaengine_slave_config(master->dma_tx, &slave_config);
+ ret = dmaengine_slave_config(ctlr->dma_tx, &slave_config);
if (ret)
goto err_config;
- slave_config.direction = DMA_DEV_TO_MEM;
slave_config.src_addr = (u32)(dma_reg_base + BCM2835_SPI_FIFO);
slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- ret = dmaengine_slave_config(master->dma_rx, &slave_config);
+ ret = dmaengine_slave_config(ctlr->dma_rx, &slave_config);
if (ret)
goto err_config;
/* all went well, so set can_dma */
- master->can_dma = bcm2835_spi_can_dma;
+ ctlr->can_dma = bcm2835_spi_can_dma;
/* need to do TX AND RX DMA, so we need dummy buffers */
- master->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX;
+ ctlr->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
return;
@@ -680,20 +745,22 @@ err_config:
dev_err(dev, "issue configuring dma: %d - not using DMA mode\n",
ret);
err_release:
- bcm2835_dma_release(master);
+ bcm2835_dma_release(ctlr);
err:
return;
}
-static int bcm2835_spi_transfer_one_poll(struct spi_master *master,
+static int bcm2835_spi_transfer_one_poll(struct spi_controller *ctlr,
struct spi_device *spi,
struct spi_transfer *tfr,
- u32 cs,
- unsigned long long xfer_time_us)
+ u32 cs)
{
- struct bcm2835_spi *bs = spi_master_get_devdata(master);
+ struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
unsigned long timeout;
+ /* update usage statistics */
+ bs->count_transfer_polling++;
+
/* enable HW block without interrupts */
bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA);
@@ -703,8 +770,8 @@ static int bcm2835_spi_transfer_one_poll(struct spi_master *master,
*/
bcm2835_wr_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE);
- /* set the timeout */
- timeout = jiffies + BCM2835_SPI_POLLING_JIFFIES;
+ /* set the timeout to at least 2 jiffies */
+ timeout = jiffies + 2 + HZ * polling_limit_us / 1000000;
/* loop until finished the transfer */
while (bs->rx_len) {
@@ -723,25 +790,28 @@ static int bcm2835_spi_transfer_one_poll(struct spi_master *master,
jiffies - timeout,
bs->tx_len, bs->rx_len);
/* fall back to interrupt mode */
- return bcm2835_spi_transfer_one_irq(master, spi,
+
+ /* update usage statistics */
+ bs->count_transfer_irq_after_polling++;
+
+ return bcm2835_spi_transfer_one_irq(ctlr, spi,
tfr, cs, false);
}
}
/* Transfer complete - reset SPI HW */
- bcm2835_spi_reset_hw(master);
+ bcm2835_spi_reset_hw(ctlr);
/* and return without waiting for completion */
return 0;
}
-static int bcm2835_spi_transfer_one(struct spi_master *master,
+static int bcm2835_spi_transfer_one(struct spi_controller *ctlr,
struct spi_device *spi,
struct spi_transfer *tfr)
{
- struct bcm2835_spi *bs = spi_master_get_devdata(master);
- unsigned long spi_hz, clk_hz, cdiv;
- unsigned long spi_used_hz;
- unsigned long long xfer_time_us;
+ struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
+ unsigned long spi_hz, clk_hz, cdiv, spi_used_hz;
+ unsigned long hz_per_byte, byte_limit;
u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
/* set clock */
@@ -782,42 +852,49 @@ static int bcm2835_spi_transfer_one(struct spi_master *master,
bs->tx_len = tfr->len;
bs->rx_len = tfr->len;
- /* calculate the estimated time in us the transfer runs */
- xfer_time_us = (unsigned long long)tfr->len
- * 9 /* clocks/byte - SPI-HW waits 1 clock after each byte */
- * 1000000;
- do_div(xfer_time_us, spi_used_hz);
+ /* Calculate the estimated time in us the transfer runs. Note that
+ * there is 1 idle clock cycle after each byte transferred, so we have
+ * 9 cycles/byte. This is used to find the number of Hz
+ * per byte per polling limit. E.g., we can transfer 1 byte in 30 us
+ * per 300,000 Hz of bus clock.
+ */
+ hz_per_byte = polling_limit_us ? (9 * 1000000) / polling_limit_us : 0;
+ byte_limit = hz_per_byte ? spi_used_hz / hz_per_byte : 1;
- /* for short requests run polling*/
- if (xfer_time_us <= BCM2835_SPI_POLLING_LIMIT_US)
- return bcm2835_spi_transfer_one_poll(master, spi, tfr,
- cs, xfer_time_us);
+ /* run in polling mode for short transfers */
+ if (tfr->len < byte_limit)
+ return bcm2835_spi_transfer_one_poll(ctlr, spi, tfr, cs);
- /* run in dma mode if conditions are right */
- if (master->can_dma && bcm2835_spi_can_dma(master, spi, tfr))
- return bcm2835_spi_transfer_one_dma(master, spi, tfr, cs);
+ /* run in dma mode if conditions are right
+ * Note that unlike poll or interrupt mode DMA mode does not have
+ * this 1 idle clock cycle pattern but runs the spi clock without gaps
+ */
+ if (ctlr->can_dma && bcm2835_spi_can_dma(ctlr, spi, tfr))
+ return bcm2835_spi_transfer_one_dma(ctlr, spi, tfr, cs);
/* run in interrupt-mode */
- return bcm2835_spi_transfer_one_irq(master, spi, tfr, cs, true);
+ return bcm2835_spi_transfer_one_irq(ctlr, spi, tfr, cs, true);
}
-static int bcm2835_spi_prepare_message(struct spi_master *master,
+static int bcm2835_spi_prepare_message(struct spi_controller *ctlr,
struct spi_message *msg)
{
struct spi_device *spi = msg->spi;
- struct bcm2835_spi *bs = spi_master_get_devdata(master);
+ struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
int ret;
- /*
- * DMA transfers are limited to 16 bit (0 to 65535 bytes) by the SPI HW
- * due to DLEN. Split up transfers (32-bit FIFO aligned) if the limit is
- * exceeded.
- */
- ret = spi_split_transfers_maxsize(master, msg, 65532,
- GFP_KERNEL | GFP_DMA);
- if (ret)
- return ret;
+ if (ctlr->can_dma) {
+ /*
+ * DMA transfers are limited to 16 bit (0 to 65535 bytes) by
+ * the SPI HW due to DLEN. Split up transfers (32-bit FIFO
+ * aligned) if the limit is exceeded.
+ */
+ ret = spi_split_transfers_maxsize(ctlr, msg, 65532,
+ GFP_KERNEL | GFP_DMA);
+ if (ret)
+ return ret;
+ }
cs &= ~(BCM2835_SPI_CS_CPOL | BCM2835_SPI_CS_CPHA);
@@ -831,19 +908,19 @@ static int bcm2835_spi_prepare_message(struct spi_master *master,
return 0;
}
-static void bcm2835_spi_handle_err(struct spi_master *master,
+static void bcm2835_spi_handle_err(struct spi_controller *ctlr,
struct spi_message *msg)
{
- struct bcm2835_spi *bs = spi_master_get_devdata(master);
+ struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
/* if an error occurred and we have an active dma, then terminate */
if (cmpxchg(&bs->dma_pending, true, false)) {
- dmaengine_terminate_sync(master->dma_tx);
- dmaengine_terminate_sync(master->dma_rx);
+ dmaengine_terminate_sync(ctlr->dma_tx);
+ dmaengine_terminate_sync(ctlr->dma_rx);
bcm2835_spi_undo_prologue(bs);
}
/* and reset */
- bcm2835_spi_reset_hw(master);
+ bcm2835_spi_reset_hw(ctlr);
}
static int chip_match_name(struct gpio_chip *chip, void *data)
@@ -900,85 +977,88 @@ static int bcm2835_spi_setup(struct spi_device *spi)
static int bcm2835_spi_probe(struct platform_device *pdev)
{
- struct spi_master *master;
+ struct spi_controller *ctlr;
struct bcm2835_spi *bs;
struct resource *res;
int err;
- master = spi_alloc_master(&pdev->dev, sizeof(*bs));
- if (!master) {
- dev_err(&pdev->dev, "spi_alloc_master() failed\n");
+ ctlr = spi_alloc_master(&pdev->dev, sizeof(*bs));
+ if (!ctlr)
return -ENOMEM;
- }
- platform_set_drvdata(pdev, master);
+ platform_set_drvdata(pdev, ctlr);
- master->mode_bits = BCM2835_SPI_MODE_BITS;
- master->bits_per_word_mask = SPI_BPW_MASK(8);
- master->num_chipselect = 3;
- master->setup = bcm2835_spi_setup;
- master->transfer_one = bcm2835_spi_transfer_one;
- master->handle_err = bcm2835_spi_handle_err;
- master->prepare_message = bcm2835_spi_prepare_message;
- master->dev.of_node = pdev->dev.of_node;
+ ctlr->mode_bits = BCM2835_SPI_MODE_BITS;
+ ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
+ ctlr->num_chipselect = 3;
+ ctlr->setup = bcm2835_spi_setup;
+ ctlr->transfer_one = bcm2835_spi_transfer_one;
+ ctlr->handle_err = bcm2835_spi_handle_err;
+ ctlr->prepare_message = bcm2835_spi_prepare_message;
+ ctlr->dev.of_node = pdev->dev.of_node;
- bs = spi_master_get_devdata(master);
+ bs = spi_controller_get_devdata(ctlr);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
bs->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(bs->regs)) {
err = PTR_ERR(bs->regs);
- goto out_master_put;
+ goto out_controller_put;
}
bs->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(bs->clk)) {
err = PTR_ERR(bs->clk);
dev_err(&pdev->dev, "could not get clk: %d\n", err);
- goto out_master_put;
+ goto out_controller_put;
}
bs->irq = platform_get_irq(pdev, 0);
if (bs->irq <= 0) {
dev_err(&pdev->dev, "could not get IRQ: %d\n", bs->irq);
err = bs->irq ? bs->irq : -ENODEV;
- goto out_master_put;
+ goto out_controller_put;
}
clk_prepare_enable(bs->clk);
- bcm2835_dma_init(master, &pdev->dev);
+ bcm2835_dma_init(ctlr, &pdev->dev);
/* initialise the hardware with the default polarities */
bcm2835_wr(bs, BCM2835_SPI_CS,
BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX);
err = devm_request_irq(&pdev->dev, bs->irq, bcm2835_spi_interrupt, 0,
- dev_name(&pdev->dev), master);
+ dev_name(&pdev->dev), ctlr);
if (err) {
dev_err(&pdev->dev, "could not request IRQ: %d\n", err);
goto out_clk_disable;
}
- err = devm_spi_register_master(&pdev->dev, master);
+ err = devm_spi_register_controller(&pdev->dev, ctlr);
if (err) {
- dev_err(&pdev->dev, "could not register SPI master: %d\n", err);
+ dev_err(&pdev->dev, "could not register SPI controller: %d\n",
+ err);
goto out_clk_disable;
}
+ bcm2835_debugfs_create(bs, dev_name(&pdev->dev));
+
return 0;
out_clk_disable:
clk_disable_unprepare(bs->clk);
-out_master_put:
- spi_master_put(master);
+out_controller_put:
+ spi_controller_put(ctlr);
return err;
}
static int bcm2835_spi_remove(struct platform_device *pdev)
{
- struct spi_master *master = platform_get_drvdata(pdev);
- struct bcm2835_spi *bs = spi_master_get_devdata(master);
+ struct spi_controller *ctlr = platform_get_drvdata(pdev);
+ struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
+
+ bcm2835_debugfs_remove(bs);
/* Clear FIFOs, and disable the HW block */
bcm2835_wr(bs, BCM2835_SPI_CS,
@@ -986,7 +1066,7 @@ static int bcm2835_spi_remove(struct platform_device *pdev)
clk_disable_unprepare(bs->clk);
- bcm2835_dma_release(master);
+ bcm2835_dma_release(ctlr);
return 0;
}
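Worked example of the polling-limit arithmetic introduced above: with the default polling_limit_us of 30, hz_per_byte = 9 * 1000000 / 30 = 300000, so at an effective SPI clock of 10 MHz byte_limit = 10000000 / 300000 = 33 and only transfers shorter than 33 bytes (i.e. up to 32 bytes) run in polling mode. A small self-contained sketch of the same computation, in illustrative user-space C with assumed example values:

#include <stdio.h>

int main(void)
{
	unsigned int polling_limit_us = 30;	/* module parameter default */
	unsigned long spi_used_hz = 10000000;	/* example effective SPI clock */

	/* 8 data clocks plus 1 idle clock per byte gives 9 cycles/byte */
	unsigned long hz_per_byte = (9 * 1000000UL) / polling_limit_us;
	unsigned long byte_limit = spi_used_hz / hz_per_byte;

	printf("hz_per_byte=%lu byte_limit=%lu\n", hz_per_byte, byte_limit);
	/* prints hz_per_byte=300000 byte_limit=33: transfers < 33 bytes poll */
	return 0;
}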
diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c
index 40dfb7f58efe..bb57035c5770 100644
--- a/drivers/spi/spi-bcm2835aux.c
+++ b/drivers/spi/spi-bcm2835aux.c
@@ -496,10 +496,8 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev)
int err;
master = spi_alloc_master(&pdev->dev, sizeof(*bs));
- if (!master) {
- dev_err(&pdev->dev, "spi_alloc_master() failed\n");
+ if (!master)
return -ENOMEM;
- }
platform_set_drvdata(pdev, master);
master->mode_bits = (SPI_CPOL | SPI_CS_HIGH | SPI_NO_CS);
diff --git a/drivers/spi/spi-meson-spifc.c b/drivers/spi/spi-meson-spifc.c
index ea4b1bf0fa16..f7fe9b13d122 100644
--- a/drivers/spi/spi-meson-spifc.c
+++ b/drivers/spi/spi-meson-spifc.c
@@ -1,9 +1,9 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Driver for Amlogic Meson SPI flash controller (SPIFC)
- *
- * Copyright (C) 2014 Beniamino Galvani <b.galvani@gmail.com>
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Driver for Amlogic Meson SPI flash controller (SPIFC)
+//
+// Copyright (C) 2014 Beniamino Galvani <b.galvani@gmail.com>
+//
#include <linux/clk.h>
#include <linux/delay.h>
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index 10041eab36a2..45d8a7048b6c 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -123,8 +123,6 @@ static const struct mtk_spi_compatible mt8183_compat = {
* supplies it.
*/
static const struct mtk_chip_config mtk_default_chip_info = {
- .rx_mlsb = 1,
- .tx_mlsb = 1,
.cs_pol = 0,
.sample_sel = 0,
};
@@ -195,14 +193,13 @@ static int mtk_spi_prepare_message(struct spi_master *master,
reg_val &= ~SPI_CMD_CPOL;
/* set the mlsbx and mlsbtx */
- if (chip_config->tx_mlsb)
- reg_val |= SPI_CMD_TXMSBF;
- else
+ if (spi->mode & SPI_LSB_FIRST) {
reg_val &= ~SPI_CMD_TXMSBF;
- if (chip_config->rx_mlsb)
- reg_val |= SPI_CMD_RXMSBF;
- else
reg_val &= ~SPI_CMD_RXMSBF;
+ } else {
+ reg_val |= SPI_CMD_TXMSBF;
+ reg_val |= SPI_CMD_RXMSBF;
+ }
/* set the tx/rx endian */
#ifdef __LITTLE_ENDIAN
@@ -599,7 +596,7 @@ static int mtk_spi_probe(struct platform_device *pdev)
master->auto_runtime_pm = true;
master->dev.of_node = pdev->dev.of_node;
- master->mode_bits = SPI_CPOL | SPI_CPHA;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
master->set_cs = mtk_spi_set_cs;
master->prepare_message = mtk_spi_prepare_message;
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index af3f37ba82c8..fc7ab4b26880 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -1437,6 +1437,10 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = {
{ PCI_VDEVICE(INTEL, 0x34aa), LPSS_CNL_SSP },
{ PCI_VDEVICE(INTEL, 0x34ab), LPSS_CNL_SSP },
{ PCI_VDEVICE(INTEL, 0x34fb), LPSS_CNL_SSP },
+ /* EHL */
+ { PCI_VDEVICE(INTEL, 0x4b2a), LPSS_BXT_SSP },
+ { PCI_VDEVICE(INTEL, 0x4b2b), LPSS_BXT_SSP },
+ { PCI_VDEVICE(INTEL, 0x4b37), LPSS_BXT_SSP },
/* APL */
{ PCI_VDEVICE(INTEL, 0x5ac2), LPSS_BXT_SSP },
{ PCI_VDEVICE(INTEL, 0x5ac4), LPSS_BXT_SSP },
@@ -1704,6 +1708,16 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
goto out_error_dma_irq_alloc;
controller->max_speed_hz = clk_get_rate(ssp->clk);
+ /*
+ * Set the minimum speed for all platforms other than Intel Quark, which
+ * is able to do transfers below 1 Hz.
+ */
+ if (!pxa25x_ssp_comp(drv_data))
+ controller->min_speed_hz =
+ DIV_ROUND_UP(controller->max_speed_hz, 4096);
+ else if (!is_quark_x1000_ssp(drv_data))
+ controller->min_speed_hz =
+ DIV_ROUND_UP(controller->max_speed_hz, 512);
/* Load default SSP configuration */
pxa2xx_spi_write(drv_data, SSCR0, 0);
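As a rough illustration of the arithmetic in the hunk above, a standalone sketch; the 200 MHz SSP clock is an assumed example value, and only the DIV_ROUND_UP rounding and the two divisors mirror the patch.

#include <stdio.h>

/* same rounding as the kernel's DIV_ROUND_UP(), redefined so the sketch builds alone */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int max_speed_hz = 200000000;	/* assumed SSP clock rate */

	printf("min speed, /4096: %u Hz\n", DIV_ROUND_UP(max_speed_hz, 4096));
	printf("min speed, /512:  %u Hz\n", DIV_ROUND_UP(max_speed_hz, 512));
	return 0;
}

With a 200 MHz clock the advertised minimum would come out to roughly 48.8 kHz or 390.6 kHz respectively.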
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
index f22dbb4b87a5..2f559e531100 100644
--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -273,6 +273,9 @@ static void spi_qup_read(struct spi_qup *controller, u32 *opflags)
writel_relaxed(QUP_OP_IN_SERVICE_FLAG,
controller->base + QUP_OPERATIONAL);
+ if (!remainder)
+ goto exit;
+
if (is_block_mode) {
num_words = (remainder > words_per_block) ?
words_per_block : remainder;
@@ -302,11 +305,13 @@ static void spi_qup_read(struct spi_qup *controller, u32 *opflags)
* to refresh opflags value because MAX_INPUT_DONE_FLAG may now be
* present and this is used to determine if transaction is complete
*/
- *opflags = readl_relaxed(controller->base + QUP_OPERATIONAL);
- if (is_block_mode && *opflags & QUP_OP_MAX_INPUT_DONE_FLAG)
- writel_relaxed(QUP_OP_IN_SERVICE_FLAG,
- controller->base + QUP_OPERATIONAL);
-
+exit:
+ if (!remainder) {
+ *opflags = readl_relaxed(controller->base + QUP_OPERATIONAL);
+ if (is_block_mode && *opflags & QUP_OP_MAX_INPUT_DONE_FLAG)
+ writel_relaxed(QUP_OP_IN_SERVICE_FLAG,
+ controller->base + QUP_OPERATIONAL);
+ }
}
static void spi_qup_write_to_fifo(struct spi_qup *controller, u32 num_words)
@@ -354,6 +359,10 @@ static void spi_qup_write(struct spi_qup *controller)
writel_relaxed(QUP_OP_OUT_SERVICE_FLAG,
controller->base + QUP_OPERATIONAL);
+ /* make sure the interrupt is valid */
+ if (!remainder)
+ return;
+
if (is_block_mode) {
num_words = (remainder > words_per_block) ?
words_per_block : remainder;
@@ -567,10 +576,24 @@ static int spi_qup_do_pio(struct spi_device *spi, struct spi_transfer *xfer,
return 0;
}
+static bool spi_qup_data_pending(struct spi_qup *controller)
+{
+ unsigned int remainder_tx, remainder_rx;
+
+ remainder_tx = DIV_ROUND_UP(spi_qup_len(controller) -
+ controller->tx_bytes, controller->w_size);
+
+ remainder_rx = DIV_ROUND_UP(spi_qup_len(controller) -
+ controller->rx_bytes, controller->w_size);
+
+ return remainder_tx || remainder_rx;
+}
+
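A standalone sketch of the word-remainder arithmetic the new spi_qup_data_pending() helper relies on; the transfer length, byte counters and word size are assumed example values, and spi_qup_len() itself is not visible in this hunk.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int xfer_len = 30;	/* assumed transfer length in bytes */
	unsigned int tx_bytes = 16;	/* bytes already pushed to the FIFO */
	unsigned int rx_bytes = 24;	/* bytes already drained */
	unsigned int w_size = 4;	/* bytes per word */

	unsigned int remainder_tx = DIV_ROUND_UP(xfer_len - tx_bytes, w_size);
	unsigned int remainder_rx = DIV_ROUND_UP(xfer_len - rx_bytes, w_size);

	printf("tx words left: %u, rx words left: %u, pending: %s\n",
	       remainder_tx, remainder_rx,
	       (remainder_tx || remainder_rx) ? "yes" : "no");
	return 0;
}

Only once both remainders reach zero does the interrupt handler below complete the transfer.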
static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
{
struct spi_qup *controller = dev_id;
u32 opflags, qup_err, spi_err;
+ unsigned long flags;
int error = 0;
qup_err = readl_relaxed(controller->base + QUP_ERROR_FLAGS);
@@ -602,6 +625,11 @@ static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
error = -EIO;
}
+ spin_lock_irqsave(&controller->lock, flags);
+ if (!controller->error)
+ controller->error = error;
+ spin_unlock_irqrestore(&controller->lock, flags);
+
if (spi_qup_is_dma_xfer(controller->mode)) {
writel_relaxed(opflags, controller->base + QUP_OPERATIONAL);
} else {
@@ -610,10 +638,21 @@ static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
if (opflags & QUP_OP_OUT_SERVICE_FLAG)
spi_qup_write(controller);
+
+ if (!spi_qup_data_pending(controller))
+ complete(&controller->done);
}
- if ((opflags & QUP_OP_MAX_INPUT_DONE_FLAG) || error)
+ if (error)
+ complete(&controller->done);
+
+ if (opflags & QUP_OP_MAX_INPUT_DONE_FLAG) {
+ if (!spi_qup_is_dma_xfer(controller->mode)) {
+ if (spi_qup_data_pending(controller))
+ return IRQ_HANDLED;
+ }
complete(&controller->done);
+ }
return IRQ_HANDLED;
}
@@ -834,10 +873,6 @@ static int spi_qup_transfer_one(struct spi_master *master,
else
ret = spi_qup_do_pio(spi, xfer, timeout);
- if (ret)
- goto exit;
-
-exit:
spi_qup_set_state(controller, QUP_STATE_RESET);
spin_lock_irqsave(&controller->lock, flags);
if (!ret)
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index 9b91188a85f9..2cc6d9951b52 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -417,7 +417,7 @@ static int rockchip_spi_prepare_dma(struct rockchip_spi *rs,
.direction = DMA_MEM_TO_DEV,
.dst_addr = rs->dma_addr_tx,
.dst_addr_width = rs->n_bytes,
- .dst_maxburst = rs->fifo_len / 2,
+ .dst_maxburst = rs->fifo_len / 4,
};
dmaengine_slave_config(master->dma_tx, &txconf);
@@ -518,7 +518,7 @@ static void rockchip_spi_config(struct rockchip_spi *rs,
else
writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_RXFTLR);
- writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_DMATDLR);
+ writel_relaxed(rs->fifo_len / 2, rs->regs + ROCKCHIP_SPI_DMATDLR);
writel_relaxed(0, rs->regs + ROCKCHIP_SPI_DMARDLR);
writel_relaxed(dmacr, rs->regs + ROCKCHIP_SPI_DMACR);
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 6aab7b2136db..b50bdbc27e58 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -229,7 +229,7 @@ static int sh_msiof_modify_ctr_wait(struct sh_msiof_spi_priv *p,
sh_msiof_write(p, CTR, data);
return readl_poll_timeout_atomic(p->mapbase + CTR, data,
- (data & mask) == set, 10, 1000);
+ (data & mask) == set, 1, 100);
}
static irqreturn_t sh_msiof_spi_irq(int irq, void *data)
diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c
index 42f8e3c6aa1f..655e4afbfb2a 100644
--- a/drivers/spi/spi-stm32-qspi.c
+++ b/drivers/spi/spi-stm32-qspi.c
@@ -29,7 +29,7 @@
#define CR_SSHIFT BIT(4)
#define CR_DFM BIT(6)
#define CR_FSEL BIT(7)
-#define CR_FTHRES_MASK GENMASK(12, 8)
+#define CR_FTHRES_SHIFT 8
#define CR_TEIE BIT(16)
#define CR_TCIE BIT(17)
#define CR_FTIE BIT(18)
@@ -245,12 +245,8 @@ static int stm32_qspi_tx_dma(struct stm32_qspi *qspi,
writel_relaxed(cr | CR_DMAEN, qspi->io_base + QSPI_CR);
t_out = sgt.nents * STM32_COMP_TIMEOUT_MS;
- if (!wait_for_completion_interruptible_timeout(&qspi->dma_completion,
- msecs_to_jiffies(t_out)))
- err = -ETIMEDOUT;
-
- if (dma_async_is_tx_complete(dma_ch, cookie,
- NULL, NULL) != DMA_COMPLETE)
+ if (!wait_for_completion_timeout(&qspi->dma_completion,
+ msecs_to_jiffies(t_out)))
err = -ETIMEDOUT;
if (err)
@@ -304,7 +300,7 @@ static int stm32_qspi_wait_cmd(struct stm32_qspi *qspi,
cr = readl_relaxed(qspi->io_base + QSPI_CR);
writel_relaxed(cr | CR_TCIE | CR_TEIE, qspi->io_base + QSPI_CR);
- if (!wait_for_completion_interruptible_timeout(&qspi->data_completion,
+ if (!wait_for_completion_timeout(&qspi->data_completion,
msecs_to_jiffies(STM32_COMP_TIMEOUT_MS))) {
err = -ETIMEDOUT;
} else {
@@ -463,7 +459,7 @@ static int stm32_qspi_setup(struct spi_device *spi)
flash->presc = presc;
mutex_lock(&qspi->lock);
- qspi->cr_reg = FIELD_PREP(CR_FTHRES_MASK, 3) | CR_SSHIFT | CR_EN;
+ qspi->cr_reg = 3 << CR_FTHRES_SHIFT | CR_SSHIFT | CR_EN;
writel_relaxed(qspi->cr_reg, qspi->io_base + QSPI_CR);
/* set dcr fsize to max address */
diff --git a/drivers/spi/spi-synquacer.c b/drivers/spi/spi-synquacer.c
new file mode 100644
index 000000000000..f99abd85c50a
--- /dev/null
+++ b/drivers/spi/spi-synquacer.c
@@ -0,0 +1,828 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Synquacer HSSPI controller driver
+//
+// Copyright (c) 2015-2018 Socionext Inc.
+// Copyright (c) 2018-2019 Linaro Ltd.
+//
+
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/spinlock.h>
+#include <linux/clk.h>
+
+/* HSSPI register address definitions */
+#define SYNQUACER_HSSPI_REG_MCTRL 0x00
+#define SYNQUACER_HSSPI_REG_PCC0 0x04
+#define SYNQUACER_HSSPI_REG_PCC(n) (SYNQUACER_HSSPI_REG_PCC0 + (n) * 4)
+#define SYNQUACER_HSSPI_REG_TXF 0x14
+#define SYNQUACER_HSSPI_REG_TXE 0x18
+#define SYNQUACER_HSSPI_REG_TXC 0x1C
+#define SYNQUACER_HSSPI_REG_RXF 0x20
+#define SYNQUACER_HSSPI_REG_RXE 0x24
+#define SYNQUACER_HSSPI_REG_RXC 0x28
+#define SYNQUACER_HSSPI_REG_FAULTF 0x2C
+#define SYNQUACER_HSSPI_REG_FAULTC 0x30
+#define SYNQUACER_HSSPI_REG_DMCFG 0x34
+#define SYNQUACER_HSSPI_REG_DMSTART 0x38
+#define SYNQUACER_HSSPI_REG_DMBCC 0x3C
+#define SYNQUACER_HSSPI_REG_DMSTATUS 0x40
+#define SYNQUACER_HSSPI_REG_FIFOCFG 0x4C
+#define SYNQUACER_HSSPI_REG_TX_FIFO 0x50
+#define SYNQUACER_HSSPI_REG_RX_FIFO 0x90
+#define SYNQUACER_HSSPI_REG_MID 0xFC
+
+/* HSSPI register bit definitions */
+#define SYNQUACER_HSSPI_MCTRL_MEN BIT(0)
+#define SYNQUACER_HSSPI_MCTRL_COMMAND_SEQUENCE_EN BIT(1)
+#define SYNQUACER_HSSPI_MCTRL_CDSS BIT(3)
+#define SYNQUACER_HSSPI_MCTRL_MES BIT(4)
+#define SYNQUACER_HSSPI_MCTRL_SYNCON BIT(5)
+
+#define SYNQUACER_HSSPI_PCC_CPHA BIT(0)
+#define SYNQUACER_HSSPI_PCC_CPOL BIT(1)
+#define SYNQUACER_HSSPI_PCC_ACES BIT(2)
+#define SYNQUACER_HSSPI_PCC_RTM BIT(3)
+#define SYNQUACER_HSSPI_PCC_SSPOL BIT(4)
+#define SYNQUACER_HSSPI_PCC_SDIR BIT(7)
+#define SYNQUACER_HSSPI_PCC_SENDIAN BIT(8)
+#define SYNQUACER_HSSPI_PCC_SAFESYNC BIT(16)
+#define SYNQUACER_HSSPI_PCC_SS2CD_SHIFT 5U
+#define SYNQUACER_HSSPI_PCC_CDRS_MASK 0x7f
+#define SYNQUACER_HSSPI_PCC_CDRS_SHIFT 9U
+
+#define SYNQUACER_HSSPI_TXF_FIFO_FULL BIT(0)
+#define SYNQUACER_HSSPI_TXF_FIFO_EMPTY BIT(1)
+#define SYNQUACER_HSSPI_TXF_SLAVE_RELEASED BIT(6)
+
+#define SYNQUACER_HSSPI_TXE_FIFO_FULL BIT(0)
+#define SYNQUACER_HSSPI_TXE_FIFO_EMPTY BIT(1)
+#define SYNQUACER_HSSPI_TXE_SLAVE_RELEASED BIT(6)
+
+#define SYNQUACER_HSSPI_RXF_FIFO_MORE_THAN_THRESHOLD BIT(5)
+#define SYNQUACER_HSSPI_RXF_SLAVE_RELEASED BIT(6)
+
+#define SYNQUACER_HSSPI_RXE_FIFO_MORE_THAN_THRESHOLD BIT(5)
+#define SYNQUACER_HSSPI_RXE_SLAVE_RELEASED BIT(6)
+
+#define SYNQUACER_HSSPI_DMCFG_SSDC BIT(1)
+#define SYNQUACER_HSSPI_DMCFG_MSTARTEN BIT(2)
+
+#define SYNQUACER_HSSPI_DMSTART_START BIT(0)
+#define SYNQUACER_HSSPI_DMSTOP_STOP BIT(8)
+#define SYNQUACER_HSSPI_DMPSEL_CS_MASK 0x3
+#define SYNQUACER_HSSPI_DMPSEL_CS_SHIFT 16U
+#define SYNQUACER_HSSPI_DMTRP_BUS_WIDTH_SHIFT 24U
+#define SYNQUACER_HSSPI_DMTRP_DATA_MASK 0x3
+#define SYNQUACER_HSSPI_DMTRP_DATA_SHIFT 26U
+#define SYNQUACER_HSSPI_DMTRP_DATA_TXRX 0
+#define SYNQUACER_HSSPI_DMTRP_DATA_RX 1
+#define SYNQUACER_HSSPI_DMTRP_DATA_TX 2
+
+#define SYNQUACER_HSSPI_DMSTATUS_RX_DATA_MASK 0x1f
+#define SYNQUACER_HSSPI_DMSTATUS_RX_DATA_SHIFT 8U
+#define SYNQUACER_HSSPI_DMSTATUS_TX_DATA_MASK 0x1f
+#define SYNQUACER_HSSPI_DMSTATUS_TX_DATA_SHIFT 16U
+
+#define SYNQUACER_HSSPI_FIFOCFG_RX_THRESHOLD_MASK 0xf
+#define SYNQUACER_HSSPI_FIFOCFG_RX_THRESHOLD_SHIFT 0U
+#define SYNQUACER_HSSPI_FIFOCFG_TX_THRESHOLD_MASK 0xf
+#define SYNQUACER_HSSPI_FIFOCFG_TX_THRESHOLD_SHIFT 4U
+#define SYNQUACER_HSSPI_FIFOCFG_FIFO_WIDTH_MASK 0x3
+#define SYNQUACER_HSSPI_FIFOCFG_FIFO_WIDTH_SHIFT 8U
+#define SYNQUACER_HSSPI_FIFOCFG_RX_FLUSH BIT(11)
+#define SYNQUACER_HSSPI_FIFOCFG_TX_FLUSH BIT(12)
+
+#define SYNQUACER_HSSPI_FIFO_DEPTH 16U
+#define SYNQUACER_HSSPI_FIFO_TX_THRESHOLD 4U
+#define SYNQUACER_HSSPI_FIFO_RX_THRESHOLD \
+ (SYNQUACER_HSSPI_FIFO_DEPTH - SYNQUACER_HSSPI_FIFO_TX_THRESHOLD)
+
+#define SYNQUACER_HSSPI_TRANSFER_MODE_TX BIT(1)
+#define SYNQUACER_HSSPI_TRANSFER_MODE_RX BIT(2)
+#define SYNQUACER_HSSPI_TRANSFER_TMOUT_MSEC 2000U
+#define SYNQUACER_HSSPI_ENABLE_TMOUT_MSEC 1000U
+
+#define SYNQUACER_HSSPI_CLOCK_SRC_IHCLK 0
+#define SYNQUACER_HSSPI_CLOCK_SRC_IPCLK 1
+
+#define SYNQUACER_HSSPI_NUM_CHIP_SELECT 4U
+#define SYNQUACER_HSSPI_IRQ_NAME_MAX 32U
+
+struct synquacer_spi {
+ struct device *dev;
+ struct completion transfer_done;
+ unsigned int cs;
+ unsigned int bpw;
+ unsigned int mode;
+ unsigned int speed;
+ bool aces, rtm;
+ void *rx_buf;
+ const void *tx_buf;
+ struct clk *clk;
+ int clk_src_type;
+ void __iomem *regs;
+ u32 tx_words, rx_words;
+ unsigned int bus_width;
+ unsigned int transfer_mode;
+ char rx_irq_name[SYNQUACER_HSSPI_IRQ_NAME_MAX];
+ char tx_irq_name[SYNQUACER_HSSPI_IRQ_NAME_MAX];
+};
+
+static int read_fifo(struct synquacer_spi *sspi)
+{
+ u32 len = readl(sspi->regs + SYNQUACER_HSSPI_REG_DMSTATUS);
+
+ len = (len >> SYNQUACER_HSSPI_DMSTATUS_RX_DATA_SHIFT) &
+ SYNQUACER_HSSPI_DMSTATUS_RX_DATA_MASK;
+ len = min(len, sspi->rx_words);
+
+ switch (sspi->bpw) {
+ case 8: {
+ u8 *buf = sspi->rx_buf;
+
+ ioread8_rep(sspi->regs + SYNQUACER_HSSPI_REG_RX_FIFO,
+ buf, len);
+ sspi->rx_buf = buf + len;
+ break;
+ }
+ case 16: {
+ u16 *buf = sspi->rx_buf;
+
+ ioread16_rep(sspi->regs + SYNQUACER_HSSPI_REG_RX_FIFO,
+ buf, len);
+ sspi->rx_buf = buf + len;
+ break;
+ }
+ case 24:
+ /* fallthrough, should use 32-bit access */
+ case 32: {
+ u32 *buf = sspi->rx_buf;
+
+ ioread32_rep(sspi->regs + SYNQUACER_HSSPI_REG_RX_FIFO,
+ buf, len);
+ sspi->rx_buf = buf + len;
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+
+ sspi->rx_words -= len;
+ return 0;
+}
+
+static int write_fifo(struct synquacer_spi *sspi)
+{
+ u32 len = readl(sspi->regs + SYNQUACER_HSSPI_REG_DMSTATUS);
+
+ len = (len >> SYNQUACER_HSSPI_DMSTATUS_TX_DATA_SHIFT) &
+ SYNQUACER_HSSPI_DMSTATUS_TX_DATA_MASK;
+ len = min(SYNQUACER_HSSPI_FIFO_DEPTH - len,
+ sspi->tx_words);
+
+ switch (sspi->bpw) {
+ case 8: {
+ const u8 *buf = sspi->tx_buf;
+
+ iowrite8_rep(sspi->regs + SYNQUACER_HSSPI_REG_TX_FIFO,
+ buf, len);
+ sspi->tx_buf = buf + len;
+ break;
+ }
+ case 16: {
+ const u16 *buf = sspi->tx_buf;
+
+ iowrite16_rep(sspi->regs + SYNQUACER_HSSPI_REG_TX_FIFO,
+ buf, len);
+ sspi->tx_buf = buf + len;
+ break;
+ }
+ case 24:
+ /* fallthrough, should use 32-bit access */
+ case 32: {
+ const u32 *buf = sspi->tx_buf;
+
+ iowrite32_rep(sspi->regs + SYNQUACER_HSSPI_REG_TX_FIFO,
+ buf, len);
+ sspi->tx_buf = buf + len;
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+
+ sspi->tx_words -= len;
+ return 0;
+}
+
+static int synquacer_spi_config(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct synquacer_spi *sspi = spi_master_get_devdata(master);
+ unsigned int speed, mode, bpw, cs, bus_width, transfer_mode;
+ u32 rate, val, div;
+
+ /* Full Duplex only on 1-bit wide bus */
+ if (xfer->rx_buf && xfer->tx_buf &&
+ (xfer->rx_nbits != 1 || xfer->tx_nbits != 1)) {
+ dev_err(sspi->dev,
+ "RX and TX bus widths must be 1-bit for Full-Duplex!\n");
+ return -EINVAL;
+ }
+
+ if (xfer->tx_buf) {
+ bus_width = xfer->tx_nbits;
+ transfer_mode = SYNQUACER_HSSPI_TRANSFER_MODE_TX;
+ } else {
+ bus_width = xfer->rx_nbits;
+ transfer_mode = SYNQUACER_HSSPI_TRANSFER_MODE_RX;
+ }
+
+ mode = spi->mode;
+ cs = spi->chip_select;
+ speed = xfer->speed_hz;
+ bpw = xfer->bits_per_word;
+
+ /* return if nothing to change */
+ if (speed == sspi->speed &&
+ bus_width == sspi->bus_width && bpw == sspi->bpw &&
+ mode == sspi->mode && cs == sspi->cs &&
+ transfer_mode == sspi->transfer_mode) {
+ return 0;
+ }
+
+ sspi->transfer_mode = transfer_mode;
+ rate = master->max_speed_hz;
+
+ div = DIV_ROUND_UP(rate, speed);
+ if (div > 254) {
+ dev_err(sspi->dev, "Requested rate too low (%u)\n",
+ sspi->speed);
+ return -EINVAL;
+ }
+
+ val = readl(sspi->regs + SYNQUACER_HSSPI_REG_PCC(cs));
+ val &= ~SYNQUACER_HSSPI_PCC_SAFESYNC;
+ if (bpw == 8 && (mode & (SPI_TX_DUAL | SPI_RX_DUAL)) && div < 3)
+ val |= SYNQUACER_HSSPI_PCC_SAFESYNC;
+ if (bpw == 8 && (mode & (SPI_TX_QUAD | SPI_RX_QUAD)) && div < 6)
+ val |= SYNQUACER_HSSPI_PCC_SAFESYNC;
+ if (bpw == 16 && (mode & (SPI_TX_QUAD | SPI_RX_QUAD)) && div < 3)
+ val |= SYNQUACER_HSSPI_PCC_SAFESYNC;
+
+ if (mode & SPI_CPHA)
+ val |= SYNQUACER_HSSPI_PCC_CPHA;
+ else
+ val &= ~SYNQUACER_HSSPI_PCC_CPHA;
+
+ if (mode & SPI_CPOL)
+ val |= SYNQUACER_HSSPI_PCC_CPOL;
+ else
+ val &= ~SYNQUACER_HSSPI_PCC_CPOL;
+
+ if (mode & SPI_CS_HIGH)
+ val |= SYNQUACER_HSSPI_PCC_SSPOL;
+ else
+ val &= ~SYNQUACER_HSSPI_PCC_SSPOL;
+
+ if (mode & SPI_LSB_FIRST)
+ val |= SYNQUACER_HSSPI_PCC_SDIR;
+ else
+ val &= ~SYNQUACER_HSSPI_PCC_SDIR;
+
+ if (sspi->aces)
+ val |= SYNQUACER_HSSPI_PCC_ACES;
+ else
+ val &= ~SYNQUACER_HSSPI_PCC_ACES;
+
+ if (sspi->rtm)
+ val |= SYNQUACER_HSSPI_PCC_RTM;
+ else
+ val &= ~SYNQUACER_HSSPI_PCC_RTM;
+
+ val |= (3 << SYNQUACER_HSSPI_PCC_SS2CD_SHIFT);
+ val |= SYNQUACER_HSSPI_PCC_SENDIAN;
+
+ val &= ~(SYNQUACER_HSSPI_PCC_CDRS_MASK <<
+ SYNQUACER_HSSPI_PCC_CDRS_SHIFT);
+ val |= ((div >> 1) << SYNQUACER_HSSPI_PCC_CDRS_SHIFT);
+
+ writel(val, sspi->regs + SYNQUACER_HSSPI_REG_PCC(cs));
+
+ val = readl(sspi->regs + SYNQUACER_HSSPI_REG_FIFOCFG);
+ val &= ~(SYNQUACER_HSSPI_FIFOCFG_FIFO_WIDTH_MASK <<
+ SYNQUACER_HSSPI_FIFOCFG_FIFO_WIDTH_SHIFT);
+ val |= ((bpw / 8 - 1) << SYNQUACER_HSSPI_FIFOCFG_FIFO_WIDTH_SHIFT);
+ writel(val, sspi->regs + SYNQUACER_HSSPI_REG_FIFOCFG);
+
+ val = readl(sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
+ val &= ~(SYNQUACER_HSSPI_DMTRP_DATA_MASK <<
+ SYNQUACER_HSSPI_DMTRP_DATA_SHIFT);
+
+ if (xfer->rx_buf)
+ val |= (SYNQUACER_HSSPI_DMTRP_DATA_RX <<
+ SYNQUACER_HSSPI_DMTRP_DATA_SHIFT);
+ else
+ val |= (SYNQUACER_HSSPI_DMTRP_DATA_TX <<
+ SYNQUACER_HSSPI_DMTRP_DATA_SHIFT);
+
+ val &= ~(3 << SYNQUACER_HSSPI_DMTRP_BUS_WIDTH_SHIFT);
+ val |= ((bus_width >> 1) << SYNQUACER_HSSPI_DMTRP_BUS_WIDTH_SHIFT);
+ writel(val, sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
+
+ sspi->bpw = bpw;
+ sspi->mode = mode;
+ sspi->speed = speed;
+ sspi->cs = spi->chip_select;
+ sspi->bus_width = bus_width;
+
+ return 0;
+}
+
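A small sketch of the clock-divider maths in synquacer_spi_config() above; the 125 MHz reference clock and the 10 MHz request are assumed example values, and only the DIV_ROUND_UP, the 254 limit and the div >> 1 programming of CDRS mirror the driver.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define PCC_CDRS_SHIFT		9U	/* mirrors SYNQUACER_HSSPI_PCC_CDRS_SHIFT */

int main(void)
{
	unsigned int rate = 125000000;	/* assumed iHCLK rate */
	unsigned int speed = 10000000;	/* requested transfer speed */
	unsigned int div = DIV_ROUND_UP(rate, speed);

	if (div > 254) {
		printf("requested rate too low\n");
		return 1;
	}
	printf("div=%u, CDRS=%u, PCC bits=0x%x\n",
	       div, div >> 1, (div >> 1) << PCC_CDRS_SHIFT);
	return 0;
}

Only div >> 1 is written to the CDRS field, so odd dividers are effectively rounded; how the hardware interprets CDRS beyond that is not visible in this hunk.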
+static int synquacer_spi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct synquacer_spi *sspi = spi_master_get_devdata(master);
+ int ret;
+ int status = 0;
+ u32 words;
+ u8 bpw;
+ u32 val;
+
+ val = readl(sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
+ val &= ~SYNQUACER_HSSPI_DMSTOP_STOP;
+ writel(val, sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
+
+ val = readl(sspi->regs + SYNQUACER_HSSPI_REG_FIFOCFG);
+ val |= SYNQUACER_HSSPI_FIFOCFG_RX_FLUSH;
+ val |= SYNQUACER_HSSPI_FIFOCFG_TX_FLUSH;
+ writel(val, sspi->regs + SYNQUACER_HSSPI_REG_FIFOCFG);
+
+ /*
+ * See if we can transfer 4 bytes as one word
+ * to maximize the FIFO buffer efficiency.
+ */
+ bpw = xfer->bits_per_word;
+ if (bpw == 8 && !(xfer->len % 4) && !(spi->mode & SPI_LSB_FIRST))
+ xfer->bits_per_word = 32;
+
+ ret = synquacer_spi_config(master, spi, xfer);
+
+ /* restore */
+ xfer->bits_per_word = bpw;
+
+ if (ret)
+ return ret;
+
+ reinit_completion(&sspi->transfer_done);
+
+ sspi->tx_buf = xfer->tx_buf;
+ sspi->rx_buf = xfer->rx_buf;
+
+ switch (sspi->bpw) {
+ case 8:
+ words = xfer->len;
+ break;
+ case 16:
+ words = xfer->len / 2;
+ break;
+ case 24:
+ /* fallthrough, should use 32-bit access */
+ case 32:
+ words = xfer->len / 4;
+ break;
+ default:
+ dev_err(sspi->dev, "unsupported bpw: %d\n", sspi->bpw);
+ return -EINVAL;
+ }
+
+ if (xfer->tx_buf)
+ sspi->tx_words = words;
+ else
+ sspi->tx_words = 0;
+
+ if (xfer->rx_buf)
+ sspi->rx_words = words;
+ else
+ sspi->rx_words = 0;
+
+ if (xfer->tx_buf) {
+ status = write_fifo(sspi);
+ if (status < 0) {
+ dev_err(sspi->dev, "failed write_fifo. status: 0x%x\n",
+ status);
+ return status;
+ }
+ }
+
+ if (xfer->rx_buf) {
+ val = readl(sspi->regs + SYNQUACER_HSSPI_REG_FIFOCFG);
+ val &= ~(SYNQUACER_HSSPI_FIFOCFG_RX_THRESHOLD_MASK <<
+ SYNQUACER_HSSPI_FIFOCFG_RX_THRESHOLD_SHIFT);
+ val |= ((sspi->rx_words > SYNQUACER_HSSPI_FIFO_DEPTH ?
+ SYNQUACER_HSSPI_FIFO_RX_THRESHOLD : sspi->rx_words) <<
+ SYNQUACER_HSSPI_FIFOCFG_RX_THRESHOLD_SHIFT);
+ writel(val, sspi->regs + SYNQUACER_HSSPI_REG_FIFOCFG);
+ }
+
+ writel(~0, sspi->regs + SYNQUACER_HSSPI_REG_TXC);
+ writel(~0, sspi->regs + SYNQUACER_HSSPI_REG_RXC);
+
+ /* Trigger */
+ val = readl(sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
+ val |= SYNQUACER_HSSPI_DMSTART_START;
+ writel(val, sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
+
+ if (xfer->tx_buf) {
+ val = SYNQUACER_HSSPI_TXE_FIFO_EMPTY;
+ writel(val, sspi->regs + SYNQUACER_HSSPI_REG_TXE);
+ status = wait_for_completion_timeout(&sspi->transfer_done,
+ msecs_to_jiffies(SYNQUACER_HSSPI_TRANSFER_TMOUT_MSEC));
+ writel(0, sspi->regs + SYNQUACER_HSSPI_REG_TXE);
+ }
+
+ if (xfer->rx_buf) {
+ u32 buf[SYNQUACER_HSSPI_FIFO_DEPTH];
+
+ val = SYNQUACER_HSSPI_RXE_FIFO_MORE_THAN_THRESHOLD |
+ SYNQUACER_HSSPI_RXE_SLAVE_RELEASED;
+ writel(val, sspi->regs + SYNQUACER_HSSPI_REG_RXE);
+ status = wait_for_completion_timeout(&sspi->transfer_done,
+ msecs_to_jiffies(SYNQUACER_HSSPI_TRANSFER_TMOUT_MSEC));
+ writel(0, sspi->regs + SYNQUACER_HSSPI_REG_RXE);
+
+ /* stop RX and clean RXFIFO */
+ val = readl(sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
+ val |= SYNQUACER_HSSPI_DMSTOP_STOP;
+ writel(val, sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
+ sspi->rx_buf = buf;
+ sspi->rx_words = SYNQUACER_HSSPI_FIFO_DEPTH;
+ read_fifo(sspi);
+ }
+
+ if (status < 0) {
+ dev_err(sspi->dev, "failed to transfer. status: 0x%x\n",
+ status);
+ return status;
+ }
+
+ return 0;
+}
+
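A standalone illustration of the 8-bit to 32-bit widening check at the top of synquacer_spi_transfer_one() above; the transfer lengths are assumed example values.

#include <stdio.h>
#include <stdbool.h>

/* mirrors: bpw == 8 && !(len % 4) && !(mode & SPI_LSB_FIRST) */
static bool widen_to_32(unsigned int bpw, unsigned int len, bool lsb_first)
{
	return bpw == 8 && !(len % 4) && !lsb_first;
}

int main(void)
{
	/* 12 bytes, MSB first: pushed through the FIFO as 3 x 32-bit words */
	printf("len=12: widen=%d\n", widen_to_32(8, 12, false));
	/* 10 bytes is not a multiple of 4, so it stays at 8 bits per word */
	printf("len=10: widen=%d\n", widen_to_32(8, 10, false));
	return 0;
}

The original bits_per_word is restored right after synquacer_spi_config(), so the widening never leaks back to the caller.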
+static void synquacer_spi_set_cs(struct spi_device *spi, bool enable)
+{
+ struct synquacer_spi *sspi = spi_master_get_devdata(spi->master);
+ u32 val;
+
+ val = readl(sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
+ val &= ~(SYNQUACER_HSSPI_DMPSEL_CS_MASK <<
+ SYNQUACER_HSSPI_DMPSEL_CS_SHIFT);
+ val |= spi->chip_select << SYNQUACER_HSSPI_DMPSEL_CS_SHIFT;
+ writel(val, sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
+}
+
+static int synquacer_spi_wait_status_update(struct synquacer_spi *sspi,
+ bool enable)
+{
+ u32 val;
+ unsigned long timeout = jiffies +
+ msecs_to_jiffies(SYNQUACER_HSSPI_ENABLE_TMOUT_MSEC);
+
+ /* wait until MES (Module Enable Status) is updated */
+ do {
+ val = readl(sspi->regs + SYNQUACER_HSSPI_REG_MCTRL) &
+ SYNQUACER_HSSPI_MCTRL_MES;
+ if (enable && val)
+ return 0;
+ if (!enable && !val)
+ return 0;
+ } while (time_before(jiffies, timeout));
+
+ dev_err(sspi->dev, "timeout occurs in updating Module Enable Status\n");
+ return -EBUSY;
+}
+
+static int synquacer_spi_enable(struct spi_master *master)
+{
+ u32 val;
+ int status;
+ struct synquacer_spi *sspi = spi_master_get_devdata(master);
+
+ /* Disable module */
+ writel(0, sspi->regs + SYNQUACER_HSSPI_REG_MCTRL);
+ status = synquacer_spi_wait_status_update(sspi, false);
+ if (status < 0)
+ return status;
+
+ writel(0, sspi->regs + SYNQUACER_HSSPI_REG_TXE);
+ writel(0, sspi->regs + SYNQUACER_HSSPI_REG_RXE);
+ writel(~0, sspi->regs + SYNQUACER_HSSPI_REG_TXC);
+ writel(~0, sspi->regs + SYNQUACER_HSSPI_REG_RXC);
+ writel(~0, sspi->regs + SYNQUACER_HSSPI_REG_FAULTC);
+
+ val = readl(sspi->regs + SYNQUACER_HSSPI_REG_DMCFG);
+ val &= ~SYNQUACER_HSSPI_DMCFG_SSDC;
+ val &= ~SYNQUACER_HSSPI_DMCFG_MSTARTEN;
+ writel(val, sspi->regs + SYNQUACER_HSSPI_REG_DMCFG);
+
+ val = readl(sspi->regs + SYNQUACER_HSSPI_REG_MCTRL);
+ if (sspi->clk_src_type == SYNQUACER_HSSPI_CLOCK_SRC_IPCLK)
+ val |= SYNQUACER_HSSPI_MCTRL_CDSS;
+ else
+ val &= ~SYNQUACER_HSSPI_MCTRL_CDSS;
+
+ val &= ~SYNQUACER_HSSPI_MCTRL_COMMAND_SEQUENCE_EN;
+ val |= SYNQUACER_HSSPI_MCTRL_MEN;
+ val |= SYNQUACER_HSSPI_MCTRL_SYNCON;
+
+ /* Enable module */
+ writel(val, sspi->regs + SYNQUACER_HSSPI_REG_MCTRL);
+ status = synquacer_spi_wait_status_update(sspi, true);
+ if (status < 0)
+ return status;
+
+ return 0;
+}
+
+static irqreturn_t sq_spi_rx_handler(int irq, void *priv)
+{
+ uint32_t val;
+ struct synquacer_spi *sspi = priv;
+
+ val = readl(sspi->regs + SYNQUACER_HSSPI_REG_RXF);
+ if ((val & SYNQUACER_HSSPI_RXF_SLAVE_RELEASED) ||
+ (val & SYNQUACER_HSSPI_RXF_FIFO_MORE_THAN_THRESHOLD)) {
+ read_fifo(sspi);
+
+ if (sspi->rx_words == 0) {
+ writel(0, sspi->regs + SYNQUACER_HSSPI_REG_RXE);
+ complete(&sspi->transfer_done);
+ }
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static irqreturn_t sq_spi_tx_handler(int irq, void *priv)
+{
+ uint32_t val;
+ struct synquacer_spi *sspi = priv;
+
+ val = readl(sspi->regs + SYNQUACER_HSSPI_REG_TXF);
+ if (val & SYNQUACER_HSSPI_TXF_FIFO_EMPTY) {
+ if (sspi->tx_words == 0) {
+ writel(0, sspi->regs + SYNQUACER_HSSPI_REG_TXE);
+ complete(&sspi->transfer_done);
+ } else {
+ write_fifo(sspi);
+ }
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int synquacer_spi_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct spi_master *master;
+ struct synquacer_spi *sspi;
+ int ret;
+ int rx_irq, tx_irq;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*sspi));
+ if (!master)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, master);
+
+ sspi = spi_master_get_devdata(master);
+ sspi->dev = &pdev->dev;
+
+ init_completion(&sspi->transfer_done);
+
+ sspi->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(sspi->regs)) {
+ ret = PTR_ERR(sspi->regs);
+ goto put_spi;
+ }
+
+ sspi->clk_src_type = SYNQUACER_HSSPI_CLOCK_SRC_IHCLK; /* Default */
+ device_property_read_u32(&pdev->dev, "socionext,ihclk-rate",
+ &master->max_speed_hz); /* for ACPI */
+
+ if (dev_of_node(&pdev->dev)) {
+ if (device_property_match_string(&pdev->dev,
+ "clock-names", "iHCLK") >= 0) {
+ sspi->clk_src_type = SYNQUACER_HSSPI_CLOCK_SRC_IHCLK;
+ sspi->clk = devm_clk_get(sspi->dev, "iHCLK");
+ } else if (device_property_match_string(&pdev->dev,
+ "clock-names", "iPCLK") >= 0) {
+ sspi->clk_src_type = SYNQUACER_HSSPI_CLOCK_SRC_IPCLK;
+ sspi->clk = devm_clk_get(sspi->dev, "iPCLK");
+ } else {
+ dev_err(&pdev->dev, "specified wrong clock source\n");
+ ret = -EINVAL;
+ goto put_spi;
+ }
+
+ if (IS_ERR(sspi->clk)) {
+ if (!(PTR_ERR(sspi->clk) == -EPROBE_DEFER))
+ dev_err(&pdev->dev, "clock not found\n");
+ ret = PTR_ERR(sspi->clk);
+ goto put_spi;
+ }
+
+ ret = clk_prepare_enable(sspi->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable clock (%d)\n",
+ ret);
+ goto put_spi;
+ }
+
+ master->max_speed_hz = clk_get_rate(sspi->clk);
+ }
+
+ if (!master->max_speed_hz) {
+ dev_err(&pdev->dev, "missing clock source\n");
+ ret = -EINVAL;
+ goto fail_enable;
+ }
+ master->min_speed_hz = master->max_speed_hz / 254;
+
+ sspi->aces = device_property_read_bool(&pdev->dev,
+ "socionext,set-aces");
+ sspi->rtm = device_property_read_bool(&pdev->dev, "socionext,use-rtm");
+
+ master->num_chipselect = SYNQUACER_HSSPI_NUM_CHIP_SELECT;
+
+ rx_irq = platform_get_irq(pdev, 0);
+ if (rx_irq <= 0) {
+ dev_err(&pdev->dev, "get rx_irq failed (%d)\n", rx_irq);
+ ret = rx_irq;
+ goto put_spi;
+ }
+ snprintf(sspi->rx_irq_name, SYNQUACER_HSSPI_IRQ_NAME_MAX, "%s-rx",
+ dev_name(&pdev->dev));
+ ret = devm_request_irq(&pdev->dev, rx_irq, sq_spi_rx_handler,
+ 0, sspi->rx_irq_name, sspi);
+ if (ret) {
+ dev_err(&pdev->dev, "request rx_irq failed (%d)\n", ret);
+ goto put_spi;
+ }
+
+ tx_irq = platform_get_irq(pdev, 1);
+ if (tx_irq <= 0) {
+ dev_err(&pdev->dev, "get tx_irq failed (%d)\n", tx_irq);
+ ret = tx_irq;
+ goto put_spi;
+ }
+ snprintf(sspi->tx_irq_name, SYNQUACER_HSSPI_IRQ_NAME_MAX, "%s-tx",
+ dev_name(&pdev->dev));
+ ret = devm_request_irq(&pdev->dev, tx_irq, sq_spi_tx_handler,
+ 0, sspi->tx_irq_name, sspi);
+ if (ret) {
+ dev_err(&pdev->dev, "request tx_irq failed (%d)\n", ret);
+ goto put_spi;
+ }
+
+ master->dev.of_node = np;
+ master->dev.fwnode = pdev->dev.fwnode;
+ master->auto_runtime_pm = true;
+ master->bus_num = pdev->id;
+
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_TX_DUAL | SPI_RX_DUAL |
+ SPI_TX_QUAD | SPI_RX_QUAD;
+ master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(24) |
+ SPI_BPW_MASK(16) | SPI_BPW_MASK(8);
+
+ master->set_cs = synquacer_spi_set_cs;
+ master->transfer_one = synquacer_spi_transfer_one;
+
+ ret = synquacer_spi_enable(master);
+ if (ret)
+ goto fail_enable;
+
+ pm_runtime_set_active(sspi->dev);
+ pm_runtime_enable(sspi->dev);
+
+ ret = devm_spi_register_master(sspi->dev, master);
+ if (ret)
+ goto disable_pm;
+
+ return 0;
+
+disable_pm:
+ pm_runtime_disable(sspi->dev);
+fail_enable:
+ clk_disable_unprepare(sspi->clk);
+put_spi:
+ spi_master_put(master);
+
+ return ret;
+}
+
+static int synquacer_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct synquacer_spi *sspi = spi_master_get_devdata(master);
+
+ pm_runtime_disable(sspi->dev);
+
+ clk_disable_unprepare(sspi->clk);
+
+ return 0;
+}
+
+static int __maybe_unused synquacer_spi_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct synquacer_spi *sspi = spi_master_get_devdata(master);
+ int ret;
+
+ ret = spi_master_suspend(master);
+ if (ret)
+ return ret;
+
+ if (!pm_runtime_suspended(dev))
+ clk_disable_unprepare(sspi->clk);
+
+ return ret;
+}
+
+static int __maybe_unused synquacer_spi_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct synquacer_spi *sspi = spi_master_get_devdata(master);
+ int ret;
+
+ if (!pm_runtime_suspended(dev)) {
+ /* Ensure reconfigure during next xfer */
+ sspi->speed = 0;
+
+ ret = clk_prepare_enable(sspi->clk);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable clk (%d)\n",
+ ret);
+ return ret;
+ }
+
+ ret = synquacer_spi_enable(master);
+ if (ret) {
+ dev_err(dev, "failed to enable spi (%d)\n", ret);
+ return ret;
+ }
+ }
+
+ ret = spi_master_resume(master);
+ if (ret < 0)
+ clk_disable_unprepare(sspi->clk);
+
+ return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(synquacer_spi_pm_ops, synquacer_spi_suspend,
+ synquacer_spi_resume);
+
+static const struct of_device_id synquacer_spi_of_match[] = {
+ {.compatible = "socionext,synquacer-spi"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, synquacer_spi_of_match);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id synquacer_hsspi_acpi_ids[] = {
+ { "SCX0004" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(acpi, synquacer_hsspi_acpi_ids);
+#endif
+
+static struct platform_driver synquacer_spi_driver = {
+ .driver = {
+ .name = "synquacer-spi",
+ .pm = &synquacer_spi_pm_ops,
+ .of_match_table = synquacer_spi_of_match,
+ .acpi_match_table = ACPI_PTR(synquacer_hsspi_acpi_ids),
+ },
+ .probe = synquacer_spi_probe,
+ .remove = synquacer_spi_remove,
+};
+module_platform_driver(synquacer_spi_driver);
+
+MODULE_DESCRIPTION("Socionext Synquacer HS-SPI controller driver");
+MODULE_AUTHOR("Masahisa Kojima <masahisa.kojima@linaro.org>");
+MODULE_AUTHOR("Jassi Brar <jaswinder.singh@linaro.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c
index d22f4d10413f..39374c2edcf3 100644
--- a/drivers/spi/spi-tegra114.c
+++ b/drivers/spi/spi-tegra114.c
@@ -84,8 +84,10 @@
(reg = (((val) & 0x1) << ((cs) * 8 + 5)) | \
((reg) & ~(1 << ((cs) * 8 + 5))))
#define SPI_SET_CYCLES_BETWEEN_PACKETS(reg, cs, val) \
- (reg = (((val) & 0xF) << ((cs) * 8)) | \
- ((reg) & ~(0xF << ((cs) * 8))))
+ (reg = (((val) & 0x1F) << ((cs) * 8)) | \
+ ((reg) & ~(0x1F << ((cs) * 8))))
+#define MAX_SETUP_HOLD_CYCLES 16
+#define MAX_INACTIVE_CYCLES 32
#define SPI_TRANS_STATUS 0x010
#define SPI_BLK_CNT(val) (((val) >> 0) & 0xFFFF)
@@ -156,6 +158,11 @@ struct tegra_spi_soc_data {
bool has_intr_mask_reg;
};
+struct tegra_spi_client_data {
+ int tx_clk_tap_delay;
+ int rx_clk_tap_delay;
+};
+
struct tegra_spi_data {
struct device *dev;
struct spi_master *master;
@@ -182,6 +189,7 @@ struct tegra_spi_data {
unsigned dma_buf_size;
unsigned max_buf_size;
bool is_curr_dma_xfer;
+ bool use_hw_based_cs;
struct completion rx_dma_complete;
struct completion tx_dma_complete;
@@ -194,6 +202,10 @@ struct tegra_spi_data {
u32 command1_reg;
u32 dma_control_reg;
u32 def_command1_reg;
+ u32 def_command2_reg;
+ u32 spi_cs_timing1;
+ u32 spi_cs_timing2;
+ u8 last_used_cs;
struct completion xfer_completion;
struct spi_transfer *curr_xfer;
@@ -711,14 +723,55 @@ static void tegra_spi_deinit_dma_param(struct tegra_spi_data *tspi,
dma_release_channel(dma_chan);
}
+static void tegra_spi_set_hw_cs_timing(struct spi_device *spi, u8 setup_dly,
+ u8 hold_dly, u8 inactive_dly)
+{
+ struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
+ u32 setup_hold;
+ u32 spi_cs_timing;
+ u32 inactive_cycles;
+ u8 cs_state;
+
+ setup_dly = min_t(u8, setup_dly, MAX_SETUP_HOLD_CYCLES);
+ hold_dly = min_t(u8, hold_dly, MAX_SETUP_HOLD_CYCLES);
+ if (setup_dly && hold_dly) {
+ setup_hold = SPI_SETUP_HOLD(setup_dly - 1, hold_dly - 1);
+ spi_cs_timing = SPI_CS_SETUP_HOLD(tspi->spi_cs_timing1,
+ spi->chip_select,
+ setup_hold);
+ if (tspi->spi_cs_timing1 != spi_cs_timing) {
+ tspi->spi_cs_timing1 = spi_cs_timing;
+ tegra_spi_writel(tspi, spi_cs_timing, SPI_CS_TIMING1);
+ }
+ }
+
+ inactive_cycles = min_t(u8, inactive_dly, MAX_INACTIVE_CYCLES);
+ if (inactive_cycles)
+ inactive_cycles--;
+ cs_state = inactive_cycles ? 0 : 1;
+ spi_cs_timing = tspi->spi_cs_timing2;
+ SPI_SET_CS_ACTIVE_BETWEEN_PACKETS(spi_cs_timing, spi->chip_select,
+ cs_state);
+ SPI_SET_CYCLES_BETWEEN_PACKETS(spi_cs_timing, spi->chip_select,
+ inactive_cycles);
+ if (tspi->spi_cs_timing2 != spi_cs_timing) {
+ tspi->spi_cs_timing2 = spi_cs_timing;
+ tegra_spi_writel(tspi, spi_cs_timing, SPI_CS_TIMING2);
+ }
+}
+
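A quick standalone check of the widened SPI_SET_CYCLES_BETWEEN_PACKETS() update used by tegra_spi_set_hw_cs_timing() above; the starting register value is an assumed example.

#include <stdio.h>
#include <stdint.h>

/* copied from the hunk above: a 5-bit cycles field per chip select at bits [cs * 8 + 4 : cs * 8] */
#define SPI_SET_CYCLES_BETWEEN_PACKETS(reg, cs, val) \
	(reg = (((val) & 0x1F) << ((cs) * 8)) | \
	       ((reg) & ~(0x1F << ((cs) * 8))))

int main(void)
{
	uint32_t timing2 = 0x1f1f1f1f;	/* assumed SPI_CS_TIMING2 content */

	/* program 3 inactive cycles for chip select 1: only bits [12:8] change */
	SPI_SET_CYCLES_BETWEEN_PACKETS(timing2, 1, 3);
	printf("SPI_CS_TIMING2 = 0x%08x\n", timing2);	/* prints 0x1f1f031f */
	return 0;
}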
static u32 tegra_spi_setup_transfer_one(struct spi_device *spi,
- struct spi_transfer *t, bool is_first_of_msg)
+ struct spi_transfer *t,
+ bool is_first_of_msg,
+ bool is_single_xfer)
{
struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
+ struct tegra_spi_client_data *cdata = spi->controller_data;
u32 speed = t->speed_hz;
u8 bits_per_word = t->bits_per_word;
- u32 command1;
+ u32 command1, command2;
int req_mode;
+ u32 tx_tap = 0, rx_tap = 0;
if (speed != tspi->cur_speed) {
clk_set_rate(tspi->clk, speed);
@@ -765,13 +818,34 @@ static u32 tegra_spi_setup_transfer_one(struct spi_device *spi,
} else
tegra_spi_writel(tspi, command1, SPI_COMMAND1);
- command1 |= SPI_CS_SW_HW;
- if (spi->mode & SPI_CS_HIGH)
- command1 |= SPI_CS_SW_VAL;
- else
- command1 &= ~SPI_CS_SW_VAL;
+ /* GPIO based chip select control */
+ if (spi->cs_gpiod)
+ gpiod_set_value(spi->cs_gpiod, 1);
+
+ if (is_single_xfer && !(t->cs_change)) {
+ tspi->use_hw_based_cs = true;
+ command1 &= ~(SPI_CS_SW_HW | SPI_CS_SW_VAL);
+ } else {
+ tspi->use_hw_based_cs = false;
+ command1 |= SPI_CS_SW_HW;
+ if (spi->mode & SPI_CS_HIGH)
+ command1 |= SPI_CS_SW_VAL;
+ else
+ command1 &= ~SPI_CS_SW_VAL;
+ }
+
+ if (tspi->last_used_cs != spi->chip_select) {
+ if (cdata && cdata->tx_clk_tap_delay)
+ tx_tap = cdata->tx_clk_tap_delay;
+ if (cdata && cdata->rx_clk_tap_delay)
+ rx_tap = cdata->rx_clk_tap_delay;
+ command2 = SPI_TX_TAP_DELAY(tx_tap) |
+ SPI_RX_TAP_DELAY(rx_tap);
+ if (command2 != tspi->def_command2_reg)
+ tegra_spi_writel(tspi, command2, SPI_COMMAND2);
+ tspi->last_used_cs = spi->chip_select;
+ }
- tegra_spi_writel(tspi, 0, SPI_COMMAND2);
} else {
command1 = tspi->command1_reg;
command1 &= ~SPI_BIT_LENGTH(~0);
@@ -827,9 +901,42 @@ static int tegra_spi_start_transfer_one(struct spi_device *spi,
return ret;
}
+static struct tegra_spi_client_data
+ *tegra_spi_parse_cdata_dt(struct spi_device *spi)
+{
+ struct tegra_spi_client_data *cdata;
+ struct device_node *slave_np;
+
+ slave_np = spi->dev.of_node;
+ if (!slave_np) {
+ dev_dbg(&spi->dev, "device node not found\n");
+ return NULL;
+ }
+
+ cdata = kzalloc(sizeof(*cdata), GFP_KERNEL);
+ if (!cdata)
+ return NULL;
+
+ of_property_read_u32(slave_np, "nvidia,tx-clk-tap-delay",
+ &cdata->tx_clk_tap_delay);
+ of_property_read_u32(slave_np, "nvidia,rx-clk-tap-delay",
+ &cdata->rx_clk_tap_delay);
+ return cdata;
+}
+
+static void tegra_spi_cleanup(struct spi_device *spi)
+{
+ struct tegra_spi_client_data *cdata = spi->controller_data;
+
+ spi->controller_data = NULL;
+ if (spi->dev.of_node)
+ kfree(cdata);
+}
+
static int tegra_spi_setup(struct spi_device *spi)
{
struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
+ struct tegra_spi_client_data *cdata = spi->controller_data;
u32 val;
unsigned long flags;
int ret;
@@ -840,9 +947,16 @@ static int tegra_spi_setup(struct spi_device *spi)
spi->mode & SPI_CPHA ? "" : "~",
spi->max_speed_hz);
+ if (!cdata) {
+ cdata = tegra_spi_parse_cdata_dt(spi);
+ spi->controller_data = cdata;
+ }
+
ret = pm_runtime_get_sync(tspi->dev);
if (ret < 0) {
dev_err(tspi->dev, "pm runtime failed, e = %d\n", ret);
+ if (cdata)
+ tegra_spi_cleanup(spi);
return ret;
}
@@ -853,6 +967,10 @@ static int tegra_spi_setup(struct spi_device *spi)
}
spin_lock_irqsave(&tspi->lock, flags);
+ /* GPIO based chip select control */
+ if (spi->cs_gpiod)
+ gpiod_set_value(spi->cs_gpiod, 0);
+
val = tspi->def_command1_reg;
if (spi->mode & SPI_CS_HIGH)
val &= ~SPI_CS_POL_INACTIVE(spi->chip_select);
@@ -882,11 +1000,18 @@ static void tegra_spi_transfer_end(struct spi_device *spi)
struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
int cs_val = (spi->mode & SPI_CS_HIGH) ? 0 : 1;
- if (cs_val)
- tspi->command1_reg |= SPI_CS_SW_VAL;
- else
- tspi->command1_reg &= ~SPI_CS_SW_VAL;
- tegra_spi_writel(tspi, tspi->command1_reg, SPI_COMMAND1);
+ /* GPIO based chip select control */
+ if (spi->cs_gpiod)
+ gpiod_set_value(spi->cs_gpiod, 0);
+
+ if (!tspi->use_hw_based_cs) {
+ if (cs_val)
+ tspi->command1_reg |= SPI_CS_SW_VAL;
+ else
+ tspi->command1_reg &= ~SPI_CS_SW_VAL;
+ tegra_spi_writel(tspi, tspi->command1_reg, SPI_COMMAND1);
+ }
+
tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1);
}
@@ -913,16 +1038,19 @@ static int tegra_spi_transfer_one_message(struct spi_master *master,
struct spi_device *spi = msg->spi;
int ret;
bool skip = false;
+ int single_xfer;
msg->status = 0;
msg->actual_length = 0;
+ single_xfer = list_is_singular(&msg->transfers);
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
u32 cmd1;
reinit_completion(&tspi->xfer_completion);
- cmd1 = tegra_spi_setup_transfer_one(spi, xfer, is_first_msg);
+ cmd1 = tegra_spi_setup_transfer_one(spi, xfer, is_first_msg,
+ single_xfer);
if (!xfer->len) {
ret = 0;
@@ -955,6 +1083,7 @@ static int tegra_spi_transfer_one_message(struct spi_master *master,
reset_control_assert(tspi->rst);
udelay(2);
reset_control_deassert(tspi->rst);
+ tspi->last_used_cs = master->num_chipselect + 1;
goto complete_xfer;
}
@@ -1188,11 +1317,14 @@ static int tegra_spi_probe(struct platform_device *pdev)
master->max_speed_hz = 25000000; /* 25MHz */
/* the spi->mode bits understood by this driver: */
+ master->use_gpio_descriptors = true;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST |
SPI_TX_DUAL | SPI_RX_DUAL | SPI_3WIRE;
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
master->setup = tegra_spi_setup;
+ master->cleanup = tegra_spi_cleanup;
master->transfer_one_message = tegra_spi_transfer_one_message;
+ master->set_cs_timing = tegra_spi_set_hw_cs_timing;
master->num_chipselect = MAX_CHIP_SELECT;
master->auto_runtime_pm = true;
bus_num = of_alias_get_id(pdev->dev.of_node, "spi");
@@ -1268,6 +1400,10 @@ static int tegra_spi_probe(struct platform_device *pdev)
reset_control_deassert(tspi->rst);
tspi->def_command1_reg = SPI_M_S;
tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1);
+ tspi->spi_cs_timing1 = tegra_spi_readl(tspi, SPI_CS_TIMING1);
+ tspi->spi_cs_timing2 = tegra_spi_readl(tspi, SPI_CS_TIMING2);
+ tspi->def_command2_reg = tegra_spi_readl(tspi, SPI_COMMAND2);
+ tspi->last_used_cs = master->num_chipselect + 1;
pm_runtime_put(&pdev->dev);
ret = request_threaded_irq(tspi->irq, tegra_spi_isr,
tegra_spi_isr_thread, IRQF_ONESHOT,
@@ -1340,6 +1476,8 @@ static int tegra_spi_resume(struct device *dev)
return ret;
}
tegra_spi_writel(tspi, tspi->command1_reg, SPI_COMMAND1);
+ tegra_spi_writel(tspi, tspi->def_command2_reg, SPI_COMMAND2);
+ tspi->last_used_cs = master->num_chipselect + 1;
pm_runtime_put(dev);
return spi_master_resume(master);
diff --git a/drivers/spi/spi-uniphier.c b/drivers/spi/spi-uniphier.c
index 5a6137fe172d..b32c77df5d49 100644
--- a/drivers/spi/spi-uniphier.c
+++ b/drivers/spi/spi-uniphier.c
@@ -328,7 +328,12 @@ static int uniphier_spi_transfer_one(struct spi_master *master,
struct spi_transfer *t)
{
struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
- int status;
+ struct device *dev = master->dev.parent;
+ unsigned long time_left;
+
+ /* Terminate and return success for 0 byte length transfer */
+ if (!t->len)
+ return 0;
uniphier_spi_setup_transfer(spi, t);
@@ -338,13 +343,15 @@ static int uniphier_spi_transfer_one(struct spi_master *master,
uniphier_spi_irq_enable(spi, SSI_IE_RCIE | SSI_IE_RORIE);
- status = wait_for_completion_timeout(&priv->xfer_done,
- msecs_to_jiffies(SSI_TIMEOUT_MS));
+ time_left = wait_for_completion_timeout(&priv->xfer_done,
+ msecs_to_jiffies(SSI_TIMEOUT_MS));
uniphier_spi_irq_disable(spi, SSI_IE_RCIE | SSI_IE_RORIE);
- if (status < 0)
- return status;
+ if (!time_left) {
+ dev_err(dev, "transfer timeout.\n");
+ return -ETIMEDOUT;
+ }
return priv->error;
}
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 5e4654032bfa..91673351bcf3 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1090,6 +1090,60 @@ static int spi_transfer_wait(struct spi_controller *ctlr,
return 0;
}
+static void _spi_transfer_delay_ns(u32 ns)
+{
+ if (!ns)
+ return;
+ if (ns <= 1000) {
+ ndelay(ns);
+ } else {
+ u32 us = DIV_ROUND_UP(ns, 1000);
+
+ if (us <= 10)
+ udelay(us);
+ else
+ usleep_range(us, us + DIV_ROUND_UP(us, 10));
+ }
+}
+
+static void _spi_transfer_cs_change_delay(struct spi_message *msg,
+ struct spi_transfer *xfer)
+{
+ u32 delay = xfer->cs_change_delay;
+ u32 unit = xfer->cs_change_delay_unit;
+ u32 hz;
+
+ /* return early on "fast" mode - for everything but USECS */
+ if (!delay && unit != SPI_DELAY_UNIT_USECS)
+ return;
+
+ switch (unit) {
+ case SPI_DELAY_UNIT_USECS:
+ /* for compatibility use default of 10us */
+ if (!delay)
+ delay = 10000;
+ else
+ delay *= 1000;
+ break;
+ case SPI_DELAY_UNIT_NSECS: /* nothing to do here */
+ break;
+ case SPI_DELAY_UNIT_SCK:
+ /* if there is no effective speed known, then approximate
+ * by underestimating with half the requested hz
+ */
+ hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
+ delay *= DIV_ROUND_UP(1000000000, hz);
+ break;
+ default:
+ dev_err_once(&msg->spi->dev,
+ "Use of unsupported delay unit %i, using default of 10us\n",
+ xfer->cs_change_delay_unit);
+ delay = 10000;
+ }
+ /* now sleep for the requested amount of time */
+ _spi_transfer_delay_ns(delay);
+}
+
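A self-contained sketch of the unit conversion performed above; the enum values are local stand-ins for the SPI_DELAY_UNIT_* constants and the 1 MHz effective speed is an assumed example, so only the conversion logic mirrors _spi_transfer_cs_change_delay().

#include <stdio.h>
#include <stdint.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* local stand-ins for SPI_DELAY_UNIT_USECS/NSECS/SCK */
enum { UNIT_USECS, UNIT_NSECS, UNIT_SCK };

static uint32_t cs_change_delay_ns(uint32_t delay, int unit, uint32_t hz)
{
	switch (unit) {
	case UNIT_USECS:
		return delay ? delay * 1000 : 10000;	/* keep the 10 us default */
	case UNIT_NSECS:
		return delay;
	case UNIT_SCK:
		/* one clock period per requested cycle, rounded up */
		return delay * DIV_ROUND_UP(1000000000u, hz);
	default:
		return 10000;
	}
}

int main(void)
{
	/* two SCK cycles at an assumed effective speed of 1 MHz -> 2000 ns */
	printf("%u ns\n", cs_change_delay_ns(2, UNIT_SCK, 1000000));
	/* an unset USECS delay falls back to the historical 10 us gap */
	printf("%u ns\n", cs_change_delay_ns(0, UNIT_USECS, 0));
	return 0;
}

When no effective_speed_hz has been reported by the controller, the patch divides by half the requested speed instead, so the resulting delay errs on the long side.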
/*
* spi_transfer_one_message - Default implementation of transfer_one_message()
*
@@ -1148,14 +1202,8 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
if (msg->status != -EINPROGRESS)
goto out;
- if (xfer->delay_usecs) {
- u16 us = xfer->delay_usecs;
-
- if (us <= 10)
- udelay(us);
- else
- usleep_range(us, us + DIV_ROUND_UP(us, 10));
- }
+ if (xfer->delay_usecs)
+ _spi_transfer_delay_ns(xfer->delay_usecs * 1000);
if (xfer->cs_change) {
if (list_is_last(&xfer->transfer_list,
@@ -1163,7 +1211,7 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
keep_cs = true;
} else {
spi_set_cs(msg->spi, false);
- udelay(10);
+ _spi_transfer_cs_change_delay(msg, xfer);
spi_set_cs(msg->spi, true);
}
}
@@ -1369,10 +1417,32 @@ static void spi_pump_messages(struct kthread_work *work)
__spi_pump_messages(ctlr, true);
}
-static int spi_init_queue(struct spi_controller *ctlr)
+/**
+ * spi_set_thread_rt - set the controller to pump at realtime priority
+ * @ctlr: controller to boost priority of
+ *
+ * This can be called because the controller requested realtime priority
+ * (by setting the ->rt value before calling spi_register_controller()) or
+ * because a device on the bus said that its transfers needed realtime
+ * priority.
+ *
+ * NOTE: at the moment if any device on a bus says it needs realtime then
+ * the thread will be at realtime priority for all transfers on that
+ * controller. If this eventually becomes a problem we may see if we can
+ * find a way to boost the priority only temporarily during relevant
+ * transfers.
+ */
+static void spi_set_thread_rt(struct spi_controller *ctlr)
{
struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
+ dev_info(&ctlr->dev,
+ "will run message pump with realtime priority\n");
+ sched_setscheduler(ctlr->kworker_task, SCHED_FIFO, &param);
+}
+
+static int spi_init_queue(struct spi_controller *ctlr)
+{
ctlr->running = false;
ctlr->busy = false;
@@ -1392,11 +1462,8 @@ static int spi_init_queue(struct spi_controller *ctlr)
* request and the scheduling of the message pump thread. Without this
* setting the message pump thread will remain at default priority.
*/
- if (ctlr->rt) {
- dev_info(&ctlr->dev,
- "will run message pump with realtime priority\n");
- sched_setscheduler(ctlr->kworker_task, SCHED_FIFO, &param);
- }
+ if (ctlr->rt)
+ spi_set_thread_rt(ctlr);
return 0;
}
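The realtime boost itself is plain SCHED_FIFO scheduling; a userspace sketch of the same call follows, with the priority choice as an assumption (the kernel helper uses MAX_RT_PRIO - 1 for the kworker task).

#include <stdio.h>
#include <sched.h>
#include <errno.h>
#include <string.h>

int main(void)
{
	struct sched_param param = {
		.sched_priority = sched_get_priority_max(SCHED_FIFO),
	};

	if (sched_setscheduler(0, SCHED_FIFO, &param))
		printf("not boosted: %s (needs CAP_SYS_NICE)\n", strerror(errno));
	else
		printf("running SCHED_FIFO, like the boosted message pump thread\n");
	return 0;
}

With this patch a client can also request the boost per device by setting spi->rt before spi_setup(), as the spi_setup() hunk further down shows.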
@@ -1804,9 +1871,18 @@ static void of_register_spi_devices(struct spi_controller *ctlr) { }
#endif
#ifdef CONFIG_ACPI
-static void acpi_spi_parse_apple_properties(struct spi_device *spi)
+struct acpi_spi_lookup {
+ struct spi_controller *ctlr;
+ u32 max_speed_hz;
+ u32 mode;
+ int irq;
+ u8 bits_per_word;
+ u8 chip_select;
+};
+
+static void acpi_spi_parse_apple_properties(struct acpi_device *dev,
+ struct acpi_spi_lookup *lookup)
{
- struct acpi_device *dev = ACPI_COMPANION(&spi->dev);
const union acpi_object *obj;
if (!x86_apple_machine)
@@ -1814,35 +1890,46 @@ static void acpi_spi_parse_apple_properties(struct spi_device *spi)
if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj)
&& obj->buffer.length >= 4)
- spi->max_speed_hz = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer;
+ lookup->max_speed_hz = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer;
if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj)
&& obj->buffer.length == 8)
- spi->bits_per_word = *(u64 *)obj->buffer.pointer;
+ lookup->bits_per_word = *(u64 *)obj->buffer.pointer;
if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj)
&& obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer)
- spi->mode |= SPI_LSB_FIRST;
+ lookup->mode |= SPI_LSB_FIRST;
if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj)
&& obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
- spi->mode |= SPI_CPOL;
+ lookup->mode |= SPI_CPOL;
if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj)
&& obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
- spi->mode |= SPI_CPHA;
+ lookup->mode |= SPI_CPHA;
}
static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
{
- struct spi_device *spi = data;
- struct spi_controller *ctlr = spi->controller;
+ struct acpi_spi_lookup *lookup = data;
+ struct spi_controller *ctlr = lookup->ctlr;
if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
struct acpi_resource_spi_serialbus *sb;
+ acpi_handle parent_handle;
+ acpi_status status;
sb = &ares->data.spi_serial_bus;
if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
+
+ status = acpi_get_handle(NULL,
+ sb->resource_source.string_ptr,
+ &parent_handle);
+
+ if (ACPI_FAILURE(status) ||
+ ACPI_HANDLE(ctlr->dev.parent) != parent_handle)
+ return -ENODEV;
+
/*
* ACPI DeviceSelection numbering is handled by the
* host controller driver in Windows and can vary
@@ -1855,25 +1942,25 @@ static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
sb->device_selection);
if (cs < 0)
return cs;
- spi->chip_select = cs;
+ lookup->chip_select = cs;
} else {
- spi->chip_select = sb->device_selection;
+ lookup->chip_select = sb->device_selection;
}
- spi->max_speed_hz = sb->connection_speed;
+ lookup->max_speed_hz = sb->connection_speed;
if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
- spi->mode |= SPI_CPHA;
+ lookup->mode |= SPI_CPHA;
if (sb->clock_polarity == ACPI_SPI_START_HIGH)
- spi->mode |= SPI_CPOL;
+ lookup->mode |= SPI_CPOL;
if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
- spi->mode |= SPI_CS_HIGH;
+ lookup->mode |= SPI_CS_HIGH;
}
- } else if (spi->irq < 0) {
+ } else if (lookup->irq < 0) {
struct resource r;
if (acpi_dev_resource_interrupt(ares, 0, &r))
- spi->irq = r.start;
+ lookup->irq = r.start;
}
/* Always tell the ACPI core to skip this resource */
@@ -1883,7 +1970,9 @@ static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
struct acpi_device *adev)
{
+ acpi_handle parent_handle = NULL;
struct list_head resource_list;
+ struct acpi_spi_lookup lookup = {};
struct spi_device *spi;
int ret;
@@ -1891,28 +1980,42 @@ static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
acpi_device_enumerated(adev))
return AE_OK;
- spi = spi_alloc_device(ctlr);
- if (!spi) {
- dev_err(&ctlr->dev, "failed to allocate SPI device for %s\n",
- dev_name(&adev->dev));
- return AE_NO_MEMORY;
- }
-
- ACPI_COMPANION_SET(&spi->dev, adev);
- spi->irq = -1;
+ lookup.ctlr = ctlr;
+ lookup.irq = -1;
INIT_LIST_HEAD(&resource_list);
ret = acpi_dev_get_resources(adev, &resource_list,
- acpi_spi_add_resource, spi);
+ acpi_spi_add_resource, &lookup);
acpi_dev_free_resource_list(&resource_list);
- acpi_spi_parse_apple_properties(spi);
+ if (ret < 0)
+ /* found SPI in _CRS but it points to another controller */
+ return AE_OK;
- if (ret < 0 || !spi->max_speed_hz) {
- spi_dev_put(spi);
+ if (!lookup.max_speed_hz &&
+ !ACPI_FAILURE(acpi_get_parent(adev->handle, &parent_handle)) &&
+ ACPI_HANDLE(ctlr->dev.parent) == parent_handle) {
+ /* Apple does not use _CRS but nested devices for SPI slaves */
+ acpi_spi_parse_apple_properties(adev, &lookup);
+ }
+
+ if (!lookup.max_speed_hz)
return AE_OK;
+
+ spi = spi_alloc_device(ctlr);
+ if (!spi) {
+ dev_err(&ctlr->dev, "failed to allocate SPI device for %s\n",
+ dev_name(&adev->dev));
+ return AE_NO_MEMORY;
}
+ ACPI_COMPANION_SET(&spi->dev, adev);
+ spi->max_speed_hz = lookup.max_speed_hz;
+ spi->mode = lookup.mode;
+ spi->irq = lookup.irq;
+ spi->bits_per_word = lookup.bits_per_word;
+ spi->chip_select = lookup.chip_select;
+
acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
sizeof(spi->modalias));
@@ -1944,6 +2047,8 @@ static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
return acpi_register_spi_device(ctlr, adev);
}
+#define SPI_ACPI_ENUMERATE_MAX_DEPTH 32
+
static void acpi_register_spi_devices(struct spi_controller *ctlr)
{
acpi_status status;
@@ -1953,7 +2058,8 @@ static void acpi_register_spi_devices(struct spi_controller *ctlr)
if (!handle)
return;
- status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
+ status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+ SPI_ACPI_ENUMERATE_MAX_DEPTH,
acpi_spi_add_device, NULL, ctlr, NULL);
if (ACPI_FAILURE(status))
dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
@@ -2286,11 +2392,6 @@ int spi_register_controller(struct spi_controller *ctlr)
if (status)
return status;
- /* even if it's just one always-selected device, there must
- * be at least one chipselect
- */
- if (ctlr->num_chipselect == 0)
- return -EINVAL;
if (ctlr->bus_num >= 0) {
/* devices with a fixed bus num must check-in with the num */
mutex_lock(&board_lock);
@@ -2361,6 +2462,13 @@ int spi_register_controller(struct spi_controller *ctlr)
}
}
+ /*
+ * Even if it's just one always-selected device, there must
+ * be at least one chipselect.
+ */
+ if (!ctlr->num_chipselect)
+ return -EINVAL;
+
status = device_add(&ctlr->dev);
if (status < 0) {
/* free bus id */
@@ -2470,7 +2578,6 @@ void spi_unregister_controller(struct spi_controller *ctlr)
{
struct spi_controller *found;
int id = ctlr->bus_num;
- int dummy;
/* First make sure that this controller was ever added */
mutex_lock(&board_lock);
@@ -2484,7 +2591,7 @@ void spi_unregister_controller(struct spi_controller *ctlr)
list_del(&ctlr->list);
mutex_unlock(&board_lock);
- dummy = device_for_each_child(&ctlr->dev, NULL, __unregister);
+ device_for_each_child(&ctlr->dev, NULL, __unregister);
device_unregister(&ctlr->dev);
/* free bus id */
mutex_lock(&board_lock);
@@ -2633,12 +2740,9 @@ EXPORT_SYMBOL_GPL(spi_res_add);
*/
void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
{
- struct spi_res *res;
-
- while (!list_empty(&message->resources)) {
- res = list_last_entry(&message->resources,
- struct spi_res, entry);
+ struct spi_res *res, *tmp;
+ list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) {
if (res->release)
res->release(ctlr, message, res->data);
@@ -2702,8 +2806,7 @@ struct spi_replaced_transfers *spi_replace_transfers(
/* allocate the structure using spi_res */
rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
- insert * sizeof(struct spi_transfer)
- + sizeof(struct spi_replaced_transfers)
+ struct_size(rxfer, inserted_transfers, insert)
+ extradatasize,
gfp);
if (!rxfer)
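The struct_size() call above replaces open-coded size arithmetic for a flexible-array allocation; a userspace approximation with hypothetical stand-in types is sketched below (the real kernel helper additionally saturates on overflow, which this simplified macro does not).

#include <stdio.h>
#include <stdlib.h>

struct item { int len; };		/* hypothetical element type */
struct replaced {
	size_t count;
	struct item inserted[];		/* flexible array member */
};

/* simplified struct_size(): sizeof(*p) plus n trailing elements */
#define struct_size(p, member, n) \
	(sizeof(*(p)) + (n) * sizeof((p)->member[0]))

int main(void)
{
	struct replaced *r;
	size_t n = 4;

	r = malloc(struct_size(r, inserted, n));	/* r only appears inside sizeof */
	if (!r)
		return 1;
	r->count = n;
	printf("allocated %zu bytes\n", struct_size(r, inserted, n));
	free(r);
	return 0;
}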
@@ -2987,6 +3090,11 @@ int spi_setup(struct spi_device *spi)
spi_set_cs(spi, false);
+ if (spi->rt && !spi->controller->rt) {
+ spi->controller->rt = true;
+ spi_set_thread_rt(spi->controller);
+ }
+
dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
(int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
@@ -3083,6 +3191,7 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
*/
message->frame_length = 0;
list_for_each_entry(xfer, &message->transfers, transfer_list) {
+ xfer->effective_speed_hz = 0;
message->frame_length += xfer->len;
if (!xfer->bits_per_word)
xfer->bits_per_word = spi->bits_per_word;
@@ -3762,4 +3871,3 @@ err0:
* include needing to have boardinfo data structures be much more public.
*/
postcore_initcall(spi_init);
-
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 422bac8cc3e0..255786f2e844 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -663,6 +663,8 @@ static const struct of_device_id spidev_dt_ids[] = {
{ .compatible = "ge,achc" },
{ .compatible = "semtech,sx1301" },
{ .compatible = "lwn,bk4" },
+ { .compatible = "dh,dhcom-board" },
+ { .compatible = "menlo,m53cpld" },
{},
};
MODULE_DEVICE_TABLE(of, spidev_dt_ids);
diff --git a/drivers/ssb/driver_gpio.c b/drivers/ssb/driver_gpio.c
index e809dae4c470..66a76fd83248 100644
--- a/drivers/ssb/driver_gpio.c
+++ b/drivers/ssb/driver_gpio.c
@@ -460,9 +460,6 @@ int ssb_gpio_init(struct ssb_bus *bus)
return ssb_gpio_chipco_init(bus);
else if (ssb_extif_available(&bus->extif))
return ssb_gpio_extif_init(bus);
- else
- WARN_ON(1);
-
return -1;
}
@@ -472,9 +469,6 @@ int ssb_gpio_unregister(struct ssb_bus *bus)
ssb_extif_available(&bus->extif)) {
gpiochip_remove(&bus->gpio);
return 0;
- } else {
- WARN_ON(1);
}
-
return -1;
}
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index d5f771fafc21..7c96a01eef6c 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -118,4 +118,6 @@ source "drivers/staging/fieldbus/Kconfig"
source "drivers/staging/kpc2000/Kconfig"
+source "drivers/staging/isdn/Kconfig"
+
endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 0da0d3f0b5e4..fcaac9693b83 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -49,3 +49,4 @@ obj-$(CONFIG_XIL_AXIS_FIFO) += axis-fifo/
obj-$(CONFIG_EROFS_FS) += erofs/
obj-$(CONFIG_FIELDBUS_DEV) += fieldbus/
obj-$(CONFIG_KPC2000) += kpc2000/
+obj-$(CONFIG_ISDN_CAPI) += isdn/
diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
index 9b07badf4c6c..7cbc1bdd2d8a 100644
--- a/drivers/staging/fbtft/fbtft-core.c
+++ b/drivers/staging/fbtft/fbtft-core.c
@@ -891,7 +891,9 @@ int fbtft_unregister_framebuffer(struct fb_info *fb_info)
if (par->fbtftops.unregister_backlight)
par->fbtftops.unregister_backlight(par);
fbtft_sysfs_exit(par);
- return unregister_framebuffer(fb_info);
+ unregister_framebuffer(fb_info);
+
+ return 0;
}
EXPORT_SYMBOL(fbtft_unregister_framebuffer);
diff --git a/drivers/staging/fieldbus/Documentation/fieldbus_dev.txt b/drivers/staging/fieldbus/Documentation/fieldbus_dev.txt
index 56af3f650fa3..89fb8e14676f 100644
--- a/drivers/staging/fieldbus/Documentation/fieldbus_dev.txt
+++ b/drivers/staging/fieldbus/Documentation/fieldbus_dev.txt
@@ -54,8 +54,8 @@ a limited few common behaviours and properties. This allows us to define
a simple interface consisting of a character device and a set of sysfs files:
See:
-Documentation/ABI/testing/sysfs-class-fieldbus-dev
-Documentation/ABI/testing/fieldbus-dev-cdev
+drivers/staging/fieldbus/Documentation/ABI/sysfs-class-fieldbus-dev
+drivers/staging/fieldbus/Documentation/ABI/fieldbus-dev-cdev
Note that this simple interface does not provide a way to modify adapter
configuration settings. It is therefore useful only for adapters that get their
diff --git a/drivers/staging/isdn/Kconfig b/drivers/staging/isdn/Kconfig
new file mode 100644
index 000000000000..faaf63887094
--- /dev/null
+++ b/drivers/staging/isdn/Kconfig
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menu "ISDN CAPI drivers"
+ depends on ISDN_CAPI
+
+source "drivers/staging/isdn/avm/Kconfig"
+
+source "drivers/staging/isdn/gigaset/Kconfig"
+
+source "drivers/staging/isdn/hysdn/Kconfig"
+
+endmenu
+
diff --git a/drivers/staging/isdn/Makefile b/drivers/staging/isdn/Makefile
new file mode 100644
index 000000000000..025504bae5df
--- /dev/null
+++ b/drivers/staging/isdn/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for the kernel ISDN subsystem and device drivers.
+
+# Object files in subdirectories
+
+obj-$(CONFIG_CAPI_AVM) += avm/
+obj-$(CONFIG_HYSDN) += hysdn/
+obj-$(CONFIG_ISDN_DRV_GIGASET) += gigaset/
diff --git a/drivers/staging/isdn/TODO b/drivers/staging/isdn/TODO
new file mode 100644
index 000000000000..9210d11eb68b
--- /dev/null
+++ b/drivers/staging/isdn/TODO
@@ -0,0 +1,22 @@
+TODO: Remove in late 2019 unless there are users
+
+
+I tried to find any indication of whether the capi drivers are
+still in use, and have not found any signs of recent use.
+
+With public ISDN networks almost completely shut down over the past 12
+months, there is very little you can actually do with this hardware. The
+main remaining use case would be to connect ISDN voice phones to an
+in-house installation with Asterisk or LCR, but anyone trying this in
+turn seems to be using either the mISDN driver stack, or out-of-tree
+drivers from the hardware vendors.
+
+I may of course have missed something, so I would suggest moving
+these into drivers/staging/ just in case someone still uses one
+of the three remaining in-kernel drivers (avm, hysdn, gigaset).
+
+If nobody complains, we can remove them entirely in six months,
+or otherwise move the core code and any drivers that are still
+needed back into drivers/isdn.
+
+ Arnd Bergmann <arnd@arndb.de>
diff --git a/drivers/isdn/hardware/avm/Kconfig b/drivers/staging/isdn/avm/Kconfig
index 81483db067bb..81483db067bb 100644
--- a/drivers/isdn/hardware/avm/Kconfig
+++ b/drivers/staging/isdn/avm/Kconfig
diff --git a/drivers/isdn/hardware/avm/Makefile b/drivers/staging/isdn/avm/Makefile
index 3830a0573fcc..3830a0573fcc 100644
--- a/drivers/isdn/hardware/avm/Makefile
+++ b/drivers/staging/isdn/avm/Makefile
diff --git a/drivers/isdn/hardware/avm/avm_cs.c b/drivers/staging/isdn/avm/avm_cs.c
index 62b8030ee331..62b8030ee331 100644
--- a/drivers/isdn/hardware/avm/avm_cs.c
+++ b/drivers/staging/isdn/avm/avm_cs.c
diff --git a/drivers/isdn/hardware/avm/avmcard.h b/drivers/staging/isdn/avm/avmcard.h
index cdfa89c71997..cdfa89c71997 100644
--- a/drivers/isdn/hardware/avm/avmcard.h
+++ b/drivers/staging/isdn/avm/avmcard.h
diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/staging/isdn/avm/b1.c
index 40ca1e8fa09f..40ca1e8fa09f 100644
--- a/drivers/isdn/hardware/avm/b1.c
+++ b/drivers/staging/isdn/avm/b1.c
diff --git a/drivers/isdn/hardware/avm/b1dma.c b/drivers/staging/isdn/avm/b1dma.c
index 6a3dc9937ce5..6a3dc9937ce5 100644
--- a/drivers/isdn/hardware/avm/b1dma.c
+++ b/drivers/staging/isdn/avm/b1dma.c
diff --git a/drivers/isdn/hardware/avm/b1isa.c b/drivers/staging/isdn/avm/b1isa.c
index cdfea72e0ef6..cdfea72e0ef6 100644
--- a/drivers/isdn/hardware/avm/b1isa.c
+++ b/drivers/staging/isdn/avm/b1isa.c
diff --git a/drivers/isdn/hardware/avm/b1pci.c b/drivers/staging/isdn/avm/b1pci.c
index b76b57a82c02..b76b57a82c02 100644
--- a/drivers/isdn/hardware/avm/b1pci.c
+++ b/drivers/staging/isdn/avm/b1pci.c
diff --git a/drivers/isdn/hardware/avm/b1pcmcia.c b/drivers/staging/isdn/avm/b1pcmcia.c
index 3aca16e62902..3aca16e62902 100644
--- a/drivers/isdn/hardware/avm/b1pcmcia.c
+++ b/drivers/staging/isdn/avm/b1pcmcia.c
diff --git a/drivers/isdn/hardware/avm/c4.c b/drivers/staging/isdn/avm/c4.c
index ac72cd204c4d..ac72cd204c4d 100644
--- a/drivers/isdn/hardware/avm/c4.c
+++ b/drivers/staging/isdn/avm/c4.c
diff --git a/drivers/isdn/hardware/avm/t1isa.c b/drivers/staging/isdn/avm/t1isa.c
index 2153619c5b31..2153619c5b31 100644
--- a/drivers/isdn/hardware/avm/t1isa.c
+++ b/drivers/staging/isdn/avm/t1isa.c
diff --git a/drivers/isdn/hardware/avm/t1pci.c b/drivers/staging/isdn/avm/t1pci.c
index f5ed1d5004c9..f5ed1d5004c9 100644
--- a/drivers/isdn/hardware/avm/t1pci.c
+++ b/drivers/staging/isdn/avm/t1pci.c
diff --git a/drivers/isdn/gigaset/Kconfig b/drivers/staging/isdn/gigaset/Kconfig
index fe41e9cfb672..c593105b3600 100644
--- a/drivers/isdn/gigaset/Kconfig
+++ b/drivers/staging/isdn/gigaset/Kconfig
@@ -30,15 +30,6 @@ config GIGASET_CAPI
Say N to build the old native ISDN4Linux variant.
If unsure, say Y.
-config GIGASET_I4L
- bool
- depends on ISDN_I4L='y'||(ISDN_I4L='m'&&ISDN_DRV_GIGASET='m')
- default !GIGASET_CAPI
-
-config GIGASET_DUMMYLL
- bool
- default !GIGASET_CAPI&&!GIGASET_I4L
-
config GIGASET_BASE
tristate "Gigaset base station support"
depends on USB
diff --git a/drivers/isdn/gigaset/Makefile b/drivers/staging/isdn/gigaset/Makefile
index ac45a2739f56..9c010891dcd7 100644
--- a/drivers/isdn/gigaset/Makefile
+++ b/drivers/staging/isdn/gigaset/Makefile
@@ -1,8 +1,12 @@
# SPDX-License-Identifier: GPL-2.0
gigaset-y := common.o interface.o proc.o ev-layer.o asyncdata.o
-gigaset-$(CONFIG_GIGASET_CAPI) += capi.o
-gigaset-$(CONFIG_GIGASET_I4L) += i4l.o
-gigaset-$(CONFIG_GIGASET_DUMMYLL) += dummyll.o
+
+ifdef CONFIG_GIGASET_CAPI
+gigaset-y += capi.o
+else
+gigaset-y += dummyll.o
+endif
+
usb_gigaset-y := usb-gigaset.o
ser_gigaset-y := ser-gigaset.o
bas_gigaset-y := bas-gigaset.o isocdata.o
diff --git a/drivers/isdn/gigaset/asyncdata.c b/drivers/staging/isdn/gigaset/asyncdata.c
index a34b3c9d8a71..a34b3c9d8a71 100644
--- a/drivers/isdn/gigaset/asyncdata.c
+++ b/drivers/staging/isdn/gigaset/asyncdata.c
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/staging/isdn/gigaset/bas-gigaset.c
index c334525a5f63..c334525a5f63 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/staging/isdn/gigaset/bas-gigaset.c
diff --git a/drivers/isdn/gigaset/capi.c b/drivers/staging/isdn/gigaset/capi.c
index 83d7dd48c61d..83d7dd48c61d 100644
--- a/drivers/isdn/gigaset/capi.c
+++ b/drivers/staging/isdn/gigaset/capi.c
diff --git a/drivers/isdn/gigaset/common.c b/drivers/staging/isdn/gigaset/common.c
index 3bb8092858ab..3bb8092858ab 100644
--- a/drivers/isdn/gigaset/common.c
+++ b/drivers/staging/isdn/gigaset/common.c
diff --git a/drivers/isdn/gigaset/dummyll.c b/drivers/staging/isdn/gigaset/dummyll.c
index 4b9637e5da6e..4b9637e5da6e 100644
--- a/drivers/isdn/gigaset/dummyll.c
+++ b/drivers/staging/isdn/gigaset/dummyll.c
diff --git a/drivers/isdn/gigaset/ev-layer.c b/drivers/staging/isdn/gigaset/ev-layer.c
index f8bb1869c600..f8bb1869c600 100644
--- a/drivers/isdn/gigaset/ev-layer.c
+++ b/drivers/staging/isdn/gigaset/ev-layer.c
diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/staging/isdn/gigaset/gigaset.h
index 0ecc2b5ea553..0ecc2b5ea553 100644
--- a/drivers/isdn/gigaset/gigaset.h
+++ b/drivers/staging/isdn/gigaset/gigaset.h
diff --git a/drivers/isdn/gigaset/interface.c b/drivers/staging/isdn/gigaset/interface.c
index 17fa615a8c68..17fa615a8c68 100644
--- a/drivers/isdn/gigaset/interface.c
+++ b/drivers/staging/isdn/gigaset/interface.c
diff --git a/drivers/isdn/gigaset/isocdata.c b/drivers/staging/isdn/gigaset/isocdata.c
index 3ecf6e33ed15..3ecf6e33ed15 100644
--- a/drivers/isdn/gigaset/isocdata.c
+++ b/drivers/staging/isdn/gigaset/isocdata.c
diff --git a/drivers/isdn/gigaset/proc.c b/drivers/staging/isdn/gigaset/proc.c
index 8914439a4237..8914439a4237 100644
--- a/drivers/isdn/gigaset/proc.c
+++ b/drivers/staging/isdn/gigaset/proc.c
diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/staging/isdn/gigaset/ser-gigaset.c
index 5587e9e7fc73..5587e9e7fc73 100644
--- a/drivers/isdn/gigaset/ser-gigaset.c
+++ b/drivers/staging/isdn/gigaset/ser-gigaset.c
diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/staging/isdn/gigaset/usb-gigaset.c
index 1b9b43659bdf..1b9b43659bdf 100644
--- a/drivers/isdn/gigaset/usb-gigaset.c
+++ b/drivers/staging/isdn/gigaset/usb-gigaset.c
diff --git a/drivers/isdn/hysdn/Kconfig b/drivers/staging/isdn/hysdn/Kconfig
index 1971ef850c9a..1971ef850c9a 100644
--- a/drivers/isdn/hysdn/Kconfig
+++ b/drivers/staging/isdn/hysdn/Kconfig
diff --git a/drivers/isdn/hysdn/Makefile b/drivers/staging/isdn/hysdn/Makefile
index e01f17f22ebb..e01f17f22ebb 100644
--- a/drivers/isdn/hysdn/Makefile
+++ b/drivers/staging/isdn/hysdn/Makefile
diff --git a/drivers/isdn/hysdn/boardergo.c b/drivers/staging/isdn/hysdn/boardergo.c
index 2aa2a0e08247..2aa2a0e08247 100644
--- a/drivers/isdn/hysdn/boardergo.c
+++ b/drivers/staging/isdn/hysdn/boardergo.c
diff --git a/drivers/isdn/hysdn/boardergo.h b/drivers/staging/isdn/hysdn/boardergo.h
index e99bd81c4034..e99bd81c4034 100644
--- a/drivers/isdn/hysdn/boardergo.h
+++ b/drivers/staging/isdn/hysdn/boardergo.h
diff --git a/drivers/isdn/hysdn/hycapi.c b/drivers/staging/isdn/hysdn/hycapi.c
index a2c15cd7bf67..a2c15cd7bf67 100644
--- a/drivers/isdn/hysdn/hycapi.c
+++ b/drivers/staging/isdn/hysdn/hycapi.c
diff --git a/drivers/isdn/hysdn/hysdn_boot.c b/drivers/staging/isdn/hysdn/hysdn_boot.c
index ba177c3a621b..ba177c3a621b 100644
--- a/drivers/isdn/hysdn/hysdn_boot.c
+++ b/drivers/staging/isdn/hysdn/hysdn_boot.c
diff --git a/drivers/isdn/hysdn/hysdn_defs.h b/drivers/staging/isdn/hysdn/hysdn_defs.h
index cdac46a21692..cdac46a21692 100644
--- a/drivers/isdn/hysdn/hysdn_defs.h
+++ b/drivers/staging/isdn/hysdn/hysdn_defs.h
diff --git a/drivers/isdn/hysdn/hysdn_init.c b/drivers/staging/isdn/hysdn/hysdn_init.c
index 0db2f7506250..0db2f7506250 100644
--- a/drivers/isdn/hysdn/hysdn_init.c
+++ b/drivers/staging/isdn/hysdn/hysdn_init.c
diff --git a/drivers/isdn/hysdn/hysdn_net.c b/drivers/staging/isdn/hysdn/hysdn_net.c
index 8e9c34f33d86..bea37ae30ebb 100644
--- a/drivers/isdn/hysdn/hysdn_net.c
+++ b/drivers/staging/isdn/hysdn/hysdn_net.c
@@ -70,9 +70,13 @@ net_open(struct net_device *dev)
for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = 0xfc;
if ((in_dev = dev->ip_ptr) != NULL) {
- struct in_ifaddr *ifa = in_dev->ifa_list;
+ const struct in_ifaddr *ifa;
+
+ rcu_read_lock();
+ ifa = rcu_dereference(in_dev->ifa_list);
if (ifa != NULL)
memcpy(dev->dev_addr + (ETH_ALEN - sizeof(ifa->ifa_local)), &ifa->ifa_local, sizeof(ifa->ifa_local));
+ rcu_read_unlock();
}
} else
memcpy(dev->dev_addr, card->mac_addr, ETH_ALEN);
diff --git a/drivers/isdn/hysdn/hysdn_pof.h b/drivers/staging/isdn/hysdn/hysdn_pof.h
index f63f5fa59d7e..f63f5fa59d7e 100644
--- a/drivers/isdn/hysdn/hysdn_pof.h
+++ b/drivers/staging/isdn/hysdn/hysdn_pof.h
diff --git a/drivers/isdn/hysdn/hysdn_procconf.c b/drivers/staging/isdn/hysdn/hysdn_procconf.c
index 73079213ec94..73079213ec94 100644
--- a/drivers/isdn/hysdn/hysdn_procconf.c
+++ b/drivers/staging/isdn/hysdn/hysdn_procconf.c
diff --git a/drivers/isdn/hysdn/hysdn_proclog.c b/drivers/staging/isdn/hysdn/hysdn_proclog.c
index 6e898b90e86e..6e898b90e86e 100644
--- a/drivers/isdn/hysdn/hysdn_proclog.c
+++ b/drivers/staging/isdn/hysdn/hysdn_proclog.c
diff --git a/drivers/isdn/hysdn/hysdn_sched.c b/drivers/staging/isdn/hysdn/hysdn_sched.c
index 31d7c1415543..31d7c1415543 100644
--- a/drivers/isdn/hysdn/hysdn_sched.c
+++ b/drivers/staging/isdn/hysdn/hysdn_sched.c
diff --git a/drivers/isdn/hysdn/ince1pc.h b/drivers/staging/isdn/hysdn/ince1pc.h
index cab68361de65..cab68361de65 100644
--- a/drivers/isdn/hysdn/ince1pc.h
+++ b/drivers/staging/isdn/hysdn/ince1pc.h
diff --git a/drivers/staging/media/Kconfig b/drivers/staging/media/Kconfig
index f77f5eee7fc2..534d85d6c5e3 100644
--- a/drivers/staging/media/Kconfig
+++ b/drivers/staging/media/Kconfig
@@ -20,15 +20,19 @@ menuconfig STAGING_MEDIA
if STAGING_MEDIA && MEDIA_SUPPORT
# Please keep them in alphabetic order
+source "drivers/staging/media/allegro-dvt/Kconfig"
+
source "drivers/staging/media/bcm2048/Kconfig"
source "drivers/staging/media/davinci_vpfe/Kconfig"
+source "drivers/staging/media/hantro/Kconfig"
+
source "drivers/staging/media/imx/Kconfig"
-source "drivers/staging/media/omap4iss/Kconfig"
+source "drivers/staging/media/meson/vdec/Kconfig"
-source "drivers/staging/media/rockchip/vpu/Kconfig"
+source "drivers/staging/media/omap4iss/Kconfig"
source "drivers/staging/media/sunxi/Kconfig"
diff --git a/drivers/staging/media/Makefile b/drivers/staging/media/Makefile
index 99218bfc997f..c486298194da 100644
--- a/drivers/staging/media/Makefile
+++ b/drivers/staging/media/Makefile
@@ -1,10 +1,12 @@
# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_VIDEO_ALLEGRO_DVT) += allegro-dvt/
obj-$(CONFIG_I2C_BCM2048) += bcm2048/
obj-$(CONFIG_VIDEO_IMX_MEDIA) += imx/
obj-$(CONFIG_VIDEO_DM365_VPFE) += davinci_vpfe/
+obj-$(CONFIG_VIDEO_MESON_VDEC) += meson/vdec/
obj-$(CONFIG_VIDEO_OMAP4) += omap4iss/
obj-$(CONFIG_VIDEO_SUNXI) += sunxi/
obj-$(CONFIG_TEGRA_VDE) += tegra-vde/
-obj-$(CONFIG_VIDEO_ROCKCHIP_VPU) += rockchip/vpu/
+obj-$(CONFIG_VIDEO_HANTRO) += hantro/
obj-$(CONFIG_VIDEO_IPU3_IMGU) += ipu3/
obj-$(CONFIG_SOC_CAMERA) += soc_camera/
diff --git a/drivers/staging/media/allegro-dvt/Kconfig b/drivers/staging/media/allegro-dvt/Kconfig
new file mode 100644
index 000000000000..6b7107d9995c
--- /dev/null
+++ b/drivers/staging/media/allegro-dvt/Kconfig
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0
+config VIDEO_ALLEGRO_DVT
+ tristate "Allegro DVT Video IP Core"
+ depends on VIDEO_DEV && VIDEO_V4L2
+ depends on ARCH_ZYNQMP || COMPILE_TEST
+ select V4L2_MEM2MEM_DEV
+ select VIDEOBUF2_DMA_CONTIG
+ select REGMAP
+ select REGMAP_MMIO
+ help
+ Support for the encoder video IP core by Allegro DVT. This core is
+ found for example on the Xilinx ZynqMP SoC in the EV family and is
+ called VCU in the reference manual.
+
+ To compile this driver as a module, choose M here: the module
+ will be called allegro.
diff --git a/drivers/staging/media/allegro-dvt/Makefile b/drivers/staging/media/allegro-dvt/Makefile
new file mode 100644
index 000000000000..80817160815c
--- /dev/null
+++ b/drivers/staging/media/allegro-dvt/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
+allegro-objs := allegro-core.o nal-h264.o
+
+obj-$(CONFIG_VIDEO_ALLEGRO_DVT) += allegro.o
diff --git a/drivers/staging/media/allegro-dvt/TODO b/drivers/staging/media/allegro-dvt/TODO
new file mode 100644
index 000000000000..99e19be0e45a
--- /dev/null
+++ b/drivers/staging/media/allegro-dvt/TODO
@@ -0,0 +1,4 @@
+TODO:
+
+- This driver is waiting for the stateful encoder spec and corresponding
+ v4l2-compliance tests to be finalized.
diff --git a/drivers/staging/media/allegro-dvt/allegro-core.c b/drivers/staging/media/allegro-dvt/allegro-core.c
new file mode 100644
index 000000000000..f050c7347fd5
--- /dev/null
+++ b/drivers/staging/media/allegro-dvt/allegro-core.c
@@ -0,0 +1,3014 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Pengutronix, Michael Tretter <kernel@pengutronix.de>
+ *
+ * Allegro DVT video encoder driver
+ */
+
+#include <linux/firmware.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/log2.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/videobuf2-v4l2.h>
+
+#include "nal-h264.h"
+
+/*
+ * Support up to 4k video streams. The hardware actually supports higher
+ * resolutions, which are specified in PG252 June 6, 2018 (H.264/H.265 Video
+ * Codec Unit v1.1) Chapter 3.
+ */
+#define ALLEGRO_WIDTH_MIN 128
+#define ALLEGRO_WIDTH_DEFAULT 1920
+#define ALLEGRO_WIDTH_MAX 3840
+#define ALLEGRO_HEIGHT_MIN 64
+#define ALLEGRO_HEIGHT_DEFAULT 1080
+#define ALLEGRO_HEIGHT_MAX 2160
+
+#define ALLEGRO_GOP_SIZE_DEFAULT 25
+#define ALLEGRO_GOP_SIZE_MAX 1000
+
+/*
+ * MCU Control Registers
+ *
+ * The Zynq UltraScale+ Devices Register Reference documents the registers
+ * with an offset of 0x9000, which equals the size of the SRAM and one page
+ * gap. The driver handles SRAM and registers separately and, therefore, is
+ * oblivious of the offset.
+ */
+#define AL5_MCU_RESET 0x0000
+#define AL5_MCU_RESET_SOFT BIT(0)
+#define AL5_MCU_RESET_REGS BIT(1)
+#define AL5_MCU_RESET_MODE 0x0004
+#define AL5_MCU_RESET_MODE_SLEEP BIT(0)
+#define AL5_MCU_RESET_MODE_HALT BIT(1)
+#define AL5_MCU_STA 0x0008
+#define AL5_MCU_STA_SLEEP BIT(0)
+#define AL5_MCU_WAKEUP 0x000c
+
+#define AL5_ICACHE_ADDR_OFFSET_MSB 0x0010
+#define AL5_ICACHE_ADDR_OFFSET_LSB 0x0014
+#define AL5_DCACHE_ADDR_OFFSET_MSB 0x0018
+#define AL5_DCACHE_ADDR_OFFSET_LSB 0x001c
+
+#define AL5_MCU_INTERRUPT 0x0100
+#define AL5_ITC_CPU_IRQ_MSK 0x0104
+#define AL5_ITC_CPU_IRQ_CLR 0x0108
+#define AL5_ITC_CPU_IRQ_STA 0x010C
+#define AL5_ITC_CPU_IRQ_STA_TRIGGERED BIT(0)
+
+#define AXI_ADDR_OFFSET_IP 0x0208
+
+/*
+ * The MCU accesses the system memory with a 2G offset compared to CPU
+ * physical addresses.
+ */
+#define MCU_CACHE_OFFSET SZ_2G
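+
+/*
+ * For example, a buffer at the hypothetical CPU physical address 0x70000000
+ * is referenced by the MCU as 0x70000000 | MCU_CACHE_OFFSET = 0xf0000000.
+ */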
+
+/*
+ * The driver needs to reserve some space at the beginning of capture buffers,
+ * because it needs to write SPS/PPS NAL units. The encoder writes the actual
+ * frame data after the offset.
+ */
+#define ENCODER_STREAM_OFFSET SZ_64
+
+#define SIZE_MACROBLOCK 16
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Debug level (0-2)");
+
+struct allegro_buffer {
+ void *vaddr;
+ dma_addr_t paddr;
+ size_t size;
+ struct list_head head;
+};
+
+struct allegro_channel;
+
+struct allegro_mbox {
+ unsigned int head;
+ unsigned int tail;
+ unsigned int data;
+ size_t size;
+ /* protect mailbox from simultaneous accesses */
+ struct mutex lock;
+};
+
+struct allegro_dev {
+ struct v4l2_device v4l2_dev;
+ struct video_device video_dev;
+ struct v4l2_m2m_dev *m2m_dev;
+ struct platform_device *plat_dev;
+
+ /* mutex protecting vb2_queue structure */
+ struct mutex lock;
+
+ struct regmap *regmap;
+ struct regmap *sram;
+
+ struct allegro_buffer firmware;
+ struct allegro_buffer suballocator;
+
+ struct completion init_complete;
+
+ /* The mailbox interface */
+ struct allegro_mbox mbox_command;
+ struct allegro_mbox mbox_status;
+
+ /*
+ * The downstream driver limits the number of users to 64, so a bitfield
+ * can be used for the user_ids that are in use. See also user_id in
+ * struct allegro_channel.
+ */
+ unsigned long channel_user_ids;
+ struct list_head channels;
+};
+
+static struct regmap_config allegro_regmap_config = {
+ .name = "regmap",
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0xfff,
+ .cache_type = REGCACHE_NONE,
+};
+
+static struct regmap_config allegro_sram_config = {
+ .name = "sram",
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0x7fff,
+ .cache_type = REGCACHE_NONE,
+};
+
+enum allegro_state {
+ ALLEGRO_STATE_ENCODING,
+ ALLEGRO_STATE_DRAIN,
+ ALLEGRO_STATE_WAIT_FOR_BUFFER,
+ ALLEGRO_STATE_STOPPED,
+};
+
+#define fh_to_channel(__fh) container_of(__fh, struct allegro_channel, fh)
+
+struct allegro_channel {
+ struct allegro_dev *dev;
+ struct v4l2_fh fh;
+ struct v4l2_ctrl_handler ctrl_handler;
+
+ unsigned int width;
+ unsigned int height;
+ unsigned int stride;
+
+ enum v4l2_colorspace colorspace;
+ enum v4l2_ycbcr_encoding ycbcr_enc;
+ enum v4l2_quantization quantization;
+ enum v4l2_xfer_func xfer_func;
+
+ u32 pixelformat;
+ unsigned int sizeimage_raw;
+ unsigned int osequence;
+
+ u32 codec;
+ enum v4l2_mpeg_video_h264_profile profile;
+ enum v4l2_mpeg_video_h264_level level;
+ unsigned int sizeimage_encoded;
+ unsigned int csequence;
+
+ enum v4l2_mpeg_video_bitrate_mode bitrate_mode;
+ unsigned int bitrate;
+ unsigned int bitrate_peak;
+ unsigned int cpb_size;
+ unsigned int gop_size;
+
+ struct v4l2_ctrl *mpeg_video_h264_profile;
+ struct v4l2_ctrl *mpeg_video_h264_level;
+ struct v4l2_ctrl *mpeg_video_bitrate_mode;
+ struct v4l2_ctrl *mpeg_video_bitrate;
+ struct v4l2_ctrl *mpeg_video_bitrate_peak;
+ struct v4l2_ctrl *mpeg_video_cpb_size;
+ struct v4l2_ctrl *mpeg_video_gop_size;
+
+ /* user_id is used to identify the channel during CREATE_CHANNEL */
+ /* not sure what to set here or whether this is actually required */
+ int user_id;
+ /* channel_id is set by the mcu and used by all later commands */
+ int mcu_channel_id;
+
+ struct list_head buffers_reference;
+ struct list_head buffers_intermediate;
+
+ struct list_head list;
+ struct completion completion;
+
+ unsigned int error;
+ enum allegro_state state;
+};
+
+static inline int
+allegro_set_state(struct allegro_channel *channel, enum allegro_state state)
+{
+ channel->state = state;
+
+ return 0;
+}
+
+static inline enum allegro_state
+allegro_get_state(struct allegro_channel *channel)
+{
+ return channel->state;
+}
+
+struct fw_info {
+ unsigned int id;
+ unsigned int id_codec;
+ char *version;
+ unsigned int mailbox_cmd;
+ unsigned int mailbox_status;
+ size_t mailbox_size;
+ size_t suballocator_size;
+};
+
+static const struct fw_info supported_firmware[] = {
+ {
+ .id = 18296,
+ .id_codec = 96272,
+ .version = "v2018.2",
+ .mailbox_cmd = 0x7800,
+ .mailbox_status = 0x7c00,
+ .mailbox_size = 0x400 - 0x8,
+ .suballocator_size = SZ_16M,
+ },
+};
+
+enum mcu_msg_type {
+ MCU_MSG_TYPE_INIT = 0x0000,
+ MCU_MSG_TYPE_CREATE_CHANNEL = 0x0005,
+ MCU_MSG_TYPE_DESTROY_CHANNEL = 0x0006,
+ MCU_MSG_TYPE_ENCODE_FRAME = 0x0007,
+ MCU_MSG_TYPE_PUT_STREAM_BUFFER = 0x0012,
+ MCU_MSG_TYPE_PUSH_BUFFER_INTERMEDIATE = 0x000e,
+ MCU_MSG_TYPE_PUSH_BUFFER_REFERENCE = 0x000f,
+};
+
+static const char *msg_type_name(enum mcu_msg_type type)
+{
+ static char buf[9];
+
+ switch (type) {
+ case MCU_MSG_TYPE_INIT:
+ return "INIT";
+ case MCU_MSG_TYPE_CREATE_CHANNEL:
+ return "CREATE_CHANNEL";
+ case MCU_MSG_TYPE_DESTROY_CHANNEL:
+ return "DESTROY_CHANNEL";
+ case MCU_MSG_TYPE_ENCODE_FRAME:
+ return "ENCODE_FRAME";
+ case MCU_MSG_TYPE_PUT_STREAM_BUFFER:
+ return "PUT_STREAM_BUFFER";
+ case MCU_MSG_TYPE_PUSH_BUFFER_INTERMEDIATE:
+ return "PUSH_BUFFER_INTERMEDIATE";
+ case MCU_MSG_TYPE_PUSH_BUFFER_REFERENCE:
+ return "PUSH_BUFFER_REFERENCE";
+ default:
+ snprintf(buf, sizeof(buf), "(0x%04x)", type);
+ return buf;
+ }
+}
+
+struct mcu_msg_header {
+ u16 length; /* length of the body in bytes */
+ u16 type;
+} __attribute__ ((__packed__));
+
+struct mcu_msg_init_request {
+ struct mcu_msg_header header;
+ u32 reserved0; /* maybe an unused channel id */
+ u32 suballoc_dma;
+ u32 suballoc_size;
+ s32 l2_cache[3];
+} __attribute__ ((__packed__));
+
+struct mcu_msg_init_response {
+ struct mcu_msg_header header;
+ u32 reserved0;
+} __attribute__ ((__packed__));
+
+struct mcu_msg_create_channel {
+ struct mcu_msg_header header;
+ u32 user_id;
+ u16 width;
+ u16 height;
+ u32 format;
+ u32 colorspace;
+ u32 src_mode;
+ u8 profile;
+ u16 constraint_set_flags;
+ s8 codec;
+ u16 level;
+ u16 tier;
+ u32 sps_param;
+ u32 pps_param;
+
+ u32 enc_option;
+#define AL_OPT_WPP BIT(0)
+#define AL_OPT_TILE BIT(1)
+#define AL_OPT_LF BIT(2)
+#define AL_OPT_LF_X_SLICE BIT(3)
+#define AL_OPT_LF_X_TILE BIT(4)
+#define AL_OPT_SCL_LST BIT(5)
+#define AL_OPT_CONST_INTRA_PRED BIT(6)
+#define AL_OPT_QP_TAB_RELATIVE BIT(7)
+#define AL_OPT_FIX_PREDICTOR BIT(8)
+#define AL_OPT_CUSTOM_LDA BIT(9)
+#define AL_OPT_ENABLE_AUTO_QP BIT(10)
+#define AL_OPT_ADAPT_AUTO_QP BIT(11)
+#define AL_OPT_TRANSFO_SKIP BIT(13)
+#define AL_OPT_FORCE_REC BIT(15)
+#define AL_OPT_FORCE_MV_OUT BIT(16)
+#define AL_OPT_FORCE_MV_CLIP BIT(17)
+#define AL_OPT_LOWLAT_SYNC BIT(18)
+#define AL_OPT_LOWLAT_INT BIT(19)
+#define AL_OPT_RDO_COST_MODE BIT(20)
+
+ s8 beta_offset;
+ s8 tc_offset;
+ u16 reserved10;
+ u32 unknown11;
+ u32 unknown12;
+ u16 num_slices;
+ u16 prefetch_auto;
+ u32 prefetch_mem_offset;
+ u32 prefetch_mem_size;
+ u16 clip_hrz_range;
+ u16 clip_vrt_range;
+ u16 me_range[4];
+ u8 max_cu_size;
+ u8 min_cu_size;
+ u8 max_tu_size;
+ u8 min_tu_size;
+ u8 max_transfo_depth_inter;
+ u8 max_transfo_depth_intra;
+ u16 reserved20;
+ u32 entropy_mode;
+ u32 wp_mode;
+
+ /* rate control param */
+ u32 rate_control_mode;
+ u32 initial_rem_delay;
+ u32 cpb_size;
+ u16 framerate;
+ u16 clk_ratio;
+ u32 target_bitrate;
+ u32 max_bitrate;
+ u16 initial_qp;
+ u16 min_qp;
+ u16 max_qp;
+ s16 ip_delta;
+ s16 pb_delta;
+ u16 golden_ref;
+ u16 golden_delta;
+ u16 golden_ref_frequency;
+ u32 rate_control_option;
+
+ /* gop param */
+ u32 gop_ctrl_mode;
+ u32 freq_ird;
+ u32 freq_lt;
+ u32 gdr_mode;
+ u32 gop_length;
+ u32 unknown39;
+
+ u32 subframe_latency;
+ u32 lda_control_mode;
+} __attribute__ ((__packed__));
+
+struct mcu_msg_create_channel_response {
+ struct mcu_msg_header header;
+ u32 channel_id;
+ u32 user_id;
+ u32 options;
+ u32 num_core;
+ u32 pps_param;
+ u32 int_buffers_count;
+ u32 int_buffers_size;
+ u32 rec_buffers_count;
+ u32 rec_buffers_size;
+ u32 reserved;
+ u32 error_code;
+} __attribute__ ((__packed__));
+
+struct mcu_msg_destroy_channel {
+ struct mcu_msg_header header;
+ u32 channel_id;
+} __attribute__ ((__packed__));
+
+struct mcu_msg_destroy_channel_response {
+ struct mcu_msg_header header;
+ u32 channel_id;
+} __attribute__ ((__packed__));
+
+struct mcu_msg_push_buffers_internal_buffer {
+ u32 dma_addr;
+ u32 mcu_addr;
+ u32 size;
+} __attribute__ ((__packed__));
+
+struct mcu_msg_push_buffers_internal {
+ struct mcu_msg_header header;
+ u32 channel_id;
+ struct mcu_msg_push_buffers_internal_buffer buffer[0];
+} __attribute__ ((__packed__));
+
+struct mcu_msg_put_stream_buffer {
+ struct mcu_msg_header header;
+ u32 channel_id;
+ u32 dma_addr;
+ u32 mcu_addr;
+ u32 size;
+ u32 offset;
+ u64 stream_id;
+} __attribute__ ((__packed__));
+
+struct mcu_msg_encode_frame {
+ struct mcu_msg_header header;
+ u32 channel_id;
+ u32 reserved;
+
+ u32 encoding_options;
+#define AL_OPT_USE_QP_TABLE BIT(0)
+#define AL_OPT_FORCE_LOAD BIT(1)
+#define AL_OPT_USE_L2 BIT(2)
+#define AL_OPT_DISABLE_INTRA BIT(3)
+#define AL_OPT_DEPENDENT_SLICES BIT(4)
+
+ s16 pps_qp;
+ u16 padding;
+ u64 user_param;
+ u64 src_handle;
+
+ u32 request_options;
+#define AL_OPT_SCENE_CHANGE BIT(0)
+#define AL_OPT_RESTART_GOP BIT(1)
+#define AL_OPT_USE_LONG_TERM BIT(2)
+#define AL_OPT_UPDATE_PARAMS BIT(3)
+
+ /* u32 scene_change_delay (optional) */
+ /* rate control param (optional) */
+ /* gop param (optional) */
+ u32 src_y;
+ u32 src_uv;
+ u32 stride;
+ u32 ep2;
+ u64 ep2_v;
+} __attribute__ ((__packed__));
+
+struct mcu_msg_encode_frame_response {
+ struct mcu_msg_header header;
+ u32 channel_id;
+ u64 stream_id; /* see mcu_msg_put_stream_buffer */
+ u64 user_param; /* see mcu_msg_encode_frame */
+ u64 src_handle; /* see mcu_msg_encode_frame */
+ u16 skip;
+ u16 is_ref;
+ u32 initial_removal_delay;
+ u32 dpb_output_delay;
+ u32 size;
+ u32 frame_tag_size;
+ s32 stuffing;
+ s32 filler;
+ u16 num_column;
+ u16 num_row;
+ u16 qp;
+ u8 num_ref_idx_l0;
+ u8 num_ref_idx_l1;
+ u32 partition_table_offset;
+ s32 partition_table_size;
+ u32 sum_complex;
+ s32 tile_width[4];
+ s32 tile_height[22];
+ u32 error_code;
+
+ u32 slice_type;
+#define AL_ENC_SLICE_TYPE_B 0
+#define AL_ENC_SLICE_TYPE_P 1
+#define AL_ENC_SLICE_TYPE_I 2
+
+ u32 pic_struct;
+ u8 is_idr;
+ u8 is_first_slice;
+ u8 is_last_slice;
+ u8 reserved;
+ u16 pps_qp;
+ u16 reserved1;
+ u32 reserved2;
+} __attribute__ ((__packed__));
+
+union mcu_msg_response {
+ struct mcu_msg_header header;
+ struct mcu_msg_init_response init;
+ struct mcu_msg_create_channel_response create_channel;
+ struct mcu_msg_destroy_channel_response destroy_channel;
+ struct mcu_msg_encode_frame_response encode_frame;
+};
+
+/* Helper functions for channel and user operations */
+
+static unsigned long allegro_next_user_id(struct allegro_dev *dev)
+{
+ if (dev->channel_user_ids == ~0UL)
+ return -EBUSY;
+
+ return ffz(dev->channel_user_ids);
+}
+
+static struct allegro_channel *
+allegro_find_channel_by_user_id(struct allegro_dev *dev,
+ unsigned int user_id)
+{
+ struct allegro_channel *channel;
+
+ list_for_each_entry(channel, &dev->channels, list) {
+ if (channel->user_id == user_id)
+ return channel;
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static struct allegro_channel *
+allegro_find_channel_by_channel_id(struct allegro_dev *dev,
+ unsigned int channel_id)
+{
+ struct allegro_channel *channel;
+
+ list_for_each_entry(channel, &dev->channels, list) {
+ if (channel->mcu_channel_id == channel_id)
+ return channel;
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static inline bool channel_exists(struct allegro_channel *channel)
+{
+ return channel->mcu_channel_id != -1;
+}
+
+static unsigned int estimate_stream_size(unsigned int width,
+ unsigned int height)
+{
+ unsigned int offset = ENCODER_STREAM_OFFSET;
+ unsigned int num_blocks = DIV_ROUND_UP(width, SIZE_MACROBLOCK) *
+ DIV_ROUND_UP(height, SIZE_MACROBLOCK);
+ unsigned int pcm_size = SZ_256;
+ unsigned int partition_table = SZ_256;
+
+ return round_up(offset + num_blocks * pcm_size + partition_table, 32);
+}
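+
+/*
+ * Illustration with hypothetical values: a 1920x1080 stream has
+ * DIV_ROUND_UP(1920, 16) * DIV_ROUND_UP(1080, 16) = 120 * 68 = 8160
+ * macroblocks, so the estimate is round_up(64 + 8160 * 256 + 256, 32) =
+ * 2089280 bytes.
+ */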
+
+static enum v4l2_mpeg_video_h264_level
+select_minimum_h264_level(unsigned int width, unsigned int height)
+{
+ unsigned int pic_width_in_mb = DIV_ROUND_UP(width, SIZE_MACROBLOCK);
+ unsigned int frame_height_in_mb = DIV_ROUND_UP(height, SIZE_MACROBLOCK);
+ unsigned int frame_size_in_mb = pic_width_in_mb * frame_height_in_mb;
+ enum v4l2_mpeg_video_h264_level level = V4L2_MPEG_VIDEO_H264_LEVEL_4_0;
+
+ /*
+ * The level limits are specified in Rec. ITU-T H.264 Annex A.3.1 and
+ * also specify limits regarding bit rate and CPB size. Only approximate
+ * the levels using the frame size.
+ *
+ * Level 5.1 allows up to 4k video resolution.
+ */
+ if (frame_size_in_mb <= 99)
+ level = V4L2_MPEG_VIDEO_H264_LEVEL_1_0;
+ else if (frame_size_in_mb <= 396)
+ level = V4L2_MPEG_VIDEO_H264_LEVEL_1_1;
+ else if (frame_size_in_mb <= 792)
+ level = V4L2_MPEG_VIDEO_H264_LEVEL_2_1;
+ else if (frame_size_in_mb <= 1620)
+ level = V4L2_MPEG_VIDEO_H264_LEVEL_2_2;
+ else if (frame_size_in_mb <= 3600)
+ level = V4L2_MPEG_VIDEO_H264_LEVEL_3_1;
+ else if (frame_size_in_mb <= 5120)
+ level = V4L2_MPEG_VIDEO_H264_LEVEL_3_2;
+ else if (frame_size_in_mb <= 8192)
+ level = V4L2_MPEG_VIDEO_H264_LEVEL_4_0;
+ else if (frame_size_in_mb <= 8704)
+ level = V4L2_MPEG_VIDEO_H264_LEVEL_4_2;
+ else if (frame_size_in_mb <= 22080)
+ level = V4L2_MPEG_VIDEO_H264_LEVEL_5_0;
+ else
+ level = V4L2_MPEG_VIDEO_H264_LEVEL_5_1;
+
+ return level;
+}
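+
+/*
+ * Worked example with a hypothetical input: 1920x1080 gives 120 * 68 = 8160
+ * macroblocks, which is <= 8192, so select_minimum_h264_level() returns
+ * V4L2_MPEG_VIDEO_H264_LEVEL_4_0.
+ */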
+
+static unsigned int maximum_bitrate(enum v4l2_mpeg_video_h264_level level)
+{
+ switch (level) {
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_0:
+ return 64000;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1B:
+ return 128000;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_1:
+ return 192000;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_2:
+ return 384000;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_3:
+ return 768000;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_0:
+ return 2000000;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_1:
+ return 4000000;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_2:
+ return 4000000;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_0:
+ return 10000000;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_1:
+ return 14000000;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_2:
+ return 20000000;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_0:
+ return 20000000;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_1:
+ return 50000000;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_2:
+ return 50000000;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_5_0:
+ return 135000000;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_5_1:
+ default:
+ return 240000000;
+ }
+}
+
+static unsigned int maximum_cpb_size(enum v4l2_mpeg_video_h264_level level)
+{
+ switch (level) {
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_0:
+ return 175;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1B:
+ return 350;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_1:
+ return 500;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_2:
+ return 1000;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_3:
+ return 2000;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_0:
+ return 2000;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_1:
+ return 4000;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_2:
+ return 4000;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_0:
+ return 10000;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_1:
+ return 14000;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_2:
+ return 20000;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_0:
+ return 25000;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_1:
+ return 62500;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_2:
+ return 62500;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_5_0:
+ return 135000;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_5_1:
+ default:
+ return 240000;
+ }
+}
+
+static const struct fw_info *
+allegro_get_firmware_info(struct allegro_dev *dev,
+ const struct firmware *fw,
+ const struct firmware *fw_codec)
+{
+ int i;
+ unsigned int id = fw->size;
+ unsigned int id_codec = fw_codec->size;
+
+ for (i = 0; i < ARRAY_SIZE(supported_firmware); i++)
+ if (supported_firmware[i].id == id &&
+ supported_firmware[i].id_codec == id_codec)
+ return &supported_firmware[i];
+
+ return NULL;
+}
+
+/*
+ * Buffers that are used internally by the MCU.
+ */
+
+static int allegro_alloc_buffer(struct allegro_dev *dev,
+ struct allegro_buffer *buffer, size_t size)
+{
+ buffer->vaddr = dma_alloc_coherent(&dev->plat_dev->dev, size,
+ &buffer->paddr, GFP_KERNEL);
+ if (!buffer->vaddr)
+ return -ENOMEM;
+ buffer->size = size;
+
+ return 0;
+}
+
+static void allegro_free_buffer(struct allegro_dev *dev,
+ struct allegro_buffer *buffer)
+{
+ if (buffer->vaddr) {
+ dma_free_coherent(&dev->plat_dev->dev, buffer->size,
+ buffer->vaddr, buffer->paddr);
+ buffer->vaddr = NULL;
+ buffer->size = 0;
+ }
+}
+
+/*
+ * Mailbox interface to send messages to the MCU.
+ */
+
+static int allegro_mbox_init(struct allegro_dev *dev,
+ struct allegro_mbox *mbox,
+ unsigned int base, size_t size)
+{
+ if (!mbox)
+ return -EINVAL;
+
+ mbox->head = base;
+ mbox->tail = base + 0x4;
+ mbox->data = base + 0x8;
+ mbox->size = size;
+ mutex_init(&mbox->lock);
+
+ regmap_write(dev->sram, mbox->head, 0);
+ regmap_write(dev->sram, mbox->tail, 0);
+
+ return 0;
+}
+
+static int allegro_mbox_write(struct allegro_dev *dev,
+ struct allegro_mbox *mbox, void *src, size_t size)
+{
+ struct mcu_msg_header *header = src;
+ unsigned int tail;
+ size_t size_no_wrap;
+ int err = 0;
+
+ if (!src)
+ return -EINVAL;
+
+ if (size > mbox->size) {
+ v4l2_err(&dev->v4l2_dev,
+ "message (%zu bytes) to large for mailbox (%zu bytes)\n",
+ size, mbox->size);
+ return -EINVAL;
+ }
+
+ if (header->length != size - sizeof(*header)) {
+ v4l2_err(&dev->v4l2_dev,
+ "invalid message length: %u bytes (expected %zu bytes)\n",
+ header->length, size - sizeof(*header));
+ return -EINVAL;
+ }
+
+ v4l2_dbg(2, debug, &dev->v4l2_dev,
+ "write command message: type %s, body length %d\n",
+ msg_type_name(header->type), header->length);
+
+ mutex_lock(&mbox->lock);
+ regmap_read(dev->sram, mbox->tail, &tail);
+ if (tail > mbox->size) {
+ v4l2_err(&dev->v4l2_dev,
+ "invalid tail (0x%x): must be smaller than mailbox size (0x%zx)\n",
+ tail, mbox->size);
+ err = -EIO;
+ goto out;
+ }
+ size_no_wrap = min(size, mbox->size - (size_t)tail);
+ regmap_bulk_write(dev->sram, mbox->data + tail, src, size_no_wrap / 4);
+ regmap_bulk_write(dev->sram, mbox->data,
+ src + size_no_wrap, (size - size_no_wrap) / 4);
+ regmap_write(dev->sram, mbox->tail, (tail + size) % mbox->size);
+
+out:
+ mutex_unlock(&mbox->lock);
+
+ return err;
+}
+
+static ssize_t allegro_mbox_read(struct allegro_dev *dev,
+ struct allegro_mbox *mbox,
+ void *dst, size_t nbyte)
+{
+ struct mcu_msg_header *header;
+ unsigned int head;
+ ssize_t size;
+ size_t body_no_wrap;
+
+ regmap_read(dev->sram, mbox->head, &head);
+ if (head > mbox->size) {
+ v4l2_err(&dev->v4l2_dev,
+ "invalid head (0x%x): must be smaller than mailbox size (0x%zx)\n",
+ head, mbox->size);
+ return -EIO;
+ }
+
+ /* Assume that the header does not wrap. */
+ regmap_bulk_read(dev->sram, mbox->data + head,
+ dst, sizeof(*header) / 4);
+ header = dst;
+ size = header->length + sizeof(*header);
+ if (size > mbox->size || size & 0x3) {
+ v4l2_err(&dev->v4l2_dev,
+ "invalid message length: %zu bytes (maximum %zu bytes)\n",
+ header->length + sizeof(*header), mbox->size);
+ return -EIO;
+ }
+ if (size > nbyte) {
+ v4l2_err(&dev->v4l2_dev,
+ "destination buffer too small: %zu bytes (need %zu bytes)\n",
+ nbyte, size);
+ return -EINVAL;
+ }
+
+ /*
+ * The message might wrap within the mailbox. If the message does not
+ * wrap, the first read will read the entire message; otherwise the
+ * first read will read the message up to the end of the mailbox and the
+ * second read will read the remaining bytes from the beginning of the
+ * mailbox.
+ *
+ * Skip the header, as it was already read to get the size of the body.
+ */
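+ /*
+ * Example with hypothetical indices: for a mailbox of size 0x3f8 and
+ * head = 0x3f0, a 16 byte message body gives body_no_wrap =
+ * min(16, 0x3f8 - (0x3f0 + 4)) = 4, so 4 bytes are read at the end of
+ * the mailbox and the remaining 12 bytes from its beginning.
+ */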
+ body_no_wrap = min((size_t)header->length,
+ (size_t)(mbox->size - (head + sizeof(*header))));
+ regmap_bulk_read(dev->sram, mbox->data + head + sizeof(*header),
+ dst + sizeof(*header), body_no_wrap / 4);
+ regmap_bulk_read(dev->sram, mbox->data,
+ dst + sizeof(*header) + body_no_wrap,
+ (header->length - body_no_wrap) / 4);
+
+ regmap_write(dev->sram, mbox->head, (head + size) % mbox->size);
+
+ v4l2_dbg(2, debug, &dev->v4l2_dev,
+ "read status message: type %s, body length %d\n",
+ msg_type_name(header->type), header->length);
+
+ return size;
+}
+
+static void allegro_mcu_interrupt(struct allegro_dev *dev)
+{
+ regmap_write(dev->regmap, AL5_MCU_INTERRUPT, BIT(0));
+}
+
+static void allegro_mcu_send_init(struct allegro_dev *dev,
+ dma_addr_t suballoc_dma, size_t suballoc_size)
+{
+ struct mcu_msg_init_request msg;
+
+ memset(&msg, 0, sizeof(msg));
+
+ msg.header.type = MCU_MSG_TYPE_INIT;
+ msg.header.length = sizeof(msg) - sizeof(msg.header);
+
+ msg.suballoc_dma = lower_32_bits(suballoc_dma) | MCU_CACHE_OFFSET;
+ msg.suballoc_size = suballoc_size;
+
+ /* disable L2 cache */
+ msg.l2_cache[0] = -1;
+ msg.l2_cache[1] = -1;
+ msg.l2_cache[2] = -1;
+
+ allegro_mbox_write(dev, &dev->mbox_command, &msg, sizeof(msg));
+ allegro_mcu_interrupt(dev);
+}
+
+static u32 v4l2_pixelformat_to_mcu_format(u32 pixelformat)
+{
+ switch (pixelformat) {
+ case V4L2_PIX_FMT_NV12:
+ /* AL_420_8BITS: 0x100 -> NV12, 0x88 -> 8 bit */
+ return 0x100 | 0x88;
+ default:
+ return -EINVAL;
+ }
+}
+
+static u32 v4l2_colorspace_to_mcu_colorspace(enum v4l2_colorspace colorspace)
+{
+ switch (colorspace) {
+ case V4L2_COLORSPACE_REC709:
+ return 2;
+ case V4L2_COLORSPACE_SMPTE170M:
+ return 3;
+ case V4L2_COLORSPACE_SMPTE240M:
+ return 4;
+ case V4L2_COLORSPACE_SRGB:
+ return 7;
+ default:
+ /* UNKNOWN */
+ return 0;
+ }
+}
+
+static s8 v4l2_pixelformat_to_mcu_codec(u32 pixelformat)
+{
+ switch (pixelformat) {
+ case V4L2_PIX_FMT_H264:
+ default:
+ return 1;
+ }
+}
+
+static u8 v4l2_profile_to_mcu_profile(enum v4l2_mpeg_video_h264_profile profile)
+{
+ switch (profile) {
+ case V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE:
+ default:
+ return 66;
+ }
+}
+
+static u16 v4l2_level_to_mcu_level(enum v4l2_mpeg_video_h264_level level)
+{
+ switch (level) {
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_0:
+ return 10;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_1:
+ return 11;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_2:
+ return 12;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_3:
+ return 13;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_0:
+ return 20;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_1:
+ return 21;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_2:
+ return 22;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_0:
+ return 30;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_1:
+ return 31;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_2:
+ return 32;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_0:
+ return 40;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_1:
+ return 41;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_2:
+ return 42;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_5_0:
+ return 50;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_5_1:
+ default:
+ return 51;
+ }
+}
+
+static u32
+v4l2_bitrate_mode_to_mcu_mode(enum v4l2_mpeg_video_bitrate_mode mode)
+{
+ switch (mode) {
+ case V4L2_MPEG_VIDEO_BITRATE_MODE_VBR:
+ return 2;
+ case V4L2_MPEG_VIDEO_BITRATE_MODE_CBR:
+ default:
+ return 1;
+ }
+}
+
+static int allegro_mcu_send_create_channel(struct allegro_dev *dev,
+ struct allegro_channel *channel)
+{
+ struct mcu_msg_create_channel msg;
+
+ memset(&msg, 0, sizeof(msg));
+
+ msg.header.type = MCU_MSG_TYPE_CREATE_CHANNEL;
+ msg.header.length = sizeof(msg) - sizeof(msg.header);
+
+ msg.user_id = channel->user_id;
+ msg.width = channel->width;
+ msg.height = channel->height;
+ msg.format = v4l2_pixelformat_to_mcu_format(channel->pixelformat);
+ msg.colorspace = v4l2_colorspace_to_mcu_colorspace(channel->colorspace);
+ msg.src_mode = 0x0;
+ msg.profile = v4l2_profile_to_mcu_profile(channel->profile);
+ msg.constraint_set_flags = BIT(1);
+ msg.codec = v4l2_pixelformat_to_mcu_codec(channel->codec);
+ msg.level = v4l2_level_to_mcu_level(channel->level);
+ msg.tier = 0;
+ msg.sps_param = BIT(20) | 0x4a;
+ msg.pps_param = BIT(2);
+ msg.enc_option = AL_OPT_RDO_COST_MODE | AL_OPT_LF_X_TILE |
+ AL_OPT_LF_X_SLICE | AL_OPT_LF;
+ msg.beta_offset = -1;
+ msg.tc_offset = -1;
+ msg.num_slices = 1;
+ msg.me_range[0] = 8;
+ msg.me_range[1] = 8;
+ msg.me_range[2] = 16;
+ msg.me_range[3] = 16;
+ msg.max_cu_size = ilog2(SIZE_MACROBLOCK);
+ msg.min_cu_size = ilog2(8);
+ msg.max_tu_size = 2;
+ msg.min_tu_size = 2;
+ msg.max_transfo_depth_intra = 1;
+ msg.max_transfo_depth_inter = 1;
+
+ msg.rate_control_mode =
+ v4l2_bitrate_mode_to_mcu_mode(channel->bitrate_mode);
+ /* Must be in the range (0, cpb_size] in 90 kHz units. Use the maximum value. */
+ msg.initial_rem_delay =
+ ((channel->cpb_size * 1000) / channel->bitrate_peak) * 90000;
+ /* Encoder expects cpb_size in units of a 90 kHz clock. */
+ msg.cpb_size =
+ ((channel->cpb_size * 1000) / channel->bitrate_peak) * 90000;
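+ /*
+ * Example with hypothetical settings: cpb_size = 3000 and
+ * bitrate_peak = 1500000 give ((3000 * 1000) / 1500000) * 90000 = 180000
+ * ticks of the 90 kHz clock; note that the integer division truncates.
+ */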
+ msg.framerate = 25;
+ msg.clk_ratio = 1000;
+ msg.target_bitrate = channel->bitrate;
+ msg.max_bitrate = channel->bitrate_peak;
+ msg.initial_qp = 25;
+ msg.min_qp = 10;
+ msg.max_qp = 51;
+ msg.ip_delta = -1;
+ msg.pb_delta = -1;
+ msg.golden_ref = 0;
+ msg.golden_delta = 2;
+ msg.golden_ref_frequency = 10;
+ msg.rate_control_option = 0x00000000;
+
+ msg.gop_ctrl_mode = 0x00000000;
+ msg.freq_ird = 0x7fffffff;
+ msg.freq_lt = 0;
+ msg.gdr_mode = 0x00000000;
+ msg.gop_length = channel->gop_size;
+ msg.subframe_latency = 0x00000000;
+ msg.lda_control_mode = 0x700d0000;
+
+ allegro_mbox_write(dev, &dev->mbox_command, &msg, sizeof(msg));
+ allegro_mcu_interrupt(dev);
+
+ return 0;
+}
+
+static int allegro_mcu_send_destroy_channel(struct allegro_dev *dev,
+ struct allegro_channel *channel)
+{
+ struct mcu_msg_destroy_channel msg;
+
+ memset(&msg, 0, sizeof(msg));
+
+ msg.header.type = MCU_MSG_TYPE_DESTROY_CHANNEL;
+ msg.header.length = sizeof(msg) - sizeof(msg.header);
+
+ msg.channel_id = channel->mcu_channel_id;
+
+ allegro_mbox_write(dev, &dev->mbox_command, &msg, sizeof(msg));
+ allegro_mcu_interrupt(dev);
+
+ return 0;
+}
+
+static int allegro_mcu_send_put_stream_buffer(struct allegro_dev *dev,
+ struct allegro_channel *channel,
+ dma_addr_t paddr,
+ unsigned long size)
+{
+ struct mcu_msg_put_stream_buffer msg;
+
+ memset(&msg, 0, sizeof(msg));
+
+ msg.header.type = MCU_MSG_TYPE_PUT_STREAM_BUFFER;
+ msg.header.length = sizeof(msg) - sizeof(msg.header);
+
+ msg.channel_id = channel->mcu_channel_id;
+ msg.dma_addr = paddr;
+ msg.mcu_addr = paddr | MCU_CACHE_OFFSET;
+ msg.size = size;
+ msg.offset = ENCODER_STREAM_OFFSET;
+ msg.stream_id = 0; /* copied to mcu_msg_encode_frame_response */
+
+ allegro_mbox_write(dev, &dev->mbox_command, &msg, sizeof(msg));
+ allegro_mcu_interrupt(dev);
+
+ return 0;
+}
+
+static int allegro_mcu_send_encode_frame(struct allegro_dev *dev,
+ struct allegro_channel *channel,
+ dma_addr_t src_y, dma_addr_t src_uv)
+{
+ struct mcu_msg_encode_frame msg;
+
+ memset(&msg, 0, sizeof(msg));
+
+ msg.header.type = MCU_MSG_TYPE_ENCODE_FRAME;
+ msg.header.length = sizeof(msg) - sizeof(msg.header);
+
+ msg.channel_id = channel->mcu_channel_id;
+ msg.encoding_options = AL_OPT_FORCE_LOAD;
+ msg.pps_qp = 26; /* qp are relative to 26 */
+ msg.user_param = 0; /* copied to mcu_msg_encode_frame_response */
+ msg.src_handle = 0; /* copied to mcu_msg_encode_frame_response */
+ msg.src_y = src_y;
+ msg.src_uv = src_uv;
+ msg.stride = channel->stride;
+ msg.ep2 = 0x0;
+ msg.ep2_v = msg.ep2 | MCU_CACHE_OFFSET;
+
+ allegro_mbox_write(dev, &dev->mbox_command, &msg, sizeof(msg));
+ allegro_mcu_interrupt(dev);
+
+ return 0;
+}
+
+static int allegro_mcu_wait_for_init_timeout(struct allegro_dev *dev,
+ unsigned long timeout_ms)
+{
+ unsigned long tmo;
+
+ tmo = wait_for_completion_timeout(&dev->init_complete,
+ msecs_to_jiffies(timeout_ms));
+ if (tmo == 0)
+ return -ETIMEDOUT;
+
+ reinit_completion(&dev->init_complete);
+ return 0;
+}
+
+static int allegro_mcu_push_buffer_internal(struct allegro_channel *channel,
+ enum mcu_msg_type type)
+{
+ struct allegro_dev *dev = channel->dev;
+ struct mcu_msg_push_buffers_internal *msg;
+ struct mcu_msg_push_buffers_internal_buffer *buffer;
+ unsigned int num_buffers = 0;
+ size_t size;
+ struct allegro_buffer *al_buffer;
+ struct list_head *list;
+ int err;
+
+ switch (type) {
+ case MCU_MSG_TYPE_PUSH_BUFFER_REFERENCE:
+ list = &channel->buffers_reference;
+ break;
+ case MCU_MSG_TYPE_PUSH_BUFFER_INTERMEDIATE:
+ list = &channel->buffers_intermediate;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ list_for_each_entry(al_buffer, list, head)
+ num_buffers++;
+ size = struct_size(msg, buffer, num_buffers);
+
+ msg = kmalloc(size, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->header.length = size - sizeof(msg->header);
+ msg->header.type = type;
+ msg->channel_id = channel->mcu_channel_id;
+
+ buffer = msg->buffer;
+ list_for_each_entry(al_buffer, list, head) {
+ buffer->dma_addr = lower_32_bits(al_buffer->paddr);
+ buffer->mcu_addr =
+ lower_32_bits(al_buffer->paddr) | MCU_CACHE_OFFSET;
+ buffer->size = al_buffer->size;
+ buffer++;
+ }
+
+ err = allegro_mbox_write(dev, &dev->mbox_command, msg, size);
+ if (err)
+ goto out;
+ allegro_mcu_interrupt(dev);
+
+out:
+ kfree(msg);
+ return err;
+}
+
+static int allegro_mcu_push_buffer_intermediate(struct allegro_channel *channel)
+{
+ enum mcu_msg_type type = MCU_MSG_TYPE_PUSH_BUFFER_INTERMEDIATE;
+
+ return allegro_mcu_push_buffer_internal(channel, type);
+}
+
+static int allegro_mcu_push_buffer_reference(struct allegro_channel *channel)
+{
+ enum mcu_msg_type type = MCU_MSG_TYPE_PUSH_BUFFER_REFERENCE;
+
+ return allegro_mcu_push_buffer_internal(channel, type);
+}
+
+static int allocate_buffers_internal(struct allegro_channel *channel,
+ struct list_head *list,
+ size_t n, size_t size)
+{
+ struct allegro_dev *dev = channel->dev;
+ unsigned int i;
+ int err;
+ struct allegro_buffer *buffer, *tmp;
+
+ for (i = 0; i < n; i++) {
+ buffer = kmalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!buffer) {
+ err = -ENOMEM;
+ goto err;
+ }
+ INIT_LIST_HEAD(&buffer->head);
+
+ err = allegro_alloc_buffer(dev, buffer, size);
+ if (err)
+ goto err;
+ list_add(&buffer->head, list);
+ }
+
+ return 0;
+
+err:
+ list_for_each_entry_safe(buffer, tmp, list, head) {
+ list_del(&buffer->head);
+ allegro_free_buffer(dev, buffer);
+ kfree(buffer);
+ }
+ return err;
+}
+
+static void destroy_buffers_internal(struct allegro_channel *channel,
+ struct list_head *list)
+{
+ struct allegro_dev *dev = channel->dev;
+ struct allegro_buffer *buffer, *tmp;
+
+ list_for_each_entry_safe(buffer, tmp, list, head) {
+ list_del(&buffer->head);
+ allegro_free_buffer(dev, buffer);
+ kfree(buffer);
+ }
+}
+
+static void destroy_reference_buffers(struct allegro_channel *channel)
+{
+ return destroy_buffers_internal(channel, &channel->buffers_reference);
+}
+
+static void destroy_intermediate_buffers(struct allegro_channel *channel)
+{
+ return destroy_buffers_internal(channel,
+ &channel->buffers_intermediate);
+}
+
+static int allocate_intermediate_buffers(struct allegro_channel *channel,
+ size_t n, size_t size)
+{
+ return allocate_buffers_internal(channel,
+ &channel->buffers_intermediate,
+ n, size);
+}
+
+static int allocate_reference_buffers(struct allegro_channel *channel,
+ size_t n, size_t size)
+{
+ return allocate_buffers_internal(channel,
+ &channel->buffers_reference,
+ n, PAGE_ALIGN(size));
+}
+
+static ssize_t allegro_h264_write_sps(struct allegro_channel *channel,
+ void *dest, size_t n)
+{
+ struct allegro_dev *dev = channel->dev;
+ struct nal_h264_sps *sps;
+ ssize_t size;
+ unsigned int size_mb = SIZE_MACROBLOCK;
+ /* Calculation of crop units in Rec. ITU-T H.264 (04/2017) p. 76 */
+ unsigned int crop_unit_x = 2;
+ unsigned int crop_unit_y = 2;
+
+ sps = kzalloc(sizeof(*sps), GFP_KERNEL);
+ if (!sps)
+ return -ENOMEM;
+
+ sps->profile_idc = nal_h264_profile_from_v4l2(channel->profile);
+ sps->constraint_set0_flag = 0;
+ sps->constraint_set1_flag = 1;
+ sps->constraint_set2_flag = 0;
+ sps->constraint_set3_flag = 0;
+ sps->constraint_set4_flag = 0;
+ sps->constraint_set5_flag = 0;
+ sps->level_idc = nal_h264_level_from_v4l2(channel->level);
+ sps->seq_parameter_set_id = 0;
+ sps->log2_max_frame_num_minus4 = 0;
+ sps->pic_order_cnt_type = 0;
+ sps->log2_max_pic_order_cnt_lsb_minus4 = 6;
+ sps->max_num_ref_frames = 3;
+ sps->gaps_in_frame_num_value_allowed_flag = 0;
+ sps->pic_width_in_mbs_minus1 =
+ DIV_ROUND_UP(channel->width, size_mb) - 1;
+ sps->pic_height_in_map_units_minus1 =
+ DIV_ROUND_UP(channel->height, size_mb) - 1;
+ sps->frame_mbs_only_flag = 1;
+ sps->mb_adaptive_frame_field_flag = 0;
+ sps->direct_8x8_inference_flag = 1;
+ sps->frame_cropping_flag =
+ (channel->width % size_mb) || (channel->height % size_mb);
+ if (sps->frame_cropping_flag) {
+ sps->crop_left = 0;
+ sps->crop_right = (round_up(channel->width, size_mb) - channel->width) / crop_unit_x;
+ sps->crop_top = 0;
+ sps->crop_bottom = (round_up(channel->height, size_mb) - channel->height) / crop_unit_y;
+ }
+ sps->vui_parameters_present_flag = 1;
+ sps->vui.aspect_ratio_info_present_flag = 0;
+ sps->vui.overscan_info_present_flag = 0;
+ sps->vui.video_signal_type_present_flag = 1;
+ sps->vui.video_format = 1;
+ sps->vui.video_full_range_flag = 0;
+ sps->vui.colour_description_present_flag = 1;
+ sps->vui.colour_primaries = 5;
+ sps->vui.transfer_characteristics = 5;
+ sps->vui.matrix_coefficients = 5;
+ sps->vui.chroma_loc_info_present_flag = 1;
+ sps->vui.chroma_sample_loc_type_top_field = 0;
+ sps->vui.chroma_sample_loc_type_bottom_field = 0;
+ sps->vui.timing_info_present_flag = 1;
+ sps->vui.num_units_in_tick = 1;
+ sps->vui.time_scale = 50;
+ sps->vui.fixed_frame_rate_flag = 1;
+ sps->vui.nal_hrd_parameters_present_flag = 0;
+ sps->vui.vcl_hrd_parameters_present_flag = 1;
+ sps->vui.vcl_hrd_parameters.cpb_cnt_minus1 = 0;
+ sps->vui.vcl_hrd_parameters.bit_rate_scale = 0;
+ sps->vui.vcl_hrd_parameters.cpb_size_scale = 1;
+ /* See Rec. ITU-T H.264 (04/2017) p. 410 E-53 */
+ sps->vui.vcl_hrd_parameters.bit_rate_value_minus1[0] =
+ channel->bitrate_peak / (1 << (6 + sps->vui.vcl_hrd_parameters.bit_rate_scale)) - 1;
+ /* See Rec. ITU-T H.264 (04/2017) p. 410 E-54 */
+ sps->vui.vcl_hrd_parameters.cpb_size_value_minus1[0] =
+ (channel->cpb_size * 1000) / (1 << (4 + sps->vui.vcl_hrd_parameters.cpb_size_scale)) - 1;
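+ /*
+ * Example with hypothetical settings: with bit_rate_scale = 0, a
+ * bitrate_peak of 6400000 gives bit_rate_value_minus1[0] =
+ * 6400000 / 64 - 1 = 99999; with cpb_size_scale = 1, a cpb_size of 3200
+ * gives cpb_size_value_minus1[0] = (3200 * 1000) / 32 - 1 = 99999.
+ */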
+ sps->vui.vcl_hrd_parameters.cbr_flag[0] = 1;
+ sps->vui.vcl_hrd_parameters.initial_cpb_removal_delay_length_minus1 = 31;
+ sps->vui.vcl_hrd_parameters.cpb_removal_delay_length_minus1 = 31;
+ sps->vui.vcl_hrd_parameters.dpb_output_delay_length_minus1 = 31;
+ sps->vui.vcl_hrd_parameters.time_offset_length = 0;
+ sps->vui.low_delay_hrd_flag = 0;
+ sps->vui.pic_struct_present_flag = 1;
+ sps->vui.bitstream_restriction_flag = 0;
+
+ size = nal_h264_write_sps(&dev->plat_dev->dev, dest, n, sps);
+
+ kfree(sps);
+
+ return size;
+}
+
+static ssize_t allegro_h264_write_pps(struct allegro_channel *channel,
+ void *dest, size_t n)
+{
+ struct allegro_dev *dev = channel->dev;
+ struct nal_h264_pps *pps;
+ ssize_t size;
+
+ pps = kzalloc(sizeof(*pps), GFP_KERNEL);
+ if (!pps)
+ return -ENOMEM;
+
+ pps->pic_parameter_set_id = 0;
+ pps->seq_parameter_set_id = 0;
+ pps->entropy_coding_mode_flag = 0;
+ pps->bottom_field_pic_order_in_frame_present_flag = 0;
+ pps->num_slice_groups_minus1 = 0;
+ pps->num_ref_idx_l0_default_active_minus1 = 2;
+ pps->num_ref_idx_l1_default_active_minus1 = 2;
+ pps->weighted_pred_flag = 0;
+ pps->weighted_bipred_idc = 0;
+ pps->pic_init_qp_minus26 = 0;
+ pps->pic_init_qs_minus26 = 0;
+ pps->chroma_qp_index_offset = 0;
+ pps->deblocking_filter_control_present_flag = 1;
+ pps->constrained_intra_pred_flag = 0;
+ pps->redundant_pic_cnt_present_flag = 0;
+ pps->transform_8x8_mode_flag = 0;
+ pps->pic_scaling_matrix_present_flag = 0;
+ pps->second_chroma_qp_index_offset = 0;
+
+ size = nal_h264_write_pps(&dev->plat_dev->dev, dest, n, pps);
+
+ kfree(pps);
+
+ return size;
+}
+
+static bool allegro_channel_is_at_eos(struct allegro_channel *channel)
+{
+ bool is_at_eos = false;
+
+ switch (allegro_get_state(channel)) {
+ case ALLEGRO_STATE_STOPPED:
+ is_at_eos = true;
+ break;
+ case ALLEGRO_STATE_DRAIN:
+ case ALLEGRO_STATE_WAIT_FOR_BUFFER:
+ if (v4l2_m2m_num_src_bufs_ready(channel->fh.m2m_ctx) == 0)
+ is_at_eos = true;
+ break;
+ default:
+ break;
+ }
+
+ return is_at_eos;
+}
+
+static void allegro_channel_buf_done(struct allegro_channel *channel,
+ struct vb2_v4l2_buffer *buf,
+ enum vb2_buffer_state state)
+{
+ const struct v4l2_event eos_event = {
+ .type = V4L2_EVENT_EOS
+ };
+
+ if (allegro_channel_is_at_eos(channel)) {
+ buf->flags |= V4L2_BUF_FLAG_LAST;
+ v4l2_event_queue_fh(&channel->fh, &eos_event);
+
+ allegro_set_state(channel, ALLEGRO_STATE_STOPPED);
+ }
+
+ v4l2_m2m_buf_done(buf, state);
+}
+
+static void allegro_channel_finish_frame(struct allegro_channel *channel,
+ struct mcu_msg_encode_frame_response *msg)
+{
+ struct allegro_dev *dev = channel->dev;
+ struct vb2_v4l2_buffer *src_buf;
+ struct vb2_v4l2_buffer *dst_buf;
+ struct {
+ u32 offset;
+ u32 size;
+ } *partition;
+ enum vb2_buffer_state state = VB2_BUF_STATE_ERROR;
+ char *curr;
+ ssize_t len;
+ ssize_t free;
+
+ src_buf = v4l2_m2m_src_buf_remove(channel->fh.m2m_ctx);
+
+ dst_buf = v4l2_m2m_dst_buf_remove(channel->fh.m2m_ctx);
+ dst_buf->sequence = channel->csequence++;
+
+ if (msg->error_code) {
+ v4l2_err(&dev->v4l2_dev,
+ "channel %d: error while encoding frame: %x\n",
+ channel->mcu_channel_id, msg->error_code);
+ goto err;
+ }
+
+ if (msg->partition_table_size != 1) {
+ v4l2_warn(&dev->v4l2_dev,
+ "channel %d: only handling first partition table entry (%d entries)\n",
+ channel->mcu_channel_id, msg->partition_table_size);
+ }
+
+ if (msg->partition_table_offset +
+ msg->partition_table_size * sizeof(*partition) >
+ vb2_plane_size(&dst_buf->vb2_buf, 0)) {
+ v4l2_err(&dev->v4l2_dev,
+ "channel %d: partition table outside of dst_buf\n",
+ channel->mcu_channel_id);
+ goto err;
+ }
+
+ partition =
+ vb2_plane_vaddr(&dst_buf->vb2_buf, 0) + msg->partition_table_offset;
+ if (partition->offset + partition->size >
+ vb2_plane_size(&dst_buf->vb2_buf, 0)) {
+ v4l2_err(&dev->v4l2_dev,
+ "channel %d: encoded frame is outside of dst_buf (offset 0x%x, size 0x%x)\n",
+ channel->mcu_channel_id, partition->offset,
+ partition->size);
+ goto err;
+ }
+
+ v4l2_dbg(2, debug, &dev->v4l2_dev,
+ "channel %d: encoded frame of size %d is at offset 0x%x\n",
+ channel->mcu_channel_id, partition->size, partition->offset);
+
+ /*
+ * The payload must include the data before the partition offset,
+ * because we will put the sps and pps data there.
+ */
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 0,
+ partition->offset + partition->size);
+
+ curr = vb2_plane_vaddr(&dst_buf->vb2_buf, 0);
+ free = partition->offset;
+ if (msg->is_idr) {
+ len = allegro_h264_write_sps(channel, curr, free);
+ if (len < 0) {
+ v4l2_err(&dev->v4l2_dev,
+ "not enough space for sequence parameter set: %zd left\n",
+ free);
+ goto err;
+ }
+ curr += len;
+ free -= len;
+ v4l2_dbg(1, debug, &dev->v4l2_dev,
+ "channel %d: wrote %zd byte SPS nal unit\n",
+ channel->mcu_channel_id, len);
+ }
+
+ if (msg->slice_type == AL_ENC_SLICE_TYPE_I) {
+ len = allegro_h264_write_pps(channel, curr, free);
+ if (len < 0) {
+ v4l2_err(&dev->v4l2_dev,
+ "not enough space for picture parameter set: %zd left\n",
+ free);
+ goto err;
+ }
+ curr += len;
+ free -= len;
+ v4l2_dbg(1, debug, &dev->v4l2_dev,
+ "channel %d: wrote %zd byte PPS nal unit\n",
+ channel->mcu_channel_id, len);
+ }
+
+ len = nal_h264_write_filler(&dev->plat_dev->dev, curr, free);
+ if (len < 0) {
+ v4l2_err(&dev->v4l2_dev,
+ "failed to write %zd filler data\n", free);
+ goto err;
+ }
+ curr += len;
+ free -= len;
+ v4l2_dbg(2, debug, &dev->v4l2_dev,
+ "channel %d: wrote %zd bytes filler nal unit\n",
+ channel->mcu_channel_id, len);
+
+ if (free != 0) {
+ v4l2_err(&dev->v4l2_dev,
+ "non-VCL NAL units do not fill space until VCL NAL unit: %zd bytes left\n",
+ free);
+ goto err;
+ }
+
+ state = VB2_BUF_STATE_DONE;
+
+ v4l2_m2m_buf_copy_metadata(src_buf, dst_buf, false);
+ if (msg->is_idr)
+ dst_buf->flags |= V4L2_BUF_FLAG_KEYFRAME;
+ else
+ dst_buf->flags |= V4L2_BUF_FLAG_PFRAME;
+
+ v4l2_dbg(1, debug, &dev->v4l2_dev,
+ "channel %d: encoded frame #%03d (%s%s, %d bytes)\n",
+ channel->mcu_channel_id,
+ dst_buf->sequence,
+ msg->is_idr ? "IDR, " : "",
+ msg->slice_type == AL_ENC_SLICE_TYPE_I ? "I slice" :
+ msg->slice_type == AL_ENC_SLICE_TYPE_P ? "P slice" : "unknown",
+ partition->size);
+
+err:
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
+
+ allegro_channel_buf_done(channel, dst_buf, state);
+
+ v4l2_m2m_job_finish(dev->m2m_dev, channel->fh.m2m_ctx);
+}
+
+static int allegro_handle_init(struct allegro_dev *dev,
+ struct mcu_msg_init_response *msg)
+{
+ complete(&dev->init_complete);
+
+ return 0;
+}
+
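+/*
+ * The CREATE_CHANNEL response carries the MCU channel id and the number and
+ * size of the intermediate and reference buffers that the firmware requests.
+ * These buffers are allocated and pushed to the MCU before the waiting
+ * caller is woken via channel->completion.
+ */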
+static int
+allegro_handle_create_channel(struct allegro_dev *dev,
+ struct mcu_msg_create_channel_response *msg)
+{
+ struct allegro_channel *channel;
+ int err = 0;
+
+ channel = allegro_find_channel_by_user_id(dev, msg->user_id);
+ if (IS_ERR(channel)) {
+ v4l2_warn(&dev->v4l2_dev,
+ "received %s for unknown user %d\n",
+ msg_type_name(msg->header.type),
+ msg->user_id);
+ return -EINVAL;
+ }
+
+ if (msg->error_code) {
+ v4l2_err(&dev->v4l2_dev,
+ "user %d: mcu failed to create channel: error %x\n",
+ channel->user_id, msg->error_code);
+ err = -EIO;
+ goto out;
+ }
+
+ channel->mcu_channel_id = msg->channel_id;
+ v4l2_dbg(1, debug, &dev->v4l2_dev,
+ "user %d: channel has channel id %d\n",
+ channel->user_id, channel->mcu_channel_id);
+
+ v4l2_dbg(1, debug, &dev->v4l2_dev,
+ "channel %d: intermediate buffers: %d x %d bytes\n",
+ channel->mcu_channel_id,
+ msg->int_buffers_count, msg->int_buffers_size);
+ err = allocate_intermediate_buffers(channel, msg->int_buffers_count,
+ msg->int_buffers_size);
+ if (err) {
+ v4l2_err(&dev->v4l2_dev,
+ "channel %d: failed to allocate intermediate buffers\n",
+ channel->mcu_channel_id);
+ goto out;
+ }
+ err = allegro_mcu_push_buffer_intermediate(channel);
+ if (err)
+ goto out;
+
+ v4l2_dbg(1, debug, &dev->v4l2_dev,
+ "channel %d: reference buffers: %d x %d bytes\n",
+ channel->mcu_channel_id,
+ msg->rec_buffers_count, msg->rec_buffers_size);
+ err = allocate_reference_buffers(channel, msg->rec_buffers_count,
+ msg->rec_buffers_size);
+ if (err) {
+ v4l2_err(&dev->v4l2_dev,
+ "channel %d: failed to allocate reference buffers\n",
+ channel->mcu_channel_id);
+ goto out;
+ }
+ err = allegro_mcu_push_buffer_reference(channel);
+ if (err)
+ goto out;
+
+out:
+ channel->error = err;
+ complete(&channel->completion);
+
+ /* Handled successfully, error is passed via channel->error */
+ return 0;
+}
+
+static int
+allegro_handle_destroy_channel(struct allegro_dev *dev,
+ struct mcu_msg_destroy_channel_response *msg)
+{
+ struct allegro_channel *channel;
+
+ channel = allegro_find_channel_by_channel_id(dev, msg->channel_id);
+ if (IS_ERR(channel)) {
+ v4l2_err(&dev->v4l2_dev,
+ "received %s for unknown channel %d\n",
+ msg_type_name(msg->header.type),
+ msg->channel_id);
+ return -EINVAL;
+ }
+
+ v4l2_dbg(2, debug, &dev->v4l2_dev,
+ "user %d: vcu destroyed channel %d\n",
+ channel->user_id, channel->mcu_channel_id);
+ complete(&channel->completion);
+
+ return 0;
+}
+
+static int
+allegro_handle_encode_frame(struct allegro_dev *dev,
+ struct mcu_msg_encode_frame_response *msg)
+{
+ struct allegro_channel *channel;
+
+ channel = allegro_find_channel_by_channel_id(dev, msg->channel_id);
+ if (IS_ERR(channel)) {
+ v4l2_err(&dev->v4l2_dev,
+ "received %s for unknown channel %d\n",
+ msg_type_name(msg->header.type),
+ msg->channel_id);
+ return -EINVAL;
+ }
+
+ allegro_channel_finish_frame(channel, msg);
+
+ return 0;
+}
+
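+/* Read a single message from the status mailbox and dispatch it by type. */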
+static int allegro_receive_message(struct allegro_dev *dev)
+{
+ union mcu_msg_response *msg;
+ ssize_t size;
+ int err = 0;
+
+ msg = kmalloc(sizeof(*msg), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ size = allegro_mbox_read(dev, &dev->mbox_status, msg, sizeof(*msg));
+ if (size < sizeof(msg->header)) {
+ v4l2_err(&dev->v4l2_dev,
+ "invalid mbox message (%zd): must be at least %zu\n",
+ size, sizeof(msg->header));
+ err = -EINVAL;
+ goto out;
+ }
+
+ switch (msg->header.type) {
+ case MCU_MSG_TYPE_INIT:
+ err = allegro_handle_init(dev, &msg->init);
+ break;
+ case MCU_MSG_TYPE_CREATE_CHANNEL:
+ err = allegro_handle_create_channel(dev, &msg->create_channel);
+ break;
+ case MCU_MSG_TYPE_DESTROY_CHANNEL:
+ err = allegro_handle_destroy_channel(dev,
+ &msg->destroy_channel);
+ break;
+ case MCU_MSG_TYPE_ENCODE_FRAME:
+ err = allegro_handle_encode_frame(dev, &msg->encode_frame);
+ break;
+ default:
+ v4l2_warn(&dev->v4l2_dev,
+ "%s: unknown message %s\n",
+ __func__, msg_type_name(msg->header.type));
+ err = -EINVAL;
+ break;
+ }
+
+out:
+ kfree(msg);
+
+ return err;
+}
+
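+/*
+ * The hard irq handler only acknowledges the interrupt by clearing the
+ * status register; reading and dispatching the mailbox message is deferred
+ * to the threaded handler.
+ */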
+static irqreturn_t allegro_hardirq(int irq, void *data)
+{
+ struct allegro_dev *dev = data;
+ unsigned int status;
+
+ regmap_read(dev->regmap, AL5_ITC_CPU_IRQ_STA, &status);
+ if (!(status & AL5_ITC_CPU_IRQ_STA_TRIGGERED))
+ return IRQ_NONE;
+
+ regmap_write(dev->regmap, AL5_ITC_CPU_IRQ_CLR, status);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t allegro_irq_thread(int irq, void *data)
+{
+ struct allegro_dev *dev = data;
+
+ allegro_receive_message(dev);
+
+ return IRQ_HANDLED;
+}
+
+static void allegro_copy_firmware(struct allegro_dev *dev,
+ const u8 * const buf, size_t size)
+{
+ int err = 0;
+
+ v4l2_dbg(1, debug, &dev->v4l2_dev,
+ "copy mcu firmware (%zu B) to SRAM\n", size);
+ err = regmap_bulk_write(dev->sram, 0x0, buf, size / 4);
+ if (err)
+ v4l2_err(&dev->v4l2_dev,
+ "failed to copy firmware: %d\n", err);
+}
+
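+/*
+ * The codec firmware is kept in DMA memory and fetched by the MCU through
+ * its instruction and data caches. The cache address offsets are set up so
+ * that (presumably) MCU addresses starting at MCU_CACHE_OFFSET map into the
+ * firmware buffer.
+ */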
+static void allegro_copy_fw_codec(struct allegro_dev *dev,
+ const u8 * const buf, size_t size)
+{
+ int err;
+ dma_addr_t icache_offset, dcache_offset;
+
+ /*
+ * The downstream driver allocates 600 KB for the codec firmware to have
+ * some extra space for "possible extensions." My tests were fine with
+ * allocating just enough memory for the actual firmware, but I am not
+ * sure that the firmware really does not use the remaining space.
+ */
+ err = allegro_alloc_buffer(dev, &dev->firmware, size);
+ if (err) {
+ v4l2_err(&dev->v4l2_dev,
+ "failed to allocate %zu bytes for firmware\n", size);
+ return;
+ }
+
+ v4l2_dbg(1, debug, &dev->v4l2_dev,
+ "copy codec firmware (%zd B) to phys %pad\n",
+ size, &dev->firmware.paddr);
+ memcpy(dev->firmware.vaddr, buf, size);
+
+ regmap_write(dev->regmap, AXI_ADDR_OFFSET_IP,
+ upper_32_bits(dev->firmware.paddr));
+
+ icache_offset = dev->firmware.paddr - MCU_CACHE_OFFSET;
+ v4l2_dbg(2, debug, &dev->v4l2_dev,
+ "icache_offset: msb = 0x%x, lsb = 0x%x\n",
+ upper_32_bits(icache_offset), lower_32_bits(icache_offset));
+ regmap_write(dev->regmap, AL5_ICACHE_ADDR_OFFSET_MSB,
+ upper_32_bits(icache_offset));
+ regmap_write(dev->regmap, AL5_ICACHE_ADDR_OFFSET_LSB,
+ lower_32_bits(icache_offset));
+
+ dcache_offset =
+ (dev->firmware.paddr & 0xffffffff00000000ULL) - MCU_CACHE_OFFSET;
+ v4l2_dbg(2, debug, &dev->v4l2_dev,
+ "dcache_offset: msb = 0x%x, lsb = 0x%x\n",
+ upper_32_bits(dcache_offset), lower_32_bits(dcache_offset));
+ regmap_write(dev->regmap, AL5_DCACHE_ADDR_OFFSET_MSB,
+ upper_32_bits(dcache_offset));
+ regmap_write(dev->regmap, AL5_DCACHE_ADDR_OFFSET_LSB,
+ lower_32_bits(dcache_offset));
+}
+
+static void allegro_free_fw_codec(struct allegro_dev *dev)
+{
+ allegro_free_buffer(dev, &dev->firmware);
+}
+
+/*
+ * Control functions for the MCU
+ */
+
+static int allegro_mcu_enable_interrupts(struct allegro_dev *dev)
+{
+ return regmap_write(dev->regmap, AL5_ITC_CPU_IRQ_MSK, BIT(0));
+}
+
+static int allegro_mcu_disable_interrupts(struct allegro_dev *dev)
+{
+ return regmap_write(dev->regmap, AL5_ITC_CPU_IRQ_MSK, 0);
+}
+
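+/* Poll AL5_MCU_STA until the MCU reports sleep; give up after 100 ms. */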
+static int allegro_mcu_wait_for_sleep(struct allegro_dev *dev)
+{
+ unsigned long timeout;
+ unsigned int status;
+
+ timeout = jiffies + msecs_to_jiffies(100);
+ while (regmap_read(dev->regmap, AL5_MCU_STA, &status) == 0 &&
+ status != AL5_MCU_STA_SLEEP) {
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+ cpu_relax();
+ }
+
+ return 0;
+}
+
+static int allegro_mcu_start(struct allegro_dev *dev)
+{
+ unsigned long timeout;
+ unsigned int status;
+ int err;
+
+ err = regmap_write(dev->regmap, AL5_MCU_WAKEUP, BIT(0));
+ if (err)
+ return err;
+
+ timeout = jiffies + msecs_to_jiffies(100);
+ while (regmap_read(dev->regmap, AL5_MCU_STA, &status) == 0 &&
+ status == AL5_MCU_STA_SLEEP) {
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+ cpu_relax();
+ }
+
+ err = regmap_write(dev->regmap, AL5_MCU_WAKEUP, 0);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int allegro_mcu_reset(struct allegro_dev *dev)
+{
+ int err;
+
+ err = regmap_write(dev->regmap,
+ AL5_MCU_RESET_MODE, AL5_MCU_RESET_MODE_SLEEP);
+ if (err < 0)
+ return err;
+
+ err = regmap_write(dev->regmap, AL5_MCU_RESET, AL5_MCU_RESET_SOFT);
+ if (err < 0)
+ return err;
+
+ return allegro_mcu_wait_for_sleep(dev);
+}
+
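+/*
+ * Ask the MCU to destroy the channel (if one was created), release the
+ * intermediate and reference buffers, and ungrab the controls so that they
+ * can be changed again.
+ */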
+static void allegro_destroy_channel(struct allegro_channel *channel)
+{
+ struct allegro_dev *dev = channel->dev;
+ unsigned long timeout;
+
+ if (channel_exists(channel)) {
+ reinit_completion(&channel->completion);
+ allegro_mcu_send_destroy_channel(dev, channel);
+ timeout = wait_for_completion_timeout(&channel->completion,
+ msecs_to_jiffies(5000));
+ if (timeout == 0)
+ v4l2_warn(&dev->v4l2_dev,
+ "channel %d: timeout while destroying\n",
+ channel->mcu_channel_id);
+
+ channel->mcu_channel_id = -1;
+ }
+
+ destroy_intermediate_buffers(channel);
+ destroy_reference_buffers(channel);
+
+ v4l2_ctrl_grab(channel->mpeg_video_h264_profile, false);
+ v4l2_ctrl_grab(channel->mpeg_video_h264_level, false);
+ v4l2_ctrl_grab(channel->mpeg_video_bitrate_mode, false);
+ v4l2_ctrl_grab(channel->mpeg_video_bitrate, false);
+ v4l2_ctrl_grab(channel->mpeg_video_bitrate_peak, false);
+ v4l2_ctrl_grab(channel->mpeg_video_cpb_size, false);
+ v4l2_ctrl_grab(channel->mpeg_video_gop_size, false);
+
+ if (channel->user_id != -1) {
+ clear_bit(channel->user_id, &dev->channel_user_ids);
+ channel->user_id = -1;
+ }
+}
+
+/*
+ * Create the MCU channel
+ *
+ * After the channel has been created, the picture size, format, colorspace
+ * and framerate are fixed. Also the codec, profile, bitrate, etc. cannot be
+ * changed anymore.
+ *
+ * The channel can be created only once. The MCU will accept source buffers
+ * and stream buffers only after a channel has been created.
+ */
+static int allegro_create_channel(struct allegro_channel *channel)
+{
+ struct allegro_dev *dev = channel->dev;
+ unsigned long timeout;
+ enum v4l2_mpeg_video_h264_level min_level;
+
+ if (channel_exists(channel)) {
+ v4l2_warn(&dev->v4l2_dev,
+ "channel already exists\n");
+ return 0;
+ }
+
+ channel->user_id = allegro_next_user_id(dev);
+ if (channel->user_id < 0) {
+ v4l2_err(&dev->v4l2_dev,
+ "no free channels available\n");
+ return -EBUSY;
+ }
+ set_bit(channel->user_id, &dev->channel_user_ids);
+
+ v4l2_dbg(1, debug, &dev->v4l2_dev,
+ "user %d: creating channel (%4.4s, %dx%d@%d)\n",
+ channel->user_id,
+ (char *)&channel->codec, channel->width, channel->height, 25);
+
+ min_level = select_minimum_h264_level(channel->width, channel->height);
+ if (channel->level < min_level) {
+ v4l2_warn(&dev->v4l2_dev,
+ "user %d: selected Level %s too low: increasing to Level %s\n",
+ channel->user_id,
+ v4l2_ctrl_get_menu(V4L2_CID_MPEG_VIDEO_H264_LEVEL)[channel->level],
+ v4l2_ctrl_get_menu(V4L2_CID_MPEG_VIDEO_H264_LEVEL)[min_level]);
+ channel->level = min_level;
+ }
+
+ v4l2_ctrl_grab(channel->mpeg_video_h264_profile, true);
+ v4l2_ctrl_grab(channel->mpeg_video_h264_level, true);
+ v4l2_ctrl_grab(channel->mpeg_video_bitrate_mode, true);
+ v4l2_ctrl_grab(channel->mpeg_video_bitrate, true);
+ v4l2_ctrl_grab(channel->mpeg_video_bitrate_peak, true);
+ v4l2_ctrl_grab(channel->mpeg_video_cpb_size, true);
+ v4l2_ctrl_grab(channel->mpeg_video_gop_size, true);
+
+ reinit_completion(&channel->completion);
+ allegro_mcu_send_create_channel(dev, channel);
+ timeout = wait_for_completion_timeout(&channel->completion,
+ msecs_to_jiffies(5000));
+ if (timeout == 0)
+ channel->error = -ETIMEDOUT;
+ if (channel->error)
+ goto err;
+
+ v4l2_dbg(1, debug, &dev->v4l2_dev,
+ "channel %d: accepting buffers\n",
+ channel->mcu_channel_id);
+
+ return 0;
+
+err:
+ allegro_destroy_channel(channel);
+
+ return channel->error;
+}
+
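+/*
+ * Set the channel defaults: NV12 input encoded as H.264 Baseline profile
+ * with constant bitrate. The level is the minimum level for the default
+ * frame size; bitrate and CPB size default to the maximum for that level.
+ */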
+static void allegro_set_default_params(struct allegro_channel *channel)
+{
+ channel->width = ALLEGRO_WIDTH_DEFAULT;
+ channel->height = ALLEGRO_HEIGHT_DEFAULT;
+ channel->stride = round_up(channel->width, 32);
+
+ channel->colorspace = V4L2_COLORSPACE_REC709;
+ channel->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ channel->quantization = V4L2_QUANTIZATION_DEFAULT;
+ channel->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+
+ channel->pixelformat = V4L2_PIX_FMT_NV12;
+ channel->sizeimage_raw = channel->stride * channel->height * 3 / 2;
+
+ channel->codec = V4L2_PIX_FMT_H264;
+ channel->profile = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE;
+ channel->level =
+ select_minimum_h264_level(channel->width, channel->height);
+ channel->sizeimage_encoded =
+ estimate_stream_size(channel->width, channel->height);
+
+ channel->bitrate_mode = V4L2_MPEG_VIDEO_BITRATE_MODE_CBR;
+ channel->bitrate = maximum_bitrate(channel->level);
+ channel->bitrate_peak = maximum_bitrate(channel->level);
+ channel->cpb_size = maximum_cpb_size(channel->level);
+ channel->gop_size = ALLEGRO_GOP_SIZE_DEFAULT;
+}
+
+static int allegro_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct allegro_channel *channel = vb2_get_drv_priv(vq);
+ struct allegro_dev *dev = channel->dev;
+
+ v4l2_dbg(2, debug, &dev->v4l2_dev,
+ "%s: queue setup[%s]: nplanes = %d\n",
+ V4L2_TYPE_IS_OUTPUT(vq->type) ? "output" : "capture",
+ *nplanes == 0 ? "REQBUFS" : "CREATE_BUFS", *nplanes);
+
+ if (*nplanes != 0) {
+ if (V4L2_TYPE_IS_OUTPUT(vq->type)) {
+ if (sizes[0] < channel->sizeimage_raw)
+ return -EINVAL;
+ } else {
+ if (sizes[0] < channel->sizeimage_encoded)
+ return -EINVAL;
+ }
+ } else {
+ *nplanes = 1;
+ if (V4L2_TYPE_IS_OUTPUT(vq->type))
+ sizes[0] = channel->sizeimage_raw;
+ else
+ sizes[0] = channel->sizeimage_encoded;
+ }
+
+ return 0;
+}
+
+static int allegro_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct allegro_channel *channel = vb2_get_drv_priv(vb->vb2_queue);
+ struct allegro_dev *dev = channel->dev;
+
+ if (allegro_get_state(channel) == ALLEGRO_STATE_DRAIN &&
+ V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type))
+ return -EBUSY;
+
+ if (V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
+ if (vbuf->field == V4L2_FIELD_ANY)
+ vbuf->field = V4L2_FIELD_NONE;
+ if (vbuf->field != V4L2_FIELD_NONE) {
+ v4l2_err(&dev->v4l2_dev,
+ "channel %d: unsupported field\n",
+ channel->mcu_channel_id);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static void allegro_buf_queue(struct vb2_buffer *vb)
+{
+ struct allegro_channel *channel = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+ if (allegro_get_state(channel) == ALLEGRO_STATE_WAIT_FOR_BUFFER &&
+ vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ allegro_channel_buf_done(channel, vbuf, VB2_BUF_STATE_DONE);
+ return;
+ }
+
+ v4l2_m2m_buf_queue(channel->fh.m2m_ctx, vbuf);
+}
+
+static int allegro_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct allegro_channel *channel = vb2_get_drv_priv(q);
+ struct allegro_dev *dev = channel->dev;
+
+ v4l2_dbg(2, debug, &dev->v4l2_dev,
+ "%s: start streaming\n",
+ V4L2_TYPE_IS_OUTPUT(q->type) ? "output" : "capture");
+
+ if (V4L2_TYPE_IS_OUTPUT(q->type)) {
+ channel->osequence = 0;
+ allegro_set_state(channel, ALLEGRO_STATE_ENCODING);
+ } else if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ channel->csequence = 0;
+ }
+
+ return 0;
+}
+
+static void allegro_stop_streaming(struct vb2_queue *q)
+{
+ struct allegro_channel *channel = vb2_get_drv_priv(q);
+ struct allegro_dev *dev = channel->dev;
+ struct vb2_v4l2_buffer *buffer;
+
+ v4l2_dbg(2, debug, &dev->v4l2_dev,
+ "%s: stop streaming\n",
+ V4L2_TYPE_IS_OUTPUT(q->type) ? "output" : "capture");
+
+ if (V4L2_TYPE_IS_OUTPUT(q->type)) {
+ allegro_set_state(channel, ALLEGRO_STATE_STOPPED);
+ while ((buffer = v4l2_m2m_src_buf_remove(channel->fh.m2m_ctx)))
+ v4l2_m2m_buf_done(buffer, VB2_BUF_STATE_ERROR);
+ } else if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ allegro_destroy_channel(channel);
+ while ((buffer = v4l2_m2m_dst_buf_remove(channel->fh.m2m_ctx)))
+ v4l2_m2m_buf_done(buffer, VB2_BUF_STATE_ERROR);
+ }
+}
+
+static const struct vb2_ops allegro_queue_ops = {
+ .queue_setup = allegro_queue_setup,
+ .buf_prepare = allegro_buf_prepare,
+ .buf_queue = allegro_buf_queue,
+ .start_streaming = allegro_start_streaming,
+ .stop_streaming = allegro_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static int allegro_queue_init(void *priv,
+ struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ int err;
+ struct allegro_channel *channel = priv;
+
+ src_vq->dev = &channel->dev->plat_dev->dev;
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ src_vq->io_modes = VB2_DMABUF | VB2_MMAP;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->drv_priv = channel;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->ops = &allegro_queue_ops;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->lock = &channel->dev->lock;
+ err = vb2_queue_init(src_vq);
+ if (err)
+ return err;
+
+ dst_vq->dev = &channel->dev->plat_dev->dev;
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ dst_vq->io_modes = VB2_DMABUF | VB2_MMAP;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->drv_priv = channel;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->ops = &allegro_queue_ops;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->lock = &channel->dev->lock;
+ err = vb2_queue_init(dst_vq);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int allegro_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct allegro_channel *channel = container_of(ctrl->handler,
+ struct allegro_channel,
+ ctrl_handler);
+ struct allegro_dev *dev = channel->dev;
+
+ v4l2_dbg(1, debug, &dev->v4l2_dev,
+ "s_ctrl: %s = %d\n", v4l2_ctrl_get_name(ctrl->id), ctrl->val);
+
+ switch (ctrl->id) {
+ case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
+ channel->level = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
+ channel->bitrate_mode = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_BITRATE:
+ channel->bitrate = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK:
+ channel->bitrate_peak = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE:
+ channel->cpb_size = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
+ channel->gop_size = ctrl->val;
+ break;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops allegro_ctrl_ops = {
+ .s_ctrl = allegro_s_ctrl,
+};
+
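+/*
+ * Each open file handle gets its own channel with its own control handler
+ * and mem2mem context. The corresponding MCU channel is only created when
+ * streaming is started on the capture queue.
+ */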
+static int allegro_open(struct file *file)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct allegro_dev *dev = video_get_drvdata(vdev);
+ struct allegro_channel *channel = NULL;
+ struct v4l2_ctrl_handler *handler;
+ u64 mask;
+
+ channel = kzalloc(sizeof(*channel), GFP_KERNEL);
+ if (!channel)
+ return -ENOMEM;
+
+ v4l2_fh_init(&channel->fh, vdev);
+ file->private_data = &channel->fh;
+ v4l2_fh_add(&channel->fh);
+
+ init_completion(&channel->completion);
+
+ channel->dev = dev;
+
+ allegro_set_default_params(channel);
+
+ handler = &channel->ctrl_handler;
+ v4l2_ctrl_handler_init(handler, 0);
+ channel->mpeg_video_h264_profile = v4l2_ctrl_new_std_menu(handler,
+ &allegro_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_PROFILE,
+ V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE, 0x0,
+ V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE);
+ mask = 1 << V4L2_MPEG_VIDEO_H264_LEVEL_1B;
+ channel->mpeg_video_h264_level = v4l2_ctrl_new_std_menu(handler,
+ &allegro_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_LEVEL,
+ V4L2_MPEG_VIDEO_H264_LEVEL_5_1, mask,
+ V4L2_MPEG_VIDEO_H264_LEVEL_5_1);
+ channel->mpeg_video_bitrate_mode = v4l2_ctrl_new_std_menu(handler,
+ &allegro_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_BITRATE_MODE,
+ V4L2_MPEG_VIDEO_BITRATE_MODE_CBR, 0,
+ channel->bitrate_mode);
+ channel->mpeg_video_bitrate = v4l2_ctrl_new_std(handler,
+ &allegro_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_BITRATE,
+ 0, maximum_bitrate(V4L2_MPEG_VIDEO_H264_LEVEL_5_1),
+ 1, channel->bitrate);
+ channel->mpeg_video_bitrate_peak = v4l2_ctrl_new_std(handler,
+ &allegro_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_BITRATE_PEAK,
+ 0, maximum_bitrate(V4L2_MPEG_VIDEO_H264_LEVEL_5_1),
+ 1, channel->bitrate_peak);
+ channel->mpeg_video_cpb_size = v4l2_ctrl_new_std(handler,
+ &allegro_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE,
+ 0, maximum_cpb_size(V4L2_MPEG_VIDEO_H264_LEVEL_5_1),
+ 1, channel->cpb_size);
+ channel->mpeg_video_gop_size = v4l2_ctrl_new_std(handler,
+ &allegro_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_GOP_SIZE,
+ 0, ALLEGRO_GOP_SIZE_MAX,
+ 1, channel->gop_size);
+ v4l2_ctrl_new_std(handler,
+ &allegro_ctrl_ops,
+ V4L2_CID_MIN_BUFFERS_FOR_OUTPUT,
+ 1, 32,
+ 1, 1);
+ channel->fh.ctrl_handler = handler;
+
+ channel->mcu_channel_id = -1;
+ channel->user_id = -1;
+
+ INIT_LIST_HEAD(&channel->buffers_reference);
+ INIT_LIST_HEAD(&channel->buffers_intermediate);
+
+ list_add(&channel->list, &dev->channels);
+
+ channel->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, channel,
+ allegro_queue_init);
+
+ return 0;
+}
+
+static int allegro_release(struct file *file)
+{
+ struct allegro_channel *channel = fh_to_channel(file->private_data);
+
+ v4l2_m2m_ctx_release(channel->fh.m2m_ctx);
+
+ list_del(&channel->list);
+
+ v4l2_ctrl_handler_free(&channel->ctrl_handler);
+
+ v4l2_fh_del(&channel->fh);
+ v4l2_fh_exit(&channel->fh);
+
+ kfree(channel);
+
+ return 0;
+}
+
+static int allegro_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct allegro_dev *dev = video_get_drvdata(vdev);
+
+ strscpy(cap->driver, KBUILD_MODNAME, sizeof(cap->driver));
+ strscpy(cap->card, "Allegro DVT Video Encoder", sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
+ dev_name(&dev->plat_dev->dev));
+
+ return 0;
+}
+
+static int allegro_enum_fmt_vid(struct file *file, void *fh,
+ struct v4l2_fmtdesc *f)
+{
+ if (f->index)
+ return -EINVAL;
+ switch (f->type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ f->pixelformat = V4L2_PIX_FMT_NV12;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ f->pixelformat = V4L2_PIX_FMT_H264;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int allegro_g_fmt_vid_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct allegro_channel *channel = fh_to_channel(fh);
+
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+ f->fmt.pix.width = channel->width;
+ f->fmt.pix.height = channel->height;
+
+ f->fmt.pix.colorspace = channel->colorspace;
+ f->fmt.pix.ycbcr_enc = channel->ycbcr_enc;
+ f->fmt.pix.quantization = channel->quantization;
+ f->fmt.pix.xfer_func = channel->xfer_func;
+
+ f->fmt.pix.pixelformat = channel->codec;
+ f->fmt.pix.bytesperline = 0;
+ f->fmt.pix.sizeimage = channel->sizeimage_encoded;
+
+ return 0;
+}
+
+static int allegro_try_fmt_vid_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+
+ f->fmt.pix.width = clamp_t(__u32, f->fmt.pix.width,
+ ALLEGRO_WIDTH_MIN, ALLEGRO_WIDTH_MAX);
+ f->fmt.pix.height = clamp_t(__u32, f->fmt.pix.height,
+ ALLEGRO_HEIGHT_MIN, ALLEGRO_HEIGHT_MAX);
+
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_H264;
+ f->fmt.pix.bytesperline = 0;
+ f->fmt.pix.sizeimage =
+ estimate_stream_size(f->fmt.pix.width, f->fmt.pix.height);
+
+ return 0;
+}
+
+static int allegro_g_fmt_vid_out(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct allegro_channel *channel = fh_to_channel(fh);
+
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+
+ f->fmt.pix.width = channel->width;
+ f->fmt.pix.height = channel->height;
+
+ f->fmt.pix.colorspace = channel->colorspace;
+ f->fmt.pix.ycbcr_enc = channel->ycbcr_enc;
+ f->fmt.pix.quantization = channel->quantization;
+ f->fmt.pix.xfer_func = channel->xfer_func;
+
+ f->fmt.pix.pixelformat = channel->pixelformat;
+ f->fmt.pix.bytesperline = channel->stride;
+ f->fmt.pix.sizeimage = channel->sizeimage_raw;
+
+ return 0;
+}
+
+static int allegro_try_fmt_vid_out(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+
+ /*
+ * The firmware of the Allegro codec handles the padding internally
+ * and expects the visual frame size when configuring a channel.
+ * Therefore, unlike other encoder drivers, this driver does not round
+ * up the width and height to macroblock alignment and does not
+ * implement the selection api.
+ */
+ f->fmt.pix.width = clamp_t(__u32, f->fmt.pix.width,
+ ALLEGRO_WIDTH_MIN, ALLEGRO_WIDTH_MAX);
+ f->fmt.pix.height = clamp_t(__u32, f->fmt.pix.height,
+ ALLEGRO_HEIGHT_MIN, ALLEGRO_HEIGHT_MAX);
+
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_NV12;
+ f->fmt.pix.bytesperline = round_up(f->fmt.pix.width, 32);
+ f->fmt.pix.sizeimage =
+ f->fmt.pix.bytesperline * f->fmt.pix.height * 3 / 2;
+
+ return 0;
+}
+
+static int allegro_s_fmt_vid_out(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct allegro_channel *channel = fh_to_channel(fh);
+ int err;
+
+ err = allegro_try_fmt_vid_out(file, fh, f);
+ if (err)
+ return err;
+
+ channel->width = f->fmt.pix.width;
+ channel->height = f->fmt.pix.height;
+ channel->stride = f->fmt.pix.bytesperline;
+ channel->sizeimage_raw = f->fmt.pix.sizeimage;
+
+ channel->colorspace = f->fmt.pix.colorspace;
+ channel->ycbcr_enc = f->fmt.pix.ycbcr_enc;
+ channel->quantization = f->fmt.pix.quantization;
+ channel->xfer_func = f->fmt.pix.xfer_func;
+
+ channel->level =
+ select_minimum_h264_level(channel->width, channel->height);
+ channel->sizeimage_encoded =
+ estimate_stream_size(channel->width, channel->height);
+
+ return 0;
+}
+
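+/*
+ * V4L2_ENC_CMD_STOP moves an encoding channel into the DRAIN state. Queued
+ * source buffers are still encoded; once none are left, EOS is signaled on a
+ * capture buffer, or the channel waits in WAIT_FOR_BUFFER until one is
+ * queued.
+ */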
+static int allegro_channel_cmd_stop(struct allegro_channel *channel)
+{
+ struct allegro_dev *dev = channel->dev;
+ struct vb2_v4l2_buffer *dst_buf;
+
+ switch (allegro_get_state(channel)) {
+ case ALLEGRO_STATE_DRAIN:
+ case ALLEGRO_STATE_WAIT_FOR_BUFFER:
+ return -EBUSY;
+ case ALLEGRO_STATE_ENCODING:
+ allegro_set_state(channel, ALLEGRO_STATE_DRAIN);
+ break;
+ default:
+ return 0;
+ }
+
+ /* If there are output buffers, they must be encoded */
+ if (v4l2_m2m_num_src_bufs_ready(channel->fh.m2m_ctx) != 0) {
+ v4l2_dbg(1, debug, &dev->v4l2_dev,
+ "channel %d: CMD_STOP: continue encoding src buffers\n",
+ channel->mcu_channel_id);
+ return 0;
+ }
+
+ /* If a capture buffer is available, use it to signal EOS */
+ dst_buf = v4l2_m2m_dst_buf_remove(channel->fh.m2m_ctx);
+ if (dst_buf) {
+ v4l2_dbg(1, debug, &dev->v4l2_dev,
+ "channel %d: CMD_STOP: signaling EOS\n",
+ channel->mcu_channel_id);
+ allegro_channel_buf_done(channel, dst_buf, VB2_BUF_STATE_DONE);
+ return 0;
+ }
+
+ /*
+ * If there are no capture buffers, we need to wait for the next
+ * buffer to signal EOS.
+ */
+ v4l2_dbg(1, debug, &dev->v4l2_dev,
+ "channel %d: CMD_STOP: wait for CAPTURE buffer to signal EOS\n",
+ channel->mcu_channel_id);
+ allegro_set_state(channel, ALLEGRO_STATE_WAIT_FOR_BUFFER);
+
+ return 0;
+}
+
+static int allegro_channel_cmd_start(struct allegro_channel *channel)
+{
+ switch (allegro_get_state(channel)) {
+ case ALLEGRO_STATE_DRAIN:
+ case ALLEGRO_STATE_WAIT_FOR_BUFFER:
+ return -EBUSY;
+ case ALLEGRO_STATE_STOPPED:
+ allegro_set_state(channel, ALLEGRO_STATE_ENCODING);
+ break;
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
+static int allegro_encoder_cmd(struct file *file, void *fh,
+ struct v4l2_encoder_cmd *cmd)
+{
+ struct allegro_channel *channel = fh_to_channel(fh);
+ int err;
+
+ err = v4l2_m2m_ioctl_try_encoder_cmd(file, fh, cmd);
+ if (err)
+ return err;
+
+ switch (cmd->cmd) {
+ case V4L2_ENC_CMD_STOP:
+ err = allegro_channel_cmd_stop(channel);
+ break;
+ case V4L2_ENC_CMD_START:
+ err = allegro_channel_cmd_start(channel);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
+static int allegro_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ switch (fsize->pixel_format) {
+ case V4L2_PIX_FMT_H264:
+ case V4L2_PIX_FMT_NV12:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (fsize->index)
+ return -EINVAL;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_CONTINUOUS;
+ fsize->stepwise.min_width = ALLEGRO_WIDTH_MIN;
+ fsize->stepwise.max_width = ALLEGRO_WIDTH_MAX;
+ fsize->stepwise.step_width = 1;
+ fsize->stepwise.min_height = ALLEGRO_HEIGHT_MIN;
+ fsize->stepwise.max_height = ALLEGRO_HEIGHT_MAX;
+ fsize->stepwise.step_height = 1;
+
+ return 0;
+}
+
+static int allegro_ioctl_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct v4l2_fh *fh = file->private_data;
+ struct allegro_channel *channel = fh_to_channel(fh);
+ int err;
+
+ if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ err = allegro_create_channel(channel);
+ if (err)
+ return err;
+ }
+
+ return v4l2_m2m_streamon(file, fh->m2m_ctx, type);
+}
+
+static int allegro_subscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ switch (sub->type) {
+ case V4L2_EVENT_EOS:
+ return v4l2_event_subscribe(fh, sub, 0, NULL);
+ default:
+ return v4l2_ctrl_subscribe_event(fh, sub);
+ }
+}
+
+static const struct v4l2_ioctl_ops allegro_ioctl_ops = {
+ .vidioc_querycap = allegro_querycap,
+ .vidioc_enum_fmt_vid_cap = allegro_enum_fmt_vid,
+ .vidioc_enum_fmt_vid_out = allegro_enum_fmt_vid,
+ .vidioc_g_fmt_vid_cap = allegro_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = allegro_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = allegro_try_fmt_vid_cap,
+ .vidioc_g_fmt_vid_out = allegro_g_fmt_vid_out,
+ .vidioc_try_fmt_vid_out = allegro_try_fmt_vid_out,
+ .vidioc_s_fmt_vid_out = allegro_s_fmt_vid_out,
+
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+
+ .vidioc_streamon = allegro_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+
+ .vidioc_try_encoder_cmd = v4l2_m2m_ioctl_try_encoder_cmd,
+ .vidioc_encoder_cmd = allegro_encoder_cmd,
+ .vidioc_enum_framesizes = allegro_enum_framesizes,
+
+ .vidioc_subscribe_event = allegro_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static const struct v4l2_file_operations allegro_fops = {
+ .owner = THIS_MODULE,
+ .open = allegro_open,
+ .release = allegro_release,
+ .poll = v4l2_m2m_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+static int allegro_register_device(struct allegro_dev *dev)
+{
+ struct video_device *video_dev = &dev->video_dev;
+
+ strscpy(video_dev->name, "allegro", sizeof(video_dev->name));
+ video_dev->fops = &allegro_fops;
+ video_dev->ioctl_ops = &allegro_ioctl_ops;
+ video_dev->release = video_device_release_empty;
+ video_dev->lock = &dev->lock;
+ video_dev->v4l2_dev = &dev->v4l2_dev;
+ video_dev->vfl_dir = VFL_DIR_M2M;
+ video_dev->device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
+ video_set_drvdata(video_dev, dev);
+
+ return video_register_device(video_dev, VFL_TYPE_GRABBER, 0);
+}
+
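+/*
+ * Pass the next capture buffer to the MCU as stream buffer and request
+ * encoding of the next source frame. The job is finished asynchronously in
+ * allegro_channel_finish_frame() once the ENCODE_FRAME response arrives.
+ */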
+static void allegro_device_run(void *priv)
+{
+ struct allegro_channel *channel = priv;
+ struct allegro_dev *dev = channel->dev;
+ struct vb2_v4l2_buffer *src_buf;
+ struct vb2_v4l2_buffer *dst_buf;
+ dma_addr_t src_y;
+ dma_addr_t src_uv;
+ dma_addr_t dst_addr;
+ unsigned long dst_size;
+
+ dst_buf = v4l2_m2m_next_dst_buf(channel->fh.m2m_ctx);
+ dst_addr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
+ dst_size = vb2_plane_size(&dst_buf->vb2_buf, 0);
+ allegro_mcu_send_put_stream_buffer(dev, channel, dst_addr, dst_size);
+
+ src_buf = v4l2_m2m_next_src_buf(channel->fh.m2m_ctx);
+ src_buf->sequence = channel->osequence++;
+
+ src_y = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
+ src_uv = src_y + (channel->stride * channel->height);
+ allegro_mcu_send_encode_frame(dev, channel, src_y, src_uv);
+}
+
+static const struct v4l2_m2m_ops allegro_m2m_ops = {
+ .device_run = allegro_device_run,
+};
+
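+/*
+ * Bring up the MCU: set up both mailboxes, enable interrupts, start the MCU
+ * and wait for its INIT message, then hand over the suballocator buffer and
+ * wait for the MCU to acknowledge the configuration.
+ */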
+static int allegro_mcu_hw_init(struct allegro_dev *dev,
+ const struct fw_info *info)
+{
+ int err;
+
+ allegro_mbox_init(dev, &dev->mbox_command,
+ info->mailbox_cmd, info->mailbox_size);
+ allegro_mbox_init(dev, &dev->mbox_status,
+ info->mailbox_status, info->mailbox_size);
+
+ allegro_mcu_enable_interrupts(dev);
+
+ /* The mcu sends INIT after reset. */
+ allegro_mcu_start(dev);
+ err = allegro_mcu_wait_for_init_timeout(dev, 5000);
+ if (err < 0) {
+ v4l2_err(&dev->v4l2_dev,
+ "mcu did not send INIT after reset\n");
+ err = -EIO;
+ goto err_disable_interrupts;
+ }
+
+ err = allegro_alloc_buffer(dev, &dev->suballocator,
+ info->suballocator_size);
+ if (err) {
+ v4l2_err(&dev->v4l2_dev,
+ "failed to allocate %zu bytes for suballocator\n",
+ info->suballocator_size);
+ goto err_reset_mcu;
+ }
+
+ allegro_mcu_send_init(dev, dev->suballocator.paddr,
+ dev->suballocator.size);
+ err = allegro_mcu_wait_for_init_timeout(dev, 5000);
+ if (err < 0) {
+ v4l2_err(&dev->v4l2_dev,
+ "mcu failed to configure sub-allocator\n");
+ err = -EIO;
+ goto err_free_suballocator;
+ }
+
+ return 0;
+
+err_free_suballocator:
+ allegro_free_buffer(dev, &dev->suballocator);
+err_reset_mcu:
+ allegro_mcu_reset(dev);
+err_disable_interrupts:
+ allegro_mcu_disable_interrupts(dev);
+
+ return err;
+}
+
+static int allegro_mcu_hw_deinit(struct allegro_dev *dev)
+{
+ int err;
+
+ err = allegro_mcu_reset(dev);
+ if (err)
+ v4l2_warn(&dev->v4l2_dev,
+ "mcu failed to enter sleep state\n");
+
+ err = allegro_mcu_disable_interrupts(dev);
+ if (err)
+ v4l2_warn(&dev->v4l2_dev,
+ "failed to disable interrupts\n");
+
+ allegro_free_buffer(dev, &dev->suballocator);
+
+ return 0;
+}
+
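+/*
+ * Called as soon as the MCU firmware has been loaded. The driver also needs
+ * the codec firmware (al5e.fw); only after both have been copied (the MCU
+ * firmware to SRAM, the codec firmware to DMA memory) can the MCU be started
+ * and the V4L2 m2m device be registered.
+ */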
+static void allegro_fw_callback(const struct firmware *fw, void *context)
+{
+ struct allegro_dev *dev = context;
+ const char *fw_codec_name = "al5e.fw";
+ const struct firmware *fw_codec;
+ int err;
+ const struct fw_info *info;
+
+ if (!fw)
+ return;
+
+ v4l2_dbg(1, debug, &dev->v4l2_dev,
+ "requesting codec firmware '%s'\n", fw_codec_name);
+ err = request_firmware(&fw_codec, fw_codec_name, &dev->plat_dev->dev);
+ if (err)
+ goto err_release_firmware;
+
+ info = allegro_get_firmware_info(dev, fw, fw_codec);
+ if (!info) {
+ v4l2_err(&dev->v4l2_dev, "firmware is not supported\n");
+ goto err_release_firmware_codec;
+ }
+
+ v4l2_info(&dev->v4l2_dev,
+ "using mcu firmware version '%s'\n", info->version);
+
+ /* Ensure that the mcu is sleeping at the reset vector */
+ err = allegro_mcu_reset(dev);
+ if (err) {
+ v4l2_err(&dev->v4l2_dev, "failed to reset mcu\n");
+ goto err_release_firmware_codec;
+ }
+
+ allegro_copy_firmware(dev, fw->data, fw->size);
+ allegro_copy_fw_codec(dev, fw_codec->data, fw_codec->size);
+
+ err = allegro_mcu_hw_init(dev, info);
+ if (err) {
+ v4l2_err(&dev->v4l2_dev, "failed to initialize mcu\n");
+ goto err_free_fw_codec;
+ }
+
+ dev->m2m_dev = v4l2_m2m_init(&allegro_m2m_ops);
+ if (IS_ERR(dev->m2m_dev)) {
+ v4l2_err(&dev->v4l2_dev, "failed to init mem2mem device\n");
+ goto err_mcu_hw_deinit;
+ }
+
+ err = allegro_register_device(dev);
+ if (err) {
+ v4l2_err(&dev->v4l2_dev, "failed to register video device\n");
+ goto err_m2m_release;
+ }
+
+ v4l2_dbg(1, debug, &dev->v4l2_dev,
+ "allegro codec registered as /dev/video%d\n",
+ dev->video_dev.num);
+
+ release_firmware(fw_codec);
+ release_firmware(fw);
+
+ return;
+
+err_m2m_release:
+ v4l2_m2m_release(dev->m2m_dev);
+ dev->m2m_dev = NULL;
+err_mcu_hw_deinit:
+ allegro_mcu_hw_deinit(dev);
+err_free_fw_codec:
+ allegro_free_fw_codec(dev);
+err_release_firmware_codec:
+ release_firmware(fw_codec);
+err_release_firmware:
+ release_firmware(fw);
+}
+
+static int allegro_firmware_request_nowait(struct allegro_dev *dev)
+{
+ const char *fw = "al5e_b.fw";
+
+ v4l2_dbg(1, debug, &dev->v4l2_dev,
+ "requesting firmware '%s'\n", fw);
+ return request_firmware_nowait(THIS_MODULE, true, fw,
+ &dev->plat_dev->dev, GFP_KERNEL, dev,
+ allegro_fw_callback);
+}
+
+static int allegro_probe(struct platform_device *pdev)
+{
+ struct allegro_dev *dev;
+ struct resource *res, *sram_res;
+ int ret;
+ int irq;
+ void __iomem *regs, *sram_regs;
+
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+ dev->plat_dev = pdev;
+ init_completion(&dev->init_complete);
+ INIT_LIST_HEAD(&dev->channels);
+
+ mutex_init(&dev->lock);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
+ if (!res) {
+ dev_err(&pdev->dev,
+ "regs resource missing from device tree\n");
+ return -EINVAL;
+ }
+ regs = devm_ioremap_nocache(&pdev->dev, res->start, resource_size(res));
+ if (!regs) {
+ dev_err(&pdev->dev, "failed to map registers\n");
+ return -ENOMEM;
+ }
+ dev->regmap = devm_regmap_init_mmio(&pdev->dev, regs,
+ &allegro_regmap_config);
+ if (IS_ERR(dev->regmap)) {
+ dev_err(&pdev->dev, "failed to init regmap\n");
+ return PTR_ERR(dev->regmap);
+ }
+
+ sram_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
+ if (!sram_res) {
+ dev_err(&pdev->dev,
+ "sram resource missing from device tree\n");
+ return -EINVAL;
+ }
+ sram_regs = devm_ioremap_nocache(&pdev->dev,
+ sram_res->start,
+ resource_size(sram_res));
+ if (!sram_regs) {
+ dev_err(&pdev->dev, "failed to map sram\n");
+ return -ENOMEM;
+ }
+ dev->sram = devm_regmap_init_mmio(&pdev->dev, sram_regs,
+ &allegro_sram_config);
+ if (IS_ERR(dev->sram)) {
+ dev_err(&pdev->dev, "failed to init sram\n");
+ return PTR_ERR(dev->sram);
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "failed to get irq resource\n");
+ return irq;
+ }
+ ret = devm_request_threaded_irq(&pdev->dev, irq,
+ allegro_hardirq,
+ allegro_irq_thread,
+ IRQF_SHARED, dev_name(&pdev->dev), dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to request irq: %d\n", ret);
+ return ret;
+ }
+
+ ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, dev);
+
+ ret = allegro_firmware_request_nowait(dev);
+ if (ret < 0) {
+ v4l2_err(&dev->v4l2_dev,
+ "failed to request firmware: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int allegro_remove(struct platform_device *pdev)
+{
+ struct allegro_dev *dev = platform_get_drvdata(pdev);
+
+ video_unregister_device(&dev->video_dev);
+ if (dev->m2m_dev)
+ v4l2_m2m_release(dev->m2m_dev);
+ allegro_mcu_hw_deinit(dev);
+ allegro_free_fw_codec(dev);
+
+ v4l2_device_unregister(&dev->v4l2_dev);
+
+ return 0;
+}
+
+static const struct of_device_id allegro_dt_ids[] = {
+ { .compatible = "allegro,al5e-1.1" },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, allegro_dt_ids);
+
+static struct platform_driver allegro_driver = {
+ .probe = allegro_probe,
+ .remove = allegro_remove,
+ .driver = {
+ .name = "allegro",
+ .of_match_table = of_match_ptr(allegro_dt_ids),
+ },
+};
+
+module_platform_driver(allegro_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Michael Tretter <kernel@pengutronix.de>");
+MODULE_DESCRIPTION("Allegro DVT encoder driver");
diff --git a/drivers/staging/media/allegro-dvt/nal-h264.c b/drivers/staging/media/allegro-dvt/nal-h264.c
new file mode 100644
index 000000000000..4e14b77851e1
--- /dev/null
+++ b/drivers/staging/media/allegro-dvt/nal-h264.c
@@ -0,0 +1,1001 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Pengutronix, Michael Tretter <kernel@pengutronix.de>
+ *
+ * Convert NAL units between raw byte sequence payloads (RBSP) and C structs
+ *
+ * The conversion is defined in "ITU-T Rec. H.264 (04/2017) Advanced video
+ * coding for generic audiovisual services". Decoder drivers may use the
+ * parser to parse RBSP from encoded streams and configure the hardware, if
+ * the hardware is not able to parse RBSP itself. Encoder drivers may use the
+ * generator to generate the RBSP for SPS/PPS nal units and add them to the
+ * encoded stream if the hardware does not generate the units.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/v4l2-controls.h>
+
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/log2.h>
+
+#include "nal-h264.h"
+
+/*
+ * See Rec. ITU-T H.264 (04/2017) Table 7-1 – NAL unit type codes, syntax
+ * element categories, and NAL unit type classes
+ */
+enum nal_unit_type {
+ SEQUENCE_PARAMETER_SET = 7,
+ PICTURE_PARAMETER_SET = 8,
+ FILLER_DATA = 12,
+};
+
+struct rbsp;
+
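+/*
+ * The same serialization functions are used to parse and to generate NAL
+ * units; the nal_h264_ops decide whether a syntax element is read from or
+ * written to the rbsp.
+ */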
+struct nal_h264_ops {
+ int (*rbsp_bit)(struct rbsp *rbsp, int *val);
+ int (*rbsp_bits)(struct rbsp *rbsp, int n, unsigned int *val);
+ int (*rbsp_uev)(struct rbsp *rbsp, unsigned int *val);
+ int (*rbsp_sev)(struct rbsp *rbsp, int *val);
+};
+
+/**
+ * struct rbsp - State object for handling a raw byte sequence payload
+ * @data: pointer to the data of the rbsp
+ * @size: maximum size of the data of the rbsp
+ * @pos: current bit position inside the rbsp
+ * @num_consecutive_zeros: number of zeros before @pos
+ * @ops: per datatype functions for interacting with the rbsp
+ * @error: an error occurred while handling the rbsp
+ *
+ * This struct is passed around the various parsing functions and tracks the
+ * current position within the raw byte sequence payload.
+ *
+ * The @ops field separates the operation, i.e., reading/writing a value
+ * from/to that rbsp, from the structure of the NAL unit. This allows a
+ * single function to iterate the NAL unit, while @ops provides the function
+ * pointers for handling each type in the rbsp.
+ */
+struct rbsp {
+ u8 *data;
+ size_t size;
+ unsigned int pos;
+ unsigned int num_consecutive_zeros;
+ struct nal_h264_ops *ops;
+ int error;
+};
+
+static void rbsp_init(struct rbsp *rbsp, void *addr, size_t size,
+ struct nal_h264_ops *ops)
+{
+ if (!rbsp)
+ return;
+
+ rbsp->data = addr;
+ rbsp->size = size;
+ rbsp->pos = 0;
+ rbsp->ops = ops;
+ rbsp->error = 0;
+}
+
+/**
+ * nal_h264_profile_from_v4l2() - Get profile_idc for v4l2 h264 profile
+ * @profile: the profile as &enum v4l2_mpeg_video_h264_profile
+ *
+ * Convert the &enum v4l2_mpeg_video_h264_profile to profile_idc as specified
+ * in Rec. ITU-T H.264 (04/2017) A.2.
+ *
+ * Return: the profile_idc for the passed profile
+ */
+int nal_h264_profile_from_v4l2(enum v4l2_mpeg_video_h264_profile profile)
+{
+ switch (profile) {
+ case V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE:
+ return 66;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_MAIN:
+ return 77;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED:
+ return 88;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH:
+ return 100;
+ default:
+ return -EINVAL;
+ }
+}
+
+/**
+ * nal_h264_level_from_v4l2() - Get level_idc for v4l2 h264 level
+ * @level: the level as &enum v4l2_mpeg_video_h264_level
+ *
+ * Convert the &enum v4l2_mpeg_video_h264_level to level_idc as specified in
+ * Rec. ITU-T H.264 (04/2017) A.3.2.
+ *
+ * Return: the level_idc for the passed level
+ */
+int nal_h264_level_from_v4l2(enum v4l2_mpeg_video_h264_level level)
+{
+ switch (level) {
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_0:
+ return 10;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1B:
+ return 9;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_1:
+ return 11;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_2:
+ return 12;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_3:
+ return 13;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_0:
+ return 20;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_1:
+ return 21;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_2:
+ return 22;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_0:
+ return 30;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_1:
+ return 31;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_2:
+ return 32;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_0:
+ return 40;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_1:
+ return 41;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_2:
+ return 42;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_5_0:
+ return 50;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_5_1:
+ return 51;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int rbsp_read_bits(struct rbsp *rbsp, int n, unsigned int *value);
+static int rbsp_write_bits(struct rbsp *rbsp, int n, unsigned int value);
+
+/*
+ * When reading or writing, an emulation_prevention_three_byte is detected
+ * only at the point where its two one bits have to be handled, i.e., after
+ * the six zero bits at the start of the byte have already been processed.
+ * Therefore, we do not handle the 0x3 byte as a whole, but only its two one
+ * bits followed by the six zero bits that belong to the next byte.
+ */
+#define EMULATION_PREVENTION_THREE_BYTE (0x3 << 6)
+
+static int add_emulation_prevention_three_byte(struct rbsp *rbsp)
+{
+ rbsp->num_consecutive_zeros = 0;
+ rbsp_write_bits(rbsp, 8, EMULATION_PREVENTION_THREE_BYTE);
+
+ return 0;
+}
+
+static int discard_emulation_prevention_three_byte(struct rbsp *rbsp)
+{
+ unsigned int tmp = 0;
+
+ rbsp->num_consecutive_zeros = 0;
+ rbsp_read_bits(rbsp, 8, &tmp);
+ if (tmp != EMULATION_PREVENTION_THREE_BYTE)
+ return -EINVAL;
+
+ return 0;
+}
+
+static inline int rbsp_read_bit(struct rbsp *rbsp)
+{
+ int shift;
+ int ofs;
+ int bit;
+ int err;
+
+ if (rbsp->num_consecutive_zeros == 22) {
+ err = discard_emulation_prevention_three_byte(rbsp);
+ if (err)
+ return err;
+ }
+
+ shift = 7 - (rbsp->pos % 8);
+ ofs = rbsp->pos / 8;
+ if (ofs >= rbsp->size)
+ return -EINVAL;
+
+ bit = (rbsp->data[ofs] >> shift) & 1;
+
+ rbsp->pos++;
+
+ if (bit == 1 ||
+ (rbsp->num_consecutive_zeros < 7 && (rbsp->pos % 8 == 0)))
+ rbsp->num_consecutive_zeros = 0;
+ else
+ rbsp->num_consecutive_zeros++;
+
+ return bit;
+}
+
+static inline int rbsp_write_bit(struct rbsp *rbsp, bool value)
+{
+ int shift;
+ int ofs;
+
+ if (rbsp->num_consecutive_zeros == 22)
+ add_emulation_prevention_three_byte(rbsp);
+
+ shift = 7 - (rbsp->pos % 8);
+ ofs = rbsp->pos / 8;
+ if (ofs >= rbsp->size)
+ return -EINVAL;
+
+ rbsp->data[ofs] &= ~(1 << shift);
+ rbsp->data[ofs] |= value << shift;
+
+ rbsp->pos++;
+
+ if (value == 1 ||
+ (rbsp->num_consecutive_zeros < 7 && (rbsp->pos % 8 == 0))) {
+ rbsp->num_consecutive_zeros = 0;
+ } else {
+ rbsp->num_consecutive_zeros++;
+ }
+
+ return 0;
+}
+
+static inline int rbsp_read_bits(struct rbsp *rbsp, int n, unsigned int *value)
+{
+ int i;
+ int bit;
+ unsigned int tmp = 0;
+
+ if (n > 8 * sizeof(*value))
+ return -EINVAL;
+
+ for (i = n; i > 0; i--) {
+ bit = rbsp_read_bit(rbsp);
+ if (bit < 0)
+ return bit;
+ tmp |= bit << (i - 1);
+ }
+
+ if (value)
+ *value = tmp;
+
+ return 0;
+}
+
+static int rbsp_write_bits(struct rbsp *rbsp, int n, unsigned int value)
+{
+ int ret;
+
+ if (n > 8 * sizeof(value))
+ return -EINVAL;
+
+ while (n--) {
+ ret = rbsp_write_bit(rbsp, (value >> n) & 1);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
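+/*
+ * Exp-Golomb coding (Rec. ITU-T H.264 (04/2017), section 9.1): a value v is
+ * coded as leading_zero_bits = floor(log2(v + 1)) zero bits, followed by the
+ * (leading_zero_bits + 1)-bit binary representation of v + 1. Reading
+ * reverses this: count the zeros, read as many bits after the terminating
+ * one bit, and compute v = 2^leading_zero_bits - 1 + suffix.
+ */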
+static int rbsp_read_uev(struct rbsp *rbsp, unsigned int *value)
+{
+ int leading_zero_bits = 0;
+ unsigned int tmp = 0;
+ int ret;
+
+ while ((ret = rbsp_read_bit(rbsp)) == 0)
+ leading_zero_bits++;
+ if (ret < 0)
+ return ret;
+
+ if (leading_zero_bits > 0) {
+ ret = rbsp_read_bits(rbsp, leading_zero_bits, &tmp);
+ if (ret)
+ return ret;
+ }
+
+ if (value)
+ *value = (1 << leading_zero_bits) - 1 + tmp;
+
+ return 0;
+}
+
+static int rbsp_write_uev(struct rbsp *rbsp, unsigned int *value)
+{
+ int ret;
+ int leading_zero_bits;
+
+ if (!value)
+ return -EINVAL;
+
+ leading_zero_bits = ilog2(*value + 1);
+
+ ret = rbsp_write_bits(rbsp, leading_zero_bits, 0);
+ if (ret)
+ return ret;
+
+ return rbsp_write_bits(rbsp, leading_zero_bits + 1, *value + 1);
+}
+
+static int rbsp_read_sev(struct rbsp *rbsp, int *value)
+{
+ int ret;
+ unsigned int tmp;
+
+ ret = rbsp_read_uev(rbsp, &tmp);
+ if (ret)
+ return ret;
+
+ if (value) {
+ if (tmp & 1)
+ *value = (tmp + 1) / 2;
+ else
+ *value = -(tmp / 2);
+ }
+
+ return 0;
+}
+
+static int rbsp_write_sev(struct rbsp *rbsp, int *value)
+{
+ unsigned int tmp;
+
+ if (!value)
+ return -EINVAL;
+
+ /* Rec. ITU-T H.264 (04/2017) 9.1.1: positive values map to odd code numbers */
+ if (*value > 0)
+ tmp = (2 * (*value)) - 1;
+ else
+ tmp = -2 * (*value);
+
+ return rbsp_write_uev(rbsp, &tmp);
+}
+
+static int __rbsp_write_bit(struct rbsp *rbsp, int *value)
+{
+ return rbsp_write_bit(rbsp, *value);
+}
+
+static int __rbsp_write_bits(struct rbsp *rbsp, int n, unsigned int *value)
+{
+ return rbsp_write_bits(rbsp, n, *value);
+}
+
+static struct nal_h264_ops write = {
+ .rbsp_bit = __rbsp_write_bit,
+ .rbsp_bits = __rbsp_write_bits,
+ .rbsp_uev = rbsp_write_uev,
+ .rbsp_sev = rbsp_write_sev,
+};
+
+static int __rbsp_read_bit(struct rbsp *rbsp, int *value)
+{
+ int tmp = rbsp_read_bit(rbsp);
+
+ if (tmp < 0)
+ return tmp;
+ *value = tmp;
+
+ return 0;
+}
+
+static struct nal_h264_ops read = {
+ .rbsp_bit = __rbsp_read_bit,
+ .rbsp_bits = rbsp_read_bits,
+ .rbsp_uev = rbsp_read_uev,
+ .rbsp_sev = rbsp_read_sev,
+};
+
+static inline void rbsp_bit(struct rbsp *rbsp, int *value)
+{
+ if (rbsp->error)
+ return;
+ rbsp->error = rbsp->ops->rbsp_bit(rbsp, value);
+}
+
+static inline void rbsp_bits(struct rbsp *rbsp, int n, unsigned int *value)
+{
+ if (rbsp->error)
+ return;
+ rbsp->error = rbsp->ops->rbsp_bits(rbsp, n, value);
+}
+
+static inline void rbsp_uev(struct rbsp *rbsp, unsigned int *value)
+{
+ if (rbsp->error)
+ return;
+ rbsp->error = rbsp->ops->rbsp_uev(rbsp, value);
+}
+
+static inline void rbsp_sev(struct rbsp *rbsp, int *value)
+{
+ if (rbsp->error)
+ return;
+ rbsp->error = rbsp->ops->rbsp_sev(rbsp, value);
+}
+
+static void nal_h264_rbsp_trailing_bits(struct rbsp *rbsp)
+{
+ unsigned int rbsp_stop_one_bit = 1;
+ unsigned int rbsp_alignment_zero_bit = 0;
+
+ rbsp_bit(rbsp, &rbsp_stop_one_bit);
+ rbsp_bits(rbsp, round_up(rbsp->pos, 8) - rbsp->pos,
+ &rbsp_alignment_zero_bit);
+}
+
+static void nal_h264_write_start_code_prefix(struct rbsp *rbsp)
+{
+ u8 *p = rbsp->data + DIV_ROUND_UP(rbsp->pos, 8);
+ int i = 4;
+
+ if (DIV_ROUND_UP(rbsp->pos, 8) + i > rbsp->size) {
+ rbsp->error = -EINVAL;
+ return;
+ }
+
+ p[0] = 0x00;
+ p[1] = 0x00;
+ p[2] = 0x00;
+ p[3] = 0x01;
+
+ rbsp->pos += i * 8;
+}
+
+static void nal_h264_read_start_code_prefix(struct rbsp *rbsp)
+{
+ u8 *p = rbsp->data + DIV_ROUND_UP(rbsp->pos, 8);
+ int i = 4;
+
+ if (DIV_ROUND_UP(rbsp->pos, 8) + i > rbsp->size) {
+ rbsp->error = -EINVAL;
+ return;
+ }
+
+ if (p[0] != 0x00 || p[1] != 0x00 || p[2] != 0x00 || p[3] != 0x01) {
+ rbsp->error = -EINVAL;
+ return;
+ }
+
+ rbsp->pos += i * 8;
+}
+
+static void nal_h264_write_filler_data(struct rbsp *rbsp)
+{
+ u8 *p = rbsp->data + DIV_ROUND_UP(rbsp->pos, 8);
+ int i;
+
+ /* Keep 1 byte extra for terminating the NAL unit */
+ i = rbsp->size - DIV_ROUND_UP(rbsp->pos, 8) - 1;
+ memset(p, 0xff, i);
+ rbsp->pos += i * 8;
+}
+
+static void nal_h264_read_filler_data(struct rbsp *rbsp)
+{
+ u8 *p = rbsp->data + DIV_ROUND_UP(rbsp->pos, 8);
+
+ while (*p == 0xff) {
+ if (DIV_ROUND_UP(rbsp->pos, 8) > rbsp->size) {
+ rbsp->error = -EINVAL;
+ return;
+ }
+
+ p++;
+ rbsp->pos += 8;
+ }
+}
+
+static void nal_h264_rbsp_hrd_parameters(struct rbsp *rbsp,
+ struct nal_h264_hrd_parameters *hrd)
+{
+ unsigned int i;
+
+ if (!hrd) {
+ rbsp->error = -EINVAL;
+ return;
+ }
+
+ rbsp_uev(rbsp, &hrd->cpb_cnt_minus1);
+ rbsp_bits(rbsp, 4, &hrd->bit_rate_scale);
+ rbsp_bits(rbsp, 4, &hrd->cpb_size_scale);
+
+ for (i = 0; i <= hrd->cpb_cnt_minus1; i++) {
+ rbsp_uev(rbsp, &hrd->bit_rate_value_minus1[i]);
+ rbsp_uev(rbsp, &hrd->cpb_size_value_minus1[i]);
+ rbsp_bit(rbsp, &hrd->cbr_flag[i]);
+ }
+
+ rbsp_bits(rbsp, 5, &hrd->initial_cpb_removal_delay_length_minus1);
+ rbsp_bits(rbsp, 5, &hrd->cpb_removal_delay_length_minus1);
+ rbsp_bits(rbsp, 5, &hrd->dpb_output_delay_length_minus1);
+ rbsp_bits(rbsp, 5, &hrd->time_offset_length);
+}
+
+static void nal_h264_rbsp_vui_parameters(struct rbsp *rbsp,
+ struct nal_h264_vui_parameters *vui)
+{
+ if (!vui) {
+ rbsp->error = -EINVAL;
+ return;
+ }
+
+ rbsp_bit(rbsp, &vui->aspect_ratio_info_present_flag);
+ if (vui->aspect_ratio_info_present_flag) {
+ rbsp_bits(rbsp, 8, &vui->aspect_ratio_idc);
+ if (vui->aspect_ratio_idc == 255) {
+ rbsp_bits(rbsp, 16, &vui->sar_width);
+ rbsp_bits(rbsp, 16, &vui->sar_height);
+ }
+ }
+
+ rbsp_bit(rbsp, &vui->overscan_info_present_flag);
+ if (vui->overscan_info_present_flag)
+ rbsp_bit(rbsp, &vui->overscan_appropriate_flag);
+
+ rbsp_bit(rbsp, &vui->video_signal_type_present_flag);
+ if (vui->video_signal_type_present_flag) {
+ rbsp_bits(rbsp, 3, &vui->video_format);
+ rbsp_bit(rbsp, &vui->video_full_range_flag);
+
+ rbsp_bit(rbsp, &vui->colour_description_present_flag);
+ if (vui->colour_description_present_flag) {
+ rbsp_bits(rbsp, 8, &vui->colour_primaries);
+ rbsp_bits(rbsp, 8, &vui->transfer_characteristics);
+ rbsp_bits(rbsp, 8, &vui->matrix_coefficients);
+ }
+ }
+
+ rbsp_bit(rbsp, &vui->chroma_loc_info_present_flag);
+ if (vui->chroma_loc_info_present_flag) {
+ rbsp_uev(rbsp, &vui->chroma_sample_loc_type_top_field);
+ rbsp_uev(rbsp, &vui->chroma_sample_loc_type_bottom_field);
+ }
+
+ rbsp_bit(rbsp, &vui->timing_info_present_flag);
+ if (vui->timing_info_present_flag) {
+ rbsp_bits(rbsp, 32, &vui->num_units_in_tick);
+ rbsp_bits(rbsp, 32, &vui->time_scale);
+ rbsp_bit(rbsp, &vui->fixed_frame_rate_flag);
+ }
+
+ rbsp_bit(rbsp, &vui->nal_hrd_parameters_present_flag);
+ if (vui->nal_hrd_parameters_present_flag)
+ nal_h264_rbsp_hrd_parameters(rbsp, &vui->nal_hrd_parameters);
+
+ rbsp_bit(rbsp, &vui->vcl_hrd_parameters_present_flag);
+ if (vui->vcl_hrd_parameters_present_flag)
+ nal_h264_rbsp_hrd_parameters(rbsp, &vui->vcl_hrd_parameters);
+
+ if (vui->nal_hrd_parameters_present_flag ||
+ vui->vcl_hrd_parameters_present_flag)
+ rbsp_bit(rbsp, &vui->low_delay_hrd_flag);
+
+ rbsp_bit(rbsp, &vui->pic_struct_present_flag);
+
+ rbsp_bit(rbsp, &vui->bitstream_restriction_flag);
+ if (vui->bitstream_restriction_flag) {
+ rbsp_bit(rbsp, &vui->motion_vectors_over_pic_boundaries_flag);
+ rbsp_uev(rbsp, &vui->max_bytes_per_pic_denom);
+ rbsp_uev(rbsp, &vui->max_bits_per_mb_denom);
+ rbsp_uev(rbsp, &vui->log2_max_mv_length_horizontal);
+ rbsp_uev(rbsp, &vui->log21_max_mv_length_vertical);
+ rbsp_uev(rbsp, &vui->max_num_reorder_frames);
+ rbsp_uev(rbsp, &vui->max_dec_frame_buffering);
+ }
+}
+
+static void nal_h264_rbsp_sps(struct rbsp *rbsp, struct nal_h264_sps *sps)
+{
+ unsigned int i;
+
+ if (!sps) {
+ rbsp->error = -EINVAL;
+ return;
+ }
+
+ rbsp_bits(rbsp, 8, &sps->profile_idc);
+ rbsp_bit(rbsp, &sps->constraint_set0_flag);
+ rbsp_bit(rbsp, &sps->constraint_set1_flag);
+ rbsp_bit(rbsp, &sps->constraint_set2_flag);
+ rbsp_bit(rbsp, &sps->constraint_set3_flag);
+ rbsp_bit(rbsp, &sps->constraint_set4_flag);
+ rbsp_bit(rbsp, &sps->constraint_set5_flag);
+ rbsp_bits(rbsp, 2, &sps->reserved_zero_2bits);
+ rbsp_bits(rbsp, 8, &sps->level_idc);
+
+ rbsp_uev(rbsp, &sps->seq_parameter_set_id);
+
+ if (sps->profile_idc == 100 || sps->profile_idc == 110 ||
+ sps->profile_idc == 122 || sps->profile_idc == 244 ||
+ sps->profile_idc == 44 || sps->profile_idc == 83 ||
+ sps->profile_idc == 86 || sps->profile_idc == 118 ||
+ sps->profile_idc == 128 || sps->profile_idc == 138 ||
+ sps->profile_idc == 139 || sps->profile_idc == 134 ||
+ sps->profile_idc == 135) {
+ rbsp_uev(rbsp, &sps->chroma_format_idc);
+
+ if (sps->chroma_format_idc == 3)
+ rbsp_bit(rbsp, &sps->separate_colour_plane_flag);
+ rbsp_uev(rbsp, &sps->bit_depth_luma_minus8);
+ rbsp_uev(rbsp, &sps->bit_depth_chroma_minus8);
+ rbsp_bit(rbsp, &sps->qpprime_y_zero_transform_bypass_flag);
+ rbsp_bit(rbsp, &sps->seq_scaling_matrix_present_flag);
+ if (sps->seq_scaling_matrix_present_flag)
+ rbsp->error = -EINVAL;
+ }
+
+ rbsp_uev(rbsp, &sps->log2_max_frame_num_minus4);
+
+ rbsp_uev(rbsp, &sps->pic_order_cnt_type);
+ switch (sps->pic_order_cnt_type) {
+ case 0:
+ rbsp_uev(rbsp, &sps->log2_max_pic_order_cnt_lsb_minus4);
+ break;
+ case 1:
+ rbsp_bit(rbsp, &sps->delta_pic_order_always_zero_flag);
+ rbsp_sev(rbsp, &sps->offset_for_non_ref_pic);
+ rbsp_sev(rbsp, &sps->offset_for_top_to_bottom_field);
+
+ rbsp_uev(rbsp, &sps->num_ref_frames_in_pic_order_cnt_cycle);
+ for (i = 0; i < sps->num_ref_frames_in_pic_order_cnt_cycle; i++)
+ rbsp_sev(rbsp, &sps->offset_for_ref_frame[i]);
+ break;
+ default:
+ rbsp->error = -EINVAL;
+ break;
+ }
+
+ rbsp_uev(rbsp, &sps->max_num_ref_frames);
+ rbsp_bit(rbsp, &sps->gaps_in_frame_num_value_allowed_flag);
+ rbsp_uev(rbsp, &sps->pic_width_in_mbs_minus1);
+ rbsp_uev(rbsp, &sps->pic_height_in_map_units_minus1);
+
+ rbsp_bit(rbsp, &sps->frame_mbs_only_flag);
+ if (!sps->frame_mbs_only_flag)
+ rbsp_bit(rbsp, &sps->mb_adaptive_frame_field_flag);
+
+ rbsp_bit(rbsp, &sps->direct_8x8_inference_flag);
+
+ rbsp_bit(rbsp, &sps->frame_cropping_flag);
+ if (sps->frame_cropping_flag) {
+ rbsp_uev(rbsp, &sps->crop_left);
+ rbsp_uev(rbsp, &sps->crop_right);
+ rbsp_uev(rbsp, &sps->crop_top);
+ rbsp_uev(rbsp, &sps->crop_bottom);
+ }
+
+ rbsp_bit(rbsp, &sps->vui_parameters_present_flag);
+ if (sps->vui_parameters_present_flag)
+ nal_h264_rbsp_vui_parameters(rbsp, &sps->vui);
+}
+
+static void nal_h264_rbsp_pps(struct rbsp *rbsp, struct nal_h264_pps *pps)
+{
+ int i;
+
+ rbsp_uev(rbsp, &pps->pic_parameter_set_id);
+ rbsp_uev(rbsp, &pps->seq_parameter_set_id);
+ rbsp_bit(rbsp, &pps->entropy_coding_mode_flag);
+ rbsp_bit(rbsp, &pps->bottom_field_pic_order_in_frame_present_flag);
+ rbsp_uev(rbsp, &pps->num_slice_groups_minus1);
+ if (pps->num_slice_groups_minus1 > 0) {
+ rbsp_uev(rbsp, &pps->slice_group_map_type);
+ switch (pps->slice_group_map_type) {
+ case 0:
+			for (i = 0; i <= pps->num_slice_groups_minus1; i++)
+ rbsp_uev(rbsp, &pps->run_length_minus1[i]);
+ break;
+ case 2:
+ for (i = 0; i < pps->num_slice_groups_minus1; i++) {
+ rbsp_uev(rbsp, &pps->top_left[i]);
+ rbsp_uev(rbsp, &pps->bottom_right[i]);
+ }
+ break;
+ case 3: case 4: case 5:
+ rbsp_bit(rbsp, &pps->slice_group_change_direction_flag);
+ rbsp_uev(rbsp, &pps->slice_group_change_rate_minus1);
+ break;
+ case 6:
+ rbsp_uev(rbsp, &pps->pic_size_in_map_units_minus1);
+ for (i = 0; i < pps->pic_size_in_map_units_minus1; i++)
+ rbsp_bits(rbsp,
+ order_base_2(pps->num_slice_groups_minus1 + 1),
+ &pps->slice_group_id[i]);
+ break;
+ default:
+ break;
+ }
+ }
+ rbsp_uev(rbsp, &pps->num_ref_idx_l0_default_active_minus1);
+ rbsp_uev(rbsp, &pps->num_ref_idx_l1_default_active_minus1);
+ rbsp_bit(rbsp, &pps->weighted_pred_flag);
+ rbsp_bits(rbsp, 2, &pps->weighted_bipred_idc);
+ rbsp_sev(rbsp, &pps->pic_init_qp_minus26);
+ rbsp_sev(rbsp, &pps->pic_init_qs_minus26);
+ rbsp_sev(rbsp, &pps->chroma_qp_index_offset);
+ rbsp_bit(rbsp, &pps->deblocking_filter_control_present_flag);
+ rbsp_bit(rbsp, &pps->constrained_intra_pred_flag);
+ rbsp_bit(rbsp, &pps->redundant_pic_cnt_present_flag);
+ if (/* more_rbsp_data() */ false) {
+ rbsp_bit(rbsp, &pps->transform_8x8_mode_flag);
+ rbsp_bit(rbsp, &pps->pic_scaling_matrix_present_flag);
+ if (pps->pic_scaling_matrix_present_flag)
+ rbsp->error = -EINVAL;
+ rbsp_sev(rbsp, &pps->second_chroma_qp_index_offset);
+ }
+}
+
+/**
+ * nal_h264_write_sps() - Write SPS NAL unit into RBSP format
+ * @dev: device pointer
+ * @dest: the buffer that is filled with RBSP data
+ * @n: maximum size of @dest in bytes
+ * @sps: &struct nal_h264_sps to convert to RBSP
+ *
+ * Convert @sps to RBSP data and write it into @dest.
+ *
+ * The size of the SPS NAL unit is not known in advance and this function will
+ * fail if @dest does not hold sufficient space for the SPS NAL unit.
+ *
+ * Return: number of bytes written to @dest or negative error code
+ */
+ssize_t nal_h264_write_sps(const struct device *dev,
+ void *dest, size_t n, struct nal_h264_sps *sps)
+{
+ struct rbsp rbsp;
+ unsigned int forbidden_zero_bit = 0;
+ unsigned int nal_ref_idc = 0;
+ unsigned int nal_unit_type = SEQUENCE_PARAMETER_SET;
+
+ if (!dest)
+ return -EINVAL;
+
+ rbsp_init(&rbsp, dest, n, &write);
+
+ nal_h264_write_start_code_prefix(&rbsp);
+
+ rbsp_bit(&rbsp, &forbidden_zero_bit);
+ rbsp_bits(&rbsp, 2, &nal_ref_idc);
+ rbsp_bits(&rbsp, 5, &nal_unit_type);
+
+ nal_h264_rbsp_sps(&rbsp, sps);
+
+ nal_h264_rbsp_trailing_bits(&rbsp);
+
+ if (rbsp.error)
+ return rbsp.error;
+
+ return DIV_ROUND_UP(rbsp.pos, 8);
+}
+EXPORT_SYMBOL_GPL(nal_h264_write_sps);
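For reference, a minimal sketch of how a caller might use nal_h264_write_sps(); the function name example_write_sps() and the profile/level values below are illustrative assumptions, not part of this patch.

#include <linux/device.h>
#include "nal-h264.h"

/* Hedged example: serialize a Baseline-profile SPS into a caller buffer. */
static ssize_t example_write_sps(struct device *dev, void *buf, size_t size)
{
	struct nal_h264_sps sps = {
		.profile_idc = 66,	/* assumed: Baseline profile */
		.level_idc = 30,	/* assumed: Level 3.0 */
		.pic_order_cnt_type = 0,
		.frame_mbs_only_flag = 1,
	};

	/*
	 * Returns the number of bytes written to buf (including the start
	 * code prefix) or a negative error code, e.g. if buf is too small.
	 */
	return nal_h264_write_sps(dev, buf, size, &sps);
}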
+
+/**
+ * nal_h264_read_sps() - Read SPS NAL unit from RBSP format
+ * @dev: device pointer
+ * @sps: the &struct nal_h264_sps to fill from the RBSP data
+ * @src: the buffer that contains the RBSP data
+ * @n: size of @src in bytes
+ *
+ * Read RBSP data from @src and use it to fill @sps.
+ *
+ * Return: number of bytes read from @src or negative error code
+ */
+ssize_t nal_h264_read_sps(const struct device *dev,
+ struct nal_h264_sps *sps, void *src, size_t n)
+{
+ struct rbsp rbsp;
+ unsigned int forbidden_zero_bit;
+ unsigned int nal_ref_idc;
+ unsigned int nal_unit_type;
+
+ if (!src)
+ return -EINVAL;
+
+ rbsp_init(&rbsp, src, n, &read);
+
+ nal_h264_read_start_code_prefix(&rbsp);
+
+ rbsp_bit(&rbsp, &forbidden_zero_bit);
+ rbsp_bits(&rbsp, 2, &nal_ref_idc);
+ rbsp_bits(&rbsp, 5, &nal_unit_type);
+
+ if (rbsp.error ||
+ forbidden_zero_bit != 0 ||
+ nal_ref_idc != 0 ||
+ nal_unit_type != SEQUENCE_PARAMETER_SET)
+ return -EINVAL;
+
+ nal_h264_rbsp_sps(&rbsp, sps);
+
+ nal_h264_rbsp_trailing_bits(&rbsp);
+
+ if (rbsp.error)
+ return rbsp.error;
+
+ return DIV_ROUND_UP(rbsp.pos, 8);
+}
+EXPORT_SYMBOL_GPL(nal_h264_read_sps);
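Conversely, a hedged sketch of the read direction; example_parse_sps() and its use of the width field are illustrative and not taken from this patch.

#include <linux/device.h>
#include "nal-h264.h"

/* Hedged example: extract the coded width in macroblocks from an SPS. */
static int example_parse_sps(struct device *dev, void *data, size_t size,
			     unsigned int *width_mbs)
{
	struct nal_h264_sps sps = { };
	ssize_t ret;

	ret = nal_h264_read_sps(dev, &sps, data, size);
	if (ret < 0)
		return ret;

	/* pic_width_in_mbs_minus1 holds the width in macroblocks minus one. */
	*width_mbs = sps.pic_width_in_mbs_minus1 + 1;

	return 0;
}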
+
+/**
+ * nal_h264_write_pps() - Write PPS NAL unit into RBSP format
+ * @dev: device pointer
+ * @dest: the buffer that is filled with RBSP data
+ * @n: maximum size of @dest in bytes
+ * @pps: &struct nal_h264_pps to convert to RBSP
+ *
+ * Convert @pps to RBSP data and write it into @dest.
+ *
+ * The size of the PPS NAL unit is not known in advance and this function will
+ * fail if @dest does not hold sufficient space for the PPS NAL unit.
+ *
+ * Return: number of bytes written to @dest or negative error code
+ */
+ssize_t nal_h264_write_pps(const struct device *dev,
+ void *dest, size_t n, struct nal_h264_pps *pps)
+{
+ struct rbsp rbsp;
+ unsigned int forbidden_zero_bit = 0;
+ unsigned int nal_ref_idc = 0;
+ unsigned int nal_unit_type = PICTURE_PARAMETER_SET;
+
+ if (!dest)
+ return -EINVAL;
+
+ rbsp_init(&rbsp, dest, n, &write);
+
+ nal_h264_write_start_code_prefix(&rbsp);
+
+ /* NAL unit header */
+ rbsp_bit(&rbsp, &forbidden_zero_bit);
+ rbsp_bits(&rbsp, 2, &nal_ref_idc);
+ rbsp_bits(&rbsp, 5, &nal_unit_type);
+
+ nal_h264_rbsp_pps(&rbsp, pps);
+
+ nal_h264_rbsp_trailing_bits(&rbsp);
+
+ if (rbsp.error)
+ return rbsp.error;
+
+ return DIV_ROUND_UP(rbsp.pos, 8);
+}
+EXPORT_SYMBOL_GPL(nal_h264_write_pps);
+
+/**
+ * nal_h264_read_pps() - Read PPS NAL unit from RBSP format
+ * @dev: device pointer
+ * @pps: the &struct nal_h264_pps to fill from the RBSP data
+ * @src: the buffer that contains the RBSP data
+ * @n: size of @src in bytes
+ *
+ * Read RBSP data from @src and use it to fill @pps.
+ *
+ * Return: number of bytes read from @src or negative error code
+ */
+ssize_t nal_h264_read_pps(const struct device *dev,
+ struct nal_h264_pps *pps, void *src, size_t n)
+{
+ struct rbsp rbsp;
+
+ if (!src)
+ return -EINVAL;
+
+ rbsp_init(&rbsp, src, n, &read);
+
+ nal_h264_read_start_code_prefix(&rbsp);
+
+ /* NAL unit header */
+ rbsp.pos += 8;
+
+ nal_h264_rbsp_pps(&rbsp, pps);
+
+ nal_h264_rbsp_trailing_bits(&rbsp);
+
+ if (rbsp.error)
+ return rbsp.error;
+
+ return DIV_ROUND_UP(rbsp.pos, 8);
+}
+EXPORT_SYMBOL_GPL(nal_h264_read_pps);
+
+/**
+ * nal_h264_write_filler() - Write filler data RBSP
+ * @dev: device pointer
+ * @dest: buffer to fill with filler data
+ * @n: size of the buffer to fill with filler data
+ *
+ * Write a filler data RBSP to @dest with a size of @n bytes and return the
+ * number of written filler data bytes.
+ *
+ * Use this function to generate dummy data in an RBSP data stream that can be
+ * safely ignored by H.264 decoders.
+ *
+ * The RBSP format of the filler data is specified in Rec. ITU-T H.264
+ * (04/2017) 7.3.2.7 Filler data RBSP syntax.
+ *
+ * Return: number of filler data bytes (including marker) or negative error
+ */
+ssize_t nal_h264_write_filler(const struct device *dev, void *dest, size_t n)
+{
+ struct rbsp rbsp;
+ unsigned int forbidden_zero_bit = 0;
+ unsigned int nal_ref_idc = 0;
+ unsigned int nal_unit_type = FILLER_DATA;
+
+ if (!dest)
+ return -EINVAL;
+
+ rbsp_init(&rbsp, dest, n, &write);
+
+ nal_h264_write_start_code_prefix(&rbsp);
+
+ rbsp_bit(&rbsp, &forbidden_zero_bit);
+ rbsp_bits(&rbsp, 2, &nal_ref_idc);
+ rbsp_bits(&rbsp, 5, &nal_unit_type);
+
+ nal_h264_write_filler_data(&rbsp);
+
+ nal_h264_rbsp_trailing_bits(&rbsp);
+
+ return DIV_ROUND_UP(rbsp.pos, 8);
+}
+EXPORT_SYMBOL_GPL(nal_h264_write_filler);
+
+/**
+ * nal_h264_read_filler() - Read filler data RBSP
+ * @dev: device pointer
+ * @src: buffer with RBSP data that is read
+ * @n: maximum size of @src that shall be read
+ *
+ * Read a filler data RBSP from @src up to a maximum size of @n bytes and
+ * return the size of the filler data in bytes including the marker.
+ *
+ * This function is used to parse filler data and skip the respective bytes in
+ * the RBSP data.
+ *
+ * The RBSP format of the filler data is specified in Rec. ITU-T H.264
+ * (04/2017) 7.3.2.7 Filler data RBSP syntax.
+ *
+ * Return: number of filler data bytes (including marker) or negative error
+ */
+ssize_t nal_h264_read_filler(const struct device *dev, void *src, size_t n)
+{
+ struct rbsp rbsp;
+ unsigned int forbidden_zero_bit;
+ unsigned int nal_ref_idc;
+ unsigned int nal_unit_type;
+
+ if (!src)
+ return -EINVAL;
+
+ rbsp_init(&rbsp, src, n, &read);
+
+ nal_h264_read_start_code_prefix(&rbsp);
+
+ rbsp_bit(&rbsp, &forbidden_zero_bit);
+ rbsp_bits(&rbsp, 2, &nal_ref_idc);
+ rbsp_bits(&rbsp, 5, &nal_unit_type);
+
+ if (rbsp.error)
+ return rbsp.error;
+ if (forbidden_zero_bit != 0 ||
+ nal_ref_idc != 0 ||
+ nal_unit_type != FILLER_DATA)
+ return -EINVAL;
+
+ nal_h264_read_filler_data(&rbsp);
+ nal_h264_rbsp_trailing_bits(&rbsp);
+
+ if (rbsp.error)
+ return rbsp.error;
+
+ return DIV_ROUND_UP(rbsp.pos, 8);
+}
+EXPORT_SYMBOL_GPL(nal_h264_read_filler);
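The filler helpers are typically combined with the parameter set writers when a fixed-size header buffer must be fully consumed. A minimal sketch under that assumption; example_pad_headers() and its buffer handling are hypothetical.

#include <linux/device.h>
#include "nal-h264.h"

/* Hedged example: write SPS and PPS, then pad the rest of the buffer. */
static ssize_t example_pad_headers(struct device *dev, void *buf, size_t size,
				   struct nal_h264_sps *sps,
				   struct nal_h264_pps *pps)
{
	ssize_t used = 0;
	ssize_t ret;

	ret = nal_h264_write_sps(dev, buf, size, sps);
	if (ret < 0)
		return ret;
	used += ret;

	ret = nal_h264_write_pps(dev, buf + used, size - used, pps);
	if (ret < 0)
		return ret;
	used += ret;

	/* Fill the remaining space with a filler data NAL unit. */
	ret = nal_h264_write_filler(dev, buf + used, size - used);
	if (ret < 0)
		return ret;

	return used + ret;
}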
diff --git a/drivers/staging/media/allegro-dvt/nal-h264.h b/drivers/staging/media/allegro-dvt/nal-h264.h
new file mode 100644
index 000000000000..2ba7cbced7a5
--- /dev/null
+++ b/drivers/staging/media/allegro-dvt/nal-h264.h
@@ -0,0 +1,208 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 Pengutronix, Michael Tretter <kernel@pengutronix.de>
+ *
+ * Convert NAL units between raw byte sequence payloads (RBSP) and C structs.
+ */
+
+#ifndef __NAL_H264_H__
+#define __NAL_H264_H__
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+/**
+ * struct nal_h264_hrd_parameters - HRD parameters
+ *
+ * C struct representation of the HRD parameters as defined by Rec. ITU-T
+ * H.264 (04/2017) E.1.2 HRD parameters syntax.
+ */
+struct nal_h264_hrd_parameters {
+ unsigned int cpb_cnt_minus1;
+ unsigned int bit_rate_scale;
+ unsigned int cpb_size_scale;
+ struct {
+ int bit_rate_value_minus1[16];
+ int cpb_size_value_minus1[16];
+ unsigned int cbr_flag[16];
+ };
+ unsigned int initial_cpb_removal_delay_length_minus1;
+ unsigned int cpb_removal_delay_length_minus1;
+ unsigned int dpb_output_delay_length_minus1;
+ unsigned int time_offset_length;
+};
+
+/**
+ * struct nal_h264_vui_parameters - VUI parameters
+ *
+ * C struct representation of the VUI parameters as defined by Rec. ITU-T
+ * H.264 (04/2017) E.1.1 VUI parameters syntax.
+ */
+struct nal_h264_vui_parameters {
+ unsigned int aspect_ratio_info_present_flag;
+ struct {
+ unsigned int aspect_ratio_idc;
+ unsigned int sar_width;
+ unsigned int sar_height;
+ };
+ unsigned int overscan_info_present_flag;
+ unsigned int overscan_appropriate_flag;
+ unsigned int video_signal_type_present_flag;
+ struct {
+ unsigned int video_format;
+ unsigned int video_full_range_flag;
+ unsigned int colour_description_present_flag;
+ struct {
+ unsigned int colour_primaries;
+ unsigned int transfer_characteristics;
+ unsigned int matrix_coefficients;
+ };
+ };
+ unsigned int chroma_loc_info_present_flag;
+ struct {
+ unsigned int chroma_sample_loc_type_top_field;
+ unsigned int chroma_sample_loc_type_bottom_field;
+ };
+ unsigned int timing_info_present_flag;
+ struct {
+ unsigned int num_units_in_tick;
+ unsigned int time_scale;
+ unsigned int fixed_frame_rate_flag;
+ };
+ unsigned int nal_hrd_parameters_present_flag;
+ struct nal_h264_hrd_parameters nal_hrd_parameters;
+ unsigned int vcl_hrd_parameters_present_flag;
+ struct nal_h264_hrd_parameters vcl_hrd_parameters;
+ unsigned int low_delay_hrd_flag;
+ unsigned int pic_struct_present_flag;
+ unsigned int bitstream_restriction_flag;
+ struct {
+ unsigned int motion_vectors_over_pic_boundaries_flag;
+ unsigned int max_bytes_per_pic_denom;
+ unsigned int max_bits_per_mb_denom;
+ unsigned int log2_max_mv_length_horizontal;
+		unsigned int log2_max_mv_length_vertical;
+ unsigned int max_num_reorder_frames;
+ unsigned int max_dec_frame_buffering;
+ };
+};
+
+/**
+ * struct nal_h264_sps - Sequence parameter set
+ *
+ * C struct representation of the sequence parameter set NAL unit as defined by
+ * Rec. ITU-T H.264 (04/2017) 7.3.2.1.1 Sequence parameter set data syntax.
+ */
+struct nal_h264_sps {
+ unsigned int profile_idc;
+ unsigned int constraint_set0_flag;
+ unsigned int constraint_set1_flag;
+ unsigned int constraint_set2_flag;
+ unsigned int constraint_set3_flag;
+ unsigned int constraint_set4_flag;
+ unsigned int constraint_set5_flag;
+ unsigned int reserved_zero_2bits;
+ unsigned int level_idc;
+ unsigned int seq_parameter_set_id;
+ struct {
+ unsigned int chroma_format_idc;
+ unsigned int separate_colour_plane_flag;
+ unsigned int bit_depth_luma_minus8;
+ unsigned int bit_depth_chroma_minus8;
+ unsigned int qpprime_y_zero_transform_bypass_flag;
+ unsigned int seq_scaling_matrix_present_flag;
+ };
+ unsigned int log2_max_frame_num_minus4;
+ unsigned int pic_order_cnt_type;
+ union {
+ unsigned int log2_max_pic_order_cnt_lsb_minus4;
+ struct {
+ unsigned int delta_pic_order_always_zero_flag;
+ int offset_for_non_ref_pic;
+ int offset_for_top_to_bottom_field;
+ unsigned int num_ref_frames_in_pic_order_cnt_cycle;
+ int offset_for_ref_frame[255];
+ };
+ };
+ unsigned int max_num_ref_frames;
+ unsigned int gaps_in_frame_num_value_allowed_flag;
+ unsigned int pic_width_in_mbs_minus1;
+ unsigned int pic_height_in_map_units_minus1;
+ unsigned int frame_mbs_only_flag;
+ unsigned int mb_adaptive_frame_field_flag;
+ unsigned int direct_8x8_inference_flag;
+ unsigned int frame_cropping_flag;
+ struct {
+ unsigned int crop_left;
+ unsigned int crop_right;
+ unsigned int crop_top;
+ unsigned int crop_bottom;
+ };
+ unsigned int vui_parameters_present_flag;
+ struct nal_h264_vui_parameters vui;
+};
+
+/**
+ * struct nal_h264_pps - Picture parameter set
+ *
+ * C struct representation of the picture parameter set NAL unit as defined by
+ * Rec. ITU-T H.264 (04/2017) 7.3.2.2 Picture parameter set RBSP syntax.
+ */
+struct nal_h264_pps {
+ unsigned int pic_parameter_set_id;
+ unsigned int seq_parameter_set_id;
+ unsigned int entropy_coding_mode_flag;
+ unsigned int bottom_field_pic_order_in_frame_present_flag;
+ unsigned int num_slice_groups_minus1;
+ unsigned int slice_group_map_type;
+ union {
+ unsigned int run_length_minus1[8];
+ struct {
+ unsigned int top_left[8];
+ unsigned int bottom_right[8];
+ };
+ struct {
+ unsigned int slice_group_change_direction_flag;
+ unsigned int slice_group_change_rate_minus1;
+ };
+ struct {
+ unsigned int pic_size_in_map_units_minus1;
+ unsigned int slice_group_id[8];
+ };
+ };
+ unsigned int num_ref_idx_l0_default_active_minus1;
+ unsigned int num_ref_idx_l1_default_active_minus1;
+ unsigned int weighted_pred_flag;
+ unsigned int weighted_bipred_idc;
+ int pic_init_qp_minus26;
+ int pic_init_qs_minus26;
+ int chroma_qp_index_offset;
+ unsigned int deblocking_filter_control_present_flag;
+ unsigned int constrained_intra_pred_flag;
+ unsigned int redundant_pic_cnt_present_flag;
+ struct {
+ unsigned int transform_8x8_mode_flag;
+ unsigned int pic_scaling_matrix_present_flag;
+ int second_chroma_qp_index_offset;
+ };
+};
+
+int nal_h264_profile_from_v4l2(enum v4l2_mpeg_video_h264_profile profile);
+int nal_h264_level_from_v4l2(enum v4l2_mpeg_video_h264_level level);
+
+ssize_t nal_h264_write_sps(const struct device *dev,
+ void *dest, size_t n, struct nal_h264_sps *sps);
+ssize_t nal_h264_read_sps(const struct device *dev,
+ struct nal_h264_sps *sps, void *src, size_t n);
+void nal_h264_print_sps(const struct device *dev, struct nal_h264_sps *sps);
+
+ssize_t nal_h264_write_pps(const struct device *dev,
+ void *dest, size_t n, struct nal_h264_pps *pps);
+ssize_t nal_h264_read_pps(const struct device *dev,
+ struct nal_h264_pps *pps, void *src, size_t n);
+void nal_h264_print_pps(const struct device *dev, struct nal_h264_pps *pps);
+
+ssize_t nal_h264_write_filler(const struct device *dev, void *dest, size_t n);
+ssize_t nal_h264_read_filler(const struct device *dev, void *src, size_t n);
+
+#endif /* __NAL_H264_H__ */
diff --git a/drivers/staging/media/bcm2048/radio-bcm2048.c b/drivers/staging/media/bcm2048/radio-bcm2048.c
index 09903ffb13ba..2c60a1fb6350 100644
--- a/drivers/staging/media/bcm2048/radio-bcm2048.c
+++ b/drivers/staging/media/bcm2048/radio-bcm2048.c
@@ -2310,11 +2310,6 @@ static int bcm2048_vidioc_querycap(struct file *file, void *priv,
strscpy(capability->card, BCM2048_DRIVER_CARD,
sizeof(capability->card));
snprintf(capability->bus_info, 32, "I2C: 0x%X", bdev->client->addr);
- capability->device_caps = V4L2_CAP_TUNER | V4L2_CAP_RADIO |
- V4L2_CAP_HW_FREQ_SEEK;
- capability->capabilities = capability->device_caps |
- V4L2_CAP_DEVICE_CAPS;
-
return 0;
}
@@ -2570,6 +2565,8 @@ static const struct video_device bcm2048_viddev_template = {
.name = BCM2048_DRIVER_NAME,
.release = video_device_release_empty,
.ioctl_ops = &bcm2048_ioctl_ops,
+ .device_caps = V4L2_CAP_TUNER | V4L2_CAP_RADIO |
+ V4L2_CAP_HW_FREQ_SEEK,
};
/*
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipe.c b/drivers/staging/media/davinci_vpfe/dm365_ipipe.c
index 30e2edc0cec5..52397ad0e3e2 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_ipipe.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_ipipe.c
@@ -1251,10 +1251,10 @@ static int ipipe_s_config(struct v4l2_subdev *sd, struct vpfe_ipipe_config *cfg)
struct vpfe_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
unsigned int i;
int rval = 0;
+ struct ipipe_module_params *params;
for (i = 0; i < ARRAY_SIZE(ipipe_modules); i++) {
const struct ipipe_module_if *module_if;
- struct ipipe_module_params *params;
void *from, *to;
size_t size;
@@ -1265,25 +1265,30 @@ static int ipipe_s_config(struct v4l2_subdev *sd, struct vpfe_ipipe_config *cfg)
from = *(void **)((void *)cfg + module_if->config_offset);
params = kmalloc(sizeof(*params), GFP_KERNEL);
+ if (!params)
+ return -ENOMEM;
to = (void *)params + module_if->param_offset;
size = module_if->param_size;
if (to && from && size) {
if (copy_from_user(to, (void __user *)from, size)) {
rval = -EFAULT;
- break;
+ goto error_free;
}
rval = module_if->set(ipipe, to);
if (rval)
- goto error;
+ goto error_free;
} else if (to && !from && size) {
rval = module_if->set(ipipe, NULL);
if (rval)
- goto error;
+ goto error_free;
}
kfree(params);
}
-error:
+ return rval;
+
+error_free:
+ kfree(params);
return rval;
}
@@ -1772,7 +1777,7 @@ vpfe_ipipe_init(struct vpfe_ipipe_device *ipipe, struct platform_device *pdev)
struct media_pad *pads = &ipipe->pads[0];
struct v4l2_subdev *sd = &ipipe->subdev;
struct media_entity *me = &sd->entity;
- struct resource *res, *memres;
+ struct resource *res, *res2, *memres;
res = platform_get_resource(pdev, IORESOURCE_MEM, 4);
if (!res)
@@ -1786,11 +1791,11 @@ vpfe_ipipe_init(struct vpfe_ipipe_device *ipipe, struct platform_device *pdev)
if (!ipipe->base_addr)
goto error_release;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 6);
- if (!res)
+ res2 = platform_get_resource(pdev, IORESOURCE_MEM, 6);
+ if (!res2)
goto error_unmap;
- ipipe->isp5_base_addr = ioremap_nocache(res->start,
- resource_size(res));
+ ipipe->isp5_base_addr = ioremap_nocache(res2->start,
+ resource_size(res2));
if (!ipipe->isp5_base_addr)
goto error_unmap;
diff --git a/drivers/staging/media/davinci_vpfe/dm365_isif.c b/drivers/staging/media/davinci_vpfe/dm365_isif.c
index 46fd8184fc77..05a997f7aa5d 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_isif.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_isif.c
@@ -816,7 +816,7 @@ isif_config_dfc(struct vpfe_isif_device *isif, struct vpfe_isif_dfc *vdfc)
/* Correct whole line or partial */
if (vdfc->corr_whole_line)
- val |= 1 << ISIF_VDFC_CORR_WHOLE_LN_SHIFT;
+ val |= BIT(ISIF_VDFC_CORR_WHOLE_LN_SHIFT);
/* level shift value */
val |= (vdfc->def_level_shift & ISIF_VDFC_LEVEL_SHFT_MASK) <<
@@ -844,7 +844,7 @@ isif_config_dfc(struct vpfe_isif_device *isif, struct vpfe_isif_dfc *vdfc)
val = isif_read(isif->isif_cfg.base_addr, DFCMEMCTL);
/* set DFCMARST and set DFCMWR */
- val |= 1 << ISIF_DFCMEMCTL_DFCMARST_SHIFT;
+ val |= BIT(ISIF_DFCMEMCTL_DFCMARST_SHIFT);
val |= 1;
isif_write(isif->isif_cfg.base_addr, val, DFCMEMCTL);
@@ -875,7 +875,7 @@ isif_config_dfc(struct vpfe_isif_device *isif, struct vpfe_isif_dfc *vdfc)
}
val = isif_read(isif->isif_cfg.base_addr, DFCMEMCTL);
/* clear DFCMARST and set DFCMWR */
- val &= ~(1 << ISIF_DFCMEMCTL_DFCMARST_SHIFT);
+ val &= ~BIT(ISIF_DFCMEMCTL_DFCMARST_SHIFT);
val |= 1;
isif_write(isif->isif_cfg.base_addr, val, DFCMEMCTL);
@@ -1135,7 +1135,7 @@ static int isif_config_raw(struct v4l2_subdev *sd, int mode)
isif_write(isif->isif_cfg.base_addr, val, CGAMMAWD);
/* Configure DPCM compression settings */
if (params->v4l2_pix_fmt == V4L2_PIX_FMT_SGRBG10DPCM8) {
- val = 1 << ISIF_DPCM_EN_SHIFT;
+ val = BIT(ISIF_DPCM_EN_SHIFT);
val |= (params->dpcm_predictor &
ISIF_DPCM_PREDICTOR_MASK) << ISIF_DPCM_PREDICTOR_SHIFT;
}
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c b/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c
index 57b93605bc58..9dc28ffe38d5 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c
+++ b/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c
@@ -158,7 +158,7 @@ static irqreturn_t vpfe_isr(int irq, void *dev_id)
{
struct vpfe_device *vpfe_dev = dev_id;
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_isr\n");
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "%s\n", __func__);
vpfe_isif_buffer_isr(&vpfe_dev->vpfe_isif);
vpfe_resizer_buffer_isr(&vpfe_dev->vpfe_resizer);
return IRQ_HANDLED;
@@ -169,7 +169,7 @@ static irqreturn_t vpfe_vdint1_isr(int irq, void *dev_id)
{
struct vpfe_device *vpfe_dev = dev_id;
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_vdint1_isr\n");
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "%s\n", __func__);
vpfe_isif_vidint1_isr(&vpfe_dev->vpfe_isif);
return IRQ_HANDLED;
}
@@ -179,7 +179,7 @@ static irqreturn_t vpfe_imp_dma_isr(int irq, void *dev_id)
{
struct vpfe_device *vpfe_dev = dev_id;
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_imp_dma_isr\n");
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "%s\n", __func__);
vpfe_ipipeif_ss_buffer_isr(&vpfe_dev->vpfe_ipipeif);
vpfe_resizer_dma_isr(&vpfe_dev->vpfe_resizer);
return IRQ_HANDLED;
@@ -691,7 +691,7 @@ static int vpfe_remove(struct platform_device *pdev)
{
struct vpfe_device *vpfe_dev = platform_get_drvdata(pdev);
- v4l2_info(pdev->dev.driver, "vpfe_remove\n");
+ v4l2_info(pdev->dev.driver, "%s\n", __func__);
kzfree(vpfe_dev->sd);
vpfe_detach_irq(vpfe_dev);
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c
index 510202a3b091..ab6bc452d9f6 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_video.c
+++ b/drivers/staging/media/davinci_vpfe/vpfe_video.c
@@ -419,6 +419,9 @@ static int vpfe_open(struct file *file)
/* If decoder is not initialized. initialize it */
if (!video->initialized && vpfe_update_pipe_state(video)) {
mutex_unlock(&video->lock);
+ v4l2_fh_del(&handle->vfh);
+ v4l2_fh_exit(&handle->vfh);
+ kfree(handle);
return -ENODEV;
}
/* Increment device users counter */
@@ -609,10 +612,6 @@ static int vpfe_querycap(struct file *file, void *priv,
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_querycap\n");
- if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
- cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
- else
- cap->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT |
V4L2_CAP_STREAMING | V4L2_CAP_DEVICE_CAPS;
strscpy(cap->driver, CAPTURE_DRV_NAME, sizeof(cap->driver));
@@ -1625,6 +1624,11 @@ int vpfe_video_register(struct vpfe_video_device *video,
video->video_dev.v4l2_dev = vdev;
+ if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ video->video_dev.device_caps = V4L2_CAP_VIDEO_CAPTURE;
+ else
+ video->video_dev.device_caps = V4L2_CAP_VIDEO_OUTPUT;
+ video->video_dev.device_caps |= V4L2_CAP_STREAMING;
ret = video_register_device(&video->video_dev, VFL_TYPE_GRABBER, -1);
if (ret < 0)
pr_err("%s: could not register video device (%d)\n",
diff --git a/drivers/staging/media/hantro/Kconfig b/drivers/staging/media/hantro/Kconfig
new file mode 100644
index 000000000000..be133bbaa68a
--- /dev/null
+++ b/drivers/staging/media/hantro/Kconfig
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0
+config VIDEO_HANTRO
+ tristate "Hantro VPU driver"
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
+ depends on VIDEO_DEV && VIDEO_V4L2 && MEDIA_CONTROLLER
+ depends on MEDIA_CONTROLLER_REQUEST_API
+ select VIDEOBUF2_DMA_CONTIG
+ select VIDEOBUF2_VMALLOC
+ select V4L2_MEM2MEM_DEV
+ help
+ Support for the Hantro IP based Video Processing Unit present on
+	  Rockchip SoCs, which accelerates video and image encoding and
+ decoding.
+ To compile this driver as a module, choose M here: the module
+ will be called hantro-vpu.
+
+config VIDEO_HANTRO_ROCKCHIP
+ bool "Hantro VPU Rockchip support"
+ depends on VIDEO_HANTRO
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
+ default y
+ help
+ Enable support for RK3288 and RK3399 SoCs.
diff --git a/drivers/staging/media/hantro/Makefile b/drivers/staging/media/hantro/Makefile
new file mode 100644
index 000000000000..1584acdbf4a3
--- /dev/null
+++ b/drivers/staging/media/hantro/Makefile
@@ -0,0 +1,15 @@
+obj-$(CONFIG_VIDEO_HANTRO) += hantro-vpu.o
+
+hantro-vpu-y += \
+ hantro_drv.o \
+ hantro_v4l2.o \
+ hantro_h1_jpeg_enc.o \
+ hantro_g1_mpeg2_dec.o \
+ rk3399_vpu_hw_jpeg_enc.o \
+ rk3399_vpu_hw_mpeg2_dec.o \
+ hantro_jpeg.o \
+ hantro_mpeg2.o
+
+hantro-vpu-$(CONFIG_VIDEO_HANTRO_ROCKCHIP) += \
+ rk3288_vpu_hw.o \
+ rk3399_vpu_hw.o
diff --git a/drivers/staging/media/rockchip/vpu/TODO b/drivers/staging/media/hantro/TODO
index fa0c94057007..fa0c94057007 100644
--- a/drivers/staging/media/rockchip/vpu/TODO
+++ b/drivers/staging/media/hantro/TODO
diff --git a/drivers/staging/media/hantro/hantro.h b/drivers/staging/media/hantro/hantro.h
new file mode 100644
index 000000000000..62dcca9ff19c
--- /dev/null
+++ b/drivers/staging/media/hantro/hantro.h
@@ -0,0 +1,351 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright 2018 Google LLC.
+ * Tomasz Figa <tfiga@chromium.org>
+ *
+ * Based on s5p-mfc driver by Samsung Electronics Co., Ltd.
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ */
+
+#ifndef HANTRO_H_
+#define HANTRO_H_
+
+#include <linux/platform_device.h>
+#include <linux/videodev2.h>
+#include <linux/wait.h>
+#include <linux/clk.h>
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "hantro_hw.h"
+
+#define MPEG2_MB_DIM 16
+#define MPEG2_MB_WIDTH(w) DIV_ROUND_UP(w, MPEG2_MB_DIM)
+#define MPEG2_MB_HEIGHT(h) DIV_ROUND_UP(h, MPEG2_MB_DIM)
+
+#define JPEG_MB_DIM 16
+#define JPEG_MB_WIDTH(w) DIV_ROUND_UP(w, JPEG_MB_DIM)
+#define JPEG_MB_HEIGHT(h) DIV_ROUND_UP(h, JPEG_MB_DIM)
+
+struct hantro_ctx;
+struct hantro_codec_ops;
+
+#define HANTRO_JPEG_ENCODER BIT(0)
+#define HANTRO_ENCODERS 0x0000ffff
+
+#define HANTRO_MPEG2_DECODER BIT(16)
+#define HANTRO_DECODERS 0xffff0000
+
+/**
+ * struct hantro_irq - irq handler and name
+ *
+ * @name: irq name for device tree lookup
+ * @handler: interrupt handler
+ */
+struct hantro_irq {
+ const char *name;
+ irqreturn_t (*handler)(int irq, void *priv);
+};
+
+/**
+ * struct hantro_variant - information about VPU hardware variant
+ *
+ * @enc_offset: Offset from VPU base to encoder registers.
+ * @dec_offset: Offset from VPU base to decoder registers.
+ * @enc_fmts: Encoder formats.
+ * @num_enc_fmts: Number of encoder formats.
+ * @dec_fmts: Decoder formats.
+ * @num_dec_fmts: Number of decoder formats.
+ * @codec: Supported codecs
+ * @codec_ops: Codec ops.
+ * @init: Initialize hardware.
+ * @runtime_resume: reenable hardware after power gating
+ * @irqs: array of irq names and interrupt handlers
+ * @num_irqs: number of irqs in the array
+ * @clk_names: array of clock names
+ * @num_clocks: number of clocks in the array
+ * @reg_names: array of register range names
+ * @num_regs: number of register range names in the array
+ */
+struct hantro_variant {
+ unsigned int enc_offset;
+ unsigned int dec_offset;
+ const struct hantro_fmt *enc_fmts;
+ unsigned int num_enc_fmts;
+ const struct hantro_fmt *dec_fmts;
+ unsigned int num_dec_fmts;
+ unsigned int codec;
+ const struct hantro_codec_ops *codec_ops;
+ int (*init)(struct hantro_dev *vpu);
+ int (*runtime_resume)(struct hantro_dev *vpu);
+ const struct hantro_irq *irqs;
+ int num_irqs;
+ const char * const *clk_names;
+ int num_clocks;
+ const char * const *reg_names;
+ int num_regs;
+};
+
+/**
+ * enum hantro_codec_mode - codec operating mode.
+ * @HANTRO_MODE_NONE: No operating mode. Used for RAW video formats.
+ * @HANTRO_MODE_JPEG_ENC: JPEG encoder.
+ * @HANTRO_MODE_MPEG2_DEC: MPEG-2 decoder.
+ */
+enum hantro_codec_mode {
+ HANTRO_MODE_NONE = -1,
+ HANTRO_MODE_JPEG_ENC,
+ HANTRO_MODE_MPEG2_DEC,
+};
+
+/*
+ * struct hantro_ctrl - helper type to declare supported controls
+ * @id: V4L2 control ID (V4L2_CID_xxx)
+ * @codec: codec id this control belongs to (HANTRO_JPEG_ENCODER, etc.)
+ * @cfg: control configuration
+ */
+struct hantro_ctrl {
+ unsigned int id;
+ unsigned int codec;
+ struct v4l2_ctrl_config cfg;
+};
+
+/*
+ * struct hantro_func - Hantro VPU functionality
+ *
+ * @id: processing functionality ID (can be
+ * %MEDIA_ENT_F_PROC_VIDEO_ENCODER or
+ * %MEDIA_ENT_F_PROC_VIDEO_DECODER)
+ * @vdev: &struct video_device that exposes the encoder or
+ * decoder functionality
+ * @source_pad: &struct media_pad with the source pad.
+ * @sink: &struct media_entity pointer with the sink entity
+ * @sink_pad: &struct media_pad with the sink pad.
+ * @proc: &struct media_entity pointer with the M2M device itself.
+ * @proc_pads: &struct media_pad with the @proc pads.
+ * @intf_devnode: &struct media_intf devnode pointer with the interface
+ * with controls the M2M device.
+ *			that controls the M2M device.
+ * Contains everything needed to attach the video device to the media device.
+ */
+struct hantro_func {
+ unsigned int id;
+ struct video_device vdev;
+ struct media_pad source_pad;
+ struct media_entity sink;
+ struct media_pad sink_pad;
+ struct media_entity proc;
+ struct media_pad proc_pads[2];
+ struct media_intf_devnode *intf_devnode;
+};
+
+static inline struct hantro_func *
+hantro_vdev_to_func(struct video_device *vdev)
+{
+ return container_of(vdev, struct hantro_func, vdev);
+}
+
+/**
+ * struct hantro_dev - driver data
+ * @v4l2_dev: V4L2 device to register video devices for.
+ * @m2m_dev: mem2mem device associated to this device.
+ * @mdev: media device associated to this device.
+ * @encoder: encoder functionality.
+ * @decoder: decoder functionality.
+ * @pdev: Pointer to VPU platform device.
+ * @dev: Pointer to device for convenient logging using
+ * dev_ macros.
+ * @clocks: Array of clock handles.
+ * @reg_bases: Mapped addresses of VPU registers.
+ * @enc_base: Mapped address of VPU encoder register for convenience.
+ * @dec_base: Mapped address of VPU decoder register for convenience.
+ * @ctrl_base: Mapped address of VPU control block.
+ * @vpu_mutex: Mutex to synchronize V4L2 calls.
+ * @irqlock: Spinlock to synchronize access to data structures
+ * shared with interrupt handlers.
+ * @variant: Hardware variant-specific parameters.
+ * @watchdog_work: Delayed work for hardware timeout handling.
+ */
+struct hantro_dev {
+ struct v4l2_device v4l2_dev;
+ struct v4l2_m2m_dev *m2m_dev;
+ struct media_device mdev;
+ struct hantro_func *encoder;
+ struct hantro_func *decoder;
+ struct platform_device *pdev;
+ struct device *dev;
+ struct clk_bulk_data *clocks;
+ void __iomem **reg_bases;
+ void __iomem *enc_base;
+ void __iomem *dec_base;
+ void __iomem *ctrl_base;
+
+ struct mutex vpu_mutex; /* video_device lock */
+ spinlock_t irqlock;
+ const struct hantro_variant *variant;
+ struct delayed_work watchdog_work;
+};
+
+/**
+ * struct hantro_ctx - Context (instance) private data.
+ *
+ * @dev: VPU driver data to which the context belongs.
+ * @fh: V4L2 file handler.
+ *
+ * @sequence_cap: Sequence counter for capture queue
+ * @sequence_out: Sequence counter for output queue
+ *
+ * @vpu_src_fmt: Descriptor of active source format.
+ * @src_fmt: V4L2 pixel format of active source format.
+ * @vpu_dst_fmt: Descriptor of active destination format.
+ * @dst_fmt: V4L2 pixel format of active destination format.
+ *
+ * @ctrl_handler: Control handler used to register controls.
+ * @jpeg_quality: User-specified JPEG compression quality.
+ *
+ * @buf_finish: Buffer finish. This depends on encoder or decoder
+ * context, and it's called right before
+ * calling v4l2_m2m_job_finish.
+ * @codec_ops: Set of operations related to codec mode.
+ * @jpeg_enc: JPEG-encoding context.
+ * @mpeg2_dec: MPEG-2-decoding context.
+ */
+struct hantro_ctx {
+ struct hantro_dev *dev;
+ struct v4l2_fh fh;
+
+ u32 sequence_cap;
+ u32 sequence_out;
+
+ const struct hantro_fmt *vpu_src_fmt;
+ struct v4l2_pix_format_mplane src_fmt;
+ const struct hantro_fmt *vpu_dst_fmt;
+ struct v4l2_pix_format_mplane dst_fmt;
+
+ struct v4l2_ctrl_handler ctrl_handler;
+ int jpeg_quality;
+
+ int (*buf_finish)(struct hantro_ctx *ctx,
+ struct vb2_buffer *buf,
+ unsigned int bytesused);
+
+ const struct hantro_codec_ops *codec_ops;
+
+ /* Specific for particular codec modes. */
+ union {
+ struct hantro_jpeg_enc_hw_ctx jpeg_enc;
+ struct hantro_mpeg2_dec_hw_ctx mpeg2_dec;
+ };
+};
+
+/**
+ * struct hantro_fmt - information about supported video formats.
+ * @name: Human readable name of the format.
+ * @fourcc: FourCC code of the format. See V4L2_PIX_FMT_*.
+ * @codec_mode: Codec mode related to this format. See
+ * enum hantro_codec_mode.
+ * @header_size: Optional header size. Currently used by JPEG encoder.
+ * @max_depth: Maximum depth, for bitstream formats
+ * @enc_fmt: Format identifier for encoder registers.
+ * @frmsize: Supported range of frame sizes (only for bitstream formats).
+ */
+struct hantro_fmt {
+ char *name;
+ u32 fourcc;
+ enum hantro_codec_mode codec_mode;
+ int header_size;
+ int max_depth;
+ enum hantro_enc_fmt enc_fmt;
+ struct v4l2_frmsize_stepwise frmsize;
+};
+
+/* Logging helpers */
+
+/**
+ * debug - Module parameter to control level of debugging messages.
+ *
+ * Level of debugging messages can be controlled by the bits of the
+ * module parameter called "debug". The meaning of the individual
+ * bits is as follows:
+ *
+ * bit 0 - global information: mode, size, init, release
+ * bit 1 - each run start/result information
+ * bit 2 - contents of small controls from userspace
+ * bit 3 - contents of big controls from userspace
+ * bit 4 - detail fmt, ctrl, buffer q/dq information
+ * bit 5 - detail function enter/leave trace information
+ * bit 6 - register write/read information
+ */
+extern int hantro_debug;
+
+#define vpu_debug(level, fmt, args...) \
+ do { \
+ if (hantro_debug & BIT(level)) \
+ pr_info("%s:%d: " fmt, \
+ __func__, __LINE__, ##args); \
+ } while (0)
+
+#define vpu_err(fmt, args...) \
+ pr_err("%s:%d: " fmt, __func__, __LINE__, ##args)
+
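A short usage note on the macro above: the level argument selects a single bit of the mask, so a statement is printed only when the corresponding bit of the "debug" module parameter is set. An illustrative call matching bit 5, "function enter/leave", from the list above:

	vpu_debug(5, "enter %s\n", __func__);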
+/* Structure access helpers. */
+static inline struct hantro_ctx *fh_to_ctx(struct v4l2_fh *fh)
+{
+ return container_of(fh, struct hantro_ctx, fh);
+}
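As an illustration of how this helper is meant to be used in ioctl handlers, assuming hantro.h and the usual V4L2 headers are included; the handler name example_g_fmt() and the returned format are assumptions, not code from this patch.

/* Hedged example: recover the driver context from the V4L2 file handle. */
static int example_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
	struct hantro_ctx *ctx = fh_to_ctx(priv);

	/* Report the currently active multi-planar destination format. */
	f->fmt.pix_mp = ctx->dst_fmt;
	return 0;
}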
+
+/* Register accessors. */
+static inline void vepu_write_relaxed(struct hantro_dev *vpu,
+ u32 val, u32 reg)
+{
+ vpu_debug(6, "0x%04x = 0x%08x\n", reg / 4, val);
+ writel_relaxed(val, vpu->enc_base + reg);
+}
+
+static inline void vepu_write(struct hantro_dev *vpu, u32 val, u32 reg)
+{
+ vpu_debug(6, "0x%04x = 0x%08x\n", reg / 4, val);
+ writel(val, vpu->enc_base + reg);
+}
+
+static inline u32 vepu_read(struct hantro_dev *vpu, u32 reg)
+{
+ u32 val = readl(vpu->enc_base + reg);
+
+ vpu_debug(6, "0x%04x = 0x%08x\n", reg / 4, val);
+ return val;
+}
+
+static inline void vdpu_write_relaxed(struct hantro_dev *vpu,
+ u32 val, u32 reg)
+{
+ vpu_debug(6, "0x%04x = 0x%08x\n", reg / 4, val);
+ writel_relaxed(val, vpu->dec_base + reg);
+}
+
+static inline void vdpu_write(struct hantro_dev *vpu, u32 val, u32 reg)
+{
+ vpu_debug(6, "0x%04x = 0x%08x\n", reg / 4, val);
+ writel(val, vpu->dec_base + reg);
+}
+
+static inline u32 vdpu_read(struct hantro_dev *vpu, u32 reg)
+{
+ u32 val = readl(vpu->dec_base + reg);
+
+ vpu_debug(6, "0x%04x = 0x%08x\n", reg / 4, val);
+ return val;
+}
+
+bool hantro_is_encoder_ctx(const struct hantro_ctx *ctx);
+
+void *hantro_get_ctrl(struct hantro_ctx *ctx, u32 id);
+dma_addr_t hantro_get_ref(struct vb2_queue *q, u64 ts);
+
+#endif /* HANTRO_H_ */
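Finally, a hedged sketch of the intended split between the relaxed and ordered register accessors declared above; the register offsets and the start bit are made-up examples, not actual Hantro registers.

#include "hantro.h"

#define EXAMPLE_SWREG_CFG	0x008	/* hypothetical configuration register */
#define EXAMPLE_SWREG_CTRL	0x004	/* hypothetical control register */

/* Hedged example: batch configuration writes, then start the hardware. */
static void example_dec_kick(struct hantro_dev *vpu, u32 cfg)
{
	/* Plain configuration writes can use the relaxed accessor. */
	vdpu_write_relaxed(vpu, cfg, EXAMPLE_SWREG_CFG);

	/* The write that actually starts the block uses the ordered one. */
	vdpu_write(vpu, BIT(0), EXAMPLE_SWREG_CTRL);
}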
diff --git a/drivers/staging/media/hantro/hantro_drv.c b/drivers/staging/media/hantro/hantro_drv.c
new file mode 100644
index 000000000000..c3665f0e87a2
--- /dev/null
+++ b/drivers/staging/media/hantro/hantro_drv.c
@@ -0,0 +1,876 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright (C) 2018 Collabora, Ltd.
+ * Copyright 2018 Google LLC.
+ * Tomasz Figa <tfiga@chromium.org>
+ *
+ * Based on s5p-mfc driver by Samsung Electronics Co., Ltd.
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+#include <linux/workqueue.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-vmalloc.h>
+
+#include "hantro_v4l2.h"
+#include "hantro.h"
+#include "hantro_hw.h"
+
+#define DRIVER_NAME "hantro-vpu"
+
+int hantro_debug;
+module_param_named(debug, hantro_debug, int, 0644);
+MODULE_PARM_DESC(debug,
+ "Debug level - higher value produces more verbose messages");
+
+void *hantro_get_ctrl(struct hantro_ctx *ctx, u32 id)
+{
+ struct v4l2_ctrl *ctrl;
+
+ ctrl = v4l2_ctrl_find(&ctx->ctrl_handler, id);
+ return ctrl ? ctrl->p_cur.p : NULL;
+}
+
+dma_addr_t hantro_get_ref(struct vb2_queue *q, u64 ts)
+{
+ struct vb2_buffer *buf;
+ int index;
+
+ index = vb2_find_timestamp(q, ts, 0);
+ if (index < 0)
+ return 0;
+ buf = vb2_get_buffer(q, index);
+ return vb2_dma_contig_plane_dma_addr(buf, 0);
+}
+
+static int
+hantro_enc_buf_finish(struct hantro_ctx *ctx, struct vb2_buffer *buf,
+ unsigned int bytesused)
+{
+ size_t avail_size;
+
+ avail_size = vb2_plane_size(buf, 0) - ctx->vpu_dst_fmt->header_size;
+ if (bytesused > avail_size)
+ return -EINVAL;
+ /*
+ * The bounce buffer is only for the JPEG encoder.
+ * TODO: Rework the JPEG encoder to eliminate the need
+ * for a bounce buffer.
+ */
+ if (ctx->jpeg_enc.bounce_buffer.cpu) {
+ memcpy(vb2_plane_vaddr(buf, 0) +
+ ctx->vpu_dst_fmt->header_size,
+ ctx->jpeg_enc.bounce_buffer.cpu, bytesused);
+ }
+ buf->planes[0].bytesused =
+ ctx->vpu_dst_fmt->header_size + bytesused;
+ return 0;
+}
+
+static int
+hantro_dec_buf_finish(struct hantro_ctx *ctx, struct vb2_buffer *buf,
+ unsigned int bytesused)
+{
+ /* For decoders set bytesused as per the output picture. */
+ buf->planes[0].bytesused = ctx->dst_fmt.plane_fmt[0].sizeimage;
+ return 0;
+}
+
+static void hantro_job_finish(struct hantro_dev *vpu,
+ struct hantro_ctx *ctx,
+ unsigned int bytesused,
+ enum vb2_buffer_state result)
+{
+ struct vb2_v4l2_buffer *src, *dst;
+ int ret;
+
+ pm_runtime_mark_last_busy(vpu->dev);
+ pm_runtime_put_autosuspend(vpu->dev);
+ clk_bulk_disable(vpu->variant->num_clocks, vpu->clocks);
+
+ src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+
+ if (WARN_ON(!src))
+ return;
+ if (WARN_ON(!dst))
+ return;
+
+ src->sequence = ctx->sequence_out++;
+ dst->sequence = ctx->sequence_cap++;
+
+ v4l2_m2m_buf_copy_metadata(src, dst, true);
+
+ ret = ctx->buf_finish(ctx, &dst->vb2_buf, bytesused);
+ if (ret)
+ result = VB2_BUF_STATE_ERROR;
+
+ v4l2_m2m_buf_done(src, result);
+ v4l2_m2m_buf_done(dst, result);
+
+ v4l2_m2m_job_finish(vpu->m2m_dev, ctx->fh.m2m_ctx);
+}
+
+void hantro_irq_done(struct hantro_dev *vpu, unsigned int bytesused,
+ enum vb2_buffer_state result)
+{
+ struct hantro_ctx *ctx =
+ v4l2_m2m_get_curr_priv(vpu->m2m_dev);
+
+ /*
+ * If cancel_delayed_work returns false
+ * the timeout expired. The watchdog is running,
+ * and will take care of finishing the job.
+ */
+ if (cancel_delayed_work(&vpu->watchdog_work))
+ hantro_job_finish(vpu, ctx, bytesused, result);
+}
+
+void hantro_watchdog(struct work_struct *work)
+{
+ struct hantro_dev *vpu;
+ struct hantro_ctx *ctx;
+
+ vpu = container_of(to_delayed_work(work),
+ struct hantro_dev, watchdog_work);
+ ctx = v4l2_m2m_get_curr_priv(vpu->m2m_dev);
+ if (ctx) {
+ vpu_err("frame processing timed out!\n");
+ ctx->codec_ops->reset(ctx);
+ hantro_job_finish(vpu, ctx, 0, VB2_BUF_STATE_ERROR);
+ }
+}
+
+static void device_run(void *priv)
+{
+ struct hantro_ctx *ctx = priv;
+ int ret;
+
+ ret = clk_bulk_enable(ctx->dev->variant->num_clocks, ctx->dev->clocks);
+ if (ret)
+ goto err_cancel_job;
+ ret = pm_runtime_get_sync(ctx->dev->dev);
+ if (ret < 0)
+ goto err_cancel_job;
+
+ ctx->codec_ops->run(ctx);
+ return;
+
+err_cancel_job:
+ hantro_job_finish(ctx->dev, ctx, 0, VB2_BUF_STATE_ERROR);
+}
+
+bool hantro_is_encoder_ctx(const struct hantro_ctx *ctx)
+{
+ return ctx->buf_finish == hantro_enc_buf_finish;
+}
+
+static struct v4l2_m2m_ops vpu_m2m_ops = {
+ .device_run = device_run,
+};
+
+static int
+queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)
+{
+ struct hantro_ctx *ctx = priv;
+ int ret;
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ src_vq->drv_priv = ctx;
+ src_vq->ops = &hantro_queue_ops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+
+ /*
+ * Driver does mostly sequential access, so sacrifice TLB efficiency
+ * for faster allocation. Also, no CPU access on the source queue,
+ * so no kernel mapping needed.
+ */
+ src_vq->dma_attrs = DMA_ATTR_ALLOC_SINGLE_PAGES |
+ DMA_ATTR_NO_KERNEL_MAPPING;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &ctx->dev->vpu_mutex;
+ src_vq->dev = ctx->dev->v4l2_dev.dev;
+ src_vq->supports_requests = true;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ /*
+	 * When encoding, the CAPTURE queue doesn't need DMA memory,
+	 * as the CPU needs to create the JPEG frames from the
+	 * hardware-produced JPEG payload.
+ *
+ * For the DMA destination buffer, we use a bounce buffer.
+ */
+ if (hantro_is_encoder_ctx(ctx)) {
+ dst_vq->mem_ops = &vb2_vmalloc_memops;
+ } else {
+ dst_vq->bidirectional = true;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->dma_attrs = DMA_ATTR_ALLOC_SINGLE_PAGES |
+ DMA_ATTR_NO_KERNEL_MAPPING;
+ }
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ dst_vq->drv_priv = ctx;
+ dst_vq->ops = &hantro_queue_ops;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &ctx->dev->vpu_mutex;
+ dst_vq->dev = ctx->dev->v4l2_dev.dev;
+
+ return vb2_queue_init(dst_vq);
+}
+
+static int hantro_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct hantro_ctx *ctx;
+
+ ctx = container_of(ctrl->handler,
+ struct hantro_ctx, ctrl_handler);
+
+ vpu_debug(1, "s_ctrl: id = %d, val = %d\n", ctrl->id, ctrl->val);
+
+ switch (ctrl->id) {
+ case V4L2_CID_JPEG_COMPRESSION_QUALITY:
+ ctx->jpeg_quality = ctrl->val;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops hantro_ctrl_ops = {
+ .s_ctrl = hantro_s_ctrl,
+};
+
+static struct hantro_ctrl controls[] = {
+ {
+ .id = V4L2_CID_JPEG_COMPRESSION_QUALITY,
+ .codec = HANTRO_JPEG_ENCODER,
+ .cfg = {
+ .min = 5,
+ .max = 100,
+ .step = 1,
+ .def = 50,
+ },
+ }, {
+ .id = V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS,
+ .codec = HANTRO_MPEG2_DECODER,
+ .cfg = {
+ .elem_size = sizeof(struct v4l2_ctrl_mpeg2_slice_params),
+ },
+ }, {
+ .id = V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION,
+ .codec = HANTRO_MPEG2_DECODER,
+ .cfg = {
+ .elem_size = sizeof(struct v4l2_ctrl_mpeg2_quantization),
+ },
+ },
+};
+
+static int hantro_ctrls_setup(struct hantro_dev *vpu,
+ struct hantro_ctx *ctx,
+ int allowed_codecs)
+{
+ int i, num_ctrls = ARRAY_SIZE(controls);
+
+ v4l2_ctrl_handler_init(&ctx->ctrl_handler, num_ctrls);
+
+ for (i = 0; i < num_ctrls; i++) {
+ if (!(allowed_codecs & controls[i].codec))
+ continue;
+ if (!controls[i].cfg.elem_size) {
+ v4l2_ctrl_new_std(&ctx->ctrl_handler,
+ &hantro_ctrl_ops,
+ controls[i].id, controls[i].cfg.min,
+ controls[i].cfg.max,
+ controls[i].cfg.step,
+ controls[i].cfg.def);
+ } else {
+ controls[i].cfg.id = controls[i].id;
+ v4l2_ctrl_new_custom(&ctx->ctrl_handler,
+ &controls[i].cfg, NULL);
+ }
+
+ if (ctx->ctrl_handler.error) {
+ vpu_err("Adding control (%d) failed %d\n",
+ controls[i].id,
+ ctx->ctrl_handler.error);
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ return ctx->ctrl_handler.error;
+ }
+ }
+ return v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
+}
+
+/*
+ * V4L2 file operations.
+ */
+
+static int hantro_open(struct file *filp)
+{
+ struct hantro_dev *vpu = video_drvdata(filp);
+ struct video_device *vdev = video_devdata(filp);
+ struct hantro_func *func = hantro_vdev_to_func(vdev);
+ struct hantro_ctx *ctx;
+ int allowed_codecs, ret;
+
+ /*
+	 * We do not need any extra locking here, because we operate only
+	 * on local data, except for reading a few fields from dev, which
+	 * do not change through the device's lifetime (guaranteed by the
+	 * module reference taken in open()), and V4L2 internal objects
+	 * (such as vdev and ctx->fh), which have proper locking done in
+	 * the respective helper functions used here.
+ */
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->dev = vpu;
+ if (func->id == MEDIA_ENT_F_PROC_VIDEO_ENCODER) {
+ allowed_codecs = vpu->variant->codec & HANTRO_ENCODERS;
+ ctx->buf_finish = hantro_enc_buf_finish;
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(vpu->m2m_dev, ctx,
+ queue_init);
+ } else if (func->id == MEDIA_ENT_F_PROC_VIDEO_DECODER) {
+ allowed_codecs = vpu->variant->codec & HANTRO_DECODERS;
+ ctx->buf_finish = hantro_dec_buf_finish;
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(vpu->m2m_dev, ctx,
+ queue_init);
+ } else {
+ ctx->fh.m2m_ctx = ERR_PTR(-ENODEV);
+ }
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ ret = PTR_ERR(ctx->fh.m2m_ctx);
+ kfree(ctx);
+ return ret;
+ }
+
+ v4l2_fh_init(&ctx->fh, vdev);
+ filp->private_data = &ctx->fh;
+ v4l2_fh_add(&ctx->fh);
+
+ hantro_reset_fmts(ctx);
+
+ ret = hantro_ctrls_setup(vpu, ctx, allowed_codecs);
+ if (ret) {
+ vpu_err("Failed to set up controls\n");
+ goto err_fh_free;
+ }
+ ctx->fh.ctrl_handler = &ctx->ctrl_handler;
+
+ return 0;
+
+err_fh_free:
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+ return ret;
+}
+
+static int hantro_release(struct file *filp)
+{
+ struct hantro_ctx *ctx =
+ container_of(filp->private_data, struct hantro_ctx, fh);
+
+ /*
+ * No need for extra locking because this was the last reference
+ * to this file.
+ */
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ kfree(ctx);
+
+ return 0;
+}
+
+static const struct v4l2_file_operations hantro_fops = {
+ .owner = THIS_MODULE,
+ .open = hantro_open,
+ .release = hantro_release,
+ .poll = v4l2_m2m_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+static const struct of_device_id of_hantro_match[] = {
+#ifdef CONFIG_VIDEO_HANTRO_ROCKCHIP
+ { .compatible = "rockchip,rk3399-vpu", .data = &rk3399_vpu_variant, },
+ { .compatible = "rockchip,rk3288-vpu", .data = &rk3288_vpu_variant, },
+#endif
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_hantro_match);
+
+static int hantro_register_entity(struct media_device *mdev,
+ struct media_entity *entity,
+ const char *entity_name,
+ struct media_pad *pads, int num_pads,
+ int function, struct video_device *vdev)
+{
+ char *name;
+ int ret;
+
+ entity->obj_type = MEDIA_ENTITY_TYPE_BASE;
+ if (function == MEDIA_ENT_F_IO_V4L) {
+ entity->info.dev.major = VIDEO_MAJOR;
+ entity->info.dev.minor = vdev->minor;
+ }
+
+ name = devm_kasprintf(mdev->dev, GFP_KERNEL, "%s-%s", vdev->name,
+ entity_name);
+ if (!name)
+ return -ENOMEM;
+
+ entity->name = name;
+ entity->function = function;
+
+ ret = media_entity_pads_init(entity, num_pads, pads);
+ if (ret)
+ return ret;
+
+ ret = media_device_register_entity(mdev, entity);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int hantro_attach_func(struct hantro_dev *vpu,
+ struct hantro_func *func)
+{
+ struct media_device *mdev = &vpu->mdev;
+ struct media_link *link;
+ int ret;
+
+	/* Create the three entities with their pads */
+ func->source_pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = hantro_register_entity(mdev, &func->vdev.entity, "source",
+ &func->source_pad, 1, MEDIA_ENT_F_IO_V4L,
+ &func->vdev);
+ if (ret)
+ return ret;
+
+ func->proc_pads[0].flags = MEDIA_PAD_FL_SINK;
+ func->proc_pads[1].flags = MEDIA_PAD_FL_SOURCE;
+ ret = hantro_register_entity(mdev, &func->proc, "proc",
+ func->proc_pads, 2, func->id,
+ &func->vdev);
+ if (ret)
+ goto err_rel_entity0;
+
+ func->sink_pad.flags = MEDIA_PAD_FL_SINK;
+ ret = hantro_register_entity(mdev, &func->sink, "sink",
+ &func->sink_pad, 1, MEDIA_ENT_F_IO_V4L,
+ &func->vdev);
+ if (ret)
+ goto err_rel_entity1;
+
+ /* Connect the three entities */
+ ret = media_create_pad_link(&func->vdev.entity, 0, &func->proc, 1,
+ MEDIA_LNK_FL_IMMUTABLE |
+ MEDIA_LNK_FL_ENABLED);
+ if (ret)
+ goto err_rel_entity2;
+
+ ret = media_create_pad_link(&func->proc, 0, &func->sink, 0,
+ MEDIA_LNK_FL_IMMUTABLE |
+ MEDIA_LNK_FL_ENABLED);
+ if (ret)
+ goto err_rm_links0;
+
+ /* Create video interface */
+ func->intf_devnode = media_devnode_create(mdev, MEDIA_INTF_T_V4L_VIDEO,
+ 0, VIDEO_MAJOR,
+ func->vdev.minor);
+ if (!func->intf_devnode) {
+ ret = -ENOMEM;
+ goto err_rm_links1;
+ }
+
+ /* Connect the two DMA engines to the interface */
+ link = media_create_intf_link(&func->vdev.entity,
+ &func->intf_devnode->intf,
+ MEDIA_LNK_FL_IMMUTABLE |
+ MEDIA_LNK_FL_ENABLED);
+ if (!link) {
+ ret = -ENOMEM;
+ goto err_rm_devnode;
+ }
+
+ link = media_create_intf_link(&func->sink, &func->intf_devnode->intf,
+ MEDIA_LNK_FL_IMMUTABLE |
+ MEDIA_LNK_FL_ENABLED);
+ if (!link) {
+ ret = -ENOMEM;
+ goto err_rm_devnode;
+ }
+ return 0;
+
+err_rm_devnode:
+ media_devnode_remove(func->intf_devnode);
+
+err_rm_links1:
+ media_entity_remove_links(&func->sink);
+
+err_rm_links0:
+ media_entity_remove_links(&func->proc);
+ media_entity_remove_links(&func->vdev.entity);
+
+err_rel_entity2:
+ media_device_unregister_entity(&func->sink);
+
+err_rel_entity1:
+ media_device_unregister_entity(&func->proc);
+
+err_rel_entity0:
+ media_device_unregister_entity(&func->vdev.entity);
+ return ret;
+}
+
+static void hantro_detach_func(struct hantro_func *func)
+{
+ media_devnode_remove(func->intf_devnode);
+ media_entity_remove_links(&func->sink);
+ media_entity_remove_links(&func->proc);
+ media_entity_remove_links(&func->vdev.entity);
+ media_device_unregister_entity(&func->sink);
+ media_device_unregister_entity(&func->proc);
+ media_device_unregister_entity(&func->vdev.entity);
+}
+
+static int hantro_add_func(struct hantro_dev *vpu, unsigned int funcid)
+{
+ const struct of_device_id *match;
+ struct hantro_func *func;
+ struct video_device *vfd;
+ int ret;
+
+ match = of_match_node(of_hantro_match, vpu->dev->of_node);
+ func = devm_kzalloc(vpu->dev, sizeof(*func), GFP_KERNEL);
+ if (!func) {
+ v4l2_err(&vpu->v4l2_dev, "Failed to allocate video device\n");
+ return -ENOMEM;
+ }
+
+ func->id = funcid;
+
+ vfd = &func->vdev;
+ vfd->fops = &hantro_fops;
+ vfd->release = video_device_release_empty;
+ vfd->lock = &vpu->vpu_mutex;
+ vfd->v4l2_dev = &vpu->v4l2_dev;
+ vfd->vfl_dir = VFL_DIR_M2M;
+ vfd->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M_MPLANE;
+ vfd->ioctl_ops = &hantro_ioctl_ops;
+ snprintf(vfd->name, sizeof(vfd->name), "%s-%s", match->compatible,
+ funcid == MEDIA_ENT_F_PROC_VIDEO_ENCODER ? "enc" : "dec");
+
+ if (funcid == MEDIA_ENT_F_PROC_VIDEO_ENCODER)
+ vpu->encoder = func;
+ else
+ vpu->decoder = func;
+
+ video_set_drvdata(vfd, vpu);
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1);
+ if (ret) {
+ v4l2_err(&vpu->v4l2_dev, "Failed to register video device\n");
+ return ret;
+ }
+
+ ret = hantro_attach_func(vpu, func);
+ if (ret) {
+ v4l2_err(&vpu->v4l2_dev,
+ "Failed to attach functionality to the media device\n");
+ goto err_unreg_dev;
+ }
+
+ v4l2_info(&vpu->v4l2_dev, "registered %s as /dev/video%d\n", vfd->name,
+ vfd->num);
+
+ return 0;
+
+err_unreg_dev:
+ video_unregister_device(vfd);
+ return ret;
+}
+
+static int hantro_add_enc_func(struct hantro_dev *vpu)
+{
+ if (!vpu->variant->enc_fmts)
+ return 0;
+
+ return hantro_add_func(vpu, MEDIA_ENT_F_PROC_VIDEO_ENCODER);
+}
+
+static int hantro_add_dec_func(struct hantro_dev *vpu)
+{
+ if (!vpu->variant->dec_fmts)
+ return 0;
+
+ return hantro_add_func(vpu, MEDIA_ENT_F_PROC_VIDEO_DECODER);
+}
+
+static void hantro_remove_func(struct hantro_dev *vpu,
+ unsigned int funcid)
+{
+ struct hantro_func *func;
+
+ if (funcid == MEDIA_ENT_F_PROC_VIDEO_ENCODER)
+ func = vpu->encoder;
+ else
+ func = vpu->decoder;
+
+ if (!func)
+ return;
+
+ hantro_detach_func(func);
+ video_unregister_device(&func->vdev);
+}
+
+static void hantro_remove_enc_func(struct hantro_dev *vpu)
+{
+ hantro_remove_func(vpu, MEDIA_ENT_F_PROC_VIDEO_ENCODER);
+}
+
+static void hantro_remove_dec_func(struct hantro_dev *vpu)
+{
+ hantro_remove_func(vpu, MEDIA_ENT_F_PROC_VIDEO_DECODER);
+}
+
+static const struct media_device_ops hantro_m2m_media_ops = {
+ .req_validate = vb2_request_validate,
+ .req_queue = v4l2_m2m_request_queue,
+};
+
+static int hantro_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+ struct hantro_dev *vpu;
+ struct resource *res;
+ int num_bases;
+ int i, ret;
+
+ vpu = devm_kzalloc(&pdev->dev, sizeof(*vpu), GFP_KERNEL);
+ if (!vpu)
+ return -ENOMEM;
+
+ vpu->dev = &pdev->dev;
+ vpu->pdev = pdev;
+ mutex_init(&vpu->vpu_mutex);
+ spin_lock_init(&vpu->irqlock);
+
+ match = of_match_node(of_hantro_match, pdev->dev.of_node);
+ vpu->variant = match->data;
+
+ INIT_DELAYED_WORK(&vpu->watchdog_work, hantro_watchdog);
+
+ vpu->clocks = devm_kcalloc(&pdev->dev, vpu->variant->num_clocks,
+ sizeof(*vpu->clocks), GFP_KERNEL);
+ if (!vpu->clocks)
+ return -ENOMEM;
+
+ for (i = 0; i < vpu->variant->num_clocks; i++)
+ vpu->clocks[i].id = vpu->variant->clk_names[i];
+ ret = devm_clk_bulk_get(&pdev->dev, vpu->variant->num_clocks,
+ vpu->clocks);
+ if (ret)
+ return ret;
+
+ num_bases = vpu->variant->num_regs ?: 1;
+ vpu->reg_bases = devm_kcalloc(&pdev->dev, num_bases,
+ sizeof(*vpu->reg_bases), GFP_KERNEL);
+ if (!vpu->reg_bases)
+ return -ENOMEM;
+
+ for (i = 0; i < num_bases; i++) {
+ res = vpu->variant->reg_names ?
+ platform_get_resource_byname(vpu->pdev, IORESOURCE_MEM,
+ vpu->variant->reg_names[i]) :
+ platform_get_resource(vpu->pdev, IORESOURCE_MEM, 0);
+ vpu->reg_bases[i] = devm_ioremap_resource(vpu->dev, res);
+ if (IS_ERR(vpu->reg_bases[i]))
+ return PTR_ERR(vpu->reg_bases[i]);
+ }
+ vpu->enc_base = vpu->reg_bases[0] + vpu->variant->enc_offset;
+ vpu->dec_base = vpu->reg_bases[0] + vpu->variant->dec_offset;
+
+ ret = dma_set_coherent_mask(vpu->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(vpu->dev, "Could not set DMA coherent mask.\n");
+ return ret;
+ }
+
+ for (i = 0; i < vpu->variant->num_irqs; i++) {
+ const char *irq_name = vpu->variant->irqs[i].name;
+ int irq;
+
+ if (!vpu->variant->irqs[i].handler)
+ continue;
+
+ irq = platform_get_irq_byname(vpu->pdev, irq_name);
+ if (irq <= 0) {
+ dev_err(vpu->dev, "Could not get %s IRQ.\n", irq_name);
+ return -ENXIO;
+ }
+
+ ret = devm_request_irq(vpu->dev, irq,
+ vpu->variant->irqs[i].handler, 0,
+ dev_name(vpu->dev), vpu);
+ if (ret) {
+ dev_err(vpu->dev, "Could not request %s IRQ.\n",
+ irq_name);
+ return ret;
+ }
+ }
+
+ ret = vpu->variant->init(vpu);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to init VPU hardware\n");
+ return ret;
+ }
+
+ pm_runtime_set_autosuspend_delay(vpu->dev, 100);
+ pm_runtime_use_autosuspend(vpu->dev);
+ pm_runtime_enable(vpu->dev);
+
+ ret = clk_bulk_prepare(vpu->variant->num_clocks, vpu->clocks);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to prepare clocks\n");
+ goto err_pm_disable;
+ }
+
+ ret = v4l2_device_register(&pdev->dev, &vpu->v4l2_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register v4l2 device\n");
+ goto err_clk_unprepare;
+ }
+ platform_set_drvdata(pdev, vpu);
+
+ vpu->m2m_dev = v4l2_m2m_init(&vpu_m2m_ops);
+ if (IS_ERR(vpu->m2m_dev)) {
+ v4l2_err(&vpu->v4l2_dev, "Failed to init mem2mem device\n");
+ ret = PTR_ERR(vpu->m2m_dev);
+ goto err_v4l2_unreg;
+ }
+
+ vpu->mdev.dev = vpu->dev;
+ strscpy(vpu->mdev.model, DRIVER_NAME, sizeof(vpu->mdev.model));
+ strscpy(vpu->mdev.bus_info, "platform: " DRIVER_NAME,
+ sizeof(vpu->mdev.bus_info));
+ media_device_init(&vpu->mdev);
+ vpu->mdev.ops = &hantro_m2m_media_ops;
+ vpu->v4l2_dev.mdev = &vpu->mdev;
+
+ ret = hantro_add_enc_func(vpu);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register encoder\n");
+ goto err_m2m_rel;
+ }
+
+ ret = hantro_add_dec_func(vpu);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register decoder\n");
+ goto err_rm_enc_func;
+ }
+
+ ret = media_device_register(&vpu->mdev);
+ if (ret) {
+ v4l2_err(&vpu->v4l2_dev, "Failed to register mem2mem media device\n");
+ goto err_rm_dec_func;
+ }
+
+ return 0;
+
+err_rm_dec_func:
+ hantro_remove_dec_func(vpu);
+err_rm_enc_func:
+ hantro_remove_enc_func(vpu);
+err_m2m_rel:
+ media_device_cleanup(&vpu->mdev);
+ v4l2_m2m_release(vpu->m2m_dev);
+err_v4l2_unreg:
+ v4l2_device_unregister(&vpu->v4l2_dev);
+err_clk_unprepare:
+ clk_bulk_unprepare(vpu->variant->num_clocks, vpu->clocks);
+err_pm_disable:
+ pm_runtime_dont_use_autosuspend(vpu->dev);
+ pm_runtime_disable(vpu->dev);
+ return ret;
+}
+
+static int hantro_remove(struct platform_device *pdev)
+{
+ struct hantro_dev *vpu = platform_get_drvdata(pdev);
+
+ v4l2_info(&vpu->v4l2_dev, "Removing %s\n", pdev->name);
+
+ media_device_unregister(&vpu->mdev);
+ hantro_remove_dec_func(vpu);
+ hantro_remove_enc_func(vpu);
+ media_device_cleanup(&vpu->mdev);
+ v4l2_m2m_release(vpu->m2m_dev);
+ v4l2_device_unregister(&vpu->v4l2_dev);
+ clk_bulk_unprepare(vpu->variant->num_clocks, vpu->clocks);
+ pm_runtime_dont_use_autosuspend(vpu->dev);
+ pm_runtime_disable(vpu->dev);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int hantro_runtime_resume(struct device *dev)
+{
+ struct hantro_dev *vpu = dev_get_drvdata(dev);
+
+ if (vpu->variant->runtime_resume)
+ return vpu->variant->runtime_resume(vpu);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops hantro_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(NULL, hantro_runtime_resume, NULL)
+};
+
+static struct platform_driver hantro_driver = {
+ .probe = hantro_probe,
+ .remove = hantro_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = of_match_ptr(of_hantro_match),
+ .pm = &hantro_pm_ops,
+ },
+};
+module_platform_driver(hantro_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Alpha Lin <Alpha.Lin@Rock-Chips.com>");
+MODULE_AUTHOR("Tomasz Figa <tfiga@chromium.org>");
+MODULE_AUTHOR("Ezequiel Garcia <ezequiel@collabora.com>");
+MODULE_DESCRIPTION("Hantro VPU codec driver");
diff --git a/drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c b/drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c
new file mode 100644
index 000000000000..e592c1b66375
--- /dev/null
+++ b/drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c
@@ -0,0 +1,260 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
+ */
+
+#include <asm/unaligned.h>
+#include <linux/bitfield.h>
+#include <media/v4l2-mem2mem.h>
+#include "hantro.h"
+#include "hantro_hw.h"
+
+#define G1_SWREG(nr) ((nr) * 4)
+
+#define G1_REG_RLC_VLC_BASE G1_SWREG(12)
+#define G1_REG_DEC_OUT_BASE G1_SWREG(13)
+#define G1_REG_REFER0_BASE G1_SWREG(14)
+#define G1_REG_REFER1_BASE G1_SWREG(15)
+#define G1_REG_REFER2_BASE G1_SWREG(16)
+#define G1_REG_REFER3_BASE G1_SWREG(17)
+#define G1_REG_QTABLE_BASE G1_SWREG(40)
+#define G1_REG_DEC_E(v) ((v) ? BIT(0) : 0)
+
+#define G1_REG_DEC_AXI_RD_ID(v) (((v) << 24) & GENMASK(31, 24))
+#define G1_REG_DEC_TIMEOUT_E(v) ((v) ? BIT(23) : 0)
+#define G1_REG_DEC_STRSWAP32_E(v) ((v) ? BIT(22) : 0)
+#define G1_REG_DEC_STRENDIAN_E(v) ((v) ? BIT(21) : 0)
+#define G1_REG_DEC_INSWAP32_E(v) ((v) ? BIT(20) : 0)
+#define G1_REG_DEC_OUTSWAP32_E(v) ((v) ? BIT(19) : 0)
+#define G1_REG_DEC_DATA_DISC_E(v) ((v) ? BIT(18) : 0)
+#define G1_REG_DEC_LATENCY(v) (((v) << 11) & GENMASK(16, 11))
+#define G1_REG_DEC_CLK_GATE_E(v) ((v) ? BIT(10) : 0)
+#define G1_REG_DEC_IN_ENDIAN(v) ((v) ? BIT(9) : 0)
+#define G1_REG_DEC_OUT_ENDIAN(v) ((v) ? BIT(8) : 0)
+#define G1_REG_DEC_ADV_PRE_DIS(v) ((v) ? BIT(6) : 0)
+#define G1_REG_DEC_SCMD_DIS(v) ((v) ? BIT(5) : 0)
+#define G1_REG_DEC_MAX_BURST(v) (((v) << 0) & GENMASK(4, 0))
+
+#define G1_REG_DEC_MODE(v) (((v) << 28) & GENMASK(31, 28))
+#define G1_REG_RLC_MODE_E(v) ((v) ? BIT(27) : 0)
+#define G1_REG_PIC_INTERLACE_E(v) ((v) ? BIT(23) : 0)
+#define G1_REG_PIC_FIELDMODE_E(v) ((v) ? BIT(22) : 0)
+#define G1_REG_PIC_B_E(v) ((v) ? BIT(21) : 0)
+#define G1_REG_PIC_INTER_E(v) ((v) ? BIT(20) : 0)
+#define G1_REG_PIC_TOPFIELD_E(v) ((v) ? BIT(19) : 0)
+#define G1_REG_FWD_INTERLACE_E(v) ((v) ? BIT(18) : 0)
+#define G1_REG_FILTERING_DIS(v) ((v) ? BIT(14) : 0)
+#define G1_REG_WRITE_MVS_E(v) ((v) ? BIT(12) : 0)
+#define G1_REG_DEC_AXI_WR_ID(v) (((v) << 0) & GENMASK(7, 0))
+
+#define G1_REG_PIC_MB_WIDTH(v) (((v) << 23) & GENMASK(31, 23))
+#define G1_REG_PIC_MB_HEIGHT_P(v) (((v) << 11) & GENMASK(18, 11))
+#define G1_REG_ALT_SCAN_E(v) ((v) ? BIT(6) : 0)
+#define G1_REG_TOPFIELDFIRST_E(v) ((v) ? BIT(5) : 0)
+
+#define G1_REG_STRM_START_BIT(v) (((v) << 26) & GENMASK(31, 26))
+#define G1_REG_QSCALE_TYPE(v) ((v) ? BIT(24) : 0)
+#define G1_REG_CON_MV_E(v) ((v) ? BIT(4) : 0)
+#define G1_REG_INTRA_DC_PREC(v) (((v) << 2) & GENMASK(3, 2))
+#define G1_REG_INTRA_VLC_TAB(v) ((v) ? BIT(1) : 0)
+#define G1_REG_FRAME_PRED_DCT(v) ((v) ? BIT(0) : 0)
+
+#define G1_REG_INIT_QP(v) (((v) << 25) & GENMASK(30, 25))
+#define G1_REG_STREAM_LEN(v) (((v) << 0) & GENMASK(23, 0))
+
+#define G1_REG_ALT_SCAN_FLAG_E(v) ((v) ? BIT(19) : 0)
+#define G1_REG_FCODE_FWD_HOR(v) (((v) << 15) & GENMASK(18, 15))
+#define G1_REG_FCODE_FWD_VER(v) (((v) << 11) & GENMASK(14, 11))
+#define G1_REG_FCODE_BWD_HOR(v) (((v) << 7) & GENMASK(10, 7))
+#define G1_REG_FCODE_BWD_VER(v) (((v) << 3) & GENMASK(6, 3))
+#define G1_REG_MV_ACCURACY_FWD(v) ((v) ? BIT(2) : 0)
+#define G1_REG_MV_ACCURACY_BWD(v) ((v) ? BIT(1) : 0)
+
+#define G1_REG_STARTMB_X(v) (((v) << 23) & GENMASK(31, 23))
+#define G1_REG_STARTMB_Y(v) (((v) << 15) & GENMASK(22, 15))
+
+#define G1_REG_APF_THRESHOLD(v) (((v) << 0) & GENMASK(13, 0))
+
+#define PICT_TOP_FIELD 1
+#define PICT_BOTTOM_FIELD 2
+#define PICT_FRAME 3
+
+static void
+hantro_g1_mpeg2_dec_set_quantization(struct hantro_dev *vpu,
+ struct hantro_ctx *ctx)
+{
+ struct v4l2_ctrl_mpeg2_quantization *quantization;
+
+ quantization = hantro_get_ctrl(ctx,
+ V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION);
+ hantro_mpeg2_dec_copy_qtable(ctx->mpeg2_dec.qtable.cpu,
+ quantization);
+ vdpu_write_relaxed(vpu, ctx->mpeg2_dec.qtable.dma,
+ G1_REG_QTABLE_BASE);
+}
+
+static void
+hantro_g1_mpeg2_dec_set_buffers(struct hantro_dev *vpu, struct hantro_ctx *ctx,
+ struct vb2_buffer *src_buf,
+ struct vb2_buffer *dst_buf,
+ const struct v4l2_mpeg2_sequence *sequence,
+ const struct v4l2_mpeg2_picture *picture,
+ const struct v4l2_ctrl_mpeg2_slice_params *slice_params)
+{
+ dma_addr_t forward_addr = 0, backward_addr = 0;
+ dma_addr_t current_addr, addr;
+ struct vb2_queue *vq;
+
+ vq = v4l2_m2m_get_dst_vq(ctx->fh.m2m_ctx);
+
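+ /*
+ * B pictures need both a backward and a forward reference picture;
+ * P pictures fall through to fetch only the forward one.
+ */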
+ switch (picture->picture_coding_type) {
+ case V4L2_MPEG2_PICTURE_CODING_TYPE_B:
+ backward_addr = hantro_get_ref(vq,
+ slice_params->backward_ref_ts);
+ /* fall-through */
+ case V4L2_MPEG2_PICTURE_CODING_TYPE_P:
+ forward_addr = hantro_get_ref(vq,
+ slice_params->forward_ref_ts);
+ }
+
+ /* Source bitstream buffer */
+ addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+ vdpu_write_relaxed(vpu, addr, G1_REG_RLC_VLC_BASE);
+
+ /* Destination frame buffer */
+ addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
+ current_addr = addr;
+
+ if (picture->picture_structure == PICT_BOTTOM_FIELD)
+ addr += ALIGN(ctx->dst_fmt.width, 16);
+ vdpu_write_relaxed(vpu, addr, G1_REG_DEC_OUT_BASE);
+
+ if (!forward_addr)
+ forward_addr = current_addr;
+ if (!backward_addr)
+ backward_addr = current_addr;
+
+ /* Set forward ref frame (top/bottom field) */
+ if (picture->picture_structure == PICT_FRAME ||
+ picture->picture_coding_type == V4L2_MPEG2_PICTURE_CODING_TYPE_B ||
+ (picture->picture_structure == PICT_TOP_FIELD &&
+ picture->top_field_first) ||
+ (picture->picture_structure == PICT_BOTTOM_FIELD &&
+ !picture->top_field_first)) {
+ vdpu_write_relaxed(vpu, forward_addr, G1_REG_REFER0_BASE);
+ vdpu_write_relaxed(vpu, forward_addr, G1_REG_REFER1_BASE);
+ } else if (picture->picture_structure == PICT_TOP_FIELD) {
+ vdpu_write_relaxed(vpu, forward_addr, G1_REG_REFER0_BASE);
+ vdpu_write_relaxed(vpu, current_addr, G1_REG_REFER1_BASE);
+ } else if (picture->picture_structure == PICT_BOTTOM_FIELD) {
+ vdpu_write_relaxed(vpu, current_addr, G1_REG_REFER0_BASE);
+ vdpu_write_relaxed(vpu, forward_addr, G1_REG_REFER1_BASE);
+ }
+
+ /* Set backward ref frame (top/bottom field) */
+ vdpu_write_relaxed(vpu, backward_addr, G1_REG_REFER2_BASE);
+ vdpu_write_relaxed(vpu, backward_addr, G1_REG_REFER3_BASE);
+}
+
+void hantro_g1_mpeg2_dec_run(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+ const struct v4l2_ctrl_mpeg2_slice_params *slice_params;
+ const struct v4l2_mpeg2_sequence *sequence;
+ const struct v4l2_mpeg2_picture *picture;
+ u32 reg;
+
+ src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+
+ /* Apply request controls if any */
+ v4l2_ctrl_request_setup(src_buf->vb2_buf.req_obj.req,
+ &ctx->ctrl_handler);
+
+ slice_params = hantro_get_ctrl(ctx,
+ V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS);
+ sequence = &slice_params->sequence;
+ picture = &slice_params->picture;
+
+ reg = G1_REG_DEC_AXI_RD_ID(0) |
+ G1_REG_DEC_TIMEOUT_E(1) |
+ G1_REG_DEC_STRSWAP32_E(1) |
+ G1_REG_DEC_STRENDIAN_E(1) |
+ G1_REG_DEC_INSWAP32_E(1) |
+ G1_REG_DEC_OUTSWAP32_E(1) |
+ G1_REG_DEC_DATA_DISC_E(0) |
+ G1_REG_DEC_LATENCY(0) |
+ G1_REG_DEC_CLK_GATE_E(1) |
+ G1_REG_DEC_IN_ENDIAN(1) |
+ G1_REG_DEC_OUT_ENDIAN(1) |
+ G1_REG_DEC_ADV_PRE_DIS(0) |
+ G1_REG_DEC_SCMD_DIS(0) |
+ G1_REG_DEC_MAX_BURST(16);
+ vdpu_write_relaxed(vpu, reg, G1_SWREG(2));
+
+ reg = G1_REG_DEC_MODE(5) |
+ G1_REG_RLC_MODE_E(0) |
+ G1_REG_PIC_INTERLACE_E(!sequence->progressive_sequence) |
+ G1_REG_PIC_FIELDMODE_E(picture->picture_structure != PICT_FRAME) |
+ G1_REG_PIC_B_E(picture->picture_coding_type == V4L2_MPEG2_PICTURE_CODING_TYPE_B) |
+ G1_REG_PIC_INTER_E(picture->picture_coding_type != V4L2_MPEG2_PICTURE_CODING_TYPE_I) |
+ G1_REG_PIC_TOPFIELD_E(picture->picture_structure == PICT_TOP_FIELD) |
+ G1_REG_FWD_INTERLACE_E(0) |
+ G1_REG_FILTERING_DIS(1) |
+ G1_REG_WRITE_MVS_E(0) |
+ G1_REG_DEC_AXI_WR_ID(0);
+ vdpu_write_relaxed(vpu, reg, G1_SWREG(3));
+
+ reg = G1_REG_PIC_MB_WIDTH(MPEG2_MB_WIDTH(ctx->dst_fmt.width)) |
+ G1_REG_PIC_MB_HEIGHT_P(MPEG2_MB_HEIGHT(ctx->dst_fmt.height)) |
+ G1_REG_ALT_SCAN_E(picture->alternate_scan) |
+ G1_REG_TOPFIELDFIRST_E(picture->top_field_first);
+ vdpu_write_relaxed(vpu, reg, G1_SWREG(4));
+
+ reg = G1_REG_STRM_START_BIT(slice_params->data_bit_offset) |
+ G1_REG_QSCALE_TYPE(picture->q_scale_type) |
+ G1_REG_CON_MV_E(picture->concealment_motion_vectors) |
+ G1_REG_INTRA_DC_PREC(picture->intra_dc_precision) |
+ G1_REG_INTRA_VLC_TAB(picture->intra_vlc_format) |
+ G1_REG_FRAME_PRED_DCT(picture->frame_pred_frame_dct);
+ vdpu_write_relaxed(vpu, reg, G1_SWREG(5));
+
+ reg = G1_REG_INIT_QP(1) |
+ G1_REG_STREAM_LEN(slice_params->bit_size >> 3);
+ vdpu_write_relaxed(vpu, reg, G1_SWREG(6));
+
+ reg = G1_REG_ALT_SCAN_FLAG_E(picture->alternate_scan) |
+ G1_REG_FCODE_FWD_HOR(picture->f_code[0][0]) |
+ G1_REG_FCODE_FWD_VER(picture->f_code[0][1]) |
+ G1_REG_FCODE_BWD_HOR(picture->f_code[1][0]) |
+ G1_REG_FCODE_BWD_VER(picture->f_code[1][1]) |
+ G1_REG_MV_ACCURACY_FWD(1) |
+ G1_REG_MV_ACCURACY_BWD(1);
+ vdpu_write_relaxed(vpu, reg, G1_SWREG(18));
+
+ reg = G1_REG_STARTMB_X(0) |
+ G1_REG_STARTMB_Y(0);
+ vdpu_write_relaxed(vpu, reg, G1_SWREG(48));
+
+ reg = G1_REG_APF_THRESHOLD(8);
+ vdpu_write_relaxed(vpu, reg, G1_SWREG(55));
+
+ hantro_g1_mpeg2_dec_set_quantization(vpu, ctx);
+
+ hantro_g1_mpeg2_dec_set_buffers(vpu, ctx, &src_buf->vb2_buf,
+ &dst_buf->vb2_buf,
+ sequence, picture, slice_params);
+
+ /* Controls are no longer in use, we can complete them */
+ v4l2_ctrl_request_complete(src_buf->vb2_buf.req_obj.req,
+ &ctx->ctrl_handler);
+
+ /* Kick the watchdog and start decoding */
+ schedule_delayed_work(&vpu->watchdog_work, msecs_to_jiffies(2000));
+
+ reg = G1_REG_DEC_E(1);
+ vdpu_write(vpu, reg, G1_SWREG(1));
+}
diff --git a/drivers/staging/media/hantro/hantro_g1_regs.h b/drivers/staging/media/hantro/hantro_g1_regs.h
new file mode 100644
index 000000000000..5c0ea7994336
--- /dev/null
+++ b/drivers/staging/media/hantro/hantro_g1_regs.h
@@ -0,0 +1,301 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright 2018 Google LLC.
+ * Tomasz Figa <tfiga@chromium.org>
+ */
+
+#ifndef HANTRO_G1_REGS_H_
+#define HANTRO_G1_REGS_H_
+
+/* Decoder registers. */
+#define G1_REG_INTERRUPT 0x004
+#define G1_REG_INTERRUPT_DEC_PIC_INF BIT(24)
+#define G1_REG_INTERRUPT_DEC_TIMEOUT BIT(18)
+#define G1_REG_INTERRUPT_DEC_SLICE_INT BIT(17)
+#define G1_REG_INTERRUPT_DEC_ERROR_INT BIT(16)
+#define G1_REG_INTERRUPT_DEC_ASO_INT BIT(15)
+#define G1_REG_INTERRUPT_DEC_BUFFER_INT BIT(14)
+#define G1_REG_INTERRUPT_DEC_BUS_INT BIT(13)
+#define G1_REG_INTERRUPT_DEC_RDY_INT BIT(12)
+#define G1_REG_INTERRUPT_DEC_IRQ BIT(8)
+#define G1_REG_INTERRUPT_DEC_IRQ_DIS BIT(4)
+#define G1_REG_INTERRUPT_DEC_E BIT(0)
+#define G1_REG_CONFIG 0x008
+#define G1_REG_CONFIG_DEC_AXI_RD_ID(x) (((x) & 0xff) << 24)
+#define G1_REG_CONFIG_DEC_TIMEOUT_E BIT(23)
+#define G1_REG_CONFIG_DEC_STRSWAP32_E BIT(22)
+#define G1_REG_CONFIG_DEC_STRENDIAN_E BIT(21)
+#define G1_REG_CONFIG_DEC_INSWAP32_E BIT(20)
+#define G1_REG_CONFIG_DEC_OUTSWAP32_E BIT(19)
+#define G1_REG_CONFIG_DEC_DATA_DISC_E BIT(18)
+#define G1_REG_CONFIG_TILED_MODE_MSB BIT(17)
+#define G1_REG_CONFIG_DEC_OUT_TILED_E BIT(17)
+#define G1_REG_CONFIG_DEC_LATENCY(x) (((x) & 0x3f) << 11)
+#define G1_REG_CONFIG_DEC_CLK_GATE_E BIT(10)
+#define G1_REG_CONFIG_DEC_IN_ENDIAN BIT(9)
+#define G1_REG_CONFIG_DEC_OUT_ENDIAN BIT(8)
+#define G1_REG_CONFIG_PRIORITY_MODE(x) (((x) & 0x7) << 5)
+#define G1_REG_CONFIG_TILED_MODE_LSB BIT(7)
+#define G1_REG_CONFIG_DEC_ADV_PRE_DIS BIT(6)
+#define G1_REG_CONFIG_DEC_SCMD_DIS BIT(5)
+#define G1_REG_CONFIG_DEC_MAX_BURST(x) (((x) & 0x1f) << 0)
+#define G1_REG_DEC_CTRL0 0x00c
+#define G1_REG_DEC_CTRL0_DEC_MODE(x) (((x) & 0xf) << 28)
+#define G1_REG_DEC_CTRL0_RLC_MODE_E BIT(27)
+#define G1_REG_DEC_CTRL0_SKIP_MODE BIT(26)
+#define G1_REG_DEC_CTRL0_DIVX3_E BIT(25)
+#define G1_REG_DEC_CTRL0_PJPEG_E BIT(24)
+#define G1_REG_DEC_CTRL0_PIC_INTERLACE_E BIT(23)
+#define G1_REG_DEC_CTRL0_PIC_FIELDMODE_E BIT(22)
+#define G1_REG_DEC_CTRL0_PIC_B_E BIT(21)
+#define G1_REG_DEC_CTRL0_PIC_INTER_E BIT(20)
+#define G1_REG_DEC_CTRL0_PIC_TOPFIELD_E BIT(19)
+#define G1_REG_DEC_CTRL0_FWD_INTERLACE_E BIT(18)
+#define G1_REG_DEC_CTRL0_SORENSON_E BIT(17)
+#define G1_REG_DEC_CTRL0_REF_TOPFIELD_E BIT(16)
+#define G1_REG_DEC_CTRL0_DEC_OUT_DIS BIT(15)
+#define G1_REG_DEC_CTRL0_FILTERING_DIS BIT(14)
+#define G1_REG_DEC_CTRL0_WEBP_E BIT(13)
+#define G1_REG_DEC_CTRL0_MVC_E BIT(13)
+#define G1_REG_DEC_CTRL0_PIC_FIXED_QUANT BIT(13)
+#define G1_REG_DEC_CTRL0_WRITE_MVS_E BIT(12)
+#define G1_REG_DEC_CTRL0_REFTOPFIRST_E BIT(11)
+#define G1_REG_DEC_CTRL0_SEQ_MBAFF_E BIT(10)
+#define G1_REG_DEC_CTRL0_PICORD_COUNT_E BIT(9)
+#define G1_REG_DEC_CTRL0_DEC_AHB_HLOCK_E BIT(8)
+#define G1_REG_DEC_CTRL0_DEC_AXI_WR_ID(x) (((x) & 0xff) << 0)
+#define G1_REG_DEC_CTRL1 0x010
+#define G1_REG_DEC_CTRL1_PIC_MB_WIDTH(x) (((x) & 0x1ff) << 23)
+#define G1_REG_DEC_CTRL1_MB_WIDTH_OFF(x) (((x) & 0xf) << 19)
+#define G1_REG_DEC_CTRL1_PIC_MB_HEIGHT_P(x) (((x) & 0xff) << 11)
+#define G1_REG_DEC_CTRL1_MB_HEIGHT_OFF(x) (((x) & 0xf) << 7)
+#define G1_REG_DEC_CTRL1_ALT_SCAN_E BIT(6)
+#define G1_REG_DEC_CTRL1_TOPFIELDFIRST_E BIT(5)
+#define G1_REG_DEC_CTRL1_REF_FRAMES(x) (((x) & 0x1f) << 0)
+#define G1_REG_DEC_CTRL1_PIC_MB_W_EXT(x) (((x) & 0x7) << 3)
+#define G1_REG_DEC_CTRL1_PIC_MB_H_EXT(x) (((x) & 0x7) << 0)
+#define G1_REG_DEC_CTRL1_PIC_REFER_FLAG BIT(0)
+#define G1_REG_DEC_CTRL2 0x014
+#define G1_REG_DEC_CTRL2_STRM_START_BIT(x) (((x) & 0x3f) << 26)
+#define G1_REG_DEC_CTRL2_SYNC_MARKER_E BIT(25)
+#define G1_REG_DEC_CTRL2_TYPE1_QUANT_E BIT(24)
+#define G1_REG_DEC_CTRL2_CH_QP_OFFSET(x) (((x) & 0x1f) << 19)
+#define G1_REG_DEC_CTRL2_CH_QP_OFFSET2(x) (((x) & 0x1f) << 14)
+#define G1_REG_DEC_CTRL2_FIELDPIC_FLAG_E BIT(0)
+#define G1_REG_DEC_CTRL2_INTRADC_VLC_THR(x) (((x) & 0x7) << 16)
+#define G1_REG_DEC_CTRL2_VOP_TIME_INCR(x) (((x) & 0xffff) << 0)
+#define G1_REG_DEC_CTRL2_DQ_PROFILE BIT(24)
+#define G1_REG_DEC_CTRL2_DQBI_LEVEL BIT(23)
+#define G1_REG_DEC_CTRL2_RANGE_RED_FRM_E BIT(22)
+#define G1_REG_DEC_CTRL2_FAST_UVMC_E BIT(20)
+#define G1_REG_DEC_CTRL2_TRANSDCTAB BIT(17)
+#define G1_REG_DEC_CTRL2_TRANSACFRM(x) (((x) & 0x3) << 15)
+#define G1_REG_DEC_CTRL2_TRANSACFRM2(x) (((x) & 0x3) << 13)
+#define G1_REG_DEC_CTRL2_MB_MODE_TAB(x) (((x) & 0x7) << 10)
+#define G1_REG_DEC_CTRL2_MVTAB(x) (((x) & 0x7) << 7)
+#define G1_REG_DEC_CTRL2_CBPTAB(x) (((x) & 0x7) << 4)
+#define G1_REG_DEC_CTRL2_2MV_BLK_PAT_TAB(x) (((x) & 0x3) << 2)
+#define G1_REG_DEC_CTRL2_4MV_BLK_PAT_TAB(x) (((x) & 0x3) << 0)
+#define G1_REG_DEC_CTRL2_QSCALE_TYPE BIT(24)
+#define G1_REG_DEC_CTRL2_CON_MV_E BIT(4)
+#define G1_REG_DEC_CTRL2_INTRA_DC_PREC(x) (((x) & 0x3) << 2)
+#define G1_REG_DEC_CTRL2_INTRA_VLC_TAB BIT(1)
+#define G1_REG_DEC_CTRL2_FRAME_PRED_DCT BIT(0)
+#define G1_REG_DEC_CTRL2_JPEG_QTABLES(x) (((x) & 0x3) << 11)
+#define G1_REG_DEC_CTRL2_JPEG_MODE(x) (((x) & 0x7) << 8)
+#define G1_REG_DEC_CTRL2_JPEG_FILRIGHT_E BIT(7)
+#define G1_REG_DEC_CTRL2_JPEG_STREAM_ALL BIT(6)
+#define G1_REG_DEC_CTRL2_CR_AC_VLCTABLE BIT(5)
+#define G1_REG_DEC_CTRL2_CB_AC_VLCTABLE BIT(4)
+#define G1_REG_DEC_CTRL2_CR_DC_VLCTABLE BIT(3)
+#define G1_REG_DEC_CTRL2_CB_DC_VLCTABLE BIT(2)
+#define G1_REG_DEC_CTRL2_CR_DC_VLCTABLE3 BIT(1)
+#define G1_REG_DEC_CTRL2_CB_DC_VLCTABLE3 BIT(0)
+#define G1_REG_DEC_CTRL2_STRM1_START_BIT(x) (((x) & 0x3f) << 18)
+#define G1_REG_DEC_CTRL2_HUFFMAN_E BIT(17)
+#define G1_REG_DEC_CTRL2_MULTISTREAM_E BIT(16)
+#define G1_REG_DEC_CTRL2_BOOLEAN_VALUE(x) (((x) & 0xff) << 8)
+#define G1_REG_DEC_CTRL2_BOOLEAN_RANGE(x) (((x) & 0xff) << 0)
+#define G1_REG_DEC_CTRL2_ALPHA_OFFSET(x) (((x) & 0x1f) << 5)
+#define G1_REG_DEC_CTRL2_BETA_OFFSET(x) (((x) & 0x1f) << 0)
+#define G1_REG_DEC_CTRL3 0x018
+#define G1_REG_DEC_CTRL3_START_CODE_E BIT(31)
+#define G1_REG_DEC_CTRL3_INIT_QP(x) (((x) & 0x3f) << 25)
+#define G1_REG_DEC_CTRL3_CH_8PIX_ILEAV_E BIT(24)
+#define G1_REG_DEC_CTRL3_STREAM_LEN_EXT(x) (((x) & 0xff) << 24)
+#define G1_REG_DEC_CTRL3_STREAM_LEN(x) (((x) & 0xffffff) << 0)
+#define G1_REG_DEC_CTRL4 0x01c
+#define G1_REG_DEC_CTRL4_CABAC_E BIT(31)
+#define G1_REG_DEC_CTRL4_BLACKWHITE_E BIT(30)
+#define G1_REG_DEC_CTRL4_DIR_8X8_INFER_E BIT(29)
+#define G1_REG_DEC_CTRL4_WEIGHT_PRED_E BIT(28)
+#define G1_REG_DEC_CTRL4_WEIGHT_BIPR_IDC(x) (((x) & 0x3) << 26)
+#define G1_REG_DEC_CTRL4_AVS_H264_H_EXT BIT(25)
+#define G1_REG_DEC_CTRL4_FRAMENUM_LEN(x) (((x) & 0x1f) << 16)
+#define G1_REG_DEC_CTRL4_FRAMENUM(x) (((x) & 0xffff) << 0)
+#define G1_REG_DEC_CTRL4_BITPLANE0_E BIT(31)
+#define G1_REG_DEC_CTRL4_BITPLANE1_E BIT(30)
+#define G1_REG_DEC_CTRL4_BITPLANE2_E BIT(29)
+#define G1_REG_DEC_CTRL4_ALT_PQUANT(x) (((x) & 0x1f) << 24)
+#define G1_REG_DEC_CTRL4_DQ_EDGES(x) (((x) & 0xf) << 20)
+#define G1_REG_DEC_CTRL4_TTMBF BIT(19)
+#define G1_REG_DEC_CTRL4_PQINDEX(x) (((x) & 0x1f) << 14)
+#define G1_REG_DEC_CTRL4_VC1_HEIGHT_EXT BIT(13)
+#define G1_REG_DEC_CTRL4_BILIN_MC_E BIT(12)
+#define G1_REG_DEC_CTRL4_UNIQP_E BIT(11)
+#define G1_REG_DEC_CTRL4_HALFQP_E BIT(10)
+#define G1_REG_DEC_CTRL4_TTFRM(x) (((x) & 0x3) << 8)
+#define G1_REG_DEC_CTRL4_2ND_BYTE_EMUL_E BIT(7)
+#define G1_REG_DEC_CTRL4_DQUANT_E BIT(6)
+#define G1_REG_DEC_CTRL4_VC1_ADV_E BIT(5)
+#define G1_REG_DEC_CTRL4_PJPEG_FILDOWN_E BIT(26)
+#define G1_REG_DEC_CTRL4_PJPEG_WDIV8 BIT(25)
+#define G1_REG_DEC_CTRL4_PJPEG_HDIV8 BIT(24)
+#define G1_REG_DEC_CTRL4_PJPEG_AH(x) (((x) & 0xf) << 20)
+#define G1_REG_DEC_CTRL4_PJPEG_AL(x) (((x) & 0xf) << 16)
+#define G1_REG_DEC_CTRL4_PJPEG_SS(x) (((x) & 0xff) << 8)
+#define G1_REG_DEC_CTRL4_PJPEG_SE(x) (((x) & 0xff) << 0)
+#define G1_REG_DEC_CTRL4_DCT1_START_BIT(x) (((x) & 0x3f) << 26)
+#define G1_REG_DEC_CTRL4_DCT2_START_BIT(x) (((x) & 0x3f) << 20)
+#define G1_REG_DEC_CTRL4_CH_MV_RES BIT(13)
+#define G1_REG_DEC_CTRL4_INIT_DC_MATCH0(x) (((x) & 0x7) << 9)
+#define G1_REG_DEC_CTRL4_INIT_DC_MATCH1(x) (((x) & 0x7) << 6)
+#define G1_REG_DEC_CTRL4_VP7_VERSION BIT(5)
+#define G1_REG_DEC_CTRL5 0x020
+#define G1_REG_DEC_CTRL5_CONST_INTRA_E BIT(31)
+#define G1_REG_DEC_CTRL5_FILT_CTRL_PRES BIT(30)
+#define G1_REG_DEC_CTRL5_RDPIC_CNT_PRES BIT(29)
+#define G1_REG_DEC_CTRL5_8X8TRANS_FLAG_E BIT(28)
+#define G1_REG_DEC_CTRL5_REFPIC_MK_LEN(x) (((x) & 0x7ff) << 17)
+#define G1_REG_DEC_CTRL5_IDR_PIC_E BIT(16)
+#define G1_REG_DEC_CTRL5_IDR_PIC_ID(x) (((x) & 0xffff) << 0)
+#define G1_REG_DEC_CTRL5_MV_SCALEFACTOR(x) (((x) & 0xff) << 24)
+#define G1_REG_DEC_CTRL5_REF_DIST_FWD(x) (((x) & 0x1f) << 19)
+#define G1_REG_DEC_CTRL5_REF_DIST_BWD(x) (((x) & 0x1f) << 14)
+#define G1_REG_DEC_CTRL5_LOOP_FILT_LIMIT(x) (((x) & 0xf) << 14)
+#define G1_REG_DEC_CTRL5_VARIANCE_TEST_E BIT(13)
+#define G1_REG_DEC_CTRL5_MV_THRESHOLD(x) (((x) & 0x7) << 10)
+#define G1_REG_DEC_CTRL5_VAR_THRESHOLD(x) (((x) & 0x3ff) << 0)
+#define G1_REG_DEC_CTRL5_DIVX_IDCT_E BIT(8)
+#define G1_REG_DEC_CTRL5_DIVX3_SLICE_SIZE(x) (((x) & 0xff) << 0)
+#define G1_REG_DEC_CTRL5_PJPEG_REST_FREQ(x) (((x) & 0xffff) << 0)
+#define G1_REG_DEC_CTRL5_RV_PROFILE(x) (((x) & 0x3) << 30)
+#define G1_REG_DEC_CTRL5_RV_OSV_QUANT(x) (((x) & 0x3) << 28)
+#define G1_REG_DEC_CTRL5_RV_FWD_SCALE(x) (((x) & 0x3fff) << 14)
+#define G1_REG_DEC_CTRL5_RV_BWD_SCALE(x) (((x) & 0x3fff) << 0)
+#define G1_REG_DEC_CTRL5_INIT_DC_COMP0(x) (((x) & 0xffff) << 16)
+#define G1_REG_DEC_CTRL5_INIT_DC_COMP1(x) (((x) & 0xffff) << 0)
+#define G1_REG_DEC_CTRL6 0x024
+#define G1_REG_DEC_CTRL6_PPS_ID(x) (((x) & 0xff) << 24)
+#define G1_REG_DEC_CTRL6_REFIDX1_ACTIVE(x) (((x) & 0x1f) << 19)
+#define G1_REG_DEC_CTRL6_REFIDX0_ACTIVE(x) (((x) & 0x1f) << 14)
+#define G1_REG_DEC_CTRL6_POC_LENGTH(x) (((x) & 0xff) << 0)
+#define G1_REG_DEC_CTRL6_ICOMP0_E BIT(24)
+#define G1_REG_DEC_CTRL6_ISCALE0(x) (((x) & 0xff) << 16)
+#define G1_REG_DEC_CTRL6_ISHIFT0(x) (((x) & 0xffff) << 0)
+#define G1_REG_DEC_CTRL6_STREAM1_LEN(x) (((x) & 0xffffff) << 0)
+#define G1_REG_DEC_CTRL6_PIC_SLICE_AM(x) (((x) & 0x1fff) << 0)
+#define G1_REG_DEC_CTRL6_COEFFS_PART_AM(x) (((x) & 0xf) << 24)
+#define G1_REG_FWD_PIC(i) (0x028 + ((i) * 0x4))
+#define G1_REG_FWD_PIC_PINIT_RLIST_F5(x) (((x) & 0x1f) << 25)
+#define G1_REG_FWD_PIC_PINIT_RLIST_F4(x) (((x) & 0x1f) << 20)
+#define G1_REG_FWD_PIC_PINIT_RLIST_F3(x) (((x) & 0x1f) << 15)
+#define G1_REG_FWD_PIC_PINIT_RLIST_F2(x) (((x) & 0x1f) << 10)
+#define G1_REG_FWD_PIC_PINIT_RLIST_F1(x) (((x) & 0x1f) << 5)
+#define G1_REG_FWD_PIC_PINIT_RLIST_F0(x) (((x) & 0x1f) << 0)
+#define G1_REG_FWD_PIC1_ICOMP1_E BIT(24)
+#define G1_REG_FWD_PIC1_ISCALE1(x) (((x) & 0xff) << 16)
+#define G1_REG_FWD_PIC1_ISHIFT1(x) (((x) & 0xffff) << 0)
+#define G1_REG_FWD_PIC1_SEGMENT_BASE(x) ((x) << 0)
+#define G1_REG_FWD_PIC1_SEGMENT_UPD_E BIT(1)
+#define G1_REG_FWD_PIC1_SEGMENT_E BIT(0)
+#define G1_REG_DEC_CTRL7 0x02c
+#define G1_REG_DEC_CTRL7_PINIT_RLIST_F15(x) (((x) & 0x1f) << 25)
+#define G1_REG_DEC_CTRL7_PINIT_RLIST_F14(x) (((x) & 0x1f) << 20)
+#define G1_REG_DEC_CTRL7_PINIT_RLIST_F13(x) (((x) & 0x1f) << 15)
+#define G1_REG_DEC_CTRL7_PINIT_RLIST_F12(x) (((x) & 0x1f) << 10)
+#define G1_REG_DEC_CTRL7_PINIT_RLIST_F11(x) (((x) & 0x1f) << 5)
+#define G1_REG_DEC_CTRL7_PINIT_RLIST_F10(x) (((x) & 0x1f) << 0)
+#define G1_REG_DEC_CTRL7_ICOMP2_E BIT(24)
+#define G1_REG_DEC_CTRL7_ISCALE2(x) (((x) & 0xff) << 16)
+#define G1_REG_DEC_CTRL7_ISHIFT2(x) (((x) & 0xffff) << 0)
+#define G1_REG_DEC_CTRL7_DCT3_START_BIT(x) (((x) & 0x3f) << 24)
+#define G1_REG_DEC_CTRL7_DCT4_START_BIT(x) (((x) & 0x3f) << 18)
+#define G1_REG_DEC_CTRL7_DCT5_START_BIT(x) (((x) & 0x3f) << 12)
+#define G1_REG_DEC_CTRL7_DCT6_START_BIT(x) (((x) & 0x3f) << 6)
+#define G1_REG_DEC_CTRL7_DCT7_START_BIT(x) (((x) & 0x3f) << 0)
+#define G1_REG_ADDR_STR 0x030
+#define G1_REG_ADDR_DST 0x034
+#define G1_REG_ADDR_REF(i) (0x038 + ((i) * 0x4))
+#define G1_REG_ADDR_REF_FIELD_E BIT(1)
+#define G1_REG_ADDR_REF_TOPC_E BIT(0)
+#define G1_REG_REF_PIC(i) (0x078 + ((i) * 0x4))
+#define G1_REG_REF_PIC_FILT_TYPE_E BIT(31)
+#define G1_REG_REF_PIC_FILT_SHARPNESS(x) (((x) & 0x7) << 28)
+#define G1_REG_REF_PIC_MB_ADJ_0(x) (((x) & 0x7f) << 21)
+#define G1_REG_REF_PIC_MB_ADJ_1(x) (((x) & 0x7f) << 14)
+#define G1_REG_REF_PIC_MB_ADJ_2(x) (((x) & 0x7f) << 7)
+#define G1_REG_REF_PIC_MB_ADJ_3(x) (((x) & 0x7f) << 0)
+#define G1_REG_REF_PIC_REFER1_NBR(x) (((x) & 0xffff) << 16)
+#define G1_REG_REF_PIC_REFER0_NBR(x) (((x) & 0xffff) << 0)
+#define G1_REG_REF_PIC_LF_LEVEL_0(x) (((x) & 0x3f) << 18)
+#define G1_REG_REF_PIC_LF_LEVEL_1(x) (((x) & 0x3f) << 12)
+#define G1_REG_REF_PIC_LF_LEVEL_2(x) (((x) & 0x3f) << 6)
+#define G1_REG_REF_PIC_LF_LEVEL_3(x) (((x) & 0x3f) << 0)
+#define G1_REG_REF_PIC_QUANT_DELTA_0(x) (((x) & 0x1f) << 27)
+#define G1_REG_REF_PIC_QUANT_DELTA_1(x) (((x) & 0x1f) << 22)
+#define G1_REG_REF_PIC_QUANT_0(x) (((x) & 0x7ff) << 11)
+#define G1_REG_REF_PIC_QUANT_1(x) (((x) & 0x7ff) << 0)
+#define G1_REG_LT_REF 0x098
+#define G1_REG_VALID_REF 0x09c
+#define G1_REG_ADDR_QTABLE 0x0a0
+#define G1_REG_ADDR_DIR_MV 0x0a4
+#define G1_REG_BD_REF_PIC(i) (0x0a8 + ((i) * 0x4))
+#define G1_REG_BD_REF_PIC_BINIT_RLIST_B2(x) (((x) & 0x1f) << 25)
+#define G1_REG_BD_REF_PIC_BINIT_RLIST_F2(x) (((x) & 0x1f) << 20)
+#define G1_REG_BD_REF_PIC_BINIT_RLIST_B1(x) (((x) & 0x1f) << 15)
+#define G1_REG_BD_REF_PIC_BINIT_RLIST_F1(x) (((x) & 0x1f) << 10)
+#define G1_REG_BD_REF_PIC_BINIT_RLIST_B0(x) (((x) & 0x1f) << 5)
+#define G1_REG_BD_REF_PIC_BINIT_RLIST_F0(x) (((x) & 0x1f) << 0)
+#define G1_REG_BD_REF_PIC_PRED_TAP_2_M1(x) (((x) & 0x3) << 10)
+#define G1_REG_BD_REF_PIC_PRED_TAP_2_4(x) (((x) & 0x3) << 8)
+#define G1_REG_BD_REF_PIC_PRED_TAP_4_M1(x) (((x) & 0x3) << 6)
+#define G1_REG_BD_REF_PIC_PRED_TAP_4_4(x) (((x) & 0x3) << 4)
+#define G1_REG_BD_REF_PIC_PRED_TAP_6_M1(x) (((x) & 0x3) << 2)
+#define G1_REG_BD_REF_PIC_PRED_TAP_6_4(x) (((x) & 0x3) << 0)
+#define G1_REG_BD_REF_PIC_QUANT_DELTA_2(x) (((x) & 0x1f) << 27)
+#define G1_REG_BD_REF_PIC_QUANT_DELTA_3(x) (((x) & 0x1f) << 22)
+#define G1_REG_BD_REF_PIC_QUANT_2(x) (((x) & 0x7ff) << 11)
+#define G1_REG_BD_REF_PIC_QUANT_3(x) (((x) & 0x7ff) << 0)
+#define G1_REG_BD_P_REF_PIC 0x0bc
+#define G1_REG_BD_P_REF_PIC_QUANT_DELTA_4(x) (((x) & 0x1f) << 27)
+#define G1_REG_BD_P_REF_PIC_PINIT_RLIST_F3(x) (((x) & 0x1f) << 25)
+#define G1_REG_BD_P_REF_PIC_PINIT_RLIST_F2(x) (((x) & 0x1f) << 20)
+#define G1_REG_BD_P_REF_PIC_PINIT_RLIST_F1(x) (((x) & 0x1f) << 15)
+#define G1_REG_BD_P_REF_PIC_PINIT_RLIST_F0(x) (((x) & 0x1f) << 10)
+#define G1_REG_BD_P_REF_PIC_BINIT_RLIST_B15(x) (((x) & 0x1f) << 5)
+#define G1_REG_BD_P_REF_PIC_BINIT_RLIST_F15(x) (((x) & 0x1f) << 0)
+#define G1_REG_ERR_CONC 0x0c0
+#define G1_REG_ERR_CONC_STARTMB_X(x) (((x) & 0x1ff) << 23)
+#define G1_REG_ERR_CONC_STARTMB_Y(x) (((x) & 0xff) << 15)
+#define G1_REG_PRED_FLT 0x0c4
+#define G1_REG_PRED_FLT_PRED_BC_TAP_0_0(x) (((x) & 0x3ff) << 22)
+#define G1_REG_PRED_FLT_PRED_BC_TAP_0_1(x) (((x) & 0x3ff) << 12)
+#define G1_REG_PRED_FLT_PRED_BC_TAP_0_2(x) (((x) & 0x3ff) << 2)
+#define G1_REG_REF_BUF_CTRL 0x0cc
+#define G1_REG_REF_BUF_CTRL_REFBU_E BIT(31)
+#define G1_REG_REF_BUF_CTRL_REFBU_THR(x) (((x) & 0xfff) << 19)
+#define G1_REG_REF_BUF_CTRL_REFBU_PICID(x) (((x) & 0x1f) << 14)
+#define G1_REG_REF_BUF_CTRL_REFBU_EVAL_E BIT(13)
+#define G1_REG_REF_BUF_CTRL_REFBU_FPARMOD_E BIT(12)
+#define G1_REG_REF_BUF_CTRL_REFBU_Y_OFFSET(x) (((x) & 0x1ff) << 0)
+#define G1_REG_REF_BUF_CTRL2 0x0dc
+#define G1_REG_REF_BUF_CTRL2_REFBU2_BUF_E BIT(31)
+#define G1_REG_REF_BUF_CTRL2_REFBU2_THR(x) (((x) & 0xfff) << 19)
+#define G1_REG_REF_BUF_CTRL2_REFBU2_PICID(x) (((x) & 0x1f) << 14)
+#define G1_REG_REF_BUF_CTRL2_APF_THRESHOLD(x) (((x) & 0x3fff) << 0)
+#define G1_REG_SOFT_RESET 0x194
+
+#endif /* HANTRO_G1_REGS_H_ */
diff --git a/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c b/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c
new file mode 100644
index 000000000000..0c1e3043dc7e
--- /dev/null
+++ b/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
+ */
+
+#include <asm/unaligned.h>
+#include <media/v4l2-mem2mem.h>
+#include "hantro_jpeg.h"
+#include "hantro.h"
+#include "hantro_v4l2.h"
+#include "hantro_hw.h"
+#include "hantro_h1_regs.h"
+
+#define H1_JPEG_QUANT_TABLE_COUNT 16
+
+static void hantro_h1_set_src_img_ctrl(struct hantro_dev *vpu,
+ struct hantro_ctx *ctx)
+{
+ struct v4l2_pix_format_mplane *pix_fmt = &ctx->src_fmt;
+ u32 reg;
+
+ reg = H1_REG_IN_IMG_CTRL_ROW_LEN(pix_fmt->width)
+ | H1_REG_IN_IMG_CTRL_OVRFLR_D4(0)
+ | H1_REG_IN_IMG_CTRL_OVRFLB_D4(0)
+ | H1_REG_IN_IMG_CTRL_FMT(ctx->vpu_src_fmt->enc_fmt);
+ vepu_write_relaxed(vpu, reg, H1_REG_IN_IMG_CTRL);
+}
+
+static void hantro_h1_jpeg_enc_set_buffers(struct hantro_dev *vpu,
+ struct hantro_ctx *ctx,
+ struct vb2_buffer *src_buf)
+{
+ struct v4l2_pix_format_mplane *pix_fmt = &ctx->src_fmt;
+ dma_addr_t src[3];
+
+ WARN_ON(pix_fmt->num_planes > 3);
+
+ vepu_write_relaxed(vpu, ctx->jpeg_enc.bounce_buffer.dma,
+ H1_REG_ADDR_OUTPUT_STREAM);
+ vepu_write_relaxed(vpu, ctx->jpeg_enc.bounce_buffer.size,
+ H1_REG_STR_BUF_LIMIT);
+
+ if (pix_fmt->num_planes == 1) {
+ src[0] = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+ /* All single-plane formats we support are packed (chroma interleaved with luma) */
+ vepu_write_relaxed(vpu, src[0], H1_REG_ADDR_IN_PLANE_0);
+ } else if (pix_fmt->num_planes == 2) {
+ src[0] = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+ src[1] = vb2_dma_contig_plane_dma_addr(src_buf, 1);
+ vepu_write_relaxed(vpu, src[0], H1_REG_ADDR_IN_PLANE_0);
+ vepu_write_relaxed(vpu, src[1], H1_REG_ADDR_IN_PLANE_1);
+ } else {
+ src[0] = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+ src[1] = vb2_dma_contig_plane_dma_addr(src_buf, 1);
+ src[2] = vb2_dma_contig_plane_dma_addr(src_buf, 2);
+ vepu_write_relaxed(vpu, src[0], H1_REG_ADDR_IN_PLANE_0);
+ vepu_write_relaxed(vpu, src[1], H1_REG_ADDR_IN_PLANE_1);
+ vepu_write_relaxed(vpu, src[2], H1_REG_ADDR_IN_PLANE_2);
+ }
+}
+
+static void
+hantro_h1_jpeg_enc_set_qtable(struct hantro_dev *vpu,
+ unsigned char *luma_qtable,
+ unsigned char *chroma_qtable)
+{
+ u32 reg, i;
+
+ for (i = 0; i < H1_JPEG_QUANT_TABLE_COUNT; i++) {
+ reg = get_unaligned_be32(&luma_qtable[i * 4]);
+ vepu_write_relaxed(vpu, reg, H1_REG_JPEG_LUMA_QUAT(i));
+
+ reg = get_unaligned_be32(&chroma_qtable[i * 4]);
+ vepu_write_relaxed(vpu, reg, H1_REG_JPEG_CHROMA_QUAT(i));
+ }
+}
+
+void hantro_h1_jpeg_enc_run(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+ struct hantro_jpeg_ctx jpeg_ctx;
+ u32 reg;
+
+ src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+
+ memset(&jpeg_ctx, 0, sizeof(jpeg_ctx));
+ jpeg_ctx.buffer = vb2_plane_vaddr(&dst_buf->vb2_buf, 0);
+ jpeg_ctx.width = ctx->dst_fmt.width;
+ jpeg_ctx.height = ctx->dst_fmt.height;
+ jpeg_ctx.quality = ctx->jpeg_quality;
+ hantro_jpeg_header_assemble(&jpeg_ctx);
+
+ /* Switch to JPEG encoder mode before writing registers */
+ vepu_write_relaxed(vpu, H1_REG_ENC_CTRL_ENC_MODE_JPEG,
+ H1_REG_ENC_CTRL);
+
+ hantro_h1_set_src_img_ctrl(vpu, ctx);
+ hantro_h1_jpeg_enc_set_buffers(vpu, ctx, &src_buf->vb2_buf);
+ hantro_h1_jpeg_enc_set_qtable(vpu,
+ hantro_jpeg_get_qtable(&jpeg_ctx, 0),
+ hantro_jpeg_get_qtable(&jpeg_ctx, 1));
+
+ reg = H1_REG_AXI_CTRL_OUTPUT_SWAP16
+ | H1_REG_AXI_CTRL_INPUT_SWAP16
+ | H1_REG_AXI_CTRL_BURST_LEN(16)
+ | H1_REG_AXI_CTRL_OUTPUT_SWAP32
+ | H1_REG_AXI_CTRL_INPUT_SWAP32
+ | H1_REG_AXI_CTRL_OUTPUT_SWAP8
+ | H1_REG_AXI_CTRL_INPUT_SWAP8;
+ /* Make sure that all registers are written at this point. */
+ vepu_write(vpu, reg, H1_REG_AXI_CTRL);
+
+ reg = H1_REG_ENC_CTRL_WIDTH(JPEG_MB_WIDTH(ctx->src_fmt.width))
+ | H1_REG_ENC_CTRL_HEIGHT(JPEG_MB_HEIGHT(ctx->src_fmt.height))
+ | H1_REG_ENC_CTRL_ENC_MODE_JPEG
+ | H1_REG_ENC_PIC_INTRA
+ | H1_REG_ENC_CTRL_EN_BIT;
+ /* Kick the watchdog and start encoding */
+ schedule_delayed_work(&vpu->watchdog_work, msecs_to_jiffies(2000));
+ vepu_write(vpu, reg, H1_REG_ENC_CTRL);
+}
diff --git a/drivers/staging/media/hantro/hantro_h1_regs.h b/drivers/staging/media/hantro/hantro_h1_regs.h
new file mode 100644
index 000000000000..d6e9825bb5c7
--- /dev/null
+++ b/drivers/staging/media/hantro/hantro_h1_regs.h
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright 2018 Google LLC.
+ * Tomasz Figa <tfiga@chromium.org>
+ */
+
+#ifndef HANTRO_H1_REGS_H_
+#define HANTRO_H1_REGS_H_
+
+/* Encoder registers. */
+#define H1_REG_INTERRUPT 0x004
+#define H1_REG_INTERRUPT_FRAME_RDY BIT(2)
+#define H1_REG_INTERRUPT_DIS_BIT BIT(1)
+#define H1_REG_INTERRUPT_BIT BIT(0)
+#define H1_REG_AXI_CTRL 0x008
+#define H1_REG_AXI_CTRL_OUTPUT_SWAP16 BIT(15)
+#define H1_REG_AXI_CTRL_INPUT_SWAP16 BIT(14)
+#define H1_REG_AXI_CTRL_BURST_LEN(x) ((x) << 8)
+#define H1_REG_AXI_CTRL_GATE_BIT BIT(4)
+#define H1_REG_AXI_CTRL_OUTPUT_SWAP32 BIT(3)
+#define H1_REG_AXI_CTRL_INPUT_SWAP32 BIT(2)
+#define H1_REG_AXI_CTRL_OUTPUT_SWAP8 BIT(1)
+#define H1_REG_AXI_CTRL_INPUT_SWAP8 BIT(0)
+#define H1_REG_ADDR_OUTPUT_STREAM 0x014
+#define H1_REG_ADDR_OUTPUT_CTRL 0x018
+#define H1_REG_ADDR_REF_LUMA 0x01c
+#define H1_REG_ADDR_REF_CHROMA 0x020
+#define H1_REG_ADDR_REC_LUMA 0x024
+#define H1_REG_ADDR_REC_CHROMA 0x028
+#define H1_REG_ADDR_IN_PLANE_0 0x02c
+#define H1_REG_ADDR_IN_PLANE_1 0x030
+#define H1_REG_ADDR_IN_PLANE_2 0x034
+#define H1_REG_ENC_CTRL 0x038
+#define H1_REG_ENC_CTRL_TIMEOUT_EN BIT(31)
+#define H1_REG_ENC_CTRL_NAL_MODE_BIT BIT(29)
+#define H1_REG_ENC_CTRL_WIDTH(w) ((w) << 19)
+#define H1_REG_ENC_CTRL_HEIGHT(h) ((h) << 10)
+#define H1_REG_ENC_PIC_INTER (0x0 << 3)
+#define H1_REG_ENC_PIC_INTRA (0x1 << 3)
+#define H1_REG_ENC_PIC_MVCINTER (0x2 << 3)
+#define H1_REG_ENC_CTRL_ENC_MODE_H264 (0x3 << 1)
+#define H1_REG_ENC_CTRL_ENC_MODE_JPEG (0x2 << 1)
+#define H1_REG_ENC_CTRL_ENC_MODE_VP8 (0x1 << 1)
+#define H1_REG_ENC_CTRL_EN_BIT BIT(0)
+#define H1_REG_IN_IMG_CTRL 0x03c
+#define H1_REG_IN_IMG_CTRL_ROW_LEN(x) ((x) << 12)
+#define H1_REG_IN_IMG_CTRL_OVRFLR_D4(x) ((x) << 10)
+#define H1_REG_IN_IMG_CTRL_OVRFLB_D4(x) ((x) << 6)
+#define H1_REG_IN_IMG_CTRL_FMT(x) ((x) << 2)
+#define H1_REG_ENC_CTRL0 0x040
+#define H1_REG_ENC_CTRL0_INIT_QP(x) ((x) << 26)
+#define H1_REG_ENC_CTRL0_SLICE_ALPHA(x) ((x) << 22)
+#define H1_REG_ENC_CTRL0_SLICE_BETA(x) ((x) << 18)
+#define H1_REG_ENC_CTRL0_CHROMA_QP_OFFSET(x) ((x) << 13)
+#define H1_REG_ENC_CTRL0_FILTER_DIS(x) ((x) << 5)
+#define H1_REG_ENC_CTRL0_IDR_PICID(x) ((x) << 1)
+#define H1_REG_ENC_CTRL0_CONSTR_INTRA_PRED BIT(0)
+#define H1_REG_ENC_CTRL1 0x044
+#define H1_REG_ENC_CTRL1_PPS_ID(x) ((x) << 24)
+#define H1_REG_ENC_CTRL1_INTRA_PRED_MODE(x) ((x) << 16)
+#define H1_REG_ENC_CTRL1_FRAME_NUM(x) ((x))
+#define H1_REG_ENC_CTRL2 0x048
+#define H1_REG_ENC_CTRL2_DEBLOCKING_FILETER_MODE(x) ((x) << 30)
+#define H1_REG_ENC_CTRL2_H264_SLICE_SIZE(x) ((x) << 23)
+#define H1_REG_ENC_CTRL2_DISABLE_QUARTER_PIXMV BIT(22)
+#define H1_REG_ENC_CTRL2_TRANS8X8_MODE_EN BIT(21)
+#define H1_REG_ENC_CTRL2_CABAC_INIT_IDC(x) ((x) << 19)
+#define H1_REG_ENC_CTRL2_ENTROPY_CODING_MODE BIT(18)
+#define H1_REG_ENC_CTRL2_H264_INTER4X4_MODE BIT(17)
+#define H1_REG_ENC_CTRL2_H264_STREAM_MODE BIT(16)
+#define H1_REG_ENC_CTRL2_INTRA16X16_MODE(x) ((x))
+#define H1_REG_ENC_CTRL3 0x04c
+#define H1_REG_ENC_CTRL3_MUTIMV_EN BIT(30)
+#define H1_REG_ENC_CTRL3_MV_PENALTY_1_4P(x) ((x) << 20)
+#define H1_REG_ENC_CTRL3_MV_PENALTY_4P(x) ((x) << 10)
+#define H1_REG_ENC_CTRL3_MV_PENALTY_1P(x) ((x))
+#define H1_REG_ENC_CTRL4 0x050
+#define H1_REG_ENC_CTRL4_MV_PENALTY_16X8_8X16(x) ((x) << 20)
+#define H1_REG_ENC_CTRL4_MV_PENALTY_8X8(x) ((x) << 10)
+#define H1_REG_ENC_CTRL4_8X4_4X8(x) ((x))
+#define H1_REG_ENC_CTRL5 0x054
+#define H1_REG_ENC_CTRL5_MACROBLOCK_PENALTY(x) ((x) << 24)
+#define H1_REG_ENC_CTRL5_COMPLETE_SLICES(x) ((x) << 16)
+#define H1_REG_ENC_CTRL5_INTER_MODE(x) ((x))
+#define H1_REG_STR_HDR_REM_MSB 0x058
+#define H1_REG_STR_HDR_REM_LSB 0x05c
+#define H1_REG_STR_BUF_LIMIT 0x060
+#define H1_REG_MAD_CTRL 0x064
+#define H1_REG_MAD_CTRL_QP_ADJUST(x) ((x) << 28)
+#define H1_REG_MAD_CTRL_MAD_THREDHOLD(x) ((x) << 22)
+#define H1_REG_MAD_CTRL_QP_SUM_DIV2(x) ((x))
+#define H1_REG_ADDR_VP8_PROB_CNT 0x068
+#define H1_REG_QP_VAL 0x06c
+#define H1_REG_QP_VAL_LUM(x) ((x) << 26)
+#define H1_REG_QP_VAL_MAX(x) ((x) << 20)
+#define H1_REG_QP_VAL_MIN(x) ((x) << 14)
+#define H1_REG_QP_VAL_CHECKPOINT_DISTAN(x) ((x))
+#define H1_REG_VP8_QP_VAL(i) (0x06c + ((i) * 0x4))
+#define H1_REG_CHECKPOINT(i) (0x070 + ((i) * 0x4))
+#define H1_REG_CHECKPOINT_CHECK0(x) (((x) & 0xffff))
+#define H1_REG_CHECKPOINT_CHECK1(x) (((x) & 0xffff) << 16)
+#define H1_REG_CHECKPOINT_RESULT(x) ((((x) >> (16 - 16 \
+ * (i & 1))) & 0xffff) \
+ * 32)
+#define H1_REG_CHKPT_WORD_ERR(i) (0x084 + ((i) * 0x4))
+#define H1_REG_CHKPT_WORD_ERR_CHK0(x) (((x) & 0xffff))
+#define H1_REG_CHKPT_WORD_ERR_CHK1(x) (((x) & 0xffff) << 16)
+#define H1_REG_VP8_BOOL_ENC 0x08c
+#define H1_REG_CHKPT_DELTA_QP 0x090
+#define H1_REG_CHKPT_DELTA_QP_CHK0(x) (((x) & 0x0f) << 0)
+#define H1_REG_CHKPT_DELTA_QP_CHK1(x) (((x) & 0x0f) << 4)
+#define H1_REG_CHKPT_DELTA_QP_CHK2(x) (((x) & 0x0f) << 8)
+#define H1_REG_CHKPT_DELTA_QP_CHK3(x) (((x) & 0x0f) << 12)
+#define H1_REG_CHKPT_DELTA_QP_CHK4(x) (((x) & 0x0f) << 16)
+#define H1_REG_CHKPT_DELTA_QP_CHK5(x) (((x) & 0x0f) << 20)
+#define H1_REG_CHKPT_DELTA_QP_CHK6(x) (((x) & 0x0f) << 24)
+#define H1_REG_VP8_CTRL0 0x090
+#define H1_REG_RLC_CTRL 0x094
+#define H1_REG_RLC_CTRL_STR_OFFS_SHIFT 23
+#define H1_REG_RLC_CTRL_STR_OFFS_MASK (0x3f << 23)
+#define H1_REG_RLC_CTRL_RLC_SUM(x) ((x))
+#define H1_REG_MB_CTRL 0x098
+#define H1_REG_MB_CNT_OUT(x) (((x) & 0xffff))
+#define H1_REG_MB_CNT_SET(x) (((x) & 0xffff) << 16)
+#define H1_REG_ADDR_NEXT_PIC 0x09c
+#define H1_REG_JPEG_LUMA_QUAT(i) (0x100 + ((i) * 0x4))
+#define H1_REG_JPEG_CHROMA_QUAT(i) (0x140 + ((i) * 0x4))
+#define H1_REG_STABILIZATION_OUTPUT 0x0A0
+#define H1_REG_ADDR_CABAC_TBL 0x0cc
+#define H1_REG_ADDR_MV_OUT 0x0d0
+#define H1_REG_RGB_YUV_COEFF(i) (0x0d4 + ((i) * 0x4))
+#define H1_REG_RGB_MASK_MSB 0x0dc
+#define H1_REG_INTRA_AREA_CTRL 0x0e0
+#define H1_REG_CIR_INTRA_CTRL 0x0e4
+#define H1_REG_INTRA_SLICE_BITMAP(i) (0x0e8 + ((i) * 0x4))
+#define H1_REG_ADDR_VP8_DCT_PART(i) (0x0e8 + ((i) * 0x4))
+#define H1_REG_FIRST_ROI_AREA 0x0f0
+#define H1_REG_SECOND_ROI_AREA 0x0f4
+#define H1_REG_MVC_CTRL 0x0f8
+#define H1_REG_MVC_CTRL_MV16X16_FAVOR(x) ((x) << 28)
+#define H1_REG_VP8_INTRA_PENALTY(i) (0x100 + ((i) * 0x4))
+#define H1_REG_ADDR_VP8_SEG_MAP 0x11c
+#define H1_REG_VP8_SEG_QP(i) (0x120 + ((i) * 0x4))
+#define H1_REG_DMV_4P_1P_PENALTY(i) (0x180 + ((i) * 0x4))
+#define H1_REG_DMV_4P_1P_PENALTY_BIT(x, i) ((x) << (i) * 8)
+#define H1_REG_DMV_QPEL_PENALTY(i) (0x200 + ((i) * 0x4))
+#define H1_REG_DMV_QPEL_PENALTY_BIT(x, i) ((x) << (i) * 8)
+#define H1_REG_VP8_CTRL1 0x280
+#define H1_REG_VP8_BIT_COST_GOLDEN 0x284
+#define H1_REG_VP8_LOOP_FLT_DELTA(i) (0x288 + ((i) * 0x4))
+
+#endif /* HANTRO_H1_REGS_H_ */
diff --git a/drivers/staging/media/hantro/hantro_hw.h b/drivers/staging/media/hantro/hantro_hw.h
new file mode 100644
index 000000000000..3c361c2e9b88
--- /dev/null
+++ b/drivers/staging/media/hantro/hantro_hw.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright 2018 Google LLC.
+ * Tomasz Figa <tfiga@chromium.org>
+ */
+
+#ifndef HANTRO_HW_H_
+#define HANTRO_HW_H_
+
+#include <linux/interrupt.h>
+#include <linux/v4l2-controls.h>
+#include <media/mpeg2-ctrls.h>
+#include <media/videobuf2-core.h>
+
+struct hantro_dev;
+struct hantro_ctx;
+struct hantro_buf;
+struct hantro_variant;
+
+/**
+ * struct hantro_aux_buf - auxiliary DMA buffer for hardware data
+ * @cpu: CPU pointer to the buffer.
+ * @dma: DMA address of the buffer.
+ * @size: Size of the buffer.
+ */
+struct hantro_aux_buf {
+ void *cpu;
+ dma_addr_t dma;
+ size_t size;
+};
+
+/**
+ * struct hantro_jpeg_enc_hw_ctx
+ * @bounce_buffer: Bounce buffer
+ */
+struct hantro_jpeg_enc_hw_ctx {
+ struct hantro_aux_buf bounce_buffer;
+};
+
+/**
+ * struct hantro_mpeg2_dec_hw_ctx
+ * @qtable: Quantization table
+ */
+struct hantro_mpeg2_dec_hw_ctx {
+ struct hantro_aux_buf qtable;
+};
+
+/**
+ * struct hantro_codec_ops - codec mode specific operations
+ *
+ * @init: If needed, can be used for initialization.
+ * Optional and called from process context.
+ * @exit: If needed, can be used to undo the .init phase.
+ * Optional and called from process context.
+ * @run: Start a single {en,de}coding job. Called from atomic context
+ * to indicate that a pair of buffers is ready and the hardware
+ * should be programmed and started.
+ * @done: Read back processing results and additional data from hardware.
+ * @reset: Reset the hardware in case of a timeout.
+ */
+struct hantro_codec_ops {
+ int (*init)(struct hantro_ctx *ctx);
+ void (*exit)(struct hantro_ctx *ctx);
+ void (*run)(struct hantro_ctx *ctx);
+ void (*done)(struct hantro_ctx *ctx, enum vb2_buffer_state);
+ void (*reset)(struct hantro_ctx *ctx);
+};
+
+/**
+ * enum hantro_enc_fmt - source format ID for hardware registers.
+ */
+enum hantro_enc_fmt {
+ RK3288_VPU_ENC_FMT_YUV420P = 0,
+ RK3288_VPU_ENC_FMT_YUV420SP = 1,
+ RK3288_VPU_ENC_FMT_YUYV422 = 2,
+ RK3288_VPU_ENC_FMT_UYVY422 = 3,
+};
+
+extern const struct hantro_variant rk3399_vpu_variant;
+extern const struct hantro_variant rk3328_vpu_variant;
+extern const struct hantro_variant rk3288_vpu_variant;
+
+void hantro_watchdog(struct work_struct *work);
+void hantro_run(struct hantro_ctx *ctx);
+void hantro_irq_done(struct hantro_dev *vpu, unsigned int bytesused,
+ enum vb2_buffer_state result);
+
+void hantro_h1_jpeg_enc_run(struct hantro_ctx *ctx);
+void rk3399_vpu_jpeg_enc_run(struct hantro_ctx *ctx);
+int hantro_jpeg_enc_init(struct hantro_ctx *ctx);
+void hantro_jpeg_enc_exit(struct hantro_ctx *ctx);
+
+void hantro_g1_mpeg2_dec_run(struct hantro_ctx *ctx);
+void rk3399_vpu_mpeg2_dec_run(struct hantro_ctx *ctx);
+void hantro_mpeg2_dec_copy_qtable(u8 *qtable,
+ const struct v4l2_ctrl_mpeg2_quantization *ctrl);
+int hantro_mpeg2_dec_init(struct hantro_ctx *ctx);
+void hantro_mpeg2_dec_exit(struct hantro_ctx *ctx);
+
+#endif /* HANTRO_HW_H_ */
diff --git a/drivers/staging/media/rockchip/vpu/rockchip_vpu_jpeg.c b/drivers/staging/media/hantro/hantro_jpeg.c
index 0ff0badc1f7a..125eb41f2ede 100644
--- a/drivers/staging/media/rockchip/vpu/rockchip_vpu_jpeg.c
+++ b/drivers/staging/media/hantro/hantro_jpeg.c
@@ -6,9 +6,11 @@
* Copyright (C) Jean-Francois Moine (http://moinejf.free.fr)
* Copyright (C) 2014 Philipp Zabel, Pengutronix
*/
+#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/string.h>
-#include "rockchip_vpu_jpeg.h"
+#include "hantro_jpeg.h"
+#include "hantro.h"
#define LUMA_QUANT_OFF 7
#define CHROMA_QUANT_OFF 72
@@ -116,7 +118,7 @@ static const unsigned char chroma_ac_table[] = {
* and we'll use fixed offsets to change the width, height
* quantization tables, etc.
*/
-static const unsigned char rockchip_vpu_jpeg_header[JPEG_HEADER_SIZE] = {
+static const unsigned char hantro_jpeg_header[JPEG_HEADER_SIZE] = {
/* SOI */
0xff, 0xd8,
@@ -260,19 +262,19 @@ static void jpeg_set_quality(unsigned char *buffer, int quality)
}
unsigned char *
-rockchip_vpu_jpeg_get_qtable(struct rockchip_vpu_jpeg_ctx *ctx, int index)
+hantro_jpeg_get_qtable(struct hantro_jpeg_ctx *ctx, int index)
{
if (index == 0)
return ctx->buffer + LUMA_QUANT_OFF;
return ctx->buffer + CHROMA_QUANT_OFF;
}
-void rockchip_vpu_jpeg_header_assemble(struct rockchip_vpu_jpeg_ctx *ctx)
+void hantro_jpeg_header_assemble(struct hantro_jpeg_ctx *ctx)
{
char *buf = ctx->buffer;
- memcpy(buf, rockchip_vpu_jpeg_header,
- sizeof(rockchip_vpu_jpeg_header));
+ memcpy(buf, hantro_jpeg_header,
+ sizeof(hantro_jpeg_header));
buf[HEIGHT_OFF + 0] = ctx->height >> 8;
buf[HEIGHT_OFF + 1] = ctx->height;
@@ -288,3 +290,30 @@ void rockchip_vpu_jpeg_header_assemble(struct rockchip_vpu_jpeg_ctx *ctx)
jpeg_set_quality(buf, ctx->quality);
}
+
+int hantro_jpeg_enc_init(struct hantro_ctx *ctx)
+{
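+ /*
+ * The hardware writes the JPEG scan data into this bounce buffer,
+ * while the driver assembles the JPEG header directly in the
+ * destination buffer, hence the header size is subtracted here.
+ */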
+ ctx->jpeg_enc.bounce_buffer.size =
+ ctx->dst_fmt.plane_fmt[0].sizeimage -
+ ctx->vpu_dst_fmt->header_size;
+
+ ctx->jpeg_enc.bounce_buffer.cpu =
+ dma_alloc_attrs(ctx->dev->dev,
+ ctx->jpeg_enc.bounce_buffer.size,
+ &ctx->jpeg_enc.bounce_buffer.dma,
+ GFP_KERNEL,
+ DMA_ATTR_ALLOC_SINGLE_PAGES);
+ if (!ctx->jpeg_enc.bounce_buffer.cpu)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void hantro_jpeg_enc_exit(struct hantro_ctx *ctx)
+{
+ dma_free_attrs(ctx->dev->dev,
+ ctx->jpeg_enc.bounce_buffer.size,
+ ctx->jpeg_enc.bounce_buffer.cpu,
+ ctx->jpeg_enc.bounce_buffer.dma,
+ DMA_ATTR_ALLOC_SINGLE_PAGES);
+}
diff --git a/drivers/staging/media/hantro/hantro_jpeg.h b/drivers/staging/media/hantro/hantro_jpeg.h
new file mode 100644
index 000000000000..9e8397c71388
--- /dev/null
+++ b/drivers/staging/media/hantro/hantro_jpeg.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#define JPEG_HEADER_SIZE 601
+
+struct hantro_jpeg_ctx {
+ int width;
+ int height;
+ int quality;
+ unsigned char *buffer;
+};
+
+unsigned char *hantro_jpeg_get_qtable(struct hantro_jpeg_ctx *ctx, int index);
+void hantro_jpeg_header_assemble(struct hantro_jpeg_ctx *ctx);
diff --git a/drivers/staging/media/hantro/hantro_mpeg2.c b/drivers/staging/media/hantro/hantro_mpeg2.c
new file mode 100644
index 000000000000..1d334e6fcd06
--- /dev/null
+++ b/drivers/staging/media/hantro/hantro_mpeg2.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
+ */
+
+#include "hantro.h"
+
+static const u8 zigzag[64] = {
+ 0, 1, 8, 16, 9, 2, 3, 10,
+ 17, 24, 32, 25, 18, 11, 4, 5,
+ 12, 19, 26, 33, 40, 48, 41, 34,
+ 27, 20, 13, 6, 7, 14, 21, 28,
+ 35, 42, 49, 56, 57, 50, 43, 36,
+ 29, 22, 15, 23, 30, 37, 44, 51,
+ 58, 59, 52, 45, 38, 31, 39, 46,
+ 53, 60, 61, 54, 47, 55, 62, 63
+};
+
+void hantro_mpeg2_dec_copy_qtable(u8 *qtable,
+ const struct v4l2_ctrl_mpeg2_quantization *ctrl)
+{
+ int i, n;
+
+ if (!qtable || !ctrl)
+ return;
+
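+ /*
+ * The control carries each 64-byte matrix in zigzag scan order;
+ * store the four matrices back to back in raster order.
+ */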
+ for (i = 0; i < ARRAY_SIZE(zigzag); i++) {
+ n = zigzag[i];
+ qtable[n + 0] = ctrl->intra_quantiser_matrix[i];
+ qtable[n + 64] = ctrl->non_intra_quantiser_matrix[i];
+ qtable[n + 128] = ctrl->chroma_intra_quantiser_matrix[i];
+ qtable[n + 192] = ctrl->chroma_non_intra_quantiser_matrix[i];
+ }
+}
+
+int hantro_mpeg2_dec_init(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+
+ ctx->mpeg2_dec.qtable.size = ARRAY_SIZE(zigzag) * 4;
+ ctx->mpeg2_dec.qtable.cpu =
+ dma_alloc_coherent(vpu->dev,
+ ctx->mpeg2_dec.qtable.size,
+ &ctx->mpeg2_dec.qtable.dma,
+ GFP_KERNEL);
+ if (!ctx->mpeg2_dec.qtable.cpu)
+ return -ENOMEM;
+ return 0;
+}
+
+void hantro_mpeg2_dec_exit(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+
+ dma_free_coherent(vpu->dev,
+ ctx->mpeg2_dec.qtable.size,
+ ctx->mpeg2_dec.qtable.cpu,
+ ctx->mpeg2_dec.qtable.dma);
+}
diff --git a/drivers/staging/media/hantro/hantro_v4l2.c b/drivers/staging/media/hantro/hantro_v4l2.c
new file mode 100644
index 000000000000..68f45ee66821
--- /dev/null
+++ b/drivers/staging/media/hantro/hantro_v4l2.c
@@ -0,0 +1,686 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright (C) 2018 Collabora, Ltd.
+ * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
+ * Alpha Lin <Alpha.Lin@rock-chips.com>
+ * Jeffy Chen <jeffy.chen@rock-chips.com>
+ *
+ * Copyright 2018 Google LLC.
+ * Tomasz Figa <tfiga@chromium.org>
+ *
+ * Based on s5p-mfc driver by Samsung Electronics Co., Ltd.
+ * Copyright (C) 2010-2011 Samsung Electronics Co., Ltd.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/videodev2.h>
+#include <linux/workqueue.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-sg.h>
+
+#include "hantro.h"
+#include "hantro_hw.h"
+#include "hantro_v4l2.h"
+
+static const struct hantro_fmt *
+hantro_get_formats(const struct hantro_ctx *ctx, unsigned int *num_fmts)
+{
+ const struct hantro_fmt *formats;
+
+ if (hantro_is_encoder_ctx(ctx)) {
+ formats = ctx->dev->variant->enc_fmts;
+ *num_fmts = ctx->dev->variant->num_enc_fmts;
+ } else {
+ formats = ctx->dev->variant->dec_fmts;
+ *num_fmts = ctx->dev->variant->num_dec_fmts;
+ }
+
+ return formats;
+}
+
+static const struct hantro_fmt *
+hantro_find_format(const struct hantro_fmt *formats, unsigned int num_fmts,
+ u32 fourcc)
+{
+ unsigned int i;
+
+ for (i = 0; i < num_fmts; i++)
+ if (formats[i].fourcc == fourcc)
+ return &formats[i];
+ return NULL;
+}
+
+static const struct hantro_fmt *
+hantro_get_default_fmt(const struct hantro_fmt *formats, unsigned int num_fmts,
+ bool bitstream)
+{
+ unsigned int i;
+
+ for (i = 0; i < num_fmts; i++) {
+ if (bitstream == (formats[i].codec_mode !=
+ HANTRO_MODE_NONE))
+ return &formats[i];
+ }
+ return NULL;
+}
+
+static int vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct hantro_dev *vpu = video_drvdata(file);
+ struct video_device *vdev = video_devdata(file);
+
+ strscpy(cap->driver, vpu->dev->driver->name, sizeof(cap->driver));
+ strscpy(cap->card, vdev->name, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform: %s",
+ vpu->dev->driver->name);
+ return 0;
+}
+
+static int vidioc_enum_framesizes(struct file *file, void *priv,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct hantro_ctx *ctx = fh_to_ctx(priv);
+ const struct hantro_fmt *formats, *fmt;
+ unsigned int num_fmts;
+
+ if (fsize->index != 0) {
+ vpu_debug(0, "invalid frame size index (expected 0, got %d)\n",
+ fsize->index);
+ return -EINVAL;
+ }
+
+ formats = hantro_get_formats(ctx, &num_fmts);
+ fmt = hantro_find_format(formats, num_fmts, fsize->pixel_format);
+ if (!fmt) {
+ vpu_debug(0, "unsupported bitstream format (%08x)\n",
+ fsize->pixel_format);
+ return -EINVAL;
+ }
+
+ /* This only makes sense for coded formats */
+ if (fmt->codec_mode == HANTRO_MODE_NONE)
+ return -EINVAL;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
+ fsize->stepwise = fmt->frmsize;
+
+ return 0;
+}
+
+static int vidioc_enum_fmt(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f, bool capture)
+{
+ struct hantro_ctx *ctx = fh_to_ctx(priv);
+ const struct hantro_fmt *fmt, *formats;
+ unsigned int num_fmts, i, j = 0;
+ bool skip_mode_none;
+
+ /*
+ * When dealing with an encoder:
+ * - on the capture side we want to filter out all MODE_NONE formats.
+ * - on the output side we want to filter out all formats that are
+ * not MODE_NONE.
+ * When dealing with a decoder:
+ * - on the capture side we want to filter out all formats that are
+ * not MODE_NONE.
+ * - on the output side we want to filter out all MODE_NONE formats.
+ */
+ skip_mode_none = capture == hantro_is_encoder_ctx(ctx);
+
+ formats = hantro_get_formats(ctx, &num_fmts);
+ for (i = 0; i < num_fmts; i++) {
+ bool mode_none = formats[i].codec_mode == HANTRO_MODE_NONE;
+
+ if (skip_mode_none == mode_none)
+ continue;
+ if (j == f->index) {
+ fmt = &formats[i];
+ f->pixelformat = fmt->fourcc;
+ return 0;
+ }
+ ++j;
+ }
+ return -EINVAL;
+}
+
+static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return vidioc_enum_fmt(file, priv, f, true);
+}
+
+static int vidioc_enum_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return vidioc_enum_fmt(file, priv, f, false);
+}
+
+static int vidioc_g_fmt_out_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
+ struct hantro_ctx *ctx = fh_to_ctx(priv);
+
+ vpu_debug(4, "f->type = %d\n", f->type);
+
+ *pix_mp = ctx->src_fmt;
+
+ return 0;
+}
+
+static int vidioc_g_fmt_cap_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
+ struct hantro_ctx *ctx = fh_to_ctx(priv);
+
+ vpu_debug(4, "f->type = %d\n", f->type);
+
+ *pix_mp = ctx->dst_fmt;
+
+ return 0;
+}
+
+static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f,
+ bool capture)
+{
+ struct hantro_ctx *ctx = fh_to_ctx(priv);
+ struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
+ const struct hantro_fmt *formats, *fmt, *vpu_fmt;
+ unsigned int num_fmts;
+ bool coded;
+
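+ /* The coded (bitstream) side is CAPTURE for an encoder and OUTPUT for a decoder. */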
+ coded = capture == hantro_is_encoder_ctx(ctx);
+
+ vpu_debug(4, "trying format %c%c%c%c\n",
+ (pix_mp->pixelformat & 0x7f),
+ (pix_mp->pixelformat >> 8) & 0x7f,
+ (pix_mp->pixelformat >> 16) & 0x7f,
+ (pix_mp->pixelformat >> 24) & 0x7f);
+
+ formats = hantro_get_formats(ctx, &num_fmts);
+ fmt = hantro_find_format(formats, num_fmts, pix_mp->pixelformat);
+ if (!fmt) {
+ fmt = hantro_get_default_fmt(formats, num_fmts, coded);
+ f->fmt.pix_mp.pixelformat = fmt->fourcc;
+ }
+
+ if (coded) {
+ pix_mp->num_planes = 1;
+ vpu_fmt = fmt;
+ } else if (hantro_is_encoder_ctx(ctx)) {
+ vpu_fmt = ctx->vpu_dst_fmt;
+ } else {
+ vpu_fmt = ctx->vpu_src_fmt;
+ /*
+ * Width/height on the CAPTURE end of a decoder are ignored and
+ * replaced by the OUTPUT ones.
+ */
+ pix_mp->width = ctx->src_fmt.width;
+ pix_mp->height = ctx->src_fmt.height;
+ }
+
+ pix_mp->field = V4L2_FIELD_NONE;
+
+ v4l2_apply_frmsize_constraints(&pix_mp->width, &pix_mp->height,
+ &vpu_fmt->frmsize);
+
+ if (!coded) {
+ /* Fill remaining fields */
+ v4l2_fill_pixfmt_mp(pix_mp, fmt->fourcc, pix_mp->width,
+ pix_mp->height);
+ } else if (!pix_mp->plane_fmt[0].sizeimage) {
+ /*
+ * For coded formats the application can specify
+ * sizeimage. If the application passes a zero sizeimage,
+ * let's default to the maximum frame size.
+ */
+ pix_mp->plane_fmt[0].sizeimage = fmt->header_size +
+ pix_mp->width * pix_mp->height * fmt->max_depth;
+ }
+
+ return 0;
+}
+
+static int vidioc_try_fmt_cap_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ return vidioc_try_fmt(file, priv, f, true);
+}
+
+static int vidioc_try_fmt_out_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ return vidioc_try_fmt(file, priv, f, false);
+}
+
+static void
+hantro_reset_fmt(struct v4l2_pix_format_mplane *fmt,
+ const struct hantro_fmt *vpu_fmt)
+{
+ memset(fmt, 0, sizeof(*fmt));
+
+ fmt->pixelformat = vpu_fmt->fourcc;
+ fmt->field = V4L2_FIELD_NONE;
+ fmt->colorspace = V4L2_COLORSPACE_JPEG;
+ fmt->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ fmt->quantization = V4L2_QUANTIZATION_DEFAULT;
+ fmt->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+}
+
+static void
+hantro_reset_encoded_fmt(struct hantro_ctx *ctx)
+{
+ const struct hantro_fmt *vpu_fmt, *formats;
+ struct v4l2_pix_format_mplane *fmt;
+ unsigned int num_fmts;
+
+ formats = hantro_get_formats(ctx, &num_fmts);
+ vpu_fmt = hantro_get_default_fmt(formats, num_fmts, true);
+
+ if (hantro_is_encoder_ctx(ctx)) {
+ ctx->vpu_dst_fmt = vpu_fmt;
+ fmt = &ctx->dst_fmt;
+ } else {
+ ctx->vpu_src_fmt = vpu_fmt;
+ fmt = &ctx->src_fmt;
+ }
+
+ hantro_reset_fmt(fmt, vpu_fmt);
+ fmt->num_planes = 1;
+ fmt->width = vpu_fmt->frmsize.min_width;
+ fmt->height = vpu_fmt->frmsize.min_height;
+ fmt->plane_fmt[0].sizeimage = vpu_fmt->header_size +
+ fmt->width * fmt->height * vpu_fmt->max_depth;
+}
+
+static void
+hantro_reset_raw_fmt(struct hantro_ctx *ctx)
+{
+ const struct hantro_fmt *raw_vpu_fmt, *formats;
+ struct v4l2_pix_format_mplane *raw_fmt, *encoded_fmt;
+ unsigned int num_fmts;
+
+ formats = hantro_get_formats(ctx, &num_fmts);
+ raw_vpu_fmt = hantro_get_default_fmt(formats, num_fmts, false);
+
+ if (hantro_is_encoder_ctx(ctx)) {
+ ctx->vpu_src_fmt = raw_vpu_fmt;
+ raw_fmt = &ctx->src_fmt;
+ encoded_fmt = &ctx->dst_fmt;
+ } else {
+ ctx->vpu_dst_fmt = raw_vpu_fmt;
+ raw_fmt = &ctx->dst_fmt;
+ encoded_fmt = &ctx->src_fmt;
+ }
+
+ hantro_reset_fmt(raw_fmt, raw_vpu_fmt);
+ v4l2_fill_pixfmt_mp(raw_fmt, raw_vpu_fmt->fourcc,
+ encoded_fmt->width,
+ encoded_fmt->height);
+}
+
+void hantro_reset_fmts(struct hantro_ctx *ctx)
+{
+ hantro_reset_encoded_fmt(ctx);
+ hantro_reset_raw_fmt(ctx);
+}
+
+static void
+hantro_update_requires_request(struct hantro_ctx *ctx, u32 fourcc)
+{
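+ /*
+ * Stateless MPEG-2 decoding takes its per-frame parameters through
+ * the Request API, while JPEG encoding uses plain controls, so the
+ * OUTPUT queue's requires_requests flag follows the coded format.
+ */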
+ switch (fourcc) {
+ case V4L2_PIX_FMT_JPEG:
+ ctx->fh.m2m_ctx->out_q_ctx.q.requires_requests = false;
+ break;
+ case V4L2_PIX_FMT_MPEG2_SLICE:
+ ctx->fh.m2m_ctx->out_q_ctx.q.requires_requests = true;
+ break;
+ default:
+ break;
+ }
+}
+
+static int
+vidioc_s_fmt_out_mplane(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
+ struct hantro_ctx *ctx = fh_to_ctx(priv);
+ const struct hantro_fmt *formats;
+ unsigned int num_fmts;
+ struct vb2_queue *vq;
+ int ret;
+
+ /* Change not allowed if queue is busy. */
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (vb2_is_busy(vq))
+ return -EBUSY;
+
+ if (!hantro_is_encoder_ctx(ctx)) {
+ struct vb2_queue *peer_vq;
+
+ /*
+ * Since format change on the OUTPUT queue will reset
+ * the CAPTURE queue, we can't allow doing so
+ * when the CAPTURE queue has buffers allocated.
+ */
+ peer_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ if (vb2_is_busy(peer_vq))
+ return -EBUSY;
+ }
+
+ ret = vidioc_try_fmt_out_mplane(file, priv, f);
+ if (ret)
+ return ret;
+
+ formats = hantro_get_formats(ctx, &num_fmts);
+ ctx->vpu_src_fmt = hantro_find_format(formats, num_fmts,
+ pix_mp->pixelformat);
+ ctx->src_fmt = *pix_mp;
+
+ /*
+ * Current raw format might have become invalid with newly
+ * selected codec, so reset it to default just to be safe and
+ * keep internal driver state sane. The user is required to set
+ * the raw format again after we return, so we don't need
+ * anything smarter.
+ * Note that hantro_reset_raw_fmt() also propagates size
+ * changes to the raw format.
+ */
+ if (!hantro_is_encoder_ctx(ctx))
+ hantro_reset_raw_fmt(ctx);
+
+ /* Colorimetry information is always propagated. */
+ ctx->dst_fmt.colorspace = pix_mp->colorspace;
+ ctx->dst_fmt.ycbcr_enc = pix_mp->ycbcr_enc;
+ ctx->dst_fmt.xfer_func = pix_mp->xfer_func;
+ ctx->dst_fmt.quantization = pix_mp->quantization;
+
+ hantro_update_requires_request(ctx, pix_mp->pixelformat);
+
+ vpu_debug(0, "OUTPUT codec mode: %d\n", ctx->vpu_src_fmt->codec_mode);
+ vpu_debug(0, "fmt - w: %d, h: %d\n",
+ pix_mp->width, pix_mp->height);
+ return 0;
+}
+
+static int vidioc_s_fmt_cap_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
+ struct hantro_ctx *ctx = fh_to_ctx(priv);
+ const struct hantro_fmt *formats;
+ struct vb2_queue *vq;
+ unsigned int num_fmts;
+ int ret;
+
+ /* Change not allowed if queue is busy. */
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (vb2_is_busy(vq))
+ return -EBUSY;
+
+ if (hantro_is_encoder_ctx(ctx)) {
+ struct vb2_queue *peer_vq;
+
+ /*
+ * Since format change on the CAPTURE queue will reset
+ * the OUTPUT queue, we can't allow doing so
+ * when the OUTPUT queue has buffers allocated.
+ */
+ peer_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (vb2_is_busy(peer_vq) &&
+ (pix_mp->pixelformat != ctx->dst_fmt.pixelformat ||
+ pix_mp->height != ctx->dst_fmt.height ||
+ pix_mp->width != ctx->dst_fmt.width))
+ return -EBUSY;
+ }
+
+ ret = vidioc_try_fmt_cap_mplane(file, priv, f);
+ if (ret)
+ return ret;
+
+ formats = hantro_get_formats(ctx, &num_fmts);
+ ctx->vpu_dst_fmt = hantro_find_format(formats, num_fmts,
+ pix_mp->pixelformat);
+ ctx->dst_fmt = *pix_mp;
+
+ /*
+ * Current raw format might have become invalid with newly
+ * selected codec, so reset it to default just to be safe and
+ * keep internal driver state sane. The user is required to set
+ * the raw format again after we return, so we don't need
+ * anything smarter.
+ * Note that hantro_reset_raw_fmt() also propagates size
+ * changes to the raw format.
+ */
+ if (hantro_is_encoder_ctx(ctx))
+ hantro_reset_raw_fmt(ctx);
+
+ /* Colorimetry information is always propagated. */
+ ctx->src_fmt.colorspace = pix_mp->colorspace;
+ ctx->src_fmt.ycbcr_enc = pix_mp->ycbcr_enc;
+ ctx->src_fmt.xfer_func = pix_mp->xfer_func;
+ ctx->src_fmt.quantization = pix_mp->quantization;
+
+ vpu_debug(0, "CAPTURE codec mode: %d\n", ctx->vpu_dst_fmt->codec_mode);
+ vpu_debug(0, "fmt - w: %d, h: %d\n",
+ pix_mp->width, pix_mp->height);
+
+ hantro_update_requires_request(ctx, pix_mp->pixelformat);
+
+ return 0;
+}
+
+const struct v4l2_ioctl_ops hantro_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+ .vidioc_enum_framesizes = vidioc_enum_framesizes,
+
+ .vidioc_try_fmt_vid_cap_mplane = vidioc_try_fmt_cap_mplane,
+ .vidioc_try_fmt_vid_out_mplane = vidioc_try_fmt_out_mplane,
+ .vidioc_s_fmt_vid_out_mplane = vidioc_s_fmt_out_mplane,
+ .vidioc_s_fmt_vid_cap_mplane = vidioc_s_fmt_cap_mplane,
+ .vidioc_g_fmt_vid_out_mplane = vidioc_g_fmt_out_mplane,
+ .vidioc_g_fmt_vid_cap_mplane = vidioc_g_fmt_cap_mplane,
+ .vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid_out,
+ .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
+
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+};
+
+static int
+hantro_queue_setup(struct vb2_queue *vq, unsigned int *num_buffers,
+ unsigned int *num_planes, unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct hantro_ctx *ctx = vb2_get_drv_priv(vq);
+ struct v4l2_pix_format_mplane *pixfmt;
+ int i;
+
+ switch (vq->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ pixfmt = &ctx->dst_fmt;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ pixfmt = &ctx->src_fmt;
+ break;
+ default:
+ vpu_err("invalid queue type: %d\n", vq->type);
+ return -EINVAL;
+ }
+
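+ /*
+ * A non-zero plane count means VIDIOC_CREATE_BUFS: only validate
+ * the caller's plane layout against the currently set format.
+ */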
+ if (*num_planes) {
+ if (*num_planes != pixfmt->num_planes)
+ return -EINVAL;
+ for (i = 0; i < pixfmt->num_planes; ++i)
+ if (sizes[i] < pixfmt->plane_fmt[i].sizeimage)
+ return -EINVAL;
+ return 0;
+ }
+
+ *num_planes = pixfmt->num_planes;
+ for (i = 0; i < pixfmt->num_planes; ++i)
+ sizes[i] = pixfmt->plane_fmt[i].sizeimage;
+ return 0;
+}
+
+static int
+hantro_buf_plane_check(struct vb2_buffer *vb, const struct hantro_fmt *vpu_fmt,
+ struct v4l2_pix_format_mplane *pixfmt)
+{
+ unsigned int sz;
+ int i;
+
+ for (i = 0; i < pixfmt->num_planes; ++i) {
+ sz = pixfmt->plane_fmt[i].sizeimage;
+ vpu_debug(4, "plane %d size: %ld, sizeimage: %u\n",
+ i, vb2_plane_size(vb, i), sz);
+ if (vb2_plane_size(vb, i) < sz) {
+ vpu_err("plane %d is too small for output\n", i);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static int hantro_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct hantro_ctx *ctx = vb2_get_drv_priv(vq);
+
+ if (V4L2_TYPE_IS_OUTPUT(vq->type))
+ return hantro_buf_plane_check(vb, ctx->vpu_src_fmt,
+ &ctx->src_fmt);
+
+ return hantro_buf_plane_check(vb, ctx->vpu_dst_fmt, &ctx->dst_fmt);
+}
+
+static void hantro_buf_queue(struct vb2_buffer *vb)
+{
+ struct hantro_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+}
+
+static bool hantro_vq_is_coded(struct vb2_queue *q)
+{
+ struct hantro_ctx *ctx = vb2_get_drv_priv(q);
+
+ return hantro_is_encoder_ctx(ctx) != V4L2_TYPE_IS_OUTPUT(q->type);
+}
+
+static int hantro_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct hantro_ctx *ctx = vb2_get_drv_priv(q);
+ int ret = 0;
+
+ if (V4L2_TYPE_IS_OUTPUT(q->type))
+ ctx->sequence_out = 0;
+ else
+ ctx->sequence_cap = 0;
+
+ if (hantro_vq_is_coded(q)) {
+ enum hantro_codec_mode codec_mode;
+
+ if (V4L2_TYPE_IS_OUTPUT(q->type))
+ codec_mode = ctx->vpu_src_fmt->codec_mode;
+ else
+ codec_mode = ctx->vpu_dst_fmt->codec_mode;
+
+ vpu_debug(4, "Codec mode = %d\n", codec_mode);
+ ctx->codec_ops = &ctx->dev->variant->codec_ops[codec_mode];
+ if (ctx->codec_ops->init)
+ ret = ctx->codec_ops->init(ctx);
+ }
+
+ return ret;
+}
+
+static void
+hantro_return_bufs(struct vb2_queue *q,
+ struct vb2_v4l2_buffer *(*buf_remove)(struct v4l2_m2m_ctx *))
+{
+ struct hantro_ctx *ctx = vb2_get_drv_priv(q);
+
+ for (;;) {
+ struct vb2_v4l2_buffer *vbuf;
+
+ vbuf = buf_remove(ctx->fh.m2m_ctx);
+ if (!vbuf)
+ break;
+ v4l2_ctrl_request_complete(vbuf->vb2_buf.req_obj.req,
+ &ctx->ctrl_handler);
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
+ }
+}
+
+static void hantro_stop_streaming(struct vb2_queue *q)
+{
+ struct hantro_ctx *ctx = vb2_get_drv_priv(q);
+
+ if (hantro_vq_is_coded(q)) {
+ if (ctx->codec_ops && ctx->codec_ops->exit)
+ ctx->codec_ops->exit(ctx);
+ }
+
+ /*
+ * The mem2mem framework calls v4l2_m2m_cancel_job before
+ * .stop_streaming, so there isn't any job running and
+ * it is safe to return all the buffers.
+ */
+ if (V4L2_TYPE_IS_OUTPUT(q->type))
+ hantro_return_bufs(q, v4l2_m2m_src_buf_remove);
+ else
+ hantro_return_bufs(q, v4l2_m2m_dst_buf_remove);
+}
+
+static void hantro_buf_request_complete(struct vb2_buffer *vb)
+{
+ struct hantro_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_ctrl_request_complete(vb->req_obj.req, &ctx->ctrl_handler);
+}
+
+static int hantro_buf_out_validate(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+ vbuf->field = V4L2_FIELD_NONE;
+ return 0;
+}
+
+const struct vb2_ops hantro_queue_ops = {
+ .queue_setup = hantro_queue_setup,
+ .buf_prepare = hantro_buf_prepare,
+ .buf_queue = hantro_buf_queue,
+ .buf_out_validate = hantro_buf_out_validate,
+ .buf_request_complete = hantro_buf_request_complete,
+ .start_streaming = hantro_start_streaming,
+ .stop_streaming = hantro_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
diff --git a/drivers/staging/media/hantro/hantro_v4l2.h b/drivers/staging/media/hantro/hantro_v4l2.h
new file mode 100644
index 000000000000..18bc682c8556
--- /dev/null
+++ b/drivers/staging/media/hantro/hantro_v4l2.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
+ * Alpha Lin <Alpha.Lin@rock-chips.com>
+ * Jeffy Chen <jeffy.chen@rock-chips.com>
+ *
+ * Copyright 2018 Google LLC.
+ * Tomasz Figa <tfiga@chromium.org>
+ *
+ * Based on s5p-mfc driver by Samsung Electronics Co., Ltd.
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ */
+
+#ifndef HANTRO_V4L2_H_
+#define HANTRO_V4L2_H_
+
+#include "hantro.h"
+
+extern const struct v4l2_ioctl_ops hantro_ioctl_ops;
+extern const struct vb2_ops hantro_queue_ops;
+
+void hantro_reset_fmts(struct hantro_ctx *ctx);
+
+#endif /* HANTRO_V4L2_H_ */
diff --git a/drivers/staging/media/hantro/rk3288_vpu_hw.c b/drivers/staging/media/hantro/rk3288_vpu_hw.c
new file mode 100644
index 000000000000..bcacc4f51093
--- /dev/null
+++ b/drivers/staging/media/hantro/rk3288_vpu_hw.c
@@ -0,0 +1,187 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
+ * Jeffy Chen <jeffy.chen@rock-chips.com>
+ */
+
+#include <linux/clk.h>
+
+#include "hantro.h"
+#include "hantro_jpeg.h"
+#include "hantro_g1_regs.h"
+#include "hantro_h1_regs.h"
+
+#define RK3288_ACLK_MAX_FREQ (400 * 1000 * 1000)
+
+/*
+ * Supported formats.
+ */
+
+static const struct hantro_fmt rk3288_vpu_enc_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_YUV420M,
+ .codec_mode = HANTRO_MODE_NONE,
+ .enc_fmt = RK3288_VPU_ENC_FMT_YUV420P,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_NV12M,
+ .codec_mode = HANTRO_MODE_NONE,
+ .enc_fmt = RK3288_VPU_ENC_FMT_YUV420SP,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .codec_mode = HANTRO_MODE_NONE,
+ .enc_fmt = RK3288_VPU_ENC_FMT_YUYV422,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .codec_mode = HANTRO_MODE_NONE,
+ .enc_fmt = RK3288_VPU_ENC_FMT_UYVY422,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_JPEG,
+ .codec_mode = HANTRO_MODE_JPEG_ENC,
+ .max_depth = 2,
+ .header_size = JPEG_HEADER_SIZE,
+ .frmsize = {
+ .min_width = 96,
+ .max_width = 8192,
+ .step_width = JPEG_MB_DIM,
+ .min_height = 32,
+ .max_height = 8192,
+ .step_height = JPEG_MB_DIM,
+ },
+ },
+};
+
+static const struct hantro_fmt rk3288_vpu_dec_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .codec_mode = HANTRO_MODE_NONE,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_MPEG2_SLICE,
+ .codec_mode = HANTRO_MODE_MPEG2_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = 48,
+ .max_width = 1920,
+ .step_width = MPEG2_MB_DIM,
+ .min_height = 48,
+ .max_height = 1088,
+ .step_height = MPEG2_MB_DIM,
+ },
+ },
+};
+
+static irqreturn_t rk3288_vepu_irq(int irq, void *dev_id)
+{
+ struct hantro_dev *vpu = dev_id;
+ enum vb2_buffer_state state;
+ u32 status, bytesused;
+
+ status = vepu_read(vpu, H1_REG_INTERRUPT);
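+ /* The stream length is reported in bits, hence the division by 8. */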
+ bytesused = vepu_read(vpu, H1_REG_STR_BUF_LIMIT) / 8;
+ state = (status & H1_REG_INTERRUPT_FRAME_RDY) ?
+ VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR;
+
+ vepu_write(vpu, 0, H1_REG_INTERRUPT);
+ vepu_write(vpu, 0, H1_REG_AXI_CTRL);
+
+ hantro_irq_done(vpu, bytesused, state);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rk3288_vdpu_irq(int irq, void *dev_id)
+{
+ struct hantro_dev *vpu = dev_id;
+ enum vb2_buffer_state state;
+ u32 status;
+
+ status = vdpu_read(vpu, G1_REG_INTERRUPT);
+ state = (status & G1_REG_INTERRUPT_DEC_RDY_INT) ?
+ VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR;
+
+ vdpu_write(vpu, 0, G1_REG_INTERRUPT);
+ vdpu_write(vpu, G1_REG_CONFIG_DEC_CLK_GATE_E, G1_REG_CONFIG);
+
+ hantro_irq_done(vpu, 0, state);
+
+ return IRQ_HANDLED;
+}
+
+static int rk3288_vpu_hw_init(struct hantro_dev *vpu)
+{
+ /* Bump ACLK to max. possible freq. to improve performance. */
+ clk_set_rate(vpu->clocks[0].clk, RK3288_ACLK_MAX_FREQ);
+ return 0;
+}
+
+static void rk3288_vpu_enc_reset(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+
+ vepu_write(vpu, H1_REG_INTERRUPT_DIS_BIT, H1_REG_INTERRUPT);
+ vepu_write(vpu, 0, H1_REG_ENC_CTRL);
+ vepu_write(vpu, 0, H1_REG_AXI_CTRL);
+}
+
+static void rk3288_vpu_dec_reset(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+
+ vdpu_write(vpu, G1_REG_INTERRUPT_DEC_IRQ_DIS, G1_REG_INTERRUPT);
+ vdpu_write(vpu, G1_REG_CONFIG_DEC_CLK_GATE_E, G1_REG_CONFIG);
+ vdpu_write(vpu, 1, G1_REG_SOFT_RESET);
+}
+
+/*
+ * Supported codec ops.
+ */
+
+static const struct hantro_codec_ops rk3288_vpu_codec_ops[] = {
+ [HANTRO_MODE_JPEG_ENC] = {
+ .run = hantro_h1_jpeg_enc_run,
+ .reset = rk3288_vpu_enc_reset,
+ .init = hantro_jpeg_enc_init,
+ .exit = hantro_jpeg_enc_exit,
+ },
+ [HANTRO_MODE_MPEG2_DEC] = {
+ .run = hantro_g1_mpeg2_dec_run,
+ .reset = rk3288_vpu_dec_reset,
+ .init = hantro_mpeg2_dec_init,
+ .exit = hantro_mpeg2_dec_exit,
+ },
+};
+
+/*
+ * VPU variant.
+ */
+
+static const struct hantro_irq rk3288_irqs[] = {
+ { "vepu", rk3288_vepu_irq },
+ { "vdpu", rk3288_vdpu_irq },
+};
+
+static const char * const rk3288_clk_names[] = {
+ "aclk", "hclk"
+};
+
+const struct hantro_variant rk3288_vpu_variant = {
+ .enc_offset = 0x0,
+ .enc_fmts = rk3288_vpu_enc_fmts,
+ .num_enc_fmts = ARRAY_SIZE(rk3288_vpu_enc_fmts),
+ .dec_offset = 0x400,
+ .dec_fmts = rk3288_vpu_dec_fmts,
+ .num_dec_fmts = ARRAY_SIZE(rk3288_vpu_dec_fmts),
+ .codec = HANTRO_JPEG_ENCODER | HANTRO_MPEG2_DECODER,
+ .codec_ops = rk3288_vpu_codec_ops,
+ .irqs = rk3288_irqs,
+ .num_irqs = ARRAY_SIZE(rk3288_irqs),
+ .init = rk3288_vpu_hw_init,
+ .clk_names = rk3288_clk_names,
+ .num_clocks = ARRAY_SIZE(rk3288_clk_names)
+};
diff --git a/drivers/staging/media/hantro/rk3399_vpu_hw.c b/drivers/staging/media/hantro/rk3399_vpu_hw.c
new file mode 100644
index 000000000000..5718f8063542
--- /dev/null
+++ b/drivers/staging/media/hantro/rk3399_vpu_hw.c
@@ -0,0 +1,186 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
+ * Jeffy Chen <jeffy.chen@rock-chips.com>
+ */
+
+#include <linux/clk.h>
+
+#include "hantro.h"
+#include "hantro_jpeg.h"
+#include "rk3399_vpu_regs.h"
+
+#define RK3399_ACLK_MAX_FREQ (400 * 1000 * 1000)
+
+/*
+ * Supported formats.
+ */
+
+static const struct hantro_fmt rk3399_vpu_enc_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_YUV420M,
+ .codec_mode = HANTRO_MODE_NONE,
+ .enc_fmt = RK3288_VPU_ENC_FMT_YUV420P,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_NV12M,
+ .codec_mode = HANTRO_MODE_NONE,
+ .enc_fmt = RK3288_VPU_ENC_FMT_YUV420SP,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .codec_mode = HANTRO_MODE_NONE,
+ .enc_fmt = RK3288_VPU_ENC_FMT_YUYV422,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .codec_mode = HANTRO_MODE_NONE,
+ .enc_fmt = RK3288_VPU_ENC_FMT_UYVY422,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_JPEG,
+ .codec_mode = HANTRO_MODE_JPEG_ENC,
+ .max_depth = 2,
+ .header_size = JPEG_HEADER_SIZE,
+ .frmsize = {
+ .min_width = 96,
+ .max_width = 8192,
+ .step_width = JPEG_MB_DIM,
+ .min_height = 32,
+ .max_height = 8192,
+ .step_height = JPEG_MB_DIM,
+ },
+ },
+};
+
+static const struct hantro_fmt rk3399_vpu_dec_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .codec_mode = HANTRO_MODE_NONE,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_MPEG2_SLICE,
+ .codec_mode = HANTRO_MODE_MPEG2_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = 48,
+ .max_width = 1920,
+ .step_width = MPEG2_MB_DIM,
+ .min_height = 48,
+ .max_height = 1088,
+ .step_height = MPEG2_MB_DIM,
+ },
+ },
+};
+
+static irqreturn_t rk3399_vepu_irq(int irq, void *dev_id)
+{
+ struct hantro_dev *vpu = dev_id;
+ enum vb2_buffer_state state;
+ u32 status, bytesused;
+
+ status = vepu_read(vpu, VEPU_REG_INTERRUPT);
+ bytesused = vepu_read(vpu, VEPU_REG_STR_BUF_LIMIT) / 8;
+ state = (status & VEPU_REG_INTERRUPT_FRAME_READY) ?
+ VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR;
+
+ vepu_write(vpu, 0, VEPU_REG_INTERRUPT);
+ vepu_write(vpu, 0, VEPU_REG_AXI_CTRL);
+
+ hantro_irq_done(vpu, bytesused, state);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rk3399_vdpu_irq(int irq, void *dev_id)
+{
+ struct hantro_dev *vpu = dev_id;
+ enum vb2_buffer_state state;
+ u32 status;
+
+ status = vdpu_read(vpu, VDPU_REG_INTERRUPT);
+ state = (status & VDPU_REG_INTERRUPT_DEC_IRQ) ?
+ VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR;
+
+ vdpu_write(vpu, 0, VDPU_REG_INTERRUPT);
+ vdpu_write(vpu, 0, VDPU_REG_AXI_CTRL);
+
+ hantro_irq_done(vpu, 0, state);
+
+ return IRQ_HANDLED;
+}
+
+static int rk3399_vpu_hw_init(struct hantro_dev *vpu)
+{
+ /* Bump ACLK to max. possible freq. to improve performance. */
+ clk_set_rate(vpu->clocks[0].clk, RK3399_ACLK_MAX_FREQ);
+ return 0;
+}
+
+static void rk3399_vpu_enc_reset(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+
+ vepu_write(vpu, VEPU_REG_INTERRUPT_DIS_BIT, VEPU_REG_INTERRUPT);
+ vepu_write(vpu, 0, VEPU_REG_ENCODE_START);
+ vepu_write(vpu, 0, VEPU_REG_AXI_CTRL);
+}
+
+static void rk3399_vpu_dec_reset(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+
+ vdpu_write(vpu, VDPU_REG_INTERRUPT_DEC_IRQ_DIS, VDPU_REG_INTERRUPT);
+ vdpu_write(vpu, 0, VDPU_REG_EN_FLAGS);
+ vdpu_write(vpu, 1, VDPU_REG_SOFT_RESET);
+}
+
+/*
+ * Supported codec ops.
+ */
+
+static const struct hantro_codec_ops rk3399_vpu_codec_ops[] = {
+ [HANTRO_MODE_JPEG_ENC] = {
+ .run = rk3399_vpu_jpeg_enc_run,
+ .reset = rk3399_vpu_enc_reset,
+ .init = hantro_jpeg_enc_init,
+ .exit = hantro_jpeg_enc_exit,
+ },
+ [HANTRO_MODE_MPEG2_DEC] = {
+ .run = rk3399_vpu_mpeg2_dec_run,
+ .reset = rk3399_vpu_dec_reset,
+ .init = hantro_mpeg2_dec_init,
+ .exit = hantro_mpeg2_dec_exit,
+ },
+};
+
+/*
+ * VPU variant.
+ */
+
+static const struct hantro_irq rk3399_irqs[] = {
+ { "vepu", rk3399_vepu_irq },
+ { "vdpu", rk3399_vdpu_irq },
+};
+
+static const char * const rk3399_clk_names[] = {
+ "aclk", "hclk"
+};
+
+const struct hantro_variant rk3399_vpu_variant = {
+ .enc_offset = 0x0,
+ .enc_fmts = rk3399_vpu_enc_fmts,
+ .num_enc_fmts = ARRAY_SIZE(rk3399_vpu_enc_fmts),
+ .dec_offset = 0x400,
+ .dec_fmts = rk3399_vpu_dec_fmts,
+ .num_dec_fmts = ARRAY_SIZE(rk3399_vpu_dec_fmts),
+ .codec = HANTRO_JPEG_ENCODER | HANTRO_MPEG2_DECODER,
+ .codec_ops = rk3399_vpu_codec_ops,
+ .irqs = rk3399_irqs,
+ .num_irqs = ARRAY_SIZE(rk3399_irqs),
+ .init = rk3399_vpu_hw_init,
+ .clk_names = rk3399_clk_names,
+ .num_clocks = ARRAY_SIZE(rk3399_clk_names)
+};
diff --git a/drivers/staging/media/rockchip/vpu/rk3399_vpu_hw_jpeg_enc.c b/drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c
index 3d438797692e..ae66354d2d93 100644
--- a/drivers/staging/media/rockchip/vpu/rk3399_vpu_hw_jpeg_enc.c
+++ b/drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Rockchip VPU codec driver
+ * Hantro VPU codec driver
*
* Copyright (C) 2018 Rockchip Electronics Co., Ltd.
*
@@ -25,16 +25,16 @@
#include <asm/unaligned.h>
#include <media/v4l2-mem2mem.h>
-#include "rockchip_vpu_jpeg.h"
-#include "rockchip_vpu.h"
-#include "rockchip_vpu_common.h"
-#include "rockchip_vpu_hw.h"
+#include "hantro_jpeg.h"
+#include "hantro.h"
+#include "hantro_v4l2.h"
+#include "hantro_hw.h"
#include "rk3399_vpu_regs.h"
#define VEPU_JPEG_QUANT_TABLE_COUNT 16
-static void rk3399_vpu_set_src_img_ctrl(struct rockchip_vpu_dev *vpu,
- struct rockchip_vpu_ctx *ctx)
+static void rk3399_vpu_set_src_img_ctrl(struct hantro_dev *vpu,
+ struct hantro_ctx *ctx)
{
struct v4l2_pix_format_mplane *pix_fmt = &ctx->src_fmt;
u32 reg;
@@ -60,8 +60,8 @@ static void rk3399_vpu_set_src_img_ctrl(struct rockchip_vpu_dev *vpu,
vepu_write_relaxed(vpu, reg, VEPU_REG_ENC_CTRL1);
}
-static void rk3399_vpu_jpeg_enc_set_buffers(struct rockchip_vpu_dev *vpu,
- struct rockchip_vpu_ctx *ctx,
+static void rk3399_vpu_jpeg_enc_set_buffers(struct hantro_dev *vpu,
+ struct hantro_ctx *ctx,
struct vb2_buffer *src_buf)
{
struct v4l2_pix_format_mplane *pix_fmt = &ctx->src_fmt;
@@ -69,9 +69,9 @@ static void rk3399_vpu_jpeg_enc_set_buffers(struct rockchip_vpu_dev *vpu,
WARN_ON(pix_fmt->num_planes > 3);
- vepu_write_relaxed(vpu, ctx->bounce_dma_addr,
+ vepu_write_relaxed(vpu, ctx->jpeg_enc.bounce_buffer.dma,
VEPU_REG_ADDR_OUTPUT_STREAM);
- vepu_write_relaxed(vpu, ctx->bounce_size,
+ vepu_write_relaxed(vpu, ctx->jpeg_enc.bounce_buffer.size,
VEPU_REG_STR_BUF_LIMIT);
if (pix_fmt->num_planes == 1) {
@@ -93,7 +93,7 @@ static void rk3399_vpu_jpeg_enc_set_buffers(struct rockchip_vpu_dev *vpu,
}
static void
-rk3399_vpu_jpeg_enc_set_qtable(struct rockchip_vpu_dev *vpu,
+rk3399_vpu_jpeg_enc_set_qtable(struct hantro_dev *vpu,
unsigned char *luma_qtable,
unsigned char *chroma_qtable)
{
@@ -108,22 +108,26 @@ rk3399_vpu_jpeg_enc_set_qtable(struct rockchip_vpu_dev *vpu,
}
}
-void rk3399_vpu_jpeg_enc_run(struct rockchip_vpu_ctx *ctx)
+void rk3399_vpu_jpeg_enc_run(struct hantro_ctx *ctx)
{
- struct rockchip_vpu_dev *vpu = ctx->dev;
+ struct hantro_dev *vpu = ctx->dev;
struct vb2_v4l2_buffer *src_buf, *dst_buf;
- struct rockchip_vpu_jpeg_ctx jpeg_ctx;
+ struct hantro_jpeg_ctx jpeg_ctx;
+ struct media_request *src_req;
u32 reg;
src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
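+ /*
+ * Apply the controls bundled with the source buffer's request
+ * (e.g. the JPEG quality control) before they are read below.
+ */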
+ src_req = src_buf->vb2_buf.req_obj.req;
+ v4l2_ctrl_request_setup(src_req, &ctx->ctrl_handler);
+
memset(&jpeg_ctx, 0, sizeof(jpeg_ctx));
jpeg_ctx.buffer = vb2_plane_vaddr(&dst_buf->vb2_buf, 0);
jpeg_ctx.width = ctx->dst_fmt.width;
jpeg_ctx.height = ctx->dst_fmt.height;
jpeg_ctx.quality = ctx->jpeg_quality;
- rockchip_vpu_jpeg_header_assemble(&jpeg_ctx);
+ hantro_jpeg_header_assemble(&jpeg_ctx);
/* Switch to JPEG encoder mode before writing registers */
vepu_write_relaxed(vpu, VEPU_REG_ENCODE_FORMAT_JPEG,
@@ -132,8 +136,8 @@ void rk3399_vpu_jpeg_enc_run(struct rockchip_vpu_ctx *ctx)
rk3399_vpu_set_src_img_ctrl(vpu, ctx);
rk3399_vpu_jpeg_enc_set_buffers(vpu, ctx, &src_buf->vb2_buf);
rk3399_vpu_jpeg_enc_set_qtable(vpu,
- rockchip_vpu_jpeg_get_qtable(&jpeg_ctx, 0),
- rockchip_vpu_jpeg_get_qtable(&jpeg_ctx, 1));
+ hantro_jpeg_get_qtable(&jpeg_ctx, 0),
+ hantro_jpeg_get_qtable(&jpeg_ctx, 1));
reg = VEPU_REG_OUTPUT_SWAP32
| VEPU_REG_OUTPUT_SWAP16
@@ -153,6 +157,8 @@ void rk3399_vpu_jpeg_enc_run(struct rockchip_vpu_ctx *ctx)
| VEPU_REG_ENCODE_FORMAT_JPEG
| VEPU_REG_ENCODE_ENABLE;
+ v4l2_ctrl_request_complete(src_req, &ctx->ctrl_handler);
+
/* Kick the watchdog and start encoding */
schedule_delayed_work(&vpu->watchdog_work, msecs_to_jiffies(2000));
vepu_write(vpu, reg, VEPU_REG_ENCODE_START);
diff --git a/drivers/staging/media/hantro/rk3399_vpu_hw_mpeg2_dec.c b/drivers/staging/media/hantro/rk3399_vpu_hw_mpeg2_dec.c
new file mode 100644
index 000000000000..8685bddfbcab
--- /dev/null
+++ b/drivers/staging/media/hantro/rk3399_vpu_hw_mpeg2_dec.c
@@ -0,0 +1,266 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
+ */
+
+#include <asm/unaligned.h>
+#include <linux/bitfield.h>
+#include <media/v4l2-mem2mem.h>
+#include "hantro.h"
+#include "hantro_hw.h"
+
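+/* Software registers are 32 bits wide, so swreg N lives at byte offset N * 4. */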
+#define VDPU_SWREG(nr) ((nr) * 4)
+
+#define VDPU_REG_DEC_OUT_BASE VDPU_SWREG(63)
+#define VDPU_REG_RLC_VLC_BASE VDPU_SWREG(64)
+#define VDPU_REG_QTABLE_BASE VDPU_SWREG(61)
+#define VDPU_REG_REFER0_BASE VDPU_SWREG(131)
+#define VDPU_REG_REFER2_BASE VDPU_SWREG(134)
+#define VDPU_REG_REFER3_BASE VDPU_SWREG(135)
+#define VDPU_REG_REFER1_BASE VDPU_SWREG(148)
+#define VDPU_REG_DEC_E(v) ((v) ? BIT(0) : 0)
+
+#define VDPU_REG_DEC_ADV_PRE_DIS(v) ((v) ? BIT(11) : 0)
+#define VDPU_REG_DEC_SCMD_DIS(v) ((v) ? BIT(10) : 0)
+#define VDPU_REG_FILTERING_DIS(v) ((v) ? BIT(8) : 0)
+#define VDPU_REG_DEC_LATENCY(v) (((v) << 1) & GENMASK(6, 1))
+
+#define VDPU_REG_INIT_QP(v) (((v) << 25) & GENMASK(30, 25))
+#define VDPU_REG_STREAM_LEN(v) (((v) << 0) & GENMASK(23, 0))
+
+#define VDPU_REG_APF_THRESHOLD(v) (((v) << 17) & GENMASK(30, 17))
+#define VDPU_REG_STARTMB_X(v) (((v) << 8) & GENMASK(16, 8))
+#define VDPU_REG_STARTMB_Y(v) (((v) << 0) & GENMASK(7, 0))
+
+#define VDPU_REG_DEC_MODE(v) (((v) << 0) & GENMASK(3, 0))
+
+#define VDPU_REG_DEC_STRENDIAN_E(v) ((v) ? BIT(5) : 0)
+#define VDPU_REG_DEC_STRSWAP32_E(v) ((v) ? BIT(4) : 0)
+#define VDPU_REG_DEC_OUTSWAP32_E(v) ((v) ? BIT(3) : 0)
+#define VDPU_REG_DEC_INSWAP32_E(v) ((v) ? BIT(2) : 0)
+#define VDPU_REG_DEC_OUT_ENDIAN(v) ((v) ? BIT(1) : 0)
+#define VDPU_REG_DEC_IN_ENDIAN(v) ((v) ? BIT(0) : 0)
+
+#define VDPU_REG_DEC_DATA_DISC_E(v) ((v) ? BIT(22) : 0)
+#define VDPU_REG_DEC_MAX_BURST(v) (((v) << 16) & GENMASK(20, 16))
+#define VDPU_REG_DEC_AXI_WR_ID(v) (((v) << 8) & GENMASK(15, 8))
+#define VDPU_REG_DEC_AXI_RD_ID(v) (((v) << 0) & GENMASK(7, 0))
+
+#define VDPU_REG_RLC_MODE_E(v) ((v) ? BIT(20) : 0)
+#define VDPU_REG_PIC_INTERLACE_E(v) ((v) ? BIT(17) : 0)
+#define VDPU_REG_PIC_FIELDMODE_E(v) ((v) ? BIT(16) : 0)
+#define VDPU_REG_PIC_B_E(v) ((v) ? BIT(15) : 0)
+#define VDPU_REG_PIC_INTER_E(v) ((v) ? BIT(14) : 0)
+#define VDPU_REG_PIC_TOPFIELD_E(v) ((v) ? BIT(13) : 0)
+#define VDPU_REG_FWD_INTERLACE_E(v) ((v) ? BIT(12) : 0)
+#define VDPU_REG_WRITE_MVS_E(v) ((v) ? BIT(10) : 0)
+#define VDPU_REG_DEC_TIMEOUT_E(v) ((v) ? BIT(5) : 0)
+#define VDPU_REG_DEC_CLK_GATE_E(v) ((v) ? BIT(4) : 0)
+
+#define VDPU_REG_PIC_MB_WIDTH(v) (((v) << 23) & GENMASK(31, 23))
+#define VDPU_REG_PIC_MB_HEIGHT_P(v) (((v) << 11) & GENMASK(18, 11))
+#define VDPU_REG_ALT_SCAN_E(v) ((v) ? BIT(6) : 0)
+#define VDPU_REG_TOPFIELDFIRST_E(v) ((v) ? BIT(5) : 0)
+
+#define VDPU_REG_STRM_START_BIT(v) (((v) << 26) & GENMASK(31, 26))
+#define VDPU_REG_QSCALE_TYPE(v) ((v) ? BIT(24) : 0)
+#define VDPU_REG_CON_MV_E(v) ((v) ? BIT(4) : 0)
+#define VDPU_REG_INTRA_DC_PREC(v) (((v) << 2) & GENMASK(3, 2))
+#define VDPU_REG_INTRA_VLC_TAB(v) ((v) ? BIT(1) : 0)
+#define VDPU_REG_FRAME_PRED_DCT(v) ((v) ? BIT(0) : 0)
+
+#define VDPU_REG_ALT_SCAN_FLAG_E(v) ((v) ? BIT(19) : 0)
+#define VDPU_REG_FCODE_FWD_HOR(v) (((v) << 15) & GENMASK(18, 15))
+#define VDPU_REG_FCODE_FWD_VER(v) (((v) << 11) & GENMASK(14, 11))
+#define VDPU_REG_FCODE_BWD_HOR(v) (((v) << 7) & GENMASK(10, 7))
+#define VDPU_REG_FCODE_BWD_VER(v) (((v) << 3) & GENMASK(6, 3))
+#define VDPU_REG_MV_ACCURACY_FWD(v) ((v) ? BIT(2) : 0)
+#define VDPU_REG_MV_ACCURACY_BWD(v) ((v) ? BIT(1) : 0)
+
+#define PICT_TOP_FIELD 1
+#define PICT_BOTTOM_FIELD 2
+#define PICT_FRAME 3
+
+static void
+rk3399_vpu_mpeg2_dec_set_quantization(struct hantro_dev *vpu,
+ struct hantro_ctx *ctx)
+{
+ struct v4l2_ctrl_mpeg2_quantization *quantization;
+
+ quantization = hantro_get_ctrl(ctx,
+ V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION);
+ hantro_mpeg2_dec_copy_qtable(ctx->mpeg2_dec.qtable.cpu, quantization);
+ vdpu_write_relaxed(vpu, ctx->mpeg2_dec.qtable.dma,
+ VDPU_REG_QTABLE_BASE);
+}
+
+static void
+rk3399_vpu_mpeg2_dec_set_buffers(struct hantro_dev *vpu,
+ struct hantro_ctx *ctx,
+ struct vb2_buffer *src_buf,
+ struct vb2_buffer *dst_buf,
+ const struct v4l2_mpeg2_sequence *sequence,
+ const struct v4l2_mpeg2_picture *picture,
+ const struct v4l2_ctrl_mpeg2_slice_params *slice_params)
+{
+ dma_addr_t forward_addr = 0, backward_addr = 0;
+ dma_addr_t current_addr, addr;
+ struct vb2_queue *vq;
+
+ vq = v4l2_m2m_get_dst_vq(ctx->fh.m2m_ctx);
+
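+ /* B pictures need both reference frames, P pictures only the forward one. */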
+ switch (picture->picture_coding_type) {
+ case V4L2_MPEG2_PICTURE_CODING_TYPE_B:
+ backward_addr = hantro_get_ref(vq,
+ slice_params->backward_ref_ts);
+ /* fall-through */
+ case V4L2_MPEG2_PICTURE_CODING_TYPE_P:
+ forward_addr = hantro_get_ref(vq,
+ slice_params->forward_ref_ts);
+ }
+
+ /* Source bitstream buffer */
+ addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+ vdpu_write_relaxed(vpu, addr, VDPU_REG_RLC_VLC_BASE);
+
+ /* Destination frame buffer */
+ addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
+ current_addr = addr;
+
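+ /*
+ * Fields of an interlaced picture are stored line-interleaved, so the
+ * bottom field starts one luma line (width aligned to 16) past the
+ * frame base address.
+ */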
+ if (picture->picture_structure == PICT_BOTTOM_FIELD)
+ addr += ALIGN(ctx->dst_fmt.width, 16);
+ vdpu_write_relaxed(vpu, addr, VDPU_REG_DEC_OUT_BASE);
+
+ if (!forward_addr)
+ forward_addr = current_addr;
+ if (!backward_addr)
+ backward_addr = current_addr;
+
+ /* Set forward ref frame (top/bottom field) */
+ if (picture->picture_structure == PICT_FRAME ||
+ picture->picture_coding_type == V4L2_MPEG2_PICTURE_CODING_TYPE_B ||
+ (picture->picture_structure == PICT_TOP_FIELD &&
+ picture->top_field_first) ||
+ (picture->picture_structure == PICT_BOTTOM_FIELD &&
+ !picture->top_field_first)) {
+ vdpu_write_relaxed(vpu, forward_addr, VDPU_REG_REFER0_BASE);
+ vdpu_write_relaxed(vpu, forward_addr, VDPU_REG_REFER1_BASE);
+ } else if (picture->picture_structure == PICT_TOP_FIELD) {
+ vdpu_write_relaxed(vpu, forward_addr, VDPU_REG_REFER0_BASE);
+ vdpu_write_relaxed(vpu, current_addr, VDPU_REG_REFER1_BASE);
+ } else if (picture->picture_structure == PICT_BOTTOM_FIELD) {
+ vdpu_write_relaxed(vpu, current_addr, VDPU_REG_REFER0_BASE);
+ vdpu_write_relaxed(vpu, forward_addr, VDPU_REG_REFER1_BASE);
+ }
+
+ /* Set backward ref frame (top/bottom field) */
+ vdpu_write_relaxed(vpu, backward_addr, VDPU_REG_REFER2_BASE);
+ vdpu_write_relaxed(vpu, backward_addr, VDPU_REG_REFER3_BASE);
+}
+
+void rk3399_vpu_mpeg2_dec_run(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+ const struct v4l2_ctrl_mpeg2_slice_params *slice_params;
+ const struct v4l2_mpeg2_sequence *sequence;
+ const struct v4l2_mpeg2_picture *picture;
+ u32 reg;
+
+ src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+
+ /* Apply request controls if any */
+ v4l2_ctrl_request_setup(src_buf->vb2_buf.req_obj.req,
+ &ctx->ctrl_handler);
+
+ slice_params = hantro_get_ctrl(ctx,
+ V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS);
+ sequence = &slice_params->sequence;
+ picture = &slice_params->picture;
+
+ reg = VDPU_REG_DEC_ADV_PRE_DIS(0) |
+ VDPU_REG_DEC_SCMD_DIS(0) |
+ VDPU_REG_FILTERING_DIS(1) |
+ VDPU_REG_DEC_LATENCY(0);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(50));
+
+ reg = VDPU_REG_INIT_QP(1) |
+ VDPU_REG_STREAM_LEN(slice_params->bit_size >> 3);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(51));
+
+ reg = VDPU_REG_APF_THRESHOLD(8) |
+ VDPU_REG_STARTMB_X(0) |
+ VDPU_REG_STARTMB_Y(0);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(52));
+
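+ /* Decoding mode 5 selects MPEG-2 on this core. */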
+ reg = VDPU_REG_DEC_MODE(5);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(53));
+
+ reg = VDPU_REG_DEC_STRENDIAN_E(1) |
+ VDPU_REG_DEC_STRSWAP32_E(1) |
+ VDPU_REG_DEC_OUTSWAP32_E(1) |
+ VDPU_REG_DEC_INSWAP32_E(1) |
+ VDPU_REG_DEC_OUT_ENDIAN(1) |
+ VDPU_REG_DEC_IN_ENDIAN(1);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(54));
+
+ reg = VDPU_REG_DEC_DATA_DISC_E(0) |
+ VDPU_REG_DEC_MAX_BURST(16) |
+ VDPU_REG_DEC_AXI_WR_ID(0) |
+ VDPU_REG_DEC_AXI_RD_ID(0);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(56));
+
+ reg = VDPU_REG_RLC_MODE_E(0) |
+ VDPU_REG_PIC_INTERLACE_E(!sequence->progressive_sequence) |
+ VDPU_REG_PIC_FIELDMODE_E(picture->picture_structure != PICT_FRAME) |
+ VDPU_REG_PIC_B_E(picture->picture_coding_type == V4L2_MPEG2_PICTURE_CODING_TYPE_B) |
+ VDPU_REG_PIC_INTER_E(picture->picture_coding_type != V4L2_MPEG2_PICTURE_CODING_TYPE_I) |
+ VDPU_REG_PIC_TOPFIELD_E(picture->picture_structure == PICT_TOP_FIELD) |
+ VDPU_REG_FWD_INTERLACE_E(0) |
+ VDPU_REG_WRITE_MVS_E(0) |
+ VDPU_REG_DEC_TIMEOUT_E(1) |
+ VDPU_REG_DEC_CLK_GATE_E(1);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(57));
+
+ reg = VDPU_REG_PIC_MB_WIDTH(MPEG2_MB_WIDTH(ctx->dst_fmt.width)) |
+ VDPU_REG_PIC_MB_HEIGHT_P(MPEG2_MB_HEIGHT(ctx->dst_fmt.height)) |
+ VDPU_REG_ALT_SCAN_E(picture->alternate_scan) |
+ VDPU_REG_TOPFIELDFIRST_E(picture->top_field_first);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(120));
+
+ reg = VDPU_REG_STRM_START_BIT(slice_params->data_bit_offset) |
+ VDPU_REG_QSCALE_TYPE(picture->q_scale_type) |
+ VDPU_REG_CON_MV_E(picture->concealment_motion_vectors) |
+ VDPU_REG_INTRA_DC_PREC(picture->intra_dc_precision) |
+ VDPU_REG_INTRA_VLC_TAB(picture->intra_vlc_format) |
+ VDPU_REG_FRAME_PRED_DCT(picture->frame_pred_frame_dct);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(122));
+
+ reg = VDPU_REG_ALT_SCAN_FLAG_E(picture->alternate_scan) |
+ VDPU_REG_FCODE_FWD_HOR(picture->f_code[0][0]) |
+ VDPU_REG_FCODE_FWD_VER(picture->f_code[0][1]) |
+ VDPU_REG_FCODE_BWD_HOR(picture->f_code[1][0]) |
+ VDPU_REG_FCODE_BWD_VER(picture->f_code[1][1]) |
+ VDPU_REG_MV_ACCURACY_FWD(1) |
+ VDPU_REG_MV_ACCURACY_BWD(1);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(136));
+
+ rk3399_vpu_mpeg2_dec_set_quantization(vpu, ctx);
+
+ rk3399_vpu_mpeg2_dec_set_buffers(vpu, ctx, &src_buf->vb2_buf,
+ &dst_buf->vb2_buf,
+ sequence, picture, slice_params);
+
+ /* Controls no longer in-use, we can complete them */
+ v4l2_ctrl_request_complete(src_buf->vb2_buf.req_obj.req,
+ &ctx->ctrl_handler);
+
+ /* Kick the watchdog and start decoding */
+ schedule_delayed_work(&vpu->watchdog_work, msecs_to_jiffies(2000));
+
+ reg = vdpu_read(vpu, VDPU_SWREG(57)) | VDPU_REG_DEC_E(1);
+ vdpu_write(vpu, reg, VDPU_SWREG(57));
+}
diff --git a/drivers/staging/media/rockchip/vpu/rk3399_vpu_regs.h b/drivers/staging/media/hantro/rk3399_vpu_regs.h
index fbe294177ec9..88d096920f30 100644
--- a/drivers/staging/media/rockchip/vpu/rk3399_vpu_regs.h
+++ b/drivers/staging/media/hantro/rk3399_vpu_regs.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Rockchip VPU codec driver
+ * Hantro VPU codec driver
*
* Copyright (C) 2018 Rockchip Electronics Co., Ltd.
* Alpha Lin <alpha.lin@rock-chips.com>
diff --git a/drivers/staging/media/imx/Makefile b/drivers/staging/media/imx/Makefile
index d2d909a36239..aa6c4b4ad37e 100644
--- a/drivers/staging/media/imx/Makefile
+++ b/drivers/staging/media/imx/Makefile
@@ -1,16 +1,16 @@
# SPDX-License-Identifier: GPL-2.0
-imx-media-objs := imx-media-dev.o imx-media-internal-sd.o imx-media-of.o
-imx-media-objs += imx-media-dev-common.o
-imx-media-common-objs := imx-media-utils.o imx-media-fim.o
-imx-media-ic-objs := imx-ic-common.o imx-ic-prp.o imx-ic-prpencvf.o
+imx6-media-objs := imx-media-dev.o imx-media-internal-sd.o \
+ imx-ic-common.o imx-ic-prp.o imx-ic-prpencvf.o imx-media-vdic.o
-obj-$(CONFIG_VIDEO_IMX_MEDIA) += imx-media.o
+imx-media-common-objs := imx-media-capture.o imx-media-dev-common.o \
+ imx-media-of.o imx-media-utils.o
+
+imx6-media-csi-objs := imx-media-csi.o imx-media-fim.o
+
+obj-$(CONFIG_VIDEO_IMX_MEDIA) += imx6-media.o
obj-$(CONFIG_VIDEO_IMX_MEDIA) += imx-media-common.o
-obj-$(CONFIG_VIDEO_IMX_MEDIA) += imx-media-capture.o
-obj-$(CONFIG_VIDEO_IMX_MEDIA) += imx-media-vdic.o
-obj-$(CONFIG_VIDEO_IMX_MEDIA) += imx-media-ic.o
-obj-$(CONFIG_VIDEO_IMX_CSI) += imx-media-csi.o
+obj-$(CONFIG_VIDEO_IMX_CSI) += imx6-media-csi.o
obj-$(CONFIG_VIDEO_IMX_CSI) += imx6-mipi-csi2.o
obj-$(CONFIG_VIDEO_IMX7_CSI) += imx7-media-csi.o
diff --git a/drivers/staging/media/imx/imx-ic-common.c b/drivers/staging/media/imx/imx-ic-common.c
index 18cd4cb92431..6df1ffb53895 100644
--- a/drivers/staging/media/imx/imx-ic-common.c
+++ b/drivers/staging/media/imx/imx-ic-common.c
@@ -4,8 +4,6 @@
*
* Copyright (c) 2014-2016 Mentor Graphics Inc.
*/
-#include <linux/module.h>
-#include <linux/platform_device.h>
#include <media/v4l2-device.h>
#include <media/v4l2-subdev.h>
#include "imx-media.h"
@@ -20,23 +18,23 @@ static struct imx_ic_ops *ic_ops[IC_NUM_OPS] = {
[IC_TASK_VIEWFINDER] = &imx_ic_prpencvf_ops,
};
-static int imx_ic_probe(struct platform_device *pdev)
+struct v4l2_subdev *imx_media_ic_register(struct v4l2_device *v4l2_dev,
+ struct device *ipu_dev,
+ struct ipu_soc *ipu,
+ u32 grp_id)
{
- struct imx_media_ipu_internal_sd_pdata *pdata;
struct imx_ic_priv *priv;
int ret;
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ priv = devm_kzalloc(ipu_dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
- platform_set_drvdata(pdev, &priv->sd);
- priv->dev = &pdev->dev;
+ priv->ipu_dev = ipu_dev;
+ priv->ipu = ipu;
- /* get our ipu_id, grp_id and IC task id */
- pdata = priv->dev->platform_data;
- priv->ipu_id = pdata->ipu_id;
- switch (pdata->grp_id) {
+ /* get our IC task id */
+ switch (grp_id) {
case IMX_MEDIA_GRP_ID_IPU_IC_PRP:
priv->task_id = IC_TASK_PRP;
break;
@@ -47,7 +45,7 @@ static int imx_ic_probe(struct platform_device *pdev)
priv->task_id = IC_TASK_VIEWFINDER;
break;
default:
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
v4l2_subdev_init(&priv->sd, ic_ops[priv->task_id]->subdev_ops);
@@ -55,55 +53,35 @@ static int imx_ic_probe(struct platform_device *pdev)
priv->sd.internal_ops = ic_ops[priv->task_id]->internal_ops;
priv->sd.entity.ops = ic_ops[priv->task_id]->entity_ops;
priv->sd.entity.function = MEDIA_ENT_F_PROC_VIDEO_SCALER;
- priv->sd.dev = &pdev->dev;
- priv->sd.owner = THIS_MODULE;
+ priv->sd.owner = ipu_dev->driver->owner;
priv->sd.flags = V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
- priv->sd.grp_id = pdata->grp_id;
- strscpy(priv->sd.name, pdata->sd_name, sizeof(priv->sd.name));
+ priv->sd.grp_id = grp_id;
+ imx_media_grp_id_to_sd_name(priv->sd.name, sizeof(priv->sd.name),
+ priv->sd.grp_id, ipu_get_num(ipu));
ret = ic_ops[priv->task_id]->init(priv);
if (ret)
- return ret;
+ return ERR_PTR(ret);
- ret = v4l2_async_register_subdev(&priv->sd);
- if (ret)
+ ret = v4l2_device_register_subdev(v4l2_dev, &priv->sd);
+ if (ret) {
ic_ops[priv->task_id]->remove(priv);
+ return ERR_PTR(ret);
+ }
- return ret;
+ return &priv->sd;
}
-static int imx_ic_remove(struct platform_device *pdev)
+int imx_media_ic_unregister(struct v4l2_subdev *sd)
{
- struct v4l2_subdev *sd = platform_get_drvdata(pdev);
struct imx_ic_priv *priv = container_of(sd, struct imx_ic_priv, sd);
v4l2_info(sd, "Removing\n");
ic_ops[priv->task_id]->remove(priv);
- v4l2_async_unregister_subdev(sd);
+ v4l2_device_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
return 0;
}
-
-static const struct platform_device_id imx_ic_ids[] = {
- { .name = "imx-ipuv3-ic" },
- { },
-};
-MODULE_DEVICE_TABLE(platform, imx_ic_ids);
-
-static struct platform_driver imx_ic_driver = {
- .probe = imx_ic_probe,
- .remove = imx_ic_remove,
- .id_table = imx_ic_ids,
- .driver = {
- .name = "imx-ipuv3-ic",
- },
-};
-module_platform_driver(imx_ic_driver);
-
-MODULE_DESCRIPTION("i.MX IC subdev driver");
-MODULE_AUTHOR("Steve Longerbeam <steve_longerbeam@mentor.com>");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:imx-ipuv3-ic");
diff --git a/drivers/staging/media/imx/imx-ic-prp.c b/drivers/staging/media/imx/imx-ic-prp.c
index 10ffe00f1a54..5b4af3cfe670 100644
--- a/drivers/staging/media/imx/imx-ic-prp.c
+++ b/drivers/staging/media/imx/imx-ic-prp.c
@@ -35,16 +35,12 @@
#define S_ALIGN 1 /* multiple of 2 */
struct prp_priv {
- struct imx_media_dev *md;
struct imx_ic_priv *ic_priv;
struct media_pad pad[PRP_NUM_PADS];
/* lock to protect all members below */
struct mutex lock;
- /* IPU units we require */
- struct ipu_soc *ipu;
-
struct v4l2_subdev *src_sd;
struct v4l2_subdev *sink_sd_prpenc;
struct v4l2_subdev *sink_sd_prpvf;
@@ -62,7 +58,7 @@ static inline struct prp_priv *sd_to_priv(struct v4l2_subdev *sd)
{
struct imx_ic_priv *ic_priv = v4l2_get_subdevdata(sd);
- return ic_priv->prp_priv;
+ return ic_priv->task_priv;
}
static int prp_start(struct prp_priv *priv)
@@ -70,12 +66,10 @@ static int prp_start(struct prp_priv *priv)
struct imx_ic_priv *ic_priv = priv->ic_priv;
bool src_is_vdic;
- priv->ipu = priv->md->ipu[ic_priv->ipu_id];
-
/* set IC to receive from CSI or VDI depending on source */
src_is_vdic = !!(priv->src_sd->grp_id & IMX_MEDIA_GRP_ID_IPU_VDIC);
- ipu_set_ic_src_mux(priv->ipu, priv->csi_id, src_is_vdic);
+ ipu_set_ic_src_mux(ic_priv->ipu, priv->csi_id, src_is_vdic);
return 0;
}
@@ -216,12 +210,12 @@ static int prp_link_setup(struct media_entity *entity,
{
struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
struct imx_ic_priv *ic_priv = v4l2_get_subdevdata(sd);
- struct prp_priv *priv = ic_priv->prp_priv;
+ struct prp_priv *priv = ic_priv->task_priv;
struct v4l2_subdev *remote_sd;
int ret = 0;
- dev_dbg(ic_priv->dev, "link setup %s -> %s", remote->entity->name,
- local->entity->name);
+ dev_dbg(ic_priv->ipu_dev, "%s: link setup %s -> %s",
+ ic_priv->sd.name, remote->entity->name, local->entity->name);
remote_sd = media_entity_to_v4l2_subdev(remote->entity);
@@ -295,7 +289,7 @@ static int prp_link_validate(struct v4l2_subdev *sd,
struct v4l2_subdev_format *sink_fmt)
{
struct imx_ic_priv *ic_priv = v4l2_get_subdevdata(sd);
- struct prp_priv *priv = ic_priv->prp_priv;
+ struct prp_priv *priv = ic_priv->task_priv;
struct v4l2_subdev *csi;
int ret;
@@ -304,8 +298,8 @@ static int prp_link_validate(struct v4l2_subdev *sd,
if (ret)
return ret;
- csi = imx_media_find_upstream_subdev(priv->md, &ic_priv->sd.entity,
- IMX_MEDIA_GRP_ID_IPU_CSI);
+ csi = imx_media_pipeline_subdev(&ic_priv->sd.entity,
+ IMX_MEDIA_GRP_ID_IPU_CSI, true);
if (IS_ERR(csi))
csi = NULL;
@@ -351,7 +345,7 @@ out:
static int prp_s_stream(struct v4l2_subdev *sd, int enable)
{
struct imx_ic_priv *ic_priv = v4l2_get_subdevdata(sd);
- struct prp_priv *priv = ic_priv->prp_priv;
+ struct prp_priv *priv = ic_priv->task_priv;
int ret = 0;
mutex_lock(&priv->lock);
@@ -368,7 +362,8 @@ static int prp_s_stream(struct v4l2_subdev *sd, int enable)
if (priv->stream_count != !enable)
goto update_count;
- dev_dbg(ic_priv->dev, "stream %s\n", enable ? "ON" : "OFF");
+ dev_dbg(ic_priv->ipu_dev, "%s: stream %s\n", sd->name,
+ enable ? "ON" : "OFF");
if (enable)
ret = prp_start(priv);
@@ -440,9 +435,6 @@ static int prp_registered(struct v4l2_subdev *sd)
int i, ret;
u32 code;
- /* get media device */
- priv->md = dev_get_drvdata(sd->v4l2_dev->dev);
-
for (i = 0; i < PRP_NUM_PADS; i++) {
priv->pad[i].flags = (i == PRP_SINK_PAD) ?
MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
@@ -494,12 +486,12 @@ static int prp_init(struct imx_ic_priv *ic_priv)
{
struct prp_priv *priv;
- priv = devm_kzalloc(ic_priv->dev, sizeof(*priv), GFP_KERNEL);
+ priv = devm_kzalloc(ic_priv->ipu_dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
mutex_init(&priv->lock);
- ic_priv->prp_priv = priv;
+ ic_priv->task_priv = priv;
priv->ic_priv = ic_priv;
return 0;
@@ -507,7 +499,7 @@ static int prp_init(struct imx_ic_priv *ic_priv)
static void prp_remove(struct imx_ic_priv *ic_priv)
{
- struct prp_priv *priv = ic_priv->prp_priv;
+ struct prp_priv *priv = ic_priv->task_priv;
mutex_destroy(&priv->lock);
}
diff --git a/drivers/staging/media/imx/imx-ic-prpencvf.c b/drivers/staging/media/imx/imx-ic-prpencvf.c
index 64037b0a8387..82bba68c554e 100644
--- a/drivers/staging/media/imx/imx-ic-prpencvf.c
+++ b/drivers/staging/media/imx/imx-ic-prpencvf.c
@@ -50,7 +50,6 @@
#define S_ALIGN 1 /* multiple of 2 */
struct prp_priv {
- struct imx_media_dev *md;
struct imx_ic_priv *ic_priv;
struct media_pad pad[PRPENCVF_NUM_PADS];
/* the video device at output pad */
@@ -60,7 +59,6 @@ struct prp_priv {
struct mutex lock;
/* IPU units we require */
- struct ipu_soc *ipu;
struct ipu_ic *ic;
struct ipuv3_channel *out_ch;
struct ipuv3_channel *rot_in_ch;
@@ -156,9 +154,7 @@ static int prp_get_ipu_resources(struct prp_priv *priv)
struct ipuv3_channel *out_ch, *rot_in_ch, *rot_out_ch;
int ret, task = ic_priv->task_id;
- priv->ipu = priv->md->ipu[ic_priv->ipu_id];
-
- ic = ipu_ic_get(priv->ipu, task);
+ ic = ipu_ic_get(ic_priv->ipu, task);
if (IS_ERR(ic)) {
v4l2_err(&ic_priv->sd, "failed to get IC\n");
ret = PTR_ERR(ic);
@@ -166,7 +162,7 @@ static int prp_get_ipu_resources(struct prp_priv *priv)
}
priv->ic = ic;
- out_ch = ipu_idmac_get(priv->ipu, prp_channel[task].out_ch);
+ out_ch = ipu_idmac_get(ic_priv->ipu, prp_channel[task].out_ch);
if (IS_ERR(out_ch)) {
v4l2_err(&ic_priv->sd, "could not get IDMAC channel %u\n",
prp_channel[task].out_ch);
@@ -175,7 +171,7 @@ static int prp_get_ipu_resources(struct prp_priv *priv)
}
priv->out_ch = out_ch;
- rot_in_ch = ipu_idmac_get(priv->ipu, prp_channel[task].rot_in_ch);
+ rot_in_ch = ipu_idmac_get(ic_priv->ipu, prp_channel[task].rot_in_ch);
if (IS_ERR(rot_in_ch)) {
v4l2_err(&ic_priv->sd, "could not get IDMAC channel %u\n",
prp_channel[task].rot_in_ch);
@@ -184,7 +180,7 @@ static int prp_get_ipu_resources(struct prp_priv *priv)
}
priv->rot_in_ch = rot_in_ch;
- rot_out_ch = ipu_idmac_get(priv->ipu, prp_channel[task].rot_out_ch);
+ rot_out_ch = ipu_idmac_get(ic_priv->ipu, prp_channel[task].rot_out_ch);
if (IS_ERR(rot_out_ch)) {
v4l2_err(&ic_priv->sd, "could not get IDMAC channel %u\n",
prp_channel[task].rot_out_ch);
@@ -464,13 +460,13 @@ static int prp_setup_rotation(struct prp_priv *priv)
incc = priv->cc[PRPENCVF_SINK_PAD];
outcc = vdev->cc;
- ret = imx_media_alloc_dma_buf(priv->md, &priv->rot_buf[0],
+ ret = imx_media_alloc_dma_buf(ic_priv->ipu_dev, &priv->rot_buf[0],
outfmt->sizeimage);
if (ret) {
v4l2_err(&ic_priv->sd, "failed to alloc rot_buf[0], %d\n", ret);
return ret;
}
- ret = imx_media_alloc_dma_buf(priv->md, &priv->rot_buf[1],
+ ret = imx_media_alloc_dma_buf(ic_priv->ipu_dev, &priv->rot_buf[1],
outfmt->sizeimage);
if (ret) {
v4l2_err(&ic_priv->sd, "failed to alloc rot_buf[1], %d\n", ret);
@@ -543,14 +539,16 @@ static int prp_setup_rotation(struct prp_priv *priv)
unsetup_vb2:
prp_unsetup_vb2_buf(priv, VB2_BUF_STATE_QUEUED);
free_rot1:
- imx_media_free_dma_buf(priv->md, &priv->rot_buf[1]);
+ imx_media_free_dma_buf(ic_priv->ipu_dev, &priv->rot_buf[1]);
free_rot0:
- imx_media_free_dma_buf(priv->md, &priv->rot_buf[0]);
+ imx_media_free_dma_buf(ic_priv->ipu_dev, &priv->rot_buf[0]);
return ret;
}
static void prp_unsetup_rotation(struct prp_priv *priv)
{
+ struct imx_ic_priv *ic_priv = priv->ic_priv;
+
ipu_ic_task_disable(priv->ic);
ipu_idmac_disable_channel(priv->out_ch);
@@ -561,8 +559,8 @@ static void prp_unsetup_rotation(struct prp_priv *priv)
ipu_ic_disable(priv->ic);
- imx_media_free_dma_buf(priv->md, &priv->rot_buf[0]);
- imx_media_free_dma_buf(priv->md, &priv->rot_buf[1]);
+ imx_media_free_dma_buf(ic_priv->ipu_dev, &priv->rot_buf[0]);
+ imx_media_free_dma_buf(ic_priv->ipu_dev, &priv->rot_buf[1]);
}
static int prp_setup_norotation(struct prp_priv *priv)
@@ -602,7 +600,7 @@ static int prp_setup_norotation(struct prp_priv *priv)
ipu_cpmem_dump(priv->out_ch);
ipu_ic_dump(priv->ic);
- ipu_dump(priv->ipu);
+ ipu_dump(ic_priv->ipu);
ipu_ic_enable(priv->ic);
@@ -654,7 +652,7 @@ static int prp_start(struct prp_priv *priv)
outfmt = &vdev->fmt.fmt.pix;
- ret = imx_media_alloc_dma_buf(priv->md, &priv->underrun_buf,
+ ret = imx_media_alloc_dma_buf(ic_priv->ipu_dev, &priv->underrun_buf,
outfmt->sizeimage);
if (ret)
goto out_put_ipu;
@@ -674,10 +672,10 @@ static int prp_start(struct prp_priv *priv)
if (ret)
goto out_free_underrun;
- priv->nfb4eof_irq = ipu_idmac_channel_irq(priv->ipu,
+ priv->nfb4eof_irq = ipu_idmac_channel_irq(ic_priv->ipu,
priv->out_ch,
IPU_IRQ_NFB4EOF);
- ret = devm_request_irq(ic_priv->dev, priv->nfb4eof_irq,
+ ret = devm_request_irq(ic_priv->ipu_dev, priv->nfb4eof_irq,
prp_nfb4eof_interrupt, 0,
"imx-ic-prp-nfb4eof", priv);
if (ret) {
@@ -688,12 +686,12 @@ static int prp_start(struct prp_priv *priv)
if (ipu_rot_mode_is_irt(priv->rot_mode))
priv->eof_irq = ipu_idmac_channel_irq(
- priv->ipu, priv->rot_out_ch, IPU_IRQ_EOF);
+ ic_priv->ipu, priv->rot_out_ch, IPU_IRQ_EOF);
else
priv->eof_irq = ipu_idmac_channel_irq(
- priv->ipu, priv->out_ch, IPU_IRQ_EOF);
+ ic_priv->ipu, priv->out_ch, IPU_IRQ_EOF);
- ret = devm_request_irq(ic_priv->dev, priv->eof_irq,
+ ret = devm_request_irq(ic_priv->ipu_dev, priv->eof_irq,
prp_eof_interrupt, 0,
"imx-ic-prp-eof", priv);
if (ret) {
@@ -718,13 +716,13 @@ static int prp_start(struct prp_priv *priv)
return 0;
out_free_eof_irq:
- devm_free_irq(ic_priv->dev, priv->eof_irq, priv);
+ devm_free_irq(ic_priv->ipu_dev, priv->eof_irq, priv);
out_free_nfb4eof_irq:
- devm_free_irq(ic_priv->dev, priv->nfb4eof_irq, priv);
+ devm_free_irq(ic_priv->ipu_dev, priv->nfb4eof_irq, priv);
out_unsetup:
prp_unsetup(priv, VB2_BUF_STATE_QUEUED);
out_free_underrun:
- imx_media_free_dma_buf(priv->md, &priv->underrun_buf);
+ imx_media_free_dma_buf(ic_priv->ipu_dev, &priv->underrun_buf);
out_put_ipu:
prp_put_ipu_resources(priv);
return ret;
@@ -756,12 +754,12 @@ static void prp_stop(struct prp_priv *priv)
v4l2_warn(&ic_priv->sd,
"upstream stream off failed: %d\n", ret);
- devm_free_irq(ic_priv->dev, priv->eof_irq, priv);
- devm_free_irq(ic_priv->dev, priv->nfb4eof_irq, priv);
+ devm_free_irq(ic_priv->ipu_dev, priv->eof_irq, priv);
+ devm_free_irq(ic_priv->ipu_dev, priv->nfb4eof_irq, priv);
prp_unsetup(priv, VB2_BUF_STATE_ERROR);
- imx_media_free_dma_buf(priv->md, &priv->underrun_buf);
+ imx_media_free_dma_buf(ic_priv->ipu_dev, &priv->underrun_buf);
/* cancel the EOF timeout timer */
del_timer_sync(&priv->eof_timeout_timer);
@@ -904,11 +902,8 @@ static int prp_set_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_format *sdformat)
{
struct prp_priv *priv = sd_to_priv(sd);
- struct imx_media_video_dev *vdev = priv->vdev;
const struct imx_media_pixfmt *cc;
- struct v4l2_pix_format vdev_fmt;
struct v4l2_mbus_framefmt *fmt;
- struct v4l2_rect vdev_compose;
int ret = 0;
if (sdformat->pad >= PRPENCVF_NUM_PADS)
@@ -944,19 +939,9 @@ static int prp_set_fmt(struct v4l2_subdev *sd,
priv->cc[PRPENCVF_SRC_PAD] = outcc;
}
- if (sdformat->which == V4L2_SUBDEV_FORMAT_TRY)
- goto out;
-
- priv->cc[sdformat->pad] = cc;
+ if (sdformat->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ priv->cc[sdformat->pad] = cc;
- /* propagate output pad format to capture device */
- imx_media_mbus_fmt_to_pix_fmt(&vdev_fmt, &vdev_compose,
- &priv->format_mbus[PRPENCVF_SRC_PAD],
- priv->cc[PRPENCVF_SRC_PAD]);
- mutex_unlock(&priv->lock);
- imx_media_capture_device_set_format(vdev, &vdev_fmt, &vdev_compose);
-
- return 0;
out:
mutex_unlock(&priv->lock);
return ret;
@@ -1011,8 +996,8 @@ static int prp_link_setup(struct media_entity *entity,
struct v4l2_subdev *remote_sd;
int ret = 0;
- dev_dbg(ic_priv->dev, "link setup %s -> %s", remote->entity->name,
- local->entity->name);
+ dev_dbg(ic_priv->ipu_dev, "%s: link setup %s -> %s",
+ ic_priv->sd.name, remote->entity->name, local->entity->name);
mutex_lock(&priv->lock);
@@ -1178,7 +1163,8 @@ static int prp_s_stream(struct v4l2_subdev *sd, int enable)
if (priv->stream_count != !enable)
goto update_count;
- dev_dbg(ic_priv->dev, "stream %s\n", enable ? "ON" : "OFF");
+ dev_dbg(ic_priv->ipu_dev, "%s: stream %s\n", sd->name,
+ enable ? "ON" : "OFF");
if (enable)
ret = prp_start(priv);
@@ -1241,9 +1227,6 @@ static int prp_registered(struct v4l2_subdev *sd)
int i, ret;
u32 code;
- /* get media device */
- priv->md = dev_get_drvdata(sd->v4l2_dev->dev);
-
for (i = 0; i < PRPENCVF_NUM_PADS; i++) {
priv->pad[i].flags = (i == PRPENCVF_SINK_PAD) ?
MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
@@ -1266,14 +1249,10 @@ static int prp_registered(struct v4l2_subdev *sd)
if (ret)
return ret;
- ret = imx_media_capture_device_register(priv->md, priv->vdev);
+ ret = imx_media_capture_device_register(priv->vdev);
if (ret)
return ret;
- ret = imx_media_add_video_device(priv->md, priv->vdev);
- if (ret)
- goto unreg;
-
ret = prp_init_controls(priv);
if (ret)
goto unreg;
@@ -1325,7 +1304,7 @@ static int prp_init(struct imx_ic_priv *ic_priv)
{
struct prp_priv *priv;
- priv = devm_kzalloc(ic_priv->dev, sizeof(*priv), GFP_KERNEL);
+ priv = devm_kzalloc(ic_priv->ipu_dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -1335,7 +1314,8 @@ static int prp_init(struct imx_ic_priv *ic_priv)
spin_lock_init(&priv->irqlock);
timer_setup(&priv->eof_timeout_timer, prp_eof_timeout, 0);
- priv->vdev = imx_media_capture_device_init(&ic_priv->sd,
+ priv->vdev = imx_media_capture_device_init(ic_priv->ipu_dev,
+ &ic_priv->sd,
PRPENCVF_SRC_PAD);
if (IS_ERR(priv->vdev))
return PTR_ERR(priv->vdev);
diff --git a/drivers/staging/media/imx/imx-ic.h b/drivers/staging/media/imx/imx-ic.h
index 0dbcf2a7ab5f..587c191c3eab 100644
--- a/drivers/staging/media/imx/imx-ic.h
+++ b/drivers/staging/media/imx/imx-ic.h
@@ -10,11 +10,10 @@
#include <media/v4l2-subdev.h>
struct imx_ic_priv {
- struct device *dev;
+ struct device *ipu_dev;
+ struct ipu_soc *ipu;
struct v4l2_subdev sd;
- int ipu_id;
int task_id;
- void *prp_priv;
void *task_priv;
};
@@ -29,6 +28,5 @@ struct imx_ic_ops {
extern struct imx_ic_ops imx_ic_prp_ops;
extern struct imx_ic_ops imx_ic_prpencvf_ops;
-extern struct imx_ic_ops imx_ic_pp_ops;
#endif
diff --git a/drivers/staging/media/imx/imx-media-capture.c b/drivers/staging/media/imx/imx-media-capture.c
index 9430c835c434..b33a07bc9105 100644
--- a/drivers/staging/media/imx/imx-media-capture.c
+++ b/drivers/staging/media/imx/imx-media-capture.c
@@ -202,6 +202,7 @@ static int capture_g_fmt_vid_cap(struct file *file, void *fh,
static int __capture_try_fmt_vid_cap(struct capture_priv *priv,
struct v4l2_subdev_format *fmt_src,
struct v4l2_format *f,
+ const struct imx_media_pixfmt **retcc,
struct v4l2_rect *compose)
{
const struct imx_media_pixfmt *cc, *cc_src;
@@ -242,8 +243,17 @@ static int __capture_try_fmt_vid_cap(struct capture_priv *priv,
}
}
- imx_media_mbus_fmt_to_pix_fmt(&f->fmt.pix, compose,
- &fmt_src->format, cc);
+ imx_media_mbus_fmt_to_pix_fmt(&f->fmt.pix, &fmt_src->format, cc);
+
+ if (retcc)
+ *retcc = cc;
+
+ if (compose) {
+ compose->left = 0;
+ compose->top = 0;
+ compose->width = fmt_src->format.width;
+ compose->height = fmt_src->format.height;
+ }
return 0;
}
@@ -261,7 +271,7 @@ static int capture_try_fmt_vid_cap(struct file *file, void *fh,
if (ret)
return ret;
- return __capture_try_fmt_vid_cap(priv, &fmt_src, f, NULL);
+ return __capture_try_fmt_vid_cap(priv, &fmt_src, f, NULL, NULL);
}
static int capture_s_fmt_vid_cap(struct file *file, void *fh,
@@ -269,7 +279,6 @@ static int capture_s_fmt_vid_cap(struct file *file, void *fh,
{
struct capture_priv *priv = video_drvdata(file);
struct v4l2_subdev_format fmt_src;
- struct v4l2_rect compose;
int ret;
if (vb2_is_busy(&priv->q)) {
@@ -283,14 +292,12 @@ static int capture_s_fmt_vid_cap(struct file *file, void *fh,
if (ret)
return ret;
- ret = __capture_try_fmt_vid_cap(priv, &fmt_src, f, &compose);
+ ret = __capture_try_fmt_vid_cap(priv, &fmt_src, f, &priv->vdev.cc,
+ &priv->vdev.compose);
if (ret)
return ret;
priv->vdev.fmt.fmt.pix = f->fmt.pix;
- priv->vdev.cc = imx_media_find_format(f->fmt.pix.pixelformat,
- CS_SEL_ANY, true);
- priv->vdev.compose = compose;
return 0;
}
@@ -520,6 +527,33 @@ static void capture_buf_queue(struct vb2_buffer *vb)
spin_unlock_irqrestore(&priv->q_lock, flags);
}
+static int capture_validate_fmt(struct capture_priv *priv)
+{
+ struct v4l2_subdev_format fmt_src;
+ const struct imx_media_pixfmt *cc;
+ struct v4l2_rect compose;
+ struct v4l2_format f;
+ int ret;
+
+ fmt_src.pad = priv->src_sd_pad;
+ fmt_src.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(priv->src_sd, pad, get_fmt, NULL, &fmt_src);
+ if (ret)
+ return ret;
+
+ v4l2_fill_pix_format(&f.fmt.pix, &fmt_src.format);
+
+ ret = __capture_try_fmt_vid_cap(priv, &fmt_src, &f, &cc, &compose);
+ if (ret)
+ return ret;
+
+ return (priv->vdev.fmt.fmt.pix.width != f.fmt.pix.width ||
+ priv->vdev.fmt.fmt.pix.height != f.fmt.pix.height ||
+ priv->vdev.cc->cs != cc->cs ||
+ priv->vdev.compose.width != compose.width ||
+ priv->vdev.compose.height != compose.height) ? -EINVAL : 0;
+}
+
static int capture_start_streaming(struct vb2_queue *vq, unsigned int count)
{
struct capture_priv *priv = vb2_get_drv_priv(vq);
@@ -527,6 +561,12 @@ static int capture_start_streaming(struct vb2_queue *vq, unsigned int count)
unsigned long flags;
int ret;
+ ret = capture_validate_fmt(priv);
+ if (ret) {
+ v4l2_err(priv->src_sd, "capture format not valid\n");
+ goto return_bufs;
+ }
+
ret = imx_media_pipeline_set_stream(priv->md, &priv->src_sd->entity,
true);
if (ret) {
@@ -614,7 +654,6 @@ static int capture_release(struct file *file)
struct capture_priv *priv = video_drvdata(file);
struct video_device *vfd = priv->vdev.vfd;
struct vb2_queue *vq = &priv->q;
- int ret = 0;
mutex_lock(&priv->mutex);
@@ -627,7 +666,7 @@ static int capture_release(struct file *file)
v4l2_fh_release(file);
mutex_unlock(&priv->mutex);
- return ret;
+ return 0;
}
static const struct v4l2_file_operations capture_fops = {
@@ -649,21 +688,6 @@ static struct video_device capture_videodev = {
.device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING,
};
-void imx_media_capture_device_set_format(struct imx_media_video_dev *vdev,
- const struct v4l2_pix_format *pix,
- const struct v4l2_rect *compose)
-{
- struct capture_priv *priv = to_capture_priv(vdev);
-
- mutex_lock(&priv->mutex);
- priv->vdev.fmt.fmt.pix = *pix;
- priv->vdev.cc = imx_media_find_format(pix->pixelformat, CS_SEL_ANY,
- true);
- priv->vdev.compose = *compose;
- mutex_unlock(&priv->mutex);
-}
-EXPORT_SYMBOL_GPL(imx_media_capture_device_set_format);
-
struct imx_media_buffer *
imx_media_capture_device_next_buf(struct imx_media_video_dev *vdev)
{
@@ -701,19 +725,20 @@ void imx_media_capture_device_error(struct imx_media_video_dev *vdev)
}
EXPORT_SYMBOL_GPL(imx_media_capture_device_error);
-int imx_media_capture_device_register(struct imx_media_dev *md,
- struct imx_media_video_dev *vdev)
+int imx_media_capture_device_register(struct imx_media_video_dev *vdev)
{
struct capture_priv *priv = to_capture_priv(vdev);
struct v4l2_subdev *sd = priv->src_sd;
+ struct v4l2_device *v4l2_dev = sd->v4l2_dev;
struct video_device *vfd = vdev->vfd;
struct vb2_queue *vq = &priv->q;
struct v4l2_subdev_format fmt_src;
int ret;
- priv->md = md;
+ /* get media device */
+ priv->md = container_of(v4l2_dev->mdev, struct imx_media_dev, md);
- vfd->v4l2_dev = sd->v4l2_dev;
+ vfd->v4l2_dev = v4l2_dev;
ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1);
if (ret) {
@@ -765,8 +790,10 @@ int imx_media_capture_device_register(struct imx_media_dev *md,
}
vdev->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- imx_media_mbus_fmt_to_pix_fmt(&vdev->fmt.fmt.pix, &vdev->compose,
+ imx_media_mbus_fmt_to_pix_fmt(&vdev->fmt.fmt.pix,
&fmt_src.format, NULL);
+ vdev->compose.width = fmt_src.format.width;
+ vdev->compose.height = fmt_src.format.height;
vdev->cc = imx_media_find_format(vdev->fmt.fmt.pix.pixelformat,
CS_SEL_ANY, false);
@@ -775,6 +802,9 @@ int imx_media_capture_device_register(struct imx_media_dev *md,
vfd->ctrl_handler = &priv->ctrl_hdlr;
+ /* add vdev to the video device list */
+ imx_media_add_video_device(priv->md, vdev);
+
return 0;
unreg:
video_unregister_device(vfd);
@@ -799,18 +829,19 @@ void imx_media_capture_device_unregister(struct imx_media_video_dev *vdev)
EXPORT_SYMBOL_GPL(imx_media_capture_device_unregister);
struct imx_media_video_dev *
-imx_media_capture_device_init(struct v4l2_subdev *src_sd, int pad)
+imx_media_capture_device_init(struct device *dev, struct v4l2_subdev *src_sd,
+ int pad)
{
struct capture_priv *priv;
struct video_device *vfd;
- priv = devm_kzalloc(src_sd->dev, sizeof(*priv), GFP_KERNEL);
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return ERR_PTR(-ENOMEM);
priv->src_sd = src_sd;
priv->src_sd_pad = pad;
- priv->dev = src_sd->dev;
+ priv->dev = dev;
mutex_init(&priv->mutex);
spin_lock_init(&priv->q_lock);
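The caller side of the capture helper gets correspondingly shorter: imx_media_capture_device_register() now derives the media device from the subdev's v4l2_device and adds the vdev to the device list itself. A sketch of a subdev ->registered() callback under the new API, mirroring the prpencvf and CSI hunks:

	ret = imx_media_capture_device_register(priv->vdev);
	if (ret)
		return ret;
	/*
	 * No separate imx_media_add_video_device() call is needed here
	 * anymore; registration adds the vdev to the media device list.
	 */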
diff --git a/drivers/staging/media/imx/imx-media-csi.c b/drivers/staging/media/imx/imx-media-csi.c
index 1d248aca40a9..0eeb0db6d83f 100644
--- a/drivers/staging/media/imx/imx-media-csi.c
+++ b/drivers/staging/media/imx/imx-media-csi.c
@@ -56,7 +56,6 @@ struct csi_skip_desc {
struct csi_priv {
struct device *dev;
struct ipu_soc *ipu;
- struct imx_media_dev *md;
struct v4l2_subdev sd;
struct media_pad pad[CSI_NUM_PADS];
/* the video device at IDMAC output pad */
@@ -178,8 +177,8 @@ static int csi_get_upstream_endpoint(struct csi_priv *priv,
* CSI-2 receiver if it is in the path, otherwise stay
* with video mux.
*/
- sd = imx_media_find_upstream_subdev(priv->md, src,
- IMX_MEDIA_GRP_ID_CSI2);
+ sd = imx_media_pipeline_subdev(src, IMX_MEDIA_GRP_ID_CSI2,
+ true);
if (!IS_ERR(sd))
src = &sd->entity;
}
@@ -193,9 +192,9 @@ static int csi_get_upstream_endpoint(struct csi_priv *priv,
src = &priv->sd.entity;
/* get source pad of entity directly upstream from src */
- pad = imx_media_find_upstream_pad(priv->md, src, 0);
- if (IS_ERR(pad))
- return PTR_ERR(pad);
+ pad = imx_media_pipeline_pad(src, 0, 0, true);
+ if (!pad)
+ return -ENODEV;
sd = media_entity_to_v4l2_subdev(pad->entity);
@@ -608,7 +607,7 @@ static int csi_idmac_start(struct csi_priv *priv)
outfmt = &vdev->fmt.fmt.pix;
- ret = imx_media_alloc_dma_buf(priv->md, &priv->underrun_buf,
+ ret = imx_media_alloc_dma_buf(priv->dev, &priv->underrun_buf,
outfmt->sizeimage);
if (ret)
goto out_put_ipu;
@@ -662,7 +661,7 @@ out_free_nfb4eof_irq:
out_unsetup:
csi_idmac_unsetup(priv, VB2_BUF_STATE_QUEUED);
out_free_dma_buf:
- imx_media_free_dma_buf(priv->md, &priv->underrun_buf);
+ imx_media_free_dma_buf(priv->dev, &priv->underrun_buf);
out_put_ipu:
csi_idmac_put_ipu_resources(priv);
return ret;
@@ -694,7 +693,7 @@ static void csi_idmac_stop(struct csi_priv *priv)
csi_idmac_unsetup(priv, VB2_BUF_STATE_ERROR);
- imx_media_free_dma_buf(priv->md, &priv->underrun_buf);
+ imx_media_free_dma_buf(priv->dev, &priv->underrun_buf);
/* cancel the EOF timeout timer */
del_timer_sync(&priv->eof_timeout_timer);
@@ -1134,8 +1133,7 @@ static int csi_link_validate(struct v4l2_subdev *sd,
*/
#if 0
mutex_unlock(&priv->lock);
- vc_num = imx_media_find_mipi_csi2_channel(priv->md,
- &priv->sd.entity);
+ vc_num = imx_media_find_mipi_csi2_channel(&priv->sd.entity);
if (vc_num < 0)
return vc_num;
mutex_lock(&priv->lock);
@@ -1502,13 +1500,10 @@ static int csi_set_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_format *sdformat)
{
struct csi_priv *priv = v4l2_get_subdevdata(sd);
- struct imx_media_video_dev *vdev = priv->vdev;
struct v4l2_fwnode_endpoint upstream_ep = { .bus_type = 0 };
const struct imx_media_pixfmt *cc;
- struct v4l2_pix_format vdev_fmt;
struct v4l2_mbus_framefmt *fmt;
struct v4l2_rect *crop, *compose;
- struct v4l2_rect vdev_compose;
int ret;
if (sdformat->pad >= CSI_NUM_PADS)
@@ -1558,19 +1553,9 @@ static int csi_set_fmt(struct v4l2_subdev *sd,
}
}
- if (sdformat->which == V4L2_SUBDEV_FORMAT_TRY)
- goto out;
-
- priv->cc[sdformat->pad] = cc;
-
- /* propagate IDMAC output pad format to capture device */
- imx_media_mbus_fmt_to_pix_fmt(&vdev_fmt, &vdev_compose,
- &priv->format_mbus[CSI_SRC_PAD_IDMAC],
- priv->cc[CSI_SRC_PAD_IDMAC]);
- mutex_unlock(&priv->lock);
- imx_media_capture_device_set_format(vdev, &vdev_fmt, &vdev_compose);
+ if (sdformat->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ priv->cc[sdformat->pad] = cc;
- return 0;
out:
mutex_unlock(&priv->lock);
return ret;
@@ -1762,9 +1747,6 @@ static int csi_registered(struct v4l2_subdev *sd)
int i, ret;
u32 code;
- /* get media device */
- priv->md = dev_get_drvdata(sd->v4l2_dev->dev);
-
/* get handle to IPU CSI */
csi = ipu_csi_get(priv->ipu, priv->csi_id);
if (IS_ERR(csi)) {
@@ -1812,17 +1794,12 @@ static int csi_registered(struct v4l2_subdev *sd)
if (ret)
goto free_fim;
- ret = imx_media_capture_device_register(priv->md, priv->vdev);
+ ret = imx_media_capture_device_register(priv->vdev);
if (ret)
goto free_fim;
- ret = imx_media_add_video_device(priv->md, priv->vdev);
- if (ret)
- goto unreg;
-
return 0;
-unreg:
- imx_media_capture_device_unregister(priv->vdev);
+
free_fim:
if (priv->fim)
imx_media_fim_free(priv->fim);
@@ -1983,7 +1960,7 @@ static int imx_csi_probe(struct platform_device *pdev)
imx_media_grp_id_to_sd_name(priv->sd.name, sizeof(priv->sd.name),
priv->sd.grp_id, ipu_get_num(priv->ipu));
- priv->vdev = imx_media_capture_device_init(&priv->sd,
+ priv->vdev = imx_media_capture_device_init(priv->sd.dev, &priv->sd,
CSI_SRC_PAD_IDMAC);
if (IS_ERR(priv->vdev))
return PTR_ERR(priv->vdev);
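A sketch of the replacement pipeline helpers used in the CSI hunks above. Note the differing error conventions: imx_media_pipeline_subdev() returns an ERR_PTR(), while imx_media_pipeline_pad() returns NULL when nothing is found. The wrapper name is hypothetical.

static int example_find_upstream(struct media_entity *src)
{
	struct v4l2_subdev *sd;
	struct media_pad *pad;

	/* nearest upstream subdev with the CSI-2 group id, if any */
	sd = imx_media_pipeline_subdev(src, IMX_MEDIA_GRP_ID_CSI2, true);
	if (!IS_ERR(sd))
		src = &sd->entity;

	/* nearest upstream source pad, regardless of group id */
	pad = imx_media_pipeline_pad(src, 0, 0, true);
	if (!pad)
		return -ENODEV;

	return pad->index;
}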
diff --git a/drivers/staging/media/imx/imx-media-dev-common.c b/drivers/staging/media/imx/imx-media-dev-common.c
index 6cd93419b81d..66b505f7e8df 100644
--- a/drivers/staging/media/imx/imx-media-dev-common.c
+++ b/drivers/staging/media/imx/imx-media-dev-common.c
@@ -8,9 +8,341 @@
#include <linux/of_graph.h>
#include <linux/of_platform.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mc.h>
#include "imx-media.h"
-static const struct v4l2_async_notifier_operations imx_media_subdev_ops = {
+static inline struct imx_media_dev *notifier2dev(struct v4l2_async_notifier *n)
+{
+ return container_of(n, struct imx_media_dev, notifier);
+}
+
+/* async subdev bound notifier */
+static int imx_media_subdev_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_subdev *asd)
+{
+ v4l2_info(sd->v4l2_dev, "subdev %s bound\n", sd->name);
+
+ return 0;
+}
+
+/*
+ * Create the media links for all subdevs that registered.
+ * Called after all async subdevs have bound.
+ */
+static int imx_media_create_links(struct v4l2_async_notifier *notifier)
+{
+ struct imx_media_dev *imxmd = notifier2dev(notifier);
+ struct v4l2_subdev *sd;
+
+ list_for_each_entry(sd, &imxmd->v4l2_dev.subdevs, list) {
+ switch (sd->grp_id) {
+ case IMX_MEDIA_GRP_ID_IPU_VDIC:
+ case IMX_MEDIA_GRP_ID_IPU_IC_PRP:
+ case IMX_MEDIA_GRP_ID_IPU_IC_PRPENC:
+ case IMX_MEDIA_GRP_ID_IPU_IC_PRPVF:
+ /*
+ * links have already been created for the
+ * sync-registered subdevs.
+ */
+ break;
+ case IMX_MEDIA_GRP_ID_IPU_CSI0:
+ case IMX_MEDIA_GRP_ID_IPU_CSI1:
+ case IMX_MEDIA_GRP_ID_CSI:
+ imx_media_create_csi_of_links(imxmd, sd);
+ break;
+ default:
+ /*
+ * if this subdev has fwnode links, create media
+ * links for them.
+ */
+ imx_media_create_of_links(imxmd, sd);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * adds given video device to given imx-media source pad vdev list.
+ * Continues upstream from the pad entity's sink pads.
+ */
+static int imx_media_add_vdev_to_pad(struct imx_media_dev *imxmd,
+ struct imx_media_video_dev *vdev,
+ struct media_pad *srcpad)
+{
+ struct media_entity *entity = srcpad->entity;
+ struct imx_media_pad_vdev *pad_vdev;
+ struct list_head *pad_vdev_list;
+ struct media_link *link;
+ struct v4l2_subdev *sd;
+ int i, ret;
+
+ /* skip this entity if not a v4l2_subdev */
+ if (!is_media_entity_v4l2_subdev(entity))
+ return 0;
+
+ sd = media_entity_to_v4l2_subdev(entity);
+
+ pad_vdev_list = to_pad_vdev_list(sd, srcpad->index);
+ if (!pad_vdev_list) {
+ v4l2_warn(&imxmd->v4l2_dev, "%s:%u has no vdev list!\n",
+ entity->name, srcpad->index);
+ /*
+ * shouldn't happen, but no reason to fail driver load,
+ * just skip this entity.
+ */
+ return 0;
+ }
+
+ /* just return if we've been here before */
+ list_for_each_entry(pad_vdev, pad_vdev_list, list) {
+ if (pad_vdev->vdev == vdev)
+ return 0;
+ }
+
+ dev_dbg(imxmd->md.dev, "adding %s to pad %s:%u\n",
+ vdev->vfd->entity.name, entity->name, srcpad->index);
+
+ pad_vdev = devm_kzalloc(imxmd->md.dev, sizeof(*pad_vdev), GFP_KERNEL);
+ if (!pad_vdev)
+ return -ENOMEM;
+
+ /* attach this vdev to this pad */
+ pad_vdev->vdev = vdev;
+ list_add_tail(&pad_vdev->list, pad_vdev_list);
+
+ /* move upstream from this entity's sink pads */
+ for (i = 0; i < entity->num_pads; i++) {
+ struct media_pad *pad = &entity->pads[i];
+
+ if (!(pad->flags & MEDIA_PAD_FL_SINK))
+ continue;
+
+ list_for_each_entry(link, &entity->links, list) {
+ if (link->sink != pad)
+ continue;
+ ret = imx_media_add_vdev_to_pad(imxmd, vdev,
+ link->source);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * For every subdevice, allocate an array of list_head's, one list_head
+ * for each pad, to hold the list of video devices reachable from that
+ * pad.
+ */
+static int imx_media_alloc_pad_vdev_lists(struct imx_media_dev *imxmd)
+{
+ struct list_head *vdev_lists;
+ struct media_entity *entity;
+ struct v4l2_subdev *sd;
+ int i;
+
+ list_for_each_entry(sd, &imxmd->v4l2_dev.subdevs, list) {
+ entity = &sd->entity;
+ vdev_lists = devm_kcalloc(imxmd->md.dev,
+ entity->num_pads, sizeof(*vdev_lists),
+ GFP_KERNEL);
+ if (!vdev_lists)
+ return -ENOMEM;
+
+ /* attach to the subdev's host private pointer */
+ sd->host_priv = vdev_lists;
+
+ for (i = 0; i < entity->num_pads; i++)
+ INIT_LIST_HEAD(to_pad_vdev_list(sd, i));
+ }
+
+ return 0;
+}
+
+/* form the vdev lists in all imx-media source pads */
+static int imx_media_create_pad_vdev_lists(struct imx_media_dev *imxmd)
+{
+ struct imx_media_video_dev *vdev;
+ struct media_link *link;
+ int ret;
+
+ ret = imx_media_alloc_pad_vdev_lists(imxmd);
+ if (ret)
+ return ret;
+
+ list_for_each_entry(vdev, &imxmd->vdev_list, list) {
+ link = list_first_entry(&vdev->vfd->entity.links,
+ struct media_link, list);
+ ret = imx_media_add_vdev_to_pad(imxmd, vdev, link->source);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/* async subdev complete notifier */
+int imx_media_probe_complete(struct v4l2_async_notifier *notifier)
+{
+ struct imx_media_dev *imxmd = notifier2dev(notifier);
+ int ret;
+
+ mutex_lock(&imxmd->mutex);
+
+ ret = imx_media_create_links(notifier);
+ if (ret)
+ goto unlock;
+
+ ret = imx_media_create_pad_vdev_lists(imxmd);
+ if (ret)
+ goto unlock;
+
+ ret = v4l2_device_register_subdev_nodes(&imxmd->v4l2_dev);
+unlock:
+ mutex_unlock(&imxmd->mutex);
+ if (ret)
+ return ret;
+
+ return media_device_register(&imxmd->md);
+}
+EXPORT_SYMBOL_GPL(imx_media_probe_complete);
+
+/*
+ * adds controls to a video device from an entity subdevice.
+ * Continues upstream from the entity's sink pads.
+ */
+static int imx_media_inherit_controls(struct imx_media_dev *imxmd,
+ struct video_device *vfd,
+ struct media_entity *entity)
+{
+ int i, ret = 0;
+
+ if (is_media_entity_v4l2_subdev(entity)) {
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+
+ dev_dbg(imxmd->md.dev,
+ "adding controls to %s from %s\n",
+ vfd->entity.name, sd->entity.name);
+
+ ret = v4l2_ctrl_add_handler(vfd->ctrl_handler,
+ sd->ctrl_handler,
+ NULL, true);
+ if (ret)
+ return ret;
+ }
+
+ /* move upstream */
+ for (i = 0; i < entity->num_pads; i++) {
+ struct media_pad *pad, *spad = &entity->pads[i];
+
+ if (!(spad->flags & MEDIA_PAD_FL_SINK))
+ continue;
+
+ pad = media_entity_remote_pad(spad);
+ if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
+ continue;
+
+ ret = imx_media_inherit_controls(imxmd, vfd, pad->entity);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static int imx_media_link_notify(struct media_link *link, u32 flags,
+ unsigned int notification)
+{
+ struct imx_media_dev *imxmd = container_of(link->graph_obj.mdev,
+ struct imx_media_dev, md);
+ struct media_entity *source = link->source->entity;
+ struct imx_media_pad_vdev *pad_vdev;
+ struct list_head *pad_vdev_list;
+ struct video_device *vfd;
+ struct v4l2_subdev *sd;
+ int pad_idx, ret;
+
+ ret = v4l2_pipeline_link_notify(link, flags, notification);
+ if (ret)
+ return ret;
+
+ /* don't bother if source is not a subdev */
+ if (!is_media_entity_v4l2_subdev(source))
+ return 0;
+
+ sd = media_entity_to_v4l2_subdev(source);
+ pad_idx = link->source->index;
+
+ pad_vdev_list = to_pad_vdev_list(sd, pad_idx);
+ if (!pad_vdev_list) {
+ /* nothing to do if source sd has no pad vdev list */
+ return 0;
+ }
+
+ /*
+ * Before disabling a link, reset controls for all video
+ * devices reachable from this link.
+ *
+ * After enabling a link, refresh controls for all video
+ * devices reachable from this link.
+ */
+ if (notification == MEDIA_DEV_NOTIFY_PRE_LINK_CH &&
+ !(flags & MEDIA_LNK_FL_ENABLED)) {
+ list_for_each_entry(pad_vdev, pad_vdev_list, list) {
+ vfd = pad_vdev->vdev->vfd;
+ dev_dbg(imxmd->md.dev,
+ "reset controls for %s\n",
+ vfd->entity.name);
+ v4l2_ctrl_handler_free(vfd->ctrl_handler);
+ v4l2_ctrl_handler_init(vfd->ctrl_handler, 0);
+ }
+ } else if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH &&
+ (link->flags & MEDIA_LNK_FL_ENABLED)) {
+ list_for_each_entry(pad_vdev, pad_vdev_list, list) {
+ vfd = pad_vdev->vdev->vfd;
+ dev_dbg(imxmd->md.dev,
+ "refresh controls for %s\n",
+ vfd->entity.name);
+ ret = imx_media_inherit_controls(imxmd, vfd,
+ &vfd->entity);
+ if (ret)
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static void imx_media_notify(struct v4l2_subdev *sd, unsigned int notification,
+ void *arg)
+{
+ struct media_entity *entity = &sd->entity;
+ int i;
+
+ if (notification != V4L2_DEVICE_NOTIFY_EVENT)
+ return;
+
+ for (i = 0; i < entity->num_pads; i++) {
+ struct media_pad *pad = &entity->pads[i];
+ struct imx_media_pad_vdev *pad_vdev;
+ struct list_head *pad_vdev_list;
+
+ pad_vdev_list = to_pad_vdev_list(sd, pad->index);
+ if (!pad_vdev_list)
+ continue;
+ list_for_each_entry(pad_vdev, pad_vdev_list, list)
+ v4l2_event_queue(pad_vdev->vdev->vfd, arg);
+ }
+}
+
+static const struct v4l2_async_notifier_operations imx_media_notifier_ops = {
.bound = imx_media_subdev_bound,
.complete = imx_media_probe_complete,
};
@@ -19,7 +351,8 @@ static const struct media_device_ops imx_media_md_ops = {
.link_notify = imx_media_link_notify,
};
-struct imx_media_dev *imx_media_dev_init(struct device *dev)
+struct imx_media_dev *imx_media_dev_init(struct device *dev,
+ const struct media_device_ops *ops)
{
struct imx_media_dev *imxmd;
int ret;
@@ -31,7 +364,7 @@ struct imx_media_dev *imx_media_dev_init(struct device *dev)
dev_set_drvdata(dev, imxmd);
strscpy(imxmd->md.model, "imx-media", sizeof(imxmd->md.model));
- imxmd->md.ops = &imx_media_md_ops;
+ imxmd->md.ops = ops ? ops : &imx_media_md_ops;
imxmd->md.dev = dev;
mutex_init(&imxmd->mutex);
@@ -50,8 +383,6 @@ struct imx_media_dev *imx_media_dev_init(struct device *dev)
goto cleanup;
}
- dev_set_drvdata(imxmd->v4l2_dev.dev, imxmd);
-
INIT_LIST_HEAD(&imxmd->vdev_list);
v4l2_async_notifier_init(&imxmd->notifier);
@@ -65,7 +396,8 @@ cleanup:
}
EXPORT_SYMBOL_GPL(imx_media_dev_init);
-int imx_media_dev_notifier_register(struct imx_media_dev *imxmd)
+int imx_media_dev_notifier_register(struct imx_media_dev *imxmd,
+ const struct v4l2_async_notifier_operations *ops)
{
int ret;
@@ -76,7 +408,7 @@ int imx_media_dev_notifier_register(struct imx_media_dev *imxmd)
}
/* prepare the async subdev notifier and register it */
- imxmd->notifier.ops = &imx_media_subdev_ops;
+ imxmd->notifier.ops = ops ? ops : &imx_media_notifier_ops;
ret = v4l2_async_notifier_register(&imxmd->v4l2_dev,
&imxmd->notifier);
if (ret) {
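Both entry points now take optional ops so other platform drivers can reuse this common code; passing NULL keeps the defaults defined here. A sketch of a caller, assuming the in-tree imx-media.h declarations; my_notifier_ops and example_probe are hypothetical names.

static const struct v4l2_async_notifier_operations my_notifier_ops = {
	/* .bound is optional; the exported default completion is reused */
	.complete = imx_media_probe_complete,
};

static int example_probe(struct platform_device *pdev)
{
	struct imx_media_dev *imxmd;
	int ret;

	imxmd = imx_media_dev_init(&pdev->dev, NULL);	/* default md ops */
	if (IS_ERR(imxmd))
		return PTR_ERR(imxmd);

	ret = imx_media_dev_notifier_register(imxmd, &my_notifier_ops);
	if (ret) {
		v4l2_async_notifier_cleanup(&imxmd->notifier);
		v4l2_device_unregister(&imxmd->v4l2_dev);
	}
	return ret;
}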
diff --git a/drivers/staging/media/imx/imx-media-dev.c b/drivers/staging/media/imx/imx-media-dev.c
index 6be95584006d..6ac371f6e971 100644
--- a/drivers/staging/media/imx/imx-media-dev.c
+++ b/drivers/staging/media/imx/imx-media-dev.c
@@ -2,24 +2,13 @@
/*
* V4L2 Media Controller Driver for Freescale i.MX5/6 SOC
*
- * Copyright (c) 2016 Mentor Graphics Inc.
+ * Copyright (c) 2016-2019 Mentor Graphics Inc.
*/
-#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/module.h>
-#include <linux/of_graph.h>
-#include <linux/of_platform.h>
-#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/timer.h>
-#include <media/v4l2-ctrls.h>
+#include <media/v4l2-async.h>
#include <media/v4l2-event.h>
-#include <media/v4l2-ioctl.h>
-#include <media/v4l2-mc.h>
-#include <video/imx-ipu-v3.h>
#include <media/imx.h>
#include "imx-media.h"
@@ -28,433 +17,31 @@ static inline struct imx_media_dev *notifier2dev(struct v4l2_async_notifier *n)
return container_of(n, struct imx_media_dev, notifier);
}
-/*
- * Adds a subdev to the root notifier's async subdev list. If fwnode is
- * non-NULL, adds the async as a V4L2_ASYNC_MATCH_FWNODE match type,
- * otherwise as a V4L2_ASYNC_MATCH_DEVNAME match type using the dev_name
- * of the given platform_device. This is called during driver load when
- * forming the async subdev list.
- */
-int imx_media_add_async_subdev(struct imx_media_dev *imxmd,
- struct fwnode_handle *fwnode,
- struct platform_device *pdev)
-{
- struct device_node *np = to_of_node(fwnode);
- struct imx_media_async_subdev *imxasd;
- struct v4l2_async_subdev *asd;
- const char *devname = NULL;
- int ret;
-
- if (fwnode) {
- asd = v4l2_async_notifier_add_fwnode_subdev(&imxmd->notifier,
- fwnode,
- sizeof(*imxasd));
- } else {
- devname = dev_name(&pdev->dev);
- asd = v4l2_async_notifier_add_devname_subdev(&imxmd->notifier,
- devname,
- sizeof(*imxasd));
- }
-
- if (IS_ERR(asd)) {
- ret = PTR_ERR(asd);
- if (ret == -EEXIST) {
- if (np)
- dev_dbg(imxmd->md.dev, "%s: already added %pOFn\n",
- __func__, np);
- else
- dev_dbg(imxmd->md.dev, "%s: already added %s\n",
- __func__, devname);
- }
- return ret;
- }
-
- imxasd = to_imx_media_asd(asd);
-
- if (devname)
- imxasd->pdev = pdev;
-
- if (np)
- dev_dbg(imxmd->md.dev, "%s: added %pOFn, match type FWNODE\n",
- __func__, np);
- else
- dev_dbg(imxmd->md.dev, "%s: added %s, match type DEVNAME\n",
- __func__, devname);
-
- return 0;
-}
-
-/*
- * get IPU from this CSI and add it to the list of IPUs
- * the media driver will control.
- */
-static int imx_media_get_ipu(struct imx_media_dev *imxmd,
- struct v4l2_subdev *csi_sd)
-{
- struct ipu_soc *ipu;
- int ipu_id;
-
- ipu = dev_get_drvdata(csi_sd->dev->parent);
- if (!ipu) {
- v4l2_err(&imxmd->v4l2_dev,
- "CSI %s has no parent IPU!\n", csi_sd->name);
- return -ENODEV;
- }
-
- ipu_id = ipu_get_num(ipu);
- if (ipu_id > 1) {
- v4l2_err(&imxmd->v4l2_dev, "invalid IPU id %d!\n", ipu_id);
- return -ENODEV;
- }
-
- if (!imxmd->ipu[ipu_id])
- imxmd->ipu[ipu_id] = ipu;
-
- return 0;
-}
-
/* async subdev bound notifier */
-int imx_media_subdev_bound(struct v4l2_async_notifier *notifier,
- struct v4l2_subdev *sd,
- struct v4l2_async_subdev *asd)
+static int imx_media_subdev_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_subdev *asd)
{
struct imx_media_dev *imxmd = notifier2dev(notifier);
- int ret = 0;
-
- mutex_lock(&imxmd->mutex);
+ int ret;
if (sd->grp_id & IMX_MEDIA_GRP_ID_IPU_CSI) {
- ret = imx_media_get_ipu(imxmd, sd);
+ /* register the IPU internal subdevs */
+ ret = imx_media_register_ipu_internal_subdevs(imxmd, sd);
if (ret)
- goto out;
+ return ret;
}
v4l2_info(&imxmd->v4l2_dev, "subdev %s bound\n", sd->name);
-out:
- mutex_unlock(&imxmd->mutex);
- return ret;
-}
-
-/*
- * Create the media links for all subdevs that registered.
- * Called after all async subdevs have bound.
- */
-static int imx_media_create_links(struct v4l2_async_notifier *notifier)
-{
- struct imx_media_dev *imxmd = notifier2dev(notifier);
- struct v4l2_subdev *sd;
- int ret;
-
- list_for_each_entry(sd, &imxmd->v4l2_dev.subdevs, list) {
- switch (sd->grp_id) {
- case IMX_MEDIA_GRP_ID_IPU_VDIC:
- case IMX_MEDIA_GRP_ID_IPU_IC_PRP:
- case IMX_MEDIA_GRP_ID_IPU_IC_PRPENC:
- case IMX_MEDIA_GRP_ID_IPU_IC_PRPVF:
- case IMX_MEDIA_GRP_ID_IPU_CSI0:
- case IMX_MEDIA_GRP_ID_IPU_CSI1:
- ret = imx_media_create_ipu_internal_links(imxmd, sd);
- if (ret)
- return ret;
- /*
- * the CSIs straddle between the external and the IPU
- * internal entities, so create the external links
- * to the CSI sink pads.
- */
- if (sd->grp_id & IMX_MEDIA_GRP_ID_IPU_CSI)
- imx_media_create_csi_of_links(imxmd, sd);
- break;
- case IMX_MEDIA_GRP_ID_CSI:
- imx_media_create_csi_of_links(imxmd, sd);
-
- break;
- default:
- /*
- * if this subdev has fwnode links, create media
- * links for them.
- */
- imx_media_create_of_links(imxmd, sd);
- break;
- }
- }
-
- return 0;
-}
-
-/*
- * adds given video device to given imx-media source pad vdev list.
- * Continues upstream from the pad entity's sink pads.
- */
-static int imx_media_add_vdev_to_pad(struct imx_media_dev *imxmd,
- struct imx_media_video_dev *vdev,
- struct media_pad *srcpad)
-{
- struct media_entity *entity = srcpad->entity;
- struct imx_media_pad_vdev *pad_vdev;
- struct list_head *pad_vdev_list;
- struct media_link *link;
- struct v4l2_subdev *sd;
- int i, ret;
-
- /* skip this entity if not a v4l2_subdev */
- if (!is_media_entity_v4l2_subdev(entity))
- return 0;
-
- sd = media_entity_to_v4l2_subdev(entity);
-
- pad_vdev_list = to_pad_vdev_list(sd, srcpad->index);
- if (!pad_vdev_list) {
- v4l2_warn(&imxmd->v4l2_dev, "%s:%u has no vdev list!\n",
- entity->name, srcpad->index);
- /*
- * shouldn't happen, but no reason to fail driver load,
- * just skip this entity.
- */
- return 0;
- }
-
- /* just return if we've been here before */
- list_for_each_entry(pad_vdev, pad_vdev_list, list) {
- if (pad_vdev->vdev == vdev)
- return 0;
- }
-
- dev_dbg(imxmd->md.dev, "adding %s to pad %s:%u\n",
- vdev->vfd->entity.name, entity->name, srcpad->index);
-
- pad_vdev = devm_kzalloc(imxmd->md.dev, sizeof(*pad_vdev), GFP_KERNEL);
- if (!pad_vdev)
- return -ENOMEM;
-
- /* attach this vdev to this pad */
- pad_vdev->vdev = vdev;
- list_add_tail(&pad_vdev->list, pad_vdev_list);
-
- /* move upstream from this entity's sink pads */
- for (i = 0; i < entity->num_pads; i++) {
- struct media_pad *pad = &entity->pads[i];
-
- if (!(pad->flags & MEDIA_PAD_FL_SINK))
- continue;
-
- list_for_each_entry(link, &entity->links, list) {
- if (link->sink != pad)
- continue;
- ret = imx_media_add_vdev_to_pad(imxmd, vdev,
- link->source);
- if (ret)
- return ret;
- }
- }
-
- return 0;
-}
-
-/*
- * For every subdevice, allocate an array of list_head's, one list_head
- * for each pad, to hold the list of video devices reachable from that
- * pad.
- */
-static int imx_media_alloc_pad_vdev_lists(struct imx_media_dev *imxmd)
-{
- struct list_head *vdev_lists;
- struct media_entity *entity;
- struct v4l2_subdev *sd;
- int i;
-
- list_for_each_entry(sd, &imxmd->v4l2_dev.subdevs, list) {
- entity = &sd->entity;
- vdev_lists = devm_kcalloc(imxmd->md.dev,
- entity->num_pads, sizeof(*vdev_lists),
- GFP_KERNEL);
- if (!vdev_lists)
- return -ENOMEM;
-
- /* attach to the subdev's host private pointer */
- sd->host_priv = vdev_lists;
-
- for (i = 0; i < entity->num_pads; i++)
- INIT_LIST_HEAD(to_pad_vdev_list(sd, i));
- }
-
- return 0;
-}
-
-/* form the vdev lists in all imx-media source pads */
-static int imx_media_create_pad_vdev_lists(struct imx_media_dev *imxmd)
-{
- struct imx_media_video_dev *vdev;
- struct media_link *link;
- int ret;
-
- ret = imx_media_alloc_pad_vdev_lists(imxmd);
- if (ret)
- return ret;
-
- list_for_each_entry(vdev, &imxmd->vdev_list, list) {
- link = list_first_entry(&vdev->vfd->entity.links,
- struct media_link, list);
- ret = imx_media_add_vdev_to_pad(imxmd, vdev, link->source);
- if (ret)
- return ret;
- }
return 0;
}
/* async subdev complete notifier */
-int imx_media_probe_complete(struct v4l2_async_notifier *notifier)
-{
- struct imx_media_dev *imxmd = notifier2dev(notifier);
- int ret;
-
- mutex_lock(&imxmd->mutex);
-
- ret = imx_media_create_links(notifier);
- if (ret)
- goto unlock;
-
- ret = imx_media_create_pad_vdev_lists(imxmd);
- if (ret)
- goto unlock;
-
- ret = v4l2_device_register_subdev_nodes(&imxmd->v4l2_dev);
-unlock:
- mutex_unlock(&imxmd->mutex);
- if (ret)
- return ret;
-
- return media_device_register(&imxmd->md);
-}
-
-/*
- * adds controls to a video device from an entity subdevice.
- * Continues upstream from the entity's sink pads.
- */
-static int imx_media_inherit_controls(struct imx_media_dev *imxmd,
- struct video_device *vfd,
- struct media_entity *entity)
-{
- int i, ret = 0;
-
- if (is_media_entity_v4l2_subdev(entity)) {
- struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
-
- dev_dbg(imxmd->md.dev,
- "adding controls to %s from %s\n",
- vfd->entity.name, sd->entity.name);
-
- ret = v4l2_ctrl_add_handler(vfd->ctrl_handler,
- sd->ctrl_handler,
- NULL, true);
- if (ret)
- return ret;
- }
-
- /* move upstream */
- for (i = 0; i < entity->num_pads; i++) {
- struct media_pad *pad, *spad = &entity->pads[i];
-
- if (!(spad->flags & MEDIA_PAD_FL_SINK))
- continue;
-
- pad = media_entity_remote_pad(spad);
- if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
- continue;
-
- ret = imx_media_inherit_controls(imxmd, vfd, pad->entity);
- if (ret)
- break;
- }
-
- return ret;
-}
-
-int imx_media_link_notify(struct media_link *link, u32 flags,
- unsigned int notification)
-{
- struct media_entity *source = link->source->entity;
- struct imx_media_pad_vdev *pad_vdev;
- struct list_head *pad_vdev_list;
- struct imx_media_dev *imxmd;
- struct video_device *vfd;
- struct v4l2_subdev *sd;
- int pad_idx, ret;
-
- ret = v4l2_pipeline_link_notify(link, flags, notification);
- if (ret)
- return ret;
-
- /* don't bother if source is not a subdev */
- if (!is_media_entity_v4l2_subdev(source))
- return 0;
-
- sd = media_entity_to_v4l2_subdev(source);
- pad_idx = link->source->index;
-
- imxmd = dev_get_drvdata(sd->v4l2_dev->dev);
-
- pad_vdev_list = to_pad_vdev_list(sd, pad_idx);
- if (!pad_vdev_list) {
- /* shouldn't happen, but no reason to fail link setup */
- return 0;
- }
-
- /*
- * Before disabling a link, reset controls for all video
- * devices reachable from this link.
- *
- * After enabling a link, refresh controls for all video
- * devices reachable from this link.
- */
- if (notification == MEDIA_DEV_NOTIFY_PRE_LINK_CH &&
- !(flags & MEDIA_LNK_FL_ENABLED)) {
- list_for_each_entry(pad_vdev, pad_vdev_list, list) {
- vfd = pad_vdev->vdev->vfd;
- dev_dbg(imxmd->md.dev,
- "reset controls for %s\n",
- vfd->entity.name);
- v4l2_ctrl_handler_free(vfd->ctrl_handler);
- v4l2_ctrl_handler_init(vfd->ctrl_handler, 0);
- }
- } else if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH &&
- (link->flags & MEDIA_LNK_FL_ENABLED)) {
- list_for_each_entry(pad_vdev, pad_vdev_list, list) {
- vfd = pad_vdev->vdev->vfd;
- dev_dbg(imxmd->md.dev,
- "refresh controls for %s\n",
- vfd->entity.name);
- ret = imx_media_inherit_controls(imxmd, vfd,
- &vfd->entity);
- if (ret)
- break;
- }
- }
-
- return ret;
-}
-
-void imx_media_notify(struct v4l2_subdev *sd, unsigned int notification,
- void *arg)
-{
- struct media_entity *entity = &sd->entity;
- int i;
-
- if (notification != V4L2_DEVICE_NOTIFY_EVENT)
- return;
-
- for (i = 0; i < entity->num_pads; i++) {
- struct media_pad *pad = &entity->pads[i];
- struct imx_media_pad_vdev *pad_vdev;
- struct list_head *pad_vdev_list;
-
- pad_vdev_list = to_pad_vdev_list(sd, pad->index);
- if (!pad_vdev_list)
- continue;
- list_for_each_entry(pad_vdev, pad_vdev_list, list)
- v4l2_event_queue(pad_vdev->vdev->vfd, arg);
- }
-}
+static const struct v4l2_async_notifier_operations imx_media_notifier_ops = {
+ .bound = imx_media_subdev_bound,
+ .complete = imx_media_probe_complete,
+};
static int imx_media_probe(struct platform_device *pdev)
{
@@ -463,7 +50,7 @@ static int imx_media_probe(struct platform_device *pdev)
struct imx_media_dev *imxmd;
int ret;
- imxmd = imx_media_dev_init(dev);
+ imxmd = imx_media_dev_init(dev, NULL);
if (IS_ERR(imxmd))
return PTR_ERR(imxmd);
@@ -474,14 +61,12 @@ static int imx_media_probe(struct platform_device *pdev)
goto cleanup;
}
- ret = imx_media_dev_notifier_register(imxmd);
+ ret = imx_media_dev_notifier_register(imxmd, &imx_media_notifier_ops);
if (ret)
- goto del_int;
+ goto cleanup;
return 0;
-del_int:
- imx_media_remove_ipu_internal_subdevs(imxmd);
cleanup:
v4l2_async_notifier_cleanup(&imxmd->notifier);
v4l2_device_unregister(&imxmd->v4l2_dev);
@@ -498,7 +83,7 @@ static int imx_media_remove(struct platform_device *pdev)
v4l2_info(&imxmd->v4l2_dev, "Removing imx-media\n");
v4l2_async_notifier_unregister(&imxmd->notifier);
- imx_media_remove_ipu_internal_subdevs(imxmd);
+ imx_media_unregister_ipu_internal_subdevs(imxmd);
v4l2_async_notifier_cleanup(&imxmd->notifier);
media_device_unregister(&imxmd->md);
v4l2_device_unregister(&imxmd->v4l2_dev);
diff --git a/drivers/staging/media/imx/imx-media-fim.c b/drivers/staging/media/imx/imx-media-fim.c
index 2ab64bc30f5c..3a9182933508 100644
--- a/drivers/staging/media/imx/imx-media-fim.c
+++ b/drivers/staging/media/imx/imx-media-fim.c
@@ -37,8 +37,6 @@ enum {
#define FIM_CL_TOLERANCE_MAX_DEF 0 /* no max tolerance (unbounded) */
struct imx_media_fim {
- struct imx_media_dev *md;
-
/* the owning subdev of this fim instance */
struct v4l2_subdev *sd;
@@ -416,7 +414,6 @@ void imx_media_fim_eof_monitor(struct imx_media_fim *fim, ktime_t timestamp)
spin_unlock_irqrestore(&fim->lock, flags);
}
-EXPORT_SYMBOL_GPL(imx_media_fim_eof_monitor);
/* Called by the subdev in its s_stream callback */
int imx_media_fim_set_stream(struct imx_media_fim *fim,
@@ -453,7 +450,6 @@ out:
v4l2_ctrl_unlock(fim->ctrl[FIM_CL_ENABLE]);
return ret;
}
-EXPORT_SYMBOL_GPL(imx_media_fim_set_stream);
int imx_media_fim_add_controls(struct imx_media_fim *fim)
{
@@ -461,7 +457,6 @@ int imx_media_fim_add_controls(struct imx_media_fim *fim)
return v4l2_ctrl_add_handler(fim->sd->ctrl_handler,
&fim->ctrl_handler, NULL, false);
}
-EXPORT_SYMBOL_GPL(imx_media_fim_add_controls);
/* Called by the subdev in its subdev registered callback */
struct imx_media_fim *imx_media_fim_init(struct v4l2_subdev *sd)
@@ -473,8 +468,6 @@ struct imx_media_fim *imx_media_fim_init(struct v4l2_subdev *sd)
if (!fim)
return ERR_PTR(-ENOMEM);
- /* get media device */
- fim->md = dev_get_drvdata(sd->v4l2_dev->dev);
fim->sd = sd;
spin_lock_init(&fim->lock);
@@ -485,10 +478,8 @@ struct imx_media_fim *imx_media_fim_init(struct v4l2_subdev *sd)
return fim;
}
-EXPORT_SYMBOL_GPL(imx_media_fim_init);
void imx_media_fim_free(struct imx_media_fim *fim)
{
v4l2_ctrl_handler_free(&fim->ctrl_handler);
}
-EXPORT_SYMBOL_GPL(imx_media_fim_free);
diff --git a/drivers/staging/media/imx/imx-media-internal-sd.c b/drivers/staging/media/imx/imx-media-internal-sd.c
index df49ebfbe98a..cb1e4cdd5079 100644
--- a/drivers/staging/media/imx/imx-media-internal-sd.c
+++ b/drivers/staging/media/imx/imx-media-internal-sd.c
@@ -9,208 +9,138 @@
#include <linux/platform_device.h>
#include "imx-media.h"
-enum isd_enum {
- isd_csi0 = 0,
- isd_csi1,
- isd_vdic,
- isd_ic_prp,
- isd_ic_prpenc,
- isd_ic_prpvf,
- num_isd,
-};
-
-static const struct internal_subdev_id {
- enum isd_enum index;
- const char *name;
- u32 grp_id;
-} isd_id[num_isd] = {
- [isd_csi0] = {
- .index = isd_csi0,
- .grp_id = IMX_MEDIA_GRP_ID_IPU_CSI0,
- .name = "imx-ipuv3-csi",
- },
- [isd_csi1] = {
- .index = isd_csi1,
- .grp_id = IMX_MEDIA_GRP_ID_IPU_CSI1,
- .name = "imx-ipuv3-csi",
- },
- [isd_vdic] = {
- .index = isd_vdic,
- .grp_id = IMX_MEDIA_GRP_ID_IPU_VDIC,
- .name = "imx-ipuv3-vdic",
- },
- [isd_ic_prp] = {
- .index = isd_ic_prp,
- .grp_id = IMX_MEDIA_GRP_ID_IPU_IC_PRP,
- .name = "imx-ipuv3-ic",
- },
- [isd_ic_prpenc] = {
- .index = isd_ic_prpenc,
- .grp_id = IMX_MEDIA_GRP_ID_IPU_IC_PRPENC,
- .name = "imx-ipuv3-ic",
- },
- [isd_ic_prpvf] = {
- .index = isd_ic_prpvf,
- .grp_id = IMX_MEDIA_GRP_ID_IPU_IC_PRPVF,
- .name = "imx-ipuv3-ic",
- },
-};
+/* max pads per internal-sd */
+#define MAX_INTERNAL_PADS 8
+/* max links per internal-sd pad */
+#define MAX_INTERNAL_LINKS 8
struct internal_subdev;
struct internal_link {
- const struct internal_subdev *remote;
+ int remote;
int local_pad;
int remote_pad;
};
-/* max pads per internal-sd */
-#define MAX_INTERNAL_PADS 8
-/* max links per internal-sd pad */
-#define MAX_INTERNAL_LINKS 8
-
struct internal_pad {
+ int num_links;
struct internal_link link[MAX_INTERNAL_LINKS];
};
-static const struct internal_subdev {
- const struct internal_subdev_id *id;
+struct internal_subdev {
+ u32 grp_id;
struct internal_pad pad[MAX_INTERNAL_PADS];
-} int_subdev[num_isd] = {
- [isd_csi0] = {
- .id = &isd_id[isd_csi0],
+
+ struct v4l2_subdev * (*sync_register)(struct v4l2_device *v4l2_dev,
+ struct device *ipu_dev,
+ struct ipu_soc *ipu,
+ u32 grp_id);
+ int (*sync_unregister)(struct v4l2_subdev *sd);
+};
+
+static const struct internal_subdev int_subdev[NUM_IPU_SUBDEVS] = {
+ [IPU_CSI0] = {
+ .grp_id = IMX_MEDIA_GRP_ID_IPU_CSI0,
.pad[CSI_SRC_PAD_DIRECT] = {
+ .num_links = 2,
.link = {
{
.local_pad = CSI_SRC_PAD_DIRECT,
- .remote = &int_subdev[isd_ic_prp],
+ .remote = IPU_IC_PRP,
.remote_pad = PRP_SINK_PAD,
}, {
.local_pad = CSI_SRC_PAD_DIRECT,
- .remote = &int_subdev[isd_vdic],
+ .remote = IPU_VDIC,
.remote_pad = VDIC_SINK_PAD_DIRECT,
},
},
},
},
- [isd_csi1] = {
- .id = &isd_id[isd_csi1],
+ [IPU_CSI1] = {
+ .grp_id = IMX_MEDIA_GRP_ID_IPU_CSI1,
.pad[CSI_SRC_PAD_DIRECT] = {
+ .num_links = 2,
.link = {
{
.local_pad = CSI_SRC_PAD_DIRECT,
- .remote = &int_subdev[isd_ic_prp],
+ .remote = IPU_IC_PRP,
.remote_pad = PRP_SINK_PAD,
}, {
.local_pad = CSI_SRC_PAD_DIRECT,
- .remote = &int_subdev[isd_vdic],
+ .remote = IPU_VDIC,
.remote_pad = VDIC_SINK_PAD_DIRECT,
},
},
},
},
- [isd_vdic] = {
- .id = &isd_id[isd_vdic],
+ [IPU_VDIC] = {
+ .grp_id = IMX_MEDIA_GRP_ID_IPU_VDIC,
+ .sync_register = imx_media_vdic_register,
+ .sync_unregister = imx_media_vdic_unregister,
.pad[VDIC_SRC_PAD_DIRECT] = {
+ .num_links = 1,
.link = {
{
.local_pad = VDIC_SRC_PAD_DIRECT,
- .remote = &int_subdev[isd_ic_prp],
+ .remote = IPU_IC_PRP,
.remote_pad = PRP_SINK_PAD,
},
},
},
},
- [isd_ic_prp] = {
- .id = &isd_id[isd_ic_prp],
+ [IPU_IC_PRP] = {
+ .grp_id = IMX_MEDIA_GRP_ID_IPU_IC_PRP,
+ .sync_register = imx_media_ic_register,
+ .sync_unregister = imx_media_ic_unregister,
.pad[PRP_SRC_PAD_PRPENC] = {
+ .num_links = 1,
.link = {
{
.local_pad = PRP_SRC_PAD_PRPENC,
- .remote = &int_subdev[isd_ic_prpenc],
- .remote_pad = 0,
+ .remote = IPU_IC_PRPENC,
+ .remote_pad = PRPENCVF_SINK_PAD,
},
},
},
.pad[PRP_SRC_PAD_PRPVF] = {
+ .num_links = 1,
.link = {
{
.local_pad = PRP_SRC_PAD_PRPVF,
- .remote = &int_subdev[isd_ic_prpvf],
- .remote_pad = 0,
+ .remote = IPU_IC_PRPVF,
+ .remote_pad = PRPENCVF_SINK_PAD,
},
},
},
},
- [isd_ic_prpenc] = {
- .id = &isd_id[isd_ic_prpenc],
+ [IPU_IC_PRPENC] = {
+ .grp_id = IMX_MEDIA_GRP_ID_IPU_IC_PRPENC,
+ .sync_register = imx_media_ic_register,
+ .sync_unregister = imx_media_ic_unregister,
},
- [isd_ic_prpvf] = {
- .id = &isd_id[isd_ic_prpvf],
+ [IPU_IC_PRPVF] = {
+ .grp_id = IMX_MEDIA_GRP_ID_IPU_IC_PRPVF,
+ .sync_register = imx_media_ic_register,
+ .sync_unregister = imx_media_ic_unregister,
},
};
-/* form a device name given an internal subdev and ipu id */
-static inline void isd_to_devname(char *devname, int sz,
- const struct internal_subdev *isd,
- int ipu_id)
-{
- int pdev_id = ipu_id * num_isd + isd->id->index;
-
- snprintf(devname, sz, "%s.%d", isd->id->name, pdev_id);
-}
-
-static const struct internal_subdev *find_intsd_by_grp_id(u32 grp_id)
-{
- enum isd_enum i;
-
- for (i = 0; i < num_isd; i++) {
- const struct internal_subdev *isd = &int_subdev[i];
-
- if (isd->id->grp_id == grp_id)
- return isd;
- }
-
- return NULL;
-}
-
-static struct v4l2_subdev *find_sink(struct imx_media_dev *imxmd,
- struct v4l2_subdev *src,
- const struct internal_link *link)
-{
- char sink_devname[32];
- int ipu_id;
-
- /*
- * retrieve IPU id from subdev name, note: can't get this from
- * struct imx_media_ipu_internal_sd_pdata because if src is
- * a CSI, it has different struct ipu_client_platformdata which
- * does not contain IPU id.
- */
- if (sscanf(src->name, "ipu%d", &ipu_id) != 1)
- return NULL;
-
- isd_to_devname(sink_devname, sizeof(sink_devname),
- link->remote, ipu_id - 1);
-
- return imx_media_find_subdev_by_devname(imxmd, sink_devname);
-}
-
-static int create_ipu_internal_link(struct imx_media_dev *imxmd,
- struct v4l2_subdev *src,
- const struct internal_link *link)
+static int create_internal_link(struct imx_media_dev *imxmd,
+ struct v4l2_subdev *src,
+ struct v4l2_subdev *sink,
+ const struct internal_link *link)
{
- struct v4l2_subdev *sink;
int ret;
- sink = find_sink(imxmd, src, link);
- if (!sink)
- return -ENODEV;
+ /* skip if this link already created */
+ if (media_entity_find_link(&src->entity.pads[link->local_pad],
+ &sink->entity.pads[link->remote_pad]))
+ return 0;
v4l2_info(&imxmd->v4l2_dev, "%s:%d -> %s:%d\n",
src->name, link->local_pad,
@@ -219,25 +149,21 @@ static int create_ipu_internal_link(struct imx_media_dev *imxmd,
ret = media_create_pad_link(&src->entity, link->local_pad,
&sink->entity, link->remote_pad, 0);
if (ret)
- v4l2_err(&imxmd->v4l2_dev,
- "create_pad_link failed: %d\n", ret);
+ v4l2_err(&imxmd->v4l2_dev, "%s failed: %d\n", __func__, ret);
return ret;
}
-int imx_media_create_ipu_internal_links(struct imx_media_dev *imxmd,
- struct v4l2_subdev *sd)
+static int create_ipu_internal_links(struct imx_media_dev *imxmd,
+ const struct internal_subdev *intsd,
+ struct v4l2_subdev *sd,
+ int ipu_id)
{
- const struct internal_subdev *intsd;
const struct internal_pad *intpad;
const struct internal_link *link;
struct media_pad *pad;
int i, j, ret;
- intsd = find_intsd_by_grp_id(sd->grp_id);
- if (!intsd)
- return -ENODEV;
-
/* create the source->sink links */
for (i = 0; i < sd->entity.num_pads; i++) {
intpad = &intsd->pad[i];
@@ -246,13 +172,13 @@ int imx_media_create_ipu_internal_links(struct imx_media_dev *imxmd,
if (!(pad->flags & MEDIA_PAD_FL_SOURCE))
continue;
- for (j = 0; ; j++) {
- link = &intpad->link[j];
+ for (j = 0; j < intpad->num_links; j++) {
+ struct v4l2_subdev *sink;
- if (!link->remote)
- break;
+ link = &intpad->link[j];
+ sink = imxmd->sync_sd[ipu_id][link->remote];
- ret = create_ipu_internal_link(imxmd, sd, link);
+ ret = create_internal_link(imxmd, sd, sink, link);
if (ret)
return ret;
}
@@ -261,85 +187,116 @@ int imx_media_create_ipu_internal_links(struct imx_media_dev *imxmd,
return 0;
}
-/* register an internal subdev as a platform device */
-static int add_internal_subdev(struct imx_media_dev *imxmd,
- const struct internal_subdev *isd,
- int ipu_id)
+int imx_media_register_ipu_internal_subdevs(struct imx_media_dev *imxmd,
+ struct v4l2_subdev *csi)
{
- struct imx_media_ipu_internal_sd_pdata pdata;
- struct platform_device_info pdevinfo = {};
- struct platform_device *pdev;
+ struct device *ipu_dev = csi->dev->parent;
+ const struct internal_subdev *intsd;
+ struct v4l2_subdev *sd;
+ struct ipu_soc *ipu;
+ int i, ipu_id, ret;
- pdata.grp_id = isd->id->grp_id;
+ ipu = dev_get_drvdata(ipu_dev);
+ if (!ipu) {
+ v4l2_err(&imxmd->v4l2_dev, "invalid IPU device!\n");
+ return -ENODEV;
+ }
- /* the id of IPU this subdev will control */
- pdata.ipu_id = ipu_id;
+ ipu_id = ipu_get_num(ipu);
+ if (ipu_id > 1) {
+ v4l2_err(&imxmd->v4l2_dev, "invalid IPU id %d!\n", ipu_id);
+ return -ENODEV;
+ }
- /* create subdev name */
- imx_media_grp_id_to_sd_name(pdata.sd_name, sizeof(pdata.sd_name),
- pdata.grp_id, ipu_id);
+ mutex_lock(&imxmd->mutex);
- pdevinfo.name = isd->id->name;
- pdevinfo.id = ipu_id * num_isd + isd->id->index;
- pdevinfo.parent = imxmd->md.dev;
- pdevinfo.data = &pdata;
- pdevinfo.size_data = sizeof(pdata);
- pdevinfo.dma_mask = DMA_BIT_MASK(32);
+ /* register the synchronous subdevs */
+ for (i = 0; i < NUM_IPU_SUBDEVS; i++) {
+ intsd = &int_subdev[i];
- pdev = platform_device_register_full(&pdevinfo);
- if (IS_ERR(pdev))
- return PTR_ERR(pdev);
+ sd = imxmd->sync_sd[ipu_id][i];
- return imx_media_add_async_subdev(imxmd, NULL, pdev);
-}
+ /*
+ * skip if this sync subdev is already registered or it's
+ * not a sync subdev (one of the CSIs)
+ */
+ if (sd || !intsd->sync_register)
+ continue;
-/* adds the internal subdevs in one ipu */
-int imx_media_add_ipu_internal_subdevs(struct imx_media_dev *imxmd,
- int ipu_id)
-{
- enum isd_enum i;
- int ret;
+ mutex_unlock(&imxmd->mutex);
+ sd = intsd->sync_register(&imxmd->v4l2_dev, ipu_dev, ipu,
+ intsd->grp_id);
+ mutex_lock(&imxmd->mutex);
+ if (IS_ERR(sd)) {
+ ret = PTR_ERR(sd);
+ goto err_unwind;
+ }
- for (i = 0; i < num_isd; i++) {
- const struct internal_subdev *isd = &int_subdev[i];
+ imxmd->sync_sd[ipu_id][i] = sd;
+ }
- /*
- * the CSIs are represented in the device-tree, so those
- * devices are already added to the async subdev list by
- * of_parse_subdev().
- */
- switch (isd->id->grp_id) {
- case IMX_MEDIA_GRP_ID_IPU_CSI0:
- case IMX_MEDIA_GRP_ID_IPU_CSI1:
- ret = 0;
- break;
- default:
- ret = add_internal_subdev(imxmd, isd, ipu_id);
- break;
+ /*
+ * all the sync subdevs are registered, create the media links
+ * between them.
+ */
+ for (i = 0; i < NUM_IPU_SUBDEVS; i++) {
+ intsd = &int_subdev[i];
+
+ if (intsd->grp_id == csi->grp_id) {
+ sd = csi;
+ } else {
+ sd = imxmd->sync_sd[ipu_id][i];
+ if (!sd)
+ continue;
}
- if (ret)
- goto remove;
+ ret = create_ipu_internal_links(imxmd, intsd, sd, ipu_id);
+ if (ret) {
+ mutex_unlock(&imxmd->mutex);
+ imx_media_unregister_ipu_internal_subdevs(imxmd);
+ return ret;
+ }
}
+ mutex_unlock(&imxmd->mutex);
return 0;
-remove:
- imx_media_remove_ipu_internal_subdevs(imxmd);
+err_unwind:
+ while (--i >= 0) {
+ intsd = &int_subdev[i];
+ sd = imxmd->sync_sd[ipu_id][i];
+ if (!sd || !intsd->sync_unregister)
+ continue;
+ mutex_unlock(&imxmd->mutex);
+ intsd->sync_unregister(sd);
+ mutex_lock(&imxmd->mutex);
+ }
+
+ mutex_unlock(&imxmd->mutex);
return ret;
}
-void imx_media_remove_ipu_internal_subdevs(struct imx_media_dev *imxmd)
+void imx_media_unregister_ipu_internal_subdevs(struct imx_media_dev *imxmd)
{
- struct imx_media_async_subdev *imxasd;
- struct v4l2_async_subdev *asd;
+ const struct internal_subdev *intsd;
+ struct v4l2_subdev *sd;
+ int i, j;
- list_for_each_entry(asd, &imxmd->notifier.asd_list, asd_list) {
- imxasd = to_imx_media_asd(asd);
+ mutex_lock(&imxmd->mutex);
- if (!imxasd->pdev)
- continue;
+ for (i = 0; i < 2; i++) {
+ for (j = 0; j < NUM_IPU_SUBDEVS; j++) {
+ intsd = &int_subdev[j];
+ sd = imxmd->sync_sd[i][j];
+
+ if (!sd || !intsd->sync_unregister)
+ continue;
- platform_device_unregister(imxasd->pdev);
+ mutex_unlock(&imxmd->mutex);
+ intsd->sync_unregister(sd);
+ mutex_lock(&imxmd->mutex);
+ }
}
+
+ mutex_unlock(&imxmd->mutex);
}
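The internal subdevs are now registered synchronously from the CSI's async bound notifier rather than as platform devices. A sketch of the driving side (the wrapper name is hypothetical; the called helper is the one added above, and imx-media-dev.c does the equivalent):

static int example_csi_bound(struct v4l2_async_notifier *notifier,
			     struct v4l2_subdev *sd,
			     struct v4l2_async_subdev *asd)
{
	struct imx_media_dev *imxmd =
		container_of(notifier, struct imx_media_dev, notifier);

	/* a binding IPU CSI pulls in that IPU's internal subdevs */
	if (sd->grp_id & IMX_MEDIA_GRP_ID_IPU_CSI)
		return imx_media_register_ipu_internal_subdevs(imxmd, sd);

	return 0;
}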
diff --git a/drivers/staging/media/imx/imx-media-of.c b/drivers/staging/media/imx/imx-media-of.c
index 990e82aa8e42..2d3efd2a6dde 100644
--- a/drivers/staging/media/imx/imx-media-of.c
+++ b/drivers/staging/media/imx/imx-media-of.c
@@ -19,6 +19,9 @@
int imx_media_of_add_csi(struct imx_media_dev *imxmd,
struct device_node *csi_np)
{
+ struct v4l2_async_subdev *asd;
+ int ret = 0;
+
if (!of_device_is_available(csi_np)) {
dev_dbg(imxmd->md.dev, "%s: %pOFn not enabled\n", __func__,
csi_np);
@@ -26,18 +29,25 @@ int imx_media_of_add_csi(struct imx_media_dev *imxmd,
}
/* add CSI fwnode to async notifier */
- return imx_media_add_async_subdev(imxmd, of_fwnode_handle(csi_np),
- NULL);
+ asd = v4l2_async_notifier_add_fwnode_subdev(&imxmd->notifier,
+ of_fwnode_handle(csi_np),
+ sizeof(*asd));
+ if (IS_ERR(asd)) {
+ ret = PTR_ERR(asd);
+ if (ret == -EEXIST)
+ dev_dbg(imxmd->md.dev, "%s: already added %pOFn\n",
+ __func__, csi_np);
+ }
+
+ return ret;
}
EXPORT_SYMBOL_GPL(imx_media_of_add_csi);
int imx_media_add_of_subdevs(struct imx_media_dev *imxmd,
struct device_node *np)
{
- bool ipu_found[2] = {false, false};
struct device_node *csi_np;
int i, ret;
- u32 ipu_id;
for (i = 0; ; i++) {
csi_np = of_parse_phandle(np, "ports", i);
@@ -55,34 +65,15 @@ int imx_media_add_of_subdevs(struct imx_media_dev *imxmd,
/* other error, can't continue */
goto err_out;
}
-
- ret = of_alias_get_id(csi_np->parent, "ipu");
- if (ret < 0)
- goto err_out;
- if (ret > 1) {
- ret = -EINVAL;
- goto err_out;
- }
-
- ipu_id = ret;
-
- if (!ipu_found[ipu_id]) {
- ret = imx_media_add_ipu_internal_subdevs(imxmd,
- ipu_id);
- if (ret)
- goto err_out;
- }
-
- ipu_found[ipu_id] = true;
}
return 0;
err_out:
- imx_media_remove_ipu_internal_subdevs(imxmd);
of_node_put(csi_np);
return ret;
}
+EXPORT_SYMBOL_GPL(imx_media_add_of_subdevs);
/*
* Create a single media link to/from sd using a fwnode link.
@@ -152,6 +143,7 @@ int imx_media_create_of_links(struct imx_media_dev *imxmd,
return 0;
}
+EXPORT_SYMBOL_GPL(imx_media_create_of_links);
/*
* Create media links to the given CSI subdevice's sink pads,
@@ -195,3 +187,4 @@ int imx_media_create_csi_of_links(struct imx_media_dev *imxmd,
return 0;
}
+EXPORT_SYMBOL_GPL(imx_media_create_csi_of_links);
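imx_media_of_add_csi() now adds the fwnode subdev directly and treats a duplicate as non-fatal. A sketch of a tolerant caller, roughly as imx_media_add_of_subdevs() does above:

	ret = imx_media_of_add_csi(imxmd, csi_np);
	if (ret && ret != -EEXIST && ret != -ENODEV) {
		of_node_put(csi_np);
		return ret;
	}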
diff --git a/drivers/staging/media/imx/imx-media-utils.c b/drivers/staging/media/imx/imx-media-utils.c
index b41842dba5ec..b5b8a3b7730a 100644
--- a/drivers/staging/media/imx/imx-media-utils.c
+++ b/drivers/staging/media/imx/imx-media-utils.c
@@ -573,8 +573,7 @@ void imx_media_fill_default_mbus_fields(struct v4l2_mbus_framefmt *tryfmt,
EXPORT_SYMBOL_GPL(imx_media_fill_default_mbus_fields);
int imx_media_mbus_fmt_to_pix_fmt(struct v4l2_pix_format *pix,
- struct v4l2_rect *compose,
- const struct v4l2_mbus_framefmt *mbus,
+ struct v4l2_mbus_framefmt *mbus,
const struct imx_media_pixfmt *cc)
{
u32 width;
@@ -621,17 +620,6 @@ int imx_media_mbus_fmt_to_pix_fmt(struct v4l2_pix_format *pix,
pix->sizeimage = cc->planar ? ((stride * pix->height * cc->bpp) >> 3) :
stride * pix->height;
- /*
- * set capture compose rectangle, which is fixed to the
- * source subdevice mbus format.
- */
- if (compose) {
- compose->left = 0;
- compose->top = 0;
- compose->width = mbus->width;
- compose->height = mbus->height;
- }
-
return 0;
}
EXPORT_SYMBOL_GPL(imx_media_mbus_fmt_to_pix_fmt);
@@ -643,11 +631,13 @@ int imx_media_mbus_fmt_to_ipu_image(struct ipu_image *image,
memset(image, 0, sizeof(*image));
- ret = imx_media_mbus_fmt_to_pix_fmt(&image->pix, &image->rect,
- mbus, NULL);
+ ret = imx_media_mbus_fmt_to_pix_fmt(&image->pix, mbus, NULL);
if (ret)
return ret;
+ image->rect.width = mbus->width;
+ image->rect.height = mbus->height;
+
return 0;
}
EXPORT_SYMBOL_GPL(imx_media_mbus_fmt_to_ipu_image);
@@ -675,29 +665,28 @@ int imx_media_ipu_image_to_mbus_fmt(struct v4l2_mbus_framefmt *mbus,
}
EXPORT_SYMBOL_GPL(imx_media_ipu_image_to_mbus_fmt);
-void imx_media_free_dma_buf(struct imx_media_dev *imxmd,
+void imx_media_free_dma_buf(struct device *dev,
struct imx_media_dma_buf *buf)
{
if (buf->virt)
- dma_free_coherent(imxmd->md.dev, buf->len,
- buf->virt, buf->phys);
+ dma_free_coherent(dev, buf->len, buf->virt, buf->phys);
buf->virt = NULL;
buf->phys = 0;
}
EXPORT_SYMBOL_GPL(imx_media_free_dma_buf);
-int imx_media_alloc_dma_buf(struct imx_media_dev *imxmd,
+int imx_media_alloc_dma_buf(struct device *dev,
struct imx_media_dma_buf *buf,
int size)
{
- imx_media_free_dma_buf(imxmd, buf);
+ imx_media_free_dma_buf(dev, buf);
buf->len = PAGE_ALIGN(size);
- buf->virt = dma_alloc_coherent(imxmd->md.dev, buf->len, &buf->phys,
+ buf->virt = dma_alloc_coherent(dev, buf->len, &buf->phys,
GFP_DMA | GFP_KERNEL);
if (!buf->virt) {
- dev_err(imxmd->md.dev, "failed to alloc dma buffer\n");
+ dev_err(dev, "%s: failed\n", __func__);
return -ENOMEM;
}
@@ -764,35 +753,37 @@ imx_media_find_subdev_by_devname(struct imx_media_dev *imxmd,
EXPORT_SYMBOL_GPL(imx_media_find_subdev_by_devname);
/*
- * Adds a video device to the master video device list. This is called by
- * an async subdev that owns a video device when it is registered.
+ * Adds a video device to the master video device list. This is called
+ * when a video device is registered.
*/
-int imx_media_add_video_device(struct imx_media_dev *imxmd,
- struct imx_media_video_dev *vdev)
+void imx_media_add_video_device(struct imx_media_dev *imxmd,
+ struct imx_media_video_dev *vdev)
{
mutex_lock(&imxmd->mutex);
list_add_tail(&vdev->list, &imxmd->vdev_list);
mutex_unlock(&imxmd->mutex);
- return 0;
}
EXPORT_SYMBOL_GPL(imx_media_add_video_device);
/*
- * Search upstream/downstream for a subdevice in the current pipeline
- * with given grp_id, starting from start_entity. Returns the subdev's
- * source/sink pad that it was reached from. If grp_id is zero, just
- * returns the nearest source/sink pad to start_entity. Must be called
- * with mdev->graph_mutex held.
+ * Search upstream/downstream for a subdevice or video device pad in the
+ * current pipeline, starting from start_entity. Returns the device's
+ * source/sink pad that it was reached from. Must be called with
+ * mdev->graph_mutex held.
+ *
+ * If grp_id != 0, finds a subdevice's pad of given grp_id.
+ * Else if buftype != 0, finds a video device's pad of given buffer type.
+ * Else, returns the nearest source/sink pad to start_entity.
*/
-static struct media_pad *
-find_pipeline_pad(struct imx_media_dev *imxmd,
- struct media_entity *start_entity,
- u32 grp_id, bool upstream)
+struct media_pad *
+imx_media_pipeline_pad(struct media_entity *start_entity, u32 grp_id,
+ enum v4l2_buf_type buftype, bool upstream)
{
struct media_entity *me = start_entity;
struct media_pad *pad = NULL;
+ struct video_device *vfd;
struct v4l2_subdev *sd;
int i;
@@ -804,16 +795,27 @@ find_pipeline_pad(struct imx_media_dev *imxmd,
continue;
pad = media_entity_remote_pad(spad);
- if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
+ if (!pad)
continue;
- if (grp_id != 0) {
- sd = media_entity_to_v4l2_subdev(pad->entity);
- if (sd->grp_id & grp_id)
- return pad;
+ if (grp_id) {
+ if (is_media_entity_v4l2_subdev(pad->entity)) {
+ sd = media_entity_to_v4l2_subdev(pad->entity);
+ if (sd->grp_id & grp_id)
+ return pad;
+ }
+
+ return imx_media_pipeline_pad(pad->entity, grp_id,
+ buftype, upstream);
+ } else if (buftype) {
+ if (is_media_entity_v4l2_video_device(pad->entity)) {
+ vfd = media_entity_to_video_device(pad->entity);
+ if (buftype == vfd->queue->type)
+ return pad;
+ }
- return find_pipeline_pad(imxmd, pad->entity,
- grp_id, upstream);
+ return imx_media_pipeline_pad(pad->entity, grp_id,
+ buftype, upstream);
} else {
return pad;
}
@@ -821,28 +823,33 @@ find_pipeline_pad(struct imx_media_dev *imxmd,
return NULL;
}
+EXPORT_SYMBOL_GPL(imx_media_pipeline_pad);
/*
- * Search upstream for a subdev in the current pipeline with
- * given grp_id. Must be called with mdev->graph_mutex held.
+ * Search upstream/downstream for a subdev or video device in the current
+ * pipeline. Must be called with mdev->graph_mutex held.
*/
-static struct v4l2_subdev *
-find_upstream_subdev(struct imx_media_dev *imxmd,
- struct media_entity *start_entity,
- u32 grp_id)
+static struct media_entity *
+find_pipeline_entity(struct media_entity *start, u32 grp_id,
+ enum v4l2_buf_type buftype, bool upstream)
{
+ struct media_pad *pad = NULL;
+ struct video_device *vfd;
struct v4l2_subdev *sd;
- struct media_pad *pad;
- if (is_media_entity_v4l2_subdev(start_entity)) {
- sd = media_entity_to_v4l2_subdev(start_entity);
+ if (grp_id && is_media_entity_v4l2_subdev(start)) {
+ sd = media_entity_to_v4l2_subdev(start);
if (sd->grp_id & grp_id)
- return sd;
+ return &sd->entity;
+ } else if (buftype && is_media_entity_v4l2_video_device(start)) {
+ vfd = media_entity_to_video_device(start);
+ if (buftype == vfd->queue->type)
+ return &vfd->entity;
}
- pad = find_pipeline_pad(imxmd, start_entity, grp_id, true);
+ pad = imx_media_pipeline_pad(start, grp_id, buftype, upstream);
- return pad ? media_entity_to_v4l2_subdev(pad->entity) : NULL;
+ return pad ? pad->entity : NULL;
}
/*
@@ -850,62 +857,57 @@ find_upstream_subdev(struct imx_media_dev *imxmd,
* start entity in the current pipeline.
* Must be called with mdev->graph_mutex held.
*/
-int imx_media_find_mipi_csi2_channel(struct imx_media_dev *imxmd,
- struct media_entity *start_entity)
+int imx_media_pipeline_csi2_channel(struct media_entity *start_entity)
{
struct media_pad *pad;
int ret = -EPIPE;
- pad = find_pipeline_pad(imxmd, start_entity, IMX_MEDIA_GRP_ID_CSI2,
- true);
- if (pad) {
+ pad = imx_media_pipeline_pad(start_entity, IMX_MEDIA_GRP_ID_CSI2,
+ 0, true);
+ if (pad)
ret = pad->index - 1;
- dev_dbg(imxmd->md.dev, "found vc%d from %s\n",
- ret, start_entity->name);
- }
return ret;
}
-EXPORT_SYMBOL_GPL(imx_media_find_mipi_csi2_channel);
+EXPORT_SYMBOL_GPL(imx_media_pipeline_csi2_channel);
/*
- * Find a source pad reached upstream from the given start entity in
- * the current pipeline. Must be called with mdev->graph_mutex held.
+ * Find a subdev reached upstream or downstream from the given start
+ * entity in the current pipeline.
+ * Must be called with mdev->graph_mutex held.
*/
-struct media_pad *
-imx_media_find_upstream_pad(struct imx_media_dev *imxmd,
- struct media_entity *start_entity,
- u32 grp_id)
+struct v4l2_subdev *
+imx_media_pipeline_subdev(struct media_entity *start_entity, u32 grp_id,
+ bool upstream)
{
- struct media_pad *pad;
+ struct media_entity *me;
- pad = find_pipeline_pad(imxmd, start_entity, grp_id, true);
- if (!pad)
+ me = find_pipeline_entity(start_entity, grp_id, 0, upstream);
+ if (!me)
return ERR_PTR(-ENODEV);
- return pad;
+ return media_entity_to_v4l2_subdev(me);
}
-EXPORT_SYMBOL_GPL(imx_media_find_upstream_pad);
+EXPORT_SYMBOL_GPL(imx_media_pipeline_subdev);
/*
* Find a subdev reached upstream from the given start entity in
* the current pipeline.
* Must be called with mdev->graph_mutex held.
*/
-struct v4l2_subdev *
-imx_media_find_upstream_subdev(struct imx_media_dev *imxmd,
- struct media_entity *start_entity,
- u32 grp_id)
+struct video_device *
+imx_media_pipeline_video_device(struct media_entity *start_entity,
+ enum v4l2_buf_type buftype, bool upstream)
{
- struct v4l2_subdev *sd;
+ struct media_entity *me;
- sd = find_upstream_subdev(imxmd, start_entity, grp_id);
- if (!sd)
+ me = find_pipeline_entity(start_entity, 0, buftype, upstream);
+ if (!me)
return ERR_PTR(-ENODEV);
- return sd;
+ return media_entity_to_video_device(me);
}
-EXPORT_SYMBOL_GPL(imx_media_find_upstream_subdev);
+EXPORT_SYMBOL_GPL(imx_media_pipeline_video_device);
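Taken together, the three helpers above replace the old imxmd-scoped lookups with graph walks keyed on group id or buffer type. A minimal usage sketch follows (the wrapper function is hypothetical; per the comments above, the caller is assumed to hold the media device's graph_mutex):

/*
 * Hypothetical caller, shown only to illustrate the new API; the media
 * device's graph_mutex is assumed to be held, as the helpers require.
 */
static int example_query_pipeline(struct media_entity *start)
{
	struct v4l2_subdev *csi2;
	struct video_device *vfd;
	int vc;

	/* subdev search upstream by group id */
	csi2 = imx_media_pipeline_subdev(start, IMX_MEDIA_GRP_ID_CSI2, true);
	if (IS_ERR(csi2))
		return PTR_ERR(csi2);

	/* MIPI CSI-2 virtual channel feeding this pipeline */
	vc = imx_media_pipeline_csi2_channel(start);
	if (vc < 0)
		return vc;

	/* video device search downstream by buffer type */
	vfd = imx_media_pipeline_video_device(start,
					      V4L2_BUF_TYPE_VIDEO_CAPTURE,
					      false);
	return PTR_ERR_OR_ZERO(vfd);
}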
/*
* Turn current pipeline streaming on/off starting from entity.
diff --git a/drivers/staging/media/imx/imx-media-vdic.c b/drivers/staging/media/imx/imx-media-vdic.c
index 4487374c9435..4d90eecb04a2 100644
--- a/drivers/staging/media/imx/imx-media-vdic.c
+++ b/drivers/staging/media/imx/imx-media-vdic.c
@@ -4,13 +4,6 @@
*
* Copyright (c) 2017 Mentor Graphics Inc.
*/
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/timer.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
@@ -65,12 +58,11 @@ struct vdic_pipeline_ops {
#define S_ALIGN 1 /* multiple of 2 */
struct vdic_priv {
- struct device *dev;
- struct ipu_soc *ipu;
- struct imx_media_dev *md;
+ struct device *ipu_dev;
+ struct ipu_soc *ipu;
+
struct v4l2_subdev sd;
struct media_pad pad[VDIC_NUM_PADS];
- int ipu_id;
/* lock to protect all members below */
struct mutex lock;
@@ -145,8 +137,6 @@ static int vdic_get_ipu_resources(struct vdic_priv *priv)
struct ipuv3_channel *ch;
struct ipu_vdi *vdi;
- priv->ipu = priv->md->ipu[priv->ipu_id];
-
vdi = ipu_vdi_get(priv->ipu);
if (IS_ERR(vdi)) {
v4l2_err(&priv->sd, "failed to get VDIC\n");
@@ -511,7 +501,8 @@ static int vdic_s_stream(struct v4l2_subdev *sd, int enable)
if (priv->stream_count != !enable)
goto update_count;
- dev_dbg(priv->dev, "stream %s\n", enable ? "ON" : "OFF");
+ dev_dbg(priv->ipu_dev, "%s: stream %s\n", sd->name,
+ enable ? "ON" : "OFF");
if (enable)
ret = vdic_start(priv);
@@ -686,8 +677,8 @@ static int vdic_link_setup(struct media_entity *entity,
struct v4l2_subdev *remote_sd;
int ret = 0;
- dev_dbg(priv->dev, "link setup %s -> %s", remote->entity->name,
- local->entity->name);
+ dev_dbg(priv->ipu_dev, "%s: link setup %s -> %s\n",
+ sd->name, remote->entity->name, local->entity->name);
mutex_lock(&priv->lock);
@@ -860,9 +851,6 @@ static int vdic_registered(struct v4l2_subdev *sd)
int i, ret;
u32 code;
- /* get media device */
- priv->md = dev_get_drvdata(sd->v4l2_dev->dev);
-
for (i = 0; i < VDIC_NUM_PADS; i++) {
priv->pad[i].flags = (i == VDIC_SRC_PAD_DIRECT) ?
MEDIA_PAD_FL_SOURCE : MEDIA_PAD_FL_SINK;
@@ -934,77 +922,53 @@ static const struct v4l2_subdev_internal_ops vdic_internal_ops = {
.unregistered = vdic_unregistered,
};
-static int imx_vdic_probe(struct platform_device *pdev)
+struct v4l2_subdev *imx_media_vdic_register(struct v4l2_device *v4l2_dev,
+ struct device *ipu_dev,
+ struct ipu_soc *ipu,
+ u32 grp_id)
{
- struct imx_media_ipu_internal_sd_pdata *pdata;
struct vdic_priv *priv;
int ret;
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ priv = devm_kzalloc(ipu_dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
- platform_set_drvdata(pdev, &priv->sd);
- priv->dev = &pdev->dev;
-
- pdata = priv->dev->platform_data;
- priv->ipu_id = pdata->ipu_id;
+ priv->ipu_dev = ipu_dev;
+ priv->ipu = ipu;
v4l2_subdev_init(&priv->sd, &vdic_subdev_ops);
v4l2_set_subdevdata(&priv->sd, priv);
priv->sd.internal_ops = &vdic_internal_ops;
priv->sd.entity.ops = &vdic_entity_ops;
priv->sd.entity.function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER;
- priv->sd.dev = &pdev->dev;
- priv->sd.owner = THIS_MODULE;
+ priv->sd.owner = ipu_dev->driver->owner;
priv->sd.flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
- /* get our group id */
- priv->sd.grp_id = pdata->grp_id;
- strscpy(priv->sd.name, pdata->sd_name, sizeof(priv->sd.name));
+ priv->sd.grp_id = grp_id;
+ imx_media_grp_id_to_sd_name(priv->sd.name, sizeof(priv->sd.name),
+ priv->sd.grp_id, ipu_get_num(ipu));
mutex_init(&priv->lock);
- ret = v4l2_async_register_subdev(&priv->sd);
+ ret = v4l2_device_register_subdev(v4l2_dev, &priv->sd);
if (ret)
goto free;
- return 0;
+ return &priv->sd;
free:
mutex_destroy(&priv->lock);
- return ret;
+ return ERR_PTR(ret);
}
-static int imx_vdic_remove(struct platform_device *pdev)
+int imx_media_vdic_unregister(struct v4l2_subdev *sd)
{
- struct v4l2_subdev *sd = platform_get_drvdata(pdev);
struct vdic_priv *priv = v4l2_get_subdevdata(sd);
v4l2_info(sd, "Removing\n");
- v4l2_async_unregister_subdev(sd);
+ v4l2_device_unregister_subdev(sd);
mutex_destroy(&priv->lock);
media_entity_cleanup(&sd->entity);
return 0;
}
-
-static const struct platform_device_id imx_vdic_ids[] = {
- { .name = "imx-ipuv3-vdic" },
- { },
-};
-MODULE_DEVICE_TABLE(platform, imx_vdic_ids);
-
-static struct platform_driver imx_vdic_driver = {
- .probe = imx_vdic_probe,
- .remove = imx_vdic_remove,
- .id_table = imx_vdic_ids,
- .driver = {
- .name = "imx-ipuv3-vdic",
- },
-};
-module_platform_driver(imx_vdic_driver);
-
-MODULE_DESCRIPTION("i.MX VDIC subdev driver");
-MODULE_AUTHOR("Steve Longerbeam <steve_longerbeam@mentor.com>");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:imx-ipuv3-vdic");
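With the platform driver gone, the VDIC subdev is now registered synchronously by its IPU host. A short sketch of the expected call sequence (the wrapper functions are hypothetical and the group-id macro name is an assumption; the real caller lives in imx-media-internal-sd.c, which is not part of this excerpt):

/* Hypothetical host-side registration; the group-id macro name is assumed. */
static int example_register_vdic(struct imx_media_dev *imxmd,
				 struct device *ipu_dev, struct ipu_soc *ipu)
{
	struct v4l2_subdev *sd;

	sd = imx_media_vdic_register(&imxmd->v4l2_dev, ipu_dev, ipu,
				     IMX_MEDIA_GRP_ID_IPU_VDIC);
	if (IS_ERR(sd))
		return PTR_ERR(sd);

	imxmd->sync_sd[ipu_get_num(ipu)][IPU_VDIC] = sd;
	return 0;
}

/* Teardown mirrors registration. */
static void example_unregister_vdic(struct imx_media_dev *imxmd, int ipu_id)
{
	imx_media_vdic_unregister(imxmd->sync_sd[ipu_id][IPU_VDIC]);
}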
diff --git a/drivers/staging/media/imx/imx-media.h b/drivers/staging/media/imx/imx-media.h
index 6587aa49e005..8a60bdafe2da 100644
--- a/drivers/staging/media/imx/imx-media.h
+++ b/drivers/staging/media/imx/imx-media.h
@@ -16,6 +16,19 @@
#include <video/imx-ipu-v3.h>
/*
+ * Enumeration of the IPU internal sub-devices
+ */
+enum {
+ IPU_CSI0 = 0,
+ IPU_CSI1,
+ IPU_VDIC,
+ IPU_IC_PRP,
+ IPU_IC_PRPENC,
+ IPU_IC_PRPVF,
+ NUM_IPU_SUBDEVS,
+};
+
+/*
* Pad definitions for the subdevs with multiple source or
* sink pads
*/
@@ -111,25 +124,6 @@ struct imx_media_pad_vdev {
struct list_head list;
};
-struct imx_media_ipu_internal_sd_pdata {
- char sd_name[V4L2_SUBDEV_NAME_SIZE];
- u32 grp_id;
- int ipu_id;
-};
-
-struct imx_media_async_subdev {
- /* the base asd - must be first in this struct */
- struct v4l2_async_subdev asd;
- /* the platform device of IPU-internal subdevs */
- struct platform_device *pdev;
-};
-
-static inline struct imx_media_async_subdev *
-to_imx_media_asd(struct v4l2_async_subdev *asd)
-{
- return container_of(asd, struct imx_media_async_subdev, asd);
-}
-
struct imx_media_dev {
struct media_device md;
struct v4l2_device v4l2_dev;
@@ -142,11 +136,11 @@ struct imx_media_dev {
/* master video device list */
struct list_head vdev_list;
- /* IPUs this media driver control, valid after subdevs bound */
- struct ipu_soc *ipu[2];
-
/* for async subdev registration */
struct v4l2_async_notifier notifier;
+
+ /* the IPU-internal subdevs, registered synchronously */
+ struct v4l2_subdev *sync_sd[2][NUM_IPU_SUBDEVS];
};
enum codespace_sel {
@@ -176,8 +170,7 @@ void imx_media_fill_default_mbus_fields(struct v4l2_mbus_framefmt *tryfmt,
struct v4l2_mbus_framefmt *fmt,
bool ic_route);
int imx_media_mbus_fmt_to_pix_fmt(struct v4l2_pix_format *pix,
- struct v4l2_rect *compose,
- const struct v4l2_mbus_framefmt *mbus,
+ struct v4l2_mbus_framefmt *mbus,
const struct imx_media_pixfmt *cc);
int imx_media_mbus_fmt_to_ipu_image(struct ipu_image *image,
struct v4l2_mbus_framefmt *mbus);
@@ -191,18 +184,18 @@ imx_media_find_subdev_by_fwnode(struct imx_media_dev *imxmd,
struct v4l2_subdev *
imx_media_find_subdev_by_devname(struct imx_media_dev *imxmd,
const char *devname);
-int imx_media_add_video_device(struct imx_media_dev *imxmd,
- struct imx_media_video_dev *vdev);
-int imx_media_find_mipi_csi2_channel(struct imx_media_dev *imxmd,
- struct media_entity *start_entity);
+void imx_media_add_video_device(struct imx_media_dev *imxmd,
+ struct imx_media_video_dev *vdev);
+int imx_media_pipeline_csi2_channel(struct media_entity *start_entity);
struct media_pad *
-imx_media_find_upstream_pad(struct imx_media_dev *imxmd,
- struct media_entity *start_entity,
- u32 grp_id);
+imx_media_pipeline_pad(struct media_entity *start_entity, u32 grp_id,
+ enum v4l2_buf_type buftype, bool upstream);
struct v4l2_subdev *
-imx_media_find_upstream_subdev(struct imx_media_dev *imxmd,
- struct media_entity *start_entity,
- u32 grp_id);
+imx_media_pipeline_subdev(struct media_entity *start_entity, u32 grp_id,
+ bool upstream);
+struct video_device *
+imx_media_pipeline_video_device(struct media_entity *start_entity,
+ enum v4l2_buf_type buftype, bool upstream);
struct imx_media_dma_buf {
void *virt;
@@ -210,9 +203,9 @@ struct imx_media_dma_buf {
unsigned long len;
};
-void imx_media_free_dma_buf(struct imx_media_dev *imxmd,
+void imx_media_free_dma_buf(struct device *dev,
struct imx_media_dma_buf *buf);
-int imx_media_alloc_dma_buf(struct imx_media_dev *imxmd,
+int imx_media_alloc_dma_buf(struct device *dev,
struct imx_media_dma_buf *buf,
int size);
@@ -220,22 +213,12 @@ int imx_media_pipeline_set_stream(struct imx_media_dev *imxmd,
struct media_entity *entity,
bool on);
-/* imx-media-dev.c */
-int imx_media_add_async_subdev(struct imx_media_dev *imxmd,
- struct fwnode_handle *fwnode,
- struct platform_device *pdev);
-
-int imx_media_subdev_bound(struct v4l2_async_notifier *notifier,
- struct v4l2_subdev *sd,
- struct v4l2_async_subdev *asd);
-int imx_media_link_notify(struct media_link *link, u32 flags,
- unsigned int notification);
-void imx_media_notify(struct v4l2_subdev *sd, unsigned int notification,
- void *arg);
+/* imx-media-dev-common.c */
int imx_media_probe_complete(struct v4l2_async_notifier *notifier);
-
-struct imx_media_dev *imx_media_dev_init(struct device *dev);
-int imx_media_dev_notifier_register(struct imx_media_dev *imxmd);
+struct imx_media_dev *imx_media_dev_init(struct device *dev,
+ const struct media_device_ops *ops);
+int imx_media_dev_notifier_register(struct imx_media_dev *imxmd,
+ const struct v4l2_async_notifier_operations *ops);
/* imx-media-fim.c */
struct imx_media_fim;
@@ -248,11 +231,9 @@ struct imx_media_fim *imx_media_fim_init(struct v4l2_subdev *sd);
void imx_media_fim_free(struct imx_media_fim *fim);
/* imx-media-internal-sd.c */
-int imx_media_add_ipu_internal_subdevs(struct imx_media_dev *imxmd,
- int ipu_id);
-int imx_media_create_ipu_internal_links(struct imx_media_dev *imxmd,
- struct v4l2_subdev *sd);
-void imx_media_remove_ipu_internal_subdevs(struct imx_media_dev *imxmd);
+int imx_media_register_ipu_internal_subdevs(struct imx_media_dev *imxmd,
+ struct v4l2_subdev *csi);
+void imx_media_unregister_ipu_internal_subdevs(struct imx_media_dev *imxmd);
/* imx-media-of.c */
int imx_media_add_of_subdevs(struct imx_media_dev *dev,
@@ -264,18 +245,29 @@ int imx_media_create_csi_of_links(struct imx_media_dev *imxmd,
int imx_media_of_add_csi(struct imx_media_dev *imxmd,
struct device_node *csi_np);
+/* imx-media-vdic.c */
+struct v4l2_subdev *imx_media_vdic_register(struct v4l2_device *v4l2_dev,
+ struct device *ipu_dev,
+ struct ipu_soc *ipu,
+ u32 grp_id);
+int imx_media_vdic_unregister(struct v4l2_subdev *sd);
+
+/* imx-ic-common.c */
+struct v4l2_subdev *imx_media_ic_register(struct v4l2_device *v4l2_dev,
+ struct device *ipu_dev,
+ struct ipu_soc *ipu,
+ u32 grp_id);
+int imx_media_ic_unregister(struct v4l2_subdev *sd);
+
/* imx-media-capture.c */
struct imx_media_video_dev *
-imx_media_capture_device_init(struct v4l2_subdev *src_sd, int pad);
+imx_media_capture_device_init(struct device *dev, struct v4l2_subdev *src_sd,
+ int pad);
void imx_media_capture_device_remove(struct imx_media_video_dev *vdev);
-int imx_media_capture_device_register(struct imx_media_dev *md,
- struct imx_media_video_dev *vdev);
+int imx_media_capture_device_register(struct imx_media_video_dev *vdev);
void imx_media_capture_device_unregister(struct imx_media_video_dev *vdev);
struct imx_media_buffer *
imx_media_capture_device_next_buf(struct imx_media_video_dev *vdev);
-void imx_media_capture_device_set_format(struct imx_media_video_dev *vdev,
- const struct v4l2_pix_format *pix,
- const struct v4l2_rect *compose);
void imx_media_capture_device_error(struct imx_media_video_dev *vdev);
/* subdev group ids */
diff --git a/drivers/staging/media/imx/imx7-media-csi.c b/drivers/staging/media/imx/imx7-media-csi.c
index a708a0340eb1..f775870df7e0 100644
--- a/drivers/staging/media/imx/imx7-media-csi.c
+++ b/drivers/staging/media/imx/imx7-media-csi.c
@@ -152,8 +152,6 @@
#define CSI_CSICR18 0x48
#define CSI_CSICR19 0x4c
-static const char * const imx7_csi_clk_id[] = {"axi", "dcic", "mclk"};
-
struct imx7_csi {
struct device *dev;
struct v4l2_subdev sd;
@@ -180,9 +178,7 @@ struct imx7_csi {
void __iomem *regbase;
int irq;
-
- int num_clks;
- struct clk_bulk_data *clks;
+ struct clk *mclk;
/* active vb2 buffers to send to video dev sink */
struct imx_media_buffer *active_vb2_buf[2];
@@ -199,23 +195,15 @@ struct imx7_csi {
struct completion last_eof_completion;
};
-#define imx7_csi_reg_read(_csi, _offset) \
- __raw_readl((_csi)->regbase + (_offset))
-#define imx7_csi_reg_write(_csi, _val, _offset) \
- __raw_writel(_val, (_csi)->regbase + (_offset))
-
-static void imx7_csi_clk_enable(struct imx7_csi *csi)
+static u32 imx7_csi_reg_read(struct imx7_csi *csi, unsigned int offset)
{
- int ret;
-
- ret = clk_bulk_prepare_enable(csi->num_clks, csi->clks);
- if (ret < 0)
- dev_err(csi->dev, "failed to enable clocks\n");
+ return readl(csi->regbase + offset);
}
-static void imx7_csi_clk_disable(struct imx7_csi *csi)
+static void imx7_csi_reg_write(struct imx7_csi *csi, unsigned int value,
+ unsigned int offset)
{
- clk_bulk_disable_unprepare(csi->num_clks, csi->clks);
+ writel(value, csi->regbase + offset);
}
static void imx7_csi_hw_reset(struct imx7_csi *csi)
@@ -229,9 +217,9 @@ static void imx7_csi_hw_reset(struct imx7_csi *csi)
imx7_csi_reg_write(csi, CSICR3_RESET_VAL, CSI_CSICR3);
}
-static unsigned long imx7_csi_irq_clear(struct imx7_csi *csi)
+static u32 imx7_csi_irq_clear(struct imx7_csi *csi)
{
- unsigned long isr;
+ u32 isr;
isr = imx7_csi_reg_read(csi, CSI_CSISR);
imx7_csi_reg_write(csi, isr, CSI_CSISR);
@@ -257,7 +245,7 @@ static void imx7_csi_init_interface(struct imx7_csi *csi)
static void imx7_csi_hw_enable_irq(struct imx7_csi *csi)
{
- unsigned long cr1 = imx7_csi_reg_read(csi, CSI_CSICR1);
+ u32 cr1 = imx7_csi_reg_read(csi, CSI_CSICR1);
cr1 |= BIT_SOF_INTEN;
cr1 |= BIT_RFF_OR_INT;
@@ -273,7 +261,7 @@ static void imx7_csi_hw_enable_irq(struct imx7_csi *csi)
static void imx7_csi_hw_disable_irq(struct imx7_csi *csi)
{
- unsigned long cr1 = imx7_csi_reg_read(csi, CSI_CSICR1);
+ u32 cr1 = imx7_csi_reg_read(csi, CSI_CSICR1);
cr1 &= ~BIT_SOF_INTEN;
cr1 &= ~BIT_RFF_OR_INT;
@@ -286,7 +274,7 @@ static void imx7_csi_hw_disable_irq(struct imx7_csi *csi)
static void imx7_csi_hw_enable(struct imx7_csi *csi)
{
- unsigned long cr = imx7_csi_reg_read(csi, CSI_CSICR18);
+ u32 cr = imx7_csi_reg_read(csi, CSI_CSICR18);
cr |= BIT_CSI_HW_ENABLE;
@@ -295,7 +283,7 @@ static void imx7_csi_hw_enable(struct imx7_csi *csi)
static void imx7_csi_hw_disable(struct imx7_csi *csi)
{
- unsigned long cr = imx7_csi_reg_read(csi, CSI_CSICR18);
+ u32 cr = imx7_csi_reg_read(csi, CSI_CSICR18);
cr &= ~BIT_CSI_HW_ENABLE;
@@ -304,7 +292,7 @@ static void imx7_csi_hw_disable(struct imx7_csi *csi)
static void imx7_csi_dma_reflash(struct imx7_csi *csi)
{
- unsigned long cr3 = imx7_csi_reg_read(csi, CSI_CSICR18);
+ u32 cr3 = imx7_csi_reg_read(csi, CSI_CSICR18);
cr3 = imx7_csi_reg_read(csi, CSI_CSICR3);
cr3 |= BIT_DMA_REFLASH_RFF;
@@ -313,7 +301,7 @@ static void imx7_csi_dma_reflash(struct imx7_csi *csi)
static void imx7_csi_rx_fifo_clear(struct imx7_csi *csi)
{
- unsigned long cr1;
+ u32 cr1;
cr1 = imx7_csi_reg_read(csi, CSI_CSICR1);
imx7_csi_reg_write(csi, cr1 & ~BIT_FCC, CSI_CSICR1);
@@ -331,7 +319,7 @@ static void imx7_csi_buf_stride_set(struct imx7_csi *csi, u32 stride)
static void imx7_csi_deinterlace_enable(struct imx7_csi *csi, bool enable)
{
- unsigned long cr18 = imx7_csi_reg_read(csi, CSI_CSICR18);
+ u32 cr18 = imx7_csi_reg_read(csi, CSI_CSICR18);
if (enable)
cr18 |= BIT_DEINTERLACE_EN;
@@ -343,8 +331,8 @@ static void imx7_csi_deinterlace_enable(struct imx7_csi *csi, bool enable)
static void imx7_csi_dmareq_rff_enable(struct imx7_csi *csi)
{
- unsigned long cr3 = imx7_csi_reg_read(csi, CSI_CSICR3);
- unsigned long cr2 = imx7_csi_reg_read(csi, CSI_CSICR2);
+ u32 cr3 = imx7_csi_reg_read(csi, CSI_CSICR3);
+ u32 cr2 = imx7_csi_reg_read(csi, CSI_CSICR2);
/* Burst Type of DMA Transfer from RxFIFO. INCR16 */
cr2 |= 0xC0000000;
@@ -360,7 +348,7 @@ static void imx7_csi_dmareq_rff_enable(struct imx7_csi *csi)
static void imx7_csi_dmareq_rff_disable(struct imx7_csi *csi)
{
- unsigned long cr3 = imx7_csi_reg_read(csi, CSI_CSICR3);
+ u32 cr3 = imx7_csi_reg_read(csi, CSI_CSICR3);
cr3 &= ~BIT_DMA_REQ_EN_RFF;
cr3 &= ~BIT_HRESP_ERR_EN;
@@ -408,17 +396,23 @@ static void imx7_csi_error_recovery(struct imx7_csi *csi)
imx7_csi_hw_enable(csi);
}
-static void imx7_csi_init(struct imx7_csi *csi)
+static int imx7_csi_init(struct imx7_csi *csi)
{
+ int ret;
+
if (csi->is_init)
- return;
+ return 0;
- imx7_csi_clk_enable(csi);
+ ret = clk_prepare_enable(csi->mclk);
+ if (ret < 0)
+ return ret;
imx7_csi_hw_reset(csi);
imx7_csi_init_interface(csi);
imx7_csi_dmareq_rff_enable(csi);
csi->is_init = true;
+
+ return 0;
}
static void imx7_csi_deinit(struct imx7_csi *csi)
@@ -429,7 +423,7 @@ static void imx7_csi_deinit(struct imx7_csi *csi)
imx7_csi_hw_reset(csi);
imx7_csi_init_interface(csi);
imx7_csi_dmareq_rff_disable(csi);
- imx7_csi_clk_disable(csi);
+ clk_disable_unprepare(csi->mclk);
csi->is_init = false;
}
@@ -448,11 +442,19 @@ static int imx7_csi_get_upstream_endpoint(struct imx7_csi *csi,
src = &csi->src_sd->entity;
+ /*
+ * if the source is neither a video mux nor a CSI-2 receiver, get the
+ * entity directly upstream from this CSI
+ */
+ if (src->function != MEDIA_ENT_F_VID_IF_BRIDGE &&
+ src->function != MEDIA_ENT_F_VID_MUX)
+ src = &csi->sd.entity;
+
skip_video_mux:
/* get source pad of entity directly upstream from src */
- pad = imx_media_find_upstream_pad(csi->imxmd, src, 0);
- if (IS_ERR(pad))
- return PTR_ERR(pad);
+ pad = imx_media_pipeline_pad(src, 0, 0, true);
+ if (!pad)
+ return -ENODEV;
sd = media_entity_to_v4l2_subdev(pad->entity);
@@ -531,7 +533,7 @@ static int imx7_csi_link_setup(struct media_entity *entity,
init:
if (csi->sink || csi->src_sd)
- imx7_csi_init(csi);
+ ret = imx7_csi_init(csi);
else
imx7_csi_deinit(csi);
@@ -653,7 +655,7 @@ static void imx7_csi_vb2_buf_done(struct imx7_csi *csi)
static irqreturn_t imx7_csi_irq_handler(int irq, void *data)
{
struct imx7_csi *csi = data;
- unsigned long status;
+ u32 status;
spin_lock(&csi->irqlock);
@@ -714,7 +716,7 @@ static int imx7_csi_dma_start(struct imx7_csi *csi)
struct v4l2_pix_format *out_pix = &vdev->fmt.fmt.pix;
int ret;
- ret = imx_media_alloc_dma_buf(csi->imxmd, &csi->underrun_buf,
+ ret = imx_media_alloc_dma_buf(csi->dev, &csi->underrun_buf,
out_pix->sizeimage);
if (ret < 0) {
v4l2_warn(&csi->sd, "consider increasing the CMA area\n");
@@ -754,7 +756,7 @@ static void imx7_csi_dma_stop(struct imx7_csi *csi)
imx7_csi_dma_unsetup_vb2_buf(csi, VB2_BUF_STATE_ERROR);
- imx_media_free_dma_buf(csi->imxmd, &csi->underrun_buf);
+ imx_media_free_dma_buf(csi->dev, &csi->underrun_buf);
}
static int imx7_csi_configure(struct imx7_csi *csi)
@@ -811,7 +813,7 @@ static int imx7_csi_configure(struct imx7_csi *csi)
return 0;
}
-static int imx7_csi_enable(struct imx7_csi *csi)
+static void imx7_csi_enable(struct imx7_csi *csi)
{
imx7_csi_sw_reset(csi);
@@ -819,10 +821,7 @@ static int imx7_csi_enable(struct imx7_csi *csi)
imx7_csi_dmareq_rff_enable(csi);
imx7_csi_hw_enable_irq(csi);
imx7_csi_hw_enable(csi);
- return 0;
}
-
- return 0;
}
static void imx7_csi_disable(struct imx7_csi *csi)
@@ -1021,7 +1020,6 @@ static int imx7_csi_try_fmt(struct imx7_csi *csi,
break;
default:
return -EINVAL;
- break;
}
return 0;
}
@@ -1031,11 +1029,8 @@ static int imx7_csi_set_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_format *sdformat)
{
struct imx7_csi *csi = v4l2_get_subdevdata(sd);
- struct imx_media_video_dev *vdev = csi->vdev;
const struct imx_media_pixfmt *outcc;
struct v4l2_mbus_framefmt *outfmt;
- struct v4l2_pix_format vdev_fmt;
- struct v4l2_rect vdev_compose;
const struct imx_media_pixfmt *cc;
struct v4l2_mbus_framefmt *fmt;
struct v4l2_subdev_format format;
@@ -1080,19 +1075,8 @@ static int imx7_csi_set_fmt(struct v4l2_subdev *sd,
csi->cc[IMX7_CSI_PAD_SRC] = outcc;
}
- if (sdformat->which == V4L2_SUBDEV_FORMAT_TRY)
- goto out_unlock;
-
- csi->cc[sdformat->pad] = cc;
-
- /* propagate output pad format to capture device */
- imx_media_mbus_fmt_to_pix_fmt(&vdev_fmt, &vdev_compose,
- &csi->format_mbus[IMX7_CSI_PAD_SRC],
- csi->cc[IMX7_CSI_PAD_SRC]);
- mutex_unlock(&csi->lock);
- imx_media_capture_device_set_format(vdev, &vdev_fmt, &vdev_compose);
-
- return 0;
+ if (sdformat->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ csi->cc[sdformat->pad] = cc;
out_unlock:
mutex_unlock(&csi->lock);
@@ -1126,17 +1110,7 @@ static int imx7_csi_registered(struct v4l2_subdev *sd)
if (ret < 0)
return ret;
- ret = imx_media_capture_device_register(csi->imxmd, csi->vdev);
- if (ret < 0)
- return ret;
-
- ret = imx_media_add_video_device(csi->imxmd, csi->vdev);
- if (ret < 0) {
- imx_media_capture_device_unregister(csi->vdev);
- return ret;
- }
-
- return 0;
+ return imx_media_capture_device_register(csi->vdev);
}
static void imx7_csi_unregistered(struct v4l2_subdev *sd)
@@ -1200,31 +1174,12 @@ static int imx7_csi_parse_endpoint(struct device *dev,
return fwnode_device_is_available(asd->match.fwnode) ? 0 : -EINVAL;
}
-static int imx7_csi_clocks_get(struct imx7_csi *csi)
-{
- struct device *dev = csi->dev;
- int i;
-
- csi->num_clks = ARRAY_SIZE(imx7_csi_clk_id);
- csi->clks = devm_kcalloc(dev, csi->num_clks, sizeof(*csi->clks),
- GFP_KERNEL);
-
- if (!csi->clks)
- return -ENOMEM;
-
- for (i = 0; i < csi->num_clks; i++)
- csi->clks[i].id = imx7_csi_clk_id[i];
-
- return devm_clk_bulk_get(dev, csi->num_clks, csi->clks);
-}
-
static int imx7_csi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
struct imx_media_dev *imxmd;
struct imx7_csi *csi;
- struct resource *res;
int ret;
csi = devm_kzalloc(&pdev->dev, sizeof(*csi), GFP_KERNEL);
@@ -1233,24 +1188,22 @@ static int imx7_csi_probe(struct platform_device *pdev)
csi->dev = dev;
- ret = imx7_csi_clocks_get(csi);
- if (ret < 0) {
- dev_err(dev, "Failed to get clocks");
- return -ENODEV;
+ csi->mclk = devm_clk_get(&pdev->dev, "mclk");
+ if (IS_ERR(csi->mclk)) {
+ ret = PTR_ERR(csi->mclk);
+ dev_err(dev, "Failed to get mclk: %d", ret);
+ return ret;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
csi->irq = platform_get_irq(pdev, 0);
- if (!res || csi->irq < 0) {
+ if (csi->irq < 0) {
dev_err(dev, "Missing platform resources data\n");
- return -ENODEV;
+ return csi->irq;
}
- csi->regbase = devm_ioremap_resource(dev, res);
- if (IS_ERR(csi->regbase)) {
- dev_err(dev, "Failed platform resources map\n");
- return -ENODEV;
- }
+ csi->regbase = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(csi->regbase))
+ return PTR_ERR(csi->regbase);
spin_lock_init(&csi->irqlock);
mutex_init(&csi->lock);
@@ -1260,12 +1213,11 @@ static int imx7_csi_probe(struct platform_device *pdev)
(void *)csi);
if (ret < 0) {
dev_err(dev, "Request CSI IRQ failed.\n");
- ret = -ENODEV;
goto destroy_mutex;
}
/* add media device */
- imxmd = imx_media_dev_init(dev);
+ imxmd = imx_media_dev_init(dev, NULL);
if (IS_ERR(imxmd)) {
ret = PTR_ERR(imxmd);
goto destroy_mutex;
@@ -1276,7 +1228,7 @@ static int imx7_csi_probe(struct platform_device *pdev)
if (ret < 0 && ret != -ENODEV && ret != -EEXIST)
goto cleanup;
- ret = imx_media_dev_notifier_register(imxmd);
+ ret = imx_media_dev_notifier_register(imxmd, NULL);
if (ret < 0)
goto cleanup;
@@ -1292,7 +1244,8 @@ static int imx7_csi_probe(struct platform_device *pdev)
csi->sd.grp_id = IMX_MEDIA_GRP_ID_CSI;
snprintf(csi->sd.name, sizeof(csi->sd.name), "csi");
- csi->vdev = imx_media_capture_device_init(&csi->sd, IMX7_CSI_PAD_SRC);
+ csi->vdev = imx_media_capture_device_init(csi->sd.dev, &csi->sd,
+ IMX7_CSI_PAD_SRC);
if (IS_ERR(csi->vdev))
return PTR_ERR(csi->vdev);
diff --git a/drivers/staging/media/imx/imx7-mipi-csis.c b/drivers/staging/media/imx/imx7-mipi-csis.c
index 19455f425416..d1cdf011c8f1 100644
--- a/drivers/staging/media/imx/imx7-mipi-csis.c
+++ b/drivers/staging/media/imx/imx7-mipi-csis.c
@@ -456,13 +456,9 @@ static void mipi_csis_set_params(struct csi_state *state)
MIPI_CSIS_CMN_CTRL_UPDATE_SHADOW_CTRL);
}
-static void mipi_csis_clk_enable(struct csi_state *state)
+static int mipi_csis_clk_enable(struct csi_state *state)
{
- int ret;
-
- ret = clk_bulk_prepare_enable(state->num_clks, state->clks);
- if (ret < 0)
- dev_err(state->dev, "failed to enable clocks\n");
+ return clk_bulk_prepare_enable(state->num_clks, state->clks);
}
static void mipi_csis_clk_disable(struct csi_state *state)
@@ -784,6 +780,17 @@ static irqreturn_t mipi_csis_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static int mipi_csis_registered(struct v4l2_subdev *mipi_sd)
+{
+ struct csi_state *state = mipi_sd_to_csis_state(mipi_sd);
+
+ state->pads[CSIS_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ state->pads[CSIS_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+
+ return media_entity_pads_init(&state->mipi_sd.entity, CSIS_PADS_NUM,
+ state->pads);
+}
+
static const struct v4l2_subdev_core_ops mipi_csis_core_ops = {
.log_status = mipi_csis_log_status,
};
@@ -809,6 +816,10 @@ static const struct v4l2_subdev_ops mipi_csis_subdev_ops = {
.pad = &mipi_csis_pad_ops,
};
+static const struct v4l2_subdev_internal_ops mipi_csis_internal_ops = {
+ .registered = mipi_csis_registered,
+};
+
static int mipi_csis_parse_dt(struct platform_device *pdev,
struct csi_state *state)
{
@@ -869,6 +880,7 @@ static int mipi_csis_subdev_init(struct v4l2_subdev *mipi_sd,
mipi_sd->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
mipi_sd->entity.ops = &mipi_csis_entity_ops;
+ mipi_sd->internal_ops = &mipi_csis_internal_ops;
mipi_sd->dev = &pdev->dev;
@@ -890,7 +902,6 @@ static int mipi_csis_subdev_init(struct v4l2_subdev *mipi_sd,
return ret;
}
-
static int mipi_csis_dump_regs_show(struct seq_file *m, void *private)
{
struct csi_state *state = m->private;
@@ -938,7 +949,7 @@ static int mipi_csis_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct resource *mem_res;
struct csi_state *state;
- int ret = -ENOMEM;
+ int ret;
state = devm_kzalloc(dev, sizeof(*state), GFP_KERNEL);
if (!state)
@@ -973,7 +984,11 @@ static int mipi_csis_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- mipi_csis_clk_enable(state);
+ ret = mipi_csis_clk_enable(state);
+ if (ret < 0) {
+ dev_err(state->dev, "failed to enable clocks: %d\n", ret);
+ return ret;
+ }
ret = devm_request_irq(dev, state->irq, mipi_csis_irq_handler,
0, dev_name(dev), state);
@@ -990,13 +1005,6 @@ static int mipi_csis_probe(struct platform_device *pdev)
if (ret < 0)
goto disable_clock;
- state->pads[CSIS_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
- state->pads[CSIS_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
- ret = media_entity_pads_init(&state->mipi_sd.entity, CSIS_PADS_NUM,
- state->pads);
- if (ret < 0)
- goto unregister_subdev;
-
memcpy(state->events, mipi_csis_events, sizeof(state->events));
mipi_csis_debugfs_init(state);
@@ -1016,7 +1024,6 @@ static int mipi_csis_probe(struct platform_device *pdev)
unregister_all:
mipi_csis_debugfs_exit(state);
media_entity_cleanup(&state->mipi_sd.entity);
-unregister_subdev:
v4l2_async_unregister_subdev(&state->mipi_sd);
disable_clock:
mipi_csis_clk_disable(state);
diff --git a/drivers/staging/media/ipu3/include/intel-ipu3.h b/drivers/staging/media/ipu3/include/intel-ipu3.h
index 1e7184e4311d..c7cd27efac8a 100644
--- a/drivers/staging/media/ipu3/include/intel-ipu3.h
+++ b/drivers/staging/media/ipu3/include/intel-ipu3.h
@@ -2472,7 +2472,7 @@ struct ipu3_uapi_acc_param {
struct ipu3_uapi_yuvp1_yds_config yds2 __attribute__((aligned(32)));
struct ipu3_uapi_yuvp2_tcc_static_config tcc __attribute__((aligned(32)));
struct ipu3_uapi_anr_config anr;
- struct ipu3_uapi_awb_fr_config_s awb_fr;
+ struct ipu3_uapi_awb_fr_config_s awb_fr __attribute__((aligned(32)));
struct ipu3_uapi_ae_config ae;
struct ipu3_uapi_af_config_s af;
struct ipu3_uapi_awb_config awb;
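The added attribute forces awb_fr onto the 32-byte boundary the IMGU ABI expects for accelerator cluster parameters. A hedged sketch of a compile-time check (illustrative only, not part of the patch):

/* Illustrative compile-time layout check; not part of the driver. */
static inline void ipu3_uapi_acc_param_check(void)
{
	BUILD_BUG_ON(offsetof(struct ipu3_uapi_acc_param, awb_fr) % 32);
}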
diff --git a/drivers/staging/media/ipu3/ipu3-css-fw.c b/drivers/staging/media/ipu3/ipu3-css-fw.c
index 4122d4e42db6..45aff76198e2 100644
--- a/drivers/staging/media/ipu3/ipu3-css-fw.c
+++ b/drivers/staging/media/ipu3/ipu3-css-fw.c
@@ -200,13 +200,11 @@ int imgu_css_fw_init(struct imgu_css *css)
goto bad_fw;
for (j = 0; j < bi->info.isp.num_output_formats; j++)
- if (bi->info.isp.output_formats[j] < 0 ||
- bi->info.isp.output_formats[j] >=
+ if (bi->info.isp.output_formats[j] >=
IMGU_ABI_FRAME_FORMAT_NUM)
goto bad_fw;
for (j = 0; j < bi->info.isp.num_vf_formats; j++)
- if (bi->info.isp.vf_formats[j] < 0 ||
- bi->info.isp.vf_formats[j] >=
+ if (bi->info.isp.vf_formats[j] >=
IMGU_ABI_FRAME_FORMAT_NUM)
goto bad_fw;
diff --git a/drivers/staging/media/ipu3/ipu3-css.c b/drivers/staging/media/ipu3/ipu3-css.c
index 23cf5b2cfe8b..fd1ed84c400c 100644
--- a/drivers/staging/media/ipu3/ipu3-css.c
+++ b/drivers/staging/media/ipu3/ipu3-css.c
@@ -24,9 +24,8 @@
#define IPU3_CSS_MAX_H 3136
#define IPU3_CSS_MAX_W 4224
-/* filter size from graph settings is fixed as 4 */
-#define FILTER_SIZE 4
-#define MIN_ENVELOPE 8
+/* minimum envelope size (GDC input minus output) should be 4 */
+#define MIN_ENVELOPE 4
/*
* pre-allocated buffer size for CSS ABI, auxiliary frames
@@ -1827,9 +1826,9 @@ int imgu_css_fmt_try(struct imgu_css *css,
vf->width = imgu_css_adjust(vf->width, VF_ALIGN_W);
vf->height = imgu_css_adjust(vf->height, 1);
- s = (bds->width - gdc->width) / 2 - FILTER_SIZE;
+ s = (bds->width - gdc->width) / 2;
env->width = s < MIN_ENVELOPE ? MIN_ENVELOPE : s;
- s = (bds->height - gdc->height) / 2 - FILTER_SIZE;
+ s = (bds->height - gdc->height) / 2;
env->height = s < MIN_ENVELOPE ? MIN_ENVELOPE : s;
ret = imgu_css_find_binary(css, pipe, q, r);
@@ -2251,9 +2250,8 @@ int imgu_css_set_parameters(struct imgu_css *css, unsigned int pipe,
css_pipe->aux_frames[a].height,
css_pipe->rect[g].width,
css_pipe->rect[g].height,
- css_pipe->rect[e].width + FILTER_SIZE,
- css_pipe->rect[e].height +
- FILTER_SIZE);
+ css_pipe->rect[e].width,
+ css_pipe->rect[e].height);
}
}
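With FILTER_SIZE dropped, the GDC envelope becomes half the BDS-to-GDC size difference, clamped to the new MIN_ENVELOPE of 4. A small illustrative helper and worked numbers (mirrors the computation in imgu_css_fmt_try(); the helper itself is not part of the patch):

/* Illustrative only; mirrors the env->width/height computation above. */
static unsigned int env_dim(unsigned int bds, unsigned int gdc)
{
	unsigned int s = (bds - gdc) / 2;

	return s < MIN_ENVELOPE ? MIN_ENVELOPE : s;
}

/*
 * e.g. bds = 1920, gdc = 1900  ->  s = 10, envelope = 10
 *      bds = 1920, gdc = 1916  ->  s = 2,  envelope = MIN_ENVELOPE = 4
 */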
diff --git a/drivers/staging/media/ipu3/ipu3-dmamap.c b/drivers/staging/media/ipu3/ipu3-dmamap.c
index d978a00e1e0b..7431322379f6 100644
--- a/drivers/staging/media/ipu3/ipu3-dmamap.c
+++ b/drivers/staging/media/ipu3/ipu3-dmamap.c
@@ -31,12 +31,11 @@ static void imgu_dmamap_free_buffer(struct page **pages,
* Based on the implementation of __iommu_dma_alloc_pages()
* defined in drivers/iommu/dma-iommu.c
*/
-static struct page **imgu_dmamap_alloc_buffer(size_t size,
- unsigned long order_mask,
- gfp_t gfp)
+static struct page **imgu_dmamap_alloc_buffer(size_t size, gfp_t gfp)
{
struct page **pages;
unsigned int i = 0, count = size >> PAGE_SHIFT;
+ unsigned int order_mask = 1;
const gfp_t high_order_gfp = __GFP_NOWARN | __GFP_NORETRY;
/* Allocate mem for array of page ptrs */
@@ -45,10 +44,6 @@ static struct page **imgu_dmamap_alloc_buffer(size_t size,
if (!pages)
return NULL;
- order_mask &= (2U << MAX_ORDER) - 1;
- if (!order_mask)
- return NULL;
-
gfp |= __GFP_HIGHMEM | __GFP_ZERO;
while (count) {
@@ -99,7 +94,6 @@ void *imgu_dmamap_alloc(struct imgu_device *imgu, struct imgu_css_map *map,
size_t len)
{
unsigned long shift = iova_shift(&imgu->iova_domain);
- unsigned int alloc_sizes = imgu->mmu->pgsize_bitmap;
struct device *dev = &imgu->pci_dev->dev;
size_t size = PAGE_ALIGN(len);
struct page **pages;
@@ -114,8 +108,7 @@ void *imgu_dmamap_alloc(struct imgu_device *imgu, struct imgu_css_map *map,
if (!iova)
return NULL;
- pages = imgu_dmamap_alloc_buffer(size, alloc_sizes >> PAGE_SHIFT,
- GFP_KERNEL);
+ pages = imgu_dmamap_alloc_buffer(size, GFP_KERNEL);
if (!pages)
goto out_free_iova;
@@ -257,7 +250,7 @@ int imgu_dmamap_init(struct imgu_device *imgu)
if (ret)
return ret;
- order = __ffs(imgu->mmu->pgsize_bitmap);
+ order = __ffs(IPU3_PAGE_SIZE);
base_pfn = max_t(unsigned long, 1, imgu->mmu->aperture_start >> order);
init_iova_domain(&imgu->iova_domain, 1UL << order, base_pfn);
diff --git a/drivers/staging/media/ipu3/ipu3-mmu.c b/drivers/staging/media/ipu3/ipu3-mmu.c
index cfc2bdfb14b3..3d969b0522ab 100644
--- a/drivers/staging/media/ipu3/ipu3-mmu.c
+++ b/drivers/staging/media/ipu3/ipu3-mmu.c
@@ -20,9 +20,6 @@
#include "ipu3-mmu.h"
-#define IPU3_PAGE_SHIFT 12
-#define IPU3_PAGE_SIZE (1UL << IPU3_PAGE_SHIFT)
-
#define IPU3_PT_BITS 10
#define IPU3_PT_PTES (1UL << IPU3_PT_BITS)
#define IPU3_PT_SIZE (IPU3_PT_PTES << 2)
@@ -238,62 +235,31 @@ static int __imgu_mmu_map(struct imgu_mmu *mmu, unsigned long iova,
return 0;
}
-/*
- * The following four functions are implemented based on iommu.c
- * drivers/iommu/iommu.c/iommu_pgsize().
+/**
+ * imgu_mmu_map - map a buffer to a physical address
+ *
+ * @info: MMU mappable range
+ * @iova: the virtual address
+ * @paddr: the physical address
+ * @size: length of the mapping
+ *
+ * The function has been adapted from iommu_map() in
+ * drivers/iommu/iommu.c .
*/
-static size_t imgu_mmu_pgsize(unsigned long pgsize_bitmap,
- unsigned long addr_merge, size_t size)
-{
- unsigned int pgsize_idx;
- size_t pgsize;
-
- /* Max page size that still fits into 'size' */
- pgsize_idx = __fls(size);
-
- /* need to consider alignment requirements ? */
- if (likely(addr_merge)) {
- /* Max page size allowed by address */
- unsigned int align_pgsize_idx = __ffs(addr_merge);
-
- pgsize_idx = min(pgsize_idx, align_pgsize_idx);
- }
-
- /* build a mask of acceptable page sizes */
- pgsize = (1UL << (pgsize_idx + 1)) - 1;
-
- /* throw away page sizes not supported by the hardware */
- pgsize &= pgsize_bitmap;
-
- /* make sure we're still sane */
- WARN_ON(!pgsize);
-
- /* pick the biggest page */
- pgsize_idx = __fls(pgsize);
- pgsize = 1UL << pgsize_idx;
-
- return pgsize;
-}
-
-/* drivers/iommu/iommu.c/iommu_map() */
int imgu_mmu_map(struct imgu_mmu_info *info, unsigned long iova,
phys_addr_t paddr, size_t size)
{
struct imgu_mmu *mmu = to_imgu_mmu(info);
- unsigned int min_pagesz;
int ret = 0;
- /* find out the minimum page size supported */
- min_pagesz = 1 << __ffs(mmu->geometry.pgsize_bitmap);
-
/*
* both the virtual address and the physical one, as well as
* the size of the mapping, must be aligned (at least) to the
* size of the smallest page supported by the hardware
*/
- if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
- dev_err(mmu->dev, "unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
- iova, &paddr, size, min_pagesz);
+ if (!IS_ALIGNED(iova | paddr | size, IPU3_PAGE_SIZE)) {
+ dev_err(mmu->dev, "unaligned: iova 0x%lx pa %pa size 0x%zx\n",
+ iova, &paddr, size);
return -EINVAL;
}
@@ -301,19 +267,15 @@ int imgu_mmu_map(struct imgu_mmu_info *info, unsigned long iova,
iova, &paddr, size);
while (size) {
- size_t pgsize = imgu_mmu_pgsize(mmu->geometry.pgsize_bitmap,
- iova | paddr, size);
-
- dev_dbg(mmu->dev, "mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
- iova, &paddr, pgsize);
+ dev_dbg(mmu->dev, "mapping: iova 0x%lx pa %pa\n", iova, &paddr);
ret = __imgu_mmu_map(mmu, iova, paddr);
if (ret)
break;
- iova += pgsize;
- paddr += pgsize;
- size -= pgsize;
+ iova += IPU3_PAGE_SIZE;
+ paddr += IPU3_PAGE_SIZE;
+ size -= IPU3_PAGE_SIZE;
}
call_if_imgu_is_powered(mmu, imgu_mmu_tlb_invalidate);
@@ -321,28 +283,36 @@ int imgu_mmu_map(struct imgu_mmu_info *info, unsigned long iova,
return ret;
}
-/* drivers/iommu/iommu.c/default_iommu_map_sg() */
+/**
+ * imgu_mmu_map_sg - Map a scatterlist
+ *
+ * @info: MMU mappable range
+ * @iova: the virtual address
+ * @sg: the scatterlist to map
+ * @nents: number of entries in the scatterlist
+ *
+ * The function has been adapted from default_iommu_map_sg() in
+ * drivers/iommu/iommu.c .
+ */
size_t imgu_mmu_map_sg(struct imgu_mmu_info *info, unsigned long iova,
struct scatterlist *sg, unsigned int nents)
{
struct imgu_mmu *mmu = to_imgu_mmu(info);
struct scatterlist *s;
size_t s_length, mapped = 0;
- unsigned int i, min_pagesz;
+ unsigned int i;
int ret;
- min_pagesz = 1 << __ffs(mmu->geometry.pgsize_bitmap);
-
for_each_sg(sg, s, nents, i) {
phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;
s_length = s->length;
- if (!IS_ALIGNED(s->offset, min_pagesz))
+ if (!IS_ALIGNED(s->offset, IPU3_PAGE_SIZE))
goto out_err;
- /* must be min_pagesz aligned to be mapped singlely */
- if (i == nents - 1 && !IS_ALIGNED(s->length, min_pagesz))
+ /* must be IPU3_PAGE_SIZE aligned to be mapped singlely */
+ if (i == nents - 1 && !IS_ALIGNED(s->length, IPU3_PAGE_SIZE))
s_length = PAGE_ALIGN(s->length);
ret = imgu_mmu_map(info, iova + mapped, phys, s_length);
@@ -394,25 +364,30 @@ static size_t __imgu_mmu_unmap(struct imgu_mmu *mmu,
return unmap;
}
-/* drivers/iommu/iommu.c/iommu_unmap() */
+/**
+ * imgu_mmu_unmap - Unmap a buffer
+ *
+ * @info: MMU mappable range
+ * @iova: the virtual address
+ * @size: the length of the buffer
+ *
+ * The function has been adapted from iommu_unmap() in
+ * drivers/iommu/iommu.c .
+ */
size_t imgu_mmu_unmap(struct imgu_mmu_info *info, unsigned long iova,
size_t size)
{
struct imgu_mmu *mmu = to_imgu_mmu(info);
size_t unmapped_page, unmapped = 0;
- unsigned int min_pagesz;
-
- /* find out the minimum page size supported */
- min_pagesz = 1 << __ffs(mmu->geometry.pgsize_bitmap);
/*
* The virtual address, as well as the size of the mapping, must be
* aligned (at least) to the size of the smallest page supported
* by the hardware
*/
- if (!IS_ALIGNED(iova | size, min_pagesz)) {
- dev_err(mmu->dev, "unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
- iova, size, min_pagesz);
+ if (!IS_ALIGNED(iova | size, IPU3_PAGE_SIZE)) {
+ dev_err(mmu->dev, "unaligned: iova 0x%lx size 0x%zx\n",
+ iova, size);
return -EINVAL;
}
@@ -423,10 +398,7 @@ size_t imgu_mmu_unmap(struct imgu_mmu_info *info, unsigned long iova,
* or we hit an area that isn't mapped.
*/
while (unmapped < size) {
- size_t pgsize = imgu_mmu_pgsize(mmu->geometry.pgsize_bitmap,
- iova, size - unmapped);
-
- unmapped_page = __imgu_mmu_unmap(mmu, iova, pgsize);
+ unmapped_page = __imgu_mmu_unmap(mmu, iova, IPU3_PAGE_SIZE);
if (!unmapped_page)
break;
@@ -444,6 +416,7 @@ size_t imgu_mmu_unmap(struct imgu_mmu_info *info, unsigned long iova,
/**
* imgu_mmu_init() - initialize IPU3 MMU block
+ *
* @parent: struct device parent
* @base: IOMEM base of hardware registers.
*
@@ -505,7 +478,6 @@ struct imgu_mmu_info *imgu_mmu_init(struct device *parent, void __iomem *base)
mmu->geometry.aperture_start = 0;
mmu->geometry.aperture_end = DMA_BIT_MASK(IPU3_MMU_ADDRESS_BITS);
- mmu->geometry.pgsize_bitmap = IPU3_PAGE_SIZE;
return &mmu->geometry;
@@ -523,7 +495,8 @@ fail_group:
/**
* imgu_mmu_exit() - clean up IPU3 MMU block
- * @info: IPU3 MMU private data
+ *
+ * @info: MMU mappable range
*/
void imgu_mmu_exit(struct imgu_mmu_info *info)
{
diff --git a/drivers/staging/media/ipu3/ipu3-mmu.h b/drivers/staging/media/ipu3/ipu3-mmu.h
index fa58827eb19c..a5f0bca7e7e0 100644
--- a/drivers/staging/media/ipu3/ipu3-mmu.h
+++ b/drivers/staging/media/ipu3/ipu3-mmu.h
@@ -5,17 +5,18 @@
#ifndef __IPU3_MMU_H
#define __IPU3_MMU_H
+#define IPU3_PAGE_SHIFT 12
+#define IPU3_PAGE_SIZE (1UL << IPU3_PAGE_SHIFT)
+
/**
* struct imgu_mmu_info - Describes mmu geometry
*
* @aperture_start: First address that can be mapped
* @aperture_end: Last address that can be mapped
- * @pgsize_bitmap: Bitmap of page sizes in use
*/
struct imgu_mmu_info {
dma_addr_t aperture_start;
dma_addr_t aperture_end;
- unsigned long pgsize_bitmap;
};
struct device;
diff --git a/drivers/staging/media/ipu3/ipu3-v4l2.c b/drivers/staging/media/ipu3/ipu3-v4l2.c
index a7bc22040ed8..3c7ad1eed434 100644
--- a/drivers/staging/media/ipu3/ipu3-v4l2.c
+++ b/drivers/staging/media/ipu3/ipu3-v4l2.c
@@ -955,12 +955,12 @@ static const struct v4l2_file_operations imgu_v4l2_fops = {
static const struct v4l2_ioctl_ops imgu_v4l2_ioctl_ops = {
.vidioc_querycap = imgu_vidioc_querycap,
- .vidioc_enum_fmt_vid_cap_mplane = vidioc_enum_fmt_vid_cap,
+ .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
.vidioc_g_fmt_vid_cap_mplane = imgu_vidioc_g_fmt,
.vidioc_s_fmt_vid_cap_mplane = imgu_vidioc_s_fmt,
.vidioc_try_fmt_vid_cap_mplane = imgu_vidioc_try_fmt,
- .vidioc_enum_fmt_vid_out_mplane = vidioc_enum_fmt_vid_out,
+ .vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid_out,
.vidioc_g_fmt_vid_out_mplane = imgu_vidioc_g_fmt,
.vidioc_s_fmt_vid_out_mplane = imgu_vidioc_s_fmt,
.vidioc_try_fmt_vid_out_mplane = imgu_vidioc_try_fmt,
diff --git a/drivers/staging/media/meson/vdec/Kconfig b/drivers/staging/media/meson/vdec/Kconfig
new file mode 100644
index 000000000000..9e1450193392
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/Kconfig
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config VIDEO_MESON_VDEC
+ tristate "Amlogic video decoder driver"
+ depends on VIDEO_DEV && VIDEO_V4L2 && HAS_DMA
+ depends on ARCH_MESON || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ select MESON_CANVAS
+ help
+ Support for the video decoder found in gxbb/gxl/gxm chips.
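A minimal config fragment to build the decoder as a module, assuming a base config (e.g. the arm64 defconfig) that already satisfies the VIDEO_DEV/VIDEO_V4L2 and ARCH_MESON dependencies; the select statements above pull in the remaining pieces:

# Illustrative fragment; VIDEOBUF2_DMA_CONTIG, V4L2_MEM2MEM_DEV and
# MESON_CANVAS are selected automatically by the entry above.
CONFIG_VIDEO_MESON_VDEC=m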
diff --git a/drivers/staging/media/meson/vdec/Makefile b/drivers/staging/media/meson/vdec/Makefile
new file mode 100644
index 000000000000..6bea129084b7
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for Amlogic meson video decoder driver
+
+meson-vdec-objs = esparser.o vdec.o vdec_helpers.o vdec_platform.o
+meson-vdec-objs += vdec_1.o
+meson-vdec-objs += codec_mpeg12.o
+
+obj-$(CONFIG_VIDEO_MESON_VDEC) += meson-vdec.o
diff --git a/drivers/staging/media/meson/vdec/TODO b/drivers/staging/media/meson/vdec/TODO
new file mode 100644
index 000000000000..70ae990cf13b
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/TODO
@@ -0,0 +1,8 @@
+This driver is in staging until the V4L2 documentation about stateful video
+decoders and the corresponding compliance tests are finalized.
+
+At the moment it is not guaranteed to work properly with a userspace
+stack that follows the latest version of the specification, especially
+for compression standards like MPEG1/2, where the driver does not support
+dynamic resolution switching (including the initial resolution change used
+to determine the coded resolution).
diff --git a/drivers/staging/media/meson/vdec/codec_mpeg12.c b/drivers/staging/media/meson/vdec/codec_mpeg12.c
new file mode 100644
index 000000000000..48869cc3d973
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/codec_mpeg12.c
@@ -0,0 +1,210 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2018 BayLibre, SAS
+ * Author: Maxime Jourdan <mjourdan@baylibre.com>
+ */
+
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "codec_mpeg12.h"
+#include "dos_regs.h"
+#include "vdec_helpers.h"
+
+#define SIZE_WORKSPACE SZ_128K
+/* Offset subtracted by the firmware from the workspace paddr */
+#define WORKSPACE_OFFSET (5 * SZ_1K)
+
+/* map firmware registers to known MPEG1/2 functions */
+#define MREG_SEQ_INFO AV_SCRATCH_4
+ #define MPEG2_SEQ_DAR_MASK GENMASK(3, 0)
+ #define MPEG2_DAR_4_3 2
+ #define MPEG2_DAR_16_9 3
+ #define MPEG2_DAR_221_100 4
+#define MREG_PIC_INFO AV_SCRATCH_5
+#define MREG_PIC_WIDTH AV_SCRATCH_6
+#define MREG_PIC_HEIGHT AV_SCRATCH_7
+#define MREG_BUFFERIN AV_SCRATCH_8
+#define MREG_BUFFEROUT AV_SCRATCH_9
+#define MREG_CMD AV_SCRATCH_A
+#define MREG_CO_MV_START AV_SCRATCH_B
+#define MREG_ERROR_COUNT AV_SCRATCH_C
+#define MREG_FRAME_OFFSET AV_SCRATCH_D
+#define MREG_WAIT_BUFFER AV_SCRATCH_E
+#define MREG_FATAL_ERROR AV_SCRATCH_F
+
+#define PICINFO_PROG 0x00008000
+#define PICINFO_TOP_FIRST 0x00002000
+
+struct codec_mpeg12 {
+ /* Buffer for the MPEG1/2 Workspace */
+ void *workspace_vaddr;
+ dma_addr_t workspace_paddr;
+};
+
+static const u8 eos_sequence[SZ_1K] = { 0x00, 0x00, 0x01, 0xB7 };
+
+static const u8 *codec_mpeg12_eos_sequence(u32 *len)
+{
+ *len = ARRAY_SIZE(eos_sequence);
+ return eos_sequence;
+}
+
+static int codec_mpeg12_can_recycle(struct amvdec_core *core)
+{
+ return !amvdec_read_dos(core, MREG_BUFFERIN);
+}
+
+static void codec_mpeg12_recycle(struct amvdec_core *core, u32 buf_idx)
+{
+ amvdec_write_dos(core, MREG_BUFFERIN, buf_idx + 1);
+}
+
+static int codec_mpeg12_start(struct amvdec_session *sess)
+{
+ struct amvdec_core *core = sess->core;
+ struct codec_mpeg12 *mpeg12;
+ int ret;
+
+ mpeg12 = kzalloc(sizeof(*mpeg12), GFP_KERNEL);
+ if (!mpeg12)
+ return -ENOMEM;
+
+ /* Allocate some memory for the MPEG1/2 decoder's state */
+ mpeg12->workspace_vaddr = dma_alloc_coherent(core->dev, SIZE_WORKSPACE,
+ &mpeg12->workspace_paddr,
+ GFP_KERNEL);
+ if (!mpeg12->workspace_vaddr) {
+ dev_err(core->dev, "Failed to request MPEG 1/2 Workspace\n");
+ ret = -ENOMEM;
+ goto free_mpeg12;
+ }
+
+ ret = amvdec_set_canvases(sess, (u32[]){ AV_SCRATCH_0, 0 },
+ (u32[]){ 8, 0 });
+ if (ret)
+ goto free_workspace;
+
+ amvdec_write_dos(core, POWER_CTL_VLD, BIT(4));
+ amvdec_write_dos(core, MREG_CO_MV_START,
+ mpeg12->workspace_paddr + WORKSPACE_OFFSET);
+
+ amvdec_write_dos(core, MPEG1_2_REG, 0);
+ amvdec_write_dos(core, PSCALE_CTRL, 0);
+ amvdec_write_dos(core, PIC_HEAD_INFO, 0x380);
+ amvdec_write_dos(core, M4_CONTROL_REG, 0);
+ amvdec_write_dos(core, MREG_BUFFERIN, 0);
+ amvdec_write_dos(core, MREG_BUFFEROUT, 0);
+ amvdec_write_dos(core, MREG_CMD, (sess->width << 16) | sess->height);
+ amvdec_write_dos(core, MREG_ERROR_COUNT, 0);
+ amvdec_write_dos(core, MREG_FATAL_ERROR, 0);
+ amvdec_write_dos(core, MREG_WAIT_BUFFER, 0);
+
+ sess->keyframe_found = 1;
+ sess->priv = mpeg12;
+
+ return 0;
+
+free_workspace:
+ dma_free_coherent(core->dev, SIZE_WORKSPACE, mpeg12->workspace_vaddr,
+ mpeg12->workspace_paddr);
+free_mpeg12:
+ kfree(mpeg12);
+
+ return ret;
+}
+
+static int codec_mpeg12_stop(struct amvdec_session *sess)
+{
+ struct codec_mpeg12 *mpeg12 = sess->priv;
+ struct amvdec_core *core = sess->core;
+
+ if (mpeg12->workspace_vaddr)
+ dma_free_coherent(core->dev, SIZE_WORKSPACE,
+ mpeg12->workspace_vaddr,
+ mpeg12->workspace_paddr);
+
+ return 0;
+}
+
+static void codec_mpeg12_update_dar(struct amvdec_session *sess)
+{
+ struct amvdec_core *core = sess->core;
+ u32 seq = amvdec_read_dos(core, MREG_SEQ_INFO);
+ u32 ar = seq & MPEG2_SEQ_DAR_MASK;
+
+ switch (ar) {
+ case MPEG2_DAR_4_3:
+ amvdec_set_par_from_dar(sess, 4, 3);
+ break;
+ case MPEG2_DAR_16_9:
+ amvdec_set_par_from_dar(sess, 16, 9);
+ break;
+ case MPEG2_DAR_221_100:
+ amvdec_set_par_from_dar(sess, 221, 100);
+ break;
+ default:
+ sess->pixelaspect.numerator = 1;
+ sess->pixelaspect.denominator = 1;
+ break;
+ }
+}
+
+static irqreturn_t codec_mpeg12_threaded_isr(struct amvdec_session *sess)
+{
+ struct amvdec_core *core = sess->core;
+ u32 reg;
+ u32 pic_info;
+ u32 is_progressive;
+ u32 buffer_index;
+ u32 field = V4L2_FIELD_NONE;
+ u32 offset;
+
+ amvdec_write_dos(core, ASSIST_MBOX1_CLR_REG, 1);
+ reg = amvdec_read_dos(core, MREG_FATAL_ERROR);
+ if (reg == 1) {
+ dev_err(core->dev, "MPEG1/2 fatal error\n");
+ amvdec_abort(sess);
+ return IRQ_HANDLED;
+ }
+
+ reg = amvdec_read_dos(core, MREG_BUFFEROUT);
+ if (!reg)
+ return IRQ_HANDLED;
+
+ /* Unclear what this means */
+ if ((reg & GENMASK(23, 17)) == GENMASK(23, 17))
+ goto end;
+
+ pic_info = amvdec_read_dos(core, MREG_PIC_INFO);
+ is_progressive = pic_info & PICINFO_PROG;
+
+ if (!is_progressive)
+ field = (pic_info & PICINFO_TOP_FIRST) ?
+ V4L2_FIELD_INTERLACED_TB :
+ V4L2_FIELD_INTERLACED_BT;
+
+ codec_mpeg12_update_dar(sess);
+ buffer_index = ((reg & 0xf) - 1) & 7;
+ offset = amvdec_read_dos(core, MREG_FRAME_OFFSET);
+ amvdec_dst_buf_done_idx(sess, buffer_index, offset, field);
+
+end:
+ amvdec_write_dos(core, MREG_BUFFEROUT, 0);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t codec_mpeg12_isr(struct amvdec_session *sess)
+{
+ return IRQ_WAKE_THREAD;
+}
+
+struct amvdec_codec_ops codec_mpeg12_ops = {
+ .start = codec_mpeg12_start,
+ .stop = codec_mpeg12_stop,
+ .isr = codec_mpeg12_isr,
+ .threaded_isr = codec_mpeg12_threaded_isr,
+ .can_recycle = codec_mpeg12_can_recycle,
+ .recycle = codec_mpeg12_recycle,
+ .eos_sequence = codec_mpeg12_eos_sequence,
+};
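codec_mpeg12_ops is consumed by the common vdec core rather than called directly; a rough sketch of the recycle handshake under that assumption (the wrapper is hypothetical and simplified — the core presumably retries when the firmware is not yet ready, which is omitted here):

/* Hypothetical, simplified core-side use of the ops table above. */
static void example_recycle(struct amvdec_session *sess, u32 buf_idx)
{
	struct amvdec_codec_ops *ops = sess->fmt_out->codec_ops;

	/* MREG_BUFFERIN must be clear before handing back an index */
	if (ops->can_recycle(sess->core))
		ops->recycle(sess->core, buf_idx);
}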
diff --git a/drivers/staging/media/meson/vdec/codec_mpeg12.h b/drivers/staging/media/meson/vdec/codec_mpeg12.h
new file mode 100644
index 000000000000..43cab5f39ca0
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/codec_mpeg12.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2018 BayLibre, SAS
+ * Author: Maxime Jourdan <mjourdan@baylibre.com>
+ */
+
+#ifndef __MESON_VDEC_CODEC_MPEG12_H_
+#define __MESON_VDEC_CODEC_MPEG12_H_
+
+#include "vdec.h"
+
+extern struct amvdec_codec_ops codec_mpeg12_ops;
+
+#endif
diff --git a/drivers/staging/media/meson/vdec/dos_regs.h b/drivers/staging/media/meson/vdec/dos_regs.h
new file mode 100644
index 000000000000..abd810542dbb
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/dos_regs.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2018 BayLibre, SAS
+ * Author: Maxime Jourdan <mjourdan@baylibre.com>
+ */
+
+#ifndef __MESON_VDEC_DOS_REGS_H_
+#define __MESON_VDEC_DOS_REGS_H_
+
+/* DOS registers */
+#define VDEC_ASSIST_AMR1_INT8 0x00b4
+
+#define ASSIST_MBOX1_CLR_REG 0x01d4
+#define ASSIST_MBOX1_MASK 0x01d8
+
+#define MPSR 0x0c04
+#define MCPU_INTR_MSK 0x0c10
+#define CPSR 0x0c84
+
+#define IMEM_DMA_CTRL 0x0d00
+#define IMEM_DMA_ADR 0x0d04
+#define IMEM_DMA_COUNT 0x0d08
+#define LMEM_DMA_CTRL 0x0d40
+
+#define MC_STATUS0 0x2424
+#define MC_CTRL1 0x242c
+
+#define PSCALE_RST 0x2440
+#define PSCALE_CTRL 0x2444
+#define PSCALE_BMEM_ADDR 0x247c
+#define PSCALE_BMEM_DAT 0x2480
+
+#define DBLK_CTRL 0x2544
+#define DBLK_STATUS 0x254c
+
+#define GCLK_EN 0x260c
+#define MDEC_PIC_DC_CTRL 0x2638
+#define MDEC_PIC_DC_STATUS 0x263c
+#define ANC0_CANVAS_ADDR 0x2640
+#define MDEC_PIC_DC_THRESH 0x26e0
+
+/* Firmware interface registers */
+#define AV_SCRATCH_0 0x2700
+#define AV_SCRATCH_1 0x2704
+#define AV_SCRATCH_2 0x2708
+#define AV_SCRATCH_3 0x270c
+#define AV_SCRATCH_4 0x2710
+#define AV_SCRATCH_5 0x2714
+#define AV_SCRATCH_6 0x2718
+#define AV_SCRATCH_7 0x271c
+#define AV_SCRATCH_8 0x2720
+#define AV_SCRATCH_9 0x2724
+#define AV_SCRATCH_A 0x2728
+#define AV_SCRATCH_B 0x272c
+#define AV_SCRATCH_C 0x2730
+#define AV_SCRATCH_D 0x2734
+#define AV_SCRATCH_E 0x2738
+#define AV_SCRATCH_F 0x273c
+#define AV_SCRATCH_G 0x2740
+#define AV_SCRATCH_H 0x2744
+#define AV_SCRATCH_I 0x2748
+#define AV_SCRATCH_J 0x274c
+#define AV_SCRATCH_K 0x2750
+#define AV_SCRATCH_L 0x2754
+
+#define MPEG1_2_REG 0x3004
+#define PIC_HEAD_INFO 0x300c
+#define POWER_CTL_VLD 0x3020
+#define M4_CONTROL_REG 0x30a4
+
+/* Stream Buffer (stbuf) regs */
+#define VLD_MEM_VIFIFO_START_PTR 0x3100
+#define VLD_MEM_VIFIFO_CURR_PTR 0x3104
+#define VLD_MEM_VIFIFO_END_PTR 0x3108
+#define VLD_MEM_VIFIFO_CONTROL 0x3110
+ #define MEM_FIFO_CNT_BIT 16
+ #define MEM_FILL_ON_LEVEL BIT(10)
+ #define MEM_CTRL_EMPTY_EN BIT(2)
+ #define MEM_CTRL_FILL_EN BIT(1)
+#define VLD_MEM_VIFIFO_WP 0x3114
+#define VLD_MEM_VIFIFO_RP 0x3118
+#define VLD_MEM_VIFIFO_LEVEL 0x311c
+#define VLD_MEM_VIFIFO_BUF_CNTL 0x3120
+ #define MEM_BUFCTRL_MANUAL BIT(1)
+#define VLD_MEM_VIFIFO_WRAP_COUNT 0x3144
+
+#define DCAC_DMA_CTRL 0x3848
+
+#define DOS_SW_RESET0 0xfc00
+#define DOS_GCLK_EN0 0xfc04
+#define DOS_GEN_CTRL0 0xfc08
+#define DOS_MEM_PD_VDEC 0xfcc0
+#define DOS_MEM_PD_HEVC 0xfccc
+#define DOS_SW_RESET3 0xfcd0
+#define DOS_GCLK_EN3 0xfcd4
+#define DOS_VDEC_MCRCC_STALL_CTRL 0xfd00
+
+#endif
diff --git a/drivers/staging/media/meson/vdec/esparser.c b/drivers/staging/media/meson/vdec/esparser.c
new file mode 100644
index 000000000000..3a21a8cec799
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/esparser.c
@@ -0,0 +1,324 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2018 BayLibre, SAS
+ * Author: Maxime Jourdan <mjourdan@baylibre.com>
+ *
+ * The Elementary Stream Parser is a HW bitstream parser.
+ * It reads bitstream buffers and feeds them to the VIFIFO
+ */
+
+#include <linux/init.h>
+#include <linux/ioctl.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/reset.h>
+#include <linux/interrupt.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "dos_regs.h"
+#include "esparser.h"
+#include "vdec_helpers.h"
+
+/* PARSER REGS (CBUS) */
+#define PARSER_CONTROL 0x00
+ #define ES_PACK_SIZE_BIT 8
+ #define ES_WRITE BIT(5)
+ #define ES_SEARCH BIT(1)
+ #define ES_PARSER_START BIT(0)
+#define PARSER_FETCH_ADDR 0x4
+#define PARSER_FETCH_CMD 0x8
+#define PARSER_CONFIG 0x14
+ #define PS_CFG_MAX_FETCH_CYCLE_BIT 0
+ #define PS_CFG_STARTCODE_WID_24_BIT 10
+ #define PS_CFG_MAX_ES_WR_CYCLE_BIT 12
+ #define PS_CFG_PFIFO_EMPTY_CNT_BIT 16
+#define PFIFO_WR_PTR 0x18
+#define PFIFO_RD_PTR 0x1c
+#define PARSER_SEARCH_PATTERN 0x24
+ #define ES_START_CODE_PATTERN 0x00000100
+#define PARSER_SEARCH_MASK 0x28
+ #define ES_START_CODE_MASK 0xffffff00
+ #define FETCH_ENDIAN_BIT 27
+#define PARSER_INT_ENABLE 0x2c
+ #define PARSER_INT_HOST_EN_BIT 8
+#define PARSER_INT_STATUS 0x30
+ #define PARSER_INTSTAT_SC_FOUND 1
+#define PARSER_ES_CONTROL 0x5c
+#define PARSER_VIDEO_START_PTR 0x80
+#define PARSER_VIDEO_END_PTR 0x84
+#define PARSER_VIDEO_WP 0x88
+#define PARSER_VIDEO_HOLE 0x90
+
+#define SEARCH_PATTERN_LEN 512
+
+static DECLARE_WAIT_QUEUE_HEAD(wq);
+static int search_done;
+
+static irqreturn_t esparser_isr(int irq, void *dev)
+{
+ int int_status;
+ struct amvdec_core *core = dev;
+
+ int_status = amvdec_read_parser(core, PARSER_INT_STATUS);
+ amvdec_write_parser(core, PARSER_INT_STATUS, int_status);
+
+ if (int_status & PARSER_INTSTAT_SC_FOUND) {
+ amvdec_write_parser(core, PFIFO_RD_PTR, 0);
+ amvdec_write_parser(core, PFIFO_WR_PTR, 0);
+ search_done = 1;
+ wake_up_interruptible(&wq);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* Pad the packet to at least 4 KiB, otherwise the VDEC unit won't trigger
+ * ISRs.
+ * Also append a start code 000001ff at the end to trigger
+ * the ESPARSER interrupt.
+ */
+static u32 esparser_pad_start_code(struct vb2_buffer *vb)
+{
+ u32 payload_size = vb2_get_plane_payload(vb, 0);
+ u32 pad_size = 0;
+ u8 *vaddr = vb2_plane_vaddr(vb, 0) + payload_size;
+
+ if (payload_size < ESPARSER_MIN_PACKET_SIZE) {
+ pad_size = ESPARSER_MIN_PACKET_SIZE - payload_size;
+ memset(vaddr, 0, pad_size);
+ }
+
+ memset(vaddr + pad_size, 0, SEARCH_PATTERN_LEN);
+ vaddr[pad_size] = 0x00;
+ vaddr[pad_size + 1] = 0x00;
+ vaddr[pad_size + 2] = 0x01;
+ vaddr[pad_size + 3] = 0xff;
+
+ return pad_size;
+}
+
+static int
+esparser_write_data(struct amvdec_core *core, dma_addr_t addr, u32 size)
+{
+ amvdec_write_parser(core, PFIFO_RD_PTR, 0);
+ amvdec_write_parser(core, PFIFO_WR_PTR, 0);
+ amvdec_write_parser(core, PARSER_CONTROL,
+ ES_WRITE |
+ ES_PARSER_START |
+ ES_SEARCH |
+ (size << ES_PACK_SIZE_BIT));
+
+ amvdec_write_parser(core, PARSER_FETCH_ADDR, addr);
+ amvdec_write_parser(core, PARSER_FETCH_CMD,
+ (7 << FETCH_ENDIAN_BIT) |
+ (size + SEARCH_PATTERN_LEN));
+
+ search_done = 0;
+ return wait_event_interruptible_timeout(wq, search_done, (HZ / 5));
+}
+
+static u32 esparser_vififo_get_free_space(struct amvdec_session *sess)
+{
+ u32 vififo_usage;
+ struct amvdec_ops *vdec_ops = sess->fmt_out->vdec_ops;
+ struct amvdec_core *core = sess->core;
+
+ vififo_usage = vdec_ops->vififo_level(sess);
+ vififo_usage += amvdec_read_parser(core, PARSER_VIDEO_HOLE);
+ vififo_usage += (6 * SZ_1K); /* 6 KiB internal fifo */
+
+ if (vififo_usage > sess->vififo_size) {
+ dev_warn(sess->core->dev,
+ "VIFIFO usage (%u) > VIFIFO size (%u)\n",
+ vififo_usage, sess->vififo_size);
+ return 0;
+ }
+
+ return sess->vififo_size - vififo_usage;
+}
+
+int esparser_queue_eos(struct amvdec_core *core, const u8 *data, u32 len)
+{
+ struct device *dev = core->dev;
+ void *eos_vaddr;
+ dma_addr_t eos_paddr;
+ int ret;
+
+ eos_vaddr = dma_alloc_coherent(dev, len + SEARCH_PATTERN_LEN,
+ &eos_paddr, GFP_KERNEL);
+ if (!eos_vaddr)
+ return -ENOMEM;
+
+ memcpy(eos_vaddr, data, len);
+ ret = esparser_write_data(core, eos_paddr, len);
+ dma_free_coherent(dev, len + SEARCH_PATTERN_LEN,
+ eos_vaddr, eos_paddr);
+
+ return ret;
+}
+
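+/* The parser write pointer only covers the current VIFIFO span; track
+ * wrap-arounds to build a monotonically increasing offset that can be
+ * matched against the timestamp list.
+ */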
+static u32 esparser_get_offset(struct amvdec_session *sess)
+{
+ struct amvdec_core *core = sess->core;
+ u32 offset = amvdec_read_parser(core, PARSER_VIDEO_WP) -
+ sess->vififo_paddr;
+
+ if (offset < sess->last_offset)
+ sess->wrap_count++;
+
+ sess->last_offset = offset;
+ offset += (sess->wrap_count * sess->vififo_size);
+
+ return offset;
+}
+
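+/* Try to write one src buffer to the ESPARSER. Returns -EAGAIN when the
+ * VIFIFO lacks room or when as many packets are already in flight as there
+ * are available capture buffers, so the caller stops queuing for now.
+ */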
+static int
+esparser_queue(struct amvdec_session *sess, struct vb2_v4l2_buffer *vbuf)
+{
+ int ret;
+ struct vb2_buffer *vb = &vbuf->vb2_buf;
+ struct amvdec_core *core = sess->core;
+ struct amvdec_codec_ops *codec_ops = sess->fmt_out->codec_ops;
+ u32 num_dst_bufs = 0;
+ u32 payload_size = vb2_get_plane_payload(vb, 0);
+ dma_addr_t phy = vb2_dma_contig_plane_dma_addr(vb, 0);
+ u32 offset;
+ u32 pad_size;
+
+ if (codec_ops->num_pending_bufs)
+ num_dst_bufs = codec_ops->num_pending_bufs(sess);
+
+ num_dst_bufs += v4l2_m2m_num_dst_bufs_ready(sess->m2m_ctx);
+
+ if (esparser_vififo_get_free_space(sess) < payload_size ||
+ atomic_read(&sess->esparser_queued_bufs) >= num_dst_bufs)
+ return -EAGAIN;
+
+ v4l2_m2m_src_buf_remove_by_buf(sess->m2m_ctx, vbuf);
+
+ offset = esparser_get_offset(sess);
+
+ amvdec_add_ts_reorder(sess, vb->timestamp, offset);
+ dev_dbg(core->dev, "esparser: ts = %llu pld_size = %u offset = %08X\n",
+ vb->timestamp, payload_size, offset);
+
+ pad_size = esparser_pad_start_code(vb);
+ ret = esparser_write_data(core, phy, payload_size + pad_size);
+
+ if (ret <= 0) {
+ dev_warn(core->dev, "esparser: input parsing error\n");
+ amvdec_remove_ts(sess, vb->timestamp);
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
+ amvdec_write_parser(core, PARSER_FETCH_CMD, 0);
+
+ return 0;
+ }
+
+ /* We need to wait until we parse the first keyframe.
+ * All buffers prior to the first keyframe must be dropped.
+ */
+ if (!sess->keyframe_found)
+ usleep_range(1000, 2000);
+
+ if (sess->keyframe_found)
+ atomic_inc(&sess->esparser_queued_bufs);
+ else
+ amvdec_remove_ts(sess, vb->timestamp);
+
+ vbuf->flags = 0;
+ vbuf->field = V4L2_FIELD_NONE;
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_DONE);
+
+ return 0;
+}
+
+void esparser_queue_all_src(struct work_struct *work)
+{
+ struct v4l2_m2m_buffer *buf, *n;
+ struct amvdec_session *sess =
+ container_of(work, struct amvdec_session, esparser_queue_work);
+
+ mutex_lock(&sess->lock);
+ v4l2_m2m_for_each_src_buf_safe(sess->m2m_ctx, buf, n) {
+ if (sess->should_stop)
+ break;
+
+ if (esparser_queue(sess, &buf->vb) < 0)
+ break;
+ }
+ mutex_unlock(&sess->lock);
+}
+
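+/* Reset the parser, program the start code search pattern and the VIFIFO
+ * window, then enable the start-code-found interrupt.
+ */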
+int esparser_power_up(struct amvdec_session *sess)
+{
+ struct amvdec_core *core = sess->core;
+ struct amvdec_ops *vdec_ops = sess->fmt_out->vdec_ops;
+
+ reset_control_reset(core->esparser_reset);
+ amvdec_write_parser(core, PARSER_CONFIG,
+ (10 << PS_CFG_PFIFO_EMPTY_CNT_BIT) |
+ (1 << PS_CFG_MAX_ES_WR_CYCLE_BIT) |
+ (16 << PS_CFG_MAX_FETCH_CYCLE_BIT));
+
+ amvdec_write_parser(core, PFIFO_RD_PTR, 0);
+ amvdec_write_parser(core, PFIFO_WR_PTR, 0);
+
+ amvdec_write_parser(core, PARSER_SEARCH_PATTERN,
+ ES_START_CODE_PATTERN);
+ amvdec_write_parser(core, PARSER_SEARCH_MASK, ES_START_CODE_MASK);
+
+ amvdec_write_parser(core, PARSER_CONFIG,
+ (10 << PS_CFG_PFIFO_EMPTY_CNT_BIT) |
+ (1 << PS_CFG_MAX_ES_WR_CYCLE_BIT) |
+ (16 << PS_CFG_MAX_FETCH_CYCLE_BIT) |
+ (2 << PS_CFG_STARTCODE_WID_24_BIT));
+
+ amvdec_write_parser(core, PARSER_CONTROL,
+ (ES_SEARCH | ES_PARSER_START));
+
+ amvdec_write_parser(core, PARSER_VIDEO_START_PTR, sess->vififo_paddr);
+ amvdec_write_parser(core, PARSER_VIDEO_END_PTR,
+ sess->vififo_paddr + sess->vififo_size - 8);
+ amvdec_write_parser(core, PARSER_ES_CONTROL,
+ amvdec_read_parser(core, PARSER_ES_CONTROL) & ~1);
+
+ if (vdec_ops->conf_esparser)
+ vdec_ops->conf_esparser(sess);
+
+ amvdec_write_parser(core, PARSER_INT_STATUS, 0xffff);
+ amvdec_write_parser(core, PARSER_INT_ENABLE,
+ BIT(PARSER_INT_HOST_EN_BIT));
+
+ return 0;
+}
+
+int esparser_init(struct platform_device *pdev, struct amvdec_core *core)
+{
+ struct device *dev = &pdev->dev;
+ int ret;
+ int irq;
+
+ irq = platform_get_irq_byname(pdev, "esparser");
+ if (irq < 0) {
+		dev_err(dev, "Failed to get ESPARSER IRQ from DT\n");
+ return irq;
+ }
+
+ ret = devm_request_irq(dev, irq, esparser_isr, IRQF_SHARED,
+ "esparserirq", core);
+ if (ret) {
+ dev_err(dev, "Failed requesting ESPARSER IRQ\n");
+ return ret;
+ }
+
+ core->esparser_reset =
+ devm_reset_control_get_exclusive(dev, "esparser");
+ if (IS_ERR(core->esparser_reset)) {
+ dev_err(dev, "Failed to get esparser_reset\n");
+ return PTR_ERR(core->esparser_reset);
+ }
+
+ return 0;
+}
diff --git a/drivers/staging/media/meson/vdec/esparser.h b/drivers/staging/media/meson/vdec/esparser.h
new file mode 100644
index 000000000000..ff51fe7fda66
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/esparser.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2018 BayLibre, SAS
+ * Author: Maxime Jourdan <mjourdan@baylibre.com>
+ */
+
+#ifndef __MESON_VDEC_ESPARSER_H_
+#define __MESON_VDEC_ESPARSER_H_
+
+#include <linux/platform_device.h>
+
+#include "vdec.h"
+
+int esparser_init(struct platform_device *pdev, struct amvdec_core *core);
+int esparser_power_up(struct amvdec_session *sess);
+
+/**
+ * esparser_queue_eos() - write an End Of Stream sequence to the ESPARSER
+ *
+ * @core: vdec core struct
+ * @data: EOS sequence to write
+ * @len: length of the EOS sequence in bytes
+ */
+int esparser_queue_eos(struct amvdec_core *core, const u8 *data, u32 len);
+
+/**
+ * esparser_queue_all_src() - work handler that writes as many src buffers
+ * as possible to the ESPARSER
+ *
+ * @work: the session esparser_queue_work work struct
+ */
+void esparser_queue_all_src(struct work_struct *work);
+
+#define ESPARSER_MIN_PACKET_SIZE SZ_4K
+
+#endif
diff --git a/drivers/staging/media/meson/vdec/vdec.c b/drivers/staging/media/meson/vdec/vdec.c
new file mode 100644
index 000000000000..0a1a04fd5d13
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/vdec.c
@@ -0,0 +1,1099 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2018 BayLibre, SAS
+ * Author: Maxime Jourdan <mjourdan@baylibre.com>
+ */
+
+#include <linux/of_device.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/syscon.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-dev.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "vdec.h"
+#include "esparser.h"
+#include "vdec_helpers.h"
+
+struct dummy_buf {
+ struct vb2_v4l2_buffer vb;
+ struct list_head list;
+};
+
+/* 16 MiB circular buffer (VIFIFO) for exchanging the parsed bitstream
+ * between the ESPARSER and the VDEC
+ */
+#define SIZE_VIFIFO SZ_16M
+
+static u32 get_output_size(u32 width, u32 height)
+{
+ return ALIGN(width * height, SZ_64K);
+}
+
+u32 amvdec_get_output_size(struct amvdec_session *sess)
+{
+ return get_output_size(sess->width, sess->height);
+}
+EXPORT_SYMBOL_GPL(amvdec_get_output_size);
+
+static int vdec_codec_needs_recycle(struct amvdec_session *sess)
+{
+ struct amvdec_codec_ops *codec_ops = sess->fmt_out->codec_ops;
+
+ return codec_ops->can_recycle && codec_ops->recycle;
+}
+
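+/* Background thread handing processed capture buffers back to the codec
+ * (polled every 5-10ms) so the firmware can reuse them.
+ */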
+static int vdec_recycle_thread(void *data)
+{
+ struct amvdec_session *sess = data;
+ struct amvdec_core *core = sess->core;
+ struct amvdec_codec_ops *codec_ops = sess->fmt_out->codec_ops;
+ struct amvdec_buffer *tmp, *n;
+
+ while (!kthread_should_stop()) {
+ mutex_lock(&sess->bufs_recycle_lock);
+ list_for_each_entry_safe(tmp, n, &sess->bufs_recycle, list) {
+ if (!codec_ops->can_recycle(core))
+ break;
+
+ codec_ops->recycle(core, tmp->vb->index);
+ list_del(&tmp->list);
+ kfree(tmp);
+ }
+ mutex_unlock(&sess->bufs_recycle_lock);
+
+ usleep_range(5000, 10000);
+ }
+
+ return 0;
+}
+
+static int vdec_poweron(struct amvdec_session *sess)
+{
+ int ret;
+ struct amvdec_ops *vdec_ops = sess->fmt_out->vdec_ops;
+
+ ret = clk_prepare_enable(sess->core->dos_parser_clk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(sess->core->dos_clk);
+ if (ret)
+ goto disable_dos_parser;
+
+ ret = vdec_ops->start(sess);
+ if (ret)
+ goto disable_dos;
+
+ esparser_power_up(sess);
+
+ return 0;
+
+disable_dos:
+ clk_disable_unprepare(sess->core->dos_clk);
+disable_dos_parser:
+ clk_disable_unprepare(sess->core->dos_parser_clk);
+
+ return ret;
+}
+
+static void vdec_wait_inactive(struct amvdec_session *sess)
+{
+ /* We consider 50ms with no IRQ to be inactive. */
+ while (time_is_after_jiffies64(sess->last_irq_jiffies +
+ msecs_to_jiffies(50)))
+ msleep(25);
+}
+
+static void vdec_poweroff(struct amvdec_session *sess)
+{
+ struct amvdec_ops *vdec_ops = sess->fmt_out->vdec_ops;
+ struct amvdec_codec_ops *codec_ops = sess->fmt_out->codec_ops;
+
+ sess->should_stop = 1;
+ vdec_wait_inactive(sess);
+ if (codec_ops->drain)
+ codec_ops->drain(sess);
+
+ vdec_ops->stop(sess);
+ clk_disable_unprepare(sess->core->dos_clk);
+ clk_disable_unprepare(sess->core->dos_parser_clk);
+}
+
+static void
+vdec_queue_recycle(struct amvdec_session *sess, struct vb2_buffer *vb)
+{
+ struct amvdec_buffer *new_buf;
+
+	new_buf = kmalloc(sizeof(*new_buf), GFP_KERNEL);
+	if (!new_buf)
+		return;
+
+	new_buf->vb = vb;
+
+ mutex_lock(&sess->bufs_recycle_lock);
+ list_add_tail(&new_buf->list, &sess->bufs_recycle);
+ mutex_unlock(&sess->bufs_recycle_lock);
+}
+
+static void vdec_m2m_device_run(void *priv)
+{
+ struct amvdec_session *sess = priv;
+
+ schedule_work(&sess->esparser_queue_work);
+}
+
+static void vdec_m2m_job_abort(void *priv)
+{
+ struct amvdec_session *sess = priv;
+
+ v4l2_m2m_job_finish(sess->m2m_dev, sess->m2m_ctx);
+}
+
+static const struct v4l2_m2m_ops vdec_m2m_ops = {
+ .device_run = vdec_m2m_device_run,
+ .job_abort = vdec_m2m_job_abort,
+};
+
+static void process_num_buffers(struct vb2_queue *q,
+ struct amvdec_session *sess,
+ unsigned int *num_buffers,
+ bool is_reqbufs)
+{
+ const struct amvdec_format *fmt_out = sess->fmt_out;
+ unsigned int buffers_total = q->num_buffers + *num_buffers;
+
+ if (is_reqbufs && buffers_total < fmt_out->min_buffers)
+ *num_buffers = fmt_out->min_buffers - q->num_buffers;
+ if (buffers_total > fmt_out->max_buffers)
+ *num_buffers = fmt_out->max_buffers - q->num_buffers;
+
+ /* We need to program the complete CAPTURE buffer list
+ * in registers during start_streaming, and the firmwares
+ * are free to choose any of them to write frames to. As such,
+ * we need all of them to be queued into the driver
+ */
+ sess->num_dst_bufs = q->num_buffers + *num_buffers;
+ q->min_buffers_needed = max(fmt_out->min_buffers, sess->num_dst_bufs);
+}
+
+static int vdec_queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
+ unsigned int *num_planes, unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct amvdec_session *sess = vb2_get_drv_priv(q);
+ u32 output_size = amvdec_get_output_size(sess);
+
+ if (*num_planes) {
+ switch (q->type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ if (*num_planes != 1 || sizes[0] < output_size)
+ return -EINVAL;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ switch (sess->pixfmt_cap) {
+ case V4L2_PIX_FMT_NV12M:
+ if (*num_planes != 2 ||
+ sizes[0] < output_size ||
+ sizes[1] < output_size / 2)
+ return -EINVAL;
+ break;
+ case V4L2_PIX_FMT_YUV420M:
+ if (*num_planes != 3 ||
+ sizes[0] < output_size ||
+ sizes[1] < output_size / 4 ||
+ sizes[2] < output_size / 4)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ process_num_buffers(q, sess, num_buffers, false);
+ break;
+ }
+
+ return 0;
+ }
+
+ switch (q->type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ sizes[0] = amvdec_get_output_size(sess);
+ *num_planes = 1;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ switch (sess->pixfmt_cap) {
+ case V4L2_PIX_FMT_NV12M:
+ sizes[0] = output_size;
+ sizes[1] = output_size / 2;
+ *num_planes = 2;
+ break;
+ case V4L2_PIX_FMT_YUV420M:
+ sizes[0] = output_size;
+ sizes[1] = output_size / 4;
+ sizes[2] = output_size / 4;
+ *num_planes = 3;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ process_num_buffers(q, sess, num_buffers, true);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void vdec_vb2_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct amvdec_session *sess = vb2_get_drv_priv(vb->vb2_queue);
+ struct v4l2_m2m_ctx *m2m_ctx = sess->m2m_ctx;
+
+ v4l2_m2m_buf_queue(m2m_ctx, vbuf);
+
+ if (!sess->streamon_out || !sess->streamon_cap)
+ return;
+
+ if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
+ vdec_codec_needs_recycle(sess))
+ vdec_queue_recycle(sess, vb);
+
+ schedule_work(&sess->esparser_queue_work);
+}
+
+static int vdec_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct amvdec_session *sess = vb2_get_drv_priv(q);
+ struct amvdec_codec_ops *codec_ops = sess->fmt_out->codec_ops;
+ struct amvdec_core *core = sess->core;
+ struct vb2_v4l2_buffer *buf;
+ int ret;
+
+ if (core->cur_sess && core->cur_sess != sess) {
+ ret = -EBUSY;
+ goto bufs_done;
+ }
+
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ sess->streamon_out = 1;
+ else
+ sess->streamon_cap = 1;
+
+ if (!sess->streamon_out || !sess->streamon_cap)
+ return 0;
+
+ if (sess->status == STATUS_NEEDS_RESUME &&
+ q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ codec_ops->resume(sess);
+ sess->status = STATUS_RUNNING;
+ return 0;
+ }
+
+ sess->vififo_size = SIZE_VIFIFO;
+ sess->vififo_vaddr =
+ dma_alloc_coherent(sess->core->dev, sess->vififo_size,
+ &sess->vififo_paddr, GFP_KERNEL);
+ if (!sess->vififo_vaddr) {
+ dev_err(sess->core->dev, "Failed to request VIFIFO buffer\n");
+ ret = -ENOMEM;
+ goto bufs_done;
+ }
+
+ sess->should_stop = 0;
+ sess->keyframe_found = 0;
+ sess->last_offset = 0;
+ sess->wrap_count = 0;
+ sess->pixelaspect.numerator = 1;
+ sess->pixelaspect.denominator = 1;
+ atomic_set(&sess->esparser_queued_bufs, 0);
+ v4l2_ctrl_s_ctrl(sess->ctrl_min_buf_capture, 1);
+
+ ret = vdec_poweron(sess);
+ if (ret)
+ goto vififo_free;
+
+ sess->sequence_cap = 0;
+ if (vdec_codec_needs_recycle(sess))
+ sess->recycle_thread = kthread_run(vdec_recycle_thread, sess,
+ "vdec_recycle");
+
+ sess->status = STATUS_RUNNING;
+ core->cur_sess = sess;
+
+ return 0;
+
+vififo_free:
+ dma_free_coherent(sess->core->dev, sess->vififo_size,
+ sess->vififo_vaddr, sess->vififo_paddr);
+bufs_done:
+ while ((buf = v4l2_m2m_src_buf_remove(sess->m2m_ctx)))
+ v4l2_m2m_buf_done(buf, VB2_BUF_STATE_QUEUED);
+ while ((buf = v4l2_m2m_dst_buf_remove(sess->m2m_ctx)))
+ v4l2_m2m_buf_done(buf, VB2_BUF_STATE_QUEUED);
+
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ sess->streamon_out = 0;
+ else
+ sess->streamon_cap = 0;
+
+ return ret;
+}
+
+static void vdec_free_canvas(struct amvdec_session *sess)
+{
+ int i;
+
+ for (i = 0; i < sess->canvas_num; ++i)
+ meson_canvas_free(sess->core->canvas, sess->canvas_alloc[i]);
+
+ sess->canvas_num = 0;
+}
+
+static void vdec_reset_timestamps(struct amvdec_session *sess)
+{
+ struct amvdec_timestamp *tmp, *n;
+
+ list_for_each_entry_safe(tmp, n, &sess->timestamps, list) {
+ list_del(&tmp->list);
+ kfree(tmp);
+ }
+}
+
+static void vdec_reset_bufs_recycle(struct amvdec_session *sess)
+{
+ struct amvdec_buffer *tmp, *n;
+
+ list_for_each_entry_safe(tmp, n, &sess->bufs_recycle, list) {
+ list_del(&tmp->list);
+ kfree(tmp);
+ }
+}
+
+static void vdec_stop_streaming(struct vb2_queue *q)
+{
+ struct amvdec_session *sess = vb2_get_drv_priv(q);
+ struct amvdec_core *core = sess->core;
+ struct vb2_v4l2_buffer *buf;
+
+ if (sess->status == STATUS_RUNNING ||
+ (sess->status == STATUS_NEEDS_RESUME &&
+ (!sess->streamon_out || !sess->streamon_cap))) {
+ if (vdec_codec_needs_recycle(sess))
+ kthread_stop(sess->recycle_thread);
+
+ vdec_poweroff(sess);
+ vdec_free_canvas(sess);
+ dma_free_coherent(sess->core->dev, sess->vififo_size,
+ sess->vififo_vaddr, sess->vififo_paddr);
+ vdec_reset_timestamps(sess);
+ vdec_reset_bufs_recycle(sess);
+ kfree(sess->priv);
+ sess->priv = NULL;
+ core->cur_sess = NULL;
+ sess->status = STATUS_STOPPED;
+ }
+
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ while ((buf = v4l2_m2m_src_buf_remove(sess->m2m_ctx)))
+ v4l2_m2m_buf_done(buf, VB2_BUF_STATE_ERROR);
+
+ sess->streamon_out = 0;
+ } else {
+ while ((buf = v4l2_m2m_dst_buf_remove(sess->m2m_ctx)))
+ v4l2_m2m_buf_done(buf, VB2_BUF_STATE_ERROR);
+
+ sess->streamon_cap = 0;
+ }
+}
+
+static int vdec_vb2_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+ vbuf->field = V4L2_FIELD_NONE;
+ return 0;
+}
+
+static const struct vb2_ops vdec_vb2_ops = {
+ .queue_setup = vdec_queue_setup,
+ .start_streaming = vdec_start_streaming,
+ .stop_streaming = vdec_stop_streaming,
+ .buf_queue = vdec_vb2_buf_queue,
+ .buf_prepare = vdec_vb2_buf_prepare,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static int
+vdec_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
+{
+ strscpy(cap->driver, "meson-vdec", sizeof(cap->driver));
+ strscpy(cap->card, "Amlogic Video Decoder", sizeof(cap->card));
+ strscpy(cap->bus_info, "platform:meson-vdec", sizeof(cap->bus_info));
+
+ return 0;
+}
+
+static const struct amvdec_format *
+find_format(const struct amvdec_format *fmts, u32 size, u32 pixfmt)
+{
+ unsigned int i;
+
+ for (i = 0; i < size; i++) {
+ if (fmts[i].pixfmt == pixfmt)
+ return &fmts[i];
+ }
+
+ return NULL;
+}
+
+static unsigned int
+vdec_supports_pixfmt_cap(const struct amvdec_format *fmt_out, u32 pixfmt_cap)
+{
+ int i;
+
+ for (i = 0; fmt_out->pixfmts_cap[i]; i++)
+ if (fmt_out->pixfmts_cap[i] == pixfmt_cap)
+ return 1;
+
+ return 0;
+}
+
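+/* Common TRY_FMT handling: pick a supported pixel format for the queue,
+ * clamp the resolution and fill in the per-plane sizes and strides.
+ */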
+static const struct amvdec_format *
+vdec_try_fmt_common(struct amvdec_session *sess, u32 size,
+ struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp;
+ struct v4l2_plane_pix_format *pfmt = pixmp->plane_fmt;
+ const struct amvdec_format *fmts = sess->core->platform->formats;
+ const struct amvdec_format *fmt_out;
+
+ memset(pfmt[0].reserved, 0, sizeof(pfmt[0].reserved));
+ memset(pixmp->reserved, 0, sizeof(pixmp->reserved));
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ fmt_out = find_format(fmts, size, pixmp->pixelformat);
+ if (!fmt_out) {
+ pixmp->pixelformat = V4L2_PIX_FMT_MPEG2;
+ fmt_out = find_format(fmts, size, pixmp->pixelformat);
+ }
+
+ pfmt[0].sizeimage =
+ get_output_size(pixmp->width, pixmp->height);
+ pfmt[0].bytesperline = 0;
+ pixmp->num_planes = 1;
+ } else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ fmt_out = sess->fmt_out;
+ if (!vdec_supports_pixfmt_cap(fmt_out, pixmp->pixelformat))
+ pixmp->pixelformat = fmt_out->pixfmts_cap[0];
+
+ memset(pfmt[1].reserved, 0, sizeof(pfmt[1].reserved));
+ if (pixmp->pixelformat == V4L2_PIX_FMT_NV12M) {
+ pfmt[0].sizeimage =
+ get_output_size(pixmp->width, pixmp->height);
+ pfmt[0].bytesperline = ALIGN(pixmp->width, 64);
+
+ pfmt[1].sizeimage =
+ get_output_size(pixmp->width, pixmp->height) / 2;
+ pfmt[1].bytesperline = ALIGN(pixmp->width, 64);
+ pixmp->num_planes = 2;
+ } else if (pixmp->pixelformat == V4L2_PIX_FMT_YUV420M) {
+ pfmt[0].sizeimage =
+ get_output_size(pixmp->width, pixmp->height);
+ pfmt[0].bytesperline = ALIGN(pixmp->width, 64);
+
+ pfmt[1].sizeimage =
+ get_output_size(pixmp->width, pixmp->height) / 4;
+ pfmt[1].bytesperline = ALIGN(pixmp->width, 64) / 2;
+
+ pfmt[2].sizeimage =
+ get_output_size(pixmp->width, pixmp->height) / 4;
+ pfmt[2].bytesperline = ALIGN(pixmp->width, 64) / 2;
+ pixmp->num_planes = 3;
+ }
+ } else {
+ return NULL;
+ }
+
+ pixmp->width = clamp(pixmp->width, (u32)256, fmt_out->max_width);
+ pixmp->height = clamp(pixmp->height, (u32)144, fmt_out->max_height);
+
+ if (pixmp->field == V4L2_FIELD_ANY)
+ pixmp->field = V4L2_FIELD_NONE;
+
+ return fmt_out;
+}
+
+static int vdec_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct amvdec_session *sess =
+ container_of(file->private_data, struct amvdec_session, fh);
+
+ vdec_try_fmt_common(sess, sess->core->platform->num_formats, f);
+
+ return 0;
+}
+
+static int vdec_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct amvdec_session *sess =
+ container_of(file->private_data, struct amvdec_session, fh);
+ struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ pixmp->pixelformat = sess->pixfmt_cap;
+ else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ pixmp->pixelformat = sess->fmt_out->pixfmt;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ pixmp->width = sess->width;
+ pixmp->height = sess->height;
+ pixmp->colorspace = sess->colorspace;
+ pixmp->ycbcr_enc = sess->ycbcr_enc;
+ pixmp->quantization = sess->quantization;
+ pixmp->xfer_func = sess->xfer_func;
+ } else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ pixmp->width = sess->width;
+ pixmp->height = sess->height;
+ }
+
+ vdec_try_fmt_common(sess, sess->core->platform->num_formats, f);
+
+ return 0;
+}
+
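+/* S_FMT: validate the requested format, then re-run the OUTPUT and CAPTURE
+ * adjustments so both queues stay consistent with the negotiated resolution.
+ */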
+static int vdec_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct amvdec_session *sess =
+ container_of(file->private_data, struct amvdec_session, fh);
+ struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp;
+ u32 num_formats = sess->core->platform->num_formats;
+ const struct amvdec_format *fmt_out;
+ struct v4l2_pix_format_mplane orig_pixmp;
+ struct v4l2_format format;
+ u32 pixfmt_out = 0, pixfmt_cap = 0;
+
+ orig_pixmp = *pixmp;
+
+ fmt_out = vdec_try_fmt_common(sess, num_formats, f);
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ pixfmt_out = pixmp->pixelformat;
+ pixfmt_cap = sess->pixfmt_cap;
+ } else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ pixfmt_cap = pixmp->pixelformat;
+ pixfmt_out = sess->fmt_out->pixfmt;
+ }
+
+ memset(&format, 0, sizeof(format));
+
+ format.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ format.fmt.pix_mp.pixelformat = pixfmt_out;
+ format.fmt.pix_mp.width = orig_pixmp.width;
+ format.fmt.pix_mp.height = orig_pixmp.height;
+ vdec_try_fmt_common(sess, num_formats, &format);
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ sess->width = format.fmt.pix_mp.width;
+ sess->height = format.fmt.pix_mp.height;
+ sess->colorspace = pixmp->colorspace;
+ sess->ycbcr_enc = pixmp->ycbcr_enc;
+ sess->quantization = pixmp->quantization;
+ sess->xfer_func = pixmp->xfer_func;
+ }
+
+ memset(&format, 0, sizeof(format));
+
+ format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ format.fmt.pix_mp.pixelformat = pixfmt_cap;
+ format.fmt.pix_mp.width = orig_pixmp.width;
+ format.fmt.pix_mp.height = orig_pixmp.height;
+ vdec_try_fmt_common(sess, num_formats, &format);
+
+ sess->width = format.fmt.pix_mp.width;
+ sess->height = format.fmt.pix_mp.height;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ sess->fmt_out = fmt_out;
+ else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ sess->pixfmt_cap = format.fmt.pix_mp.pixelformat;
+
+ return 0;
+}
+
+static int vdec_enum_fmt(struct file *file, void *fh, struct v4l2_fmtdesc *f)
+{
+ struct amvdec_session *sess =
+ container_of(file->private_data, struct amvdec_session, fh);
+ const struct vdec_platform *platform = sess->core->platform;
+ const struct amvdec_format *fmt_out;
+
+ memset(f->reserved, 0, sizeof(f->reserved));
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ if (f->index >= platform->num_formats)
+ return -EINVAL;
+
+ fmt_out = &platform->formats[f->index];
+ f->pixelformat = fmt_out->pixfmt;
+ f->flags = fmt_out->flags;
+ } else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ fmt_out = sess->fmt_out;
+		if (f->index >= ARRAY_SIZE(fmt_out->pixfmts_cap) ||
+		    !fmt_out->pixfmts_cap[f->index])
+ return -EINVAL;
+
+ f->pixelformat = fmt_out->pixfmts_cap[f->index];
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int vdec_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct amvdec_session *sess =
+ container_of(file->private_data, struct amvdec_session, fh);
+ const struct amvdec_format *formats = sess->core->platform->formats;
+ const struct amvdec_format *fmt;
+ u32 num_formats = sess->core->platform->num_formats;
+
+ fmt = find_format(formats, num_formats, fsize->pixel_format);
+ if (!fmt || fsize->index)
+ return -EINVAL;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_CONTINUOUS;
+
+ fsize->stepwise.min_width = 256;
+ fsize->stepwise.max_width = fmt->max_width;
+ fsize->stepwise.step_width = 1;
+ fsize->stepwise.min_height = 144;
+ fsize->stepwise.max_height = fmt->max_height;
+ fsize->stepwise.step_height = 1;
+
+ return 0;
+}
+
+static int
+vdec_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *cmd)
+{
+ struct amvdec_session *sess =
+ container_of(file->private_data, struct amvdec_session, fh);
+ struct amvdec_codec_ops *codec_ops = sess->fmt_out->codec_ops;
+ struct device *dev = sess->core->dev;
+ int ret;
+
+ ret = v4l2_m2m_ioctl_try_decoder_cmd(file, fh, cmd);
+ if (ret)
+ return ret;
+
+	if (!(sess->streamon_out && sess->streamon_cap))
+ return 0;
+
+ /* Currently not handled since we do not support dynamic resolution
+ * for MPEG2. We consider both queues streaming to mean that the
+ * decoding session is started
+ */
+ if (cmd->cmd == V4L2_DEC_CMD_START)
+ return 0;
+
+ /* Should not happen */
+ if (cmd->cmd != V4L2_DEC_CMD_STOP)
+ return -EINVAL;
+
+ dev_dbg(dev, "Received V4L2_DEC_CMD_STOP\n");
+ sess->should_stop = 1;
+
+ vdec_wait_inactive(sess);
+
+ if (codec_ops->drain) {
+ codec_ops->drain(sess);
+ } else if (codec_ops->eos_sequence) {
+ u32 len;
+ const u8 *data = codec_ops->eos_sequence(&len);
+
+ esparser_queue_eos(sess->core, data, len);
+ }
+
+ return ret;
+}
+
+static int vdec_subscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ switch (sub->type) {
+ case V4L2_EVENT_EOS:
+ case V4L2_EVENT_SOURCE_CHANGE:
+ return v4l2_event_subscribe(fh, sub, 0, NULL);
+ case V4L2_EVENT_CTRL:
+ return v4l2_ctrl_subscribe_event(fh, sub);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int vdec_g_pixelaspect(struct file *file, void *fh, int type,
+ struct v4l2_fract *f)
+{
+ struct amvdec_session *sess =
+ container_of(file->private_data, struct amvdec_session, fh);
+
+ if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ return -EINVAL;
+
+ *f = sess->pixelaspect;
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops vdec_ioctl_ops = {
+ .vidioc_querycap = vdec_querycap,
+ .vidioc_enum_fmt_vid_cap = vdec_enum_fmt,
+ .vidioc_enum_fmt_vid_out = vdec_enum_fmt,
+ .vidioc_s_fmt_vid_cap_mplane = vdec_s_fmt,
+ .vidioc_s_fmt_vid_out_mplane = vdec_s_fmt,
+ .vidioc_g_fmt_vid_cap_mplane = vdec_g_fmt,
+ .vidioc_g_fmt_vid_out_mplane = vdec_g_fmt,
+ .vidioc_try_fmt_vid_cap_mplane = vdec_try_fmt,
+ .vidioc_try_fmt_vid_out_mplane = vdec_try_fmt,
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+ .vidioc_enum_framesizes = vdec_enum_framesizes,
+ .vidioc_subscribe_event = vdec_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+ .vidioc_try_decoder_cmd = v4l2_m2m_ioctl_try_decoder_cmd,
+ .vidioc_decoder_cmd = vdec_decoder_cmd,
+ .vidioc_g_pixelaspect = vdec_g_pixelaspect,
+};
+
+static int m2m_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct amvdec_session *sess = priv;
+ int ret;
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->ops = &vdec_vb2_ops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->drv_priv = sess;
+ src_vq->buf_struct_size = sizeof(struct dummy_buf);
+ src_vq->min_buffers_needed = 1;
+ src_vq->dev = sess->core->dev;
+ src_vq->lock = &sess->lock;
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->ops = &vdec_vb2_ops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->drv_priv = sess;
+ dst_vq->buf_struct_size = sizeof(struct dummy_buf);
+ dst_vq->min_buffers_needed = 1;
+ dst_vq->dev = sess->core->dev;
+ dst_vq->lock = &sess->lock;
+ ret = vb2_queue_init(dst_vq);
+ if (ret) {
+ vb2_queue_release(src_vq);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int vdec_init_ctrls(struct amvdec_session *sess)
+{
+ struct v4l2_ctrl_handler *ctrl_handler = &sess->ctrl_handler;
+ int ret;
+
+ ret = v4l2_ctrl_handler_init(ctrl_handler, 1);
+ if (ret)
+ return ret;
+
+ sess->ctrl_min_buf_capture =
+ v4l2_ctrl_new_std(ctrl_handler, NULL,
+ V4L2_CID_MIN_BUFFERS_FOR_CAPTURE, 1, 32, 1,
+ 1);
+
+ ret = ctrl_handler->error;
+ if (ret) {
+ v4l2_ctrl_handler_free(ctrl_handler);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int vdec_open(struct file *file)
+{
+ struct amvdec_core *core = video_drvdata(file);
+ struct device *dev = core->dev;
+ const struct amvdec_format *formats = core->platform->formats;
+ struct amvdec_session *sess;
+ int ret;
+
+ sess = kzalloc(sizeof(*sess), GFP_KERNEL);
+ if (!sess)
+ return -ENOMEM;
+
+ sess->core = core;
+
+ sess->m2m_dev = v4l2_m2m_init(&vdec_m2m_ops);
+ if (IS_ERR(sess->m2m_dev)) {
+		dev_err(dev, "Failed to initialize the v4l2-m2m device\n");
+ ret = PTR_ERR(sess->m2m_dev);
+ goto err_free_sess;
+ }
+
+ sess->m2m_ctx = v4l2_m2m_ctx_init(sess->m2m_dev, sess, m2m_queue_init);
+ if (IS_ERR(sess->m2m_ctx)) {
+		dev_err(dev, "Failed to initialize the v4l2-m2m context\n");
+ ret = PTR_ERR(sess->m2m_ctx);
+ goto err_m2m_release;
+ }
+
+ ret = vdec_init_ctrls(sess);
+ if (ret)
+ goto err_m2m_release;
+
+ sess->pixfmt_cap = formats[0].pixfmts_cap[0];
+ sess->fmt_out = &formats[0];
+ sess->width = 1280;
+ sess->height = 720;
+ sess->pixelaspect.numerator = 1;
+ sess->pixelaspect.denominator = 1;
+
+ INIT_LIST_HEAD(&sess->timestamps);
+ INIT_LIST_HEAD(&sess->bufs_recycle);
+ INIT_WORK(&sess->esparser_queue_work, esparser_queue_all_src);
+ mutex_init(&sess->lock);
+ mutex_init(&sess->bufs_recycle_lock);
+ spin_lock_init(&sess->ts_spinlock);
+
+ v4l2_fh_init(&sess->fh, core->vdev_dec);
+ sess->fh.ctrl_handler = &sess->ctrl_handler;
+ v4l2_fh_add(&sess->fh);
+ sess->fh.m2m_ctx = sess->m2m_ctx;
+ file->private_data = &sess->fh;
+
+ return 0;
+
+err_m2m_release:
+ v4l2_m2m_release(sess->m2m_dev);
+err_free_sess:
+ kfree(sess);
+ return ret;
+}
+
+static int vdec_close(struct file *file)
+{
+ struct amvdec_session *sess =
+ container_of(file->private_data, struct amvdec_session, fh);
+
+ v4l2_m2m_ctx_release(sess->m2m_ctx);
+ v4l2_m2m_release(sess->m2m_dev);
+ v4l2_fh_del(&sess->fh);
+ v4l2_fh_exit(&sess->fh);
+
+ mutex_destroy(&sess->lock);
+ mutex_destroy(&sess->bufs_recycle_lock);
+
+ kfree(sess);
+
+ return 0;
+}
+
+static const struct v4l2_file_operations vdec_fops = {
+ .owner = THIS_MODULE,
+ .open = vdec_open,
+ .release = vdec_close,
+ .unlocked_ioctl = video_ioctl2,
+ .poll = v4l2_m2m_fop_poll,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+static irqreturn_t vdec_isr(int irq, void *data)
+{
+ struct amvdec_core *core = data;
+ struct amvdec_session *sess = core->cur_sess;
+
+ sess->last_irq_jiffies = get_jiffies_64();
+
+ return sess->fmt_out->codec_ops->isr(sess);
+}
+
+static irqreturn_t vdec_threaded_isr(int irq, void *data)
+{
+ struct amvdec_core *core = data;
+ struct amvdec_session *sess = core->cur_sess;
+
+ return sess->fmt_out->codec_ops->threaded_isr(sess);
+}
+
+static const struct of_device_id vdec_dt_match[] = {
+ { .compatible = "amlogic,gxbb-vdec",
+ .data = &vdec_platform_gxbb },
+ { .compatible = "amlogic,gxm-vdec",
+ .data = &vdec_platform_gxm },
+ { .compatible = "amlogic,gxl-vdec",
+ .data = &vdec_platform_gxl },
+ {}
+};
+MODULE_DEVICE_TABLE(of, vdec_dt_match);
+
+static int vdec_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct video_device *vdev;
+ struct amvdec_core *core;
+ struct resource *r;
+ const struct of_device_id *of_id;
+ int irq;
+ int ret;
+
+ core = devm_kzalloc(dev, sizeof(*core), GFP_KERNEL);
+ if (!core)
+ return -ENOMEM;
+
+ core->dev = dev;
+ platform_set_drvdata(pdev, core);
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dos");
+ core->dos_base = devm_ioremap_resource(dev, r);
+ if (IS_ERR(core->dos_base)) {
+ dev_err(dev, "Couldn't remap DOS memory\n");
+ return PTR_ERR(core->dos_base);
+ }
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "esparser");
+ core->esparser_base = devm_ioremap_resource(dev, r);
+ if (IS_ERR(core->esparser_base)) {
+ dev_err(dev, "Couldn't remap ESPARSER memory\n");
+ return PTR_ERR(core->esparser_base);
+ }
+
+ core->regmap_ao =
+ syscon_regmap_lookup_by_phandle(dev->of_node,
+ "amlogic,ao-sysctrl");
+ if (IS_ERR(core->regmap_ao)) {
+ dev_err(dev, "Couldn't regmap AO sysctrl\n");
+ return PTR_ERR(core->regmap_ao);
+ }
+
+ core->canvas = meson_canvas_get(dev);
+ if (IS_ERR(core->canvas))
+ return PTR_ERR(core->canvas);
+
+ core->dos_parser_clk = devm_clk_get(dev, "dos_parser");
+ if (IS_ERR(core->dos_parser_clk))
+ return -EPROBE_DEFER;
+
+ core->dos_clk = devm_clk_get(dev, "dos");
+ if (IS_ERR(core->dos_clk))
+ return -EPROBE_DEFER;
+
+ core->vdec_1_clk = devm_clk_get(dev, "vdec_1");
+ if (IS_ERR(core->vdec_1_clk))
+ return -EPROBE_DEFER;
+
+ core->vdec_hevc_clk = devm_clk_get(dev, "vdec_hevc");
+ if (IS_ERR(core->vdec_hevc_clk))
+ return -EPROBE_DEFER;
+
+ irq = platform_get_irq_byname(pdev, "vdec");
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_threaded_irq(core->dev, irq, vdec_isr,
+ vdec_threaded_isr, IRQF_ONESHOT,
+ "vdec", core);
+ if (ret)
+ return ret;
+
+ ret = esparser_init(pdev, core);
+ if (ret)
+ return ret;
+
+ ret = v4l2_device_register(dev, &core->v4l2_dev);
+ if (ret) {
+ dev_err(dev, "Couldn't register v4l2 device\n");
+		return ret;
+ }
+
+ vdev = video_device_alloc();
+ if (!vdev) {
+ ret = -ENOMEM;
+ goto err_vdev_release;
+ }
+
+ of_id = of_match_node(vdec_dt_match, dev->of_node);
+ core->platform = of_id->data;
+ core->vdev_dec = vdev;
+ core->dev_dec = dev;
+ mutex_init(&core->lock);
+
+ strscpy(vdev->name, "meson-video-decoder", sizeof(vdev->name));
+ vdev->release = video_device_release;
+ vdev->fops = &vdec_fops;
+ vdev->ioctl_ops = &vdec_ioctl_ops;
+ vdev->vfl_dir = VFL_DIR_M2M;
+ vdev->v4l2_dev = &core->v4l2_dev;
+ vdev->lock = &core->lock;
+ vdev->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
+
+ video_set_drvdata(vdev, core);
+
+ ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ if (ret) {
+ dev_err(dev, "Failed registering video device\n");
+ goto err_vdev_release;
+ }
+
+ return 0;
+
+err_vdev_release:
+ video_device_release(vdev);
+ return ret;
+}
+
+static int vdec_remove(struct platform_device *pdev)
+{
+ struct amvdec_core *core = platform_get_drvdata(pdev);
+
+ video_unregister_device(core->vdev_dec);
+
+ return 0;
+}
+
+static struct platform_driver meson_vdec_driver = {
+ .probe = vdec_probe,
+ .remove = vdec_remove,
+ .driver = {
+ .name = "meson-vdec",
+ .of_match_table = vdec_dt_match,
+ },
+};
+module_platform_driver(meson_vdec_driver);
+
+MODULE_DESCRIPTION("Meson video decoder driver for GXBB/GXL/GXM");
+MODULE_AUTHOR("Maxime Jourdan <mjourdan@baylibre.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/meson/vdec/vdec.h b/drivers/staging/media/meson/vdec/vdec.h
new file mode 100644
index 000000000000..d811e7976519
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/vdec.h
@@ -0,0 +1,267 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2018 BayLibre, SAS
+ * Author: Maxime Jourdan <mjourdan@baylibre.com>
+ */
+
+#ifndef __MESON_VDEC_CORE_H_
+#define __MESON_VDEC_CORE_H_
+
+#include <linux/irqreturn.h>
+#include <linux/regmap.h>
+#include <linux/list.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <linux/soc/amlogic/meson-canvas.h>
+
+#include "vdec_platform.h"
+
+/* 32 buffers in 3-plane YUV420 */
+#define MAX_CANVAS (32 * 3)
+
+struct amvdec_buffer {
+ struct list_head list;
+ struct vb2_buffer *vb;
+};
+
+/**
+ * struct amvdec_timestamp - stores a src timestamp along with a VIFIFO offset
+ *
+ * @list: used to make lists out of this struct
+ * @ts: timestamp
+ * @offset: offset in the VIFIFO where the associated packet was written
+ */
+struct amvdec_timestamp {
+ struct list_head list;
+ u64 ts;
+ u32 offset;
+};
+
+struct amvdec_session;
+
+/**
+ * struct amvdec_core - device parameters, singleton
+ *
+ * @dos_base: DOS memory base address
+ * @esparser_base: PARSER memory base address
+ * @regmap_ao: regmap for the AO bus
+ * @dev: core device
+ * @dev_dec: decoder device
+ * @platform: platform-specific data
+ * @canvas: canvas provider reference
+ * @dos_parser_clk: DOS_PARSER clock
+ * @dos_clk: DOS clock
+ * @vdec_1_clk: VDEC_1 clock
+ * @vdec_hevc_clk: VDEC_HEVC clock
+ * @esparser_reset: RESET for the PARSER
+ * @vdev_dec: video device for the decoder
+ * @v4l2_dev: v4l2 device
+ * @cur_sess: current decoding session
+ * @lock: video device lock
+ */
+struct amvdec_core {
+ void __iomem *dos_base;
+ void __iomem *esparser_base;
+ struct regmap *regmap_ao;
+
+ struct device *dev;
+ struct device *dev_dec;
+ const struct vdec_platform *platform;
+
+ struct meson_canvas *canvas;
+
+ struct clk *dos_parser_clk;
+ struct clk *dos_clk;
+ struct clk *vdec_1_clk;
+ struct clk *vdec_hevc_clk;
+
+ struct reset_control *esparser_reset;
+
+ struct video_device *vdev_dec;
+ struct v4l2_device v4l2_dev;
+
+ struct amvdec_session *cur_sess;
+ struct mutex lock; /* video device lock */
+};
+
+/**
+ * struct amvdec_ops - vdec operations
+ *
+ * @start: mandatory call when the vdec needs to initialize
+ * @stop: mandatory call when the vdec needs to stop
+ * @conf_esparser: mandatory call to let the vdec configure the ESPARSER
+ * @vififo_level: mandatory call to get the current amount of data
+ * in the VIFIFO
+ */
+struct amvdec_ops {
+ int (*start)(struct amvdec_session *sess);
+ int (*stop)(struct amvdec_session *sess);
+ void (*conf_esparser)(struct amvdec_session *sess);
+ u32 (*vififo_level)(struct amvdec_session *sess);
+};
+
+/**
+ * struct amvdec_codec_ops - codec operations
+ *
+ * @start: mandatory call when the codec needs to initialize
+ * @stop: mandatory call when the codec needs to stop
+ * @load_extended_firmware: optional call to load additional firmware bits
+ * @num_pending_bufs: optional call to get the number of dst buffers on hold
+ * @can_recycle: optional call to know if the codec is ready to recycle
+ * a dst buffer
+ * @recycle: optional call to tell the codec to recycle a dst buffer. Must go
+ * in pair with @can_recycle
+ * @drain: optional call if the codec has a custom way of draining
+ * @resume: optional call to resume the codec when the CAPTURE queue
+ *	    starts streaming again
+ * @eos_sequence: optional call to get an end sequence to send to esparser
+ * for flush. Mutually exclusive with @drain.
+ * @isr: mandatory call when the ISR triggers
+ * @threaded_isr: mandatory call for the threaded ISR
+ */
+struct amvdec_codec_ops {
+ int (*start)(struct amvdec_session *sess);
+ int (*stop)(struct amvdec_session *sess);
+ int (*load_extended_firmware)(struct amvdec_session *sess,
+ const u8 *data, u32 len);
+ u32 (*num_pending_bufs)(struct amvdec_session *sess);
+ int (*can_recycle)(struct amvdec_core *core);
+ void (*recycle)(struct amvdec_core *core, u32 buf_idx);
+ void (*drain)(struct amvdec_session *sess);
+ void (*resume)(struct amvdec_session *sess);
+ const u8 * (*eos_sequence)(u32 *len);
+ irqreturn_t (*isr)(struct amvdec_session *sess);
+ irqreturn_t (*threaded_isr)(struct amvdec_session *sess);
+};
+
+/**
+ * struct amvdec_format - describes one of the OUTPUT (src) formats supported
+ *
+ * @pixfmt: V4L2 pixel format
+ * @min_buffers: minimum amount of CAPTURE (dst) buffers
+ * @max_buffers: maximum amount of CAPTURE (dst) buffers
+ * @max_width: maximum picture width supported
+ * @max_height: maximum picture height supported
+ * @flags: enum flags associated with this pixfmt
+ * @vdec_ops: the VDEC operations that support this format
+ * @codec_ops: the codec operations that support this format
+ * @firmware_path: Path to the firmware that supports this format
+ * @pixfmts_cap: list of CAPTURE pixel formats available with pixfmt
+ */
+struct amvdec_format {
+ u32 pixfmt;
+ u32 min_buffers;
+ u32 max_buffers;
+ u32 max_width;
+ u32 max_height;
+ u32 flags;
+
+ struct amvdec_ops *vdec_ops;
+ struct amvdec_codec_ops *codec_ops;
+
+ char *firmware_path;
+ u32 pixfmts_cap[4];
+};
+
+enum amvdec_status {
+ STATUS_STOPPED,
+ STATUS_RUNNING,
+ STATUS_NEEDS_RESUME,
+};
+
+/**
+ * struct amvdec_session - decoding session parameters
+ *
+ * @core: reference to the vdec core struct
+ * @fh: v4l2 file handle
+ * @m2m_dev: v4l2 m2m device
+ * @m2m_ctx: v4l2 m2m context
+ * @ctrl_handler: V4L2 control handler
+ * @ctrl_min_buf_capture: V4L2 control V4L2_CID_MIN_BUFFERS_FOR_CAPTURE
+ * @fmt_out: vdec pixel format for the OUTPUT queue
+ * @pixfmt_cap: V4L2 pixel format for the CAPTURE queue
+ * @width: current picture width
+ * @height: current picture height
+ * @colorspace: current colorspace
+ * @ycbcr_enc: current ycbcr_enc
+ * @quantization: current quantization
+ * @xfer_func: current transfer function
+ * @pixelaspect: Pixel Aspect Ratio reported by the decoder
+ * @esparser_queued_bufs: number of buffers currently queued into ESPARSER
+ * @esparser_queue_work: work struct for the ESPARSER to process src buffers
+ * @streamon_cap: stream on flag for capture queue
+ * @streamon_out: stream on flag for output queue
+ * @sequence_cap: capture sequence counter
+ * @should_stop: flag set if userspace signaled EOS via command
+ * or empty buffer
+ * @keyframe_found: flag set once a keyframe has been parsed
+ * @num_dst_bufs: number of CAPTURE buffers to be programmed into the codec
+ * @canvas_alloc: array of all the canvas IDs allocated
+ * @canvas_num: number of canvas IDs allocated
+ * @vififo_vaddr: virtual address for the VIFIFO
+ * @vififo_paddr: physical address for the VIFIFO
+ * @vififo_size: size of the VIFIFO dma alloc
+ * @bufs_recycle: list of buffers that need to be recycled
+ * @bufs_recycle_lock: lock for the bufs_recycle list
+ * @recycle_thread: task struct for the recycling thread
+ * @timestamps: chronological list of src timestamps
+ * @ts_spinlock: spinlock for the timestamps list
+ * @last_irq_jiffies: tracks last time the vdec triggered an IRQ
+ * @last_offset: last VIFIFO offset reported by the parser write pointer
+ * @wrap_count: number of times the VIFIFO write pointer wrapped around
+ * @fw_idx_to_vb2_idx: mapping from firmware buffer index to vb2 buffer index
+ * @status: current decoding status
+ * @priv: codec private data
+ */
+struct amvdec_session {
+ struct amvdec_core *core;
+
+ struct v4l2_fh fh;
+ struct v4l2_m2m_dev *m2m_dev;
+ struct v4l2_m2m_ctx *m2m_ctx;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_ctrl *ctrl_min_buf_capture;
+ struct mutex lock; /* cap & out queues lock */
+
+ const struct amvdec_format *fmt_out;
+ u32 pixfmt_cap;
+
+ u32 width;
+ u32 height;
+ u32 colorspace;
+ u8 ycbcr_enc;
+ u8 quantization;
+ u8 xfer_func;
+
+ struct v4l2_fract pixelaspect;
+
+ atomic_t esparser_queued_bufs;
+ struct work_struct esparser_queue_work;
+
+ unsigned int streamon_cap, streamon_out;
+ unsigned int sequence_cap;
+ unsigned int should_stop;
+ unsigned int keyframe_found;
+ unsigned int num_dst_bufs;
+
+ u8 canvas_alloc[MAX_CANVAS];
+ u32 canvas_num;
+
+ void *vififo_vaddr;
+ dma_addr_t vififo_paddr;
+ u32 vififo_size;
+
+ struct list_head bufs_recycle;
+ struct mutex bufs_recycle_lock; /* bufs_recycle list lock */
+ struct task_struct *recycle_thread;
+
+ struct list_head timestamps;
+ spinlock_t ts_spinlock; /* timestamp list lock */
+
+ u64 last_irq_jiffies;
+ u32 last_offset;
+ u32 wrap_count;
+ u32 fw_idx_to_vb2_idx[32];
+
+ enum amvdec_status status;
+ void *priv;
+};
+
+u32 amvdec_get_output_size(struct amvdec_session *sess);
+
+#endif
diff --git a/drivers/staging/media/meson/vdec/vdec_1.c b/drivers/staging/media/meson/vdec/vdec_1.c
new file mode 100644
index 000000000000..3a15c6fc0567
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/vdec_1.c
@@ -0,0 +1,230 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2018 BayLibre, SAS
+ * Author: Maxime Jourdan <mjourdan@baylibre.com>
+ *
+ * VDEC_1 is a video decoding block that allows decoding of
+ * MPEG 1/2/4, H.263, H.264, MJPEG, VC1
+ */
+
+#include <linux/firmware.h>
+#include <linux/clk.h>
+
+#include "vdec_1.h"
+#include "vdec_helpers.h"
+#include "dos_regs.h"
+
+/* AO Registers */
+#define AO_RTI_GEN_PWR_SLEEP0 0xe8
+#define AO_RTI_GEN_PWR_ISO0 0xec
+ #define GEN_PWR_VDEC_1 (BIT(3) | BIT(2))
+
+#define MC_SIZE (4096 * 4)
+
+static int
+vdec_1_load_firmware(struct amvdec_session *sess, const char *fwname)
+{
+ const struct firmware *fw;
+ struct amvdec_core *core = sess->core;
+ struct device *dev = core->dev_dec;
+ struct amvdec_codec_ops *codec_ops = sess->fmt_out->codec_ops;
+	void *mc_addr;
+	dma_addr_t mc_addr_map;
+ int ret;
+ u32 i = 1000;
+
+ ret = request_firmware(&fw, fwname, dev);
+ if (ret < 0)
+ return -EINVAL;
+
+ if (fw->size < MC_SIZE) {
+ dev_err(dev, "Firmware size %zu is too small. Expected %u.\n",
+ fw->size, MC_SIZE);
+ ret = -EINVAL;
+ goto release_firmware;
+ }
+
+ mc_addr = dma_alloc_coherent(core->dev, MC_SIZE,
+ &mc_addr_map, GFP_KERNEL);
+ if (!mc_addr) {
+ ret = -ENOMEM;
+ goto release_firmware;
+ }
+
+ memcpy(mc_addr, fw->data, MC_SIZE);
+
+ amvdec_write_dos(core, MPSR, 0);
+ amvdec_write_dos(core, CPSR, 0);
+
+ amvdec_clear_dos_bits(core, MDEC_PIC_DC_CTRL, BIT(31));
+
+ amvdec_write_dos(core, IMEM_DMA_ADR, mc_addr_map);
+ amvdec_write_dos(core, IMEM_DMA_COUNT, MC_SIZE / 4);
+ amvdec_write_dos(core, IMEM_DMA_CTRL, (0x8000 | (7 << 16)));
+
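+	/* Wait for the IMEM DMA transfer of the firmware to complete */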
+	while (--i && amvdec_read_dos(core, IMEM_DMA_CTRL) & 0x8000)
+		;
+
+ if (i == 0) {
+ dev_err(dev, "Firmware load fail (DMA hang?)\n");
+ ret = -EINVAL;
+ goto free_mc;
+ }
+
+ if (codec_ops->load_extended_firmware)
+ ret = codec_ops->load_extended_firmware(sess,
+ fw->data + MC_SIZE,
+ fw->size - MC_SIZE);
+
+free_mc:
+ dma_free_coherent(core->dev, MC_SIZE, mc_addr, mc_addr_map);
+release_firmware:
+ release_firmware(fw);
+ return ret;
+}
+
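+/* Program the stream buffer (VIFIFO) window into the VLD and switch it to
+ * manual fill mode before the ESPARSER starts feeding it.
+ */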
+static int vdec_1_stbuf_power_up(struct amvdec_session *sess)
+{
+ struct amvdec_core *core = sess->core;
+
+ amvdec_write_dos(core, VLD_MEM_VIFIFO_CONTROL, 0);
+ amvdec_write_dos(core, VLD_MEM_VIFIFO_WRAP_COUNT, 0);
+ amvdec_write_dos(core, POWER_CTL_VLD, BIT(4));
+
+ amvdec_write_dos(core, VLD_MEM_VIFIFO_START_PTR, sess->vififo_paddr);
+ amvdec_write_dos(core, VLD_MEM_VIFIFO_CURR_PTR, sess->vififo_paddr);
+ amvdec_write_dos(core, VLD_MEM_VIFIFO_END_PTR,
+ sess->vififo_paddr + sess->vififo_size - 8);
+
+ amvdec_write_dos_bits(core, VLD_MEM_VIFIFO_CONTROL, 1);
+ amvdec_clear_dos_bits(core, VLD_MEM_VIFIFO_CONTROL, 1);
+
+ amvdec_write_dos(core, VLD_MEM_VIFIFO_BUF_CNTL, MEM_BUFCTRL_MANUAL);
+ amvdec_write_dos(core, VLD_MEM_VIFIFO_WP, sess->vififo_paddr);
+
+ amvdec_write_dos_bits(core, VLD_MEM_VIFIFO_BUF_CNTL, 1);
+ amvdec_clear_dos_bits(core, VLD_MEM_VIFIFO_BUF_CNTL, 1);
+
+ amvdec_write_dos_bits(core, VLD_MEM_VIFIFO_CONTROL,
+ (0x11 << MEM_FIFO_CNT_BIT) | MEM_FILL_ON_LEVEL |
+ MEM_CTRL_FILL_EN | MEM_CTRL_EMPTY_EN);
+
+ return 0;
+}
+
+static void vdec_1_conf_esparser(struct amvdec_session *sess)
+{
+ struct amvdec_core *core = sess->core;
+
+	/* VDEC_1 specific ESPARSER configuration */
+ amvdec_write_dos(core, DOS_GEN_CTRL0, 0);
+ amvdec_write_dos(core, VLD_MEM_VIFIFO_BUF_CNTL, 1);
+ amvdec_clear_dos_bits(core, VLD_MEM_VIFIFO_BUF_CNTL, 1);
+}
+
+static u32 vdec_1_vififo_level(struct amvdec_session *sess)
+{
+ struct amvdec_core *core = sess->core;
+
+ return amvdec_read_dos(core, VLD_MEM_VIFIFO_LEVEL);
+}
+
+static int vdec_1_stop(struct amvdec_session *sess)
+{
+ struct amvdec_core *core = sess->core;
+ struct amvdec_codec_ops *codec_ops = sess->fmt_out->codec_ops;
+
+ amvdec_write_dos(core, MPSR, 0);
+ amvdec_write_dos(core, CPSR, 0);
+ amvdec_write_dos(core, ASSIST_MBOX1_MASK, 0);
+
+ amvdec_write_dos(core, DOS_SW_RESET0, BIT(12) | BIT(11));
+ amvdec_write_dos(core, DOS_SW_RESET0, 0);
+ amvdec_read_dos(core, DOS_SW_RESET0);
+
+ /* enable vdec1 isolation */
+ regmap_write(core->regmap_ao, AO_RTI_GEN_PWR_ISO0, 0xc0);
+ /* power off vdec1 memories */
+ amvdec_write_dos(core, DOS_MEM_PD_VDEC, 0xffffffff);
+ /* power off vdec1 */
+ regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
+ GEN_PWR_VDEC_1, GEN_PWR_VDEC_1);
+
+ clk_disable_unprepare(core->vdec_1_clk);
+
+ if (sess->priv)
+ codec_ops->stop(sess);
+
+ return 0;
+}
+
+static int vdec_1_start(struct amvdec_session *sess)
+{
+ int ret;
+ struct amvdec_core *core = sess->core;
+ struct amvdec_codec_ops *codec_ops = sess->fmt_out->codec_ops;
+
+ /* Configure the vdec clk to the maximum available */
+ clk_set_rate(core->vdec_1_clk, 666666666);
+ ret = clk_prepare_enable(core->vdec_1_clk);
+ if (ret)
+ return ret;
+
+ /* Enable power for VDEC_1 */
+ regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
+ GEN_PWR_VDEC_1, 0);
+ usleep_range(10, 20);
+
+ /* Reset VDEC1 */
+ amvdec_write_dos(core, DOS_SW_RESET0, 0xfffffffc);
+ amvdec_write_dos(core, DOS_SW_RESET0, 0x00000000);
+
+ amvdec_write_dos(core, DOS_GCLK_EN0, 0x3ff);
+
+ /* enable VDEC Memories */
+ amvdec_write_dos(core, DOS_MEM_PD_VDEC, 0);
+ /* Remove VDEC1 Isolation */
+ regmap_write(core->regmap_ao, AO_RTI_GEN_PWR_ISO0, 0);
+ /* Reset DOS top registers */
+ amvdec_write_dos(core, DOS_VDEC_MCRCC_STALL_CTRL, 0);
+
+ amvdec_write_dos(core, GCLK_EN, 0x3ff);
+ amvdec_clear_dos_bits(core, MDEC_PIC_DC_CTRL, BIT(31));
+
+ vdec_1_stbuf_power_up(sess);
+
+ ret = vdec_1_load_firmware(sess, sess->fmt_out->firmware_path);
+ if (ret)
+ goto stop;
+
+ ret = codec_ops->start(sess);
+ if (ret)
+ goto stop;
+
+ /* Enable IRQ */
+ amvdec_write_dos(core, ASSIST_MBOX1_CLR_REG, 1);
+ amvdec_write_dos(core, ASSIST_MBOX1_MASK, 1);
+
+ /* Enable 2-plane output */
+ if (sess->pixfmt_cap == V4L2_PIX_FMT_NV12M)
+ amvdec_write_dos_bits(core, MDEC_PIC_DC_CTRL, BIT(17));
+ else
+ amvdec_clear_dos_bits(core, MDEC_PIC_DC_CTRL, BIT(17));
+
+ /* Enable firmware processor */
+ amvdec_write_dos(core, MPSR, 1);
+ /* Let the firmware settle */
+ usleep_range(10, 20);
+
+ return 0;
+
+stop:
+ vdec_1_stop(sess);
+ return ret;
+}
+
+struct amvdec_ops vdec_1_ops = {
+ .start = vdec_1_start,
+ .stop = vdec_1_stop,
+ .conf_esparser = vdec_1_conf_esparser,
+ .vififo_level = vdec_1_vififo_level,
+};
diff --git a/drivers/staging/media/meson/vdec/vdec_1.h b/drivers/staging/media/meson/vdec/vdec_1.h
new file mode 100644
index 000000000000..042d930c40d7
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/vdec_1.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2018 BayLibre, SAS
+ * Author: Maxime Jourdan <mjourdan@baylibre.com>
+ */
+
+#ifndef __MESON_VDEC_VDEC_1_H_
+#define __MESON_VDEC_VDEC_1_H_
+
+#include "vdec.h"
+
+extern struct amvdec_ops vdec_1_ops;
+
+#endif
diff --git a/drivers/staging/media/meson/vdec/vdec_helpers.c b/drivers/staging/media/meson/vdec/vdec_helpers.c
new file mode 100644
index 000000000000..f16948bdbf2f
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/vdec_helpers.c
@@ -0,0 +1,449 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2018 BayLibre, SAS
+ * Author: Maxime Jourdan <mjourdan@baylibre.com>
+ */
+
+#include <linux/gcd.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-event.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "vdec_helpers.h"
+
+#define NUM_CANVAS_NV12 2
+#define NUM_CANVAS_YUV420 3
+
+u32 amvdec_read_dos(struct amvdec_core *core, u32 reg)
+{
+ return readl_relaxed(core->dos_base + reg);
+}
+EXPORT_SYMBOL_GPL(amvdec_read_dos);
+
+void amvdec_write_dos(struct amvdec_core *core, u32 reg, u32 val)
+{
+ writel_relaxed(val, core->dos_base + reg);
+}
+EXPORT_SYMBOL_GPL(amvdec_write_dos);
+
+void amvdec_write_dos_bits(struct amvdec_core *core, u32 reg, u32 val)
+{
+ amvdec_write_dos(core, reg, amvdec_read_dos(core, reg) | val);
+}
+EXPORT_SYMBOL_GPL(amvdec_write_dos_bits);
+
+void amvdec_clear_dos_bits(struct amvdec_core *core, u32 reg, u32 val)
+{
+ amvdec_write_dos(core, reg, amvdec_read_dos(core, reg) & ~val);
+}
+EXPORT_SYMBOL_GPL(amvdec_clear_dos_bits);
+
+u32 amvdec_read_parser(struct amvdec_core *core, u32 reg)
+{
+ return readl_relaxed(core->esparser_base + reg);
+}
+EXPORT_SYMBOL_GPL(amvdec_read_parser);
+
+void amvdec_write_parser(struct amvdec_core *core, u32 reg, u32 val)
+{
+ writel_relaxed(val, core->esparser_base + reg);
+}
+EXPORT_SYMBOL_GPL(amvdec_write_parser);
+
+static int canvas_alloc(struct amvdec_session *sess, u8 *canvas_id)
+{
+ int ret;
+
+ if (sess->canvas_num >= MAX_CANVAS) {
+ dev_err(sess->core->dev, "Reached max number of canvas\n");
+ return -ENOMEM;
+ }
+
+ ret = meson_canvas_alloc(sess->core->canvas, canvas_id);
+ if (ret)
+ return ret;
+
+ sess->canvas_alloc[sess->canvas_num++] = *canvas_id;
+ return 0;
+}
+
+static int set_canvas_yuv420m(struct amvdec_session *sess,
+ struct vb2_buffer *vb, u32 width,
+ u32 height, u32 reg)
+{
+ struct amvdec_core *core = sess->core;
+ u8 canvas_id[NUM_CANVAS_YUV420]; /* Y U V */
+ dma_addr_t buf_paddr[NUM_CANVAS_YUV420]; /* Y U V */
+ int ret, i;
+
+ for (i = 0; i < NUM_CANVAS_YUV420; ++i) {
+ ret = canvas_alloc(sess, &canvas_id[i]);
+ if (ret)
+ return ret;
+
+ buf_paddr[i] =
+ vb2_dma_contig_plane_dma_addr(vb, i);
+ }
+
+ /* Y plane */
+ meson_canvas_config(core->canvas, canvas_id[0], buf_paddr[0],
+ width, height, MESON_CANVAS_WRAP_NONE,
+ MESON_CANVAS_BLKMODE_LINEAR,
+ MESON_CANVAS_ENDIAN_SWAP64);
+
+ /* U plane */
+ meson_canvas_config(core->canvas, canvas_id[1], buf_paddr[1],
+ width / 2, height / 2, MESON_CANVAS_WRAP_NONE,
+ MESON_CANVAS_BLKMODE_LINEAR,
+ MESON_CANVAS_ENDIAN_SWAP64);
+
+ /* V plane */
+ meson_canvas_config(core->canvas, canvas_id[2], buf_paddr[2],
+ width / 2, height / 2, MESON_CANVAS_WRAP_NONE,
+ MESON_CANVAS_BLKMODE_LINEAR,
+ MESON_CANVAS_ENDIAN_SWAP64);
+
+ amvdec_write_dos(core, reg,
+ ((canvas_id[2]) << 16) |
+ ((canvas_id[1]) << 8) |
+ (canvas_id[0]));
+
+ return 0;
+}
+
+static int set_canvas_nv12m(struct amvdec_session *sess,
+ struct vb2_buffer *vb, u32 width,
+ u32 height, u32 reg)
+{
+ struct amvdec_core *core = sess->core;
+ u8 canvas_id[NUM_CANVAS_NV12]; /* Y U/V */
+ dma_addr_t buf_paddr[NUM_CANVAS_NV12]; /* Y U/V */
+ int ret, i;
+
+ for (i = 0; i < NUM_CANVAS_NV12; ++i) {
+ ret = canvas_alloc(sess, &canvas_id[i]);
+ if (ret)
+ return ret;
+
+ buf_paddr[i] =
+ vb2_dma_contig_plane_dma_addr(vb, i);
+ }
+
+ /* Y plane */
+ meson_canvas_config(core->canvas, canvas_id[0], buf_paddr[0],
+ width, height, MESON_CANVAS_WRAP_NONE,
+ MESON_CANVAS_BLKMODE_LINEAR,
+ MESON_CANVAS_ENDIAN_SWAP64);
+
+ /* U/V plane */
+ meson_canvas_config(core->canvas, canvas_id[1], buf_paddr[1],
+ width, height / 2, MESON_CANVAS_WRAP_NONE,
+ MESON_CANVAS_BLKMODE_LINEAR,
+ MESON_CANVAS_ENDIAN_SWAP64);
+
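+	/* NV12 has a single chroma plane: its canvas ID is written to both
+	 * chroma slots of the register.
+	 */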
+ amvdec_write_dos(core, reg,
+ ((canvas_id[1]) << 16) |
+ ((canvas_id[1]) << 8) |
+ (canvas_id[0]));
+
+ return 0;
+}
+
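+/* Walk the CAPTURE buffer list, allocate one canvas per plane for each
+ * buffer and write the packed canvas indexes into the consecutive registers
+ * described by @reg_base / @reg_num.
+ */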
+int amvdec_set_canvases(struct amvdec_session *sess,
+ u32 reg_base[], u32 reg_num[])
+{
+ struct v4l2_m2m_buffer *buf;
+ u32 pixfmt = sess->pixfmt_cap;
+ u32 width = ALIGN(sess->width, 64);
+ u32 height = ALIGN(sess->height, 64);
+ u32 reg_cur = reg_base[0];
+ u32 reg_num_cur = 0;
+ u32 reg_base_cur = 0;
+ int i = 0;
+ int ret;
+
+ v4l2_m2m_for_each_dst_buf(sess->m2m_ctx, buf) {
+ if (!reg_base[reg_base_cur])
+ return -EINVAL;
+
+ reg_cur = reg_base[reg_base_cur] + reg_num_cur * 4;
+
+ switch (pixfmt) {
+ case V4L2_PIX_FMT_NV12M:
+ ret = set_canvas_nv12m(sess, &buf->vb.vb2_buf, width,
+ height, reg_cur);
+ if (ret)
+ return ret;
+ break;
+ case V4L2_PIX_FMT_YUV420M:
+ ret = set_canvas_yuv420m(sess, &buf->vb.vb2_buf, width,
+ height, reg_cur);
+ if (ret)
+ return ret;
+ break;
+ default:
+ dev_err(sess->core->dev, "Unsupported pixfmt %08X\n",
+ pixfmt);
+ return -EINVAL;
+ }
+
+ reg_num_cur++;
+ if (reg_num_cur >= reg_num[reg_base_cur]) {
+ reg_base_cur++;
+ reg_num_cur = 0;
+ }
+
+ sess->fw_idx_to_vb2_idx[i++] = buf->vb.vb2_buf.index;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(amvdec_set_canvases);
+
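+/* Add a timestamp to the list, keeping it sorted by ascending value so
+ * that timestamps queued in decode order can be consumed in presentation
+ * order.
+ */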
+void amvdec_add_ts_reorder(struct amvdec_session *sess, u64 ts, u32 offset)
+{
+ struct amvdec_timestamp *new_ts, *tmp;
+ unsigned long flags;
+
+	new_ts = kmalloc(sizeof(*new_ts), GFP_KERNEL);
+	if (!new_ts)
+		return;
+
+	new_ts->ts = ts;
+ new_ts->offset = offset;
+
+ spin_lock_irqsave(&sess->ts_spinlock, flags);
+
+ if (list_empty(&sess->timestamps))
+ goto add_tail;
+
+ list_for_each_entry(tmp, &sess->timestamps, list) {
+ if (ts <= tmp->ts) {
+ list_add_tail(&new_ts->list, &tmp->list);
+ goto unlock;
+ }
+ }
+
+add_tail:
+ list_add_tail(&new_ts->list, &sess->timestamps);
+unlock:
+ spin_unlock_irqrestore(&sess->ts_spinlock, flags);
+}
+EXPORT_SYMBOL_GPL(amvdec_add_ts_reorder);
+
+void amvdec_remove_ts(struct amvdec_session *sess, u64 ts)
+{
+ struct amvdec_timestamp *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sess->ts_spinlock, flags);
+ list_for_each_entry(tmp, &sess->timestamps, list) {
+ if (tmp->ts == ts) {
+ list_del(&tmp->list);
+ kfree(tmp);
+ goto unlock;
+ }
+ }
+ dev_warn(sess->core->dev_dec,
+ "Couldn't remove buffer with timestamp %llu from list\n", ts);
+
+unlock:
+ spin_unlock_irqrestore(&sess->ts_spinlock, flags);
+}
+EXPORT_SYMBOL_GPL(amvdec_remove_ts);
+
+static void dst_buf_done(struct amvdec_session *sess,
+ struct vb2_v4l2_buffer *vbuf,
+ u32 field,
+ u64 timestamp)
+{
+ struct device *dev = sess->core->dev_dec;
+ u32 output_size = amvdec_get_output_size(sess);
+
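+ /* Plane 0 (luma) gets the full output size; 4:2:0 chroma is half
+ * of that for NV12M, or a quarter per plane for YUV420M.
+ */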
+ switch (sess->pixfmt_cap) {
+ case V4L2_PIX_FMT_NV12M:
+ vbuf->vb2_buf.planes[0].bytesused = output_size;
+ vbuf->vb2_buf.planes[1].bytesused = output_size / 2;
+ break;
+ case V4L2_PIX_FMT_YUV420M:
+ vbuf->vb2_buf.planes[0].bytesused = output_size;
+ vbuf->vb2_buf.planes[1].bytesused = output_size / 4;
+ vbuf->vb2_buf.planes[2].bytesused = output_size / 4;
+ break;
+ }
+
+ vbuf->vb2_buf.timestamp = timestamp;
+ vbuf->sequence = sess->sequence_cap++;
+
+ if (sess->should_stop &&
+ atomic_read(&sess->esparser_queued_bufs) <= 2) {
+ const struct v4l2_event ev = { .type = V4L2_EVENT_EOS };
+
+ dev_dbg(dev, "Signaling EOS\n");
+ v4l2_event_queue_fh(&sess->fh, &ev);
+ vbuf->flags |= V4L2_BUF_FLAG_LAST;
+ } else if (sess->should_stop) {
+ dev_dbg(dev, "should_stop, %u bufs remain\n",
+ atomic_read(&sess->esparser_queued_bufs));
+ }
+
+ dev_dbg(dev, "Buffer %u done\n", vbuf->vb2_buf.index);
+ vbuf->field = field;
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_DONE);
+
+ /* Buffer done probably means the vififo got freed */
+ schedule_work(&sess->esparser_queue_work);
+}
+
+void amvdec_dst_buf_done(struct amvdec_session *sess,
+ struct vb2_v4l2_buffer *vbuf, u32 field)
+{
+ struct device *dev = sess->core->dev_dec;
+ struct amvdec_timestamp *tmp;
+ struct list_head *timestamps = &sess->timestamps;
+ u64 timestamp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sess->ts_spinlock, flags);
+ if (list_empty(timestamps)) {
+ dev_err(dev, "Buffer %u done but list is empty\n",
+ vbuf->vb2_buf.index);
+
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
+ spin_unlock_irqrestore(&sess->ts_spinlock, flags);
+ return;
+ }
+
+ tmp = list_first_entry(timestamps, struct amvdec_timestamp, list);
+ timestamp = tmp->ts;
+ list_del(&tmp->list);
+ kfree(tmp);
+ spin_unlock_irqrestore(&sess->ts_spinlock, flags);
+
+ dst_buf_done(sess, vbuf, field, timestamp);
+ atomic_dec(&sess->esparser_queued_bufs);
+}
+EXPORT_SYMBOL_GPL(amvdec_dst_buf_done);
+
+void amvdec_dst_buf_done_offset(struct amvdec_session *sess,
+ struct vb2_v4l2_buffer *vbuf,
+ u32 offset, u32 field, bool allow_drop)
+{
+ struct device *dev = sess->core->dev_dec;
+ struct amvdec_timestamp *match = NULL;
+ struct amvdec_timestamp *tmp, *n;
+ u64 timestamp = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sess->ts_spinlock, flags);
+
+ /* Look for our vififo offset to get the corresponding timestamp. */
+ list_for_each_entry_safe(tmp, n, &sess->timestamps, list) {
+ s64 delta = (s64)offset - tmp->offset;
+
+ /* Offsets reported by the codecs usually differ slightly
+ * from the ones we recorded, so allow some wiggle room.
+ * 4 KiB being the minimum packet size, this tolerance cannot
+ * match the wrong entry.
+ */
+ if (delta > (-1 * (s32)SZ_4K) && delta < SZ_4K) {
+ match = tmp;
+ break;
+ }
+
+ if (!allow_drop)
+ continue;
+
+ /* Delete any timestamp entry that appears before our target
+ * (not all src packets/timestamps lead to a frame)
+ */
+ if (delta > 0 || delta < -1 * (s32)sess->vififo_size) {
+ atomic_dec(&sess->esparser_queued_bufs);
+ list_del(&tmp->list);
+ kfree(tmp);
+ }
+ }
+
+ if (!match) {
+ dev_dbg(dev, "Buffer %u done but can't match offset (%08X)\n",
+ vbuf->vb2_buf.index, offset);
+ } else {
+ timestamp = match->ts;
+ list_del(&match->list);
+ kfree(match);
+ }
+ spin_unlock_irqrestore(&sess->ts_spinlock, flags);
+
+ dst_buf_done(sess, vbuf, field, timestamp);
+ if (match)
+ atomic_dec(&sess->esparser_queued_bufs);
+}
+EXPORT_SYMBOL_GPL(amvdec_dst_buf_done_offset);
+
+void amvdec_dst_buf_done_idx(struct amvdec_session *sess,
+ u32 buf_idx, u32 offset, u32 field)
+{
+ struct vb2_v4l2_buffer *vbuf;
+ struct device *dev = sess->core->dev_dec;
+
+ vbuf = v4l2_m2m_dst_buf_remove_by_idx(sess->m2m_ctx,
+ sess->fw_idx_to_vb2_idx[buf_idx]);
+
+ if (!vbuf) {
+ dev_err(dev,
+ "Buffer %u done but it doesn't exist in m2m_ctx\n",
+ buf_idx);
+ return;
+ }
+
+ if (offset != -1)
+ amvdec_dst_buf_done_offset(sess, vbuf, offset, field, true);
+ else
+ amvdec_dst_buf_done(sess, vbuf, field);
+}
+EXPORT_SYMBOL_GPL(amvdec_dst_buf_done_idx);
+
+void amvdec_set_par_from_dar(struct amvdec_session *sess,
+ u32 dar_num, u32 dar_den)
+{
+ u32 div;
+
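+ /* PAR = DAR * height / width, reduced to lowest terms with gcd() */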
+ sess->pixelaspect.numerator = sess->height * dar_num;
+ sess->pixelaspect.denominator = sess->width * dar_den;
+ div = gcd(sess->pixelaspect.numerator, sess->pixelaspect.denominator);
+ sess->pixelaspect.numerator /= div;
+ sess->pixelaspect.denominator /= div;
+}
+EXPORT_SYMBOL_GPL(amvdec_set_par_from_dar);
+
+void amvdec_src_change(struct amvdec_session *sess, u32 width,
+ u32 height, u32 dpb_size)
+{
+ static const struct v4l2_event ev = {
+ .type = V4L2_EVENT_SOURCE_CHANGE,
+ .u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION };
+
+ v4l2_ctrl_s_ctrl(sess->ctrl_min_buf_capture, dpb_size);
+
+ /* Check if the capture queue is already properly configured for
+ * our use case. If so, keep decoding with it and do not send the
+ * source change event.
+ */
+ if (sess->width == width &&
+ sess->height == height &&
+ dpb_size <= sess->num_dst_bufs) {
+ sess->fmt_out->codec_ops->resume(sess);
+ return;
+ }
+
+ sess->width = width;
+ sess->height = height;
+ sess->status = STATUS_NEEDS_RESUME;
+
+ dev_dbg(sess->core->dev, "Res. changed (%ux%u), DPB size %u\n",
+ width, height, dpb_size);
+ v4l2_event_queue_fh(&sess->fh, &ev);
+}
+EXPORT_SYMBOL_GPL(amvdec_src_change);
+
+void amvdec_abort(struct amvdec_session *sess)
+{
+ dev_info(sess->core->dev, "Aborting decoding session!\n");
+ vb2_queue_error(&sess->m2m_ctx->cap_q_ctx.q);
+ vb2_queue_error(&sess->m2m_ctx->out_q_ctx.q);
+}
+EXPORT_SYMBOL_GPL(amvdec_abort);
diff --git a/drivers/staging/media/meson/vdec/vdec_helpers.h b/drivers/staging/media/meson/vdec/vdec_helpers.h
new file mode 100644
index 000000000000..a455a9ee1cc2
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/vdec_helpers.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2018 BayLibre, SAS
+ * Author: Maxime Jourdan <mjourdan@baylibre.com>
+ */
+
+#ifndef __MESON_VDEC_HELPERS_H_
+#define __MESON_VDEC_HELPERS_H_
+
+#include "vdec.h"
+
+/**
+ * amvdec_set_canvases() - Map VB2 buffers to canvases
+ *
+ * @sess: current session
+ * @reg_base: Register bases where the canvas indexes will be written
+ * @reg_num: number of contiguous registers after each reg_base (including it)
+ */
+int amvdec_set_canvases(struct amvdec_session *sess,
+ u32 reg_base[], u32 reg_num[]);
+
+/* Helpers to read/write to the various IPs (DOS, PARSER) */
+u32 amvdec_read_dos(struct amvdec_core *core, u32 reg);
+void amvdec_write_dos(struct amvdec_core *core, u32 reg, u32 val);
+void amvdec_write_dos_bits(struct amvdec_core *core, u32 reg, u32 val);
+void amvdec_clear_dos_bits(struct amvdec_core *core, u32 reg, u32 val);
+u32 amvdec_read_parser(struct amvdec_core *core, u32 reg);
+void amvdec_write_parser(struct amvdec_core *core, u32 reg, u32 val);
+
+/**
+ * amvdec_dst_buf_done_idx() - Signal that a buffer is done decoding
+ *
+ * @sess: current session
+ * @buf_idx: hardware buffer index
+ * @offset: VIFIFO bitstream offset corresponding to the buffer
+ * @field: V4L2 interlaced field
+ */
+void amvdec_dst_buf_done_idx(struct amvdec_session *sess, u32 buf_idx,
+ u32 offset, u32 field);
+void amvdec_dst_buf_done(struct amvdec_session *sess,
+ struct vb2_v4l2_buffer *vbuf, u32 field);
+void amvdec_dst_buf_done_offset(struct amvdec_session *sess,
+ struct vb2_v4l2_buffer *vbuf,
+ u32 offset, u32 field, bool allow_drop);
+
+/**
+ * amvdec_add_ts_reorder() - Add a timestamp to the list in chronological order
+ *
+ * @sess: current session
+ * @ts: timestamp to add
+ * @offset: offset in the VIFIFO where the associated packet was written
+ */
+void amvdec_add_ts_reorder(struct amvdec_session *sess, u64 ts, u32 offset);
+void amvdec_remove_ts(struct amvdec_session *sess, u64 ts);
+
+/**
+ * amvdec_set_par_from_dar() - Set Pixel Aspect Ratio from Display Aspect Ratio
+ *
+ * @sess: current session
+ * @dar_num: numerator of the DAR
+ * @dar_den: denominator of the DAR
+ */
+void amvdec_set_par_from_dar(struct amvdec_session *sess,
+ u32 dar_num, u32 dar_den);
+
+/**
+ * amvdec_src_change() - Notify new resolution/DPB size to the core
+ *
+ * @sess: current session
+ * @width: picture width detected by the hardware
+ * @height: picture height detected by the hardware
+ * @dpb_size: Decoded Picture Buffer size (= number of buffers for decoding)
+ */
+void amvdec_src_change(struct amvdec_session *sess, u32 width,
+ u32 height, u32 dpb_size);
+
+/**
+ * amvdec_abort() - Abort the current decoding session
+ *
+ * @sess: current session
+ */
+void amvdec_abort(struct amvdec_session *sess);
+#endif
diff --git a/drivers/staging/media/meson/vdec/vdec_platform.c b/drivers/staging/media/meson/vdec/vdec_platform.c
new file mode 100644
index 000000000000..824dbc7f46f5
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/vdec_platform.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2018 BayLibre, SAS
+ * Author: Maxime Jourdan <mjourdan@baylibre.com>
+ */
+
+#include "vdec_platform.h"
+#include "vdec.h"
+
+#include "vdec_1.h"
+#include "codec_mpeg12.h"
+
+static const struct amvdec_format vdec_formats_gxbb[] = {
+ {
+ .pixfmt = V4L2_PIX_FMT_MPEG1,
+ .min_buffers = 8,
+ .max_buffers = 8,
+ .max_width = 1920,
+ .max_height = 1080,
+ .vdec_ops = &vdec_1_ops,
+ .codec_ops = &codec_mpeg12_ops,
+ .firmware_path = "meson/vdec/gxl_mpeg12.bin",
+ .pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 },
+ }, {
+ .pixfmt = V4L2_PIX_FMT_MPEG2,
+ .min_buffers = 8,
+ .max_buffers = 8,
+ .max_width = 1920,
+ .max_height = 1080,
+ .vdec_ops = &vdec_1_ops,
+ .codec_ops = &codec_mpeg12_ops,
+ .firmware_path = "meson/vdec/gxl_mpeg12.bin",
+ .pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 },
+ },
+};
+
+static const struct amvdec_format vdec_formats_gxl[] = {
+ {
+ .pixfmt = V4L2_PIX_FMT_MPEG1,
+ .min_buffers = 8,
+ .max_buffers = 8,
+ .max_width = 1920,
+ .max_height = 1080,
+ .vdec_ops = &vdec_1_ops,
+ .codec_ops = &codec_mpeg12_ops,
+ .firmware_path = "meson/vdec/gxl_mpeg12.bin",
+ .pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 },
+ }, {
+ .pixfmt = V4L2_PIX_FMT_MPEG2,
+ .min_buffers = 8,
+ .max_buffers = 8,
+ .max_width = 1920,
+ .max_height = 1080,
+ .vdec_ops = &vdec_1_ops,
+ .codec_ops = &codec_mpeg12_ops,
+ .firmware_path = "meson/vdec/gxl_mpeg12.bin",
+ .pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 },
+ },
+};
+
+static const struct amvdec_format vdec_formats_gxm[] = {
+ {
+ .pixfmt = V4L2_PIX_FMT_MPEG1,
+ .min_buffers = 8,
+ .max_buffers = 8,
+ .max_width = 1920,
+ .max_height = 1080,
+ .vdec_ops = &vdec_1_ops,
+ .codec_ops = &codec_mpeg12_ops,
+ .firmware_path = "meson/vdec/gxl_mpeg12.bin",
+ .pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 },
+ }, {
+ .pixfmt = V4L2_PIX_FMT_MPEG2,
+ .min_buffers = 8,
+ .max_buffers = 8,
+ .max_width = 1920,
+ .max_height = 1080,
+ .vdec_ops = &vdec_1_ops,
+ .codec_ops = &codec_mpeg12_ops,
+ .firmware_path = "meson/vdec/gxl_mpeg12.bin",
+ .pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 },
+ },
+};
+
+const struct vdec_platform vdec_platform_gxbb = {
+ .formats = vdec_formats_gxbb,
+ .num_formats = ARRAY_SIZE(vdec_formats_gxbb),
+ .revision = VDEC_REVISION_GXBB,
+};
+
+const struct vdec_platform vdec_platform_gxl = {
+ .formats = vdec_formats_gxl,
+ .num_formats = ARRAY_SIZE(vdec_formats_gxl),
+ .revision = VDEC_REVISION_GXL,
+};
+
+const struct vdec_platform vdec_platform_gxm = {
+ .formats = vdec_formats_gxm,
+ .num_formats = ARRAY_SIZE(vdec_formats_gxm),
+ .revision = VDEC_REVISION_GXM,
+};
diff --git a/drivers/staging/media/meson/vdec/vdec_platform.h b/drivers/staging/media/meson/vdec/vdec_platform.h
new file mode 100644
index 000000000000..f6025326db1d
--- /dev/null
+++ b/drivers/staging/media/meson/vdec/vdec_platform.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2018 BayLibre, SAS
+ * Author: Maxime Jourdan <mjourdan@baylibre.com>
+ */
+
+#ifndef __MESON_VDEC_PLATFORM_H_
+#define __MESON_VDEC_PLATFORM_H_
+
+#include "vdec.h"
+
+struct amvdec_format;
+
+enum vdec_revision {
+ VDEC_REVISION_GXBB,
+ VDEC_REVISION_GXL,
+ VDEC_REVISION_GXM,
+};
+
+struct vdec_platform {
+ const struct amvdec_format *formats;
+ const u32 num_formats;
+ enum vdec_revision revision;
+};
+
+extern const struct vdec_platform vdec_platform_gxbb;
+extern const struct vdec_platform vdec_platform_gxm;
+extern const struct vdec_platform vdec_platform_gxl;
+
+#endif
diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c
index c2c5a9cd8642..c307707480f7 100644
--- a/drivers/staging/media/omap4iss/iss_video.c
+++ b/drivers/staging/media/omap4iss/iss_video.c
@@ -533,12 +533,6 @@ iss_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
strscpy(cap->driver, ISS_VIDEO_DRIVER_NAME, sizeof(cap->driver));
strscpy(cap->card, video->video.name, sizeof(cap->card));
strscpy(cap->bus_info, "media", sizeof(cap->bus_info));
-
- if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
- cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
- else
- cap->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
-
cap->capabilities = V4L2_CAP_DEVICE_CAPS | V4L2_CAP_STREAMING
| V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT;
@@ -1272,6 +1266,11 @@ int omap4iss_video_register(struct iss_video *video, struct v4l2_device *vdev)
int ret;
video->video.v4l2_dev = vdev;
+ if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ video->video.device_caps = V4L2_CAP_VIDEO_CAPTURE;
+ else
+ video->video.device_caps = V4L2_CAP_VIDEO_OUTPUT;
+ video->video.device_caps |= V4L2_CAP_STREAMING;
ret = video_register_device(&video->video, VFL_TYPE_GRABBER, -1);
if (ret < 0)
diff --git a/drivers/staging/media/rockchip/vpu/Kconfig b/drivers/staging/media/rockchip/vpu/Kconfig
deleted file mode 100644
index fc54bbf6753d..000000000000
--- a/drivers/staging/media/rockchip/vpu/Kconfig
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-config VIDEO_ROCKCHIP_VPU
- tristate "Rockchip VPU driver"
- depends on ARCH_ROCKCHIP || COMPILE_TEST
- depends on VIDEO_DEV && VIDEO_V4L2 && MEDIA_CONTROLLER
- select VIDEOBUF2_DMA_CONTIG
- select VIDEOBUF2_VMALLOC
- select V4L2_MEM2MEM_DEV
- help
- Support for the Video Processing Unit present on Rockchip SoC,
- which accelerates video and image encoding and decoding.
- To compile this driver as a module, choose M here: the module
- will be called rockchip-vpu.
diff --git a/drivers/staging/media/rockchip/vpu/Makefile b/drivers/staging/media/rockchip/vpu/Makefile
deleted file mode 100644
index ae5d143a0bfa..000000000000
--- a/drivers/staging/media/rockchip/vpu/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_VIDEO_ROCKCHIP_VPU) += rockchip-vpu.o
-
-rockchip-vpu-y += \
- rockchip_vpu_drv.o \
- rockchip_vpu_enc.o \
- rk3288_vpu_hw.o \
- rk3288_vpu_hw_jpeg_enc.o \
- rk3399_vpu_hw.o \
- rk3399_vpu_hw_jpeg_enc.o \
- rockchip_vpu_jpeg.o
diff --git a/drivers/staging/media/rockchip/vpu/rk3288_vpu_hw.c b/drivers/staging/media/rockchip/vpu/rk3288_vpu_hw.c
deleted file mode 100644
index a5e9d183fffd..000000000000
--- a/drivers/staging/media/rockchip/vpu/rk3288_vpu_hw.c
+++ /dev/null
@@ -1,118 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Rockchip VPU codec driver
- *
- * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
- * Jeffy Chen <jeffy.chen@rock-chips.com>
- */
-
-#include <linux/clk.h>
-
-#include "rockchip_vpu.h"
-#include "rockchip_vpu_jpeg.h"
-#include "rk3288_vpu_regs.h"
-
-#define RK3288_ACLK_MAX_FREQ (400 * 1000 * 1000)
-
-/*
- * Supported formats.
- */
-
-static const struct rockchip_vpu_fmt rk3288_vpu_enc_fmts[] = {
- {
- .fourcc = V4L2_PIX_FMT_YUV420M,
- .codec_mode = RK_VPU_MODE_NONE,
- .enc_fmt = RK3288_VPU_ENC_FMT_YUV420P,
- },
- {
- .fourcc = V4L2_PIX_FMT_NV12M,
- .codec_mode = RK_VPU_MODE_NONE,
- .enc_fmt = RK3288_VPU_ENC_FMT_YUV420SP,
- },
- {
- .fourcc = V4L2_PIX_FMT_YUYV,
- .codec_mode = RK_VPU_MODE_NONE,
- .enc_fmt = RK3288_VPU_ENC_FMT_YUYV422,
- },
- {
- .fourcc = V4L2_PIX_FMT_UYVY,
- .codec_mode = RK_VPU_MODE_NONE,
- .enc_fmt = RK3288_VPU_ENC_FMT_UYVY422,
- },
- {
- .fourcc = V4L2_PIX_FMT_JPEG,
- .codec_mode = RK_VPU_MODE_JPEG_ENC,
- .max_depth = 2,
- .header_size = JPEG_HEADER_SIZE,
- .frmsize = {
- .min_width = 96,
- .max_width = 8192,
- .step_width = JPEG_MB_DIM,
- .min_height = 32,
- .max_height = 8192,
- .step_height = JPEG_MB_DIM,
- },
- },
-};
-
-static irqreturn_t rk3288_vepu_irq(int irq, void *dev_id)
-{
- struct rockchip_vpu_dev *vpu = dev_id;
- enum vb2_buffer_state state;
- u32 status, bytesused;
-
- status = vepu_read(vpu, VEPU_REG_INTERRUPT);
- bytesused = vepu_read(vpu, VEPU_REG_STR_BUF_LIMIT) / 8;
- state = (status & VEPU_REG_INTERRUPT_FRAME_RDY) ?
- VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR;
-
- vepu_write(vpu, 0, VEPU_REG_INTERRUPT);
- vepu_write(vpu, 0, VEPU_REG_AXI_CTRL);
-
- rockchip_vpu_irq_done(vpu, bytesused, state);
-
- return IRQ_HANDLED;
-}
-
-static int rk3288_vpu_hw_init(struct rockchip_vpu_dev *vpu)
-{
- /* Bump ACLK to max. possible freq. to improve performance. */
- clk_set_rate(vpu->clocks[0].clk, RK3288_ACLK_MAX_FREQ);
- return 0;
-}
-
-static void rk3288_vpu_enc_reset(struct rockchip_vpu_ctx *ctx)
-{
- struct rockchip_vpu_dev *vpu = ctx->dev;
-
- vepu_write(vpu, VEPU_REG_INTERRUPT_DIS_BIT, VEPU_REG_INTERRUPT);
- vepu_write(vpu, 0, VEPU_REG_ENC_CTRL);
- vepu_write(vpu, 0, VEPU_REG_AXI_CTRL);
-}
-
-/*
- * Supported codec ops.
- */
-
-static const struct rockchip_vpu_codec_ops rk3288_vpu_codec_ops[] = {
- [RK_VPU_MODE_JPEG_ENC] = {
- .run = rk3288_vpu_jpeg_enc_run,
- .reset = rk3288_vpu_enc_reset,
- },
-};
-
-/*
- * VPU variant.
- */
-
-const struct rockchip_vpu_variant rk3288_vpu_variant = {
- .enc_offset = 0x0,
- .enc_fmts = rk3288_vpu_enc_fmts,
- .num_enc_fmts = ARRAY_SIZE(rk3288_vpu_enc_fmts),
- .codec_ops = rk3288_vpu_codec_ops,
- .codec = RK_VPU_CODEC_JPEG,
- .vepu_irq = rk3288_vepu_irq,
- .init = rk3288_vpu_hw_init,
- .clk_names = {"aclk", "hclk"},
- .num_clocks = 2
-};
diff --git a/drivers/staging/media/rockchip/vpu/rk3288_vpu_hw_jpeg_enc.c b/drivers/staging/media/rockchip/vpu/rk3288_vpu_hw_jpeg_enc.c
deleted file mode 100644
index 06daea66fb49..000000000000
--- a/drivers/staging/media/rockchip/vpu/rk3288_vpu_hw_jpeg_enc.c
+++ /dev/null
@@ -1,125 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Rockchip VPU codec driver
- *
- * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
- */
-
-#include <asm/unaligned.h>
-#include <media/v4l2-mem2mem.h>
-#include "rockchip_vpu_jpeg.h"
-#include "rockchip_vpu.h"
-#include "rockchip_vpu_common.h"
-#include "rockchip_vpu_hw.h"
-#include "rk3288_vpu_regs.h"
-
-#define VEPU_JPEG_QUANT_TABLE_COUNT 16
-
-static void rk3288_vpu_set_src_img_ctrl(struct rockchip_vpu_dev *vpu,
- struct rockchip_vpu_ctx *ctx)
-{
- struct v4l2_pix_format_mplane *pix_fmt = &ctx->src_fmt;
- u32 reg;
-
- reg = VEPU_REG_IN_IMG_CTRL_ROW_LEN(pix_fmt->width)
- | VEPU_REG_IN_IMG_CTRL_OVRFLR_D4(0)
- | VEPU_REG_IN_IMG_CTRL_OVRFLB_D4(0)
- | VEPU_REG_IN_IMG_CTRL_FMT(ctx->vpu_src_fmt->enc_fmt);
- vepu_write_relaxed(vpu, reg, VEPU_REG_IN_IMG_CTRL);
-}
-
-static void rk3288_vpu_jpeg_enc_set_buffers(struct rockchip_vpu_dev *vpu,
- struct rockchip_vpu_ctx *ctx,
- struct vb2_buffer *src_buf)
-{
- struct v4l2_pix_format_mplane *pix_fmt = &ctx->src_fmt;
- dma_addr_t src[3];
-
- WARN_ON(pix_fmt->num_planes > 3);
-
- vepu_write_relaxed(vpu, ctx->bounce_dma_addr,
- VEPU_REG_ADDR_OUTPUT_STREAM);
- vepu_write_relaxed(vpu, ctx->bounce_size,
- VEPU_REG_STR_BUF_LIMIT);
-
- if (pix_fmt->num_planes == 1) {
- src[0] = vb2_dma_contig_plane_dma_addr(src_buf, 0);
- /* single plane formats we supported are all interlaced */
- vepu_write_relaxed(vpu, src[0], VEPU_REG_ADDR_IN_PLANE_0);
- } else if (pix_fmt->num_planes == 2) {
- src[0] = vb2_dma_contig_plane_dma_addr(src_buf, 0);
- src[1] = vb2_dma_contig_plane_dma_addr(src_buf, 1);
- vepu_write_relaxed(vpu, src[0], VEPU_REG_ADDR_IN_PLANE_0);
- vepu_write_relaxed(vpu, src[1], VEPU_REG_ADDR_IN_PLANE_1);
- } else {
- src[0] = vb2_dma_contig_plane_dma_addr(src_buf, 0);
- src[1] = vb2_dma_contig_plane_dma_addr(src_buf, 1);
- src[2] = vb2_dma_contig_plane_dma_addr(src_buf, 2);
- vepu_write_relaxed(vpu, src[0], VEPU_REG_ADDR_IN_PLANE_0);
- vepu_write_relaxed(vpu, src[1], VEPU_REG_ADDR_IN_PLANE_1);
- vepu_write_relaxed(vpu, src[2], VEPU_REG_ADDR_IN_PLANE_2);
- }
-}
-
-static void
-rk3288_vpu_jpeg_enc_set_qtable(struct rockchip_vpu_dev *vpu,
- unsigned char *luma_qtable,
- unsigned char *chroma_qtable)
-{
- u32 reg, i;
-
- for (i = 0; i < VEPU_JPEG_QUANT_TABLE_COUNT; i++) {
- reg = get_unaligned_be32(&luma_qtable[i]);
- vepu_write_relaxed(vpu, reg, VEPU_REG_JPEG_LUMA_QUAT(i));
-
- reg = get_unaligned_be32(&chroma_qtable[i]);
- vepu_write_relaxed(vpu, reg, VEPU_REG_JPEG_CHROMA_QUAT(i));
- }
-}
-
-void rk3288_vpu_jpeg_enc_run(struct rockchip_vpu_ctx *ctx)
-{
- struct rockchip_vpu_dev *vpu = ctx->dev;
- struct vb2_v4l2_buffer *src_buf, *dst_buf;
- struct rockchip_vpu_jpeg_ctx jpeg_ctx;
- u32 reg;
-
- src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
- dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
-
- memset(&jpeg_ctx, 0, sizeof(jpeg_ctx));
- jpeg_ctx.buffer = vb2_plane_vaddr(&dst_buf->vb2_buf, 0);
- jpeg_ctx.width = ctx->dst_fmt.width;
- jpeg_ctx.height = ctx->dst_fmt.height;
- jpeg_ctx.quality = ctx->jpeg_quality;
- rockchip_vpu_jpeg_header_assemble(&jpeg_ctx);
-
- /* Switch to JPEG encoder mode before writing registers */
- vepu_write_relaxed(vpu, VEPU_REG_ENC_CTRL_ENC_MODE_JPEG,
- VEPU_REG_ENC_CTRL);
-
- rk3288_vpu_set_src_img_ctrl(vpu, ctx);
- rk3288_vpu_jpeg_enc_set_buffers(vpu, ctx, &src_buf->vb2_buf);
- rk3288_vpu_jpeg_enc_set_qtable(vpu,
- rockchip_vpu_jpeg_get_qtable(&jpeg_ctx, 0),
- rockchip_vpu_jpeg_get_qtable(&jpeg_ctx, 1));
-
- reg = VEPU_REG_AXI_CTRL_OUTPUT_SWAP16
- | VEPU_REG_AXI_CTRL_INPUT_SWAP16
- | VEPU_REG_AXI_CTRL_BURST_LEN(16)
- | VEPU_REG_AXI_CTRL_OUTPUT_SWAP32
- | VEPU_REG_AXI_CTRL_INPUT_SWAP32
- | VEPU_REG_AXI_CTRL_OUTPUT_SWAP8
- | VEPU_REG_AXI_CTRL_INPUT_SWAP8;
- /* Make sure that all registers are written at this point. */
- vepu_write(vpu, reg, VEPU_REG_AXI_CTRL);
-
- reg = VEPU_REG_ENC_CTRL_WIDTH(JPEG_MB_WIDTH(ctx->src_fmt.width))
- | VEPU_REG_ENC_CTRL_HEIGHT(JPEG_MB_HEIGHT(ctx->src_fmt.height))
- | VEPU_REG_ENC_CTRL_ENC_MODE_JPEG
- | VEPU_REG_ENC_PIC_INTRA
- | VEPU_REG_ENC_CTRL_EN_BIT;
- /* Kick the watchdog and start encoding */
- schedule_delayed_work(&vpu->watchdog_work, msecs_to_jiffies(2000));
- vepu_write(vpu, reg, VEPU_REG_ENC_CTRL);
-}
diff --git a/drivers/staging/media/rockchip/vpu/rk3288_vpu_regs.h b/drivers/staging/media/rockchip/vpu/rk3288_vpu_regs.h
deleted file mode 100644
index 9d0b9bdf3297..000000000000
--- a/drivers/staging/media/rockchip/vpu/rk3288_vpu_regs.h
+++ /dev/null
@@ -1,442 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Rockchip VPU codec driver
- *
- * Copyright 2018 Google LLC.
- * Tomasz Figa <tfiga@chromium.org>
- */
-
-#ifndef RK3288_VPU_REGS_H_
-#define RK3288_VPU_REGS_H_
-
-/* Encoder registers. */
-#define VEPU_REG_INTERRUPT 0x004
-#define VEPU_REG_INTERRUPT_FRAME_RDY BIT(2)
-#define VEPU_REG_INTERRUPT_DIS_BIT BIT(1)
-#define VEPU_REG_INTERRUPT_BIT BIT(0)
-#define VEPU_REG_AXI_CTRL 0x008
-#define VEPU_REG_AXI_CTRL_OUTPUT_SWAP16 BIT(15)
-#define VEPU_REG_AXI_CTRL_INPUT_SWAP16 BIT(14)
-#define VEPU_REG_AXI_CTRL_BURST_LEN(x) ((x) << 8)
-#define VEPU_REG_AXI_CTRL_GATE_BIT BIT(4)
-#define VEPU_REG_AXI_CTRL_OUTPUT_SWAP32 BIT(3)
-#define VEPU_REG_AXI_CTRL_INPUT_SWAP32 BIT(2)
-#define VEPU_REG_AXI_CTRL_OUTPUT_SWAP8 BIT(1)
-#define VEPU_REG_AXI_CTRL_INPUT_SWAP8 BIT(0)
-#define VEPU_REG_ADDR_OUTPUT_STREAM 0x014
-#define VEPU_REG_ADDR_OUTPUT_CTRL 0x018
-#define VEPU_REG_ADDR_REF_LUMA 0x01c
-#define VEPU_REG_ADDR_REF_CHROMA 0x020
-#define VEPU_REG_ADDR_REC_LUMA 0x024
-#define VEPU_REG_ADDR_REC_CHROMA 0x028
-#define VEPU_REG_ADDR_IN_PLANE_0 0x02c
-#define VEPU_REG_ADDR_IN_PLANE_1 0x030
-#define VEPU_REG_ADDR_IN_PLANE_2 0x034
-#define VEPU_REG_ENC_CTRL 0x038
-#define VEPU_REG_ENC_CTRL_TIMEOUT_EN BIT(31)
-#define VEPU_REG_ENC_CTRL_NAL_MODE_BIT BIT(29)
-#define VEPU_REG_ENC_CTRL_WIDTH(w) ((w) << 19)
-#define VEPU_REG_ENC_CTRL_HEIGHT(h) ((h) << 10)
-#define VEPU_REG_ENC_PIC_INTER (0x0 << 3)
-#define VEPU_REG_ENC_PIC_INTRA (0x1 << 3)
-#define VEPU_REG_ENC_PIC_MVCINTER (0x2 << 3)
-#define VEPU_REG_ENC_CTRL_ENC_MODE_H264 (0x3 << 1)
-#define VEPU_REG_ENC_CTRL_ENC_MODE_JPEG (0x2 << 1)
-#define VEPU_REG_ENC_CTRL_ENC_MODE_VP8 (0x1 << 1)
-#define VEPU_REG_ENC_CTRL_EN_BIT BIT(0)
-#define VEPU_REG_IN_IMG_CTRL 0x03c
-#define VEPU_REG_IN_IMG_CTRL_ROW_LEN(x) ((x) << 12)
-#define VEPU_REG_IN_IMG_CTRL_OVRFLR_D4(x) ((x) << 10)
-#define VEPU_REG_IN_IMG_CTRL_OVRFLB_D4(x) ((x) << 6)
-#define VEPU_REG_IN_IMG_CTRL_FMT(x) ((x) << 2)
-#define VEPU_REG_ENC_CTRL0 0x040
-#define VEPU_REG_ENC_CTRL0_INIT_QP(x) ((x) << 26)
-#define VEPU_REG_ENC_CTRL0_SLICE_ALPHA(x) ((x) << 22)
-#define VEPU_REG_ENC_CTRL0_SLICE_BETA(x) ((x) << 18)
-#define VEPU_REG_ENC_CTRL0_CHROMA_QP_OFFSET(x) ((x) << 13)
-#define VEPU_REG_ENC_CTRL0_FILTER_DIS(x) ((x) << 5)
-#define VEPU_REG_ENC_CTRL0_IDR_PICID(x) ((x) << 1)
-#define VEPU_REG_ENC_CTRL0_CONSTR_INTRA_PRED BIT(0)
-#define VEPU_REG_ENC_CTRL1 0x044
-#define VEPU_REG_ENC_CTRL1_PPS_ID(x) ((x) << 24)
-#define VEPU_REG_ENC_CTRL1_INTRA_PRED_MODE(x) ((x) << 16)
-#define VEPU_REG_ENC_CTRL1_FRAME_NUM(x) ((x))
-#define VEPU_REG_ENC_CTRL2 0x048
-#define VEPU_REG_ENC_CTRL2_DEBLOCKING_FILETER_MODE(x) ((x) << 30)
-#define VEPU_REG_ENC_CTRL2_H264_SLICE_SIZE(x) ((x) << 23)
-#define VEPU_REG_ENC_CTRL2_DISABLE_QUARTER_PIXMV BIT(22)
-#define VEPU_REG_ENC_CTRL2_TRANS8X8_MODE_EN BIT(21)
-#define VEPU_REG_ENC_CTRL2_CABAC_INIT_IDC(x) ((x) << 19)
-#define VEPU_REG_ENC_CTRL2_ENTROPY_CODING_MODE BIT(18)
-#define VEPU_REG_ENC_CTRL2_H264_INTER4X4_MODE BIT(17)
-#define VEPU_REG_ENC_CTRL2_H264_STREAM_MODE BIT(16)
-#define VEPU_REG_ENC_CTRL2_INTRA16X16_MODE(x) ((x))
-#define VEPU_REG_ENC_CTRL3 0x04c
-#define VEPU_REG_ENC_CTRL3_MUTIMV_EN BIT(30)
-#define VEPU_REG_ENC_CTRL3_MV_PENALTY_1_4P(x) ((x) << 20)
-#define VEPU_REG_ENC_CTRL3_MV_PENALTY_4P(x) ((x) << 10)
-#define VEPU_REG_ENC_CTRL3_MV_PENALTY_1P(x) ((x))
-#define VEPU_REG_ENC_CTRL4 0x050
-#define VEPU_REG_ENC_CTRL4_MV_PENALTY_16X8_8X16(x) ((x) << 20)
-#define VEPU_REG_ENC_CTRL4_MV_PENALTY_8X8(x) ((x) << 10)
-#define VEPU_REG_ENC_CTRL4_8X4_4X8(x) ((x))
-#define VEPU_REG_ENC_CTRL5 0x054
-#define VEPU_REG_ENC_CTRL5_MACROBLOCK_PENALTY(x) ((x) << 24)
-#define VEPU_REG_ENC_CTRL5_COMPLETE_SLICES(x) ((x) << 16)
-#define VEPU_REG_ENC_CTRL5_INTER_MODE(x) ((x))
-#define VEPU_REG_STR_HDR_REM_MSB 0x058
-#define VEPU_REG_STR_HDR_REM_LSB 0x05c
-#define VEPU_REG_STR_BUF_LIMIT 0x060
-#define VEPU_REG_MAD_CTRL 0x064
-#define VEPU_REG_MAD_CTRL_QP_ADJUST(x) ((x) << 28)
-#define VEPU_REG_MAD_CTRL_MAD_THREDHOLD(x) ((x) << 22)
-#define VEPU_REG_MAD_CTRL_QP_SUM_DIV2(x) ((x))
-#define VEPU_REG_ADDR_VP8_PROB_CNT 0x068
-#define VEPU_REG_QP_VAL 0x06c
-#define VEPU_REG_QP_VAL_LUM(x) ((x) << 26)
-#define VEPU_REG_QP_VAL_MAX(x) ((x) << 20)
-#define VEPU_REG_QP_VAL_MIN(x) ((x) << 14)
-#define VEPU_REG_QP_VAL_CHECKPOINT_DISTAN(x) ((x))
-#define VEPU_REG_VP8_QP_VAL(i) (0x06c + ((i) * 0x4))
-#define VEPU_REG_CHECKPOINT(i) (0x070 + ((i) * 0x4))
-#define VEPU_REG_CHECKPOINT_CHECK0(x) (((x) & 0xffff))
-#define VEPU_REG_CHECKPOINT_CHECK1(x) (((x) & 0xffff) << 16)
-#define VEPU_REG_CHECKPOINT_RESULT(x) ((((x) >> (16 - 16 \
- * (i & 1))) & 0xffff) \
- * 32)
-#define VEPU_REG_CHKPT_WORD_ERR(i) (0x084 + ((i) * 0x4))
-#define VEPU_REG_CHKPT_WORD_ERR_CHK0(x) (((x) & 0xffff))
-#define VEPU_REG_CHKPT_WORD_ERR_CHK1(x) (((x) & 0xffff) << 16)
-#define VEPU_REG_VP8_BOOL_ENC 0x08c
-#define VEPU_REG_CHKPT_DELTA_QP 0x090
-#define VEPU_REG_CHKPT_DELTA_QP_CHK0(x) (((x) & 0x0f) << 0)
-#define VEPU_REG_CHKPT_DELTA_QP_CHK1(x) (((x) & 0x0f) << 4)
-#define VEPU_REG_CHKPT_DELTA_QP_CHK2(x) (((x) & 0x0f) << 8)
-#define VEPU_REG_CHKPT_DELTA_QP_CHK3(x) (((x) & 0x0f) << 12)
-#define VEPU_REG_CHKPT_DELTA_QP_CHK4(x) (((x) & 0x0f) << 16)
-#define VEPU_REG_CHKPT_DELTA_QP_CHK5(x) (((x) & 0x0f) << 20)
-#define VEPU_REG_CHKPT_DELTA_QP_CHK6(x) (((x) & 0x0f) << 24)
-#define VEPU_REG_VP8_CTRL0 0x090
-#define VEPU_REG_RLC_CTRL 0x094
-#define VEPU_REG_RLC_CTRL_STR_OFFS_SHIFT 23
-#define VEPU_REG_RLC_CTRL_STR_OFFS_MASK (0x3f << 23)
-#define VEPU_REG_RLC_CTRL_RLC_SUM(x) ((x))
-#define VEPU_REG_MB_CTRL 0x098
-#define VEPU_REG_MB_CNT_OUT(x) (((x) & 0xffff))
-#define VEPU_REG_MB_CNT_SET(x) (((x) & 0xffff) << 16)
-#define VEPU_REG_ADDR_NEXT_PIC 0x09c
-#define VEPU_REG_JPEG_LUMA_QUAT(i) (0x100 + ((i) * 0x4))
-#define VEPU_REG_JPEG_CHROMA_QUAT(i) (0x140 + ((i) * 0x4))
-#define VEPU_REG_STABILIZATION_OUTPUT 0x0A0
-#define VEPU_REG_ADDR_CABAC_TBL 0x0cc
-#define VEPU_REG_ADDR_MV_OUT 0x0d0
-#define VEPU_REG_RGB_YUV_COEFF(i) (0x0d4 + ((i) * 0x4))
-#define VEPU_REG_RGB_MASK_MSB 0x0dc
-#define VEPU_REG_INTRA_AREA_CTRL 0x0e0
-#define VEPU_REG_CIR_INTRA_CTRL 0x0e4
-#define VEPU_REG_INTRA_SLICE_BITMAP(i) (0x0e8 + ((i) * 0x4))
-#define VEPU_REG_ADDR_VP8_DCT_PART(i) (0x0e8 + ((i) * 0x4))
-#define VEPU_REG_FIRST_ROI_AREA 0x0f0
-#define VEPU_REG_SECOND_ROI_AREA 0x0f4
-#define VEPU_REG_MVC_CTRL 0x0f8
-#define VEPU_REG_MVC_CTRL_MV16X16_FAVOR(x) ((x) << 28)
-#define VEPU_REG_VP8_INTRA_PENALTY(i) (0x100 + ((i) * 0x4))
-#define VEPU_REG_ADDR_VP8_SEG_MAP 0x11c
-#define VEPU_REG_VP8_SEG_QP(i) (0x120 + ((i) * 0x4))
-#define VEPU_REG_DMV_4P_1P_PENALTY(i) (0x180 + ((i) * 0x4))
-#define VEPU_REG_DMV_4P_1P_PENALTY_BIT(x, i) ((x) << (i) * 8)
-#define VEPU_REG_DMV_QPEL_PENALTY(i) (0x200 + ((i) * 0x4))
-#define VEPU_REG_DMV_QPEL_PENALTY_BIT(x, i) ((x) << (i) * 8)
-#define VEPU_REG_VP8_CTRL1 0x280
-#define VEPU_REG_VP8_BIT_COST_GOLDEN 0x284
-#define VEPU_REG_VP8_LOOP_FLT_DELTA(i) (0x288 + ((i) * 0x4))
-
-/* Decoder registers. */
-#define VDPU_REG_INTERRUPT 0x004
-#define VDPU_REG_INTERRUPT_DEC_PIC_INF BIT(24)
-#define VDPU_REG_INTERRUPT_DEC_TIMEOUT BIT(18)
-#define VDPU_REG_INTERRUPT_DEC_SLICE_INT BIT(17)
-#define VDPU_REG_INTERRUPT_DEC_ERROR_INT BIT(16)
-#define VDPU_REG_INTERRUPT_DEC_ASO_INT BIT(15)
-#define VDPU_REG_INTERRUPT_DEC_BUFFER_INT BIT(14)
-#define VDPU_REG_INTERRUPT_DEC_BUS_INT BIT(13)
-#define VDPU_REG_INTERRUPT_DEC_RDY_INT BIT(12)
-#define VDPU_REG_INTERRUPT_DEC_IRQ BIT(8)
-#define VDPU_REG_INTERRUPT_DEC_IRQ_DIS BIT(4)
-#define VDPU_REG_INTERRUPT_DEC_E BIT(0)
-#define VDPU_REG_CONFIG 0x008
-#define VDPU_REG_CONFIG_DEC_AXI_RD_ID(x) (((x) & 0xff) << 24)
-#define VDPU_REG_CONFIG_DEC_TIMEOUT_E BIT(23)
-#define VDPU_REG_CONFIG_DEC_STRSWAP32_E BIT(22)
-#define VDPU_REG_CONFIG_DEC_STRENDIAN_E BIT(21)
-#define VDPU_REG_CONFIG_DEC_INSWAP32_E BIT(20)
-#define VDPU_REG_CONFIG_DEC_OUTSWAP32_E BIT(19)
-#define VDPU_REG_CONFIG_DEC_DATA_DISC_E BIT(18)
-#define VDPU_REG_CONFIG_TILED_MODE_MSB BIT(17)
-#define VDPU_REG_CONFIG_DEC_OUT_TILED_E BIT(17)
-#define VDPU_REG_CONFIG_DEC_LATENCY(x) (((x) & 0x3f) << 11)
-#define VDPU_REG_CONFIG_DEC_CLK_GATE_E BIT(10)
-#define VDPU_REG_CONFIG_DEC_IN_ENDIAN BIT(9)
-#define VDPU_REG_CONFIG_DEC_OUT_ENDIAN BIT(8)
-#define VDPU_REG_CONFIG_PRIORITY_MODE(x) (((x) & 0x7) << 5)
-#define VDPU_REG_CONFIG_TILED_MODE_LSB BIT(7)
-#define VDPU_REG_CONFIG_DEC_ADV_PRE_DIS BIT(6)
-#define VDPU_REG_CONFIG_DEC_SCMD_DIS BIT(5)
-#define VDPU_REG_CONFIG_DEC_MAX_BURST(x) (((x) & 0x1f) << 0)
-#define VDPU_REG_DEC_CTRL0 0x00c
-#define VDPU_REG_DEC_CTRL0_DEC_MODE(x) (((x) & 0xf) << 28)
-#define VDPU_REG_DEC_CTRL0_RLC_MODE_E BIT(27)
-#define VDPU_REG_DEC_CTRL0_SKIP_MODE BIT(26)
-#define VDPU_REG_DEC_CTRL0_DIVX3_E BIT(25)
-#define VDPU_REG_DEC_CTRL0_PJPEG_E BIT(24)
-#define VDPU_REG_DEC_CTRL0_PIC_INTERLACE_E BIT(23)
-#define VDPU_REG_DEC_CTRL0_PIC_FIELDMODE_E BIT(22)
-#define VDPU_REG_DEC_CTRL0_PIC_B_E BIT(21)
-#define VDPU_REG_DEC_CTRL0_PIC_INTER_E BIT(20)
-#define VDPU_REG_DEC_CTRL0_PIC_TOPFIELD_E BIT(19)
-#define VDPU_REG_DEC_CTRL0_FWD_INTERLACE_E BIT(18)
-#define VDPU_REG_DEC_CTRL0_SORENSON_E BIT(17)
-#define VDPU_REG_DEC_CTRL0_REF_TOPFIELD_E BIT(16)
-#define VDPU_REG_DEC_CTRL0_DEC_OUT_DIS BIT(15)
-#define VDPU_REG_DEC_CTRL0_FILTERING_DIS BIT(14)
-#define VDPU_REG_DEC_CTRL0_WEBP_E BIT(13)
-#define VDPU_REG_DEC_CTRL0_MVC_E BIT(13)
-#define VDPU_REG_DEC_CTRL0_PIC_FIXED_QUANT BIT(13)
-#define VDPU_REG_DEC_CTRL0_WRITE_MVS_E BIT(12)
-#define VDPU_REG_DEC_CTRL0_REFTOPFIRST_E BIT(11)
-#define VDPU_REG_DEC_CTRL0_SEQ_MBAFF_E BIT(10)
-#define VDPU_REG_DEC_CTRL0_PICORD_COUNT_E BIT(9)
-#define VDPU_REG_DEC_CTRL0_DEC_AHB_HLOCK_E BIT(8)
-#define VDPU_REG_DEC_CTRL0_DEC_AXI_WR_ID(x) (((x) & 0xff) << 0)
-#define VDPU_REG_DEC_CTRL1 0x010
-#define VDPU_REG_DEC_CTRL1_PIC_MB_WIDTH(x) (((x) & 0x1ff) << 23)
-#define VDPU_REG_DEC_CTRL1_MB_WIDTH_OFF(x) (((x) & 0xf) << 19)
-#define VDPU_REG_DEC_CTRL1_PIC_MB_HEIGHT_P(x) (((x) & 0xff) << 11)
-#define VDPU_REG_DEC_CTRL1_MB_HEIGHT_OFF(x) (((x) & 0xf) << 7)
-#define VDPU_REG_DEC_CTRL1_ALT_SCAN_E BIT(6)
-#define VDPU_REG_DEC_CTRL1_TOPFIELDFIRST_E BIT(5)
-#define VDPU_REG_DEC_CTRL1_REF_FRAMES(x) (((x) & 0x1f) << 0)
-#define VDPU_REG_DEC_CTRL1_PIC_MB_W_EXT(x) (((x) & 0x7) << 3)
-#define VDPU_REG_DEC_CTRL1_PIC_MB_H_EXT(x) (((x) & 0x7) << 0)
-#define VDPU_REG_DEC_CTRL1_PIC_REFER_FLAG BIT(0)
-#define VDPU_REG_DEC_CTRL2 0x014
-#define VDPU_REG_DEC_CTRL2_STRM_START_BIT(x) (((x) & 0x3f) << 26)
-#define VDPU_REG_DEC_CTRL2_SYNC_MARKER_E BIT(25)
-#define VDPU_REG_DEC_CTRL2_TYPE1_QUANT_E BIT(24)
-#define VDPU_REG_DEC_CTRL2_CH_QP_OFFSET(x) (((x) & 0x1f) << 19)
-#define VDPU_REG_DEC_CTRL2_CH_QP_OFFSET2(x) (((x) & 0x1f) << 14)
-#define VDPU_REG_DEC_CTRL2_FIELDPIC_FLAG_E BIT(0)
-#define VDPU_REG_DEC_CTRL2_INTRADC_VLC_THR(x) (((x) & 0x7) << 16)
-#define VDPU_REG_DEC_CTRL2_VOP_TIME_INCR(x) (((x) & 0xffff) << 0)
-#define VDPU_REG_DEC_CTRL2_DQ_PROFILE BIT(24)
-#define VDPU_REG_DEC_CTRL2_DQBI_LEVEL BIT(23)
-#define VDPU_REG_DEC_CTRL2_RANGE_RED_FRM_E BIT(22)
-#define VDPU_REG_DEC_CTRL2_FAST_UVMC_E BIT(20)
-#define VDPU_REG_DEC_CTRL2_TRANSDCTAB BIT(17)
-#define VDPU_REG_DEC_CTRL2_TRANSACFRM(x) (((x) & 0x3) << 15)
-#define VDPU_REG_DEC_CTRL2_TRANSACFRM2(x) (((x) & 0x3) << 13)
-#define VDPU_REG_DEC_CTRL2_MB_MODE_TAB(x) (((x) & 0x7) << 10)
-#define VDPU_REG_DEC_CTRL2_MVTAB(x) (((x) & 0x7) << 7)
-#define VDPU_REG_DEC_CTRL2_CBPTAB(x) (((x) & 0x7) << 4)
-#define VDPU_REG_DEC_CTRL2_2MV_BLK_PAT_TAB(x) (((x) & 0x3) << 2)
-#define VDPU_REG_DEC_CTRL2_4MV_BLK_PAT_TAB(x) (((x) & 0x3) << 0)
-#define VDPU_REG_DEC_CTRL2_QSCALE_TYPE BIT(24)
-#define VDPU_REG_DEC_CTRL2_CON_MV_E BIT(4)
-#define VDPU_REG_DEC_CTRL2_INTRA_DC_PREC(x) (((x) & 0x3) << 2)
-#define VDPU_REG_DEC_CTRL2_INTRA_VLC_TAB BIT(1)
-#define VDPU_REG_DEC_CTRL2_FRAME_PRED_DCT BIT(0)
-#define VDPU_REG_DEC_CTRL2_JPEG_QTABLES(x) (((x) & 0x3) << 11)
-#define VDPU_REG_DEC_CTRL2_JPEG_MODE(x) (((x) & 0x7) << 8)
-#define VDPU_REG_DEC_CTRL2_JPEG_FILRIGHT_E BIT(7)
-#define VDPU_REG_DEC_CTRL2_JPEG_STREAM_ALL BIT(6)
-#define VDPU_REG_DEC_CTRL2_CR_AC_VLCTABLE BIT(5)
-#define VDPU_REG_DEC_CTRL2_CB_AC_VLCTABLE BIT(4)
-#define VDPU_REG_DEC_CTRL2_CR_DC_VLCTABLE BIT(3)
-#define VDPU_REG_DEC_CTRL2_CB_DC_VLCTABLE BIT(2)
-#define VDPU_REG_DEC_CTRL2_CR_DC_VLCTABLE3 BIT(1)
-#define VDPU_REG_DEC_CTRL2_CB_DC_VLCTABLE3 BIT(0)
-#define VDPU_REG_DEC_CTRL2_STRM1_START_BIT(x) (((x) & 0x3f) << 18)
-#define VDPU_REG_DEC_CTRL2_HUFFMAN_E BIT(17)
-#define VDPU_REG_DEC_CTRL2_MULTISTREAM_E BIT(16)
-#define VDPU_REG_DEC_CTRL2_BOOLEAN_VALUE(x) (((x) & 0xff) << 8)
-#define VDPU_REG_DEC_CTRL2_BOOLEAN_RANGE(x) (((x) & 0xff) << 0)
-#define VDPU_REG_DEC_CTRL2_ALPHA_OFFSET(x) (((x) & 0x1f) << 5)
-#define VDPU_REG_DEC_CTRL2_BETA_OFFSET(x) (((x) & 0x1f) << 0)
-#define VDPU_REG_DEC_CTRL3 0x018
-#define VDPU_REG_DEC_CTRL3_START_CODE_E BIT(31)
-#define VDPU_REG_DEC_CTRL3_INIT_QP(x) (((x) & 0x3f) << 25)
-#define VDPU_REG_DEC_CTRL3_CH_8PIX_ILEAV_E BIT(24)
-#define VDPU_REG_DEC_CTRL3_STREAM_LEN_EXT(x) (((x) & 0xff) << 24)
-#define VDPU_REG_DEC_CTRL3_STREAM_LEN(x) (((x) & 0xffffff) << 0)
-#define VDPU_REG_DEC_CTRL4 0x01c
-#define VDPU_REG_DEC_CTRL4_CABAC_E BIT(31)
-#define VDPU_REG_DEC_CTRL4_BLACKWHITE_E BIT(30)
-#define VDPU_REG_DEC_CTRL4_DIR_8X8_INFER_E BIT(29)
-#define VDPU_REG_DEC_CTRL4_WEIGHT_PRED_E BIT(28)
-#define VDPU_REG_DEC_CTRL4_WEIGHT_BIPR_IDC(x) (((x) & 0x3) << 26)
-#define VDPU_REG_DEC_CTRL4_AVS_H264_H_EXT BIT(25)
-#define VDPU_REG_DEC_CTRL4_FRAMENUM_LEN(x) (((x) & 0x1f) << 16)
-#define VDPU_REG_DEC_CTRL4_FRAMENUM(x) (((x) & 0xffff) << 0)
-#define VDPU_REG_DEC_CTRL4_BITPLANE0_E BIT(31)
-#define VDPU_REG_DEC_CTRL4_BITPLANE1_E BIT(30)
-#define VDPU_REG_DEC_CTRL4_BITPLANE2_E BIT(29)
-#define VDPU_REG_DEC_CTRL4_ALT_PQUANT(x) (((x) & 0x1f) << 24)
-#define VDPU_REG_DEC_CTRL4_DQ_EDGES(x) (((x) & 0xf) << 20)
-#define VDPU_REG_DEC_CTRL4_TTMBF BIT(19)
-#define VDPU_REG_DEC_CTRL4_PQINDEX(x) (((x) & 0x1f) << 14)
-#define VDPU_REG_DEC_CTRL4_VC1_HEIGHT_EXT BIT(13)
-#define VDPU_REG_DEC_CTRL4_BILIN_MC_E BIT(12)
-#define VDPU_REG_DEC_CTRL4_UNIQP_E BIT(11)
-#define VDPU_REG_DEC_CTRL4_HALFQP_E BIT(10)
-#define VDPU_REG_DEC_CTRL4_TTFRM(x) (((x) & 0x3) << 8)
-#define VDPU_REG_DEC_CTRL4_2ND_BYTE_EMUL_E BIT(7)
-#define VDPU_REG_DEC_CTRL4_DQUANT_E BIT(6)
-#define VDPU_REG_DEC_CTRL4_VC1_ADV_E BIT(5)
-#define VDPU_REG_DEC_CTRL4_PJPEG_FILDOWN_E BIT(26)
-#define VDPU_REG_DEC_CTRL4_PJPEG_WDIV8 BIT(25)
-#define VDPU_REG_DEC_CTRL4_PJPEG_HDIV8 BIT(24)
-#define VDPU_REG_DEC_CTRL4_PJPEG_AH(x) (((x) & 0xf) << 20)
-#define VDPU_REG_DEC_CTRL4_PJPEG_AL(x) (((x) & 0xf) << 16)
-#define VDPU_REG_DEC_CTRL4_PJPEG_SS(x) (((x) & 0xff) << 8)
-#define VDPU_REG_DEC_CTRL4_PJPEG_SE(x) (((x) & 0xff) << 0)
-#define VDPU_REG_DEC_CTRL4_DCT1_START_BIT(x) (((x) & 0x3f) << 26)
-#define VDPU_REG_DEC_CTRL4_DCT2_START_BIT(x) (((x) & 0x3f) << 20)
-#define VDPU_REG_DEC_CTRL4_CH_MV_RES BIT(13)
-#define VDPU_REG_DEC_CTRL4_INIT_DC_MATCH0(x) (((x) & 0x7) << 9)
-#define VDPU_REG_DEC_CTRL4_INIT_DC_MATCH1(x) (((x) & 0x7) << 6)
-#define VDPU_REG_DEC_CTRL4_VP7_VERSION BIT(5)
-#define VDPU_REG_DEC_CTRL5 0x020
-#define VDPU_REG_DEC_CTRL5_CONST_INTRA_E BIT(31)
-#define VDPU_REG_DEC_CTRL5_FILT_CTRL_PRES BIT(30)
-#define VDPU_REG_DEC_CTRL5_RDPIC_CNT_PRES BIT(29)
-#define VDPU_REG_DEC_CTRL5_8X8TRANS_FLAG_E BIT(28)
-#define VDPU_REG_DEC_CTRL5_REFPIC_MK_LEN(x) (((x) & 0x7ff) << 17)
-#define VDPU_REG_DEC_CTRL5_IDR_PIC_E BIT(16)
-#define VDPU_REG_DEC_CTRL5_IDR_PIC_ID(x) (((x) & 0xffff) << 0)
-#define VDPU_REG_DEC_CTRL5_MV_SCALEFACTOR(x) (((x) & 0xff) << 24)
-#define VDPU_REG_DEC_CTRL5_REF_DIST_FWD(x) (((x) & 0x1f) << 19)
-#define VDPU_REG_DEC_CTRL5_REF_DIST_BWD(x) (((x) & 0x1f) << 14)
-#define VDPU_REG_DEC_CTRL5_LOOP_FILT_LIMIT(x) (((x) & 0xf) << 14)
-#define VDPU_REG_DEC_CTRL5_VARIANCE_TEST_E BIT(13)
-#define VDPU_REG_DEC_CTRL5_MV_THRESHOLD(x) (((x) & 0x7) << 10)
-#define VDPU_REG_DEC_CTRL5_VAR_THRESHOLD(x) (((x) & 0x3ff) << 0)
-#define VDPU_REG_DEC_CTRL5_DIVX_IDCT_E BIT(8)
-#define VDPU_REG_DEC_CTRL5_DIVX3_SLICE_SIZE(x) (((x) & 0xff) << 0)
-#define VDPU_REG_DEC_CTRL5_PJPEG_REST_FREQ(x) (((x) & 0xffff) << 0)
-#define VDPU_REG_DEC_CTRL5_RV_PROFILE(x) (((x) & 0x3) << 30)
-#define VDPU_REG_DEC_CTRL5_RV_OSV_QUANT(x) (((x) & 0x3) << 28)
-#define VDPU_REG_DEC_CTRL5_RV_FWD_SCALE(x) (((x) & 0x3fff) << 14)
-#define VDPU_REG_DEC_CTRL5_RV_BWD_SCALE(x) (((x) & 0x3fff) << 0)
-#define VDPU_REG_DEC_CTRL5_INIT_DC_COMP0(x) (((x) & 0xffff) << 16)
-#define VDPU_REG_DEC_CTRL5_INIT_DC_COMP1(x) (((x) & 0xffff) << 0)
-#define VDPU_REG_DEC_CTRL6 0x024
-#define VDPU_REG_DEC_CTRL6_PPS_ID(x) (((x) & 0xff) << 24)
-#define VDPU_REG_DEC_CTRL6_REFIDX1_ACTIVE(x) (((x) & 0x1f) << 19)
-#define VDPU_REG_DEC_CTRL6_REFIDX0_ACTIVE(x) (((x) & 0x1f) << 14)
-#define VDPU_REG_DEC_CTRL6_POC_LENGTH(x) (((x) & 0xff) << 0)
-#define VDPU_REG_DEC_CTRL6_ICOMP0_E BIT(24)
-#define VDPU_REG_DEC_CTRL6_ISCALE0(x) (((x) & 0xff) << 16)
-#define VDPU_REG_DEC_CTRL6_ISHIFT0(x) (((x) & 0xffff) << 0)
-#define VDPU_REG_DEC_CTRL6_STREAM1_LEN(x) (((x) & 0xffffff) << 0)
-#define VDPU_REG_DEC_CTRL6_PIC_SLICE_AM(x) (((x) & 0x1fff) << 0)
-#define VDPU_REG_DEC_CTRL6_COEFFS_PART_AM(x) (((x) & 0xf) << 24)
-#define VDPU_REG_FWD_PIC(i) (0x028 + ((i) * 0x4))
-#define VDPU_REG_FWD_PIC_PINIT_RLIST_F5(x) (((x) & 0x1f) << 25)
-#define VDPU_REG_FWD_PIC_PINIT_RLIST_F4(x) (((x) & 0x1f) << 20)
-#define VDPU_REG_FWD_PIC_PINIT_RLIST_F3(x) (((x) & 0x1f) << 15)
-#define VDPU_REG_FWD_PIC_PINIT_RLIST_F2(x) (((x) & 0x1f) << 10)
-#define VDPU_REG_FWD_PIC_PINIT_RLIST_F1(x) (((x) & 0x1f) << 5)
-#define VDPU_REG_FWD_PIC_PINIT_RLIST_F0(x) (((x) & 0x1f) << 0)
-#define VDPU_REG_FWD_PIC1_ICOMP1_E BIT(24)
-#define VDPU_REG_FWD_PIC1_ISCALE1(x) (((x) & 0xff) << 16)
-#define VDPU_REG_FWD_PIC1_ISHIFT1(x) (((x) & 0xffff) << 0)
-#define VDPU_REG_FWD_PIC1_SEGMENT_BASE(x) ((x) << 0)
-#define VDPU_REG_FWD_PIC1_SEGMENT_UPD_E BIT(1)
-#define VDPU_REG_FWD_PIC1_SEGMENT_E BIT(0)
-#define VDPU_REG_DEC_CTRL7 0x02c
-#define VDPU_REG_DEC_CTRL7_PINIT_RLIST_F15(x) (((x) & 0x1f) << 25)
-#define VDPU_REG_DEC_CTRL7_PINIT_RLIST_F14(x) (((x) & 0x1f) << 20)
-#define VDPU_REG_DEC_CTRL7_PINIT_RLIST_F13(x) (((x) & 0x1f) << 15)
-#define VDPU_REG_DEC_CTRL7_PINIT_RLIST_F12(x) (((x) & 0x1f) << 10)
-#define VDPU_REG_DEC_CTRL7_PINIT_RLIST_F11(x) (((x) & 0x1f) << 5)
-#define VDPU_REG_DEC_CTRL7_PINIT_RLIST_F10(x) (((x) & 0x1f) << 0)
-#define VDPU_REG_DEC_CTRL7_ICOMP2_E BIT(24)
-#define VDPU_REG_DEC_CTRL7_ISCALE2(x) (((x) & 0xff) << 16)
-#define VDPU_REG_DEC_CTRL7_ISHIFT2(x) (((x) & 0xffff) << 0)
-#define VDPU_REG_DEC_CTRL7_DCT3_START_BIT(x) (((x) & 0x3f) << 24)
-#define VDPU_REG_DEC_CTRL7_DCT4_START_BIT(x) (((x) & 0x3f) << 18)
-#define VDPU_REG_DEC_CTRL7_DCT5_START_BIT(x) (((x) & 0x3f) << 12)
-#define VDPU_REG_DEC_CTRL7_DCT6_START_BIT(x) (((x) & 0x3f) << 6)
-#define VDPU_REG_DEC_CTRL7_DCT7_START_BIT(x) (((x) & 0x3f) << 0)
-#define VDPU_REG_ADDR_STR 0x030
-#define VDPU_REG_ADDR_DST 0x034
-#define VDPU_REG_ADDR_REF(i) (0x038 + ((i) * 0x4))
-#define VDPU_REG_ADDR_REF_FIELD_E BIT(1)
-#define VDPU_REG_ADDR_REF_TOPC_E BIT(0)
-#define VDPU_REG_REF_PIC(i) (0x078 + ((i) * 0x4))
-#define VDPU_REG_REF_PIC_FILT_TYPE_E BIT(31)
-#define VDPU_REG_REF_PIC_FILT_SHARPNESS(x) (((x) & 0x7) << 28)
-#define VDPU_REG_REF_PIC_MB_ADJ_0(x) (((x) & 0x7f) << 21)
-#define VDPU_REG_REF_PIC_MB_ADJ_1(x) (((x) & 0x7f) << 14)
-#define VDPU_REG_REF_PIC_MB_ADJ_2(x) (((x) & 0x7f) << 7)
-#define VDPU_REG_REF_PIC_MB_ADJ_3(x) (((x) & 0x7f) << 0)
-#define VDPU_REG_REF_PIC_REFER1_NBR(x) (((x) & 0xffff) << 16)
-#define VDPU_REG_REF_PIC_REFER0_NBR(x) (((x) & 0xffff) << 0)
-#define VDPU_REG_REF_PIC_LF_LEVEL_0(x) (((x) & 0x3f) << 18)
-#define VDPU_REG_REF_PIC_LF_LEVEL_1(x) (((x) & 0x3f) << 12)
-#define VDPU_REG_REF_PIC_LF_LEVEL_2(x) (((x) & 0x3f) << 6)
-#define VDPU_REG_REF_PIC_LF_LEVEL_3(x) (((x) & 0x3f) << 0)
-#define VDPU_REG_REF_PIC_QUANT_DELTA_0(x) (((x) & 0x1f) << 27)
-#define VDPU_REG_REF_PIC_QUANT_DELTA_1(x) (((x) & 0x1f) << 22)
-#define VDPU_REG_REF_PIC_QUANT_0(x) (((x) & 0x7ff) << 11)
-#define VDPU_REG_REF_PIC_QUANT_1(x) (((x) & 0x7ff) << 0)
-#define VDPU_REG_LT_REF 0x098
-#define VDPU_REG_VALID_REF 0x09c
-#define VDPU_REG_ADDR_QTABLE 0x0a0
-#define VDPU_REG_ADDR_DIR_MV 0x0a4
-#define VDPU_REG_BD_REF_PIC(i) (0x0a8 + ((i) * 0x4))
-#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_B2(x) (((x) & 0x1f) << 25)
-#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_F2(x) (((x) & 0x1f) << 20)
-#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_B1(x) (((x) & 0x1f) << 15)
-#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_F1(x) (((x) & 0x1f) << 10)
-#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_B0(x) (((x) & 0x1f) << 5)
-#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_F0(x) (((x) & 0x1f) << 0)
-#define VDPU_REG_BD_REF_PIC_PRED_TAP_2_M1(x) (((x) & 0x3) << 10)
-#define VDPU_REG_BD_REF_PIC_PRED_TAP_2_4(x) (((x) & 0x3) << 8)
-#define VDPU_REG_BD_REF_PIC_PRED_TAP_4_M1(x) (((x) & 0x3) << 6)
-#define VDPU_REG_BD_REF_PIC_PRED_TAP_4_4(x) (((x) & 0x3) << 4)
-#define VDPU_REG_BD_REF_PIC_PRED_TAP_6_M1(x) (((x) & 0x3) << 2)
-#define VDPU_REG_BD_REF_PIC_PRED_TAP_6_4(x) (((x) & 0x3) << 0)
-#define VDPU_REG_BD_REF_PIC_QUANT_DELTA_2(x) (((x) & 0x1f) << 27)
-#define VDPU_REG_BD_REF_PIC_QUANT_DELTA_3(x) (((x) & 0x1f) << 22)
-#define VDPU_REG_BD_REF_PIC_QUANT_2(x) (((x) & 0x7ff) << 11)
-#define VDPU_REG_BD_REF_PIC_QUANT_3(x) (((x) & 0x7ff) << 0)
-#define VDPU_REG_BD_P_REF_PIC 0x0bc
-#define VDPU_REG_BD_P_REF_PIC_QUANT_DELTA_4(x) (((x) & 0x1f) << 27)
-#define VDPU_REG_BD_P_REF_PIC_PINIT_RLIST_F3(x) (((x) & 0x1f) << 25)
-#define VDPU_REG_BD_P_REF_PIC_PINIT_RLIST_F2(x) (((x) & 0x1f) << 20)
-#define VDPU_REG_BD_P_REF_PIC_PINIT_RLIST_F1(x) (((x) & 0x1f) << 15)
-#define VDPU_REG_BD_P_REF_PIC_PINIT_RLIST_F0(x) (((x) & 0x1f) << 10)
-#define VDPU_REG_BD_P_REF_PIC_BINIT_RLIST_B15(x) (((x) & 0x1f) << 5)
-#define VDPU_REG_BD_P_REF_PIC_BINIT_RLIST_F15(x) (((x) & 0x1f) << 0)
-#define VDPU_REG_ERR_CONC 0x0c0
-#define VDPU_REG_ERR_CONC_STARTMB_X(x) (((x) & 0x1ff) << 23)
-#define VDPU_REG_ERR_CONC_STARTMB_Y(x) (((x) & 0xff) << 15)
-#define VDPU_REG_PRED_FLT 0x0c4
-#define VDPU_REG_PRED_FLT_PRED_BC_TAP_0_0(x) (((x) & 0x3ff) << 22)
-#define VDPU_REG_PRED_FLT_PRED_BC_TAP_0_1(x) (((x) & 0x3ff) << 12)
-#define VDPU_REG_PRED_FLT_PRED_BC_TAP_0_2(x) (((x) & 0x3ff) << 2)
-#define VDPU_REG_REF_BUF_CTRL 0x0cc
-#define VDPU_REG_REF_BUF_CTRL_REFBU_E BIT(31)
-#define VDPU_REG_REF_BUF_CTRL_REFBU_THR(x) (((x) & 0xfff) << 19)
-#define VDPU_REG_REF_BUF_CTRL_REFBU_PICID(x) (((x) & 0x1f) << 14)
-#define VDPU_REG_REF_BUF_CTRL_REFBU_EVAL_E BIT(13)
-#define VDPU_REG_REF_BUF_CTRL_REFBU_FPARMOD_E BIT(12)
-#define VDPU_REG_REF_BUF_CTRL_REFBU_Y_OFFSET(x) (((x) & 0x1ff) << 0)
-#define VDPU_REG_REF_BUF_CTRL2 0x0dc
-#define VDPU_REG_REF_BUF_CTRL2_REFBU2_BUF_E BIT(31)
-#define VDPU_REG_REF_BUF_CTRL2_REFBU2_THR(x) (((x) & 0xfff) << 19)
-#define VDPU_REG_REF_BUF_CTRL2_REFBU2_PICID(x) (((x) & 0x1f) << 14)
-#define VDPU_REG_REF_BUF_CTRL2_APF_THRESHOLD(x) (((x) & 0x3fff) << 0)
-
-#endif /* RK3288_VPU_REGS_H_ */
diff --git a/drivers/staging/media/rockchip/vpu/rk3399_vpu_hw.c b/drivers/staging/media/rockchip/vpu/rk3399_vpu_hw.c
deleted file mode 100644
index 6fdef61e2127..000000000000
--- a/drivers/staging/media/rockchip/vpu/rk3399_vpu_hw.c
+++ /dev/null
@@ -1,118 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Rockchip VPU codec driver
- *
- * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
- * Jeffy Chen <jeffy.chen@rock-chips.com>
- */
-
-#include <linux/clk.h>
-
-#include "rockchip_vpu.h"
-#include "rockchip_vpu_jpeg.h"
-#include "rk3399_vpu_regs.h"
-
-#define RK3399_ACLK_MAX_FREQ (400 * 1000 * 1000)
-
-/*
- * Supported formats.
- */
-
-static const struct rockchip_vpu_fmt rk3399_vpu_enc_fmts[] = {
- {
- .fourcc = V4L2_PIX_FMT_YUV420M,
- .codec_mode = RK_VPU_MODE_NONE,
- .enc_fmt = RK3288_VPU_ENC_FMT_YUV420P,
- },
- {
- .fourcc = V4L2_PIX_FMT_NV12M,
- .codec_mode = RK_VPU_MODE_NONE,
- .enc_fmt = RK3288_VPU_ENC_FMT_YUV420SP,
- },
- {
- .fourcc = V4L2_PIX_FMT_YUYV,
- .codec_mode = RK_VPU_MODE_NONE,
- .enc_fmt = RK3288_VPU_ENC_FMT_YUYV422,
- },
- {
- .fourcc = V4L2_PIX_FMT_UYVY,
- .codec_mode = RK_VPU_MODE_NONE,
- .enc_fmt = RK3288_VPU_ENC_FMT_UYVY422,
- },
- {
- .fourcc = V4L2_PIX_FMT_JPEG,
- .codec_mode = RK_VPU_MODE_JPEG_ENC,
- .max_depth = 2,
- .header_size = JPEG_HEADER_SIZE,
- .frmsize = {
- .min_width = 96,
- .max_width = 8192,
- .step_width = JPEG_MB_DIM,
- .min_height = 32,
- .max_height = 8192,
- .step_height = JPEG_MB_DIM,
- },
- },
-};
-
-static irqreturn_t rk3399_vepu_irq(int irq, void *dev_id)
-{
- struct rockchip_vpu_dev *vpu = dev_id;
- enum vb2_buffer_state state;
- u32 status, bytesused;
-
- status = vepu_read(vpu, VEPU_REG_INTERRUPT);
- bytesused = vepu_read(vpu, VEPU_REG_STR_BUF_LIMIT) / 8;
- state = (status & VEPU_REG_INTERRUPT_FRAME_READY) ?
- VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR;
-
- vepu_write(vpu, 0, VEPU_REG_INTERRUPT);
- vepu_write(vpu, 0, VEPU_REG_AXI_CTRL);
-
- rockchip_vpu_irq_done(vpu, bytesused, state);
-
- return IRQ_HANDLED;
-}
-
-static int rk3399_vpu_hw_init(struct rockchip_vpu_dev *vpu)
-{
- /* Bump ACLK to max. possible freq. to improve performance. */
- clk_set_rate(vpu->clocks[0].clk, RK3399_ACLK_MAX_FREQ);
- return 0;
-}
-
-static void rk3399_vpu_enc_reset(struct rockchip_vpu_ctx *ctx)
-{
- struct rockchip_vpu_dev *vpu = ctx->dev;
-
- vepu_write(vpu, VEPU_REG_INTERRUPT_DIS_BIT, VEPU_REG_INTERRUPT);
- vepu_write(vpu, 0, VEPU_REG_ENCODE_START);
- vepu_write(vpu, 0, VEPU_REG_AXI_CTRL);
-}
-
-/*
- * Supported codec ops.
- */
-
-static const struct rockchip_vpu_codec_ops rk3399_vpu_codec_ops[] = {
- [RK_VPU_MODE_JPEG_ENC] = {
- .run = rk3399_vpu_jpeg_enc_run,
- .reset = rk3399_vpu_enc_reset,
- },
-};
-
-/*
- * VPU variant.
- */
-
-const struct rockchip_vpu_variant rk3399_vpu_variant = {
- .enc_offset = 0x0,
- .enc_fmts = rk3399_vpu_enc_fmts,
- .num_enc_fmts = ARRAY_SIZE(rk3399_vpu_enc_fmts),
- .codec = RK_VPU_CODEC_JPEG,
- .codec_ops = rk3399_vpu_codec_ops,
- .vepu_irq = rk3399_vepu_irq,
- .init = rk3399_vpu_hw_init,
- .clk_names = {"aclk", "hclk"},
- .num_clocks = 2
-};
diff --git a/drivers/staging/media/rockchip/vpu/rockchip_vpu.h b/drivers/staging/media/rockchip/vpu/rockchip_vpu.h
deleted file mode 100644
index 1ec2be483e27..000000000000
--- a/drivers/staging/media/rockchip/vpu/rockchip_vpu.h
+++ /dev/null
@@ -1,232 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Rockchip VPU codec driver
- *
- * Copyright 2018 Google LLC.
- * Tomasz Figa <tfiga@chromium.org>
- *
- * Based on s5p-mfc driver by Samsung Electronics Co., Ltd.
- * Copyright (C) 2011 Samsung Electronics Co., Ltd.
- */
-
-#ifndef ROCKCHIP_VPU_H_
-#define ROCKCHIP_VPU_H_
-
-#include <linux/platform_device.h>
-#include <linux/videodev2.h>
-#include <linux/wait.h>
-#include <linux/clk.h>
-
-#include <media/v4l2-ctrls.h>
-#include <media/v4l2-device.h>
-#include <media/v4l2-ioctl.h>
-#include <media/videobuf2-core.h>
-#include <media/videobuf2-dma-contig.h>
-
-#include "rockchip_vpu_hw.h"
-
-#define ROCKCHIP_VPU_MAX_CLOCKS 4
-
-#define JPEG_MB_DIM 16
-#define JPEG_MB_WIDTH(w) DIV_ROUND_UP(w, JPEG_MB_DIM)
-#define JPEG_MB_HEIGHT(h) DIV_ROUND_UP(h, JPEG_MB_DIM)
-
-struct rockchip_vpu_ctx;
-struct rockchip_vpu_codec_ops;
-
-#define RK_VPU_CODEC_JPEG BIT(0)
-
-/**
- * struct rockchip_vpu_variant - information about VPU hardware variant
- *
- * @enc_offset: Offset from VPU base to encoder registers.
- * @enc_fmts: Encoder formats.
- * @num_enc_fmts: Number of encoder formats.
- * @codec: Supported codecs
- * @codec_ops: Codec ops.
- * @init: Initialize hardware.
- * @vepu_irq: encoder interrupt handler
- * @clk_names: array of clock names
- * @num_clocks: number of clocks in the array
- */
-struct rockchip_vpu_variant {
- unsigned int enc_offset;
- const struct rockchip_vpu_fmt *enc_fmts;
- unsigned int num_enc_fmts;
- unsigned int codec;
- const struct rockchip_vpu_codec_ops *codec_ops;
- int (*init)(struct rockchip_vpu_dev *vpu);
- irqreturn_t (*vepu_irq)(int irq, void *priv);
- const char *clk_names[ROCKCHIP_VPU_MAX_CLOCKS];
- int num_clocks;
-};
-
-/**
- * enum rockchip_vpu_codec_mode - codec operating mode.
- * @RK_VPU_MODE_NONE: No operating mode. Used for RAW video formats.
- * @RK_VPU_MODE_JPEG_ENC: JPEG encoder.
- */
-enum rockchip_vpu_codec_mode {
- RK_VPU_MODE_NONE = -1,
- RK_VPU_MODE_JPEG_ENC,
-};
-
-/**
- * struct rockchip_vpu_dev - driver data
- * @v4l2_dev: V4L2 device to register video devices for.
- * @m2m_dev: mem2mem device associated to this device.
- * @mdev: media device associated to this device.
- * @vfd_enc: Video device for encoder.
- * @pdev: Pointer to VPU platform device.
- * @dev: Pointer to device for convenient logging using
- * dev_ macros.
- * @clocks: Array of clock handles.
- * @base: Mapped address of VPU registers.
- * @enc_base: Mapped address of VPU encoder register for convenience.
- * @vpu_mutex: Mutex to synchronize V4L2 calls.
- * @irqlock: Spinlock to synchronize access to data structures
- * shared with interrupt handlers.
- * @variant: Hardware variant-specific parameters.
- * @watchdog_work: Delayed work for hardware timeout handling.
- */
-struct rockchip_vpu_dev {
- struct v4l2_device v4l2_dev;
- struct v4l2_m2m_dev *m2m_dev;
- struct media_device mdev;
- struct video_device *vfd_enc;
- struct platform_device *pdev;
- struct device *dev;
- struct clk_bulk_data clocks[ROCKCHIP_VPU_MAX_CLOCKS];
- void __iomem *base;
- void __iomem *enc_base;
-
- struct mutex vpu_mutex; /* video_device lock */
- spinlock_t irqlock;
- const struct rockchip_vpu_variant *variant;
- struct delayed_work watchdog_work;
-};
-
-/**
- * struct rockchip_vpu_ctx - Context (instance) private data.
- *
- * @dev: VPU driver data to which the context belongs.
- * @fh: V4L2 file handler.
- *
- * @sequence_cap: Sequence counter for capture queue
- * @sequence_out: Sequence counter for output queue
- *
- * @vpu_src_fmt: Descriptor of active source format.
- * @src_fmt: V4L2 pixel format of active source format.
- * @vpu_dst_fmt: Descriptor of active destination format.
- * @dst_fmt: V4L2 pixel format of active destination format.
- *
- * @ctrl_handler: Control handler used to register controls.
- * @jpeg_quality: User-specified JPEG compression quality.
- *
- * @codec_ops: Set of operations related to codec mode.
- *
- * @bounce_dma_addr: Bounce buffer bus address.
- * @bounce_buf: Bounce buffer pointer.
- * @bounce_size: Bounce buffer size.
- */
-struct rockchip_vpu_ctx {
- struct rockchip_vpu_dev *dev;
- struct v4l2_fh fh;
-
- u32 sequence_cap;
- u32 sequence_out;
-
- const struct rockchip_vpu_fmt *vpu_src_fmt;
- struct v4l2_pix_format_mplane src_fmt;
- const struct rockchip_vpu_fmt *vpu_dst_fmt;
- struct v4l2_pix_format_mplane dst_fmt;
-
- struct v4l2_ctrl_handler ctrl_handler;
- int jpeg_quality;
-
- const struct rockchip_vpu_codec_ops *codec_ops;
-
- dma_addr_t bounce_dma_addr;
- void *bounce_buf;
- size_t bounce_size;
-};
-
-/**
- * struct rockchip_vpu_fmt - information about supported video formats.
- * @name: Human readable name of the format.
- * @fourcc: FourCC code of the format. See V4L2_PIX_FMT_*.
- * @codec_mode: Codec mode related to this format. See
- * enum rockchip_vpu_codec_mode.
- * @header_size: Optional header size. Currently used by JPEG encoder.
- * @max_depth: Maximum depth, for bitstream formats
- * @enc_fmt: Format identifier for encoder registers.
- * @frmsize: Supported range of frame sizes (only for bitstream formats).
- */
-struct rockchip_vpu_fmt {
- char *name;
- u32 fourcc;
- enum rockchip_vpu_codec_mode codec_mode;
- int header_size;
- int max_depth;
- enum rockchip_vpu_enc_fmt enc_fmt;
- struct v4l2_frmsize_stepwise frmsize;
-};
-
-/* Logging helpers */
-
-/**
- * debug - Module parameter to control level of debugging messages.
- *
- * Level of debugging messages can be controlled by bits of
- * module parameter called "debug". Meaning of particular
- * bits is as follows:
- *
- * bit 0 - global information: mode, size, init, release
- * bit 1 - each run start/result information
- * bit 2 - contents of small controls from userspace
- * bit 3 - contents of big controls from userspace
- * bit 4 - detail fmt, ctrl, buffer q/dq information
- * bit 5 - detail function enter/leave trace information
- * bit 6 - register write/read information
- */
-extern int rockchip_vpu_debug;
-
-#define vpu_debug(level, fmt, args...) \
- do { \
- if (rockchip_vpu_debug & BIT(level)) \
- pr_info("%s:%d: " fmt, \
- __func__, __LINE__, ##args); \
- } while (0)
-
-#define vpu_err(fmt, args...) \
- pr_err("%s:%d: " fmt, __func__, __LINE__, ##args)
-
-/* Structure access helpers. */
-static inline struct rockchip_vpu_ctx *fh_to_ctx(struct v4l2_fh *fh)
-{
- return container_of(fh, struct rockchip_vpu_ctx, fh);
-}
-
-/* Register accessors. */
-static inline void vepu_write_relaxed(struct rockchip_vpu_dev *vpu,
- u32 val, u32 reg)
-{
- vpu_debug(6, "0x%04x = 0x%08x\n", reg / 4, val);
- writel_relaxed(val, vpu->enc_base + reg);
-}
-
-static inline void vepu_write(struct rockchip_vpu_dev *vpu, u32 val, u32 reg)
-{
- vpu_debug(6, "0x%04x = 0x%08x\n", reg / 4, val);
- writel(val, vpu->enc_base + reg);
-}
-
-static inline u32 vepu_read(struct rockchip_vpu_dev *vpu, u32 reg)
-{
- u32 val = readl(vpu->enc_base + reg);
-
- vpu_debug(6, "0x%04x = 0x%08x\n", reg / 4, val);
- return val;
-}
-
-#endif /* ROCKCHIP_VPU_H_ */
diff --git a/drivers/staging/media/rockchip/vpu/rockchip_vpu_common.h b/drivers/staging/media/rockchip/vpu/rockchip_vpu_common.h
deleted file mode 100644
index ca77668d9579..000000000000
--- a/drivers/staging/media/rockchip/vpu/rockchip_vpu_common.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Rockchip VPU codec driver
- *
- * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
- * Alpha Lin <Alpha.Lin@rock-chips.com>
- * Jeffy Chen <jeffy.chen@rock-chips.com>
- *
- * Copyright 2018 Google LLC.
- * Tomasz Figa <tfiga@chromium.org>
- *
- * Based on s5p-mfc driver by Samsung Electronics Co., Ltd.
- * Copyright (C) 2011 Samsung Electronics Co., Ltd.
- */
-
-#ifndef ROCKCHIP_VPU_COMMON_H_
-#define ROCKCHIP_VPU_COMMON_H_
-
-#include "rockchip_vpu.h"
-
-extern const struct v4l2_ioctl_ops rockchip_vpu_enc_ioctl_ops;
-extern const struct vb2_ops rockchip_vpu_enc_queue_ops;
-
-void rockchip_vpu_enc_reset_src_fmt(struct rockchip_vpu_dev *vpu,
- struct rockchip_vpu_ctx *ctx);
-void rockchip_vpu_enc_reset_dst_fmt(struct rockchip_vpu_dev *vpu,
- struct rockchip_vpu_ctx *ctx);
-
-#endif /* ROCKCHIP_VPU_COMMON_H_ */
diff --git a/drivers/staging/media/rockchip/vpu/rockchip_vpu_drv.c b/drivers/staging/media/rockchip/vpu/rockchip_vpu_drv.c
deleted file mode 100644
index 8bbc905b26c8..000000000000
--- a/drivers/staging/media/rockchip/vpu/rockchip_vpu_drv.c
+++ /dev/null
@@ -1,542 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Rockchip VPU codec driver
- *
- * Copyright (C) 2018 Collabora, Ltd.
- * Copyright 2018 Google LLC.
- * Tomasz Figa <tfiga@chromium.org>
- *
- * Based on s5p-mfc driver by Samsung Electronics Co., Ltd.
- * Copyright (C) 2011 Samsung Electronics Co., Ltd.
- */
-
-#include <linux/clk.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/pm.h>
-#include <linux/pm_runtime.h>
-#include <linux/slab.h>
-#include <linux/videodev2.h>
-#include <linux/workqueue.h>
-#include <media/v4l2-event.h>
-#include <media/v4l2-mem2mem.h>
-#include <media/videobuf2-core.h>
-#include <media/videobuf2-vmalloc.h>
-
-#include "rockchip_vpu_common.h"
-#include "rockchip_vpu.h"
-#include "rockchip_vpu_hw.h"
-
-#define DRIVER_NAME "rockchip-vpu"
-
-int rockchip_vpu_debug;
-module_param_named(debug, rockchip_vpu_debug, int, 0644);
-MODULE_PARM_DESC(debug,
- "Debug level - higher value produces more verbose messages");
-
-static void rockchip_vpu_job_finish(struct rockchip_vpu_dev *vpu,
- struct rockchip_vpu_ctx *ctx,
- unsigned int bytesused,
- enum vb2_buffer_state result)
-{
- struct vb2_v4l2_buffer *src, *dst;
- size_t avail_size;
-
- pm_runtime_mark_last_busy(vpu->dev);
- pm_runtime_put_autosuspend(vpu->dev);
- clk_bulk_disable(vpu->variant->num_clocks, vpu->clocks);
-
- src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
- dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
-
- if (WARN_ON(!src))
- return;
- if (WARN_ON(!dst))
- return;
-
- src->sequence = ctx->sequence_out++;
- dst->sequence = ctx->sequence_cap++;
-
- dst->field = src->field;
- if (src->flags & V4L2_BUF_FLAG_TIMECODE)
- dst->timecode = src->timecode;
- dst->vb2_buf.timestamp = src->vb2_buf.timestamp;
- dst->flags &= ~(V4L2_BUF_FLAG_TSTAMP_SRC_MASK |
- V4L2_BUF_FLAG_TIMECODE);
- dst->flags |= src->flags & (V4L2_BUF_FLAG_TSTAMP_SRC_MASK |
- V4L2_BUF_FLAG_TIMECODE);
-
- avail_size = vb2_plane_size(&dst->vb2_buf, 0) -
- ctx->vpu_dst_fmt->header_size;
- if (bytesused <= avail_size) {
- if (ctx->bounce_buf) {
- memcpy(vb2_plane_vaddr(&dst->vb2_buf, 0) +
- ctx->vpu_dst_fmt->header_size,
- ctx->bounce_buf, bytesused);
- }
- dst->vb2_buf.planes[0].bytesused =
- ctx->vpu_dst_fmt->header_size + bytesused;
- } else {
- result = VB2_BUF_STATE_ERROR;
- }
-
- v4l2_m2m_buf_done(src, result);
- v4l2_m2m_buf_done(dst, result);
-
- v4l2_m2m_job_finish(vpu->m2m_dev, ctx->fh.m2m_ctx);
-}
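/*
 * Editor's sketch (not part of the original driver code): a stand-alone
 * model of the copy performed by rockchip_vpu_job_finish() above for the
 * JPEG case. The capture plane is laid out as [header | payload], with
 * the payload copied from the bounce buffer at offset header_size and
 * the reported bytesused covering both parts. Sizes and names here are
 * illustrative only.
 */
#include <stddef.h>
#include <string.h>

static size_t demo_finish_capture_plane(void *plane_vaddr, size_t plane_size,
					size_t header_size,
					const void *bounce_buf,
					size_t payload_size)
{
	/* Mirror of the avail_size check: the payload must fit after the header. */
	if (payload_size > plane_size - header_size)
		return 0;	/* caller would mark the buffer as ERROR */

	memcpy((char *)plane_vaddr + header_size, bounce_buf, payload_size);
	return header_size + payload_size;	/* value reported as bytesused */
}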
-
-void rockchip_vpu_irq_done(struct rockchip_vpu_dev *vpu,
- unsigned int bytesused,
- enum vb2_buffer_state result)
-{
- struct rockchip_vpu_ctx *ctx =
- v4l2_m2m_get_curr_priv(vpu->m2m_dev);
-
- /*
- * If cancel_delayed_work() returns false, the timeout has already
- * expired and the watchdog is running; it will take care of
- * finishing the job.
- */
- if (cancel_delayed_work(&vpu->watchdog_work))
- rockchip_vpu_job_finish(vpu, ctx, bytesused, result);
-}
-
-void rockchip_vpu_watchdog(struct work_struct *work)
-{
- struct rockchip_vpu_dev *vpu;
- struct rockchip_vpu_ctx *ctx;
-
- vpu = container_of(to_delayed_work(work),
- struct rockchip_vpu_dev, watchdog_work);
- ctx = v4l2_m2m_get_curr_priv(vpu->m2m_dev);
- if (ctx) {
- vpu_err("frame processing timed out!\n");
- ctx->codec_ops->reset(ctx);
- rockchip_vpu_job_finish(vpu, ctx, 0, VB2_BUF_STATE_ERROR);
- }
-}
-
-static void device_run(void *priv)
-{
- struct rockchip_vpu_ctx *ctx = priv;
- int ret;
-
- ret = clk_bulk_enable(ctx->dev->variant->num_clocks, ctx->dev->clocks);
- if (ret)
- goto err_cancel_job;
- ret = pm_runtime_get_sync(ctx->dev->dev);
- if (ret < 0)
- goto err_cancel_job;
-
- ctx->codec_ops->run(ctx);
- return;
-
-err_cancel_job:
- rockchip_vpu_job_finish(ctx->dev, ctx, 0, VB2_BUF_STATE_ERROR);
-}
-
-static struct v4l2_m2m_ops vpu_m2m_ops = {
- .device_run = device_run,
-};
-
-static int
-enc_queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)
-{
- struct rockchip_vpu_ctx *ctx = priv;
- int ret;
-
- src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
- src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
- src_vq->drv_priv = ctx;
- src_vq->ops = &rockchip_vpu_enc_queue_ops;
- src_vq->mem_ops = &vb2_dma_contig_memops;
-
- /*
- * The driver does mostly sequential access, so sacrifice TLB
- * efficiency for faster allocation. Also, there is no CPU access
- * to the source queue, so no kernel mapping is needed.
- */
- src_vq->dma_attrs = DMA_ATTR_ALLOC_SINGLE_PAGES |
- DMA_ATTR_NO_KERNEL_MAPPING;
- src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
- src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
- src_vq->lock = &ctx->dev->vpu_mutex;
- src_vq->dev = ctx->dev->v4l2_dev.dev;
-
- ret = vb2_queue_init(src_vq);
- if (ret)
- return ret;
-
- /*
- * The CAPTURE queue doesn't need DMA memory, because the CPU
- * builds the final JPEG frames from the hardware-produced JPEG
- * payload.
- *
- * A bounce buffer is used as the DMA destination instead.
- */
- dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
- dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
- dst_vq->drv_priv = ctx;
- dst_vq->ops = &rockchip_vpu_enc_queue_ops;
- dst_vq->mem_ops = &vb2_vmalloc_memops;
- dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
- dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
- dst_vq->lock = &ctx->dev->vpu_mutex;
- dst_vq->dev = ctx->dev->v4l2_dev.dev;
-
- return vb2_queue_init(dst_vq);
-}
-
-static int rockchip_vpu_s_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct rockchip_vpu_ctx *ctx;
-
- ctx = container_of(ctrl->handler,
- struct rockchip_vpu_ctx, ctrl_handler);
-
- vpu_debug(1, "s_ctrl: id = %d, val = %d\n", ctrl->id, ctrl->val);
-
- switch (ctrl->id) {
- case V4L2_CID_JPEG_COMPRESSION_QUALITY:
- ctx->jpeg_quality = ctrl->val;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static const struct v4l2_ctrl_ops rockchip_vpu_ctrl_ops = {
- .s_ctrl = rockchip_vpu_s_ctrl,
-};
-
-static int rockchip_vpu_ctrls_setup(struct rockchip_vpu_dev *vpu,
- struct rockchip_vpu_ctx *ctx)
-{
- v4l2_ctrl_handler_init(&ctx->ctrl_handler, 1);
- if (vpu->variant->codec & RK_VPU_CODEC_JPEG) {
- v4l2_ctrl_new_std(&ctx->ctrl_handler, &rockchip_vpu_ctrl_ops,
- V4L2_CID_JPEG_COMPRESSION_QUALITY,
- 5, 100, 1, 50);
- if (ctx->ctrl_handler.error) {
- vpu_err("Adding JPEG control failed %d\n",
- ctx->ctrl_handler.error);
- v4l2_ctrl_handler_free(&ctx->ctrl_handler);
- return ctx->ctrl_handler.error;
- }
- }
-
- return v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
-}
-
-/*
- * V4L2 file operations.
- */
-
-static int rockchip_vpu_open(struct file *filp)
-{
- struct rockchip_vpu_dev *vpu = video_drvdata(filp);
- struct video_device *vdev = video_devdata(filp);
- struct rockchip_vpu_ctx *ctx;
- int ret;
-
- /*
- * We do not need any extra locking here, because we operate only
- * on local data, except for reading a few fields from dev, which
- * do not change throughout the device's lifetime (guaranteed by the
- * module reference taken in open()), and on V4L2 internal objects
- * (such as vdev and ctx->fh), whose locking is handled by the
- * respective helper functions used here.
- */
-
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
-
- ctx->dev = vpu;
- if (vdev == vpu->vfd_enc)
- ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(vpu->m2m_dev, ctx,
- &enc_queue_init);
- else
- ctx->fh.m2m_ctx = ERR_PTR(-ENODEV);
- if (IS_ERR(ctx->fh.m2m_ctx)) {
- ret = PTR_ERR(ctx->fh.m2m_ctx);
- kfree(ctx);
- return ret;
- }
-
- v4l2_fh_init(&ctx->fh, vdev);
- filp->private_data = &ctx->fh;
- v4l2_fh_add(&ctx->fh);
-
- if (vdev == vpu->vfd_enc) {
- rockchip_vpu_enc_reset_dst_fmt(vpu, ctx);
- rockchip_vpu_enc_reset_src_fmt(vpu, ctx);
- }
-
- ret = rockchip_vpu_ctrls_setup(vpu, ctx);
- if (ret) {
- vpu_err("Failed to set up controls\n");
- goto err_fh_free;
- }
- ctx->fh.ctrl_handler = &ctx->ctrl_handler;
-
- return 0;
-
-err_fh_free:
- v4l2_fh_del(&ctx->fh);
- v4l2_fh_exit(&ctx->fh);
- kfree(ctx);
- return ret;
-}
-
-static int rockchip_vpu_release(struct file *filp)
-{
- struct rockchip_vpu_ctx *ctx =
- container_of(filp->private_data, struct rockchip_vpu_ctx, fh);
-
- /*
- * No need for extra locking because this was the last reference
- * to this file.
- */
- v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
- v4l2_fh_del(&ctx->fh);
- v4l2_fh_exit(&ctx->fh);
- v4l2_ctrl_handler_free(&ctx->ctrl_handler);
- kfree(ctx);
-
- return 0;
-}
-
-static const struct v4l2_file_operations rockchip_vpu_fops = {
- .owner = THIS_MODULE,
- .open = rockchip_vpu_open,
- .release = rockchip_vpu_release,
- .poll = v4l2_m2m_fop_poll,
- .unlocked_ioctl = video_ioctl2,
- .mmap = v4l2_m2m_fop_mmap,
-};
-
-static const struct of_device_id of_rockchip_vpu_match[] = {
- { .compatible = "rockchip,rk3399-vpu", .data = &rk3399_vpu_variant, },
- { .compatible = "rockchip,rk3288-vpu", .data = &rk3288_vpu_variant, },
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, of_rockchip_vpu_match);
-
-static int rockchip_vpu_video_device_register(struct rockchip_vpu_dev *vpu)
-{
- const struct of_device_id *match;
- struct video_device *vfd;
- int function, ret;
-
- match = of_match_node(of_rockchip_vpu_match, vpu->dev->of_node);
- vfd = video_device_alloc();
- if (!vfd) {
- v4l2_err(&vpu->v4l2_dev, "Failed to allocate video device\n");
- return -ENOMEM;
- }
-
- vfd->fops = &rockchip_vpu_fops;
- vfd->release = video_device_release;
- vfd->lock = &vpu->vpu_mutex;
- vfd->v4l2_dev = &vpu->v4l2_dev;
- vfd->vfl_dir = VFL_DIR_M2M;
- vfd->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M_MPLANE;
- vfd->ioctl_ops = &rockchip_vpu_enc_ioctl_ops;
- snprintf(vfd->name, sizeof(vfd->name), "%s-enc", match->compatible);
- vpu->vfd_enc = vfd;
- video_set_drvdata(vfd, vpu);
-
- ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1);
- if (ret) {
- v4l2_err(&vpu->v4l2_dev, "Failed to register video device\n");
- goto err_free_dev;
- }
- v4l2_info(&vpu->v4l2_dev, "registered as /dev/video%d\n", vfd->num);
-
- function = MEDIA_ENT_F_PROC_VIDEO_ENCODER;
- ret = v4l2_m2m_register_media_controller(vpu->m2m_dev, vfd, function);
- if (ret) {
- v4l2_err(&vpu->v4l2_dev, "Failed to init mem2mem media controller\n");
- goto err_unreg_video;
- }
- return 0;
-
-err_unreg_video:
- video_unregister_device(vfd);
-err_free_dev:
- video_device_release(vfd);
- return ret;
-}
-
-static int rockchip_vpu_probe(struct platform_device *pdev)
-{
- const struct of_device_id *match;
- struct rockchip_vpu_dev *vpu;
- struct resource *res;
- int i, ret;
-
- vpu = devm_kzalloc(&pdev->dev, sizeof(*vpu), GFP_KERNEL);
- if (!vpu)
- return -ENOMEM;
-
- vpu->dev = &pdev->dev;
- vpu->pdev = pdev;
- mutex_init(&vpu->vpu_mutex);
- spin_lock_init(&vpu->irqlock);
-
- match = of_match_node(of_rockchip_vpu_match, pdev->dev.of_node);
- vpu->variant = match->data;
-
- INIT_DELAYED_WORK(&vpu->watchdog_work, rockchip_vpu_watchdog);
-
- for (i = 0; i < vpu->variant->num_clocks; i++)
- vpu->clocks[i].id = vpu->variant->clk_names[i];
- ret = devm_clk_bulk_get(&pdev->dev, vpu->variant->num_clocks,
- vpu->clocks);
- if (ret)
- return ret;
-
- res = platform_get_resource(vpu->pdev, IORESOURCE_MEM, 0);
- vpu->base = devm_ioremap_resource(vpu->dev, res);
- if (IS_ERR(vpu->base))
- return PTR_ERR(vpu->base);
- vpu->enc_base = vpu->base + vpu->variant->enc_offset;
-
- ret = dma_set_coherent_mask(vpu->dev, DMA_BIT_MASK(32));
- if (ret) {
- dev_err(vpu->dev, "Could not set DMA coherent mask.\n");
- return ret;
- }
-
- if (vpu->variant->vepu_irq) {
- int irq;
-
- irq = platform_get_irq_byname(vpu->pdev, "vepu");
- if (irq <= 0) {
- dev_err(vpu->dev, "Could not get vepu IRQ.\n");
- return -ENXIO;
- }
-
- ret = devm_request_irq(vpu->dev, irq, vpu->variant->vepu_irq,
- 0, dev_name(vpu->dev), vpu);
- if (ret) {
- dev_err(vpu->dev, "Could not request vepu IRQ.\n");
- return ret;
- }
- }
-
- ret = vpu->variant->init(vpu);
- if (ret) {
- dev_err(&pdev->dev, "Failed to init VPU hardware\n");
- return ret;
- }
-
- pm_runtime_set_autosuspend_delay(vpu->dev, 100);
- pm_runtime_use_autosuspend(vpu->dev);
- pm_runtime_enable(vpu->dev);
-
- ret = clk_bulk_prepare(vpu->variant->num_clocks, vpu->clocks);
- if (ret) {
- dev_err(&pdev->dev, "Failed to prepare clocks\n");
- return ret;
- }
-
- ret = v4l2_device_register(&pdev->dev, &vpu->v4l2_dev);
- if (ret) {
- dev_err(&pdev->dev, "Failed to register v4l2 device\n");
- goto err_clk_unprepare;
- }
- platform_set_drvdata(pdev, vpu);
-
- vpu->m2m_dev = v4l2_m2m_init(&vpu_m2m_ops);
- if (IS_ERR(vpu->m2m_dev)) {
- v4l2_err(&vpu->v4l2_dev, "Failed to init mem2mem device\n");
- ret = PTR_ERR(vpu->m2m_dev);
- goto err_v4l2_unreg;
- }
-
- vpu->mdev.dev = vpu->dev;
- strscpy(vpu->mdev.model, DRIVER_NAME, sizeof(vpu->mdev.model));
- strscpy(vpu->mdev.bus_info, "platform: " DRIVER_NAME,
- sizeof(vpu->mdev.bus_info));
- media_device_init(&vpu->mdev);
- vpu->v4l2_dev.mdev = &vpu->mdev;
-
- ret = rockchip_vpu_video_device_register(vpu);
- if (ret) {
- dev_err(&pdev->dev, "Failed to register encoder\n");
- goto err_m2m_rel;
- }
-
- ret = media_device_register(&vpu->mdev);
- if (ret) {
- v4l2_err(&vpu->v4l2_dev, "Failed to register mem2mem media device\n");
- goto err_video_dev_unreg;
- }
- return 0;
-err_video_dev_unreg:
- if (vpu->vfd_enc) {
- v4l2_m2m_unregister_media_controller(vpu->m2m_dev);
- video_unregister_device(vpu->vfd_enc);
- video_device_release(vpu->vfd_enc);
- }
-err_m2m_rel:
- media_device_cleanup(&vpu->mdev);
- v4l2_m2m_release(vpu->m2m_dev);
-err_v4l2_unreg:
- v4l2_device_unregister(&vpu->v4l2_dev);
-err_clk_unprepare:
- clk_bulk_unprepare(vpu->variant->num_clocks, vpu->clocks);
- pm_runtime_dont_use_autosuspend(vpu->dev);
- pm_runtime_disable(vpu->dev);
- return ret;
-}
-
-static int rockchip_vpu_remove(struct platform_device *pdev)
-{
- struct rockchip_vpu_dev *vpu = platform_get_drvdata(pdev);
-
- v4l2_info(&vpu->v4l2_dev, "Removing %s\n", pdev->name);
-
- media_device_unregister(&vpu->mdev);
- if (vpu->vfd_enc) {
- v4l2_m2m_unregister_media_controller(vpu->m2m_dev);
- video_unregister_device(vpu->vfd_enc);
- video_device_release(vpu->vfd_enc);
- }
- media_device_cleanup(&vpu->mdev);
- v4l2_m2m_release(vpu->m2m_dev);
- v4l2_device_unregister(&vpu->v4l2_dev);
- clk_bulk_unprepare(vpu->variant->num_clocks, vpu->clocks);
- pm_runtime_dont_use_autosuspend(vpu->dev);
- pm_runtime_disable(vpu->dev);
- return 0;
-}
-
-static const struct dev_pm_ops rockchip_vpu_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
-};
-
-static struct platform_driver rockchip_vpu_driver = {
- .probe = rockchip_vpu_probe,
- .remove = rockchip_vpu_remove,
- .driver = {
- .name = DRIVER_NAME,
- .of_match_table = of_match_ptr(of_rockchip_vpu_match),
- .pm = &rockchip_vpu_pm_ops,
- },
-};
-module_platform_driver(rockchip_vpu_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Alpha Lin <Alpha.Lin@Rock-Chips.com>");
-MODULE_AUTHOR("Tomasz Figa <tfiga@chromium.org>");
-MODULE_AUTHOR("Ezequiel Garcia <ezequiel@collabora.com>");
-MODULE_DESCRIPTION("Rockchip VPU codec driver");
diff --git a/drivers/staging/media/rockchip/vpu/rockchip_vpu_enc.c b/drivers/staging/media/rockchip/vpu/rockchip_vpu_enc.c
deleted file mode 100644
index dcbfc3cbc9f3..000000000000
--- a/drivers/staging/media/rockchip/vpu/rockchip_vpu_enc.c
+++ /dev/null
@@ -1,671 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Rockchip VPU codec driver
- *
- * Copyright (C) 2018 Collabora, Ltd.
- * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
- * Alpha Lin <Alpha.Lin@rock-chips.com>
- * Jeffy Chen <jeffy.chen@rock-chips.com>
- *
- * Copyright 2018 Google LLC.
- * Tomasz Figa <tfiga@chromium.org>
- *
- * Based on s5p-mfc driver by Samsung Electronics Co., Ltd.
- * Copyright (C) 2010-2011 Samsung Electronics Co., Ltd.
- */
-
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/pm_runtime.h>
-#include <linux/videodev2.h>
-#include <linux/workqueue.h>
-#include <media/v4l2-ctrls.h>
-#include <media/v4l2-event.h>
-#include <media/v4l2-mem2mem.h>
-#include <media/videobuf2-core.h>
-#include <media/videobuf2-dma-sg.h>
-
-#include "rockchip_vpu.h"
-#include "rockchip_vpu_hw.h"
-#include "rockchip_vpu_common.h"
-
-/**
- * struct v4l2_format_info - information about a V4L2 format
- * @format: 4CC format identifier (V4L2_PIX_FMT_*)
- * @header_size: Size of header, optional and used by compressed formats
- * @num_planes: Number of planes (1 to 3)
- * @cpp: Number of bytes per pixel (per plane)
- * @hsub: Horizontal chroma subsampling factor
- * @vsub: Vertical chroma subsampling factor
- * @is_compressed: Is it a compressed format?
- * @multiplanar: Is it a multiplanar variant format? (e.g. NV12M)
- */
-struct rockchip_vpu_v4l2_format_info {
- u32 format;
- u32 header_size;
- u8 num_planes;
- u8 cpp[3];
- u8 hsub;
- u8 vsub;
- u8 is_compressed;
- u8 multiplanar;
-};
-
-static const struct rockchip_vpu_v4l2_format_info *
-rockchip_vpu_v4l2_format_info(u32 format)
-{
- static const struct rockchip_vpu_v4l2_format_info formats[] = {
- { .format = V4L2_PIX_FMT_YUV420M, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 2, .multiplanar = 1 },
- { .format = V4L2_PIX_FMT_NV12M, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 2, .multiplanar = 1 },
- { .format = V4L2_PIX_FMT_YUYV, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1 },
- { .format = V4L2_PIX_FMT_UYVY, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1 },
- };
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(formats); ++i) {
- if (formats[i].format == format)
- return &formats[i];
- }
-
- vpu_err("Unsupported V4L 4CC format (%08x)\n", format);
- return NULL;
-}
-
-static void
-fill_pixfmt_mp(struct v4l2_pix_format_mplane *pixfmt,
- int pixelformat, int width, int height)
-{
- const struct rockchip_vpu_v4l2_format_info *info;
- struct v4l2_plane_pix_format *plane;
- int i;
-
- info = rockchip_vpu_v4l2_format_info(pixelformat);
- if (!info)
- return;
-
- pixfmt->width = width;
- pixfmt->height = height;
- pixfmt->pixelformat = pixelformat;
-
- if (!info->multiplanar) {
- pixfmt->num_planes = 1;
- plane = &pixfmt->plane_fmt[0];
- plane->bytesperline = info->is_compressed ?
- 0 : width * info->cpp[0];
- plane->sizeimage = info->header_size;
- for (i = 0; i < info->num_planes; i++) {
- unsigned int hsub = (i == 0) ? 1 : info->hsub;
- unsigned int vsub = (i == 0) ? 1 : info->vsub;
-
- plane->sizeimage += info->cpp[i] *
- DIV_ROUND_UP(width, hsub) *
- DIV_ROUND_UP(height, vsub);
- }
- } else {
- pixfmt->num_planes = info->num_planes;
- for (i = 0; i < info->num_planes; i++) {
- unsigned int hsub = (i == 0) ? 1 : info->hsub;
- unsigned int vsub = (i == 0) ? 1 : info->vsub;
-
- plane = &pixfmt->plane_fmt[i];
- plane->bytesperline =
- info->cpp[i] * DIV_ROUND_UP(width, hsub);
- plane->sizeimage =
- plane->bytesperline * DIV_ROUND_UP(height, vsub);
- }
- }
-}
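/*
 * Editor's worked example (not part of the original driver code): the
 * plane geometry fill_pixfmt_mp() above computes for V4L2_PIX_FMT_NV12M
 * at 1280x720, using the table entry { .num_planes = 2, .cpp = { 1, 2, 0 },
 * .hsub = 2, .vsub = 2 }. Stand-alone, for illustration only.
 */
#include <stdio.h>

#define DEMO_DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int width = 1280, height = 720;
	unsigned int cpp[2] = { 1, 2 }, hsub = 2, vsub = 2;

	/* Plane 0 (Y): full resolution, 1 byte per pixel. */
	printf("plane0: bpl=%u sizeimage=%u\n",
	       cpp[0] * width, cpp[0] * width * height);

	/* Plane 1 (interleaved CbCr): 2x2 subsampled, 2 bytes per sample. */
	printf("plane1: bpl=%u sizeimage=%u\n",
	       cpp[1] * DEMO_DIV_ROUND_UP(width, hsub),
	       cpp[1] * DEMO_DIV_ROUND_UP(width, hsub) *
	       DEMO_DIV_ROUND_UP(height, vsub));

	/* Prints: plane0: bpl=1280 sizeimage=921600
	 *         plane1: bpl=1280 sizeimage=460800 */
	return 0;
}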
-
-static const struct rockchip_vpu_fmt *
-rockchip_vpu_find_format(struct rockchip_vpu_ctx *ctx, u32 fourcc)
-{
- struct rockchip_vpu_dev *dev = ctx->dev;
- const struct rockchip_vpu_fmt *formats;
- unsigned int num_fmts, i;
-
- formats = dev->variant->enc_fmts;
- num_fmts = dev->variant->num_enc_fmts;
- for (i = 0; i < num_fmts; i++)
- if (formats[i].fourcc == fourcc)
- return &formats[i];
- return NULL;
-}
-
-static const struct rockchip_vpu_fmt *
-rockchip_vpu_get_default_fmt(struct rockchip_vpu_ctx *ctx, bool bitstream)
-{
- struct rockchip_vpu_dev *dev = ctx->dev;
- const struct rockchip_vpu_fmt *formats;
- unsigned int num_fmts, i;
-
- formats = dev->variant->enc_fmts;
- num_fmts = dev->variant->num_enc_fmts;
- for (i = 0; i < num_fmts; i++) {
- if (bitstream == (formats[i].codec_mode != RK_VPU_MODE_NONE))
- return &formats[i];
- }
- return NULL;
-}
-
-static int vidioc_querycap(struct file *file, void *priv,
- struct v4l2_capability *cap)
-{
- struct rockchip_vpu_dev *vpu = video_drvdata(file);
- struct video_device *vdev = video_devdata(file);
-
- strscpy(cap->driver, vpu->dev->driver->name, sizeof(cap->driver));
- strscpy(cap->card, vdev->name, sizeof(cap->card));
- snprintf(cap->bus_info, sizeof(cap->bus_info), "platform: %s",
- vpu->dev->driver->name);
- return 0;
-}
-
-static int vidioc_enum_framesizes(struct file *file, void *priv,
- struct v4l2_frmsizeenum *fsize)
-{
- struct rockchip_vpu_ctx *ctx = fh_to_ctx(priv);
- const struct rockchip_vpu_fmt *fmt;
-
- if (fsize->index != 0) {
- vpu_debug(0, "invalid frame size index (expected 0, got %d)\n",
- fsize->index);
- return -EINVAL;
- }
-
- fmt = rockchip_vpu_find_format(ctx, fsize->pixel_format);
- if (!fmt) {
- vpu_debug(0, "unsupported bitstream format (%08x)\n",
- fsize->pixel_format);
- return -EINVAL;
- }
-
- /* This only makes sense for coded formats */
- if (fmt->codec_mode == RK_VPU_MODE_NONE)
- return -EINVAL;
-
- fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
- fsize->stepwise = fmt->frmsize;
-
- return 0;
-}
-
-static int vidioc_enum_fmt_vid_cap_mplane(struct file *file, void *priv,
- struct v4l2_fmtdesc *f)
-{
- struct rockchip_vpu_dev *dev = video_drvdata(file);
- const struct rockchip_vpu_fmt *fmt;
- const struct rockchip_vpu_fmt *formats;
- int num_fmts, i, j = 0;
-
- formats = dev->variant->enc_fmts;
- num_fmts = dev->variant->num_enc_fmts;
- for (i = 0; i < num_fmts; i++) {
- /* Skip uncompressed formats */
- if (formats[i].codec_mode == RK_VPU_MODE_NONE)
- continue;
- if (j == f->index) {
- fmt = &formats[i];
- f->pixelformat = fmt->fourcc;
- return 0;
- }
- ++j;
- }
- return -EINVAL;
-}
-
-static int vidioc_enum_fmt_vid_out_mplane(struct file *file, void *priv,
- struct v4l2_fmtdesc *f)
-{
- struct rockchip_vpu_dev *dev = video_drvdata(file);
- const struct rockchip_vpu_fmt *formats;
- const struct rockchip_vpu_fmt *fmt;
- int num_fmts, i, j = 0;
-
- formats = dev->variant->enc_fmts;
- num_fmts = dev->variant->num_enc_fmts;
- for (i = 0; i < num_fmts; i++) {
- if (formats[i].codec_mode != RK_VPU_MODE_NONE)
- continue;
- if (j == f->index) {
- fmt = &formats[i];
- f->pixelformat = fmt->fourcc;
- return 0;
- }
- ++j;
- }
- return -EINVAL;
-}
-
-static int vidioc_g_fmt_out_mplane(struct file *file, void *priv,
- struct v4l2_format *f)
-{
- struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
- struct rockchip_vpu_ctx *ctx = fh_to_ctx(priv);
-
- vpu_debug(4, "f->type = %d\n", f->type);
-
- *pix_mp = ctx->src_fmt;
-
- return 0;
-}
-
-static int vidioc_g_fmt_cap_mplane(struct file *file, void *priv,
- struct v4l2_format *f)
-{
- struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
- struct rockchip_vpu_ctx *ctx = fh_to_ctx(priv);
-
- vpu_debug(4, "f->type = %d\n", f->type);
-
- *pix_mp = ctx->dst_fmt;
-
- return 0;
-}
-
-static int
-vidioc_try_fmt_cap_mplane(struct file *file, void *priv, struct v4l2_format *f)
-{
- struct rockchip_vpu_ctx *ctx = fh_to_ctx(priv);
- struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
- const struct rockchip_vpu_fmt *fmt;
-
- vpu_debug(4, "%c%c%c%c\n",
- (pix_mp->pixelformat & 0x7f),
- (pix_mp->pixelformat >> 8) & 0x7f,
- (pix_mp->pixelformat >> 16) & 0x7f,
- (pix_mp->pixelformat >> 24) & 0x7f);
-
- fmt = rockchip_vpu_find_format(ctx, pix_mp->pixelformat);
- if (!fmt) {
- fmt = rockchip_vpu_get_default_fmt(ctx, true);
- f->fmt.pix.pixelformat = fmt->fourcc;
- }
-
- pix_mp->num_planes = 1;
- pix_mp->field = V4L2_FIELD_NONE;
- pix_mp->width = clamp(pix_mp->width,
- fmt->frmsize.min_width,
- fmt->frmsize.max_width);
- pix_mp->height = clamp(pix_mp->height,
- fmt->frmsize.min_height,
- fmt->frmsize.max_height);
- /* Round up to macroblocks. */
- pix_mp->width = round_up(pix_mp->width, JPEG_MB_DIM);
- pix_mp->height = round_up(pix_mp->height, JPEG_MB_DIM);
-
- /*
- * For compressed formats the application can specify
- * sizeimage. If the application passes a zero sizeimage,
- * let's default to the maximum frame size.
- */
- if (!pix_mp->plane_fmt[0].sizeimage)
- pix_mp->plane_fmt[0].sizeimage = fmt->header_size +
- pix_mp->width * pix_mp->height * fmt->max_depth;
- memset(pix_mp->plane_fmt[0].reserved, 0,
- sizeof(pix_mp->plane_fmt[0].reserved));
- return 0;
-}
-
-static int
-vidioc_try_fmt_out_mplane(struct file *file, void *priv, struct v4l2_format *f)
-{
- struct rockchip_vpu_ctx *ctx = fh_to_ctx(priv);
- struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
- const struct rockchip_vpu_fmt *fmt;
- unsigned int width, height;
- int i;
-
- vpu_debug(4, "%c%c%c%c\n",
- (pix_mp->pixelformat & 0x7f),
- (pix_mp->pixelformat >> 8) & 0x7f,
- (pix_mp->pixelformat >> 16) & 0x7f,
- (pix_mp->pixelformat >> 24) & 0x7f);
-
- fmt = rockchip_vpu_find_format(ctx, pix_mp->pixelformat);
- if (!fmt) {
- fmt = rockchip_vpu_get_default_fmt(ctx, false);
- f->fmt.pix.pixelformat = fmt->fourcc;
- }
-
- pix_mp->field = V4L2_FIELD_NONE;
- width = clamp(pix_mp->width,
- ctx->vpu_dst_fmt->frmsize.min_width,
- ctx->vpu_dst_fmt->frmsize.max_width);
- height = clamp(pix_mp->height,
- ctx->vpu_dst_fmt->frmsize.min_height,
- ctx->vpu_dst_fmt->frmsize.max_height);
- /* Round up to macroblocks. */
- width = round_up(width, JPEG_MB_DIM);
- height = round_up(height, JPEG_MB_DIM);
-
- /* Fill remaining fields */
- fill_pixfmt_mp(pix_mp, fmt->fourcc, width, height);
-
- for (i = 0; i < pix_mp->num_planes; i++) {
- memset(pix_mp->plane_fmt[i].reserved, 0,
- sizeof(pix_mp->plane_fmt[i].reserved));
- }
- return 0;
-}
-
-void rockchip_vpu_enc_reset_dst_fmt(struct rockchip_vpu_dev *vpu,
- struct rockchip_vpu_ctx *ctx)
-{
- struct v4l2_pix_format_mplane *fmt = &ctx->dst_fmt;
-
- ctx->vpu_dst_fmt = rockchip_vpu_get_default_fmt(ctx, true);
-
- memset(fmt, 0, sizeof(*fmt));
-
- fmt->num_planes = 1;
- fmt->width = clamp(fmt->width, ctx->vpu_dst_fmt->frmsize.min_width,
- ctx->vpu_dst_fmt->frmsize.max_width);
- fmt->height = clamp(fmt->height, ctx->vpu_dst_fmt->frmsize.min_height,
- ctx->vpu_dst_fmt->frmsize.max_height);
- fmt->pixelformat = ctx->vpu_dst_fmt->fourcc;
- fmt->field = V4L2_FIELD_NONE;
- fmt->colorspace = V4L2_COLORSPACE_JPEG;
- fmt->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
- fmt->quantization = V4L2_QUANTIZATION_DEFAULT;
- fmt->xfer_func = V4L2_XFER_FUNC_DEFAULT;
-
- fmt->plane_fmt[0].sizeimage = ctx->vpu_dst_fmt->header_size +
- fmt->width * fmt->height * ctx->vpu_dst_fmt->max_depth;
-}
-
-void rockchip_vpu_enc_reset_src_fmt(struct rockchip_vpu_dev *vpu,
- struct rockchip_vpu_ctx *ctx)
-{
- struct v4l2_pix_format_mplane *fmt = &ctx->src_fmt;
- unsigned int width, height;
-
- ctx->vpu_src_fmt = rockchip_vpu_get_default_fmt(ctx, false);
-
- memset(fmt, 0, sizeof(*fmt));
-
- width = clamp(fmt->width, ctx->vpu_dst_fmt->frmsize.min_width,
- ctx->vpu_dst_fmt->frmsize.max_width);
- height = clamp(fmt->height, ctx->vpu_dst_fmt->frmsize.min_height,
- ctx->vpu_dst_fmt->frmsize.max_height);
- fmt->field = V4L2_FIELD_NONE;
- fmt->colorspace = V4L2_COLORSPACE_JPEG;
- fmt->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
- fmt->quantization = V4L2_QUANTIZATION_DEFAULT;
- fmt->xfer_func = V4L2_XFER_FUNC_DEFAULT;
-
- fill_pixfmt_mp(fmt, ctx->vpu_src_fmt->fourcc, width, height);
-}
-
-static int
-vidioc_s_fmt_out_mplane(struct file *file, void *priv, struct v4l2_format *f)
-{
- struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
- struct rockchip_vpu_ctx *ctx = fh_to_ctx(priv);
- struct vb2_queue *vq;
- int ret;
-
- /* Change not allowed if queue is streaming. */
- vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
- if (vb2_is_streaming(vq))
- return -EBUSY;
-
- ret = vidioc_try_fmt_out_mplane(file, priv, f);
- if (ret)
- return ret;
-
- ctx->vpu_src_fmt = rockchip_vpu_find_format(ctx, pix_mp->pixelformat);
- ctx->src_fmt = *pix_mp;
-
- /* Propagate to the CAPTURE format */
- ctx->dst_fmt.colorspace = pix_mp->colorspace;
- ctx->dst_fmt.ycbcr_enc = pix_mp->ycbcr_enc;
- ctx->dst_fmt.xfer_func = pix_mp->xfer_func;
- ctx->dst_fmt.quantization = pix_mp->quantization;
- ctx->dst_fmt.width = pix_mp->width;
- ctx->dst_fmt.height = pix_mp->height;
-
- vpu_debug(0, "OUTPUT codec mode: %d\n", ctx->vpu_src_fmt->codec_mode);
- vpu_debug(0, "fmt - w: %d, h: %d, mb - w: %d, h: %d\n",
- pix_mp->width, pix_mp->height,
- JPEG_MB_WIDTH(pix_mp->width),
- JPEG_MB_HEIGHT(pix_mp->height));
- return 0;
-}
-
-static int
-vidioc_s_fmt_cap_mplane(struct file *file, void *priv, struct v4l2_format *f)
-{
- struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
- struct rockchip_vpu_ctx *ctx = fh_to_ctx(priv);
- struct rockchip_vpu_dev *vpu = ctx->dev;
- struct vb2_queue *vq, *peer_vq;
- int ret;
-
- /* Change not allowed if queue is streaming. */
- vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
- if (vb2_is_streaming(vq))
- return -EBUSY;
-
- /*
- * Since format change on the CAPTURE queue will reset
- * the OUTPUT queue, we can't allow doing so
- * when the OUTPUT queue has buffers allocated.
- */
- peer_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
- V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
- if (vb2_is_busy(peer_vq) &&
- (pix_mp->pixelformat != ctx->dst_fmt.pixelformat ||
- pix_mp->height != ctx->dst_fmt.height ||
- pix_mp->width != ctx->dst_fmt.width))
- return -EBUSY;
-
- ret = vidioc_try_fmt_cap_mplane(file, priv, f);
- if (ret)
- return ret;
-
- ctx->vpu_dst_fmt = rockchip_vpu_find_format(ctx, pix_mp->pixelformat);
- ctx->dst_fmt = *pix_mp;
-
- vpu_debug(0, "CAPTURE codec mode: %d\n", ctx->vpu_dst_fmt->codec_mode);
- vpu_debug(0, "fmt - w: %d, h: %d, mb - w: %d, h: %d\n",
- pix_mp->width, pix_mp->height,
- JPEG_MB_WIDTH(pix_mp->width),
- JPEG_MB_HEIGHT(pix_mp->height));
-
- /*
- * Current raw format might have become invalid with newly
- * selected codec, so reset it to default just to be safe and
- * keep internal driver state sane. User is mandated to set
- * the raw format again after we return, so we don't need
- * anything smarter.
- */
- rockchip_vpu_enc_reset_src_fmt(vpu, ctx);
- return 0;
-}
-
-const struct v4l2_ioctl_ops rockchip_vpu_enc_ioctl_ops = {
- .vidioc_querycap = vidioc_querycap,
- .vidioc_enum_framesizes = vidioc_enum_framesizes,
-
- .vidioc_try_fmt_vid_cap_mplane = vidioc_try_fmt_cap_mplane,
- .vidioc_try_fmt_vid_out_mplane = vidioc_try_fmt_out_mplane,
- .vidioc_s_fmt_vid_out_mplane = vidioc_s_fmt_out_mplane,
- .vidioc_s_fmt_vid_cap_mplane = vidioc_s_fmt_cap_mplane,
- .vidioc_g_fmt_vid_out_mplane = vidioc_g_fmt_out_mplane,
- .vidioc_g_fmt_vid_cap_mplane = vidioc_g_fmt_cap_mplane,
- .vidioc_enum_fmt_vid_out_mplane = vidioc_enum_fmt_vid_out_mplane,
- .vidioc_enum_fmt_vid_cap_mplane = vidioc_enum_fmt_vid_cap_mplane,
-
- .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
- .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
- .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
- .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
- .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
- .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
- .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
-
- .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
- .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
-
- .vidioc_streamon = v4l2_m2m_ioctl_streamon,
- .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
-};
-
-static int
-rockchip_vpu_queue_setup(struct vb2_queue *vq,
- unsigned int *num_buffers,
- unsigned int *num_planes,
- unsigned int sizes[],
- struct device *alloc_devs[])
-{
- struct rockchip_vpu_ctx *ctx = vb2_get_drv_priv(vq);
- struct v4l2_pix_format_mplane *pixfmt;
- int i;
-
- switch (vq->type) {
- case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
- pixfmt = &ctx->dst_fmt;
- break;
- case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
- pixfmt = &ctx->src_fmt;
- break;
- default:
- vpu_err("invalid queue type: %d\n", vq->type);
- return -EINVAL;
- }
-
- if (*num_planes) {
- if (*num_planes != pixfmt->num_planes)
- return -EINVAL;
- for (i = 0; i < pixfmt->num_planes; ++i)
- if (sizes[i] < pixfmt->plane_fmt[i].sizeimage)
- return -EINVAL;
- return 0;
- }
-
- *num_planes = pixfmt->num_planes;
- for (i = 0; i < pixfmt->num_planes; ++i)
- sizes[i] = pixfmt->plane_fmt[i].sizeimage;
- return 0;
-}
-
-static int rockchip_vpu_buf_prepare(struct vb2_buffer *vb)
-{
- struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
- struct vb2_queue *vq = vb->vb2_queue;
- struct rockchip_vpu_ctx *ctx = vb2_get_drv_priv(vq);
- struct v4l2_pix_format_mplane *pixfmt;
- unsigned int sz;
- int ret = 0;
- int i;
-
- switch (vq->type) {
- case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
- pixfmt = &ctx->dst_fmt;
- break;
- case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
- pixfmt = &ctx->src_fmt;
-
- if (vbuf->field == V4L2_FIELD_ANY)
- vbuf->field = V4L2_FIELD_NONE;
- if (vbuf->field != V4L2_FIELD_NONE) {
- vpu_debug(4, "field %d not supported\n",
- vbuf->field);
- return -EINVAL;
- }
- break;
- default:
- vpu_err("invalid queue type: %d\n", vq->type);
- return -EINVAL;
- }
-
- for (i = 0; i < pixfmt->num_planes; ++i) {
- sz = pixfmt->plane_fmt[i].sizeimage;
- vpu_debug(4, "plane %d size: %ld, sizeimage: %u\n",
- i, vb2_plane_size(vb, i), sz);
- if (vb2_plane_size(vb, i) < sz) {
- vpu_err("plane %d is too small\n", i);
- ret = -EINVAL;
- break;
- }
- }
-
- return ret;
-}
-
-static void rockchip_vpu_buf_queue(struct vb2_buffer *vb)
-{
- struct rockchip_vpu_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
- struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
-
- v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
-}
-
-static int rockchip_vpu_start_streaming(struct vb2_queue *q, unsigned int count)
-{
- struct rockchip_vpu_ctx *ctx = vb2_get_drv_priv(q);
- enum rockchip_vpu_codec_mode codec_mode;
-
- if (V4L2_TYPE_IS_OUTPUT(q->type))
- ctx->sequence_out = 0;
- else
- ctx->sequence_cap = 0;
-
- /* Set codec_ops for the chosen destination format */
- codec_mode = ctx->vpu_dst_fmt->codec_mode;
-
- vpu_debug(4, "Codec mode = %d\n", codec_mode);
- ctx->codec_ops = &ctx->dev->variant->codec_ops[codec_mode];
-
- /* A bounce buffer is needed for the JPEG payload */
- if (!V4L2_TYPE_IS_OUTPUT(q->type)) {
- ctx->bounce_size = ctx->dst_fmt.plane_fmt[0].sizeimage -
- ctx->vpu_dst_fmt->header_size;
- ctx->bounce_buf = dma_alloc_attrs(ctx->dev->dev,
- ctx->bounce_size,
- &ctx->bounce_dma_addr,
- GFP_KERNEL,
- DMA_ATTR_ALLOC_SINGLE_PAGES);
- }
- return 0;
-}
-
-static void rockchip_vpu_stop_streaming(struct vb2_queue *q)
-{
- struct rockchip_vpu_ctx *ctx = vb2_get_drv_priv(q);
-
- if (!V4L2_TYPE_IS_OUTPUT(q->type))
- dma_free_attrs(ctx->dev->dev,
- ctx->bounce_size,
- ctx->bounce_buf,
- ctx->bounce_dma_addr,
- DMA_ATTR_ALLOC_SINGLE_PAGES);
-
- /*
- * The mem2mem framework calls v4l2_m2m_cancel_job before
- * .stop_streaming, so there isn't any job running and
- * it is safe to return all the buffers.
- */
- for (;;) {
- struct vb2_v4l2_buffer *vbuf;
-
- if (V4L2_TYPE_IS_OUTPUT(q->type))
- vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
- else
- vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
- if (!vbuf)
- break;
- v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
- }
-}
-
-const struct vb2_ops rockchip_vpu_enc_queue_ops = {
- .queue_setup = rockchip_vpu_queue_setup,
- .buf_prepare = rockchip_vpu_buf_prepare,
- .buf_queue = rockchip_vpu_buf_queue,
- .start_streaming = rockchip_vpu_start_streaming,
- .stop_streaming = rockchip_vpu_stop_streaming,
- .wait_prepare = vb2_ops_wait_prepare,
- .wait_finish = vb2_ops_wait_finish,
-};
diff --git a/drivers/staging/media/rockchip/vpu/rockchip_vpu_hw.h b/drivers/staging/media/rockchip/vpu/rockchip_vpu_hw.h
deleted file mode 100644
index 2b955da1be1a..000000000000
--- a/drivers/staging/media/rockchip/vpu/rockchip_vpu_hw.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Rockchip VPU codec driver
- *
- * Copyright 2018 Google LLC.
- * Tomasz Figa <tfiga@chromium.org>
- */
-
-#ifndef ROCKCHIP_VPU_HW_H_
-#define ROCKCHIP_VPU_HW_H_
-
-#include <linux/interrupt.h>
-#include <linux/v4l2-controls.h>
-#include <media/videobuf2-core.h>
-
-struct rockchip_vpu_dev;
-struct rockchip_vpu_ctx;
-struct rockchip_vpu_buf;
-struct rockchip_vpu_variant;
-
-/**
- * struct rockchip_vpu_codec_ops - codec mode specific operations
- *
- * @run: Start single {en,de}coding job. Called from atomic context
- * to indicate that a pair of buffers is ready and the hardware
- * should be programmed and started.
- * @done: Read back processing results and additional data from hardware.
- * @reset: Reset the hardware in case of a timeout.
- */
-struct rockchip_vpu_codec_ops {
- void (*run)(struct rockchip_vpu_ctx *ctx);
- void (*done)(struct rockchip_vpu_ctx *ctx, enum vb2_buffer_state);
- void (*reset)(struct rockchip_vpu_ctx *ctx);
-};
-
-/**
- * enum rockchip_vpu_enc_fmt - source format ID for hardware registers.
- */
-enum rockchip_vpu_enc_fmt {
- RK3288_VPU_ENC_FMT_YUV420P = 0,
- RK3288_VPU_ENC_FMT_YUV420SP = 1,
- RK3288_VPU_ENC_FMT_YUYV422 = 2,
- RK3288_VPU_ENC_FMT_UYVY422 = 3,
-};
-
-extern const struct rockchip_vpu_variant rk3399_vpu_variant;
-extern const struct rockchip_vpu_variant rk3288_vpu_variant;
-
-void rockchip_vpu_watchdog(struct work_struct *work);
-void rockchip_vpu_run(struct rockchip_vpu_ctx *ctx);
-void rockchip_vpu_irq_done(struct rockchip_vpu_dev *vpu,
- unsigned int bytesused,
- enum vb2_buffer_state result);
-
-void rk3288_vpu_jpeg_enc_run(struct rockchip_vpu_ctx *ctx);
-void rk3399_vpu_jpeg_enc_run(struct rockchip_vpu_ctx *ctx);
-
-#endif /* ROCKCHIP_VPU_HW_H_ */
diff --git a/drivers/staging/media/rockchip/vpu/rockchip_vpu_jpeg.h b/drivers/staging/media/rockchip/vpu/rockchip_vpu_jpeg.h
deleted file mode 100644
index 72645d8e2ade..000000000000
--- a/drivers/staging/media/rockchip/vpu/rockchip_vpu_jpeg.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-
-#define JPEG_HEADER_SIZE 601
-
-struct rockchip_vpu_jpeg_ctx {
- int width;
- int height;
- int quality;
- unsigned char *buffer;
-};
-
-unsigned char *
-rockchip_vpu_jpeg_get_qtable(struct rockchip_vpu_jpeg_ctx *ctx, int index);
-void rockchip_vpu_jpeg_header_assemble(struct rockchip_vpu_jpeg_ctx *ctx);
diff --git a/drivers/staging/media/soc_camera/imx074.c b/drivers/staging/media/soc_camera/imx074.c
index d907aa62f898..14240b74cdd0 100644
--- a/drivers/staging/media/soc_camera/imx074.c
+++ b/drivers/staging/media/soc_camera/imx074.c
@@ -409,7 +409,7 @@ static int imx074_probe(struct i2c_client *client,
const struct i2c_device_id *did)
{
struct imx074 *priv;
- struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+ struct i2c_adapter *adapter = client->adapter;
struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
int ret;
diff --git a/drivers/staging/media/soc_camera/mt9t031.c b/drivers/staging/media/soc_camera/mt9t031.c
index 615ae9df2c57..c14f23221544 100644
--- a/drivers/staging/media/soc_camera/mt9t031.c
+++ b/drivers/staging/media/soc_camera/mt9t031.c
@@ -751,7 +751,7 @@ static int mt9t031_probe(struct i2c_client *client,
{
struct mt9t031 *mt9t031;
struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
- struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+ struct i2c_adapter *adapter = client->adapter;
int ret;
if (!ssdd) {
diff --git a/drivers/staging/media/soc_camera/soc_mt9v022.c b/drivers/staging/media/soc_camera/soc_mt9v022.c
index e7e0d3d29499..1739a618846d 100644
--- a/drivers/staging/media/soc_camera/soc_mt9v022.c
+++ b/drivers/staging/media/soc_camera/soc_mt9v022.c
@@ -883,7 +883,7 @@ static int mt9v022_probe(struct i2c_client *client,
{
struct mt9v022 *mt9v022;
struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
- struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+ struct i2c_adapter *adapter = client->adapter;
struct mt9v022_platform_data *pdata;
int ret;
diff --git a/drivers/staging/media/soc_camera/soc_ov5642.c b/drivers/staging/media/soc_camera/soc_ov5642.c
index 94696d7baf83..39ae24dca65f 100644
--- a/drivers/staging/media/soc_camera/soc_ov5642.c
+++ b/drivers/staging/media/soc_camera/soc_ov5642.c
@@ -687,7 +687,8 @@ static int reg_write16(struct i2c_client *client, u16 reg, u16 val16)
}
#ifdef CONFIG_VIDEO_ADV_DEBUG
-static int ov5642_get_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg)
+static int ov5642_get_register(struct v4l2_subdev *sd,
+ struct v4l2_dbg_register *reg)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret;
@@ -705,7 +706,8 @@ static int ov5642_get_register(struct v4l2_subdev *sd, struct v4l2_dbg_register
return ret;
}
-static int ov5642_set_register(struct v4l2_subdev *sd, const struct v4l2_dbg_register *reg)
+static int ov5642_set_register(struct v4l2_subdev *sd,
+ const struct v4l2_dbg_register *reg)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
diff --git a/drivers/staging/media/sunxi/cedrus/Makefile b/drivers/staging/media/sunxi/cedrus/Makefile
index 808842f0119e..c85ac6db0302 100644
--- a/drivers/staging/media/sunxi/cedrus/Makefile
+++ b/drivers/staging/media/sunxi/cedrus/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_VIDEO_SUNXI_CEDRUS) += sunxi-cedrus.o
-sunxi-cedrus-y = cedrus.o cedrus_video.o cedrus_hw.o cedrus_dec.o cedrus_mpeg2.o
+sunxi-cedrus-y = cedrus.o cedrus_video.o cedrus_hw.o cedrus_dec.o \
+ cedrus_mpeg2.o cedrus_h264.o
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.c b/drivers/staging/media/sunxi/cedrus/cedrus.c
index d0429c0e6b6b..370937edfc14 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus.c
@@ -40,6 +40,36 @@ static const struct cedrus_control cedrus_controls[] = {
.codec = CEDRUS_CODEC_MPEG2,
.required = false,
},
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_DECODE_PARAMS,
+ .elem_size = sizeof(struct v4l2_ctrl_h264_decode_params),
+ .codec = CEDRUS_CODEC_H264,
+ .required = true,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_SLICE_PARAMS,
+ .elem_size = sizeof(struct v4l2_ctrl_h264_slice_params),
+ .codec = CEDRUS_CODEC_H264,
+ .required = true,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_SPS,
+ .elem_size = sizeof(struct v4l2_ctrl_h264_sps),
+ .codec = CEDRUS_CODEC_H264,
+ .required = true,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_PPS,
+ .elem_size = sizeof(struct v4l2_ctrl_h264_pps),
+ .codec = CEDRUS_CODEC_H264,
+ .required = true,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_SCALING_MATRIX,
+ .elem_size = sizeof(struct v4l2_ctrl_h264_scaling_matrix),
+ .codec = CEDRUS_CODEC_H264,
+ .required = true,
+ },
};
#define CEDRUS_CONTROLS_COUNT ARRAY_SIZE(cedrus_controls)
@@ -278,6 +308,7 @@ static int cedrus_probe(struct platform_device *pdev)
}
dev->dec_ops[CEDRUS_CODEC_MPEG2] = &cedrus_dec_ops_mpeg2;
+ dev->dec_ops[CEDRUS_CODEC_H264] = &cedrus_dec_ops_h264;
mutex_init(&dev->dev_mutex);
@@ -369,36 +400,41 @@ static int cedrus_remove(struct platform_device *pdev)
}
static const struct cedrus_variant sun4i_a10_cedrus_variant = {
- /* No particular capability. */
+ .mod_rate = 320000000,
};
static const struct cedrus_variant sun5i_a13_cedrus_variant = {
- /* No particular capability. */
+ .mod_rate = 320000000,
};
static const struct cedrus_variant sun7i_a20_cedrus_variant = {
- /* No particular capability. */
+ .mod_rate = 320000000,
};
static const struct cedrus_variant sun8i_a33_cedrus_variant = {
.capabilities = CEDRUS_CAPABILITY_UNTILED,
+ .mod_rate = 320000000,
};
static const struct cedrus_variant sun8i_h3_cedrus_variant = {
.capabilities = CEDRUS_CAPABILITY_UNTILED,
+ .mod_rate = 402000000,
};
static const struct cedrus_variant sun50i_a64_cedrus_variant = {
.capabilities = CEDRUS_CAPABILITY_UNTILED,
+ .mod_rate = 402000000,
};
static const struct cedrus_variant sun50i_h5_cedrus_variant = {
.capabilities = CEDRUS_CAPABILITY_UNTILED,
+ .mod_rate = 402000000,
};
static const struct cedrus_variant sun50i_h6_cedrus_variant = {
.capabilities = CEDRUS_CAPABILITY_UNTILED,
.quirks = CEDRUS_QUIRK_NO_DMA_OFFSET,
+ .mod_rate = 600000000,
};
static const struct of_device_id cedrus_dt_match[] = {
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.h b/drivers/staging/media/sunxi/cedrus/cedrus.h
index c57c04b41d2e..3f476d0fd981 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus.h
+++ b/drivers/staging/media/sunxi/cedrus/cedrus.h
@@ -32,7 +32,7 @@
enum cedrus_codec {
CEDRUS_CODEC_MPEG2,
-
+ CEDRUS_CODEC_H264,
CEDRUS_CODEC_LAST,
};
@@ -42,6 +42,12 @@ enum cedrus_irq_status {
CEDRUS_IRQ_OK,
};
+enum cedrus_h264_pic_type {
+ CEDRUS_H264_PIC_TYPE_FRAME = 0,
+ CEDRUS_H264_PIC_TYPE_FIELD,
+ CEDRUS_H264_PIC_TYPE_MBAFF,
+};
+
struct cedrus_control {
u32 id;
u32 elem_size;
@@ -49,6 +55,14 @@ struct cedrus_control {
unsigned char required:1;
};
+struct cedrus_h264_run {
+ const struct v4l2_ctrl_h264_decode_params *decode_params;
+ const struct v4l2_ctrl_h264_pps *pps;
+ const struct v4l2_ctrl_h264_scaling_matrix *scaling_matrix;
+ const struct v4l2_ctrl_h264_slice_params *slice_params;
+ const struct v4l2_ctrl_h264_sps *sps;
+};
+
struct cedrus_mpeg2_run {
const struct v4l2_ctrl_mpeg2_slice_params *slice_params;
const struct v4l2_ctrl_mpeg2_quantization *quantization;
@@ -59,12 +73,20 @@ struct cedrus_run {
struct vb2_v4l2_buffer *dst;
union {
+ struct cedrus_h264_run h264;
struct cedrus_mpeg2_run mpeg2;
};
};
struct cedrus_buffer {
struct v4l2_m2m_buffer m2m_buf;
+
+ union {
+ struct {
+ unsigned int position;
+ enum cedrus_h264_pic_type pic_type;
+ } h264;
+ } codec;
};
struct cedrus_ctx {
@@ -79,6 +101,19 @@ struct cedrus_ctx {
struct v4l2_ctrl **ctrls;
struct vb2_buffer *dst_bufs[VIDEO_MAX_FRAME];
+
+ union {
+ struct {
+ void *mv_col_buf;
+ dma_addr_t mv_col_buf_dma;
+ ssize_t mv_col_buf_field_size;
+ ssize_t mv_col_buf_size;
+ void *pic_info_buf;
+ dma_addr_t pic_info_buf_dma;
+ void *neighbor_info_buf;
+ dma_addr_t neighbor_info_buf_dma;
+ } h264;
+ } codec;
};
struct cedrus_dec_ops {
@@ -94,6 +129,7 @@ struct cedrus_dec_ops {
struct cedrus_variant {
unsigned int capabilities;
unsigned int quirks;
+ unsigned int mod_rate;
};
struct cedrus_dev {
@@ -121,6 +157,7 @@ struct cedrus_dev {
};
extern struct cedrus_dec_ops cedrus_dec_ops_mpeg2;
+extern struct cedrus_dec_ops cedrus_dec_ops_h264;
static inline void cedrus_write(struct cedrus_dev *dev, u32 reg, u32 val)
{
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_dec.c b/drivers/staging/media/sunxi/cedrus/cedrus_dec.c
index 4d6d602cdde6..bdad87eb9d79 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_dec.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_dec.c
@@ -46,6 +46,19 @@ void cedrus_device_run(void *priv)
V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION);
break;
+ case V4L2_PIX_FMT_H264_SLICE_RAW:
+ run.h264.decode_params = cedrus_find_control_data(ctx,
+ V4L2_CID_MPEG_VIDEO_H264_DECODE_PARAMS);
+ run.h264.pps = cedrus_find_control_data(ctx,
+ V4L2_CID_MPEG_VIDEO_H264_PPS);
+ run.h264.scaling_matrix = cedrus_find_control_data(ctx,
+ V4L2_CID_MPEG_VIDEO_H264_SCALING_MATRIX);
+ run.h264.slice_params = cedrus_find_control_data(ctx,
+ V4L2_CID_MPEG_VIDEO_H264_SLICE_PARAMS);
+ run.h264.sps = cedrus_find_control_data(ctx,
+ V4L2_CID_MPEG_VIDEO_H264_SPS);
+ break;
+
default:
break;
}
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h264.c b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
new file mode 100644
index 000000000000..a30bb283f69f
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
@@ -0,0 +1,576 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Cedrus VPU driver
+ *
+ * Copyright (c) 2013 Jens Kuske <jenskuske@gmail.com>
+ * Copyright (c) 2018 Bootlin
+ */
+
+#include <linux/types.h>
+
+#include <media/videobuf2-dma-contig.h>
+
+#include "cedrus.h"
+#include "cedrus_hw.h"
+#include "cedrus_regs.h"
+
+enum cedrus_h264_sram_off {
+ CEDRUS_SRAM_H264_PRED_WEIGHT_TABLE = 0x000,
+ CEDRUS_SRAM_H264_FRAMEBUFFER_LIST = 0x100,
+ CEDRUS_SRAM_H264_REF_LIST_0 = 0x190,
+ CEDRUS_SRAM_H264_REF_LIST_1 = 0x199,
+ CEDRUS_SRAM_H264_SCALING_LIST_8x8_0 = 0x200,
+ CEDRUS_SRAM_H264_SCALING_LIST_8x8_1 = 0x210,
+ CEDRUS_SRAM_H264_SCALING_LIST_4x4 = 0x220,
+};
+
+struct cedrus_h264_sram_ref_pic {
+ __le32 top_field_order_cnt;
+ __le32 bottom_field_order_cnt;
+ __le32 frame_info;
+ __le32 luma_ptr;
+ __le32 chroma_ptr;
+ __le32 mv_col_top_ptr;
+ __le32 mv_col_bot_ptr;
+ __le32 reserved;
+} __packed;
+
+#define CEDRUS_H264_FRAME_NUM 18
+
+#define CEDRUS_NEIGHBOR_INFO_BUF_SIZE (16 * SZ_1K)
+#define CEDRUS_PIC_INFO_BUF_SIZE (128 * SZ_1K)
+
+static void cedrus_h264_write_sram(struct cedrus_dev *dev,
+ enum cedrus_h264_sram_off off,
+ const void *data, size_t len)
+{
+ const u32 *buffer = data;
+ size_t count = DIV_ROUND_UP(len, 4);
+
+ cedrus_write(dev, VE_AVC_SRAM_PORT_OFFSET, off << 2);
+
+ while (count--)
+ cedrus_write(dev, VE_AVC_SRAM_PORT_DATA, *buffer++);
+}
+
+static dma_addr_t cedrus_h264_mv_col_buf_addr(struct cedrus_ctx *ctx,
+ unsigned int position,
+ unsigned int field)
+{
+ dma_addr_t addr = ctx->codec.h264.mv_col_buf_dma;
+
+ /* Adjust for the position */
+ addr += position * ctx->codec.h264.mv_col_buf_field_size * 2;
+
+ /* Adjust for the field */
+ addr += field * ctx->codec.h264.mv_col_buf_field_size;
+
+ return addr;
+}
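/*
 * Editor's worked example (not part of the original driver code): the
 * addressing used by cedrus_h264_mv_col_buf_addr() above. Each DPB
 * position owns two fields' worth of co-located motion-vector data, so
 * with an illustrative mv_col_buf_field_size of 0x8000 bytes:
 *
 *   position 0, top field    -> base + 0x00000
 *   position 0, bottom field -> base + 0x08000
 *   position 1, top field    -> base + 0x10000
 *   position 1, bottom field -> base + 0x18000
 *
 * i.e. addr = base + (2 * position + field) * mv_col_buf_field_size.
 */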
+
+static void cedrus_fill_ref_pic(struct cedrus_ctx *ctx,
+ struct cedrus_buffer *buf,
+ unsigned int top_field_order_cnt,
+ unsigned int bottom_field_order_cnt,
+ struct cedrus_h264_sram_ref_pic *pic)
+{
+ struct vb2_buffer *vbuf = &buf->m2m_buf.vb.vb2_buf;
+ unsigned int position = buf->codec.h264.position;
+
+ pic->top_field_order_cnt = cpu_to_le32(top_field_order_cnt);
+ pic->bottom_field_order_cnt = cpu_to_le32(bottom_field_order_cnt);
+ pic->frame_info = cpu_to_le32(buf->codec.h264.pic_type << 8);
+
+ pic->luma_ptr = cpu_to_le32(cedrus_buf_addr(vbuf, &ctx->dst_fmt, 0));
+ pic->chroma_ptr = cpu_to_le32(cedrus_buf_addr(vbuf, &ctx->dst_fmt, 1));
+ pic->mv_col_top_ptr =
+ cpu_to_le32(cedrus_h264_mv_col_buf_addr(ctx, position, 0));
+ pic->mv_col_bot_ptr =
+ cpu_to_le32(cedrus_h264_mv_col_buf_addr(ctx, position, 1));
+}
+
+static void cedrus_write_frame_list(struct cedrus_ctx *ctx,
+ struct cedrus_run *run)
+{
+ struct cedrus_h264_sram_ref_pic pic_list[CEDRUS_H264_FRAME_NUM];
+ const struct v4l2_ctrl_h264_decode_params *decode = run->h264.decode_params;
+ const struct v4l2_ctrl_h264_slice_params *slice = run->h264.slice_params;
+ const struct v4l2_ctrl_h264_sps *sps = run->h264.sps;
+ struct vb2_queue *cap_q = &ctx->fh.m2m_ctx->cap_q_ctx.q;
+ struct cedrus_buffer *output_buf;
+ struct cedrus_dev *dev = ctx->dev;
+ unsigned long used_dpbs = 0;
+ unsigned int position;
+ unsigned int output = 0;
+ unsigned int i;
+
+ memset(pic_list, 0, sizeof(pic_list));
+
+ for (i = 0; i < ARRAY_SIZE(decode->dpb); i++) {
+ const struct v4l2_h264_dpb_entry *dpb = &decode->dpb[i];
+ struct cedrus_buffer *cedrus_buf;
+ int buf_idx;
+
+ if (!(dpb->flags & V4L2_H264_DPB_ENTRY_FLAG_VALID))
+ continue;
+
+ buf_idx = vb2_find_timestamp(cap_q, dpb->reference_ts, 0);
+ if (buf_idx < 0)
+ continue;
+
+ cedrus_buf = vb2_to_cedrus_buffer(ctx->dst_bufs[buf_idx]);
+ position = cedrus_buf->codec.h264.position;
+ used_dpbs |= BIT(position);
+
+ if (!(dpb->flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE))
+ continue;
+
+ cedrus_fill_ref_pic(ctx, cedrus_buf,
+ dpb->top_field_order_cnt,
+ dpb->bottom_field_order_cnt,
+ &pic_list[position]);
+
+ output = max(position, output);
+ }
+
+ position = find_next_zero_bit(&used_dpbs, CEDRUS_H264_FRAME_NUM,
+ output);
+ if (position >= CEDRUS_H264_FRAME_NUM)
+ position = find_first_zero_bit(&used_dpbs, CEDRUS_H264_FRAME_NUM);
+
+ output_buf = vb2_to_cedrus_buffer(&run->dst->vb2_buf);
+ output_buf->codec.h264.position = position;
+
+ if (slice->flags & V4L2_H264_SLICE_FLAG_FIELD_PIC)
+ output_buf->codec.h264.pic_type = CEDRUS_H264_PIC_TYPE_FIELD;
+ else if (sps->flags & V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD)
+ output_buf->codec.h264.pic_type = CEDRUS_H264_PIC_TYPE_MBAFF;
+ else
+ output_buf->codec.h264.pic_type = CEDRUS_H264_PIC_TYPE_FRAME;
+
+ cedrus_fill_ref_pic(ctx, output_buf,
+ decode->top_field_order_cnt,
+ decode->bottom_field_order_cnt,
+ &pic_list[position]);
+
+ cedrus_h264_write_sram(dev, CEDRUS_SRAM_H264_FRAMEBUFFER_LIST,
+ pic_list, sizeof(pic_list));
+
+ cedrus_write(dev, VE_H264_OUTPUT_FRAME_IDX, position);
+}
+
+#define CEDRUS_MAX_REF_IDX 32
+
+static void _cedrus_write_ref_list(struct cedrus_ctx *ctx,
+ struct cedrus_run *run,
+ const u8 *ref_list, u8 num_ref,
+ enum cedrus_h264_sram_off sram)
+{
+ const struct v4l2_ctrl_h264_decode_params *decode = run->h264.decode_params;
+ struct vb2_queue *cap_q = &ctx->fh.m2m_ctx->cap_q_ctx.q;
+ struct cedrus_dev *dev = ctx->dev;
+ u8 sram_array[CEDRUS_MAX_REF_IDX];
+ unsigned int i;
+ size_t size;
+
+ memset(sram_array, 0, sizeof(sram_array));
+
+ for (i = 0; i < num_ref; i++) {
+ const struct v4l2_h264_dpb_entry *dpb;
+ const struct cedrus_buffer *cedrus_buf;
+ const struct vb2_v4l2_buffer *ref_buf;
+ unsigned int position;
+ int buf_idx;
+ u8 dpb_idx;
+
+ dpb_idx = ref_list[i];
+ dpb = &decode->dpb[dpb_idx];
+
+ if (!(dpb->flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE))
+ continue;
+
+ buf_idx = vb2_find_timestamp(cap_q, dpb->reference_ts, 0);
+ if (buf_idx < 0)
+ continue;
+
+ ref_buf = to_vb2_v4l2_buffer(ctx->dst_bufs[buf_idx]);
+ cedrus_buf = vb2_v4l2_to_cedrus_buffer(ref_buf);
+ position = cedrus_buf->codec.h264.position;
+
+ sram_array[i] |= position << 1;
+ if (ref_buf->field == V4L2_FIELD_BOTTOM)
+ sram_array[i] |= BIT(0);
+ }
+
+ size = min_t(size_t, ALIGN(num_ref, 4), sizeof(sram_array));
+ cedrus_h264_write_sram(dev, sram, &sram_array, size);
+}
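/*
 * Editor's note (not part of the original driver code): each byte that
 * _cedrus_write_ref_list() above stores in sram_array encodes one
 * reference as (DPB position << 1) | bottom_field. For example, a
 * bottom-field reference sitting at DPB position 3 is written as 0x07,
 * while the matching top field would be 0x06. The array length is then
 * rounded up to a multiple of 4 bytes before being copied to SRAM.
 */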
+
+static void cedrus_write_ref_list0(struct cedrus_ctx *ctx,
+ struct cedrus_run *run)
+{
+ const struct v4l2_ctrl_h264_slice_params *slice = run->h264.slice_params;
+
+ _cedrus_write_ref_list(ctx, run,
+ slice->ref_pic_list0,
+ slice->num_ref_idx_l0_active_minus1 + 1,
+ CEDRUS_SRAM_H264_REF_LIST_0);
+}
+
+static void cedrus_write_ref_list1(struct cedrus_ctx *ctx,
+ struct cedrus_run *run)
+{
+ const struct v4l2_ctrl_h264_slice_params *slice = run->h264.slice_params;
+
+ _cedrus_write_ref_list(ctx, run,
+ slice->ref_pic_list1,
+ slice->num_ref_idx_l1_active_minus1 + 1,
+ CEDRUS_SRAM_H264_REF_LIST_1);
+}
+
+static void cedrus_write_scaling_lists(struct cedrus_ctx *ctx,
+ struct cedrus_run *run)
+{
+ const struct v4l2_ctrl_h264_scaling_matrix *scaling =
+ run->h264.scaling_matrix;
+ struct cedrus_dev *dev = ctx->dev;
+
+ cedrus_h264_write_sram(dev, CEDRUS_SRAM_H264_SCALING_LIST_8x8_0,
+ scaling->scaling_list_8x8[0],
+ sizeof(scaling->scaling_list_8x8[0]));
+
+ cedrus_h264_write_sram(dev, CEDRUS_SRAM_H264_SCALING_LIST_8x8_1,
+ scaling->scaling_list_8x8[3],
+ sizeof(scaling->scaling_list_8x8[3]));
+
+ cedrus_h264_write_sram(dev, CEDRUS_SRAM_H264_SCALING_LIST_4x4,
+ scaling->scaling_list_4x4,
+ sizeof(scaling->scaling_list_4x4));
+}
+
+static void cedrus_write_pred_weight_table(struct cedrus_ctx *ctx,
+ struct cedrus_run *run)
+{
+ const struct v4l2_ctrl_h264_slice_params *slice =
+ run->h264.slice_params;
+ const struct v4l2_h264_pred_weight_table *pred_weight =
+ &slice->pred_weight_table;
+ struct cedrus_dev *dev = ctx->dev;
+ int i, j, k;
+
+ cedrus_write(dev, VE_H264_SHS_WP,
+ ((pred_weight->chroma_log2_weight_denom & 0x7) << 4) |
+ ((pred_weight->luma_log2_weight_denom & 0x7) << 0));
+
+ cedrus_write(dev, VE_AVC_SRAM_PORT_OFFSET,
+ CEDRUS_SRAM_H264_PRED_WEIGHT_TABLE << 2);
+
+ for (i = 0; i < ARRAY_SIZE(pred_weight->weight_factors); i++) {
+ const struct v4l2_h264_weight_factors *factors =
+ &pred_weight->weight_factors[i];
+
+ for (j = 0; j < ARRAY_SIZE(factors->luma_weight); j++) {
+ u32 val;
+
+ val = (((u32)factors->luma_offset[j] & 0x1ff) << 16) |
+ (factors->luma_weight[j] & 0x1ff);
+ cedrus_write(dev, VE_AVC_SRAM_PORT_DATA, val);
+ }
+
+ for (j = 0; j < ARRAY_SIZE(factors->chroma_weight); j++) {
+ for (k = 0; k < ARRAY_SIZE(factors->chroma_weight[0]); k++) {
+ u32 val;
+
+ val = (((u32)factors->chroma_offset[j][k] & 0x1ff) << 16) |
+ (factors->chroma_weight[j][k] & 0x1ff);
+ cedrus_write(dev, VE_AVC_SRAM_PORT_DATA, val);
+ }
+ }
+ }
+}
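/*
 * Editor's worked example (not part of the original driver code): the
 * 32-bit packing used by cedrus_write_pred_weight_table() above. Both
 * the offset and the weight are stored as 9-bit two's-complement fields,
 * so for an illustrative luma_weight of 32 and luma_offset of -4:
 *
 *   val = ((-4 & 0x1ff) << 16) | (32 & 0x1ff)
 *       = (0x1fc << 16) | 0x020
 *       = 0x01fc0020
 */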
+
+static void cedrus_set_params(struct cedrus_ctx *ctx,
+ struct cedrus_run *run)
+{
+ const struct v4l2_ctrl_h264_decode_params *decode = run->h264.decode_params;
+ const struct v4l2_ctrl_h264_slice_params *slice = run->h264.slice_params;
+ const struct v4l2_ctrl_h264_pps *pps = run->h264.pps;
+ const struct v4l2_ctrl_h264_sps *sps = run->h264.sps;
+ struct vb2_buffer *src_buf = &run->src->vb2_buf;
+ struct cedrus_dev *dev = ctx->dev;
+ dma_addr_t src_buf_addr;
+ u32 offset = slice->header_bit_size;
+ u32 len = (slice->size * 8) - offset;
+ u32 reg;
+
+ cedrus_write(dev, VE_H264_VLD_LEN, len);
+ cedrus_write(dev, VE_H264_VLD_OFFSET, offset);
+
+ src_buf_addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+ cedrus_write(dev, VE_H264_VLD_END,
+ src_buf_addr + vb2_get_plane_payload(src_buf, 0));
+ cedrus_write(dev, VE_H264_VLD_ADDR,
+ VE_H264_VLD_ADDR_VAL(src_buf_addr) |
+ VE_H264_VLD_ADDR_FIRST | VE_H264_VLD_ADDR_VALID |
+ VE_H264_VLD_ADDR_LAST);
+
+ /*
+ * FIXME: Since the bitstream parsing is done in software, and
+ * in userspace, this shouldn't be needed anymore. But it
+ * turns out that removing it breaks the decoding process,
+ * without any clear indication why.
+ */
+ cedrus_write(dev, VE_H264_TRIGGER_TYPE,
+ VE_H264_TRIGGER_TYPE_INIT_SWDEC);
+
+ if (((pps->flags & V4L2_H264_PPS_FLAG_WEIGHTED_PRED) &&
+ (slice->slice_type == V4L2_H264_SLICE_TYPE_P ||
+ slice->slice_type == V4L2_H264_SLICE_TYPE_SP)) ||
+ (pps->weighted_bipred_idc == 1 &&
+ slice->slice_type == V4L2_H264_SLICE_TYPE_B))
+ cedrus_write_pred_weight_table(ctx, run);
+
+ if ((slice->slice_type == V4L2_H264_SLICE_TYPE_P) ||
+ (slice->slice_type == V4L2_H264_SLICE_TYPE_SP) ||
+ (slice->slice_type == V4L2_H264_SLICE_TYPE_B))
+ cedrus_write_ref_list0(ctx, run);
+
+ if (slice->slice_type == V4L2_H264_SLICE_TYPE_B)
+ cedrus_write_ref_list1(ctx, run);
+
+ // picture parameters
+ reg = 0;
+ /*
+ * FIXME: the kernel headers allow the default value to be
+ * passed, but libva doesn't give us that.
+ */
+ reg |= (slice->num_ref_idx_l0_active_minus1 & 0x1f) << 10;
+ reg |= (slice->num_ref_idx_l1_active_minus1 & 0x1f) << 5;
+ reg |= (pps->weighted_bipred_idc & 0x3) << 2;
+ if (pps->flags & V4L2_H264_PPS_FLAG_ENTROPY_CODING_MODE)
+ reg |= VE_H264_PPS_ENTROPY_CODING_MODE;
+ if (pps->flags & V4L2_H264_PPS_FLAG_WEIGHTED_PRED)
+ reg |= VE_H264_PPS_WEIGHTED_PRED;
+ if (pps->flags & V4L2_H264_PPS_FLAG_CONSTRAINED_INTRA_PRED)
+ reg |= VE_H264_PPS_CONSTRAINED_INTRA_PRED;
+ if (pps->flags & V4L2_H264_PPS_FLAG_TRANSFORM_8X8_MODE)
+ reg |= VE_H264_PPS_TRANSFORM_8X8_MODE;
+ cedrus_write(dev, VE_H264_PPS, reg);
+
+ // sequence parameters
+ reg = 0;
+ reg |= (sps->chroma_format_idc & 0x7) << 19;
+ reg |= (sps->pic_width_in_mbs_minus1 & 0xff) << 8;
+ reg |= sps->pic_height_in_map_units_minus1 & 0xff;
+ if (sps->flags & V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY)
+ reg |= VE_H264_SPS_MBS_ONLY;
+ if (sps->flags & V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD)
+ reg |= VE_H264_SPS_MB_ADAPTIVE_FRAME_FIELD;
+ if (sps->flags & V4L2_H264_SPS_FLAG_DIRECT_8X8_INFERENCE)
+ reg |= VE_H264_SPS_DIRECT_8X8_INFERENCE;
+ cedrus_write(dev, VE_H264_SPS, reg);
+
+ // slice parameters
+ reg = 0;
+ reg |= decode->nal_ref_idc ? BIT(12) : 0;
+ reg |= (slice->slice_type & 0xf) << 8;
+ reg |= slice->cabac_init_idc & 0x3;
+ reg |= VE_H264_SHS_FIRST_SLICE_IN_PIC;
+ if (slice->flags & V4L2_H264_SLICE_FLAG_FIELD_PIC)
+ reg |= VE_H264_SHS_FIELD_PIC;
+ if (slice->flags & V4L2_H264_SLICE_FLAG_BOTTOM_FIELD)
+ reg |= VE_H264_SHS_BOTTOM_FIELD;
+ if (slice->flags & V4L2_H264_SLICE_FLAG_DIRECT_SPATIAL_MV_PRED)
+ reg |= VE_H264_SHS_DIRECT_SPATIAL_MV_PRED;
+ cedrus_write(dev, VE_H264_SHS, reg);
+
+ reg = 0;
+ reg |= VE_H264_SHS2_NUM_REF_IDX_ACTIVE_OVRD;
+ reg |= (slice->num_ref_idx_l0_active_minus1 & 0x1f) << 24;
+ reg |= (slice->num_ref_idx_l1_active_minus1 & 0x1f) << 16;
+ reg |= (slice->disable_deblocking_filter_idc & 0x3) << 8;
+ reg |= (slice->slice_alpha_c0_offset_div2 & 0xf) << 4;
+ reg |= slice->slice_beta_offset_div2 & 0xf;
+ cedrus_write(dev, VE_H264_SHS2, reg);
+
+ reg = 0;
+ reg |= (pps->second_chroma_qp_index_offset & 0x3f) << 16;
+ reg |= (pps->chroma_qp_index_offset & 0x3f) << 8;
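+ /* Slice QP, i.e. 26 + pic_init_qp_minus26 + slice_qp_delta. */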
+ reg |= (pps->pic_init_qp_minus26 + 26 + slice->slice_qp_delta) & 0x3f;
+ cedrus_write(dev, VE_H264_SHS_QP, reg);
+
+ // clear status flags
+ cedrus_write(dev, VE_H264_STATUS, cedrus_read(dev, VE_H264_STATUS));
+
+ // enable int
+ cedrus_write(dev, VE_H264_CTRL,
+ VE_H264_CTRL_SLICE_DECODE_INT |
+ VE_H264_CTRL_DECODE_ERR_INT |
+ VE_H264_CTRL_VLD_DATA_REQ_INT);
+}
+
+static enum cedrus_irq_status
+cedrus_h264_irq_status(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+ u32 reg = cedrus_read(dev, VE_H264_STATUS);
+
+ if (reg & (VE_H264_STATUS_DECODE_ERR_INT |
+ VE_H264_STATUS_VLD_DATA_REQ_INT))
+ return CEDRUS_IRQ_ERROR;
+
+ if (reg & VE_H264_CTRL_SLICE_DECODE_INT)
+ return CEDRUS_IRQ_OK;
+
+ return CEDRUS_IRQ_NONE;
+}
+
+static void cedrus_h264_irq_clear(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+
+ cedrus_write(dev, VE_H264_STATUS,
+ VE_H264_STATUS_INT_MASK);
+}
+
+static void cedrus_h264_irq_disable(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+ u32 reg = cedrus_read(dev, VE_H264_CTRL);
+
+ cedrus_write(dev, VE_H264_CTRL,
+ reg & ~VE_H264_CTRL_INT_MASK);
+}
+
+static void cedrus_h264_setup(struct cedrus_ctx *ctx,
+ struct cedrus_run *run)
+{
+ struct cedrus_dev *dev = ctx->dev;
+
+ cedrus_engine_enable(dev, CEDRUS_CODEC_H264);
+
+ cedrus_write(dev, VE_H264_SDROT_CTRL, 0);
+ cedrus_write(dev, VE_H264_EXTRA_BUFFER1,
+ ctx->codec.h264.pic_info_buf_dma);
+ cedrus_write(dev, VE_H264_EXTRA_BUFFER2,
+ ctx->codec.h264.neighbor_info_buf_dma);
+
+ cedrus_write_scaling_lists(ctx, run);
+ cedrus_write_frame_list(ctx, run);
+
+ cedrus_set_params(ctx, run);
+}
+
+static int cedrus_h264_start(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+ unsigned int field_size;
+ unsigned int mv_col_size;
+ int ret;
+
+ /*
+ * FIXME: The H6 cedarX code seems to compute this size from the
+ * frame dimensions, while all the older code uses a fixed size,
+ * so this might need to be changed at some point.
+ */
+ ctx->codec.h264.pic_info_buf =
+ dma_alloc_coherent(dev->dev, CEDRUS_PIC_INFO_BUF_SIZE,
+ &ctx->codec.h264.pic_info_buf_dma,
+ GFP_KERNEL);
+ if (!ctx->codec.h264.pic_info_buf)
+ return -ENOMEM;
+
+ /*
+ * That buffer is supposed to be 16 kiB in size and aligned on a
+ * 16 kiB boundary. However, dma_alloc_coherent guarantees that
+ * the CPU and DMA addresses are aligned to the smallest page
+ * order that is at least as large as the requested size, so we
+ * don't have to overallocate.
+ */
+ ctx->codec.h264.neighbor_info_buf =
+ dma_alloc_coherent(dev->dev, CEDRUS_NEIGHBOR_INFO_BUF_SIZE,
+ &ctx->codec.h264.neighbor_info_buf_dma,
+ GFP_KERNEL);
+ if (!ctx->codec.h264.neighbor_info_buf) {
+ ret = -ENOMEM;
+ goto err_pic_buf;
+ }
+
+ field_size = DIV_ROUND_UP(ctx->src_fmt.width, 16) *
+ DIV_ROUND_UP(ctx->src_fmt.height, 16) * 16;
+
+ /*
+ * FIXME: This is only needed when
+ * V4L2_H264_SPS_FLAG_DIRECT_8X8_INFERENCE is not set; we might
+ * have to rework this if memory efficiency ever becomes
+ * something we need to work on.
+ */
+ field_size = field_size * 2;
+
+ /*
+ * FIXME: This is only needed when
+ * V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY is not set; we might have
+ * to rework this if memory efficiency ever becomes something
+ * we need to work on.
+ */
+ field_size = field_size * 2;
+ ctx->codec.h264.mv_col_buf_field_size = field_size;
+
+ mv_col_size = field_size * 2 * CEDRUS_H264_FRAME_NUM;
+ ctx->codec.h264.mv_col_buf_size = mv_col_size;
+ ctx->codec.h264.mv_col_buf = dma_alloc_coherent(dev->dev,
+ ctx->codec.h264.mv_col_buf_size,
+ &ctx->codec.h264.mv_col_buf_dma,
+ GFP_KERNEL);
+ if (!ctx->codec.h264.mv_col_buf) {
+ ret = -ENOMEM;
+ goto err_neighbor_buf;
+ }
+
+ return 0;
+
+err_neighbor_buf:
+ dma_free_coherent(dev->dev, CEDRUS_NEIGHBOR_INFO_BUF_SIZE,
+ ctx->codec.h264.neighbor_info_buf,
+ ctx->codec.h264.neighbor_info_buf_dma);
+
+err_pic_buf:
+ dma_free_coherent(dev->dev, CEDRUS_PIC_INFO_BUF_SIZE,
+ ctx->codec.h264.pic_info_buf,
+ ctx->codec.h264.pic_info_buf_dma);
+ return ret;
+}
+
+static void cedrus_h264_stop(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+
+ dma_free_coherent(dev->dev, ctx->codec.h264.mv_col_buf_size,
+ ctx->codec.h264.mv_col_buf,
+ ctx->codec.h264.mv_col_buf_dma);
+ dma_free_coherent(dev->dev, CEDRUS_NEIGHBOR_INFO_BUF_SIZE,
+ ctx->codec.h264.neighbor_info_buf,
+ ctx->codec.h264.neighbor_info_buf_dma);
+ dma_free_coherent(dev->dev, CEDRUS_PIC_INFO_BUF_SIZE,
+ ctx->codec.h264.pic_info_buf,
+ ctx->codec.h264.pic_info_buf_dma);
+}
+
+static void cedrus_h264_trigger(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+
+ cedrus_write(dev, VE_H264_TRIGGER_TYPE,
+ VE_H264_TRIGGER_TYPE_AVC_SLICE_DECODE);
+}
+
+struct cedrus_dec_ops cedrus_dec_ops_h264 = {
+ .irq_clear = cedrus_h264_irq_clear,
+ .irq_disable = cedrus_h264_irq_disable,
+ .irq_status = cedrus_h264_irq_status,
+ .setup = cedrus_h264_setup,
+ .start = cedrus_h264_start,
+ .stop = cedrus_h264_stop,
+ .trigger = cedrus_h264_trigger,
+};
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
index fbfff7c1c771..c34aec7c6e40 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
@@ -46,6 +46,10 @@ int cedrus_engine_enable(struct cedrus_dev *dev, enum cedrus_codec codec)
reg |= VE_MODE_DEC_MPEG;
break;
+ case CEDRUS_CODEC_H264:
+ reg |= VE_MODE_DEC_H264;
+ break;
+
default:
return -EINVAL;
}
@@ -236,7 +240,7 @@ int cedrus_hw_probe(struct cedrus_dev *dev)
goto err_sram;
}
- ret = clk_set_rate(dev->mod_clk, CEDRUS_CLOCK_RATE_DEFAULT);
+ ret = clk_set_rate(dev->mod_clk, variant->mod_rate);
if (ret) {
dev_err(dev->dev, "Failed to set clock rate\n");
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_hw.h b/drivers/staging/media/sunxi/cedrus/cedrus_hw.h
index b43c77d54b95..27d0882397aa 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_hw.h
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_hw.h
@@ -16,8 +16,6 @@
#ifndef _CEDRUS_HW_H_
#define _CEDRUS_HW_H_
-#define CEDRUS_CLOCK_RATE_DEFAULT 320000000
-
int cedrus_engine_enable(struct cedrus_dev *dev, enum cedrus_codec codec);
void cedrus_engine_disable(struct cedrus_dev *dev);
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_regs.h b/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
index de2d6b6f64bf..3e9931416e45 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
@@ -232,4 +232,95 @@
#define VE_DEC_MPEG_ROT_LUMA (VE_ENGINE_DEC_MPEG + 0xcc)
#define VE_DEC_MPEG_ROT_CHROMA (VE_ENGINE_DEC_MPEG + 0xd0)
+#define VE_H264_SPS 0x200
+#define VE_H264_SPS_MBS_ONLY BIT(18)
+#define VE_H264_SPS_MB_ADAPTIVE_FRAME_FIELD BIT(17)
+#define VE_H264_SPS_DIRECT_8X8_INFERENCE BIT(16)
+
+#define VE_H264_PPS 0x204
+#define VE_H264_PPS_ENTROPY_CODING_MODE BIT(15)
+#define VE_H264_PPS_WEIGHTED_PRED BIT(4)
+#define VE_H264_PPS_CONSTRAINED_INTRA_PRED BIT(1)
+#define VE_H264_PPS_TRANSFORM_8X8_MODE BIT(0)
+
+#define VE_H264_SHS 0x208
+#define VE_H264_SHS_FIRST_SLICE_IN_PIC BIT(5)
+#define VE_H264_SHS_FIELD_PIC BIT(4)
+#define VE_H264_SHS_BOTTOM_FIELD BIT(3)
+#define VE_H264_SHS_DIRECT_SPATIAL_MV_PRED BIT(2)
+
+#define VE_H264_SHS2 0x20c
+#define VE_H264_SHS2_NUM_REF_IDX_ACTIVE_OVRD BIT(12)
+
+#define VE_H264_SHS_WP 0x210
+
+#define VE_H264_SHS_QP 0x21c
+#define VE_H264_SHS_QP_SCALING_MATRIX_DEFAULT BIT(24)
+
+#define VE_H264_CTRL 0x220
+#define VE_H264_CTRL_VLD_DATA_REQ_INT BIT(2)
+#define VE_H264_CTRL_DECODE_ERR_INT BIT(1)
+#define VE_H264_CTRL_SLICE_DECODE_INT BIT(0)
+
+#define VE_H264_CTRL_INT_MASK (VE_H264_CTRL_VLD_DATA_REQ_INT | \
+ VE_H264_CTRL_DECODE_ERR_INT | \
+ VE_H264_CTRL_SLICE_DECODE_INT)
+
+#define VE_H264_TRIGGER_TYPE 0x224
+#define VE_H264_TRIGGER_TYPE_AVC_SLICE_DECODE (8 << 0)
+#define VE_H264_TRIGGER_TYPE_INIT_SWDEC (7 << 0)
+
+#define VE_H264_STATUS 0x228
+#define VE_H264_STATUS_VLD_DATA_REQ_INT VE_H264_CTRL_VLD_DATA_REQ_INT
+#define VE_H264_STATUS_DECODE_ERR_INT VE_H264_CTRL_DECODE_ERR_INT
+#define VE_H264_STATUS_SLICE_DECODE_INT VE_H264_CTRL_SLICE_DECODE_INT
+
+#define VE_H264_STATUS_INT_MASK VE_H264_CTRL_INT_MASK
+
+#define VE_H264_CUR_MB_NUM 0x22c
+
+#define VE_H264_VLD_ADDR 0x230
+#define VE_H264_VLD_ADDR_FIRST BIT(30)
+#define VE_H264_VLD_ADDR_LAST BIT(29)
+#define VE_H264_VLD_ADDR_VALID BIT(28)
+#define VE_H264_VLD_ADDR_VAL(x) (((x) & 0x0ffffff0) | ((x) >> 28))
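+/* For a 32-bit address: keep bits 27:4 and fold bits 31:28 into bits 3:0. */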
+
+#define VE_H264_VLD_OFFSET 0x234
+#define VE_H264_VLD_LEN 0x238
+#define VE_H264_VLD_END 0x23c
+#define VE_H264_SDROT_CTRL 0x240
+#define VE_H264_OUTPUT_FRAME_IDX 0x24c
+#define VE_H264_EXTRA_BUFFER1 0x250
+#define VE_H264_EXTRA_BUFFER2 0x254
+#define VE_H264_BASIC_BITS 0x2dc
+#define VE_AVC_SRAM_PORT_OFFSET 0x2e0
+#define VE_AVC_SRAM_PORT_DATA 0x2e4
+
+#define VE_ISP_INPUT_SIZE 0xa00
+#define VE_ISP_INPUT_STRIDE 0xa04
+#define VE_ISP_CTRL 0xa08
+#define VE_ISP_INPUT_LUMA 0xa78
+#define VE_ISP_INPUT_CHROMA 0xa7c
+
+#define VE_AVC_PARAM 0xb04
+#define VE_AVC_QP 0xb08
+#define VE_AVC_MOTION_EST 0xb10
+#define VE_AVC_CTRL 0xb14
+#define VE_AVC_TRIGGER 0xb18
+#define VE_AVC_STATUS 0xb1c
+#define VE_AVC_BASIC_BITS 0xb20
+#define VE_AVC_UNK_BUF 0xb60
+#define VE_AVC_VLE_ADDR 0xb80
+#define VE_AVC_VLE_END 0xb84
+#define VE_AVC_VLE_OFFSET 0xb88
+#define VE_AVC_VLE_MAX 0xb8c
+#define VE_AVC_VLE_LENGTH 0xb90
+#define VE_AVC_REF_LUMA 0xba0
+#define VE_AVC_REF_CHROMA 0xba4
+#define VE_AVC_REC_LUMA 0xbb0
+#define VE_AVC_REC_CHROMA 0xbb4
+#define VE_AVC_REF_SLUMA 0xbb8
+#define VE_AVC_REC_SLUMA 0xbbc
+#define VE_AVC_MB_INFO 0xbc0
+
#endif
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_video.c b/drivers/staging/media/sunxi/cedrus/cedrus_video.c
index 9673874ece10..e2b530b1a956 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_video.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_video.c
@@ -38,6 +38,10 @@ static struct cedrus_format cedrus_formats[] = {
.directions = CEDRUS_DECODE_SRC,
},
{
+ .pixelformat = V4L2_PIX_FMT_H264_SLICE_RAW,
+ .directions = CEDRUS_DECODE_SRC,
+ },
+ {
.pixelformat = V4L2_PIX_FMT_SUNXI_TILED_NV12,
.directions = CEDRUS_DECODE_DST,
},
@@ -100,6 +104,7 @@ static void cedrus_prepare_format(struct v4l2_pix_format *pix_fmt)
switch (pix_fmt->pixelformat) {
case V4L2_PIX_FMT_MPEG2_SLICE:
+ case V4L2_PIX_FMT_H264_SLICE_RAW:
/* Zero bytes per line for encoded source. */
bytesperline = 0;
@@ -464,6 +469,10 @@ static int cedrus_start_streaming(struct vb2_queue *vq, unsigned int count)
ctx->current_codec = CEDRUS_CODEC_MPEG2;
break;
+ case V4L2_PIX_FMT_H264_SLICE_RAW:
+ ctx->current_codec = CEDRUS_CODEC_H264;
+ break;
+
default:
return -EINVAL;
}
diff --git a/drivers/staging/media/tegra-vde/Kconfig b/drivers/staging/media/tegra-vde/Kconfig
index ff8e846cd15d..2e7f644ae591 100644
--- a/drivers/staging/media/tegra-vde/Kconfig
+++ b/drivers/staging/media/tegra-vde/Kconfig
@@ -3,6 +3,7 @@ config TEGRA_VDE
tristate "NVIDIA Tegra Video Decoder Engine driver"
depends on ARCH_TEGRA || COMPILE_TEST
select DMA_SHARED_BUFFER
+ select IOMMU_IOVA if IOMMU_SUPPORT
select SRAM
help
Say Y here to enable support for the NVIDIA Tegra video decoder
diff --git a/drivers/staging/media/tegra-vde/Makefile b/drivers/staging/media/tegra-vde/Makefile
index 7f9020e634f3..2827f7601de8 100644
--- a/drivers/staging/media/tegra-vde/Makefile
+++ b/drivers/staging/media/tegra-vde/Makefile
@@ -1,2 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
+tegra-vde-y := vde.o iommu.o dmabuf-cache.o
obj-$(CONFIG_TEGRA_VDE) += tegra-vde.o
diff --git a/drivers/staging/media/tegra-vde/dmabuf-cache.c b/drivers/staging/media/tegra-vde/dmabuf-cache.c
new file mode 100644
index 000000000000..a93b317885bf
--- /dev/null
+++ b/drivers/staging/media/tegra-vde/dmabuf-cache.c
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * NVIDIA Tegra Video decoder driver
+ *
+ * Copyright (C) 2016-2019 GRATE-DRIVER project
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/iova.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+#include "vde.h"
+
+struct tegra_vde_cache_entry {
+ enum dma_data_direction dma_dir;
+ struct dma_buf_attachment *a;
+ struct delayed_work dwork;
+ struct tegra_vde *vde;
+ struct list_head list;
+ struct sg_table *sgt;
+ struct iova *iova;
+ unsigned int refcnt;
+};
+
+static void tegra_vde_release_entry(struct tegra_vde_cache_entry *entry)
+{
+ struct dma_buf *dmabuf = entry->a->dmabuf;
+
+ WARN_ON_ONCE(entry->refcnt);
+
+ if (entry->vde->domain)
+ tegra_vde_iommu_unmap(entry->vde, entry->iova);
+
+ dma_buf_unmap_attachment(entry->a, entry->sgt, entry->dma_dir);
+ dma_buf_detach(dmabuf, entry->a);
+ dma_buf_put(dmabuf);
+
+ list_del(&entry->list);
+ kfree(entry);
+}
+
+static void tegra_vde_delayed_unmap(struct work_struct *work)
+{
+ struct tegra_vde_cache_entry *entry;
+ struct tegra_vde *vde;
+
+ entry = container_of(work, struct tegra_vde_cache_entry,
+ dwork.work);
+ vde = entry->vde;
+
+ mutex_lock(&vde->map_lock);
+ tegra_vde_release_entry(entry);
+ mutex_unlock(&vde->map_lock);
+}
+
+int tegra_vde_dmabuf_cache_map(struct tegra_vde *vde,
+ struct dma_buf *dmabuf,
+ enum dma_data_direction dma_dir,
+ struct dma_buf_attachment **ap,
+ dma_addr_t *addrp)
+{
+ struct device *dev = vde->miscdev.parent;
+ struct dma_buf_attachment *attachment;
+ struct tegra_vde_cache_entry *entry;
+ struct sg_table *sgt;
+ struct iova *iova;
+ int err;
+
+ mutex_lock(&vde->map_lock);
+
+ list_for_each_entry(entry, &vde->map_list, list) {
+ if (entry->a->dmabuf != dmabuf)
+ continue;
+
+ if (!cancel_delayed_work(&entry->dwork))
+ continue;
+
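+ /* A cached mapping reused with a different direction becomes bidirectional. */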
+ if (entry->dma_dir != dma_dir)
+ entry->dma_dir = DMA_BIDIRECTIONAL;
+
+ dma_buf_put(dmabuf);
+
+ if (vde->domain)
+ *addrp = iova_dma_addr(&vde->iova, entry->iova);
+ else
+ *addrp = sg_dma_address(entry->sgt->sgl);
+
+ goto ref;
+ }
+
+ attachment = dma_buf_attach(dmabuf, dev);
+ if (IS_ERR(attachment)) {
+ dev_err(dev, "Failed to attach dmabuf\n");
+ err = PTR_ERR(attachment);
+ goto err_unlock;
+ }
+
+ sgt = dma_buf_map_attachment(attachment, dma_dir);
+ if (IS_ERR(sgt)) {
+ dev_err(dev, "Failed to get dmabufs sg_table\n");
+ err = PTR_ERR(sgt);
+ goto err_detach;
+ }
+
+ if (!vde->domain && sgt->nents > 1) {
+ dev_err(dev, "Sparse DMA region is unsupported, please enable IOMMU\n");
+ err = -EINVAL;
+ goto err_unmap;
+ }
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ err = -ENOMEM;
+ goto err_unmap;
+ }
+
+ if (vde->domain) {
+ err = tegra_vde_iommu_map(vde, sgt, &iova, dmabuf->size);
+ if (err)
+ goto err_free;
+
+ *addrp = iova_dma_addr(&vde->iova, iova);
+ } else {
+ *addrp = sg_dma_address(sgt->sgl);
+ iova = NULL;
+ }
+
+ INIT_DELAYED_WORK(&entry->dwork, tegra_vde_delayed_unmap);
+ list_add(&entry->list, &vde->map_list);
+
+ entry->dma_dir = dma_dir;
+ entry->iova = iova;
+ entry->vde = vde;
+ entry->sgt = sgt;
+ entry->a = attachment;
+ref:
+ entry->refcnt++;
+
+ *ap = entry->a;
+
+ mutex_unlock(&vde->map_lock);
+
+ return 0;
+
+err_free:
+ kfree(entry);
+err_unmap:
+ dma_buf_unmap_attachment(attachment, sgt, dma_dir);
+err_detach:
+ dma_buf_detach(dmabuf, attachment);
+err_unlock:
+ mutex_unlock(&vde->map_lock);
+
+ return err;
+}
+
+void tegra_vde_dmabuf_cache_unmap(struct tegra_vde *vde,
+ struct dma_buf_attachment *a,
+ bool release)
+{
+ struct tegra_vde_cache_entry *entry;
+
+ mutex_lock(&vde->map_lock);
+
+ list_for_each_entry(entry, &vde->map_list, list) {
+ if (entry->a != a)
+ continue;
+
+ WARN_ON_ONCE(!entry->refcnt);
+
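+ /*
+ * On the last reference either release the mapping right away or keep
+ * it cached and let the delayed work tear it down after 5 seconds.
+ */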
+ if (--entry->refcnt == 0) {
+ if (release)
+ tegra_vde_release_entry(entry);
+ else
+ schedule_delayed_work(&entry->dwork, 5 * HZ);
+ }
+ break;
+ }
+
+ mutex_unlock(&vde->map_lock);
+}
+
+void tegra_vde_dmabuf_cache_unmap_sync(struct tegra_vde *vde)
+{
+ struct tegra_vde_cache_entry *entry, *tmp;
+
+ mutex_lock(&vde->map_lock);
+
+ list_for_each_entry_safe(entry, tmp, &vde->map_list, list) {
+ if (entry->refcnt)
+ continue;
+
+ if (!cancel_delayed_work(&entry->dwork))
+ continue;
+
+ tegra_vde_release_entry(entry);
+ }
+
+ mutex_unlock(&vde->map_lock);
+}
+
+void tegra_vde_dmabuf_cache_unmap_all(struct tegra_vde *vde)
+{
+ struct tegra_vde_cache_entry *entry, *tmp;
+
+ mutex_lock(&vde->map_lock);
+
+ while (!list_empty(&vde->map_list)) {
+ list_for_each_entry_safe(entry, tmp, &vde->map_list, list) {
+ if (!cancel_delayed_work(&entry->dwork))
+ continue;
+
+ tegra_vde_release_entry(entry);
+ }
+
+ mutex_unlock(&vde->map_lock);
+ schedule();
+ mutex_lock(&vde->map_lock);
+ }
+
+ mutex_unlock(&vde->map_lock);
+}
diff --git a/drivers/staging/media/tegra-vde/iommu.c b/drivers/staging/media/tegra-vde/iommu.c
new file mode 100644
index 000000000000..6af863d92123
--- /dev/null
+++ b/drivers/staging/media/tegra-vde/iommu.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * NVIDIA Tegra Video decoder driver
+ *
+ * Copyright (C) 2016-2019 GRATE-DRIVER project
+ */
+
+#include <linux/iommu.h>
+#include <linux/iova.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+
+#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
+#include <asm/dma-iommu.h>
+#endif
+
+#include "vde.h"
+
+int tegra_vde_iommu_map(struct tegra_vde *vde,
+ struct sg_table *sgt,
+ struct iova **iovap,
+ size_t size)
+{
+ struct iova *iova;
+ unsigned long shift;
+ unsigned long end;
+ dma_addr_t addr;
+
+ end = vde->domain->geometry.aperture_end;
+ size = iova_align(&vde->iova, size);
+ shift = iova_shift(&vde->iova);
+
+ iova = alloc_iova(&vde->iova, size >> shift, end >> shift, true);
+ if (!iova)
+ return -ENOMEM;
+
+ addr = iova_dma_addr(&vde->iova, iova);
+
+ size = iommu_map_sg(vde->domain, addr, sgt->sgl, sgt->nents,
+ IOMMU_READ | IOMMU_WRITE);
+ if (!size) {
+ __free_iova(&vde->iova, iova);
+ return -ENXIO;
+ }
+
+ *iovap = iova;
+
+ return 0;
+}
+
+void tegra_vde_iommu_unmap(struct tegra_vde *vde, struct iova *iova)
+{
+ unsigned long shift = iova_shift(&vde->iova);
+ unsigned long size = iova_size(iova) << shift;
+ dma_addr_t addr = iova_dma_addr(&vde->iova, iova);
+
+ iommu_unmap(vde->domain, addr, size);
+ __free_iova(&vde->iova, iova);
+}
+
+int tegra_vde_iommu_init(struct tegra_vde *vde)
+{
+ struct device *dev = vde->miscdev.parent;
+ struct iova *iova;
+ unsigned long order;
+ unsigned long shift;
+ int err;
+
+ vde->group = iommu_group_get(dev);
+ if (!vde->group)
+ return 0;
+
+#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
+ if (dev->archdata.mapping) {
+ struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
+
+ arm_iommu_detach_device(dev);
+ arm_iommu_release_mapping(mapping);
+ }
+#endif
+ vde->domain = iommu_domain_alloc(&platform_bus_type);
+ if (!vde->domain) {
+ err = -ENOMEM;
+ goto put_group;
+ }
+
+ err = iova_cache_get();
+ if (err)
+ goto free_domain;
+
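+ /* Use the smallest page size supported by the domain as the IOVA granule. */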
+ order = __ffs(vde->domain->pgsize_bitmap);
+ init_iova_domain(&vde->iova, 1UL << order, 0);
+
+ err = iommu_attach_group(vde->domain, vde->group);
+ if (err)
+ goto put_iova;
+
+ /*
+ * We're using some static addresses that are not accessible by VDE
+ * to trap invalid memory accesses.
+ */
+ shift = iova_shift(&vde->iova);
+ iova = reserve_iova(&vde->iova, 0x60000000 >> shift,
+ 0x70000000 >> shift);
+ if (!iova) {
+ err = -ENOMEM;
+ goto detach_group;
+ }
+
+ vde->iova_resv_static_addresses = iova;
+
+ /*
+ * BSEV's end-address wraps around due to integer overflow during
+ * hardware context preparation if the IOVA is allocated at the end
+ * of the address space, and VDE can't handle that. Hence simply
+ * reserve the last page to avoid the problem.
+ */
+ iova = reserve_iova(&vde->iova, 0xffffffff >> shift,
+ (0xffffffff >> shift) + 1);
+ if (!iova) {
+ err = -ENOMEM;
+ goto unreserve_iova;
+ }
+
+ vde->iova_resv_last_page = iova;
+
+ return 0;
+
+unreserve_iova:
+ __free_iova(&vde->iova, vde->iova_resv_static_addresses);
+detach_group:
+ iommu_detach_group(vde->domain, vde->group);
+put_iova:
+ put_iova_domain(&vde->iova);
+ iova_cache_put();
+free_domain:
+ iommu_domain_free(vde->domain);
+put_group:
+ iommu_group_put(vde->group);
+
+ return err;
+}
+
+void tegra_vde_iommu_deinit(struct tegra_vde *vde)
+{
+ if (vde->domain) {
+ __free_iova(&vde->iova, vde->iova_resv_last_page);
+ __free_iova(&vde->iova, vde->iova_resv_static_addresses);
+ iommu_detach_group(vde->domain, vde->group);
+ put_iova_domain(&vde->iova);
+ iova_cache_put();
+ iommu_domain_free(vde->domain);
+ iommu_group_put(vde->group);
+
+ vde->domain = NULL;
+ }
+}
diff --git a/drivers/staging/media/tegra-vde/trace.h b/drivers/staging/media/tegra-vde/trace.h
index 85e2f7e2d4d0..e5714107db58 100644
--- a/drivers/staging/media/tegra-vde/trace.h
+++ b/drivers/staging/media/tegra-vde/trace.h
@@ -8,6 +8,8 @@
#include <linux/tracepoint.h>
+#include "vde.h"
+
DECLARE_EVENT_CLASS(register_access,
TP_PROTO(struct tegra_vde *vde, void __iomem *base,
u32 offset, u32 value),
diff --git a/drivers/staging/media/tegra-vde/uapi.h b/drivers/staging/media/tegra-vde/uapi.h
index a0dad1ed94ef..ffb4983e5bb6 100644
--- a/drivers/staging/media/tegra-vde/uapi.h
+++ b/drivers/staging/media/tegra-vde/uapi.h
@@ -6,8 +6,8 @@
#include <linux/types.h>
#include <asm/ioctl.h>
-#define FLAG_B_FRAME BIT(0)
-#define FLAG_REFERENCE BIT(1)
+#define FLAG_B_FRAME 0x1
+#define FLAG_REFERENCE 0x2
struct tegra_vde_h264_frame {
__s32 y_fd;
@@ -21,40 +21,42 @@ struct tegra_vde_h264_frame {
__u32 frame_num;
__u32 flags;
- __u32 reserved;
-} __attribute__((packed));
+ // Must be zeroed
+ __u32 reserved[6];
+};
struct tegra_vde_h264_decoder_ctx {
__s32 bitstream_data_fd;
__u32 bitstream_data_offset;
__u64 dpb_frames_ptr;
- __u8 dpb_frames_nb;
- __u8 dpb_ref_frames_with_earlier_poc_nb;
+ __u32 dpb_frames_nb;
+ __u32 dpb_ref_frames_with_earlier_poc_nb;
// SPS
- __u8 baseline_profile;
- __u8 level_idc;
- __u8 log2_max_pic_order_cnt_lsb;
- __u8 log2_max_frame_num;
- __u8 pic_order_cnt_type;
- __u8 direct_8x8_inference_flag;
- __u8 pic_width_in_mbs;
- __u8 pic_height_in_mbs;
+ __u32 baseline_profile;
+ __u32 level_idc;
+ __u32 log2_max_pic_order_cnt_lsb;
+ __u32 log2_max_frame_num;
+ __u32 pic_order_cnt_type;
+ __u32 direct_8x8_inference_flag;
+ __u32 pic_width_in_mbs;
+ __u32 pic_height_in_mbs;
// PPS
- __u8 pic_init_qp;
- __u8 deblocking_filter_control_present_flag;
- __u8 constrained_intra_pred_flag;
- __u8 chroma_qp_index_offset;
- __u8 pic_order_present_flag;
+ __u32 pic_init_qp;
+ __u32 deblocking_filter_control_present_flag;
+ __u32 constrained_intra_pred_flag;
+ __u32 chroma_qp_index_offset;
+ __u32 pic_order_present_flag;
// Slice header
- __u8 num_ref_idx_l0_active_minus1;
- __u8 num_ref_idx_l1_active_minus1;
+ __u32 num_ref_idx_l0_active_minus1;
+ __u32 num_ref_idx_l1_active_minus1;
- __u32 reserved;
-} __attribute__((packed));
+ // Must be zeroed
+ __u32 reserved[11];
+};
#define VDE_IOCTL_BASE ('v' + 0x20)
diff --git a/drivers/staging/media/tegra-vde/tegra-vde.c b/drivers/staging/media/tegra-vde/vde.c
index a5020dbf6eef..3466daddf663 100644
--- a/drivers/staging/media/tegra-vde/tegra-vde.c
+++ b/drivers/staging/media/tegra-vde/vde.c
@@ -11,6 +11,7 @@
#include <linux/genalloc.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
+#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/of_device.h>
@@ -22,6 +23,10 @@
#include <soc/tegra/pmc.h>
#include "uapi.h"
+#include "vde.h"
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
#define ICMDQUE_WR 0x00
#define CMDQUE_CONTROL 0x08
@@ -37,10 +42,6 @@ struct video_frame {
struct dma_buf_attachment *cb_dmabuf_attachment;
struct dma_buf_attachment *cr_dmabuf_attachment;
struct dma_buf_attachment *aux_dmabuf_attachment;
- struct sg_table *y_sgt;
- struct sg_table *cb_sgt;
- struct sg_table *cr_sgt;
- struct sg_table *aux_sgt;
dma_addr_t y_addr;
dma_addr_t cb_addr;
dma_addr_t cr_addr;
@@ -49,63 +50,6 @@ struct video_frame {
u32 flags;
};
-struct tegra_vde {
- void __iomem *sxe;
- void __iomem *bsev;
- void __iomem *mbe;
- void __iomem *ppe;
- void __iomem *mce;
- void __iomem *tfe;
- void __iomem *ppb;
- void __iomem *vdma;
- void __iomem *frameid;
- struct mutex lock;
- struct miscdevice miscdev;
- struct reset_control *rst;
- struct reset_control *rst_mc;
- struct gen_pool *iram_pool;
- struct completion decode_completion;
- struct clk *clk;
- dma_addr_t iram_lists_addr;
- u32 *iram;
-};
-
-static __maybe_unused char const *
-tegra_vde_reg_base_name(struct tegra_vde *vde, void __iomem *base)
-{
- if (vde->sxe == base)
- return "SXE";
-
- if (vde->bsev == base)
- return "BSEV";
-
- if (vde->mbe == base)
- return "MBE";
-
- if (vde->ppe == base)
- return "PPE";
-
- if (vde->mce == base)
- return "MCE";
-
- if (vde->tfe == base)
- return "TFE";
-
- if (vde->ppb == base)
- return "PPB";
-
- if (vde->vdma == base)
- return "VDMA";
-
- if (vde->frameid == base)
- return "FRAMEID";
-
- return "???";
-}
-
-#define CREATE_TRACE_POINTS
-#include "trace.h"
-
static void tegra_vde_writel(struct tegra_vde *vde,
u32 value, void __iomem *base, u32 offset)
{
@@ -543,31 +487,18 @@ static void tegra_vde_decode_frame(struct tegra_vde *vde,
vde->sxe, 0x00);
}
-static void tegra_vde_detach_and_put_dmabuf(struct dma_buf_attachment *a,
- struct sg_table *sgt,
- enum dma_data_direction dma_dir)
-{
- struct dma_buf *dmabuf = a->dmabuf;
-
- dma_buf_unmap_attachment(a, sgt, dma_dir);
- dma_buf_detach(dmabuf, a);
- dma_buf_put(dmabuf);
-}
-
-static int tegra_vde_attach_dmabuf(struct device *dev,
+static int tegra_vde_attach_dmabuf(struct tegra_vde *vde,
int fd,
unsigned long offset,
size_t min_size,
size_t align_size,
struct dma_buf_attachment **a,
- dma_addr_t *addr,
- struct sg_table **s,
+ dma_addr_t *addrp,
size_t *size,
enum dma_data_direction dma_dir)
{
- struct dma_buf_attachment *attachment;
+ struct device *dev = vde->miscdev.parent;
struct dma_buf *dmabuf;
- struct sg_table *sgt;
int err;
dmabuf = dma_buf_get(fd);
@@ -588,46 +519,24 @@ static int tegra_vde_attach_dmabuf(struct device *dev,
return -EINVAL;
}
- attachment = dma_buf_attach(dmabuf, dev);
- if (IS_ERR(attachment)) {
- dev_err(dev, "Failed to attach dmabuf\n");
- err = PTR_ERR(attachment);
+ err = tegra_vde_dmabuf_cache_map(vde, dmabuf, dma_dir, a, addrp);
+ if (err)
goto err_put;
- }
- sgt = dma_buf_map_attachment(attachment, dma_dir);
- if (IS_ERR(sgt)) {
- dev_err(dev, "Failed to get dmabufs sg_table\n");
- err = PTR_ERR(sgt);
- goto err_detach;
- }
-
- if (sgt->nents != 1) {
- dev_err(dev, "Sparse DMA region is unsupported\n");
- err = -EINVAL;
- goto err_unmap;
- }
-
- *addr = sg_dma_address(sgt->sgl) + offset;
- *a = attachment;
- *s = sgt;
+ *addrp = *addrp + offset;
if (size)
*size = dmabuf->size - offset;
return 0;
-err_unmap:
- dma_buf_unmap_attachment(attachment, sgt, dma_dir);
-err_detach:
- dma_buf_detach(dmabuf, attachment);
err_put:
dma_buf_put(dmabuf);
return err;
}
-static int tegra_vde_attach_dmabufs_to_frame(struct device *dev,
+static int tegra_vde_attach_dmabufs_to_frame(struct tegra_vde *vde,
struct video_frame *frame,
struct tegra_vde_h264_frame *src,
enum dma_data_direction dma_dir,
@@ -636,29 +545,26 @@ static int tegra_vde_attach_dmabufs_to_frame(struct device *dev,
{
int err;
- err = tegra_vde_attach_dmabuf(dev, src->y_fd,
+ err = tegra_vde_attach_dmabuf(vde, src->y_fd,
src->y_offset, lsize, SZ_256,
&frame->y_dmabuf_attachment,
&frame->y_addr,
- &frame->y_sgt,
NULL, dma_dir);
if (err)
return err;
- err = tegra_vde_attach_dmabuf(dev, src->cb_fd,
+ err = tegra_vde_attach_dmabuf(vde, src->cb_fd,
src->cb_offset, csize, SZ_256,
&frame->cb_dmabuf_attachment,
&frame->cb_addr,
- &frame->cb_sgt,
NULL, dma_dir);
if (err)
goto err_release_y;
- err = tegra_vde_attach_dmabuf(dev, src->cr_fd,
+ err = tegra_vde_attach_dmabuf(vde, src->cr_fd,
src->cr_offset, csize, SZ_256,
&frame->cr_dmabuf_attachment,
&frame->cr_addr,
- &frame->cr_sgt,
NULL, dma_dir);
if (err)
goto err_release_cb;
@@ -668,11 +574,10 @@ static int tegra_vde_attach_dmabufs_to_frame(struct device *dev,
return 0;
}
- err = tegra_vde_attach_dmabuf(dev, src->aux_fd,
+ err = tegra_vde_attach_dmabuf(vde, src->aux_fd,
src->aux_offset, csize, SZ_256,
&frame->aux_dmabuf_attachment,
&frame->aux_addr,
- &frame->aux_sgt,
NULL, dma_dir);
if (err)
goto err_release_cr;
@@ -680,34 +585,28 @@ static int tegra_vde_attach_dmabufs_to_frame(struct device *dev,
return 0;
err_release_cr:
- tegra_vde_detach_and_put_dmabuf(frame->cr_dmabuf_attachment,
- frame->cr_sgt, dma_dir);
+ tegra_vde_dmabuf_cache_unmap(vde, frame->cr_dmabuf_attachment, true);
err_release_cb:
- tegra_vde_detach_and_put_dmabuf(frame->cb_dmabuf_attachment,
- frame->cb_sgt, dma_dir);
+ tegra_vde_dmabuf_cache_unmap(vde, frame->cb_dmabuf_attachment, true);
err_release_y:
- tegra_vde_detach_and_put_dmabuf(frame->y_dmabuf_attachment,
- frame->y_sgt, dma_dir);
+ tegra_vde_dmabuf_cache_unmap(vde, frame->y_dmabuf_attachment, true);
return err;
}
-static void tegra_vde_release_frame_dmabufs(struct video_frame *frame,
+static void tegra_vde_release_frame_dmabufs(struct tegra_vde *vde,
+ struct video_frame *frame,
enum dma_data_direction dma_dir,
- bool baseline_profile)
+ bool baseline_profile,
+ bool release)
{
if (!baseline_profile)
- tegra_vde_detach_and_put_dmabuf(frame->aux_dmabuf_attachment,
- frame->aux_sgt, dma_dir);
+ tegra_vde_dmabuf_cache_unmap(vde, frame->aux_dmabuf_attachment,
+ release);
- tegra_vde_detach_and_put_dmabuf(frame->cr_dmabuf_attachment,
- frame->cr_sgt, dma_dir);
-
- tegra_vde_detach_and_put_dmabuf(frame->cb_dmabuf_attachment,
- frame->cb_sgt, dma_dir);
-
- tegra_vde_detach_and_put_dmabuf(frame->y_dmabuf_attachment,
- frame->y_sgt, dma_dir);
+ tegra_vde_dmabuf_cache_unmap(vde, frame->cr_dmabuf_attachment, release);
+ tegra_vde_dmabuf_cache_unmap(vde, frame->cb_dmabuf_attachment, release);
+ tegra_vde_dmabuf_cache_unmap(vde, frame->y_dmabuf_attachment, release);
}
static int tegra_vde_validate_frame(struct device *dev,
@@ -795,11 +694,10 @@ static int tegra_vde_ioctl_decode_h264(struct tegra_vde *vde,
{
struct device *dev = vde->miscdev.parent;
struct tegra_vde_h264_decoder_ctx ctx;
- struct tegra_vde_h264_frame frames[17];
+ struct tegra_vde_h264_frame *frames;
struct tegra_vde_h264_frame __user *frames_user;
struct video_frame *dpb_frames;
struct dma_buf_attachment *bitstream_data_dmabuf_attachment;
- struct sg_table *bitstream_sgt;
enum dma_data_direction dma_dir;
dma_addr_t bitstream_data_addr;
dma_addr_t bsev_ptr;
@@ -819,22 +717,27 @@ static int tegra_vde_ioctl_decode_h264(struct tegra_vde *vde,
if (ret)
return ret;
- ret = tegra_vde_attach_dmabuf(dev, ctx.bitstream_data_fd,
+ ret = tegra_vde_attach_dmabuf(vde, ctx.bitstream_data_fd,
ctx.bitstream_data_offset,
SZ_16K, SZ_16K,
&bitstream_data_dmabuf_attachment,
&bitstream_data_addr,
- &bitstream_sgt,
&bitstream_data_size,
DMA_TO_DEVICE);
if (ret)
return ret;
+ frames = kmalloc_array(ctx.dpb_frames_nb, sizeof(*frames), GFP_KERNEL);
+ if (!frames) {
+ ret = -ENOMEM;
+ goto release_bitstream_dmabuf;
+ }
+
dpb_frames = kcalloc(ctx.dpb_frames_nb, sizeof(*dpb_frames),
GFP_KERNEL);
if (!dpb_frames) {
ret = -ENOMEM;
- goto release_bitstream_dmabuf;
+ goto free_frames;
}
macroblocks_nb = ctx.pic_width_in_mbs * ctx.pic_height_in_mbs;
@@ -860,7 +763,7 @@ static int tegra_vde_ioctl_decode_h264(struct tegra_vde *vde,
dma_dir = (i == 0) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
- ret = tegra_vde_attach_dmabufs_to_frame(dev, &dpb_frames[i],
+ ret = tegra_vde_attach_dmabufs_to_frame(vde, &dpb_frames[i],
&frames[i], dma_dir,
ctx.baseline_profile,
lsize, csize);
@@ -948,16 +851,19 @@ release_dpb_frames:
while (i--) {
dma_dir = (i == 0) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
- tegra_vde_release_frame_dmabufs(&dpb_frames[i], dma_dir,
- ctx.baseline_profile);
+ tegra_vde_release_frame_dmabufs(vde, &dpb_frames[i], dma_dir,
+ ctx.baseline_profile, ret != 0);
}
free_dpb_frames:
kfree(dpb_frames);
+free_frames:
+ kfree(frames);
+
release_bitstream_dmabuf:
- tegra_vde_detach_and_put_dmabuf(bitstream_data_dmabuf_attachment,
- bitstream_sgt, DMA_TO_DEVICE);
+ tegra_vde_dmabuf_cache_unmap(vde, bitstream_data_dmabuf_attachment,
+ ret != 0);
return ret;
}
@@ -979,9 +885,21 @@ static long tegra_vde_unlocked_ioctl(struct file *filp,
return -ENOTTY;
}
+static int tegra_vde_release_file(struct inode *inode, struct file *filp)
+{
+ struct miscdevice *miscdev = filp->private_data;
+ struct tegra_vde *vde = container_of(miscdev, struct tegra_vde,
+ miscdev);
+
+ tegra_vde_dmabuf_cache_unmap_sync(vde);
+
+ return 0;
+}
+
static const struct file_operations tegra_vde_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = tegra_vde_unlocked_ioctl,
+ .release = tegra_vde_release_file,
};
static irqreturn_t tegra_vde_isr(int irq, void *data)
@@ -1159,6 +1077,8 @@ static int tegra_vde_probe(struct platform_device *pdev)
return -ENOMEM;
}
+ INIT_LIST_HEAD(&vde->map_list);
+ mutex_init(&vde->map_lock);
mutex_init(&vde->lock);
init_completion(&vde->decode_completion);
@@ -1167,10 +1087,16 @@ static int tegra_vde_probe(struct platform_device *pdev)
vde->miscdev.fops = &tegra_vde_fops;
vde->miscdev.parent = dev;
+ err = tegra_vde_iommu_init(vde);
+ if (err) {
+ dev_err(dev, "Failed to initialize IOMMU: %d\n", err);
+ goto err_gen_free;
+ }
+
err = misc_register(&vde->miscdev);
if (err) {
dev_err(dev, "Failed to register misc device: %d\n", err);
- goto err_gen_free;
+ goto err_deinit_iommu;
}
pm_runtime_enable(dev);
@@ -1188,6 +1114,9 @@ static int tegra_vde_probe(struct platform_device *pdev)
err_misc_unreg:
misc_deregister(&vde->miscdev);
+err_deinit_iommu:
+ tegra_vde_iommu_deinit(vde);
+
err_gen_free:
gen_pool_free(vde->iram_pool, (unsigned long)vde->iram,
gen_pool_size(vde->iram_pool));
@@ -1212,6 +1141,9 @@ static int tegra_vde_remove(struct platform_device *pdev)
misc_deregister(&vde->miscdev);
+ tegra_vde_dmabuf_cache_unmap_all(vde);
+ tegra_vde_iommu_deinit(vde);
+
gen_pool_free(vde->iram_pool, (unsigned long)vde->iram,
gen_pool_size(vde->iram_pool));
diff --git a/drivers/staging/media/tegra-vde/vde.h b/drivers/staging/media/tegra-vde/vde.h
new file mode 100644
index 000000000000..d369f1466bc7
--- /dev/null
+++ b/drivers/staging/media/tegra-vde/vde.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * NVIDIA Tegra Video decoder driver
+ *
+ * Copyright (C) 2016-2019 GRATE-DRIVER project
+ */
+
+#ifndef TEGRA_VDE_H
+#define TEGRA_VDE_H
+
+#include <linux/completion.h>
+#include <linux/dma-direction.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/iova.h>
+
+struct clk;
+struct dma_buf;
+struct gen_pool;
+struct iommu_group;
+struct iommu_domain;
+struct reset_control;
+struct dma_buf_attachment;
+
+struct tegra_vde {
+ void __iomem *sxe;
+ void __iomem *bsev;
+ void __iomem *mbe;
+ void __iomem *ppe;
+ void __iomem *mce;
+ void __iomem *tfe;
+ void __iomem *ppb;
+ void __iomem *vdma;
+ void __iomem *frameid;
+ struct mutex lock;
+ struct mutex map_lock;
+ struct list_head map_list;
+ struct miscdevice miscdev;
+ struct reset_control *rst;
+ struct reset_control *rst_mc;
+ struct gen_pool *iram_pool;
+ struct completion decode_completion;
+ struct clk *clk;
+ struct iommu_domain *domain;
+ struct iommu_group *group;
+ struct iova_domain iova;
+ struct iova *iova_resv_static_addresses;
+ struct iova *iova_resv_last_page;
+ dma_addr_t iram_lists_addr;
+ u32 *iram;
+};
+
+int tegra_vde_iommu_init(struct tegra_vde *vde);
+void tegra_vde_iommu_deinit(struct tegra_vde *vde);
+int tegra_vde_iommu_map(struct tegra_vde *vde,
+ struct sg_table *sgt,
+ struct iova **iovap,
+ size_t size);
+void tegra_vde_iommu_unmap(struct tegra_vde *vde, struct iova *iova);
+
+int tegra_vde_dmabuf_cache_map(struct tegra_vde *vde,
+ struct dma_buf *dmabuf,
+ enum dma_data_direction dma_dir,
+ struct dma_buf_attachment **ap,
+ dma_addr_t *addrp);
+void tegra_vde_dmabuf_cache_unmap(struct tegra_vde *vde,
+ struct dma_buf_attachment *a,
+ bool release);
+void tegra_vde_dmabuf_cache_unmap_sync(struct tegra_vde *vde);
+void tegra_vde_dmabuf_cache_unmap_all(struct tegra_vde *vde);
+
+static __maybe_unused char const *
+tegra_vde_reg_base_name(struct tegra_vde *vde, void __iomem *base)
+{
+ if (vde->sxe == base)
+ return "SXE";
+
+ if (vde->bsev == base)
+ return "BSEV";
+
+ if (vde->mbe == base)
+ return "MBE";
+
+ if (vde->ppe == base)
+ return "PPE";
+
+ if (vde->mce == base)
+ return "MCE";
+
+ if (vde->tfe == base)
+ return "TFE";
+
+ if (vde->ppb == base)
+ return "PPB";
+
+ if (vde->vdma == base)
+ return "VDMA";
+
+ if (vde->frameid == base)
+ return "FRAMEID";
+
+ return "???";
+}
+
+#endif /* TEGRA_VDE_H */
diff --git a/drivers/staging/olpc_dcon/TODO b/drivers/staging/olpc_dcon/TODO
index 665a0b061719..fe09efbc7f77 100644
--- a/drivers/staging/olpc_dcon/TODO
+++ b/drivers/staging/olpc_dcon/TODO
@@ -1,4 +1,11 @@
TODO:
+ - complete rewrite:
+ 1. The underlying fbdev drivers need to be converted into drm kernel
+ modesetting drivers.
+ 2. The dcon low-power display mode can then be integrated using the
+ drm damage tracking and self-refresh helpers.
+ The current bolted-on self-refresh support, which digs around in fbdev
+ internals without being properly integrated, is not the correct solution.
- see if vx855 gpio API can be made similar enough to cs5535 so we can
share more code
- convert all uses of the old GPIO API from <linux/gpio.h> to the
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.c b/drivers/staging/olpc_dcon/olpc_dcon.c
index 6b714f740ac3..a254238be181 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon.c
@@ -250,11 +250,7 @@ static bool dcon_blank_fb(struct dcon_priv *dcon, bool blank)
int err;
console_lock();
- if (!lock_fb_info(dcon->fbinfo)) {
- console_unlock();
- dev_err(&dcon->client->dev, "unable to lock framebuffer\n");
- return false;
- }
+ lock_fb_info(dcon->fbinfo);
dcon->ignore_fb_events = true;
err = fb_blank(dcon->fbinfo,
diff --git a/drivers/staging/sm750fb/Kconfig b/drivers/staging/sm750fb/Kconfig
index fb5a086bf9b1..8c0d8a873d5b 100644
--- a/drivers/staging/sm750fb/Kconfig
+++ b/drivers/staging/sm750fb/Kconfig
@@ -12,4 +12,4 @@ config FB_SM750
This driver is also available as a module. The module will be
called sm750fb. If you want to compile it as a module, say M
- here and read <file:Documentation/kbuild/modules.txt>.
+ here and read <file:Documentation/kbuild/modules.rst>.
diff --git a/drivers/staging/unisys/visorhba/visorhba_main.c b/drivers/staging/unisys/visorhba/visorhba_main.c
index 2dad36a05518..dd979ee4dcf1 100644
--- a/drivers/staging/unisys/visorhba/visorhba_main.c
+++ b/drivers/staging/unisys/visorhba/visorhba_main.c
@@ -871,12 +871,11 @@ static void do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp,
return;
}
- sg = scsi_sglist(scsicmd);
- for (i = 0; i < scsi_sg_count(scsicmd); i++) {
- this_page_orig = kmap_atomic(sg_page(sg + i));
+ scsi_for_each_sg(scsicmd, sg, scsi_sg_count(scsicmd), i) {
+ this_page_orig = kmap_atomic(sg_page(sg));
this_page = (void *)((unsigned long)this_page_orig |
- sg[i].offset);
- memcpy(this_page, buf + bufind, sg[i].length);
+ sg->offset);
+ memcpy(this_page, buf + bufind, sg->length);
kunmap_atomic(this_page_orig);
}
kfree(buf);
diff --git a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
index 68f08dc18da9..49d0470f9a7e 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
+++ b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
@@ -864,10 +864,6 @@ static int vidioc_querycap(struct file *file, void *priv,
snprintf((char *)cap->bus_info, sizeof(cap->bus_info),
"platform:%s", dev->v4l2_dev.name);
- cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OVERLAY |
- V4L2_CAP_STREAMING | V4L2_CAP_READWRITE;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
-
return 0;
}
@@ -1446,6 +1442,8 @@ static const struct video_device vdev_template = {
.fops = &camera0_fops,
.ioctl_ops = &camera0_ioctl_ops,
.release = video_device_release_empty,
+ .device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OVERLAY |
+ V4L2_CAP_STREAMING | V4L2_CAP_READWRITE,
};
/* Returns the number of cameras, and also the max resolution supported
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
index c859afa4308e..54bb1ebd8eb5 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
@@ -315,8 +315,10 @@ int cxgbit_ddp_init(struct cxgbit_device *cdev)
ret = cxgbi_ppm_init(lldi->iscsi_ppm, cdev->lldi.ports[0],
cdev->lldi.pdev, &cdev->lldi, &tformat,
- ppmax, lldi->iscsi_llimit,
- lldi->vr->iscsi.start, 2);
+ lldi->vr->iscsi.size, lldi->iscsi_llimit,
+ lldi->vr->iscsi.start, 2,
+ lldi->vr->ppod_edram.start,
+ lldi->vr->ppod_edram.size);
if (ret >= 0) {
struct cxgbi_ppm *ppm = (struct cxgbi_ppm *)(*lldi->iscsi_ppm);
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index b6e4862cc242..51ddca2033e0 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -81,6 +81,12 @@ out:
return CHAP_DIGEST_UNKNOWN;
}
+static void chap_close(struct iscsi_conn *conn)
+{
+ kfree(conn->auth_protocol);
+ conn->auth_protocol = NULL;
+}
+
static struct iscsi_chap *chap_server_open(
struct iscsi_conn *conn,
struct iscsi_node_auth *auth,
@@ -118,7 +124,7 @@ static struct iscsi_chap *chap_server_open(
case CHAP_DIGEST_UNKNOWN:
default:
pr_err("Unsupported CHAP_A value\n");
- kfree(conn->auth_protocol);
+ chap_close(conn);
return NULL;
}
@@ -133,19 +139,13 @@ static struct iscsi_chap *chap_server_open(
* Generate Challenge.
*/
if (chap_gen_challenge(conn, 1, aic_str, aic_len) < 0) {
- kfree(conn->auth_protocol);
+ chap_close(conn);
return NULL;
}
return chap;
}
-static void chap_close(struct iscsi_conn *conn)
-{
- kfree(conn->auth_protocol);
- conn->auth_protocol = NULL;
-}
-
static int chap_server_compute_md5(
struct iscsi_conn *conn,
struct iscsi_node_auth *auth,
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 181a32a6f391..685d771b51d4 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -152,22 +152,11 @@ static u32 iscsi_handle_authentication(
if (strstr("None", authtype))
return 1;
-#ifdef CANSRP
- else if (strstr("SRP", authtype))
- return srp_main_loop(conn, auth, in_buf, out_buf,
- &in_length, out_length);
-#endif
else if (strstr("CHAP", authtype))
return chap_main_loop(conn, auth, in_buf, out_buf,
&in_length, out_length);
- else if (strstr("SPKM1", authtype))
- return 2;
- else if (strstr("SPKM2", authtype))
- return 2;
- else if (strstr("KRB5", authtype))
- return 2;
- else
- return 2;
+ /* SRP, SPKM1, SPKM2 and KRB5 are unsupported */
+ return 2;
}
static void iscsi_remove_failed_auth_entry(struct iscsi_conn *conn)
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index f4a075303e9a..6949ea8bc387 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -502,7 +502,7 @@ iblock_execute_write_same(struct se_cmd *cmd)
/* Always in 512 byte units for Linux/Block */
block_lba += sg->length >> SECTOR_SHIFT;
- sectors -= 1;
+ sectors -= sg->length >> SECTOR_SHIFT;
}
iblock_submit_bios(&list);
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index b43d6385a1a0..04eda111920e 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -1824,20 +1824,18 @@ static int tcmu_update_uio_info(struct tcmu_dev *udev)
{
struct tcmu_hba *hba = udev->hba->hba_ptr;
struct uio_info *info;
- size_t size, used;
char *str;
info = &udev->uio_info;
- size = snprintf(NULL, 0, "tcm-user/%u/%s/%s", hba->host_id, udev->name,
- udev->dev_config);
- size += 1; /* for \0 */
- str = kmalloc(size, GFP_KERNEL);
- if (!str)
- return -ENOMEM;
- used = snprintf(str, size, "tcm-user/%u/%s", hba->host_id, udev->name);
if (udev->dev_config[0])
- snprintf(str + used, size - used, "/%s", udev->dev_config);
+ str = kasprintf(GFP_KERNEL, "tcm-user/%u/%s/%s", hba->host_id,
+ udev->name, udev->dev_config);
+ else
+ str = kasprintf(GFP_KERNEL, "tcm-user/%u/%s", hba->host_id,
+ udev->name);
+ if (!str)
+ return -ENOMEM;
/* If the old string exists, free it */
kfree(info->name);
diff --git a/drivers/thermal/intel/x86_pkg_temp_thermal.c b/drivers/thermal/intel/x86_pkg_temp_thermal.c
index 319b77126168..e85d54d1cdf3 100644
--- a/drivers/thermal/intel/x86_pkg_temp_thermal.c
+++ b/drivers/thermal/intel/x86_pkg_temp_thermal.c
@@ -43,7 +43,7 @@ MODULE_PARM_DESC(notify_delay_ms,
*/
#define MAX_NUMBER_OF_TRIPS 2
-struct pkg_device {
+struct zone_device {
int cpu;
bool work_scheduled;
u32 tj_max;
@@ -58,10 +58,10 @@ static struct thermal_zone_params pkg_temp_tz_params = {
.no_hwmon = true,
};
-/* Keep track of how many package pointers we allocated in init() */
-static int max_packages __read_mostly;
-/* Array of package pointers */
-static struct pkg_device **packages;
+/* Keep track of how many zone pointers we allocated in init() */
+static int max_id __read_mostly;
+/* Array of zone pointers */
+static struct zone_device **zones;
/* Serializes interrupt notification, work and hotplug */
static DEFINE_SPINLOCK(pkg_temp_lock);
/* Protects zone operation in the work function against hotplug removal */
@@ -108,12 +108,12 @@ err_out:
*
* - Other callsites: Must hold pkg_temp_lock
*/
-static struct pkg_device *pkg_temp_thermal_get_dev(unsigned int cpu)
+static struct zone_device *pkg_temp_thermal_get_dev(unsigned int cpu)
{
- int pkgid = topology_logical_package_id(cpu);
+ int id = topology_logical_die_id(cpu);
- if (pkgid >= 0 && pkgid < max_packages)
- return packages[pkgid];
+ if (id >= 0 && id < max_id)
+ return zones[id];
return NULL;
}
@@ -138,12 +138,13 @@ static int get_tj_max(int cpu, u32 *tj_max)
static int sys_get_curr_temp(struct thermal_zone_device *tzd, int *temp)
{
- struct pkg_device *pkgdev = tzd->devdata;
+ struct zone_device *zonedev = tzd->devdata;
u32 eax, edx;
- rdmsr_on_cpu(pkgdev->cpu, MSR_IA32_PACKAGE_THERM_STATUS, &eax, &edx);
+ rdmsr_on_cpu(zonedev->cpu, MSR_IA32_PACKAGE_THERM_STATUS,
+ &eax, &edx);
if (eax & 0x80000000) {
- *temp = pkgdev->tj_max - ((eax >> 16) & 0x7f) * 1000;
+ *temp = zonedev->tj_max - ((eax >> 16) & 0x7f) * 1000;
pr_debug("sys_get_curr_temp %d\n", *temp);
return 0;
}
@@ -153,7 +154,7 @@ static int sys_get_curr_temp(struct thermal_zone_device *tzd, int *temp)
static int sys_get_trip_temp(struct thermal_zone_device *tzd,
int trip, int *temp)
{
- struct pkg_device *pkgdev = tzd->devdata;
+ struct zone_device *zonedev = tzd->devdata;
unsigned long thres_reg_value;
u32 mask, shift, eax, edx;
int ret;
@@ -169,14 +170,14 @@ static int sys_get_trip_temp(struct thermal_zone_device *tzd,
shift = THERM_SHIFT_THRESHOLD0;
}
- ret = rdmsr_on_cpu(pkgdev->cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT,
+ ret = rdmsr_on_cpu(zonedev->cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT,
&eax, &edx);
if (ret < 0)
return ret;
thres_reg_value = (eax & mask) >> shift;
if (thres_reg_value)
- *temp = pkgdev->tj_max - thres_reg_value * 1000;
+ *temp = zonedev->tj_max - thres_reg_value * 1000;
else
*temp = 0;
pr_debug("sys_get_trip_temp %d\n", *temp);
@@ -187,14 +188,14 @@ static int sys_get_trip_temp(struct thermal_zone_device *tzd,
static int
sys_set_trip_temp(struct thermal_zone_device *tzd, int trip, int temp)
{
- struct pkg_device *pkgdev = tzd->devdata;
+ struct zone_device *zonedev = tzd->devdata;
u32 l, h, mask, shift, intr;
int ret;
- if (trip >= MAX_NUMBER_OF_TRIPS || temp >= pkgdev->tj_max)
+ if (trip >= MAX_NUMBER_OF_TRIPS || temp >= zonedev->tj_max)
return -EINVAL;
- ret = rdmsr_on_cpu(pkgdev->cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT,
+ ret = rdmsr_on_cpu(zonedev->cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT,
&l, &h);
if (ret < 0)
return ret;
@@ -216,11 +217,12 @@ sys_set_trip_temp(struct thermal_zone_device *tzd, int trip, int temp)
if (!temp) {
l &= ~intr;
} else {
- l |= (pkgdev->tj_max - temp)/1000 << shift;
+ l |= (zonedev->tj_max - temp)/1000 << shift;
l |= intr;
}
- return wrmsr_on_cpu(pkgdev->cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
+ return wrmsr_on_cpu(zonedev->cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT,
+ l, h);
}
static int sys_get_trip_type(struct thermal_zone_device *thermal, int trip,
@@ -275,26 +277,26 @@ static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work)
{
struct thermal_zone_device *tzone = NULL;
int cpu = smp_processor_id();
- struct pkg_device *pkgdev;
+ struct zone_device *zonedev;
u64 msr_val, wr_val;
mutex_lock(&thermal_zone_mutex);
spin_lock_irq(&pkg_temp_lock);
++pkg_work_cnt;
- pkgdev = pkg_temp_thermal_get_dev(cpu);
- if (!pkgdev) {
+ zonedev = pkg_temp_thermal_get_dev(cpu);
+ if (!zonedev) {
spin_unlock_irq(&pkg_temp_lock);
mutex_unlock(&thermal_zone_mutex);
return;
}
- pkgdev->work_scheduled = false;
+ zonedev->work_scheduled = false;
rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val);
wr_val = msr_val & ~(THERM_LOG_THRESHOLD0 | THERM_LOG_THRESHOLD1);
if (wr_val != msr_val) {
wrmsrl(MSR_IA32_PACKAGE_THERM_STATUS, wr_val);
- tzone = pkgdev->tzone;
+ tzone = zonedev->tzone;
}
enable_pkg_thres_interrupt();
@@ -320,7 +322,7 @@ static void pkg_thermal_schedule_work(int cpu, struct delayed_work *work)
static int pkg_thermal_notify(u64 msr_val)
{
int cpu = smp_processor_id();
- struct pkg_device *pkgdev;
+ struct zone_device *zonedev;
unsigned long flags;
spin_lock_irqsave(&pkg_temp_lock, flags);
@@ -329,10 +331,10 @@ static int pkg_thermal_notify(u64 msr_val)
disable_pkg_thres_interrupt();
/* Work is per package, so scheduling it once is enough. */
- pkgdev = pkg_temp_thermal_get_dev(cpu);
- if (pkgdev && !pkgdev->work_scheduled) {
- pkgdev->work_scheduled = true;
- pkg_thermal_schedule_work(pkgdev->cpu, &pkgdev->work);
+ zonedev = pkg_temp_thermal_get_dev(cpu);
+ if (zonedev && !zonedev->work_scheduled) {
+ zonedev->work_scheduled = true;
+ pkg_thermal_schedule_work(zonedev->cpu, &zonedev->work);
}
spin_unlock_irqrestore(&pkg_temp_lock, flags);
@@ -341,12 +343,12 @@ static int pkg_thermal_notify(u64 msr_val)
static int pkg_temp_thermal_device_add(unsigned int cpu)
{
- int pkgid = topology_logical_package_id(cpu);
+ int id = topology_logical_die_id(cpu);
u32 tj_max, eax, ebx, ecx, edx;
- struct pkg_device *pkgdev;
+ struct zone_device *zonedev;
int thres_count, err;
- if (pkgid >= max_packages)
+ if (id >= max_id)
return -ENOMEM;
cpuid(6, &eax, &ebx, &ecx, &edx);
@@ -360,51 +362,51 @@ static int pkg_temp_thermal_device_add(unsigned int cpu)
if (err)
return err;
- pkgdev = kzalloc(sizeof(*pkgdev), GFP_KERNEL);
- if (!pkgdev)
+ zonedev = kzalloc(sizeof(*zonedev), GFP_KERNEL);
+ if (!zonedev)
return -ENOMEM;
- INIT_DELAYED_WORK(&pkgdev->work, pkg_temp_thermal_threshold_work_fn);
- pkgdev->cpu = cpu;
- pkgdev->tj_max = tj_max;
- pkgdev->tzone = thermal_zone_device_register("x86_pkg_temp",
+ INIT_DELAYED_WORK(&zonedev->work, pkg_temp_thermal_threshold_work_fn);
+ zonedev->cpu = cpu;
+ zonedev->tj_max = tj_max;
+ zonedev->tzone = thermal_zone_device_register("x86_pkg_temp",
thres_count,
(thres_count == MAX_NUMBER_OF_TRIPS) ? 0x03 : 0x01,
- pkgdev, &tzone_ops, &pkg_temp_tz_params, 0, 0);
- if (IS_ERR(pkgdev->tzone)) {
- err = PTR_ERR(pkgdev->tzone);
- kfree(pkgdev);
+ zonedev, &tzone_ops, &pkg_temp_tz_params, 0, 0);
+ if (IS_ERR(zonedev->tzone)) {
+ err = PTR_ERR(zonedev->tzone);
+ kfree(zonedev);
return err;
}
/* Store MSR value for package thermal interrupt, to restore at exit */
- rdmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, pkgdev->msr_pkg_therm_low,
- pkgdev->msr_pkg_therm_high);
+ rdmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, zonedev->msr_pkg_therm_low,
+ zonedev->msr_pkg_therm_high);
- cpumask_set_cpu(cpu, &pkgdev->cpumask);
+ cpumask_set_cpu(cpu, &zonedev->cpumask);
spin_lock_irq(&pkg_temp_lock);
- packages[pkgid] = pkgdev;
+ zones[id] = zonedev;
spin_unlock_irq(&pkg_temp_lock);
return 0;
}
static int pkg_thermal_cpu_offline(unsigned int cpu)
{
- struct pkg_device *pkgdev = pkg_temp_thermal_get_dev(cpu);
+ struct zone_device *zonedev = pkg_temp_thermal_get_dev(cpu);
bool lastcpu, was_target;
int target;
- if (!pkgdev)
+ if (!zonedev)
return 0;
- target = cpumask_any_but(&pkgdev->cpumask, cpu);
- cpumask_clear_cpu(cpu, &pkgdev->cpumask);
+ target = cpumask_any_but(&zonedev->cpumask, cpu);
+ cpumask_clear_cpu(cpu, &zonedev->cpumask);
lastcpu = target >= nr_cpu_ids;
/*
* Remove the sysfs files, if this is the last cpu in the package
* before doing further cleanups.
*/
if (lastcpu) {
- struct thermal_zone_device *tzone = pkgdev->tzone;
+ struct thermal_zone_device *tzone = zonedev->tzone;
/*
* We must protect against a work function calling
@@ -413,7 +415,7 @@ static int pkg_thermal_cpu_offline(unsigned int cpu)
* won't try to call.
*/
mutex_lock(&thermal_zone_mutex);
- pkgdev->tzone = NULL;
+ zonedev->tzone = NULL;
mutex_unlock(&thermal_zone_mutex);
thermal_zone_device_unregister(tzone);
@@ -427,8 +429,8 @@ static int pkg_thermal_cpu_offline(unsigned int cpu)
* one. When we drop the lock, then the interrupt notify function
* will see the new target.
*/
- was_target = pkgdev->cpu == cpu;
- pkgdev->cpu = target;
+ was_target = zonedev->cpu == cpu;
+ zonedev->cpu = target;
/*
* If this is the last CPU in the package remove the package
@@ -437,23 +439,23 @@ static int pkg_thermal_cpu_offline(unsigned int cpu)
* worker will see the package anymore.
*/
if (lastcpu) {
- packages[topology_logical_package_id(cpu)] = NULL;
+ zones[topology_logical_die_id(cpu)] = NULL;
/* After this point nothing touches the MSR anymore. */
wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT,
- pkgdev->msr_pkg_therm_low, pkgdev->msr_pkg_therm_high);
+ zonedev->msr_pkg_therm_low, zonedev->msr_pkg_therm_high);
}
/*
* Check whether there is work scheduled and whether the work is
* targeted at the outgoing CPU.
*/
- if (pkgdev->work_scheduled && was_target) {
+ if (zonedev->work_scheduled && was_target) {
/*
* To cancel the work we need to drop the lock, otherwise
* we might deadlock if the work needs to be flushed.
*/
spin_unlock_irq(&pkg_temp_lock);
- cancel_delayed_work_sync(&pkgdev->work);
+ cancel_delayed_work_sync(&zonedev->work);
spin_lock_irq(&pkg_temp_lock);
/*
* If this is not the last cpu in the package and the work
@@ -461,21 +463,21 @@ static int pkg_thermal_cpu_offline(unsigned int cpu)
* need to reschedule the work, otherwise the interrupt
* stays disabled forever.
*/
- if (!lastcpu && pkgdev->work_scheduled)
- pkg_thermal_schedule_work(target, &pkgdev->work);
+ if (!lastcpu && zonedev->work_scheduled)
+ pkg_thermal_schedule_work(target, &zonedev->work);
}
spin_unlock_irq(&pkg_temp_lock);
/* Final cleanup if this is the last cpu */
if (lastcpu)
- kfree(pkgdev);
+ kfree(zonedev);
return 0;
}
static int pkg_thermal_cpu_online(unsigned int cpu)
{
- struct pkg_device *pkgdev = pkg_temp_thermal_get_dev(cpu);
+ struct zone_device *zonedev = pkg_temp_thermal_get_dev(cpu);
struct cpuinfo_x86 *c = &cpu_data(cpu);
/* Paranoia check */
@@ -483,8 +485,8 @@ static int pkg_thermal_cpu_online(unsigned int cpu)
return -ENODEV;
/* If the package exists, nothing to do */
- if (pkgdev) {
- cpumask_set_cpu(cpu, &pkgdev->cpumask);
+ if (zonedev) {
+ cpumask_set_cpu(cpu, &zonedev->cpumask);
return 0;
}
return pkg_temp_thermal_device_add(cpu);
@@ -503,10 +505,10 @@ static int __init pkg_temp_thermal_init(void)
if (!x86_match_cpu(pkg_temp_thermal_ids))
return -ENODEV;
- max_packages = topology_max_packages();
- packages = kcalloc(max_packages, sizeof(struct pkg_device *),
+ max_id = topology_max_packages() * topology_max_die_per_package();
+ zones = kcalloc(max_id, sizeof(struct zone_device *),
GFP_KERNEL);
- if (!packages)
+ if (!zones)
return -ENOMEM;
ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "thermal/x86_pkg:online",
@@ -525,7 +527,7 @@ static int __init pkg_temp_thermal_init(void)
return 0;
err:
- kfree(packages);
+ kfree(zones);
return ret;
}
module_init(pkg_temp_thermal_init)
@@ -537,7 +539,7 @@ static void __exit pkg_temp_thermal_exit(void)
cpuhp_remove_state(pkg_thermal_hp_state);
debugfs_remove_recursive(debugfs);
- kfree(packages);
+ kfree(zones);
}
module_exit(pkg_temp_thermal_exit)
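A minimal sketch (not part of the patch) of the per-die indexing the hunks above adopt: the zone array is sized for every die in every package, and a CPU maps to its slot through its logical die id. The helper names zones_alloc() and zone_for_cpu() are hypothetical; struct zone_device is the driver's own type.

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/topology.h>

static struct zone_device **zones;
static int max_id;

static int zones_alloc(void)
{
        /* one slot per die across all packages */
        max_id = topology_max_packages() * topology_max_die_per_package();
        zones = kcalloc(max_id, sizeof(*zones), GFP_KERNEL);
        return zones ? 0 : -ENOMEM;
}

static struct zone_device *zone_for_cpu(unsigned int cpu)
{
        int id = topology_logical_die_id(cpu);  /* logical die, not package */

        return (id >= 0 && id < max_id) ? zones[id] : NULL;
}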
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index 3b1d312bb175..0e3e4dacbc12 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -95,7 +95,7 @@ config VT_HW_CONSOLE_BINDING
See <file:Documentation/console/console.txt> for more
information. For framebuffer console users, please refer to
- <file:Documentation/fb/fbcon.txt>.
+ <file:Documentation/fb/fbcon.rst>.
config UNIX98_PTYS
bool "Unix98 PTY support" if EXPERT
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index e38f104db174..fde8d4073e74 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -487,7 +487,7 @@ static int tty_ldisc_open(struct tty_struct *tty, struct tty_ldisc *ld)
static void tty_ldisc_close(struct tty_struct *tty, struct tty_ldisc *ld)
{
- lockdep_assert_held_exclusive(&tty->ldisc_sem);
+ lockdep_assert_held_write(&tty->ldisc_sem);
WARN_ON(!test_bit(TTY_LDISC_OPEN, &tty->flags));
clear_bit(TTY_LDISC_OPEN, &tty->flags);
if (ld->ops->close)
@@ -509,7 +509,7 @@ static int tty_ldisc_failto(struct tty_struct *tty, int ld)
struct tty_ldisc *disc = tty_ldisc_get(tty, ld);
int r;
- lockdep_assert_held_exclusive(&tty->ldisc_sem);
+ lockdep_assert_held_write(&tty->ldisc_sem);
if (IS_ERR(disc))
return PTR_ERR(disc);
tty->ldisc = disc;
@@ -633,7 +633,7 @@ EXPORT_SYMBOL_GPL(tty_set_ldisc);
*/
static void tty_ldisc_kill(struct tty_struct *tty)
{
- lockdep_assert_held_exclusive(&tty->ldisc_sem);
+ lockdep_assert_held_write(&tty->ldisc_sem);
if (!tty->ldisc)
return;
/*
@@ -681,7 +681,7 @@ int tty_ldisc_reinit(struct tty_struct *tty, int disc)
struct tty_ldisc *ld;
int retval;
- lockdep_assert_held_exclusive(&tty->ldisc_sem);
+ lockdep_assert_held_write(&tty->ldisc_sem);
ld = tty_ldisc_get(tty, disc);
if (IS_ERR(ld)) {
BUG_ON(disc == N_TTY);
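The tty_ldisc hunks above are a pure rename of the lockdep annotation. A minimal, hypothetical example of the new name on an ordinary rw_semaphore (example_sem and both functions are illustrative; the assertion compiles away when lockdep is disabled):

#include <linux/lockdep.h>
#include <linux/rwsem.h>

static DECLARE_RWSEM(example_sem);

static void must_hold_write(void)
{
        /* formerly lockdep_assert_held_exclusive() */
        lockdep_assert_held_write(&example_sem);
}

static void caller(void)
{
        down_write(&example_sem);
        must_hold_write();      /* fine: writer lock held */
        up_write(&example_sem);
}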
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 5c0ca1c24b6f..ec92f36ab5c4 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -3822,6 +3822,8 @@ int con_is_bound(const struct consw *csw)
{
int i, bound = 0;
+ WARN_CONSOLE_UNLOCKED();
+
for (i = 0; i < MAX_NR_CONSOLES; i++) {
if (con_driver_map[i] == csw) {
bound = 1;
@@ -3834,6 +3836,20 @@ int con_is_bound(const struct consw *csw)
EXPORT_SYMBOL(con_is_bound);
/**
+ * con_is_visible - checks whether the current console is visible
+ * @vc: virtual console
+ *
+ * RETURNS: false if not visible, true if visible
+ */
+bool con_is_visible(const struct vc_data *vc)
+{
+ WARN_CONSOLE_UNLOCKED();
+
+ return *vc->vc_display_fg == vc;
+}
+EXPORT_SYMBOL(con_is_visible);
+
+/**
* con_debug_enter - prepare the console for the kernel debugger
* @sw: console driver
*
@@ -4166,6 +4182,8 @@ void do_blank_screen(int entering_gfx)
struct vc_data *vc = vc_cons[fg_console].d;
int i;
+ might_sleep();
+
WARN_CONSOLE_UNLOCKED();
if (console_blanked) {
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index fa783531ee88..a02448105527 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -63,7 +63,7 @@ struct usb_dev_state {
unsigned int discsignr;
struct pid *disc_pid;
const struct cred *cred;
- void __user *disccontext;
+ sigval_t disccontext;
unsigned long ifclaimed;
u32 disabled_bulk_eps;
bool privileges_dropped;
@@ -90,6 +90,7 @@ struct async {
unsigned int ifnum;
void __user *userbuffer;
void __user *userurb;
+ sigval_t userurb_sigval;
struct urb *urb;
struct usb_memory *usbm;
unsigned int mem_usage;
@@ -582,22 +583,19 @@ static void async_completed(struct urb *urb)
{
struct async *as = urb->context;
struct usb_dev_state *ps = as->ps;
- struct kernel_siginfo sinfo;
struct pid *pid = NULL;
const struct cred *cred = NULL;
unsigned long flags;
- int signr;
+ sigval_t addr;
+ int signr, errno;
spin_lock_irqsave(&ps->lock, flags);
list_move_tail(&as->asynclist, &ps->async_completed);
as->status = urb->status;
signr = as->signr;
if (signr) {
- clear_siginfo(&sinfo);
- sinfo.si_signo = as->signr;
- sinfo.si_errno = as->status;
- sinfo.si_code = SI_ASYNCIO;
- sinfo.si_addr = as->userurb;
+ errno = as->status;
+ addr = as->userurb_sigval;
pid = get_pid(as->pid);
cred = get_cred(as->cred);
}
@@ -615,7 +613,7 @@ static void async_completed(struct urb *urb)
spin_unlock_irqrestore(&ps->lock, flags);
if (signr) {
- kill_pid_info_as_cred(sinfo.si_signo, &sinfo, pid, cred);
+ kill_pid_usb_asyncio(signr, errno, addr, pid, cred);
put_pid(pid);
put_cred(cred);
}
@@ -1427,7 +1425,7 @@ find_memory_area(struct usb_dev_state *ps, const struct usbdevfs_urb *uurb)
static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb,
struct usbdevfs_iso_packet_desc __user *iso_frame_desc,
- void __user *arg)
+ void __user *arg, sigval_t userurb_sigval)
{
struct usbdevfs_iso_packet_desc *isopkt = NULL;
struct usb_host_endpoint *ep;
@@ -1727,6 +1725,7 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
isopkt = NULL;
as->ps = ps;
as->userurb = arg;
+ as->userurb_sigval = userurb_sigval;
if (as->usbm) {
unsigned long uurb_start = (unsigned long)uurb->buffer;
@@ -1801,13 +1800,17 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
static int proc_submiturb(struct usb_dev_state *ps, void __user *arg)
{
struct usbdevfs_urb uurb;
+ sigval_t userurb_sigval;
if (copy_from_user(&uurb, arg, sizeof(uurb)))
return -EFAULT;
+ memset(&userurb_sigval, 0, sizeof(userurb_sigval));
+ userurb_sigval.sival_ptr = arg;
+
return proc_do_submiturb(ps, &uurb,
(((struct usbdevfs_urb __user *)arg)->iso_frame_desc),
- arg);
+ arg, userurb_sigval);
}
static int proc_unlinkurb(struct usb_dev_state *ps, void __user *arg)
@@ -1977,7 +1980,7 @@ static int proc_disconnectsignal_compat(struct usb_dev_state *ps, void __user *a
if (copy_from_user(&ds, arg, sizeof(ds)))
return -EFAULT;
ps->discsignr = ds.signr;
- ps->disccontext = compat_ptr(ds.context);
+ ps->disccontext.sival_int = ds.context;
return 0;
}
@@ -2005,13 +2008,17 @@ static int get_urb32(struct usbdevfs_urb *kurb,
static int proc_submiturb_compat(struct usb_dev_state *ps, void __user *arg)
{
struct usbdevfs_urb uurb;
+ sigval_t userurb_sigval;
if (get_urb32(&uurb, (struct usbdevfs_urb32 __user *)arg))
return -EFAULT;
+ memset(&userurb_sigval, 0, sizeof(userurb_sigval));
+ userurb_sigval.sival_int = ptr_to_compat(arg);
+
return proc_do_submiturb(ps, &uurb,
((struct usbdevfs_urb32 __user *)arg)->iso_frame_desc,
- arg);
+ arg, userurb_sigval);
}
static int processcompl_compat(struct async *as, void __user * __user *arg)
@@ -2092,7 +2099,7 @@ static int proc_disconnectsignal(struct usb_dev_state *ps, void __user *arg)
if (copy_from_user(&ds, arg, sizeof(ds)))
return -EFAULT;
ps->discsignr = ds.signr;
- ps->disccontext = ds.context;
+ ps->disccontext.sival_ptr = ds.context;
return 0;
}
@@ -2614,22 +2621,15 @@ const struct file_operations usbdev_file_operations = {
static void usbdev_remove(struct usb_device *udev)
{
struct usb_dev_state *ps;
- struct kernel_siginfo sinfo;
while (!list_empty(&udev->filelist)) {
ps = list_entry(udev->filelist.next, struct usb_dev_state, list);
destroy_all_async(ps);
wake_up_all(&ps->wait);
list_del_init(&ps->list);
- if (ps->discsignr) {
- clear_siginfo(&sinfo);
- sinfo.si_signo = ps->discsignr;
- sinfo.si_errno = EPIPE;
- sinfo.si_code = SI_ASYNCIO;
- sinfo.si_addr = ps->disccontext;
- kill_pid_info_as_cred(ps->discsignr, &sinfo,
- ps->disc_pid, ps->cred);
- }
+ if (ps->discsignr)
+ kill_pid_usb_asyncio(ps->discsignr, EPIPE, ps->disccontext,
+ ps->disc_pid, ps->cred);
}
}
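The devio conversion above drops the open-coded kernel_siginfo and passes kill_pid_usb_asyncio() a sigval_t instead. An illustrative sketch, mirroring the hunks, of how such a value is filled for native and compat callers (the helper names are hypothetical):

#include <linux/compat.h>
#include <linux/signal.h>
#include <linux/string.h>

static sigval_t sigval_from_user_ptr(void __user *arg)
{
        sigval_t v;

        memset(&v, 0, sizeof(v));
        v.sival_ptr = arg;                      /* native: full pointer */
        return v;
}

static sigval_t sigval_from_compat_ptr(void __user *arg)
{
        sigval_t v;

        memset(&v, 0, sizeof(v));
        v.sival_int = ptr_to_compat(arg);       /* compat: 32-bit cookie */
        return v;
}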
diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
index 8c99392df593..fb0a892687c0 100644
--- a/drivers/usb/gadget/function/f_uvc.c
+++ b/drivers/usb/gadget/function/f_uvc.c
@@ -423,6 +423,7 @@ uvc_register_video(struct uvc_device *uvc)
uvc->vdev.release = video_device_release_empty;
uvc->vdev.vfl_dir = VFL_DIR_TX;
uvc->vdev.lock = &uvc->video.mutex;
+ uvc->vdev.device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
strlcpy(uvc->vdev.name, cdev->gadget->name, sizeof(uvc->vdev.name));
video_set_drvdata(&uvc->vdev, uvc);
diff --git a/drivers/usb/gadget/function/uvc_v4l2.c b/drivers/usb/gadget/function/uvc_v4l2.c
index a1183eccee22..495f0ec663ea 100644
--- a/drivers/usb/gadget/function/uvc_v4l2.c
+++ b/drivers/usb/gadget/function/uvc_v4l2.c
@@ -71,10 +71,6 @@ uvc_v4l2_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
strlcpy(cap->card, cdev->gadget->name, sizeof(cap->card));
strlcpy(cap->bus_info, dev_name(&cdev->gadget->dev),
sizeof(cap->bus_info));
-
- cap->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
-
return 0;
}
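The two gadget UVC hunks move the capability flags out of the querycap handler and into struct video_device, letting the V4L2 core fill struct v4l2_capability itself. A minimal sketch of the pattern (example_setup is illustrative):

#include <linux/videodev2.h>
#include <media/v4l2-dev.h>

static void example_setup(struct video_device *vdev)
{
        /* the core derives cap->device_caps and V4L2_CAP_DEVICE_CAPS from this */
        vdev->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
}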
diff --git a/drivers/usb/image/microtek.c b/drivers/usb/image/microtek.c
index 607be1f4fe27..0a57c2cc8e5a 100644
--- a/drivers/usb/image/microtek.c
+++ b/drivers/usb/image/microtek.c
@@ -488,7 +488,6 @@ static void mts_command_done( struct urb *transfer )
static void mts_do_sg (struct urb* transfer)
{
- struct scatterlist * sg;
int status = transfer->status;
MTS_INT_INIT();
@@ -500,13 +499,12 @@ static void mts_do_sg (struct urb* transfer)
mts_transfer_cleanup(transfer);
}
- sg = scsi_sglist(context->srb);
- context->fragment++;
+ context->curr_sg = sg_next(context->curr_sg);
mts_int_submit_urb(transfer,
context->data_pipe,
- sg_virt(&sg[context->fragment]),
- sg[context->fragment].length,
- context->fragment + 1 == scsi_sg_count(context->srb) ?
+ sg_virt(context->curr_sg),
+ context->curr_sg->length,
+ sg_is_last(context->curr_sg) ?
mts_data_done : mts_do_sg);
}
@@ -526,22 +524,20 @@ static void
mts_build_transfer_context(struct scsi_cmnd *srb, struct mts_desc* desc)
{
int pipe;
- struct scatterlist * sg;
-
+
MTS_DEBUG_GOT_HERE();
desc->context.instance = desc;
desc->context.srb = srb;
- desc->context.fragment = 0;
if (!scsi_bufflen(srb)) {
desc->context.data = NULL;
desc->context.data_length = 0;
return;
} else {
- sg = scsi_sglist(srb);
- desc->context.data = sg_virt(&sg[0]);
- desc->context.data_length = sg[0].length;
+ desc->context.curr_sg = scsi_sglist(srb);
+ desc->context.data = sg_virt(desc->context.curr_sg);
+ desc->context.data_length = desc->context.curr_sg->length;
}
diff --git a/drivers/usb/image/microtek.h b/drivers/usb/image/microtek.h
index 66685e59241a..7bd5f4639c4a 100644
--- a/drivers/usb/image/microtek.h
+++ b/drivers/usb/image/microtek.h
@@ -21,7 +21,7 @@ struct mts_transfer_context
void *data;
unsigned data_length;
int data_pipe;
- int fragment;
+ struct scatterlist *curr_sg;
u8 *scsi_status; /* status returned from ep_response after command completion */
};
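The microtek changes replace index arithmetic over scsi_sglist() with a cursor advanced by sg_next() and terminated via sg_is_last(). An illustrative, driver-independent sketch of the same iteration style (total_bytes is a hypothetical helper):

#include <linux/scatterlist.h>

static size_t total_bytes(struct scatterlist *sgl)
{
        struct scatterlist *sg = sgl;
        size_t total = 0;

        while (sg) {
                total += sg->length;
                /* sg_next() returns NULL once sg_is_last() is true */
                sg = sg_next(sg);
        }
        return total;
}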
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig
index c97f270338bf..4a88e1ca25c0 100644
--- a/drivers/usb/misc/Kconfig
+++ b/drivers/usb/misc/Kconfig
@@ -16,7 +16,7 @@ config USB_EMI62
This code is also available as a module ( = code which can be
inserted in and removed from the running kernel whenever you want).
The module will be called audio. If you want to compile it as a
- module, say M here and read <file:Documentation/kbuild/modules.txt>.
+ module, say M here and read <file:Documentation/kbuild/modules.rst>.
config USB_EMI26
tristate "EMI 2|6 USB Audio interface support"
@@ -67,7 +67,7 @@ config USB_LEGOTOWER
inserted in and removed from the running kernel whenever you want).
The module will be called legousbtower. If you want to compile it as
a module, say M here and read
- <file:Documentation/kbuild/modules.txt>.
+ <file:Documentation/kbuild/modules.rst>.
config USB_LCD
tristate "USB LCD driver support"
diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c
index f45d8df5cfb8..86defca6623e 100644
--- a/drivers/usb/roles/class.c
+++ b/drivers/usb/roles/class.c
@@ -101,7 +101,7 @@ static void *usb_role_switch_match(struct device_connection *con, int ep,
struct device *dev;
if (con->fwnode) {
- if (!fwnode_property_present(con->fwnode, con->id))
+ if (con->id && !fwnode_property_present(con->fwnode, con->id))
return NULL;
dev = class_find_device(role_class, NULL, con->fwnode,
diff --git a/drivers/usb/typec/bus.h b/drivers/usb/typec/bus.h
index db40e61d8b72..0c9661c96473 100644
--- a/drivers/usb/typec/bus.h
+++ b/drivers/usb/typec/bus.h
@@ -35,4 +35,19 @@ extern const struct device_type typec_port_dev_type;
#define is_typec_altmode(_dev_) (_dev_->type == &typec_altmode_dev_type)
#define is_typec_port(_dev_) (_dev_->type == &typec_port_dev_type)
+extern struct class typec_mux_class;
+
+struct typec_switch {
+ struct device dev;
+ typec_switch_set_fn_t set;
+};
+
+struct typec_mux {
+ struct device dev;
+ typec_mux_set_fn_t set;
+};
+
+#define to_typec_switch(_dev_) container_of(_dev_, struct typec_switch, dev)
+#define to_typec_mux(_dev_) container_of(_dev_, struct typec_mux, dev)
+
#endif /* __USB_TYPEC_ALTMODE_H__ */
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
index 2eb623841847..a18285a990a8 100644
--- a/drivers/usb/typec/class.c
+++ b/drivers/usb/typec/class.c
@@ -1646,13 +1646,25 @@ static int __init typec_init(void)
if (ret)
return ret;
+ ret = class_register(&typec_mux_class);
+ if (ret)
+ goto err_unregister_bus;
+
typec_class = class_create(THIS_MODULE, "typec");
if (IS_ERR(typec_class)) {
- bus_unregister(&typec_bus);
- return PTR_ERR(typec_class);
+ ret = PTR_ERR(typec_class);
+ goto err_unregister_mux_class;
}
return 0;
+
+err_unregister_mux_class:
+ class_unregister(&typec_mux_class);
+
+err_unregister_bus:
+ bus_unregister(&typec_bus);
+
+ return ret;
}
subsys_initcall(typec_init);
@@ -1661,6 +1673,7 @@ static void __exit typec_exit(void)
class_destroy(typec_class);
ida_destroy(&typec_index_ida);
bus_unregister(&typec_bus);
+ class_unregister(&typec_mux_class);
}
module_exit(typec_exit);
diff --git a/drivers/usb/typec/mux.c b/drivers/usb/typec/mux.c
index 2ce54f3fc79c..61b7bc58dd81 100644
--- a/drivers/usb/typec/mux.c
+++ b/drivers/usb/typec/mux.c
@@ -15,35 +15,47 @@
#include <linux/slab.h>
#include <linux/usb/typec_mux.h>
-static DEFINE_MUTEX(switch_lock);
-static DEFINE_MUTEX(mux_lock);
-static LIST_HEAD(switch_list);
-static LIST_HEAD(mux_list);
+#include "bus.h"
+
+static int name_match(struct device *dev, const void *name)
+{
+ return !strcmp((const char *)name, dev_name(dev));
+}
+
+static bool dev_name_ends_with(struct device *dev, const char *suffix)
+{
+ const char *name = dev_name(dev);
+ const int name_len = strlen(name);
+ const int suffix_len = strlen(suffix);
+
+ if (suffix_len > name_len)
+ return false;
+
+ return strcmp(name + (name_len - suffix_len), suffix) == 0;
+}
+
+static int switch_fwnode_match(struct device *dev, const void *fwnode)
+{
+ return dev_fwnode(dev) == fwnode && dev_name_ends_with(dev, "-switch");
+}
static void *typec_switch_match(struct device_connection *con, int ep,
void *data)
{
- struct typec_switch *sw;
-
- if (!con->fwnode) {
- list_for_each_entry(sw, &switch_list, entry)
- if (!strcmp(con->endpoint[ep], dev_name(sw->dev)))
- return sw;
- return ERR_PTR(-EPROBE_DEFER);
- }
+ struct device *dev;
- /*
- * With OF graph the mux node must have a boolean device property named
- * "orientation-switch".
- */
- if (con->id && !fwnode_property_present(con->fwnode, con->id))
- return NULL;
+ if (con->fwnode) {
+ if (con->id && !fwnode_property_present(con->fwnode, con->id))
+ return NULL;
- list_for_each_entry(sw, &switch_list, entry)
- if (dev_fwnode(sw->dev) == con->fwnode)
- return sw;
+ dev = class_find_device(&typec_mux_class, NULL, con->fwnode,
+ switch_fwnode_match);
+ } else {
+ dev = class_find_device(&typec_mux_class, NULL,
+ con->endpoint[ep], name_match);
+ }
- return con->id ? ERR_PTR(-EPROBE_DEFER) : NULL;
+ return dev ? to_typec_switch(dev) : ERR_PTR(-EPROBE_DEFER);
}
/**
@@ -59,14 +71,10 @@ struct typec_switch *typec_switch_get(struct device *dev)
{
struct typec_switch *sw;
- mutex_lock(&switch_lock);
sw = device_connection_find_match(dev, "orientation-switch", NULL,
typec_switch_match);
- if (!IS_ERR_OR_NULL(sw)) {
- WARN_ON(!try_module_get(sw->dev->driver->owner));
- get_device(sw->dev);
- }
- mutex_unlock(&switch_lock);
+ if (!IS_ERR_OR_NULL(sw))
+ WARN_ON(!try_module_get(sw->dev.parent->driver->owner));
return sw;
}
@@ -81,28 +89,64 @@ EXPORT_SYMBOL_GPL(typec_switch_get);
void typec_switch_put(struct typec_switch *sw)
{
if (!IS_ERR_OR_NULL(sw)) {
- module_put(sw->dev->driver->owner);
- put_device(sw->dev);
+ module_put(sw->dev.parent->driver->owner);
+ put_device(&sw->dev);
}
}
EXPORT_SYMBOL_GPL(typec_switch_put);
+static void typec_switch_release(struct device *dev)
+{
+ kfree(to_typec_switch(dev));
+}
+
+static const struct device_type typec_switch_dev_type = {
+ .name = "orientation_switch",
+ .release = typec_switch_release,
+};
+
/**
* typec_switch_register - Register USB Type-C orientation switch
- * @sw: USB Type-C orientation switch
+ * @parent: Parent device
+ * @desc: Orientation switch description
*
* This function registers a switch that can be used for routing the correct
* data pairs depending on the cable plug orientation from the USB Type-C
* connector to the USB controllers. USB Type-C plugs can be inserted
* right-side-up or upside-down.
*/
-int typec_switch_register(struct typec_switch *sw)
+struct typec_switch *
+typec_switch_register(struct device *parent,
+ const struct typec_switch_desc *desc)
{
- mutex_lock(&switch_lock);
- list_add_tail(&sw->entry, &switch_list);
- mutex_unlock(&switch_lock);
+ struct typec_switch *sw;
+ int ret;
+
+ if (!desc || !desc->set)
+ return ERR_PTR(-EINVAL);
+
+ sw = kzalloc(sizeof(*sw), GFP_KERNEL);
+ if (!sw)
+ return ERR_PTR(-ENOMEM);
- return 0;
+ sw->set = desc->set;
+
+ device_initialize(&sw->dev);
+ sw->dev.parent = parent;
+ sw->dev.fwnode = desc->fwnode;
+ sw->dev.class = &typec_mux_class;
+ sw->dev.type = &typec_switch_dev_type;
+ sw->dev.driver_data = desc->drvdata;
+ dev_set_name(&sw->dev, "%s-switch", dev_name(parent));
+
+ ret = device_add(&sw->dev);
+ if (ret) {
+ dev_err(parent, "failed to register switch (%d)\n", ret);
+ put_device(&sw->dev);
+ return ERR_PTR(ret);
+ }
+
+ return sw;
}
EXPORT_SYMBOL_GPL(typec_switch_register);
@@ -114,28 +158,44 @@ EXPORT_SYMBOL_GPL(typec_switch_register);
*/
void typec_switch_unregister(struct typec_switch *sw)
{
- mutex_lock(&switch_lock);
- list_del(&sw->entry);
- mutex_unlock(&switch_lock);
+ if (!IS_ERR_OR_NULL(sw))
+ device_unregister(&sw->dev);
}
EXPORT_SYMBOL_GPL(typec_switch_unregister);
+void typec_switch_set_drvdata(struct typec_switch *sw, void *data)
+{
+ dev_set_drvdata(&sw->dev, data);
+}
+EXPORT_SYMBOL_GPL(typec_switch_set_drvdata);
+
+void *typec_switch_get_drvdata(struct typec_switch *sw)
+{
+ return dev_get_drvdata(&sw->dev);
+}
+EXPORT_SYMBOL_GPL(typec_switch_get_drvdata);
+
/* ------------------------------------------------------------------------- */
+static int mux_fwnode_match(struct device *dev, const void *fwnode)
+{
+ return dev_fwnode(dev) == fwnode && dev_name_ends_with(dev, "-mux");
+}
+
static void *typec_mux_match(struct device_connection *con, int ep, void *data)
{
const struct typec_altmode_desc *desc = data;
- struct typec_mux *mux;
- int nval;
+ struct device *dev;
bool match;
+ int nval;
u16 *val;
int i;
if (!con->fwnode) {
- list_for_each_entry(mux, &mux_list, entry)
- if (!strcmp(con->endpoint[ep], dev_name(mux->dev)))
- return mux;
- return ERR_PTR(-EPROBE_DEFER);
+ dev = class_find_device(&typec_mux_class, NULL,
+ con->endpoint[ep], name_match);
+
+ return dev ? to_typec_switch(dev) : ERR_PTR(-EPROBE_DEFER);
}
/*
@@ -180,11 +240,10 @@ static void *typec_mux_match(struct device_connection *con, int ep, void *data)
return NULL;
find_mux:
- list_for_each_entry(mux, &mux_list, entry)
- if (dev_fwnode(mux->dev) == con->fwnode)
- return mux;
+ dev = class_find_device(&typec_mux_class, NULL, con->fwnode,
+ mux_fwnode_match);
- return ERR_PTR(-EPROBE_DEFER);
+ return dev ? to_typec_switch(dev) : ERR_PTR(-EPROBE_DEFER);
}
/**
@@ -202,14 +261,10 @@ struct typec_mux *typec_mux_get(struct device *dev,
{
struct typec_mux *mux;
- mutex_lock(&mux_lock);
mux = device_connection_find_match(dev, "mode-switch", (void *)desc,
typec_mux_match);
- if (!IS_ERR_OR_NULL(mux)) {
- WARN_ON(!try_module_get(mux->dev->driver->owner));
- get_device(mux->dev);
- }
- mutex_unlock(&mux_lock);
+ if (!IS_ERR_OR_NULL(mux))
+ WARN_ON(!try_module_get(mux->dev.parent->driver->owner));
return mux;
}
@@ -224,28 +279,63 @@ EXPORT_SYMBOL_GPL(typec_mux_get);
void typec_mux_put(struct typec_mux *mux)
{
if (!IS_ERR_OR_NULL(mux)) {
- module_put(mux->dev->driver->owner);
- put_device(mux->dev);
+ module_put(mux->dev.parent->driver->owner);
+ put_device(&mux->dev);
}
}
EXPORT_SYMBOL_GPL(typec_mux_put);
+static void typec_mux_release(struct device *dev)
+{
+ kfree(to_typec_mux(dev));
+}
+
+static const struct device_type typec_mux_dev_type = {
+ .name = "mode_switch",
+ .release = typec_mux_release,
+};
+
/**
* typec_mux_register - Register Multiplexer routing USB Type-C pins
- * @mux: USB Type-C Connector Multiplexer/DeMultiplexer
+ * @parent: Parent device
+ * @desc: Multiplexer description
*
* USB Type-C connectors can be used for alternate modes of operation besides
* USB when Accessory/Alternate Modes are supported. With some of those modes,
* the pins on the connector need to be reconfigured. This function registers
* multiplexer switches routing the pins on the connector.
*/
-int typec_mux_register(struct typec_mux *mux)
+struct typec_mux *
+typec_mux_register(struct device *parent, const struct typec_mux_desc *desc)
{
- mutex_lock(&mux_lock);
- list_add_tail(&mux->entry, &mux_list);
- mutex_unlock(&mux_lock);
+ struct typec_mux *mux;
+ int ret;
+
+ if (!desc || !desc->set)
+ return ERR_PTR(-EINVAL);
+
+ mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return ERR_PTR(-ENOMEM);
+
+ mux->set = desc->set;
+
+ device_initialize(&mux->dev);
+ mux->dev.parent = parent;
+ mux->dev.fwnode = desc->fwnode;
+ mux->dev.class = &typec_mux_class;
+ mux->dev.type = &typec_mux_dev_type;
+ mux->dev.driver_data = desc->drvdata;
+ dev_set_name(&mux->dev, "%s-mux", dev_name(parent));
+
+ ret = device_add(&mux->dev);
+ if (ret) {
+ dev_err(parent, "failed to register mux (%d)\n", ret);
+ put_device(&mux->dev);
+ return ERR_PTR(ret);
+ }
- return 0;
+ return mux;
}
EXPORT_SYMBOL_GPL(typec_mux_register);
@@ -257,8 +347,24 @@ EXPORT_SYMBOL_GPL(typec_mux_register);
*/
void typec_mux_unregister(struct typec_mux *mux)
{
- mutex_lock(&mux_lock);
- list_del(&mux->entry);
- mutex_unlock(&mux_lock);
+ if (!IS_ERR_OR_NULL(mux))
+ device_unregister(&mux->dev);
}
EXPORT_SYMBOL_GPL(typec_mux_unregister);
+
+void typec_mux_set_drvdata(struct typec_mux *mux, void *data)
+{
+ dev_set_drvdata(&mux->dev, data);
+}
+EXPORT_SYMBOL_GPL(typec_mux_set_drvdata);
+
+void *typec_mux_get_drvdata(struct typec_mux *mux)
+{
+ return dev_get_drvdata(&mux->dev);
+}
+EXPORT_SYMBOL_GPL(typec_mux_get_drvdata);
+
+struct class typec_mux_class = {
+ .name = "typec_mux",
+ .owner = THIS_MODULE,
+};
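With switches and muxes now registered as class devices, a provider hands the core a description instead of embedding the structures. A hedged sketch of a minimal provider against the reworked API, using only the calls visible above; the my_* names are hypothetical:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/usb/typec_mux.h>

struct my_mux_chip {
        struct typec_switch *sw;
};

static int my_sw_set(struct typec_switch *sw, enum typec_orientation orientation)
{
        struct my_mux_chip *chip = typec_switch_get_drvdata(sw);

        /* program chip's registers according to orientation */
        return 0;
}

static int my_register(struct device *dev, struct my_mux_chip *chip)
{
        struct typec_switch_desc sw_desc = {
                .fwnode  = dev->fwnode,
                .drvdata = chip,
                .set     = my_sw_set,
        };

        chip->sw = typec_switch_register(dev, &sw_desc);
        if (IS_ERR(chip->sw))
                return PTR_ERR(chip->sw);

        return 0;
}

static void my_unregister(struct my_mux_chip *chip)
{
        typec_switch_unregister(chip->sw);
}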
diff --git a/drivers/usb/typec/mux/pi3usb30532.c b/drivers/usb/typec/mux/pi3usb30532.c
index 9294e85fd34b..5585b109095b 100644
--- a/drivers/usb/typec/mux/pi3usb30532.c
+++ b/drivers/usb/typec/mux/pi3usb30532.c
@@ -23,8 +23,8 @@
struct pi3usb30532 {
struct i2c_client *client;
struct mutex lock; /* protects the cached conf register */
- struct typec_switch sw;
- struct typec_mux mux;
+ struct typec_switch *sw;
+ struct typec_mux *mux;
u8 conf;
};
@@ -48,7 +48,7 @@ static int pi3usb30532_set_conf(struct pi3usb30532 *pi, u8 new_conf)
static int pi3usb30532_sw_set(struct typec_switch *sw,
enum typec_orientation orientation)
{
- struct pi3usb30532 *pi = container_of(sw, struct pi3usb30532, sw);
+ struct pi3usb30532 *pi = typec_switch_get_drvdata(sw);
u8 new_conf;
int ret;
@@ -75,7 +75,7 @@ static int pi3usb30532_sw_set(struct typec_switch *sw,
static int pi3usb30532_mux_set(struct typec_mux *mux, int state)
{
- struct pi3usb30532 *pi = container_of(mux, struct pi3usb30532, mux);
+ struct pi3usb30532 *pi = typec_mux_get_drvdata(mux);
u8 new_conf;
int ret;
@@ -113,6 +113,8 @@ static int pi3usb30532_mux_set(struct typec_mux *mux, int state)
static int pi3usb30532_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
+ struct typec_switch_desc sw_desc;
+ struct typec_mux_desc mux_desc;
struct pi3usb30532 *pi;
int ret;
@@ -121,10 +123,6 @@ static int pi3usb30532_probe(struct i2c_client *client)
return -ENOMEM;
pi->client = client;
- pi->sw.dev = dev;
- pi->sw.set = pi3usb30532_sw_set;
- pi->mux.dev = dev;
- pi->mux.set = pi3usb30532_mux_set;
mutex_init(&pi->lock);
ret = i2c_smbus_read_byte_data(client, PI3USB30532_CONF);
@@ -134,17 +132,27 @@ static int pi3usb30532_probe(struct i2c_client *client)
}
pi->conf = ret;
- ret = typec_switch_register(&pi->sw);
- if (ret) {
- dev_err(dev, "Error registering typec switch: %d\n", ret);
- return ret;
+ sw_desc.drvdata = pi;
+ sw_desc.fwnode = dev->fwnode;
+ sw_desc.set = pi3usb30532_sw_set;
+
+ pi->sw = typec_switch_register(dev, &sw_desc);
+ if (IS_ERR(pi->sw)) {
+ dev_err(dev, "Error registering typec switch: %ld\n",
+ PTR_ERR(pi->sw));
+ return PTR_ERR(pi->sw);
}
- ret = typec_mux_register(&pi->mux);
- if (ret) {
- typec_switch_unregister(&pi->sw);
- dev_err(dev, "Error registering typec mux: %d\n", ret);
- return ret;
+ mux_desc.drvdata = pi;
+ mux_desc.fwnode = dev->fwnode;
+ mux_desc.set = pi3usb30532_mux_set;
+
+ pi->mux = typec_mux_register(dev, &mux_desc);
+ if (IS_ERR(pi->mux)) {
+ typec_switch_unregister(pi->sw);
+ dev_err(dev, "Error registering typec mux: %ld\n",
+ PTR_ERR(pi->mux));
+ return PTR_ERR(pi->mux);
}
i2c_set_clientdata(client, pi);
@@ -155,8 +163,8 @@ static int pi3usb30532_remove(struct i2c_client *client)
{
struct pi3usb30532 *pi = i2c_get_clientdata(client);
- typec_mux_unregister(&pi->mux);
- typec_switch_unregister(&pi->sw);
+ typec_mux_unregister(pi->mux);
+ typec_switch_unregister(pi->sw);
return 0;
}
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index d57ebdd616d9..247e5585af5d 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -35,7 +35,7 @@
#include "vhost.h"
-static int experimental_zcopytx = 1;
+static int experimental_zcopytx = 0;
module_param(experimental_zcopytx, int, 0444);
MODULE_PARM_DESC(experimental_zcopytx, "Enable Zero Copy TX;"
" 1 -Enable; 0 - Disable");
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index e995c12d8e24..ff8892c38666 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -1711,7 +1711,7 @@ EXPORT_SYMBOL_GPL(vhost_dev_ioctl);
/* TODO: This is really inefficient. We need something like get_user()
* (instruction directly accesses the data, with an exception table entry
- * returning -EFAULT). See Documentation/x86/exception-tables.txt.
+ * returning -EFAULT). See Documentation/x86/exception-tables.rst.
*/
static int set_bit_to_user(int nr, void __user *addr)
{
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index 1ef8b6fd62ac..5dc07106a59e 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -47,7 +47,7 @@ static int fb_notifier_callback(struct notifier_block *self,
int fb_blank = 0;
/* If we aren't interested in this event, skip it immediately ... */
- if (event != FB_EVENT_BLANK && event != FB_EVENT_CONBLANK)
+ if (event != FB_EVENT_BLANK)
return 0;
bd = container_of(self, struct backlight_device, fb_notif);
diff --git a/drivers/video/backlight/lcd.c b/drivers/video/backlight/lcd.c
index 151b18776add..d6b653aa4ee9 100644
--- a/drivers/video/backlight/lcd.c
+++ b/drivers/video/backlight/lcd.c
@@ -30,18 +30,6 @@ static int fb_notifier_callback(struct notifier_block *self,
struct lcd_device *ld;
struct fb_event *evdata = data;
- /* If we aren't interested in this event, skip it immediately ... */
- switch (event) {
- case FB_EVENT_BLANK:
- case FB_EVENT_MODE_CHANGE:
- case FB_EVENT_MODE_CHANGE_ALL:
- case FB_EARLY_EVENT_BLANK:
- case FB_R_EARLY_EVENT_BLANK:
- break;
- default:
- return 0;
- }
-
ld = container_of(self, struct lcd_device, fb_notif);
if (!ld->ops)
return 0;
diff --git a/drivers/video/console/dummycon.c b/drivers/video/console/dummycon.c
index ff886e99104b..2a0d0bda7faa 100644
--- a/drivers/video/console/dummycon.c
+++ b/drivers/video/console/dummycon.c
@@ -34,6 +34,8 @@ static bool dummycon_putc_called;
void dummycon_register_output_notifier(struct notifier_block *nb)
{
+ WARN_CONSOLE_UNLOCKED();
+
raw_notifier_chain_register(&dummycon_output_nh, nb);
if (dummycon_putc_called)
@@ -42,11 +44,15 @@ void dummycon_register_output_notifier(struct notifier_block *nb)
void dummycon_unregister_output_notifier(struct notifier_block *nb)
{
+ WARN_CONSOLE_UNLOCKED();
+
raw_notifier_chain_unregister(&dummycon_output_nh, nb);
}
static void dummycon_putc(struct vc_data *vc, int c, int ypos, int xpos)
{
+ WARN_CONSOLE_UNLOCKED();
+
dummycon_putc_called = true;
raw_notifier_call_chain(&dummycon_output_nh, 0, NULL);
}
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index 1b2f5f31fb6f..6b2de93bd302 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -31,7 +31,7 @@ menuconfig FB
in the /dev directory, i.e. /dev/fb*.
You need an utility program called fbset to make full use of frame
- buffer devices. Please read <file:Documentation/fb/framebuffer.txt>
+ buffer devices. Please read <file:Documentation/fb/framebuffer.rst>
and the Framebuffer-HOWTO at
<http://www.munted.org.uk/programming/Framebuffer-HOWTO-1.3.html> for more
information.
@@ -241,7 +241,7 @@ config FB_CIRRUS
If you have a PCI-based system, this enables support for these
chips: GD-543x, GD-544x, GD-5480.
- Please read the file <file:Documentation/fb/cirrusfb.txt>.
+ Please read the file <file:Documentation/fb/cirrusfb.rst>.
Say N unless you have such a graphics board or plan to get one
before you next recompile the kernel.
@@ -289,7 +289,7 @@ config FB_ARMCLCD
If you want to compile this as a module (=code which can be
inserted into and removed from the running kernel), say M
- here and read <file:Documentation/kbuild/modules.txt>. The module
+ here and read <file:Documentation/kbuild/modules.rst>. The module
will be called amba-clcd.
config FB_ACORN
@@ -332,7 +332,8 @@ config FB_SA1100
config FB_IMX
tristate "Freescale i.MX1/21/25/27 LCD support"
- depends on FB && ARCH_MXC
+ depends on FB && HAVE_CLK && HAS_IOMEM
+ depends on ARCH_MXC || COMPILE_TEST
select LCD_CLASS_DEVICE
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
@@ -614,7 +615,7 @@ config FB_UVESA
This driver generally provides more features than vesafb but
requires a userspace helper application called 'v86d'. See
- <file:Documentation/fb/uvesafb.txt> for more information.
+ <file:Documentation/fb/uvesafb.rst> for more information.
If unsure, say N.
@@ -629,7 +630,7 @@ config FB_VESA
This is the frame buffer device driver for generic VESA 2.0
compliant graphic cards. The older VESA 1.2 cards are not supported.
You will get a boot time penguin logo at no additional cost. Please
- read <file:Documentation/fb/vesafb.txt>. If unsure, say Y.
+ read <file:Documentation/fb/vesafb.rst>. If unsure, say Y.
config FB_EFI
bool "EFI-based Framebuffer Support"
@@ -670,7 +671,8 @@ config FB_HGA
config FB_GBE
bool "SGI Graphics Backend frame buffer support"
- depends on (FB = y) && SGI_IP32
+ depends on (FB = y) && HAS_IOMEM
+ depends on SGI_IP32 || COMPILE_TEST
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
@@ -808,7 +810,8 @@ config FB_XVR1000
config FB_PVR2
tristate "NEC PowerVR 2 display support"
- depends on FB && SH_DREAMCAST
+ depends on FB && HAS_IOMEM
+ depends on SH_DREAMCAST || COMPILE_TEST
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
@@ -825,7 +828,7 @@ config FB_PVR2
module load time. The parameters look like "video=pvr2:XXX", where
the meaning of XXX can be found at the end of the main source file
(<file:drivers/video/pvr2fb.c>). Please see the file
- <file:Documentation/fb/pvr2fb.txt>.
+ <file:Documentation/fb/pvr2fb.rst>.
config FB_OPENCORES
tristate "OpenCores VGA/LCD core 2.0 framebuffer support"
@@ -856,7 +859,8 @@ config FB_S1D13XXX
config FB_ATMEL
tristate "AT91 LCD Controller support"
- depends on FB && OF && HAVE_FB_ATMEL
+ depends on FB && OF && HAVE_CLK && HAS_IOMEM
+ depends on HAVE_FB_ATMEL || COMPILE_TEST
select FB_BACKLIGHT
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
@@ -987,7 +991,7 @@ config FB_I810
module will be called i810fb.
For more information, please read
- <file:Documentation/fb/intel810.txt>
+ <file:Documentation/fb/intel810.rst>
config FB_I810_GTF
bool "use VESA Generalized Timing Formula"
@@ -1057,7 +1061,7 @@ config FB_INTEL
To compile this driver as a module, choose M here: the
module will be called intelfb.
- For more information, please read <file:Documentation/fb/intelfb.txt>
+ For more information, please read <file:Documentation/fb/intelfb.rst>
config FB_INTEL_DEBUG
bool "Intel driver Debug Messages"
@@ -1094,7 +1098,7 @@ config FB_MATROX
You can pass several parameters to the driver at boot time or at
module load time. The parameters look like "video=matroxfb:XXX", and
- are described in <file:Documentation/fb/matroxfb.txt>.
+ are described in <file:Documentation/fb/matroxfb.rst>.
config FB_MATROX_MILLENIUM
bool "Millennium I/II support"
@@ -1245,7 +1249,7 @@ config FB_ATY128
help
This driver supports graphics boards with the ATI Rage128 chips.
Say Y if you have such a graphics board and read
- <file:Documentation/fb/aty128fb.txt>.
+ <file:Documentation/fb/aty128fb.rst>.
To compile this driver as a module, choose M here: the
module will be called aty128fb.
@@ -1507,7 +1511,7 @@ config FB_VOODOO1
WARNING: Do not use any application that uses the 3D engine
(namely glide) while using this driver.
- Please read the <file:Documentation/fb/sstfb.txt> for supported
+ Please read the <file:Documentation/fb/sstfb.rst> for supported
options and other important info support.
config FB_VT8623
@@ -1539,7 +1543,7 @@ config FB_TRIDENT
There are also integrated versions of these chips called CyberXXXX,
CyberImage or CyberBlade. These chips are mostly found in laptops
but also on some motherboards including early VIA EPIA motherboards.
- For more information, read <file:Documentation/fb/tridentfb.txt>
+ For more information, read <file:Documentation/fb/tridentfb.rst>
Say Y if you have such a graphics board.
@@ -1729,7 +1733,8 @@ config FB_68328
config FB_PXA168
tristate "PXA168/910 LCD framebuffer support"
- depends on FB && (CPU_PXA168 || CPU_PXA910)
+ depends on FB && HAVE_CLK && HAS_IOMEM
+ depends on CPU_PXA168 || CPU_PXA910 || COMPILE_TEST
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
@@ -1752,7 +1757,7 @@ config FB_PXA
This driver is also available as a module ( = code which can be
inserted and removed from the running kernel whenever you want). The
module will be called pxafb. If you want to compile it as a module,
- say M here and read <file:Documentation/kbuild/modules.txt>.
+ say M here and read <file:Documentation/kbuild/modules.rst>.
If unsure, say N.
@@ -1778,7 +1783,7 @@ config FB_PXA_PARAMETERS
single model of flatpanel then you can safely leave this
option disabled.
- <file:Documentation/fb/pxafb.txt> describes the available parameters.
+ <file:Documentation/fb/pxafb.rst> describes the available parameters.
config PXA3XX_GCU
tristate "PXA3xx 2D graphics accelerator driver"
@@ -1833,7 +1838,7 @@ config FB_W100
This driver is also available as a module ( = code which can be
inserted and removed from the running kernel whenever you want). The
module will be called w100fb. If you want to compile it as a module,
- say M here and read <file:Documentation/kbuild/modules.txt>.
+ say M here and read <file:Documentation/kbuild/modules.rst>.
If unsure, say N.
@@ -1862,7 +1867,7 @@ config FB_TMIO
This driver is also available as a module ( = code which can be
inserted and removed from the running kernel whenever you want). The
module will be called tmiofb. If you want to compile it as a module,
- say M here and read <file:Documentation/kbuild/modules.txt>.
+ say M here and read <file:Documentation/kbuild/modules.rst>.
If unsure, say N.
@@ -1873,7 +1878,8 @@ config FB_TMIO_ACCELL
config FB_S3C
tristate "Samsung S3C framebuffer support"
- depends on FB && (CPU_S3C2416 || ARCH_S3C64XX)
+ depends on FB && HAVE_CLK && HAS_IOMEM
+ depends on (CPU_S3C2416 || ARCH_S3C64XX) || COMPILE_TEST
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
@@ -1908,7 +1914,7 @@ config FB_S3C2410
This driver is also available as a module ( = code which can be
inserted and removed from the running kernel whenever you want). The
module will be called s3c2410fb. If you want to compile it as a module,
- say M here and read <file:Documentation/kbuild/modules.txt>.
+ say M here and read <file:Documentation/kbuild/modules.rst>.
If unsure, say N.
config FB_S3C2410_DEBUG
@@ -1945,7 +1951,7 @@ config FB_SM501
This driver is also available as a module ( = code which can be
inserted and removed from the running kernel whenever you want). The
module will be called sm501fb. If you want to compile it as a module,
- say M here and read <file:Documentation/kbuild/modules.txt>.
+ say M here and read <file:Documentation/kbuild/modules.rst>.
If unsure, say N.
@@ -2055,7 +2061,8 @@ config FB_SH7760
config FB_DA8XX
tristate "DA8xx/OMAP-L1xx/AM335x Framebuffer support"
- depends on FB && (ARCH_DAVINCI_DA8XX || SOC_AM33XX)
+ depends on FB && HAVE_CLK && HAS_IOMEM
+ depends on ARCH_DAVINCI_DA8XX || SOC_AM33XX || COMPILE_TEST
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
@@ -2172,7 +2179,7 @@ config FB_EP93XX
config FB_PRE_INIT_FB
bool "Don't reinitialize, use bootloader's GDC/Display configuration"
- depends on FB && (FB_MB862XX_LIME || FB_MXS)
+ depends on FB && FB_MB862XX_LIME
---help---
Select this option if display contents should be inherited as set by
the bootloader.
@@ -2213,17 +2220,6 @@ config FB_JZ4740
help
Framebuffer support for the JZ4740 SoC.
-config FB_MXS
- tristate "MXS LCD framebuffer support"
- depends on FB && (ARCH_MXS || ARCH_MXC)
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
- select FB_MODE_HELPERS
- select VIDEOMODE_HELPERS
- help
- Framebuffer support for the MXS SoC.
-
config FB_PUV3_UNIGFX
tristate "PKUnity v3 Unigfx framebuffer support"
depends on FB && UNICORE32 && ARCH_PUV3
@@ -2288,7 +2284,7 @@ config FB_SM712
This driver is also available as a module. The module will be
called sm712fb. If you want to compile it as a module, say M
- here and read <file:Documentation/kbuild/modules.txt>.
+ here and read <file:Documentation/kbuild/modules.rst>.
source "drivers/video/fbdev/omap/Kconfig"
source "drivers/video/fbdev/omap2/Kconfig"
diff --git a/drivers/video/fbdev/Makefile b/drivers/video/fbdev/Makefile
index 655f2537cac1..7dc4861a93e6 100644
--- a/drivers/video/fbdev/Makefile
+++ b/drivers/video/fbdev/Makefile
@@ -131,7 +131,6 @@ obj-$(CONFIG_FB_VGA16) += vga16fb.o
obj-$(CONFIG_FB_OF) += offb.o
obj-$(CONFIG_FB_MX3) += mx3fb.o
obj-$(CONFIG_FB_DA8XX) += da8xx-fb.o
-obj-$(CONFIG_FB_MXS) += mxsfb.o
obj-$(CONFIG_FB_SSD1307) += ssd1307fb.o
obj-$(CONFIG_FB_SIMPLE) += simplefb.o
diff --git a/drivers/video/fbdev/amifb.c b/drivers/video/fbdev/amifb.c
index 758457026694..91ddc9602014 100644
--- a/drivers/video/fbdev/amifb.c
+++ b/drivers/video/fbdev/amifb.c
@@ -3554,10 +3554,8 @@ static int __init amifb_probe(struct platform_device *pdev)
custom.dmacon = DMAF_ALL | DMAF_MASTER;
info = framebuffer_alloc(sizeof(struct amifb_par), &pdev->dev);
- if (!info) {
- dev_err(&pdev->dev, "framebuffer_alloc failed\n");
+ if (!info)
return -ENOMEM;
- }
strcpy(info->fix.id, "Amiga ");
info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
diff --git a/drivers/video/fbdev/arkfb.c b/drivers/video/fbdev/arkfb.c
index 13ba371e70aa..f940e8b66b85 100644
--- a/drivers/video/fbdev/arkfb.c
+++ b/drivers/video/fbdev/arkfb.c
@@ -954,10 +954,8 @@ static int ark_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
/* Allocate and fill driver data structure */
info = framebuffer_alloc(sizeof(struct arkfb_info), &(dev->dev));
- if (! info) {
- dev_err(&(dev->dev), "cannot allocate memory\n");
+ if (!info)
return -ENOMEM;
- }
par = info->par;
mutex_init(&par->open_lock);
diff --git a/drivers/video/fbdev/atafb.c b/drivers/video/fbdev/atafb.c
index b986af2a8042..fc9dfb0a95af 100644
--- a/drivers/video/fbdev/atafb.c
+++ b/drivers/video/fbdev/atafb.c
@@ -77,29 +77,8 @@
#define SWITCH_SND7 0x80
#define SWITCH_NONE 0x00
-
#define up(x, r) (((x) + (r) - 1) & ~((r)-1))
- /*
- * Interface to the world
- */
-
-static int atafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info);
-static int atafb_set_par(struct fb_info *info);
-static int atafb_setcolreg(unsigned int regno, unsigned int red, unsigned int green,
- unsigned int blue, unsigned int transp,
- struct fb_info *info);
-static int atafb_blank(int blank, struct fb_info *info);
-static int atafb_pan_display(struct fb_var_screeninfo *var,
- struct fb_info *info);
-static void atafb_fillrect(struct fb_info *info,
- const struct fb_fillrect *rect);
-static void atafb_copyarea(struct fb_info *info,
- const struct fb_copyarea *region);
-static void atafb_imageblit(struct fb_info *info, const struct fb_image *image);
-static int atafb_ioctl(struct fb_info *info, unsigned int cmd,
- unsigned long arg);
-
static int default_par; /* default resolution (0=none) */
diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c
index e67dfd94bf1d..5ff8e0320d95 100644
--- a/drivers/video/fbdev/atmel_lcdfb.c
+++ b/drivers/video/fbdev/atmel_lcdfb.c
@@ -673,7 +673,7 @@ static int atmel_lcdfb_set_par(struct fb_info *info)
lcdc_writel(sinfo, ATMEL_LCDC_MVAL, 0);
/* Disable all interrupts */
- lcdc_writel(sinfo, ATMEL_LCDC_IDR, ~0UL);
+ lcdc_writel(sinfo, ATMEL_LCDC_IDR, ~0U);
/* Enable FIFO & DMA errors */
lcdc_writel(sinfo, ATMEL_LCDC_IER, ATMEL_LCDC_UFLWI | ATMEL_LCDC_OWRI | ATMEL_LCDC_MERI);
@@ -950,7 +950,7 @@ static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo)
struct fb_videomode fb_vm;
struct gpio_desc *gpiod;
struct videomode vm;
- int ret = -ENOENT;
+ int ret;
int i;
sinfo->config = (struct atmel_lcdfb_config*)
@@ -1053,10 +1053,8 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
ret = -ENOMEM;
info = framebuffer_alloc(sizeof(struct atmel_lcdfb_info), dev);
- if (!info) {
- dev_err(dev, "cannot allocate memory\n");
+ if (!info)
goto out;
- }
sinfo = info->par;
sinfo->pdev = pdev;
@@ -1291,7 +1289,7 @@ static int atmel_lcdfb_suspend(struct platform_device *pdev, pm_message_t mesg)
* We don't want to handle interrupts while the clock is
* stopped. It may take forever.
*/
- lcdc_writel(sinfo, ATMEL_LCDC_IDR, ~0UL);
+ lcdc_writel(sinfo, ATMEL_LCDC_IDR, ~0U);
sinfo->saved_lcdcon = lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_CTR);
lcdc_writel(sinfo, ATMEL_LCDC_CONTRAST_CTR, 0);
diff --git a/drivers/video/fbdev/aty/aty128fb.c b/drivers/video/fbdev/aty/aty128fb.c
index 794434891291..8504e19437ff 100644
--- a/drivers/video/fbdev/aty/aty128fb.c
+++ b/drivers/video/fbdev/aty/aty128fb.c
@@ -2103,10 +2103,9 @@ static int aty128_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* We have the resources. Now virtualize them */
info = framebuffer_alloc(sizeof(struct aty128fb_par), &pdev->dev);
- if (info == NULL) {
- printk(KERN_ERR "aty128fb: can't alloc fb_info_aty128\n");
+ if (!info)
goto err_free_mmio;
- }
+
par = info->par;
info->pseudo_palette = par->pseudo_palette;
@@ -2350,70 +2349,6 @@ static int aty128fb_ioctl(struct fb_info *info, u_int cmd, u_long arg)
return -EINVAL;
}
-#if 0
- /*
- * Accelerated functions
- */
-
-static inline void aty128_rectcopy(int srcx, int srcy, int dstx, int dsty,
- u_int width, u_int height,
- struct fb_info_aty128 *par)
-{
- u32 save_dp_datatype, save_dp_cntl, dstval;
-
- if (!width || !height)
- return;
-
- dstval = depth_to_dst(par->current_par.crtc.depth);
- if (dstval == DST_24BPP) {
- srcx *= 3;
- dstx *= 3;
- width *= 3;
- } else if (dstval == -EINVAL) {
- printk("aty128fb: invalid depth or RGBA\n");
- return;
- }
-
- wait_for_fifo(2, par);
- save_dp_datatype = aty_ld_le32(DP_DATATYPE);
- save_dp_cntl = aty_ld_le32(DP_CNTL);
-
- wait_for_fifo(6, par);
- aty_st_le32(SRC_Y_X, (srcy << 16) | srcx);
- aty_st_le32(DP_MIX, ROP3_SRCCOPY | DP_SRC_RECT);
- aty_st_le32(DP_CNTL, DST_X_LEFT_TO_RIGHT | DST_Y_TOP_TO_BOTTOM);
- aty_st_le32(DP_DATATYPE, save_dp_datatype | dstval | SRC_DSTCOLOR);
-
- aty_st_le32(DST_Y_X, (dsty << 16) | dstx);
- aty_st_le32(DST_HEIGHT_WIDTH, (height << 16) | width);
-
- par->blitter_may_be_busy = 1;
-
- wait_for_fifo(2, par);
- aty_st_le32(DP_DATATYPE, save_dp_datatype);
- aty_st_le32(DP_CNTL, save_dp_cntl);
-}
-
-
- /*
- * Text mode accelerated functions
- */
-
-static void fbcon_aty128_bmove(struct display *p, int sy, int sx, int dy,
- int dx, int height, int width)
-{
- sx *= fontwidth(p);
- sy *= fontheight(p);
- dx *= fontwidth(p);
- dy *= fontheight(p);
- width *= fontwidth(p);
- height *= fontheight(p);
-
- aty128_rectcopy(sx, sy, dx, dy, width, height,
- (struct fb_info_aty128 *)p->fb_info);
-}
-#endif /* 0 */
-
static void aty128_set_suspend(struct aty128fb_par *par, int suspend)
{
u32 pmgt;
diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
index b6fe103df145..72bcfbe42e49 100644
--- a/drivers/video/fbdev/aty/atyfb_base.c
+++ b/drivers/video/fbdev/aty/atyfb_base.c
@@ -3550,10 +3550,9 @@ static int atyfb_pci_probe(struct pci_dev *pdev,
/* Allocate framebuffer */
info = framebuffer_alloc(sizeof(struct atyfb_par), &pdev->dev);
- if (!info) {
- PRINTKE("atyfb_pci_probe() can't alloc fb_info\n");
+ if (!info)
return -ENOMEM;
- }
+
par = info->par;
par->bus_type = PCI;
info->fix = atyfb_fix;
@@ -3643,10 +3642,9 @@ static int __init atyfb_atari_probe(void)
}
info = framebuffer_alloc(sizeof(struct atyfb_par), NULL);
- if (!info) {
- PRINTKE("atyfb_atari_probe() can't alloc fb_info\n");
+ if (!info)
return -ENOMEM;
- }
+
par = info->par;
info->fix = atyfb_fix;
@@ -3916,8 +3914,7 @@ static int atyfb_reboot_notify(struct notifier_block *nb,
if (!reboot_info)
goto out;
- if (!lock_fb_info(reboot_info))
- goto out;
+ lock_fb_info(reboot_info);
par = reboot_info->par;
diff --git a/drivers/video/fbdev/aty/radeon_base.c b/drivers/video/fbdev/aty/radeon_base.c
index e8594bbaea60..6f891d82eebe 100644
--- a/drivers/video/fbdev/aty/radeon_base.c
+++ b/drivers/video/fbdev/aty/radeon_base.c
@@ -2294,8 +2294,6 @@ static int radeonfb_pci_register(struct pci_dev *pdev,
info = framebuffer_alloc(sizeof(struct radeonfb_info), &pdev->dev);
if (!info) {
- printk (KERN_ERR "radeonfb (%s): could not allocate memory\n",
- pci_name(pdev));
ret = -ENOMEM;
goto err_disable;
}
diff --git a/drivers/video/fbdev/au1200fb.c b/drivers/video/fbdev/au1200fb.c
index 3872ccef4cb2..26caffb02b7e 100644
--- a/drivers/video/fbdev/au1200fb.c
+++ b/drivers/video/fbdev/au1200fb.c
@@ -147,6 +147,7 @@ struct au1200_lcd_iodata_t {
struct au1200fb_device {
struct fb_info *fb_info; /* FB driver info record */
struct au1200fb_platdata *pd;
+ struct device *dev;
int plane;
unsigned char* fb_mem; /* FrameBuffer memory map */
@@ -1232,10 +1233,8 @@ static int au1200fb_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
struct au1200fb_device *fbdev = info->par;
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- pgprot_val(vma->vm_page_prot) |= _CACHE_MASK; /* CCA=7 */
-
- return vm_iomap_memory(vma, fbdev->fb_phys, fbdev->fb_len);
+ return dma_mmap_attrs(fbdev->dev, vma, fbdev->fb_mem, fbdev->fb_phys,
+ fbdev->fb_len, DMA_ATTR_NON_CONSISTENT);
}
static void set_global(u_int cmd, struct au1200_lcd_global_regs_t *pdata)
@@ -1647,7 +1646,6 @@ static int au1200fb_drv_probe(struct platform_device *dev)
struct au1200fb_device *fbdev;
struct au1200fb_platdata *pd;
struct fb_info *fbi = NULL;
- unsigned long page;
int bpp, plane, ret, irq;
print_info("" DRIVER_DESC "");
@@ -1685,6 +1683,7 @@ static int au1200fb_drv_probe(struct platform_device *dev)
fbdev = fbi->par;
fbdev->fb_info = fbi;
fbdev->pd = pd;
+ fbdev->dev = &dev->dev;
fbdev->plane = plane;
@@ -1702,16 +1701,6 @@ static int au1200fb_drv_probe(struct platform_device *dev)
goto failed;
}
- /*
- * Set page reserved so that mmap will work. This is necessary
- * since we'll be remapping normal memory.
- */
- for (page = (unsigned long)fbdev->fb_phys;
- page < PAGE_ALIGN((unsigned long)fbdev->fb_phys +
- fbdev->fb_len);
- page += PAGE_SIZE) {
- SetPageReserved(pfn_to_page(page >> PAGE_SHIFT)); /* LCD DMA is NOT coherent on Au1200 */
- }
print_dbg("Framebuffer memory map at %p", fbdev->fb_mem);
print_dbg("phys=0x%08x, size=%dK", fbdev->fb_phys, fbdev->fb_len / 1024);
diff --git a/drivers/video/fbdev/chipsfb.c b/drivers/video/fbdev/chipsfb.c
index ca549e1532e6..f4dc320dcafe 100644
--- a/drivers/video/fbdev/chipsfb.c
+++ b/drivers/video/fbdev/chipsfb.c
@@ -366,7 +366,6 @@ static int chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent)
p = framebuffer_alloc(0, &dp->dev);
if (p == NULL) {
- dev_err(&dp->dev, "Cannot allocate framebuffer structure\n");
rc = -ENOMEM;
goto err_disable;
}
diff --git a/drivers/video/fbdev/cirrusfb.c b/drivers/video/fbdev/cirrusfb.c
index b3be06dd2908..e4ce5667b125 100644
--- a/drivers/video/fbdev/cirrusfb.c
+++ b/drivers/video/fbdev/cirrusfb.c
@@ -2093,7 +2093,6 @@ static int cirrusfb_pci_register(struct pci_dev *pdev,
info = framebuffer_alloc(sizeof(struct cirrusfb_info), &pdev->dev);
if (!info) {
- printk(KERN_ERR "cirrusfb: could not allocate memory\n");
ret = -ENOMEM;
goto err_out;
}
@@ -2206,10 +2205,8 @@ static int cirrusfb_zorro_register(struct zorro_dev *z,
struct cirrusfb_info *cinfo;
info = framebuffer_alloc(sizeof(struct cirrusfb_info), &z->dev);
- if (!info) {
- printk(KERN_ERR "cirrusfb: could not allocate memory\n");
+ if (!info)
return -ENOMEM;
- }
zcl = (const struct zorrocl *)ent->driver_data;
btype = zcl->type;
diff --git a/drivers/video/fbdev/controlfb.c b/drivers/video/fbdev/controlfb.c
index 7af8db28bb80..9a680ef3ffc3 100644
--- a/drivers/video/fbdev/controlfb.c
+++ b/drivers/video/fbdev/controlfb.c
@@ -182,7 +182,7 @@ int init_module(void)
int ret = -ENXIO;
dp = of_find_node_by_name(NULL, "control");
- if (dp != 0 && !control_of_init(dp))
+ if (dp && !control_of_init(dp))
ret = 0;
of_node_put(dp);
@@ -580,7 +580,7 @@ static int __init control_init(void)
control_setup(option);
dp = of_find_node_by_name(NULL, "control");
- if (dp != 0 && !control_of_init(dp))
+ if (dp && !control_of_init(dp))
ret = 0;
of_node_put(dp);
@@ -683,8 +683,8 @@ static int __init control_of_init(struct device_node *dp)
return -ENXIO;
}
p = kzalloc(sizeof(*p), GFP_KERNEL);
- if (p == 0)
- return -ENXIO;
+ if (!p)
+ return -ENOMEM;
control_fb = p; /* save it for cleanups */
/* Map in frame buffer and registers */
diff --git a/drivers/video/fbdev/core/fbcmap.c b/drivers/video/fbdev/core/fbcmap.c
index 2811c4afde01..e5ae33c1a8e8 100644
--- a/drivers/video/fbdev/core/fbcmap.c
+++ b/drivers/video/fbdev/core/fbcmap.c
@@ -285,11 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
goto out;
}
umap.start = cmap->start;
- if (!lock_fb_info(info)) {
- rc = -ENODEV;
- goto out;
- }
-
+ lock_fb_info(info);
rc = fb_set_cmap(&umap, info);
unlock_fb_info(info);
out:
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index a9c69ae30878..c9235a2f42f8 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -76,6 +76,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/crc32.h> /* For counting font checksums */
+#include <linux/uaccess.h>
#include <asm/fb.h>
#include <asm/irq.h>
@@ -87,13 +88,32 @@
# define DPRINTK(fmt, args...)
#endif
+/*
+ * FIXME: Locking
+ *
+ * - fbcon state itself is protected by the console_lock, and the code does a
+ * pretty good job at making sure that lock is held everywhere it's needed.
+ *
+ * - access to the registered_fb array is entirely unprotected. This should use
+ * proper object lifetime handling, i.e. get/put_fb_info. This also means
+ * switching from indices to proper pointers for fb_info everywhere.
+ *
+ * - fbcon doesn't bother with fb_lock/unlock at all. This is buggy, since it
+ * means concurrent access to the same fbdev from both fbcon and userspace
+ * will blow up. To fix this all fbcon calls from fbmem.c need to be moved out
+ * of fb_lock/unlock protected sections, since otherwise we'll recurse and
+ * deadlock eventually. Aside: Due to these deadlock issues the fbdev code in
+ * fbmem.c cannot use locking asserts, and there's lots of callers which get
+ * the rules wrong, e.g. fbsysfs.c entirely missed fb_lock/unlock calls too.
+ */
+
enum {
FBCON_LOGO_CANSHOW = -1, /* the logo can be shown */
FBCON_LOGO_DRAW = -2, /* draw the logo to a console */
FBCON_LOGO_DONTSHOW = -3 /* do not show the logo */
};
-static struct display fb_display[MAX_NR_CONSOLES];
+static struct fbcon_display fb_display[MAX_NR_CONSOLES];
static signed char con2fb_map[MAX_NR_CONSOLES];
static signed char con2fb_map_boot[MAX_NR_CONSOLES];
@@ -112,7 +132,6 @@ static int softback_lines;
static int first_fb_vc;
static int last_fb_vc = MAX_NR_CONSOLES - 1;
static int fbcon_is_default = 1;
-static int fbcon_has_exited;
static int primary_device = -1;
static int fbcon_has_console_bind;
@@ -185,11 +204,11 @@ static __inline__ void ywrap_up(struct vc_data *vc, int count);
static __inline__ void ywrap_down(struct vc_data *vc, int count);
static __inline__ void ypan_up(struct vc_data *vc, int count);
static __inline__ void ypan_down(struct vc_data *vc, int count);
-static void fbcon_bmove_rec(struct vc_data *vc, struct display *p, int sy, int sx,
+static void fbcon_bmove_rec(struct vc_data *vc, struct fbcon_display *p, int sy, int sx,
int dy, int dx, int height, int width, u_int y_break);
static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var,
int unit);
-static void fbcon_redraw_move(struct vc_data *vc, struct display *p,
+static void fbcon_redraw_move(struct vc_data *vc, struct fbcon_display *p,
int line, int count, int dy);
static void fbcon_modechanged(struct fb_info *info);
static void fbcon_set_all_vcs(struct fb_info *info);
@@ -220,7 +239,7 @@ static void fbcon_rotate(struct fb_info *info, u32 rotate)
fb_info = registered_fb[con2fb_map[ops->currcon]];
if (info == fb_info) {
- struct display *p = &fb_display[ops->currcon];
+ struct fbcon_display *p = &fb_display[ops->currcon];
if (rotate < 4)
p->con_rotate = rotate;
@@ -235,7 +254,7 @@ static void fbcon_rotate_all(struct fb_info *info, u32 rotate)
{
struct fbcon_ops *ops = info->fbcon_par;
struct vc_data *vc;
- struct display *p;
+ struct fbcon_display *p;
int i;
if (!ops || ops->currcon < 0 || rotate > 3)
@@ -900,7 +919,7 @@ static int set_con2fb_map(int unit, int newidx, int user)
* Low Level Operations
*/
/* NOTE: fbcon cannot be __init: it may be called from do_take_over_console later */
-static int var_to_display(struct display *disp,
+static int var_to_display(struct fbcon_display *disp,
struct fb_var_screeninfo *var,
struct fb_info *info)
{
@@ -925,7 +944,7 @@ static int var_to_display(struct display *disp,
}
static void display_to_var(struct fb_var_screeninfo *var,
- struct display *disp)
+ struct fbcon_display *disp)
{
fb_videomode_to_var(var, disp->mode);
var->xres_virtual = disp->xres_virtual;
@@ -946,7 +965,7 @@ static void display_to_var(struct fb_var_screeninfo *var,
static const char *fbcon_startup(void)
{
const char *display_desc = "frame buffer device";
- struct display *p = &fb_display[fg_console];
+ struct fbcon_display *p = &fb_display[fg_console];
struct vc_data *vc = vc_cons[fg_console].d;
const struct font_desc *font = NULL;
struct module *owner;
@@ -1050,23 +1069,26 @@ static const char *fbcon_startup(void)
info->var.bits_per_pixel);
fbcon_add_cursor_timer(info);
- fbcon_has_exited = 0;
return display_desc;
}
static void fbcon_init(struct vc_data *vc, int init)
{
- struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fb_info *info;
struct fbcon_ops *ops;
struct vc_data **default_mode = vc->vc_display_fg;
struct vc_data *svc = *default_mode;
- struct display *t, *p = &fb_display[vc->vc_num];
+ struct fbcon_display *t, *p = &fb_display[vc->vc_num];
int logo = 1, new_rows, new_cols, rows, cols, charcnt = 256;
int cap, ret;
- if (info_idx == -1 || info == NULL)
+ if (WARN_ON(info_idx == -1))
return;
+ if (con2fb_map[vc->vc_num] == -1)
+ con2fb_map[vc->vc_num] = info_idx;
+
+ info = registered_fb[con2fb_map[vc->vc_num]];
cap = info->flags;
if (logo_shown < 0 && console_loglevel <= CONSOLE_LOGLEVEL_QUIET)
@@ -1203,7 +1225,7 @@ static void fbcon_init(struct vc_data *vc, int init)
ops->p = &fb_display[fg_console];
}
-static void fbcon_free_font(struct display *p, bool freefont)
+static void fbcon_free_font(struct fbcon_display *p, bool freefont)
{
if (freefont && p->userfont && p->fontdata && (--REFCOUNT(p->fontdata) == 0))
kfree(p->fontdata - FONT_EXTRA_WORDS * sizeof(int));
@@ -1215,7 +1237,7 @@ static void set_vc_hi_font(struct vc_data *vc, bool set);
static void fbcon_deinit(struct vc_data *vc)
{
- struct display *p = &fb_display[vc->vc_num];
+ struct fbcon_display *p = &fb_display[vc->vc_num];
struct fb_info *info;
struct fbcon_ops *ops;
int idx;
@@ -1288,7 +1310,7 @@ static void fbcon_clear(struct vc_data *vc, int sy, int sx, int height,
struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
struct fbcon_ops *ops = info->fbcon_par;
- struct display *p = &fb_display[vc->vc_num];
+ struct fbcon_display *p = &fb_display[vc->vc_num];
u_int y_break;
if (fbcon_is_inactive(vc, info))
@@ -1324,7 +1346,7 @@ static void fbcon_putcs(struct vc_data *vc, const unsigned short *s,
int count, int ypos, int xpos)
{
struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
- struct display *p = &fb_display[vc->vc_num];
+ struct fbcon_display *p = &fb_display[vc->vc_num];
struct fbcon_ops *ops = info->fbcon_par;
if (!fbcon_is_inactive(vc, info))
@@ -1388,7 +1410,7 @@ static int scrollback_current = 0;
static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var,
int unit)
{
- struct display *p, *t;
+ struct fbcon_display *p, *t;
struct vc_data **default_mode, *vc;
struct vc_data *svc;
struct fbcon_ops *ops = info->fbcon_par;
@@ -1457,7 +1479,7 @@ static __inline__ void ywrap_up(struct vc_data *vc, int count)
{
struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
struct fbcon_ops *ops = info->fbcon_par;
- struct display *p = &fb_display[vc->vc_num];
+ struct fbcon_display *p = &fb_display[vc->vc_num];
p->yscroll += count;
if (p->yscroll >= p->vrows) /* Deal with wrap */
@@ -1476,7 +1498,7 @@ static __inline__ void ywrap_down(struct vc_data *vc, int count)
{
struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
struct fbcon_ops *ops = info->fbcon_par;
- struct display *p = &fb_display[vc->vc_num];
+ struct fbcon_display *p = &fb_display[vc->vc_num];
p->yscroll -= count;
if (p->yscroll < 0) /* Deal with wrap */
@@ -1494,7 +1516,7 @@ static __inline__ void ywrap_down(struct vc_data *vc, int count)
static __inline__ void ypan_up(struct vc_data *vc, int count)
{
struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
- struct display *p = &fb_display[vc->vc_num];
+ struct fbcon_display *p = &fb_display[vc->vc_num];
struct fbcon_ops *ops = info->fbcon_par;
p->yscroll += count;
@@ -1519,7 +1541,7 @@ static __inline__ void ypan_up_redraw(struct vc_data *vc, int t, int count)
{
struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
struct fbcon_ops *ops = info->fbcon_par;
- struct display *p = &fb_display[vc->vc_num];
+ struct fbcon_display *p = &fb_display[vc->vc_num];
p->yscroll += count;
@@ -1542,7 +1564,7 @@ static __inline__ void ypan_up_redraw(struct vc_data *vc, int t, int count)
static __inline__ void ypan_down(struct vc_data *vc, int count)
{
struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
- struct display *p = &fb_display[vc->vc_num];
+ struct fbcon_display *p = &fb_display[vc->vc_num];
struct fbcon_ops *ops = info->fbcon_par;
p->yscroll -= count;
@@ -1567,7 +1589,7 @@ static __inline__ void ypan_down_redraw(struct vc_data *vc, int t, int count)
{
struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
struct fbcon_ops *ops = info->fbcon_par;
- struct display *p = &fb_display[vc->vc_num];
+ struct fbcon_display *p = &fb_display[vc->vc_num];
p->yscroll -= count;
@@ -1587,7 +1609,7 @@ static __inline__ void ypan_down_redraw(struct vc_data *vc, int t, int count)
scrollback_current = 0;
}
-static void fbcon_redraw_softback(struct vc_data *vc, struct display *p,
+static void fbcon_redraw_softback(struct vc_data *vc, struct fbcon_display *p,
long delta)
{
int count = vc->vc_rows;
@@ -1680,7 +1702,7 @@ static void fbcon_redraw_softback(struct vc_data *vc, struct display *p,
}
}
-static void fbcon_redraw_move(struct vc_data *vc, struct display *p,
+static void fbcon_redraw_move(struct vc_data *vc, struct fbcon_display *p,
int line, int count, int dy)
{
unsigned short *s = (unsigned short *)
@@ -1715,7 +1737,7 @@ static void fbcon_redraw_move(struct vc_data *vc, struct display *p,
}
static void fbcon_redraw_blit(struct vc_data *vc, struct fb_info *info,
- struct display *p, int line, int count, int ycount)
+ struct fbcon_display *p, int line, int count, int ycount)
{
int offset = ycount * vc->vc_cols;
unsigned short *d = (unsigned short *)
@@ -1764,7 +1786,7 @@ static void fbcon_redraw_blit(struct vc_data *vc, struct fb_info *info,
}
}
-static void fbcon_redraw(struct vc_data *vc, struct display *p,
+static void fbcon_redraw(struct vc_data *vc, struct fbcon_display *p,
int line, int count, int offset)
{
unsigned short *d = (unsigned short *)
@@ -1848,7 +1870,7 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
enum con_scroll dir, unsigned int count)
{
struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
- struct display *p = &fb_display[vc->vc_num];
+ struct fbcon_display *p = &fb_display[vc->vc_num];
int scroll_partial = info->flags & FBINFO_PARTIAL_PAN_OK;
if (fbcon_is_inactive(vc, info))
@@ -2052,7 +2074,7 @@ static void fbcon_bmove(struct vc_data *vc, int sy, int sx, int dy, int dx,
int height, int width)
{
struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
- struct display *p = &fb_display[vc->vc_num];
+ struct fbcon_display *p = &fb_display[vc->vc_num];
if (fbcon_is_inactive(vc, info))
return;
@@ -2071,7 +2093,7 @@ static void fbcon_bmove(struct vc_data *vc, int sy, int sx, int dy, int dx,
p->vrows - p->yscroll);
}
-static void fbcon_bmove_rec(struct vc_data *vc, struct display *p, int sy, int sx,
+static void fbcon_bmove_rec(struct vc_data *vc, struct fbcon_display *p, int sy, int sx,
int dy, int dx, int height, int width, u_int y_break)
{
struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
@@ -2113,7 +2135,7 @@ static void fbcon_bmove_rec(struct vc_data *vc, struct display *p, int sy, int s
height, width);
}
-static void updatescrollmode(struct display *p,
+static void updatescrollmode(struct fbcon_display *p,
struct fb_info *info,
struct vc_data *vc)
{
@@ -2165,7 +2187,7 @@ static int fbcon_resize(struct vc_data *vc, unsigned int width,
{
struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
struct fbcon_ops *ops = info->fbcon_par;
- struct display *p = &fb_display[vc->vc_num];
+ struct fbcon_display *p = &fb_display[vc->vc_num];
struct fb_var_screeninfo var = info->var;
int x_diff, y_diff, virt_w, virt_h, virt_fw, virt_fh;
@@ -2210,7 +2232,7 @@ static int fbcon_switch(struct vc_data *vc)
{
struct fb_info *info, *old_info = NULL;
struct fbcon_ops *ops;
- struct display *p = &fb_display[vc->vc_num];
+ struct fbcon_display *p = &fb_display[vc->vc_num];
struct fb_var_screeninfo var;
int i, ret, prev_console, charcnt = 256;
@@ -2348,8 +2370,6 @@ static int fbcon_switch(struct vc_data *vc)
static void fbcon_generic_blank(struct vc_data *vc, struct fb_info *info,
int blank)
{
- struct fb_event event;
-
if (blank) {
unsigned short charmask = vc->vc_hi_font_mask ?
0x1ff : 0xff;
@@ -2360,14 +2380,6 @@ static void fbcon_generic_blank(struct vc_data *vc, struct fb_info *info,
fbcon_clear(vc, 0, 0, vc->vc_rows, vc->vc_cols);
vc->vc_video_erase_char = oldc;
}
-
-
- if (!lock_fb_info(info))
- return;
- event.info = info;
- event.data = &blank;
- fb_notifier_call_chain(FB_EVENT_CONBLANK, &event);
- unlock_fb_info(info);
}
static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch)
@@ -2394,9 +2406,8 @@ static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch)
fbcon_cursor(vc, blank ? CM_ERASE : CM_DRAW);
ops->cursor_flash = (!blank);
- if (!(info->flags & FBINFO_MISC_USEREVENT))
- if (fb_blank(info, blank))
- fbcon_generic_blank(vc, info, blank);
+ if (fb_blank(info, blank))
+ fbcon_generic_blank(vc, info, blank);
}
if (!blank)
@@ -2553,7 +2564,7 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
{
struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
struct fbcon_ops *ops = info->fbcon_par;
- struct display *p = &fb_display[vc->vc_num];
+ struct fbcon_display *p = &fb_display[vc->vc_num];
int resize;
int cnt;
char *old_data = NULL;
@@ -2601,7 +2612,7 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
static int fbcon_copy_font(struct vc_data *vc, int con)
{
- struct display *od = &fb_display[con];
+ struct fbcon_display *od = &fb_display[con];
struct console_font *f = &vc->vc_font;
if (od->fontdata == f->data)
@@ -2826,7 +2837,7 @@ static void fbcon_scrolldelta(struct vc_data *vc, int lines)
{
struct fb_info *info = registered_fb[con2fb_map[fg_console]];
struct fbcon_ops *ops = info->fbcon_par;
- struct display *disp = &fb_display[fg_console];
+ struct fbcon_display *disp = &fb_display[fg_console];
int offset, limit, scrollback_old;
if (softback_top) {
@@ -2918,7 +2929,7 @@ static int fbcon_set_origin(struct vc_data *vc)
return 0;
}
-static void fbcon_suspended(struct fb_info *info)
+void fbcon_suspended(struct fb_info *info)
{
struct vc_data *vc = NULL;
struct fbcon_ops *ops = info->fbcon_par;
@@ -2931,7 +2942,7 @@ static void fbcon_suspended(struct fb_info *info)
fbcon_cursor(vc, CM_ERASE);
}
-static void fbcon_resumed(struct fb_info *info)
+void fbcon_resumed(struct fb_info *info)
{
struct vc_data *vc;
struct fbcon_ops *ops = info->fbcon_par;
@@ -2947,7 +2958,7 @@ static void fbcon_modechanged(struct fb_info *info)
{
struct fbcon_ops *ops = info->fbcon_par;
struct vc_data *vc;
- struct display *p;
+ struct fbcon_display *p;
int rows, cols;
if (!ops || ops->currcon < 0)
@@ -2987,7 +2998,7 @@ static void fbcon_set_all_vcs(struct fb_info *info)
{
struct fbcon_ops *ops = info->fbcon_par;
struct vc_data *vc;
- struct display *p;
+ struct fbcon_display *p;
int i, rows, cols, fg = -1;
if (!ops || ops->currcon < 0)
@@ -3018,11 +3029,21 @@ static void fbcon_set_all_vcs(struct fb_info *info)
fbcon_modechanged(info);
}
-static int fbcon_mode_deleted(struct fb_info *info,
- struct fb_videomode *mode)
+
+void fbcon_update_vcs(struct fb_info *info, bool all)
+{
+ if (all)
+ fbcon_set_all_vcs(info);
+ else
+ fbcon_modechanged(info);
+}
+EXPORT_SYMBOL(fbcon_update_vcs);
+
+int fbcon_mode_deleted(struct fb_info *info,
+ struct fb_videomode *mode)
{
struct fb_info *fb_info;
- struct display *p;
+ struct fbcon_display *p;
int i, j, found = 0;
/* before deletion, ensure that mode is not in use */
@@ -3045,7 +3066,7 @@ static int fbcon_mode_deleted(struct fb_info *info,
}
#ifdef CONFIG_VT_HW_CONSOLE_BINDING
-static int fbcon_unbind(void)
+static void fbcon_unbind(void)
{
int ret;
@@ -3054,25 +3075,21 @@ static int fbcon_unbind(void)
if (!ret)
fbcon_has_console_bind = 0;
-
- return ret;
}
#else
-static inline int fbcon_unbind(void)
-{
- return -EINVAL;
-}
+static inline void fbcon_unbind(void) {}
#endif /* CONFIG_VT_HW_CONSOLE_BINDING */
/* called with console_lock held */
-static int fbcon_fb_unbind(int idx)
+void fbcon_fb_unbind(struct fb_info *info)
{
int i, new_idx = -1, ret = 0;
+ int idx = info->node;
WARN_CONSOLE_UNLOCKED();
if (!fbcon_has_console_bind)
- return 0;
+ return;
for (i = first_fb_vc; i <= last_fb_vc; i++) {
if (con2fb_map[i] != idx &&
@@ -3105,26 +3122,24 @@ static int fbcon_fb_unbind(int idx)
idx, 0);
if (ret) {
con2fb_map[i] = idx;
- return ret;
+ return;
}
}
}
}
- ret = fbcon_unbind();
+ fbcon_unbind();
}
-
- return ret;
}
/* called with console_lock held */
-static int fbcon_fb_unregistered(struct fb_info *info)
+void fbcon_fb_unregistered(struct fb_info *info)
{
int i, idx;
WARN_CONSOLE_UNLOCKED();
if (deferred_takeover)
- return 0;
+ return;
idx = info->node;
for (i = first_fb_vc; i <= last_fb_vc; i++) {
@@ -3153,21 +3168,18 @@ static int fbcon_fb_unregistered(struct fb_info *info)
if (!num_registered_fb)
do_unregister_con_driver(&fb_con);
-
- return 0;
}
-/* called with console_lock held */
-static void fbcon_remap_all(int idx)
+void fbcon_remap_all(struct fb_info *info)
{
- int i;
-
- WARN_CONSOLE_UNLOCKED();
+ int i, idx = info->node;
+ console_lock();
if (deferred_takeover) {
for (i = first_fb_vc; i <= last_fb_vc; i++)
con2fb_map_boot[i] = idx;
fbcon_map_override();
+ console_unlock();
return;
}
@@ -3180,6 +3192,7 @@ static void fbcon_remap_all(int idx)
first_fb_vc + 1, last_fb_vc + 1);
info_idx = idx;
}
+ console_unlock();
}
#ifdef CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY
@@ -3213,7 +3226,7 @@ static inline void fbcon_select_primary(struct fb_info *info)
#endif /* CONFIG_FRAMEBUFFER_DETECT_PRIMARY */
/* called with console_lock held */
-static int fbcon_fb_registered(struct fb_info *info)
+int fbcon_fb_registered(struct fb_info *info)
{
int ret = 0, i, idx;
@@ -3247,7 +3260,7 @@ static int fbcon_fb_registered(struct fb_info *info)
return ret;
}
-static void fbcon_fb_blanked(struct fb_info *info, int blank)
+void fbcon_fb_blanked(struct fb_info *info, int blank)
{
struct fbcon_ops *ops = info->fbcon_par;
struct vc_data *vc;
@@ -3269,7 +3282,7 @@ static void fbcon_fb_blanked(struct fb_info *info, int blank)
ops->blank_state = blank;
}
-static void fbcon_new_modelist(struct fb_info *info)
+void fbcon_new_modelist(struct fb_info *info)
{
int i;
struct vc_data *vc;
@@ -3290,11 +3303,11 @@ static void fbcon_new_modelist(struct fb_info *info)
}
}
-static void fbcon_get_requirement(struct fb_info *info,
- struct fb_blit_caps *caps)
+void fbcon_get_requirement(struct fb_info *info,
+ struct fb_blit_caps *caps)
{
struct vc_data *vc;
- struct display *p;
+ struct fbcon_display *p;
if (caps->flags) {
int i, charcnt;
@@ -3326,80 +3339,47 @@ static void fbcon_get_requirement(struct fb_info *info,
}
}
-static int fbcon_event_notify(struct notifier_block *self,
- unsigned long action, void *data)
+int fbcon_set_con2fb_map_ioctl(void __user *argp)
{
- struct fb_event *event = data;
- struct fb_info *info = event->info;
- struct fb_videomode *mode;
- struct fb_con2fbmap *con2fb;
- struct fb_blit_caps *caps;
- int idx, ret = 0;
-
- /*
- * ignore all events except driver registration and deregistration
- * if fbcon is not active
- */
- if (fbcon_has_exited && !(action == FB_EVENT_FB_REGISTERED ||
- action == FB_EVENT_FB_UNREGISTERED))
- goto done;
+ struct fb_con2fbmap con2fb;
+ int ret;
- switch(action) {
- case FB_EVENT_SUSPEND:
- fbcon_suspended(info);
- break;
- case FB_EVENT_RESUME:
- fbcon_resumed(info);
- break;
- case FB_EVENT_MODE_CHANGE:
- fbcon_modechanged(info);
- break;
- case FB_EVENT_MODE_CHANGE_ALL:
- fbcon_set_all_vcs(info);
- break;
- case FB_EVENT_MODE_DELETE:
- mode = event->data;
- ret = fbcon_mode_deleted(info, mode);
- break;
- case FB_EVENT_FB_UNBIND:
- idx = info->node;
- ret = fbcon_fb_unbind(idx);
- break;
- case FB_EVENT_FB_REGISTERED:
- ret = fbcon_fb_registered(info);
- break;
- case FB_EVENT_FB_UNREGISTERED:
- ret = fbcon_fb_unregistered(info);
- break;
- case FB_EVENT_SET_CONSOLE_MAP:
- /* called with console lock held */
- con2fb = event->data;
- ret = set_con2fb_map(con2fb->console - 1,
- con2fb->framebuffer, 1);
- break;
- case FB_EVENT_GET_CONSOLE_MAP:
- con2fb = event->data;
- con2fb->framebuffer = con2fb_map[con2fb->console - 1];
- break;
- case FB_EVENT_BLANK:
- fbcon_fb_blanked(info, *(int *)event->data);
- break;
- case FB_EVENT_NEW_MODELIST:
- fbcon_new_modelist(info);
- break;
- case FB_EVENT_GET_REQ:
- caps = event->data;
- fbcon_get_requirement(info, caps);
- break;
- case FB_EVENT_REMAP_ALL_CONSOLE:
- idx = info->node;
- fbcon_remap_all(idx);
- break;
+ if (copy_from_user(&con2fb, argp, sizeof(con2fb)))
+ return -EFAULT;
+ if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
+ return -EINVAL;
+ if (con2fb.framebuffer >= FB_MAX)
+ return -EINVAL;
+ if (!registered_fb[con2fb.framebuffer])
+ request_module("fb%d", con2fb.framebuffer);
+ if (!registered_fb[con2fb.framebuffer]) {
+ return -EINVAL;
}
-done:
+
+ console_lock();
+ ret = set_con2fb_map(con2fb.console - 1,
+ con2fb.framebuffer, 1);
+ console_unlock();
+
return ret;
}
+int fbcon_get_con2fb_map_ioctl(void __user *argp)
+{
+ struct fb_con2fbmap con2fb;
+
+ if (copy_from_user(&con2fb, argp, sizeof(con2fb)))
+ return -EFAULT;
+ if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
+ return -EINVAL;
+
+ console_lock();
+ con2fb.framebuffer = con2fb_map[con2fb.console - 1];
+ console_unlock();
+
+ return copy_to_user(argp, &con2fb, sizeof(con2fb)) ? -EFAULT : 0;
+}
+
/*
* The console `switch' structure for the frame buffer based console
*/
@@ -3431,10 +3411,6 @@ static const struct consw fb_con = {
.con_debug_leave = fbcon_debug_leave,
};
-static struct notifier_block fbcon_event_notifier = {
- .notifier_call = fbcon_event_notify,
-};
-
static ssize_t store_rotate(struct device *device,
struct device_attribute *attr, const char *buf,
size_t count)
@@ -3443,9 +3419,6 @@ static ssize_t store_rotate(struct device *device,
int rotate, idx;
char **last = NULL;
- if (fbcon_has_exited)
- return count;
-
console_lock();
idx = con2fb_map[fg_console];
@@ -3468,9 +3441,6 @@ static ssize_t store_rotate_all(struct device *device,
int rotate, idx;
char **last = NULL;
- if (fbcon_has_exited)
- return count;
-
console_lock();
idx = con2fb_map[fg_console];
@@ -3491,9 +3461,6 @@ static ssize_t show_rotate(struct device *device,
struct fb_info *info;
int rotate = 0, idx;
- if (fbcon_has_exited)
- return 0;
-
console_lock();
idx = con2fb_map[fg_console];
@@ -3514,9 +3481,6 @@ static ssize_t show_cursor_blink(struct device *device,
struct fbcon_ops *ops;
int idx, blink = -1;
- if (fbcon_has_exited)
- return 0;
-
console_lock();
idx = con2fb_map[fg_console];
@@ -3543,9 +3507,6 @@ static ssize_t store_cursor_blink(struct device *device,
int blink, idx;
char **last = NULL;
- if (fbcon_has_exited)
- return count;
-
console_lock();
idx = con2fb_map[fg_console];
@@ -3668,9 +3629,6 @@ static void fbcon_exit(void)
struct fb_info *info;
int i, j, mapped;
- if (fbcon_has_exited)
- return;
-
#ifdef CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER
if (deferred_takeover) {
dummycon_unregister_output_notifier(&fbcon_output_nb);
@@ -3695,7 +3653,7 @@ static void fbcon_exit(void)
for (j = first_fb_vc; j <= last_fb_vc; j++) {
if (con2fb_map[j] == i) {
mapped = 1;
- break;
+ con2fb_map[j] = -1;
}
}
@@ -3718,8 +3676,6 @@ static void fbcon_exit(void)
info->queue.func = NULL;
}
}
-
- fbcon_has_exited = 1;
}
void __init fb_console_init(void)
@@ -3727,7 +3683,6 @@ void __init fb_console_init(void)
int i;
console_lock();
- fb_register_client(&fbcon_event_notifier);
fbcon_device = device_create(fb_class, NULL, MKDEV(0, 0), NULL,
"fbcon");
@@ -3763,7 +3718,6 @@ static void __exit fbcon_deinit_device(void)
void __exit fb_console_exit(void)
{
console_lock();
- fb_unregister_client(&fbcon_event_notifier);
fbcon_deinit_device();
device_destroy(fb_class, MKDEV(0, 0));
fbcon_exit();
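
The fbcon.c changes above replace the fb notifier consumer with directly exported entry points (fbcon_suspended, fbcon_resumed, fbcon_mode_deleted, fbcon_fb_unbind, fbcon_fb_registered/unregistered, fbcon_fb_blanked, fbcon_new_modelist, fbcon_get_requirement, fbcon_update_vcs, plus the two con2fb ioctl helpers). A minimal sketch of how an fbmem.c-style caller now reaches fbcon after this series; the helper name is invented for illustration, and the assumption is that <linux/fbcon.h> provides no-op stubs for !CONFIG_FRAMEBUFFER_CONSOLE builds (those stubs are not part of the hunks shown here):

#include <linux/fb.h>
#include <linux/fbcon.h>

static void example_mode_change(struct fb_info *info, bool all_vcs)
{
	/* old path: fb_notifier_call_chain(FB_EVENT_MODE_CHANGE[_ALL], &event) */
	fbcon_update_vcs(info, all_vcs);	/* direct call introduced above */
}
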
diff --git a/drivers/video/fbdev/core/fbcon.h b/drivers/video/fbdev/core/fbcon.h
index 21912a3ba32f..20dea853765f 100644
--- a/drivers/video/fbdev/core/fbcon.h
+++ b/drivers/video/fbdev/core/fbcon.h
@@ -25,7 +25,7 @@
* low-level frame buffer device
*/
-struct display {
+struct fbcon_display {
/* Filled in by the low-level console driver */
const u_char *fontdata;
int userfont; /* != 0 if fontdata kmalloc()ed */
@@ -68,7 +68,7 @@ struct fbcon_ops {
struct fb_var_screeninfo var; /* copy of the current fb_var_screeninfo */
struct timer_list cursor_timer; /* Cursor timer */
struct fb_cursor cursor_state;
- struct display *p;
+ struct fbcon_display *p;
struct fb_info *info;
int currcon; /* Current VC. */
int cur_blink_jiffies;
@@ -225,7 +225,7 @@ extern int soft_cursor(struct fb_info *info, struct fb_cursor *cursor);
#define FBCON_ATTRIBUTE_REVERSE 2
#define FBCON_ATTRIBUTE_BOLD 4
-static inline int real_y(struct display *p, int ypos)
+static inline int real_y(struct fbcon_display *p, int ypos)
{
int rows = p->vrows;
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index d1949c92be98..64dd732021d8 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -80,17 +80,6 @@ static void put_fb_info(struct fb_info *fb_info)
fb_info->fbops->fb_destroy(fb_info);
}
-int lock_fb_info(struct fb_info *info)
-{
- mutex_lock(&info->lock);
- if (!info->fbops) {
- mutex_unlock(&info->lock);
- return 0;
- }
- return 1;
-}
-EXPORT_SYMBOL(lock_fb_info);
-
/*
* Helpers
*/
@@ -943,16 +932,13 @@ EXPORT_SYMBOL(fb_pan_display);
static int fb_check_caps(struct fb_info *info, struct fb_var_screeninfo *var,
u32 activate)
{
- struct fb_event event;
struct fb_blit_caps caps, fbcaps;
int err = 0;
memset(&caps, 0, sizeof(caps));
memset(&fbcaps, 0, sizeof(fbcaps));
caps.flags = (activate & FB_ACTIVATE_ALL) ? 1 : 0;
- event.info = info;
- event.data = &caps;
- fb_notifier_call_chain(FB_EVENT_GET_REQ, &event);
+ fbcon_get_requirement(info, &caps);
info->fbops->fb_get_caps(info, &fbcaps, var);
if (((fbcaps.x ^ caps.x) & caps.x) ||
@@ -968,6 +954,10 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
{
int flags = info->flags;
int ret = 0;
+ u32 activate;
+ struct fb_var_screeninfo old_var;
+ struct fb_videomode mode;
+ struct fb_event event;
if (var->activate & FB_ACTIVATE_INV_MODE) {
struct fb_videomode mode1, mode2;
@@ -977,100 +967,90 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
/* make sure we don't delete the videomode of current var */
ret = fb_mode_is_equal(&mode1, &mode2);
- if (!ret) {
- struct fb_event event;
-
- event.info = info;
- event.data = &mode1;
- ret = fb_notifier_call_chain(FB_EVENT_MODE_DELETE, &event);
- }
+ if (!ret)
+ fbcon_mode_deleted(info, &mode1);
if (!ret)
- fb_delete_videomode(&mode1, &info->modelist);
+ fb_delete_videomode(&mode1, &info->modelist);
- ret = (ret) ? -EINVAL : 0;
- goto done;
+ return ret ? -EINVAL : 0;
}
- if ((var->activate & FB_ACTIVATE_FORCE) ||
- memcmp(&info->var, var, sizeof(struct fb_var_screeninfo))) {
- u32 activate = var->activate;
+ if (!(var->activate & FB_ACTIVATE_FORCE) &&
+ !memcmp(&info->var, var, sizeof(struct fb_var_screeninfo)))
+ return 0;
- /* When using FOURCC mode, make sure the red, green, blue and
- * transp fields are set to 0.
- */
- if ((info->fix.capabilities & FB_CAP_FOURCC) &&
- var->grayscale > 1) {
- if (var->red.offset || var->green.offset ||
- var->blue.offset || var->transp.offset ||
- var->red.length || var->green.length ||
- var->blue.length || var->transp.length ||
- var->red.msb_right || var->green.msb_right ||
- var->blue.msb_right || var->transp.msb_right)
- return -EINVAL;
- }
+ activate = var->activate;
- if (!info->fbops->fb_check_var) {
- *var = info->var;
- goto done;
- }
+ /* When using FOURCC mode, make sure the red, green, blue and
+ * transp fields are set to 0.
+ */
+ if ((info->fix.capabilities & FB_CAP_FOURCC) &&
+ var->grayscale > 1) {
+ if (var->red.offset || var->green.offset ||
+ var->blue.offset || var->transp.offset ||
+ var->red.length || var->green.length ||
+ var->blue.length || var->transp.length ||
+ var->red.msb_right || var->green.msb_right ||
+ var->blue.msb_right || var->transp.msb_right)
+ return -EINVAL;
+ }
- ret = info->fbops->fb_check_var(var, info);
+ if (!info->fbops->fb_check_var) {
+ *var = info->var;
+ return 0;
+ }
- if (ret)
- goto done;
+ ret = info->fbops->fb_check_var(var, info);
- if ((var->activate & FB_ACTIVATE_MASK) == FB_ACTIVATE_NOW) {
- struct fb_var_screeninfo old_var;
- struct fb_videomode mode;
+ if (ret)
+ return ret;
- if (info->fbops->fb_get_caps) {
- ret = fb_check_caps(info, var, activate);
+ if ((var->activate & FB_ACTIVATE_MASK) != FB_ACTIVATE_NOW)
+ return 0;
- if (ret)
- goto done;
- }
+ if (info->fbops->fb_get_caps) {
+ ret = fb_check_caps(info, var, activate);
- old_var = info->var;
- info->var = *var;
+ if (ret)
+ return ret;
+ }
- if (info->fbops->fb_set_par) {
- ret = info->fbops->fb_set_par(info);
+ old_var = info->var;
+ info->var = *var;
- if (ret) {
- info->var = old_var;
- printk(KERN_WARNING "detected "
- "fb_set_par error, "
- "error code: %d\n", ret);
- goto done;
- }
- }
+ if (info->fbops->fb_set_par) {
+ ret = info->fbops->fb_set_par(info);
- fb_pan_display(info, &info->var);
- fb_set_cmap(&info->cmap, info);
- fb_var_to_videomode(&mode, &info->var);
+ if (ret) {
+ info->var = old_var;
+ printk(KERN_WARNING "detected "
+ "fb_set_par error, "
+ "error code: %d\n", ret);
+ return ret;
+ }
+ }
- if (info->modelist.prev && info->modelist.next &&
- !list_empty(&info->modelist))
- ret = fb_add_videomode(&mode, &info->modelist);
+ fb_pan_display(info, &info->var);
+ fb_set_cmap(&info->cmap, info);
+ fb_var_to_videomode(&mode, &info->var);
- if (!ret && (flags & FBINFO_MISC_USEREVENT)) {
- struct fb_event event;
- int evnt = (activate & FB_ACTIVATE_ALL) ?
- FB_EVENT_MODE_CHANGE_ALL :
- FB_EVENT_MODE_CHANGE;
+ if (info->modelist.prev && info->modelist.next &&
+ !list_empty(&info->modelist))
+ ret = fb_add_videomode(&mode, &info->modelist);
- info->flags &= ~FBINFO_MISC_USEREVENT;
- event.info = info;
- event.data = &mode;
- fb_notifier_call_chain(evnt, &event);
- }
- }
- }
+ if (ret)
+ return ret;
- done:
- return ret;
+ event.info = info;
+ event.data = &mode;
+ fb_notifier_call_chain(FB_EVENT_MODE_CHANGE, &event);
+
+ if (flags & FBINFO_MISC_USEREVENT)
+ fbcon_update_vcs(info, activate & FB_ACTIVATE_ALL);
+
+ return 0;
}
EXPORT_SYMBOL(fb_set_var);
@@ -1112,17 +1092,14 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
struct fb_ops *fb;
struct fb_var_screeninfo var;
struct fb_fix_screeninfo fix;
- struct fb_con2fbmap con2fb;
struct fb_cmap cmap_from;
struct fb_cmap_user cmap;
- struct fb_event event;
void __user *argp = (void __user *)arg;
long ret = 0;
switch (cmd) {
case FBIOGET_VSCREENINFO:
- if (!lock_fb_info(info))
- return -ENODEV;
+ lock_fb_info(info);
var = info->var;
unlock_fb_info(info);
@@ -1132,10 +1109,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
if (copy_from_user(&var, argp, sizeof(var)))
return -EFAULT;
console_lock();
- if (!lock_fb_info(info)) {
- console_unlock();
- return -ENODEV;
- }
+ lock_fb_info(info);
info->flags |= FBINFO_MISC_USEREVENT;
ret = fb_set_var(info, &var);
info->flags &= ~FBINFO_MISC_USEREVENT;
@@ -1145,8 +1119,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
ret = -EFAULT;
break;
case FBIOGET_FSCREENINFO:
- if (!lock_fb_info(info))
- return -ENODEV;
+ lock_fb_info(info);
fix = info->fix;
if (info->flags & FBINFO_HIDE_SMEM_START)
fix.smem_start = 0;
@@ -1162,8 +1135,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
case FBIOGETCMAP:
if (copy_from_user(&cmap, argp, sizeof(cmap)))
return -EFAULT;
- if (!lock_fb_info(info))
- return -ENODEV;
+ lock_fb_info(info);
cmap_from = info->cmap;
unlock_fb_info(info);
ret = fb_cmap_to_user(&cmap_from, &cmap);
@@ -1172,10 +1144,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
if (copy_from_user(&var, argp, sizeof(var)))
return -EFAULT;
console_lock();
- if (!lock_fb_info(info)) {
- console_unlock();
- return -ENODEV;
- }
+ lock_fb_info(info);
ret = fb_pan_display(info, &var);
unlock_fb_info(info);
console_unlock();
@@ -1186,58 +1155,22 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
ret = -EINVAL;
break;
case FBIOGET_CON2FBMAP:
- if (copy_from_user(&con2fb, argp, sizeof(con2fb)))
- return -EFAULT;
- if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
- return -EINVAL;
- con2fb.framebuffer = -1;
- event.data = &con2fb;
- if (!lock_fb_info(info))
- return -ENODEV;
- event.info = info;
- fb_notifier_call_chain(FB_EVENT_GET_CONSOLE_MAP, &event);
- unlock_fb_info(info);
- ret = copy_to_user(argp, &con2fb, sizeof(con2fb)) ? -EFAULT : 0;
+ ret = fbcon_get_con2fb_map_ioctl(argp);
break;
case FBIOPUT_CON2FBMAP:
- if (copy_from_user(&con2fb, argp, sizeof(con2fb)))
- return -EFAULT;
- if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
- return -EINVAL;
- if (con2fb.framebuffer >= FB_MAX)
- return -EINVAL;
- if (!registered_fb[con2fb.framebuffer])
- request_module("fb%d", con2fb.framebuffer);
- if (!registered_fb[con2fb.framebuffer]) {
- ret = -EINVAL;
- break;
- }
- event.data = &con2fb;
- console_lock();
- if (!lock_fb_info(info)) {
- console_unlock();
- return -ENODEV;
- }
- event.info = info;
- ret = fb_notifier_call_chain(FB_EVENT_SET_CONSOLE_MAP, &event);
- unlock_fb_info(info);
- console_unlock();
+ ret = fbcon_set_con2fb_map_ioctl(argp);
break;
case FBIOBLANK:
console_lock();
- if (!lock_fb_info(info)) {
- console_unlock();
- return -ENODEV;
- }
- info->flags |= FBINFO_MISC_USEREVENT;
+ lock_fb_info(info);
ret = fb_blank(info, arg);
- info->flags &= ~FBINFO_MISC_USEREVENT;
+ /* might again call into fb_blank */
+ fbcon_fb_blanked(info, arg);
unlock_fb_info(info);
console_unlock();
break;
default:
- if (!lock_fb_info(info))
- return -ENODEV;
+ lock_fb_info(info);
fb = info->fbops;
if (fb->fb_ioctl)
ret = fb->fb_ioctl(info, cmd, arg);
@@ -1357,8 +1290,7 @@ static int fb_get_fscreeninfo(struct fb_info *info, unsigned int cmd,
{
struct fb_fix_screeninfo fix;
- if (!lock_fb_info(info))
- return -ENODEV;
+ lock_fb_info(info);
fix = info->fix;
if (info->flags & FBINFO_HIDE_SMEM_START)
fix.smem_start = 0;
@@ -1418,8 +1350,6 @@ fb_mmap(struct file *file, struct vm_area_struct * vma)
if (!info)
return -ENODEV;
fb = info->fbops;
- if (!fb)
- return -ENODEV;
mutex_lock(&info->mm_lock);
if (fb->fb_mmap) {
int res;
@@ -1483,7 +1413,7 @@ __releases(&info->lock)
if (IS_ERR(info))
return PTR_ERR(info);
- mutex_lock(&info->lock);
+ lock_fb_info(info);
if (!try_module_get(info->fbops->owner)) {
res = -ENODEV;
goto out;
@@ -1499,7 +1429,7 @@ __releases(&info->lock)
fb_deferred_io_open(info, inode, file);
#endif
out:
- mutex_unlock(&info->lock);
+ unlock_fb_info(info);
if (res)
put_fb_info(info);
return res;
@@ -1512,11 +1442,11 @@ __releases(&info->lock)
{
struct fb_info * const info = file->private_data;
- mutex_lock(&info->lock);
+ lock_fb_info(info);
if (info->fbops->fb_release)
info->fbops->fb_release(info,1);
module_put(info->fbops->owner);
- mutex_unlock(&info->lock);
+ unlock_fb_info(info);
put_fb_info(info);
return 0;
}
@@ -1621,13 +1551,13 @@ static bool fb_do_apertures_overlap(struct apertures_struct *gena,
return false;
}
-static int do_unregister_framebuffer(struct fb_info *fb_info);
+static void do_unregister_framebuffer(struct fb_info *fb_info);
#define VGA_FB_PHYS 0xA0000
-static int do_remove_conflicting_framebuffers(struct apertures_struct *a,
- const char *name, bool primary)
+static void do_remove_conflicting_framebuffers(struct apertures_struct *a,
+ const char *name, bool primary)
{
- int i, ret;
+ int i;
/* check all firmware fbs and kick off if the base addr overlaps */
for_each_registered_fb(i) {
@@ -1643,13 +1573,9 @@ static int do_remove_conflicting_framebuffers(struct apertures_struct *a,
printk(KERN_INFO "fb%d: switching to %s from %s\n",
i, name, registered_fb[i]->fix.id);
- ret = do_unregister_framebuffer(registered_fb[i]);
- if (ret)
- return ret;
+ do_unregister_framebuffer(registered_fb[i]);
}
}
-
- return 0;
}
static bool lockless_register_fb;
@@ -1660,17 +1586,14 @@ MODULE_PARM_DESC(lockless_register_fb,
static int do_register_framebuffer(struct fb_info *fb_info)
{
int i, ret;
- struct fb_event event;
struct fb_videomode mode;
if (fb_check_foreignness(fb_info))
return -ENOSYS;
- ret = do_remove_conflicting_framebuffers(fb_info->apertures,
- fb_info->fix.id,
- fb_is_primary_device(fb_info));
- if (ret)
- return ret;
+ do_remove_conflicting_framebuffers(fb_info->apertures,
+ fb_info->fix.id,
+ fb_is_primary_device(fb_info));
if (num_registered_fb == FB_MAX)
return -ENXIO;
@@ -1723,20 +1646,22 @@ static int do_register_framebuffer(struct fb_info *fb_info)
fb_add_videomode(&mode, &fb_info->modelist);
registered_fb[i] = fb_info;
- event.info = fb_info;
+#ifdef CONFIG_GUMSTIX_AM200EPD
+ {
+ struct fb_event event;
+ event.info = fb_info;
+ fb_notifier_call_chain(FB_EVENT_FB_REGISTERED, &event);
+ }
+#endif
+
if (!lockless_register_fb)
console_lock();
else
atomic_inc(&ignore_console_lock_warning);
- if (!lock_fb_info(fb_info)) {
- ret = -ENODEV;
- goto unlock_console;
- }
- ret = 0;
-
- fb_notifier_call_chain(FB_EVENT_FB_REGISTERED, &event);
+ lock_fb_info(fb_info);
+ ret = fbcon_fb_registered(fb_info);
unlock_fb_info(fb_info);
-unlock_console:
+
if (!lockless_register_fb)
console_unlock();
else
@@ -1744,44 +1669,44 @@ unlock_console:
return ret;
}
-static int unbind_console(struct fb_info *fb_info)
+static void unbind_console(struct fb_info *fb_info)
{
- struct fb_event event;
- int ret;
int i = fb_info->node;
- if (i < 0 || i >= FB_MAX || registered_fb[i] != fb_info)
- return -EINVAL;
+ if (WARN_ON(i < 0 || i >= FB_MAX || registered_fb[i] != fb_info))
+ return;
console_lock();
- if (!lock_fb_info(fb_info)) {
- console_unlock();
- return -ENODEV;
- }
-
- event.info = fb_info;
- ret = fb_notifier_call_chain(FB_EVENT_FB_UNBIND, &event);
+ lock_fb_info(fb_info);
+ fbcon_fb_unbind(fb_info);
unlock_fb_info(fb_info);
console_unlock();
-
- return ret;
}
-static int __unlink_framebuffer(struct fb_info *fb_info);
-
-static int do_unregister_framebuffer(struct fb_info *fb_info)
+void unlink_framebuffer(struct fb_info *fb_info)
{
- struct fb_event event;
- int ret;
+ int i;
+
+ i = fb_info->node;
+ if (WARN_ON(i < 0 || i >= FB_MAX || registered_fb[i] != fb_info))
+ return;
- ret = unbind_console(fb_info);
+ if (!fb_info->dev)
+ return;
- if (ret)
- return -EINVAL;
+ device_destroy(fb_class, MKDEV(FB_MAJOR, i));
pm_vt_switch_unregister(fb_info->dev);
- __unlink_framebuffer(fb_info);
+ unbind_console(fb_info);
+
+ fb_info->dev = NULL;
+}
+EXPORT_SYMBOL(unlink_framebuffer);
+
+static void do_unregister_framebuffer(struct fb_info *fb_info)
+{
+ unlink_framebuffer(fb_info);
if (fb_info->pixmap.addr &&
(fb_info->pixmap.flags & FB_PIXMAP_DEFAULT))
kfree(fb_info->pixmap.addr);
@@ -1789,46 +1714,21 @@ static int do_unregister_framebuffer(struct fb_info *fb_info)
registered_fb[fb_info->node] = NULL;
num_registered_fb--;
fb_cleanup_device(fb_info);
- event.info = fb_info;
+#ifdef CONFIG_GUMSTIX_AM200EPD
+ {
+ struct fb_event event;
+ event.info = fb_info;
+ fb_notifier_call_chain(FB_EVENT_FB_UNREGISTERED, &event);
+ }
+#endif
console_lock();
- fb_notifier_call_chain(FB_EVENT_FB_UNREGISTERED, &event);
+ fbcon_fb_unregistered(fb_info);
console_unlock();
/* this may free fb info */
put_fb_info(fb_info);
- return 0;
}
-static int __unlink_framebuffer(struct fb_info *fb_info)
-{
- int i;
-
- i = fb_info->node;
- if (i < 0 || i >= FB_MAX || registered_fb[i] != fb_info)
- return -EINVAL;
-
- if (fb_info->dev) {
- device_destroy(fb_class, MKDEV(FB_MAJOR, i));
- fb_info->dev = NULL;
- }
-
- return 0;
-}
-
-int unlink_framebuffer(struct fb_info *fb_info)
-{
- int ret;
-
- ret = __unlink_framebuffer(fb_info);
- if (ret)
- return ret;
-
- unbind_console(fb_info);
-
- return 0;
-}
-EXPORT_SYMBOL(unlink_framebuffer);
-
/**
* remove_conflicting_framebuffers - remove firmware-configured framebuffers
* @a: memory range, users of which are to be removed
@@ -1842,7 +1742,6 @@ EXPORT_SYMBOL(unlink_framebuffer);
int remove_conflicting_framebuffers(struct apertures_struct *a,
const char *name, bool primary)
{
- int ret;
bool do_free = false;
if (!a) {
@@ -1856,13 +1755,13 @@ int remove_conflicting_framebuffers(struct apertures_struct *a,
}
mutex_lock(&registration_lock);
- ret = do_remove_conflicting_framebuffers(a, name, primary);
+ do_remove_conflicting_framebuffers(a, name, primary);
mutex_unlock(&registration_lock);
if (do_free)
kfree(a);
- return ret;
+ return 0;
}
EXPORT_SYMBOL(remove_conflicting_framebuffers);
@@ -1959,16 +1858,12 @@ EXPORT_SYMBOL(register_framebuffer);
* that the driver implements fb_open() and fb_release() to
* check that no processes are using the device.
*/
-int
+void
unregister_framebuffer(struct fb_info *fb_info)
{
- int ret;
-
mutex_lock(&registration_lock);
- ret = do_unregister_framebuffer(fb_info);
+ do_unregister_framebuffer(fb_info);
mutex_unlock(&registration_lock);
-
- return ret;
}
EXPORT_SYMBOL(unregister_framebuffer);
@@ -1983,15 +1878,14 @@ EXPORT_SYMBOL(unregister_framebuffer);
*/
void fb_set_suspend(struct fb_info *info, int state)
{
- struct fb_event event;
+ WARN_CONSOLE_UNLOCKED();
- event.info = info;
if (state) {
- fb_notifier_call_chain(FB_EVENT_SUSPEND, &event);
+ fbcon_suspended(info);
info->state = FBINFO_STATE_SUSPENDED;
} else {
info->state = FBINFO_STATE_RUNNING;
- fb_notifier_call_chain(FB_EVENT_RESUME, &event);
+ fbcon_resumed(info);
}
}
EXPORT_SYMBOL(fb_set_suspend);
@@ -2059,7 +1953,6 @@ subsys_initcall(fbmem_init);
int fb_new_modelist(struct fb_info *info)
{
- struct fb_event event;
struct fb_var_screeninfo var = info->var;
struct list_head *pos, *n;
struct fb_modelist *modelist;
@@ -2079,14 +1972,12 @@ int fb_new_modelist(struct fb_info *info)
}
}
- err = 1;
+ if (list_empty(&info->modelist))
+ return 1;
- if (!list_empty(&info->modelist)) {
- event.info = info;
- err = fb_notifier_call_chain(FB_EVENT_NEW_MODELIST, &event);
- }
+ fbcon_new_modelist(info);
- return err;
+ return 0;
}
MODULE_LICENSE("GPL");
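
With the failable lock_fb_info() removed from fbmem.c above, callers no longer check its return value. The helper presumably shrinks to a plain mutex wrapper in <linux/fb.h>; that header change is not part of this excerpt, so the exact form below is an assumption:

/* assumed replacement in <linux/fb.h>: taking the lock can no longer fail */
static inline void lock_fb_info(struct fb_info *info)
{
	mutex_lock(&info->lock);
}

static inline void unlock_fb_info(struct fb_info *info)
{
	mutex_unlock(&info->lock);
}
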
diff --git a/drivers/video/fbdev/core/fbsysfs.c b/drivers/video/fbdev/core/fbsysfs.c
index 954ed99e80da..d54c88f88991 100644
--- a/drivers/video/fbdev/core/fbsysfs.c
+++ b/drivers/video/fbdev/core/fbsysfs.c
@@ -14,6 +14,7 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/fb.h>
+#include <linux/fbcon.h>
#include <linux/console.h>
#include <linux/module.h>
@@ -175,10 +176,7 @@ static ssize_t store_modes(struct device *device,
return -EINVAL;
console_lock();
- if (!lock_fb_info(fb_info)) {
- console_unlock();
- return -ENODEV;
- }
+ lock_fb_info(fb_info);
list_splice(&fb_info->modelist, &old_list);
fb_videomode_to_modelist((const struct fb_videomode *)buf, i,
@@ -304,12 +302,13 @@ static ssize_t store_blank(struct device *device,
{
struct fb_info *fb_info = dev_get_drvdata(device);
char *last = NULL;
- int err;
+ int err, arg;
+ arg = simple_strtoul(buf, &last, 0);
console_lock();
- fb_info->flags |= FBINFO_MISC_USEREVENT;
- err = fb_blank(fb_info, simple_strtoul(buf, &last, 0));
- fb_info->flags &= ~FBINFO_MISC_USEREVENT;
+ err = fb_blank(fb_info, arg);
+ /* might again call into fb_blank */
+ fbcon_fb_blanked(fb_info, arg);
console_unlock();
if (err < 0)
return err;
@@ -405,10 +404,7 @@ static ssize_t store_fbstate(struct device *device,
state = simple_strtoul(buf, &last, 0);
console_lock();
- if (!lock_fb_info(fb_info)) {
- console_unlock();
- return -ENODEV;
- }
+ lock_fb_info(fb_info);
fb_set_suspend(fb_info, (int)state);
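
Both the FBIOBLANK ioctl and the sysfs blank attribute now follow the same pattern: blank the hardware, then notify fbcon directly instead of raising FB_EVENT_BLANK/FB_EVENT_CONBLANK behind FBINFO_MISC_USEREVENT. A condensed sketch of that sequence, with the wrapper name invented for illustration:

static int example_blank(struct fb_info *info, int blank)
{
	int err;

	console_lock();
	err = fb_blank(info, blank);
	/* fbcon may call back into fb_blank through the console driver */
	fbcon_fb_blanked(info, blank);
	console_unlock();

	return err;
}
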
diff --git a/drivers/video/fbdev/cyber2000fb.c b/drivers/video/fbdev/cyber2000fb.c
index 0de12be823c0..3a2d9ff0aa42 100644
--- a/drivers/video/fbdev/cyber2000fb.c
+++ b/drivers/video/fbdev/cyber2000fb.c
@@ -58,7 +58,6 @@
struct cfb_info {
struct fb_info fb;
struct display_switch *dispsw;
- struct display *display;
unsigned char __iomem *region;
unsigned char __iomem *regs;
u_int id;
@@ -1639,10 +1638,6 @@ static void cyberpro_common_resume(struct cfb_info *cfb)
}
/*
- * PCI specific support.
- */
-#ifdef CONFIG_PCI
-/*
* We need to wake up the CyberPro, and make sure its in linear memory
* mode. Unfortunately, this is specific to the platform and card that
* we are running on.
@@ -1858,7 +1853,6 @@ static struct pci_driver cyberpro_driver = {
.resume = cyberpro_pci_resume,
.id_table = cyberpro_pci_table
};
-#endif
/*
* I don't think we can use the "module_init" stuff here because
diff --git a/drivers/video/fbdev/da8xx-fb.c b/drivers/video/fbdev/da8xx-fb.c
index 9ea817ac1d81..b1cf248f3291 100644
--- a/drivers/video/fbdev/da8xx-fb.c
+++ b/drivers/video/fbdev/da8xx-fb.c
@@ -1387,7 +1387,6 @@ static int fb_probe(struct platform_device *device)
da8xx_fb_info = framebuffer_alloc(sizeof(struct da8xx_fb_par),
&device->dev);
if (!da8xx_fb_info) {
- dev_dbg(&device->dev, "Memory allocation failed for fb_info\n");
ret = -ENOMEM;
goto err_pm_runtime_disable;
}
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index 9f39f0c360e0..04a22663b4fb 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -169,6 +169,11 @@ static void efifb_show_boot_graphics(struct fb_info *info)
return;
}
+ if (bgrt_tab.status & 0x06) {
+ pr_info("efifb: BGRT rotation bits set, not showing boot graphics\n");
+ return;
+ }
+
/* Avoid flashing the logo if we're going to print std probe messages */
if (console_loglevel > CONSOLE_LOGLEVEL_QUIET)
return;
@@ -448,7 +453,6 @@ static int efifb_probe(struct platform_device *dev)
info = framebuffer_alloc(sizeof(u32) * 16, &dev->dev);
if (!info) {
- pr_err("efifb: cannot allocate framebuffer\n");
err = -ENOMEM;
goto err_release_mem;
}
diff --git a/drivers/video/fbdev/gbefb.c b/drivers/video/fbdev/gbefb.c
index 3fcb33232ba3..b9f6a82a0495 100644
--- a/drivers/video/fbdev/gbefb.c
+++ b/drivers/video/fbdev/gbefb.c
@@ -39,9 +39,7 @@ struct gbefb_par {
int valid;
};
-#ifdef CONFIG_SGI_IP32
#define GBE_BASE 0x16000000 /* SGI O2 */
-#endif
/* macro for fastest write-though access to the framebuffer */
#ifdef CONFIG_MIPS
@@ -51,10 +49,6 @@ struct gbefb_par {
#define pgprot_fb(_prot) (((_prot) & (~_CACHE_MASK)) | _CACHE_CACHABLE_NO_WA)
#endif
#endif
-#ifdef CONFIG_X86
-#define pgprot_fb(_prot) (((_prot) & ~_PAGE_CACHE_MASK) | \
- cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS))
-#endif
/*
* RAM we reserve for the frame buffer. This defines the maximum screen
@@ -279,7 +273,7 @@ static void gbe_turn_off(void)
val = 0;
SET_GBE_FIELD(VT_XY, FREEZE, val, 1);
gbe->vt_xy = val;
- udelay(10000);
+ mdelay(10);
for (i = 0; i < 10000; i++) {
val = gbe->vt_xy;
if (GET_GBE_FIELD(VT_XY, FREEZE, val) != 1)
@@ -294,7 +288,7 @@ static void gbe_turn_off(void)
val = gbe->dotclock;
SET_GBE_FIELD(DOTCLK, RUN, val, 0);
gbe->dotclock = val;
- udelay(10000);
+ mdelay(10);
for (i = 0; i < 10000; i++) {
val = gbe->dotclock;
if (GET_GBE_FIELD(DOTCLK, RUN, val))
@@ -331,7 +325,7 @@ static void gbe_turn_on(void)
val = gbe->dotclock;
SET_GBE_FIELD(DOTCLK, RUN, val, 1);
gbe->dotclock = val;
- udelay(10000);
+ mdelay(10);
for (i = 0; i < 10000; i++) {
val = gbe->dotclock;
if (GET_GBE_FIELD(DOTCLK, RUN, val) != 1)
@@ -346,7 +340,7 @@ static void gbe_turn_on(void)
val = 0;
SET_GBE_FIELD(VT_XY, FREEZE, val, 0);
gbe->vt_xy = val;
- udelay(10000);
+ mdelay(10);
for (i = 0; i < 10000; i++) {
val = gbe->vt_xy;
if (GET_GBE_FIELD(VT_XY, FREEZE, val))
@@ -547,7 +541,7 @@ static void gbe_set_timing_info(struct gbe_timing_info *timing)
SET_GBE_FIELD(DOTCLK, P, val, timing->pll_p);
SET_GBE_FIELD(DOTCLK, RUN, val, 0); /* do not start yet */
gbe->dotclock = val;
- udelay(10000);
+ mdelay(10);
/* setup pixel counter */
val = 0;
@@ -1018,9 +1012,10 @@ static int gbefb_mmap(struct fb_info *info,
/* remap using the fastest write-through mode on architecture */
/* try not polluting the cache when possible */
+#ifdef CONFIG_MIPS
pgprot_val(vma->vm_page_prot) =
pgprot_fb(pgprot_val(vma->vm_page_prot));
-
+#endif
/* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */
/* look for the starting tile */
diff --git a/drivers/video/fbdev/grvga.c b/drivers/video/fbdev/grvga.c
index df5d546e57e9..d22e8b0c906d 100644
--- a/drivers/video/fbdev/grvga.c
+++ b/drivers/video/fbdev/grvga.c
@@ -336,10 +336,8 @@ static int grvga_probe(struct platform_device *dev)
char *options = NULL, *mode_opt = NULL;
info = framebuffer_alloc(sizeof(struct grvga_par), &dev->dev);
- if (!info) {
- dev_err(&dev->dev, "framebuffer_alloc failed\n");
+ if (!info)
return -ENOMEM;
- }
/* Expecting: "grvga: modestring, [addr:<framebuffer physical address>], [size:<framebuffer size>]
*
diff --git a/drivers/video/fbdev/gxt4500.c b/drivers/video/fbdev/gxt4500.c
index 37527a10b954..c7502fd8f447 100644
--- a/drivers/video/fbdev/gxt4500.c
+++ b/drivers/video/fbdev/gxt4500.c
@@ -643,10 +643,9 @@ static int gxt4500_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
info = framebuffer_alloc(sizeof(struct gxt4500_par), &pdev->dev);
- if (!info) {
- dev_err(&pdev->dev, "gxt4500: cannot alloc FB info record\n");
+ if (!info)
goto err_free_fb;
- }
+
par = info->par;
cardtype = ent->driver_data;
par->refclk_ps = cardinfo[cardtype].refclk_ps;
diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
index 00f5bdcc6c6f..2dcb7c58b31e 100644
--- a/drivers/video/fbdev/hyperv_fb.c
+++ b/drivers/video/fbdev/hyperv_fb.c
@@ -762,10 +762,8 @@ static int hvfb_probe(struct hv_device *hdev,
int ret;
info = framebuffer_alloc(sizeof(struct hvfb_par), &hdev->device);
- if (!info) {
- pr_err("No memory for framebuffer info\n");
+ if (!info)
return -ENOMEM;
- }
par = info->par;
par->info = info;
diff --git a/drivers/video/fbdev/i740fb.c b/drivers/video/fbdev/i740fb.c
index 24d3280a5b5f..347cf8babc3e 100644
--- a/drivers/video/fbdev/i740fb.c
+++ b/drivers/video/fbdev/i740fb.c
@@ -1006,10 +1006,8 @@ static int i740fb_probe(struct pci_dev *dev, const struct pci_device_id *ent)
u8 *edid;
info = framebuffer_alloc(sizeof(struct i740fb_par), &(dev->dev));
- if (!info) {
- dev_err(&(dev->dev), "cannot allocate framebuffer\n");
+ if (!info)
return -ENOMEM;
- }
par = info->par;
mutex_init(&par->open_lock);
diff --git a/drivers/video/fbdev/imsttfb.c b/drivers/video/fbdev/imsttfb.c
index 35bba3c2036d..58b01c7d9056 100644
--- a/drivers/video/fbdev/imsttfb.c
+++ b/drivers/video/fbdev/imsttfb.c
@@ -1477,11 +1477,8 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
printk(KERN_ERR "imsttfb: no OF node for pci device\n");
info = framebuffer_alloc(sizeof(struct imstt_par), &pdev->dev);
-
- if (!info) {
- printk(KERN_ERR "imsttfb: Can't allocate memory\n");
+ if (!info)
return -ENOMEM;
- }
par = info->par;
diff --git a/drivers/video/fbdev/imxfb.c b/drivers/video/fbdev/imxfb.c
index c4eb8661f751..b3286d1fa543 100644
--- a/drivers/video/fbdev/imxfb.c
+++ b/drivers/video/fbdev/imxfb.c
@@ -974,10 +974,9 @@ static int imxfb_probe(struct platform_device *pdev)
}
fbi->map_size = PAGE_ALIGN(info->fix.smem_len);
- info->screen_base = dma_alloc_wc(&pdev->dev, fbi->map_size,
- &fbi->map_dma, GFP_KERNEL);
-
- if (!info->screen_base) {
+ info->screen_buffer = dma_alloc_wc(&pdev->dev, fbi->map_size,
+ &fbi->map_dma, GFP_KERNEL);
+ if (!info->screen_buffer) {
dev_err(&pdev->dev, "Failed to allocate video RAM: %d\n", ret);
ret = -ENOMEM;
goto failed_map;
@@ -1046,7 +1045,7 @@ failed_cmap:
if (pdata && pdata->exit)
pdata->exit(fbi->pdev);
failed_platform_init:
- dma_free_wc(&pdev->dev, fbi->map_size, info->screen_base,
+ dma_free_wc(&pdev->dev, fbi->map_size, info->screen_buffer,
fbi->map_dma);
failed_map:
iounmap(fbi->regs);
@@ -1077,7 +1076,7 @@ static int imxfb_remove(struct platform_device *pdev)
pdata = dev_get_platdata(&pdev->dev);
if (pdata && pdata->exit)
pdata->exit(fbi->pdev);
- dma_free_wc(&pdev->dev, fbi->map_size, info->screen_base,
+ dma_free_wc(&pdev->dev, fbi->map_size, info->screen_buffer,
fbi->map_dma);
iounmap(fbi->regs);
release_mem_region(res->start, resource_size(res));
diff --git a/drivers/video/fbdev/intelfb/intelfbdrv.c b/drivers/video/fbdev/intelfb/intelfbdrv.c
index d7463a2a5d83..a76c61512c60 100644
--- a/drivers/video/fbdev/intelfb/intelfbdrv.c
+++ b/drivers/video/fbdev/intelfb/intelfbdrv.c
@@ -491,10 +491,9 @@ static int intelfb_pci_register(struct pci_dev *pdev,
}
info = framebuffer_alloc(sizeof(struct intelfb_info), &pdev->dev);
- if (!info) {
- ERR_MSG("Could not allocate memory for intelfb_info.\n");
- return -ENODEV;
- }
+ if (!info)
+ return -ENOMEM;
+
if (fb_alloc_cmap(&info->cmap, 256, 1) < 0) {
ERR_MSG("Could not allocate cmap for intelfb_info.\n");
goto err_out_cmap;
diff --git a/drivers/video/fbdev/jz4740_fb.c b/drivers/video/fbdev/jz4740_fb.c
index 145095655cc2..0b6fa25f6924 100644
--- a/drivers/video/fbdev/jz4740_fb.c
+++ b/drivers/video/fbdev/jz4740_fb.c
@@ -457,7 +457,6 @@ static int jzfb_alloc_devmem(struct jzfb *jzfb)
{
int max_videosize = 0;
struct fb_videomode *mode = jzfb->pdata->modes;
- void *page;
int i;
for (i = 0; i < jzfb->pdata->num_modes; ++mode, ++i) {
@@ -482,12 +481,6 @@ static int jzfb_alloc_devmem(struct jzfb *jzfb)
if (!jzfb->vidmem)
goto err_free_framedesc;
- for (page = jzfb->vidmem;
- page < jzfb->vidmem + PAGE_ALIGN(jzfb->vidmem_size);
- page += PAGE_SIZE) {
- SetPageReserved(virt_to_page(page));
- }
-
jzfb->framedesc->next = jzfb->framedesc_phys;
jzfb->framedesc->addr = jzfb->vidmem_phys;
jzfb->framedesc->id = 0xdeafbead;
@@ -535,10 +528,8 @@ static int jzfb_probe(struct platform_device *pdev)
}
fb = framebuffer_alloc(sizeof(struct jzfb), &pdev->dev);
- if (!fb) {
- dev_err(&pdev->dev, "Failed to allocate framebuffer device\n");
+ if (!fb)
return -ENOMEM;
- }
fb->fbops = &jzfb_ops;
fb->flags = FBINFO_DEFAULT;
diff --git a/drivers/video/fbdev/matrox/matroxfb_base.c b/drivers/video/fbdev/matrox/matroxfb_base.c
index c76bef078c75..1a555f70923a 100644
--- a/drivers/video/fbdev/matrox/matroxfb_base.c
+++ b/drivers/video/fbdev/matrox/matroxfb_base.c
@@ -2502,7 +2502,7 @@ MODULE_PARM_DESC(nobios, "Disables ROM BIOS (0 or 1=disabled) (default=do not ch
module_param(noinit, int, 0);
MODULE_PARM_DESC(noinit, "Disables W/SG/SD-RAM and bus interface initialization (0 or 1=do not initialize) (default=0)");
module_param(memtype, int, 0);
-MODULE_PARM_DESC(memtype, "Memory type for G200/G400 (see Documentation/fb/matroxfb.txt for explanation) (default=3 for G200, 0 for G400)");
+MODULE_PARM_DESC(memtype, "Memory type for G200/G400 (see Documentation/fb/matroxfb.rst for explanation) (default=3 for G200, 0 for G400)");
module_param(mtrr, int, 0);
MODULE_PARM_DESC(mtrr, "This speeds up video memory accesses (0=disabled or 1) (default=1)");
module_param(sgram, int, 0);
diff --git a/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c b/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
index c0c2600c2167..962c0171d271 100644
--- a/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
+++ b/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
@@ -680,10 +680,8 @@ static int of_platform_mb862xx_probe(struct platform_device *ofdev)
}
info = framebuffer_alloc(sizeof(struct mb862xxfb_par), dev);
- if (info == NULL) {
- dev_err(dev, "cannot allocate framebuffer\n");
+ if (!info)
return -ENOMEM;
- }
par = info->par;
par->info = info;
@@ -1005,7 +1003,6 @@ static int mb862xx_pci_probe(struct pci_dev *pdev,
info = framebuffer_alloc(sizeof(struct mb862xxfb_par), dev);
if (!info) {
- dev_err(dev, "framebuffer alloc failed\n");
ret = -ENOMEM;
goto dis_dev;
}
diff --git a/drivers/video/fbdev/mbx/mbxfb.c b/drivers/video/fbdev/mbx/mbxfb.c
index 6ded480a69b4..50935252b50b 100644
--- a/drivers/video/fbdev/mbx/mbxfb.c
+++ b/drivers/video/fbdev/mbx/mbxfb.c
@@ -899,10 +899,8 @@ static int mbxfb_probe(struct platform_device *dev)
}
fbi = framebuffer_alloc(sizeof(struct mbxfb_info), &dev->dev);
- if (fbi == NULL) {
- dev_err(&dev->dev, "framebuffer_alloc failed\n");
+ if (!fbi)
return -ENOMEM;
- }
mfbi = fbi->par;
fbi->pseudo_palette = mfbi->pseudo_palette;
diff --git a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
index 87d943f15a12..17174cd7a5bb 100644
--- a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
+++ b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
@@ -433,7 +433,7 @@ static int mmphw_probe(struct platform_device *pdev)
{
struct mmp_mach_plat_info *mi;
struct resource *res;
- int ret, i, size, irq;
+ int ret, i, irq;
struct mmphw_path_plat *path_plat;
struct mmphw_ctrl *ctrl = NULL;
@@ -461,9 +461,9 @@ static int mmphw_probe(struct platform_device *pdev)
}
/* allocate */
- size = sizeof(struct mmphw_ctrl) + sizeof(struct mmphw_path_plat) *
- mi->path_num;
- ctrl = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ ctrl = devm_kzalloc(&pdev->dev,
+ struct_size(ctrl, path_plats, mi->path_num),
+ GFP_KERNEL);
if (!ctrl) {
ret = -ENOMEM;
goto failed;
diff --git a/drivers/video/fbdev/mxsfb.c b/drivers/video/fbdev/mxsfb.c
deleted file mode 100644
index d8bebe35b410..000000000000
--- a/drivers/video/fbdev/mxsfb.c
+++ /dev/null
@@ -1,1028 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2010 Juergen Beisert, Pengutronix
- *
- * This code is based on:
- * Author: Vitaly Wool <vital@embeddedalley.com>
- *
- * Copyright 2008-2009 Freescale Semiconductor, Inc. All Rights Reserved.
- * Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved.
- */
-
-#define DRIVER_NAME "mxsfb"
-
-/**
- * @file
- * @brief LCDIF driver for i.MX23 and i.MX28
- *
- * The LCDIF support four modes of operation
- * - MPU interface (to drive smart displays) -> not supported yet
- * - VSYNC interface (like MPU interface plus Vsync) -> not supported yet
- * - Dotclock interface (to drive LC displays with RGB data and sync signals)
- * - DVI (to drive ITU-R BT656) -> not supported yet
- *
- * This driver depends on a correct setup of the pins used for this purpose
- * (platform specific).
- *
- * For the developer: Don't forget to set the data bus width to the display
- * in the imx_fb_videomode structure. You will else end up with ugly colours.
- * If you fight against jitter you can vary the clock delay. This is a feature
- * of the i.MX28 and you can vary it between 2 ns ... 8 ns in 2 ns steps. Give
- * the required value in the imx_fb_videomode structure.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/of_device.h>
-#include <linux/platform_device.h>
-#include <linux/clk.h>
-#include <linux/dma-mapping.h>
-#include <linux/io.h>
-#include <linux/fb.h>
-#include <linux/regulator/consumer.h>
-#include <video/of_display_timing.h>
-#include <video/of_videomode.h>
-#include <video/videomode.h>
-
-#define REG_SET 4
-#define REG_CLR 8
-
-#define LCDC_CTRL 0x00
-#define LCDC_CTRL1 0x10
-#define LCDC_V4_CTRL2 0x20
-#define LCDC_V3_TRANSFER_COUNT 0x20
-#define LCDC_V4_TRANSFER_COUNT 0x30
-#define LCDC_V4_CUR_BUF 0x40
-#define LCDC_V4_NEXT_BUF 0x50
-#define LCDC_V3_CUR_BUF 0x30
-#define LCDC_V3_NEXT_BUF 0x40
-#define LCDC_TIMING 0x60
-#define LCDC_VDCTRL0 0x70
-#define LCDC_VDCTRL1 0x80
-#define LCDC_VDCTRL2 0x90
-#define LCDC_VDCTRL3 0xa0
-#define LCDC_VDCTRL4 0xb0
-#define LCDC_DVICTRL0 0xc0
-#define LCDC_DVICTRL1 0xd0
-#define LCDC_DVICTRL2 0xe0
-#define LCDC_DVICTRL3 0xf0
-#define LCDC_DVICTRL4 0x100
-#define LCDC_V4_DATA 0x180
-#define LCDC_V3_DATA 0x1b0
-#define LCDC_V4_DEBUG0 0x1d0
-#define LCDC_V3_DEBUG0 0x1f0
-
-#define CTRL_SFTRST (1 << 31)
-#define CTRL_CLKGATE (1 << 30)
-#define CTRL_BYPASS_COUNT (1 << 19)
-#define CTRL_VSYNC_MODE (1 << 18)
-#define CTRL_DOTCLK_MODE (1 << 17)
-#define CTRL_DATA_SELECT (1 << 16)
-#define CTRL_SET_BUS_WIDTH(x) (((x) & 0x3) << 10)
-#define CTRL_GET_BUS_WIDTH(x) (((x) >> 10) & 0x3)
-#define CTRL_SET_WORD_LENGTH(x) (((x) & 0x3) << 8)
-#define CTRL_GET_WORD_LENGTH(x) (((x) >> 8) & 0x3)
-#define CTRL_MASTER (1 << 5)
-#define CTRL_DF16 (1 << 3)
-#define CTRL_DF18 (1 << 2)
-#define CTRL_DF24 (1 << 1)
-#define CTRL_RUN (1 << 0)
-
-#define CTRL1_FIFO_CLEAR (1 << 21)
-#define CTRL1_SET_BYTE_PACKAGING(x) (((x) & 0xf) << 16)
-#define CTRL1_GET_BYTE_PACKAGING(x) (((x) >> 16) & 0xf)
-
-#define TRANSFER_COUNT_SET_VCOUNT(x) (((x) & 0xffff) << 16)
-#define TRANSFER_COUNT_GET_VCOUNT(x) (((x) >> 16) & 0xffff)
-#define TRANSFER_COUNT_SET_HCOUNT(x) ((x) & 0xffff)
-#define TRANSFER_COUNT_GET_HCOUNT(x) ((x) & 0xffff)
-
-
-#define VDCTRL0_ENABLE_PRESENT (1 << 28)
-#define VDCTRL0_VSYNC_ACT_HIGH (1 << 27)
-#define VDCTRL0_HSYNC_ACT_HIGH (1 << 26)
-#define VDCTRL0_DOTCLK_ACT_FALLING (1 << 25)
-#define VDCTRL0_ENABLE_ACT_HIGH (1 << 24)
-#define VDCTRL0_VSYNC_PERIOD_UNIT (1 << 21)
-#define VDCTRL0_VSYNC_PULSE_WIDTH_UNIT (1 << 20)
-#define VDCTRL0_HALF_LINE (1 << 19)
-#define VDCTRL0_HALF_LINE_MODE (1 << 18)
-#define VDCTRL0_SET_VSYNC_PULSE_WIDTH(x) ((x) & 0x3ffff)
-#define VDCTRL0_GET_VSYNC_PULSE_WIDTH(x) ((x) & 0x3ffff)
-
-#define VDCTRL2_SET_HSYNC_PERIOD(x) ((x) & 0x3ffff)
-#define VDCTRL2_GET_HSYNC_PERIOD(x) ((x) & 0x3ffff)
-
-#define VDCTRL3_MUX_SYNC_SIGNALS (1 << 29)
-#define VDCTRL3_VSYNC_ONLY (1 << 28)
-#define SET_HOR_WAIT_CNT(x) (((x) & 0xfff) << 16)
-#define GET_HOR_WAIT_CNT(x) (((x) >> 16) & 0xfff)
-#define SET_VERT_WAIT_CNT(x) ((x) & 0xffff)
-#define GET_VERT_WAIT_CNT(x) ((x) & 0xffff)
-
-#define VDCTRL4_SET_DOTCLK_DLY(x) (((x) & 0x7) << 29) /* v4 only */
-#define VDCTRL4_GET_DOTCLK_DLY(x) (((x) >> 29) & 0x7) /* v4 only */
-#define VDCTRL4_SYNC_SIGNALS_ON (1 << 18)
-#define SET_DOTCLK_H_VALID_DATA_CNT(x) ((x) & 0x3ffff)
-
-#define DEBUG0_HSYNC (1 < 26)
-#define DEBUG0_VSYNC (1 < 25)
-
-#define MIN_XRES 120
-#define MIN_YRES 120
-
-#define RED 0
-#define GREEN 1
-#define BLUE 2
-#define TRANSP 3
-
-#define STMLCDIF_8BIT 1 /** pixel data bus to the display is of 8 bit width */
-#define STMLCDIF_16BIT 0 /** pixel data bus to the display is of 16 bit width */
-#define STMLCDIF_18BIT 2 /** pixel data bus to the display is of 18 bit width */
-#define STMLCDIF_24BIT 3 /** pixel data bus to the display is of 24 bit width */
-
-#define MXSFB_SYNC_DATA_ENABLE_HIGH_ACT (1 << 6)
-#define MXSFB_SYNC_DOTCLK_FALLING_ACT (1 << 7) /* negative edge sampling */
-
-enum mxsfb_devtype {
- MXSFB_V3,
- MXSFB_V4,
-};
-
-/* CPU dependent register offsets */
-struct mxsfb_devdata {
- unsigned transfer_count;
- unsigned cur_buf;
- unsigned next_buf;
- unsigned debug0;
- unsigned hs_wdth_mask;
- unsigned hs_wdth_shift;
- unsigned ipversion;
-};
-
-struct mxsfb_info {
- struct platform_device *pdev;
- struct clk *clk;
- struct clk *clk_axi;
- struct clk *clk_disp_axi;
- void __iomem *base; /* registers */
- unsigned allocated_size;
- int enabled;
- unsigned ld_intf_width;
- unsigned dotclk_delay;
- const struct mxsfb_devdata *devdata;
- u32 sync;
- struct regulator *reg_lcd;
- int pre_init;
-};
-
-#define mxsfb_is_v3(host) (host->devdata->ipversion == 3)
-#define mxsfb_is_v4(host) (host->devdata->ipversion == 4)
-
-static const struct mxsfb_devdata mxsfb_devdata[] = {
- [MXSFB_V3] = {
- .transfer_count = LCDC_V3_TRANSFER_COUNT,
- .cur_buf = LCDC_V3_CUR_BUF,
- .next_buf = LCDC_V3_NEXT_BUF,
- .debug0 = LCDC_V3_DEBUG0,
- .hs_wdth_mask = 0xff,
- .hs_wdth_shift = 24,
- .ipversion = 3,
- },
- [MXSFB_V4] = {
- .transfer_count = LCDC_V4_TRANSFER_COUNT,
- .cur_buf = LCDC_V4_CUR_BUF,
- .next_buf = LCDC_V4_NEXT_BUF,
- .debug0 = LCDC_V4_DEBUG0,
- .hs_wdth_mask = 0x3fff,
- .hs_wdth_shift = 18,
- .ipversion = 4,
- },
-};
-
-/* mask and shift depends on architecture */
-static inline u32 set_hsync_pulse_width(struct mxsfb_info *host, unsigned val)
-{
- return (val & host->devdata->hs_wdth_mask) <<
- host->devdata->hs_wdth_shift;
-}
-
-static inline u32 get_hsync_pulse_width(struct mxsfb_info *host, unsigned val)
-{
- return (val >> host->devdata->hs_wdth_shift) &
- host->devdata->hs_wdth_mask;
-}
-
-static const struct fb_bitfield def_rgb565[] = {
- [RED] = {
- .offset = 11,
- .length = 5,
- },
- [GREEN] = {
- .offset = 5,
- .length = 6,
- },
- [BLUE] = {
- .offset = 0,
- .length = 5,
- },
- [TRANSP] = { /* no support for transparency */
- .length = 0,
- }
-};
-
-static const struct fb_bitfield def_rgb888[] = {
- [RED] = {
- .offset = 16,
- .length = 8,
- },
- [GREEN] = {
- .offset = 8,
- .length = 8,
- },
- [BLUE] = {
- .offset = 0,
- .length = 8,
- },
- [TRANSP] = { /* no support for transparency */
- .length = 0,
- }
-};
-
-static inline unsigned chan_to_field(unsigned chan, struct fb_bitfield *bf)
-{
- chan &= 0xffff;
- chan >>= 16 - bf->length;
- return chan << bf->offset;
-}
-
-static int mxsfb_check_var(struct fb_var_screeninfo *var,
- struct fb_info *fb_info)
-{
- struct mxsfb_info *host = fb_info->par;
- const struct fb_bitfield *rgb = NULL;
-
- if (var->xres < MIN_XRES)
- var->xres = MIN_XRES;
- if (var->yres < MIN_YRES)
- var->yres = MIN_YRES;
-
- var->xres_virtual = var->xres;
-
- var->yres_virtual = var->yres;
-
- switch (var->bits_per_pixel) {
- case 16:
- /* always expect RGB 565 */
- rgb = def_rgb565;
- break;
- case 32:
- switch (host->ld_intf_width) {
- case STMLCDIF_8BIT:
- pr_debug("Unsupported LCD bus width mapping\n");
- break;
- case STMLCDIF_16BIT:
- case STMLCDIF_18BIT:
- case STMLCDIF_24BIT:
- /* real 24 bit */
- rgb = def_rgb888;
- break;
- }
- break;
- default:
- pr_err("Unsupported colour depth: %u\n", var->bits_per_pixel);
- return -EINVAL;
- }
-
- /*
- * Copy the RGB parameters for this display
- * from the machine specific parameters.
- */
- var->red = rgb[RED];
- var->green = rgb[GREEN];
- var->blue = rgb[BLUE];
- var->transp = rgb[TRANSP];
-
- return 0;
-}
-
-static inline void mxsfb_enable_axi_clk(struct mxsfb_info *host)
-{
- if (host->clk_axi)
- clk_prepare_enable(host->clk_axi);
-}
-
-static inline void mxsfb_disable_axi_clk(struct mxsfb_info *host)
-{
- if (host->clk_axi)
- clk_disable_unprepare(host->clk_axi);
-}
-
-static void mxsfb_enable_controller(struct fb_info *fb_info)
-{
- struct mxsfb_info *host = fb_info->par;
- u32 reg;
- int ret;
-
- dev_dbg(&host->pdev->dev, "%s\n", __func__);
-
- if (host->reg_lcd) {
- ret = regulator_enable(host->reg_lcd);
- if (ret) {
- dev_err(&host->pdev->dev,
- "lcd regulator enable failed: %d\n", ret);
- return;
- }
- }
-
- if (host->clk_disp_axi)
- clk_prepare_enable(host->clk_disp_axi);
- clk_prepare_enable(host->clk);
- clk_set_rate(host->clk, PICOS2KHZ(fb_info->var.pixclock) * 1000U);
-
- mxsfb_enable_axi_clk(host);
-
- /* if it was disabled, re-enable the mode again */
- writel(CTRL_DOTCLK_MODE, host->base + LCDC_CTRL + REG_SET);
-
- /* enable the SYNC signals first, then the DMA engine */
- reg = readl(host->base + LCDC_VDCTRL4);
- reg |= VDCTRL4_SYNC_SIGNALS_ON;
- writel(reg, host->base + LCDC_VDCTRL4);
-
- writel(CTRL_RUN, host->base + LCDC_CTRL + REG_SET);
-
- host->enabled = 1;
-}
-
-static void mxsfb_disable_controller(struct fb_info *fb_info)
-{
- struct mxsfb_info *host = fb_info->par;
- unsigned loop;
- u32 reg;
- int ret;
-
- dev_dbg(&host->pdev->dev, "%s\n", __func__);
-
- /*
- * Even if we disable the controller here, it will still continue
- * until its FIFOs are running out of data
- */
- writel(CTRL_DOTCLK_MODE, host->base + LCDC_CTRL + REG_CLR);
-
- loop = 1000;
- while (loop) {
- reg = readl(host->base + LCDC_CTRL);
- if (!(reg & CTRL_RUN))
- break;
- loop--;
- }
-
- reg = readl(host->base + LCDC_VDCTRL4);
- writel(reg & ~VDCTRL4_SYNC_SIGNALS_ON, host->base + LCDC_VDCTRL4);
-
- mxsfb_disable_axi_clk(host);
-
- clk_disable_unprepare(host->clk);
- if (host->clk_disp_axi)
- clk_disable_unprepare(host->clk_disp_axi);
-
- host->enabled = 0;
-
- if (host->reg_lcd) {
- ret = regulator_disable(host->reg_lcd);
- if (ret)
- dev_err(&host->pdev->dev,
- "lcd regulator disable failed: %d\n", ret);
- }
-}
-
-static int mxsfb_set_par(struct fb_info *fb_info)
-{
- struct mxsfb_info *host = fb_info->par;
- u32 ctrl, vdctrl0, vdctrl4;
- int line_size, fb_size;
- int reenable = 0;
-
- line_size = fb_info->var.xres * (fb_info->var.bits_per_pixel >> 3);
- fb_size = fb_info->var.yres_virtual * line_size;
-
- if (fb_size > fb_info->fix.smem_len)
- return -ENOMEM;
-
- fb_info->fix.line_length = line_size;
-
- if (host->pre_init) {
- mxsfb_enable_controller(fb_info);
- host->pre_init = 0;
- return 0;
- }
-
- /*
- * It seems, you can't re-program the controller if it is still running.
- * This may lead into shifted pictures (FIFO issue?).
- * So, first stop the controller and drain its FIFOs
- */
- if (host->enabled) {
- reenable = 1;
- mxsfb_disable_controller(fb_info);
- }
-
- mxsfb_enable_axi_clk(host);
-
- /* clear the FIFOs */
- writel(CTRL1_FIFO_CLEAR, host->base + LCDC_CTRL1 + REG_SET);
-
- ctrl = CTRL_BYPASS_COUNT | CTRL_MASTER |
- CTRL_SET_BUS_WIDTH(host->ld_intf_width);
-
- switch (fb_info->var.bits_per_pixel) {
- case 16:
- dev_dbg(&host->pdev->dev, "Setting up RGB565 mode\n");
- ctrl |= CTRL_SET_WORD_LENGTH(0);
- writel(CTRL1_SET_BYTE_PACKAGING(0xf), host->base + LCDC_CTRL1);
- break;
- case 32:
- dev_dbg(&host->pdev->dev, "Setting up RGB888/666 mode\n");
- ctrl |= CTRL_SET_WORD_LENGTH(3);
- switch (host->ld_intf_width) {
- case STMLCDIF_8BIT:
- mxsfb_disable_axi_clk(host);
- dev_err(&host->pdev->dev,
- "Unsupported LCD bus width mapping\n");
- return -EINVAL;
- case STMLCDIF_16BIT:
- case STMLCDIF_18BIT:
- case STMLCDIF_24BIT:
- /* real 24 bit */
- break;
- }
- /* do not use packed pixels = one pixel per word instead */
- writel(CTRL1_SET_BYTE_PACKAGING(0x7), host->base + LCDC_CTRL1);
- break;
- default:
- mxsfb_disable_axi_clk(host);
- dev_err(&host->pdev->dev, "Unhandled color depth of %u\n",
- fb_info->var.bits_per_pixel);
- return -EINVAL;
- }
-
- writel(ctrl, host->base + LCDC_CTRL);
-
- writel(TRANSFER_COUNT_SET_VCOUNT(fb_info->var.yres) |
- TRANSFER_COUNT_SET_HCOUNT(fb_info->var.xres),
- host->base + host->devdata->transfer_count);
-
- vdctrl0 = VDCTRL0_ENABLE_PRESENT | /* always in DOTCLOCK mode */
- VDCTRL0_VSYNC_PERIOD_UNIT |
- VDCTRL0_VSYNC_PULSE_WIDTH_UNIT |
- VDCTRL0_SET_VSYNC_PULSE_WIDTH(fb_info->var.vsync_len);
- if (fb_info->var.sync & FB_SYNC_HOR_HIGH_ACT)
- vdctrl0 |= VDCTRL0_HSYNC_ACT_HIGH;
- if (fb_info->var.sync & FB_SYNC_VERT_HIGH_ACT)
- vdctrl0 |= VDCTRL0_VSYNC_ACT_HIGH;
- if (host->sync & MXSFB_SYNC_DATA_ENABLE_HIGH_ACT)
- vdctrl0 |= VDCTRL0_ENABLE_ACT_HIGH;
- if (host->sync & MXSFB_SYNC_DOTCLK_FALLING_ACT)
- vdctrl0 |= VDCTRL0_DOTCLK_ACT_FALLING;
-
- writel(vdctrl0, host->base + LCDC_VDCTRL0);
-
- /* frame length in lines */
- writel(fb_info->var.upper_margin + fb_info->var.vsync_len +
- fb_info->var.lower_margin + fb_info->var.yres,
- host->base + LCDC_VDCTRL1);
-
- /* line length in units of clocks or pixels */
- writel(set_hsync_pulse_width(host, fb_info->var.hsync_len) |
- VDCTRL2_SET_HSYNC_PERIOD(fb_info->var.left_margin +
- fb_info->var.hsync_len + fb_info->var.right_margin +
- fb_info->var.xres),
- host->base + LCDC_VDCTRL2);
-
- writel(SET_HOR_WAIT_CNT(fb_info->var.left_margin +
- fb_info->var.hsync_len) |
- SET_VERT_WAIT_CNT(fb_info->var.upper_margin +
- fb_info->var.vsync_len),
- host->base + LCDC_VDCTRL3);
-
- vdctrl4 = SET_DOTCLK_H_VALID_DATA_CNT(fb_info->var.xres);
- if (mxsfb_is_v4(host))
- vdctrl4 |= VDCTRL4_SET_DOTCLK_DLY(host->dotclk_delay);
- writel(vdctrl4, host->base + LCDC_VDCTRL4);
-
- writel(fb_info->fix.smem_start +
- fb_info->fix.line_length * fb_info->var.yoffset,
- host->base + host->devdata->next_buf);
-
- mxsfb_disable_axi_clk(host);
-
- if (reenable)
- mxsfb_enable_controller(fb_info);
-
- return 0;
-}
-
-static int mxsfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
- u_int transp, struct fb_info *fb_info)
-{
- unsigned int val;
- int ret = -EINVAL;
-
- /*
- * If greyscale is true, then we convert the RGB value
- * to greyscale no matter what visual we are using.
- */
- if (fb_info->var.grayscale)
- red = green = blue = (19595 * red + 38470 * green +
- 7471 * blue) >> 16;
-
- switch (fb_info->fix.visual) {
- case FB_VISUAL_TRUECOLOR:
- /*
- * 12 or 16-bit True Colour. We encode the RGB value
- * according to the RGB bitfield information.
- */
- if (regno < 16) {
- u32 *pal = fb_info->pseudo_palette;
-
- val = chan_to_field(red, &fb_info->var.red);
- val |= chan_to_field(green, &fb_info->var.green);
- val |= chan_to_field(blue, &fb_info->var.blue);
-
- pal[regno] = val;
- ret = 0;
- }
- break;
-
- case FB_VISUAL_STATIC_PSEUDOCOLOR:
- case FB_VISUAL_PSEUDOCOLOR:
- break;
- }
-
- return ret;
-}
-
-static int mxsfb_blank(int blank, struct fb_info *fb_info)
-{
- struct mxsfb_info *host = fb_info->par;
-
- switch (blank) {
- case FB_BLANK_POWERDOWN:
- case FB_BLANK_VSYNC_SUSPEND:
- case FB_BLANK_HSYNC_SUSPEND:
- case FB_BLANK_NORMAL:
- if (host->enabled)
- mxsfb_disable_controller(fb_info);
- break;
-
- case FB_BLANK_UNBLANK:
- if (!host->enabled)
- mxsfb_enable_controller(fb_info);
- break;
- }
- return 0;
-}
-
-static int mxsfb_pan_display(struct fb_var_screeninfo *var,
- struct fb_info *fb_info)
-{
- struct mxsfb_info *host = fb_info->par;
- unsigned offset;
-
- if (var->xoffset != 0)
- return -EINVAL;
-
- offset = fb_info->fix.line_length * var->yoffset;
-
- mxsfb_enable_axi_clk(host);
-
- /* update on next VSYNC */
- writel(fb_info->fix.smem_start + offset,
- host->base + host->devdata->next_buf);
-
- mxsfb_disable_axi_clk(host);
-
- return 0;
-}
-
-static struct fb_ops mxsfb_ops = {
- .owner = THIS_MODULE,
- .fb_check_var = mxsfb_check_var,
- .fb_set_par = mxsfb_set_par,
- .fb_setcolreg = mxsfb_setcolreg,
- .fb_blank = mxsfb_blank,
- .fb_pan_display = mxsfb_pan_display,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
-};
-
-static int mxsfb_restore_mode(struct fb_info *fb_info,
- struct fb_videomode *vmode)
-{
- struct mxsfb_info *host = fb_info->par;
- unsigned period;
- unsigned long pa, fbsize;
- int bits_per_pixel, ofs, ret = 0;
- u32 transfer_count, vdctrl0, vdctrl2, vdctrl3, vdctrl4, ctrl;
-
- mxsfb_enable_axi_clk(host);
-
- /* Only restore the mode when the controller is running */
- ctrl = readl(host->base + LCDC_CTRL);
- if (!(ctrl & CTRL_RUN)) {
- ret = -EINVAL;
- goto err;
- }
-
- vdctrl0 = readl(host->base + LCDC_VDCTRL0);
- vdctrl2 = readl(host->base + LCDC_VDCTRL2);
- vdctrl3 = readl(host->base + LCDC_VDCTRL3);
- vdctrl4 = readl(host->base + LCDC_VDCTRL4);
-
- transfer_count = readl(host->base + host->devdata->transfer_count);
-
- vmode->xres = TRANSFER_COUNT_GET_HCOUNT(transfer_count);
- vmode->yres = TRANSFER_COUNT_GET_VCOUNT(transfer_count);
-
- switch (CTRL_GET_WORD_LENGTH(ctrl)) {
- case 0:
- bits_per_pixel = 16;
- break;
- case 3:
- bits_per_pixel = 32;
- break;
- case 1:
- default:
- ret = -EINVAL;
- goto err;
- }
-
- fb_info->var.bits_per_pixel = bits_per_pixel;
-
- vmode->pixclock = KHZ2PICOS(clk_get_rate(host->clk) / 1000U);
- vmode->hsync_len = get_hsync_pulse_width(host, vdctrl2);
- vmode->left_margin = GET_HOR_WAIT_CNT(vdctrl3) - vmode->hsync_len;
- vmode->right_margin = VDCTRL2_GET_HSYNC_PERIOD(vdctrl2) -
- vmode->hsync_len - vmode->left_margin - vmode->xres;
- vmode->vsync_len = VDCTRL0_GET_VSYNC_PULSE_WIDTH(vdctrl0);
- period = readl(host->base + LCDC_VDCTRL1);
- vmode->upper_margin = GET_VERT_WAIT_CNT(vdctrl3) - vmode->vsync_len;
- vmode->lower_margin = period - vmode->vsync_len -
- vmode->upper_margin - vmode->yres;
-
- vmode->vmode = FB_VMODE_NONINTERLACED;
-
- vmode->sync = 0;
- if (vdctrl0 & VDCTRL0_HSYNC_ACT_HIGH)
- vmode->sync |= FB_SYNC_HOR_HIGH_ACT;
- if (vdctrl0 & VDCTRL0_VSYNC_ACT_HIGH)
- vmode->sync |= FB_SYNC_VERT_HIGH_ACT;
-
- pr_debug("Reconstructed video mode:\n");
- pr_debug("%dx%d, hsync: %u left: %u, right: %u, vsync: %u, upper: %u, lower: %u\n",
- vmode->xres, vmode->yres, vmode->hsync_len, vmode->left_margin,
- vmode->right_margin, vmode->vsync_len, vmode->upper_margin,
- vmode->lower_margin);
- pr_debug("pixclk: %ldkHz\n", PICOS2KHZ(vmode->pixclock));
-
- host->ld_intf_width = CTRL_GET_BUS_WIDTH(ctrl);
- host->dotclk_delay = VDCTRL4_GET_DOTCLK_DLY(vdctrl4);
-
- fb_info->fix.line_length = vmode->xres * (bits_per_pixel >> 3);
-
- pa = readl(host->base + host->devdata->cur_buf);
- fbsize = fb_info->fix.line_length * vmode->yres;
- if (pa < fb_info->fix.smem_start) {
- ret = -EINVAL;
- goto err;
- }
- if (pa + fbsize > fb_info->fix.smem_start + fb_info->fix.smem_len) {
- ret = -EINVAL;
- goto err;
- }
- ofs = pa - fb_info->fix.smem_start;
- if (ofs) {
- memmove(fb_info->screen_base, fb_info->screen_base + ofs, fbsize);
- writel(fb_info->fix.smem_start, host->base + host->devdata->next_buf);
- }
-
- fb_info->fix.ypanstep = 1;
-
- clk_prepare_enable(host->clk);
- host->enabled = 1;
-
-err:
- if (ret)
- mxsfb_disable_axi_clk(host);
-
- return ret;
-}
-
-static int mxsfb_init_fbinfo_dt(struct fb_info *fb_info,
- struct fb_videomode *vmode)
-{
- struct mxsfb_info *host = fb_info->par;
- struct fb_var_screeninfo *var = &fb_info->var;
- struct device *dev = &host->pdev->dev;
- struct device_node *np = host->pdev->dev.of_node;
- struct device_node *display_np;
- struct videomode vm;
- u32 width;
- int ret;
-
- display_np = of_parse_phandle(np, "display", 0);
- if (!display_np) {
- dev_err(dev, "failed to find display phandle\n");
- return -ENOENT;
- }
-
- ret = of_property_read_u32(display_np, "bus-width", &width);
- if (ret < 0) {
- dev_err(dev, "failed to get property bus-width\n");
- goto put_display_node;
- }
-
- switch (width) {
- case 8:
- host->ld_intf_width = STMLCDIF_8BIT;
- break;
- case 16:
- host->ld_intf_width = STMLCDIF_16BIT;
- break;
- case 18:
- host->ld_intf_width = STMLCDIF_18BIT;
- break;
- case 24:
- host->ld_intf_width = STMLCDIF_24BIT;
- break;
- default:
- dev_err(dev, "invalid bus-width value\n");
- ret = -EINVAL;
- goto put_display_node;
- }
-
- ret = of_property_read_u32(display_np, "bits-per-pixel",
- &var->bits_per_pixel);
- if (ret < 0) {
- dev_err(dev, "failed to get property bits-per-pixel\n");
- goto put_display_node;
- }
-
- ret = of_get_videomode(display_np, &vm, OF_USE_NATIVE_MODE);
- if (ret) {
- dev_err(dev, "failed to get videomode from DT\n");
- goto put_display_node;
- }
-
- ret = fb_videomode_from_videomode(&vm, vmode);
- if (ret < 0)
- goto put_display_node;
-
- if (vm.flags & DISPLAY_FLAGS_DE_HIGH)
- host->sync |= MXSFB_SYNC_DATA_ENABLE_HIGH_ACT;
-
- /*
- * The PIXDATA flags of the display_flags enum are controller
- * centric, e.g. NEGEDGE means drive data on negative edge.
- * However, the drivers flag is display centric: Sample the
- * data on negative (falling) edge. Therefore, check for the
- * POSEDGE flag:
- * drive on positive edge => sample on negative edge
- */
- if (vm.flags & DISPLAY_FLAGS_PIXDATA_POSEDGE)
- host->sync |= MXSFB_SYNC_DOTCLK_FALLING_ACT;
-
-put_display_node:
- of_node_put(display_np);
- return ret;
-}
-
-static int mxsfb_init_fbinfo(struct fb_info *fb_info,
- struct fb_videomode *vmode)
-{
- int ret;
- struct mxsfb_info *host = fb_info->par;
- struct device *dev = &host->pdev->dev;
- struct fb_var_screeninfo *var = &fb_info->var;
- dma_addr_t fb_phys;
- void *fb_virt;
- unsigned fb_size;
-
- fb_info->fbops = &mxsfb_ops;
- fb_info->flags = FBINFO_FLAG_DEFAULT | FBINFO_READS_FAST;
- strlcpy(fb_info->fix.id, "mxs", sizeof(fb_info->fix.id));
- fb_info->fix.type = FB_TYPE_PACKED_PIXELS;
- fb_info->fix.ypanstep = 1;
- fb_info->fix.visual = FB_VISUAL_TRUECOLOR;
- fb_info->fix.accel = FB_ACCEL_NONE;
-
- ret = mxsfb_init_fbinfo_dt(fb_info, vmode);
- if (ret)
- return ret;
-
- var->nonstd = 0;
- var->activate = FB_ACTIVATE_NOW;
- var->accel_flags = 0;
- var->vmode = FB_VMODE_NONINTERLACED;
-
- /* Memory allocation for framebuffer */
- fb_size = SZ_2M;
- fb_virt = dma_alloc_wc(dev, PAGE_ALIGN(fb_size), &fb_phys, GFP_KERNEL);
- if (!fb_virt)
- return -ENOMEM;
-
- fb_info->fix.smem_start = fb_phys;
- fb_info->screen_base = fb_virt;
- fb_info->screen_size = fb_info->fix.smem_len = fb_size;
-
- if (mxsfb_restore_mode(fb_info, vmode))
- memset(fb_virt, 0, fb_size);
-
- return 0;
-}
-
-static void mxsfb_free_videomem(struct fb_info *fb_info)
-{
- struct mxsfb_info *host = fb_info->par;
- struct device *dev = &host->pdev->dev;
-
- dma_free_wc(dev, fb_info->screen_size, fb_info->screen_base,
- fb_info->fix.smem_start);
-}
-
-static const struct platform_device_id mxsfb_devtype[] = {
- {
- .name = "imx23-fb",
- .driver_data = MXSFB_V3,
- }, {
- .name = "imx28-fb",
- .driver_data = MXSFB_V4,
- }, {
- /* sentinel */
- }
-};
-MODULE_DEVICE_TABLE(platform, mxsfb_devtype);
-
-static const struct of_device_id mxsfb_dt_ids[] = {
- { .compatible = "fsl,imx23-lcdif", .data = &mxsfb_devtype[0], },
- { .compatible = "fsl,imx28-lcdif", .data = &mxsfb_devtype[1], },
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, mxsfb_dt_ids);
-
-static int mxsfb_probe(struct platform_device *pdev)
-{
- const struct of_device_id *of_id =
- of_match_device(mxsfb_dt_ids, &pdev->dev);
- struct resource *res;
- struct mxsfb_info *host;
- struct fb_info *fb_info;
- struct fb_videomode *mode;
- int ret;
-
- if (of_id)
- pdev->id_entry = of_id->data;
-
- fb_info = framebuffer_alloc(sizeof(struct mxsfb_info), &pdev->dev);
- if (!fb_info) {
- dev_err(&pdev->dev, "Failed to allocate fbdev\n");
- return -ENOMEM;
- }
-
- mode = devm_kzalloc(&pdev->dev, sizeof(struct fb_videomode),
- GFP_KERNEL);
- if (mode == NULL)
- return -ENOMEM;
-
- host = fb_info->par;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- host->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(host->base)) {
- ret = PTR_ERR(host->base);
- goto fb_release;
- }
-
- host->pdev = pdev;
- platform_set_drvdata(pdev, host);
-
- host->devdata = &mxsfb_devdata[pdev->id_entry->driver_data];
-
- host->clk = devm_clk_get(&host->pdev->dev, NULL);
- if (IS_ERR(host->clk)) {
- ret = PTR_ERR(host->clk);
- goto fb_release;
- }
-
- host->clk_axi = devm_clk_get(&host->pdev->dev, "axi");
- if (IS_ERR(host->clk_axi))
- host->clk_axi = NULL;
-
- host->clk_disp_axi = devm_clk_get(&host->pdev->dev, "disp_axi");
- if (IS_ERR(host->clk_disp_axi))
- host->clk_disp_axi = NULL;
-
- host->reg_lcd = devm_regulator_get(&pdev->dev, "lcd");
- if (IS_ERR(host->reg_lcd))
- host->reg_lcd = NULL;
-
-#if defined(CONFIG_FB_PRE_INIT_FB)
- host->pre_init = 1;
-#endif
-
- fb_info->pseudo_palette = devm_kcalloc(&pdev->dev, 16, sizeof(u32),
- GFP_KERNEL);
- if (!fb_info->pseudo_palette) {
- ret = -ENOMEM;
- goto fb_release;
- }
-
- ret = mxsfb_init_fbinfo(fb_info, mode);
- if (ret != 0)
- goto fb_release;
-
- fb_videomode_to_var(&fb_info->var, mode);
-
- /* init the color fields */
- mxsfb_check_var(&fb_info->var, fb_info);
-
- platform_set_drvdata(pdev, fb_info);
-
- ret = register_framebuffer(fb_info);
- if (ret != 0) {
- dev_err(&pdev->dev,"Failed to register framebuffer\n");
- goto fb_destroy;
- }
-
- if (!host->enabled) {
- mxsfb_enable_axi_clk(host);
- writel(0, host->base + LCDC_CTRL);
- mxsfb_disable_axi_clk(host);
- mxsfb_set_par(fb_info);
- mxsfb_enable_controller(fb_info);
- }
-
- host->pre_init = 0;
- dev_info(&pdev->dev, "initialized\n");
-
- return 0;
-
-fb_destroy:
- if (host->enabled)
- clk_disable_unprepare(host->clk);
-fb_release:
- framebuffer_release(fb_info);
-
- return ret;
-}
-
-static int mxsfb_remove(struct platform_device *pdev)
-{
- struct fb_info *fb_info = platform_get_drvdata(pdev);
- struct mxsfb_info *host = fb_info->par;
-
- if (host->enabled)
- mxsfb_disable_controller(fb_info);
-
- unregister_framebuffer(fb_info);
- mxsfb_free_videomem(fb_info);
-
- framebuffer_release(fb_info);
-
- return 0;
-}
-
-static void mxsfb_shutdown(struct platform_device *pdev)
-{
- struct fb_info *fb_info = platform_get_drvdata(pdev);
- struct mxsfb_info *host = fb_info->par;
-
- mxsfb_enable_axi_clk(host);
-
- /*
- * Force stop the LCD controller as keeping it running during reboot
- * might interfere with the BootROM's boot mode pads sampling.
- */
- writel(CTRL_RUN, host->base + LCDC_CTRL + REG_CLR);
-
- mxsfb_disable_axi_clk(host);
-}
-
-static struct platform_driver mxsfb_driver = {
- .probe = mxsfb_probe,
- .remove = mxsfb_remove,
- .shutdown = mxsfb_shutdown,
- .id_table = mxsfb_devtype,
- .driver = {
- .name = DRIVER_NAME,
- .of_match_table = mxsfb_dt_ids,
- },
-};
-
-module_platform_driver(mxsfb_driver);
-
-MODULE_DESCRIPTION("Freescale mxs framebuffer driver");
-MODULE_AUTHOR("Sascha Hauer, Pengutronix");
-MODULE_LICENSE("GPL");
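
The mxsfb driver deleted above leans on the i.MX "set/clear" register aliases: writing a mask to a register's address + 4 sets exactly those bits and + 8 clears them, which avoids a read-modify-write on the bus (see the REG_SET/REG_CLR writes to LCDC_CTRL and LCDC_CTRL1). A minimal standalone sketch of that idiom, assuming a base pointer obtained from ioremap() (helper names are illustrative):

#include <linux/io.h>

#define REG_SET		4	/* write-1-to-set alias of the register */
#define REG_CLR		8	/* write-1-to-clear alias of the register */

static inline void sct_set(void __iomem *base, unsigned int reg, u32 mask)
{
	writel(mask, base + reg + REG_SET);	/* only the bits in 'mask' get set */
}

static inline void sct_clear(void __iomem *base, unsigned int reg, u32 mask)
{
	writel(mask, base + reg + REG_CLR);	/* only the bits in 'mask' get cleared */
}
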
diff --git a/drivers/video/fbdev/neofb.c b/drivers/video/fbdev/neofb.c
index 5d3a444083f7..b770946a0920 100644
--- a/drivers/video/fbdev/neofb.c
+++ b/drivers/video/fbdev/neofb.c
@@ -2122,14 +2122,7 @@ static void neofb_remove(struct pci_dev *dev)
DBG("neofb_remove");
if (info) {
- /*
- * If unregister_framebuffer fails, then
- * we will be leaving hooks that could cause
- * oopsen laying around.
- */
- if (unregister_framebuffer(info))
- printk(KERN_WARNING
- "neofb: danger danger! Oopsen imminent!\n");
+ unregister_framebuffer(info);
neo_unmap_video(info);
fb_destroy_modedb(info->monspecs.modedb);
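
The neofb hunk is another instance of the same series-wide cleanup: the return value of unregister_framebuffer() is no longer acted on, since there is nothing a .remove() path can sensibly do with the error. A minimal sketch of the resulting teardown order (hypothetical driver, PCI assumed as in neofb):

#include <linux/fb.h>
#include <linux/pci.h>

static void examplefb_remove(struct pci_dev *dev)	/* hypothetical */
{
	struct fb_info *info = pci_get_drvdata(dev);

	if (info) {
		unregister_framebuffer(info);	/* return value deliberately ignored */
		/* ... unmap MMIO, free mode lists ... */
		framebuffer_release(info);
	}
}
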
diff --git a/drivers/video/fbdev/omap/omapfb_main.c b/drivers/video/fbdev/omap/omapfb_main.c
index 406f972d2e42..90eca64e3144 100644
--- a/drivers/video/fbdev/omap/omapfb_main.c
+++ b/drivers/video/fbdev/omap/omapfb_main.c
@@ -1502,8 +1502,6 @@ static int planes_init(struct omapfb_device *fbdev)
fbi = framebuffer_alloc(sizeof(struct omapfb_plane_struct),
fbdev->dev);
if (fbi == NULL) {
- dev_err(fbdev->dev,
- "unable to allocate memory for plane info\n");
planes_cleanup(fbdev);
return -ENOMEM;
}
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/Kconfig b/drivers/video/fbdev/omap2/omapfb/dss/Kconfig
index a34820e8ab97..36b97fee2d57 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/Kconfig
+++ b/drivers/video/fbdev/omap2/omapfb/dss/Kconfig
@@ -39,18 +39,6 @@ config FB_OMAP2_DSS_DPI
help
DPI Interface. This is the Parallel Display Interface.
-config FB_OMAP2_DSS_RFBI
- bool "RFBI support"
- depends on BROKEN
- help
- MIPI DBI support (RFBI, Remote Framebuffer Interface, in Texas
- Instrument's terminology).
-
- DBI is a bus between the host processor and a peripheral,
- such as a display or a framebuffer chip.
-
- See http://www.mipi.org/ for DBI specifications.
-
config FB_OMAP2_DSS_VENC
bool "VENC support"
default y
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/Makefile b/drivers/video/fbdev/omap2/omapfb/dss/Makefile
index 7318d5260e8d..eb3689ae8d87 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/Makefile
+++ b/drivers/video/fbdev/omap2/omapfb/dss/Makefile
@@ -8,7 +8,6 @@ omapdss-y := core.o dss.o dss_features.o dispc.o dispc_coefs.o display.o \
omapdss-y += manager.o manager-sysfs.o overlay.o overlay-sysfs.o apply.o \
dispc-compat.o display-sysfs.o
omapdss-$(CONFIG_FB_OMAP2_DSS_DPI) += dpi.o
-omapdss-$(CONFIG_FB_OMAP2_DSS_RFBI) += rfbi.o
omapdss-$(CONFIG_FB_OMAP2_DSS_VENC) += venc.o
omapdss-$(CONFIG_FB_OMAP2_DSS_SDI) += sdi.o
omapdss-$(CONFIG_FB_OMAP2_DSS_DSI) += dsi.o
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/core.c b/drivers/video/fbdev/omap2/omapfb/dss/core.c
index f3ac5103b44a..37858be8be83 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/core.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/core.c
@@ -207,9 +207,6 @@ static int (*dss_output_drv_reg_funcs[])(void) __initdata = {
#ifdef CONFIG_FB_OMAP2_DSS_SDI
sdi_init_platform_driver,
#endif
-#ifdef CONFIG_FB_OMAP2_DSS_RFBI
- rfbi_init_platform_driver,
-#endif
#ifdef CONFIG_FB_OMAP2_DSS_VENC
venc_init_platform_driver,
#endif
@@ -231,9 +228,6 @@ static void (*dss_output_drv_unreg_funcs[])(void) = {
#ifdef CONFIG_FB_OMAP2_DSS_VENC
venc_uninit_platform_driver,
#endif
-#ifdef CONFIG_FB_OMAP2_DSS_RFBI
- rfbi_uninit_platform_driver,
-#endif
#ifdef CONFIG_FB_OMAP2_DSS_SDI
sdi_uninit_platform_driver,
#endif
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss.h b/drivers/video/fbdev/omap2/omapfb/dss/dss.h
index 99bebc1983dc..a2269008590f 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dss.h
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dss.h
@@ -461,10 +461,6 @@ void hdmi4_uninit_platform_driver(void);
int hdmi5_init_platform_driver(void) __init;
void hdmi5_uninit_platform_driver(void);
-/* RFBI */
-int rfbi_init_platform_driver(void) __init;
-void rfbi_uninit_platform_driver(void);
-
#ifdef CONFIG_FB_OMAP2_DSS_COLLECT_IRQ_STATS
static inline void dss_collect_irq_stats(u32 irqstatus, unsigned *irq_arr)
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/rfbi.c b/drivers/video/fbdev/omap2/omapfb/dss/rfbi.c
deleted file mode 100644
index c6813b9b8a8d..000000000000
--- a/drivers/video/fbdev/omap2/omapfb/dss/rfbi.c
+++ /dev/null
@@ -1,1067 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * linux/drivers/video/omap2/dss/rfbi.c
- *
- * Copyright (C) 2009 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
- *
- * Some code and ideas taken from drivers/video/omap/ driver
- * by Imre Deak.
- */
-
-#define DSS_SUBSYS_NAME "RFBI"
-
-#include <linux/kernel.h>
-#include <linux/dma-mapping.h>
-#include <linux/export.h>
-#include <linux/vmalloc.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/delay.h>
-#include <linux/kfifo.h>
-#include <linux/ktime.h>
-#include <linux/hrtimer.h>
-#include <linux/seq_file.h>
-#include <linux/semaphore.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/component.h>
-
-#include <video/omapfb_dss.h>
-#include "dss.h"
-
-struct rfbi_reg { u16 idx; };
-
-#define RFBI_REG(idx) ((const struct rfbi_reg) { idx })
-
-#define RFBI_REVISION RFBI_REG(0x0000)
-#define RFBI_SYSCONFIG RFBI_REG(0x0010)
-#define RFBI_SYSSTATUS RFBI_REG(0x0014)
-#define RFBI_CONTROL RFBI_REG(0x0040)
-#define RFBI_PIXEL_CNT RFBI_REG(0x0044)
-#define RFBI_LINE_NUMBER RFBI_REG(0x0048)
-#define RFBI_CMD RFBI_REG(0x004c)
-#define RFBI_PARAM RFBI_REG(0x0050)
-#define RFBI_DATA RFBI_REG(0x0054)
-#define RFBI_READ RFBI_REG(0x0058)
-#define RFBI_STATUS RFBI_REG(0x005c)
-
-#define RFBI_CONFIG(n) RFBI_REG(0x0060 + (n)*0x18)
-#define RFBI_ONOFF_TIME(n) RFBI_REG(0x0064 + (n)*0x18)
-#define RFBI_CYCLE_TIME(n) RFBI_REG(0x0068 + (n)*0x18)
-#define RFBI_DATA_CYCLE1(n) RFBI_REG(0x006c + (n)*0x18)
-#define RFBI_DATA_CYCLE2(n) RFBI_REG(0x0070 + (n)*0x18)
-#define RFBI_DATA_CYCLE3(n) RFBI_REG(0x0074 + (n)*0x18)
-
-#define RFBI_VSYNC_WIDTH RFBI_REG(0x0090)
-#define RFBI_HSYNC_WIDTH RFBI_REG(0x0094)
-
-#define REG_FLD_MOD(idx, val, start, end) \
- rfbi_write_reg(idx, FLD_MOD(rfbi_read_reg(idx), val, start, end))
-
-enum omap_rfbi_cycleformat {
- OMAP_DSS_RFBI_CYCLEFORMAT_1_1 = 0,
- OMAP_DSS_RFBI_CYCLEFORMAT_2_1 = 1,
- OMAP_DSS_RFBI_CYCLEFORMAT_3_1 = 2,
- OMAP_DSS_RFBI_CYCLEFORMAT_3_2 = 3,
-};
-
-enum omap_rfbi_datatype {
- OMAP_DSS_RFBI_DATATYPE_12 = 0,
- OMAP_DSS_RFBI_DATATYPE_16 = 1,
- OMAP_DSS_RFBI_DATATYPE_18 = 2,
- OMAP_DSS_RFBI_DATATYPE_24 = 3,
-};
-
-enum omap_rfbi_parallelmode {
- OMAP_DSS_RFBI_PARALLELMODE_8 = 0,
- OMAP_DSS_RFBI_PARALLELMODE_9 = 1,
- OMAP_DSS_RFBI_PARALLELMODE_12 = 2,
- OMAP_DSS_RFBI_PARALLELMODE_16 = 3,
-};
-
-static int rfbi_convert_timings(struct rfbi_timings *t);
-static void rfbi_get_clk_info(u32 *clk_period, u32 *max_clk_div);
-
-static struct {
- struct platform_device *pdev;
- void __iomem *base;
-
- unsigned long l4_khz;
-
- enum omap_rfbi_datatype datatype;
- enum omap_rfbi_parallelmode parallelmode;
-
- enum omap_rfbi_te_mode te_mode;
- int te_enabled;
-
- void (*framedone_callback)(void *data);
- void *framedone_callback_data;
-
- struct omap_dss_device *dssdev[2];
-
- struct semaphore bus_lock;
-
- struct omap_video_timings timings;
- int pixel_size;
- int data_lines;
- struct rfbi_timings intf_timings;
-
- struct omap_dss_device output;
-} rfbi;
-
-static inline void rfbi_write_reg(const struct rfbi_reg idx, u32 val)
-{
- __raw_writel(val, rfbi.base + idx.idx);
-}
-
-static inline u32 rfbi_read_reg(const struct rfbi_reg idx)
-{
- return __raw_readl(rfbi.base + idx.idx);
-}
-
-static int rfbi_runtime_get(void)
-{
- int r;
-
- DSSDBG("rfbi_runtime_get\n");
-
- r = pm_runtime_get_sync(&rfbi.pdev->dev);
- WARN_ON(r < 0);
- return r < 0 ? r : 0;
-}
-
-static void rfbi_runtime_put(void)
-{
- int r;
-
- DSSDBG("rfbi_runtime_put\n");
-
- r = pm_runtime_put_sync(&rfbi.pdev->dev);
- WARN_ON(r < 0 && r != -ENOSYS);
-}
-
-static void rfbi_bus_lock(void)
-{
- down(&rfbi.bus_lock);
-}
-
-static void rfbi_bus_unlock(void)
-{
- up(&rfbi.bus_lock);
-}
-
-static void rfbi_write_command(const void *buf, u32 len)
-{
- switch (rfbi.parallelmode) {
- case OMAP_DSS_RFBI_PARALLELMODE_8:
- {
- const u8 *b = buf;
- for (; len; len--)
- rfbi_write_reg(RFBI_CMD, *b++);
- break;
- }
-
- case OMAP_DSS_RFBI_PARALLELMODE_16:
- {
- const u16 *w = buf;
- BUG_ON(len & 1);
- for (; len; len -= 2)
- rfbi_write_reg(RFBI_CMD, *w++);
- break;
- }
-
- case OMAP_DSS_RFBI_PARALLELMODE_9:
- case OMAP_DSS_RFBI_PARALLELMODE_12:
- default:
- BUG();
- }
-}
-
-static void rfbi_read_data(void *buf, u32 len)
-{
- switch (rfbi.parallelmode) {
- case OMAP_DSS_RFBI_PARALLELMODE_8:
- {
- u8 *b = buf;
- for (; len; len--) {
- rfbi_write_reg(RFBI_READ, 0);
- *b++ = rfbi_read_reg(RFBI_READ);
- }
- break;
- }
-
- case OMAP_DSS_RFBI_PARALLELMODE_16:
- {
- u16 *w = buf;
- BUG_ON(len & 1);
- for (; len; len -= 2) {
- rfbi_write_reg(RFBI_READ, 0);
- *w++ = rfbi_read_reg(RFBI_READ);
- }
- break;
- }
-
- case OMAP_DSS_RFBI_PARALLELMODE_9:
- case OMAP_DSS_RFBI_PARALLELMODE_12:
- default:
- BUG();
- }
-}
-
-static void rfbi_write_data(const void *buf, u32 len)
-{
- switch (rfbi.parallelmode) {
- case OMAP_DSS_RFBI_PARALLELMODE_8:
- {
- const u8 *b = buf;
- for (; len; len--)
- rfbi_write_reg(RFBI_PARAM, *b++);
- break;
- }
-
- case OMAP_DSS_RFBI_PARALLELMODE_16:
- {
- const u16 *w = buf;
- BUG_ON(len & 1);
- for (; len; len -= 2)
- rfbi_write_reg(RFBI_PARAM, *w++);
- break;
- }
-
- case OMAP_DSS_RFBI_PARALLELMODE_9:
- case OMAP_DSS_RFBI_PARALLELMODE_12:
- default:
- BUG();
-
- }
-}
-
-static void rfbi_write_pixels(const void __iomem *buf, int scr_width,
- u16 x, u16 y,
- u16 w, u16 h)
-{
- int start_offset = scr_width * y + x;
- int horiz_offset = scr_width - w;
- int i;
-
- if (rfbi.datatype == OMAP_DSS_RFBI_DATATYPE_16 &&
- rfbi.parallelmode == OMAP_DSS_RFBI_PARALLELMODE_8) {
- const u16 __iomem *pd = buf;
- pd += start_offset;
-
- for (; h; --h) {
- for (i = 0; i < w; ++i) {
- const u8 __iomem *b = (const u8 __iomem *)pd;
- rfbi_write_reg(RFBI_PARAM, __raw_readb(b+1));
- rfbi_write_reg(RFBI_PARAM, __raw_readb(b+0));
- ++pd;
- }
- pd += horiz_offset;
- }
- } else if (rfbi.datatype == OMAP_DSS_RFBI_DATATYPE_24 &&
- rfbi.parallelmode == OMAP_DSS_RFBI_PARALLELMODE_8) {
- const u32 __iomem *pd = buf;
- pd += start_offset;
-
- for (; h; --h) {
- for (i = 0; i < w; ++i) {
- const u8 __iomem *b = (const u8 __iomem *)pd;
- rfbi_write_reg(RFBI_PARAM, __raw_readb(b+2));
- rfbi_write_reg(RFBI_PARAM, __raw_readb(b+1));
- rfbi_write_reg(RFBI_PARAM, __raw_readb(b+0));
- ++pd;
- }
- pd += horiz_offset;
- }
- } else if (rfbi.datatype == OMAP_DSS_RFBI_DATATYPE_16 &&
- rfbi.parallelmode == OMAP_DSS_RFBI_PARALLELMODE_16) {
- const u16 __iomem *pd = buf;
- pd += start_offset;
-
- for (; h; --h) {
- for (i = 0; i < w; ++i) {
- rfbi_write_reg(RFBI_PARAM, __raw_readw(pd));
- ++pd;
- }
- pd += horiz_offset;
- }
- } else {
- BUG();
- }
-}
-
-static int rfbi_transfer_area(struct omap_dss_device *dssdev,
- void (*callback)(void *data), void *data)
-{
- u32 l;
- int r;
- struct omap_overlay_manager *mgr = rfbi.output.manager;
- u16 width = rfbi.timings.x_res;
- u16 height = rfbi.timings.y_res;
-
- /*BUG_ON(callback == 0);*/
- BUG_ON(rfbi.framedone_callback != NULL);
-
- DSSDBG("rfbi_transfer_area %dx%d\n", width, height);
-
- dss_mgr_set_timings(mgr, &rfbi.timings);
-
- r = dss_mgr_enable(mgr);
- if (r)
- return r;
-
- rfbi.framedone_callback = callback;
- rfbi.framedone_callback_data = data;
-
- rfbi_write_reg(RFBI_PIXEL_CNT, width * height);
-
- l = rfbi_read_reg(RFBI_CONTROL);
- l = FLD_MOD(l, 1, 0, 0); /* enable */
- if (!rfbi.te_enabled)
- l = FLD_MOD(l, 1, 4, 4); /* ITE */
-
- rfbi_write_reg(RFBI_CONTROL, l);
-
- return 0;
-}
-
-static void framedone_callback(void *data)
-{
- void (*callback)(void *data);
-
- DSSDBG("FRAMEDONE\n");
-
- REG_FLD_MOD(RFBI_CONTROL, 0, 0, 0);
-
- callback = rfbi.framedone_callback;
- rfbi.framedone_callback = NULL;
-
- if (callback != NULL)
- callback(rfbi.framedone_callback_data);
-}
-
-#if 1 /* VERBOSE */
-static void rfbi_print_timings(void)
-{
- u32 l;
- u32 time;
-
- l = rfbi_read_reg(RFBI_CONFIG(0));
- time = 1000000000 / rfbi.l4_khz;
- if (l & (1 << 4))
- time *= 2;
-
- DSSDBG("Tick time %u ps\n", time);
- l = rfbi_read_reg(RFBI_ONOFF_TIME(0));
- DSSDBG("CSONTIME %d, CSOFFTIME %d, WEONTIME %d, WEOFFTIME %d, "
- "REONTIME %d, REOFFTIME %d\n",
- l & 0x0f, (l >> 4) & 0x3f, (l >> 10) & 0x0f, (l >> 14) & 0x3f,
- (l >> 20) & 0x0f, (l >> 24) & 0x3f);
-
- l = rfbi_read_reg(RFBI_CYCLE_TIME(0));
- DSSDBG("WECYCLETIME %d, RECYCLETIME %d, CSPULSEWIDTH %d, "
- "ACCESSTIME %d\n",
- (l & 0x3f), (l >> 6) & 0x3f, (l >> 12) & 0x3f,
- (l >> 22) & 0x3f);
-}
-#else
-static void rfbi_print_timings(void) {}
-#endif
-
-
-
-
-static u32 extif_clk_period;
-
-static inline unsigned long round_to_extif_ticks(unsigned long ps, int div)
-{
- int bus_tick = extif_clk_period * div;
- return (ps + bus_tick - 1) / bus_tick * bus_tick;
-}
-
-static int calc_reg_timing(struct rfbi_timings *t, int div)
-{
- t->clk_div = div;
-
- t->cs_on_time = round_to_extif_ticks(t->cs_on_time, div);
-
- t->we_on_time = round_to_extif_ticks(t->we_on_time, div);
- t->we_off_time = round_to_extif_ticks(t->we_off_time, div);
- t->we_cycle_time = round_to_extif_ticks(t->we_cycle_time, div);
-
- t->re_on_time = round_to_extif_ticks(t->re_on_time, div);
- t->re_off_time = round_to_extif_ticks(t->re_off_time, div);
- t->re_cycle_time = round_to_extif_ticks(t->re_cycle_time, div);
-
- t->access_time = round_to_extif_ticks(t->access_time, div);
- t->cs_off_time = round_to_extif_ticks(t->cs_off_time, div);
- t->cs_pulse_width = round_to_extif_ticks(t->cs_pulse_width, div);
-
- DSSDBG("[reg]cson %d csoff %d reon %d reoff %d\n",
- t->cs_on_time, t->cs_off_time, t->re_on_time, t->re_off_time);
- DSSDBG("[reg]weon %d weoff %d recyc %d wecyc %d\n",
- t->we_on_time, t->we_off_time, t->re_cycle_time,
- t->we_cycle_time);
- DSSDBG("[reg]rdaccess %d cspulse %d\n",
- t->access_time, t->cs_pulse_width);
-
- return rfbi_convert_timings(t);
-}
-
-static int calc_extif_timings(struct rfbi_timings *t)
-{
- u32 max_clk_div;
- int div;
-
- rfbi_get_clk_info(&extif_clk_period, &max_clk_div);
- for (div = 1; div <= max_clk_div; div++) {
- if (calc_reg_timing(t, div) == 0)
- break;
- }
-
- if (div <= max_clk_div)
- return 0;
-
- DSSERR("can't setup timings\n");
- return -1;
-}
-
-
-static void rfbi_set_timings(int rfbi_module, struct rfbi_timings *t)
-{
- int r;
-
- if (!t->converted) {
- r = calc_extif_timings(t);
- if (r < 0)
- DSSERR("Failed to calc timings\n");
- }
-
- BUG_ON(!t->converted);
-
- rfbi_write_reg(RFBI_ONOFF_TIME(rfbi_module), t->tim[0]);
- rfbi_write_reg(RFBI_CYCLE_TIME(rfbi_module), t->tim[1]);
-
- /* TIMEGRANULARITY */
- REG_FLD_MOD(RFBI_CONFIG(rfbi_module),
- (t->tim[2] ? 1 : 0), 4, 4);
-
- rfbi_print_timings();
-}
-
-static int ps_to_rfbi_ticks(int time, int div)
-{
- unsigned long tick_ps;
- int ret;
-
- /* Calculate in picosecs to yield more exact results */
- tick_ps = 1000000000 / (rfbi.l4_khz) * div;
-
- ret = (time + tick_ps - 1) / tick_ps;
-
- return ret;
-}
-
-static void rfbi_get_clk_info(u32 *clk_period, u32 *max_clk_div)
-{
- *clk_period = 1000000000 / rfbi.l4_khz;
- *max_clk_div = 2;
-}
-
-static int rfbi_convert_timings(struct rfbi_timings *t)
-{
- u32 l;
- int reon, reoff, weon, weoff, cson, csoff, cs_pulse;
- int actim, recyc, wecyc;
- int div = t->clk_div;
-
- if (div <= 0 || div > 2)
- return -1;
-
- /* Make sure that after conversion it still holds that:
- * weoff > weon, reoff > reon, recyc >= reoff, wecyc >= weoff,
- * csoff > cson, csoff >= max(weoff, reoff), actim > reon
- */
- weon = ps_to_rfbi_ticks(t->we_on_time, div);
- weoff = ps_to_rfbi_ticks(t->we_off_time, div);
- if (weoff <= weon)
- weoff = weon + 1;
- if (weon > 0x0f)
- return -1;
- if (weoff > 0x3f)
- return -1;
-
- reon = ps_to_rfbi_ticks(t->re_on_time, div);
- reoff = ps_to_rfbi_ticks(t->re_off_time, div);
- if (reoff <= reon)
- reoff = reon + 1;
- if (reon > 0x0f)
- return -1;
- if (reoff > 0x3f)
- return -1;
-
- cson = ps_to_rfbi_ticks(t->cs_on_time, div);
- csoff = ps_to_rfbi_ticks(t->cs_off_time, div);
- if (csoff <= cson)
- csoff = cson + 1;
- if (csoff < max(weoff, reoff))
- csoff = max(weoff, reoff);
- if (cson > 0x0f)
- return -1;
- if (csoff > 0x3f)
- return -1;
-
- l = cson;
- l |= csoff << 4;
- l |= weon << 10;
- l |= weoff << 14;
- l |= reon << 20;
- l |= reoff << 24;
-
- t->tim[0] = l;
-
- actim = ps_to_rfbi_ticks(t->access_time, div);
- if (actim <= reon)
- actim = reon + 1;
- if (actim > 0x3f)
- return -1;
-
- wecyc = ps_to_rfbi_ticks(t->we_cycle_time, div);
- if (wecyc < weoff)
- wecyc = weoff;
- if (wecyc > 0x3f)
- return -1;
-
- recyc = ps_to_rfbi_ticks(t->re_cycle_time, div);
- if (recyc < reoff)
- recyc = reoff;
- if (recyc > 0x3f)
- return -1;
-
- cs_pulse = ps_to_rfbi_ticks(t->cs_pulse_width, div);
- if (cs_pulse > 0x3f)
- return -1;
-
- l = wecyc;
- l |= recyc << 6;
- l |= cs_pulse << 12;
- l |= actim << 22;
-
- t->tim[1] = l;
-
- t->tim[2] = div - 1;
-
- t->converted = 1;
-
- return 0;
-}
-
-/* xxx FIX module selection missing */
-static int rfbi_setup_te(enum omap_rfbi_te_mode mode,
- unsigned hs_pulse_time, unsigned vs_pulse_time,
- int hs_pol_inv, int vs_pol_inv, int extif_div)
-{
- int hs, vs;
- int min;
- u32 l;
-
- hs = ps_to_rfbi_ticks(hs_pulse_time, 1);
- vs = ps_to_rfbi_ticks(vs_pulse_time, 1);
- if (hs < 2)
- return -EDOM;
- if (mode == OMAP_DSS_RFBI_TE_MODE_2)
- min = 2;
- else /* OMAP_DSS_RFBI_TE_MODE_1 */
- min = 4;
- if (vs < min)
- return -EDOM;
- if (vs == hs)
- return -EINVAL;
- rfbi.te_mode = mode;
- DSSDBG("setup_te: mode %d hs %d vs %d hs_inv %d vs_inv %d\n",
- mode, hs, vs, hs_pol_inv, vs_pol_inv);
-
- rfbi_write_reg(RFBI_HSYNC_WIDTH, hs);
- rfbi_write_reg(RFBI_VSYNC_WIDTH, vs);
-
- l = rfbi_read_reg(RFBI_CONFIG(0));
- if (hs_pol_inv)
- l &= ~(1 << 21);
- else
- l |= 1 << 21;
- if (vs_pol_inv)
- l &= ~(1 << 20);
- else
- l |= 1 << 20;
-
- return 0;
-}
-
-/* xxx FIX module selection missing */
-static int rfbi_enable_te(bool enable, unsigned line)
-{
- u32 l;
-
- DSSDBG("te %d line %d mode %d\n", enable, line, rfbi.te_mode);
- if (line > (1 << 11) - 1)
- return -EINVAL;
-
- l = rfbi_read_reg(RFBI_CONFIG(0));
- l &= ~(0x3 << 2);
- if (enable) {
- rfbi.te_enabled = 1;
- l |= rfbi.te_mode << 2;
- } else
- rfbi.te_enabled = 0;
- rfbi_write_reg(RFBI_CONFIG(0), l);
- rfbi_write_reg(RFBI_LINE_NUMBER, line);
-
- return 0;
-}
-
-static int rfbi_configure_bus(int rfbi_module, int bpp, int lines)
-{
- u32 l;
- int cycle1 = 0, cycle2 = 0, cycle3 = 0;
- enum omap_rfbi_cycleformat cycleformat;
- enum omap_rfbi_datatype datatype;
- enum omap_rfbi_parallelmode parallelmode;
-
- switch (bpp) {
- case 12:
- datatype = OMAP_DSS_RFBI_DATATYPE_12;
- break;
- case 16:
- datatype = OMAP_DSS_RFBI_DATATYPE_16;
- break;
- case 18:
- datatype = OMAP_DSS_RFBI_DATATYPE_18;
- break;
- case 24:
- datatype = OMAP_DSS_RFBI_DATATYPE_24;
- break;
- default:
- BUG();
- return 1;
- }
- rfbi.datatype = datatype;
-
- switch (lines) {
- case 8:
- parallelmode = OMAP_DSS_RFBI_PARALLELMODE_8;
- break;
- case 9:
- parallelmode = OMAP_DSS_RFBI_PARALLELMODE_9;
- break;
- case 12:
- parallelmode = OMAP_DSS_RFBI_PARALLELMODE_12;
- break;
- case 16:
- parallelmode = OMAP_DSS_RFBI_PARALLELMODE_16;
- break;
- default:
- BUG();
- return 1;
- }
- rfbi.parallelmode = parallelmode;
-
- if ((bpp % lines) == 0) {
- switch (bpp / lines) {
- case 1:
- cycleformat = OMAP_DSS_RFBI_CYCLEFORMAT_1_1;
- break;
- case 2:
- cycleformat = OMAP_DSS_RFBI_CYCLEFORMAT_2_1;
- break;
- case 3:
- cycleformat = OMAP_DSS_RFBI_CYCLEFORMAT_3_1;
- break;
- default:
- BUG();
- return 1;
- }
- } else if ((2 * bpp % lines) == 0) {
- if ((2 * bpp / lines) == 3)
- cycleformat = OMAP_DSS_RFBI_CYCLEFORMAT_3_2;
- else {
- BUG();
- return 1;
- }
- } else {
- BUG();
- return 1;
- }
-
- switch (cycleformat) {
- case OMAP_DSS_RFBI_CYCLEFORMAT_1_1:
- cycle1 = lines;
- break;
-
- case OMAP_DSS_RFBI_CYCLEFORMAT_2_1:
- cycle1 = lines;
- cycle2 = lines;
- break;
-
- case OMAP_DSS_RFBI_CYCLEFORMAT_3_1:
- cycle1 = lines;
- cycle2 = lines;
- cycle3 = lines;
- break;
-
- case OMAP_DSS_RFBI_CYCLEFORMAT_3_2:
- cycle1 = lines;
- cycle2 = (lines / 2) | ((lines / 2) << 16);
- cycle3 = (lines << 16);
- break;
- }
-
- REG_FLD_MOD(RFBI_CONTROL, 0, 3, 2); /* clear CS */
-
- l = 0;
- l |= FLD_VAL(parallelmode, 1, 0);
- l |= FLD_VAL(0, 3, 2); /* TRIGGERMODE: ITE */
- l |= FLD_VAL(0, 4, 4); /* TIMEGRANULARITY */
- l |= FLD_VAL(datatype, 6, 5);
- /* l |= FLD_VAL(2, 8, 7); */ /* L4FORMAT, 2pix/L4 */
- l |= FLD_VAL(0, 8, 7); /* L4FORMAT, 1pix/L4 */
- l |= FLD_VAL(cycleformat, 10, 9);
- l |= FLD_VAL(0, 12, 11); /* UNUSEDBITS */
- l |= FLD_VAL(0, 16, 16); /* A0POLARITY */
- l |= FLD_VAL(0, 17, 17); /* REPOLARITY */
- l |= FLD_VAL(0, 18, 18); /* WEPOLARITY */
- l |= FLD_VAL(0, 19, 19); /* CSPOLARITY */
- l |= FLD_VAL(1, 20, 20); /* TE_VSYNC_POLARITY */
- l |= FLD_VAL(1, 21, 21); /* HSYNCPOLARITY */
- rfbi_write_reg(RFBI_CONFIG(rfbi_module), l);
-
- rfbi_write_reg(RFBI_DATA_CYCLE1(rfbi_module), cycle1);
- rfbi_write_reg(RFBI_DATA_CYCLE2(rfbi_module), cycle2);
- rfbi_write_reg(RFBI_DATA_CYCLE3(rfbi_module), cycle3);
-
-
- l = rfbi_read_reg(RFBI_CONTROL);
- l = FLD_MOD(l, rfbi_module+1, 3, 2); /* Select CSx */
- l = FLD_MOD(l, 0, 1, 1); /* clear bypass */
- rfbi_write_reg(RFBI_CONTROL, l);
-
-
- DSSDBG("RFBI config: bpp %d, lines %d, cycles: 0x%x 0x%x 0x%x\n",
- bpp, lines, cycle1, cycle2, cycle3);
-
- return 0;
-}
-
-static int rfbi_configure(struct omap_dss_device *dssdev)
-{
- return rfbi_configure_bus(dssdev->phy.rfbi.channel, rfbi.pixel_size,
- rfbi.data_lines);
-}
-
-static int rfbi_update(struct omap_dss_device *dssdev, void (*callback)(void *),
- void *data)
-{
- return rfbi_transfer_area(dssdev, callback, data);
-}
-
-static void rfbi_set_size(struct omap_dss_device *dssdev, u16 w, u16 h)
-{
- rfbi.timings.x_res = w;
- rfbi.timings.y_res = h;
-}
-
-static void rfbi_set_pixel_size(struct omap_dss_device *dssdev, int pixel_size)
-{
- rfbi.pixel_size = pixel_size;
-}
-
-static void rfbi_set_data_lines(struct omap_dss_device *dssdev, int data_lines)
-{
- rfbi.data_lines = data_lines;
-}
-
-static void rfbi_set_interface_timings(struct omap_dss_device *dssdev,
- struct rfbi_timings *timings)
-{
- rfbi.intf_timings = *timings;
-}
-
-static void rfbi_dump_regs(struct seq_file *s)
-{
-#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, rfbi_read_reg(r))
-
- if (rfbi_runtime_get())
- return;
-
- DUMPREG(RFBI_REVISION);
- DUMPREG(RFBI_SYSCONFIG);
- DUMPREG(RFBI_SYSSTATUS);
- DUMPREG(RFBI_CONTROL);
- DUMPREG(RFBI_PIXEL_CNT);
- DUMPREG(RFBI_LINE_NUMBER);
- DUMPREG(RFBI_CMD);
- DUMPREG(RFBI_PARAM);
- DUMPREG(RFBI_DATA);
- DUMPREG(RFBI_READ);
- DUMPREG(RFBI_STATUS);
-
- DUMPREG(RFBI_CONFIG(0));
- DUMPREG(RFBI_ONOFF_TIME(0));
- DUMPREG(RFBI_CYCLE_TIME(0));
- DUMPREG(RFBI_DATA_CYCLE1(0));
- DUMPREG(RFBI_DATA_CYCLE2(0));
- DUMPREG(RFBI_DATA_CYCLE3(0));
-
- DUMPREG(RFBI_CONFIG(1));
- DUMPREG(RFBI_ONOFF_TIME(1));
- DUMPREG(RFBI_CYCLE_TIME(1));
- DUMPREG(RFBI_DATA_CYCLE1(1));
- DUMPREG(RFBI_DATA_CYCLE2(1));
- DUMPREG(RFBI_DATA_CYCLE3(1));
-
- DUMPREG(RFBI_VSYNC_WIDTH);
- DUMPREG(RFBI_HSYNC_WIDTH);
-
- rfbi_runtime_put();
-#undef DUMPREG
-}
-
-static void rfbi_config_lcd_manager(struct omap_dss_device *dssdev)
-{
- struct omap_overlay_manager *mgr = rfbi.output.manager;
- struct dss_lcd_mgr_config mgr_config;
-
- mgr_config.io_pad_mode = DSS_IO_PAD_MODE_RFBI;
-
- mgr_config.stallmode = true;
- /* Do we need fifohandcheck for RFBI? */
- mgr_config.fifohandcheck = false;
-
- mgr_config.video_port_width = rfbi.pixel_size;
- mgr_config.lcden_sig_polarity = 0;
-
- dss_mgr_set_lcd_config(mgr, &mgr_config);
-
- /*
- * Set rfbi.timings with default values, the x_res and y_res fields
- * are expected to be already configured by the panel driver via
- * omapdss_rfbi_set_size()
- */
- rfbi.timings.hsw = 1;
- rfbi.timings.hfp = 1;
- rfbi.timings.hbp = 1;
- rfbi.timings.vsw = 1;
- rfbi.timings.vfp = 0;
- rfbi.timings.vbp = 0;
-
- rfbi.timings.interlace = false;
- rfbi.timings.hsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
- rfbi.timings.vsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
- rfbi.timings.data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
- rfbi.timings.de_level = OMAPDSS_SIG_ACTIVE_HIGH;
- rfbi.timings.sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE;
-
- dss_mgr_set_timings(mgr, &rfbi.timings);
-}
-
-static int rfbi_display_enable(struct omap_dss_device *dssdev)
-{
- struct omap_dss_device *out = &rfbi.output;
- int r;
-
- if (out->manager == NULL) {
- DSSERR("failed to enable display: no output/manager\n");
- return -ENODEV;
- }
-
- r = rfbi_runtime_get();
- if (r)
- return r;
-
- r = dss_mgr_register_framedone_handler(out->manager,
- framedone_callback, NULL);
- if (r) {
- DSSERR("can't get FRAMEDONE irq\n");
- goto err1;
- }
-
- rfbi_config_lcd_manager(dssdev);
-
- rfbi_configure_bus(dssdev->phy.rfbi.channel, rfbi.pixel_size,
- rfbi.data_lines);
-
- rfbi_set_timings(dssdev->phy.rfbi.channel, &rfbi.intf_timings);
-
- return 0;
-err1:
- rfbi_runtime_put();
- return r;
-}
-
-static void rfbi_display_disable(struct omap_dss_device *dssdev)
-{
- struct omap_dss_device *out = &rfbi.output;
-
- dss_mgr_unregister_framedone_handler(out->manager,
- framedone_callback, NULL);
-
- rfbi_runtime_put();
-}
-
-static int rfbi_init_display(struct omap_dss_device *dssdev)
-{
- rfbi.dssdev[dssdev->phy.rfbi.channel] = dssdev;
- return 0;
-}
-
-static void rfbi_init_output(struct platform_device *pdev)
-{
- struct omap_dss_device *out = &rfbi.output;
-
- out->dev = &pdev->dev;
- out->id = OMAP_DSS_OUTPUT_DBI;
- out->output_type = OMAP_DISPLAY_TYPE_DBI;
- out->name = "rfbi.0";
- out->dispc_channel = OMAP_DSS_CHANNEL_LCD;
- out->owner = THIS_MODULE;
-
- omapdss_register_output(out);
-}
-
-static void rfbi_uninit_output(struct platform_device *pdev)
-{
- struct omap_dss_device *out = &rfbi.output;
-
- omapdss_unregister_output(out);
-}
-
-/* RFBI HW IP initialisation */
-static int rfbi_bind(struct device *dev, struct device *master, void *data)
-{
- struct platform_device *pdev = to_platform_device(dev);
- u32 rev;
- struct resource *rfbi_mem;
- struct clk *clk;
- int r;
-
- rfbi.pdev = pdev;
-
- sema_init(&rfbi.bus_lock, 1);
-
- rfbi_mem = platform_get_resource(rfbi.pdev, IORESOURCE_MEM, 0);
- if (!rfbi_mem) {
- DSSERR("can't get IORESOURCE_MEM RFBI\n");
- return -EINVAL;
- }
-
- rfbi.base = devm_ioremap(&pdev->dev, rfbi_mem->start,
- resource_size(rfbi_mem));
- if (!rfbi.base) {
- DSSERR("can't ioremap RFBI\n");
- return -ENOMEM;
- }
-
- clk = clk_get(&pdev->dev, "ick");
- if (IS_ERR(clk)) {
- DSSERR("can't get ick\n");
- return PTR_ERR(clk);
- }
-
- rfbi.l4_khz = clk_get_rate(clk) / 1000;
-
- clk_put(clk);
-
- pm_runtime_enable(&pdev->dev);
-
- r = rfbi_runtime_get();
- if (r)
- goto err_runtime_get;
-
- msleep(10);
-
- rev = rfbi_read_reg(RFBI_REVISION);
- dev_dbg(&pdev->dev, "OMAP RFBI rev %d.%d\n",
- FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
-
- rfbi_runtime_put();
-
- dss_debugfs_create_file("rfbi", rfbi_dump_regs);
-
- rfbi_init_output(pdev);
-
- return 0;
-
-err_runtime_get:
- pm_runtime_disable(&pdev->dev);
- return r;
-}
-
-static void rfbi_unbind(struct device *dev, struct device *master, void *data)
-{
- struct platform_device *pdev = to_platform_device(dev);
-
- rfbi_uninit_output(pdev);
-
- pm_runtime_disable(&pdev->dev);
-}
-
-static const struct component_ops rfbi_component_ops = {
- .bind = rfbi_bind,
- .unbind = rfbi_unbind,
-};
-
-static int rfbi_probe(struct platform_device *pdev)
-{
- return component_add(&pdev->dev, &rfbi_component_ops);
-}
-
-static int rfbi_remove(struct platform_device *pdev)
-{
- component_del(&pdev->dev, &rfbi_component_ops);
- return 0;
-}
-
-static int rfbi_runtime_suspend(struct device *dev)
-{
- dispc_runtime_put();
-
- return 0;
-}
-
-static int rfbi_runtime_resume(struct device *dev)
-{
- int r;
-
- r = dispc_runtime_get();
- if (r < 0)
- return r;
-
- return 0;
-}
-
-static const struct dev_pm_ops rfbi_pm_ops = {
- .runtime_suspend = rfbi_runtime_suspend,
- .runtime_resume = rfbi_runtime_resume,
-};
-
-static struct platform_driver omap_rfbihw_driver = {
- .probe = rfbi_probe,
- .remove = rfbi_remove,
- .driver = {
- .name = "omapdss_rfbi",
- .pm = &rfbi_pm_ops,
- .suppress_bind_attrs = true,
- },
-};
-
-int __init rfbi_init_platform_driver(void)
-{
- return platform_driver_register(&omap_rfbihw_driver);
-}
-
-void rfbi_uninit_platform_driver(void)
-{
- platform_driver_unregister(&omap_rfbihw_driver);
-}
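
A side note on the timing math in the RFBI code removed above: round_to_extif_ticks() and ps_to_rfbi_ticks() both do ceiling arithmetic on times expressed in picoseconds, so every programmed delay covers at least the requested time. A standalone sketch of the same arithmetic (plain C, not kernel code; the values are made up):

#include <stdio.h>

/* Round 'ps' up to a whole number of bus ticks of 'tick_ps' picoseconds each. */
static unsigned long round_to_ticks(unsigned long ps, unsigned long tick_ps)
{
	return (ps + tick_ps - 1) / tick_ps * tick_ps;
}

int main(void)
{
	/* a 100 MHz interface clock gives a 10000 ps tick */
	printf("%lu\n", round_to_ticks(25500, 10000));	/* 25.5 ns rounds up to 30 ns */
	return 0;
}
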
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
index c7d936f9d383..858c2c011d19 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
@@ -1881,12 +1881,8 @@ static int omapfb_create_framebuffers(struct omapfb2_device *fbdev)
fbi = framebuffer_alloc(sizeof(struct omapfb_info),
fbdev->dev);
-
- if (fbi == NULL) {
- dev_err(fbdev->dev,
- "unable to allocate memory for plane info\n");
+ if (!fbi)
return -ENOMEM;
- }
clear_fb_info(fbi);
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c b/drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c
index e1f8b5ae75b8..4a5db170ef59 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c
@@ -49,8 +49,7 @@ static ssize_t store_rotate_type(struct device *dev,
if (rot_type != OMAP_DSS_ROT_DMA && rot_type != OMAP_DSS_ROT_VRFB)
return -EINVAL;
- if (!lock_fb_info(fbi))
- return -ENODEV;
+ lock_fb_info(fbi);
r = 0;
if (rot_type == ofbi->rotation_type)
@@ -101,8 +100,7 @@ static ssize_t store_mirror(struct device *dev,
if (r)
return r;
- if (!lock_fb_info(fbi))
- return -ENODEV;
+ lock_fb_info(fbi);
ofbi->mirror = mirror;
@@ -138,8 +136,7 @@ static ssize_t show_overlays(struct device *dev,
ssize_t l = 0;
int t;
- if (!lock_fb_info(fbi))
- return -ENODEV;
+ lock_fb_info(fbi);
omapfb_lock(fbdev);
for (t = 0; t < ofbi->num_overlays; t++) {
@@ -197,8 +194,7 @@ static ssize_t store_overlays(struct device *dev, struct device_attribute *attr,
if (buf[len - 1] == '\n')
len = len - 1;
- if (!lock_fb_info(fbi))
- return -ENODEV;
+ lock_fb_info(fbi);
omapfb_lock(fbdev);
if (len > 0) {
@@ -329,8 +325,7 @@ static ssize_t show_overlays_rotate(struct device *dev,
ssize_t l = 0;
int t;
- if (!lock_fb_info(fbi))
- return -ENODEV;
+ lock_fb_info(fbi);
for (t = 0; t < ofbi->num_overlays; t++) {
l += snprintf(buf + l, PAGE_SIZE - l, "%s%d",
@@ -358,8 +353,7 @@ static ssize_t store_overlays_rotate(struct device *dev,
if (buf[len - 1] == '\n')
len = len - 1;
- if (!lock_fb_info(fbi))
- return -ENODEV;
+ lock_fb_info(fbi);
if (len > 0) {
char *p = (char *)buf;
@@ -442,8 +436,7 @@ static ssize_t store_size(struct device *dev, struct device_attribute *attr,
size = PAGE_ALIGN(size);
- if (!lock_fb_info(fbi))
- return -ENODEV;
+ lock_fb_info(fbi);
if (display && display->driver->sync)
display->driver->sync(display);
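
The omapfb-sysfs hunks drop every "!lock_fb_info() -> -ENODEV" branch; with the corresponding fbdev core change in this series the helper simply takes the framebuffer lock and cannot report failure. A minimal sketch of the resulting sysfs store shape (hypothetical attribute and helper, error handling trimmed):

static ssize_t store_example(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct fb_info *fbi = dev_get_drvdata(dev);
	int r;

	lock_fb_info(fbi);		/* unconditional: it cannot fail any more */
	r = example_apply(fbi, buf);	/* hypothetical helper */
	unlock_fb_info(fbi);

	return r ? r : count;
}
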
diff --git a/drivers/video/fbdev/platinumfb.c b/drivers/video/fbdev/platinumfb.c
index 76f299375a00..632b246ca35f 100644
--- a/drivers/video/fbdev/platinumfb.c
+++ b/drivers/video/fbdev/platinumfb.c
@@ -538,10 +538,9 @@ static int platinumfb_probe(struct platform_device* odev)
dev_info(&odev->dev, "Found Apple Platinum video hardware\n");
info = framebuffer_alloc(sizeof(*pinfo), &odev->dev);
- if (info == NULL) {
- dev_err(&odev->dev, "Failed to allocate fbdev !\n");
+ if (!info)
return -ENOMEM;
- }
+
pinfo = info->par;
if (of_address_to_resource(dp, 0, &pinfo->rsrc_reg) ||
diff --git a/drivers/video/fbdev/pmag-aa-fb.c b/drivers/video/fbdev/pmag-aa-fb.c
index ca7e9390d1e7..d1e78ce3a9c2 100644
--- a/drivers/video/fbdev/pmag-aa-fb.c
+++ b/drivers/video/fbdev/pmag-aa-fb.c
@@ -165,10 +165,8 @@ static int pmagaafb_probe(struct device *dev)
int err;
info = framebuffer_alloc(sizeof(struct aafb_par), dev);
- if (!info) {
- printk(KERN_ERR "%s: Cannot allocate memory\n", dev_name(dev));
+ if (!info)
return -ENOMEM;
- }
par = info->par;
dev_set_drvdata(dev, info);
diff --git a/drivers/video/fbdev/pmag-ba-fb.c b/drivers/video/fbdev/pmag-ba-fb.c
index 3b9249449ea6..56b912bb28de 100644
--- a/drivers/video/fbdev/pmag-ba-fb.c
+++ b/drivers/video/fbdev/pmag-ba-fb.c
@@ -150,10 +150,8 @@ static int pmagbafb_probe(struct device *dev)
int err;
info = framebuffer_alloc(sizeof(struct pmagbafb_par), dev);
- if (!info) {
- printk(KERN_ERR "%s: Cannot allocate memory\n", dev_name(dev));
+ if (!info)
return -ENOMEM;
- }
par = info->par;
dev_set_drvdata(dev, info);
diff --git a/drivers/video/fbdev/pmagb-b-fb.c b/drivers/video/fbdev/pmagb-b-fb.c
index e58df36233c4..2822b2225924 100644
--- a/drivers/video/fbdev/pmagb-b-fb.c
+++ b/drivers/video/fbdev/pmagb-b-fb.c
@@ -257,10 +257,8 @@ static int pmagbbfb_probe(struct device *dev)
int err;
info = framebuffer_alloc(sizeof(struct pmagbbfb_par), dev);
- if (!info) {
- printk(KERN_ERR "%s: Cannot allocate memory\n", dev_name(dev));
+ if (!info)
return -ENOMEM;
- }
par = info->par;
dev_set_drvdata(dev, info);
diff --git a/drivers/video/fbdev/pvr2fb.c b/drivers/video/fbdev/pvr2fb.c
index 73d92d8a85cc..7ff4b6b84282 100644
--- a/drivers/video/fbdev/pvr2fb.c
+++ b/drivers/video/fbdev/pvr2fb.c
@@ -140,7 +140,7 @@ static struct pvr2fb_par {
unsigned char is_doublescan; /* Are scanlines output twice? (doublescan) */
unsigned char is_lowres; /* Is horizontal pixel-doubling enabled? */
- unsigned long mmio_base; /* MMIO base */
+ void __iomem *mmio_base; /* MMIO base */
u32 palette[16];
} *currentpar;
@@ -194,39 +194,6 @@ static unsigned int shdma = PVR2_CASCADE_CHAN;
static unsigned int pvr2dma = ONCHIP_NR_DMA_CHANNELS;
#endif
-static int pvr2fb_setcolreg(unsigned int regno, unsigned int red, unsigned int green, unsigned int blue,
- unsigned int transp, struct fb_info *info);
-static int pvr2fb_blank(int blank, struct fb_info *info);
-static unsigned long get_line_length(int xres_virtual, int bpp);
-static void set_color_bitfields(struct fb_var_screeninfo *var);
-static int pvr2fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info);
-static int pvr2fb_set_par(struct fb_info *info);
-static void pvr2_update_display(struct fb_info *info);
-static void pvr2_init_display(struct fb_info *info);
-static void pvr2_do_blank(void);
-static irqreturn_t pvr2fb_interrupt(int irq, void *dev_id);
-static int pvr2_init_cable(void);
-static int pvr2_get_param(const struct pvr2_params *p, const char *s,
- int val, int size);
-#ifdef CONFIG_PVR2_DMA
-static ssize_t pvr2fb_write(struct fb_info *info, const char *buf,
- size_t count, loff_t *ppos);
-#endif
-
-static struct fb_ops pvr2fb_ops = {
- .owner = THIS_MODULE,
- .fb_setcolreg = pvr2fb_setcolreg,
- .fb_blank = pvr2fb_blank,
- .fb_check_var = pvr2fb_check_var,
- .fb_set_par = pvr2fb_set_par,
-#ifdef CONFIG_PVR2_DMA
- .fb_write = pvr2fb_write,
-#endif
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
-};
-
static struct fb_videomode pvr2_modedb[] = {
/*
* Broadcast video modes (PAL and NTSC). I'm unfamiliar with
@@ -354,6 +321,36 @@ static int pvr2fb_setcolreg(unsigned int regno, unsigned int red,
return 0;
}
+/*
+ * Determine the cable type and initialize the cable output format. Don't do
+ * anything if the cable type has been overridden (via "cable:XX").
+ */
+
+#define PCTRA ((void __iomem *)0xff80002c)
+#define PDTRA ((void __iomem *)0xff800030)
+#define VOUTC ((void __iomem *)0xa0702c00)
+
+static int pvr2_init_cable(void)
+{
+ if (cable_type < 0) {
+ fb_writel((fb_readl(PCTRA) & 0xfff0ffff) | 0x000a0000,
+ PCTRA);
+ cable_type = (fb_readw(PDTRA) >> 8) & 3;
+ }
+
+ /* Now select the output format (either composite or other) */
+ /* XXX: Save the previous val first, as this reg is also AICA
+ related */
+ if (cable_type == CT_COMPOSITE)
+ fb_writel(3 << 8, VOUTC);
+ else if (cable_type == CT_RGB)
+ fb_writel(1 << 9, VOUTC);
+ else
+ fb_writel(0, VOUTC);
+
+ return cable_type;
+}
+
static int pvr2fb_set_par(struct fb_info *info)
{
struct pvr2fb_par *par = (struct pvr2fb_par *)info->par;
@@ -623,7 +620,7 @@ static void pvr2_do_blank(void)
is_blanked = do_blank > 0 ? do_blank : 0;
}
-static irqreturn_t pvr2fb_interrupt(int irq, void *dev_id)
+static irqreturn_t __maybe_unused pvr2fb_interrupt(int irq, void *dev_id)
{
struct fb_info *info = dev_id;
@@ -642,36 +639,6 @@ static irqreturn_t pvr2fb_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-/*
- * Determine the cable type and initialize the cable output format. Don't do
- * anything if the cable type has been overidden (via "cable:XX").
- */
-
-#define PCTRA 0xff80002c
-#define PDTRA 0xff800030
-#define VOUTC 0xa0702c00
-
-static int pvr2_init_cable(void)
-{
- if (cable_type < 0) {
- fb_writel((fb_readl(PCTRA) & 0xfff0ffff) | 0x000a0000,
- PCTRA);
- cable_type = (fb_readw(PDTRA) >> 8) & 3;
- }
-
- /* Now select the output format (either composite or other) */
- /* XXX: Save the previous val first, as this reg is also AICA
- related */
- if (cable_type == CT_COMPOSITE)
- fb_writel(3 << 8, VOUTC);
- else if (cable_type == CT_RGB)
- fb_writel(1 << 9, VOUTC);
- else
- fb_writel(0, VOUTC);
-
- return cable_type;
-}
-
#ifdef CONFIG_PVR2_DMA
static ssize_t pvr2fb_write(struct fb_info *info, const char *buf,
size_t count, loff_t *ppos)
@@ -742,6 +709,46 @@ out_unmap:
}
#endif /* CONFIG_PVR2_DMA */
+static struct fb_ops pvr2fb_ops = {
+ .owner = THIS_MODULE,
+ .fb_setcolreg = pvr2fb_setcolreg,
+ .fb_blank = pvr2fb_blank,
+ .fb_check_var = pvr2fb_check_var,
+ .fb_set_par = pvr2fb_set_par,
+#ifdef CONFIG_PVR2_DMA
+ .fb_write = pvr2fb_write,
+#endif
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
+};
+
+#ifndef MODULE
+static int pvr2_get_param_val(const struct pvr2_params *p, const char *s,
+ int size)
+{
+ int i;
+
+ for (i = 0; i < size; i++) {
+ if (!strncasecmp(p[i].name, s, strlen(s)))
+ return p[i].val;
+ }
+ return -1;
+}
+#endif
+
+static char *pvr2_get_param_name(const struct pvr2_params *p, int val,
+ int size)
+{
+ int i;
+
+ for (i = 0; i < size; i++) {
+ if (p[i].val == val)
+ return p[i].name;
+ }
+ return NULL;
+}
+
/**
* pvr2fb_common_init
*
@@ -760,7 +767,7 @@ out_unmap:
* in for flexibility anyways. Who knows, maybe someone has tv-out on a
* PCI-based version of these things ;-)
*/
-static int pvr2fb_common_init(void)
+static int __maybe_unused pvr2fb_common_init(void)
{
struct pvr2fb_par *par = currentpar;
unsigned long modememused, rev;
@@ -773,8 +780,8 @@ static int pvr2fb_common_init(void)
goto out_err;
}
- par->mmio_base = (unsigned long)ioremap_nocache(pvr2_fix.mmio_start,
- pvr2_fix.mmio_len);
+ par->mmio_base = ioremap_nocache(pvr2_fix.mmio_start,
+ pvr2_fix.mmio_len);
if (!par->mmio_base) {
printk(KERN_ERR "pvr2fb: Failed to remap mmio space\n");
goto out_err;
@@ -822,8 +829,8 @@ static int pvr2fb_common_init(void)
fb_info->var.xres, fb_info->var.yres,
fb_info->var.bits_per_pixel,
get_line_length(fb_info->var.xres, fb_info->var.bits_per_pixel),
- (char *)pvr2_get_param(cables, NULL, cable_type, 3),
- (char *)pvr2_get_param(outputs, NULL, video_output, 3));
+ pvr2_get_param_name(cables, cable_type, 3),
+ pvr2_get_param_name(outputs, video_output, 3));
#ifdef CONFIG_SH_STORE_QUEUES
fb_notice(fb_info, "registering with SQ API\n");
@@ -841,7 +848,7 @@ out_err:
if (fb_info->screen_base)
iounmap(fb_info->screen_base);
if (par->mmio_base)
- iounmap((void *)par->mmio_base);
+ iounmap(par->mmio_base);
return -ENXIO;
}
@@ -901,15 +908,15 @@ static int __init pvr2fb_dc_init(void)
return pvr2fb_common_init();
}
-static void __exit pvr2fb_dc_exit(void)
+static void pvr2fb_dc_exit(void)
{
if (fb_info->screen_base) {
iounmap(fb_info->screen_base);
fb_info->screen_base = NULL;
}
if (currentpar->mmio_base) {
- iounmap((void *)currentpar->mmio_base);
- currentpar->mmio_base = 0;
+ iounmap(currentpar->mmio_base);
+ currentpar->mmio_base = NULL;
}
free_irq(HW_EVENT_VSYNC, fb_info);
@@ -958,8 +965,8 @@ static void pvr2fb_pci_remove(struct pci_dev *pdev)
fb_info->screen_base = NULL;
}
if (currentpar->mmio_base) {
- iounmap((void *)currentpar->mmio_base);
- currentpar->mmio_base = 0;
+ iounmap(currentpar->mmio_base);
+ currentpar->mmio_base = NULL;
}
pci_release_regions(pdev);
@@ -985,29 +992,12 @@ static int __init pvr2fb_pci_init(void)
return pci_register_driver(&pvr2fb_pci_driver);
}
-static void __exit pvr2fb_pci_exit(void)
+static void pvr2fb_pci_exit(void)
{
pci_unregister_driver(&pvr2fb_pci_driver);
}
#endif /* CONFIG_PCI */
-static int pvr2_get_param(const struct pvr2_params *p, const char *s, int val,
- int size)
-{
- int i;
-
- for (i = 0 ; i < size ; i++ ) {
- if (s != NULL) {
- if (!strncasecmp(p[i].name, s, strlen(s)))
- return p[i].val;
- } else {
- if (p[i].val == val)
- return (int)p[i].name;
- }
- }
- return -1;
-}
-
/*
* Parse command arguments. Supported arguments are:
* inverse Use inverse color maps
@@ -1047,9 +1037,9 @@ static int __init pvr2fb_setup(char *options)
}
if (*cable_arg)
- cable_type = pvr2_get_param(cables, cable_arg, 0, 3);
+ cable_type = pvr2_get_param_val(cables, cable_arg, 3);
if (*output_arg)
- video_output = pvr2_get_param(outputs, output_arg, 0, 3);
+ video_output = pvr2_get_param_val(outputs, output_arg, 3);
return 0;
}
@@ -1082,12 +1072,8 @@ static int __init pvr2fb_init(void)
#endif
fb_info = framebuffer_alloc(sizeof(struct pvr2fb_par), NULL);
-
- if (!fb_info) {
- printk(KERN_ERR "Failed to allocate memory for fb_info\n");
+ if (!fb_info)
return -ENOMEM;
- }
-
currentpar = fb_info->par;
diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c
index d59c8a59f582..4282cb117b92 100644
--- a/drivers/video/fbdev/pxafb.c
+++ b/drivers/video/fbdev/pxafb.c
@@ -2068,7 +2068,7 @@ static int __init pxafb_setup_options(void)
#define pxafb_setup_options() (0)
module_param_string(options, g_options, sizeof(g_options), 0);
-MODULE_PARM_DESC(options, "LCD parameters (see Documentation/fb/pxafb.txt)");
+MODULE_PARM_DESC(options, "LCD parameters (see Documentation/fb/pxafb.rst)");
#endif
#else
diff --git a/drivers/video/fbdev/riva/fbdev.c b/drivers/video/fbdev/riva/fbdev.c
index cc242ba057d3..ca593a3e41d7 100644
--- a/drivers/video/fbdev/riva/fbdev.c
+++ b/drivers/video/fbdev/riva/fbdev.c
@@ -1902,7 +1902,6 @@ static int rivafb_probe(struct pci_dev *pd, const struct pci_device_id *ent)
info = framebuffer_alloc(sizeof(struct riva_par), &pd->dev);
if (!info) {
- printk (KERN_ERR PFX "could not allocate memory\n");
ret = -ENOMEM;
goto err_ret;
}
diff --git a/drivers/video/fbdev/s3c-fb.c b/drivers/video/fbdev/s3c-fb.c
index 288300035164..ba04d7a67829 100644
--- a/drivers/video/fbdev/s3c-fb.c
+++ b/drivers/video/fbdev/s3c-fb.c
@@ -284,7 +284,7 @@ static int s3c_fb_check_var(struct fb_var_screeninfo *var,
/* 666 with one bit alpha/transparency */
var->transp.offset = 18;
var->transp.length = 1;
- /* drop through */
+ /* fall through */
case 18:
var->bits_per_pixel = 32;
@@ -312,7 +312,7 @@ static int s3c_fb_check_var(struct fb_var_screeninfo *var,
case 25:
var->transp.length = var->bits_per_pixel - 24;
var->transp.offset = 24;
- /* drop through */
+ /* fall through */
case 24:
/* our 24bpp is unpacked, so 32bpp */
var->bits_per_pixel = 32;
@@ -809,7 +809,7 @@ static int s3c_fb_blank(int blank_mode, struct fb_info *info)
case FB_BLANK_POWERDOWN:
wincon &= ~WINCONx_ENWIN;
sfb->enabled &= ~(1 << index);
- /* fall through to FB_BLANK_NORMAL */
+ /* fall through - to FB_BLANK_NORMAL */
case FB_BLANK_NORMAL:
/* disable the DMA and display 0x0 (black) */
@@ -1102,14 +1102,14 @@ static int s3c_fb_alloc_memory(struct s3c_fb *sfb, struct s3c_fb_win *win)
dev_dbg(sfb->dev, "want %u bytes for window\n", size);
- fbi->screen_base = dma_alloc_wc(sfb->dev, size, &map_dma, GFP_KERNEL);
- if (!fbi->screen_base)
+ fbi->screen_buffer = dma_alloc_wc(sfb->dev, size, &map_dma, GFP_KERNEL);
+ if (!fbi->screen_buffer)
return -ENOMEM;
dev_dbg(sfb->dev, "mapped %x to %p\n",
- (unsigned int)map_dma, fbi->screen_base);
+ (unsigned int)map_dma, fbi->screen_buffer);
- memset(fbi->screen_base, 0x0, size);
+ memset(fbi->screen_buffer, 0x0, size);
fbi->fix.smem_start = map_dma;
return 0;
@@ -1126,9 +1126,9 @@ static void s3c_fb_free_memory(struct s3c_fb *sfb, struct s3c_fb_win *win)
{
struct fb_info *fbi = win->fbinfo;
- if (fbi->screen_base)
+ if (fbi->screen_buffer)
dma_free_wc(sfb->dev, PAGE_ALIGN(fbi->fix.smem_len),
- fbi->screen_base, fbi->fix.smem_start);
+ fbi->screen_buffer, fbi->fix.smem_start);
}
/**
@@ -1186,10 +1186,8 @@ static int s3c_fb_probe_win(struct s3c_fb *sfb, unsigned int win_no,
fbinfo = framebuffer_alloc(sizeof(struct s3c_fb_win) +
palette_size * sizeof(u32), sfb->dev);
- if (!fbinfo) {
- dev_err(sfb->dev, "failed to allocate framebuffer\n");
- return -ENOENT;
- }
+ if (!fbinfo)
+ return -ENOMEM;
windata = sfb->pdata->win[win_no];
initmode = *sfb->pdata->vtiming;
diff --git a/drivers/video/fbdev/s3fb.c b/drivers/video/fbdev/s3fb.c
index d63f23e26f7d..be16c349c10f 100644
--- a/drivers/video/fbdev/s3fb.c
+++ b/drivers/video/fbdev/s3fb.c
@@ -1128,10 +1128,8 @@ static int s3_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
/* Allocate and fill driver data structure */
info = framebuffer_alloc(sizeof(struct s3fb_info), &(dev->dev));
- if (!info) {
- dev_err(&(dev->dev), "cannot allocate memory\n");
+ if (!info)
return -ENOMEM;
- }
par = info->par;
mutex_init(&par->open_lock);
diff --git a/drivers/video/fbdev/sa1100fb.c b/drivers/video/fbdev/sa1100fb.c
index 15ae50063296..f7f8dee044b1 100644
--- a/drivers/video/fbdev/sa1100fb.c
+++ b/drivers/video/fbdev/sa1100fb.c
@@ -974,35 +974,10 @@ static void sa1100fb_task(struct work_struct *w)
*/
static unsigned int sa1100fb_min_dma_period(struct sa1100fb_info *fbi)
{
-#if 0
- unsigned int min_period = (unsigned int)-1;
- int i;
-
- for (i = 0; i < MAX_NR_CONSOLES; i++) {
- struct display *disp = &fb_display[i];
- unsigned int period;
-
- /*
- * Do we own this display?
- */
- if (disp->fb_info != &fbi->fb)
- continue;
-
- /*
- * Ok, calculate its DMA period
- */
- period = sa1100fb_display_dma_period(&disp->var);
- if (period < min_period)
- min_period = period;
- }
-
- return min_period;
-#else
/*
* FIXME: we need to verify _all_ consoles.
*/
return sa1100fb_display_dma_period(&fbi->fb.var);
-#endif
}
/*
diff --git a/drivers/video/fbdev/savage/savagefb_driver.c b/drivers/video/fbdev/savage/savagefb_driver.c
index 47b78f0138c3..512789f5f884 100644
--- a/drivers/video/fbdev/savage/savagefb_driver.c
+++ b/drivers/video/fbdev/savage/savagefb_driver.c
@@ -2333,14 +2333,7 @@ static void savagefb_remove(struct pci_dev *dev)
DBG("savagefb_remove");
if (info) {
- /*
- * If unregister_framebuffer fails, then
- * we will be leaving hooks that could cause
- * oopsen laying around.
- */
- if (unregister_framebuffer(info))
- printk(KERN_WARNING "savagefb: danger danger! "
- "Oopsen imminent!\n");
+ unregister_framebuffer(info);
#ifdef CONFIG_FB_SAVAGE_I2C
savagefb_delete_i2c_busses(info);
diff --git a/drivers/video/fbdev/sh7760fb.c b/drivers/video/fbdev/sh7760fb.c
index 405715b60ec7..ab8fe838c776 100644
--- a/drivers/video/fbdev/sh7760fb.c
+++ b/drivers/video/fbdev/sh7760fb.c
@@ -6,7 +6,7 @@
* Manuel Lauss <mano@roarinelk.homelinux.net>
* (c) 2008 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
*
- * PLEASE HAVE A LOOK AT Documentation/fb/sh7760fb.txt!
+ * PLEASE HAVE A LOOK AT Documentation/fb/sh7760fb.rst!
*
* Thanks to Siegfried Schaefer <s.schaefer at schaefer-edv.de>
* for his original source and testing!
diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c
index dc46be38c970..ac0bcac9a865 100644
--- a/drivers/video/fbdev/sh_mobile_lcdcfb.c
+++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c
@@ -15,6 +15,7 @@
#include <linux/ctype.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
+#include <linux/fbcon.h>
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -213,7 +214,6 @@ struct sh_mobile_lcdc_priv {
struct sh_mobile_lcdc_chan ch[2];
struct sh_mobile_lcdc_overlay overlays[4];
- struct notifier_block notifier;
int started;
int forced_fourcc; /* 2 channel LCDC must share fourcc setting */
};
@@ -534,89 +534,9 @@ static void sh_mobile_lcdc_display_off(struct sh_mobile_lcdc_chan *ch)
ch->tx_dev->ops->display_off(ch->tx_dev);
}
-static bool
-sh_mobile_lcdc_must_reconfigure(struct sh_mobile_lcdc_chan *ch,
- const struct fb_videomode *new_mode)
-{
- dev_dbg(ch->info->dev, "Old %ux%u, new %ux%u\n",
- ch->display.mode.xres, ch->display.mode.yres,
- new_mode->xres, new_mode->yres);
-
- /* It can be a different monitor with an equal video-mode */
- if (fb_mode_is_equal(&ch->display.mode, new_mode))
- return false;
-
- dev_dbg(ch->info->dev, "Switching %u -> %u lines\n",
- ch->display.mode.yres, new_mode->yres);
- ch->display.mode = *new_mode;
-
- return true;
-}
-
static int sh_mobile_lcdc_check_var(struct fb_var_screeninfo *var,
struct fb_info *info);
-static int sh_mobile_lcdc_display_notify(struct sh_mobile_lcdc_chan *ch,
- enum sh_mobile_lcdc_entity_event event,
- const struct fb_videomode *mode,
- const struct fb_monspecs *monspec)
-{
- struct fb_info *info = ch->info;
- struct fb_var_screeninfo var;
- int ret = 0;
-
- switch (event) {
- case SH_MOBILE_LCDC_EVENT_DISPLAY_CONNECT:
- /* HDMI plug in */
- console_lock();
- if (lock_fb_info(info)) {
-
-
- ch->display.width = monspec->max_x * 10;
- ch->display.height = monspec->max_y * 10;
-
- if (!sh_mobile_lcdc_must_reconfigure(ch, mode) &&
- info->state == FBINFO_STATE_RUNNING) {
- /* First activation with the default monitor.
- * Just turn on, if we run a resume here, the
- * logo disappears.
- */
- info->var.width = ch->display.width;
- info->var.height = ch->display.height;
- sh_mobile_lcdc_display_on(ch);
- } else {
- /* New monitor or have to wake up */
- fb_set_suspend(info, 0);
- }
-
-
- unlock_fb_info(info);
- }
- console_unlock();
- break;
-
- case SH_MOBILE_LCDC_EVENT_DISPLAY_DISCONNECT:
- /* HDMI disconnect */
- console_lock();
- if (lock_fb_info(info)) {
- fb_set_suspend(info, 1);
- unlock_fb_info(info);
- }
- console_unlock();
- break;
-
- case SH_MOBILE_LCDC_EVENT_DISPLAY_MODE:
- /* Validate a proposed new mode */
- fb_videomode_to_var(&var, mode);
- var.bits_per_pixel = info->var.bits_per_pixel;
- var.grayscale = info->var.grayscale;
- ret = sh_mobile_lcdc_check_var(&var, info);
- break;
- }
-
- return ret;
-}
-
/* -----------------------------------------------------------------------------
* Format helpers
*/
@@ -1644,10 +1564,8 @@ sh_mobile_lcdc_overlay_fb_init(struct sh_mobile_lcdc_overlay *ovl)
/* Allocate and initialize the frame buffer device. */
info = framebuffer_alloc(0, priv->dev);
- if (info == NULL) {
- dev_err(priv->dev, "unable to allocate fb_info\n");
+ if (!info)
return -ENOMEM;
- }
ovl->info = info;
@@ -1838,8 +1756,6 @@ static void sh_mobile_fb_reconfig(struct fb_info *info)
struct sh_mobile_lcdc_chan *ch = info->par;
struct fb_var_screeninfo var;
struct fb_videomode mode;
- struct fb_event event;
- int evnt = FB_EVENT_MODE_CHANGE_ALL;
if (ch->use_count > 1 || (ch->use_count == 1 && !info->fbcon_par))
/* More framebuffer users are active */
@@ -1861,14 +1777,7 @@ static void sh_mobile_fb_reconfig(struct fb_info *info)
/* Couldn't reconfigure, hopefully, can continue as before */
return;
- /*
- * fb_set_var() calls the notifier change internally, only if
- * FBINFO_MISC_USEREVENT flag is set. Since we do not want to fake a
- * user event, we have to call the chain ourselves.
- */
- event.info = info;
- event.data = &ch->display.mode;
- fb_notifier_call_chain(evnt, &event);
+ fbcon_update_vcs(info, true);
}
/*
@@ -2138,10 +2047,8 @@ sh_mobile_lcdc_channel_fb_init(struct sh_mobile_lcdc_chan *ch,
* list and allocate the color map.
*/
info = framebuffer_alloc(0, priv->dev);
- if (info == NULL) {
- dev_err(priv->dev, "unable to allocate fb_info\n");
+ if (!info)
return -ENOMEM;
- }
ch->info = info;
@@ -2319,37 +2226,6 @@ static const struct dev_pm_ops sh_mobile_lcdc_dev_pm_ops = {
* Framebuffer notifier
*/
-/* locking: called with info->lock held */
-static int sh_mobile_lcdc_notify(struct notifier_block *nb,
- unsigned long action, void *data)
-{
- struct fb_event *event = data;
- struct fb_info *info = event->info;
- struct sh_mobile_lcdc_chan *ch = info->par;
-
- if (&ch->lcdc->notifier != nb)
- return NOTIFY_DONE;
-
- dev_dbg(info->dev, "%s(): action = %lu, data = %p\n",
- __func__, action, event->data);
-
- switch(action) {
- case FB_EVENT_SUSPEND:
- sh_mobile_lcdc_display_off(ch);
- sh_mobile_lcdc_stop(ch->lcdc);
- break;
- case FB_EVENT_RESUME:
- mutex_lock(&ch->open_lock);
- sh_mobile_fb_reconfig(info);
- mutex_unlock(&ch->open_lock);
-
- sh_mobile_lcdc_display_on(ch);
- sh_mobile_lcdc_start(ch->lcdc);
- }
-
- return NOTIFY_OK;
-}
-
/* -----------------------------------------------------------------------------
* Probe/remove and driver init/exit
*/
@@ -2377,8 +2253,6 @@ static int sh_mobile_lcdc_remove(struct platform_device *pdev)
struct sh_mobile_lcdc_priv *priv = platform_get_drvdata(pdev);
unsigned int i;
- fb_unregister_client(&priv->notifier);
-
for (i = 0; i < ARRAY_SIZE(priv->overlays); i++)
sh_mobile_lcdc_overlay_fb_unregister(&priv->overlays[i]);
for (i = 0; i < ARRAY_SIZE(priv->ch); i++)
@@ -2540,8 +2414,6 @@ sh_mobile_lcdc_channel_init(struct sh_mobile_lcdc_chan *ch)
unsigned int max_size;
unsigned int i;
- ch->notify = sh_mobile_lcdc_display_notify;
-
/* Validate the format. */
format = sh_mobile_format_info(cfg->fourcc);
if (format == NULL) {
@@ -2770,10 +2642,6 @@ static int sh_mobile_lcdc_probe(struct platform_device *pdev)
goto err1;
}
- /* Failure ignored */
- priv->notifier.notifier_call = sh_mobile_lcdc_notify;
- fb_register_client(&priv->notifier);
-
return 0;
err1:
sh_mobile_lcdc_remove(pdev);
diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.h b/drivers/video/fbdev/sh_mobile_lcdcfb.h
index b8e47a8bd8ab..589400372098 100644
--- a/drivers/video/fbdev/sh_mobile_lcdcfb.h
+++ b/drivers/video/fbdev/sh_mobile_lcdcfb.h
@@ -87,11 +87,6 @@ struct sh_mobile_lcdc_chan {
unsigned long base_addr_c;
unsigned int line_size;
- int (*notify)(struct sh_mobile_lcdc_chan *ch,
- enum sh_mobile_lcdc_entity_event event,
- const struct fb_videomode *mode,
- const struct fb_monspecs *monspec);
-
/* Backlight */
struct backlight_device *bl;
unsigned int bl_brightness;
diff --git a/drivers/video/fbdev/sm501fb.c b/drivers/video/fbdev/sm501fb.c
index 5a326163847b..6edb4492e675 100644
--- a/drivers/video/fbdev/sm501fb.c
+++ b/drivers/video/fbdev/sm501fb.c
@@ -1865,10 +1865,8 @@ static int sm501fb_probe_one(struct sm501fb_info *info,
}
fbi = framebuffer_alloc(sizeof(struct sm501fb_par), info->dev);
- if (fbi == NULL) {
- dev_err(info->dev, "cannot allocate %s framebuffer\n", name);
+ if (!fbi)
return -ENOMEM;
- }
par = fbi->par;
par->info = info;
diff --git a/drivers/video/fbdev/sm712fb.c b/drivers/video/fbdev/sm712fb.c
index f1dcc6766d1e..7b1b0d8d27a7 100644
--- a/drivers/video/fbdev/sm712fb.c
+++ b/drivers/video/fbdev/sm712fb.c
@@ -1538,7 +1538,6 @@ static int smtcfb_pci_probe(struct pci_dev *pdev,
info = framebuffer_alloc(sizeof(*sfb), &pdev->dev);
if (!info) {
- dev_err(&pdev->dev, "framebuffer_alloc failed\n");
err = -ENOMEM;
goto failed_free;
}
diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c
index 8cd7892a0b0d..0e0f5bbfc5ef 100644
--- a/drivers/video/fbdev/smscufx.c
+++ b/drivers/video/fbdev/smscufx.c
@@ -1650,10 +1650,8 @@ static int ufx_usb_probe(struct usb_interface *interface,
/* allocates framebuffer driver structure, not framebuffer memory */
info = framebuffer_alloc(0, &usbdev->dev);
- if (!info) {
- dev_err(dev->gdev, "framebuffer_alloc failed\n");
+ if (!info)
goto e_nomem;
- }
dev->info = info;
info->par = dev;
diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c
index 021b727e8b5c..b674948e3bb8 100644
--- a/drivers/video/fbdev/ssd1307fb.c
+++ b/drivers/video/fbdev/ssd1307fb.c
@@ -555,10 +555,8 @@ static int ssd1307fb_probe(struct i2c_client *client,
}
info = framebuffer_alloc(sizeof(struct ssd1307fb_par), &client->dev);
- if (!info) {
- dev_err(&client->dev, "Couldn't allocate framebuffer.\n");
+ if (!info)
return -ENOMEM;
- }
par = info->par;
par->info = info;
diff --git a/drivers/video/fbdev/sunxvr1000.c b/drivers/video/fbdev/sunxvr1000.c
index 8fe37c0ef2f5..784c9bd5d502 100644
--- a/drivers/video/fbdev/sunxvr1000.c
+++ b/drivers/video/fbdev/sunxvr1000.c
@@ -121,7 +121,6 @@ static int gfb_probe(struct platform_device *op)
info = framebuffer_alloc(sizeof(struct gfb_info), &op->dev);
if (!info) {
- printk(KERN_ERR "gfb: Cannot allocate fb_info\n");
err = -ENOMEM;
goto err_out;
}
diff --git a/drivers/video/fbdev/sunxvr2500.c b/drivers/video/fbdev/sunxvr2500.c
index 544465ba1dc0..31683e5a8b79 100644
--- a/drivers/video/fbdev/sunxvr2500.c
+++ b/drivers/video/fbdev/sunxvr2500.c
@@ -132,7 +132,6 @@ static int s3d_pci_register(struct pci_dev *pdev,
info = framebuffer_alloc(sizeof(struct s3d_info), &pdev->dev);
if (!info) {
- printk(KERN_ERR "s3d: Cannot allocate fb_info\n");
err = -ENOMEM;
goto err_disable;
}
diff --git a/drivers/video/fbdev/sunxvr500.c b/drivers/video/fbdev/sunxvr500.c
index bc595937df08..d392976126a6 100644
--- a/drivers/video/fbdev/sunxvr500.c
+++ b/drivers/video/fbdev/sunxvr500.c
@@ -272,7 +272,6 @@ static int e3d_pci_register(struct pci_dev *pdev,
info = framebuffer_alloc(sizeof(struct e3d_info), &pdev->dev);
if (!info) {
- printk(KERN_ERR "e3d: Cannot allocate fb_info\n");
err = -ENOMEM;
goto err_disable;
}
diff --git a/drivers/video/fbdev/tgafb.c b/drivers/video/fbdev/tgafb.c
index 65ba9921506e..286b2371c7dd 100644
--- a/drivers/video/fbdev/tgafb.c
+++ b/drivers/video/fbdev/tgafb.c
@@ -1416,10 +1416,8 @@ static int tgafb_register(struct device *dev)
/* Allocate the fb and par structures. */
info = framebuffer_alloc(sizeof(struct tga_par), dev);
- if (!info) {
- printk(KERN_ERR "tgafb: Cannot allocate memory\n");
+ if (!info)
return -ENOMEM;
- }
par = info->par;
dev_set_drvdata(dev, info);
diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
index 00b99363e528..c328e8265cb1 100644
--- a/drivers/video/fbdev/udlfb.c
+++ b/drivers/video/fbdev/udlfb.c
@@ -1686,10 +1686,8 @@ static int dlfb_usb_probe(struct usb_interface *intf,
/* allocates framebuffer driver structure, not framebuffer memory */
info = framebuffer_alloc(0, &dlfb->udev->dev);
- if (!info) {
- dev_err(&dlfb->udev->dev, "framebuffer_alloc failed\n");
+ if (!info)
goto error;
- }
dlfb->info = info;
info->par = dlfb;
diff --git a/drivers/video/fbdev/via/viafbdev.c b/drivers/video/fbdev/via/viafbdev.c
index 8db5de13e2b7..f815f98190bc 100644
--- a/drivers/video/fbdev/via/viafbdev.c
+++ b/drivers/video/fbdev/via/viafbdev.c
@@ -1742,10 +1742,8 @@ int via_fb_pci_probe(struct viafb_dev *vdev)
viafbinfo = framebuffer_alloc(viafb_par_length +
ALIGN(sizeof(struct viafb_shared), BITS_PER_LONG/8),
&vdev->pdev->dev);
- if (!viafbinfo) {
- printk(KERN_ERR"Could not allocate memory for viafb_info.\n");
+ if (!viafbinfo)
return -ENOMEM;
- }
viaparinfo = (struct viafb_par *)viafbinfo->par;
viaparinfo->shared = viafbinfo->par + viafb_par_length;
@@ -1820,8 +1818,6 @@ int via_fb_pci_probe(struct viafb_dev *vdev)
viafbinfo1 = framebuffer_alloc(viafb_par_length,
&vdev->pdev->dev);
if (!viafbinfo1) {
- printk(KERN_ERR
- "allocate the second framebuffer struct error\n");
rc = -ENOMEM;
goto out_fb_release;
}
diff --git a/drivers/video/fbdev/vt8623fb.c b/drivers/video/fbdev/vt8623fb.c
index 5cac871db3ee..c339a8fbad81 100644
--- a/drivers/video/fbdev/vt8623fb.c
+++ b/drivers/video/fbdev/vt8623fb.c
@@ -669,10 +669,8 @@ static int vt8623_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
/* Allocate and fill driver data structure */
info = framebuffer_alloc(sizeof(struct vt8623fb_info), &(dev->dev));
- if (! info) {
- dev_err(&(dev->dev), "cannot allocate memory\n");
+ if (!info)
return -ENOMEM;
- }
par = info->par;
mutex_init(&par->open_lock);
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index ffe754539f5a..6cad0b33d7ad 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -18,7 +18,7 @@ menuconfig WATCHDOG
reboot the machine) and a driver for hardware watchdog boards, which
are more robust and can also keep track of the temperature inside
your computer. For details, read
- <file:Documentation/watchdog/watchdog-api.txt> in the kernel source.
+ <file:Documentation/watchdog/watchdog-api.rst> in the kernel source.
The watchdog is usually used together with the watchdog daemon
which is available from
@@ -1870,7 +1870,7 @@ config BOOKE_WDT
Watchdog driver for PowerPC Book-E chips, such as the Freescale
MPC85xx SOCs and the IBM PowerPC 440.
- Please see Documentation/watchdog/watchdog-api.txt for
+ Please see Documentation/watchdog/watchdog-api.rst for
more information.
config BOOKE_WDT_DEFAULT_TIMEOUT
@@ -2019,7 +2019,7 @@ config PCWATCHDOG
This card simply watches your kernel to make sure it doesn't freeze,
and if it does, it reboots your computer after a certain amount of
time. This driver is like the WDT501 driver but for different
- hardware. Please read <file:Documentation/watchdog/pcwd-watchdog.txt>. The PC
+ hardware. Please read <file:Documentation/watchdog/pcwd-watchdog.rst>. The PC
watchdog cards can be ordered from <http://www.berkprod.com/>.
To compile this driver as a module, choose M here: the
diff --git a/drivers/watchdog/smsc37b787_wdt.c b/drivers/watchdog/smsc37b787_wdt.c
index 13c817ea1d6a..f5713030d0f7 100644
--- a/drivers/watchdog/smsc37b787_wdt.c
+++ b/drivers/watchdog/smsc37b787_wdt.c
@@ -36,7 +36,7 @@
* mknod /dev/watchdog c 10 130
*
* For an example userspace keep-alive daemon, see:
- * Documentation/watchdog/wdt.txt
+ * Documentation/watchdog/wdt.rst
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt